serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
19,901 | #include <fstream>
#include <iostream>
#include <iomanip>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <math.h>
using namespace std;
// LSB-encode the payload into the image, one thread per output pixel.
// Each pixel's four channels (x, y, z, w) receive four consecutive bits
// (one nibble) of a data byte in their least-significant bits:
// pixel 2*b carries the high nibble of byte b, pixel 2*b+1 the low nibble.
__global__ void encode_per_pixel_kernel(uchar4* const d_destImg,
                                        const char* const d_binData,
                                        int numBytesData)
{
    // Two pixels are consumed per byte of data (4 bits per pixel).
    int pixel = threadIdx.x + blockDim.x * blockIdx.x;
    if (pixel >= 2 * numBytesData)
        return;
    // Which byte (0..numBytesData-1) and which nibble (0 = high, 1 = low).
    int byteIndex = pixel / 2;
    int nibble = pixel % 2;
    // Work on an unsigned copy: right-shifting a negative signed char is
    // implementation-defined; shifting an unsigned value is well defined.
    unsigned char dataByte = (unsigned char)d_binData[byteIndex];
    // Local copy keeps global traffic to one load and one store per pixel.
    uchar4 outputPixel = d_destImg[pixel];
    // Bit offset within the byte: 7 for nibble 0, 3 for nibble 1.
    int offset = 7 - 4 * nibble;
    // Channel 0 (first bit of the nibble)
    unsigned char bit = (dataByte >> offset) & 1;
    outputPixel.x = (outputPixel.x & ~1) | bit;
    // Channel 1 (2nd bit)
    bit = (dataByte >> (offset - 1)) & 1;
    outputPixel.y = (outputPixel.y & ~1) | bit;
    // Channel 2 (3rd bit)
    bit = (dataByte >> (offset - 2)) & 1;
    outputPixel.z = (outputPixel.z & ~1) | bit;
    // Channel 3 (4th bit) — the alpha channel.
    bit = (dataByte >> (offset - 3)) & 1;
    outputPixel.w = (outputPixel.w & ~1) | bit;
    d_destImg[pixel] = outputPixel;
}
// LSB-encode the payload, one thread per channel, i.e. one thread per bit
// of data (8 threads per data byte, 4 channels per pixel).
// Thread t works on pixel t/4, channel t%4 — bit t%8 of byte t/8.
__global__ void encode_per_channel_kernel(uchar4* const d_destImg,
                                          const char* const d_binData,
                                          int numBytesData)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= 8 * numBytesData)
        return;
    // Channel (0-3) within the pixel, and pixel (0 .. 2*numBytesData - 1).
    int channel = idx % 4;
    int pixel = idx / 4;
    // Which byte (0..numBytesData-1) and which nibble (0 = high, 1 = low).
    int byteIndex = pixel / 2;
    int nibble = pixel % 2;
    // Unsigned copy: right-shifting a negative signed char is
    // implementation-defined; shifting unsigned is well defined.
    unsigned char dataByte = (unsigned char)d_binData[byteIndex];
    // Offset is 7 for channel 0 / nibble 0 down to 0 for channel 3 / nibble 1.
    int offset = (7 - 4 * nibble) - channel;
    unsigned char bit = (dataByte >> offset) & 1;
    // BUG FIX: the original loaded the entire uchar4, patched one channel and
    // stored the whole uchar4 back — but the four threads sharing a pixel did
    // this concurrently, racing on the three channels each thread did not own
    // (lost updates). Writing only this thread's channel byte removes the
    // race: every byte of the image is written by exactly one thread.
    // (uchar4 is four contiguous unsigned chars in x, y, z, w order.)
    unsigned char* channelBytes = reinterpret_cast<unsigned char*>(d_destImg);
    int byteOffset = 4 * pixel + channel;
    channelBytes[byteOffset] = (channelBytes[byteOffset] & ~1) | bit;
}
/**
| 10 11 12 15 ; 11 255 12 0 |
| 15 10 13 5 ; 15 14 19 80 | Original image (each set of 4 is 1 pixel).
| 12 14 16 21 ; 14 18 10 16 |
| 11 11 11 11 ; 10 10 10 10 |
and
[ 1001 0110 1111 0000 1010 0101 0100 1100] Data file
=
| 11 10 12 15 ; 10 255 13 0 |
| 15 11 13 5 ; 14 14 18 80 | Encoded image
| 13 14 17 20 ; 14 19 10 17 |
| 11 10 11 11 ; 11 11 10 10 |
To encode the data, we will use the least significant bit approach by
modifying the LSB of each channel of each pixel of the input image. The
LSB will match the corresponding bit of the input data. The data can be
decoded by reading the LSB from the encoded image.
For example, if the channel byte is 0001 1001 (value of 25) and we want to
encode a 1, the byte would remain the same. If we want to encode a 0, the
byte would become 0001 1000 (value of 24).
If the channel byte is 0010 1110 (value of 46), and we want to encode a 1,
then the byte would become 0010 1111 (value of 47). If we want to encode a
0, then the byte would remain the same.
*/
// Host driver: uploads the source image and payload to the GPU, runs the
// per-pixel LSB-encoding kernel, and downloads the encoded image into
// h_destImg. numBytesData bytes of h_binData are embedded; the caller must
// ensure the image has at least 2*numBytesData pixels.
void encode_parallel(const uchar4* const h_sourceImg,
                     uchar4* const h_destImg,
                     const char* const h_binData,
                     int numBytesData,
                     const size_t numRowsSource, const size_t numColsSource)
{
    const size_t numPixels = numRowsSource * numColsSource;
    // Allocate device memory and upload the source image and payload.
    uchar4* d_destImg;
    char* d_binData;
    cudaMalloc(&d_destImg, sizeof(uchar4) * numPixels);
    cudaMalloc(&d_binData, sizeof(char) * numBytesData);
    cudaMemcpy(d_destImg, h_sourceImg, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice);
    cudaMemcpy(d_binData, h_binData, numBytesData, cudaMemcpyHostToDevice);
    // Each thread handles 1 pixel -> 1 thread per 4 bits of data
    // (2 threads per byte). Integer ceil-division replaces the original
    // `numBytesData * 2.0` / float-ceil computation, which loses precision
    // once the thread count exceeds the float mantissa.
    int numThreads = numBytesData * 2;
    int threadsPerBlock = 1024;
    int numBlocks = (numThreads + threadsPerBlock - 1) / threadsPerBlock;
    encode_per_pixel_kernel<<<numBlocks, threadsPerBlock>>>(d_destImg, d_binData, numBytesData);
    // Surface launch failures instead of silently copying back garbage.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        std::cerr << "encode_per_pixel_kernel launch failed: "
                  << cudaGetErrorString(err) << std::endl;
    // Alternative mapping: 1 thread per channel (8 threads per byte).
    //int numThreads = numBytesData * 8;
    //int numBlocks = (numThreads + threadsPerBlock - 1) / threadsPerBlock;
    //encode_per_channel_kernel<<<numBlocks, threadsPerBlock>>>(d_destImg, d_binData, numBytesData);
    // cudaMemcpy synchronizes with the kernel before copying back.
    cudaMemcpy(h_destImg, d_destImg, sizeof(uchar4) * numPixels, cudaMemcpyDeviceToHost);
    // Free device memory.
    cudaFree(d_destImg);
    cudaFree(d_binData);
}
|
19,902 | #include <stdio.h>
#include "cuda.h"
// Integer helper macros used in both host and device code.
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
// Ceiling of the integer division a/b; used to size the kernel grids.
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort the process with a diagnostic if the most recent CUDA API call
// or kernel launch recorded an error; otherwise do nothing.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
// Adds the x-direction (dxinv0) and y-direction (dxinv1) wide central-
// difference contributions (offsets +/-1..+/-4, weights 0.8/0.2/0.038/0.0035)
// to flux components 0-3, one thread per interior grid point.
// All arrays are reinterpreted as dense [308][308][308] cubes, and the bounds
// test uses N for every dimension — assumes L == M == N == 308; TODO confirm.
// Machine-generated code: statement order is preserved exactly.
__global__ void hypterm_0 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determine this thread's global (i,j,k) grid point.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
//Reinterpret the flat pointers as 3-D [308][308][308] arrays.
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
//Interior points only (4-point halo on each face). '&' is bitwise here,
//but the operands are 0/1 so it behaves as logical AND without branching.
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
//flux_0: x-direction differences of cons_1.
double _t_1_ = cons_1[k][j][i+1];
_t_1_ -= cons_1[k][j][i-1];
double flux_0kc0jc0ic0 = dxinv0 * 0.8 * _t_1_;
double _t_2_ = cons_1[k][j][i+2];
_t_2_ -= cons_1[k][j][i-2];
flux_0kc0jc0ic0 -= dxinv0 * 0.2 * _t_2_;
double _t_3_ = cons_1[k][j][i+3];
_t_3_ -= cons_1[k][j][i-3];
flux_0kc0jc0ic0 += dxinv0 * 0.038 * _t_3_;
double _t_4_ = cons_1[k][j][i+4];
_t_4_ -= cons_1[k][j][i-4];
flux_0kc0jc0ic0 -= dxinv0 * 0.0035 * _t_4_;
//flux_1: x-direction differences of cons_1*q_1 plus the q_4 term.
double _t_12_ = cons_1[k][j][i+1] * q_1[k][j][i+1];
_t_12_ -= cons_1[k][j][i-1] * q_1[k][j][i-1];
_t_12_ += q_4[k][j][i+1];
_t_12_ -= q_4[k][j][i-1];
double flux_1kc0jc0ic0 = dxinv0 * 0.8 * _t_12_;
double _t_13_ = cons_1[k][j][i+2] * q_1[k][j][i+2];
_t_13_ -= cons_1[k][j][i-2] * q_1[k][j][i-2];
_t_13_ += q_4[k][j][i+2];
_t_13_ -= q_4[k][j][i-2];
flux_1kc0jc0ic0 -= dxinv0 * 0.2 * _t_13_;
double _t_14_ = cons_1[k][j][i+3] * q_1[k][j][i+3];
_t_14_ -= cons_1[k][j][i-3] * q_1[k][j][i-3];
_t_14_ += q_4[k][j][i+3];
_t_14_ -= q_4[k][j][i-3];
flux_1kc0jc0ic0 += dxinv0 * 0.038 * _t_14_;
double _t_15_ = cons_1[k][j][i+4] * q_1[k][j][i+4];
_t_15_ -= cons_1[k][j][i-4] * q_1[k][j][i-4];
_t_15_ += q_4[k][j][i+4];
_t_15_ -= q_4[k][j][i-4];
flux_1kc0jc0ic0 -= dxinv0 * 0.0035 * _t_15_;
//flux_2: x-direction differences of cons_2*q_1.
double _t_23_ = cons_2[k][j][i+1] * q_1[k][j][i+1];
_t_23_ -= cons_2[k][j][i-1] * q_1[k][j][i-1];
double flux_2kc0jc0ic0 = dxinv0 * 0.8 * _t_23_;
double _t_24_ = cons_2[k][j][i+2] * q_1[k][j][i+2];
_t_24_ -= cons_2[k][j][i-2] * q_1[k][j][i-2];
flux_2kc0jc0ic0 -= dxinv0 * 0.2 * _t_24_;
double _t_25_ = cons_2[k][j][i+3] * q_1[k][j][i+3];
_t_25_ -= cons_2[k][j][i-3] * q_1[k][j][i-3];
flux_2kc0jc0ic0 += dxinv0 * 0.038 * _t_25_;
double _t_26_ = cons_2[k][j][i+4] * q_1[k][j][i+4];
_t_26_ -= cons_2[k][j][i-4] * q_1[k][j][i-4];
flux_2kc0jc0ic0 -= dxinv0 * 0.0035 * _t_26_;
//flux_3: x-direction differences of cons_3*q_1.
double _t_34_ = cons_3[k][j][i+1] * q_1[k][j][i+1];
_t_34_ -= cons_3[k][j][i-1] * q_1[k][j][i-1];
double flux_3kc0jc0ic0 = dxinv0 * 0.8 * _t_34_;
double _t_35_ = cons_3[k][j][i+2] * q_1[k][j][i+2];
_t_35_ -= cons_3[k][j][i-2] * q_1[k][j][i-2];
flux_3kc0jc0ic0 -= dxinv0 * 0.2 * _t_35_;
double _t_36_ = cons_3[k][j][i+3] * q_1[k][j][i+3];
_t_36_ -= cons_3[k][j][i-3] * q_1[k][j][i-3];
flux_3kc0jc0ic0 += dxinv0 * 0.038 * _t_36_;
double _t_37_ = cons_3[k][j][i+4] * q_1[k][j][i+4];
_t_37_ -= cons_3[k][j][i-4] * q_1[k][j][i-4];
flux_3kc0jc0ic0 -= dxinv0 * 0.0035 * _t_37_;
//y-direction contribution to flux_0 (differences of cons_2).
double _t_7_ = cons_2[k][j+1][i];
_t_7_ -= cons_2[k][j-1][i];
double _t_5_ = dxinv1 * 0.8 * _t_7_;
double _t_8_ = cons_2[k][j+2][i];
_t_8_ -= cons_2[k][j-2][i];
_t_5_ -= dxinv1 * 0.2 * _t_8_;
double _t_9_ = cons_2[k][j+3][i];
_t_9_ -= cons_2[k][j-3][i];
_t_5_ += dxinv1 * 0.038 * _t_9_;
double _t_10_ = cons_2[k][j+4][i];
_t_10_ -= cons_2[k][j-4][i];
_t_5_ -= dxinv1 * 0.0035 * _t_10_;
flux_0kc0jc0ic0 -= _t_5_;
//y-direction contribution to flux_2 (cons_2*q_2 plus the q_4 term).
double _t_29_ = cons_2[k][j+1][i] * q_2[k][j+1][i];
_t_29_ -= cons_2[k][j-1][i] * q_2[k][j-1][i];
_t_29_ += q_4[k][j+1][i];
_t_29_ -= q_4[k][j-1][i];
double _t_27_ = dxinv1 * 0.8 * _t_29_;
double _t_30_ = cons_2[k][j+2][i] * q_2[k][j+2][i];
_t_30_ -= cons_2[k][j-2][i] * q_2[k][j-2][i];
_t_30_ += q_4[k][j+2][i];
_t_30_ -= q_4[k][j-2][i];
_t_27_ -= dxinv1 * 0.2 * _t_30_;
double _t_31_ = cons_2[k][j+3][i] * q_2[k][j+3][i];
_t_31_ -= cons_2[k][j-3][i] * q_2[k][j-3][i];
_t_31_ += q_4[k][j+3][i];
_t_31_ -= q_4[k][j-3][i];
_t_27_ += dxinv1 * 0.038 * _t_31_;
double _t_32_ = cons_2[k][j+4][i] * q_2[k][j+4][i];
_t_32_ -= cons_2[k][j-4][i] * q_2[k][j-4][i];
_t_32_ += q_4[k][j+4][i];
_t_32_ -= q_4[k][j-4][i];
_t_27_ -= dxinv1 * 0.0035 * _t_32_;
flux_2kc0jc0ic0 -= _t_27_;
//y-direction contribution to flux_1 (cons_1*q_2).
double _t_18_ = cons_1[k][j+1][i] * q_2[k][j+1][i];
_t_18_ -= cons_1[k][j-1][i] * q_2[k][j-1][i];
double _t_16_ = dxinv1 * 0.8 * _t_18_;
double _t_19_ = cons_1[k][j+2][i] * q_2[k][j+2][i];
_t_19_ -= cons_1[k][j-2][i] * q_2[k][j-2][i];
_t_16_ -= dxinv1 * 0.2 * _t_19_;
double _t_20_ = cons_1[k][j+3][i] * q_2[k][j+3][i];
_t_20_ -= cons_1[k][j-3][i] * q_2[k][j-3][i];
_t_16_ += dxinv1 * 0.038 * _t_20_;
double _t_21_ = cons_1[k][j+4][i] * q_2[k][j+4][i];
_t_21_ -= cons_1[k][j-4][i] * q_2[k][j-4][i];
_t_16_ -= dxinv1 * 0.0035 * _t_21_;
flux_1kc0jc0ic0 -= _t_16_;
//y-direction contribution to flux_3 (cons_3*q_2).
double _t_40_ = cons_3[k][j+1][i] * q_2[k][j+1][i];
_t_40_ -= cons_3[k][j-1][i] * q_2[k][j-1][i];
double _t_38_ = dxinv1 * 0.8 * _t_40_;
double _t_41_ = cons_3[k][j+2][i] * q_2[k][j+2][i];
_t_41_ -= cons_3[k][j-2][i] * q_2[k][j-2][i];
_t_38_ -= dxinv1 * 0.2 * _t_41_;
double _t_42_ = cons_3[k][j+3][i] * q_2[k][j+3][i];
_t_42_ -= cons_3[k][j-3][i] * q_2[k][j-3][i];
_t_38_ += dxinv1 * 0.038 * _t_42_;
double _t_43_ = cons_3[k][j+4][i] * q_2[k][j+4][i];
_t_43_ -= cons_3[k][j-4][i] * q_2[k][j-4][i];
_t_38_ -= dxinv1 * 0.0035 * _t_43_;
flux_3kc0jc0ic0 -= _t_38_;
//Single store of each accumulated flux component.
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
}
}
// Adds the z-direction (dxinv2) stencil contributions to flux components 0-3.
// Each thread processes FOUR consecutive k planes (k, k+1, k+2, k+3): note
// the 4* factors in k0 and k below, matched by gridconfig_1 in host_code.
// Because k is always a multiple of 4 and the guard requires k <= N-5, the
// deepest read (index k+7) stays within the [308] extent when N == 308.
// Machine-generated code: statement order is preserved exactly.
__global__ void hypterm_1 (double * __restrict__ flux_in_0, double * __restrict__ flux_in_1, double * __restrict__ flux_in_2, double * __restrict__ flux_in_3, double * __restrict__ cons_in_1, double * __restrict__ cons_in_2, double * __restrict__ cons_in_3, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determine this thread's global (i,j,k) grid point; k advances by 4.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(4*blockdim_k);
int k = max (k0, 0) + (int)(4*threadIdx.z);
//Reinterpret the flat pointers as 3-D [308][308][308] arrays.
double (*flux_0)[308][308] = (double (*)[308][308])flux_in_0;
double (*flux_1)[308][308] = (double (*)[308][308])flux_in_1;
double (*flux_2)[308][308] = (double (*)[308][308])flux_in_2;
double (*flux_3)[308][308] = (double (*)[308][308])flux_in_3;
double (*cons_1)[308][308] = (double (*)[308][308])cons_in_1;
double (*cons_2)[308][308] = (double (*)[308][308])cons_in_2;
double (*cons_3)[308][308] = (double (*)[308][308])cons_in_3;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
//Registers holding the previously accumulated flux of the 4 k planes.
double flux0_a, flux1_a, flux2_a, flux3_a;
double flux0_b, flux1_b, flux2_b, flux3_b;
double flux0_c, flux1_c, flux2_c, flux3_c;
double flux0_d, flux1_d, flux2_d, flux3_d;
//Interior points only; bitwise '&' on 0/1 operands acts as logical AND.
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
//flux_0, planes k..k+3: z-differences of cons_3.
flux0_a = flux_0[k][j][i];
double flux_0kc0jc0ic0 = flux0_a;
double _t_1_ = cons_3[k+1][j][i];
_t_1_ -= cons_3[k-1][j][i];
double _v_0_ = dxinv2 * 0.8 * _t_1_;
double _t_2_ = cons_3[k+2][j][i];
_t_2_ -= cons_3[k-2][j][i];
_v_0_ -= dxinv2 * 0.2 * _t_2_;
double _t_3_ = cons_3[k+3][j][i];
_t_3_ -= cons_3[k-3][j][i];
_v_0_ += dxinv2 * 0.038 * _t_3_;
double _t_4_ = cons_3[k+4][j][i];
_t_4_ -= cons_3[k-4][j][i];
_v_0_ -= dxinv2 * 0.0035 * _t_4_;
flux_0kc0jc0ic0 -= _v_0_;
flux0_b = flux_0[k+1][j][i];
double flux_0kp1jc0ic0 = flux0_b;
double _t_7_ = cons_3[k+3][j][i];
_t_7_ -= cons_3[k-1][j][i];
double _v_1_ = -(dxinv2 * 0.2 * _t_7_);
double _t_8_ = cons_3[k+4][j][i];
_t_8_ -= cons_3[k-2][j][i];
_v_1_ += dxinv2 * 0.038 * _t_8_;
double _t_6_ = cons_3[k+2][j][i];
_t_6_ -= cons_3[k][j][i];
_v_1_ += dxinv2 * 0.8 * _t_6_;
double _t_9_ = -(cons_3[k-3][j][i]);
_t_9_ += cons_3[k+5][j][i];
_v_1_ -= dxinv2 * 0.0035 * _t_9_;
flux_0kp1jc0ic0 -= _v_1_;
flux0_c = flux_0[k+2][j][i];
double flux_0kp2jc0ic0 = flux0_c;
double _t_11_ = cons_3[k+3][j][i];
_t_11_ -= cons_3[k+1][j][i];
double _v_2_ = dxinv2 * 0.8 * _t_11_;
double _t_12_ = cons_3[k+4][j][i];
_t_12_ -= cons_3[k][j][i];
_v_2_ -= dxinv2 * 0.2 * _t_12_;
double _t_13_ = cons_3[k+5][j][i];
_t_13_ -= cons_3[k-1][j][i];
_v_2_ += dxinv2 * 0.038 * _t_13_;
double _t_14_ = -(cons_3[k-2][j][i]);
_t_14_ += cons_3[k+6][j][i];
_v_2_ -= dxinv2 * 0.0035 * _t_14_;
flux_0kp2jc0ic0 -= _v_2_;
flux0_d = flux_0[k+3][j][i];
double flux_0kp3jc0ic0 = flux0_d;
double _t_16_ = cons_3[k+4][j][i];
_t_16_ -= cons_3[k+2][j][i];
double _v_3_ = dxinv2 * 0.8 * _t_16_;
double _t_17_ = cons_3[k+5][j][i];
_t_17_ -= cons_3[k+1][j][i];
_v_3_ -= dxinv2 * 0.2 * _t_17_;
double _t_18_ = cons_3[k+6][j][i];
_t_18_ -= cons_3[k][j][i];
_v_3_ += dxinv2 * 0.038 * _t_18_;
double _t_19_ = -(cons_3[k-1][j][i]);
_t_19_ += cons_3[k+7][j][i];
_v_3_ -= dxinv2 * 0.0035 * _t_19_;
flux_0kp3jc0ic0 -= _v_3_;
//flux_1, planes k..k+3: z-differences of cons_1*q_3 (products reused below).
flux1_a = flux_1[k][j][i];
double flux_1kc0jc0ic0 = flux1_a;
double _t_24_ = -(cons_1[k-4][j][i] * q_3[k-4][j][i]);
_t_24_ += cons_1[k+4][j][i] * q_3[k+4][j][i];
double _v_12_ = -(dxinv2 * 0.0035 * _t_24_);
double _t_21_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
_t_21_ -= cons_1[k-1][j][i] * q_3[k-1][j][i];
_v_12_ += dxinv2 * 0.8 * _t_21_;
double _t_22_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
_t_22_ -= cons_1[k-2][j][i] * q_3[k-2][j][i];
_v_12_ -= dxinv2 * 0.2 * _t_22_;
double _t_23_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
_t_23_ -= cons_1[k-3][j][i] * q_3[k-3][j][i];
_v_12_ += dxinv2 * 0.038 * _t_23_;
flux_1kc0jc0ic0 -= _v_12_;
flux1_b = flux_1[k+1][j][i];
double flux_1kp1jc0ic0 = flux1_b;
//Pre-compute the cons*q_3 products shared by the remaining planes.
double _v_15_ = cons_1[k+3][j][i] * q_3[k+3][j][i];
double _v_16_ = cons_1[k-1][j][i] * q_3[k-1][j][i];
double _v_17_ = cons_1[k+4][j][i] * q_3[k+4][j][i];
double _v_18_ = cons_1[k-2][j][i] * q_3[k-2][j][i];
double _v_20_ = cons_1[k-3][j][i] * q_3[k-3][j][i];
double _v_13_ = cons_1[k+2][j][i] * q_3[k+2][j][i];
double _v_23_ = cons_1[k+1][j][i] * q_3[k+1][j][i];
double _v_47_ = cons_2[k-4][j][i] * q_3[k-4][j][i];
double _v_83_ = cons_3[k-4][j][i] * q_3[k-4][j][i];
double _v_82_ = cons_3[k+4][j][i] * q_3[k+4][j][i];
double _v_76_ = cons_3[k+1][j][i] * q_3[k+1][j][i];
double _v_77_ = cons_3[k-1][j][i] * q_3[k-1][j][i];
double _v_78_ = cons_3[k+2][j][i] * q_3[k+2][j][i];
double _v_79_ = cons_3[k-2][j][i] * q_3[k-2][j][i];
double _v_80_ = cons_3[k+3][j][i] * q_3[k+3][j][i];
double _v_81_ = cons_3[k-3][j][i] * q_3[k-3][j][i];
double _t_27_ = _v_15_;
_t_27_ -= _v_16_;
double _v_21_ = -(dxinv2 * 0.2 * _t_27_);
double _t_28_ = _v_17_;
_t_28_ -= _v_18_;
_v_21_ += dxinv2 * 0.038 * _t_28_;
double _t_29_ = -(_v_20_);
_t_29_ += cons_1[k+5][j][i] * q_3[k+5][j][i];
_v_21_ -= dxinv2 * 0.0035 * _t_29_;
double _t_26_ = _v_13_;
_t_26_ -= cons_1[k][j][i] * q_3[k][j][i];
_v_21_ += dxinv2 * 0.8 * _t_26_;
flux_1kp1jc0ic0 -= _v_21_;
flux1_c = flux_1[k+2][j][i];
double flux_1kp2jc0ic0 = flux1_c;
double _v_25_ = cons_1[k][j][i] * q_3[k][j][i];
double _v_26_ = cons_1[k+5][j][i] * q_3[k+5][j][i];
double _v_91_ = cons_3[k+5][j][i] * q_3[k+5][j][i];
double _v_86_ = cons_3[k][j][i] * q_3[k][j][i];
double _t_31_ = _v_15_;
_t_31_ -= _v_23_;
double _v_30_ = dxinv2 * 0.8 * _t_31_;
double _t_32_ = _v_17_;
_t_32_ -= _v_25_;
_v_30_ -= dxinv2 * 0.2 * _t_32_;
double _t_33_ = _v_26_;
_t_33_ -= _v_16_;
_v_30_ += dxinv2 * 0.038 * _t_33_;
double _t_34_ = -(_v_18_);
_t_34_ += cons_1[k+6][j][i] * q_3[k+6][j][i];
_v_30_ -= dxinv2 * 0.0035 * _t_34_;
flux_1kp2jc0ic0 -= _v_30_;
flux1_d = flux_1[k+3][j][i];
double flux_1kp3jc0ic0 = flux1_d;
double _v_35_ = cons_1[k+6][j][i] * q_3[k+6][j][i];
double _v_100_ = cons_3[k+6][j][i] * q_3[k+6][j][i];
double _t_36_ = _v_17_;
_t_36_ -= _v_13_;
double _v_39_ = dxinv2 * 0.8 * _t_36_;
double _t_37_ = _v_26_;
_t_37_ -= _v_23_;
_v_39_ -= dxinv2 * 0.2 * _t_37_;
double _t_38_ = _v_35_;
_t_38_ -= _v_25_;
_v_39_ += dxinv2 * 0.038 * _t_38_;
double _t_39_ = -(_v_16_);
_t_39_ += cons_1[k+7][j][i] * q_3[k+7][j][i];
_v_39_ -= dxinv2 * 0.0035 * _t_39_;
flux_1kp3jc0ic0 -= _v_39_;
//flux_2, planes k..k+3: z-differences of cons_2*q_3.
flux2_a = flux_2[k][j][i];
double flux_2kc0jc0ic0 = flux2_a;
double _v_73_ = cons_2[k+7][j][i] * q_3[k+7][j][i];
double _v_109_ = cons_3[k+7][j][i] * q_3[k+7][j][i];
double _t_44_ = -(_v_47_);
double _v_46_ = cons_2[k+4][j][i] * q_3[k+4][j][i];
_t_44_ += _v_46_;
double _v_48_ = -(dxinv2 * 0.0035 * _t_44_);
double _v_40_ = cons_2[k+1][j][i] * q_3[k+1][j][i];
double _t_41_ = _v_40_;
double _v_41_ = cons_2[k-1][j][i] * q_3[k-1][j][i];
_t_41_ -= _v_41_;
_v_48_ += dxinv2 * 0.8 * _t_41_;
double _v_42_ = cons_2[k+2][j][i] * q_3[k+2][j][i];
double _t_42_ = _v_42_;
double _v_43_ = cons_2[k-2][j][i] * q_3[k-2][j][i];
_t_42_ -= _v_43_;
_v_48_ -= dxinv2 * 0.2 * _t_42_;
double _v_44_ = cons_2[k+3][j][i] * q_3[k+3][j][i];
double _t_43_ = _v_44_;
double _v_45_ = cons_2[k-3][j][i] * q_3[k-3][j][i];
_t_43_ -= _v_45_;
_v_48_ += dxinv2 * 0.038 * _t_43_;
flux_2kc0jc0ic0 -= _v_48_;
flux2_b = flux_2[k+1][j][i];
double flux_2kp1jc0ic0 = flux2_b;
double _t_47_ = _v_44_;
_t_47_ -= _v_41_;
double _v_57_ = -(dxinv2 * 0.2 * _t_47_);
double _t_48_ = _v_46_;
_t_48_ -= _v_43_;
_v_57_ += dxinv2 * 0.038 * _t_48_;
double _t_49_ = -(_v_45_);
double _v_55_ = cons_2[k+5][j][i] * q_3[k+5][j][i];
_t_49_ += _v_55_;
_v_57_ -= dxinv2 * 0.0035 * _t_49_;
double _t_46_ = _v_42_;
double _v_50_ = cons_2[k][j][i] * q_3[k][j][i];
_t_46_ -= _v_50_;
_v_57_ += dxinv2 * 0.8 * _t_46_;
flux_2kp1jc0ic0 -= _v_57_;
flux2_c = flux_2[k+2][j][i];
double flux_2kp2jc0ic0 = flux2_c;
double _t_51_ = _v_44_;
_t_51_ -= _v_40_;
double _v_66_ = dxinv2 * 0.8 * _t_51_;
double _t_52_ = _v_46_;
_t_52_ -= _v_50_;
_v_66_ -= dxinv2 * 0.2 * _t_52_;
double _t_53_ = _v_55_;
_t_53_ -= _v_41_;
_v_66_ += dxinv2 * 0.038 * _t_53_;
double _t_54_ = -(_v_43_);
double _v_64_ = cons_2[k+6][j][i] * q_3[k+6][j][i];
_t_54_ += _v_64_;
_v_66_ -= dxinv2 * 0.0035 * _t_54_;
flux_2kp2jc0ic0 -= _v_66_;
flux2_d = flux_2[k+3][j][i];
double flux_2kp3jc0ic0 = flux2_d;
double _t_56_ = _v_46_;
_t_56_ -= _v_42_;
double _v_75_ = dxinv2 * 0.8 * _t_56_;
double _t_57_ = _v_55_;
_t_57_ -= _v_40_;
_v_75_ -= dxinv2 * 0.2 * _t_57_;
double _t_58_ = _v_64_;
_t_58_ -= _v_50_;
_v_75_ += dxinv2 * 0.038 * _t_58_;
double _t_59_ = -(_v_41_);
_t_59_ += _v_73_;
_v_75_ -= dxinv2 * 0.0035 * _t_59_;
flux_2kp3jc0ic0 -= _v_75_;
//flux_3, planes k..k+3: z-differences of cons_3*q_3 plus the q_4 term.
flux3_a = flux_3[k][j][i];
double flux_3kc0jc0ic0 = flux3_a;
double _t_64_ = -(_v_83_);
_t_64_ += _v_82_;
_t_64_ -= q_4[k-4][j][i];
_t_64_ += q_4[k+4][j][i];
double _v_84_ = -(dxinv2 * 0.0035 * _t_64_);
double _t_61_ = _v_76_;
_t_61_ -= _v_77_;
_t_61_ += q_4[k+1][j][i];
_t_61_ -= q_4[k-1][j][i];
_v_84_ += dxinv2 * 0.8 * _t_61_;
double _t_62_ = _v_78_;
_t_62_ -= _v_79_;
_t_62_ += q_4[k+2][j][i];
_t_62_ -= q_4[k-2][j][i];
_v_84_ -= dxinv2 * 0.2 * _t_62_;
double _t_63_ = _v_80_;
_t_63_ -= _v_81_;
_t_63_ += q_4[k+3][j][i];
_t_63_ -= q_4[k-3][j][i];
_v_84_ += dxinv2 * 0.038 * _t_63_;
flux_3kc0jc0ic0 -= _v_84_;
flux3_b = flux_3[k+1][j][i];
double flux_3kp1jc0ic0 = flux3_b;
double _t_69_ = -(q_4[k-3][j][i]);
_t_69_ -= _v_81_;
_t_69_ += _v_91_;
_t_69_ += q_4[k+5][j][i];
double _v_93_ = -(dxinv2 * 0.0035 * _t_69_);
double _t_67_ = _v_80_;
_t_67_ -= _v_77_;
_t_67_ += q_4[k+3][j][i];
_t_67_ -= q_4[k-1][j][i];
_v_93_ -= dxinv2 * 0.2 * _t_67_;
double _t_68_ = _v_82_;
_t_68_ -= _v_79_;
_t_68_ += q_4[k+4][j][i];
_t_68_ -= q_4[k-2][j][i];
_v_93_ += dxinv2 * 0.038 * _t_68_;
double _t_66_ = _v_78_;
_t_66_ -= _v_86_;
_t_66_ += q_4[k+2][j][i];
_t_66_ -= q_4[k][j][i];
_v_93_ += dxinv2 * 0.8 * _t_66_;
flux_3kp1jc0ic0 -= _v_93_;
flux3_c = flux_3[k+2][j][i];
double flux_3kp2jc0ic0 = flux3_c;
double _t_71_ = q_4[k+3][j][i];
_t_71_ -= q_4[k+1][j][i];
double _t_74_ = -(q_4[k-2][j][i]);
double _t_72_ = q_4[k+4][j][i];
_t_72_ -= q_4[k][j][i];
double _t_73_ = q_4[k+5][j][i];
_t_73_ -= q_4[k-1][j][i];
double _t_76_ = q_4[k+4][j][i];
_t_76_ -= q_4[k+2][j][i];
double _t_77_ = q_4[k+5][j][i];
_t_77_ -= q_4[k+1][j][i];
double _t_78_ = -(q_4[k][j][i]);
double _t_79_ = -(q_4[k-1][j][i]);
_t_71_ += _v_80_;
_t_71_ -= _v_76_;
double _v_102_ = dxinv2 * 0.8 * _t_71_;
_t_74_ -= _v_79_;
_t_74_ += _v_100_;
_t_74_ += q_4[k+6][j][i];
_t_78_ += q_4[k+6][j][i];
_v_102_ -= dxinv2 * 0.0035 * _t_74_;
_t_72_ += _v_82_;
_t_72_ -= _v_86_;
_v_102_ -= dxinv2 * 0.2 * _t_72_;
_t_73_ += _v_91_;
_t_73_ -= _v_77_;
_v_102_ += dxinv2 * 0.038 * _t_73_;
flux_3kp2jc0ic0 -= _v_102_;
flux3_d = flux_3[k+3][j][i];
double flux_3kp3jc0ic0 = flux3_d;
_t_76_ += _v_82_;
_t_76_ -= _v_78_;
double _v_111_ = dxinv2 * 0.8 * _t_76_;
_t_77_ += _v_91_;
_t_77_ -= _v_76_;
_v_111_ -= dxinv2 * 0.2 * _t_77_;
_t_78_ += _v_100_;
_t_78_ -= _v_86_;
_v_111_ += dxinv2 * 0.038 * _t_78_;
_t_79_ += _v_109_;
_t_79_ -= _v_77_;
_t_79_ += q_4[k+7][j][i];
_v_111_ -= dxinv2 * 0.0035 * _t_79_;
flux_3kp3jc0ic0 -= _v_111_;
//Write back all sixteen updated flux values (4 components x 4 k planes).
flux_0[k][j][i] = flux_0kc0jc0ic0;
flux_0[k+1][j][i] = flux_0kp1jc0ic0;
flux_0[k+2][j][i] = flux_0kp2jc0ic0;
flux_0[k+3][j][i] = flux_0kp3jc0ic0;
flux_1[k][j][i] = flux_1kc0jc0ic0;
flux_1[k+1][j][i] = flux_1kp1jc0ic0;
flux_1[k+2][j][i] = flux_1kp2jc0ic0;
flux_1[k+3][j][i] = flux_1kp3jc0ic0;
flux_2[k][j][i] = flux_2kc0jc0ic0;
flux_2[k+1][j][i] = flux_2kp1jc0ic0;
flux_2[k+2][j][i] = flux_2kp2jc0ic0;
flux_2[k+3][j][i] = flux_2kp3jc0ic0;
flux_3[k][j][i] = flux_3kc0jc0ic0;
flux_3[k+1][j][i] = flux_3kp1jc0ic0;
flux_3[k+2][j][i] = flux_3kp2jc0ic0;
flux_3[k+3][j][i] = flux_3kp3jc0ic0;
}
}
// Computes flux component 4 from all three directions (dxinv0/1/2) in one
// pass: stencil of cons_4*q_d plus q_4*q_d at offsets +/-1..+/-4. Each thread
// processes TWO consecutive k planes (k and k+1): note the 2* factors in k0
// and k, matched by gridconfig_2 in host_code. Since k is always even and the
// guard requires k <= N-5, the deepest read (k+1+4) stays inside [308] when
// N == 308. Machine-generated code: statements preserved exactly.
__global__ void hypterm_2 (double * __restrict__ flux_in_4, double * __restrict__ cons_in_4, double * __restrict__ q_in_1, double * __restrict__ q_in_2, double * __restrict__ q_in_3, double * __restrict__ q_in_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
//Determine this thread's global (i,j,k) grid point; k advances by 2.
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(2*blockdim_k);
int k = max (k0, 0) + (int)(2*threadIdx.z);
//Reinterpret the flat pointers as 3-D [308][308][308] arrays.
double (*flux_4)[308][308] = (double (*)[308][308])flux_in_4;
double (*q_1)[308][308] = (double (*)[308][308])q_in_1;
double (*q_2)[308][308] = (double (*)[308][308])q_in_2;
double (*q_3)[308][308] = (double (*)[308][308])q_in_3;
double (*q_4)[308][308] = (double (*)[308][308])q_in_4;
double (*cons_4)[308][308] = (double (*)[308][308])cons_in_4;
//Interior points only; bitwise '&' on 0/1 operands acts as logical AND.
if (i>=4 & j>=4 & k>=4 & i<=N-5 & j<=N-5 & k<=N-5) {
//x-direction term initializes flux_4; y and z terms are then subtracted
//directly in global memory for both k planes.
flux_4[k][j][i] = ((0.8*(cons_4[k][j][i+1]*q_1[k][j][i+1]-cons_4[k][j][i-1]*q_1[k][j][i-1]+(q_4[k][j][i+1]*q_1[k][j][i+1]-q_4[k][j][i-1]*q_1[k][j][i-1]))-0.2*(cons_4[k][j][i+2]*q_1[k][j][i+2]-cons_4[k][j][i-2]*q_1[k][j][i-2]+(q_4[k][j][i+2]*q_1[k][j][i+2]-q_4[k][j][i-2]*q_1[k][j][i-2]))+0.038*(cons_4[k][j][i+3]*q_1[k][j][i+3]-cons_4[k][j][i-3]*q_1[k][j][i-3]+(q_4[k][j][i+3]*q_1[k][j][i+3]-q_4[k][j][i-3]*q_1[k][j][i-3]))-0.0035*(cons_4[k][j][i+4]*q_1[k][j][i+4]-cons_4[k][j][i-4]*q_1[k][j][i-4]+(q_4[k][j][i+4]*q_1[k][j][i+4]-q_4[k][j][i-4]*q_1[k][j][i-4])))*dxinv0);
flux_4[k+1][j][i] = ((0.8*(cons_4[k+1][j][i+1]*q_1[k+1][j][i+1]-cons_4[k+1][j][i-1]*q_1[k+1][j][i-1]+(q_4[k+1][j][i+1]*q_1[k+1][j][i+1]-q_4[k+1][j][i-1]*q_1[k+1][j][i-1]))-0.2*(cons_4[k+1][j][i+2]*q_1[k+1][j][i+2]-cons_4[k+1][j][i-2]*q_1[k+1][j][i-2]+(q_4[k+1][j][i+2]*q_1[k+1][j][i+2]-q_4[k+1][j][i-2]*q_1[k+1][j][i-2]))+0.038*(cons_4[k+1][j][i+3]*q_1[k+1][j][i+3]-cons_4[k+1][j][i-3]*q_1[k+1][j][i-3]+(q_4[k+1][j][i+3]*q_1[k+1][j][i+3]-q_4[k+1][j][i-3]*q_1[k+1][j][i-3]))-0.0035*(cons_4[k+1][j][i+4]*q_1[k+1][j][i+4]-cons_4[k+1][j][i-4]*q_1[k+1][j][i-4]+(q_4[k+1][j][i+4]*q_1[k+1][j][i+4]-q_4[k+1][j][i-4]*q_1[k+1][j][i-4])))*dxinv0);
flux_4[k][j][i] -= (0.8*(cons_4[k][j+1][i]*q_2[k][j+1][i]-cons_4[k][j-1][i]*q_2[k][j-1][i]+(q_4[k][j+1][i]*q_2[k][j+1][i]-q_4[k][j-1][i]*q_2[k][j-1][i]))-0.2*(cons_4[k][j+2][i]*q_2[k][j+2][i]-cons_4[k][j-2][i]*q_2[k][j-2][i]+(q_4[k][j+2][i]*q_2[k][j+2][i]-q_4[k][j-2][i]*q_2[k][j-2][i]))+0.038*(cons_4[k][j+3][i]*q_2[k][j+3][i]-cons_4[k][j-3][i]*q_2[k][j-3][i]+(q_4[k][j+3][i]*q_2[k][j+3][i]-q_4[k][j-3][i]*q_2[k][j-3][i]))-0.0035*(cons_4[k][j+4][i]*q_2[k][j+4][i]-cons_4[k][j-4][i]*q_2[k][j-4][i]+(q_4[k][j+4][i]*q_2[k][j+4][i]-q_4[k][j-4][i]*q_2[k][j-4][i])))*dxinv1;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1][j+1][i]*q_2[k+1][j+1][i]-cons_4[k+1][j-1][i]*q_2[k+1][j-1][i]+(q_4[k+1][j+1][i]*q_2[k+1][j+1][i]-q_4[k+1][j-1][i]*q_2[k+1][j-1][i]))-0.2*(cons_4[k+1][j+2][i]*q_2[k+1][j+2][i]-cons_4[k+1][j-2][i]*q_2[k+1][j-2][i]+(q_4[k+1][j+2][i]*q_2[k+1][j+2][i]-q_4[k+1][j-2][i]*q_2[k+1][j-2][i]))+0.038*(cons_4[k+1][j+3][i]*q_2[k+1][j+3][i]-cons_4[k+1][j-3][i]*q_2[k+1][j-3][i]+(q_4[k+1][j+3][i]*q_2[k+1][j+3][i]-q_4[k+1][j-3][i]*q_2[k+1][j-3][i]))-0.0035*(cons_4[k+1][j+4][i]*q_2[k+1][j+4][i]-cons_4[k+1][j-4][i]*q_2[k+1][j-4][i]+(q_4[k+1][j+4][i]*q_2[k+1][j+4][i]-q_4[k+1][j-4][i]*q_2[k+1][j-4][i])))*dxinv1;
flux_4[k][j][i] -= (0.8*(cons_4[k+1][j][i]*q_3[k+1][j][i]-cons_4[k-1][j][i]*q_3[k-1][j][i]+(q_4[k+1][j][i]*q_3[k+1][j][i]-q_4[k-1][j][i]*q_3[k-1][j][i]))-0.2*(cons_4[k+2][j][i]*q_3[k+2][j][i]-cons_4[k-2][j][i]*q_3[k-2][j][i]+(q_4[k+2][j][i]*q_3[k+2][j][i]-q_4[k-2][j][i]*q_3[k-2][j][i]))+0.038*(cons_4[k+3][j][i]*q_3[k+3][j][i]-cons_4[k-3][j][i]*q_3[k-3][j][i]+(q_4[k+3][j][i]*q_3[k+3][j][i]-q_4[k-3][j][i]*q_3[k-3][j][i]))-0.0035*(cons_4[k+4][j][i]*q_3[k+4][j][i]-cons_4[k-4][j][i]*q_3[k-4][j][i]+(q_4[k+4][j][i]*q_3[k+4][j][i]-q_4[k-4][j][i]*q_3[k-4][j][i])))*dxinv2;
flux_4[k+1][j][i] -= (0.8*(cons_4[k+1+1][j][i]*q_3[k+1+1][j][i]-cons_4[k+1-1][j][i]*q_3[k+1-1][j][i]+(q_4[k+1+1][j][i]*q_3[k+1+1][j][i]-q_4[k+1-1][j][i]*q_3[k+1-1][j][i]))-0.2*(cons_4[k+1+2][j][i]*q_3[k+1+2][j][i]-cons_4[k+1-2][j][i]*q_3[k+1-2][j][i]+(q_4[k+1+2][j][i]*q_3[k+1+2][j][i]-q_4[k+1-2][j][i]*q_3[k+1-2][j][i]))+0.038*(cons_4[k+1+3][j][i]*q_3[k+1+3][j][i]-cons_4[k+1-3][j][i]*q_3[k+1-3][j][i]+(q_4[k+1+3][j][i]*q_3[k+1+3][j][i]-q_4[k+1-3][j][i]*q_3[k+1-3][j][i]))-0.0035*(cons_4[k+1+4][j][i]*q_3[k+1+4][j][i]-cons_4[k+1-4][j][i]*q_3[k+1-4][j][i]+(q_4[k+1+4][j][i]*q_3[k+1+4][j][i]-q_4[k+1-4][j][i]*q_3[k+1-4][j][i])))*dxinv2;
}
}
// Host driver: uploads the five flux fields, four cons fields and four q
// fields (each L*M*N doubles), launches the three hypterm kernels, and
// copies the updated flux fields back to the host.
// Fixes over the original: every kernel launch is now checked via
// check_error, and all thirteen device buffers are freed (the original
// leaked every allocation).
extern "C" void host_code (double *h_flux_0, double *h_flux_1, double *h_flux_2, double *h_flux_3, double *h_flux_4, double *h_cons_1, double *h_cons_2, double *h_cons_3, double *h_cons_4, double *h_q_1, double *h_q_2, double *h_q_3, double *h_q_4, double dxinv0, double dxinv1, double dxinv2, int L, int M, int N) {
double *flux_0;
cudaMalloc (&flux_0, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_0\n");
cudaMemcpy (flux_0, h_flux_0, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_1;
cudaMalloc (&flux_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_1\n");
cudaMemcpy (flux_1, h_flux_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_2;
cudaMalloc (&flux_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_2\n");
cudaMemcpy (flux_2, h_flux_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_3;
cudaMalloc (&flux_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_3\n");
cudaMemcpy (flux_3, h_flux_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *flux_4;
cudaMalloc (&flux_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for flux_4\n");
cudaMemcpy (flux_4, h_flux_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_1;
cudaMalloc (&cons_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_1\n");
cudaMemcpy (cons_1, h_cons_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_2;
cudaMalloc (&cons_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_2\n");
cudaMemcpy (cons_2, h_cons_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_3;
cudaMalloc (&cons_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_3\n");
cudaMemcpy (cons_3, h_cons_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *cons_4;
cudaMalloc (&cons_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for cons_4\n");
cudaMemcpy (cons_4, h_cons_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_1;
cudaMalloc (&q_1, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_1\n");
cudaMemcpy (q_1, h_q_1, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_2;
cudaMalloc (&q_2, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_2\n");
cudaMemcpy (q_2, h_q_2, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_3;
cudaMalloc (&q_3, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_3\n");
cudaMemcpy (q_3, h_q_3, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
double *q_4;
cudaMalloc (&q_4, sizeof(double)*L*M*N);
check_error ("Failed to allocate device memory for q_4\n");
cudaMemcpy (q_4, h_q_4, sizeof(double)*L*M*N, cudaMemcpyHostToDevice);
// hypterm_0 covers 1 point per thread; hypterm_1 covers 4 k planes per
// thread; hypterm_2 covers 2 k planes per thread — the z grid extents
// are divided accordingly (ceil is the integer ceil-div macro above).
dim3 blockconfig (16, 4, 4);
dim3 gridconfig_0 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, blockconfig.z));
hypterm_0 <<<gridconfig_0, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
check_error ("hypterm_0 kernel launch failed\n");
dim3 gridconfig_1 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 4*blockconfig.z));
hypterm_1 <<<gridconfig_1, blockconfig>>> (flux_0, flux_1, flux_2, flux_3, cons_1, cons_2, cons_3, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
check_error ("hypterm_1 kernel launch failed\n");
dim3 gridconfig_2 (ceil(N, blockconfig.x), ceil(M, blockconfig.y), ceil(L, 2*blockconfig.z));
hypterm_2 <<<gridconfig_2, blockconfig>>> (flux_4, cons_4, q_1, q_2, q_3, q_4, -dxinv0, dxinv1, dxinv2, L, M, N);
check_error ("hypterm_2 kernel launch failed\n");
// Blocking copies synchronize with the kernels before reading results.
cudaMemcpy (h_flux_0, flux_0, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_1, flux_1, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_3, flux_3, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_4, flux_4, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_flux_2, flux_2, sizeof(double)*L*M*N, cudaMemcpyDeviceToHost);
// Free all device buffers (the original returned without freeing any).
cudaFree (flux_0);
cudaFree (flux_1);
cudaFree (flux_2);
cudaFree (flux_3);
cudaFree (flux_4);
cudaFree (cons_1);
cudaFree (cons_2);
cudaFree (cons_3);
cudaFree (cons_4);
cudaFree (q_1);
cudaFree (q_2);
cudaFree (q_3);
cudaFree (q_4);
}
|
19,903 | #include <cmath>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <fstream>
// Abort the process with a diagnostic when a CUDA runtime call fails.
// `file`/`line` identify the call site (filled in by the HANDLE_ERROR macro).
static void HandleError( cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess)
        return;
    std::cout << cudaGetErrorString(err)
              << " in " << file << " line " << line << std::endl;
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
#define IMG_WIDTH 2024
#define IMG_HEIGHT 2024
#define SPHERES 10
#define INF 2e10f
#define rnd(x) (x*rand() / (float)RAND_MAX)
// A coloured sphere used by the ray-casting kernel.
class Sphere {
public:
    float r,g,b;      // colour
    float radius;
    float x,y,z;      // centre
    // Intersection test for an orthographic ray at (ox, oy) travelling along z.
    // On a hit, returns the z coordinate of the intersection and writes a
    // shading factor into *n; on a miss, returns -INF and leaves *n untouched.
    __device__ float hit(float ox, float oy, float *n) {
        const float dx = ox - x;
        const float dy = oy - y;
        const float r2 = radius * radius;
        const float d2 = dx * dx + dy * dy;
        if (d2 >= r2)
            return -INF;
        const float dz = sqrtf(r2 - d2);
        *n = dz / sqrtf(r2);   // NOTE(review): sqrtf(r2) == radius for radius > 0
        return dz + z;
    }
};
Sphere *dev_s;
// Ray-cast the sphere list for one pixel: each thread owns pixel (x, y) and
// keeps the colour of the *nearest* sphere hit (largest intersection z).
// Launch with a 2-D grid covering IMG_WIDTH x IMG_HEIGHT exactly.
__global__ void kernel(int *ptr, Sphere *dev_s) {
    // map from threadIdx/BlockIdx to pixel position
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    float ox = (x - (float)IMG_WIDTH/2);
    float oy = (y - (float)IMG_HEIGHT/2);
    float r=0, g=0, b=0;
    float maxz = -INF;
    for (int i=0; i < SPHERES; i++) {
        float n;
        float t = dev_s[i].hit(ox, oy, &n);
        if (t > maxz) {
            float fscale = n;
            r = dev_s[i].r * fscale;
            g = dev_s[i].g * fscale;
            b = dev_s[i].b * fscale;
            maxz = t;  // BUG FIX: maxz was never updated, so the depth test
                       // did nothing and the *last* sphere hit always won
                       // instead of the nearest one.
        }
    }
    // Clarified cast: `(int) 255 * r` only cast the literal and relied on an
    // implicit float->int truncation at the store (same value, unclear intent).
    ptr[offset*3 + 0] = (int)(255 * r);
    ptr[offset*3 + 1] = (int)(255 * g);
    ptr[offset*3 + 2] = (int)(255 * b);
}
// Render the random sphere scene on the GPU, time the GPU phase with CUDA
// events, and write the result as an ASCII PPM image ("img.ppm").
int main( void ) {
    // Host image buffer: 3 ints (R, G, B) per pixel.
    int img_size = IMG_WIDTH*IMG_HEIGHT*3;
    // BUG FIX: the buffer holds ints, so size it with sizeof(int) rather than
    // sizeof(float) (same byte count on common platforms, but type-correct).
    size_t img_size_t = (size_t)IMG_WIDTH*IMG_HEIGHT*3*sizeof(int);
    int *img = (int*)malloc(img_size_t);
    if (img == NULL) {
        std::cout << "host malloc failed" << std::endl;
        return EXIT_FAILURE;
    }
    for (int i=0; i<img_size; i+=3) { // init empty img
        img[i+0] = 0;
        img[i+1] = 0;
        img[i+2] = 0;
    }
    // Init spheres on host with random colours, positions and radii.
    Sphere *temp_s = (Sphere*)malloc( sizeof(Sphere) * SPHERES );
    if (temp_s == NULL) {
        std::cout << "host malloc failed" << std::endl;
        free(img);
        return EXIT_FAILURE;
    }
    for (int i=0; i<SPHERES; i++) {
        temp_s[i].r = (float) rnd(1.0f);
        temp_s[i].g = (float) rnd(1.0f);
        temp_s[i].b = (float) rnd(1.0f);
        temp_s[i].x = (float) rnd(1000.0f) - 500;
        temp_s[i].y = (float) rnd(1000.0f) - 500;
        temp_s[i].z = (float) rnd(1000.0f) - 500;
        temp_s[i].radius = (float) rnd(100.0f) + 20;
    }
    cudaEvent_t start, stop;
    HANDLE_ERROR( cudaEventCreate( &start ) );
    HANDLE_ERROR( cudaEventCreate( &stop ) );
    HANDLE_ERROR( cudaEventRecord( start, 0 ) );
    int *dev_img;
    HANDLE_ERROR(cudaMalloc(&dev_img, img_size_t));
    HANDLE_ERROR(cudaMalloc(
        (void**)&dev_s,
        sizeof(Sphere) * (size_t)SPHERES));
    HANDLE_ERROR(cudaMemcpy(
        dev_s,
        temp_s,
        sizeof(Sphere) * (size_t)SPHERES,
        cudaMemcpyHostToDevice));
    free(temp_s);
    // IMG_WIDTH/IMG_HEIGHT are multiples of 16, so this grid tiles exactly.
    dim3 grids(IMG_WIDTH/16, IMG_HEIGHT/16);
    dim3 threads(16, 16);
    kernel<<<grids, threads>>>(dev_img, dev_s);
    // BUG FIX: surface launch-configuration errors; kernel launches do not
    // return a status themselves.
    HANDLE_ERROR(cudaGetLastError());
    HANDLE_ERROR(cudaMemcpy(
        img,
        dev_img,
        img_size_t,
        cudaMemcpyDeviceToHost));
    HANDLE_ERROR(cudaFree(dev_img));
    HANDLE_ERROR(cudaFree(dev_s));
    HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
    // BUG FIX: the stop event must be complete before its timestamp is read;
    // without this, cudaEventElapsedTime can fail with cudaErrorNotReady.
    HANDLE_ERROR( cudaEventSynchronize( stop ) );
    float elapsedTime;
    HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,
                                        start, stop ));
    std::cout << "Time to generate: " << elapsedTime << "ms" << std::endl;
    HANDLE_ERROR( cudaEventDestroy( start ) );  // BUG FIX: events were leaked
    HANDLE_ERROR( cudaEventDestroy( stop ) );
    // Write the image as plain-text PPM (P3).
    std::ofstream ofs;
    ofs.open("img.ppm");
    ofs << "P3\n" << IMG_WIDTH << " " << IMG_HEIGHT << "\n255\n";
    for (int i=0; i<img_size; i+=3) {
        ofs
            << img[i+0] << " "
            << img[i+1] << " "
            << img[i+2] << "\n";
    }
    ofs.close();
    free(img);  // BUG FIX: host image buffer was leaked
    return 0;
}
|
19,904 | #include "includes.h"
//Set B[I[i], J[j], K[k], L[l]] = A for every index combination of a 4-D tile
//of shape nrows x ncols x nk x nl. B has leading dimensions ldb (rows),
//rdb (columns) and tdb (3rd axis); a NULL index array on any axis means the
//identity mapping for that axis. Grid-stride style: each thread starts at
//its flat id in the index space and advances by the total thread count,
//carrying the decomposed (i, j, k, l) coordinates forward incrementally.
__global__ void __fillToInds4D(float A, float *B, int ldb, int rdb, int tdb, int *I, int nrows, int *J, int ncols, int *K, int nk, int *L, int nl) {
int tid = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
int step = blockDim.x * gridDim.x * gridDim.y;
//Decompose the starting flat index into (i, j, k, l)...
int l = tid / (nrows * ncols * nk);
int tidrem = tid - l * (nrows * ncols * nk);
//...and decompose the stride the same way so coordinates can be advanced
//without a full divide each iteration.
int lstep = step / (nrows * ncols * nk);
int steprem = step - lstep * (nrows * ncols * nk);
int k = tidrem / (nrows * ncols);
tidrem = tidrem - k * (nrows * ncols);
int kstep = steprem / (nrows * ncols);
steprem = steprem - kstep * (nrows * ncols);
int j = tidrem / nrows;
int i = tidrem - j * nrows;
int jstep = steprem / nrows;
int istep = steprem - jstep * nrows;
int id, mapi, mapj, mapk, mapl;
//NOTE(review): the incremental carry below handles at most one wrap per
//axis per step, which assumes istep < nrows, jstep < ncols and kstep < nk —
//confirm this holds for all launch configurations used.
for (id = tid; id < nrows * ncols * nk * nl; id += step) {
mapl = l;
if (L != NULL) mapl = L[l];
mapk = k;
if (K != NULL) mapk = K[k];
mapj = j;
if (J != NULL) mapj = J[j];
mapi = i;
if (I != NULL) mapi = I[i];
B[mapi + ldb * (mapj + rdb * (mapk + tdb * mapl))] = A;
i += istep;
if (i >= nrows) {i -= nrows; j++;}
j += jstep;
if (j >= ncols) {j -= ncols; k++;}
k += kstep;
if (k >= nk) {k -= nk; l++;}
l += lstep;
}
} |
19,905 | //this is a sample CUDA program
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Trivial kernel: every launched thread prints one greeting via device printf.
__global__ void hello_cuda()
{
printf("hello cuda world \n");
}
// Launch-configuration demo. Note that every kernel launch in this version
// is commented out in the original: the program only builds two dim3
// configurations, waits on the (empty) device work queue and tears the
// context down.
int main(){
    // Example configurations: 8 blocks of 4 threads (unused — no launch).
    dim3 block(4);
    dim3 grid(8);
    // Block until any outstanding device work completes, then destroy the
    // context so profilers see a clean shutdown.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
19,906 | #include <iostream>
#include <sys/time.h>
using namespace std;
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, n).
// One thread per element. BUG FIX: the original never used `n` and had no
// bounds guard, so any grid larger than n/blockDim wrote out of range.
__global__ void Plus(float A[],float B[],float C[],int n){
    int i = threadIdx.x+blockIdx.x*blockDim.x;
    if (i < n)
        C[i]=A[i]+B[i];
}
// Adds two 1M-element vectors on the GPU (90 + 10 per element), verifies the
// result against the expected constant 100, and reports wall-clock time.
int main(){
    struct timeval start, end;
    gettimeofday(&start, NULL);
    float *A,*B,*C,*Ad,*Bd,*Cd;
    int n=1024*1024;
    int size=n*sizeof(float);
    A=(float*)malloc(size);
    B=(float*)malloc(size);
    C=(float*)malloc(size);
    for(int i=0;i<n;i++){
        A[i]=90;
        B[i]=10;
    }
    // cudaMalloc stores the device address through the given pointer;
    // (void**) is just the generic-pointer idiom.
    cudaMalloc((void **)&Ad,size);
    cudaMalloc((void **)&Bd,size);
    cudaMalloc((void **)&Cd,size);
    cudaMemcpy(Ad,A,size,cudaMemcpyHostToDevice);
    cudaMemcpy(Bd,B,size,cudaMemcpyHostToDevice);
    // NOTE: C is output-only, so the original host-to-device copy of C was
    // unnecessary work and has been dropped.
    // One thread per array element: 1024*1024 threads in blocks of 512.
    dim3 dimBlock(512);
    dim3 dimGrid(n/512);
    Plus<<<dimGrid,dimBlock>>>(Ad,Bd,Cd,n);
    cudaMemcpy(C,Cd,size,cudaMemcpyDeviceToHost);
    // BUG FIX: the original overwrote `error` on every iteration, so only the
    // final element was actually checked; track the maximum deviation instead.
    float error=0;
    for(int i=0;i<n;i++){
        float diff=fabs(100-C[i]);
        if(diff>error) error=diff;
    }
    cout<<"error is "<<error<<endl;
    free(A);
    free(B);
    free(C);
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
    gettimeofday(&end,NULL);
    int timeuse=1000000*(end.tv_sec-start.tv_sec)+end.tv_usec-start.tv_usec;
    cout<<"time use:"<<timeuse/1000<<"ms"<<endl;
    return 0;
}
|
19,907 | #include "ward_implement.h"
#include "brdf_common.h"
//Evaluate a Ward-style anisotropic specular term for each (x, y) grid point
//and scale the light vector L by it, writing the result back into pos.
//The grid/block configuration must exactly tile width x height — there is
//no bounds guard. Helpers calculateL/add/sqr/scale/normalize come from the
//included brdf headers.
//NOTE(review): the exponent divides by sqrt(dot(H,N)); common Ward
//formulations divide by sqr(dot(H,N)) (the tan^2 form) — confirm which
//model is intended.
__global__ void
ward_kernel(float3* pos, unsigned int width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float3 L = calculateL(pos, width, x, y);
float3 H = normalize(add(L, V));
//In the isotropic case both roughness parameters collapse to alpha_x.
float ax = alpha_x;
float ay = anisotropic ? alpha_y : alpha_x;
float exponent = -2.f * (sqr(dot(H,X) / ax) + sqr(dot(H,Y) / ay)) / sqrt(dot(H, N));
float spec = 1.f / (4.f * 3.1415926f * ax * ay * sqrt(dot(L,N) * dot(V, N)));
spec *= exp(exponent);
pos[y*width+x] = scale(L, spec);
}
//Host launcher for ward_kernel: treats pos as a width x (numVertices/width)
//grid and covers it with 8x8 thread blocks.
//NOTE(review): integer division in the grid computation means width and
//height must be multiples of 8, otherwise the right/bottom fringe is
//silently left unprocessed (the kernel has no bounds guard to pair with a
//rounded-up grid).
extern "C" void ward_brdf(float3 *pos, unsigned numVertices, unsigned width, float3 V, float3 N, float3 X, float3 Y, float alpha_x, float alpha_y, bool anisotropic)
{
dim3 block(8, 8, 1);
unsigned height = numVertices / width;
dim3 grid(width / block.x, height / block.y, 1);
ward_kernel<<< grid, block>>>(pos, width, V, N, X, Y, alpha_x, alpha_y, anisotropic);
}
|
19,908 | #include "includes.h"
//Software barrier across thread blocks: thread 0 of every block increments
//the global counter g_mutex (declared elsewhere in this file) and spins
//until it reaches blocks_to_synch; the surrounding __syncthreads() calls
//fence the rest of the block.
//NOTE(review): this only makes progress when all participating blocks are
//resident on the GPU at the same time, and the spin assumes g_mutex is
//re-read from memory each iteration (volatile declaration / fencing) —
//confirm how g_mutex is declared.
__device__ void __gpu_sync(int blocks_to_synch)
{
__syncthreads();
//thread ID in a block
int tid_in_block= threadIdx.x;
// only thread 0 is used for synchronization
if (tid_in_block == 0)
{
atomicAdd((int *)&g_mutex, 1);
//only when all blocks add 1 to g_mutex will
//g_mutex equal to blocks_to_synch
while(g_mutex < blocks_to_synch);
}
__syncthreads();
}
//One multi-iteration BFS expansion pass using per-block shared-memory
//queues with spill to a global queue, ping-ponging the frontier between
//`frontier` and `frontier2` each iteration. Relies on file-level globals
//g_mutex2, g_q_offsets and g_q_size (declared elsewhere) and on __gpu_sync
//above, so all blocks must be co-resident. Exits when the next frontier is
//smaller than one block or larger than the whole grid, then compacts the
//result back into `frontier` and publishes its length.
__global__ void BFS_kernel_SM_block_spill( volatile unsigned int *frontier, volatile unsigned int *frontier2, unsigned int frontier_len, volatile unsigned int *cost, volatile int *visited, unsigned int *edgeArray, unsigned int *edgeArrayAux, unsigned int numVertices, unsigned int numEdges, volatile unsigned int *frontier_length, const unsigned int max_local_mem)
{
//b_q is the per-block queue (dynamic shared memory, max_local_mem entries).
extern volatile __shared__ unsigned int b_q[];
volatile __shared__ unsigned int b_q_length[1];
volatile __shared__ unsigned int b_offset[1];
//get the threadId
unsigned int tid=threadIdx.x + blockDim.x * blockIdx.x;
unsigned int lid=threadIdx.x;
//loop_index selects which of the two frontier buffers is "current".
int loop_index=0;
//l_mutex tracks the expected value of the global barrier counter.
unsigned int l_mutex=g_mutex2;
unsigned int f_len=frontier_len;
while(1)
{
//initialize the block queue length and warp queue offset
if (lid==0)
{
b_q_length[0]=0;
b_offset[0]=0;
}
__syncthreads();
//Initialize the warp queue sizes to 0
if(tid<f_len)
{
//get the nodes to traverse from block queue
unsigned int node_to_process;
if(loop_index==0)
node_to_process=frontier[tid];
else
node_to_process=frontier2[tid];
//remove from frontier
visited[node_to_process]=0;
//get the offsets of the vertex in the edge list
unsigned int offset=edgeArray[node_to_process];
unsigned int next=edgeArray[node_to_process+1];
//Iterate through the neighbors of the vertex
while(offset<next)
{
//get neighbor
unsigned int nid=edgeArrayAux[offset];
//get its cost
unsigned int v=atomicMin((unsigned int *)&cost[nid],
cost[node_to_process]+1);
//if cost is less than previously set add to frontier
if(v>cost[node_to_process]+1)
{
int is_in_frontier=atomicExch((int *)&visited[nid],1);
//if node already in frontier do nothing
if(is_in_frontier==0)
{
//increment the warp queue size
unsigned int t=atomicAdd((unsigned int *)&b_q_length[0],
1);
if(t<max_local_mem)
{
b_q[t]=nid;
}
//write to global memory if shared memory full
else
{
int off=atomicAdd((unsigned int *)g_q_offsets,1);
if(loop_index==0)
frontier2[off]=nid;
else
frontier[off]=nid;
}
}
}
offset++;
}
}
//get offset of block queue in global queue
__syncthreads();
if(lid==0)
{
//Clamp: entries beyond max_local_mem were already spilled globally.
if(b_q_length[0] > max_local_mem)
{
b_q_length[0] = max_local_mem;
}
b_offset[0]=atomicAdd((unsigned int *)g_q_offsets,b_q_length[0]);
}
__syncthreads();
//Grid-wide barrier before reading the global queue bookkeeping.
l_mutex+=gridDim.x;
__gpu_sync(l_mutex);
//store frontier size
if(tid==0)
{
g_q_size[0]=g_q_offsets[0];
g_q_offsets[0]=0;
}
//copy block queue to global queue
//NOTE(review): this copy uses lid directly, which assumes
//b_q_length[0] <= blockDim.x — confirm max_local_mem respects that.
if(lid < b_q_length[0])
{
if(loop_index==0)
frontier2[lid+b_offset[0]]=b_q[lid];
else
frontier[lid+b_offset[0]]=b_q[lid];
}
l_mutex+=gridDim.x;
__gpu_sync(l_mutex);
//if frontier exceeds SM blocks or less than 1 block exit
if(g_q_size[0] < blockDim.x ||
g_q_size[0] > blockDim.x * gridDim.x)
{
//TODO:Call the 1-block bfs right here
break;
}
loop_index=(loop_index+1)%2;
//store the current frontier size
f_len=g_q_size[0];
}
//If we stopped after writing into frontier2, copy the result back so the
//caller always finds the frontier in `frontier`.
if(loop_index==0)
{
for(int i=tid;i<g_q_size[0];i += blockDim.x*gridDim.x)
frontier[i]=frontier2[i];
}
if(tid==0)
{
frontier_length[0]=g_q_size[0];
}
} |
19,909 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <stdio.h>
#include <cstdlib>
#include <math.h>
#include<time.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define Number 1000
#define Delta_t 0.01
//One explicit-Euler time step of the vortex-particle simulation.
//Thread j (one per vorton, stride-3 coordinate layout) sums the velocity and
//velocity-gradient contributions induced by all Number vortons (Gaussian
//kernel of core size Sigma), then advects its position and stretches its
//vorticity by Delta_t into the *N output arrays.
//NOTE(review): there is no bounds guard on j, so the launch must provide at
//most Number threads; the host launches Number/32 blocks of 32 threads.
__global__
void Simulate(double* Vortex_p, double* Omega_v_p, double* VortexN_p, double* Omega_vN_p, double *Sigma_p)
{
double radiika_p, t1_p, t2_p;
double t3_p, Om22P_p, ssss_p, vxx_p, vyy_p, vzz_p;
double dvxdxmov, dvxdymov, dvxdzmov;
double dvydxmov, dvydymov, dvydzmov;
double dvzdxmov, dvzdymov, dvzdzmov;
double Vxc, Vyc, Vzc, dssss_dr;
//Index of this vorton's x component in the flat stride-3 arrays.
int j = 3*(threadIdx.x + blockIdx.x*blockDim.x);
//Always-true guard left from development: seeds the free-stream velocity
//(1, 0, 0) and zeroes the velocity-gradient accumulators.
if (true) {
Vxc = 1.0;
Vyc = 0;
Vzc = 0;
dvxdxmov = 0.0;
dvxdymov = 0;
dvxdzmov = 0;
dvydxmov = 0;
dvydymov = 0;
dvydzmov = 0;
dvzdxmov = 0;
dvzdymov = 0;
dvzdzmov = 0;
}
//Accumulate the induction of every vorton i on vorton j (including i == j,
//whose separation is zero).
for (int i=0; i < Number; i++) {
vxx_p = Vortex_p[j] - Vortex_p[i * 3];
vyy_p = Vortex_p[j + 1] - Vortex_p[(i * 3) + 1];
vzz_p = Vortex_p[j + 2] - Vortex_p[(i * 3) + 2];
radiika_p = vxx_p*vxx_p + vyy_p*vyy_p + vzz_p*vzz_p;
//t* is the cross product (r x omega_i).
t1_p = vyy_p*Omega_v_p[(i * 3) + 2] - vzz_p*Omega_v_p[(i * 3) + 1];
t2_p = vzz_p*Omega_v_p[i * 3] - vxx_p*Omega_v_p[(i * 3) + 2];
t3_p = vxx_p*Omega_v_p[(i * 3) + 1] - vyy_p*Omega_v_p[i * 3];
Om22P_p = 3.1416 / Sigma_p[i] / Sigma_p[i] / 2.0;
//Gaussian falloff with core size Sigma_p[i].
ssss_p = exp(-radiika_p*Om22P_p);
Vxc = Vxc + ssss_p*t1_p;
Vyc = Vyc + ssss_p*t2_p;
Vzc = Vzc + ssss_p*t3_p;
dssss_dr = (-Om22P_p)*ssss_p;
dvxdxmov = dssss_dr*vxx_p*t1_p + dvxdxmov;
dvxdymov = dssss_dr*vyy_p*t1_p + Omega_v_p[(i * 3) + 2] * ssss_p + dvxdymov;
dvxdzmov = dssss_dr*vzz_p*t1_p - Omega_v_p[(i * 3) + 1] * ssss_p + dvxdzmov;
dvydxmov = dssss_dr*vxx_p*t2_p - Omega_v_p[(i * 3) + 2] * ssss_p + dvydxmov;
dvydymov = dssss_dr*vyy_p*t2_p + dvydymov;
dvydzmov = dssss_dr*vzz_p*t2_p + Omega_v_p[i * 3] * ssss_p + dvydzmov;
dvzdxmov = dssss_dr*vxx_p*t3_p + Omega_v_p[(i * 3) + 1] * ssss_p + dvzdxmov;
dvzdymov = dssss_dr*vyy_p*t3_p - Omega_v_p[i * 3] * ssss_p + dvzdymov;
dvzdzmov = dssss_dr*vzz_p*t3_p + dvzdzmov;
}
//Write the advected position and stretched vorticity for this vorton.
if ( true) {
VortexN_p[j] = Vortex_p[j] + Delta_t*Vxc;
VortexN_p[j + 1] = Vortex_p[j + 1] + Delta_t*Vyc;
VortexN_p[j + 2] = Vortex_p[j + 2] + Delta_t*Vzc;
double domxdt, domydt, domzdt;
//d(omega)/dt = (grad v) . omega  (vortex stretching term).
domxdt = dvxdxmov*Omega_v_p[j] + dvxdymov*Omega_v_p[j + 1] + dvxdzmov*Omega_v_p[j + 2];
domydt = dvydxmov*Omega_v_p[j] + dvydymov*Omega_v_p[j + 1] + dvydzmov*Omega_v_p[j + 2];
domzdt = dvzdxmov*Omega_v_p[j] + dvzdymov*Omega_v_p[j + 1] + dvzdzmov*Omega_v_p[j + 2];
Omega_vN_p[j] = Omega_v_p[j] + domxdt*Delta_t;
Omega_vN_p[j + 1] = Omega_v_p[j + 1] + domydt*Delta_t;
Omega_vN_p[j + 2] = Omega_v_p[j + 2] + domzdt*Delta_t;
//Dead stores (locals go out of scope immediately after).
Vxc = 0, Vyc = 0, Vzc = 0;
dvxdxmov = 0, dvxdymov = 0, dvxdzmov = 0;
dvydxmov = 0, dvydymov = 0, dvydzmov = 0;
dvzdxmov = 0, dvzdymov = 0, dvzdzmov = 0;
}
}
//Host driver for the vortex-particle simulation: seeds Number random
//vortons in the unit cube, then for Ntime steps runs the Simulate kernel,
//re-seeds vortons that left the box, rescales core sizes, and logs the
//strongest vorton and a centre-probe velocity to two output files.
int main()
{
const int Ntime = 10000;
//const double Delta_t = 0.01;
const double Radius = 0.1;
//const int Number = 10;
//const double V_mean = 1.0;
double Vortex[Number][3];
double Omega_v[Number][3];
double VortexN[Number][3];
double Omega_vN[Number][3];
//NOTE(review): these four new[] buffers are overwritten by the cudaMalloc
//calls further down, so the host allocations (and the flattening loop that
//fills them) leak / go unused.
double *Vortex_p=new double[Number*3];
double *Omega_v_p = new double[Number * 3];
double *VortexN_p = new double[Number * 3];
double *Omega_vN_p = new double[Number * 3];
double Sigma[Number];
double *Sigma_p;
double StatisticalMoments[4] = {0.000};
double Amagni=0.0,Amagnit_old,Amagnit_new,Speed_max,Sigmas;
double Energy=0;
int Ncout=0;
FILE *fp1,*fp2;
fp1 = fopen("D:\\cudaa\\Velocities1.txt","w+");
fp2 = fopen("D:\\cudaa\\MaxValue1.txt","w+");
double Vx;
double vxx, vyy, vzz;
double *vxx_p, *vyy_p, *vzz_p;
clock_t time0,time1;
//Random initial positions in [0,1)^3 and vorticities in [-0.5, 0.5).
for (int ivorton = 0; ivorton < Number; ivorton++) {
Vortex[ivorton][0] = (double)rand() / (double)RAND_MAX;
Vortex[ivorton][1] = (double)rand() / (double)RAND_MAX;
Vortex[ivorton][2] = (double)rand() / (double)RAND_MAX;
Omega_v[ivorton][0] = (((double)rand() / (double)RAND_MAX) - 0.5);
Omega_v[ivorton][1] = (((double)rand() / (double)RAND_MAX) - 0.5);
Omega_v[ivorton][2] = (((double)rand() / (double)RAND_MAX) - 0.5);
Sigma[ivorton] = Radius;
//printf("%f",Vortex[ivorton][1]);
}
//Flatten the 2-D host arrays into the *_p staging buffers (row-major).
int counter = 0;
for (int h = 0; h < Number; h++) {
for (int w = 0; w < 3; w++) {
Vortex_p[counter] = Vortex[h][w];
Omega_v_p[counter] = Omega_v[h][w];
VortexN_p[counter] = VortexN[h][w];
Omega_vN_p[counter] = Omega_vN[h][w];
counter++;
}
}
time0 = clock();
//Scratch declarations left over from the CPU version (mostly unused).
double radiika;
double t1, t2, t3;
double Om22P;
double ssss,dssss_dr;
double domxdt, domydt, domzdt,Replace=0;
double Vxc, Vyc , Vzc ;
double dvxdxmov , dvxdymov , dvxdzmov ;
double dvydxmov , dvydymov , dvydzmov ;
double dvzdxmov , dvzdymov , dvzdzmov ;
double *radiika_p;
double *t1_p, *t2_p, *t3_p;
double *Om22P_p;
double *ssss_p, *dssss_dr_p;
double *domxdt_p, *domydt_p, *domzdt_p, *Replace_p ;
double *Vxc_p, *Vyc_p, *Vzc_p;
double *dvxdxmov_p, *dvxdymov_p, *dvxdzmov_p;
double *dvydxmov_p, *dvydymov_p, *dvydzmov_p;
double *dvzdxmov_p, *dvzdymov_p, *dvzdzmov_p;
vxx_p = &vxx;
//After these calls the *_p pointers refer to device memory.
cudaMalloc((void**)&Vortex_p, (Number * 3) * sizeof(double));
cudaMalloc((void**)&Omega_v_p, (Number * 3) * sizeof(double));
cudaMalloc((void**)&VortexN_p, (Number * 3) * sizeof(double));
cudaMalloc((void**)&Omega_vN_p, (Number * 3) * sizeof(double));
cudaMalloc((void**)&Sigma_p, (Number) * sizeof(double));
for (int itime = 0; itime < Ntime; itime++) {
printf("%*d %f %e %d \n ",4,itime,Amagni,Energy,Ncout);
//cudaMalloc((void**)&domzdt_p, sizeof(double));
//Copying the 2-D host arrays directly is valid: double[Number][3] is
//contiguous row-major storage.
cudaMemcpy(Vortex_p, Vortex, (Number * 3) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Omega_v_p, Omega_v, (Number * 3) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(VortexN_p, VortexN, (Number * 3) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Omega_vN_p, Omega_vN, (Number * 3) * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(Sigma_p, Sigma, (Number) * sizeof(double), cudaMemcpyHostToDevice);
//dimBlock/dimGrid below are unused by the launch.
dim3 dimBlock(Number, 1);
dim3 dimGrid(Number, 1);
//NOTE(review): Number/32 = 31 blocks * 32 threads = 992 threads, so the
//last 8 of the 1000 vortons are never updated — confirm whether this
//shortfall is intended.
Simulate <<<Number/32, 32 >>> (Vortex_p,Omega_v_p, VortexN_p, Omega_vN_p,Sigma_p);
cudaMemcpy(Vortex, Vortex_p, (Number * 3) * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(Omega_v,Omega_v_p, (Number * 3) * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(VortexN, VortexN_p, (Number * 3) * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(Omega_vN, Omega_vN_p, (Number * 3) * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(Sigma, Sigma_p, (Number) * sizeof(double), cudaMemcpyDeviceToHost);
/* (CPU reference implementation of the per-vorton update removed from this
comment block; the Simulate kernel above performs the same computation on
the device.) */
//Re-seed any vorton that drifted outside the unit cube.
Ncout = 0;
for (int ivorton = 0; ivorton < Number; ivorton++) {
Replace = 0.0;
for (int kkk = 0; kkk < 3; kkk++) {
if (VortexN[ivorton][kkk] < 0.0) {
Replace = 1.0;
}
if (VortexN[ivorton][kkk] > 1.0) {
Replace = 1.0;
}
}
if (Replace == 1.0) {
Ncout = Ncout + 1;
VortexN[ivorton][0] = (double)rand() / (double)RAND_MAX;
VortexN[ivorton][1] = (double)rand() / (double)RAND_MAX;
VortexN[ivorton][2] = (double)rand() / (double)RAND_MAX;
Omega_vN[ivorton][0] = (((double)rand() / (double)RAND_MAX) - 0.5);
Omega_vN[ivorton][1] = (((double)rand() / (double)RAND_MAX) - 0.5);
Omega_vN[ivorton][2] = (((double)rand() / (double)RAND_MAX) - 0.5);
Sigma[ivorton] = Radius;
}
}
//Commit the step; the sqrt(old/new) rescale keeps Sigma^2 * |omega|
//constant per vorton. Track the strongest vorton for logging.
Amagni = 0.0;
for (int ivorton = 0; ivorton < Number; ivorton++) {
Vortex[ivorton][0] = VortexN[ivorton][0];
Vortex[ivorton][1] = VortexN[ivorton][1];
Vortex[ivorton][2] = VortexN[ivorton][2];
Amagnit_old = sqrt((Omega_v[ivorton][0] * Omega_v[ivorton][0]) +( Omega_v[ivorton][1] * Omega_v[ivorton][1]) +( Omega_v[ivorton][2] * Omega_v[ivorton][2]));
Omega_v[ivorton][0] = Omega_vN[ivorton][0];
Omega_v[ivorton][1] = Omega_vN[ivorton][1];
Omega_v[ivorton][2] = Omega_vN[ivorton][2];
Amagnit_new= sqrt((Omega_v[ivorton][0] * Omega_v[ivorton][0]) + (Omega_v[ivorton][1] * Omega_v[ivorton][1]) + (Omega_v[ivorton][2] * Omega_v[ivorton][2]));
Sigma[ivorton] = Sigma[ivorton] * sqrt(Amagnit_old / Amagnit_new);
if (Amagnit_new >= Amagni) {
Amagni = Amagnit_new;
Energy = (Amagnit_new*Amagnit_new)*(pow(Sigma[ivorton], 5));
Speed_max = Amagnit_new*Sigma[ivorton];
Sigmas = Sigma[ivorton];
}
}
//file write
fprintf(fp1, "%f %f %f %f %f \n", itime*Delta_t, Amagni, Energy, Speed_max, Sigmas);
//Probe: x-velocity induced at the box centre (0.5, 0.5, 0.5).
Vx = 0.0;
for (int induced = 0; induced < Number; induced++) {
vxx = 0.5 - Vortex[induced][0];
vyy = 0.5 - Vortex[induced][1];
vzz = 0.5 - Vortex[induced][2];
radiika = vxx*vxx + vyy*vyy + vzz*vzz;
t1 = vyy* Omega_v[induced][2] - vzz*Omega_v[induced][1];
Om22P = 3.1416 / Sigma[induced] / Sigma[induced] / 2.0;
ssss = exp(-radiika*Om22P);
Vx = Vx + ssss*t1;
}
fprintf(fp2, "%f %f \n", itime*Delta_t, Vx);
//Accumulate raw statistical moments of the probe velocity.
for (int ier = 0; ier < 4; ier++) {
StatisticalMoments[ier] = StatisticalMoments[ier] +pow(Vx, ier);
}
}
cudaFree(Vortex_p);
cudaFree(Omega_v_p);
cudaFree(VortexN_p);
cudaFree(Omega_vN_p);
cudaFree(Sigma_p);
fclose(fp1);
fclose(fp2);
time1 = clock();
printf("Time taken for execution with GPU acceleration (~31blocks & 32 threads)= %f sec(s)", (double)(time1 - time0) / CLOCKS_PER_SEC);
}
|
19,910 | #include "includes.h"
// Copyright (c) 2020, Michael Kunz. All rights reserved.
// https://github.com/kunzmi/ImageStackAlignator
//
// This file is part of ImageStackAlignator.
//
// ImageStackAlignator is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as
// published by the Free Software Foundation, version 3.
//
// ImageStackAlignator is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
// MA 02110-1301 USA, http://www.gnu.org/licenses/.
//squared sum of a tile without the border
extern "C"
//Boxfilter ignoring the border parts
//blockDim.X must be tileSize + 2 * maxShift
//blockDim.Y must be 1
extern "C"
//Boxfilter ignoring the border parts
//blockDim.Y must be tileSize + 2 * maxShift
//blockDim.X must be 1
extern "C"
//Computed the normalized CC values out of the different input data
//Cross correlation is fft shifted
//blockDim.X must be 2 * maxShift
//blockDim.Y must be 2 * maxShift
//blockDim.Z must be nr of tiles
extern "C"
//Convert a tiled image into consecutive tiles for FFT
//input img has a pitch, output tiles are consecutive
//output tiles overlap by maxShift is filled by zero
extern "C"
//Convert a tiled image into consecutive tiles for FFT
//input img has a pitch, output tiles are consecutive
//output tiles overlap by maxShift on each side
extern "C"
// Linear-light -> sRGB transfer function for one channel (input in [0, 1]).
__device__ float applysRGBGamma(float valIn)
{
    // Below the threshold the sRGB curve is a straight line through zero.
    if (valIn <= 0.0031308f)
        return 12.92f * valIn;
    // Power-law segment: (1+a) * v^(1/2.4) - a, with a = 0.055.
    return (1.0f + 0.055f) * powf(valIn, 1.0f / 2.4f) - 0.055f;
}
// Apply the sRGB transfer curve in place to a pitched float3 image:
// NaNs are replaced by 0, channels are clamped to [0, 1], then gamma is
// applied per channel. One thread per pixel; launch with a 2-D grid that
// covers at least imgWidth x imgHeight (out-of-range threads return early).
__global__ void GammasRGB( float3 * __restrict__ inOutImg, int imgWidth, int imgHeight, int imgPitch)
{
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= imgWidth || py >= imgHeight)
        return;
    // imgPitch is in bytes, so row addressing goes through a char* cast.
    float3 *row = (float3*)((char*)inOutImg + imgPitch * py);
    float3 val = row[px];
    // Sanitize NaNs before clamping.
    if (isnan(val.x)) val.x = 0;
    if (isnan(val.y)) val.y = 0;
    if (isnan(val.z)) val.z = 0;
    // Clamp each channel into [0, 1].
    val.x = fmaxf(fminf(val.x, 1.0f), 0.0f);
    val.y = fmaxf(fminf(val.y, 1.0f), 0.0f);
    val.z = fmaxf(fminf(val.z, 1.0f), 0.0f);
    // Per-channel gamma.
    val.x = applysRGBGamma(val.x);
    val.y = applysRGBGamma(val.y);
    val.z = applysRGBGamma(val.z);
    row[px] = val;
}
19,911 | #include <stdint.h>
#include <unistd.h>
#include <stdio.h>
#include <assert.h>
#include <sys/time.h>
#include <time.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#define gpu_hook(x) syscall(380,x)
// Print the CUDA error string together with its call site and terminate.
// Wrapped by the HANDLE_ERROR macro, which supplies __FILE__/__LINE__.
static void HandleError( cudaError_t err, const char *file, int line ) {
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
// Elapsed wall-clock time from `start` to `end`, in seconds.
inline double diff_s(struct timeval start, struct timeval end)
{
    double usec_part = (double)(end.tv_usec - start.tv_usec) / 1000000;
    double sec_part  = (double)(end.tv_sec - start.tv_sec);
    return usec_part + sec_part;
}
// 8-byte payload padded out to 32 bytes per element.
// NOTE(review): the padding presumably spaces consecutive elements across
// memory sectors for the access experiments below — confirm intent.
typedef struct blob {
uint64_t data;
char pad[24];
}theblob;
// Experiment kernel (host-pinned buffer variant): every launched thread
// performs out[0].data += 5. NOTE(review): with more than one thread this is
// an unsynchronized read-modify-write on a single address, so the final
// value is racy — presumably intentional for this memory experiment.
// The commented-out variants below exercise per-thread accesses instead.
__global__ void kernel(theblob *out, int threads)
{
//__shared__ uint64_t temp[512];
// tid is only used by the commented-out per-thread variants.
int tid = threadIdx.x + blockIdx.x*blockDim.x;
//if(tid < threads){
//printf("hostalloc out addr =%p, in addr =%p\n",&(out[tid].data), &(in[tid].data));
//printf("hostalloc out addr =%p, in addr =%p\n",&(out[0].data), &(in[0].data));
//out[tid].data = in[tid].data+tid;
//int temp = in[tid].data+tid;
//if(temp == 999999)
// out[tid].data = 5;
//out[0].data += 5;
//temp = in[tid].data;
//out[tid].data = 5;
// temp[tid]=out[tid].data;
out[0].data += 5;
//}
}
// Experiment kernel (managed-memory variant): identical body to `kernel`
// above — every thread does out[0].data += 5 (racy for >1 thread), with the
// per-thread variants kept commented out for the experiment.
__global__ void kernel_d(theblob *out, int threads)
{
//__shared__ uint64_t temp[512];
// tid is only used by the commented-out per-thread variants.
int tid = threadIdx.x + blockIdx.x*blockDim.x;
//if(tid < threads){
//printf("managed out addr =%p, in addr =%p\n",&(out[tid].data), &(in[tid].data));
//printf("managed out addr =%p, in addr =%p\n",&(out[0].data), &(in[0].data));
//out[tid].data = in[tid].data+tid;
//int temp = in[tid].data+tid;
//if(temp == 999999)
// out[tid].data = 5;
//out[0].data += 5;
//temp = in[tid].data;
//out[tid].data = 5;
//temp[tid]=out[tid].data;
out[0].data += 5;
//}
}
// Empty kernel: useful for measuring bare launch overhead.
__global__ void nullKernel(void)
{
}
// Check that out[i].data == in[i].data + i for every element and print
// SUCCESS! or ERROR accordingly. (numBytes is an element count here.)
void verify(theblob *in, theblob *out, int numBytes)
{
    bool ok = true;
    for (int i = 0; i < numBytes; ++i) {
        if (out[i].data != in[i].data + i)
            ok = false;
    }
    if (ok)
        printf("SUCCESS!\n");
    else
        printf("ERROR in verification!\n");
}
// Increment every element's payload by one on the host.
// (numBytes is an element count here.)
void cpu_compute(theblob *out, int numBytes)
{
    for (int i = 0; i < numBytes; ++i)
        out[i].data += 1;
}
// Memory-experiment driver: parses -b blocks -t threads -i iterations,
// allocates a pinned array of theblob, launches `kernel` `iterations`
// times, then bumps each element on the CPU. The getchar() pauses let an
// external tool (see the gpu_hook syscall macro above) inspect state
// between phases; the commented-out sections are alternate host-alloc /
// managed-memory experiments.
int main( int argc, char *argv[] )
{
theblob *out, *out_d;  // out_d only used by the commented-out managed path
int opt;
int iterations = 1;
int blocks = 1;
int threads = 1;
while ((opt = getopt(argc, argv, "b:t:i:")) != -1) {
switch (opt) {
case 'b':
blocks = atoi(optarg);
//assert(numBytes%16 == 0 && numBytes<=1024);
break;
case 'i':
iterations = atoi(optarg);
break;
case 't':
threads = atoi(optarg);
break;
default: /* '?' */
break;
}
}
// int *ptr_array_host_0[100];
// int *ptr_array_host[100];
// int *ptr_array_managed[100];
// cudaFree(0) forces lazy context creation before the measured phases.
HANDLE_ERROR(cudaFree(0));
/* for(int a=0; a< 10; a++)
{
cudaHostAlloc(&ptr_array_host_0[a],sizeof(int),0);
printf("host_0 = %p\n",ptr_array_host_0[a]);
}
*/
//gpu_hook(1);
//gpu_hook(3);
printf("done with init...\n");
getchar();
/* for(int a=0; a< 10; a++)
{
cudaHostAlloc(&ptr_array_host[a],sizeof(int),0);
cudaMallocManaged(&ptr_array_managed[a],sizeof(int));
printf("host = %p\n",ptr_array_host[a]);
printf("managed = %p\n",ptr_array_managed[a]);
}
*/
// Pinned host buffer handed directly to the kernel below.
cudaHostAlloc(&out,blocks*threads*sizeof(theblob),0);
//cudaMallocManaged(&out_d,blocks*threads*sizeof(theblob));
// printf("%lu\n",(unsigned long)out[0].data);
printf("done with init and memory allocations...\n");
getchar();
//printf("done with init and memory allocations...\n");
//getchar();
for(int i = 0; i<iterations; i++) {
kernel<<<blocks,threads>>>(out,blocks*threads);
// *ptr_array_host[0]=5;
cudaDeviceSynchronize();
// gpu_hook(5);
// printf("done with hostalloc kernel...\n");
// getchar();
// kernel_d<<<blocks,threads>>>(out_d,blocks*threads);
//*ptr_array_managed[0]=5;
//cudaDeviceSynchronize();
// gpu_hook(5);
// printf("done with managed kernel...\n");
// getchar();
}
// printf("Press enter to continue...\n");
// getchar();
cpu_compute(out,blocks*threads);
//cpu_compute(out_d,blocks*threads);
cudaFreeHost(out);
//cudaFree(out_d);
/*for(int a=0; a< 10; a++)
{
cudaFreeHost(ptr_array_host_0[a]);
cudaFreeHost(ptr_array_host[a]);
cudaFree(ptr_array_managed[a]);
}
*/
cudaDeviceReset();
return 0;
}
|
19,912 | #include "includes.h"
//Naive matrix multiply, one output element per thread: thread (i, j) writes
//c[j*n + i] = sum_k a[j*n + k] * b[k*n + i], guarded by i < n and j < m.
//NOTE(review): both operands are indexed with the same row stride n, and the
//inner loop runs k over n, which is only self-consistent for square inputs
//(n == m) — confirm the intended layouts before using rectangular matrices.
__global__ void matmul(const float *a, const float *b, float *c, int n, int m){
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
//printf("%d %d %d %d %d %d\n",blockDim.x,blockDim.y,blockIdx.x,blockIdx.y,threadIdx.x,threadIdx.y);
int idx = j * n + i;
if(i < n and j < m){
//printf("%d %d %d %d %d %d\n", i, j, idx, a[idx], b[idx], c[idx]);
float sum = 0;
for(int k = 0; k < n; k++){
int idxa = j * n + k;
int idxb = k * n + i;
sum += a[idxa] * b[idxb];
}
c[idx] = sum;
}
} |
19,913 | #include <iostream>
#include <math.h>
#include <unistd.h>
#include <memory>
#include <algorithm>
#include <array>
#include <numeric>
// the output incorrectly says that the data mismatches, but it appears to be an issue with doubles
// changing all types to integral types showed 0 issues, which for small N, was also hand verified
// moving to double reduced the error rate
const std::size_t N = 1 << 3;
const double h_m = 7.2;
const double h_b = 3.4;
// Slope/intercept pair for y = m*x + b. Unused by the visible code in this
// file — the `linear` kernel below takes m and b as separate scalars.
struct linear_params
{
double m;
double b;
};
typedef struct linear_params linear_params_t;
// Evaluate y[i] = m * x[i] + b for i in [0, n) with a grid-stride loop, so
// any launch configuration covers the whole array.
__global__
void linear(double* y, const std::size_t n, const double* x, const double m, const double b)
{
    const std::size_t stride = blockDim.x * gridDim.x;
    std::size_t i = threadIdx.x + (blockIdx.x * blockDim.x);
    while (i < n)
    {
        y[i] = m * x[i] + b;
        i += stride;
    }
}
// Fill x with 0..N-1, evaluate y = m*x + b on the device via one stream,
// then compare against the host computation and count mismatches.
int main(void)
{
const dim3 grid_size(1, 1, 1);
const dim3 block_size(1024, 1, 1);
std::unique_ptr<std::array<double, N>> h_x (new std::array<double, N>());
std::unique_ptr<std::array<double, N>> h_y (new std::array<double, N>());
// NOTE(review): d_x/d_y come from cudaMallocHost below, i.e. they are
// *pinned host* buffers handed straight to the kernel; this relies on
// unified addressing / zero-copy mapping rather than a cudaMalloc device
// buffer — confirm this is intended.
double* d_y = NULL;
double* d_x = NULL;
cudaStream_t stream;
std::iota(h_x->begin(), h_x->end(), 0);
if (cudaSuccess != cudaStreamCreate(&stream))
{
std::cout << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMallocHost(&d_y, sizeof(d_y[0]) * h_y->size()))
{
std::cout << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMallocHost(&d_x, sizeof(d_x[0]) * h_x->size()))
{
std::cout << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaMemcpyAsync(d_x, h_x->data(), sizeof(d_x[0]) * N, cudaMemcpyHostToDevice, stream))
{
std::cout << __LINE__ << std::endl;
return -__LINE__;
}
linear<<<grid_size, block_size, 0, stream>>>(d_y, N, d_x, h_m, h_b);
if (cudaSuccess != cudaMemcpyAsync(h_y->data(), d_y, sizeof(d_y[0]) * N, cudaMemcpyDeviceToHost, stream))
{
std::cout << __LINE__ << std::endl;
return -__LINE__;
}
if (cudaSuccess != cudaStreamSynchronize(stream))
{
std::cout << __LINE__ << std::endl;
return -__LINE__;
}
std::size_t error_count = 0;
for (std::size_t i = 0; i < h_x->size(); ++i)
{
// Exact floating-point equality: host and device rounding/FMA behaviour
// may legitimately differ, which is what the top-of-file comment about
// "mismatches" observed.
if (((h_m * (*h_x)[i]) + h_b) != (*h_y)[i])
{
// because the GPU and CPU compute differences in floats, output what the difference is
// it should be very small
std::cout << (((h_m * (*h_x)[i]) + h_b) - (*h_y)[i]) << std::endl;
++ error_count;
}
}
//for (std::size_t i = 0; i < h_x->size(); ++i)
//{
// std::cout << h_y(i) << " = (" << m << ")" << h_x(i) << " + " << b << std::endl;
//}
//for (std::size_t i = 0; i < h_x->size(); ++i)
//{
// std::cout << h_y(i) << "," << m << "," << h_x(i) << "," << b << std::endl;
//}
std::cout << error_count << "/" << N << std::endl << std::flush;
cudaFreeHost(d_x);
cudaFreeHost(d_y);
cudaStreamDestroy(stream);
return 0;
}
|
19,914 | #include "includes.h"
__global__ void bpnn_layerforward_CUDA(float *input_cuda, float *output_hidden_cuda, float *input_hidden_cuda, float *hidden_partial_sum, int in, int hid)
{
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int index = ( hid + 1 ) * HEIGHT * by + ( hid + 1 ) * ty + tx + 1 + ( hid + 1 ) ;
int index_in = HEIGHT * by + ty + 1;
__shared__ float input_node[HEIGHT];
__shared__ float weight_matrix[HEIGHT][WIDTH];
if ( tx == 0 )
input_node[ty] = input_cuda[index_in] ;
__syncthreads();
weight_matrix[ty][tx] = input_hidden_cuda[index];
__syncthreads();
weight_matrix[ty][tx] = weight_matrix[ty][tx] * input_node[ty];
__syncthreads();
for ( int i = 1 ; i <= __log2f(HEIGHT) ; i++){
int power_two = __powf(2, i);
if( ty % power_two == 0 )
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
__syncthreads();
}
//__syncthreads();
input_hidden_cuda[index] = weight_matrix[ty][tx];
/*
for ( unsigned int i = 2 ; i <= HEIGHT ; i *= 2){
unsigned int power_two = i - 1;
if( (ty & power_two) == 0 ) {
weight_matrix[ty][tx] = weight_matrix[ty][tx] + weight_matrix[ty + power_two/2][tx];
}
}
*/
__syncthreads();
if ( tx == 0 ) {
hidden_partial_sum[by * hid + ty] = weight_matrix[tx][ty];
}
} |
19,915 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel. Each thread takes care of one element of c
__global__ void matAdd(double *a, double *b, double *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds - should be the same as before
if (id < n)
c[id] = a[id] + b[id];
}
// CUDA kernel. Each thread takes care of one element of c
__global__ void matAddCol(double *a, double *b, double *c, int col_size, int row_size)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds - should be the same as before
// loop over column
int i;
for( i = 0; i < col_size; i++){
c[i*row_size + id] = a[i*row_size + id] + b[i*row_size + id];
}
}
// CUDA kernel. Each thread takes care of one element of c
__global__ void matAddRow(double *a, double *b, double *c, int col_size, int row_size)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds - should be the same as before
int add = id*col_size;
int i;
for(i = 0; i < row_size; i++){
c[add+i] = a[add+i] + b[add+i];
}
}
int main( int argc, char* argv[] ){
// variable size of matrix < 1024
int col_size = 10, row_size = 10;
//check that sizes are correct
if(col_size > 1025 || row_size > 1025){
printf("size not valid\n");
return 1;
}
//how many data points there are row*col
int data_size = col_size * row_size;
// Host input matrix
double *h_a;
double *h_b;
//Host output matrix
double *h_c;
// Device input matrix
double *d_a;
double *d_b;
//Device output matrix
double *d_c;
// Size, in bytes, of each matrix
size_t bytes = data_size*sizeof(double);
// Allocate memory for each matrix on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each matrix on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
int row_id;
// Initialize matrix on host
// Simple initialize for now
for( row_id = 0; row_id < data_size; row_id++ ) {
h_a[row_id] = 4;
h_b[row_id] = 2;
}
// Copy host matrices to device
cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
// Only thing that is different is to determine amount of warps/blocks/grid
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid 1024 height*width/blockSize
gridSize = (int)ceil(data_size/(float)blockSize);
// Execute the kernel add each thread for one output
matAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, data_size);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Number of thread blocks, since 1 thread per row, dont need a larger grid
gridSize = 1;
// Execute the kernel add each thread for one output
matAddCol<<<gridSize, col_size>>>(d_a, d_b, d_c, col_size, row_size);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
// Execute the kernel add each thread for one output
matAddRow<<<gridSize, row_size>>>(d_a, d_b, d_c, col_size, row_size);
// Copy array back to host
cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
int col_id;
for( row_id = 0; row_id < 5; row_id++){
for( col_id = 0; col_id < 5; col_id++){
printf("%d ", (int)h_c[row_size*row_id + col_id]);
}
printf("\n");
}
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
} |
19,916 | #include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <time.h>
#include <iostream>
using namespace std;
__global__ void boyer_moore (int *g){
char s_shared[32768];
for(long j=0;j<10000000;j++){
for(int i=0;i<32;i++){
//s_shared[i*1024+(threadIdx.x*4)+(threadIdx.x%4)] = char(threadIdx.x%256);
s_shared[i*1024+(threadIdx.x)] = char(threadIdx.x%256);
if(threadIdx.x%32==0){
float randum = 0;
randum = g[threadIdx.x];
randum/=45.0;
}
float a = g[threadIdx.x];
a++;
a = a/(a+threadIdx.x);
}
}
if(threadIdx.x==0);
//printf("%d\n",blockIdx.x);
}
int8_t h_string[1000000];
int8_t h_pat[100];
int main(int argc, char const *argv[]) {
cudaEvent_t start,stop;
int g[1024];
for(int i=0;i<1024;i++)
g[i] = i;
int *d_g;
cudaMalloc(&d_g,1024*sizeof(int));
cudaMemcpy(&d_g,g,sizeof(int)*1024,cudaMemcpyHostToDevice);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
boyer_moore<<<1000,1024>>>(d_g);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float millsec=0;
cudaEventElapsedTime(&millsec,start,stop);
cout<<"This is time elapsed "<<millsec;
return 0;
}
|
19,917 | #include <stdio.h>
#include <stdlib.h>
void CPU_Matrix_Multiply(int m, int n, int k, double *a, double *b, double *c){
for (int x = 0; x < m; x++) { // row number of output
for (int y = 0; y < k; y++) { // column number of output
c[k*x+y] = 0;
for (int z = 0; z < n; z++) { //Add n elements
c[k*x+y] += a[n*x+z] * b[k*z+y];
}
}
}
}
|
19,918 | extern "C"
__global__ void calcDir(// Dots props
float* pX,
float* pY,
float* pZ,
//Tree specs
// per Block
//int* dotIndexes,
float* avgPX,
float* avgPY,
float* avgPZ,
int* idFurthest,
// per GPU Block
// output values, per block
float* dirX,
float* dirY,
float* dirZ,
float nBlocs
)
{
int idBloc = blockIdx.x*blockDim.x+threadIdx.x;
if (idBloc<nBlocs) {
int mx=avgPX[idBloc];
int my=avgPY[idBloc];
int mz=avgPZ[idBloc];
int idPtFurthest = idFurthest[idBloc];
float dx=pX[idPtFurthest]-mx;
float dy=pY[idPtFurthest]-my;
float dz=pZ[idPtFurthest]-mz;
float dist = sqrtf(dx*dx+dy*dy+dz*dz);
dirX[idBloc]=dx/dist;
dirY[idBloc]=dy/dist;
dirZ[idBloc]=dz/dist;
}
}
|
19,919 | //Author: Ugo Varetto
//Parallel dot product with timing. Link with librt (-lrt)
//#include <cuda_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
#include <numeric>
#include <ctime>
typedef double real_t;
const size_t BLOCK_SIZE = 1024;
//------------------------------------------------------------------------------
double time_diff_ms(const timespec& start, const timespec& end) {
return end.tv_sec * 1E3 + end.tv_nsec / 1E6
- (start.tv_sec * 1E3 + start.tv_nsec / 1E6);
}
__global__ void partial_dot( const real_t* v1, const real_t* v2, real_t* out, int N ) {
__shared__ real_t cache[ BLOCK_SIZE ];
int i = blockIdx.x * blockDim.x + threadIdx.x;
cache[ threadIdx.x ] = 0.f;
while( i < N ) {
cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
i += gridDim.x * blockDim.x;
}
__syncthreads(); // required because later on the current thread is accessing
// data written by another thread
i = BLOCK_SIZE / 2;
while( i > 0 ) {
if( threadIdx.x < i ) cache[ threadIdx.x ] += cache[ threadIdx.x + i ];
__syncthreads();
i /= 2; //not sure bitwise operations are actually faster
}
if( threadIdx.x == 0 ) out[ blockIdx.x ] = cache[ 0 ];
}
real_t dot( const real_t* v1, const real_t* v2, int N ) {
real_t s = 0;
for( int i = 0; i != N; ++i ) {
s += v1[ i ] * v2[ i ];
}
return s;
}
real_t dot_block( const real_t* v1, const real_t* v2, int N, int block_size ) {
std::vector< real_t > b1(block_size);
std::vector< real_t > b2(block_size);
real_t s = 0;
for( int i = 0; i < N; i += block_size ) {
std::copy(v1 + i, v1 + i + block_size, b1.begin());
std::copy(v2 + i, v2 + i + block_size, b2.begin());
s += dot(&b1[0], &b2[0], block_size);
}
return s;
}
__global__ void init_vector( real_t* v, int N ) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
while( i < N ) {
v[ i ] = 1.0f;//real_t( i ) / 1000000.f;
i += gridDim.x * blockDim.x;
}
}
//------------------------------------------------------------------------------
int main(int argc, char** argv ) {
const size_t ARRAY_SIZE = 1024 * 1024 * 256; //1Mi elements
const int THREADS_PER_BLOCK = 1024;
const int BLOCKS = ARRAY_SIZE / THREADS_PER_BLOCK;//512;
const size_t SIZE = ARRAY_SIZE * sizeof( real_t );
// device storage
real_t* dev_v1 = 0; // vector 1
real_t* dev_v2 = 0; // vector 2
real_t* dev_vout = 0; // partial redution = number of blocks
cudaMalloc( &dev_v1, SIZE );
cudaMalloc( &dev_v2, SIZE );
cudaMalloc( &dev_vout, BLOCKS * sizeof( real_t ) );
// host storage
std::vector< real_t > host_v1( ARRAY_SIZE );
std::vector< real_t > host_v2( ARRAY_SIZE );
std::vector< real_t > host_vout( BLOCKS );
// initialize vector 1 with kernel; much faster than using for loops on the cpu
init_vector<<< BLOCKS, THREADS_PER_BLOCK >>>( dev_v1, ARRAY_SIZE );
cudaMemcpy( &host_v1[ 0 ], dev_v1, SIZE, cudaMemcpyDeviceToHost );
// initialize vector 2 with kernel; much faster than using for loops on the cpu
init_vector<<< BLOCKS, THREADS_PER_BLOCK >>>( dev_v2, ARRAY_SIZE );
cudaMemcpy( &host_v2[ 0 ], dev_v2, SIZE, cudaMemcpyDeviceToHost );
timespec s, e;
clock_gettime(CLOCK_MONOTONIC, &s);
// execute kernel
partial_dot<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_v1, dev_v2, dev_vout, ARRAY_SIZE );
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &e);
const double elapsed = time_diff_ms(s, e);
// copy output data from device(gpu) to host(cpu)
clock_gettime(CLOCK_MONOTONIC, &s);
cudaMemcpy( &host_vout[ 0 ], dev_vout, BLOCKS * sizeof( real_t ), cudaMemcpyDeviceToHost );
clock_gettime(CLOCK_MONOTONIC, &e);
const double transferTime = time_diff_ms(s, e);
clock_gettime(CLOCK_MONOTONIC, &s);
const real_t device_dot = std::accumulate( host_vout.begin(), host_vout.end(), real_t( 0 ) );
clock_gettime(CLOCK_MONOTONIC, &e);
const double acc = time_diff_ms(s, e);
//dot product on host
clock_gettime(CLOCK_MONOTONIC, &s);
//const real_t host_dot = std::inner_product(host_v1.begin(), host_v1.end(), host_v2.begin(), real_t(0));
const real_t host_dot = dot_block( &host_v1[ 0 ], &host_v2[ 0 ], ARRAY_SIZE, 16384);
clock_gettime(CLOCK_MONOTONIC, &e);
const double host_time = time_diff_ms(s, e);
// print dot product by summing up the partially reduced vectors
std::cout << "GPU: " << device_dot << std::endl;
// print dot product on cpu
std::cout << "CPU: " << host_dot << std::endl;
//std::cout << "CPU: " << dot( &host_v1[ 0 ], &host_v2[ 0 ], ARRAY_SIZE ) << std::endl;
std::cout << "ELAPSED TIME(ms) kernel + cpu sum: " << elapsed << " + " << acc << " = " << (elapsed + acc) << std::endl;
std::cout << "TRANSFER TIME(ms): " << transferTime << std::endl;
std::cout << "HOST TIME: " << host_time << std::endl;
// free memory
cudaFree( dev_v1 );
cudaFree( dev_v2 );
cudaFree( dev_vout );
return 0;
}
|
19,920 | #include "includes.h"
__global__ void reluBackward(float* dZ, float* top_diff, float* V, int x, int y){
int index = blockDim.x * blockIdx.x + threadIdx.x;
if(index < x*y){
if(V[index] > 0) {
dZ[index] = top_diff[index];
}else{
dZ[index] = 0;
}
}
} |
19,921 | #include <iostream>
#include <thrust/binary_search.h>
#include <thrust/host_vector.h>
#include <thrust/execution_policy.h>
int main(void)
{
thrust::host_vector<int> input(5);
input[0] = 0;
input[1] = 2;
input[2] = 5;
input[3] = 7;
input[4] = 8;
std::cout << thrust::binary_search(thrust::host, input.begin(), input.end(), 0); // returns true
std::cout << thrust::binary_search(thrust::host, input.begin(), input.end(), 1); // returns false
std::cout << thrust::binary_search(thrust::host, input.begin(), input.end(), 2); // returns true
std::cout << std::endl;
/*
thrust::binary_search(thrust::host, input.begin(), input.end(), 3); // returns false
thrust::binary_search(thrust::host, input.begin(), input.end(), 8); // returns true
thrust::binary_search(thrust::host, input.begin(), input.end(), 9); // returns false
*/
return 0;
}
|
19,922 | //使用constant memory存放向量
//global memory
#include<stdio.h>
#include<math.h>
#include<time.h>
#include <stdlib.h>
int Max=16384;
int width=32;
double err = 0.1;
__constant__ double con_b[8192];
__global__ void multi(double *A,double *C,const int Max,int i){
int idx=threadIdx.x+blockDim.x*blockIdx.x;
//int idy=threadIdx.y+blockDim.y*blockIdx.y;
if(idx<Max){
int k=0;
double sum=0;
for(k=i*Max/2;k<(i+1)*Max/2;k++){
sum+=A[idx*Max+k]*con_b[k%(Max/2)];
}
C[idx]+=sum;
}
}
int main(){
printf("使用constant memory存放向量:\n");
double *A =(double *)malloc(Max * Max * sizeof(double)); //A
double b[Max]; //b
double *C =(double *)malloc(Max * sizeof(double)); //C
double *test_c=(double *)malloc(Max * sizeof(double)); //cpu_test
int i,j;
for(i=0;i<Max;i++){
for(j=0;j<Max;j++){
A[i*Max+j]=i-0.1*j+1;
}
}
for(i=0;i<Max;i++){
b[i]=log(sqrt(i*i-i+2));
C[i]=0.0;
}
double *A_d,*C_d;
cudaMalloc((void **)&A_d,Max * Max * sizeof(double));
cudaMalloc((void **)&C_d,Max *sizeof(double));
clock_t start,end;
start=clock();
cudaMemcpy(A_d, A,Max*Max*sizeof(double),cudaMemcpyHostToDevice);
//cudaMemcpyToSymbol(con_b, b, sizeof(double) * Max);
cudaMemcpy(C_d, C,Max * sizeof(double), cudaMemcpyHostToDevice);
dim3 block(width,1);
dim3 grid(Max/block.x, 1);
for(int i=0;i<2;i++){
cudaMemcpyToSymbol(con_b, &b[i*Max/2], sizeof(double) * Max/2);
multi<<<grid,block>>>(A_d,C_d,Max,i);
}
cudaMemcpy(C, C_d, Max * sizeof(double), cudaMemcpyDeviceToHost);
end=clock();
double time=(end-start)*1000/CLOCKS_PER_SEC;
//cpu:
clock_t start_c,end_c;
start_c=clock();
for (int i = 0; i < Max; ++i){
for (int j = 0; j < Max; ++j)
{
test_c[i]+=A[i*Max+j]*b[j];
}
}
end_c=clock();
double time_C=(end_c-start_c)*1000/CLOCKS_PER_SEC;
printf("GPU TIME:%lf ms\n",time);
printf("CPU TIME:%lf ms\n",time_C);
//check result:
bool flag = true;
for (int i = 0; i < Max; ++i){
double a=test_c[i];
double b=C[i];
if (abs(a-b)>err)
{
printf("cpu:%lf gpu:%lf\n",a,b);
flag = false;
}
}
if (flag == true)
printf("result correct\n");
else{
printf("resul wrong\n");
}
cudaFree(A_d);
cudaFree(C_d);
free(A);
free(test_c);
free(C);
}
|
19,923 | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#define _EPSILON 0.001
#define _ABS(x) ( x > 0.0f ? x : -x )
__host__ int allclose(float *A, float *B, int len)
{
int returnval = 0;
for (int i = 0; i < len; i++)
{
if ( _ABS(A[i] - B[i]) > _EPSILON )
{
returnval = -1;
break;
}
}
return(returnval);
}
// row-column dot-product for matrix multiplication
__device__ float rowcol_dot(float *matrix_a, float *matrix_b, int row, int col, int N)
{
float val = 0;
for (int k=0; k < N; k++)
{
val += matrix_a[ row*N + k ] * matrix_b[ col + k*N];
}
return(val);
}
// matrix multiplication kernel that is parallelized over row/column tuples.
__global__ void matrix_mult_ker(float * matrix_a, float * matrix_b, float * output_matrix, int N)
{
int row = blockIdx.x*blockDim.x + threadIdx.x;
int col = blockIdx.y*blockDim.y + threadIdx.y;
output_matrix[col + row*N] = rowcol_dot(matrix_a, matrix_b, row, col, N);
}
__host__ int main()
{
// Initialize to use first GPU.
cudaSetDevice(0);
// this indicates the width/height of the matrices
int N = 4;
// this will indicate how many bytes to allocate to store a test or output matrix
int num_bytes = sizeof(float)*N*N;
// input test matrix A
float h_A[] = { 1.0, 2.0, 3.0, 4.0, \
1.0, 2.0, 3.0, 4.0, \
1.0, 2.0, 3.0, 4.0, \
1.0, 2.0, 3.0, 4.0 };
// input test matrix B
float h_B[] = { 14.0, 13.0, 12.0, 11.0, \
14.0, 13.0, 12.0, 11.0, \
14.0, 13.0, 12.0, 11.0, \
14.0, 13.0, 12.0, 11.0 };
// expected output of A times B
float h_AxB[] = { 140.0, 130.0, 120.0, 110.0, \
140.0, 130.0, 120.0, 110.0, \
140.0, 130.0, 120.0, 110.0, \
140.0, 130.0, 120.0, 110.0 };
// these pointers will be used for the GPU.
// (notice how we use normal float pointers)
float * d_A;
float * d_B;
float * d_output;
// allocate memory for the test matrices on the GPU
cudaMalloc((float **) &d_A, num_bytes);
cudaMalloc((float **) &d_B, num_bytes);
// copy the test matrices to the GPU
cudaMemcpy(d_A, h_A, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, num_bytes, cudaMemcpyHostToDevice);
// allocate memory for output on GPU
cudaMalloc((float **) &d_output, num_bytes);
// this will store the output from the GPU
float * h_output;
h_output = (float *) malloc(num_bytes);
// setup our block and grid launch parameters with the dim3 class.
dim3 block(2,2,1);
dim3 grid(2,2,1);
// launch our kernel
matrix_mult_ker <<< grid, block >>> (d_A, d_B, d_output, N);
// synchronize on the host, to ensure our kernel has finished executing.
cudaDeviceSynchronize();
// copy output from device to host.
cudaMemcpy(h_output, d_output, num_bytes, cudaMemcpyDeviceToHost);
// synchronize again.
cudaDeviceSynchronize();
// free arrays on device.
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_output);
// reset the GPU.
cudaDeviceReset();
// Check to see if we got the expected output.
// in both cases, remember to de-allocate h_output before returning.
if (allclose(h_AxB, h_output, N*N) < 0)
{
printf("Error! Output of kernel does not match expected output.\n");
free(h_output);
return(-1);
}
else
{
printf("Success! Output of kernel matches expected output.\n");
free(h_output);
return(0);
}
}
|
19,924 | #include <stdio.h>
#include <iostream>
#include <unistd.h>
#include <sys/time.h>
using namespace std;
// Shorthand for formatting and printing usage options to stderr
#define fpe(msg) fprintf(stderr, "\t%s\n", msg);
// Shorthand for handling CUDA errors.
#define HANDLE_ERROR(err) ( HandleError( err, __FILE__, __LINE__ ) )
/**
* DEFINED VALUES HERE
*/
#define TILE_WIDTH 64
#define TILE_HEIGHT 8
#define TILE_DEPTH 1
#define TILE_AGE 2
#define PER_THREAD_X 2
#define PER_THREAD_Y 2
#define PER_THREAD_Z 1
/*****************
* CUDA Utilites *
*****************/
void HandleError(cudaError_t err, const char *file, int line) {
//
// Handle and report on CUDA errors.
//
if (err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit(EXIT_FAILURE);
}
}
void checkCUDAError(const char *msg, bool exitOnError) {
//
// Check cuda error and print result if appropriate.
//
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
if (exitOnError) {
exit(-1);
}
}
}
void cleanupCuda(void) {
//
// Clean up CUDA resources.
//
//
// Explicitly cleans up all runtime-related resources associated with the
// calling host thread.
//
HANDLE_ERROR(
cudaThreadExit()
);
}
/*********************
* End CUDA Utilites *
*********************/
struct Args {
bool debug;
bool sequential;
bool blocked;
bool overlapped;
// Data attributes
int size, dimensions, alloc_size;
int xSize, ySize, zSize;
int xBlockSize, yBlockSize, zBlockSize, tBlockSize;
// Run attributes
int grid_size, block_count, thread_count, iterations;
};
void usage(char *prog_name, string msg) {
if (msg.size() > 0) {
fputs(msg.c_str(), stderr);
}
fprintf(stderr, "%s\n", prog_name);
fprintf(stderr, "Options are:\n");
fpe("-n<size> Set data size (default: 1024)");
fpe("-d<dims> Set number of data dimensions (1, 2, or 3) (default: 2)");
fpe("-g<size> Set grid size");
fpe("-b<num> Set block count");
fpe("-t<num> Set thread count");
fpe("-i<iter> Number of iterations to perform (default: 1000)");
fpe("-x<size> X Dimension");
fpe("-y<size> Y Dimension");
fpe("-z<size> Z Dimension");
fpe("-T<size> T Dimension");
fpe("-S Execute sequential, CPU version");
fpe("-B Execute blocked sequential, CPU version");
fpe("-O Execute sequential overlapped tiling, CPU version");
fpe("-D Print debug info");
fpe("-h Print usage info (this message)");
exit(EXIT_FAILURE);
}
Args parse_arguments(int argc, char *argv[]) {
Args args = Args();
args.debug = false;
args.sequential = false;
args.blocked = false;
args.overlapped = false;
args.size = 1024;
args.dimensions = 2;
args.xSize = args.ySize = args.zSize = 1;
args.xBlockSize = args.yBlockSize = args.zBlockSize = 1;
args.grid_size = 1;
args.block_count = -1;
args.thread_count = -1;
args.iterations = 1000;
int opt;
// Parse args
while ((opt = getopt(argc, argv, "n:d:g:b:t:i:x:y:z:T:hSBOD")) != -1) {
switch (opt) {
case 'D':
args.debug = true;
break;
case 'S':
args.sequential = true;
break;
case 'B':
args.blocked = true;
break;
case 'O':
args.overlapped = true;
break;
case 'n':
args.size = atoi(optarg);
break;
case 'd':
args.dimensions = atoi(optarg);
break;
case 'g':
args.grid_size = atoi(optarg);
break;
case 'b':
args.block_count = atoi(optarg);
break;
case 't':
args.thread_count = atoi(optarg);
break;
case 'i':
args.iterations = atoi(optarg);
break;
case 'x':
args.xBlockSize = atoi(optarg);
break;
case 'X':
args.xSize = atoi(optarg);
break;
case 'y':
args.yBlockSize = atoi(optarg);
break;
case 'Y':
args.ySize = atoi(optarg);
break;
case 'z':
args.zBlockSize = atoi(optarg);
break;
case 'Z':
args.zSize = atoi(optarg);
break;
case 'T':
args.tBlockSize = atoi(optarg);
break;
case 'h':
usage(argv[0], "");
break;
default:
usage(argv[0], "Unrecognized option\n");
}
}
// check sizes
if (args.size <= 0) {
cout << "Data size must be larger than 0" << endl;
exit(EXIT_FAILURE);
}
if (args.dimensions <= 0 || args.dimensions >= 4) {
cerr << "Data must be 1, 2, or 3 dimensions" << endl;
exit(EXIT_FAILURE);
}
// Calculations
if (args.dimensions == 1) {
args.alloc_size = args.size;
} else if (args.dimensions == 2) {
args.alloc_size = args.size * args.size;
} else {
args.alloc_size = args.size * args.size * args.size;
}
if (args.thread_count > 0) {
args.block_count = args.alloc_size / args.thread_count;
} else if (args.block_count > 0) {
args.thread_count = args.alloc_size / args.block_count;
} else {
args.thread_count = 16;
args.block_count = args.alloc_size / args.thread_count;
}
return args;
}
typedef struct {
int dimensions;
int height;
int width;
int depth;
float *elements;
} Matrix;
Matrix initialize_matrix(int dimensions, int width, int height = 1, int depth = 1) {
Matrix data;
if (dimensions == 1 && width > 1) {
data.width = width;
data.height = 1;
data.depth = 1;
data.elements = (float *) malloc(width * sizeof(float));
data.elements[0] = 1.0;
data.elements[width - 1] = 1.0;
} else if (dimensions == 2 && width > 1 && height > 1) {
data.width = width;
data.height = height;
data.depth = 1;
data.elements = (float *) malloc(width * height * sizeof(float));
for (int y = 0; y < height; y += height - 1) {
for (int x = 0; x < width; x++) {
data.elements[y * width + x] = 1.0;
}
}
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x += width - 1) {
data.elements[y * width + x] = 1.0;
}
}
} else if (dimensions == 3 && width > 1 && height > 1 && depth > 1) {
data.width = width;
data.height = height;
data.depth = depth;
data.elements = (float *) malloc(width * height * depth * sizeof(float));
for (int z = 0; z < depth; z++) {
// X = 0 & N planes
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x += width - 1) {
data.elements[z * width * height + y * width + x] = 1.0;
}
}
// Y = 0 & N planes
for (int y = 0; y < height; y += height - 1) {
for (int x = 0; x < width; x++) {
data.elements[z * width * height + y * width + x] = 1.0;
}
}
}
// Z = 0 & N planes
for (int z = 0; z < depth; z += depth - 1) {
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
data.elements[z * width * height + y * width + x] = 1.0;
}
}
}
} else {
fprintf(stderr, "Improper dimension or size.");
exit(1);
}
return data;
}
/****************
* CUDA KERNELS *
****************/
#define BLOCK_DIM_X TILE_WIDTH/PER_THREAD_X
#define BLOCK_DIM_Y TILE_HEIGHT/PER_THREAD_Y
#define BLOCK_DIM_Z TILE_DEPTH/PER_THREAD_Z
// ceil integer division, have to use the BLOCK_DIM_ definitions rather than the defines themselves or it won't work
#define PER_THREAD_OVERLAPPED_COUNT_X (TILE_AGE + TILE_WIDTH/PER_THREAD_X - 1) / (TILE_WIDTH/PER_THREAD_X)
#define PER_THREAD_OVERLAPPED_COUNT_Y (TILE_AGE + TILE_HEIGHT/PER_THREAD_Y - 1) / (TILE_HEIGHT/PER_THREAD_Y)
#define PER_THREAD_OVERLAPPED_COUNT_Z (TILE_AGE + TILE_DEPTH/PER_THREAD_Z - 1) / (TILE_DEPTH/PER_THREAD_Z)
#define PER_THREAD_COMBINED_ITERATIONS_X (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X)
#define PER_THREAD_COMBINED_ITERATIONS_Y (PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + PER_THREAD_OVERLAPPED_COUNT_Y)
#define PER_THREAD_COMBINED_ITERATIONS_Z (PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z + PER_THREAD_OVERLAPPED_COUNT_Z)
__global__ void jacobi1d(Matrix data, Matrix result) {
int threadCol = threadIdx.x;
int blockCol = blockIdx.x;
int globalX[PER_THREAD_COMBINED_ITERATIONS_X];
int sharedX[PER_THREAD_COMBINED_ITERATIONS_X];
// Shared and local data arrays
__shared__ float shared[2][(TILE_AGE + TILE_WIDTH + TILE_AGE)];
int sharedXMax = TILE_AGE + TILE_WIDTH + TILE_AGE - 1;
int tCurr = 0;
int tPrev = 1;
// Some useful bits of info
int globalBlockStart = blockCol * TILE_WIDTH;
// Use >= comparison
int globalBlockReadStart = max(0, globalBlockStart - TILE_AGE);
// Use <= comparison
int globalBlockReadEnd = min(data.width - 1, globalBlockStart + TILE_WIDTH + TILE_AGE);
// Indexes in overlapped region left of the block
#pragma unroll
for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int sharX = TILE_AGE + threadCol - (PER_THREAD_OVERLAPPED_COUNT_X - x) * BLOCK_DIM_X;
int globX = globalBlockStart + sharX - TILE_AGE;
if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) {
sharedX[x] = -1;
globalX[x] = -1;
} else {
sharedX[x] = sharX;
globalX[x] = globX;
}
}
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
// Locations inside the block
int sharX = TILE_AGE + threadCol + BLOCK_DIM_X * (x - PER_THREAD_OVERLAPPED_COUNT_X);
int globX = globalBlockStart + sharX - TILE_AGE;
if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) {
sharedX[x] = -1;
globalX[x] = -1;
} else {
sharedX[x] = sharX;
globalX[x] = globX;
}
}
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int sharX = TILE_AGE + TILE_WIDTH + threadCol + BLOCK_DIM_X * (x - (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X));
int globX = globalBlockStart + sharX - TILE_AGE;
if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) {
sharedX[x] = -1;
globalX[x] = -1;
} else {
sharedX[x] = sharX;
globalX[x] = globX;
}
}
__syncthreads();
/**
* Global Memory:
*
* Block 0 Block 1 Block 2 Block 3 Block 4
* | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ |
*
* If we're block 2, we need:
*
* Block 0 Block 1 Block 2 Block 3 Block 4
* | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ |
* | this |
*
* And for a tile age of AGE we also need:
*
* Block 0 Block 1 Block 2 Block 3 Block 4
* | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ |
* | this | | this |
*
* So what we end up with is
*
* Block 0 Block 1 Block 2 Block 3 Block 4
* | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ | _ _ _ _ |
* | AGE | TLSIZE | AGE |
*
* TILE_AGE + TILE_SIZE + TILE_AGE
*/
// Read the block data itself into shared memory, this will always coalesce nicely
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
shared[0][sharedX[x]] = data.elements[globalX[x]];
}
// Read the left overlapped data into shared memory
#pragma unroll
for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) {
// Left hand side data
int globX = globalX[x];
if (globX >= globalBlockReadStart && globX <= globalBlockReadEnd) {
shared[0][sharedX[x]] = data.elements[globX];
}
}
// Read the right overlapped data into shared memory
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) {
// Left hand side data
int globX = globalX[x];
if (globX >= globalBlockReadStart && globX <= globalBlockReadEnd) {
shared[0][sharedX[x]] = data.elements[globX];
}
}
/*
* Calculate Values
*/
#pragma unroll
for (int t = 1; t <= TILE_AGE; t++) {
int tmp = tCurr;
tCurr = tPrev;
tPrev = tmp;
__syncthreads();
int iterationCalculateStart = max(globalBlockStart - TILE_AGE + t - 1, 0);
int iterationCalculateEnd = min(globalBlockStart + TILE_WIDTH + TILE_AGE - t, data.width - 1);
// First let's do the block itself, since that always plays nicely
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > iterationCalculateStart && globX < iterationCalculateEnd) {
shared[tCurr][sharX] = (shared[tPrev][sharX] + shared[tPrev][sharX - 1] + shared[tPrev][sharX + 1]) / 3;
} else if (sharX >= 0){
shared[tCurr][sharX] = shared[tPrev][sharX];
}
}
// Now the left overlapped regions
#pragma unroll
for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > iterationCalculateStart && globX < iterationCalculateEnd) {
shared[tCurr][sharX] = (shared[tPrev][sharX - 1] + shared[tPrev][sharX] + shared[tPrev][sharX + 1]) / 3;
} else if (sharX >= 0){
shared[tCurr][sharX] = shared[tPrev][sharX];
}
}
// And the right overlapped regions
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > iterationCalculateStart && globX < iterationCalculateEnd) {
shared[tCurr][sharX] = (shared[tPrev][sharX - 1] + shared[tPrev][sharX] + shared[tPrev][sharX + 1]) / 3;
} else if (sharX >= 0){
shared[tCurr][sharX] = shared[tPrev][sharX];
}
}
}
__syncthreads();
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
result.elements[globalX[x]] = shared[tCurr][sharedX[x]];
}
}
/*
 * Time-tiled 2D Jacobi smoothing kernel.
 *
 * Each block stages a (TILE_AGE + TILE_HEIGHT + TILE_AGE) x
 * (TILE_AGE + TILE_WIDTH + TILE_AGE) window of `data` (the block's core tile
 * plus a halo of width TILE_AGE on every side) into shared memory, then runs
 * TILE_AGE Jacobi iterations entirely in shared memory, ping-ponging between
 * shared[0] and shared[1] via tCurr/tPrev. Only the core tile is written back
 * to `result`; the halo absorbs the region of validity shrinking by one cell
 * per side per iteration. Out-of-range positions are flagged with -1 in the
 * cached index arrays and skipped at every use.
 *
 * Launch assumptions (from the index math below): blockDim = (BLOCK_DIM_X,
 * BLOCK_DIM_Y) and each thread covers PER_THREAD_* core cells plus
 * PER_THREAD_OVERLAPPED_COUNT_* halo cells per axis.
 */
__global__ void jacobi2d(Matrix data, Matrix result) {
int threadRow = threadIdx.y;
int threadCol = threadIdx.x;
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Indexes so we don't have to recompute them.
int globalIndex[PER_THREAD_COMBINED_ITERATIONS_Y][PER_THREAD_COMBINED_ITERATIONS_X];
int globalX[PER_THREAD_COMBINED_ITERATIONS_X];
int globalY[PER_THREAD_COMBINED_ITERATIONS_Y];
int sharedX[PER_THREAD_COMBINED_ITERATIONS_X];
int sharedY[PER_THREAD_COMBINED_ITERATIONS_Y];
// Shared and local data arrays
__shared__ float shared[2][TILE_AGE + TILE_HEIGHT + TILE_AGE][TILE_AGE + TILE_WIDTH + TILE_AGE];
int sharedXMax = TILE_AGE + TILE_WIDTH + TILE_AGE - 1;
int sharedYMax = TILE_AGE + TILE_HEIGHT + TILE_AGE - 1;
int tCurr = 0;
int tPrev = 1;
// Some useful bits of info
int globalBlockStartX = blockCol * TILE_WIDTH;
int globalBlockStartY = blockRow * TILE_HEIGHT;
// NOTE(review): the four globalBlockRead* bounds below are computed but never
// used anywhere in this kernel -- possibly leftover from an earlier copy loop;
// confirm before removing.
// Use >= comparison
int globalBlockReadStartX = max(0, globalBlockStartX - TILE_AGE);
int globalBlockReadStartY = max(0, globalBlockStartY - TILE_AGE);
// Use <= comparison
int globalBlockReadEndX = min(data.width - 1, globalBlockStartX + TILE_WIDTH + TILE_AGE);
int globalBlockReadEndY = min(data.height - 1, globalBlockStartY + TILE_HEIGHT + TILE_AGE);
/*
* Calculate indexes into the global and shared arrays
*/
// X Indexes
// Overlapped region to the left of the block
#pragma unroll
for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int sharX = TILE_AGE + threadCol - (PER_THREAD_OVERLAPPED_COUNT_X - x) * BLOCK_DIM_X;
int globX = globalBlockStartX + sharX - TILE_AGE;
if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) {
sharedX[x] = -1;
globalX[x] = -1;
} else {
sharedX[x] = sharX;
globalX[x] = globX;
}
}
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
// Locations inside the block
int sharX = TILE_AGE + threadCol + BLOCK_DIM_X * (x - PER_THREAD_OVERLAPPED_COUNT_X);
int globX = globalBlockStartX + sharX - TILE_AGE;
if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) {
sharedX[x] = -1;
globalX[x] = -1;
} else {
sharedX[x] = sharX;
globalX[x] = globX;
}
}
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int sharX = TILE_AGE + TILE_WIDTH + threadCol + BLOCK_DIM_X * (x - (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X));
int globX = globalBlockStartX + sharX - TILE_AGE;
if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) {
sharedX[x] = -1;
globalX[x] = -1;
} else {
sharedX[x] = sharX;
globalX[x] = globX;
}
}
// Y Indexes
// Overlapped region below block
#pragma unroll
for (int y = 0; y < PER_THREAD_OVERLAPPED_COUNT_Y; y++) {
// Offset by TILE_AGE to make sure it's within the range since we're going back by TILE_AGE
int sharY = TILE_AGE + threadRow - (PER_THREAD_OVERLAPPED_COUNT_Y - y) * BLOCK_DIM_Y;
int globY = globalBlockStartY + sharY - TILE_AGE;
if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) {
sharedY[y] = -1;
globalY[y] = -1;
} else {
sharedY[y] = sharY;
globalY[y] = globY;
}
}
// Main block
#pragma unroll
for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) {
int sharY = TILE_AGE + threadRow + BLOCK_DIM_Y * (y - PER_THREAD_OVERLAPPED_COUNT_Y);
int globY = globalBlockStartY + sharY - TILE_AGE;
if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) {
sharedY[y] = -1;
globalY[y] = -1;
} else {
sharedY[y] = sharY;
globalY[y] = globY;
}
}
// Above block
#pragma unroll
for (int y = PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + PER_THREAD_OVERLAPPED_COUNT_Y; y++) {
int sharY = TILE_AGE + TILE_HEIGHT + threadRow + BLOCK_DIM_Y * (y - (PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y));
int globY = globalBlockStartY + sharY - TILE_AGE;
if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) {
sharedY[y] = -1;
globalY[y] = -1;
} else {
sharedY[y] = sharY;
globalY[y] = globY;
}
}
// Global absolute index
#pragma unroll
for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) {
#pragma unroll
for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) {
globalIndex[y][x] = globalX[x] + globalY[y] * data.width;
}
}
/*
* Copy into shared memory
*/
// TODO: Break into main block and overlapped regions blocks so the main block can at least be coalesced
#pragma unroll
for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) {
#pragma unroll
for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) {
/*
* We want to be doing block-contiguous reads, e.g. for 2x2 block dimension, 2 per thread for x and y
* we want the read pattern to look like:
*
* 11|22
* 11|22
* -----
* 33|44
* 33|44
*
* Optimizing the width for reads is the responsibility of the calling code.
*/
if (globalX[x] >= 0 && globalX[x] < data.width && globalY[y] >= 0 && globalY[y] < data.height) {
shared[0][sharedY[y]][sharedX[x]] = data.elements[globalIndex[y][x]];
}
}
}
/*
* Calculate Values
*
* Pass t can only produce valid values strictly inside the (calculateStart*,
* calculateEnd*) window, which shrinks by one cell per side each pass; cells
* outside the window (but still staged) are copied forward unchanged so the
* next pass reads consistent data.
*/
// TODO Brevity and clarity might be better than this mismatched thing after all
#pragma unroll
for (int t = 1; t <= TILE_AGE; t++) {
int tmp = tCurr;
tCurr = tPrev;
tPrev = tmp;
__syncthreads();
int calculateStartX = max(globalBlockStartX - TILE_AGE + t - 1, 0);
int calculateEndX = min(globalBlockStartX + TILE_WIDTH + TILE_AGE - t, data.width - 1);
int calculateStartY = max(globalBlockStartY - TILE_AGE + t - 1, 0);
int calculateEndY = min(globalBlockStartY + TILE_HEIGHT + TILE_AGE - t, data.height - 1);
#pragma unroll
for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) {
int globY = globalY[y];
int sharY = sharedY[y];
// First the main block since that's nicely laid out
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX &&
globY > calculateStartY && globY < calculateEndY) {
// Calculate new value: 5-point stencil average (center + 4 neighbours)
shared[tCurr][sharY][sharX] =
(
shared[tPrev][sharY][sharX - 1] +
shared[tPrev][sharY][sharX] +
shared[tPrev][sharY][sharX + 1] +
shared[tPrev][sharY - 1][sharX] +
shared[tPrev][sharY + 1][sharX]
) * 0.2f;
} else if (sharX >= 0 && sharY >=0){
shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX];
}
}
// Now the left overlapped regions
#pragma unroll
for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) {
shared[tCurr][sharY][sharX] =
(
shared[tPrev][sharY][sharX - 1] +
shared[tPrev][sharY][sharX] +
shared[tPrev][sharY][sharX + 1] +
shared[tPrev][sharY - 1][sharX] +
shared[tPrev][sharY + 1][sharX]
) * 0.2f;
} else if (sharX >= 0 && sharY >=0){
shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX];
}
}
// And the right overlapped regions
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) {
shared[tCurr][sharY][sharX] =
(
shared[tPrev][sharY][sharX - 1] +
shared[tPrev][sharY][sharX] +
shared[tPrev][sharY][sharX + 1] +
shared[tPrev][sharY - 1][sharX] +
shared[tPrev][sharY + 1][sharX]
) * 0.2f;
} else if (sharX >= 0 && sharY >=0){
shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX];
}
}
}
// Now the overlapped region below the block
#pragma unroll
for (int y = 0; y < PER_THREAD_OVERLAPPED_COUNT_Y; y++) {
int globY = globalY[y];
int sharY = sharedY[y];
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) {
// Calculate new value
shared[tCurr][sharY][sharX] =
(
shared[tPrev][sharY][sharX - 1] +
shared[tPrev][sharY][sharX] +
shared[tPrev][sharY][sharX + 1] +
shared[tPrev][sharY - 1][sharX] +
shared[tPrev][sharY + 1][sharX]
) * 0.2f;
} else if (sharX >= 0 && sharY >=0){
shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX];
}
}
// Now the left and below overlapped region
#pragma unroll
for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) {
shared[tCurr][sharY][sharX] =
(
shared[tPrev][sharY][sharX - 1] +
shared[tPrev][sharY][sharX] +
shared[tPrev][sharY][sharX + 1] +
shared[tPrev][sharY - 1][sharX] +
shared[tPrev][sharY + 1][sharX]
) * 0.2f;
} else if (sharX >= 0 && sharY >=0){
shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX];
}
}
// And the right and below overlapped region
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) {
shared[tCurr][sharY][sharX] =
(
shared[tPrev][sharY][sharX - 1] +
shared[tPrev][sharY][sharX] +
shared[tPrev][sharY][sharX + 1] +
shared[tPrev][sharY - 1][sharX] +
shared[tPrev][sharY + 1][sharX]
) * 0.2f;
} else if (sharX >= 0 && sharY >=0){
shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX];
}
}
}
// Overlapped region above the block
#pragma unroll
for (int y = PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + PER_THREAD_OVERLAPPED_COUNT_Y; y++) {
int globY = globalY[y];
int sharY = sharedY[y];
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) {
// Calculate new value
shared[tCurr][sharY][sharX] =
(
shared[tPrev][sharY][sharX - 1] +
shared[tPrev][sharY][sharX] +
shared[tPrev][sharY][sharX + 1] +
shared[tPrev][sharY - 1][sharX] +
shared[tPrev][sharY + 1][sharX]
) * 0.2f;
} else if (sharX >= 0 && sharY >=0){
shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX];
}
}
// Now the left and below overlapped region
#pragma unroll
for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) {
shared[tCurr][sharY][sharX] =
(
shared[tPrev][sharY][sharX - 1] +
shared[tPrev][sharY][sharX] +
shared[tPrev][sharY][sharX + 1] +
shared[tPrev][sharY - 1][sharX] +
shared[tPrev][sharY + 1][sharX]
) * 0.2f;
} else if (sharX >= 0 && sharY >=0){
shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX];
}
}
// And the right and below overlapped region
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX && globY > calculateStartY && globY < calculateEndY) {
shared[tCurr][sharY][sharX] =
(
shared[tPrev][sharY][sharX - 1] +
shared[tPrev][sharY][sharX] +
shared[tPrev][sharY][sharX + 1] +
shared[tPrev][sharY - 1][sharX] +
shared[tPrev][sharY + 1][sharX]
) * 0.2f;
} else if (sharX >= 0 && sharY >=0){
shared[tCurr][sharY][sharX] = shared[tPrev][sharY][sharX];
}
}
}
}
__syncthreads();
// Write back only the core tile; bounds re-checked against the -1 sentinels.
#pragma unroll
for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) {
int sharY = sharedY[y];
int globY = globalY[y];
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
int sharX = sharedX[x];
int globX = globalX[x];
if (globX >= 0 && globX < data.width && globY >= 0 && globY < data.height) {
result.elements[globalIndex[y][x]] = shared[tCurr][sharY][sharX];
}
}
}
}
/*
 * Time-tiled 3D Jacobi smoothing kernel (7-point stencil).
 *
 * Same temporal-blocking scheme as jacobi2d, extended to depth: each block
 * stages its core tile plus a TILE_AGE-wide halo on all six faces into
 * shared memory, runs TILE_AGE iterations in shared memory ping-ponging
 * between shared[0] and shared[1], then writes back only the core tile.
 * Out-of-range positions carry a -1 sentinel in the cached index arrays.
 * Unlike jacobi2d, the compute phase here uses one fused triple loop over
 * the full (core + halo) per-thread range instead of per-region loops.
 */
__global__ void jacobi3d(Matrix data, Matrix result) {
int threadCol = threadIdx.x;
int threadRow = threadIdx.y;
int threadDep = threadIdx.z;
int blockCol = blockIdx.x;
int blockRow = blockIdx.y;
int blockDep = blockIdx.z;
// Indexes so we don't have to recompute them.
int globalIndex[PER_THREAD_COMBINED_ITERATIONS_Z][PER_THREAD_COMBINED_ITERATIONS_Y][PER_THREAD_COMBINED_ITERATIONS_X];
int globalX[PER_THREAD_COMBINED_ITERATIONS_X];
int globalY[PER_THREAD_COMBINED_ITERATIONS_Y];
int globalZ[PER_THREAD_COMBINED_ITERATIONS_Z];
int sharedX[PER_THREAD_COMBINED_ITERATIONS_X];
int sharedY[PER_THREAD_COMBINED_ITERATIONS_Y];
int sharedZ[PER_THREAD_COMBINED_ITERATIONS_Z];
// Shared and local data arrays
__shared__ float shared[2][TILE_AGE + TILE_DEPTH + TILE_AGE][TILE_AGE + TILE_HEIGHT + TILE_AGE][TILE_AGE + TILE_WIDTH + TILE_AGE];
int sharedXMax = TILE_AGE + TILE_WIDTH + TILE_AGE - 1;
int sharedYMax = TILE_AGE + TILE_HEIGHT + TILE_AGE - 1;
int sharedZMax = TILE_AGE + TILE_DEPTH + TILE_AGE - 1;
int tCurr = 0;
int tPrev = 1;
// Some useful bits of info
int globalBlockStartX = blockCol * TILE_WIDTH;
int globalBlockStartY = blockRow * TILE_HEIGHT;
int globalBlockStartZ = blockDep * TILE_DEPTH;
// NOTE(review): the six globalBlockRead* bounds below are computed but never
// used in this kernel -- confirm before removing (same situation in jacobi2d).
// Use >= comparison
int globalBlockReadStartX = max(0, globalBlockStartX - TILE_AGE);
int globalBlockReadStartY = max(0, globalBlockStartY - TILE_AGE);
int globalBlockReadStartZ = max(0, globalBlockStartZ - TILE_AGE);
// Use <= comparison
int globalBlockReadEndX = min(data.width - 1, globalBlockStartX + TILE_WIDTH + TILE_AGE);
int globalBlockReadEndY = min(data.height - 1, globalBlockStartY + TILE_HEIGHT + TILE_AGE);
int globalBlockReadEndZ = min(data.depth - 1, globalBlockStartZ + TILE_DEPTH + TILE_AGE);
/*
* Calculate indexes into the global and shared arrays
*/
// Overlapped region to the left of the block
#pragma unroll
for (int x = 0; x < PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int sharX = TILE_AGE + threadCol - (PER_THREAD_OVERLAPPED_COUNT_X - x) * BLOCK_DIM_X;
int globX = globalBlockStartX + sharX - TILE_AGE;
if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) {
sharedX[x] = -1;
globalX[x] = -1;
} else {
sharedX[x] = sharX;
globalX[x] = globX;
}
}
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
// Locations inside the block
int sharX = TILE_AGE + threadCol + BLOCK_DIM_X * (x - PER_THREAD_OVERLAPPED_COUNT_X);
int globX = globalBlockStartX + sharX - TILE_AGE;
if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) {
sharedX[x] = -1;
globalX[x] = -1;
} else {
sharedX[x] = sharX;
globalX[x] = globX;
}
}
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X + PER_THREAD_OVERLAPPED_COUNT_X; x++) {
int sharX = TILE_AGE + TILE_WIDTH + threadCol + BLOCK_DIM_X * (x - (PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X));
int globX = globalBlockStartX + sharX - TILE_AGE;
if (sharX < 0 || sharX > sharedXMax || globX < 0 || globX > data.width - 1) {
sharedX[x] = -1;
globalX[x] = -1;
} else {
sharedX[x] = sharX;
globalX[x] = globX;
}
}
// Y Indexes
// Overlapped region below block
#pragma unroll
for (int y = 0; y < PER_THREAD_OVERLAPPED_COUNT_Y; y++) {
// Offset by TILE_AGE to make sure it's within the range since we're going back by TILE_AGE
int sharY = TILE_AGE + threadRow - (PER_THREAD_OVERLAPPED_COUNT_Y - y) * BLOCK_DIM_Y;
int globY = globalBlockStartY + sharY - TILE_AGE;
if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) {
sharedY[y] = -1;
globalY[y] = -1;
} else {
sharedY[y] = sharY;
globalY[y] = globY;
}
}
// Main block
#pragma unroll
for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) {
int sharY = TILE_AGE + threadRow + BLOCK_DIM_Y * (y - PER_THREAD_OVERLAPPED_COUNT_Y);
int globY = globalBlockStartY + sharY - TILE_AGE;
if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) {
sharedY[y] = -1;
globalY[y] = -1;
} else {
sharedY[y] = sharY;
globalY[y] = globY;
}
}
// Above block
#pragma unroll
for (int y = PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y + PER_THREAD_OVERLAPPED_COUNT_Y; y++) {
int sharY = TILE_AGE + TILE_HEIGHT + threadRow + BLOCK_DIM_Y * (y - (PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y));
int globY = globalBlockStartY + sharY - TILE_AGE;
if (sharY < 0 || sharY > sharedYMax || globY < 0 || globY > data.height - 1) {
sharedY[y] = -1;
globalY[y] = -1;
} else {
sharedY[y] = sharY;
globalY[y] = globY;
}
}
// Z Indexes
// Overlapped region in front of block
#pragma unroll
for (int z = 0; z < PER_THREAD_OVERLAPPED_COUNT_Z; z++) {
// Offset by TILE_AGE to make sure it's within the range since we're going back by TILE_AGE
int sharZ = TILE_AGE + threadDep - (PER_THREAD_OVERLAPPED_COUNT_Z - z) * BLOCK_DIM_Z;
// Remove the offset for the global index
int globZ = globalBlockStartZ + sharZ - TILE_AGE;
if (sharZ < 0 || sharZ > sharedZMax || globZ < 0 || globZ > data.depth - 1) {
sharedZ[z] = -1;
globalZ[z] = -1;
} else {
sharedZ[z] = sharZ;
globalZ[z] = globZ;
}
}
// Main block
#pragma unroll
for (int z = PER_THREAD_OVERLAPPED_COUNT_Z; z < PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z; z++) {
int sharZ = TILE_AGE + threadDep + BLOCK_DIM_Z * (z - PER_THREAD_OVERLAPPED_COUNT_Z);
int globZ = globalBlockStartZ + sharZ - TILE_AGE;
if (sharZ < 0 || sharZ > sharedZMax || globZ < 0 || globZ > data.depth - 1) {
sharedZ[z] = -1;
globalZ[z] = -1;
} else {
sharedZ[z] = sharZ;
globalZ[z] = globZ;
}
}
// Overlapped region behind block
#pragma unroll
for (int z = PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z; z < PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z + PER_THREAD_OVERLAPPED_COUNT_Z; z++) {
int sharZ = TILE_AGE + TILE_DEPTH + threadDep + BLOCK_DIM_Z * (z - (PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z));
int globZ = globalBlockStartZ + sharZ - TILE_AGE;
if (sharZ < 0 || sharZ > sharedZMax || globZ < 0 || globZ > data.depth - 1) {
sharedZ[z] = -1;
globalZ[z] = -1;
} else {
sharedZ[z] = sharZ;
globalZ[z] = globZ;
}
}
// Global absolute index
#pragma unroll
for (int z = 0; z < PER_THREAD_COMBINED_ITERATIONS_Z; z++) {
int zTemp = globalZ[z] * data.width * data.height;
#pragma unroll
for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) {
int yTemp = globalY[y] * data.width;
#pragma unroll
for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) {
globalIndex[z][y][x] = globalX[x] + yTemp + zTemp;
}
}
}
/*
* Copy into shared memory
*/
#pragma unroll
for (int z = 0; z < PER_THREAD_COMBINED_ITERATIONS_Z; z++) {
#pragma unroll
for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) {
#pragma unroll
for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) {
if (globalX[x] >= 0 && globalX[x] < data.width &&
globalY[y] >= 0 && globalY[y] < data.height &&
globalZ[z] >= 0 && globalZ[z] < data.depth) {
shared[0][sharedZ[z]][sharedY[y]][sharedX[x]] = data.elements[globalIndex[z][y][x]];
}
}
}
}
// TILE_AGE iterations in shared memory; the valid window shrinks by one cell
// per face each pass, and out-of-window cells are copied forward unchanged.
#pragma unroll
for (int t = 1; t <= TILE_AGE; t++) {
int tmp = tCurr;
tCurr = tPrev;
tPrev = tmp;
__syncthreads();
int calculateStartX = max(globalBlockStartX - TILE_AGE + t - 1, 0);
int calculateEndX = min(globalBlockStartX + TILE_WIDTH + TILE_AGE - t, data.width - 1);
int calculateStartY = max(globalBlockStartY - TILE_AGE + t - 1, 0);
int calculateEndY = min(globalBlockStartY + TILE_HEIGHT + TILE_AGE - t, data.height - 1);
int calculateStartZ = max(globalBlockStartZ - TILE_AGE + t - 1, 0);
int calculateEndZ = min(globalBlockStartZ + TILE_DEPTH + TILE_AGE - t, data.depth - 1);
#pragma unroll
for (int z = 0; z < PER_THREAD_COMBINED_ITERATIONS_Z; z++) {
int globZ = globalZ[z];
int sharZ = sharedZ[z];
#pragma unroll
for (int y = 0; y < PER_THREAD_COMBINED_ITERATIONS_Y; y++) {
int globY = globalY[y];
int sharY = sharedY[y];
#pragma unroll
for (int x = 0; x < PER_THREAD_COMBINED_ITERATIONS_X; x++) {
int globX = globalX[x];
int sharX = sharedX[x];
if (globX > calculateStartX && globX < calculateEndX &&
globY > calculateStartY && globY < calculateEndY &&
globZ > calculateStartZ && globZ < calculateEndZ) {
// 7-point stencil: center plus six face neighbours, averaged.
shared[tCurr][sharZ][sharY][sharX] =
(
shared[tPrev][sharZ][sharY][sharX] +
shared[tPrev][sharZ][sharY][sharX - 1] +
shared[tPrev][sharZ][sharY][sharX + 1] +
shared[tPrev][sharZ][sharY - 1][sharX] +
shared[tPrev][sharZ][sharY + 1][sharX] +
shared[tPrev][sharZ - 1][sharY][sharX] +
shared[tPrev][sharZ + 1][sharY][sharX]
) / 7;
} else if (sharX >= 0 && sharY >= 0 && sharZ >= 0) {
shared[tCurr][sharZ][sharY][sharX] = shared[tPrev][sharZ][sharY][sharX];
}
}
}
}
}
__syncthreads();
// Write back only the core tile; the >= 0 checks rely on the -1 sentinels
// already encoding the upper-bound tests done during index setup.
#pragma unroll
for (int z = PER_THREAD_OVERLAPPED_COUNT_Z; z < PER_THREAD_OVERLAPPED_COUNT_Z + PER_THREAD_Z; z++) {
int sharZ = sharedZ[z];
int globZ = globalZ[z];
#pragma unroll
for (int y = PER_THREAD_OVERLAPPED_COUNT_Y; y < PER_THREAD_OVERLAPPED_COUNT_Y + PER_THREAD_Y; y++) {
int sharY = sharedY[y];
int globY = globalY[y];
#pragma unroll
for (int x = PER_THREAD_OVERLAPPED_COUNT_X; x < PER_THREAD_OVERLAPPED_COUNT_X + PER_THREAD_X; x++) {
int sharX = sharedX[x];
int globX = globalX[x];
if (globX >= 0 && globY >= 0 && globZ >= 0) {
result.elements[globalIndex[z][y][x]] = shared[tCurr][sharZ][sharY][sharX];
}
}
}
}
}
/********************
* END CUDA KERNELS *
********************/
/*
 * Allocate a device-side mirror of host matrix A.
 * When copyToDevice is true the host contents are also uploaded; otherwise
 * the device buffer is left uninitialized (output buffers).
 * Returns a Matrix whose `elements` pointer lives in device memory.
 */
Matrix initialize_device(Matrix A, bool copyToDevice) {
    Matrix dev;
    dev.width      = A.width;
    dev.height     = A.height;
    dev.depth      = A.depth;
    dev.dimensions = A.dimensions;
    size_t bytes = A.width * A.height * A.depth * sizeof(float);
    HANDLE_ERROR(cudaMalloc((void **) &dev.elements, bytes));
    if (copyToDevice) {
        HANDLE_ERROR(cudaMemcpy(dev.elements, A.elements, bytes, cudaMemcpyHostToDevice));
    }
    return dev;
}
/*
 * Run the Jacobi solver matching args.dimensions on the GPU.
 * deviceA starts as a copy of A, deviceB is scratch; the two are ping-ponged
 * after every kernel launch, so after the final swap the newest data is in
 * deviceA, which is copied back into B.elements.
 * Fixes: device buffers are now freed before returning (they previously
 * leaked on every call), and launch errors are surfaced before the copy-back.
 */
void callKernel(Args args, Matrix A, Matrix B) {
    Matrix deviceA, deviceB;
    deviceA = initialize_device(A, true);
    deviceB = initialize_device(B, false);
    if (args.dimensions == 1) {
        dim3 blocks(max(args.size / TILE_WIDTH, 1));
        dim3 threads(max(TILE_WIDTH / PER_THREAD_X, 1));
        // Each launch advances the simulation TILE_AGE steps (temporal tiling).
        for (int t = 0; t < args.iterations / TILE_AGE; t++) {
            jacobi1d<<<blocks, threads>>>(deviceA, deviceB);
            swap(deviceA, deviceB);
        }
    } else if (args.dimensions == 2) {
        dim3 blocks(max(args.size / TILE_WIDTH, 1), max(args.size / TILE_HEIGHT, 1));
        dim3 threads(max(TILE_WIDTH / PER_THREAD_X, 1), max(TILE_HEIGHT / PER_THREAD_Y, 1));
        for (int t = 0; t < args.iterations / TILE_AGE; t++) {
            jacobi2d<<<blocks, threads>>>(deviceA, deviceB);
            swap(deviceA, deviceB);
        }
    } else {
        dim3 blocks(max(args.size / TILE_WIDTH, 1), max(args.size / TILE_HEIGHT, 1), max(args.size / TILE_DEPTH, 1));
        dim3 threads(max(TILE_WIDTH / PER_THREAD_X, 1), max(TILE_HEIGHT / PER_THREAD_Y, 1), max(TILE_DEPTH / PER_THREAD_Z, 1));
        for (int t = 0; t < args.iterations / TILE_AGE; t++) {
            jacobi3d<<<blocks, threads>>>(deviceA, deviceB);
            swap(deviceA, deviceB);
        }
    }
    // Surface any launch-configuration error before trusting the result.
    HANDLE_ERROR(cudaGetLastError());
    HANDLE_ERROR(cudaMemcpy(B.elements, deviceA.elements, A.width * A.height * A.depth * sizeof(float), cudaMemcpyDeviceToHost));
    // BUG FIX: release the device buffers -- they previously leaked.
    HANDLE_ERROR(cudaFree(deviceA.elements));
    HANDLE_ERROR(cudaFree(deviceB.elements));
}
// Data output
/*
 * Dump a 1-, 2- or 3-dimensional float field to stdout.
 * `size` is the extent along every axis; each value is printed as "%.3f "
 * with a newline after each row and a blank line after each 2D slice.
 */
void print_data(float *data, int size, int dimensions) {
    if (dimensions == 1) {
        for (int col = 0; col < size; col++) {
            printf("%.3f ", data[col]);
        }
    } else if (dimensions == 2) {
        for (int row = 0; row < size; row++) {
            for (int col = 0; col < size; col++) {
                printf("%.3f ", data[row * size + col]);
            }
            cout << endl;
        }
    } else if (dimensions == 3) {
        for (int plane = 0; plane < size; plane++) {
            for (int row = 0; row < size; row++) {
                for (int col = 0; col < size; col++) {
                    printf("%.3f ", data[plane * size * size + row * size + col]);
                }
                cout << endl;
            }
            cout << endl;
        }
    }
    cout << endl << endl;
}
// Main
// Entry point: build the input/output matrices, run the requested Jacobi
// solver on the GPU, and report the wall-clock time in milliseconds.
int main(int argc, char *argv[]) {
    Args args = parse_arguments(argc, argv);
    Matrix A = initialize_matrix(args.dimensions, args.size, args.size, args.size);
    Matrix B = initialize_matrix(args.dimensions, args.size, args.size, args.size);
    struct timeval start, end;
    gettimeofday(&start, NULL);
    callKernel(args, A, B);
    gettimeofday(&end, NULL);
    // Convert the timeval delta to milliseconds.
    float runtime = ((end.tv_sec - start.tv_sec) * 1000.0) + ((end.tv_usec - start.tv_usec) / 1000.0);
    printf("Processing Time: %4.4f milliseconds\n", runtime);
    if (args.debug) { print_data(B.elements, args.size, args.dimensions); }
}
|
19,925 | #include "includes.h"
/*
 * Adds norm[row] to every element of row `row` of `array` (pitch given in
 * elements). One thread per output element in a 2D grid.
 * NOTE(review): the shared staging buffer holds 16 entries, so this assumes
 * blockDim.y <= 16 -- confirm against the launch configuration.
 */
__global__ void add_reference_points_norm(float * array, int width, int pitch, int height, float * norm){
    unsigned int tx = threadIdx.x;
    unsigned int ty = threadIdx.y;
    unsigned int col = blockIdx.x * blockDim.x + tx;
    unsigned int row = blockIdx.y * blockDim.y + ty;
    __shared__ float row_norm[16];
    // One thread per block-row stages the norm value for that row.
    if (tx == 0 && row < height)
        row_norm[ty] = norm[row];
    __syncthreads();
    if (col < width && row < height)
        array[row * pitch + col] += row_norm[ty];
}
19,926 | #include "includes.h"
constexpr const int SECTION_SIZE = 2048;
constexpr const int MAX_SECTIONS = 1024;
/*
 * Brent-Kung inclusive prefix sum of one SECTION_SIZE-element section of X
 * into Y. Each thread loads two elements, so the design assumes
 * blockDim.x == SECTION_SIZE / 2; block b scans elements
 * [2*b*blockDim.x, 2*b*blockDim.x + SECTION_SIZE).
 *
 * Fix: out-of-range slots of the shared buffer are now padded with the
 * additive identity (0). Previously they were left uninitialized, and the
 * up-/down-sweep tree phases folded that garbage into valid outputs whenever
 * InputSize was not a multiple of SECTION_SIZE.
 */
__device__ void brent_kung_scan_(float *X, float *Y, int InputSize) {
    const int bx = blockIdx.x;
    const int tx = threadIdx.x;
    const int bdx = blockDim.x;
    __shared__ float XY[SECTION_SIZE];
    int i = 2 * bx * bdx + tx;
    // Each thread loads two elements, bdx apart; pad the tail with zeros.
    XY[tx] = (i < InputSize) ? X[i] : 0.0f;
    XY[tx + bdx] = (i + bdx < InputSize) ? X[i + bdx] : 0.0f;
    // Up-sweep (reduction tree): partial sums at power-of-two strides.
    for (unsigned int stride = 1; stride <= bdx; stride *= 2) {
        __syncthreads();
        int index = (tx + 1) * 2 * stride - 1;
        if (index < SECTION_SIZE) {
            XY[index] += XY[index - stride];
        }
    }
    // Down-sweep: distribute the partial sums back to interior positions.
    for (int stride = SECTION_SIZE / 4; stride > 0; stride /= 2) {
        __syncthreads();
        int index = (tx + 1) * stride * 2 - 1;
        if (index + stride < SECTION_SIZE) {
            XY[index + stride] += XY[index];
        }
    }
    __syncthreads();
    // Write back only in-range results.
    if (i < InputSize)
        Y[i] = XY[tx];
    if (i + bdx < InputSize)
        Y[i + bdx] = XY[tx + bdx];
}
// Kernel entry point: delegates one section's scan to the device routine.
__global__ void brent_kung_scan_kernel(float *X, float *Y, int InputSize) {
    brent_kung_scan_(X, Y, InputSize);
}
19,927 | #include <ctime>
#include <stdio.h>
// Prints every element of `vector`, tagged with a flattened global thread
// index, from a 3D grid of 3D blocks (debugging aid).
// NOTE(review): the flattening below is nonstandard -- the usual in-block term
// is threadIdx.z * blockDim.x * blockDim.y, but this uses
// threadIdx.z * blockDim.z (and similarly mixes gridDim axes). It happens to
// yield unique indices 0..63 for the 2x2x2 block / 2x2x2 grid launched from
// main() below, but may collide or skip slots for other launch shapes --
// confirm before reusing with a different configuration.
__global__ void print_3d(int *vector) {
int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
int index = threadIdx.x + (threadIdx.y * (blockDim.z * blockDim.x)) +
(threadIdx.z * blockDim.z) + (blockIdx.x * threads_per_block) +
(blockIdx.z * gridDim.x * threads_per_block) +
(blockIdx.y * gridDim.z * gridDim.x * threads_per_block);
printf("index: %d value: %d\n", index, vector[index]);
}
// ==================================================
// Fills a 64-element host array with random values, copies it to the device,
// and launches print_3d over a 2x2x2 grid of 2x2x2 blocks to dump it.
// Fixes: h_input and d_input are now released (both previously leaked).
int main() {
  printf(" starts ... \n");
  int size = 64;
  int byte_size = size * sizeof(int);
  int *h_input;
  h_input = (int *)malloc(byte_size);
  if (h_input == NULL) {
    printf("host allocation failed\n");
    return 1;
  }
  for (int i = 0; i < size; i++) {
    h_input[i] = rand() % 1000;
  }
  int *d_input;
  cudaMalloc((void **)&d_input, byte_size);
  cudaMemcpy(d_input, h_input, byte_size, cudaMemcpyHostToDevice);
  // 4x4x4 logical volume covered by 2x2x2 threads per block.
  int nx, ny, nz;
  nx = 4;
  ny = 4;
  nz = 4;
  dim3 block(2, 2, 2);
  dim3 grid(nx / block.x, ny / block.y, nz / block.z);
  print_3d<<<grid, block>>>(d_input);
  cudaDeviceSynchronize();
  // BUG FIX: release device and host buffers (previously leaked).
  cudaFree(d_input);
  free(h_input);
  cudaDeviceReset();
  printf(" finished. \n");
  return 0;
}
19,928 | #include<stdio.h>
#define CHECK_FOR_CORRECTNESS 1
#define MIN(a,b) (( (a) < (b) )?(a):(b))
#define GE 1
#define GI 2
/* Following section contains Kernel functions used by prefix sum */
/* Kernel Function1 - copy A into the working buffer B (one thread per element,
 * single block). */
__global__ void initializeArray(int* A, int* B, int N)
{
    int idx = threadIdx.x;
    if (idx < N) {
        B[idx] = A[idx];
    }
}
/* Kernel Function2 - one reduction level of the min-tree: each thread folds a
 * pair of neighbours from the previous level (offset s) into level t.
 * NOTE(review): thread 0 reads B[s - 1], which is out of range when s == 0 --
 * present in the original too; confirm intent. */
__global__ void prefixOnB(int* B, int t, int s)
{
    int i = threadIdx.x;
    int left  = B[s + 2*i - 1];
    int right = B[s + 2*i];
    B[t + i] = MIN(left, right);
}
/* kernel Function3 - fill level t of C from the coarser level s of C and
 * level t of B.
 * NOTE(review): the special case below tests i == 1, where similar scan
 * down-sweeps usually special-case index 0 -- preserved as-is; confirm. */
__global__ void prefixOnC(int* B, int* C,int t, int s)
{
    int i = threadIdx.x;
    if (i == 1) {
        C[t + i] = B[t + i];
    } else if ((i & 1) == 0) {
        // Even positions inherit directly from the coarser level.
        C[t + i] = C[s + (i >> 1)];
    } else {
        // Odd positions combine the coarser prefix with the local reduction.
        C[t + i] = MIN(C[s + ((i - 1) >> 1)], B[t + i]);
    }
}
/* Kernel Function4 - publish the first N entries of C into the result array S
 * (one thread per element; launched with exactly N threads, so no guard). */
__global__ void copyArray(int* S, int* C, int N)
{
    int i = threadIdx.x;
    S[i] = C[i];
}
/* Floor of log base 2 computed by repeated halving (log2(1) == 0). */
int log2(int x)
{
    int bits = 0;
    for (; x >> 1; x >>= 1) {
        bits++;
    }
    return bits;
}
/* Compute prefix "sum" of A into S on the device.
 * Despite the name, the per-level kernels combine elements with MIN, so this
 * computes a prefix MINIMUM tree -- see prefixOnB/prefixOnC above.
 * Levels of the tree are packed end-to-end inside d_B/d_C (hence the 2*N
 * allocations); t and s track the write/read offsets of the current and
 * previous levels, m the current level's width.
 * NOTE(review): every launch uses a single block with N (or m) threads, and
 * the level bookkeeping assumes N is a power of two -- confirm that callers
 * guarantee N <= max threads per block and power-of-two size.
 * @param N - size of array A
 * @param d_A - Initial device(CUDA)-array over which prefixSum should be calculated
 * @param d_S - device(CUDA)-array into which prefix Sum has to be calculated
 */
void computePrefixSum(int * d_A, int* d_S, int N)
{
int * d_B, *d_C;
size_t arrSize = N*sizeof(int);
// 2*N ints: enough for all tree levels packed back-to-back.
cudaMalloc(&d_B, 2*arrSize);
cudaMalloc(&d_C, 2*arrSize);
/* First call to Kernel Function to Initialize B */
int threadsPerBlock = N;
int blocksPerGrid = 1;
initializeArray<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, N);
/* A few variables required in prefix-computations */
int m = N, t = 0, h=1;
int k = log2(N);
int s = 0;
// Up phase: build successively coarser levels (each half the width).
for(h =1; h<=k; h++)
{
s = t; t += m; m >>=1;
/* Second call to CUDA Kernel Function - This time logN calls. Every call has m parallel instances */
blocksPerGrid = 1;
threadsPerBlock = m;
prefixOnB<<<blocksPerGrid, threadsPerBlock>>>(d_B, t , s);
}
// Down phase: walk the levels back, widening m and rewinding t/s.
for(h=k;h>=0;h--)
{
blocksPerGrid = 1;
threadsPerBlock = m;
/* Third call to kernel function - Again logN times m of them */
prefixOnC<<<blocksPerGrid, threadsPerBlock>>>(d_B, d_C, t , s);
m<<=1; s= t; t-=m;
}
/* Copy the results from C */
threadsPerBlock = N;
blocksPerGrid = 1;
copyArray<<<blocksPerGrid, threadsPerBlock>>>(d_S, d_C, N);
/* Freeing two temporary device arrays B, C */
cudaFree(d_B);
cudaFree(d_C);
return;
}
/* Set of Kernel Functions used in sequence alignment calculation */
/* Initialise the first rows of G, D and I with affine gap penalties:
 * opening a gap costs GI, each extension costs GE. */
__global__ void initFirstRow(int *d_D0,int * d_I0, int *d_G0)
{
    int i = threadIdx.x;
    int gapCost = GI + GE * i;          // open once, extend i times
    d_G0[i] = gapCost;
    d_D0[i] = GI + GE * (i + 1);
    if (i == 0) d_I0[i] = gapCost + GE; // only slot 0 of I is seeded
}
/* Row update for D: cheapest of extending an existing deletion (D0) or
 * opening a new one from G0, plus one extension cost. */
__global__ void updateD(int* d_D1, int* d_D0, int* d_G0)
{
    int j = threadIdx.x;
    int openCost   = d_G0[j] + GI;
    int extendCost = d_D0[j];
    d_D1[j] = MIN(extendCost, openCost) + GE;
}
/* U[j] = min(D1[j], G0[j-1] + substitution cost) for j > 0; the substitution
 * cost is 0 on a character match of X[i] vs Y[j], 1 otherwise. */
__global__ void updateU(int* d_U , int* d_D1, int* d_G0, int i, char* d_X, char* d_Y)
{
    int j = threadIdx.x;
    if (j == 0) return;   // column 0 is handled by the initial row
    int Sij = (d_X[i] == d_Y[j]) ? 0 : 1;
    d_U[j] = MIN(d_D1[j], d_G0[j-1] + Sij);
}
/* V[j] = U[j] + GI - j*GE for j > 0: reweights U so the subsequent prefix-min
 * over V can be taken with a position-independent comparison. */
__global__ void updateV(int* d_V, int* d_U)
{
    int j = threadIdx.x;
    if (j == 0) return;
    d_V[j] = d_U[j] + GI - j * GE;
}
/* Main function - reads N and two length-N strings, then runs the row-by-row
 * alignment recurrence on the GPU, ping-ponging between the 0/1 copies of
 * each row array on odd/even iterations.
 * Fixes: scanf inputs are now validated and the string reads are
 * width-limited so untrusted input cannot overflow the (N+1)-byte buffers.
 * NOTE(review): d_S is recomputed every row but never copied back to the
 * host -- possibly incomplete; confirm intended output. */
int main()
{
    int N;
    int blocksPerGrid, threadsPerBlock;
    char * X, *Y; /* host copies of the two input sequences */
    char * d_X, *d_Y; /* device copies, visible to all kernels */
    /* Two versions (R0, R1) of every row array, used alternately per row */
    int* d_D0, *d_D1, *d_I0, *d_I1, *d_G0, *d_G1, *d_U, *d_V, *d_S;
    /* BUG FIX: validate the size before sizing buffers with it. */
    if (scanf("%d",&N) != 1 || N <= 0)
    {
        printf("Invalid input size\n");
        return 1;
    }
    size_t strSize = (N+1)*sizeof(char);
    size_t arrSize = N*sizeof(int);
    X = (char*) malloc(strSize);
    Y = (char*) malloc(strSize);
    if (X == NULL || Y == NULL)
    {
        printf("Host allocation failed\n");
        return 1;
    }
    printf("Going to take input for string with size %d\n", N);
    /* BUG FIX: build a width-limited format ("%<N>s") so overly long input
     * cannot overflow the (N+1)-byte buffers. */
    char fmt[16];
    snprintf(fmt, sizeof fmt, "%%%ds", N);
    scanf(fmt, X);
    scanf(fmt, Y);
    printf("%s\n", X);
    printf("%s\n", Y);
    /* Declare and Initialize device arrays d_X, d_Y */
    cudaMalloc(&d_X, strSize );
    cudaMalloc(&d_Y, strSize );
    cudaMalloc(&d_D0, arrSize );
    cudaMalloc(&d_D1, arrSize );
    cudaMalloc(&d_G0, arrSize );
    cudaMalloc(&d_G1, arrSize );
    cudaMalloc(&d_I0, arrSize );
    cudaMalloc(&d_I1, arrSize );
    cudaMalloc(&d_U, arrSize );
    cudaMalloc(&d_V, arrSize );
    cudaMalloc(&d_S, arrSize );
    /* Copy vectors from host memory to device memory */
    cudaMemcpy(d_X, X, strSize , cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y, Y, strSize , cudaMemcpyHostToDevice);
    /*Initialize set of rows d_G0, d_I0, d_D0 */
    blocksPerGrid = 1;
    threadsPerBlock = N;
    initFirstRow<<<blocksPerGrid, threadsPerBlock>>>(d_D0, d_I0, d_G0);
    /* For rows 1 to N-1 calculate D, U, V from the previous row, alternating
     * which copy (0/1) holds "previous" vs "current". */
    for(int i=1;i<N;i++)
    {
        if(i%2 == 1) /* Odd rows: previous state in the 0-copies */
        {
            updateD<<<blocksPerGrid, threadsPerBlock>>>(d_D1, d_D0, d_G0);
            updateU<<<blocksPerGrid, threadsPerBlock>>>(d_U , d_D1, d_G0, i, d_X, d_Y);
            updateV<<<blocksPerGrid, threadsPerBlock>>>(d_V , d_U);
            computePrefixSum(d_V, d_S, N);
        }
        else /* Even rows: previous state in the 1-copies */
        {
            updateD<<<blocksPerGrid, threadsPerBlock>>>(d_D0, d_D1, d_G1);
            updateU<<<blocksPerGrid, threadsPerBlock>>>(d_U , d_D0, d_G1, i, d_X, d_Y );
            updateV<<<blocksPerGrid, threadsPerBlock>>>(d_V , d_U);
            computePrefixSum(d_V, d_S, N);
        }
    }
    /*Done with calculations - Free Device memory */
    cudaFree(d_X);
    cudaFree(d_Y);
    cudaFree(d_G0);
    cudaFree(d_G1);
    cudaFree(d_I0);
    cudaFree(d_I1);
    cudaFree(d_D0);
    cudaFree(d_D1);
    cudaFree(d_V);
    cudaFree(d_U);
    cudaFree(d_S);
    printf("%s\n", X);
    printf("%s\n", Y);
    /* Free host memory */
    free(X);
    free(Y);
    return 0;
}
|
19,929 |
// https://www.nvidia.com/docs/IO/116711/sc11-cuda-c-basics.pdf
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <iterator>
#include <algorithm>
#include <time.h>
#include <stdlib.h>
#include <stdio.h>
cudaError_t addWithCuda(int *c, int *a, int *b, unsigned int size);
// __global__ se usa para declarar la funcion que va a correr en la placa de video
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// Expects a 1-D grid of 1-D blocks; threads past the end exit early.
__global__ void addKernel(int *c, int *a, int *b, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    c[i] = a[i] + b[i];
}
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
// esta funcion corre en el cpu y orquesta a la gpu
// Host driver: allocates and fills two input vectors, launches the GPU
// addition through addWithCuda, and prints a sample of the results.
// Returns 0 on success, 1 on any CUDA failure.
int main()
{
    // Host-side copies of the vectors (note: no dev_ prefix).
    unsigned int arraySize = N * sizeof(int);
    int *a, *b, *c;
    srand(time(NULL));
    a = (int *)malloc(arraySize);
    b = (int *)malloc(arraySize);
    c = (int *)malloc(arraySize);
    if (a == NULL || b == NULL || c == NULL) {
        fprintf(stderr, "host malloc failed!");
        return 1;
    }
    // BUG FIX: a and b were previously left unpopulated, so the kernel read
    // uninitialized memory (undefined behavior). Fill them before use.
    for (int i = 0; i < N; i++) {
        a[i] = rand() % 1000;
        b[i] = rand() % 1000;
    }
    // Add vectors in parallel (arraySize is in BYTES, as addWithCuda expects).
    cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    // BUG FIX: sizeof(c)/sizeof(c[0]) is the size of a POINTER, not of the
    // array, so only 2 elements were printed on LP64. Print a fixed sample.
    using namespace std;
    copy(c,
         c + 100,
         ostream_iterator<int>(cout, "\n"));
    // BUG FIX: the host buffers were never freed.
    free(a);
    free(b);
    free(c);
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Helper function for using CUDA to add vectors in parallel.
// Allocates device buffers, copies a and b to the GPU, launches addKernel
// over N elements, and copies the sum back into c.
// NOTE: `size` is the buffer size in BYTES (the caller passes N*sizeof(int));
// the kernel launch itself uses the element count N.
// Returns the first CUDA error encountered, or cudaSuccess.
cudaError_t addWithCuda(int *c, int *a, int *b, unsigned int size)
{
    // Device-side (dev_) copies of the input and output vectors.
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    // Every CUDA call returns a status; always check it to catch errors.
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output) .
    // Output vector
    cudaStatus = cudaMalloc((void**)&dev_c, size);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // First input vector
    cudaStatus = cudaMalloc((void**)&dev_a, size);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Second input vector
    cudaStatus = cudaMalloc((void**)&dev_b, size );
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // After allocating VRAM, copy the input data from host RAM to the GPU.
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    // Triple angle brackets mark a call from host code to device code.
    // (N is a multiple of THREADS_PER_BLOCK, so the grid covers all elements.)
    addKernel<<<N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>> (dev_c, dev_a, dev_b, N);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    // Conceptually similar to a thread.Join(): blocks until the GPU is done.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    // Same transfer as before, but in the opposite direction.
    cudaStatus = cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Always release the device buffers, on both success and failure paths.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
19,930 | #include <stdio.h>
#define SIZE 1024
// Element-wise vector ADDITION: c[i] = a[i] + b[i], one thread per element
// (intended for a single-block launch, so only threadIdx.x is used).
// BUG FIX: the body computed a[i] * b[i] even though the kernel's name,
// its original comments, and main()'s "addition example" banner all
// describe addition.
__global__ void VectorAdd(int *a, int *b, int *c, int n)
{
    int i = threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}
// Demo driver: runs VectorAdd on unified (managed) memory, prints basic
// properties of every CUDA device, then shows the first 10 results.
int main()
{
    int *a, *b, *c;
    // cudaMallocManaged gives pointers usable from both host and device.
    cudaMallocManaged(&a, SIZE * sizeof(int));
    cudaMallocManaged(&b, SIZE * sizeof(int));
    cudaMallocManaged(&c, SIZE * sizeof(int));
    // Initialize the inputs on the CPU side.
    for (int i = 0; i < SIZE; i++)
    {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }
    // One block of SIZE threads; each thread handles one element.
    VectorAdd <<<1, SIZE>>> (a, b, c, SIZE);
    // Must synchronize before the host reads managed memory written by the GPU.
    cudaDeviceSynchronize();
    int count;
    cudaGetDeviceCount(&count);
    printf("The number of GPU devices is %d\n", count);
    cudaDeviceProp prop;
    for (int i = 0; i < count; i++)
    {
        cudaGetDeviceProperties(&prop, i);
        printf(" --- General Information for device %d ---\n", i);
        printf(" Name : %s\n", prop.name);
        printf(" Compute capability: %d.%d\n", prop.major, prop.minor);
        printf(" Clock rate : %d \n", prop.clockRate);
        printf(" Total global memory : %ld MB\n", prop.totalGlobalMem/(1024*1024));
        printf(" Multiprocessor count : %d\n", prop.multiProcessorCount);
    }
    printf("\n\n");
    printf("CUDA Matrix addition example\n");
    // Show the first 10 results of c = a (+) b.
    for (int i = 0; i < 10; ++i)
        printf(" c[%d] = %d\n", i, c[i]);
    // Managed allocations are released with cudaFree, not free().
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    return 0;
}
19,931 | #include <cuda.h>
#include <stdio.h>
#include <string.h>
// Concatenate s1 and s2 into a freshly malloc'd string.
// Returns NULL if the allocation fails; otherwise the caller owns (and must
// free) the returned buffer.
char* concat(const char *s1, const char *s2)
{
    char *result = (char*)malloc(strlen(s1) + strlen(s2) + 1); // +1 for the null-terminator
    // BUG FIX: the original admitted it skipped the malloc check; a failed
    // allocation would have crashed in strcpy below.
    if (result == NULL)
        return NULL;
    strcpy(result, s1);
    strcat(result, s2);
    return result;
}
// Load <name>.cubin via the driver API, launch its "kern" entry point with
// one 32-thread block and 32 KB of dynamic shared memory, then report the
// clock count the kernel wrote into output[0].
void run(char * name){
    char * file_name = concat(name, ".cubin");
    int *output;
    cudaMalloc((void**)&output, sizeof(int)*128);
    CUmodule module;
    CUfunction kernel;
    cuModuleLoad(&module, file_name);
    cuModuleGetFunction(&kernel, module, "kern");
    void * args[1] = {&output};
    cuLaunchKernel(kernel, 1, 1, 1,
                   32, 1, 1,
                   32*1024, 0, args, 0);
    int *output_h = (int*)malloc(sizeof(int)*32);
    // blocking copy; also synchronizes with the kernel above
    cudaMemcpy(output_h, output, sizeof(int)*32, cudaMemcpyDeviceToHost);
    printf("%s took %d clocks.\n", name, output_h[0]);
    printf("Each instruction takes %.2f clocks.\n\n", (float)output_h[0]/(128.0*128.0));
    // BUG FIX: file_name (heap string from concat) and the loaded module
    // were leaked on every call.
    cuModuleUnload(module);
    cudaFree(output);
    free(output_h);
    free(file_name);
}
// Entry point: benchmark the "lds32" microkernel.
int main(){
    char kernel_name[] = "lds32";
    run(kernel_name);
    return 0;
}
|
19,932 | /*
* CPSC 4210
* - High Performance Parallel Computing
*
* Name: Austin Kothig
* ID: 001182645
* Sem: Spring 2018
*
* Purpose:
*
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <unistd.h>
#include <getopt.h>
#include <iostream>
/* Enable / Disable debugging */
#define debug 0
/* Block Size specification */
#define BLOCK 16
/* For running all Matrix Matrix Multiplication Tests */
void RunAllTests (int n);
/* Helper Function Prototypes */
float randomize (int *seed);
void clear (int n, float *X);
void stats (char* desc, int n, double *T, double *R);
void help ( );
void getGPUStats (cudaDeviceProp& prop);
int validate (int n, float *S, float *X);
/* Matrix Multiplication Prototypes*/
void global_cuda (int n, float *A, float *B, float *C);
void shared_cuda (int n, float *A, float *B, float *C);
/* kernel Function Implementation */
/* Compute A = B * C for n x n row-major matrices using only global memory.
 * One thread produces one output element A[ROW*n + COL].
 * Launch with a 2-D grid of BLOCK x BLOCK thread blocks covering n x n. */
__global__
void global_cuda_kernel(int n, float* A, float* B, float* C) {
    //-- get current position to be calculated
    const unsigned int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned int COL = blockIdx.x * blockDim.x + threadIdx.x;
    //-- BUG FIX: the store to A was previously OUTSIDE this bounds check,
    //-- so threads past the matrix edge wrote out of bounds whenever n is
    //-- not a multiple of the block size. The guard now covers the store.
    if (ROW < n && COL < n) {
        float sum = 0.f;
        //-- dot product of row ROW of B with column COL of C
        for (int i = 0; i < n; i++) {
            sum += B[ROW*n + i] * C[i*n + COL];
        }
        A[ROW*n + COL] = sum;
    }
}
/* Compute A = B * C for n x n row-major matrices, tiling BLOCK x BLOCK
 * sub-matrices through shared memory. Launch with BLOCK x BLOCK blocks on
 * a grid covering n x n.
 * BUG FIX: the original wrapped the whole tile loop (including both
 * __syncthreads() barriers) in "if (ROW < n && COL < n)". When n is not a
 * multiple of BLOCK, part of a block skips the barrier, which is undefined
 * behavior; the tile loads were also unguarded. All threads now reach every
 * barrier, and out-of-range tile entries load as zero. */
__global__
void shared_cuda_kernel(int n, float* A, float* B, float* C) {
    const unsigned int tx = threadIdx.x;
    const unsigned int ty = threadIdx.y;
    const unsigned int ROW = blockIdx.y * blockDim.y + ty;
    const unsigned int COL = blockIdx.x * blockDim.x + tx;
    const unsigned int grid = gridDim.y;
    //-- per-block staging tiles of B and C
    __shared__ float d_b[BLOCK][BLOCK], d_c[BLOCK][BLOCK];
    float sum = 0.f;
    //-- walk the tiles along the shared dimension
    for (int i = 0; i < grid; i++) {
        const unsigned int bCol = i*BLOCK + tx;  // column of B this thread loads
        const unsigned int cRow = i*BLOCK + ty;  // row of C this thread loads
        d_b[ty][tx] = (ROW < n && bCol < n) ? B[ROW*n + bCol] : 0.f;
        d_c[ty][tx] = (cRow < n && COL < n) ? C[COL + n*cRow] : 0.f;
        //-- all threads must finish loading before anyone reads the tiles
        __syncthreads();
        for (int j = 0; j < BLOCK; j++) {
            sum += d_b[ty][j] * d_c[j][tx];
        }
        //-- all threads must finish reading before the tiles are overwritten
        __syncthreads();
    }
    //-- only in-range threads write a result
    if (ROW < n && COL < n) {
        A[ROW*n + COL] = sum;
    }
}
#if debug
/* Used to build a validation Matrix */
void optim_serial (int n, float *A, float *B, float *C);
/* Variables for error checking */
int ErrorCount = 0;
float *s;
#endif
/* Global Variables */
cudaEvent_t time_begin;
cudaEvent_t time_stop;
double avgTime_Global; double avgRate_Global;
double avgTime_Shared; double avgRate_Shared;
//--
//-- Main
//--
//-- Main: parse -n <size> from the command line, print device information,
//-- run 10 timed trials of both CUDA matrix-multiply variants, and report
//-- the per-variant averages.
int main (int argc, char *argv[]) {
    //--
    //-- @@@ SH Note 1b:
    //-- These values need to be read in from command line.
    int n = -1;  // matrix dimension; -1 means "not supplied"
    //-- loop through arguments
    int opt;
    while ((opt = getopt(argc, argv, "hn:")) != -1) {
        switch (opt) {
        case 'h': help(); exit(0); break;
        case 'n': n = atoi(optarg); break;
        default :
            printf("wrong argument\n");
            exit(0); break;
        }
    }
    //-- check to see if we missed any arguments
    if (n == -1) {
        printf("\n\n./MatMultCUDA: Missing required n!!\n");
        help();
        return 0;
    }
    //-- display general information
    printf ( "\n" );
    printf ( "Dense NxN\n" );
    printf ( " CUDA version.\n" );
    printf ( "\n" );
    printf ( " Matrix multiplication tests.\n" );
#if debug
    //--
    //-- generate a validation matrix, and give debug stats
    //--
    printf("n is %d\n", n);
    int i, j;
    float* b = (float *) malloc (n*n*sizeof (float));
    float* c = (float *) malloc (n*n*sizeof (float));
    //--
    //-- Assign randomly generated values to the input matrices B and C.
    //-- (Same fixed seed as RunAllTests, so the solution matrix matches.)
    //--
    int seed = 123456789;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            b[i*n + j] = randomize (&seed);
        }
    }
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            c[i*n + j] = randomize (&seed);
        }
    }
    //-- allocate the space for s (the global reference solution)
    s = (float *) malloc (n*n*sizeof (float));
    //-- Generate a "Good" Solution
    optim_serial (n, s, b, c);
    printf("\n\nFinished Generating Solution Mat.\n\n");
    free(b); free(c);
#endif
    //-- Display FOPS: 2*n^3 operations for an NxN matrix product
    unsigned long long ops;
    ops = (unsigned long long)n;
    ops *= (unsigned long long)n;
    ops *= (unsigned long long)n;
    ops *= 2;
    printf(" Floating point OPS roughly = %llu\n", ops);
    //--
    //-- @@@ SH Note 1a:
    //-- You must read in the dimension of the matrix and the number of threads
    //-- from the command line.
    //-- cuda initializations
    cudaDeviceProp prop;
    getGPUStats(prop);
    printf ( "\n" );
    printf ( " Thread Blocks = %d\n", (((n+BLOCK-1)/BLOCK)*((n+BLOCK-1)/BLOCK))-((n+BLOCK-1)/BLOCK));
    printf ( " Threads Per Block %d\n", BLOCK*BLOCK);
    //-- running sums across the 10 trials, accumulated by RunAllTests
    avgTime_Global = 0.0; avgRate_Global = 0.0;
    avgTime_Shared = 0.0; avgRate_Shared = 0.0;
    for (int i = 1; i <= 10; i++) {
        printf("\n\n\n\n Beginning Trial %d, of Matrix Size %d\n", i, n);
        printf( "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
        //-- call the matrix multiplication routines for serial cases
        RunAllTests(n);
    }
    //-- turn the sums into averages over the 10 trials
    avgTime_Global /= 10.0; avgRate_Global /= 10.0;
    avgTime_Shared /= 10.0; avgRate_Shared /= 10.0;
    printf("\n\n\n Total Averages for All 10 CUDA Trials \n");
    printf( "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^\n");
    printf(" Global Time %f\n Global Rate %f\n\n", avgTime_Global, avgRate_Global);
    printf(" Shared Time %f\n Shared Rate %f\n\n", avgTime_Shared, avgRate_Shared);
    //--
    //-- Terminate.
    //--
    printf("\n");
    printf("Dense NxN:\n");
    printf(" Normal end of execution.\n" );
#if debug
    printf(" Execution Finished with %d Error(s) Found.\n", ErrorCount);
    //-- Deallocate the used memory
    free(s);
#endif
    return 0;
}
//--
//-- Run a series of NxN Matrix Matrix multiplication
//-- using different stratagies
//--
//--
//-- RunAllTests: run one timed trial of each NxN matrix-multiplication
//-- strategy (global-memory and shared-memory CUDA) on freshly randomized
//-- inputs, print its stats, and accumulate time/rate into the global
//-- averages. Timing uses the global time_begin/time_stop events, which are
//-- created and destroyed around each variant.
//--
void RunAllTests (int n) {
    //--
    //-- Variables used in this function
    //--
    int i; int j; int seed;
    double T; double R;  // per-test time and rate, filled in by stats()
    //--
    //-- Allocate the storage for matrices.
    //--
    float *a; float *b; float *c;
    a = (float *) malloc (n*n*sizeof (float));
    b = (float *) malloc (n*n*sizeof (float));
    c = (float *) malloc (n*n*sizeof (float));
    //--
    //-- Assign randomly generated values to the input matrices B and C.
    //-- (Fixed seed, so every trial multiplies the same matrices.)
    //--
    seed = 123456789;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            b[i*n + j] = randomize (&seed);
        }
    }
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            c[i*n + j] = randomize (&seed);
        }
    }
    clear(n, a);
    //######################################################
    //--
    //-- Run the Global CUDA Test
    //--
    //######################################################
    //-- create the timing events used by global_cuda/stats
    cudaEventCreate(&time_begin);
    cudaEventCreate(&time_stop);
    //-- run the test
    global_cuda(n, a, b, c);
#if debug
    //-- Optional Validation against the serial solution matrix s
    if (validate (n, s, a)) {
        printf ("\n\n\n###################################\n\n\n");
        printf ("global_cuda is incorrect!!");
        printf ("\n\n\n###################################\n\n\n");
        ErrorCount++;
    }
#endif
    //-- Display Stats
    char global_cuda_desc[] = "Global CUDA.";
    stats(global_cuda_desc, n, &T, &R);
    //-- add to averages
    avgTime_Global += T;
    avgRate_Global += R;
    //-- destroy the cuda events
    cudaEventDestroy(time_begin);
    cudaEventDestroy(time_stop);
    //-- Clear out Mat A before the next variant reuses it
    clear(n, a);
    //######################################################
    //--
    //-- Run the Shared CUDA Test
    //--
    //######################################################
    //-- create fresh timing events
    cudaEventCreate(&time_begin);
    cudaEventCreate(&time_stop);
    //-- run the test
    shared_cuda (n, a, b, c);
#if debug
    //-- Optional Validation
    if (validate (n, s, a)) {
        printf ("\n\n\n###################################\n\n\n");
        printf ("shared_cuda is incorrect!!");
        printf ("\n\n\n###################################\n\n\n");
        ErrorCount++;
    }
#endif
    //-- Display Stats
    char shared_cuda_desc[] = "Shared CUDA.";
    stats(shared_cuda_desc, n, &T, &R);
    avgTime_Shared += T;
    avgRate_Shared += R;
    //-- destroy the cuda events
    cudaEventDestroy(time_begin);
    cudaEventDestroy(time_stop);
    //-- Clear out Mat A
    clear(n, a);
    //-- Deallocate the used memory
    free(a); free(b); free(c);
    return;
}
//--
//-- Get a randomized value, and refresh seed.
//--
//--
//-- randomize: Lehmer-style pseudo-random generator. Advances *seed in
//-- place and returns a float in (0, 1).
//--
float randomize (int *seed) {
    const int quotient = *seed / 127773;
    int next = 16807 * ( *seed - quotient * 127773 ) - quotient * 2836;
    if (next < 0) {
        next += 2147483647;
    }
    *seed = next;
    return (float) next * 4.656612875E-10;
}
//--
//-- clear out the contents of X
//--
//--
//-- clear: zero every entry of the n x n matrix X.
//--
void clear (int n, float *X) {
    const int total = n * n;
    for (int idx = 0; idx < total; idx++) {
        X[idx] = 0.f;
    }
}
//--
//-- compare the passed in matracies to see
//-- if there are any differences between them
//--
//--
//-- validate: compare two n x n matrices; return 1 (and report the first
//-- mismatch) if any pair of entries differs by more than 0.001, else 0.
//-- BUG FIX: the original used abs() on a float difference; with this
//-- file's includes that resolves to the integer overload, truncating any
//-- difference below 1.0 to 0 and silently passing wrong results. Use
//-- fabsf for a true floating-point comparison.
//--
int validate (int n, float *S, float *X) {
    int i, j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            if (fabsf(S[i*n + j] - X[i*n + j]) > 0.001f) {
                std::cout << "\n\n\n\n";
                std::cout << "Fail at pos " << i*n << " x " << j << std::endl;
                std::cout << S[i*n + j] << " != " << X[i*n + j] << std::endl;
                return 1;
            }
        }
    }
    return 0;
}
//--
//-- Stats : give the user the stats of this implementation
//--
//--
//-- Stats: report the last timed run (between the global time_begin and
//-- time_stop events) and return the elapsed seconds and MegaOPS rate
//-- through T and R.
//--
void stats (char* desc, int n, double *T, double *R) {
    //-- 2*n^3 floating-point operations for an NxN matrix product
    unsigned long long flop_count = (unsigned long long)n;
    flop_count *= (unsigned long long)n;
    flop_count *= (unsigned long long)n;
    flop_count *= 2;
    //-- elapsed time between the recorded events, converted ms -> s
    float elapsed;
    cudaEventElapsedTime(&elapsed, time_begin, time_stop);
    elapsed /= 1000.f;
    const double megaops = ( double ) ( flop_count ) / (elapsed) / 1000000.0;
    printf("\n############################################\n");
    printf(" Test = %s\n", desc);
    printf(" N = %d\n", n);
    printf(" Floating point OPS roughly = %llu\n", flop_count);
    printf(" Elapsed time dT = %f\n", elapsed);
    printf(" Rate = MegaOPS/dT = %f\n", megaops);
    (*T) = elapsed;
    (*R) = megaops;
}
//--
//-- Help : simple function for how to use this program
//--
//--
//-- Help: print usage for this program.
//-- BUG FIX: the usage line advertised a "-t <num>" option, but the getopt
//-- string in main() is "hn:" — no -t is accepted. The usage text now
//-- matches the options that actually exist.
//--
void help () {
    printf("\n");
    printf("Usage: ./MatMultCUDA [-h] -n <num> \n");
    printf("Options:\n");
    printf(" -h\t\tPrint this help message.\n");
    printf(" -n <num>\tSize of N.\n");
    printf("Examples:\n");
    printf("linux> ./MatMultCUDA -n 1024\n");
}
//--
//-- getGPUStats : print out general information about the GPU
//--
//--
//-- getGPUStats: print general information about every CUDA device;
//-- `prop` is left holding the properties of the last device queried.
//--
void getGPUStats (cudaDeviceProp &prop) {
    int count;
    cudaGetDeviceCount(&count);
    for (int i = 0; i < count; i++) {
        cudaGetDeviceProperties(&prop, i);
        std::cout << "---------------------------------------------------------------" << std::endl;
        std::cout << "Name " << prop.name << std::endl;
        // NOTE(review): clockRate is reported by CUDA in kHz; dividing by
        // 1024 only approximates MHz (1000 would be exact).
        std::cout << "GPU clock rate " << (double)prop.clockRate / 1024 << " MHz" << std::endl;
        std::cout << "Registers Per Block " << prop.regsPerBlock << std::endl;
        std::cout << "Compute capability " << prop.major << "." << prop.minor << std::endl;
        std::cout << "Total global memory " << (double)prop.totalGlobalMem / (1024*1024) << " MB" << std::endl;
        std::cout << "Total constant memory " << (double)prop.totalConstMem / (1024) << " KB" << std::endl;
        std::cout << "Shared memory per block " << (double)prop.sharedMemPerBlock / (1024) << " KB" << std::endl;
        std::cout << "Maximum threads per block " << prop.maxThreadsPerBlock << std::endl << std::endl;
        std::cout << "Maximum threads along X " << prop.maxThreadsDim[0] << std::endl;
        std::cout << " Y " << prop.maxThreadsDim[1] << std::endl;
        std::cout << " Z " << prop.maxThreadsDim[2] << std::endl << std::endl;
        std::cout << "Maximum grid size along X " << prop.maxGridSize[0] << std::endl;
        std::cout << " Y " << prop.maxGridSize[1] << std::endl;
        std::cout << " Z " << prop.maxGridSize[2] << std::endl << std::endl;
        std::cout << "Warp size " << prop.warpSize << std::endl;
        std::cout << "Multiprocessor count " << prop.multiProcessorCount << std::endl;
        std::cout << "Device overlap " << prop.deviceOverlap << std::endl << std::endl;
        std::cout << "Maximum resident threads " << prop.maxThreadsPerMultiProcessor << std::endl
                  << " per multi-processor \n";
        std::cout << std::endl;
    }
}
//--
//-- Implementation of Different NxN Matrix Multiplication
//--
//--
//-- global_cuda : use global memory on GPU to multiply two matracies
//--
//--
//-- global_cuda: A <- B*C for n x n row-major matrices using the
//-- global-memory kernel. Copies the inputs to the device, times the
//-- kernel with the global time_begin/time_stop events, and copies the
//-- result back into A.
//-- FIXES: cudaThreadSynchronize() is deprecated (removed in newer CUDA
//-- toolkits) — replaced with cudaDeviceSynchronize(); the 128-byte
//-- dynamic shared-memory request was dropped since the kernel declares
//-- no dynamic shared memory.
//--
void global_cuda (int n, float *A, float *B, float *C) {
    //-- initialize variables
    float *d_A; float *d_B; float *d_C;
    //-- Allocate Memory on the GPU
    cudaMalloc(&d_A, n*n*sizeof (float));
    cudaMalloc(&d_B, n*n*sizeof (float));
    cudaMalloc(&d_C, n*n*sizeof (float));
    //-- copy data over to gpu
    cudaMemcpy(d_A, A, n*n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, n*n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, n*n*sizeof(float), cudaMemcpyHostToDevice);
    //-- BLOCK x BLOCK threads; enough blocks to cover the whole matrix
    dim3 DimBlock(BLOCK, BLOCK);
    dim3 DimGrid((n + DimBlock.x - 1) / DimBlock.x,
                 (n + DimBlock.y - 1) / DimBlock.y);
    //-- record when the timed region begins
    cudaEventRecord(time_begin);
    //-- Start the Kernel
    global_cuda_kernel<<<DimGrid,DimBlock>>>(n, d_A, d_B, d_C);
    //-- wait for the kernel to finish
    cudaDeviceSynchronize();
    //-- record when the timed region ended
    cudaEventRecord(time_stop);
    //-- make the stop event safe to query from stats()
    cudaEventSynchronize(time_stop);
    //-- copy the results out of gpu
    cudaMemcpy(A, d_A, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    //-- Deallocate device Memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
//--
//-- shared_cuda : use shared memory on GPU to multiply two matracies
//--
//--
//-- shared_cuda: A <- B*C for n x n row-major matrices using the
//-- shared-memory tiled kernel. Same structure as global_cuda: copy in,
//-- time the kernel with the global events, copy the result back.
//-- FIXES: cudaThreadSynchronize() is deprecated — replaced with
//-- cudaDeviceSynchronize(); the 128-byte dynamic shared-memory request
//-- was dropped (the kernel's tiles are statically declared __shared__).
//--
void shared_cuda (int n, float *A, float *B, float *C) {
    //-- initialize variables
    float *d_A; float *d_B; float *d_C;
    //-- Allocate Memory on the GPU
    cudaMalloc(&d_A, n*n*sizeof (float));
    cudaMalloc(&d_B, n*n*sizeof (float));
    cudaMalloc(&d_C, n*n*sizeof (float));
    //-- copy data over to gpu
    cudaMemcpy(d_A, A, n*n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, n*n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, n*n*sizeof(float), cudaMemcpyHostToDevice);
    //-- BLOCK x BLOCK threads; enough blocks to cover the whole matrix
    dim3 DimBlock(BLOCK, BLOCK);
    dim3 DimGrid((n + DimBlock.x - 1) / DimBlock.x,
                 (n + DimBlock.y - 1) / DimBlock.y);
    //-- record when the timed region begins
    cudaEventRecord(time_begin);
    //-- Start the kernel
    shared_cuda_kernel<<<DimGrid,DimBlock>>>(n, d_A, d_B, d_C);
    //-- wait for the kernel to finish
    cudaDeviceSynchronize();
    //-- record when the timed region ended
    cudaEventRecord(time_stop);
    //-- make the stop event safe to query from stats()
    cudaEventSynchronize(time_stop);
    //-- copy the results out of gpu
    cudaMemcpy(A, d_A, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    //-- Deallocate device Memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
#if debug
//--
//-- optim_serial : kij row by row with fixed B.
//--
//-- notes : good cache performance, serial.
//-- used to build a validation matrix.
//--
//--
//-- optim_serial : kij row by row with fixed B.
//--
//-- notes : accumulates B*C INTO A, so the caller must zero A first.
//-- Cache-friendly loop order; serial; used to build the
//-- validation matrix.
//--
void optim_serial (int n, float *A, float *B, float *C) {
    for (int k = 0; k < n; k++) {
        for (int i = 0; i < n; i++) {
            const float pivot = B[i*n + k];
            float *outRow = &A[i*n];
            const float *srcRow = &C[k*n];
            for (int j = 0; j < n; j++) {
                outRow[j] += pivot * srcRow[j];
            }
        }
    }
}
#endif
|
19,933 | #include "includes.h"
// Compute the flux-difference updates dU* = (Flux[i] - Flux[i+1]) * dt/dx
// for each of the five hydro variables (D, S1, S2, S3, Tau) over a 1-D grid
// of `size` cells. The two outermost ghost cells on each side are skipped.
// NOTE(review): CUDA_BLOCK_SIZE / CUDA_GRID_SIZE come from includes.h and
// are assumed to match the launch configuration — confirm at the call site.
__global__ void HydroComputedUx_CUDA3_kernel(float *FluxD, float *FluxS1, float *FluxS2, float *FluxS3, float *FluxTau, float *dUD, float *dUS1, float *dUS2, float *dUS3, float *dUTau, float dtdx, int size)
{
    // get thread and block index
    const long tx = threadIdx.x;
    const long bx = blockIdx.x;
    const long by = blockIdx.y;
    // linearize the 2-D grid of 1-D blocks into a single cell index
    int igrid = tx + bx*CUDA_BLOCK_SIZE + by*CUDA_BLOCK_SIZE*CUDA_GRID_SIZE;
    // skip the two boundary/ghost cells at each end (and out-of-range threads)
    if (igrid < 2 || igrid > size - 3)
        return;
    int igridp1 = igrid + 1;
    // backward flux difference scaled by dt/dx for each conserved quantity
    dUD  [igrid] = (FluxD  [igrid] - FluxD  [igridp1])*dtdx;
    dUS1 [igrid] = (FluxS1 [igrid] - FluxS1 [igridp1])*dtdx;
    dUS2 [igrid] = (FluxS2 [igrid] - FluxS2 [igridp1])*dtdx;
    dUS3 [igrid] = (FluxS3 [igrid] - FluxS3 [igridp1])*dtdx;
    dUTau[igrid] = (FluxTau[igrid] - FluxTau[igridp1])*dtdx;
}
19,934 | // System includes
#include <stdio.h>
#include<time.h>
// CUDA runtime
#include <cuda_runtime.h>
#include<device_launch_parameters.h>
#include<curand.h>
// Add 10 to every element of d (length `count`).
// Supports a fully 3-D block / 3-D grid launch: the global thread id is
// obtained by linearizing the block coordinates, then the thread
// coordinates within the block.
__global__ void addTen(float* d, int count)
{
    const int blockVolume = blockDim.x * blockDim.y * blockDim.z;
    const int localId = threadIdx.x
                      + threadIdx.y * blockDim.x
                      + threadIdx.z * blockDim.x * blockDim.y;
    const int blockId = blockIdx.x
                      + blockIdx.y * gridDim.x
                      + blockIdx.z * gridDim.x * gridDim.y;
    const int gid = blockId * blockVolume + localId;
    if (gid < count) {
        d[gid] += 10;
    }
}
// Fill a device buffer with `count` uniform random floats, add 10 to each
// element on the GPU, and print the first 100 results. Returns 0.
int simple_map()
{
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_MTGP32);
    curandSetPseudoRandomGeneratorSeed(gen, time(0));
    const int count = 123456;
    const int size = count * sizeof(float);
    float *d;
    float h[count];
    cudaMalloc(&d, size);
    curandGenerateUniform(gen, d, count);
    // 8*8*8 = 512 threads per block, 16*16 = 256 blocks:
    // 131072 threads >= count, kernel bound-checks the tail.
    dim3 block(8, 8, 8);
    dim3 grid(16, 16);
    addTen<<<grid, block>>>(d, count);
    // blocking copy; also synchronizes with the kernel above
    cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
    for(int i = 0 ; i < 100 ; i++) {
        printf("%f \n", h[i]);
    }
    // BUG FIX: the device buffer and the curand generator were leaked.
    cudaFree(d);
    curandDestroyGenerator(gen);
    return 0;
}
|
// Flattened 2-D triangulation data shared between host and kernels.
typedef struct{
    int* indices;        // 3 vertex indices per triangle
    float* points;       // interleaved (x, y) coordinates, 2 floats per point
    int* neighbor;       // 3 neighbor-triangle ids per triangle; -1 = no neighbor
    float* k_simplices;  // output: 5 floats per edge, 3 edges (15 floats) per triangle
} alpha_complex;
// Circumcircle radius of the triangle whose 3 vertex ids are in `indices`;
// `points` holds interleaved (x, y) coordinates.
// Uses Heron's formula for the area, then R = abc / (4 * area).
// FIX: the divisor literal is now 4.0f — the original 4.0 promoted the
// whole expression to double inside a float-only device function.
__device__ float calc_sigma(int* indices, float* points)
//circle radius of triangle
{
    float d[3];   // the three edge lengths
    float s = 0;  // accumulates the perimeter
    for (int i = 0; i < 3; i++) {
        float p1 = points[indices[i]*2]   - points[indices[(i+1)%3]*2];
        float p2 = points[indices[i]*2+1] - points[indices[(i+1)%3]*2+1];
        d[i] = sqrtf(p1*p1 + p2*p2);
        s += d[i];
    }
    s = s/2;  // semi-perimeter
    float area = sqrtf(s*(s-d[0])*(s-d[1])*(s-d[2]));
    float circle_r = d[0]*d[1]*d[2]/(4.0f*area);
    return circle_r;
}
// One thread per triangle: emit the triangle's 3 edges into k_simplices.
// Each edge record is 5 floats: [v0, v1, edge length, lower radius, upper
// radius]; a triangle therefore writes 15 consecutive floats at idx*15.
// NOTE(review): no bounds check on idx — the launch configuration must
// exactly cover the number of triangles; confirm at the call site.
__global__ void create_simplices(alpha_complex* complex){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;  // triangle index
    int indices[3];   // vertex ids of this triangle
    int indices2[3];  // vertex ids of the neighboring triangle
    float *points = complex->points;
    float *k_sim = complex->k_simplices;
    for (int i = 0; i<3; i++){
        indices[i] = complex->indices[idx*3+i];
    }
    // process edge i = (vertex i, vertex (i+1)%3)
    for (int i = 0; i<3; i++){
        // record the two endpoint vertex ids
        k_sim[idx*15 + i*5 + 0] = (float)complex->indices[idx*3+i];
        k_sim[idx*15 + i*5 + 1] = (float)complex->indices[idx*3+(i+1)%3];
        // edge length
        float p1 = points[indices[i]*2] - points[indices[(i+1)%3]*2];
        float p2 = points[indices[i]*2+1] - points[indices[(i+1)%3]*2+1];
        float sigma = sqrtf(p1*p1+p2*p2);
        k_sim[idx*15 + i*5 +2] = sigma;
        // neighbor (i+2)%3 is the triangle across edge i
        // (indexing convention from scipy's Delaunay output)
        if(complex->neighbor[idx*3+(i+2)%3] == -1)
        // boundary edge: only this triangle's circumradius is available
        {
            float dist1 = calc_sigma(indices, points);
            k_sim[idx*15 + i*5 + 3] = fminf(dist1,sigma);
            // sentinel "infinite" upper radius for boundary edges
            k_sim[idx*15 + i*5 + 4] = 99999.0;
        }
        else
        // interior edge: use the circumradii of both adjacent triangles
        {
            //todo: set neighbor to -1 to avoid double analysis
            for(int j = 0;j<3;j++){
                indices2[j] = complex->indices[complex->neighbor[idx*3+(i+2)%3]*3+j];
                //weird indexing from scipy delaunay
            }
            float dist1 = calc_sigma(indices, points);
            float dist2 = calc_sigma(indices2, points);
            // NOTE(review): the "< 1" threshold on the smaller radius is a
            // magic constant whose meaning is not evident here — confirm
            // against the alpha-complex construction this feeds.
            if (fminf(dist1, dist2)<1){
                k_sim[idx*15 + i*5 + 3] = sigma;
            }
            else{
                k_sim[idx*15 + i*5 + 3] = fminf(dist1, dist2);
            }
            k_sim[idx*15 + i*5 + 4] = fmaxf(dist1, dist2);
        }
    }
}
19,936 | /*
* mat_prod.cu
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define FALSE 0
#define TRUE 1
#define TxB 4
#define N 4
#define M 4
typedef unsigned char uChar;
const uChar SBOX[256] = {
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
};
const uChar RCON[256] = {
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39,
0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b,
0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63,
0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd,
0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d
};
__device__ const uChar CUDASBOX[256] = {
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
};
__device__ const uChar GM2[256] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5
};
__device__ const uChar GM3[256] = {
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a
};
/*
__device__ const uChar GM9[256] = {
0x00, 0x09, 0x12, 0x1b, 0x24, 0x2d, 0x36, 0x3f, 0x48, 0x41, 0x5a, 0x53, 0x6c, 0x65, 0x7e, 0x77,
0x90, 0x99, 0x82, 0x8b, 0xb4, 0xbd, 0xa6, 0xaf, 0xd8, 0xd1, 0xca, 0xc3, 0xfc, 0xf5, 0xee, 0xe7,
0x3b, 0x32, 0x29, 0x20, 0x1f, 0x16, 0x0d, 0x04, 0x73, 0x7a, 0x61, 0x68, 0x57, 0x5e, 0x45, 0x4c,
0xab, 0xa2, 0xb9, 0xb0, 0x8f, 0x86, 0x9d, 0x94, 0xe3, 0xea, 0xf1, 0xf8, 0xc7, 0xce, 0xd5, 0xdc,
0x76, 0x7f, 0x64, 0x6d, 0x52, 0x5b, 0x40, 0x49, 0x3e, 0x37, 0x2c, 0x25, 0x1a, 0x13, 0x08, 0x01,
0xe6, 0xef, 0xf4, 0xfd, 0xc2, 0xcb, 0xd0, 0xd9, 0xae, 0xa7, 0xbc, 0xb5, 0x8a, 0x83, 0x98, 0x91,
0x4d, 0x44, 0x5f, 0x56, 0x69, 0x60, 0x7b, 0x72, 0x05, 0x0c, 0x17, 0x1e, 0x21, 0x28, 0x33, 0x3a,
0xdd, 0xd4, 0xcf, 0xc6, 0xf9, 0xf0, 0xeb, 0xe2, 0x95, 0x9c, 0x87, 0x8e, 0xb1, 0xb8, 0xa3, 0xaa,
0xec, 0xe5, 0xfe, 0xf7, 0xc8, 0xc1, 0xda, 0xd3, 0xa4, 0xad, 0xb6, 0xbf, 0x80, 0x89, 0x92, 0x9b,
0x7c, 0x75, 0x6e, 0x67, 0x58, 0x51, 0x4a, 0x43, 0x34, 0x3d, 0x26, 0x2f, 0x10, 0x19, 0x02, 0x0b,
0xd7, 0xde, 0xc5, 0xcc, 0xf3, 0xfa, 0xe1, 0xe8, 0x9f, 0x96, 0x8d, 0x84, 0xbb, 0xb2, 0xa9, 0xa0,
0x47, 0x4e, 0x55, 0x5c, 0x63, 0x6a, 0x71, 0x78, 0x0f, 0x06, 0x1d, 0x14, 0x2b, 0x22, 0x39, 0x30,
0x9a, 0x93, 0x88, 0x81, 0xbe, 0xb7, 0xac, 0xa5, 0xd2, 0xdb, 0xc0, 0xc9, 0xf6, 0xff, 0xe4, 0xed,
0x0a, 0x03, 0x18, 0x11, 0x2e, 0x27, 0x3c, 0x35, 0x42, 0x4b, 0x50, 0x59, 0x66, 0x6f, 0x74, 0x7d,
0xa1, 0xa8, 0xb3, 0xba, 0x85, 0x8c, 0x97, 0x9e, 0xe9, 0xe0, 0xfb, 0xf2, 0xcd, 0xc4, 0xdf, 0xd6,
0x31, 0x38, 0x23, 0x2a, 0x15, 0x1c, 0x07, 0x0e, 0x79, 0x70, 0x6b, 0x62, 0x5d, 0x54, 0x4f, 0x46
};
*/
uChar state[16];           // current 4x4 AES state block
__device__ int bits = 128; // key length in bits
int rows = 4;
int columns = 4;
uChar* plain_text;         // hex-encoded input text
int allTestSuccess;
// statistics output file
FILE *data;
long plain_text_size; // size of the input file
//---------- AES PARAMETERS, ASSIGNED DEFAULT VALUES HERE -------
__device__ int tot_rounds = 11;
int pass_lenght; // password length
uChar password[16];
uChar expanded_key[176]; // key produced by the key schedule
//----------------------FUNZIONI AUSILIARIE---------------
// Prints all 16 state bytes as one contiguous hex string, then a newline.
void printStateInline(uChar* matrix) {
    for (int idx = 0; idx < 16; ++idx)
        printf("%02x", matrix[idx]);
    printf("\n");
}
// Prints <dim> bytes of <in> as space-separated hex, prefixed "Password: ".
void printKey(uChar* in, int dim) {
    printf("Password: ");
    int idx = 0;
    while (idx < dim) {
        printf("%02x ", in[idx]);
        ++idx;
    }
    printf("\n" );
}
// Prints the state as a rows x columns hex matrix. The layout is
// column-major: element (row r, col c) lives at matrix[c * M + r].
void printState(uChar* matrix) {
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < columns; ++c)
            printf("%02x ", matrix[c * M + r]);
        printf("\n");
    }
    printf("\n");
}
// Converts one hex digit character to its numeric value (0-15).
// Generalized to accept uppercase 'A'-'F' as well as the original
// '0'-'9' and lowercase 'a'-'f' (lowercase behavior is unchanged).
// Non-hex characters yield an unspecified value, as before.
int getVal(char c)
{
    if (c >= '0' && c <= '9')
        return c - '0';
    if (c >= 'A' && c <= 'F')
        return c - 'A' + 10;
    return c - 'a' + 10;
}
// Loads one 16-byte state block from the global hex-encoded plain_text,
// starting at character offset <state_selected> (two hex chars per byte).
// NOTE(review): the zero-fill branch writes state[j * M + i] while the
// normal branch writes state[i * M + j] — the transposed indexing looks
// unintentional; confirm which layout downstream code expects.
void initStateHex( int state_selected) {
columns = 4;
int k = state_selected;
for (int i = 0; i < columns; i++) {
for (int j = 0; j < rows; j++) {
// plain_text[k] == NULL is effectively a '\0' end-of-input check.
if (plain_text[k] == NULL) {
state[j * M + i] = 0;
//exit the loop and pad with zeros
//or pad with zeros from here on; check whether null+1 is still null
}
else {
state[i * M + j] = getVal(plain_text[k]) * 16 + getVal(plain_text[k + 1]);
}
k += 2;
}
}
}
// Reads a 32-hex-character key from <file_name> into the global
// `password` array (16 bytes, two hex digits per byte).
// Now exits with a diagnostic if the file cannot be opened (previously
// fgetc was called on a NULL stream), matching readPlainText's style.
void readKey(char* file_name) {
    FILE *fr = fopen(file_name, "r");
    if (!fr) {
        perror(file_name);
        exit(1);
    }
    for (int i = 0; i < 16; ++i)
    {
        // Read the high nibble first, then the low nibble.
        char c = fgetc(fr);
        int val = getVal(c) * 16 + getVal(fgetc(fr));
        password[i] = val;
    }
    fclose(fr);
}
//-------------------GESTORE DELLE CHIAVI----------------
/* Rotates a 4-byte word one byte to the left, in place:
 * {a,b,c,d} -> {b,c,d,a}. Used by the key-schedule core. */
void rotate(unsigned char *in) {
    unsigned char first = in[0];
    in[0] = in[1];
    in[1] = in[2];
    in[2] = in[3];
    in[3] = first;
}
/* Key-schedule core: rotate the 4-byte word left by one byte, substitute
 * every byte through the Rijndael S-box, then XOR round constant RCON[i]
 * into the first byte. */
void schedule_core(unsigned char *in, unsigned char i) {
    rotate(in);
    for (int k = 0; k < 4; ++k)
        in[k] = SBOX[in[k]];
    in[0] ^= RCON[i];
}
// Expands the 16-byte user key <in> into the global 176-byte expanded_key
// (11 round keys for AES-128), following the standard Rijndael schedule:
// each new 4-byte word is the previous word (run through schedule_core on
// every 16th byte boundary) XORed with the word 16 bytes back.
void expand_key(uChar *in) {
uChar t[4];
/* c is 16 because the first sub-key is the user-supplied key */
int c = 16;
uChar i = 1;
uChar a;
memcpy(expanded_key, in, 16);
/* We need 11 sets of sixteen bytes each for 128-bit mode */
while (c < 176) {
/* Copy the temporary variable over from the last 4-byte
* block */
for (a = 0; a < 4; a++) {
t[a] = expanded_key[a + c - 4];
}
/* Every four blocks (of four bytes),
* do a complex calculation */
if (c % 16 == 0) {
schedule_core(t, i);
i++;
}
for (a = 0; a < 4; a++) {
expanded_key[c] = expanded_key[c - 16] ^ t[a];
c++;
}
}
//printKey(expanded_key, 176);
}
/*
* FUNZIONI KERNEL
*/
//FUNZIONA
// Device: AES SubBytes — replaces each state byte with its S-box entry.
// One thread per state byte; the high nibble selects the CUDASBOX row,
// the low nibble the column.
__device__ void subBytes(uChar* state) {
int sbox_r, sbox_c;
// row and column indices of this thread's state byte
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
sbox_r = (state[Col * M + Row] & 0xf0) >> 4;
sbox_c = state[Col * M + Row] & 0x0f;
state[Col * M + Row] = CUDASBOX[sbox_r * 16 + sbox_c];
}
//FUNZIONA
// Device: XORs round key <cur_round> (16 bytes of expanded_keyGPU) into
// the state; one thread per byte, (Row, Col) addressing the 4x4 state.
// NOTE(review): the buffer offset (blockIdx.x * cur_round) is suspicious —
// it is 0 for a single-block launch, but for multi-block grids it scales
// with the round number rather than with a per-block state size; the
// commented printf below suggests `stateDIm` was once intended. Confirm
// against the launch configuration before touching.
__device__ void addRoundKey(uChar* bufferGPU, int cur_round, uChar* expanded_keyGPU) {
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
int index = Col * M + Row;
int val = (cur_round * 16) + index;
bufferGPU[(blockIdx.x*cur_round)+index] = bufferGPU[(blockIdx.x*cur_round)+index] ^ expanded_keyGPU[val];
//printf("%2x\n", bufferGPU[(blockIdx.x*stateDIm)+index]);
}
//FUNZIONA
// Device: AES MixColumns. Each thread computes the new value of one state
// byte from its column using the GF(2^8) multiply tables GM2/GM3, then all
// threads synchronize before writing so no thread overwrites an input
// another thread still needs.
// NOTE(review): new_val is only assigned for r in 0..3 — this assumes a
// launch with blockDim.y == 4 (one thread per state row); confirm.
__device__ void gmixColumns(uChar* state) {
int r = threadIdx.y;
int Col = threadIdx.x;
int index = Col * M + r;
uChar new_val;
if (r == 0) {
new_val = GM2[state[0 + Col * 4]] ^ GM3[state[1 + Col * 4]] ^ state[2 + Col * 4] ^ state[3 + Col * 4];
}
if (r == 1) {
new_val = state[0 + Col * 4] ^ GM2[state[1 + Col * 4]] ^ GM3[state[2 + Col * 4]] ^ state[3 + Col * 4];
}
if (r == 2) {
new_val = state[0 + Col * 4] ^ state[1 + Col * 4] ^ GM2[state[2 + Col * 4]] ^ GM3[state[3 + Col * 4]];
}
if (r == 3) {
new_val = GM3[state[0 + Col * 4]] ^ state[1 + Col * 4] ^ state[2 + Col * 4] ^ GM2[state[3 + Col * 4]];
}
// Barrier separates all reads above from the writes below.
__syncthreads();
state[index] = new_val;
}
// Device helper: copies row <row_index> of <state>, rotated left by
// <row_index> positions, into <tmp_state>. Column-major layout: row r
// occupies indices r, r+4, r+8, r+12 (i iterates exactly those four).
__device__ void shiftRow(uChar *tmp_state, uChar *state, int row_index) {
for (int i = row_index; i < row_index + 13; i += 4) {
// Destination slot after rotating this row left by row_index columns.
int new_index = i - ((row_index) * 4);
if (new_index < row_index) {
new_index += (16);
}
tmp_state[new_index] = state[i];
}
}
// Device: AES ShiftRows. Only threads with linear index < 4 act, one per
// state row; each rotates its row inside a private full copy of the state
// and then writes the four rotated bytes of that row back.
// NOTE(review): each active thread snapshots the whole 16-byte state into
// thread-local memory before shifting — rows are independent so this is
// safe, but the per-thread memcpy is redundant work; verify before
// optimizing.
__device__ void shiftRows(uChar *state) {
int Row = blockIdx.y * blockDim.y + threadIdx.y;
int Col = blockIdx.x * blockDim.x + threadIdx.x;
int index = Col * M + Row;
uChar tmp_state[16];
memcpy(tmp_state, state, 16);
if (index < 4) {
shiftRow(tmp_state, state, index);
// Copy back only this row's four entries (index, index+4, +8, +12).
for (int i = index; i < index + 13; i += 4) {
state[i] = tmp_state[i];
}
}
}
//-----------------------------------------------------------------
// Kernel: AES-128 encryption of the 4x4 state in stateGPU using the
// 176-byte expanded key. One thread per state byte is expected;
// __syncthreads() barriers separate the AES stages. show_all enables
// per-stage tracing via device printf.
__global__ void AESEncryptKernel(int show_all, uChar* stateGPU, uChar* expanded_keyGPU) {
int cur_round = 0;
// Initial whitening: XOR the state with round key 0.
addRoundKey(stateGPU, cur_round, expanded_keyGPU);
__syncthreads();
if (show_all) {
printf("After addRroundKey\n");
//printState(dev_state);
}
cur_round++;
// Main rounds 1 .. tot_rounds-2: SubBytes, ShiftRows, MixColumns, AddRoundKey.
while (cur_round < (tot_rounds - 1)) {
if (show_all)printf("ROUND N %d\n", cur_round );
subBytes(stateGPU);
__syncthreads();
if (show_all) {
printf("After SubBytes\n");
//printState(dev_state);
}
shiftRows(stateGPU);
__syncthreads();
if (show_all) {
printf("After shiftRows\n");
//printState(dev_state);
}
gmixColumns(stateGPU);
__syncthreads();
if (show_all) {
printf("After mixColumns\n");
//printState(dev_state);
}
addRoundKey(stateGPU, cur_round, expanded_keyGPU);
__syncthreads();
if (show_all) {
printf("After addRroundKey\n");
//printState(dev_state);
}
cur_round++;
}
// Final round: SubBytes + ShiftRows + AddRoundKey, no MixColumns.
subBytes(stateGPU);
__syncthreads();
if (show_all) {
printf("After SubBytes\n");
//printState(dev_state);
}
shiftRows(stateGPU);
__syncthreads();
if (show_all) {
printf("After shiftRows\n");
//printState(dev_state);
}
addRoundKey(stateGPU, cur_round, expanded_keyGPU);
__syncthreads();
}
// Host wrapper: encrypts the global 16-byte `state` on the GPU using the
// pre-expanded global key, writing the ciphertext back into `state`.
// show_all is forwarded to the kernel; collect_data is currently unused
// here. Fixes: cudaSetDevice takes an int ordinal (was a float), the grid
// size now ceils correctly even when N / TxB would truncate, and the key
// buffer is freed (it was leaked on every call).
void AES_Encrypt(int show_all, int collect_data)
{
    uChar *stateGPU;
    uChar *expanded_keyGPU;
    int nBytes = N * M * sizeof(uChar);
    // Device ordinal must be an int, not a float.
    int dev = 0;
    cudaSetDevice(dev);
    // Allocate and upload state + expanded key.
    cudaMalloc((void**) &stateGPU, nBytes);
    cudaMalloc((void**) &expanded_keyGPU, 176);
    cudaMemcpy(expanded_keyGPU, expanded_key, 176, cudaMemcpyHostToDevice);
    cudaMemcpy(stateGPU, state, nBytes, cudaMemcpyHostToDevice);
    dim3 dimBlock(TxB, TxB, 1);
    // Promote to double before ceil() so integer division cannot truncate.
    dim3 dimGrid((int)ceil((double)N / TxB), (int)ceil((double)N / TxB), 1);
    AESEncryptKernel <<< dimGrid, dimBlock>>>(show_all, stateGPU, expanded_keyGPU);
    cudaMemcpy(state, stateGPU, nBytes, cudaMemcpyDeviceToHost);
    cudaFree(stateGPU);
    // Previously leaked.
    cudaFree(expanded_keyGPU);
}
// Compares a 16-byte ciphertext block against the known reference vector
// for this key/plaintext pair. Returns 1 on an exact match, 0 otherwise.
int checkResult(uChar * state) {
    uChar result[] = "\x3a\xd7\x7b\xb4\x0d\x7a\x36\x60\xa8\x9e\xca\xf3\x24\x66\xef\x97";
    int i = 0;
    while (i < 16 && state[i] == result[i])
        ++i;
    return i == 16;
}
// Reads the whole file into a NUL-terminated heap buffer, records its size
// in the global plain_text_size, and returns the buffer. Exits on any
// open/alloc/read failure.
uChar* readPlainText(char* file_name) {
    FILE *fp = fopen(file_name, "rb");
    if (!fp) {
        perror(file_name);
        exit(1);
    }
    // Determine the file size by seeking to the end.
    fseek(fp, 0L, SEEK_END);
    plain_text_size = ftell(fp);
    rewind(fp);
    // +1 byte and calloc() guarantee NUL termination.
    uChar *buffer = (uChar*)calloc(1, plain_text_size + 1);
    if (!buffer) {
        fclose(fp);
        fputs("memory alloc fails", stderr);
        exit(1);
    }
    if (fread(buffer, plain_text_size, 1, fp) != 1) {
        fclose(fp);
        free(buffer);
        fputs("entire read fails", stderr);
        exit(1);
    }
    fclose(fp);
    return buffer;
}
// Test driver: encrypts known plaintext blocks at six problem sizes,
// verifies each result against the reference ciphertext, and optionally
// (-c) writes per-size timings to scoresGPU.dat. -s enables verbose
// kernel-side tracing.
// Fixes: string_data is now initialized before strcat (was undefined
// behavior), printf/sprintf specifiers match long / long double, the
// stats file is closed, and heap buffers are freed.
int main(int argc, char** argv) {
    plain_text = readPlainText("plainText.txt");
    int collect_data = FALSE;
    int show_all = FALSE;
    for (int i = 0; i < argc; i++)
    {
        if (strcmp(argv[i], "-s") == 0) {
            show_all = TRUE;
        }
        if (strcmp(argv[i], "-c" ) == 0) {
            collect_data = TRUE;
            // Open (or create) the statistics file.
            data = fopen("scoresGPU.dat", "w");
        }
    }
    readKey("key.txt");
    expand_key(password);
    // 32 hex characters encode one 16-byte state block.
    int vector_dim = 16 * 2;
    int num_test = 6;
    long *test_sizes = (long*)malloc(num_test * sizeof(long));
    test_sizes[0] = vector_dim * 128;
    test_sizes[1] = vector_dim * 512;
    test_sizes[2] = vector_dim * 1024;
    test_sizes[3] = vector_dim * 2048;
    test_sizes[4] = vector_dim * 4096;
    test_sizes[5] = vector_dim * 8192;
    long double *test_res = (long double*)malloc(num_test * sizeof(long double));
    for (int cur_test = 0; cur_test < num_test; cur_test++) {
        long dim_test = test_sizes[cur_test] / 2;
        // %ld: dim_test is a long (was printed with %d).
        printf("TEST con dimensione %ld\n", dim_test );
        allTestSuccess = 1;
        clock_t start, stop;
        start = clock();
        for (int i = 0; i < test_sizes[cur_test] ; i += vector_dim) {
            initStateHex(i);
            AES_Encrypt(show_all, collect_data);
            allTestSuccess = allTestSuccess && checkResult(state);
        }
        stop = clock();
        long double elapsed_time = (stop - start) / (double) CLOCKS_PER_SEC;
        test_res[cur_test] = elapsed_time;
        printf("Risultato test: %d\n", allTestSuccess );
    }
    // strcat requires an initialized destination string.
    char string_data[5000];
    string_data[0] = '\0';
    for (int i = 0; i < num_test; ++i) {
        char temp[100];
        // %ld for long, %Lf for long double (both specifiers were wrong).
        sprintf(temp, "%ld %Lf\n", test_sizes[i] / 2, test_res[i]);
        strcat(string_data, temp);
    }
    if (collect_data) {
        fprintf(data, "%s", string_data);
        fclose(data);
    }
    free(test_sizes);
    free(test_res);
    free(plain_text);
    return 0;
}
19,937 | #include <stdlib.h>
#include <stdio.h>
// Forward declaration; the kernel is defined after main().
__global__ void cudaadd(float* cA, float* cB, float* cC);
// Elements per vector — also the thread count of the single launch block.
const int N = 32;
// Demo: element-wise vector addition C = A + B on the GPU, printed next to
// the host-computed expected values.
int main()
{
    // Require at least one CUDA-capable device.
    int deviceN = 0;
    cudaGetDeviceCount(&deviceN);
    if (deviceN == 0) {
        printf("Error! No cuda-enabled devices found!");
        return 1;
    }
    cudaSetDevice(0); // use the first device
    // Host arrays; GeForce-class GPUs favor single precision.
    float* A = (float*)malloc(N*sizeof(float));
    float* B = (float*)malloc(N*sizeof(float));
    float* C = (float*)malloc(N*sizeof(float));
    // Fill the inputs with deterministic patterns.
    for (int i = 0; i < N; i++) {
        A[i] = i%5 + i/100.0;
        B[i] = 2.0 + i;
    }
    // Device arrays; host code must go through cudaMemcpy to touch them.
    float* cA = NULL;
    float* cB = NULL;
    float* cC = NULL;
    cudaMalloc(&cA, N*sizeof(float));
    cudaMalloc(&cB, N*sizeof(float));
    cudaMalloc(&cC, N*sizeof(float));
    cudaMemcpy(cA, A, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(cB, B, N*sizeof(float), cudaMemcpyHostToDevice);
    // One block of N threads: one thread per element.
    cudaadd<<<1,N>>>(cA, cB, cC);
    cudaMemcpy(C, cC, N*sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        printf("\nA[%d]=%g, B[%d]=%g, C[%d]=%g, should be %g",
               i, A[i], i, B[i], i, C[i], A[i]+B[i]);
    // Release host then device memory.
    free(A); free(B); free(C);
    cudaFree(cA); cudaFree(cB); cudaFree(cC);
    return 0;
}
// Kernel: per-thread element-wise add, one thread per element
// (launched as <<<1, N>>>; threadIdx.x is the element index).
__global__ void cudaadd(float* cA, float* cB, float* cC)
{
    const int idx = threadIdx.x;
    const float sum = cA[idx] + cB[idx];
    cC[idx] = sum;
}
|
19,938 | /*----------------------------------------------------------------------------------*
* Copyright (c) 2010-2018 Pauli Parkkinen, Eelis Solala, Wen-Hua Xu, *
* Sergio Losilla, Elias Toivanen, Jonas Juselius *
* *
* Permission is hereby granted, free of charge, to any person obtaining a copy *
* of this software and associated documentation files (the "Software"), to deal *
* in the Software without restriction, including without limitation the rights *
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell *
* copies of the Software, and to permit persons to whom the Software is *
* furnished to do so, subject to the following conditions: *
* *
* The above copyright notice and this permission notice shall be included in all*
* copies or substantial portions of the Software. *
* *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR *
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, *
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE *
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER *
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, *
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE *
* SOFTWARE. *
*----------------------------------------------------------------------------------*/
#ifdef HAVE_CUDA_PROFILING
#include <stdio.h>
#include "nvToolsExt.h"
#include "cuda_profiler_api.h"
// ARGB palette cycled by NVTX ranges (see PUSH_RANGE and color_number).
const uint32_t colors[] = { 0x0000ff00, 0x000000ff, 0x00ffff00, 0x00ff00ff, 0x0000ffff, 0x00ff0000, 0x00ffffff };
const int num_colors = sizeof(colors)/sizeof(uint32_t);
#ifdef HAVE_NVTX
// Opens a named, colored NVTX range (visible on profiler timelines).
// cid is reduced modulo the palette size, so any integer is acceptable.
#define PUSH_RANGE(name,cid) { \
int color_id = cid; \
color_id = color_id%num_colors;\
nvtxEventAttributes_t eventAttrib = {0}; \
eventAttrib.version = NVTX_VERSION; \
eventAttrib.size = NVTX_EVENT_ATTRIB_STRUCT_SIZE; \
eventAttrib.colorType = NVTX_COLOR_ARGB; \
eventAttrib.color = colors[color_id]; \
eventAttrib.messageType = NVTX_MESSAGE_TYPE_ASCII; \
eventAttrib.message.ascii = name; \
nvtxRangePushEx(&eventAttrib); \
}
// Closes the most recently opened NVTX range.
#define POP_RANGE nvtxRangePop();
#else
// NVTX unavailable: range markers compile away to nothing.
#define PUSH_RANGE(name,cid)
#define POP_RANGE
#endif
/*extern "C" void initialize_cuda_profiler_() {
cudaProfilerInitialize();
}*/
int color_number = 0; // rotates through the NVTX color palette per range
bool cuda_profiler_started = false; // gates NVTX range push/pop
// Fortran-callable entry point: turns the CUDA profiler on for every
// visible device and marks profiling as active.
extern "C" void start_cuda_profiling_() {
    printf("------------------Starting cuda profiling----------------\n");
    int device_count;
    cudaGetDeviceCount(&device_count);
    for (int d = 0; d < device_count; ++d) {
        cudaSetDevice(d);
        cudaProfilerStart();
    }
    cuda_profiler_started = true;
}
// Fortran-callable entry point: turns the CUDA profiler off on every
// visible device and marks profiling as inactive.
extern "C" void stop_cuda_profiling_() {
    printf("------------------Stopping cuda profiling----------------\n");
    int device_count;
    cudaGetDeviceCount(&device_count);
    for (int d = 0; d < device_count; ++d) {
        cudaSetDevice(d);
        cudaProfilerStop();
    }
    cuda_profiler_started = false;
}
// Fortran-callable: opens an NVTX range named <title>; a no-op unless
// profiling is active. Each call advances the color rotation.
extern "C" void start_nvtx_timing_(char * title) {
    if (!cuda_profiler_started) return;
    PUSH_RANGE(title, color_number);
    ++color_number;
}
// Fortran-callable: closes the innermost NVTX range opened by
// start_nvtx_timing_; a no-op unless profiling is active.
extern "C" void stop_nvtx_timing_() {
    if (cuda_profiler_started) {
        POP_RANGE;
    }
}
#endif
|
19,939 | #include <stdio.h>
#include <cuda.h>
// Kernel: one explicit time step of the 2-D heat equation on a flat grid;
// d_A is the current field, d_B receives the updated field, s is the
// diffusion number. Boundary threads leave their point untouched.
// NOTE(review): threadId is a block-linear index (threads of one block are
// contiguous), while the above/below neighbors assume a row-major spatial
// layout of width blockDim.x * gridDim.x — with 2-D blocks these two
// layouts differ, so neighbor indices may not be the spatial neighbors.
// Confirm against the intended grid mapping.
__global__ void TwoDimHeatEq(float *d_A, float *d_B, double s)
{
// 2-dimensional block, 2-dimensional grid
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = (blockId * (blockDim.x * blockDim.y) +
(threadIdx.y * blockDim.x) + threadIdx.x);
int threadAbove = threadId - (blockDim.x * gridDim.x);
int threadBelow = threadId + (blockDim.x * gridDim.x);
int N = gridDim.x * gridDim.y * blockDim.x * blockDim.y;
// Punt if this thread is a boundary point
// I should make this a function in a header file
if ((threadAbove <= 0) || (threadBelow >= N-1) ||
(threadId % (blockDim.x * gridDim.x) == 0) ||
((threadId+1) % (blockDim.x * gridDim.x) == 0))
return;
else
{
//d_B[threadId] = 33.0f;
// 5-point stencil: average of the four neighbors plus the decayed center.
d_B[threadId] = (s*(d_A[threadId+1] + d_A[threadId-1]
+ d_A[threadAbove] + d_A[threadBelow])
+ (1 - 4*s) * d_A[threadId]);
}
}
// Driver: 16x16 heat-equation demo. The left column starts hot (1000),
// everything else at 0; ten GPU steps are run and the field is printed.
// Fix: the new[] host arrays are now released (they were leaked).
int main(int argc, char** argv)
{
    const int n = 16;
    const int BYTES = n*n * sizeof(float);
    float* h_A = new float[n*n];
    float* h_B = new float[n*n];
    double s = 0.25; // diffusion number used by the stencil
    // Initial condition: zero everywhere, 1000 along the left edge.
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            h_A[n*i + j] = 0;
            h_B[n*i + j] = 0;
            if (j==0) h_A[n*i + j] = 1000;
        }
    }
    // Device buffers: d_A holds the current state, d_B the next state.
    float *d_A;
    float *d_B;
    cudaMalloc((void **) &d_A, BYTES);
    cudaMalloc((void **) &d_B, BYTES);
    // Upload both fields, then mirror d_A into d_B so boundaries match.
    cudaMemcpy(d_B, h_B, BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_A, h_A, BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, d_A, BYTES, cudaMemcpyDeviceToDevice);
    // Time stepping: compute into d_B, then roll d_B back into d_A.
    for (int i=0; i < 10; i++)
    {
        TwoDimHeatEq<<<dim3(n/4,n/4),dim3(n/4,n/4)>>>(d_A, d_B, s);
        cudaMemcpy(d_A, d_B, BYTES, cudaMemcpyDeviceToDevice);
        cudaDeviceSynchronize();
    }
    // Fetch and print the final field.
    cudaMemcpy(h_B, d_B, BYTES, cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < n; j++)
        {
            printf("%-10f \t", h_B[i*n + j]);
        }
        printf("\n");
    }
    printf("\n \n");
    cudaFree(d_A);
    cudaFree(d_B);
    // Previously leaked.
    delete[] h_A;
    delete[] h_B;
    return 0;
}
|
19,940 | #include "includes.h"
// Kernel: block-wide tree reduction (sum). Each block sums its slice of
// d_data_in into shared memory and writes one partial sum per block to
// d_data_out[blockIdx.x]. Out-of-range threads contribute 0.
// NOTE(review): assumes blockDim.x <= BLKSIZE and that blockDim.x is a
// power of two (odd sizes drop elements in the halving loop) — confirm
// the launch configuration guarantees both.
__global__ void kernel_3(float *d_data_in, float *d_data_out, int data_size)
{
__shared__ float s_data[BLKSIZE];
int tid = threadIdx.x;
int index = tid + blockIdx.x*blockDim.x;
// Zero-pad so threads past the end of the input contribute nothing.
s_data[tid] = 0.0;
if (index < data_size){
s_data[tid] = d_data_in[index];
}
__syncthreads();
// Halving tree reduction; barrier after every level.
for (int s = blockDim.x/2; s >= 1; s = s >> 1){
if (tid<s){
s_data[tid] += s_data[tid + s];
}
__syncthreads();
}
// Thread 0 publishes this block's partial sum.
if (tid == 0){
d_data_out[blockIdx.x] = s_data[tid];
}
}
19,941 | #include "includes.h"
// Kernel: point-in-polygon (crossing-number) test. For each of npoint
// points (px, py), counts edge crossings against the nvert-vertex polygon
// (vx, vy) and writes 1 to cs[i] if the point is inside, else 0.
// NOTE(review): every in-range thread redundantly copies the vertex list
// into the shared arrays while iterating — a same-value write race that is
// benign but gains nothing from shared memory as written; and nvert must
// not exceed 607 or tvx/tvy overflow. Confirm the caller guarantees this.
__global__ void pnpoly_cnGPU(const float *px, const float *py, const float *vx, const float *vy, char* cs, int npoint, int nvert)
{
__shared__ float tvx[607];
__shared__ float tvy[607];
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < npoint) {
int j, k, c = 0;
// Walk edges (k -> j); toggle c on each ray crossing.
for (j = 0, k = nvert-1; j < nvert; k = j++) {
tvx[j] = vx [j];
tvy[j] = vy [j];
if ( ((tvy[j]>py[i]) != (tvy[k]>py[i])) &&
(px[i] < (tvx[k]-tvx[j]) * (py[i]-tvy[j]) / (tvy[k]-tvy[j]) + tvx[j]) )
c = !c;
}
cs[i] = c & 1;
}
// NOTE(review): no shared data is read after this barrier; it appears
// unnecessary.
__syncthreads();
}
19,942 | #include<cuda.h>
#include<stdio.h>
#include<math.h>
#include<cuda_runtime.h>
#include<stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <sys/time.h>
// Kernel: trial-division primality test, one block per candidate
// (launched <<<count, 1>>>). a[i] is an odd candidate, b[i] its
// precomputed square root (the trial-division bound). Writes -1 into c[i]
// for composites and 1 for primes.
// Fixes: c[i] is now always written (previously primes left device memory
// uninitialized and the host read garbage), the unused `flag` local is
// gone, and N guards against over-sized launches.
__global__ void find_prime(int N,int* a,float* b,int* c)
{
    int i = blockIdx.x;
    if (i >= N) return;
    c[i] = 1; // assume prime until a divisor is found
    for (int j = 3; j <= b[i]; j = j + 2)
    {
        if (a[i] % j == 0)
        {
            c[i] = -1; // composite
            break;
        }
    }
}
int* prime_numbers; // host: odd candidates (3, 5, 7, ...)
float* sqrt_prime; // host: sqrt of each candidate (trial-division bound)
int *cuda_prime; // device copy of prime_numbers
float *cuda_sqrt; // device copy of sqrt_prime
int *is_prime; // host result flags (-1 marks a composite)
int *cuda_is_prime; // device result flags
// Counts primes up to the given problem size by GPU trial division (one
// block per odd candidate). Usage: ./GPU_Prime -t Problem_Size
// Fixes: the device flag buffer is now initialized via the (previously
// commented-out) host-to-device copy, so the host never reads
// uninitialized device memory; host buffers are freed.
int main(int argc,char *args[])
{
    if(argc!=3)
    {
        printf("./GPU_Prime -t Problem_Size\n");
        return 0;
    }
    struct timeval time;
    int count = 1; // 2 is prime but not among the odd candidates
    int largest = 2;
    int problem_size = atoi(args[2]);
    printf("Problem Size %d\n",problem_size);
    int no_of_elements = 0;
    // Only odd numbers >= 3 are tested; 1 and 2 are handled implicitly.
    problem_size = problem_size - 2;
    if(problem_size%2 == 0)
    {
        no_of_elements = problem_size/2;
    }
    else
    {
        no_of_elements = problem_size/2 + 1;
    }
    prime_numbers = (int *)malloc(no_of_elements*sizeof(int));
    sqrt_prime = (float *)malloc(no_of_elements*sizeof(float));
    is_prime = (int *)malloc(no_of_elements*sizeof(int));
    // Fill candidates 3, 5, 7, ... with their square roots; flags start 1.
    int h = 3;
    for(int f=0;f<no_of_elements;f++)
    {
        prime_numbers[f] = h;
        sqrt_prime[f] = sqrt(h);
        is_prime[f] = 1;
        h = h+2;
    }
    gettimeofday(&time,NULL);
    double t1 = time.tv_sec + (time.tv_usec/1000000.0);
    cudaMalloc ( (void**)&cuda_prime, no_of_elements * sizeof (int) );
    cudaMalloc ( (void**)&cuda_sqrt, no_of_elements * sizeof (float) );
    cudaMalloc ( (void**)&cuda_is_prime, no_of_elements * sizeof (int) );
    cudaMemcpy( cuda_prime, prime_numbers, no_of_elements * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy( cuda_sqrt, sqrt_prime, no_of_elements * sizeof(float), cudaMemcpyHostToDevice);
    // Initialize the device flags; without this, entries for primes were
    // never written by the kernel and the readback was undefined.
    cudaMemcpy( cuda_is_prime, is_prime, no_of_elements * sizeof(int), cudaMemcpyHostToDevice);
    find_prime<<<no_of_elements,1>>>(no_of_elements,cuda_prime,cuda_sqrt,cuda_is_prime);
    cudaMemcpy( is_prime, cuda_is_prime , no_of_elements * sizeof(int), cudaMemcpyDeviceToHost);
    // Tally everything the kernel did not mark as composite.
    for(int g=0;g<no_of_elements;g++)
    {
        if(is_prime[g]!=-1)
        {
            count++;
            largest = prime_numbers[g];
        }
    }
    printf("Count %d\n Largest %d\n",count,largest);
    gettimeofday(&time,NULL);
    double t2 = time.tv_sec + (time.tv_usec / 1000000.0);
    printf("Time Taken %f \n",t2-t1);
    cudaFree(cuda_prime);
    cudaFree(cuda_sqrt);
    cudaFree(cuda_is_prime);
    // Host buffers were previously leaked.
    free(prime_numbers);
    free(sqrt_prime);
    free(is_prime);
}
|
19,943 | /**
* @file compare.cu
* @brief element wise product
* @author HIKARU KONDO
* @date 2021/08/24
*/
#include "element_wise_operator.cuh"
#define BLOCKDIM 256
/**
 * Element-wise product: resArray[i] = arrayA[i] * arrayB[i] for i < size.
 * One thread per element; out-of-range threads do nothing.
 */
template<typename T>
__global__ void element_wise_product(T *arrayA, T *arrayB, T *resArray, int size) {
    const unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size) {
        resArray[i] = arrayA[i] * arrayB[i];
    }
}
/**
 * Element-wise division: resArray[i] = arrayA[i] / arrayB[i] for i < size.
 * One thread per element; out-of-range threads do nothing.
 */
template<typename T>
__global__ void element_wise_devide(T *arrayA, T *arrayB, T *resArray, int size) {
    const unsigned int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size) {
        resArray[i] = arrayA[i] / arrayB[i];
    }
}
// Launches the element-wise product kernel for float arrays (device ptrs).
void float_element_wise_product(float *arrayA, float *arrayB, float *resArray, int size) {
    const int threads = BLOCKDIM;
    const int blocks = (size + threads - 1) / threads; // ceil-div
    element_wise_product<<< blocks, threads >>> (arrayA, arrayB, resArray, size);
}
// Launches the element-wise division kernel for float arrays (device ptrs).
void float_element_wise_devide(float *arrayA, float *arrayB, float *resArray, int size) {
    const int threads = BLOCKDIM;
    const int blocks = (size + threads - 1) / threads; // ceil-div
    element_wise_devide<<< blocks, threads >>> (arrayA, arrayB, resArray, size);
}
// Launches the element-wise product kernel for double arrays (device ptrs).
void double_element_wise_product(double *arrayA, double *arrayB, double *resArray, int size) {
    const int threads = BLOCKDIM;
    const int blocks = (size + threads - 1) / threads; // ceil-div
    element_wise_product<<< blocks, threads >>> (arrayA, arrayB, resArray, size);
}
// Launches the element-wise division kernel for double arrays (device ptrs).
void double_element_wise_devide(double *arrayA, double *arrayB, double *resArray, int size) {
    const int threads = BLOCKDIM;
    const int blocks = (size + threads - 1) / threads; // ceil-div
    element_wise_devide<<< blocks, threads >>> (arrayA, arrayB, resArray, size);
}
|
19,944 | #include "includes.h"
// Kernel: grid-stride copy with a shift along dimension 1. Each linear
// index is decomposed via inStride0/inStride1; the element is written to
// out at an extra offset of inIndex1 * inStride0, and elements with
// inIndex0 + inIndex1 >= inDim0 are skipped.
// NOTE(review): the early `return` abandons the entire grid-stride loop
// for this thread at the first out-of-range element; if valid elements can
// follow an invalid one in tid order, this should be `continue` — confirm
// the intended index ordering.
__global__ void shift0(float* in, float* out, int inDim0, int inStride0, int inStride1, int inScalarCount) {
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int stride = gridDim.x * blockDim.x;
for (; tid < inScalarCount; tid += stride) {
// Decompose the flat index into (inIndex0, inIndex1).
int linearIndex = tid;
int inIndex0 = linearIndex / inStride0;
linearIndex = linearIndex - inIndex0 * inStride0;
int inIndex1 = linearIndex / inStride1;
if (inIndex0 + inIndex1 >= inDim0) return;
out[tid + inIndex1 * inStride0] = in[tid];
}
}
19,945 | #include "includes.h"
// Stub: presumably intended to compute the dot product of a and b into
// dest using shared memory; currently a no-op with no implementation.
__global__ void dotCudaHeapSharedMemory(const float* a, const float* b, float* dest, const size_t length) {
}
19,946 | /*
The purpose of this program is to compare the performance of calculating
the square root element-wise on an array. The 3 types of executions compared
will be CPU, GPU with only blocks and GPU with only threads.
The size of array <N> is taken as a parameter when the program is executed.
*/
#include <math.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <time.h>
#include <math.h>
#include <chrono>
int N; // array length, parsed from argv[1]; must be in ]0, 1024]
using namespace std::chrono;
using namespace std;
// Writes the first N values of <t>, one per line, to the file <fname>.
void write_table(float *t, string fname){
    ofstream out_stream;
    out_stream.open(fname);
    for (int row = 0; row < N; ++row)
        out_stream << t[row] << "\n";
    out_stream.close();
}
// Compares two arrays and print error if there is a difference.
// Compares two N-element arrays; reports the first mismatch and returns,
// or confirms the arrays are identical.
void cmp_tab(float *t1, float *t2){
    int i = 0;
    while (i < N) {
        if (t1[i] != t2[i]) {
            printf("Error at index %d: %f - %f\n", i, t1[i], t2[i]);
            return;
        }
        ++i;
    }
    printf("Tables are identical\n");
}
/**
* Performs square root for each elements of <tab>
* and write them in <out>.
*/
/**
 * CPU reference: writes the square root of each of the N elements of
 * <tab> into <out>.
 */
void racine(float *tab, float *out) {
    for (int idx = 0; idx < N; ++idx) {
        out[idx] = sqrtf(tab[idx]);
    }
}
/**
* For each element of <f> a block performs a square root
* and write the result in <f_out>.
*/
/**
 * One block per element (launched <<<N, 1>>>): block b writes
 * sqrtf(f[b]) into f_out[b].
 */
__global__ void cuda_racine_block(float *f, float *f_out) {
    const int b = blockIdx.x;
    f_out[b] = sqrtf(f[b]);
}
/**
* For each element of <f> a thread performs a square root
* and write the result in <f_out>.
*/
/**
 * One thread per element (launched <<<1, N>>>): thread t writes
 * sqrtf(f[t]) into f_out[t].
 */
__global__ void cuda_racine_thread(float *f, float *f_out) {
    const int t = threadIdx.x;
    f_out[t] = sqrtf(f[t]);
}
// Benchmarks element-wise sqrt of N random floats three ways: on the CPU,
// on N CUDA blocks of 1 thread, and on 1 CUDA block of N threads, and
// cross-checks the GPU results against the CPU reference.
// Fixes: the three host malloc buffers are now freed, and the local that
// shadowed the std::chrono::duration template name is renamed.
int main(int argc, char **argv) {
    if (argc != 2) {
        printf("Usage: %s <N>\n", argv[0]);
        exit(-1);
    }
    // Table size from the command line; a single block caps N at 1024.
    N = atoi(argv[1]);
    if(N>1024 || N<=0){
        printf("N must belong to ]0:1024]\n");
        exit(-1);
    }
    float *rnd_floats = (float*) malloc(N*sizeof(float)); //random vector of floats
    float *sqrt_floats = (float*) malloc(N*sizeof(float)); //output vector
    float *sqrt_floats_cuda = (float*) malloc(N*sizeof(float)); //output vector for CUDA runs
    // Random single-precision floats in [0, 1].
    srand((unsigned) time(NULL));
    for (int i = 0; i < N; ++i)
        rnd_floats[i] = static_cast<float> (rand()) / static_cast<float> (RAND_MAX);
    std::cout << "********************************************************" << std::endl;
    std::cout << " Run on CPU " << std::endl;
    std::cout << "********************************************************" << std::endl;
    auto start = high_resolution_clock::now();
    racine(rnd_floats, sqrt_floats);
    auto stop = high_resolution_clock::now();
    // Renamed: a local named `duration` shadowed std::chrono::duration.
    duration<double> cpu_elapsed = stop - start;
    printf("\nTime to generate: %3.7f ms\n\n", cpu_elapsed.count() * 1000.0F);
    std::cout << "********************************************************" << std::endl;
    std::cout << " CUDA run on N blocks " << std::endl;
    std::cout << "********************************************************" << std::endl;
    float *dev_rnd_floats, *dev_rnd_floats_out;
    cudaMalloc((void ** ) &dev_rnd_floats, N * sizeof(float));
    cudaMalloc((void ** ) &dev_rnd_floats_out, N * sizeof(float));
    cudaMemcpy(dev_rnd_floats, rnd_floats, N * sizeof(float), cudaMemcpyHostToDevice);
    float elapsedTime;
    cudaEvent_t cuda_start, cuda_stop;
    cudaEventCreate( &cuda_start);
    cudaEventCreate( &cuda_stop);
    cudaEventRecord(cuda_start, 0);
    cuda_racine_block<<< N, 1>>> (dev_rnd_floats, dev_rnd_floats_out);
    cudaMemcpy(sqrt_floats_cuda, dev_rnd_floats_out, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(cuda_stop, 0);
    cudaEventSynchronize(cuda_stop);
    cudaEventElapsedTime( &elapsedTime, cuda_start, cuda_stop);
    printf("\nParallel time to generate: %3.7f ms\n\n", elapsedTime);
    cudaEventDestroy(cuda_start);
    cudaEventDestroy(cuda_stop);
    cudaFree(dev_rnd_floats_out);
    cmp_tab(sqrt_floats, sqrt_floats_cuda);
    std::cout << "********************************************************" << std::endl;
    std::cout << " CUDA run on N threads " << std::endl;
    std::cout << "********************************************************" << std::endl;
    cudaMalloc((void**) &dev_rnd_floats_out, N * sizeof(float));
    cudaEventCreate( &cuda_start);
    cudaEventCreate( &cuda_stop);
    cudaEventRecord(cuda_start, 0);
    cuda_racine_thread<<< 1, N >>>(dev_rnd_floats, dev_rnd_floats_out);
    cudaMemcpy(sqrt_floats_cuda, dev_rnd_floats_out, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(cuda_stop, 0);
    cudaEventSynchronize(cuda_stop);
    cudaEventElapsedTime( &elapsedTime, cuda_start, cuda_stop);
    printf("\nParallel time to generate: %3.7f ms\n\n", elapsedTime);
    cudaEventDestroy(cuda_start);
    cudaEventDestroy(cuda_stop);
    cudaFree(dev_rnd_floats);
    cudaFree(dev_rnd_floats_out);
    cmp_tab(sqrt_floats, sqrt_floats_cuda);
    // Host buffers were previously leaked.
    free(rnd_floats);
    free(sqrt_floats);
    free(sqrt_floats_cuda);
    return 0;
}
19,947 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
// Prints a human-readable summary of a CUDA device's properties.
// Fix: totalGlobalMem, sharedMemPerBlock, memPitch and totalConstMem are
// size_t, so they are printed with %zu — the old %u was undefined behavior
// on 64-bit hosts and could print truncated values.
void printDevProp(cudaDeviceProp devProp) {
    printf("%s\n", devProp.name);
    printf("Major revision number: %d\n", devProp.major);
    printf("Minor revision number: %d\n", devProp.minor);
    printf("Total global memory: %zu bytes\n", devProp.totalGlobalMem);
    printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
    printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block: %d\n", devProp.regsPerBlock);
    printf("Warp size: %d\n", devProp.warpSize);
    printf("Maximum memory pitch: %zu\n", devProp.memPitch);
    printf("Total constant memory: %zu\n", devProp.totalConstMem);
    printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
    printf("Maximum threads per dimension: %d,%d,%d\n", devProp.maxThreadsDim[0], devProp.maxThreadsDim[1], devProp.maxThreadsDim[2]);
    return;
}
// Pretty-prints a dim x dim row-major matrix as a nested-list layout.
void printMatrix(float *A, int dim) {
    printf("[\n");
    for (int r = 0; r < dim; r++) {
        printf(" [");
        for (int c = 0; c < dim; c++)
            printf("%.2f, ", A[r*dim + c]);
        printf("]\n");
    }
    printf("]\n");
}
// Fills a dim x dim matrix with pseudo-random floats in roughly [0, 100]
// (rand() scaled by RAND_MAX/100; same call order as a row-major fill).
void populateMatrix(float *A, int dim) {
    const int total = dim * dim;
    for (int k = 0; k < total; k++)
        A[k] = (float) rand() / (float) (RAND_MAX / 100);
}
// One thread per element: d_A = d_B + d_C over a flat array of `size`
// floats; threads past the end do nothing.
__global__
void kernel_1t1e(float *d_A, float *d_B, float *d_C, int size) {
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= size) return;
    d_A[gid] = d_B[gid] + d_C[gid];
}
// One thread per index j, walking every row i of the rows x rows matrices
// and summing the j-th entry of each row into d_A.
__global__
void kernel_1t1r(float *d_A, float *d_B, float *d_C, int rows) {
    const int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j >= rows) return;
    for (int i = 0; i < rows; ++i) {
        const int idx = i * rows + j;
        d_A[idx] = d_B[idx] + d_C[idx];
    }
}
// One thread per index i, walking every index j of the rows x rows
// matrices and summing row i's entries into d_A.
__global__
void kernel_1t1c(float *d_A, float *d_B, float *d_C, int rows) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= rows) return;
    for (int j = 0; j < rows; ++j) {
        const int idx = i * rows + j;
        d_A[idx] = d_B[idx] + d_C[idx];
    }
}
// Runs one of the three addition kernels on rows x rows matrices and
// returns the measured kernel time in milliseconds (cudaEvent based).
// kernel_choice: 0 = one thread per element, 1 = per j-index, 2 = per
// i-index. Fix: the two cudaEvents are now destroyed (they leaked on
// every call before).
double hostFunction(float *A, float *B, float *C, int rows, int blockSize, int kernel_choice) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Allocate device memory
    float *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, rows*rows*sizeof(float));
    cudaMalloc(&d_B, rows*rows*sizeof(float));
    cudaMalloc(&d_C, rows*rows*sizeof(float));
    // Copy the operands to device memory
    cudaMemcpy(d_B, B, rows*rows*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, C, rows*rows*sizeof(float), cudaMemcpyHostToDevice);
    int size = rows*rows;
    int numBlocks = (int) (rows/blockSize) + 1;
    dim3 threadsPerBlock(blockSize,1);
    cudaEventRecord(start);
    if (kernel_choice == 0) {
        // The element-wise kernel needs enough threads for all elements.
        int numBlocks = (int) (size/blockSize) + 1;
        kernel_1t1e<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, size);
    } else if (kernel_choice == 1) {
        kernel_1t1r<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
    } else if (kernel_choice == 2) {
        kernel_1t1c<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, rows);
    }
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // Fetch the result
    cudaMemcpy(A, d_A, rows*rows*sizeof(float), cudaMemcpyDeviceToHost);
    // Free device memory and timing events (events were leaked before).
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return milliseconds;
}
// Benchmarks the three matrix-addition kernels over several runs each,
// printing per-run times and the average in milliseconds.
// Fix: the average accumulator is now reset for each kernel — previously
// it carried the prior kernel's average into the next sum, skewing the
// reported averages for kernels 1 and 2.
int main() {
    // Enumerate devices; use the (last) device's max block size.
    int deviceCount;
    int blockSize = 1024;
    cudaGetDeviceCount(&deviceCount);
    int device;
    for (device = 0; device < deviceCount; ++device) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, device);
        printDevProp(deviceProp);
        blockSize = deviceProp.maxThreadsPerBlock;
    }
    // Host matrices (square, row-major).
    const int rows = 64;
    const int cols = rows;
    float *A, *B, *C;
    A = (float*) malloc(sizeof(float) * rows * cols);
    B = (float*) malloc(sizeof(float) * rows * cols);
    C = (float*) malloc(sizeof(float) * rows * cols);
    // Benchmark each kernel over `runs` randomized inputs.
    int kernel = 0;
    int runs = 10;
    double time_spent = 0.0;
    printf("\n");
    while (kernel < 3) {
        // Reset per kernel (previously accumulated across kernels).
        double ave_time = 0.0;
        printf("#%d:\t", kernel);
        for (int run=0; run<runs; run++) {
            populateMatrix(B, rows);
            populateMatrix(C, rows);
            time_spent = hostFunction(A, B, C, rows, blockSize, kernel);
            ave_time += time_spent;
            printf("%.4f\t", time_spent);
        }
        ave_time /= runs;
        printf("Ave: %.4f\n", ave_time);
        kernel++;
    }
    free(A);
    free(B);
    free(C);
    printf("\nDone!\n");
}
19,948 | #include "includes.h"
// Blelloch-scan building block: each thread combines one pair of elements
// at distance `step`, host-driven once per tree level.
// upSweep != 0: reduce phase (to += from).
// upSweep == 0: down-sweep phase (swap while accumulating).
// Fix: the down-sweep temporary was declared `int`, silently truncating
// the float value being swapped; it is now a float.
__global__ void prescan(float* d_in, int nGlobe, int step, int upSweep) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int from = 2 * tid * (step + 1) + step;
    int to = 2 * tid * (step + 1) + 2 * step + 1;
    if (upSweep) {
        d_in[to] += d_in[from];
    } else {
        float temp = d_in[to];
        d_in[to] += d_in[from];
        d_in[from] = temp;
    }
}
19,949 | #include "includes.h"
// Fills eff_rain[0 .. size) with the constant rainfall rate ppt.
// Grid-stride loop: correct for any launch configuration.
__global__ void TopForcing(double ppt, double *eff_rain, int size) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += stride) {
        eff_rain[i] = ppt;
    }
}
19,950 | #include <cstdio>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
// Exclusive Hillis-Steele prefix sum over a single block.
// Launch contract (see the caller): one block of n threads and
// 2 * n * sizeof(int) bytes of dynamic shared memory for the two
// ping-pong buffers.
__global__ void scanHillisSteele(int *d_out, int *d_in, int n)
{
extern __shared__ int buf[]; // two n-int buffers, ping-ponged per step
const int tid = threadIdx.x;
int write = 0; // which half (0/1) holds the freshest values
// Shift the input right by one element to make the scan exclusive.
buf[tid] = (tid > 0) ? d_in[tid - 1] : 0;
__syncthreads();
for (int offset = 1; offset < n; offset *= 2)
{
// Flip buffers: read from the half written last step, write the other.
write = 1 - write;
const int read = 1 - write;
int v = buf[read * n + tid];
if (tid >= offset)
v += buf[read * n + tid - offset];
buf[write * n + tid] = v;
__syncthreads();
}
d_out[tid] = buf[write * n + tid];
}
int main()
{
// Demo driver: exclusive +-scan of 10 ints on the device, then print
// input and result. Every CUDA call is now checked; previously all
// return codes were ignored, so any failure silently printed garbage.
const int ARRAY_SIZE = 10;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
int h_in[ARRAY_SIZE]{1, 2, 5, 7, 8, 10, 11, 12, 15, 19};
int h_out[ARRAY_SIZE];
int *d_in;
int *d_out;
cudaError_t err;
err = cudaMalloc((void **)&d_in, ARRAY_BYTES);
if (err != cudaSuccess) { std::cerr << "cudaMalloc d_in: " << cudaGetErrorString(err) << std::endl; return 1; }
err = cudaMalloc((void **)&d_out, ARRAY_BYTES);
if (err != cudaSuccess) { std::cerr << "cudaMalloc d_out: " << cudaGetErrorString(err) << std::endl; return 1; }
err = cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
if (err != cudaSuccess) { std::cerr << "cudaMemcpy H2D: " << cudaGetErrorString(err) << std::endl; return 1; }
// One block of ARRAY_SIZE threads; 2*ARRAY_BYTES of dynamic shared
// memory for the kernel's double buffer.
scanHillisSteele<<<1, ARRAY_SIZE, 2*ARRAY_BYTES>>>(d_out, d_in, ARRAY_SIZE);
// Launch-configuration errors only surface via cudaGetLastError().
err = cudaGetLastError();
if (err != cudaSuccess) { std::cerr << "kernel launch: " << cudaGetErrorString(err) << std::endl; return 1; }
// Execution errors surface at the next synchronizing call.
err = cudaDeviceSynchronize();
if (err != cudaSuccess) { std::cerr << "kernel execution: " << cudaGetErrorString(err) << std::endl; return 1; }
err = cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) { std::cerr << "cudaMemcpy D2H: " << cudaGetErrorString(err) << std::endl; return 1; }
std::cout << "Input: " << std::endl;
for (int i = 0; i < ARRAY_SIZE; i++) {
std::cout << h_in[i] << " " << std::endl;
}
std::cout << "Exclusive scan with operation +;" << std::endl;
for (int i = 0; i < ARRAY_SIZE; i++) {
std::cout << h_out[i] << " " << std::endl;
}
cudaFree(d_in);
cudaFree(d_out);
}
|
19,951 | #include <cuda.h>
#include <iostream>
#define uint unsigned int
#define uchar unsigned char
#define ushort unsigned short
#define int64_t long long
#define uint64_t unsigned long long
// Auto-generated (TVM-style) tiled 3x3 convolution kernel.
// NOTE(review): the constants imply 256x256 spatial maps (65536 floats per
// channel; rows of 256) with zero padding of 1, input channels tiled by
// rc_outer (8 outer x 4 inner = 32 channels) and 4 output-channel groups per
// thread (the +524288 strides in the final stores) -- confirm against the
// host-side launch configuration, which is not visible here.
// Each thread accumulates an 8-row column of outputs for 4 output channels
// in compute_local[32]; inputs and weights are staged through shared memory
// each rc_outer iteration. Index arithmetic is machine-generated; left
// byte-identical.
extern "C" __global__ void conv3(float* __restrict__ data,
float* __restrict__ kernel,
float* __restrict__ compute) {
float compute_local[32];
__shared__ float pad_temp_shared[720];
__shared__ float kernel_shared[1152];
float pad_temp_shared_local[10];
float kernel_shared_local[12];
// Zero the per-thread accumulators (8 rows x 4 output-channel groups).
for (int yy_c_init = 0; yy_c_init < 8; ++yy_c_init) {
compute_local[(yy_c_init)] = 0.000000e+00f;
compute_local[((yy_c_init + 8))] = 0.000000e+00f;
compute_local[((yy_c_init + 16))] = 0.000000e+00f;
compute_local[((yy_c_init + 24))] = 0.000000e+00f;
}
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
// Barrier before reusing the shared buffers for the next channel tile.
__syncthreads();
// Cooperative load of the padded input tile into shared memory;
// out-of-image positions read as 0 (the ?: padding predicate).
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 6; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if (((((int)threadIdx.z) * 5) + (((((int)threadIdx.x) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) < 40) {
if ((((((int)threadIdx.z) * 90) + (((int)threadIdx.x) * 6)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 720) {
if (((((int)threadIdx.x) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 90) {
pad_temp_shared[((((((int)threadIdx.z) * 90) + (((int)threadIdx.x) * 6)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((1 <= ((((int)blockIdx.y) * 8) + (((((int)threadIdx.z) * 5) + (((((int)threadIdx.x) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 10))) && (((((int)blockIdx.y) * 8) + (((((int)threadIdx.z) * 5) + (((((int)threadIdx.x) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 10)) < 257)) && (1 <= ((((int)blockIdx.x) * 16) + (((((int)threadIdx.x) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)))) && (((((int)blockIdx.x) * 16) + (((((int)threadIdx.x) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) < 257)) ? data[((((((((rc_outer * 262144) + ((((((int)threadIdx.z) * 5) + (((((int)threadIdx.x) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) / 10) * 65536)) + (((int)blockIdx.y) * 2048)) + ((((((int)threadIdx.z) * 5) + (((((int)threadIdx.x) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 18)) % 10) * 256)) + (((int)blockIdx.x) * 16)) + (((((int)threadIdx.x) * 6) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) % 18)) - 257))] : 0.000000e+00f);
}
}
}
}
// Cooperative load of this channel tile's 3x3 weights into shared memory.
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 9; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
kernel_shared[((((((int)threadIdx.z) * 144) + (((int)threadIdx.x) * 9)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[((((((((int)threadIdx.z) * 1152) + ((((int)threadIdx.x) >> 2) * 288)) + (rc_outer * 36)) + ((((int)threadIdx.x) & 3) * 9)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))];
}
// Barrier: shared buffers fully written before any thread reads them.
__syncthreads();
// Register-blocked reduction over 4 inner channels and the 3x3 window.
for (int rc_inner_outer = 0; rc_inner_outer < 4; ++rc_inner_outer) {
for (int rx_inner_outer = 0; rx_inner_outer < 3; ++rx_inner_outer) {
// Stage a 10-row input column and 12 weights into registers.
for (int ax2 = 0; ax2 < 10; ++ax2) {
pad_temp_shared_local[(ax2)] = pad_temp_shared[(((((rc_inner_outer * 180) + (ax2 * 18)) + ((int)threadIdx.x)) + rx_inner_outer))];
}
for (int ax21 = 0; ax21 < 3; ++ax21) {
kernel_shared_local[(ax21)] = kernel_shared[(((((((int)threadIdx.z) * 36) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer))];
kernel_shared_local[((ax21 + 3))] = kernel_shared[((((((((int)threadIdx.z) * 36) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer) + 288))];
kernel_shared_local[((ax21 + 6))] = kernel_shared[((((((((int)threadIdx.z) * 36) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer) + 576))];
kernel_shared_local[((ax21 + 9))] = kernel_shared[((((((((int)threadIdx.z) * 36) + (rc_inner_outer * 9)) + (ax21 * 3)) + rx_inner_outer) + 864))];
}
// Multiply-accumulate into the 8x4 register tile.
for (int ry_inner_inner = 0; ry_inner_inner < 3; ++ry_inner_inner) {
for (int yy_c = 0; yy_c < 8; ++yy_c) {
compute_local[(yy_c)] = (compute_local[(yy_c)] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[(ry_inner_inner)]));
compute_local[((yy_c + 8))] = (compute_local[((yy_c + 8))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[((ry_inner_inner + 3))]));
compute_local[((yy_c + 16))] = (compute_local[((yy_c + 16))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[((ry_inner_inner + 6))]));
compute_local[((yy_c + 24))] = (compute_local[((yy_c + 24))] + (pad_temp_shared_local[((yy_c + ry_inner_inner))] * kernel_shared_local[((ry_inner_inner + 9))]));
}
}
}
}
}
// Write the 8x4 register tile back to global memory (one store per
// output channel group, 524288 floats apart).
for (int yy_inner_inner_inner = 0; yy_inner_inner_inner < 8; ++yy_inner_inner_inner) {
compute[((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 2048)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)))] = compute_local[(yy_inner_inner_inner)];
compute[(((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 2048)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 524288))] = compute_local[((yy_inner_inner_inner + 8))];
compute[(((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 2048)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 1048576))] = compute_local[((yy_inner_inner_inner + 16))];
compute[(((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 2048)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 16)) + ((int)threadIdx.x)) + 1572864))] = compute_local[((yy_inner_inner_inner + 24))];
}
}
// Auto-generated (TVM-style) tiled 5x5 convolution kernel.
// NOTE(review): constants imply 256x256 spatial maps (65536 floats/channel),
// zero padding of 2, 32 input-channel tiles (rc_outer), and a per-thread
// 2x2 spatial micro-tile replicated over 4 spatial sub-tiles (+8/+4/+12
// groups) and 4 output-channel groups (+16/+32/+48) in compute_local[64] --
// confirm against the host-side launch configuration, which is not visible
// here. Index arithmetic is machine-generated; left byte-identical.
extern "C" __global__ void conv5(float* __restrict__ data,
float* __restrict__ kernel,
float* __restrict__ compute) {
float compute_local[64];
__shared__ float pad_temp_shared[544];
__shared__ float kernel_shared[800];
float pad_temp_shared_local[16];
float kernel_shared_local[4];
// Zero all 64 per-thread accumulators.
for (int yy_c_init = 0; yy_c_init < 2; ++yy_c_init) {
for (int xx_c_init = 0; xx_c_init < 2; ++xx_c_init) {
compute_local[(((yy_c_init * 2) + xx_c_init))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 16))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 32))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 48))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 8))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 24))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 40))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 56))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 4))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 20))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 36))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 52))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 12))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 28))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 44))] = 0.000000e+00f;
compute_local[((((yy_c_init * 2) + xx_c_init) + 60))] = 0.000000e+00f;
}
}
// Loop over the 32 input-channel tiles.
for (int rc_outer = 0; rc_outer < 32; ++rc_outer) {
// Barrier before reusing the shared buffers for the next channel tile.
__syncthreads();
// Cooperative load of the padded input tile; out-of-image positions
// read as 0 (the ?: padding predicate).
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner < 5; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) {
if (((((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) / 68) + ((int)threadIdx.z)) < 8) {
if ((((((int)threadIdx.z) * 68) + (((int)threadIdx.x) * 5)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 544) {
if (((((int)threadIdx.x) * 5) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 68) {
pad_temp_shared[((((((int)threadIdx.z) * 68) + (((int)threadIdx.x) * 5)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))] = (((((2 <= ((((int)blockIdx.y) * 4) + ((int)threadIdx.z))) && (((((int)blockIdx.y) * 4) + ((int)threadIdx.z)) < 258)) && (2 <= (((((int)blockIdx.x) * 64) + (((int)threadIdx.x) * 5)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner))) && ((((((int)blockIdx.x) * 64) + (((int)threadIdx.x) * 5)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) < 258)) ? data[((((((((rc_outer * 65536) + (((int)blockIdx.y) * 1024)) + (((int)threadIdx.z) * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 5)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner) - 514))] : 0.000000e+00f);
}
}
}
}
// Cooperative load of this channel tile's 5x5 weights into shared memory.
for (int ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 = 0; ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1 < 7; ++ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 7) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 25)) < 32) {
if (((((int)threadIdx.z) * 20) + (((((int)threadIdx.x) * 7) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 5)) < 160) {
if ((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 7)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 800) {
if (((((int)threadIdx.x) * 7) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) < 100) {
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 7)) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1))] = kernel[(((((((int)threadIdx.z) * 3200) + ((((((int)threadIdx.x) * 7) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) / 25) * 800)) + (rc_outer * 25)) + (((((int)threadIdx.x) * 7) + ax0_ax1_fused_ax2_fused_ax3_fused_inner_inner_inner1) % 25)))];
}
}
}
}
}
// Barrier: shared buffers fully written before any thread reads them.
__syncthreads();
// Register-blocked reduction over the 5x5 window.
for (int ry_inner_outer = 0; ry_inner_outer < 5; ++ry_inner_outer) {
for (int rx_inner_outer = 0; rx_inner_outer < 5; ++rx_inner_outer) {
// Stage the 4 spatial 2x2 sub-tiles of inputs into registers.
for (int ax2 = 0; ax2 < 2; ++ax2) {
for (int ax3 = 0; ax3 < 2; ++ax3) {
pad_temp_shared_local[(((ax2 * 2) + ax3))] = pad_temp_shared[((((((ax2 * 68) + (ry_inner_outer * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_inner_outer))];
pad_temp_shared_local[((((ax2 * 2) + ax3) + 8))] = pad_temp_shared[(((((((ax2 * 68) + (ry_inner_outer * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_inner_outer) + 136))];
pad_temp_shared_local[((((ax2 * 2) + ax3) + 4))] = pad_temp_shared[(((((((ax2 * 68) + (ry_inner_outer * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_inner_outer) + 32))];
pad_temp_shared_local[((((ax2 * 2) + ax3) + 12))] = pad_temp_shared[(((((((ax2 * 68) + (ry_inner_outer * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_inner_outer) + 168))];
}
}
// One weight per output-channel group (200 floats apart).
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 25) + (ry_inner_outer * 5)) + rx_inner_outer))];
kernel_shared_local[(1)] = kernel_shared[(((((((int)threadIdx.z) * 25) + (ry_inner_outer * 5)) + rx_inner_outer) + 200))];
kernel_shared_local[(2)] = kernel_shared[(((((((int)threadIdx.z) * 25) + (ry_inner_outer * 5)) + rx_inner_outer) + 400))];
kernel_shared_local[(3)] = kernel_shared[(((((((int)threadIdx.z) * 25) + (ry_inner_outer * 5)) + rx_inner_outer) + 600))];
// Multiply-accumulate: 4 spatial sub-tiles x 4 output-channel groups.
for (int yy_c = 0; yy_c < 2; ++yy_c) {
for (int xx_c = 0; xx_c < 2; ++xx_c) {
compute_local[(((yy_c * 2) + xx_c))] = (compute_local[(((yy_c * 2) + xx_c))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(0)]));
compute_local[((((yy_c * 2) + xx_c) + 16))] = (compute_local[((((yy_c * 2) + xx_c) + 16))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(1)]));
compute_local[((((yy_c * 2) + xx_c) + 32))] = (compute_local[((((yy_c * 2) + xx_c) + 32))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(2)]));
compute_local[((((yy_c * 2) + xx_c) + 48))] = (compute_local[((((yy_c * 2) + xx_c) + 48))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(3)]));
compute_local[((((yy_c * 2) + xx_c) + 8))] = (compute_local[((((yy_c * 2) + xx_c) + 8))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(0)]));
compute_local[((((yy_c * 2) + xx_c) + 24))] = (compute_local[((((yy_c * 2) + xx_c) + 24))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(1)]));
compute_local[((((yy_c * 2) + xx_c) + 40))] = (compute_local[((((yy_c * 2) + xx_c) + 40))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(2)]));
compute_local[((((yy_c * 2) + xx_c) + 56))] = (compute_local[((((yy_c * 2) + xx_c) + 56))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(3)]));
compute_local[((((yy_c * 2) + xx_c) + 4))] = (compute_local[((((yy_c * 2) + xx_c) + 4))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(0)]));
compute_local[((((yy_c * 2) + xx_c) + 20))] = (compute_local[((((yy_c * 2) + xx_c) + 20))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(1)]));
compute_local[((((yy_c * 2) + xx_c) + 36))] = (compute_local[((((yy_c * 2) + xx_c) + 36))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(2)]));
compute_local[((((yy_c * 2) + xx_c) + 52))] = (compute_local[((((yy_c * 2) + xx_c) + 52))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(3)]));
compute_local[((((yy_c * 2) + xx_c) + 12))] = (compute_local[((((yy_c * 2) + xx_c) + 12))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(0)]));
compute_local[((((yy_c * 2) + xx_c) + 28))] = (compute_local[((((yy_c * 2) + xx_c) + 28))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(1)]));
compute_local[((((yy_c * 2) + xx_c) + 44))] = (compute_local[((((yy_c * 2) + xx_c) + 44))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(2)]));
compute_local[((((yy_c * 2) + xx_c) + 60))] = (compute_local[((((yy_c * 2) + xx_c) + 60))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(3)]));
}
}
}
}
}
// Write all 64 accumulators back to global memory.
for (int yy_inner_inner_inner = 0; yy_inner_inner_inner < 2; ++yy_inner_inner_inner) {
for (int xx_inner_inner_inner = 0; xx_inner_inner_inner < 2; ++xx_inner_inner_inner) {
compute[(((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner))] = compute_local[(((yy_inner_inner_inner * 2) + xx_inner_inner_inner))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 524288))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 16))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 1048576))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 32))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 1572864))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 48))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 512))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 8))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 524800))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 24))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 1049088))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 40))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 1573376))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 56))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 32))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 4))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 524320))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 20))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 1048608))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 36))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 1572896))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 52))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 544))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 12))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 524832))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 28))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 1049120))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 44))];
compute[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_inner_inner_inner * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_inner_inner_inner) + 1573408))] = compute_local[((((yy_inner_inner_inner * 2) + xx_inner_inner_inner) + 60))];
}
}
}
extern "C" __global__ void conv_partially_fused(float* __restrict__ data,
float* __restrict__ kernel5,
float* __restrict__ kernel3,
float* __restrict__ compute5,
float* __restrict__ compute3) {
float compute5_local[64];
float compute3_local[64];
__shared__ float pad_temp_shared[544];
__shared__ float kernel5_shared[800];
__shared__ float kernel3_shared[288];
float pad_temp_shared_local[16];
float kernel_shared_local[4];
for (int yy_c_init = 0; yy_c_init < 2; ++yy_c_init) {
for (int xx_c_init = 0; xx_c_init < 2; ++xx_c_init) {
compute5_local[(((yy_c_init * 2) + xx_c_init))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 16))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 32))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 48))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 8))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 24))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 40))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 56))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 4))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 20))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 36))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 52))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 12))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 28))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 44))] = 0.000000e+00f;
compute5_local[((((yy_c_init * 2) + xx_c_init) + 60))] = 0.000000e+00f;
compute3_local[(((yy_c_init * 2) + xx_c_init))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 16))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 32))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 48))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 8))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 24))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 40))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 56))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 4))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 20))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 36))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 52))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 12))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 28))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 44))] = 0.000000e+00f;
compute3_local[((((yy_c_init * 2) + xx_c_init) + 60))] = 0.000000e+00f;
}
}
for (int rc_outer = 0; rc_outer < 32; ++rc_outer) {
__syncthreads();
// Load image to shared
for (int ax_ffiii = 0; ax_ffiii < 5; ++ax_ffiii) {
if (((((((int)threadIdx.x) * 5) + ax_ffiii) / 68) + ((int)threadIdx.z)) < 8) {
if ((((((int)threadIdx.z) * 68) + (((int)threadIdx.x) * 5)) + ax_ffiii) < 544) {
if (((((int)threadIdx.x) * 5) + ax_ffiii) < 68) {
pad_temp_shared[((((((int)threadIdx.z) * 68) + (((int)threadIdx.x) * 5)) + ax_ffiii))] = (((((2 <= ((((int)blockIdx.y) * 4) + ((int)threadIdx.z))) && (((((int)blockIdx.y) * 4) + ((int)threadIdx.z)) < 258)) && (2 <= (((((int)blockIdx.x) * 64) + (((int)threadIdx.x) * 5)) + ax_ffiii))) && ((((((int)blockIdx.x) * 64) + (((int)threadIdx.x) * 5)) + ax_ffiii) < 258)) ? data[((((((((rc_outer * 65536) + (((int)blockIdx.y) * 1024)) + (((int)threadIdx.z) * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 5)) + ax_ffiii) - 514))] : 0.000000e+00f);
}
}
}
}
// Load 5x5 kernel to shared
for (int ax_ffiii1 = 0; ax_ffiii1 < 7; ++ax_ffiii1) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 7) + ax_ffiii1) / 25)) < 32) {
if (((((int)threadIdx.z) * 20) + (((((int)threadIdx.x) * 7) + ax_ffiii1) / 5)) < 160) {
if ((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 7)) + ax_ffiii1) < 800) {
if (((((int)threadIdx.x) * 7) + ax_ffiii1) < 100) {
kernel5_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 7)) + ax_ffiii1))] = kernel5[(((((((int)threadIdx.z) * 3200) + ((((((int)threadIdx.x) * 7) + ax_ffiii1) / 25) * 800)) + (rc_outer * 25)) + (((((int)threadIdx.x) * 7) + ax_ffiii1) % 25)))];
}
}
}
}
}
// Load 3x3 kernel to shared
for (int ax_ffiii1 = 0; ax_ffiii1 < 3; ++ax_ffiii1) {
if (((((int)threadIdx.z) * 4) + (((((int)threadIdx.x) * 3) + ax_ffiii1) / 9)) < 32) {
if (((((int)threadIdx.z) * 12) + (((((int)threadIdx.x) * 3) + ax_ffiii1) / 3)) < 96) {
if ((((((int)threadIdx.z) * 36) + (((int)threadIdx.x) * 3)) + ax_ffiii1) < 288) {
if (((((int)threadIdx.x) * 3) + ax_ffiii1) < 36) {
kernel3_shared[((((((int)threadIdx.z) * 36) + (((int)threadIdx.x) * 3)) + ax_ffiii1))] = kernel3[(((((((int)threadIdx.z) * 1152) + ((((((int)threadIdx.x) * 3) + ax_ffiii1) / 25) * 288)) + (rc_outer * 9)) + (((((int)threadIdx.x) * 3) + ax_ffiii1) % 9)))];
}
}
}
}
}
__syncthreads();
// 5x5 reduction in registers
for (int ry_io = 0; ry_io < 5; ++ry_io) {
for (int rx_io = 0; rx_io < 5; ++rx_io) {
for (int ax2 = 0; ax2 < 2; ++ax2) {
for (int ax3 = 0; ax3 < 2; ++ax3) {
pad_temp_shared_local[(((ax2 * 2) + ax3))] = pad_temp_shared[((((((ax2 * 68) + (ry_io * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_io))];
pad_temp_shared_local[((((ax2 * 2) + ax3) + 8))] = pad_temp_shared[(((((((ax2 * 68) + (ry_io * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_io) + 136))];
pad_temp_shared_local[((((ax2 * 2) + ax3) + 4))] = pad_temp_shared[(((((((ax2 * 68) + (ry_io * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_io) + 32))];
pad_temp_shared_local[((((ax2 * 2) + ax3) + 12))] = pad_temp_shared[(((((((ax2 * 68) + (ry_io * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_io) + 168))];
}
}
kernel_shared_local[(0)] = kernel5_shared[((((((int)threadIdx.z) * 25) + (ry_io * 5)) + rx_io))];
kernel_shared_local[(1)] = kernel5_shared[(((((((int)threadIdx.z) * 25) + (ry_io * 5)) + rx_io) + 200))];
kernel_shared_local[(2)] = kernel5_shared[(((((((int)threadIdx.z) * 25) + (ry_io * 5)) + rx_io) + 400))];
kernel_shared_local[(3)] = kernel5_shared[(((((((int)threadIdx.z) * 25) + (ry_io * 5)) + rx_io) + 600))];
for (int yy_c = 0; yy_c < 2; ++yy_c) {
for (int xx_c = 0; xx_c < 2; ++xx_c) {
compute5_local[(((yy_c * 2) + xx_c))] = (compute5_local[(((yy_c * 2) + xx_c))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(0)]));
compute5_local[((((yy_c * 2) + xx_c) + 16))] = (compute5_local[((((yy_c * 2) + xx_c) + 16))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(1)]));
compute5_local[((((yy_c * 2) + xx_c) + 32))] = (compute5_local[((((yy_c * 2) + xx_c) + 32))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(2)]));
compute5_local[((((yy_c * 2) + xx_c) + 48))] = (compute5_local[((((yy_c * 2) + xx_c) + 48))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(3)]));
compute5_local[((((yy_c * 2) + xx_c) + 8))] = (compute5_local[((((yy_c * 2) + xx_c) + 8))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(0)]));
compute5_local[((((yy_c * 2) + xx_c) + 24))] = (compute5_local[((((yy_c * 2) + xx_c) + 24))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(1)]));
compute5_local[((((yy_c * 2) + xx_c) + 40))] = (compute5_local[((((yy_c * 2) + xx_c) + 40))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(2)]));
compute5_local[((((yy_c * 2) + xx_c) + 56))] = (compute5_local[((((yy_c * 2) + xx_c) + 56))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(3)]));
compute5_local[((((yy_c * 2) + xx_c) + 4))] = (compute5_local[((((yy_c * 2) + xx_c) + 4))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(0)]));
compute5_local[((((yy_c * 2) + xx_c) + 20))] = (compute5_local[((((yy_c * 2) + xx_c) + 20))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(1)]));
compute5_local[((((yy_c * 2) + xx_c) + 36))] = (compute5_local[((((yy_c * 2) + xx_c) + 36))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(2)]));
compute5_local[((((yy_c * 2) + xx_c) + 52))] = (compute5_local[((((yy_c * 2) + xx_c) + 52))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(3)]));
compute5_local[((((yy_c * 2) + xx_c) + 12))] = (compute5_local[((((yy_c * 2) + xx_c) + 12))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(0)]));
compute5_local[((((yy_c * 2) + xx_c) + 28))] = (compute5_local[((((yy_c * 2) + xx_c) + 28))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(1)]));
compute5_local[((((yy_c * 2) + xx_c) + 44))] = (compute5_local[((((yy_c * 2) + xx_c) + 44))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(2)]));
compute5_local[((((yy_c * 2) + xx_c) + 60))] = (compute5_local[((((yy_c * 2) + xx_c) + 60))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(3)]));
}
}
}
}
// 3x3 reduction in registers
for (int ry_io = 0; ry_io < 3; ++ry_io) {
for (int rx_io = 0; rx_io < 3; ++rx_io) {
for (int ax2 = 0; ax2 < 2; ++ax2) {
for (int ax3 = 0; ax3 < 2; ++ax3) {
pad_temp_shared_local[(((ax2 * 2) + ax3))] = pad_temp_shared[((((((ax2 * 68) + (ry_io * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_io))];
pad_temp_shared_local[((((ax2 * 2) + ax3) + 8))] = pad_temp_shared[(((((((ax2 * 68) + (ry_io * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_io) + 136))];
pad_temp_shared_local[((((ax2 * 2) + ax3) + 4))] = pad_temp_shared[(((((((ax2 * 68) + (ry_io * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_io) + 32))];
pad_temp_shared_local[((((ax2 * 2) + ax3) + 12))] = pad_temp_shared[(((((((ax2 * 68) + (ry_io * 68)) + (((int)threadIdx.x) * 2)) + ax3) + rx_io) + 168))];
}
}
kernel_shared_local[(0)] = kernel3_shared[((((((int)threadIdx.z) * 9) + (ry_io * 3)) + rx_io))];
kernel_shared_local[(1)] = kernel3_shared[(((((((int)threadIdx.z) * 9) + (ry_io * 3)) + rx_io) + 72))];
kernel_shared_local[(2)] = kernel3_shared[(((((((int)threadIdx.z) * 9) + (ry_io * 3)) + rx_io) + 144))];
kernel_shared_local[(3)] = kernel3_shared[(((((((int)threadIdx.z) * 9) + (ry_io * 3)) + rx_io) + 216))];
for (int yy_c = 0; yy_c < 2; ++yy_c) {
for (int xx_c = 0; xx_c < 2; ++xx_c) {
compute3_local[(((yy_c * 2) + xx_c))] = (compute3_local[(((yy_c * 2) + xx_c))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(0)]));
compute3_local[((((yy_c * 2) + xx_c) + 16))] = (compute3_local[((((yy_c * 2) + xx_c) + 16))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(1)]));
compute3_local[((((yy_c * 2) + xx_c) + 32))] = (compute3_local[((((yy_c * 2) + xx_c) + 32))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(2)]));
compute3_local[((((yy_c * 2) + xx_c) + 48))] = (compute3_local[((((yy_c * 2) + xx_c) + 48))] + (pad_temp_shared_local[(((yy_c * 2) + xx_c))] * kernel_shared_local[(3)]));
compute3_local[((((yy_c * 2) + xx_c) + 8))] = (compute3_local[((((yy_c * 2) + xx_c) + 8))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(0)]));
compute3_local[((((yy_c * 2) + xx_c) + 24))] = (compute3_local[((((yy_c * 2) + xx_c) + 24))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(1)]));
compute3_local[((((yy_c * 2) + xx_c) + 40))] = (compute3_local[((((yy_c * 2) + xx_c) + 40))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(2)]));
compute3_local[((((yy_c * 2) + xx_c) + 56))] = (compute3_local[((((yy_c * 2) + xx_c) + 56))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 8))] * kernel_shared_local[(3)]));
compute3_local[((((yy_c * 2) + xx_c) + 4))] = (compute3_local[((((yy_c * 2) + xx_c) + 4))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(0)]));
compute3_local[((((yy_c * 2) + xx_c) + 20))] = (compute3_local[((((yy_c * 2) + xx_c) + 20))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(1)]));
compute3_local[((((yy_c * 2) + xx_c) + 36))] = (compute3_local[((((yy_c * 2) + xx_c) + 36))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(2)]));
compute3_local[((((yy_c * 2) + xx_c) + 52))] = (compute3_local[((((yy_c * 2) + xx_c) + 52))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 4))] * kernel_shared_local[(3)]));
compute3_local[((((yy_c * 2) + xx_c) + 12))] = (compute3_local[((((yy_c * 2) + xx_c) + 12))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(0)]));
compute3_local[((((yy_c * 2) + xx_c) + 28))] = (compute3_local[((((yy_c * 2) + xx_c) + 28))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(1)]));
compute3_local[((((yy_c * 2) + xx_c) + 44))] = (compute3_local[((((yy_c * 2) + xx_c) + 44))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(2)]));
compute3_local[((((yy_c * 2) + xx_c) + 60))] = (compute3_local[((((yy_c * 2) + xx_c) + 60))] + (pad_temp_shared_local[((((yy_c * 2) + xx_c) + 12))] * kernel_shared_local[(3)]));
}
}
}
}
}
// Store to global
for (int yy_iii = 0; yy_iii < 2; ++yy_iii) {
for (int xx_iii = 0; xx_iii < 2; ++xx_iii) {
compute5[(((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii))] = compute5_local[(((yy_iii * 2) + xx_iii))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 524288))] = compute5_local[((((yy_iii * 2) + xx_iii) + 16))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1048576))] = compute5_local[((((yy_iii * 2) + xx_iii) + 32))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1572864))] = compute5_local[((((yy_iii * 2) + xx_iii) + 48))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 512))] = compute5_local[((((yy_iii * 2) + xx_iii) + 8))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 524800))] = compute5_local[((((yy_iii * 2) + xx_iii) + 24))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1049088))] = compute5_local[((((yy_iii * 2) + xx_iii) + 40))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1573376))] = compute5_local[((((yy_iii * 2) + xx_iii) + 56))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 32))] = compute5_local[((((yy_iii * 2) + xx_iii) + 4))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 524320))] = compute5_local[((((yy_iii * 2) + xx_iii) + 20))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1048608))] = compute5_local[((((yy_iii * 2) + xx_iii) + 36))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1572896))] = compute5_local[((((yy_iii * 2) + xx_iii) + 52))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 544))] = compute5_local[((((yy_iii * 2) + xx_iii) + 12))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 524832))] = compute5_local[((((yy_iii * 2) + xx_iii) + 28))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1049120))] = compute5_local[((((yy_iii * 2) + xx_iii) + 44))];
compute5[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1573408))] = compute5_local[((((yy_iii * 2) + xx_iii) + 60))];
compute3[(((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii))] = compute3_local[(((yy_iii * 2) + xx_iii))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 524288))] = compute3_local[((((yy_iii * 2) + xx_iii) + 16))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1048576))] = compute3_local[((((yy_iii * 2) + xx_iii) + 32))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1572864))] = compute3_local[((((yy_iii * 2) + xx_iii) + 48))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 512))] = compute3_local[((((yy_iii * 2) + xx_iii) + 8))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 524800))] = compute3_local[((((yy_iii * 2) + xx_iii) + 24))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1049088))] = compute3_local[((((yy_iii * 2) + xx_iii) + 40))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1573376))] = compute3_local[((((yy_iii * 2) + xx_iii) + 56))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 32))] = compute3_local[((((yy_iii * 2) + xx_iii) + 4))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 524320))] = compute3_local[((((yy_iii * 2) + xx_iii) + 20))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1048608))] = compute3_local[((((yy_iii * 2) + xx_iii) + 36))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1572896))] = compute3_local[((((yy_iii * 2) + xx_iii) + 52))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 544))] = compute3_local[((((yy_iii * 2) + xx_iii) + 12))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 524832))] = compute3_local[((((yy_iii * 2) + xx_iii) + 28))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1049120))] = compute3_local[((((yy_iii * 2) + xx_iii) + 44))];
compute3[((((((((((int)threadIdx.z) * 65536) + (((int)blockIdx.y) * 1024)) + (yy_iii * 256)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + xx_iii) + 1573408))] = compute3_local[((((yy_iii * 2) + xx_iii) + 60))];
}
}
}
// Fused convolution kernel ("old" variant): accumulates two outputs from the
// same staged input tile — compute5 (driven by kernel5, 5x5 taps) and
// compute3 (driven by kernel3, 3x3 taps). Auto-generated flattened index
// arithmetic; indices below are left exactly as generated.
//
// Expected launch geometry (inferred from the index math — TODO confirm
// against the host-side launcher): threadIdx.x in [0,4), threadIdx.y in
// [0,2), threadIdx.z in [0,8); blockIdx.x/blockIdx.y tile a 256-wide image
// (row stride 256, per-channel plane stride 65536).
//
// Shared memory: pad_temp_shared stages a zero-padded input tile (the
// `2 <=` / `< 258` predicates plus the `- 514` base offset implement a
// 2-pixel border of implicit zero padding); kernel5_shared / kernel3_shared
// stage per-iteration filter slices. Each accumulator array holds a
// 4x4 register tile replicated 4x (offsets +16/+32/+48) for four output
// feature-map groups.
extern "C" __global__ void conv_partially_fused_old(float* __restrict__ data,
float* __restrict__ kernel5,
float* __restrict__ kernel3,
float* __restrict__ compute5,
float* __restrict__ compute3) {
// Per-thread register accumulators: 4 (ff) x 4 (xx) x 4 groups = 64 floats each.
float compute5_local[64];
float compute3_local[64];
__shared__ float pad_temp_shared[1600];
__shared__ float kernel5_shared[320];
__shared__ float kernel3_shared[192];
// Register caches for the inner product: 16 staged inputs, 4 staged weights.
float pad_temp_shared_local[16];
float kernel_shared_local[4];
// Zero all 64 accumulator slots of both outputs.
for (int ff_c_init = 0; ff_c_init < 4; ++ff_c_init) {
for (int xx_c_init = 0; xx_c_init < 4; ++xx_c_init) {
compute5_local[(((ff_c_init * 4) + xx_c_init))] = 0.000000e+00f;
compute5_local[((((ff_c_init * 4) + xx_c_init) + 16))] = 0.000000e+00f;
compute5_local[((((ff_c_init * 4) + xx_c_init) + 32))] = 0.000000e+00f;
compute5_local[((((ff_c_init * 4) + xx_c_init) + 48))] = 0.000000e+00f;
compute3_local[(((ff_c_init * 4) + xx_c_init))] = 0.000000e+00f;
compute3_local[((((ff_c_init * 4) + xx_c_init) + 16))] = 0.000000e+00f;
compute3_local[((((ff_c_init * 4) + xx_c_init) + 32))] = 0.000000e+00f;
compute3_local[((((ff_c_init * 4) + xx_c_init) + 48))] = 0.000000e+00f;
}
}
// Outer reduction over 16 input-channel chunks.
for (int rc_outer = 0; rc_outer < 16; ++rc_outer) {
// Stage the padded input tile for all 5 filter rows into shared memory
// (row block at offset ry_outer * 320). Out-of-bounds taps read as 0.
for (int ry_outer = 0; ry_outer < 5; ++ry_outer) {
__syncthreads();
for (int mfi = 0; mfi < 5; ++mfi) {
pad_temp_shared[ry_outer * 320 + (((((threadIdx.z * 40) + (threadIdx.y * 20)) + (threadIdx.x * 5)) + mfi))] = (((((2 <= (((blockIdx.y * 8) + ry_outer) + (((threadIdx.z * 2) + threadIdx.y) & 7))) && ((((blockIdx.y * 8) + ry_outer) + (((threadIdx.z * 2) + threadIdx.y) & 7)) < 258)) && (2 <= (((blockIdx.x * 16) + (threadIdx.x * 5)) + mfi))) && ((((blockIdx.x * 16) + (threadIdx.x * 5)) + mfi) < 258)) ? data[((((((((((rc_outer * 131072) + ((((threadIdx.z * 2) + threadIdx.y) >> 3) * 65536)) + (blockIdx.y * 2048)) + (ry_outer * 256)) + ((((threadIdx.z * 2) + threadIdx.y) & 7) * 256)) + (blockIdx.x * 16)) + (threadIdx.x * 5)) + mfi) - 514))] : 0.000000e+00f);
}
}
// --- 5x5 path: accumulate into compute5_local ---
for (int ry_outer = 0; ry_outer < 5; ++ry_outer) {
// Barrier before overwriting kernel5_shared (previous iteration's
// readers must be done).
__syncthreads();
for (int mfi1 = 0; mfi1 < 5; ++mfi1) {
kernel5_shared[(((((threadIdx.z * 40) + (threadIdx.y * 20)) + (threadIdx.x * 5)) + mfi1))] = kernel5[((((((((threadIdx.z * 3200) + (threadIdx.y * 1600)) + ((threadIdx.x >> 1) * 800)) + (rc_outer * 50)) + ((threadIdx.x & 1) * 25)) + (ry_outer * 5)) + mfi1))];
}
// Barrier between writing kernel5_shared and reading it below.
__syncthreads();
// Reduce over 2 inner channels x 5 horizontal taps.
for (int rc_io = 0; rc_io < 2; ++rc_io) {
for (int rx_io = 0; rx_io < 5; ++rx_io) {
// Cache 4 input values for each of 4 accumulator groups.
for (int ax3 = 0; ax3 < 4; ++ax3) {
pad_temp_shared_local[(ax3)] = pad_temp_shared[ry_outer*320 + ((((((rc_io * 160) + (threadIdx.y * 20)) + (threadIdx.x * 4)) + ax3) + rx_io))];
pad_temp_shared_local[((ax3 + 4))] = pad_temp_shared[ry_outer*320 + (((((((rc_io * 160) + (threadIdx.y * 20)) + (threadIdx.x * 4)) + ax3) + rx_io) + 40))];
pad_temp_shared_local[((ax3 + 8))] = pad_temp_shared[ry_outer*320 + (((((((rc_io * 160) + (threadIdx.y * 20)) + (threadIdx.x * 4)) + ax3) + rx_io) + 80))];
pad_temp_shared_local[((ax3 + 12))] = pad_temp_shared[ry_outer*320 + (((((((rc_io * 160) + (threadIdx.y * 20)) + (threadIdx.x * 4)) + ax3) + rx_io) + 120))];
}
// Cache 4 weights (one per output feature in this thread's group).
for (int ax0 = 0; ax0 < 4; ++ax0) {
kernel_shared_local[(ax0)] = kernel5_shared[(((((threadIdx.z * 40) + (ax0 * 10)) + (rc_io * 5)) + rx_io))];
}
// Rank-1 update of the 4x4 register tile (x4 groups).
for (int ff_c = 0; ff_c < 4; ++ff_c) {
for (int xx_c = 0; xx_c < 4; ++xx_c) {
compute5_local[(((ff_c * 4) + xx_c))] = (compute5_local[(((ff_c * 4) + xx_c))] + (pad_temp_shared_local[(xx_c)] * kernel_shared_local[(ff_c)]));
compute5_local[((((ff_c * 4) + xx_c) + 16))] = (compute5_local[((((ff_c * 4) + xx_c) + 16))] + (pad_temp_shared_local[((xx_c + 4))] * kernel_shared_local[(ff_c)]));
compute5_local[((((ff_c * 4) + xx_c) + 32))] = (compute5_local[((((ff_c * 4) + xx_c) + 32))] + (pad_temp_shared_local[((xx_c + 8))] * kernel_shared_local[(ff_c)]));
compute5_local[((((ff_c * 4) + xx_c) + 48))] = (compute5_local[((((ff_c * 4) + xx_c) + 48))] + (pad_temp_shared_local[((xx_c + 12))] * kernel_shared_local[(ff_c)]));
}
}
}
}
}
// --- 3x3 path: accumulate into compute3_local, reusing pad_temp_shared ---
// NOTE(review): this path reads pad_temp_shared with row stride 192 and
// per-row offsets (threadIdx.y * 12, +24/+48/+72), while the tile was
// filled above with row stride 320 and offsets (threadIdx.y * 20,
// +40/+80/+120). If the fill layout is authoritative, this addressing
// looks inconsistent — verify against the non-"_old" variant / the
// generator schedule before reusing this kernel.
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
__syncthreads();
for (int mfi1 = 0; mfi1 < 3; ++mfi1) {
kernel3_shared[(((((threadIdx.z * 24) + (threadIdx.y * 12)) + (threadIdx.x * 3)) + mfi1))] = kernel3[((((((((threadIdx.z * 1152) + (threadIdx.y * 576)) + ((threadIdx.x >> 1) * 288)) + (rc_outer * 18)) + ((threadIdx.x & 1) * 9)) + (ry_outer * 3)) + mfi1))];
}
__syncthreads();
for (int rc_io = 0; rc_io < 2; ++rc_io) {
for (int rx_io = 0; rx_io < 3; ++rx_io) {
for (int ax3 = 0; ax3 < 4; ++ax3) {
pad_temp_shared_local[(ax3)] = pad_temp_shared[ry_outer*192 + ((((((rc_io * 96) + (threadIdx.y * 12)) + (threadIdx.x * 4)) + ax3) + rx_io))];
pad_temp_shared_local[((ax3 + 4))] = pad_temp_shared[ry_outer*192 + (((((((rc_io * 96) + (threadIdx.y * 12)) + (threadIdx.x * 4)) + ax3) + rx_io) + 24))];
pad_temp_shared_local[((ax3 + 8))] = pad_temp_shared[ry_outer*192 + (((((((rc_io * 96) + (threadIdx.y * 12)) + (threadIdx.x * 4)) + ax3) + rx_io) + 48))];
pad_temp_shared_local[((ax3 + 12))] = pad_temp_shared[ry_outer*192 + (((((((rc_io * 96) + (threadIdx.y * 12)) + (threadIdx.x * 4)) + ax3) + rx_io) + 72))];
}
for (int ax0 = 0; ax0 < 4; ++ax0) {
kernel_shared_local[(ax0)] = kernel3_shared[(((((threadIdx.z * 24) + (ax0 * 6)) + (rc_io * 3)) + rx_io))];
}
for (int ff_c = 0; ff_c < 4; ++ff_c) {
for (int xx_c = 0; xx_c < 4; ++xx_c) {
compute3_local[(((ff_c * 4) + xx_c))] = (compute3_local[(((ff_c * 4) + xx_c))] + (pad_temp_shared_local[(xx_c)] * kernel_shared_local[(ff_c)]));
compute3_local[((((ff_c * 4) + xx_c) + 16))] = (compute3_local[((((ff_c * 4) + xx_c) + 16))] + (pad_temp_shared_local[((xx_c + 4))] * kernel_shared_local[(ff_c)]));
compute3_local[((((ff_c * 4) + xx_c) + 32))] = (compute3_local[((((ff_c * 4) + xx_c) + 32))] + (pad_temp_shared_local[((xx_c + 8))] * kernel_shared_local[(ff_c)]));
compute3_local[((((ff_c * 4) + xx_c) + 48))] = (compute3_local[((((ff_c * 4) + xx_c) + 48))] + (pad_temp_shared_local[((xx_c + 12))] * kernel_shared_local[(ff_c)]));
}
}
}
}
}
}
// Write back both register-tile accumulators to the two global outputs.
// Layout: plane stride 65536 per feature map; the +512/+1024/+1536 offsets
// place the 4 accumulator groups two rows apart in the same plane.
for (int ff_iii = 0; ff_iii < 4; ++ff_iii) {
for (int xx_iii = 0; xx_iii < 4; ++xx_iii) {
compute5[((((((((threadIdx.z * 262144) + (ff_iii * 65536)) + (blockIdx.y * 2048)) + (threadIdx.y * 256)) + (blockIdx.x * 16)) + (threadIdx.x * 4)) + xx_iii))] = compute5_local[(((ff_iii * 4) + xx_iii))];
compute5[(((((((((threadIdx.z * 262144) + (ff_iii * 65536)) + (blockIdx.y * 2048)) + (threadIdx.y * 256)) + (blockIdx.x * 16)) + (threadIdx.x * 4)) + xx_iii) + 512))] = compute5_local[((((ff_iii * 4) + xx_iii) + 16))];
compute5[(((((((((threadIdx.z * 262144) + (ff_iii * 65536)) + (blockIdx.y * 2048)) + (threadIdx.y * 256)) + (blockIdx.x * 16)) + (threadIdx.x * 4)) + xx_iii) + 1024))] = compute5_local[((((ff_iii * 4) + xx_iii) + 32))];
compute5[(((((((((threadIdx.z * 262144) + (ff_iii * 65536)) + (blockIdx.y * 2048)) + (threadIdx.y * 256)) + (blockIdx.x * 16)) + (threadIdx.x * 4)) + xx_iii) + 1536))] = compute5_local[((((ff_iii * 4) + xx_iii) + 48))];
compute3[((((((((threadIdx.z * 262144) + (ff_iii * 65536)) + (blockIdx.y * 2048)) + (threadIdx.y * 256)) + (blockIdx.x * 16)) + (threadIdx.x * 4)) + xx_iii))] = compute3_local[(((ff_iii * 4) + xx_iii))];
compute3[(((((((((threadIdx.z * 262144) + (ff_iii * 65536)) + (blockIdx.y * 2048)) + (threadIdx.y * 256)) + (blockIdx.x * 16)) + (threadIdx.x * 4)) + xx_iii) + 512))] = compute3_local[((((ff_iii * 4) + xx_iii) + 16))];
compute3[(((((((((threadIdx.z * 262144) + (ff_iii * 65536)) + (blockIdx.y * 2048)) + (threadIdx.y * 256)) + (blockIdx.x * 16)) + (threadIdx.x * 4)) + xx_iii) + 1024))] = compute3_local[((((ff_iii * 4) + xx_iii) + 32))];
compute3[(((((((((threadIdx.z * 262144) + (ff_iii * 65536)) + (blockIdx.y * 2048)) + (threadIdx.y * 256)) + (blockIdx.x * 16)) + (threadIdx.x * 4)) + xx_iii) + 1536))] = compute3_local[((((ff_iii * 4) + xx_iii) + 48))];
}
}
}
extern "C" __global__ void conv_fused(float* __restrict__ data,
float* __restrict__ kernel,
float* __restrict__ compute) {
float compute_local[16];
__shared__ float pad_temp_shared[640];
__shared__ float kernel_shared[3200];
float pad_temp_shared_local[16];
float kernel_shared_local[1];
compute_local[(0)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
compute_local[(4)] = 0.000000e+00f;
compute_local[(5)] = 0.000000e+00f;
compute_local[(6)] = 0.000000e+00f;
compute_local[(7)] = 0.000000e+00f;
compute_local[(8)] = 0.000000e+00f;
compute_local[(9)] = 0.000000e+00f;
compute_local[(10)] = 0.000000e+00f;
compute_local[(11)] = 0.000000e+00f;
compute_local[(12)] = 0.000000e+00f;
compute_local[(13)] = 0.000000e+00f;
compute_local[(14)] = 0.000000e+00f;
compute_local[(15)] = 0.000000e+00f;
for (int rc_outer = 0; rc_outer < 8; ++rc_outer) {
__syncthreads();
pad_temp_shared[(((((int)threadIdx.z) * 20) + (((int)threadIdx.x) * 5)))] = ((((2 <= ((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7))) && (((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7)) < 258)) && (2 <= ((((int)blockIdx.x) * 16) + (((int)threadIdx.x) * 5)))) ? data[((((((((rc_outer * 262144) + ((((int)threadIdx.z) >> 3) * 65536)) + (((int)blockIdx.y) * 1024)) + ((((int)threadIdx.z) & 7) * 256)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 5)) - 514))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 20) + (((int)threadIdx.x) * 5)) + 1))] = ((((2 <= ((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7))) && (((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7)) < 258)) && (1 <= ((((int)blockIdx.x) * 16) + (((int)threadIdx.x) * 5)))) ? data[((((((((rc_outer * 262144) + ((((int)threadIdx.z) >> 3) * 65536)) + (((int)blockIdx.y) * 1024)) + ((((int)threadIdx.z) & 7) * 256)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 5)) - 513))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 20) + (((int)threadIdx.x) * 5)) + 2))] = (((2 <= ((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7))) && (((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7)) < 258)) ? data[((((((((rc_outer * 262144) + ((((int)threadIdx.z) >> 3) * 65536)) + (((int)blockIdx.y) * 1024)) + ((((int)threadIdx.z) & 7) * 256)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 5)) - 512))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 20) + (((int)threadIdx.x) * 5)) + 3))] = ((((2 <= ((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7))) && (((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7)) < 258)) && (((((int)blockIdx.x) * 16) + (((int)threadIdx.x) * 5)) < 255)) ? data[((((((((rc_outer * 262144) + ((((int)threadIdx.z) >> 3) * 65536)) + (((int)blockIdx.y) * 1024)) + ((((int)threadIdx.z) & 7) * 256)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 5)) - 511))] : 0.000000e+00f);
pad_temp_shared[((((((int)threadIdx.z) * 20) + (((int)threadIdx.x) * 5)) + 4))] = ((((2 <= ((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7))) && (((((int)blockIdx.y) * 4) + (((int)threadIdx.z) & 7)) < 258)) && (((((int)blockIdx.x) * 16) + (((int)threadIdx.x) * 5)) < 254)) ? data[((((((((rc_outer * 262144) + ((((int)threadIdx.z) >> 3) * 65536)) + (((int)blockIdx.y) * 1024)) + ((((int)threadIdx.z) & 7) * 256)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 5)) - 510))] : 0.000000e+00f);
kernel_shared[(((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)))] = kernel[(((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 1))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 1))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 2))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 2))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 3))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 3))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 4))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 4))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 5))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 5))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 6))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 6))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 7))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 7))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 8))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 8))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 9))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 9))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 10))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 10))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 11))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 11))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 12))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 12))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 13))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 13))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 14))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 14))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 15))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 15))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 16))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 16))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 17))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 17))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 18))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 18))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 19))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 19))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 20))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 20))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 21))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 21))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 22))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 22))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 23))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 23))];
kernel_shared[((((((int)threadIdx.z) * 100) + (((int)threadIdx.x) * 25)) + 24))] = kernel[((((((((int)blockIdx.z) * 25600) + (((int)threadIdx.z) * 800)) + (rc_outer * 100)) + (((int)threadIdx.x) * 25)) + 24))];
__syncthreads();
for (int rc_inner_outer = 0; rc_inner_outer < 4; ++rc_inner_outer) {
pad_temp_shared_local[(0)] = pad_temp_shared[(((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 1))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 2))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 3))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 20))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 21))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 22))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 23))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 40))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 41))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 42))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 60))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 61))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 1))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 2))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 3))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 4))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 21))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 22))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 23))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 24))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 41))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 42))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 61))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 1))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 2))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 3))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 4))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 5))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 22))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 23))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 24))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 25))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 42))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 45))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 2))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 3))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 4))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 5))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 6))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 23))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 24))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 25))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 26))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 45))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 46))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 66))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 3))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 4))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 5))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 6))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 7))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 24))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 25))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 26))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 27))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 45))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 46))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 47))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 66))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 67))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 4))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 20))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 21))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 22))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 23))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 40))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 41))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 42))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 60))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 61))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 80))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 81))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 5))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 21))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 22))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 23))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 24))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 41))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 42))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
// NOTE(review): auto-generated (TVM-style) fully-unrolled convolution inner
// loop, shown mid-function -- the enclosing kernel begins and ends outside
// this fragment. Prefer editing the schedule/generator, not this text.
//
// Repeating pattern per filter tap:
//   1) load a 4x4 patch of the shared input tile into pad_temp_shared_local
//      (row pitch is 20 floats: offsets step +1 along a row, +20 per row;
//      rc_inner_outer*160 selects the current input-channel slice),
//   2) load one weight into kernel_shared_local (threadIdx.z*100 selects the
//      output channel, rc_inner_outer*25 the channel slice, +k the tap),
//   3) FMA the patch into the 16-element accumulator compute_local[0..15].
// The +25 weight stride per channel slice suggests a 5x5 filter, with the
// input patch base offset for tap k being 20*row(k) + col(k) -- TODO confirm
// against the full kernel.
//
// First group below (tap +6) is truncated: loads of locals 0..6 precede the
// visible fragment.
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 61))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 81))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 6))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +7: input window shifts right one column (patch base offset +22).
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 22))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 23))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 24))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 25))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 42))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 45))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 7))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +8: patch base offset +23.
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 23))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 24))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 25))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 26))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 45))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 46))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 66))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 86))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 8))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +9: patch base offset +24.
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 24))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 25))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 26))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 27))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 45))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 46))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 47))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 66))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 67))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 86))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 87))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 9))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +10: next filter row -- input window moves down one tile row
// (patch base offset +40 = 2 * row pitch 20).
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 40))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 41))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 42))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 60))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 61))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 80))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 81))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 100))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 101))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 102))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 10))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +11: patch base offset +41.
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 41))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 42))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 61))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 81))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 101))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 102))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 11))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +12: patch base offset +42.
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 42))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 45))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 102))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 105))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 12))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +13: patch base offset +43.
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 43))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 45))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 46))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 66))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 86))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 105))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 106))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 13))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +14: patch base offset +44.
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 44))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 45))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 46))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 47))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 66))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 67))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 86))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 87))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 105))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 106))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 107))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 14))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +15: next filter row -- patch base offset +60 = 3 * row pitch 20.
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 60))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 61))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 80))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 81))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 100))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 101))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 102))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 120))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 121))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 122))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 123))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 15))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
// tap +16: patch base offset +61. This group is truncated at the bottom of
// the visible fragment (FMAs for locals 3..15 follow past it).
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 61))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 81))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 101))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 102))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 121))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 122))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 123))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 124))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 16))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 62))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 102))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 105))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 122))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 123))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 124))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 125))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 17))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 63))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 66))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 86))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 105))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 106))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 123))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 124))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 125))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 126))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 18))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 64))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 65))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 66))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 67))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 86))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 87))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 105))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 106))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 107))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 124))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 125))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 126))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 127))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 19))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 80))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 81))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 100))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 101))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 102))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 120))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 121))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 122))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 123))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 140))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 141))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 142))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 143))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 20))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 81))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 101))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 102))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 121))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 122))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 123))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 124))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 141))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 142))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 143))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 144))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 21))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 82))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 102))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 105))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 122))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 123))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 124))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 125))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 142))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 143))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 144))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 145))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 22))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 83))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 86))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 103))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 105))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 106))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 123))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 124))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 125))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 126))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 143))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 144))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 145))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 146))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 23))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 84))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 85))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 86))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 87))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 104))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 105))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 106))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 107))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 124))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 125))];
pad_temp_shared_local[(10)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 126))];
pad_temp_shared_local[(11)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 127))];
pad_temp_shared_local[(12)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 144))];
pad_temp_shared_local[(13)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 145))];
pad_temp_shared_local[(14)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 146))];
pad_temp_shared_local[(15)] = pad_temp_shared[((((rc_inner_outer * 160) + (((int)threadIdx.x) * 4)) + 147))];
kernel_shared_local[(0)] = kernel_shared[((((((int)threadIdx.z) * 100) + (rc_inner_outer * 25)) + 24))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(0)]));
compute_local[(4)] = (compute_local[(4)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(0)]));
compute_local[(5)] = (compute_local[(5)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(6)] = (compute_local[(6)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(0)]));
compute_local[(7)] = (compute_local[(7)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(0)]));
compute_local[(8)] = (compute_local[(8)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(0)]));
compute_local[(9)] = (compute_local[(9)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(0)]));
compute_local[(10)] = (compute_local[(10)] + (pad_temp_shared_local[(10)] * kernel_shared_local[(0)]));
compute_local[(11)] = (compute_local[(11)] + (pad_temp_shared_local[(11)] * kernel_shared_local[(0)]));
compute_local[(12)] = (compute_local[(12)] + (pad_temp_shared_local[(12)] * kernel_shared_local[(0)]));
compute_local[(13)] = (compute_local[(13)] + (pad_temp_shared_local[(13)] * kernel_shared_local[(0)]));
compute_local[(14)] = (compute_local[(14)] + (pad_temp_shared_local[(14)] * kernel_shared_local[(0)]));
compute_local[(15)] = (compute_local[(15)] + (pad_temp_shared_local[(15)] * kernel_shared_local[(0)]));
}
}
compute[((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)))] = compute_local[(0)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 1))] = compute_local[(1)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 2))] = compute_local[(2)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 3))] = compute_local[(3)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 256))] = compute_local[(4)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 257))] = compute_local[(5)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 258))] = compute_local[(6)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 259))] = compute_local[(7)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 512))] = compute_local[(8)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 513))] = compute_local[(9)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 514))] = compute_local[(10)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 515))] = compute_local[(11)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 768))] = compute_local[(12)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 769))] = compute_local[(13)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 770))] = compute_local[(14)];
compute[(((((((((int)blockIdx.z) * 2097152) + (((int)threadIdx.z) * 65536)) + (((int)blockIdx.y) * 1024)) + (((int)blockIdx.x) * 16)) + (((int)threadIdx.x) * 4)) + 771))] = compute_local[(15)];
}
// Benchmark driver: times four convolution variants (separate 5x5 and 3x3
// kernels, a partially fused kernel, and a fully fused kernel) over 1000
// measured iterations after 1000 warm-up iterations.
int main() {
    // Fixed benchmark configuration.
    int batch = 1;
    int in_channel = 32;
    int in_height = 256;
    int in_width = 256;
    int num_filter = 64;
    float* images;
    float* kernelsf;
    float* computef;
    float* kernels5;
    float* compute5;
    float* kernels3;
    float* compute3;
    cudaMalloc((void**)&images, batch * in_channel * in_height * in_width * sizeof(float));
    cudaMalloc((void**)&kernelsf, 2 * num_filter * in_channel * 5 * 5 * sizeof(float));
    cudaMalloc((void**)&computef, 2 * batch * in_width * in_height * num_filter * sizeof(float));
    cudaMalloc((void**)&kernels5, num_filter * in_channel * 5 * 5 * sizeof(float));
    cudaMalloc((void**)&compute5, batch * in_width * in_height * num_filter * sizeof(float));
    cudaMalloc((void**)&kernels3, 2 * num_filter * in_channel * 3 * 3 * sizeof(float));
    cudaMalloc((void**)&compute3, batch * in_width * in_height * num_filter * sizeof(float));
    int w_iters = 1000;            // warm-up iterations (not accumulated)
    int iters = 1000 + w_iters;    // total launches per kernel
    int m_iters = iters - w_iters; // measured iterations
    float no_fused5 = 0;
    float no_fused3 = 0;
    float partially_fused = 0;
    float fully_fused = 0;
    dim3 ugrid3 = dim3(16, 32, 1);
    dim3 ublock3 = dim3(16, 1, 8);
    dim3 ugrid5 = dim3(4, 64, 1);
    dim3 ublock5 = dim3(16, 1, 8);
    dim3 pgrid = dim3(4, 64, 1);
    dim3 pblock = dim3(16, 1, 8);
    dim3 fgrid = dim3(16, 64, 2);
    dim3 fblock = dim3(4, 1, 32);
    // One reusable event pair: the original created four pairs per iteration
    // and never destroyed any of them (8000 leaked events).
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    float elapsed = 0;
    for (int i = 0; i < iters; ++i) {
        cudaEventRecord(start);
        conv5<<<ugrid5, ublock5>>>(images, kernels5, compute5);
        cudaEventRecord(end);
        cudaEventSynchronize(end);
        cudaEventElapsedTime(&elapsed, start, end);
        if (i >= w_iters) no_fused5 += elapsed;

        cudaEventRecord(start);
        conv3<<<ugrid3, ublock3>>>(images, kernels3, compute3);
        cudaEventRecord(end);
        cudaEventSynchronize(end);
        cudaEventElapsedTime(&elapsed, start, end);
        if (i >= w_iters) no_fused3 += elapsed;

        cudaEventRecord(start);
        conv_partially_fused<<<pgrid, pblock>>>(images, kernels5, kernels3, compute5, compute3);
        cudaEventRecord(end);
        cudaEventSynchronize(end);
        cudaEventElapsedTime(&elapsed, start, end);
        if (i >= w_iters) partially_fused += elapsed;

        cudaEventRecord(start);
        conv_fused<<<fgrid, fblock>>>(images, kernelsf, computef);
        cudaEventRecord(end);
        cudaEventSynchronize(end);
        cudaEventElapsedTime(&elapsed, start, end);
        if (i >= w_iters) fully_fused += elapsed;
    }
    float no_fused = no_fused5 + no_fused3;
    // BUG FIX: averages must divide by the number of MEASURED iterations.
    // The original divided by `iters` (which includes the skipped warm-up
    // runs), halving every reported time.
    std::cout << "No Fusion : " << no_fused / m_iters << " (" << no_fused5 / m_iters << ", " << no_fused3 / m_iters << ")" << std::endl;
    std::cout << "Partial Fusion : " << partially_fused / m_iters << std::endl;
    std::cout << "Full Fusion : " << fully_fused / m_iters << std::endl;
    // Release events and device memory (the original leaked all of these).
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(images);
    cudaFree(kernelsf);
    cudaFree(computef);
    cudaFree(kernels5);
    cudaFree(compute5);
    cudaFree(kernels3);
    cudaFree(compute3);
    return 0;
}
|
19,952 | #include "includes.h"
// Unrolled final-warp reduction over shared memory: S[tx] accumulates
// S[tx+32..tx+1]. The caller invokes this only from threads with
// tx < WARP_SIZE, so a block-wide __syncthreads() here executes in divergent
// control flow -- undefined behavior. __syncwarp() (CUDA 9+) provides the
// intra-warp barrier and memory ordering that is actually required,
// including on Volta+ with independent thread scheduling.
__device__ void warp_reduce(float* S,int tx){
    S[tx] += S[tx + 32]; __syncwarp();
    S[tx] += S[tx + 16]; __syncwarp();
    S[tx] += S[tx + 8];  __syncwarp();
    S[tx] += S[tx + 4];  __syncwarp();
    S[tx] += S[tx + 2];  __syncwarp();
    S[tx] += S[tx + 1];  __syncwarp();
}
// Block-wise sum reduction: each block reduces 2*blockDim.x input elements
// into out[blockIdx.x]. First add performed during the load so all threads
// are active from the start; last warp unrolled via warp_reduce.
__global__ void reduce_v4(float* in,float* out, int n){
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int BX = blockDim.x; //same as THEAD_MAX
    int i = bx*(BX*2)+tx;
    __shared__ float S[THEAD_MAX];
    // BUG FIX: `n` was previously unused and both loads were unguarded,
    // reading out of bounds whenever n is not a multiple of 2*BX. Missing
    // elements contribute the additive identity (0).
    float v = (i < n) ? in[i] : 0.0f;
    if (i + BX < n) v += in[i + BX];
    S[tx] = v;
    __syncthreads();
    // Tree reduction in shared memory down to one warp's worth of partials.
    for(int s=BX/2; s>WARP_SIZE ;s>>=1){
        if(tx < s)
            S[tx] += S[tx+s];
        __syncthreads();
    }
    if(tx < WARP_SIZE)
        warp_reduce(S,tx); // unrolled last warp
    if(tx==0)
        out[bx] = S[0];
}
19,953 | #include "includes.h"
// Busy-wait on the device until at least `clock_count` clocks have elapsed
// (as reported by clock64()); optionally report the measured elapsed clock
// count through `d` when it is non-NULL.
__global__ void clock_block(clock_t *d, clock_t clock_count) {
  const clock_t begin = clock64();
  clock_t elapsed = 0;
  while (elapsed < clock_count) {
    elapsed = clock64() - begin;
  }
  if (d != NULL) {
    *d = elapsed;
  }
}
19,954 | /*__global__ void getPointEvals(float* unknowns, float* mPoints, float* outs)
{
int mpidx = i * 3;
int cidx = i * 4;
Vector3f vk((*mPoints)[mpidx], (*mPoints)[mpidx + 1], (*mPoints)[mpidx + 2]);
float alpha = (*unknowns)(cidx);
Vector3f beta((*unknowns)(cidx + 1), (*unknowns)(cidx + 2), (*unknowns)(cidx + 3));
Vector3f diff = p - vk;
Vector3f grad(derivx(diff(0), diff(1), diff(2)), derivy(diff(0), diff(1), diff(2)), derivz(diff(0), diff(1), diff(2)));
//std::cout << alpha << std::endl;
//std::cout << beta(0) << "," << beta(1) << "," << beta(2) << " " << diff(0) << "," << diff(1) << "," << diff(2) << " " << grad(0) << "," << grad(1) << "," << grad(2) << std::endl;
out += alpha * smoothfunc(diff(0), diff(1), diff(2)) - beta.dot(grad);
//std::cout << out << std::endl;
}*/ |
19,955 | #include <stdlib.h>
#include <stdint.h>
//#include "cuda_utils.h"
#define ALPHABET_SIZE 128
#define MAX_THREADS_PER_BLOCK 1024
#define min(a,b) (((a) < (b)) ? (a) : (b))
// One thread per pixel, ALPHABET_SIZE groups of blocks: for each letter L,
// mark precompute[...] = 0 where the pixel equals L and 1 elsewhere.
// prop packs: [0]=BLOCK_CHUNK, [1]=image_size, [2]=current_row_num,
// [3]=MAX_THREADS_PER_BLOCK.
__global__ void init_precompute( uint8_t* precompute, uint8_t* pixel_row, int* prop )
{
    int BLOCK_CHUNK = prop[0];
    int image_size = prop[1];
    int current_row_num = prop[2];
    int mtpb = prop[3];
    int chunk = blockIdx.x % BLOCK_CHUNK;      // this block's position within the row
    int my_letter = blockIdx.x / BLOCK_CHUNK;  // alphabet symbol this block tests for
    uint8_t pix_val = pixel_row[ chunk * blockDim.x + threadIdx.x ];
    // Compute the destination index once and do a SINGLE store. The original
    // unconditionally wrote 1 and then conditionally overwrote with 0,
    // costing a second global transaction for every matching pixel.
    int idx = ( my_letter * image_size + current_row_num ) + threadIdx.x + ( chunk * mtpb );
    precompute[ idx ] = ( pix_val == (uint8_t) my_letter ) ? 0 : 1;
}
|
19,956 | /*
* Parallel Processing Teaching Toolkit
* CUDA - Example 03
* Vector Multiplication
* https://github.com/javierip/parallel-processing-teaching-toolkit
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <time.h>
/**
* CUDA Kernel Device code
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
**/
/**
 * Element-wise product kernel: C[i] = A[i] * B[i] for every i < numElements.
 * One thread per element; threads beyond the array bounds do nothing.
 */
__global__ void
vectorMul(const float *A, const float *B, float *C, int numElements)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] * B[idx];
}
/// Functions Propotypes
//Get CUDA Platform Info
void get_CUDAinfo ();
//Free GPU Memory
bool free_memGPU (float *arr1,float *arr2,float *arr3);
//Load the CPU vectors
bool init_vectors_CPU (float *arr_A,float *arr_B,int elements);
//Operation with CPU
bool multi_cpu(float *arr_A,float *arr_B,float *arr_C,int elements);
//Check multiplication CPU vs GPU
bool check_multi(float *arr_A,float *arr_B,float *arr_C,int elements);
//Check CUDA Errors
bool check (cudaError_t error );
/**
* Host main routine
*/
/**
 * Host main routine: multiplies two random vectors on the GPU, verifies the
 * GPU result, then times a CPU reference implementation.
 */
int
main(void)
{
    // Error code for CUDA calls; each call's own result is captured so the
    // printed message reports the real cause (the original always passed a
    // stale `err` holding cudaSuccess, and two printf calls used %s with no
    // argument -- undefined behavior).
    cudaError_t err = cudaSuccess;
    get_CUDAinfo();
    clock_t start, end;
    double time_cpu,time_gpu;
    int numElements = 50000;
    size_t size = numElements * sizeof(float);
    printf("[Vector Multiplication of %d elements]\n", numElements);
    // Host buffers: h_C receives the GPU result, h_ref the CPU reference
    // (kept separate so the GPU output can actually be verified).
    float *h_A = (float *)malloc(size);
    float *h_B = (float *)malloc(size);
    float *h_C = (float *)malloc(size);
    float *h_ref = (float *)malloc(size);
    if(!init_vectors_CPU(h_A,h_B,numElements))printf( "Failed to init vectors!\n");
    if (h_C == NULL || h_ref == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        return 0;
    }
    float *d_A = NULL;
    float *d_B = NULL;
    float *d_C = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        return 0;
    }
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        return 0;
    }
    err = cudaMalloc((void **)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        return 0;
    }
    printf("Copy input data from the host memory to the CUDA device\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        return 0;
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        return 0;
    }
    int threadsPerBlock = 256;
    int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    // Kernel launches are asynchronous: synchronize before stopping the
    // clock, otherwise only the launch overhead is measured (original bug).
    start = clock();
    vectorMul<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    cudaDeviceSynchronize();
    end = clock();
    time_gpu= (double ) (end - start) / CLOCKS_PER_SEC * 1000;
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorMul kernel (error code %s)!\n", cudaGetErrorString(err));
        return 0;
    }
    printf("Copy output data from the CUDA device to the host memory\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        return 0;
    }
    // Verify the GPU result while it is still in h_C. The original called
    // multi_cpu into h_C first, so check_multi only ever validated the CPU
    // result against itself and never tested the GPU at all.
    if(check_multi(h_A,h_B,h_C,numElements)) printf("Test PASSED\n");
    // Time the CPU version into a separate buffer.
    start = clock();
    multi_cpu(h_A,h_B,h_ref,numElements);
    end = clock();
    time_cpu= (double ) (end - start) / CLOCKS_PER_SEC * 1000;
    printf("Time GPU: %lf\n", time_gpu);
    printf("Time CPU: %lf\n", time_cpu);
    if(!free_memGPU(d_A,d_B,d_C))return 0;
    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_ref);
    printf("Done\n");
    return 0;
}
///Functions
// Enumerate all CUDA devices and print name, memory clock, bus width and the
// derived peak memory bandwidth for each.
void get_CUDAinfo (){
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Number: %d\n", dev);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        // Peak bandwidth: 2 transfers/clock (DDR) * clock * bus-bytes.
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
}
// Release the three device buffers, reporting and bailing out on the first
// failure. Returns 1 on success, 0 on any cudaFree error.
bool free_memGPU (float *arr1,float *arr2,float *arr3){
    cudaError_t status = cudaFree(arr1);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(status));
        return 0;
    }
    status = cudaFree(arr2);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(status));
        return 0;
    }
    status = cudaFree(arr3);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(status));
        return 0;
    }
    printf("Resources free from CUDA Device\n");
    return 1;
}
// Fill both host vectors with uniform pseudo-random values in [0, 1].
// Returns 0 (and reports) if either buffer pointer is NULL, 1 otherwise.
bool init_vectors_CPU (float *arr_A,float *arr_B,int elements){
    // Guard against failed host allocations before touching the buffers.
    if (!arr_A || !arr_B)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        return 0;
    }
    for (int k = 0; k < elements; ++k)
    {
        arr_A[k] = rand()/(float)RAND_MAX;
        arr_B[k] = rand()/(float)RAND_MAX;
    }
    return 1;
}
// CPU reference: element-wise product arr_C = arr_A * arr_B. Always returns 1.
bool multi_cpu(float *arr_A,float *arr_B,float * arr_C,int elements){
    for (int k = 0; k < elements; ++k)
        arr_C[k] = arr_A[k] * arr_B[k];
    return 1;
}
// Verify arr_C against a freshly computed element-wise product of arr_A and
// arr_B within an absolute tolerance of 1e-5. Reports the first mismatch on
// stderr and returns 0; returns 1 when every element matches.
bool check_multi(float *arr_A,float *arr_B,float *arr_C,int elements){
    for (int k = 0; k < elements; ++k)
    {
        const float expected = arr_A[k] * arr_B[k];
        if (fabs(expected - arr_C[k]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", k);
            return 0;
        }
    }
    return 1;
}
// True iff the CUDA call reported success.
bool check (cudaError_t error ){
    return (error == cudaSuccess) ? 1 : 0;
}
|
19,957 | #include "includes.h"
// One Floyd-Warshall relaxation step for pivot k: each block handles one
// matrix row (blockIdx.y); each thread one column. `best` (row->k distance)
// is loaded once per block into shared memory.
__global__ void gpu_floyd_kernel(int k, int* adjacency_mtx, int* paths, int size)
{
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	__shared__ int best;
	// BUG FIX: the shared load + barrier must be reached by ALL threads of
	// the block. The original returned early for col >= size BEFORE
	// __syncthreads(), which is undefined behavior whenever size is not a
	// multiple of blockDim.x.
	if (threadIdx.x == 0)
		best = adjacency_mtx[size * blockIdx.y + k];
	__syncthreads();
	if (col >= size) return;
	if (best == INF) return;          // no path row -> k
	int tmp_b = adjacency_mtx[k * size + col];
	if (tmp_b == INF) return;         // no path k -> col
	int idx = size * blockIdx.y + col;
	int cur = best + tmp_b;
	// Relax: going through k is shorter than the current row -> col distance.
	if (cur < adjacency_mtx[idx]) {
		adjacency_mtx[idx] = cur;
		paths[idx] = k;
	}
}
19,958 | #include <cuda_runtime.h>
#include <vector>
#include <iostream>
#include <iomanip>
#include <sstream>
#include <string>
typedef float real_t;
static const int TILE_DIM = 21; //initialized in main
// Naive transpose: reads along rows of idata are coalesced, writes to odata
// are strided. Assumes the launch grid exactly covers a width x height matrix.
__global__ void transpose( real_t *odata, real_t *idata, int width, int height) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    odata[ col * height + row ] = idata[ row * width + col ];
}
/// Transpose matrix by first copying element into local cache.
/// Coalescing happens when tiles are multiple of 16
///# select indices of element to copy into shared memory
///# select target block indides: if input block is I,J target block is J, I
///# use threadIdx to select target element in target block
///# copy transposed element from local share; if elements are copied into local
/// share in [threadIdx.y][threadIdx.x] then target element must be copied from
/// [threadIdx.x][threadIdx.y]
// Tiled transpose: stage a TILE_DIM x TILE_DIM tile in shared memory so that
// both the global read and the global write can be coalesced. Input block
// (I, J) becomes output block (J, I); the transpose itself happens by reading
// the tile back with swapped thread indices.
__global__ void transposeCoalesced( real_t *odata, real_t *idata, int width, int height ) {
    __shared__ real_t tile[ TILE_DIM ][ TILE_DIM /*+1*/ ];
    const int srcX = blockIdx.x * blockDim.x + threadIdx.x;
    const int srcY = blockIdx.y * blockDim.y + threadIdx.y;
    tile[ threadIdx.y ][ threadIdx.x ] = idata[ srcY * width + srcX ];
    __syncthreads();  // the whole tile must be written before any thread reads it
    const int dstX = blockIdx.y * blockDim.y + threadIdx.x;
    const int dstY = blockIdx.x * blockDim.x + threadIdx.y;
    odata[ dstY * height + dstX ] = tile[ threadIdx.x ][ threadIdx.y ];
}
// Fill the matrix with its own linear index, cast to real_t. Assumes one
// thread per element with the grid x-dimension spanning a full row.
__global__ void initMatrix( real_t* in ) {
    const int col = threadIdx.x + blockDim.x * blockIdx.x;
    const int row = threadIdx.y + blockDim.y * blockIdx.y;
    const int rowPitch = gridDim.x * blockDim.x;
    const int linear = col + rowPitch * row;
    in[ linear ] = ( real_t ) linear;
}
// Print an r x c row-major matrix to stdout, one row per line,
// space-separated, followed by a blank line.
void printMatrix( const real_t* m, int r, int c ) {
    for( int row = 0; row < r; ++row ) {
        for( int col = 0; col < c; ++col )
            std::cout << m[ row * c + col ] << ' ';
        std::cout << '\n';
    }
    std::cout << std::endl;
}
// Parse a real_t from a C string; throws std::runtime_error on NULL input.
// On a parse failure the default-constructed value (0) is returned,
// matching istream extraction semantics.
real_t strToReal( const char* str ) {
    if( !str ) throw std::runtime_error( "strToReal - NULL srting");
    real_t value = real_t();
    std::istringstream parser( str );
    parser >> value;
    return value;
}
// Benchmark driver: builds a ROWS x COLUMNS matrix (both multiples of
// TILE_DIM), transposes it once with the naive kernel and once with the
// shared-memory (coalesced) kernel, and reports each elapsed time either as
// plain text or as a CSV line.
int main(int argc, char** argv ) {
const int DEFROWS = 30;
const int DEFCOLUMNS = 40;
int ROWS = DEFROWS * TILE_DIM;
int COLUMNS = DEFCOLUMNS * TILE_DIM;
bool CSV = false;
// Parse "<tile rows> <tile columns> [csv]"; with a wrong argument count the
// defaults above are used and a usage hint is printed.
if( argc < 3 || argc > 4 ) {
std::cout << "usage: " << argv[ 0 ] << " <# tile rows> <# tile columns> [csv]\n";
std::cout << " using default: tile size=" << TILE_DIM << " tile rows=" << ROWS << " tile columns=" << COLUMNS << std::endl;
} else {
ROWS = TILE_DIM * strToReal( argv[ 1 ] );
COLUMNS = TILE_DIM * strToReal( argv[ 2 ] );
if( argc == 4 ) { CSV = std::string( argv[ 3 ] ) == "csv"; }
}
// One block per tile, one thread per tile element.
const dim3 BLOCKS( COLUMNS / TILE_DIM, ROWS / TILE_DIM );
const dim3 THREADS_PER_BLOCK( TILE_DIM, TILE_DIM );
const size_t SIZE = ROWS * COLUMNS * sizeof( real_t );
real_t* dev_in = 0;
real_t* dev_out = 0;
std::vector< real_t > outmatrix( ROWS * COLUMNS, 0.f );
cudaMalloc( &dev_in, SIZE );
cudaMalloc( &dev_out, SIZE );
// Initialize on the device: one single-thread block per element (simple
// setup code, not part of what is being timed).
initMatrix<<<dim3( COLUMNS, ROWS ), 1>>>( dev_in );
cudaMemcpy( &outmatrix[ 0 ], dev_in, SIZE, cudaMemcpyDeviceToHost );
//std::cout << "INPUT MATRIX - " << ROWS << " rows, " << COLUMNS << " columns" << std::endl;
//printMatrix( &outmatrix[ 0 ], ROWS, COLUMNS );
cudaEvent_t start = cudaEvent_t();
cudaEvent_t stop = cudaEvent_t();
float elapsed = 0.f;
cudaEventCreate( &start );
cudaEventCreate( &stop );
// --- naive transpose, timed with CUDA events ---
cudaEventRecord( start, 0 );
transpose<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_out, dev_in, COLUMNS, ROWS );
cudaEventRecord( stop, 0);
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsed, start, stop );
cudaMemcpy( &outmatrix[ 0 ], dev_out, SIZE, cudaMemcpyDeviceToHost );
//std::cout << "\nOUTPUT MATRIX - " << COLUMNS << " rows, " << ROWS << " columns" << std::endl;
//printMatrix( &outmatrix[ 0 ], COLUMNS, ROWS );
if( CSV ) {
std::cout << "default," << ROWS << 'x' << COLUMNS << ',' << TILE_DIM << ','
<< elapsed << std::endl;
}
else {
std::cout << "[default] elapsed time (ms): " << elapsed << std::endl;
}
// --- shared-memory (coalesced) transpose, timed the same way ---
cudaEventRecord( start, 0 );
transposeCoalesced<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_out, dev_in, COLUMNS, ROWS );
cudaEventRecord( stop, 0);
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsed, start, stop );
cudaMemcpy( &outmatrix[ 0 ], dev_out, SIZE, cudaMemcpyDeviceToHost );
//std::cout << "\nOUTPUT MATRIX - " << COLUMNS << " rows, " << ROWS << " columns" << std::endl;
//printMatrix( &outmatrix[ 0 ], COLUMNS, ROWS );
if( CSV ) {
std::cout << "coalesced," << ROWS << 'x' << COLUMNS << ',' << TILE_DIM << ','
<< elapsed << std::endl;
}
else {
std::cout << "[coalesced] elapsed time (ms): " << elapsed << std::endl;
}
// Release device memory and timing events.
cudaFree( dev_in );
cudaFree( dev_out );
cudaEventDestroy( start );
cudaEventDestroy( stop );
return 0;
}
|
19,959 | #include<stdio.h>
// Prints each launched thread's block and thread index via device printf
// (message is Turkish: "Block numarasi" = block number, "is parcacigi
// numarasi" = thread number).
__global__ void print_kernel(){
printf("Block numarasi %d\t is parcacigi numarasi %d\n",blockIdx.x,threadIdx.x);
}
int main(){
    // Launch 5 blocks of 3 threads each, then synchronize so the device
    // printf buffer is flushed before the process exits.
    print_kernel<<<5,3>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
19,960 |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Times an n x n integer matrix multiplication on the CPU, using pinned host
// memory (cudaMallocHost) for the buffers.
int main() {
    int n;
    printf("Please enter the size of matrix: \n");
    // BUG FIX: the original ignored scanf's return value, leaving n
    // uninitialized on bad input. Also reject non-positive sizes.
    if (scanf("%d", &n) != 1 || n <= 0) {
        fprintf(stderr, "Invalid matrix size.\n");
        return 1;
    }
    int *a, *b, *c;
    cudaMallocHost((void**)&a, sizeof(int) * n * n);
    cudaMallocHost((void**)&b, sizeof(int) * n * n);
    cudaMallocHost((void**)&c, sizeof(int) * n * n);
    if (!a || !b || !c) {
        fprintf(stderr, "Host allocation failed.\n");
        return 1;
    }
    // Random 0/1 matrices. rand() % 2 is already an integer; the original
    // wrapped it in a no-op round().
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            a[i * n + j] = rand() % 2;
            b[i * n + j] = rand() % 2;
        }
    }
    printf("Start...\n");
    clock_t start_time = clock();
    // Classic triple loop: c = a * b.
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            int tmp = 0;
            for (int k = 0; k < n; k++)
                tmp += a[i * n + k] * b[k * n + j];
            c[i * n + j] = tmp;
        }
    }
    clock_t end_time = clock();
    printf("Time of calculating %dx%d matrix using CPU is %f ms.\n", n, n, static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);
    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
    return 0;
}
|
19,961 |
/************************************************************************
Source Code : warpDivergence.cu
Objective : To demonstrate the difference in bandwidth achieved when
threads within a warp follow different execution paths
This Program measures the bandwidth of global memory
for the initialization operation [a(i) = value] using
NVIDIA GPU which has a SIMT architecture
Output : Bandwidth achieved and timing(average) for initialization
kernels with varying no. of execution paths
Modified : Aug 2011
Author : RarchK
****************************************************************************/
#include <stdio.h>
#include <cuda.h>
#define ARRAY_SIZE 5120000
#define BLOCK_SIZE 128
#define NTIMES 100
#define HLINE "--------------------------------------------------------------\n"
void printResults();
void printDeviceDetails();
void cudaSafeMalloc(void ** , size_t );
void CudaGetDeviceProperties(cudaDeviceProp *, int);
void CudaGetDevice(int *);
void checkCudaErrors();
float avgTime[4] = {0};
char *label[] = {"With 3 branch instructions ","With 2 branch instructions ","With 1 branch instruction ","With no branch instructions"};
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Warp Divergence Kernel1 : 4 execution paths within a warp
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Worst case for this demo: the modulo chain yields 4 distinct execution
// paths inside a warp, so the warp serially executes all of them (masked).
__global__ void warpDivergence1(float *dest, float value1,float value2, int size)
{
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if(i >= size)
		return;
	if(i % 5 == 0)
		dest[i] = value1;
	else if(i % 3 == 0)
		dest[i] = value2;
	else if(i % 2 == 0)
		dest[i] = value1;
	else
		dest[i] = value2;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Warp Divergence Kernel2 : 3 execution paths within a warp
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Three execution paths within a warp. Note: the middle branch deliberately
// tests threadIdx.x (the within-block index), not the global index.
__global__ void warpDivergence2(float *dest, float value1, float value2, int size)
{
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if(i >= size)
		return;
	if ( i % 3 == 0)
		dest[i] = value1;
	else if( threadIdx.x % 2 == 0)
		dest[i] = value2;
	else
		dest[i] = value1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Warp Divergence Kernel3 : 1 branch within a warp leading to 2 execution paths within a warp
///////////////////////////////////////////////////////////////////////////////////////////////////////
// One even/odd branch -> two execution paths per warp.
__global__ void warpDivergence3(float *dest, float value1, float value2, int size)
{
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if(i < size)
		dest[i] = (i % 2 == 0) ? value1 : value2;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Warp Divergence Kernel4 : No branches within a warp i.e. single execution path
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Baseline: a single uniform execution path -- every in-range thread writes
// value1 (value2 is unused, kept only so the signature matches the other
// kernels in this comparison).
__global__ void warpDivergence4(float *dest, float value1, float value2, int size)
{
	const int i = threadIdx.x + blockIdx.x * blockDim.x;
	if(i < size)
		dest[i] = value1;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////
// Main function to time all the kernels
//////////////////////////////////////////////////////////////////////////////////////////////////////
// Times the four warp-divergence kernels NTIMES each with CUDA events and
// accumulates per-kernel averages (skipping the first, warm-up, iteration).
int main()
{
	float *d_dest;
	int i,j;
	float elapsedTimes[4][NTIMES];
	cudaEvent_t start,stop;
	size_t size = ARRAY_SIZE *sizeof(float);
	// event creation, which will be used for timing the code
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaSafeMalloc((void **) &d_dest, size);
	int gridSize = ARRAY_SIZE/BLOCK_SIZE;
	if(ARRAY_SIZE % BLOCK_SIZE != 0)
		gridSize += 1;
	dim3 grid,block;
	block.x = BLOCK_SIZE;
	grid.x = gridSize;
	// Table of the four kernels so the timing loop is written only once.
	void (*kernels[4])(float *, float, float, int) =
		{ warpDivergence1, warpDivergence2, warpDivergence3, warpDivergence4 };
	for(i=0; i<NTIMES; i++)
	{
		for(j=0; j<4; j++)
		{
			cudaEventRecord(start,0);
			// BUG FIX: the original passed `size` -- the BYTE count
			// (ARRAY_SIZE * sizeof(float)) -- as the kernels' element-count
			// parameter, making the idx < size bounds check meaningless.
			kernels[j]<<<grid, block>>>(d_dest, 1.0f, 2.0f, ARRAY_SIZE);
			cudaEventRecord(stop,0);
			cudaEventSynchronize(stop);
			cudaEventElapsedTime(&elapsedTimes[j][i],start,stop);
			checkCudaErrors();
		}
	}
	//Computing average time taken
	for(i=0; i<4; i++)
	{
		for(j=1; j<NTIMES; j++) //skipping first iteration
		{
			avgTime[i] += elapsedTimes[i][j];
		}
		avgTime[i] = avgTime[i]/(NTIMES-1);
	}
	// Printing the results
	printResults();
	cudaFree(d_dest);  // the original leaked d_dest
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Host Function to print the results
//
/////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Print the benchmark report: header, device summary, problem size, then one
// line of bandwidth (GB/s) and average time per kernel variant.
void printResults()
{
	printf("\n\n");
	printf(HLINE);
	printf("WARP DIVERGENCE DEMONSTRATION\n");
	printf(HLINE);
	printDeviceDetails();
	printf(HLINE);
	printf("Array Size = %llu\n",(unsigned long long)ARRAY_SIZE);
	printf("Block Size = %d\n",(int)BLOCK_SIZE);
	printf(HLINE);
	printf("Initialization Kernels Rate (GB/s) Avg time \n");
	for (int k = 0; k < 4; ++k)
	{
		// bytes written / ms elapsed, scaled to GB/s
		printf("%s%11.4f %11.4f \n", label[k], 1.0E-06 * (ARRAY_SIZE * sizeof(float))/avgTime[k],avgTime[k]);
	}
	printf(HLINE);
}
// Query the currently active device and print a short summary: name, compute
// capability, total global memory, and warp size.
void printDeviceDetails()
{
	int deviceId = 0;
	cudaDeviceProp prop;
	CudaGetDevice(&deviceId);
	CudaGetDeviceProperties(&prop, deviceId);
	printf("Device Name is %s\n", prop.name);
	printf("Compute Capability of this device is %d.%d\n",prop.major,prop.minor);
	printf("Total Global Memory available = %f GB\n",(float)prop.totalGlobalMem * 1.0E-09);
	printf("Warp Size in threads = %d\n",prop.warpSize);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Wrapper Fuctions for error checking
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// cudaMalloc wrapper: prints the error and aborts the program (after
// resetting the device) on failure.
void cudaSafeMalloc(void ** devicePtr, size_t size)
{
	// Check this call's own return value rather than cudaGetLastError(),
	// which could surface an unrelated earlier sticky error.
	cudaError_t error = cudaMalloc(devicePtr, size);
	if(error != cudaSuccess)
	{
		printf("Cuda Error: %s\n",cudaGetErrorString(error));
		cudaDeviceReset();  // cudaThreadExit() is deprecated since CUDA 4.0
		exit(-1);
	}
}
// cudaGetDeviceProperties wrapper: prints the error and aborts on failure.
void CudaGetDeviceProperties(cudaDeviceProp *devicePropPtr, int deviceId)
{
	// Check this call's own return value rather than cudaGetLastError().
	cudaError_t error = cudaGetDeviceProperties(devicePropPtr, deviceId);
	if(error != cudaSuccess)
	{
		printf("Cuda Error: %s\n",cudaGetErrorString(error));
		cudaDeviceReset();  // cudaThreadExit() is deprecated since CUDA 4.0
		exit(-1);
	}
}
// cudaGetDevice wrapper: prints the error and aborts on failure.
void CudaGetDevice(int *deviceIdPtr)
{
	// Check this call's own return value rather than cudaGetLastError().
	cudaError_t error = cudaGetDevice(deviceIdPtr);
	if(error != cudaSuccess)
	{
		printf("Cuda Error: %s\n",cudaGetErrorString(error));
		cudaDeviceReset();  // cudaThreadExit() is deprecated since CUDA 4.0
		exit(-1);
	}
}
// Abort the program (after resetting the device) if any preceding CUDA call
// or kernel launch recorded an error.
void checkCudaErrors()
{
	cudaError_t error = cudaGetLastError();
	if(error != cudaSuccess)
	{
		printf("Cuda Error: %s\n",cudaGetErrorString(error));
		cudaDeviceReset();  // cudaThreadExit() is deprecated since CUDA 4.0
		exit(-1);
	}
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
19,962 | /**
* @file : params_kernelf_og.cu
* @brief : Original implementation from njuffa, verbotim;
* CUDA kernel functions as parameters with CUDA C++14, CUDA Unified Memory Management
* @details : Original implementation from njuffa, verbotim
* std::function vs. function pointer in C++11, C++14, and now in CUDA
* std::function vs. function pointer for CUDA kernel functions (i.e. __global__ )
*
* @author : Ernest Yeung <ernestyalumni@gmail.com>
* @date : 20171020
* @ref : https://devtalk.nvidia.com/default/topic/487190/kernel-functions-as-parameters-/
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc -std=c++14 params_kernelf_og.cu -o params_kernelf_og.exe
*
* */
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_COUNT 240
#define THREAD_COUNT 128
// pf = processing function
// C style
typedef float (*pf)(float a, float b);
/** @brief function pointer example, pf = "processing function"
* @ref Scott Meyers, Effective Modern C++, pp. 63, Item 9
* */
//using pf = float (*)(float, float);
// Device-side binary minimum, used via the `pf` function-pointer table;
// fminf returns the non-NaN operand when exactly one input is NaN.
__device__ float minimum(float a, float b)
{
return fminf(a,b);
};
// Device-side binary maximum, used via the `pf` function-pointer table;
// fmaxf returns the non-NaN operand when exactly one input is NaN.
__device__ float maximum(float a, float b)
{
return fmaxf(a,b);
};
/**
* @brief
* @ref Scott Meyers, Effective Modern C++, pp. 63, Item 9
*/
__device__ pf func_d[2] = { maximum, minimum };
__shared__ float partExtr[THREAD_COUNT];
// Block-cooperative reduction of x[0..n) with the associative combiner
// `func` (min or max); each block writes its partial result to
// res[blockIdx.x], and the host combines the per-block partials afterwards.
// NOTE(review): partExtr is declared as a file-scope __shared__ array (see
// above) instead of a function-local one -- confirm this placement is
// intentional; the commented-out local declaration below suggests it was
// moved.
__device__ void minmax(float *x, float *res, int n, pf func)
{
// __shared__ float partExtr[THREAD_COUNT];
int i;
int tid = threadIdx.x;
// Seed with x[0]; valid because the host wrapper rejects n < 1.
float extr = x[0];
// Grid-stride accumulation: each thread folds every
// (gridDim.x * THREAD_COUNT)-th element into its private extremum.
for (i = THREAD_COUNT*blockIdx.x+tid; i < n; i += gridDim.x*THREAD_COUNT) {
extr = func (extr, x[i]);
}
partExtr[tid] = extr;
// Shared-memory tree reduction; barrier before each halving step so the
// previous step's writes are visible.
for (i = THREAD_COUNT >> 1; i > 0; i >>= 1) {
__syncthreads();
if (tid < i) {
partExtr[tid] = func(partExtr[tid], partExtr[tid+i]);
}
}
// Thread 0 publishes this block's partial extremum.
if (tid == 0) {
res[blockIdx.x] = partExtr[tid];
}
}
// Kernel entry point: selects the combiner from the device-side
// function-pointer table func_d (index 0 = maximum, 1 = minimum) and
// delegates to the shared reduction routine.
__global__ void minmax_kernel(float *x, float *res, int n, int findmin)
{
minmax(x, res, n, func_d[findmin]);
}
// Host driver: reduce x[0..n) to its maximum (findmin==0) or minimum
// (findmin!=0). The GPU produces BLOCK_COUNT per-block partials; the host
// folds them into the final value. Returns NaN for an empty input.
float findExtremum (float *x, int n, int findmin)
{
	pf func_h[2] = { fmaxf, fminf };
	if (n < 1) return sqrtf(-1.0f); // NaN
	float *res_d = NULL;
	float *x_d = NULL;
	cudaMalloc((void**)&res_d, BLOCK_COUNT*sizeof(res_d[0]));
	cudaMalloc((void**)&x_d, n * sizeof(x_d[0]));
	cudaMemcpy(x_d, x, n * sizeof(x_d[0]), cudaMemcpyHostToDevice);
	minmax_kernel<<<BLOCK_COUNT,THREAD_COUNT>>>(x_d, res_d, n, !!findmin);
	float *res_h = (float *)malloc (BLOCK_COUNT * sizeof(res_h[0]));
	if (!res_h) {
		fprintf(stderr, "res_h allocation failed\n");
		exit(EXIT_FAILURE);
	}
	cudaMemcpy(res_h, res_d, BLOCK_COUNT * sizeof(res_d[0]), cudaMemcpyDeviceToHost);
	cudaFree(res_d);
	cudaFree(x_d);
	// Fold the per-block partials on the host with the matching combiner.
	float result = res_h[0];
	for (int i = 1; i < BLOCK_COUNT; i++) {
		result = func_h[findmin](result, res_h[i]);
	}
	free(res_h);
	return result;
}
int main (void)
{
	// Small fixed test vector; compute and print its min and max.
	constexpr const int ELEM_COUNT = 8 ;
	float data[ELEM_COUNT] = {-1.3f, 2.4f, 3.5f, -2.3f, 4.5f, 0.4f, -5.3f, -1.6f};
	const float lo = findExtremum(data, ELEM_COUNT, 1);
	const float hi = findExtremum(data, ELEM_COUNT, 0);
	printf("min=% 13.6e max=% 13.6e\n", lo, hi);
	return EXIT_SUCCESS;
}
/**
* @brief std::function
* @details "The type of a std::function-declared variable holding a closure
* is an instantiation of std::function template, and that has
* fixed size for any given signature.
* This size may not be adequate for the closure it's asked to store,
* and when that's the case, std::function constructor will allocate
* heap memory to store the closure.
* @ref Scott Meyers, Effective Modern C++, pp. 39 Item 5
* */
|
19,963 | /*
xor_train.cu
Implementation of a XOR neural network in CUDA, including
network training using backpropagation.
Andrei de A. Formiga, 2012-03-31
*/
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
// constant for the RNG seed
#define SEED 419217ULL
//#define SEED 419229ULL
// maximum absolute value for random initialization of weights
#define MAX_ABS 1.5f
// learning rate
#define LRATE 0.75f
// total number of weights
#define NWEIGHTS 9
// number of active neurons
#define NEURONS 3
#define NEURONS_HIDDEN 2
#define NEURONS_OUT 1
// number of deltas (= number of neurons)
#define NDELTAS NEURONS
#define DELTAS_HIDDEN NEURONS_HIDDEN
#define DELTAS_OUT NEURONS_OUT
#define NCASES 4
#define INPUT_SIZE 2
#define HIDDEN_SIZE 2
#define OUTPUT_SIZE 1
// the network weights on the device
float *dev_weights;
const int l1w_off = 0; // offset into the weight array for layer 1 weights
const int l2w_off = 6; // offset into the weight array for layer 2 weights
// the random number generator
curandGenerator_t gen;
// device input
float *dev_in;
// hidden outputs and activations (on device)
float *dev_hidden;
// outputs and activations for final layer (on device)
float *dev_out;
// inputs
float inputs[] = { 0.0f, 0.0f, 0.0f, 1.0f,
1.0f, 0.0f, 1.0f, 1.0f };
const int ncases = 4;
const int input_size = 2;
const int hidden_size = 2;
const int out_size = 1;
// desired outputs
float outputs[] = { 0.1f, 0.9f, 0.9f, 0.1f };
float *dev_dout; // for the device
// deltas and derivatives (on the device)
float *dev_delta_h;
float *dev_delta_o;
float *dev_deriv;
// errors (device)
float *dev_err;
// Logistic sigmoid activation: maps any real t into (0, 1).
__device__ float asigmoid(float t)
{
    const float e = expf(-t);
    return 1.0f / (1.0f + e);
}
// Sigmoid derivative expressed via the neuron's output value:
// if y = sigmoid(t) then dy/dt = y * (1 - y).
__device__ float dsigmoid(float output)
{
    const float complement = 1.0f - output;
    return output * complement;
}
// --- initialization kernels ---------------------------------------------
// Map curand's uniform draws in (0.0, 1.0] to (-max_abs, +max_abs].
// NOTE: no bounds guard -- the launch (<<<1, nweights>>>) supplies exactly
// one thread per weight.
__global__ void normalize_weights(float *w, float max_abs)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const float centered = (w[idx] - 0.5f) / 0.5f;  // shift/scale to (-1, 1]
    w[idx] = centered * max_abs;
}
// random initialization for weights
// w must be an array of floats on the device
// Fills w with nweights uniform values in (-max_abs, +max_abs] using cuRAND.
// NOTE(review): curand return codes are not checked, and this local `gen`
// shadows the file-level curandGenerator_t of the same name.
void random_initialize_weights(float *w, float max_abs, int nweights)
{
curandGenerator_t gen;
// create and initialize generator
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_XORWOW);
curandSetPseudoRandomGeneratorSeed(gen, SEED);
curandSetGeneratorOrdering(gen, CURAND_ORDERING_PSEUDO_SEEDED);
// draws in (0.0, 1.0], written directly into device memory
curandGenerateUniform(gen, w, nweights);
// one thread per weight rescales to (-max_abs, +max_abs]
normalize_weights<<<1, nweights>>>(w, max_abs);
curandDestroyGenerator(gen);
}
// --- forward propagation kernels ----------------------------------------
// kernel for hidden layer
// Forward pass of the hidden layer for all training cases at once.
// Launched as <<<NCASES, NEURONS_HIDDEN>>> (see batch_train): one block per
// training case, one thread per hidden neuron.
__global__ void forward_hidden(float *w, float *input, float *hidden)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int input_ix = blockIdx.x * 2; // 2 neurons in the previous layer
int toff = l1w_off + threadIdx.x * 3; // 3 weights per neuron in hidden layer
float h;
// weighted sum: bias weight (times constant 1.0) plus the case's two inputs
h = w[toff] * 1.0f +
w[toff + 1] * input[input_ix] +
w[toff + 2] * input[input_ix+1];
hidden[tid] = asigmoid(h);
}
// kernel for output layer
// Forward pass of the single output neuron for all training cases.
// Launched as <<<NCASES, NEURONS_OUT>>>; with one output neuron,
// threadIdx.x is 0 and the neuron uses weights w[6..8] (l2w_off == 6).
__global__ void forward_output(float *w, float *hidden, float *output)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int hidden_ix = blockIdx.x * 2; // 2 neurons in the previous layer
int toff = l2w_off + threadIdx.x; // 3 weights per neuron, but only 1 neuron
float o;
o = w[toff] * 1.0f +
w[toff+1] * hidden[hidden_ix] +
w[toff+2] * hidden[hidden_ix+1];
output[tid] = asigmoid(o);
}
// --- kernels for backpropagation ----------------------------------------
// Output-layer deltas and raw errors, one thread per (case, output neuron).
// launch grid: <<<N, 1>>> for N = number of cases, 1 output neuron.
// err = target - actual; delta = -err * sigmoid'(output).
__global__ void deltas_output(float *output, float *expected_out, float *deltao, float *err)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const float e = expected_out[idx] - output[idx];
    err[idx] = e;
    deltao[idx] = -e * dsigmoid(output[idx]);
}
// launch grid: <<<N, 2>>> for N = number of cases, 2 hidden neurons
// Backpropagate the output delta to the hidden layer:
// delta_h = w(out<-h) * delta_o * sigmoid'(hidden).
__global__ void deltas_hidden(float *hidden, float *w, float *deltah, float *deltao)
{
// tid is the index for deltah and hidden
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// oid is the corresponding index in the output layer
// there's only one node in output so 1 per block
int oid = blockIdx.x;
// wid is the index into the weights, taking into account the bias weight
// (skips the output neuron's bias weight at l2w_off)
int wid = l2w_off + threadIdx.x + 1;
deltah[tid] = w[wid] * deltao[oid] * dsigmoid(hidden[tid]);
}
// launch grid: <<<N, 6>>> for N cases, 6 weights for hidden layer
// Per-case error derivative for each hidden-layer weight:
// dE/dw = delta(node) * input(feeding that weight), with the bias weight
// fed by the constant 1.0. Derivatives for case c occupy
// deriv[c*NWEIGHTS + l1w_off .. +5].
__global__ void derivs_hidden(float *input, float *deltah, float *deriv)
{
// weights per node (2 inputs + bias)
const int wpn = INPUT_SIZE + 1;
// weight index
int wid = blockIdx.x * NWEIGHTS + l1w_off + threadIdx.x;
// delta index (3 weights per node: 2 inputs + bias)
int did = blockIdx.x * DELTAS_HIDDEN + (threadIdx.x / wpn);
// input index (3 weights per node)
int iid = blockIdx.x * INPUT_SIZE + (threadIdx.x % wpn) - 1;
// divergence due to bias weight; input[iid] is only evaluated in the
// non-bias branch of the ternary, so the transient iid == base-1 value
// for bias threads is never dereferenced
float in = (threadIdx.x % wpn == 0? 1.0f : input[iid]);
deriv[wid] = deltah[did] * in;
}
// launch grid: <<<N, 3>>> for N cases, 3 weights for output layer
// Per-case error derivative for each output-layer weight:
// dE/dw = delta_o * hidden activation (or 1.0 for the bias weight).
// Derivatives for case c occupy deriv[c*NWEIGHTS + l2w_off .. +2].
__global__ void derivs_output(float *hidden, float *deltao, float *deriv)
{
// weights per node (2 hidden neurons + bias)
const int wpn = NEURONS_HIDDEN + 1;
// weight index
int wid = blockIdx.x * NWEIGHTS + l2w_off + threadIdx.x;
// delta index (3 weights per node)
int did = blockIdx.x * DELTAS_OUT + (threadIdx.x / wpn);
// hidden index (3 weights per node)
int hid = blockIdx.x * HIDDEN_SIZE + (threadIdx.x % wpn) - 1;
// divergence due to bias weight; hidden[hid] only evaluated when hid >= 0
float h = (threadIdx.x % wpn == 0? 1.0f : hidden[hid]);
deriv[wid] = deltao[did] * h;
}
// <<<N, 9>>> for N cases, 9 derivs per case?
// Placeholder: an intended per-case derivative reduction that was never
// implemented. update_weights_nreduc currently performs the per-case sum
// itself, so this empty kernel is unused.
__global__ void sum_derivs(float *deriv)
{
}
// Gradient-descent weight update without a reduction kernel.
// launch grid: <<<1, NWEIGHTS>>>: each thread owns one weight, sums that
// weight's derivative across all training cases, then applies the
// learning-rate step.
__global__ void update_weights_nreduc(float *ws, float *deriv, float lrate)
{
    const int wid = blockIdx.x * blockDim.x + threadIdx.x;
    float grad = 0.0f;
    // sum all derivs for the same weight
    for (int c = 0; c < NCASES; ++c)
        grad += deriv[c * NWEIGHTS + wid];
    // update weight
    ws[wid] -= (lrate * grad);
}
// --- memory allocations and initialization ------------------------------
// Allocate n floats on the device; abort the process if allocation fails.
inline float* allocateFloatsDev(int n)
{
    float *ptr = NULL;
    const cudaError_t status = cudaMalloc((void**) &ptr, n * sizeof(float));
    if (status != cudaSuccess) {
        fprintf(stderr, "Could not allocate memory on the device\n");
        exit(1);
    }
    return ptr;
}
// Allocate every device buffer used by training.
// FIX: deltas and derivatives are produced per training case --
// deltas_hidden runs <<<NCASES, NEURONS_HIDDEN>>> and writes
// NCASES*NEURONS_HIDDEN entries (likewise deltas_output), and the derivs_*
// kernels index dev_deriv as blockIdx.x*NWEIGHTS + offset (up to
// NCASES*NWEIGHTS entries, also read back by update_weights_nreduc).
// The original per-network-sized allocations (NEURONS_HIDDEN, NEURONS_OUT,
// NWEIGHTS) were overflowed by those kernels.
void allocateDev(void)
{
    // weights (both layers share one array; see l1w_off / l2w_off)
    dev_weights = allocateFloatsDev(NWEIGHTS);
    // node values, one set per training case
    dev_in = allocateFloatsDev(ncases * input_size);
    dev_hidden = allocateFloatsDev(ncases * hidden_size);
    dev_out = allocateFloatsDev(ncases * out_size);
    // desired outputs and errors on device
    dev_dout = allocateFloatsDev(ncases * out_size);
    dev_err = allocateFloatsDev(ncases * out_size);
    // deltas and derivatives, one set per training case
    dev_delta_h = allocateFloatsDev(NCASES * NEURONS_HIDDEN);
    dev_delta_o = allocateFloatsDev(NCASES * NEURONS_OUT);
    dev_deriv = allocateFloatsDev(NCASES * NWEIGHTS);
}
// Release every device buffer acquired in allocateDev().
void freeDev(void)
{
    float *buffers[] = {
        dev_weights,                    // weights
        dev_in, dev_hidden, dev_out,    // node values
        dev_dout, dev_err,              // desired outputs and errors
        dev_delta_h, dev_deriv, dev_delta_o  // deltas and derivatives
    };
    const int count = (int)(sizeof(buffers) / sizeof(buffers[0]));
    for (int i = 0; i < count; ++i)
        cudaFree(buffers[i]);
}
// initialize memory on the device to run kernels
// initialize memory on the device to run kernels
// Allocates all device buffers, randomizes the weights, and uploads the
// training inputs and target outputs. cudaMemcpy return codes unchecked.
void memorySetup(void)
{
allocateDev();
// initialize weights
random_initialize_weights(dev_weights, MAX_ABS, NWEIGHTS);
// copy inputs and desired outputs
cudaMemcpy(dev_in, inputs, ncases * input_size * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_dout, outputs, ncases * out_size * sizeof(float), cudaMemcpyHostToDevice);
}
// Debug helper: copy `length` floats from device array devA to the host
// and print them space-separated on one line.
void printDevArray(float *devA, int length)
{
    float *hostA = (float*) malloc(length * sizeof(float));
    if (hostA == NULL)
        return;  // no staging buffer -> nothing sensible to print
    cudaMemcpy(hostA, devA, length * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < length; ++i)
        printf("%6.3f ", hostA[i]);
    printf("\n");
    free(hostA);  // FIX: the original leaked this buffer on every call
}
// --- training -----------------------------------------------------------
// Run `epochs` iterations of batch backpropagation over the four XOR cases.
// When calc_sse is nonzero the sum of squared errors is computed each epoch
// (printed when print_sse is also nonzero). Returns the SSE of the last
// epoch (0.0 if calc_sse is zero).
// All kernels run on the default stream, so launch order serializes them;
// the cudaMemcpy of dev_err synchronizes before the SSE is read.
float batch_train(int epochs, int calc_sse, int print_sse)
{
float err[NCASES * OUTPUT_SIZE];
float sse = 0.0f;
for (int e = 0; e < epochs; ++e) {
// forward propagation for all input cases
forward_hidden<<<4, 2>>>(dev_weights, dev_in, dev_hidden);
forward_output<<<4, 1>>>(dev_weights, dev_hidden, dev_out);
// printf("Outputs: ");
// printDevArray(dev_out, NCASES * OUTPUT_SIZE);
// backprop
deltas_output<<<4, 1>>>(dev_out, dev_dout, dev_delta_o, dev_err);
deltas_hidden<<<4, 2>>>(dev_hidden, dev_weights, dev_delta_h, dev_delta_o);
// printf("Deltas (hidden): ");
// printDevArray(dev_delta_h, DELTAS_HIDDEN);
// printf("Deltas (output): ");
// printDevArray(dev_delta_o, DELTAS_OUT);
// calculate SSE for this trial
// (computed from this epoch's forward pass, before the weight update)
if (calc_sse) {
sse = 0.0f;
cudaMemcpy(err, dev_err, NCASES * OUTPUT_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < NCASES * OUTPUT_SIZE; ++i) {
//printf("%6.3f ", err[i]);
sse += (err[i] * err[i]);
}
if (print_sse)
printf("SSE = %5.3f\n", sse);
}
// calculate derivatives
derivs_hidden<<<4, 6>>>(dev_in, dev_delta_h, dev_deriv);
derivs_output<<<4, 3>>>(dev_hidden, dev_delta_o, dev_deriv);
// update weights
update_weights_nreduc<<<1, NWEIGHTS>>>(dev_weights, dev_deriv, LRATE);
}
return sse;
}
// Fetch the weight vector from the device and print it on a single line.
void print_weights(void)
{
    float w_host[NWEIGHTS];
    cudaMemcpy(w_host, dev_weights, NWEIGHTS * sizeof(float), cudaMemcpyDeviceToHost);
    for (int k = 0; k < NWEIGHTS; ++k)
        printf("%6.4f ", w_host[k]);
    printf("\n");
}
// --- main ---------------------------------------------------------------
// Program entry point: set up device memory, train the XOR network with
// batch backpropagation, and report the final error, outputs and weights.
int main(int argc, char **argv)
{
    // FIX: the old banner claimed "5000 epochs" while the code trained for
    // 8000; the message now reports the actual epoch count.
    const int epochs = 8000;
    float sse;
    memorySetup();
    // print the generated weights
    printf("Randomly generated weights on the device:\n");
    print_weights();
    // do training
    printf("Batch training with %d epochs...\n", epochs);
    sse = batch_train(epochs, 1, 0);
    printf("Final SSE: %6.3f\n", sse);
    printf("Outputs: ");
    printDevArray(dev_out, NCASES * OUTPUT_SIZE);
    // weights after training
    printf("Weights after training:\n");
    print_weights();
    freeDev();
    return 0;
}
|
19,964 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define CHECK(call) { const cudaError_t error = call; if (error != cudaSuccess) { printf("Error: %s:%d, ", __FILE__, __LINE__); printf("code:%d, reason: %s\n", error, cudaGetErrorString(error)); exit(1); }}
// Accumulate Boltzmann-weighted variance products for one trajectory frame.
// One thread per grid point k; each thread walks every j and updates the
// column entry covariance[j*points + k] += variance[k]*variance[j]*weight,
// where weight = exp(-bias/(R*T)). bias appears to be the frame's bias
// energy in kcal/mol given R = 0.00198588, T = 300 -- TODO confirm units.
// grid_num is accumulated in unsigned long long (cast before multiply) so
// j*points cannot overflow int for large grids.
__global__ void compute_covariance (float *variance,float *covariance,int points,float bias) {
int k=threadIdx.x + blockDim.x * blockIdx.x;
int j;
unsigned long long int grid_num;
float R,T;
R=0.00198588;
T=300.0;
if(k<points){
for (j=0;j<points;j++){
grid_num=(unsigned long long)j*points;
grid_num+=k;
// NOTE(review): -1.0 is a double literal, so the expf argument is
// computed in double then narrowed -- harmless but mixed precision
covariance[grid_num]+=(variance[k]*variance[j])*expf((-1.0*bias)/(R*T));
}
}
} // End of Global
// Accumulate the Boltzmann weights alone (the normalization denominator
// matching compute_covariance): covariance[j*points + k] += exp(-bias/(R*T))
// for every (j, k) pair. Same launch shape and indexing as
// compute_covariance; one thread per grid point k.
__global__ void compute_covariance_2 (float *covariance,int points,float bias) {
int k=threadIdx.x + blockDim.x * blockIdx.x;
int j;
unsigned long long int grid_num;
float R,T;
R=0.00198588;
T=300.0;
if(k<points){
for (j=0;j<points;j++){
grid_num=(unsigned long long)j*points;
grid_num+=k;
covariance[grid_num]+=expf((-1.0*bias)/(R*T));
}
}
} // End of Global
// Driver: read a grid-point density map and resolution map, plus a per-frame
// hydrogen-bond count matrix; write Boltzmann-weighted averages, then
// accumulate the covariance numerator/denominator per frame on the GPU and
// write the normalized covariance matrix.
int main ()
{
    int blocks,threads,line_num,frame,count,k,j,x,y,z,points,grid_point,min_x,min_y,min_z,max_x,max_y,max_z,max_frame,line_counter,avg_only,print_flg,all_points,test_sum;
    unsigned long long int sqpoints,grid_num;
    int devCount;
    float bias,R,T;
    int *reso;
    float *top_sum,*bottom_sum,*covariance,*covariance_2,*variance;
    float *dev_covariance,*dev_variance;
    char buf[4096];
    FILE* file=fopen("hb_count_matrix.dat","r");
    FILE* file2=fopen("reso_map.dat","r");
    FILE* file3=fopen("map_density.dat","r");
    FILE *ofp;
    FILE *ofp2;
    char outputFilename[] = "weighted_avg.dat";
    char outputFilename2[] = "hb_covariance_matrix.dat";
    CHECK (cudaSetDevice ( 0 ) );
    avg_only=0;      // 0: also compute the covariance on the GPU
    print_flg=1;     // 1: write the covariance matrix at the end
    R=0.001986;      // gas constant, kcal/(mol*K) -- TODO confirm units
    T=300.00;        // temperature (K)
    min_x=999;
    min_y=999;
    min_z=999;
    max_x=-999;
    max_y=-999;
    max_z=-999;
    points=0;
    // first pass: count all grid points in the density map
    while (fgets(buf, sizeof (buf), file3)) {
        sscanf (buf, "%i\t%i\t%i",&x,&y,&z);
        points+=1;}
    fclose (file3);
    reso=(int *)malloc(points*sizeof(int));
    if(reso == NULL){
        printf("Error: %s:%d, ", __FILE__, __LINE__);
        exit(1);}
    memset(reso,0,points*sizeof(int));
    all_points=points;
    points=0;
    // second pass: flag resolved points and track the bounding box
    while (fgets(buf, sizeof (buf), file2)) {
        sscanf (buf, "%i\t%i\t%i\t%i",&x,&y,&z,&line_num);
        if(x<min_x){min_x=x;}
        if(y<min_y){min_y=y;}
        if(z<min_z){min_z=z;}
        if(x>max_x){max_x=x;}
        if(y>max_y){max_y=y;}
        if(z>max_z){max_z=z;}
        reso[line_num]=1;
        points+=1;}
    fclose (file2);
    // cast BEFORE multiplying so points*points cannot overflow int
    sqpoints= (unsigned long long )points*points;
    test_sum=0;
    for (k=0;k<all_points;k++){
        test_sum+=reso[k];
    }
    printf("~~~~~~~~Box Information~~~~~~~~\n");
    printf("Minx=%i\n",min_x);
    printf("Miny=%i\n",min_y);
    printf("Minz=%i\n",min_z);
    printf("Maxx=%i\n",max_x);
    printf("Maxy=%i\n",max_y);
    printf("Maxz=%i\n",max_z);
    printf("Points=%i\n",points);
    printf("Check=%i\n",test_sum);
    printf("sqpoints=%llu\n",sqpoints);
    printf("Check2=%llu\n",sqpoints/points);
    top_sum=(float *)malloc(points*sizeof(float));
    if(top_sum == NULL){
        printf("Error: %s:%d, ", __FILE__, __LINE__);
        exit(1);}
    bottom_sum=(float *)malloc(points*sizeof(float));
    if(bottom_sum == NULL){
        printf("Error: %s:%d, ", __FILE__, __LINE__);
        exit(1);}
    if(avg_only == 0){
        variance=(float *)malloc(points*sizeof(float));
        if(variance == NULL){
            printf("Error: %s:%d, ", __FILE__, __LINE__);
            exit(1);}
        covariance=(float *)malloc(sqpoints*sizeof(float));
        if(covariance == NULL){
            printf("Error: %s:%d, ", __FILE__, __LINE__);
            exit(1);}
        covariance_2=(float *)malloc(sqpoints*sizeof(float));
        if(covariance_2 == NULL){
            printf("Error: %s:%d, ", __FILE__, __LINE__);
            exit(1);}
    }
    printf("Set Memory...\n");
    memset(top_sum,0,points*sizeof(float));
    memset(bottom_sum,0,points*sizeof(float));
    if(avg_only == 0){
        memset(variance,0,points*sizeof(float));
        memset(covariance,0,(sqpoints)*sizeof(float));
        memset(covariance_2,0,(sqpoints)*sizeof(float));
    }
    printf("Reading Input...\n");
    // accumulate Boltzmann-weighted numerator/denominator per resolved point
    grid_point=0;
    max_frame=0;
    line_counter=0;
    while (fgets(buf, sizeof (buf), file)) {
        if(line_counter==all_points){
            grid_point=0;
            line_counter=0;}
        sscanf (buf, "%i\t%i\t%f",&frame,&count,&bias);
        if(frame>max_frame){max_frame=frame;}
        if(reso[line_counter]==1){
            top_sum[grid_point]+=(expf((-1.0*bias)/(R*T))*float(count));
            bottom_sum[grid_point]+=(expf(((-1.0*bias)/(R*T))));
            grid_point+=1;}
        line_counter+=1;
    }
    printf("Write Average...\n");
    ofp=fopen(outputFilename, "w");
    for (k=0;k<points;k++){
        fprintf(ofp,"%f\n",top_sum[k]/bottom_sum[k]);
    }
    fclose(ofp);
    //Avg Only Below
    if(avg_only == 0){
        cudaGetDeviceCount(&devCount);
        // Iterate through devices (keeps the last device's limit;
        // NOTE(review): `threads` stays uninitialized if devCount == 0)
        for (int i = 0; i < devCount; ++i){
            cudaDeviceProp devProp;
            cudaGetDeviceProperties(&devProp, i);
            threads=devProp.maxThreadsPerBlock;
        }
        blocks=ceil(float(points)/float(threads))+1;
        printf("Threads=%i\n",threads);
        printf("Blocks=%i\n",blocks);
        CHECK (cudaMalloc((void **) &dev_covariance, (sqpoints)*sizeof(float)) );
        CHECK (cudaMalloc((void **) &dev_variance, points*sizeof(float)) );
        CHECK (cudaMemcpy(dev_covariance, covariance, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) );
        CHECK (cudaMemcpy(dev_variance, variance, points*sizeof(float), cudaMemcpyHostToDevice) );
        rewind(file);
        grid_point=0;
        line_counter=0;
        printf("Compute Covariance...\n");
        // a full frame of variances is staged on the host, then the kernel
        // folds it into the running covariance when the next frame begins
        while (fgets(buf, sizeof (buf), file)) {
            if(line_counter==all_points){
                CHECK (cudaMemcpy(dev_covariance, covariance, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) );
                CHECK (cudaMemcpy(dev_variance, variance, points*sizeof(float), cudaMemcpyHostToDevice) );
                compute_covariance<<<blocks,threads>>>(dev_variance,dev_covariance,points,bias);
                CHECK (cudaMemcpy(covariance, dev_covariance, (sqpoints)*sizeof(float), cudaMemcpyDeviceToHost) );
                grid_point=0;
                line_counter=0;}
            if(reso[line_counter]==1){
                sscanf (buf, "%i\t%i\t%f",&frame,&count,&bias);
                variance[grid_point]=(float(count)-(top_sum[grid_point]/bottom_sum[grid_point]));
                grid_point+=1;}
            line_counter+=1;
        }
        rewind(file);
        CHECK (cudaMemcpy(dev_covariance, covariance_2, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) );
        grid_point=0;
        line_counter=0;
        printf("Compute Covariance_2...\n");
        while (fgets(buf, sizeof (buf), file)) {
            if(line_counter==all_points){
                CHECK (cudaMemcpy(dev_covariance, covariance_2, (sqpoints)*sizeof(float), cudaMemcpyHostToDevice) );
                compute_covariance_2<<<blocks,threads>>>(dev_covariance,points,bias);
                CHECK (cudaMemcpy(covariance_2, dev_covariance, (sqpoints)*sizeof(float), cudaMemcpyDeviceToHost) );
                grid_point=0;
                line_counter=0;}
            if(reso[line_counter]==1){
                sscanf (buf, "%i\t%i\t%f",&frame,&count,&bias);
                grid_point+=1;}
            line_counter+=1;
        }
        fclose (file);
        CHECK (cudaFree(dev_covariance) );
        CHECK (cudaFree(dev_variance) );
        cudaDeviceReset();
        if(print_flg==1){
            printf("Write Covariance...\n");
            ofp2=fopen(outputFilename2, "w");
            for (k=0;k<points;k++){
                for (j=0;j<points;j++){
                    // FIX: cast before multiplying (the original computed
                    // j*points in int, overflowing for large grids; the
                    // kernels already cast first)
                    grid_num=(unsigned long long)j*points;
                    grid_num+=k;
                    fprintf(ofp2,"%i\t%i\t%f\n",k+1,j+1,(covariance[grid_num]/covariance_2[grid_num]));
                }
            }
            fclose(ofp2);
        }
    }//Avg_only
    free(reso);
    free(top_sum);
    free(bottom_sum);
    if(avg_only == 0){
        free(covariance);
        free(covariance_2);
        free(variance);
    }
    printf("Complete!\n");
    return 0;
}
|
19,965 | #define LENGTH_V 1024*1024
#define LENGTH_SHOW 10
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
#include <time.h>
// Print a labeled preview of `vector`: the label, an underline of
// lengthMyString dashes, then either the whole vector or its first and last
// LENGTH_SHOW elements separated by "...".
// NOTE(review): the host_vector is passed by value, so each call copies it;
// exactly LENGTH_V elements are printed regardless of vector.size(), so
// callers must pass vectors of length LENGTH_V.
void show_vector(char *myString, int lengthMyString, thrust::host_vector<int> vector) {
int j;
printf("\n%s\n",myString);
// underline the label
for (j = 0; j < lengthMyString; j++)
printf("-");
printf("\n");
if (LENGTH_SHOW * 2 < LENGTH_V) {
// long vector: head, ellipsis, tail
for (j = 0; j < LENGTH_SHOW; j++)
printf(" %d", vector[j]);
printf(" ...");
for (j = LENGTH_V-LENGTH_SHOW; j < LENGTH_V; j++)
printf(" %d", vector[j]);
printf("\n");
} else {
// short vector: print everything
for (j=0 ; j<LENGTH_V; j++)
printf(" %d", vector[j]);
printf("\n");
}
printf("\n");
}
int RandomNumber () { return ( (std::rand() % 100) - 50); }
// Demo: fill a host vector with random values, run an inclusive scan on the
// GPU with Thrust, and report the result and kernel time.
// NOTE(review): clock() measures CPU time on the host; Thrust's
// inclusive_scan blocks until the device work completes, so the interval
// brackets the scan -- confirm on the target Thrust version.
int main(void) {
clock_t start, end;
double time_used;
// -----------------------
thrust::host_vector<int> h_vec(LENGTH_V);
srand(time(NULL));
std::generate(h_vec.begin(), h_vec.end(), RandomNumber);
char msg1[] = "Vector original";
show_vector(msg1, strlen(msg1), h_vec);
// -----------------------
// Vector scan (Thrust)
// -----------------------
// copy host -> device, scan in place on the device
thrust::device_vector<int> d_vec = h_vec;
start = clock();
thrust::inclusive_scan(d_vec.begin(), d_vec.end(), d_vec.begin());
end = clock();
thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
// -----------------------
char msg2[] = "Scan result (GPU)";
show_vector(msg2, strlen(msg2), h_vec);
time_used = 1000.0 * ((double)(end - start)) / CLOCKS_PER_SEC;
printf("Thrust scan kernel processing time: %f millisec. (nº elements %d)\n",time_used, LENGTH_V);
printf("...\n");
return 0;
}
19,966 | #include <fstream>
#include <iostream>
using namespace std;
#define Mask_width 3
#define Mask_radius Mask_width/2
#define TILE_WIDTH 32
#define w (TILE_WIDTH + Mask_width - 1)
#define clamp(x) (min(max((x), 0), 255))
// Tiled 2D convolution of an interleaved multi-channel image I with the
// integer mask M (Mask_width x Mask_width), writing clamped [0,255] values
// into P. Requires blockDim == (TILE_WIDTH, TILE_WIDTH); the shared tile
// (w x w, w = TILE_WIDTH + Mask_width - 1) includes the halo and is filled
// in two batches because the tile holds more elements than the block has
// threads.
// FIX: the accumulator was `int`, which truncated every double partial
// product (N_ds is double); it now accumulates in double and truncates
// only once before clamping.
__global__
void convolution(double *I, const int* __restrict__ M, double *P, int channels, int width, int height)
{
    __shared__ double N_ds[w][w];
    int k;
    for (k = 0; k < channels; k++) {
        // First batch loading
        int dest = threadIdx.y * TILE_WIDTH + threadIdx.x,
            destY = dest / w, destX = dest % w,
            srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius,
            srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius,
            src = (srcY * width + srcX) * channels + k;
        if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
            N_ds[destY][destX] = I[src];
        else
            N_ds[destY][destX] = 0;  // zero-pad outside the image
        // Second batch loading
        dest = threadIdx.y * TILE_WIDTH + threadIdx.x + TILE_WIDTH * TILE_WIDTH;
        destY = dest / w, destX = dest % w;
        srcY = blockIdx.y * TILE_WIDTH + destY - Mask_radius;
        srcX = blockIdx.x * TILE_WIDTH + destX - Mask_radius;
        src = (srcY * width + srcX) * channels + k;
        if (destY < w) {
            if (srcY >= 0 && srcY < height && srcX >= 0 && srcX < width)
                N_ds[destY][destX] = I[src];
            else
                N_ds[destY][destX] = 0;
        }
        __syncthreads();
        double accum = 0.0;  // FIX: was int (per-term truncation)
        int y, x;
        for (y = 0; y < Mask_width; y++)
            for (x = 0; x < Mask_width; x++)
                accum += N_ds[threadIdx.y + y][threadIdx.x + x] * M[y * Mask_width + x];
        y = blockIdx.y * TILE_WIDTH + threadIdx.y;
        x = blockIdx.x * TILE_WIDTH + threadIdx.x;
        if (y < height && x < width)
            P[(y * width + x) * channels + k] = clamp((int)accum);
        // keep the tile stable until all threads finish before next channel
        __syncthreads();
    }
}
// Dump a fil x cols image as an ASCII PGM (P2) file named `name`:
// fixed header (magic, comment, dimensions, max value 255), then one
// truncated integer sample per line. The stream closes on destruction.
void WritePGM(double * Pout, int fil , int cols,const char *name)
{
    ofstream out(name);
    out << "P2" << endl;
    out << "# Function ConvertRGBtoGray @eddyrene" << endl;
    out << fil << " " << cols << endl;
    out << 255 << endl;
    const int total = fil * cols;
    for (int idx = 0; idx < total; ++idx)
        out << (int)Pout[idx] << endl;
}
// Host wrapper: upload the order x order single-plane image R and a fixed
// 3x3 Sobel-style mask, run the tiled convolution, copy the result into sR
// and write it to "result.ppm".
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked, and the
// kernel's TILE_WIDTH is 32 while THREADS_PER_BLOCK here is also 32 -- the
// block shape matches the kernel's tiling assumption.
void kernel_convolution(double *R , double *sR , int order ,int chanel )
{
    double * d_R;//,*d_G,*d_B;
    double * sd_R;//,*sd_G,*sd_B;
    int N=order;
    int M=order;
    int THREADS_PER_BLOCK = 32;
    int size =1*N*M*sizeof(double);
    //cout<<"tamano Imagen "<<N<<" "<<M<<" size "<<size<<endl;
    int k[9]={-1,0,1,-2,0,2,-1,0,1};  // 3x3 mask (Sobel-like weights)
    int *d_k;
    cudaMalloc((void **)&d_R, size);
    cudaMalloc((void **)&sd_R, size);
    cudaMalloc((void **)&d_k,9*sizeof(int));
    cudaMemcpy(d_R, R, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_k, k, 9*sizeof(int), cudaMemcpyHostToDevice);
    int blocks= (N + THREADS_PER_BLOCK -1)/THREADS_PER_BLOCK;
    dim3 dimGrid(blocks, blocks, 1);
    dim3 dimBlock(THREADS_PER_BLOCK,THREADS_PER_BLOCK, 1);
    convolution<<<dimGrid,dimBlock>>>(d_R,d_k,sd_R,chanel, N, M);
    cudaMemcpy(sR, sd_R, size, cudaMemcpyDeviceToHost);
    string name = "result.ppm";
    WritePGM(sR, N,M,name.c_str());
    //free(R); //free(G);free(B);
    cudaFree(d_R); //cudaFree(d_B);cudaFree(d_G);
    cudaFree(sd_R); //cudaFree(sd_B);cudaFree(sd_G);
    cudaFree(d_k); // FIX: the mask buffer was never freed (device leak)
    //return 0;
}
19,967 | #include <cuda.h>
#include <cmath>
#include <cstdio>
#include <iostream>
#include <chrono>
using namespace std;
/*B*/
/* B: element-wise matrix add, one thread per element over a 1-D grid.
   Threads beyond the n*n elements simply return. */
__global__
void MatrixAddB(float* A, float* B, float* C, int n) {
    int idx = threadIdx.x + (blockIdx.x * blockDim.x);
    if (idx >= n * n)
        return;
    C[idx] = A[idx] + B[idx];
}
/*C=>Row*/
/* C: one thread per row. Thread i walks row i left to right, so all row
   indexing uses threadIdx.x only (single-block launches). */
__global__
void MatrixAddC(float* A, float* B, float* C, int n) {
    int row = threadIdx.x;
    if (row >= n)
        return;
    for (int col = 0; col < n; col++) {
        int idx = row * n + col;
        C[idx] = A[idx] + B[idx];
    }
}
/*D=>Column*/
/* D: one thread per column. Thread i walks column i top to bottom
   (column-strided accesses; single-block launches). */
__global__
void MatrixAddD(float* A, float* B, float* C, int n) {
    int col = threadIdx.x;
    if (col >= n)
        return;
    for (int row = 0; row < n; row++) {
        int idx = row * n + col;
        C[idx] = A[idx] + B[idx];
    }
}
/*AB*/
/* AB: host wrapper for the one-thread-per-element kernel MatrixAddB.
   Uploads A, B (and C, though the kernel overwrites it), launches the
   kernel, and copies the result back into h_C.
   FIX: the launch was hard-coded <<<10, 10>>> (100 threads), which only
   covered matrices with n <= 10; the grid is now derived from n. */
void MatrixAddAB(float* h_A, float* h_B, float* h_C, int n) {
    int size = n*n * sizeof(float);
    float *d_A;
    float *d_B;
    float *d_C;
    cudaMalloc(&d_A, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_B, size);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_C, size);
    cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
    // one thread per element, rounded up to whole blocks
    int threadsPerBlock = 256;
    int blocks = (n * n + threadsPerBlock - 1) / threadsPerBlock;
    MatrixAddB <<< blocks, threadsPerBlock >>> (d_A, d_B, d_C, n);
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
/*AC*/
/* AC: host wrapper for the one-thread-per-row kernel MatrixAddC.
   MatrixAddC indexes rows with threadIdx.x only, so all n threads must
   live in a single block (n <= 1024 on current hardware).
   FIX: the launch was hard-coded <<<1, 10>>>, which only covered n <= 10;
   it now launches n threads. */
void MatrixAddAC(float* h_A, float* h_B, float* h_C, int n) {
    int size = n*n * sizeof(float);
    float *d_A;
    float *d_B;
    float *d_C;
    cudaMalloc(&d_A, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_B, size);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_C, size);
    cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
    // single block, one thread per row (requires n <= max threads/block)
    MatrixAddC <<< 1, n >>> (d_A, d_B, d_C, n);
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
/*AD*/
/* AD: host wrapper for the one-thread-per-column kernel MatrixAddD.
   MatrixAddD indexes columns with threadIdx.x only, so all n threads must
   live in a single block (n <= 1024 on current hardware).
   FIX: the launch was hard-coded <<<1, 10>>>, which only covered n <= 10;
   it now launches n threads. */
void MatrixAddAD(float* h_A, float* h_B, float* h_C, int n) {
    int size = n*n * sizeof(float);
    float *d_A;
    float *d_B;
    float *d_C;
    cudaMalloc(&d_A, size);
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_B, size);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    cudaMalloc(&d_C, size);
    cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
    // single block, one thread per column (requires n <= max threads/block)
    MatrixAddD <<< 1, n >>> (d_A, d_B, d_C, n);
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Benchmark driver: add two 10x10 all-ones matrices three ways (per-element,
// per-row, per-column), timing each host wrapper (timings include the
// device allocations and copies, not just the kernel) and printing the
// resulting matrices (expected: all 2s).
int main() {
//Host Matrix
float *h_A0, *h_B0, *h_C0;
float *h_A1, *h_B1, *h_C1;
float *h_A2, *h_B2, *h_C2;
int n = 10;
h_A0 = (float*)malloc(n*n * sizeof(float));
h_B0 = (float*)malloc(n*n * sizeof(float));
h_C0 = (float*)malloc(n*n * sizeof(float));
h_A1 = (float*)malloc(n*n * sizeof(float));
h_B1 = (float*)malloc(n*n * sizeof(float));
h_C1 = (float*)malloc(n*n * sizeof(float));
h_A2 = (float*)malloc(n*n * sizeof(float));
h_B2 = (float*)malloc(n*n * sizeof(float));
h_C2 = (float*)malloc(n*n * sizeof(float));
//Create Matrix (all entries 1.0, so every result entry should be 2.0)
for (int i = 0; i < n*n; i++) {
h_A0[i] = 1.0;
h_B0[i] = 1.0;
h_C0[i] = 1.0;
h_A1[i] = 1.0;
h_B1[i] = 1.0;
h_C1[i] = 1.0;
h_A2[i] = 1.0;
h_B2[i] = 1.0;
h_C2[i] = 1.0;
}
//B (Main)
chrono::time_point<chrono::system_clock> B_GPU_Start, B_GPU_End;
B_GPU_Start = chrono::system_clock::now();
MatrixAddAB(h_A0, h_B0, h_C0, n);
B_GPU_End = chrono::system_clock::now();
cout << "B_GPU: " << chrono::duration_cast<chrono::nanoseconds>(B_GPU_End - B_GPU_Start).count() << "ns." << endl;
//Print B
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
cout << h_C0[i*n + j] << " ";
}
cout << endl;
}
//C (Main)
chrono::time_point<chrono::system_clock> C_GPU_Start, C_GPU_End;
C_GPU_Start = chrono::system_clock::now();
MatrixAddAC(h_A1, h_B1, h_C1, n);
C_GPU_End = chrono::system_clock::now();
cout << "C_GPU: " << chrono::duration_cast<chrono::nanoseconds>(C_GPU_End - C_GPU_Start).count() << "ns." << endl;
//Print C
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
cout << h_C1[i*n + j] << " ";
}
cout << endl;
}
//D (Main)
chrono::time_point<chrono::system_clock> D_GPU_Start, D_GPU_End;
D_GPU_Start = chrono::system_clock::now();
MatrixAddAD(h_A2, h_B2, h_C2, n);
D_GPU_End = chrono::system_clock::now();
cout << "D_GPU: " << chrono::duration_cast<chrono::nanoseconds>(D_GPU_End - D_GPU_Start).count() << "ns." << endl;
//Print D
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++) {
cout << h_C2[i*n + j] << " ";
}
cout << endl;
}
//Free
free(h_A0);
free(h_B0);
free(h_C0);
free(h_A1);
free(h_B1);
free(h_C1);
free(h_A2);
free(h_B2);
free(h_C2);
return 0;
}
|
19,968 | #include "includes.h"
// Tree-sum one block's worth of g_idata into g_odata[blockIdx.x] using
// interleaved addressing with a modulo test (the classic slow variant:
// most threads idle and warps diverge).
// Assumptions, from the indexing: dynamic shared memory of
// blockDim.x * sizeof(float) is supplied at launch, and the grid covers
// exactly the input length (there is no bounds guard on g_idata[i]).
// NOTE(review): for non-power-of-two blockDim.x, tid+s can index past the
// shared array -- callers presumably use power-of-two blocks; confirm.
__global__ void reduce(float *g_idata, float *g_odata){
extern __shared__ float sdata[];
//each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s = 1;s < blockDim.x; s *= 2){
if(tid % (2 * s) == 0){
sdata[tid] += sdata[tid + s];
}
// barrier outside the divergent branch: all threads reach it
__syncthreads();
}
// write result for this block to global mem
if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
19,969 | /* Copyright 2017 Eric Aubanel
* This file contains code implementing Algorithm 4.14 from
* Elements of Parallel Computing, by Eric Aubanel, 2016, CRC Press.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
* -------------------------------------------------------------------
* Implementation of reduction of n floats using CUDA
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
int isPowerOf2(int n);
//reduce n floats in array a to a partial sum for each block,
//stored in array c. Block size must be power of 2
//reduce n floats in array a to a partial sum for each block,
//stored in array c. Block size must be power of 2
// Each thread first serially sums its contiguous slice of a (slices are
// assigned by proportional splitting when n >= total threads, one element
// each otherwise), then the block tree-reduces the per-thread sums in
// dynamic shared memory (blockDim.x floats supplied at launch).
__global__ void reductionGPU(float *a, float *c, int n){
//size of b indicated by kernel call in main (blockSize)
extern __shared__ float b[];
int gsize = blockDim.x; //block size
int nt = gsize * gridDim.x; //total number of threads
int gid = blockIdx.x; //block id
int tid = threadIdx.x; //local thread id
int id = gid*gsize + tid; //global thread id
//if n<nt, then some threads will have nothing to do
int istart;
int iend;
if(n<nt){
istart = id;
iend = id;
}
else{
//evaluate as float to avoid overflow
//NOTE(review): the float rounding makes slice boundaries approximate
//for large id*n products -- adjacent slices still tile [0, n)
istart = (float)id*n/nt;
iend = (float)(id+1)*n/nt - 1;
}
if(n<nt && id>=n){
b[tid] = 0; // surplus threads contribute the additive identity
}else{
float psum = 0.0;
for(int i=istart; i<=iend; i++)
psum += a[i];
b[tid] = psum;
}
__syncthreads();
// halving tree reduction; requires gsize to be a power of two
for(int j=gsize>>1; j>=1; j >>= 1){
if(tid<j)
b[tid] += b[tid+j];
__syncthreads();
}
// every thread stores the same b[0]; redundant but race-free after the
// final barrier
c[gid] = b[0];
}
// Usage: prog n blockSize numBlocks.
// Sums n random floats on the CPU (for verification/timing), then reduces
// on the GPU, recursively re-reducing the per-block partial sums until one
// value remains, and reports both timings and the relative difference.
// FIXES vs. original: cudaMemset takes a byte count, so the first clear of
// a_d now passes n*sizeof(float) instead of n; the deprecated
// cudaThreadSynchronize() calls are replaced by cudaDeviceSynchronize().
int main(int argc, char **argv){
    float *a_h; //array to be reduced on host (CPU)
    float *c_h; //array of partial sums on host
    float *a_d; //array to be reduced on device (GPU)
    float *c_d; //array of partial sums on device
    cudaError_t error1, error2;
    struct timespec tstart, tend;
    float time;
    if(argc < 4){
        fprintf(stderr,"usage: %s n blockSize numBlocks\n", argv[0]);
        return 1;
    }
    int n = strtol(argv[1], NULL, 10);
    int blockSize = strtol(argv[2], NULL, 10); //size of thread block on device
    int numBlocks = strtol(argv[3], NULL, 10); //number of blocks on device
    if(!isPowerOf2(blockSize)){
        fprintf(stderr,"blockSize must be power of 2\n");
        return 1;
    }
    //memory allocation on host and device
    a_h = (float *)malloc(n*sizeof(float));
    c_h = (float *)malloc(numBlocks*sizeof(float));
    if(a_h == NULL || c_h == NULL){
        fprintf(stderr,"couldn't allocate memory on host\n");
        return 1;
    }
    error1 = cudaMalloc((void **)&a_d, n*sizeof(float));
    error2 = cudaMalloc((void **)&c_d, numBlocks*sizeof(float));
    if(error1 != cudaSuccess || error2 != cudaSuccess){
        fprintf(stderr,"couldn't allocate memory on device\n");
        return 1;
    }
    for(int i=0; i<n; i++)
        a_h[i] = rand()%100;
    //sequential reduction for verification and timing
    clock_gettime(CLOCK_MONOTONIC, &tstart);
    float sum = 0.0;
    for(int i=0; i<n; i++)
        sum += a_h[i];
    clock_gettime(CLOCK_MONOTONIC, &tend);
    time = (tend.tv_sec-tstart.tv_sec) + (tend.tv_nsec-tstart.tv_nsec)*1.0e-9;
    printf("CPU reduction time in s: %f\n", time);
    //timing won't include transfer of array to device
    cudaMemcpy(a_d, a_h, n*sizeof(float), cudaMemcpyHostToDevice);
    clock_gettime(CLOCK_MONOTONIC, &tstart);
    reductionGPU <<<numBlocks, blockSize, blockSize*sizeof(float)>>> (a_d, c_d, n);
    error1 = cudaDeviceSynchronize();// wait until GPU kernel finished
    if(error1 != cudaSuccess){
        fprintf(stderr,"error executing kernel: %s\n", cudaGetErrorString(error1));
        return 1;
    }
    //Do not copy back to host and do CPU reduction. Do it on the GPU recursively.
    while(1){
        // FIX: byte count, not element count (original cleared only n bytes)
        cudaMemset(a_d, 0, n*sizeof(float));
        n = numBlocks;
        numBlocks = n/blockSize + ((n%blockSize==0)?0:1);
        // the previous pass's partial sums become the next pass's input
        cudaMemcpy(a_d, c_d, n*sizeof(float), cudaMemcpyDeviceToDevice);
        cudaMemset(c_d, 0, n*sizeof(float));
        reductionGPU <<<numBlocks, blockSize, blockSize*sizeof(float)>>> (a_d, c_d, n);
        error2 = cudaDeviceSynchronize();// wait until GPU kernel finished
        if(error2 != cudaSuccess){
            fprintf(stderr,"error executing kernel: %s\n", cudaGetErrorString(error2));
            return 1;
        }
        cudaMemcpy(c_h, c_d, numBlocks*sizeof(float), cudaMemcpyDeviceToHost);
        if(numBlocks==1)break;
    }
    cudaMemcpy(c_h, c_d, numBlocks*sizeof(float), cudaMemcpyDeviceToHost);
    float dsum = c_h[0];
    printf("dsum: %f\n", dsum);
    clock_gettime(CLOCK_MONOTONIC, &tend);
    time = (tend.tv_sec-tstart.tv_sec) + (tend.tv_nsec-tstart.tv_nsec)*1.0e-9;
    printf("GPUS time in s: %f\n", time);
    //not necessarily the same because of differences in roundoff error
    printf("relative difference between sequential and parallel sums: %g\n",
    fabs(dsum-sum)/sum);
    return 0;
}
// Return 1 iff n is a positive power of two, else 0.
// Strips trailing zero bits, then checks the remainder is exactly 1.
int isPowerOf2(int n){
    while (n != 0 && (n & 1) == 0)
        n >>= 1;
    return (n == 1) ? 1 : 0;
}
19,970 | #include "includes.h"
// out[i][j][c] = a[i][j][c] + bias[c] over a size_x x size_y x size_z
// volume with channel-interleaved layout. One thread per (i, j) position
// on a 2-D launch; each thread loops over the size_z channels.
__global__ void add_bias(float *a, float *bias, float *out, int size_x, int size_y, int size_z)
{
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= size_x || col >= size_y)
        return;
    const int base = (row * size_y + col) * size_z;
    for (int c = 0; c < size_z; c++)
        out[base + c] = a[base + c] + bias[c];
}
19,971 | #include "includes.h"
// Copy XYZ triplets from pointsCoordinates into dataVertex starting at
// dataVertexOffset, substituting the (xNonValid, yNonValid, zNonValid)
// placeholder for points whose activityFlag is 0. One thread per point;
// supports a 2-D grid of 1-D blocks (threadId flattens grid rows).
__global__ void CopyPointsCoordinatesKernel( float *pointsCoordinates, int *activityFlag, float xNonValid, float yNonValid, float zNonValid, float *dataVertex, int dataVertexOffset, int maxCells )
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells)
{
float xToCopy = pointsCoordinates[threadId * 3];
float yToCopy = pointsCoordinates[threadId * 3 + 1];
float zToCopy = pointsCoordinates[threadId * 3 + 2];
// inactive points are masked with the caller-supplied sentinel position
if(activityFlag[threadId] == 0)
{
xToCopy = xNonValid;
yToCopy = yNonValid;
zToCopy = zNonValid;
}
dataVertex[dataVertexOffset + threadId * 3] = xToCopy;
dataVertex[dataVertexOffset + threadId * 3 + 1] = yToCopy;
dataVertex[dataVertexOffset + threadId * 3 + 2] = zToCopy;
}
}
19,972 | #include "add_ghost_cells.cuh"
// Fill the upstream (x == 0) and downstream (x == cells + 1) ghost cells of
// the 1-D solution arrays. An imposed discharge/depth value is used when it
// is positive; otherwise the adjacent interior cell is copied (transmissive
// boundary). The bed elevation z_BC is always copied from the neighbour.
// The launch must cover at least sim_params.cells + 2 threads; all threads
// except the two boundary ones do nothing.
// NOTE(review): field semantics (q = discharge, h = depth, z = bed level)
// inferred from names -- confirm against the project's AssembledSolution.
__global__ void add_ghost_cells
(
BoundaryConditions bcs,
SimulationParameters sim_params,
AssembledSolution d_assem_sol
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
if (x == 0)
{
d_assem_sol.q_BC[x] = bcs.q_imposed_up > 0 ? bcs.q_imposed_up : d_assem_sol.q_BC[x + 1];
d_assem_sol.h_BC[x] = bcs.h_imposed_up > 0 ? bcs.h_imposed_up : d_assem_sol.h_BC[x + 1];
d_assem_sol.z_BC[x] = d_assem_sol.z_BC[x + 1];
}
if (x == sim_params.cells + 1)
{
d_assem_sol.q_BC[x] = bcs.q_imposed_down > 0 ? bcs.q_imposed_down : d_assem_sol.q_BC[x - 1];
d_assem_sol.h_BC[x] = bcs.h_imposed_down > 0 ? bcs.h_imposed_down : d_assem_sol.h_BC[x - 1];
d_assem_sol.z_BC[x] = d_assem_sol.z_BC[x - 1];
}
}
19,973 | #include <fstream>
#include <iostream>
#include <string>
#include <cstring>
#include <cstdlib>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/binary_search.h>
#include <thrust/pair.h>
#define GPU_MEM 10000000
using namespace std;
//the particle index in the r200 file
bool isIndexBin = true;
string index_file_txt = "";//"/home/lyang/data/vl2b.00400.r200.index";
string index_file_bin = "";// "/home/lyang/data/vl2b.00400.r200.index";
//the particle file in the AHF particles
bool isAHFPartFileBin = false;
string ahf_part_file_txt = "";//"/home/lyang/halodata/vl_400_rhovesc.z0.000.AHF_particles";
string ahf_part_file_bin = "";//"/home/lyang/halodata/vl_400_rhovesc.z0.000.AHF_particles";
//the output halo flags (a binary file)
string output_file = "";//"vl2b.00400.r200.ahf.haloflags";
//halo ids to be selected
//the first int is a number of total number to be selected
//followed by the list of halo ids
bool isHaloIdsBin = false;
string haloids_to_be_selected_bin = "to_be_seleted.ids";
string haloids_to_be_selected_txt = "";
int * haloParticles_;
int * haloIds_;
int * searchParts_;
int * searchIndex_;
bool * searchResult_;
char * flags_;
int * particles_;
//bool verbose = false;
int numParts_ = 0;
int numOfHalos_ = 0;
//Run one batched membership test on the GPU and mark matched particles.
//requiredSearchPartNum: number of pending particle ids in searchParts_/searchIndex_.
//numPartsRead_: number of sorted halo particle ids currently in dev_val.
//Side effect: sets flags_[searchIndex_[l]] = 1 for every id that is found.
void getSearchRes(int requiredSearchPartNum, int numPartsRead_,
thrust::device_vector<int> &dev_searchParts_,
thrust::device_vector<int> &dev_searchResult_,
thrust::device_vector<int> &dev_val){
//do the search
//Upload the batch of ids to test.
thrust::copy(searchParts_, searchParts_ + requiredSearchPartNum, dev_searchParts_.begin());
//Vectorized binary search: dev_searchResult_[l] != 0 iff
//dev_searchParts_[l] occurs in the sorted range dev_val[0, numPartsRead_).
thrust::binary_search(dev_val.begin(), dev_val.begin() + numPartsRead_,
dev_searchParts_.begin(),
dev_searchParts_.begin() + requiredSearchPartNum,
dev_searchResult_.begin());
//Bring the hit mask back to the host (int -> bool conversion per element).
thrust::copy(dev_searchResult_.begin(), dev_searchResult_.begin() + requiredSearchPartNum, searchResult_);
for(int l = 0; l < requiredSearchPartNum; l++){
if(searchResult_[l]){
flags_[searchIndex_[l]] = 1;
}
}
}
//Sort one batch of halo member particles on the GPU, then flag every
//not-yet-flagged particle in particles_ that occurs in that batch.
//Candidate particles are tested in GPU_MEM-sized chunks via getSearchRes.
void doSearch(int numPartsRead_, thrust::device_vector<int> &dev_searchParts_,
thrust::device_vector<int> &dev_searchResult_,
thrust::device_vector<int> &dev_val){
printf("Start testing %d halo particles...\n", numPartsRead_);
//start filling the tags
//step 1: sorting
printf("Sorting ...\n");
thrust::copy(haloParticles_, haloParticles_ + numPartsRead_, dev_val.begin());
thrust::sort(dev_val.begin(), dev_val.begin() + numPartsRead_);
//step 2: testing
printf("Searching ...\n");
//test every particle whether it's in the array
int requiredSearchPartNum = 0;
for(int k = 0; k < numParts_; k ++){
if(flags_[k] == 0){
//Not flagged yet: queue the id (and its index) for the next batch.
searchParts_[requiredSearchPartNum] = particles_[k];
searchIndex_[requiredSearchPartNum] = k;
requiredSearchPartNum ++;
}
if(requiredSearchPartNum >= GPU_MEM){
getSearchRes(requiredSearchPartNum, numPartsRead_,
dev_searchParts_, dev_searchResult_, dev_val);
requiredSearchPartNum = 0;
}
}
//Flush the final partial batch.
if(requiredSearchPartNum > 0){
getSearchRes(requiredSearchPartNum, numPartsRead_,
dev_searchParts_, dev_searchResult_, dev_val);
requiredSearchPartNum = 0;
}
}
//get flags
void getFlag(){
thrust::device_vector<int> dev_searchParts_(GPU_MEM);
thrust::device_vector<int> dev_searchResult_(GPU_MEM);
thrust::device_vector<int> dev_val(GPU_MEM);
thrust::device_vector<int> dev_searchHaloIds_(numOfHalos_);
//thrust::binary_search(input.begin(), input.end(), 0, thrust::less<int>()); // returns true
thrust::copy(haloIds_, haloIds_ + numOfHalos_, dev_searchHaloIds_.begin());
thrust::sort(dev_searchHaloIds_.begin(), dev_searchHaloIds_.begin() + numOfHalos_);
//int currentHalo = 0;
int totalNumHalos = 0;
for(int i = 0; i < numParts_; i++){
flags_[i] = 0;
}
haloParticles_ = new int[GPU_MEM];
searchParts_ = new int[GPU_MEM];
searchIndex_ = new int[GPU_MEM];
searchResult_ = new bool[GPU_MEM];
ifstream haloInputFile_;
if(isAHFPartFileBin){
haloInputFile_.open(ahf_part_file_bin.c_str(), ios::binary | ios::in);
haloInputFile_.read((char *)&totalNumHalos, sizeof(int));
}else{
haloInputFile_.open(ahf_part_file_txt.c_str());
haloInputFile_ >> totalNumHalos;
}
if(!haloInputFile_.good()){
printf("AHF Particle File Error!\n");
exit(1);
}
int numPartsRead_ = 0;
printf("Start reading halo particles...\n", numOfHalos_);
for(int i = 0; i < totalNumHalos; i ++){
int numHaloParts;
if(isAHFPartFileBin){
haloInputFile_.read((char *) &numHaloParts, sizeof(int));
}else{
haloInputFile_ >> numHaloParts;
}
//printf("%d %d\n", i, numHaloParts);
if(thrust::binary_search(dev_searchHaloIds_.begin(),
dev_searchHaloIds_.end(),
i,
thrust::less<int>())){
printf("Halo: %d, Particles: %d.\n", i, numHaloParts);
for(int j = 0; j < numHaloParts; j++){
int partindex;
int ch;
if(isAHFPartFileBin){
haloInputFile_.read((char *) &partindex, sizeof(int));
haloInputFile_.read((char *) &ch, sizeof(int));
}else{
haloInputFile_ >> partindex;
haloInputFile_ >> ch;
}
haloParticles_[numPartsRead_] = partindex;
numPartsRead_ ++;
if(numPartsRead_ >= GPU_MEM){
doSearch(numPartsRead_, dev_searchParts_, dev_searchResult_, dev_val);
numPartsRead_ = 0;
}
}
}else{
string line;
if(isAHFPartFileBin){
haloInputFile_.seekg(sizeof(int) * numHaloParts, ios_base::cur);
}else{
for(int j = 0; j < numHaloParts + 1; j++){
getline(haloInputFile_, line);
//haloInputFile_.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
}
//printf("%s\n", line.c_str());
}
}
}
if(numPartsRead_ > 0){
doSearch(numPartsRead_, dev_searchParts_, dev_searchResult_, dev_val);
numPartsRead_ = 0;
}
printf("\n");
haloInputFile_.close();
delete haloParticles_;
delete searchParts_;
delete searchIndex_;
delete searchResult_;
}
//Print the command-line usage summary (one option per line) for `name`.
void printUsage(const char * name){
printf("%s \n", name);
printf("%s \n", "-[bin/txt]_index <index file>");
printf("%s \n", "-[bin/txt]_ahf <AHF particle output file>");
printf("%s \n", "-[bin/txt]_haloid <ids of halo to be selected>");
printf("%s\n", "-output <outputfile>");
}
//Entry point: parse the CLI options, load the particle index and the
//list of selected halo ids (each either binary or text), compute the
//per-particle membership flags via getFlag(), then write the result as
//a binary file: <numParts:int><flags:char[numParts]>.
int main(int argc, const char **argv){
int m=1;
if(argc < 9){
printUsage(argv[0]);
exit(1);
}
//Option parsing: every option consumes exactly one value argument.
while (m<argc)
{
string arg = argv[m];
if (arg == "-bin_index") {
isIndexBin = true;
index_file_bin = argv[m+1];
m+=1;
}else if (arg == "-txt_index") {
isIndexBin = false;
index_file_txt = argv[m+1];
m+=1;
}else if (arg == "-bin_ahf") {
isAHFPartFileBin = true;
ahf_part_file_bin = argv[m+1];
m+=1;
}else if (arg == "-txt_ahf") {
isAHFPartFileBin = false;
ahf_part_file_txt = argv[m+1];
m+=1;
}else if (arg == "-output") {
output_file = argv[m+1];
m+=1;
}else if (arg == "-bin_haloid") {
isHaloIdsBin = true;
haloids_to_be_selected_bin = argv[m+1];
m+=1;
}else if (arg == "-txt_haloid") {
isHaloIdsBin = false;
haloids_to_be_selected_txt = argv[m+1];
m+=1;
}
else{
printUsage(argv[0]);
exit(0);
}
m++;
}
//Load the particle index: a count followed by the particle ids.
ifstream dataInputFile_;
if(isIndexBin){
dataInputFile_.open(index_file_bin.c_str(), ios::binary | ios::in);
if(!dataInputFile_.good()){
printf("Datafile error: %s !\n", index_file_bin.c_str());
exit(1);
}
dataInputFile_.read((char*)&numParts_, sizeof(int));
}else{
dataInputFile_.open(index_file_txt.c_str(), ios::in);
if(!dataInputFile_.good()){
printf("Datafile error: %s !\n", index_file_txt.c_str());
exit(1);
}
dataInputFile_ >>numParts_;
}
cout << "Particles: " << numParts_ << endl;
particles_ = new int[numParts_];
flags_ = new char[numParts_];
if(isIndexBin){
dataInputFile_.read((char *) particles_, sizeof(int) * numParts_);
dataInputFile_.close();
}else{
for(int i = 0; i < numParts_; i++){
dataInputFile_ >> particles_[i];
}
dataInputFile_.close();
}
//Load the halo ids to select: a count followed by the ids.
ifstream haloidsStream_;
if(isHaloIdsBin){
haloidsStream_.open(haloids_to_be_selected_bin.c_str(), ios::binary);
if(!haloidsStream_.good()){
printf("Halo Id error: %s!\n", haloids_to_be_selected_bin.c_str());
exit(1);
}
haloidsStream_.read((char *) &numOfHalos_, sizeof(int));
}else{
haloidsStream_.open(haloids_to_be_selected_txt.c_str());
if(!haloidsStream_.good()){
printf("Halo Id error: %s!\n", haloids_to_be_selected_txt.c_str());
exit(1);
}
haloidsStream_ >> numOfHalos_;
}
printf("Number of Halos: %d\n", numOfHalos_);
haloIds_ = new int[numOfHalos_];
if(isHaloIdsBin){
haloidsStream_.read((char *) haloIds_, sizeof(int) * numOfHalos_);
haloidsStream_.close();
}else{
for(int i = 0; i < numOfHalos_; i++){
haloidsStream_ >> haloIds_[i];
}
haloidsStream_.close();
}
getFlag();
//output
printf("Output the result...\n");
ofstream dataOutputStream_(output_file.c_str(), ios::binary);
dataOutputStream_.write((char *) &numParts_, sizeof(int));
dataOutputStream_.write((char *) flags_, sizeof(char) * numParts_);
dataOutputStream_.close();
printf("Finished...\n");
//BUG FIX: these arrays were allocated with new[]; plain delete on them
//is undefined behavior, so use delete[].
delete[] particles_;
delete[] flags_;
delete[] haloIds_;
}
|
19,974 | #include <cstdlib>
#include <ctime>
#include <climits>
#include <algorithm>
#include <functional>
#include <iostream>
using namespace std;
/*========* CudaArray *========*/
//Paired host/device buffer holding `size` elements of T. Owns both
//allocations for its lifetime; use copyToDevice()/copyFromDevice() to
//mirror the contents across the PCIe boundary.
//BUG FIX: copying is now disabled — the implicitly generated copy
//operations would leave two objects owning the same pointers, causing
//a double free()/cudaFree() in the destructor.
template<typename T>
class CudaArray {
public:
	//Allocates host (malloc) and device (cudaMalloc) buffers of `size` T's.
	CudaArray(int size) :
			size_ { size } {
		host_data = (T*) malloc(sizeof(T) * size);
		cudaMalloc(&device_data, sizeof(T) * size);
	}
	//Non-copyable: the destructor frees both buffers exactly once.
	CudaArray(const CudaArray&) = delete;
	CudaArray& operator=(const CudaArray&) = delete;
	~CudaArray() {
		free(host_data);
		cudaFree(device_data);
	}
	//Element count (not bytes).
	int size() const {
		return size_;
	}
	T* host() {
		return host_data;
	}
	const T* host() const {
		return host_data;
	}
	//Returned by reference so callers can swap device buffers in place
	//(used by the radix-sort ping-pong in foo()).
	T*& device() {
		return device_data;
	}
	const T* device() const {
		return device_data;
	}
	void copyToDevice() {
		cudaMemcpy(device_data, host_data, sizeof(T) * size_,
				cudaMemcpyHostToDevice);
	}
	void copyFromDevice() {
		cudaMemcpy(host_data, device_data, sizeof(T) * size_,
				cudaMemcpyDeviceToHost);
	}
private:
	int size_;       //element count
	T* host_data;    //owned malloc'd buffer
	T* device_data;  //owned cudaMalloc'd buffer
};
//Print the host-side contents of `a` to stdout as "[ e0 e1 ... ]".
template<typename T>
void print(CudaArray<T>& a) {
	cout << "[ ";
	const int count = a.size();
	int idx = 0;
	while (idx < count) {
		cout << a.host()[idx] << " ";
		++idx;
	}
	cout << "]" << endl;
}
/*========* ArrayUtils *========*/
//Invoke f(element, index) on each of the first `length` elements of a.
template<typename T, typename Fun>
void for_each(T *a, int length, Fun f) {
	int idx = 0;
	while (idx < length) {
		f(a[idx], idx);
		++idx;
	}
}
//Print `length` elements separated by single spaces, then a newline.
template<typename T>
void print(T *a, int length) {
	for (int i = 0; i < length; ++i) {
		cout << a[i] << " ";
	}
	cout << endl;
}
//Print a fixed-size array by forwarding to print(T*, int) with its
//compile-time length.
template<typename T, size_t N>
void print(T (&a)[N]) {
	print(a, N);
}
/*========* compile-time utils *========*/
//Index of the highest set bit of x at or below `bit`; written as a
//single-return recursion so it remains a C++11 constant expression.
//Recurses forever for x == 0 (unchanged precondition: x must be > 0).
constexpr unsigned ilog2_impl(unsigned x, unsigned bit) {
	return (x & (1u << bit)) ? bit : ilog2_impl(x, bit - 1u);
}
//floor(log2(x)) for x > 0.
constexpr unsigned ilog2(unsigned x) {
	return ilog2_impl(x, sizeof(unsigned) * CHAR_BIT - 1u);
}
//Smallest value >= x that is divisible by `multiple`.
constexpr unsigned next_multiple(unsigned multiple, unsigned x) {
	return (x % multiple == 0u) ? x : x + multiple - x % multiple;
}
/*========* device code *========*/
//__host__ __device__ unsigned id(unsigned x) {
// return x;
//}
//
//__host__ __device__ unsigned is_zero(unsigned bit, unsigned x) {
// return ((1 << bit) & ~x) >> bit;
//}
//
//template<typename Fun>
//__device__ void transform_scan(unsigned *idata, unsigned *odata, int n,
// Fun fun) {
// __shared__ unsigned temp[THREADS_PER_BLOCK * sizeof(unsigned)];
// int thid = threadIdx.x;
// int offset = 1;
// temp[2 * thid] = fun(idata[2 * thid]);
// temp[2 * thid + 1] = fun(idata[2 * thid + 1]);
// for (int d = n >> 1; d > 0; d >>= 1) {
// __syncthreads();
// if (thid < d) {
// int ai = offset * (2 * thid + 1) - 1;
// int bi = offset * (2 * thid + 2) - 1;
// temp[bi] += temp[ai];
// }
// offset *= 2;
// }
// if (thid == 0) {
// temp[n - 1] = 0;
// }
// for (int d = 1; d < n; d *= 2) {
// offset >>= 1;
// __syncthreads();
// if (thid < d) {
// int ai = offset * (2 * thid + 1) - 1;
// int bi = offset * (2 * thid + 2) - 1;
// unsigned t = temp[ai];
// temp[ai] = temp[bi];
// temp[bi] += t;
// }
// }
// __syncthreads();
// odata[2 * thid] = temp[2 * thid];
// odata[2 * thid + 1] = temp[2 * thid + 1];
//}
//
//__global__ void transform_scan_all(unsigned *idata, unsigned *odata, int n,
// unsigned bit) {
// int offset = 2 * blockDim.x * blockIdx.x;
// transform_scan(idata + offset, odata + offset, 2 * blockDim.x,
// [bit](unsigned x) {
// return is_zero(bit,x);
// });
//}
//__global__ void scan_all(unsigned *idata, unsigned *odata, int n) {
// int offset = 2 * blockDim.x * blockIdx.x;
// transform_scan(idata + offset, odata + offset, 2 * blockDim.x, id);
//}
//
//__device__ unsigned *scan_buffer;
//__global__ void radix_sort(unsigned *numbers, int n, unsigned bit) {
// int idx = threadIdx.x + blockDim.x * blockIdx.x;
// if (idx == 0) {
// scan_buffer = new unsigned[n];
// }
// int offset = 2 * blockDim.x * blockIdx.x;
// __syncthreads();
// transform_scan(numbers + offset, scan_buffer + offset, 2 * blockDim.x,
// [bit](unsigned x) {
// return is_zero(bit, x);
// });
// __syncthreads();
// const unsigned zeros = scan_buffer[n - 1] + is_zero(numbers[n - 1], bit);
// const unsigned tmp1 = numbers[2 * idx];
// const unsigned tmp2 = numbers[2 * idx + 1];
//
// __syncthreads();
// const auto new_index = [zeros,bit](unsigned x, unsigned i) {
// return is_zero(x,bit)
// ? scan_buffer[i]
// : i - scan_buffer[i] + zeros;
// };
// __syncthreads();
// numbers[new_index(tmp1, 2 * idx)] = tmp1;
// numbers[new_index(tmp2, 2 * idx + 1)] = tmp2;
//
// __syncthreads();
// if (idx == 0) {
// delete[] scan_buffer;
// }
//}
//Returns 1 if bit `bit` of x is clear, 0 if it is set (the radix-sort
//"goes to the zero partition" predicate).
__host__ __device__ unsigned is_zero(unsigned bit, unsigned x) {
	const unsigned mask = 1u << bit;
	return (x & mask) ? 0u : 1u;
}
//Work-efficient (Blelloch) exclusive prefix scan of data[0, n) in place.
//Requires n * sizeof(unsigned) bytes of dynamic shared memory and must be
//called by a full block with blockDim.x == n / 2; each thread handles
//elements 2*thid and 2*thid+1. The total of all n inputs is written to
//*sum by thread 0.
__device__ void block_scan(unsigned *data, int n, unsigned *sum) {
	extern __shared__ unsigned temp[];
	int thid = threadIdx.x;
	int offset = 1;
	//Load two elements per thread into shared memory.
	temp[2 * thid] = data[2 * thid];
	temp[2 * thid + 1] = data[2 * thid + 1];
	//Up-sweep (reduce) phase: build partial sums in place.
	for (int d = n >> 1; d > 0; d >>= 1) {
		__syncthreads();
		if (thid < d) {
			int ai = offset * (2 * thid + 1) - 1;
			int bi = offset * (2 * thid + 2) - 1;
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	//Thread 0 itself wrote temp[n-1] in the final up-sweep step (d == 1),
	//so it may read it here without another barrier. Publish the block
	//total, then clear the root for the exclusive down-sweep.
	if (thid == 0) {
		*sum = temp[n - 1];
		temp[n - 1] = 0;
	}
	//Down-sweep phase: walk back down the tree producing exclusive sums.
	for (int d = 1; d < n; d *= 2) {
		offset >>= 1;
		__syncthreads();
		if (thid < d) {
			int ai = offset * (2 * thid + 1) - 1;
			int bi = offset * (2 * thid + 2) - 1;
			unsigned t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	//Write the scanned values back to global memory.
	data[2 * thid] = temp[2 * thid];
	data[2 * thid + 1] = temp[2 * thid + 1];
}
//Exclusive-scan each block's 2*blockDim.x-element slice of `data`
//independently; the slice total is written to sums[blockIdx.x].
//Launch with dynamic shared memory for one slice (2 * blockDim.x
//unsigneds); `n` is unused here — coverage is fixed by the launch.
__global__ void partial_scan(unsigned *data, int n, unsigned *sums) {
	int offset = 2 * blockDim.x * blockIdx.x;
	block_scan(data + offset, 2 * blockDim.x, sums + blockIdx.x);
}
//Set data[0, n) to `value`; one thread per element, surplus threads exit.
__global__ void d_fill_n(unsigned *data, unsigned n, unsigned value) {
	const int gid = threadIdx.x + blockDim.x * blockIdx.x;
	if (gid >= n) {
		return;
	}
	data[gid] = value;
}
//After the per-block sums have themselves been scanned, add each block's
//base offset to its slice to turn the independent per-block scans into
//one global scan. Two elements per thread; no tail guard — the launch
//must exactly cover the data.
__global__ void add_sums(unsigned *numbers, unsigned *block_sums) {
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	numbers[2 * idx] += block_sums[blockIdx.x];
	numbers[2 * idx + 1] += block_sums[blockIdx.x];
}
//Map each input to the radix-sort predicate: new_numbers[i] = 1 if `bit`
//of numbers[i] is clear, else 0. One thread per element, no tail guard —
//the launch must exactly cover the array.
__global__ void d_transform(unsigned *numbers, unsigned *new_numbers,
		unsigned bit) {
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	new_numbers[idx] = is_zero(bit, numbers[idx]);
}
//Scatter step of one radix-sort pass on `bit`: elements with the bit
//clear keep their relative order at the front of out_numbers, elements
//with the bit set follow, using `scan` = exclusive scan of the is_zero
//predicate. `count` must be the total element count so that
//zeros = scan[count-1] + is_zero(last element) is the number of zeros.
//Each thread moves two elements; the launch must cover `count` exactly.
__global__ void rearange(unsigned *in_numbers, unsigned *out_numbers,
		unsigned *scan, unsigned count, unsigned bit) {
//	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	auto zeros = scan[count - 1] + is_zero(bit, in_numbers[count - 1]);
	int blockOffset = (2 * blockDim.x) * blockIdx.x;
	int myindex1 = blockOffset + 2 * threadIdx.x;
	int myindex2 = blockOffset + 2 * threadIdx.x + 1;
	int val1 = in_numbers[myindex1];
	int val2 = in_numbers[myindex2];
	//Zero-bit elements go to scan[i]; one-bit elements go after all the
	//zeros, offset by how many ones precede them (i - scan[i]).
	int index1 =
			is_zero(bit, val1) ?
					scan[myindex1] : myindex1 - scan[myindex1] + zeros;
	int index2 =
			is_zero(bit, val2) ?
					scan[myindex2] : myindex2 - scan[myindex2] + zeros;
	out_numbers[index1] = val1;
	out_numbers[index2] = val2;
}
/*========* host code *========*/
//Host driver: exclusive scan of d_numbers[0, count) on the device,
//scanning block-sized slices and recursing on the per-block totals.
//count is padded up to count_filled (a multiple of block_size); the
//buffer behind d_numbers must be large enough for the padded tail.
void scan(unsigned *d_numbers, const unsigned count,
		const unsigned block_size) {
	const unsigned count_filled = next_multiple(block_size, count);
	const unsigned block_dim = block_size / 2;
	//NOTE(review): grid_dim = count_filled / (block_size / 2) is twice the
	//number of block_size-element slices, so partial_scan below appears to
	//be launched with 2x the needed blocks and would index past
	//count_filled — verify the intended launch geometry.
	const unsigned grid_dim = count_filled / block_dim;
	unsigned *d_block_sums;
	cudaMalloc(&d_block_sums, (grid_dim * 2) * sizeof(unsigned));
	//Zero the padding tail so it does not disturb the scan.
	d_fill_n<<<grid_dim, block_dim>>>(d_numbers + count, count_filled - count, 0);
	partial_scan<<<grid_dim, block_dim, 2*block_size*sizeof(unsigned)>>>(d_numbers, count_filled, d_block_sums);
	if (count_filled > block_size) {
		//More than one slice: scan the per-slice totals recursively, then
		//fold each slice's base offset back in.
		scan(d_block_sums, grid_dim, block_size);
		add_sums<<<grid_dim, block_dim>>>(d_numbers, d_block_sums);
	}
	cudaFree(d_block_sums);
}
//Print the most recent CUDA error (if any), tagged with `str` — call
//sites pass __LINE__. cudaGetLastError also clears the sticky error.
void test_error(int str) {
	const cudaError_t status = cudaGetLastError();
	if (status == cudaSuccess) {
		return;
	}
	cout << str << ":" << cudaGetErrorString(status) << endl;
}
//LSD radix sort of N random 32-bit values on the device, one bit per
//pass: compute the is_zero predicate, scan it, scatter, then ping-pong
//the buffers. Prints the scan buffer every pass (debug) and finally
//whether the host-side result is sorted.
void foo() {
	srand(time(nullptr));
	constexpr auto BLOCK_SIZE = 4;
	constexpr auto N = 128*1024;
	static_assert((1u<<ilog2(BLOCK_SIZE)) == BLOCK_SIZE,"THREADS_PER_BLOCK muss eine 2er Potenz sein");
	constexpr auto N_FILLED = next_multiple(BLOCK_SIZE, N);
	const unsigned block_dim = BLOCK_SIZE / 2;
	const unsigned grid_dim = N_FILLED / BLOCK_SIZE;
	cout << "foooo:" <<(N == N_FILLED) << endl;
	CudaArray<unsigned> a { N_FILLED };
	//Small random values (0..15) so early bit passes do real work.
	std::generate_n(a.host(), a.size(), []() {return rand()%16;});
//	int x = N_FILLED;
//	std::generate_n(a.host(), a.size(), [x]() mutable {return x--;});
//	std::fill_n(a.host(), a.size(),0);
//	print(a);
	a.copyToDevice();
	CudaArray<unsigned> d_buffer { N_FILLED };
	CudaArray<unsigned> d_scan { N_FILLED };
//	unsigned *d_scan;
//	unsigned *d_buffer;
//	cudaMalloc(&d_scan, N_FILLED * sizeof(unsigned));
//	cudaMalloc(&d_buffer, N_FILLED * sizeof(unsigned));
	//One stable partition pass per bit, least significant first.
	for (int bit = 0; bit < 32; ++bit) {
		d_transform<<<grid_dim,BLOCK_SIZE>>>(a.device(), d_scan.device(), bit);
		test_error(__LINE__);
		scan(d_scan.device(), N, BLOCK_SIZE);
		test_error(__LINE__);
		rearange<<<grid_dim,block_dim>>>(a.device(), d_buffer.device(), d_scan.device(), N_FILLED, bit);
		test_error(__LINE__);
		d_scan.copyFromDevice();
		print(d_scan);
//		d_buffer.copyFromDevice();
//		print(d_buffer);
//		break;
		//Ping-pong: swap the raw device pointers of input and output
		//(CudaArray::device() returns the pointer by reference).
		unsigned *tmp = a.device();
		a.device() = d_buffer.device();
		d_buffer.device() = tmp;
	}
//	cudaFree(d_scan);
//	cudaFree(d_buffer);
	a.copyFromDevice();
//	print(a);
	cout << is_sorted(a.host(), a.host() + a.size()) << endl;
}
//Smoke test for partial_scan: scan 16 small random values in blocks of
//4 and print the per-block exclusive scans and block sums.
void test_partial_scan() {
	constexpr auto BLOCK_SIZE = 4;
	constexpr auto N = 16;
	static_assert((1u<<ilog2(BLOCK_SIZE)) == BLOCK_SIZE,"THREADS_PER_BLOCK muss eine 2er Potenz sein");
	constexpr auto N_FILLED = next_multiple(BLOCK_SIZE, N);
	CudaArray<unsigned> a { N_FILLED };
	//One sum per launched block (N / BLOCK_SIZE = 4 here; the
	//ilog2(N_FILLED) = 4 sizing only matches by coincidence).
	CudaArray<unsigned> sums { ilog2(N_FILLED) };
	std::generate_n(a.host(), a.size(), []() {return rand()%4;});
	print(a);
	a.copyToDevice();
	//BUG FIX: the dynamic shared-memory argument was BLOCK_SIZE *bytes*,
	//but block_scan stores BLOCK_SIZE unsigneds per block, i.e. it needs
	//BLOCK_SIZE * sizeof(unsigned) bytes; the old launch read and wrote
	//past the shared-memory allocation.
	partial_scan<<<N/BLOCK_SIZE,BLOCK_SIZE/2,BLOCK_SIZE*sizeof(unsigned)>>>(a.device(), a.size(), sums.device());
	a.copyFromDevice();
	sums.copyFromDevice();
	print(a);
	print(sums);
	auto error = cudaGetLastError();
	if (error != cudaSuccess) {
		cout << cudaGetErrorString(error) << endl;
	}
}
//void scan(unsigned *block_max_elem, unsigned n) {
// if (n <= THREADS_PER_BLOCK)
// return;
// const auto N = next_multiple(THREADS_PER_BLOCK, n / THREADS_PER_BLOCK);
// CudaArray<unsigned> max_elems { N };
//
// for (int i = 0; i < N; ++i) {
// max_elems.host()[i] = block_max_elem[(i + 1) * THREADS_PER_BLOCK - 1]
// + block_max;
// }
//
// max_elems.copyToDevice();
// scan_all<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK/2>>>(max_elems.device(),max_elems.device(),max_elems.size());
// max_elems.copyFromDevice();
//
// scan(max_elems.host(), N);
//
// for (int i = 0; i < N; ++i) {
// for (int j = 0; j < THREADS_PER_BLOCK; ++j) {
// block_max_elem[i * THREADS_PER_BLOCK + j] += max_elems.host()[i];
// }
// }
//}
//void foo() {
// srand(time(nullptr));
// CudaArray<unsigned> a { N_FILLED };
// CudaArray<unsigned> block_scan { N_FILLED };
// CudaArray<unsigned> block_max_elem { N_FILLED / THREADS_PER_BLOCK };
//
// std::fill_n(a.host(), N, 0);
//// std::generate_n(a.host(), a.size(), []() {return rand()%256;});
// unsigned x = 1;
//// std::generate_n(a.host(), N, [x]() mutable {return x++;});
// std::fill_n(a.host() + N, N_FILLED - N, UINT_MAX);
//// a.host()[0] = 3; //0
//// a.host()[1] = 0; //1
//// a.host()[2] = 2; //1
//// a.host()[3] = 1; //0
//// a.host()[4] = 3; //0
//// a.host()[5] = 0; //1
//// a.host()[6] = 2; //1
//// a.host()[7] = 1; //0
//
// print(a.host(), N);
//
// a.copyToDevice();
// for (unsigned bit = 0u; bit < 32; ++bit) {
// transform_scan_all<<<N_FILLED/THREADS_PER_BLOCK,THREADS_PER_BLOCK/2>>>(a.device(),block_scan.device(),N_FILLED,bit);
// block_scan.copyFromDevice();
// scan(block_scan.host(),block_scan.size());
// break;
// }
// a.copyFromDevice();
//
// bool ok = true;
// x = 1;
// for (int i = 0; i < N; ++i) {
// if (a.host()[i] != x++) {
// ok = false;
// cout << i << endl;
// break;
// }
// }
// cout << "ok: " << ok << endl;
// auto error = cudaGetLastError();
// if (error != cudaSuccess) {
// cout << cudaGetErrorString(error) << endl;
// }
// print(block_scan.host(), N);
//}
//Entry point: bail out if the device is compute-prohibited, then time
//one full radix sort (foo()) with CUDA events and print the duration.
int main(int argc, char **argv) {
	int devID = 0;
	cudaError_t error;
	cudaDeviceProp deviceProp;
	error = cudaGetDevice(&devID);
	error = cudaGetDeviceProperties(&deviceProp, devID);
	if (deviceProp.computeMode == cudaComputeModeProhibited) {
		return 1;
	}
	//NOTE(review): `error` holds only the last call's status, and this
	//check has an empty body — API errors are effectively ignored here.
	if (error != cudaSuccess) {
	}
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	foo();
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	float milliseconds = 0;
	cudaEventElapsedTime(&milliseconds, start, stop);
	cout << "time (" << milliseconds << " ms)" << endl;
	return 0;
}
|
19,975 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<iostream>
#include <sys/time.h>
#include<bits/stdc++.h>
using namespace std;
//One directed edge read from the input file (x = source, y = target).
struct edgepairs{
int x;
int y;
};
//Lexicographic (x, y) comparator for sorting edges.
//BUG FIX: the original returned true when both fields were equal, which
//violates the strict-weak-ordering requirement of std::sort (a comparator
//must return false for equal elements) and is undefined behavior.
bool compareTwoEdgePairs(edgepairs a, edgepairs b)
{
if (a.x != b.x)
return a.x < b.x;
return a.y < b.y;
}
// complete the following kernel...
// One block per vertex (launched <<<n,1>>>; only blockIdx.x is used):
// sum the updates pushed by the vertex's neighbours (CSR offsets gpuOA,
// adjacency gpuCA) and accumulate the total into gpulocals[node].
__global__ void dkernel_Adds(int *gpuOA, int *gpuCA, int *gpulocals,int *gpucurrentupdate){
unsigned node = blockIdx.x;
unsigned vert = 0;
vert = gpuOA[node]; // first edge offset for this vertex
unsigned add = 0;
for(; vert < gpuOA[node + 1]; ++vert ){
add = add + gpucurrentupdate[gpuCA[vert]];
}
gpulocals[node] += add;
}
// complete the following kernel...
// One block per vertex (launched <<<n,1>>>): take the minimum neighbour
// update (CSR offsets gpuOA, adjacency gpuCA) and fold it into the
// vertex's local value.
// NOTE(review): `var` is unsigned while the update arrays are int, so a
// negative update would wrap to a huge unsigned — assumes non-negative
// inputs; confirm against the input format.
__global__ void dkernel_Mins(int *gpuOA, int *gpuCA, int *gpulocals,int *gpucurrentupdate){
unsigned node = blockIdx.x;
unsigned vert = 0;
vert = gpuOA[node];
// BUG FIX: was pow(2, 30) — double-precision math in device code for a
// compile-time constant; 1u << 30 is the identical value computed in
// integer arithmetic.
unsigned var = 1u << 30;
for(; vert < gpuOA[node + 1]; ++vert ){
var = min(gpucurrentupdate[gpuCA[vert]], var);
}
gpulocals[node] = min(gpulocals[node], var);
}
// complete the following kernel...
// One block per vertex (launched <<<n,1>>>): take the maximum neighbour
// update and fold it into the vertex's local value.
// NOTE(review): `var` is unsigned while the update arrays are int — a
// negative update would wrap to a huge unsigned and dominate the max;
// assumes non-negative inputs — confirm against the input format.
__global__ void dkernel_Maxs(int *gpuOA, int *gpuCA, int *gpulocals,int *gpucurrentupdate){
unsigned node = blockIdx.x;
unsigned vert = 0;
vert = gpuOA[node];
unsigned var = 0;
for(; vert < gpuOA[node + 1]; ++vert ){
var = max(gpucurrentupdate[gpuCA[vert]], var);
}
gpulocals[node] = max(gpulocals[node], var);
}
//Entry point: read a graph (vertex/edge counts, then edge pairs) from
//argv[1], convert COO -> CSR, upload to the GPU, then process a batch of
//queries (0=add, 1=min, 2=max apply per-vertex neighbour updates on the
//device; 3=enumerate writes the current local values to argv[2]).
//NOTE(review): argc is not validated before argv[1]/argv[2] are used —
//confirm the expected invocation always supplies both file names.
int main(int argc,char **argv){
//variable declarations
int m,n;
int number;
int numofquery;
int op;
struct timeval t1, t2;
vector <double> kerneltime;
//File pointer declaration
FILE *filePointer;
//File Opening for read
char *filename = argv[1];
filePointer = fopen( filename , "r") ;
//checking if file ptr is NULL
if ( filePointer == NULL )
{
printf( "input.txt file failed to open." ) ;
return 0;
}
fscanf(filePointer, "%d", &n ); //scaning the number of vertices
fscanf(filePointer, "%d", &m ); //scaning the number of edges
//D.S to store the input graph in COO format
vector <edgepairs> COO(m);
//Reading from file and populate the COO
//Each edge line holds two ids; they are stored swapped (y first, x
//second) and only accepted when within [1, 10000].
for(int i=0 ; i<m ; i++ )
{
for(int j=0;j<2;j++){
if ( fscanf(filePointer, "%d", &number) != 1)
break;
if( j%2 == 0)
{
if(number >= 1 && number <= 10000)
COO[i].y = number;
}
else
{
if(number >= 1 && number <= 10000)
COO[i].x = number;
}
}
}
// COO done...
// sort the COO
sort(COO.begin(),COO.end(),compareTwoEdgePairs);
//sorting COO done..
// Converting the graph in COO format to CSR format..
// create the CSR
int *OA = (int *)malloc( (n+1)*sizeof(int)); //Offsets Array
for(int i=0;i<n+1;i++){
OA[i] = 0;
}
int *CA = (int *)malloc(m*sizeof(int)); //Coordinates Array
OA[0]=0;
//initialize the Coordinates Array
//Targets are stored 0-based (input ids are 1-based).
for(int i=0;i<m;i++){
if(COO[i].y >= 1 && COO[i].y <= 10000)
CA[i] = COO[i].y - 1;
}
//initialize the Offsets Array
for(int i=0;i<m;i++){
if(COO[i].x >= 1 && COO[i].x <= 10000)
OA[COO[i].x]++; //store the frequency..
}
for(int i=0;i<n;i++){
OA[i+1] += OA[i]; // do cumulative sum..
}
// Converting the graph to CSR done..
// copy initial local values to the array from the file
int *initlocalvals = (int *)malloc(n*sizeof(int));;
for(int i=0 ; i<n ; i++ )
{
if ( fscanf(filePointer, "%d", &number) != 1)
break;
initlocalvals[i] = number;
}
// copying local vals end..
// get number of queries from the file
fscanf(filePointer, "%d", &numofquery);
//copy OA,CA and initlocalvals to the GPU Memory
int *gpuOA, *gpuCA, *gpulocals;
cudaMalloc( &gpuOA, sizeof(int) * (1+n) );
cudaMalloc( &gpuCA, sizeof(int) * m );
cudaMalloc( &gpulocals, sizeof(int) * n );
cudaMemcpy(gpuOA, OA, sizeof(int) * (1+n), cudaMemcpyHostToDevice);
cudaMemcpy(gpuCA, CA, sizeof(int) * m, cudaMemcpyHostToDevice);
cudaMemcpy(gpulocals, initlocalvals, sizeof(int) * n, cudaMemcpyHostToDevice);
int *currentupdate = (int *)malloc(n*sizeof(int)); // array to store the updates that are pushed by each vertex to there neighbors
int *gpucurrentupdate; // same as above but on GPU
cudaMalloc( &gpucurrentupdate, sizeof(int) * n );
int *results = (int *)malloc(n*sizeof(int)); // storing the results from GPU to CPU for the enumerate query
// open the output.txt to write the query results
char *fname = argv[2];
FILE *fptr;
fptr = fopen(fname,"w");
for(int i=0;i<numofquery;i++){
//read the operator
fscanf(filePointer, "%d", &op);
if(op != 3){ // if operator is other then enumerate (i.e. +,min,max)
// read the current updates in the array
for(int j=0 ; j<n ; j++ )
{
if ( fscanf(filePointer, "%d", &number) != 1)
break;
currentupdate[j] = number;
}
// copy current updates to gpu
cudaMemcpy(gpucurrentupdate, currentupdate, sizeof(int) * n, cudaMemcpyHostToDevice);
//kernel launches
//Each kernel runs one single-thread block per vertex and is timed
//wall-clock around the launch + device sync.
if(op == 0) {
gettimeofday(&t1, 0);
dkernel_Adds<<<n,1>>>(gpuOA,gpuCA,gpulocals,gpucurrentupdate);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
}
if(op == 1) {
gettimeofday(&t1, 0);
dkernel_Mins<<<n,1>>>(gpuOA,gpuCA,gpulocals,gpucurrentupdate);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
}
if(op == 2) {
gettimeofday(&t1, 0);
dkernel_Maxs<<<n,1>>>(gpuOA,gpuCA,gpulocals,gpucurrentupdate);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
}
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0; // Time taken by kernel in seconds
kerneltime.push_back(time);
printf("Time taken by kernel to execute is: %.6f ms\n", time);
}
else{ // if operator is enumnerate then store the results to file
//print local values of each vertices.
cudaMemcpy(results, gpulocals, n * sizeof(int), cudaMemcpyDeviceToHost); // get each locals from GPU
for(int j=0;j<n;j++){
fprintf(fptr ,"%d ", results[j] );
}
fprintf(fptr,"\n");
/*
for(int j=0;j<n + 1;j++){
fprintf(fptr ,"%d ", OA[j] );
}
fprintf(fptr,"\n");
for(int j=0;j<m;j++){
fprintf(fptr ,"%d ", CA[j] );
}
fprintf(fptr,"\n");
for(int j=0;j<m;j++){
fprintf(fptr ,"%d %d ", COO[j].y, COO[j].x );
}
fprintf(fptr,"\n");
*/
}
}
int nall = kerneltime.size();
double sumtime=0;
for(int i=0;i<nall;i++){
sumtime += kerneltime[i];
}
// print the time taken by all the kernels of the current test-case
cout << "\ntotal time taken by the current test-case is " << sumtime << " ms\n";
fclose(fptr);
fclose(filePointer);
return 0;
}
|
19,976 | //
// CasAES128_CUDA.c
// CasAES128_CUDA
// Created by Carter McCardwell on 11/11/14.
//
#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <cuda_runtime.h>
const int Nb_h = 4;
const int Nr_h = 10;
const int Nk_h = 4;
const uint8_t s_h[256]=
{
0x63, 0x7C, 0x77, 0x7B, 0xF2, 0x6B, 0x6F, 0xC5, 0x30, 0x01, 0x67, 0x2B, 0xFE, 0xD7, 0xAB, 0x76,
0xCA, 0x82, 0xC9, 0x7D, 0xFA, 0x59, 0x47, 0xF0, 0xAD, 0xD4, 0xA2, 0xAF, 0x9C, 0xA4, 0x72, 0xC0,
0xB7, 0xFD, 0x93, 0x26, 0x36, 0x3F, 0xF7, 0xCC, 0x34, 0xA5, 0xE5, 0xF1, 0x71, 0xD8, 0x31, 0x15,
0x04, 0xC7, 0x23, 0xC3, 0x18, 0x96, 0x05, 0x9A, 0x07, 0x12, 0x80, 0xE2, 0xEB, 0x27, 0xB2, 0x75,
0x09, 0x83, 0x2C, 0x1A, 0x1B, 0x6E, 0x5A, 0xA0, 0x52, 0x3B, 0xD6, 0xB3, 0x29, 0xE3, 0x2F, 0x84,
0x53, 0xD1, 0x00, 0xED, 0x20, 0xFC, 0xB1, 0x5B, 0x6A, 0xCB, 0xBE, 0x39, 0x4A, 0x4C, 0x58, 0xCF,
0xD0, 0xEF, 0xAA, 0xFB, 0x43, 0x4D, 0x33, 0x85, 0x45, 0xF9, 0x02, 0x7F, 0x50, 0x3C, 0x9F, 0xA8,
0x51, 0xA3, 0x40, 0x8F, 0x92, 0x9D, 0x38, 0xF5, 0xBC, 0xB6, 0xDA, 0x21, 0x10, 0xFF, 0xF3, 0xD2,
0xCD, 0x0C, 0x13, 0xEC, 0x5F, 0x97, 0x44, 0x17, 0xC4, 0xA7, 0x7E, 0x3D, 0x64, 0x5D, 0x19, 0x73,
0x60, 0x81, 0x4F, 0xDC, 0x22, 0x2A, 0x90, 0x88, 0x46, 0xEE, 0xB8, 0x14, 0xDE, 0x5E, 0x0B, 0xDB,
0xE0, 0x32, 0x3A, 0x0A, 0x49, 0x06, 0x24, 0x5C, 0xC2, 0xD3, 0xAC, 0x62, 0x91, 0x95, 0xE4, 0x79,
0xE7, 0xC8, 0x37, 0x6D, 0x8D, 0xD5, 0x4E, 0xA9, 0x6C, 0x56, 0xF4, 0xEA, 0x65, 0x7A, 0xAE, 0x08,
0xBA, 0x78, 0x25, 0x2E, 0x1C, 0xA6, 0xB4, 0xC6, 0xE8, 0xDD, 0x74, 0x1F, 0x4B, 0xBD, 0x8B, 0x8A,
0x70, 0x3E, 0xB5, 0x66, 0x48, 0x03, 0xF6, 0x0E, 0x61, 0x35, 0x57, 0xB9, 0x86, 0xC1, 0x1D, 0x9E,
0xE1, 0xF8, 0x98, 0x11, 0x69, 0xD9, 0x8E, 0x94, 0x9B, 0x1E, 0x87, 0xE9, 0xCE, 0x55, 0x28, 0xDF,
0x8C, 0xA1, 0x89, 0x0D, 0xBF, 0xE6, 0x42, 0x68, 0x41, 0x99, 0x2D, 0x0F, 0xB0, 0x54, 0xBB, 0x16
};
uint8_t Rcon_h[256] = {
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39,
0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b,
0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63,
0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd,
0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d
};
__constant__ uint8_t s[256];
__constant__ int Nb;
__constant__ int Nr;
__constant__ int Nk;
__constant__ uint32_t ek[44];
//Convenience wrapper: gpuErrchk(cudaCall()) checks the returned status.
//BUG FIX: the macro expanded to gpuAssert(ans, __FILE__, __LINE__), but
//no gpuAssert is defined here and the visible helper cudaDevAssist takes
//(code, line) — any use of the macro failed to compile. It now calls the
//real helper with the matching argument list.
#define gpuErrchk(ans) { cudaDevAssist((ans), __LINE__); }
//Print a diagnostic for a failed CUDA call and, when abort is true
//(the default), terminate with the error code as exit status.
inline void cudaDevAssist(cudaError_t code, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"cudaDevAssistant: %s %d\n", cudaGetErrorString(code), line);
if (abort) exit(code);
}
}
//Host-side AES SubWord: substitute each of the four bytes of `word`
//through the S-box table s_h.
uint32_t sw(uint32_t word)
{
	union {
		uint32_t word;
		uint8_t bytes[4];
	} subWord __attribute__ ((aligned));
	subWord.word = word;
	for (int i = 0; i < 4; i++)
	{
		subWord.bytes[i] = s_h[subWord.bytes[i]];
	}
	return subWord.word;
}
//Device AES SubBytes: substitute all 16 state bytes through the S-box
//held in constant memory.
__device__ void sb(uint8_t* in)
{
	for (int i = 15; i >= 0; i--)
	{
		in[i] = s[in[i]];
	}
}
//Device AES MixColumns: multiply each state column by the fixed
//polynomial {03}x^3 + {01}x^2 + {01}x + {02} over GF(2^8) (FIPS-197
//section 5.1.3). Column i consists of arr[i], arr[4+i], arr[8+i],
//arr[12+i] (row-major 4x4 state).
__device__ void mc(uint8_t* arr)
{
for (int i = 0; i < 4; i++)
{
uint8_t a[4]; //original column bytes
uint8_t b[4]; //column bytes multiplied by {02} (xtime)
uint8_t c;
uint8_t h;
for(c=0;c<4;c++) {
a[c] = arr[(4*c+i)];
//h becomes 0xFF when the top bit of the byte is set (arithmetic
//shift replicates the sign bit), selecting the 0x1B reduction by
//the AES field polynomial after the doubling below.
h = (uint8_t)((signed char)arr[(4*c+i)] >> 7);
b[c] = arr[(4*c+i)] << 1;
b[c] ^= 0x1B & h;
}
//Output row r = {02}*a[r] ^ {03}*a[r+1] ^ a[r+2] ^ a[r+3],
//with {03}*x expressed as b[x] ^ a[x].
arr[(i)] = b[0] ^ a[3] ^ a[2] ^ b[1] ^ a[1];
arr[(4+i)] = b[1] ^ a[0] ^ a[3] ^ b[2] ^ a[2];
arr[(8+i)] = b[2] ^ a[1] ^ a[0] ^ b[3] ^ a[3];
arr[(12+i)] = b[3] ^ a[2] ^ a[1] ^ b[0] ^ a[0];
}
}
//Device AES ShiftRows: row r of the row-major 4x4 state
//(arr[4r .. 4r+3]) is rotated left by r byte positions.
__device__ void sr(uint8_t* arr)
{
	uint8_t rotated[16];
	for (int row = 0; row < 4; row++)
	{
		for (int col = 0; col < 4; col++)
		{
			//Element (row, col) takes the value r positions to its right,
			//wrapping within the row.
			rotated[4 * row + col] = arr[4 * row + ((col + row) & 3)];
		}
	}
	for (int i = 0; i < 16; i++)
	{
		arr[i] = rotated[i];
	}
}
// RotWord (host side): rotate the four bytes of the word one slot through the
// union's byte-array view, preserving the exact byte-order semantics of the
// original temp-variable implementation.
uint32_t rw(uint32_t word)
{
    union {
        uint8_t bytes[4];
        uint32_t word;
    } subWord __attribute__ ((aligned));
    subWord.word = word;
    // Cascade: bytes[3] <- bytes[2] <- bytes[1] <- bytes[0] <- old bytes[3].
    uint8_t saved = subWord.bytes[3];
    subWord.bytes[3] = subWord.bytes[2];
    subWord.bytes[2] = subWord.bytes[1];
    subWord.bytes[1] = subWord.bytes[0];
    subWord.bytes[0] = saved;
    return subWord.word;
}
// AES key expansion (FIPS-197 §5.2) on the host: expands the 16-byte cipher
// key `pk` into Nb_h*(Nr_h+1) round-key words written to `out`. Key byte 4i+j
// is placed in bytes[3-j] of word i, matching the byte indexing used by
// sw()/rw() and the device-side ark().
//
// Fix: removed the block `if (i-4 % Nk_h == 0) temp.word = sw(temp.word);`.
// Due to operator precedence it parsed as `i - (4 % Nk_h) == 0`, which is
// false for every i in [Nk_h, Nb_h*(Nr_h+1)) — dead code — and the clause has
// no counterpart in FIPS-197 (the extra SubWord case is the Nk>6 branch).
void K_Exp(uint8_t* pk, uint32_t* out)
{
    int i = 0;
    union {
        uint8_t bytes[4];
        uint32_t word;
    } temp __attribute__ ((aligned));
    union {
        uint8_t bytes[4];
        uint32_t word;
    } univar[44] __attribute__ ((aligned));
    // The first Nk words are the cipher key itself.
    for (i = 0; i < Nk_h; i++)
    {
        univar[i].bytes[3] = pk[i*4];
        univar[i].bytes[2] = pk[i*4+1];
        univar[i].bytes[1] = pk[i*4+2];
        univar[i].bytes[0] = pk[i*4+3];
    }
    // Each subsequent word is the previous word (possibly transformed) XOR
    // the word Nk positions back.
    for (i = Nk_h; i < Nb_h*(Nr_h+1); i++)
    {
        temp.word = univar[i-1].word;
        if (i % Nk_h == 0)
        {
            // Start of a key-length group: RotWord + SubWord + Rcon.
            temp.word = (sw(rw(temp.word)));
            temp.bytes[3] = temp.bytes[3] ^ (Rcon_h[i/Nk_h]);
        }
        else if (Nk_h > 6 && i % Nk_h == 4)
        {
            // AES-256 only: extra SubWord in the middle of each group.
            temp.word = sw(temp.word);
        }
        univar[i].word = univar[i-Nk_h].word ^ temp.word;
    }
    for (i = 0; i < 44; i++)
    {
        out[i] = univar[i].word;
    }
}
// AddRoundKey: XOR four consecutive round-key words ek[strD..strD+3] into the
// state. Word w covers state column w (state[w], state[4+w], state[8+w],
// state[12+w]), consuming the word's bytes in the order bytes[3], bytes[2],
// bytes[1], bytes[0] — the same union byte indexing K_Exp produced.
// Rewritten from 16 hand-unrolled XOR statements (with four separately named
// unions) to two small loops with identical access pattern.
__device__ void ark(uint8_t* state, int strD)
{
    for (int w = 0; w < 4; w++)
    {
        union {
            uint32_t word;
            uint8_t bytes[4];
        } rk __attribute__ ((aligned));
        rk.word = ek[strD + w];
        for (int r = 0; r < 4; r++)
        {
            state[4 * r + w] ^= rk.bytes[3 - r];
        }
    }
}
// One thread encrypts one 16-byte AES block in place. The thread's block
// lives at in[16*tid .. 16*tid+15]; round count Nr and block-word count Nb
// come from device constant memory.
__global__ void cudaRunner(uint8_t *in)
{
    uint8_t state[16];
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    uint8_t *myBlock = in + tid * 16;
    for (int i = 0; i < 16; i++)
        state[i] = myBlock[i];
    ark(state, 0);                      // initial round-key addition
    for (int round = 1; round < Nr; round++)
    {
        sb(state);
        sr(state);
        mc(state);
        ark(state, round * Nb);
    }
    sb(state);                          // final round omits MixColumns
    sr(state);
    ark(state, Nr * Nb);
    for (int i = 0; i < 16; i++)
        myBlock[i] = state[i];
}
// Entry point: argv[1] = 'h' (hex input) or 'a' (ASCII input), argv[2] = data
// file, argv[3] = key file (16 whitespace-separated hex bytes), argv[4] =
// output file (hex ciphertext). Data is processed in batches of up to 512
// 16-byte blocks, one CUDA thread per block.
//
// Fixes in this revision:
//  - fscanf("%x") previously wrote a full unsigned int through uint8_t
//    pointers (key and state bytes) — undefined behaviour that clobbered
//    adjacent bytes; reads now go through an unsigned int temporary.
//  - the hex reader treated a 0-conversion fscanf result (!= EOF) as valid
//    data, looping forever on malformed input; it now requires exactly one
//    conversion.
//  - the hex output loops wrote all 3 chars of the sprintf buffer, embedding
//    a NUL byte after every ciphertext byte; only the two digits are written.
//  - deprecated cudaThreadSynchronize() replaced with cudaDeviceSynchronize();
//    removed the unused BLOCKS constant.
int main(int argc, const char * argv[])
{
    printf("CasAES_CUDA Hyperthreaded AES-128 Encryption for CUDA processors - compiled 3/25/2015 Rev. 4\nCarter McCardwell, Northeastern University NUCAR - http://coe.neu.edu/~cmccardw - mccardwell.net\nPlease Wait...\n");
    clock_t c_start, c_stop;
    c_start = clock();
    FILE *infile;
    FILE *keyfile;
    FILE *outfile;
    infile = fopen(argv[2], "r");
    if (infile == NULL) { printf("error (infile)\n"); return(1); }
    keyfile = fopen(argv[3], "rb");
    if (keyfile == NULL) { printf("error (keyfile)\n"); return(1); }
    outfile = fopen(argv[4], "w");
    if (outfile == NULL) { printf("error (outfile permission error, run with sudo)\n"); return(1); }
    // Input interpretation: hex bytes or raw ASCII characters.
    bool hexMode = false;
    if (strcmp(argv[1], "h") == 0) { hexMode = true; }
    else if (strcmp(argv[1], "a") == 0) { hexMode = false; }
    else { printf("error: first argument must be \'a\' for ASCII interpretation or \'h\' for hex interpretation\n"); return(1); }
    uint8_t key[16];
    uint32_t ek_h[44];
    for (int i = 0; i < 16; i++)
    {
        // %x stores a full unsigned int — never scan directly into uint8_t.
        unsigned int keyByte = 0;
        fscanf(keyfile, "%x", &keyByte);
        key[i] = (uint8_t)keyByte;
    }
    K_Exp(key, ek_h);
    // Push AES parameters, the S-box and the expanded key to device constant
    // memory.
    cudaSetDevice(0);
    cudaDevAssist(cudaMemcpyToSymbol(Nk, &Nk_h, sizeof(int), 0, cudaMemcpyHostToDevice), 535, true);
    cudaDevAssist(cudaMemcpyToSymbol(Nr, &Nr_h, sizeof(int), 0, cudaMemcpyHostToDevice), 543, true);
    cudaDevAssist(cudaMemcpyToSymbol(Nb, &Nb_h, sizeof(int), 0, cudaMemcpyHostToDevice), 903, true);
    cudaDevAssist(cudaMemcpyToSymbol(s, &s_h, 256*sizeof(uint8_t), 0, cudaMemcpyHostToDevice), 920, true);
    cudaDevAssist(cudaMemcpyToSymbol(ek, &ek_h, 44*sizeof(uint32_t), 0, cudaMemcpyHostToDevice), 823, true);
    cudaDeviceSynchronize();
    const int RUNNING_THREADS = 512;   // 16-byte states per kernel launch
    uint8_t *devState = NULL;
    cudaDevAssist(cudaMalloc((void**)&devState, RUNNING_THREADS*16*sizeof(uint8_t)), 425, true);
    uint8_t states[RUNNING_THREADS][16] = { 0x00 };
    int ch = 0;
    int spawn = 0;                     // valid states gathered this cycle
    int end = 1;
    while (end)
    {
        spawn = 0;
        for (int i = 0; i < RUNNING_THREADS; i++) // gather up to RUNNING_THREADS states; 1 worker per state
        {
            spawn++;
            for (int ix = 0; ix < 16; ix++)
            {
                if (hexMode)
                {
                    unsigned int dataByte = 0;
                    if (fscanf(infile, "%x", &dataByte) == 1) { states[i][ix] = (uint8_t)dataByte; }
                    else
                    {
                        // Partial final block: zero-pad; empty block: undo count.
                        if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } }
                        else { spawn--; }
                        i = RUNNING_THREADS + 1; // also terminates the outer loop
                        end = 0;
                        break;
                    }
                }
                else
                {
                    ch = getc(infile);
                    if (ch != EOF) { states[i][ix] = ch; }
                    else
                    {
                        if (ix > 0) { for (int ixx = ix; ixx < 16; ixx++) { states[i][ixx] = 0x00; } }
                        else { spawn--; }
                        i = RUNNING_THREADS + 1;
                        end = 0;
                        break;
                    }
                }
            }
        }
        // Transpose each 16-byte block into the column-major state layout the
        // kernel expects: temp[(c%4)*4 + c/4] = states[i][c] (replaces the 16
        // hand-written single-byte memcpy calls).
        for (int i = 0; i < spawn; i++)
        {
            uint8_t temp[16];
            for (int c = 0; c < 16; c++) { temp[(c % 4) * 4 + c / 4] = states[i][c]; }
            for (int c = 0; c < 16; c++) { states[i][c] = temp[c]; }
        }
        cudaDevAssist(cudaMemcpy(devState, *states, spawn*16*sizeof(uint8_t), cudaMemcpyHostToDevice), 426, true);
        cudaDevAssist(cudaDeviceSynchronize(), 268, true);
        cudaRunner<<<1,spawn>>>(devState);
        cudaDevAssist(cudaDeviceSynchronize(), 270, true);
        cudaDevAssist(cudaMemcpy(*states, devState, spawn*16*sizeof(uint8_t), cudaMemcpyDeviceToHost), 431, true);
        // Emit ciphertext as hex, un-transposing on the fly: byte order is
        // states[i][ix], [ix+4], [ix+8], [ix+12] for ix = 0..3.
        for (int i = 0; i < spawn; i++)
        {
            for (int ix = 0; ix < 4; ix++)
            {
                char hex[3];
                for (int part = 0; part < 4; part++)
                {
                    sprintf(hex, "%02x", states[i][ix + 4*part]);
                    putc(hex[0], outfile);
                    putc(hex[1], outfile); // only the 2 digits — not the NUL
                }
            }
        }
    }
    c_stop = clock();
    float diff = (((float)c_stop - (float)c_start) / CLOCKS_PER_SEC ) * 1000;
    printf("Done - Time taken: %f ms\n", diff);
    cudaFree(devState);
    cudaDeviceReset();
    fclose(infile);
    fclose(outfile);
    fclose(keyfile);
    return 0;
}
|
19,977 | #include "includes.h"
/* Block-structured product: for each of the num_bloques blocks, multiply the
 * thread's matrix row (nc entries) by the matching nc-row slice of u (m
 * columns) and store the result. Thread x-index selects the row, y-index the
 * output column. */
__global__ void calcularBloques(int *matriz, int *u, int *resultado, int num_bloques, int nc, int m ){
    int fila = threadIdx.x + blockIdx.x*blockDim.x;
    int columna = threadIdx.y + blockIdx.y*blockDim.y;
    for (int bloque = 0; bloque < num_bloques; bloque++) {
        int acumulador = 0;
        for (int l = 0; l < nc; l++) {
            acumulador += matriz[l + fila*nc] * u[columna + m*(l + bloque*nc)];
        }
        resultado[columna + m*(fila + bloque*nc)] = acumulador;
    }
}
19,978 | #define TILE_DIM 32
// Tiled matrix-vector product: result = matrix * vector, with matrix of size
// matrixRows x matrixColumns in row-major order. Each thread produces one
// output row (index = blockIdx.x * blockDim.x + threadIdx.x).
// NOTE(review): assumes blockDim.x == TILE_DIM — vector_tile and the tile
// columns are indexed directly by threadIdx.x — confirm at the launch site.
template<typename T>
__device__ void matrixDotVector(const T* matrix, const T* vector, T* result,
const int matrixRows, const int matrixColumns) {
// One TILE_DIM x TILE_DIM strip of the matrix plus the matching vector slice.
__shared__ T matrix_tile[TILE_DIM][TILE_DIM];
__shared__ T vector_tile[TILE_DIM];
int bx = blockIdx.x;
int tx = threadIdx.x;
int baseRow = bx * blockDim.x;
int index = baseRow + tx;
T resultValue = 0;
// Walk the columns in TILE_DIM-wide strips (ceil division for the tail).
for (int t = 0; t < (matrixColumns - 1) / TILE_DIM + 1; t++) {
int column = t * TILE_DIM + tx;
if (column < matrixColumns) {
vector_tile[tx] = vector[column];
// Thread tx fills tile column tx; rows past matrixRows are zero-padded.
for (int i = 0; i < TILE_DIM; i++) {
int row = baseRow + i;
if (row < matrixRows) {
matrix_tile[i][tx] = matrix[row * matrixColumns + column];
} else {
matrix_tile[i][tx] = 0;
}
}
} else {
// Strip extends past the last column: contribute zeros so the dot
// product below needs no extra bounds checks.
vector_tile[tx] = 0;
for (int i = 0; i < TILE_DIM; i++) {
matrix_tile[i][tx] = 0;
}
}
__syncthreads();
// Transposed read: matrix_tile[tx][i] is element (baseRow+tx, t*TILE_DIM+i),
// so this accumulates row (baseRow+tx) dot the current vector slice.
for (int i = 0; i < TILE_DIM; i++) {
resultValue += matrix_tile[tx][i] * vector_tile[i];
}
__syncthreads(); // keep the tile intact until every thread has consumed it
}
if (index < matrixRows) {
result[index] = resultValue;
}
}
19,979 | /*
============================================================================
Name : sem1.cu
Author : maminov
Version :
Copyright : copyleft
Description : CUDA compute reciprocals
============================================================================
*/
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <iostream>
#include <numeric>
#include <stdlib.h>
static void CheckCudaErrorAux(const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* CUDA kernel that computes reciprocal values for a given vector
*/
/**
 * Explicit 1-D heat-equation stepper over a rod with ghost cells: `data` has
 * size+2 entries and interior cell k lives at data[k+1]. Each thread advances
 * one interior cell for `iterations` Euler steps:
 *   u[k] += (u[k-1] - 2*u[k] + u[k+1]) * dt / dx^2
 * Thread 0 additionally injects tempPerIteration into the right ghost cell
 * data[size+1] every step (a heat source at that end).
 * NOTE(review): the update is done in place with the barrier only *after* the
 * write, so neighbour reads race with other threads' writes within a step;
 * the barrier also sits inside `if (idx < size)` (divergent when size is not
 * a multiple of blockDim.x) and never synchronizes across blocks. The caller
 * launches a single block for the default sizes, but the result is still
 * order-dependent — consider double buffering. TODO confirm intent.
 */
__global__ void heatKernel(float *data, unsigned size, unsigned iterations, float tempPerIteration, float dx, float dt) {
unsigned idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < size)
for (int i = 0; i < iterations; ++i) {
if (idx == 0)
data[size + 1] += tempPerIteration;
data[idx + 1] += (data[idx - 1 + 1] - 2 * data[idx + 1] + data[idx + 1 + 1]) * dt / (dx*dx);
__syncthreads();
}
}
/* Zero-fill the first `size` entries of `data`. */
void initialize(float *data, unsigned size)
{
	unsigned i = 0;
	while (i < size) {
		data[i] = 0.;
		++i;
	}
}
/* Host driver: simulate 5 s of heat flow along a 10 m rod (10 cells/m,
 * 10 steps/s) on the GPU, then print every interior cell.
 * Fix: kernel launches do not return a cudaError_t, so launch-configuration
 * failures were silently dropped; cudaGetLastError() is now checked right
 * after the launch (every other CUDA call in this file was already checked).
 */
int main(void)
{
	static const int SIM_TIME = 5;
	static const int STEPS_PER_SECOND = 10;
	static const int CYL_LENGTH = 10;
	static const int STEPS_PER_METER = 10;
	static const int WORK_SIZE = CYL_LENGTH * STEPS_PER_METER;
	static const int BLOCK_SIZE = 256;
	static const float TEMP_PER_SEC = 5.;
	/* +2 for the two ghost cells at the rod ends. */
	float *hostData = new float[WORK_SIZE + 2];
	float *gpuData;
	initialize(hostData, WORK_SIZE + 2);
	CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuData, sizeof(float)*(WORK_SIZE + 2)));
	CUDA_CHECK_RETURN(cudaMemcpy(gpuData, hostData, sizeof(float)*(WORK_SIZE + 2), cudaMemcpyHostToDevice));
	float *result = new float[WORK_SIZE + 2];
	const int blockCount = (WORK_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE;
	heatKernel <<< blockCount, BLOCK_SIZE >>> (gpuData, WORK_SIZE, SIM_TIME * STEPS_PER_SECOND, TEMP_PER_SEC / (STEPS_PER_SECOND*1.0f), CYL_LENGTH / (STEPS_PER_METER*1.0f), SIM_TIME / (STEPS_PER_SECOND*1.0f));
	CUDA_CHECK_RETURN(cudaGetLastError());
	/* The blocking copy below also waits for the kernel to finish. */
	CUDA_CHECK_RETURN(cudaMemcpy(result, gpuData, sizeof(float)*(WORK_SIZE + 2), cudaMemcpyDeviceToHost));
	/* Verify the results */
	for (int i = 0; i < WORK_SIZE; ++i)
		std::cout << result[i + 1] << std::endl;
	/* Free memory */
	CUDA_CHECK_RETURN(cudaFree(gpuData));
	delete[] hostData;
	delete[] result;
	return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
/**
 * Report a failed CUDA runtime call — the statement text, the error string
 * and the file:line of the call site — to stderr and terminate the process.
 * Does nothing when the call succeeded.
 */
static void CheckCudaErrorAux(const char *file, unsigned line, const char *statement, cudaError_t err)
{
	if (err != cudaSuccess) {
		std::cerr << statement << " returned " << cudaGetErrorString(err)
				<< "(" << err << ") at " << file << ":" << line << std::endl;
		exit(1);
	}
}
|
19,980 | #include<cuda.h>
#include<stdio.h>
#include<math.h>
#include<ctime>
// Square matrix multiply C = A * B for n x n row-major matrices: one thread
// per output element (x-index = column, y-index = row).
__global__
void vecMulMatrixKernel(float* A, float* B, float* C, int n){
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int fila = threadIdx.y + blockDim.y * blockIdx.y;
    if (fila >= n || col >= n)
        return;
    float acumulado = 0.0;
    for (int k = 0; k < n; k++) {
        acumulado += A[fila*n + k] * B[k*n + col];
    }
    C[fila*n + col] = acumulado;
}
|
19,981 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define MINVAL 1e-7
// Error-check wrapper for CUDA runtime calls: on failure prints file, line
// and the runtime's error string to stderr (execution continues).
// Fixed to the canonical do { ... } while (0) form: the previous
// `{ ... } while (0)` expansion left a dangling while-statement after the
// block, which miscompiles when the macro is used as an if/else body.
#define CSC(call) do { \
cudaError err = call; \
if(err!=cudaSuccess) \
{ \
fprintf(stderr, "CUDA error in file '%s' in line %i: %s.\n", \
__FILE__, __LINE__, cudaGetErrorString(err)); \
} \
} while (0)
/* Swap rows *i and *k of the column-major (*Dev_size x *Dev_size) matrix:
 * one thread per column, exchanging that column's two entries. */
__global__ void Permute(double* Dev_Mtr, int* i, int* k, int* Dev_size)
{
	int col = blockDim.x*blockIdx.x + threadIdx.x;
	int n = *Dev_size;
	if (col >= n)
		return;
	int posI = col*n + (*i);
	int posK = col*n + (*k);
	double guardado = Dev_Mtr[posI];
	Dev_Mtr[posI] = Dev_Mtr[posK];
	Dev_Mtr[posK] = guardado;
}
// Single-thread pivot search: scan column i (rows i..Size-1 of the
// column-major matrix, i.e. Mtr[i*Size + k]) for the entry of largest
// absolute value. Writes the winning row index to *strnum, or -1 when every
// candidate is below MINVAL — the matrix is then singular (determinant 0).
// Fix: removed the dead store `*strnum = *strnum + 1;` that was immediately
// overwritten by `*strnum = k;` on the next line.
__global__ void MaxElement(double* Mtr, int Size, int i, int*strnum)
{
	double MaxValue = Mtr[i*Size+i];
	*strnum = i;
	for(int k = i; k < Size; k++)
	{
		if(fabs(Mtr[i*(Size)+k]) > fabs(MaxValue))
		{
			*strnum = k;
			MaxValue = Mtr[i*(Size)+k];
		}
	}
	// No usable pivot: signal singularity to the host.
	if(fabs(MaxValue) < MINVAL)
	{
		*strnum = -1;
	}
}
/* One Gaussian-elimination step: for every row below pivot row i, add the
 * multiple of row i that zeroes its column-i entry. Layout is column-major:
 * element (row r, col c) sits at Mtr[c*Size + r]. */
__global__ void Gaus(double* Mtr, int Size, int i)
{
	int fila = blockIdx.x*blockDim.x + threadIdx.x;
	if (fila <= i || fila >= Size)
		return;
	double factor = -Mtr[i*Size + fila] / Mtr[i*Size + i];
	for (int col = i; col < Size; col++) {
		Mtr[col*Size + fila] += Mtr[col*Size + i] * factor;
	}
}
/* Compute the determinant of a Size x Size matrix read from stdin (row by
 * row, stored column-major) via GPU Gaussian elimination with partial
 * pivoting, and print it in %e format.
 * Fixes: the two cudaMemcpy calls around MaxElement were the only unchecked
 * CUDA calls (now wrapped in CSC); pow(exp(1.0), Det) simplified to exp(Det);
 * device and host buffers are now released on every exit path. */
int main()
{
	int Size;
	int hostDet = 1;           // 0 once a zero pivot column is found (singular)
	int HSTcountPerm = 0;      // row swaps performed; odd count flips the sign
	scanf("%d", &Size);
	if (Size == 0) return 0;
	double *Mtr = (double*)malloc(Size*Size*sizeof(double));
	// Element (row i, col j) is stored at Mtr[j*Size + i].
	for(int i = 0; i < Size; i++)
	{
		for(int j = 0; j < Size; j++)
			scanf("%lf", &Mtr[j*Size+i]);
	}
	int* dev_Size;
	double* dev_Mtr;
	CSC(cudaMalloc((void**)&dev_Size, sizeof(int)));
	CSC(cudaMalloc((void**)&dev_Mtr, Size*Size*sizeof(double)));
	CSC(cudaMemcpy(dev_Size, &Size, sizeof(int), cudaMemcpyHostToDevice));
	CSC(cudaMemcpy(dev_Mtr, Mtr, Size*Size*sizeof(double), cudaMemcpyHostToDevice));
	int* Curr_str = NULL;
	int* New_Main_str = NULL;
	CSC(cudaMalloc((void**)&Curr_str, sizeof(int)));
	CSC(cudaMalloc((void**)&New_Main_str, sizeof(int)));
	int HostNewMainString = 0;
	for(int i = 0; i < Size; i++)
	{
		CSC(cudaMemcpy(New_Main_str, &i, sizeof(int), cudaMemcpyHostToDevice));
		MaxElement<<<1,1>>>(dev_Mtr, Size, i, New_Main_str);
		CSC(cudaMemcpy(&HostNewMainString, New_Main_str, sizeof(int), cudaMemcpyDeviceToHost));
		if (HostNewMainString == -1) { hostDet = 0; break; } // singular matrix
		if (HostNewMainString != i)
		{
			CSC(cudaMemcpy(Curr_str, &i, sizeof(int), cudaMemcpyHostToDevice));
			Permute<<<100,100>>>(dev_Mtr, Curr_str, New_Main_str, dev_Size);
			HSTcountPerm++;
		}
		Gaus<<<100,100>>>(dev_Mtr, Size, i);
	}
	double *ResMtr = (double*)malloc(Size*Size*sizeof(double));
	CSC(cudaMemcpy(ResMtr, dev_Mtr, Size*Size*sizeof(double), cudaMemcpyDeviceToHost));
	// Release device and host resources (previously leaked on every path).
	CSC(cudaFree(dev_Size));
	CSC(cudaFree(dev_Mtr));
	CSC(cudaFree(Curr_str));
	CSC(cudaFree(New_Main_str));
	free(Mtr);
	if(hostDet == 0)
	{
		double ans = 0;
		printf("%e ", ans);
		free(ResMtr);
		return 0;
	}
	// det = product of the triangular factor's diagonal; accumulate log|d_ii|
	// to avoid overflow/underflow and track the sign separately.
	double Det;
	int CountNegativeElements = 0;
	if(ResMtr[0] < 0) CountNegativeElements++;
	Det = log(fabs(ResMtr[0]));
	for(int i = 1; i < Size; i++)
	{
		if(ResMtr[i*Size+i] < 0) CountNegativeElements++;
		Det += log(fabs(ResMtr[i*Size+i]));
	}
	Det = exp(Det) * pow(-1.0, CountNegativeElements);
	if((HSTcountPerm % 2) != 0) Det *= -1;
	printf("%e", Det);
	free(ResMtr);
	return 0;
}
19,982 | #include<cstdio>
using namespace std;
// One thread per element of c.
// NOTE(review): despite the name, this computes c[i] = a[i] * (*b) — an
// elementwise scale by the scalar stored at *b, not an addition. Verify the
// intended operation with the caller.
__global__ void add(const int *a, const int *b, int *c)
{
int i = threadIdx.x;
c[i] = a[i] * *b;
}
// Host driver: scales the array [0..99] by the scalar 5 on the GPU and
// prints each product.
// Fix: the output format string was "%dn" (a literal 'n'), mashing all 100
// results onto a single line; the intended "\n" is restored.
int main(void)
{
	int count = 100;
	int size = sizeof(int) * count;
	// Paired host buffers and device allocations.
	int *cpu_a = (int *)malloc(size); int *gpu_a; cudaMalloc((void**)&gpu_a, size);
	int cpu_b = 5; int *gpu_b; cudaMalloc((void**)&gpu_b, sizeof(int));
	int *cpu_c = (int *)malloc(size); int *gpu_c; cudaMalloc((void**)&gpu_c, size);
	for(int i=0; i<count; i++) cpu_a[i]=i;
	cudaMemcpy(gpu_a, cpu_a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(gpu_b, &cpu_b, sizeof(int), cudaMemcpyHostToDevice);
	add<<<1, count>>>(gpu_a, gpu_b, gpu_c);
	// Blocking copy below also synchronizes with the kernel.
	cudaMemcpy(cpu_c, gpu_c, size, cudaMemcpyDeviceToHost);
	for(int i=0; i<count; i++)
		printf("%d * %d = %d\n", cpu_a[i], cpu_b, cpu_c[i]);
	free(cpu_a); cudaFree(gpu_a);
	cudaFree(gpu_b);
	free(cpu_c); cudaFree(gpu_c);
}
|
19,983 | #include "includes.h"
// Elementwise scale: out[i] = in_w_h_c[i] * scales_c[i] for i < size.
// (channel_size is accepted for interface compatibility but unused here.)
__global__ void sam_kernel(float *in_w_h_c, int size, int channel_size, float *scales_c, float *out)
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= size)
return;
out[i] = in_w_h_c[i] * scales_c[i];
}
19,984 | //nvcc linspace.cu -o linspace -lglut -lGL -lm; ./'linspace'
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
/* Fill `vec` with `num` evenly spaced samples over [start, stop].
 * With useEndpoint == 1 the final sample equals `stop`; otherwise `stop` is
 * excluded (step = (stop-start)/num, matching numpy.linspace(endpoint=False)).
 * Generalized: num == 1 now writes vec[0] = start (numpy semantics) instead
 * of leaving the buffer untouched; num <= 0 writes nothing.
 */
void linspace(float* vec, float start, float stop, int num, int useEndpoint)
{
	if(num <= 0)
	{
		return;
	}
	if(num == 1)
	{
		vec[0] = start;
		return;
	}
	int q = 0;
	if(useEndpoint == 1)
	{
		q = 1;
	}
	float div = num - q;
	float delta = stop - start;
	float step = float(delta)/div;
	for(int i = 0;i<num;i++)
	{
		vec[i] = i*step + start;
	}
}
/* Demo driver: print 5 evenly spaced samples over [-1, 1].
 * Fix: removed the unused local `R`. */
int main( void )
{
	int n = 5;
	float* arr = (float*)malloc(n*sizeof(float));
	linspace( arr,
			-1.0,
			1.0,
			n, 1);
	printf("\n\n");
	for(int i = 0; i < n ; i++)
	{
		printf("%f\n", arr[i]);
	}
	printf("\n\n");
	free(arr);
}
19,985 | #include "includes.h"
// Per-thread smoothing of an interleaved pair: even sample 2i uses
// coefficient alpha, odd sample 2i+1 uses beta, with
//   out = in * (1 + c) - out * c   (i.e. out += c * (in - out), inverted).
// Fix: for odd `length` the last thread passed the `j < length` guard but
// then wrote output[j+1] == output[length], one element out of bounds; the
// second write is now guarded separately.
__global__ void smoothing(float* input, float* output, double alpha, double beta, int length) {
int i = threadIdx.x + blockDim.x*blockIdx.x;
int j = i<<1;
if (j < length) {
output[j] = (float) (input[j] * (1.0 + alpha) - output[j] * alpha);
if (j + 1 < length) {
output[j+1] = (float) (input[j+1] * (1.0 + beta) - output[j+1] * beta);
}
}
}
/* Deposit impact energy on cell gid: the energy is attenuated by
 * sqrt(|posicion - gid| + 1); contributions whose magnitude falls below the
 * 0.001 threshold are discarded. */
__global__ void gpu_Actualizar(float *layer, int posicion, float energia,int layer_size) {
	const float umbral = 0.001;
	int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
	if (gid >= layer_size)
		return;
	int distancia = posicion - gid;
	if (distancia < 0)
		distancia = -distancia;
	float atenuacion = sqrtf((float)(distancia + 1));
	float energia_k = energia / atenuacion;
	if (energia_k >= umbral || energia_k <= -umbral)
		layer[gid] += energia_k;
}
/* Element-wise snapshot: layer_copy[gid] = layer[gid]. */
__global__ void gpu_Copiar(float *layer, float *layer_copy,int layer_size) {
	int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
	if (gid >= layer_size)
		return;
	layer_copy[gid] = layer[gid];
}
/* 3-point moving-average relaxation of the interior cells, reading the
 * snapshot in layer_copy and writing layer (endpoints left untouched). */
__global__ void gpu_Relajacion(float *layer, float *layer_copy, int layer_size) {
	int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
	if (gid <= 0 || gid >= layer_size - 1)
		return;
	layer[gid] = (layer_copy[gid-1] + layer_copy[gid] + layer_copy[gid+1]) / 3;
}
/* One halving step of a parallel max-reduction over g_candidatos, carrying
 * the matching entries of `positions` along: thread gid keeps the larger of
 * elements gid and gid + size/2; thread 0 additionally folds in the odd
 * trailing element. Called repeatedly with shrinking `size` until the
 * maximum sits at index 0. */
__global__ void gpu_reduceMaximo(float* g_candidatos, float* positions, int size){
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
int s = size/2;
if ( gid >= size/2) return;
if(g_candidatos[ gid ] < g_candidatos[ gid + s]) {
g_candidatos[ gid ] = g_candidatos[ s + gid ];
positions[gid] = positions[gid+s];
}
// Extra element
if ( size%2 != 0 && gid == 0 ){
if(g_candidatos[ 0 ] < g_candidatos[ size-1 ]) {
g_candidatos[ 0 ] = g_candidatos[ size-1 ];
// NOTE(review): this branch stores the raw index size-1, while the path
// above copies positions[gid+s] (a previously stored value). That is only
// consistent if `positions` starts as identity indices and element size-1
// was never moved — verify against the caller.
positions[0] = size-1;
}
}
}
/* Mark strict local maxima of `layer` as candidates: candidatos[gid] is set
 * to layer[gid] when it exceeds both neighbours, otherwise 0. The two
 * endpoints are always 0 (no neighbour on one side).
 * Fix: the bounds guard was `gid > layer_size`, which let gid == layer_size
 * through and wrote candidatos[layer_size] out of bounds; changed to >=. */
__global__ void gpu_obtenCandidatos (float *layer, float *candidatos, int layer_size ){
int gid = (blockIdx.x + gridDim.x * blockIdx.y) * (blockDim.x * blockDim.y) + (threadIdx.x + blockDim.x * threadIdx.y);
if (gid >= layer_size) return;
candidatos[gid] = 0;
if (gid == 0 || gid == layer_size-1) return;
if (layer[gid]>layer[gid-1] && layer[gid] > layer[gid+1]) candidatos[gid] = layer[gid];
}
|
19,987 | __device__ float a, b, c;
// Three-way branch on `start`; always ends with c = a*b + {2, 3, 4}.
// Looks like a control-flow/divergence test case — `end` is unused.
__global__ void doit1(int start, int end) {
float k;
if (start == 4) {
k = a * b + 2;
} else if (start == 5) {
k = a* b + 3;
} else {
k = a * b + 4;
}
c = k;
}
// Loop with early `break` on i == 4 (writes a) or i == 5 (writes b); other
// iterations write c = 99 before the final c = k store.
// NOTE(review): if start >= end the loop body never runs and `k` is read
// uninitialized at the trailing `c = k`.
__global__ void doit2(int start, int end) {
float k;
for (int i = start; i < end; i++) {
if (i == 4) {
k = a * b + 5;
a = 4;
break;
} else if (i == 5) {
k = a* b + 5;
b = 5;
break;
} else {
k = a * b + 5;
c = 99;
}
}
c = k;
}
// Loop whose branch condition depends on `start` (loop-invariant), storing
// c = k + i each iteration.
// NOTE(review): when start is neither 4 nor 5 no branch assigns `k`, so
// `c = k + i` reads it uninitialized.
__global__ void doit3(int start, int end) {
float k;
for (int i = start; i < end; i++) {
if (start == 4) {
k = a * b + 5;
c = 4;
} else if (start == 5) {
k = a* b + 5;
c = 5;
}
c = k + i;
}
}
// Variant with a forward goto *into* the trailing else-block: for
// start > 999 control jumps straight to label L, so only the
// `k = a*b + 5; c = 99;` path runs before the final c = k store.
// (Apparently a goto/irreducible-control-flow test case.)
__global__ void doit4(int start, int end) {
float k;
if (start > 999) goto L;
if (start == 4) {
k = a * b + 5;
a = 4;
} else if (start == 5) {
k = a* b + 5;
b = 5;
} else {
L:
k = a * b + 5;
c = 99;
}
c = k;
}
// Variant of doit4 where the label sits after the final store: start > 999
// skips the whole body (no branch runs, c is never written).
__global__ void doit5(int start, int end) {
float k;
if (start > 999) goto L;
if (start == 4) {
k = a * b + 5;
a = 4;
} else if (start == 5) {
k = a* b + 5;
b = 5;
} else {
k = a * b + 5;
c = 99;
}
c = k;
L:
;
}
|
19,988 | #include<stdio.h>
#define START 32 //first char to make hist ascii code
#define STOP 127 //last char to make hist ascii code
/* Byte-histogram tool: reads the whole file argv[1] into memory, counts
 * occurrences of every byte value, and prints the printable-ASCII slice
 * [START, STOP) to stdout and to the output file argv[2].
 * Fix: `char` may be signed, so bytes >= 0x80 produced negative indices into
 * histo[] (undefined behaviour); the byte is now reinterpreted as unsigned
 * char before indexing. */
int main(int argc, char** argv){
	if(argc <= 2){
		fprintf(stderr, "Arguments non valide");
		return 1;
	}
	FILE *f_input;
	FILE *f_output;
	long lSize;
	char *buffer;
	f_input = fopen ( argv[1] , "r" );
	f_output = fopen( argv[2],"w");
	if( !f_input ) perror(argv[1]),exit(1);
	/* Determine the file size, then slurp it in one fread. */
	fseek( f_input , 0L , SEEK_END);
	lSize = ftell( f_input );
	rewind( f_input );
	printf("The size is : %li", lSize);
	buffer =(char*) malloc(lSize);
	if( !buffer ) fclose(f_input),fputs("memory alloc fails",stderr),exit(1);
	if( 1!=fread( buffer , lSize, 1 , f_input) )
		fclose(f_input),free(buffer),fputs("entire read fails",stderr),exit(1);
	unsigned int histo[256];
	for(int i=0; i<256; i++)
		histo[i]=0;
	/* Core of the algorithm: bump the counter for each byte seen. */
	for(int i=0; i< lSize ;i++){
		histo[(unsigned char)buffer[i]]++;
	}
	for(int i=START; i<STOP;i++){
		printf("%c:%d\n",i,histo[i]);
		fprintf(f_output, "%c:%d\n",i,histo[i]);
	}
	fclose(f_input);
	fclose(f_output);
	free(buffer);
	return 0;
}
|
19,989 | #include <stdint.h>
#include <stdio.h>
#define N 34
#define THREADS_PER_BLOCK 32
// Dot product: each block stores pairwise products in shared memory, then
// thread 0 sums them and atomically adds the block total to *result.
// Fixes versus the original:
//  - __syncthreads() sat inside both `if (index < N)` and
//    `if (threadIdx.x == 0)` — a divergent barrier (undefined behaviour; in
//    practice a hang or garbage when N is not a multiple of the block size).
//    The barrier now lies on a path every thread of the block executes.
//  - every thread first zeroed the entire `temp` array with no barrier before
//    the product writes, racing with other threads' stores; each thread now
//    initializes only its own slot.
__global__ void dotproduct(float* x, float* y, float* result) {
	// Global element index handled by this thread.
	size_t index = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
	// Shared scratch: one pairwise product per thread of the block.
	__shared__ float temp[THREADS_PER_BLOCK];
	// Threads past the end of the arrays contribute zero.
	temp[threadIdx.x] = (index < N) ? x[index] * y[index] : 0.0f;
	__syncthreads();
	// Thread 0 reduces the block's products and accumulates globally.
	if(threadIdx.x == 0) {
		float sum = 0;
		for(int i=0; i<THREADS_PER_BLOCK; i++) {
			sum += temp[i];
		}
		atomicAdd(result, sum);
	}
}
/* Host driver: builds x = [0..N-1] and y = all ones, computes their dot
 * product on the GPU and prints both inputs plus the result (for N = 34 the
 * expected value is 0+1+...+33 = 561). Every CUDA allocation/copy is checked
 * individually; a failed copy is reported but execution continues. */
int main() {
// Allocate arrays for X and Y on the CPU
float* cpu_x = (float*)malloc(sizeof(float) * N);
float* cpu_y = (float*)malloc(sizeof(float) * N);
float* cpu_result = (float*) malloc(sizeof(float));
*cpu_result = 0.0;
// Initialize X and Y
int i;
for(i=0; i<N; i++) {
cpu_x[i] = (float)(i);
cpu_y[i] = 1.0;
}
// Allocate space for X and Y on the GPU
float* gpu_x;
float* gpu_y;
float* gpu_result;
if(cudaMalloc(&gpu_x, sizeof(float) * N) != cudaSuccess) {
fprintf(stderr, "Failed to allocate X array on GPU\n");
exit(2);
}
if(cudaMalloc(&gpu_y, sizeof(float) * N) != cudaSuccess) {
fprintf(stderr, "Failed to allocate Y array on GPU\n");
exit(2);
}
if(cudaMalloc(&gpu_result, sizeof(float)) != cudaSuccess) {
fprintf(stderr, "Failed to allocate result value on GPU\n");
exit(2);
}
// Copy the host X and Y arrays to the device X and Y arrays
if(cudaMemcpy(gpu_x, cpu_x, sizeof(float) * N, cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Failed to copy X to the GPU\n");
}
if(cudaMemcpy(gpu_y, cpu_y, sizeof(float) * N, cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Failed to copy Y to the GPU\n");
}
// The accumulator must start at 0 on the device (kernel uses atomicAdd).
if(cudaMemcpy(gpu_result, cpu_result, sizeof(float), cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Failed to copy result to the GPU\n");
}
// How many blocks should be run, rounding up to include all threads?
size_t blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
// Run the saxpy kernel
dotproduct<<<blocks, THREADS_PER_BLOCK>>>(gpu_x, gpu_y, gpu_result);
// Wait for the kernel to finish
cudaDeviceSynchronize();
// Copy values from the GPU back to the CPU
if(cudaMemcpy(cpu_result, gpu_result, sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) {
fprintf(stderr, "Failed to copy result from the GPU\n");
}
for(int i = 0; i < N; i++) {
printf(" %.1f ", cpu_x[i]);
}
printf("\n");
for(int i = 0; i < N; i++) {
printf(" %.1f ", cpu_y[i]);
}
printf("\nResult: %.1f\n", *cpu_result);
cudaFree(gpu_x);
cudaFree(gpu_y);
cudaFree(gpu_result);
free(cpu_x);
free(cpu_y);
free(cpu_result);
return 0;
}
|
19,990 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <assert.h>
#define SEED
#define BLOCK_SIZE 16
typedef struct _data {
char * values;
char * next_values;
int width;
int height;
} data;
/* Abort with the parameter-parsing error message. */
void input_error() {
	fputs("Erro na leitura dos parâmetros", stderr);
	exit(EXIT_FAILURE);
}
/* Abort with the allocation-failure error message. */
void mem_error() {
	fputs("Erro na alocação de memória", stderr);
	exit(EXIT_FAILURE);
}
/* Wall-clock time in seconds with microsecond resolution (gettimeofday). */
double rtclock() {
	struct timeval tv;
	struct timezone tz;
	int rc = gettimeofday(&tv, &tz);
	if (rc != 0)
		printf("Error return from gettimeofday: %d", rc);
	return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
/* Count live cells ('1') among the 8 neighbours of (x, y), clamped to the
 * board bounds; the centre cell itself is excluded. */
__device__ int amount_neighbours(char * source, int x, int y, int width, int height) {
	int count = 0;
	for(int row = y - 1; row <= y + 1; row++) {
		for(int col = x - 1; col <= x + 1; col++) {
			if(row == y && col == x)
				continue; /* skip the cell itself */
			bool inside = row >= 0 && row < height && col >= 0 && col < width;
			if(inside && source[row*width + col] == '1')
				count++;
		}
	}
	assert(count >= 0 && count <= 8);
	return count;
}
/* One Conway generation for one cell (x-index = row, y-index = column).
 * Standard rules: a live cell ('1') survives with 2 or 3 live neighbours;
 * a dead cell ('0') is born with exactly 3. Result goes to `goal`. */
__global__ void operate(char * source, char * goal, int width, int height) {
	int row = blockDim.x * blockIdx.x + threadIdx.x;
	int col = blockDim.y * blockIdx.y + threadIdx.y;
	int cell = row * width + col;
	if (row >= height || col >= width || cell >= height * width)
		return;
	int vivos = amount_neighbours(source, col, row, width, height);
	if (source[cell] == '1')
		goal[cell] = (vivos == 2 || vivos == 3) ? '1' : '0';
	else
		goal[cell] = (vivos == 3) ? '1' : '0';
}
/* Run `iterations` Game of Life generations on the GPU, ping-ponging between
 * two device buffers; the final generation ends up back in
 * conways_data->values. `number_threads` is kept for interface compatibility
 * but unused (launch geometry is fixed by BLOCK_SIZE).
 * Fixes: replaced the deprecated cudaThreadSynchronize() and hoisted the
 * synchronization out of the loop — kernels issued to the same (default)
 * stream already execute in order, and the final cudaMemcpy blocks anyway. */
void run_n_times(data * conways_data, int iterations, int number_threads) {
	int i;
	char * d_A, * d_B;
	int size = conways_data->height * conways_data->width * sizeof(char);
	cudaMalloc((void**) &d_A, size);
	cudaMalloc((void**) &d_B, size);
	cudaMemcpy(d_A, conways_data->values, size, cudaMemcpyHostToDevice);
	dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 numBlocks(ceil(conways_data->width/(float) threadsPerBlock.x), ceil(conways_data->height/(float) threadsPerBlock.y));
	for(i = 0; i < iterations; i++) {
		/* Even iterations read A and write B; odd ones the reverse. */
		operate<<<numBlocks, threadsPerBlock>>>(i%2 == 0? d_A : d_B, i%2 == 0? d_B : d_A, conways_data->width, conways_data->height);
	}
	cudaDeviceSynchronize();
	/* After the loop i == iterations, so this picks the last-written buffer. */
	cudaMemcpy(conways_data->values, i%2 == 0? d_A : d_B, size, cudaMemcpyDeviceToHost);
	cudaFree(d_A);
	cudaFree(d_B);
	return;
}
/* Dump the board to stdout: one row per line, each cell followed by a space. */
void print_data(data * conways_data) {
	int w = conways_data->width;
	int h = conways_data->height;
	for(int row = 0; row < h; row++) {
		for(int col = 0; col < w; col++)
			printf("%c ", conways_data->values[row*w + col]);
		printf("\n");
	}
}
/* Entry point: reads board width, height and a (currently unused) thread
 * count from stdin; with SEED defined also reads an RNG seed and fills the
 * board randomly, otherwise reads the cells ('0'/'1' characters) from stdin.
 * Then reads the iteration count, runs the simulation on the GPU, and prints
 * the final board followed by the elapsed wall-clock time. */
int main(void) {
int w, h, number_threads, seed;
data conways_data;
if(scanf(" %d %d %d", &w, &h, &number_threads) != 3) {
input_error();
}
conways_data.width = w;
conways_data.height = h;
conways_data.values = (char *) malloc(sizeof(char) * w * h);
conways_data.next_values = (char *) malloc(sizeof(char) * w * h);
if(conways_data.values == NULL || conways_data.next_values == NULL) {
mem_error();
}
#ifdef SEED
if(scanf(" %d", &seed) != 1) {
input_error();
}
srand(seed);
#endif
int i, j;
/* Populate the board: random '0'/'1' with SEED, else read from stdin. */
for(i = 0; i < h; i++) {
for(j = 0; j < w; j++) {
#ifdef SEED
conways_data.values[i * w + j] = '0' + rand() % 2;
#else
if(scanf(" %c", &conways_data.values[i * w + j]) != 1) {
input_error();
}
#endif
}
}
int iterations;
if(scanf(" %d", &iterations) != 1) {
input_error();
}
double start, end;
start = rtclock();
run_n_times(&conways_data, iterations, number_threads);
end = rtclock();
print_data(&conways_data);
printf("%f\n", end-start);
free(conways_data.values);
free(conways_data.next_values);
return 0;
}
|
// Parameter pack consumed by UrbanMsc::make_calc_thing below, which
// evaluates a * step + b.
struct MscData {
float a;
float b;
};
// Holds a reference to externally owned MscData and manufactures the
// per-step evaluation closure.
struct UrbanMsc {
// NOTE(review): reference member — the referenced MscData must outlive both
// this object and any lambda returned below, which captures `this`.
const MscData& data;
// Returns a device lambda computing data.a * step + data.b.
__device__ decltype(auto) make_calc_thing() const {
return [this](float step) { return this->data.a * step + this->data.b; };
}
};
// Apply the supplied functor to `step` and store the outcome in *result.
template <class F>
__device__ void apply_track(F calc_thing, float step, float* result) {
*result = calc_thing(step);
}
// One thread per element: result[t] = data.a * step[t] + data.b.
// Indexing uses only threadIdx.x, so the launch is expected to be a single
// 1-D block. `data` is passed by value, so the reference held by UrbanMsc
// targets the kernel parameter and stays valid for the kernel's lifetime.
__global__ void apply_kernel(const MscData data, float const* __restrict__ step,
float* __restrict__ result) {
UrbanMsc msc{data};
apply_track(msc.make_calc_thing(), step[threadIdx.x], &result[threadIdx.x]);
}
19,992 | #include<iostream>
using namespace std;
// Deliberately empty kernel — exists only to exercise a device launch.
__global__ void mykernel(void){
}
int main(void){
// Launch the empty kernel (fire-and-forget: never synchronized) and greet
// from the host. Note "Hello World!\n" << endl emits two newlines.
mykernel<<<1, 1>>>();
cout << "Hello World!\n" << endl;
return 0;
}
|
19,993 |
/*
* compiles on elephanttest using
* nvcc --compiler-options '-fPIC' -o libfpoly.so --shared matrix.cu
*/
#include <cuda_runtime.h>
#define aref(mat, row, col, n) (mat[(col)*(n) + (row)])
/* do the echelon operation */
__device__ int ffge(int *mat, int *vec, int n);
/* launch the threads on the GPU */
__global__ void ffge_gpu(int *mats, int *vecs, int num, int n);
/* pivoting rows */
__device__ int pivot(int *a, int *b, int i, int n);
/* the entry point for calling */
int ffge_list (int *mats, int *vecs, int num, int n);
/* a = matrix, b = vector */
/* Fraction-free Gaussian elimination (Bareiss-style) over an n x n integer
 * matrix `a` (column-major, accessed via aref) with right-hand side `b`,
 * performed in place. Each eliminated entry is scaled by the current pivot
 * and, from the second step on, divided by the previous pivot — divisions
 * that are exact in this scheme, keeping all intermediates integral.
 * Returns 0 on success, 1 when no non-zero pivot exists (singular system).
 * Statement order matters: b[j] must be updated before the row loop touches
 * row j, and the divisor aref(a, i-1, i-1, n) must still hold the previous
 * pivot when read. */
__device__ int ffge(int *a, int *b, int n) {
int i, j, k;
for(i = 0; i < n - 1; ++i) {
if (aref(a, i, i, n) == 0) {
/* need to pivot */
if (pivot(a, b, i, n)) {
/* can't pivot! probably will divide by zero so return an error code */
return 1;
}
}
for(j = i + 1; j < n; ++j) {
/* cross-multiply the RHS entry, then divide by the previous pivot */
b[j] = aref(a, i, i, n)*b[j] - aref(a, j, i, n)*b[i];
if (i > 0) {
b[j] = b[j] / aref(a, i-1, i-1, n);
}
for(k = i + 1; k < n; ++k){
aref(a, j, k, n) = aref(a, i, i, n)*aref(a, j, k, n) - aref(a, j, i, n)*aref(a, i, k, n);
if (i > 0) {
aref(a, j, k, n) = aref(a, j, k, n) / aref(a, i-1, i-1, n);
}
}
/* the eliminated column entry is zero by construction */
aref(a, j, i, n) = 0;
}
}
return 0;
}
/* Find a row >= i with a non-zero entry in column i and swap it with row i,
 * both in the matrix and in the right-hand side vector b.
 * Returns 0 on success, 1 if no such row exists (matrix is singular).
 * Fix: the b[i]/b[row] exchange used to sit *inside* the per-column loop and
 * was therefore executed n times — for even n the repeated swaps cancelled
 * out, leaving b inconsistent with the permuted matrix. It now runs once. */
__device__ int pivot(int *a, int *b, int i, int n) {
int row, col, tmp;
int err = 1;
for(row=i; row < n; ++row) {
if (aref(a, row, i, n) != 0) {
/* found a row with non-zero pivot element: swap matrix rows i and row */
for(col=0; col < n; ++col) {
tmp = aref(a, i, col, n);
aref(a, i, col, n) = aref(a, row, col, n);
aref(a, row, col, n) = tmp;
}
/* swap the matching right-hand-side entries exactly once */
tmp = b[i];
b[i] = b[row];
b[row] = tmp;
err = 0;
break;
}
}
return err;
}
/* One thread per linear system: thread i reduces matrix/vector pair i.
 * Fix: the per-system offsets were scaled by sizeof(int) on top of int-
 * pointer arithmetic (mats + i*sizeof(int)*n*n), jumping 4x too far and
 * reading/writing out of bounds for every i > 0. Pointer arithmetic already
 * advances in element units, so the element offsets are i*n*n and i*n.
 * (The byte-sized counts in ffge_list are correct there, since cudaMalloc /
 * cudaMemcpy take byte counts.) */
__global__ void ffge_gpu(int *mats, int *vecs, int num, int n) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < num) {
ffge(mats + i*n*n, vecs + i*n, n);
}
}
/* Host entry point: copies `num` n x n matrices and their RHS vectors to the
 * device, reduces them all with ffge_gpu, and copies the results back into
 * the host buffers in place. Returns 0 on success, 1 on any CUDA error.
 * NOTE(review): error paths return without freeing whatever was already
 * allocated, and the single-block launch <<<1,num>>> caps num at the
 * device's max threads per block — verify callers respect that. */
int ffge_list(int *h_mats, int *h_vecs, int num, int n) {
int mat_size = sizeof(int)*n*n;
int vec_size = sizeof(int)*n;
int *d_mats, *d_vecs;
/* cudaMalloc the arrays and copy over to the GPU */
cudaError_t err = cudaSuccess;
err = cudaMalloc((void **)&d_mats, num*mat_size);
if (err != cudaSuccess) {
return 1;
}
err = cudaMalloc((void **)&d_vecs, num*vec_size);
if (err != cudaSuccess) {
return 1;
}
/* now copy the data over */
err = cudaMemcpy(d_mats, h_mats, num*mat_size, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
return 1;
}
err = cudaMemcpy(d_vecs, h_vecs, num*vec_size, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
return 1;
}
/* do the computation on the GPU */
ffge_gpu<<<1,num>>>(d_mats, d_vecs, num, n);
/* now copy the results back to the host memory (blocking copy also waits
 * for the kernel to finish) */
err = cudaMemcpy(h_mats, d_mats, num*mat_size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
return 1;
}
err = cudaMemcpy(h_vecs, d_vecs, num*vec_size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
return 1;
}
err = cudaFree(d_mats);
if (err != cudaSuccess) {
return 1;
}
err = cudaFree(d_vecs);
if (err != cudaSuccess){
return 1;
}
return 0;
}
|
19,994 | #include "includes.h"
// Softmax backward accumulation over flattened tensors: first fold the
// upstream gradient into dz_in (dz_in += dz_next_layer), then fold dz_in
// into dz. One element per thread across a possibly 2-D grid of 1-D blocks.
__global__ void calcSoftmaxBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, unsigned int n )
{
int idx = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
if ( idx >= n )
return;
dz_in[idx] += dz_next_layer[idx];
dz[idx] += dz_in[idx];
}
19,995 | #include <cuda.h>
#include <stdio.h>
void initializeArray(int*, int);
void stampaArray(int*, int);
void equalArray(int*, int*, int);
void prodottoArrayCompPerCompCPU(int*, int*, int *, int);
__global__ void prodottoArrayCompPerCompGPU(int*, int*, int*, int );
/* Demo driver: component-wise product of two int arrays, computed on the
 * GPU and on the CPU, each timed with CUDA events, followed by a
 * host/device comparison.
 * argv: <NumElementi> <NumThreadPerBlocco> <flag per la Stampa>; defaults
 * are used when fewer than 3 arguments are given.
 * NOTE(review): no CUDA API call or kernel launch is error-checked here
 * (no cudaGetLastError) — a failed launch would go unnoticed. */
int main(int argn, char * argv[]) {
// number of blocks and number of threads per block
dim3 nBlocchi, nThreadPerBlocco;
int N; // total number of array elements
// arrays stored on the host
int *A_host, *B_host, *C_host;
// arrays stored on the device
int *A_device, *B_device, *C_device;
int *copy; // host array that receives the device results (C_device)
int size; // size in bytes of each array
int flag; // print flag: 1 = dump arrays and results
printf("***\t PRODOTTO COMPONENTE PER COMPONENTE DI DUE ARRAY \t***\n");
/* if the user did not supply enough command-line
   parameters, fall back to default values for the
   threads-per-block count, the total number of elements
   and the print flag */
if (argn<4) {
printf("Numero di parametri insufficiente!!!\n");
printf("Uso corretto: %s <NumElementi> <NumThreadPerBlocco> <flag per la Stampa>\n",argv[0]);
printf("Uso dei valori di default\n");
nThreadPerBlocco=4;
N=12;
flag=1;
} else {
N=atoi(argv[1]);
nThreadPerBlocco=atoi(argv[2]);
flag=atoi(argv[3]);
}
// exact block count: ceil(N / threads-per-block)
nBlocchi = N / nThreadPerBlocco.x + ((N%nThreadPerBlocco.x)==0?0:1);
// size in bytes of each array
size = N * sizeof(int);
// print the kernel launch configuration
printf("Numero di elementi = %d\n", N);
printf("Numero di thread per blocco = %d\n", nThreadPerBlocco.x);
printf("Numero di blocchi = %d\n", nBlocchi.x);
// host-side allocations
A_host = (int*)malloc(size);
B_host = (int*)malloc(size);
C_host = (int*)malloc(size);
copy = (int*)malloc(size);
// device-side allocations
cudaMalloc((void**) &A_device, size);
cudaMalloc((void**) &B_device, size);
cudaMalloc((void**) &C_device, size);
// initialize the input data on the host
initializeArray(A_host, N);
initializeArray(B_host, N);
// copy the inputs from host to device
cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_device, B_host, size, cudaMemcpyHostToDevice);
// zero both result buffers (host memset / device cudaMemset)
memset(C_host, 0, size);
cudaMemset(C_device, 0, size);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// kernel invocation
prodottoArrayCompPerCompGPU<<<nBlocchi, nThreadPerBlocco>>>(A_device, B_device, C_device, N);
cudaEventRecord(stop);
cudaEventSynchronize(stop); // wait for the stop event before reading the time
float elapsed;
// time between the two events, in milliseconds
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// copy the results from device back to host
cudaMemcpy(copy,C_device,size, cudaMemcpyDeviceToHost);
printf("tempo GPU: %.3f\n", elapsed);
// CPU computation, timed with the same event mechanism
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
// serial reference implementation of the element-wise product
prodottoArrayCompPerCompCPU(A_host, B_host, C_host, N);
cudaEventRecord(stop);
cudaEventSynchronize(stop); // wait for the stop event before reading the time
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("tempo CPU: %.3f\n", elapsed);
// optionally dump inputs and both result arrays
if (flag == 1) {
printf("array A\n");
stampaArray(A_host,N);
printf("array B\n");
stampaArray(B_host,N);
printf("Risultati host\n");
stampaArray(C_host, N);
printf("Risultati device\n");
stampaArray(copy,N);
}
// correctness check: compare device results against the CPU reference
equalArray(copy, C_host,N);
// free host memory
free(A_host);
free(B_host);
free(C_host);
free(copy);
// free device memory
cudaFree(A_device);
cudaFree(B_device);
cudaFree(C_device);
exit(0);
}
/* Fill the n-element int array with deterministic non-zero test data:
 * magnitude i+1, with even indices negated.
 * BUG FIXES vs. the original:
 *  - `array[i] = 1/((i+1)*10)` was integer division, so every element
 *    was 0 and the later host/device comparison was vacuous; use a
 *    non-zero fill instead.
 *  - the sign-flip `if` had no braces, so it executed ONCE after the
 *    loop with i == n, writing out of bounds at array[n]; it now runs
 *    inside the loop for each element. */
void initializeArray(int *array, int n) {
	int i;
	for (i = 0; i < n; i++) {
		array[i] = i + 1;
		if (i % 2 == 0)
			array[i] = array[i]*(-1);
	}
}
/* Print the n elements of `array` on a single line, space separated,
 * followed by a newline. */
void stampaArray(int* array, int n) {
	for (int idx = 0; idx < n; idx++)
		printf("%d ", array[idx]);
	printf("\n");
}
/* Compare arrays a and b element by element and report on stdout
 * whether the host and device results match.
 * BUG FIX: the original scan `while (a[i] == b[i]) i++;` had no bound,
 * so when all n elements were equal it kept reading past the end of
 * both arrays (undefined behavior) until it happened to find a
 * mismatch. The scan is now bounded by n; reaching i == n means the
 * arrays agree. */
void equalArray(int* a, int* b, int n) {
	int i = 0;
	while (i < n && a[i] == b[i])
		i++;
	if (i < n)
		printf("I risultati dell'host e del device sono diversi\n");
	else
		printf("I risultati dell'host e del device coincidono\n");
}
// Serial (CPU) reference: c[i] = a[i] * b[i] for each of the n elements.
void prodottoArrayCompPerCompCPU(int *a, int *b, int *c, int n) {
	for (int idx = 0; idx < n; idx++)
		c[idx] = a[idx] * b[idx];
}
// Parallel (GPU) version: one thread per element, guarded against the
// ragged tail when n is not a multiple of the block size.
__global__ void prodottoArrayCompPerCompGPU(int *a, int *b, int *c, int n) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i >= n)
		return;
	c[i] = a[i] * b[i];
}
|
19,996 | #include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <float.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <vector>
#include <chrono>
// #include "Matchcommon.h"
#define Radius 1
// #define NPixel 8
#define Deltat 0.00001
// int NBlocks;
// int Blocks;
// int binNum;
/* Return the index of `option` among the command-line arguments
 * (argv[1..argc-1]), or -1 when it is not present. */
int find_option( int argc, char **argv, const char *option )
{
    int idx;
    for ( idx = 1; idx < argc; idx++ ) {
        if ( strcmp( argv[idx], option ) == 0 )
            return idx;
    }
    return -1;
}
/* Parse the integer that follows `option` on the command line.
 * Falls back to default_value when the option is absent or is the last
 * argument (i.e. has no value after it). */
int read_int( int argc, char **argv, const char *option, int default_value )
{
    int pos = find_option( argc, argv, option );
    if ( pos < 0 || pos >= argc-1 )
        return default_value;
    return atoi( argv[pos+1] );  /* ASCII-to-integer conversion */
}
/* Wall-clock seconds elapsed since the FIRST call to this function.
 * The first call latches a static start timestamp (and returns ~0). */
double read_timer( )
{
    static bool started = false;
    static struct timeval t0;
    struct timeval now;
    if ( !started )
    {
        gettimeofday( &t0, NULL );
        started = true;
    }
    gettimeofday( &now, NULL );
    return (now.tv_sec - t0.tv_sec) + 1.0e-6 * (now.tv_usec - t0.tv_usec);
}
// Fill an n x n int grid with pseudo-random "events": drand48() yields
// [0, 1), so drand48() + .5 truncates to 0 or 1 when stored into an int.
// Seeded from the current time, so successive runs differ.
void init_events( int n, int *g )
{
    srand48( time( NULL ) );
    for (int row = 0; row < n; row++)
        for (int col = 0; col < n; col++)
            g[row*n+col] = drand48()+.5;
}
// Accumulate into *temp the element-wise XOR ("Hamming-style") difference
// between two NPixel x NPixel blocks: one starting at linear offset
// tmdaddr in tmd, the other at tm2daddr in tm2d, both in N-wide images.
// NOTE(review): the (k, l) loops grid-stride from this thread's GLOBAL
// x/y ids, so each calling thread covers only its own partition of the
// block — *temp holds a partial sum unless the effective x/y thread
// count is 1 — and the unsynchronized `*temp +=` is a data race if
// multiple threads share one temp pointer. Confirm the intended launch
// shape (the host code launches 1-D grids, making ytid always 0).
__device__ void HammingDistance(int *tmd, int *tm2d, int tmdaddr, int tm2daddr, int *temp, int N, int NPixel)
{
int xtid = threadIdx.x + blockIdx.x * blockDim.x;   // global thread id in x
int ytid = threadIdx.y + blockIdx.y * blockDim.y;   // global thread id in y
int xoffset = gridDim.x*blockDim.x;                 // total launched threads in x
int yoffset = gridDim.y*blockDim.y;                 // total launched threads in y
for (int k = xtid; k < NPixel; k+=xoffset)
for (int l = ytid; l < NPixel; l+=yoffset)
*temp += int (tmd[tmdaddr+(k*N+l)] ^ tm2d[tm2daddr+(k*N+l)]);
}
// Write the per-pixel difference between an NPixel x NPixel block of tmd
// (at linear offset tmdaddr) and the matched block of tm2d (at tm2daddr)
// into velocity at the tmd block's position.
// NOTE(review): like HammingDistance, the loops grid-stride from this
// thread's GLOBAL x/y ids, so each caller only writes its own partition
// of the block — confirm the intended launch shape (host launches are
// 1-D, so ytid is always 0 and the l loop runs fully per thread).
__device__ void OpticalFlow(int *tmd, int *tm2d, int tmdaddr, int tm2daddr, int *velocity, int N, int NPixel)
{
int xtid = threadIdx.x + blockIdx.x * blockDim.x;   // global thread id in x
int ytid = threadIdx.y + blockIdx.y * blockDim.y;   // global thread id in y
int xoffset = gridDim.x*blockDim.x;                 // total launched threads in x
int yoffset = gridDim.y*blockDim.y;                 // total launched threads in y
for (int k = xtid; k < NPixel; k+=xoffset)
for (int l = ytid; l < NPixel; l+=yoffset)
velocity[tmdaddr+(k*N+l)] = int (tmd[tmdaddr+(k*N+l)] - tm2d[tm2daddr+(k*N+l)]);
}
// For every NPixel x NPixel block of the frame: record the co-located
// block of tm2d as the initial match target (as a top-left linear pixel
// offset) and seed targetvalue with that pairing's Hamming distance.
// NOTE(review): HammingDistance itself grid-strides from the same global
// thread ids used for (i, j) here, so with multiple threads each thread
// accumulates only a partial distance for the (i, j) cells it owns —
// this looks correct only for launches where one thread covers a full
// block; confirm against the 1-D host launch.
__global__ void TargetDefining( int* tmd, int* tm2d, int* Targetind, int* targetvalue, int N, int NPixel)
{
// // NBlocks = int (N/NPixel);
int xtid = threadIdx.x + blockIdx.x * blockDim.x;   // global thread id in x
int ytid = threadIdx.y + blockIdx.y * blockDim.y;   // global thread id in y
int xoffset = gridDim.x*blockDim.x;                 // total launched threads in x
int yoffset = gridDim.y*blockDim.y;                 // total launched threads in y
for (int i = xtid; i < (N/NPixel); i+=xoffset){
for (int j = ytid; j < (N/NPixel); j+=yoffset){
Targetind[i*(N/NPixel) + j] = i*NPixel*N + j*NPixel; // Assign block in t-2d as target
HammingDistance(tmd, tm2d, i*NPixel*N + j*NPixel, i*NPixel*N + j*NPixel, &targetvalue[i*(N/NPixel) + j], N,NPixel );
}
}
}
// 8-neighborhood (block-grid) offsets used by OpticalMatching, stored in
// constant memory: all 8 surrounding blocks of a given block.
__constant__ const int dir[8][2]={{-1,-1},{0,-1},{1,-1},{1,0},{1,1},{0,1},{-1,1},{-1,0}};
// Block matching: for each NPixel x NPixel block of tmd, probe the 8
// neighboring block positions in tm2d (offsets from the __constant__
// `dir` table). Whenever a neighbor's Hamming distance beats the current
// best in targetvalue, record it and store the neighbor's top-left
// linear pixel offset in Targetind. Must run after TargetDefining has
// seeded targetvalue/Targetind.
// NOTE(review): the read-compare-write on targetvalue/Targetind is not
// atomic, and HammingDistance only accumulates this thread's partition
// of the block (see its header) — confirm the intended 1-thread-per-
// block-cell launch shape.
__global__ void OpticalMatching(int* __restrict__ tmd, int* __restrict__ tm2d, int* __restrict__ Targetind, int* __restrict__ targetvalue, int N, int NPixel)
{
int xtid = threadIdx.x + blockIdx.x * blockDim.x;   // global thread id in x
int ytid = threadIdx.y + blockIdx.y * blockDim.y;   // global thread id in y
int xoffset = gridDim.x*blockDim.x;                 // total launched threads in x
int yoffset = gridDim.y*blockDim.y;                 // total launched threads in y
for (int i = xtid; i < N/NPixel; i+=xoffset){
for (int j = ytid; j < N/NPixel; j+=yoffset){
for(int t=0;t<8;t++){
int x = (i + dir[t][0]);
int y = (j + dir[t][1]);
// bounds check: neighbor block must lie inside the N x N frame
if (x*NPixel >= 0 && x*NPixel < N && y*NPixel >= 0 && y*NPixel < N)
{
int temp = 0;
// Calculating the hamming distance
HammingDistance(tmd, tm2d, i*NPixel*N + j*NPixel, (x*NPixel*N) + (y*NPixel), &temp, N, NPixel);
// printf("temp for block %d where dx is %d and dy is %d is %d\n",i* N/NPixel+j,dx,dy,temp);
// for (int k = 0; k < NPixel; k++)
// for (int l = 0; l < NPixel; l++)
// temp = temp + int (tmd[(i+k)*N+(j+l)] xor tm2d[(i+k+dx*Radius)*N+(j+l+dy*Radius)]);
// printf("temp is %d\n",temp);
if (temp < targetvalue[i* N/NPixel + j])
{
targetvalue[i* N/NPixel + j] = temp;
// printf("targetvalue[%d] is %d\n",i* N/NPixel+j,targetvalue[i* N/NPixel + j]);
Targetind[i* N/NPixel + j] = x*NPixel*N + y*NPixel;
}
}
}
}
}
}
// Final stage: for each block, subtract the matched tm2d block (whose
// top-left linear offset was stored in Targetind by OpticalMatching)
// from the tmd block, writing the per-pixel differences into velocity.
// Must run after OpticalMatching; shares the grid-stride caveats noted
// on OpticalFlow.
__global__ void OpticalFlowCalculation(int *tmd, int *tm2d, int *Targetind, int *velocity, int N, int NPixel){
int xtid = threadIdx.x + blockIdx.x * blockDim.x;   // global thread id in x
int ytid = threadIdx.y + blockIdx.y * blockDim.y;   // global thread id in y
int xoffset = gridDim.x*blockDim.x;                 // total launched threads in x
int yoffset = gridDim.y*blockDim.y;                 // total launched threads in y
for (int i = xtid; i < (N/NPixel); i+=xoffset){
for (int j = ytid; j < (N/NPixel); j+=yoffset){
OpticalFlow(tmd, tm2d, i*NPixel*N + j*NPixel, Targetind[i*(N/NPixel)+j], velocity, N, NPixel);
}
}
}
/* Block-matching optical flow driver.
 * Flags: -n frame edge length (default 64), -t threads per block
 * (default 256), -b number of blocks to launch (default 1024),
 * -p matching-block edge length in pixels (default 8).
 * Pipeline: TargetDefining -> OpticalMatching -> OpticalFlowCalculation,
 * all launched 1-D even though the kernels index threadIdx.y/blockIdx.y
 * (so all y indices are 0) — NOTE(review): confirm that is intentional.
 * NOTE(review): d_Targetind and d_targetvalue are never cudaFree'd, and
 * no CUDA call is error-checked. */
int main(int argc, char **argv)
{
int N = read_int( argc, argv, "-n", 64 ); // frame edge length in pixels
int NUM_THREADS = read_int( argc, argv, "-t", 256 ); // threads per block
int blks = read_int( argc, argv, "-b", 1024 ); // number of blocks to launch
int NPixel = read_int( argc, argv, "-p", 8 ); // matching-block edge length
int NBlocks = int (N/NPixel); // matching blocks per frame edge
// binNum = int(N / NPixel); // Should be around sqrt(N/2)
int *tmd = (int*) malloc( N * N * sizeof(int) ); // frame at time t-d
int *tm2d = (int*) malloc( N * N * sizeof(int) ); // frame at time t-2d
int *velocity = (int*) malloc( N * N * sizeof(int) ); // per-pixel flow output
int *Targetind = (int*) malloc( NBlocks * NBlocks * sizeof(int) ); // best-match offsets per block
int *targetvalue = (int*) malloc( NBlocks * NBlocks * sizeof(int) ); // best-match distances per block
int * d_tmd,*d_tm2d, *d_velocity, *d_Targetind, *d_targetvalue; // device mirrors
cudaMalloc((void **) &d_tmd, N * N * sizeof(int));
cudaMalloc((void **) &d_tm2d, N * N * sizeof(int));
cudaMalloc((void **) &d_velocity, N * N * sizeof(int));
cudaMalloc((void **) &d_Targetind, NBlocks * NBlocks * sizeof(int));
cudaMalloc((void **) &d_targetvalue, NBlocks * NBlocks * sizeof(int));
init_events(N, tmd);
init_events(N, tm2d);
cudaDeviceSynchronize();
double copy_time = read_timer( ); // time the host->device transfers
cudaMemcpy(d_tmd, tmd, N * N * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_tm2d, tm2d, N * N * sizeof(int), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
copy_time = read_timer( ) - copy_time;
auto begin_sim = std::chrono::high_resolution_clock::now(); // simulation timer
////////////////////////////////////////////////////////////////////////////////
//////////////////////////// Target Defining ////////////////////////////
////////////////////////////////////////////////////////////////////////////////
int threadNum = NUM_THREADS;
// int blks = min(1024,(N*N + NUM_THREADS - 1) / NUM_THREADS);
int blockNum = blks;//min(512,(n+threadNum-1)/threadNum);
cudaMemset(d_Targetind, 0, NBlocks * NBlocks * sizeof(int));
cudaMemset(d_targetvalue, 0, NBlocks * NBlocks * sizeof(int));
TargetDefining<<<blockNum,threadNum>>>(d_tmd, d_tm2d, d_Targetind, d_targetvalue, N, NPixel );
// cudaMemcpy(Targetind, d_Targetind, NBlocks * NBlocks * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(targetvalue, d_targetvalue, NBlocks * NBlocks * sizeof(int), cudaMemcpyDeviceToHost);
// for (int i = 0; i < N; i+=NPixel)
// {
// for (int j = 0; j < N; j+=NPixel)
// {
// count = ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel;
// Targetind[count] = i*N + j; // Assign block in t-2d as target
// // printf("count= %d, ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel= %d\n", count, ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel);
// HammingDistance(NPixel, N, i*N + j, i*N + j, tmd, tm2d, &targetvalue[count]);
// }
// }
////////////////////////////////////////////////////////////////////////////////
//////////////////////////// Optical Matching ////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// cudaMemcpy(d_Targetind, Targetind, NBlocks * NBlocks * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(d_targetvalue, targetvalue, NBlocks * NBlocks * sizeof(int), cudaMemcpyHostToDevice);
OpticalMatching<<<blockNum,threadNum>>>(d_tmd, d_tm2d, d_Targetind, d_targetvalue, N, NPixel);
// cudaMemcpy(Targetind, d_Targetind, NBlocks * NBlocks * sizeof(int), cudaMemcpyDeviceToHost);
// for (int i = 0; i < N; i+=NPixel)
// {
// for (int j = 0; j < N; j+=NPixel)
// {
// count = ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel;
// // int *temp = (int*) calloc((2*Radius +1)*(2*Radius +1), sizeof(int));
// // Targetind[i*NBlocks+j] = (i * NBlocks + j);
// for (int dx = -Radius; dx <= Radius; dx++) //Search over nearby 8 blocks and the target block
// {
// for (int dy = -Radius; dy <= Radius; dy++)
// {
// if (i + dx*NPixel >= 0 && i + dx*NPixel < N && j + dy*NPixel >= 0 && j + dy*NPixel < N)
// {
// temp = 0;
// // Calculating the hamming distance
// HammingDistance(NPixel, N, i*N + j, (i+dx*Radius)*N + (j+dy*Radius), tmd, tm2d, &temp);
// // for (int k = 0; k < NPixel; k++)
// // for (int l = 0; l < NPixel; l++)
// // temp = temp + int (tmd[(i+k)*N+(j+l)] xor tm2d[(i+k+dx*Radius)*N+(j+l+dy*Radius)]);
// // printf("temp is %d\n",temp);
// if (temp < targetvalue[count])
// {
// targetvalue[count] = temp;
// Targetind[count] = (i + dx*NPixel)*N + (j + dy*NPixel);
// // int Targetind[count] = 0;
// }
// }
// }
// }
// }
// }
///////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////// Optical Flow Calculation ////////////////////
///////////////////////////////////////////////////////////////////////////////////
// cudaMemcpy(d_Targetind, Targetind, NBlocks * NBlocks * sizeof(int), cudaMemcpyHostToDevice);
cudaMemset(d_velocity, 0, N * N * sizeof(int) );
OpticalFlowCalculation<<<blockNum,threadNum>>>(d_tmd, d_tm2d, d_Targetind, d_velocity, N, NPixel);
cudaMemcpy(velocity, d_velocity, N * N * sizeof(int), cudaMemcpyDeviceToHost);
// for (int i = 0; i < N; i+=NPixel)
// {
// for (int j = 0; j < N; j+=NPixel)
// {
// count = ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel;
// // Calculating the Optical Flow
// OpticalFlow(NPixel, N, i*N + j, Targetind[count], tmd, tm2d, velocity);
// // for (int k = 0; k < NPixel; k++)
// // for (int l = 0; l < NPixel; l++)
// // OpticalFlow[(i+k)*N+(j+l)] = tmd[(i+k)*N+(j+l)] - tm2d[Targetind[count]+(k*N+l)];
// }
// }
cudaDeviceSynchronize();
auto end_sim = std::chrono::high_resolution_clock::now();
double simdur = std::chrono::duration <double> (end_sim - begin_sim).count(); // seconds
printf( "N: %d, NPixels: %d, NBlocks: %d, Radius: %d, Threads: %d, Blocks: %d, GPGPU ST: %g, CT: %g\n", N, NPixel, NBlocks, Radius, NUM_THREADS, blockNum, simdur, copy_time);
// int i, j, count = 0;
// int *B[N];
// for (i=0; i<M; i++)
// B[i] = (int *)malloc(N * sizeof(int));
// for (int i = 0; i < N; i++)
// for (int j = 0; j < N; j++)
// printf("tmd in position %d*%d is:%d\n",i, j, tmd[i*N+j]);
cudaFree(d_tmd);
cudaFree(d_tm2d);
cudaFree(d_velocity);
free( tmd );
free( tm2d );
free( velocity );
free( Targetind );
free( targetvalue );
return 0;
}
// //module load cuda
// //salloc -N 1 -t 01:30:00 -p gpu
// #include <stdlib.h>
// #include <stdio.h>
// #include <assert.h>
// #include <float.h>
// #include <string.h>
// #include <math.h>
// #include <time.h>
// #include <sys/time.h>
// #include <vector>
// // #include "Matchcommon.h"
// #define NUM_THREADS 256
// #define Radius 1
// #define NPixel 8
// #define Deltat 0.00001
// int NBlocks;
// int Blocks;
// int binNum;
// int find_option( int argc, char **argv, const char *option )
// {
// for( int i = 1; i < argc; i++ )
// if( strcmp( argv[i], option ) == 0 )
// return i;
// return -1;
// }
// int read_int( int argc, char **argv, const char *option, int default_value ) //
// {
// int iplace = find_option( argc, argv, option );
// if( iplace >= 0 && iplace < argc-1 ) //
// return atoi( argv[iplace+1] ); // Ascii to integer conversion
// return default_value;
// }
// double read_timer( )
// {
// static bool initialized = false;
// static struct timeval start;
// struct timeval end;
// if( !initialized )
// {
// gettimeofday( &start, NULL );
// initialized = true;
// }
// gettimeofday( &end, NULL );
// return (end.tv_sec - start.tv_sec) + 1.0e-6 * (end.tv_usec - start.tv_usec);
// }
// void init_events( int n, int *g1 , int *g2)
// {
// srand48( time( NULL ) );
// for (int i = 0; i < n; i++) {
// for (int j = 0; j < n; j++){
// g1[i*n+j] = drand48()+.5;
// g2[i*n+j] = drand48()+.5;
// }
// }
// }
// __device__ void HammingDistance(int *tmd, int *tm2d, int tmdaddr, int tm2daddr, int *temp, int N )
// {
// // int xtid = threadIdx.x + blockIdx.x * blockDim.x;
// // int ytid = threadIdx.y + blockIdx.y * blockDim.y;
// // int xoffset = gridDim.x*blockDim.x;
// // int yoffset = gridDim.y*blockDim.y;
// // for (int k = xtid; k < NPixel; k+=xoffset){
// // for (int l = ytid; l < NPixel; l+=yoffset){
// for (int k = 0; k < NPixel; k++){
// for (int l = 0; l < NPixel; l++){
// *temp += int (tmd[tmdaddr+(k*N+l)] ^ tm2d[tm2daddr+(k*N+l)]);
// // atomicAdd(temp,int (tmd[tmdaddr+(k*N+l)] ^ tm2d[tm2daddr+(k*N+l)]));
// // printf("tm2daddr is %d+ (k*N+l) is %d and tm2daddr+(k*N+l) is %d and temp is %d\n", tm2daddr, (k*N+l),tm2daddr+(k*N+l),*temp);
// }
// }
// }
// __device__ void OpticalFlow(int *tmd, int *tm2d, int tmdaddr, int tm2daddr, int *velocity, int N )
// {
// // int xtid = threadIdx.x + blockIdx.x * blockDim.x;
// // int ytid = threadIdx.y + blockIdx.y * blockDim.y;
// // int xoffset = gridDim.x*blockDim.x;
// // int yoffset = gridDim.y*blockDim.y;
// // for (int k = xtid; k < NPixel; k+=xoffset)
// // for (int l = ytid; l < NPixel; l+=yoffset)
// for (int k = 0; k < NPixel; k++){
// for (int l = 0; l < NPixel; l++){
// velocity[tmdaddr+(k*N+l)] = int (tmd[tmdaddr+(k*N+l)] - tm2d[tm2daddr+(k*N+l)]);
// }
// }
// }
// __global__ void TargetDefining( int* tmd, int* tm2d, int* Targetind, int* targetvalue, int N )
// {
// // // NBlocks = int (N/NPixel);
// int xtid = threadIdx.x + blockIdx.x * blockDim.x;
// int ytid = threadIdx.y + blockIdx.y * blockDim.y;
// int xoffset = gridDim.x*blockDim.x;
// int yoffset = gridDim.y*blockDim.y;
// for (int i = xtid; i < (N/NPixel); i+=xoffset){
// for (int j = ytid; j < (N/NPixel); j+=yoffset){
// Targetind[i*(N/NPixel) + j] = i*(N/NPixel) + j; // Assign block in t-2d as target
// HammingDistance(tmd, tm2d, i*NPixel*N + j*NPixel, i*NPixel*N + j*NPixel, &targetvalue[i*(N/NPixel) + j], N );
// // printf("targetvalue[i*NBlocks + j] is %d\n",targetvalue[i*(N/NPixel) + j]);
// }
// }
// }
// __constant__ const int dir[8][2]={{-1,-1},{0,-1},{1,-1},{1,0},{1,1},{0,1},{-1,1},{-1,0}};
// __global__ void OpticalMatching(int* __restrict__ tmd, int* __restrict__ tm2d, int* __restrict__ Targetind, int* __restrict__ targetvalue, int N){
// int xtid = threadIdx.x + blockIdx.x * blockDim.x;
// int ytid = threadIdx.y + blockIdx.y * blockDim.y;
// int xoffset = gridDim.x*blockDim.x;
// int yoffset = gridDim.y*blockDim.y;
// for (int i = xtid; i < N/NPixel; i+=xoffset){
// for (int j = ytid; j < N/NPixel; j+=yoffset){
// for(int t=0;t<8;t++){
// int x = (i + dir[t][0]);
// int y = (j + dir[t][1]);
// // for (int dx = -Radius; dx <= Radius; dx++) //Search over nearby 8 blocks and the target block
// // {
// // for (int dy = -Radius; dy <= Radius; dy++)
// // {
// if (x*NPixel >= 0 && x*NPixel < N && y*NPixel >= 0 && y*NPixel < N)
// {
// int temp = 0;
// // Calculating the hamming distance
// HammingDistance(tmd, tm2d, i*NPixel*N + j*NPixel, ((i+dx)*NPixel*N) + ((j+dy)*NPixel), &temp, N);
// // printf("temp for block %d where dx is %d and dy is %d is %d\n",i* N/NPixel+j,dx,dy,temp);
// // for (int k = 0; k < NPixel; k++)
// // for (int l = 0; l < NPixel; l++)
// // temp = temp + int (tmd[(i+k)*N+(j+l)] xor tm2d[(i+k+dx*Radius)*N+(j+l+dy*Radius)]);
// // printf("temp is %d\n",temp);
// if (temp < targetvalue[i* N/NPixel + j])
// {
// targetvalue[i* N/NPixel + j] = temp;
// // printf("targetvalue[%d] is %d\n",i* N/NPixel+j,targetvalue[i* N/NPixel + j]);
// Targetind[i* N/NPixel + j] = x* N/NPixel + y;
// }
// }
// }
// }
// }
// }
// __global__ void OpticalFlowCalculation(int *tmd, int *tm2d, int *Targetind, int *velocity, int N){
// int xtid = threadIdx.x + blockIdx.x * blockDim.x;
// int ytid = threadIdx.y + blockIdx.y * blockDim.y;
// int xoffset = gridDim.x*blockDim.x;
// int yoffset = gridDim.y*blockDim.y;
// for (int i = xtid; i < (N/NPixel); i+=xoffset){
// for (int j = ytid; j < (N/NPixel); j+=yoffset){
// int tm2daddri= (Targetind[i*(N/NPixel)+j]*NPixel)/N;
// int tm2daddrj= Targetind[i*(N/NPixel) + j]-tm2daddri*(N/NPixel);
// int tm2daddr= tm2daddri*NPixel*N+tm2daddrj*NPixel;
// OpticalFlow(tmd, tm2d, i*NPixel*N + j*NPixel, tm2daddr, velocity, N);
// }
// }
// }
// int main(int argc, char **argv)
// {
// int N = read_int( argc, argv, "-n", 32 ); // the number of particles
// NBlocks = int (N/NPixel);
// // binNum = int(N / NPixel); // Should be around sqrt(N/2)
// int *tmd = (int*) malloc( N * N * sizeof(int) );
// int *tm2d = (int*) malloc( N * N * sizeof(int) );
// int *velocity = (int*) malloc( N * N * sizeof(int) );
// int *Targetind = (int*) malloc( NBlocks * NBlocks * sizeof(int) );
// int *targetvalue = (int*) malloc( NBlocks * NBlocks * sizeof(int) );
// int * d_tmd,*d_tm2d, *d_velocity, *d_Targetind, *d_targetvalue;
// cudaMalloc((void **) &d_tmd, N * N * sizeof(int));
// cudaMalloc((void **) &d_tm2d, N * N * sizeof(int));
// cudaMalloc((void **) &d_velocity, N * N * sizeof(int));
// cudaMalloc((void **) &d_Targetind, NBlocks * NBlocks * sizeof(int));
// cudaMalloc((void **) &d_targetvalue, NBlocks * NBlocks * sizeof(int));
// init_events(N, tmd,tm2d);
// // /////////////////////////////////////////////////
// // ///////////////// just for test /////////////////
// // /////////////////////////////////////////////////
// // printf("tmd is a 4*4 matrix in 2*2 block:\n");
// // printf("|%d %d | %d %d|:\n",tmd[0],tmd[1],tmd[2],tmd[3]);
// // printf("|%d %d | %d %d|:\n",tmd[4],tmd[5],tmd[6],tmd[7]);
// // printf("---------------\n");
// // printf("|%d %d | %d %d|:\n",tmd[8],tmd[9],tmd[10],tmd[11]);
// // printf("|%d %d | %d %d|:\n",tmd[12],tmd[13],tmd[14],tmd[15]);
// // printf("tm2d is a 4*4 matrix in 2*2 block:\n");
// // printf("|%d %d | %d %d|:\n",tm2d[0],tm2d[1],tm2d[2],tm2d[3]);
// // printf("|%d %d | %d %d|:\n",tm2d[4],tm2d[5],tm2d[6],tm2d[7]);
// // printf("---------------\n");
// // printf("|%d %d | %d %d|:\n",tm2d[8],tm2d[9],tm2d[10],tm2d[11]);
// // printf("|%d %d | %d %d|:\n",tm2d[12],tm2d[13],tm2d[14],tm2d[15]);
// // /////////////////////////////////////////////////
// // /////////////////////////////////////////////////
// // /////////////////////////////////////////////////
// cudaDeviceSynchronize();
// double copy_time = read_timer();
// cudaMemcpy(d_tmd, tmd, N * N * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(d_tm2d, tm2d, N * N * sizeof(int), cudaMemcpyHostToDevice);
// cudaDeviceSynchronize();
// copy_time = read_timer() - copy_time;
// double simulation_time = read_timer( );
// ////////////////////////////////////////////////////////////////////////////////
// //////////////////////////// Target Defining ////////////////////////////
// ////////////////////////////////////////////////////////////////////////////////
// int threadNum = NUM_THREADS;
// int blks = min(1024,(N*N + NUM_THREADS - 1) / NUM_THREADS);
// int blockNum = blks;//min(512,(n+threadNum-1)/threadNum);
// cudaMemset(d_Targetind, 0, NBlocks * NBlocks * sizeof(int));
// cudaMemset(d_targetvalue, 0, NBlocks * NBlocks * sizeof(int));
// TargetDefining<<<blockNum,threadNum>>>(d_tmd, d_tm2d, d_Targetind, d_targetvalue, N );
// cudaMemcpy(Targetind, d_Targetind, NBlocks * NBlocks * sizeof(int), cudaMemcpyDeviceToHost);
// cudaMemcpy(targetvalue, d_targetvalue, NBlocks * NBlocks * sizeof(int), cudaMemcpyDeviceToHost);
// // /////////////////////////////////////////////////
// // ///////////////// just for test /////////////////
// // /////////////////////////////////////////////////
// // printf("Target Indices for 2*2 blocks:\n");
// // printf("|%d | %d|:\n",Targetind[0],Targetind[1]);
// // printf("|%d | %d|:\n",Targetind[2],Targetind[3]);
// // printf("Target Values for 2*2 blocks:\n");
// // printf("|%d | %d|:\n",targetvalue[0],targetvalue[1]);
// // printf("|%d | %d|:\n",targetvalue[2],targetvalue[3]);
// // /////////////////////////////////////////////////
// // /////////////////////////////////////////////////
// // /////////////////////////////////////////////////
// // for (int i = 0; i < N; i+=NPixel)
// // {
// // for (int j = 0; j < N; j+=NPixel)
// // {
// // count = ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel;
// // Targetind[count] = i*N + j; // Assign block in t-2d as target
// // // printf("count= %d, ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel= %d\n", count, ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel);
// // HammingDistance(NPixel, N, i*N + j, i*N + j, tmd, tm2d, &targetvalue[count]);
// // }
// // }
// ////////////////////////////////////////////////////////////////////////////////
// //////////////////////////// Optical Matching ////////////////////////////
// ////////////////////////////////////////////////////////////////////////////////
// cudaMemcpy(d_Targetind, Targetind, NBlocks * NBlocks * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemcpy(d_targetvalue, targetvalue, NBlocks * NBlocks * sizeof(int), cudaMemcpyHostToDevice);
// OpticalMatching<<<blockNum,threadNum>>>(d_tmd, d_tm2d, d_Targetind, d_targetvalue, N);
// cudaMemcpy(Targetind, d_Targetind, NBlocks * NBlocks * sizeof(int), cudaMemcpyDeviceToHost);
// // cudaMemcpy(targetvalue, d_targetvalue, NBlocks * NBlocks * sizeof(int), cudaMemcpyDeviceToHost);
// // /////////////////////////////////////////////////
// // ///////////////// just for test /////////////////
// // /////////////////////////////////////////////////
// // printf("New Target Indices for 2*2 blocks:\n");
// // printf("|%d | %d|:\n",Targetind[0],Targetind[1]);
// // printf("|%d | %d|:\n",Targetind[2],Targetind[3]);
// // printf("New Target Values for 2*2 blocks:\n");
// // printf("|%d | %d|:\n",targetvalue[0],targetvalue[1]);
// // printf("|%d | %d|:\n",targetvalue[2],targetvalue[3]);
// // /////////////////////////////////////////////////
// // /////////////////////////////////////////////////
// // /////////////////////////////////////////////////
// // for (int i = 0; i < N; i+=NPixel)
// // {
// // for (int j = 0; j < N; j+=NPixel)
// // {
// // count = ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel;
// // // int *temp = (int*) calloc((2*Radius +1)*(2*Radius +1), sizeof(int));
// // // Targetind[i*NBlocks+j] = (i * NBlocks + j);
// // for (int dx = -Radius; dx <= Radius; dx++) //Search over nearby 8 blocks and the target block
// // {
// // for (int dy = -Radius; dy <= Radius; dy++)
// // {
// // if (i + dx*NPixel >= 0 && i + dx*NPixel < N && j + dy*NPixel >= 0 && j + dy*NPixel < N)
// // {
// // temp = 0;
// // // Calculating the hamming distance
// // HammingDistance(NPixel, N, i*N + j, (i+dx*Radius)*N + (j+dy*Radius), tmd, tm2d, &temp);
// // // for (int k = 0; k < NPixel; k++)
// // // for (int l = 0; l < NPixel; l++)
// // // temp = temp + int (tmd[(i+k)*N+(j+l)] xor tm2d[(i+k+dx*Radius)*N+(j+l+dy*Radius)]);
// // // printf("temp is %d\n",temp);
// // if (temp < targetvalue[count])
// // {
// // targetvalue[count] = temp;
// // Targetind[count] = (i + dx*NPixel)*N + (j + dy*NPixel);
// // // int Targetind[count] = 0;
// // }
// // }
// // }
// // }
// // }
// // }
// ///////////////////////////////////////////////////////////////////////////////////
// //////////////////////////////////// Optical Flow Calculation ////////////////////
// ///////////////////////////////////////////////////////////////////////////////////
// cudaMemcpy(d_Targetind, Targetind, NBlocks * NBlocks * sizeof(int), cudaMemcpyHostToDevice);
// cudaMemset(d_velocity, 0, N * N * sizeof(int) );
// OpticalFlowCalculation<<<blockNum,threadNum>>>(d_tmd, d_tm2d, d_Targetind, d_velocity, N);
// // cudaMemcpy(velocity, d_velocity, N * N * sizeof(int), cudaMemcpyDeviceToHost);
// // /////////////////////////////////////////////////
// // ///////////////// just for test /////////////////
// // /////////////////////////////////////////////////
// // printf("Diff for 4*4 matrix and 2*2 blocks:\n");
// // printf("|%d %d | %d %d|:\n",velocity[0],velocity[1],velocity[2],velocity[3]);
// // printf("|%d %d | %d %d|:\n",velocity[4],velocity[5],velocity[6],velocity[7]);
// // printf("---------------\n");
// // printf("|%d %d | %d %d|:\n",velocity[8],velocity[9],velocity[10],velocity[11]);
// // printf("|%d %d | %d %d|:\n",velocity[12],velocity[13],velocity[14],velocity[15]);
// // /////////////////////////////////////////////////
// // /////////////////////////////////////////////////
// // /////////////////////////////////////////////////
// // for (int i = 0; i < N; i+=NPixel)
// // {
// // for (int j = 0; j < N; j+=NPixel)
// // {
// // count = ((i+NPixel-1)/NPixel)*NBlocks + (j+NPixel-1)/NPixel;
// // // Calculating the Optical Flow
// // OpticalFlow(NPixel, N, i*N + j, Targetind[count], tmd, tm2d, velocity);
// // // for (int k = 0; k < NPixel; k++)
// // // for (int l = 0; l < NPixel; l++)
// // // OpticalFlow[(i+k)*N+(j+l)] = tmd[(i+k)*N+(j+l)] - tm2d[Targetind[count]+(k*N+l)];
// // }
// // }
// cudaDeviceSynchronize();
// simulation_time = read_timer( ) - simulation_time;
// printf( "N: %d, NPixels: %d, NBlocks: %d, Radius: %d, GPGPU ST: %g s, CT: %g s\n", N, NPixel, N/NPixel, Radius, simulation_time,copy_time);
// // int i, j, count = 0;
// // int *B[N];
// // for (i=0; i<M; i++)
// // B[i] = (int *)malloc(N * sizeof(int));
// // for (int i = 0; i < N; i++)
// // for (int j = 0; j < N; j++)
// // printf("tmd in position %d*%d is:%d\n",i, j, tmd[i*N+j]);
// cudaFree(d_tmd);
// cudaFree(d_tm2d);
// cudaFree(d_velocity);
// free( tmd );
// free( tm2d );
// free( velocity );
// free( Targetind );
// free( targetvalue );
// return 0;
// }
// //module load cuda
// //salloc -N 1 -t 01:30:00 -p gpu
|
19,997 | #include <stdio.h>
#include <stdlib.h>
// Micro-benchmark kernel: executes `iters` back-to-back block-wide
// barriers and nothing else, so its runtime approximates pure
// __syncthreads() overhead.
__global__ void SyncKernel(int iters)
{
    for (int round = 0; round < iters; ++round)
        __syncthreads();
}
// Print the expected command line and argument meanings on stderr.
void usage(char *program)
{
    fprintf(stderr, "usage: %s nblocks nthreads iters\n", program);
    fputs(" nblocks: number of thread blocks (>0)\n", stderr);
    fputs(" nthreads: number of threads per block (>0)\n", stderr);
    fputs(" iters: number of iterations (>0)\n", stderr);
}
/* Measure the aggregate cost of `iters` __syncthreads() barriers with
 * the given launch shape. Usage: prog nblocks nthreads iters (all > 0).
 * Fixes vs. the original: the kernel launch error state is now checked
 * (a bad config, e.g. nthreads > device limit, used to print a bogus
 * time), and the timing events are destroyed instead of leaked. */
int main(int argc, char **argv) {
    int blocks, threads, iters;
    float time;
    cudaEvent_t start, stop;
    if (argc != 4) {
        usage(argv[0]);
        return EXIT_FAILURE;
    }
    blocks = atoi(argv[1]);
    threads = atoi(argv[2]);
    iters = atoi(argv[3]);
    if (blocks <= 0 || threads <= 0 || iters <= 0) {
        usage(argv[0]);
        return EXIT_FAILURE;
    }
    cudaFree(0);  /* force context creation so it is not timed */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    SyncKernel<<<blocks, threads>>>(iters);
    /* Catch launch-configuration errors before trusting the timing. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        return EXIT_FAILURE;
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Kernel synchronization overhead time for %d rounds: %3.5f ms \n", iters, time);
    /* Release the timing events (the original leaked them). */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return EXIT_SUCCESS;
}
19,998 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel: pipes extreme constants
 * (denormals, signed zeros, near-overflow magnitudes) through
 * transcendental functions and prints the final value of `comp`.
 * The NaN/Inf-producing expressions (e.g. -0.0f / -0.0f, logf of a huge
 * quotient) are intentional fuzzing inputs — do not "fix" them.
 * Intended to run as a single thread (<<<1,1>>>). */
__global__
void compute(float comp, float var_1,float var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
if (comp <= (var_1 - var_2)) {
comp = (-1.3362E-36f * var_4);
comp += (var_5 / -1.4797E-36f);
for (int i=0; i < var_3; ++i) {
float tmp_1 = var_6 + var_7 + powf(floorf((+1.9231E-43f + (-1.7396E17f + +1.6266E-17f))), acosf((-1.0219E-36f + -0.0f / -0.0f)));
comp = tmp_1 + +1.2606E36f - powf(var_8 * (+1.1071E-35f + var_9 + var_10), (var_11 * -1.4507E36f));
comp = sinhf(+1.3497E-43f * (var_12 * +1.5994E14f - -1.6073E-41f * var_13));
comp += var_14 - (-1.0769E-36f / var_15 - var_16);
}
if (comp == var_17 - (+1.5350E34f / (-1.2701E-8f * +1.0941E-35f))) {
float tmp_2 = logf((+1.7381E26f / -1.6074E-44f));
float tmp_3 = tanhf(-1.8802E-37f - +1.9595E-9f);
comp = tmp_3 * tmp_2 * +0.0f / -0.0f * (var_18 * +1.5576E-5f + var_19 / var_20);
}
}
/* print with full round-trip precision so host-side diffing is exact */
printf("%.17g\n", comp);
}
// Allocate a 10-element float array on the host and fill every slot with v.
// The caller owns the returned buffer and must free() it.
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    int k = 0;
    while (k < 10) {
        buf[k] = v;
        ++k;
    }
    return buf;
}
/*
 * Driver for the generated `compute` kernel: reads 21 numeric arguments
 * (argument 4 is an int, the rest floats), launches the kernel on a
 * single thread and synchronizes so the device printf output flushes.
 *
 * Fix over the original: argv[1]..argv[21] were read with no argc check,
 * so running with fewer arguments dereferenced out-of-bounds pointers.
 */
int main(int argc, char** argv) {
/* Guard against missing arguments: exactly 21 values are required. */
if (argc < 22) {
fprintf(stderr, "usage: %s v1 v2 ... v21\n", argv[0]);
return 1;
}
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
cudaDeviceSynchronize(); /* wait for the kernel and flush its printf */
return 0;
}
|
// Compile-time constant accessor: returns the template argument N.
template <int N>
__device__ int get_value()
{
    return N;
}
// For each thread t, store get_value<7>() * t (i.e. 7*t) into n[t].
// Uses only threadIdx.x, so it assumes a single-block launch whose
// thread count matches the length of n.
__global__ void foo_device(int *n)
{
    const int tid = threadIdx.x;
    n[tid] = get_value<7>() * tid;
}
20,000 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <vector>
#define real double
#define number float
#define NP 400
#define Q 1.29
#define me 0.511
#define eta 0.0
#define Mk 2.14 // Kamiokande detector mass (kton).
#define Mimb 6.8 // IMB detector mass (kton).
#define Mbaksan 0.28 // Baksan detector mass (kton).
#define fk (1.0)
#define fimb (0.9055)
#define fb (1.0)
#define Cn (1.0/(4 * 3.1416))
#define lnVk 14.56
#define lnVimb 15.73
#define delt 1.0 //2.3
#define timeK 20.0 //10.43
#define timeIMB 16.0 //5.9
#define timeEnd 40.0
#define epsmin 1.0
#define epsmax 50.0 // 50.0
#define ddeps 0.1 // 5.0
// Noise's parameters
#define pk 1.0
#define pimb 1.0
#define pb 1.0
#define effK 1e-5
#define effIMB 1e-5
#define Integra( r, ff , x, x1 , x2 , dx ) for(x = (x1) ; x <= (x2) ; x+=0 ){ r = r + (dx) * ( ff )/6.0 ; x+=(dx)/2.0 ; r = r + 4*(dx) * ( ff )/6.0; x+=(dx)/2.0 ;r = r + (dx) * ( ff )/6.0; };
#define H 6.0
// One work item for the GPU likelihood scan: six model parameters in,
// one likelihood value out (written back by computeParams()).
typedef struct LikelihoodParameter
{
double alpha, T, ap, tp, tau1, tau2; // emission/cooling model parameters passed to the kernel
double result; // output: likelihood computed for this parameter set
// Constructor is only declared here; its definition lives in another
// translation unit of the project.
LikelihoodParameter(double _alpha, double _T, double _ap, double _tp, double _tau1, double _tau2);
} LikelihoodParameter;
// Float-precision time-profile parameters used on the device.
// Per Temp()/LikelihoodK below: tp is the time at which the exponential
// temperature decay begins and tau1 its time constant.
// NOTE(review): ap and tau2 are never read in this file — confirm their
// use in other translation units before removing.
typedef struct PressSchecter
{
number ap, tp, tau1, tau2;
}PressSchecter;
// Normalized Gaussian pdf at x (mean x0, std-dev sigma), truncated to 0
// beyond H standard deviations so the expensive exponential is skipped.
// Fix: the original mixed double literals and fabs/sqrt/exp/pow into a
// float (`number`) function, silently promoting everything to double on
// the device; this version stays in single precision and replaces
// pow(z, 2) with a multiplication.
__device__ number gaussian(number x, number x0, number sigma)
{
	if (fabsf(x - x0) < sigma * H) {
		const number z = (x - x0) / sigma;
		return (1.0f / sqrtf(2.0f * 3.1415928f * sigma * sigma)) * expf(-0.5f * z * z);
	}
	return 0.0f;
}
// Press-Schecter-style temperature profile: constant T up to tmp.tp,
// then an exponential decay with time constant tmp.tau1. The exponent
// is clamped at 12 to keep exp() well-behaved.
__device__ number Temp(number tu, number T, PressSchecter tmp) {
	if (tu <= tmp.tp)
		return T;
	const number arg = min((tu - tmp.tp) / tmp.tau1, (number)12.0);
	return T * exp(-1.0 * arg);
}
// Radius/geometry correction factor that enters Rcol() squared.
// Currently a constant 1.0 placeholder; the argument T is ignored.
__device__ number r(number T) {
return 1.0;
}
// Fermi-Dirac occupation factor 1/(exp(E/T)+1).
// The exponent is clamped at 15 (exp(15) ~ 3.3e6, beyond which the
// result is numerically zero anyway) to avoid overflow for tiny T.
// Fix: the original `min(15.0, E / T)` mixed a double literal with a
// float quotient and used double exp/1.0; this stays in single precision.
__device__ number f_fermi(number E, number T)
{
	const number ET = min((number)15.0, E / T);
	return 1.0f / (expf(ET) + 1.0f);
}
// Phase-space factor for inverse beta decay at (anti)neutrino energy E,
// clamped to zero below the kinematic threshold.
// Fix: pow(x, 2.0) calls replaced with plain multiplications — integer
// powers via pow are needlessly expensive on the device.
__device__ number kappa(number E) {
	const number a = 1.0 - Q / E;
	const number b = 1.0 - (2.0 * Q / E);
	// (Q^2 - me^2) / E^2, written with multiplications instead of pow().
	const number c = ((number)(Q * Q) - (number)(me * me)) / (E * E);
	return a * sqrt(max(b + c, (number)0.0));
}
// Neutrino-cooling component of the detection rate at energy E, for
// coupling alpha, temperature T and effective detector mass MMeff.
// Returns 0 as soon as either the Fermi factor or the phase-space
// factor vanishes, skipping the expensive pow() products.
__device__ number Rcol(number E, number alpha, number T, number MMeff) {
	const number fm = f_fermi(E, T);
	if (fm <= 0.0)
		return 0.0;
	const number kp = kappa(E);
	if (kp <= 0.0)
		return 0.0;
	// 1.22e-5 * alpha^2 * M * E^4 * f(E,T) * kappa(E) * r(T)^2
	return (1.22e-5) * pow(alpha, (number)2.0) * MMeff
	     * (pow(E, (number)4.0)) * fm * kp * pow(r(T), (number)2.0);
}
// Kamiokande detection efficiency as a function of positron energy E:
// 0.95 * (1 - exp(-(E/9.3)^4)), clamped at zero.
// Fix: the original evaluated the whole expression in double
// (0.95, 9.3, pow(...,4.0)) and fed a mixed float/double pair to max();
// this version stays in single precision and replaces pow(x,4) with
// two multiplications.
__device__ number etabarK(number E)
{
	const number x = E / 9.3f;
	const number x2 = x * x;
	const number c = 0.95f * (1.0f - expf(-(x2 * x2)));
	return max(c, (number)0.0);
}
// Kamiokande background-noise spectrum: a Gaussian bump centred at
// 6 MeV (sigma = 1) plus a flat 0.001 floor, scaled by effK.
__device__ number noiseK(number E)
{
	const number bump = gaussian(E, 6.0, 1.0);
	return effK * (bump + 0.001);
}
// Hard analysis threshold at 5 MeV: 1 above it, 0 otherwise.
__device__ number StepK(number eps) {
	return (eps > 5.0) ? (number)1 : (number)0.0;
}
// Extended unbinned likelihood for the Kamiokande SN1987A events:
// exp(-termo1) — the Poisson "no other events" exposure term — times the
// product over observed events of (background + expected rate density).
// alpha, T: emission-model parameters; tmp: temperature time profile;
// LMax: in/out running maximum of the likelihood seen so far.
__device__ real LikelihoodK(number alpha, number T, PressSchecter tmp, real *LMax) {
// Per-event data for 16 Kamiokande events (index 0 is unused padding):
// trigger times, measured energies, energy resolutions, background rates.
const number tk[17] = { 0.0, 0.0, 0.107, 0.303, 0.324, 0.507, 0.686, 1.541, 1.728, 1.915, 9.219, 10.433, 12.439, 17.641, 20.257, 21.355, 23.814 }; // times of events;
const number Ek[17] = { 0.0, 20.0, 13.5, 7.5, 9.2, 12.8, 6.3, 35.4, 21, 19.8, 8.6, 13, 8.9, 6.5, 5.4, 4.6, 6.5 }; // energy of events;
const number Sigmak[17] = { 0.0, 2.9, 3.2, 2.0, 2.7, 2.9, 1.7, 8, 4.2, 3.2, 2.7, 2.6, 1.9, 1.6, 1.4, 1.3, 1.6 }; // standard deviation by events;
const number Bk[17] = { 1, 1.6e-5, 1.9e-3, 2.9e-2, 1.2e-2, 2.1e-3, 3.7e-2, 4.5e-5, 8.2e-5, 1.5e-5, // detector's noise;
1.5e-2, 1.9e-3, 1.6e-2, 3.8e-2, 2.9e-2, 2.8e-2, 3.8e-2 };
int i ;
number soma;
real termo1, termo2;
real prod;
number eps;
number e1, e2;
soma = 0.0;
//number jddtp = 0.05;
number time_end = timeEnd;
termo1 = 0.0;
// Integrate the time axis only out to H decay times past tp (capped at
// 3*timeEnd) — beyond that the exponentially-cooled rate is negligible.
time_end = min(3*timeEnd, tmp.tp + H * 1.0/tmp.tau1);
number jddtp = (time_end - tmp.tp) / 30.0;
//return (etabarK(eps)*(Cn*Rcol(eps + Q, alpha, Temp(0.0, T, tmp), Mk) + noiseK(epsmin)));
// Plateau phase [0, tp]: temperature is constant, so integrate the
// energy spectrum once and multiply by the plateau duration tp.
{
number Tj = Temp(0, T, tmp);
for (eps = epsmin; eps <= epsmax; eps = eps + ddeps)
{
number _etabarK = etabarK(eps);
termo1 += (_etabarK*(Cn*Rcol(eps + Q, alpha, Tj, Mk) + noiseK(eps)));
}
termo1 = termo1 * tmp.tp;
}
// Cooling phase (tp, time_end]: the temperature varies, so integrate the
// spectrum separately at each of the 30 time steps of width jddtp.
for (number ti = tmp.tp; ti <= time_end; ti = ti + jddtp)
{
number Tj = Temp(ti, T, tmp);
if (Tj > 0.01)
{
for (eps = epsmin; eps <= epsmax; eps = eps + ddeps)
{
termo1 += (etabarK(eps)*(Cn*Rcol(eps + Q, alpha, Tj, Mk) + noiseK(eps))) * jddtp ;
}
//Integra(termo1, (etabarK(eps)*(Cn*Rcol(eps + Q, alpha, Tj, Mk) + noiseK(eps))), eps, epsmin, epsmax, ddeps);
}
}
// Both accumulations above deferred the energy-step width; apply it once.
termo1 = termo1 * ddeps;
//{
// number ti = 0;
// Integra(termo1, ProbNotIntegrate(alpha, ti, T, tmp), ti, 0, time_end, jddtp);
//}
// Per-event product over events 1..12, skipping event 6 (excluded from
// the fit). termo2 convolves the rate with the event's energy resolution
// via the Simpson-rule Integra macro (weights 1,4,1 over each step).
prod = 1.0;
for (i = 1; i <= 12; i++)
{
if (i == 6) continue;
termo2 = 0.0;
number Tj = Temp(tk[i], T, tmp);
if (Tj > 0.01)
{
// Restrict the convolution window to +/- H sigmas around the
// measured energy, clipped to the analysis range.
e1 = max(epsmin, Ek[i] - H * Sigmak[i]);
e2 = min(epsmax, Ek[i] + H * Sigmak[i]);
number dSigma = Sigmak[i] / 4.0;
//Integra(termo2, RDet(eps) , eps, e1, e2, dSigma);
Integra(termo2, (StepK(eps)* Cn * lnVk * gaussian(eps, Ek[i], Sigmak[i])*Cn*(Rcol(eps + Q, alpha, Tj, Mk) + 1.0*noiseK(eps))), eps, e1, e2, dSigma);
}
prod = prod * (Bk[i] + termo2);
}
// Combine: Poisson exposure term times per-event densities; track the max.
soma = exp(-termo1) * prod;
if (soma > (*LMax)) *LMax = soma;
return soma;
}
// Adapter: packs the four time-profile parameters into a PressSchecter
// and evaluates the Kamiokande likelihood. The per-call running maximum
// is local and discarded.
__device__ real Likelihood_combined(real alpha, real T, real ap, real tp, real tau1, real tau2)
{
	PressSchecter shape;
	shape.ap = (number)ap;
	shape.tp = (number)tp;
	shape.tau1 = (number)tau1;
	shape.tau2 = (number)tau2;
	real best = 0.0;
	return LikelihoodK(alpha, T, shape, &best);
}
// One thread per parameter set: results[tid] = likelihood of inputParams[tid].
// The grid may overshoot `size`; excess threads simply do nothing.
// Fix: the trailing __syncthreads() was removed — the kernel uses no
// shared memory and threads exchange no data, so the barrier only cost
// time (it was not a hazard, since the guard above does not return).
__global__ void Likelihood(const LikelihoodParameter *inputParams, double *results , unsigned int size)
{
	unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x;
	if (tid < size)
	{
		const LikelihoodParameter &p = inputParams[tid];
		results[tid] = Likelihood_combined(p.alpha, p.T, p.ap, p.tp, p.tau1, p.tau2);
	}
}
// True iff x is a positive power of two.
// Fix: the original `(x & (x - 1)) == 0` wrongly reported 0 as a power
// of two; the `x != 0` guard closes that edge case.
bool IsPowerOfTwo(unsigned int x)
{
	return x != 0 && (x & (x - 1)) == 0;
}
// Copy `size` parameter sets to the GPU, run the Likelihood kernel and
// copy the per-set results back into `results`.
// Device buffers are cached in static pointers across calls so repeated
// invocations do not pay cudaMalloc each time.
// Fixes: (1) the original allocated the buffers once at the first call's
// size and never grew them — any later, larger batch silently overflowed
// device memory; a capacity is now tracked and the buffers reallocated.
// (2) size_t values were printed with %i (undefined behavior) — now %zu.
// (3) the unused occupancy-query locals were removed.
cudaError_t LikelihoodList(LikelihoodParameter *a, double *results, const unsigned int size)
{
	cudaError_t cudaStatus = cudaSuccess;
	static LikelihoodParameter *dev_a = 0;
	static double *dev_Lk = 0;
	static unsigned int dev_capacity = 0; // element capacity of the cached device buffers
	unsigned int size_truc = size;
	int nth;
	// Pad the working size up to a multiple of the warp size (32).
	while (size_truc % 32 != 0) size_truc++;
	// (Re)allocate the cached buffers on first use or when the request
	// outgrows the previous allocation.
	if (dev_a == 0 || size_truc > dev_capacity)
	{
		cudaStatus = cudaSetDevice(0);
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
			goto Error;
		}
		if (dev_a) { cudaFree(dev_a); dev_a = 0; }
		if (dev_Lk) { cudaFree(dev_Lk); dev_Lk = 0; }
		cudaStatus = cudaMalloc((void**)&dev_a, size_truc * sizeof(LikelihoodParameter));
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "cudaMalloc A failed! %zu \n", size_truc * sizeof(LikelihoodParameter));
			goto Error;
		}
		cudaStatus = cudaMalloc((void**)&dev_Lk, size_truc * sizeof(double));
		if (cudaStatus != cudaSuccess) {
			fprintf(stderr, "cudaMalloc B failed! %zu \n", size_truc * sizeof(double));
			goto Error;
		}
		dev_capacity = size_truc;
	}
	// Host -> device copy of the input parameter sets.
	cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(LikelihoodParameter), cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed! \n");
		goto Error;
	}
	// Launch with fixed 64-thread blocks; the +1 block covers the remainder
	// (the kernel bounds-checks against `size`).
	nth = size_truc / 64 + 1;
	Likelihood <<<nth, 64 >>>( dev_a , dev_Lk , size );
	// Launch-configuration errors surface here...
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "size trunc: %u\n", size_truc);
		fprintf(stderr, "Likelihood launch failed: %s\n", cudaGetErrorString(cudaStatus));
		goto Error;
	}
	// ...and asynchronous execution errors surface at synchronization.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
		goto Error;
	}
	// Device -> host copy of the computed likelihoods.
	cudaStatus = cudaMemcpy(results, dev_Lk, size * sizeof(double), cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
Error:
	// Buffers are intentionally kept alive for the next call.
	return cudaStatus;
}
// Evaluate every queued parameter set on the GPU and write each
// likelihood back into its LikelihoodParameter::result field.
// Fix: the parameter declaration was mojibake — `¶ms` is a
// mis-encoded `&params`; as written the function did not compile.
void computeParams(std::vector<LikelihoodParameter> &params)
{
	static int enQueues = 0; // running count of parameter sets evaluated so far
	unsigned int size = params.size();
	std::vector<double> results(size, 0.0);
	LikelihoodList(params.data(), results.data(), size);
	for (unsigned int k = 0; k < size; ++k)
	{
		params[k].result = results[k];
	}
	enQueues += size;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.