serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
7,301 | //#include "ParticleFinderKernels.cuh"
//
//thrust_operator bool IsFoundParticle::operator()(const Particle p)
//{
// return p.parent == nullptr;
//}
//thrust_operator bool IsNotFoundParticle::operator()(const Particle p)
//{
// return p.parent != nullptr;
//}
//// apply to parents to get proper initial values
//thrust_operator int initFoundParticles::operator()(Particle& p)
//{
// p.x *= p.i;
// p.y *= p.i;
// p.z *= p.i;
//
// p.r2 *= _xyFactor;
//
// p.nContributingSlices = 1;
//
// return 0;
//}
//// apply to children to reduce into parent
//thrust_operator int computeAverageSum::operator()(Particle p)
//{
//#if SOLVER_DEVICE
// atomicAdd (&p.parent->x, p.i * p.x);
// atomicAdd (&p.parent->y, p.i * p.y);
// atomicAdd (&p.parent->z, p.i * p.z);
// atomicAdd (&p.parent->i, p.i);
//
// // https://stackoverflow.com/a/51549250/1973454
// // (I took the positive version of this because the radius is always positive
// atomicMax ((int*)&p.parent->r2, __float_as_int (_xyFactor * p.r2));
//
// atomicAdd (&p.parent->nContributingSlices, 1);
//#else
// p.parent->x += p.i * p.x;
// p.parent->y += p.i * p.y;
// p.parent->z += p.i * p.z;
//
// p.parent->r2 = fmaxf (_xyFactor * p.r2, p.parent->r2);
//
// p.parent->i += p.i;
// p.parent->nContributingSlices++;
//#endif
//
// return 0;
//}
//// apply to parent to get averaged positions
//thrust_operator int averageParticlePositions::operator()(Particle& p)
//{
// p.x *= _xyFactor / p.i;
// p.y *= _xyFactor / p.i;
// p.z *= _zFactor / p.i;
// // p.i /= float (p.nContributingSlices); // does this need to be averaged?
//
// return 0;
//}
//// apply to parent to get averaged positions
//thrust_operator bool CheckParticleBoundaries::operator()(const Particle p)
//{
// return
// (p.x < _minX) || (p.x > _maxX) ||
// (p.y < _minY) || (p.y > _maxY) ||
// (p.z < _minZ) || (p.z > _maxZ);
//}
//thrust_operator bool FilterParticlesBySliceCount::operator()(const Particle P)
//{
// return P.nContributingSlices < _minSlices;
//}
//thrust_operator ParticleFinder::FoundParticle Particle2FoundParticle::operator()(const Particle& p)
//{
// ParticleFinder::FoundParticle ret;
// ret.fPosX = p.x;
// ret.fPosY = p.y;
// ret.fPosZ = p.z;
// ret.fIntensity = p.i;
// ret.fR2 = p.r2;
// return ret;
//}
//
//MakeParticleFromIdx::MakeParticleFromIdx (int nStack, int sIdx, int n, int kRad, float* lm, float* cK, float* xK, float* yK, float* sqK) :
// stackNum (nStack),
// sliceIdx (sIdx),
// N (n),
// kernelRad (kRad),
// lmImg (lm),
// circMask (cK),
// rxMask (xK),
// ryMask (yK),
// rSqMask (sqK)
//{}
//thrust_operator Particle MakeParticleFromIdx::operator()(int idx)
//{
// // Grab x, y values
// int x = idx % N;
// int y = idx / N;
//
// // Make tmp pointers to our masks and advance them
// // as we iterate to perform the multiplication
// float* tmpCircPtr = circMask;
// float* tmpXPtr = rxMask;
// float* tmpYPtr = ryMask;
// float* tmpR2Ptr = rSqMask;
//
// // To be calculated
// float total_mass (0);
// float x_offset (0), y_offset (0);
// float r2_sum (0);
//
// // Apply the mask as a multiplcation
// for (int iY = -kernelRad; iY <= kernelRad; iY++)
// {
// // For y, go down then up
// float* ptrY = &lmImg[idx + (N * iY)];
// for (int iX = -kernelRad; iX <= kernelRad; iX++)
// {
// // Get the local max img value
// float lmImgVal = ptrY[iX];
//
// // Multiply by mask value, sum, advance mask pointer
// total_mass += lmImgVal * (*tmpCircPtr++);
// x_offset += lmImgVal * (*tmpXPtr++);
// y_offset += lmImgVal * (*tmpYPtr++);
// r2_sum += lmImgVal * (*tmpR2Ptr++);
// }
// }
//
// // Calculate x val, y val
// // (in the original code the calculation is
// // x_val = x + x_offset/total_maxx - kernelRad - 1... not sure if I still need that)
// float total_mass_inv = 1.f / total_mass;
// float x_val = float (x) + x_offset * total_mass_inv - kernelRad - 1;
// float y_val = float (y) + y_offset * total_mass_inv - kernelRad - 1;
// float z_val = float (sliceIdx + 1);
// float r2_val = r2_sum * total_mass_inv;
//
// // Construct particle and return
// Particle p{ x_val, y_val, z_val, total_mass, r2_val };
// return p;
//}
//thrust_operator int SeverParticle::operator()(Particle& p)
//{
// p.parent = nullptr;
// p.nContributingSlices = 0;
//
// return 0;
//}
//thrust_operator bool CheckParticleRadius::operator()(int idx)
//{
// int ixPrev = idx % prevParticleCount;
// int ixCur = idx / prevParticleCount;
// Particle& prev = prevParticles[ixPrev];
// Particle& cur = curParticles[ixCur];
//
// float dX = prev.x - cur.x;
// float dY = prev.y - cur.y;
// float r2 = (dX * dX + dY * dY);
// return r2 < r2Max;
//}
//thrust_operator int AttachParticle::operator() (const int idx)
//{
// int ixPrev = idx % prevParticleCount;
// int ixCur = idx / prevParticleCount;
// Particle& prev = prevParticles[ixPrev];
// Particle& cur = curParticles[ixCur];
//
//#if SOLVER_DEVICE
// unsigned long long int* parentAddr = (unsigned long long int*) & cur.parent;
// unsigned long long int newParent = (unsigned long long int)(prev.parent ? prev.parent : &prev);
// unsigned long long int* matchAddr = (unsigned long long int*) & cur.match;
// unsigned long long int newMatch = (unsigned long long int)(&prev);
// atomicExch (parentAddr, newParent);
// atomicExch (matchAddr, newMatch);
// atomicExch (&cur.nContributingSlices, prev.nContributingSlices + 1);
//#else
// Particle* newParent = prev.parent ? prev.parent : &prev;
// cur.parent = prev.parent ? prev.parent : &prev;
// cur.nContributingSlices = prev.nContributingSlices + 1;
//#endif
//
// return 0;
//}
//thrust_operator bool ShouldSeverParticle::operator()(Particle p)
//{
// return ((p.nContributingSlices >= minSlices) && (p.i > p.match->i)) || (p.nContributingSlices >= maxSlices);
//} |
7,302 | #include <stdio.h>
#include <stdlib.h>
#define NUM 4096
#define NUM_THREADS 512
#define NUM_BLOCKS 8
/* Function to implement c[i] = a[i] + b[i] non coalesced memory access */
/* c[i] = a[i] + b[i] with the thread index reversed inside each block.
   NOTE(review): reversing threadIdx.x within a block keeps every warp's
   accesses inside the same contiguous, aligned segment, so modern GPUs
   coalesce this pattern anyway; a strided mapping would be needed to truly
   defeat coalescing. Confirm what this benchmark is intended to show. */
__global__ void no_coalesce(int *a, int *b, int *c)
{
int idx = threadIdx.x;
idx = NUM_THREADS - idx - 1;
int index = idx + (blockIdx.x * blockDim.x);
c [index] = a [index] + b [index];
}
/* Function to implement c[i] = a[i] + b[i], coalesced memory access */
/* c[i] = a[i] + b[i] with the natural thread-to-element mapping, so each
   warp touches one contiguous, aligned segment (fully coalesced).
   Assumes the launch covers exactly NUM elements. */
__global__ void coalesce (int *a, int *b, int *c)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
/* Benchmark the "non-coalesced" vs coalesced vector-add kernels and verify
   both results on the host. */
int main ()
{
    /* CUDA events for kernel timing */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int a[NUM], b[NUM], c[NUM], i;
    float time1, time2;
    bool passed;

    for (i = 0; i < NUM; i++)
    {
        a[i] = i;
        b[i] = i;
    }

    int *dev_a, *dev_b, *dev_c;
    cudaMalloc(&dev_a, NUM * sizeof(int));
    cudaMemcpy(dev_a, a, NUM * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc(&dev_b, NUM * sizeof(int));
    cudaMemcpy(dev_b, b, NUM * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc(&dev_c, NUM * sizeof(int));
    /* No copy of c to the device: the kernels overwrite every element of
       dev_c, and host c was uninitialized anyway (the original copied it). */

    /* Time the "non-coalesced" kernel */
    cudaEventRecord(start, 0);
    no_coalesce<<<NUM_BLOCKS, NUM_THREADS>>>(dev_a, dev_b, dev_c);
    cudaDeviceSynchronize();   /* cudaThreadSynchronize() is deprecated */
    cudaEventRecord (stop, 0);
    cudaEventSynchronize (stop);
    cudaEventElapsedTime (&time1, start, stop);

    cudaMemcpy(c, dev_c, NUM*sizeof(int), cudaMemcpyDeviceToHost);
    passed = true;
    for (i = 0; i < NUM; i++)
    {
        if (c [i] != a [i] + b [i])
            passed = false;
    }
    printf ("\nNon-Coalesced:\t%s\nTime:\t%f\n", passed ? "PASSED" : "FAILED", time1);

    /* Time the coalesced kernel */
    cudaEventRecord(start, 0);
    coalesce<<<NUM_BLOCKS, NUM_THREADS>>>(dev_a, dev_b, dev_c);
    cudaDeviceSynchronize();
    cudaEventRecord (stop, 0);
    cudaEventSynchronize (stop);
    cudaEventElapsedTime (&time2, start, stop);

    cudaMemcpy(c, dev_c, NUM*sizeof(int), cudaMemcpyDeviceToHost);
    passed = true;
    for (i = 0; i < NUM; i++)
    {
        if (c [i] != a [i] + b [i])
            passed = false;
    }
    printf ("\nCoalesced:\t%s\nTime:\t%f\n", passed ? "PASSED" : "FAILED", time2);

    cudaEventDestroy (start);
    cudaEventDestroy (stop);
    cudaFree (dev_a);
    cudaFree (dev_b);
    cudaFree (dev_c);
    return 0;
}
|
7,303 | #include <iostream>
#include "cuda_runtime.h"
#include "time.h"
using namespace std;
#define num (256 * 1024 * 1024)
// 256-bin byte histogram using per-block privatized bins in shared memory.
// Assumes blockDim.x == 256 (one shared bin per thread).
// NOTE: this kernel SUBTRACTS its counts from outPutHist -- the caller
// pre-loads outPutHist with the CPU-computed histogram, so an all-zero
// result buffer means the GPU and CPU histograms agree.
__global__ void hist(unsigned char* inputdata, int* outPutHist, long size) {
__shared__ int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
// grid-stride walk over the input bytes
int ids = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
while (ids < size) {
// accumulate this block's histogram in shared memory
atomicAdd(&temp[inputdata[ids]], 1);
ids += offset;
}
// wait for the whole block, then subtract the block's bins from the global buffer
__syncthreads();
atomicSub(&outPutHist[threadIdx.x], temp[threadIdx.x]);
}
/* Histogram a large random byte buffer on CPU and GPU and verify the GPU
   result by pre-loading the device buffer with the CPU counts; the kernel
   subtracts, so every bin should end at zero when they match. */
int main() {
    // Random input bytes in [0, 255].
    unsigned char* cpudata = new unsigned char[num];
    for (size_t i = 0; i < num; i++)
        cpudata[i] = static_cast<unsigned char>(rand() % 256);

    // CPU histogram (also the reference the GPU subtracts from).
    int cpuhist[256];
    memset(cpuhist, 0, 256 * sizeof(int));

    /* CPU timing */
    clock_t cpu_start, cpu_stop;
    cpu_start = clock();
    for (size_t i = 0; i < num; i++) cpuhist[cpudata[i]]++;
    cpu_stop = clock();
    cout << "CPU time: " << (cpu_stop - cpu_start) << "ms" << endl;

    /* GPU timing */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Copy the input bytes to the device.
    unsigned char* gpudata;
    cudaMalloc((void**)&gpudata, num * sizeof(unsigned char));
    cudaMemcpy(gpudata, cpudata, num * sizeof(unsigned char),
               cudaMemcpyHostToDevice);

    // Pre-load the device histogram with the CPU counts; hist() subtracts.
    int* gpuhist;
    cudaMalloc((void**)&gpuhist, 256 * sizeof(int));
    cudaMemcpy(gpuhist, cpuhist, 256 * sizeof(int), cudaMemcpyHostToDevice);

    cudaEventRecord(start, 0);
    hist<<<1024, 256>>>(gpudata, gpuhist, num);
    cudaEventRecord(stop, 0);

    // Blocking copy back (also synchronizes with the kernel).
    cudaMemcpy(cpuhist, gpuhist, 256 * sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(gpudata);
    cudaFree(gpuhist);
    delete[] cpudata;  // was `delete cpudata`: array form required for new[]

    cudaEventSynchronize(stop);
    float gputime;
    cudaEventElapsedTime(&gputime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cout << "GPU time: " << gputime << "ms" << endl;

    // Sum of absolute deltas: the original summed signed values, so opposite
    // errors in different bins could cancel and falsely report success.
    long result = 0;
    for (size_t i = 0; i < 256; i++)
        result += (cpuhist[i] < 0) ? -(long)cpuhist[i] : (long)cpuhist[i];
    if (result == 0)
        cout << "GPU has the same result with CPU." << endl;
    else
        cout << "Error: GPU has a different result with CPU." << endl;
    system("pause");
    return 0;
}
7,304 | #include<iostream>
// Fill a flattened N x N matrix with uniform random values in [0, 1].
void RandomInit(float *mat, int N)
{
    const int total = N * N;
    for (int idx = 0; idx < total; ++idx)
        mat[idx] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
}
// Naive matrix multiply: matC = matA * matB for N x N row-major matrices.
// One thread per output element on a 2D grid of 2D blocks.
__global__ void matMul(float *matA, float *matB, float *matC, int N)
{
    int column = threadIdx.x + blockIdx.x * blockDim.x;
    int row    = threadIdx.y + blockIdx.y * blockDim.y;  // was blockIdx.x: wrong row
    if (column < N && row < N)
    {
        float sum = 0.0f;                 // was int: truncated every float product
        for (int k = 0; k < N; k++)       // was k < N*N: read far out of bounds
        {
            // was matA[N*column + k] * matB[k*N + column]: ignored `row` entirely
            sum += matA[row * N + k] * matB[k * N + column];
        }
        matC[row * N + column] = sum;
    }
}
/* Compare CPU vs GPU matrix multiplication with user-supplied matrix size
   and launch configuration; reports init/compute/copy-back times. */
int main()
{
    int matrixSize;
    printf("Input the matrix size: ");
    scanf("%d", &matrixSize);

    /* was sizeof(int): the buffers hold floats */
    size_t size = matrixSize * matrixSize * sizeof(float);
    float *matA, *matB, *matC, *matD;
    float *gpuMatA, *gpuMatB, *gpuMatC;
    matA = (float*)malloc(size);
    matB = (float*)malloc(size);
    matC = (float*)malloc(size);
    RandomInit(matA, matrixSize);
    RandomInit(matB, matrixSize);

    cudaEvent_t gpuStart, gpuStop, cpuStart, cpuStop;
    cudaEventCreate(&gpuStart);
    cudaEventCreate(&gpuStop);
    cudaEventCreate(&cpuStart);
    cudaEventCreate(&cpuStop);
    float cpu_tottime;

    /* CPU reference multiply (timed with CUDA events) */
    cudaEventRecord(cpuStart, 0);
    for (int i = 0; i < matrixSize; i++)
    {
        for (int j = 0; j < matrixSize; j++)
        {
            float sum = 0.0f;  /* was int: truncated the float products */
            for (int k = 0; k < matrixSize; k++)
            {
                sum += matA[i*matrixSize + k] * matB[k*matrixSize + j];
            }
            matC[i*matrixSize + j] = sum;
        }
    }
    cudaEventRecord(cpuStop, 0);
    cudaEventSynchronize(cpuStop);
    cudaEventElapsedTime(&cpu_tottime, cpuStart, cpuStop);
    printf("CPU time %5.5f (ms) by matrix multiplication\n", cpu_tottime);

    int threadsPerblock = 0;
    int blocksPergrid = 0;

    /* Device allocation + host-to-device copies (timed) */
    cudaEventRecord(gpuStart, 0);
    cudaMalloc((void**)&gpuMatA, size);
    cudaMalloc((void**)&gpuMatB, size);
    cudaMalloc((void**)&gpuMatC, size);
    cudaMemcpy(gpuMatA, matA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(gpuMatB, matB, size, cudaMemcpyHostToDevice);
    cudaEventRecord(gpuStop, 0);
    cudaEventSynchronize(gpuStop);
    float Inittime;
    cudaEventElapsedTime(&Inittime, gpuStart, gpuStop);
    printf("Input time: %5.5f (ms)\n", Inittime);

    printf("Input the threads per block: ");
    scanf("%d", &threadsPerblock);
    printf("\nInput the blocks per grid: ");
    scanf("%d", &blocksPergrid);
    printf("\n");

    matD = (float*)malloc(size);
    dim3 blocks(threadsPerblock, threadsPerblock);
    dim3 grid(blocksPergrid, blocksPergrid);

    /* GPU multiply (timed) */
    cudaEventRecord(gpuStart, 0);
    matMul<<<grid, blocks>>>(gpuMatA, gpuMatB, gpuMatC, matrixSize);
    cudaEventRecord(gpuStop, 0);
    cudaEventSynchronize(gpuStop);
    float pro_time;
    cudaEventElapsedTime(&pro_time, gpuStart, gpuStop);
    printf("GPU Processing time: %5.5f (ms)\n", pro_time);

    /* Copy back + device cleanup (timed) */
    cudaEventRecord(gpuStart, 0);
    cudaMemcpy(matD, gpuMatC, size, cudaMemcpyDeviceToHost);
    cudaFree(gpuMatA);
    cudaFree(gpuMatB);
    cudaFree(gpuMatC);
    cudaEventRecord(gpuStop, 0);
    cudaEventSynchronize(gpuStop);
    float Outtime;
    cudaEventElapsedTime(&Outtime, gpuStart, gpuStop);
    printf("Output time: %5.5f (ms)\n", Outtime);
    printf("Total time of GPU: %5.5f (ms)\n", (Inittime + pro_time + Outtime));

    /* Release host buffers and timing events (the original leaked them) */
    free(matA); free(matB); free(matC); free(matD);
    cudaEventDestroy(gpuStart); cudaEventDestroy(gpuStop);
    cudaEventDestroy(cpuStart); cudaEventDestroy(cpuStop);
    return 0;
}
|
7,305 | #include "includes.h"
// Element-wise vector add: v0[i] += v1[i] for i in [0, size).
// The original indexed with threadIdx.x only (wrong for more than one
// block: all blocks would redundantly add to the same elements) and had
// no bounds check; this uses a global index with a guard so any launch
// configuration that covers `size` is safe.
__global__ void vecadd( int * v0, int * v1, std::size_t size )
{
    auto tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < size)
        v0[ tid ] += v1[ tid ];
}
// First half of a split saxpy: Z[i] = A * X[i].
// NOTE(review): no bounds check -- the launch must cover exactly the array length.
extern "C" __global__ void saxpy0(float* Z, float A, float* X) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    Z[tid] = A * X[tid];
}
// Second half of a split saxpy: Z[i] = Z[i] + Y[i].
// NOTE(review): no bounds check -- the launch must cover exactly the array length.
extern "C" __global__ void saxpy1(float* Z, float* Y) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    Z[tid] = Z[tid] + Y[tid];
}
|
7,307 | #include <stdio.h>
extern "C" {
__device__ __constant__ int n;
__device__ __constant__ int* offsets;
__device__ __constant__ int* outlist;
__device__ int* parents;
__device__ int* frontier;
__device__ int** next_frontier;
__device__ int* next_len;
enum Errors {
NONE = 0,
MALLOC = -1,
};
__device__ int err = NONE;
// One BFS expansion step: each thread takes one vertex from `frontier`,
// claims its undiscovered neighbors via atomicCAS on `parents` (~0 marks
// "unvisited"), and stores the newly discovered vertices in a freshly
// malloc'd per-thread list in next_frontier[idx] (merged later by reduce()).
__global__ void bfs() {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    int v = frontier[idx];
    int out = offsets[v];
    int len = offsets[v+1] - out;
    int num_discovered = 0;
    int* new_frontier = (int*) malloc(len * sizeof(int));
    if (!new_frontier) {
        err = MALLOC;  // was `err = true` (1), which is neither NONE nor MALLOC
        return;
    }
    for (int i = 0; i < len; i++) {
        int edge = outlist[out + i];
        int old = atomicCAS(&parents[edge], ~0, v);
        if (old == ~0) {
            new_frontier[num_discovered++] = edge;
        }
    }
    next_frontier[idx] = new_frontier;
    next_len[idx] = num_discovered;
}
// One pairwise-merge pass: concatenate per-thread frontier lists idx and
// idx+w into a new buffer in slot idx, freeing the inputs. Repeated with
// halving w, this leaves the merged frontier in slot 0.
__global__ void reduce(int t, int w) {
    int idx = (blockIdx.x * blockDim.x + threadIdx.x);
    if (idx + w >= t) return;
    int* a = next_frontier[idx];
    int al = next_len[idx];
    int* b = next_frontier[idx+w];
    int bl = next_len[idx+w];
    int cl = al + bl;
    int* c = (int*) malloc(cl * sizeof(int));
    if (!c) {            // was an unhandled TODO: report instead of faulting in memcpy
        err = MALLOC;
        return;
    }
    memcpy(c , a, al*sizeof(int));
    memcpy(c+al, b, bl*sizeof(int));
    free(a);
    free(b);
    next_frontier[idx] = c;
    next_frontier[idx+w] = 0;
    next_len[idx] = cl;
    next_len[idx+w] = 0;
}
// Copy the merged frontier (left in slot 0 by the reduce() passes) back into
// the persistent `frontier` buffer and free the temporary list.
// NOTE(review): reads and writes global state with no synchronization --
// presumably launched <<<1,1>>> after all reduce passes complete; confirm
// at the call site.
__global__ void step() {
int* f = next_frontier[0];
memcpy(frontier, f, next_len[0]*sizeof(int));
free(f);
}
} // extern "C"
|
7,308 | #include <iostream>
//some of the materials here was reused or based on NERSC/OLCF cuda training-series (https://github.com/olcf/cuda-training-series)
#define cudaCheck(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
#define DATA_SIZE 1024*1024*32
void result_check(float *A, float *B, float *C);
// kernel
__global__
void vector_add_kernel(const float *A, const float *B, float *C, int size);
__global__
void vector_add_kernel_memory(const float *A, const float *B, float *C, int size, int stride_size);
/* Driver for the vector-add launch-configuration / memory-stride studies:
   allocates and fills host vectors, runs the selected kernel, verifies the
   result, and releases all memory (the original leaked every buffer). */
int main(){
    float *A_h, *B_h, *C_h; // host pointers
    float *A_d, *B_d, *C_d; // device pointers

    A_h = new float[DATA_SIZE]; // allocating host arrays
    B_h = new float[DATA_SIZE];
    C_h = new float[DATA_SIZE];

    for (int i = 0; i < DATA_SIZE; i++){ // initializing host vectors
        A_h[i] = rand()/(float)RAND_MAX;
        B_h[i] = rand()/(float)RAND_MAX;
        C_h[i] = 0;
    }

    cudaMalloc(&A_d, DATA_SIZE*sizeof(float)); // allocate device vectors
    cudaMalloc(&B_d, DATA_SIZE*sizeof(float));
    cudaMalloc(&C_d, DATA_SIZE*sizeof(float));
    cudaCheck("Error in cudaMallocs");

    // copy host vectors to device
    cudaMemcpy(A_d, A_h, DATA_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B_d, B_h, DATA_SIZE*sizeof(float), cudaMemcpyHostToDevice);
    cudaCheck("host to device copy error");

    int blocks = 4;    // number of blocks
    int threads = 256; // threads per block

    //*********** uncomment the kernel below for studying launch configurations ***********
    vector_add_kernel<<<blocks, threads>>>(A_d, B_d, C_d, DATA_SIZE);
    //*********** uncomment the two lines below to study memory caching and coalescing ***********
    // int mem_stride = 8;
    // vector_add_kernel_memory<<<blocks, threads>>>(A_d, B_d, C_d, DATA_SIZE, mem_stride);
    cudaCheck("kernel launch error");

    // copy result vector from device to host
    cudaMemcpy(C_h, C_d, DATA_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
    cudaCheck("device to host copy error or kernel launch failure");

    result_check(A_h, B_h, C_h);

    // Release device and host memory.
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    delete[] A_h;
    delete[] B_h;
    delete[] C_h;
    return 0;
} // end main
// Verify C[i] == A[i] + B[i] for all DATA_SIZE elements and print the verdict.
// Exact float comparison is intentional here: the host performs the same
// single addition the kernel performed.
void result_check(float *A, float *B, float *C){
    bool all_match = true;
    for (int i = 0; i < DATA_SIZE; i++){
        if (A[i] + B[i] != C[i]){
            all_match = false;
            break;   // one mismatch is enough to fail
        }
    }
    if (all_match){
        std::cout<<"\tCorrectness Test Passed\n";
    }else{
        std::cout<<"\tCorrectness Test Failed\n";
    }
}
// kernel for launch configurations
// Grid-stride vector add: C = A + B. Correct for any grid/block
// configuration, including a single block covering a large array.
__global__
void vector_add_kernel(const float *A, const float *B, float *C, int size){
    const int stride = gridDim.x * blockDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += stride)
        C[i] = A[i] + B[i];
}
// kernel for stuyding effect of caching
// Kernel for studying the effect of access granularity on caching/coalescing:
// each thread processes contiguous chunks of `stride_size` elements instead
// of interleaving element-by-element with its neighbors, so warp accesses
// are spread across separate memory segments for stride_size > 1.
// NOTE(review): assumes size is divisible by total_threads * stride_size;
// any trailing elements are skipped -- confirm callers size accordingly.
__global__
void vector_add_kernel_memory(const float *A, const float *B, float *C, int size, int stride_size){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int total_threads = gridDim.x*blockDim.x;
// number of chunks each thread handles
int strides_per_thread = size/(total_threads*stride_size);
for(int j = 0; j < strides_per_thread; j++){
// this thread's j-th chunk; chunks are distributed round-robin across threads
int stride_begin = stride_size * idx + j * stride_size * total_threads;
int stride_end = stride_size + stride_begin;
for(int i = stride_begin; i < stride_end; i++ ){
C[i] = A[i] + B[i];
}
}
}
|
7,309 | #include "../include/camera.cuh"
#include "../include/math_utils.cuh"
// Build a pinhole camera. `fov` is given in degrees; the member `fov` is
// stored in radians.
// NOTE(review): unlike move(), the constructor does not normalize `dir` --
// confirm callers pass a unit vector.
__host__ __device__
Camera::Camera(int width, int height, float fov, const vec3 &origin, const vec3 &dir)
: width{width}, height{height}, fov{fov * M_PI_F / 180.0f},
origin{origin}, dir{dir}
{
    inv_width = 1.0f / width;
    inv_height = 1.0f / height;
    aspect_ratio = width / float(height);
    // BUG FIX: the parameter `fov` (degrees) shadowed the member (radians),
    // so tan() was evaluated on degrees; use the converted member instead.
    angle = tanf(0.5f * this->fov);
    calc_axes();
}
// Reposition the camera at new_origin, point it along new_dir (normalized
// in place), and rebuild the right/up basis vectors.
__host__ __device__
void Camera::move(const vec3 &new_origin, const vec3 &new_dir) {
origin = new_origin;
dir = new_dir;
dir.normalize();
calc_axes();
}
// Place the camera at `from`, looking toward `to`.
__host__ __device__
void Camera::move_from_to(const vec3 &from, const vec3 &to) {
move(from, to - from);
}
// Map a pixel coordinate to the normalized world-space direction of the
// primary ray through that pixel.
__host__ __device__
vec3 Camera::ray_dir_at_pixel(float x, float y) const {
    // Screen-space offsets scaled by the half-angle tangent; y is flipped
    // so +y points up in the image.
    const float px = (2 * x * inv_width - 1) * angle * aspect_ratio;
    const float py = (1 - 2 * y * inv_height) * angle;
    vec3 ray = right * px + up * py + dir;
    ray.normalize();
    return ray;
}
// Rebuild the orthonormal camera basis (right, up) from the view direction
// and the world "up" axis.
__host__ __device__
void Camera::calc_axes() {
    vec3 world_up = {0, 1, 0};   // approximately "up"
    world_up.normalize();        // already unit length; kept for parity
    right = cross(dir, world_up);
    right.normalize();
    up = cross(right, dir);
}
7,310 | #include <iostream>
#include <stdio.h>
#include <cstdlib>
#include <ctime>
using namespace std;
// Naive O(n^3) matrix multiply: P = M * N for `witdh` x `witdh` row-major
// matrices. (The misspelled parameter name is kept for compatibility.)
void matrix_multiplication(float* M, float* N, float* P, int witdh) {
    for (int i = 0; i < witdh; i++) {
        for (int j = 0; j < witdh; j++) {
            float sum = 0.0f;  // was int: truncated every float product
            for (int k = 0; k < witdh; k++) {
                float a = M[i * witdh + k];
                float b = N[k * witdh + j];
                sum += a * b;
            }
            P[i * witdh + j] = sum;
        }
    }
}
// Benchmark driver: multiply two randomly filled 200x200 matrices on the CPU.
// The original never initialized A and B (the fill loop was commented out),
// so it multiplied indeterminate malloc'd memory, and it leaked all buffers.
int main(int argc, char **argv) {
    int witdh = 200;
    float* A = (float*) malloc(witdh * witdh * sizeof(float));
    float* B = (float*) malloc(witdh * witdh * sizeof(float));
    float* C = (float*) malloc(witdh * witdh * sizeof(float));
    if (!A || !B || !C) {
        free(A); free(B); free(C);
        return 1;
    }
    srand (time(NULL));
    // Fill the inputs with small random values.
    for (int i = 0; i < witdh; i++) {
        for (int j = 0; j < witdh; j++) {
            A[i * witdh + j] = (rand() % 100) + 1;
            B[i * witdh + j] = (rand() % 200) + 1;
        }
    }
    matrix_multiplication(A, B, C, witdh);
    free(A);
    free(B);
    free(C);
    return 0;
}
|
7,311 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
/* Time */
#include <sys/time.h>
#include <sys/resource.h>
static struct timeval tv0;
/* Current wall-clock time in microseconds (stored via the file-static
   scratch struct tv0). */
double getMicroSeconds()
{
    gettimeofday(&tv0, (struct timezone*)0);
    return (double)tv0.tv_sec * 1000000 + (double)tv0.tv_usec;
}
/* Seed rand() from /dev/urandom. The original ignored fopen/fread failures
   (a NULL stream would have crashed fread); this falls back to a
   time-derived seed when the device cannot be read. */
void init_seed()
{
    int seedi = 1;
    FILE *fd = fopen("/dev/urandom", "r");
    if (fd != NULL)
    {
        if (fread(&seedi, sizeof(int), 1, fd) != 1)
            seedi = 1;  /* read failed: keep deterministic fallback below */
        fclose(fd);
    }
    if (seedi == 1)
    {
        /* fallback seed from the clock (sys/time.h is already included) */
        struct timeval tv;
        gettimeofday(&tv, (struct timezone*)0);
        seedi = (int) tv.tv_usec;
    }
    srand(seedi);
}
/* Fill an n x n matrix with uniform random values in [-500, 500]. */
void init2Drand(float **buffer, int n)
{
    int i, j;
    for (i=0; i<n; i++)
        for(j=0; j<n; j++)
            /* was 500*r - 500, which only spans [-500, 0] and contradicted
               the stated [-500, 500] range */
            buffer[i][j] = 1000.0f*(float(rand())/RAND_MAX)-500.0f;
}
/* Allocate a zero-initialized float array of length nx.
   Returns NULL (after printing an error) on allocation failure. */
float *getmemory1D( int nx )
{
    int i;
    float *buffer;
    /* was nx*sizeof(float *): allocated pointer-sized slots, not floats */
    if( (buffer=(float *)malloc(nx*sizeof(float)))== NULL )
    {
        fprintf( stderr, "ERROR in memory allocation\n" );
        return( NULL );
    }
    for( i=0; i<nx; i++ )
        buffer[i] = 0.0;
    return( buffer );
}
/* Allocate an nx-by-ny matrix of zero-initialized floats: one contiguous
   data block addressed through an array of row pointers.
   Free with free(buffer[0]); free(buffer). Returns NULL on failure. */
float **getmemory2D(int nx, int ny)
{
    float **rows = (float **) malloc( nx * sizeof(float *) );
    if (rows == NULL)
    {
        fprintf( stderr, "ERROR in memory allocation\n" );
        return( NULL );
    }
    rows[0] = (float *) malloc( nx * ny * sizeof(float) );
    if (rows[0] == NULL)
    {
        fprintf( stderr, "ERROR in memory allocation\n" );
        free( rows );
        return( NULL );
    }
    /* each row pointer steps ny floats into the contiguous block */
    for (int r = 1; r < nx; r++)
        rows[r] = rows[r-1] + ny;
    for (int r = 0; r < nx; r++)
        for (int c = 0; c < ny; c++)
            rows[r][c] = 0.0;
    return( rows );
}
/********************************************************************************/
/********************************************************************************/
/*
* Traspose 2D version
*/
/*
 * Transpose an n x n matrix, row-pointer (2D) version: out = in^T.
 */
void transpose2D(float **in, float **out, int n)
{
    for (int r = 0; r < n; r++)
        for (int c = 0; c < n; c++)
            out[c][r] = in[r][c];
}
/*
* Traspose 1D version
*/
/*
 * Transpose an n x n matrix stored flat in row-major order: out = in^T.
 */
void transpose1D(float *in, float *out, int n)
{
    for (int r = 0; r < n; r++)
        for (int c = 0; c < n; c++)
            out[c*n + r] = in[r*n + c];
}
/*
* Traspose CUDA version
*/
#define NTHREADS1D 256
/*
__global__ void transpose_device(float *in, float *out, int rows, int cols)
{
int i, j;
i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<rows)
for ( j=0; j<cols; j++)
out [ i * rows + j ] = in [ j * cols + i ];
}*/
#define TPB 32
// Tiled matrix transpose through shared memory: a TPB x TPB tile is loaded
// with coalesced reads, then written back with swapped block coordinates.
// The +1 column pad on the tile matches the classic padding used to stagger
// shared-memory accesses (bank-conflict avoidance).
// NOTE(review): the second index computation swaps the roles of blockDim.x
// and blockDim.y, which is only correct because the launch uses square
// TPB x TPB blocks -- confirm if the block shape ever changes.
__global__ void transpose_device(float *in, float *out, int rows, int cols) {
int i, j;
__shared__ float tile[TPB][TPB+1];
// source element for this thread
i = blockIdx.x * blockDim.x + threadIdx.x;
j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < rows && j < cols) {
tile[threadIdx.y][threadIdx.x] = in[rows*j+i];
}
__syncthreads();
// destination coordinates: block indices swapped, thread indices swapped
// when reading the tile
i = blockDim.y*blockIdx.y + threadIdx.x;
j = blockDim.x*blockIdx.x + threadIdx.y;
if (i < rows && j < cols) {
out[rows * j + i] = tile[threadIdx.x][threadIdx.y];
}
}
/* Return 1 if the two n-element arrays differ anywhere, 0 if identical.
   (Exact float comparison -- both sides are produced by plain copies.) */
int check(float *GPU, float *CPU, int n)
{
    for (int k = 0; k < n; k++)
    {
        if (GPU[k] != CPU[k])
            return 1;
    }
    return 0;
}
/* Compare CPU vs GPU matrix transpose throughput for an n x n matrix
   (n from argv[1], default 4096) and verify the GPU result. */
int main(int argc, char **argv)
{
    int n;
    float **array2D, **array2D_trans;
    float *array1D, *array1D_trans;
    float *array1D_trans_GPU;
    double t0;

    if (argc==2)
        n = atoi(argv[1]);
    else {
        n = 4096;
        printf("./exec n (by default n=%i)\n", n);
    }

    /* Initialization */
    init_seed();
    array2D = getmemory2D(n,n);
    array2D_trans = getmemory2D(n,n);
    array1D_trans_GPU = getmemory1D(n*n);
    array1D = array2D[0];            /* flat view of the contiguous 2D block */
    array1D_trans = array2D_trans[0];
    init2Drand(array2D, n);

    /* Transpose 1D version (CPU reference) */
    t0 = getMicroSeconds();
    transpose1D(array1D, array1D_trans, n);
    printf("Transpose version 1D: %f MB/s\n", n*n*sizeof(float)/((getMicroSeconds()-t0)/1000000)/1024/1024);

    /* CUDA version */
    float *darray1D, *darray1D_trans;
    cudaMalloc((void**)&darray1D, n*n*sizeof(float));
    cudaMemcpy(darray1D, array1D, n*n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&darray1D_trans, n*n*sizeof(float));

    dim3 dimBlock(TPB, TPB);
    int blocks = (n+TPB-1)/TPB;
    dim3 dimGrid(blocks, blocks);

    t0 = getMicroSeconds();
    transpose_device<<<dimGrid,dimBlock>>>(darray1D, darray1D_trans, n, n);
    /* the blocking memcpy synchronizes with the kernel, so the timing covers
       kernel + copy-back; cudaThreadSynchronize() is deprecated */
    cudaMemcpy(array1D_trans_GPU, darray1D_trans, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    printf("Transpose kernel version: %f MB/s\n", n*n*sizeof(float)/((getMicroSeconds()-t0)/1000000)/1024/1024);

    if (check(array1D_trans_GPU, array1D_trans, n*n))
        printf("Transpose CPU-GPU differs!!\n");

    /* Release device and host memory (the original leaked everything) */
    cudaFree(darray1D);
    cudaFree(darray1D_trans);
    free(array2D[0]);       free(array2D);
    free(array2D_trans[0]); free(array2D_trans);
    free(array1D_trans_GPU);
    return(0);
}
|
7,312 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MATRIX_TYPE int
#define BLOCK_SIZE 8
#define THREAD_SIZE 64
#define TILE 64
void stopwatch(int);
//CUDA 배열 곱
__global__ void cuda_mul(MATRIX_TYPE*,MATRIX_TYPE*,MATRIX_TYPE*,int);
__global__ void shared_mul(MATRIX_TYPE*,MATRIX_TYPE*,MATRIX_TYPE*,int);
__global__ void exam_mul(MATRIX_TYPE*,MATRIX_TYPE*,MATRIX_TYPE*,int);
// Driver comparing the naive and tiled matrix-multiply kernels on
// 1024 x 1024 integer matrices.
// NOTE(review): dim3 Db(1024,1024,1) requests 1024*1024 threads in a single
// block, which exceeds the CUDA per-block thread limit (1,024 on current
// hardware), so both launches below should fail and host_C would receive
// stale device memory -- no launch-error checking is done; confirm intent.
// NOTE(review): the second run prints "shared_mul" and builds Sg/Sb launch
// dims, but actually launches exam_mul with the same invalid <<<1,Db>>>
// configuration; Sg and Sb are never used.
int main()
{
// 1024 x 1024 matrices
const int width = 1024;
const int height = width;
const int matrix_size = width*height;
const int buffer_size = matrix_size*sizeof(MATRIX_TYPE);
MATRIX_TYPE *host_A,*host_B,*host_C;
host_A = (MATRIX_TYPE*)malloc(buffer_size);
host_B = (MATRIX_TYPE*)malloc(buffer_size);
host_C = (MATRIX_TYPE*)malloc(buffer_size);
for(int i=0;i<matrix_size;i++)
{
host_A[i] = i;
host_B[i] = i;
host_C[i] =0;
}
printf("Multiply matrix (%dX%d ) * (%dX%d)\n",width,width,width,width);
MATRIX_TYPE *device_A,*device_B,*device_C;
dim3 Db(1024,1024,1);
cudaMalloc((void**)&device_A,buffer_size );
cudaMalloc((void**)&device_B,buffer_size );
cudaMalloc((void**)&device_C,buffer_size );
// Run 1: naive kernel (timed including the copies)
printf("cuda_mul\n");
stopwatch(0);
cudaMemcpy(device_A,host_A,buffer_size,cudaMemcpyHostToDevice);
cudaMemcpy(device_B,host_B,buffer_size,cudaMemcpyHostToDevice);
cuda_mul<<<1,Db>>>(device_A,device_B,device_C,width);
cudaMemcpy(host_C,device_C,buffer_size,cudaMemcpyDeviceToHost);
stopwatch(1);
// Re-initialize host buffers for the second run
for(int i=0;i<matrix_size;i++)
{
host_A[i] = i;
host_B[i] = i;
host_C[i] =0;
}
dim3 Sg(BLOCK_SIZE,BLOCK_SIZE,1);
dim3 Sb(THREAD_SIZE,THREAD_SIZE,1);
// Run 2: labeled "shared_mul" but launches exam_mul (see NOTE above)
printf("shared_mul\n");
stopwatch(0);
cudaMemcpy(device_A,host_A,buffer_size,cudaMemcpyHostToDevice);
cudaMemcpy(device_B,host_B,buffer_size,cudaMemcpyHostToDevice);
exam_mul<<<1,Db>>>(device_A,device_B,device_C,width);
cudaMemcpy(host_C,device_C,buffer_size,cudaMemcpyDeviceToHost);
stopwatch(1);
cudaFree(device_A);
cudaFree(device_B);
cudaFree(device_C);
free(host_A);
free(host_B);
free(host_C);
return 0;
}
// Naive in-block matrix product. Each thread computes
// C[threadIdx.x*w + threadIdx.y] = dot(row threadIdx.y of A, row threadIdx.x of B),
// i.e. for row-major inputs this effectively produces (A * B^T) transposed
// into C -- confirm against the intended math at the call site.
// NOTE(review): indexes only threadIdx, so a single block must cover the
// whole w x w matrix; with w = 1024 that is not a launchable configuration.
__global__ void cuda_mul(MATRIX_TYPE* A, MATRIX_TYPE* B, MATRIX_TYPE* C, int w)
{
MATRIX_TYPE v;
v = 0;
// accumulate row threadIdx.y of A against row threadIdx.x of B
for(int i =0;i<w;i++)
{
v += A[threadIdx.y*w + i] * B[threadIdx.x *w + i];
}
C[threadIdx.x *w + threadIdx.y] = v;
}
// UNFINISHED shared-memory multiply sketch: it loads one value per thread
// into shared tiles (note: every thread in a block reads the SAME element,
// A[blockIdx.y*w + blockIdx.x]), initializes an accumulator, and then does
// nothing -- C is never written and there is no __syncthreads().
// NOTE(review): dead code in its current form; exam_mul below is the
// working tiled implementation. Kept as-is pending author intent.
__global__ void shared_mul(MATRIX_TYPE*A,MATRIX_TYPE*B,MATRIX_TYPE*C,int w)
{
/*
Intended launch (from the original sketch):
Dg(16,16,1)
Db(64,64,1)
0,0 1,0
---------
|
0,1 | 1,1
|
1 0 2 4
1
0
2
4
*/
__shared__ MATRIX_TYPE SA[THREAD_SIZE][THREAD_SIZE];
__shared__ MATRIX_TYPE SB[THREAD_SIZE][THREAD_SIZE];
MATRIX_TYPE v;
SA[threadIdx.x][threadIdx.y] = A[blockIdx.y *w +blockIdx.x];
SB[threadIdx.x][threadIdx.y] = B[blockIdx.x *w +blockIdx.y];
v = 0;
/*
Original sketch: iterate over all tiles of B that use one tile of A.
O O O O X X X X
O O O O O O O O
X O O O O O O O
O O O O O O O O
*/
}
// Simple start/stop timer on CLOCK_MONOTONIC.
// flag == 0 starts the clock; flag == 1 stops it and prints the elapsed
// time in microseconds; any other value prints usage help.
void stopwatch(int flag)
{
    const long long NANOS = 1000000000LL;
    static struct timespec startTS,endTS;
    static long long Diff = 0;

    switch (flag)
    {
    case 0:   // start
        Diff = 0;
        if (clock_gettime(CLOCK_MONOTONIC, &startTS) == -1)
            printf("Failed to call clock_gettime\n");
        break;
    case 1:   // stop and report
        if (clock_gettime(CLOCK_MONOTONIC, &endTS) == -1)
            printf("Failed to call clock_gettime\n");
        Diff = NANOS * (endTS.tv_sec - startTS.tv_sec) + (endTS.tv_nsec - startTS.tv_nsec);
        printf("elapsed time : % lld micros\n",Diff/1000);
        break;
    default:
        printf("wrong flag | 0 : start, 1 : end\n");
        break;
    }
}
// Tiled matrix multiply (classic CUDA SDK pattern): C = A * B for w x w
// row-major matrices using TILE x TILE shared-memory tiles, one thread per
// output element. Requires blockDim == (TILE, TILE) and w divisible by TILE.
__global__ void exam_mul(MATRIX_TYPE*A,MATRIX_TYPE*B,MATRIX_TYPE*C,int w)
{
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    int aBegin = w * TILE * by;   // first tile of A for this block row
    int aEnd = aBegin + w - 1;    // end of that row band
    int aStep = TILE;             // A advances one tile to the right
    int bBegin = TILE * bx;       // first tile of B for this block column
    int bStep = TILE * w;         // B advances one tile downward

    MATRIX_TYPE Csub = 0;
    for(int a = aBegin, b = bBegin;
        a <= aEnd;
        a += aStep, b += bStep)
    {
        __shared__ MATRIX_TYPE As[TILE][TILE];
        __shared__ MATRIX_TYPE Bs[TILE][TILE];
        As[ty][tx] = A[a + w * ty + tx];
        Bs[ty][tx] = B[b + w * ty + tx];
        __syncthreads();                 // tiles fully loaded before use
        for(int k = 0; k < TILE; k++)
            Csub += As[ty][k] * Bs[k][tx];
        __syncthreads();                 // done reading before next load
    }
    int c = w * TILE * by + TILE * bx;
    C[c + w * ty + tx] = Csub;   // was `+ ty`: every thread wrote the wrong column
}
|
7,313 | #include <cstdio>
#include <cstdlib>
#include <string>
#include <cuda_runtime.h>
#include <iostream>
// Print `length` bytes of `message` with a grid-stride loop, one character
// per printf. Device printf is serialized, but the order characters appear
// depends on thread scheduling and the launch configuration.
__global__
void print(const char * message, size_t length)
{
    const size_t stride = blockDim.x * gridDim.x;
    for (size_t k = blockDim.x * blockIdx.x + threadIdx.x; k < length; k += stride)
        printf("%c", message[k]);
}
// Print a message from the GPU under three launch shapes with the same
// total thread count (16 blocks x 1 thread, 4 x 4, 1 x 16) to show how the
// grid shape scrambles the output order.
int main(int argc, const char* argv[]) {
    // Message comes from the first CLI argument, defaulting to a greeting.
    std::string message;
    if (argc == 1) {
        message = "Hello world!";
    } else {
        message = argv[1];
    }

    // Device copy of the message bytes.
    char * buffer;
    cudaMalloc(& buffer, message.size());
    cudaMemcpy(buffer, message.data(), message.size(), cudaMemcpyDefault);

    const int grids[3]  = {16, 4, 1};
    const int threads[3] = {1, 4, 16};
    for (int k = 0; k < 3; ++k) {
        print<<<grids[k], threads[k]>>>(buffer, message.size());
        cudaDeviceSynchronize();
        cudaGetLastError();
        std::cout << std::endl;
    }
    cudaFree(buffer);
}
|
7,314 | #include <stdio.h>
// Records four CUDA-event timestamps around a kernel launch:
// _start (construction) -> _beforeKernel -> _afterKernel -> _stop,
// then reports the elapsed milliseconds between each pair.
struct MyCudaTime {
cudaEvent_t _start;
cudaEvent_t _beforeKernel, _afterKernel, _stop;
MyCudaTime() {
cudaEventCreate(&_start);
cudaEventCreate(&_beforeKernel);
cudaEventCreate(&_afterKernel);
cudaEventCreate(&_stop);
cudaEventRecord(_start, 0);
}
// Release the events (the original never destroyed them, leaking four
// CUDA events per instance).
~MyCudaTime() {
cudaEventDestroy(_start);
cudaEventDestroy(_beforeKernel);
cudaEventDestroy(_afterKernel);
cudaEventDestroy(_stop);
}
void beforeKernel() {
cudaEventRecord(_beforeKernel, 0);
}
void afterKernel() {
cudaEventRecord(_afterKernel, 0);
}
// Record the final timestamp and block until it has completed.
void stop() {
cudaEventRecord(_stop, 0);
cudaEventSynchronize(_stop);
}
// Print the total and per-phase elapsed times in milliseconds.
void report() {
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, _start, _stop);
printf("Total time %3.2f ms\n", elapsedTime);
cudaEventElapsedTime(&elapsedTime, _start, _beforeKernel);
printf("\t Before calling kernel %3.2f ms\n", elapsedTime);
cudaEventElapsedTime(&elapsedTime, _beforeKernel, _afterKernel);
printf("\t In kernel %3.2f ms\n", elapsedTime);
cudaEventElapsedTime(&elapsedTime, _afterKernel, _stop);
printf("\t After calling kernel %3.2f ms\n", elapsedTime);
}
};
7,315 | #include "includes.h"
// Scatter sinogram data from the compact active-window layout (snaw) into
// the full buffer (sne7), using aw2ali to map each active-window bin index
// to its position in the full layout. One blockIdx.x per bin; sinogram
// index is spread over threadIdx.x and blockIdx.y.
__global__ void d_putgaps(float *sne7, float *snaw, int *aw2ali, const int snno)
{
    // sinogram index
    const int sinoIdx = threadIdx.x + blockIdx.y * blockDim.x;
    // active-window bin index
    const int binIdx = blockIdx.x;
    if (sinoIdx < snno) {
        sne7[aw2ali[binIdx] * snno + sinoIdx] = snaw[binIdx * snno + sinoIdx];
    }
}
7,316 | #include "includes.h"
// ROI max-pool backward pass: for each proposal and channel, route the
// output gradient to the input feature recorded in output_maxidx.
// Grid-stride over proposals, block-stride over the C channels; atomicAdd
// because several proposals may share the same argmax input.
__global__ void roipool_bp_cuda_(int nProposal, int C, float *d_feats, int *proposals_offset, int *output_maxidx, float *d_output_feats){
    for (int prop = blockIdx.x; prop < nProposal; prop += gridDim.x) {
        for (int ch = threadIdx.x; ch < C; ch += blockDim.x) {
            const int src = output_maxidx[prop * C + ch];
            atomicAdd(&d_feats[src * C + ch], d_output_feats[prop * C + ch]);
        }
    }
}
7,317 | #ifdef __cplusplus
extern "C" {
#endif
// Non-maximum Supression Kernel
// data: image input data with each pixel taking up 1 byte (8Bit 1Channel)
// out: image output data (8B1C)
// theta: angle input data
__global__ void non_max_supp_kernel(int *data,
int *out,
int *theta,
int rows,
int cols)
{
// Non-maximum suppression (Canny edge thinning): a pixel survives only
// if its magnitude strictly exceeds both neighbors across the gradient
// direction given by theta (quantized to 0/45/90/135 degrees).
// Assumes a 16x16 thread block; the shared tile carries a one-pixel halo,
// so the outermost ring of the image is ignored by design.
// NOTE(review): there is no guard against g_row/g_col exceeding
// rows/cols, so the launch must tile the image exactly -- TODO confirm.
int l_row = threadIdx.y + 1;
int l_col = threadIdx.x + 1;
int g_row = threadIdx.y + (blockIdx.y * blockDim.y);
int g_col = threadIdx.x + (blockIdx.x * blockDim.x);
int pos = g_row * cols + g_col;
__shared__ int l_data[18][18];
// Stage this block's pixels plus the halo ring into shared memory.
l_data[l_row][l_col] = data[pos];
if (l_row == 1)
{
// top halo row
l_data[0][l_col] = data[pos-cols];
if (l_col == 1)
l_data[0][0] = data[pos-cols-1];
else if (l_col == 16)
l_data[0][17] = data[pos-cols+1];
}
else if (l_row == 16)
{
// bottom halo row
l_data[17][l_col] = data[pos+cols];
if (l_col == 1)
l_data[17][0] = data[pos+cols-1];
else if (l_col == 16)
l_data[17][17] = data[pos+cols+1];
}
if (l_col == 1)
l_data[l_row][0] = data[pos-1];
else if (l_col == 16)
l_data[l_row][17] = data[pos+1];
// BUG FIX: the original read neighbors from l_data without a barrier,
// racing against the halo/tile writes above. Every thread reaches this
// point (the branches above all converge), so the barrier is safe.
__syncthreads();
int my_magnitude = l_data[l_row][l_col];
// Map the quantized gradient angle to the neighbor pair that lies
// across the edge. A gradient of 0 deg means a N/S edge -> compare E/W;
// 45 -> NE/SW; 90 -> N/S; 135 -> NW/SE.
int dr, dc;
bool known_angle = true;
switch (theta[pos])
{
case 0: dr = 0; dc = 1; break; // east / west
case 45: dr = -1; dc = 1; break; // north-east / south-west
case 90: dr = -1; dc = 0; break; // north / south
case 135: dr = -1; dc = -1; break; // north-west / south-east
default: known_angle = false; break;
}
if (!known_angle)
{
// Unrecognized angle: pass the magnitude through unchanged.
out[pos] = my_magnitude;
}
else if (my_magnitude <= l_data[l_row + dr][l_col + dc] ||
my_magnitude <= l_data[l_row - dr][l_col - dc])
{
// A neighbor across the edge dominates: suppress this pixel.
out[pos] = 0;
}
else
{
// Local maximum along the gradient: keep it.
out[pos] = my_magnitude;
}
}
#ifdef __cplusplus
}
#endif
|
7,318 | #include <stdio.h>
// Debug kernel: adds a hard-coded constant to *g_summag, printing the
// operand and the value before/after. Intended for a <<<1,1>>> launch.
// Device printf output is buffered and only guaranteed visible after a
// synchronizing call on the host.
// NOTE(review): the literals are double constants narrowed to float, so
// the printed values reflect float rounding of the written decimals.
__global__ void test(float *g_summag){
float maxmag = 0.560288846;
printf("maxmag=%.9f\n", maxmag);
printf("g_summag=%.9f\n", *g_summag);
*g_summag += maxmag;
printf("g_summag=%.9f\n", *g_summag);
}
// Host driver: copies one float to the device, lets the `test` kernel add
// a constant, and copies the result back to show single-precision
// accumulation behavior.
int main() {
float summag = 0.555002689;
printf("summag=%.9f\n", summag);
float *g_summag;
cudaMalloc(&g_summag, sizeof(float));
cudaMemcpy(g_summag, &summag, sizeof(summag), cudaMemcpyHostToDevice);
test<<<1, 1>>>(g_summag);
// Blocking D2H copy on the default stream implicitly waits for the kernel.
cudaMemcpy(&summag, g_summag, sizeof(summag), cudaMemcpyDeviceToHost);
printf("summag=%.9f\n", summag);
// BUG FIX: the original leaked the device allocation.
cudaFree(g_summag);
return 0;
}
|
7,319 | #include <cufft.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <cuda.h>
#define NUM 1024//160//1024
#define NUM2 1024//7327//13981//128
#define batch 64//10//30//512
#define fileNUM 8
// Reads interleaved real/imaginary byte files named
// crab_nasu_<day>_<time>_{re,im}<c>.bin, runs batched 1-D FFTs on the GPU
// in NUM*batch-sample chunks, and appends the per-batch power spectrum to
// crab_nasu_<day>_<time>_fft<c>.bin.
// NOTE(review): host buffers, the device buffer and the cuFFT plan are
// (re)allocated every chunk; hoisting them out of the h-loop would be a
// significant speedup but is left as-is to preserve behavior.
int main(int argc,char *argv[])
{
FILE *fp;
FILE *file;
FILE *fp2;
unsigned char *f_h_re, *f_h_im;
int h,i,t1,t2,t,k,v,p,a,c;
float *f_hf_re, *f_hf_im;
cufftComplex *f_hc;
// Power spectrum for one chunk; NUM*batch floats (~256 KB) on the stack.
float psd[NUM*batch];
//int time[NUM*batch];
char *infile_name_real,*infile_name_imaginary, *outfile_name;
char day[10];
char time[10];
// BUG FIX: re/im were declared [3] but hold "_re"/"_im" (3 chars + NUL
// = 4 bytes), overflowing by one byte.
char head[20],re[4],im[4],fft[5];
char underbar[5],bin[5];
sprintf(underbar,"_");
sprintf(head,"crab_nasu_");
sprintf(re,"_re");
sprintf(im,"_im");
sprintf(fft,"_fft");
sprintf(bin,".bin");
printf("日付を入力してください。(yyyymmdd)\n");
// BUG FIX: bound scanf to the buffer size (9 chars + NUL) to prevent
// overflow on long input.
scanf("%9s", day);
printf("時間を入力してください。(just,a1,b1 etc..)\n");
scanf("%9s", time);
for(c = 0;c < fileNUM;c++){
infile_name_real = (char *)malloc(sizeof(char)*100);
infile_name_imaginary = (char *)malloc(sizeof(char)*100);
outfile_name = (char *)malloc(sizeof(char)*100);
sprintf(infile_name_real,"%s%s%s%s%s%d%s",head,day,underbar,time,re,c,bin);
sprintf(infile_name_imaginary,"%s%s%s%s%s%d%s",head,day,underbar,time,im,c,bin);
printf("%s\n%s\n",infile_name_real,infile_name_imaginary);
// open the real-part file
fp = fopen(infile_name_real,"rb");
// bail out if it could not be opened
if(fp == NULL){
printf("ファイルをオープンできませんでした。\n");
return 1;
}
// open the imaginary-part file
fp2 = fopen(infile_name_imaginary,"rb");
if(fp2 == NULL){
printf("ファイルをオープンできませんでした。\n");
return 1;
}
// process the stream one NUM*batch chunk (~6.6 ms of data) at a time
for(h = 0;h < NUM2;h++){
// host buffer for the raw real bytes
f_h_re = (unsigned char *)malloc(sizeof(unsigned char)*NUM*batch);
// float buffer for the real samples
f_hf_re = (float *)malloc(sizeof(float)*NUM*batch);
// host buffer for the raw imaginary bytes
f_h_im = (unsigned char *)malloc(sizeof(unsigned char)*NUM*batch);
// float buffer for the imaginary samples
f_hf_im = (float *)malloc(sizeof(float)*NUM*batch);
// complex FFT input/output buffer
f_hc = (cufftComplex *)malloc(sizeof(cufftComplex)*NUM*batch);
t1 = 0;
t2 = 0;
t1 = NUM*batch*h;
t2 = NUM*batch*h + NUM*batch;
// load this chunk's bytes into the arrays
// NOTE(review): the per-byte fseek is redundant (reads are already
// sequential) but harmless, so it is kept.
for(i = t1;i < t2;i++){
//realdata
fseek(fp, i * sizeof(unsigned char), SEEK_SET);
fread(&f_h_re[i-t1],sizeof(unsigned char),1,fp);
f_hf_re[i-t1] = f_h_re[i-t1]; // widen unsigned char to float
f_hc[i-t1].x = f_hf_re[i-t1]; // real part of the complex sample
//imdata
fseek(fp2, i * sizeof(unsigned char), SEEK_SET);
fread(&f_h_im[i-t1],sizeof(unsigned char),1,fp2);
f_hf_im[i-t1] = f_h_im[i-t1];
f_hc[i-t1].y = f_hf_im[i-t1];
}
free(f_hf_re);
free(f_h_re);
free(f_hf_im);
free(f_h_im);
//fclose(fp);
cufftComplex *f_d;
// allocate device memory
cudaMalloc((void **)&f_d, sizeof(cufftComplex)*NUM*batch);
// copy host -> device
cudaMemcpy(f_d, f_hc, sizeof(cufftComplex)*NUM*batch, cudaMemcpyHostToDevice);
cufftHandle plan;
// set up the batched 1-D C2C FFT (batch transforms of length NUM)
cufftPlan1d(&plan, NUM, CUFFT_C2C, batch);
// run the forward transform in place
cufftExecC2C(plan, f_d, f_d, CUFFT_FORWARD);
// copy device -> host
cudaMemcpy(f_hc, f_d, sizeof(cufftComplex)*NUM*batch, cudaMemcpyDeviceToHost);
cudaFree(f_d);
cufftDestroy(plan);
/* compute the power spectrum (magnitude of each bin) */
for(t = 0; t < batch*NUM ;t++){
psd[t] = sqrt((f_hc[t].x * f_hc[t].x) + (f_hc[t].y * f_hc[t].y));
}
free(f_hc);
sprintf(outfile_name,"%s%s%s%s%s%d%s",head,day,underbar,time,fft,c,bin);
file = fopen(outfile_name,"ab");
if(file == NULL){
printf("ファイルをオープンできませんでした。\n");
return 1;
}
// append each transform's spectrum, skipping bin 0 (DC) of every batch
for(k = 0; k < batch; k++){
v = NUM*k+1;
p = NUM*(k+1);
/*
z = 2*k + 1;
p = (NUM/2)*z;
*/
for(a = v; a < p; a++){
fwrite(&psd[a],sizeof(float),1,file);
}
}
fclose(file);
}
printf("%s\n",outfile_name);
fclose(fp);
fclose(fp2);
free(infile_name_real);
free(infile_name_imaginary);
free(outfile_name);
}
return 0;
}
|
7,320 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort the process with a descriptive message if the CUDA runtime has a
// pending error (also clears the sticky error state).
void check_error (const char* message) {
const cudaError_t status = cudaGetLastError ();
if (status == cudaSuccess)
return;
printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
exit(-1);
}
// 4th-order finite-difference update of the three-component acceleration
// field (SW4-style elastic wave stencil with grid stretching strx/stry/strz).
// One thread per (i,j,k) grid point; only interior points 2..N-3 in each
// axis are updated. The body is machine-flattened straight-line code.
// NOTE(review): inputs are hard-cast to [304][304] planes, so the launch
// must use N == 304 -- TODO confirm against the caller.
__global__ void sw4 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) {
// Determine this thread's global (i,j,k) grid point
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
// Assumptions: a1 is the accumulation factor, h the grid spacing
int a1 = 1;
double h = 3.7;
double cof = 1e0 / ( h * h);
// Reinterpret the flat inputs as 3-D [k][j][i] volumes (fixed 304x304 planes)
double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0;
double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1;
double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2;
double (*u_0)[304][304] = (double (*)[304][304])u_in_0;
double (*u_1)[304][304] = (double (*)[304][304])u_in_1;
double (*u_2)[304][304] = (double (*)[304][304])u_in_2;
double (*mu)[304][304] = (double (*)[304][304])mu_in;
double (*la)[304][304] = (double (*)[304][304])la_in;
double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4;
double r1, r2, r3;
// Interior-point guard; '&' is bitwise but operands are 0/1 booleans, so
// it behaves like logical AND here.
if (i>=2 & j>=2 & k>=2 & i<=N-3 & j<=N-3 & k<=N-3) {
// NOTE: these redeclarations deliberately shadow the outer
// mux*/muy*/muz*/r* variables; all temporaries below are
// machine-generated scalars from expression flattening.
double mux1;
double mux2;
double mux3;
double mux4;
double muy1;
double muy2;
double muy3;
double muy4;
double muz1;
double muz2;
double muz3;
double muz4;
double _t_2_;
double _t_3_;
double _t_1_;
double _t_4_;
double _t_5_;
double _t_6_;
double _t_7_;
double _t_8_;
double _t_9_;
double _t_0_;
double _t_11_;
double _t_10_;
double _t_12_;
double _t_13_;
double _t_14_;
double _t_16_;
double _t_15_;
double _t_17_;
double _t_18_;
double _t_19_;
double r1;
double _t_22_;
double _t_21_;
double _t_23_;
double _t_24_;
double _t_25_;
double _t_20_;
double _t_27_;
double _t_28_;
double _t_26_;
double _t_29_;
double _t_30_;
double _t_31_;
double _t_32_;
double _t_33_;
double _t_34_;
double _t_36_;
double _t_35_;
double _t_37_;
double _t_38_;
double _t_39_;
double r2;
double _t_42_;
double _t_41_;
double _t_43_;
double _t_44_;
double _t_45_;
double _t_40_;
double _t_47_;
double _t_46_;
double _t_48_;
double _t_49_;
double _t_50_;
double _t_52_;
double _t_53_;
double _t_51_;
double _t_54_;
double _t_55_;
double _t_56_;
double _t_57_;
double _t_58_;
double _t_59_;
double r3;
double _t_63_;
double _t_61_;
double _t_64_;
double _t_65_;
double _t_62_;
double _t_67_;
double _t_68_;
double _t_66_;
double _t_70_;
double _t_71_;
double _t_69_;
double _t_72_;
double _t_73_;
double _t_60_;
double _t_76_;
double _t_74_;
double _t_77_;
double _t_78_;
double _t_75_;
double _t_80_;
double _t_81_;
double _t_79_;
double _t_83_;
double _t_84_;
double _t_82_;
double _t_85_;
double _t_86_;
double _t_89_;
double _t_87_;
double _t_90_;
double _t_91_;
double _t_88_;
double _t_93_;
double _t_94_;
double _t_92_;
double _t_96_;
double _t_97_;
double _t_95_;
double _t_98_;
double _t_99_;
double _t_102_;
double _t_100_;
double _t_103_;
double _t_104_;
double _t_101_;
double _t_106_;
double _t_107_;
double _t_105_;
double _t_109_;
double _t_110_;
double _t_108_;
double _t_111_;
double _t_112_;
double _t_116_;
double _t_114_;
double _t_117_;
double _t_118_;
double _t_115_;
double _t_120_;
double _t_121_;
double _t_119_;
double _t_123_;
double _t_124_;
double _t_122_;
double _t_125_;
double _t_126_;
double _t_113_;
double _t_129_;
double _t_127_;
double _t_130_;
double _t_131_;
double _t_128_;
double _t_133_;
double _t_134_;
double _t_132_;
double _t_136_;
double _t_137_;
double _t_135_;
double _t_138_;
double _t_139_;
double _t_142_;
double _t_140_;
double _t_143_;
double _t_144_;
double _t_141_;
double _t_146_;
double _t_147_;
double _t_145_;
double _t_149_;
double _t_150_;
double _t_148_;
double _t_151_;
double _t_152_;
double _t_155_;
double _t_153_;
double _t_156_;
double _t_157_;
double _t_154_;
double _t_159_;
double _t_160_;
double _t_158_;
double _t_162_;
double _t_163_;
double _t_161_;
double _t_164_;
double _t_165_;
double _t_169_;
double _t_167_;
double _t_170_;
double _t_171_;
double _t_168_;
double _t_173_;
double _t_174_;
double _t_172_;
double _t_176_;
double _t_177_;
double _t_175_;
double _t_178_;
double _t_179_;
double _t_166_;
double _t_182_;
double _t_180_;
double _t_183_;
double _t_184_;
double _t_181_;
double _t_186_;
double _t_187_;
double _t_185_;
double _t_189_;
double _t_190_;
double _t_188_;
double _t_191_;
double _t_192_;
double _t_195_;
double _t_193_;
double _t_196_;
double _t_197_;
double _t_194_;
double _t_199_;
double _t_200_;
double _t_198_;
double _t_202_;
double _t_203_;
double _t_201_;
double _t_204_;
double _t_205_;
double _t_208_;
double _t_206_;
double _t_209_;
double _t_210_;
double _t_207_;
double _t_212_;
double _t_213_;
double _t_211_;
double _t_215_;
double _t_216_;
double _t_214_;
double _t_217_;
double _t_218_;
double uacc_0kc0jc0ic0;
double uacc_1kc0jc0ic0;
double uacc_2kc0jc0ic0;
// One-sided 4th-order coefficient combinations of mu (weighted by the
// grid-stretching arrays) along x, y and z.
mux1 = mu[k][j][i-1] * strx[i-1];
mux1 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2];
mux2 = mu[k][j][i-2] * strx[i-2];
mux2 += mu[k][j][i+1] * strx[i+1];
mux2 += 3.0 * mu[k][j][i] * strx[i];
mux2 += 3.0 * mu[k][j][i-1] * strx[i-1];
mux3 = mu[k][j][i-1] * strx[i-1];
mux3 += mu[k][j][i+2] * strx[i+2];
mux3 += 3.0 * mu[k][j][i+1] * strx[i+1];
mux3 += 3.0 * mu[k][j][i] * strx[i];
mux4 = mu[k][j][i+1] * strx[i+1];
mux4 -= 3.0 / 4.0 * mu[k][j][i] * strx[i];
mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2];
muy1 = mu[k][j-1][i] * stry[j-1];
muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2];
muy2 = mu[k][j-2][i] * stry[j-2];
muy2 += mu[k][j+1][i] * stry[j+1];
muy2 += 3.0 * mu[k][j][i] * stry[j];
muy2 += 3.0 * mu[k][j-1][i] * stry[j-1];
muy3 = mu[k][j-1][i] * stry[j-1];
muy3 += mu[k][j+2][i] * stry[j+2];
muy3 += 3.0 * mu[k][j+1][i] * stry[j+1];
muy3 += 3.0 * mu[k][j][i] * stry[j];
muy4 = mu[k][j+1][i] * stry[j+1];
muy4 -= 3.0 / 4.0 * mu[k][j][i] * stry[j];
muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2];
muz1 = mu[k-1][j][i] * strz[k-1];
muz1 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2];
muz2 = mu[k-2][j][i] * strz[k-2];
muz2 += mu[k+1][j][i] * strz[k+1];
muz2 += 3.0 * mu[k][j][i] * strz[k];
muz2 += 3.0 * mu[k-1][j][i] * strz[k-1];
muz3 = mu[k-1][j][i] * strz[k-1];
muz3 += mu[k+2][j][i] * strz[k+2];
muz3 += 3.0 * mu[k+1][j][i] * strz[k+1];
muz3 += 3.0 * mu[k][j][i] * strz[k];
muz4 = mu[k+1][j][i] * strz[k+1];
muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k];
muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2];
// r1: component-0 second-derivative terms (x direction uses 2*mu + la,
// y and z use mu only).
_t_2_ = 2.0 * mux1;
_t_2_ += la[k][j][i-1] * strx[i-1];
_t_2_ -= 3.0 / 4.0 * la[k][j][i] * strx[i];
_t_2_ -= 3.0 / 4.0 * la[k][j][i-2] * strx[i-2];
_t_3_ = u_0[k][j][i-2];
_t_3_ -= u_0[k][j][i];
_t_1_ = _t_2_ * _t_3_;
_t_4_ = 2.0 * mux2;
_t_4_ += la[k][j][i-2] * strx[i-2];
_t_4_ += la[k][j][i+1] * strx[i+1];
_t_4_ += 3.0 * la[k][j][i] * strx[i];
_t_4_ += 3.0 * la[k][j][i-1] * strx[i-1];
_t_5_ = u_0[k][j][i-1];
_t_5_ -= u_0[k][j][i];
_t_1_ += _t_4_ * _t_5_;
_t_6_ = 2.0 * mux3;
_t_6_ += la[k][j][i-1] * strx[i-1];
_t_6_ += la[k][j][i+2] * strx[i+2];
_t_6_ += 3.0 * la[k][j][i+1] * strx[i+1];
_t_6_ += 3.0 * la[k][j][i] * strx[i];
_t_7_ = u_0[k][j][i+1];
_t_7_ -= u_0[k][j][i];
_t_1_ += _t_6_ * _t_7_;
_t_8_ = 2.0 * mux4;
_t_8_ += la[k][j][i+1] * strx[i+1];
_t_8_ -= 3.0 / 4.0 * la[k][j][i] * strx[i];
_t_8_ -= 3.0 / 4.0 * la[k][j][i+2] * strx[i+2];
_t_9_ = u_0[k][j][i+2];
_t_9_ -= u_0[k][j][i];
_t_1_ += _t_8_ * _t_9_;
_t_0_ = strx[i] * _t_1_;
_t_11_ = u_0[k][j-2][i];
_t_11_ -= u_0[k][j][i];
_t_10_ = muy1 * _t_11_;
_t_12_ = u_0[k][j-1][i];
_t_12_ -= u_0[k][j][i];
_t_10_ += muy2 * _t_12_;
_t_13_ = u_0[k][j+1][i];
_t_13_ -= u_0[k][j][i];
_t_10_ += muy3 * _t_13_;
_t_14_ = u_0[k][j+2][i];
_t_14_ -= u_0[k][j][i];
_t_10_ += muy4 * _t_14_;
_t_0_ += stry[j] * _t_10_;
_t_16_ = u_0[k-2][j][i];
_t_16_ -= u_0[k][j][i];
_t_15_ = muz1 * _t_16_;
_t_17_ = u_0[k-1][j][i];
_t_17_ -= u_0[k][j][i];
_t_15_ += muz2 * _t_17_;
_t_18_ = u_0[k+1][j][i];
_t_18_ -= u_0[k][j][i];
_t_15_ += muz3 * _t_18_;
_t_19_ = u_0[k+2][j][i];
_t_19_ -= u_0[k][j][i];
_t_15_ += muz4 * _t_19_;
_t_0_ += strz[k] * _t_15_;
r1 = 1.0 / 6.0 * _t_0_;
// r2: component-1 second-derivative terms (y direction uses 2*mu + la).
_t_22_ = u_1[k][j][i-2];
_t_22_ -= u_1[k][j][i];
_t_21_ = mux1 * _t_22_;
_t_23_ = u_1[k][j][i-1];
_t_23_ -= u_1[k][j][i];
_t_21_ += mux2 * _t_23_;
_t_24_ = u_1[k][j][i+1];
_t_24_ -= u_1[k][j][i];
_t_21_ += mux3 * _t_24_;
_t_25_ = u_1[k][j][i+2];
_t_25_ -= u_1[k][j][i];
_t_21_ += mux4 * _t_25_;
_t_20_ = strx[i] * _t_21_;
_t_27_ = 2.0 * muy1;
_t_27_ += la[k][j-1][i] * stry[j-1];
_t_27_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
_t_27_ -= 3.0 / 4.0 * la[k][j-2][i] * stry[j-2];
_t_28_ = u_1[k][j-2][i];
_t_28_ -= u_1[k][j][i];
_t_26_ = _t_27_ * _t_28_;
_t_29_ = 2.0 * muy2;
_t_29_ += la[k][j-2][i] * stry[j-2];
_t_29_ += la[k][j+1][i] * stry[j+1];
_t_29_ += 3.0 * la[k][j][i] * stry[j];
_t_29_ += 3.0 * la[k][j-1][i] * stry[j-1];
_t_30_ = u_1[k][j-1][i];
_t_30_ -= u_1[k][j][i];
_t_26_ += _t_29_ * _t_30_;
_t_31_ = 2.0 * muy3;
_t_31_ += la[k][j-1][i] * stry[j-1];
_t_31_ += la[k][j+2][i] * stry[j+2];
_t_31_ += 3.0 * la[k][j+1][i] * stry[j+1];
_t_31_ += 3.0 * la[k][j][i] * stry[j];
_t_32_ = u_1[k][j+1][i];
_t_32_ -= u_1[k][j][i];
_t_26_ += _t_31_ * _t_32_;
_t_33_ = 2.0 * muy4;
_t_33_ += la[k][j+1][i] * stry[j+1];
_t_33_ -= 3.0 / 4.0 * la[k][j][i] * stry[j];
_t_33_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2];
_t_34_ = u_1[k][j+2][i];
_t_34_ -= u_1[k][j][i];
_t_26_ += _t_33_ * _t_34_;
_t_20_ += stry[j] * _t_26_;
_t_36_ = u_1[k-2][j][i];
_t_36_ -= u_1[k][j][i];
_t_35_ = muz1 * _t_36_;
_t_37_ = u_1[k-1][j][i];
_t_37_ -= u_1[k][j][i];
_t_35_ += muz2 * _t_37_;
_t_38_ = u_1[k+1][j][i];
_t_38_ -= u_1[k][j][i];
_t_35_ += muz3 * _t_38_;
_t_39_ = u_1[k+2][j][i];
_t_39_ -= u_1[k][j][i];
_t_35_ += muz4 * _t_39_;
_t_20_ += strz[k] * _t_35_;
r2 = 1.0 / 6.0 * _t_20_;
// r3: component-2 second-derivative terms (z direction uses 2*mu + la).
_t_42_ = u_2[k][j][i-2];
_t_42_ -= u_2[k][j][i];
_t_41_ = mux1 * _t_42_;
_t_43_ = u_2[k][j][i-1];
_t_43_ -= u_2[k][j][i];
_t_41_ += mux2 * _t_43_;
_t_44_ = u_2[k][j][i+1];
_t_44_ -= u_2[k][j][i];
_t_41_ += mux3 * _t_44_;
_t_45_ = u_2[k][j][i+2];
_t_45_ -= u_2[k][j][i];
_t_41_ += mux4 * _t_45_;
_t_40_ = strx[i] * _t_41_;
_t_47_ = u_2[k][j-2][i];
_t_47_ -= u_2[k][j][i];
_t_46_ = muy1 * _t_47_;
_t_48_ = u_2[k][j-1][i];
_t_48_ -= u_2[k][j][i];
_t_46_ += muy2 * _t_48_;
_t_49_ = u_2[k][j+1][i];
_t_49_ -= u_2[k][j][i];
_t_46_ += muy3 * _t_49_;
_t_50_ = u_2[k][j+2][i];
_t_50_ -= u_2[k][j][i];
_t_46_ += muy4 * _t_50_;
_t_40_ += stry[j] * _t_46_;
_t_52_ = 2.0 * muz1;
_t_52_ += la[k-1][j][i] * strz[k-1];
_t_52_ -= 3.0 / 4.0 * la[k][j][i] * strz[k];
_t_52_ -= 3.0 / 4.0 * la[k-2][j][i] * strz[k-2];
_t_53_ = u_2[k-2][j][i];
_t_53_ -= u_2[k][j][i];
_t_51_ = _t_52_ * _t_53_;
_t_54_ = 2.0 * muz2;
_t_54_ += la[k-2][j][i] * strz[k-2];
_t_54_ += la[k+1][j][i] * strz[k+1];
_t_54_ += 3.0 * la[k][j][i] * strz[k];
_t_54_ += 3.0 * la[k-1][j][i] * strz[k-1];
_t_55_ = u_2[k-1][j][i];
_t_55_ -= u_2[k][j][i];
_t_51_ += _t_54_ * _t_55_;
_t_56_ = 2.0 * muz3;
_t_56_ += la[k-1][j][i] * strz[k-1];
_t_56_ += la[k+2][j][i] * strz[k+2];
_t_56_ += 3.0 * la[k+1][j][i] * strz[k+1];
_t_56_ += 3.0 * la[k][j][i] * strz[k];
_t_57_ = u_2[k+1][j][i];
_t_57_ -= u_2[k][j][i];
_t_51_ += _t_56_ * _t_57_;
_t_58_ = 2.0 * muz4;
_t_58_ += la[k+1][j][i] * strz[k+1];
_t_58_ -= 3.0 / 4.0 * la[k][j][i] * strz[k];
_t_58_ -= 3.0 / 4.0 * la[k+2][j][i] * strz[k+2];
_t_59_ = u_2[k+2][j][i];
_t_59_ -= u_2[k][j][i];
_t_51_ += _t_58_ * _t_59_;
_t_40_ += strz[k] * _t_51_;
r3 = 1.0 / 6.0 * _t_40_;
// Mixed (cross) derivative terms: 4th-order differences with 8/1
// weights, each scaled by the product of the two stretching factors.
_t_63_ = strx[i] * strz[k];
_t_61_ = _t_63_ * 1.0 / 144.0;
_t_64_ = u_0[k-2][j][i-2];
_t_64_ -= u_0[k+2][j][i-2];
_t_65_ = -u_0[k-1][j][i-2];
_t_65_ += u_0[k+1][j][i-2];
_t_64_ += 8.0 * _t_65_;
_t_62_ = mu[k][j][i-2] * _t_64_;
_t_67_ = u_0[k-2][j][i-1];
_t_67_ -= u_0[k+2][j][i-1];
_t_68_ = -u_0[k-1][j][i-1];
_t_68_ += u_0[k+1][j][i-1];
_t_67_ += 8.0 * _t_68_;
_t_66_ = mu[k][j][i-1] * _t_67_;
_t_62_ -= 8.0 * _t_66_;
_t_70_ = u_0[k-2][j][i+1];
_t_70_ -= u_0[k+2][j][i+1];
_t_71_ = -u_0[k-1][j][i+1];
_t_71_ += u_0[k+1][j][i+1];
_t_70_ += 8.0 * _t_71_;
_t_69_ = mu[k][j][i+1] * _t_70_;
_t_62_ += 8.0 * _t_69_;
_t_72_ = u_0[k-2][j][i+2];
_t_72_ -= u_0[k+2][j][i+2];
_t_73_ = -u_0[k-1][j][i+2];
_t_73_ += u_0[k+1][j][i+2];
_t_72_ += 8.0 * _t_73_;
_t_62_ -= mu[k][j][i+2] * _t_72_;
_t_60_ = _t_61_ * _t_62_;
_t_76_ = stry[j] * strz[k];
_t_74_ = _t_76_ * 1.0 / 144.0;
_t_77_ = u_1[k-2][j-2][i];
_t_77_ -= u_1[k+2][j-2][i];
_t_78_ = -u_1[k-1][j-2][i];
_t_78_ += u_1[k+1][j-2][i];
_t_77_ += 8.0 * _t_78_;
_t_75_ = mu[k][j-2][i] * _t_77_;
_t_80_ = u_1[k-2][j-1][i];
_t_80_ -= u_1[k+2][j-1][i];
_t_81_ = -u_1[k-1][j-1][i];
_t_81_ += u_1[k+1][j-1][i];
_t_80_ += 8.0 * _t_81_;
_t_79_ = mu[k][j-1][i] * _t_80_;
_t_75_ -= 8.0 * _t_79_;
_t_83_ = u_1[k-2][j+1][i];
_t_83_ -= u_1[k+2][j+1][i];
_t_84_ = -u_1[k-1][j+1][i];
_t_84_ += u_1[k+1][j+1][i];
_t_83_ += 8.0 * _t_84_;
_t_82_ = mu[k][j+1][i] * _t_83_;
_t_75_ += 8.0 * _t_82_;
_t_85_ = u_1[k-2][j+2][i];
_t_85_ -= u_1[k+2][j+2][i];
_t_86_ = -u_1[k-1][j+2][i];
_t_86_ += u_1[k+1][j+2][i];
_t_85_ += 8.0 * _t_86_;
_t_75_ -= mu[k][j+2][i] * _t_85_;
_t_60_ += _t_74_ * _t_75_;
_t_89_ = strx[i] * strz[k];
_t_87_ = _t_89_ * 1.0 / 144.0;
_t_90_ = u_0[k-2][j][i-2];
_t_90_ -= u_0[k-2][j][i+2];
_t_91_ = -u_0[k-2][j][i-1];
_t_91_ += u_0[k-2][j][i+1];
_t_90_ += 8.0 * _t_91_;
_t_88_ = la[k-2][j][i] * _t_90_;
_t_93_ = u_0[k-1][j][i-2];
_t_93_ -= u_0[k-1][j][i+2];
_t_94_ = -u_0[k-1][j][i-1];
_t_94_ += u_0[k-1][j][i+1];
_t_93_ += 8.0 * _t_94_;
_t_92_ = la[k-1][j][i] * _t_93_;
_t_88_ -= 8.0 * _t_92_;
_t_96_ = u_0[k+1][j][i-2];
_t_96_ -= u_0[k+1][j][i+2];
_t_97_ = -u_0[k+1][j][i-1];
_t_97_ += u_0[k+1][j][i+1];
_t_96_ += 8.0 * _t_97_;
_t_95_ = la[k+1][j][i] * _t_96_;
_t_88_ += 8.0 * _t_95_;
_t_98_ = u_0[k+2][j][i-2];
_t_98_ -= u_0[k+2][j][i+2];
_t_99_ = -u_0[k+2][j][i-1];
_t_99_ += u_0[k+2][j][i+1];
_t_98_ += 8.0 * _t_99_;
_t_88_ -= la[k+2][j][i] * _t_98_;
_t_60_ += _t_87_ * _t_88_;
_t_102_ = stry[j] * strz[k];
_t_100_ = _t_102_ * 1.0 / 144.0;
_t_103_ = u_1[k-2][j-2][i];
_t_103_ -= u_1[k-2][j+2][i];
_t_104_ = -u_1[k-2][j-1][i];
_t_104_ += u_1[k-2][j+1][i];
_t_103_ += 8.0 * _t_104_;
_t_101_ = la[k-2][j][i] * _t_103_;
_t_106_ = u_1[k-1][j-2][i];
_t_106_ -= u_1[k-1][j+2][i];
_t_107_ = -u_1[k-1][j-1][i];
_t_107_ += u_1[k-1][j+1][i];
_t_106_ += 8.0 * _t_107_;
_t_105_ = la[k-1][j][i] * _t_106_;
_t_101_ -= 8.0 * _t_105_;
_t_109_ = u_1[k+1][j-2][i];
_t_109_ -= u_1[k+1][j+2][i];
_t_110_ = -u_1[k+1][j-1][i];
_t_110_ += u_1[k+1][j+1][i];
_t_109_ += 8.0 * _t_110_;
_t_108_ = la[k+1][j][i] * _t_109_;
_t_101_ += 8.0 * _t_108_;
_t_111_ = u_1[k+2][j-2][i];
_t_111_ -= u_1[k+2][j+2][i];
_t_112_ = -u_1[k+2][j-1][i];
_t_112_ += u_1[k+2][j+1][i];
_t_111_ += 8.0 * _t_112_;
_t_101_ -= la[k+2][j][i] * _t_111_;
_t_60_ += _t_100_ * _t_101_;
r3 += _t_60_;
_t_116_ = strx[i] * stry[j];
_t_114_ = _t_116_ * 1.0 / 144.0;
_t_117_ = u_1[k][j-2][i-2];
_t_117_ -= u_1[k][j+2][i-2];
_t_118_ = -u_1[k][j-1][i-2];
_t_118_ += u_1[k][j+1][i-2];
_t_117_ += 8.0 * _t_118_;
_t_115_ = la[k][j][i-2] * _t_117_;
_t_120_ = u_1[k][j-2][i-1];
_t_120_ -= u_1[k][j+2][i-1];
_t_121_ = -u_1[k][j-1][i-1];
_t_121_ += u_1[k][j+1][i-1];
_t_120_ += 8.0 * _t_121_;
_t_119_ = la[k][j][i-1] * _t_120_;
_t_115_ -= 8.0 * _t_119_;
_t_123_ = u_1[k][j-2][i+1];
_t_123_ -= u_1[k][j+2][i+1];
_t_124_ = -u_1[k][j-1][i+1];
_t_124_ += u_1[k][j+1][i+1];
_t_123_ += 8.0 * _t_124_;
_t_122_ = la[k][j][i+1] * _t_123_;
_t_115_ += 8.0 * _t_122_;
_t_125_ = u_1[k][j-2][i+2];
_t_125_ -= u_1[k][j+2][i+2];
_t_126_ = -u_1[k][j-1][i+2];
_t_126_ += u_1[k][j+1][i+2];
_t_125_ += 8.0 * _t_126_;
_t_115_ -= la[k][j][i+2] * _t_125_;
_t_113_ = _t_114_ * _t_115_;
_t_129_ = strx[i] * strz[k];
_t_127_ = _t_129_ * 1.0 / 144.0;
_t_130_ = u_2[k-2][j][i-2];
_t_130_ -= u_2[k+2][j][i-2];
_t_131_ = -u_2[k-1][j][i-2];
_t_131_ += u_2[k+1][j][i-2];
_t_130_ += 8.0 * _t_131_;
_t_128_ = la[k][j][i-2] * _t_130_;
_t_133_ = u_2[k-2][j][i-1];
_t_133_ -= u_2[k+2][j][i-1];
_t_134_ = -u_2[k-1][j][i-1];
_t_134_ += u_2[k+1][j][i-1];
_t_133_ += 8.0 * _t_134_;
_t_132_ = la[k][j][i-1] * _t_133_;
_t_128_ -= 8.0 * _t_132_;
_t_136_ = u_2[k-2][j][i+1];
_t_136_ -= u_2[k+2][j][i+1];
_t_137_ = -u_2[k-1][j][i+1];
_t_137_ += u_2[k+1][j][i+1];
_t_136_ += 8.0 * _t_137_;
_t_135_ = la[k][j][i+1] * _t_136_;
_t_128_ += 8.0 * _t_135_;
_t_138_ = u_2[k-2][j][i+2];
_t_138_ -= u_2[k+2][j][i+2];
_t_139_ = -u_2[k-1][j][i+2];
_t_139_ += u_2[k+1][j][i+2];
_t_138_ += 8.0 * _t_139_;
_t_128_ -= la[k][j][i+2] * _t_138_;
_t_113_ += _t_127_ * _t_128_;
_t_142_ = strx[i] * stry[j];
_t_140_ = _t_142_ * 1.0 / 144.0;
_t_143_ = u_1[k][j-2][i-2];
_t_143_ -= u_1[k][j-2][i+2];
_t_144_ = -u_1[k][j-2][i-1];
_t_144_ += u_1[k][j-2][i+1];
_t_143_ += 8.0 * _t_144_;
_t_141_ = mu[k][j-2][i] * _t_143_;
_t_146_ = u_1[k][j-1][i-2];
_t_146_ -= u_1[k][j-1][i+2];
_t_147_ = -u_1[k][j-1][i-1];
_t_147_ += u_1[k][j-1][i+1];
_t_146_ += 8.0 * _t_147_;
_t_145_ = mu[k][j-1][i] * _t_146_;
_t_141_ -= 8.0 * _t_145_;
_t_149_ = u_1[k][j+1][i-2];
_t_149_ -= u_1[k][j+1][i+2];
_t_150_ = -u_1[k][j+1][i-1];
_t_150_ += u_1[k][j+1][i+1];
_t_149_ += 8.0 * _t_150_;
_t_148_ = mu[k][j+1][i] * _t_149_;
_t_141_ += 8.0 * _t_148_;
_t_151_ = u_1[k][j+2][i-2];
_t_151_ -= u_1[k][j+2][i+2];
_t_152_ = -u_1[k][j+2][i-1];
_t_152_ += u_1[k][j+2][i+1];
_t_151_ += 8.0 * _t_152_;
_t_141_ -= mu[k][j+2][i] * _t_151_;
_t_113_ += _t_140_ * _t_141_;
_t_155_ = strx[i] * strz[k];
_t_153_ = _t_155_ * 1.0 / 144.0;
_t_156_ = u_2[k-2][j][i-2];
_t_156_ -= u_2[k-2][j][i+2];
_t_157_ = -u_2[k-2][j][i-1];
_t_157_ += u_2[k-2][j][i+1];
_t_156_ += 8.0 * _t_157_;
_t_154_ = mu[k-2][j][i] * _t_156_;
_t_159_ = u_2[k-1][j][i-2];
_t_159_ -= u_2[k-1][j][i+2];
_t_160_ = -u_2[k-1][j][i-1];
_t_160_ += u_2[k-1][j][i+1];
_t_159_ += 8.0 * _t_160_;
_t_158_ = mu[k-1][j][i] * _t_159_;
_t_154_ -= 8.0 * _t_158_;
_t_162_ = u_2[k+1][j][i-2];
_t_162_ -= u_2[k+1][j][i+2];
_t_163_ = -u_2[k+1][j][i-1];
_t_163_ += u_2[k+1][j][i+1];
_t_162_ += 8.0 * _t_163_;
_t_161_ = mu[k+1][j][i] * _t_162_;
_t_154_ += 8.0 * _t_161_;
_t_164_ = u_2[k+2][j][i-2];
_t_164_ -= u_2[k+2][j][i+2];
_t_165_ = -u_2[k+2][j][i-1];
_t_165_ += u_2[k+2][j][i+1];
_t_164_ += 8.0 * _t_165_;
_t_154_ -= mu[k+2][j][i] * _t_164_;
_t_113_ += _t_153_ * _t_154_;
r1 += _t_113_;
_t_169_ = strx[i] * stry[j];
_t_167_ = _t_169_ * 1.0 / 144.0;
_t_170_ = u_0[k][j-2][i-2];
_t_170_ -= u_0[k][j+2][i-2];
_t_171_ = -u_0[k][j-1][i-2];
_t_171_ += u_0[k][j+1][i-2];
_t_170_ += 8.0 * _t_171_;
_t_168_ = mu[k][j][i-2] * _t_170_;
_t_173_ = u_0[k][j-2][i-1];
_t_173_ -= u_0[k][j+2][i-1];
_t_174_ = -u_0[k][j-1][i-1];
_t_174_ += u_0[k][j+1][i-1];
_t_173_ += 8.0 * _t_174_;
_t_172_ = mu[k][j][i-1] * _t_173_;
_t_168_ -= 8.0 * _t_172_;
_t_176_ = u_0[k][j-2][i+1];
_t_176_ -= u_0[k][j+2][i+1];
_t_177_ = -u_0[k][j-1][i+1];
_t_177_ += u_0[k][j+1][i+1];
_t_176_ += 8.0 * _t_177_;
_t_175_ = mu[k][j][i+1] * _t_176_;
_t_168_ += 8.0 * _t_175_;
_t_178_ = u_0[k][j-2][i+2];
_t_178_ -= u_0[k][j+2][i+2];
_t_179_ = -u_0[k][j-1][i+2];
_t_179_ += u_0[k][j+1][i+2];
_t_178_ += 8.0 * _t_179_;
_t_168_ -= mu[k][j][i+2] * _t_178_;
_t_166_ = _t_167_ * _t_168_;
_t_182_ = strx[i] * stry[j];
_t_180_ = _t_182_ * 1.0 / 144.0;
_t_183_ = u_0[k][j-2][i-2];
_t_183_ -= u_0[k][j-2][i+2];
_t_184_ = -u_0[k][j-2][i-1];
_t_184_ += u_0[k][j-2][i+1];
_t_183_ += 8.0 * _t_184_;
_t_181_ = la[k][j-2][i] * _t_183_;
_t_186_ = u_0[k][j-1][i-2];
_t_186_ -= u_0[k][j-1][i+2];
_t_187_ = -u_0[k][j-1][i-1];
_t_187_ += u_0[k][j-1][i+1];
_t_186_ += 8.0 * _t_187_;
_t_185_ = la[k][j-1][i] * _t_186_;
_t_181_ -= 8.0 * _t_185_;
_t_189_ = u_0[k][j+1][i-2];
_t_189_ -= u_0[k][j+1][i+2];
_t_190_ = -u_0[k][j+1][i-1];
_t_190_ += u_0[k][j+1][i+1];
_t_189_ += 8.0 * _t_190_;
_t_188_ = la[k][j+1][i] * _t_189_;
_t_181_ += 8.0 * _t_188_;
_t_191_ = u_0[k][j+2][i-2];
_t_191_ -= u_0[k][j+2][i+2];
_t_192_ = -u_0[k][j+2][i-1];
_t_192_ += u_0[k][j+2][i+1];
_t_191_ += 8.0 * _t_192_;
_t_181_ -= la[k][j+2][i] * _t_191_;
_t_166_ += _t_180_ * _t_181_;
_t_195_ = stry[j] * strz[k];
_t_193_ = _t_195_ * 1.0 / 144.0;
_t_196_ = u_2[k-2][j-2][i];
_t_196_ -= u_2[k+2][j-2][i];
_t_197_ = -u_2[k-1][j-2][i];
_t_197_ += u_2[k+1][j-2][i];
_t_196_ += 8.0 * _t_197_;
_t_194_ = la[k][j-2][i] * _t_196_;
_t_199_ = u_2[k-2][j-1][i];
_t_199_ -= u_2[k+2][j-1][i];
_t_200_ = -u_2[k-1][j-1][i];
_t_200_ += u_2[k+1][j-1][i];
_t_199_ += 8.0 * _t_200_;
_t_198_ = la[k][j-1][i] * _t_199_;
_t_194_ -= 8.0 * _t_198_;
_t_202_ = u_2[k-2][j+1][i];
_t_202_ -= u_2[k+2][j+1][i];
_t_203_ = -u_2[k-1][j+1][i];
_t_203_ += u_2[k+1][j+1][i];
_t_202_ += 8.0 * _t_203_;
_t_201_ = la[k][j+1][i] * _t_202_;
_t_194_ += 8.0 * _t_201_;
_t_204_ = u_2[k-2][j+2][i];
_t_204_ -= u_2[k+2][j+2][i];
_t_205_ = -u_2[k-1][j+2][i];
_t_205_ += u_2[k+1][j+2][i];
_t_204_ += 8.0 * _t_205_;
_t_194_ -= la[k][j+2][i] * _t_204_;
_t_166_ += _t_193_ * _t_194_;
_t_208_ = stry[j] * strz[k];
_t_206_ = _t_208_ * 1.0 / 144.0;
_t_209_ = u_2[k-2][j-2][i];
_t_209_ -= u_2[k-2][j+2][i];
_t_210_ = -u_2[k-2][j-1][i];
_t_210_ += u_2[k-2][j+1][i];
_t_209_ += 8.0 * _t_210_;
_t_207_ = mu[k-2][j][i] * _t_209_;
_t_212_ = u_2[k-1][j-2][i];
_t_212_ -= u_2[k-1][j+2][i];
_t_213_ = -u_2[k-1][j-1][i];
_t_213_ += u_2[k-1][j+1][i];
_t_212_ += 8.0 * _t_213_;
_t_211_ = mu[k-1][j][i] * _t_212_;
_t_207_ -= 8.0 * _t_211_;
_t_215_ = u_2[k+1][j-2][i];
_t_215_ -= u_2[k+1][j+2][i];
_t_216_ = -u_2[k+1][j-1][i];
_t_216_ += u_2[k+1][j+1][i];
_t_215_ += 8.0 * _t_216_;
_t_214_ = mu[k+1][j][i] * _t_215_;
_t_207_ += 8.0 * _t_214_;
_t_217_ = u_2[k+2][j-2][i];
_t_217_ -= u_2[k+2][j+2][i];
_t_218_ = -u_2[k+2][j-1][i];
_t_218_ += u_2[k+2][j+1][i];
_t_217_ += 8.0 * _t_218_;
_t_207_ -= mu[k+2][j][i] * _t_217_;
_t_166_ += _t_206_ * _t_207_;
r2 += _t_166_;
// Accumulate the scaled stencil results into the acceleration field:
// uacc = a1 * uacc + (1/h^2) * r.
uacc_0kc0jc0ic0 = a1 * uacc_0[k][j][i];
uacc_0kc0jc0ic0 += cof * r1;
uacc_0[k][j][i] = uacc_0kc0jc0ic0;
uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i];
uacc_1kc0jc0ic0 += cof * r2;
uacc_1[k][j][i] = uacc_1kc0jc0ic0;
uacc_2kc0jc0ic0 = a1 * uacc_2[k][j][i];
uacc_2kc0jc0ic0 += cof * r3;
uacc_2[k][j][i] = uacc_2kc0jc0ic0;
}
}
// Host driver for the sw4 kernel: allocates device mirrors of all fields,
// copies inputs, launches one stencil sweep over an N^3 grid, and copies
// the three updated acceleration components back.
extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) {
double *uacc_0;
cudaMalloc (&uacc_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_0\n");
cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_1;
cudaMalloc (&uacc_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_1\n");
cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *uacc_2;
cudaMalloc (&uacc_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for uacc_2\n");
cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_0;
cudaMalloc (&u_0, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_0\n");
cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_1;
cudaMalloc (&u_1, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_1\n");
cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *u_2;
cudaMalloc (&u_2, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for u_2\n");
cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *mu;
cudaMalloc (&mu, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
double *la;
cudaMalloc (&la, sizeof(double)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice);
// The stretching arrays are 1-D (one entry per grid line).
double *strx;
cudaMalloc (&strx, sizeof(double)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice);
double *stry;
cudaMalloc (&stry, sizeof(double)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice);
double *strz;
cudaMalloc (&strz, sizeof(double)*N);
check_error ("Failed to allocate device memory for strz\n");
cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 blockconfig (16, 2, 2);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z));
sw4 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
// BUG FIX: the original never checked the launch or execution status, so
// kernel failures surfaced (if at all) as silently corrupt output.
check_error ("sw4 kernel launch failed\n");
cudaDeviceSynchronize ();
check_error ("sw4 kernel execution failed\n");
cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost);
check_error ("Failed to copy results back to host\n");
cudaFree (uacc_0);
cudaFree (uacc_1);
cudaFree (uacc_2);
cudaFree (u_0);
cudaFree (u_1);
cudaFree (u_2);
cudaFree (mu);
cudaFree (la);
cudaFree (strx);
cudaFree (stry);
cudaFree (strz);
}
|
7,321 | /*************************************************************************************************
*
* Computer Engineering Group, Heidelberg University - GPU Computing Exercise 03
*
* Group : TBD
*
* File : main.cu
*
* Purpose : Memory Operations Benchmark
*
*************************************************************************************************/
//
// Kernels
//
// Exercise stub: coalesced global-memory access benchmark kernel.
// Parameters and body are intentionally unimplemented (TODO).
__global__ void
globalMemCoalescedKernel(/*TODO Parameters*/)
{
    /*TODO Kernel Code*/
}
// Host launcher: forwards the grid/block configuration to the kernel.
// Third launch argument is the dynamic shared-memory size (0 bytes).
void
globalMemCoalescedKernel_Wrapper(dim3 gridDim, dim3 blockDim /*TODO Parameters*/) {
	globalMemCoalescedKernel<<< gridDim, blockDim, 0 /*Shared Memory Size*/ >>>( /*TODO Parameters*/);
}
// Exercise stub: strided global-memory access benchmark kernel.
// Parameters and body are intentionally unimplemented (TODO).
__global__ void
globalMemStrideKernel(/*TODO Parameters*/)
{
    /*TODO Kernel Code*/
}
// Host launcher: forwards the grid/block configuration to the kernel.
// Third launch argument is the dynamic shared-memory size (0 bytes).
void
globalMemStrideKernel_Wrapper(dim3 gridDim, dim3 blockDim /*TODO Parameters*/) {
	globalMemStrideKernel<<< gridDim, blockDim, 0 /*Shared Memory Size*/ >>>( /*TODO Parameters*/);
}
// Exercise stub: offset global-memory access benchmark kernel.
// Parameters and body are intentionally unimplemented (TODO).
__global__ void
globalMemOffsetKernel(/*TODO Parameters*/)
{
    /*TODO Kernel Code*/
}
// Host launcher: forwards the grid/block configuration to the kernel.
// Third launch argument is the dynamic shared-memory size (0 bytes).
void
globalMemOffsetKernel_Wrapper(dim3 gridDim, dim3 blockDim /*TODO Parameters*/) {
	globalMemOffsetKernel<<< gridDim, blockDim, 0 /*Shared Memory Size*/ >>>( /*TODO Parameters*/);
}
|
7,322 | #include"cuda_runtime.h"
#include"cuda.h"
#include"cuda_runtime_api.h"
#include<stdio.h>
#include <stdlib.h>
using namespace std;
// Debug kernel: each thread just prints a marker string via device printf.
// tid/bid are computed but currently unused (see the commented-out printf).
__global__ static void sumOfSquares()
{
	const int tid = threadIdx.x;
	const int bid = blockIdx.x;
	printf("???????");
	//printf("%d %d\n",tid*blockDim.x + bid,result);
}
// Launches the debug kernel and waits for it to finish so its printf
// output is flushed before the process exits.
int main()
{
	// Single thread, 0 bytes of dynamic shared memory.
	sumOfSquares<<<1,1,0>>>();
	// The original spun in an empty nested busy-wait loop, which neither
	// guarantees kernel completion nor yields the CPU; synchronize instead.
	cudaDeviceSynchronize();
	return 0;
}
7,323 | # include <time.h>
# include <math.h>
# include <stdio.h>
// Element-wise vector addition: c[i] = a[i] + b[i].
// NOTE(review): there is no bounds guard, so EVERY launched thread
// dereferences its index — the launch configuration must not produce more
// threads than the allocated length of a/b/c (see main's padded buffers).
__global__ void add( int *a , int *b , int *c)
{
	int index= threadIdx.x + blockIdx.x * blockDim.x;
	c[index] = a[index] +b[index];
}
//# define N 125
#define thread_count 10
// Stub: intended to fill a[0..h-1] with random values; currently a no-op
// (main initializes the arrays itself).
void random_ints(int* a, int h)
{
}
// Reads a vector size, adds two vectors on the GPU, and prints timing plus
// the result. Fixes from review: the init loop wrote one element past each
// host array (i<=N), and the old launch of N blocks x thread_count threads
// drove the unguarded kernel far past the device allocations.
int main(void){
	int N;
	printf("\"Hello Vector !\"\n enter size of vector\n");
	scanf("%d",&N);
	int a[N],b[N],c[N]; // host copies of a, b, c
	int *d_a,*d_b,*d_c; // device copies of a, b, c
	int size = N * sizeof(int);
	// The kernel has no bounds guard, so size the grid to just cover N and
	// pad the device buffers to the full launch width: tail threads then
	// read/write only padding instead of unowned memory.
	int blocks = (N + thread_count - 1) / thread_count;
	int padded = blocks * thread_count * sizeof(int);
	cudaMalloc((void **)&d_a, padded);
	cudaMalloc((void **)&d_b, padded);
	cudaMalloc((void **)&d_c, padded);
	// Set up input values (was i<=N, an out-of-bounds write).
	for ( int i=0;i<N;i++)
	{
		a[i]=i+2;
		b[i]=i+3;
		c[i]=0;
	}
	// Copy inputs to device
	cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_c, c, size, cudaMemcpyHostToDevice);
	// start clocking
	clock_t start_time = clock();
	// Launch add() kernel on GPU: one thread per element (plus padding).
	add<<<blocks,thread_count>>>(d_a, d_b, d_c);
	cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
	// end clocking and measure execution time
	clock_t stop_time = clock();
	int time =stop_time - start_time;
	printf("time=%d\n", time);
	// Copy result back to host
	cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
	printf("c=");
	for(int i=0;i<N;i++){
		printf("%d+",c[i]);}
	printf("\n");
	// Cleanup
	cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
	return 0;
}
|
7,324 | #include <stdio.h>
//casual 50 million interger addition
#define N 50000000
// Empty placeholder kernel (never launched).
__global__ void mykernel(void){
}
// One block per element: the block index selects the slot to sum.
__global__ void add(int *a, int *b, int *c){
	const int i = blockIdx.x;
	c[i] = a[i] + b[i];
}
// Fills x[0..size-1] with pseudo-random values in [0, N).
void random_ints(int* x, int size)
{
	for (int i = 0; i < size; ++i)
		x[i] = rand() % N;
}
// Adds two random N-element vectors on the GPU (one block per element) and
// prints every triple. Fixes from review: each buffer was allocated at
// twice the needed size (N*sizeof(int)*2), and the host buffers leaked.
int main(void){
	int *a,*b,*c; // host copies of a,b,c
	int *d_a,*d_b,*d_c; // device copies of a,b,c
	int size = N * sizeof(int);   // exactly N ints per buffer
	cudaMalloc((void **)&d_a,size);
	cudaMalloc((void **)&d_b,size);
	cudaMalloc((void **)&d_c,size);
	// Alloc space for host copies of a,b,c and set up input values
	a = (int *)malloc(size); random_ints(a,N);
	b = (int *)malloc(size); random_ints(b,N);
	c = (int *)malloc(size);
	cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
	cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
	add<<<N,1>>>(d_a,d_b,d_c);
	// cudaMemcpy synchronizes, so the kernel has finished before c is read.
	cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	int i;
	for(i = 0; i < N;i++){
		printf("a[%d]: %d\n",i,a[i]);
		printf("b[%d]: %d\n",i,b[i]);
		printf("c[%d]: %d\n",i,c[i]);
	}
	// Release host buffers (leaked in the original).
	free(a); free(b); free(c);
	return 0;
}
7,325 | #include "includes.h"
// Doubles each of the first `size` elements of `in` into `out`;
// one thread per element, tail threads drop out at the guard.
__global__ void multiplyBy2(int size, const long *in, long *out) {
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx >= size)
		return;
	out[idx] = in[idx] * 2;
}
7,326 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <stdio.h>
#define blocksize 256
// Number of blocksize-wide thread blocks needed to cover `length`
// elements (integer division rounded up).
int getblock(int length) {
	int blocks = length / blocksize;
	if ((length % blocksize) > 0)
		blocks++;
	return blocks;
}
int getlg2(int length);
// Debug helper: prints every adjacent out-of-order pair in a[0..length-1],
// followed by a trailing "succeed" line (printed once, when the loop
// reaches the last index). Removed an unused local from the original.
void check(int *a, int length) {
	printf("//////////////////////////////////////////////\n");
	for (int i = 0; i < length; i++) {
		if (i < (length - 1)) {
			if (a[i] > a[i + 1])
				printf("a[%d] = %d , a[%d] = %d\n", i, a[i], i + 1, a[i + 1]);
		}
		else printf("succeed\n");
	}
}
// Binary search over array[star..end] (inclusive, array sorted ascending)
// for the slot where `a` should be inserted. Uses strict '>' comparisons,
// so values equal to `a` stay AFTER the returned index (lower-bound
// flavour; insert1 is the upper-bound twin).
__device__ __host__ int insert0(int a, int *array,unsigned int star,unsigned int end) {
	int p = 0;
	// Narrow [star, end] until the bounds are adjacent.
	while ((end - star) > 1) {
		p = (star + end) / 2;
		if (a > array[p])star = p;
		else end = p;
	}
	// Final refinement, then step past `end` if a exceeds it.
	p = (end + star)/2;
	if (a > array[p])star = p;
	else end = p;
	if (a > array[end])end++;
	return end;
}
// Binary search over array[star..end] (inclusive, array sorted ascending)
// for the slot where `a` should be inserted. Uses '>=' comparisons, so
// values equal to `a` stay BEFORE the returned index (upper-bound
// flavour; insert0 is the lower-bound twin).
__device__ __host__ int insert1(int a, int *array, unsigned int star, unsigned int end) {
	int p = 0;
	// Narrow [star, end] until the bounds are adjacent.
	while ((end - star) > 1) {
		p = (star + end) / 2;
		if (a >= array[p])star = p;
		else end = p;
	}
	// Final refinement, then step past `end` if a is >= it.
	p = (end + star) / 2;
	if (a >= array[p])star = p;
	else end = p;
	if (a >= array[end])end++;
	return end;
}
// Block-local sort: each block loads blocksize ints from the mapped host
// buffer into shared memory (tail lanes padded with INT_MAX so they sort
// last), then builds a sorted run of blocksize elements in a_dev via an
// initial pairwise compare-exchange followed by iterative run merging.
// Launched as <<<getblock(length), blocksize>>> (see sort_int).
// The original comments were mojibake; intent reconstructed from the code.
__global__ void sort_int_shared(int *a_map, int *a_dev, int length) {
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	__shared__ int a_s[blocksize];
	// Load this block's slice; out-of-range lanes hold INT_MAX.
	if(tid<length)a_s[threadIdx.x] = a_map[tid];
	else a_s[threadIdx.x] = INT_MAX;
	int r = 0;
	__syncthreads();
	// Split the block's threads into two halves with different roles.
	bool flag = (threadIdx.x < (blocksize / 2));
	tid = tid % (blocksize / 2);
	// Each half handles one element of an adjacent pair.
	if (flag) r = a_s[tid * 2];
	else r = a_s[tid * 2 + 1];
	__syncthreads();
	// Compare-exchange each adjacent pair into sorted order.
	if (flag) {
		if(r > a_s[tid * 2 + 1])a_s[tid*2+1] = r;
	}
	else {
		if (r < a_s[tid * 2])a_s[tid * 2] = r;
	}
	__syncthreads();// barrier before the merge rounds below
	// Merge sorted runs of length i into runs of length 2*i.
	for (int i = 2; i < blocksize; i *= 2) {
		int pair_star = (tid / i)*i*2;
		int offset = tid%i;
		r = a_s[pair_star + offset + ((!flag)*i)];
		int p;
		if (flag) {
			// Element from the left run: rank it within the right run.
			p = insert0(r, a_s, pair_star + i, pair_star + (2 * i) - 1);
			p = p - i;
		}
		else
		{
			// Element from the right run: rank it within the left run.
			p = insert1(r, a_s, pair_star, pair_star + i - 1);
		}
		__syncthreads();
		p += offset;
		a_s[p] = r;
		__syncthreads();
	}
	a_dev[blockIdx.x * blockDim.x + threadIdx.x] = a_s[threadIdx.x];
}
// One round of the global merge phase: round i merges neighbouring sorted
// runs of 2^i blocks into runs of 2^(i+1) blocks. Each thread takes its
// element r, binary-searches the partner run for its rank, and writes r to
// its merged position in a_next. flag is true for threads in the LEFT run
// of a pair; the first branch under flag handles a partial run at the tail
// of the grid (fewer than pair_length/2 partner blocks).
// Intent reconstructed from the code — verify against sort_int's driver.
__global__ void short_int_global(int *a_dev, int *a_next, int i) {
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	int r = a_dev[tid];   // this thread's element
	int p = tid;          // default: keep the current position
	bool flag = ((blockIdx.x >> i)&1);
	flag = !flag;         // bit clear -> left run of the pair
	int pair_length = 2 << i;               // blocks per merged pair
	int pair_tid = blockIdx.x / pair_length; // which pair this block is in
	if (flag)
	{
		// Left-run element: rank within the right run (insert0 keeps
		// equal keys left of the existing run, preserving stability).
		if ((blockIdx.x >> (i + 1)) == (gridDim.x >> (i + 1))) {
			// Last, possibly partial pair at the end of the grid.
			int size = gridDim.x % (2 << i);
			size -= (pair_length / 2);
			if (size > 0) {
				int star = pair_tid*pair_length + (pair_length / 2);
				int end = star + size;
				star *= blocksize;
				end *= blocksize;
				end--;
				p = insert0(r, a_dev, star, end);
				p = p - blocksize*(pair_length / 2);
				p += tid - (pair_tid*pair_length*blocksize);
			}
			a_next[p] = r;
		}
		else
		{
			int star = pair_tid*pair_length + (pair_length / 2);
			int end = star + (pair_length/2);
			star *= blocksize;
			end *= blocksize;
			end--;
			p = insert0(r, a_dev, star, end);
			p = p - blocksize*(pair_length / 2);
			p += tid - (pair_tid*pair_length*blocksize);
			a_next[p] = r;
		}
	}
	else
	{
		// Right-run element: rank within the left run (insert1 keeps
		// equal keys right of the existing run).
		int star = pair_tid*pair_length;
		int end = star + (pair_length / 2);
		star *= blocksize;
		end *= blocksize;
		end--;
		p = insert1(r, a_dev, star, end);
		p += tid - ((pair_tid*pair_length + (pair_length / 2))*blocksize);
		a_next[p] = r;
	}
}
// Sorts a[0..length-1]: a block-local shared-memory sort produces sorted
// runs of `blocksize`, then getlg2(blocks) rounds of the global merge
// kernel ping-pong between a_dev and a_next.
// Fixes from review: the original freed a_next twice, never freed a_dev,
// and had a stray empty statement after the opening brace.
void sort_int(int *a, int length) {
	int *a_dev;
	int *a_map;
	cudaMalloc((void**)&a_dev, getblock(length) * blocksize * sizeof(int));
	// a was allocated with cudaHostAllocMapped, so the device can read it
	// directly through this mapped pointer.
	cudaHostGetDevicePointer((void **)&a_map, (void *)a, 0);
	sort_int_shared<<<getblock(length),blocksize>>>(a_map, a_dev, length);
	cudaMemcpy(a, a_dev, length * sizeof(int), cudaMemcpyDeviceToHost);
	int *a_next;
	cudaMalloc((void**)&a_next, getblock(length) * blocksize * sizeof(int));
	int times = getlg2(getblock(length));
	for (int i = 0; i < times; i++) {
		short_int_global <<<getblock(length), blocksize >>>(a_dev, a_next, i);
		if (i == (times - 1)) {
			cudaMemcpy(a, a_next, length * sizeof(int), cudaMemcpyDeviceToHost);
			break;
		}
		// Ping-pong the buffers for the next round.
		int *c = a_dev;
		a_dev = a_next;
		a_next = c;
	}
	// Free both scratch buffers exactly once (whatever names they carry
	// after the swaps above).
	cudaFree(a_dev);
	cudaFree(a_next);
}
// Ceiling of log2(length): position of the highest set bit, plus one when
// more than a single bit is set (returns 0 for length <= 1).
int getlg2(int length) {
	int highest = 0;
	int setBits = 0;
	for (int bit = 0; bit < 32; ++bit) {
		if ((length >> bit) & 1) {
			highest = bit;
			++setBits;
		}
	}
	return (setBits > 1) ? highest + 1 : highest;
}
// Allocates a pinned, device-mapped host buffer of `length` ints and fills
// it with the strictly decreasing sequence length, length-1, ..., 1.
int* genarray(int length) {
	int *buf;
	cudaHostAlloc((void**)&buf, length * sizeof(int), cudaHostAllocMapped);
	for (int i = 0; i < length; ++i)
		buf[i] = length - i;
	return buf;
}
// Benchmark driver: sorts 128Mi descending ints and prints the elapsed
// GPU time. Fixes from review: the CUDA events and the pinned host buffer
// were leaked, and main fell off the end without cleanup.
int main() {
	int length = 134217728;
	int *a = genarray(length);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// record start event on the default stream
	cudaEventRecord(start);
	sort_int(a,length);
	cudaEventRecord(stop);
	// wait until the stop event completes
	cudaEventSynchronize(stop);
	// calculate the elapsed time between the two events
	float time;
	cudaEventElapsedTime(&time, start, stop);
	printf("time = %f\n",time);
	getchar();
	// Release timing events and the pinned buffer (leaked before).
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFreeHost(a);
	return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <cuda.h>
#define BLOCKSIZE 1024
// Inverse of enc(): shifts the character code down by one.
__device__ char dec(char c) {
	return c - 1;
}
// Caesar-style "encryption": shifts the character code up by one.
__device__ char enc(char c) {
	return c + 1;
}
// One thread per character of an n-char string; thread n writes the
// terminating NUL, threads past n do nothing.
__global__ void decrypt(char *src, char *target, int n) {
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i > n)
		return;
	target[i] = (i == n) ? '\0' : dec(src[i]);
}
// One thread per character of an n-char string; thread n writes the
// terminating NUL, threads past n do nothing.
__global__ void encrypt(char *src, char *target, int n) {
	const int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i > n)
		return;
	target[i] = (i == n) ? '\0' : enc(src[i]);
}
// Seeds the buffer with a fixed message and reports its length.
// s must point to at least 13 writable bytes (strlen + NUL).
void init(char *s, int *n) {
	strcpy(s, "Hello World!");
	*n = strlen(s);
}
// Round-trips a fixed string through the encrypt/decrypt kernels in place
// on the device, printing the string after each step.
// Fixes from review: the original passed ceil()'s double result to
// printf's %d (undefined behaviour) and leaked both s and ds.
int main() {
	char *s, *ds;
	int n;
	s = (char *)malloc(20);
	init(s, &n);
	dim3 block(BLOCKSIZE, 1, 1);
	// Integer ceiling division instead of float ceil(); same value,
	// and an int is what %d expects below.
	int nblocks = (n + BLOCKSIZE - 1) / BLOCKSIZE;
	dim3 grid(nblocks, 1, 1);
	printf("number of blocks = %d\n", nblocks);
	cudaMalloc(&ds, (n + 1)*sizeof(char));
	cudaMemcpy(ds, s, (n+1)*sizeof(char), cudaMemcpyHostToDevice);
	// Shift every character up by one, in place on the device.
	encrypt<<<grid, block>>>(ds, ds, n);
	cudaDeviceSynchronize();
	cudaMemcpy(s, ds, (n+1)*sizeof(char), cudaMemcpyDeviceToHost);
	puts(s);
	// Shift back down, recovering the original message.
	decrypt<<<grid, block>>>(ds, ds, n);
	cudaDeviceSynchronize();
	cudaMemcpy(s, ds, (n+1)*sizeof(char), cudaMemcpyDeviceToHost);
	puts(s);
	printf("\n");
	// Release device and host buffers (both leaked in the original).
	cudaFree(ds);
	free(s);
	return 0;
}
|
7,328 | #include "xorshift.hh"
namespace xorshift
{
	namespace
	{
		// Generator state: four 64-bit words, set by seed().
		std::uint64_t s[4];
	}
	// Derives the four state words from a single 64-bit seed
	// (word k gets (k+1)*1000*x, matching the original initialisation).
	void seed(std::uint64_t x)
	{
		for (int k = 0; k < 4; ++k)
			s[k] = (k + 1) * 1000 * x;
	}
	// Advances the state one step (xorshift with shifts 11/19/8) and
	// returns the new last word.
	std::uint64_t next_u64()
	{
		std::uint64_t tmp = s[0] ^ (s[0] << 11);
		s[0] = s[1];
		s[1] = s[2];
		s[2] = s[3];
		s[3] = s[3] ^ (s[3] >> 19) ^ tmp ^ (tmp >> 8);
		return s[3];
	}
	// Maps a raw 64-bit draw onto [0, 1] at float precision (the divisor
	// is 2^64-1, rounded to float exactly as the original did).
	float next_f32()
	{
		float x = next_u64();
		float div = 0xFFFFFFFFFFFFFFFF;
		return x / div;
	}
	// Fills [begin, end) with successive next_f32() draws.
	void fill(float* begin, float* end)
	{
		for (; begin != end; ++begin)
			*begin = next_f32();
	}
}
|
7,329 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifndef N
#define N 1000000 // 1 << 20
#endif
#define ERROR 0.001
#ifndef THREADS
#define THREADS 256
#endif
// The same code from lecture1 :)
// Wall-clock time in seconds with microsecond resolution.
// Fixes from review: removed the unused local that swallowed
// gettimeofday's return value and the obsolete timezone argument
// (NULL is the documented value for it).
double mysecond(){
	struct timeval tp;
	gettimeofday(&tp, NULL);
	return (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6;
}
// Host SAXPY: y[i] <- A*x[i] + y[i] for the first n elements.
void cpu_vector_add( float * x, float * y, int n, float A) {
	for (int idx = 0; idx != n; ++idx)
		y[idx] = A * x[idx] + y[idx];
}
// Device SAXPY: each in-range thread updates one element, y[i] += A*x[i].
// __restrict__ promises the compiler x and y do not alias.
__global__ void gpu_vector_add(float * __restrict__ x, float * __restrict__ y, int n, float A){
	const int idx = blockIdx.x*blockDim.x + threadIdx.x;
	if (idx >= n)
		return;
	y[idx] = A*x[idx] + y[idx];
}
// SAXPY benchmark: times the GPU version (with and without memcpy) against
// a CPU reference and validates the results element-wise.
// Fixes from review: (1) the kernel is asynchronous, so the "operational"
// time measured only launch overhead — synchronize before t4; (2) abs()
// from <stdlib.h> takes int, which truncated the float difference to 0
// and made the check vacuous; (3) the host buffers leaked.
int main(){
	float *a, *b, *cudaOut;
	double t1, t2, t3, t4, t5, t6;
	printf("Computing SAXPY with %d elements\n", N);
	// Allocate host memory
	a = (float*)malloc(sizeof(float) * N);
	b = (float*)malloc(sizeof(float) * N);
	cudaOut = (float*)malloc(sizeof(float)*N);
	// Initialize arrays
	for(int i = 0; i < N; i++){
		a[i] = 1.0f; b[i] = 2.0f;
		cudaOut[i] = 2.0f;
	}
	// Allocate GPU memory
	float *aCuda, *bCuda;
	cudaMalloc((void **)&aCuda, sizeof(float)*N);
	cudaMalloc((void **)&bCuda, sizeof(float)*N);
	t1 = mysecond();
	// Copy data to GPU
	cudaMemcpy(aCuda, a, sizeof(float)*N, cudaMemcpyHostToDevice);
	cudaMemcpy(bCuda, b, sizeof(float)*N, cudaMemcpyHostToDevice);
	// Fixed thread count; one extra block covers a non-multiple N
	// (the kernel bounds-checks, so padding threads are harmless).
	int threads = THREADS;
	int groups = N/threads + 1;
	t3 = mysecond();
	gpu_vector_add<<<groups, threads>>>(aCuda, bCuda, N, 1.0);
	// Kernel launches are asynchronous: wait before reading the clock.
	cudaDeviceSynchronize();
	t4 = mysecond();
	// Copying back
	cudaMemcpy(cudaOut, bCuda, sizeof(float)*N, cudaMemcpyDeviceToHost);
	t2 = mysecond();
	printf("Computing SAXPY on the GPU in %fs (taking into account memcpy), %fs (operational)... Done!\n", (t2 - t1), (t4 -t3));
	// Freeing cuda resources
	cudaFree(aCuda);
	cudaFree(bCuda);
	t5 = mysecond();
	cpu_vector_add(a, b, N, 1.0);
	t6 = mysecond();
	printf("Computing SAXPY on the CPU in %fs… Done!\n", (t6 - t5));
	printf("Times: %f,%f,%f\n", (t2 -t1), (t4 - t3), (t6 - t5));
	for(int i = 0; i < N; i++){
		// Compare magnitudes in floating point (int abs() truncated).
		float diff = b[i] - cudaOut[i];
		if (diff < 0.0f) diff = -diff;
		if (diff > ERROR)
		{
			printf("Comparing the output for each implementation, it is incorrect at index %d, %f != %f\n",i,b[i], cudaOut[i]);
			exit(1);
		}
	}
	// Release host buffers (leaked in the original).
	free(a); free(b); free(cudaOut);
	return 0;
}
|
7,330 | #include "includes.h"
// Grid-stride copy implementing a "repeat" (tiling) op: decomposes each
// flat output index into three coordinates using outStride0/outStride1,
// then maps it back to an input offset.
// NOTE(review): inIndex folds outIndex0 and outIndex1 together, so the
// input is re-read cyclically — verify against the caller's stride setup.
__global__ void repeat0(float* in, float* out, int outStride0, int outStride1, int outScalarCount) {
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	int stride = gridDim.x * blockDim.x;   // grid-stride loop over outputs
	for (; tid < outScalarCount; tid += stride) {
		int linearIndex = tid;
		int outIndex0 = linearIndex / outStride0;               // coord along dim 0
		linearIndex = linearIndex - outIndex0 * outStride0;
		int outIndex1 = linearIndex / outStride1;               // coord along dim 1
		int outIndex2 = linearIndex - outIndex1 * outStride1;   // innermost offset
		int inIndex = outIndex2 + (outIndex0 + outIndex1) * outStride1;
		out[tid] = in[inIndex];
	}
}
// Kernel skeleton for the pressure-projection velocity update.
// Intentionally a no-op: the implementation has not been written yet.
__global__
void updateVelocities(const float * d_levelset,
                      const float * d_velIn_x,
                      const float * d_velIn_y,
                      float * d_velOut_x,
                      float * d_velOut_y,
                      const float * d_pressure)
{
}
// Host-side overload: launches the (currently empty) kernel with the
// caller's grid/block configuration on the default stream.
void updateVelocities(dim3 blocks,
                      dim3 threads,
                      const float * d_levelset,
                      const float * d_velIn_x,
                      const float * d_velIn_y,
                      float * d_velOut_x,
                      float * d_velOut_y,
                      const float * d_pressure)
{
    updateVelocities<<<blocks,threads>>>(d_levelset,
                                         d_velIn_x,
                                         d_velIn_y,
                                         d_velOut_x,
                                         d_velOut_y,
                                         d_pressure);
}
7,332 | /*
* Copyright (c) 2018 Preferred Networks, Inc. All rights reserved.
*/
#include <cuda_fp16.h>
namespace chainer_trt {
namespace plugin {
// Broadcast copy: for each flat output index, walks the nb_dims stride
// tables to find the corresponding input offset and copies that element.
// blockIdx.y selects the batch item; one thread per output element in x.
// NOTE(review): broadcast dimensions presumably carry stride 0 in
// d_i_strides so they repeat — confirm against the caller's setup.
template <typename T>
__global__ void broadcast_to_kernel(const T* d_src, T* d_dst,
                                    int* d_i_strides, int* d_o_strides,
                                    int in_size, int out_size,
                                    int nb_dims) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < out_size) {
        // calc offset relationship between input & output
        int in_idx = 0;
        int f = idx;
        for(int i = 0; i < nb_dims; i++) {
            in_idx += (f / d_o_strides[i]) * d_i_strides[i];
            f = f % d_o_strides[i];
        }
        d_dst[blockIdx.y * out_size + idx] =
          d_src[blockIdx.y * in_size + in_idx];
    }
}
// Host launcher for broadcast_to_kernel: grid.x covers out_size elements,
// grid.y covers the batch, all on the caller's stream.
// Fix from review: the original used std::ceil on a float quotient even
// though <cmath> is not included here; integer ceiling division is exact
// and removes both the missing-include dependency and the float rounding.
template <typename T>
void apply_broadcast_to(const T* d_src, T* d_dst, int* d_i_strides,
                        int* d_o_strides, int in_size, int out_size,
                        int nb_dims, int batch_size, cudaStream_t stream) {
    const int thread_size = 1024;
    const int block_size = (out_size + thread_size - 1) / thread_size;
    dim3 grid(block_size, batch_size);
    broadcast_to_kernel<<<grid, thread_size, 0, stream>>>(
      d_src, d_dst, d_i_strides, d_o_strides, in_size, out_size, nb_dims);
}
template void apply_broadcast_to(const float*, float*, int*, int*, int, int,
int, int, cudaStream_t);
template void apply_broadcast_to(const __half*, __half*, int*, int*, int,
int, int, int, cudaStream_t);
}
}
|
7,333 | #include <stdio.h>
#include <curand_kernel.h>
// Packs a coordinate pair into one word: y in bits 11 and up, x in the low
// 11 bits. Each coordinate must fit in 11 bits (0..2047).
__host__ __device__ unsigned int XY_TO_INT(int x, int y) {
	return (y << 11) | x;
}
// Unpacks the low 11 bits (the x coordinate) of a packed word.
__host__ __device__ int INT_TO_X(unsigned int v) {
	return v & 0x7FF;
}
// Unpacks bits 11..21 (the y coordinate) of a packed word.
__host__ __device__ int INT_TO_Y(unsigned int v) {
	return (v >> 11) & 0x7FF;
}
// Larger of the two values.
__device__ int get_max(int x,int y){
	return (x > y) ? x : y;
}
// Smaller of the two values.
__device__ int get_min(int x,int y){
	return (x < y) ? x : y;
}
// Patch distance: sum of squared per-channel differences between the
// patch centred at (ax, ay) in images a/aa and the patch centred at
// (bx, by) in b/bb, normalised by the number of pixel positions where
// BOTH patches lie inside the image bounds. Images are row-major with
// `channels` interleaved floats per pixel; patches are patch_size wide.
// NOTE(review): if no position is valid, num_points stays 0 and the
// return divides by zero (after printing "HMM").
__device__ float compute_distance(float *a,
                                  float *aa,
                                  float *b,
                                  float *bb,
                                  int rows,
                                  int cols,
                                  int channels,
                                  int patch_size,
                                  int ax,
                                  int ay,
                                  int bx,
                                  int by){
	int num_points = 0;
	float pixel_sum1 = 0;   // accumulated over a vs b
	float pixel_sum2 = 0;   // accumulated over aa vs bb
	float temp_distance = 0;
	int curr_pix_ax = 0;
	int curr_pix_ay = 0;
	int curr_pix_bx = 0;
	int curr_pix_by = 0;
	for(int y = -patch_size/2 ; y <= patch_size/2 ; y++ ){
		for(int x = -patch_size/2 ; x <= patch_size/2 ; x++){
			curr_pix_ax = ax + x;
			curr_pix_ay = ay + y;
			curr_pix_bx = bx + x;
			curr_pix_by = by + y;
			// Count a position only if both patch pixels are in bounds.
			if ( curr_pix_ax >= 0 && curr_pix_ax < cols && curr_pix_ay >= 0 && curr_pix_ay < rows
			     &&
			     curr_pix_bx >= 0 && curr_pix_bx < cols && curr_pix_by >= 0 && curr_pix_by < rows ){
				for(int ch = 0 ; ch < channels ; ch++){
					temp_distance = a[channels*(curr_pix_ay*cols + curr_pix_ax ) +ch]
					                - b[channels*(curr_pix_by*cols + curr_pix_bx ) +ch] ;
					pixel_sum1 += temp_distance * temp_distance;
					temp_distance = aa[channels*(curr_pix_ay*cols + curr_pix_ax ) +ch]
					                - bb[channels*(curr_pix_by*cols + curr_pix_bx ) +ch] ;
					pixel_sum2 += temp_distance * temp_distance;
				}
				num_points ++;
			}
		}
	}
	if(num_points ==0){
		printf("HMM");
	}
	return (pixel_sum1+pixel_sum2) /(float) num_points;
}
// Evaluates candidate match offset (bx_new, by_new) for pixel (x, y) and,
// if its patch distance improves on *best_d, updates the best_x/best_y/
// best_d triple in place. The nnf/nnd arrays are accepted but not touched
// here — the caller writes the best triple back itself.
__device__ void compare_and_update(float *a,
                                   float *aa,
                                   float *b,
                                   float *bb,
                                   int rows,
                                   int cols ,
                                   int channels,
                                   int patch_size,
                                   int *nnf,
                                   float *nnd,
                                   int x,
                                   int y,
                                   int bx_new,
                                   int by_new,
                                   int *best_x,
                                   int *best_y,
                                   float *best_d)
{
	float dist_new = compute_distance(a,aa,b,bb,rows,cols,channels,patch_size,x,y,bx_new,by_new);
	if(dist_new < *best_d){
		*best_d = dist_new;
		*best_y = by_new;
		*best_x = bx_new;
	}
}
// Uniform random float drawn from the thread's curand state
// (curand_uniform returns values in (0, 1]).
__device__ float get_rand(curandState *state){
	return curand_uniform(state);
}
// Seeds this thread's curand state from its 2-D grid coordinates.
// NOTE(review): curand_init(seed=i, sequence=j, ...) varies the SEED per
// thread; the usual pattern is one fixed seed with a per-thread sequence
// number — confirm whether stream correlation matters here.
__device__ void InitcuRand(curandState *state) {//random number in cuda, between 0 and 1
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	int j = threadIdx.y + blockIdx.y * blockDim.y;
	curand_init(i, j, 0, state);
}
// PatchMatch iteration kernel: one thread per pixel of image a (row from
// blockIdx.x/threadIdx.x, col from blockIdx.y/threadIdx.y).
// nnf holds interleaved (x, y) match coordinates per pixel, nnd the
// current patch distances; nnf_t is accepted but unused in this kernel.
// Each of `iters` iterations:
//   1) propagation — tries the matches of the four neighbours at
//      decreasing jump distances (up/down/left/right), shifted back by
//      the jump so they refer to this pixel;
//   2) random search — samples candidates around the current best match
//      with an exponentially shrinking radius (rs_start, rs_start/2, ...).
// The best triple is written back to nnf/nnd after each candidate batch.
extern "C"
__global__ void patch_match(float *a,
                            float *aa,
                            float *b,
                            float *bb,
                            int *nnf,
                            unsigned int *nnf_t,
                            float *nnd,
                            int rows,
                            int cols ,
                            int channels,
                            int patch_size,
                            int iters,
                            int jump_size,
                            int search_radius)
{
	int xmin, xmax, ymin, ymax;
	int row = blockIdx.x * blockDim.x + threadIdx.x;
	int col = blockIdx.y * blockDim.y + threadIdx.y;
	if(row >= rows || col >= cols)
		return;
	curandState state;
	InitcuRand(&state);
	// Distance of the initial match supplied by the caller.
	int init_x = nnf[2*(row*cols + col) + 0 ];
	int init_y = nnf[2*(row*cols + col) + 1 ];
	nnd[row*cols + col] = compute_distance(a,aa,b,bb,rows,cols,channels,patch_size,col,row,init_x,init_y) ;
	for(int i = 0 ; i < iters; i++){
		int best_x = nnf[2*(row*cols + col) + 0 ];
		int best_y = nnf[2*(row*cols + col) + 1 ];
		float best_d = nnd[row*cols + col];
		for(int jump = jump_size ; jump >0 ; jump /=2){
			//test up
			if (row - jump >=0){
				int test_x = nnf[2*((row-jump)*cols + col) + 0 ];
				int test_y = nnf[2*((row-jump)*cols + col) + 1 ] + jump;
				if(test_y < rows)
				{
					compare_and_update(a,
					                   aa,
					                   b,
					                   bb,
					                   rows,
					                   cols ,
					                   channels,
					                   patch_size,
					                   nnf,
					                   nnd,
					                   col,
					                   row,
					                   test_x,
					                   test_y,
					                   &best_x,
					                   &best_y,
					                   &best_d);
					nnf[2*(row*cols + col) + 0] = best_x;
					nnf[2*(row*cols + col) + 1] = best_y;
					nnd[1*(row*cols + col) ] = best_d;
				}
			}
			//test down
			if (row + jump < rows){
				int test_x = nnf[2*((row+jump)*cols + col) + 0 ];
				int test_y = nnf[2*((row+jump)*cols + col) + 1 ] - jump;
				if(test_y >=0)
				{
					compare_and_update(a,
					                   aa,
					                   b,
					                   bb,
					                   rows,
					                   cols ,
					                   channels,
					                   patch_size,
					                   nnf,
					                   nnd,
					                   col,
					                   row,
					                   test_x,
					                   test_y,
					                   &best_x,
					                   &best_y,
					                   &best_d);
					nnf[2*(row*cols + col) + 0] = best_x;
					nnf[2*(row*cols + col) + 1] = best_y;
					nnd[1*(row*cols + col) ] = best_d;
				}
			}
			//test left
			if (col - jump >=0){
				int test_x = nnf[2*(row*cols + col-jump) + 0 ] +jump;
				int test_y = nnf[2*(row*cols + col-jump) + 1 ];
				if(test_x < cols)
				{
					compare_and_update(a,
					                   aa,
					                   b,
					                   bb,
					                   rows,
					                   cols ,
					                   channels,
					                   patch_size,
					                   nnf,
					                   nnd,
					                   col,
					                   row,
					                   test_x,
					                   test_y,
					                   &best_x,
					                   &best_y,
					                   &best_d);
					nnf[2*(row*cols + col) + 0] = best_x;
					nnf[2*(row*cols + col) + 1] = best_y;
					nnd[1*(row*cols + col) ] = best_d;
				}
			}
			//test right
			if (col + jump < cols){
				int test_x = nnf[2*(row*cols + col +jump) + 0 ] -jump;
				int test_y = nnf[2*(row*cols + col +jump) + 1 ];
				if(test_x >=0)
				{
					compare_and_update(a,
					                   aa,
					                   b,
					                   bb,
					                   rows,
					                   cols ,
					                   channels,
					                   patch_size,
					                   nnf,
					                   nnd,
					                   col,
					                   row,
					                   test_x,
					                   test_y,
					                   &best_x,
					                   &best_y,
					                   &best_d);
					nnf[2*(row*cols + col) + 0] = best_x;
					nnf[2*(row*cols + col) + 1] = best_y;
					nnd[1*(row*cols + col) ] = best_d;
				}
			}
			// Random search: sample a candidate in a window of halving
			// radius around the current best match.
			int rs_start = search_radius;
			if (rs_start > get_max(cols, rows)) {
				rs_start = get_max(cols, rows);
			}
			for (int mag = rs_start; mag >= 1; mag /= 2) {
				xmin = get_max(best_x - mag, 0), xmax = get_min(best_x + mag + 1, cols);
				ymin = get_max(best_y - mag, 0), ymax = get_min(best_y + mag + 1, rows);
				int test_x = xmin + (int)(get_rand(&state)*(xmax - xmin)) % (xmax - xmin);
				int test_y = ymin + (int)(get_rand(&state)*(ymax - ymin)) % (ymax - ymin);
				compare_and_update(a,
				                   aa,
				                   b,
				                   bb,
				                   rows,
				                   cols ,
				                   channels,
				                   patch_size,
				                   nnf,
				                   nnd,
				                   col,
				                   row,
				                   test_x,
				                   test_y,
				                   &best_x,
				                   &best_y,
				                   &best_d);
			}
			nnf[2*(row*cols + col) + 0] = best_x;
			nnf[2*(row*cols + col) + 1] = best_y;
			nnd[1*(row*cols + col) ] = best_d;
			__syncthreads();
		}
	}
}
|
7,334 | // includes, system
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
__device__ void wasteTime(int n)
// Busy-work helper: burns time proportional to n by evaluating sin() in a
// loop. NOTE(review): `temp` is overwritten each iteration and never read,
// so an optimizing compiler may remove the loop entirely — confirm the
// benchmark is built at -O0 or make the result observable.
{
	float temp;
	int i;
	for( i = 0; i < n; i++ )
	{
		temp = sin((float)i * 3.14f);
	}
}
__global__ void notDivergent(int n)
// Control case for the divergence experiment: every thread takes the same
// path, so warps never diverge. `temp` is declared but unused.
{
	// int tid = blockIdx.x * blockDim.x + threadIdx.x;
	float temp;
	//waste some time
	wasteTime(n);
}
__global__ void divergent(int n)
// Divergence case: even/odd threads take different branches of the if.
// NOTE(review): both branches call wasteTime(n) with identical arguments,
// so the compiler may merge them and eliminate the divergence — verify
// the two kernels actually time differently on the target build.
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	float temp;
	if ( tid % 2 == 0 )
		wasteTime(n);
	else
		wasteTime(n);
}
// Program main
/////////////////////////////////////////////////////////////////////
// Times the divergent and non-divergent kernels back to back with CUDA
// events. Fix from review: the events were never destroyed.
int main( int argc, char** argv)
{
	const int N = 10000, threads = 10000;
	cudaEvent_t start, stop;
	float time;
	int nBlocks, nThreads;
	nThreads = 512;
	nBlocks = (threads + nThreads - 1)/nThreads;   // ceil-div over 10000 threads
	// Set up the timing events and begin timing
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	// The divergent kernel
	divergent<<<nBlocks, nThreads>>>(N);
	// Stop timing (cudaEventSynchronize waits for the kernel to finish)
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	// Compute the elapsed time
	cudaEventElapsedTime(&time, start, stop);
	printf("divergent kernel: %f milliseconds\n", time);
	// Begin new timing
	cudaEventRecord(start, 0);
	// The non-divergent kernel
	notDivergent<<<nBlocks, nThreads>>>(N);
	// Stop timing
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	// Compute the kernel time
	cudaEventElapsedTime(&time, start, stop);
	printf("non-divergent kernel: %f milliseconds\n", time);
	// Release the timing events (leaked in the original)
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	return 0;
}
|
7,335 | #include "includes.h"
// Dot product of two vectors that fit in a single block: each thread
// multiplies one pair into dynamic shared memory, then thread 0 serially
// sums all products into *answer.
// Launch with ONE block of numElements threads and
// numElements * sizeof(double) bytes of dynamic shared memory.
__global__ void kernel_dot_product(const double * vec1, const double * vec2, int numElements, double * answer)
{
	extern __shared__ double products[]; // one element per thread
	int i = threadIdx.x; // numElements assumed to fit into one block
	products[i] = vec1[i] * vec2[i];
	__syncthreads();
	if (i == 0) {
		// Serial O(n) reduction by a single thread — simple but slow.
		double sum = 0;
		for (int j = 0; j < numElements; ++j) {
			sum += products[j];
		}
		*answer = sum;
	}
}
7,336 | #include "includes.h"
// Block sum-reduction with 4x unrolled global loads into static shared
// memory: each block sums 4*blockDim.x input ints and writes one partial
// sum per block to g_odata. Requires blockDim.x == DIM and a power-of-two
// block size of at least 64.
// Fix from review: the original staged the four loads in FLOAT temporaries
// even though the data is int, silently rounding values above 2^24;
// accumulate in int instead.
__global__ void reduceSmemUnroll(int *g_idata, int *g_odata, unsigned int n)
{
    // static shared memory
    __shared__ int smem[DIM];
    // set thread ID
    unsigned int tid = threadIdx.x;
    // global index: 4 input tiles per thread block
    unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
    // unrolling 4 blocks
    int localSum = 0;
    if (idx + 3 * blockDim.x < n)
    {
        int a1 = g_idata[idx];
        int a2 = g_idata[idx + blockDim.x];
        int a3 = g_idata[idx + 2 * blockDim.x];
        int a4 = g_idata[idx + 3 * blockDim.x];
        localSum = a1 + a2 + a3 + a4;
    }
    smem[tid] = localSum;
    __syncthreads();
    // in-place reduction in shared memory
    if (blockDim.x >= 1024 && tid < 512) smem[tid] += smem[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256) smem[tid] += smem[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128) smem[tid] += smem[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64) smem[tid] += smem[tid + 64];
    __syncthreads();
    // unrolling the final warp; volatile keeps each access in shared memory
    if (tid < 32)
    {
        volatile int *vsmem = smem;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = smem[0];
}
7,337 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/copy.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
#include <time.h>
#include <random>
#define RUNS 20
using namespace std;
// Multiplier functor
// Squaring functor, usable with thrust::transform_reduce to compute a
// sum of squares on host or device.
struct multiplier{
	__host__ __device__
	double operator()(double x) {
		return (x * x);
	}
};
// Thrust benchmark: times exclusive_scan and sort on (10 + argv[1]) Mi
// elements, each with one warm-up run followed by RUNS timed runs.
// Fixes from review: the four CUDA events were never destroyed, and large
// blocks of dead commented-out code (an earlier transform_reduce benchmark
// and its verification) have been condensed into notes.
int main (int argc, char *argv[]) {
    // Basic error check
    if (argc != 2) {
        printf("Usage: %s N", argv[0]);
        exit(1);
    }
    // Select device (hard-coded to the second GPU — TODO confirm intent)
    cudaSetDevice(1);
    // Get size
    int size = 1024 * 1024 * (10 + atoi(argv[1]));
    // Initialize host vectors (h_data fed the removed reduction benchmark;
    // kept so the rand() stream, and thus the data, is unchanged)
    thrust::host_vector<double> h_data(size);
    thrust::host_vector<int> h_scan(size);
    thrust::host_vector<int> h_sort(size);
    // Timing variables
    float time_sort, time_scan;
    cudaEvent_t start_scan, end_scan;
    cudaEvent_t start_sort, end_sort;
    cudaEventCreate(&start_scan);
    cudaEventCreate(&end_scan);
    cudaEventCreate(&start_sort);
    cudaEventCreate(&end_sort);
    // Ready host and device data
    srand(time(NULL));
    thrust::generate(h_data.begin(), h_data.end(), rand);
    // Exclusive scan: warm-up, then RUNS timed in-place scans
    std::default_random_engine rng( std::rand() );
    std::uniform_int_distribution<int> rnd_int;
    thrust::generate(h_scan.begin(), h_scan.end(), [&]() { return rnd_int(rng); });
    thrust::device_vector<int> d_scan = h_scan;
    thrust::exclusive_scan(d_scan.begin(), d_scan.end(), d_scan.begin());
    cudaEventRecord(start_scan, NULL);
    for (int i=0; i<RUNS; i++)
        thrust::exclusive_scan(d_scan.begin(), d_scan.end(), d_scan.begin());
    cudaEventRecord(end_scan, NULL);
    cudaEventSynchronize(end_scan);
    cudaEventElapsedTime(&time_scan, start_scan, end_scan);
    cout << "Scan time: " << time_scan/RUNS << " ms"<< endl;
    double time_sec = time_scan / RUNS / 1e3;
    cout << "Scan N:" << size << "\tkeys/sec: " << size / time_sec << endl;
    // Sort: warm-up, then RUNS timed sorts of the already-sorted data
    thrust::generate(h_sort.begin(), h_sort.end(), [&]() { return rnd_int(rng); });
    thrust::device_vector<int> d_sort = h_sort;
    thrust::sort(d_sort.begin(), d_sort.end());
    cudaEventRecord(start_sort, NULL);
    for (int i=0; i<RUNS; i++)
        thrust::sort(d_sort.begin(), d_sort.end());
    cudaEventRecord(end_sort, NULL);
    cudaEventSynchronize(end_sort);
    cudaEventElapsedTime(&time_sort, start_sort, end_sort);
    cout << "Sort time: " << time_sort/RUNS << " ms"<< endl;
    time_sec = time_sort / RUNS / 1e3;
    cout << "Sort N:" << size << "\tkeys/sec: " << size / time_sec << endl;
    // Destroy the timing events (leaked in the original)
    cudaEventDestroy(start_scan);
    cudaEventDestroy(end_scan);
    cudaEventDestroy(start_sort);
    cudaEventDestroy(end_sort);
    return 0;
}
|
7,338 | #include <stdio.h>
__device__ char x = 0;
// Deliberately racy kernel for race-detector testing: built with -DWW the
// launches write the device global x (write-write race); with -DRW they
// read-modify-write it. With neither macro the body is empty.
__global__ void racey_kernel() {
#ifdef WW
	x = threadIdx.x;
#elif RW
	volatile char c = x;
	x = c + 1;
#endif
}
// Launches the racy kernel twice on the default stream, then resets the
// device (which also waits for outstanding work).
int main() {
	// sanity check test, would have been too messy to shoehorn into two_streams.cu
	racey_kernel<<<1,1>>>();
	racey_kernel<<<1,1>>>();
	cudaDeviceReset();
	return 0;
}
|
7,339 | #include <stdio.h>
#include <cuda_runtime.h>
// Floor of log base 2 for i >= 1: counts right-shifts until the value
// empties (returns 0 for i <= 1).
int log2(int i)
{
	int shifts = 0;
	while (i >>= 1)
		++shifts;
	return shifts;
}
// Mirrors the lowest `bits` bits of w (bit 0 <-> bit bits-1, and so on);
// higher bits of w are ignored.
int bit_reverse(int w, int bits)
{
	int out = 0;
	for (int pos = 0; pos < bits; pos++)
	{
		if (w & (1 << pos))
			out |= 1 << (bits - 1 - pos);
	}
	return out;
}
/*
Using device 0:
NVIDIA Tegra X1; global mem: 2076037120B; compute v5.3; clock: 921600 kHz
Running naive histo
bin 0: count 7
bin 1: count 7
bin 2: count 6
bin 3: count 6
bin 4: count 7
bin 5: count 6
bin 6: count 7
bin 7: count 6
bin 8: count 7
bin 9: count 7
bin 10: count 7
bin 11: count 7
bin 12: count 7
bin 13: count 6
bin 14: count 6
bin 15: count 8
incorrect due to race condition in d_bins[myBin]++
this does not happen in serial code as each thread runs separately
e.g. BIN with value 5, and thread 1 and 2 wants to increase it
- thread 1 reads 5, increase to 6, write 6 back to bin
- thread 2 reads 5, increase to 6, write 6 back to bin
- but actual answer is 7
*/
// Intentionally racy histogram (see the block comment above): the
// unsynchronized d_bins[myBin]++ loses counts when threads collide.
// No bounds guard: launch exactly one thread per input element.
__global__ void naive_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
	int myId = threadIdx.x + blockDim.x * blockIdx.x;
	int myItem = d_in[myId];
	int myBin = myItem % BIN_COUNT;
	d_bins[myBin]++;   // data race — kept on purpose for the demo
}
/*
Using device 0:
NVIDIA Tegra X1; global mem: 2076037120B; compute v5.3; clock: 921600 kHz
Running simple histo
bin 0: count 4096
bin 1: count 4096
bin 2: count 4096
bin 3: count 4096
bin 4: count 4096
bin 5: count 4096
bin 6: count 4096
bin 7: count 4096
bin 8: count 4096
bin 9: count 4096
bin 10: count 4096
bin 11: count 4096
bin 12: count 4096
bin 13: count 4096
bin 14: count 4096
bin 15: count 4096
*/
// Race-free histogram: each thread atomically increments its item's bin.
// No bounds guard: launch exactly one thread per input element.
__global__ void simple_histo(int *d_bins, const int *d_in, const int BIN_COUNT)
{
	const int gid = threadIdx.x + blockDim.x * blockIdx.x;
	const int bin = d_in[gid] % BIN_COUNT;
	atomicAdd(&d_bins[bin], 1);
}
// Driver: fills the input with bit-reversed indices (a uniform spread over
// the 16 bins), runs either the racy or the atomic histogram kernel
// (argv[1]: 0 = naive, 1 = simple), and prints the resulting bin counts.
int main(int argc, char **argv)
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        fprintf(stderr, "error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp devProps;
    // cudaSuccess == 0, so the device banner is printed only on success
    if (cudaGetDeviceProperties(&devProps, dev) == 0)
    {
        printf("Using device %d:\n", dev);
        printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
               devProps.name, (int)devProps.totalGlobalMem,
               (int)devProps.major, (int)devProps.minor,
               (int)devProps.clockRate);
    }
    const int ARRAY_SIZE = 65536;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
    const int BIN_COUNT = 16;
    const int BIN_BYTES = BIN_COUNT * sizeof(int);
    // generate the input array on the host
    // NOTE(review): 65536 ints = 256 KiB on the stack; fine under typical
    // 8 MiB limits but consider heap allocation for portability.
    int h_in[ARRAY_SIZE];
    for(int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = bit_reverse(i, log2(ARRAY_SIZE));
    }
    int h_bins[BIN_COUNT];
    for(int i = 0; i < BIN_COUNT; i++) {
        h_bins[i] = 0;
    }
    // declare GPU memory pointers
    int * d_in;
    int * d_bins;
    // allocate GPU memory
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_bins, BIN_BYTES);
    // transfer the arrays to the GPU (h_bins zeroes the device bins)
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_bins, h_bins, BIN_BYTES, cudaMemcpyHostToDevice);
    int whichKernel = 0;
    if (argc == 2) {
        whichKernel = atoi(argv[1]);
    }
    // launch the kernel (64 threads/block; ARRAY_SIZE divides evenly)
    switch(whichKernel) {
    case 0:
        printf("Running naive histo\n");
        naive_histo<<<ARRAY_SIZE / 64, 64>>>(d_bins, d_in, BIN_COUNT);
        break;
    case 1:
        printf("Running simple histo\n");
        simple_histo<<<ARRAY_SIZE / 64, 64>>>(d_bins, d_in, BIN_COUNT);
        break;
    default:
        fprintf(stderr, "error: ran no kernel\n");
        exit(EXIT_FAILURE);
    }
    // copy back the sum from GPU (blocking memcpy also synchronizes the launch)
    cudaMemcpy(h_bins, d_bins, BIN_BYTES, cudaMemcpyDeviceToHost);
    for(int i = 0; i < BIN_COUNT; i++) {
        printf("bin %d: count %d\n", i, h_bins[i]);
    }
    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_bins);
    return 0;
} |
7,340 | //optimization homework #4 cs 677 Theodore Jagodits
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include "string.h"
#include <iostream>
#define DEFAULT_ROW 128
#define DEFAULT_COL 128
#define TILE_SIZE 16
#define MAX_CONST 16000
//add constant memory
__constant__ float c_inp[MAX_CONST];
//tiling complete
// Tiled kernel over inp2 (homework: "unknown algorithm").
// NOTE(review): this kernel appears unfinished/broken as submitted:
//  - `temp` is accumulated but never stored; every in-range output is the
//    constant 30.
//  - the tile is written at [threadIdx.y * TILE_SIZE + threadIdx.x] but read
//    at [threadIdx.x * TILE_SIZE + tile] (row/column roles mixed).
//  - no __syncthreads() after the inner loop, so the next iteration's loads
//    race with the current reads.
//  - c_inp is indexed ty*row + pos_x even though pos_x ranges over columns —
//    only consistent when row == col.
// Left byte-identical pending clarification of the intended algorithm.
__global__ void unknown_algo_inp2(float *inp2, float *result, int row, int col, int num_tiles){
    //add shared memory
    __shared__ float temp_shared_2[TILE_SIZE * TILE_SIZE];
    //get row col idx
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * blockDim.y + threadIdx.y;
    float temp = 0.0f;
    unsigned int curr = 0, pos_x;
    for(unsigned int count = 0; count < num_tiles; count++){
        // column handled by this thread in the current tile
        pos_x = count * TILE_SIZE + threadIdx.x;
        // bounds-checked tile load from constant memory
        if(pos_x < col && tx < row){
            temp_shared_2[threadIdx.y * TILE_SIZE + threadIdx.x] = c_inp[ty*row + pos_x];
        }
        __syncthreads();
        for(unsigned int tile = 0; tile < TILE_SIZE; tile++){
            if(curr <= tx){
                temp += temp_shared_2[threadIdx.x * TILE_SIZE + tile];
            }
            curr ++;
        }
    }
    if(ty < row && tx < col){
        result[ty*row + tx] = 30;
    }
}
// Tiled kernel over the vector inp1.
// For each output (tx, ty) it appears to accumulate
//   sum over idx <= ty of inp1[idx] * inp1[row*tx + ty]
// (a prefix sum of inp1 scaled by a per-thread multiplier) — TODO confirm
// against the assignment spec.
// result is laid out result[tx*row + ty]; launch with TILE_SIZE x TILE_SIZE
// blocks and num_tiles = ceil(row / TILE_SIZE).
__global__ void unknown_algo_inp1(float *inp1, float *result, int row, int col, int num_tiles){
    // shared tile of inp1 values (one copy per threadIdx.x row)
    __shared__ float temp_shared_1[TILE_SIZE][TILE_SIZE];
    // global row/col index of this thread
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * blockDim.y + threadIdx.y;
    // Fix: initialize unconditionally. The original set temp/local only when
    // tx < row && ty < row, yet the final store is guarded by ty < col, so a
    // thread with row <= ty < col stored an uninitialized temp.
    float temp = 0.0f;
    float local = 0.0f;
    unsigned int curr = 0, pos_y;
    // preload the per-thread multiplier
    if(tx < row && ty < row){
        local = inp1[row*tx + ty];
    }
    for(unsigned int count = 0; count < num_tiles; count++){
        pos_y = count * TILE_SIZE + threadIdx.y;
        // load one tile of inp1 into shared memory
        if(pos_y < col && tx < row){
            temp_shared_1[threadIdx.x][threadIdx.y] = inp1[pos_y];
        }
        __syncthreads();
        for(unsigned int tile = 0; tile < TILE_SIZE; tile++){
            if(curr <= ty){
                temp += temp_shared_1[threadIdx.x][tile] * local;
            }
            curr++;
        }
        // Fix: barrier before the next iteration overwrites the tile.
        // Without it, fast threads reload shared memory while slower threads
        // in the block are still reading it (data race on temp_shared_1).
        __syncthreads();
    }
    if(tx < row && ty < col){
        result[tx*row + ty] = temp;
    }
}
// CPU reference for the homework kernels.
// For each row i: temp[i] becomes the running prefix sum of inp2's row, and
// result[i*row + j] = (prefix sum up to j) + sum_k inp1[j] * inp1[k].
void cpu_v(float *inp1, float *inp2, float *result, int row, int col, float *temp){
    for(int i = 0 ; i < row; i++){
        float running = 0.0f;
        for(int j = 0; j < col; j++){
            running += inp2[i*row + j];
            float cell = running;
            for(int k = 0; k < col; k++){
                cell += inp1[j] * inp1[k];
            }
            result[i*row + j] = cell;
        }
        temp[i] = running;
    }
}
// Counts elementwise mismatches between two row*col float buffers
// (exact float comparison, matching the original; both buffers are indexed
// with the same i*row + j convention used throughout this file).
int compare_res(float *d_res, float *h_res, int row, int col){
    int mismatches = 0;
    for(int i = 0; i < row; i++){
        for(int j = 0; j < col; j++){
            if(d_res[i*row + j] != h_res[i*row + j]) mismatches += 1;
        }
    }
    return mismatches;
}
// Driver: allocates buffers, uploads inp2 into constant memory, times the
// GPU kernel against the CPU reference, and dumps both matrices.
int main( int argc, char **argv ){
    int row = DEFAULT_ROW;
    int col = DEFAULT_COL;
    if(argc == 3){
        row = atoi(argv[1]);
        col = atoi(argv[2]);
    }
    //create vars
    int input1_bytes = col * sizeof(float);
    int num_bytes = row * col * sizeof(float);
    //event timers
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    clock_t start_cpu, end_cpu;
    //malloc device
    // NOTE(review): these three host malloc()s are immediately overwritten
    // by the cudaMalloc()s below, so the host blocks are leaked.
    float *d_input1 = (float *) malloc(input1_bytes);
    float *d_input2 = (float *) malloc(num_bytes);
    float *d_result = (float *) malloc(num_bytes);
    //malloc host
    float *h_input1 = (float *) malloc(input1_bytes);
    float *h_input2 = (float *) malloc(num_bytes);
    float *h_result = (float *) malloc(num_bytes);
    //malloc test
    float *temp = (float *) malloc(row* sizeof(float));
    float *cpu_res = (float *) malloc(num_bytes);
    //cuda malloc
    cudaMalloc(&d_input1, input1_bytes);
    cudaMalloc(&d_input2, num_bytes);
    cudaMalloc(&d_result, num_bytes);
    //put in data
    // NOTE(review): indexed row*o + p while p ranges over columns; only
    // consistent with the kernels' ty*row indexing when row == col.
    for(int o = 0; o < row; o++){
        for(int p = 0; p < col; p++){
            h_input2[row * o + p] = 1.0f;
        }
    }
    for(int i = 0; i < col; i++){
        h_input1[i] = 1.0f;
    }
    //copy over memory
    cudaMemcpy(d_input1, h_input1, input1_bytes, cudaMemcpyHostToDevice);
    //cudaMemcpy(d_input2, h_input2, num_bytes, cudaMemcpyHostToDevice);
    // NOTE(review): at the default 128x128, num_bytes = 65536 B, but c_inp
    // holds only MAX_CONST (16000) floats = 64000 B — this copy overruns the
    // constant buffer; check the returned cudaError_t.
    cudaMemcpyToSymbol(c_inp, h_input2, num_bytes);
    //declare block and grid size for kernel
    int block_size = TILE_SIZE;
    //make grids x y
    int grid_x = (int)ceil((float)row/block_size);
    int grid_y = (int)ceil((float)col/block_size);
    dim3 dim_grid (grid_x, grid_y);
    dim3 dim_block (block_size, block_size);
    //start timer
    cudaEventRecord(start);
    //run kernel on inp2
    unknown_algo_inp2<<< dim_grid, dim_block >>> (d_input2, d_result, row, col, grid_x);
    //inp1
    //unknown_algo_inp1<<< dim_grid, dim_block >>> (d_input1, d_result, row, col, grid_x);
    //end timer
    cudaEventRecord(stop);
    // Copy result back to host
    cudaMemcpy(h_result, d_result, num_bytes, cudaMemcpyDeviceToHost);
    //synchronize https://devblogs.nvidia.com/how-implement-performance-metrics-cuda-cc/
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    //cpu version
    start_cpu = clock();
    cpu_v(h_input1, h_input2, cpu_res, row, col, temp);
    end_cpu = clock();
    // NOTE(review): unknown_algo_inp2 writes the constant 30, so a large
    // error count against the CPU reference is expected here.
    int error_count = compare_res(h_result, cpu_res, row, col);
    printf("error count: %d\nGPU time for execution: %lf ms\nCPU time for execution: %lf ms\n", error_count, milliseconds, ((float)((end_cpu-start_cpu)*1000))/CLOCKS_PER_SEC);
    printf("gridx: %d gridy: %d\n", grid_x, grid_y);
    //print error check
    for(int i = 0; i < row; i++){
        printf("%d. ", i);
        for(int j = 0; j < col; j++){
            printf("%d ", (int)h_result[i*row + j]);
        }
        printf("\n");
    }
    for(int i = 0; i < row; i++){
        printf("%d. ", i);
        for(int j = 0; j < col; j++){
            printf("%d ", (int)cpu_res[i*row + j]);
        }
        printf("\n");
    }
    //free all vars
    // NOTE(review): cpu_res and the two events are never released; the
    // host blocks first assigned to d_input* (see above) are also leaked.
    free(h_input1);
    free(h_input2);
    free(h_result);
    free(temp);
    cudaFree(d_input1);
    cudaFree(d_input2);
    cudaFree(d_result);
    return 0;
} |
7,341 | #include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#define gray_t double
#define PI 3.1415926535897
#define mymax(x, y) ((x) < (y) ? (y) : (x))
#define mymin(x, y) ((x) > (y) ? (y) : (x))
#define myabs(x) ((x) < 0? (-(x)) : (x))
// Check whether a cuda* API call returned an error (or a prior error is pending)
#define checkCudaErrors(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code:%d, reason: %s \n", error, cudaGetErrorString(error)); \
exit(1); \
} \
}
// from CUDA7.pdf
// Computes each block's internal prefix sum and accumulates each block's total
// (the last value of the block-local scan) into the later blocks' offsets.
// (from CUDA7.pdf)
// Hillis–Steele inclusive scan within each block; thread 0 of each block
// then atomicAdd's the block total into block_sums[i] for every LATER block
// i, so block_sums ends up holding the exclusive prefix of block totals.
// Requires blockDim.x * sizeof(gray_t) bytes of dynamic shared memory.
// NOTE(review): no bounds guard — when the data length is not a multiple of
// blockDim.x the last block reads past the end of `data`; confirm callers
// size the grid exactly. block_sums must be zeroed by the caller (cudaMalloc
// does not zero). atomicAdd on double (gray_t) requires SM60+.
__global__ void scan(gray_t* out, gray_t* block_sums, gray_t* data) {
    extern __shared__ gray_t s_data[];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    s_data[threadIdx.x] = data[tid];
    for (int stride = 1; stride < blockDim.x; stride <<= 1) {
        __syncthreads();
        // read the neighbour before the write phase, separated by barriers
        gray_t val = (threadIdx.x >= stride) ? s_data[threadIdx.x - stride] : 0;
        __syncthreads();
        s_data[threadIdx.x] += val;
    }
    out[tid] = s_data[threadIdx.x];
    // propagate this block's total into every subsequent block's offset
    if (threadIdx.x == 0) {
        for (int i = blockIdx.x + 1; i < gridDim.x; ++i) {
            atomicAdd(&block_sums[i], s_data[blockDim.x - 1]);
        }
    }
}
// Adds the block-sum prefix to every element of the block-local prefix sum.
// Completes the global scan: adds this block's prefix offset
// (block_sums[blockIdx.x]) to every element of the block-local scan result.
__global__ void scan_update(gray_t* out, gray_t* block_sums) {
    __shared__ gray_t offset;
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // one thread loads the offset; the block shares it via shared memory
    if (threadIdx.x == 0) {
        offset = block_sums[blockIdx.x];
    }
    __syncthreads();
    out[gid] += offset;
}
// Ceiling division: number of chunks of size `threadnum` needed to cover `total`.
int diveup(int total, int threadnum){
    int quotient = total / threadnum;
    int remainder = total % threadnum;
    return remainder ? quotient + 1 : quotient;
}
// Exercises the two-pass GPU scan (scan + scan_update) on a normalised
// Gaussian half-kernel and prints GPU-vs-CPU prefix-sum differences
// (all values printed should be ~0).
void test_scan() {
    double sigma = 150;
    // nearest odd kernel size for this sigma; mid = index of the centre tap
    int filter_size = int(sigma * 3 * 2 + 1) | 1;
    int mid = filter_size >> 1;
    gray_t* filter = new gray_t[mid + 1];
    gray_t* pre_filter = new gray_t[mid + 1]; // pre_filter[i] = sum(filter[0..i]) from the GPU
    gray_t* cpu_pre_filter = new gray_t[mid + 1];
    double total = 0;
    for (int i = 0; i < mid + 1; ++i) {
        filter[i] = 1 / (sqrt(2 * PI) * sigma) * exp((- (i - mid) * (i - mid)) / (2 * sigma * sigma));
        total += 2 * filter[i];
    }
    total -= filter[mid]; // centre tap was doubled; count it once
    for (int i = 0; i < mid + 1; ++i) {
        filter[i] /= total;
    }
    gray_t* device_filter;
    checkCudaErrors(cudaMalloc((void **) &device_filter, sizeof(gray_t) * (mid + 1)));
    checkCudaErrors(cudaMemcpy(device_filter, filter, sizeof(gray_t) * (mid + 1), cudaMemcpyHostToDevice));
    // scan filter into pre_filter on the device
    int block_num = diveup(mid + 1, 32);
    gray_t* device_pre_filter;
    gray_t* block_sums;
    checkCudaErrors(cudaMalloc((void **) &block_sums, sizeof(gray_t) * (block_num)));
    // Fix: scan() accumulates into block_sums with atomicAdd, but cudaMalloc
    // does not zero memory — the original started the offsets from garbage.
    checkCudaErrors(cudaMemset(block_sums, 0, sizeof(gray_t) * (block_num)));
    checkCudaErrors(cudaMalloc((void **) &device_pre_filter, sizeof(gray_t) * (mid + 1)));
    scan<<<block_num, 32, sizeof(gray_t)*32>>>(device_pre_filter, block_sums, device_filter);
    checkCudaErrors(cudaDeviceSynchronize());
    scan_update<<<block_num, 32>>>(device_pre_filter, block_sums);
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cudaMemcpy(pre_filter, device_pre_filter, sizeof(gray_t) * (mid + 1), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(device_filter));
    checkCudaErrors(cudaFree(block_sums));
    checkCudaErrors(cudaFree(device_pre_filter));
    // CPU reference prefix sum
    cpu_pre_filter[0] = filter[0];
    for (int i = 1; i < mid + 1; ++i) {
        cpu_pre_filter[i] = filter[i] + cpu_pre_filter[i - 1];
    }
    for (int i = 0; i < mid + 1; ++i) {
        printf("%lf ", cpu_pre_filter[i] - pre_filter[i]);
    }
    // Fix: release host buffers (leaked in the original)
    delete[] filter;
    delete[] pre_filter;
    delete[] cpu_pre_filter;
}
// Computes the Gaussian taps filter[0..mid] and tree-reduces their
// normalisation constant into total[0].
// Needs blockDim.x * sizeof(gray_t) dynamic shared memory; the reduction
// assumes blockDim.x is a power of two. atomicAdd on double requires SM60+.
// NOTE(review): total[0] = 0.0 is written by thread 0 of EVERY block with no
// inter-block ordering — a block may zero the accumulator after another
// block has already added its partial sum (cross-block race). Zero total on
// the host (cudaMemset) before launch instead.
__global__ void cal_filter(gray_t* filter, gray_t* total, int mid, double sigma) {
    extern __shared__ gray_t sdata[];
    // compute this thread's filter tap
    int fi = blockIdx.x * blockDim.x + threadIdx.x; // not unsigned int
    int tidx = threadIdx.x;
    if (fi <= mid) {
        filter[fi] = 1 / (sqrt(2 * PI) * sigma) * exp((- (fi - mid) * (fi - mid)) / (2 * sigma * sigma));
    }
    if (tidx == 0) {
        total[0] = 0.0;
    }
    __syncthreads(); // ensure the taps needed by this block are computed
    // load the taps into shared memory (0 pads the out-of-range lanes)
    sdata[tidx] = fi <= mid ? filter[fi] : 0;
    __syncthreads();
    // tree reduction: the block's tap sum ends up in sdata[0]
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tidx < s) {
            sdata[tidx] += sdata[tidx + s];
        }
        __syncthreads();
    }
    // accumulate 2 * (block sum) into the global total (symmetric kernel)
    if (tidx == 0) {
        atomicAdd(total, 2.0 * sdata[0]);
    }
    // the 2x factor double-counted the centre tap; subtract it once
    if (fi == mid) {
        atomicAdd(total, - filter[mid]);
    }
}
// Normalises the filter: divides each tap by total[0].
// Thread t owns the `loop` consecutive taps [t*loop, min(t*loop + loop, mid+1)).
__global__ void div_filter(gray_t* filter, gray_t* total, int mid, int loop) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int begin = tid * loop;
    int finish = begin + loop;
    if (finish > mid + 1) {
        finish = mid + 1; // clamp the last thread's range to the tap count
    }
    for (int k = begin; k < finish; ++k) {
        filter[k] /= total[0];
    }
}
// Exercises cal_filter + div_filter against a CPU-computed normalised
// Gaussian half-kernel and prints the per-tap differences (should be ~0).
void test_reduce() {
    double sigma = 150;
    // nearest odd kernel size; mid = index of the centre tap
    int filter_size = int(sigma * 3 * 2 + 1) | 1;
    int mid = filter_size >> 1;
    gray_t* filter = new gray_t[mid + 1];
    gray_t* cpu_filter = new gray_t[mid + 1];
    double total = 0;
    for (int i = 0; i < mid + 1; ++i) {
        cpu_filter[i] = 1 / (sqrt(2 * PI) * sigma) * exp((- (i - mid) * (i - mid)) / (2 * sigma * sigma));
        total += cpu_filter[i];
    }
    total = 2 * total - cpu_filter[mid]; // symmetric kernel; centre counted once
    for (int i = 0; i < mid + 1; ++i) {
        cpu_filter[i] /= total;
    }
    printf("[%lf]", total);
    gray_t* device_total;
    gray_t* device_filter;
    checkCudaErrors(cudaMalloc((void **) &device_total, sizeof(gray_t)));
    // Fix: zero the accumulator on the host; cudaMalloc does not zero, and
    // cal_filter's in-kernel zeroing races across blocks.
    checkCudaErrors(cudaMemset(device_total, 0, sizeof(gray_t)));
    checkCudaErrors(cudaMalloc((void **) &device_filter, sizeof(gray_t) * (mid + 1)));
    cal_filter<<<diveup((mid + 1), 32), 32, sizeof(gray_t)*32>>>(device_filter, device_total, mid, sigma);
    checkCudaErrors(cudaDeviceSynchronize());
    div_filter<<<diveup((mid + 1), 32 * 8), 32>>>(device_filter, device_total, mid, 8);
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cudaMemcpy(filter, device_filter, sizeof(gray_t) * (mid + 1), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaMemcpy(&total, device_total, sizeof(gray_t), cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(device_filter));
    checkCudaErrors(cudaFree(device_total));
    for (int i = 0; i < mid + 1; ++i) {
        printf("%lf ", filter[i] - cpu_filter[i]);
    }
    // Fix: release host buffers (leaked in the original)
    delete[] filter;
    delete[] cpu_filter;
}
// First (vertical) pass of the separable Gaussian convolution.
// img_src: n x m row-major image; filter holds the first mid+1 taps of a
// symmetric kernel; pre_filter[i] = filter[0] + ... + filter[i].
// Out-of-range taps are folded into a single multiply with the replicated
// edge pixel via the prefix sums. One thread per output pixel.
__global__ void conv1(gray_t* img_src, gray_t* pre_filter, gray_t* filter, gray_t* temp_res, int n, int m, int mid){
    int i = threadIdx.x+blockDim.x*blockIdx.x;
    int j = threadIdx.y+blockDim.y*blockIdx.y;
    if (i < n && j < m) {
        int pos = j + i * m;
        gray_t temp = 0;
        int i_sta = i - mid;
        int i_end = i + mid;
        // convolution window for this column: rows [i-mid, i+mid]
        if (i - mid < 0) {
            // fold the [i-mid, 0) part: the first mid-i taps all multiply
            // the replicated top-edge pixel img_src[j]
            temp += pre_filter[mid - i - 1] * img_src[j];
            i_sta = 0;
        }
        if (i + mid >= n) {
            // fold the (n-1, i+mid] part: the last i+mid+1-n taps (equal to
            // the first ones by symmetry) all multiply the replicated
            // bottom-edge pixel
            temp += pre_filter[i + mid - n] * img_src[(n - 1) * m + j];
            i_end = n - 1;
        }
        for (int xi = i_sta; xi <= i_end; ++xi) {
            // element xi is |i - xi| from the centre, so it uses the tap at
            // distance |i - xi| from the kernel centre mid
            temp += filter[mid - myabs(i - xi)] * img_src[xi * m + j];
        }
        temp_res[pos] = temp;
    }
}
// Second (horizontal) pass of the separable Gaussian convolution: reads the
// vertically-smoothed temp_res and writes the final image to img_dst.
// Same prefix-sum edge folding as conv1, applied along rows.
// NOTE(review): the right-edge fold multiplies by temp_res[i * m] (the row's
// FIRST element) although the comment describes replicating the row's LAST
// element; the CPU reference does exactly the same, so CPU and GPU agree —
// confirm which behaviour was intended.
__global__ void conv2(gray_t* temp_res, gray_t* pre_filter, gray_t* filter, gray_t* img_dst, int n, int m, int mid){
    int i = threadIdx.x+blockDim.x*blockIdx.x;
    int j = threadIdx.y+blockDim.y*blockIdx.y;
    if (i < n && j < m) {
        int pos = i * m + j;
        gray_t temp = 0;
        int j_sta = j - mid;
        int j_end = j + mid;
        // gray_t tmp = 0;
        // convolution window for this row: columns [j-mid, j+mid]
        if (j - mid < 0) {
            // fold the [j-mid, 0) part: the first mid-j taps all multiply
            // the replicated left-edge pixel temp_res[i*m]
            temp += pre_filter[mid - j - 1] * temp_res[i * m];
            j_sta = 0;
        }
        if (j + mid >= m) {
            // fold the (m-1, j+mid] part (see NOTE above about which edge
            // pixel is used here)
            temp += pre_filter[j + mid - m] * temp_res[i * m];
            j_end = m - 1;
        }
        for (int yj = j_sta; yj <= j_end; ++yj) {
            // element yj is |j - yj| from the centre: use the tap at that distance
            temp += filter[mid - myabs(j - yj)] * temp_res[i * m + yj];
        }
        img_dst[pos] = temp;
    }
}
// GPU Gaussian smoothing of an n x m row-major grayscale image.
// Allocates the n*m output with new[] and returns it via *img_dst_ptr
// (the caller owns it and must delete[] it).
// Two separable 1-D convolutions implement the 2-D Gaussian: complexity
// drops from O(m*n*filter_size^2) to O(m*n*filter_size).
void guassian_smooth(const gray_t* img_src, gray_t** img_dst_ptr, int n, int m, double sigma) {
    gray_t* img_dst = new gray_t[n * m];
    *img_dst_ptr = img_dst;
    // 1. Kernel size from sigma (rationale:
    // https://www.cnblogs.com/shine-lee/p/9671253.html); "| 1" rounds up to
    // the nearest odd size.
    int filter_size = int(sigma * 3 * 2 + 1) | 1;
    // 2. Gaussian taps
    int mid = filter_size >> 1;
    gray_t* filter = new gray_t[mid + 1]; // symmetric kernel: store only the first half plus the centre
    gray_t* pre_filter = new gray_t[mid + 1]; // pre_filter[i] = sum(filter[0..i])
    double total = 0;
    for (int i = 0; i < mid + 1; ++i) {
        filter[i] = 1 / (sqrt(2 * PI) * sigma) * exp((- (i - mid) * (i - mid)) / (2 * sigma * sigma));
        total += 2 * filter[i];
    }
    total -= filter[mid]; // centre tap was doubled; count it once
    for (int i = 0; i < mid + 1; ++i) {
        filter[i] /= total; // normalise so the taps sum to 1
    }
    pre_filter[0] = filter[0];
    for (int i = 1; i < mid + 1; ++i) {
        pre_filter[i] = filter[i] + pre_filter[i - 1];
    }
    // Convolution (out-of-range taps use edge replication; image size unchanged)
    gray_t* temp_res; // device buffer for the result of the first (vertical) pass
    gray_t* device_img_src;
    gray_t* device_pre_filter;
    gray_t* device_filter;
    gray_t* device_img_dst;
    checkCudaErrors(cudaMalloc((void **) &temp_res, sizeof(gray_t) * n * m));
    checkCudaErrors(cudaMalloc((void **) &device_img_src, sizeof(gray_t) * n * m));
    checkCudaErrors(cudaMalloc((void **) &device_pre_filter, sizeof(gray_t) * (mid + 1)));
    checkCudaErrors(cudaMalloc((void **) &device_filter, sizeof(gray_t) * (mid + 1)));
    checkCudaErrors(cudaMalloc((void **) &device_img_dst, sizeof(gray_t) * n * m));
    checkCudaErrors(cudaMemcpy(device_img_src, img_src, sizeof(gray_t) * n * m, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(device_pre_filter, pre_filter, sizeof(gray_t) * (mid + 1), cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemcpy(device_filter, filter, sizeof(gray_t) * (mid + 1), cudaMemcpyHostToDevice));
    // one thread per pixel: 4x8 blocks covering the n x m image
    unsigned int grid_rows = diveup(n, 4);
    unsigned int grid_cols = diveup(m, 8);
    dim3 dimGrid(grid_rows, grid_cols);
    dim3 dimBlock(4, 8);
    // 1. vertical pass
    conv1<<<dimGrid, dimBlock>>>(device_img_src, device_pre_filter, device_filter, temp_res, n, m, mid);
    checkCudaErrors(cudaDeviceSynchronize());
    // 2. horizontal pass
    conv2<<<dimGrid, dimBlock>>>(temp_res, device_pre_filter, device_filter, device_img_dst, n, m, mid);
    checkCudaErrors(cudaDeviceSynchronize());
    checkCudaErrors(cudaMemcpy(img_dst, device_img_dst, sizeof(gray_t) * n * m, cudaMemcpyDeviceToHost));
    checkCudaErrors(cudaFree(temp_res));
    checkCudaErrors(cudaFree(device_img_src));
    checkCudaErrors(cudaFree(device_pre_filter));
    checkCudaErrors(cudaFree(device_filter));
    checkCudaErrors(cudaFree(device_img_dst));
    delete[] filter;
    delete[] pre_filter;
}
// CPU reference for guassian_smooth: identical separable Gaussian smoothing
// of an n x m row-major image. Allocates the output with new[] and returns
// it via *img_dst_ptr (caller owns it and must delete[] it).
void cpu_guassian_smooth(const gray_t* img_src, gray_t** img_dst_ptr, int n, int m, double sigma) {
    gray_t* img_dst = new gray_t[n * m];
    *img_dst_ptr = img_dst;
    // Separable kernel: two 1-D passes instead of one 2-D convolution —
    // O(m*n*filter_size) instead of O(m*n*filter_size^2).
    // 1. Kernel size from sigma; "| 1" rounds up to the nearest odd size.
    int filter_size = int(sigma * 3 * 2 + 1) | 1;
    // 2. Gaussian taps
    int mid = filter_size >> 1;
    gray_t* filter = new gray_t[mid + 1]; // symmetric kernel: only the first half plus the centre
    gray_t* pre_filter = new gray_t[mid + 1]; // pre_filter[i] = sum(filter[0..i])
    double total = 0;
    for (int i = 0; i < mid + 1; ++i) {
        filter[i] = 1 / (sqrt(2 * PI) * sigma) * exp((- (i - mid) * (i - mid)) / (2 * sigma * sigma));
        total += 2 * filter[i];
    }
    total -= filter[mid]; // centre tap was doubled; count it once
    for (int i = 0; i < mid + 1; ++i) {
        filter[i] /= total; // normalise so the taps sum to 1
    }
    pre_filter[0] = filter[0];
    for (int i = 1; i < mid + 1; ++i) {
        pre_filter[i] = filter[i] + pre_filter[i - 1];
    }
    // Convolution (out-of-range taps use edge replication; image size unchanged)
    gray_t* temp_res = new gray_t[n * m]; // result of the first (vertical) pass
    // 1. vertical pass
    for (int j = 0; j < m; ++j) {
        for (int i = 0; i < n; ++i) {
            int pos = i * m + j;
            temp_res[pos] = 0;
            int i_sta = i - mid;
            int i_end = i + mid;
            // convolution window for this column: rows [i-mid, i+mid]
            if (i - mid < 0) {
                // fold the [i-mid, 0) part: the first mid-i taps all multiply
                // the replicated top-edge pixel img_src[j]
                temp_res[pos] += pre_filter[mid - i - 1] * img_src[j];
                i_sta = 0;
            }
            if (i + mid >= n) {
                // fold the (n-1, i+mid] part: the last i+mid+1-n taps (equal
                // to the first ones by symmetry) multiply the bottom-edge pixel
                temp_res[pos] += pre_filter[i + mid - n] * img_src[(n - 1) * m + j];
                i_end = n - 1;
            }
            for (int xi = i_sta; xi <= i_end; ++xi) {
                // element xi is |i - xi| from the centre: use the tap at that distance
                temp_res[pos] += filter[mid - myabs(i - xi)] * img_src[xi * m + j];
            }
        }
    }
    // 2. horizontal pass
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < m; ++j) {
            int pos = i * m + j;
            img_dst[pos] = 0;
            int j_sta = j - mid;
            int j_end = j + mid;
            // convolution window for this row: columns [j-mid, j+mid]
            if (j - mid < 0) {
                // fold the [j-mid, 0) part: the first mid-j taps all multiply
                // the replicated left-edge pixel temp_res[i*m]
                img_dst[pos] += pre_filter[mid - j - 1] * temp_res[i * m];
                j_sta = 0;
            }
            if (j + mid >= m) {
                // fold the (m-1, j+mid] part
                // NOTE(review): uses temp_res[i*m] — the row's FIRST element —
                // though the fold describes the row END; the GPU conv2 does
                // the same, so both versions agree. Confirm intent.
                img_dst[pos] += pre_filter[j + mid - m] * temp_res[i * m];
                j_end = m - 1;
            }
            for (int yj = j_sta; yj <= j_end; ++yj) {
                // element yj is |j - yj| from the centre: use the tap at that distance
                img_dst[pos] += filter[mid - myabs(j - yj)] * temp_res[i * m + yj];
            }
        }
    }
    delete[] filter;
    delete[] pre_filter;
    delete[] temp_res;
}
// Wall-clock time in seconds since the epoch, with microsecond resolution.
double get_time() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = now.tv_sec;
    return seconds + now.tv_usec / 1000000.0;
}
// Builds a synthetic 1023x888 image, smooths it on CPU and GPU, and prints
// the two runtimes in seconds ("cpu,gpu"). Uncomment the loop to dump the
// per-pixel differences.
void test_guassian() {
    int n = 1023, m = 888;
    gray_t* src_img = new gray_t[n * m];
    for (int i = 0; i < n * m; ++i) {
        src_img[i] = 0.5 * (i + 1);
    }
    gray_t sigma = 55.5;
    gray_t* cpu_res, *gpu_res;
    double time1 = get_time();
    cpu_guassian_smooth(src_img, &cpu_res, n, m, sigma);
    double time2 = get_time();
    guassian_smooth(src_img, &gpu_res, n, m, sigma);
    double time3 = get_time();
    printf("%lf,%lf\n",time2-time1,time3-time2);
    // for (int i = 0; i < n * m; ++i) {
    //     printf("%lf ", gpu_res[i] - cpu_res[i]);
    // }
    // Fix: release the source image and both result buffers (the smoothing
    // routines hand ownership back via the out-pointer); all three were
    // leaked in the original.
    delete[] src_img;
    delete[] cpu_res;
    delete[] gpu_res;
}
// Entry point: runs the CPU-vs-GPU Gaussian smoothing comparison.
int main() {
    test_guassian();
} |
7,342 | #define BLOCK_SIZE 512
#include <stdio.h>
__global__
void force (float *virialArray, float *potentialArray, float *pval, float *vval, float *rx, float *ry, float *rz, float *fx, float *fy, float *fz, float sigma, float rcut, float vrcut, float dvrc12, float dvrcut, int *head, int *list, int mx, int my, int mz, int natoms, int step, float sfx, float sfy, float sfz)
{
float sigsq, rcutsq;
float rxi, ryi, rzi, fxi, fyi, fzi;
float rxij, ryij, rzij, rijsq;
float rij, sr2, sr6, vij, wij, fij, fxij, fyij, fzij;
float potential, virial;
int i, icell, j, jcell, nabor;
int xi, yi, zi, ix, jx, kx, xcell, ycell, zcell;
__shared__ float vArray[BLOCK_SIZE];
__shared__ float pArray[BLOCK_SIZE];
int p_start = BLOCK_SIZE;
sigsq = sigma*sigma;
rcutsq = rcut*rcut;
potential = 0.0f;
virial = 0.0f;
int element = blockIdx.x * blockDim.x + threadIdx.x;
if (element < natoms)
{
rxi = rx[element];
ryi = ry[element];
rzi = rz[element];
fxi = 0.0f;
fyi = 0.0f;
fzi = 0.0f;
xi = (int)((rxi+0.5f)/sfx) + 1;
yi = (int)((ryi+0.5f)/sfy) + 1;
zi = (int)((rzi+0.5f)/sfz) + 1;
if(xi > mx) xi = mx;
if(yi > my) yi = my;
if(zi > mz) zi = mz;
icell = xi + (mx+2)*(yi+zi*(my+2));
for (ix=-1;ix<=1;ix++)
for (jx=-1;jx<=1;jx++)
for (kx=-1;kx<=1;kx++){
xcell = ix+xi;
ycell = jx+yi;
zcell = kx+zi;
jcell = xcell + (mx+2)*(ycell+(my+2)*zcell);
j = head[jcell];
while (j>=0) {
if (j!=element) {
rxij = rxi - rx[j];
ryij = ryi - ry[j];
rzij = rzi - rz[j];
rijsq = rxij*rxij + ryij*ryij + rzij*rzij;
if (rijsq < rcutsq) {
//START FORCE_IJ
rij = (float) sqrt ((float)rijsq);
sr2 = sigsq/rijsq;
sr6 = sr2*sr2*sr2;
vij = sr6*(sr6-1.0f) - vrcut - dvrc12*(rij-rcut);
wij = sr6*(sr6-0.5f) + dvrcut*rij;
fij = wij/rijsq;
fxij = fij*rxij;
fyij = fij*ryij;
fzij = fij*rzij;
//END FORCE_IJ
wij *= 0.5f;
vij *= 0.5f;
potential += vij;
virial += wij;
fxi += fxij;
fyi += fyij;
fzi += fzij;
}
}
j = list[j];
}
}
*(fx+element) = 48.0f*fxi;
*(fy+element) = 48.0f*fyi;
*(fz+element) = 48.0f*fzi;
vArray[threadIdx.x] = virial;
pArray[threadIdx.x] = potential;
unsigned int stride;
unsigned int t = threadIdx.x;
__syncthreads();
if (t == 0)
{
// __syncthreads();
for(stride = 1; stride < blockDim.x; stride += 1)
{
vArray[t]+= vArray[stride];
pArray[t]+= pArray[stride];
}
}
//__syncthreads();
if(t == 0)
{
virialArray[blockIdx.x] = vArray[0];
potentialArray[blockIdx.x] = pArray[0];
}
}
}
|
7,343 | #include "includes.h"
// Block-reduces the squared magnitude of interleaved value pairs and
// atomically accumulates the block sum, scaled by 1/(rows*cols), into
// out[blockId / n] (blocks are grouped n-per-output-slot).
// Each thread owns one pair data[2*t], data[2*t+1] — presumably complex
// re/im components; TODO confirm against the caller.
// Needs blockDim.x * blockDim.y * sizeof(float) dynamic shared memory.
// The reduction supports non-power-of-two block sizes (see loop comment).
__global__ void sqr_norm_kernel(int n, float *out, float *data, float rows, float cols)
{
    extern __shared__ float sdata[];
    // linear index of this thread within its block
    int i = blockDim.x * threadIdx.y + threadIdx.x;
    int blockId = blockIdx.x + blockIdx.y * gridDim.x;
    // global offset of this thread's pair of consecutive floats
    int threadId = 2 * (blockId * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x) + threadIdx.x);
    sdata[i] = 0; // dead store: overwritten on the next line (kept as-is)
    sdata[i] = data[threadId] * data[threadId] + data[threadId + 1] * data[threadId + 1];
    __syncthreads();
    // tree reduction that tolerates odd partition sizes: when the live range
    // old_s is odd the stride s is bumped by one so the middle element is
    // not dropped, and the `i + s < old_s` guard stops lanes from reading
    // past the live range.
    for (unsigned int s = (blockDim.x * blockDim.y + 1) / 2, old_s = blockDim.x * blockDim.y; s > 0; s >>= 1) {
        if (old_s & 1) s += 1;
        if (i < s && i + s < old_s) {
            sdata[i] += sdata[i + s];
        }
        old_s = s;
        __syncthreads();
    }
    if (i == 0) {
        atomicAdd(&out[blockId / n], sdata[0] / (rows * cols));
    }
} |
7,344 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
 * rms error. It is then used as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* nvcc -o cuda_linear_regression CUDA_linear_regression.c -lm
*
* To run:
* ./cuda_linear_regression
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
// A single (x, y) observation used by the least-squares line fit.
typedef struct point_t {
    double x; // independent variable
    double y; // observed value
} point_t;
int n_data = 1000;              // number of points in data[] below
__device__ int d_n_data = 1000; // device-side copy of the dataset size
// data set
point_t data[] = {{84.00,158.34},{65.52,110.67},{85.91,134.11},{77.48,136.09},
{73.17,136.16},{67.40,109.20},{75.49,126.90},{77.85,114.67},
{79.25,148.06},{78.31,121.73},{68.11,123.17},{65.98,119.55},
{76.17,132.26},{ 2.64,38.33},{96.38,166.85},{86.14,132.68},
{61.54,96.82},{37.72,98.07},{37.70,80.81},{32.28,100.04},
{ 3.66,23.94},{ 7.46,52.84},{36.23,95.31},{22.34,69.48},
{71.96,115.66},{86.47,138.88},{92.92,145.82},{38.48,82.46},
{44.20,93.62},{21.05,49.35},{11.86,34.11},{46.37,98.83},
{91.10,151.02},{49.67,99.71},{93.18,154.70},{29.36,74.06},
{98.23,161.21},{11.48,37.38},{76.14,134.55},{12.82,42.87},
{41.65,73.14},{90.87,150.57},{20.07,60.27},{16.50,52.54},
{13.34,34.29},{84.40,135.44},{11.52,59.82},{29.37,47.39},
{98.77,186.38},{37.76,90.20},{70.76,120.68},{76.53,124.45},
{53.19,83.32},{43.38,84.37},{24.17,54.39},{34.73,75.47},
{48.74,88.61},{90.41,137.16},{71.67,116.42},{33.52,73.32},
{89.07,151.04},{99.62,157.57},{ 6.06,23.75},{ 9.88,41.48},
{37.40,88.12},{ 4.90,28.21},{59.65,93.58},{89.26,144.30},
{80.12,125.86},{55.68,109.87},{ 6.63,29.82},{42.01,82.57},
{28.50,58.62},{96.34,151.98},{58.48,86.73},{ 9.21,49.93},
{45.26,91.89},{61.45,129.42},{64.16,122.85},{50.16,110.61},
{69.15,117.41},{ 5.71,48.77},{94.65,155.24},{53.12,94.42},
{16.94,63.60},{14.17,54.90},{51.54,115.01},{39.22,79.28},
{39.61,78.46},{54.97,119.51},{11.59,36.63},{89.21,153.74},
{72.09,134.94},{65.94,108.67},{25.24,83.47},{46.42,97.83},
{92.01,150.73},{37.99,76.21},{89.04,142.25},{64.33,112.96},
{53.37,106.10},{95.53,165.26},{38.36,91.04},{93.05,143.28},
{28.64,68.65},{44.62,95.02},{45.09,77.05},{19.75,43.50},
{20.91,56.67},{54.75,98.18},{58.10,104.92},{76.92,112.29},
{95.54,134.70},{18.37,60.01},{34.41,64.02},{73.88,115.06},
{ 3.12,29.78},{87.83,139.28},{51.24,100.70},{59.09,93.30},
{17.73,37.79},{53.24,125.19},{55.98,95.18},{93.57,161.31},
{94.19,156.54},{17.23,61.67},{84.72,126.89},{94.21,173.37},
{14.36,50.76},{86.73,164.40},{11.46,50.63},{82.37,140.06},
{91.47,149.02},{78.43,139.81},{69.13,110.30},{55.18,98.69},
{77.04,133.84},{89.54,152.12},{41.97,92.78},{33.30,54.63},
{67.24,120.88},{99.28,167.60},{15.72,44.70},{ 1.99,30.69},
{22.75,63.59},{ 7.67,35.46},{33.63,78.13},{ 4.18,34.79},
{53.87,85.32},{50.78,116.87},{59.50,110.24},{86.30,128.04},
{ 1.58,32.38},{60.74,115.04},{41.74,80.26},{47.25,87.04},
{ 6.48,48.05},{ 5.38,40.66},{44.37,96.63},{52.09,106.49},
{ 2.53,20.15},{29.92,75.21},{64.13,113.18},{49.87,94.77},
{89.10,139.11},{64.31,122.49},{77.49,151.19},{47.76,92.00},
{35.10,84.27},{59.01,94.03},{30.72,66.91},{ 0.73,31.64},
{87.23,139.17},{93.71,143.40},{99.09,150.61},{49.24,86.30},
{70.06,123.48},{94.79,161.15},{65.53,110.62},{73.07,120.91},
{58.64,109.98},{93.06,153.11},{10.33,52.32},{53.66,99.87},
{79.93,131.72},{92.47,149.34},{81.26,134.04},{77.69,122.75},
{ 0.21,37.82},{ 5.74,33.36},{92.38,139.73},{11.65,57.39},
{75.26,145.81},{92.63,139.34},{73.34,147.14},{54.49,105.57},
{25.16,57.99},{15.96,57.13},{61.03,104.92},{49.06,91.20},
{46.50,78.47},{83.35,139.36},{75.69,107.74},{45.22,91.87},
{ 3.46,47.84},{90.17,155.25},{40.70,58.46},{42.82,97.35},
{49.34,98.41},{89.89,141.81},{42.76,84.78},{34.67,77.38},
{58.32,101.54},{14.00,39.47},{85.53,144.41},{ 0.20,38.28},
{86.50,126.17},{35.04,66.41},{48.45,101.50},{83.83,137.62},
{67.53,105.84},{ 8.18,36.42},{53.77,95.88},{73.79,129.29},
{53.20,93.60},{65.37,111.30},{97.78,167.73},{22.98,55.09},
{81.18,141.58},{37.60,92.48},{84.76,143.11},{22.76,73.08},
{82.74,116.98},{ 6.27,53.39},{34.87,71.66},{75.19,131.80},
{92.31,152.15},{86.41,132.70},{97.91,134.42},{23.91,64.29},
{39.02,83.92},{55.23,119.47},{22.70,69.44},{68.05,116.09},
{60.05,110.20},{43.42,94.91},{ 5.76,40.19},{ 0.85,27.11},
{25.87,54.31},{96.94,146.88},{26.27,72.95},{10.94,45.38},
{64.67,120.45},{11.09,66.59},{26.46,72.91},{18.58,52.41},
{80.97,134.00},{ 0.34,27.83},{33.13,69.96},{49.90,91.22},
{18.80,64.32},{35.91,80.24},{28.45,49.05},{64.01,119.04},
{ 4.57,17.02},{93.16,157.23},{78.53,132.43},{50.60,97.10},
{24.82,50.38},{ 4.84,48.13},{75.03,125.77},{75.72,138.80},
{52.51,114.45},{30.78,72.74},{18.85,45.55},{10.56,49.41},
{ 6.21,50.39},{ 8.24,48.12},{98.61,155.04},{65.17,127.86},
{73.79,127.93},{ 4.28,29.56},{96.11,151.45},{65.64,115.43},
{19.96,40.75},{54.15,107.88},{73.22,121.27},{31.92,62.21},
{92.49,137.93},{46.31,96.16},{24.55,67.97},{49.05,91.91},
{55.62,111.54},{81.51,151.15},{30.35,70.93},{88.44,146.56},
{48.89,97.29},{10.97,35.94},{14.82,56.32},{69.65,117.12},
{30.65,85.34},{92.16,146.37},{16.71,71.71},{66.67,131.21},
{37.11,74.31},{59.68,110.52},{77.14,127.68},{39.95,79.67},
{22.52,91.67},{10.03,45.65},{31.57,75.72},{65.36,128.53},
{83.86,140.56},{41.54,85.30},{ 6.38,30.93},{69.65,122.81},
{ 7.27,44.05},{94.53,149.93},{42.97,95.82},{31.21,65.74},
{56.21,100.24},{ 3.36,35.37},{38.60,82.21},{ 5.89,46.30},
{57.19,105.13},{24.70,62.79},{19.06,46.81},{ 0.03,40.45},
{53.27,98.31},{63.51,94.80},{60.00,117.85},{55.89,115.68},
{47.68,98.74},{ 6.75,49.35},{25.37,36.75},{ 6.02,27.97},
{32.83,70.94},{24.08,59.55},{ 1.50,41.37},{20.14,34.03},
{29.57,72.67},{67.45,129.74},{10.37,71.88},{ 4.00,28.25},
{70.58,121.73},{73.06,147.21},{ 3.18,31.23},{39.72,87.12},
{48.37,95.58},{87.89,148.28},{ 1.38,39.73},{74.67,123.62},
{23.09,85.06},{45.85,83.61},{70.08,120.59},{50.54,127.10},
{18.68,46.70},{25.34,75.71},{36.59,82.03},{74.06,136.39},
{77.69,119.75},{40.72,86.70},{ 2.10,39.85},{70.21,110.13},
{57.12,98.18},{95.81,168.67},{ 1.89,28.48},{80.47,151.77},
{58.85,110.02},{97.94,150.44},{94.39,148.61},{57.84,102.75},
{74.49,133.19},{54.75,103.82},{35.22,94.57},{75.75,136.10},
{50.83,93.11},{63.46,90.88},{43.93,98.46},{96.07,153.80},
{73.81,137.75},{42.06,85.80},{25.23,69.96},{88.47,133.44},
{36.88,84.25},{59.81,106.92},{17.34,65.65},{ 3.38,37.52},
{66.07,130.98},{83.69,133.52},{97.61,141.32},{ 5.25,37.57},
{ 3.65,26.65},{88.36,166.35},{92.12,134.00},{17.96,69.00},
{80.70,126.96},{99.18,157.12},{31.73,77.47},{43.01,99.71},
{40.38,88.63},{70.91,125.87},{80.45,153.99},{57.17,113.12},
{76.32,133.62},{73.71,134.36},{26.86,51.30},{39.58,83.32},
{74.78,129.83},{57.47,98.73},{41.74,87.85},{56.88,103.05},
{30.25,80.40},{10.57,55.26},{97.60,151.00},{93.22,149.95},
{ 0.46,23.10},{58.64,106.75},{99.59,158.87},{32.30,74.35},
{38.16,80.61},{42.23,96.81},{67.36,118.92},{47.78,90.29},
{53.16,112.75},{27.65,49.70},{30.00,56.75},{48.80,117.42},
{12.52,47.70},{29.98,63.12},{43.86,89.89},{99.69,171.58},
{40.14,79.46},{54.06,121.93},{91.20,147.36},{11.64,43.11},
{36.83,96.61},{15.05,42.61},{19.69,71.98},{31.61,75.89},
{25.20,90.34},{85.54,120.51},{52.87,88.38},{72.88,120.51},
{74.91,115.52},{25.60,48.79},{79.13,131.98},{16.82,58.28},
{17.84,48.08},{52.58,108.19},{50.31,94.38},{81.58,124.00},
{83.06,146.52},{70.99,129.95},{33.61,69.02},{52.77,113.22},
{22.94,60.25},{16.16,41.73},{32.61,74.22},{32.95,59.85},
{76.76,135.04},{79.95,136.48},{25.45,60.50},{81.66,142.89},
{45.57,98.11},{98.68,157.32},{33.40,92.64},{56.66,105.69},
{34.55,67.90},{65.58,116.61},{79.54,132.70},{13.55,49.96},
{49.60,92.00},{75.55,143.09},{94.16,145.02},{68.01,123.94},
{11.48,42.98},{ 7.76,56.04},{56.28,118.42},{ 6.09,57.31},
{98.90,179.15},{26.95,84.92},{ 8.52,25.64},{63.05,126.16},
{39.72,94.83},{94.54,159.75},{67.46,123.28},{68.85,138.76},
{46.96,83.67},{67.54,101.95},{23.17,61.79},{29.49,76.26},
{58.17,111.80},{53.25,97.62},{37.89,74.59},{ 7.60,29.16},
{60.72,103.03},{30.80,78.51},{14.13,34.68},{38.44,79.92},
{59.12,111.86},{17.24,56.59},{89.12,133.28},{19.85,60.88},
{90.45,151.93},{82.77,132.73},{34.02,76.70},{54.54,105.19},
{65.08,123.75},{69.80,115.13},{95.30,156.69},{17.66,70.39},
{25.18,82.85},{ 6.02,43.58},{12.61,40.23},{34.32,81.89},
{67.40,100.75},{ 7.81,34.52},{ 7.03,36.08},{22.67,59.09},
{93.01,150.36},{87.02,164.77},{32.20,74.13},{54.87,103.10},
{58.37,135.91},{62.51,96.38},{45.32,89.16},{39.40,80.36},
{12.45,68.04},{61.43,131.19},{59.25,103.13},{26.71,74.65},
{61.25,110.85},{53.78,97.09},{57.75,114.27},{29.38,59.90},
{18.36,71.14},{69.96,118.11},{14.56,39.27},{19.89,40.98},
{68.55,130.17},{ 7.50,47.37},{35.98,101.38},{17.22,43.45},
{76.31,127.73},{ 3.16,47.85},{93.16,152.57},{36.04,77.38},
{60.95,118.72},{ 2.53,27.86},{78.06,137.18},{34.06,71.82},
{83.47,149.39},{43.94,84.91},{82.45,154.13},{70.65,125.11},
{25.67,64.86},{ 3.34,31.20},{15.95,58.76},{82.08,133.76},
{94.85,157.96},{20.05,54.70},{ 3.44,40.93},{40.08,77.92},
{43.31,79.23},{66.15,117.30},{84.56,137.10},{27.33,78.17},
{16.18,46.85},{54.56,94.42},{32.63,78.15},{54.25,118.13},
{90.63,165.79},{57.19,98.26},{46.83,82.83},{ 4.02,24.60},
{66.12,115.96},{71.95,97.79},{52.13,103.65},{31.29,75.26},
{55.91,100.66},{73.56,127.91},{74.57,133.72},{14.54,55.51},
{35.91,76.04},{71.79,112.40},{ 3.52,32.63},{99.48,162.94},
{ 6.95,26.59},{75.82,121.54},{10.08,46.44},{45.52,91.13},
{ 1.68,30.79},{89.95,118.34},{34.84,77.63},{82.45,150.87},
{42.78,63.15},{94.05,152.35},{61.19,104.44},{54.42,104.51},
{ 9.39,44.61},{ 6.90,39.55},{75.00,125.36},{71.30,129.59},
{58.25,97.29},{90.61,147.99},{31.75,67.79},{85.80,137.70},
{79.15,139.56},{44.24,79.90},{81.04,137.23},{65.21,106.73},
{11.01,57.70},{33.41,84.03},{ 5.62,19.26},{77.21,143.88},
{99.40,162.13},{92.80,146.34},{77.65,128.09},{86.21,138.32},
{92.32,158.09},{79.31,150.60},{45.31,91.34},{13.30,45.91},
{91.10,145.10},{94.55,148.44},{90.81,162.01},{98.98,170.98},
{ 5.68,57.45},{10.28,46.14},{13.53,44.62},{52.10,93.46},
{98.73,164.39},{65.57,117.52},{63.85,112.29},{ 5.96,48.16},
{ 8.27,25.85},{14.11,51.72},{87.32,139.79},{ 7.19,36.04},
{36.11,87.96},{79.40,134.60},{79.73,146.13},{69.33,120.83},
{24.96,70.50},{12.89,34.58},{69.45,98.30},{86.02,147.84},
{12.77,47.45},{22.82,76.20},{88.41,150.31},{30.70,72.59},
{43.43,90.49},{25.14,51.00},{51.61,94.30},{61.68,132.43},
{19.55,53.26},{61.03,108.01},{24.35,65.91},{34.89,88.15},
{64.10,104.38},{67.04,133.73},{ 8.14,37.24},{11.35,43.91},
{11.79,46.13},{ 3.91,29.22},{37.60,97.66},{70.40,123.36},
{58.04,122.64},{49.69,93.67},{92.56,162.20},{96.15,157.63},
{23.97,73.66},{84.56,145.39},{61.80,94.78},{55.92,133.05},
{30.56,73.72},{ 4.70,31.81},{64.33,113.26},{50.51,95.26},
{47.10,94.95},{10.32,38.87},{67.75,120.60},{23.91,75.57},
{21.39,61.62},{96.89,147.19},{67.14,123.16},{16.08,61.14},
{93.28,139.31},{50.74,98.09},{37.71,86.95},{78.59,137.61},
{63.89,106.58},{31.69,82.45},{58.08,112.38},{43.16,94.01},
{11.39,25.42},{66.16,112.32},{96.03,152.01},{42.55,83.11},
{32.32,65.15},{23.02,47.10},{29.21,61.35},{16.54,58.56},
{48.28,100.58},{18.08,46.23},{25.86,59.73},{78.52,127.28},
{ 9.95,50.65},{28.39,64.12},{56.10,87.61},{43.22,93.14},
{42.76,89.45},{98.29,159.63},{97.34,177.78},{ 4.27,55.23},
{47.01,105.57},{59.86,114.37},{18.95,51.11},{41.52,84.33},
{62.96,108.18},{27.79,55.86},{16.39,65.66},{66.18,105.45},
{34.86,94.94},{28.47,69.45},{97.67,167.37},{75.86,104.33},
{25.01,74.40},{46.96,87.54},{92.52,129.85},{29.78,73.72},
{85.72,139.37},{83.89,126.76},{59.14,115.78},{46.85,97.38},
{ 6.19,34.32},{11.63,23.31},{11.63,44.46},{22.84,56.76},
{12.00,50.78},{62.84,100.45},{81.42,131.72},{90.47,143.37},
{29.93,88.90},{77.40,122.09},{93.82,145.35},{47.75,96.19},
{21.14,70.74},{97.46,148.02},{12.72,36.39},{49.97,83.79},
{97.95,162.70},{90.86,153.60},{63.85,117.19},{ 4.58,30.40},
{60.56,119.74},{53.52,110.35},{30.87,64.20},{ 6.80,49.32},
{73.95,124.06},{ 6.10,58.73},{22.38,69.13},{60.70,112.49},
{60.85,85.34},{88.28,163.12},{53.88,98.87},{11.25,27.58},
{61.39,111.48},{50.19,101.48},{47.54,105.73},{36.68,74.67},
{16.93,37.98},{63.31,104.97},{98.08,146.59},{20.30,65.51},
{66.90,115.71},{ 3.46,26.89},{75.90,129.36},{ 6.60,56.60},
{58.83,111.03},{69.25,129.30},{76.03,114.70},{62.91,118.23},
{96.98,154.55},{21.79,63.43},{82.03,146.15},{25.64,53.97},
{79.89,145.81},{39.61,84.38},{46.54,88.11},{63.57,98.17},
{78.91,134.57},{92.78,165.65},{88.14,156.37},{ 0.33,35.73},
{51.07,80.06},{37.70,69.07},{76.62,125.56},{74.47,121.16},
{47.03,88.43},{46.70,96.04},{25.41,63.87},{68.35,116.96},
{71.34,120.03},{16.20,49.28},{11.01,49.88},{95.77,162.43},
{28.71,77.99},{46.92,73.65},{62.38,104.31},{86.01,128.55},
{65.86,113.60},{57.63,121.48},{74.36,125.46},{ 1.08,39.77},
{ 8.21,41.65},{13.79,48.62},{83.45,141.43},{15.74,38.94},
{19.63,71.50},{99.25,151.24},{50.00,90.16},{81.22,122.70},
{24.58,46.44},{51.88,85.53},{55.71,84.04},{ 5.93,32.59},
{74.94,134.09},{92.76,149.10},{46.09,101.09},{36.43,94.08},
{ 8.41,38.29},{68.17,99.20},{34.42,85.90},{80.43,138.29},
{48.74,88.09},{56.61,116.21},{27.16,80.41},{96.04,143.67},
{41.60,88.71},{56.43,96.06},{99.67,159.49},{ 7.93,25.69},
{95.21,156.97},{42.55,69.14},{10.74,28.43},{51.32,97.30},
{44.89,83.67},{ 7.40,44.96},{ 1.16,42.00},{ 5.30,37.14},
{96.77,152.72},{ 5.98,40.00},{60.23,122.22},{37.87,84.62},
{42.33,78.24},{18.89,37.62},{26.30,51.55},{ 5.60,42.60},
{32.57,75.79},{94.80,149.64},{98.93,156.85},{ 8.20,41.74},
{38.60,87.55},{88.03,144.13},{28.03,53.93},{26.09,89.76},
{29.84,72.65},{85.36,132.48},{26.97,76.70},{35.26,76.30},
{25.85,70.80},{77.97,127.49},{71.01,138.54},{10.94,49.91},
{31.24,71.77},{ 7.53,44.95},{ 1.52,28.59},{19.27,43.30},
{14.91,45.06},{96.42,156.47},{66.39,126.02},{68.99,112.91},
{18.85,62.58},{32.55,100.92},{33.59,86.01},{58.03,97.94},
{35.86,75.80},{31.33,93.37},{93.26,153.90},{15.52,42.25},
{ 2.75,40.88},{14.25,56.34},{11.34,55.07},{ 1.06,29.82},
{19.47,46.71},{68.39,95.41},{39.15,67.12},{27.81,65.03},
{33.96,68.72},{29.79,83.58},{ 9.85,47.36},{49.47,115.24},
{ 1.24,29.63},{22.96,60.85},{37.50,66.51},{46.51,96.74},
{34.64,72.15},{82.11,143.60},{88.97,152.44},{91.71,145.13},
{13.42,62.70},{56.31,108.49},{26.71,74.17},{52.99,92.39},
{15.94,42.04},{ 1.25,24.40},{70.62,126.08},{88.17,143.68},
{ 7.38,48.95},{50.62,99.33},{43.91,86.82},{ 0.57,26.36},
{27.98,82.27},{38.45,69.65},{11.30,40.94},{45.25,84.73},
{23.87,68.58},{25.37,72.12},{60.34,119.79},{42.59,100.13},
{12.92,41.13},{22.43,55.83},{97.86,168.21},{15.57,53.63},
{26.44,66.90},{99.83,149.10},{83.71,118.45},{69.42,125.64},
{55.85,107.20},{58.00,110.89},{66.97,119.44},{47.25,93.49},
{49.27,106.34},{81.80,147.14},{84.71,137.74},{29.70,61.42},
{71.42,105.56},{68.87,128.05},{46.67,91.29},{65.32,132.59},
{48.76,85.75},{94.55,153.38},{21.41,42.63},{52.33,92.98},
{10.89,38.87},{21.31,52.97},{52.06,85.39},{17.92,50.67},
{73.13,126.80},{65.01,131.31},{37.48,85.60},{62.95,110.14},
{63.03,105.04},{10.10,51.57},{55.89,97.16},{16.20,67.89},
{14.11,38.98},{34.82,80.05},{43.53,87.53},{81.52,149.98},
{65.19,127.61},{13.80,43.24},{62.56,95.92},{32.09,68.79},
{24.35,52.32},{ 8.52,40.70},{83.94,140.84},{76.46,117.49},
{ 9.96,43.44},{90.95,133.38},{29.78,70.77},{15.32,39.57},
{80.51,130.61},{73.15,117.93},{10.85,52.82},{67.84,108.04},
{84.76,131.77},{37.11,74.72},{59.02,110.81},{83.26,138.42},
{44.73,104.58},{95.54,150.49},{60.96,85.42},{70.72,110.25},
{38.20,77.21},{80.19,123.35},{23.75,56.10},{36.83,91.49}
};
// Squared residual of the line y = m*x + c at a single data point (x, y).
double residual_error(double x, double y, double m, double c) {
  double diff = m * x + c - y;
  return diff * diff;
}
// Device-side counterpart of residual_error(): squared residual of
// y = m*x + c at one data point. Kept as a separate __device__ copy
// because the host version cannot be called from a kernel.
__device__ double d_residual_error(double x, double y, double m, double c) {
  double diff = m * x + c - y;
  return diff * diff;
}
// Root-mean-square error of the line y = m*x + c over the global
// data[0..n_data) table (CPU reference implementation).
double rms_error(double m, double c) {
  double error_sum = 0.0;
  for (int i = 0; i < n_data; i++) {
    error_sum += residual_error(data[i].x, data[i].y, m, c);
  }
  return sqrt(error_sum / n_data);
}
// Kernel: one thread computes one squared residual into error_sum_arr;
// the host reduces the array afterwards to finish the RMS computation.
// NOTE(review): there is no i < n bounds check, so blocks*threads must
// exactly match the length of error_sum_arr and d_data (1000 at the
// call site) — confirm n_data == 1000 before changing the launch config.
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
 /*
 Calculate the current index by using:
 - The thread id
 - The block id
 - The number of threads per block
 */
 int i = threadIdx.x + blockIdx.x * blockDim.x;
 // Per-thread squared error stored in its own slot (no races).
 error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
// Computes finish - start in nanoseconds into *difference.
// Returns 0 when the difference is positive, 1 otherwise.
int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
  long long int secs = finish->tv_sec - start->tv_sec;
  long long int nsecs = finish->tv_nsec - start->tv_nsec;
  // Borrow one second when the nanosecond field underflows.
  if (nsecs < 0) {
    secs--;
    nsecs += 1000000000;
  }
  *difference = secs * 1000000000 + nsecs;
  return !(*difference > 0);
}
// Hill-climbing line fit: starting from (m, c) = (1.3, 10), each
// iteration evaluates the RMS error of the 8 neighbouring candidates on
// the GPU and moves to the best one, stopping when no neighbour
// improves the current error.
int main() {
  int i;
  double bm = 1.3;                 // current best gradient
  double bc = 10;                  // current best intercept
  double be;                       // current best RMS error
  double dm[8];                    // candidate gradients
  double dc[8];                    // candidate intercepts
  double e[8];                     // candidate RMS errors
  double step = 0.01;              // neighbourhood step size
  double best_error = 999999999;
  int best_error_i;
  int minimum_found = 0;
  // Offsets of the 8 neighbours around (bm, bc).
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};
  // Wall-clock timing of the whole search.
  struct timespec start, finish;
  long long int time_elapsed;
  clock_gettime(CLOCK_MONOTONIC, &start);
  cudaError_t error;
  // Device buffers.
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;
  be = rms_error(bm, bc);
  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  // The data table never changes, so copy it to the device once,
  // before the loop (it was previously re-copied every iteration).
  error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
  if(error){
    fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
      cudaGetErrorString(error));
  }
  while(!minimum_found) {
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }
    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
        cudaGetErrorString(error));
    }
    for(i=0;i<8;i++) {
      // Host copy of the per-point squared errors from the kernel.
      double h_error_sum_arr[1000];
      // BUG FIX: was declared without an initializer and summed into
      // before the first reset, reading an indeterminate value.
      double error_sum_total = 0;
      double error_sum_mean;
      // 100 blocks x 10 threads = 1000 threads, one per data point.
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      // cudaThreadSynchronize() is deprecated; use the modern call.
      cudaDeviceSynchronize();
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000),
        cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
          cudaGetErrorString(error));
      }
      // Reduce the per-point errors to the candidate's RMS error.
      for(int j=0; j<n_data; j++) {
        error_sum_total += h_error_sum_arr[j];
      }
      error_sum_mean = error_sum_total / n_data;
      e[i] = sqrt(error_sum_mean);
      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }
    // Accept the best neighbour, or stop if none improves.
    if(best_error < be) {
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }
  // Release device memory.
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }
  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
    (time_elapsed/1.0e9));
  return 0;
}
|
extern "C"
__global__ void rowfilter(
    float* dest, const float* src, const float *w, int N, int C, int H, int W,
    int Win, int Mlow, int Mhigh, int stride) {
    /* Symmetric-padded 1-D convolution along the row (x) axis.
       dest   - output array, written at dest[i / stride]
       src    - input array, N*C*H*Win elements
       w      - 1-d filter kernel; tap k is read from w[k - Mlow]
       N, C, H, W - tensor sizes swept by the grid-stride loop below
       Win    - true input row length used for the reflection arithmetic
       Mlow   - idx of most negative filter tap
       Mhigh  - idx of most positive filter tap
       stride - output decimation factor (1 = dense output)
       NOTE(review): dest appears to need N*C*H*W/stride elements — confirm
       against the caller's allocation.
    */
    // Grid-stride loop: any launch configuration covers all positions.
    for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
             i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
        // Decompose the flat index into (n, c, y, x) coordinates.
        const int n = i / C / H / W;
        const int c = (i / H / W) % C;
        const int y = (i / W) % H;
        const int x = i % W;
        float value = 0;
        // Use convolution formula: y[n] = sum h[k]*x[n-k]
        #pragma unroll
        for (int k = Mlow; k <= Mhigh; k++) {
            int x_in = x - k;
            // Symmetric padding: map an out-of-range x_in back into the
            // valid range. I.e. it should result in x_in going from:
            //      -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
            //  to:
            //       2  1  0 | 0 1 2 3 4 5 6 | 6 5 4
            // It also allows padding by more than the input length.
            // The group variable will be:
            //  1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
            // (group == 1 means the index falls in a mirrored segment)
            const int group = x_in >= 0 ? ((x_in / Win) % 2)
                                        : 1-(((-x_in-1)/Win) % 2);
            // Modulo that is correct for negative numbers as well:
            // we want -2 % 5 == 3, but C's % would give -2.
            const int res = (x_in % Win + Win) % Win;
            x_in = (group == 1) ? (Win-1) - res : res;
            // Forward pass: only the signal is reflected; the filter is
            // applied as-is via w[k - Mlow] (the backward kernels also
            // mirror the tap index).
            const int offset = n*C*H*Win + c*H*Win + y*Win + x_in;
            value += w[k-Mlow] * src[offset];
        }
        dest[i/stride] = value;
    }
}
extern "C"
__global__ void rowfilter_bwd(
    float* dest, const float* src, const float *w, int N, int C, int H, int W,
    int Win, int Mlow, int Mhigh, int stride) {
    /* Gradient (backward) pass of rowfilter: correlation with symmetric
       padding, additionally mirroring the filter tap at the boundary.
       dest   - output array, written at dest[i / stride]
       src    - input array, N*C*H*Win elements
       w      - 1-d filter kernel; tap k is read from w[k_in - Mlow]
       N, C, H, W - tensor sizes swept by the grid-stride loop below
       Win    - true input row length used for the reflection arithmetic
       Mlow   - idx of most negative filter tap
       Mhigh  - idx of most positive filter tap
       stride - output decimation factor (1 = dense output)
    */
    // Grid-stride loop: any launch configuration covers all positions.
    for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
             i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
        // Decompose the flat index into (n, c, y, x) coordinates.
        const int n = i / C / H / W;
        const int c = (i / H / W) % C;
        const int y = (i / W) % H;
        const int x = i % W;
        float value = 0;
        // Use correlation formula: y[n] = sum h[k]*x[n+k]
        #pragma unroll
        for (int k = Mlow; k <= Mhigh; k++) {
            int x_in = x + k;
            // When x_in falls outside the input, the filter tap is
            // mirrored too (k -> -k) — required for correct gradients.
            int k_in = (x_in < 0 || x_in >= Win) ? -k : k;
            // Symmetric padding: map an out-of-range x_in back into the
            // valid range. I.e. it should result in x_in going from:
            //      -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
            //  to:
            //       2  1  0 | 0 1 2 3 4 5 6 | 6 5 4
            // It also allows padding by more than the input length.
            // The group variable will be:
            //  1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
            // (group == 1 means the index falls in a mirrored segment)
            const int group = x_in >= 0 ? ((x_in / Win) % 2)
                                        : 1-(((-x_in-1)/Win) % 2);
            // Modulo that is correct for negative numbers as well:
            // we want -2 % 5 == 3, but C's % would give -2.
            const int res = (x_in % Win + Win) % Win;
            x_in = (group == 1) ? (Win-1) - res : res;
            const int offset = n*C*H*Win + c*H*Win + y*Win + x_in;
            value += w[k_in - Mlow] * src[offset];
        }
        dest[i/stride] = value;
    }
}
extern "C"
__global__ void colfilter(
    float* dest, const float* src, const float *w, int N, int C, int H, int W,
    int Hin, int Mlow, int Mhigh, int stride) {
    /* Symmetric-padded 1-D convolution along the column (y) axis —
       the vertical counterpart of rowfilter.
       dest   - output array, written at dest[i / stride]
       src    - input array, N*C*Hin*W elements
       w      - 1-d filter kernel; tap k is read from w[k - Mlow]
       N, C, H, W - tensor sizes swept by the grid-stride loop below
       Hin    - true input column length used for reflection arithmetic
       Mlow   - idx of most negative filter tap
       Mhigh  - idx of most positive filter tap
       stride - output decimation factor (1 = dense output)
    */
    // Grid-stride loop: any launch configuration covers all positions.
    for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
             i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
        // Decompose the flat index into (n, c, y, x) coordinates.
        const int n = i / C / H / W;
        const int c = (i / H / W) % C;
        const int y = (i / W) % H;
        const int x = i % W;
        float value = 0;
        // Use convolution formula: y[n] = sum h[k]*x[n-k]
        #pragma unroll
        for (int k = Mlow; k <= Mhigh; k++) {
            int y_in = y - k;
            // Symmetric padding: map an out-of-range y_in back into the
            // valid range. I.e. it should result in y_in going from:
            //      -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
            //  to:
            //       2  1  0 | 0 1 2 3 4 5 6 | 6 5 4
            // It also allows padding by more than the input length.
            // The group variable will be:
            //  1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
            // (group == 1 means the index falls in a mirrored segment)
            const int group = y_in >= 0 ? ((y_in / Hin) % 2)
                                        : 1-(((-y_in-1)/Hin) % 2);
            // Modulo that is correct for negative numbers as well:
            // we want -2 % 5 == 3, but C's % would give -2.
            const int res = (y_in % Hin + Hin) % Hin;
            y_in = (group == 1) ? (Hin-1) - res : res;
            // Forward pass: only the signal is reflected; the filter is
            // applied as-is via w[k - Mlow] (the backward kernels also
            // mirror the tap index).
            const int offset = n*C*Hin*W + c*Hin*W + y_in*W + x;
            value += w[k-Mlow] * src[offset];
        }
        dest[i/stride] = value;
    }
}
extern "C"
__global__ void colfilter_bwd(
    float* dest, const float* src, const float *w, int N, int C, int H, int W,
    int Hin, int Mlow, int Mhigh, int stride) {
    /* Gradient (backward) pass of colfilter: correlation with symmetric
       padding along y, additionally mirroring the filter tap at the
       boundary.
       dest   - output array, written at dest[i / stride]
       src    - input array, N*C*Hin*W elements
       w      - 1-d filter kernel; tap k is read from w[k_in - Mlow]
       N, C, H, W - tensor sizes swept by the grid-stride loop below
       Hin    - true input column length used for reflection arithmetic
       Mlow   - idx of most negative filter tap
       Mhigh  - idx of most positive filter tap
       stride - output decimation factor (1 = dense output)
    */
    // Grid-stride loop: any launch configuration covers all positions.
    for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
             i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
        // Decompose the flat index into (n, c, y, x) coordinates.
        const int n = i / C / H / W;
        const int c = (i / H / W) % C;
        const int y = (i / W) % H;
        const int x = i % W;
        float value = 0;
        // Use correlation formula: y[n] = sum h[k]*x[n+k]
        #pragma unroll
        for (int k = Mlow; k <= Mhigh; k++) {
            int y_in = y + k;
            // When y_in falls outside the input, the filter tap is
            // mirrored too (k -> -k) — required for correct gradients.
            int k_in = (y_in < 0 || y_in >= Hin) ? -k : k;
            // Symmetric padding: map an out-of-range y_in back into the
            // valid range. I.e. it should result in y_in going from:
            //      -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
            //  to:
            //       2  1  0 | 0 1 2 3 4 5 6 | 6 5 4
            // It also allows padding by more than the input length.
            // The group variable will be:
            //  1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
            // (group == 1 means the index falls in a mirrored segment)
            const int group = y_in >= 0 ? ((y_in / Hin) % 2)
                                        : 1-(((-y_in-1)/Hin) % 2);
            // Modulo that is correct for negative numbers as well:
            // we want -2 % 5 == 3, but C's % would give -2.
            const int res = (y_in % Hin + Hin) % Hin;
            y_in = (group == 1) ? (Hin-1) - res : res;
            const int offset = n*C*Hin*W + c*Hin*W + y_in*W + x;
            value += w[k_in - Mlow] * src[offset];
        }
        dest[i/stride] = value;
    }
}
|
7,346 | #include <stdio.h>
#include <stdlib.h>
#define RADIUS 3
#define BLK_SIZE 256
#define NUM_ELEMENTS (BLK_SIZE * 32) // 256 * 32 = 8192
// 1-D stencil: output element t is the sum of the 2*RADIUS+1
// neighbouring inputs. d_in must carry RADIUS halo cells on each side
// of the NUM_ELEMENTS payload; one thread per output element.
__global__ void stencil_1d(int *d_in, int *d_out){
  int out_idx = (blockIdx.x * blockDim.x) + threadIdx.x;
  int in_idx = out_idx + RADIUS;   // skip past the left halo
  int sum = 0;
  for (int offset = -RADIUS; offset <= RADIUS; offset++)
    sum += d_in[in_idx + offset];
  d_out[out_idx] = sum;
}
// Host driver: fills a haloed all-ones input, runs the stencil, and
// verifies every output equals the window size (2*RADIUS + 1).
// Every CUDA call is now checked (the original ignored all return
// codes, so any failure produced a silent wrong result).
int main(void){
  int h_in[ NUM_ELEMENTS + (2*RADIUS) ];
  int h_out[ NUM_ELEMENTS ];
  int *d_in, *d_out;
  cudaError_t err;
  const int expected = 2*RADIUS + 1;   // window sum over all-ones input
  // Initialize host input values, halo cells included
  for (int i=0; i<(NUM_ELEMENTS + 2*RADIUS); i++)
    h_in[i] = 1;
  // Allocate device global memory
  err = cudaMalloc( &d_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int) );
  if (err != cudaSuccess){
    fprintf(stderr, "cudaMalloc d_in: %s\n", cudaGetErrorString(err));
    return 1;
  }
  err = cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int) );
  if (err != cudaSuccess){
    fprintf(stderr, "cudaMalloc d_out: %s\n", cudaGetErrorString(err));
    return 1;
  }
  // Copy HOST -> DEVICE
  err = cudaMemcpy( d_in, h_in, (NUM_ELEMENTS + 2*RADIUS) * sizeof(int), cudaMemcpyHostToDevice);
  if (err != cudaSuccess){
    fprintf(stderr, "cudaMemcpy H2D: %s\n", cudaGetErrorString(err));
    return 1;
  }
  // Launch kernel: NUM_ELEMENTS/BLK_SIZE = 32 blocks of 256 threads,
  // one thread per output element.
  stencil_1d<<< NUM_ELEMENTS/BLK_SIZE, BLK_SIZE>>>(d_in, d_out);
  err = cudaGetLastError();   // catches bad launch configurations
  if (err != cudaSuccess){
    fprintf(stderr, "kernel launch: %s\n", cudaGetErrorString(err));
    return 1;
  }
  // Copy result DEVICE -> HOST (blocking copy also synchronizes)
  err = cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost);
  if (err != cudaSuccess){
    fprintf(stderr, "cudaMemcpy D2H: %s\n", cudaGetErrorString(err));
    return 1;
  }
  // Verify results
  int err_cnt = 0;
  for (int i=0; i<NUM_ELEMENTS; i++){
    if (h_out[i] != expected){
      printf("h_out[%d] == %d != %d\n", i, h_out[i], expected);
      err_cnt++;
      break;
    }
  }
  if (err_cnt!=0){
    printf("Wrong result\n");
  }else{
    printf("Success\n");
  }
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
|
7,347 | #include <cuda.h>
#include <stdio.h>
// Direct (naive) 2-D convolution: one thread per output pixel, reading
// the (2*radius+1)^2 window straight from global memory.
// Border pixels within `radius` of any edge are not written.
__global__ void naive_kernel(float* d_in, int height, int width,
float* filter, int radius,
float* d_out) {
  // Pixel coordinates of this thread in the image grid.
  unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
  unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
  const unsigned int out_loc = (row * width) + col;
  const int ksize = radius*2 + 1;
  // Skip threads mapping outside the interior region.
  if (col < radius || row < radius ||
      col >= (width - radius) || row >= (height - radius))
    return;
  float acc = 0;
  #pragma unroll
  for (int dx = -radius; dx <= radius; ++dx) {
    #pragma unroll
    for (int dy = -radius; dy <= radius; ++dy) {
      // Neighbour pixel, guaranteed in-bounds by the interior check.
      int px = col + dx;
      int py = row + dy;
      int img_loc = (py * width) + px;
      // Corresponding filter coefficient.
      int filter_loc = (dy + radius) * ksize + (dx + radius);
      acc += d_in[img_loc] * filter[filter_loc];
    }
  }
  d_out[out_loc] = acc;
}
// Host-side launcher for naive_kernel with caller-supplied launch
// geometry. All pointer arguments must be device pointers; no error
// checking or synchronization is performed here.
void naivekernel(float* d_in, int height, int width,
float* filter, int radius,
float* d_out,
dim3 h_gridDim, dim3 h_blockDim) {
naive_kernel<<<h_gridDim, h_blockDim>>>(d_in, height, width,
filter, radius,
d_out);
}
|
7,348 | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
// Fills `input` with a descending sequence: element t receives 32 - t.
extern "C" __global__ void kernelFunction ( int *input )
{
    const int t = threadIdx.x;
    input[t] = 32 - t;
}
|
7,349 | #include "includes.h"
// Recomputes the centroid of cluster k (single block): each thread sums
// the B/G/R values of the pixels assigned to cluster k in its slice of
// the image, then a tree reduction over the scratch arrays
// (nValue/Bdata/Gdata/Rdata, one slot per thread) produces the totals,
// and thread 0 writes the new centroid.
__global__ void update_cluster(int *cluster, float *centroid, float *B_c, float *G_c, float *R_c, int size_image, int n_threads, int k, float *Bdata, float *Gdata, float *Rdata, float *nValue)
{
  unsigned int tid = threadIdx.x;
  int size_per_thread = int(size_image/n_threads);
  int start = tid*size_per_thread;
  int end = start + size_per_thread;
  float count = 0;
  float B = 0;
  float G = 0;
  float R = 0;
  // BUG FIX: the original returned early here for tid >= size_image,
  // skipping the __syncthreads() calls below — undefined behaviour when
  // only part of the block reaches a barrier. Out-of-range threads now
  // contribute zeros and still participate in every barrier.
  bool active = (tid < (unsigned int)size_image);
  if (tid==n_threads-1)
  {
    // Last thread picks up the remainder of the image.
    start = (n_threads-1)*size_per_thread;
    end = size_image;
  }
  if (active)
  {
    for(int j = start; j < end; j++)
    {
      if(cluster[j] == k)
      {
        B = B + (B_c[j]);
        G = G + (G_c[j]);
        R = R + (R_c[j]);
        count = count + 1;
      }
    }
  }
  // Zeros for inactive threads keep the reduction below well-defined.
  nValue[tid] = count;
  Bdata[tid] = B;
  Gdata[tid] = G;
  Rdata[tid] = R;
  __syncthreads();
  // Interleaved tree reduction into slot 0.
  for(unsigned int s=1; s < blockDim.x; s *= 2)
  {
    // BUG FIX: guard tid+s against running past the block; the original
    // read out of bounds when blockDim.x is not a power of two.
    if(tid % (2*s) == 0 && tid + s < blockDim.x)
    {
      nValue[tid] += nValue[tid + s];
      Bdata[tid] += Bdata[tid + s];
      Gdata[tid] += Gdata[tid + s];
      Rdata[tid] += Rdata[tid + s];
    }
    __syncthreads();
  }
  if(tid == 0)
  {
    // Leave the centroid untouched when the cluster is empty.
    if (nValue[0] != 0)
    {
      centroid[k*3 + 0] = Bdata[0] / nValue[0];
      centroid[k*3 + 1] = Gdata[0] / nValue[0];
      centroid[k*3 + 2] = Rdata[0] / nValue[0];
    }
  }
}
7,350 | #include <iostream>
#define iterationsToDo 4096
//#define iterationsToDo 25
#define isNumberEven(n) (n % 2 == 0)
// Accumulates the Leibniz series for pi/4: thread `index` handles terms
// index, index+stride, index+2*stride, ...
// BUG FIX: the original did `*runningSum += ...` directly, which is a
// data race whenever the kernel is launched with more than one thread.
// Each thread now accumulates locally and commits a single atomicAdd.
// (atomicAdd on double requires compute capability 6.0+.)
__global__ void calculate(double *runningSum) {
  int index = threadIdx.x;
  int stride = blockDim.x;
  double local = 0;
  for (int i = index; i < iterationsToDo; i += stride) {
    // Alternating sign: +1 for even terms, -1 for odd.
    double numerator = isNumberEven(i) ? 1 : -1;
    double denominator = 2 * i + 1;
    local += numerator / denominator;
  }
  atomicAdd(runningSum, local);
}
// Zeroes the device-side accumulator (single-thread launch expected).
__global__ void init(double* runningSum) {
    runningSum[0] = 0;
}
// Prints the accumulated sum times 4 (the pi estimate) from the device.
__global__ void printTheThing(double *thingToPrint) {
    double quadrupled = *thingToPrint * 4;
    printf("!!! TEST !!!%.20lf", quadrupled);
}
// Allocates the device accumulator, runs init -> calculate ->
// printTheThing (synchronizing around the device-side printf), and
// releases the allocation (the original leaked `runningSum`).
void doIt() {
    double *runningSum;
    if (cudaMalloc((void **) &runningSum, sizeof(double)) != cudaSuccess) {
        puts("Something truly terrible happened");
        exit(1);
    }
    init<<<1, 1>>>(runningSum);
    calculate<<<1, 1>>>(runningSum);
    cudaDeviceSynchronize();
    printTheThing<<<1, 1>>>(runningSum);
    cudaDeviceSynchronize();
    // BUG FIX: the allocation was never freed.
    cudaFree(runningSum);
}
// Entry point: runs the single-GPU pi estimation once.
int main() {
 doIt();
 return 0;
}
|
7,351 | #include <iostream>
#include <thrust/device_vector.h>
int main(void){
 // NOTE: sizeof(vec) is the size of the host-side device_vector handle
 // object itself, not the 10 * sizeof(short) of device storage it owns.
 thrust::device_vector<short> vec(10);
 std::cout << sizeof(vec) << std::endl;
 return 0;
}
|
7,352 | #include "includes.h"
// Splits a concatenated size x size multi-channel tensor into four
// channel groups (the inverse of a channel concat). One thread per
// spatial position; each thread walks every input channel and copies
// its element to the matching output at the same (col, row) address.
// NOTE(review): the (n * size + col) * size + row addressing swaps
// row/col relative to the usual row-major layout — preserved as-is.
__global__ void decat(float* input, float* output1, float* output2, float* output3, float* output4, const int size, const int out_channel1, const int out_channel2, const int out_channel3, const int out_channel4)
{
  const int pos = blockIdx.x * blockDim.x + threadIdx.x;
  const int in_channel = out_channel1 + out_channel2 + out_channel3 + out_channel4; // # of channel of input
  const int N = size * size; // total elements per channel
  if(pos >= N) return;
  // Loop-invariant coordinates hoisted out of the channel loop (the
  // original recomputed row/col on every iteration).
  const int row = pos / size;
  const int col = pos % size;
  // Cumulative channel boundaries between the four outputs.
  const int b1 = out_channel1;
  const int b2 = b1 + out_channel2;
  const int b3 = b2 + out_channel3;
  for(int n = 0; n < in_channel; n++){
    const float v = input[(n * size + col) * size + row];
    if(n < b1){ // first output
      output1[(n * size + col) * size + row] = v;
    }
    else if(n < b2){ // second output
      output2[((n - b1) * size + col) * size + row] = v;
    }
    else if(n < b3){ // third output
      output3[((n - b2) * size + col) * size + row] = v;
    }
    else{ // last output
      output4[((n - b3) * size + col) * size + row] = v;
    }
  }
}
7,353 | #include <cmath>
// result[t] = true iff value[t] is positive infinity
// (std::isinf alone is sign-agnostic, hence the extra > 0 check).
__global__ void isinf_kernel(const double* value, bool* result)
{
    const double v = value[threadIdx.x];
    result[threadIdx.x] = std::isinf(v) && (v > 0);
}
|
7,354 | // This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA
// with an array of offsets. Then the offsets are added in parallel
// to produce the string "World!"
// By Ingemar Ragnemalm 2010
// nvcc hello-world.cu -L /usr/local/cuda/lib -lcudart -o hello-world
#include <stdio.h>
const int N = 16;
const int blocksize = 16;
// Adds per-character offsets to the string in place ("Hello " -> "World!").
__global__ void kernel_hello(char *a, int *b){
    const int t = threadIdx.x;
    a[t] = a[t] + b[t];
}
int main(){
    // Host data: "Hello " plus the offsets that turn it into "World!".
    char a[N] = "Hello \0\0\0\0\0\0";
    int b[N] = {15, 10, 6, 0, -11, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
    const int csize = N*sizeof(char);
    const int isize = N*sizeof(int);
    char *a_device;
    int *b_device;
    printf("%s", a);
    // Stage both arrays on the device.
    cudaMalloc( (void**)&a_device, csize );
    cudaMalloc( (void**)&b_device, isize );
    cudaMemcpy( a_device, a, csize, cudaMemcpyHostToDevice );
    cudaMemcpy( b_device, b, isize, cudaMemcpyHostToDevice );
    // One thread per character in a single block.
    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    kernel_hello<<<dimGrid, dimBlock>>>(a_device, b_device);
    // Blocking copy back also synchronizes with the kernel.
    cudaMemcpy( a, a_device, csize, cudaMemcpyDeviceToHost );
    cudaFree( a_device );
    cudaFree( b_device );
    printf("%s\n", a);
    return EXIT_SUCCESS;
}
|
7,355 | #include "includes.h"
// im2col-style unroll for a k x k convolution window (single channel):
// thread t handles output pixel t and writes the window's k*k values
// into column t of x_unroll (row p*k+q of the unrolled matrix).
// NOTE(review): the output extent uses w_in only — assumes a square
// input image; confirm with callers.
__global__ void unroll_kernel(int h_in, int w_in, int k, float *x, float *x_unroll) {
  // GENERALIZATION: use blockDim.x instead of the hard-coded 1024 so
  // the index is correct for any block size (identical behaviour for
  // the original 1024-thread launches).
  int t = blockIdx.x * blockDim.x + threadIdx.x; // Index of this thread
  int w_out = w_in - k + 1;    // Output image size
  int w_unroll = w_out * w_out; // Unroll limit (total output pixels)
  if (t < w_unroll) {
    int h_out_ = t / w_out;    // Output row
    int w_out_ = t % w_out;    // Output column
    int w_unroll_ = h_out_ * w_out + w_out_; // Flat output pixel index
    // Copy the k x k window rooted at (h_out_, w_out_), skipping
    // positions that would fall outside the input.
    for (int p = 0; p < k; p++)
      for (int q = 0; q < k; q++) {
        int h_unroll = p * k + q; // Row in the unrolled matrix
        if ((h_out_ + p) < h_in && (w_out_ + q) < w_in)
          x_unroll[h_unroll * w_unroll + w_unroll_] =
              x[(h_out_ + p) * w_in + w_out_ + q];
      }
  }
}
7,356 | #include <bits/stdc++.h>
using namespace std;
constexpr int MAX_THREADS = (1 << 9);
// Weighted undirected edge (u, v, w); also used as the sort key for the
// GPU bitonic sort (INT_MAX-filled instances act as padding sentinels).
struct edge {
    int u, v, w;
    edge(int u, int v, int w): u(u), v(v), w(w) {}
};
using comparison_func_t = bool (*) (edge*, edge*);
int n;
edge *edges, *chosen_edges;
int *par;
int num_edge;
// Strict weak ordering of edges by (w, u, v) ascending; used to order
// candidate edges for the Kruskal scan.
__device__ bool comparison_weight(edge *x, edge *y) {
    if (x->w != y->w)
        return x->w < y->w;
    if (x->u != y->u)
        return x->u < y->u;
    return x->v < y->v;
}
// Strict weak ordering of edges by (u, v) ascending; used to present
// the chosen MST edges in endpoint order.
__device__ bool comparison_node(edge *x, edge *y) {
    if (x->u != y->u)
        return x->u < y->u;
    return x->v < y->v;
}
__device__ comparison_func_t p_comparison_weight = comparison_weight;
__device__ comparison_func_t p_comparison_node = comparison_node;
// Smallest power of two >= x (returns 1 for x <= 1); the bitonic sort
// below requires a power-of-two array length.
int get_container_length(int x) {
    int len = 1;
    while (len < x)
        len <<= 1;
    return len;
}
// One compare-exchange step of the bitonic sorting network.
// k is the current bitonic subsequence size, j the compare distance;
// `comparison` is a device function-pointer comparator. One thread per
// array element; the host loops over the (k, j) schedule.
__global__ void bitonic_sort_kernel(edge *d_edges, int j, int k, comparison_func_t comparison) {
    unsigned int i, ixj;
    i = threadIdx.x + blockDim.x * blockIdx.x;
    ixj = i ^ j;  // partner index for this compare-exchange
    auto swap = [&](edge& x, edge& y)->void {
        edge temp = x;
        x = y;
        y = temp;
    };
    // Only the lower index of each pair performs the exchange.
    if (ixj > i) {
        // (i & k) selects the sort direction of this subsequence.
        if (((i & k) != 0) && (*comparison)(&d_edges[i], &d_edges[ixj]))
            swap(d_edges[i], d_edges[ixj]);
        else if (((i & k) == 0) && (*comparison)(&d_edges[ixj], &d_edges[i]))
            swap(d_edges[i], d_edges[ixj]);
    }
}
// Sorts edges[0..length) in place with a GPU bitonic network.
// PRECONDITION (unchecked): the `edges` buffer must have capacity for
// get_container_length(length) elements, because the tail is padded
// with INT_MAX sentinel edges to reach a power-of-two size.
// NOTE(review): no CUDA error checking is performed on any call here.
void bitonic_sort(edge *edges, int length, comparison_func_t comparison) {
    int container_length = get_container_length(length);
    // Pad to a power of two with "infinite" edges that sort last.
    for (int i = length; i < container_length; i++) {
        edges[i] = edge(INT_MAX, INT_MAX, INT_MAX);
    }
    length = container_length;
    edge *d_edges;
    size_t container_size = length * sizeof(edge);
    // Copy data to gpu
    cudaMalloc((void**) & d_edges, container_size);
    cudaMemcpy(d_edges, edges, container_size, cudaMemcpyHostToDevice);
    // Launch geometry: one thread per element, capped at MAX_THREADS
    // per block (length is a power of two, so the division is exact).
    int num_thread = min(length, MAX_THREADS);
    int num_blocks = length / num_thread;
    dim3 blocks(num_blocks, 1);
    dim3 threads(num_thread, 1);
    // Standard bitonic schedule: one kernel launch per (k, j) phase.
    for (int k = 2; k <= length; k <<= 1) {
        for (int j = k >> 1; j > 0; j >>= 1) {
            bitonic_sort_kernel<<<blocks, threads>>>(d_edges, j, k, comparison);
        }
    }
    // Copy result from gpu
    cudaMemcpy(edges, d_edges, container_size, cudaMemcpyDeviceToHost);
    cudaFree(d_edges);
}
// Reads an n x n adjacency matrix (-1 = no edge), builds a minimum
// spanning tree with Kruskal's algorithm (edge ordering done by the
// GPU bitonic sort), and prints total cost, the chosen edges, and the
// elapsed time.
int main(int argc, char **argv) {
    // Fetch the device-side comparator addresses so they can be passed
    // to kernels by value.
    comparison_func_t h_comparison_weight;
    comparison_func_t h_comparison_node;
    cudaMemcpyFromSymbol(&h_comparison_weight, p_comparison_weight, sizeof(comparison_func_t));
    cudaMemcpyFromSymbol(&h_comparison_node, p_comparison_node, sizeof(comparison_func_t));
    // Init clock
    clock_t t = clock();
    // Input n
    cin >> n;
    // Initialize disjoint-set parents
    par = (int * ) malloc(n * sizeof(int));
    for (int i = 0; i < n; i++) {
        par[i] = i;
    }
    // Union-find with path compression (no union by rank).
    function<int(int)> find_set = [&](int x) {
        return (par[x] == x ? x : par[x] = find_set(par[x]));
    };
    function<bool(int, int)> merge_set = [&](int u, int v) {
        int pu = find_set(u), pv = find_set(v);
        if (pu == pv) return false;
        par[pv] = pu;
        return true;
    };
    // Input edges: keep only the upper triangle; -1 marks "no edge".
    edges = (edge * ) malloc(n * n * sizeof(edge));
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < n; j++) {
            int x;
            cin >> x;
            if (x == -1 || i >= j) continue;
            edges[num_edge++] = edge(i, j, x);
        }
    }
    assert(num_edge >= n - 1);
    // Sort candidate edges by weight on the GPU
    bitonic_sort(edges, num_edge, h_comparison_weight);
    // Build MST: greedily accept edges that join two components.
    long long total_cost = 0;
    int num_chosen = 0;
    chosen_edges = (edge * ) malloc(num_edge * 2 * sizeof(edge));
    for (int i = 0; i < num_edge; i++) {
        int u = edges[i].u, v = edges[i].v, w = edges[i].w;
        if (merge_set(u, v)) {
            total_cost += w;
            chosen_edges[num_chosen++] = edges[i];
            if (num_chosen == n - 1) break;
        }
    }
    // Sort chosen edges by endpoints for output
    bitonic_sort(chosen_edges, num_chosen, h_comparison_node);
    // BUG FIX: the elapsed time was computed in seconds but printed
    // with an "ms" label; convert to milliseconds so the label holds.
    double time_taken = 1000.0 * ((double) (clock() - t)) / CLOCKS_PER_SEC;
    // Output
    cout << total_cost << '\n';
    for (int i = 0; i < num_chosen; i++) {
        cout << chosen_edges[i].u << '-' << chosen_edges[i].v << '\n';
    }
    cout << fixed << setprecision(12) << "Waktu eksekusi: " << time_taken << " ms\n";
    // Release host buffers (previously leaked).
    free(chosen_edges);
    free(edges);
    free(par);
    return 0;
}
|
7,357 | #include "includes.h"
// Sobel edge-detection kernel: one thread per output pixel.
// Launch with a 2D grid covering width x height; input/output are
// single-channel 8-bit images of identical dimensions.
__global__ void sobel(unsigned char *output, unsigned char *input, int width, int height)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (y >= height || x >= width)
        return;
    // Sobel weights (horizontal and vertical gradients)
    float weightsX[9] = { -1, -2, -1,
    0, 0, 0,
    1, 2, 1 };
    float weightsY[9] = { -1, 0, 1,
    -2, 0, 2,
    -1, 0, 1 };
    // 3x3 neighbourhood offsets
    int offsetY[9] = { -1, -1, -1,
    0, 0, 0,
    1, 1, 1 };
    int offsetX[9] = { -1, 0, 1,
    -1, 0, 1,
    -1, 0, 1 };
    float pointX = 0.f;
    float pointY = 0.f;
    #pragma unroll
    for (int i = 0; i < 9; i++)
    {
        // BUG FIX: clamp neighbour coordinates to the image so border pixels
        // (x==0, y==0, x==width-1, y==height-1) do not read out of bounds.
        int sx = min(max(x + offsetX[i], 0), width - 1);
        int sy = min(max(y + offsetY[i], 0), height - 1);
        unsigned char pixel = input[sy * width + sx];
        pointX += pixel * weightsX[i];
        pointY += pixel * weightsY[i];
    }
    // Gradient magnitude, saturated to 255 instead of wrapping on the
    // implicit conversion to unsigned char.
    float magnitude = sqrtf(pointX * pointX + pointY * pointY);
    output[x + y * width] = (unsigned char)fminf(magnitude, 255.0f);
}
7,358 |
// Compiler/test scaffolding: exercises various __shared__ declaration forms
// and simple shared-memory addressing; not a real computation.
extern "C"
{
// Copies argIntStar[threadIdx.x] into shared tab[5] and mirrors it back to
// argIntStar[6] through a shared pointer; always returns 10.
// NOTE(review): argFloat, temp and arr are unused, and all threads write
// tab[5] (a race) — presumably intentional for this test code.
__device__ int fun1(int* argIntStar, float argFloat)
{
__shared__ int tab[123],temp,*ptr,arr[2];
ptr=&tab[5];
tab[5]=argIntStar[threadIdx.x];
argIntStar[6]=*ptr;
return 10;
}
// Empty kernel: only declares shared variables of assorted shapes.
__global__ void fun2(char* argCharStar,int argInt)
{
__shared__ char tab[12],a,b,*c;
__shared__ int q,w[2],*e;
}
}
|
7,359 | /**
* @file ex_particle_OPENMP_seq.c
* @author Michael Trotter & Matt Goodrum
* @brief Particle filter implementation in C/OpenMP
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <math.h>
#include <unistd.h>
#include <fcntl.h>
#include <float.h>
#include <sys/time.h>
#include<ctime>
#define PI 3.1415926535897932
#define BLOCK_X 16
#define BLOCK_Y 16
/**
@var M value for Linear Congruential Generator (LCG); use GCC's value
*/
long M = INT_MAX;
/**
@var A value for LCG
*/
int A = 1103515245;
/**
@var C value for LCG
*/
int C = 12345;
const int threads_per_block = 128;
/*****************************
*GET_TIME
*returns a long int representing the time
*****************************/
// Returns the current wall-clock time expressed in microseconds.
long long get_time() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return (now.tv_sec * 1000000) + now.tv_usec;
}
// Returns the number of seconds elapsed between the two specified times
// Converts a pair of microsecond timestamps (as produced by get_time)
// into the elapsed duration in seconds.
float elapsed_time(long long start_time, long long end_time) {
    long long delta_us = end_time - start_time;
    return ((float) delta_us) / (1000 * 1000);
}
/*****************************
* CHECK_ERROR
* Checks for CUDA errors and prints them to the screen to help with
* debugging of CUDA related programming
*****************************/
// Aborts the program with a diagnostic message if a CUDA API call failed.
// Call on the cudaError returned by every CUDA runtime call.
void check_error(cudaError e) {
if (e != cudaSuccess) {
printf("\nCUDA error: %s\n", cudaGetErrorString(e));
exit(1);
}
}
// Device-side linear search: returns the index of the first CDF entry that
// is >= value; falls back to the last index if no entry qualifies.
__device__ int findIndexSeq(double * CDF, int lengthCDF, double value)
{
for(int pos = 0; pos < lengthCDF; pos++)
{
if(CDF[pos] >= value)
return pos;
}
// value exceeded every entry: clamp to the last valid index
return lengthCDF-1;
}
// Device-side binary search over a monotone CDF: returns the index of the
// FIRST entry >= value within [beginIndex, endIndex], or -1 if the range is
// empty or the search falls through.
__device__ int findIndexBin(double * CDF, int beginIndex, int endIndex, double value)
{
    if(endIndex < beginIndex)
        return -1;
    int middleIndex;
    while(endIndex > beginIndex)
    {
        middleIndex = beginIndex + ((endIndex-beginIndex)/2);
        if(CDF[middleIndex] >= value)
        {
            if(middleIndex == 0)
                return middleIndex;
            else if(CDF[middleIndex-1] < value)
                return middleIndex;
            else if(CDF[middleIndex-1] == value)
            {
                // Walk left to the first entry equal to value.
                // BUG FIX: test the index BEFORE dereferencing — the original
                // evaluated CDF[middleIndex] first and read CDF[-1] when the
                // run of equal values reached the start of the array.
                while(middleIndex >= 0 && CDF[middleIndex] == value)
                    middleIndex--;
                middleIndex++;
                return middleIndex;
            }
        }
        if(CDF[middleIndex] > value)
            endIndex = middleIndex-1;
        else
            beginIndex = middleIndex+1;
    }
    return -1;
}
/*****************************
* CUDA Kernel Function to replace FindIndex
* param1: arrayX
* param2: arrayY
* param3: CDF
* param4: u
* param5: xj
* param6: yj
* param7: Nparticles
*****************************/
// Resampling kernel: each thread handles one particle i, finds the first CDF
// entry >= u[i] by linear search (sequential importance resampling), and
// writes the selected particle's coordinates into xj/yj.
// Launch: 1D grid, one thread per particle; threads with i >= Nparticles idle.
__global__ void kernel(double * arrayX, double * arrayY, double * CDF, double * u, double * xj, double * yj, int Nparticles){
int block_id = blockIdx.x;// + gridDim.x * blockIdx.y;
int i = blockDim.x * block_id + threadIdx.x;
if(i < Nparticles){
int index = -1;
int x;
for(x = 0; x < Nparticles; x++){
if(CDF[x] >= u[i]){
index = x;
break;
}
}
// u[i] exceeded every CDF entry: clamp to the last particle
if(index == -1){
index = Nparticles-1;
}
xj[i] = arrayX[index];
yj[i] = arrayY[index];
}
}
/**
* Takes in a double and returns an integer that approximates to that double
* @return if the mantissa < .5 => return value < input value; else return value > input value
*/
/**
 * Rounds a double to the nearest integer value (halves round up),
 * returned as a double.
 * @param value the value to round
 * @return the rounded value
 */
double roundDouble(double value){
    int newValue = (int)(value);
    if(value - newValue < .5)
        return newValue;
    else
        // BUG FIX: the original `return newValue++` post-increments a local
        // and therefore returns the UN-incremented value, so rounding up
        // never happened. Return newValue + 1 instead.
        return newValue + 1;
}
/**
* Set values of the 3D array to a newValue if that value is equal to the testValue
* @param testValue The value to be replaced
* @param newValue The value to replace testValue with
* @param array3D The image vector
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
*/
/**
 * Replaces every occurrence of testValue with newValue in a 3D array
 * stored as a flat buffer of shape (*dimX, *dimY, *dimZ).
 * @param testValue the value to be replaced
 * @param newValue the replacement value
 * @param array3D the flat 3D buffer (modified in place)
 * @param dimX/dimY/dimZ pointers to the three dimensions
 */
void setIf(int testValue, int newValue, int * array3D, int * dimX, int * dimY, int * dimZ){
    int nx = *dimX, ny = *dimY, nz = *dimZ;
    for(int x = 0; x < nx; x++){
        for(int y = 0; y < ny; y++){
            for(int z = 0; z < nz; z++){
                int idx = x * ny * nz + y * nz + z;
                if(array3D[idx] == testValue)
                    array3D[idx] = newValue;
            }
        }
    }
}
/**
* Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG)
* @see http://en.wikipedia.org/wiki/Linear_congruential_generator
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a uniformly distributed number [0, 1)
*/
// Advances seed[index] one LCG step (seed = (A*seed + C) mod M, using the
// file-level A, C, M constants) and returns a value in [0, 1).
// NOTE(review): A*seed[index] can overflow int, and `num % M` is negative
// for negative num — the fabs() on the return masks the sign; presumably
// this matches the original benchmark's intent, but confirm if porting.
double randu(int * seed, int index)
{
int num = A*seed[index] + C;
seed[index] = num % M;
return fabs(seed[index]/((double) M));
}
/**
* Generates a normally distributed random number using the Box-Muller transformation
* @note This function is thread-safe
* @param seed The seed array
* @param index The specific index of the seed to be advanced
* @return a double representing random number generated using the Box-Muller algorithm
* @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution
*/
double randn(int * seed, int index){
/*Box-Muller algorithm*/
double u = randu(seed, index);
double v = randu(seed, index);
double cosine = cos(2*PI*v);
double rt = -2*log(u);
return sqrt(rt)*cosine;
}
/**
* Sets values of 3D matrix using randomly generated numbers from a normal distribution
* @param array3D The video to be modified
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param seed The seed array
*/
// Adds integer-quantized Gaussian noise (sigma ~ 5) to every voxel of the
// flat 3D buffer. Always advances seed[0] only, so the noise sequence is a
// single serial LCG stream.
void addNoise(int * array3D, int * dimX, int * dimY, int * dimZ, int * seed){
int x, y, z;
for(x = 0; x < *dimX; x++){
for(y = 0; y < *dimY; y++){
for(z = 0; z < *dimZ; z++){
array3D[x * *dimY * *dimZ + y * *dimZ + z] = array3D[x * *dimY * *dimZ + y * *dimZ + z] + (int)(5*randn(seed, 0));
}
}
}
}
/**
* Fills a radius x radius matrix representing the disk
* @param disk The pointer to the disk to be made
* @param radius The radius of the disk to be made
*/
/**
 * Fills a (2*radius-1) x (2*radius-1) matrix with a disk-shaped structuring
 * element: 1 inside the disk, 0 outside.
 * @param disk output buffer of diameter*diameter ints (fully overwritten)
 * @param radius the radius of the disk
 */
void strelDisk(int * disk, int radius)
{
    int diameter = radius*2 - 1;
    int x, y;
    for(x = 0; x < diameter; x++){
        for(y = 0; y < diameter; y++){
            double distance = sqrt(pow((double)(x-radius+1),2) + pow((double)(y-radius+1),2));
            // BUG FIX: the original only wrote the 1s, leaving out-of-disk
            // cells uninitialized; callers pass a malloc'd buffer and then
            // read every cell (e.g. to count ones), which was undefined.
            disk[x*diameter + y] = (distance < radius) ? 1 : 0;
        }
    }
}
/**
* Dilates the provided video
* @param matrix The video to be dilated
* @param posX The x location of the pixel to be dilated
* @param posY The y location of the pixel to be dilated
* @param poxZ The z location of the pixel to be dilated
* @param dimX The x dimension of the frame
* @param dimY The y dimension of the frame
* @param dimZ The number of frames
* @param error The error radius
*/
/**
 * Dilates a single voxel: sets to 1 every cell of frame posZ within
 * Euclidean distance `error` of (posX, posY), clamped to the frame bounds.
 * @param matrix flat (dimX, dimY, dimZ) buffer, modified in place
 * @param posX/posY/posZ the voxel being dilated
 * @param dimX/dimY/dimZ buffer dimensions
 * @param error the dilation radius
 */
void dilate_matrix(int * matrix, int posX, int posY, int posZ, int dimX, int dimY, int dimZ, int error)
{
    // Clamp the candidate window to the frame.
    int startX = posX - error;
    if (startX < 0) startX = 0;
    int startY = posY - error;
    if (startY < 0) startY = 0;
    int endX = posX + error;
    if (endX > dimX) endX = dimX;
    int endY = posY + error;
    if (endY > dimY) endY = dimY;
    for (int x = startX; x < endX; x++) {
        for (int y = startY; y < endY; y++) {
            double distance = sqrt( pow((double)(x-posX),2) + pow((double)(y-posY),2) );
            if (distance < error)
                matrix[x*dimY*dimZ + y*dimZ + posZ] = 1;
        }
    }
}
/**
* Dilates the target matrix using the radius as a guide
* @param matrix The reference matrix
* @param dimX The x dimension of the video
* @param dimY The y dimension of the video
* @param dimZ The z dimension of the video
* @param error The error radius to be dilated
* @param newMatrix The target matrix
*/
/**
 * Morphological dilation of the whole video: every voxel equal to 1 in
 * `matrix` is dilated into `newMatrix` with radius `error`.
 * @param matrix the reference video (read only)
 * @param dimX/dimY/dimZ video dimensions
 * @param error the dilation radius
 * @param newMatrix the target buffer receiving the dilated result
 */
void imdilate_disk(int * matrix, int dimX, int dimY, int dimZ, int error, int * newMatrix)
{
    for (int z = 0; z < dimZ; z++) {
        for (int x = 0; x < dimX; x++) {
            for (int y = 0; y < dimY; y++) {
                int isSet = (matrix[x*dimY*dimZ + y*dimZ + z] == 1);
                if (isSet)
                    dilate_matrix(newMatrix, x, y, z, dimX, dimY, dimZ, error);
            }
        }
    }
}
/**
* Fills a 2D array describing the offsets of the disk object
* @param se The disk object
* @param numOnes The number of ones in the disk
* @param neighbors The array that will contain the offsets
* @param radius The radius used for dilation
*/
/**
 * Extracts the (dy, dx) offsets of every set cell of the disk structuring
 * element, relative to its center.
 * @param se the disk structuring element (diameter x diameter)
 * @param numOnes number of set cells (size of neighbors is numOnes*2)
 * @param neighbors output: pairs (column offset, row offset) per set cell
 * @param radius the radius used to build the disk
 */
void getneighbors(int * se, int numOnes, double * neighbors, int radius){
    int center = radius - 1;
    int diameter = radius*2 -1;
    int count = 0;   // running pair index into neighbors
    for (int row = 0; row < diameter; row++) {
        for (int col = 0; col < diameter; col++) {
            if (se[row*diameter + col]) {
                neighbors[count*2] = (int)(col - center);
                neighbors[count*2 + 1] = (int)(row - center);
                count++;
            }
        }
    }
}
/**
* The synthetic video sequence we will work with here is composed of a
* single moving object, circular in shape (fixed radius)
* The motion here is a linear motion
* the foreground intensity and the backgrounf intensity is known
* the image is corrupted with zero mean Gaussian noise
* @param I The video itself
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames of the video
* @param seed The seed array used for number generation
*/
// Synthesizes the test video: a single disk-shaped object moving linearly
// (+1 in x, -2 in y per frame), dilated with radius 5, with known
// foreground (228) / background (100) intensities plus Gaussian noise.
void videoSequence(int * I, int IszX, int IszY, int Nfr, int * seed){
int k;
int max_size = IszX*IszY*Nfr;
/*get object centers*/
int x0 = (int)roundDouble(IszY/2.0);
int y0 = (int)roundDouble(IszX/2.0);
I[x0 *IszY *Nfr + y0 * Nfr + 0] = 1;
/*move point: linear motion, wrapping to index 0 when it leaves the volume*/
int xk, yk, pos;
for(k = 1; k < Nfr; k++){
xk = abs(x0 + (k-1));
yk = abs(y0 - 2*(k-1));
pos = yk * IszY * Nfr + xk *Nfr + k;
if(pos >= max_size)
pos = 0;
I[pos] = 1;
}
/*dilate matrix into a scratch buffer, then copy it back*/
int * newMatrix = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
imdilate_disk(I, IszX, IszY, Nfr, 5, newMatrix);
int x, y;
for(x = 0; x < IszX; x++){
for(y = 0; y < IszY; y++){
for(k = 0; k < Nfr; k++){
I[x*IszY*Nfr + y*Nfr + k] = newMatrix[x*IszY*Nfr + y*Nfr + k];
}
}
}
free(newMatrix);
/*define background (100) and foreground (228) intensities*/
setIf(0, 100, I, &IszX, &IszY, &Nfr);
setIf(1, 228, I, &IszX, &IszY, &Nfr);
/*add noise*/
addNoise(I, &IszX, &IszY, &Nfr, seed);
}
/**
* Determines the likelihood sum based on the formula: SUM( (IK[IND] - 100)^2 - (IK[IND] - 228)^2)/ 100
* @param I The 3D matrix
* @param ind The current ind array
* @param numOnes The length of ind array
* @return A double representing the sum
*/
/**
 * Likelihood ratio sum: SUM(((I[ind]-100)^2 - (I[ind]-228)^2) / 50) over the
 * provided indices (100 = background intensity, 228 = foreground).
 * @param I the 3D video buffer
 * @param ind the indices to sample
 * @param numOnes the number of indices
 * @return the accumulated likelihood sum
 */
double calcLikelihoodSum(int * I, int * ind, int numOnes){
    double total = 0.0;
    for (int k = 0; k < numOnes; k++) {
        total += (pow((double)(I[ind[k]] - 100),2) - pow((double)(I[ind[k]]-228),2))/50.0;
    }
    return total;
}
/**
* Finds the first element in the CDF that is greater than or equal to the provided value and returns that index
* @note This function uses sequential search
* @param CDF The CDF
* @param lengthCDF The length of CDF
* @param value The value to be found
* @return The index of value in the CDF; if value is never found, returns the last index
*/
/**
 * Sequential search for the first CDF entry >= value.
 * @param CDF the cumulative distribution (non-decreasing)
 * @param lengthCDF length of CDF
 * @param value the threshold to locate
 * @return the first index with CDF[index] >= value, or lengthCDF-1 if none
 */
int findIndex(double * CDF, int lengthCDF, double value){
    for (int pos = 0; pos < lengthCDF; pos++) {
        if (CDF[pos] >= value)
            return pos;
    }
    // value exceeded every entry: fall back to the last index
    return lengthCDF-1;
}
/**
* The implementation of the particle filter using OpenMP for many frames
* @see http://openmp.org/wp/
* @note This function is designed to work with a video of several frames. In addition, it references a provided MATLAB function which takes the video, the objxy matrix and the x and y arrays as arguments and returns the likelihoods
* @param I The video to be run
* @param IszX The x dimension of the video
* @param IszY The y dimension of the video
* @param Nfr The number of frames
* @param seed The seed array used for random number generation
* @param Nparticles The number of particles to be used
*/
// Particle filter main loop: tracks the synthetic object through Nfr frames
// using Nparticles particles. The motion/likelihood/weight steps run on the
// host; only the resampling index search runs on the GPU (`kernel`).
// NOTE(review): none of the cudaMemcpy calls below are error-checked, and
// cudaThreadSynchronize() is deprecated (cudaDeviceSynchronize is the
// modern equivalent) — left as-is to preserve behavior.
void particleFilter(int * I, int IszX, int IszY, int Nfr, int * seed, int Nparticles){
int max_size = IszX*IszY*Nfr;
long long start = get_time();
//original particle centroid
double xe = roundDouble(IszY/2.0);
double ye = roundDouble(IszX/2.0);
//expected object locations, compared to center
int radius = 5;
int diameter = radius*2 - 1;
int * disk = (int *)malloc(diameter*diameter*sizeof(int));
strelDisk(disk, radius);
// count the set cells of the structuring element
int countOnes = 0;
int x, y;
for(x = 0; x < diameter; x++){
for(y = 0; y < diameter; y++){
if(disk[x*diameter + y] == 1)
countOnes++;
}
}
double * objxy = (double *)malloc(countOnes*2*sizeof(double));
getneighbors(disk, countOnes, objxy, radius);
long long get_neighbors = get_time();
printf("TIME TO GET NEIGHBORS TOOK: %f\n", elapsed_time(start, get_neighbors));
//initial weights are all equal (1/Nparticles)
double * weights = (double *)malloc(sizeof(double)*Nparticles);
for(x = 0; x < Nparticles; x++){
weights[x] = 1/((double)(Nparticles));
}
long long get_weights = get_time();
printf("TIME TO GET WEIGHTSTOOK: %f\n", elapsed_time(get_neighbors, get_weights));
//initial likelihood to 0.0
double * likelihood = (double *)malloc(sizeof(double)*Nparticles);
double * arrayX = (double *)malloc(sizeof(double)*Nparticles);
double * arrayY = (double *)malloc(sizeof(double)*Nparticles);
double * xj = (double *)malloc(sizeof(double)*Nparticles);
double * yj = (double *)malloc(sizeof(double)*Nparticles);
double * CDF = (double *)malloc(sizeof(double)*Nparticles);
//GPU copies of arrays
double * arrayX_GPU;
double * arrayY_GPU;
double * xj_GPU;
double * yj_GPU;
double * CDF_GPU;
int * ind = (int*)malloc(sizeof(int)*countOnes);
double * u = (double *)malloc(sizeof(double)*Nparticles);
double * u_GPU;
//CUDA memory allocation
check_error(cudaMalloc((void **) &arrayX_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &arrayY_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &xj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &yj_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &CDF_GPU, sizeof(double)*Nparticles));
check_error(cudaMalloc((void **) &u_GPU, sizeof(double)*Nparticles));
// all particles start at the object's initial centroid
for(x = 0; x < Nparticles; x++){
arrayX[x] = xe;
arrayY[x] = ye;
}
int k;
//double * Ik = (double *)malloc(sizeof(double)*IszX*IszY);
int indX, indY;
for(k = 1; k < Nfr; k++){
long long set_arrays = get_time();
//printf("TIME TO SET ARRAYS TOOK: %f\n", elapsed_time(get_weights, set_arrays));
//apply motion model
//draws sample from motion model (random walk). The only prior information
//is that the object moves 2x as fast as in the y direction
for(x = 0; x < Nparticles; x++){
arrayX[x] = arrayX[x] + 1.0 + 5.0*randn(seed, x);
arrayY[x] = arrayY[x] - 2.0 + 2.0*randn(seed, x);
}
//particle filter likelihood
long long error = get_time();
printf("TIME TO SET ERROR TOOK: %f\n", elapsed_time(set_arrays, error));
for(x = 0; x < Nparticles; x++){
//compute the likelihood: remember our assumption is that you know
// foreground and the background image intensity distribution.
// Notice that we consider here a likelihood ratio, instead of
// p(z|x). It is possible in this case. why? a hometask for you.
//calc ind: sample indices under the disk footprint, wrapped to 0 if out of range
for(y = 0; y < countOnes; y++){
indX = roundDouble(arrayX[x]) + objxy[y*2 + 1];
indY = roundDouble(arrayY[x]) + objxy[y*2];
ind[y] = fabs(indX*IszY*Nfr + indY*Nfr + k);
if(ind[y] >= max_size)
ind[y] = 0;
}
likelihood[x] = calcLikelihoodSum(I, ind, countOnes);
likelihood[x] = likelihood[x]/countOnes;
}
long long likelihood_time = get_time();
printf("TIME TO GET LIKELIHOODS TOOK: %f\n", elapsed_time(error, likelihood_time));
// update & normalize weights
// using equation (63) of Arulampalam Tutorial
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x] * exp(likelihood[x]);
}
long long exponential = get_time();
printf("TIME TO GET EXP TOOK: %f\n", elapsed_time(likelihood_time, exponential));
double sumWeights = 0;
for(x = 0; x < Nparticles; x++){
sumWeights += weights[x];
}
long long sum_time = get_time();
printf("TIME TO SUM WEIGHTS TOOK: %f\n", elapsed_time(exponential, sum_time));
for(x = 0; x < Nparticles; x++){
weights[x] = weights[x]/sumWeights;
}
long long normalize = get_time();
printf("TIME TO NORMALIZE WEIGHTS TOOK: %f\n", elapsed_time(sum_time, normalize));
xe = 0;
ye = 0;
// estimate the object location by expected values
for(x = 0; x < Nparticles; x++){
xe += arrayX[x] * weights[x];
ye += arrayY[x] * weights[x];
}
long long move_time = get_time();
printf("TIME TO MOVE OBJECT TOOK: %f\n", elapsed_time(normalize, move_time));
printf("XE: %lf\n", xe);
printf("YE: %lf\n", ye);
// distance of the estimate from the known initial position (diagnostic)
double distance = sqrt( pow((double)(xe-(int)roundDouble(IszY/2.0)),2) + pow((double)(ye-(int)roundDouble(IszX/2.0)),2) );
printf("%lf\n", distance);
//display(hold off for now)
//pause(hold off for now)
//resampling: build the CDF of the normalized weights
CDF[0] = weights[0];
for(x = 1; x < Nparticles; x++){
CDF[x] = weights[x] + CDF[x-1];
}
long long cum_sum = get_time();
printf("TIME TO CALC CUM SUM TOOK: %f\n", elapsed_time(move_time, cum_sum));
// systematic resampling: u[x] = u1 + x/Nparticles, u1 uniform in [0, 1/N)
double u1 = (1/((double)(Nparticles)))*randu(seed, 0);
for(x = 0; x < Nparticles; x++){
u[x] = u1 + x/((double)(Nparticles));
}
long long u_time = get_time();
printf("TIME TO CALC U TOOK: %f\n", elapsed_time(cum_sum, u_time));
long long start_copy = get_time();
//CUDA memory copying from CPU memory to GPU memory
// NOTE(review): xj/yj are copied host->device although the kernel only
// WRITES them (on the first iteration this copies uninitialized malloc'd
// memory) — wasteful but harmless, as the kernel overwrites every slot.
cudaMemcpy(arrayX_GPU, arrayX, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(arrayY_GPU, arrayY, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(xj_GPU, xj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(yj_GPU, yj, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(CDF_GPU, CDF, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
cudaMemcpy(u_GPU, u, sizeof(double)*Nparticles, cudaMemcpyHostToDevice);
long long end_copy = get_time();
//Set number of threads
int num_blocks = ceil((double) Nparticles/(double) threads_per_block);
//KERNEL FUNCTION CALL: GPU performs the resampling index search
kernel <<< num_blocks, threads_per_block >>> (arrayX_GPU, arrayY_GPU, CDF_GPU, u_GPU, xj_GPU, yj_GPU, Nparticles);
cudaThreadSynchronize();
long long start_copy_back = get_time();
//CUDA memory copying back from GPU to CPU memory
cudaMemcpy(yj, yj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
cudaMemcpy(xj, xj_GPU, sizeof(double)*Nparticles, cudaMemcpyDeviceToHost);
long long end_copy_back = get_time();
printf("SENDING TO GPU TOOK: %lf\n", elapsed_time(start_copy, end_copy));
printf("CUDA EXEC TOOK: %lf\n", elapsed_time(end_copy, start_copy_back));
printf("SENDING BACK FROM GPU TOOK: %lf\n", elapsed_time(start_copy_back, end_copy_back));
long long xyj_time = get_time();
printf("TIME TO CALC NEW ARRAY X AND Y TOOK: %f\n", elapsed_time(u_time, xyj_time));
for(x = 0; x < Nparticles; x++){
//reassign arrayX and arrayY and reset the weights to uniform
arrayX[x] = xj[x];
arrayY[x] = yj[x];
weights[x] = 1/((double)(Nparticles));
}
long long reset = get_time();
printf("TIME TO RESET WEIGHTS TOOK: %f\n", elapsed_time(xyj_time, reset));
}
//CUDA freeing of memory
cudaFree(u_GPU);
cudaFree(CDF_GPU);
cudaFree(yj_GPU);
cudaFree(xj_GPU);
cudaFree(arrayY_GPU);
cudaFree(arrayX_GPU);
//free memory
free(disk);
free(objxy);
free(weights);
free(likelihood);
free(arrayX);
free(arrayY);
free(xj);
free(yj);
free(CDF);
free(u);
free(ind);
}
// Entry point: parses "-x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>",
// synthesizes the test video and runs the particle filter, printing timings.
int main(int argc, char * argv[]){
    char* usage = "naive.out -x <dimX> -y <dimY> -z <Nfr> -np <Nparticles>";
    //check number of arguments
    if(argc != 9)
    {
        printf("%s\n", usage);
        return 0;
    }
    //check args deliminators
    if( strcmp( argv[1], "-x" ) || strcmp( argv[3], "-y" ) || strcmp( argv[5], "-z" ) || strcmp( argv[7], "-np" ) ) {
        printf( "%s\n",usage );
        return 0;
    }
    int IszX, IszY, Nfr, Nparticles;
    // BUG FIX (all four parses): sscanf returns the number of items
    // converted (0 for non-numeric input) and EOF only for an empty string,
    // so `== EOF` never caught bad input. Check for `!= 1` instead.
    if( sscanf( argv[2], "%d", &IszX ) != 1 ) {
        printf("ERROR: dimX input is incorrect");
        return 0;
    }
    if( IszX <= 0 ) {
        printf("dimX must be > 0\n");
        return 0;
    }
    if( sscanf( argv[4], "%d", &IszY ) != 1 ) {
        printf("ERROR: dimY input is incorrect");
        return 0;
    }
    if( IszY <= 0 ) {
        printf("dimY must be > 0\n");
        return 0;
    }
    if( sscanf( argv[6], "%d", &Nfr ) != 1 ) {
        printf("ERROR: Number of frames input is incorrect");
        return 0;
    }
    if( Nfr <= 0 ) {
        printf("number of frames must be > 0\n");
        return 0;
    }
    if( sscanf( argv[8], "%d", &Nparticles ) != 1 ) {
        printf("ERROR: Number of particles input is incorrect");
        return 0;
    }
    if( Nparticles <= 0 ) {
        printf("Number of particles must be > 0\n");
        return 0;
    }
    //establish seed: one LCG seed per particle (seed[0] is always 0)
    int * seed = (int *)malloc(sizeof(int)*Nparticles);
    int i;
    for(i = 0; i < Nparticles; i++)
        seed[i] = time(0)*i;
    //allocate the video volume
    int * I = (int *)malloc(sizeof(int)*IszX*IszY*Nfr);
    long long start = get_time();
    //synthesize the test video
    videoSequence(I, IszX, IszY, Nfr, seed);
    long long endVideoSequence = get_time();
    printf("VIDEO SEQUENCE TOOK %f\n", elapsed_time(start, endVideoSequence));
    //run the particle filter
    particleFilter(I, IszX, IszY, Nfr, seed, Nparticles);
    long long endParticleFilter = get_time();
    printf("PARTICLE FILTER TOOK %f\n", elapsed_time(endVideoSequence, endParticleFilter));
    printf("ENTIRE PROGRAM TOOK %f\n", elapsed_time(start, endParticleFilter));
    free(seed);
    free(I);
    return 0;
}
|
7,360 | // to compile on cooley: nvcc -arch sm_30 -o mandelbrot mandelbrot.cu -lm
// to run on cooley: ./mandelbrot
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define MXITER 1000
#define NPOINTS 2048
// we will use these later to specify a 16x16 thread-block size
#define TX 16
#define TY 16
typedef struct {
double r;
double i;
}d_complex;
// return 1 if c is outside the mandelbrot set
// return 0 if c is inside the mandelbrot set
// TASK 1: annotate this as a device function
// Mandelbrot membership test for point c:
// returns 1 if the orbit escapes (|z| > 2) within MXITER iterations
// (c is outside the set), 0 otherwise (c is inside the set).
__device__ int testpoint(d_complex c){
    d_complex z = c;
    for (int iter = 0; iter < MXITER; ++iter) {
        // z <- z^2 + c
        double newReal = (z.r*z.r) - (z.i*z.i) + c.r;
        z.i = z.r*z.i*2. + c.i;
        z.r = newReal;
        if ((z.r*z.r + z.i*z.i) > 4.0)
            return 1;   // escaped: outside the set
    }
    return 0;   // never escaped: inside the set
}
// FREEBIE: partial reduction
__device__ void partialReduction(int outside, int *outsideCounts){
__shared__ int s_outside[TX*TY];
int t = threadIdx.x + threadIdx.y*TX;
s_outside[t] = outside;
int alive = TX*TY;
while(alive>1){
__syncthreads();
alive /= 2;
if(t<alive && t+alive<TX*TY)
s_outside[t] += s_outside[t+alive];
}
if(t==0){
int b = blockIdx.x + gridDim.x*blockIdx.y;
outsideCounts[b] = s_outside[0];
}
}
// TASK 2: make this a kernel that processes
// (i,j) \in [blockIdx.x*blockDim.x,(blockIdx.x+1)*blockDim.x)
// x [blockIdx.y*blockDim.y,(blockIdx.y+1)*blockDim.y)
// TASK 2a: annotate this to indicate it is a kernel and change return type to void
// Kernel: each thread tests one sample point of the [-2, 0.5] x [0, 1.125]
// region (plus a small eps offset) and the block-level reduction stores the
// per-block count of escaping points into outsideCounts.
__global__ void mandeloutside(int * outsideCounts){
double eps = 1e-5;
d_complex c;
// TASK 2b: replace loop structures with (i,j) defined from blockIdx, blockDim, threadIdx
// for(i=0;i<NPOINTS;i++){
// for(j=0;j<NPOINTS;j++){
int i = threadIdx.x+blockIdx.x*TX;
int j = threadIdx.y+blockIdx.y*TY;
c.r = -2. + 2.5*((double)i)/(double)(NPOINTS)+eps;
c.i = 1.125*((double)j)/(double)(NPOINTS)+eps;
// out-of-range threads contribute 0 but still participate in the reduction
int outside = 0;
if(i<NPOINTS && j<NPOINTS){
outside = testpoint(c);
}
// }
// }
// FREEBIE: reduction of TX*TY values to one value on each thread-block
partialReduction(outside, outsideCounts);
}
// Launches the Mandelbrot-area kernel, reduces the per-block counts on the
// host, and prints the kernel time, the escape count, and the estimated area.
int main(int argc, char **argv){
    // grid/block dimensions: one thread per sample point, TX x TY blocks
    int GX = (NPOINTS+TX-1)/TX;
    int GY = (NPOINTS+TY-1)/TY;
    dim3 dimGrid(GX,GY,1);
    dim3 dimBlock(TX,TY,1);
    // DEVICE array: one partial count per thread-block
    int *c_outsideCounts;
    cudaMalloc(&c_outsideCounts, GX*GY*sizeof(int));
    // CUDA events for timing the kernel
    cudaEvent_t start, end;
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);
    mandeloutside <<< dimGrid, dimBlock >>> (c_outsideCounts);
    float elapsed;
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsed, start, end);
    elapsed /= 1000;   // ms -> seconds
    printf("elapsed = %g\n", elapsed);
    // HOST array receiving the per-block counts
    int *h_outsideCounts = (int*) calloc(GX*GY, sizeof(int));
    cudaMemcpy(h_outsideCounts, c_outsideCounts, GX*GY*sizeof(int), cudaMemcpyDeviceToHost);
    // final reduction on the host
    int numoutside = 0;
    for(int n=0;n<GX*GY;++n){
        numoutside += h_outsideCounts[n];
    }
    // BUG FIX: the label was misspelled "numoustide"
    printf("numoutside = %d\n", numoutside);
    // area of the sampled rectangle is 2.5 x 1.125; doubled for symmetry
    double area = 2.*2.5*1.125*(NPOINTS*NPOINTS-numoutside)/(NPOINTS*NPOINTS);
    printf("area = %17.15lf\n", area);
    // BUG FIX: release the device buffer, host buffer and events
    // (all were leaked in the original)
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(c_outsideCounts);
    free(h_outsideCounts);
    return 0;
}
|
7,361 |
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//libreria cuda
#include <cuda.h>
// CUDA runtime
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Helper functions and utilities to work with CUDA
//#include <helper_functions.h>
//#include <helper_cuda.h>
/*
* somma di vettori in cpu
*/
#define LOG_CUDA "[CUDA] "
// CPU reference implementation: element-wise vector addition
// h_C[i] = h_A[i] + h_B[i] for i in [0, n).
void vecAdd(float* h_A, float* h_B, float* h_C, int n)
{
    for (int idx = 0; idx < n; ++idx) {
        h_C[idx] = h_A[idx] + h_B[idx];
    }
}
// Allocates `size` bytes of device memory into *source, logging the request;
// exits the process on any CUDA allocation failure.
void cudaAllocaMemoria(void** source, int size)
{
printf("%salloco memoria sulla GPU grandezza = %d \n",LOG_CUDA, size);
cudaError_t err = cudaMalloc(source, size);
if(err != cudaSuccess){
printf("%s%s in %s alla linea %d\n", LOG_CUDA, cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
// Copies `byteSize` bytes between host and device in the given direction,
// logging the operation; exits the process on any CUDA copy failure.
void cudaCopiaMemoria(float* dst, float* src, int byteSize, enum cudaMemcpyKind dir)
{
    // human-readable direction name for the log line
    const char* a;
    switch(dir){
    case cudaMemcpyHostToDevice : a = "cudaMemcpyHostToDevice"; break;
    case cudaMemcpyDeviceToHost : a = "cudaMemcpyDeviceToHost"; break;
    // BUG FIX: `a` was left uninitialized for any other direction and then
    // passed to printf (undefined behavior) — provide a default label.
    default : a = "cudaMemcpy(altro)"; break;
    }
    printf("%sCopio i dati modo = %s grandezza = %d \n", LOG_CUDA, a, byteSize);
    cudaError_t err = cudaMemcpy(dst, src, byteSize, dir);
    if(err != cudaSuccess){
        printf("%s%s in %s alla linea %d\n",LOG_CUDA, cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
}
//__global__: idica che il metodo seguente è un metodo kernel. E che quindi può essere chiamata da un
// metodo dell'host per generare una griglia di thread sul device. Puo essere chiamata solo
// dal codice dell host
// __global__: kernel entry point — callable from the host to launch a grid
// of threads on the device. One thread per vector element; threads past n
// do nothing. NOTE(review): the per-thread printf is debug logging and will
// be extremely slow for large n.
__global__ void vecAddKernel(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
printf("%s thread numero: %d", LOG_CUDA, i);
if(i < n ) C[i] = A[i] + B[i];
}
// GPU vector addition: allocates device buffers, copies the inputs, launches
// vecAddKernel with one thread per element, copies the result back into h_C
// and releases the device memory.
void cudaVecAdd(float* h_A, float* h_B, float* h_C, int n)
{
    int size = n * sizeof(float);
    // BUG FIX: sizeof(float) is a size_t; printing it with %4.2f is
    // undefined behavior — use %zu. (Also fixed the "flaot" typo.)
    printf("%sgrandezza vettore = %d grandezza byte float=%zu\n",LOG_CUDA, n, sizeof(float));
    printf("%sdetermino quando memoria mi occore size = n * sizeof(float) = %d \n" ,LOG_CUDA, size);
    // device-memory addresses of the three vectors
    float *d_A, *d_B, *d_C;
    cudaAllocaMemoria((void **)&d_A, size);
    cudaAllocaMemoria((void **)&d_B, size);
    cudaAllocaMemoria((void **)&d_C, size);
    // copy the inputs host -> device
    cudaCopiaMemoria(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaCopiaMemoria(d_B, h_B, size, cudaMemcpyHostToDevice);
    printf("%sLancio dell'esecuzione in parallelo della funzione vecAdd\n", LOG_CUDA);
    // one thread per element, rounded up to whole 256-thread blocks
    int threadPerBlock = 256;
    int blockPerGrid = (n + threadPerBlock -1 ) / threadPerBlock;
    int tot = threadPerBlock * blockPerGrid;
    printf("%sblocchi = %d thread = %d tot = %d\n", LOG_CUDA, blockPerGrid, threadPerBlock, tot);
    vecAddKernel<<<blockPerGrid, threadPerBlock>>>(d_A, d_B, d_C, n);
    // BUG FIX: cudaThreadSynchronize() is deprecated (removed in recent
    // CUDA toolkits); cudaDeviceSynchronize() is the direct replacement.
    cudaDeviceSynchronize();
    printf("%sCopia C dalla memoria del device e rilascia la memoria del device\n", LOG_CUDA);
    // copy the result device -> host
    cudaCopiaMemoria(h_C, d_C, size, cudaMemcpyDeviceToHost);
    // release device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
#define LOG_MAIN "[MAIN] "
// Demo driver: adds two fixed 5-element vectors on the GPU and prints the
// result (a CPU reference call is left commented out for comparison).
int main(int argc, char **argv)
{
printf("%s--------Start---------------\n",LOG_MAIN);
int n = 5;
float a[5]={1.0, 50.6, 89.4, 2.3, 5.7};
float b[5]={3.0, 30.6, 8.4, 25.3, 25.7};
float c[5];
//vecAdd(a, b, c, n);
//for(int i=0; i<n; i++) printf("index: %d, float: %4.2f \n", i, c[i]);
cudaVecAdd(a, b, c, 5);
for(int i=0; i<n; i++) printf("%sindex: %d, float: %.1f \n",LOG_MAIN,i, c[i]);
printf("%s--------End-----------------\n",LOG_MAIN);
return 0;
};
|
7,362 | #include "includes.h"
// Grid-stride downsampling kernel: dst(r, c) = src(r * y_stride, c * x_stride).
// `n` is the number of elements of dst; `colssrc` is the source row width.
// Any 1D launch configuration is valid thanks to the stride loop.
__global__ void cu_downSample(const float *src, float* dst, const int y_stride, const int x_stride, const int colssrc, const int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
// destination width = ceil(colssrc / x_stride)
int colsdst = colssrc / x_stride;
if(colssrc % x_stride > 0) ++colsdst;
while(tid < n){
// destination (row, col) for this linear index
int cdst = tid % colsdst;
int rdst = tid / colsdst;
// corresponding strided source position
int rsrc = rdst * y_stride;
int csrc = cdst * x_stride;
dst[tid] = src[rsrc * colssrc + csrc];
tid += stride;
}
}
7,363 | #include <cstdio>
#include <cstdlib>
#include <vector>
// Histogram kernel: bucket[id] = number of keys equal to id.
// Launch: one block with `range` threads; requires at least N*sizeof(int)
// bytes of dynamic shared memory for the staged copy of key.
// NOTE(review): the launch site passes `n` bytes (not n*sizeof(int)) as the
// shared-memory size — confirm the launch configuration.
__global__ void put_bucket(int *bucket, int *key, int N) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
bucket[id] = 0;
__syncthreads();
extern __shared__ int sharedKey[];
// thread 0 stages all keys into shared memory; the barrier publishes them
if (id == 0) {
for (int i = 0; i < N; i++) sharedKey[i] = key[i];
}
__syncthreads();
// each thread counts occurrences of its own bucket id
for (int i = 0; i < N; i++) {
if (id == sharedKey[i]) bucket[id]++;
}
}
// Slide Lecture 5 P19
__global__ void scan(int *a, int *b, int N) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for(int j=1; j<N; j<<=1) {
b[i] = a[i];
__syncthreads();
a[i] += b[i-j];
__syncthreads();
}
}
// Scatter kernel: thread `id` writes its bucket value into the output range
// [bucket_sum[id] - bucket[id], bucket_sum[id]), where bucket_sum is the
// inclusive prefix sum of the bucket counts. Launch: one thread per bucket.
__global__ void bucket_sort(int *bucket, int *bucket_sum, int *key) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = 0; i < bucket[id]; i++) {
key[i + bucket_sum[id] - bucket[id]] = id;
}
}
// Bucket sort of n random keys in [0, range): histogram -> prefix sum ->
// scatter, each step a separate kernel over unified (managed) memory.
int main() {
    int n = 50;
    int range = 5;
    int *bucket, *bucket_sum, *bucket_temp, *key;
    cudaMallocManaged(&bucket, range * sizeof(int));
    cudaMallocManaged(&bucket_sum, range * sizeof(int));
    cudaMallocManaged(&bucket_temp, range * sizeof(int));
    cudaMallocManaged(&key, n * sizeof(int));
    for (int i=0; i<n; i++) {
        key[i] = rand() % range;
        printf("%d ",key[i]);
    }
    printf("\n");
    // BUG FIX: the third launch parameter is the dynamic shared-memory size
    // in BYTES; put_bucket stages n ints, so pass n * sizeof(int), not n.
    put_bucket<<<1, range, n * sizeof(int)>>>(bucket, key, n);
    cudaDeviceSynchronize();
    cudaMemcpy(bucket_sum, bucket, range * sizeof(int), cudaMemcpyDefault);
    scan<<<1, range>>>(bucket_sum, bucket_temp, range);
    cudaDeviceSynchronize();
    bucket_sort<<<1, range>>>(bucket, bucket_sum, key);
    cudaDeviceSynchronize();
    for (int i=0; i<n; i++) {
        printf("%d ",key[i]);
    }
    printf("\n");
    cudaFree(bucket);
    cudaFree(bucket_sum);
    cudaFree(bucket_temp);
    cudaFree(key);
}
|
7,364 | //The max possible number of roots
#define DEV_MAX_ROOTS_NUM 3
//Number of threads per block.
#define NT 1024
//The array index in global memory.
__device__ int devArrayIndex;
//The answer array stored roots in global memory
__device__ unsigned long long int devAnswer[DEV_MAX_ROOTS_NUM];
/**
* Device kernel to find (a) cube root(s) for a given integer c with a modular n.
* <P>
* Called with a one-dimensional grid of one-dimensional blocks.
*
* @param c The mod cube (input).
* @param n The module (input).
*
* @author Junan Zhao
* @version 26-Nov-2018
*/
extern "C" __global__ void modCubeRoot( int c, int n)
{
 //Determine number of threads and this thread's m (test number).
 unsigned long long m = blockIdx.x*NT + threadIdx.x;
 unsigned long long size = gridDim.x*NT;
 for(; m<n; m+=size) //use loop to cover all range of n if n is a pretty large integer cannot covered by one round
 {
  //compute m^3 mod n in two steps to avoid 64-bit overflow of m^3
  unsigned long long temp = m*m;
  temp = temp%n;
  temp = temp*m;
  temp = temp%n;
  if(c==(int)temp) //once found a root
  {
   int oldIndex = atomicAdd(&devArrayIndex,1);
   // fix: guard the store — without it, more than DEV_MAX_ROOTS_NUM roots
   // would write past the end of devAnswer[].
   if (oldIndex < DEV_MAX_ROOTS_NUM)
    devAnswer[oldIndex] = m;
  }
 }
}
7,365 | #include "includes.h"
// Skews the lower triangle of input_itemsets into tmp:
// tmp[r][c] = input_itemsets[r-c][c] when c <= r, else 0.
__global__ void marshalling1(int *input_itemsets, int *tmp, int max_rows, int max_cols)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= max_rows || col >= max_cols) return;
    tmp[row * max_cols + col] = (col <= row)
        ? input_itemsets[(row - col) * max_cols + col]
        : 0;
}
7,366 | #include <iostream>
#include <math.h>
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
using namespace std;
// global variable definition
// GPU timers using CUDA events
float unified = 0.0f, traditional = 0.0f, unified_initD = 0.0f;
// CUDA kernel: element-wise y[i] = x[i] + y[i], one thread per element,
// guarded against the grid overshooting N.
__global__ void add(int N, float *x, float *y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        y[i] = x[i] + y[i];
}
// Device-side initialization: x[i] = 1, y[i] = 2 for i < N.
__global__ void init(int N, float *x, float*y) {
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= N) return;
    x[i] = 1.0f;
    y[i] = 2.0f;
}
// Debug kernel: each launched thread prints a greeting (parameters unused).
__global__ void print(int N, float *x, float*y){
    (void)N; (void)x; (void)y;
    printf("Hello from gpu!...\n");
}
// Adds two unified-memory vectors (x = 1s, y = 2s, initialized on the host)
// and reports the maximum deviation of y from the expected 3.0f.
void unifiedVectorAdd(){
    const int N = 999999;
    float *x, *y;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));
    // Host-side initialization; pages migrate to the device on first touch.
    for (int i = 0; i < N; ++i) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    const int blockSize = 256;
    const int numBlocks = (N + blockSize - 1) / blockSize;   // ceil-div
    add<<<numBlocks, blockSize>>>(N, x, y);
    cudaDeviceSynchronize();   // required before the host reads y
    // Verify: every element should now be 3.0f.
    float maxError = 0.0f;
    for (int i = 0; i < N; ++i)
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    std::cout << "Max error: " << maxError << std::endl;
    cudaFree(x);
    cudaFree(y);
}
// Same experiment as unifiedVectorAdd, but the inputs are initialized by a
// device kernel instead of the host.
void unifiedInitDevice(){
    int N = 999999;
    cout << "N: " << N << endl;
    float *x, *y;
    // Allocate Unified Memory -- accessible from CPU or GPU
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));
    int threads = 1024;
    int blocks = (N + threads - 1) / threads;
    cout << "Threads: " <<threads << endl;
    cout << "blocks: " << blocks << endl;
    // fix: execution configuration is <<<gridDim, blockDim>>> — the original
    // passed <<<threads, blocks>>>, i.e. the arguments swapped.
    init<<<blocks, threads>>>(N, x, y);
    add<<<blocks, threads>>>(N, x, y);
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        // fix: was "maxError +=", which accumulated instead of taking the max
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;
    // Free memory
    cudaFree(x);
    cudaFree(y);
}
int main(void)
{
    // Only the device-side-initialization variant is exercised.
    unifiedInitDevice();
    return 0;
}
|
7,367 | #include <cuda.h>
#include <stdio.h>
// Queries three per-device limits, shrinks them all to 9999 bytes, and
// queries again to show the effect of cudaDeviceSetLimit.
int main(int argc, char** argv) {
    size_t value = 0;
    cudaDeviceGetLimit(&value, cudaLimitStackSize);
    printf("cudaLimitStackSize: %u\n", (unsigned)value);
    cudaDeviceGetLimit(&value, cudaLimitPrintfFifoSize);
    printf("cudaLimitPrintfFifoSize: %u\n", (unsigned)value);
    cudaDeviceGetLimit(&value, cudaLimitMallocHeapSize);
    printf("cudaLimitMallocHeapSize: %u\n", (unsigned)value);
    // request a small value for every limit, then re-read each one
    const size_t requested = 9999;
    cudaDeviceSetLimit(cudaLimitStackSize, requested);
    cudaDeviceSetLimit(cudaLimitPrintfFifoSize, requested);
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, requested);
    value = 0;
    cudaDeviceGetLimit(&value, cudaLimitStackSize);
    printf("New cudaLimitStackSize: %u\n", (unsigned)value);
    cudaDeviceGetLimit(&value, cudaLimitPrintfFifoSize);
    printf("New cudaLimitPrintfFifoSize: %u\n", (unsigned)value);
    cudaDeviceGetLimit(&value, cudaLimitMallocHeapSize);
    printf("New cudaLimitMallocHeapSize: %u\n", (unsigned)value);
    return 0;
}
|
7,368 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdint.h>
#include <time.h>
__global__ void conv(float *tab, int N, int M, int ndim,
float *filter, int s, float *output);
__global__ void conv_bil(float *tab, int N, int M, int ndim,
float *filter, int s, float *output, float r);
void box_filter(float *filter, int size, int ndim);
void gaussian_filter(float *filter, int size, int ndim);
void conv_GPU(float *tab, int ndim, int *dim, float *filter,
int s, float *tab_filtered, int N_threads);
typedef struct {
float *img;
int ndim;
int *dim;
int max_pix;
} Image;
void open_img(Image * image);
void write_img(Image * image, int file_idx);
void box_filtering(Image *image, int s);
void gaussian_filtering(Image *image, int s);
void bilateral_filtering(Image *image, int s, float r);
// Applies box, Gaussian, and bilateral filtering to the test image,
// reloading the source and writing one output file per filter.
int main(int argc, char const *argv[]) {
  Image image;
  Image * image_addr = &image;  // fix: restored from HTML-mangled "ℑ"
  int s = 5;
  open_img(image_addr);
  box_filtering(image_addr, s);
  write_img(image_addr, 0);
  open_img(image_addr);
  gaussian_filtering(image_addr, s);
  write_img(image_addr, 1);
  open_img(image_addr);
  bilateral_filtering(image_addr, s, 10);
  write_img(image_addr, 2);
  return 0;
}
// Box-filters image in place (via conv_GPU); s is the filter half-width.
void box_filtering(Image *image, int s){
  int dim_dot = 1;                 // total floats in the pixel buffer
  float *filter = NULL;            // fix: initialize (was uninitialized for other ndim)
  for (int d=0; d<image->ndim; d++){
    dim_dot*=image->dim[d];
  }
  if (image->ndim==2)
    filter = (float*) malloc((2*s+1)*(2*s+1)*sizeof(float));
  if (image->ndim==3)
    filter = (float*) malloc((2*s+1)*(2*s+1)*3*sizeof(float));
  if (filter == NULL) return;      // unsupported ndim or OOM: leave image untouched
  float * img_filtered = (float*) malloc(dim_dot*sizeof(float));
  box_filter(filter, s, image->ndim);
  conv_GPU(image->img, image->ndim, image->dim,
           filter, s, img_filtered, 16);
  printf("\n");
  free(filter);                    // fix: filter was leaked
  free(image->img);                // fix: previous pixel buffer was leaked
  image->img = img_filtered;
}
// Gaussian-filters image in place (via conv_GPU); s is the filter half-width.
void gaussian_filtering(Image *image, int s){
  int dim_dot = 1;                 // total floats in the pixel buffer
  float *filter = NULL;            // fix: initialize (was uninitialized for other ndim)
  for (int d=0; d<image->ndim; d++){
    dim_dot*=image->dim[d];
  }
  if (image->ndim==2)
    filter = (float*) malloc((2*s+1)*(2*s+1)*sizeof(float));
  if (image->ndim==3)
    filter = (float*) malloc((2*s+1)*(2*s+1)*3*sizeof(float));
  if (filter == NULL) return;      // unsupported ndim or OOM: leave image untouched
  float * img_filtered = (float*) malloc(dim_dot*sizeof(float));
  gaussian_filter(filter, s, image->ndim);
  conv_GPU(image->img, image->ndim, image->dim,
           filter, s, img_filtered, 16);
  printf("\n");
  free(filter);                    // fix: filter was leaked
  free(image->img);                // fix: previous pixel buffer was leaked
  image->img = img_filtered;
}
// Bilateral-filters image in place: a Gaussian spatial kernel (half-width s)
// combined in conv_bil with a range kernel of parameter r. Times itself.
void bilateral_filtering(Image *image, int s, float r){
  clock_t start, finish;
  double duration;
  start = clock();
  float * filter = NULL;        // fix: initialize so an unsupported ndim is detectable
  float * img_filtered = NULL;
  int N;
  int M;
  int N_threads;
  float size;
  if (image->ndim==2){
    filter = (float*) malloc((2*s+1)*(2*s+1)*sizeof(float));
    img_filtered = (float*) malloc(image->dim[0]*image->dim[1]
                                   *sizeof(float));
    N = image->dim[0];
    M = image->dim[1];
    N_threads = 16;
    size = (float) (2*s+1);
    // spatial Gaussian weights
    for (int i=0; i<2*s+1; i++){
      for (int j=0; j<2*s+1; j++){
        filter[i*(2*s+1)+j] = exp(-((i-s)*(i-s)+(j-s)*(j-s))/ (2*size*size));
      }
    }
    float *tab_GPU;
    float *output_GPU;
    float *filter_GPU;
    // Allocate vector in device memory
    cudaMalloc(&tab_GPU, N * M * sizeof(float));
    cudaMalloc(&output_GPU, N * M * sizeof(float));
    cudaMalloc(&filter_GPU, (2*s+1) * (2*s+1) * sizeof(float));
    // Copy vectors from host memory to device memory
    cudaMemcpy(tab_GPU, image->img, N * M * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(output_GPU, img_filtered, N * M * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(filter_GPU, filter, (2*s+1) * (2*s+1) *
               sizeof(float), cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(N_threads, N_threads, 1);
    dim3 blocksPerGrid((int) ceil(N / (float)threadsPerBlock.x),
                       (int) ceil(M / (float)threadsPerBlock.y), 1);
    conv_bil<<<blocksPerGrid,threadsPerBlock>>>(tab_GPU, N, M, image->ndim, filter_GPU, s, output_GPU, r);
    if ( cudaSuccess != cudaGetLastError() )
      printf( "Error!\n" );
    cudaMemcpy(img_filtered, output_GPU, N * M * sizeof(float),
               cudaMemcpyDeviceToHost);
    cudaFree(tab_GPU);
    cudaFree(filter_GPU);
    cudaFree(output_GPU);
  }
  if (image->ndim==3){
    filter = (float*) malloc((2*s+1)*(2*s+1)*3*sizeof(float));
    img_filtered = (float*) malloc(image->dim[0]*image->dim[1]
                                   *3*sizeof(float));
    N = image->dim[0];
    M = image->dim[1];
    N_threads = 16;
    size = (float) (2*s+1);
    // spatial Gaussian weights, one plane per channel offset k
    for (int k=0; k<3; k++){
      for (int i=0; i<2*s+1; i++){
        for (int j=0; j<2*s+1; j++){
          filter[(i*(2*s+1)+j)*3+k] = exp(-((i-s)*(i-s)+
                                            (j-s)*(j-s)+(k-1)*(k-1))/ (2*size*size*3));
        }
      }
    }
    float *tab_GPU;
    float *output_GPU;
    float *filter_GPU;
    // Allocate vector in device memory
    cudaMalloc(&tab_GPU, N * M * 3 *sizeof(float));
    cudaMalloc(&output_GPU, N * M * 3 *sizeof(float));
    cudaMalloc(&filter_GPU, (2*s+1) * (2*s+1) * 3 *sizeof(float));
    // Copy vectors from host memory to device memory
    cudaMemcpy(tab_GPU, image->img, N * M * 3 *sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(output_GPU, img_filtered, N * M * 3 *sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(filter_GPU, filter, (2*s+1) * (2*s+1) * 3 *
               sizeof(float), cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(N_threads, N_threads, 3);
    dim3 blocksPerGrid((int) ceil(N / (float)threadsPerBlock.x),
                       (int) ceil(M / (float)threadsPerBlock.y), 1);
    conv_bil<<<blocksPerGrid,threadsPerBlock>>>(tab_GPU, N, M, image->ndim, filter_GPU, s, output_GPU, r);
    if ( cudaSuccess != cudaGetLastError() )
      printf( "Error!\n" );
    cudaMemcpy(img_filtered, output_GPU, N * M * 3 * sizeof(float),
               cudaMemcpyDeviceToHost);
    cudaFree(tab_GPU);
    cudaFree(filter_GPU);
    cudaFree(output_GPU);
  }
  finish = clock();
  duration = (double)(finish - start) / CLOCKS_PER_SEC;
  printf("%f\n",duration);
  if (img_filtered != NULL) {
    free(filter);                  // fix: filter was leaked
    free(image->img);              // fix: previous pixel buffer was leaked
    image->img = img_filtered;
  }
}
// TODO: case "P6"
// Load "image_256x256.pgm" into image. Supports ASCII P2 (grayscale);
// the P6 (color) branch is incomplete — see TODO and NOTE below.
// NOTE(review): fp is never checked for NULL; a missing file crashes fscanf.
void open_img(Image *image){
FILE *fp;
int height, width, max_pix;
int c;
fp = fopen("image_256x256.pgm", "r");
fscanf(fp, "P%d\n", &c);
switch (c) {
case 2:
// P2: ASCII grayscale — header then one integer per pixel
fscanf(fp, "%d %d\n", &height, &width);
fscanf(fp, "%d\n", &max_pix);
image->ndim = 2;
image->img = (float *) malloc(height*width*sizeof(float));
image->dim = (int *) malloc(image->ndim*sizeof(int));
image->dim[0] = height;
image->dim[1] = width;
image->max_pix = max_pix;
for(int i=0; i<height; i++){
for(int j=0; j<width; j++){
fscanf(fp, "%d\n", &c);
*(image->img+i*width+j) = (float) c;
}
}
fclose(fp);
break;
case 6:
fscanf(fp, "%d %d\n", &height, &width);
fscanf(fp, "%d\n", &max_pix);
image->ndim = 3;
image->img = (float *) malloc(height*width*3*sizeof(float));
image->dim = (int *) malloc(image->ndim*sizeof(int));
image->dim[0] = height;
image->dim[1] = width;
image->dim[2] = 3;
image->max_pix = max_pix;
// NOTE(review): this loop fills only height*width of the 3*height*width
// floats allocated above, and P6 pixel data is binary, so fscanf "%d"
// will not parse it — incomplete, matching the TODO above; confirm.
for(int i=0; i<height; i++){
for(int j=0; j<width; j++){
fscanf(fp, "%d\n", &c);
*(image->img+i*width+j) = (float) c;
}
}
fclose(fp);
// NOTE(review): unrecognized magic numbers fall through without fclose(fp).
}
}
// TODO: case "P6"
// Write image as ASCII PGM (P2). The destination file is chosen by
// file_idx: 0=box, 1=gauss, 2=bilateral, anything else = generic.
void write_img(Image *image, int file_idx){
  int height = image->dim[0];
  int width = image->dim[1];
  int max_pix = image->max_pix;
  FILE *fp;
  if (file_idx == 0)
    fp = fopen ("test_box.pgm", "w+");
  else if (file_idx == 1)
    fp = fopen ("test_gauss.pgm", "w+");
  else if (file_idx == 2)
    fp = fopen ("test_bil.pgm", "w+");
  else
    fp = fopen ("test.pgm", "w+");
  fprintf(fp, "P2\n%d %d\n%d\n", height, width, max_pix);
  for (int i=0; i<height; i++)
    for (int j=0; j<width; j++)
      fprintf(fp, "%d\n", (int) image->img[i*width + j]);
  fclose(fp);
}
// Normalized (sum-of-weights) convolution of an N x M image (ndim==2) or
// N x M x 3 image (ndim==3) with a (2s+1)^2 [x3] filter. Out-of-range
// neighbor indices are folded back into range via x_big/x_small/y_big/y_small.
// NOTE(review): the pure-vertical taps index tab[(idx±i...)*M] without +idy
// (i.e. column 0); this looks unintended but is preserved as-is — confirm.
__global__ void conv(float *tab, int N, int M, int ndim,
                     float *filter, int s, float *output){
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int idy = blockIdx.y * blockDim.y + threadIdx.y;
  if (ndim==2){
    if (idx<N){   // fix: was idx<=N, writing one row past the N*M buffer
      if (idy<M){ // fix: was idy<=M
        output[idx*M+idy] = filter[s]*tab[idx*M+idy];
        float sum = filter[s];
        for (int i=1; i<s+1; i++){
          int x_big = -N*(idx+i>=N);
          int x_small = N*(idx-i<0);
          float t = (filter[(s+i)*(2*s+1)+s]);
          float b = (filter[(s-i)*(2*s+1)+s]);
          output[idx*M+idy] += (t*tab[(idx+i+x_big)*M]);
          output[idx*M+idy] += (b*tab[(idx-i+x_small)*M]);
          for (int j=1; j<s+1; j++){
            int y_big = -M*(idy+j>=M);
            int y_small = M*(idy-j<0);
            float tl = (filter[(s+i)*(2*s+1)+(s+j)]);
            float tr = (filter[(s+i)*(2*s+1)+(s-j)]);
            float bl = (filter[(s-i)*(2*s+1)+(s+j)]);
            float br = (filter[(s-i)*(2*s+1)+(s-j)]);
            output[idx*M+idy] += (tl*tab[(idx-i+x_small)*M+idy-j+y_small]);
            output[idx*M+idy] += (tr*tab[(idx-i+x_small)*M+idy+j+y_big]);
            output[idx*M+idy] += (bl*tab[(idx+i+x_big)*M+idy-j+y_small]);
            output[idx*M+idy] += (br*tab[(idx+i+x_big)*M+idy+j+y_big]);
            if (i==1) {
              float l = (filter[(s)*(2*s+1)+(s+j)]);
              float r = (filter[(s)*(2*s+1)+(s-j)]);
              output[idx*M+idy] += (l*tab[(idx)*M+idy-j+y_small]);
              output[idx*M+idy] += (r*tab[(idx)*M+idy+j+y_big]);
              sum += l+r;
            }
            sum += tl+tr+bl+br;
          }
          sum += t+b;
        }
        output[idx*M+idy] /= sum;
      }
    }
  }
  if (ndim==3){
    int idz = blockIdx.z * blockDim.z + threadIdx.z;
    if (idx<N){
      if (idy<M){
        if (idz<3){
          float sum = 0;
          output[(idx*M+idy)*3+idz] = 0;
          for (int k=0; k<3; k++){
            output[(idx*M+idy)*3+idz] += filter[s*3+k+idz+1]*tab[(idx*M+idy)*3+(k+idz)%3];
            sum += filter[s*3+k+idz+1];
            for (int i=1; i<s+1; i++){
              int x_big = -N*(idx+i>=N);
              int x_small = N*(idx-i<0);
              float t = (filter[((s+i)*(2*s+1)+s)*3+(k+idz+1)%3]);
              float b = (filter[((s-i)*(2*s+1)+s)*3+(k+idz+1)%3]);
              output[(idx*M+idy)*3+idz] += (t*tab[((idx+i+x_big)*M)*3+(k+idz)%3]);
              output[(idx*M+idy)*3+idz] += (b*tab[((idx-i+x_small)*M)*3+(k+idz)%3]);
              for (int j=1; j<s+1; j++){
                int y_big = -M*(idy+j>=M);
                int y_small = M*(idy-j<0);
                float tl = (filter[((s+i)*(2*s+1)+(s+j))*3+(k+idz+1)%3]);
                float tr = (filter[((s+i)*(2*s+1)+(s-j))*3+(k+idz+1)%3]);
                float bl = (filter[((s-i)*(2*s+1)+(s+j))*3+(k+idz+1)%3]);
                float br = (filter[((s-i)*(2*s+1)+(s-j))*3+(k+idz+1)%3]);
                output[(idx*M+idy)*3+idz] += (tl*tab[((idx-i+x_small)*M+idy-j+y_small)*3+(k+idz)%3]);
                output[(idx*M+idy)*3+idz] += (tr*tab[((idx-i+x_small)*M+idy+j+y_big)*3+(k+idz)%3]);
                output[(idx*M+idy)*3+idz] += (bl*tab[((idx+i+x_big)*M+idy-j+y_small)*3+(k+idz)%3]);
                output[(idx*M+idy)*3+idz] += (br*tab[((idx+i+x_big)*M+idy+j+y_big)*3+(k+idz)%3]);
                if (i==1) {
                  float l = (filter[((s)*(2*s+1)+(s+j))*3+(k+idz+1)%3]);
                  float r = (filter[((s)*(2*s+1)+(s-j))*3+(k+idz+1)%3]);
                  output[(idx*M+idy)*3+idz] += (l*tab[((idx)*M+idy-j+y_small)*3+(k+idz)%3]);
                  output[(idx*M+idy)*3+idz] += (r*tab[((idx)*M+idy+j+y_big)*3+(k+idz)%3]);
                  sum += l+r;
                }
                sum += tl+tr+bl+br;
              }
              sum += t+b;
            }
          }
          output[(idx*M+idy)*3+idz] /= sum;
        }
      }
    }
  }
}
// Bilateral convolution: like conv, but every spatial weight is multiplied
// by a range term exp(-(I_q - I_p)^2 / (2 r^2)) comparing neighbor and
// center intensities, and the result is normalized by the weight sum.
// NOTE(review): in the ndim==3 path, "sum += filter[s]" (center weight) and
// the filter index "*3+k+idz+1" (no %3, unlike conv) look inconsistent with
// the 2-D path and with conv — preserved as-is; confirm.
__global__ void conv_bil(float *tab, int N, int M, int ndim,
                         float *filter, int s, float *output, float r){
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int idy = blockIdx.y * blockDim.y + threadIdx.y;
  if (ndim==2){
    float var = 2*r*r;
    if (idx<N){   // fix: was idx<=N, writing one row past the N*M buffer
      if (idy<M){ // fix: was idy<=M
        output[idx*M+idy] = filter[s]*tab[idx*M+idy];
        float sum = filter[s];
        for (int i=1; i<s+1; i++){
          int x_big = -N*(idx+i>=N);
          int x_small = N*(idx-i<0);
          float t = (filter[(s+i)*(2*s+1)+s]
                     *exp(-(tab[(idx+i+x_big)*M]-tab[idx*M+idy])
                          *(tab[(idx+i+x_big)*M]-tab[idx*M+idy])/var)
                     );
          float b = (filter[(s-i)*(2*s+1)+s]
                     *exp(-(tab[(idx-i+x_small)*M]-tab[idx*M+idy])
                          *(tab[(idx-i+x_small)*M]-tab[idx*M+idy])/var)
                     );
          output[idx*M+idy] += (t*tab[(idx+i+x_big)*M]);
          output[idx*M+idy] += (b*tab[(idx-i+x_small)*M]);
          for (int j=1; j<s+1; j++){
            int y_big = -M*(idy+j>=M);
            int y_small = M*(idy-j<0);
            float tl = (filter[(s+i)*(2*s+1)+(s+j)]
                        *exp(-(tab[(idx-i+x_small)*M+idy-j+y_small]-tab[idx*M+idy])
                             *(tab[(idx-i+x_small)*M+idy-j+y_small]-tab[idx*M+idy])/var)
                        );
            float tr = (filter[(s+i)*(2*s+1)+(s-j)]
                        *exp(-(tab[(idx-i+x_small)*M+idy+j+y_big]-tab[idx*M+idy])
                             *(tab[(idx-i+x_small)*M+idy+j+y_big]-tab[idx*M+idy])/var)
                        );
            float bl = (filter[(s-i)*(2*s+1)+(s+j)]
                        *exp(-(tab[(idx+i+x_big)*M+idy-j+y_small]-tab[idx*M+idy])
                             *(tab[(idx+i+x_big)*M+idy-j+y_small]-tab[idx*M+idy])/var)
                        );
            float br = (filter[(s-i)*(2*s+1)+(s-j)]
                        *exp(-(tab[(idx+i+x_big)*M+idy+j+y_big]-tab[idx*M+idy])
                             *(tab[(idx+i+x_big)*M+idy+j+y_big]-tab[idx*M+idy])/var)
                        );
            output[idx*M+idy] += (tl*tab[(idx-i+x_small)*M+idy-j+y_small]);
            output[idx*M+idy] += (tr*tab[(idx-i+x_small)*M+idy+j+y_big]);
            output[idx*M+idy] += (bl*tab[(idx+i+x_big)*M+idy-j+y_small]);
            output[idx*M+idy] += (br*tab[(idx+i+x_big)*M+idy+j+y_big]);
            if (i==1) {
              float l = (filter[(s)*(2*s+1)+(s+j)]
                         *exp(-(tab[(idx)*M+idy-j+y_small]-tab[idx*M+idy])
                              *(tab[(idx)*M+idy-j+y_small]-tab[idx*M+idy])/var)
                         );
              float r = (filter[(s)*(2*s+1)+(s-j)]
                         *exp(-(tab[(idx)*M+idy+j+y_big]-tab[idx*M+idy])
                              *(tab[(idx)*M+idy+j+y_big]-tab[idx*M+idy])/var)
                         );
              output[idx*M+idy] += (l*tab[(idx)*M+idy-j+y_small]);
              output[idx*M+idy] += (r*tab[(idx)*M+idy+j+y_big]);
              sum += l+r;
            }
            sum += tl+tr+bl+br;
          }
          sum += t+b;
        }
        output[idx*M+idy] /= sum;
      }
    }
  }
  if (ndim==3){
    int idz = blockIdx.z * blockDim.z + threadIdx.z;
    float var = 2*r*r*3;
    if (idx<N){   // fix: was idx<=N (out-of-bounds write at idx==N)
      if (idy<M){ // fix: was idy<=M
        if (idz<3){
          output[(idx*M+idy)*3+idz] = 0;
          float sum = 0;
          for (int k=0; k<3; k++){
            output[(idx*M+idy)*3+idz] += filter[s*3+k+idz+1]*tab[(idx*M+idy)*3+(k+idz)%3];
            sum += filter[s];
            for (int i=1; i<s+1; i++){
              int x_big = -N*(idx+i>=N);
              int x_small = N*(idx-i<0);
              float t = (filter[((s+i)*(2*s+1)+s)*3+k+idz+1]
                         *exp(-(tab[((idx+i+x_big)*M)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])
                              *(tab[((idx+i+x_big)*M)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])/var)
                         );
              float b = (filter[((s-i)*(2*s+1)+s)*3+k+idz+1]
                         *exp(-(tab[((idx-i+x_small)*M)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])
                              *(tab[((idx-i+x_small)*M)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])/var)
                         );
              output[(idx*M+idy)*3+idz] += (t*tab[((idx+i+x_big)*M)*3+(k+idz)%3]);
              output[(idx*M+idy)*3+idz] += (b*tab[((idx-i+x_small)*M)*3+(k+idz)%3]);
              for (int j=1; j<s+1; j++){
                int y_big = -M*(idy+j>=M);
                int y_small = M*(idy-j<0);
                float tl = (filter[((s+i)*(2*s+1)+(s+j))*3+k+idz+1]
                            *exp(-(tab[((idx-i+x_small)*M+idy-j+y_small)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])
                                 *(tab[((idx-i+x_small)*M+idy-j+y_small)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])/var)
                            );
                float tr = (filter[((s+i)*(2*s+1)+(s-j))*3+k+idz+1]
                            *exp(-(tab[((idx-i+x_small)*M+idy+j+y_big)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])
                                 *(tab[((idx-i+x_small)*M+idy+j+y_big)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])/var)
                            );
                float bl = (filter[((s-i)*(2*s+1)+(s+j))*3+k+idz+1]
                            *exp(-(tab[((idx+i+x_big)*M+idy-j+y_small)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])
                                 *(tab[((idx+i+x_big)*M+idy-j+y_small)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])/var)
                            );
                // fix: the second factor below read "tab[(idx*M+idy)]*3+(k+idz)%3"
                // — the *3+channel belongs INSIDE the subscript (a misplaced
                // bracket that added an integer to a pixel difference).
                float br = (filter[((s-i)*(2*s+1)+(s-j))*3+k+idz+1]
                            *exp(-(tab[((idx+i+x_big)*M+idy+j+y_big)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])
                                 *(tab[((idx+i+x_big)*M+idy+j+y_big)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])/var)
                            );
                output[(idx*M+idy)*3+idz] += (tl*tab[((idx-i+x_small)*M+idy-j+y_small)*3+(k+idz)%3]);
                output[(idx*M+idy)*3+idz] += (tr*tab[((idx-i+x_small)*M+idy+j+y_big)*3+(k+idz)%3]);
                output[(idx*M+idy)*3+idz] += (bl*tab[((idx+i+x_big)*M+idy-j+y_small)*3+(k+idz)%3]);
                output[(idx*M+idy)*3+idz] += (br*tab[((idx+i+x_big)*M+idy+j+y_big)*3+(k+idz)%3]);
                if (i==1) {
                  float l = (filter[((s)*(2*s+1)+(s+j))*3+k+idz+1]
                             *exp(-(tab[((idx)*M+idy-j+y_small)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])
                                  *(tab[((idx)*M+idy-j+y_small)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])/var)
                             );
                  float r = (filter[((s)*(2*s+1)+(s-j))*3+k+idz+1]
                             *exp(-(tab[((idx)*M+idy+j+y_big)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])
                                  *(tab[((idx)*M+idy+j+y_big)*3+(k+idz)%3]-tab[(idx*M+idy)*3+(k+idz)%3])/var)
                             );
                  output[(idx*M+idy)*3+idz] += (l*tab[((idx)*M+idy-j+y_small)*3+(k+idz)%3]);
                  output[(idx*M+idy)*3+idz] += (r*tab[((idx)*M+idy+j+y_big)*3+(k+idz)%3]);
                  sum += l+r;
                }
                sum += tl+tr+bl+br;
              }
              sum += t+b;
            }
          }
          output[(idx*M+idy)*3+idz] /= sum;
        }
      }
    }
  }
}
// Fill filter with a box (all-ones) kernel of side 2*size+1; for ndim==3
// the kernel has three interleaved channel planes. Normalization happens
// in the convolution, which divides by the weight sum.
void box_filter(float *filter, int size, int ndim) {
  int side = 2*size + 1;
  int channels = (ndim == 3) ? 3 : (ndim == 2 ? 1 : 0);
  for (int e = 0; e < side * side * channels; e++)
    filter[e] = 1;
}
// Fill filter with a Gaussian kernel of side 2*size+1, sigma = 2*size+1.
// For ndim==3 a depth term (k-1)^2 is added and the variance scaled by 3;
// channels are stored interleaved.
void gaussian_filter(float *filter, int size, int ndim) {
  float s = (float) (2*size + 1);
  int side = 2*size + 1;
  if (ndim == 2) {
    for (int i = 0; i < side; i++)
      for (int j = 0; j < side; j++) {
        float d2 = (float)((i-size)*(i-size) + (j-size)*(j-size));
        filter[i*side + j] = exp(-d2 / (2*s*s));
      }
  }
  if (ndim == 3) {
    for (int i = 0; i < side; i++)
      for (int j = 0; j < side; j++)
        for (int k = 0; k < 3; k++) {
          float norm = (i-size)*(i-size) + (j-size)*(j-size) + (k-1)*(k-1);
          filter[(i*side + j)*3 + k] = exp(-norm / (2*s*s*3));
        }
  }
}
// Host wrapper: upload image + filter, launch conv, download the result
// into tab_filtered, and print the elapsed CPU time.
// Supports ndim==2 (grayscale) and ndim==3 (interleaved RGB); the two
// previously duplicated branches are merged (they differed only in the
// channel count and the z block dimension).
void conv_GPU(float *tab, int ndim, int *dim, float *filter,
              int s, float *tab_filtered, int N_threads) {
  clock_t start, finish;
  double duration;
  start = clock();
  if (ndim == 2 || ndim == 3) {
    int chan = (ndim == 3) ? 3 : 1;       // color channels per pixel
    int dim_dot = 1;                       // total floats in the image buffer
    for (int i = 0; i < ndim; i++)
      dim_dot *= dim[i];
    size_t img_bytes = dim_dot * sizeof(float);
    size_t filt_bytes = (size_t)(2*s+1) * (2*s+1) * chan * sizeof(float);
    float *tab_GPU;
    float *output_GPU;
    float *filter_GPU;
    // Allocate vector in device memory
    cudaMalloc(&tab_GPU, img_bytes);
    cudaMalloc(&output_GPU, img_bytes);
    cudaMalloc(&filter_GPU, filt_bytes);
    // Copy vectors from host memory to device memory
    cudaMemcpy(tab_GPU, tab, img_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(output_GPU, tab, img_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(filter_GPU, filter, filt_bytes, cudaMemcpyHostToDevice);
    // one z-layer per channel; grid covers the image with ceil-division
    dim3 threadsPerBlock(N_threads, N_threads, chan);
    dim3 blocksPerGrid((int) ceil(dim[0] / (float)threadsPerBlock.x),
                       (int) ceil(dim[1] / (float)threadsPerBlock.y), 1);
    conv<<<blocksPerGrid,threadsPerBlock>>>(tab_GPU, dim[0], dim[1], ndim, filter_GPU, s, output_GPU);
    cudaMemcpy(tab_filtered, output_GPU, img_bytes, cudaMemcpyDeviceToHost);
    cudaFree(tab_GPU);
    cudaFree(filter_GPU);
    cudaFree(output_GPU);
  }
  finish = clock();
  duration = (double)(finish - start) / CLOCKS_PER_SEC;
  printf("%f",duration);
}
|
// Element-wise vector add: out[i] = a[i] + b[i]. The caller must launch
// exactly enough threads — there is no bounds guard.
__global__
void add(double* out,const double* a,const double* b){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    out[i] = a[i] + b[i];
}
7,370 | #include <stdio.h>
#define NUM_ELE 5
struct ListElem{
int id;
bool last;
ListElem *next;
};
// Walks a host-pinned (zero-copy) linked list from the device, printing
// each node until one flagged last==true is reached.
__global__ void test_kernel(ListElem *list){
    int count = 0;
    for (; !(list->last); list = list->next)
        printf("List element %d has id %d\n", count++, list->id);
    printf("List element %d is the last item in the list\n", count);
}
// Build a NUM_ELE-long linked list in pinned host memory (device-accessible)
// and let a single GPU thread traverse it.
int main(){
    ListElem *head, *cur;
    cudaHostAlloc(&head, sizeof(ListElem), cudaHostAllocDefault);
    cur = head;
    for (int i = 0; i < NUM_ELE - 1; i++){
        cur->id = i + 101;
        cur->last = false;
        cudaHostAlloc(&(cur->next), sizeof(ListElem), cudaHostAllocDefault);
        cur = cur->next;
    }
    cur->last = true;   // terminal node
    test_kernel<<<1,1>>>(head);
    cudaDeviceSynchronize();
}
|
7,371 | #include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN 1 << 22
// define Array of Struct (AoS)
struct innerStruct{
float x;
float y;
};
// define Struct of Array (SoA)
struct innerArray{
float x[LEN];
float y[LEN];
};
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double seconds(){
    struct timeval tp;
    // fix: removed the unused result variable and the unused timezone
    // argument (passing NULL is the standard usage).
    gettimeofday(&tp, NULL);
    return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
}
// Fill an AoS buffer with pseudo-random values in [0, 2.55] for both fields.
void initialInnerStruct(innerStruct *ip, int size){
    for (int k = 0; k < size; ++k){
        ip[k].x = (float)(rand() & 0xFF) / 100.0f;
        ip[k].y = (float)(rand() & 0xFF) / 100.0f;
    }
}
// Compare host and GPU AoS results field-by-field; print the first mismatch.
void checkInnerStruct(innerStruct *hostRef, innerStruct *gpuRef, const int N){
    double epsilon = 1.0e-8;
    bool match = 1;
    for(int i = 0; i < N; i++){
        // fix: use fabs for floats — plain abs() resolves to the integer
        // overload here, truncating every sub-1.0 difference to zero and
        // masking real mismatches.
        if (fabs(hostRef[i].x - gpuRef[i].x) > epsilon){
            match = 0;
            printf("different on %dth element: host %f gpu %f\n",i,
                   hostRef[i].x,gpuRef[i].x);
            break;
        }
        if (fabs(hostRef[i].y - gpuRef[i].y) > epsilon){
            match = 0;
            printf("different on %dth element: host %f gpu %f\n",i,
                   hostRef[i].y,gpuRef[i].y);
            break;
        }
    }
    if (!match) printf("Arrays do not match! \n\n");
}
// CPU reference: C[i] = {A[i].x + 10, A[i].y + 20} for all n elements.
void testInnerStructHost(innerStruct *A,innerStruct *C,const int n){
    for (int k = 0; k < n; ++k){
        C[k].x = A[k].x + 10.f;
        C[k].y = A[k].y + 20.f;
    }
}
// AoS kernel under test: result[i] = {data[i].x + 10, data[i].y + 20}.
__global__ void testInnerStruct(innerStruct *data, innerStruct *result, const int n){
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    innerStruct e = data[idx];
    e.x += 10.f;
    e.y += 20.f;
    result[idx] = e;
}
// Warm-up kernel: identical work to testInnerStruct, run once so the first
// timed launch is not penalized by startup costs.
__global__ void warmup(innerStruct *data, innerStruct *result, const int n){
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    innerStruct e = data[idx];
    e.x += 10.f;
    e.y += 20.f;
    result[idx] = e;
}
// Benchmark driver: times the AoS elementwise kernel (plus a warm-up run)
// and validates the GPU output against the CPU reference.
int main(int argc,char **argv){
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n",dev,deviceProp.name);
cudaSetDevice(dev);
// allocate host memory
int nElem = LEN;
size_t nBytes = nElem * sizeof(innerStruct);
innerStruct *h_A = (innerStruct *)malloc(nBytes);
innerStruct *hostRef = (innerStruct *)malloc(nBytes);
innerStruct *gpuRef = (innerStruct *)malloc(nBytes);
// initialize host array and compute the CPU reference result
initialInnerStruct(h_A,nElem);
testInnerStructHost(h_A,hostRef,nElem);
// allocate device memory
innerStruct *d_A,*d_C;
cudaMalloc((innerStruct**)&d_A,nBytes);
cudaMalloc((innerStruct**)&d_C,nBytes);
// copy data from host to device
cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
// block size is overridable from the command line (first argument)
int blocksize = 128;
if (argc > 1) blocksize = atoi(argv[1]);
// execution config: 1-D grid, ceil-division over nElem
dim3 block(blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1: warmup (absorbs one-time startup cost before timing)
double iStart = seconds();
warmup<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup kernel <<< %3d, %3d >>> elapsed %f sec\n",grid.x,block.x,iElaps);
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
checkInnerStruct(hostRef,gpuRef,nElem);
cudaGetLastError();
// kernel 2: testInnerStruct (the timed run)
iStart = seconds();
testInnerStruct<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerstruct <<< %3d, %3d >>> elapsed %f sec\n",grid.x,block.x,iElaps);
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
checkInnerStruct(hostRef,gpuRef,nElem);
cudaGetLastError();
// free memories
cudaFree(d_A);
cudaFree(d_C);
free(h_A);
free(hostRef);
free(gpuRef);
// reset devices
cudaDeviceReset();
return EXIT_SUCCESS;
}
|
7,372 | #include <stdio.h>
#include <time.h>
// Print the current local time as HH:MM:SS.
int main(void) {
    time_t now;
    time(&now);
    struct tm* lt = localtime(&now);
    printf("%02d:%02d:%02d\n", lt->tm_hour, lt->tm_min, lt->tm_sec);
}
|
7,373 | #include "assignmentHPC2.cuh"
#include <iostream>
#include <cstdlib>
#include <chrono>
using namespace std;
using namespace std::chrono;
#define VEC_SIZE 1024*1024*256
// One thread per element, guarded against the grid overshooting VEC_SIZE.
__global__ void vec_add_kernel(float *a, float *b, float *c) {
    // fix: global index must scale blockIdx by blockDim — the original
    // "blockIdx.x + threadIdx.x" only ever touched the first
    // gridDim + blockDim elements.
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    if(index < VEC_SIZE) {
        c[index] = a[index] + b[index];
    }
}
// CPU reference: element-wise c = a + b over VEC_SIZE floats.
void vec_add_cpu(float *a, float *b, float *c) {
    unsigned int i = 0;
    while (i < VEC_SIZE) {
        c[i] = a[i] + b[i];
        ++i;
    }
}
// Benchmark: vector add on CPU vs GPU with timing and result printout.
void vec_add() {
    // declare variables
    float *a_host, *b_host, *c_host;
    float *a_device, *b_device, *c_device;
    // allocate memory to host variables
    a_host = (float *)malloc(VEC_SIZE * sizeof(float));
    b_host = (float *)malloc(VEC_SIZE * sizeof(float));
    c_host = (float *)malloc(VEC_SIZE * sizeof(float));
    // initialize host variables
    for(int i = 0; i < VEC_SIZE; i++) {
        a_host[i] = 1.0f;
        b_host[i] = 1.0f;
    }
    cout<<"INPUT SIZE "<<endl;
    cout<<"Vector A : "<<VEC_SIZE<<" * "<<1<<endl;
    // ----------------------------------------- CPU Code -------------------------------------------------
    auto startCPU = high_resolution_clock::now();
    vec_add_cpu(a_host, b_host, c_host);
    auto stopCPU = high_resolution_clock::now();
    // Display Results
    cout<<"\n\n--------------- CPU ---------------\n"<<endl;
    cout<<"Answer CPU : \n"<<endl;
    for(int i = 0; i < 5; i++) {
        cout<<a_host[i]<<" + "<<b_host[i]<<" = "<<c_host[i]<<endl;
    }
    cout<<"\nTime on CPU : "<<duration_cast<microseconds>(stopCPU - startCPU).count()/1000<<" milli seconds\n\n"<<endl;
    free(c_host);
    // ----------------------------------------- GPU Code -------------------------------------------------
    // allocate memory to device variables
    cudaMalloc(&a_device, VEC_SIZE * sizeof(float));
    cudaMalloc(&b_device, VEC_SIZE * sizeof(float));
    cudaMalloc(&c_device, VEC_SIZE * sizeof(float));
    // copy data from host to device
    cudaMemcpy(a_device, a_host, VEC_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_device, b_host, VEC_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    // set up timing variables
    float gpu_elapsed_time;
    cudaEvent_t gpu_start, gpu_stop;
    cudaEventCreate(&gpu_start);
    cudaEventCreate(&gpu_stop);
    cudaEventRecord(gpu_start, 0);
    // call Kernel
    dim3 blockSize(1024);
    // fix: the grid must cover all VEC_SIZE elements — the hard-coded
    // gridSize(1024) only launched 1024*1024 threads, leaving the
    // remaining 255M elements un-added.
    dim3 gridSize((VEC_SIZE + blockSize.x - 1) / blockSize.x);
    vec_add_kernel<<<gridSize, blockSize>>>(a_device, b_device, c_device);
    cudaEventRecord(gpu_stop, 0);
    cudaEventSynchronize(gpu_stop);
    cudaEventElapsedTime(&gpu_elapsed_time, gpu_start, gpu_stop);
    cudaEventDestroy(gpu_start);
    cudaEventDestroy(gpu_stop);
    // copy results from device to host
    c_host = (float *)malloc(VEC_SIZE * sizeof(float));
    cudaMemcpy(c_host, c_device, VEC_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
    // Display Results
    cout<<"--------------- GPU ---------------\n"<<endl;
    cout<<"Answer GPU : \n"<<endl;
    for(int i = 0; i < 5; i++) {
        cout<<a_host[i]<<" + "<<b_host[i]<<" = "<<c_host[i]<<endl;
    }
    cout<<"\nTime on GPU : "<<gpu_elapsed_time<<" milli seconds\n\n"<<endl;
    // Free allocated Memory
    free(a_host);
    free(b_host);
    free(c_host);
    cudaFree(a_device);
    cudaFree(b_device);
    cudaFree(c_device);
}
|
7,374 | #include "includes.h"
// ReLU: clamp negative inputs to zero, pass non-negatives through.
__device__ __forceinline__ float relu(float a) {
    if (a < 0) return 0;
    return a;
}
// Apply ReLU in place over vec[0..len), one thread per element.
__global__ void relu_kernel(float *vec, int len) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len) return;
    vec[i] = relu(vec[i]);
}
7,375 | #include "includes.h"
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) {
    /* Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
       FIX: the index was hard-coded to 0, so every thread redundantly
       computed only element 0; derive it from the launch coordinates. */
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
} |
7,376 | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <getopt.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
/* Original permuation code due to D. Jimenez, UT Austin
* http://faculty.cse.tamu.edu/djimenez/ut/utsa/cs3343/
*/
/* Requires C99 compiler (gcc: -std=c99) */
#define ONE_BILLION (double)1000000000.0
#define debug_printf(fmt, ...) do { if (DEBUG) fprintf(stderr, fmt, __VA_ARGS__); } while (0)
/* Action function for each permuation. */
typedef void (*perm_action_t)(int *v, int n);
/* Reference an element in the TSP distance array. */
#define TSP_ELT(tsp, n, i, j) *(tsp + (i * n) + j)
__host__ double
now(void)
{
	/* Current wall-clock time in seconds (nanosecond resolution).
	   FIX: the argument was garbled to "¤t_time" (mis-encoded
	   "&current_time"), which does not compile. */
	struct timespec current_time;
	clock_gettime(CLOCK_REALTIME, &current_time);
	return current_time.tv_sec + (current_time.tv_nsec / (double)1000000000.0);
}
/* Swap array elements. */
__device__ __host__ void
swap(int *v, int i, int j)
{
	/* Exchange v[i] and v[j]. */
	int tmp = v[j];
	v[j] = v[i];
	v[i] = tmp;
}
/* Trivial action to pass to permutations--print out each one. */
__device__ void
print_perm(int *perm, int n, char *msge)
{
	/* Print the n entries of a permutation followed by a caller-supplied
	   label (debugging aid). */
	for (int k = 0; k < n; k++) {
		printf("%2d ", perm[k]);
	}
	printf(" - %s\n", msge);
}
__host__ void
smallest_in_list(int *list, int *num_short, int n, int *shortest_length, int *num_as_short)
{
	/* Reduce per-thread results: find the minimum tour length in list[0..n)
	   and sum num_short[] over every slot that ties that minimum. */
	int best = INT_MAX;
	int count = 0;
	for (int idx = 0; idx < n; idx++) {
		int len = list[idx];
		if (len < best) {
			best = len;
			count = 0;   /* new minimum invalidates earlier tie counts */
		}
		if (len == best) {
			count += num_short[idx];
		}
	}
	*shortest_length = best;
	*num_as_short = count;
}
/* Create an instance of a symmetric TSP. */
__host__ int *
create_tsp(int n, int random_seed)
{
	/* Allocate an n x n row-major distance matrix and fill it symmetrically
	   with pseudo-random distances in [0, 100].  Caller frees the result. */
	int *tsp = (int *) malloc(n * n * sizeof(int));
	srandom(random_seed);
	for (int row = 0; row < n; row++) {
		for (int col = 0; col <= row; col++) {
			int dist = (int)(random() / (RAND_MAX / 100));
			tsp[row * n + col] = dist;
			tsp[col * n + row] = dist;
		}
	}
	return tsp;
}
/* Print a TSP distance matrix. */
__host__ __device__ void
print_tsp(int *tsp, int n)
{
	/* Print the n x n distance matrix with column headers and row labels. */
	for (int col = 0; col < n; col++) {
		printf("%3d|", col);
	}
	printf("\n");
	for (int row = 0; row < n; row++) {
		printf("%2d|", row);
		for (int col = 0; col < n; col++) {
			printf("%4d", tsp[row * n + col]);
		}
		printf("\n");
	}
	printf("\n");
}
/* Evaluate a single instance of the TSP. */
__device__ int
eval_tsp(int *perm, int n, int* distances)
{
	/* Length of the closed tour described by perm over the n x n row-major
	   distance matrix (the final leg wraps back to the first city). */
	int total = 0;
	for (int leg = 0; leg < n; leg++) {
		int from = perm[leg];
		int to = perm[(leg + 1) % n];
		total += distances[from * n + to];
	}
	return total;
}
/**** List ADT ****************/
/* Fixed-capacity growable int list used on the device heap by the
   permutation helpers below.  Capacity is set once at list_alloc() time. */
typedef struct {
int *values; /* Values stored in list */
int max_size; /* Maximum size allocated */
int cur_size; /* Size currently in use */
} list_t;
/* Dump list, including sizes */
__device__ void
list_dump(list_t *list)
{
	/* Debug print: "cur/max" followed by every stored value. */
	printf("%2d/%2d", list->cur_size, list->max_size);
	int count = list->cur_size;
	for (int k = 0; k < count; k++) {
		printf(" %d", list->values[k]);
	}
	printf("\n");
}
/* Allocate list that can store up to 'max_size' elements */
__device__ list_t *
list_alloc(int max_size)
{
	/* Create an empty list with capacity max_size on the device heap.
	   Pair with list_free() to avoid leaking device-heap memory. */
	list_t *lst = (list_t *)malloc(sizeof(list_t));
	lst->values = (int *)malloc(max_size * sizeof(int));
	lst->max_size = max_size;
	lst->cur_size = 0;
	return lst;
}
/* Free a list; call this to avoid leaking memory! */
__device__ void
list_free(list_t *lst)
{
	/* Release the element storage first, then the header itself. */
	free(lst->values);
	free(lst);
}
/* Add a value to the end of the list */
__device__ void
list_add(list_t *list, int value)
{
	/* Append value to the end of the list.
	   FIX: on overflow the original reported "List full" but then wrote
	   past the end of the values buffer anyway; the value is now dropped
	   instead (device code cannot call exit()). */
	if (list->cur_size >= list->max_size) {
		printf("List full");
		list_dump(list);
		return;
	}
	list->values[list->cur_size++] = value;
}
/* Return the current size of the list */
__device__ int
list_size(list_t *lst)
{
	/* Number of elements currently stored. */
	return lst->cur_size;
}
/* Validate index */
__device__ void
_list_check_index(list_t *lst, int index)
{
	/* Report (but do not abort on) an out-of-range index; device code has
	   no exit(), so this is diagnostic only. */
	int valid = (index >= 0) && (index < lst->cur_size);
	if (!valid) {
		printf("Invalid index %d\n", index);
		list_dump(lst);
	}
}
/* Get the value at given index */
__device__ int
list_get(list_t *lst, int index)
{
	/* Fetch the element at index; the index is validated (diagnostically)
	   first. */
	_list_check_index(lst, index);
	return lst->values[index];
}
/* Remove the value at the given index */
__device__ void
list_remove_at(list_t *lst, int index)
{
	/* Delete the element at index, shifting the tail left by one slot. */
	_list_check_index(lst, index);
	int last = lst->cur_size - 1;
	for (int k = index; k < last; k++) {
		lst->values[k] = lst->values[k + 1];
	}
	lst->cur_size = last;
}
/* Retrieve a copy of the values as a simple array of integers. The returned
array is allocated dynamically; the caller must free the space when no
longer needed.
*/
__device__ int *
list_as_array(list_t *list)
{
	/* Copy the stored values into a plain int array (capacity max_size);
	   the caller must free() the result.
	   FIX: the copy loop iterated over max_size, so a partially filled
	   list read past cur_size and tripped the index check; only the
	   cur_size valid elements are copied now. */
	int *rtn = (int *)malloc(list->max_size * sizeof(int));
	for (int i = 0; i < list->cur_size; i++) {
		rtn[i] = list_get(list, i);
	}
	return rtn;
}
/**** Permutation ****************/
/* Permutation algorithms based on code found at:
http://www.mathblog.dk/project-euler-24-millionth-lexicographic-permutation/
which references:
http://www.cut-the-knot.org/do_you_know/AllPerm.shtml
*/
/* Calculate n! iteratively */
__device__ __host__ long
factorial(int n)
{
	/* Iterative n!.  NOTE: deliberately returns 0 (not 1) for n < 1,
	   matching the original contract relied on by callers here. */
	if (n < 1) {
		return 0;
	}
	long product = 1;
	for (int k = 2; k <= n; k++) {
		product *= k;
	}
	return product;
}
/* Return the kth lexographically ordered permuation of an array of k integers
in the range [0 .. size - 1]. The integers are allocated dynamically and
should be free'd by the caller when no longer needed.
*/
__device__ int *
kth_perm(int k, int size)
{
	/* Return the kth (1-based) lexicographic permutation of 0..size-1 as a
	   heap array; the caller must free() it.
	   FIX: the 'numbers' working list was never released, leaking device
	   heap memory on every call. */
	long remain = k - 1;
	list_t *numbers = list_alloc(size);
	for (int i = 0; i < size; i++) {
		list_add(numbers, i);
	}
	list_t *perm = list_alloc(size);
	for (int i = 1; i < size; i++) {
		/* Factoradic decomposition: pick digit j of the remaining pool. */
		long f = factorial(size - i);
		long j = remain / f;
		remain = remain % f;
		list_add(perm, list_get(numbers, j));
		list_remove_at(numbers, j);
		if (remain == 0) {
			break;
		}
	}
	/* Append remaining digits (already in ascending order) */
	for (int i = 0; i < list_size(numbers); i++) {
		list_add(perm, list_get(numbers, i));
	}
	int *rtn = list_as_array(perm);
	list_free(numbers);
	list_free(perm);
	return rtn;
}
/* Given an array of size elements at perm, update the array in place to
contain the lexographically next permutation. It is originally due to
Dijkstra. The present version is discussed at:
http://www.cut-the-knot.org/do_you_know/AllPerm.shtml
*/
__device__ void
next_perm(int *perm, int size)
{
/* Scan right-to-left for the pivot: the last position i-1 whose value is
   smaller than its successor.  NOTE(review): if perm is already the final
   (fully descending) permutation this walks past index 0 -- callers must
   not advance beyond the last permutation; confirm against tsp_go's
   bounds check. */
int i = size - 1;
while (perm[i - 1] >= perm[i]) {
i = i - 1;
}
/* Find, from the right, the smallest value greater than the pivot. */
int j = size;
while (perm[j - 1] <= perm[i - 1]) {
j = j - 1;
}
/* Exchange the pivot with that value. */
swap(perm, i - 1, j - 1);
/* Reverse the (descending) suffix to the right of the pivot so it
   becomes ascending, yielding the next permutation in order. */
i++;
j = size;
while (i < j) {
swap(perm, i - 1, j - 1);
i++;
j--;
}
}
__global__
void tsp_go(int* perm, int num_cities, int num_threads, int* cperm, int* output, int* num_as_short){
	/* Brute-force TSP: thread t walks its contiguous share of the
	   num_cities! permutations (1-based indices) and records its local
	   minimum tour length in output[t] and the number of tours tying that
	   minimum in num_as_short[t].  The host reduces these afterwards.
	   NOTE(review): the perm argument is unused -- each thread builds its
	   own starting permutation on the device heap. */
	long one_index = threadIdx.x + 1;
	long cur_idx = (factorial(num_cities)/num_threads) * (threadIdx.x)+1;
	long end_idx = (factorial(num_cities)/num_threads) * (one_index);
	int min_path = INT_MAX;
	int num = 0;
	__syncthreads();
	int *tour = kth_perm(cur_idx, num_cities);
	while( cur_idx <= end_idx){
		int tmp = eval_tsp(tour, num_cities, cperm);
		if(tmp < min_path){
			min_path = tmp;
			num = 0;
		}
		if(tmp == min_path){
			num++;
		}
		cur_idx++;
		// Only advance while more permutations remain in this thread's share.
		if( cur_idx <= end_idx){
			next_perm(tour, num_cities);
		}
	}
	// FIX: the permutation allocated by kth_perm() was never freed,
	// leaking device-heap memory for every thread.
	free(tour);
	__syncthreads();
	output[threadIdx.x] = min_path;
	num_as_short[threadIdx.x] = num;
}
void
usage(char *prog_name)
{
	/* Print command-line help to stderr and terminate with failure. */
	fprintf(stderr, "usage: %s [flags]\n", prog_name);
	static const char *flag_help[] = {
		" -h\n",
		" -c <number of cities>\n",
		" -s <random seed>\n",
	};
	for (size_t k = 0; k < sizeof(flag_help) / sizeof(flag_help[0]); k++) {
		fputs(flag_help[k], stderr);
	}
	exit(1);
}
/* Entry point: parse flags (-c cities, -n threads, -s seed), build a random
   symmetric distance matrix, launch a single block of num_threads threads to
   brute-force all num_cities! tours, then reduce the per-thread minima. */
int
main(int argc, char **argv)
{
int num_cities = 5;
int shortest_length;
int num_as_short = -1;
long num_trials = 0;
int num_threads = 1;
int random_seed = 42;
/* Use "random" random seed by default. */
random_seed = time(NULL);
int ch;
while ((ch = getopt(argc, argv, "c:hn:s:")) != -1) {
switch (ch) {
case 'c':
num_cities = atoi(optarg);
break;
case 'n':
num_threads = atoi(optarg);
if(num_threads < 1){
usage(argv[0]);
}
break;
case 's':
random_seed = atoi(optarg);
break;
case 'h':
default:
usage(argv[0]);
}
}
/* Never launch more threads than there are permutations to evaluate. */
num_trials = factorial(num_cities);
if(num_trials < num_threads){
num_threads = num_trials;
}
double start_time = now();
// cost array
int* h_cperm = create_tsp(num_cities, random_seed);
// print_tsp(h_cperm, num_cities);
int* d_cperm;
// output Array
int h_output[num_threads];
int* d_output;
// perm array
int *d_perm;
// num_as_short array
int h_num_short[num_threads];
int *d_num_short;
/* NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here. */
cudaMalloc((void **)&d_perm, sizeof(int)*num_cities);
cudaMalloc((void **)&d_cperm, sizeof(int)*num_cities*num_cities);
cudaMalloc((void **)&d_output, sizeof(int)*num_threads);
cudaMalloc((void **)&d_num_short, sizeof(int)*num_threads);
cudaMemcpy(d_cperm, h_cperm, sizeof(int)*num_cities*num_cities, cudaMemcpyHostToDevice);
/* "Travel, salesman!" */
tsp_go<<<1, num_threads>>>(d_perm, num_cities, num_threads, d_cperm, d_output, d_num_short);
cudaDeviceSynchronize();
// collect results
cudaMemcpy(h_output, d_output, num_threads * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_num_short, d_num_short, num_threads * sizeof(int), cudaMemcpyDeviceToHost);
smallest_in_list(h_output, h_num_short, num_threads, &shortest_length, &num_as_short);
double time_took = now() - start_time;
/* Append the timing result to a log file. */
FILE *f = fopen("time.txt", "a");
fprintf(f, "%d Threads took %5.3f to compute %d cities with random_seed: %d. shortest_length= %d\n\n", num_threads, time_took, num_cities, random_seed, shortest_length);
fclose(f);
/* Report. */
printf("\n");
printf("Trials %ld\n", num_trials);
float percent_as_short = (float)num_as_short / (float)num_trials * 100.0;
printf("Shortest %d - %d tours - %.6f%%\n",
shortest_length, num_as_short, percent_as_short);
printf("\n");
free(h_cperm);
// free(h_output);
/* NOTE(review): d_num_short is never cudaFree'd -- confirm intentional. */
cudaFree(d_perm);
cudaFree(d_cperm);
cudaFree(d_output);
}
|
7,377 | /* File: cuda_wtime.cu */
/* Description: a timer that reports the current wall time */
/* */
/* Author: K Sujith Bhatt */
/* Chirag K */
/* National Institute of Technology Karnataka */
/* ------------------------------------------------------------------------- */
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
double wtime(void)
{
	/* Current wall-clock time in seconds, with microsecond resolution. */
	struct timeval tv;
	struct timezone tz;
	if (gettimeofday(&tv, &tz) == -1)
		perror("Error: calling gettimeofday() not successful.\n");
	return (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
}
#ifdef _TESTING_
int main(int argc, char **argv) {
	/* Smoke test for wtime(): print the current wall-clock time once. */
	double current = wtime();
	printf("time of day = %10.4f\n", current);
	return 0;
}
#endif
|
7,378 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <getopt.h>
#include <time.h>
#include <unistd.h>
#include <string.h>
//global variable for dimensions of matrices
unsigned long long dimension;
//--Function to randomly seed values in the matrices
//provided by Shahadat
double r8_uniform_01 ( int *seed ){
	/* Lehmer-style linear-congruential generator: advance *seed in place
	   and return a uniform pseudo-random sample scaled into (0, 1).
	   Deterministic for a given starting seed. */
	int quotient = *seed / 127773;
	*seed = 16807 * ( *seed - quotient * 127773 ) - quotient * 2836;
	if ( *seed < 0 ) {
		*seed = *seed + 2147483647;
	}
	return ( double ) ( *seed ) * 4.656612875E-10;
}
//We will use this define statement to specify what the size of the thread blocks should be
//optimum has been determined to be 16
#define BLOCK_SIZE 16
__global__ void global_memory_kernel(float* a, float* b, float* c, unsigned long long dimension) {
	// Each thread computes one element c[row][col] of the row-major
	// dimension x dimension product, accumulating the dot product of a's
	// row and b's column in a register.
	unsigned long long row = blockIdx.y * blockDim.y + threadIdx.y;
	unsigned long long col = blockIdx.x * blockDim.x + threadIdx.x;
	// FIX: the original tested 'row > dimension', which let row == dimension
	// (and col == dimension) through and wrote one row/column out of bounds.
	if(row >= dimension || col >= dimension) return;
	float accumulator = 0.0;
	for (unsigned long long e = 0; e < dimension; ++e)
		accumulator += (a[row * dimension + e]) * (b[e * dimension + col]);
	c[row * dimension + col] = accumulator;
}
//function doing matrix multiplication using only global memory
double* global_memory( float* a, float* b, float* c, unsigned long long run_number) {
	/* Benchmark one global-memory-only matrix multiply c = a * b of size
	   dimension x dimension on the GPU.  Returns a pointer to a 2-element
	   array: [0] elapsed seconds, [1] MegaOPS rate.
	   FIX: the result buffer is now static -- the original returned the
	   address of a stack-local array, which is undefined behaviour once
	   the function returns. */
	static double dt_and_rate[2];
	unsigned long long l = dimension;
	// Load A and B to device memory
	float* cuda_A;
	unsigned long long size = l*l*sizeof(float);
	cudaMalloc(&cuda_A, size);
	cudaMemcpy(cuda_A, a, size, cudaMemcpyHostToDevice);
	float* cuda_B;
	cudaMalloc(&cuda_B, size);
	cudaMemcpy(cuda_B, b, size, cudaMemcpyHostToDevice);
	// Allocate C in device memory
	float* cuda_C;
	cudaMalloc(&cuda_C, size);
	// BLOCK_SIZE x BLOCK_SIZE thread blocks; round the grid up so every
	// element of c is covered even when l is not a multiple of BLOCK_SIZE.
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid((l + dimBlock.x - 1) / dimBlock.x,(l + dimBlock.y - 1) / dimBlock.y);
	// CUDA event-based timing around the kernel launch.
	float time_elapsed;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start,0);
	global_memory_kernel<<<dimGrid, dimBlock>>>(cuda_A, cuda_B, cuda_C, dimension);
	// FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time_elapsed,start,stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// 2*l multiply-adds per element -> total operation count.
	unsigned long long ops = l * l * ( 2 * l );
	time_elapsed = time_elapsed/1000; //change into seconds
	double rate = ( double ) ( ops ) / time_elapsed / 1000000.0;
	printf ( "\n" );
	// FIX: run_number is unsigned long long, so %llu (was %d).
	printf("Run number: %llu\n", run_number);
	printf ( " Floating point OPS roughly %llu\n", ops );
	printf ( " Elapsed time dT = %f\n", time_elapsed);
	printf ( " Rate = MegaOPS/dT = %f\n", rate );
	// Read C from device memory
	cudaMemcpy(c, cuda_C, size, cudaMemcpyDeviceToHost);
	dt_and_rate[0]=time_elapsed;
	dt_and_rate[1]=rate;
	// Free device memory
	cudaFree(cuda_A);
	cudaFree(cuda_B);
	cudaFree(cuda_C);
	return dt_and_rate;
}
//retrieve a matrix element
//device function because this needs to execute on the graphics card, not on host machines
__device__ float retrieve_item(float* a, unsigned long long row, unsigned long long col, unsigned long long dim) {
	/* Read element (row, col) of a row-major dim x dim matrix. */
	unsigned long long offset = row * dim + col;
	return a[offset];
}
// Set a matrix element
//device function because this needs to execute on the graphics card, not on host machines
__device__ void set_entry(float* a, unsigned long long row, unsigned long long col, float value, unsigned long long dim) {
	/* Write element (row, col) of a row-major dim x dim matrix. */
	unsigned long long offset = row * dim + col;
	a[offset] = value;
}
//get the next sub-block of a matrix when doing blocking multiplication
__device__ float* get_next_block(float* a, unsigned long long row, unsigned long long col, unsigned long long dimension) {
	/* Pointer to the top-left element of the (row, col) BLOCK_SIZE x
	   BLOCK_SIZE tile within a row-major dimension x dimension matrix. */
	return &a[dimension * BLOCK_SIZE * row + BLOCK_SIZE * col];
}
// Matrix multiplication kernel called by MatMul()
/* Tiled matrix multiply: each thread block computes one BLOCK_SIZE x
   BLOCK_SIZE tile of C, staging tiles of A and B through shared memory.
   NOTE(review): the tile loop runs dimension / BLOCK_SIZE times with no
   bounds checks, so dimension must be a multiple of BLOCK_SIZE -- the host
   grid setup (dimension / dimBlock.x) makes the same assumption. */
__global__ void shared_memory_kernel(float* a, float* b, float* c, unsigned long long dimension) {
// Block row and column
unsigned long long blockRow = blockIdx.y;
unsigned long long blockCol = blockIdx.x;
// Each thread block computes one sub-matrix of C
float* block_c = get_next_block(c, blockRow, blockCol, dimension);
// Each thread computes one element of the tile of C
// by accumulating results into accumulator
float accumulator = 0.0;
// Thread row and column within the tile of c
unsigned long long row = threadIdx.y;
unsigned long long col = threadIdx.x;
// Loop over all A and B's tiles
// Multiply each pair of sub-matrices together
// and accumulate the results
for (unsigned long long m = 0; m < (dimension / BLOCK_SIZE); ++m) {
// Get sub-matrix of A
float* block_a = get_next_block(a, blockRow, m, dimension);
// Get sub-matrix of B
float* block_b = get_next_block(b, m, blockCol, dimension);
// Shared memory used to store the tiles of A and B
__shared__ float a_shared[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float b_shared[BLOCK_SIZE][BLOCK_SIZE];
// Load tiles from device memory to shared memory
// Each thread loads one element of each sub-matrix
a_shared[row][col] = retrieve_item(block_a, row, col, dimension);
b_shared[row][col] = retrieve_item(block_b, row, col, dimension);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (unsigned long long e = 0; e < BLOCK_SIZE; ++e)
accumulator += a_shared[row][e] * b_shared[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the tile of to device memory
// Each thread writes one element
set_entry(block_c, row, col, accumulator, dimension);
}
//function to execute shared-memory version of blocking matrix multiplication
double* shared_memory(float* a, float* b, float* c, unsigned long long run_number) {
	/* Benchmark one shared-memory (tiled) matrix multiply c = a * b of size
	   dimension x dimension on the GPU.  Returns a pointer to a 2-element
	   array: [0] elapsed seconds, [1] MegaOPS rate.
	   FIX: the result buffer is now static -- the original returned the
	   address of a stack-local array (undefined behaviour). */
	static double dt_and_rate[2];
	unsigned long long l = dimension;
	// Load A and B to device memory
	float* cuda_A;
	unsigned long long size = dimension * dimension * sizeof(float);
	cudaMalloc(&cuda_A, size);
	cudaMemcpy(cuda_A, a, size, cudaMemcpyHostToDevice);
	float* cuda_B;
	cudaMalloc(&cuda_B, size);
	cudaMemcpy(cuda_B, b, size, cudaMemcpyHostToDevice);
	// Allocate C in device memory
	float* cuda_C;
	cudaMalloc(&cuda_C, size);
	// Grid of exactly dimension/BLOCK_SIZE tiles in each direction; the
	// kernel assumes dimension is a multiple of BLOCK_SIZE.
	float time_elapsed;
	dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
	dim3 dimGrid(dimension / dimBlock.x, dimension / dimBlock.y);
	// CUDA event-based timing around the kernel launch.
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start,0);
	shared_memory_kernel<<<dimGrid, dimBlock>>>(cuda_A, cuda_B, cuda_C, dimension);
	// FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	cudaEventRecord(stop,0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time_elapsed,start,stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// 2*l multiply-adds per element -> total operation count.
	unsigned long long ops = l * l * ( 2 * l );
	time_elapsed = time_elapsed/1000; //change into seconds
	double rate = ( double ) ( ops ) / time_elapsed / 1000000.0;
	printf ( "\n" );
	// FIX: run_number is unsigned long long, so %llu (was %d).
	printf("Run number: %llu\n", run_number);
	printf ( " Floating point OPS roughly %llu\n", ops );
	printf ( " Elapsed time dT = %f\n", time_elapsed);
	printf ( " Rate = MegaOPS/dT = %f\n", rate );
	// Read c from device memory
	cudaMemcpy(c, cuda_C, size, cudaMemcpyDeviceToHost);
	// Free device memory
	cudaFree(cuda_A);
	cudaFree(cuda_B);
	cudaFree(cuda_C);
	dt_and_rate[0]=time_elapsed;
	dt_and_rate[1]=rate;
	return dt_and_rate;
}
void usage (char* argv[])
{
	/* Print command-line help for this benchmark, then terminate. */
	printf("Usage: %s -n <matrix dimensions>\n", argv[0]);
	fputs("Options:\n", stdout);
	fputs(" -h Print this help message.\n", stdout);
	fputs(" -n Dimensions of the matrices.\n", stdout);
	fputs("\nExamples:\n", stdout);
	printf(" %s -n 256 > output.txt\n", argv[0]);
	printf(" %s -n 4096 > output.txt\n", argv[0]);
	exit(0);
}
//main function
int main(int argc, char* argv[]){
	/* Parse -n <dimension>, fill two dimension x dimension matrices with
	   pseudo-random values, then benchmark the global-memory and the
	   shared-memory GPU multiplies ten times each, reporting averages. */
	double* temp;
	double average_dt=0.0;
	double average_rate=0.0;
	// FIX: getopt() returns int; storing the result in a plain char breaks
	// the comparison with -1 on platforms where char is unsigned.
	int options;
	while( (options=getopt(argc,argv,"n:h")) != -1){
		switch(options){
		case 'n':
			dimension = atoi(optarg);
			break;
		case 'h':
			usage(argv);
			exit(0);
		default:
			usage(argv);
			exit(1);
		}
	}
	// A matrix dimension is required; without it every buffer below is empty.
	if (dimension == 0) {
		usage(argv);
	}
	//allocate space
	float* a = (float*)malloc(dimension * dimension * sizeof(float));
	float* b = (float*)malloc(dimension * dimension * sizeof(float));
	float* c = (float*)malloc(dimension * dimension * sizeof(float));
	int seed=123456789;
	//seed the matrices with random values
	for(unsigned long long i = 0; i < dimension; i++){
		for(unsigned long long j = 0; j < dimension; j++){
			a[i*dimension + j] = (float) (r8_uniform_01 ( &seed ));
		}
	}
	for(unsigned long long i = 0; i < dimension; i++){
		for(unsigned long long j = 0; j < dimension; j++){
			b[i*dimension + j] = (float) (r8_uniform_01 ( &seed ));
		}
	}
	printf ("Thread Block Size: %d\n", BLOCK_SIZE);
	printf( "\n" );
	printf( "========================Unoptimized CUDA Multiplication================================\n" );
	for(unsigned long long i =0; i < 10; i++){
		temp = global_memory(a, b, c,i);
		average_dt += temp[0];
		average_rate += temp[1];
	}
	average_rate = (double) average_rate/10;
	average_dt = (double) average_dt/10;
	printf("Average Elapsed Time dT: %f\n", average_dt);
	printf("Average Rate: %f\n", average_rate);
	average_rate=average_dt=0.0;
	printf( "\n" );
	printf( "========================Blocking Optimized CUDA Multiplication================================\n" );
	for(unsigned long long i=0; i<10;i++){
		temp=shared_memory(a,b,c,i);
		average_dt += temp[0];
		average_rate += temp[1];
	}
	average_rate = (double) average_rate/10;
	average_dt = (double) average_dt/10;
	printf("Average Elapsed Time dT: %f\n", average_dt);
	printf("Average Rate: %f\n", average_rate);
	average_rate=average_dt=0.0;
	//free host memory
	free(a);
	free(b);
	free(c);
}
|
7,379 | #include "includes.h"
__global__ void add_scalar(float *v, float other, int n) {
    /* In-place element-wise add of a scalar: v[i] += other for i in [0, n). */
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < n) {
        v[idx] += other;
    }
} |
7,380 | /* Lab 4 Exercise 1 Program
In this exercise we write a CUDA program to decipher some text encoded using an affine cipher (in the file `encrypted1.bin`)
An affine cipher is a monoalphabetic substitution cipher, and it is decrypted by an affine decipher
The encryption function is E(x) = (Ax + B) mod M
The decryption function is D(x) = A^{−1}(x − B) mod M
In this exercise `M` is 128 (the size of the ASCII alphabet), `A` is 15, `B` is 27, and so `A^{-1}` is 111.
As each of the encrypted character values are independent we can use the GPU to decrypt them in parallel.
To do this we will launch a thread for each of the encrypted character values and use a
kernel function to perform the decryption. */
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N 1024 // The number of characters in the encrypted text
#define A 111 // The inverse multiplier to the encryption multiplier
#define B 27 // The shift value in the encryption function
#define M 128 // The modulus of the encryption (the size of the ASCII alphabet)
#define MULTIBLOCK 1 // Set this definition to `0` for single block, `1` to run the multiblock kernel
void checkCUDAError(const char*);
void read_encrypted_file(int*);
/* 1.1 Modify the `modulo` function so that it can be called on the device by the `affine_decrypt` kernel. */
__device__ int modulo(int a, int m) {
	/* Mathematical modulus: always returns a value in [0, m), unlike C's
	   remainder operator, which can be negative for negative a. */
	int r = a % m;
	if (r < 0) {
		r += m;
	}
	return r;
}
/* 1.2 Implement the decryption kernel for a single block of threads with an `x` dimension of `N` (1024).
The function should store the result in `d_output`. You can define the
inverse modulus `A`, `B` and `M` using pre-processor definitions. */
__global__ void affine_decrypt(int *d_input, int *d_output) {
	/* Single-block decryption: thread i applies D(x) = A^{-1}(x - B) mod M
	   to character i.  Assumes one block of exactly N threads. */
	int idx = threadIdx.x;
	d_output[idx] = modulo(A * (d_input[idx] - B), M);
}
/* 1.8 Complete the `affine_decrypt_multiblock` kernel to work using multiple blocks of threads.
Change your grid and block dimensions so that you launch 8 blocks of 128 threads. Note: 8 * 128 = 1024. */
__global__ void affine_decrypt_multiblock(int *d_input, int *d_output) {
	/* Multi-block variant: the flat global thread index selects the
	   character.  Assumes the grid covers exactly N threads. */
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	d_output[idx] = modulo(A * (d_input[idx] - B), M);
}
int main(int argc, char *argv[]) {
	/* Read N encrypted character values, decrypt them on the GPU with the
	   affine decipher (single- or multi-block per MULTIBLOCK), and print
	   the plaintext. */
	int *h_input, *h_output;
	int *d_input, *d_output;
	unsigned int size;
	int i;
	size = N * sizeof(int); // Define the size of the data
	/* Allocate the host memory */
	h_input = (int *)malloc(size);
	h_output = (int *)malloc(size);
	/* Allocate memory on the device for the input `d_input` and output `d_output` */
	cudaMalloc((void **)&d_input, size);
	cudaMalloc((void **)&d_output, size);
	checkCUDAError("Memory allocation");
	/* Read the encryted text from file to `h_input` */
	read_encrypted_file(h_input);
	/* Copy the host input values in `h_input` to the device memory `d_input`. */
	cudaMemcpy(d_input, h_input, size, cudaMemcpyHostToDevice);
	checkCUDAError("Input transfer to device");
	/* Configure the grid of thread blocks and run the GPU kernel. */
	if (MULTIBLOCK == 0) {
		// A single block of `N` threads.
		dim3 blocksPerGrid(1, 1, 1);
		dim3 threadsPerBlock(N, 1, 1);
		affine_decrypt <<<blocksPerGrid, threadsPerBlock >>>(d_input, d_output);
	}
	else if (MULTIBLOCK == 1) {
		/* 8 blocks of 128 threads (8 * 128 = 1024 = N). */
		dim3 blocksPerGrid(8, 1, 1);
		dim3 threadsPerBlock(N/8, 1, 1); // Note: 8 * 128 = 1024.
		affine_decrypt_multiblock <<<blocksPerGrid, threadsPerBlock >>>(d_input, d_output);
	}
	/* Wait for all threads to complete.
	   FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
	   is the supported drop-in replacement. */
	cudaDeviceSynchronize();
	checkCUDAError("Kernel execution");
	/* Copy the device output values in `d_output` to the host memory `h_output`. */
	cudaMemcpy(h_output, d_output, size, cudaMemcpyDeviceToHost);
	checkCUDAError("Result transfer to host");
	/* Print out the result to screen */
	for (i = 0; i < N; i++) {
		printf("%c", (char)h_output[i]);
	}
	printf("\n");
	/* Free device memory */
	cudaFree(d_input);
	cudaFree(d_output);
	checkCUDAError("Free memory");
	/* Free host buffers */
	free(h_input);
	free(h_output);
	return 0;
}
void checkCUDAError(const char *msg) {
	/* Abort with a labelled message if any preceding CUDA API call or
	   kernel launch recorded an error. */
	cudaError_t code = cudaGetLastError();
	if (code == cudaSuccess) {
		return;
	}
	fprintf(stderr, "CUDA ERROR: %s: %s.\n", msg, cudaGetErrorString(code));
	exit(EXIT_FAILURE);
}
void read_encrypted_file(int* input) {
	/* Load N encrypted 32-bit values from encrypted1.bin into input.
	   Exits with an error message if the file is missing or truncated. */
	FILE *f = NULL;
	f = fopen("encrypted1.bin", "rb"); // Read-only and binary flags
	if (f == NULL) {
		fprintf(stderr, "Error: Could not find encrypted1.bin file \n");
		exit(1);
	}
	// FIX: the original ignored fread's return value; a short read would
	// silently leave part of the buffer uninitialized.
	if (fread(input, sizeof(unsigned int), N, f) != (size_t)N) {
		fprintf(stderr, "Error: encrypted1.bin is truncated \n");
		fclose(f);
		exit(1);
	}
	fclose(f);
}
|
template<typename T>
__global__
void init_array_kernel(T *gpu_array, size_t n) {
    /* Default-construct every element of gpu_array[0..n) in place,
       using a grid-stride loop so any launch configuration covers n. */
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        new (&gpu_array[idx]) T();
    }
}
template<class T, class TransformedT, class TransformationT>
__global__
void transform_kernel(
    T *from_array, size_t n,
    TransformedT *to_array,
    TransformationT transform) {
    /* Grid-stride map: to_array[i] = transform(from_array[i], i) for every
       i in [0, n), independent of launch configuration. */
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        to_array[idx] = transform(from_array[idx], idx);
    }
}
template<typename T, typename Transformation>
__global__
void for_each_kernel(T *gpu_array, size_t n, Transformation fn) {
    /* Grid-stride for-each: invoke fn(gpu_array[i], i) for every i in
       [0, n); fn may mutate the element through its reference. */
    const int stride = blockDim.x * gridDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < n; idx += stride) {
        fn(gpu_array[idx], idx);
    }
}
/* Two-step reduction: each block folds its grid-stride slice of gpu_array
   into shared memory, then log-reduces it and writes one partial result per
   block to out[blockIdx.x] (the host or a second launch combines them).
   Requires blockDim.x * sizeof(T) bytes of dynamic shared memory.
   NOTE(review): the halving loop pairs tid with tid + active_threads, so
   blockDim.x must be a power of two -- confirm at the launch sites.
   NOTE(review): 'extern __shared__ T partials[]' inside a template can
   clash across instantiations with different T -- verify this compiles for
   every instantiation used. */
template<typename T, typename Reduction>
__global__
void reduce_2step_kernel(
T *gpu_array, size_t n,
T *out,
Reduction fn, T initial_value=T{}) { /*
Log-reduction based from the one in the book "The CUDA Handbook" by
Nicholas Wilt.
*/
extern __shared__ T partials[];
const int tid = threadIdx.x;
auto reduced = initial_value;
/* Sequential phase: fold this thread's grid-stride slice into a register. */
for (int i = blockIdx.x * blockDim.x + tid;
i < n;
i += blockDim.x * gridDim.x) {
reduced = fn(reduced, gpu_array[i]);
}
partials[tid] = reduced;
__syncthreads();
/* Parallel phase: halve the number of active threads each pass, folding
   the upper half of partials[] into the lower half. */
for (int active_threads = blockDim.x / 2;
active_threads > 0;
active_threads /= 2) {
auto is_active_thread = tid < active_threads;
if (is_active_thread) {
partials[tid] = fn(partials[tid], partials[tid + active_threads]);
}
/* Barrier outside the branch: every thread must reach it. */
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = partials[0];
}
}
/* -----------------------------------------------------------------------
These kernels are used and tested on the gpu_array and gpu_object classes.
*/
|
7,382 | //
// http://forums.nvidia.com/index.php?showtopic=34309
//
#include <stdio.h>
// called from host, run on device
__global__ void add_arrays_gpu(float *in1,float *in2,float *out)
{
	/* One thread per element: out[i] = in1[i] + in2[i].  Assumes a single
	   block whose x-dimension equals the array length (flat model). */
	int i = threadIdx.x;
	out[i] = in1[i] + in2[i];
}
/* Minimal end-to-end example (from the NVIDIA forum thread above): add two
   18-element vectors on the GPU with one block of N threads and print the
   result.  NOTE(review): no CUDA call here is error-checked. */
int main()
{
// pointers to host memory
float *a,*b,*c;
// pointers to device memory
float *a_d,*b_d,*c_d;
int N=18;
int i;
// allocate arrays a, b and c on host
a=(float*)malloc(N*sizeof(float));
b=(float*)malloc(N*sizeof(float));
c=(float*)malloc(N*sizeof(float));
// allocate arrays a_d, b_d and c_d on device
cudaMalloc((void**)&a_d,sizeof(float)*N);
cudaMalloc((void**)&b_d,sizeof(float)*N);
cudaMalloc((void**)&c_d,sizeof(float)*N);
// initialize arrays a and b
for(i=0;i<N;i++){
a[i]= (float) i*i;
b[i]=-(float) i/2.0f;
}
// copy input from host memory to device memory
cudaMemcpy(a_d,a,sizeof(float)*N,cudaMemcpyHostToDevice);
cudaMemcpy(b_d,b,sizeof(float)*N,cudaMemcpyHostToDevice);
// execution configuration: How the threads are arranged, FLAT and LINEAR.
dim3 dimGrid(1),dimBlock(N);
add_arrays_gpu<<<dimGrid,dimBlock>>>(a_d, b_d, c_d);
// copy result from device memory to host memory
// (the blocking cudaMemcpy also synchronizes with the kernel)
cudaMemcpy(c,c_d,sizeof(float)*N,cudaMemcpyDeviceToHost);
for(i=0;i<N;i++)
printf("c[%d]=%f\n",i,c[i]);
free(a);
free(b);
free(c);
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
}
|
7,383 | #include "includes.h"
__global__ void convert(double* A,double* C)
{
    /* Strided reversal copy: A[i] = C[SIZE-1-i] for every i in [0, SIZE),
       with threads advancing by BLOCK * THREAD each iteration. */
    int start = BLOCK * blockIdx.x + threadIdx.x;
    int step = BLOCK * THREAD;
    for (int i = start; i < SIZE; i += step) {
        A[i] = C[SIZE - i - 1];
    }
} |
7,384 | #include <stdio.h>
#include <cuda_runtime.h>
// reference
// https://devblogs.nvidia.com/maximizing-unified-memory-performance-cuda/
#define CHECK(call) { \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("code: %d, reason: %s\n", error, cudaGetErrorString(error)); \
exit(1); \
} \
} \
#define PAGE_STRIDE 65536 // page size: 64K -> 65536 bytes
template <typename data_type>
__global__ void stream_thread(data_type *input, data_type *output, const int n);
template <typename data_type>
__global__ void stream_warp(data_type *input, data_type *output, const int n);
void initialData(float *ip, const int n);
void verifyResult(float *result, float *reference, const int n);
/* Unified-memory streaming benchmark: copy a 1M-float buffer in -> out on
   the GPU under three access patterns selected by argv[1]:
     1 = no prefetch, one thread per element
     2 = no prefetch, one warp per 64K page
     3 = prefetch both buffers to the device, one thread per element
   argv[2] optionally overrides the block size (default 256). */
int main(int argc, char **argv) {
int n = 1<<20;
size_t nBytes = n * sizeof(float);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// allocate unified memory
float *in, *out;
CHECK(cudaMallocManaged((float**)&in, nBytes));
CHECK(cudaMallocManaged((float**)&out, nBytes));
// Bring the input pages to the CPU before initializing them there.
CHECK(cudaMemPrefetchAsync(in, nBytes, cudaCpuDeviceId, 0));
initialData(in, n);
// launch kernels
int caseNo = 1;
if (argc > 1) caseNo = atoi(argv[1]);
int blockSize = 256;
if (argc > 2) blockSize = atoi(argv[2]);
dim3 block(blockSize);
dim3 grid((n + block.x - 1) / block.x);
int pages = (nBytes + PAGE_STRIDE - 1) / PAGE_STRIDE; // # pages
switch (caseNo) {
case 1: // no pre-fetching; normal kernel
grid.x = (n + block.x - 1) / block.x;
printf("<<< %d, %d >>>\n", grid.x, block.x);
stream_thread<float> <<<grid, block>>> (in, out, n);
break;
case 2: // no pre-fetching; one warp per page
grid.x = (pages * 32 + block.x - 1) / block.x; // # warps = # pages
printf("<<< %d, %d >>>\n", grid.x, block.x);
stream_warp<float> <<<grid, block>>> (in, out, n);
break;
case 3: // pre-fetching; normal kernel
CHECK(cudaMemPrefetchAsync(in, nBytes, dev, 0));
CHECK(cudaMemPrefetchAsync(out, nBytes, dev, 0));
grid.x = (n + block.x - 1) / block.x;
printf("<<< %d, %d >>>\n", grid.x, block.x);
stream_thread<float> <<<grid, block>>> (in, out, n);
break;
}
CHECK(cudaDeviceSynchronize());
// Pull both buffers back to the CPU before host-side verification.
CHECK(cudaMemPrefetchAsync(in, nBytes, cudaCpuDeviceId, 0));
CHECK(cudaMemPrefetchAsync(out, nBytes, cudaCpuDeviceId, 0));
verifyResult(out, in, n);
CHECK(cudaGetLastError());
// free memory
CHECK(cudaFree(in));
CHECK(cudaFree(out));
// clean up all resources
CHECK(cudaDeviceReset());
return 0;
}
/**********CUDA kernels**********/
// Straight 1:1 copy kernel: global thread i moves element i, with a guard
// for the grid tail when n is not a multiple of the block size.
template <typename data_type>
__global__ void stream_thread(data_type *input, data_type *output, const int n) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= n)
        return;
    output[idx] = input[idx];
}
// Copy kernel where each warp handles one PAGE_STRIDE-byte (64 KB) page of
// the buffer, so unified-memory demand-paging faults are taken once per warp
// rather than once per thread.
template <typename data_type>
__global__ void stream_warp(data_type *input, data_type *output, const int n) {
    int laneId = threadIdx.x & 31;                  // lane index within the warp
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int warpId = tid >> 5;                          // global warp index == page index
    size_t size = n * sizeof(data_type);
    int pages = (size + PAGE_STRIDE - 1) / PAGE_STRIDE; // how many pages
    if (warpId < pages) {
        // PAGE_STRIDE / sizeof(data_type) elements per page, 32 lanes per pass
        #pragma unroll // tell compiler to specifically unroll a loop
        for(int rep = 0; rep < PAGE_STRIDE / sizeof(data_type) / 32; rep++) {
            // element = start of this warp's page + pass offset + lane
            int ind = warpId * PAGE_STRIDE / sizeof(data_type) + rep * 32 + laneId;
            if (ind < n)                            // guard the final partial page
                output[ind] = input[ind];
        }
    }
}
/**********host functions**********/
// Fill ip[0..n) with pseudo-random floats in [0.0, 25.5] (one byte of rand()
// divided by 10).
void initialData(float *ip, const int n) {
    int i = 0;
    while (i < n) {
        const int byte = rand() & 0xFF;   // 0..255
        ip[i] = (float) byte / 10.f;
        ++i;
    }
}
// Compare result[0..n) against reference[0..n) element-wise within an
// absolute tolerance, reporting the first mismatch or overall success.
// FIX: the original used abs() on float operands; with only <stdio.h> and
// <cuda_runtime.h> included that resolves to the integer abs(), truncating
// every difference below 1.0 to 0 — so real mismatches passed silently.
void verifyResult(float *result, float *reference, const int n) {
    const double eps = 1e-8;
    for (int i = 0; i < n; i++) {
        float diff = result[i] - reference[i];
        if (diff < 0.0f) diff = -diff;   // manual fabs: no <cmath> in this TU
        if (diff > eps) {
            printf("Arrays do not match:\n");
            printf("result %5.2f reference %5.2f at array index %d\n", result[i], reference[i], i);
            return;
        }
    }
    printf("Arrays match!\n");
    return;
} |
7,385 | #include <cuda_runtime.h>
#include <iostream>
// Prints the device's maximum resident thread count (SM count times max
// threads per SM) when the first CLI argument starts with '0'.
int main(int argc, char* argv[]) {
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    // FIX: the original dereferenced argv[1] without checking argc —
    // undefined behavior when the program is run with no arguments.
    if (argc > 1 && argv[1][0] == '0')
        std::cout << devProp.multiProcessorCount * devProp.maxThreadsPerMultiProcessor;
}
|
7,386 | #include<iostream>
using namespace std;
// Element-wise vector add: d_Sum[i] = d_A[i] + d_B[i].
// NOTE(review): no bounds check — the launch must cover exactly the array
// size (the caller uses 625*512 == 320000); confirm before reusing.
__global__ void Array_Add(float* d_A, float* d_B, float* d_Sum)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    d_Sum[idx] = d_A[idx] + d_B[idx];
}
// Adds two 320000-element vectors on the GPU and times the kernel with
// CUDA events.
int main()
{
    const int Array_Size = 320000;
    const int Array_Bytes = Array_Size * sizeof(float);
    // FIX: heap-allocate the host buffers. The original declared three
    // 1.28 MB float arrays (~3.8 MB total) as locals, which overflows
    // common 1-8 MB stack limits.
    float *h_A = new float[Array_Size];
    float *h_B = new float[Array_Size];
    float *h_Sum = new float[Array_Size];
    for (int i = 0; i < Array_Size; i++)
    {
        h_A[i] = (float)i;
        h_B[i] = (float)i;
    }
    float *d_A, *d_B, *d_Sum;
    // Measuring performance
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc((void**)&d_A, Array_Bytes);
    cudaMalloc((void**)&d_B, Array_Bytes);
    cudaMalloc((void**)&d_Sum, Array_Bytes);
    cudaMemcpy(d_A, h_A, Array_Bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, Array_Bytes, cudaMemcpyHostToDevice);
    // Start of performance measurement
    cudaEventRecord(start);
    Array_Add<<<625, 512>>>(d_A, d_B, d_Sum);   // 625*512 == Array_Size exactly
    // End of performance measurement
    cudaEventRecord(stop);
    // Block CPU execution until the event "stop" is recorded
    cudaEventSynchronize(stop);
    // Print the time taken in milliseconds
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    cout << "The total time taken is " << milliseconds << " milliseconds.\n";
    cudaMemcpy(h_Sum, d_Sum, Array_Bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < Array_Size; i++)
        cout << h_Sum[i] << " ";
    // release events and device/host memory (events leaked in the original)
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_Sum);
    delete[] h_A;
    delete[] h_B;
    delete[] h_Sum;
} |
7,387 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// One thread per element: c[i] = a[i] + b[i], guarded for the grid tail.
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Vector-add benchmark: separately times H2D copies ("w"), the kernel ("e"),
// and the D2H copy ("w"). Optional argv[1] = threads per block.
int main( int argc, char* argv[] )
{
    int n = 100000;
    double *h_a;          // host input a
    double *h_b;          // host input b
    double *h_c;          // host result
    double *d_a;
    double *d_b;
    double *d_c;
    size_t bytes = n*sizeof(double);
    srand(time(NULL));
    h_a = (double*)malloc(bytes);
    h_b = (double*)malloc(bytes);
    h_c = (double*)malloc(bytes);
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    for (int i = 0; i < n; i++) {
        h_a[i] = rand ()%100;
        h_b[i] = rand ()%100;
    }
    float time;
    // FIX: create the timing events ONCE and reuse them. The original called
    // cudaEventCreate on the same handles three times without ever destroying
    // them, leaking two events per measurement.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // --- time host-to-device copies ---
    cudaEventRecord(start, 0);
    cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("w %f\n", time);
    // FIX: the original read argv[1] unconditionally — UB when launched with
    // no arguments. Fall back to a sane default block size.
    int blockSize = (argc > 1) ? atoi(argv[1]) : 256;
    if (blockSize <= 0) blockSize = 256;
    int gridSize = (int)ceil((float)n/blockSize);
    // --- time the kernel ---
    cudaEventRecord(start, 0);
    vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("e %f\n", time);
    // --- time the device-to-host copy ---
    cudaEventRecord(start, 0);
    cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost );
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("w %f", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
7,388 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (file header: do not modify).
// Mixes the 22 inputs through a fixed expression tree of float arithmetic
// and intrinsic calls, then prints the final value with full precision.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
    comp = var_1 + -1.0156E-36f * var_2;
    // branch 1: taken when comp is below a scaled -0/var_4 term
    if (comp < (var_3 * (-0.0f / var_4))) {
        float tmp_1 = +1.3948E1f + +1.7148E35f * +1.4966E-1f;
        comp += tmp_1 + (var_5 * sqrtf((var_6 - (var_7 * -1.3162E-42f))));
        comp += +1.2144E-41f - (var_8 + -1.9306E-9f + var_9);
    }
    // branch 2: note both assignments write comp; only the second survives
    if (comp > -0.0f * (-1.2006E34f + var_10)) {
        float tmp_2 = +1.5972E34f;
        float tmp_3 = var_11 - +1.4203E-42f / cosf(var_12 - fmodf(-1.6689E-43f - var_13, var_14 + sqrtf(+0.0f)));
        comp = tmp_3 * tmp_2 + -0.0f - var_15 / (+1.4749E-44f + -1.5832E-43f);
        comp = (var_16 / atan2f(var_17 / var_18 / ceilf((var_19 / (+1.8022E34f + var_20 / -1.6589E-37f + var_21))), (+0.0f / +1.7854E-37f)));
    }
    printf("%.17g\n", comp);
}
// Allocate a 10-element float array on the host heap and fill every slot
// with v. Caller owns the returned buffer (free() it).
float* initPointer(float v) {
    float *ret = (float*) malloc(sizeof(float)*10);
    int i = 0;
    while (i < 10) {
        ret[i] = v;
        ++i;
    }
    return ret;
}
// Parses 22 floats from the command line and feeds them to the test kernel.
int main(int argc, char** argv) {
    /* Program variables */
    // FIX: the original indexed argv[1..22] without checking argc —
    // undefined behavior whenever the program is run with fewer arguments.
    if (argc < 23) {
        printf("usage: %s f1 f2 ... f22\n", argv[0]);
        return 1;
    }
    float tmp[22];
    for (int i = 0; i < 22; ++i)
        tmp[i] = atof(argv[i + 1]);
    compute<<<1,1>>>(tmp[0],tmp[1],tmp[2],tmp[3],tmp[4],tmp[5],tmp[6],tmp[7],
                     tmp[8],tmp[9],tmp[10],tmp[11],tmp[12],tmp[13],tmp[14],
                     tmp[15],tmp[16],tmp[17],tmp[18],tmp[19],tmp[20],tmp[21]);
    cudaDeviceSynchronize();
    return 0;
}
|
7,389 | #include "includes.h"
// Concatenate along z: the first `stride` output elements come from z,
// the remaining ones from src shifted down by `stride`. One thread per
// output element, guarded against the grid tail.
__global__ void concat_z(size_t sz, float_t* src, float_t* dest, float_t* z, size_t stride)
{
    const size_t i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= sz)
        return;
    dest[i] = (i < stride) ? z[i] : src[i - stride];
} |
7,390 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#define N 100
using namespace std;
// Computes one Leibniz-series term per thread: pi_4[i] = (-1)^i / (2i+1).
__global__ void elemento_n(float *pi_4){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if(i < N){
        // FIX: the original evaluated (-1)^i with pow((float)-1,(float)i) —
        // a transcendental call for what is just the parity of i. The
        // branchless sign is exact and yields identical values.
        float sign = (i & 1) ? -1.0f : 1.0f;
        pi_4[i] = sign / (float)(2*i+1);
    }
}
// Print basic launch-related capabilities of device 0 to stdout.
void showDeviceProperties(){
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop,0);   // NOTE(review): return status unchecked
    cout << "Max Threads: " << prop.maxThreadsPerBlock << endl;
    cout << "Max Grid Size: " << prop.maxGridSize[0] << " " << prop.maxGridSize[1] << " " << prop.maxGridSize[2] << endl;
    cout << "Max Threads Dim: " << prop.maxThreadsDim[0] << " " << prop.maxThreadsDim[1] << " " << prop.maxThreadsDim[2] << endl;
    cout << "MultiProcessor Count: " << prop.multiProcessorCount << endl;
    cout << "Warp Size: " << prop.warpSize << endl;
}
// Approximates pi/4 by summing N Leibniz-series terms computed on the GPU.
int main(){
    showDeviceProperties();
    int i;
    float val[N];        // N == 100 -> 400 bytes, fine on the stack
    float *dev_val;
    float pi_4 = 0;
    cudaMalloc( (void**)&dev_val, N * sizeof(float) );
    elemento_n<<< ceil(N/512.0), 512 >>>(dev_val);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the documented replacement with identical blocking semantics.
    cudaDeviceSynchronize();
    cout << cudaGetErrorString(cudaGetLastError()) << endl;
    cudaMemcpy(val, dev_val, N * sizeof(float), cudaMemcpyDeviceToHost);
    // accumulate the series terms on the host
    for(i=0; i<N; i++){
        pi_4 = pi_4 + val[i];
    }
    cout.precision(100);
    cout << fixed << "El valor de pi/4: " << pi_4 << endl;
    cudaFree(dev_val);
    return 0;
}
|
7,391 | /**********key使用共享内存*************/
/**********使用T-box***********/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <cstring>
#include <cuda.h>
#include <iomanip>
#include <time.h>
#include "cuda_profiler_api.h"
#define BYTE unsigned char
typedef unsigned long u32;
using namespace std;
// One 16-byte (128-bit) AES data block; plaintext on input to the kernel,
// ciphertext on output.
class aes_block
{
public:
    BYTE block[16]; // raw block bytes (the host loader stores them permuted via shiftTab)
};
// Standard AES forward S-box (SubBytes lookup table), indexed by byte value:
// row = high nibble, column = low nibble. Used by AES_ExpandKey's SubWord step.
BYTE AES_Sbox[] =
{ /*0 1 2 3 4 5 6 7 8 9 a b c d e f */
0x63,0x7c,0x77,0x7b,0xf2,0x6b,0x6f,0xc5,0x30,0x01,0x67,0x2b,0xfe,0xd7,0xab,0x76, /*0*/
0xca,0x82,0xc9,0x7d,0xfa,0x59,0x47,0xf0,0xad,0xd4,0xa2,0xaf,0x9c,0xa4,0x72,0xc0, /*1*/
0xb7,0xfd,0x93,0x26,0x36,0x3f,0xf7,0xcc,0x34,0xa5,0xe5,0xf1,0x71,0xd8,0x31,0x15, /*2*/
0x04,0xc7,0x23,0xc3,0x18,0x96,0x05,0x9a,0x07,0x12,0x80,0xe2,0xeb,0x27,0xb2,0x75, /*3*/
0x09,0x83,0x2c,0x1a,0x1b,0x6e,0x5a,0xa0,0x52,0x3b,0xd6,0xb3,0x29,0xe3,0x2f,0x84, /*4*/
0x53,0xd1,0x00,0xed,0x20,0xfc,0xb1,0x5b,0x6a,0xcb,0xbe,0x39,0x4a,0x4c,0x58,0xcf, /*5*/
0xd0,0xef,0xaa,0xfb,0x43,0x4d,0x33,0x85,0x45,0xf9,0x02,0x7f,0x50,0x3c,0x9f,0xa8, /*6*/
0x51,0xa3,0x40,0x8f,0x92,0x9d,0x38,0xf5,0xbc,0xb6,0xda,0x21,0x10,0xff,0xf3,0xd2, /*7*/
0xcd,0x0c,0x13,0xec,0x5f,0x97,0x44,0x17,0xc4,0xa7,0x7e,0x3d,0x64,0x5d,0x19,0x73, /*8*/
0x60,0x81,0x4f,0xdc,0x22,0x2a,0x90,0x88,0x46,0xee,0xb8,0x14,0xde,0x5e,0x0b,0xdb, /*9*/
0xe0,0x32,0x3a,0x0a,0x49,0x06,0x24,0x5c,0xc2,0xd3,0xac,0x62,0x91,0x95,0xe4,0x79, /*a*/
0xe7,0xc8,0x37,0x6d,0x8d,0xd5,0x4e,0xa9,0x6c,0x56,0xf4,0xea,0x65,0x7a,0xae,0x08, /*b*/
0xba,0x78,0x25,0x2e,0x1c,0xa6,0xb4,0xc6,0xe8,0xdd,0x74,0x1f,0x4b,0xbd,0x8b,0x8a, /*c*/
0x70,0x3e,0xb5,0x66,0x48,0x03,0xf6,0x0e,0x61,0x35,0x57,0xb9,0x86,0xc1,0x1d,0x9e, /*d*/
0xe1,0xf8,0x98,0x11,0x69,0xd9,0x8e,0x94,0x9b,0x1e,0x87,0xe9,0xce,0x55,0x28,0xdf, /*e*/
0x8c,0xa1,0x89,0x0d,0xbf,0xe6,0x42,0x68,0x41,0x99,0x2d,0x0f,0xb0,0x54,0xbb,0x16 /*f*/
};
// Dump len bytes to stdout as space-separated lowercase hex, then a newline.
void printBytes(BYTE b[], int len) {
    for (int k = 0; k < len; ++k)
        printf("%x ", b[k]);
    printf("\n");
}
// Write len bytes to fp as zero-padded hex, reading them through the fixed
// shiftTab permutation (state-matrix transpose order), then a newline.
void f1printBytes(BYTE b[], int len, FILE* fp) {
    static const int shiftTab[16] = {0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
    for (int k = 0; k < len; ++k)
        fprintf(fp, "%02x ", b[shiftTab[k]]);
    fprintf(fp, "\n");
}
int flag=0;
// Write len raw bytes to fp, counting newline bytes in the global `flag`.
void f2printBytes(BYTE b[], int len, FILE* fp) {
    for (int k = 0; k < len; ++k) {
        fprintf(fp, "%c", b[k]);
        flag += (b[k] == '\n') ? 1 : 0;
    }
}
// Like f2printBytes, but stops at the first NUL byte (treats b as
// NUL-terminated); newline bytes are counted in the global `flag`.
void f3printBytes(BYTE b[], int len, FILE* fp) {
    for (int k = 0; k < len; ++k) {
        if (b[k] == '\0')
            return;
        fprintf(fp, "%c", b[k]);
        if (b[k] == '\n')
            ++flag;
    }
}
// Expand an AES-128 cipher key in place into the full 176-byte round-key
// schedule (11 round keys x 16 bytes). On entry key[0..16) holds the cipher
// key; key must have room for 176 bytes.
void AES_ExpandKey(BYTE key[]) {
    int kl = 16, ks=176, Rcon = 1, i, j;   // key length, schedule size, round constant
    BYTE temp[4], temp2[4];
    for(i = kl; i < ks; i += 4) {
        memcpy(temp, &key[i-4], 4);        // previous 4-byte word
        if (i % kl == 0) {
            // first word of a round key: RotWord + SubWord + Rcon
            temp2[0] = AES_Sbox[temp[1]] ^ Rcon;
            temp2[1] = AES_Sbox[temp[2]];
            temp2[2] = AES_Sbox[temp[3]];
            temp2[3] = AES_Sbox[temp[0]];
            memcpy(temp, temp2, 4);
            if ((Rcon <<= 1) >= 256)
                Rcon ^= 0x11b;             // reduce modulo the AES field polynomial
        }
        else if ((kl > 24) && (i % kl == 16)) {
            // extra SubWord step for 256-bit keys; kl is fixed at 16 above,
            // so this branch is unreachable here — NOTE(review): confirm
            temp2[0] = AES_Sbox[temp[0]];
            temp2[1] = AES_Sbox[temp[1]];
            temp2[2] = AES_Sbox[temp[2]];
            temp2[3] = AES_Sbox[temp[3]];
            memcpy(temp, temp2, 4);
        }
        for(j = 0; j < 4; j++)
            key[i + j] = key[i + j - kl] ^ temp[j];
    }
}
// Pack four bytes into a 32-bit word, big-endian: k1 becomes the most
// significant byte, k4 the least significant.
__device__ u32 Byte2Word(BYTE k1, BYTE k2, BYTE k3, BYTE k4){
    return ((u32)k1 << 24) | ((u32)k2 << 16) | ((u32)k3 << 8) | (u32)k4;
}
u32 Te0[256] = {
0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
};
u32 Te1[256] = {
0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
};
u32 Te2[256] = {
0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
};
u32 Te3[256] = {
0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
};
// AES-128 encryption, T-table implementation: each (virtual) thread encrypts
// one 16-byte block in place via a grid-stride loop. `key` holds the 176-byte
// expanded key schedule; Te0..Te3 are the forward T-tables.
__global__ void AES_Encrypt(aes_block aes_block_array[], BYTE key[],int block_number,u32 Te0[], u32 Te1[] ,u32 Te2[] ,u32 Te3[] ) {
    int global_thread_index = blockDim.x*blockIdx.x + threadIdx.x;
    __shared__ u32 keywords[44];
    int stride=blockDim.x*gridDim.x;
    // FIX: load the round keys into shared memory ONCE, before the work loop.
    // The original performed this load plus __syncthreads() INSIDE the
    // grid-stride loop: when block_number is not a multiple of the grid size,
    // threads exit the loop after different iteration counts, leaving the
    // barrier in divergent control flow — undefined behavior (typically a
    // hang). It also redundantly re-loaded the same key every iteration.
    if(threadIdx.x == 0 ){
        for(int i=0;i<176;i+=4)
            keywords[i/4]=Byte2Word(key[i],key[i+1],key[i+2],key[i+3]);
    }
    __syncthreads();
    for(int real_thread=global_thread_index;real_thread < block_number;real_thread+=stride){
        u32 s0,s1,s2,s3,t0,t1,t2,t3;
        BYTE block[16]; // per-thread scratch copy of the state, gathered with a fixed transpose permutation
        block[0] = aes_block_array[real_thread].block[0];
        block[1] = aes_block_array[real_thread].block[4];
        block[2] = aes_block_array[real_thread].block[8];
        block[3] = aes_block_array[real_thread].block[12];
        block[4] = aes_block_array[real_thread].block[1];
        block[5] = aes_block_array[real_thread].block[5];
        block[6] = aes_block_array[real_thread].block[9];
        block[7] = aes_block_array[real_thread].block[13];
        block[8] = aes_block_array[real_thread].block[2];
        block[9] = aes_block_array[real_thread].block[6];
        block[10] = aes_block_array[real_thread].block[10];
        block[11] = aes_block_array[real_thread].block[14];
        block[12] = aes_block_array[real_thread].block[3];
        block[13] = aes_block_array[real_thread].block[7];
        block[14] = aes_block_array[real_thread].block[11];
        block[15] = aes_block_array[real_thread].block[15];
        /* initial AddRoundKey */
        s0=Byte2Word(block[0],block[1],block[2],block[3])^keywords[0];
        s1=Byte2Word(block[4],block[5],block[6],block[7])^keywords[1];
        s2=Byte2Word(block[8],block[9],block[10],block[11])^keywords[2];
        s3=Byte2Word(block[12],block[13],block[14],block[15])^keywords[3];
        /* round 1: */
        t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff]^keywords[4];
        t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff]^keywords[5];
        t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff]^keywords[6];
        t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff]^keywords[7];
        /* round 2: */
        s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ keywords[ 8];
        s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ keywords[ 9];
        s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ keywords[10];
        s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ keywords[11];
        /* round 3: */
        t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ keywords[12];
        t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ keywords[13];
        t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ keywords[14];
        t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ keywords[15];
        /* round 4: */
        s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ keywords[16];
        s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ keywords[17];
        s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ keywords[18];
        s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ keywords[19];
        /* round 5: */
        t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ keywords[20];
        t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ keywords[21];
        t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ keywords[22];
        t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ keywords[23];
        /* round 6: */
        s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ keywords[24];
        s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ keywords[25];
        s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ keywords[26];
        s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ keywords[27];
        /* round 7: */
        t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ keywords[28];
        t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ keywords[29];
        t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ keywords[30];
        t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ keywords[31];
        /* round 8: */
        s0 = Te0[t0 >> 24] ^ Te1[(t1 >> 16) & 0xff] ^ Te2[(t2 >> 8) & 0xff] ^ Te3[t3 & 0xff] ^ keywords[32];
        s1 = Te0[t1 >> 24] ^ Te1[(t2 >> 16) & 0xff] ^ Te2[(t3 >> 8) & 0xff] ^ Te3[t0 & 0xff] ^ keywords[33];
        s2 = Te0[t2 >> 24] ^ Te1[(t3 >> 16) & 0xff] ^ Te2[(t0 >> 8) & 0xff] ^ Te3[t1 & 0xff] ^ keywords[34];
        s3 = Te0[t3 >> 24] ^ Te1[(t0 >> 16) & 0xff] ^ Te2[(t1 >> 8) & 0xff] ^ Te3[t2 & 0xff] ^ keywords[35];
        /* round 9: */
        t0 = Te0[s0 >> 24] ^ Te1[(s1 >> 16) & 0xff] ^ Te2[(s2 >> 8) & 0xff] ^ Te3[s3 & 0xff] ^ keywords[36];
        t1 = Te0[s1 >> 24] ^ Te1[(s2 >> 16) & 0xff] ^ Te2[(s3 >> 8) & 0xff] ^ Te3[s0 & 0xff] ^ keywords[37];
        t2 = Te0[s2 >> 24] ^ Te1[(s3 >> 16) & 0xff] ^ Te2[(s0 >> 8) & 0xff] ^ Te3[s1 & 0xff] ^ keywords[38];
        t3 = Te0[s3 >> 24] ^ Te1[(s0 >> 16) & 0xff] ^ Te2[(s1 >> 8) & 0xff] ^ Te3[s2 & 0xff] ^ keywords[39];
        /* round 10 (final round — byte-masked table reads, no MixColumns): */
        s0 =(Te2[(t0 >> 24)] & 0xff000000) ^(Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^(Te0[(t2 >>8) & 0xff] & 0x0000ff00) ^(Te1[(t3) & 0xff] & 0x000000ff) ^keywords[40];
        s1 =(Te2[(t1 >> 24)] & 0xff000000) ^(Te3[(t2 >> 16) & 0xff] & 0x00ff0000) ^(Te0[(t3 >>8) & 0xff] & 0x0000ff00) ^(Te1[(t0) & 0xff] & 0x000000ff) ^keywords[41];
        s2 =(Te2[(t2 >> 24)] & 0xff000000) ^(Te3[(t3 >> 16) & 0xff] & 0x00ff0000) ^(Te0[(t0 >>8) & 0xff] & 0x0000ff00) ^(Te1[(t1) & 0xff] & 0x000000ff) ^keywords[42];
        s3 =(Te2[(t3 >> 24)] & 0xff000000) ^(Te3[(t0 >> 16) & 0xff] & 0x00ff0000) ^(Te0[(t1 >>8) & 0xff] & 0x0000ff00) ^(Te1[(t2) & 0xff] & 0x000000ff) ^keywords[43];
        /* scatter the ciphertext bytes back into the block */
        aes_block_array[real_thread].block[0]=(s0>>24)&0xff;
        aes_block_array[real_thread].block[1]=(s1>>24)&0xff;
        aes_block_array[real_thread].block[2]=(s2>>24)&0xff;
        aes_block_array[real_thread].block[3]=(s3>>24)&0xff;
        aes_block_array[real_thread].block[4]=(s0>>16)&0xff;
        aes_block_array[real_thread].block[5]=(s1>>16)&0xff;
        aes_block_array[real_thread].block[6]=(s2>>16)&0xff;
        aes_block_array[real_thread].block[7]=(s3>>16)&0xff;
        aes_block_array[real_thread].block[8]=(s0>>8)&0xff;
        aes_block_array[real_thread].block[9]=(s1>>8)&0xff;
        aes_block_array[real_thread].block[10]=(s2>>8)&0xff;
        aes_block_array[real_thread].block[11]=(s3>>8)&0xff;
        aes_block_array[real_thread].block[12]=s0&0xff;
        aes_block_array[real_thread].block[13]=s1&0xff;
        aes_block_array[real_thread].block[14]=s2&0xff;
        aes_block_array[real_thread].block[15]=s3&0xff;
    }
}
// Entry point: AES-encrypts argv[1] on the GPU with the key read from
// argv[2], writing the ciphertext blocks to argv[3].
// Host-side block layout: each 16-byte file block is stored through
// shiftTab (column-major state order) to match the kernel's expectations.
int main(int argc, char* argv[]) {
    // Robustness: argv[1..3] were previously indexed unconditionally.
    if (argc < 4) {
        cerr << "usage: " << argv[0] << " <input> <key file> <output>" << endl;
        return 1;
    }
    ifstream ifs;
    ifs.open(argv[1], ios::binary);
    if(!ifs){
        cerr<<"错误:无法打开加密文件"<<endl;
        exit(1);
    }
    ifs.seekg(0, ios::end);
    int infileLength = ifs.tellg();
    // NOTE(review): drops the final byte of the file (trailing EOF marker /
    // newline?) — confirm this matches how the input files are produced.
    infileLength -= 1;
    ifs.seekg(0, ios::beg);
    cout<<"输入文件长度为(字节): "<<infileLength<<endl<<"文件块个数为: "<<infileLength/16<<endl;
    int block_number = infileLength/16;
    int number_of_zero_pending = infileLength%16;
    aes_block* aes_block_array;
    // BUGFIX: the expanded-key buffer must hold up to 15 round keys
    // (AES-256: 14 rounds + 1) = 16*15 bytes. It was declared 16*11
    // (AES-128 only), so AES_ExpandKey for 192/256-bit keys and the
    // 16*15-byte upload below accessed memory past the end of the array.
    BYTE key[16 * 15];
    int keyLen = 0;
    int blockLen = 16;
    ifstream key_fp;
    key_fp.open(argv[2]);
    while(key_fp.peek()!=EOF)
    {
        key_fp>>key[keyLen];
        if(key_fp.eof())
            break;
        keyLen++;
    }
    cout<<"密码长度为(字节):"<<keyLen<<endl;
    // Valid AES key sizes: 16/24/32 bytes (128/192/256 bits).
    switch (keyLen)
    {
        case 16:break;
        case 24:break;
        case 32:break;
        default:printf("错误:密钥需要128, 192或256字节\n"); return 0;
    }
    AES_ExpandKey(key);
    if(number_of_zero_pending != 0)
        aes_block_array = new aes_block[block_number + 1];
    else
        aes_block_array = new aes_block[block_number];
    char temp[16];
    FILE* en_fp; // output (ciphertext) file
    en_fp = fopen(argv[3], "wb");
    if (en_fp == NULL) {
        cerr << "cannot open output file: " << argv[3] << endl;
        delete[] aes_block_array;
        return 1;
    }
    int shiftTab[16]={0,4,8,12,1,5,9,13,2,6,10,14,3,7,11,15};
    // Full 16-byte blocks, transposed into the kernel's state layout.
    for(int i=0; i<block_number; i++){
        ifs.read(temp, 16);
        for(int j=0; j<16; j++){
            aes_block_array[i].block[shiftTab[j]] = (unsigned char)temp[j];
        }
    }
    if(number_of_zero_pending != 0)
    {
        ifs.read(temp, number_of_zero_pending);
        // BUGFIX: the final partial block was stored WITHOUT the shiftTab
        // transposition applied to every full block above, so the kernel saw
        // it in the wrong byte order. Zero-pad and transpose consistently.
        for(int j=0; j<16; j++){
            char v = (j < number_of_zero_pending) ? temp[j] : '\0';
            aes_block_array[block_number].block[shiftTab[j]] = (unsigned char)v;
        }
        block_number++;
    }
    cudaSetDevice(0); // select device 0
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    int num_sm = prop.multiProcessorCount;
    aes_block *cuda_aes_block_array;
    BYTE *cuda_key;
    // One block of 1024 threads per SM; the kernel strides over all blocks.
    dim3 ThreadperBlock(1024);
    dim3 BlockperGrid(num_sm);
    cudaMalloc(&cuda_aes_block_array, block_number*sizeof(class aes_block));
    cudaMalloc(&cuda_key, 16*15*sizeof(BYTE));
    cudaMemcpy(cuda_aes_block_array, aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyHostToDevice);
    // Upload the full expanded key (host buffer is now 16*15 as well).
    cudaMemcpy(cuda_key, key, 16*15*sizeof(BYTE), cudaMemcpyHostToDevice);
    // Copy the AES T-boxes into global memory.
    u32 * cuda_Te0;u32 * cuda_Te1;u32 * cuda_Te2;u32 * cuda_Te3;
    cudaMalloc(&cuda_Te0,256*sizeof(u32));
    cudaMalloc(&cuda_Te1,256*sizeof(u32));
    cudaMalloc(&cuda_Te2,256*sizeof(u32));
    cudaMalloc(&cuda_Te3,256*sizeof(u32));
    cudaMemcpy(cuda_Te0,Te0,256*sizeof(u32),cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_Te1,Te1,256*sizeof(u32),cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_Te2,Te2,256*sizeof(u32),cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_Te3,Te3,256*sizeof(u32),cudaMemcpyHostToDevice);
    printf("加密数据块数: %d\n", block_number);
    // Time the kernel with CUDA events.
    cudaEvent_t start1;
    cudaEventCreate(&start1);
    cudaEvent_t stop1;
    cudaEventCreate(&stop1);
    cudaEventRecord(start1, 0);
    AES_Encrypt <<< BlockperGrid, ThreadperBlock>>>(cuda_aes_block_array, cuda_key, block_number, cuda_Te0, cuda_Te1, cuda_Te2, cuda_Te3);
    cudaEventRecord(stop1, 0);
    cudaEventSynchronize(stop1);
    float msecTotal1 = 0.0f, total;
    cudaEventElapsedTime(&msecTotal1, start1, stop1);
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    total = msecTotal1/1000; // seconds
    cout<<"加密时间:"<<total<<endl;
    // Each block is 128 bits, so blocks-per-gigabit = 2^30 / 128 = 2^23.
    long r = 1<<23;
    cout<<"吞吐量为:"<<block_number/total/r<<" Gbps"<<endl;
    cudaMemcpy(aes_block_array, cuda_aes_block_array, block_number*sizeof(class aes_block), cudaMemcpyDeviceToHost);
    cudaProfilerStop();
    cudaFree(cuda_aes_block_array);
    cudaFree(cuda_key);
    cudaFree(cuda_Te0);
    cudaFree(cuda_Te1);
    cudaFree(cuda_Te2);
    cudaFree(cuda_Te3);
    for(int i=0; i<block_number; i++)
        f1printBytes(aes_block_array[i].block, blockLen, en_fp);
    // BUGFIX/cleanup: the output file, input streams and host block array
    // were previously leaked.
    fclose(en_fp);
    ifs.close();
    key_fp.close();
    delete[] aes_block_array;
    return 0;
}
|
7,392 | /*
by Qin Yu, Apr 2019
*/
#include <fstream>
using namespace std;
#include <cooperative_groups.h>
#include <cuda.h>
namespace cg = cooperative_groups;
#define CUPRINTF(fmt, ...) \
printf("[%d, %d]:\t" fmt, blockIdx.y *gridDim.x + blockIdx.x, \
threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + \
threadIdx.x, \
__VA_ARGS__) // Idiom, not used, put here for convenient debugging.
// Single-block SVM coordinate-descent solver.
// Expects a one-block launch (<<<1, l>>>): each thread owns one coordinate
// j, and the loop over coordinates i is kept in lockstep via barriers.
//   iters : out, iters[0] = number of outer sweeps executed
//   alpha : in/out dual coefficients (length l), clipped to [0, C]
//   sigma : in/out cached margins sum_i alpha_i*y_i*K[i][j] (length l)
//   K     : l x l kernel matrix, row-major
//   y     : labels, length l
//   C     : box-constraint upper bound
__global__ void kernel_minibatch(int *iters, float *alpha, float *sigma,
                                 float *K, int *y, int l, int C) {
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  int can_stop = 0;
  float delta_ = 0;
  __shared__ float delta; // step on coordinate i, broadcast to the block
  float last_alpha_j, last_alpha;
  int counter = 0;
  while (true) {
    counter++;
    last_alpha_j = alpha[j];
    for (int i = 0; i < l; i++) {
      if (j == i) {
        last_alpha = alpha[i];
        // Exact coordinate step, then clip alpha[i] into [0, C]; delta is
        // rewritten so sigma stays consistent with the clipped value.
        delta = 1 / K[i * l + i] * (1 - y[i] * sigma[i]);
        alpha[i] += delta;
        if (alpha[i] < 0) {
          alpha[i] = 0;
          delta = 0 - last_alpha;
        }
        if (alpha[i] > C) {
          alpha[i] = C;
          delta = C - last_alpha;
        }
      }
      __syncthreads(); // publish delta before any thread reads it
      sigma[j] += delta * y[i] * K[i * l + j];
      // BUGFIX: a second barrier after the read. Without it, thread i+1 can
      // overwrite the shared `delta` on the next iteration while slower
      // threads are still reading the current value (write-after-read race).
      __syncthreads();
    }
    // A thread is converged when its alpha barely moved this sweep.
    can_stop = 0;
    delta_ = alpha[j] - last_alpha_j;
    if (-0.0001f < delta_ && delta_ < 0.0001f)
      can_stop = 1;
    // Stop only when the whole block agrees (uniform decision, so the
    // barrier inside __syncthreads_and is reached by all threads).
    if (__syncthreads_and(can_stop) > 0) {
      if (j == 1) {
        iters[0] = counter;
      }
      break;
    }
  }
}
// Grid-wide cooperative version of the coordinate-descent solver: the
// coordinate loop is kept in lockstep across ALL blocks via
// cg::grid_group::sync(), the step value is broadcast through the managed
// scalar delta[0], and per-block flags d[0..ddim) implement grid-wide
// termination. Must be launched with cudaLaunchCooperativeKernel.
//   iters : out, iters[0] = number of outer sweeps
//   alpha : in/out dual coefficients (length l), clipped to [0, C]
//   sigma : in/out cached margins (length l)
//   K     : l x l kernel matrix, row-major
//   d     : per-block convergence flags, length ddim (= gridDim.x)
//   delta : one-element broadcast buffer (managed/device memory)
// NOTE(review): every cg::sync(grid) sits inside the `if (j < l)` guard.
// If the launch creates any threads with j >= l (l not a multiple of
// blockDim.x), those threads skip the barrier and grid.sync() deadlocks —
// confirm all call sites guarantee full coverage.
// NOTE(review): delta[0] is read right after the cg::sync below, and the
// next iteration's owner rewrites it with no barrier after those reads —
// a potential write-after-read race; verify ordering.
extern "C" __global__ void kernel_minibatch_g(int *iters, float *alpha,
                                              float *sigma, float *K, int *y,
                                              int *d, int ddim, float *delta,
                                              int l, int C) {
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  if (j < l) {
    cg::grid_group grid = cg::this_grid();
    int can_break = 0;
    int can_stop = 0;
    float delta_ = 0;
    float last_alpha_j, last_alpha;
    int counter = 0;
    while (true) {
      counter++;
      // Reset this block's convergence flag for the new sweep.
      if (threadIdx.x == 1)
        d[blockIdx.x] = 0;
      last_alpha_j = alpha[j];
      for (uint32_t i = 0; i < l; i++) {
        if (j == i) {
          // if (threadIdx.x == i) { // This was a big big bug
          last_alpha = alpha[i];
          // Coordinate step on alpha[i], then clip to [0, C]; delta[0] is
          // rewritten so the sigma update matches the clipped value.
          delta[0] = 1 / K[i * l + i] * (1 - y[i] * sigma[i]);
          alpha[i] += delta[0];
          if (alpha[i] < 0) {
            alpha[i] = 0;
            delta[0] = 0 - last_alpha;
          }
          if (alpha[i] > C) {
            alpha[i] = C;
            delta[0] = C - last_alpha;
          }
        }
        cg::sync(grid); // publish delta[0] grid-wide before anyone reads it
        sigma[j] += delta[0] * y[i] * K[i * l + j];
      }
      // Converged for this thread if alpha[j] barely moved this sweep.
      can_stop = 0;
      delta_ = alpha[j] - last_alpha_j;
      if (-0.0001f < delta_ && delta_ < 0.0001f)
        can_stop = 1;
      // Block-level vote; one thread records the block's flag.
      if (__syncthreads_and(can_stop) > 0)
        if (threadIdx.x == 1)
          d[blockIdx.x] = 1;
      cg::sync(grid);
      // Stop only when every block reports convergence.
      can_break = 0;
      for (int i = 0; i < ddim; i++) {
        can_break += d[i];
      }
      if (can_break == ddim) {
        if (j == 1) {
          iters[0] = counter;
        }
        // cg::sync(grid);
        break;
      }
    }
  }
}
// Helper function for using CUDA to update sigma in parallel:
cudaError_t kernel_minibatch_wrapper(int *iters, float *alpha, float *sigma,
float *K, int *y, int l, int C) {
// int *dev_iters = 0;
// float *dev_alpha = 0;
// float *dev_sigma = 0;
// float *dev_K = 0;
// int *dev_y = 0;
int *dev_block_done = 0;
float *dev_delta = 0;
const int block_dim_max = 1024;
int block_dimension = block_dim_max;
int grid_dimension = (l - 1) / block_dim_max + 1;
dim3 block(block_dimension);
dim3 grid(grid_dimension);
// void *args[10] = {
// &dev_iters, &dev_alpha, &dev_sigma, &dev_K, &dev_y,
// &dev_block_done, &grid_dimension, &dev_delta, &l, &C};
void *args[10] = {&iters, &alpha, &sigma, &K, &y,
&dev_block_done, &grid_dimension, &dev_delta, &l, &C};
cudaError_t cudaStatus;
// Allocate GPU buffers for all vectors:
// cudaStatus = cudaMalloc(&dev_iters, sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error1;
// }
// cudaStatus = cudaMalloc(&dev_alpha, l * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error2;
// }
// cudaStatus = cudaMalloc(&dev_sigma, l * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error3;
// }
// cudaStatus = cudaMalloc(&dev_K, l * l * sizeof(float));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error4;
// }
// cudaStatus = cudaMalloc(&dev_y, l * sizeof(int));
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMalloc failed!");
// goto Error5;
// }
cudaStatus = cudaMallocManaged(&dev_block_done, grid_dimension * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMallocManaged failed!");
goto Error5;
}
cudaStatus = cudaMallocManaged(&dev_delta, 1 * sizeof(float));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMallocManaged failed!");
goto Error5;
}
// // Copy input vectors from host memory to GPU buffers.
// cudaStatus =
// cudaMemcpy(dev_K, K, l * l * sizeof(float), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error5;
// }
// cudaStatus = cudaMemcpy(dev_y, y, l * sizeof(int), cudaMemcpyHostToDevice);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error5;
// }
// printf("READY TO CALL KERNEL\n");
cudaStatus =
cudaLaunchCooperativeKernel((void *)kernel_minibatch_g, grid, block, args,
sizeof(float), cudaStream_t(0));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "kernel_minibatch_g launch failed: %s\n",
cudaGetErrorString(cudaStatus));
goto Error5;
}
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "kernel_minibatch_g launch failed: %s\n",
cudaGetErrorString(cudaStatus));
goto Error5;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr,
"cudaDeviceSynchronize returned error code %d after launching "
"addKernel!\n",
cudaStatus);
goto Error5;
}
// Copy output vector from GPU buffer to host memory.
// cudaStatus =
// cudaMemcpy(iters, dev_iters, sizeof(int), cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error5;
// }
// cudaStatus =
// cudaMemcpy(alpha, dev_alpha, l * sizeof(float),
// cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error5;
// }
// cudaStatus =
// cudaMemcpy(sigma, dev_sigma, l * sizeof(float),
// cudaMemcpyDeviceToHost);
// if (cudaStatus != cudaSuccess) {
// fprintf(stderr, "cudaMemcpy failed!");
// goto Error5;
// }
// Error6:
cudaFree(dev_block_done);
cudaFree(dev_delta);
Error5:
// cudaFree(dev_y);
// Error4:
// cudaFree(dev_K);
// Error3:
// cudaFree(dev_sigma);
// Error2:
// cudaFree(dev_alpha);
// Error1:
// cudaFree(dev_iters);
// Error0:
return cudaStatus;
}
// Host wrapper for the single-block kernel_minibatch.
// The whole problem runs in one block of l threads, so l must not exceed
// the device's threads-per-block limit (typically 1024); an oversized l
// makes the launch fail, which is reported below.
// All pointer arguments must be device-accessible.
cudaError_t kernel_minibatch_block_wrapper(int *iters, float *alpha,
                                           float *sigma, float *K, int *y,
                                           int l, int C) {
  dim3 grid(1);
  dim3 block(l);
  void *args[7] = {&iters, &alpha, &sigma, &K, &y, &l, &C};
  cudaError_t cudaStatus;
  cudaStatus = cudaLaunchKernel((void *)kernel_minibatch, grid, block, args,
                                sizeof(float), cudaStream_t(0));
  if (cudaStatus != cudaSuccess) {
    fprintf(stderr, "kernel_minibatch launch failed: %s\n",
            cudaGetErrorString(cudaStatus));
    return cudaStatus;
  }
  // Catch launch-configuration errors.
  cudaStatus = cudaGetLastError();
  if (cudaStatus != cudaSuccess) {
    // BUGFIX: this message previously blamed kernel_minibatch_g (copy/paste).
    fprintf(stderr, "kernel_minibatch launch failed: %s\n",
            cudaGetErrorString(cudaStatus));
    return cudaStatus;
  }
  // Wait for the kernel and surface asynchronous execution errors.
  cudaStatus = cudaDeviceSynchronize();
  if (cudaStatus != cudaSuccess) {
    // BUGFIX: this message previously referred to "addKernel" (template残留).
    fprintf(stderr,
            "cudaDeviceSynchronize returned error code %d after launching "
            "kernel_minibatch!\n",
            cudaStatus);
  }
  return cudaStatus;
}
|
7,393 | #include <stdio.h>
#define NUM_BLOCKS_X 2 //Number of cuda blocks
#define NUM_THREADS_X 16 //Number of threads per block
#define NUM_BLOCKS_Y 3 //Number of cuda blocks
#define NUM_THREADS_Y 8 //Number of threads per block
// Prints one greeting per thread with its 2D thread/block coordinates.
// Output order is unspecified; the device printf buffer is flushed at the
// next synchronizing call on the host.
__global__ void hello(){
    printf("Hellow world, I'm thread %i %i in block %i %i\n",threadIdx.x, threadIdx.y,blockIdx.x, blockIdx.y);
}
// Reports free/total device memory, then launches a 2D "hello" grid.
int main(int argc, char** args){
    size_t free, total;
    cudaMemGetInfo(&free, &total);
    printf("There are %lu bytes available of %lu\n", free, total);
    // Launch a 2x3 grid of 16x8-thread blocks.
    // BUGFIX: the y thread count was NUM_THREADS_X (16) instead of
    // NUM_THREADS_Y (8) — NUM_THREADS_Y was defined but never used, and
    // each block launched 256 threads instead of the intended 128.
    hello<<<dim3(NUM_BLOCKS_X, NUM_BLOCKS_Y), dim3(NUM_THREADS_X, NUM_THREADS_Y)>>>();
    // Synchronize so the device-side printf output is flushed before exit.
    cudaDeviceSynchronize();
    printf("That is all!\n");
    return 0;
}
|
7,394 |
// Element-wise product c[k] = a[k] * b[k], one element per *block*.
// NOTE: appears intended for a <<<N, 1>>> launch (blockIdx.x indexes the
// element; blocks beyond N do nothing) — confirm against call sites.
__global__ void test(float *a, float *b, float *c, int N)
{
    const int k = blockIdx.x;
    if (k >= N)
        return;
    c[k] = a[k] * b[k];
}
7,395 | #import <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
// Write a diagnostic line to stderr and terminate with failure status.
void error(char const *str)
{
    fputs(str, stderr);
    fputc('\n', stderr);
    exit(1);
}
// Report a failed CUDA call to stderr.
//   err : status returned by the CUDA runtime call
//   str : caller-supplied context string for the message
// NOTE(review): unlike error(), this reports but does NOT exit — execution
// continues after a failure, so later calls may fail in cascade.
void cuda_check(cudaError_t err, char const *str)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "%s: CUDA error %d (%s)\n",
                str, err, cudaGetErrorString(err));
    }
}
// Initialize d_vec1[i] = i for i in [0, nels), one element per thread.
__global__
void init_vec(int nels, float* __restrict__ d_vec1)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // BUGFIX: `nels` was accepted but never used — threads beyond the vector
    // length wrote out of bounds on any over-sized launch. Guard them.
    if (i < nels)
        d_vec1[i] = i;
}
// Element-wise products feeding a subsequent row reduction (matrix-product
// building block): res_vec[i] = d_vec1[c] * d_vec2[j].
// NOTE(review): the index arithmetic encodes a specific launch shape —
// `i` is the flat output slot, `c` selects the left operand via
// blockIdx.x*n_row1 + threadIdx.x%n_col1, and `j` walks the right operand
// in a transposed order. Confirm against the <<<rows, cols>>> call sites
// before reusing; n_col2 participates only through `j`.
__global__
void multi_vec(int n_row1,int n_col1,int n_row2,int n_col2,float* __restrict__ res_vec,float* __restrict__ d_vec1,float* __restrict__ d_vec2)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // left-operand element for this thread
    int c= blockIdx.x*n_row1 + (threadIdx.x)%n_col1;
    // right-operand element, walked column-first
    int j= ((int)(threadIdx.x/n_row2) + (threadIdx.x%n_row2)*n_col2);
    res_vec[i]=d_vec1[c]*d_vec2[j];
}
// Scale a vector: res_vec[i] = scalar * d_vec[i], one element per thread.
// No bounds check — the launch must cover exactly the vector length.
__global__
void scalareMatrice(float* __restrict__ res_vec,float scalar,float* __restrict__ d_vec)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    res_vec[gid] = scalar * d_vec[gid];
}
// Row reduction: res_vec[r] = sum of d_vec1[r*N .. r*N + N-1].
// One thread per input element; the LAST thread of each row performs the
// serial sum of its row.
// BUGFIX/generalization: the sum was hard-coded to 32 unrolled terms behind
// an `i % N == 31` test, which is only correct when N == 32 (and two unused
// locals read res_vec before it was ever written). The last thread of each
// row now sums exactly N elements for any N >= 1.
__global__
void reduction_row(int N,float* __restrict__ res_vec,float* __restrict__ d_vec1)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int idx = i / N;              // row owning this element
    if (i % N == N - 1) {         // last element of the row does the work
        float sum = 0.0f;
        for (int k = 0; k < N; k++)
            sum += d_vec1[i - k];
        res_vec[idx] = sum;
    }
}
// Out-of-place transpose of an nrow x ncols row-major matrix.
// Launched as <<<nrow, ncols>>>: blockIdx.x is the source row and
// threadIdx.x the source column.
__global__
void transpose(int nrow,int ncols, float* __restrict__ res_vec, float* __restrict__ d_vec1)
{
    const int col = threadIdx.x;
    const int row = blockIdx.x;
    res_vec[col * nrow + row] = d_vec1[row * ncols + col];
}
// Element-wise sum: res_vec[i] = d_vec1[i] + d_vec2[i] for i in [0, nels).
__global__
void vecsum(int nels, float* __restrict__ res_vec, float* __restrict__ d_vec1, float* __restrict__ d_vec2)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // BUGFIX: `nels` was accepted but never used; guard over-sized launches.
    if (i < nels)
        res_vec[i] = d_vec1[i] + d_vec2[i];
}
// Element-wise difference: res_vec[i] = d_vec1[i] - d_vec2[i], i in [0, nels).
__global__
void vecdif(int nels, float* __restrict__ res_vec, float* __restrict__ d_vec1, float* __restrict__ d_vec2)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    // BUGFIX: `nels` was accepted but never used; guard over-sized launches.
    if (i < nels)
        res_vec[i] = d_vec1[i] - d_vec2[i];
}
// Print the first m entries of `matrice`, one per line, preceded by a
// blank line. (Despite the name, the buffer is treated as a flat vector.)
// Cleanup: removed the unused loop variable `j`.
void stampa(float* matrice,int m){
    printf("\n");
    for (int i = 0; i < m; i++) {
        printf("%f ", matrice[i]);
        printf("\n");
    }
}
// Driver: builds an N x N matrix A and N-vectors b, x on the device, then
// runs what appears to be one steepest-descent-style step for A x = b:
// pk = b - A*x, ak = (pk.pk)/(pk^T A pk), x += ak*pk — TODO confirm intent.
// NOTE(review): the loop below runs exactly once (i < 1); the events are
// created but never destroyed; and the `err` results of the two in-loop
// cudaMemcpy calls are overwritten without being checked.
int main(int argc, char *argv[]){
  float* matriceA;   // device: N x N system matrix A
  float* matriceB;   // device: right-hand side b (M == 1 column)
  float* matriceX;   // device: current solution estimate x
  float* pk;         // device: residual / descent direction
  float* trasposta;  // device: pk transposed
  float* prodotto;   // device: element products for the numerator
  float* somma;      // device: element products of A * x
  float* res;        // device: element products of pk^T * A
  float* den;        // device: element products for the denominator
  float* res0;       // device: row-reduced A * x
  float* res1;       // device: row-reduced numerator
  float* res2;       // device: row-reduced pk^T * A
  float* red_den;    // device: row-reduced denominator
  float* matrice;    // host (pinned): copy of x for printing
  float* scalar;     // device: ak * pk
  float* num;        // host (pinned): numerator scalar
  float* deno;       // host (pinned): denominator scalar
  float ak;          // step length
  int nels;
  printf("%d\n",argc );
  if (argc != 2) {
    error("syntax: serve N come arg");
  }
  int N = atoi(argv[1]);
  if (N < 0) {
    error("N < 0");
  }
  int M=1;  // single right-hand-side column
  nels=N*N;
  size_t memsize = nels*sizeof(float);
  cudaError_t err;
  // Device and pinned-host allocations for every intermediate buffer.
  err = cudaMalloc((void**)&matriceA, memsize);
  cuda_check(err, "alloc matriceA");
  err = cudaMalloc((void**)&matriceB, N*M*sizeof(float));
  cuda_check(err, "alloc matriceB");
  err = cudaMalloc((void**)&matriceX, N*sizeof(float));
  cuda_check(err, "alloc matriceX");
  err = cudaMallocHost(&matrice, N*N*sizeof(float));
  cuda_check(err, "alloc matrice");
  err = cudaMallocHost(&num, M*sizeof(float));
  cuda_check(err, "alloc matrice");
  err = cudaMallocHost(&deno, M*sizeof(float));
  cuda_check(err, "alloc matrice");
  err = cudaMalloc((void**)&somma,nels*M*sizeof(float));
  cuda_check(err, "alloc somma");
  err = cudaMalloc((void**)&res,M*N*N*sizeof(float));
  cuda_check(err, "alloc res");
  err = cudaMalloc((void**)&res0,M*N*sizeof(float));
  cuda_check(err, "alloc res0");
  err = cudaMalloc((void**)&prodotto,M*N*N*sizeof(float));
  cuda_check(err, "alloc prodotto");
  err = cudaMalloc((void**)&res1,M*N*sizeof(float));
  cuda_check(err, "alloc res1");
  err = cudaMalloc((void**)&res2,M*N*sizeof(float));
  cuda_check(err, "alloc res2");
  err = cudaMalloc((void**)&pk,M*N*sizeof(float));
  cuda_check(err, "alloc pk");
  err = cudaMalloc((void**)&trasposta,M*N*sizeof(float));
  cuda_check(err, "alloc trasposta ");
  err = cudaMalloc((void**)&den,M*N*sizeof(float));
  cuda_check(err, "alloc den");
  err = cudaMalloc((void**)&red_den,M*sizeof(float));
  cuda_check(err, "alloc den");
  err = cudaMalloc((void**)&scalar,M*N*sizeof(float));
  cuda_check(err, "alloc scalar");
  // Timing events for each kernel stage (never destroyed — see note above).
  cudaEvent_t pre_init, post_init, pre_sum, post_sum, pre_red, post_red,pre_prodotto,post_prodotto,
  pre_transpose,post_transpose,pre_scalar_matrice,post_scalar_matrice,pre_vecsum,post_vecsum,
  pre_vecdif,post_vecdif;
  err = cudaEventCreate(&pre_init, 0);
  cuda_check(err, "create pre_init");
  err = cudaEventCreate(&pre_red, 0);
  cuda_check(err, "create pre_red");
  err = cudaEventCreate(&pre_prodotto, 0);
  cuda_check(err, "create pre_sum");
  err = cudaEventCreate(&pre_transpose, 0);
  cuda_check(err, "create pre_traspose");
  err = cudaEventCreate(&pre_scalar_matrice, 0);
  cuda_check(err, "create pre_scalar_matrice");
  err = cudaEventCreate(&pre_vecdif, 0);
  cuda_check(err, "create pre_vecdif");
  err = cudaEventCreate(&pre_vecsum, 0);
  cuda_check(err, "create pre_vecsum");
  err = cudaEventCreate(&post_init, 0);
  cuda_check(err, "create post_init");
  err = cudaEventCreate(&post_red, 0);
  cuda_check(err, "create post_red");
  err = cudaEventCreate(&post_prodotto, 0);
  cuda_check(err, "create post_sum");
  err = cudaEventCreate(&post_transpose, 0);
  cuda_check(err, "create post_traspose");
  err = cudaEventCreate(&post_scalar_matrice, 0);
  cuda_check(err, "create post_scalar_matrice");
  err = cudaEventCreate(&post_vecdif, 0);
  cuda_check(err, "create post_vecdif");
  err = cudaEventCreate(&post_vecsum, 0);
  cuda_check(err, "create post_vecsum");
  // Initialize A, b and x with ascending values on the device.
  cudaEventRecord(pre_init);
  init_vec<<<N, N>>>(nels, matriceA);
  cudaEventRecord(post_init);
  init_vec<<<1, M*N>>>(M*N, matriceB);
  init_vec<<<1, M*N>>>(M*N, matriceX);
  int i;
  for(i=0;i<1;i++){
    // somma/res0 = A * x (element products, then row reduction).
    cudaEventRecord(pre_prodotto);
    multi_vec<<<N, M*N>>>(N,N,N,M,somma,matriceA,matriceX);
    cudaEventRecord(post_prodotto);
    cudaEventRecord(pre_red);
    reduction_row<<<N, M*N>>>(N,res0,somma);
    cudaEventRecord(post_red);
    // pk = b - A*x  (residual / descent direction).
    cudaEventRecord(pre_vecdif);
    vecdif<<<N,M>>>(N*M,pk,matriceB,res0);
    cudaEventRecord(post_vecdif);
    cudaEventRecord(pre_transpose);
    transpose<<<N,M>>>(N,M,trasposta,pk);
    cudaEventRecord(post_transpose);
    // Numerator: pk^T * pk reduced into res1.
    multi_vec<<<M, N>>>(M,N,N,M,prodotto,trasposta,pk);
    reduction_row<<<M, N>>>(N,res1,prodotto);
    // Denominator: (pk^T * A) * pk reduced into red_den.
    multi_vec<<<M, M*N*N>>>(M,N,N,N,res,trasposta,matriceA);
    reduction_row<<<M*N, N>>>(N,res2,res);
    multi_vec<<<N, M*N>>>(M,N,N,M,den,res2,pk);
    reduction_row<<<N, M*N>>>(N,red_den,den);
    // Bring the two scalars back; blocking memcpys double as sync points.
    err = cudaMemcpy(num, res1, 1*sizeof(float), cudaMemcpyDeviceToHost);
    err = cudaMemcpy(deno, red_den, 1*sizeof(float), cudaMemcpyDeviceToHost);
    ak=num[0]/deno[0];  // step length (no guard against deno == 0)
    // x += ak * pk.
    cudaEventRecord(pre_scalar_matrice);
    scalareMatrice<<<N, M>>>(scalar,ak,pk);
    cudaEventRecord(post_scalar_matrice);
    cudaEventRecord(pre_vecsum);
    vecsum<<<N, M>>>(N*M*N,matriceX,matriceX,scalar);
    cudaEventRecord(post_vecsum);
    err = cudaMemcpy(matrice, matriceX, M*N*sizeof(float), cudaMemcpyDeviceToHost);
    cuda_check(err, "create mem");
    stampa(matrice,M*N);
    // Per-stage timings and naive throughput estimates.
    float runtime_init_ms, runtime_prodotto_ms, runtime_red_ms,runtime_transpose_ms,runtime_scalar_matrice_ms,
    runtime_vecdif_ms,runtime_vecsum_ms;
    err = cudaEventElapsedTime(&runtime_init_ms, pre_init, post_init);
    cuda_check(err, "elapsed time init");
    err = cudaEventElapsedTime(&runtime_prodotto_ms, pre_prodotto, post_prodotto);
    cuda_check(err, "elapsed time prodotto");
    err = cudaEventElapsedTime(&runtime_red_ms, pre_red, post_red);
    cuda_check(err, "elapsed time reduction");
    err = cudaEventElapsedTime(&runtime_transpose_ms, pre_transpose, post_transpose);
    cuda_check(err, "elapsed time traspose");
    err = cudaEventElapsedTime(&runtime_scalar_matrice_ms, pre_scalar_matrice, post_scalar_matrice);
    cuda_check(err, "elapsed time scalar_matrice");
    err = cudaEventElapsedTime(&runtime_vecdif_ms, pre_vecdif, post_vecdif);
    cuda_check(err, "elapsed time vecdif");
    err = cudaEventElapsedTime(&runtime_vecsum_ms, pre_vecsum, post_vecsum);
    cuda_check(err, "elapsed time vecsum");
    printf("init: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
        runtime_init_ms, nels/runtime_init_ms/1.0e6, memsize/runtime_init_ms/1.0e6);
    printf("prodotto: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
        runtime_prodotto_ms, nels/runtime_prodotto_ms/1.0e6, memsize/runtime_prodotto_ms/1.0e6);
    printf("reduction: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
        runtime_red_ms, nels/runtime_red_ms/1.0e6, memsize/runtime_red_ms/1.0e6);
    printf("transpose: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
        runtime_transpose_ms, N/runtime_transpose_ms/1.0e6, (N*sizeof(float))/runtime_transpose_ms/1.0e6);
    printf("scalareMatrice: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
        runtime_scalar_matrice_ms, N/runtime_scalar_matrice_ms/1.0e6, (N*sizeof(float))/runtime_scalar_matrice_ms/1.0e6);
    printf("vecdif: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
        runtime_vecdif_ms, N/runtime_vecdif_ms/1.0e6, (N*sizeof(float))/runtime_vecdif_ms/1.0e6);
    printf("vecsum: runtime %.4gms, %.4g GE/s, %.4g GB/s\n",
        runtime_vecsum_ms, N/runtime_vecsum_ms/1.0e6, (N*sizeof(float))/runtime_vecsum_ms/1.0e6);
  }
  // Release all device and pinned-host allocations.
  cudaFree(matriceA);
  cudaFreeHost(matrice);
  cudaFree(somma);
  cudaFree(res);
  cudaFree(pk);
  cudaFree(trasposta);
  cudaFree(prodotto);
  cudaFree(den);
  cudaFree(res0);
  cudaFree(res1);
  cudaFree(res2);
  cudaFree(red_den);
  cudaFree(scalar);
  cudaFree(matriceB);
  cudaFree(matriceX);
  cudaFreeHost(num);
  cudaFreeHost(deno);
}
|
7,396 | #include "includes.h"
// 3D grayscale erosion along the z (layer) axis: each output voxel becomes
// the minimum of the 2*kernel_radius+1 input voxels centered on it in z.
// Tiling: each (x,y) thread column stages ER_LAYERS_RESULT_STEPS z-segments
// plus ER_LAYERS_HALO_STEPS halo segments on each side into shared memory,
// then computes RESULT_STEPS outputs. Reads outside the volume load 0,
// which forces the min to 0 at the z borders.
// Precondition (from the smem indexing below):
//   kernel_radius <= ER_LAYERS_HALO_STEPS * ER_LAYERS_BLOCKDIM_Z.
__global__ void erosionLayers3DKernel( unsigned short *d_dst, unsigned short *d_src, int w, int h, int d, int kernel_radius )
{
    // +1 on the innermost dimension: padding against shared-memory bank conflicts.
    __shared__ unsigned short smem[ER_LAYERS_BLOCKDIM_X][ER_LAYERS_BLOCKDIM_Y][(ER_LAYERS_RESULT_STEPS + 2 * ER_LAYERS_HALO_STEPS) * ER_LAYERS_BLOCKDIM_Z + 1];
    // This thread's private z-column in the tile.
    unsigned short *smem_thread = smem[threadIdx.x][threadIdx.y];
    //Offset to the upper halo edge
    const int baseX = blockIdx.x * ER_LAYERS_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * ER_LAYERS_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = (blockIdx.z * ER_LAYERS_RESULT_STEPS - ER_LAYERS_HALO_STEPS) * ER_LAYERS_BLOCKDIM_Z + threadIdx.z;
    // Rebase the pointers so z-steps below are multiples of one layer (pitch).
    d_src += (baseZ * h + baseY) * w + baseX;
    d_dst += (baseZ * h + baseY) * w + baseX;
    const int pitch = w*h;
    //Main data
#pragma unroll
    for (int i = ER_LAYERS_HALO_STEPS; i < ER_LAYERS_HALO_STEPS + ER_LAYERS_RESULT_STEPS; i++) {
        smem_thread[threadIdx.z + i * ER_LAYERS_BLOCKDIM_Z] = d_src[i * ER_LAYERS_BLOCKDIM_Z * pitch];
    }
    //Upper halo (clamped to 0 above the volume)
#pragma unroll
    for (int i = 0; i < ER_LAYERS_HALO_STEPS; i++) {
        smem_thread[threadIdx.z + i * ER_LAYERS_BLOCKDIM_Z] = (baseZ + i * ER_LAYERS_BLOCKDIM_Z >= 0) ? d_src[i * ER_LAYERS_BLOCKDIM_Z * pitch] : 0;
    }
    //Lower halo (clamped to 0 below the volume)
#pragma unroll
    for (int i = ER_LAYERS_HALO_STEPS + ER_LAYERS_RESULT_STEPS; i < ER_LAYERS_HALO_STEPS + ER_LAYERS_RESULT_STEPS + ER_LAYERS_HALO_STEPS; i++) {
        smem_thread[threadIdx.z + i * ER_LAYERS_BLOCKDIM_Z]= (baseZ + i * ER_LAYERS_BLOCKDIM_Z < d) ? d_src[i * ER_LAYERS_BLOCKDIM_Z * pitch] : 0;
    }
    //Compute and store results
    __syncthreads();
#pragma unroll
    for (int i = ER_LAYERS_HALO_STEPS; i < ER_LAYERS_HALO_STEPS + ER_LAYERS_RESULT_STEPS; i++) {
        // Sliding window of 2*kernel_radius+1 samples centered on this output.
        unsigned short *smem_kern = &smem_thread[threadIdx.z + i * ER_LAYERS_BLOCKDIM_Z - kernel_radius];
        unsigned short val = smem_kern[0];
        //#pragma unroll
        for (int j = 1; j <= 2*kernel_radius; j++) {
            val = min(val, smem_kern[j]);
        }
        d_dst[i * ER_LAYERS_BLOCKDIM_Z * pitch] = val;
    }
}
7,397 | // Exercise: Implement matrix transpose using CUDA
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <ctype.h>
#include <sys/types.h>
#include <sys/time.h>
#include<assert.h>
// #define NUM_BLOCKS 8192
#define NUM_THREADS 512
/* Return wall-clock time in seconds (Unix epoch), microsecond resolution. */
double cclock()
{
    struct timeval now;
    gettimeofday( &now, (struct timezone *)0 );
    return (double)now.tv_sec + (double)now.tv_usec / 1000000.0;
}
// print the matrix
// Print a size x size row-major matrix to stdout, one row per line,
// followed by a trailing blank line.
void print_matrix(int size, double * M)
{
    for (int r = 0; r < size; r++)
    {
        for (int c = 0; c < size; c++)
            fprintf(stdout, "%f ", M[ r*size + c ]);
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n");
}
// transpose the matrix
// Transpose a MATRIXDIM x MATRIXDIM row-major matrix: d_AT = d_A^T.
// One thread per element; the flat global index is decomposed into the
// source (row, col) pair, and excess threads are guarded out.
__global__ void trasp_mat(int MATRIXDIM, double * d_A, double * d_AT)
{
    const int flat = blockIdx.x * blockDim.x + threadIdx.x;
    if ( flat >= MATRIXDIM * MATRIXDIM )
        return;
    const int row = flat / MATRIXDIM;
    const int col = flat % MATRIXDIM;
    d_AT[ col * MATRIXDIM + row ] = d_A[ row * MATRIXDIM + col ];
}
// Driver: fills an N x N matrix, transposes it on the GPU, prints both
// matrices and the kernel time. Usage: ./a.out MATRIXDIM
int main( int argc, char * argv [] )
{
    double * h_A, * h_AT; // host buffers
    double * d_A, * d_AT; // device buffers
    int i;
    int MATRIXDIM;
    size_t size_in_bytes; // size_t: int overflowed for large dimensions
    double t_start, t_end;
    if( argc < 2 ){
        fprintf( stderr, "Error. The program runs as following: %s [MATRIXDIM].\nProgram exit ...\n", argv[0]);
        exit(1);
    }
    MATRIXDIM = atoi(argv[1]);
    size_in_bytes = (size_t)MATRIXDIM * MATRIXDIM * sizeof( double );
    if( MATRIXDIM < 1 ){
        // BUGFIX: the format string has no conversion specifier, but argv[0]
        // was passed as an extra argument (undefined-ish / misleading).
        fprintf( stderr, "Error. Inconsistent parameters.\nProgram exit ...\n");
        exit(1);
    }
    // allocate host and device buffers
    h_A = ( double * ) malloc( size_in_bytes );
    h_AT = ( double * ) malloc( size_in_bytes );
    cudaMalloc( (void**) &d_A, size_in_bytes );
    cudaMalloc( (void**) &d_AT, size_in_bytes );
    // initialize the matrix A with ascending values
    for( i = 0; i < MATRIXDIM * MATRIXDIM; i++ ){
        h_A[i] = (double) i;
    }
    print_matrix( MATRIXDIM, h_A);
    cudaMemcpy( d_A, h_A, size_in_bytes, cudaMemcpyHostToDevice );
    // ceil-div grid so every element gets a thread (excess guarded in-kernel)
    t_start=cclock();
    trasp_mat<<< (MATRIXDIM * MATRIXDIM + NUM_THREADS - 1) / NUM_THREADS, NUM_THREADS >>>( MATRIXDIM, d_A, d_AT );
    // BUGFIX: kernel launches are asynchronous — without this synchronize the
    // timer measured only the launch overhead, not the transpose itself.
    cudaDeviceSynchronize();
    t_end=cclock();
    // copy the result back (blocking, so the data is valid afterwards)
    cudaMemcpy( h_AT, d_AT, size_in_bytes, cudaMemcpyDeviceToHost );
    print_matrix(MATRIXDIM, h_AT);
    fprintf( stdout, " Matrix transpose executed. Time Elapsed %9.4f secs\n", t_end-t_start );
    // free the memory
    free( h_A );
    free( h_AT );
    cudaFree( d_A );
    cudaFree( d_AT );
    return 0;
}
|
7,398 | #include"cuda_runtime.h"
#include"device_launch_parameters.h"
#include<stdio.h>
// Per-element transform of an n x n matrix (single block, one thread per
// element at (threadIdx.x, threadIdx.y)):
//   diagonal (tx == ty) : output 0
//   tx > ty             : output the factorial of b[ty*n+tx]
//   ty > tx             : output the digit sum of b[ty*n+tx]
// NOTE(review): both off-diagonal branches read AND write at [ty*n+tx], so
// the two triangles are distinguished only by the (tx, ty) comparison —
// confirm whether the factorial was meant for the mirrored position.
__global__ void convert(int *b, int *c, int n)
{
    int tx,ty,i;
    tx = threadIdx.x;
    ty = threadIdx.y;
    if(tx==ty)
    {
        c[tx*n+ty]=0;
    }
    else if(tx>ty)
    {
        // fact accumulates k!: fact += fact*i is fact *= (i+1), so after the
        // loop fact == b[ty*n+tx]! (factorial), with 0! == 1! == 1.
        int fact = 1;
        for(i=1;i<b[ty*n+tx];i++)
        {
            fact += fact * i;
        }
        c[ty*n+tx]=fact;
    }
    else if(ty>tx)
    {
        // Sum of the decimal digits of the input value.
        int num=b[ty*n+tx];
        int r=0;
        while(num>0)
        {
            r+=num%10;
            num=num/10;
        }
        c[ty*n+tx]=r;
    }
}
// Reads an n x n matrix from stdin, runs the convert kernel (single block,
// n x n threads — requires n*n <= 1024), and prints the result.
int main(void)
{
    int n,i,j;
    int *d_b,*d_c, size;
    printf("Enter value of n\n");
    scanf("%d", &n);
    // BUGFIX: the host buffers were fixed-size (a[3][3], c[9]) while n is
    // user-supplied, so any n > 3 overflowed the stack. Allocate n*n
    // dynamically and validate n first. (The unused b[9] array is gone.)
    if (n <= 0) {
        printf("n must be positive\n");
        return 1;
    }
    int *a = new int[n * n];
    int *c = new int[n * n];
    printf("Enter the elements of the matrix\n");
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            scanf("%d", &a[i*n+j]);
        }
    }
    size = sizeof(int);
    cudaMalloc((void **)&d_b,n*n*size);
    cudaMalloc((void **)&d_c,n*n*size);
    cudaMemcpy(d_b,a,n*n*size,cudaMemcpyHostToDevice);
    dim3 dimgrid(1,1,1);
    dim3 dimblock(n,n,1); // single block: launch fails if n*n > 1024
    convert<<<dimgrid,dimblock>>>(d_b,d_c,n);
    // Blocking copy back doubles as the synchronization point.
    cudaMemcpy(c,d_c,n*n*size,cudaMemcpyDeviceToHost);
    printf("\n");
    for(i=0;i<n;i++)
    {
        for(j=0;j<n;j++)
        {
            printf("%d\t", c[i*n+j]);
        }
        printf("\n");
    }
    cudaFree(d_b);
    cudaFree(d_c);
    delete[] a;
    delete[] c;
    return 0;
}
// No-op kernel (the n argument is unused); useful for measuring bare launch overhead.
__global__ void empty(size_t n) {}
// The classic AXPY update: y[i] += alpha * x[i] for i in [0, n),
// one element per thread, excess threads guarded out.
__global__ void axpy(double *y, double* x, double alpha, size_t n) {
    const auto gid = threadIdx.x + blockIdx.x*blockDim.x;
    if (gid >= n) return;
    y[gid] += alpha * x[gid];
}
// Target function f(x) = exp(cos(x)) - 2 and its derivative
// f'(x) = -sin(x) * exp(cos(x)), used by the Newton iteration below.
__device__ double f(double x) { return exp(cos(x)) - 2; }
__device__ double fp(double x) { return -sin(x) * exp(cos(x)); }
// Run n_iter Newton-Raphson steps in place on each entry of x:
// x <- x - f(x)/f'(x). One thread per element; excess threads do nothing.
__global__ void newton(size_t n_iter, double *x, size_t n) {
    const auto gid = threadIdx.x + blockIdx.x*blockDim.x;
    if (gid >= n) return;
    double xi = x[gid];
    for (int step = 0; step < n_iter; ++step) {
        xi -= f(xi) / fp(xi);
    }
    x[gid] = xi;
}
|
7,400 | /*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc add.cu
* ./a.out
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <cuda_runtime.h>
#define N 512
// c = a + b element-wise, one *block* per element: launched <<<N, 1>>>
// below, so blockIdx.x is the element index.
__global__ void add_kernel(int* a, int* b, int*c){
    c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
}
// Fill a[0..num) with pseudo-random integers in [0, 1000).
// rand() is never seeded here, so the sequence repeats across runs.
void random_ints(int* a, int num) {
    int k = 0;
    while (k < num) {
        a[k] = rand() % 1000;
        ++k;
    }
}
// Adds two random N-element vectors on the GPU (one block per element),
// times the kernel with clock(), and prints the first 20 results.
int main(void)
{
    printf("Vector addition using GPU block\n");
    int *a, *b, *c;       // host buffers
    int *d_a, *d_b, *d_c; // device buffers
    int size = N * sizeof(int);
    // Allocate space for device copies of a, b, c
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    a = (int *)malloc(size); random_ints(a, N);
    b = (int *)malloc(size); random_ints(b, N);
    c = (int *)malloc(size);
    // Copy inputs to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    clock_t t;
    t = clock();
    // Launch add() kernel on GPU; synchronize so the timer covers the
    // kernel itself (launches are asynchronous).
    add_kernel<<<N, 1>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();
    t = clock() - t;
    // BUGFIX: clocks / CLOCKS_PER_SEC is *seconds*; the original divided by
    // a further 1000, printing values 10^6 too small while labelling them
    // milliseconds. Multiply by 1000 to convert seconds -> milliseconds.
    double time_taken = (((double)t)/CLOCKS_PER_SEC)*1000.0; // in milli-seconds
    printf("Took %4.3f milli-seconds to execute \n", time_taken);
    // Copy result back to host
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    printf("Result: \n");
    for(int i=0; i<20; i++){
        printf("%d ", c[i]);
    }
    printf("\n");
    // Cleanup
    if(a) free(a); if(b) free(b); if(c) free(c);
    if(d_a) cudaFree(d_a); if(d_b) cudaFree(d_b); if(d_c) cudaFree(d_c);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.