serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
1,801
|
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <cuda.h>
#define BLOCKSIZE 512
// Per-point potential sum: one thread per element i of phi.
// For each i: phi[i] = -sum_{j<i} m[j]/r[i] - sum_{j>i} m[j]/r[j].
// NOTE(review): the asymmetry (r[i] for j<i, r[j] for j>i) looks like the
// shell-theorem form of a spherically symmetric potential (interior mass acts
// from r[i], exterior shells from their own radius) — confirm with the caller.
// O(N) work per thread, so O(N^2) total.
__global__ void gpu_phi(float *r, float *m, float *phi, int N)
{
int i;
i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < N)   // tail guard: grid may overshoot N
{
phi[i] = 0.0;
for (int j=0; j<i; ++j)
phi[i] -= m[j]/r[i];
for (int j=i+1; j<N; ++j)
phi[i] -= m[j]/r[j];
}
}
// C-callable host wrapper: stages r, m, phi on the device, runs gpu_phi over
// all N elements, and copies the resulting potentials back into phi.
extern "C" void getphi_func(float *r, float *m, float *phi, int N)
{
    const size_t bytes = sizeof(float) * N;
    float *r_d, *m_d, *phi_d;

    // Device buffers for the three arrays.
    cudaMalloc(&r_d,   bytes);
    cudaMalloc(&m_d,   bytes);
    cudaMalloc(&phi_d, bytes);

    // Host -> Device staging (phi is copied too, matching the original flow).
    cudaMemcpy(r_d,   r,   bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(m_d,   m,   bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(phi_d, phi, bytes, cudaMemcpyHostToDevice);

    // Ceil-divide so a partial tail block covers the last elements.
    const int blocks = (N + BLOCKSIZE - 1) / BLOCKSIZE;
    gpu_phi<<<blocks, BLOCKSIZE>>>(r_d, m_d, phi_d, N);

    // Blocking Device -> Host copy (also synchronizes with the kernel).
    cudaMemcpy(phi, phi_d, bytes, cudaMemcpyDeviceToHost);

    cudaFree(r_d);
    cudaFree(m_d);
    cudaFree(phi_d);
}
|
1,802
|
#include "includes.h"
// Intentionally empty kernel: a launch performs no work (placeholder/stub).
__global__ void func(void){
}
|
1,803
|
#include <stdio.h>
// Query and print basic properties (name, clock, max grid dims) of device 0.
int main(void) {
    cudaDeviceProp deviceProp;
    int dev = 0;
    // Bug fix: the return status was ignored; on failure the struct is
    // uninitialized and the program printed garbage. Fail loudly instead.
    cudaError_t err = cudaGetDeviceProperties(&deviceProp, dev);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("Device number %d has name %s\n", dev, deviceProp.name);
    printf("Clock freq. (KHz): %d\n", deviceProp.clockRate);
    printf("The max grid size in x: %d, y: %d, z: %d\n", deviceProp.maxGridSize[0], \
    deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
    return 0;
}
|
1,804
|
#include "includes.h"
// Dual-variable update for a TV-style scheme (structure-of-arrays z1/z2).
// One thread per pixel (px, py); a/b are forward differences of f, and both
// z components are shrunk by t = 1 / (1 + tau*|grad f|).
// Grid/block: 2D launch covering at least nx x ny threads.
__global__ void zupdate2_SoA(float *z1, float *z2, float *f, float tau, int nx, int ny)
{
int px = blockIdx.x * blockDim.x + threadIdx.x;
int py = blockIdx.y * blockDim.y + threadIdx.y;
int idx = px + py*nx;
float a, b, t;
if (px<nx && py<ny)
{
// Forward differences with zero boundary. Bug fix: the original loaded
// f[idx + 1] and f[idx + nx] unconditionally, reading past the end of f
// on the last column / last row; the loads are now guarded.
a = 0;
b = 0;
float fc = f[idx];
if (px < nx - 1) a = f[idx + 1] - fc;   // right neighbor exists
if (py < ny - 1) b = f[idx + nx] - fc;  // lower neighbor exists
// update z (gradient-magnitude shrinkage factor)
t = 1 / (1 + tau*sqrtf(a*a + b*b));
z1[idx] = (z1[idx] + tau*a)*t;
z2[idx] = (z2[idx] + tau*b)*t;
}
}
|
1,805
|
#include "includes.h"
// Logistic sigmoid: maps any real x into (0, 1).
__device__ float sigmoid(float x) {
    const float e = expf(-x);
    return 1.0f / (1.0f + e);
}
// Evaluates a feed-forward-style network whose node dependencies are given by
// `topology` (triples: [leftBorder, rightBorder, weightsStart] per node).
// Shared-memory layout (dynamic): states[dim] floats followed by ready[dim]
// bools, where dim = argsSize + topSize. counter[0] tracks how many states
// are finalized; the block iterates until every node has been computed.
// Launch: single block; dynamic shared mem must be >= dim*(sizeof(float)+sizeof(bool)).
// NOTE(review): if no node becomes ready in a pass (e.g. a dependency cycle
// or rightBorder never <= counter[0]), the while loop never terminates — the
// topology is assumed to be topologically ordered; confirm with the producer.
__global__ void produceState2(const float* arguments, const int argsSize, const float* weights, const int* topology, const int topSize, float* outStates) {
const int tid = threadIdx.x;
const int dim = argsSize + topSize;
extern __shared__ float s[];
float* states = s;
bool* ready = (bool*)&states[dim];   // flags live right after the states
__shared__ int counter[1];
// Clear all ready flags (strided over the block).
int r = tid;
while(r < dim) {
ready[r] = false;
r += blockDim.x;
}
// The first argsSize states are the inputs; they start out ready.
if (tid == 0) {
counter[tid] = argsSize;
}
if (tid < argsSize) {
states[tid] = arguments[tid];
ready[tid] = true;
}
__syncthreads();
// Wave-front evaluation: each pass, thread tid tries node counter[0] + tid.
while(counter[0] < dim) {
const int index = counter[0] + tid;
const int topIndex = index - argsSize;
if (topIndex < topSize) {
const int leftBorder = topology[topIndex*3];
const int rightBorder = topology[topIndex*3 + 1];
const int weightsStart = topology[topIndex*3 + 2];
// A node may fire only once all of its inputs are finalized.
if (rightBorder <= counter[0]) {
float sum = 0;
for (int i = leftBorder; i < rightBorder; i++) {
sum += states[i] * weights[weightsStart + i - leftBorder];
}
states[index] = sigmoid(sum);
ready[index] = true;
}
}
__syncthreads();
// Single thread advances the counter past the contiguous ready prefix.
if (tid == 0) {
int total = counter[0];
for (int i = total; i < total + blockDim.x && i < dim; i++) {
if (ready[i]) {
counter[0]++;
}
}
}
__syncthreads();
}
// Write all finalized states out to global memory (strided).
int n = tid;
while(n < dim) {
outStates[n] = states[n];
n += blockDim.x;
}
}
|
1,806
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// #include <vld.h>
#include <algorithm>
#include <chrono>
#include <iostream>
#include <exception>
#include <random>
typedef double type_t;
const size_t size = 2000;
const size_t blockSize = 32;
type_t* matrixCreate();
void matrixRelease(type_t *matrix);
type_t maxDiff(type_t* a, type_t* b);
float multiplyWithCPU(type_t* a, type_t* b, type_t* c);
float multiplyWithCuda(type_t* a, type_t* b, type_t* c);
// Exception-based CUDA error handling: wrap each runtime call's status with
// checkCudaStatus, which throws MyCudaException on failure; printInfo maps
// the failure site (errorCodes) to a human-readable message.
namespace MyCudaErrorHandler
{
// Identifies which CUDA call failed (used to pick the message in printInfo).
enum errorCodes
{
CUDA_START,
CUDA_MALLOC,
CUDA_MEMCPY,
CUDA_LAUNCH_KERNEL,
CUDA_DEVICE_SYNCHRONIZE,
};
// Carries the failing call's site tag plus the raw cudaError_t.
class MyCudaException : public std::exception
{
cudaError_t status;
errorCodes err;
public:
MyCudaException(errorCodes errorType, cudaError_t& cudaStatus) : err(errorType), status(cudaStatus) {};
// Print a message describing where the failure occurred.
void printInfo()
{
switch (err)
{
case CUDA_START:
std::cout << "\ncudaSetDevice failed!\n Do you have a CUDA-capable GPU installed?\n";
break;
case CUDA_MALLOC:
std::cout << "cudaMalloc failed!";
break;
case CUDA_MEMCPY:
std::cout << "cudaMemcpy failed!";
break;
case CUDA_LAUNCH_KERNEL:
std::cout << "addKernel launch failed: " << cudaGetErrorString(status) << std::endl;
break;
case CUDA_DEVICE_SYNCHRONIZE:
std::cout << "cudaDeviceSynchronize returned error code " << status << " after launching addKernel!\n";
break;
default:
std::cout << "Unsupported error type!!!\n" << cudaGetErrorString(status);
}
}
};
// Throws MyCudaException when cudaStatus reports failure; no-op on success.
void checkCudaStatus(errorCodes errorType, cudaError_t& cudaStatus)
{
if (cudaStatus != cudaSuccess)
throw MyCudaException(errorType, cudaStatus);
}
}
// Naive dense matrix multiply C = A * B for size x size matrices (file-level
// `size`), one thread per output element. 2D launch must cover size x size.
__global__ void multiplyKernel(type_t* a, type_t* b, type_t* c)
{
    size_t i = blockDim.y * blockIdx.y + threadIdx.y;  // output row
    size_t j = blockDim.x * blockIdx.x + threadIdx.x;  // output column
    if (i >= size || j >= size)
        return;
    size_t index = i * size + j;
    // Accumulate in a register instead of read-modify-writing global memory
    // on every k-iteration (same result, `size` fewer global round-trips).
    type_t sum = 0;
    for (size_t k = 0; k < size; ++k)
    {
        sum += a[i * size + k] * b[k * size + j];
    }
    c[index] = sum;
}
// Benchmark driver: build four random size x size matrices, multiply a*b on
// the GPU and the CPU, report both timings and the max element-wise diff.
int main()
{
type_t* a = matrixCreate();
type_t* b = matrixCreate();
type_t* cp = matrixCreate();   // CPU result buffer (random init is overwritten)
type_t* gp = matrixCreate();   // GPU result buffer (random init is overwritten)
std::cout << "Started, matrix size - " << size << ", gpu block size: " << blockSize << std::endl;
float gpuTime = multiplyWithCuda(a, b, gp);
std::cout << "GPU elapsed time (in seconds): " << gpuTime << std::endl;
float cpuTime = multiplyWithCPU(a, b, cp);
std::cout << "CPU elapsed time (in seconds): " << cpuTime << std::endl;
// Sanity check: CPU and GPU products should agree up to float rounding.
std::cout << "Max diff: " << maxDiff(cp, gp) << std::endl;
matrixRelease(a);
matrixRelease(b);
matrixRelease(cp);
matrixRelease(gp);
return 0;
}
// Largest element-wise absolute difference between two size x size matrices.
type_t maxDiff(type_t* a, type_t* b)
{
    const int total = size * size;
    type_t worst = 0;
    for (int idx = 0; idx < total; ++idx)
    {
        const type_t d = std::abs(a[idx] - b[idx]);
        if (d > worst)
            worst = d;
    }
    return worst;
}
// Heap-allocate a size x size matrix filled with uniform random values in
// [-100, 100). Caller releases it with matrixRelease.
type_t* matrixCreate()
{
    const type_t lo = -100;
    const type_t hi = 100;
    std::random_device rd;
    std::mt19937 gen(rd());
    std::uniform_real_distribution<> dist(lo, hi);
    const int total = size * size;
    type_t* data = new type_t[total];
    for (int idx = 0; idx < total; ++idx)
    {
        data[idx] = dist(gen);
    }
    return data;
}
// Releases a matrix allocated by matrixCreate (new[] -> delete[]).
void matrixRelease(type_t* matrix)
{
    delete[] matrix;
}
// Reference O(n^3) CPU multiply c = a * b (size x size); returns elapsed
// wall time in seconds (millisecond resolution).
float multiplyWithCPU(type_t* a, type_t* b, type_t* c)
{
    const auto startTime = std::chrono::high_resolution_clock::now();
    for (size_t i = 0; i < size; ++i)
    {
        for (size_t j = 0; j < size; ++j)
        {
            type_t acc = 0;
            for (size_t k = 0; k < size; ++k)
                acc += a[size * i + k] * b[size * k + j];
            c[i * size + j] = acc;
        }
    }
    const auto stopTime = std::chrono::high_resolution_clock::now();
    const auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(stopTime - startTime);
    return ms.count() / 1000.0f;
}
// GPU multiply c = a * b via multiplyKernel; returns elapsed seconds, or -1
// on any CUDA error (message printed via the exception's printInfo).
// NOTE(review): the timed interval includes device selection, allocation and
// both copies, so this measures end-to-end transfer+compute, not kernel time.
float multiplyWithCuda(type_t *a, type_t *b, type_t *c)
{
type_t* dev_a = 0;
type_t* dev_b = 0;
type_t* dev_c = 0;
int byteSize = size * size * sizeof(type_t);
// One thread per output element: ceil(size / blockSize) blocks per axis.
unsigned int gridDim = (unsigned int)ceil((double)size / blockSize);
dim3 block(blockSize, blockSize);
dim3 grid(gridDim, gridDim);
cudaError_t cudaStatus;
cudaEvent_t start, stop;
float gpuTime = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);   // timing starts before any device work
try
{
cudaStatus = cudaSetDevice(0);
MyCudaErrorHandler::checkCudaStatus(MyCudaErrorHandler::CUDA_START, cudaStatus);
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, byteSize);
MyCudaErrorHandler::checkCudaStatus(MyCudaErrorHandler::CUDA_MALLOC, cudaStatus);
cudaStatus = cudaMalloc((void**)&dev_a, byteSize);
MyCudaErrorHandler::checkCudaStatus(MyCudaErrorHandler::CUDA_MALLOC, cudaStatus);
cudaStatus = cudaMalloc((void**)&dev_b, byteSize);
MyCudaErrorHandler::checkCudaStatus(MyCudaErrorHandler::CUDA_MALLOC, cudaStatus);
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, byteSize, cudaMemcpyHostToDevice);
MyCudaErrorHandler::checkCudaStatus(MyCudaErrorHandler::CUDA_MEMCPY, cudaStatus);
cudaStatus = cudaMemcpy(dev_b, b, byteSize, cudaMemcpyHostToDevice);
MyCudaErrorHandler::checkCudaStatus(MyCudaErrorHandler::CUDA_MEMCPY, cudaStatus);
// Launch a kernel on the GPU with one thread for each element.
multiplyKernel <<<grid, block >>> (dev_a, dev_b, dev_c);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
MyCudaErrorHandler::checkCudaStatus(MyCudaErrorHandler::CUDA_LAUNCH_KERNEL, cudaStatus);
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
MyCudaErrorHandler::checkCudaStatus(MyCudaErrorHandler::CUDA_DEVICE_SYNCHRONIZE, cudaStatus);
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, byteSize, cudaMemcpyDeviceToHost);
MyCudaErrorHandler::checkCudaStatus(MyCudaErrorHandler::CUDA_MEMCPY, cudaStatus);
}
catch (MyCudaErrorHandler::MyCudaException &e)
{
// Error path: report, release whatever was allocated, signal failure.
e.printInfo();
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventRecord(stop, 0);
return -1;
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpuTime, start, stop);  // milliseconds
return gpuTime / 1000.0f;                     // convert to seconds
}
|
1,807
|
/*
hello.cu
*/
#include <stdio.h>
// Host-only smoke test: no device code involved.
int main(){
    printf("Hello World!\n");
    return 0;
}
|
1,808
|
#include<stdio.h>
#define N 10
// Element-wise vector add c = a + b. Launched <<<N, 1>>>, so blockIdx.x
// selects the element; the guard protects against over-provisioned grids.
__global__ void suma_vect(int *a, int *b, int *c){
    int idx = blockIdx.x;
    if (idx < N)
        c[idx] = a[idx] + b[idx];
}
// Demo driver: add two small int vectors on the GPU and print each sum.
int main(void){
    int a[N], b[N], c[N];
    int *device_a, *device_b, *device_c;
    // Fill the host inputs: a[i] = i, b[i] = i^2.
    for (int i = 0; i < N; i++){
        a[i] = i;
        b[i] = i*i;
    }
    // Device allocations for the three vectors.
    cudaMalloc((void **)&device_a, sizeof(int)*N);
    cudaMalloc((void **)&device_b, sizeof(int)*N);
    cudaMalloc((void **)&device_c, sizeof(int)*N);
    // Stage the inputs on the GPU.
    cudaMemcpy(device_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    // One block per element.
    suma_vect<<<N,1>>>(device_a, device_b, device_c);
    // Blocking copy of the result (also synchronizes with the kernel).
    cudaMemcpy(c, device_c, N*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
        printf("%d+%d = %d\n", a[i], b[i], c[i]);
    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    return 0;
}
|
1,809
|
#include <math.h>
#include <stdio.h>
const double EPSILON = 1.0e-15;
const double a = 1.23;
const double b = 2.34;
const double c = 3.57;
void __global__ add(const double *x, const double *y, double *z);
void check(const double *z, const int N);
// Driver: fill h_x with a and h_y with b, add them on the GPU, then verify
// every element equals c (= a + b) within EPSILON.
// NOTE(review): malloc/cudaMalloc return values are unchecked; with
// M = 800 MB per array a failed allocation would crash — consider checking.
int main(void)
{
const int N = 100000000;
const int M = sizeof(double) * N;
// request for host memory
double *h_x = (double*) malloc(M);
double *h_y = (double*) malloc(M);
double *h_z = (double*) malloc(M);
for(int i = 0; i < N; ++i){
h_x[i] = a;
h_y[i] = b;
}
// request for device memory
double *d_x, *d_y, *d_z;
cudaMalloc((void **)&d_x, M);
cudaMalloc((void **)&d_y, M);
cudaMalloc((void **)&d_z, M);
cudaMemcpy(d_x, h_x, M, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, M, cudaMemcpyHostToDevice);
// set for grid and block; N is an exact multiple of block_size, so the
// grid covers N threads exactly (the kernel has no bounds guard).
const int block_size = 128;
const int grid_size = N / block_size;
add<<<grid_size, block_size>>>(d_x, d_y, d_z);
// TransferData (blocking copy also synchronizes with the kernel)
cudaMemcpy(h_z, d_z, M, cudaMemcpyDeviceToHost);
check(h_z, N);
// Free Memory
free(h_x);
free(h_y);
free(h_z);
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
return 0;
}
// Element-wise z = x + y, one thread per element.
// NOTE(review): there is no `tid < N` guard — safe only because main launches
// exactly N / block_size blocks with N divisible by block_size; add a bound
// check if this kernel is reused with a different launch configuration.
void __global__ add(const double *x, const double *y, double *z){
int tid = blockIdx.x*blockDim.x + threadIdx.x;
z[tid] = x[tid] + y[tid];
}
// Verify every element of z equals the expected constant c (= a + b) to
// within EPSILON; prints a one-line pass/fail summary.
void check(const double *z, const int N){
    bool mismatch = false;
    for (int i = 0; i < N; i++)
    {
        if (fabs(z[i] - c) > EPSILON)
            mismatch = true;
    }
    printf("%s\n", mismatch ? "Has errors" : "No errors");
}
|
1,810
|
#include "includes.h"
// Row-wise symmetric 9-tap low-pass convolution over multiple scales.
// blockIdx.x encodes both the tile (block) and the scale; each scale uses a
// 16-float stride into the global coefficient array d_Kernel (defined
// elsewhere in the project). The taps are folded symmetrically around the
// center: (d[t]+d[t+8])*k0 + ... + d[t+4]*k4, which assumes RADIUS == 4.
// NOTE(review): blockDim.x must be CONVROW_W + 2*RADIUS so the shared tile is
// fully populated — confirm against the launch site.
__global__ void LowPassRowMulti(float *d_Result, float *d_Data, int width, int pitch, int height)
{
__shared__ float data[CONVROW_W + 2*RADIUS];
const int tx = threadIdx.x;
// Decompose blockIdx.x into (tile index, scale index).
const int block = blockIdx.x/(NUM_SCALES+3);
const int scale = blockIdx.x - (NUM_SCALES+3)*block;
const int xout = block*CONVROW_W + tx;
const int loadPos = xout - RADIUS;          // apron load position
const int yptr = blockIdx.y*pitch;          // row start in the input
const int writePos = yptr + height*pitch*scale + xout;  // per-scale output plane
float *kernel = d_Kernel + scale*16;
// Load with clamp-to-edge boundary handling.
if (loadPos<0)
data[tx] = d_Data[yptr];
else if (loadPos>=width)
data[tx] = d_Data[yptr + width-1];
else
data[tx] = d_Data[yptr + loadPos];
__syncthreads();   // tile must be complete before any thread convolves
if (xout<width && tx<CONVROW_W)
d_Result[writePos] =
(data[tx+0] + data[tx+8])*kernel[0] +
(data[tx+1] + data[tx+7])*kernel[1] +
(data[tx+2] + data[tx+6])*kernel[2] +
(data[tx+3] + data[tx+5])*kernel[3] +
data[tx+4]*kernel[4];
__syncthreads();
}
|
1,811
|
#include "golden.cuh"
#include "slicer.cuh"
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <stdio.h>
// Run the reference (golden) slicer on the same triangles and count how many
// voxels differ from the provided result `in`; returns the mismatch count.
long checkOutput(triangle* triangles_dev, size_t num_triangles, bool* in) {
    const long total = (long)NUM_LAYERS * Y_DIM * X_DIM;
    bool* expected = (bool*)malloc(total * sizeof(bool));
    std::cout << "executing golden model" << std::endl;
    goldenModel(triangles_dev, num_triangles, expected);
    std::cout << "comparing results" << std::endl;
    long diff = 0;
    long inside = 0;
    for (long i = 0; i < total; i++) {
        inside += expected[i];
        diff += (expected[i] != in[i]);
    }
    free(expected);
    std::cout << inside << " pixels are inside the model." << std::endl;
    std::cout << diff << " pixels are different in the expected and actual output." << std::endl;
    return diff;
}
// Reference slicer: three kernel passes over a NUM_LAYERS x Y_DIM x X_DIM
// voxel grid. _fps1 records ray/triangle intersections per (x, y) column,
// _fps2 sorts each column's intersections, _fps3 classifies every voxel.
// NOTE(review): this function cudaFree()s the caller's triangles_dev at the
// end — callers must not reuse that buffer after calling goldenModel.
void goldenModel(triangle* triangles_dev, size_t num_triangles, bool* out) {
int threadsPerBlock = THREADS_PER_BLOCK;
int blocksPerGrid;
bool* all_dev;                      // device-side voxel classification
size_t size = NUM_LAYERS * Y_DIM * X_DIM * sizeof(bool);
cudaMalloc(&all_dev, size);
layer_t* all_intersections;         // per-column intersection "trunks"
cudaMalloc(&all_intersections, Y_DIM * X_DIM * NUM_LAYERS * sizeof(layer_t));
size_t* trunk_length;               // number of intersections per column
cudaMalloc(&trunk_length, Y_DIM * X_DIM * sizeof(size_t));
cudaMemset(trunk_length, 0, Y_DIM * X_DIM * sizeof(size_t));
int* locks;                         // per-column spin locks used by _fps1
cudaMalloc(&locks, Y_DIM * X_DIM * sizeof(int));
cudaMemset(locks, 0, Y_DIM * X_DIM * sizeof(int));
// Pass 1: one thread per (triangle, pixel) pair.
blocksPerGrid = (num_triangles * Y_DIM * X_DIM + threadsPerBlock - 1) / threadsPerBlock;
_fps1 << <blocksPerGrid, threadsPerBlock >> > (&triangles_dev[0], num_triangles, all_intersections, trunk_length, locks);
cudaDeviceSynchronize();
// Pass 2: one thread per pixel column.
blocksPerGrid = (X_DIM * Y_DIM + threadsPerBlock - 1) / threadsPerBlock;
_fps2 << <blocksPerGrid, threadsPerBlock >> > (all_intersections, trunk_length);
cudaDeviceSynchronize();
// Pass 3: one thread per voxel.
blocksPerGrid = (X_DIM * Y_DIM * NUM_LAYERS + threadsPerBlock - 1) / threadsPerBlock;
_fps3 << <blocksPerGrid, threadsPerBlock >> > (all_intersections, trunk_length, all_dev);
cudaDeviceSynchronize();
cudaFree(all_intersections);
cudaFree(trunk_length);
cudaFree(locks);
cudaMemcpy(out, all_dev, size, cudaMemcpyDeviceToHost);
cudaFree(all_dev);
cudaFree(triangles_dev);
}
// Pass 1: each thread handles one (triangle, pixel) pair and, if the pixel's
// vertical ray hits the triangle, appends the intersection layer to that
// pixel's trunk under a per-pixel spin lock.
// NOTE(review): the tri_idx >= num_triangles guard is commented out — the
// rounded-up grid's tail threads read triangles[tri_idx] past the array.
// It cannot simply be re-enabled as an early return, because __syncthreads()
// below must be reached by the whole block; confirm and fix upstream.
__global__
void _fps1(triangle* triangles, size_t num_triangles, layer_t* all_intersections, size_t* trunk_length, int* locks) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
size_t tri_idx = idx / (X_DIM * Y_DIM);
// if (tri_idx >= num_triangles) return;
// copy 1 triangle to the shared memory -- That's all we need on this block
// (assumes every thread in the block maps to the same triangle).
__shared__ triangle triangles_shared;
__shared__ double x_max, x_min, y_max, y_min;
if (threadIdx.x == 0) {
triangles_shared = triangles[tri_idx];
thrust::maximum<double> max;
thrust::minimum<double> min;
x_max = max(triangles_shared.p1.x, max(triangles_shared.p2.x, triangles_shared.p3.x));
x_min = min(triangles_shared.p1.x, min(triangles_shared.p2.x, triangles_shared.p3.x));
y_max = max(triangles_shared.p1.y, max(triangles_shared.p2.y, triangles_shared.p3.y));
y_min = min(triangles_shared.p1.y, min(triangles_shared.p2.y, triangles_shared.p3.y));
}
__syncthreads();   // bounding box must be ready before the rejection test
// Decompose the flat index into pixel coordinates, centered on the grid.
int y_idx = (idx - (tri_idx * (X_DIM * Y_DIM))) / X_DIM;
int x_idx = (idx - (tri_idx * (X_DIM * Y_DIM))) % X_DIM;
int x = x_idx - (X_DIM >> 1);
int y = y_idx - (Y_DIM >> 1);
double x_pos = x * RESOLUTION;
double y_pos = y * RESOLUTION;
// Cheap bounding-box rejection before the exact intersection test.
bool notInRect = (x_pos < x_min) || (x_pos > x_max) || (y_pos < y_min) || (y_pos > y_max);
layer_t* layers = all_intersections + y_idx * X_DIM * NUM_LAYERS + x_idx * NUM_LAYERS;
int* lock = locks + y_idx * X_DIM + x_idx;
size_t* length = trunk_length + y_idx * X_DIM + x_idx;
layer_t intersection = notInRect ? -1 : _pixelRayIntersection(triangles_shared, x, y);
bool run = (intersection != -1);
// Spin-lock append: only one thread may grow a pixel's trunk at a time.
while (run) {
if (atomicCAS(lock, 0, 1) == 0) {
layers[length[0]] = intersection;
length[0]++;
run = false;
atomicExch(lock, 0);
}
}
}
// Pass 2: one thread per (x, y) pixel column — sort that column's recorded
// intersection layers in place so pass 3 can binary-search them.
// NOTE(review): thrust::sort is invoked from device code with the device
// policy; presumably this runs sequentially per calling thread — confirm
// against the toolkit version in use.
__global__
void _fps2(layer_t* all_intersections, size_t* trunk_length) {
size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx >= X_DIM * Y_DIM) return;
size_t length = trunk_length[idx];
layer_t* curr_trunk = all_intersections + (idx * NUM_LAYERS);
thrust::sort(thrust::device, curr_trunk, curr_trunk + length);
}
// Pass 3: one thread per voxel. Decode (z, y, x) from the flat index, then
// classify the voxel against its pixel column's sorted intersection trunk.
__global__
void _fps3(layer_t* sorted_intersections, size_t* trunk_length, bool* out) {
    size_t idx = blockDim.x * blockIdx.x + threadIdx.x;
    int z_idx = idx / (X_DIM * Y_DIM);
    if (z_idx >= NUM_LAYERS) return;  // tail guard for the rounded-up grid
    size_t rem = idx - (size_t)z_idx * X_DIM * Y_DIM;
    int y_idx = rem / X_DIM;
    int x_idx = rem % X_DIM;
    size_t length = trunk_length[y_idx * X_DIM + x_idx];
    layer_t* intersection_trunk = sorted_intersections + y_idx * X_DIM * NUM_LAYERS + x_idx * NUM_LAYERS;
    out[idx] = _isInside(z_idx, intersection_trunk, length);
}
// Barycentric ray/triangle test for a vertical ray through pixel (x, y).
__device__ __forceinline__
layer_t _pixelRayIntersection(triangle t, int x, int y) {
/*
Let A, B, C be the 3 vertices of the given triangle
Let S(x,y,z) be the intersection, where x,y are given
We want to find some a, b such that AS = a*AB + b*AC
If a >= 0, b >= 0, and a+b <= 1, S is a valid intersection.
return the layer of intersection, or -1 if none
*/
// Pixel position relative to vertex A (= p1).
double x_d = x * RESOLUTION - t.p1.x;
double y_d = y * RESOLUTION - t.p1.y;
// Edge vectors AB and AC.
double x1 = t.p2.x - t.p1.x;
double y1 = t.p2.y - t.p1.y;
double z1 = t.p2.z - t.p1.z;
double x2 = t.p3.x - t.p1.x;
double y2 = t.p3.y - t.p1.y;
double z2 = t.p3.z - t.p1.z;
// Cramer's-rule solution of the 2x2 barycentric system.
// NOTE(review): a degenerate (zero-area in xy) triangle divides by zero here.
double a = (x_d * y2 - x2 * y_d) / (x1 * y2 - x2 * y1);
double b = (x_d * y1 - x1 * y_d) / (x2 * y1 - x1 * y2);
bool inside = (a >= 0) && (b >= 0) && (a + b <= 1);
// Interpolated z at the intersection point.
double intersection = (a * z1 + b * z2) + t.p1.z;
// // divide by layer width
layer_t layer = inside ? (intersection / RESOLUTION) : -1;
return layer;
}
// Point-in-solid test for layer `current` against a pixel's sorted
// intersection trunk. Binary-search for the layer; an exact hit is a surface
// voxel (inside). Otherwise startIdx is the number of intersections below
// `current`, and an odd crossing count means the voxel is interior.
__device__
bool _isInside(layer_t current, layer_t* trunk, size_t length) {
size_t startIdx = 0;
size_t endIdx = length;
size_t mid;
bool goLeft;
// perform binary search
while (startIdx < endIdx) {
mid = (startIdx + endIdx) / 2;
if (trunk[mid] == current) return true;   // exactly on a surface crossing
goLeft = trunk[mid] > current;
startIdx = goLeft ? startIdx : (mid + 1);
endIdx = goLeft ? mid : endIdx;
}
// Parity of the insertion index = parity of crossings below this layer.
return (bool)(startIdx & 1);
}
|
1,812
|
#include <stdio.h>
#include <math.h>
#define N 10000000
#define THREADS_PER_BLOCK 1000
//cambia todos los numeros pares excepto el 2
// Marks every even number except 2 as composite (a[index] = 1).
// NOTE(review): index = blockIdx.x*blockDim.x + threadIdx.x*2 makes each
// block span 2*blockDim slots while blocks start only blockDim apart, so
// neighboring blocks overlap and mark many even slots redundantly. With the
// launch in main (N/THREADS_PER_BLOCK blocks) every even < N is still
// covered, but verify coverage if the launch configuration changes.
__global__ void pares(char *a, int raiz)
{
// index this thread will examine
int index = blockIdx.x * blockDim.x + (threadIdx.x * 2);
// skip 2, the only even prime
if (index == 2)
return;
if (index < N)
a[index] = 1;
}
//para revisar los impares
// Sieve step for odd candidates: if the odd number at `index` is still
// unmarked (a[index] == 0) and index <= sqrt(N), mark all of its multiples
// starting at index^2 as composite.
__global__ void impares(char *a, int raiz)
{
    // Odd slots: stride-2 within the block, +1 to land on odd numbers.
    int index = blockIdx.x * blockDim.x + (threadIdx.x * 2) + 1;
    // skip 1 (not prime; handled on the host)
    if (index == 1)
        return;
    // Bug fix: with N/THREADS_PER_BLOCK blocks this index can exceed N-1,
    // and the original read a[index] out of bounds. Guard before touching a.
    if (index >= N)
        return;
    // only numbers not already marked composite need to sieve
    if (a[index] == 0)
    {
        if (index <= raiz)
            for (int j = index*index; j < N; j += index)
                a[j] = 1;
    }
}
// Sieve of Eratosthenes driver: marks composites on the GPU (pares/impares
// kernels), then counts and prints the primes below N on the host. Records
// two timings: compute-only (tiempo2) and total including transfers (tiempo1).
int main()
{
    // Host array: a[i] == 0 means i is still considered prime.
    char *a = new char[N];
    // Device copy of the array.
    char *d_a;
    // Only factors up to sqrt(N) are needed for sieving.
    int raiz = sqrt(N);
    int size = N * sizeof( char );
    // Timing state (CUDA events, like timestamps).
    float tiempo1, tiempo2;
    cudaEvent_t inicio1, fin1, inicio2, fin2;
    for( int i = 0; i < N; i++ )
        a[i] = 0;
    // 0 and 1 are special cases: never prime.
    a[0] = 1;
    a[1] = 1;
    // Start total timing.
    cudaEventCreate(&inicio1);
    cudaEventCreate(&fin1);
    cudaEventRecord( inicio1, 0 );
    /* allocate space for the device copy of a */
    cudaMalloc( (void **) &d_a, size );
    /* copy input to device */
    cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
    // Start compute-only timing.
    cudaEventCreate(&inicio2);
    cudaEventCreate(&fin2);
    cudaEventRecord( inicio2, 0 );
    /* launch the kernels on the GPU */
    pares<<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, raiz );
    impares<<< N / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, raiz );
    cudaEventRecord( fin2, 0);
    cudaEventSynchronize( fin2 );
    cudaEventElapsedTime( &tiempo2, inicio2, fin2 );
    /* copy result back to host */
    cudaMemcpy( a, d_a, size, cudaMemcpyDeviceToHost );
    cudaFree( d_a );
    cudaEventRecord( fin1, 0);
    cudaEventSynchronize( fin1 );
    cudaEventElapsedTime( &tiempo1, inicio1, fin1 );
    // Count (and print) the surviving primes.
    int cuantos=0;
    for (int i=0; i<N; i++)
    {
        if(a[i] == 0)
        {
            printf( "%d\n", i);
            cuantos++;
        }
    }
    printf( "cantidad de numeros primos: %d\n", cuantos);
    // Bug fix: `a` was allocated with new[], so it must be released with
    // delete[]; the original called free(a), which is undefined behavior.
    delete[] a;
    printf("Tiempo cálculo %f ms\n", tiempo2);
    printf("Tiempo total %f ms\n", tiempo1);
    return 0;
} /* end main */
|
1,813
|
#include "includes.h"
// Matrix multiply mr = m1 * m2, one thread per output element.
// NOTE(review): the guard mixes dimensions — it checks fila_r < fila2 and
// columna_r < columna1, while the write uses columna2 as the row stride of
// mr. This is only consistent when m1 is (fila1 x columna1), m2 is
// (fila2 x columna2) with columna1 == fila2; verify the intended shapes
// against the launch site before reusing.
__global__ void MultiplicarMatrices(float *m1, float *m2, float *mr, int columna1, int fila1, int columna2, int fila2)
{
int fila_r = blockIdx.y*blockDim.y+threadIdx.y;      // output row
int columna_r = blockIdx.x*blockDim.x+threadIdx.x;   // output column
float tmp_mult = 0;
if ((fila_r < fila2) && (columna_r < columna1)) {
// Inner product of row fila_r of m1 with column columna_r of m2.
for (int i = 0; i < fila2 ; i++) {
tmp_mult += m1[i+columna1*fila_r]*m2[i*columna2+columna_r];
}
mr[fila_r*columna2+columna_r]= tmp_mult;
}
}
|
1,814
|
/*
* file name: TilingMatrix.cu
* NOTE:
* squareMatrixMult is much more efficent than the regular multiplier
* currently compiling with: nvcc TilingMatrix.cu -o tileTest
* Device Standards for: GeForce GTX 1060 6GB
* total global mem size: 6078 MBytes (6373572608 bytes)
* total shared mem per block: 49.152 KBytes (49152 bytes)
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include <sys/time.h>
#include <stdlib.h>
#include <sys/time.h> //measuring performance data
#define BLOCK_SIZE 32
/**********************************************************************
function name: matrixTriUpper
description: sets a matrix to an upper bound triangle matrix
parameters:
&a GPU device pointer to a m X n matrix (A)
Note:
return: none
**********************************************************************/
/**********************************************************************
function name: matrixTriUpper
description: sets a matrix to an upper bound triangle matrix
(zeroes every element strictly below the diagonal)
parameters:
&a GPU device pointer to a m X n matrix (A)
Note: expects a 2D launch whose grid covers m rows x n columns
(as configured in main); result is identical to the original
all-threads-sweep version, without the redundant work.
return: none
**********************************************************************/
__global__ void matrixTriUpper(float *a, int m, int n) {
    // One thread per element. The original made every launched thread loop
    // over the whole matrix (all writing the same values); here each thread
    // zeroes at most its own below-diagonal element.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < m && col < n && row > col)
        a[row*n + col] = 0;
}
/**********************************************************************
function name: matrixMult
description: dot product of two matrix (not only square)
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output purpose pointer to a m X k matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
further sppedup can be obtained by using shared memory to decrease global memory access times
return: none
**********************************************************************/
/**********************************************************************
function name: matrixMult
description: C (m x k) = A (m x n) * B (n x k), one thread per
output element; threads outside the matrix exit early.
parameters:
&a GPU device pointer to a m X n matrix (A)
&b GPU device pointer to a n X k matrix (B)
&c GPU device output pointer to a m X k matrix (C)
Note:
grid and block should be configured as:
dim3 dimGrid((k + BLOCK_SIZE - 1) / BLOCK_SIZE, (m + BLOCK_SIZE - 1) / BLOCK_SIZE);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
return: none
**********************************************************************/
__global__ void matrixMult(float *a, float *b, float *c, int m, int n, int k)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= m || col >= k)
        return;
    float acc = 0;
    for (int i = 0; i < n; i++)
        acc += a[row * n + i] * b[i * k + col];
    c[row * k + col] = acc;
}
/**********************************************************************
function name: squareMatrixMult
description: dot product of two matrix (not only square) in GPU
parameters:
&a GPU device pointer to a n X n matrix (A)
&b GPU device pointer to a n X n matrix (B)
&c GPU device output purpose pointer to a n X n matrix (C)
to store the result
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
SQUARE IS MUCH MORE EFFICENT THAN REGULAR
return: none
**********************************************************************/
/**********************************************************************
function name: squareMatrixMult
description: tiled shared-memory dot product of two n x n matrices
parameters:
&d_a GPU device pointer to a n X n matrix (A)
&d_b GPU device pointer to a n X n matrix (B)
&d_result GPU device output pointer to a n X n matrix (C)
Note:
grid and block should be configured as:
dim3 dim_grid((n - 1) / BLOCK_SIZE + 1, (n - 1) / BLOCK_SIZE + 1, 1);
dim3 dim_block(BLOCK_SIZE, BLOCK_SIZE, 1);
return: none
**********************************************************************/
__global__ void squareMatrixMult(float *d_a, float *d_b, float *d_result, int n)
{
    __shared__ float tile_a[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float tile_b[BLOCK_SIZE][BLOCK_SIZE];
    int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    float tmp = 0;
    for (int sub = 0; sub < gridDim.x; ++sub) {
        int aCol = sub * BLOCK_SIZE + threadIdx.x;  // column of A this thread loads
        int bRow = sub * BLOCK_SIZE + threadIdx.y;  // row of B this thread loads
        // Bug fix: the original guarded loads with a linear `idx >= n*n`
        // test, which lets an in-range linear index alias an element of the
        // wrong row when n is not a multiple of BLOCK_SIZE. Guard each
        // coordinate separately and pad with zeros.
        tile_a[threadIdx.y][threadIdx.x] = (row < n && aCol < n) ? d_a[row * n + aCol] : 0.0f;
        tile_b[threadIdx.y][threadIdx.x] = (bRow < n && col < n) ? d_b[bRow * n + col] : 0.0f;
        __syncthreads();  // tiles complete before use
        // Bug fix: the original loop started at k = threadIdx.x / n, which
        // skips the first product terms for threads with threadIdx.x >= n;
        // the accumulation must always start at k = 0.
        for (int k = 0; k < BLOCK_SIZE; ++k) {
            tmp += tile_a[threadIdx.y][k] * tile_b[k][threadIdx.x];
        }
        __syncthreads();  // all reads done before the next tile overwrites
    }
    if (row < n && col < n) {
        d_result[row * n + col] = tmp;
    }
}
/**********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
**********************************************************************/
/**********************************************************************
function name: main
description: build two random matrices, make them upper-triangular,
multiply them on the GPU, and print inputs/result.
parameters:
none
return: 0 on success
**********************************************************************/
int main(int argc, char** argv) {
    int printAllMat = 1; // debug flag for printing all of the matricies
    // Matrix shapes: A is m x n, B is n x k, C is m x k.
    int m=15;
    int n=15;
    int k=15;
    /* Fixed seed for illustration */
    srand(3333);
    // Allocate pinned host memory. Bug fix: the original text contained the
    // mis-encoded token `©A` (an HTML-entity corruption of `&copyA`),
    // which does not compile; restored the address-of expressions.
    float *copyA, *copyB, *copyC;
    cudaMallocHost((void **) &copyA, sizeof(float)*m*n); // copied matrix is m x n
    cudaMallocHost((void **) &copyB, sizeof(float)*n*k); // copied matrix is n x k
    cudaMallocHost((void **) &copyC, sizeof(float)*m*k); // copied matrix is m x k
    // random initialize matrix A
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            copyA[i * n + j] =((float)rand()/(float)(RAND_MAX)) * 1024;
        }
    }
    // random initialize matrix B
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < k; ++j) {
            copyB[i * k + j] = ((float)rand()/(float)(RAND_MAX)) * 1024;
        }
    }
    // Allocate memory space on the device
    float *matA, *matB, *matC;
    cudaMalloc((void **) &matA, sizeof(float)*m*n); // matrix is m x n
    cudaMalloc((void **) &matB, sizeof(float)*n*k); // matrix is n x k
    cudaMalloc((void **) &matC, sizeof(float)*m*k); // matrix is m x k
    // copy matrix A and B from host to device memory
    cudaMemcpy(matA, copyA, sizeof(float)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(matB, copyB, sizeof(float)*n*k, cudaMemcpyHostToDevice);
    printf("size of matA %dX%d: %zu bytes\n", m,n,(sizeof(float)*m*n));
    printf("size of matB %dX%d: %zu bytes\n", n,k,(sizeof(float)*n*k));
    printf("size of matC %dX%d: %zu bytes\n", m,k,(sizeof(float)*m*k));
    printf("total bytes allocated to mem: %zu bytes ", ((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)));
    printf("(~%zu MBytes)\n\n", (((sizeof(float)*m*n) + (sizeof(float)*n*k)+ (sizeof(float)*m*k)) / 1000000));
    unsigned int grid_rows = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (k + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    printf("Calculating...\n\n");
    // Launch kernel: the tiled square kernel when the shapes allow it,
    // otherwise the general rectangular kernel.
    if(m == n && n == k) {
        matrixTriUpper<<<dimGrid, dimBlock>>>(matA, m, n);
        matrixTriUpper<<<dimGrid, dimBlock>>>(matB, n, k);
        squareMatrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, n); // square, thus only need 1 param to define size
    }
    else { // not a square, thus it needs param to define all sizes
        matrixMult<<<dimGrid, dimBlock>>>(matA, matB, matC, m, n, k);
    }
    // Transfer results from device to host (blocking copy syncs the kernels).
    cudaMemcpy(copyC, matC, sizeof(float)*m*k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    int i,j;
    if(printAllMat == 1) {
        // print matrix A — bug fix: row stride of A is n, not k
        // (harmless here only because m == n == k).
        printf("matA matrix: \n");
        for (i = 0; i < m; i++) {
            for (j = 0; j < n; j++) {
                printf(" %f ", copyA[i*n + j]);
            }
            printf("\n");
        }
        // print matrix B (row stride k is correct for B)
        printf("\nmatB matrix: \n");
        for (i = 0; i < n; i++) {
            for (j = 0; j < k; j++) {
                printf(" %f ", copyB[i*k + j]);
            }
            printf("\n");
        }
    }
    // print result matrix
    printf("\nResult matrix: \n");
    for (i = 0; i < m; i++) {
        for (j = 0; j < k; j++) {
            printf(" %f ", copyC[i*k + j]);
        }
        printf("\n");
    }
    // free memory
    cudaFree(matA);
    cudaFree(matB);
    cudaFree(matC);
    cudaFreeHost(copyA);
    cudaFreeHost(copyB);
    cudaFreeHost(copyC);
    return 0;
}
|
1,815
|
#include <cuda_runtime.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdio.h>
#define CUDA_CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
double cpuSecond()
/*< return cpu time>*/
{
    // Wall-clock seconds since the epoch, with microsecond resolution.
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
/* Global-thread-index helpers: each returns the linearized global thread id
for one combination of grid dimensionality x block dimensionality. The
pattern is always: linearize the block id over the grid, multiply by the
number of threads per block, then add the linearized thread id. */
__device__ int getGlobalIdx_1D_1D()
/*< device get GlobalIdx with 1D grid 1D block >*/
{
return blockIdx.x * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_1D_2D()
/*< device get GlobalIdx with 1D grid 2D block >*/
{
return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_1D_3D()
/*< device get GlobalIdx with 1D grid 3D block >*/
{
return blockIdx.x * blockDim.x * blockDim.y * blockDim.z \
+ threadIdx.z * blockDim.x * blockDim.y \
+ threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_1D()
/*< device get GlobalIdx with 2D grid 1D block >*/
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
return blockId * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_2D()
/*< device get GlobalIdx with 2D grid 2D block >*/
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
return blockId * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_3D()
/*< device get GlobalIdx with 2D grid 3D block >*/
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x * blockDim.y * blockDim.z \
+ threadIdx.z * blockDim.x * blockDim.y \
+ threadIdx.y * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_1D()
/*< device get GlobalIdx with 3D grid 1D block >*/
{
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_2D()
/*< device get GlobalIdx with 3D grid 2D block >*/
{
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
return blockId * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_3D_3D()
/*< device get GlobalIdx with 3D grid 3D block >*/
{
int blockId = blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x * blockDim.y * blockDim.z \
+ threadIdx.z * blockDim.x * blockDim.y \
+ threadIdx.y * blockDim.x + threadIdx.x ;
return threadId;
}
|
1,816
|
#include <iostream>
#include <vector>
using namespace std;
// Element-wise product kernel: pC[i] = pA[i] * pB[i] for i in [0, N).
// One thread per element; threads past N exit immediately.
__global__ void mult(const int *pA, const int *pB, int *pC, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    pC[idx] = pA[idx] * pB[idx];
}
// Host driver: fills a[i]=i, b[i]=-i, multiplies element-wise on the GPU,
// and prints the resulting 8192 products space-separated.
int main(void)
{
    const int N = 8192;
    vector<int> a(N), b(N), c(N);
    for (int i = 0; i < N; ++i)
    {
        a[i] = i;
        b[i] = -i;
    }
    const size_t bytes = N * sizeof(int);
    int *devA, *devB, *devC;
    cudaMalloc(&devA, bytes);
    cudaMalloc(&devB, bytes);
    cudaMalloc(&devC, bytes);
    cudaMemcpy(devA, a.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, b.data(), bytes, cudaMemcpyHostToDevice);
    // ceil-division so a partial final block still covers the tail
    const int blockSize = 256;
    const int numBlocks = (N + blockSize - 1) / blockSize;
    mult<<<numBlocks, blockSize>>>(devA, devB, devC, N);
    // blocking D2H copy also synchronizes with the kernel
    cudaMemcpy(c.data(), devC, bytes, cudaMemcpyDeviceToHost);
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    for (int i = 0; i < N; ++i)
        cout << c[i] << " ";
    cout << endl;
    return 0;
}
|
1,817
|
#include <bits/stdc++.h>
#define N 16
using namespace std;
// Converts an interleaved RGB image (3 floats per pixel) to grayscale:
// gray = 0.21*R + 0.71*G + 0.07*B. Grid-stride loop covers all N*N pixels
// regardless of the launch configuration.
__global__ void RGBtoGray(float img[], float gray_img[])
{
    int ID = threadIdx.x + blockIdx.x * blockDim.x;
    // Bug fix: the stride must be the total thread count gridDim.x*blockDim.x.
    // The original used gridDim.x*blockIdx.x, which is 0 for block 0, so those
    // threads incremented i by 0 and looped forever.
    for (int i = ID; i < N * N; i += gridDim.x * blockDim.x)
    {
        gray_img[i] = 0.21 * img[i * 3] + 0.71 * img[i * 3 + 1] + 0.07 * img[i * 3 + 2];
    }
    // (trailing __syncthreads() removed: no shared state is exchanged)
}
// Prints a 3-channel, pixel-interleaved NxN matrix one channel plane at a
// time, rows newline-separated, with a blank double line between planes.
void print_matrix(float mat[])
{
    for (int ch = 0; ch < 3; ++ch) {
        for (int row = 0; row < N; ++row) {
            for (int col = 0; col < N; ++col) {
                cout << mat[(row * N + col) * 3 + ch] << " ";
            }
            cout << endl;
        }
        cout << endl << endl;
    }
}
// Prints a single-channel NxN matrix, one row per line, followed by a
// blank double line.
void print_gray_matrix(float mat[])
{
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            cout << mat[row * N + col] << " ";
        }
        cout << endl;
    }
    cout << endl << endl;
}
// Fills channel ch of every pixel with the constant value ch+1
// (R-plane = 1, G-plane = 2, B-plane = 3) in the interleaved layout.
void init_matrix(float mat[])
{
    for (int ch = 0; ch < 3; ++ch)
        for (int px = 0; px < N * N; ++px)
            mat[px * 3 + ch] = ch + 1;
}
// Host driver: builds a synthetic NxN RGB image, converts it to grayscale
// on the GPU and prints both images.
int main()
{
    // host buffers: pixel-interleaved RGB in, single-channel gray out
    float *host_img = new float[N * N * 3], *host_gray_img = new float[N * N ], *cuda_img, *cuda_gray_img;
    // 1-D launch sized to cover N*N pixels with 256-thread blocks (ceil div)
    int block_dim = 256, grid_dim;
    if( (N * N) % 256 == 0)
    {
        grid_dim = (N * N) / 256;
    }
    else
    {
        grid_dim = (N * N) / 256 + 1;
    }
    init_matrix(host_img);
    print_matrix(host_img);
    cudaMalloc(&cuda_img, sizeof(float) * N * N * 3);
    cudaMalloc(&cuda_gray_img, sizeof(float) * N * N );
    cudaMemcpy(cuda_img, host_img, sizeof(float) * N * N * 3, cudaMemcpyHostToDevice);
    RGBtoGray<<<grid_dim, block_dim>>>(cuda_img, cuda_gray_img);
    cudaMemcpy(host_gray_img, cuda_gray_img, sizeof(float) * N * N , cudaMemcpyDeviceToHost);
    print_gray_matrix(host_gray_img);
    // Bug fix: these buffers were allocated with new[], so they must be
    // released with delete[]; calling free() on them is undefined behavior.
    delete[] host_img;
    delete[] host_gray_img;
    cudaFree(cuda_img);
    cudaFree(cuda_gray_img);
    return 0;
}
|
1,818
|
#include "includes.h"
// One relaxation step of the 0/1-knapsack DP for item k:
//   output_f[c] = max(input_f[c], input_f[c - weights[k-1]] + profits[k-1])
// for capacities c in [c_min, capacity]; one thread per capacity value,
// launched with 512 threads per block.
// NOTE(review): assumes c_min >= weights[k-1] so c - weights[k-1] never goes
// negative -- confirm at the call site.
__global__ void knapsackKernel(int *profits, int *weights, int *input_f, int *output_f, int capacity, int c_min, int k){
int c = blockIdx.x*512 + threadIdx.x;
// out-of-range capacities (below c_min or above capacity) do nothing
if(c<c_min || c>capacity){return;}
if(input_f[c] < input_f[c-weights[k-1]]+profits[k-1]){
output_f[c] = input_f[c-weights[k-1]]+profits[k-1];
}
else{
output_f[c] = input_f[c];
}
}
|
1,819
|
// Compile: nvcc -arch=sm_61 -std=c++11 assignment5-p2.cu -o assignment5-p2
#include <cmath>
#include <cstdint>
#include <iostream>
#include <sys/time.h>
#define THRESHOLD (0.000001)
#define SIZE1 4096
#define SIZE2 4097
#define ITER 100
#define BLOCK_SIZE 16
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Backend of the gpuErrchk macro: on any CUDA error, print the error string
// together with the call site (file:line) and, unless abort is false, exit
// the process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) {
  if (code != cudaSuccess) {
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
  }
}
// GPU counterpart of serial() for the SIZE1 grid: one thread per column
// (column index j+1). Each thread performs ITER sweeps of the recurrence
// A[i][j+1] += A[i-1][j+1]; the row loop stays sequential because row i
// depends on row i-1 (loop-carried dependence). Columns are independent.
__global__ void kernel1(double* A) {
  // SB: Write the first kernel here
  int j = blockIdx.x*blockDim.x + threadIdx.x;
  if(j < SIZE1 - 1) {
    for (int k = 0; k < ITER; k++) {
      for (int i = 1; i < SIZE1; i++) {
        A[i*SIZE1 + j + 1] = A[(i - 1)*SIZE1 + j + 1] + A[i*SIZE1 + j + 1];
      }
    }
  }
}
// Same computation as kernel1 but on the SIZE2 (= 4097) grid: one thread per
// column, ITER sequential row sweeps of A[i][j+1] += A[i-1][j+1].
// With the <<<4, 1024>>> launch in main, 4096 threads exactly cover the
// SIZE2-1 = 4096 active columns.
__global__ void kernel2(double* A) {
  // SB: Write the second kernel here
  int j = blockIdx.x*blockDim.x + threadIdx.x;
  if(j < SIZE2 - 1) {
    for (int k = 0; k < ITER; k++) {
      for (int i = 1; i < SIZE2; i++) {
        A[i*SIZE2 + j + 1] = A[(i - 1)*SIZE2 + j + 1] + A[i*SIZE2 + j + 1];
      }
    }
  }
}
// CPU reference: ITER sweeps of the column-wise prefix recurrence
// A[i][j] += A[i-1][j] over columns 1..SIZE1-1, rows 1..SIZE1-1.
__host__ void serial(double** A) {
  for (int sweep = 0; sweep < ITER; ++sweep) {
    for (int row = 1; row < SIZE1; ++row) {
      for (int col = 1; col < SIZE1; ++col) {
        A[row][col] += A[row - 1][col];
      }
    }
  }
}
// Compares the reference (2-D) and test (flattened row-major) matrices
// element-wise; counts entries whose absolute difference exceeds THRESHOLD
// and reports the maximum such difference.
__host__ void check_result(double** w_ref, double* w_opt, uint64_t size) {
  double maxdiff = 0.0, this_diff = 0.0;
  int numdiffs = 0;
  for (uint64_t i = 0; i < size; i++) {
    for (uint64_t j = 0; j < size; j++) {
      this_diff = w_ref[i][j] - w_opt[i*size + j];
      if (fabs(this_diff) > THRESHOLD) {
        numdiffs++;
        // Bug fix: track the maximum |difference|. The original compared the
        // signed value, so large negative deviations never updated maxdiff.
        if (fabs(this_diff) > maxdiff)
          maxdiff = fabs(this_diff);
      }
    }
  }
  if (numdiffs > 0) {
    cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << "; Max Diff = " << maxdiff
         << endl;
  } else {
    cout << "No differences found between base and test versions" << endl;
  }
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
__host__ double rtclock() {
  struct timeval tv;
  struct timezone tz;
  const int rc = gettimeofday(&tv, &tz);
  if (rc != 0) {
    std::cout << "Error return from gettimeofday: " << rc << "\n";
  }
  return tv.tv_sec + tv.tv_usec * 1.0e-6;
}
// Benchmarks the serial CPU sweep against the two GPU kernels and compares
// results. Note: the event pair brackets the H2D copy, kernel and D2H copy,
// so "kernel time" includes transfer time.
int main() {
  double** A_ser = new double*[SIZE1];
  double* A_k1 = new double[SIZE1*SIZE1];
  double* A_k2 = new double[SIZE2*SIZE2];
  for (int i = 0; i < SIZE1; i++) {
    A_ser[i] = new double[SIZE1];
  }
  for (int i = 0; i < SIZE1; i++) {
    for (int j = 0; j < SIZE1; j++) {
      A_ser[i][j] = i + j;
      A_k1[i*SIZE1 + j] = i + j;
    }
  }
  for (int i = 0; i < SIZE2; i++) {
    for (int j = 0; j < SIZE2; j++) {
      A_k2[i*SIZE2 + j] = i + j;
    }
  }
  double clkbegin, clkend;
  double t;
  clkbegin = rtclock();
  serial(A_ser);
  clkend = rtclock();
  t = clkend - clkbegin;
  cout << "Serial code on CPU: " << (1.0 * SIZE1 * SIZE1 * ITER / t / 1.0e9)
       << " GFLOPS; Time = " << t * 1000 << " msec" << endl;
  cudaEvent_t start, end;
  gpuErrchk( cudaEventCreate(&start) );
  gpuErrchk( cudaEventCreate(&end) );
  // SB: Write your first GPU kernel here
  double* A_k1_c;
  gpuErrchk( cudaMalloc((void**)&A_k1_c, SIZE1*SIZE1*sizeof(double)) );
  gpuErrchk( cudaEventRecord(start, 0) );
  gpuErrchk( cudaMemcpy(A_k1_c, A_k1, SIZE1*SIZE1*sizeof(double), cudaMemcpyHostToDevice) );
  kernel1<<<4, 1024>>>(A_k1_c);
  gpuErrchk( cudaPeekAtLastError() );
  gpuErrchk( cudaMemcpy(A_k1, A_k1_c, SIZE1*SIZE1*sizeof(double), cudaMemcpyDeviceToHost) );
  gpuErrchk( cudaEventRecord(end, 0) );
  gpuErrchk( cudaDeviceSynchronize() );
  float kernel_time = 0;
  gpuErrchk( cudaEventElapsedTime(&kernel_time, start, end) );
  check_result(A_ser, A_k1, SIZE1);
  cout << "Kernel 1 on GPU: " << (1.0 * SIZE1 * SIZE1 * ITER / t / 1.0e9)
       << " GFLOPS; Time = " << kernel_time << " msec" << endl;
  // SB: Write your second GPU kernel here
  double* A_k2_c;
  gpuErrchk( cudaMalloc((void**)&A_k2_c, SIZE2*SIZE2*sizeof(double)) );
  gpuErrchk( cudaEventRecord(start, 0) );
  gpuErrchk( cudaMemcpy(A_k2_c, A_k2, SIZE2*SIZE2*sizeof(double), cudaMemcpyHostToDevice) );
  kernel2<<<4, 1024>>>(A_k2_c);
  gpuErrchk( cudaPeekAtLastError() );
  gpuErrchk( cudaMemcpy(A_k2, A_k2_c, SIZE2*SIZE2*sizeof(double), cudaMemcpyDeviceToHost) );
  gpuErrchk( cudaEventRecord(end, 0) );
  gpuErrchk( cudaDeviceSynchronize() );
  kernel_time = 0;
  gpuErrchk( cudaEventElapsedTime(&kernel_time, start, end) );
  // check_result(A_ser, A_k2, SIZE1);
  cout << "Kernel 2 on GPU: " << (1.0 * SIZE2 * SIZE2 * ITER / t / 1.0e9)
       << " GFLOPS; Time = " << kernel_time << " msec" << endl;
  gpuErrchk( cudaFree(A_k1_c) );
  gpuErrchk( cudaFree(A_k2_c) );
  gpuErrchk( cudaEventDestroy(start) );
  gpuErrchk( cudaEventDestroy(end) );
  // Bug fix: these buffers came from new/new[], so free() was undefined
  // behavior; also each row of A_ser was previously leaked.
  for (int i = 0; i < SIZE1; i++) delete[] A_ser[i];
  delete[] A_ser;
  delete[] A_k1;
  delete[] A_k2;
  return EXIT_SUCCESS;
}
|
1,820
|
#include "includes.h"
// Intentionally empty kernel; appears to exist only so a device symbol is
// compiled into the binary (no work is performed at runtime).
__global__ void KernelVersionShim() { }
|
1,821
|
#include "includes.h"
// Block-wise sum reduction with an 8x unrolled load phase and fully unrolled
// tree phase, performed IN PLACE in global memory (i_data points into input,
// not shared memory). Each block first folds 8 block-strided elements into
// input[index], then reduces its leading blockDim.x-wide window; the block's
// partial sum lands in temp[blockIdx.x].
// NOTE(review): the final warp stage relies on a volatile pointer instead of
// explicit synchronization -- this assumes pre-Volta implicit warp lockstep;
// confirm target architecture.
__global__ void reduction_kernel_complete_unrolling8_1(int * input, int * temp, int size)
{
    int tid = threadIdx.x;
    int index = blockDim.x * blockIdx.x * 8 + threadIdx.x;
    int * i_data = input + blockDim.x * blockIdx.x * 8;
    // Phase 1: each thread sums its 8 strided elements into input[index].
    if ((index + 7 * blockDim.x) < size)
    {
        int a1 = input[index];
        int a2 = input[index + blockDim.x];
        int a3 = input[index + 2 * blockDim.x];
        int a4 = input[index + 3 * blockDim.x];
        int a5 = input[index + 4 * blockDim.x];
        int a6 = input[index + 5 * blockDim.x];
        int a7 = input[index + 6 * blockDim.x];
        int a8 = input[index + 7 * blockDim.x];
        input[index] = a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8;
    }
    __syncthreads();
    //complete unrolling manually
    //if the block dim == 1024
    if (blockDim.x == 1024 && tid < 512)
        i_data[tid] += i_data[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256)
        i_data[tid] += i_data[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128)
        i_data[tid] += i_data[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64)
        i_data[tid] += i_data[tid + 64];
    __syncthreads();
    // warp unrolling (last 64 -> 1 elements handled by a single warp)
    if (tid < 32)
    {
        volatile int * vsmem = i_data;
        vsmem[tid] += vsmem[tid + 32];
        vsmem[tid] += vsmem[tid + 16];
        vsmem[tid] += vsmem[tid + 8];
        vsmem[tid] += vsmem[tid + 4];
        vsmem[tid] += vsmem[tid + 2];
        vsmem[tid] += vsmem[tid + 1];
    }
    if (tid == 0)
    {
        temp[blockIdx.x] = i_data[0];
    }
}
|
1,822
|
#include <iostream>
#include <fstream>
#include <cstdio>
#include <chrono>
#include <cmath>
// Fills a[i] with i^2/(i+1)^2 and zeroes the tendency array at.
void init(double* const __restrict__ a, double* const __restrict__ at, const int ncells)
{
    for (int idx = 0; idx < ncells; ++idx)
    {
        const double num = pow(idx, 2);
        const double den = pow(idx + 1, 2);
        a[idx] = num / den;
        at[idx] = 0.;
    }
}
// Adds a diffusion tendency to `at` using second-order central differences:
// at += visc * (d2a/dx2 * dxidxi + d2a/dy2 * dyidyi + d2a/dz2 * dzidzi)
// over the interior points of an itot x jtot x ktot grid (boundary layer of
// one cell is skipped). Offloaded with OpenACC; `a` and `at` must already be
// present on the device (see the `present` clause).
void diff(double* const __restrict__ at, const double* const __restrict__ a, const double visc,
          const double dxidxi, const double dyidyi, const double dzidzi,
          const int itot, const int jtot, const int ktot)
{
    // strides for the flattened (i fastest) 3-D layout
    const int ii = 1;
    const int jj = itot;
    const int kk = itot*jtot;
    #pragma acc parallel loop present(a, at) collapse(3)
    for (int k=1; k<ktot-1; ++k)
        for (int j=1; j<jtot-1; ++j)
            for (int i=1; i<itot-1; ++i)
            {
                const int ijk = i + j*jj + k*kk;
                at[ijk] += visc * (
                        + ( (a[ijk+ii] - a[ijk   ])
                          - (a[ijk   ] - a[ijk-ii]) ) * dxidxi
                        + ( (a[ijk+jj] - a[ijk   ])
                          - (a[ijk   ] - a[ijk-jj]) ) * dyidyi
                        + ( (a[ijk+kk] - a[ijk   ])
                          - (a[ijk   ] - a[ijk-kk]) ) * dzidzi
                        );
            }
}
// Runs one correctness pass and one timed nloop-iteration pass of diff()
// on a cubic grid whose edge length comes from argv[1].
int main(int argc, char* argv[])
{
    if (argc != 2)
    {
        std::cout << "Add the grid size as an argument!" << std::endl;
        return 1;
    }
    const int nloop = 30;
    const int itot = std::stoi(argv[1]);
    const int jtot = std::stoi(argv[1]);
    const int ktot = std::stoi(argv[1]);
    const int ncells = itot*jtot*ktot;
    double *a = new double[ncells];
    double *at = new double[ncells];
    init(a, at, ncells);
    #pragma acc data copyin(at[0:ncells], a[0:ncells]) copyout(at[0:ncells])
    {
        // Check results
        diff(
            at, a,
            0.1, 0.1, 0.1, 0.1,
            itot, jtot, ktot);
    }
    printf("at=%.20f\n",at[itot*jtot+itot+itot/2]);
    #pragma acc data copyin(at[0:ncells], a[0:ncells]) copyout(at[0:ncells])
    {
        // Time performance
        auto start = std::chrono::high_resolution_clock::now();
        for (int i=0; i<nloop; ++i)
            diff(
                at, a,
                0.1, 0.1, 0.1, 0.1,
                itot, jtot, ktot);
        auto end = std::chrono::high_resolution_clock::now();
        double duration = std::chrono::duration_cast<std::chrono::duration<double>>(end - start).count();
        printf("time/iter = %E s (%i iters)\n",duration/(double)nloop, nloop);
    }
    printf("at=%.20f\n", at[itot*jtot+itot+itot/4]);
    // Bug fix: the two new[] buffers were leaked.
    delete[] a;
    delete[] at;
    return 0;
}
|
1,823
|
#include "includes.h"
// Writes the sentinel value -1 into d_check[0..nz); one thread per element,
// threads beyond nz do nothing.
__global__ void init_check(int *d_check, int nz)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < nz) {
        d_check[idx] = -1;
    }
}
|
1,824
|
#include "includes.h"
// Stub kernel: takes dense matrices A/B, index arrays Cir/Cjc and output P,
// but the body is intentionally empty (no-op at runtime).
__global__ void __dds0(int nrows, int ncols, float *A, float *B, int *Cir, int *Cjc, float *P) {}
|
1,825
|
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
// Demo kernel: odd-numbered threads print a greeting with their ids; even
// threads print an alternate message, and multiples of 3 print an extra line.
__global__ void helloWorld(){
    int threadIndex = threadIdx.x;
    int blockIndex = blockIdx.x;
    if(threadIndex%2 == 1)
        printf("Hello World from thread %d of block %d\n",
                threadIndex, blockIndex);
    else{
        printf("hello from the other threads\n");
        // Bug fix: threadIndex % 3 is truthy for NON-multiples of 3, which
        // contradicts the message below; test for remainder zero instead.
        if(threadIndex % 3 == 0)
            printf("hi from the multiples of 3\n");
    }
}
// Launches the demo kernel on 4 blocks of 3 threads and waits so the
// device-side printf output is flushed before the process exits.
int main(int argc, char **argv){
    const int blocks = 4;
    const int threadsPerBlock = 3;
    helloWorld<<<blocks, threadsPerBlock>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
1,826
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <time.h>
/*
* Monte Carlo Pi Estimation Algorithm in CUDA
*
* This Project uses Cuda and thread
* topology to estimate Pi.
*
* Author: Clayton Glenn
*/
#define MAX_THREAD 16
#define MIN_THREAD 8
#define MAX_N 20
#define MIN_N 8
#define BLOCK_SIZE 256
#define DEBUG 0
/** Kernel Function
* First finds the Thread ID within the block of GPU Threads
* and if the Thread is Correct, it Encrypts the corresponding
* Character in the String.
**/
/* Monte Carlo kernel: t = total thread count, n = total point count.
 * Each thread walks the points with stride t and increments its private
 * counter flags[tid] for every point inside the unit quarter-circle
 * (x^2 + y^2 <= 1). The host later sums flags[] to estimate pi. */
__global__
void monte(int *flags, float *x_vals, float *y_vals, int t, int n) {
    //Get Thread id
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Loop N/Threads times plus one
    for(int i = 0; i < (n/t + 1); i++){
        // If looped id count is less than n, grab rand x
        // and y and check within unit. Increment if so
        if((i*t+tid) < n){
            if((pow(x_vals[(i*t+tid)], 2) + pow(y_vals[(i*t+tid)],2)) <= 1) flags[(tid)]++;
        }
    }
}
/**
* Helper Function
* Prints an string to standard error showing help
* for valid arguments in the executable
**/
// Prints usage help to stderr and terminates the process.
// NOTE(review): exits with status 0 even though this is an error path --
// consider exit(1) if callers/scripts check the exit code.
void printerror(){
    fprintf(stderr, "Invalid Arguments\n");
    fprintf(stderr, "Correct Form: ./monte [# threads] [# points]\n");
    exit(0);
}
/**
* Main Program
* This Program is for Homework 6 to encrypt some text or show
* the encryption method of text that is 2 to the power of N
* characters long all initialized to zero.
**/
// Parses 2^argv[1] threads and 2^argv[2] points, generates random (x, y)
// pairs on the host, counts in-circle hits on the GPU and prints the pi
// estimate plus the kernel time.
int main(int argc, char **argv) {
    // Defaults before argument parsing
    int N = MIN_THREAD;
    int THREADS = MIN_THREAD;
    int BLOCKS = 256;
    // Exactly two arguments are required: thread exponent and point exponent
    if (argc != 3) printerror();
    // Get Thread Count Per Block
    THREADS = strtol(argv[1], NULL, 10);
    THREADS = ((int)pow(2, THREADS));
    if(THREADS < BLOCKS) BLOCKS = 1;
    else THREADS = THREADS / BLOCKS;
    // Get N Coordinates
    N = strtol(argv[2], NULL, 10);
    N = (int)pow(2, N);
    // Print N and Threads for distinguish
    printf("(Threads: %d) (N: %d)\n", THREADS * BLOCKS, N);
    // Bug fix: these were runtime-sized stack arrays (VLAs, a GNU extension);
    // with N up to 2^20 floats they can overflow the stack. Use the heap.
    int *flags = (int *)malloc(BLOCKS * THREADS * sizeof(int));
    float *randx = (float *)malloc(N * sizeof(float));
    float *randy = (float *)malloc(N * sizeof(float));
    // Bug fix: zero ALL counters -- the original loop only cleared the first
    // min(N, BLOCKS*THREADS) entries, leaving garbage when threads > points.
    memset(flags, 0, BLOCKS * THREADS * sizeof(int));
    srand( time( NULL ) );
    for(int i = 0; i < N; i++){
        randx[i] = ( float )rand()/RAND_MAX;
        randy[i] = ( float )rand()/RAND_MAX;
    }
    // Init all other variables
    int *dev_flags;
    float *dev_randx;
    float *dev_randy;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float final_time = 0.0;
    // Allocate memory in the GPU for the int array
    cudaMalloc(&dev_randx, N*sizeof(float));
    cudaMalloc(&dev_randy, N*sizeof(float));
    cudaMalloc(&dev_flags, BLOCKS*THREADS*sizeof(int));
    // Copy the Memory from the array to the array pointers
    cudaMemcpy(dev_flags, flags, BLOCKS*THREADS*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_randx, randx, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_randy, randy, N*sizeof(float), cudaMemcpyHostToDevice);
    // Total Time Record
    cudaEventRecord(start);
    monte<<<BLOCKS, THREADS>>>(dev_flags, dev_randx, dev_randy, BLOCKS*THREADS, N);
    cudaEventRecord(stop);
    // Copy the results from GPU to the CPU
    cudaMemcpy(flags, dev_flags, BLOCKS*THREADS*sizeof(int), cudaMemcpyDeviceToHost);
    // Count total successes for each thread
    int success = 0;
    for(int i = 0; i < BLOCKS*THREADS; i++){
        if(flags[i] > 0) success += flags[i];
    }
    // Print estimation: (hits / points) * 4
    printf("Estimation of Pi: %1.6f\n", ((float)success/N)*4);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&final_time, start, stop);
    printf("Time in Kernel: %1.10f\n\n", final_time/1000);
    cudaFree(dev_flags);
    cudaFree(dev_randx);
    cudaFree(dev_randy);
    // Release timing events and host buffers (previously leaked events).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(flags);
    free(randx);
    free(randy);
    return 0;
}
|
1,827
|
#include "includes.h"
// Bit-wise recombination of parent pairs into offspring rows (appears to be a
// genetic-algorithm uniform crossover). For offspring row seq_offset+blockIdx.y,
// a pre-generated random number selects a parent pair; each thread handles one
// 32-bit word (col_offset) and combines the two parent words p and q using the
// offspring's current word as the bit mask: result = (p & ~mask) | (q & mask).
__global__ void recombiner( double * rands , unsigned int * parents , unsigned int parent_rows , unsigned int parent_cols , unsigned int * off , unsigned int cols , unsigned int seq_offset ) {
// per-row random selector in [0, 1); same value for every thread in the row
double id_offset = rands[ seq_offset + blockIdx.y ];
__syncthreads();
unsigned int col_offset = (blockIdx.x + threadIdx.y) * blockDim.x + threadIdx.x;
// using integer cast to truncate of fractional portion
unsigned int p0_offset = id_offset * ((parent_rows - 1) / 2);
// start of the chosen parent PAIR (rows 2*pair and 2*pair+1), plus column
p0_offset = (2 * p0_offset * parent_cols) + col_offset;
unsigned int p = 0, q = 0, res = 0;
if( col_offset < parent_cols ) {
// should hold true for entire warps
p = parents[ p0_offset ];
q = parents[ p0_offset + parent_cols ];
}
__syncthreads();
if( col_offset < cols ) {
res = off[ (seq_offset + blockIdx.y) * cols + col_offset ];
}
__syncthreads();
// take bits from p where the mask is 0 and from q where the mask is 1
res = (( p & ~res ) | ( q & res ));
__syncthreads();
if( col_offset < cols ) {
off[ (seq_offset + blockIdx.y) * cols + col_offset ] = res;
}
}
|
1,828
|
#pragma once
#include "Vector3.cuh.cu"
namespace RayTracing
{
// A ray defined by an origin point and a direction vector, parameterized as
// P(t) = origin + t * direction. Usable from both host and device code.
class Ray
{
public:
Point3 origin;       // starting point of the ray
Vector3 direction;   // direction vector (no normalization is performed here)
public:
__host__ __device__
Ray() {}
__host__ __device__
Ray(const Point3 &origin, const Vector3 &direction) : origin(origin), direction(direction) {}
// Point reached after traveling parameter distance t along the ray.
__host__ __device__
Point3 At(const float t) const
{
return origin + t * direction;
}
};
} // namespace RayTracing
|
1,829
|
#include <stdio.h>
// Prints the N entries of s on one line, each followed by " | ".
void print_matrix(int *s, int N){
    for (int idx = 0; idx < N; ++idx)
        printf("%i | ", s[idx]);
}
// Sets every one of the N entries of m to val.
void init_matrix(int *m, int val, int N){
    int idx = 0;
    while (idx < N) {
        m[idx] = val;
        ++idx;
    }
}
// Device kernel computing SAXPY: s[i] = a*x[i] + y[i] for all N elements.
// NOTE(review): a single thread performs the whole loop -- this matches the
// <<<1,1>>> launch in main below, but does not parallelize across threads.
__global__
void saxpy_CUDA(int *s, int a, int *x, int *y, int N){
    for( int i = 0; i < N; ++i){
        s[i] = a*x[i] + y[i];
    }
}
// Host driver: allocates unified memory, runs SAXPY on the GPU with a
// single thread (<<<1,1>>>), then prints the result vector.
int main(int argc, char *argv[]){
    if(argc != 2){
        printf("compile must be: \n ./program_name N \n");
        // Bug fix: the original fell through after the message and
        // dereferenced argv[1] even when it was absent (null deref).
        return 1;
    }
    int N = atoi(argv[1]);
    int a = 2;
    int *s, *x, *y;
    // Allocate Unified Memory: accessible from both GPU and CPU.
    cudaMallocManaged(&s, N*sizeof(int));
    cudaMallocManaged(&x, N*sizeof(int));
    cudaMallocManaged(&y, N*sizeof(int));
    // Initialize: s = 0, x = 1, y = 0, so the result is s[i] = a.
    init_matrix(s, 0, N);
    init_matrix(x, 1, N);
    init_matrix(y, 0, N);
    // Launch the SAXPY kernel (s = a*x + y) on the GPU.
    saxpy_CUDA<<<1,1>>>(s, a, x, y, N) ;
    // Wait for GPU to finish before accessing the result on the host.
    cudaDeviceSynchronize();
    print_matrix(s, N);
    // Free unified memory.
    cudaFree(s);
    cudaFree(x);
    cudaFree(y);
    return 0;
}
// #include <iostream>
// #include <math.h>
// #include <typeinfo>
// using namespace std;
// // Kernel function to add the elements of two arrays
// __global__
// void add(int n, float *x, float *y)
// {
// for (int i = 0; i < n; i++)
// y[i] = x[i] + y[i];
// }
// int main(void)
// {
// // int N = 1<<20;
// int N = 20;
// // printf("%i \n", N);
// // float *x, *y;
// int *x, *y;
// // Allocate Unified Memory – accessible from CPU or GPU
// cudaMallocManaged(&x, N*sizeof(float));
// cudaMallocManaged(&y, N*sizeof(float));
// // initialize x and y arrays on the host
// for (int i = 0; i < N; i++) {
// x[i] = 1.0f;
// y[i] = 2.0f;
// }
// // Run kernel on 1M elements on the GPU
// add<<<1, 1>>>(N, x, y);
// // Wait for GPU to finish before accessing on host
// cudaDeviceSynchronize();
// // Check for errors (all values should be 3.0f)
// float maxError = 0.0f;
// for (int i = 0; i < N; i++)
// maxError = fmax(maxError, fabs(y[i]-3.0f));
// std::cout << "Max error: " << maxError << std::endl;
// // Free memory
// cudaFree(x);
// cudaFree(y);
// return 0;
// }
|
1,830
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <math.h>
#include <iostream>
const int N = 1000000;
const int blocksize = 256;
// Element-wise sum c[i] = a[i] + b[i] for the first n elements;
// surplus threads exit early.
__global__ void add_two_tab(unsigned int *a, unsigned int *b, unsigned int *c, unsigned int n) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Adds two N-element vectors (all 1s + all 10s) on the GPU via thrust
// containers and prints the first and last ten results.
int main() {
    // host vectors pre-filled with their constant values
    thrust::host_vector<unsigned int> a_tab(N, 1);
    thrust::host_vector<unsigned int> b_tab(N, 10);
    thrust::host_vector<unsigned int> c_tab(N, 0);
    // mirror onto the device
    thrust::device_vector<unsigned int> ad_tab = a_tab;
    thrust::device_vector<unsigned int> bd_tab = b_tab;
    thrust::device_vector<unsigned int> cd_tab = c_tab;
    const dim3 dimBlock(blocksize);
    const dim3 dimGrid(ceil((float)N / (float)blocksize));
    add_two_tab<<<dimGrid, dimBlock>>>(ad_tab.data().get(), bd_tab.data().get(),
                                       cd_tab.data().get(), ad_tab.size());
    // thrust assignment copies device -> host (and synchronizes)
    c_tab = cd_tab;
    for (int i = 0; i < 10; i++) {
        std::cout << i << " : " << c_tab[i] << "\n";
        std::cout << N-1-i << " : " << c_tab[N-1-i] << "\n";
    }
    return 0;
}
|
1,831
|
#include <stdio.h>
// by lectures and "CUDA by Example" book
// device code: array sum calculation c = a + b.
// Grid-stride loop: each thread handles every (total-threads)-th element,
// so any launch configuration covers any array_len. The printf calls are
// debug tracing only.
__global__ void sum_arrays_kernel(float* a, float* b, float* c, int array_len) {
    printf("blockId, threadId: %d, %d\n", blockIdx.x, threadIdx.x);
    const int stride = blockDim.x * gridDim.x;
    for (int ind = blockDim.x * blockIdx.x + threadIdx.x; ind < array_len; ind += stride) {
        printf("blockId, threadId, ind: %d, %d, %d\n", blockIdx.x, threadIdx.x, ind);
        c[ind] = a[ind] + b[ind];
    }
}
// host code: preparation.
// Allocates device buffers, copies the inputs over, launches the sum kernel
// on a 2-block x 4-thread grid (the kernel's stride loop covers any length),
// and copies the result back into host_c.
void sum_arrays_gpu(float* host_a, float* host_b, float* host_c, int array_len) {
    // Step 1: device allocations + H2D copies
    const long bytes = array_len * sizeof(float);
    float *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**) &dev_a, bytes);
    cudaMemcpy(dev_a, host_a, bytes, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &dev_b, bytes);
    cudaMemcpy(dev_b, host_b, bytes, cudaMemcpyHostToDevice);
    cudaMalloc((void**) &dev_c, bytes);
    // Step 2: launch configuration and kernel
    dim3 grid(2, 1, 1);
    dim3 block(4, 1, 1);
    sum_arrays_kernel<<<grid, block>>>(dev_a, dev_b, dev_c, array_len);
    // Step 3: blocking D2H copy (also synchronizes with the kernel), cleanup
    cudaMemcpy(host_c, dev_c, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
// Builds two 10-element arrays (i and 2*(i+1)), sums them on the GPU and
// prints the per-element results.
int main() {
    // array size
    int array_len = 10;
    // input arrays
    float* host_a = (float*)malloc(array_len * sizeof(float));
    float* host_b = (float*)malloc(array_len * sizeof(float));
    for (int i = 0; i < array_len; i++) {
        host_a[i] = i;
        host_b[i] = (i + 1) * 2;
    }
    // result array
    float* host_c = (float*)malloc(array_len * sizeof(float));
    // summation on the GPU
    sum_arrays_gpu(host_a, host_b, host_c, array_len);
    // showing result
    printf("host_a[i] + host_b[i] = host_c[i]:\n");
    for (int i = 0; i < array_len; i++)
        printf("%.2f + %.2f = %.2f\n", host_a[i], host_b[i], host_c[i]);
    // Bug fix: the three malloc'd host buffers were leaked.
    free(host_a);
    free(host_b);
    free(host_c);
    return 0;
}
|
1,832
|
#include<stdio.h>
//gpu code
// Squares each input element: d_out[i] = d_in[i]^2.
// One thread per element; assumes a single-block launch (index = threadIdx.x).
__global__ void square(float * d_out, float *d_in)
{
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v;
}
//cpu code
// Host driver: squares the values 0..63 on the GPU and prints the results
// in a tab-separated, 4-values-per-line table.
int main()
{
    // generate input array
    const int ARRAY_SIZE = 64;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    float h_in[ARRAY_SIZE], h_out[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; ++i)
        h_in[i] = float(i);
    // device buffers. cudaMalloc takes a pointer-to-pointer because it
    // writes the device address through its first argument and returns an
    // integer error code (the CUDA API convention).
    float *d_in;
    float *d_out;
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, ARRAY_BYTES);
    // copy the input array from CPU to GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    // launch the kernel: one block of ARRAY_SIZE threads
    square<<<1, ARRAY_SIZE>>>(d_out, d_in);
    // copy the result array back to the CPU
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    // print, 4 values per row
    for (int i = 0; i < ARRAY_SIZE; ++i)
    {
        printf("%f", h_out[i]);
        printf(((i % 4) != 3) ? "\t" : "\n");
    }
    // free the GPU memory
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
// output console
/*
256.000000 289.000000 324.000000 361.000000
400.000000 441.000000 484.000000 529.000000
576.000000 625.000000 676.000000 729.000000
784.000000 841.000000 900.000000 961.000000
1024.000000 1089.000000 1156.000000 1225.000000
1296.000000 1369.000000 1444.000000 1521.000000
1600.000000 1681.000000 1764.000000 1849.000000
1936.000000 2025.000000 2116.000000 2209.000000
2304.000000 2401.000000 2500.000000 2601.000000
2704.000000 2809.000000 2916.000000 3025.000000
3136.000000 3249.000000 3364.000000 3481.000000
3600.000000 3721.000000 3844.000000 3969.000000
*/
|
1,833
|
#include <cuda_runtime.h>
#include <stdio.h>
// Prints each thread's full coordinate set: threadIdx, blockIdx, blockDim
// and gridDim.
__global__ void checkIndex(void)
{
    // Bug fix: the original passed blockIdx.y twice (13 arguments for 12
    // conversion specifiers), which shifted every later field by one; it
    // also lacked a space before "gridDim" in the output.
    printf("threadIdx: (%d, %d, %d) blockIdx: (%d, %d, %d) blockDim: (%d, %d, %d) "
           "gridDim: (%d, %d, %d)\n",
           threadIdx.x, threadIdx.y, threadIdx.z,
           blockIdx.x, blockIdx.y, blockIdx.z,
           blockDim.x, blockDim.y, blockDim.z,
           gridDim.x, gridDim.y, gridDim.z);
}
// Launches one 3-thread block per 3 elements (ceil division over nElem) and
// prints the grid/block shapes from the host before the kernel reports its
// per-thread coordinates.
int main(int argc, char **argv)
{
    int nElem = 6;
    dim3 block (3);
    dim3 grid ((nElem + block.x - 1) / block.x);
    // Bug fix: output typo "gird.z" -> "grid.z".
    printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
    checkIndex <<<grid, block>>> ();
    // cudaDeviceReset implicitly synchronizes, flushing device printf output.
    cudaDeviceReset();
    return(0);
}
|
1,834
|
// This program computes matrix multiplication on the GPU using CUDA
// By: Nick from CoffeeBeforeArch
#include <cstdlib>
#include <cassert>
#include <iostream>
using namespace std;
// Square matrix multiply c = a * b (row-major, N x N): one thread computes
// one output element as the dot product of a's row and b's column.
__global__ void matrixMul(int *a, int *b, int *c, int N){
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    // threads outside the matrix do nothing
    if (row >= N || col >= N)
        return;
    int acc = 0;
    for (int k = 0; k < N; ++k)
        acc += a[row * N + k] * b[k * N + col];
    c[row * N + col] = acc;
}
// Initializes a square N x N matrix with pseudo-random values in [0, 100).
void init_matrix(int *m, int N){
    const int total = N * N;
    for (int idx = 0; idx < total; ++idx)
        m[idx] = rand() % 100;
}
//MM on CPU
// Computes c = a * b for square, row-major N x N matrices on the host.
void cpu_mm(int *a, int *b, int *c, int N){
    int tmp;
    // For every row...
    for(int i = 0; i < N; i++){
        // For every col...
        for(int j = 0; j < N; j++){
            // Dot product of a's row i with b's column j
            tmp = 0;
            for(int k = 0; k < N; k++){
                tmp += a[i * N + k] * b[k * N + j];
            }
            // Bug fix: the original had the assignment reversed
            // (tmp = c[i*N+j]), so the computed product was discarded and
            // c was never written.
            c[i * N + j] = tmp;
        }
    }
}
// Verify the result on the CPU: recomputes each element of a*b and asserts
// it matches c (aborts on the first mismatch).
void verify_result(int *a, int *b, int *c, int N){
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            // expected dot product for this (row, col)
            int expected = 0;
            for (int k = 0; k < N; ++k) {
                expected += a[row * N + k] * b[k * N + col];
            }
            assert(expected == c[row * N + col]);
        }
    }
}
// Host driver for the (currently CPU-only) matrix-multiply experiment; the
// GPU path is commented out below.
int main(){
    // Set our square matrix dimension (2^10 x 2^10 default)
    int N = 1 << 10;
    size_t bytes = N * N * sizeof(int);
    (void)bytes;  // only needed by the commented-out GPU path
    // Allocate memory for our matrices.
    // Bug fix: the original left a, b and c as UNINITIALIZED pointers (the
    // cudaMallocManaged calls were commented out), so init_matrix/cpu_mm
    // wrote through wild pointers. Use plain host allocations for the
    // CPU-only path.
    int *a = new int[N * N];
    int *b = new int[N * N];
    int *c = new int[N * N];
    // cudaMallocManaged(&a, bytes);
    // cudaMallocManaged(&b, bytes);
    // cudaMallocManaged(&c, bytes);
    // Initialize our matrices
    init_matrix(a, N);
    init_matrix(b, N);
    // Set our CTA and Grid dimensions
    // int threads = 16;
    // int blocks = (N + threads - 1) / threads;
    // Setup our kernel launch parameters
    // dim3 THREADS(threads, threads);
    // dim3 BLOCKS(blocks, blocks);
    // Launch our kernel
    // matrixMul<<<BLOCKS, THREADS>>>(a, b, c, N);
    // cudaDeviceSynchronize();
    // Verify the result
    // verify_result(a, b, c, N);
    cpu_mm(a, b, c, N);
    cout << "PROGRAM COMPLETED SUCCESSFULLY!" << endl;
    // Free allocated memory
    delete[] a;
    delete[] b;
    delete[] c;
    // cudaFree(a);
    // cudaFree(b);
    // cudaFree(c);
    return 0;
}
|
1,835
|
#include <iostream>
#include <stdlib.h>
#include <string.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Backend of the gpuErrchk macro: on any CUDA error, print the error string
// with the call site (file:line) and, unless abort is false, exit the
// process with the error code.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
using namespace std;
// Stage-1 block reduction for summing a large vector: each 256-thread block
// sums its own 256-element slice of DA and writes the partial sum to
// DB[blockIdx.x]. n is the total element count of DA.
// Requires blockDim.x == 256.
__global__ void vectorSumStg1(int* DA, int* DB, int n){
    __shared__ int SA[256];
    int tid = threadIdx.x;
    // Bug fix: load from the block's own slice. The original indexed DA with
    // the thread id only, so EVERY block summed the first 256 elements.
    int gid = blockIdx.x * 256 + tid;
    // zero-pad so out-of-range slots never affect the sum
    SA[tid] = (gid < n) ? DA[gid] : 0;
    __syncthreads();
    // Tree reduction. The zero padding above makes extra terms harmless, so
    // no per-step bounds tests are needed (the original's i+k<n guards were
    // wrong for partially filled blocks).
    for (int s = 128; s > 32; s >>= 1) {
        if (tid < s) SA[tid] += SA[tid + s];
        __syncthreads();
    }
    // Final warp: volatile view so each step's store is visible to the next
    // load (the original's non-volatile unrolled section was a data race).
    if (tid < 32) {
        volatile int* v = SA;
        v[tid] += v[tid + 32];
        v[tid] += v[tid + 16];
        v[tid] += v[tid + 8];
        v[tid] += v[tid + 4];
        v[tid] += v[tid + 2];
        v[tid] += v[tid + 1];
    }
    // single writer per block (the original had every thread store DB)
    if (tid == 0) DB[blockIdx.x] = SA[0];
}
// Single-block reduction: sums the first n (<= blockDim.x) elements of DA
// into *Das. Dynamic shared memory must be at least blockDim.x ints and
// blockDim.x is assumed to be 256 (matching the hard-coded tree below).
__global__ void vectorSumWithinBlock(int* DA, int n, int* Das){
    extern __shared__ int SA[];
    int tid = threadIdx.x;
    // zero-pad the shared buffer so out-of-range slots never affect the sum
    SA[tid] = (tid < n) ? DA[tid] : 0;
    __syncthreads();
    // Tree reduction; zero padding makes the original's wrong i+k<n guards
    // unnecessary (they skipped required additions in partially filled blocks).
    for (int s = 128; s > 32; s >>= 1) {
        if (tid < s) SA[tid] += SA[tid + s];
        __syncthreads();
    }
    // Final warp with a volatile view so each step's store is visible to the
    // next load (the original's non-volatile section was a data race).
    if (tid < 32) {
        volatile int* v = SA;
        v[tid] += v[tid + 32];
        v[tid] += v[tid + 16];
        v[tid] += v[tid + 8];
        v[tid] += v[tid + 4];
        v[tid] += v[tid + 2];
        v[tid] += v[tid + 1];
    }
    // single writer (the original had every thread store *Das)
    if (tid == 0) *Das = SA[0];
}
// TODO(review): host wrapper is unimplemented -- it should launch the
// reduction kernels above and return the total. The original fell off the
// end of a non-void function (undefined behavior); return the (zero)
// placeholder explicitly until the kernels are wired in.
int vectorSum(int* DA, int n){
    (void)DA;
    (void)n;
    int Das = 0;
    return Das;
}
|
1,836
|
#include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
// Device kernel: each launched thread prints one greeting line.
// __global__ marks the function callable from host code via <<<>>> launches.
__global__ void kernelHelloWorld() {
    printf("Hello World!\n");
}
// Launches the hello-world kernel on Nblocks blocks of Nthreads threads.
int main(int argc, char** argv) {
    int Nblocks = 10; // number of blocks
    int Nthreads = 3; //number of threads per block
    // Bug fix: the original launched <<<Nblocks, Nblocks>>>, using the block
    // count for BOTH grid and block size and leaving Nthreads unused.
    kernelHelloWorld <<< Nblocks, Nthreads >>> ();
    // Bug fix: without a synchronize the process can exit before the
    // device-side printf output is flushed, printing nothing.
    cudaDeviceSynchronize();
    return 0;
}
|
1,837
|
// Actually, there are no rounding errors due to results being accumulated in an arbitrary order..
// Therefore EPSILON = 0.0f is OK
#define EPSILON 0.001f
#define EPSILOND 0.0000001
// Float version: each thread owns one element position (myIndex) in the
// first result plane and compares it against the same position in each of
// the other `iters` planes, spaced iterStep elements apart; mismatches
// beyond EPSILON are counted atomically into *faultyElems.
extern "C" __global__ void compare(float *C, int *faultyElems, size_t iters) {
    size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y;
    size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y
        gridDim.x*blockDim.x + // W
        blockIdx.x*blockDim.x + threadIdx.x; // X
    int myFaulty = 0;
    for (size_t i = 1; i < iters; ++i)
        if (fabsf(C[myIndex] - C[myIndex + i*iterStep]) > EPSILON)
            myFaulty++;
    atomicAdd(faultyElems, myFaulty);
}
// Double-precision variant of compare() using EPSILOND as tolerance.
extern "C" __global__ void compareD(double *C, int *faultyElems, size_t iters) {
    size_t iterStep = blockDim.x*blockDim.y*gridDim.x*gridDim.y;
    size_t myIndex = (blockIdx.y*blockDim.y + threadIdx.y)* // Y
        gridDim.x*blockDim.x + // W
        blockIdx.x*blockDim.x + threadIdx.x; // X
    int myFaulty = 0;
    for (size_t i = 1; i < iters; ++i)
        if (fabs(C[myIndex] - C[myIndex + i*iterStep]) > EPSILOND)
            myFaulty++;
    atomicAdd(faultyElems, myFaulty);
}
|
1,838
|
#include <stdio.h>
// Element-wise sine: b[i] = sinf(a[i]), one thread per element.
// NOTE(review): there is no bounds guard, so the launch must not create more
// threads than there are elements in a/b -- the current caller rounds the
// grid up to a multiple of 256, which over-runs its 4-element buffers.
__global__ void thread_per(float* a, float * b) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    b[index] = sinf(a[index]);
}
// Host driver: computes sin() of up to 4 sample values on the GPU and prints
// the results. count is clamped to the 4 available samples.
void thread_per_block(int count) {
    float a[] = {0.0, 1.57, 2.57, 3.14};
    // Bug fixes: (1) count > 4 made the original read past a[] and copy past
    // the 4-float device buffers; (2) the kernel has no bounds guard while
    // the launch rounds the thread count up to a multiple of 256, so the
    // device buffers must cover every launched thread; (3) b was leaked.
    if (count > 4) count = 4;
    if (count < 1) return;
    int blocks = (int)ceil(count / 256.0);
    size_t padded = (size_t)blocks * 256 * sizeof(float);
    float *b = (float*) malloc(sizeof(float) * 4);
    float *d_a;
    float *d_b;
    cudaMalloc((void **)& d_a, padded);
    cudaMalloc((void **)& d_b, padded);
    cudaMemset(d_a, 0, padded); // threads beyond `count` read zeros
    cudaMemcpy(d_a, a, count * sizeof(float), cudaMemcpyHostToDevice);
    thread_per<<<blocks, 256>>>(d_a, d_b);
    cudaMemcpy(b, d_b, count * sizeof(float), cudaMemcpyDeviceToHost);
    for(int i = 0; i < count; i++) {
        printf("sin(%.2f) = %.2f\n", a[i], b[i]);
    }
    free(b);
    cudaFree(d_a);
    cudaFree(d_b);
}
// Entry point: reads the element count from argv[1] and runs the demo.
int main(int argc, char** argv) {
    if (argc < 2) {
        printf("Enter count in file args\n");
        return -1;
    }
    const int count = atoi(argv[1]);
    thread_per_block(count);
    return 0;
}
|
1,839
|
#include <stdio.h>
#include <time.h>
#define SIZE 1024
// Element-wise vector add c = a + b; index is threadIdx.x only, so this
// assumes a single-block launch (as done in main: <<<1, SIZE>>>).
__global__ void VectorAdd(float *a, float *b, float *c, int n)
{
    const int idx = threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Adds two SIZE-element vectors on the GPU, prints the first ten results
// and the measured kernel time.
int main()
{
    float *a, *b, *c;
    float *d_a, *d_b, *d_c;
    clock_t start, end;
    double cpu_time_used;
    a = (float *)malloc(SIZE*sizeof(float));
    b = (float *)malloc(SIZE*sizeof(float));
    c = (float *)malloc(SIZE*sizeof(float));
    cudaMalloc( &d_a, SIZE*sizeof(float));
    cudaMalloc( &d_b, SIZE*sizeof(float));
    cudaMalloc( &d_c, SIZE*sizeof(float));
    for( int i = 0; i < SIZE; ++i )
    {
        a[i] = (float) i;
        b[i] = (float) i;
        c[i] = 0.0;
    }
    cudaMemcpy( d_a, a, SIZE*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, SIZE*sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_c, c, SIZE*sizeof(float), cudaMemcpyHostToDevice );
    start = clock();
    VectorAdd<<< 1, SIZE >>>(d_a, d_b, d_c, SIZE);
    // Bug fix: kernel launches are asynchronous -- without this synchronize
    // the clock() pair measured only the launch overhead, not the kernel.
    cudaDeviceSynchronize();
    end = clock();
    cudaMemcpy( c, d_c, SIZE*sizeof(float), cudaMemcpyDeviceToHost );
    for( int i = 0; i < 10; ++i)
        printf("c[%d] = %f\n", i, c[i]);
    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cpu_time_used = ((double) (end - start))/CLOCKS_PER_SEC;
    printf("Time = %f seconds to execute.\n", cpu_time_used);
    return 0;
}
|
1,840
|
/*
 * Bitonic sort of independent 512-element tiles: block `bid` sorts
 * input0[bid*512 .. bid*512+511] into ascending order in result0.
 * Requires blockDim.x == 512 and at least 4096 bytes of dynamic shared
 * memory; the two 2048-byte halves (sbase, sbase + 2048) are used as
 * ping-pong buffers, one compare-exchange step per buffer swap.
 *
 * This is the loop form of the fully unrolled network: stage s
 * (s = 1..9) first compares against the mirrored partner
 * tid ^ (2^s - 1) (direction chosen by bit 2^(s-1) of tid), then runs
 * the descending-distance steps 2^(s-2), ..., 1 with partner tid ^ dist
 * and direction bit `dist` — the exact sequence of the original.
 */
__global__ void tsort1(int *input0,int *result0){
    unsigned int tid = threadIdx.x;
    unsigned int bid = blockIdx.x;
    extern __shared__ __attribute__ ((aligned (16))) unsigned char sbase[];
    int *cur = (int *)sbase;             // buffer holding the latest values
    int *nxt = (int *)(sbase + 2048);    // buffer written by the next step
    // Stage 1 reads straight from global memory into the first buffer.
    {
        int a = input0[(bid * 512) + tid];
        int b = input0[(bid * 512) + (tid ^ 1)];
        cur[tid] = ((tid & 1) == 0) ? min(a, b) : max(a, b);
    }
    __syncthreads();
    for (unsigned int stage = 2; stage <= 9; ++stage) {
        // "flip" step: partner mirrored across the 2^stage-wide group
        unsigned int dirBit = 1u << (stage - 1);
        unsigned int partner = tid ^ ((1u << stage) - 1u);
        nxt[tid] = ((tid & dirBit) == 0)
                 ? min(cur[tid], cur[partner])
                 : max(cur[tid], cur[partner]);
        __syncthreads();
        int *t = cur; cur = nxt; nxt = t;
        // halving-distance merge steps
        for (unsigned int dist = 1u << (stage - 2); dist >= 1u; dist >>= 1) {
            partner = tid ^ dist;
            nxt[tid] = ((tid & dist) == 0)
                     ? min(cur[tid], cur[partner])
                     : max(cur[tid], cur[partner]);
            __syncthreads();
            t = cur; cur = nxt; nxt = t;
        }
    }
    // 45 steps in total (odd), so the final values sit in the first half,
    // matching the original's read from sbase.
    result0[(bid * 512) + tid] = cur[tid];
}
|
1,841
|
#include <stdlib.h>
#include <stdio.h>
/*
 * Allocation smoke test: allocates argv[1] bytes on both host and device
 * and copies the host buffer to the device.
 *
 * Fixes over the original:
 *  - argv[1] is no longer dereferenced without checking argc;
 *  - the host buffer is checked right after malloc (before cudaMalloc and
 *    the memcpy ever touch it);
 *  - cudaMalloc's result is checked, and the host buffer is released on
 *    that failure path (it previously leaked nothing only by luck of the
 *    exit order).
 */
int main(int argc, char *argv[]){
    if (argc < 2) {
        printf("usage: %s <bytes>\n", argv[0]);
        return 0;
    }
    unsigned long long size = atoll(argv[1]);
    char *arr = (char*)malloc(size);
    if (!arr) {
        printf("malloc error\n");
        return 0;
    }
    char *d_arr = NULL;
    if (cudaMalloc(&d_arr, size) != cudaSuccess) {   // was silently ignored
        printf("cudaMalloc error\n");
        free(arr);
        return 0;
    }
    cudaMemcpy(d_arr, arr, size, cudaMemcpyHostToDevice);
    free(arr);
    cudaFree(d_arr);
    return 0;
}
|
1,842
|
/*author: Zeke Elkins
*date: 3/27/2014
*description: a simple hello world program -- introducing CUDA device syntax
*/
#include <iostream>
using namespace std;
// Intentionally empty kernel: exists only to demonstrate __global__ syntax
// and a device launch.
__global__ void mykernel(void){
}
// Launches the no-op kernel with one block of one thread, then prints a
// greeting. No synchronization is performed: the program may exit before
// the kernel has run, which is fine since the kernel does nothing.
int main(void) {
    mykernel<<<1,1>>>();
    cout << "Hello World" << endl;
    return 0;
}
|
1,843
|
// Hadamard (element-wise) product c = a * b for a rows x columns matrix
// stored row-major. 2-D launch, one thread per element, guarded against
// out-of-range threads at the grid edge.
__global__ void
mat_hadamard(float *a, float *b, float *c, int rows, int columns)
{
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= rows || col >= columns)
        return;
    const int idx = row * columns + col;
    c[idx] = a[idx] * b[idx];
}
|
1,844
|
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <time.h>
#define SRC_LINES 10000 // 40
#define CMP_LINES 1000 // 4
#define LINE_MAXLEN 13000 // 100
#define BLOCK_SIZE 1000 // 4
/* Print error message and exit with error status. If PERR is not 0,
display current errno status. */
/* Write a formatted error message to stderr and terminate with status 1.
   When `perr` is non-zero the current errno text is appended via perror();
   otherwise a bare newline finishes the line. */
static void
error_print (int perr, char *fmt, va_list ap)
{
  vfprintf (stderr, fmt, ap);
  if (!perr)
    fprintf (stderr, "\n");
  else
    perror (" ");
  exit (1);
}
/* Print error message and exit with error status. */
/* printf-style fatal error without errno: formats the message and exits
   with status 1 via error_print. */
static void
errf (char *fmt, ...)
{
  va_list args;
  va_start (args, fmt);
  error_print (0, fmt, args);
  va_end (args);   /* unreachable (error_print exits), kept for symmetry */
}
/* Print error message followed by errno status and exit
with error code. */
/* printf-style fatal error that also reports the current errno status,
   then exits with status 1 via error_print. */
static void
perrf (char *fmt, ...)
{
  va_list args;
  va_start (args, fmt);
  error_print (1, fmt, args);
  va_end (args);   /* unreachable (error_print exits), kept for symmetry */
}
/* Abort with a readable message whenever a CUDA API call did not return
   cudaSuccess. */
static void
cudaErrhnd(cudaError_t err){
  if (err != cudaSuccess)
    errf("Error: %s", cudaGetErrorString(err));
}
/*
 * Re-reads `lineCount` lines of the source file starting at `startPos` and
 * echoes them to `ofp`; a line whose marker[i] != 'n' (i.e. it matched one
 * of the comparison words on the GPU) is replaced by "---".
 *
 * Fixes over the original: the line buffer is now freed (it leaked on
 * every call) and its allocation is checked before use.
 */
void replaceFoundWord(FILE *sfp, FILE *ofp, char *marker, unsigned int lineCount, fpos_t startPos)
{
    char *strbuf = (char*)malloc(LINE_MAXLEN);
    int i;
    if (!strbuf)
        return;                            /* nothing we can do without a buffer */
    fsetpos (sfp, &startPos);              /* rewind to where this section began */
    for (i = 0; i < lineCount; i++)
    {
        strcpy(strbuf, "\0");
        if (fgets(strbuf, LINE_MAXLEN, sfp) != NULL)
        {
            if (marker[i] == 'n')
            {
                fprintf(ofp, "%s", strbuf);       /* unmatched line: copy through */
            }
            else
            {
                fprintf(ofp, "%s", "---\n");      /* matched line: redact */
            }
        }
    }
    free(strbuf);                          /* was leaked in the original */
}
/* Device-side strcmp: 0 on equality, otherwise -1/+1 according to the
   first differing byte compared as unsigned char. */
__device__
int cdStrcmp(const char *s1, const char *s2){
  while (*s1 == *s2) {
    if (*s1 == '\0')
      return 0;
    ++s1;
    ++s2;
  }
  return (*(unsigned char *)s1 < *(unsigned char *)s2) ? -1 : +1;
}
/* One thread per comparison word: scans all `srcLineAmount` source lines,
   flags matching source lines in `elim` with 'y' and tallies the matches
   in count[tid]. Several threads may set the same elim[i] concurrently,
   but they all store the same byte, so the outcome is deterministic;
   count[tid] is private to each thread and needs no atomics. */
__global__
void ckCountWord(char *src, unsigned int srcLineAmount, char *cmp, char *elim, int *count) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const char *myWord = &cmp[tid * LINE_MAXLEN];
    for (int line = 0; line < srcLineAmount; ++line) {
        const char *srcLine = &src[line * LINE_MAXLEN];
        if (cdStrcmp(srcLine, myWord) == 0) {
            elim[line] = 'y';   // mark as found
            count[tid] += 1;
        }
    }
}
/* Driver: for each word/line in the comparison file (argv[2]) counts how
   many lines of the source file (argv[1]) match it exactly, using the GPU
   in SRC_LINES-sized sections. Matched source lines are replaced by "---"
   in replace-result.txt; per-word counts go to count-result.txt. */
int main(int argc, char **argv)
{
    if (argc < 3)
        errf("Please fill the input file and comparer");
    time_t begin = time(NULL);
    FILE *sfp = fopen (argv[1], "r");
    FILE *cfp = fopen (argv[2], "r");
    FILE *ofp = fopen ("replace-result.txt", "w");
    FILE *nfp = fopen ("count-result.txt", "w");
    if (!sfp)
        perrf("cannot open file `%s'", argv[1]);
    if (!cfp)
        perrf("cannot open file `%s'", argv[2]);
    if (!ofp)
        perrf("cannot create file `%s'", "replace-result.txt");
    if (!nfp)
        perrf("cannot create file `%s'", "count-result.txt");
    int i,j;
    /* D_* = device buffers, H_* = host buffers */
    char *D_srcSec, *H_srcSec, *D_elim, *H_elim, *D_cmpSec, *H_cmpSec;
    int *D_count, *H_count;
    size_t srcSecSize = SRC_LINES * LINE_MAXLEN;
    size_t cmpSecSize = CMP_LINES * LINE_MAXLEN;
    printf("Allocating Memory for Host\n");
    H_srcSec = (char*)malloc(srcSecSize);
    H_elim = (char*)malloc(SRC_LINES);
    H_cmpSec = (char*)malloc(cmpSecSize);
    H_count = (int*)malloc(sizeof(int) * CMP_LINES);
    printf("Allocating Memory for Device\n");
    cudaErrhnd(cudaMalloc((void**)&D_srcSec, srcSecSize));
    cudaErrhnd(cudaMalloc((void**)&D_elim, SRC_LINES));
    cudaErrhnd(cudaMalloc((void**)&D_cmpSec, cmpSecSize));
    cudaErrhnd(cudaMalloc((void**)&D_count, sizeof(int) * CMP_LINES));
    // main task
    printf("Initializing Data\n");
    /* load all comparison words once; they stay resident on the device */
    for(i = 0; i < CMP_LINES; i++){
        if(fgets(&H_cmpSec[i * LINE_MAXLEN], LINE_MAXLEN, cfp) == NULL) break;
    }
    cudaErrhnd(cudaMemcpy(D_cmpSec, H_cmpSec, cmpSecSize, cudaMemcpyHostToDevice));
    cudaErrhnd(cudaMemset(D_count, 0, sizeof(int) * CMP_LINES));
    int itr = 0;
    fpos_t startPos;
    /* process the source file in sections of up to SRC_LINES lines;
       i doubles as a "keep going" flag, j ends as the lines actually read */
    for(i = 1; i != 0;){
        itr += 1;
        if(itr % 100 == 0) printf("Computing section number %d.\n",itr);
        fgetpos(sfp, &startPos);  /* remember where this section starts so
                                     replaceFoundWord can re-read it */
        for(j = 0; j < SRC_LINES; j++){
            if(fgets(&H_srcSec[j * LINE_MAXLEN], LINE_MAXLEN, sfp) == NULL){
                i = 0;
                break;
            }
        }
        if(j == 0) break;
        cudaErrhnd(cudaMemset(D_elim, 'n', j));  /* 'n' = not matched yet */
        cudaErrhnd(cudaMemcpy(D_srcSec, H_srcSec, srcSecSize, cudaMemcpyHostToDevice));
        /* one thread per comparison word */
        ckCountWord<<<CMP_LINES/BLOCK_SIZE,BLOCK_SIZE>>>(D_srcSec, j, D_cmpSec, D_elim, D_count);
        cudaDeviceSynchronize();
        cudaErrhnd(cudaGetLastError());
        cudaErrhnd(cudaMemcpy(H_elim, D_elim, j, cudaMemcpyDeviceToHost));
        replaceFoundWord(sfp, ofp, H_elim, j, startPos);
    }
    cudaErrhnd(cudaMemcpy(H_count, D_count, sizeof(int) * CMP_LINES, cudaMemcpyDeviceToHost));
    cudaFree(D_srcSec);
    cudaFree(D_elim);
    cudaFree(D_cmpSec);
    cudaFree(D_count);
    free(H_srcSec);
    free(H_elim);
    // test output
    /*
    for(i=0; i<CMP_LINES; i++){
    printf("%d\n", H_count[i]);
    }
    */
    // modify files
    for(i=0; i < CMP_LINES; i++){
        fprintf(nfp, "%d ", H_count[i]);
        fprintf(nfp, "%s", &H_cmpSec[i * LINE_MAXLEN]);
    }
    time_t end = time(NULL);
    fprintf(nfp, "\nComputed in %.6f seconds.\n", difftime(end,begin));
    free(H_cmpSec);
    free(H_count);
    fclose(sfp);
    fclose(cfp);
    fclose(ofp);
    fclose(nfp);
    return 0;
}
|
1,845
|
#include "includes.h"
extern "C" {
}
#define TB 256
#define EPS 0.1
#undef MIN
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#undef MAX
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Normalised cross-correlation between every pixel-centred patch of `input`
   (c1 channel planes of h1 x w1) and every patch of `target` (c1 planes of
   h2 x w2): one thread per (source pixel, target pixel) pair. `patch` is
   the window size (kernel_radius = (patch-1)/2), `stride` the sampling
   step inside the window. Output conv is indexed id1*size2 + id2. */
__global__ void patchmatch_r_conv_kernel( float *input, float *target, float *conv, int patch, int stride, int c1, int h1, int w1, int h2, int w2 )
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int size1 = h1 * w1, size2 = h2 * w2;
    int N = size1 * size2;
    // id = id1 * size2 + id2
    if (id < N) {
        int id1 = id / size2, id2 = id % size2;
        int x1 = id1 % w1, y1 = id1 / w1;
        int x2 = id2 % w2, y2 = id2 / w2;
        int kernel_radius = (patch - 1) / 2;
        // dot product and both squared norms accumulated in double to limit
        // rounding error over the window
        double conv_result = 0, norm_1 = 0, norm_2 = 0;
        for (int dy = -kernel_radius; dy <= kernel_radius; dy+=stride) {
            for (int dx = -kernel_radius; dx <= kernel_radius; dx+=stride) {
                int xx1 = x1 + dx, yy1 = y1 + dy;
                int xx2 = x2 + dx, yy2 = y2 + dy;
                // only window offsets that land inside BOTH images contribute
                if (0 <= xx1 && xx1 < w1 && 0 <= yy1 && yy1 < h1 &&
                    0 <= xx2 && xx2 < w2 && 0 <= yy2 && yy2 < h2)
                {
                    int _id1 = yy1 * w1 + xx1, _id2 = yy2 * w2 + xx2;
                    for (int c = 0; c < c1; c++) {
                        float term1 = input[c * size1 + _id1];
                        float term2 = target[c * size2 + _id2];
                        conv_result += term1 * term2;
                        norm_1 += term1 * term1;
                        norm_2 += term2 * term2;
                    }
                }
            }
        }
        norm_1 = sqrt(norm_1);
        norm_2 = sqrt(norm_2);
        // 1e-9 guards against division by zero for all-zero patches
        conv[id] = conv_result / (norm_1 * norm_2 + 1e-9);
    }
    return ;
}
|
1,846
|
#include<iostream>
#include<vector>
#include<fstream>
#include<sstream>
#include<string>
#include<iterator>
#include<ctype.h>
#include <iomanip>
#include <math.h>
#include <fstream>
#include<cuda_runtime.h>
using namespace std;
vector<string> parFile;
vector<string> particleConfig;
// Velocity half of the velocity-Verlet step: one block per particle,
// v += (F_old + F_new) * dt / (2 m), applied componentwise.
// d_pPos is unused here but kept for signature compatibility with callers.
__global__ void updateVel (double* d_pPos, double delTime, double* d_pVel,double* d_calculatedData, int* d_pMass, double* d_oldForce)
{
    const int p = blockIdx.x;
    for (int k = 0; k < 3; ++k)
        d_pVel[p*3+k] += (d_oldForce[p*3+k] + d_calculatedData[p*3+k]) * delTime / (2.0 * d_pMass[p]);
}
// Position half of the velocity-Verlet step: one block per particle,
// x += v * dt + F * dt^2 / (2 m), applied componentwise
// (d_calculatedData holds the current force).
__global__ void updatePos (double*d_pPos, double delTime, double* d_pVel, double* d_calculatedData, int* d_pMass)
{
    const int p = blockIdx.x;
    for (int k = 0; k < 3; ++k)
        d_pPos[p*3+k] += delTime*d_pVel[p*3+k] + (d_calculatedData[p*3+k]*pow(delTime,2)/(2.0*d_pMass[p]));
}
/* Pairwise Lennard-Jones force kernel: launched on an nParticles x
   nParticles grid, one block per (Idx, Idy) pair; self-pairs are skipped.
   NOTE(review): each Idy block plainly WRITES d_calculatedData[Idx*3+k]
   (no accumulation, no atomics), so with more than two particles the
   blocks race and the last writer wins — the stored "force" on particle
   Idx comes from one arbitrary partner instead of the sum over all
   partners. Confirm whether this is intended for 2-particle runs only or
   needs an atomicAdd-based reduction over Idy. */
__global__ void forceTwoParticles(int nParticles, int* d_pMass, double* d_pPos, double* d_pVel, double* d_calculatedData, double* d_eps, double* d_sigma)
{
    int Idx = blockIdx.x ;
    int Idy = blockIdx.y;
    //vector<double> initForce = totalForce(Idx, Idy, nParticles, eps, sigma, d_pPos);
    if(Idx!=Idy)
    {
        // displacement Idx - Idy, componentwise
        double separation [3] = {d_pPos[Idx*3+0]-d_pPos[Idy*3+0], d_pPos[Idx*3+1]-d_pPos[Idy*3+1], d_pPos[Idx*3+2]-d_pPos[Idy*3+2]};
        //for (auto i: separation)
        //printf("separation vector0: %f \nseparation vector3: %f \nidx: %d \nidy: %d \n", separation[0], separation[3] , Idx, Idy);
        double separation_magnitude = sqrt (pow(separation[0],2)+pow(separation[1],2)+pow(separation[2],2));
        // printf("separation: %f Idx: %d Idy: %d \n", separation_magnitude,Idx, Idy);
        // LJ force magnitude divided by r, so multiplying by the raw
        // displacement components below yields the force vector
        double force_LJ = 24*(*d_eps/(pow(separation_magnitude,2)))*pow(*d_sigma/separation_magnitude,6)*(2*pow(*d_sigma/separation_magnitude,6)-1);
        //printf("force: %f Idx: %d Idy: %d \n", force_LJ,Idx, Idy);
        //printf("s: %f \n", pow(separation_magnitude,2));
        d_calculatedData [Idx*3+0] = force_LJ*(d_pPos[Idx*3+0]-d_pPos[Idy*3+0]);
        d_calculatedData [Idx*3+1] = force_LJ*(d_pPos[Idx*3+1]-d_pPos[Idy*3+1]);
        d_calculatedData [Idx*3+2] = force_LJ*(d_pPos[Idx*3+2]-d_pPos[Idy*3+2]);
        /*printf("calculated FORCE_kernel %f, %f, %f\n%f, %f, %f\n", d_calculatedData [0],d_calculatedData [1],d_calculatedData [2],d_calculatedData [3],d_calculatedData [4],d_calculatedData [5]);
        */ }
}
/* Molecular-dynamics driver (Lennard-Jones, velocity Verlet on the GPU):
   reads a .par parameter file (argv[1]) and the .in particle configuration
   it names, integrates the system from initTime to endTime, and writes
   VTK / plain-text snapshots every parFile[15] / parFile[11] steps. */
int main(int argc, char* argv[])
{
    //enableMapHost();
    ifstream parFileName (argv[1]); //reading *.par file
    if (parFileName.is_open())
    {
        string str;
        while(getline(parFileName, str))
        {
            stringstream temp(str);
            string str1;
            while (getline(temp, str1,' '))
            {
                parFile.push_back(str1);
            }
        }
    }
    else
    {
        cout<<"*.par file not found"<< endl;
    }
    ifstream particleConfigFile (parFile[1]); //reading *.in file
    if (particleConfigFile.is_open())
    {
        string str;
        while(getline(particleConfigFile,str))
        {
            stringstream temp(str);
            string str1;
            while (getline(temp, str1,' '))
            {
                if(str1[str1.size()-1]=='\0') //alpha-numeric character check
                {
                    str1.pop_back();
                }
                particleConfig.push_back(str1);
            }
        }
    }
    else
    {
        cout<<"*.in file not found"<< endl;
    }
    for(int i=0;i<particleConfig.size();++i)
    {
        cout<<"i: "<<i<<" "<<setprecision(10)<<(particleConfig[i])<<" size: "<<particleConfig[i].size()<<endl;
    }
    // first token is the particle count; the rest are 7 tokens per particle
    // (mass, x, y, z, vx, vy, vz)
    int nParticles = stoi(particleConfig[0]);
    particleConfig.erase(particleConfig.begin());
    vector<int> pMass (nParticles);
    vector<double> pPos (nParticles*3);
    vector<double> pVel (nParticles*3);
    for(int i=0; i<nParticles; ++i)
    {
        pMass[i]=stoi(particleConfig[i*7]);
    }
    int i=0;
    // unpack positions: j walks the flat 3-vector, i walks particles
    for(int j=0; j<particleConfig.size(); ++j)
    {
        if((i*7+3)<=particleConfig.size())
        {
            pPos[j]=stod(particleConfig[i*7+1]);
            pPos[j+1]=stod(particleConfig[i*7+2]);
            pPos[j+2]=stod(particleConfig[i*7+3]);
            j+=2;
            ++i;
        }
        else
            break;
    }
    i=0;
    // unpack velocities with the same walking pattern
    for(int j=0; j<particleConfig.size(); ++j)
    {
        if((i*7+6)<=particleConfig.size())
        {
            pVel[j]=stod(particleConfig[i*7+4]);
            pVel[j+1]=stod(particleConfig[i*7+5]);
            pVel[j+2]=stod(particleConfig[i*7+6]);
            j+=2;
            ++i;
        }
        else
            break;
    }
    double initTime=0.005, delTime = stod(parFile[3]), endTime = stod(parFile[5]), eps = stod(parFile[7]), sigma = stod(parFile[9]);
    string baseFile = parFile[13];
    int totalTimeSteps = endTime / delTime;
    vector<double> calculatedData(3*nParticles);
    //assigning pointer variables for device memory
    int* d_pMass; double* d_pPos; double* d_pVel; double* d_calculatedData;
    double* d_eps; double* d_sigma; double* d_delTime;
    //mem allocation in Device
    cudaMalloc (&d_pMass, nParticles*sizeof(int));
    cudaMalloc (&d_pPos, nParticles*3*sizeof(double));
    cudaMalloc (&d_pVel, nParticles*3*sizeof(double));
    cudaMalloc (&d_calculatedData, 3*nParticles*sizeof(double));
    cudaMalloc (&d_eps, sizeof(double));
    cudaMalloc (&d_sigma, sizeof(double));
    cudaMalloc (&d_delTime, sizeof(double));
    //mem copy from HostToDevice
    cudaMemcpy (d_pMass, &pMass[0],nParticles*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy (d_pPos, &pPos[0],nParticles*3*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy (d_pVel, &pVel[0],nParticles*3*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy (d_calculatedData, &calculatedData[0],3*nParticles*sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy (d_eps,&eps,sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy (d_sigma,&sigma,sizeof(double), cudaMemcpyHostToDevice);
    // NOTE(review): this copies the bytes of the pointer variable
    // d_delTime itself (&d_delTime), not &delTime. d_delTime is never
    // read afterwards, so it is harmless today — but fix before using it.
    cudaMemcpy (d_delTime,&d_delTime,sizeof(double), cudaMemcpyHostToDevice);
    int nThreads = nParticles*(nParticles-1);   // NOTE(review): unused
    vector<double> oldForce;
    oldForce.resize(3*nParticles);
    double* d_oldForce;
    cudaMalloc (&d_oldForce, 3*nParticles*sizeof(double));
    dim3 grid(nParticles,nParticles,1);
    //cout<<"hi"<<endl;
    //initial force
    forceTwoParticles<<<grid, 1>>>(nParticles, d_pMass, d_pPos, d_pVel, d_calculatedData, d_eps, d_sigma);
    cudaMemcpy (&calculatedData[0], d_calculatedData, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
    for(auto i: calculatedData)
    {cout<<"initialForce: "<<i<<'\t';}
    cout<<endl;
    int n=0;
    int vtk_count = 1;
    int out_count = 1;
    bool write_vtk = true;
    bool write_out = true;
    int temp_vtk = 0;
    int temp_out = 0;
    //calculate intial forces
    vector<double> force_new;
    //initial configuration at t=0
    ofstream myfile;
    myfile.open(baseFile+"0"+".vtk");
    myfile<<"# vtk DataFile Version 4.0\nhesp visualization file\nASCII\nDATASET UNSTRUCTURED_GRID\n";
    myfile<<"POINTS"<<" "<<nParticles<<" "<<"double"<<'\n';
    for(int i=0;i<nParticles;++i)
    {
        myfile<<fixed<<pPos[i*3+0]<<" "<<pPos[i*3+1]<<" "<<pPos[i*3+2];
        myfile<<'\n';
    }
    myfile<<"CELLS"<<" "<<0<<" "<<0<<'\n';
    myfile<<"CELL_TYPES"<<" "<<0<<'\n';
    myfile<<"POINT_DATA"<<" "<<nParticles<<'\n';
    myfile<<"SCALARS"<<" "<<"m"<<" "<<"double"<<'\n';
    myfile<<"LOOKUP_TABLE"<<" "<<"default"<<'\n';
    for (int i=0; i<nParticles;++i)
    {
        myfile<<pMass[i]<<'\n';
    }
    myfile<<"VECTORS"<<" "<<"v"<<" "<<"double"<<'\n';
    for(int i=0;i<nParticles;++i)
    {
        myfile<<pVel[i*3+0]<<" "<<pVel[i*3+1]<<" "<<pVel[i*3+2];
        myfile<<'\n';
    }
    myfile.close();
    myfile.open(baseFile+"0"+".out");
    myfile<<nParticles<<'\n';
    for(int i=0; i<nParticles; ++i)
    {
        myfile<<pMass[i]<<" "<<fixed<<pPos[i*3+0]<<" "<<pPos[i*3+1]<<" "<<pPos[i*3+2]<<" "<<pVel[i*3+0]<<" "<<pVel[i*3+1]<<" "<<pVel[i*3+2]<<'\n';
    }
    myfile.close();
    temp_vtk += 1;
    write_vtk=false;
    temp_out += 1;
    write_out=false;
    // main time loop: position update -> force recompute -> velocity update
    while (initTime<endTime)
    {
        // snapshot cadence: write every parFile[15]-th (vtk) / parFile[11]-th
        // (out) iteration
        if (temp_vtk % stoi(parFile[15]) == 0)
            //cout<<temp_vtk<<endl;
            write_vtk = true;
        if (temp_out % stoi(parFile[11]) == 0)
            //cout<<temp_out<<endl;
            write_out = true;
        if (temp_vtk % stoi(parFile[15]) != 0)
            write_vtk=false;
        if (temp_out % stoi(parFile[11]) != 0)
            write_out=false;
        cout<<"\n####\tIteration: "<<n<<"\tTime: "<<initTime<<"\t####\n";
        //calling Kernel
        /*for(int i=0; i<pPos.size();++i)
        {
        cout<<"old POS_main: "<<pPos[i]<<'\t';
        }
        cout<<endl;*/
        updatePos<<<nParticles , 1>>>(d_pPos, delTime, d_pVel, d_calculatedData,d_pMass);
        cudaMemcpy (&pPos[0], d_pPos, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
        //cudaMemcpy (d_pPos, &pPos[0], 3*nParticles*sizeof(double), cudaMemcpyHostToDevice);
        /*for(int i=0; i<nParticles*3; ++i)
        {
        cout<<"###"<<endl;
        cout<<fixed<<"pos: "<<pPos[i]<<endl;
        cout<<"vel: "<<pVel[i]<<endl;
        }*/
        //cout<<"hello"<<endl;
        // stash the current forces so updateVel can average old and new
        cudaMemcpy (&calculatedData[0], d_calculatedData, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
        //oldForce.assign(calculatedData.begin(), calculatedData.end()-1);
        /*for (auto i: calculatedData)
        std::cout << i << ' ';*/
        for(int i=0; i<oldForce.size();++i)
        {
            oldForce[i]=calculatedData[i];
            //cout<<"old FORC_main: "<<oldForce[i]<<'\t';
        }
        //cout<<endl;
        cout<<"updated POS: ";
        for(int i=0; i<pPos.size();++i)
        {
            cout<<fixed<<pPos[i]<<'\t';
        }
        cout<<endl;
        cudaMemcpy (d_oldForce, &oldForce[0], 3*nParticles*sizeof(double), cudaMemcpyHostToDevice);
        forceTwoParticles<<<grid , 1>>>(nParticles, d_pMass, d_pPos, d_pVel, d_calculatedData, d_eps, d_sigma);
        cudaMemcpy (&calculatedData[0], d_calculatedData, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
        //cudaMemcpy (d_calculatedData, &calculatedData[0], totalTimeSteps*nParticles*sizeof(double), cudaMemcpyHostToDevice);
        /*for(int i=0; i<calculatedData.size();++i)
        {
        cout<<"new FORCE_main: "<<calculatedData[i]<<'\t';
        }*/
        updateVel<<<nParticles , 1>>>(d_pPos, delTime, d_pVel, d_calculatedData,d_pMass, d_oldForce);
        cudaMemcpy (&pPos[0], d_pPos, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
        cudaMemcpy (&pVel[0], d_pVel, 3*nParticles*sizeof(double), cudaMemcpyDeviceToHost);
        cout<<"updated VEL: ";
        for(int i=0;i<pVel.size();++i)
        {
            cout<<fixed<<pVel[i]<<'\t';
        }
        cout<<endl;
        //writing *.vtk file
        if(write_vtk == true)
        {
            myfile.open(baseFile+to_string(vtk_count)+".vtk");
            myfile<<"# vtk DataFile Version 4.0\nhesp visualization file\nASCII\nDATASET UNSTRUCTURED_GRID\n";
            myfile<<"POINTS"<<" "<<nParticles<<" "<<"double"<<'\n';
            for(int i=0;i<nParticles;++i)
            {
                myfile<<fixed<<pPos[i*3+0]<<" "<<pPos[i*3+1]<<" "<<pPos[i*3+2];
                myfile<<'\n';
            }
            myfile<<"CELLS"<<" "<<0<<" "<<0<<'\n';
            myfile<<"CELL_TYPES"<<" "<<0<<'\n';
            myfile<<"POINT_DATA"<<" "<<nParticles<<'\n';
            myfile<<"SCALARS"<<" "<<"m"<<" "<<"double"<<'\n';
            myfile<<"LOOKUP_TABLE"<<" "<<"default"<<'\n';
            for (int i=0; i<nParticles;++i)
            {
                myfile<<pMass[i]<<'\n';
            }
            myfile<<"VECTORS"<<" "<<"v"<<" "<<"double"<<'\n';
            for(int i=0;i<nParticles;++i)
            {
                myfile<<pVel[i*3+0]<<" "<<pVel[i*3+1]<<" "<<pVel[i*3+2];
                myfile<<'\n';
            }
            myfile.close();
            vtk_count+=1;
        }
        if(write_out == true)
        {
            //cout<<"temp_out: "<<temp_out<<"\t";
            myfile.open(baseFile+to_string(out_count)+".out");
            myfile<<nParticles<<'\n';
            for(int i=0; i<nParticles; ++i)
            {
                myfile<<pMass[i]<<" "<<fixed<<pPos[i*3+0]<<" "<<pPos[i*3+1]<<" "<<pPos[i*3+2]<<" "<<pVel[i*3+0]<<" "<<pVel[i*3+1]<<" "<<pVel[i*3+2]<<'\n';
            }
            myfile.close();
            out_count+=1;
        }
        temp_vtk += 1;
        temp_out += 1;
        initTime+=delTime;
        ++n;
    }
}
|
1,847
|
#ifndef GPUPRIMITIVE_DEF_CU
#define GPUPRIMITIVE_DEF_CU
#include "stdlib.h"
#include <stdio.h>
#include <cuda_runtime.h>
//unsigned int gpuMemSize = 0;
# define CUDA_SAFE_CALL( call) { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} }
#define GPUMALLOC(D_POINTER, SIZE) CUDA_SAFE_CALL( cudaMalloc( D_POINTER, SIZE) )
#define CPUMALLOC(H_POINTER, SIZE) CUDA_SAFE_CALL(cudaMallocHost (H_POINTER, SIZE))
#define CPUFREE(H_POINTER) if(H_POINTER!=NULL) CUDA_SAFE_CALL(cudaFreeHost ((void *)H_POINTER))
#define GPUFREE(D_POINTER) CUDA_SAFE_CALL( cudaFree( D_POINTER) )
#define TOGPU(D_POINTER,H_POINTER, SIZE) CUDA_SAFE_CALL(cudaMemcpy(D_POINTER,H_POINTER, SIZE, cudaMemcpyHostToDevice))
#define FROMGPU(H_POINTER, D_POINTER, SIZE) CUDA_SAFE_CALL(cudaMemcpy(H_POINTER, D_POINTER, SIZE, cudaMemcpyDeviceToHost))
#define GPUTOGPU(D_TO, D_FROM, SIZE) CUDA_SAFE_CALL(cudaMemcpy(D_TO, D_FROM, SIZE, cudaMemcpyDeviceToDevice))
#define GPUTOGPU_CONSTANT(D_TO, D_FROM, SIZE, OFFSET) CUDA_SAFE_CALL(cudaMemcpyToSymbol(D_TO, D_FROM, SIZE, OFFSET,cudaMemcpyDeviceToDevice))
#define SHARED_MEMORY_PER_PROCESSOR (32*1024)
#define NLJ_NUM_PROCESSOR (16)//for GTX
#define NLJ_SHARED_MEM_PER_PROCESSOR (SHARED_MEMORY_PER_PROCESSOR)
#define NLJ_SHARED_MEM (NLJ_SHARED_MEM_PER_PROCESSOR*NLJ_NUM_PROCESSOR)
#define NLJ_MAX_NUM_BLOCK_PER_DIM (32*1024)
#define NLJ_NUM_THREADS_PER_BLOCK 512
#define NLJ_NUM_TUPLE_PER_THREAD 2
#define NLJ_S_BLOCK_SIZE (NLJ_NUM_THREADS_PER_BLOCK*NLJ_NUM_TUPLE_PER_THREAD)
#define NLJ_R_BLOCK_SIZE NLJ_NUM_THREADS_PER_BLOCK
#define PRED_EQUAL2(DATA) (DATA[0]==DATA[1])
#define PRED_EQUAL(V1,V2) (V1==V2)
//ke's definitions.
/////////////////////////////////////////////////////////////////////////defines
#ifdef max
#undef max
#define max(a,b) (((a) > (b)) ? (a) : (b))
#endif
#ifdef min
#undef min
#define min(a,b) (((a) < (b)) ? (a) : (b))
#endif
///////////////////////////////////////general define
#define _charHistOpt 0 //NOTE: if charOpt, then can't have too many (>255) duplicate pid!!
// A record as an int2 — presumably (key, rid) or similar; confirm against
// the join kernels that consume it.
typedef int2 Rec;
// A matched pair produced by the R-S join: the join value plus the record
// ids from relations R and S (alignment attribute left disabled as found).
struct/* __align__(16)*/ RecRS
{
    unsigned int val;
    int ridR;
    int ridS;
};
//end of Ke's definition.
#define TEST_MAX (1<<30)
#define TEST_MIN (0)
#define SHARED_MEM 1
#define COALESCED 1
//#define OUTPUT_INFO 1
#define BINARY_SEARCH 1
//#define BINARY_SEARCH_HASH 1
#define CONSTANT_BUFFER_SIZE (1024*64)
#endif
|
1,848
|
/**
* Copyright 2019 Matthew Oliver
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdint>
// Clamps f to the closed interval [a, b].
__device__ __forceinline__ float clamp(const float f, const float a, const float b)
{
    return fmaxf(a, fminf(f, b));
}
/**
 * Converts one YCbCr (YUV) sample to an unnormalised RGB float3.
 * Chroma components are centred by subtracting 128 before the BT601
 * conversion matrix is applied; the result is not clamped here.
 *
 * Fix: the Cr coefficient is now the float literal 1.13983f — the bare
 * double literal silently promoted that term to double precision, unlike
 * the other coefficients which all carry the f suffix.
 */
__device__ __forceinline__ float3 YUVToRGB(const uchar3 yuv)
{
    // Get YCbCr values
    const float luma = static_cast<float>(yuv.x);
    const float chromaCb = static_cast<float>(yuv.y) - 128.0f;
    const float chromaCr = static_cast<float>(yuv.z) - 128.0f;
    // Convert to RGB using BT601
    return make_float3(
        luma + 1.13983f * chromaCr, luma - 0.39465f * chromaCb - 0.58060f * chromaCr, luma + 2.03211f * chromaCb);
}
// Two horizontally adjacent RGB pixels — matches the 2-pixels-per-thread
// processing in getNV12ToRGB.
struct Pixel2
{
    float3 m_pixels[2];
};
// The two planes of an NV12 surface: full-resolution luma (Y) in plane 1,
// interleaved Cb/Cr at half vertical resolution in plane 2.
struct NV12Planes
{
    uint8_t* m_plane1;
    uint8_t* m_plane2;
};
// Three separate colour planes (planar R, G, B) with element type T.
template<typename T>
struct RGBPlanes
{
    T* m_plane1;
    T* m_plane2;
    T* m_plane3;
};
// Decodes the two horizontally adjacent NV12 pixels at (x, y) and (x+1, y)
// into unclamped RGB. `sourceStep` is the row stride in bytes, used for
// BOTH the luma and the chroma plane (the chroma plane shares the stride;
// x is assumed even so the Cb/Cr pair at offset x/x+1 is aligned —
// guaranteed by the 2-pixels-per-thread caller).
__device__ __forceinline__ Pixel2 getNV12ToRGB(
    const uint32_t x, const uint32_t y, const NV12Planes source, const uint32_t sourceStep)
{
    // NV12 is stored as 2 planes: the first plane contains Y the second plane contains U+V interleaved
    // There are 1 U+V sample for every 2x2 Y block
    // Y1 Y2 Y3 Y4 Y5 Y5
    // Y7 Y8 Y9 Y10 Y11 Y12
    // Y13 Y14 Y15 Y16 Y17 Y18
    // Y19 Y20 Y21 Y22 Y23 Y24
    //
    // U1 V1 U2 V2 U2 V3
    // U4 V4 U5 V5 U6 V6
    //
    // UV1 is used for Y1 Y2 Y7 Y8
    // UV2 is used for Y3 Y4 Y9 Y10
    // UV4 is used for Y13 Y14 Y19 Y20
    // etc.
    // Reading a 2x2 Y block requires 2 memory reads as it is split over 2 rows
    // To try and be a bit more cache friendly Y is processed in 2 pixels (row) at a time instead of 4
    // This replaces 2 Y loads at a time with 2 UV loads for each 2xY row
    uchar3 yuvi[2];
    const uint32_t sourceOffset = y * sourceStep + x;
    yuvi[0].x = source.m_plane1[sourceOffset];
    yuvi[1].x = source.m_plane1[sourceOffset + 1];
    // chroma rows cover two luma rows, hence y >> 1
    const uint32_t chromaOffset = y >> 1;
    const uint32_t chromaSourceOffset = chromaOffset * sourceStep + x;
    const uint8_t chromaCb = source.m_plane2[chromaSourceOffset];
    const uint8_t chromaCr = source.m_plane2[chromaSourceOffset + 1];
    // This doesn't perform any chroma interpolation, this feature would need to be added later if needed
    yuvi[0].y = chromaCb;
    yuvi[0].z = chromaCr;
    yuvi[1].y = chromaCb;
    yuvi[1].z = chromaCr;
    Pixel2 rgb;
    rgb.m_pixels[0] = YUVToRGB(yuvi[0]);
    rgb.m_pixels[1] = YUVToRGB(yuvi[1]);
    return rgb;
}
// Maps a destination channel type to the 3-component vector type used during
// conversion: float3 in the generic case, uchar3 for 8-bit output.
template<typename T>
class UpPack
{
public:
    typedef float3 Type;
};
template<>
class UpPack<uint8_t>
{
public:
    typedef uchar3 Type;
};
// Convert an unclamped float RGB pixel to the output representation:
// the generic (float3) path normalises to [0, 1] via __saturatef, the uchar3
// specialisation clamps to [0, 255] without normalising.
template<typename T>
__device__ __forceinline__ T getRGB(const float3 pixel)
{
    // Normalise float values
    return make_float3(__saturatef(pixel.x / 255.0f), __saturatef(pixel.y / 255.0f), __saturatef(pixel.z / 255.0f));
}
template<>
__device__ __forceinline__ uchar3 getRGB(const float3 pixel)
{
    return make_uchar3(clamp(pixel.x, 0.0f, 255.0f), clamp(pixel.y, 0.0f, 255.0f), clamp(pixel.z, 0.0f, 255.0f));
}
// Convert NV12 to planar RGB; each thread handles two adjacent pixels in x.
// Launch with a 2D grid where the x dimension covers width/2 threads.
// `destStep` is the destination row stride in elements of T.
// NOTE(review): the `x >= width - 1` guard skips the final column when width
// is odd — confirm odd widths are excluded upstream.
template<typename T>
__device__ __forceinline__ void convertNV12ToRGBP(const NV12Planes source, const uint32_t sourceStep,
    const uint32_t width, const uint32_t height, RGBPlanes<T> dest, const uint32_t destStep)
{
    // Each thread owns pixels (x, y) and (x+1, y); x is always even.
    const uint32_t x = blockIdx.x * (blockDim.x << 1) + (threadIdx.x << 1);
    const uint32_t y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width - 1 || y >= height) {
        return;
    }
    Pixel2 pixels = getNV12ToRGB(x, y, source, sourceStep);
    const auto pixel1 = getRGB<typename UpPack<T>::Type>(pixels.m_pixels[0]);
    const auto pixel2 = getRGB<typename UpPack<T>::Type>(pixels.m_pixels[1]);
    const uint32_t destOffset = y * destStep + x;
    dest.m_plane1[destOffset] = pixel1.x;
    dest.m_plane1[destOffset + 1] = pixel2.x;
    dest.m_plane2[destOffset] = pixel1.y;
    dest.m_plane2[destOffset + 1] = pixel2.y;
    dest.m_plane3[destOffset] = pixel1.z;
    dest.m_plane3[destOffset + 1] = pixel2.z;
}
// C-linkage kernel entry points (unmangled names, e.g. for driver-API
// cuModuleGetFunction lookup). Both forward to the templated implementation.
extern "C" {
__global__ void convertNV12ToRGB8P(const NV12Planes source, const uint32_t sourceStep, const uint32_t width,
    const uint32_t height, const RGBPlanes<uint8_t> dest, const uint32_t destStep)
{
    convertNV12ToRGBP<uint8_t>(source, sourceStep, width, height, dest, destStep);
}
__global__ void convertNV12ToRGB32FP(const NV12Planes source, const uint32_t sourceStep, const uint32_t width,
    const uint32_t height, const RGBPlanes<float> dest, const uint32_t destStep)
{
    // The float variant receives destStep in bytes; convert to elements.
    convertNV12ToRGBP<float>(source, sourceStep, width, height, dest, destStep / sizeof(float));
}
}
|
1,849
|
#include <stdio.h>
const int N = 1;
const int blocksize = 1;
// Intentionally empty kernel; main launches it only as a placeholder.
__global__ void kernelFunc() {
}
// Round-trips a one-element array through the device around an empty kernel
// launch; also allocates a very large device buffer as a memory trigger.
int main() {
    int b[N] = {4};
    int *bd;
    const int isize = N*sizeof(int);
    printf("%i", *b);
    if (cudaMalloc( (void**)&bd, isize ) != cudaSuccess) {
        printf("small cudaMalloc failed\n");
        return 1;
    }
    cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );
    // Allocate a big chunk of memory as a trigger. ~4 GB — failure is likely
    // on smaller GPUs, so check the result instead of freeing a garbage
    // pointer later (d_ptr was previously left uninitialised on failure).
    const int cnst = 1000000000;
    int *d_ptr = NULL;
    if (cudaMalloc(&d_ptr, (size_t)cnst * sizeof(int)) != cudaSuccess) {
        d_ptr = NULL;  // continue without the trigger buffer
    }
    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    kernelFunc<<<dimGrid, dimBlock>>>();
    cudaMemcpy( b, bd, isize, cudaMemcpyDeviceToHost );
    cudaFree( bd );
    if (d_ptr != NULL)
        cudaFree( d_ptr );
    printf(" %i\n", *b);
    return EXIT_SUCCESS;
}
|
1,850
|
#include "includes.h"
// Matrix-vector multiply: each thread accumulates one output element while
// the vector is streamed through shared memory in threadsPerBlock-sized
// chunks. Accumulation is "+=", so new_vector is presumably zero-initialised
// by the caller — TODO confirm. (Comments translated from Russian.)
__global__ void cuda_multi_matrix_on_vector(int *matrix, int *vector, int *new_vector, int numElements){
    __shared__ int cache[threadsPerBlock];
    const int idx = blockDim.x*blockIdx.x + threadIdx.x; // global index
    const int tIdx = threadIdx.x; // index of the thread within the block
    const int k = (numElements - 1 + threadsPerBlock) / threadsPerBlock; // number of vector chunks
    for (int i = 0; i < k; i++){ // a block holds threadsPerBlock threads; to cover the whole row the vector is staged k times
        if (tIdx+threadsPerBlock*i < numElements){ // if the thread index plus the chunk offset is below n, stage into shared memory
            cache[tIdx] = vector[tIdx + threadsPerBlock * i];
        }
        __syncthreads();
        int min = numElements - i*threadsPerBlock; // length of the tail chunk
        if (min > threadsPerBlock)min = threadsPerBlock; // full chunks are capped at threadsPerBlock
        if (idx < numElements){
            for (int j= 0; j < min; j++){
                new_vector[idx] += cache[j]*matrix[(i*threadsPerBlock + j)*numElements + idx]; // each thread multiplies the staged chunk by the matching slice of its matrix column
            }
        }
        __syncthreads();
    }
}
|
1,851
|
#include<iostream>
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
#define NThreads 64
#define NBlocks 16
#define Num NThreads*NBlocks
// One compare-exchange step of the bitonic sorting network: each thread pairs
// with the element at distance j (tid ^ j) and orders the pair ascending or
// descending depending on the direction bit (tid & i).
__global__ void bitonic_sort_step(int *arr, int i, int j)
{
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int partner = tid ^ j;
    // Only the lower index of each pair performs the exchange.
    if (partner <= tid)
        return;
    const bool ascending = ((tid & i) == 0);
    const int lhs = arr[tid];
    const int rhs = arr[partner];
    const bool outOfOrder = ascending ? (lhs > rhs) : (lhs < rhs);
    if (outOfOrder)
    {
        arr[tid] = rhs;
        arr[partner] = lhs;
    }
}
// Fill an array with random values, bitonic-sort it on the GPU, and verify
// the result is non-decreasing.
int main(int argc, char* argv[])
{
    int* arr= (int*) malloc(Num*sizeof(int));
    int* arr_temp = (int*) malloc(Num*sizeof(int));
    // Initialization
    time_t t;
    srand((unsigned)time(&t));
    for(int i=0;i<Num;i++){
        arr[i] = rand() % 10000;
    }
    //init device variable
    int* dev_ptr;
    cudaMalloc((void**)&dev_ptr,Num*sizeof(int));
    cudaMemcpy(dev_ptr,arr,Num*sizeof(int),cudaMemcpyHostToDevice);
    printf("\n End initialization \n");
    dim3 blocks(NBlocks,1);
    dim3 threads(NThreads,1);
    // bitonic sort: grow the sorted subsequence length i, and for each i run
    // the merge network with decreasing stride j.
    for(unsigned int i=2; i<=Num; i<<=1)
    {
        // bitonic merge
        for(unsigned int j=i>>1; j>0; j>>=1)
        {
            bitonic_sort_step<<<blocks,threads>>>(dev_ptr,i,j);
        }
    }
    cudaMemcpy(arr,dev_ptr,Num*sizeof(int),cudaMemcpyDeviceToHost);
    // Self validation: the array must be non-decreasing.
    bool flag = true;
    for(int i = 0;i < Num - 1;i++)
    {
        if (arr[i] > arr[i+1])
        {
            flag = false;
            break;
        }
    }
    if (flag)
        printf("\nVerification passes\n");
    else
        printf("\nVerification fails\n");
    cudaFree(dev_ptr);
    // Bug fix: both host buffers were leaked.
    free(arr);
    free(arr_temp);
    return 0;
}
|
1,852
|
#include "includes.h"
// Crossing-number point-in-polygon reference: bitmap[i] = 1 when points[i]
// lies inside the polygon described by `vertices` (VERTICES is a compile-time
// macro), 0 otherwise. The kernel body is a fixed reference implementation —
// the inline comments below forbid modification, so only documentation is
// added here.
// NOTE(review): the slope division produces inf for horizontal edges
// (vk.y == vj.y); the preceding parity test makes those edges irrelevant,
// but confirm against the CPU reference.
__global__ void cn_pnpoly_reference_kernel(int *bitmap, float2 *points, float2 *vertices, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        int c = 0;
        float2 p = points[i]; // DO NOT MODIFY THIS KERNEL
        int k = VERTICES-1;
        for (int j=0; j<VERTICES; k = j++) {
            float2 vj = vertices[j]; // DO NOT MODIFY THIS KERNEL
            float2 vk = vertices[k];
            float slope = (vk.x-vj.x) / (vk.y-vj.y);
            if ( ( (vj.y>p.y) != (vk.y>p.y)) && (p.x < slope * (p.y-vj.y) + vj.x) ) {
                c = !c;
            }
        }
        bitmap[i] = c; // DO NOT MODIFY THIS KERNEL
    }
}
|
1,853
|
//Submitted by GAutham M 15co118 and yashwanth 15co154
#include <stdio.h>
// Enumerate all CUDA devices and print their capabilities and limits.
int main() {
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n",
            prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n",
            prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
            2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        // Bug fix: these cudaDeviceProp fields are size_t, so "%u" is the
        // wrong conversion on LP64 platforms — use "%zu".
        printf("Total global memory: %zu\n", prop.totalGlobalMem);
        printf("Total shared memory per block: %zu\n", prop.sharedMemPerBlock);
        printf("Total registers per block: %d\n", prop.regsPerBlock);
        printf("Warp size: %d\n", prop.warpSize);
        printf("Maximum memory pitch: %zu\n", prop.memPitch);
        printf("Maximum threads per block: %d\n", prop.maxThreadsPerBlock);
        // Bug fix: the dimension loops previously shadowed the device loop
        // variable `i`; use a separate counter.
        for (int d = 0; d < 3; ++d)
            printf("Maximum dimension %d of block: %d\n", d, prop.maxThreadsDim[d]);
        for (int d = 0; d < 3; ++d)
            printf("Maximum dimension %d of grid: %d\n", d, prop.maxGridSize[d]);
        printf("Total constant memory: %zu\n", prop.totalConstMem);
        printf("Texture alignment: %zu\n", prop.textureAlignment);
        printf("Concurrent copy and execution: %s\n", (prop.deviceOverlap ? "Yes" : "No"));
        printf("Number of multiprocessors: %d\n", prop.multiProcessorCount);
        printf("Kernel execution timeout: %s\n", (prop.kernelExecTimeoutEnabled ? "Yes" : "No"));
    }
}
|
1,854
|
#include <assert.h>
int IDX(const int r, const int z, const int NrTotal, const int NzTotal)
{
    /* Flatten a 2D (r, z) grid coordinate into a row-major linear index.
       The asserts guard against out-of-range coordinates. */
    assert(r < NrTotal);
    assert(z < NzTotal);
    const int linear = z + NzTotal * r;
    return linear;
}
int nnz_calculator(const int NrInterior, const int NzInterior)
{
    /* Number of nonzeros for the discretised operator: a 5-point stencil on
       the interior plus boundary and corner contributions. */
    const int interior = 5 * NrInterior * NzInterior;
    const int boundary = 6 * (NrInterior + NzInterior);
    return interior + boundary + 16;
}
|
1,855
|
//pass
//--blockDim=128 --gridDim=128 --warp-sync=32 --no-inline
// GPUVerify test kernel (see the //pass header above): each thread adds the
// value at the same offset of the NEXT block's segment to its own element.
// The inter-block read/write overlap is the behaviour under test, so the
// body is left untouched.
__global__ void foo(int* A) {
    A[ blockIdx.x*blockDim.x + threadIdx.x ] += (A[ (blockIdx.x + 1)*blockDim.x + threadIdx.x ]);
}
|
1,856
|
#include "stdio.h"
#include "stdlib.h"
void print_array(int* array, int size)
{
    /* Write each element to stdout followed by a single space. */
    int idx = 0;
    while (idx < size)
    {
        printf("%d ", array[idx]);
        ++idx;
    }
}
void generate_random_array(int* arr, int size)
{
    /* Fill arr[0..size) with pseudo-random values in [0, size * 10). */
    int idx;
    for (idx = 0; idx != size; ++idx)
    {
        arr[idx] = rand() % (size * 10);
    }
}
|
1,857
|
//=============================================================================
//
// Copyright (c) Kitware, Inc.
// All rights reserved.
// See LICENSE.txt for details.
//
// This software is distributed WITHOUT ANY WARRANTY; without even
// the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
// PURPOSE. See the above copyright notice for more information.
//
// Copyright 2015 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
// Copyright 2015 UT-Battelle, LLC.
// Copyright 2015 Los Alamos National Security.
//
// Under the terms of Contract DE-NA0003525 with NTESS,
// the U.S. Government retains certain rights in this software.
// Under the terms of Contract DE-AC52-06NA25396 with Los Alamos National
// Laboratory (LANL), the U.S. Government retains certain rights in
// this software.
//
//=============================================================================
#include <cstdio>
#include <cuda.h>
#include <cuda_runtime.h>
// Print an nvcc --generate-code flag for each distinct GPU architecture
// found, as a semicolon-separated CMake list. Returns 1 when no device is
// usable so the build system can fall back.
int main()
{
    int count = 0;
    if (cudaSuccess != cudaGetDeviceCount(&count))
        return 1;
    if (count == 0)
        return 1;
    int prev_arch = 0;
    for (int device = 0; device < count; ++device)
    {
        cudaDeviceProp prop;
        if (cudaSuccess == cudaGetDeviceProperties(&prop, device))
        {
            int arch = (prop.major * 10) + prop.minor;
            int compute_level = arch;
            //arch 21 has no equivalent compute level.
            if (compute_level == 21)
            {
                compute_level = 20;
            }
            //handle multiple cards of the same architecture
            // NOTE(review): this only deduplicates consecutive identical
            // archs; mixed ordering could emit duplicates — confirm intended.
            if (arch == prev_arch)
            {
                continue;
            }
            prev_arch = arch;
            //we need to print out a semi-colon as this needs to be output
            //as a CMake list which is separated by semicolons
            printf("--generate-code=arch=compute_%d,code=sm_%d;", compute_level, arch);
        }
    }
    return 0;
}
|
1,858
|
#include <stdio.h>
#include <string.h>
#define THREADS_PER_BLOCK 32
// One pairwise-min reduction step: b[k] = min(a[2k], a[2k+1]), with the odd
// trailing element passed through unchanged. `n_output` is the length of b
// (ceil(len / 2)); repeated application reduces `a` to a single minimum.
__global__ void minA_cuda(int* a, int* b, int len, int n_output) {
    int b_index = threadIdx.x + blockIdx.x * THREADS_PER_BLOCK;
    int a_index = b_index * 2;
    if (b_index < n_output && a_index < len) {
        if (a_index == len-1) {
            // Odd-length tail: no partner, copy through.
            b[b_index] = a[a_index];
        }
        else {
            int v1 = a[a_index];
            int v2 = a[a_index + 1];
            if (v2 < v1) {
                b[b_index] = v2;
            }
            else {
                b[b_index] = v1;
            }
        }
    }
}
// One thread per element: b[i] receives the last decimal digit of a[i].
__global__ void last_digit_cuda(int* a, int* b, int len) {
    const int i = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    if (i >= len)
        return;
    b[i] = a[i] % 10;
}
// Read a comma-separated integer list from inp.txt, compute each value's
// last decimal digit on the GPU, and write the results to q1b.txt.
void last_digit() {
    //Get input data from files
    char buff[50000];
    int inp[10000];
    buff[0] = ' ';
    char* token;
    FILE* fp = fopen("inp.txt", "r");
    // Bug fix: fgets(buff+1, sizeof(buff), fp) could write up to
    // buff[sizeof(buff)] — one byte past the end. Account for the offset.
    fgets(buff+1,sizeof(buff)-1, fp);
    token = strtok(buff, ",");
    int len = 0;
    while(token != NULL) {
        // token+1 skips the leading space/bracket of each token — assumes
        // the "x, y, z" input format of inp.txt; TODO confirm.
        inp[len] = atoi(token+1);
        len++;
        token = strtok(NULL, ",");
    }
    int* A = (int* )malloc(sizeof(int) * len);
    int* B = (int* )malloc(sizeof(int) * len);
    for (int i = 0; i < len; i++) {
        A[i] = inp[i];
    }
    //Cuda stuff
    int *d_a, *d_b;
    cudaMalloc(&d_a, sizeof(int) * len);
    cudaMalloc(&d_b, sizeof(int) * len);
    cudaMemcpy(d_a, A, sizeof(int) * len, cudaMemcpyHostToDevice);
    last_digit_cuda<<<(len + THREADS_PER_BLOCK)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, len);
    cudaDeviceSynchronize();
    cudaMemcpy(B, d_b, sizeof(int) * len, cudaMemcpyDeviceToHost);
    //put results in file
    fclose(fp);
    FILE* fp_end = fopen("q1b.txt", "w");
    for (int i = 0; i < len; i++) {
        fprintf(fp_end, "%d", B[i]);
        if (i != len-1) {
            fprintf(fp_end, "%s", ", ");
        }
    }
    //Free up memory
    cudaFree(d_a);
    cudaFree(d_b);
    free(A);
    free(B);
    fclose(fp_end);
}
// Read a comma-separated integer list from inp.txt, reduce it to its minimum
// on the GPU by repeated pairwise-min passes, and write the result to q1a.txt.
void minA() {
    //Get input from files
    char buff[50000];
    int inp[10000];
    buff[0] = ' ';
    char* token;
    FILE* fp = fopen("inp.txt", "r");
    // Bug fix: fgets(buff+1, sizeof(buff), fp) could write one byte past the
    // end of buff; account for the +1 offset.
    fgets(buff+1,sizeof(buff)-1, fp);
    token = strtok(buff, ",");
    int len = 0;
    while(token != NULL) {
        inp[len] = atoi(token+1);
        len++;
        token = strtok(NULL, ",");
    }
    //Copy input to array of proper size
    int* A = (int* )malloc(sizeof(int) * len);
    for (int i = 0; i < len; i++) {
        A[i] = inp[i];
    }
    //Cuda stuff: halve the array each pass until one element remains.
    int B_size = (len + 1) / 2;
    int* B;
    while (len != 1) {
        B = (int* )malloc(sizeof(int) * B_size);
        int *d_a, *d_b;
        cudaMalloc(&d_a, sizeof(int) * len);
        cudaMalloc(&d_b, sizeof(int) * B_size);
        cudaMemcpy(d_a, A, sizeof(int) * len, cudaMemcpyHostToDevice);
        minA_cuda<<<(B_size + THREADS_PER_BLOCK)/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, len, B_size);
        cudaDeviceSynchronize();
        cudaMemcpy(B, d_b, sizeof(int) * B_size, cudaMemcpyDeviceToHost);
        cudaFree(d_a);
        cudaFree(d_b);
        memcpy(A, B, B_size * sizeof(int));
        free(B);
        len = B_size;
        B_size = (len + 1) / 2;
    }
    //Print output to file
    fclose(fp);
    FILE* fp_end = fopen("q1a.txt", "w");
    fprintf(fp_end, "%d", A[0]);
    fclose(fp_end);
    // Bug fix: A was leaked.
    free(A);
}
// Run both exercises: the global minimum, then the per-element last digits.
// cudaDeviceReset between them tears down the context used by minA.
int main(int argc,char **argv)
{
    minA();
    cudaDeviceReset();
    last_digit();
    return 0;
}
|
1,859
|
#include "includes.h"
// One PSO update step: each thread updates a single (particle, dimension)
// component. OMEGA, c1, c2, NUM_OF_PARTICLES and NUM_OF_DIMENSIONS come from
// includes.h. r1/r2 are shared random factors for this step (the same value
// is applied to every component — TODO confirm that is intended rather than
// per-component randomness).
__global__ void kernelUpdateParticle(float *positions,float *velocities,float *pBests,float *gBest,float r1,float r2)
{
    int i=blockIdx.x*blockDim.x+threadIdx.x;
    if(i>=NUM_OF_PARTICLES*NUM_OF_DIMENSIONS)
        return;
    float rp=r1;
    float rg=r2;
    // Standard PSO velocity update: inertia + cognitive + social terms.
    // gBest is indexed modulo the dimension count (one global best vector).
    velocities[i]=OMEGA*velocities[i]+c1*rp*(pBests[i]-positions[i])+c2*rg*(gBest[i%NUM_OF_DIMENSIONS]-positions[i]);
    positions[i]+=velocities[i];
}
|
1,860
|
#include <stdio.h>
#include <sys/resource.h>
#define TILE_SIZE 16
#define SIZE (TILE_SIZE * 256)
#define MALLOC_MATRIX(n) (float*)malloc((n)*(n)*sizeof(float))
// Allocate an n x n float matrix on the device; returns NULL on failure.
float* device_malloc(int n){
    float* m = NULL;
    // Bug fix: the original only recognised cudaErrorMemoryAllocation and
    // returned an indeterminate pointer on any other cudaMalloc error.
    if(cudaMalloc(&m, (size_t)n*n*sizeof(float)) != cudaSuccess) return NULL;
    return m;
}
// Compute res = inp^power entirely in shared memory.
// NOTE(review): the shared tiles hold TILE_SIZE*TILE_SIZE (256) floats but
// are indexed as row*n+col, and only the threads of a single block cooperate
// — this is only valid for n <= TILE_SIZE and a launch where one block covers
// the matrix. Confirm the intended input limits.
__global__ void gpuPower(float* res, float* inp, int n, int power){
    __shared__ float s_res[TILE_SIZE*TILE_SIZE];
    __shared__ float s_inp[TILE_SIZE*TILE_SIZE];
    __shared__ float s_tmp[TILE_SIZE*TILE_SIZE];
    const unsigned int row = blockIdx.y*blockDim.y + threadIdx.y;
    const unsigned int col = blockIdx.x*blockDim.x + threadIdx.x;
    int pow = 0, idx = 0;
    float sum = 0.0f;
    if(row < n && col < n){
        //copy the input to its share
        s_inp[row*n+col] = inp[row*n+col];
        //set the shared result to the identity matrix
        if(row == col) s_res[row*n+col] = 1.0f;
        else s_res[row*n+col]=0.0f;
    }
    __syncthreads();
    for(pow=0; pow<power; pow++){
        //multiply the matrices
        if(row < n && col < n){
            sum = 0.0f;
            for(idx=0; idx<n; idx++){
                sum = sum + s_res[row*n+idx]*s_inp[idx*n+col];
            }
            s_tmp[row*n+col] = sum;
        }
        __syncthreads();
        //copy temp back to result
        if(row < n && col < n) s_res[row*n+col] = s_tmp[row*n+col];
        __syncthreads();
    }
    __syncthreads();
    //copy the result matrix to global res
    if(row < n && col < n) res[row*n+col] = s_res[row*n+col];
    __syncthreads();
}
// Host wrapper: copy `input` to the device, raise it to `power` with
// gpuPower, and copy the result back.
// NOTE(review): device_malloc's NULL return is not checked, and the grid is
// fixed at SIZE/TILE_SIZE blocks even though gpuPower's shared-memory scheme
// only works within one block — confirm against intended matrix sizes.
void power_gpu(float* result, float* input, int n, int power){
    dim3 bdim(TILE_SIZE, TILE_SIZE);
    dim3 gdim(SIZE/TILE_SIZE, SIZE/TILE_SIZE);
    float* d_result = device_malloc(n);
    float* d_input = device_malloc(n);
    cudaMemcpy(d_input, input, n*n*sizeof(float), cudaMemcpyHostToDevice);
    gpuPower<<<gdim, bdim>>>(d_result, d_input, n, power);
    cudaMemcpy(result, d_result, n*n*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_result);
    cudaFree(d_input);
}
// CPU reference: result = input^power for an n x n row-major matrix.
// power == 0 yields the identity matrix.
void power_cpu(float* result, float* input, int n, int power){
    int pow;
    int i, k, r, c;
    float* temp = (float*)malloc(n*n*sizeof(float)); /* scratch product buffer */
    //set result to be the identity matrix
    for(i=0; i<n*n; i++){
        if(i/n == i%n) result[i] = 1.0f;
        else result[i] = 0.0f;
    }
    for(pow=0; pow<power; pow++){
        //multiply result*input -> temp
        for(r=0; r<n; r++) for(c=0; c<n; c++){
            float sum = 0;
            for(k=0; k<n; k++){
                sum = sum + result[r*n+k]*input[k*n+c];
            }
            temp[r*n+c] = sum;
        }
        //copy temp back to result
        for(i=0; i<n*n; i++){
            result[i] = temp[i];
        }
    }
    // Bug fix: the scratch buffer was leaked on every call.
    free(temp);
}
// Print an n x n row-major matrix, one row per line, values separated by
// single spaces and formatted with three decimals.
void printMat(float* mat, int n){
    for(int row=0; row<n; row++){
        for(int col=0; col<n; col++){
            if(col != 0) printf(" ");
            printf("%.3f", mat[row*n+col]);
        }
        printf("\n");
    }
}
// Read n and power from stdin followed by n*n floats, compute the matrix
// power on the GPU, then print the input and the result.
int main() {
    int n;
    int power;
    float* mat = NULL;
    float* result = NULL;
    scanf("%d", &n);
    scanf("%d", &power);
    if(power < 0 || n < 0){
        printf("error\n");
        return 1;
    }
    mat = MALLOC_MATRIX(n);
    result = MALLOC_MATRIX(n);
    if(!mat || !result){
        printf("error\n");
        return 1;
    }
    for(int i=0; i<(n*n); i++){
        if(!scanf("%f", &(mat[i]))){
            printf("error\n");
            free(mat);
            free(result);
            return 1;
        }
    }
    // NOTE(review): gpuPower's shared-memory scheme appears to require
    // n <= TILE_SIZE (see kernel) — confirm input limits.
    power_gpu(result, mat, n, power);
    printMat(mat, n);
    printMat(result, n);
    free(mat);
    free(result);
    return 0;
}
|
1,861
|
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
static const int WORK_SIZE = 10;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
/*
define CUDA_CHECK_RETURN(value)
{
cudaError_t _m_cudaStat = value;
if (_m_cudaStat != cudaSuccess) {
fprintf(stderr, "Error %s at line %d in file %s\n",
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);
exit(1);
} }
*/
// One odd-even transposition pass: on even passes thread `tid` compares the
// pair (2*tid, 2*tid+1); on odd passes the pair (2*tid+1, 2*tid+2). Launched
// with n/2 threads once per pass.
__global__ void sort(int *a,int i,int n)
{
    int tid = threadIdx.x;
    int p;
    int temp;
    if(i%2==0)
    {
        p=tid*2;
        // Bug fix: guard the even phase too; without it a[p+1] reads one
        // past the end whenever n is odd.
        if(p<n-1)
        {
            if(a[p]>a[p+1])
            {
                temp = a[p];
                a[p] = a[p+1];
                a[p+1] =temp;
            }
        }
    }
    else
    {
        p=tid*2+1;
        if(p<n-1)
        {
            if(a[p]>a[p+1])
            {
                temp = a[p];
                a[p] = a[p+1];
                a[p+1] =temp;
            }
        }
    }
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
/**
 * Host function: read WORK_SIZE ints, run WORK_SIZE odd-even transposition
 * passes on the device, then print the sorted array.
 */
int main(void)
{
    int a[WORK_SIZE];
    int i;
    int *da;
    cudaMalloc((void**) &da, sizeof(int) * WORK_SIZE);
    for(i=0;i<WORK_SIZE;i++)
    {
        printf("%d:",i);
        scanf("%d",&a[i]);
    }
    cudaMemcpy(da, a, sizeof(int) * WORK_SIZE, cudaMemcpyHostToDevice);
    // Passes must run in order; each kernel handles one pass.
    for(i=0;i<WORK_SIZE;i++)
    {
        sort<<<1,WORK_SIZE/2>>>(da,i,WORK_SIZE);
    }
    // Bug fix: cudaThreadSynchronize() is long deprecated — use
    // cudaDeviceSynchronize() and actually report the deferred error.
    cudaDeviceSynchronize(); // Wait for the GPU launched work to complete
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
    cudaMemcpy(a, da, sizeof(int) * WORK_SIZE, cudaMemcpyDeviceToHost);
    for(i=0;i<WORK_SIZE;i++)
    {
        printf("%d\t",a[i]);
    }
    printf("\n");
    cudaFree((void*) da);
    return 0;
}
|
1,862
|
//data-racer
#include <cuda.h>
#include <stdio.h>
#define SIZE 2
#define TILES 4
#define LENGTH (TILES * SIZE)
#define N 2
// Tiled transpose kernel from a data-race benchmark (see the "data-racer"
// header comment): `tile[x][y] = tile[y][x]` reads another thread's element
// with no barrier between the preceding write and this read, so the race is
// the point of the test — the body is intentionally left unchanged.
__global__ void matrix_transpose(float* A)
{
    __shared__ float tile [SIZE][SIZE];
    int x = threadIdx.x;
    int y = threadIdx.y;
    int tile_x = blockIdx.x;
    int tile_y = blockIdx.y;
    tile[x][y] = A[((x + (tile_x * SIZE)) * LENGTH) + (y + (tile_y * SIZE))];
    tile[x][y] = tile[y][x];
    __syncthreads();
    A[((x + (tile_y * SIZE)) * LENGTH) + (y + (tile_x * SIZE))] = tile[x][y];
}
1,863
|
#include <stdio.h>
#include <stdlib.h>
#define ROWS 4
#define COLUMNS 5
typedef struct mystruct
{
int a[ROWS];
int **data;
}mystruct;
// Single-thread kernel: print the ROWS x COLUMNS table reachable through
// d_var->data. Requires d_var->data (and each row pointer) to point at
// device memory.
__global__ void printKernel(mystruct *d_var)
{
    int i, j;
    for(i = 0; i < ROWS; i++)
    {
        for(j = 0; j < COLUMNS; j++)
        {
            printf("%d\t", d_var->data[i][j]);
        }
        printf("\n");
    }
}
// Build a struct containing a dynamic 2D array on the host, deep-copy it to
// the device, and print it from a kernel.
int main()
{
    int i, j, k=1;
    mystruct *var, *d_var;
    /* Bug fix: `var` was dereferenced while uninitialised; allocate it. */
    var = (mystruct*)malloc(sizeof(mystruct));
    /* Allocate and initialize a dynamic 2D array on CPU */
    var->data = (int**)malloc(ROWS*sizeof(int*));
    for (i = 0; i < ROWS; i++)
        var->data[i] = (int*)malloc(COLUMNS*sizeof(int));
    for(i = 0; i < ROWS; i++)
    {
        var->a[i] = 2;
        for(j = 0; j < COLUMNS; j++)
        {
            var->data[i][j] = k++;
        }
    }
    /* Bug fix: the original cudaMalloc'd into d_var->data, dereferencing a
       device pointer on the host. Instead build the device row table on the
       host and copy it over (a proper deep copy). */
    int *d_rows[ROWS];
    for(i = 0; i < ROWS; i++)
    {
        cudaMalloc((void**)&d_rows[i], COLUMNS*sizeof(int));
        cudaMemcpy(d_rows[i], var->data[i], COLUMNS*sizeof(int), cudaMemcpyHostToDevice);
    }
    int **d_data;
    cudaMalloc((void**)&d_data, ROWS*sizeof(int*));
    cudaMemcpy(d_data, d_rows, ROWS*sizeof(int*), cudaMemcpyHostToDevice);
    /* Device-side struct: same `a` contents, but `data` points at device
       memory. */
    mystruct staging = *var;
    staging.data = d_data;
    cudaMalloc((void**)&d_var, sizeof(mystruct));
    cudaMemcpy(d_var, &staging, sizeof(mystruct), cudaMemcpyHostToDevice);
    printKernel<<<1,1>>>(d_var);
    cudaDeviceSynchronize();
    /* Release everything (the original leaked the row allocations). */
    for(i = 0; i < ROWS; i++)
    {
        cudaFree(d_rows[i]);
        free(var->data[i]);
    }
    cudaFree(d_data);
    cudaFree(d_var);
    free(var->data);
    free(var);
    return 0;
}
|
1,864
|
#include <iostream>
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <cuda.h>
#include <stdio.h>
#include <cassert>
//define the chunk sizes that each threadblock will work on
#define BLKXSIZE 32
#define BLKYSIZE 4
#define BLKZSIZE 4
#define Q 19
#define lx 10
#define ly 10
#define lz 5
// for cuda error checking
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
return 1; \
} \
} while (0)
// Exchange a and b via a copy-constructed temporary.
template <typename T> __device__ void swap ( T& a, T& b )
{
    T tmp(a);
    a = b;
    b = tmp;
}
// Build an lx x ly x lz x Q host array addressed through nested pointers.
// NOTE(review): this mixes new[] for the top level with malloc for the
// payload, and advances pointers of one indirection level by element counts
// of another (e.g. a Ttype*** advanced by ly*lz*Q) — the layout only works
// by accident of pointer sizes, and free_4d_harray's delete[] calls do not
// match these allocations. This pair needs rework; flag for the author.
template <typename Ttype>
Ttype**** create_4d_harray() {
    Ttype**** array;
    array = new Ttype***[lx];
    assert(array != nullptr);
    array[0] = (Ttype***)malloc(sizeof(Ttype)*lx*ly*lz*Q);
    for (int i = 1; i < lx; i++) {//from i=0 to i=1;
        // array[i] = new Ttype**[ly];
        array[i] = array[i-1] + ly*lz*Q;
        assert(array[i] != nullptr);
        array[i][0] = (Ttype**)malloc(sizeof(Ttype)*ly*lz*Q);
        for (int j = 1; j < ly; j++) {
            //array[i][j] = new Ttype*[lz];
            array[i][j] = array[i][j-1] + lz*Q;
            assert(array[i][j] != nullptr);
            array[i][j][0] = (Ttype*)malloc(sizeof(Ttype)*Q);
            for (int k = 1; k < lz; k++) {
                //array[i][j][k] = new Ttype[n];
                array[i][j][k] = array[i][j][k-1]+ Q;
                assert(array[i][j][k] != nullptr);
            }
        }
    }
    return array;
}
// Release a 4D host array.
// NOTE(review): these delete[] calls do not match create_4d_harray, which
// allocates with a mix of new[] and malloc and carves interior pointers out
// of larger blocks — most of these deletes hit pointers that were never
// returned by an allocator (undefined behaviour). Needs rework together with
// create_4d_harray; left unchanged pending that.
template <typename Ttype>
void free_4d_harray(Ttype**** array) {
    for (size_t i = 0; i < lx; ++i) {
        for (size_t j = 0; j < ly; ++j) {
            for (size_t k = 0; k < lz; ++k) {
                delete[] array[i][j][k];
            }
            delete[] array[i][j];
        }
        delete[] array[i];
    }
    delete[] array;
}
// Allocate a contiguous lx x ly x lz x Q device array and return it as a
// pointer to a 3D array type, so device code can index it with [][][][]
// syntax directly.
template <typename Ttype>
Ttype (*create_4d_darray())[ly][lz][Q] {
    Ttype(*dptr_4d_array)[ly][lz][Q];
    cudaMalloc((void**)&dptr_4d_array,
        (lx * ly * lz * Q) * sizeof(Ttype));
    return dptr_4d_array;
}
// For every lattice site (idx, idy, idz), swap distribution components
// 1..9 with 10..18 (i and i+9). One thread per site, 3D launch.
__global__ void gpu_array_swap(int ptr_gpu[][ly][lz][Q]) {
    unsigned idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned idy = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned idz = blockIdx.z * blockDim.z + threadIdx.z;
    if ((idx < lx) && (idy < ly) && (idz < lz)) {
        for (size_t i = 1; i <= 9; i++)
            swap(ptr_gpu[idx][idy][idz][i], ptr_gpu[idx][idy][idz][i + 9]);
    }
}
// Fill every element of the lx x ly x lz x Q array with a running counter.
// Bug fix: the original looped i over lz and k over lx while indexing
// array[i][j][k][l]; k reached lx-1 = 9 against a third-dimension extent of
// lz = 5, writing out of bounds. Loop extents now match the index order.
void set_array(int**** array) {
    int m = 0;
    for (int l = 0; l < Q; ++l) {
        for (int i = 0; i < lx; ++i) {
            for (int j = 0; j < ly; ++j) {
                for (int k = 0; k < lz; ++k) {
                    array[i][j][k][l] = ++m;
                }
            }
        }
    }
}
// Print the whole 4D array: one line of Q values per (i, j, k) site.
void print_array(int**** array) {
    for (int i = 0; i < lx; ++i) {
        for (int j = 0; j < ly; ++j) {
            for (int k = 0; k < lz; ++k) {
                for (int l = 0; l < Q; ++l) {
                    std::cout << array[i][j][k][l] << " ";
                    if (l == (Q - 1)) std::cout << std::endl;
                }
            }
        }
    }
}
// Fill a 4D host array, copy it to the device, swap components on the GPU,
// and copy it back.
// NOTE(review): host_4d_array is a table of pointers, not the contiguous int
// payload — the cudaMemcpy calls below copy pointer values rather than data.
// The copies presumably should start from &host_4d_array[0][0][0][0];
// confirm with the author (left unchanged pending the array rework noted on
// create_4d_harray).
int main() {
    int**** host_4d_array=create_4d_harray<int>();
    int(* device_4d_array)[ly][lz][Q]=create_4d_darray<int>();
    const dim3 blockSize(BLKXSIZE, BLKYSIZE, BLKZSIZE);
    const dim3 gridSize(((lx + BLKXSIZE - 1) / BLKXSIZE),
        ((ly + BLKYSIZE - 1) / BLKYSIZE),
        ((lz + BLKZSIZE - 1) / BLKZSIZE));
    set_array(host_4d_array);
    print_array(host_4d_array);
    // allocate GPU device buffers
    cudaCheckErrors("Failed to allocate device buffer");
    cudaMemcpy(device_4d_array, host_4d_array, (lx * ly * lz * Q) * sizeof(int),
        cudaMemcpyHostToDevice);
    // compute result
    gpu_array_swap<<<gridSize, blockSize>>>(device_4d_array);
    cudaCheckErrors("Kernel launch failure");
    // copy output data back to host
    cudaMemcpy(host_4d_array, device_4d_array, ((lx * ly * lz * Q) * sizeof(int)),
        cudaMemcpyDeviceToHost);
    cudaCheckErrors("CUDA memcpy failure");
    free_4d_harray(host_4d_array);
    cudaFree(device_4d_array);
    cudaCheckErrors("cudaFree fail");
    return 0;
}
|
1,865
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
// Convert a packed RGB image to grayscale with integer BT.709-style weights.
// The launch maps grid x over image rows and grid y over image columns (see
// the host's gridsize computation).
__global__ void image_conversion(unsigned char *colorImage, unsigned char *grayImage, int imageWidth, int imageHeight)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;  // row index
    int y = threadIdx.y + blockIdx.y * blockDim.y;  // column index
    // Bug fix: the linear index must use the image width, not blockDim.y;
    // the old x*blockDim.y + y mapped distinct (x, y) pairs onto the same
    // pixel and left most of the image unprocessed.
    int idx = x*imageWidth + y;
    if(x < imageHeight && y < imageWidth)
    {
        int r,g,b;
        r = colorImage[3*idx];
        g = colorImage[3*idx+1];
        b = colorImage[3*idx+2];
        grayImage[idx] = (unsigned char)((21*r + 71*g + 7*b)/100);
    }
}
// Benchmark: for a range of image sizes (Lenna_i.ppm) and block shapes,
// time the grayscale conversion kernel and log one line per configuration.
int main( int argc, char* argv[] )
{
    FILE *fptr = fopen("block_parallel_image_conversion.txt", "w");
    int i, j;
    for (i = 7; i < 28; i++)
    {
        for(j=5; j<11; j++)
        {
            unsigned char *colorImage_cpu;
            unsigned char *grayImage_cpu;
            char header[100];
            long long imageWidth, imageHeight, ccv;
            char filename[50];
            snprintf(filename, sizeof(filename), "Lenna_%d.ppm", i);
            FILE *color = fopen(filename, "rb");
            fscanf(color, "%s\n%lld %lld\n%lld\n", header, &imageWidth, &imageHeight, &ccv);
            size_t bytes = imageWidth*imageHeight*sizeof(unsigned char);
            colorImage_cpu = (unsigned char*)malloc(bytes*3);
            grayImage_cpu = (unsigned char*)malloc(bytes);
            fread(colorImage_cpu, sizeof(unsigned char), imageWidth*imageHeight*3, color);
            fclose(color);
            unsigned char *colorImage_gpu;
            unsigned char *grayImage_gpu;
            cudaMalloc(&colorImage_gpu, bytes*3);
            cudaMalloc(&grayImage_gpu, bytes);
            cudaEvent_t start, stop;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaMemcpy(colorImage_gpu, colorImage_cpu, bytes*3, cudaMemcpyHostToDevice);
            cudaMemcpy(grayImage_gpu, grayImage_cpu, bytes, cudaMemcpyHostToDevice);
            // Block shape: 2^j threads split as evenly as possible in 2D.
            int x, y;
            x = pow(2, floor((float)j/2.0));
            y = pow(2, ceil((float)j/2.0));
            dim3 blocksize(x, y);
            dim3 gridsize((int)ceil((float)imageHeight/(float)x),(int)ceil((float)imageWidth/(float)y));
            cudaEventRecord(start);
            image_conversion<<<gridsize, blocksize>>>(colorImage_gpu, grayImage_gpu, imageWidth, imageHeight);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            float milliseconds = 0;
            cudaEventElapsedTime(&milliseconds, start, stop);
            cudaMemcpy( grayImage_cpu, grayImage_gpu, bytes, cudaMemcpyDeviceToHost );
            cudaFree(colorImage_gpu);
            cudaFree(grayImage_gpu);
            // Bug fix: imageWidth/imageHeight are long long, so "%ld" was the
            // wrong conversion; use "%lld" (and "%f" for the float, which is
            // promoted to double through varargs).
            fprintf(fptr, "%d %d %dx%d %lldx%lld %f\n", i, j, x, y, imageWidth, imageHeight, milliseconds);
            free(colorImage_cpu);
            free(grayImage_cpu);
        }
    }
    fclose(fptr);
    return 0;
}
|
1,866
|
#include <stdio.h>
#include <cuda.h>
#define MAX_TESS_POINTS 32
struct BezierLine{
float2 CP[3];//control points for the line
float2 vertexPos[MAX_TESS_POINTS];//Vertex position array to tessellate into
//Number of tessellated vertices
int nVertices;
};
// Component-wise vector addition.
__forceinline__ __device__ float2 operator+(float2 a,float2 b){
    float2 c;
    c.x = a.x + b.x;
    c.y = a.y + b.y;
    return c;
}
// Component-wise vector subtraction.
__forceinline__ __device__ float2 operator-(float2 a, float2 b){
    float2 c;
    c.x = a.x - b.x;
    c.y = a.y - b.y;
    return c;
}
// Scalar * vector.
__forceinline__ __device__ float2 operator*(float a, float2 b){
    float2 c;
    c.x = a * b.x;
    c.y = a * b.y;
    return c;
}
// Euclidean length of a 2D vector.
__forceinline__ __device__ float length(float2 a){
    return sqrtf(a.x * a.x + a.y * a.y);
}
// Curvature estimate for the block's Bezier segment: distance of the middle
// control point from the chord midpoint, normalised by the chord length.
// Uses blockIdx.x to select the segment (one block per line).
__device__ float computeCurvature(BezierLine* bLines){
    int bidx = blockIdx.x;
    float curvature = length(bLines[bidx].CP[1] - 0.5f * (bLines[bidx].CP[0] + bLines[bidx].CP[2]))/length(bLines[bidx].CP[2] - bLines[bidx].CP[0]);
    return curvature;
}
// Tessellate each quadratic Bezier segment: one block per line, the
// tessellation density (4..32 points) chosen from the line's curvature.
// Each thread evaluates the Bernstein basis at its parameter value u.
__global__ void computeBezierLines(BezierLine* bLines, int nLines){
    int bidx = blockIdx.x;
    if(bidx < nLines){
        //compute the curvature of the line
        float curvature = computeCurvature(bLines);
        //From the curvature, compute the number of tessellation points
        int nTessPoints = min(max((int)(curvature * 16.0f),4),32);
        bLines[bidx].nVertices = nTessPoints;
        //Loop through vertices to be tessellated, incrementing by blockDim.x
        for(int inc = 0;inc < nTessPoints;inc += blockDim.x){
            int idx = inc + threadIdx.x;//Compute a unique index for this point
            if(idx < nTessPoints){
                float u = (float)idx /(float)(nTessPoints - 1);//compute u from idx
                float omu = 1.0f - u;
                // Quadratic Bernstein basis: (1-u)^2, 2u(1-u), u^2.
                float B3u[3];
                B3u[0] = omu * omu;
                B3u[1] = 2.0f * u * omu;
                B3u[2] = u * u;
                float2 position = {0,0};
                for(int i = 0;i<3;i++){
                    position = position + B3u[i] * bLines[bidx].CP[i];
                }
                bLines[bidx].vertexPos[idx] = position;
            }
        }
    }
}
#define N_LINES 256
#define BLOCK_DIM 32
// Chain N_LINES quadratic Bezier segments: each segment starts where the
// previous one ended; the other two control points are uniform in [0,1]^2.
void initializeBLines(BezierLine * bLines_h){
    float2 prevEnd = {0,0};
    for(int line = 0; line < N_LINES; line++){
        bLines_h[line].CP[0] = prevEnd;
        for(int cp = 1; cp < 3; cp++){
            bLines_h[line].CP[cp].x = (float)rand()/(float)RAND_MAX;
            bLines_h[line].CP[cp].y = (float)rand()/(float)RAND_MAX;
        }
        prevEnd = bLines_h[line].CP[2];
        bLines_h[line].nVertices = 0;
    }
}
// Build the line set on the host, tessellate on the device, then release.
// NOTE(review): the tessellated vertices are never copied back to the host;
// cudaFree's implicit synchronisation is the only thing that waits for the
// kernel — confirm this demo is intentionally compute-only.
int main(){
    BezierLine * bLines_h = new BezierLine[N_LINES];
    initializeBLines(bLines_h);
    BezierLine * bLines_d;
    cudaMalloc((void**)&bLines_d,N_LINES*sizeof(BezierLine));
    cudaMemcpy(bLines_d,bLines_h,N_LINES*sizeof(BezierLine),cudaMemcpyHostToDevice);
    computeBezierLines<<<N_LINES,BLOCK_DIM>>>(bLines_d,N_LINES);
    cudaFree(bLines_d);
    delete[] bLines_h;
}
|
1,867
|
#include "includes.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256 // threads per block
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
// Grid-strided sum reduction: each thread accumulates a strided series of
// input pairs into shared memory, then a tree reduction runs down to one
// warp, which is finished with a manually-unrolled volatile sequence
// (pre-Volta implicit-warp-sync idiom).
// NOTE(review): `threadIdx.x <= warp_size` admits 33 threads into the
// unrolled section (thread 32 writes scratch[32] while thread 0 reads it) —
// looks like it should be `< warp_size`; confirm against the expected sums.
__global__ void kernel5(dtype *g_idata, dtype *g_odata, unsigned int n)
{
    __shared__  volatile dtype scratch[MAX_THREADS];
    unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
    unsigned int blockDimNew = blockDim.x * 2; // since the new blockDim will be halved for the loop iterations
    unsigned int i = (bid * blockDimNew) + threadIdx.x;
    unsigned int gridSize = blockDim.x * 2 * gridDim.x;
    // each thread sums up 512 elements before storing into shared array
    scratch[threadIdx.x] = 0;
    while(i < n) {
        scratch[threadIdx.x] += g_idata[i] + g_idata[i + blockDim.x];
        i += gridSize; //stride length
    }
    __syncthreads ();
    int warp_size = 32;
    for(int stride = (blockDim.x/2); stride > warp_size; stride = (stride/2)) { //repeat until stride is 32 (one warp left at this point and no active threads)
        if(threadIdx.x < stride) { // check index range
            scratch[threadIdx.x] += scratch[threadIdx.x + stride];
        }
        __syncthreads ();
    }
    // manually reduce
    if(threadIdx.x <= warp_size)
    {
        scratch[threadIdx.x] += scratch[threadIdx.x + warp_size];
        scratch[threadIdx.x] += scratch[threadIdx.x + warp_size/2];
        scratch[threadIdx.x] += scratch[threadIdx.x + warp_size/4];
        scratch[threadIdx.x] += scratch[threadIdx.x + warp_size/8];
        scratch[threadIdx.x] += scratch[threadIdx.x + warp_size/16];
        scratch[threadIdx.x] += scratch[threadIdx.x + 1];
    }
    __syncthreads ();
    if(threadIdx.x == 0) { // copy back to global array
        g_odata[bid] = scratch[0];
    }
}
|
1,868
|
/*
CSC691 GPU programming
Project 3: Pi Time
Jiajie Xiao
Oct 23, 2017
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define CHUNK 100000
// Histogram the decimal digits of input[0..len) into hist[0..9] using a
// per-block shared partial histogram that thread 0 merges into global memory.
__global__ void partialHist(char *input, int len, int *hist)
{
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    // Bug fixes vs. the original:
    //  - the shared array and the __syncthreads() calls were inside the
    //    `i < len` branch, so out-of-range threads skipped the barriers
    //    (undefined behaviour for divergent __syncthreads);
    //  - input[i] was read before the bounds check (out-of-range read);
    //  - thread 0 reused `i` as its init-loop counter, clobbering it.
    __shared__ int partial_sum[10];
    // thread 0 is responsible for initializing partial_sum
    if (threadIdx.x == 0)
    {
        for (int bin = 0; bin < 10; bin++)
            partial_sum[bin] = 0;
    }
    __syncthreads(); // each in-range thread updates the partial sum
    if (i < len)
    {
        int number = input[i]-'0';
        atomicAdd(&partial_sum[number], 1);
    }
    __syncthreads();
    // thread 0 updates the total sum
    if (threadIdx.x == 0)
    {
        for (int bin = 0; bin < 10; bin++)
            atomicAdd(&hist[bin], partial_sum[bin]);
    }
}
// Stream a digit file in CHUNK-sized reads, histogram the digits on the GPU
// (optionally truncated to argv[2] digits), time the GPU work, and append
// the counts to hist.txt.
int main(int argc, char **argv)
{
    FILE *inputFile, *outputFile;
    int numTruncated = -1;
    if (argc < 2)
    {
        printf("An input file name is required.");
        return -1;
    }
    else if (argc >2)
    {
        numTruncated = atoi(argv[2]);
        if (numTruncated<1)
        {
            printf("Please type positive number of digits to be evaluated.\n");
            return -1;
        }
    }
    inputFile = fopen(argv[1],"r");
    // Bug fix: ferror() on a NULL stream is undefined behaviour and never
    // detects a failed fopen; check for NULL instead.
    if (inputFile == NULL)
    {
        perror("Error: ");
        return -1;
    }
    else
    {
        char buf[CHUNK];
        int histc[10] = {0,0,0,0,0,0,0,0,0,0};
        size_t nread;
        int *dev_histc;
        cudaMalloc((void**)&dev_histc, 10 * sizeof(int));
        cudaMemcpy(dev_histc, histc, 10 * sizeof(int), cudaMemcpyHostToDevice);
        char *dev_buf;
        cudaMalloc((void**)&dev_buf, CHUNK * sizeof(char));
        cudaEvent_t start_gpu, stop_gpu;
        cudaEventCreate(&start_gpu);
        cudaEventCreate(&stop_gpu);
        cudaEventRecord(start_gpu);
        int NumDigitsHistogramed = 0, Finished = 0;
        while((nread = fread(buf, 1, sizeof(buf), inputFile)) > 0 && Finished !=1)
        {
            // Bug fix: copy only the bytes actually read; the final chunk is
            // usually shorter than CHUNK, and the tail was uninitialised.
            cudaMemcpy(dev_buf, buf, nread * sizeof(char), cudaMemcpyHostToDevice);
            if (numTruncated == -1 || NumDigitsHistogramed + (int)nread < numTruncated)
            {
                partialHist <<< 100,1000 >>> (dev_buf, (int) nread, dev_histc);
                NumDigitsHistogramed += (int)nread;
            }
            else // The streaming is approaching the selected digit count
            {
                partialHist <<< 100,1000 >>> (dev_buf, numTruncated - NumDigitsHistogramed, dev_histc);
                NumDigitsHistogramed = numTruncated;
                Finished = 1;
            }
        }
        cudaEventRecord(stop_gpu);
        cudaMemcpy(histc, dev_histc, 10 * sizeof(int), cudaMemcpyDeviceToHost);
        cudaFree(dev_histc);
        cudaFree(dev_buf);
        cudaEventSynchronize(stop_gpu);
        float milliseconds_gpu = 0;
        cudaEventElapsedTime(&milliseconds_gpu, start_gpu, stop_gpu);
        fclose(inputFile);
        printf("The histograming calculation time (ms): %f\n", milliseconds_gpu);
        outputFile = fopen("hist.txt", "a");
        // Bug fix: the original tested ferror(inputFile) here — a stream that
        // had already been fclose'd (undefined behaviour) — instead of the
        // file it just opened.
        if (outputFile == NULL)
        {
            perror("Error: ");
            return -1;
        }
        else
        {
            fprintf(outputFile, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n", NumDigitsHistogramed, histc[0], histc[1], histc[2], histc[3], histc[4], histc[5], histc[6], histc[7], histc[8], histc[9] );
            fclose(outputFile);
        }
    }
    return 0;
}
|
1,869
|
/*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
unsigned int filter_radius;
#define FILTER_LENGTH (2 * filter_radius + 1)
#define ABS(val) ((val) < 0.0 ? (-(val)) : (val))
#define accuracy 0.00005
#define ArraySize imageW *imageH
#define ERROR -1
#define FILTER_R_X2 2*filter_radius
#define SH_MEM_SIZE 32
#define NUMBLOCKS 4
typedef double dataType;
__constant__ dataType d_Filter[65536/sizeof(dataType)];
// This checks for cuda errors
// NOTE(review): both macros below expand references to h_Filter, h_Input,
// h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU and use
// `return`, so they are only usable inside functions (here: main) where all
// of those identifiers are in scope.
//
// Checks cudaGetLastError(); on failure prints the error with file/line,
// frees every buffer, and returns ERROR from the enclosing function.
#define cudaErrorCheck() \
{ \
cudaError_t error = cudaGetLastError(); \
if (error != cudaSuccess) \
{ \
printf("Cuda Error Found %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(error)); \
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU); \
return (ERROR); \
} \
}
// cudaMalloc + zero-fill (calloc analogue) for `size` elements of
// `sizeOfElement` bytes; on allocation failure frees everything and returns
// ERROR from the enclosing function.
#define cudaCalloc(pointer, size, sizeOfElement) \
{ \
cudaError_t err = cudaMalloc(pointer, size * sizeOfElement); \
if (err != cudaSuccess) \
{ \
printf("Error allocating memory on host: %s\n", cudaGetErrorString(err)); \
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU); \
return (ERROR); \
} \
cudaMemset(*pointer, 0.0, size *sizeOfElement); \
}
////////////////////////////////////////////////////////////////////////////////
// Kernel Row Convolution Filter
////////////////////////////////////////////////////////////////////////////////
// Horizontal (row) convolution pass; one thread per output pixel.
// The image carries a filterR-wide halo on each side, so every in-image
// pixel can read its full neighbourhood without bounds checks.
// d_Filter (constant memory) holds the 2*filterR+1 filter taps.
__global__ void convolutionRow(dataType *Input, dataType *Output, int filterR, int imageW)
{
    // Coordinates inside the padded image (shifted by the halo width).
    const int ix = blockIdx.x * blockDim.x + threadIdx.x + filterR;
    const int iy = blockIdx.y * blockDim.y + threadIdx.y + filterR;
    const int paddedW = imageW + 2 * filterR;

    dataType acc = 0;
    for (int t = -filterR; t <= filterR; t++)
        acc += Input[iy * paddedW + (ix + t)] * d_Filter[filterR - t];

    // Single global store per thread.
    Output[iy * paddedW + ix] = acc;
}
////////////////////////////////////////////////////////////////////////////////
// Kernel Column Convolution Filter
////////////////////////////////////////////////////////////////////////////////
// Vertical (column) convolution pass; one thread per output pixel.
// Mirrors convolutionRow but walks the neighbourhood along y.
__global__ void convolutionColumn(dataType *Input, dataType *Output, int filterR, int imageW, int imageH)
{
    dataType sum = 0;
    int d, k;
    int ix = blockIdx.x * blockDim.x + threadIdx.x + filterR;
    int iy = blockIdx.y * blockDim.y + threadIdx.y + filterR;
    int imageWithPaddingW = imageW + 2 * filterR;
    for (k = -filterR; k <= filterR; k++)
    {
        d = iy + k;
        sum += Input[d * imageWithPaddingW + ix] * d_Filter[filterR - k];
    }
    // Moved out of the loop: the original stored the partial sum to global
    // memory on every filter tap (redundant writes; the row kernel already
    // writes once per thread).
    Output[iy * imageWithPaddingW + ix] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Kernel Row Convolution Filter using Shared Memory
////////////////////////////////////////////////////////////////////////////////
// Row convolution using a shared-memory tile of one padded row-strip.
// Expected launch: blockDim = (32, 32) with dynamic shared memory of
// 32 * SH_MEM_SIZE_PAD elements, where SH_MEM_SIZE_PAD = 32 + 2*filterR.
// NOTE(review): the tile load hard-codes /32, so it assumes blockDim.x == 32
// and SH_MEM_SIZE_PAD a multiple of 32 — confirm at the call site.
__global__ void convolutionRowSharedMem(dataType *Input, dataType *Output, int filterR, int imageW, int SH_MEM_SIZE_PAD)
{
    dataType sum = 0;
    int d, k;
    // x offset within the padded shared tile for this thread's output pixel
    int tx = threadIdx.x + filterR;
    // global coordinates inside the padded image
    int ix = blockIdx.x * blockDim.x + threadIdx.x + filterR;
    int iy = blockIdx.y * blockDim.y + threadIdx.y + filterR;
    //indexes in arrays including padding
    int ptx = threadIdx.x;
    int pty = threadIdx.y;
    int pix = blockIdx.x * blockDim.x + threadIdx.x;
    int imageWithPaddingW = imageW + 2 * filterR;
    //shared memory for Input
    extern __shared__ dataType s_Input[]; // shared memory with padding
    //collaboratively load tiles into __shared__
    // Each thread copies SH_MEM_SIZE_PAD/32 consecutive elements of its tile
    // row so the whole padded strip (including left/right halo) is staged.
    for (int i = 0; i < SH_MEM_SIZE_PAD/32; i++){
        s_Input[pty * SH_MEM_SIZE_PAD + (SH_MEM_SIZE_PAD / 32) * ptx + i] = Input[iy * imageWithPaddingW + (SH_MEM_SIZE_PAD / 32) * pix + i - ((SH_MEM_SIZE_PAD / 32) - 1) * (blockIdx.x * blockDim.x)];
    }
    __syncthreads(); // tile must be fully staged before any thread reads it
    for (k = -filterR; k <= filterR; k++)
    {
        d = tx + k;
        sum += s_Input[pty * SH_MEM_SIZE_PAD + d] * d_Filter[filterR - k];
    }
    Output[iy * imageWithPaddingW + ix] = sum; //1 time for each thread
}
////////////////////////////////////////////////////////////////////////////////
// Kernel Column Convolution Filter using Shared Memory
////////////////////////////////////////////////////////////////////////////////
// Column convolution using a shared-memory tile of one padded column-strip.
// Expected launch: blockDim = (32, 32) with dynamic shared memory of
// 32 * SH_MEM_SIZE_PAD elements (SH_MEM_SIZE_PAD = 32 + 2*filterR).
// NOTE(review): same hard-coded /32 assumptions as the row kernel — confirm
// blockDim.y == 32 at the call site.
__global__ void convolutionColumnSharedMem(dataType *Input, dataType *Output, int filterR, int imageW, int imageH, int SH_MEM_SIZE_PAD)
{
    dataType sum = 0;
    int d, k;
    // y offset within the padded shared tile for this thread's output pixel
    int ty = threadIdx.y + filterR;
    // global coordinates inside the padded image
    int ix = blockIdx.x * blockDim.x + threadIdx.x + filterR;
    int iy = blockIdx.y * blockDim.y + threadIdx.y + filterR;
    //indexes in arrays including padding
    int ptx = threadIdx.x;
    int pty = threadIdx.y;
    int piy = blockIdx.y * blockDim.y + pty;
    int imageWithPaddingW = imageW + 2 * filterR;
    //shared memory for Input
    extern __shared__ dataType s_Input[];
    //collaboratively load tiles into __shared__
    // Each thread stages SH_MEM_SIZE_PAD/32 rows of its column, covering the
    // top/bottom halo of the strip.
    for (int i = 0; i < SH_MEM_SIZE_PAD/32; i++){
        s_Input[(pty * (SH_MEM_SIZE_PAD / 32) + i) * SH_MEM_SIZE + ptx] = Input[(piy * (SH_MEM_SIZE_PAD / 32) + i - ((SH_MEM_SIZE_PAD / 32) - 1)*(blockIdx.y * blockDim.y)) * imageWithPaddingW + ix];
    }
    __syncthreads(); // tile must be fully staged before any thread reads it
    for (k = -filterR; k <= filterR; k++)
    {
        d = ty + k;
        sum += s_Input[d * SH_MEM_SIZE + ptx] * d_Filter[filterR - k];
    }
    Output[iy * imageWithPaddingW + ix] = sum; //One time for each thread
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
// Host reference: horizontal pass of the separable convolution.
// The image is stored with a filterR-wide halo on every side, so valid
// pixels occupy [filterR, imageW+filterR) x [filterR, imageH+filterR).
void convolutionRowCPU(dataType *h_Dst, dataType *h_Src, dataType *h_Filter,
                       int imageW, int imageH, int filterR)
{
    const int paddedW = imageW + 2 * filterR;
    for (int row = filterR; row < imageH + filterR; row++)
    {
        for (int col = filterR; col < imageW + filterR; col++)
        {
            dataType acc = 0;
            // Taps are accumulated in ascending offset order, matching the GPU.
            for (int t = -filterR; t <= filterR; t++)
                acc += h_Src[row * paddedW + (col + t)] * h_Filter[filterR - t];
            h_Dst[row * paddedW + col] = acc;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
// Host reference: vertical pass of the separable convolution.
// Mirrors convolutionRowCPU but walks the neighbourhood along y.
void convolutionColumnCPU(dataType *h_Dst, dataType *h_Src, dataType *h_Filter,
                          int imageW, int imageH, int filterR)
{
    const int paddedW = imageW + 2 * filterR;
    for (int row = filterR; row < imageH + filterR; row++)
    {
        for (int col = filterR; col < imageW + filterR; col++)
        {
            dataType acc = 0;
            for (int t = -filterR; t <= filterR; t++)
                acc += h_Src[(row + t) * paddedW + col] * h_Filter[filterR - t];
            h_Dst[row * paddedW + col] = acc;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Free Alocated Host and Device Memory
////////////////////////////////////////////////////////////////////////////////
// Releases every host buffer, then every device buffer, then resets the
// device. NULL pointers are skipped. Returns ERROR as soon as any CUDA call
// fails, 0 on full success.
int freeMemory(dataType *h_Filter, dataType *h_Input, dataType *h_Buffer, dataType *h_OutputCPU, dataType *h_OutputGPU, dataType *d_Input, dataType *d_Buffer, dataType *d_OutputGPU)
{
    cudaError_t status;

    // Host side: free in reverse order of allocation.
    printf("Free host memory...\n");
    if (h_OutputGPU != NULL) free(h_OutputGPU);
    if (h_OutputCPU != NULL) free(h_OutputCPU);
    if (h_Buffer != NULL) free(h_Buffer);
    if (h_Input != NULL) free(h_Input);
    if (h_Filter != NULL) free(h_Filter);

    // Device side: each cudaFree is checked individually.
    printf("Free device memory...\n");
    if (d_OutputGPU != NULL)
    {
        status = cudaFree(d_OutputGPU);
        if (status != cudaSuccess)
        {
            printf("Error during cudaFree (d_OutputGPU): %s\n", cudaGetErrorString(status));
            return (ERROR);
        }
    }
    if (d_Buffer != NULL)
    {
        status = cudaFree(d_Buffer);
        if (status != cudaSuccess)
        {
            printf("Error during cudaFree (d_Buffer): %s\n", cudaGetErrorString(status));
            return (ERROR);
        }
    }
    if (d_Input != NULL)
    {
        status = cudaFree(d_Input);
        if (status != cudaSuccess)
        {
            printf("Error during cudaFree (d_Input): %s\n", cudaGetErrorString(status));
            return (ERROR);
        }
    }

    // Reset the device as a final cleanup step.
    printf("Reset Device\n");
    status = cudaDeviceReset();
    if (status != cudaSuccess)
    {
        printf("Error during cudaDeviceReset: %s\n", cudaGetErrorString(status));
        return (ERROR);
    }
    return (0);
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
int main(int argc, char **argv)
{
//pointers for the host
dataType
*h_Filter = NULL,
*h_Input = NULL,
*h_Buffer = NULL,
*h_OutputCPU = NULL,
*h_OutputGPU = NULL;
//pointers for the device
dataType
*d_Input = NULL,
*d_Buffer = NULL,
*d_OutputGPU = NULL;
int imageW; //image width = N
int imageH; //image height = N
unsigned int i, j, block_size, numberOfBlocks;
dataType diff = 0, max_diff = 0;
/*-------timing variables-------*/
struct timespec tv1, tv2;
cudaError_t err;
cudaEvent_t start;
cudaEvent_t stop;
float elapsed;
/*------------------------------*/
/*------padding variables-------*/
int imageWithPaddingW, newImageSize;
/*------------------------------*/
printf("Enter filter radius : ");
scanf("%d", &filter_radius);
// Ta imageW, imageH ta dinei o xrhsths kai thewroume oti einai isa,
// dhladh imageW = imageH = N, opou to N to dinei o xrhsths.
// Gia aplothta thewroume tetragwnikes eikones.
printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
scanf("%d", &imageW); //TODO Warning
imageH = imageW;
imageWithPaddingW = imageW + 2 * filter_radius;
newImageSize = imageWithPaddingW * imageWithPaddingW;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Image Width x Height = %i x %i\n\n", imageWithPaddingW, imageWithPaddingW);
printf("Allocating and initializing host arrays...\n");
//Allocate host (CPU) memory
{
h_Filter = (dataType *)malloc(FILTER_LENGTH * sizeof(dataType));
if (h_Filter == NULL)
{
printf("Error allocating memory on host for h_Filter");
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
h_Input = (dataType *)calloc(newImageSize, sizeof(dataType));
if (h_Input == NULL)
{
printf("Error allocating memory on host for h_Input");
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
h_Buffer = (dataType *)calloc(newImageSize, sizeof(dataType));
if (h_Buffer == NULL)
{
printf("Error allocating memory on host for h_Buffer");
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
h_OutputCPU = (dataType *)calloc(newImageSize, sizeof(dataType));
if (h_OutputCPU == NULL)
{
printf("Error allocating memory on host for h_OutputCPU");
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
h_OutputGPU = (dataType *)calloc(newImageSize, sizeof(dataType));
if (h_OutputGPU == NULL)
{
printf("Error allocating memory on host for h_OutputGPU");
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
// to 'h_Filter' apotelei to filtro me to opoio ginetai to convolution kai
// arxikopoieitai tuxaia. To 'h_Input' einai h eikona panw sthn opoia ginetai
// to convolution kai arxikopoieitai kai auth tuxaia.
}
printf("Allocate device (GPU) memory\n");
//Allocate device (GPU) memory
{
err = cudaMalloc((void **)&d_Input, newImageSize * sizeof(dataType));
if (err != cudaSuccess)
{
printf("Error allocating memory on host for d_Input: %s\n", cudaGetErrorString(err));
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
cudaCalloc((void **)&d_Buffer, newImageSize, sizeof(dataType));
cudaCalloc((void **)&d_OutputGPU, newImageSize, sizeof(dataType));
}
if (imageW < 32)
{
block_size = imageW;
numberOfBlocks = 1;
}
else
{
block_size = 32;
numberOfBlocks = imageW / block_size;
}
dim3 threadsPerBlock(block_size, block_size); //geometry for block
dim3 numBlocks(numberOfBlocks, numberOfBlocks); //geometry for grid
int SH_MEM_SIZE_PAD = 32 + 2 * filter_radius;
//Initializations
{
srand(200);
// Random initialization of h_Filter
for (i = 0; i < FILTER_LENGTH; i++)
{
h_Filter[i] = (dataType)(rand() % 16);
}
// Random initialization of h_Input
for (i = filter_radius; i < (imageH + filter_radius); i++)
{
for (j = filter_radius; j < (imageW + filter_radius); j++)
{
h_Input[i * imageWithPaddingW + j] = (dataType)rand() / ((dataType)RAND_MAX / 255) + (dataType)rand() / (dataType)RAND_MAX;
}
}
}
//CPU Computation
{
// To parakatw einai to kommati pou ekteleitai sthn CPU kai me vash auto prepei na ginei h sugrish me thn GPU.
printf("CPU computation is about to start...\n");
//Get the starting time
clock_gettime(CLOCK_MONOTONIC_RAW, &tv1);
convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH, filter_radius); // convolution kata grammes
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH, filter_radius); // convolution kata sthles
//Take the end time
clock_gettime(CLOCK_MONOTONIC_RAW, &tv2);
printf("CPU computation finished...\n");
}
//Calculate the duration of the CPU computation and report it
{
printf("\033[1;33m");
printf("CPU time = %10g seconds\n",
(double)(tv2.tv_nsec - tv1.tv_nsec) / 1000000000.0 +
(double)(tv2.tv_sec - tv1.tv_sec));
}
printf("\033[0m");
//Copy from host to device
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("Copy host memory to device\n");
cudaEventRecord(start, 0);
//Copy host memory to device
err = cudaMemcpyToSymbol(d_Filter, h_Filter, FILTER_LENGTH * sizeof(dataType));
if (err != cudaSuccess)
{
printf("Error during cudaMemcpyToSymbol of h_Filter to d_Filter: %s\n", cudaGetErrorString(err));
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
err = cudaMemcpy(d_Input, h_Input, newImageSize * sizeof(dataType), cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
printf("Error during cudaMemcpy of h_Input to d_Input: %s\n", cudaGetErrorString(err));
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
}
//GPU Computation
{
printf("GPU computation is about to start...\n");
//kernel for row convolution
//execute grid of numBlocks blocks of threadsPerBlock threads each
convolutionRowSharedMem<<<numBlocks, threadsPerBlock, (32 *( 32 + 2 * filter_radius)) * sizeof(dataType)>>>(d_Input, d_Buffer, filter_radius, imageW, SH_MEM_SIZE_PAD);
err = cudaDeviceSynchronize();
if (err != cudaSuccess)
{
printf("Error during cudaDeviceSynchronize: %s\n", cudaGetErrorString(err));
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
//Error Checking
cudaErrorCheck();
//kernel for column convolution
//execute grid of numBlocks blocks of threadsPerBlock threads each
convolutionColumnSharedMem<<<numBlocks, threadsPerBlock, (32 * (32 + 2 * filter_radius)) * sizeof(dataType)>>>(d_Buffer, d_OutputGPU, filter_radius, imageW, imageH, SH_MEM_SIZE_PAD);
err = cudaMemcpy(h_OutputGPU, d_OutputGPU, newImageSize * sizeof(dataType), cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
printf("Error during cudaMemcpy of d_OutputGPU to h_OutputGPU: %s\n", cudaGetErrorString(err));
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return (ERROR);
}
//Error Checking
cudaErrorCheck();
cudaEventRecord(stop, 0);
printf("GPU computation finished...\n");
}
//Execution Time of GPU
{
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
printf("\033[1;35m");
printf("GPU time = %g ms\n", elapsed);
printf("\033[0m");
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
//Compare the results from CPU and GPU
{
for (i = filter_radius; i < imageH + filter_radius; i++)
{
for (j = filter_radius; j < imageW + filter_radius; j++)
{
diff = ABS(h_OutputCPU[i * imageWithPaddingW + j] - h_OutputGPU[i * imageWithPaddingW + j]);
//printf("The difference between h_OutputCPU[%d]=%lf and h_OutputGPU[%d]=%lf is diff = %g\n", i * imageWithPaddingW + j, h_OutputCPU[i * imageWithPaddingW + j], i * imageWithPaddingW + j, h_OutputGPU[i * imageWithPaddingW + j], diff);
if (diff > max_diff)
{
max_diff = diff;
}
if (diff > accuracy)
{
//printf("\t|->ERROR: The difference between the values of h_OutputCPU and h_OutputGPU at index i = %u is bigger than the given accuracy.\n", i);
}
}
}
if (max_diff > accuracy)
{
printf("\033[1;31m");
printf("ERROR! Max difference between the values of h_OutputCPU and h_OutputGPU is max_diff = %g\n", max_diff);
}
else
{
printf("\033[1;32m");
printf("Max difference between the values of h_OutputCPU and h_OutputGPU is max_diff = %g\n", max_diff);
}
}
//Free allocated host and device memory
printf("\033[0m");
freeMemory(h_Filter, h_Input, h_Buffer, h_OutputCPU, h_OutputGPU, d_Input, d_Buffer, d_OutputGPU);
return 0;
}
|
1,870
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <ctype.h>
#include <string.h>
#include "scrImagePgmPpmPackage.h"
/* Reads the next whitespace-delimited token from fdin into s, skipping
 * '#'-to-end-of-line comments (PGM/PPM header convention).
 * NOTE(review): the fscanf result is unchecked, so at EOF `s` keeps its
 * previous contents and the outer loop may spin — confirm callers only use
 * this on well-formed headers. */
void get_string_nocomments( FILE* fdin, char* s )
{
    int i,done;
    done=0;
    while( !done )
    {
        fscanf( fdin, "%s", s );
        /* Scan the token: a leading '#' marks a comment, so discard the
         * remainder of that line and fetch another token; the first
         * non-space character otherwise accepts the token. */
        for( i=0; !done; ++i )
        {
            if( (s)[i] == '#' )
            {
                fgets( s, 256, fdin );
                break;
            }
            if( ! isspace(s[i]) )
                done = 1;
        }
    }
}
/* Reads a P5 (binary) or P2 (ascii) PGM file into `image` (irows x icols
 * bytes expected). Returns 0 if the file cannot be opened, 1 otherwise.
 * Dimension mismatches are reported on stderr but not fatal. */
int scr_read_pgm( char* name, unsigned char* image, int irows, int icols )
{
    int i,j, range, rows, cols;
    unsigned char* tpt;
    char* ins;
    char imgtype[4], whitespace;
    FILE* fdin;
    ins = (char*)malloc(257);
    if( (fdin=fopen(name,"rb")) == NULL )
    {
        free( ins );   /* was leaked on this early-return path */
        return 0;
    }
    /* bounded read: imgtype holds at most 3 chars + NUL (was unbounded %s) */
    fscanf( fdin, "%3s", imgtype );
    get_string_nocomments( fdin, ins );
    sscanf( ins,"%d", &cols );
    get_string_nocomments( fdin,ins );
    sscanf( ins, "%d", &rows );
    get_string_nocomments( fdin,ins );
    sscanf( ins, "%d", &range );
    if( (rows<=0) || (cols<=0) )
        fprintf( stderr, ": negative rows or columns. Check!! %s", name );
    if( (irows!=rows) || (cols!=icols) )
    {
        fprintf( stderr, ": rows or columns don't match for file %s\n", name );
        fprintf( stderr, ": read %d %d, expected %d %d \n", rows,cols,irows,icols );
    }
    if( (strcmp(imgtype,"P5")==0) || (strcmp(imgtype,"p5")==0) )
    {
        fscanf( fdin, "%c", &whitespace );  /* consume separator byte */
        /* read binary form */
        tpt = image;
        for( i=0; i<rows; i++ )
        {
            for( j=0; j<cols; j++,tpt++ )
            {
                if( EOF == fscanf(fdin,"%c",tpt) )
                    fprintf( stderr, "WARNING!! .not enought bytes for %s, on r%d c%d\n", name,i,j );
            }
        }
    }
    else if( (strcmp(imgtype,"P2")==0) || (strcmp(imgtype,"p2")==0) )
    {
        /* ascii form: %d must target an int — the original passed the
         * unsigned char* directly, writing sizeof(int) bytes per pixel (UB) */
        tpt = image;
        for( i=0; i<rows; i++ )
        {
            for( j=0; j<cols; j++,tpt++ )
            {
                int pixel;
                if( EOF == fscanf(fdin,"%d",&pixel) )
                    fprintf( stderr, "WARNING!! .not enought bytes for %s, on r%d c%d\n", name,i,j );
                else
                    *tpt = (unsigned char)pixel;
            }
        }
    }
    else
    {
        /* not p2 or p5 */
        fprintf( stderr, "ERROR, called read pgm_byte but %s not a P2 or P5 image\n", name );
    }
    free( ins );
    fclose( fdin );
    return 1;
}
/* Writes `image` (rows x cols bytes) as a binary (P5) PGM file, with an
 * optional '#' comment line. Prompts for a filename if `name` is empty. */
void scr_write_pgm( char* name, unsigned char* image, int rows, int cols, char* comment )
{
    int i,j;
    unsigned char* tpt;
    char fname[256];
    FILE* fdout;
    if( strlen(name) < 1 )
    {
        fprintf( stderr, "Image write called withoutd filename\n" );
        fprintf( stderr, "Please input filename->" );
        scanf( "%s", fname );
    }
    else
        strcpy( fname, name );
    if( (fdout=fopen(fname,"wb")) == NULL )
    {
        fprintf( stderr, "cannot open file >>%s<< for output\n", fname );
        fprintf( stderr, "continuing without write\n" );
        return;  /* the original fell through and wrote via a NULL FILE* */
    }
    if( comment )
        fprintf( fdout, "P5\n#%s \n%d %d\n255\n", comment, cols, rows );
    else
        fprintf( fdout, "P5\n%d %d\n255\n", cols, rows );
    tpt = image;
    for( i=0; i<rows; i++ )
    {
        for( j=0; j<cols; j++ )
        {
            fprintf( fdout, "%c", (unsigned char)*tpt++ );
        }
    }
    fprintf( fdout, "\n" );
    fclose( fdout );
}
/* Reads a P6 (binary) or P3 (ascii) PPM file into `image` (3 bytes per
 * pixel, stored b,g,r per pixel as the write routine expects). Returns 0 if
 * the file cannot be opened, 1 otherwise. */
int scr_read_ppm( char* name, unsigned char* image, int irows, int icols )
{
    int i,j, range, rows, cols;
    unsigned char* tpt;
    char* ins;
    char imgtype[4], whitespace;
    FILE* fdin;
    ins = (char*)malloc(257);
    if( (fdin=fopen(name,"rb")) == NULL )
    {
        free( ins );   /* was leaked on this early-return path */
        return 0;
    }
    /* bounded read: imgtype holds at most 3 chars + NUL (was unbounded %s) */
    fscanf( fdin, "%3s", imgtype );
    get_string_nocomments( fdin, ins );
    sscanf( ins,"%d", &cols );
    get_string_nocomments( fdin,ins );
    sscanf( ins, "%d", &rows );
    get_string_nocomments( fdin,ins );
    sscanf( ins, "%d", &range );
    if( (rows<=0) || (cols<=0) )
        fprintf( stderr, ": negative rows or columns. Check!! %s", name );
    if( (irows!=rows) || (cols!=icols) )
    {
        fprintf( stderr, ": rows or columns don't match for file %s\n", name );
        fprintf( stderr, ": read %d %d, expected %d %d \n", rows,cols,irows,icols );
    }
    if( (strcmp(imgtype,"P6")==0) || (strcmp(imgtype,"p6")==0) )
    {
        fscanf( fdin, "%c", &whitespace );  /* consume separator byte */
        /* read binary form; channels are stored reversed (b,g,r) */
        tpt = image;
        for( i=0; i<rows; i++ )
        {
            for( j=0; j<cols; j++,tpt+=3 )
            {
                if( EOF == fscanf(fdin,"%c",tpt+2) )
                    fprintf( stderr, "WARNING!! .not enought bytes for %s, on r%d c%d\n", name,i,j );
                if( EOF == fscanf(fdin,"%c",tpt+1) )
                    fprintf( stderr, "WARNING!! .not enought bytes for %s, on r%d c%d\n", name,i,j );
                if( EOF == fscanf(fdin,"%c",tpt) )
                    fprintf( stderr, "WARNING!! .not enought bytes for %s, on r%d c%d\n", name,i,j );
            }
        }
    }
    else if( (strcmp(imgtype,"P3")==0) || (strcmp(imgtype,"p3")==0) )
    {
        /* ascii form: %d must target an int — the original passed the
         * unsigned char* directly, writing sizeof(int) bytes per channel (UB) */
        tpt = image;
        for( i=0; i<rows; i++ )
        {
            for( j=0; j<cols; j++,tpt+=3 )
            {
                int r_val, g_val, b_val;
                if( EOF == fscanf(fdin,"%d",&r_val) )
                    fprintf( stderr, "WARNING!! .not enought bytes for %s, on r%d c%d\n", name,i,j );
                else
                    *(tpt+2) = (unsigned char)r_val;
                if( EOF == fscanf(fdin,"%d",&g_val) )
                    fprintf( stderr, "WARNING!! .not enought bytes for %s, on r%d c%d\n", name,i,j );
                else
                    *(tpt+1) = (unsigned char)g_val;
                if( EOF == fscanf(fdin,"%d",&b_val) )
                    fprintf( stderr, "WARNING!! .not enought bytes for %s, on r%d c%d\n", name,i,j );
                else
                    *tpt = (unsigned char)b_val;
            }
        }
    }
    else
    {
        /* not p3 or p6 */
        fprintf( stderr, "ERROR, called read pgm_byte but %s not a P3 or P6 image\n", name );
    }
    free( ins );
    fclose( fdin );
    return 1;
}
/* Writes `image` (rows x cols, 3 bytes per pixel stored b,g,r) as a binary
 * (P6) PPM file, emitting channels in r,g,b order. Prompts for a filename
 * if `name` is empty. */
void scr_write_ppm( char* name, unsigned char* image, int rows, int cols, char* comment )
{
    int i,j;
    unsigned char* tpt;
    char fname[256];
    FILE* fdout;
    if( strlen(name) < 1 )
    {
        fprintf( stderr, "Image write called withoutd filename\n" );
        fprintf( stderr, "Please input filename->" );
        scanf( "%s", fname );
    }
    else
        strcpy( fname, name );
    if( (fdout=fopen(fname,"wb")) == NULL )
    {
        fprintf( stderr, "cannot open file >>%s<< for output\n", fname );
        fprintf( stderr, "continuing without write\n" );
        return;  /* the original fell through and wrote via a NULL FILE* */
    }
    if( comment )
        fprintf( fdout, "P6\n#%s \n%d %d\n255\n", comment, cols, rows );
    else
        fprintf( fdout, "P6\n%d %d\n255\n", cols, rows );
    // the order is r g b
    tpt = image;
    for( i=0; i<rows; ++i )
    {
        for( j=0; j<cols; ++j,tpt+=3 )
        {
            fprintf( fdout, "%c%c%c", (unsigned char)(*(tpt+2)), (unsigned char)(*(tpt+1)), (unsigned char)(*tpt) );
        }
    }
    fprintf( fdout, "\n" );
    fclose( fdout );
}
/*void get_PgmPpmParams(char* name, int *irows, int *icols )
{
int range, rows, cols;
char imgtype[4];
FILE* fdin;
if( (fdin=fopen(name,"rb")) == NULL )
{
printf("File %s is not available - check\n",name);
exit(1);
}
fscanf( fdin, "%s", imgtype );
getout_comment(fdin);
fscanf(fdin,"%d",&cols);
fscanf(fdin,"%d",&rows);
fscanf(fdin,"%d",&range);
if( (rows<=0) || (cols<=0) )
fprintf( stderr, ": negative rows or columns. Check!! %s", name );
*irows = rows;
*icols = cols;
fclose(fdin);
}*/
/* Parses only the header of a PGM/PPM file and returns its dimensions via
 * *irows / *icols. Exits the process if the file cannot be opened. */
void get_PgmPpmParams(char* name, int *irows, int *icols )
{
    int range, rows, cols;
    char imgtype[4];
    char* ins;
    FILE* fdin;
    ins = (char*)malloc(257);
    if( (fdin=fopen(name,"rb")) == NULL )
    {
        printf("File %s is not available - check\n",name);
        exit(1);
    }
    /* bounded read: imgtype holds at most 3 chars + NUL */
    fscanf( fdin, "%3s", imgtype );
    get_string_nocomments( fdin, ins );
    sscanf( ins,"%d", &cols );
    get_string_nocomments( fdin,ins );
    sscanf( ins, "%d", &rows );
    get_string_nocomments( fdin,ins );
    sscanf( ins, "%d", &range );
    if( (rows<=0) || (cols<=0) )
        fprintf( stderr, ": negative rows or columns. Check!! %s", name );
    *irows = rows;
    *icols = cols;
    free( ins );   /* was leaked: allocated but never released */
    fclose(fdin);
}
/* Discards one character, then the remainder of the current line (through
 * the newline). `c` must be int so EOF (-1) is representable; the original
 * used char, which loops forever on a file with no trailing newline. */
void getout_comment(FILE* fdin)
{
    int c;
    c = fgetc(fdin);
    do
        c = fgetc(fdin);
    while (c != '\n' && c != EOF);
}
|
1,871
|
#include "includes.h"
// Insertion-sorts one bucket of `data` per block and records the bucket's
// starting offset in startPoint. Bucket b covers [b*bucketSize, (b+1)*bucketSize).
// The sort is serial within a bucket (launch with one thread per block).
__global__ void cuSort(float* data,int bucketSize,int* startPoint)
{
    int L = blockIdx.x*bucketSize;   // first element of this block's bucket
    int U = L + bucketSize;          // one past the last element
    int j;
    float tmp;
    startPoint[blockIdx.x] = L;
    for(int i=L+1; i < U; i++)
    {
        tmp = data[i];
        j = i-1;
        // Test the index BEFORE dereferencing — the original evaluated
        // data[j] first, so data[-1] could be read — and stop at the bucket
        // boundary L (not 0), so a block never shifts a neighbouring
        // bucket's elements (which would also race with other blocks).
        while(j >= L && tmp < data[j])
        {
            data[j+1] = data[j];
            j = j-1;
        }
        data[j+1] = tmp;
    }
}
|
1,872
|
// Logistic activation: maps any real input into the open interval (0, 1).
__device__ float sigmoid(float x) {
    const float e = expf(-x);
    return 1 / (1 + e);
}
extern "C"
// Evaluates a feed-forward network inside a single block using a readiness
// wavefront. arguments: the argsSize input activations. topology: topSize
// triplets (leftBorder, rightBorder, weightsStart) giving each neuron's
// input slice of `states` and its offset into `weights`. outStates receives
// all dim = argsSize + topSize activations.
// Dynamic shared layout: states[dim] floats followed by ready[dim] bools;
// counter[0] holds the length of the finalized prefix of `states`.
__global__ void produceState2(const float* arguments, const int argsSize, const float* weights,
const int* topology, const int topSize, float* outStates) {
    const int tid = threadIdx.x;
    const int dim = argsSize + topSize;
    extern __shared__ float s[];
    float* states = s;
    bool* ready = (bool*)&states[dim];
    __shared__ int counter[1];
    // Clear all readiness flags (block-stride loop over dim entries).
    int r = tid;
    while(r < dim) {
        ready[r] = false;
        r += blockDim.x;
    }
    if (tid == 0) {
        counter[tid] = argsSize;  // the inputs are finalized by definition
    }
    // Seed the input layer.
    if (tid < argsSize) {
        states[tid] = arguments[tid];
        ready[tid] = true;
    }
    __syncthreads();
    // Each pass, thread tid attempts to finalize state counter[0] + tid.
    while(counter[0] < dim) {
        const int index = counter[0] + tid;
        const int topIndex = index - argsSize;
        if (topIndex < topSize) {
            const int leftBorder = topology[topIndex*3];
            const int rightBorder = topology[topIndex*3 + 1];
            const int weightsStart = topology[topIndex*3 + 2];
            // Fire only when every input lies within the finalized prefix.
            if (rightBorder <= counter[0]) {
                float sum = 0;
                for (int i = leftBorder; i < rightBorder; i++) {
                    sum += states[i] * weights[weightsStart + i - leftBorder];
                }
                states[index] = sigmoid(sum);
                ready[index] = true;
            }
        }
        __syncthreads();
        // Thread 0 extends the finalized prefix over the newly-ready states.
        // NOTE(review): this recount increments counter for every ready flag
        // in the window, assuming readiness is contiguous; a gap would push
        // counter past an unready state — confirm the topology ordering
        // guarantees contiguity.
        if (tid == 0) {
            int total = counter[0];
            for (int i = total; i < total + blockDim.x && i < dim; i++) {
                if (ready[i]) {
                    counter[0]++;
                }
            }
        }
        __syncthreads();
    }
    // Publish the full state vector to global memory.
    int n = tid;
    while(n < dim) {
        outStates[n] = states[n];
        n += blockDim.x;
    }
}
extern "C"
// Alternative feed-forward evaluation: thread t owns states t, t+blockDim.x,
// t+2*blockDim.x, ... and iters[t] counts how many of its states are done
// (inputs count as one). A neuron starts once every producer thread has
// advanced far enough to cover its inputs.
// Dynamic shared layout: states[dim] floats followed by at least blockDim.x
// ints for iters — NOTE(review): confirm the launch allocates room for the
// iters array beyond states[dim].
// NOTE(review): __syncthreads() sits inside a loop whose trip condition is
// per-thread (iters[tid]*blockDim.x + tid < dim), so threads can exit at
// different iterations — divergent barriers are undefined behavior; confirm
// dim/blockDim guarantee a uniform trip count.
__global__ void produceState3(const float* arguments, const int argsSize, const float* weights,
const int* topology, const int topSize, float* outStates) {
    const int tid = threadIdx.x;
    const int dim = argsSize + topSize;
    extern __shared__ float s[];
    float* states = s;
    int* iters = (int*)&states[dim];
    // Seed the input layer; inputs count as one completed iteration.
    if (tid < argsSize) {
        states[tid] = arguments[tid];
        iters[tid] = 1;
    } else {
        iters[tid] = 0;
    }
    __syncthreads();
    while(iters[tid] * blockDim.x + tid < dim) {
        const int index = iters[tid] * blockDim.x + tid;
        const int topIndex = index - argsSize;
        const int leftBorder = topology[topIndex*3];
        const int rightBorder = topology[topIndex*3 + 1];
        const int weightsStart = topology[topIndex*3 + 2];
        // May we fire? Every input state i must already have been produced
        // by its owning thread (i % blockDim.x) in a completed iteration.
        bool canStart = true;
        for (int i = leftBorder; i < rightBorder; i++) {
            int threadId = i % blockDim.x;
            int mustCounted = i / blockDim.x + 1;
            if (iters[threadId] < mustCounted) {
                canStart = false;
                break;
            }
        }
        if (canStart) {
            float sum = 0;
            for (int i = leftBorder; i < rightBorder; i++) {
                sum += states[i] * weights[weightsStart + i - leftBorder];
            }
            states[index] = sigmoid(sum);
            iters[tid]++;
        }
        __syncthreads();
    }
    __syncthreads();
    // Publish the full state vector to global memory.
    int n = tid;
    while(n < dim) {
        outStates[n] = states[n];
        n += blockDim.x;
    }
}
|
1,873
|
#include <stdlib.h>
#include <stdio.h>
// Fills a row-major numRows x numCols matrix with the deterministic pattern
// mat[r][c] = r*2.1f + c*3.2f (float arithmetic, widened to double on store).
void fill_matrix(double *mat, unsigned numRows, unsigned numCols)
{
    double *out = mat;
    for (unsigned r = 0; r < numRows; r++)
        for (unsigned c = 0; c < numCols; c++)
            *out++ = r*2.1f + c*3.2f;
}
// Dumps a row-major numRows x numCols matrix to the fixed output file, one
// row per line with "%4.4f " formatting.
void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols)
{
    const char *fname = "assignment2_5_out";
    FILE *f = fopen(fname, "w");
    if (f == NULL)  // the original wrote through a NULL FILE* on failure
    {
        fprintf(stderr, "cannot open %s for writing\n", fname);
        return;
    }
    for(unsigned i=0; i < numRows; i++)
    {
        for(unsigned j=0; j < numCols; j++)
            fprintf(f,"%4.4f ", mat[i*numCols + j]);
        fprintf(f,"\n");
    }
    fclose(f); }
// Tiled matrix multiply P = M * N for square Width x Width matrices.
// Launch with blockDim = (TILE_WIDTH, TILE_WIDTH) and dynamic shared memory
// of 2 * TILE_WIDTH * TILE_WIDTH doubles (two tiles: ds_M then ds_N).
__global__ void MatrixMulKernel_col_maj(double* M, double* N, double* P, int Width, int TILE_WIDTH) {
    extern __shared__ double buffer[];
    double *ds_M = &buffer[0];
    double *ds_N = &buffer[TILE_WIDTH*TILE_WIDTH];
    // Generate IDs
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    int Row = by * blockDim.y + ty;
    int Col = bx * blockDim.x + tx;
    double Pvalue = 0;
    // Loop over the M and N tiles required to compute the P element
    for (int p = 0; p < (Width-1)/TILE_WIDTH+1; ++p) {
        // Guard each operand independently and store 0.0 when out of range.
        // The original used one guard (Row / p*T+tx) for BOTH loads — wrong
        // condition for the N tile — and only zeroed the tiles once, so
        // stale values from the previous iteration leaked into the product
        // whenever the guard failed.
        ds_M[ty*TILE_WIDTH + tx] = (Row < Width && p*TILE_WIDTH + tx < Width)
                                 ? M[Row*Width + p*TILE_WIDTH + tx] : 0.0;
        ds_N[ty*TILE_WIDTH + tx] = (Col < Width && p*TILE_WIDTH + ty < Width)
                                 ? N[(p*TILE_WIDTH + ty)*Width + Col] : 0.0;
        __syncthreads();
        for (int i = 0; i < TILE_WIDTH; ++i)
            Pvalue += ds_M[ty*TILE_WIDTH + i] * ds_N[i*TILE_WIDTH + tx];
        __syncthreads();
    }
    if (Row < Width && Col < Width){
        P[Row*Width+Col] = Pvalue;
    }
}
// Benchmarks the tiled matrix multiply for tile widths 4/8/16/32 on an
// 8192x8192 double matrix, printing GPU timings and 10x10 corners.
int main(int argc,char **argv) {
    int N;
    int TILE_WIDTH_ll[4], TILE_WIDTH;
    int loop,loop1, loop2; // loop variables
    float time_spent;
    N=8192;
    size_t size = N *N* sizeof(double);
    double*h_matA = (double*)malloc(size);
    double*h_matB = (double*)malloc(size);
    double*h_matC = (double*)malloc(size); // result
    fill_matrix(h_matA,N,N);
    fill_matrix(h_matB,N,N);
    // Tile widths 4, 8, 16, 32. The original used pow(2, 2+loop), which is
    // double math and relies on <math.h> this file never includes.
    for (loop = 0; loop<4; loop++){
        TILE_WIDTH_ll[loop] = 4 << loop;
    }
    printf("\nMatrix A (first 10*10 inputs)\n");
    for(loop1 = 0; loop1 < 10; loop1++){
        for (loop2=0;loop2 < 10; loop2++)
            printf("%f ", *(h_matA + N*loop1 + loop2));
        printf("\n");
    }
    printf("\n\nMatrix B (first 10*10 inputs)\n");
    for(loop1 = 0; loop1 < 10; loop1++){
        for (loop2=0;loop2 < 10; loop2++)
            printf("%f ", *(h_matB + N*loop1 + loop2));
        printf("\n");
    }
    double* d_matA; cudaMalloc(&d_matA, size);
    double* d_matB; cudaMalloc(&d_matB, size);
    double* d_matC; cudaMalloc(&d_matC, size);
    //GPU timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Copy vectors from host memory to device memory
    cudaMemcpy(d_matA, h_matA, size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_matB, h_matB, size,cudaMemcpyHostToDevice);
    for (loop =0;loop < 4; loop++){
        TILE_WIDTH=TILE_WIDTH_ll[loop];
        // Invoke kernel
        dim3 threadsPerBlock (TILE_WIDTH,TILE_WIDTH,1);
        // Ceil-division: the original "(N + t)/t" launched one extra block
        // per axis whenever N was a multiple of the tile width.
        dim3 blocksPerGrid ((N + threadsPerBlock.x - 1) /threadsPerBlock.x,(N + threadsPerBlock.y - 1) /threadsPerBlock.y,1);
        size_t blocksize = 2 * TILE_WIDTH * TILE_WIDTH;  // two shared tiles
        cudaEventRecord(start, 0);
        MatrixMulKernel_col_maj<<<blocksPerGrid, threadsPerBlock, sizeof(double)*blocksize>>>(d_matA,d_matB, d_matC, N, TILE_WIDTH);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time_spent, start, stop);
        printf("\nTime spent in col maj for tile %d x %d %f\n",TILE_WIDTH,TILE_WIDTH,time_spent);
        // h_C contains the result in host memory (blocking copy syncs)
        cudaMemcpy(h_matC, d_matC, size,cudaMemcpyDeviceToHost);
        printf("\n\nMatrix C (first 10*10 outputs)\n");
        for(loop1 = 0; loop1 < 10; loop1++){
            for (loop2=0;loop2 < 10; loop2++)
                printf("%f ", *(h_matC + N*loop1 + loop2));
            printf("\n");
        }
    }
    // Log outputs
    printf("\nWritting to file assignment_2_1_out as Mat C");
    print_matrix_to_file(h_matC,N,N);
    // Free device memory
    cudaFree(d_matA);
    cudaFree(d_matB);
    cudaFree(d_matC);
    // Free host memory
    free(h_matA);
    free(h_matB);
    free(h_matC);
    return 0;
}
|
1,874
|
#include <stdio.h>
//////////////////////////float3////////////////////////////////
// ---- Element-wise float3 arithmetic (device-only helpers) ----
inline __device__ float3 operator+(float3 a, float b)
{
return make_float3(a.x + b, a.y + b, a.z + b);
}
inline __device__ float3 operator-(float3 a, float b)
{
return make_float3(a.x - b, a.y - b, a.z - b);
}
inline __device__ float3 operator+(float3 a, float3 b)
{
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __device__ float3 operator-(float3 a, float3 b)
{
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
inline __device__ void operator+=(float3 &a, float3 b)
{
a.x += b.x;
a.y += b.y;
a.z += b.z;
}
inline __device__ void operator-=(float3 &a, float3 b)
{
a.x -= b.x;
a.y -= b.y;
a.z -= b.z;
}
// Component-wise division (no zero check; callers must guarantee b != 0).
inline __device__ float3 operator/(float3 a, float3 b)
{
return make_float3(a.x/b.x, a.y/b.y, a.z/b.z);
}
inline __device__ float3 operator*(float3 a, float b)
{
return make_float3(a.x*b, a.y*b, a.z*b);
}
inline __device__ float3 operator*(float a, float3 b)
{
return make_float3(a*b.x, a*b.y, a*b.z);
}
inline __device__ float3 operator*(float3 a, float3 b)
{
return make_float3(a.x*b.x, a.y*b.y, a.z*b.z);
}
// Mixed-type product: the int3 components are implicitly converted to float.
inline __device__ float3 operator*(float3 a, int3 b)
{
return make_float3(a.x*b.x, a.y*b.y, a.z*b.z);
}
///////////////////////////int3/////////////////////////////////
// ---- Element-wise int3 arithmetic (device-only helpers) ----
inline __device__ int3 operator+(int3 a, int3 b)
{
return make_int3(a.x + b.x, a.y + b.y, a.z + b.z);
}
inline __device__ int3 operator-(int3 a, int3 b)
{
return make_int3(a.x - b.x, a.y - b.y, a.z - b.z);
}
inline __device__ int3 operator+(int3 a, int b)
{
return make_int3(a.x + b, a.y + b, a.z + b);
}
inline __device__ int3 operator-(int3 a, int b)
{
return make_int3(a.x - b, a.y - b, a.z - b);
}
inline __device__ int3 operator+(int a, int3 b)
{
return make_int3(a + b.x, a + b.y, a + b.z);
}
inline __device__ int3 operator-(int a, int3 b)
{
return make_int3(a - b.x, a - b.y, a - b.z);
}
////////////////////////////////////////////////////////////////
// Clamp each component of x to [a, b]; two overloads for scalar/vector bounds.
inline __device__ int3 clamp(int3 x, int a, int3 b)
{
return make_int3(max(a, min(x.x, b.x)), max(a, min(x.y, b.y)), max(a, min(x.z, b.z)));
}
inline __device__ int3 clamp(int3 x, int3 a, int b)
{
return make_int3(max(a.x, min(x.x, b)), max(a.y, min(x.y, b)), max(a.z, min(x.z, b)));
}
// Component-wise floor, returned as int3 (note: truncates to int range).
inline __device__ int3 floorf(float3 v)
{
return make_int3(floorf(v.x), floorf(v.y), floorf(v.z));
}
// Component-wise round-to-nearest, kept as float3.
inline __device__ float3 round(float3 v)
{
return make_float3(round(v.x), round(v.y), round(v.z));
}
inline __device__ int dot(int3 a, int3 b)
{
return a.x*b.x + a.y*b.y + a.z*b.z;
}
inline __device__ float dot(float3 a, float3 b)
{
return a.x*b.x + a.y*b.y + a.z*b.z;
}
// Mathematical modulo: result is always in [0, b) for b > 0, unlike C's %.
inline __device__ int mod(int a, int b)
{
int k = a % b;
return (k < 0) ? (k + b) : k;
}
inline __device__ int3 mod(int3 a, int3 b)
{
return make_int3(mod(a.x,b.x), mod(a.y,b.y), mod(a.z,b.z));
}
////////////////////////////////////////////////////////////////
// Per-axis number of neighbour bins to scan in the positive and negative
// directions from a particle's home bin (see getBinStep below).
struct BinStep
{
int3 positive, negative;
};
// 3D bin index (x,y,z) of a coordinate on a grid of binDim bins of size
// binLength per axis, clamped into the valid range [0, binDim-1].
inline __device__ int3 getLocalBinIndex(float3 coordinate, float3 binLength, int3 binDim)
{
return clamp(floorf(coordinate/binLength), 0, binDim - 1);
}
// Flatten a 3D bin index to a linear index (x fastest, then y, then z).
inline __device__ int getBinIndex(int3 localBinIndex, int3 binDim)
{
return dot(localBinIndex, make_int3(1, binDim.x, binDim.x*binDim.y));
}
// Shrink the [negative, positive] step window so it spans at most binDim bins
// along one axis; prevents visiting the same (periodically wrapped) bin twice.
inline __device__ void stepLimit(int &positive, int &negative, int binDim)
{
if (positive - negative > binDim - 1)
{
if (positive > -negative)
{
positive = negative + binDim - 1;
}
else
{
negative = positive - binDim + 1;
}
}
}
// Accumulate one pair's Lennard-Jones force and potential contribution.
// sr = (sig/r)^6; force term is eps/r2*(2*sr^2 - sr)*R, energy eps*(sr^2 - sr).
// NOTE(review): the usual 4*eps (energy) and 24*eps (force) prefactors appear
// to be applied by the caller (ljForce multiplies by 24 and by 2) — confirm.
inline __device__ void lennardJones(float3 &ljF, float &ljU, float3 R, float r2, float eps, float sig)
{
float sr = sig*sig/r2;
sr = sr*sr*sr;
ljF += eps/r2*(2.0f*sr*sr - sr)*R;
ljU += eps*(sr*sr - sr);
}
// How many neighbour bins (per axis, each direction) fall within the cutoff
// radius of this coordinate, limited so no periodic bin is visited twice.
__device__ BinStep getBinStep(float3 coordinate, float3 binLength, int3 binDim, int3 localBinIndex, float cutoff)
{
struct BinStep binStep;
binStep.positive = clamp(floorf((coordinate + cutoff)/binLength) - localBinIndex, 0, binDim - 1);
binStep.negative = clamp(floorf((coordinate - cutoff)/binLength) - localBinIndex, 1 - binDim, 0);
stepLimit(binStep.positive.x, binStep.negative.x, binDim.x);
stepLimit(binStep.positive.y, binStep.negative.y, binDim.y);
stepLimit(binStep.positive.z, binStep.negative.z, binDim.z);
return binStep;
}
////////////////////////////////////////////////////////////////
extern "C"
// One thread per particle: record the particle's flattened bin index and bump
// that bin's occupancy counter. atomicInc wraps at arraySize, which is safe
// because a bin can hold at most arraySize particles.
__global__ void fillBins(float3 *coordinate, int *binIndex, unsigned int *binCount,
float3 binLength, int3 binDim, unsigned int arraySize)
{
unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x;
if (idg < arraySize)
{
int idB = getBinIndex(getLocalBinIndex(coordinate[idg], binLength, binDim), binDim);
binIndex[idg] = idB;
atomicInc(&binCount[idB], arraySize);
}
}
extern "C"
// Scatter pass of a counting sort by bin: each particle claims a unique slot
// inside its bin by atomically decrementing the bin's (exclusive-end) prefix
// sum, then copies its coordinate and velocity into the sorted arrays.
// NOTE: prefixSum is consumed (decremented) by this kernel.
__global__ void countingSort(int *binIndex, unsigned int *prefixSum,
float3 *coordinate, float3 *velocity,
float3 *coordinateSorted, float3 *velocitySorted,
unsigned int arraySize)
{
unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x;
if (idg < arraySize)
{
unsigned int idgSorted = atomicDec(&prefixSum[binIndex[idg]], arraySize) - 1u;
coordinateSorted[idgSorted] = coordinate[idg];
velocitySorted[idgSorted] = velocity[idg];
}
}
extern "C"
// Per-particle Lennard-Jones force and potential energy using a uniform bin
// (cell-list) decomposition with periodic boundaries. One thread per particle;
// particles must already be sorted by bin (see countingSort) and prefixSum must
// hold each bin's start offset into the sorted arrays.
__global__ void ljForce(float3 *coordinateSorted, float3 *force,
float *potentialEnergy, unsigned int *binCount, unsigned int *prefixSum,
float3 boxSize, float3 binLength, int3 binDim, float cutoff, float eps, float sig,
unsigned int arraySize)
{
unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x;
if (idg < arraySize)
{
float3 R;
float r2;
int binIndexNeighbour;
unsigned int ionCount, offset;
float3 coordinate = coordinateSorted[idg];
int3 localBinIndex = getLocalBinIndex(coordinate, binLength, binDim);
struct BinStep binStep = getBinStep(coordinate, binLength, binDim, localBinIndex, cutoff);
float3 ljF = make_float3(0.0f,0.0f,0.0f);
float cutoff2 = cutoff*cutoff;
float ljU = 0.0f;
// Scan every bin within the cutoff window around this particle's home bin.
for (int dz = binStep.negative.z; dz <= binStep.positive.z; ++dz)
{
for (int dy = binStep.negative.y; dy <= binStep.positive.y; ++dy)
{
for (int dx = binStep.negative.x; dx <= binStep.positive.x; ++dx)
{
// mod() wraps the neighbour bin index for periodic boundaries.
binIndexNeighbour = getBinIndex(mod(localBinIndex + make_int3(dx,dy,dz), binDim), binDim);
ionCount = binCount[binIndexNeighbour];
if (ionCount == 0u)
{
continue;
}
offset = prefixSum[binIndexNeighbour];
for (unsigned int i = offset; i < offset + ionCount; ++i)
{
if (i == idg)
{
continue;
}
R = coordinate - coordinateSorted[i];
r2 = dot(R, R);
// Direct (unwrapped) separation first; if it is already within the
// cutoff, the minimum-image check below is skipped to avoid counting
// the same pair twice.
if (r2 < cutoff2)
{
lennardJones(ljF, ljU, R, r2, eps, sig);
continue;
}
// Minimum-image convention: wrap R into the primary box and retest.
R -= boxSize*round(R/boxSize);
r2 = dot(R, R);
if (r2 < cutoff2)
{
lennardJones(ljF, ljU, R, r2, eps, sig);
}
}
}
}
}
// Prefactors for the accumulated terms from lennardJones(); the factor 2
// is presumably 4*eps energy prefactor times 1/2 for pair double counting.
force[idg] = 24.0f*ljF;
potentialEnergy[idg] = 2.0f*ljU;
}
}
extern "C"
// First half of velocity-Verlet: half-kick the velocity with the current
// force, drift the position by dt, and wrap the position back into the box.
// Reads the sorted arrays, writes the unsorted ones (re-binned next step).
// The constant 0.00482426665 is half of 0.00964853329 (see inline comment);
// presumably a unit-conversion factor — confirm against the host code.
__global__ void verletPre(float3 *coordinate, float3 *velocity,
float3 *coordinateSorted, float3 *velocitySorted,
float3 *force, float3 boxSize, float mass, float dt, unsigned int arraySize)
{
unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x;
if (idg < arraySize)
{
float3 coord, vel;
vel = velocitySorted[idg];
coord = coordinateSorted[idg];
vel += 0.00482426665f*dt/mass*force[idg]; // 0.00964853329 * 0.5
coord += dt*vel;
velocity[idg] = vel;
// Periodic wrap: translate the coordinate back into [0, boxSize).
coordinate[idg] = coord - boxSize*floorf(coord/boxSize);
}
}
extern "C"
// Second half of velocity-Verlet: half-kick with the freshly computed force,
// then record this particle's kinetic energy. The 51.8213479 prefactor is a
// unit-dependent constant (0.5*m*v^2 scaled) — confirm units with host code.
__global__ void verletPos(float3 *velocitySorted, float3 *force, float *kineticEnergy,
float mass, float dt, unsigned int arraySize)
{
unsigned int idg = blockIdx.x*blockDim.x + threadIdx.x;
if (idg < arraySize)
{
float3 vel = velocitySorted[idg];
vel += 0.00482426665f*dt/mass*force[idg]; // 0.00964853329 * 0.5
kineticEnergy[idg] = 51.8213479f*mass*dot(vel, vel);
velocitySorted[idg] = vel;
}
}
|
1,875
|
//#define TILE_DIM 1024
//
//template<typename T>
//__device__ void reverse(const T* vector, T* result, const int length) {
//// __shared__ T tile[TILE_DIM];
//// __shared__ T anti_tile[TILE_DIM];
// extern __shared__ char m[];
// T* tile = (T*)m;
// T* anti_tile = (T*)(m + blockDim.x * sizeof(T));
//
// int bx = blockIdx.x;
// int tx = threadIdx.x;
//
// int index = bx * blockDim.x + tx;
// int centerIndex = length / 2;
// int nextBlockIndex = (bx + 1) * blockDim.x;
// int blockShift = (nextBlockIndex < centerIndex ? 0 : nextBlockIndex - centerIndex);
// int anti_index = index != centerIndex ? length - (bx + 1) * blockDim.x + tx + blockShift : centerIndex;
// if(nextBlockIndex < centerIndex || index < centerIndex) {
// int tileIndex = blockDim.x - 1 - tx - blockShift;
// tile[tileIndex] = vector[index];
// anti_tile[tileIndex] = vector[anti_index];
// }
// __syncthreads();
//
// if (nextBlockIndex < centerIndex || index < centerIndex) {
// result[index] = anti_tile[tx];
// result[anti_index] = tile[tx];
// } else if (index == centerIndex && length % 2 != 0) {
// result[index] = vector[index];
// }
//}
// Reverse `vector` into `result` (out-of-place). Each thread swaps one
// element with its mirror across the array centre; the launch must cover at
// least ceil(length/2) global threads. An odd-length middle element copies
// onto itself.
template<typename T>
__device__ void reverse(const T* vector, T* result, const int length) {
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    const int mirror = length - 1 - pos;
    if (pos < mirror) {
        const T lo = vector[pos];
        const T hi = vector[mirror];
        result[pos] = hi;
        result[mirror] = lo;
    } else if (pos == mirror) {
        // Middle element of an odd-length array maps to itself.
        result[pos] = vector[pos];
    }
}
|
1,876
|
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <bits/stdc++.h>
#include <curand.h>
#include <curand_kernel.h>
using namespace std;
// Budget of objective-function evaluations for the whole run.
const int MAX_FES = 50000;
int NL = 10;          // number of levels the swarm is partitioned into
int LS = 4;           // particles per level
int dim1 = 20;        // NOTE(review): allocated for coor1 but otherwise unused here
int dim2 = 1000000;   // NOTE(review): appears unused in this file
const int num_points = NL*LS;   // total swarm size
const double phi = 0.1;         // exploration weight in the velocity update
const double tol = 0.000001;    // tolerance used for "improvement" comparisons
// Particle record (see the field legend in main()):
// a0/a1 = x/y coordinates, a2 = fitness, a3 = level, a4/a5 = x/y velocity.
struct coor {
double a0,a1,a2,a3,a4,a5,a6;
};
// Variant with vector-valued position (a1) and velocity (a4) of length dim1;
// only partially used by the (work-in-progress) func_eval_input_data path.
struct coor1 {
double* a1;
double a2;
double a3;
double* a4;
double a5;
double a0;
};
// Device-side partial-product step: multiplies *sum by *y in place.
// NOTE(review): d_coo is unused and the thread-id line is commented out —
// this looks like an unfinished per-dimension evaluation helper.
__device__ void func_eval_input_data(coor1* d_coo, double* y, double* sum)
{
// int tId = threadIdx.x + (blockIdx.x * blockDim.x);
double temp = (*y);
*sum *= temp;
}
// Work-in-progress evaluation wrapper. As written it allocates sum, sets it
// to 1, adds it to *total and frees it — i.e. the net effect is *total += 1.
// NOTE(review): the intended kernel launch is commented out, d_coo/x/y are
// effectively unused, and d_sum is declared but never allocated; this whole
// path appears unfinished — confirm before relying on it.
__host__ __device__ void func_eval_input_data(coor1* d_coo, double* x, double* y, double* total) {
double* d_sum, *sum;
sum = (double *) malloc(sizeof(double));
*sum = 1;
// cudaMalloc((void **)&sum,sizeof(double));
// int tId = threadIdx.x + (blockIdx.x * blockDim.x);
// func_eval_input_data<<<(*x)/512,512>>>(d_coo, y, d_sum);
*total += *sum;
free(sum);
}
// Objective function being optimized: f(x, y) = sin(x) * cos(y).
// Swap this body to change the optimization target.
__host__ __device__ double eval(double x, double y) {
    const double sx = sin(x);
    const double cy = cos(y);
    return sx * cy;
}
/*
bool comp(coor temp1, coor temp2) {
// Comparator function for std::sort()
return temp1.a2 < temp2.a2;
}
*/
// Orders particles by ascending fitness (a2); used by thrust::sort in main().
__host__ __device__ bool operator < (const coor& a,const coor& b){return a.a2 < b.a2;}
// Level-based PSO update kernel: each thread updates one particle of level
// (*i - 1), pulling it toward exemplars drawn from two lower (better) levels.
// NOTE(review): several apparent bugs in this kernel —
//  * curand_uniform_double returns values in (0,1], so "(int)rand1 % (*i)"
//    is almost always 0; lev1/lev2/pt1/pt2 therefore barely vary.
//  * r1/r2/r3 are ints assigned "rand / RAND_MAX", i.e. always 0, zeroing the
//    inertia and attraction terms in the velocity update below.
//  * best_x/best_y/best_fx are updated by concurrent threads without atomics.
//  * "abs(fx - *best_fx) < tol" accepts values CLOSE to the current best, not
//    better than it — presumably "fx < *best_fx" was intended; confirm.
__global__ void noicetoit(coor* h_coo, int* levels, double* best_x, double* best_y, double* best_fx, int* i, int* LS, int* NL) {
int tId = threadIdx.x + (blockIdx.x * blockDim.x);
curandState state;
curand_init((unsigned long long)clock() + tId, 0, 0, &state);
int index = threadIdx.x + blockIdx.x * blockDim.x;  // duplicate of tId
double rand1 = curand_uniform_double(&state);
double rand2 = curand_uniform_double(&state);
double rand3 = curand_uniform_double(&state);
double rand4 = curand_uniform_double(&state);
double rand5 = curand_uniform_double(&state);
double rand6 = curand_uniform_double(&state);
double rand7 = curand_uniform_double(&state);
// Particle handled by this thread (level *i - 1, slot `index`).
int pt = levels[(*i-1)*(*NL) + index];
// Choosing a random level
int lev1 = (int)rand1 % (*i);
int lev2 = (int)rand2 % (*i);
if(lev2 < lev1) {
int temp = lev2;
lev2 = lev1;
lev1 = temp;
}
// Choosing random points from those levels
int pt1 = (int)rand3 % (*LS);
int pt2 = (int)rand4 % (*LS);
int temp1 = levels[lev1*(*NL) + pt1];
int temp2 = levels[lev2*(*NL) + pt2];
int r1 = ((double) rand5 / (RAND_MAX));
int r2 = ((double) rand6 / (RAND_MAX));
int r3 = ((double) rand7 / (RAND_MAX));
// Update Functions
h_coo[pt].a4 = r1*(h_coo[pt].a4) + r2*((h_coo[temp1].a0) - (h_coo[pt].a0)) + phi*r3*((h_coo[temp2].a0) - (h_coo[pt].a0));
h_coo[pt].a5 = r1*h_coo[pt].a5 + r2*(h_coo[temp1].a1 - h_coo[pt].a1) + phi*r3*(h_coo[temp2].a1 - h_coo[pt].a1);
h_coo[pt].a0 = h_coo[pt].a0 + h_coo[pt].a4;
h_coo[pt].a1 = h_coo[pt].a1 + h_coo[pt].a5;
double fx = eval(h_coo[pt].a0, h_coo[pt].a1);
if(abs(fx - *best_fx) < tol) {
*best_x = h_coo[pt].a0;
*best_y = h_coo[pt].a1;
*best_fx = fx;
}
h_coo[pt].a2 = fx;
}
// Driver for the level-based PSO: initialize a random swarm, then repeatedly
// sort by fitness, partition into NL levels of LS particles, update levels
// NL..3 on the GPU and level 2 on the host, until MAX_FES evaluations.
// NOTE(review): major issues in this driver —
//  * noicetoit<<<LS/512,512>>> with LS=4 launches 0 blocks, so the GPU update
//    never actually runs (and no CUDA errors are checked).
//  * levels is allocated NL*LS ints but indexed as levels[level*NL + slot],
//    which reaches (NL-1)*NL + LS-1 = 93 on a 40-element buffer — out of
//    bounds writes/reads on both host and device copies.
//  * h_coo1 is sized with sizeof(coor) instead of sizeof(coor1).
//  * d_best_x/... are cudaMalloc'd every iteration and never freed (leak),
//    and device results are never copied back (the memcpy is commented out).
int main() {
clock_t tStart = clock();
int fes = 0;   // function evaluations consumed so far
// Domain for coordinates
int coor_low_lim = -10;
int coor_high_lim = 50;
// Domain for velocities
double vel_low_lim = -0.1;
double vel_high_lim = 0.1;
double best_x;
double best_y;
double best_fx;
//parallel programming
coor* h_coo, *d_coo;
h_coo = (coor *)malloc(num_points*sizeof(coor));
coor1* h_coo1, *d_coo1;
// NOTE(review): should be sizeof(coor1); benignly over-allocates as written.
h_coo1 = (coor1 *)malloc(num_points*sizeof(coor));
for(int i=0; i<num_points; i++)
{
h_coo1[i].a1 = (double *)malloc(dim1*sizeof(double));
h_coo1[i].a4 = (double *)malloc(dim1*sizeof(double));
}
cudaMalloc((void **)&d_coo, num_points*sizeof(coor));
int *levels, *d_levels;
levels = (int *)malloc(sizeof(int)*NL*LS);
cudaMalloc((void **)&d_levels, sizeof(int)*NL*LS);
// vector<vector<int> > levels(NL, vector<int> (LS, 0));
// Seeding random
srand(static_cast <unsigned> (time(0)));
for(int i=0; i< num_points; i++) {
/*
coor[i][...] contains property of each point.
coor[i][0] -> x-coordinate
coor[i][1] -> y-coordinate
coor[i][2] -> function evaluation
coor[i][3] -> level number
coor[i][4] -> x-velocity
coor[i][5] -> y-velocity
*/
h_coo[i].a0 = (coor_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(coor_high_lim - coor_low_lim))));
h_coo[i].a1 = (coor_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(coor_high_lim - coor_low_lim))));
h_coo[i].a2 = eval(h_coo[i].a0, h_coo[i].a1);
h_coo[i].a4 = (vel_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(vel_high_lim - vel_low_lim))));
h_coo[i].a5 = (vel_low_lim + static_cast <double> (rand()) /( static_cast <double> (RAND_MAX/(vel_high_lim - vel_low_lim))));
}
fes += num_points;
cudaMemcpy(d_coo, h_coo, num_points*sizeof(coor), cudaMemcpyHostToDevice);
while(fes < MAX_FES) {
// Host-side sort by fitness (operator< compares a2); best is element 0.
thrust::sort(h_coo, h_coo + num_points);
best_fx = h_coo[0].a2;
best_x = h_coo[0].a0;
best_y = h_coo[0].a1;
// Segregating points into levels
for(int i=0; i<num_points; i++) {
/*
Levels basically acts as a lookup for coor array.
Dimensions of levels: levels[NL][LS]
levels[i] denotes (i+1)th level and each element in levels[i][...] denotes the number of
the point in the coor array.
For instance,
levels[1][2] = 5 denotes that the 3rd point in 2nd level corresponds to point 5 in coor array,
i.e. coor[5][...]
*/
h_coo[i].a3 = i/LS;
// NOTE(review): stride should be LS (row length), not NL — overruns buffer.
levels[(i/LS)*NL + i%LS] = i;
}
cudaMemcpy(d_coo, h_coo, num_points*sizeof(coor), cudaMemcpyHostToDevice);
double *d_best_x, *d_best_y, *d_best_fx;
int* d_LS, *d_i, *d_NL;
cudaMalloc((void **)&d_best_x, sizeof(double));
cudaMalloc((void **)&d_best_y, sizeof(double));
cudaMalloc((void **)&d_best_fx, sizeof(double));
cudaMalloc((void **)&d_i, sizeof(int));
cudaMalloc((void **)&d_LS, sizeof(int));
cudaMalloc((void **)&d_NL, sizeof(int));
cudaMemcpy(d_best_x, &best_x, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_best_y, &best_y, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_best_fx, &best_fx, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(d_LS, &LS, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_NL, &NL, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_levels, levels, sizeof(int)*NL*LS, cudaMemcpyHostToDevice);
for(int i=NL; i>=3; i--) {
cudaMemcpy(d_i, &i, sizeof(int), cudaMemcpyHostToDevice);
// NOTE(review): LS/512 == 0 blocks — this launch is a no-op as configured.
noicetoit<<<LS/512,512>>>(d_coo,d_levels, d_best_x, d_best_y, d_best_fx, d_i, d_LS, d_NL);
fes+= LS;
}
// cudaMemcpy(h_coo, d_coo, num_points*sizeof(coor), cudaMemcpyDeviceToHost);
// Host-side update of level 2, mirroring the kernel's update rule.
// NOTE(review): operates on h_coo that was never refreshed from the device.
for(int i=0; i<LS; i++) {
int pt1 = 0 + (rand() % static_cast<int>(LS));
int pt2 = 0 + (rand() % static_cast<int>(LS));
int pt = levels[1*NL + i];
int temp1 = levels[0*NL + pt1];
int temp2 = levels[0*NL + pt2];
if(abs(eval(h_coo[temp2].a0, h_coo[temp2].a1) - eval(h_coo[temp1].a0, h_coo[temp1].a1)) < tol) {
swap(temp1, temp2);
}
// NOTE(review): ints assigned rand()/RAND_MAX — almost always 0 (see kernel).
int r1 = ((double) rand() / (RAND_MAX));
int r2 = ((double) rand() / (RAND_MAX));
int r3 = ((double) rand() / (RAND_MAX));
// Update Functions
h_coo[pt].a4 = r1*h_coo[pt].a4 + r2*(h_coo[temp1].a0 - h_coo[pt].a0) + phi*r3*(h_coo[temp2].a0 - h_coo[pt].a0);
h_coo[pt].a5 = r1*h_coo[pt].a5 + r2*(h_coo[temp1].a1 - h_coo[pt].a1) + phi*r3*(h_coo[temp2].a1 - h_coo[pt].a1);
h_coo[pt].a0 = h_coo[pt].a0 + h_coo[pt].a4;
h_coo[pt].a1 = h_coo[pt].a1 + h_coo[pt].a5;
double fx = eval(h_coo[pt].a0, h_coo[pt].a1);
if(abs(fx - best_fx) < tol) {
best_x = h_coo[pt].a0;
best_y = h_coo[pt].a1;
best_fx = fx;
}
h_coo[pt].a2 = fx;
}
fes+= LS;
}
cout << "Time: " << (double)(clock() - tStart)/CLOCKS_PER_SEC << endl;
cout << "FINAL RESULTS: " << endl;
cout << "Best x: " << best_x << endl;
cout << "Best y: " << best_y << endl;
// cout << "Best evaluation: " << best_fx << endl;
return 0;
}
|
1,877
|
#include <stdio.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cmath>
#include <cstring>
#define NSTREAM 4
#define BDIM 128
// Fill ip[0..size) with pseudo-random values in [0, 25.5]
// (low byte of rand(), scaled by 1/10).
void initialData(float *ip, int size)
{
    for (int k = 0; k < size; ++k)
    {
        ip[k] = (float)(rand() & 0xFF) / 10.0f;
    }
}
// CPU reference: element-wise C = A + B, used to verify the GPU result.
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
    int i = 0;
    while (i < N)
    {
        C[i] = A[i] + B[i];
        ++i;
    }
}
// Vector add, one element per thread with a bounds guard.
// NOTE(review): the inner loop stores the same value N times per thread —
// presumably deliberate padding to lengthen the kernel for the stream-overlap
// timing demo in main(); confirm before "optimizing" it away.
__global__ void sumArrays(float *A, float *B, float *C, const int N)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < N)
{
for (int i = 0; i < N; ++i)
{
C[idx] = A[idx] + B[idx];
}
}
}
// Compare the host reference against the GPU result and report the first
// mismatch (if any) or a success message. Tolerance is absolute.
void checkResult(float *hostRef, float *gpuRef, const int N)
{
    const double epsilon = 1.0E-8;
    int bad = -1;
    for (int i = 0; i < N && bad < 0; i++)
    {
        if (abs(hostRef[i] - gpuRef[i]) > epsilon)
            bad = i;
    }
    if (bad >= 0)
    {
        printf("Arrays do not match!\n");
        printf("host %5.2f gpu %5.2f at %d\n", hostRef[bad], gpuRef[bad], bad);
    }
    else
    {
        printf("Arrays match.\n\n");
    }
}
// Stream-overlap demo: times a sequential copy/kernel/copy pipeline against
// NSTREAM depth-first async streams on pinned host memory, then verifies the
// GPU result against a CPU reference.
// NOTE(review): system("Pause") at the end is Windows-specific.
int main(int argc, char **argv)
{
printf("> %s Starting...\n", argv[0]);
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
printf("> Using Device %d: %s\n", dev, deviceProp.name);
cudaSetDevice(dev);
// check if device support hyper-q
if (deviceProp.major < 3 || (deviceProp.major == 3 && deviceProp.minor < 5))
{
if (deviceProp.concurrentKernels == 0)
{
printf("> GPU does not support concurrent kernel execution (SM 3.5 or higher required)\n");
printf("> CUDA kernel runs will be serialized\n");
}
else
{
printf("> GPU does not support HyperQ\n");
printf("> CUDA kernel runs will have limited concurrency\n");
}
}
printf("> Compute Capability %d.%d hardware with %d multi-processors\n", deviceProp.major, deviceProp.minor, deviceProp.multiProcessorCount);
// set up data size of vectors
int nElem = 1 << 16;
printf("> vector size = %d\n", nElem);
size_t nBytes = nElem * sizeof(float);
// malloc pinned host memory for async memcpy (required for true overlap)
float *h_A, *h_B, *hostRef, *gpuRef;
cudaHostAlloc((void**)&h_A, nBytes, cudaHostAllocDefault);
cudaHostAlloc((void**)&h_B, nBytes, cudaHostAllocDefault);
cudaHostAlloc((void**)&gpuRef, nBytes, cudaHostAllocDefault);
cudaHostAlloc((void**)&hostRef, nBytes, cudaHostAllocDefault);
// initialize data at host side
initialData(h_A, nElem);
initialData(h_B, nElem);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
sumArraysOnHost(h_A, h_B, hostRef, nElem);
// malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc((float**)&d_A, nBytes);
cudaMalloc((float**)&d_B, nBytes);
cudaMalloc((float**)&d_C, nBytes);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// invoke kernel at host side
dim3 block(BDIM);
dim3 grid((nElem + block.x - 1) / block.x);
printf("> grid (%d, %d) block (%d, %d)\n", grid.x, grid.y, block.x,block.y);
// sequential operation: time H2D copies, kernel, and D2H copy separately
cudaEventRecord(start, 0);
cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float memcpy_h2d_time;
cudaEventElapsedTime(&memcpy_h2d_time, start, stop);
cudaEventRecord(start, 0);
sumArrays <<<grid, block >>>(d_A, d_B, d_C, nElem);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float kernel_time;
cudaEventElapsedTime(&kernel_time, start, stop);
cudaEventRecord(start, 0);
cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float memcpy_d2h_time;
cudaEventElapsedTime(&memcpy_d2h_time, start, stop);
float itotal = kernel_time + memcpy_h2d_time + memcpy_d2h_time;
printf("\n");
printf("Measured timings (throughput):\n");
printf(" Memcpy host to device\t: %f ms (%f GB/s)\n", memcpy_h2d_time, (nBytes * 1e-6) / memcpy_h2d_time);
printf(" Memcpy device to host\t: %f ms (%f GB/s)\n", memcpy_d2h_time, (nBytes * 1e-6) / memcpy_d2h_time);
printf(" Kernel\t\t\t: %f ms (%f GB/s)\n", kernel_time, (nBytes * 2e-6) / kernel_time);
printf(" Total\t\t\t: %f ms (%f GB/s)\n", itotal, (nBytes * 2e-6) / itotal);
// grid parallel operation: split the vectors into NSTREAM chunks
int iElem = nElem / NSTREAM;
size_t iBytes = iElem * sizeof(float);
grid.x = (iElem + block.x - 1) / block.x;
cudaStream_t stream[NSTREAM];
for (int i = 0; i < NSTREAM; ++i)
{
cudaStreamCreate(&stream[i]);
}
cudaEventRecord(start, 0);
// initiate all work on the device asynchronously in depth-first order
for (int i = 0; i < NSTREAM; ++i)
{
int ioffset = i * iElem;
cudaMemcpyAsync(&d_A[ioffset], &h_A[ioffset], iBytes, cudaMemcpyHostToDevice, stream[i]);
cudaMemcpyAsync(&d_B[ioffset], &h_B[ioffset], iBytes, cudaMemcpyHostToDevice, stream[i]);
sumArrays <<<grid, block, 0, stream[i] >>>(&d_A[ioffset], &d_B[ioffset], &d_C[ioffset], iElem);
cudaMemcpyAsync(&gpuRef[ioffset], &d_C[ioffset], iBytes, cudaMemcpyDeviceToHost, stream[i]);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float execution_time;
cudaEventElapsedTime(&execution_time, start, stop);
printf("\n");
printf("Actual results from overlapped data transfers:\n");
printf(" overlap with %d streams : %f ms (%f GB/s)\n", NSTREAM, execution_time, (nBytes * 2e-6) / execution_time);
// NOTE(review): this prints the percentage time reduction, not a speedup ratio.
printf(" speedup : %f \n", ((itotal - execution_time) * 100.0f) / itotal);
// check kernel error
cudaGetLastError();
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// free host memory
cudaFreeHost(h_A);
cudaFreeHost(h_B);
cudaFreeHost(hostRef);
cudaFreeHost(gpuRef);
// destroy events
cudaEventDestroy(start);
cudaEventDestroy(stop);
// destroy streams
for (int i = 0; i < NSTREAM; ++i)
{
cudaStreamDestroy(stream[i]);
}
cudaDeviceReset();
system("Pause");
return(0);
}
|
1,878
|
// GPU kernel: each thread sums its own slice [t*gap, t*gap + gap) of the
// alternating series sum (+/-)1/(i+1), walking from the high index down
// (small terms first, for better float accumulation). Partials land in
// data_out[t]; no bounds guard, so the launch must match the output size.
__global__ void process(float * data_out, int gap)
{
    int t = blockIdx.x * blockDim.x + threadIdx.x;
    int lo = t * gap;
    int hi = lo + gap - 1;
    bool pair = ((hi % 2) == 0);
    float res = 0.;
    for (int i = hi; i >= lo; i--) {
        res += (pair ? 1. : -1.) / (i + 1.);
        pair = !pair;
    }
    data_out[t] = res;
}
|
1,879
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
using namespace std;
// Unused stub — presumably intended to hold the neighbour-counting logic that
// main() currently performs inline on the CPU. TODO: confirm and remove or fill.
void find_alive(){
}
// Cellular-automaton variant of Life on a worldX x worldY grid: a cell counts
// live neighbours in an 11x11 window (radius 5, non-wrapping) and lives iff
// the count is in [34,58], with births only when the count is in [34,45].
// Prints the population whenever a new minimum is reached.
// NOTE(review): all three buffers use sizeof(int*) where sizeof(int) was
// meant — harmlessly over-allocates on 64-bit, but should be fixed.
// NOTE(review): INT_MAX needs <climits> and memcpy needs <cstring>; both may
// only compile via transitive includes here.
// NOTE(review): the while(alive > 0) loop may never terminate if the
// population stabilizes above zero.
int main(){
int worldX, worldY;
printf("Please enter the width of the array : ");
scanf("%d", &worldX);
printf("Please enter the height of the array : ");
scanf("%d", &worldY);
int population = worldX * worldY;
int* world = (int*)malloc(sizeof(int*) * population);
int* count = (int*)malloc(sizeof(int*) * population);
int* state = (int*)malloc(sizeof(int*) * population);
int alive = 0;
// Random initial population
srand(time(NULL));
for (int row = 0; row < worldY; row++)
for (int col = 0; col < worldX; col++){
int rand_val = rand() % 2;
world[row*worldX + col] = rand_val;
if (rand_val) alive += 1;
}
cout << alive;
memcpy(state, world, sizeof(int*) * population);
int lowest = INT_MAX;
while (alive > 0){
// Calculate alive neighbours and apply the birth/survival rules
for (int row = 0; row < worldY; row++){
//cout << "| ";
int tmp = 0;
for (int col = 0; col < worldX; col++){
// Count live cells in the 11x11 window centred on (row, col),
// excluding out-of-range cells and the cell itself.
for (int off_row = row - 5; off_row <= row + 5; off_row++){
for (int off_col = col - 5; off_col <= col + 5; off_col++)
if (!(off_row < 0 || off_row >= worldY || off_col < 0 || off_col >= worldX || (off_row == row && off_col == col))) //or substract itself
tmp += world[off_row*worldX + off_col];//tmp = tmp;
}
//cout << world[row * worldX + col] << " | ";
count[row * worldX + col] = tmp;
if (tmp >= 34 && tmp <= 58){
if (tmp <= 45 && state[row * worldX + col] == 0){
state[row * worldX + col] = 1;
alive += 1;
}
}
else {
if (state[row * worldX + col] == 1){
state[row * worldX + col] = 0;
alive -= 1;
}
}
tmp = 0;
}
//cout << "\n";
}
//cout << "\n\n\n";
//for (int row = 0; row < worldY; row++){
// cout << "| ";
// for (int col = 0; col < worldX; col++){
// cout << count[row * worldX + col] << " | ";
// }
// cout << "\n";
//}
//cout << "\n\n\n";
// Commit the next generation.
memcpy(world, state, sizeof(int*) * population);
//for (int row = 0; row < worldY; row++){
// cout << "| ";
// for (int col = 0; col < worldX; col++){
// cout << world[row * worldX + col] << " | ";
// }
// cout << "\n";
//}
// Report every new population minimum.
if (alive < lowest){
cout << '\n' << alive;
lowest = alive;
}
}
return 0;
}
|
1,880
|
/*
* Copyright 2019 Australian National University
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// this macro checks for errors in cuda calls
#define Err(ans) \
{ gpucheck((ans), __FILE__, __LINE__); }
inline void gpucheck(cudaError_t code, const char *file, int line) {
if (code != cudaSuccess) {
fprintf(stderr, "GPU Err: %s %s %d\n", cudaGetErrorString(code), file, line);
exit(code);
}
}
/*
Just to save on some time I have included the below list of cuda commands. You can cut and paste directly to set up your code.
Err(cudaMallocHost(&data_h,sizeof(int) * size));
Err(cudaMalloc(&data_d, sizeof(int) * size));
Err(cudaMemcpy(data_h, data_d, sizeof(int) * size, cudaMemcpyDeviceToHost));
Err(cudaMemcpy(data_d, data_h, sizeof(int) * size, cudaMemcpyHostToDevice));
Err(cudaDeviceSynchronize()); // remember a kernel launch is asynchronous so you may need to do this after the last kernel.
*/
// 0-1 Knapsack problem - Eric McCreath 2019
// see https://en.wikipedia.org/wiki/Knapsack_problem
// for problem and solution I used in this code.
int n = 2500; // number of items to select from
int *w; // this is weight of the items (these are positive)
int *v; // value gained by placing the item into the knapsack
int W = 100000; // the maximum weight the knapsack can take
int maxvalue = 1000; // the maximum value any item can be worth
// Objective - find a set of items to placing into the knapsack that maximizes the value while keeping the total weight less than or
// equal to W.
/* A dynamic programming approach can be used to solve the problem in pseudo-polynomial time. This works by using a 2D array, called "m", where m[k][j] is the maximum value that can be stored in the knapsack keeping the weight less than or equal to "j" and using a subset of items which have index less than "k". */
#define max(A, B) ((A) > (B) ? (A) : (B))
#define m(K, J) (m_array[(K) + (J) * (n + 1)])
// Initialize row k == 0 of the DP table: with no items considered, the best
// attainable value is 0 for every capacity j. (Writes m(0, j) for j = 0..W,
// i.e. m_array[j * (n + 1)] in the column-major layout of the m() macro.)
void zeroFirstColumn(int *m_array, int n, int W) {
    for (int j = 0; j <= W; j++)
        m_array[0 + j * (n + 1)] = 0;
}
// Walk the filled DP table backwards to print one optimal item subset: item k
// is in the solution exactly when including it improved the value, i.e.
// m(k+1, cw) > m(k, cw); each taken item reduces the remaining capacity cw.
// Reads the global item weights w[].
void displaySubSet(int *m_array, int n, int W) {
int cw;
cw = W;
printf("best value is %d using : ", m(n, cw));
for (int k = n - 1; k >= 0; k--) {
if (m(k + 1, cw) > m(k, cw)) {
// W != cw means at least one item was already printed, so prepend a comma.
if (W != cw)
printf(",");
printf("%d", k);
cw -= w[k];
}
}
printf("\n");
}
// 0-1 knapsack via the classic DP: fills the (n+1) x (W+1) table column by
// column (see the m() macro for the layout), then prints one optimal subset.
int main() {
int *m_array;
int j, k;
// obtain the memory for the 2D array
m_array = (int *)malloc(sizeof(int) * (W + 1) * (n + 1));
if (!m_array) {
printf("malloc error");
exit(1);
}
// set up and initialize the weights with values (fixed seed => reproducible)
srand(0);
w = (int *)malloc(sizeof(int) * n);
v = (int *)malloc(sizeof(int) * n);
for (k = 0; k < n; k++) {
w[k] = (rand() % (W - 1)) + 1;
v[k] = (rand() % (maxvalue - 1)) + 1;
}
// initialize the first column to 0
zeroFirstColumn(m_array, n, W);
for (k = 0; k < n; k++) {
for (j = 0; j <= W; j++) { // the next column is set based on the previous
if (w[k] > j) {
// If the item's weight is greater than the total weight we are considering it
// can not be added, so use the best value from the previous column for this weight
m((k + 1), j) = m(k, j);
} else {
// If item "k" is not added then the best value is just taken
// from the previous column (same row).
// If item "k" is added then best value is the sum of the item's value and the
// best value from the previous column and row offset by the item's weight.
// The maximum of these two options is taken.
m(k + 1, j) = max(m(k, j), v[k] + m(k, j - w[k]));
}
}
}
// Work out a sub set that gives the maximum value.
displaySubSet(m_array, n, W);
}
|
1,881
|
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// Mandelbrot iteration count for one pixel per thread: iterates z <- z^2 + c
// (split into real/imaginary parts) up to `count` times or until |z|^2 > 4.
// NOTE(review): there is no bounds guard on thisX/thisY — safe only because
// hostFE launches exactly resX/25 x resY/25 blocks (floor division); if the
// grid were rounded up, out-of-range threads would write past d_data.
__global__ void mandelKernel(int* d_data, int width, float stepX, float stepY, float lowerX, float lowerY, int count) {
// To avoid error caused by the floating number, use the following pseudo code
//
// float x = lowerX + thisX * stepX;
// float y = lowerY + thisY * stepY;
int thisX = blockIdx.x * blockDim.x + threadIdx.x;
int thisY = blockIdx.y * blockDim.y + threadIdx.y;
// c = the complex coordinate of this pixel; z starts at c.
float c_x = lowerX + thisX * stepX;
float c_y = lowerY + thisY * stepY;
float z_x = c_x;
float z_y = c_y;
int iter;
for (iter = 0; iter < count; ++iter){
// Escape test: |z|^2 > 4.
if (z_x * z_x + z_y * z_y > 4.f) break;
float new_x = z_x * z_x - z_y * z_y;
float new_y = 2.f * z_x * z_y;
z_x = c_x + new_x;
z_y = c_y + new_y;
}
// Row-major store of the iteration count for this pixel.
int idx = thisX + thisY * width;
d_data[idx] = iter;
}
// Host front-end function that allocates the memory and launches the GPU kernel
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
float stepX = (upperX - lowerX) / resX;
float stepY = (upperY - lowerY) / resY;
int N = resX * resY;
int size = N * sizeof(int);
int *data;
data = (int*) malloc(size);
int *d_data;
cudaMalloc(&d_data, size);
dim3 threadsPerBlock(25, 25);
dim3 numBlocks(resX / threadsPerBlock.x, resY / threadsPerBlock.y);
mandelKernel<<<numBlocks, threadsPerBlock>>>(d_data, resX, stepX, stepY, lowerX, lowerY, maxIterations);
cudaMemcpy(data, d_data, size, cudaMemcpyDeviceToHost);
memcpy(img, data, size);
cudaFree(d_data);
free(data);
}
|
1,882
|
/********************
*
* CUDA Kernel: row gradient computing
*
*/
/* ==================================================
*
* sub2ind - Column-major indexing of 2D arrays
*
*/
template <typename T>
// Column-major linear index of element (i, j) in a matrix with `height` rows.
__device__ __forceinline__ T sub2ind( T i, T j, T height) {
return (i + height*j);
} // end function 'sub2ind'
/* ==================================================
*
* core kernel
*
*/
// Forward difference along rows of a column-major m x (n*p) stack of matrices:
// R[i,j] = M[i+1,j] - M[i,j], with the row index clamped at the last row so the
// bottom boundary gradient is zero. One thread per output element; threads
// outside the m x (n*p) extent return immediately.
__global__
void row_filtering(double * R,
const double * M,
const int m,
const int n,
const int p){
/* thread indices */
const int j = blockIdx.y*blockDim.y+threadIdx.y;
const int i = blockIdx.x*blockDim.x+threadIdx.x;
/* matrix calculation */
if ((i >= m) || (j >= n*p) ){
return;
}
R[sub2ind(i,j,m)] = M[sub2ind(min(i+1,m-1),j,m)]-M[sub2ind(i,j,m)];
return ;
}
|
1,883
|
#include<stdio.h>
#include<cuda.h>
#include<curand.h>
#include<iostream>
#include<stdlib.h>
#include<time.h>
#include<cstdio>
#include <assert.h>
#define M 6
#define N 4000
#define K 9
#define C 1000
using namespace std;
// Computes one output element n[ybidx][xbidx] of the product mn (MxN) * m (NxK).
// Launch contract: gridDim = (K, M), blockDim.x = N — one thread per term of
// the inner product, staged through shared memory.
// NOTE(review): N is 4000 here, but CUDA caps blockDim.x at 1024, so a launch
// with N threads per block fails; with no error checking this goes unnoticed —
// confirm and restructure kernel + launch together.
__global__ void multi_kernel(int *mn,int *m, int *n){
int xbidx = blockIdx.x; // output column (0..K-1)
int ybidx = blockIdx.y; // output row (0..M-1)
int tidx = threadIdx.x; // inner-product term index (0..N-1)
// Each thread stages one elementwise product in shared memory.
__shared__ int sh_var[N];
sh_var[tidx] = mn[N * ybidx + tidx] * m[K * tidx + xbidx];
__syncthreads();
// NOTE(review): every thread of the block redundantly runs this serial sum
// into the same global element; one thread would suffice.
n[K * ybidx + xbidx ] = 0;
for(int i = 0; i<N; i++){
n[K * ybidx + xbidx] = n[K * ybidx + xbidx] + sh_var[i];
}
}
/*
 * Runs C rounds of an M x N by N x K integer matrix multiply on the GPU.
 * Matrix B is generated once; a fresh random A is generated every round.
 * Returns 0 on completion.
 *
 * Fix vs. original: the device buffers for A and C are allocated once
 * before the benchmark loop and freed once afterwards, instead of being
 * cudaMallocManaged'd and cudaFree'd on every one of the C iterations.
 *
 * NOTE(review): the kernel is launched with N (= 4000) threads per block,
 * which exceeds the 1024-thread hardware limit, so the launch fails and no
 * error check catches it — confirm and restructure together with multi_kernel.
 */
int multiplication(){
    int *a, *b, *c;
    int an[M][N];
    int bn[N][K];
    int cn[M][K];

    // Generate random matrix B once (the sizeof(int) scale factor is kept
    // from the original).
    for (int i = 0; i < N; i++){
        for (int j = 0; j < K; j++){
            bn[i][j] = (int)rand() % 100 * sizeof(int);
        }
    }
    cout << "Matrix B generated" << endl;
    cudaMallocManaged((void **)&b, N * K * sizeof(int));
    cudaMemcpy(b, bn, N * K * sizeof(int), cudaMemcpyHostToDevice);

    // One block per output element: K columns x M rows.
    dim3 gridDim(K,M);

    // Allocate the A and C device buffers once, outside the loop.
    cudaMallocManaged((void **)&a, M * N * sizeof(int));
    cudaMallocManaged((void **)&c, M * K * sizeof(int));

    for (int i = 0; i < C; i++){
        // Fresh random A each round.
        for (int k = 0; k < M; k++){
            for (int l = 0; l < N; l++){
                an[k][l] = (int)rand() % 100 * sizeof(int);
            }
        }
        cudaMemcpy(a, an, M * N * sizeof(int), cudaMemcpyHostToDevice);
        // NOTE(review): N threads/block exceeds the CUDA limit — see multi_kernel.
        multi_kernel <<< gridDim, N >>> (a, b, c);
        cudaMemcpy(cn, c, M * K * sizeof(int), cudaMemcpyDeviceToHost);
    }

    cudaFree(a);
    cudaFree(c);
    cudaFree(b);
    cout << "Completed Successfully" << endl;
    cout << "[" << M << "] " << "x" << " [" << N << "] " << "*"<< " [" << N << "] "<< "x" << " [" << K << "]"<< endl;
    return 0;
}
// Entry point: seed the RNG, run the multiplication benchmark, and report
// the elapsed wall time in whole seconds.
int main(){
    time_t seed;
    srand((unsigned) time(&seed));

    time_t begin = time(NULL);
    multiplication();
    time_t finish = time(NULL);

    cout << "Total execution time: " << (finish - begin) << " seconds" << endl;
    return 0;
}
|
1,884
|
#include "includes.h"
// Elementwise inverse standard deviation: dst[i] = 1 / sqrt(src[i] + epsilon)
// for each of the `size` entries. One thread per element; excess threads return.
__global__ void inverse_variance_kernel(int size, float *src, float *dst, float epsilon)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size)
        return;
    dst[i] = 1.0f / sqrtf(src[i] + epsilon);
}
|
1,885
|
#include "includes.h"
// Converts one row of a packed complex FFT result into its power spectrum.
// One block per row; threads stride over the complex bins in steps of
// CU1DBLOCK. Bin 0 (DC) and bin row_length/2 (Nyquist) are purely real in
// the cufft layout and are handled separately by thread 0.
__global__ void power_spectrum_kernel(int row_length, float *A_in, int32_t ldi, float *A_out, int32_t ldo) {
  const int tid = threadIdx.x;
  float *src = A_in + blockIdx.x * ldi;
  float *dst = A_out + blockIdx.x * ldo;
  const int n_bins = row_length / 2;

  for (int k = tid; k < n_bins; k += CU1DBLOCK) {
    if (k == 0) continue;  // DC bin handled below
    float2 c = reinterpret_cast<float2 *>(src)[k];
    dst[k] = c.x * c.x + c.y * c.y;
  }

  if (tid == 0) {
    // cufft stores the (real) Nyquist component at src[row_length],
    // unlike kaldi's own internal FFT implementation.
    const float dc = src[0];
    const float nyq = src[row_length];
    dst[0] = dc * dc;
    dst[n_bins] = nyq * nyq;
  }
}
|
1,886
|
/**
* APPROXIMATE PATTERN MATCHING
*
* INF560
*/
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/time.h>
#define APM_DEBUG 0
/*
 * Reads the whole of `filename` into a freshly malloc'd buffer.
 * On success stores the byte count in *size and returns the buffer
 * (caller frees). Returns NULL on any error.
 *
 * Fixes vs. original: the file descriptor is closed on every error path
 * (it used to leak), and the buffer is freed when the read comes up short.
 */
char *
read_input_file( char * filename, int * size )
{
    char * buf ;
    off_t fsize;
    int fd = 0 ;
    int n_bytes = 1 ;

    /* Open the text file */
    fd = open( filename, O_RDONLY ) ;
    if ( fd == -1 )
    {
        fprintf( stderr, "Unable to open the text file <%s>\n", filename ) ;
        return NULL ;
    }

    /* Get the number of characters in the textfile */
    fsize = lseek(fd, 0, SEEK_END);
    if ( fsize == -1 )
    {
        fprintf( stderr, "Unable to lseek to the end\n" ) ;
        close( fd ) ;
        return NULL ;
    }
#if APM_DEBUG
    printf( "File length: %lld\n", fsize ) ;
#endif

    /* Go back to the beginning of the input file */
    if ( lseek(fd, 0, SEEK_SET) == -1 )
    {
        fprintf( stderr, "Unable to lseek to start\n" ) ;
        close( fd ) ;
        return NULL ;
    }

    /* Allocate data to copy the target text */
    buf = (char *)malloc( fsize * sizeof ( char ) ) ;
    if ( buf == NULL )
    {
        fprintf( stderr, "Unable to allocate %lld byte(s) for main array\n",
                 fsize ) ;
        close( fd ) ;
        return NULL ;
    }

    n_bytes = read( fd, buf, fsize ) ;
    if ( n_bytes != fsize )
    {
        fprintf( stderr,
                 "Unable to copy %lld byte(s) from text file (%d byte(s) copied)\n",
                 fsize, n_bytes) ;
        free( buf ) ;
        close( fd ) ;
        return NULL ;
    }
#if APM_DEBUG
    printf( "Number of read bytes: %d\n", n_bytes ) ;
#endif

    *size = n_bytes ;
    close( fd ) ;
    return buf ;
}
#define MIN3(a, b, c) ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
/* Levenshtein distance between the first `len` characters of s1 and s2,
 * computed with a single rolling column of len+1 ints supplied by the
 * caller in `column`. Callable from both host and device code. */
__host__ __device__ int levenshtein(char *s1, char *s2, int len, int * column) {
    for (unsigned int r = 1; r <= (unsigned int)len; r++) {
        column[r] = r;
    }
    for (unsigned int c = 1; c <= (unsigned int)len; c++) {
        column[0] = c;
        unsigned int diag = c - 1; /* value of column[r-1] from the previous iteration */
        for (unsigned int r = 1; r <= (unsigned int)len; r++) {
            unsigned int saved = column[r];
            unsigned int del = column[r] + 1;
            unsigned int ins = column[r-1] + 1;
            unsigned int sub = diag + (s1[r-1] == s2[c-1] ? 0 : 1);
            unsigned int best = del < ins ? del : ins;
            if (sub < best) best = sub;
            column[r] = best;
            diag = saved;
        }
    }
    return column[len];
}
/*
 * Grid-stride loop over all positions j in d_buf: counts positions whose
 * Levenshtein distance to pattern i (located at d_pattern + offset,
 * size_pattern chars) is <= approx_factor, accumulating atomically into
 * d_n_matches[i].
 *
 * Fix vs. original: the device-heap malloc for the DP column is now
 * checked — device malloc can return NULL, which previously caused an
 * unchecked NULL dereference inside levenshtein().
 */
__global__ void matchesKernel(int* d_n_matches, char * d_buf, char * d_pattern, int i, int size_pattern, int offset, int n_bytes, int approx_factor){
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int distance = 0 ;
    int size ;
    size = size_pattern ;

    /* per-thread scratch column for the Levenshtein DP */
    int* columns = (int *) malloc((size_pattern + 1) * sizeof(int));
    if (columns == NULL) {
        return;  /* device heap exhausted: skip rather than crash */
    }

    while (j < n_bytes) {
        /* clip the window when the pattern overhangs the end of the text */
        if (n_bytes - j < size_pattern ){
            size = n_bytes - j ;
        }
        distance = levenshtein(d_pattern + offset, &d_buf[j], size, columns ) ;
        if ( distance <= approx_factor) {
            atomicAdd(&d_n_matches[i], 1);
        }
        j += stride;
    }
    free(columns);
}
/*
 * Approximate pattern matching driver: loads a text file, copies it and
 * all patterns to the GPU once (patterns packed into a single buffer),
 * then launches one kernel per pattern to count positions within
 * approx_factor Levenshtein distance.
 *
 * NOTE(review): the device buffers (d_buf, d_pattern, d_n_matches) and the
 * host allocations are never freed before exit, and the result of
 * cudaGetLastError() after each launch is discarded — confirm intent.
 */
int
main( int argc, char ** argv )
{
char ** pattern ;
char * filename ;
int approx_factor = 0 ;
int nb_patterns = 0 ;
int i;
char * buf ;
struct timeval t1, t2;
double duration ;
int n_bytes ;
int * n_matches ;
/* Check number of arguments */
if ( argc < 4 )
{
printf( "Usage: %s approximation_factor "
"dna_database pattern1 pattern2 ...\n",
argv[0] ) ;
return 1 ;
}
/* Get the distance factor */
approx_factor = atoi( argv[1] ) ;
/* Grab the filename containing the target text */
filename = argv[2] ;
/* Get the number of patterns that the user wants to search for */
nb_patterns = argc - 3 ;
/* Fill the pattern array */
pattern = (char **)malloc( nb_patterns * sizeof( char * ) ) ;
if ( pattern == NULL )
{
fprintf( stderr,
"Unable to allocate array of pattern of size %d\n",
nb_patterns ) ;
return 1 ;
}
/* Grab the patterns */
for ( i = 0 ; i < nb_patterns ; i++ )
{
int l ;
l = strlen(argv[i+3]) ;
if ( l <= 0 )
{
fprintf( stderr, "Error while parsing argument %d\n", i+3 ) ;
return 1 ;
}
pattern[i] = (char *)malloc( (l+1) * sizeof( char ) ) ;
if ( pattern[i] == NULL )
{
fprintf( stderr, "Unable to allocate string of size %d\n", l ) ;
return 1 ;
}
strncpy( pattern[i], argv[i+3], (l+1) ) ;
}
printf( "Approximate Pattern Mathing: "
"looking for %d pattern(s) in file %s w/ distance of %d\n",
nb_patterns, filename, approx_factor ) ;
buf = read_input_file( filename, &n_bytes ) ;
if ( buf == NULL )
{
return 1 ;
}
/* Allocate the array of matches */
n_matches = (int *)malloc( nb_patterns * sizeof( int ) ) ;
for (i = 0; i < nb_patterns; i++) {
n_matches[i] = 0;
}
/* NOTE(review): this NULL check runs after n_matches was already
 * dereferenced by the zeroing loop above — too late to be effective. */
if ( n_matches == NULL )
{
fprintf( stderr, "Error: unable to allocate memory for %ldB\n",
nb_patterns * sizeof( int ) ) ;
return 1 ;
}
/*****
* BEGIN MAIN LOOP
******/
/* Timer start */
gettimeofday(&t1, NULL);
/* Check each pattern one by one */
int* d_n_matches;
char * d_pattern;
char* d_buf;
/* Pack all patterns into one contiguous buffer so a single H2D copy
 * suffices; offset[i] and lens[i] locate pattern i inside it. */
int* offset = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int* lens = (int *)malloc( nb_patterns * sizeof( int ) ) ;
int sum_lens;
lens[0] = strlen(pattern[0]);
offset[0] = 0;
sum_lens = lens[0];
for (i = 1; i < nb_patterns; i++) {
offset[i] = offset[i-1] + lens[i-1];
lens[i] = strlen(pattern[i]);
sum_lens += lens[i];
}
/* NOTE(review): strcpy also writes the terminating NUL, so the last
 * pattern writes one byte past the end of concat_patterns — confirm. */
char* concat_patterns = (char*) malloc( sum_lens * sizeof( char ) ) ;
for (i = 0; i < nb_patterns; i++) {
strcpy (concat_patterns + offset[i], pattern[i]);
}
cudaError_t error;
cudaMalloc((void **)&d_n_matches, nb_patterns*sizeof(int));
cudaMalloc((void **)&d_pattern, sum_lens*sizeof(char));
cudaMalloc((void **)&d_buf, n_bytes);
cudaMemcpy(d_pattern, concat_patterns, sum_lens*sizeof(char), cudaMemcpyHostToDevice);
cudaMemcpy(d_buf, buf, n_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_n_matches, n_matches, nb_patterns*sizeof(int), cudaMemcpyHostToDevice);
/* Fixed launch geometry; the kernel walks the text with a grid-stride
 * loop, so any grid size is correct. */
int Dg = 4;
int Db = 256;
for (i = 0; i < nb_patterns; i++) {
matchesKernel<<<Dg,Db>>>(d_n_matches, d_buf, d_pattern, i, lens[i], offset[i], n_bytes, approx_factor);
cudaGetLastError();
}
/* Blocking D2H copy also synchronizes with the kernels above. */
cudaMemcpy(n_matches, d_n_matches, nb_patterns*sizeof(int), cudaMemcpyDeviceToHost);
/* Timer stop */
gettimeofday(&t2, NULL);
duration = (t2.tv_sec -t1.tv_sec)+((t2.tv_usec-t1.tv_usec)/1e6);
printf( "APM done in %lf s\n", duration ) ;
/*****
* END MAIN LOOP
******/
for ( i = 0 ; i < nb_patterns ; i++ )
{
printf( "Number of matches for pattern <%s>: %d\n",
pattern[i], n_matches[i] ) ;
}
return 0 ;
}
|
1,887
|
//
// findCutoff.cu
//
//This file contains the function that determines the most
//efficient number of assemblies to perform on the gpu
#include <cuda.h>
#include <iostream>
void cudaAssemble(double Zs[],double Xs[], int num, double nZs[], double nXs[], int odd, int newlen);
void cudaDisassemble(double OldAF[], double Zs[], double Xs[],double nZs[], double nXs[], int odd, int morelen, int lesslen, double AF[]);
void Assemble(double Zs[], double Xs[],double nZs[], double nXs[], int len, int odd, int n);
void Disassemble(double lessZs[], double lessXs[],double moreZs[], double moreXs[], double oldAs[] ,double newAs[], int num, int odd);
/*
 * Empirically finds the level of the binary assembly tree at which the GPU
 * path (cudaAssemble/cudaDisassemble) stops beating the CPU path
 * (Assemble/Disassemble) for an n-body DCA run, timing each level
 * `accuracy` times with CUDA events. Returns numAssemblies - count, the
 * chosen cutoff level.
 *
 * NOTE(review): time1 and time2 both start at 0, so the loop condition
 * time1 >= time2 is trivially true on the first pass.
 * NOTE(review): count is set to 1 right before the loop, so every
 * `if(count==0)` branch inside the loop is dead code — confirm intent.
 * NOTE(review): the AF/Zs/Xs/nZs/nXs/AFo buffers allocated per iteration
 * are never freed (one leaked set per probed level), and they are passed
 * uninitialized — fine for timing only, but worth confirming.
 */
int findCutoff(int n, int accuracy)
{
//Variable declarations
int x=n;
int numAssemblies=0;
int count=0;
int *numbods;
int odd;
//Timer creation
float time1;
float time2;
time1=0;
time2=0;
cudaEvent_t beginEvent;
cudaEvent_t endEvent;
cudaEventCreate( &beginEvent );
cudaEventCreate( &endEvent );
//Determine the number of assemblies needed to completely assemble n bodies
//(halving the body count, rounding up, until one body remains)
while(x!=1)
{
if( x%2==0)
{
x=x/2;
}
else
{
x++;
x=x/2;
}
numAssemblies++;
}
//Allocate space for a matrix that holds the number of bodies at each level of assembly
numbods=(int*)malloc(sizeof(int)*numAssemblies);
//Fill numbods
x=n;
while(count<numAssemblies)
{
numbods[count]=x;
if(x%2==0)
{
x=x/2;
}
else
{
x++;
x=x/2;
}
count++;
}
count=1;
//Begin process of finding most efficient number of assemblies
while((count<numAssemblies) && time1>=time2) //Compare time for gpu and cpu to complete assembly
{
//Create and allocate space for empty variables to mimic the dca algorithm at a level of assembly
double* AF;
double* Zs;
double* Xs;
double* nZs;
double* nXs;
double* AFo;
AF=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*4*6);
Zs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*26*6);
Xs=(double*)malloc(sizeof(double)*numbods[numAssemblies-1-count]*5*5);
if(count==0)
{
nZs=(double*)malloc(sizeof(double)*26*6);
nXs=(double*)malloc(sizeof(double)*25);
AFo=(double*)malloc(sizeof(double)*6*4);
}
else
{
nZs=(double*)malloc(sizeof(double)*26*6*numbods[numAssemblies-count]);
nXs=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*25);
AFo=(double*)malloc(sizeof(double)*numbods[numAssemblies-count]*6*4);
}
//Check the parity of the number of bodies at the current level of assembly
if(numbods[numAssemblies-1-count]%2==0)
{
odd=0;
}
else
{
odd=1;
}
//Check the gpu speed
cudaEventRecord( beginEvent, 0 ); //Begin timer
for(int i=0; i<accuracy; i++) //Perform the operations a set number of times to vary accuracy
{
//Test both assembly and disassembly at this level
if(count==0)
{
cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,1);
cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],1,AF);
}
else
{
cudaAssemble(Zs,Xs,numbods[numAssemblies-1-count],nZs,nXs,odd,numbods[numAssemblies-count]);
cudaDisassemble(AFo,Zs,Xs,nZs,nXs,odd,numbods[numAssemblies-1-count],numbods[numAssemblies-count],AF);
}
}
//End timer
cudaEventRecord( endEvent, 0 );
cudaEventSynchronize( endEvent );
cudaEventElapsedTime( &time1, beginEvent, endEvent );
//Check the cpu speed (CUDA events also bracket host-side work here)
cudaEventRecord( beginEvent,0); //begin timing
for(int i=0; i<accuracy; i++) //Perfom the operations a set number of times to vary accuracy
{
//Test both assembly and disassembly at this level
if(count==0)
{
Assemble(Zs,Xs,nZs,nXs,1,odd,numbods[numAssemblies-1-count]);
Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd);
}
else
{
Assemble(Zs,Xs,nZs,nXs,numbods[numAssemblies-count],odd,numbods[numAssemblies-1-count]);
Disassemble(nZs,nXs,Zs,Xs,AFo,AF,numbods[numAssemblies-count],odd);
}
}
//End timer
cudaEventRecord( endEvent,0);
cudaEventSynchronize(endEvent);
cudaEventElapsedTime( &time2,beginEvent, endEvent);
count++;
}
return numAssemblies-count;
}
|
1,888
|
#include "includes.h"
/*
 * Direct-sum gravitational potential: phi[i] = -sum_j m[j] / |r_i - r_j|.
 * When N1 == 0, all N particles are both sources and targets (the self
 * term is skipped via i != j); when N1 > 0, targets are i < N1 and
 * sources are j in [N1, N).
 *
 * Fixes vs. original: the accumulator `potential` was read before ever
 * being written (uninitialized); it is now zero-initialized. sqrt() was
 * the double-precision overload in an otherwise all-float kernel; sqrtf()
 * is used instead.
 */
__global__ void compute_potential_gpu(float *m, float *x, float *y, float *z, float *phi, int N, int N1) {
    int i = threadIdx.x + blockIdx.x*blockDim.x;
    if (i < (N1 == 0 ? N : N1))
    {
        float xi = x[i];
        float yi = y[i];
        float zi = z[i];
        float potential = 0.0f;  /* was previously uninitialized */
        for (int j = (N1 == 0 ? 0 : N1); j < N; j++)
        {
            float rijx = xi - x[j];
            float rijy = yi - y[j];
            float rijz = zi - z[j];
            if (i != j)
                potential -= m[j]/sqrtf(rijx*rijx + rijy*rijy + rijz*rijz);
        }
        phi[i] = potential;
    }
}
|
1,889
|
#include<stdio.h>
#include<stdlib.h>
/*the gpu kernel, to be launched on threads + blocks + grid heirarchy*/
/* One thread per row: scans the DIM entries of its row of d_in and writes
 * the row maximum to d_out. Intended for a single-block launch where
 * threadIdx.x selects the row. */
__global__
void KERNEL_max(float *d_out, float *d_in, int DIM)
{
    const int row = threadIdx.x;
    float *row_ptr = d_in + row * DIM;

    float best = row_ptr[0];
    for (int col = 1; col < DIM; col++) {
        if (row_ptr[col] > best) {
            best = row_ptr[col];
        }
    }
    d_out[row] = best;
}
/* Returns a pseudo-random value in [min, max] inclusive, as a float. */
float random_number(int min, int max)
{
    int span = max + 1 - min;
    return (float)(rand() % span + min);
}
/*
 * Demo driver: fills a NUM_SAMPLES x DIM host matrix with random values in
 * [1,100], computes each row's maximum on the GPU (one thread per row,
 * single block), and prints the input rows and the per-row maxima.
 * NOTE(review): h_in and h_out are malloc'd but never freed — harmless at
 * process exit, but worth tidying.
 */
int main()
{
const int NUM_SAMPLES=10;
const int DIM=16;
// 1. HOST CODE
// input array (host)
float * h_in = (float *)malloc(NUM_SAMPLES*DIM*sizeof(float));
for(int i=0; i<NUM_SAMPLES; i++){
for(int j=0; j<DIM; j++){
*(h_in + i*DIM + j) = random_number(1,100);
}
}
// print the input array
for(int i=0; i<NUM_SAMPLES; i++){
printf("\nRow %d \n", i);
for(int j=0; j<DIM; j++){
printf(" %f ", *(h_in + i*DIM + j));
}
printf("\n");
}
// output array (host): one slot per row maximum
float * h_out = (float *)malloc(NUM_SAMPLES*sizeof(float));
for(int i=0; i<NUM_SAMPLES; i++){
h_out[i] = 0.0;
}
// 2. DEVICE CODE
float * d_in;
float * d_out;
// copy h_in to d_in
cudaMalloc((void **) &d_in, NUM_SAMPLES*DIM*sizeof(float));
cudaMalloc((void **) &d_out, NUM_SAMPLES*sizeof(float));
cudaMemcpy(d_in, h_in, NUM_SAMPLES*DIM*sizeof(float), cudaMemcpyHostToDevice);
// launch the kernel: one block, one thread per sample row
KERNEL_max<<<1, NUM_SAMPLES>>>(d_out, d_in, DIM);
// copy the output back to host memory (blocking; also syncs the kernel)
cudaMemcpy(h_out, d_out, NUM_SAMPLES*sizeof(float), cudaMemcpyDeviceToHost);
// print the resultant
for(int i=0; i<NUM_SAMPLES; i++){
printf("Row %d : %f \n", i, h_out[i]);
}
/*// free GPU memory allocation */
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
1,890
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <assert.h>
#include <cuda.h>
#define u32 unsigned int
#define u64 unsigned long
#define uchar unsigned char
#define BLOCK_SIZE 64
#define CREATE_RAND_ARR(arr, size, min, max) \
do { \
time_t t; \
srand((unsigned)time(&t)); \
for (u32 i = 0; i < size; i++) \
arr[i] = rand() % max + min; \
} while (0) \
#define PRINT_ARR(arr, size) \
do { \
for (u32 i = 0; i < size; i++) \
printf("%u, ", arr[i]); \
printf("\n"); \
} while (0) \
/**
* @ref https://stackoverflow.com/questions/5447570/cuda-atomic-operations-on-unsigned-chars
*/
/*
 * Atomically adds `val` to the single byte at `address` and returns the
 * byte's previous value. Works by issuing a 32-bit atomicAdd on the
 * 4-byte-aligned word containing the byte, then undoing any carry that
 * spilled into the neighbouring byte. For the top byte of the word
 * (modulo == 3) the carry falls off the end of the 32-bit add, so no
 * correction is needed.
 */
__device__
uchar atomicUChatAdd(uchar* address, const uchar val)
{
// byte offset of `address` within its aligned 32-bit word
u64 long_address_modulo = (u64) address & 3;
u32* base_address = (u32*) ((char*) address - long_address_modulo);
// shift val into the byte lane being updated
u32 long_val = (u32) val << (8 * long_address_modulo);
u32 long_old = atomicAdd(base_address, long_val);
if (long_address_modulo == 3) {
// the first 8 bits of long_val represent the char value,
// hence the first 8 bits of long_old represent its previous value.
return (char) (long_old >> 24);
}
else {
// bits that represent the char value within long_val
unsigned int mask = 0x000000ff << (8 * long_address_modulo);
unsigned int masked_old = long_old & mask;
// isolate the bits that represent the char value within long_old, add the long_val to that,
// then re-isolate by excluding bits that represent the char value
unsigned int overflow = (masked_old + long_val) & ~mask;
if (overflow) {
atomicSub(base_address, overflow);
}
return (char) (masked_old >> 8 * long_address_modulo);
}
}
/* Histogram built directly in global memory: each thread reads one element
 * of arr and, if it falls inside [0, hist_bins), bumps the corresponding
 * bin with a byte-wide atomic add. */
__global__
void global_hist_kernel(const uchar* __restrict__ arr,
                        const u32 arr_size,
                        uchar* hist,
                        const u32 hist_bins)
{
    const u32 gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= arr_size) {
        return;
    }
    const uchar v = arr[gid];
    if (v < hist_bins) {
        atomicUChatAdd(&hist[v], 1);
    }
}
/*
 * Histogram with a per-block shared-memory staging buffer: each block
 * accumulates into shared bins, then merges them into the global
 * histogram. Requires hist_bins bytes of dynamic shared memory.
 *
 * Fix vs. original: the shared bins were zeroed using the *global* thread
 * index (id_x < hist_bins), so every block after the first read
 * uninitialized shared memory. Zeroing now uses the block-local index.
 */
__global__
void shared_hist_kernel(const uchar* __restrict__ arr,
                        const u32 arr_size,
                        uchar* hist,
                        const u32 hist_bins)
{
    extern __shared__ uchar shared_hist[];
    u32 id_x = blockIdx.x * blockDim.x + threadIdx.x;

    /* zero this block's private bins (block-local index, not global) */
    for (u32 i = threadIdx.x; i < hist_bins; i += blockDim.x) {
        shared_hist[i] = 0;
    }
    __syncthreads();

    if (id_x < arr_size) {
        if (arr[id_x] < hist_bins) {
            atomicUChatAdd(&shared_hist[arr[id_x]], 1);
        }
    }
    __syncthreads();

    /* merge this block's bins into the global histogram */
    for (u32 i = threadIdx.x; i < hist_bins; i += blockDim.x) {
        atomicUChatAdd(&hist[i], shared_hist[i]);
    }
}
/*
 * Host wrapper: copies h_arr to the device, runs the global-memory
 * histogram kernel, and copies the hist_bins counts back into h_hist.
 *
 * Fix vs. original: d_hist is now zero-initialized with cudaMemset —
 * cudaMalloc returns uninitialized memory, so the bins previously started
 * from garbage.
 */
void gpuGlobalHist(uchar* h_arr, const u32 arr_size, uchar* h_hist, const u32 hist_bins)
{
    uchar* d_arr;
    cudaMalloc((void**)&d_arr, arr_size * sizeof(uchar));
    cudaMemcpy(d_arr, h_arr, arr_size * sizeof(uchar), cudaMemcpyHostToDevice);

    uchar* d_hist;
    cudaMalloc((void**)&d_hist, hist_bins * sizeof(uchar));
    cudaMemset(d_hist, 0, hist_bins * sizeof(uchar));  /* bins must start at zero */

    dim3 blocks = BLOCK_SIZE;
    dim3 grids = (arr_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    global_hist_kernel <<< grids, blocks >>> (d_arr, arr_size, d_hist, hist_bins);
    if (cudaSuccess != cudaGetLastError()) {
        printf("global_hist_kernel fault!\n");
    }

    cudaMemcpy(h_hist, d_hist, hist_bins * sizeof(uchar), cudaMemcpyDeviceToHost);
    cudaFree(d_arr);
    cudaFree(d_hist);
}
/*
 * Host wrapper: copies h_arr to the device, runs the shared-memory
 * histogram kernel (hist_bins bytes of dynamic shared memory per block),
 * and copies the hist_bins counts back into h_hist.
 *
 * Fix vs. original: d_hist is now zero-initialized with cudaMemset —
 * cudaMalloc returns uninitialized memory, so the bins previously started
 * from garbage.
 */
void gpuSharedHist(uchar* h_arr, const u32 arr_size, uchar* h_hist, const u32 hist_bins)
{
    uchar* d_arr;
    cudaMalloc((void**)&d_arr, arr_size * sizeof(uchar));
    cudaMemcpy(d_arr, h_arr, arr_size * sizeof(uchar), cudaMemcpyHostToDevice);

    uchar* d_hist;
    cudaMalloc((void**)&d_hist, hist_bins * sizeof(uchar));
    cudaMemset(d_hist, 0, hist_bins * sizeof(uchar));  /* bins must start at zero */

    dim3 blocks = BLOCK_SIZE;
    dim3 grids = (arr_size + BLOCK_SIZE - 1) / BLOCK_SIZE;
    u32 shared_mem_size = hist_bins * sizeof(uchar);
    shared_hist_kernel <<< grids, blocks, shared_mem_size >>> (d_arr, arr_size, d_hist, hist_bins);
    if (cudaSuccess != cudaGetLastError()) {
        printf("shared_clac_hist_kernel fault!\n");
    }

    cudaMemcpy(h_hist, d_hist, hist_bins * sizeof(uchar), cudaMemcpyDeviceToHost);
    cudaFree(d_arr);
    cudaFree(d_hist);
}
/*
 * Reference CPU histogram: counts occurrences of each value < hist_bins.
 *
 * Fix vs. original: hist is zeroed first — callers pass a malloc'd
 * (uninitialized) buffer, so counts previously started from garbage.
 */
void cpuHist(uchar* arr, const u32 arr_size, uchar* hist, const u32 hist_bins)
{
    for (u32 b = 0; b < hist_bins; b++) {
        hist[b] = 0;
    }
    for (u32 i = 0; i < arr_size; i++) {
        if (arr[i] < hist_bins) {
            hist[arr[i]]++;
        }
    }
}
/*
 * Demo: builds a 1000-element array of random bytes (CREATE_RAND_ARR with
 * max=8 yields rand()%8, i.e. values 0..7), histograms it with the CPU
 * reference and both GPU kernels, and prints all three for comparison.
 * NOTE(review): all three histogram buffers are malloc'd without being
 * zeroed, and the counting routines accumulate in place, so the printed
 * counts may start from garbage — confirm and zero them.
 */
int main()
{
const u32 arr_size = 1000;
const u32 hist_bins = 8;
uchar* arr = (uchar*)malloc(arr_size * sizeof(uchar));
CREATE_RAND_ARR(arr, arr_size, 0, 8);
uchar* cpu_hist = (uchar*)malloc(hist_bins * sizeof(uchar));
cpuHist(arr, arr_size, cpu_hist, hist_bins);
printf("CPU hist:\n");
PRINT_ARR(cpu_hist, hist_bins);
uchar* gpu_global_hist = (uchar*)malloc(hist_bins * sizeof(uchar));
gpuGlobalHist(arr, arr_size, gpu_global_hist, hist_bins);
printf("GPU hist - use global memory:\n");
PRINT_ARR(gpu_global_hist, hist_bins);
uchar* gpu_shared_hist = (uchar*)malloc(hist_bins * sizeof(uchar));
gpuSharedHist(arr, arr_size, gpu_shared_hist, hist_bins);
printf("GPU hist - use shared memory:\n");
PRINT_ARR(gpu_shared_hist, hist_bins);
free(arr);
free(cpu_hist);
free(gpu_global_hist);
free(gpu_shared_hist);
return 0;
}
|
1,891
|
#include "includes.h"
__global__ void devFillAffectedIndex(int nRemove, int maxTriPerVert, int *pTriangleAffectedIndex)
{
int n = blockIdx.x*blockDim.x + threadIdx.x;
while (n < nRemove) {
for (int i = 0; i < maxTriPerVert; i++) {
pTriangleAffectedIndex[i + n*maxTriPerVert] = n;
pTriangleAffectedIndex[i + n*maxTriPerVert + nRemove*maxTriPerVert] = n;
}
n += blockDim.x*gridDim.x;
}
}
|
1,892
|
#include <stdio.h>
int main(){
int nDevices;
cudaGetDeviceCount(&nDevices);
printf("%d devices found supporting CUDA\n", nDevices);
for(int i = 0; i < nDevices; i++){
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("----------------------------------\n");
printf("Device %s\n", prop.name);
printf("----------------------------------\n");
printf(" Device memory: %zu\n", prop.totalGlobalMem);
printf(" Memory per-block: %zu\n", prop.sharedMemPerBlock);
printf(" Register per-block: %d\n", prop.regsPerBlock);
printf(" Warp size: %d\n", prop.warpSize);
printf(" Memory pitch: %zu\n", prop.memPitch);
printf(" Constant Memory: %zu\n", prop.totalConstMem);
printf(" Max thread per-block: %d\n", prop.maxThreadsPerBlock);
printf(" Max thread dim: %d / %d / %d\n", prop.maxThreadsDim[0],
prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf(" Max grid size: %d / %d / %d\n", prop.maxGridSize[0],
prop.maxGridSize[1], prop.maxGridSize[2]);
printf(" Ver: %d.%d\n", prop.major, prop.minor);
printf(" Clock: %d\n", prop.clockRate);
printf(" Texture Alignment: %zu\n", prop.textureAlignment);
}
}
|
1,893
|
#include<iostream>
#include<stdlib.h>
#include <cuda.h>
#include <math.h>
#define RADIUS 3
// Counts entries in [startElem, endElem) where cudaRes and res differ by
// more than a small absolute tolerance (1e-4).
int checkResults(int startElem, int endElem, float* cudaRes, float* res)
{
    const float tol = 0.0001f;
    int mismatches = 0;
    for (int k = startElem; k < endElem; ++k) {
        if (fabs(cudaRes[k] - res[k]) > tol) {
            ++mismatches;
        }
    }
    return mismatches;
}
/* Fills the 7-tap stencil weights. Hardcoded for RADIUS == 3; `rad` is
 * accepted for interface compatibility but not used. */
void initializeWeights(float* weights, int rad)
{
    const float taps[7] = { 0.50f, 0.75f, 1.25f, 2.00f, 1.25f, 0.75f, 0.50f };
    for (int k = 0; k < 7; ++k) {
        weights[k] = taps[k];
    }
}
/*
 * Fills arr[0..nElements) with floats scanned from fp, wrapping back to
 * the start of the file when it runs out of values.
 *
 * Fix vs. original: on EOF the original rewound the file but left the
 * current element unread (one uninitialized slot per wrap). Now the read
 * is retried after the rewind; if the file yields nothing at all, the
 * element is set to 0 to avoid an infinite loop on an empty file.
 */
void initializeArray(FILE* fp,float* arr, int nElements)
{
    for( int i=0; i<nElements; i++){
        if (fscanf(fp, "%f", &arr[i]) == EOF) {
            rewind(fp);
            /* retry from the start of the file */
            if (fscanf(fp, "%f", &arr[i]) == EOF) {
                arr[i] = 0.0f;  /* empty/unreadable file */
            }
        }
    }
}
/* CPU reference stencil: out[i] = (7-tap weighted sum of in[i-RADIUS..i+RADIUS])
 * divided by 2*RADIUS+1, for i in [sIdx, eIdx). Requires in[] valid over
 * the stencil footprint. */
void applyStencil1D_SEQ(int sIdx, int eIdx, const float *weights, float *in, float *out) {
    for (int i = sIdx; i < eIdx; i++) {
        float acc = 0;
        // accumulate all taps of the stencil
        for (int j = -RADIUS; j <= RADIUS; j++) {
            acc += weights[j + RADIUS] * in[i + j];
        }
        out[i] = acc / (2 * RADIUS + 1);
    }
}
/* GPU stencil: each thread computes one output point i in [sIdx, eIdx) as
 * the 7-tap weighted sum of in[i-3..i+3] divided by 7. */
__global__ void applyStencil1D(int sIdx, int eIdx, const float *weights, float *in, float *out) {
    const int i = sIdx + blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= eIdx) {
        return;
    }
    float acc = 0.f;
    acc += weights[0] * in[i - 3];
    acc += weights[1] * in[i - 2];
    acc += weights[2] * in[i - 1];
    acc += weights[3] * in[i];
    acc += weights[4] * in[i + 1];
    acc += weights[5] * in[i + 2];
    acc += weights[6] * in[i + 3];
    out[i] = acc / 7.f;
}
/*
 * Driver: reads N floats from problem1.inp, applies the 7-tap stencil on
 * both GPU and CPU, and compares the results.
 *
 * Fixes vs. original: the input file handle is checked (fopen failure
 * previously crashed inside fscanf) and closed after use.
 */
int main(int argc, char* argv[]) {
    if(argc!=2){
        printf("Usage %s N\n",argv[0]);
        return 1;
    }
    int N=atoi(argv[1]);

    FILE *fp = fopen("problem1.inp","r");
    if (fp == NULL) {
        printf("Unable to open problem1.inp\n");
        return 1;
    }

    int size = N * sizeof(float);
    int wsize = (2 * RADIUS + 1) * sizeof(float);

    //allocate resources
    float *weights = (float *)malloc(wsize);
    float *in = (float *)malloc(size);
    float *out = (float *)malloc(size);
    float *cuda_out= (float *)malloc(size);
    float time = 0.f;

    initializeWeights(weights, RADIUS);
    initializeArray(fp,in, N);
    fclose(fp);  // input fully consumed

    float *d_weights; cudaMalloc(&d_weights, wsize);
    float *d_in; cudaMalloc(&d_in, size);
    float *d_out; cudaMalloc(&d_out, size);

    cudaMemcpy(d_weights,weights,wsize,cudaMemcpyHostToDevice);
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);

    // one thread per interior point; the RADIUS boundary cells are untouched
    applyStencil1D<<<(N+511)/512, 512>>>(RADIUS, N-RADIUS, d_weights, d_in, d_out);
    cudaMemcpy(cuda_out, d_out, size, cudaMemcpyDeviceToHost);

    applyStencil1D_SEQ(RADIUS, N-RADIUS, weights, in, out);

    int nDiffs = checkResults(RADIUS, N-RADIUS, cuda_out, out);
    if(nDiffs)printf("Test Failed\n"); // This should never print
    printf("%f\n%f\n",cuda_out[N-RADIUS-1],time);

    //free resources
    free(weights); free(in); free(out); free(cuda_out);
    cudaFree(d_weights); cudaFree(d_in); cudaFree(d_out);
    return 0;
}
|
1,894
|
/*
* dotproduct.cu
* includes setup funtion called from "driver" program
* also includes kernel function 'kernel_dotproduct[2]()'
* largely inspired in the pdf http://www.cuvilib.com/Reduction.pdf
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define BLOCK_SIZE 1024
struct timeval tp1, tp2;
#define GPU_ERR_CHK(ans) { gpu_assert((ans), __FILE__, __LINE__); }
// Prints a diagnostic for a failed CUDA call and, unless `abort` is false,
// terminates the process using the error code as the exit status.
static void gpu_assert(cudaError_t code, const char *file, int line,
        bool abort = true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code),
            file, line);
    if (abort)
        exit(code);
}
/*
 * One relaxation step: lap[i] <- (lap[i-1] + lap[i+1]) / 2 for interior
 * points, with a fixed right-boundary value of 23 used at i == size-1,
 * staged through dynamic shared memory. lap[0] is never written.
 *
 * Fixes vs. original: the neighbor read ran unconditionally for i == 0
 * (dereferencing lap[-1]) and read lap[size] at i == size-1 before
 * overwriting the result; both out-of-bounds reads are now avoided.
 *
 * NOTE(review): blocks read neighbor cells that other blocks may be
 * writing concurrently, so values at block boundaries are racy across a
 * multi-block launch — confirm whether that is acceptable here.
 */
__global__ void kernel_laplacian(float *lap, long long size, int tUnit) {
    extern __shared__ float lapd[];

    // Round blockDim.x up to the next power of two; the global index is
    // built from this stride (kept from the original launch convention).
    int bid = blockDim.x;
    int nTotalThreads = 0;
    if (!bid) {
        nTotalThreads = bid;
    } else {
        int x = 1;
        while (x < bid) {
            x <<= 1;
        }
        nTotalThreads = x;
    }

    unsigned int tid = threadIdx.x;
    long long i = blockIdx.x * nTotalThreads + threadIdx.x;

    lapd[tid] = 1;
    if (i > 0 && i < size) {               // i > 0 guard avoids lap[-1]
        if (i == (size - 1)) {
            lapd[tid] = (lap[i-1] + 23) / 2;   // fixed right boundary, no lap[size] read
        } else {
            lapd[tid] = (lap[i-1] + lap[i+1]) / 2;
        }
    }
    __syncthreads();
    if (i && i < size) {
        lap[i] = lapd[tid];
    }
}
// This function is called from the host computer.
// It manages memory and calls the function that is executed on the GPU
/*
 * Host driver for kernel_laplacian: copies `lap` to the device, runs the
 * relaxation kernel tUnit times, copies the result back, and reports the
 * elapsed wall time in *time_result.
 *
 * Fix vs. original: the dynamic shared-memory size was computed with
 * sizeof(long long) even though the kernel's shared array holds floats;
 * it now requests exactly blockDim.x * sizeof(float).
 */
extern "C" void cuda_laplacian(float *lap,long long arraySize, int tUnit, double*time_result)
{
    float *lap_d;

    // Reset the device to a clean state before allocating.
    GPU_ERR_CHK(cudaDeviceReset());

    // Allocate and populate the device copy of the array.
    GPU_ERR_CHK(cudaMalloc ((void**) &lap_d, sizeof(float) * arraySize));
    GPU_ERR_CHK(cudaMemcpy (lap_d, lap, sizeof(float) * arraySize, cudaMemcpyHostToDevice));

    // Pick the smallest power-of-two block size that covers small inputs,
    // capped at BLOCK_SIZE.
    int threads;
    if(arraySize < 128){
        threads = 64;
    } else if (arraySize < 256 ){
        threads = 128;
    } else if (arraySize < 512){
        threads = 256;
    } else if (arraySize < 1024){
        threads = 512;
    } else {
        threads = BLOCK_SIZE;
    }
    long long block_size = threads;
    long long blocks = ceil(arraySize / ((float) block_size));

    // set execution configuration
    dim3 dimblock (block_size);
    dim3 dimgrid (blocks);
    // One float of shared memory per thread (the kernel's lapd[] staging).
    int smemSize = dimblock.x * sizeof(float);

    // actual computation: launch the kernel tUnit times; successive
    // launches on the default stream serialize, so each pass sees the
    // previous pass's result.
    gettimeofday(&tp1, NULL);
    int i;
    for(i=0; i<tUnit;i++){
        kernel_laplacian<<<dimgrid,dimblock,smemSize>>>(lap_d, arraySize,tUnit);
    }
    // Blocking copy back; also synchronizes with the kernels above.
    GPU_ERR_CHK(cudaMemcpy (lap, lap_d, sizeof(float) * arraySize, cudaMemcpyDeviceToHost));
    gettimeofday(&tp2, NULL);
    *time_result = (double) (tp2.tv_usec - tp1.tv_usec) / 1000000 + (double) (tp2.tv_sec - tp1.tv_sec);

    cudaFree(lap_d);
}
|
1,895
|
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda.h>
#define CUDA_SAFE_CALL_NO_SYNC( call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define CUDA_SAFE_CALL( call) do { \
CUDA_SAFE_CALL_NO_SYNC(call); \
cudaError err = cudaThreadSynchronize(); \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
//input element size
const int N = 1024*1024*32;
//block size
const int blocksize = 1024;
// Bandwidth micro-benchmark: six dependent global read-modify-write
// statements per element (the host counts 12 accesses of numbytes each).
// Starting from in[i] == 1.0 and out[i] == 0.0, out[i] ends at 6.0.
__global__ void maxBandwidth(int n, float* in, float* out){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) {
        return;
    }
    // value comments assume in = 1.0, out = 0.0 on entry
    in[idx] = in[idx] + 4.0f;       // in  -> 5.0
    out[idx] = out[idx] + in[idx];  // out -> 5.0
    in[idx] = in[idx] - 4.0f;       // in  -> 1.0
    out[idx] = out[idx] - in[idx];  // out -> 4.0
    in[idx] = in[idx] + 1.0f;       // in  -> 2.0
    out[idx] = out[idx] + in[idx];  // out -> 6.0
}
/*
 * Effective-bandwidth micro-benchmark: initializes in[]=1, out[]=0, runs
 * maxBandwidth once under event timing, verifies every output equals 6.0,
 * and reports the achieved bandwidth (12 global accesses of numbytes).
 *
 * Fix vs. original: d_out is now initialized by copying the zeroed host
 * `out` array to the device. Previously the kernel read-modify-wrote
 * uninitialized device memory, so the correctness check could fail
 * nondeterministically.
 */
int main(int argc, char **argv)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    unsigned int numbytes = N * sizeof(float);

    // allocate and initialize host buffers
    float *in = (float *) malloc(numbytes);
    float *out =(float *) malloc(numbytes);
    for( unsigned int i = 0; i < N ; ++i)
    {
        in[i] = 1.0f;
        out[i] = 0.0f;
    }

    // allocate device memory and seed BOTH buffers
    float *d_in, *d_out;
    CUDA_SAFE_CALL(cudaMalloc(&d_in, numbytes));
    CUDA_SAFE_CALL(cudaMalloc(&d_out, numbytes));
    CUDA_SAFE_CALL(cudaMemcpy(d_in, in, numbytes, cudaMemcpyHostToDevice));
    // zero-fill d_out: the kernel accumulates into it
    CUDA_SAFE_CALL(cudaMemcpy(d_out, out, numbytes, cudaMemcpyHostToDevice));

    dim3 block(N/blocksize, 1, 1);
    //max block size(1024, 1024, 64)
    dim3 thread(blocksize, 1 ,1);

    // execute the kernel under event timing
    cudaEventRecord(start, 0);
    maxBandwidth<<< block, thread>>>(N, d_in, d_out);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    // copy output to host memory
    CUDA_SAFE_CALL( cudaMemcpy(out, d_out, numbytes, cudaMemcpyDeviceToHost));

    // check output from kernel: every element should end at exactly 6.0
    int flag = 1;
    for(unsigned int j=0; j<N; j++){
        if(out[j] != 6.0 ){
            printf("out[%d]: %f\n", j, out[j]);
            flag = 0;
        }
    }
    if(flag == 1){
        printf("ALL SUCCESS!\n");
    }else{
        printf("WRONG!!!\n");
    }

    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("\nProcessing time: %f (ms)\n", elapsedTime);
    printf("Effective Bandwidth (GB/s): %f\n\n", (12*numbytes)/elapsedTime/1e6);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // cleanup memory
    free(in);
    free(out);
    CUDA_SAFE_CALL(cudaFree(d_in));
    CUDA_SAFE_CALL(cudaFree(d_out));
}
|
1,896
|
#include "includes.h"
// Bins row i (of M rows) by its nonzero count d_row_nz[i]:
//  - updates *d_max with the largest per-row count seen (atomicMax),
//  - increments the d_bin_size counter of the matching bracket.
// Brackets are (min << j) for j = 0 .. BIN_NUM-3; within the first bracket
// that contains the count, rows at or below `mmin` go to bin j and the
// rest to bin j+1. Rows exceeding every bracket fall into the last bin.
// NOTE(review): BIN_NUM is a file-level macro not visible here; confirm
// the intended layout — bin BIN_NUM-2 is only reachable via the j+1 branch.
__global__ void set_bin(int *d_row_nz, int *d_bin_size, int *d_max, int M, int min, int mmin)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= M) {
return;
}
int nz_per_row = d_row_nz[i];
atomicMax(d_max, nz_per_row);
int j = 0;
for (j = 0; j < BIN_NUM - 2; j++) {
if (nz_per_row <= (min << j)) {
if (nz_per_row <= (mmin)) {
atomicAdd(d_bin_size + j, 1);
}
else {
atomicAdd(d_bin_size + j + 1, 1);
}
return;
}
}
// row exceeded every bracket: count it in the last bin
atomicAdd(d_bin_size + BIN_NUM - 1, 1);
}
|
1,897
|
#include "includes.h"
/*
 * In-place boxcar (moving-sum FIR) SNR search. For each of the PD_NWINDOWS
 * samples a thread owns, scans boxcar widths 1..maxTaps and keeps the best
 * SNR = (sum - width*mean) / (sd + (width-1)*modifier), where
 * mean = d_MSD[0], modifier = d_MSD[1], sd = d_MSD[2].
 * Each blockIdx.y selects one row of nTimesamples in d_input; d_temp
 * supplies the (maxTaps-1)-sample overlap region per block so boxcars can
 * extend past the block's own samples.
 * Outputs, written in place: d_input receives the peak SNR per sample and
 * d_output_taps the boxcar width that achieved it.
 * Shared memory: (PD_NTHREADS*PD_NWINDOWS + maxTaps - 1) floats.
 */
__global__ void PD_INPLACE_GPU_KERNEL(float *d_input, float *d_temp, unsigned char *d_output_taps, float *d_MSD, int maxTaps, int nTimesamples)
{
extern __shared__ float s_input[]; //dynamically allocated memory for now
int f, i, gpos_y, gpos_x, spos, itemp;
float res_SNR[PD_NWINDOWS], SNR, temp_FIR_value, FIR_value, ftemp;
int res_Taps[PD_NWINDOWS];
float signal_mean, signal_sd, modifier;
signal_mean = d_MSD[0];
signal_sd = d_MSD[2];
modifier = d_MSD[1];
//----------------------------------------------
//----> Reading data (each thread loads PD_NWINDOWS samples, strided by blockDim.x)
gpos_y = blockIdx.y * nTimesamples;
gpos_x = blockIdx.x * PD_NTHREADS * PD_NWINDOWS + threadIdx.x;
spos = threadIdx.x;
for (f = 0; f < PD_NWINDOWS; f++)
{
if (gpos_x < nTimesamples)
{
s_input[spos] = d_input[gpos_y + gpos_x];
}
spos = spos + blockDim.x;
gpos_x = gpos_x + blockDim.x;
}
//----> Loading shared data (the maxTaps-1 overlap samples from d_temp)
itemp = PD_NTHREADS * PD_NWINDOWS + maxTaps - 1;
gpos_y = blockIdx.y * ( maxTaps - 1 ) * gridDim.x;
gpos_x = blockIdx.x * ( maxTaps - 1 ) + threadIdx.x;
while (spos < itemp)
{ // && gpos_x<((maxTaps-1)*gridDim.x)
s_input[spos] = d_temp[gpos_y + gpos_x];
spos = spos + blockDim.x;
gpos_x = gpos_x + blockDim.x;
}
__syncthreads();
//----> SNR for nTaps=1 (baseline: each raw sample on its own)
spos = PD_NWINDOWS * threadIdx.x;
for (i = 0; i < PD_NWINDOWS; i++)
{
res_SNR[i] = ( s_input[spos + i] - signal_mean ) / signal_sd;
res_Taps[i] = 1;
}
//----------------------------------------------
//----> FIR calculation loop: grow the boxcar one tap at a time
FIR_value = s_input[spos];
for (f = 1; f < maxTaps; f++)
{
//nTaps=f+1;!
ftemp = signal_sd + f * modifier;
spos = PD_NWINDOWS * threadIdx.x;
// 0th element from NWINDOW: extend the running sum by one sample
i = 0;
FIR_value += s_input[spos + f];
SNR = ( FIR_value - ( f + 1 ) * signal_mean ) / ( ftemp );
if (SNR > res_SNR[i])
{
res_SNR[i] = SNR;
res_Taps[i] = f + 1;
}
// remaining windows: slide the sum (drop the leftmost, add the rightmost)
temp_FIR_value = FIR_value;
for (i = 1; i < PD_NWINDOWS; i++)
{
temp_FIR_value = temp_FIR_value - s_input[spos + i - 1] + s_input[spos + f + i];
SNR = ( temp_FIR_value - ( f + 1 ) * signal_mean ) / ( ftemp );
if (SNR > res_SNR[i])
{
res_SNR[i] = SNR;
res_Taps[i] = f + 1;
}
}
}
//----------------------------------------------
//---- Writing data (in place: SNR over the input, widths to d_output_taps)
gpos_y = blockIdx.y * nTimesamples;
gpos_x = blockIdx.x * PD_NTHREADS * PD_NWINDOWS + PD_NWINDOWS * threadIdx.x;
for (i = 0; i < PD_NWINDOWS; i++)
{
if (( gpos_x + i ) < ( nTimesamples ))
{
d_input[gpos_y + gpos_x + i] = res_SNR[i];
d_output_taps[gpos_y + gpos_x + i] = res_Taps[i];
}
}
}
|
1,898
|
// ---- Generator-emitted launch-geometry and indexing macros for conv() ----
// Block shape is hard-coded to 256 x 1 threads.
#define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
// Flattened global x/y coordinate of the current thread.
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
// Each thread row produces 4 merged output rows of C.
#define merger_y 4
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
// Row-major 2-D accessors with hard-coded row pitches for A, B and C.
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 16
#define WIDTH_A (2048+16)
// Machine-generated kernel: for each output column `idx`, accumulates four
// vertically adjacent output rows (idy*4 .. idy*4+3) of C as sums of
// products of a 16-wide window of A with 16-tap rows of B.
// Assumptions baked in by the generator: blockDim = (256, 1); shared_0
// stages 272 floats of an A row (256 + 16-element halo); shared_1 stages up
// to four 16-tap rows of B.
// NOTE(review): `width`, `height` and `w` are unused -- the row pitches are
// the hard-coded WIDTH_A/WIDTH_B/WIDTH_C macros.  The duplicated
// __syncthreads() pairs, the shadowed inner `it_2`, and the empty {} blocks
// at the end are generator artifacts; the code is preserved byte-for-byte.
__global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h)
{
// Staging buffers: shared_1 holds up to 4 filter rows of B (16 taps each,
// inner dimension padded to 5); shared_0 holds a 272-float segment of A.
__shared__ float shared_1[16][5];
__shared__ float shared_0[272];
int j;
// One running accumulator per merged output row (merger_y == 4).
float sum_0 = 0;
float sum_1 = 0;
float sum_2 = 0;
float sum_3 = 0;
int it_2;
// Steady-state loop over filter rows j = 0 .. h-4: each iteration feeds
// all four accumulators (filter rows j, j+1, j+2, j+3 respectively).
for (j=0; j<(h-3); j=(j+1))
{
int it_2;
// Stage 272 floats of A's row (idy*4 - j + h) into shared_0.
if ((tidx<16))
{
shared_0[(tidx+0)]=A((((idy*4)+(( - 1)*j))+h), (idx+(( - 1)*0)));
}
shared_0[(tidx+16)]=A((((idy*4)+(( - 1)*j))+h), ((idx+(( - 1)*0))+16));
__syncthreads();
// Stage filter rows j .. j+3 of B (16 taps each) into shared_1.
if ((tidx<16))
{
shared_1[(tidx+0)][0]=B((j+0), (0+tidx));
shared_1[(tidx+0)][1]=B((j+1), (0+tidx));
shared_1[(tidx+0)][2]=B((j+2), (0+tidx));
shared_1[(tidx+0)][3]=B((j+3), (0+tidx));
}
__syncthreads();
// Multiply-accumulate across the 16 horizontal taps; the same A sample
// feeds all four row accumulators with their respective B rows.
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
}
__syncthreads();
__syncthreads();
}
// ---- Epilogue for j = h-1: only sum_0 still needs this filter row. ----
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*4)+(( - 1)*(h-1)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*4)+(( - 1)*(h-1)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
sum_0+=(a*b_0);
}
__syncthreads();
__syncthreads();
// ---- Epilogue for j = h-2: feeds sum_0 (row h-2) and sum_1 (row h-1). ----
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*4)+(( - 1)*(h-2)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*4)+(( - 1)*(h-2)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
}
__syncthreads();
__syncthreads();
// ---- Epilogue for j = h-3: feeds sum_0..sum_2; sum_0 is complete after
// this section and is written to output row idy*4. ----
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*4)+(( - 1)*(h-3)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*4)+(( - 1)*(h-3)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
}
C(((idy*4)+0), idx)=sum_0;
__syncthreads();
__syncthreads();
// ---- Prologue offsets (negative j): j = -1 feeds sum_1..sum_3 with
// filter rows 0..2; sum_1 completes and is written to row idy*4+1. ----
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*4)+(( - 1)*(0-1)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*4)+(( - 1)*(0-1)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][1]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][2]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][3]=B(2, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_1;
float b_2;
float b_3;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
}
C(((idy*4)+1), idx)=sum_1;
__syncthreads();
__syncthreads();
// ---- j = -2 feeds sum_2 (filter row 0) and sum_3 (filter row 1);
// sum_2 completes and is written to row idy*4+2. ----
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*4)+(( - 1)*(0-2)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*4)+(( - 1)*(0-2)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][2]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][3]=B(1, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_2;
float b_3;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
sum_2+=(a*b_2);
sum_3+=(a*b_3);
}
C(((idy*4)+2), idx)=sum_2;
__syncthreads();
__syncthreads();
// ---- j = -3 feeds sum_3 with filter row 0; sum_3 completes and is
// written to row idy*4+3. ----
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*4)+(( - 1)*(0-3)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*4)+(( - 1)*(0-3)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][3]=B(0, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_3;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_3=shared_1[it_2][3];
sum_3+=(a*b_3);
}
C(((idy*4)+3), idx)=sum_3;
__syncthreads();
__syncthreads();
// Empty blocks below are generator residue.
{
}
{
}
{
}
{
}
}
|
1,899
|
#include "cuda_runtime.h"
#include <cstdio>
// Function-pointer aliases used as non-type template parameters by the
// kernels below.
// uCat<T>: unary function T -> T.
template <typename T>
using uCat = T(*)(T);
// mCat<T>: binary function (T, T) -> T.
template <typename T>
using mCat = T(*)(T, T);
// Device helper: returns the square of its argument.
__device__ int square(int x)
{
    return x * x;
}
// Device helper: product of two ints (non-template overload; picked over
// the template version for int arguments by overload resolution).
__device__ int mult(int x, int y)
{
    const int product = x * y;
    return product;
}
// Device helper: generic product of two values of type T.
template <typename T>
__device__ T mult(T x, T y)
{
    const T product = x * y;
    return product;
}
// Device helper: adds an int and a float and casts the sum to T.
// Note the parameter types are fixed (int, float); only the return type
// is templated.
template <typename T>
__device__ T add2(int a, float b)
{
    return static_cast<T>(a + b);
}
// Element-wise binary map: c[i] = func(a[i], b[i]) for every i < size.
// Expects a 1-D launch with at least one thread per element; excess
// threads exit early.
template <typename T, mCat<T> func>
__global__ void biMapKernel(T *a, T *b, T *c, size_t size)
{
    const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= size)
        return;
    c[gid] = func(a[gid], b[gid]);
}
// Element-wise unary map: c[i] = func(a[i]) for every i < size.
// Expects a 1-D launch with at least one thread per element; excess
// threads exit early.
template <typename T, uCat<T> func>
__global__ void MapKernel(T *a, T *c, size_t size)
{
    const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= size)
        return;
    c[gid] = func(a[gid]);
}
// Block-wise tree reduction of a[] with the binary operator `func`; each
// block reduces its blockDim.x consecutive elements into c[blockIdx.x].
//
// Preconditions (documented, not checked):
//  - blockDim.x is a power of two (the halving loop assumes it);
//  - size is a multiple of blockDim.x -- threads of a partially
//    out-of-range block return before the barrier, which is undefined
//    behaviour, so launch full blocks only;
//  - the dynamic shared-memory size (3rd launch parameter) is at least
//    blockDim.x * sizeof(T).
template <typename T, mCat<T> func>
__global__ void ReduceKernel(const T *a, T *c, size_t size)
{
    // A templated `extern __shared__ T sdata[]` yields conflicting
    // definitions when the kernel is instantiated for several types, so
    // declare raw bytes and cast (the shared-memory base is suitably
    // aligned for built-in types).
    extern __shared__ unsigned char smem_raw[];
    T *sdata = reinterpret_cast<T *>(smem_raw);

    size_t myId = threadIdx.x + blockIdx.x * blockDim.x;
    size_t tid = threadIdx.x;
    // Was `myId > size`: the thread with myId == size read a[size], one
    // element past the end of the array.
    if (myId >= size)
        return;
    sdata[tid] = a[myId];
    __syncthreads();
    // Pairwise tree reduction: active thread count halves each round.
    for (size_t i = blockDim.x / 2; i > 0; i /= 2)
    {
        if (tid < i)
        {
            sdata[tid] = func(sdata[tid], sdata[tid + i]);
        }
        __syncthreads();
    }
    // One partial result per block.
    if (tid == 0)
    {
        c[blockIdx.x] = sdata[0];
    }
}
// Prints the first l elements of a, space-separated, followed by a newline.
// Fixed: the previous printf("%d ", a[i]) was undefined behaviour for any
// non-int element type; iostream formatting is correct for every
// streamable T and produces identical output for int.
template <typename T>
void arrPrint(T *a, size_t l)
{
    for (size_t i = 0; i < l; ++i)
    {
        std::cout << a[i] << ' ';
    }
    std::cout << '\n';
}
// Demo driver: element-wise multiply of two 10-element int arrays on the
// GPU via biMapKernel<int, mult>.
// Fixed: h_c was printed while uninitialized (undefined behaviour), the
// three device buffers were never freed, and the kernel launch was never
// checked for errors.
int main()
{
    const int N = 10;
    int h_a[N], h_b[N], h_c[N];
    int *d_a, *d_b, *d_c;
    for (int i = 0; i < N; ++i)
    {
        h_a[i] = h_b[N - 1 - i] = -i;
        h_c[i] = 0;  // define h_c before it is printed below
    }
    arrPrint(h_a, N);
    arrPrint(h_b, N);
    arrPrint(h_c, N);
    cudaMalloc((void **) &d_a, N * sizeof(int));
    cudaMalloc((void **) &d_b, N * sizeof(int));
    cudaMalloc((void **) &d_c, N * sizeof(int));
    cudaMemcpy(d_a, h_a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N * sizeof(int), cudaMemcpyHostToDevice);
    // One thread per element: N blocks of 1 thread (tiny demo sizes).
    biMapKernel<int, mult><<<N, 1>>>(d_a, d_b, d_c, N);
    // Launch errors do not surface from the <<<>>> expression itself.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_c, d_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    arrPrint(h_c, N);
    // The original leaked all three device allocations.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
1,900
|
// Backward pass of a rectifier (ReLU): pass the upstream error through
// where the forward input d_a was positive, write zero elsewhere.
// One thread per element; 1-D launch; out-of-range threads do nothing.
__global__ void rectify_back_kernel(float *d_a, float *d_error, float *d_out, int size) {
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid < size) {
        d_out[gid] = (d_a[gid] > 0) ? d_error[gid] : 0.0f;
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.