serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
23,501 | #include "includes.h"
/*
 * 3-D mean filter (work in progress): each thread averages filter_size
 * samples along Y through a shared-memory tile and writes the mean to od.
 * Layout: one thread per voxel of a w*h*depth volume stored z-major
 * ([z][y][x]) in id/od; 3-D grid of 2-D (x,y) thread blocks.
 * NOTE(review): the original author's comments mark several spots as
 * unfinished; the flagged lines below must be fixed before this kernel
 * can be trusted.
 */
__global__ void kernel(float *id, float *od, int w, int h, int depth)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int z = blockIdx.z * blockDim.z + threadIdx.z;
const int dataTotalSize = w * h * depth;
const int radius = 2;
const int filter_size = 2*radius + 1;
const int sW = 6; /* shared tile width; intended as 2*radius + blockDim.x (== 2*radius + blockDim.y) */
/* threads outside the volume exit immediately (no halo/border handling yet) */
if(x >= w || y >= h || z >= depth)
return;
else
{
// linear index of this voxel in the z-major volume
int idx = z*w*h+y*w+x;
// flatten the 3-D grid of 2-D blocks into a launch-order thread id
int threadsPerBlock = blockDim.x * blockDim.y;
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = (blockId * threadsPerBlock)
+ (threadIdx.y * blockDim.x) + threadIdx.x;
// NOTE(review): g_Idx is a launch-order thread id, NOT the voxel index idx;
// id[g_Idx] only equals id[idx] for very specific launch shapes — confirm.
int g_Idx = threadId;
// Per-block 2-D staging tile.
// NOTE(review): smem is unsigned char while id is float* — the store below
// truncates each sample; presumably 8-bit image data was intended. Confirm.
__shared__ unsigned char smem[sW][sW];
int s_Idx = threadIdx.x + (threadIdx.y * sW);
int s_IdxY = s_Idx / sW;
int s_IdxX = s_Idx % sW;
// The commented-out derivation below is the author's unfinished replacement
// for the g_Idx definition above.
//int g_IdxY = s_IdxY + (blockIdx.y * blockDim.y);
//int g_IdxX = s_IdxX + (blockIdx.x * blockDim.x);
//int g_Idx = g_IdxX + (g_IdxY * w);
// Cooperative tile load (marked incomplete by the author).
if(s_IdxY < sW && s_IdxX < sW) // s_IdxX < sW always holds (mod); s_IdxY < sW only when s_Idx < sW*sW
{
if(x >= 0 && y < w && y >= 0 && y < h && z >= 0 && z < depth ) // NOTE(review): `y < w` looks like a typo for `x < w` — confirm
smem[s_IdxY][s_IdxX] = id[g_Idx];
else
smem[s_IdxY][s_IdxX] = 0;
__syncthreads(); // NOTE(review): barrier inside a branch — safe only if every thread in the block takes the same path
}
/* vertical sum of filter_size samples from the shared tile */
float avg = 0.0;
for (int i = -radius; i <= radius; i++){
if(s_IdxY + i < 0 /*|| g_IdxY > h*/ ) // NOTE(review): no upper guard — s_IdxY+i can reach past sW and read out of the tile
avg += 0.0;
else
avg += smem[s_IdxY+i][s_IdxX];
}
/* store the mean for this voxel */
avg /= filter_size;
if(idx < dataTotalSize)
od[idx] = avg;
}
}
23,502 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define DATA_TYPE 0 // 0-SP, 1-INT, 2-DP
#define VECTOR_SIZE 60000000
#define TILE_DIM 1024
#define COMP_ITERATIONS 8192
#define KERNEL_CALLS 1
// Shared-memory bandwidth microbenchmark: each in-range thread repeatedly
// reads its own slot of a block-shared tile and mirror-writes it to the
// opposite end. The tile is deliberately left uninitialized — only the
// memory traffic matters, not the values.
template <class T>
__global__ void simpleKernel2(int size, int compute_iters, int tile_dim)
{
    __shared__ T tileBuf[TILE_DIM];
    const int globalIdx = blockIdx.x * tile_dim + threadIdx.x;
    if (globalIdx >= size)
        return;
    for (int iter = 0; iter < compute_iters; ++iter) {
        T v = tileBuf[threadIdx.x];           // read own slot
        tileBuf[TILE_DIM - threadIdx.x - 1] = v;  // mirror write
    }
}
// Benchmark driver: launches simpleKernel2 kernel_calls times and reports
// the total GPU time via CUDA events.
// Usage: binary [inner_iterations] [kernel_calls]  (both or neither).
int main(int argc, char **argv) {
    int compute_iters = COMP_ITERATIONS,
        kernel_calls = KERNEL_CALLS,
        vector_size = VECTOR_SIZE,
        tile_dim = TILE_DIM;
    if (argc > 3 || argc == 2) {
        printf("\nError: Wrong number of arguments.\n\n");
        printf("Usage:\n\t %s [inner_iterations] [kernel_calls]\n\t %s\n", argv[0], argv[0]);
        return -1;
    }
    if (argc == 3) {
        kernel_calls = atoi(argv[2]);
        compute_iters = atoi(argv[1]);
    }
    printf("Number of kernel launches: %d\n", kernel_calls);
    printf("Number of compute iterations: %d\n", compute_iters);
    // execution configuration parameters
    // fix: ceil-div so the tail elements are covered when vector_size is not a
    // multiple of tile_dim (the kernel bounds-checks, so extra threads are safe)
    dim3 grid((vector_size + tile_dim - 1) / tile_dim, 1), threads(tile_dim, 1);
    // CUDA events for timing
    cudaEvent_t start, stop;
    printf("\nVector size: %d TotalBlocks: %d blockSize: %d\n\n", vector_size, grid.x, threads.x);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // time the loop over kernel launches
    cudaEventRecord(start, 0);
    for (int i = 0; i < kernel_calls; i++)
    {
#if (DATA_TYPE == 0)
        simpleKernel2<float><<<grid, threads>>>(vector_size, compute_iters, tile_dim);
#elif (DATA_TYPE == 1)
        simpleKernel2<int><<<grid, threads>>>(vector_size, compute_iters, tile_dim);
#else
        simpleKernel2<double><<<grid, threads>>>(vector_size, compute_iters, tile_dim);
#endif
    }
    // fix: surface launch-configuration errors (kernel launches do not return them)
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float kernelTime = 0.0f;
    cudaEventElapsedTime(&kernelTime, start, stop);
    // fix: the measured time was computed but never reported
    printf("Total kernel time: %f ms\n", kernelTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceReset();
    printf("Test passed\n");
    exit(EXIT_SUCCESS);
}
|
23,503 | #include "includes.h"
// Element-wise power: y[i] = x[i]^power for i in [0, size), grid-stride loop
// so any launch configuration covers the whole array.
__global__ void cudaSpow_kernel(unsigned int size, float power, const float *x, float *y)
{
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int step = blockDim.x * gridDim.x;
    for (unsigned int idx = first; idx < size; idx += step)
        y[idx] = powf(x[idx], power);
}
23,504 | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#define GPUJOULE_DIR ""
#define SHARED_MEM_ELEMENTS 1024
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__
// Energy/latency microbenchmark: a dependent pointer chase through shared
// memory interleaved with dependent double-precision adds.
//   D      - single-float output (last writer wins across threads)
//   n      - outer iteration count
//   div    - number of active lanes per warp (controlled divergence)
//   stride - starting offset into the shared circular pointer list
void compute(float* D, int n, int div, int stride) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    float I1 = tid * 2.0;              // retained from original; not otherwise used
    int thread_id = threadIdx.x % 32;  // lane id within the warp
    __shared__ unsigned long long sdata[SHARED_MEM_ELEMENTS];
    __shared__ void **tmp_ptr;
    __shared__ void *arr[SHARED_MEM_ELEMENTS];
    // Thread 0 builds a circular linked list through shared memory:
    // sdata[i] stores the address of sdata[i+1], wrapping at the end.
    if (threadIdx.x == 0) {
        for (int i = 0; i < SHARED_MEM_ELEMENTS; i++) {
            arr[i] = (void *)&sdata[i];
        }
        for (int i = 0; i < (SHARED_MEM_ELEMENTS - 1); i++) {
            sdata[i] = (unsigned long long) arr[i + 1];
        }
        sdata[SHARED_MEM_ELEMENTS - 1] = (unsigned long long) arr[0];
    }
    __syncthreads();
    // tmp_ptr is block-shared, so concurrent writers race (as in the original).
    tmp_ptr = (void **)(&(arr[(threadIdx.x + stride) % SHARED_MEM_ELEMENTS]));
    double f1, f2, f3;
    f1 = 1.1;
    f2 = 2.5;
    // Only the first `div` lanes of each warp run the chase.
    if (thread_id < div) {
        for (int k = 0; k < n; k++) {
            // The original spelled out these 256 chase/add pairs by hand; a
            // fixed-trip unrolled loop emits the same dependent sequence.
            #pragma unroll
            for (int rep = 0; rep < 256; ++rep) {
                tmp_ptr = (void**)(*tmp_ptr);
                f1 = f1 + k;
            }
        }
    }
    *D = f1 * tid;
}
// Print command-line help for this benchmark binary.
void usage() {
    std::cout
        << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>threads active per warp"
        << std::endl;
}
// Driver: starts an external power monitor, runs the compute microbenchmark
// once, and reports the GPU elapsed time.
// argv: <num_blocks> <num_threads_per_block> <iterations> <divergence> <stride>
int main(int argc, char **argv)
{
    if (argc != 6) {
        usage();
        exit(1);
    }
    int num_blocks = atoi(argv[1]);
    int num_threads_per_block = atoi(argv[2]);
    int iterations = atoi(argv[3]);
    int divergence = atoi(argv[4]);
    int stride = atoi(argv[5]);
    cudaMalloc((void**)&d_res, sizeof(float));
    // fix: the global h_res is initialized to NULL and was never allocated, so
    // the final cudaMemcpy copied through a null host pointer
    h_res = (float *) malloc(sizeof(float));
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    std::string cmd = "GPUJOULE_DIR/nvml/example/power_monitor 5 > GPUJOULE_DIR/energy_model_ubench/energy_model_data/combined_inst_validation_data/fadd_shd_mem_64p_asm_power.txt &";
    std::system(cmd.c_str());
    std::system("sleep 5");
    cudaEventRecord(start, 0);
    cudaProfilerStart();
    compute<<<num_blocks, num_threads_per_block>>>(d_res, iterations, divergence, stride);
    cudaProfilerStop();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    std::system("killall power_monitor");
    std::cout << "GPU Elapsed Time = " << time << std::endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaDeviceSynchronize();
    cudaMemcpy(h_res, d_res, sizeof(float), cudaMemcpyDeviceToHost);
    // fix: release host and device result buffers (both leaked before)
    free(h_res);
    cudaFree(d_res);
    return 0;
}
|
23,505 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
// Elementwise c = a + b over n ints; one thread per element with a tail guard.
__global__ void vectorAdd(int* a, int* b, int* c, int n){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
// Fill a[0..n) with pseudo-random ints in [0, 99] using rand().
void matrix_init(int* a, int n){
    for (int idx = 0; idx < n; ++idx)
        a[idx] = rand() % 100;
}
// Validate c[i] == a[i] + b[i] for every i; aborts via assert on mismatch.
void check_error(int* a, int* b, int* c, int n){
    int idx = 0;
    while (idx < n) {
        assert(a[idx] + b[idx] == c[idx]);
        ++idx;
    }
}
// Host driver: random vectors a and b, GPU vector add, CPU verification.
int main(){
    // 2^16 elements
    int n = 1 << 16;
    // h_ = host (CPU) buffers
    int *h_a, *h_b, *h_c;
    // d_ = device (GPU) buffers
    int *d_a, *d_b, *d_c;
    size_t bytes = sizeof(int) * n;
    // Allocate host memory
    h_a = (int*)malloc(bytes);
    h_b = (int*)malloc(bytes);
    h_c = (int*)malloc(bytes);
    // Allocate device memory
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    // Initialize inputs with random values
    matrix_init(h_a, n);
    matrix_init(h_b, n);
    // Copy inputs to the device
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    int NUM_THREADS = 256;
    // fix: integer ceil-div — the old (int)ceil(n / NUM_THREADS) performed
    // integer division BEFORE rounding, silently dropping the tail whenever
    // n was not a multiple of NUM_THREADS
    int NUM_BLOCKS = (n + NUM_THREADS - 1) / NUM_THREADS;
    vectorAdd<<<NUM_BLOCKS, NUM_THREADS>>>(d_a, d_b, d_c, n);
    // Blocking copy-back also synchronizes with the kernel
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    check_error(h_a, h_b, h_c, n);
    printf("Completed.\n");
    // fix: release device and host memory (all six buffers leaked before)
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
23,506 | #include "includes.h"
// Copy elements 3, 7 and 11 of the current transform into the camera
// position (presumably the translation column of a row-major 4x4 matrix —
// confirm against the caller's matrix layout).
__global__ void d_updateTransforms (float* d_currentTransform, float3* d_cameraPosition)
{
    float3 pos;
    pos.x = d_currentTransform[3];
    pos.y = d_currentTransform[7];
    pos.z = d_currentTransform[11];
    *d_cameraPosition = pos;
}
23,507 | #include<stdio.h>
__global__
/*
 * In-place scale: a[i] += a[i]*0.5 for i < n, one thread per element.
 * NOTE(review): a is int*, so a[i]*0.5 is a double that is truncated on the
 * integer add-assign (a[i]==1 stays 1) — confirm integer rounding is intended.
 * NOTE(review): blockDim.x*blockIdx.x is evaluated in 32-bit before being
 * widened to the 64-bit index — could wrap for extremely large grids.
 */
void kernel(int * a, unsigned long int n)
{
unsigned long long int i = blockDim.x*blockIdx.x+threadIdx.x;
if(i<n)
a[i] += a[i]*0.5;
}
// Driver: allocates a large int array on host and device and runs the
// scaling kernel over it on the GPU.
int main()
{
    unsigned long int N = 2509892096;
    int * A = (int *) malloc(N * sizeof(int));
    // fix: a ~10 GB allocation can easily fail; the old code used A unchecked
    if (A == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    int * B;
    cudaMalloc(&B, N * sizeof(int));
    cudaMemcpy(B, A, N * sizeof(int), cudaMemcpyHostToDevice);
    int blocks = (N + 1023) / 1024; // ceil-div launch over all N elements
    // fix: the kernel was launched with the HOST pointer A; device code must
    // be given the device allocation B
    kernel<<<blocks, 1024>>>(B, N);
    cudaDeviceSynchronize();
    // fix: release the device buffer (leaked before)
    cudaFree(B);
    free(A);
    return 0;
}
|
23,508 | /*
 * pthreaded hw5, written by Adam Tygart and Ryan Hershberger
* Could be further optimized by pipelining read operations and not cyclically creating/destroying child threads
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
* Length of "lines changes with every protein"
* Thanks to wikipedia for the following pseudocode:
* function LCSLength(X[1..m], Y[1..n])
* C = array(0..m, 0..n)
* for i := 0..m
* C[i,0] = 0
* for j := 0..n
* C[0,j] = 0
* for i := 1..m
* for j := 1..n
* if X[i] = Y[j]
* C[i,j] := C[i-1,j-1] + 1
* else:
* C[i,j] := max(C[i,j-1], C[i-1,j])
* return C[m,n]
*/
FILE *f;
int comp_count;
int offset = 0;
#ifndef NUM_THREADS
#define NUM_THREADS 4
#endif
#ifndef WORK_UNIT
#define WORK_UNIT 400
#endif
#define QUEUE_SIZE NUM_THREADS*WORK_UNIT
// Abort with a diagnostic (prefixed by msg) if the most recent CUDA runtime
// call recorded an error; no-op otherwise.
void checkCUDAError(const char *msg)
{
    const cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
/*
* Calculate the LCS of the two strings.
*/
/*
 * Longest-common-substring length of str1[0..len1) and str2[0..len2),
 * computed with the classic DP table C where C[i][j] = C[i-1][j-1]+1 on a
 * character match and 0 otherwise; returns the maximum cell value.
 * Returns 0 when the table cannot be allocated.
 */
__device__ int MCSLength(char *str1, int len1, char* str2, int len2) {
    int** arr = (int**) malloc(sizeof(int*)*(len1+1));
    if ( arr == 0 ) {
        printf("Couldn't allocate memory for the MCS array\n");
        // fix: the original fell through and dereferenced the null table
        return 0;
    }
    int i, j, local_max = 0;
    for (i = 0; i <= len1; i++) {
        arr[i] = (int*)malloc((len2+1)*sizeof(int));
        if ( arr[i] == 0 ) {
            printf("Couldn't allocate memory for the MCS subarray\n");
        } else {
            // fix: zero the row — the original never initialized row 0,
            // column 0, or mismatch cells, so the DP read uninitialized memory
            for (j = 0; j <= len2; j++)
                arr[i][j] = 0;
        }
    }
    for (i = 1; i <= len1; i++) {
        for (j = 1; j <= len2; j++) {
            if (str1[i-1] == str2[j-1]) {
                // extend the diagonal match run (mismatch cells stay 0)
                arr[i][j] = arr[i-1][j-1] + 1;
                if (arr[i][j] > local_max)
                    local_max = arr[i][j];
            }
        }
    }
    for (i = 0; i <= len1; i++)
        free(arr[i]);
    free(arr);
    return local_max;
}
/*
* Read file, char by char. headers start with '>' or ';', ignore until newline.
* read "gene" until we reach the next header. return int of num of chars in buff[i]
*/
/*
 * Read one FASTA-style record from the global file `f` into buff[i].
 * Header lines starting with '>' or ';' are skipped up to the newline;
 * sequence characters are accumulated (buffer grows in 4000-byte steps).
 * On hitting the next header the file position is rewound one char so the
 * following call starts on it. Returns the number of sequence characters
 * stored (the buffer is NOT NUL-terminated; callers use the length).
 */
int readLine(char **buff, int i) {
    int readchars = 0;
    int commentline = 0, startedgene = 0;
    int buffStepSize = 4000;
    int buffSize = 4000;
    buff[i] = (char*)malloc(sizeof(char)*buffSize);
    // fix: fgetc returns int; storing it in a char made the EOF comparison
    // unreliable (never true with unsigned char, false positives on 0xFF)
    int c;
    do {
        // grow the buffer before it overflows
        if (((readchars) >= buffSize) && (buffSize != 0)) {
            buffSize += buffStepSize;
            char* temp_buff = (char*)realloc(buff[i],sizeof(char)*buffSize);
            buff[i] = temp_buff;
        }
        if (buff[i] == 0) {
            printf("Couldn't allocate memory for the buffer\n");
            exit(-2);
        }
        c = fgetc(f);
        switch (c) {
        case '\n':
            commentline = 0;
            break;
        case ';':
        case '>':
            commentline = 1;
            if (startedgene == 1) {
                // push the header character back for the next record
                long curr = ftell(f);
                fseek(f, curr-1, SEEK_SET);
                return readchars;
            }
            break;
        default:
            if ( commentline == 0 ) {
                startedgene = 1;
                if (c != EOF)
                    buff[i][readchars++] = (char)c;
            }
        }
    } while (c != EOF);
    return readchars;
}
/*
* Is the worker function for a thread, calculate your chunk of the global data, calculate the MCS of each pair, copy the counts off to the global counts once locked
*/
/*
 * Per-thread worker: pairs up sequences from the flattened `queue` (offsets
 * in `lens`), computes each pair's common-substring length via MCSLength,
 * and writes the results into `counts`; the number of completed pairs is
 * accumulated into *completed_count atomically.
 * NOTE(review): several indexing oddities below look unfinished — confirm
 * against the host-side packing before trusting the results.
 */
__global__ void threaded_count(int* completed_count, int* counts, char* queue, int* lens) {
int local_work_unit = blockDim.x*blockIdx.x;
int local_counts[WORK_UNIT/2];
int local_count = 0;
int startPos = (threadIdx.x) + (local_work_unit);
int endPos = startPos + (local_work_unit); // NOTE(review): never used below
char* str1;
char* str2;
int strlen1, strlen2;
int i, j, k;
for (i = 0; i < WORK_UNIT/2; i++) {
local_counts[i] = 0;
j = startPos + (i*2);
// a zero length marks the end of valid entries
if ((lens[j] != 0) && (lens[j+1] != 0)) {
// lens holds starting positions of each string within queue
// NOTE(review): `lens[j]+1*sizeof(char)` is lens[j]+1, not (lens[j]+1)*size —
// and the malloc sizes use offsets where lengths (strlen1/strlen2) seem meant
str1 = (char*) malloc(lens[j]+1*sizeof(char));
str2 = (char*) malloc(lens[j+1]+1*sizeof(char));
strlen1 = lens[j+1] - lens[j];
// NOTE(review): reads lens[j+2], one past the pair — can run off the array end
strlen2 = lens[j+2] - lens[j+1];
for (k = 0; k < strlen1; k++)
str1[k] = queue[lens[j] + k];
for (k = 0; k < strlen2; k++)
str2[k] = queue[lens[j+1] + k];
local_counts[i] = MCSLength(str1, strlen1, str2, strlen2);
free(str1);
free(str2);
local_count++;
}
else
break;
}
// publish this thread's results to the global counts array
for (i = 0; i < WORK_UNIT/2; i++) {
counts[(startPos/2) + i] = local_counts[i];
}
atomicAdd(completed_count, local_count);
}
/*
 * Take a file-name on the command line, open it and read portions of the file at a time. start threads to calculate MCS. Find the max and average MCSs
*/
/*
 * Reads sequences from the file named on the command line in QUEUE_SIZE
 * batches, flattens each batch for the GPU, runs threaded_count over it,
 * and finally reports the longest and average common-substring lengths.
 * NOTE(review): several defects are flagged inline below (double free,
 * short device copy, comp_count never updated).
 */
int main(int argc, char* argv[]) {
if (argc != 2 ) {
printf("Please specify a file on the command line\n");
exit(-1);
}
f = fopen(argv[1],"r");
if ( f == 0 ) {
printf("Couldn't open file\n");
exit(-1);
}
char **queue;
int *lens;
int *counts;
char *dev_queue;
int *dev_lens;
int *dev_counts;
// leftovers from the pthread version of this program
int i;
int perThread = WORK_UNIT;
int totalSize = QUEUE_SIZE;
int size = NUM_THREADS;
int numThreadsPerBlock = 100;
int numBlocks = size / numThreadsPerBlock; // NOTE(review): shadowed by the dim3 of the same name below
int totalThreads = numThreadsPerBlock * numBlocks;
int* dev_completed_count;
cudaMalloc((void**)&dev_completed_count, sizeof(int));
printf("we get this far!\n");
counts = (int*)calloc(sizeof(int),QUEUE_SIZE);
do {
queue = (char**)malloc(sizeof(char*)*QUEUE_SIZE);
printf("A\n");
lens = (int*)calloc(sizeof(int),QUEUE_SIZE+1);
cudaMalloc((void**)&dev_lens, sizeof(int)*(QUEUE_SIZE +1));
printf("B\n");
// grow the result array to cover this batch
int *temp_counts = (int*) realloc(counts, (QUEUE_SIZE + offset)/2 * sizeof(int));
printf("C\n");
if (( queue == 0 ) || (lens == 0) || (temp_counts == 0)) {
printf("Couldn't allocate memory for the work queues\n");
exit(-1);
}
counts = temp_counts;
printf("This is a TEST %d\n", QUEUE_SIZE);
int t = 0;
char *dev_queue_flat = (char *) malloc(sizeof(char));
char *temp_flat;
lens[0] = 0;
// read QUEUE_SIZE sequences and pack them end-to-end into dev_queue_flat;
// lens[i] records each sequence's starting offset
for (i = 0; i < QUEUE_SIZE; i++) {
lens[i+1] = t + readLine(queue, i);
temp_flat = (char *) realloc(dev_queue_flat, (lens[i+1] + 1) * sizeof(char));
dev_queue_flat = temp_flat;
int j;
for (j = 0; j <= lens[i+1] - t; j++)
dev_queue_flat[t+j] = queue[i][j];
t = lens[i+1];
if (( queue[i] == 0 )) {
printf("Couldn't allocate memory for the work subqueues\n");
exit(-1);
}
}
cudaMalloc((void**)&dev_queue, (t * sizeof(char)));
cudaMemcpy(dev_queue, dev_queue_flat, t*sizeof(char), cudaMemcpyHostToDevice);
// NOTE(review): copies QUEUE_SIZE ints, but lens has QUEUE_SIZE+1 valid
// entries and the kernel reads past the pair — the last offsets never arrive
cudaMemcpy(dev_lens, lens, QUEUE_SIZE*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void**)&dev_counts, (QUEUE_SIZE*sizeof(int))/2);
cudaMemset( dev_counts, 0, (QUEUE_SIZE*sizeof(int))/2);
printf("A1\n");
dim3 numBlocks(NUM_THREADS);
dim3 threadsPerBlock(WORK_UNIT);
threaded_count<<< numBlocks, threadsPerBlock >>>(dev_completed_count, dev_counts, dev_queue, dev_lens);
cudaThreadSynchronize(); // deprecated alias of cudaDeviceSynchronize
int* temp = (int*) malloc(sizeof(int)*QUEUE_SIZE/2);
cudaMemcpy(temp, dev_counts, (QUEUE_SIZE*sizeof(int))/2, cudaMemcpyDeviceToHost);
for (i = 0; i < QUEUE_SIZE/2; i++)
counts[offset+i] = temp[i];
for (i = 0; i < QUEUE_SIZE; i++) {
free(queue[i]);
}
cudaFree(dev_queue);
cudaFree(dev_counts);
free(temp);
cudaFree(dev_queue); // NOTE(review): dev_queue is freed a second time here
free(queue);
cudaFree(dev_lens);
free(lens);
offset += QUEUE_SIZE;
} while (!feof(f));
unsigned long total = 0;
int longest = 0, longest_loc = -1;
// NOTE(review): comp_count is never updated from dev_completed_count (or
// anywhere else), so this loop runs zero times and the average divides by 0
for (i = 0; i < comp_count; i++) {
total += counts[i];
if (counts[i] > longest) {
longest = counts[i];
longest_loc = i;
}
}
printf("Longest LCS: %d, is the %dth pair in the file\n", longest, longest_loc);
printf("Average: %Lf\n",((long double) total)/comp_count);
fclose(f);
free(counts);
return 0;
}
|
23,509 | #include <cuda_runtime_api.h>
#include <stdio.h>
// Query and print the per-block reserved shared memory of GPU 0.
int main() {
    int device_id = 0; // ID of the GPU device to query
    cudaDeviceProp prop;
    // fix: check the query result instead of printing garbage on failure
    cudaError_t err = cudaGetDeviceProperties(&prop, device_id);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // fix: reservedSharedMemPerBlock is size_t — keep the type and use %zu
    // (the old int + %d pairing truncated and was UB on LP64)
    size_t reservedShared = prop.reservedSharedMemPerBlock;
    // fix: message said "Reversed" instead of "Reserved"
    printf("Reserved Shared Memory per Block: %zu bytes\n", reservedShared);
    return 0;
}
|
23,510 | #include <bits/stdc++.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
using namespace std;
const int BLOCK_SIZE = 1024;
const int SHARE_SIZE = 1024;
// Mean of all N inputs, recomputed independently by every thread (O(N) work
// per thread — the deliberately naive baseline). The caller zero-fills
// output before launch; each thread accumulates into its own slot.
__global__ void naiveKernel(int N, float *input, float *output){
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N)
        return;
    float acc = output[gid];
    for (int j = 0; j < N; ++j)
        acc += input[j];
    output[gid] = acc / N;
}
// Mean of all N inputs per thread, staged through a shared-memory buffer:
// the block cooperatively loads SHARE_SIZE-element chunks, then every
// in-range thread accumulates the chunk into its own output slot.
// The caller zero-fills output before launch.
__global__ void smemKernel(int N, float *input, float *output){
    int b_size = blockDim.x, b_idx = blockIdx.x, t_idx = threadIdx.x;
    int global_i = b_size * b_idx + t_idx, n_chk = (N + SHARE_SIZE - 1)/SHARE_SIZE;
    __shared__ float buff[SHARE_SIZE];
    for(int q=0;q<n_chk;++q){
        int left = q*SHARE_SIZE, right = min(left + SHARE_SIZE, N);
        // cooperative, coalesced load of this chunk
        for(int i = t_idx + left; i < right; i += b_size) buff[i-left] = input[i];
        __syncthreads(); // chunk fully loaded before anyone reads it
        if(global_i < N){
            for(int i = left; i < right; ++i) output[global_i] += buff[i-left];
        }
        __syncthreads(); // chunk fully consumed before it is overwritten
    }
    // fix: tail threads of the last block (global_i >= N) previously wrote
    // out of bounds here — guard the final normalization
    if (global_i < N)
        output[global_i] /= N;
}
// Compare a serial CPU mean against the shared-memory GPU kernel over six
// timed runs. Optional argv[1] overrides the element count N.
int main(int argc, char *argv[]){
    int N = 1<<18;
    if(argc > 1) N = stoi(string(argv[1]));
    float *input = new float [N], *output = new float [N];
    float *dev_in, *dev_out;
    clock_t time;
    cudaMalloc((void **)&dev_in, N*sizeof(float));
    cudaMalloc((void **)&dev_out, N*sizeof(float));
    for(int i=0;i<N;++i) input[i] = (float)rand()/RAND_MAX;
    /* Serial reference */
    time = clock();
    cout << "Serial (CPU) Code:" << endl;
    float ans = accumulate(input, input + N, 0.)/N;
    cout << "Time Usage: " << float(clock() - time)/CLOCKS_PER_SEC << endl;
    cout << "Answer: " << ans << endl << endl;
    /* GPU runs */
    int block_size = BLOCK_SIZE;
    int num_block = (N + block_size - 1)/block_size;
    cout << "block_size = " << block_size << endl;
    cout << "num_blocks = " << num_block << endl << endl;
    cudaEvent_t start, stop;
    float cuda_time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    for(int q = 0; q < 6; ++q){
        cudaMemcpy(dev_in , input, N*sizeof(float), cudaMemcpyHostToDevice);
        memset(output, 0, N*sizeof(float));
        cudaMemcpy(dev_out, output, N*sizeof(float), cudaMemcpyHostToDevice);
        cudaEventRecord(start, 0);
        smemKernel<<<num_block, block_size>>>(N, dev_in, dev_out);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        // fix: this print used to sit between the start/stop records, so
        // host console I/O could be folded into the measured GPU time
        cout << "GPU code run #" << to_string(q+1) <<": " << endl;
        cudaEventElapsedTime(&cuda_time, start, stop);
        cout << "Time Usage: " << cuda_time/1000 << endl << endl;
        cudaMemcpy(output, dev_out, N*sizeof(float), cudaMemcpyDeviceToHost);
    }
    // fix: the events were never destroyed
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_in);
    cudaFree(dev_out);
    delete [] input;
    delete [] output;
    return 0;
}
|
23,511 | #include "includes.h"
// Column sums of a row-major width x height matrix: one thread per column,
// vec[col] = sum over all rows of mat[row][col]. "Dumb" because each thread
// strides through global memory by `width`.
__global__ void kDumbSumCols(float* mat, float* vec, unsigned int width, unsigned int height) {
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= width)
        return;
    float total = 0;
    const float* p = mat + col;
    for (unsigned int row = 0; row < height; ++row, p += width)
        total += *p;
    vec[col] = total;
}
23,512 | //
// Created by root on 2020/11/24.
//
#include "curand_kernel.h"
#include "cuda_runtime.h"
#include "stdio.h"
// Device-API RNG demo: seed one curand state per thread (fixed seed 9444,
// sequence = thread id), then fill out[0..N) with 2 * U(0,1) samples via a
// grid-stride loop.
__global__ void device_api_kernel(curandState *states, float *out, int N) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(9444, tid, 0, states + tid);
    const int total_threads = gridDim.x * blockDim.x;
    for (int idx = tid; idx < N; idx += total_threads) {
        out[idx] = curand_uniform(states + tid) * 2;
    }
}
// Host-API RNG demo: double each pre-generated random value,
// out[i] = values[i] * 2, via a grid-stride loop.
__global__ void host_api_kernel(float *values, float *out, int N) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int step = gridDim.x * blockDim.x;
    for (int idx = tid; idx < N; idx += step)
        out[idx] = values[idx] * 2;
}
// Generate 1M uniform floats on the GPU with the curand DEVICE API,
// scale by 2 in-kernel, and print the first ten values.
void cuda_device_rand() {
    static curandState *states = NULL;
    static float *dRand = NULL, *hRand = NULL;
    static int dRand_length = 1000000;
    int block = 256;
    int grid = 30;
    cudaMalloc(&dRand, sizeof(float) * dRand_length);
    cudaMalloc(&states, sizeof(curandState) * block * grid);
    hRand = (float *) malloc(sizeof(float) * dRand_length);
    device_api_kernel<<<grid, block>>>(states, dRand, dRand_length);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(hRand, dRand, sizeof(float) * dRand_length, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 10; i++) {
        printf("%.2f\t", hRand[i]);
    }
    printf("\n");
    free(hRand);
    cudaFree(dRand);
    // fix: the RNG state array was leaked; reset the statics so a repeat
    // call starts clean instead of clobbering stale pointers
    cudaFree(states);
    states = NULL;
    dRand = NULL;
    hRand = NULL;
}
void cuda_host_rand() {
    // Generate random numbers with the cuRAND *host* API: the generator
    // fills a device buffer, a kernel scales it, and the first 10 results
    // are printed.
    static curandGenerator_t randGen;
    static float *dRand = NULL, *dOut, *hOut;
    static int dRand_length = 1000000;
    cudaMalloc(&dRand, sizeof(float) * dRand_length);
    cudaMalloc(&dOut, sizeof(float) * dRand_length);
    hOut = (float *) malloc(sizeof(float) * dRand_length);
    curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_DEFAULT);
    curandGenerateUniform(randGen, dRand, dRand_length); // the new data are in device memory
    host_api_kernel<<<30, 256>>>(dRand, dOut, dRand_length);
    cudaMemcpy(hOut, dOut, sizeof(float) * dRand_length, cudaMemcpyDeviceToHost);
    for (int i = 0; i < 10; i++) {
        printf("%.2f\t", hOut[i]);
    }
    printf("\n");
    // BUG FIX: the generator handle was created but never destroyed.
    curandDestroyGenerator(randGen);
    free(hOut);
    cudaFree(dOut);
    cudaFree(dRand);
}
// nvcc -lcurand RandKernel.cu -o RandKernel
// Runs both RNG demos; the host-API path first, then the device-API path.
int main() {
printf("host:\n");
cuda_host_rand();
printf("device:\n");
cuda_device_rand();
return 0;
} |
23,513 | #include "includes.h"
__global__ void InterpolateVectorKernel( int r, int q, int f, int inputSize, float *referenceVector )
{
    // Row r of the row-major matrix becomes the midpoint of rows q and f.
    int col = blockDim.x*blockIdx.y*gridDim.x   // threads in grid rows above this one
            + blockDim.x*blockIdx.x             // threads in blocks to the left
            + threadIdx.x;
    if (col < inputSize)
    {
        float a = referenceVector[q * inputSize + col];
        float b = referenceVector[f * inputSize + col];
        referenceVector[r * inputSize + col] = 0.50f * (a + b);
    }
} |
23,514 | /* Problem - Paralelno programiranje
Naci najveci poligon od ucitanih tacaka */
#include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#define TPB 16
__global__ void calculate(double *x, double *y, double *z, double *out, int n){
    // Per-block max of distances from point `index` to the centroid stored
    // in (x[0], y[0], z[0]); one partial max per block goes to out[blockIdx.x].
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    __shared__ double temp[TPB];
    // BUG FIX: threads past the valid range used to leave temp[] uninitialized,
    // so the reduction below could read garbage. Distances are >= 0, so 0.0 is
    // a neutral pad value for a max reduction.
    double d = 0.0;
    if (index + 2 < n){
        double dx = x[index] - x[0];
        double dy = y[index] - y[0];
        double dz = z[index] - z[0];
        d = sqrt(dx*dx + dy*dy + dz*dz);
    }
    temp[threadIdx.x] = d;
    __syncthreads();
    // Thread 0 reduces this block's distances to a single maximum.
    if (threadIdx.x == 0){
        double s = temp[0];
        int i;
        for(i = 1; i < TPB; i++){
            if (temp[i] > s) s = temp[i];
        }
        out[blockIdx.x] = s;
    }
}
int main(int argc, char **argv){
    // Reads points from tacke.txt, computes their centroid into slot 0, and
    // finds the largest point-to-centroid distance on the GPU.
    double *x, *y, *z, *out, *d_x, *d_y, *d_z, *d_out;
    FILE *f = fopen("tacke.txt", "r");
    int n;
    fscanf(f, "%d", &n);
    n++; /* extra slot: index 0 holds the centroid */
    int size = sizeof(double)*n, outSize = sizeof(double)*(n-2+TPB-1)/TPB, i;
    cudaMalloc((void**)&d_x, size);
    cudaMalloc((void**)&d_y, size);
    cudaMalloc((void**)&d_z, size);
    cudaMalloc((void**)&d_out, outSize);
    x = (double*)malloc(size);
    y = (double*)malloc(size);
    z = (double*)malloc(size);
    out = (double*)malloc(outSize);
    fscanf(f, "%lf%lf%lf", &(x[0]), &(y[0]), &(z[0]));
    /* NOTE(review): min/max start from index 1, but the loop below fills
       indices 2..n-1 and accumulates into index 0 — x[1]/y[1]/z[1] are never
       assigned, so these seeds read uninitialized memory; confirm intent
       (the min/max values are also never used afterwards). */
    double minx = x[1], miny = y[1], minz = z[1];
    double maxx = x[1], maxy = y[1], maxz = z[1];
    for(i = 2; i < n; i++){
        fscanf(f, "%lf%lf%lf", &(x[i]), &(y[i]), &(z[i]));
        x[0] += x[i];
        if (x[i] < minx) minx = x[i];
        else maxx = x[i];
        y[0] += y[i];
        if (y[i] < miny) miny = y[i];
        else maxy = y[i];
        z[0] += z[i];
        if (z[i] < minz) minz = z[i];
        else maxz = z[i];
    }
    x[0] /= (n-1);
    y[0] /= (n-1);
    z[0] /= (n-1);
    /* BUG FIX: these copies used sizeof(double*)*n (pointer size) instead of
       the element size; that only worked by accident on LP64 targets. */
    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_z, z, size, cudaMemcpyHostToDevice);
    calculate<<<(n-2+TPB-1)/TPB, TPB>>>(d_x, d_y, d_z, d_out, n);
    cudaMemcpy(out, d_out, outSize, cudaMemcpyDeviceToHost);
    /* Final reduction of the per-block partial maxima on the host. */
    double max = -9999;
    for (i = 0; i < (n-2+TPB-1)/TPB; i++){
        if (max < out[i]) max = out[i];
    }
    printf("Precnik sfere je : %lf \n x=%lf y=%lf z=%lf\n", max, x[0], y[0], z[0]);
    fclose(f);
    /* release host and device buffers (previously leaked) */
    free(x); free(y); free(z); free(out);
    cudaFree(d_x); cudaFree(d_y); cudaFree(d_z); cudaFree(d_out);
    return 0;
}
|
23,515 | // CUDA libraries.
#include <cuda.h>
#include <cuda_runtime.h>
// Include associated header file.
#include "../include/cuda_kernel.cuh"
/**
* Sample CUDA device function which adds an element from array A and array B.
*
*/
__global__ void cuda_kernel(double *A, double *B, double *C, int arraySize){
    // Element-wise sum: C[i] = A[i] + B[i].
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if ( tid >= arraySize )
        return;  // grid tail guard
    C[tid] = A[tid] + B[tid];
}
/**
* Wrapper function for the CUDA kernel function.
* @param A Array A.
* @param B Array B.
* @param C Sum of array elements A and B directly across.
* @param arraySize Size of arrays A, B, and C.
*/
/**
 * Wrapper function for the CUDA kernel function.
 * @param A Array A.
 * @param B Array B.
 * @param C Sum of array elements A and B directly across.
 * @param arraySize Size of arrays A, B, and C.
 */
void kernel(double *A, double *B, double *C, int arraySize) {
    // Initialize device pointers.
    double *d_A, *d_B, *d_C;
    // Allocate device memory.
    cudaMalloc((void**) &d_A, arraySize * sizeof(double));
    cudaMalloc((void**) &d_B, arraySize * sizeof(double));
    cudaMalloc((void**) &d_C, arraySize * sizeof(double));
    // Transfer arrays a and b to device.
    cudaMemcpy(d_A, A, arraySize * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, arraySize * sizeof(double), cudaMemcpyHostToDevice);
    dim3 blockSize(512, 1, 1);
    // BUG FIX: grid size was 512/arraySize + 1, which launches only one block
    // for any arraySize > 512 and leaves elements unprocessed. Use ceil-div.
    dim3 gridSize((arraySize + 511) / 512, 1);
    // Launch CUDA kernel.
    cuda_kernel<<<gridSize, blockSize>>>(d_A, d_B, d_C, arraySize);
    // Copy result array c back to host memory (synchronizes with the kernel).
    cudaMemcpy(C, d_C, arraySize * sizeof(double), cudaMemcpyDeviceToHost);
    // BUG FIX: device memory was leaked on every call.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
|
23,516 | #include "includes.h"
// Shared-memory read-bandwidth microbenchmark: each block stages 2048 floats
// into shared memory, then every thread performs `repeat` rounds of 16
// strided reads (index wrapped mod 2048) and accumulates them so the loads
// cannot be optimized away. The per-thread sum is written to output[gid].
__global__ void readLocalMemory(const float *data, float *output, int size, int repeat)
{
int gid = threadIdx.x + (blockDim.x * blockIdx.x), j = 0;
float sum = 0;
// litems = elements staged per thread; goffset = this thread's slice of `data`.
int tid=threadIdx.x, localSize=blockDim.x, grpid=blockIdx.x,
litems=2048/localSize, goffset=localSize*grpid+tid*litems;
int s = tid;
__shared__ float lbuf[2048];
// Stage this thread's slice from global memory (bounded by `size`)...
for ( ; j<litems && j<(size-goffset) ; ++j)
lbuf[tid*litems+j] = data[goffset+j];
// ...then pad any remainder from the start of `data` so lbuf is fully filled.
for (int i=0 ; j<litems ; ++j,++i)
lbuf[tid*litems+j] = data[i];
__syncthreads();
for (j=0 ; j<repeat ; ++j)
{
// 16 shared loads per iteration; &2047 wraps inside the 2048-float buffer.
float a0 = lbuf[(s+0)&(2047)];
float a1 = lbuf[(s+1)&(2047)];
float a2 = lbuf[(s+2)&(2047)];
float a3 = lbuf[(s+3)&(2047)];
float a4 = lbuf[(s+4)&(2047)];
float a5 = lbuf[(s+5)&(2047)];
float a6 = lbuf[(s+6)&(2047)];
float a7 = lbuf[(s+7)&(2047)];
float a8 = lbuf[(s+8)&(2047)];
float a9 = lbuf[(s+9)&(2047)];
float a10 = lbuf[(s+10)&(2047)];
float a11 = lbuf[(s+11)&(2047)];
float a12 = lbuf[(s+12)&(2047)];
float a13 = lbuf[(s+13)&(2047)];
float a14 = lbuf[(s+14)&(2047)];
float a15 = lbuf[(s+15)&(2047)];
sum += a0+a1+a2+a3+a4+a5+a6+a7+a8+a9+a10+a11+a12+a13+a14+a15;
// Advance the window so successive iterations touch different addresses.
s = (s+16)&(2047);
}
output[gid] = sum;
} |
23,517 | #include <stdio.h>
// Prints one greeting line per thread, identifying its block and thread
// coordinates (device printf; output order is unspecified).
__global__ void kernel(void) {
printf("Hello from block (%d,%d,%d), thread (%d,%d,%d) of the GPU\n",
blockIdx.x, blockIdx.y, blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z);
}
int main (void) {
// 1x2x3 blocks of 1x2x3 threads -> 36 greeting lines in total.
dim3 numBlocks(1,2,3);
dim3 threadsPerBlock(1,2,3);
kernel<<<numBlocks, threadsPerBlock>>>();
// Flush device-side printf before printing the host message.
cudaDeviceSynchronize();
printf("Hello, World\n");
return 0;
}
|
23,518 | //xfail:BOOGIE_ERROR
//--blockDim=128 --gridDim=128 --warp-sync=32 --no-inline
//kernel.cu: error: possible read-write race on A
//It fail to dim >= 128, because it can't synchronize.
#include <stdio.h>
#include <cuda.h>
#define N dim*dim
#define dim 2//128 //64
// Intentional read-write race (verification xfail test — see header comments):
// block b writes A[b*blockDim.x + t] while block b-1 reads that same cell,
// with no inter-block synchronization. Do not "fix"; the race is the point.
__global__ void foo(int* A) {
A[ blockIdx.x*blockDim.x + threadIdx.x ] += (A[ (blockIdx.x + 1)*blockDim.x + threadIdx.x ]);
}
|
23,519 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (file header says "Do not
// modify"): repeatedly overwrites/accumulates `comp` through a fixed mix of
// transcendental calls and denormal-range constants, then prints the result.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
for (int i=0; i < var_1; ++i) {
comp = +1.0779E36f + tanhf(+0.0f * -1.7349E34f);
float tmp_1 = (+1.2043E34f / (var_3 - var_4));
comp += tmp_1 * +1.1885E-42f / (+1.1410E3f * asinf((var_5 / expf((+1.8003E-41f * +1.2444E2f)))));
comp = (var_6 / (-1.1314E-35f * var_7));
for (int i=0; i < var_2; ++i) {
comp = var_8 / var_9;
comp += acosf(-1.6283E-36f);
}
if (comp > +1.9307E-37f * +0.0f * (-1.1173E-35f + var_10 * floorf(var_11 - +1.4816E-41f))) {
float tmp_2 = +1.9974E35f - (var_12 + var_13 - (+0.0f * +1.3769E-4f));
comp = tmp_2 + +1.0508E-41f * +1.6762E36f;
}
if (comp >= (-0.0f - log10f(+0.0f))) {
comp = (var_14 - (var_15 - (var_16 / -0.0f)));
comp += atan2f(-0.0f - var_17, cosf(var_18 / sinhf(+1.4442E36f)));
comp += powf((var_19 - (-1.2191E34f / var_20 * (+0.0f - var_21 - -1.7855E-37f))), var_22 / +1.1783E-36f / var_23);
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array with every slot set to v.
// NOTE(review): not referenced by main() in this file, and never freed —
// presumably harmless since the generated test exits immediately.
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
// Auto-generated driver: parses 24 command-line values and forwards them to
// the single-thread compute<<<1,1>>> kernel. NOTE(review): argc is not
// validated — fewer than 24 arguments dereferences past argv.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
// Wait for the kernel (and its device printf) to finish before exiting.
cudaDeviceSynchronize();
return 0;
}
|
23,520 | /* skeleton code for assignment2 COMP4901D
xjia@ust.hk 2015/03
*/
#include <iostream>
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
using namespace std;
const int TILE_WIDTH = 1024;
__global__ void mergeJoin(int *key1, float *value1, int *key2, float *value2,int N1,int N2,int *result)
{
    // Each block caches one TILE_WIDTH tile of the (sorted) key2 array in
    // shared memory, then matches the key1 range falling inside the tile's
    // [min_key, max_key] span against it. result[i] receives the global
    // index in key2 of the match for key1[i], or -1.
    __shared__ int s_key[TILE_WIDTH];
    __shared__ int start;   // first index in key1 with key1[i] >= min_key
    __shared__ int end;     // last  index in key1 with key1[i] <= max_key
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: guard the tail block so key2 is never read out of bounds; pad
    // with INT_MAX so padded lanes widen max_key but match no realistic key.
    s_key[threadIdx.x] = (index < N2) ? key2[index] : 2147483647;
    __syncthreads();
    // First thread of each block locates the matching key1 range.
    if (threadIdx.x == 0) {
        int min_key = s_key[0];
        int max_key = s_key[TILE_WIDTH - 1];
        // BUG FIX: start/end could be left uninitialized when nothing matched;
        // these defaults make the guard below reject the block cleanly.
        start = N1;
        end = -1;
        for (int i = 0; i < N1; i ++) {
            if (key1[i] >= min_key) {
                start = i;
                break;
            }
        }
        // BUG FIX: the original broke out on the FIRST i with key1[i] <= max_key
        // (leaving end == start); we need the LAST such index.
        for (int i = start; i < N1; i ++) {
            if (key1[i] <= max_key)
                end = i;
            else
                break;
        }
    }
    __syncthreads();
    // Threads cooperatively scan [start, end] of key1 against the tile.
    if (threadIdx.x + start <= end) {
        for (int i = threadIdx.x + start; i <= end; i += blockDim.x) {
            int key = key1[i];
            bool found = false;
            for (int j = 0; j < TILE_WIDTH; j ++) {
                if (s_key[j] == key) {
                    result[i] = blockIdx.x * blockDim.x + j;
                    found = true;
                    break;
                }
            }
            if (!found) result[i] = -1;
        }
    }
}
int main()
{
    // Reads two key/value tables from in.txt, sorts both on the GPU with
    // thrust, runs the mergeJoin kernel, and writes matches to out.txt.
    freopen("in.txt","r",stdin);
    int *h_key1, *h_key2, *d_key1, *d_key2;
    float *h_value1, *h_value2, *d_value1, *d_value2;
    int *h_result, *d_result;
    int N1,N2;
    scanf("%d%d",&N1,&N2);
    h_key1 = (int*)malloc(N1 * sizeof(int));
    h_key2 = (int*)malloc(N2 * sizeof(int));
    h_value1 = (float*)malloc(N1 * sizeof(float));
    h_value2 = (float*)malloc(N2 * sizeof(float));
    h_result = (int*)malloc(N1 * sizeof(int));
    cudaMalloc(&d_key1, N1 * sizeof(int));
    cudaMalloc(&d_key2, N2 * sizeof(int));
    cudaMalloc(&d_value1, N1 * sizeof(float));
    cudaMalloc(&d_value2, N2 * sizeof(float));
    cudaMalloc(&d_result, N1 * sizeof(int));
    for(int i = 0; i < N1; ++i)
        scanf("%d%f",&h_key1[i],&h_value1[i]);
    for(int i = 0; i < N2; ++i)
        scanf("%d%f",&h_key2[i],&h_value2[i]);
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // memset with -1 sets every byte to 0xFF, i.e. int -1 ("no match").
    memset(h_result,-1,sizeof(int) * N1);
    cudaMemcpy(d_key1,h_key1, sizeof(int) * N1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_result,h_result, sizeof(int) * N1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_key2,h_key2, sizeof(int) * N2, cudaMemcpyHostToDevice);
    cudaMemcpy(d_value1,h_value1, sizeof(float) * N1, cudaMemcpyHostToDevice);
    cudaMemcpy(d_value2,h_value2, sizeof(float) * N2, cudaMemcpyHostToDevice);
    cudaEventRecord(start,0);
    // Sort both tables by key; values follow their keys.
    thrust::device_ptr<int> dev_key1(d_key1);
    thrust::device_ptr<int> dev_key2(d_key2);
    thrust::device_ptr<float> dev_value1(d_value1);
    thrust::device_ptr<float> dev_value2(d_value2);
    thrust::sort_by_key(dev_key1,dev_key1 + N1, dev_value1);
    thrust::sort_by_key(dev_key2,dev_key2 + N2, dev_value2);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float ElapsedTime;
    cudaEventElapsedTime(&ElapsedTime,start,stop);
    printf("Sort Elapsed Time: %.3f ms\n",ElapsedTime);
    dim3 grid(ceil(double(N2)/1024));
    dim3 block(1024);
    cudaEventRecord(start,0);
    mergeJoin<<<grid,block>>>(d_key1,d_value1,d_key2,d_value2,N1,N2,d_result);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ElapsedTime,start,stop);
    printf("kernel mergeJoin Elapsed Time: %.3f ms\n",ElapsedTime);
    cudaMemcpy(h_result,d_result,sizeof(int) * N1, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_key1,d_key1,sizeof(int) * N1, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_value1,d_value1,sizeof(float) * N1, cudaMemcpyDeviceToHost);
    // BUG FIX: this copy used N1 elements into a buffer sized for N2;
    // whenever N1 > N2 it overflowed h_value2.
    cudaMemcpy(h_value2,d_value2,sizeof(float) * N2, cudaMemcpyDeviceToHost);
    int matched = 0;
    freopen("out.txt","w",stdout);
    for(int i = 0;i < N1; ++i)
    {
        if(h_result[i] == -1) {
            cout << "h_result[" << i << "] not found " << endl;
            continue;
        }
        matched++;
        printf("Key %d\nValue1 %.2f Value2 %.2f\n\n",h_key1[i],h_value1[i],h_value2[h_result[i]]);
    }
    printf("Matched %d\n",matched);
    fclose(stdout);
    free(h_key1);
    free(h_key2);
    free(h_value1);
    free(h_value2);
    free(h_result);
    cudaFree(d_key1);
    cudaFree(d_key2);
    cudaFree(d_value1);
    cudaFree(d_value2);
    cudaFree(d_result);
    return 0;
}
|
23,521 | #include "includes.h"
__global__ void square(float * d_out, float * d_in) {
    // d_out[i] = d_in[i]^2. The element count is not passed in, so the
    // caller must launch exactly one thread per element (no tail guard).
    const unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
    const float v = d_in[gid];
    d_out[gid] = v * v;
} |
23,522 | #include <iostream>
#include <cmath>
#include <vector>
#include <time.h>
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
// Aborts the process with a diagnostic when a CUDA call did not succeed.
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
    if (!result)
        return;  // cudaSuccess == 0
    std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
    file << ":" << line << " '" << func << "' \n";
    // Make sure we call CUDA Device Reset before exiting
    cudaDeviceReset();
    exit(99);
}
__global__ void color_pixel(int* pixels, int x_size, int y_size, int max_iterations){
    // Mandelbrot escape-time per pixel: pixels[j*x_size+i] gets the iteration
    // count at which z escaped, or 0 if it never did.
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    const int j = threadIdx.y + blockIdx.y * blockDim.y;
    if ((i >= x_size) || (j >= y_size)) return; // grid may overhang the image
    // Map the pixel into the complex plane (4-unit span, centered).
    const float c_re = (i - x_size/2.0f)*4.0f/x_size;
    const float c_im = (j - y_size/2.0f)*4.0f/x_size;
    // Iterate z <- z^2 + c until |z|^2 > 4 or the iteration cap is hit.
    float x = 0, y = 0;
    int iteration = 0;
    while (x*x + y*y <= 4 && iteration < max_iterations) {
        const float x_new = x*x - y*y + c_re;
        y = 2*x*y + c_im;
        x = x_new;
        iteration++;
    }
    const int pixel_index = j * x_size + i;
    pixels[pixel_index] = (iteration < max_iterations) ? iteration : 0;
}
int main(int argc, char *argv[]){
    // Renders an nx-by-ny Mandelbrot set on the GPU and prints it as a PPM
    // (P3) image on stdout; timing goes to stderr.
    int nx = 1000;
    int max = 1000;
    int tx = 8; // threads per block, x
    int ty = 8; // threads per block, y
    if(argc > 1){
        nx = atoi(argv[1]);
    }
    int ny = nx; // for now, grid must be square, so ny = nx
    // Allocate the iteration-count array in unified (managed) memory.
    int num_pixels = nx * ny;
    size_t pixels_size = num_pixels * sizeof(int);
    int *pixels;
    checkCudaErrors(cudaMallocManaged((void **)&pixels, pixels_size));
    clock_t start, stop;
    start = clock();
    dim3 blocks(nx/tx+1,ny/ty+1); // one extra block per axis covers the remainder
    dim3 threads(tx,ty);          // tx * ty threads per block
    color_pixel<<<blocks, threads>>>(pixels, nx, ny, max);
    checkCudaErrors(cudaGetLastError());
    checkCudaErrors(cudaDeviceSynchronize());
    stop = clock();
    double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
    std::cerr << "took " << timer_seconds << " seconds.\n";
    std::cout << "P3\n" << nx << " " << ny << "\n255\n";
    for(int x = 0; x < nx; x++){
        for(int y = 0; y < ny; y++){
            // NOTE(review): this indexes x*nx+y while the kernel wrote
            // j*x_size+i — for a square image that transposes the picture;
            // confirm whether the transpose is intended.
            int pixel_index = x * nx + y;
            int r = 0;
            int g = (pixels[pixel_index] * 255/10);
            int b = 0;
            std::cout << r << " " << g << " " << b << "\n";
        }
    }
    // BUG FIX: the managed allocation was never released.
    checkCudaErrors(cudaFree(pixels));
}
|
23,523 | #include <cuda.h>
// Minimal CUDA translation unit: verifies the toolchain builds and links.
int main()
{
return 0;
} |
23,524 | #include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
using namespace std;
int main(int argc, char **argv)
{
    // Enumerate CUDA devices and print a short capability summary for each.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    cout << "Number of devices: " << deviceCount << endl;
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        // Query this device's properties.
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        cout << "Device " << dev << endl;
        cout << "Name " << props.name << endl;
        cout << "Revision " << props.major << "." << props.minor << endl;
        cout << "Memory " << props.totalGlobalMem / 1024 / 1024 << "MB" << endl;
        cout << "Warp Size " << props.warpSize << endl;
        cout << "Clock " << props.clockRate << endl;
        cout << "Multiprocessors " << props.multiProcessorCount << endl;
    }
    return 0;
} |
23,525 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void print_warps_details() {
    // Flatten the 2-D grid into a global block id, then a global thread id.
    const int gbid = blockIdx.y * gridDim.x + blockIdx.x;
    const int gid = gbid * blockDim.x + threadIdx.x;
    // Warp index within the block (warps are 32 threads wide).
    const int wid = threadIdx.x / 32;
    printf("tid : %d, bid : [%d, %d], gid : %d, wid : %d, gbid : %d\n", threadIdx.x, blockIdx.x,
    blockIdx.y, gid, wid, gbid);
}
// Launches a 2x2 grid of 42-thread blocks: each block spans one full warp
// plus a partial 10-thread warp, which the kernel's printout makes visible.
int main(int argc, char *argv[]) {
dim3 block_size(42); // 10 larger than actual warp size
dim3 grid_size(2, 2);
print_warps_details<<<grid_size, block_size>>>();
cudaDeviceSynchronize();
cudaDeviceReset();
return EXIT_SUCCESS;
} |
23,526 | #include <cstdio>
#define EMPTY 0
#define WHITE 1
#define BLACK 2
#define QUEENW 11
#define QUEENB 22
// One node of the checkers game tree.
struct checkers_point{
int board[64];                       // 8x8 board, row-major (EMPTY/WHITE/BLACK/QUEENW/QUEENB)
int how_much_children;               // number of direct children
checkers_point * children = NULL;    // first child
checkers_point * next = NULL;        // next sibling
checkers_point * prev = NULL;        // previous sibling
checkers_point * parent = NULL;      // parent node
checkers_point * tempnext = NULL;    // saved sibling link used by Queue traversal
bool min_max;                        // true/false alternates per ply (minimax role)
int value;                           // evaluated score of this position
int alpha = -1000000000;             // alpha bound for pruning
int beta = 1000000000;               // beta bound for pruning
int player;                          // side to move at this node
};
// Pending follow-up move in a multi-capture chain.
struct next_kill{
int t[4];                 // from (t[0],t[1]) to (t[2],t[3])
next_kill * next = NULL;  // next pending move in the chain
int * parent_tab;         // board the move applies to
};
// Intrusive FIFO of checkers_point nodes, threaded through their `next`
// pointers. Because `next` doubles as the tree's sibling link, `tempnext`
// stores the original sibling so popping a node does not destroy the tree.
class Queue{
private:
checkers_point * first = NULL;   // head of the queue
checkers_point * last = NULL;    // tail of the queue
int size = 0;                    // number of queued nodes
public:
__device__
// Enqueue a single node; the tail's original sibling link is preserved
// in tempnext before `next` is repointed at the new node.
void add_one(checkers_point * point) {
if(point == NULL)
return;
if(first == NULL) {
this->first = point;
this->last = point;
}
else {
this->last->tempnext = this->last->next;
this->last->next = point;
this->last = point;
}
this->size = this->size + 1;
}
__device__
// Enqueue a whole sibling list (already linked via `next`), counting it.
void add(checkers_point * layer) {
if(layer == NULL)
return;
int counter = 0;
if(this->first == NULL) {
this->first = layer;
}
else {
this->last->next=layer;
}
checkers_point * temp = layer;
counter+=1;
while(temp->next != NULL) {
temp = temp->next;
counter+=1;
}
this->last = temp;
this->size = this->size + counter;
}
__device__
// Dequeue the head. When the head is the last node of its family (no
// successor, or the successor has a different parent), its sibling link
// is restored from tempnext so the tree stays intact.
checkers_point * pop() {
checkers_point * firs,* seco;
firs = this->first;
if(firs == NULL)
return NULL;
else
seco = firs->next;
if(seco==NULL || firs->parent != seco->parent) {
firs->next = NULL;
first->next=first->tempnext;
}
this->first = seco;
this->size = this->size - 1;
return firs;
}
__device__
// True when no nodes are queued.
bool empty() {
return this->size == 0;
}
__device__
// Current number of queued nodes.
int get_size() {
return this->size;
}
__device__
// Head of the queue without removing it.
checkers_point * front() {
return this->first;
}
__device__
// Pop everything (does not delete the nodes).
void clean() {
while(this->size > 0)
this->pop();
}
};
extern "C" {
__device__
int pawn_owner(int * tab, int x, int y){
    // Returns WHITE/BLACK for a man or queen of that colour, EMPTY otherwise.
    const int cell = tab[x*8+y];
    if (cell == BLACK || cell == QUEENB)
        return BLACK;
    if (cell == WHITE || cell == QUEENW)
        return WHITE;
    return EMPTY;
}
__device__
bool create_queen(int * tab, int x, int y){
    // Promote the man at (x, y) if it stands on its promotion edge
    // (row 0 for WHITE, row n-1 for BLACK); returns whether it promoted.
    const int n = 8;
    const int cell = tab[x*n+y];
    if ((x != 0 && x != n-1) ||
        (cell != WHITE && cell != BLACK) ||
        (x == 0 && cell == BLACK) ||
        (x == n-1 && cell == WHITE))
        return false;
    tab[x*n+y] = (cell == WHITE) ? QUEENW : QUEENB;
    return true;
}
__device__
bool is_queen(int * tab, int x, int y){
    // True when the square holds a promoted piece of either colour.
    const int cell = tab[x*8+y];
    return cell == QUEENB || cell == QUEENW;
}
__device__
bool is_a_pawn(int * tab, int x, int y){
    // "Pawn" here means "any piece": the square is simply non-empty.
    return tab[x*8+y] != EMPTY;
}
__device__
bool correct_kill(int * tab, int x, int y, int x1, int y1){
    // A capture is legal only onto an occupied enemy square.
    if (!is_a_pawn(tab, x1, y1))
        return false;
    return pawn_owner(tab, x, y) != pawn_owner(tab, x1, y1);
}
__device__
bool queen_way(int * tab, int x, int y, int x1, int y1){
    // Walk the diagonal strictly between (x,y) and (x1,y1). The path is
    // blocked by any own piece, or by two occupied squares back-to-back.
    const int own = pawn_owner(tab, x, y);
    const int dx = (x > x1) ? -1 : 1;
    const int dy = (y > y1) ? -1 : 1;
    bool prevOccupied = false;  // was the previous square occupied?
    for (x += dx, y += dy; x != x1; x += dx, y += dy){
        if (tab[x*8+y] == EMPTY){
            prevOccupied = false;
            continue;
        }
        if (prevOccupied)                  // two pieces in a row
            return false;
        if (pawn_owner(tab, x, y) == own)  // cannot jump an own piece
            return false;
        prevOccupied = true;
    }
    return true;
}
__device__
// Validates a single move for player `who` from (x,y) to (x1,y1): bounds,
// diagonal shape, ownership, destination emptiness, direction rules for men,
// path rules for queens, and capture legality for 2-square man jumps.
// NOTE(review): std::abs is called from device code — relies on the CUDA
// toolchain providing a __device__ overload; confirm it compiles as intended.
bool is_move_correct(int * tab, int x, int y, int who, int x1, int y1){
int n = 8;
if (x < 0 || x >= n || x1 < 0 || x1 >= n || y < 0 || y >= n || y1 < 0 || y1 >= n ){
// printf("WRONG VALUE");
return false;
}
if (std::abs(x-x1) != std::abs(y-y1)){
// printf("ABS PROBLEM");
return false;
}
int pwn_wnr = pawn_owner(tab, x, y);
if (pwn_wnr == EMPTY){
// printf("PAWN OWNER EMPTY");
return false;
}
if (pwn_wnr != who){
// printf("pwn_wnr != who");
return false;
}
if (is_a_pawn(tab, x1, y1)){
// printf("pawn in _");
return false;
}
// Plain men may only move toward the opponent's side.
if (x < x1 && who == WHITE && tab[x*n+y] != QUEENW){
// printf("WHITE WRONG WAY");
return false;
}
if (x > x1 && who == BLACK && tab[x*n+y] != QUEENB){
// printf("BLACK WRONG WAY");
return false;
}
// Queens need a clear (capture-legal) diagonal path.
if ((tab[x*n+y] == QUEENW || tab[x*n+y] == QUEENB) && (!queen_way(tab, x, y, x1, y1))){
// printf("queen problem");
return false;
}
// A man's 2-square jump must capture the enemy piece in between.
if (!is_queen(tab, x, y) && std::abs((x-x1)) > 1 && !correct_kill(tab, x, y, (x1+x)/2, (y1+y)/2)){
// printf("Correct kill problem");
return false;
}
return true;
}
__device__
next_kill * create_next_move(int x, int y, int * par_tb, int x1, int y1){
    // Heap-allocate a pending move record: (x,y) -> (x1,y1) on board par_tb.
    next_kill * move = new next_kill;
    move->t[0] = x;
    move->t[1] = y;
    move->t[2] = x1;
    move->t[3] = y1;
    move->parent_tab = par_tb;
    return move;
}
__device__
void copy_board(int * ch, checkers_point * ch2){
    // Copy all 64 squares from a raw board into the node's board.
    for (int i = 0; i < 64; ++i)
        ch2->board[i] = ch[i];
}
__device__
void kill(int x, int y, int * tab){
    // Remove the piece at (x, y).
    tab[x*8+y] = EMPTY;
}
__device__
bool has_next_move(int x, int y, int x1, int y1, int * tab, bool kiiil){
    // Walk the diagonal from (x,y) toward (x1,y1) (end square excluded),
    // counting occupied squares; when kiiil is set, clear them as we go.
    const int dx = (x > x1) ? -1 : 1;
    const int dy = (y > y1) ? -1 : 1;
    int captured = 0;
    for (; x != x1; x += dx, y += dy){
        if (is_a_pawn(tab, x, y)){
            if (kiiil)
                kill(x, y, tab);
            ++captured;
        }
    }
    return captured > 0;
}
__device__
// Stub: queen multi-capture expansion is not implemented (see the
// commented-out block in pawn()); the node is returned unchanged.
checkers_point * again_queen(checkers_point * ch, next_kill * first, next_kill * last){
return ch;
}
__device__
next_kill * queen_move_again_again(int * tab, int x1, int y1, int x, int y, next_kill * last){
    // Append a follow-up queen move to the pending list when it is legal
    // and actually captures something; returns the (possibly new) tail.
    if (is_move_correct(tab, x1, y1, pawn_owner(tab, x1, y1), x, y)
        && has_next_move(x1, y1, x, y, tab, false)) {
        next_kill * queued = create_next_move(x1, y1, tab, x, y);
        last->next = queued;
        last = queued;
    }
    return last;
}
__device__
// Expands one pending capture continuation: if the queued jump is legal,
// a new sibling node is created with the jump applied (moved piece, source
// cleared, captured midpoint cleared), promotion is attempted, and two
// further jump candidates from the landing square are appended after `last`.
// Returns the new current node (unchanged if the jump was illegal).
checkers_point * again(checkers_point * ch, next_kill * first, next_kill * last, int pm){
int x = first->t[0], y = first->t[1], x1 = first->t[2], y1 = first->t[3], * tab = first->parent_tab;
if(is_move_correct(tab, x, y, pawn_owner(tab, x, y), x1, y1)){
checkers_point * chld;
// Link the new node as a sibling of ch, sharing ch's parent.
ch->next = new checkers_point;
ch->next->parent = ch->parent;
ch->next->prev = ch;
ch->next->tempnext = NULL;
chld = ch->next;
chld->min_max = !chld->parent->min_max;
chld->alpha = -1000000000;
chld->beta = 1000000000;
copy_board(tab, chld);
chld->parent->how_much_children++;
// Apply the jump: move the piece, clear the source and the captured square.
chld->board[x1*8+y1] = chld->board[x*8+y];
chld->board[x*8+y] = EMPTY;
chld->board[(x+x1)/2*8+(y+y1)/2] = EMPTY;
ch = chld;
create_queen(ch->board, x1, y1);
// Queue the two possible onward jumps (pm encodes the forward direction).
last->next = create_next_move(x1, y1, ch->board, x1+pm, y1+2);
last = last->next;
last->next = create_next_move(x1, y1, ch->board, x1+pm, y1-2);
last = last->next;
}
return ch;
}
__device__
// Tries to apply the move (x,y)->(x1,y1) for the piece on the parent board
// and, if legal, allocates a new tree node holding the resulting position.
// `nxt` selects (and is updated to reflect) whether the new node becomes
// ch's first child or ch's next sibling. `iskillsomethingnow` marks a
// 2-square man jump (the captured midpoint is cleared, and further jump
// chains are explored via again()); `queen` marks queen moves.
// Returns the newly created node, or ch unchanged when the move is illegal.
checkers_point * pawn(checkers_point * ch, int x, int y, int x1, int y1, bool &nxt, bool iskillsomethingnow, bool queen){
int * tab = ch->board;
if (ch->parent != NULL)
tab = ch->parent->board;
if (is_move_correct(tab, x, y, pawn_owner(tab, x, y), x1, y1) == true){
// printf("correct ");
checkers_point * chld;
if (!nxt){
// First legal move from this position: attach as first child.
// printf("chld ");
ch->children = new checkers_point;
ch->children->parent = ch;
ch->children->prev = NULL;
ch->children->tempnext = NULL;
chld = ch->children;
} else {
// Later legal moves: attach as the next sibling.
// printf("next ");
ch->next = new checkers_point;
ch->next->parent = ch->parent;
ch->next->prev = ch;
ch->next->tempnext = NULL;
chld = ch->next;
}
chld->min_max = !chld->parent->min_max;
chld->alpha = -1000000000;
chld->beta = 1000000000;
chld->how_much_children = 0;
chld->next = chld->children = NULL;
copy_board(chld->parent->board, chld);
chld->parent->how_much_children++;
// Apply the move on the child's board copy.
chld->board[x1*8+y1] = chld->board[x*8+y];
chld->board[x*8+y] = EMPTY;
if (iskillsomethingnow && queen == false)
chld->board[(x+x1)/2*8+(y+y1)/2] = EMPTY;
ch = chld;
nxt = true;
if (!iskillsomethingnow)
create_queen(ch->board, x1, y1);
// printf("%d, %d -> %d, %d\n", x, y, x1, y1);
// A man that captured and did not promote may continue jumping:
// expand the chain breadth-first through a next_kill work list.
if (iskillsomethingnow && queen == false && (create_queen(ch->board, x1, y1) == false)){
int pm;
if (ch->board[x1*8+y1] == WHITE){
pm = -2;
} else {
pm = 2;
}
next_kill * first, * last, * temp;
first = create_next_move(x1, y1, ch->board, x1+pm, y1+2);
first->next = last = create_next_move(x1, y1, ch->board, x1+pm, y1-2);
while (first != NULL){
ch = again(ch, first, last, pm);
while (last->next != NULL)
last = last->next;
temp = first;
first = first->next;
delete temp;
}
}
// Queen capture chains are not implemented yet (kept disabled below).
if (queen && has_next_move(x1, y1, x, y, tab, true)){
/*next_kill * first, * last, * temp;
first = create_next_move(x1, y1, ch->board, x1+1, y1+1);
first->next = last = create_next_move(x1, y1, ch->board, x1-1, y1+1);
last->next = create_next_move(x1, y1, ch->board, x1+1, y1-1);
last = last->next;
last->next = create_next_move(x1, y1, ch->board, x1-1, y1-1);
last = last->next;
for (int i = 2; i < 8; i++){
last = queen_move_again_again(tab, x1, y1, x1+i, y1+i, last);
last = queen_move_again_again(tab, x1, y1, x1+i, y1-i, last);
last = queen_move_again_again(tab, x1, y1, x1-i, y1+i, last);
last = queen_move_again_again(tab, x1, y1, x1-i, y1-i, last);
}
while (first != NULL){
ch = again_queen(ch, first, last);
while (last->next != NULL)
last = last->next;
temp = first;
first = first->next;
delete temp;
}*/
}
}
return ch;
}
__device__
// Expands all candidate moves of the piece at (x,y) for the side whose turn
// it is (parity of turn_no relative to `player`), appending the resulting
// positions to the tree via pawn(). `nxt` threads the first-child/sibling
// state across calls. Returns the last node created.
checkers_point * dismember_child(checkers_point * ch, int x, int y, int turn_no, bool &nxt, int player){
checkers_point * chb = ch->parent;
if (!nxt){
// printf(" NO PARENT ");
chb = ch;
}
/*
printf("NR %d\n", chb->value);
for (int i = 0; i < 64; i++)
printf("%d ", chb->board[i]);
printf("\n");
*/
// ww/bb encode which turn parity belongs to which colour for `player`.
int ww = 1, bb = 0;
if (player == BLACK){
ww = 0;
bb = 1;
}
switch(chb->board[x*8+y]){
case WHITE:
if (turn_no % 2 == ww){
// printf("WHITE ");
// Two simple steps and two capture jumps (white moves toward row 0).
ch = pawn(ch, x, y, x-1, y-1, nxt, false, false);
ch = pawn(ch, x, y, x-1, y+1, nxt, false, false);
ch = pawn(ch, x, y, x-2, y-2, nxt, true, false);
ch = pawn(ch, x, y, x-2, y+2, nxt, true, false);
}
break;
case BLACK:
if (turn_no % 2 == bb){
// printf("BLACK %d %d", x, y);
// Two simple steps and two capture jumps (black moves toward row 7).
ch = pawn(ch, x, y, x+1, y-1, nxt, false, false);
ch = pawn(ch, x, y, x+1, y+1, nxt, false, false);
ch = pawn(ch, x, y, x+2, y-2, nxt, true, false);
ch = pawn(ch, x, y, x+2, y+2, nxt, true, false);
}
break;
case QUEENB:
if (turn_no % 2 == bb){
/*for (int i = 0; i < 8; i++){
ch = pawn(ch, x, y, x+i, y-i, nxt, false, true);
ch = pawn(ch, x, y, x+i, y+i, nxt, false, true);
ch = pawn(ch, x, y, x-i, y-i, nxt, false, true);
ch = pawn(ch, x, y, x-i, y+i, nxt, false, true);
}*/
// Queen moves limited to 1- and 2-square diagonals (full-range loop disabled).
ch = pawn(ch, x, y, x+1, y-1, nxt, false, true);
ch = pawn(ch, x, y, x+1, y+1, nxt, false, true);
ch = pawn(ch, x, y, x-1, y-1, nxt, false, true);
ch = pawn(ch, x, y, x-1, y+1, nxt, false, true);
ch = pawn(ch, x, y, x+2, y-2, nxt, false, true);
ch = pawn(ch, x, y, x+2, y+2, nxt, false, true);
ch = pawn(ch, x, y, x-2, y-2, nxt, false, true);
ch = pawn(ch, x, y, x-2, y+2, nxt, false, true);
}
break;
case QUEENW:
if (turn_no % 2 == ww){/*
for (int i = 0; i < 8; i++){
ch = pawn(ch, x, y, x+i, y-i, nxt, false, true);
ch = pawn(ch, x, y, x+i, y+i, nxt, false, true);
ch = pawn(ch, x, y, x-i, y-i, nxt, false, true);
ch = pawn(ch, x, y, x-i, y+i, nxt, false, true);
}*/
ch = pawn(ch, x, y, x+1, y-1, nxt, false, true);
ch = pawn(ch, x, y, x+1, y+1, nxt, false, true);
ch = pawn(ch, x, y, x-1, y-1, nxt, false, true);
ch = pawn(ch, x, y, x-1, y+1, nxt, false, true);
ch = pawn(ch, x, y, x+2, y-2, nxt, false, true);
ch = pawn(ch, x, y, x+2, y+2, nxt, false, true);
ch = pawn(ch, x, y, x-2, y-2, nxt, false, true);
ch = pawn(ch, x, y, x-2, y+2, nxt, false, true);
}
break;
default:
break;
}
return ch;
}
__device__
//add global size
void ramification(checkers_point * ch2, int thid, int how_deep, int player){
    // Expand every occupied square of this node's board into child moves.
    bool nxt = false;
    for (int sq = 0; sq < 8*8; ++sq){
        if (ch2->board[sq] != EMPTY)
            ch2 = dismember_child(ch2, sq / 8, sq % 8, how_deep, nxt, player);
    }
}
__global__
// Each thread descends the existing tree to the node it is responsible for
// at depth `how_deep` (by repeatedly splitting its index across siblings)
// and the thread whose residual index reaches 0 expands that node.
// NOTE(review): the __syncthreads() calls sit inside `if (thid < n)` and
// inside loops with thread-dependent break conditions — divergent barriers
// are undefined behavior in CUDA; verify all threads reach the same barriers.
void create_tree(int n, checkers_point * ch, int how_deep, int player){
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
int find_me = thid;
int count_group = n;
__syncthreads();
if (thid < n){
checkers_point * ch2 = ch;
for (int i = 0; i < how_deep; i++){
if (find_me == 0 && i + 1 == how_deep){
ramification(ch2, thid, how_deep, player);
}
__syncthreads();
if (i + 1 == how_deep)
break;
// Partition the remaining thread indices among this node's children.
count_group = count_group/ch2->how_much_children;
int group = find_me/count_group;
if (group >= ch2->how_much_children)
break;
find_me = find_me % count_group;
// Step down to the `group`-th child.
ch2 = ch2->children;
if (ch2 == NULL)
break;
for (int k = 0; k < group; k++){
ch2 = ch2->next;
if (ch2 == NULL)
break;
}
if (ch2 == NULL)
break;
__syncthreads();
}
}
}
__global__
// Frees the subtree below `ch`: thread 0 pops nodes breadth-first until at
// most thread_num subtree roots remain, parks those roots in V, and then
// each thread deletes one parked subtree.
// NOTE(review): every thread writes `count = 0` (shared) unsynchronized
// before thread 0 starts counting, and only __syncthreads() separates the
// phases within one block — confirm the kernel is launched as a single
// block, otherwise blocks race on V/count.
void delete_tree(checkers_point * ch, int thread_num, checkers_point ** V) {
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ int count;
count = 0;
if(thid == 0){
// printf("delete_tree");
checkers_point * child = ch->children;
checkers_point * temp;
Queue Q;
Q.add(child);
// Pop-and-delete until the frontier fits in thread_num slots.
while(!Q.empty() && Q.get_size()+Q.front()->how_much_children < thread_num) {
temp = Q.pop();
if(temp->children !=NULL)
Q.add(temp->children);
delete temp;
}
// Park the remaining subtree roots for parallel deletion.
while(!Q.empty()) {
temp = Q.pop();
V[count]=temp;
count++;
}
}
__syncthreads();
if(thid < count) {
checkers_point * my_child = V[thid];
Queue Q;
checkers_point * temp, * child;
Q.add_one(my_child);
// Breadth-first delete of this thread's subtree.
while(!Q.empty()) {
temp = Q.pop();
child = temp->children;
if(child != NULL)
Q.add(child);
delete temp;
}
}
}
__device__
int calculate_pawns_value(int * tab){
    // Material balance: men are worth 3, queens 5; white counts positive,
    // black negative. The final expression rescales the raw balance into
    // roughly [0, 99].
    int balance = 0;
    for(int i = 0; i < 8*8; i++){
        switch (tab[i]) {
        case WHITE:  balance += 3; break;
        case QUEENW: balance += 5; break;
        case BLACK:  balance -= 3; break;
        case QUEENB: balance -= 5; break;
        default: break;
        }
    }
    return (int)(((49.5*balance)/57.0) + 49.5);
}
__device__
int calculate_dist_to_be_queen(int * tab){
    // Compares how far each side's men are, on average, from promotion and
    // rescales (black average - white average) into roughly [0, 99].
    // NOTE(review): white distance is measured toward row n-1 here, while
    // create_queen promotes WHITE on row 0 — verify which is intended.
    const int n = 8;
    int black_count = 0, white_count = 0, black_dist = 0, white_dist = 0;
    for(int row = 0; row < n; row++){
        for(int col = 0; col < n; col++){
            const int cell = tab[row*n+col];
            if (cell == WHITE){
                white_count++;
                white_dist += (n-1-row);
            } else if (cell == BLACK){
                black_count++;
                black_dist += row;
            }
        }
    }
    // Guard against division by zero when a side has no men left.
    double white_val = (white_count == 0) ? 0 : (double)white_dist/(double)white_count;
    double black_val = (black_count == 0) ? 0 : (double)black_dist/(double)black_count;
    double value = black_val - white_val;
    return (int)(0.495*value + 49.5);
}
__device__
int calculate_future_queen_kills(int * tab){
    // Estimates how exposed each side's pawns are to queen captures, scaled
    // into ~[0, 99] (higher favours white). kill_tab marks, per square, the
    // owner of a pawn some enemy queen could capture.
    int n = 8;
    int * kill_tab = new int [n*n];
    int temp_row, temp_col, white_count = 0, black_count = 0, white_dead = 0, black_dead = 0;
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            kill_tab[row*n+col] = EMPTY;
            if (tab[row*n+col] == QUEENB || tab[row*n+col] == QUEENW) {
                // Scan each of the four diagonals away from the queen: a pawn
                // is capturable when it is an enemy and the square behind it
                // (further along the diagonal) is free.
                temp_row = row-1;
                temp_col = col-1;
                while (temp_row >= 1 && temp_col >= 1) {
                    if (is_a_pawn(tab,temp_row,temp_col)) {
                        if (pawn_owner(tab,row,col) == pawn_owner(tab,temp_row,temp_col))
                            break;
                        else if (is_a_pawn(tab,temp_row-1,temp_col-1))
                            break;
                        else {
                            kill_tab[temp_row*n+temp_col] = pawn_owner(tab,temp_row,temp_col);
                            temp_row--;
                            temp_col--;
                        }
                    }
                    temp_row--;
                    temp_col--;
                }
                temp_row = row-1;
                temp_col = col+1;
                while (temp_row >= 1 && temp_col <= n-2) {
                    if (is_a_pawn(tab,temp_row,temp_col)) {
                        if (pawn_owner(tab,row,col) == pawn_owner(tab,temp_row,temp_col))
                            break;
                        else if (is_a_pawn(tab,temp_row-1,temp_col+1))
                            break;
                        else {
                            kill_tab[temp_row*n+temp_col] = pawn_owner(tab,temp_row,temp_col);
                            temp_row--;
                            temp_col++;
                        }
                    }
                    temp_row--;
                    temp_col++;
                }
                temp_row = row+1;
                temp_col = col-1;
                while (temp_row <= n-2 && temp_col >= 1) {
                    if (is_a_pawn(tab,temp_row,temp_col)) {
                        if (pawn_owner(tab,row,col) == pawn_owner(tab,temp_row,temp_col))
                            break;
                        else if (is_a_pawn(tab,temp_row+1,temp_col-1))
                            break;
                        else {
                            kill_tab[temp_row*n+temp_col] = pawn_owner(tab,temp_row,temp_col);
                            temp_row++;
                            temp_col--;
                        }
                    }
                    temp_row++;
                    temp_col--;
                }
                temp_row = row+1;
                temp_col = col+1;
                while (temp_row <= n-2 && temp_col <= n-2) {
                    if (is_a_pawn(tab,temp_row,temp_col)) {
                        if (pawn_owner(tab,row,col) == pawn_owner(tab,temp_row,temp_col))
                            break;
                        else if (is_a_pawn(tab,temp_row+1,temp_col+1))
                            break;
                        else {
                            kill_tab[temp_row*n+temp_col] = pawn_owner(tab,temp_row,temp_col);
                            temp_row++;
                            temp_col++;
                        }
                    }
                    temp_row++;
                    temp_col++;
                }
            }
        }
    }
    // Count threatened pawns and total pawns per side.
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            if (kill_tab[row*n+col] == WHITE)
                white_dead++;
            else if (kill_tab[row*n+col] == BLACK)
                black_dead++;
            if (is_a_pawn(tab,row,col)) {
                if (pawn_owner(tab,row,col) == WHITE)
                    white_count++;
                else
                    black_count++;
            }
        }
    }
    double black_percent, white_percent;
    if (black_count == 0)
        black_percent = 1.0;
    else
        black_percent = (double)black_dead/(double)black_count;
    if (white_count == 0)
        white_percent = 1.0;
    else
        white_percent = (double)white_dead/(double)white_count;
    // BUG FIX: the difference used to be stored in an int, truncating every
    // fractional advantage to 0 (or +/-1); keep it in double like the other
    // heuristics so the 49.5 scaling below is meaningful.
    double value = white_percent - black_percent;
    // BUG FIX: kill_tab was allocated with new[], so it must be released with
    // delete[] — plain delete on a new[] pointer is undefined behaviour.
    delete[] kill_tab;
    return 49.5*value+49.5;
}
__device__
int calculate_board_value(int * Board){
    // Combines the three heuristics into one integer by packing each 0..99
    // sub-score into two decimal digits:
    //   material | promotion distance | queen-capture threats.
    int value = calculate_pawns_value(Board);
    value = 100 * value + calculate_dist_to_be_queen(Board);
    value = 100 * value + calculate_future_queen_kills(Board);
    return value;
}
__device__
// Runs a min-max evaluation over the subtree rooted at ch: leaves (or nodes
// whose alpha/beta were already set) are scored, then scores are folded into
// parents while walking back up. Relies on Queue's FIFO order so that all of
// a parent's children are processed before the parent itself is popped.
void minmax(checkers_point * ch) {
// descend to the leaves and push them onto the scoring queue
Queue tempQueue;
Queue Q;
checkers_point * temp;
tempQueue.add_one(ch);
while(!tempQueue.empty()) {
temp = tempQueue.pop();
// nodes with non-sentinel alpha/beta were already evaluated elsewhere
if(temp->alpha!=-1000000000 || temp->beta!=1000000000)
Q.add_one(temp);
else if(temp->children==NULL) {
int wynik = calculate_board_value(temp->board);// evaluate the board position
temp->alpha = wynik;
temp->beta = wynik;
Q.add_one(temp);
}
else if(temp->children!=NULL)
tempQueue.add(temp->children);
}
// remember the parent of the first node on the queue
checkers_point * parent = Q.front()->parent;
Q.add_one(parent);
// walking the queue, fold each node's score into its parent;
// when the parent changes, enqueue the previous parent and track the new one
while(!Q.empty()) {
temp = Q.pop();
if(temp==ch) break;
if(temp->parent!=NULL) {
// min_max selects whether this node tightens the parent's beta (min
// level) or alpha (max level)
if(temp->min_max)
temp->parent->beta = min(temp->alpha,temp->parent->beta);
else
temp->parent->alpha = max(temp->beta,temp->parent->alpha);
}
if(parent!=temp->parent) {
if(temp->parent!=NULL)
Q.add_one(temp->parent);
parent = temp->parent;
}
}
Q.clean();
// done!
}
__global__
void alpha_beta(checkers_point * ch, int thread_num, checkers_point ** V) {
    // Parallel evaluation of the game tree: thread 0 splits the tree into up
    // to thread_num subtrees (roots stored in V), every thread runs minmax on
    // its subtree, and thread 0 then finishes the evaluation from the root.
    // Assumes a single-block launch (shared `count` is per-block).
    int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
    __shared__ int count;   // number of subtree roots placed in V
    if (thid == 0) {
        // BUG FIX: `count = 0` used to be executed by every thread with no
        // barrier before thread 0's increments below — a lagging thread could
        // zero it mid-update. Only the single writer initializes it now.
        count = 0;
        checkers_point * temp;
        Queue Q;
        Q.add(ch);
        // Widen the BFS frontier until it can occupy thread_num workers,
        // resetting alpha/beta sentinels on every node visited on the way.
        while (!Q.empty() && Q.get_size() + Q.front()->how_much_children < thread_num) {
            temp = Q.pop();
            if (temp->children != NULL)
                Q.add(temp->children);
            temp->alpha = -1000000000;
            temp->beta = 1000000000;
        }
        while (!Q.empty()) {
            temp = Q.pop();
            temp->alpha = -1000000000;
            temp->beta = 1000000000;
            V[count] = temp;
            count++;
        }
    }
    __syncthreads();   // publish count and V[]
    // Evaluate the distributed subtrees in parallel.
    if (thid < count)
        minmax(V[thid]);
    __syncthreads();
    // Fold the subtree results up to the root.
    if (thid == 0) {
        minmax(ch);
    }
}
__device__
void print_tr(checkers_point * ch){
    // Recursive debug dump of the game tree: interior nodes print their value
    // tagged with (c)/(n) before recursing into children/siblings; leaf nodes
    // print their value followed by the 8x8 board.
    if (ch == NULL)
        return;
    if (ch->children != NULL) {
        printf("(c) %d ", ch->value);
        print_tr(ch->children);
    }
    if (ch->next != NULL) {
        printf("(n) %d ", ch->value);
        print_tr(ch->next);
    }
    if (ch->children == NULL && ch->next == NULL) {
        printf("%d\n", ch->value);
        for (int cell = 0; cell < 64; cell++) {
            printf("%d", ch->board[cell]);
            if (cell % 8 == 7)
                printf("\n");
        }
        printf("\n");
    }
}
__global__
// Debug kernel: currently only thread 0 prints a blank separator; the actual
// tree dump (print_tr) is left commented out. Parameters n, i and player are
// presently unused.
void print_tree(int n, checkers_point * ch, int i, int player){
int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
if (thid == 0){
printf("\n\n");
// print_tr(ch);
// printf("____\n");
}
}
__global__
void set_root(checkers_point * ch, int * tab, int size, int player){
    // Initialises ch as the root of the search tree from the size x size
    // board in tab. White roots maximise (min_max == true); alpha/beta start
    // at the +/-1e9 sentinels.
    int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (thid != 0)
        return;
    ch->value = 1;
    ch->children = NULL;
    ch->next = NULL;
    ch->prev = NULL;
    ch->parent = NULL;
    ch->tempnext = NULL;
    ch->alpha = -1000000000;
    ch->beta = 1000000000;
    ch->min_max = (player == WHITE);
    ch->how_much_children = 0;
    // Copy the starting position into the root node.
    for (int cell = 0; cell < size * size; ++cell)
        ch->board[cell] = tab[cell];
}
__global__
void copy_best_result(checkers_point * ch, int * tab, int size, int player){
    // Copies the board of the root child whose score produced the root's
    // chosen bound (alpha for white, beta for black) into tab. Falls back to
    // the last child when no exact match is found.
    int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (thid != 0)
        return;
    checkers_point * best = ch->children;
    if (best == NULL) {
        printf("No result -> root->chidlren = NULL\n");
        return;
    }
    if (player == WHITE) {
        // White picked the child whose beta equals the root's alpha.
        while (ch->alpha != best->beta && best->next != NULL)
            best = best->next;
    } else {
        // Black picked the child whose alpha equals the root's beta.
        while (ch->beta != best->alpha && best->next != NULL)
            best = best->next;
    }
    for (int cell = 0; cell < 64; ++cell)
        tab[cell] = best->board[cell];
}
}
|
23,527 | /*
============================================================================
Name : GScuda.cu
Author : caleb
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <cub/cub.cuh>
using namespace cub;
// I ASSUME THROUGHOUT THAT sizeof(int) = 4
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err);
// Parallel Iterative Improvement stable-matching kernel. One thread per male
// (single block of n threads); `output` carries the initial permutation in
// and the final matching out. Dynamic shared memory layout (in order):
// current_match[n] (int), match_current[n] (int), nm1[n] (unsigned int),
// nm2graph[3n] (uint8_t), shared_fp[n*n] (uint8_t), unstable[1] (uint8_t) —
// the host launch must supply exactly this many bytes.
__global__ void piiKernel(uint8_t n, uint8_t* male_prefs, uint8_t* female_prefs, uint8_t* output) {
    //TODO fix shared bank conflicts
    int idx = threadIdx.x;
    extern __shared__ int s[];
    // current_match[i] = j implies female i is matched with male j
    int* current_match = (int*) s;
    // match_current is the flipped table for fast lookups:
    // match_current[j] = i implies male j is matched with female i.
    // BUG FIX: this line had been corrupted to "(int*) ¤t_match[n]" — an
    // HTML-entity mangling of "&current_match[n]" — which does not compile.
    int* match_current = (int*) &current_match[n];
    // nm1[i] = j implies that female i is matched with male (j&0xFF) and
    // female_prefs[i][j] = ((j>>24)&0xFF); no need to reset between kernels
    // since nm1 pairs always improve things for the woman.
    unsigned int* nm1 = (unsigned int*) &match_current[n];
    // nm2graph[male id] = (row neighbor index, female id, column neighbor
    // index), 255 meaning null; the row (resp. column) neighbor index points
    // to the nm2 pair connected through the matching pair in the same row.
    uint8_t* nm2graph = (uint8_t*) &nm1[n];
    uint8_t* shared_fp = (uint8_t*) &nm2graph[3*n];
    uint8_t* unstable = (uint8_t*) &shared_fp[n*n];
    // local cache of this male's preference list
    uint8_t* local_mp = new uint8_t[n];
    for (int i = 0; i < n; i++){
        local_mp[i] = male_prefs[idx*n+i];
    }
    // output holds a random permutation generated on the CPU
    int temp_man = current_match[idx] = output[idx];
    match_current[temp_man] = idx;
    // stage female preferences into shared memory, one column per thread
    for (int i = 0; i < n; i++) {
        shared_fp[i*n + idx] = female_prefs[i*n + idx];
    }
    __syncthreads();
    for (int p = 0; p < n; p++) {
        __syncthreads();
        int partner = match_current[idx];
        // the female partner for this male's NM1 pair (-1 = none found)
        int nm1g = -1;
        unstable[0] = 0;
        __syncthreads();
        // find NM1-generating pairs: a female this male prefers over his
        // current partner who also prefers him over her current partner
        for (int j = 0; local_mp[j] != partner; j++){
            if (shared_fp[local_mp[j]*n+idx] < shared_fp[local_mp[j]*n+current_match[local_mp[j]]]){
                nm1g = local_mp[j];
#ifdef DEBUG
                printf("found nm1g pair for male %d\n",idx);
#endif
                unstable[0] = 1;
                break;
            }
        }
#ifdef DEBUG
        printf("thread: %d, potential nm1g=%d\n",idx,nm1g);
#endif
        __syncthreads();
        if (unstable[0] == 0){
#ifdef DEBUG
            printf("FOUND STABLE MATCHING\n");
#endif
            break;  // uniform across the block: unstable[0] read after a barrier
        }
        nm2graph[idx*3 +0] = 255;
        nm2graph[idx*3 +1] = 255;
        nm2graph[idx*3+2] = 255;
        bool is_nm1 = false;
        unsigned int potential_nm1_construct = ~0;
        // initialize large so atomicMin selects the best candidate
        nm1[idx] = INT_MAX;
        __syncthreads();
        if (nm1g != -1) {
            // encode (female's rank of this male << 24) | male id so the
            // minimum picks the male the female prefers most
            potential_nm1_construct = (female_prefs[nm1g*n+idx] << 24)|idx;
            atomicMin(nm1+nm1g, potential_nm1_construct);
        }
        __syncthreads();
        // BUG FIX: guard nm1g == -1 — the unconditional read of nm1[nm1g]
        // indexed shared memory at -1 for threads with no candidate pair.
        is_nm1 = (nm1g != -1) && (potential_nm1_construct == nm1[nm1g]);
        // record the nm2 pairs displaced by each winning nm1 pair
        if (is_nm1){
            // horizontal nm2 pair
            nm2graph[idx*3+0] = current_match[nm1g];
            // vertical nm2 pair
            nm2graph[(current_match[nm1g])*3+1] = partner;
            nm2graph[(current_match[nm1g])*3+2] = idx;
        }
#ifdef DEBUG
        printf("thread: %d ready to sync\n",idx);
#endif
        __syncthreads();
        // winning nm1 threads commit their new match and re-pair the displaced
        if (is_nm1) {
#ifdef DEBUG
            printf("thread: %d, final nm1g=%d\n",idx,nm1g);
#endif
            current_match[nm1g] = idx;
            match_current[idx] = nm1g;
            // if we are a column end of the displaced-pair chain
            if (nm2graph[idx*3+2] == 255) {
#ifdef DEBUG
                printf("thread: %d, nm2graph[idx] = (%d,%d,%d)\n",idx,nm2graph[idx*3+0],nm2graph[idx*3+1],nm2graph[idx*3+2]);
#endif
                int nm2column = partner;
                int nm2row = current_match[nm1g];
#ifdef DEBUG
                printf("thread: %d, nm2row=%d, nm2graph[nm2row] = (%d,%d,%d)\n",idx,nm2row,nm2graph[nm2row*3+0],nm2graph[nm2row*3+1],nm2graph[nm2row*3+2]);
#endif
                // single displaced pair
                if (nm2graph[nm2row*3+0]==nm2graph[nm2row*3+2]){
                    nm2column = nm2graph[nm2row*3+1];
                }
                // chain of displaced pairs: follow row links to the end
                else {
                    while(nm2graph[nm2row*3+0] != 255) {
#ifdef DEBUG
                        printf("thread: %d, nm2row=%d, nm2graph[nm2row] = (%d,%d,%d)\n",idx,nm2row,nm2graph[nm2row*3+0],nm2graph[nm2row*3+1],nm2graph[nm2row*3+2]);
#endif
                        nm2row = nm2graph[nm2row*3+0];
                    }
                }
#ifdef DEBUG
                printf("final nm2row=%d,nm2column=%d\n",nm2row,nm2column);
#endif
                current_match[nm2column] = nm2row;
                match_current[nm2row] = nm2column;
            }
        }
    }
    // copy the final matching out
    output[idx] = current_match[idx];
    delete[] local_mp;
}
/**
* Host function that copies the data and launches GS on the CPU
*
*/
// Host driver for the parallel-iterative-improvement matching kernel: copies
// the preference tables to the device, seeds `output` with a random
// permutation (inside-out Fisher-Yates), runs piiKernel in a single block of
// n threads, and copies the resulting matching back into `output`.
// The shared-memory size passed at launch must match piiKernel's layout.
void pii(uint8_t n, uint8_t* male_prefs, uint8_t* female_prefs, uint8_t* output)
{
uint8_t *d_male_prefs, *d_female_prefs;
uint8_t* d_output;
size_t prefs_size = sizeof(uint8_t)*n*n;
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_male_prefs, prefs_size));
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_female_prefs, prefs_size));
CUDA_CHECK_RETURN(cudaMalloc((void **)&d_output, sizeof(uint8_t)*n));
CUDA_CHECK_RETURN(cudaMemcpy(d_male_prefs, male_prefs, prefs_size, cudaMemcpyHostToDevice));
CUDA_CHECK_RETURN(cudaMemcpy(d_female_prefs, female_prefs, prefs_size, cudaMemcpyHostToDevice));
#ifdef TIME_KERNEL
struct timespec start, end;
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &start);
#endif
// knuth shuffle from sgb: inside-out Fisher-Yates — initialises output[]
// with a uniform random permutation of 0..n-1 as it goes
for (uint8_t k = 0;k < n; k++) {
uint8_t j= rand() % (k+1);
output[k] = output[j];
output[j] = k;
}
#ifdef DEBUG
printf("male_prefs: \n ");
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++)
printf("%hhd ",male_prefs[i*n+j]);
printf("\n ");
}
printf("female_prefs: \n ");
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++)
printf("%hhd ",female_prefs[i*n+j]);
printf("\n ");
}
printf("permutation: ");
for (int i = 0; i < n; i++)
printf("%hhd ",output[i]);
printf("\n ");
#endif
CUDA_CHECK_RETURN(cudaMemcpy(d_output, output, n*sizeof(uint8_t), cudaMemcpyHostToDevice));
// Dynamic shared memory: current_match + match_current (int each), nm1
// (unsigned int), nm2graph (3n bytes), shared_fp (n*n bytes), unstable (1).
piiKernel<<<1,n,n*sizeof(int)+n*sizeof(int)+n*sizeof(unsigned int) + 3*n*sizeof(uint8_t) + n*n*sizeof(uint8_t)+1>>> (n, d_male_prefs,d_female_prefs, d_output);
#ifdef TIME_KERNEL
cudaDeviceSynchronize();
clock_gettime(CLOCK_MONOTONIC, &end);
long long unsigned int diff = (1000000000L) * (end.tv_sec - start.tv_sec) + end.tv_nsec - start.tv_nsec;
printf("kernel time %llu\n",diff);
#endif
CUDA_CHECK_RETURN(cudaMemcpy(output, d_output, sizeof(uint8_t)*n, cudaMemcpyDeviceToHost));
#ifdef DEBUG
printf("result: ");
for (int i = 0; i < n; i++)
printf("%hhd ",output[i]);
printf("\n ");
#endif
CUDA_CHECK_RETURN(cudaFree(d_male_prefs));
CUDA_CHECK_RETURN(cudaFree(d_female_prefs));
CUDA_CHECK_RETURN(cudaFree(d_output));
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
/**
 * Exit the application with a diagnostic when a CUDA runtime call failed.
 * file/line/statement identify the failing call site (filled in by the
 * CUDA_CHECK_RETURN macro); err is the runtime's return code.
 */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
    if (err != cudaSuccess) {
        std::cerr << statement << " returned " << cudaGetErrorString(err)
                  << "(" << err << ") at " << file << ":" << line << std::endl;
        exit (1);
    }
}
|
23,528 | /* Matrix normalization.
* Compile with "gcc matrixNorm.c"
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
/* Program Parameters */
#define N 9000 /* Matrix size */
/* Matrices */
volatile float A[N][N], B[N][N];
// Flattened array A & B
float flattenA[N * N], flattenB[N * N];
/* Initialize A and B*/
/* Fill A with pseudo-random floats and zero B.
 * The seed is fixed (22) so every run produces the same inputs. */
void initialize_inputs() {
    srand(22);
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            A[row][col] = (float)rand() / 32768.0;
            B[row][col] = 0.0;
        }
    }
}
// Printing array func
// This will help us in both seeing inputs/results before and after normalization
/* Print A and B to stdout so inputs/results can be inspected before and
 * after normalization; only active for small problem sizes (N < 10). */
void print_arrays() {
    if (N >= 10)
        return;
    printf("\nA =\n\t");
    for (int row = 0; row < N; row++)
        for (int col = 0; col < N; col++)
            printf("%5.2f%s", A[row][col], (col < N-1) ? ", " : ";\n\t");
    printf("\nB =\n\t");
    for (int row = 0; row < N; row++)
        for (int col = 0; col < N; col++)
            printf("%5.2f%s", B[row][col], (col < N-1) ? ", " : ";\n\t");
    printf("\n");
}
// Even though CUDA API has cudaMemcpy2D, I found it much easier
// to convert a 2D array into a 1D with mapping scheme like this
// Copy the 2D matrices A and B into the flat row-major arrays
// flattenA/flattenB using the mapping A[i][j] -> flattenA[i*N + j].
void flattenArray() {
    for (int idx = 0; idx < N * N; idx++) {
        flattenA[idx] = A[idx / N][idx % N];
        flattenB[idx] = B[idx / N][idx % N];
    }
}
// This function basically prints out a 1D array to console
// Debug helper: print a flattened N x N array as a grid to the console
// (only active when N < 10).
void checkFlatten(float targetArray[]) {
    if (N >= 10)
        return;
    printf("---- Checking ----\n");
    for (int i = 0; i < (N * N); i++) {
        if (i % N == 0 && i != 0)
            printf("\n");
        printf("%5.2f ", targetArray[i]);
    }
    printf("\n");
}
// CUDA device information
// Print the name and memory characteristics of each visible CUDA device.
void getCudaDevices(int nDevices) {
    cudaDeviceProp prop;
    for (int dev = 0; dev < nDevices; dev++) {
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Number: %d\n", dev);
        printf(" Device name: %s\n", prop.name);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
    }
}
// GPU parallel matrix normalization kernel
// GPU matrix column-normalization kernel.
// One thread handles one column `idx` of the arraySize x arraySize matrix:
// it computes the column mean and standard deviation over flattenA and writes
// the standardized column into flattenB (columns with sigma == 0 become 0).
// Fixes vs. the original:
//  - __syncthreads() was issued inside the `if (idx < arraySize)` branch;
//    when the grid over-covers the matrix that barrier is divergent and
//    undefined. Each thread only touches its own column, so no barrier is
//    needed at all.
//  - float math throughout (sqrtf, d*d instead of powf(d, 2.0)) to avoid
//    silent double promotion in a float kernel.
__global__ void gpuMatrixNorm(float *flattenA, float *flattenB, int arraySize) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard threads beyond the last column.
    if (idx >= arraySize)
        return;

    // Column mean.
    float mu = 0.0f;
    for (int row = 0; row < arraySize; row++) {
        mu += flattenA[row * arraySize + idx];
    }
    mu /= (float) arraySize;

    // Column standard deviation (population variance).
    float sigma = 0.0f;
    for (int row = 0; row < arraySize; row++) {
        float d = flattenA[row * arraySize + idx] - mu;
        sigma += d * d;
    }
    sigma = sqrtf(sigma / (float) arraySize);

    // Standardize the column.
    for (int row = 0; row < arraySize; row++) {
        if (sigma == 0.0f) {
            flattenB[row * arraySize + idx] = 0.0f;
        }
        else {
            flattenB[row * arraySize + idx] = (flattenA[row * arraySize + idx] - mu) / sigma;
        }
    }
}
// Host driver: initializes inputs, reports device info, runs the column
// normalization on the GPU (timed with gettimeofday), and prints results.
int main(int argc, char **argv) {
    // Device buffers for the flattened input/output matrices.
    float *device_A, *device_B;
    int nDevices;
    int cudaRunTimeVersion;
    int cudaDriverVersion;
    /* Timing variables */
    struct timeval startGPU, stopGPU; /* Elapsed times using gettimeofday() */
    struct timezone tzdummy;
    unsigned long long runtimeGPU;

    /* Initialize A and B */
    initialize_inputs();
    printf("---- Initialized inputs ----\n");
    print_arrays();

    // Flatten the 2D matrices into row-major 1D arrays for the device.
    flattenArray();
    // Debug print of the flattened input (only active when N < 10).
    checkFlatten(flattenA);

    // Byte size of one flattened N x N matrix.
    int arraySize = sizeof(float) * N * N;

    // Report CUDA device / version information.
    cudaGetDeviceCount(&nDevices);
    getCudaDevices(nDevices);
    cudaRuntimeGetVersion(&cudaRunTimeVersion);
    cudaDriverGetVersion(&cudaDriverVersion);
    printf("Cuda Runtime Version: %i\n", cudaRunTimeVersion);
    printf("Cuda Driver Version: %i\n", cudaDriverVersion);

    // Start Clock GPU
    printf("---------------------------------------------\n");
    printf("Matrix size N = %d", N);
    printf("\nStarting clock for GPU.\n\n");
    gettimeofday(&startGPU, &tzdummy);

    cudaMalloc((void**)&device_A, arraySize);
    cudaMalloc((void**)&device_B, arraySize);
    cudaMemcpy(device_A, flattenA, arraySize, cudaMemcpyHostToDevice);

    // BUG FIX: the kernel was launched as <<<N, N>>>, i.e. N threads per
    // block; for N = 9000 that exceeds the 1024-threads-per-block limit, the
    // launch fails silently, and uninitialized device memory was copied back.
    // Launch enough 256-thread blocks to cover one thread per column instead
    // (the kernel's idx < arraySize guard handles the overshoot).
    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    gpuMatrixNorm<<<blocksPerGrid, threadsPerBlock>>>(device_A, device_B, N);

    // cudaMemcpy is blocking, so no explicit cudaDeviceSynchronize is needed
    // before reading the result on the host.
    cudaMemcpy(flattenB, device_B, arraySize, cudaMemcpyDeviceToHost);

    /* Stop Clock */
    gettimeofday(&stopGPU, &tzdummy);
    runtimeGPU = (unsigned long long)(stopGPU.tv_sec - startGPU.tv_sec) * 1000000 + (stopGPU.tv_usec - startGPU.tv_usec);
    /* Display timing results */
    printf("GPU Runtime = %g ms.\n", (float)runtimeGPU/(float)1000);
    printf("\nStopped clock for GPU.");
    printf("\n---------------------------------------------\n");
    printf("---- Results ----\n");
    // Debug print of the result (only active when N < 10).
    checkFlatten(flattenB);

    // Free device memory.
    cudaFree(device_A);
    cudaFree(device_B);
    exit(0);
}
23,529 | // CUDA przykład (c) Andrzej Łukaszewski 2010
// Dodawanie macierzy na GPU: kompilacja: nvcc addmat.cu
#include <stdio.h>
// Element-wise matrix addition, one thread per element.
// Launched as <<<N, N>>>: blockIdx.x selects the row, threadIdx.x the column.
__global__ void AddMatrixKernel1(float *A, float *B, float *C, int N){
    int i = N * blockIdx.x + threadIdx.x;
    C[i] = A[i] + B[i];
}
// Adds two N x N host matrices on the GPU: copies A and B to the device,
// launches one block per row with one thread per column, and copies the
// result back into C.
void GPUMatrixAdd(float *A, float *B, float *C, int N) {
    const int size = N * N * sizeof(float);
    float *Ad, *Bd, *Cd; // device copies
    cudaMalloc(&Ad, size);
    cudaMalloc(&Bd, size);
    cudaMalloc(&Cd, size);
    cudaMemcpy(Ad, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(Bd, B, size, cudaMemcpyHostToDevice);
    AddMatrixKernel1<<<N,N>>>(Ad, Bd, Cd, N); // N blocks of N threads
    cudaMemcpy(C, Cd, size, cudaMemcpyDeviceToHost);
    cudaFree(Ad);
    cudaFree(Bd);
    cudaFree(Cd);
}
// Smoke test: add two 2x2 matrices on the GPU and print one element of each
// operand and of the result.
int main(){
    float A[4] = { 1., 1., 1., 1. };
    float B[4] = { 2., 3., 4., 5. };
    float C[4];
    printf("A[1]=%f\n", A[1]);
    printf("B[1]=%f\n", B[1]);
    GPUMatrixAdd(A, B, C, 2);
    printf("C[1]=%f\n", C[1]);
    return 0;
}
|
23,530 | //
// Created by steve on 3/15/2021.
//
#include <deque>
#include <iostream>
#include <string>
// Render a dimension list as "( d0, d1, ..., dk)" for use in exception
// messages; an empty list renders as "( )".
std::string getDimsExceptionString(const std::deque<unsigned long long>& dims)
{
    std::string err = "( ";
    for (std::size_t i = 0; i < dims.size(); ++i)
    {
        err += std::to_string(dims[i]);
        if (i + 1 < dims.size())
            err += ", ";
    }
    err += ")";
    return err;
}
// Number of 2D matrices described by a dims list: the product of every
// dimension except the trailing two. Empty dims -> 0; one or two dims -> 1.
unsigned long long getTotalMatricesFromDims(const std::deque<unsigned long long>& dims)
{
    if (dims.empty())
        return 0;
    unsigned long long total = 1;
    for (std::size_t i = 2; i < dims.size(); ++i)
        total *= dims[i - 2];
    return total;
}
23,531 | // testing usage of thrust vectors
#include <iostream>
#include <cmath>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/transform_reduce.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
// const int NBLOCK = N;
// const int NTHREAD = 1;
// square<T> computes the square of the number f(x) -> x*x
// square<T>: unary functor mapping x -> x*x, callable on host and device
// (used below with thrust::transform_reduce to compute a sum of squares).
template <typename T>
struct square {
    __host__ __device__ T operator()(const T& x) const {
        const T v = x;
        return v * v;
    }
};
// Exercise basic thrust reductions on a device vector of N ones:
// sum, mean, sum of squares, L2 norm and RMS, printed to stdout.
int main(void){
    const int N = 10;
    thrust::host_vector<double> host_ones(N, 1.);     // N entries, each 1.0
    thrust::device_vector<double> dev_ones = host_ones;

    // reduce(start, end, initial value, binary operation)
    double sum = thrust::reduce(dev_ones.begin(), dev_ones.end(), 0., thrust::plus<double>());
    std::cout << "sum = " << sum << std::endl;
    double mean = sum / ((double)N);
    std::cout << "mean = " << mean << std::endl;

    // transform_reduce(start, end, unary op, initial value, binary op)
    // computes the sum of squares, from which norm and RMS follow.
    square<double> unary_op;
    thrust::plus<double> binary_op;
    double init = 0;
    double sum_sq = thrust::transform_reduce(dev_ones.begin(), dev_ones.end(), unary_op, init, binary_op);
    double norm = sqrt(sum_sq);
    double rms = norm / std::sqrt((double)N);
    std::cout << "norm = " << norm << std::endl;
    std::cout << "RMS = " << rms << std::endl;
    return 0;
}
|
23,532 | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define MAX_XY 32
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 0.00005
#define TILE_H 32
#define TILE_W 32
#define FILTER_RADIUS 32
#define FILTER_LENGTH (2 * FILTER_RADIUS + 1)
__device__ __constant__ double d_Filter[FILTER_LENGTH];
#define cudaCheckError() { \
cudaError_t error=cudaGetLastError(); \
if(error!=cudaSuccess) { \
printf("ERROR IN CUDA %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error)); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
// Row (horizontal) convolution using a shared-memory tile with left/right
// halos of FILTER_RADIUS columns. Each TILE_H x TILE_W block stages its tile
// plus halos into `image`, then every thread computes one output pixel.
// NOTE(review): the left/right border handling only zeroes one halo cell per
// thread and `number_of_writes` is computed but never used — edge blocks look
// incompletely handled (the author flagged border errors elsewhere); verify
// against the CPU reference for non-interior blocks.
__global__ void convolutionRowGPU(double *d_Dst, double *d_Src,int imageW, int imageH) {
int x,y,k,d,P_x,x0,number_of_writes = 1;
// One tile row is TILE_W pixels plus FILTER_RADIUS halo on each side.
__shared__ double image[TILE_H * (TILE_W+FILTER_RADIUS*2)];
x = blockIdx.x*blockDim.x + threadIdx.x;
y = blockIdx.y*blockDim.y + threadIdx.y;
// Intended to cover halos wider than the tile; currently unused below.
if(FILTER_RADIUS > TILE_W){
number_of_writes = FILTER_RADIUS/TILE_W + 1;
}
// Left halo: zero-pad when the halo would read before column 0.
P_x = x - FILTER_RADIUS;
if(P_x < 0) {
image[threadIdx.x + threadIdx.y*(TILE_W+FILTER_RADIUS*2)] = 0;
image[threadIdx.x + blockDim.x + threadIdx.y*(TILE_W+FILTER_RADIUS*2)] = d_Src[y*imageW + x + blockDim.x - FILTER_RADIUS];
image[threadIdx.x + blockDim.x + FILTER_RADIUS + threadIdx.y*(TILE_W+FILTER_RADIUS*2)] = d_Src[y*imageW + x + blockDim.x];
}
else{
// Interior: the first FILTER_RADIUS threads load halo + shifted center.
if(threadIdx.x < FILTER_RADIUS) {
image[threadIdx.x + threadIdx.y*(TILE_W+FILTER_RADIUS*2)] = d_Src[y*imageW + x - FILTER_RADIUS];
image[threadIdx.x + blockDim.x + threadIdx.y*(TILE_W+FILTER_RADIUS*2)] = d_Src[y*imageW + x + blockDim.x - FILTER_RADIUS];
image[threadIdx.x + blockDim.x + FILTER_RADIUS + threadIdx.y*(TILE_W+FILTER_RADIUS*2)] = d_Src[y*imageW + x + blockDim.x];
}
else{
image[threadIdx.x + threadIdx.y*(TILE_W+FILTER_RADIUS*2)] = d_Src[y*imageW + x - FILTER_RADIUS];
}
}
// Right halo: zero-pad past the last image column.
P_x = x + FILTER_RADIUS;
if(P_x > imageW - 1) {
image[threadIdx.x + 2*FILTER_RADIUS + threadIdx.y*(TILE_W+FILTER_RADIUS*2)] = 0;
image[threadIdx.x + FILTER_RADIUS + threadIdx.y*(TILE_W+FILTER_RADIUS*2)] = d_Src[y*imageW + x];
}
__syncthreads();  // tile + halos staged before any thread reads them
double sum = 0;
x0 = threadIdx.x + FILTER_RADIUS ;
// Dot product of the filter with the horizontal window around this pixel.
for(k = -FILTER_RADIUS; k <= FILTER_RADIUS; k++) {
d = x0 + k;
sum += image[threadIdx.y*(TILE_W+FILTER_RADIUS*2) + d] * d_Filter[FILTER_RADIUS - k];
}
d_Dst[y*imageW + x] = sum;
}
// Column (vertical) convolution using a shared-memory tile with top/bottom
// halos of FILTER_RADIUS rows. Mirrors convolutionRowGPU with the roles of
// x/y swapped; the tile is indexed row-major with stride TILE_W.
// NOTE(review): border handling mirrors the row kernel's partial zero-padding
// — verify edge blocks against the CPU reference.
__global__ void convolutionColumnGPU(double *d_Dst, double *d_Src,int imageW, int imageH) {
int x,y,k,d,P_y,y0;
// TILE_H rows plus FILTER_RADIUS halo rows above and below.
__shared__ double image[TILE_W * (TILE_H+FILTER_RADIUS*2)];
x = blockIdx.x*blockDim.x + threadIdx.x;
y = blockIdx.y*blockDim.y + threadIdx.y;
// Top halo: zero-pad when the halo would read above row 0.
P_y = y - FILTER_RADIUS;
if(P_y < 0) {
image[threadIdx.y*(TILE_W) + threadIdx.x] = 0;
image[(threadIdx.y + blockDim.y)*TILE_W + threadIdx.x] = d_Src[(y + blockDim.y - FILTER_RADIUS)*imageW + x ];
image[(threadIdx.y + blockDim.y + FILTER_RADIUS)*TILE_W + threadIdx.x] = d_Src[(y + blockDim.y)*imageW + x ];
}
else{
// Interior: the first FILTER_RADIUS rows of threads load halo + shifted center,
// skipping the shifted loads when they would run past the bottom of the image.
if(threadIdx.y < FILTER_RADIUS){
if(y + blockDim.y > imageH - 1) {
image[threadIdx.y*(TILE_W) + threadIdx.x] = d_Src[(y- FILTER_RADIUS)*imageW + x];
}
else {
image[threadIdx.y*TILE_W + threadIdx.x] = d_Src[(y- FILTER_RADIUS)*imageW + x];
image[(threadIdx.y + blockDim.y)*TILE_W + threadIdx.x] = d_Src[(y + blockDim.y - FILTER_RADIUS)*imageW + x ];
image[(threadIdx.y + blockDim.y + FILTER_RADIUS)*TILE_W + threadIdx.x] = d_Src[(y + blockDim.y)*imageW + x ];
}
}
else{
image[threadIdx.y*(TILE_W) + threadIdx.x] = d_Src[(y- FILTER_RADIUS)*imageW + x];
}
}
// Bottom halo: zero-pad past the last image row.
P_y = y + FILTER_RADIUS;
if(P_y > imageH - 1) {
image[(threadIdx.y+2*FILTER_RADIUS)*TILE_W + threadIdx.x] = 0;
image[(threadIdx.y + FILTER_RADIUS)*TILE_W + threadIdx.x] = d_Src[y*imageW + x];
}
__syncthreads();  // tile + halos staged before any thread reads them
double sum = 0;
y0 = threadIdx.y + FILTER_RADIUS ;
// Dot product of the filter with the vertical window around this pixel.
for(k = -FILTER_RADIUS; k <= FILTER_RADIUS; k++) {
d = y0 + k;
sum += image[d*TILE_W + threadIdx.x] * d_Filter[FILTER_RADIUS - k];
}
d_Dst[y*imageW + x] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
// Reference row (horizontal) convolution on the CPU: each output pixel is the
// dot product of the filter with a horizontal window around it; samples that
// fall outside the image contribute zero.
void convolutionRowCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH) {
    for (int y = 0; y < imageH; y++) {
        for (int x = 0; x < imageW; x++) {
            double acc = 0;
            for (int k = -FILTER_RADIUS; k <= FILTER_RADIUS; k++) {
                const int sx = x + k;
                if (sx >= 0 && sx < imageW)
                    acc += h_Src[y * imageW + sx] * h_Filter[FILTER_RADIUS - k];
            }
            h_Dst[y * imageW + x] = acc;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
// Reference column (vertical) convolution on the CPU: each output pixel is
// the dot product of the filter with a vertical window around it; samples
// that fall outside the image contribute zero.
void convolutionColumnCPU(double *h_Dst, double *h_Src, double *h_Filter,
int imageW, int imageH) {
    for (int y = 0; y < imageH; y++) {
        for (int x = 0; x < imageW; x++) {
            double acc = 0;
            for (int k = -FILTER_RADIUS; k <= FILTER_RADIUS; k++) {
                const int sy = y + k;
                if (sy >= 0 && sy < imageH)
                    acc += h_Src[sy * imageW + x] * h_Filter[FILTER_RADIUS - k];
            }
            h_Dst[y * imageW + x] = acc;
        }
    }
}
//
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
// Host driver: reads a square image size, runs the separable convolution on
// the GPU (row kernel then column kernel, timed per stage), recomputes it on
// the CPU, and verifies the two results agree within `accuracy`.
int main(int argc, char **argv) {
    double
    *h_Filter,
    *h_Input,
    *h_Buffer,
    *h_OutputCPU,
    *d_Input,
    *d_Buffer,
    *d_OutputGPU,
    *h_OutputGPU;
    int imageW;
    int imageH;
    unsigned int i;
    double timing;
    clock_t start;
    clock_t end;

    // The user supplies a square image size (imageW == imageH); it must be a
    // power of two larger than the filter length so the tiling works out.
    printf("Enter image size. Should be a power of two and greater than %d : ", FILTER_LENGTH);
    scanf("%d", &imageW);
    imageH = imageW;

    // Launch configuration: MAX_XY x MAX_XY thread tiles covering the image.
    // (For square images the x/y order of the block counts is harmless.)
    dim3 threads(MAX_XY,MAX_XY);
    dim3 blocks (imageH/MAX_XY,imageW/MAX_XY);
    if(imageH < MAX_XY && imageW < MAX_XY){
        threads.x = imageH;
        threads.y = imageH;
        blocks.x = 1;
        blocks.y = 1;
    }
    printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
    printf("Allocating and initializing host arrays and device array...\n");

    h_Filter = (double *)malloc(FILTER_LENGTH * sizeof(double));
    h_Input = (double *)malloc(imageW * imageH * sizeof(double));
    h_Buffer = (double *)malloc(imageW * imageH * sizeof(double));
    h_OutputCPU = (double *)malloc(imageW * imageH * sizeof(double));
    h_OutputGPU = (double *)malloc(imageW * imageH * sizeof(double));
    // BUG FIX: h_OutputGPU was missing from this check, and a failed malloc
    // previously only printed a warning before the pointers were used anyway.
    if (h_Filter==NULL || h_Input == NULL || h_Buffer == NULL || h_OutputCPU == NULL || h_OutputGPU == NULL){
        printf("Something went wrong wille malloc in CPU\n");
        return 1;
    }
    printf("Memmory allocation for host arrays: COMPLETED \n");

    cudaMallocManaged((void**)&d_Input,imageH * imageW * sizeof(double));
    cudaMallocManaged((void**)&d_Buffer,imageH * imageW * sizeof(double));
    cudaMallocManaged((void**)&d_OutputGPU,imageH * imageW * sizeof(double));
    cudaCheckError();
    printf("Memmory allocation for device arrays: COMPLETED \n");

    // h_Filter is the convolution filter and h_Input the image; both are
    // initialized with a fixed seed for reproducibility.
    srand(200);
    for (i = 0; i < FILTER_LENGTH; i++) {
        h_Filter[i] = (double)(rand() % 16);
    }
    for (i = 0; i < imageW * imageH; i++) {
        h_Input[i] = (double)rand() / ((double)RAND_MAX / 255) + (double)rand() / (double)RAND_MAX;
    }
    printf("initialization of host arrays: COMPLETED \n");

    /* START OF HOST2DEVICE TRANSFER */
    start=clock();
    // The filter lives in __constant__ memory (d_Filter symbol).
    cudaMemcpyToSymbol(d_Filter,h_Filter,FILTER_LENGTH * sizeof(double),0,cudaMemcpyHostToDevice);
    cudaMemcpy(d_Input, h_Input,imageH * imageW * sizeof(double),cudaMemcpyHostToDevice);
    cudaCheckError();
    end=clock();
    printf("memcpy host2device execution time: %g s\n", (double(end-start))/CLOCKS_PER_SEC);
    printf("initialization of device arrays: COMPLETED \n\n");

    printf("GPU computation...\n");
    /* START OF ROW KERNEL */
    start=clock();
    convolutionRowGPU<<<blocks,threads>>>(d_Buffer,d_Input,imageW,imageH);
    cudaCheckError();
    cudaDeviceSynchronize();
    end=clock();
    printf("row kernel execution time: %g s\n", (double(end-start))/CLOCKS_PER_SEC);
    /* END OF ROW KERNEL */

    /* START OF COL KERNEL */
    start=clock();
    convolutionColumnGPU<<<blocks,threads>>>(d_OutputGPU,d_Buffer,imageW,imageH);
    cudaCheckError();
    cudaDeviceSynchronize();
    end=clock();
    printf("col kernel execution time: %g s\n", (double(end-start))/CLOCKS_PER_SEC);
    /* END OF COL KERNEL */
    printf("GPU computation : COMPLETED\n\n");

    /* START OF DEVICE2HOST TRANSFER */
    start=clock();
    cudaMemcpy(h_OutputGPU,d_OutputGPU,imageH * imageW * sizeof(double),cudaMemcpyDeviceToHost);
    end=clock();
    printf("memcpy device2host execution time: %g s\n", (double(end-start))/CLOCKS_PER_SEC);
    /* END OF DEVICE2HOST TRANSFER */

    // CPU reference computation used to validate the GPU result.
    printf("CPU computation...\n");
    start = clock();
    convolutionRowCPU(h_Buffer, h_Input, h_Filter, imageW, imageH); // row pass
    convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, imageW, imageH); // column pass
    end = clock();
    timing = ((double) (end - start)) / CLOCKS_PER_SEC;
    printf("CPU computation : COMPLETED in time:%10.5f\n",timing);

    // Compare GPU vs CPU; bail out on the first element that differs by more
    // than `accuracy`, releasing all host and device memory first.
    printf("\nCPU computations == GPU computation?\n");
    for (i = 0; i < imageW * imageH; i++) {
        if(h_OutputGPU[i] > h_OutputCPU[i] + accuracy || h_OutputGPU[i] < h_OutputCPU[i] - accuracy){
            printf("CPU computations == GPU computation : FALSE line:%d difrence:%f \nExitting program...\n GPU: %lf \n",i,h_OutputGPU[i]-h_OutputCPU[i],h_OutputGPU[i]);
            free(h_OutputCPU);
            free(h_OutputGPU);
            free(h_Buffer);
            free(h_Input);
            free(h_Filter);
            // free all the allocated memory GPU
            cudaFree(d_OutputGPU);
            cudaFree(d_Buffer);
            cudaFree(d_Input);
            cudaCheckError();
            cudaDeviceReset();
            return(1);
        }
    }
    printf("CPU computations == GPU computation : TRUE \nExitting program after Memmory Free...\n");

    // free all the allocated memory CPU
    free(h_OutputCPU);
    free(h_OutputGPU);
    free(h_Buffer);
    free(h_Input);
    free(h_Filter);
    // free all the allocated memory GPU
    cudaFree(d_OutputGPU);
    cudaFree(d_Buffer);
    cudaFree(d_Input);
    cudaDeviceReset();
    return 0;
}
|
23,533 | #include "includes.h"
// Fills selected slices of a flattened tensor with `val`.
// For each of the `idx_size` 1-based positions in `index`, every thread
// writes one element of the corresponding slice along dimension `dim`
// (each slice holds tensor_size / idx_size elements).
__global__ void THCudaTensor_kernel_indexFill( float *tensor, long* stride, float *index, long src_nDim, int dim, long idx_size, long tensor_size, long size_dim, float val )
{
// Linear thread id over a 2-D thread block.
int thread_idx = blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
long flat_size = tensor_size / idx_size;
if (thread_idx < flat_size)
{
long coeff = 0;
// NOTE(review): srcIdx below depends on i only through index[i]; the inner
// decomposition loop is recomputed each iteration (left as-is).
for (int i=0; i<idx_size; i++)
{
int leftover = thread_idx;
int srcIdx = 0;
// Decompose the flat id into coordinates over every dimension except
// `dim`, accumulating the element offset via the stride array.
for (int d=0; d<src_nDim; d++)
{
if (d < dim)
{
// Dimensions before `dim`: their strides still include the indexed
// dimension's extent (size_dim), so divide it out first.
coeff = leftover / (stride[d] / size_dim);
leftover -= coeff * (stride[d] / size_dim);
srcIdx += coeff * stride[d];
}
else if (d > dim)
{
coeff = leftover / stride[d];
leftover -= coeff * stride[d];
srcIdx += coeff * stride[d];
}
}
// index[] holds 1-based positions along `dim` (hence the -1).
tensor[srcIdx + (int)((index[i])-1)*stride[dim]] = val;
}
}
}
23,534 | #include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
// Writes y[i] = sinf(x[i]) for every i in [0, *n); extra threads do nothing.
// (The kernel is named `add` by its caller but computes an element-wise sine.)
__global__ void add(float* x, float* y, int* n)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= *n)
        return;
    y[tid] = sinf(x[tid]);
}
// Reads up to 100 floats, computes their sines on the GPU, prints the result.
int main()
{
    float a[100], res[100], *da, *db;
    int *dn;
    int n;
    printf("Enter size");
    // FIX: the original never validated n; a value > 100 overflowed the
    // fixed-size stack arrays a/res, and a failed scanf left n uninitialized.
    if (scanf("%d", &n) != 1 || n < 1 || n > 100) {
        fprintf(stderr, "size must be between 1 and 100\n");
        return 1;
    }
    printf("Enter the elements:");
    for (int i = 0; i < n; i++) {
        if (scanf("%f", &a[i]) != 1) {
            fprintf(stderr, "bad input\n");
            return 1;
        }
    }
    cudaMalloc((void**)&da, n * sizeof(float));
    cudaMalloc((void**)&db, n * sizeof(float));
    cudaMalloc((void**)&dn, sizeof(int));
    cudaMemcpy(da, a, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dn, &n, sizeof(int), cudaMemcpyHostToDevice);
    // One thread per block, preserving the original launch configuration.
    add<<<n, 1>>>(da, db, dn);
    cudaMemcpy(res, db, n * sizeof(float), cudaMemcpyDeviceToHost);
    printf("\n N,1: ");
    for (int i = 0; i < n; i++)
        printf("sine(%f) = %f\n", a[i], res[i]);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dn);
}
23,535 | #include <stdio.h>
#define SIZE_TEXT (sizeof(text)-1)
#define SIZE_END (sizeof(end)-1)
__device__ char text[] =
"__ bottles of beer on the wall, __ bottles of beer!\n"
"Take one down, and pass it around, ## bottles of beer on the wall!\n\n";
__device__ char end[] =
"01 bottle of beer on the wall, 01 bottle of beer.\n"
"Take one down and pass it around, no more bottles of beer on the wall.\n"
"\n"
"No more bottles of beer on the wall, no more bottles of beer.\n"
"Go to the store and buy some more, 99 bottles of beer on the wall.";
// One thread per verse: thread x writes the verse for (99 - x) bottles at
// offset x * SIZE_TEXT in `addr`. The final thread (bottle == 1) writes the
// two-verse ending instead and NUL-terminates the whole buffer.
__global__
void bottle99(char *addr){
int x = threadIdx.x;
addr += x * SIZE_TEXT;
int bottle = 99 - x;
if (bottle == 1){
for(int i = 0; i < SIZE_END; i++){
addr[i] = end[i];
}
addr[SIZE_END] = '\0';
}else{
// Two-digit counts: c1c2 = this verse's bottles, d1d2 = after taking one down.
char c1 = (bottle/10) + '0';
char c2 = (bottle%10) + '0';
char d1 = ((bottle-1)/10) + '0';
char d2 = ((bottle-1)%10) + '0';
for(int i = 0; i < SIZE_TEXT; i++){
int c = text[i];
if(c == '_'){
// "__" placeholder: substitute the current count and skip its 2nd char.
addr[i] = c1;
addr[i + 1] = c2;
i++;
}else if(c == '#'){
// "##" placeholder: substitute the decremented count.
addr[i] = d1;
addr[i + 1] = d2;
i++;
}else{
addr[i] = text[i];
}
}
}
}
// Launches 99 threads, each writing one verse, then prints the full song.
int main(void)
{
    char *buffer;
    char *d_buffer;
    // 98 regular verses + the two-verse ending + terminating NUL.
    int size = SIZE_TEXT * 98 + SIZE_END + 1;
    buffer = new char[size];
    cudaMalloc((void**)&d_buffer, size);
    bottle99<<<1,99>>>(d_buffer);
    cudaMemcpy(buffer, d_buffer, size, cudaMemcpyDeviceToHost);
    cudaFree(d_buffer);
    puts(buffer);
    delete[] buffer;  // FIX: was free() on memory from new[] -- undefined behavior
    return 0;
}
|
23,536 | #include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <algorithm>
#include <stdlib.h>
#define N 4
#define BLOCK_SIZE 4
#define GRID_SIZE 1
// Prints a diagnostic and aborts the process when a CUDA API call failed.
void cuda_error_check(cudaError_t err, const char *msg)
{
    if (err == cudaSuccess)
        return;
    printf("The error is %s, %s \n", cudaGetErrorString(err), msg );
    exit(1);
}
// Copies d_a (one thread per element) through shared memory and back,
// unchanged. The commented-out section is an unfinished anti-diagonal
// computation. NOTE(review): row stride uses blockDim.x, which matches the
// matrix width only because BLOCK_SIZE == N and GRID_SIZE == 1 here.
__global__ void matrix(int *d_a)
{
int row = (blockIdx.y * blockDim.y) + threadIdx.y ;
int col = (blockIdx.x * blockDim.x) + threadIdx.x ;
int L1 = blockDim.x ;
int L2 = blockDim.y ;
__shared__ int s_a[N*N];
// Stage in shared memory, barrier, then write back verbatim.
s_a[row*L1 + col] = d_a[row*L1 + col] ;
__syncthreads();
d_a[row*L1 + col] = s_a[row*L1 + col] ;
/*if(row == 0)
{
d_a[col] = col * -1 ;
}else if(col == 0)
{
d_a[row*L1] = row * -1 ;
}else
{
d_a[row*L1 + col] = 0 ;
}
__syncthreads();
int z ;
for( z = 2; z <= L1 + L2 - 1; z++ )
{
int mx = ( 1>z-L1 ? 1 : z-L1);
int mn = ( L1<z-1 ? L1 : z-1 );
if(z>L1) mx++ ;
if(row >=mx && row<=mn)
{
d_a[row*L1 + (z-row)] = d_a[(row-1)*L1 + (z-row)] + d_a[row*L1 + (z-row-1)] ;
}
__syncthreads();
}
*/
}
// Round-trips an N x N matrix through the `matrix` kernel and prints it.
int main(int argc , char **argv)
{
    int a[N*N];
    int i, j;
    int *d_a;
    // FIX: the original copied an uninitialized array to the device, so the
    // printed result was indeterminate. Fill with a deterministic pattern.
    for (i = 0; i < N*N; i++)
        a[i] = i;
    cuda_error_check(cudaSetDevice(0), "cudaSetDevice failed!" );
    cuda_error_check(cudaMalloc((void **)&d_a, N*N* sizeof(int)), "cudaMalloc Failed!");
    cuda_error_check(cudaMemcpy(d_a, a, N*N * sizeof(int), cudaMemcpyHostToDevice), "cudaMemcpy H-D failed!");
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(GRID_SIZE, GRID_SIZE);
    matrix<<< dimGrid, dimBlock >>>(d_a);
    // FIX: check for launch-configuration errors, which kernel launches do
    // not report directly.
    cuda_error_check(cudaGetLastError(), "kernel launch failed!");
    cuda_error_check(cudaMemcpy(a, d_a, N*N * sizeof(int), cudaMemcpyDeviceToHost), "cudaMemcpy D-H failed!");
    cudaFree(d_a);  // FIX: was leaked
    printf("\n\n");
    for (i = 0; i < N; i++)
    {
        for (j = 0; j < N; j++)
        {
            printf("%d\t", a[i*N + j]);
        }
        printf("\n");
    }
    return 0;
}
|
23,537 | // sort_by_key.cpp : Defines the entry point for the application.
//
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <iostream>
// Streams `size` elements of `array` to stdout, comma-separated, then a newline.
template <typename T>
void print_array(const T* array, const int size)
{
    for (int idx = 0; idx != size; ++idx)
        std::cout << array[idx] << ", ";
    std::cout << std::endl;
}
// Exercises three thrust sorting entry points on raw host arrays (thrust
// dispatches these to the host backend): plain sort, sort_by_key, and a
// stable descending sort, printing each result.
int main()
{
// Test 1: Ordinary sorting
std::cout << "Test 1: Ordinary Sorting" << std::endl;
const int N = 6;
int A[N] = { 1, 4, 2, 8, 5, 7 };
thrust::sort(A, A + N);
std::cout << "After sorting" << std::endl;
print_array<int>(A, N);
std::cout << std::endl;
// Test 2: Sort by key -- `values` is permuted to follow its `keys`.
std::cout << "Test 2: Sort by keys" << std::endl;
int keys[N] = { 1, 4, 2, 8, 5, 7 };
char values[N] = { 'a', 'b', 'c', 'd', 'e', 'f' };
thrust::sort_by_key(keys, keys + N, values);
// keys is now { 1, 2, 4, 5, 7, 8}
// values is now {'a', 'c', 'b', 'e', 'f', 'd'}
std::cout << "After sorting" << std::endl;
std::cout << "keys = ";
print_array<int>(keys, N);
std::cout << "values = ";
print_array<char>(values, N);
std::cout << std::endl;
// Test 3: descending sort via the greater<int> comparator.
std::cout << "Test 3: Descending sort" << std::endl;
int B[N] = { 1, 4, 2, 8, 5, 7 };
thrust::stable_sort(B, B + N, thrust::greater<int>());
std::cout << "After sorting" << std::endl;
print_array<int>(B, N);
std::cout << std::endl;
return 0;
}
|
23,538 | #include <cstdio>
// Queries device 0, prints the GPU count and its basic limits, and returns
// the device's maximum number of threads per block.
int getThreadNum()
{
    int count = 0;
    cudaDeviceProp prop;
    cudaGetDeviceCount(&count);
    printf("gpu num %d\n", count);
    cudaGetDeviceProperties(&prop, 0);
    printf("max thread num : %d\n", prop.maxThreadsPerBlock);
    printf("grid dimensions : %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    return prop.maxThreadsPerBlock;
}
// 2-D convolution, one thread per output element (flattened over
// width * height * channel). `result` must be zero-initialized by the
// caller since this kernel accumulates with +=.
__global__ void conv (float* img, float* kernel, float* result, int width, int height, int channel, int kernelSize)
{
    int ti = threadIdx.x;
    int bi = blockIdx.x;
    int id = (bi * blockDim.x + ti);
    if (id >= width * height * channel) return;
    // NOTE(review): row/col are derived without removing the channel plane,
    // so threads beyond the first plane read out-of-range rows -- matches the
    // original indexing; confirm intended layout.
    int row = id / width;
    int col = id % width;
    const int radius = kernelSize / 2;  // filter half-width
    for ( int k = 0;k < channel;++k ) {
        for ( int i = 0;i < kernelSize;++i ) {
            for ( int j = 0;j < kernelSize;++j ) {
                float imgValue = 0.0f;
                // BUG FIX: `row - kernelSize >> 1 + i` parsed as
                // `(row - kernelSize) >> (1 + i)` because `>>` binds looser
                // than `-`/`+`. The commented-out reference implementation
                // used `row - kernelSize / 2 + i`, which is what we compute.
                int curRow = row - radius + i;
                int curCol = col - radius + j;
                if ( curRow < 0 || curCol < 0 || curRow >= height || curCol >= width) {
                    continue;
                } else {
                    imgValue = img[curRow * width + curCol];
                }
                result[id] += kernel[k * kernelSize * kernelSize + i * kernelSize + j] * imgValue;
            }
        }
    }
}
// Builds a synthetic gradient image and filter bank, runs `conv` on the GPU,
// and prints corners of the input, kernels, and output.
int main(void)
{
    int width = 1920;
    int height = 1080;
    int inChannel = 1;
    int outChannel = 8;
    float* img = new float[width * height];
    for ( int row = 0;row < height;++row ) {
        for (int col = 0;col < width;++col ) {
            img[col + row * width] = (col + row) % 256;
        }
    }
    int kernelSize = 5;
    float* kernel = new float[outChannel * kernelSize * kernelSize];
    for ( int i = 0;i < outChannel * kernelSize * kernelSize; ++i) {
        kernel[i] = i % kernelSize - 1;
    }
    float* imgGpu;
    float* kernelGpu;
    float* resultGpu;
    cudaMalloc((void**)&imgGpu, inChannel * width * height * sizeof(float));
    cudaMalloc((void**)&kernelGpu, outChannel * kernelSize * kernelSize * sizeof(float));
    cudaMalloc((void**)&resultGpu, outChannel * width * height * sizeof(float));
    // BUG FIX: the kernel accumulates with `result[id] += ...` but cudaMalloc
    // does not zero memory; clear the output buffer first.
    cudaMemset(resultGpu, 0, outChannel * width * height * sizeof(float));
    cudaMemcpy(imgGpu, img, inChannel * width * height * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(kernelGpu, kernel, outChannel * kernelSize * kernelSize * sizeof(float), cudaMemcpyHostToDevice);
    int threadNum = getThreadNum();
    int blockNum = (width * height - 0.5) / threadNum + 1;  // ceiling-style block count
    conv<<<blockNum, threadNum>>>(imgGpu, kernelGpu, resultGpu, width, height,
        outChannel, kernelSize);
    float* result = new float[outChannel * width * height];
    cudaMemcpy(result, resultGpu, outChannel * width * height * sizeof(float), cudaMemcpyDeviceToHost);
    for ( int i = 0;i < 10;++i ) {
        for ( int j = 0;j < 10;++j ) {
            printf("%2.0f ", img[i * width + j]);
        }
        printf("\n");
    }
    printf("\n");
    for ( int k = 0;k < 2;++k ) {
        for ( int i = 0;i < kernelSize;++i ) {
            for ( int j = 0;j < kernelSize;++j ) {
                printf("%2.0f ", kernel[k * kernelSize * kernelSize + i * kernelSize + j]);
            }
            printf("\n");
        }
        printf("\n\n");
    }
    for ( int k = 0;k < 2;++k ){
        for ( int i = 0;i < 10;++i ) {
            for ( int j = 0;j < 10;++j ) {
                printf("%2.0f ", result[k * height * width + i * width + j]);
            }
            printf("\n");
        }
        printf("\n\n");
    }
    // FIX: release host and device memory (the original leaked everything).
    delete[] img;
    delete[] kernel;
    delete[] result;
    cudaFree(imgGpu);
    cudaFree(kernelGpu);
    cudaFree(resultGpu);
    return 0;
}
|
23,539 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h> /* round() */
typedef struct bmpFileHeader
{
/* The 2 identification bytes ("BM") precede this struct in the file */
unsigned int size; /* Total file size */
unsigned short resv1; /* Reserved */
unsigned short resv2; /* Reserved */
unsigned int offset; /* Offset to the start of the image data */
} bmpFileHeader;
typedef struct bmpInfoHeader
{
unsigned int headersize; /* Size of this header */
unsigned int width; /* Image width in pixels */
unsigned int height; /* Image height in pixels */
unsigned short planes; /* Color planes (always 1) */
unsigned short bpp; /* Bits per pixel */
unsigned int compress; /* Compression type */
unsigned int imgsize; /* Size of the image data in bytes */
unsigned int bpmx; /* Horizontal resolution in bits per meter */
unsigned int bpmy; /* Vertical resolution in bits per meter */
unsigned int colors; /* Colors used in the palette */
unsigned int imxtcolors; /* Important colors; 0 means all */
} bmpInfoHeader;
unsigned char *LoadBMP(char *filename, bmpInfoHeader *bInfoHeader);
void DisplayInfo(bmpInfoHeader *info);
void TextDisplay(bmpInfoHeader *info, unsigned char *img);
void SaveBMP(char *filename, bmpInfoHeader *info, unsigned char *imgdata);
// Converts a 3-byte-per-pixel (B,G,R) image to a 1-byte-per-pixel grayscale
// image using a weighted luminance sum. One thread per pixel, 2-D launch.
__global__
void escalaGrises(unsigned char *img, unsigned char *grayimg, int numRows, int numCols)
{
    int Col = threadIdx.x + blockIdx.x*blockDim.x;
    int Row = threadIdx.y + blockIdx.y*blockDim.y;
    if(Col < numCols && Row < numRows)
    {
        int grayOffset = Col + Row*numCols;  // index into 1-byte output
        int rgbOffset = grayOffset*3;        // 3 bytes per input pixel
        unsigned char r = img[rgbOffset + 2];
        unsigned char g = img[rgbOffset + 1];
        unsigned char b = img[rgbOffset ];
        // FIX: `0.07` was a double literal, promoting the whole expression to
        // double on the device; use a float literal like the other weights.
        grayimg[grayOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
    }
}
// Loads a BMP, shows it as ASCII art, converts it to grayscale on the GPU,
// and writes the result back out.
int main()
{
    bmpInfoHeader info;
    unsigned char *img, *d_img, *im2;
    int blockThreadSize, numberOfBlocks;
    int numRows, numCols;
    img = LoadBMP("magia_sonrisa.bmp", &info);
    if (img == NULL) {
        fprintf(stderr, "could not load magia_sonrisa.bmp\n");
        return 1;
    }
    DisplayInfo(&info);
    TextDisplay(&info, img);
    numRows = info.height;
    numCols = info.width;
    blockThreadSize = 32;
    numberOfBlocks = 1 + ((numRows - 1) / blockThreadSize);
    const int isize = (numRows*numCols)*sizeof(char);
    // BUG FIX: the original passed the host pointer `img` straight to the
    // kernel; the RGB input must be copied to device memory first.
    cudaMalloc((void**)&d_img, info.imgsize);
    cudaMemcpy(d_img, img, info.imgsize, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&im2, isize);
    const dim3 blockSize(blockThreadSize, blockThreadSize, 1);
    // BUG FIX: ceil(numCols/blockThreadSize) truncated via integer division
    // before ceil() ran; use integer ceiling division instead.
    const dim3 gridSize(numberOfBlocks, 1 + ((numCols - 1) / blockThreadSize), 1);
    escalaGrises<<<gridSize, blockSize>>>(d_img, im2, numRows, numCols);
    cudaMemcpy(img, im2, isize, cudaMemcpyDeviceToHost);
    cudaFree(d_img);
    cudaFree(im2);
    // NOTE(review): img now holds 1 byte/pixel but SaveBMP still writes
    // info.imgsize (RGB-sized) bytes, as in the original -- confirm intent.
    SaveBMP("resultado.bmp", &info, img);
    free(img);
    return 0;
}
// Renders the BMP as coarse ASCII art: each sampled pixel becomes one of 8
// characters depending on which R/G/B channels exceed a threshold.
void TextDisplay(bmpInfoHeader *info, unsigned char *img)
{
    int x, y;
    /* Reduce vertical/horizontal resolution so the image fits on screen */
    static const int reduccionX=6, reduccionY=4;
    /* A channel above this threshold counts as "on" (1). */
    static const int umbral=90;
    /* Character assigned to each of the 8 on/off channel combinations */
    static unsigned char colores[9]=" bgfrRGB";
    int r,g,b;
    /* BUG FIX: the loop started at y == height, reading one full row past
       the end of the pixel data; start at the last valid row instead. */
    for (y=info->height-1; y>=0; y-=reduccionY)
    {
        for (x=0; x<info->width; x+=reduccionX)
        {
            b=(img[3*(x+y*info->width)]>umbral);
            g=(img[3*(x+y*info->width)+1]>umbral);
            r=(img[3*(x+y*info->width)+2]>umbral);
            printf("%c", colores[b+g*2+r*4]);
        }
        printf("\n");
    }
}
/* Loads a BMP file: validates the "BM" magic, reads both headers, then the
   pixel data at the offset given by the file header. Returns a malloc'd
   pixel buffer (caller frees) or NULL on any failure.
   NOTE(review): reading the headers as packed structs assumes no compiler
   padding -- matches the original; confirm on the target compiler. */
unsigned char *LoadBMP(char *filename, bmpInfoHeader *bInfoHeader)
{
    FILE *f;
    bmpFileHeader header; /* file header */
    unsigned char *imgdata; /* pixel data */
    unsigned short type; /* 2 identification bytes */
    f=fopen (filename, "r");
    if (!f)
        return NULL; /* cannot read -> no image */
    /* FIX: check every fread; a truncated file previously produced
       uninitialized headers and garbage reads. */
    if (fread(&type, sizeof(unsigned short), 1, f) != 1 || type != 0x4D42)
    {
        fclose(f);
        return NULL;
    }
    if (fread(&header, sizeof(bmpFileHeader), 1, f) != 1 ||
        fread(bInfoHeader, sizeof(bmpInfoHeader), 1, f) != 1)
    {
        fclose(f);
        return NULL;
    }
    imgdata=(unsigned char*)malloc(bInfoHeader->imgsize);
    if (!imgdata)  /* FIX: malloc was unchecked */
    {
        fclose(f);
        return NULL;
    }
    /* Seek to the pixel data as directed by the file header's offset. */
    fseek(f, header.offset, SEEK_SET);
    if (fread(imgdata, bInfoHeader->imgsize, 1, f) != 1)
    {
        free(imgdata);
        fclose(f);
        return NULL;
    }
    fclose(f);
    return imgdata;
}
/* Prints every parsed BMP info-header field to stdout.
   (Labels are Spanish runtime strings and are intentionally left as-is.) */
void DisplayInfo(bmpInfoHeader *info)
{
printf("Tamaño de la cabecera: %u\n", info->headersize);
printf("Anchura: %d\n", info->width);
printf("Altura: %d\n", info->height);
printf("Planos (1): %d\n", info->planes);
printf("Bits por pixel: %d\n", info->bpp);
printf("Compresión: %d\n", info->compress);
printf("Tamaño de datos de imagen: %u\n", info->imgsize);
printf("Resolucón horizontal: %u\n", info->bpmx);
printf("Resolucón vertical: %u\n", info->bpmy);
printf("Colores en paleta: %d\n", info->colors);
printf("Colores importantes: %d\n", info->imxtcolors);
}
/* Writes a BMP file: magic bytes, file header, info header, pixel data. */
void SaveBMP(char *filename, bmpInfoHeader *info, unsigned char *imgdata)
{
    bmpFileHeader header;
    FILE *f;
    unsigned short type;
    f=fopen(filename, "w+");
    if (!f)  /* FIX: the original dereferenced a NULL FILE* on open failure */
        return;
    header.size = info->imgsize + sizeof(bmpFileHeader) + sizeof(bmpInfoHeader);
    /* FIX: the reserved fields were left uninitialized (their assignments
       were commented out), writing indeterminate stack bytes to the file. */
    header.resv1 = 0;
    header.resv2 = 0;
    /* Offset = both headers + 2 magic bytes */
    header.offset = sizeof(bmpFileHeader) + sizeof(bmpInfoHeader) + 2;
    /* File identification bytes */
    type = 0x4D42;
    fwrite(&type, sizeof(type),1,f);
    fwrite(&header, sizeof(bmpFileHeader),1,f);
    fwrite(info, sizeof(bmpInfoHeader),1,f);
    fwrite(imgdata, info->imgsize, 1, f);
    fclose(f);
}
23,540 | #include "includes.h"
// One thread per model: copies position `col` into d_modelBuffer[col*4+3]
// (the 4th float4 of each 4-float4 group -- presumably a matrix's translation
// column; confirm with the renderer).
__global__ void kernel_update_models(float4* d_positions, float4* d_modelBuffer, int numel) {
    size_t col = threadIdx.x + blockIdx.x * blockDim.x;
    if (col >= numel) { return; }
    d_modelBuffer[col*4+3] = make_float4(
        d_positions[col].x,
        d_positions[col].y,
        d_positions[col].z,
        1
    );
    // FIX: removed a trailing __syncthreads(); threads that took the early
    // return above never reached the barrier (undefined behavior), and no
    // thread reads data written by another, so no barrier is needed.
}
23,541 | #include <thrust/device_vector.h>
#include <thrust/random.h>
#include <iostream>
// Functor producing a random double from `distribution` driven by `rng`.
// NOTE(review): thrust copies this functor for each invocation, so every
// call likely starts from the same engine state and all outputs may be
// identical; the usual fix is rng.discard(i) with a counting-iterator
// index. A correct fix also requires changing the transform call site, so
// it is only flagged here -- confirm intended behavior.
struct fillRng {
thrust::uniform_real_distribution<double> distribution;
thrust::default_random_engine rng;
fillRng(thrust::uniform_real_distribution<double> dist, thrust::default_random_engine engine) {
distribution = dist;
rng = engine;
}
__device__ __host__
double operator() (const int &i) {
// `i` is the input element (always 0 here), not an index; it is ignored.
return distribution(rng);
}
};
// Fills a 10-element device vector with draws from U[25, 40) and prints it.
int main()
{
thrust::uniform_real_distribution<double> dist(25, 40);
thrust::default_random_engine rng;
thrust::device_vector<double> dev(10, 0);
// Each element is replaced by fillRng(element); the input values (all 0)
// are ignored by the functor.
thrust::transform(
dev.begin(),
dev.end(),
dev.begin(),
fillRng(dist, rng)
);
for (auto i = dev.begin(); i != dev.end(); i++) {
std::cout << *i << " "; // element-wise host access to GPU memory is slow!
}
std::cout << std::endl;
}
23,542 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Flattened global thread id for a 2-D grid of 2-D blocks.
__device__ int getTid()
{
    int blockId   = blockIdx.y * gridDim.x + blockIdx.x;     // linear block index
    int localId   = threadIdx.y * blockDim.x + threadIdx.x;  // thread index within block
    int blockSize = blockDim.x * blockDim.y;
    return blockId * blockSize + localId;
}
// Element-wise sum c = a + b over an ha x wa matrix, one thread per element.
// NOTE(review): the name says "mul" but the body adds -- confirm which was
// intended; the caller depends on the current (addition) behavior, so the
// name is left unchanged.
__global__ void mulElement(int *a ,int *b , int *c , int ha ,int wa)
{
int th = getTid();
if(th<(ha*wa))
{
c[th] = a[th]+b[th];
}
}
// Reads two ha x wa matrices, adds them element-wise on the GPU with a
// user-chosen grid shape, and prints the result.
int main(void)
{
    int *a,*b,*t,i,j;
    int *d_a,*d_b,*d_t;
    int ha , wa;
    printf("Enter the dimensions of first matrix \n ");
    scanf("%d %d",&ha,&wa);
    int size1 = sizeof(int)*ha*wa;
    a = (int*)malloc(size1);
    b = (int*)malloc(size1);
    t = (int*)malloc(size1);
    printf("Enter input matrix 1 : \n");
    for(i = 0;i<ha*wa;i++)
        scanf("%d",&a[i]);
    printf("Enter input matrix 2 : \n");
    for(i = 0;i<ha*wa;i++)
        scanf("%d",&b[i]);
    cudaMalloc((void**)&d_a,size1);
    cudaMalloc((void**)&d_b,size1);
    cudaMalloc((void**)&d_t,size1);
    cudaMemcpy(d_a,a,size1,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,size1,cudaMemcpyHostToDevice);
    int gx,gy,bx,by;
    printf("Enter the dimension of the grid \n");
    scanf("%d %d",&gx,&gy);
    // Block dims chosen so grid*block covers the whole matrix.
    // NOTE(review): bx*by may exceed the 1024-threads-per-block limit for
    // small grids -- the launch would then fail silently; confirm inputs.
    bx = ceil((double)ha/gx);
    by = ceil((double)wa/gy);
    printf("The dimensions of block are : \n %d %d \n",bx,by);
    dim3 grid(gx,gy);
    dim3 block(bx,by);
    mulElement<<<grid,block>>>(d_a,d_b,d_t,ha,wa);
    cudaMemcpy(t,d_t,size1,cudaMemcpyDeviceToHost);
    printf("Result vector is :\n");
    for(i = 0;i<ha;i++)
    {
        for(j = 0;j<wa;j++)
            printf("%d ",t[i*wa+j]);
        printf("\n");
    }
    getchar();
    // FIX: release everything -- the original leaked d_b and all host buffers.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_t);
    free(a);
    free(b);
    free(t);
    return 0;
}
23,543 | /*
Group info:
nphabia Niklesh Phabiani
rtnaik Rohit Naik
anjain2 Akshay Narendra Jain
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#define __DEBUG
#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)
#define TSCALE 1.0
#define VSQR 0.1
/**************************************
* void __cudaSafeCall(cudaError err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
// Aborts with file:line context when a CUDA API call returned an error.
// Active only in __DEBUG builds; intended for use via the CUDA_CALL macro.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
// do-while(0) keeps the macro expansion a single statement.
do
{
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
return;
}
// Checks (and clears) the last CUDA error -- catches kernel launch failures
// that API return codes do not report. Active only in __DEBUG builds;
// intended for use via the CUDA_CHK_ERR macro.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
#pragma warning( push )
#pragma warning( disable: 4127 ) // Prevent warning on do-while(0);
do
{
cudaError_t err = cudaGetLastError();
if ( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment if not needed.
/*err = cudaThreadSynchronize();
if( cudaSuccess != err )
{
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s.\n",
file, line, cudaGetErrorString( err ) );
exit( -1 );
}*/
} while ( 0 );
#pragma warning( pop )
#endif // __DEBUG
return;
}
/* utility functions for lake calculations */
// Pebble forcing term: exponentially decaying impulse -e^(-TSCALE*t) * p.
__device__ double f_gpu(double p, double t)
{
    // FIX: expf() is the single-precision exponential; using it here routed
    // a double computation through float. Use exp() for double arguments.
    return -exp(-TSCALE * t) * p;
}
// Evolve kernel function to run on GPU
// One time step of the lake wave equation on an n x n grid using a 9-point
// stencil. un = next state, uc = current, uo = previous. Boundary cells are
// clamped to zero. One thread per grid point (2-D launch).
__global__ void evolve_gpu(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t) {
    int i, j, idx;
    i = blockIdx.x * blockDim.x + threadIdx.x;
    j = blockIdx.y * blockDim.y + threadIdx.y;
    // FIX: guard against grid overshoot -- without this, threads past the
    // n x n domain (when n is not a multiple of the block size) wrote out
    // of bounds.
    if (i >= n || j >= n)
        return;
    idx = i * n + j;
    if (i == 0 || i == n - 1 || j == 0 || j == n - 1)
        un[idx] = 0.;
    else
    {
        // 9-point stencil: direct neighbors (idx±1 = E/W, idx±n = S/N) at
        // full weight, diagonals at quarter weight, center weight 5.
        un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *(( uc[idx-1] + uc[idx+1] +
            uc[idx-n] + uc[idx+n] + 0.25*(uc[idx-n-1] + uc[idx-n+1] + uc[idx+n-1] + uc[idx+n+1])-
            5 * uc[idx])/(h * h) + f_gpu(pebbles[idx],t));
    }
}
// Advances simulation time *t by dt. Returns 0 (leaving *t untouched) once
// the next step would pass the final time tf; returns 1 otherwise.
int tpdt_gpu(double *t, double dt, double tf)
{
    double next = (*t) + dt;
    if (next > tf)
        return 0;
    *t = next;
    return 1;
}
// Runs the lake simulation on the GPU from t=0 to end_time, writing the final
// state into u. u0/u1 are the two initial states; nthreads is the block edge.
// NOTE(review): the grid n/nthreads x n/nthreads assumes n is a multiple of
// nthreads -- confirm with callers.
void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
cudaEvent_t kstart, kstop;
float ktime;
/* HW2: Define your local variables here */
// Calculate the number of blocks and threads.
double t = 0.;
double dt = h / 2.;
int narea = n * n;
// n: npoints (grid size)
dim3 no_blocks(n/nthreads, n/nthreads);
dim3 threads_per_block(nthreads, nthreads);
/* Set up device timers */
CUDA_CALL(cudaSetDevice(0));
CUDA_CALL(cudaEventCreate(&kstart));
CUDA_CALL(cudaEventCreate(&kstop));
/* HW2: Add CUDA kernel call preperation code here */
// Device copies
double *d_uo;
double *d_uc;
double *d_un;
double *d_pebb;
// Allocating memory for device copies
cudaMalloc((double **)&d_uo, narea * sizeof(double));
cudaMalloc((double **)&d_uc, narea * sizeof(double));
cudaMalloc((double **)&d_un, narea * sizeof(double));
cudaMalloc((double **)&d_pebb, narea * sizeof(double));
/* Start GPU computation timer */
CUDA_CALL(cudaEventRecord(kstart, 0));
// Copying host values to device so that the device can then operate using these values
cudaMemcpy(d_uo, u0, sizeof(double) * narea, cudaMemcpyHostToDevice);
cudaMemcpy(d_uc, u1, sizeof(double) * narea, cudaMemcpyHostToDevice);
cudaMemcpy(d_pebb, pebbles, sizeof(double) * narea, cudaMemcpyHostToDevice);
/* HW2: Add main lake simulation loop here */
while(1)
{
// GPU kernel function call to start computations on GPU
evolve_gpu<<<no_blocks, threads_per_block>>>(d_un, d_uc, d_uo, d_pebb, n, h, dt, t);
// Copying evolved values so that they can be used for the next iteration of evolve
// NOTE(review): rotating the three buffers by pointer swap would avoid
// these two device-to-device copies per step (left as-is).
cudaMemcpy(d_uo, d_uc, sizeof(double) * narea, cudaMemcpyDeviceToDevice);
cudaMemcpy(d_uc, d_un, sizeof(double) * narea, cudaMemcpyDeviceToDevice);
if(!tpdt_gpu(&t,dt,end_time)) break;
}
/* Copy back results to host */
// from d_u (2d) to u (1d)
// Copying latest evolve value from device to host after end time is reached
cudaMemcpy(u, d_un, sizeof(double) * narea, cudaMemcpyDeviceToHost);
/* Stop GPU computation timer */
CUDA_CALL(cudaEventRecord(kstop, 0));
CUDA_CALL(cudaEventSynchronize(kstop));
CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
printf("GPU computation: %f msec\n", ktime);
/* HW2: Add post CUDA kernel call processing and cleanup here */
cudaFree(d_uo);
cudaFree(d_uc);
cudaFree(d_un);
cudaFree(d_pebb);
/* timer cleanup */
CUDA_CALL(cudaEventDestroy(kstart));
CUDA_CALL(cudaEventDestroy(kstop));
}
|
23,544 | /*
helloWorld example for CUDA
compile with:
> nvcc -arch=sm_20 hello_cuda.cu
run with:
> ./a.out
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N 10
// cuda kernel (runs on GPU)
// Element-wise C = A + B for i in [0, nmax), with every C[i] atomically
// accumulated into *sum (caller must zero *sum first).
__global__ void sum_kernel(float* A,float* B, float* C, float* sum, int nmax)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= nmax)
        return;
    const float c = A[gid] + B[gid];
    C[gid] = c;
    // atomicAdd avoids the race a plain `*sum += c` would have.
    atomicAdd(sum, c);
}
// main program (runs on CPU)
// Adds two N-element vectors on the GPU and atomically accumulates the total
// of the sums, then prints both. NOTE(review): the order of float atomicAdds
// is nondeterministic, so `sum` may vary in the last bits across runs.
int main(void)
{
float *A, *B, *C;
float *A_d, *B_d, *C_d;
float sum;
float *sum_d;
printf("hello CUDA: \n");
// array on CPU (host)
A = (float *) malloc(N*sizeof(float));
B = (float *) malloc(N*sizeof(float));
C = (float *) malloc(N*sizeof(float));
// initializes
for(int i=0; i<N; i++){
A[i] = 1.0;
B[i] = 2.0;
C[i] = 0.0;
}
sum = 0.0f;
// GPU preparation:
// array on GPU (device)
cudaMalloc((void **) &A_d, N*sizeof(float));
cudaMalloc((void **) &B_d, N*sizeof(float));
cudaMalloc((void **) &C_d, N*sizeof(float));
cudaMalloc((void **) &sum_d, sizeof(float));
// initializes on GPU with zero
cudaMemset(sum_d,0,sizeof(float));
// copies arrays from CPU to GPU
cudaMemcpy(A_d,A,N*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(B_d,B,N*sizeof(float),cudaMemcpyHostToDevice);
// cuda kernel dimensions ( 3 blocks x 4 threads )
int blocksize = 4;
int nblock = N/blocksize+(N%blocksize==0?0:1);
// launches cuda kernel
sum_kernel<<<nblock,blocksize>>>(A_d,B_d,C_d,sum_d,N);
// copies back from GPU to CPU (cudaMemcpy synchronizes with the kernel)
cudaMemcpy(C,C_d,N*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(&sum,sum_d,sizeof(float),cudaMemcpyDeviceToHost);
// user output
printf("result: \n");
for(int i=0;i<N;i++){
printf(" %f \n",C[i]);
}
printf("\n");
printf(" sum = %f\n\n",sum);
// releases memory on CPU
free(A);
free(B);
free(C);
// releases memory on GPU
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
cudaFree(sum_d);
}
|
23,545 |
/************************************************************************************
PEMG-2010
June 21-24, 2010
Source Code : sharedMemoryRestructuringDataTypes.cu
Objective : This code demonstrates achievable shared memory bandwidth for different
inbuilt data types
Description : Example code to demonstrate the different shared memory bandwidths
achieved when
1) accessing a 3d array of floats
2) accessing a float3 array
3) accessing a 4d array of floats
4) accessing a float4 array
Input : None
output : The different bandwidths of the shared memory that are achieved in the above
mentioned accesses
Modified : May 2010
National PARAM Supercomputing Facility, C-DAC,Pune.
Author : RarchK
****************************************************************************************/
#include <stdio.h>
#include <cuda.h>
#include <float.h>
#define NO_OF_PATTERNS 4
#define NTIMES 10
#define BLOCK_SIZE 128
#define TRANSFERED_DATA_SIZE 2000000 //2 MB
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))
void printResults(void);
////////////////////////////////////////////////////////////////////////////////////////////////
//
// intialising an unused 3D float array in shared memory
// access pattern involving accessing the i-th element from all the rows by the i-th thread
//
/////////////////////////////////////////////////////////////////////////////////////////////////
// Stores `value` into a 3-row shared-memory float array, one column per
// thread (benchmark kernel; the array is intentionally unused afterwards).
__global__ void set3DFloatArray( float value )
{
    __shared__ __attribute__((unused)) float array[3][BLOCK_SIZE];
    const int idx = threadIdx.x;
    for (int row = 0; row < 3; ++row)
        array[row][idx] = value;
}
/////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// intialising an unused float3 array in shared memory
// access pattern involving accessing all the fields of the i-th element of the array by the i-th thread
//
////////////////////////////////////////////////////////////////////////////////////////////////////////
// Stores `value` into all three fields of one float3 per thread in shared
// memory (benchmark kernel; the array is intentionally unused afterwards).
__global__ void setFloat3Array( float value )
{
    __shared__ __attribute__((unused)) float3 array[BLOCK_SIZE];
    const int idx = threadIdx.x;
    array[idx] = make_float3(value, value, value);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////
//
// intialising an unused 4D float array in shared memory
// access pattern involving accessing the i-th element from all the rows by the i-th thread
//
///////////////////////////////////////////////////////////////////////////////////////////////
// Stores `value` into a 4-row shared-memory float array, one column per
// thread (benchmark kernel; the array is intentionally unused afterwards).
__global__ void set4DFloatArray( float value )
{
    __shared__ __attribute__((unused)) float array[4][BLOCK_SIZE];
    const int idx = threadIdx.x;
    for (int row = 0; row < 4; ++row)
        array[row][idx] = value;
}
///////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// intialising an unused float4 array in shared memory
// access pattern involving accessing all the fields of the i-th element of the array by the i-th thread
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////
// Stores `value` into all four fields of one float4 per thread in shared
// memory (benchmark kernel; the array is intentionally unused afterwards).
__global__ void setFloat4Array( float value )
{
    __shared__ __attribute__((unused)) float4 array[BLOCK_SIZE];
    const int idx = threadIdx.x;
    array[idx] = make_float4(value, value, value, value);
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
static double avgtime[NO_OF_PATTERNS] = {0}, maxtime[NO_OF_PATTERNS] = {0}, mintime[NO_OF_PATTERNS];
static float bandWidths[NO_OF_PATTERNS] = {0};
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// the main routene
// for timing the different initialising kernels
// finding the band widths
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Times the four shared-memory store kernels NTIMES each (first iteration
// skipped as warm-up), then reports min/avg/max times and a bandwidth figure.
// NOTE(review): the kernels touch only shared memory, so the "bandwidth"
// derived from TRANSFERED_DATA_SIZE reflects launch + shared-store cost,
// not global-memory traffic -- confirm intended interpretation.
int main(int argc, char* argv[])
{
float elapsedTimes[NO_OF_PATTERNS][NTIMES];
cudaEvent_t start,stop;
double bytes = sizeof(float) * TRANSFERED_DATA_SIZE;
// event creation, which will be used for timing the code
cudaEventCreate(&start);
cudaEventCreate(&stop);
//finding the 1D grid size (ceiling division)
int gridSize = TRANSFERED_DATA_SIZE/BLOCK_SIZE;
if( TRANSFERED_DATA_SIZE % BLOCK_SIZE != 0 )
gridSize += 1;
// running each pattern NTIMES
for(int k=0; k < NTIMES; k++)
{
// timing the kernels corresponding to different access patterns
// PATTERN 1
cudaEventRecord(start,0);
set3DFloatArray <<< gridSize,BLOCK_SIZE >>> (2.0f);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[0][k],start,stop);
// PATTERN 2
cudaEventRecord(start,0);
setFloat3Array <<< gridSize,BLOCK_SIZE >>> (2.0f);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[1][k],start,stop);
//PATTERN 3
cudaEventRecord(start,0);
set4DFloatArray <<< gridSize,BLOCK_SIZE >>> (2.0f);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[2][k],start,stop);
//PATTERN 4
cudaEventRecord(start,0);
setFloat4Array <<< gridSize,BLOCK_SIZE >>> (2.0f);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTimes[3][k],start,stop);
} // end of the for loop involving NTIMES
// intializing the mintime array
for(int i=0; i < NO_OF_PATTERNS;i++)
mintime[i] = FLT_MAX;
for (int k=1; k < NTIMES; k++) // skiping the first iteration
{
for (int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i] + elapsedTimes[i][k];
mintime[i] = MIN(mintime[i],elapsedTimes[i][k]);
maxtime[i] = MAX(maxtime[i], elapsedTimes[i][k]);
}
}
// calculation of the different band widths that are achieved by different access patterns
// bytes per millisecond; printed later divided by 1e6 to give GB/s
for(int i=0; i < NO_OF_PATTERNS; i++)
{
avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time
bandWidths[i] = bytes/mintime[i];
}
printResults();
printf("\n\n**** successful termination of the program ****\n\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
return 0;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// prints the results containig the minimum, maximum, average times taken by different kernels
// the associated maximum bandwidth of the sharede memory achieved by the different kernals
//
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Prints the timing statistics gathered in main(): per-pattern bandwidth and
// the average / minimum / maximum kernel times. Reads the module-level
// bandWidths/avgtime/mintime/maxtime arrays filled in by main().
void printResults()
{
printf("Demonstrating differences among the different data types and their accesses in the shared memory\n");
printf("The transfered data size (Bytes): %ld\n",TRANSFERED_DATA_SIZE*sizeof(float));
printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
printf("Pattern \t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
printf("-------------------------------------------------------------------------------------------------------------------------------\n");
// One row per access pattern; labels mirror the kernel launch order in main().
for(int i=0; i < NO_OF_PATTERNS; i++)
{
const char *label = 0;
switch(i)
{
case 0: label = "accessing 3-d float array "; break;
case 1: label = "accessing 1-d float3 array"; break;
case 2: label = "accessing 4-d float array "; break;
case 3: label = "accessing 1-d float4 array"; break;
}
if(label != 0)
printf("%s", label);
// bandWidths is bytes/ms; /1e6 converts to GB/s (1 MB/ms == 1 GB/s)
printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]);
}
printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
23,546 |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
// CUDA Kernel
// Naive matrix multiply C = A * B for square TM x TM row-major matrices.
// Launch layout (fixed by the host code): one block per output row
// (blockIdx.x) and one thread per output column (threadIdx.x), so TM must
// not exceed the per-block thread limit.
__global__ void matrixMul( float* C, float* A, float* B, int TM)
{
    const int row = blockIdx.x;
    const int col = threadIdx.x;
    // dot product of row 'row' of A with column 'col' of B
    float acc = 0.0f;
    for (int k = 0; k < TM; ++k) {
        acc += A[row * TM + k] * B[k * TM + col];
    }
    C[row * TM + col] = acc;
}
/////////////////////////////////////////////////////////
// Programme main
/////////////////////////////////////////////////////////
// Host driver: builds two TM x TM matrices with a known structure, multiplies
// them on the GPU with matrixMul, times the transfers and total run, and
// verifies the result analytically. TM defaults to 1024 and may be overridden
// by argv[1] (must stay <= max threads/block, since the kernel uses one
// thread per column; a too-large value is caught by the launch-error check).
int main(int argc, char** argv)
{
int i, j, GRID_SIZE_X, BLOCK_SIZE_X;
int TM=1024;
cudaError_t cerror;
if (argc>1) {
TM=atoi(argv[1]);
}
// BUGFIX: 'size' was previously computed from the DEFAULT TM before the
// command-line override, so any run with a non-default dimension
// allocated/copied the wrong number of bytes. Compute it after parsing,
// as size_t to avoid int overflow for large TM.
size_t size = (size_t)TM * TM * sizeof(float);
BLOCK_SIZE_X = TM; // one thread per output column
GRID_SIZE_X = TM;  // one block per output row
// grid and block definition
dim3 block(BLOCK_SIZE_X);
dim3 grid(GRID_SIZE_X);
// host-side matrix allocation
float *h_A, *h_B, *h_C ;
h_A = (float*) malloc(size);
h_B = (float*) malloc(size);
h_C = (float*) malloc(size);
// initialise the matrices with values that make the result checkable:
// all ones, except (i+1) on the diagonal of A and B
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
h_A[i*TM+j] = 1.0;
h_B[i*TM+j] = 1.0;
h_C[i*TM+j] = 0.0;
if (i==j) {
h_A[i*TM+j]=(float) (i+1);
h_B[i*TM+j]=(float) (i+1);
}
}
}
// events for timing compute and transfers
float tc, tt_cpuGpu, tt_gpuCpu;
cudaEvent_t depart, arret,cpuGpuDebut, cpuGpuFin, gpuCpuDebut, gpuCpuFin;
cudaEventCreate(&depart);
cudaEventCreate(&arret);
cudaEventCreate(&cpuGpuDebut);
cudaEventCreate(&cpuGpuFin);
cudaEventCreate(&gpuCpuDebut);
cudaEventCreate(&gpuCpuFin);
cudaEventRecord(depart,0);
cudaEventRecord(cpuGpuDebut,0);
cudaEventRecord(gpuCpuDebut,0);
// device-side matrix allocation
float *d_A, *d_B, *d_C;
cudaMalloc((void**) &d_A,size);
cudaMalloc((void**) &d_B,size);
cudaMalloc((void**) &d_C,size);
// copy A and B from host to device
cudaMemcpy(d_A, h_A, size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size,cudaMemcpyHostToDevice);
// time: host-to-device transfer (+ allocations)
cudaEventRecord(cpuGpuFin,0);
cudaEventSynchronize(cpuGpuFin);
cudaEventElapsedTime(&tt_cpuGpu,cpuGpuDebut, cpuGpuFin);
cudaEventDestroy(cpuGpuDebut);
cudaEventDestroy(cpuGpuFin);
printf("Transfert A,B de CPU vers GPU : %f seconde\n", tt_cpuGpu/1000.0);
// kernel launch
matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM);
// check for launch errors (e.g. TM larger than the max block size)
cerror=cudaGetLastError();
if ((int)cerror !=0) {
printf("Erreur appel kernel %d \n", (int) cerror);
exit(cerror);
}
// copy C back from the device (blocking copy also syncs with the kernel)
cudaMemcpy(h_C,d_C, size,cudaMemcpyDeviceToHost);
// time: compute + device-to-host transfer
cudaEventRecord(gpuCpuFin,0);
cudaEventSynchronize(gpuCpuFin);
cudaEventElapsedTime(&tt_gpuCpu,gpuCpuDebut, gpuCpuFin);
cudaEventDestroy(gpuCpuDebut);
cudaEventDestroy(gpuCpuFin);
printf("Transfert C de GPU vers CPU : %f seconde\n", tt_gpuCpu/1000.0);
// time: total elapsed
cudaEventRecord(arret,0);
cudaEventSynchronize(arret);
cudaEventElapsedTime(&tc,depart, arret);
cudaEventDestroy(depart);
cudaEventDestroy(arret);
printf("Temps consommé : %f seconde\n", tc/1000.0);
// verify: C[i][i] == (i+1)^2 + TM - 1, C[i][j] == i + j + TM elsewhere
for(i = 0; i < TM; i++){
for(j = 0; j < TM; j++){
if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1);
}
else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))) {
printf("Erreur i: %d j: %d\n", i, j);
exit(1);
}
}
}
// release host and device memory
free(h_A);
free(h_B);
free(h_C);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
|
23,547 | #include "cuda_runtime.h"
#include <stdio.h>
// Empty placeholder kernel; the iteration parameter t is currently unused.
__global__ void PF_iteration_kernel(int t)
{
}
23,548 | //2 layered neural network with LIF neurons
//computing Vm in parallel, Computing Isyn
//all-all connectivity between 2 layers
//starting point of reading mnist set by 'start'
//test: Test the trained mnist network on the images of handwritten
#include<stdio.h>
#include<math.h>
#include<time.h>
#include<stdlib.h>
#include "device_launch_parameters.h"
#include "cuda_runtime_api.h"
#define C 300E-12
#define gL 30E-9
#define VT 20E-3
#define EL -70E-3
#define Rp 3E-3
#define dt 1E-4
#define decay 0.9802
#define decays 0.9231
#define decay1 0.9048
#define WT 5E-9
#define w_lat -1.0E-9 //inhibitory lateral
//num of neurons in layer 1 and layer 2
#define p 28
#define q 28
#define N_imgs 10000
#define N2 10 //no. of neurons in 2nd layer
//Convolution parameters
#define Nw 3
#define L (p-Nw+1) //square matrix
#define N1 (12*L*L) //no. of neurons in 1st layer
#define Nthrds 1024 //use max no. of threads available per SM
#define Nsyns N1*N2 //no. of connections
#define T 0.1
#define M 1000
//Training parameters:
#define max_epochs 1 //1 complete presentation of all images
#define decay_LP 0.9802
//Variables for image reading
unsigned char *pix_spks_d;
int *d_imgs_lin, img_lin[N_imgs*(p*q+1)];
int test_set[N_imgs][p*q+1];
__device__ int d_imgs[N_imgs][p*q+1];
__device__ double img_spks[p*q][M];
__device__ double syn1[256], syn1s[256], syn[256][M];
__device__ unsigned char in_spk[N1];
__device__ double Isyn[N1][N2], weight[N1][N2];
__device__ double Isyn_tot[N2], Isyn_lat[N2];
__device__ double I_lat[N2];
__device__ unsigned char Y_op[N2][M];
__device__ double ci[N1], cil[N2];
__device__ double cis[N1],cils[N2];
//Neuron variables:
__device__ int ref_time1[N1],ref_time2[N2];
__device__ double Vm1[N1],Vm2[N2];
//Correlation variables:
__device__ double norm_obs[N2], norm_des;
__device__ double L_obs[N2][M], sum_obs[N2];
__device__ double L_des[M], sum_des;
////////////CUDA Kernels///////////
// Single-thread copy of the flattened image table into the 2-D device array
// d_imgs. Each row holds one image: column 0 is the class label, followed by
// p*q pixel intensities. Intended to be launched as <<<1,1>>>.
__global__ void img_readKernel(int *img)
{
const int rowLen = p * q + 1;
for (int n = 0; n < N_imgs; ++n) {
const int *src = img + n * rowLen;
for (int c = 0; c < rowLen; ++c) {
d_imgs[n][c] = src[c];
}
}
}
__device__ unsigned char pix_spks[256][M];
unsigned char pix_spks_h[256*M];
// Single-thread (<<<1,1>>>) setup kernel: loads the precomputed spike train
// for each of the 256 possible pixel intensities, then computes, for every
// intensity and time step, the double-exponential synaptic waveform
// syn[i][l] = fast_trace - slow_trace (traces decay by 'decay'/'decays' per
// step and jump by 1 on each spike).
__global__ void convert2Spks(unsigned char *pix_spks_d)
{
//int time_period=1;
for(int i=0; i<256; i++) {
syn1[i]=0.0; syn1s[i]=0.0;
for(int l=0; l<M; l++) {
//pix_spks[i][l]=0;
pix_spks[i][l]=pix_spks_d[i*M+l];
}
}
/*for(int i=0; i<256; i++) { //all possible pixel values
if(i>7) {
time_period = round((1.0/(double)i)/0.1E-3);
for(int l=time_period; l<M; l+=time_period) {
pix_spks[i][l]=1;
}
}
}*/
//Compute the synaptic kernels:
for(int i=0; i<256; i++) {
for(int l=0; l<M; l++) {
syn1[i]*=decay; syn1s[i]*=decays;
if(pix_spks[i][l]==1) {
syn1[i]+=1.0;
syn1s[i]+=1.0;
}
syn[i][l]=syn1[i]-syn1s[i];
}
}
}
//spikes with poisson distributed inter-arrival times
//NOTE: rand fn not allowed inside cuda kernel
//so making this a cpu function
unsigned char pixspks[256*M];
// Loads time step i of image k: one thread per pixel copies the precomputed
// synaptic waveform for that pixel's intensity into img_spks. Launched as
// <<<1,784>>> from main (784 == p*q).
__global__ void pix2spks(int k, int i)
{
int tx=threadIdx.x+blockIdx.x*blockDim.x;
if(tx<(p*q)) {
// +1 skips the class label stored in column 0 of d_imgs
img_spks[tx][i]=syn[d_imgs[k][tx+1]][i];
// NOTE(review): this barrier synchronizes nothing useful (no shared memory
// is involved) and sits inside a divergent guard; it is harmless only
// because the <<<1,784>>> launch makes every thread satisfy tx < p*q.
__syncthreads();
}
}
/*__global__ void pix2spks(int k)
{
//Load the spikes trains for the corresponding pixels:
for(int i=1; i<(p*q+1); i++) {
for(int l=0; l<M; l++) {
img_spks[i-1][l]=syn[d_imgs[k][i]][l];
//if(img_spks[i-1][l]!=0) printf("img_Spks[%d][%d]=%e\n",i-1,l,img_spks[i-1][l]);
}
}
}*/
__device__ unsigned char D_op[M];
// Single-call setup kernel: builds the desired output spike train D_op
// (spikes from step 100 onward, every 35 steps), its low-pass-filtered trace
// L_des (decay factor decay_LP, +1 per spike), and the trace's L2 norm
// norm_des used for correlation matching in Trained_chk.
// NOTE(review): sum_des is accumulated without being reset first, so this
// kernel must only ever run once per process (main calls it exactly once).
__global__ void createDes() {
for(int i=0; i<M;i++) {
D_op[i]=0;
}
for(int i=100; i<M; i+=35) {
D_op[i]=1;
}
L_des[0]=0.0;
for(int i=1; i<M; i++) {
L_des[i]=L_des[i-1]*decay_LP;
if(D_op[i]==1)
L_des[i]+=1;
sum_des+=L_des[i]*L_des[i];
}
norm_des=sqrt(sum_des);
}
__device__ double I_in[N1];
// Resets all per-image neuron and synapse state before simulating a new
// image. Launched over an (N1 x N2)-shaped thread grid; the layer-2 entries
// (indexed by ty) are written redundantly by every tx thread, but always
// with the same value.
__global__ void clear_vars()
{
int tx=threadIdx.x+blockIdx.x*blockDim.x;
int ty=threadIdx.y+blockIdx.y*blockDim.y;
if(tx<N1 && ty<N2) {
Vm2[ty]=EL; Isyn_tot[ty]=0.0;
ref_time2[ty]=0.0;
Vm1[tx]=EL; ref_time1[tx]=0.0;
I_in[tx]=0.0;
ci[tx]=0.0; cis[tx]=0.0;
cil[ty]=0.0; cils[ty]=0.0;
Isyn[tx][ty]=0.0; Isyn_lat[ty]=0.0;
sum_obs[ty]=0.0; L_obs[ty][0]=0.0;
}
}
//__global__ void LifKernel1(int i, double *V1, double *I1, unsigned char *spk)
// Heun (RK2) update of the N1 layer-1 LIF membrane potentials at time step i,
// driven by the convolution currents in I_in. A neuron whose potential
// reaches VT emits a spike in in_spk, resets to EL, and enters a refractory
// period of Rp/dt steps during which its membrane is frozen.
__global__ void LifKernel1(int i)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
double k1, k2;
if(j<N1) {
if(ref_time1[j]<i) ref_time1[j]=0; // refractory window expired
k1=(-gL*(Vm1[j]-EL)+I_in[j])/C;
k2=(-gL*((Vm1[j]+dt*k1)-EL)+I_in[j])/C;
Vm1[j]+=(dt*(k1+k2)/2)*(ref_time1[j]==0); // membrane frozen while refractory
if(Vm1[j]<EL)
Vm1[j]=EL; // clamp below by the resting potential
if(Vm1[j]>=VT) {
Vm1[j]=EL; // spike: reset and enter refractory period
ref_time1[j]=i+round(Rp/dt);
in_spk[j]=1;
} else {
in_spk[j]=0;
}
// BUGFIX: removed the __syncthreads() that sat inside this divergent
// guard. N1 (8112) is not a multiple of the 1024-thread block size, so
// threads with j >= N1 in the last block skipped the barrier, which is
// undefined behavior. The kernel uses no shared memory, so no barrier is
// needed at all.
}
}
//__global__ void LifKernel2(int i, double *V2, double *I2, unsigned char *spk)
// Heun (RK2) update of the N2 layer-2 LIF neurons at time step i, driven by
// the feed-forward current Isyn_tot plus the lateral current Isyn_lat.
// Records output spikes in Y_op and maintains the low-pass-filtered spike
// trace L_obs (and its running squared sum), whose L2 norm is finalized at
// the last step for use by Trained_chk.
__global__ void LifKernel2(int i)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
double k1, k2;
if(j<N2) {
if(ref_time2[j]<i) ref_time2[j]=0; // refractory window expired
k1=(-gL*(Vm2[j]-EL)+Isyn_tot[j]+Isyn_lat[j])/C;
k2=(-gL*((Vm2[j]+dt*k1)-EL)+Isyn_tot[j]+Isyn_lat[j])/C;
Vm2[j]+=(dt*(k1+k2)/2)*(ref_time2[j]==0); // membrane frozen while refractory
if(Vm2[j]<EL)
Vm2[j]=EL;
if(Vm2[j]>=VT) {
Vm2[j]=EL; // spike: reset, refractory, and record the output spike
ref_time2[j]=i+round(Rp/dt);
Y_op[j][i]=1;
} else {
Y_op[j][i]=0;
}
//compute LPF of the output spike trains:
if(i>0) {
L_obs[j][i]=L_obs[j][i-1]*decay_LP;
if(Y_op[j][i]==1) {
L_obs[j][i]+=1;
}
sum_obs[j]+=L_obs[j][i]*L_obs[j][i];
}
if(i==M-1)
norm_obs[j]=sqrt(sum_obs[j]); // trace norm for correlation matching
// NOTE(review): this barrier sits inside the j<N2 guard; with the
// <<<1,10>>> launch used in main all 10 threads reach it, but it serves
// no purpose here (no shared memory) and would be UB for launch shapes
// where the guard diverges.
__syncthreads();
}
}
//kernels for the total synapses in the network
// Updates the feed-forward synaptic currents at time step i. One thread per
// (presynaptic ix, postsynaptic iy) pair; the iy==0 thread of each column
// first advances the shared double-exponential trace (ci fast, cis slow,
// bumped by layer-1 spikes), then every pair scales the trace difference by
// its weight into Isyn.
__global__ void SynKernel(int i)
{
int ix=blockIdx.x*blockDim.x + threadIdx.x;
int iy=blockIdx.y*blockDim.y + threadIdx.y;
// Phase 1: one thread per presynaptic neuron updates the shared trace.
if(ix<N1 && iy==0) {
ci[ix]=ci[ix]*decay;
cis[ix]=cis[ix]*decays;
if(in_spk[ix]==1) {
ci[ix]+=1.0;
cis[ix]+=1.0;
}
}
// BUGFIX: the barrier used to sit inside the divergent `ix < N1` guard.
// The launch grid overshoots N1, so threads of the last block that failed
// the guard skipped the barrier -- undefined behavior. It is now reached
// unconditionally by every thread of the block. (Threads sharing an ix are
// in the same block, so a block-level barrier is sufficient to publish the
// phase-1 trace updates to phase 2.)
__syncthreads();
// Phase 2: scale the trace by each connection's weight.
if(ix<N1 && iy<N2) {
Isyn[ix][iy]=(ci[ix]-cis[ix])*weight[ix][iy];
}
}
// Computes the inhibitory lateral current for each layer-2 neuron at time
// step i: each neuron maintains a double-exponential trace driven by its own
// spike at the PREVIOUS step, and receives the summed traces of every other
// neuron scaled by w_lat (negative, i.e. inhibitory).
// Launched as <<<1,10>>> from main, so the whole layer lives in one block.
__global__ void Lat_curr(int i)
{
int ix=blockIdx.x*blockDim.x+threadIdx.x;
if(ix<N2) {
cil[ix]=cil[ix]*decay;
cils[ix]=cils[ix]*decays;
// BUGFIX: guard i > 0 -- this kernel is called with i == 0 on the first
// time step, and Y_op[ix][i-1] then read out of bounds (Y_op[ix][-1]).
if(i>0 && Y_op[ix][i-1]==1) {
cil[ix]+=1.0;
cils[ix]+=1.0;
}
I_lat[ix]=w_lat*(cil[ix]-cils[ix]);
}
// BUGFIX: barrier added (outside any divergent guard) so that every
// thread's freshly written I_lat[] is visible before the cross-neuron sum
// below; previously the read raced with the writes. Block-level sync is
// sufficient because the kernel is launched as a single block.
__syncthreads();
if(ix<N2) {
Isyn_lat[ix]=0;
for(int k=0; k<N2; k++) {
if(k!=ix) {
Isyn_lat[ix]+=I_lat[k];
}
}
}
}
__device__ double total_curr[8][N2];
// First reduction stage: tree-sums the per-synapse currents Isyn[ix][iy]
// along the presynaptic (x) dimension within each 1024-thread block, writing
// one partial sum per (block, postsynaptic neuron) into total_curr.
// The reduction is destructive (it overwrites Isyn), which is safe because
// SynKernel recomputes Isyn from scratch every time step.
// NOTE(review): total_curr's first dimension is 8, so this assumes
// gridDim.x <= 8 (true for the grid_syn launch in main, where N1/1024+1 == 8).
__global__ void IsynRedKernel(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int tid=threadIdx.x;
// Barrier is correctly placed OUTSIDE the guard so all threads reach it.
for(unsigned int s=blockDim.x/2;s>0;s>>=1) {
if(iy<N2 && tid<s && (ix+s)<N1) {
Isyn[ix][iy]+=Isyn[ix+s][iy];
}
__syncthreads();
}
// Thread 0 of each block publishes the block's partial sum.
if(tid==0 && iy<N2) {
total_curr[blockIdx.x][iy]=Isyn[ix][iy];
}
}
// Final reduction stage: one thread per layer-2 neuron sums the 8 per-block
// partial currents produced by IsynRedKernel into Isyn_tot. The time-step
// parameter i is unused but kept for launch-site compatibility.
__global__ void reduce1(int i) {
int neuron=threadIdx.x+blockIdx.x*blockDim.x;
if(neuron>=N2)
return;
double acc=0.0;
for(int b=0; b<8; ++b)
acc+=total_curr[b][neuron];
Isyn_tot[neuron]=acc;
}
__device__ int match, match_corr;
// Scores the network's response to image k with two criteria and accumulates
// running accuracy counters (match / match_corr) in device globals:
//  1) spike count: the neuron with the most output spikes must equal the
//     label, with an "onoff" margin (> 1.0) over the runner-up;
//  2) correlation: cosine similarity between each neuron's low-pass spike
//     trace L_obs and the desired trace L_des; argmax must equal the label.
// On the last image, writes the final accuracies through *success/*Corr_acc.
// Launched as <<<1,10>>> (one thread per layer-2 neuron); the sequential
// argmax scans run on thread 0 only.
__global__ void Trained_chk(int k, double *success, double *Corr_acc)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
__shared__ int Tobs[N2];
__shared__ double corr[N2];
__shared__ int max, ind;
__shared__ int max2, ind2;
if(ix<N2) {
// Per-neuron tallies: total spike count and correlation with the target.
Tobs[ix]=0; corr[ix]=0.0;
double dot_lp=0;
for(int i=0; i<M; i++) {
if(Y_op[ix][i]==1)
Tobs[ix]++;
//correltion betwn. desired and observed:
dot_lp+=L_obs[ix][i]*L_des[i];
}
if(norm_obs[ix]!=0) {
corr[ix]=dot_lp/(norm_des*norm_obs[ix]);
}
// NOTE(review): barrier inside a guard -- safe only because the
// <<<1,10>>> launch makes all 10 threads satisfy ix < N2.
__syncthreads();
}
//find max in tobs:
if(ix==0) {
// Top spike count and its neuron index...
max=0; ind=0;
for(int j=0; j<N2; j++) {
if(max<Tobs[j]) {
max=Tobs[j];
ind=j;
}
}
// ...then the runner-up (the winner's count is zeroed first).
max2=0; Tobs[ind]=0;
for(int j=0; j<N2; j++) {
if(max2<Tobs[j]) {
ind2=j;
max2=Tobs[j];
}
}
// winner/runner-up ratio: requires a clear margin to count as a match
__shared__ double onoff;
onoff=(double)max/(double)max2;
*success=0.0;
printf("img=%d max=%d, ind=%d, max2=%d,ind2=%d, onoff=%f\n",d_imgs[k][0],max,ind,max2, ind2, onoff);
if(ind==d_imgs[k][0] && max!=0 && onoff>1.0)
match++;
printf("match=%d\n",match);
//find max in corr:
double max_corr=0;
int ind_max=0;
for(int j=0; j<N2; j++) {
if(max_corr<corr[j]) {
max_corr=corr[j];
ind_max=j;
}
}
if(ind_max==d_imgs[k][0] && max_corr!=0.0) {
match_corr++;
}
else
printf("k=%d, ouput=%d did not match with label%d with max_corr=%f\n",k,ind_max,d_imgs[k][0],max_corr);
printf("max_corr=%e, ind_max=%d, label=%d, match_corr=%d\n",max_corr,ind_max,d_imgs[k][0],match_corr);
// After the last image, publish the final accuracies to the host buffers.
if(k==(N_imgs-1)) {
*success=(double)match/(double)N_imgs;
*Corr_acc=(double)match_corr/(double)N_imgs;
}
}
}
// Zeroes the device-side accuracy counters before a new pass over the images.
__global__ void reset()
{
match=0; match_corr=0;
}
__device__ double w_conv1[Nw][Nw], w_conv2[Nw][Nw];
__device__ double w_conv3[Nw][Nw], w_conv4[Nw][Nw];
__device__ double w_conv5[Nw][Nw], w_conv6[Nw][Nw];
__device__ double w_conv7[Nw][Nw], w_conv8[Nw][Nw];
__device__ double w_conv9[Nw][Nw], w_conv10[Nw][Nw];
__device__ double w_conv11[Nw][Nw], w_conv12[Nw][Nw];
// Single-thread (<<<1,1>>>) setup kernel that unpacks the flattened host
// weight buffers into the device-side tables:
//  - d_wts (N1*N2 values) -> the fully-connected weight matrix `weight`;
//  - c_wts (12 stacked Nw x Nw blocks) -> the twelve convolution kernels
//    w_conv1 .. w_conv12, dispatched through a pointer table instead of the
//    original twelve-way if/else chain.
__global__ void initialize2D(double *d_wts, double *c_wts)
{
for (int i = 0; i < N1; i++) {
for (int j = 0; j < N2; j++) {
weight[i][j] = d_wts[i * N2 + j];
}
}
// bank[b] points at the b-th convolution kernel's rows.
double (*bank[12])[Nw] = {
w_conv1, w_conv2, w_conv3, w_conv4, w_conv5, w_conv6,
w_conv7, w_conv8, w_conv9, w_conv10, w_conv11, w_conv12
};
for (int i = 0; i < (12 * Nw); i++) {
// global row i belongs to kernel i/Nw, local row i%Nw
for (int j = 0; j < Nw; j++) {
bank[i / Nw][i % Nw][j] = c_wts[i * Nw + j];
}
}
}
// Cross-correlates the Nw x Nw kernel w_conv1 with the p x q spike map at
// time step i (one thread per output pixel of the L x L feature map, valid
// positions only -- no padding) and writes the WT-scaled sum into feature-map
// slice 0 of I_in.
__global__ void convKernel1(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
double temp;
if(ix<L && iy<L) {
temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv1[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv2, writing feature-map slice 1 of I_in.
__global__ void convKernel2(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
double temp;
if(ix<L && iy<L) {
temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv2[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv3, writing feature-map slice 2 of I_in.
__global__ void convKernel3(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
double temp;
if(ix<L && iy<L) {
temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv3[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[2*L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv4, writing feature-map slice 3 of I_in.
__global__ void convKernel4(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
double temp;
if(ix<L && iy<L) {
temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv4[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[3*L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv5, writing feature-map slice 4 of I_in.
__global__ void convKernel5(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
double temp;
if(ix<L && iy<L) {
temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv5[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[4*L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv6, writing feature-map slice 5 of I_in.
__global__ void convKernel6(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
double temp;
if(ix<L && iy<L) {
temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv6[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[5*L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv7, writing feature-map slice 6 of I_in.
__global__ void convKernel7(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
double temp;
if(ix<L && iy<L) {
temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv7[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[6*L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv8, writing feature-map slice 7 of I_in.
__global__ void convKernel8(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
double temp;
if(ix<L && iy<L) {
temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv8[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[7*L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv9, writing feature-map slice 8 of I_in.
__global__ void convKernel9(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
if(ix<L && iy<L) {
double temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv9[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[8*L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv10, writing feature-map slice 9 of I_in.
__global__ void convKernel10(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
if(ix<L && iy<L) {
double temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv10[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[9*L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv11, writing feature-map slice 10 of I_in.
__global__ void convKernel11(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
if(ix<L && iy<L) {
double temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv11[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[10*L*L+ix*L+iy]=temp;
}
}
// Same as convKernel1 but applies w_conv12, writing feature-map slice 11 of I_in.
__global__ void convKernel12(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int x, y;
if(ix<L && iy<L) {
double temp=0.0;
for(x=0; x<Nw; x++) {
for(y=0; y<Nw; y++) {
temp+=WT*w_conv12[x][y]*img_spks[(ix+x)*q+iy+y][i];
}
}
I_in[11*L*L+ix*L+iy]=temp;
}
}
// Returns the elapsed time between two clock() samples in whole milliseconds
// (fractional milliseconds are truncated by the long conversion).
long timediff(clock_t t1, clock_t t2) {
    double ms = ((double)t2 - t1) / CLOCKS_PER_SEC * 1000;
    return (long)ms;
}
double h_wts[N1*N2], *d_wts;
double *dcwts;
double c_wts[12*Nw*Nw];
//////////storage varialbes///////////
//double V1[N1], V2[N2], I1[N1], I2[N2];
//unsigned char spk1[N1], spk2[N2];
//////////////////////////////////////
// Test harness: loads the trained fully-connected and convolution weights,
// the MNIST validation images, and the precomputed per-intensity spike
// trains; then simulates the 2-layer convolutional SNN image by image,
// scoring each response with Trained_chk.
int main(int argc, char *argv[])
{
int i;
// FIX: do not dereference argv[1] unconditionally -- the program crashed
// when launched without an argument. 'start' (starting image index) is
// parsed but, as in the original code, never used below.
int start = (argc > 1) ? atoi(argv[1]) : 0;
(void)start;
FILE *FWI; //to load initial wts and store final wts
FILE *conv_wt;
clock_t t1, t2;
long elapsed; //time_1pass=0;
FILE *F_test, *F_lif_spks;
//for concurrent execution of different kernels:
cudaStream_t stream2, stream3, stream4, stream5, stream6;
cudaStream_t stream7, stream8, stream9, stream10, stream11, stream12, stream13;
cudaStream_t stream14, stream15;
printf("starting program\n");
cudaSetDevice(6); //set the number of GPU you want to use for this simuluation
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
cudaStreamCreate(&stream4);
cudaStreamCreate(&stream5);
cudaStreamCreate(&stream6);
cudaStreamCreate(&stream7);
cudaStreamCreate(&stream8);
cudaStreamCreate(&stream9);
cudaStreamCreate(&stream10);
cudaStreamCreate(&stream11);
cudaStreamCreate(&stream12);
cudaStreamCreate(&stream13);
cudaStreamCreate(&stream14);
cudaStreamCreate(&stream15);
printf("testing the network trained with 60000 MNIST train images\n");
F_test = fopen("mnist_val.csv","r"); //load the csv files for MNIST dataset
//F_test = fopen("mnist_test.csv","r");
if(F_test == NULL)
{
perror("Error while opening file mnist_train.csv\n");
exit(EXIT_FAILURE);
}
FWI = fopen("wts_trained_Final.txt","r"); //load the trained weights for the particular epoch
if(FWI == NULL)
{
perror("Error while opening file wts_trained.csv\n");
exit(EXIT_FAILURE);
}
conv_wt=fopen("kernels_3x3.csv","r");
if(conv_wt==NULL) {
perror("Error while opening file kernel.csv\n");
exit(EXIT_FAILURE);
}
F_lif_spks = fopen("pixels_spks.csv","r");
if(F_lif_spks == NULL)
{
perror("Error while opening file pixels_spks.csv\n");
exit(EXIT_FAILURE);
}
printf("Total no. of neurons=%d, no. of synapses to be trained=%d\n",(N1+N2), N1*N2);
cudaMalloc((void**)&d_imgs_lin,sizeof(int)*(N_imgs*(p*q+1)));
cudaMalloc((void**)&d_wts,N1*N2*sizeof(double));
cudaMalloc((void**)&dcwts,12*Nw*Nw*sizeof(double));
cudaMalloc((void**)&pix_spks_d,256*M*sizeof(unsigned char));
//Read the initial weights:
for(int i=0; i<N1; i++) {
for(int j=0; j<N2; j++) {
fscanf(FWI,"%lf,",&h_wts[i*N2+j]);
}
}
fclose(FWI);
//Read the 12 convolution kernels:
for(int i=0; i<(12*Nw); i++) {
for(int j=0; j<Nw; j++) {
fscanf(conv_wt,"%lf,",&c_wts[i*Nw+j]);
}
}
fclose(conv_wt);
//Read the per-intensity LIF spike trains.
//BUGFIX: pix_spks_h is an unsigned char array, but the original code passed
//&pix_spks_h[i*M+j] straight to fscanf with "%d", which writes a full int
//through an unsigned char pointer and corrupts neighbouring bytes (UB).
//Read into an int and narrow explicitly.
for(int i=0; i<(256); i++) {
for(int j=0; j<M; j++) {
int spk_val = 0;
fscanf(F_lif_spks,"%d,",&spk_val);
pix_spks_h[i*M+j] = (unsigned char)spk_val;
}
}
fclose(F_lif_spks);
cudaMemcpy(pix_spks_d,pix_spks_h,256*M*sizeof(unsigned char),cudaMemcpyHostToDevice);
cudaMemcpy(d_wts,h_wts,N1*N2*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(dcwts,c_wts,12*Nw*Nw*sizeof(double),cudaMemcpyHostToDevice);
initialize2D<<<1,1>>>(d_wts,dcwts);
cudaDeviceSynchronize();
cudaFree(d_wts);
cudaFree(dcwts);
//Read the images from file:
for(int n=0;n<N_imgs;n++) {
for(int j=0;j<(p*q+1);j++) {
fscanf(F_test,"%d,",&test_set[n][j]);
}
}
fclose(F_test);
//convert 2D matrix to 1D for transfer to device:
for(int n=0; n<(N_imgs);n++) {
for(int j=0;j<(p*q+1);j++) {
img_lin[n*(p*q+1)+j]=test_set[n][j];
}
}
cudaMemcpy(d_imgs_lin,img_lin,sizeof(int)*(N_imgs*(p*q+1)),cudaMemcpyHostToDevice);
//call cuda kernel to read in the images:
img_readKernel<<<1,1>>>(d_imgs_lin);
cudaDeviceSynchronize();
cudaFree(d_imgs_lin);
//build the desired output spike train and its LPF trace
createDes<<<1,1>>>();
int NBlks=(N1/Nthrds)+1;
dim3 dimGrid(82,1,1);
dim3 dimBlock(100,10,1);
dim3 grid_syn(NBlks,10,1);
dim3 block_syn(Nthrds,1,1);
//convert the pixel values from 0 to 255 into spike trains
convert2Spks<<<1,1>>>(pix_spks_d);
cudaDeviceSynchronize();
cudaFree(pix_spks_d);
dim3 convGrid(1,1,1);
dim3 convBlks(26,26,1);
double *d_success, *success, *corr_acc, *h_corr_acc;
d_success = (double*)malloc(sizeof(double));
h_corr_acc = (double*)malloc(sizeof(double));
cudaMalloc((void**)&success,sizeof(double));
cudaMalloc((void**)&corr_acc,sizeof(double));
//CPU time required for computation
t1 = clock();
int l=0; //image index
for(int n=0; n<max_epochs; n++) {
reset<<<1,1>>>();
for(l=0; l<N_imgs; l++) {
printf("l=%d image %d\n",l,test_set[l][0]);
//Clear all neuron/synapse variables
clear_vars<<<dimGrid,dimBlock>>>();
cudaDeviceSynchronize();
//simulate M time steps of 1 image
for(i=0; i<M; i++) {
pix2spks<<<1,784>>>(l,i);
/////////////////////////////////////////////////
cudaDeviceSynchronize();
//12 convolution feature maps run concurrently on separate streams
convKernel1<<<convGrid,convBlks,0,stream2>>>(i);
convKernel2<<<convGrid,convBlks,0,stream3>>>(i);
convKernel3<<<convGrid,convBlks,0,stream4>>>(i);
convKernel4<<<convGrid,convBlks,0,stream5>>>(i);
convKernel5<<<convGrid,convBlks,0,stream6>>>(i);
convKernel6<<<convGrid,convBlks,0,stream7>>>(i);
convKernel7<<<convGrid,convBlks,0,stream8>>>(i);
convKernel8<<<convGrid,convBlks,0,stream9>>>(i);
convKernel9<<<convGrid,convBlks,0,stream10>>>(i);
convKernel10<<<convGrid,convBlks,0,stream11>>>(i);
convKernel11<<<convGrid,convBlks,0,stream12>>>(i);
convKernel12<<<convGrid,convBlks,0,stream13>>>(i);
////////////////////////////////////////////////////////
cudaDeviceSynchronize();
LifKernel1<<<NBlks,Nthrds>>>(i);
/////////////////////////////////////////////////////////
cudaDeviceSynchronize();
SynKernel<<<dimGrid,dimBlock,0,stream14>>>(i);
Lat_curr<<<1,10,0,stream15>>>(i);
/////////////////////////////////////////////////////////
cudaDeviceSynchronize();
IsynRedKernel<<<grid_syn,block_syn>>>(i);
reduce1<<<10,1>>>(i);
//////////////////////////////////////////////////
cudaDeviceSynchronize();
LifKernel2<<<1,10>>>(i);
cudaDeviceSynchronize();
}
//verify if image is correctly identified as per onoff criteria:
Trained_chk<<<1,10>>>(l,success,corr_acc);
} //end of loops over N_imgs
cudaMemcpy(d_success,success,sizeof(double),cudaMemcpyDeviceToHost);
cudaMemcpy(h_corr_acc,corr_acc,sizeof(double),cudaMemcpyDeviceToHost);
printf("success till %d epochs using count=%f\n",(n/2),*d_success);
printf("Accuracy till %d epochs using correlation=%f\n",(n/2),*h_corr_acc);
} //end of n epochs loops
cudaDeviceSynchronize();
t2 = clock();
elapsed = timediff(t1,t2);
printf("Elapsed time: %ld ms\n", elapsed);
cudaFree(success);
cudaFree(corr_acc); // FIX: was leaked
free(d_success);    // FIX: host buffers were leaked
free(h_corr_acc);
cudaDeviceReset();
return(0);
}
|
23,549 | #include "includes.h"
// Element-wise in-place addition a[i] += b[i] over arrays of float4
// (component-wise on x/y/z/w), one thread per element with a bounds guard.
__global__ void sum( float4 *a, float4 *b, int N ) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if( idx >= N )
        return;
    float4 acc = a[idx];
    const float4 addend = b[idx];
    acc.x += addend.x;
    acc.y += addend.y;
    acc.z += addend.z;
    acc.w += addend.w;
    a[idx] = acc;
}
23,550 | #include <cuda_runtime.h>
#include <iostream>
#include "bfs.cuh"
using namespace std;
int main()
{
    // Small fixed test graph. V appears to hold per-vertex edge-list offsets
    // (9 entries = 8 vertices + end sentinel) and E the edge destinations --
    // confirm against cudaBfs in bfs.cuh. C starts as 0 for the source and
    // INF elsewhere, and receives the computed distances.
    int V[] = {0, 1, 2, 3, 5, 6, 7, 8, 9}; // the last entry is a sentinel, not a vertex
    int E[] = {1, 3, 1, 2, 4, 5, 7, 4, 6};
    int C[] = {0, INF, INF, INF, INF, INF, INF, INF};
    const int numVertices = 8;
    const int numEdges = 9;
    cudaBfs(V, E, C, numVertices, numEdges, 0);
    cout << "Shortest distances are:" << endl;
    for (int v = 0; v < numVertices; ++v)
        cout << v << ": " << C[v] << endl;
    return 0;
}
|
23,551 | /* File: matmult-cuda-double.cu
*
* Purpose:
*
* Input:
*
* Output:
*
* Compile: nvcc -o matmult-cuda-double.o matmult-cuda-double.cu
*
* Run: ./matmult-cuda-double.o
*
* Algorithm:
*
* Note:
*
* */
#include <stdio.h>
#include <cuda_runtime.h>
// Despite its name, this kernel computes one element of the matrix product
// C = A * B for N x N row-major matrices: each thread owns one flat output
// index and accumulates the dot product of its row of A with its column of B.
__global__ void VecAdd(double* A, double* B, double* C, int N)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= N * N)
        return;
    const int row = index / N;   // output row
    const int col = index % N;   // output column
    double acc = 0.0;
    for (int k = 0; k < N; k++)
        acc += A[row * N + k] * B[k * N + col];
    C[row * N + col] = acc;
}
// Host code
// Host code: multiplies two 2500x2500 random double matrices on the GPU
// using the VecAdd kernel (one thread per output element).
int main()
{
//Variables
int N; // Dimension of the square matrix.
int i; // Row index.
int j; // Column index.
size_t size; // Total size in bytes of one matrix.
double* h_A; // Matrix A on the host.
double* h_B; // Matrix B on the host.
double* h_C; // Matrix C (result) on the host.
double* d_A; // Matrix A in GPU memory.
double* d_B; // Matrix B in GPU memory.
double* d_C; // Matrix C (result) in GPU memory.
int Tam; // Number of output elements handled.
int NumHilos; // Threads per block.
int numBlock; // Number of blocks needed to cover the data.
//Assign values
N = 2500;
size = N * sizeof(double) * N;
//Host-side allocation
h_A = (double*)malloc(size);
h_B = (double*)malloc(size);
h_C = (double*)malloc(size);
//Device-side allocation
cudaMalloc(&d_A, size);
cudaMalloc(&d_B, size);
cudaMalloc(&d_C, size);
// launch configuration: ceiling division of N*N elements by the block size
Tam = N * N;
NumHilos = 1024;
numBlock = Tam / NumHilos;
if(Tam % NumHilos > 0) //If elements remain, add one more block
numBlock++;
// Fill arrays A and B.
// NOTE(review): rand() is never seeded, so every run produces the same
// matrices (deterministic -- possibly intentional for benchmarking).
for(i = 0;i < N;i++) //Row
for(j = 0;j < N;j++) // Column
{
h_A[j + i * N] = rand() % (11 * (i + 1)) * 1.12;
h_B[j + i * N] = rand() % (11 * (i + 1)) * 1.12;
}
//Copy the input matrices from CPU memory to GPU memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
// Invoke kernel (VecAdd actually computes the matrix product C = A * B)
VecAdd<<<numBlock, NumHilos >>>(d_A, d_B, d_C, N);
//Copy the result from GPU memory back to CPU memory
//(the blocking cudaMemcpy also synchronizes with the kernel)
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
/*
//Print matrix A
printf("Matriz A\n");
for(i = 0;i < N;i++)
{
for(j = 0;j < N;j++)
printf("%.2e ", h_A[j + i * N]);
printf("\n");
}
//Print matrix B
printf("Matriz B\n");
for(i = 0;i < N;i++)
{
for(j = 0;j < N;j++)
printf("%.2e ", h_B[j + i * N]);
printf("\n");
}
//Print matrix C
printf("Matriz C\n");
for(i = 0;i < N;i++)
{
for(j = 0;j < N;j++)
printf("%.2e ", h_C[j + i * N]);
printf("\n");
}*/
//Release all used memory.
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
// Free host memory
free(h_A);
free(h_B);
free(h_C);
}
|
23,552 | #include <iostream>
#include <math.h>
#include <sys/time.h> // provides resolution of 1 us
//Number of threads in one thread block
#define THREAD_NUM (256)
// cuda kernel to add the elements of two arrays: y[i] = x[i] + y[i].
// BUGFIX: the original body made EVERY thread loop over all n elements, so
// the <<<1, THREAD_NUM>>> launch in main performed THREAD_NUM concurrent,
// racy read-modify-writes of each y[i] (nondeterministic results).
// A grid-stride loop assigns each element to exactly one thread and is
// correct for any launch configuration.
__global__
void add(int n, float *x, float *y)
{
  int index = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for(int i = index; i < n; i += stride)
    y[i] = x[i] + y[i];
}
// Adds two 1M-element float arrays on the GPU using unified memory and
// reports the elapsed kernel time in milliseconds.
int main(void)
{
  timeval t1, t2;
  double elapsedTime;
  int N = 1 << 20;   // 1M elements
  float *x, *y;
  //Allocate Unified memory -accessible from both CPU or GPU
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));
  // initialize x and y arrays on the host
  for(int i = 0; i < N; i++)
  {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  // timestamp t1
  gettimeofday(&t1, NULL);
  //Run kernel on 1M elements
  add<<<1, THREAD_NUM>>>(N, x, y);
  // BUGFIX: synchronize BEFORE taking the second timestamp. Kernel launches
  // are asynchronous, so the original code measured only the launch
  // overhead, not the actual execution time.
  cudaDeviceSynchronize();
  // timestamp t2
  gettimeofday(&t2, NULL);
  // compute the elapsed time in millisec
  elapsedTime = (t2.tv_sec - t1.tv_sec) * 1000.0;   // sec to ms
  elapsedTime += (t2.tv_usec - t1.tv_usec) / 1000.0; // us to ms
  //Free memory
  cudaFree(x);
  cudaFree(y);
  std::cout << "Amount of time to add 1 Million elements (with CUDA cores): " << elapsedTime << " millisec." << std::endl;
  return 0;
}
23,553 | #include <stdio.h>
#include <assert.h>
#include <time.h>
#include <cuda.h>
#define BLOCK_LOW(id,p,n) ((id)*(n)/(p))l;
#define BLOCK_HIGH(id,p,n) (BLOCK_LOW((id)+1,p,n)-1);
#define BLOCK_SIZE(id,p,n) (BLOCK_LOW((id)+1,p,n)-BLOCK_LOW(id,p,n));
#define BLOCK_OWNER(index,p,n) (((p)*((index)+1)-1)/(n));
int inputvalues[8] = {12010991,3059810,17756755,7362131,25485528,4134948,15051971,16947626};
//int inputvalues[8] = {100,3059810,17756755,7362131,25485528,4134948,15051971,16947626};
// One sieve pass for a single candidate prime: d_marked[tid] initially holds
// its own index, and each thread zeroes its element when that value is
// divisible by 'prime'. Note the prime itself is also zeroed (prime % prime
// == 0); the host compensates by adding the count of small primes back in.
// NOTE(review): the grid is rounded up to whole 256-thread blocks but there
// is no `tid < size` guard, so threads of the last block read and write past
// the end of d_marked (out of bounds). The kernel has no size parameter, so
// fixing this requires changing both the signature and the call site.
__global__ void seive(int *d_marked,int prime)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int width=256;
int idx=threadIdx.x;
/*
__shared__ int low_value = blockIdx.x * blockDim.x + 0;
__shared__ int high_value = blockIdx.x * blockDim.x + blockDim.x;
__shared__ int first;
if (prime * prime > low_value)
first = prime * prime - low_value;
else {
if (!(low_value % prime)) first = 0;
else first = prime - (low_value % prime);
}*/
// NOTE(review): staging through shared memory buys nothing here -- each
// thread only reads back the one element it just wrote itself.
__shared__ int my_marked[256];
if( idx<width) my_marked[idx]=d_marked[tid];
//if ((d_marked[tid]%prime)==0) d_marked[tid]=0 ;
if ((my_marked[idx]%prime)==0) d_marked[tid]=0 ;
}
// Benchmark driver: for each input size, (1) computes the primes up to
// sqrt(size) with a sequential host sieve, (2) launches the GPU `seive`
// kernel once per small prime to zero their multiples, and (3) counts the
// surviving entries (primes > sqrt(size)), adding back the small-prime count
// for the total. Each size is timed 10 times and logged to a CSV file.
int main (int argc, char** argv)
{
int *h_marked;
int *d_marked;
int size;
int i,ii,jj;
int count=0;
int prime=2;
double t1, t2;
FILE *f;
f =fopen("seive_CUDA_results.txt","w");
fprintf(f,"index,counter,number,time,result\n");
for (ii = 0;ii<8;ii++)
{
size = inputvalues[ii];
for(jj = 0;jj<10;jj++)
{
// Allocate host memory; each element holds its own index
t1=clock();
h_marked = (int *)malloc (size*sizeof(int));
for (i = 0; i < size; i++) {h_marked[i] = i;}
// Allocate device memory
cudaMalloc((void **) &d_marked, size*sizeof(int));
// Copy host data to device
cudaMemcpy(d_marked,h_marked, size*sizeof(int),cudaMemcpyHostToDevice);
///------------------------------
// Sequential host sieve up to sqrt(size): find the "seed" primes whose
// multiples the GPU kernel will clear.
// NOTE(review): intarray and primearray are malloc'd every inner
// iteration and never freed -- a per-iteration host memory leak.
int myn;
myn=sqrt(size)+1;
int *intarray = (int *) malloc(myn * sizeof(int));
int *primearray = NULL;
int j = 0;
int primecounter = 0;
for(i = 0; i < myn; i++)
intarray[i] = 0;
/* at positions where there are multiples of i, mark as one */
for( i = 2; i * i < myn; i++ )
if( intarray[i]==0 )
for( j = i + i; j < myn; j += i )
intarray[j] = 1;
/* count all values that haven't been marked */
for( j = 2; j < myn; j++ )
if(intarray[j]==0)
primecounter++;
primearray = (int *) malloc(primecounter * sizeof(int));
for(i = 0; i < primecounter; i++)
primearray[i] = i;
/* store all prime numbers from intarray into primearray */
i=0;
for( j = 2; j < myn; j++ )
if(intarray[j]==0) {primearray[i] = j; i++;}
///-----------------------------------
// Launch Kernel: one pass per seed prime (grid rounded up; see the
// out-of-bounds note on the seive kernel itself)
/*dim3 dimGrid(1,1);
dim3 dimBlock(128,1);
seive <<< dimGrid,dimBlock >>> (d_marked,prime); */
int block_size = 256;
int n_blocks = size/block_size + (size%block_size == 0 ? 0:1);
for(i=0;i<primecounter;i++){
prime=primearray[i];
seive <<< n_blocks, block_size >>> (d_marked, prime);
}
// Copy device data to host (blocking copy also syncs with the kernels)
cudaMemcpy(h_marked,d_marked,size*sizeof(int),cudaMemcpyDeviceToHost);
//for (i = 0; i < size; i++) printf(" %d ",h_marked[i]);
// Survivors (non-zero entries from 4 up) are the primes > sqrt(size);
// the seed primes were zeroed by their own pass, hence "+ primecounter".
count=0;
for (i = 4; i < size; i++) if(h_marked[i]!=0) count++;
t2 = clock();
// Free memory locations
cudaFree(d_marked); free(h_marked) ;
fprintf(f,"%d,%d,%d,%.6f,%d\n",ii,jj,size,(t2-t1)/CLOCKS_PER_SEC,count+primecounter);
printf("%d,%d,%d %.6f seconds ",ii,jj,size,(t2-t1)/CLOCKS_PER_SEC);
printf(" # of primes is %d\n",count+primecounter);
}
}
fclose(f);
}
|
23,554 | /*
* =====================================================================================
*
* Filename: glsltest_cuda.cu
*
* Description:
*
* Version: 1.0
* Created: 2016年08月11日 18時15分02秒
* Revision: none
* Compiler: gcc
*
* Author: YOUR NAME (),
* Organization:
*
* =====================================================================================
*/
// One thread per vertex: advance the z component (index 3*t + 2) and wrap
// back to -1 once it passes +1.
// Fixed: use the float literal 0.025f -- the original 0.025 is a double,
// promoting the whole update to double precision on every call.
__global__
void moveVAO_cuda_kernel(float* v)
{
unsigned int tx = threadIdx.x;
int i = tx * 3 + 2;
v[i] += 0.025f;
v[i] = ( v[i] > 1.0f ) ? -1.0f : v[i];
}
// Host wrapper: one block, one thread per vertex (vc = vertex count).
// NOTE(review): vc must not exceed the device's max threads per block
// (typically 1024); the launch is not error-checked.
void moveVAO_cuda(float* v, int vc)
{
moveVAO_cuda_kernel<<<1, vc>>>(v);
}
|
23,555 | extern "C"
// Perceptron batch step: each thread scans its own batch of `noInputs`
// examples (step activation) and accumulates weight deltas; slot
// `inputSize` of the delta block holds the bias delta.
// NOTE(review): `weights` is shared by all threads while inputs/outputs
// are indexed per-thread -- presumably one independent batch per thread;
// confirm against the host-side data layout.
__global__ void deltasBatch(float *inputs, float *outputs, float *weights, float *weightsDeltas, int noInputs, int inputSize){
int gid = blockIdx.x * blockDim.x + threadIdx.x;
float sum=0;
int offsetDeltas = (inputSize+1)*gid;
int offsetInput = noInputs*inputSize*gid;
int offsetOutputs = noInputs*gid;
// Clear this thread's delta slot (inputSize weights + 1 bias).
for(int imageIndex=0;imageIndex<=inputSize;imageIndex++){
weightsDeltas[offsetDeltas+imageIndex]=0;
}
for (int i=0;i<noInputs;i++){
sum=0;
// Weighted sum of one example, plus the bias weight.
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
sum+=inputs[offsetInput+i*inputSize+imageIndex]*weights[imageIndex];
}
sum+=weights[inputSize];
// Step activation, then error = target - prediction.
if(sum>0) sum=1;
else sum=0;
sum=outputs[offsetOutputs+i]-sum;
if(sum!=0){
// Misclassified: accumulate the perceptron update.
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
weightsDeltas[offsetDeltas+imageIndex]+=sum*inputs[offsetInput+i*inputSize+imageIndex];
}
weightsDeltas[offsetDeltas+inputSize]+=sum;
}
}
}
extern "C"
// Perceptron step for a single example per thread: thread gid handles
// instance (gid + offsetInputImages) and writes its delta block.
// Same step-activation / target-minus-prediction rule as deltasBatch.
__global__ void deltasOne(float *inputs, float *outputs, float *weights, float *weightsDeltas, int offsetInputImages, int inputSize){
int gid = blockIdx.x * blockDim.x + threadIdx.x;
float sum=0;
int offsetDeltas = (inputSize+1)*gid;
int offsetInput = inputSize*(gid+offsetInputImages);
// Clear this thread's delta slot (inputSize weights + 1 bias).
for(int imageIndex=0;imageIndex<=inputSize;imageIndex++){
weightsDeltas[offsetDeltas+imageIndex]=0;
}
// Weighted sum of the single example, plus the bias weight.
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
sum+=inputs[offsetInput+imageIndex]*weights[imageIndex];
}
sum+=weights[inputSize];
// Step activation, then error = target - prediction.
if(sum>0) sum=1;
else sum=0;
sum=outputs[offsetInputImages+gid]-sum;
if(sum!=0){
weightsDeltas[offsetDeltas+inputSize]+=sum;
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
weightsDeltas[offsetDeltas+imageIndex]+=sum*inputs[offsetInput+imageIndex];
}
}
}
|
23,556 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort with context if the most recent CUDA API call or launch failed.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
// 8th-order 64-point 2D stencil (machine-generated, common-subexpression
// eliminated form).  Each thread produces FOUR output rows:
// out[j], out[j+1], out[j+2], out[j+3] at column i -- hence the 4x block
// stride in y and the in[j+5..j+7] reads.
// NOTE(review): the row length 8200 is hard-coded in the array casts; it
// must match the allocation done by host_code (N*N doubles) -- confirm
// N == 8200 for the intended problem size.
__global__ void j2d64pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) {
//Determing the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x) + 4;
int i = max(i0,4) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y) + 4;
int j = max(j0,4) + 4*(int)(threadIdx.y);
double (*in)[8200] = (double (*)[8200]) l_in;
double (*out)[8200] = (double (*)[8200]) l_out;
// Interior-point guard (bitwise & on bools is intentional/equivalent here).
if (i>=4 & j>=4 & i<=N-5 & j<=N-5) {
// Temporaries produced by the code generator; each _t_k_ is one shared
// partial sum, each outj*ic0 one of the four output accumulators.
double _t_1_;
double _t_10_;
double outjc0ic0;
double _t_3_;
double _t_2_;
double _t_0_;
double _t_8_;
double _t_5_;
double _t_12_;
double _t_23_;
double _t_16_;
double _t_32_;
double _t_36_;
double outjp2ic0;
double _t_9_;
double _t_7_;
double _t_4_;
double _t_11_;
double _t_20_;
double _t_6_;
double _t_13_;
double _t_33_;
double _t_22_;
double _t_15_;
double _t_26_;
double _t_18_;
double _t_38_;
double _t_29_;
double _t_27_;
double outjp3ic0;
double _t_35_;
double outjp1ic0;
double _t_14_;
double _t_21_;
double _t_25_;
double _t_17_;
double _t_28_;
double _t_37_;
double _t_30_;
double _t_39_;
double _t_19_;
double _t_31_;
double _t_24_;
double _t_34_;
// Generated accumulation sequence -- do NOT reorder: temporaries are
// interleaved across the four output rows to maximise reuse.
_t_1_ = -in[j-4][i-3];
_t_1_ += in[j-4][i+3];
_t_1_ -= in[j-3][i-4];
_t_10_ = in[j-3][i-4];
_t_1_ += in[j-3][i+4];
_t_10_ -= in[j-3][i+4];
_t_1_ += in[j+4][i-3];
_t_1_ -= in[j+4][i+3];
_t_1_ += in[j+3][i-4];
_t_1_ -= in[j+3][i+4];
outjc0ic0 = _t_1_ * 0.000136017;
_t_3_ = -in[j-4][i-1];
_t_3_ += in[j-4][i+1];
_t_3_ -= in[j-1][i-4];
_t_3_ += in[j-1][i+4];
_t_3_ += in[j+1][i-4];
_t_3_ -= in[j+1][i+4];
_t_3_ += in[j+4][i-1];
_t_3_ -= in[j+4][i+1];
outjc0ic0 += _t_3_ * 0.002856;
_t_2_ = in[j-4][i-2];
_t_2_ -= in[j-4][i+2];
_t_2_ -= in[j+4][i-2];
_t_2_ += in[j+4][i+2];
_t_2_ += in[j-2][i-4];
_t_2_ -= in[j-2][i+4];
_t_2_ -= in[j+2][i-4];
_t_2_ += in[j+2][i+4];
outjc0ic0 += _t_2_ * 0.000714;
_t_0_ = in[j-4][i-4];
_t_0_ -= in[j-4][i+4];
_t_0_ -= in[j+4][i-4];
_t_0_ += in[j+4][i+4];
outjc0ic0 += _t_0_ * 1.27449;
_t_8_ = -in[j-2][i-1];
_t_8_ += in[j-2][i+1];
_t_8_ -= in[j-1][i-2];
_t_8_ += in[j-1][i+2];
_t_8_ += in[j+2][i-1];
_t_8_ -= in[j+2][i+1];
_t_8_ += in[j+1][i-2];
_t_8_ -= in[j+1][i+2];
outjc0ic0 += _t_8_ * 0.16;
_t_5_ = in[j+2][i-3];
_t_5_ += -in[j-3][i-2];
_t_12_ = in[j-3][i-2];
_t_12_ += in[j-1][i-4];
_t_12_ -= in[j-1][i+4];
_t_12_ -= in[j+3][i-4];
_t_12_ += in[j+3][i+4];
_t_23_ = -in[j-2][i-1];
_t_23_ += in[j-2][i+1];
_t_23_ -= in[j+1][i-4];
_t_23_ += in[j+1][i+4];
_t_23_ += in[j+3][i-4];
_t_23_ -= in[j+3][i+4];
_t_16_ = in[j-2][i-1];
_t_16_ -= in[j-2][i+1];
_t_16_ -= in[j+2][i-3];
_t_16_ -= in[j+4][i-1];
_t_16_ += in[j+4][i+1];
_t_32_ = in[j-1][i-2];
_t_32_ -= in[j-1][i+2];
_t_32_ += in[j+1][i-4];
_t_32_ -= in[j+1][i+4];
_t_5_ += in[j-3][i+2];
_t_12_ -= in[j-3][i+2];
_t_36_ = in[j+2][i-3];
_t_36_ -= in[j+4][i-3];
_t_36_ += in[j+4][i+3];
_t_5_ -= in[j+2][i+3];
_t_16_ += in[j+2][i+3];
_t_36_ -= in[j+2][i+3];
_t_23_ += in[j+6][i-1];
_t_36_ -= in[j+6][i-1];
_t_23_ -= in[j+6][i+1];
_t_36_ += in[j+6][i+1];
_t_32_ -= in[j+7][i-2];
_t_32_ += in[j+7][i+2];
outjp2ic0 = _t_23_ * 0.002856;
_t_5_ += in[j+3][i-2];
_t_5_ -= in[j+3][i+2];
_t_5_ -= in[j-2][i-3];
_t_5_ += in[j-2][i+3];
outjc0ic0 += _t_5_ * 0.00762;
_t_9_ = in[j-1][i-1];
_t_9_ -= in[j-1][i+1];
_t_9_ -= in[j+1][i-1];
_t_9_ += in[j+1][i+1];
outjc0ic0 += _t_9_ * 0.64;
_t_7_ = -in[j+2][i-2];
_t_7_ += in[j+2][i+2];
_t_7_ += in[j-2][i-2];
_t_7_ -= in[j-2][i+2];
outjc0ic0 += _t_7_ * 0.04;
_t_4_ = -in[j+3][i-3];
_t_4_ += in[j-3][i-3];
_t_11_ = -in[j-3][i-3];
_t_11_ += in[j-2][i+4];
_t_11_ -= in[j-2][i-4];
_t_11_ += in[j+4][i-4];
_t_11_ -= in[j+4][i+4];
_t_20_ = in[j-2][i-4];
_t_20_ -= in[j-2][i+4];
_t_4_ -= in[j-3][i+3];
_t_11_ += in[j-3][i+3];
_t_4_ += in[j+3][i+3];
outjc0ic0 += _t_4_ * 0.00145161;
_t_6_ = in[j-1][i-3];
_t_6_ += in[j-3][i-1];
_t_13_ = -in[j-3][i-1];
_t_13_ += in[j+2][i-4];
_t_13_ -= in[j+2][i+4];
_t_33_ = -in[j-1][i-1];
_t_33_ += in[j-1][i+1];
_t_33_ -= in[j+2][i-4];
_t_33_ += in[j+2][i+4];
_t_33_ += in[j+4][i-4];
_t_33_ -= in[j+4][i+4];
_t_22_ = in[j-2][i-2];
_t_22_ -= in[j-2][i+2];
_t_22_ -= in[j+4][i-4];
_t_22_ += in[j+4][i+4];
_t_15_ = -in[j-2][i-2];
_t_15_ += in[j-2][i+2];
_t_15_ -= in[j-1][i-3];
_t_15_ += in[j+3][i-3];
_t_15_ -= in[j+3][i+3];
_t_15_ += in[j+4][i-2];
_t_15_ -= in[j+4][i+2];
_t_26_ = in[j-1][i-1];
_t_26_ -= in[j-1][i+1];
_t_26_ -= in[j+3][i-3];
_t_26_ += in[j+3][i+3];
_t_18_ = -in[j-1][i-1];
_t_18_ += in[j-1][i+1];
_t_18_ += in[j+2][i-2];
_t_18_ -= in[j+2][i+2];
_t_38_ = -in[j+1][i-1];
_t_38_ += in[j+1][i+1];
_t_38_ -= in[j+2][i-2];
_t_38_ += in[j+2][i+2];
_t_38_ += in[j+4][i-2];
_t_38_ -= in[j+4][i+2];
_t_29_ = in[j+1][i-1];
_t_29_ -= in[j+1][i+1];
_t_27_ = -in[j+4][i-2];
_t_27_ += in[j+4][i+2];
_t_6_ -= in[j+3][i-1];
_t_18_ += in[j+3][i-1];
_t_29_ -= in[j+3][i-1];
_t_6_ += in[j+3][i+1];
_t_18_ -= in[j+3][i+1];
_t_29_ += in[j+3][i+1];
outjp2ic0 += _t_29_ * 0.64;
_t_6_ -= in[j-3][i+1];
_t_13_ += in[j-3][i+1];
_t_13_ += in[j+5][i-1];
_t_26_ -= in[j+5][i-1];
_t_38_ += in[j+5][i-1];
_t_13_ -= in[j+5][i+1];
_t_26_ += in[j+5][i+1];
_t_38_ -= in[j+5][i+1];
outjp3ic0 = _t_38_ * 0.16;
_t_33_ += in[j+7][i-1];
_t_33_ -= in[j+7][i+1];
outjp3ic0 += _t_33_ * 0.002856;
_t_6_ -= in[j+1][i-3];
_t_26_ += in[j+1][i-3];
_t_35_ = -in[j+1][i-3];
_t_6_ += in[j+1][i+3];
_t_26_ -= in[j+1][i+3];
outjp2ic0 += _t_26_ * 0.03048;
_t_35_ += in[j+1][i+3];
_t_18_ -= in[j][i-2];
_t_27_ += in[j][i-2];
_t_35_ += -in[j][i-2];
_t_18_ += in[j][i+2];
_t_27_ -= in[j][i+2];
outjp2ic0 += _t_27_ * 0.04;
_t_35_ += in[j][i+2];
outjp1ic0 = _t_18_ * 0.16;
_t_22_ -= in[j+6][i-2];
_t_35_ += in[j+6][i-2];
_t_22_ += in[j+6][i+2];
_t_35_ -= in[j+6][i+2];
_t_6_ -= in[j-1][i+3];
outjc0ic0 += _t_6_ * 0.03048;
// Row j complete.
out[j][i] = outjc0ic0;
_t_15_ += in[j-1][i+3];
outjp1ic0 += _t_15_ * 0.00762;
_t_14_ = in[j-2][i-3];
_t_14_ -= in[j-2][i+3];
_t_14_ -= in[j+4][i-3];
_t_14_ += in[j+4][i+3];
outjp1ic0 += _t_14_ * 0.00145161;
_t_21_ = -in[j-2][i-3];
_t_21_ += in[j-2][i+3];
_t_21_ += in[j-1][i+4];
_t_21_ -= in[j-1][i-4];
_t_25_ = -in[j-1][i-2];
_t_25_ += in[j-1][i+2];
_t_25_ += in[j+4][i-3];
_t_25_ -= in[j+4][i+3];
_t_17_ = in[j-1][i-2];
_t_17_ -= in[j-1][i+2];
_t_17_ -= in[j+3][i-2];
_t_17_ += in[j+3][i+2];
outjp1ic0 += _t_17_ * 0.04;
_t_28_ = -in[j+1][i-2];
_t_28_ += in[j+1][i+2];
_t_28_ += in[j+3][i-2];
_t_28_ -= in[j+3][i+2];
_t_28_ += in[j+4][i-1];
_t_28_ -= in[j+4][i+1];
_t_37_ = in[j+1][i-2];
_t_37_ -= in[j+1][i+2];
_t_30_ = in[j-1][i-4];
_t_30_ -= in[j-1][i+4];
_t_39_ = in[j+2][i-1];
_t_39_ -= in[j+2][i+1];
_t_39_ -= in[j+4][i-1];
_t_39_ += in[j+4][i+1];
outjp3ic0 += _t_39_ * 0.64;
_t_19_ = -in[j+2][i-1];
_t_19_ += in[j+2][i+1];
_t_19_ += in[j][i-1];
_t_28_ += -in[j][i-1];
_t_36_ += in[j][i-1];
_t_19_ -= in[j][i+1];
outjp1ic0 += _t_19_ * 0.64;
_t_28_ += in[j][i+1];
outjp2ic0 += _t_28_ * 0.16;
_t_36_ -= in[j][i+1];
outjp3ic0 += _t_36_ * 0.03048;
_t_10_ -= in[j+5][i-4];
_t_21_ += in[j+5][i-4];
_t_32_ -= in[j+5][i-4];
_t_10_ += in[j+5][i+4];
outjp1ic0 += _t_10_ * 1.27449;
_t_21_ -= in[j+5][i+4];
_t_32_ += in[j+5][i+4];
outjp3ic0 += _t_32_ * 0.000714;
_t_12_ -= in[j+5][i-2];
_t_25_ += in[j+5][i-2];
_t_37_ -= in[j+5][i-2];
_t_12_ += in[j+5][i+2];
outjp1ic0 += _t_12_ * 0.000714;
_t_25_ -= in[j+5][i+2];
_t_37_ += in[j+5][i+2];
outjp3ic0 += _t_37_ * 0.04;
_t_30_ -= in[j+7][i-4];
_t_30_ += in[j+7][i+4];
outjp3ic0 += _t_30_ * 1.27449;
_t_13_ -= in[j][i-4];
_t_22_ += in[j][i-4];
_t_31_ = -in[j-1][i-3];
_t_31_ += in[j-1][i+3];
_t_31_ -= in[j][i-4];
_t_24_ = in[j-1][i-3];
_t_24_ -= in[j-1][i+3];
_t_13_ += in[j][i+4];
outjp1ic0 += _t_13_ * 0.002856;
_t_22_ -= in[j][i+4];
outjp2ic0 += _t_22_ * 0.000714;
_t_31_ += in[j][i+4];
_t_11_ += in[j+5][i-3];
_t_24_ -= in[j+5][i-3];
_t_35_ += in[j+5][i-3];
_t_11_ -= in[j+5][i+3];
outjp1ic0 += _t_11_ * 0.000136017;
_t_24_ += in[j+5][i+3];
outjp2ic0 += _t_24_ * 0.00145161;
_t_35_ -= in[j+5][i+3];
outjp3ic0 += _t_35_ * 0.00762;
_t_20_ -= in[j+6][i-4];
_t_31_ += in[j+6][i-4];
_t_20_ += in[j+6][i+4];
outjp2ic0 += _t_20_ * 1.27449;
_t_31_ -= in[j+6][i+4];
_t_31_ += in[j+7][i-3];
_t_31_ -= in[j+7][i+3];
outjp3ic0 += _t_31_ * 0.000136017;
_t_16_ += in[j][i-3];
_t_25_ -= in[j][i-3];
_t_34_ = in[j][i-3];
_t_16_ -= in[j][i+3];
outjp1ic0 += _t_16_ * 0.03048;
// Row j+1 complete.
out[j+1][i] = outjp1ic0;
_t_25_ += in[j][i+3];
outjp2ic0 += _t_25_ * 0.00762;
_t_34_ -= in[j][i+3];
_t_21_ += in[j+6][i-3];
_t_34_ -= in[j+6][i-3];
_t_21_ -= in[j+6][i+3];
outjp2ic0 += _t_21_ * 0.000136017;
// Row j+2 complete.
out[j+2][i] = outjp2ic0;
_t_34_ += in[j+6][i+3];
outjp3ic0 += _t_34_ * 0.00145161;
// Row j+3 complete.
out[j+3][i] = outjp3ic0;
}
}
// Allocate device buffers, run the 64-point stencil once over the N x N
// grid, and copy the result back to the host.
extern "C" void host_code (double *h_in, double *h_out, int N) {
    double *d_in;
    cudaMalloc (&d_in, sizeof(double)*N*N);
    check_error ("Failed to allocate device memory for in\n");
    double *d_out;
    cudaMalloc (&d_out, sizeof(double)*N*N);
    check_error ("Failed to allocate device memory for out\n");
    cudaMemcpy (d_in, h_in, sizeof(double)*N*N, cudaMemcpyHostToDevice);
    // 16x8 threads per block; the grid's y extent is divided by 4 because
    // each thread writes four output rows.
    dim3 blockconfig (16, 8);
    dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y));
    j2d64pt<<<gridconfig, blockconfig>>> (d_in, d_out, N);
    cudaMemcpy (h_out, d_out, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
    cudaFree (d_in);
    cudaFree (d_out);
}
|
23,557 | #include <iostream>
#include <math.h>
// Kernel function to determine depth of mandelbrot at cr, ci
// Escape-time iteration z <- z^2 + c for c = (cr, ci).  Returns the
// iteration index at which |z|^2 exceeds 4, or maxDepth if it never does.
__device__ unsigned int mandelDepth(float cr, float ci, int maxDepth)
{
    float re = 0.0f;
    float im = 0.0f;
    float reSqr = 0.0f;
    float imSqr = 0.0f;
    unsigned int it = 0;
    while (it < maxDepth)
    {
        // im_new = 2*re*im + ci, built exactly as the squared terms allow
        // reuse of reSqr/imSqr from the previous round.
        im = re * im;
        im += im + ci;
        re = reSqr - imSqr + cr;
        reSqr = re * re;
        imSqr = im * im;
        if (reSqr + imSqr > 4.0f) break;
        ++it;
    }
    return it;
}
// Two-tone palette: points that never escaped (inside the set) are black,
// everything else is white.
__device__ float3 color(unsigned int depth, unsigned int maxDepth)
{
    float v = (depth == maxDepth) ? 0.0f : 1.0f;
    return make_float3(v, v, v);
}
// One block per image row (gridDim.x = ny) and one thread per column
// (blockDim.x = nx); each thread writes one RGB triple into `buffer`.
__global__ void mandel(float* buffer,
                       float xMin, float xMax,
                       float yMin, float yMax,
                       unsigned int maxDepth)
{
    int cols = blockDim.x;
    int rows = gridDim.x;
    float stepX = (xMax - xMin) / cols;
    float stepY = (yMax - yMin) / rows;
    // Sample at the pixel centre (+0.5).
    float cr = xMin + (threadIdx.x + 0.5f) * stepX;
    float ci = yMin + (blockIdx.x + 0.5f) * stepY;
    float3 rgb = color(mandelDepth(cr, ci, maxDepth), maxDepth);
    int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    buffer[3 * pixel + 0] = rgb.x;
    buffer[3 * pixel + 1] = rgb.y;
    buffer[3 * pixel + 2] = rgb.z;
}
// Render a black/white Mandelbrot image to stdout in ASCII PPM (P3).
// NOTE(review): nx is used as the block size; it must stay <= the
// device's max threads per block (600 here with step=0.005 -- fine).
int main(void)
{
float xMin = -2.0f;
float xMax = 1.0f;
float yMin = -1.0f;
float yMax = 1.0f;
float step = 0.005f;
int nx = (xMax - xMin) / step;
int ny = (yMax - yMin) / step;
float* buffer;
// Allocate Unified Memory – accessible from CPU or GPU
cudaMallocManaged(&buffer, 3*nx*ny*sizeof(float));
// Run kernel on nx*ny elements on the GPU
mandel<<<ny, nx>>>(buffer,
xMin, xMax,
yMin, yMax,
100
);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Print the buffer out to command
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
// Rows are emitted bottom-up so +y points up in the image.
for (int j = ny-1; j >= 0; j--)
for (int i = 0; i < nx; i++)
{
float* triplet = buffer + 3*(j*nx + i);
int ir = int(255.99f * triplet[0]);
int ig = int(255.99f * triplet[1]);
int ib = int(255.99f * triplet[2]);
std::cout << ir << " " << ig << " " << ib << "\n";
}
// Free memory
cudaFree(buffer);
return 0;
}
|
23,558 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#define m(y,x) mapa[(y * cols) + x]
/*Definición de constantes*/
#define currentGPU 0 //El número más alto suele indicar la salida de vídeo
#define MAX 50
/* Grid coordinates of an antenna: row y, column x. */
typedef struct {
int y;
int x;
} Antena;
// Fill every cell of the distance map with the sentinel value `max`.
__global__ void gpu_init(int *mapad, int max, int size)
{
    // Flatten the 2D grid / 2D block coordinates into one linear index.
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int blockThreads = blockDim.x * blockDim.y;
    int blockRank = by * gridDim.x + bx;
    int position = blockRank * blockThreads + ty * blockDim.x + tx;
    if (position < size) mapad[position] = max;
}
// Lower each cell to the squared Manhattan distance to `antena` whenever
// the new antenna is closer than the value currently stored in the map.
__global__ void gpu_actualizar(int *mapad, int rows, int cols, Antena antena, int size)
{
int IDX_Thread = threadIdx.x;
int IDY_Thread = threadIdx.y;
int IDX_block = blockIdx.x;
int IDY_block = blockIdx.y;
int shapeGrid_X = gridDim.x;
int threads_per_block = blockDim.x * blockDim.y;
// Linearised global position of this thread.
int position = threads_per_block * ((IDY_block * shapeGrid_X)+IDX_block)+((IDY_Thread*blockDim.x)+IDX_Thread);
if(position<size)
{
int x,y;
y=(int)position/cols;
// Fixed: the column is position - y*cols.  The original subtracted
// y*rows, which only works when the map happens to be square.
x=position-y*cols;
int dist = abs(antena.x -x) + abs(antena.y - y);
int nuevadist = dist*dist;
if(nuevadist<mapad[position])
{
mapad[position] = nuevadist;
}
}
}
// Squared Manhattan distance from antenna `a` to the cell (y, x).
int manhattan(Antena a, int y, int x){
    int d = abs(a.x - x) + abs(a.y - y);
    return d * d;
}
// Demo driver: initialise a 5x5 distance map to MAX on the GPU, place one
// antenna at (2,2), update the map, and print it before and after.
// NOTE(review): `antenas`/`nAntenas` are allocated but never used, and
// `antenas` is never freed.
int main()
{
int rows, cols,nAntenas, i,j;
rows = 5;
cols = 5;
nAntenas = 1;
Antena *antenas;
if ((antenas = (Antena *) calloc(rows * cols, sizeof(Antena)))==NULL){ // map
printf("error\n");
exit (-1);
}
/* Declaration and initialisation of CPU (HOST) variables */
int *mapa;
/* Device-side map pointer */
int *mapad;
/* Allocate CPU memory */
if ((mapa=(int *) calloc(rows * cols, sizeof(int)))==NULL){ // map
printf("error\n");
exit (-1);
}
/* Allocate DEVICE (GPU) memory */
cudaMalloc( (void**) &mapad, sizeof(int) * (int) rows * cols);
/* Initialise the map */
int size = rows * cols;
// tam == 1 here: a single 128-thread block covers all 25 cells.
int tam = (int) ceil( ((float)(rows * cols))/size);
dim3 bloqdiminit(128,1);
dim3 griddiminit(tam,1);
gpu_init<<<griddiminit,bloqdiminit>>>(mapad,MAX,size);
/* Copy data from DEVICE back to HOST (note: this is D2H, not H2D) */
cudaMemcpy(mapa,mapad,sizeof(int) * rows*cols,cudaMemcpyDeviceToHost);
/* Wait for the DEVICE */
cudaDeviceSynchronize();
printf("matriz:\n");
for (i = 0; i<5;i++){
for (j=0;j<5;j++){
printf(" %d ",mapa[j*5+i]);
}
printf("\n");
}
printf("fin de la matriz\n");
Antena ant = {2,2};
gpu_actualizar<<<griddiminit,bloqdiminit>>>(mapad, rows, cols, ant, size);
cudaMemcpy(mapa,mapad,sizeof(int) * rows*cols,cudaMemcpyDeviceToHost);
/* Wait for the DEVICE */
cudaDeviceSynchronize();
printf("matriz2:\n");
for (i = 0; i<5;i++){
for (j=0;j<5;j++){
printf(" %d ",mapa[j*5+i]);
}
printf("\n");
}
printf("fin de la matriz2\n");
/* Free DEVICE memory */
cudaFree(mapad);
/* Free HOST memory */
free(mapa);
/* Reset the DEVICE */
cudaDeviceReset();
} //main
|
23,559 | #include "includes.h"
// Per-thread Gaussian elimination of one tridiagonal cluster: thread
// `index` rebuilds its clusterSize x clusterSize dense system from the
// packed diagonal / off-diagonal arrays, forward-eliminates it, then
// back-substitutes into x using the (already eliminated) rhs y.
// NOTE(review): `matrix` is a per-thread 180x180 float array in local
// memory (~129 KB per thread) -- extremely heavy; and it is indexed with
// GLOBAL indices gi..gi+clusterSize-1, so gi+clusterSize must be <= 180
// or the accesses run out of bounds.  `size` is unused, and the
// __syncthreads() has no effect (no shared memory is involved).
__global__ void GaussianEliminationGlobal(const int clusterSize,float *x, const float *diagonal_values , const float *non_diagonal_values ,float *y , const int size)
{
const int index = blockIdx.x * blockDim.x + threadIdx.x ;
const int gi = index * clusterSize;
float matrix[180][180]; //size of matrix
// Rebuild the dense cluster matrix: zeros plus the main diagonal...
for (int i = gi; i < gi + clusterSize;++i)
{
for (int j = gi; j < gi + clusterSize;++j)
{
matrix[i][j]=0;
}
matrix[i][i] = diagonal_values[i];
}
// ...and the super-/sub-diagonals from the interleaved packing.
for(int i = gi; i < gi + clusterSize - 1 ;++i)
{
matrix[i][i+1] = non_diagonal_values[2*i+1];
matrix[i+1][i] = non_diagonal_values[2*i+2];
}
// triangle form
for (int i = gi ; i < gi + clusterSize; ++i)
{
//for every row...
for (int j = i+1; j < gi + clusterSize; ++j)
{
//calculate ratio for every row below it using the triangular
double ratio = matrix[j][i] / matrix[i][i];
for(int k = gi; k < gi + clusterSize; ++k)
{
//Eliminate every column based on that ratio
matrix[j][k] = matrix[j][k] - (matrix[i][k] * ratio);
}
//elimination on the coefficient vector
y[j] = y[j] - (y[i] * ratio);
}
}
__syncthreads();
//Back substitution
for (int i = gi + clusterSize-1; i > gi-1; --i)
{
double current = 0;
// j starts at i: x[i] itself is still 0 on the first pass, so the
// diagonal term contributes nothing before division below.
for (unsigned int j = i; j < gi + clusterSize; ++j)
{
current = current + (matrix[i][j] * x[j]);
}
x[i] = (y[i] - current) / matrix[i][i];
}
} |
23,560 | #include<cuda.h>
#include<stdio.h>
#include<stdlib.h>
#include<iostream>
#define CHECK 0
const unsigned int SINGLE_PRECISION = 1;
const unsigned int DOUBLE_PRECISION = 0;
float *SMd, *SNd, *SPd;
double *DMd, *DNd, *DPd;
const unsigned int WIDTH = 1024;
//generate matrix
// Allocate a width x height row-major matrix of T filled with uniform
// pseudo-random values in [0, 1].
// Fixed: allocate sizeof(T) per element -- the original always reserved
// sizeof(double), silently over-allocating 2x for float matrices.
template<typename T>
T *GenMatrix(const unsigned int width, const unsigned int height)
{
	const unsigned int M_SIZE = width * height;
	T *matrix = (T*) malloc(M_SIZE * sizeof(T));
	for (unsigned int i = 0; i < height; i++) {
		for (unsigned int j = 0; j < width; j++) {
			matrix[i * width + j] = (rand() * 1.0) / RAND_MAX;
		}
	}
	return matrix;
}
//display matrix
// Dump the matrix row by row, tab-separated with 3 decimals; returns 1.
template<typename T>
int PrintMatrix(T *P, const unsigned int width, const unsigned int height)
{
	printf("\n");
	for (unsigned int row = 0; row < height; ++row) {
		for (unsigned int col = 0; col < width; ++col)
			printf("%.3f\t", P[row * width + col]);
		printf("\n");
	}
	return 1;
}
//Init data
// Allocate the three device matrices for the selected precision, upload
// the two operands (M, N) and zero the result buffer.
template<typename T>
void Init_Cuda(T *M, T *N, const unsigned int width, const unsigned int height, bool sp)
{
	const unsigned int bytes = width * height * sizeof(T);
	if (sp == SINGLE_PRECISION) {
		cudaMalloc((void**)&SMd, bytes);
		cudaMalloc((void**)&SNd, bytes);
		cudaMalloc((void**)&SPd, bytes);
		cudaMemcpy(SMd, M, bytes, cudaMemcpyHostToDevice);
		cudaMemcpy(SNd, N, bytes, cudaMemcpyHostToDevice);
		cudaMemset(SPd, 0, bytes);
	} else {
		cudaMalloc((void**)&DMd, bytes);
		cudaMalloc((void**)&DNd, bytes);
		cudaMalloc((void**)&DPd, bytes);
		cudaMemcpy(DMd, M, bytes, cudaMemcpyHostToDevice);
		cudaMemcpy(DNd, N, bytes, cudaMemcpyHostToDevice);
		cudaMemset(DPd, 0, bytes);
	}
}
//Free memory
// Release whichever precision's device buffers Init_Cuda allocated.
void Free_Cuda(bool sp)
{
	if (sp == SINGLE_PRECISION) {
		cudaFree(SMd);
		cudaFree(SNd);
		cudaFree(SPd);
		return;
	}
	cudaFree(DMd);
	cudaFree(DNd);
	cudaFree(DPd);
}
//kernel function
// Element-wise P = M + N over width*width entries using a grid-stride
// loop, so any grid size covers the whole matrix.
template<typename T>
__global__ void MatrixAddKernel(T *P, const T *M, const T *N, const unsigned int width)
{
	const unsigned int total = width * width;
	for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
	     idx < total;
	     idx += gridDim.x * blockDim.x) {
		P[idx] = M[idx] + N[idx];
	}
}
// CPU reference: element-wise P = M + N over an n x n matrix; returns 0.
template<typename T>
int MatrixAdd(T *P, const T *M, const T *N, const unsigned int n)
{
	const unsigned int total = n * n;
	for (unsigned int idx = 0; idx < total; ++idx)
		P[idx] = M[idx] + N[idx];
	return 0;
}
// Compare the kernel result KP against the CPU reference CP (n x n).
// Returns 1 when every element matches within tolerance, else prints the
// first mismatching pair and returns 0.
// Fixed: use std::fabs -- the original called the integer abs() on a
// floating-point difference, truncating it to 0 and accepting any pair
// of values that differed by less than 1.0.
template<typename T>
int Check(const T *KP, const T *CP, const unsigned int n)
{
	const T e = (T)0.001;
	for (unsigned int i = 0; i < n; i++) {
		for (unsigned int j = 0; j < n; j++) {
			if (std::fabs(KP[i * n + j] - CP[i * n + j]) > e) {
				printf("%.10f %.10f\n", KP[i * n + j], CP[i * n + j]);
				return 0;
			}
		}
	}
	return 1;
}
// Benchmark driver: parse [matrix_size] [single|double] [divide_val]
// [num_threads], run one element-wise matrix add on the GPU in the chosen
// precision, time it with CUDA events, and (when CHECK==1) verify the
// result against a CPU reference.
int main(int argc, char * argv[])
{
bool sp = 1;
float *SM, *SN, *SKP, *SCP;
double *DM, *DN, *DKP, *DCP;
cudaEvent_t start, stop;
float elapsedTime;
unsigned int width;
width = WIDTH;
//create number of blocks and number of threads
int Thr = 128;
dim3 block(Thr, 1, 1);
dim3 grid(((width*width)+ Thr - 1) / Thr, 1, 1);
if (argc != 5)
{
/* We print argv[0] assuming it is the program name */
printf("Wrong parameters. Please use the following format for running.\n");
printf(" Usage: %s %s %s %s %s", argv[0], "[matrix_size]", "[single|double]", "[divide_val]", "[num_threads]\n");
exit(EXIT_FAILURE);
} else {
width = atoi(argv[1]);
sp = atoi(argv[2]);
block.x = atoi(argv[4]);
// divide_val deliberately undersizes the grid; the grid-stride kernel
// still covers every element.
grid.x = ((width*width)/atoi(argv[3]) + block.x - 1) / block.x;
if(atoi(argv[2])!=0)
sp = SINGLE_PRECISION;
else
sp = DOUBLE_PRECISION;
}
//for using MatrixMul_Kernel_Tiled_SM kernel
//block.x = TILE_WIDTH; block.y=TILE_WIDTH;
//grid.x = WIDTH/TILE_WIDTH; grid.y = WIDTH/TILE_WIDTH;
//initialize host memory
if(sp==SINGLE_PRECISION)
{
SM = GenMatrix<float>(width, width);
//PrintMatrix(M, width, width);
SN = GenMatrix<float>(width, width);
//PrintMatrix(N, width, width);
SKP = GenMatrix<float>(width, width);
SCP = GenMatrix<float>(width, width);
//initialize device memory
Init_Cuda<float>(SM, SN, width, width, SINGLE_PRECISION);
}
else
{
DM = GenMatrix<double>(width, width);
//PrintMatrix(M, width, width);
DN = GenMatrix<double>(width, width);
//PrintMatrix(N, width, width);
DKP = GenMatrix<double>(width, width);
DCP = GenMatrix<double>(width, width);
//initialize device memory
Init_Cuda<double>(DM, DN, width, width, DOUBLE_PRECISION);
}
//create cudaEvent start and stop to record elapsed time
cudaEventCreate(&start);
cudaEventCreate(&stop);
//record start time to start event
cudaEventRecord(start, 0);
//launch kernel
if(sp==SINGLE_PRECISION)
{
MatrixAddKernel<float><<<grid, block>>>(SPd, SMd, SNd, width);
}
else
{
MatrixAddKernel<double><<<grid, block>>>(DPd, DMd, DNd, width);
}
//record start time to stop event
cudaEventRecord(stop, 0);
//synchronize the stop event
cudaEventSynchronize(stop);
//calculate the elapsed time
cudaEventElapsedTime(&elapsedTime, start, stop);
//destroy the start and stop event
cudaEventDestroy(start);
cudaEventDestroy(stop);
//copy data from device memory to host memory
if(sp==SINGLE_PRECISION)
cudaMemcpy(SKP, SPd, width*width*sizeof(float), cudaMemcpyDeviceToHost);
else
cudaMemcpy(DKP, DPd, width*width*sizeof(double), cudaMemcpyDeviceToHost);
//PrintMatrix(P, width, width);
//print runtime
printf("[ %s ][ %4dx%4d ][ %10d blocks ][ %5d threads ]\t>\t[ %7.3f (ms) ]\n", ((sp==SINGLE_PRECISION)?"Single Precision":"Double Precision"), width, width, grid.x, block.x, elapsedTime);
#if (CHECK==1)
if(sp==SINGLE_PRECISION)
{
/*printf("M >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<float>(SM, width, width);
printf("N >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<float>(SN, width, width);
printf("KP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<float>(SKP, width, width);
*/
MatrixAdd<float>(SCP, SM, SN, width);
//printf("CP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
//PrintMatrix<float>(CP, width, width);
if(Check<float>(SKP, SCP, width))
printf("We do it.\n");
else
printf("Something is wrong.\n");
}
else
{
/*printf("M >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<double>(DM, width, width);
printf("N >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<double>(DN, width, width);
printf("KP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
PrintMatrix<double>(DKP, width, width);
*/
MatrixAdd<double>(DCP, DM, DN, width);
//printf("CP >>>>>>>>>>>>>>>>>>>>>>>>>>\n");
//PrintMatrix<double>(DCP, width, width);
if(Check<double>(DKP, DCP, width))
printf("We do it.\n");
else
printf("Something is wrong.\n");
}
#endif
//free host memory
if(sp==SINGLE_PRECISION)
{
free(SM);
free(SN);
free(SKP);
free(SCP);
//free device memory
Free_Cuda(sp);
}
else
{
free(DM);
free(DN);
free(DKP);
free(DCP);
//free device memory
Free_Cuda(sp);
}
return 0;
}
|
23,561 | #include "includes.h"
// err = one-hot(Y) - output, the N elements partitioned evenly across all
// launched threads (thread t owns [N*t/threads, N*(t+1)/threads)).
__global__ void makeError(float *err, float *output, unsigned int Y, const int N)
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int nthreads = blockDim.x * gridDim.x;
for (int k = N * tid / nthreads; k < N * (tid + 1) / nthreads; ++k) {
err[k] = (Y == k ? 1.0f : 0.0f) - output[k];
}
} |
23,562 | // CUDA code to find the maximum in an array
#include<algorithm>
#include<cfloat>
#include<cstdlib>
#include<iostream>
#include<vector>
const int SHARED_MEM = 256;
// Shared-memory max reduction.  Assumes blockDim.x is a power of two and
// <= SHARED_MEM.  Fixes vs. the original:
//  * the tree reduction now guards (tid < half) and calls __syncthreads()
//    every round, so it no longer races or reads the uninitialized /
//    out-of-range cache slots beyond blockDim.x;
//  * partials are kept in double (the original accumulated in float,
//    truncating the inputs) and compared with fmax, not fmaxf;
//  * the seed is -DBL_MAX instead of 0, so all-negative inputs work;
//  * only block 0 -- whose stride loop visits every element of arr --
//    publishes the result, removing the unsynchronized multi-block race
//    on *m that made the original's answer nondeterministic.
__global__ void maxFinder(double *arr, double *m, int N){
    __shared__ double cache[SHARED_MEM];
    const int tid = threadIdx.x;
    double best = -DBL_MAX;
    for (int k = blockDim.x * blockIdx.x + tid; k < N; k += blockDim.x)
        best = fmax(best, arr[k]);
    cache[tid] = best;
    __syncthreads();
    for (int half = blockDim.x / 2; half > 0; half >>= 1) {
        if (tid < half)
            cache[tid] = fmax(cache[tid], cache[tid + half]);
        __syncthreads();
    }
    if (blockIdx.x == 0 && tid == 0)
        *m = cache[0];
}
// Fill an array with random values, find the max on the GPU, and print
// it next to the std::max_element reference.
// NOTE(review): the final blocking cudaMemcpy provides the needed
// synchronization before reading h_max on the host.
int main(){
int N = 10240;
size_t size = N*sizeof(double);
std::vector<double> array(N);
for(auto& i:array){i = rand()%100;}
double h_max;
double h_max_check;
h_max_check = *std::max_element(array.begin(), array.end());
double *d_arr, *d_max;
cudaMalloc(&d_arr, size);
cudaMalloc(&d_max, sizeof(double));
cudaMemcpy(d_arr, array.data(), size, cudaMemcpyHostToDevice);
int threadsPerBlock = 64;
int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock;
maxFinder<<<blocksPerGrid, threadsPerBlock>>>(d_arr, d_max, N);
cudaMemcpy(&h_max, d_max, sizeof(double), cudaMemcpyDeviceToHost);
std::cout << "The max value in the array is: " << h_max << std::endl;
std::cout << "The max value with std library is: " << h_max_check << std::endl;
cudaFree(d_arr);
cudaFree(d_max);
return 0;
}
|
23,563 | #include "includes.h"
// Scale point i by its normalized responsibility and write it to dest:
// dest[i] = (gamma_i / Gamma_k) * X[i], one point per thread.
// Fixes: the inner loop variable shadowed the outer point index `i`
// (confusing and error-prone), and exp(a)/exp(b) is now computed as
// exp(a - b), which cannot overflow/underflow in the intermediates.
__global__ void kernCalcMu( const size_t numPoints, const size_t pointDim, const double* X, const double* loggamma, const double* GammaK, double* dest ) {
// Assumes a 2D grid of 1024x1 1D blocks
int b = blockIdx.y * gridDim.x + blockIdx.x;
int i = b * blockDim.x + threadIdx.x;
if(i >= numPoints) {
return;
}
const double a = exp(loggamma[i] - *GammaK);
const double* x = & X[i * pointDim];
double* y = & dest[i * pointDim];
for(size_t d = 0; d < pointDim; ++d) {
y[d] = a * x[d];
}
} |
23,564 |
extern "C"
// Walk the flattened decision tree for one instance (starting at iniIns
// inside `ins`) until a leaf is reached.  Arcs of node `n` live at
// indices n*MAX_NUM_ARCS .. n*MAX_NUM_ARCS+numbersOfArcs[n]-1; evalTypes
// selects the comparison (0: <=, 1: >, 2: ==) against vals.
// Returns (leafId << 8) | classValue packed in one int, or 0 when no arc
// matches (dead end).
__device__ int classify(float* ins, int iniIns, int* attributes, int* isLeaf, int* numbersOfArcs, int* evalTypes, float* vals, int* nodeIndices, int MAX_NUM_ARCS)
{
int actual = 0;
while(isLeaf[actual] == 0) //is not a leaf
{
int auxx = actual;
int att = attributes[actual];
float insVal = ins[iniIns + att]; //the actual value of the attribute for this instance
//System.out.println("Atributoo: " + att + "vaal :" + insVal);
for(int i = 0; i < numbersOfArcs[actual]; i++) //traverse each arc
{
int arcIndi = (actual * MAX_NUM_ARCS) + i; //the actual index of the arc
float arcVal = vals[arcIndi];
int evType = evalTypes[arcIndi];
//System.out.println("aaaarc :" + arcVal + "evvtype" + evType);
if(evType == 0)
{
if(insVal <= arcVal) //<=
{
actual = nodeIndices[arcIndi];
break;
}
else
{
continue;
}
}
else if(evType == 1)
{
if(insVal > arcVal) // >
{
actual = nodeIndices[arcIndi];
break;
}
else
continue;
}
else if(evType == 2)
{
if(insVal == arcVal) // =
{
actual = nodeIndices[arcIndi];
break;
}
else
continue;
}
}
if(auxx == actual) //there wasn't an appropiate path, None value found
{
// (debug) the walk would loop forever here without this bail-out
return 0;
}
}
//pack two integers in one using bitwise operations 8388607 possible leaves, 256 possible class values
int auxPack = isLeaf[actual] << 8; // reserve 1 byte for class value
return (auxPack | attributes[actual]); //or bitewise pack
}
extern "C"
// Each thread classifies `chunk` consecutive instances and records the
// indices of misclassified ones (and the predicted leaf ids) in its own
// slot of res/resHojas; used[tid] reports how many entries it wrote.
// The class value is stored in the last attribute of each instance.
__global__ void hilo(float* ins, int insSize, int chunk, int* attributes, int* isLeaf, int* numbersOfArcs, int* evalTypes, float* vals, int* nodeIndices, int MAX_NUM_ARCS, int NIntances, int* res, int* used, int* resHojas)
{
// Each thread computes one element of C
// by accumulating results into Cvalue
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int i = tid * chunk; //begin to save index in own slot, i points to the instance index
int fin = i + chunk;
int resi = i;
while(i < NIntances && i < fin)
{
int iniIns= i * insSize;
int auxPredicted = classify(ins, iniIns, attributes, isLeaf, numbersOfArcs, evalTypes, vals, nodeIndices, MAX_NUM_ARCS); //its necessesary to unpack
int predicted = auxPredicted & 255; //bitwise unpack
int actual = ins[iniIns + insSize-1];
if(predicted != actual) //counter example
{
res[resi] = i; //this index was a counter example
resHojas[resi] = auxPredicted >> 8; //unpack leaf index
resi++;
}
i++;
}
//if(tid < NIntances)
used[tid] = resi - (tid * chunk); //how many results does this thread saved
}
//extracts the size needed for the counter examples set, to allocate memory for res
// Serial sum over the per-thread result counts; intended for a
// single-thread launch (every launched thread writes the same total).
extern "C"
__global__ void countSize(int* used, int threads, int* res)
{
    int total = 0;
    for (int t = 0; t < threads; t++)
        total += used[t];
    res[0] = total;
}
/*
Compact the gappy per-thread result slots (each thread owns `chunk`
entries, only the first used[t] of which are valid) into dense output
arrays.  Serial; intended for a single-thread launch.
*/
extern "C"
__global__ void genResult(int* resHuecos, int* resHojas, int* used, int threads, int chunk, int* res, int* resCounterHojas)
{
    int out = 0;
    int base = 0;    // start of thread t's slot inside resHuecos
    for (int t = 0; t < threads; t++)
    {
        for (int k = 0; k < used[t]; k++)
        {
            res[out] = resHuecos[base + k];
            resCounterHojas[out] = resHojas[base + k];
            out++;
        }
        base += chunk;
    }
}
//removes counter examples
// Serial compaction (single-thread launch): copy every instance whose
// index is NOT listed in counterIndis into newData.  counterIndis must be
// sorted ascending -- the walk only compares against the next unmatched
// entry.
// NOTE(review): once every counter example is consumed,
// counterIndis[indiCounter] reads one past the list; confirm the host
// pads the array or that this cannot happen before i reaches lenData.
extern "C"
__global__ void filter(int* counterIndis, float* data, int lenData, int lenIns, float* newData)
{
int indiCounter = 0;
int actual = 0;
int resi = 0;
for(int i = 0; i < lenData; i++)
{
if(i == counterIndis[indiCounter]) //don't copy
{
indiCounter++;
}
else //copy the instance
{
for(int j = 0; j < lenIns; j++)
{
newData[resi+j] = data[actual+j];
}
resi+=lenIns;
}
actual += lenIns;
}
}
//removes counter examples
// Parallel compaction: thread tid handles `chunk` consecutive instances.
// Its output base is derived from how many counter examples earlier
// threads removed (prefix sum of used[] recomputed per thread, O(tid)).
// counterIndis must be sorted ascending, and used[] must be consistent
// with it -- both come from the hilo kernel.
extern "C"
__global__ void filterParallel(int* counterIndis, int* used, float* data, int lenData, int lenIns, int chunk, int threads, float* newData)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
//determine search space
int iniIndi = 0;
int iniChunk = tid * chunk;
if(iniChunk >= lenData) //you can't do anything
return;
for(int i = 0; i < tid; i++) //how many counter examples do the other thrads found
{
iniIndi += used[i];
}
// instances before tid minus the counter found multiplied by the len of the instance
int iniResi = ((tid * chunk) - iniIndi) * lenIns;
// int finIndi = iniIndi+ used[tid];
int finChunk = iniChunk + chunk;
int indiCounter = iniIndi;
int actual = iniChunk * lenIns;
int resi = iniResi;
int i = iniChunk;
while(i < lenData && i < finChunk)
{
if(i == counterIndis[indiCounter]) //don't copy
{
indiCounter++;
}
else //copy the instance
{
for(int j = 0; j < lenIns; j++)
{
newData[resi+j] = data[actual+j];
//newData[resi+j] = 0.0;
}
resi+=lenIns;
}
actual += lenIns;
i++;
}
}
//compute how many counters are per leaf, pos 0 is not used
extern "C"
// Histogram: res[h] = number of counter examples whose leaf index is h.
// `res` must have `hojas` entries; position 0 is unused by convention.
// Serial kernel: launch with <<<1,1>>>.
__global__ void countersForLeaf(int numCounters, int* hojasCounters, int hojas, int*res)
{
    // clear the histogram
    for (int h = 0; h < hojas; h++)
        res[h] = 0;
    // bump the bin of each counter example's leaf
    for (int c = 0; c < numCounters; c++)
        res[hojasCounters[c]] += 1;
}
//compute how many counter examples to consider per leaf accoriding to a %
extern "C"
// Decides how many counter examples to keep per leaf: `percent` of the
// leaf's total (truncated to int), unless that fraction is `min` or fewer,
// in which case all of the leaf's counters are kept. count[0] receives the
// grand total. Serial kernel: launch with <<<1,1>>>.
__global__ void countersPerLeaf(int hojas, int* countersInHojas, float percent, int min, int* res, int* count)
{
    int total = 0;
    for (int h = 0; h < hojas; h++)
    {
        const int all = countersInHojas[h];
        const int scaled = all * percent;          // float product truncated, as before
        const int keep = (scaled > min) ? scaled : all;
        res[h] = keep;
        total += keep;
    }
    count[0] = total;
}
//filters counter examples
extern "C"
// Keeps at most countersPerHoja[h] counter examples per leaf h, in input
// order, writing the surviving indices to `res`. `contador` is device
// scratch holding the running per-leaf count. Serial kernel (<<<1,1>>>).
__global__ void filterCounters(int hojas, int numCounters, int* counters, int* hojasCounters, int* countersPerHoja, int* contador, int* res)
{
    // reset the per-leaf counters
    for (int h = 0; h < hojas; h++)
        contador[h] = 0;
    // keep each counter example while its leaf still has quota left
    int out = 0;
    for (int c = 0; c < numCounters; c++)
    {
        const int leaf = hojasCounters[c];
        if (contador[leaf] < countersPerHoja[leaf])
        {
            res[out] = counters[c];
            out++;
            contador[leaf]++;
        }
    }
}
extern "C"
// Each thread classifies `chunk` consecutive instances and stores its count
// of correctly classified ones in res[tid]. classify() packs the predicted
// class in its low 8 bits; the true label is the instance's last attribute.
__global__ void calcAcc(float* ins, int insSize, int chunk, int* attributes, int* isLeaf, int* numbersOfArcs, int* evalTypes, float* vals, int* nodeIndices, int MAX_NUM_ARCS, int NIntances, int* res)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int first = tid * chunk;          // first instance of this thread's slice
    const int last = first + chunk;         // one past the slice end
    int hits = 0;
    for (int i = first; i < NIntances && i < last; i++)
    {
        const int base = i * insSize;       // offset of instance i in `ins`
        const int packed = classify(ins, base, attributes, isLeaf, numbersOfArcs, evalTypes, vals, nodeIndices, MAX_NUM_ARCS);
        const int predicted = packed & 255; // bitwise unpack of the low byte
        const int actual = ins[base + insSize - 1];
        if (predicted == actual)
            hits++;
    }
    res[tid] = hits;
}
//extracts the total sum computed by each thread
extern "C"
// Sums the per-thread hit counts produced by calcAcc into res[0].
// Serial kernel: launch with <<<1,1>>>.
__global__ void totalAcc(int* sums, int threads, int* res)
{
    int total = 0;
    int i = 0;
    while (i < threads) {
        total += sums[i];
        ++i;
    }
    res[0] = total;
}
|
23,565 | #include <iostream>
#include <cstdlib>
#include <math.h>
using namespace std;
// Squared Euclidean distance from the point p to every entry of vec:
// res[i] = ||p - vec[i]||^2. Grid-stride loop, so any launch config works.
__global__
void p_vec_dist(int dim, float3 p, float3 *vec, float *res){
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < dim; i += stride){
        float dx = p.x - vec[i].x;
        float dy = p.y - vec[i].y;
        float dz = p.z - vec[i].z;
        res[i] = dx * dx + dy * dy + dz * dz;
    }
}
// For each point i of vec0, accumulates the squared Euclidean distance to
// every point of vec1: res[i] = sum_j ||vec0[i] - vec1[j]||^2.
// Fix: the original assigned `res[i] =` at the top of every j iteration, so
// each pass overwrote the previous one and res[i] ended up holding only the
// distance to vec1[dim-1]. The per-pair contributions are now accumulated.
// (NOTE(review): if the intent was a full dim x dim distance matrix rather
// than a per-row sum, res would need dim*dim entries — confirm with callers.)
__global__
void vec_vec_dist(int dim, float3 *vec0, float3 *vec1, float *res){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < dim; i += stride){
        float acc = 0.0f;
        for (int j = 0; j < dim; j++){
            float dx = vec0[i].x - vec1[j].x;
            float dy = vec0[i].y - vec1[j].y;
            float dz = vec0[i].z - vec1[j].z;
            acc += dx * dx + dy * dy + dz * dz;
        }
        res[i] = acc;
    }
}
// Fills dst[0..dim) with the constant float3 `val` using a grid-stride loop.
__global__
void fill_float3(int dim, float3 val, float3 *dst)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < dim; i += stride){
        dst[i].x = val.x;
        dst[i].y = val.y;
        dst[i].z = val.z;
    }
}
// Demo driver: fills two managed point sets, runs vec_vec_dist, and prints
// the largest |res[i]| as "Max error".
int main(void){
    int dim = 1 << 10;
    float3 *x, *y;
    float *res;
    cudaMallocManaged(&x, dim * sizeof(float3));
    cudaMallocManaged(&y, dim * sizeof(float3));
    cudaMallocManaged(&res, dim * sizeof(float));
    fill_float3<<<32, 32>>>(dim, make_float3(1.0, 2.0, 3.0), x);
    fill_float3<<<32, 32>>>(dim, make_float3(4.0, 5.0, 6.0), y);
    vec_vec_dist<<<32, 32>>>(dim, x, y, res);
    // fix: kernel launches are asynchronous — synchronize before the CPU
    // touches the managed buffer, otherwise the read races the GPU (and
    // faults outright on pre-Pascal concurrent-access rules).
    cudaDeviceSynchronize();
    float maxError = 0.0;
    // fix: the original double loop over (i, j) never used j; one pass over
    // res is equivalent and dim times cheaper.
    for (int i = 0; i < dim; i++)
        maxError = fmax(maxError, fabs(res[i] - 0.0f));
    cout << "Max error: " << maxError << endl;
    cudaFree(x);
    cudaFree(y);
    cudaFree(res);
    return 0;
}
23,566 | #include <iostream>
#include <cuda.h>
using namespace std;
// Adds the first element of b into the first element of a, in place.
// Intended for a <<<1,1>>> launch.
__global__ void AddIntsCUDA (int *a, int *b)
{
    *a = *a + *b;
}
// Copies two ints to the device, adds them with a single-thread kernel,
// copies the result back and prints it.
int main()
{
    int a = 5, b = 9;
    int *d_a, *d_b ;
    cudaMalloc(&d_a,sizeof(int));
    cudaMalloc(&d_b,sizeof(int));
    cudaMemcpy(d_a,&a,sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,&b,sizeof(int),cudaMemcpyHostToDevice);
    AddIntsCUDA<<<1, 1>>>(d_a,d_b);
    // the blocking device-to-host copy also synchronizes with the kernel
    cudaMemcpy(&a,d_a,sizeof(int),cudaMemcpyDeviceToHost);
    cout <<"The answer is "<<a<<endl;
    // fix: the original leaked both device allocations
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
|
23,567 | /*
* Overdamped Brownian particle in symmetric piecewise linear potential
*
* \dot{x} = -V'(x) + dichotomous noise
*
*/
#include <stdio.h>
#include <getopt.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define PI 3.14159265358979f
//model
__constant__ float d_fa, d_fb, d_mua, d_mub;
__constant__ int d_comp;
float h_fa, h_fb, h_mua, h_mub;
int h_comp;
//simulation
float h_trans;
int h_dev, h_block, h_grid, h_spp, h_samples;
long h_paths, h_periods, h_threads, h_steps, h_trigger;
__constant__ int d_spp, d_samples;
__constant__ long d_paths;
//output
char *h_domain;
char h_domainx;
float h_beginx, h_endx;
int h_logx, h_points, h_moments;
__constant__ char d_domainx;
__constant__ int d_points;
//vector
float *h_x, *h_xb, *h_fx, *h_dx;
float *d_x, *d_fx, *d_dx;
int *d_dcd, *d_dst;
unsigned int *h_seeds, *d_seeds;
curandState *d_states;
size_t size_f, size_i, size_ui, size_p;
curandGenerator_t gen;
// getopt_long option table: each long option maps to the single-letter key
// handled in parse_cla(). Every option takes a required argument.
static struct option options[] = {
{"fa", required_argument, NULL, 'a'},
{"fb", required_argument, NULL, 'b'},
{"mua", required_argument, NULL, 'c'},
{"mub", required_argument, NULL, 'd'},
{"comp", required_argument, NULL, 'e'},
{"dev", required_argument, NULL, 'f'},
{"block", required_argument, NULL, 'g'},
{"paths", required_argument, NULL, 'h'},
{"periods", required_argument, NULL, 'i'},
{"trans", required_argument, NULL, 'j'},
{"spp", required_argument, NULL, 'k'},
{"samples", required_argument, NULL, 'l'},
{"mode", required_argument, NULL, 'm'},
{"domain", required_argument, NULL, 'n'},
{"domainx", required_argument, NULL, 'o'},
{"logx", required_argument, NULL, 'p'},
{"points", required_argument, NULL, 'q'},
{"beginx", required_argument, NULL, 'r'},
{"endx", required_argument, NULL, 's'}
};
// Prints the command-line help describing all model, simulation and output
// parameters accepted by parse_cla().
void usage(char **argv)
{
printf("Usage: %s <params> \n\n", argv[0]);
printf("Model params:\n");
printf(" -a, --fa=FLOAT set the first state of the dichotomous noise 'F_a' to FLOAT\n");
printf(" -b, --fb=FLOAT set the second state of the dichotomous noise 'F_b' to FLOAT\n");
printf(" -c, --mua=FLOAT set the transition rate of the first state of dichotomous noise '\\mu_a' to FLOAT\n");
printf(" -d, --mub=FLOAT set the transition rate of the second state of dichotomous noise '\\mu_b' to FLOAT\n");
printf(" -e, --comp=INT choose between biased and unbiased Poissonian or dichotomous noise. INT can be one of:\n");
printf(" 0: biased; 1: unbiased\n");
printf("Simulation params:\n");
printf(" -f, --dev=INT set the gpu device to INT\n");
printf(" -g, --block=INT set the gpu block size to INT\n");
printf(" -h, --paths=LONG set the number of paths to LONG\n");
printf(" -i, --periods=LONG set the number of periods to LONG\n");
printf(" -j, --trans=FLOAT specify fraction FLOAT of periods which stands for transients\n");
printf(" -k, --spp=INT specify how many integration steps should be calculated for the smaller characteristic time scale of dichotomous noise\n");
printf(" -l, --samples=INT specify how many integration steps should be calculated for a single kernel call\n");
printf("Output params:\n");
printf(" -m, --mode=STRING sets the output mode. STRING can be one of:\n");
printf(" moments: the first moment <<v>>\n");
printf(" -n, --domain=STRING simultaneously scan over one or two model params. STRING can be one of:\n");
printf(" 1d: only one parameter\n");
printf(" -o, --domainx=CHAR sets the first domain of the moments. CHAR can be one of:\n");
printf(" a: fa; b: fb; m: mua; n: mub\n");
printf(" -p, --logx=INT choose between linear and logarithmic scale of the domainx\n");
printf(" 0: linear; 1: logarithmic\n");
printf(" -q, --points=INT set the number of samples to generate between begin and end\n");
printf(" -r, --beginx=FLOAT set the starting value of the domainx to FLOAT\n");
printf(" -s, --endx=FLOAT set the end value of the domainx to FLOAT\n");
printf("\n");
}
// Parses command-line options into the host-side globals and pushes the
// model/simulation constants to device __constant__ memory as they arrive.
void parse_cla(int argc, char **argv)
{
    int c, itmp;
    // fix: the optstring previously ended in "r:s" — the missing ':' after
    // 's' made getopt_long return --endx/-s with optarg == NULL, so
    // atof(optarg) dereferenced a null pointer.
    while( (c = getopt_long(argc, argv, "a:b:c:d:e:f:g:h:i:j:k:l:m:n:o:p:q:r:s:", options, NULL)) != EOF) {
        switch (c) {
            case 'a':
                h_fa = atof(optarg);
                cudaMemcpyToSymbol(d_fa, &h_fa, sizeof(float));
                break;
            case 'b':
                h_fb = atof(optarg);
                cudaMemcpyToSymbol(d_fb, &h_fb, sizeof(float));
                break;
            case 'c':
                h_mua = atof(optarg);
                cudaMemcpyToSymbol(d_mua, &h_mua, sizeof(float));
                break;
            case 'd':
                h_mub = atof(optarg);
                cudaMemcpyToSymbol(d_mub, &h_mub, sizeof(float));
                break;
            case 'e':
                h_comp = atoi(optarg);
                cudaMemcpyToSymbol(d_comp, &h_comp, sizeof(int));
                break;
            case 'f':
                // device selection takes effect immediately
                itmp = atoi(optarg);
                cudaSetDevice(itmp);
                break;
            case 'g':
                h_block = atoi(optarg);
                break;
            case 'h':
                h_paths = atol(optarg);
                cudaMemcpyToSymbol(d_paths, &h_paths, sizeof(long));
                break;
            case 'i':
                h_periods = atol(optarg);
                break;
            case 'j':
                h_trans = atof(optarg);
                break;
            case 'k':
                h_spp = atoi(optarg);
                cudaMemcpyToSymbol(d_spp, &h_spp, sizeof(int));
                break;
            case 'l':
                h_samples = atoi(optarg);
                cudaMemcpyToSymbol(d_samples, &h_samples, sizeof(int));
                break;
            case 'm':
                // only the "moments" output mode exists at present
                if ( !strcmp(optarg, "moments") ) {
                    h_moments = 1;
                }
                break;
            case 'n':
                h_domain = optarg;
                break;
            case 'o':
                h_domainx = optarg[0];
                cudaMemcpyToSymbol(d_domainx, &h_domainx, sizeof(char));
                break;
            case 'p':
                h_logx = atoi(optarg);
                break;
            case 'q':
                h_points = atoi(optarg);
                cudaMemcpyToSymbol(d_points, &h_points, sizeof(int));
                break;
            case 'r':
                h_beginx = atof(optarg);
                break;
            case 's':
                h_endx = atof(optarg);
                break;
        }
    }
}
// Seeds one curand state per thread from its host-generated seed.
__global__ void init_dev_rng(unsigned int *d_seeds, curandState *d_states)
{
    long id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(d_seeds[id], 0, 0, &d_states[id]);
}
// Deterministic drift -V'(x) of the symmetric piecewise linear potential:
// slope is -1 where -sin(pi x) < 0 and +1 otherwise.
__device__ float drift(float l_x)
{
    return (-sinf(PI*l_x) < 0.0f) ? -1.0f : 1.0f;
}
// Initializes the dichotomous noise for every path: resolves the scanned
// parameter for this thread, derives the integration step from the faster
// noise time scale, picks a random initial state (a or b with equal
// probability) and draws its exponential waiting time in units of steps.
__global__ void init_dich(float *d_dx, int *d_dcd, int *d_dst, curandState *d_states)
//init dichotomous noise
{
long idx = blockIdx.x * blockDim.x + threadIdx.x;
float l_dx;
curandState l_state;
//cache model parameters in local variables
l_state = d_states[idx];
float l_fa, l_fb, l_mua, l_mub;
int l_comp;
l_fa = d_fa;
l_fb = d_fb;
l_mua = d_mua;
l_mub = d_mub;
l_comp = d_comp;
// which of the d_points scanned parameter values this path belongs to
long ridx = (idx/d_paths) % d_points;
l_dx = d_dx[ridx];
// override the scanned parameter; comp==1 rebalances the other state so
// the noise stays unbiased
switch(d_domainx) {
case 'a':
l_fa = l_dx;
if (l_comp == 1) {
l_fb = -l_fa*l_mub/l_mua;
}
break;
case 'b':
l_fb = l_dx;
if (l_comp == 1) {
l_fa = -l_fb*l_mua/l_mub;
}
break;
case 'm':
l_mua = l_dx;
if (l_comp == 1) {
l_mub = -l_fb*l_mua/l_fa;
}
break;
case 'n':
l_mub = l_dx;
if (l_comp == 1) {
l_mua = -l_fa*l_mub/l_fb;
}
break;
}
//step size: d_spp steps per the smaller of the two mean waiting times
float l_dt, taua, taub;
int l_spp;
l_spp = d_spp;
taua = 1.0f/l_mua;
taub = 1.0f/l_mub;
if (taua < taub) {
l_dt = taua/l_spp;
} else {
l_dt = taub/l_spp;
}
//jump countdown: exponential waiting time converted to integration steps
int l_dcd, l_dst;
float rn;
rn = curand_uniform(&l_state);
if (rn < 0.5f) {
l_dst = 0;
l_dcd = (int) floorf( -logf( curand_uniform(&l_state) )/l_mua/l_dt + 0.5f);
} else {
l_dst = 1;
l_dcd = (int) floorf( -logf( curand_uniform(&l_state) )/l_mub/l_dt + 0.5f);
}
//write back noise state to the global memory
d_dcd[idx] = l_dcd;
d_dst[idx] = l_dst;
d_states[idx] = l_state;
}
// Advances the dichotomous noise by one step of length l_dt and returns the
// force contribution (state force * dt). When the countdown `dcd` has
// expired the noise flips state and a fresh countdown is drawn from the
// exponential waiting time of the *new* state; otherwise the countdown is
// simply decremented. New countdown/state are returned via ndcd/ndst.
__device__ float adapted_jump_dich(int &ndcd, int dcd, int &ndst, int dst, float l_fa, float l_fb, float l_mua, float l_mub, float l_dt, curandState *l_state)
{
    if (dcd > 0) {
        // no jump this step: count down and keep the present force
        ndcd = dcd - 1;
        return (dst == 0) ? l_fa*l_dt : l_fb*l_dt;
    }
    if (dst == 0) {
        // jump a -> b
        ndst = 1;
        ndcd = (int) floorf( -logf( curand_uniform(l_state) )/l_mub/l_dt + 0.5f );
        return l_fb*l_dt;
    }
    // jump b -> a
    ndst = 0;
    ndcd = (int) floorf( -logf( curand_uniform(l_state) )/l_mua/l_dt + 0.5f );
    return l_fa*l_dt;
}
// One integration step: predictor-corrector on the deterministic drift,
// then the dichotomous jump contribution is added. Result goes to corrl_x.
__device__ void predcorr(float &corrl_x, float l_x, curandState *l_state, \
int &ndcd, int dcd, int &ndst, int dst, float l_fa, float l_fb, float l_mua, float l_mub, float l_dt)
/* simplified weak order 2.0 adapted predictor-corrector scheme
( see E. Platen, N. Bruti-Liberati; Numerical Solution of Stochastic Differential Equations with Jumps in Finance; Springer 2010; p. 503, p. 532 )
*/
{
float l_xt, l_xtt, predl_x;
// predictor: Euler step with the drift at the current position
l_xt = drift(l_x);
predl_x = l_x + l_xt*l_dt;
// corrector pass 1: trapezoidal average of start/predicted drift
l_xtt = drift(predl_x);
predl_x = l_x + 0.5f*(l_xt + l_xtt)*l_dt;
// corrector pass 2, then add the dichotomous noise increment
l_xtt = drift(predl_x);
corrl_x = l_x + 0.5f*(l_xt + l_xtt)*l_dt + adapted_jump_dich(ndcd, dcd, ndst, dst, l_fa, l_fb, l_mua, l_mub, l_dt, l_state);
}
// Folds each trajectory back into the base period [0,2) and accumulates the
// subtracted multiple of 2 in d_fx, so that x + fx is always the unfolded
// position. This keeps single-precision positions accurate over long runs.
__global__ void fold(float *d_x, float *d_fx)
{
    long id = blockIdx.x * blockDim.x + threadIdx.x;
    float pos = d_x[id];
    float shift = floorf(pos/2.0f)*2.0f;   // whole number of periods
    d_x[id] = pos - shift;
    d_fx[id] = d_fx[id] + shift;
}
// Host-side inverse of fold(): adds the accumulated period shifts fx back
// onto the folded positions x for all h_threads trajectories.
void unfold(float *x, float *fx)
{
    for (int i = 0; i < h_threads; i++)
        x[i] += fx[i];
}
// Advances every path by d_samples predictor-corrector steps. Each thread
// resolves its scanned parameter value exactly as init_dich() does, derives
// the same step size, then integrates and writes the path state back.
__global__ void run_moments(float *d_x, float *d_dx, int *d_dcd, int *d_dst, curandState *d_states)
//actual moments kernel
{
long idx = blockIdx.x * blockDim.x + threadIdx.x;
float l_x, l_dx;
int l_dcd, l_dst;
curandState l_state;
//cache path and model parameters in local variables
l_x = d_x[idx];
l_dcd = d_dcd[idx];
l_dst = d_dst[idx];
l_state = d_states[idx];
float l_fa, l_fb, l_mua, l_mub;
int l_comp;
l_fa = d_fa;
l_fb = d_fb;
l_mua = d_mua;
l_mub = d_mub;
l_comp = d_comp;
//run simulation for multiple values of the system parameters
long ridx = (idx/d_paths) % d_points;
l_dx = d_dx[ridx];
// override the scanned parameter; comp==1 keeps the noise unbiased
switch(d_domainx) {
case 'a':
l_fa = l_dx;
if (l_comp == 1) {
l_fb = -l_fa*l_mub/l_mua;
}
break;
case 'b':
l_fb = l_dx;
if (l_comp == 1) {
l_fa = -l_fb*l_mua/l_mub;
}
break;
case 'm':
l_mua = l_dx;
if (l_comp == 1) {
l_mub = -l_fb*l_mua/l_fa;
}
break;
case 'n':
l_mub = l_dx;
if (l_comp == 1) {
l_mua = -l_fa*l_mub/l_fb;
}
break;
}
//step size & number of steps (same derivation as in init_dich)
float l_dt, taua, taub;
int i, l_spp, l_samples;
l_spp = d_spp;
taua = 1.0f/l_mua;
taub = 1.0f/l_mub;
if (taua < taub) {
l_dt = taua/l_spp;
} else {
l_dt = taub/l_spp;
}
l_samples = d_samples;
for (i = 0; i < l_samples; i++) {
predcorr(l_x, l_x, &l_state, l_dcd, l_dcd, l_dst, l_dst, l_fa, l_fb, l_mua, l_mub, l_dt);
}
//write back path parameters to the global memory
d_x[idx] = l_x;
d_dcd[idx] = l_dcd;
d_dst[idx] = l_dst;
d_states[idx] = l_state;
}
// Allocates host/device buffers, seeds the device RNG states and, in
// moments mode, builds the scanned-parameter grid d_dx of h_points values
// (linear or log-spaced between beginx and endx).
void prepare()
//prepare simulation
{
//grid size: round paths down to a block multiple; one thread per path/point
h_paths = (h_paths/h_block)*h_block;
h_threads = h_paths;
if (h_moments) h_threads *= h_points;
h_grid = h_threads/h_block;
//number of steps
if (h_moments) h_steps = h_periods*h_spp;
//host memory allocation
size_f = h_threads*sizeof(float);
size_i = h_threads*sizeof(int);
size_ui = h_threads*sizeof(unsigned int);
size_p = h_points*sizeof(float);
h_x = (float*)malloc(size_f);
h_fx = (float*)malloc(size_f);
h_seeds = (unsigned int*)malloc(size_ui);
//create & initialize host rng
curandCreateGeneratorHost(&gen, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen, time(NULL));
curandGenerate(gen, h_seeds, h_threads);
//device memory allocation
cudaMalloc((void**)&d_x, size_f);
cudaMalloc((void**)&d_fx, size_f);
cudaMalloc((void**)&d_seeds, size_ui);
cudaMalloc((void**)&d_dcd, size_i);
cudaMalloc((void**)&d_dst, size_i);
cudaMalloc((void**)&d_states, h_threads*sizeof(curandState));
//copy seeds from host to device
cudaMemcpy(d_seeds, h_seeds, size_ui, cudaMemcpyHostToDevice);
//initialization of device rng
init_dev_rng<<<h_grid, h_block>>>(d_seeds, d_states);
// NOTE(review): d_seeds is freed right after the asynchronous launch above;
// this relies on cudaFree synchronizing with the in-flight kernel — confirm.
free(h_seeds);
cudaFree(d_seeds);
//moments specific requirements
if (h_moments) {
h_trigger = h_steps*h_trans;
h_xb = (float*)malloc(size_f);
h_dx = (float*)malloc(size_p);
float dxtmp = h_beginx;
float dxstep = (h_endx - h_beginx)/h_points;
int i;
//set domainx: logx treats beginx/endx as exponents of 10
for (i = 0; i < h_points; i++) {
if (h_logx) {
h_dx[i] = exp10f(dxtmp);
} else {
h_dx[i] = dxtmp;
}
dxtmp += dxstep;
}
cudaMalloc((void**)&d_dx, size_p);
cudaMemcpy(d_dx, h_dx, size_p, cudaMemcpyHostToDevice);
}
}
// Pushes the host-side positions and fold accumulators to the device.
void copy_to_dev()
{
cudaMemcpy(d_x, h_x, size_f, cudaMemcpyHostToDevice);
cudaMemcpy(d_fx, h_fx, size_f, cudaMemcpyHostToDevice);
}
// Pulls the device-side positions and fold accumulators back to the host.
void copy_from_dev()
{
cudaMemcpy(h_x, d_x, size_f, cudaMemcpyDeviceToHost);
cudaMemcpy(h_fx, d_fx, size_f, cudaMemcpyDeviceToHost);
}
// Draws uniform initial positions x in (-1,1] for every trajectory, zeroes
// the fold accumulators, and pushes both arrays to the device.
void initial_conditions()
//set initial conditions for path parameters
{
    int i;
    curandGenerateUniform(gen, h_x, h_threads); //x in (0,1]
    for (i = 0; i < h_threads; i++) {
        h_x[i] = 2.0f*h_x[i] - 1.0f; //x in (-1,1]
    }
    // fix: memset takes an int fill byte; passing the float literal 0.0f
    // only worked through its accidental implicit conversion to 0
    memset(h_fx, 0, size_f);
    copy_to_dev();
}
// Computes the asymptotic average velocity <<v>> per scanned parameter
// value: displacement since the transient snapshot (h_xb), divided by the
// post-transient simulated time, averaged over paths.
void moments(float *av)
//calculate the first moment of v
{
float sx, sxb, taua, taub, dt, tmp;
int i, j;
copy_from_dev();
unfold(h_x, h_fx);
for (j = 0; j < h_points; j++) {
// sum final and transient-snapshot positions over this point's paths
sx = 0.0f;
sxb = 0.0f;
for (i = 0; i < h_paths; i++) {
sx += h_x[j*h_paths + i];
sxb += h_xb[j*h_paths + i];
}
//Dichotomous: recover the step size dt actually used for this point
// NOTE(review): if h_domainx is neither 'm' nor 'n' and both h_mua and
// h_mub are 0, none of the branches below assigns dt and it is used
// uninitialized in av[j] — confirm callers always set a transition rate.
if (h_domainx == 'm') {
taua = 1.0f/h_dx[j];
taub = 1.0f/h_mub;
if (h_comp) {
tmp = 1.0f/(-h_fb*h_dx[j]/h_fa);
} else {
tmp = taub;
}
if (taua <= tmp) {
dt = taua/h_spp;
} else {
dt = tmp/h_spp;
}
} else if (h_domainx == 'n') {
taua = 1.0f/h_mua;
taub = 1.0f/h_dx[j];
if (h_comp) {
tmp = 1.0f/(-h_fa*h_dx[j]/h_fb);
} else {
tmp = taua;
}
if (taub <= tmp) {
dt = taub/h_spp;
} else {
dt = tmp/h_spp;
}
} else if (h_mua != 0.0f || h_mub != 0.0f) {
taua = 1.0f/h_mua;
taub = 1.0f/h_mub;
if (taua < taub) {
dt = taua/h_spp;
} else {
dt = taub/h_spp;
}
}
// mean displacement / post-transient time, per path
av[j] = (sx - sxb)/( (1.0f - h_trans)*h_steps*dt )/h_paths;
}
}
// Releases every host and device resource allocated in prepare().
void finish()
{
    curandDestroyGenerator(gen);
    free(h_x);
    free(h_fx);
    cudaFree(d_x);
    cudaFree(d_fx);
    cudaFree(d_dcd);
    cudaFree(d_dst);
    cudaFree(d_states);
    // buffers that only exist in moments mode
    if (h_moments) {
        free(h_xb);
        free(h_dx);
        cudaFree(d_dx);
    }
}
// Driver: parses options (moments mode required), runs the simulation loop
// in h_samples-sized kernel batches, snapshots the state at the end of the
// transient, and prints the wall-clock time of the GPU phase.
int main(int argc, char **argv)
{
clock_t b, e;
double t;
parse_cla(argc, argv);
if (!h_moments) {
usage(argv);
return -1;
}
prepare();
initial_conditions();
//asymptotic long time average velocity <<v>>
if (h_moments) {
//float *av;
int i;
//av = (float*)malloc(size_p);
if ( !strcmp(h_domain, "1d") ) {
cudaDeviceSynchronize();
b = clock();
init_dich<<<h_grid, h_block>>>(d_dx, d_dcd, d_dst, d_states);
// each iteration advances all paths by h_samples steps, then refolds
for (i = 0; i < h_steps; i += h_samples) {
run_moments<<<h_grid, h_block>>>(d_x, d_dx, d_dcd, d_dst, d_states);
fold<<<h_grid, h_block>>>(d_x, d_fx);
// at the end of the transient, snapshot the unfolded positions (h_xb)
// as the reference for the velocity estimate in moments()
if (i == h_trigger) {
cudaMemcpy(h_xb, d_x, size_f, cudaMemcpyDeviceToHost);
cudaMemcpy(h_fx, d_fx, size_f, cudaMemcpyDeviceToHost);
unfold(h_xb, h_fx);
}
}
cudaDeviceSynchronize();
e = clock();
t = (double)(e - b) / CLOCKS_PER_SEC;
/*moments(av);
printf("#%c <<v>>\n", h_domainx);
for (i = 0; i < h_points; i++) {
printf("%e %e\n", h_dx[i], av[i]);
}*/
printf("%lf\n", t);
}
//free(av);
}
finish();
return 0;
}
|
23,568 | #include "includes.h"
// For each time sample tid0 (one thread per sample; Nthreads comes from
// includes.h), scans all Nfilt templates, keeps the one with the largest
// score Cf, and — if that best score exceeds Th^2 — records the score,
// the best amplitude and the winning template index.
__global__ void bestFilter(const double *Params, const float *data, const float *mu, const float *lam, const float *nu, float *xbest, float *err, int *ftype){
    int tid, tid0, i, bid, NT, Nfilt, ibest = 0;
    // xb initialized so it is defined on every path the compiler can see
    float Th, Cf, Ci, xb = 0.0f, Cbest = 0.0f, epu, cdiff;
    tid = threadIdx.x;
    bid = blockIdx.x;
    NT = (int) Params[0];
    Nfilt = (int) Params[1];
    Th = (float) Params[2];
    epu = (float) Params[8];
    tid0 = tid + bid * Nthreads;
    // fix: use logical && — the original's bitwise & happened to work on the
    // 0/1 results of the comparisons but obscured the intent
    if (tid0 < NT-1 && tid0 > 0){
        for (i = 0; i < Nfilt; i++){
            Ci = data[tid0 + NT * i] + mu[i] * lam[i];
            Cf = Ci * Ci / (lam[i] + 1.0f) - lam[i]*mu[i]*mu[i];
            // add the shift component
            cdiff = data[tid0+1 + NT * i] - data[tid0-1 + NT * i];
            Cf = Cf + cdiff * cdiff / (epu + nu[i]);
            if (Cf > Cbest){
                Cbest = Cf;
                xb = Ci - mu[i] * lam[i]; /// (lam[i] + 1);
                ibest = i;
            }
        }
        // only report detections above threshold
        if (Cbest > Th*Th){
            err[tid0] = Cbest;
            xbest[tid0] = xb;
            ftype[tid0] = ibest;
        }
    }
}
23,569 | #include "includes.h"
// NOTE(review): empty stub — this kernel body is a no-op; confirm it is an intentional placeholder before relying on it.
__global__ void __word2vecEvalNeg(int nrows, int ncols, int *WA, int *WB, float *A, float *B, float *Retval) {} |
23,570 | #include "includes.h"
// Sets data[0..count) to the constant `val`; one element per thread.
__global__ void initKernel(double* data, int count, double val) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= count)
        return;
    data[idx] = val;
}
23,571 | #include "includes.h"
// Element-wise addition of two size x size matrices: ans = M + N.
// 2D launch, one thread per element, guarded against ragged grid edges.
__global__ void matrixAddKernel1(float* ans, float* M, float* N, int size) {
    int r = blockIdx.y*blockDim.y + threadIdx.y;
    int c = blockIdx.x*blockDim.x + threadIdx.x;
    if (r >= size || c >= size)
        return;
    int idx = r*size + c;
    ans[idx] = M[idx] + N[idx];
}
23,572 | extern "C"
// Element-wise vector addition sum = a + b over n elements (JCuda entry point).
__global__ void JCudaVectorAddKernel(int n, int *a, int *b, int *sum) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    sum[idx] = a[idx] + b[idx];
}
23,573 | #include "includes.h"
// Logistic gradient for bounding-box supervision: for each (image, pixel,
// class-plane) cell, counts the boxes covering the back-projected source
// pixel and the boxes of this plane's class, then picks the gradient form
// accordingly. Launch: blockIdx.x = column, blockIdx.y = row, blockIdx.z =
// class plane, threadIdx.x = image within the batch.
__global__ void kBoundingBoxLogisticGrad( float* mat, int* bbox, int* label, int* seg, float* indices, float* width_offset, float* height_offset, int size, int width, int height, int depth, float scale_width, float scale_height, float* grad) {
const int color = blockIdx.z;
/*
const int numXBlocksPerImage = DIVUP(width, blockDim.x);
const int image_id = blockIdx.x / numXBlocksPerImage;
const int col = (blockIdx.x % numXBlocksPerImage) * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
*/
const int image_id = threadIdx.x;
const int col = blockIdx.x;
const int row = blockIdx.y;
// counts stay zero for threads outside the guarded region below
int num_bboxes = 0, num_bboxes_of_this_depth = 0, num_bboxes_of_this_depth_inside = 0;
if (col < width && row < height && image_id < size && color < depth) {
// map this destination pixel back to crop-adjusted source coordinates
int src_image_id = (int)indices[image_id];
int src_col = (int)(scale_width * col);
int src_row = (int)(scale_height * row);
// seg[] delimits this source image's slice of the bbox list
int start = seg[src_image_id];
int end = seg[src_image_id + 1];
int x1, y1, x2, y2, l, inside;
for (int box_id = start; box_id < end; box_id++) {
l = label[box_id];
// boxes are stored 4 ints apiece; offsets undo the crop shift
x1 = bbox[box_id << 2] - width_offset[image_id];
y1 = bbox[(box_id << 2) + 1] - height_offset[image_id];
x2 = bbox[(box_id << 2) + 2] - width_offset[image_id];
y2 = bbox[(box_id << 2) + 3] - height_offset[image_id];
inside = (src_col >= x1 && src_col <= x2 && src_row >= y1 && src_row <= y2) ? 1:0;
num_bboxes += inside;
num_bboxes_of_this_depth += (l == color) ? 1: 0;
num_bboxes_of_this_depth_inside += (inside == 1 && l == color) ? 1: 0;
}
}
unsigned long i = image_id + size * (col + width * (row + height * color));
// barrier is reached by every thread in the block (the guards above only
// skip the counting work, not this statement)
__syncthreads();
if (col < width && row < height && image_id < size && color < depth) {
if (num_bboxes > 0) {
grad[i] = (num_bboxes_of_this_depth_inside > 0) ? (mat[i] - 1) : 0;
} else {
grad[i] = (num_bboxes_of_this_depth > 0) ? mat[i] : 0;
}
}
} |
23,574 | #include "includes.h"
// Element-wise c = a + b over N elements (N from includes.h). Each block
// contributes one lane and strides by the grid size, so any block count
// covers the whole array.
__global__ void add(float *a, float *b, float *c) {
    for (int i = blockIdx.x; i < N; i += gridDim.x) {
        c[i] = a[i] + b[i];
    }
}
23,575 | #include "Objects.cuh"
// Device-side BVH node constructor: recursively builds a full binary tree
// of the given depth with device-heap `new` (leaves get null children).
// NOTE(review): device new can return nullptr if the device heap is
// exhausted — no check is made here; confirm the heap size is configured.
__device__
BVHNode::BVHNode(int depth) :
m_depth(depth) {
if (depth > 0) {
m_left = new BVHNode(depth-1);
m_right = new BVHNode(depth-1);
#if __CUDA_ARCH__ >= 200
printf("|||| depth %d left %p right %p\n", m_depth,m_left,m_right);
#endif
} else {
m_left = nullptr;
m_right = nullptr;
#if __CUDA_ARCH__ >= 200
printf("|||| depth %d left %p right %p\n", m_depth,m_left,m_right);
#endif
}
}
|
23,576 | #include <assert.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define MAX_POINTS 100000000
#define MAX_MEANS 1000
#define MAX_ITER 100
// SoA container for the input points: coordinates plus the index of the
// cluster each point is currently assigned to.
typedef struct {
double *x, *y;
int *membership;
} points;
// SoA container for the k centroids: position, member count, and running
// coordinate sums used to recompute the means each iteration.
typedef struct {
double *x, *y;
int *size;
double *x_sum, *y_sum;
} centroids;
__managed__ int assignment_changed;
// reads n data points from input file
__host__ void read_data(int n, char *file_name, points P) {
size_t i = 0;
double x, y;
FILE *file = fopen(file_name, "r");
assert(file != NULL);
while (!feof(file) && i < n) {
if (fscanf(file, "%lf %lf", &x, &y) != 2)
break;
P.x[i] = x;
P.y[i++] = y;
if (i % (n / 100) == 0) {
printf("\rReading input: %d%%", 100 * i / n);
fflush(stdout);
}
}
printf("Read %d points\n", i);
}
// selects k centers at random from n points
// Picks k initial centers by copying points drawn (with replacement) from P
// at random indices.
__host__ void init_centers(int n, int k, points P, centroids C) {
  for (int i = 0; i < k; ++i) {
    // not actually uniform random sampling, but good enough for testing
    int idx = rand() % n;
    C.x[i] = P.x[idx];
    C.y[i] = P.y[idx];
  }
}
// computes ||p-c||^2 for a point p and center c
// Squared Euclidean distance between (x1,y1) and (x2,y2).
// sqrt is monotonic, so it is omitted for distance comparisons.
__device__ inline double norm_2D_sqr(double x1, double y1, double x2,
                                     double y2) {
  double dx = x1 - x2;
  double dy = y1 - y2;
  return dx * dx + dy * dy;
}
// Assignment step of Lloyd's algorithm: each thread finds the nearest
// centroid for its point, flags the managed `assignment_changed` variable if
// the membership moved, and atomically folds the point into the new
// centroid's running sums/size (read later by update_clusters).
// NOTE(review): assumes k >= 1 — with k == 0, membership stays -1 and the
// atomics below would index out of bounds.
__global__ void
assign_clusters(int n, int k, const double *__restrict__ Px,
const double *__restrict__ Py, int *__restrict__ Pmembership,
double *__restrict__ Cx, double *__restrict__ Cy,
int *__restrict__ Csize, double *__restrict__ Cx_sum,
double *__restrict__ Cy_sum) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < n) {
// linear scan over the k centroids for the nearest one
double min_dist = INFINITY;
int membership = -1;
for (int i = 0; i < k; ++i) {
double current_dist = norm_2D_sqr(Px[index], Py[index], Cx[i], Cy[i]);
if (current_dist < min_dist) {
min_dist = current_dist;
membership = i;
}
}
if (membership != Pmembership[index])
assignment_changed = 1;
Pmembership[index] = membership;
// atomicAdd(double*) requires SM60+
atomicAdd(&Cx_sum[membership], Px[index]);
atomicAdd(&Cy_sum[membership], Py[index]);
atomicAdd(&Csize[membership], 1);
}
}
// Update step of Lloyd's algorithm: moves each non-empty centroid to the
// mean of its assigned points. (Parameter n is unused; kept so the call
// signature stays compatible.)
__global__ void update_clusters(int n, int k, double *__restrict__ Cx,
                                double *__restrict__ Cy,
                                double *__restrict__ Cx_sum,
                                double *__restrict__ Cy_sum,
                                int *__restrict__ Csize) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= k || Csize[i] == 0)
    return;
  Cx[i] = Cx_sum[i] / Csize[i];
  Cy[i] = Cy_sum[i] / Csize[i];
}
// Resets the per-centroid accumulators (coordinate sums and member counts)
// before the next assignment pass.
__global__ void zero_centroid_vals(int k, double *__restrict__ Cx_sum,
                                   double *__restrict__ Cy_sum,
                                   int *__restrict__ Csize) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= k)
    return;
  Cx_sum[i] = 0;
  Cy_sum[i] = 0;
  Csize[i] = 0;
}
// Prints iteration count, wall-clock time, an aggregate throughput figure
// (k*n distance evaluations per iteration), and each cluster's summary.
__host__ void print_results(int k, int n, int h, double t, points P,
centroids C) {
printf("performed %d iterations in %.2f s, perf: %.2f billion\n", h, t,
(double)k * n * h / t * 1e-9);
for (int i = 0; i < k; ++i) {
printf("cluster %d centered at (%f, %f) has size %d\n", i, C.x[i], C.y[i],
C.size[i]);
}
}
// Driver: parses "n k file", allocates unified memory, runs Lloyd's
// iterations until assignments stop changing (or MAX_ITER), then reports
// timing and cluster summaries.
int main(int argc, char **argv) {
  int k, n, h;
  char *file_name;
  points P;
  centroids C;
  assert(argc >= 4);
  n = atoi(argv[1]);
  k = atoi(argv[2]);
  file_name = argv[3];
  assert(n <= MAX_POINTS && k <= MAX_MEANS);
  cudaMallocManaged(&P.x, sizeof(double) * n);
  cudaMallocManaged(&P.y, sizeof(double) * n);
  cudaMallocManaged(&P.membership, sizeof(int) * n);
  cudaMallocManaged(&C.x, sizeof(double) * k);
  cudaMallocManaged(&C.y, sizeof(double) * k);
  cudaMallocManaged(&C.size, sizeof(int) * k);
  cudaMallocManaged(&C.x_sum, sizeof(double) * k);
  cudaMallocManaged(&C.y_sum, sizeof(double) * k);
  read_data(n, file_name, P);
  init_centers(n, k, P, C);
  int blockSize = 256;
  int numBlocks = (n + blockSize - 1) / blockSize; // ceil-div over points
  clock_t start = clock();
  for (h = 0; h < MAX_ITER; ++h) {
    assignment_changed = 0;
    // MAX_MEANS (1000) fits in a single block
    zero_centroid_vals<<<1, k>>>(k, C.x_sum, C.y_sum, C.size);
    cudaDeviceSynchronize();
    assign_clusters<<<numBlocks, blockSize>>>(n, k, P.x, P.y, P.membership, C.x,
                                              C.y, C.size, C.x_sum, C.y_sum);
    cudaDeviceSynchronize(); // required before the host reads assignment_changed
    if (!assignment_changed)
      break;
    update_clusters<<<1, k>>>(n, k, C.x, C.y, C.x_sum, C.y_sum, C.size);
    cudaDeviceSynchronize();
  }
  clock_t end = clock();
  double t = (double)(end - start) / CLOCKS_PER_SEC;
  print_results(k, n, h, t, P, C);
  // fix: the original leaked all eight managed allocations
  cudaFree(P.x);
  cudaFree(P.y);
  cudaFree(P.membership);
  cudaFree(C.x);
  cudaFree(C.y);
  cudaFree(C.size);
  cudaFree(C.x_sum);
  cudaFree(C.y_sum);
  return 0;
}
|
23,577 | #include "includes.h"
/*
152096 - William Matheus
Friendly Numbers
Programacao Paralela e Distribuida
CUDA - 2019/2 - UPF
Programa 2 - Kernel
*/
// Friendly-numbers helper: for each slot i (offset by x), counts how many
// later slots j hold the same num/den fraction, accumulating in
// device_vet[i]. One thread per slot.
__global__ void sum(long int* device_num, long int* device_den, long int* device_vet, int size, int x)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x + x;
    if (i >= size)
        return;
    for (int j = i + 1; j < size; j++) {
        if (device_num[i] == device_num[j] && device_den[i] == device_den[j])
            device_vet[i]++;
    }
}
23,578 |
#define DENOMINATOR_INDEX(a,g,i,j,k,nang,ng,nx,ny) ((a)+((nang)*(g))+((nang)*(ng)*(i))+((nang)*(ng)*(nx)*(j))+((nang)*(ng)*(nx)*(ny)*(k)))
#define denominator(a,g,i,j,k) denominator[DENOMINATOR_INDEX((a),(g),(i),(j),(k),nang,ng,nx,ny)]
// Precomputes the sweep denominator 1/(sigma_g + dt-term + angular terms)
// for every (angle, group, cell). One thread per (angle, group); each
// thread serially fills all spatial cells via the denominator() macro.
// NOTE(review): the angular terms mix indexing styles — mu[a]*dd_i[0] uses
// only the first dd_i entry while dd_j/dd_k are indexed per angle; confirm
// against the reference sweep whether dd_i is meant to be a scalar here.
__global__ void calc_denominator(
const unsigned int nx,
const unsigned int ny,
const unsigned int nz,
const unsigned int nang,
const unsigned int ng,
const double * __restrict__ mat_cross_section,
const double * __restrict__ velocity_delta,
const double * __restrict__ mu,
const double * __restrict__ dd_i,
const double * __restrict__ dd_j,
const double * __restrict__ dd_k,
double * __restrict__ denominator
)
{
size_t a = blockIdx.x * blockDim.x + threadIdx.x;
size_t g = blockIdx.y * blockDim.y + threadIdx.y;
if (a >= nang) return;
if (g >= ng) return;
for (unsigned int k = 0; k < nz; k++)
for (unsigned int j = 0; j < ny; j++)
for (unsigned int i = 0; i < nx; i++)
denominator(a,g,i,j,k) = 1.0 / (mat_cross_section[g] + velocity_delta[g] + mu[a]*dd_i[0] + dd_j[a] + dd_k[a]);
}
|
 23,579 | // Bilinear interpolation
// Bilinear-interpolation resize: each of the n destination pixels is mapped
// back into the source image and blended from its 2x2 neighbourhood.
// Grid-stride loop over destination pixels.
// Fix: the original read src one row/column past the image edge whenever the
// sample point fell in the last source row or column; the neighbour indices
// are now clamped (border replication). Interior pixels are computed with
// the same terms in the same order, so they are unchanged.
__global__ void zoomOutIn(const int n, const float*src, int srcWidth, int srcHeight, \
    float *dst, int dstWidth, int dstHeight) {
    const float rowScale = srcHeight / (float)(dstHeight);
    const float colScale = srcWidth / (float)(dstWidth);
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        // back-project the destination pixel into source coordinates
        float srcColTidf = (float)((i % dstWidth) * colScale);
        float srcRowTidf = (float)((i / dstWidth) * rowScale);
        int c0 = (int)srcColTidf;
        int r0 = (int)srcRowTidf;
        float c = srcColTidf - c0;   // horizontal blend weight
        float r = srcRowTidf - r0;   // vertical blend weight
        int c1 = (c0 + 1 < srcWidth) ? c0 + 1 : srcWidth - 1;   // clamped right neighbour
        int r1 = (r0 + 1 < srcHeight) ? r0 + 1 : srcHeight - 1; // clamped bottom neighbour
        dst[i] = (1 - c) * (1 - r) * src[r0 * srcWidth + c0]
               + (1 - c) * r       * src[r1 * srcWidth + c0]
               + c       * (1 - r) * src[r0 * srcWidth + c1]
               + c       * r       * src[r1 * srcWidth + c1];
    }
}
// Bicubic interpolation
// Bicubic-interpolation resize (cubic convolution with A = -0.75): each
// destination pixel is back-projected into the source image and
// reconstructed from a 4x4 tap neighbourhood. Grid-stride loop.
// Fix: the original sampled rows/columns at offsets -1 and +2 with no
// bounds check, reading outside `src` for destination pixels near the
// borders; the tap coordinates are now clamped to the image (border
// replication). Interior pixels accumulate the same 16 terms in the same
// column-major order as before.
__global__ void zoomCubicOutIn(const int n, const float*src, int srcWidth, int srcHeight, \
    float *dst, int dstWidth, int dstHeight) {
    float A = -0.75;
    const float rowScale = srcHeight / (float)(dstHeight);
    const float colScale = srcWidth / (float)(dstWidth);
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
        float srcColTidf = (float)((i % dstWidth) * colScale);
        float srcRowTidf = (float)((i / dstWidth) * rowScale);
        int srcColTid = (int)srcColTidf;
        int srcRowTid = (int)srcRowTidf;
        float c = srcColTidf - srcColTid;   // horizontal fraction
        float r = srcRowTidf - srcRowTid;   // vertical fraction
        // cubic convolution weights for the four row taps (offsets -1..+2)
        float coeffsY[4];
        coeffsY[0] = ((A*(r + 1) - 5 * A)*(r + 1) + 8 * A)*(r + 1) - 4 * A;
        coeffsY[1] = ((A + 2)*r - (A + 3))*r*r + 1;
        coeffsY[2] = ((A + 2)*(1 - r) - (A + 3))*(1 - r)*(1 - r) + 1;
        coeffsY[3] = 1.f - coeffsY[0] - coeffsY[1] - coeffsY[2];
        // and for the four column taps
        float coeffsX[4];
        coeffsX[0] = ((A*(c + 1) - 5 * A)*(c + 1) + 8 * A)*(c + 1) - 4 * A;
        coeffsX[1] = ((A + 2)*c - (A + 3))*c*c + 1;
        coeffsX[2] = ((A + 2)*(1 - c) - (A + 3))*(1 - c)*(1 - c) + 1;
        coeffsX[3] = 1.f - coeffsX[0] - coeffsX[1] - coeffsX[2];
        float acc = 0.0f;
        for (int m = 0; m < 4; m++) {          // column taps: offsets -1..+2
            int cc = srcColTid - 1 + m;
            cc = cc < 0 ? 0 : (cc >= srcWidth ? srcWidth - 1 : cc);
            for (int t = 0; t < 4; t++) {      // row taps: offsets -1..+2
                int rr = srcRowTid - 1 + t;
                rr = rr < 0 ? 0 : (rr >= srcHeight ? srcHeight - 1 : rr);
                acc += src[rr * srcWidth + cc] * coeffsX[m] * coeffsY[t];
            }
        }
        dst[i] = acc;
    }
}
|
23,580 | #include "includes.h"
/* Start Header
***************************************************************** /
/*!
\file knn-kernel.cu
\author Koh Wen Lin
\brief
Contains the implementation for kmeans clustering on the gpu.
*/
/* End Header
*******************************************************************/
#define KMEAN_BLOCK_SIZE 32
#define KMEAN_BLOCK_SIZE_1D KMEAN_BLOCK_SIZE * KMEAN_BLOCK_SIZE
// k-means partial-mean accumulation pass.
// dIn:      n points, d features each (row-major)
// dGroupIn: cluster index per point
// dMeanIn:  k*d running cluster sums (read as seed, then accumulated into)
// count:    k per-cluster point counters (accumulated into)
// Dynamic shared memory must hold (KMEAN_BLOCK_SIZE_1D + k)*d floats plus
// KMEAN_BLOCK_SIZE_1D ints (sGroup) plus k ints (sCount).
__global__ void Mean(float* dIn, unsigned n, unsigned d, int* dGroupIn, float* dMeanIn, unsigned k, int* count)
{
// Each thread block to perform its own summation internally(Reduction), then, each thread block will add its result into global counter and sum
extern __shared__ float sDataSumGroupCount[]; // Dynamic allocated shared memory enough to store block-size amount of data and sum of cluster, group and count.
float* sData = sDataSumGroupCount;
float* sSum = sData + KMEAN_BLOCK_SIZE_1D * d;
int* sGroup = (int*)&sDataSumGroupCount[(k + KMEAN_BLOCK_SIZE_1D) * d];
int* sCount = sGroup + KMEAN_BLOCK_SIZE_1D;
const int tx = threadIdx.x;
int tid = blockIdx.x * blockDim.x + tx;
// NOTE(review): threads with tid >= n exit here and never reach the
// __syncthreads() below. That is undefined behavior whenever n is not a
// multiple of blockDim.x — confirm n is always a whole number of blocks.
if(tid >= n)
return;
// Clear shared memory
if(tx < k)
{
// NOTE(review): sSum is seeded with the current dMeanIn, yet dMeanIn is
// cleared below and each block atomically adds its sSum back — the old
// mean values get re-added once per block. Verify this is intended.
for(int i = 0; i < d; ++i)
sSum[tx * d + i] = dMeanIn[tx * d + i];
sCount[tx] = count[tx] = 0.0f; // 0.0f assigned to int lvalues; stores 0
}
// Each thread perform 1 global load for all its feature and its group index
memcpy(&sData[tx * d], &dIn[tid * d], d * sizeof(float));
sGroup[tx] = dGroupIn[tid];
// Clear old mean
// NOTE(review): every thread of every block memsets the global dMeanIn
// here, racing with the atomicAdds other blocks issue into it below —
// results are nondeterministic across blocks. Consider clearing dMeanIn
// from the host before the launch instead.
memset(dMeanIn, 0, k * d * sizeof(float));
// Ensure all data relavant to block is loaded
__syncthreads();
// Fold this thread's point into its cluster's shared-memory partial sum.
int clusterId = sGroup[tx];
for(int i = 0; i < d; ++i)
atomicAdd(&sSum[clusterId * d + i], sData[tx * d + i]);
atomicAdd(&sCount[clusterId], 1);
__syncthreads();
// Thread 0 publishes the block's partial sums and counts to global memory.
if(tx == 0)
{
for(int i = 0; i < k * d; ++i)
atomicAdd(&dMeanIn[i], sSum[i]);
for(int i = 0; i < k; ++i)
atomicAdd(&count[i], sCount[i]);
}
} |
23,581 | #include "includes.h"
// Element-wise integer vector addition: C[i] = A[i] + B[i] for i < N.
// One thread per element; 1-D launch.
__global__ void VecAdd(const int* A, const int* B, int* C, int N) {
	const int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx >= N)
		return;                 // guard the grid tail
	C[idx] = A[idx] + B[idx];
}
23,582 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <time.h>
#define N 100
// Computes c = a x b for N x N integer matrices, one thread per output
// element c[row][col].
// FIX: previously every in-bounds thread recomputed the ENTIRE N^3 product
// (row/col were computed but never used inside the loops), so thousands of
// threads raced writing the same c[i][j] cells.
__global__ void mul(int a[][N], int b[][N], int c[][N]){
	int row = blockIdx.x*blockDim.x+threadIdx.x;
	int col = blockIdx.y*blockDim.y+threadIdx.y;
	if(row < N && col < N) {
		int sum = 0;                       // accumulate in a register
		for(int k = 0; k < N; k++) {
			sum += a[row][k] * b[k][col];
		}
		c[row][col] = sum;                 // single global store
	}
}
// Host driver: fills two N x N matrices with random values in [1,10],
// multiplies them on the GPU, and reports the elapsed time.
int main(){
	int (*pa)[N], (*pb)[N], (*pc)[N];
	int a[N][N], b[N][N], c[N][N];
	srand((unsigned)time(NULL));
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	cudaMalloc((void**)&pa, (N*N) * sizeof(int));
	cudaMalloc((void**)&pb, (N*N) * sizeof(int));
	cudaMalloc((void**)&pc, (N*N) * sizeof(int));
	for(int i = 0 ; i<N ; i++){
		for(int j = 0 ; j<N ; j++) {
			a[i][j] = rand()%10 + 1;
			b[i][j] = rand()%10 + 1;
		}
	}
	cudaMemcpy(pa, a, (N*N) * sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(pb, b, (N*N) * sizeof(int), cudaMemcpyHostToDevice);
	// FIX: the old launch used ONE block of N*N = 10000 threads, which
	// exceeds the 1024-threads-per-block hardware limit, so the kernel never
	// ran. Use 16x16 blocks and a grid that covers the N x N output.
	dim3 threadsPerBlock(16, 16);
	dim3 numBlocks((N + threadsPerBlock.x - 1) / threadsPerBlock.x,
	               (N + threadsPerBlock.y - 1) / threadsPerBlock.y);
	mul<<<numBlocks, threadsPerBlock>>>(pa, pb, pc);
	cudaError_t err = cudaGetLastError();   // catch bad launch configuration
	if (err != cudaSuccess)
		fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
	cudaMemcpy(c, pc, (N*N) * sizeof(int), cudaMemcpyDeviceToHost);
	printf("matrix multiplication per thread\n");
	/* for(int i = 0 ; i<N ; i++){
	for(int j = 0 ; j<N ; j++) {
	printf("%d ",c[i][j]);
	}
	printf("\n");
	}
	*/
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);             // wait so elapsed time is valid
	float elapsedTime;
	cudaEventElapsedTime(&elapsedTime, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	printf("Time to generate : %3.1f ms\n", elapsedTime);
	cudaFree(pa);
	cudaFree(pb);
	cudaFree(pc);
	return 0;
}
|
23,583 | #include "includes.h"
#define A 1.2f
#define B 0.5f
#define MIN_LEARNING_RATE 0.000001f
#define MAX_LEARNING_RATE 50.0f
// Device functions
// Array[height * width]
// Sets every element of `array` (length `arrayLength`) to `value`.
// 1-D launch; one element per thread.
__global__ void fillArray(float *array, float value, int arrayLength)
{
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx < arrayLength)
		array[idx] = value;
}
23,584 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "driver_types.h"
#include <stdio.h>
#include <fstream>
#define BLOCK_SIZE 16
typedef struct
{
int width;
int height;
float* elements;
} Matrix;
// Naive dense matrix multiply: c = a x b, one thread per output element.
// Matrices are row-major; expects a.width == b.height.
__global__ void mat_mul_kernel(const Matrix a, const Matrix b, Matrix c) {
	int row = blockIdx.y*blockDim.y + threadIdx.y;
	int col = blockIdx.x*blockDim.x + threadIdx.x;
	if (row >= c.height || col >= c.width)
		return;                            // guard: grid may overshoot
	float res = 0.0f;
	for (int i = 0; i < a.width; ++i) {
		// FIX: the row stride of a row-major matrix is its WIDTH; the old
		// code used a.height, which is only correct for square matrices.
		res += a.elements[row*a.width + i] * b.elements[i*b.width + col];
	}
	c.elements[row*c.width + col] = res;
}
// Multiplies a x b into c on the device: uploads the inputs, launches the
// kernel, downloads the result. Returns 0 on success, -1 on any CUDA error.
// Assumes a.height and b.width are multiples of BLOCK_SIZE (grid is not
// rounded up).
int mat_mul(const Matrix a, const Matrix b, Matrix c)
{
	cudaError_t cuda_error = cudaSuccess;
	int result = -1;
	Matrix d_a; d_a.width = a.width; d_a.height = a.height; d_a.elements = NULL;
	Matrix d_b; d_b.width = b.width; d_b.height = b.height; d_b.elements = NULL;
	Matrix d_c; d_c.width = c.width; d_c.height = c.height; d_c.elements = NULL;
	do {
		//1 copy host memory to device(cudaMalloc + cudaMemcpy)
		cuda_error = cudaMalloc(&(d_a.elements), d_a.width*d_a.height*sizeof(float));
		if (cuda_error != cudaSuccess) { printf("cuda malloc failed."); break; }
		cuda_error = cudaMemcpy(d_a.elements, a.elements, d_a.width*d_a.height * sizeof(float), cudaMemcpyHostToDevice);
		if (cuda_error != cudaSuccess) { printf("cuda memcpy failed."); break; }
		cuda_error = cudaMalloc(&(d_b.elements), d_b.width*d_b.height * sizeof(float));
		if (cuda_error != cudaSuccess) { printf("cuda malloc failed."); break; }
		cuda_error = cudaMemcpy(d_b.elements, b.elements, d_b.width*d_b.height * sizeof(float), cudaMemcpyHostToDevice);
		if (cuda_error != cudaSuccess) { printf("cuda memcpy failed."); break; }
		cuda_error = cudaMalloc(&(d_c.elements), d_c.width*d_c.height * sizeof(float));
		if (cuda_error != cudaSuccess) { printf("cuda malloc failed."); break; }
		//2 invoke kernel: one thread per output element
		dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
		dim3 dimGrid(b.width / dimBlock.x, a.height / dimBlock.y);
		mat_mul_kernel<<<dimGrid,dimBlock>>>(d_a, d_b, d_c);
		cuda_error = cudaGetLastError();  // FIX: launch errors were never checked
		if (cuda_error != cudaSuccess) { printf("kernel launch failed."); break; }
		//3 download result to host
		// FIX: the return value of this memcpy used to be discarded, so the
		// following check tested a stale status from an earlier call.
		cuda_error = cudaMemcpy(c.elements, d_c.elements, c.width*c.height*sizeof(float), cudaMemcpyDeviceToHost);
		if (cuda_error != cudaSuccess) { printf("cuda memcpy failed."); break; }
		result = 0;
	} while (0);
	//4 release device memory on every path (FIX: early returns used to leak)
	cudaFree(d_a.elements);  // cudaFree(NULL) is a harmless no-op
	cudaFree(d_b.elements);
	cudaFree(d_c.elements);
	return result;
}
// Writes matrix m to `file_name` as whitespace-separated values, one row per
// line. Returns 0 on success, -1 if the file cannot be opened.
int save_matrix(Matrix m, const char* file_name) {
	std::ofstream out(file_name, std::ios::out);
	if (!out.is_open()) {
		return -1;
	}
	for (int row = 0; row < m.height; ++row) {
		for (int col= 0; col < m.width; ++col) {
			// FIX: the row stride of a row-major matrix is its WIDTH; the
			// old index row*m.height only worked for square matrices.
			out << m.elements[row*m.width + col] << " ";
		}
		out << std::endl;
	}
	out.close();
	return 0;
}
// Demo entry point: multiplies two constant-filled 64x64 matrices on the GPU
// and saves the product to disk. argc/argv are unused.
int cuda_matrix_mul(int argc , char* argv[])
{
	const int dim = 64;                // square test size (multiple of BLOCK_SIZE)
	Matrix a;
	a.width = dim;
	a.height = dim;
	a.elements = new float[dim * dim];
	for (int i = 0; i < dim * dim; ++i) {
		a.elements[i] = 1.0f;
	}
	Matrix b;
	b.width = dim;
	b.height = dim;
	b.elements = new float[dim * dim];
	for (int i = 0; i < dim * dim; ++i) {
		b.elements[i] = 2.0f;
	}
	Matrix c;
	c.width = dim;
	c.height = dim;
	c.elements = new float[dim * dim];
	for (int i = 0; i < dim * dim; ++i) {
		c.elements[i] = 0;
	}
	if (0 == mat_mul(a, b, c)) {
		printf("Success.\n");
		save_matrix(c, "D:/temp/mat.txt");
	}
	else {
		printf("Failed.\n");
	}
	// FIX: the three new[] buffers were previously leaked.
	delete[] a.elements;
	delete[] b.elements;
	delete[] c.elements;
	return 0;
}
23,585 | //#define DIMX 1920
//#define DIMY 1080
//
//struct CuComplex {
// float r;
// float i;
//
// __device__ CuComplex(float a, float b) :r(a), i(b) {}
// __device__ float magnitude2(void) {
// return r * r + i * i;
// }
//
// __device__ CuComplex operator*(const CuComplex& a)
// {
// return CuComplex(r*a.r - i * a.i, i*a.r + r * a.i);
// }
//
// __device__ CuComplex operator+(const CuComplex& a)
// {
// return CuComplex(r + a.r, i + a.i);
// }
//};
//
//
//__device__ int julia(int x, int y)
//{
// const float scale = 1.5;
// float jx = scale * (float)(DIMX / 2 - x) / (DIMX / 2);
// float jy = scale * (float)(DIMY / 2 - y) / (DIMY / 2);
//
// CuComplex c(-0.8, 0.154);
// CuComplex a(jx, jy);
//
// int i = 0;
// for (i = 0; i < 200; i++)
// {
// a = a * a + c;
// if (a.magnitude2() > 1000)
// return 0;
// }
// return 1;
//}
//
//__global__ void kernel(unsigned char *ptr)
//{
// int x = blockIdx.x;
// int y = blockIdx.y;
// int offset = x + y * gridDim.x;
//
// int juliaValue = julia(x, y);
// ptr[offset * 4 + 0] = 0;
// ptr[offset * 4 + 1] = 255 * juliaValue;
// ptr[offset * 4 + 2] = 0;
// ptr[offset * 4 + 3] = 255;
//
//}
|
23,586 | #include "includes.h"
// Element-wise in-place float4 addition: array1_d[i] += array2_d[i] for the
// first c_VoxelNumber entries. 2-D grid of 1-D blocks.
__global__ void reg_addArrays_kernel_float4(float4 *array1_d, float4 *array2_d)
{
	const int tid= (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
	if(tid < c_VoxelNumber){
		float4 a = array1_d[tid];
		// FIX: the second operand must come from array2_d. The old code read
		// array1_d twice (doubling array1 and leaving array2_d unused).
		float4 b = array2_d[tid];
		array1_d[tid] = make_float4(a.x+b.x,a.y+b.y,a.z+b.z,a.w+b.w);
	}
}
23,587 | #include<stdio.h>
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
// Single-thread scalar addition on the device: *c = *a + *b.
__global__ void add(int *a,int *b,int *c)
{
	*c = (*a) + (*b);
}
// Reads two integers from stdin, adds them on the GPU, prints the sum.
int main()
{
	int a,b,c;
	printf("\nValue of A:");
	if (scanf("%d",&a) != 1) {          // FIX: scanf result was ignored
		fprintf(stderr, "invalid input\n");
		return 1;
	}
	printf("\nValue of b:");
	if (scanf("%d",&b) != 1) {
		fprintf(stderr, "invalid input\n");
		return 1;
	}
	int *d_a,*d_b,*d_c;
	const int size=sizeof(int);
	cudaMalloc((void**)&d_a,size);
	cudaMalloc((void**)&d_b,size);
	cudaMalloc((void**)&d_c,size);
	cudaMemcpy(d_a,&a,size,cudaMemcpyHostToDevice);
	cudaMemcpy(d_b,&b,size,cudaMemcpyHostToDevice);
	add<<<1,1>>>(d_a,d_b,d_c);
	cudaMemcpy(&c,d_c,size,cudaMemcpyDeviceToHost);
	printf("The Sum of the Numbers is %d:",c);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;   // FIX: previously returned 1 (failure exit code) on success
}
23,588 | #include <iostream>
#include <sstream>
#include <stdio.h>
using namespace std;
#define UP 0
#define DOWN 1
#define BLOCK_SIZE 1024
#define NUM_BLOCKS 128
#define SHARED_MEM 8192
/* Cuda memcheck snippets from HW3
* http://graphics.stanford.edu/~seander/bithacks.html#DetermineIfPowerOf2
*/
#define CUDA_SAFE_CALL_NO_SYNC( call) do { \
cudaError err = call; \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
#define CUDA_SAFE_CALL( call) do { \
CUDA_SAFE_CALL_NO_SYNC(call); \
cudaError err = cudaDeviceSynchronize(); \
if( cudaSuccess != err) { \
fprintf(stderr, "Cuda error in file '%s' in line %i : %s.\n", \
__FILE__, __LINE__, cudaGetErrorString( err) ); \
exit(EXIT_FAILURE); \
} } while (0)
// Butterfly width for pass `numButterfly`: stageSize / 2^numButterfly.
__device__ int getButtSize(int stageSize, int numButterfly){
	return stageSize >> numButterfly;
}
// Returns the larger of the two inputs.
__device__ int maximum(int firstIn, int secondIn){
	return (secondIn > firstIn) ? secondIn : firstIn;
}
// Comparator element of the sorting network. For DOWN the pair leaves
// ordered descending (*in0 >= *in1); for UP, ascending (*in0 <= *in1).
// Any other `direction` value leaves the pair untouched.
__device__ void compare_and_switch(int direction, float *in0, float *in1){
	const bool needSwap =
		(direction == DOWN && *in1 > *in0) ||
		(direction == UP   && *in0 > *in1);
	if (needSwap){
		const float tmp = *in0;
		*in0 = *in1;
		*in1 = tmp;
	}
}
// Stores `value` into the shared staging buffer at the perfect-shuffle
// position of `addrOut` within a `stageSize`-wide stage. The permutation is
// a rotate-left of addrOut's bits inside the stage; `offset` is the stage's
// base index inside `array`.
__device__ void shuffle(int stageSize, float value, int addrOut, int offset, float *array){
// Actually destination address is addrIn,
// it may look unintuitive but the fact is
// In N' Out (pun intended with the california
// based fast food chain) are relative terms based
// on where you're looking at the code from. In this case
// the shuffle function in going to caluculate the input address
// (addrIn) for the shared memory based on the output address of
// the previous comparator (addrOut)
int addrIn = addrOut;
addrIn = addrIn << 1;                    // shift all bits up one position
addrIn &= ~(stageSize);                  // drop the bit shifted past the stage width
addrIn |= (addrOut / (stageSize / 2));   // wrap the old top bit around into bit 0
// printf("stageSize: %8d\tvalue: %f\naddrOut: %8d\toffset: %4d\narray[%d + %d] = %f\n\n", stageSize, value, addrOut, offset, addrIn, offset, value);
array[addrIn + offset] = value;
}
// Stores `value` into the staging buffer at the butterfly-permuted position
// of `addrOut`: the lowest bit and the top bit (within the `stageSize`-wide
// stage) of the address are exchanged. `offset` is the stage's base index.
__device__ void butterfly(int stageSize, float value, int addrOut, int offset, float *array){
int firstBit = addrOut % 2;              // low bit of the output address
int lastBit = addrOut / (stageSize / 2); // top bit within the stage
int addrIn = addrOut;
addrIn = addrIn - (addrIn % 2);          // clear the low bit
addrIn &= ~(stageSize / 2);              // clear the top bit
addrIn |= (firstBit * stageSize / 2);    // old low bit becomes the top bit
addrIn |= lastBit;                       // old top bit becomes the low bit
// printf("stageSize: %8d\tvalue: %f\naddrOut: %8d\toffset: %4d\narray[%d + %d] = %f\n\n", stageSize, value, addrOut, offset, addrIn, offset, value);
array[addrIn + offset] = value;
}
// One stage of the Batcher/banyan sorting network.
// Launch: <<<NUM_BLOCKS, BLOCK_SIZE>>>. Each block streams a window of
// `arrayIn` through the shared `buffer`: (1) a comparator + shuffle pass for
// this stage, (2) stageNum+1 butterfly passes of halving width, (3) a
// write-back of the block's window to global memory.
// `level` selects which bit of the comparator's global index decides the
// sort direction (UP/DOWN) for that comparator.
__global__ void stagingKernel(int stageNum, int stageSize, int numElements, float *arrayIn){
int quotient = maximum(blockDim.x, stageSize / 2); // comparators covered per block step
int index = blockIdx.x * quotient + threadIdx.x;
int stride = gridDim.x * quotient;
__shared__ float buffer[SHARED_MEM];
int level = stageNum;
// printf ("blockIdx.x: %4d\tthreadIdx.x: %4d\nindex: %4d\tstride: %4d\n\n", blockIdx.x, threadIdx.x, index, stride);
// Grid-stride loop over comparators (each handles a pair of elements).
for (int addr = index; addr < (numElements / 2); addr += stride){
// Phase 1: load pairs from global memory, compare, shuffle into shared.
for (int iteration = 0; iteration < maximum(stageSize / (2 * BLOCK_SIZE), 1); ++iteration){
int compGlobalAddr = addr + iteration * BLOCK_SIZE;
int compLocalAddr = threadIdx.x + iteration * BLOCK_SIZE;
// printf ("blockIdx.x: %4d\tthreadIdx.x: %4d\ncompGlobalAddr: %4d\tcompLocalAddr: %4d\n\n", blockIdx.x, threadIdx.x, compGlobalAddr, compLocalAddr);
int firstInAddr = compGlobalAddr * 2;
int secondInAddr = compGlobalAddr * 2 + 1;
float firstIn = arrayIn[firstInAddr];
float secondIn = arrayIn[secondInAddr];
int outAddrOffset = (compLocalAddr * 2 / stageSize) * stageSize;
int firstOutAddr = (compLocalAddr * 2) % stageSize;
int secondOutAddr = (compLocalAddr * 2 + 1) % stageSize;
// printf("compGlobalAddr: %8d\tcompLocalAddr: %4d\noutAddrOffset: %4d\tfirstoutAddr: %4d\tsecondOutAddr: %9d\nfirstIn: %f\tsecondIn: %f\n\n", compGlobalAddr, compLocalAddr, outAddrOffset, firstOutAddr, secondOutAddr, firstIn, secondIn);
int direction = (compGlobalAddr & (1 << level)) == 0 ? UP : DOWN;
// printf("compGlobalAddr: %4d\tdirection: %d\n", compGlobalAddr, direction);
compare_and_switch(direction, &firstIn, &secondIn);
shuffle(stageSize, firstIn, firstOutAddr, outAddrOffset, buffer);
shuffle(stageSize, secondIn, secondOutAddr, outAddrOffset, buffer);
}
level++;
// if (threadIdx.x == 0)
// printf("blockIdx.x: %d\n%f\t%f\t%f\t%f\n%f\t%f\t%f\t%f\n%f\t%f\t%f\t%f\n%f\t%f\t%f\t%f\n\n", blockIdx.x,
// buffer[0], buffer[1], buffer[2], buffer[3],
// buffer[4], buffer[5], buffer[6], buffer[7],
// buffer[8], buffer[9], buffer[10], buffer[11],
// buffer[12], buffer[13], buffer[14], buffer[15]);
// NOTE(review): the shuffle pass above wrote `buffer` and the butterfly
// loop below reads it; with the barrier commented out, threads in
// different warps may read stale shared memory — confirm this is safe
// for stageSize > warp width.
// __syncthreads();
// Phase 2: stageNum+1 butterfly passes of halving width inside shared.
for (int iteration = 0; iteration < maximum(stageSize / (2 * BLOCK_SIZE), 1); ++iteration){
for (int numButterfly = 0; numButterfly <= stageNum; ++numButterfly){
int butterflySize = getButtSize(stageSize, numButterfly);
int compGlobalAddr = addr + iteration * BLOCK_SIZE;
int compLocalAddr = threadIdx.x + iteration * BLOCK_SIZE;
int firstInAddr = compLocalAddr * 2;
int secondInAddr = compLocalAddr * 2 + 1;
float firstIn = buffer[firstInAddr];
float secondIn = buffer[secondInAddr];
int outAddrOffset = (compLocalAddr * 2 / butterflySize) * butterflySize;
int firstOutAddr = (compLocalAddr * 2) % butterflySize;
int secondOutAddr = (compLocalAddr * 2 + 1) % butterflySize;
int direction = (compGlobalAddr & (1 << level)) == 0 ? UP : DOWN;
compare_and_switch(direction, &firstIn, &secondIn);
butterfly(butterflySize, firstIn, firstOutAddr, outAddrOffset, buffer);
butterfly(butterflySize, secondIn, secondOutAddr, outAddrOffset, buffer);
}
__syncthreads();
}
// if (threadIdx.x == 0)
// printf("blockIdx.x: %d\n%f\t%f\t%f\t%f\n%f\t%f\t%f\t%f\n%f\t%f\t%f\t%f\n%f\t%f\t%f\t%f\n\n", blockIdx.x,
// buffer[0], buffer[1], buffer[2], buffer[3],
// buffer[4], buffer[5], buffer[6], buffer[7],
// buffer[8], buffer[9], buffer[10], buffer[11],
// buffer[12], buffer[13], buffer[14], buffer[15]);
// __syncthreads();
// Phase 3: copy the block's shared-memory window back to global memory.
for (int iteration = 0; iteration < maximum(stageSize / (2 * BLOCK_SIZE), 1); ++iteration){
int compGlobalAddr = addr + iteration * BLOCK_SIZE;
int compLocalAddr = threadIdx.x + iteration * BLOCK_SIZE;
int firstLocalAddr = compLocalAddr * 2;
int secondLocalAddr = compLocalAddr * 2 + 1;
int firstGlobalAddr = compGlobalAddr * 2;
int secondGlobalAddr = compGlobalAddr * 2 + 1;
// printf("compGlobalAddr: %d\tcompLocalAddr: %d\nbuffer[%d]: %f\tbuffer[%d]: %f\narrayIn[%d] = buffer[%d] = %f\narrayIn[%d] = buffer[%d] = %f\n\n", compGlobalAddr, compLocalAddr, firstLocalAddr, buffer[firstLocalAddr], secondLocalAddr, buffer[secondLocalAddr], firstGlobalAddr, firstLocalAddr, buffer[firstLocalAddr], secondGlobalAddr, secondLocalAddr, buffer[secondLocalAddr]);
// printf("compGlobalAddr: %d\tcompLocalAddr: %d\nbuffer[%d]: %f\tbuffer[%d]: %f\narrayIn[%d] = buffer[%d] = %f\narrayIn[%d] = buffer[%d] = %f\n\n", compGlobalAddr, compLocalAddr, firstLocalAddr, secondLocalAddr, firstGlobalAddr, firstLocalAddr, buffer[firstLocalAddr], secondGlobalAddr, secondLocalAddr, buffer[secondLocalAddr]);
arrayIn[firstGlobalAddr] = buffer[firstLocalAddr];
arrayIn[secondGlobalAddr] = buffer[secondLocalAddr];
}
// if (threadIdx.x == 0)
// printf("%f\t%f\t%f\t%f\n%f\t%f\t%f\t%f\n%f\t%f\t%f\t%f\n%f\t%f\t%f\t%f\n\n", buffer[0], buffer[1], buffer[2], buffer[3],
// buffer[4], buffer[5], buffer[6], buffer[7],
// buffer[8], buffer[9], buffer[10], buffer[11],
// buffer[12], buffer[13], buffer[14], buffer[15]);
// __syncthreads();
}
// level++;
// for (int iteration = 0; iteration <= stageNum; ++iteration){
// for (int addr = index; addr < (numElements / 2); addr += stride){
// int direction = addr & (1 << level) == 0 ? UP : DOWN;
// int outAddrOffset = addr - (addr % (stageSize / 2));
// int outAddr0 = (addr * 2) % stageSize;
// int outAddr1 = (addr * 2 + 1) % stageSize;
// float firstIn = buffer[tIndex * 2];
// float secondIn = buffer[tIndex * 2 + 1];
// compare_and_switch(direction, &firstIn, &secondIn);
// butterfly(stageSize, firstIn, outAddr0, outAddrOffset, buffer);
// butterfly(stageSize, secondIn, outAddr1, outAddrOffset, buffer);
// }
// // arrayIn[addr * 2] = buffer[tIndex * 2];
// // arrayIn[addr * 2 + 1] = buffer[tIndex * 2 + 1];
// }
}
// Final pass: orders each adjacent pair (2i, 2i+1) ascending.
// Grid-stride loop over the numElements/2 pairs.
__global__ void cleanupKenerl(int numElements, float *arrayIn){
	const int start = blockIdx.x * blockDim.x + threadIdx.x;
	const int step = blockDim.x * gridDim.x;
	for (int pair = start; pair < (numElements / 2); pair += step){
		float lo = arrayIn[pair * 2];
		float hi = arrayIn[pair * 2 + 1];
		compare_and_switch(UP, &lo, &hi);
		arrayIn[pair * 2] = lo;
		arrayIn[pair * 2 + 1] = hi;
	}
}
// Host driver for the Batcher/banyan sort on the N-element device array x:
// runs n-1 staging passes with a stage size that doubles each pass, then a
// single cleanup pass over adjacent pairs.
void banyan(float *x, ulong N, uint n){
	int stageSize = 4;
	for (int stageNum = 0; stageNum < n - 1; stageNum++){
		stagingKernel<<< NUM_BLOCKS, BLOCK_SIZE >>>(stageNum, stageSize, N, x);
		cudaDeviceSynchronize();     // each stage depends on the previous one
		stageSize <<= 1;             // next stage is twice as wide
	}
	cleanupKenerl<<< 80, 1024 >>>(N, x);
	cudaDeviceSynchronize();
}
// main for debugging individual kernels
// main for debugging individual kernels
// USAGE: ./banyan [n] [thresh]
//   n      -> log2 of the element count (default 4 -> N = 16)
//   thresh -> how many head/tail entries to print (default 1)
int main(int argc, char** argv)
{
	uint n = 4;
	int thresh = 1;
	// FIX: argv[1]/argv[2] were previously read unconditionally; running with
	// fewer arguments constructed a stringstream from a null pointer
	// (undefined behavior) even though the usage note promises defaults.
	if (argc > 1) {
		stringstream conv_1(argv[1]);
		if (!(conv_1 >> n))
			n = 4;
	}
	if (argc > 2) {
		stringstream conv_2(argv[2]);
		if (!(conv_2 >> thresh))
			thresh = 1;
	}
	ulong N = pow(2,n);
	printf("n=%d // N=%d // thresh=%d:\n",(int)n,(int)N,thresh); // NOTE: might not be exposing issue by casting to int here
	// x = inputs, y = outputs
	float *x;
	CUDA_SAFE_CALL(cudaMallocManaged(&x, N*sizeof(float)));
	printf("------------------------------------------------------------\n");
	printf("Init input:\n");
	printf("------------------------------------------------------------\n");
	for (int i=0; i<N; i++) {
		x[i]=(float) (N-i-1); // backwards list
		// x[i]=(float) i; // sorted list
		// x[i]=(float) (rand() % 50); // random list
		if (i<thresh || i>N-thresh-1)
			printf("for i=%d: x=%f\n", i, x[i]);
	}
	// call batcher-banyan sorting network on N-element array
	banyan(x, N, n);
	cudaDeviceSynchronize();
	printf("------------------------------------------------------------\n");
	printf("Output:\n");
	printf("------------------------------------------------------------\n");
	for (int i=0; i<2*thresh-1; i++) {
		printf("for i=%d: x=%f\n", (i<thresh) ? i : (int)N-(2*thresh-i-1) , (i<thresh) ? x[i] : x[(int)N-(2*thresh-i-1)]);
	}
	CUDA_SAFE_CALL(cudaFree(x));
	return 0;
}
|
23,589 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <iostream>
#include <algorithm>
#define BLOCKS_NUM 160
#define THREADS_NUM 1024 //thread number/block
#define TOTAL_THREADS (BLOCKS_NUM * THREADS_NUM)
#define REPEAT_TIMES 2048
#define WARP_SIZE 32
#define ARRAY_SIZE (TOTAL_THREADS + REPEAT_TIMES*WARP_SIZE)
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with file/line context; exits with the error
// code unless `abort` is false. Use via the gpuErrchk() macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
if (code != cudaSuccess) {
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Atomic-throughput microbenchmark: each thread issues REPEAT_TIMES
// warp-strided atomicAdds on data1 between two clock64() stamps.
// startClk/stopClk receive the per-thread timestamps; res[gid] receives the
// accumulated old values returned by the atomics (keeps the loop live).
template <class T>
__global__ void max_flops(uint64_t *startClk, uint64_t *stopClk, T *data1, T *res) {
	int gid = blockIdx.x*blockDim.x + threadIdx.x;
	// FIX: `sum` was previously read before ever being written (uninitialized
	// local), making the values stored to `res` garbage.
	int32_t sum = 0;
	// synchronize all threads
	asm volatile ("bar.sync 0;");
	// start timing
	uint64_t start = clock64();
	for(uint32_t i = 0; i<REPEAT_TIMES; i++){
		// atomicAdd returns the previous value; accumulating it prevents the
		// compiler from eliding the loop.
		sum = sum + atomicAdd(&data1[(i*WARP_SIZE)+gid], 10);
	}
	// synchronize all threads
	asm volatile("bar.sync 0;");
	// stop timing
	uint64_t stop = clock64();
	// write time and data back to memory
	startClk[gid] = start;
	stopClk[gid] = stop;
	res[gid] = sum;
}
// Host driver for the atomic-bandwidth microbenchmark: prepares the input
// array, launches max_flops<int32_t>, and derives bytes/clock from the
// earliest start and latest stop timestamps across all threads.
int main(){
	uint64_t *startClk = (uint64_t*) malloc(TOTAL_THREADS*sizeof(uint64_t));
	uint64_t *stopClk = (uint64_t*) malloc(TOTAL_THREADS*sizeof(uint64_t));
	int32_t *res = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
	int32_t *data1 = (int32_t*) malloc(ARRAY_SIZE*sizeof(int32_t));
	uint64_t *startClk_g;
	uint64_t *stopClk_g;
	int32_t *data1_g;
	int32_t *res_g;
	for (uint32_t i=0; i<ARRAY_SIZE; i++) {
		data1[i] = (int32_t)i;
	}
	gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint64_t)) );
	gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint64_t)) );
	gpuErrchk( cudaMalloc(&data1_g, ARRAY_SIZE*sizeof(int32_t)) );
	gpuErrchk( cudaMalloc(&res_g, TOTAL_THREADS*sizeof(int32_t)) );
	gpuErrchk( cudaMemcpy(data1_g, data1, ARRAY_SIZE*sizeof(int32_t), cudaMemcpyHostToDevice) );
	max_flops<int32_t><<<BLOCKS_NUM,THREADS_NUM>>>(startClk_g, stopClk_g, data1_g, res_g);
	gpuErrchk( cudaPeekAtLastError() );
	// FIX: these two copies used sizeof(uint32_t) for uint64_t arrays,
	// fetching only half of each timestamp buffer back to the host.
	gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint64_t), cudaMemcpyDeviceToHost) );
	gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint64_t), cudaMemcpyDeviceToHost) );
	gpuErrchk( cudaMemcpy(res, res_g, TOTAL_THREADS*sizeof(int32_t), cudaMemcpyDeviceToHost) );
	// bytes moved = REPEAT_TIMES atomics/thread * threads * 4 bytes read + 4 written
	float bw;
	uint64_t total_time = *std::max_element(&stopClk[0],&stopClk[TOTAL_THREADS])-*std::min_element(&startClk[0],&startClk[TOTAL_THREADS]);
	bw = (((float)REPEAT_TIMES*(float)TOTAL_THREADS*4*8)/(float)(total_time));
	printf("int32 bandwidth = %f (byte/clk)\n", bw);
	printf("Total Clk number = %ld \n", total_time);
	// FIX: device and host buffers were previously leaked.
	cudaFree(startClk_g);
	cudaFree(stopClk_g);
	cudaFree(data1_g);
	cudaFree(res_g);
	free(startClk);
	free(stopClk);
	free(res);
	free(data1);
	return 0;
}
|
23,590 | #include "includes.h"
// Reverses array `a` of length N into `b`: b[id] = a[N-1-id].
// Single-block launch with one thread per element.
__global__ void invierte(float *a, float *b) {
	int id = threadIdx.x;
	//int id = threadIdx.x + blockDim.x * blockIdx.x;// para n-bloques de 1 hilo
	if (id < N)
	{
		// FIX: was a[N-id], which reads a[N] (out of bounds) for id == 0 and
		// never reads a[0]; a correct reversal mirrors index id to N-1-id.
		b[id] = a[N-1-id];
	}
}
23,591 | #include "includes.h"
// Element-wise vector addition: dev_c[i] = dev_a[i] + dev_b[i] for i < N.
__global__ void Vector_Addition ( const int *dev_a , const int *dev_b , int *dev_c)
{
	//Get the id of thread within a block
	// FIX: the index was `unsigned short`, which wraps past 65535 and
	// silently corrupts indexing for launches with more than 64K threads.
	unsigned int tid = blockDim.x*blockIdx.x+threadIdx.x;
	if ( tid < N ) // check the boundry condition for the threads
		dev_c [tid] = dev_a[tid] + dev_b[tid] ;
}
23,592 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <stdbool.h>
#include <time.h>
#define K 3 // K is from K-SAT, currently we are working on 3-SAT
#define THREAD_PER_BLOCK_log2 10
// current Var Limit is 32;
// Consumes DIMACS-style comment lines ("c ...") and the "p cnf" problem-line
// prefix from stdin, leaving the variable/clause counts for the caller to
// read. Exits the process if the format token is not "cnf".
void preProcessing(){
// removes comment
while(getchar() == 'c'){
while(getchar()!='\n');
}
// NOTE(review): the loop above already consumed the first non-'c' character
// (expected to be 'p'); this extra getchar() discards the character after
// it — confirm the input always starts the problem line with "p cnf".
getchar();
char format[100];
scanf("%s", format); // NOTE(review): unbounded %s; a token > 99 chars overflows `format`
if(strcmp(format, "cnf") != 0){ // format assertion
printf("Format Error, expected cnf but %s was provided\n", format);
exit(1);
}
printf("Preprocessing Successfull\n");
}
// Brute-force #SAT on the CPU: tries all 2^varCount truth assignments and
// counts those satisfying every clause. Bit (v-1) of the assignment index
// encodes the value of variable v; clauseStore holds K literals per clause
// (negative literal = negated variable).
int cpuSolve(int varCount, int clauseCount, int* clauseStore){
	const int limit = pow(2, varCount);
	int satCount = 0;
	for(int assignment = 0; assignment < limit; assignment++){
		bool formulaHolds = true;
		for(int c = 0; c < clauseCount; c++){
			bool clauseHolds = false;
			for(int lit = 0; lit < K; lit++){
				const int literal = clauseStore[K * c + lit];
				const int v = abs(literal);
				bool value = (assignment >> (v - 1)) & 1;
				if(literal < 0)
					value = !value;       // negated literal
				clauseHolds = clauseHolds || value;
			}
			formulaHolds = formulaHolds && clauseHolds;
		}
		if(formulaHolds)
			satCount++;
	}
	return satCount;
}
// GPU #SAT: one thread evaluates one truth assignment (its global index).
// Bit (v-1) of the assignment encodes variable v; threads whose formula is
// satisfied increment the global counter atomically. varCount is accepted
// for interface symmetry with cpuSolve but unused (limit already = 2^varCount).
__global__ void gpuSolver(int varCount, int clauseCount, int limit, int* clauseStore, int *gpu_sat_count){
	const int perIndex = (blockIdx.x << THREAD_PER_BLOCK_log2) + threadIdx.x;
	if(perIndex >= limit)
		return;                           // guard the grid tail
	bool formulaHolds = true;
	for(int c = 0; c < clauseCount; c++){
		bool clauseHolds = false;
		for(int lit = 0; lit < K; lit++){
			const int literal = clauseStore[K * c + lit];
			const int v = abs(literal);
			bool value = (perIndex >> (v - 1)) & 1;
			if(literal < 0)
				value = !value;           // negated literal
			clauseHolds = clauseHolds || value;
		}
		formulaHolds = formulaHolds && clauseHolds;
	}
	if(formulaHolds)
		atomicAdd(gpu_sat_count, 1);
}
// Entry point: parses a DIMACS-like 3-SAT instance from stdin and counts
// satisfying assignments on the CPU ("cpu") or GPU ("gpu").
int main(int argc, char* argv[]){
	if(argc<2){
		printf("Invalid Options: One options is required to indetity type of execution\n");
		return 1;
	}
	preProcessing();
	int varCount, clauseCount;
	scanf("%d%d", &varCount, &clauseCount);
	printf("\nNo. of Variables = %d | No. of clauses = %d\n", varCount, clauseCount);
	// clauses Input: K literals per clause, each line 0-terminated
	int *clauseStore = (int*)malloc(sizeof(int)*clauseCount*K);
	for(int i=0; i<clauseCount; i++){
		for(int j=0; j<K; j++){ // one clause with K variables
			scanf("%d", clauseStore + (K * i) + j);
		}
		int tmp;
		scanf("%d\n", &tmp);   // consume the trailing 0
	}
	clock_t start, end;
	if(strcmp(argv[1], "cpu")==0){ // cpu implementation
		start = clock();
		int satCount = cpuSolve(varCount, clauseCount, clauseStore);
		end = clock();
		printf("\n\nSAT Count = %d\n", satCount);
	}
	else if(strcmp(argv[1], "gpu") ==0){ // gpu implementation
		int *gpuClauseStore;
		cudaMalloc(&gpuClauseStore, sizeof(int)*clauseCount*K);
		cudaMemcpy(gpuClauseStore, clauseStore, sizeof(int)*clauseCount*K, cudaMemcpyHostToDevice);
		int *gpu_sat_count;
		cudaMalloc(&gpu_sat_count, sizeof(int));
		cudaMemset(gpu_sat_count, 0, sizeof(int));
		cudaDeviceSynchronize();
		// NOTE: pow(2, varCount) stored in an int caps this at ~30 variables.
		int limit = pow(2, varCount);
		int threadPerBlock = pow(2, THREAD_PER_BLOCK_log2);
		int noOfBlock = ceil((float)limit / threadPerBlock);
		start = clock();
		gpuSolver<<<noOfBlock, threadPerBlock>>>(varCount, clauseCount, limit, gpuClauseStore, gpu_sat_count);
		cudaDeviceSynchronize();
		end = clock();
		// FIX: the result was previously copied into a leaked malloc'd int;
		// a stack variable suffices.
		int satCount = 0;
		cudaMemcpy(&satCount, gpu_sat_count, sizeof(int), cudaMemcpyDeviceToHost);
		printf("\n\nSAT Count = %d\n", satCount);
		// FIX: device buffers were previously leaked.
		cudaFree(gpuClauseStore);
		cudaFree(gpu_sat_count);
	}
	else{
		printf("Invalid Option");
		free(clauseStore);
		return 0;
	}
	free(clauseStore);     // FIX: previously leaked
	double executionTime = (double)(end-start)/CLOCKS_PER_SEC;
	printf("execution Time = %lf\n", executionTime);
	return 0;
}
|
23,593 | #include <stdio.h>
__global__
// One Jacobi relaxation step: interior point (i+1, j+1) of a square grid of
// side blockDim.x + 2 becomes the average of its four neighbors in U1.
// Launch with <<<side-2, side-2>>> so blocks index rows and threads columns.
void laplace(float * U1, float * U2) {
	int i = blockIdx.x;
	int j = threadIdx.x;
	int side = blockDim.x + 2;
	// Accumulate in a register (FIX: the old code stored the partial sum to
	// global memory and then read-modified it with a `.25` DOUBLE literal,
	// forcing a second global round-trip and double-precision math).
	float sum = U1[i * side + j + 1]        // i-1, j  (north)
	          + U1[(i + 1) * side + j]      // i,   j-1 (west)
	          + U1[(i + 2) * side + j + 1]  // i+1, j  (south)
	          + U1[(i + 1) * side + j + 2]; // i,   j+1 (east)
	U2[(i + 1) * side + j + 1] = 0.25f * sum;
}
// Heat/Laplace relaxation demo: iterates the Jacobi kernel T times on a
// side x side grid (first row fixed at 1, rest 0), times the run, and dumps
// the final field to output.bin.
int main() {
	int T = 10000;           // relaxation iterations (kernel runs T times)
	int side = 128;          // grid side including the fixed boundary
	int area = side * side;
	float * U1, * U2, * devU1, * devU2;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	//---------------------------
	U1 = (float *)malloc(area * sizeof(float));
	U2 = (float *)malloc(area * sizeof(float));
	cudaMalloc(&devU1, area * sizeof(float));
	cudaMalloc(&devU2, area * sizeof(float));
	// Boundary/initial condition: first row = 1, everything else = 0.
	for (int i=0; i<side; ++i)
		U1[i] = 1.;
	for (int i=1; i<side; ++i) {
		for (int j=0; j<side; ++j)
			U1[i * side + j] = 0.;
	}
	memcpy(U2, U1, area * sizeof(float));
	cudaMemcpy(devU1, U1, area * sizeof(float),
		cudaMemcpyHostToDevice);
	cudaMemcpy(devU2, U1, area * sizeof(float),
		cudaMemcpyHostToDevice);
	// Ping-pong the two device buffers; two half-steps per loop iteration.
	for (int t=0; t<T;) {
		laplace<<<side-2, side-2>>>(devU1, devU2);
		laplace<<<side-2, side-2>>>(devU2, devU1);
		t += 2;
	}
	cudaMemcpy(U1, devU1, area * sizeof(float),
		cudaMemcpyDeviceToHost);
	//----------------------------
	cudaEventRecord(stop);
	// FIX: cudaEventElapsedTime requires the stop event to have completed;
	// without this synchronize it reports cudaErrorNotReady / garbage timing.
	cudaEventSynchronize(stop);
	float elapsed_time(0);
	cudaEventElapsedTime(&elapsed_time, start, stop);
	printf("elapsed time: %f ms\n", elapsed_time);
	cudaEventDestroy(start);   // FIX: events were never destroyed
	cudaEventDestroy(stop);
	FILE * cfout = fopen("output.bin", "wb");
	if (cfout) {               // FIX: fopen result was used unchecked
		fwrite(U1, sizeof(float), area, cfout);
		fclose(cfout);
	}
	cudaFree(devU1);
	cudaFree(devU2);
	free(U1);
	free(U2);
}
|
23,594 | #include "includes.h"
// Backpropagation through a (clipped) leaky rectifier: scales each gradient
// dx[i] by the derivative of the forward activation at x[i] —
//   0         if clipping > 0 and x[i] > clipping (saturated region)
//   1         if x[i] > 0
//   leakSlope otherwise.
// Grid-stride loop: any 1-D launch configuration is valid.
__global__ void cudaDRectifier_backPropagate_kernel(double* x, double* dx, unsigned int size, double leakSlope, double clipping)
{
	const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
	const unsigned int step = blockDim.x * gridDim.x;
	for (unsigned int i = first; i < size; i += step) {
		double grad;
		if (clipping > 0.0 && x[i] > clipping)
			grad = 0.0;
		else if (x[i] > 0.0)
			grad = 1.0;
		else
			grad = leakSlope;
		dx[i] *= grad;
	}
}
23,595 | #include "includes.h"
// Translates a packed two-plane coordinate array of dim_y*dim_x points: the
// first plane (offsets [0, total)) is shifted by seg_y, the second plane
// (offsets [total, 2*total)) by seg_x. One point per thread.
__global__ void translate_2D(float* coords, size_t dim_y, size_t dim_x, float seg_y, float seg_x){
	size_t index = blockIdx.x * blockDim.x + threadIdx.x;
	size_t total = dim_x * dim_y;
	if(index < total){
		coords[index] += seg_y;
		coords[index + total] += seg_x;
	}
	// FIX: removed a __syncthreads() that sat inside the divergent branch
	// above (undefined behavior when some threads of a block fail the bound
	// check); no shared memory is used, so no barrier is needed at all.
}
23,596 | #include<iostream>
#include<cstring>
#include<cstdlib>
#define GMM_MAX_COMPONT 3
#define GMM_LEARN_ALPHA 0.005
#define GMM_THRESHOD_SUMW 0.7
#define HEIGHT 1080
#define WIDTH 1920
using namespace std;
// Per-pixel online GMM background-model update (grayscale frame,
// GMM_MAX_COMPONT components per pixel). One thread updates one pixel at
// (row x, col y); launch a 2-D grid covering height x width.
//   _image:  height*width input frame
//   mask:    unused in this kernel (kept for interface symmetry)
//   modelW/modelS/modelM: per-pixel component weights / spreads / means,
//                         GMM_MAX_COMPONT entries per pixel.
__global__ void trainGMM_CUDA(unsigned char *_image, unsigned char *mask, float *modelW, float *modelS, unsigned char *modelM, int height, int width)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if (x >= height || y >= width)
return;
// Pointers to this pixel's value and its model slots.
unsigned char *_imageData = _image + x * width + y;
float *modelWData = modelW + GMM_MAX_COMPONT * (x * width + y);
float *modelSData = modelS + GMM_MAX_COMPONT * (x * width + y);
unsigned char *modelMData = modelM + GMM_MAX_COMPONT * (x * width + y);
int num_fit = 0;
// Match the pixel against each component; update weight/mean/spread.
for (int k = 0; k < GMM_MAX_COMPONT; k++)
{
int delm = abs(_imageData[0] - modelMData[k]);
long dist = delm * delm;
// NOTE(review): compares squared distance against 3 * modelSData[k];
// if modelSData holds a variance this is 3*sigma^2 rather than the
// usual (3*sigma)^2 — confirm the intended threshold.
if (dist < 3.0 * modelSData[k])
{
// Matched: reinforce weight, pull mean and spread toward the sample.
modelWData[k] += GMM_LEARN_ALPHA * (1 - modelWData[k]);
// NOTE(review): delm is |pixel - mean|, so this always moves the
// (unsigned char) mean upward, even for samples darker than the
// mean — verify this is intended.
modelMData[k] += (GMM_LEARN_ALPHA / modelWData[k]) * delm;
modelSData[k] += (GMM_LEARN_ALPHA / modelWData[k]) * (dist - modelSData[k]);
}
else
{
// Not matched: decay the weight toward zero.
modelWData[k] += GMM_LEARN_ALPHA * (0 - modelWData[k]);
num_fit++;
}
}
// Re-rank components by weight/spread ratio, descending (selection-style).
for (int kk = 0; kk < GMM_MAX_COMPONT; kk++)
{
for (int rr = kk; rr< GMM_MAX_COMPONT; rr++)
{
if (modelWData[rr] / modelSData[rr] > modelWData[kk] / modelSData[kk])
{
float temp_weight = modelWData[rr];
modelWData[rr] = modelWData[kk];
modelWData[kk] = temp_weight;
unsigned char temp_mean = modelMData[rr];
modelMData[rr] = modelMData[kk];
modelMData[kk] = temp_mean;
float temp_sigma = modelSData[rr];
modelSData[rr] = modelSData[kk];
modelSData[kk] = temp_sigma;
}
}
}
// No component matched and a free (zero-weight) slot exists: initialize it.
if (num_fit == GMM_MAX_COMPONT && modelWData[GMM_MAX_COMPONT - 1] == 0)
{
for (int k = 0; k < GMM_MAX_COMPONT; k++)
{
if (0 == modelWData[k])
{
if (k == 0)
modelWData[k] = 1;
else
modelWData[k] = GMM_LEARN_ALPHA;
modelMData[k] = _imageData[0];
modelSData[k] = 15.0;
// NOTE(review): `q != k` in the loop CONDITION terminates the loop
// at q == k instead of skipping that index, so components after k
// are never down-weighted — likely meant `if (q != k)` in the body.
for (int q = 0; q < GMM_MAX_COMPONT && q != k; q++)
{
modelWData[q] *= (1 - GMM_LEARN_ALPHA);
}
break;
}
}
}
else if (num_fit == GMM_MAX_COMPONT && modelWData[GMM_MAX_COMPONT - 1] != 0)
{
// No match and no free slot: replace the lowest-ranked component.
modelMData[GMM_MAX_COMPONT - 1] = _imageData[0];
modelSData[GMM_MAX_COMPONT - 1] = 15.0;
}
}
// Device version of testGMM: one thread classifies one pixel against the
// trained per-pixel GMM — background (0) if the pixel lies within 2.5x the
// (truncated) spread of any component, foreground (255) once the cumulative
// weight of non-matching components reaches GMM_THRESHOD_SUMW.
// NOTE(review): if neither break fires, maskData[0] is left unwritten —
// confirm the caller initializes `mask` beforehand.
__global__ void testGMM_CUDA(unsigned char *_image, unsigned char *mask, float *modelW, float *modelS, unsigned char *modelM, int height, int width)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
// Grid-tail guard.
if (x >= height || y >= width)
return;
// Per-pixel pointers into the image, output mask, and model arrays.
unsigned char *_imageData = _image + x * width + y;
unsigned char *maskData = mask + x * width + y;
float *modelWData = modelW + GMM_MAX_COMPONT * (x * width + y);
float *modelSData = modelS + GMM_MAX_COMPONT * (x * width + y);
unsigned char *modelMData = modelM + GMM_MAX_COMPONT * (x * width + y);
float sum_w = 0.0;
for (unsigned char k = 0; k < GMM_MAX_COMPONT; k++)
{
// Background match: the spread is cast to unsigned char before the
// comparison, which truncates large values — mirrors the CPU testGMM.
if (abs(_imageData[0] - modelMData[k]) < (unsigned char)(2.5 * modelSData[k]))
{
maskData[0] = 0;
break;
}
// Accumulate weights of non-matching components; enough mismatch weight
// marks the pixel as foreground.
sum_w += modelWData[k];
if (sum_w >= GMM_THRESHOD_SUMW)
{
maskData[0] = 255;
break;
}
}
}
// CPU reference implementation of the per-pixel online GMM background-model
// update; kept in lock-step with trainGMM_CUDA so their outputs can be
// compared. For each pixel it: (1) updates every component the pixel matches,
// (2) swap-sorts components by weight/spread descending, and (3) spawns or
// replaces a component when nothing matched. `mask` is accepted but unused.
void trainGMM(unsigned char *_image, unsigned char *mask, float *modelW, float *modelS, unsigned char *modelM, int height, int width)
{
/**************************** Train ******************************************/
for (int i = 0; i < height; i++)
{
// Row base pointers; advanced by GMM_MAX_COMPONT per pixel below.
float *modelWData = modelW + i * width * GMM_MAX_COMPONT;
float *modelSData = modelS + i * width * GMM_MAX_COMPONT;
unsigned char *modelMData = modelM + i * width * GMM_MAX_COMPONT;
const unsigned char *_imageData = _image + i * width;
for (int j = 0; j < width; j++)
{
int num_fit = 0;
/**************************** Update parameters Start ******************************************/
for (int k = 0; k < GMM_MAX_COMPONT; k++)
{
int delm = abs(_imageData[j] - modelMData[k]);
long dist = delm * delm;
// Squared distance vs 3*S: S is updated with (dist - S), so it behaves
// like a variance — TODO confirm the intended threshold.
if (dist < 3.0 * modelSData[k])
{
modelWData[k] += GMM_LEARN_ALPHA * (1 - modelWData[k]);
modelMData[k] += (GMM_LEARN_ALPHA / modelWData[k]) * delm;
modelSData[k] += (GMM_LEARN_ALPHA / modelWData[k]) * (dist - modelSData[k]);
}
else
{
modelWData[k] += GMM_LEARN_ALPHA * (0 - modelWData[k]);
num_fit++;
}
}
/**************************** Update parameters End ******************************************/
/*********************** Sort Gaussian component by 'weight / sigma' Start ****************************/
for (int kk = 0; kk < GMM_MAX_COMPONT; kk++)
{
for (int rr = kk; rr< GMM_MAX_COMPONT; rr++)
{
if (modelWData[rr] / modelSData[rr] > modelWData[kk] / modelSData[kk])
{
float temp_weight = modelWData[rr];
modelWData[rr] = modelWData[kk];
modelWData[kk] = temp_weight;
unsigned char temp_mean = modelMData[rr];
modelMData[rr] = modelMData[kk];
modelMData[kk] = temp_mean;
float temp_sigma = modelSData[rr];
modelSData[rr] = modelSData[kk];
modelSData[kk] = temp_sigma;
}
}
}
/*********************** Sort Gaussian model by 'weight / sigma' End ****************************/
/*********************** Create new Gaussian component Start ****************************/
if (num_fit == GMM_MAX_COMPONT && modelWData[GMM_MAX_COMPONT - 1] == 0)
{
//if there is no exit component fit,then start a new component
for (int k = 0; k < GMM_MAX_COMPONT; k++)
{
if (0 == modelWData[k])
{
if (k == 0)
modelWData[k] = 1;
else
modelWData[k] = GMM_LEARN_ALPHA;
modelMData[k] = _imageData[j];
modelSData[k] = 15.0;
//normalization the weight,let they sum to 1
// NOTE(review): `q != k` in the loop condition stops the loop at q == k,
// so components after k are never renormalized — matches the GPU kernel,
// but confirm whether skipping q == k was intended instead.
for (int q = 0; q < GMM_MAX_COMPONT && q != k; q++)
{
/****update the other unfit's weight,u and sigma remain unchanged****/
modelWData[q] *= (1 - GMM_LEARN_ALPHA);
}
break;
}
}
}
else if (num_fit == GMM_MAX_COMPONT && modelWData[GMM_MAX_COMPONT - 1] != 0)
{
modelMData[GMM_MAX_COMPONT - 1] = _imageData[j];
modelSData[GMM_MAX_COMPONT - 1] = 15.0;
}
/*********************** Create new Gaussian component End ****************************/
// Advance to the next pixel's components.
modelWData += GMM_MAX_COMPONT;
modelSData += GMM_MAX_COMPONT;
modelMData += GMM_MAX_COMPONT;
}
}
}
// CPU reference classifier matching testGMM_CUDA: background (0) if the pixel
// lies within 2.5x the (truncated) spread of any component, foreground (255)
// once the cumulative weight of non-matching components reaches
// GMM_THRESHOD_SUMW.
// NOTE(review): pixels where neither condition fires leave mask[j]
// unchanged — confirm the caller initializes `mask` beforehand.
void testGMM(unsigned char* _image, unsigned char* mask, float* modelW, float* modelS, unsigned char* modelM, int height, int width)
{
/*********************** Predict ****************************/
for (int i = 0; i < height; i++)
{
// Row base pointers; advanced by GMM_MAX_COMPONT per pixel below.
float *modelWData = modelW + i * width * GMM_MAX_COMPONT;
float *modelSData = modelS + i * width * GMM_MAX_COMPONT;
unsigned char *modelMData = modelM + i * width * GMM_MAX_COMPONT;
const unsigned char *_imageData = _image + i * width;
unsigned char *maskData = mask + i * width;
for (int j = 0; j < width; j++)
{
float sum_w = 0.0;
for (unsigned char k = 0; k < GMM_MAX_COMPONT; k++)
{
// Spread cast to unsigned char truncates large values (as in the kernel).
if (abs(_imageData[j] - modelMData[k]) < (unsigned char)(2.5 * modelSData[k]))
{
maskData[j] = 0;
break;
}
sum_w += modelWData[k];
if (sum_w >= GMM_THRESHOD_SUMW)
{
maskData[j] = 255;
break;
}
}
modelWData += GMM_MAX_COMPONT;
modelSData += GMM_MAX_COMPONT;
modelMData += GMM_MAX_COMPONT;
}
}
}
// Benchmark driver: runs the CPU and GPU GMM train/test paths on random
// frames and reports the average per-frame wall time of each.
int main()
{
    // Element counts: per-pixel GMM model (GMM_MAX_COMPONT components) and
    // one frame/mask plane.
    const size_t modelElems = (size_t)HEIGHT * GMM_MAX_COMPONT * WIDTH;
    const size_t frameElems = (size_t)HEIGHT * WIDTH;
    float *modelW = new float[modelElems];
    float *modelS = new float[modelElems];
    unsigned char *modelM = new unsigned char[modelElems];
    unsigned char *frame = new unsigned char[frameElems];
    unsigned char *mask = new unsigned char[frameElems];
    unsigned char *mask2 = new unsigned char[frameElems];
    int height = HEIGHT;
    int width = WIDTH;
    // Zero the full buffers. (The original passed sizeof(pointer) to memset —
    // and mixed up the modelS/modelM size expressions — so only the first
    // 4/8 bytes of each buffer were actually cleared.)
    memset(modelW, 0, modelElems * sizeof(float));
    memset(modelS, 0, modelElems * sizeof(float));
    memset(modelM, 0, modelElems * sizeof(unsigned char));
    memset(mask, 0, frameElems * sizeof(unsigned char));
    memset(mask2, 0, frameElems * sizeof(unsigned char));
    // Device-side mirrors of the model, frame, and mask buffers.
    float *dev_modelW, *dev_modelS;
    unsigned char *dev_frame, *dev_mask, *dev_modelM;
    cudaMalloc((void**)&dev_modelW, modelElems * sizeof(float));
    cudaMalloc((void**)&dev_modelS, modelElems * sizeof(float));
    cudaMalloc((void**)&dev_modelM, modelElems * sizeof(unsigned char));
    cudaMalloc((void**)&dev_frame, frameElems * sizeof(unsigned char));
    cudaMalloc((void**)&dev_mask, frameElems * sizeof(unsigned char));
    cudaMemcpy(dev_modelW, modelW, modelElems * sizeof(float),
               cudaMemcpyHostToDevice);
    cudaMemcpy(dev_modelS, modelS, modelElems * sizeof(float),
               cudaMemcpyHostToDevice);
    cudaMemcpy(dev_modelM, modelM, modelElems * sizeof(unsigned char),
               cudaMemcpyHostToDevice);
    // The kernels map x -> rows (height) and y -> columns (width), so the
    // grid covers HEIGHT along .x and WIDTH along .y.
    dim3 block(32, 8);
    dim3 grid((HEIGHT + block.x - 1) / block.x,
              (WIDTH + block.y - 1) / block.y);
    clock_t start, end;
    clock_t start2, end2;
    double sum_t = 0;
    double sum_t2 = 0;
    srand(unsigned(time(0)));
    int frame_num = 100;
    for (int t = 0; t < frame_num; t++)
    {
        // Synthetic random frame for this iteration.
        for (int i = 0; i < height; i++)
            for (int j = 0; j < width; j++)
                *(frame + i * width + j) = rand() % 255;
        // CPU train + classify.
        start = clock();
        trainGMM(frame, mask, modelW, modelS, modelM, height, width);
        testGMM(frame, mask, modelW, modelS, modelM, height, width);
        end = clock();
        sum_t += end - start;
        // GPU train + classify, timed including the host<->device transfers;
        // the blocking device-to-host copy also drains the kernels.
        start2 = clock();
        cudaMemcpy(dev_frame, frame, frameElems * sizeof(unsigned char),
                   cudaMemcpyHostToDevice);
        trainGMM_CUDA<<<grid, block>>>(dev_frame, dev_mask, dev_modelW, dev_modelS, dev_modelM, height, width);
        testGMM_CUDA<<<grid, block>>>(dev_frame, dev_mask, dev_modelW, dev_modelS, dev_modelM, height, width);
        cudaMemcpy(mask2, dev_mask, frameElems * sizeof(unsigned char),
                   cudaMemcpyDeviceToHost);
        end2 = clock();
        sum_t2 += end2 - start2;
        // Optional CPU/GPU consistency check:
        /*
        int diff_count = 0;
        for (int i = 0; i < height * width; i++)
            if (mask[i] != mask2[i])
                diff_count++;
        cout <<"Diff: " << diff_count << endl;
        */
    }
    cout <<"CPU: " << (sum_t / frame_num) / CLOCKS_PER_SEC << endl;
    cout <<"GPU: " << (sum_t2 / frame_num) / CLOCKS_PER_SEC << endl;
    // Release device and host memory (the original leaked all of it).
    cudaFree(dev_modelW);
    cudaFree(dev_modelS);
    cudaFree(dev_modelM);
    cudaFree(dev_frame);
    cudaFree(dev_mask);
    delete[] modelW;
    delete[] modelS;
    delete[] modelM;
    delete[] frame;
    delete[] mask;
    delete[] mask2;
}
|
23,597 | /* Program : To find the run-time for the matrix multiplication kernel without tiling for various block sizes
* Author : Anant Shah
* Date : 13-9-2018
* Roll Number : EE16B105
**/
#include<stdio.h>
#define ERROR_HANDLER(error_msg,line) error_handler(error_msg,line)
#define NUM_THREADS_X 16
#define NUM_THREADS_Y 16
#define X_1 4
#define Y_1 4
#define X_2 4
#define Y_2 8
#define X_3 8
#define Y_3 4
#define X_4 8
#define Y_4 8
#define X_5 8
#define Y_5 16
#define X_6 16
#define Y_6 8
#define X_7 16
#define Y_7 32
#define SIZE 8192
/* Abort the program with a diagnostic when a CUDA API call has failed;
 * a cudaSuccess status is a no-op. `line` is the caller's __LINE__. */
void error_handler(cudaError_t error_msg,int line){
	if(error_msg==cudaSuccess)
		return;
	printf("%s in %s at %d",cudaGetErrorString(error_msg),__FILE__,line);
	exit(EXIT_FAILURE);
}
/* Populate a numRows x numCols row-major matrix with the deterministic
 * pattern mat[r][c] = r*2.1f + c*3.2f (computed in float, stored as double). */
void fill_matrix(double *mat,unsigned numRows,unsigned numCols){
	for(unsigned r=0;r<numRows;r++){
		double *rowPtr = mat + (size_t)r*numCols;
		for(unsigned c=0;c<numCols;c++)
			rowPtr[c] = r*2.1f + c*3.2f;
	}
}
/* Append a numRows x numCols row-major matrix to "assignment2_out",
 * one whitespace-separated row per line ("%4.4f " per element).
 * NOTE(review): fopen's result is not checked — a failed open crashes;
 * confirm whether that is acceptable for this benchmark harness. */
void print_matrix_to_file(double *mat,unsigned numRows,unsigned numCols){
	const char *fname = "assignment2_out";
	FILE *f = fopen(fname,"a");
	for(unsigned r=0;r<numRows;r++){
		for(unsigned c=0;c<numCols;c++)
			fprintf(f,"%4.4f ",mat[r*numCols+c]);
		fprintf(f,"\n");
	}
	fclose(f);
}
__global__ void matrixMul(double *M,double *N,double *P,int width){
	/* Naive (untiled) square matrix multiply P = M * N; each thread computes
	 * one output cell. ".x" is the fastest-changing (column) index.
	 * Parameters : M, N  - input matrices in row-major 1-D layout
	 *            : P     - output matrix, row-major 1-D layout
	 *            : width - number of rows/columns (square matrices)
	 */
	int col = blockIdx.x*blockDim.x+threadIdx.x; /* Column of the cell to be calculated */
	int row = blockIdx.y*blockDim.y+threadIdx.y; /* Row of the cell to be calculated */
	if( (row<width) && (col<width)){ /* Guard the grid tail */
		double pSum = 0.0; /* Accumulates the dot product for this cell */
		for(int i=0;i<width;i++){
			pSum += M[row*width+i]*N[i*width+col];
		}
		/* Fix: the store was previously OUTSIDE the bounds check, so tail
		 * threads wrote past the end of P. Store only when in bounds. */
		P[row*width+col] = pSum;
	}
}
/* Benchmark driver: times the untiled matrixMul kernel for eight block-size
 * configurations on SIZE x SIZE matrices and appends each result matrix to
 * "assignment2_out". Replaces eight copy-pasted timing blocks with one loop. */
int main(int argc,char **argv){
	if(argc!=1){
		printf("error: Invalid number of arguments\n");
		exit(EXIT_FAILURE);
	}
	/************************** Benchmark configurations *************************/
	/* Config 0 (16*16) runs first — same run order as the original code —
	 * but timings are printed in the original ascending-label order below. */
	const int NUM_CFG = 8;
	const int cfgX[NUM_CFG] = {NUM_THREADS_X, X_1, X_2, X_3, X_4, X_5, X_6, X_7};
	const int cfgY[NUM_CFG] = {NUM_THREADS_Y, Y_1, Y_2, Y_3, Y_4, Y_5, Y_6, Y_7};
	const char *label[NUM_CFG] = {"16*16","4*4","4*8","8*4","8*8","8*16","16*8","16*32"};
	const int printOrder[NUM_CFG] = {1, 2, 3, 4, 5, 6, 0, 7};
	/*********************************** Host memory *****************************/
	size_t size = sizeof(double)*SIZE*SIZE;  /* bytes per matrix */
	double *h_M = (double *)malloc(size);    /* multiplicand */
	double *h_N = (double *)malloc(size);    /* multiplicand */
	double *h_P = (double *)malloc(size);    /* product */
	fill_matrix(h_M,SIZE,SIZE);
	fill_matrix(h_N,SIZE,SIZE);
	/******************************* Device memory *******************************/
	double *d_M,*d_N,*d_P;
	ERROR_HANDLER(cudaMalloc((void **)&d_M,size),__LINE__);
	ERROR_HANDLER(cudaMalloc((void **)&d_N,size),__LINE__);
	ERROR_HANDLER(cudaMalloc((void **)&d_P,size),__LINE__);
	ERROR_HANDLER(cudaMemcpy(d_M,h_M,size,cudaMemcpyHostToDevice),__LINE__);
	ERROR_HANDLER(cudaMemcpy(d_N,h_N,size,cudaMemcpyHostToDevice),__LINE__);
	/*************************** Timed kernel launches ***************************/
	cudaEvent_t start[NUM_CFG], stop[NUM_CFG];
	for(int i=0;i<NUM_CFG;i++){
		cudaEventCreate(&start[i]);
		cudaEventCreate(&stop[i]);
	}
	for(int i=0;i<NUM_CFG;i++){
		dim3 threads(cfgX[i],cfgY[i]);
		dim3 blocks((SIZE+cfgX[i]-1)/cfgX[i],(SIZE+cfgY[i]-1)/cfgY[i]);
		cudaEventRecord(start[i]);
		matrixMul<<<blocks,threads>>>(d_M,d_N,d_P,SIZE);
		cudaEventRecord(stop[i]);
		/* The blocking copy also drains this kernel before the next config. */
		ERROR_HANDLER(cudaMemcpy(h_P,d_P,size,cudaMemcpyDeviceToHost),__LINE__);
		print_matrix_to_file(h_P,SIZE,SIZE);
	}
	/****************************** Report run times *****************************/
	float milliseconds = 0.0;
	for(int j=0;j<NUM_CFG;j++){
		int i = printOrder[j];
		/* Fix: the original only synchronized one of the eight stop events
		 * before calling cudaEventElapsedTime; synchronize each explicitly. */
		cudaEventSynchronize(stop[i]);
		cudaEventElapsedTime(&milliseconds,start[i],stop[i]);
		printf("Run-Time(seconds) (%s): %.10f\n",label[i],milliseconds/1000);
	}
	/**************************** Free Allocated Memory **************************/
	for(int i=0;i<NUM_CFG;i++){
		cudaEventDestroy(start[i]);  /* events were previously leaked */
		cudaEventDestroy(stop[i]);
	}
	cudaFree(d_M);
	cudaFree(d_N);
	cudaFree(d_P);
	free(h_M);
	free(h_N);
	free(h_P);
}
|
23,598 | #include "FileWriter.cuh"
#include "Empire.cuh"
// Write a PPM (P3) image of `contents` to fileName: non-negative cells are
// rendered as green intensity, negative cells as blue intensity (magnitude).
void writeFile(char* fileName, float** contents, int width, int height) {
	FILE* f = fopen(fileName, "w");
	fprintf(f, "P3\n%d %d\n255\n\n", width, height);
	for (int row = 0; row < height; row++) {
		for (int col = 0; col < width; col++) {
			float v = contents[row][col];
			if (v >= 0) {
				fprintf(f, "0, %d, 0\t", (int)(v));
			} else {
				fprintf(f, "0, 0, %d\t", (int)(v*-1));
			}
		}
		fprintf(f, "\n");
	}
	fclose(f);
}
// Write a PPM (P3) image of the empire map: land cells (map >= 0) are colored
// by the owning empire (white when unowned); water cells (map < 0) are shaded
// blue by magnitude, matching writeFile's convention.
void writeEmpireFile(char* fileName, empire* empires, float** empireMap, float** map, int width, int height) {
FILE* f = fopen(fileName, "w");
fprintf(f, "P3\n%d %d\n255\n\n", width, height);
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x++) {
if (map[y][x] >= 0) {
if (empireMap[y][x]>0) {
// empireMap stores 1-based empire ids; 0 (or less) means unowned.
empire emp = empires[int(empireMap[y][x]) - 1];
// NOTE(review): channels are emitted r, b, g — PPM expects R G B, so this
// likely swaps green and blue; confirm against the empire struct fields.
fprintf(f, "%d, %d, %d\t", emp.r, emp.b, emp.g);
}
else {
fprintf(f, "255, 255, 255\t");
}
}
else {
fprintf(f, "0, 0, %d\t", (int)(map[y][x] * -1));
}
}
fprintf(f, "\n");
}
fclose(f);
} |
23,599 | #include <curand.h>
#include <curand_kernel.h>
#define DIM 1600
#define PI 3.14159265
// Build per-channel 256-bin histograms of an RGB image using global atomics;
// each thread handles one pixel. i_size is used as the row pitch (elements
// per row) when flattening (x, y) into `offset`.
// NOTE(review): there is no bounds guard, so the launch grid must exactly
// cover the image — any overshoot reads out of bounds. Also assumes the
// hist_* arrays were zeroed before launch; confirm with the caller.
__global__ void Get_Histogram(unsigned char *R_input, unsigned char *G_input,
unsigned char *B_input, size_t i_size,
unsigned int *hist_r,unsigned int *hist_g,unsigned int *hist_b) {
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * i_size;
// One atomic increment per channel; the 8-bit pixel value selects the bin.
atomicAdd( &(hist_r[R_input[offset]]), 1);
atomicAdd( &(hist_g[G_input[offset]]), 1);
atomicAdd( &(hist_b[B_input[offset]]), 1);
}
// Histogram-equalization remap: looks up each pixel's new intensity in the
// per-channel tables hist_* and writes it to the *_dataE output planes.
// NOTE(review): hist_* are unsigned int but the result is stored into
// unsigned char, so the tables are presumably pre-scaled CDF values in
// [0,255] — anything larger would be truncated; confirm with the caller.
// No bounds guard: the grid must exactly cover the image (see Get_Histogram).
__global__ void Equalization_GPU(unsigned char *R_input, unsigned char *G_input,
unsigned char *B_input, size_t i_size,
unsigned char *r_dataE, unsigned char *g_dataE,
unsigned char *b_dataE,
unsigned int *hist_r,unsigned int *hist_g,unsigned int *hist_b) {
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
int offset = x + y * i_size;
r_dataE[offset] = hist_r[R_input[offset]];
g_dataE[offset] = hist_g[G_input[offset]];
b_dataE[offset] = hist_b[B_input[offset]];
}
|
23,600 | #include "includes.h"
// Elementwise int -> float conversion: out[i] = (float)in[i] for i < size.
// One thread per element; threads past `size` do nothing.
__global__ void to_float(float *out, int *in, int size) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < size) {
        out[idx] = static_cast<float>(in[idx]);
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.