serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
6,901 | #include "Tests.cuh"
#include "cuda_runtime.h"
// Global test counters, updated by TestInit()/Assert() below.
int ALL_TESTS = 0;
int PASSED_TESTS = 0;
// Wall-clock timing for the whole run (start/stop) and for one pack of
// tests (startPack/stopPack); created lazily in InitAllTests()/InitPack().
cudaEvent_t start;
cudaEvent_t stop;
cudaEvent_t startPack;
cudaEvent_t stopPack;
// Reset the global pass/fail counters and start the run-wide timer.
void InitAllTests()
{
    ALL_TESTS = 0;
    PASSED_TESTS = 0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
}
// Start the timer for one pack (group) of tests; stopped by PackReport().
void InitPack()
{
    cudaEventCreate(&startPack);
    cudaEventCreate(&stopPack);
    cudaEventRecord(startPack, 0);
}
// Register the start of one test: count it and print its name
// right-aligned in a 135-character column (result is printed by Assert()).
void TestInit(const char *testName)
{
    ALL_TESTS += 1;
    printf("%135s ", testName);
}
// Finish one test: it passes only when all ten conditions hold.
// Call sites with fewer conditions are expected to pass true for the rest.
void Assert(bool arg1, bool arg2, bool arg3, bool arg4, bool arg5, bool arg6, bool arg7, bool arg8, bool arg9, bool arg10)
{
    const bool allPassed = arg1 && arg2 && arg3 && arg4 && arg5
                        && arg6 && arg7 && arg8 && arg9 && arg10;
    if (!allPassed)
    {
        printf(" FAILED\n");
        return;
    }
    PASSED_TESTS += 1;
    printf(" PASS\n");
}
// Echo a test's captured output under a "Test output:" header.
void Output(std::string output)
{
    const char *text = output.c_str();
    printf("\nTest output:\n%s\n", text);
}
// Stop the run-wide timer and print the overall pass/total summary plus
// the elapsed GPU-event time in milliseconds.
void FinalReport()
{
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedMs = 0.0f;
    cudaEventElapsedTime(&elapsedMs, start, stop);
    printf("===== FINAL REPORT =====\n\n");
    printf("\nPASSED %d / %d\n", PASSED_TESTS, ALL_TESTS);
    printf("ELAPSED TIME: %f\n\n", elapsedMs);
}
// Stop the per-pack timer and print the elapsed time for this pack.
void PackReport()
{
    cudaEventRecord(stopPack, 0);
    cudaEventSynchronize(stopPack);
    float elapsedMs = 0.0f;
    cudaEventElapsedTime(&elapsedMs, startPack, stopPack);
    printf("ELAPSED TIME: %f\n\n", elapsedMs);
}
// Print an exception's message for the test log.
// Fix: take the exception by const reference — passing std::exception by
// value slices derived exception objects and loses their overridden what().
void PrintException(const std::exception &e)
{
    printf(" EXCEPTION: %s\n", e.what());
}
// Overload for the catch-all case where no exception object is available.
void PrintException()
{
    fputs(" EXCEPTION\n", stdout);
}
|
6,902 | #include <iostream>
#include <stdio.h>
#include <fstream>
#include <stdlib.h>
#include <cmath>
#include <ctime>
#include <cuda.h>
//#define DEBUG
//#define HANDLE_ERROR(x) if((x) != 0) cout << "Error!" << endl;
using namespace std;
// One sparse subblock of the global matrix.
// NOTE(review): indices appear to be 1-based (callers subtract 1) — confirm
// against the data-file format.
struct SubBlock{
// Global column index feeding each local slot (length mem_b_size).
int * nnz_global_i_idx;
// Global row index receiving each local slot (length mem_b_size).
int * nnz_global_o_idx;
// Number of non-zero entries in this subblock.
int nnz;
// Local (row, col) coordinates of each non-zero (length nnz each).
int * nnz_local_r_idx;
int * nnz_local_c_idx;
// Value of each non-zero (length nnz).
float * nnz_values;
};
//void printSubBlocksInfo(SubBlock * sbs, int nsbs, int mem_b_size);
// Per-subblock partial product: each CUDA block was meant to gather its x
// slice, multiply its non-zeros, and write a partial y into sub_y_arr.
// NOTE(review): the entire body is commented out, so this kernel is
// currently a NO-OP — sub_y_arr is never written and CudaMergeResults will
// read uninitialized device memory. Confirm whether this is intentional
// (debugging) before relying on the result.
__global__ void CudaCompute(SubBlock * d_sbs, float * d_x, float * d_y, int nblocks, int mem_b_size, int nrows, int ncols , float * sub_y_arr){
/*
sub_y_arr stores float number, with nblocks rows, mem_b_size columns
*/
//#ifdef DEBUG
//printf("This is Cuda Block # %d: \n", blockIdx.x);
//#endif
//if(blockIdx.x >= nblocks)
// return;
//SubBlock * work_sb = &d_sbs[blockIdx.x];
//printSubBlocksInfo(work_sb, 1, mem_b_size);
/*
float * x_sub = (float *) malloc(mem_b_size * sizeof(float));
float * y_sub = (float *) malloc(mem_b_size * sizeof(float));
//float * x;
for(int i = 0; i < mem_b_size; i++){
if(work_sb->nnz_global_i_idx[i] > 0 && work_sb->nnz_global_i_idx[i] <= ncols){
// d_x indexing starts from '1'
// x_sub indexing starts from '0'
x_sub[i] = d_x[work_sb->nnz_global_i_idx[i] - 1];
}
else{
x_sub[i] = 0.0;
}
}
for(int i = 0; i < work_sb->nnz; i++){
int x_sub_idx = work_sb->nnz_local_c_idx[i] - 1;
int y_sub_idx = work_sb->nnz_local_r_idx[i] - 1;
y_sub[y_sub_idx] += work_sb->nnz_values[i] * x_sub[x_sub_idx];
//#ifdef DEBUG
// printf("This is Cuda Block # %d: Computing (%d, %d) product as (%f)\n", blockIdx.x, x_sub_idx, y_sub_idx, work_sb->nnz_values[i] * x_sub[x_sub_idx]);
//#endif
}
for(int i = 0; i < mem_b_size; i++){
sub_y_arr[blockIdx.x * mem_b_size + i] = y_sub[i];
}
*/
}
// Serially accumulate every subblock's partial result (sub_y_arr, laid out
// as nblocks rows of mem_b_size floats) into the global output vector d_y.
// Runs entirely on block 0 / thread 0; all other launched threads return.
// Output indices in nnz_global_o_idx are 1-based (hence the "- 1").
// NOTE(review): d_sbs[i].nnz_global_o_idx must point at DEVICE memory.
// The host code in this file copies structs whose pointer members were
// malloc()'d on the host, which would fault here — confirm a deep copy is
// performed before launch. d_x/nrows/ncols are unused.
__global__ void CudaMergeResults(SubBlock * d_sbs, float * d_x, float * d_y, int nblocks, int mem_b_size, int nrows, int ncols , float * sub_y_arr){
if(blockIdx.x == 0 && threadIdx.x == 0){
for(int i = 0; i < nblocks; i++){
int * outLocs = d_sbs[i].nnz_global_o_idx;
for(int j = 0; j < mem_b_size; j++){
d_y[outLocs[j] - 1] += sub_y_arr[i * mem_b_size + j];
}
}
}
}
// No-op kernel; launched from main apparently as a launch-error probe.
__global__ void cudaDummy(){
}
// Debug dump of every subblock: the input/output index maps and the
// (row, col, value) triplet of each non-zero.
// Fixes two broken output strings: missing space ("3subblocks") and the
// "Numbe" typo.
void printSubBlocksInfo(SubBlock * sbs, int nsbs, int mem_b_size){
    cout << endl << "There are " << nsbs << " subblocks." << endl;
    for(int i = 0; i < nsbs; i++){
        cout << "Subblock #: " << i << endl;
        cout << "Number of non-zeros: " << sbs[i].nnz << endl;
        cout << "Input Locations: " << endl;
        for(int j = 0; j < mem_b_size; j++){
            cout << sbs[i].nnz_global_i_idx[j] << " ";
        }
        cout << endl;
        cout << "Output Locations: " << endl;
        for(int j = 0; j < mem_b_size; j++){
            cout << sbs[i].nnz_global_o_idx[j] << " ";
        }
        cout << endl;
        // One line per non-zero: local row, local col, value.
        for(int j = 0; j < sbs[i].nnz; j++){
            cout << sbs[i].nnz_local_r_idx[j] << " " << sbs[i].nnz_local_c_idx[j] << " " << sbs[i].nnz_values[j] << endl;
        }
        cout << endl;
    }
    cout << endl;
}
// Fill vec[0..size-1] with uniform random values in [0, 1].
// size is the ELEMENT count, not a byte count (see the bug note in main).
__host__ void randomizeFloatVector(float * vec, int size){
    for (int i = 0; i < size; ++i)
        vec[i] = (float) rand() / (RAND_MAX);
}
// Print all elements space-separated, followed by a blank line.
void displayFloatVector(float * vec, int size){
    int i = 0;
    while (i < size) {
        printf("%f ", vec[i] );
        ++i;
    }
    printf("\n\n");
}
// Overwrite every element of vec[0..size-1] with 0.0f.
void setZeroFloatVector(float * vec, int size){
    for (float *p = vec; p != vec + size; ++p)
        *p = 0.0f;
}
// Driver for a block-sparse matrix-vector product y = A * x.
// Reads the subblock description from "../data/data", uploads it to the
// GPU, runs CudaCompute + CudaMergeResults, and copies y back.
// Fixes vs. the original:
//  * SubBlock structs are now DEEP-copied: pointer members are rebound to
//    device buffers before the struct array is uploaded (the old code sent
//    host malloc() pointers to the GPU, which kernels cannot dereference).
//  * cudaError_t is used instead of int, and readable error strings with
//    per-kernel labels are printed (the old code printed the same
//    "CudaCompute" label for three different launches).
//  * All host subblock buffers are freed; the data file is open-checked.
int main(){
    int count = 0;
    cudaGetDeviceCount(&count);
    cout << "There are " << count << " GPU devices available. " << endl;
    cudaSetDevice(1);
    srand(time(0));

    ifstream datafile;
    datafile.open("../data/data");
    if (!datafile.is_open()) {
        cout << "Cannot open ../data/data" << endl;
        return 1;
    }
    int nblocks, mem_b_size, nrows, ncols;
    float density;
    datafile >> nblocks >> nrows >> ncols >> mem_b_size;

    // Host vectors. Note: the randomize/zero helpers take ELEMENT counts.
    float *x = (float *) malloc(ncols * sizeof(float));
    float *y = (float *) malloc(nrows * sizeof(float));
    randomizeFloatVector(x, ncols);
    setZeroFloatVector(y, nrows);

    float *d_x = NULL;
    float *d_y = NULL;
    cudaMalloc((void **) &d_x, ncols * sizeof(float));
    cudaMalloc((void **) &d_y, nrows * sizeof(float));
    cudaError_t err = cudaGetLastError();
    cout << "allocation: " << cudaGetErrorString(err) << endl;

    // Read the host-side subblock description.
    SubBlock *sbs = (SubBlock *) malloc(nblocks * sizeof(SubBlock));
    for (int i = 0; i < nblocks; i++) {
        datafile >> sbs[i].nnz >> density;
        int nnz = sbs[i].nnz;
        sbs[i].nnz_global_i_idx = (int *) malloc(mem_b_size * sizeof(int));
        sbs[i].nnz_global_o_idx = (int *) malloc(mem_b_size * sizeof(int));
        for (int j = 0; j < mem_b_size; j++) datafile >> sbs[i].nnz_global_i_idx[j];
        for (int j = 0; j < mem_b_size; j++) datafile >> sbs[i].nnz_global_o_idx[j];
        sbs[i].nnz_local_r_idx = (int *) malloc(nnz * sizeof(int));
        sbs[i].nnz_local_c_idx = (int *) malloc(nnz * sizeof(int));
        sbs[i].nnz_values = (float *) malloc(nnz * sizeof(float));
        for (int j = 0; j < nnz; j++)
            datafile >> sbs[i].nnz_local_r_idx[j] >> sbs[i].nnz_local_c_idx[j] >> sbs[i].nnz_values[j];
    }
#ifdef DEBUG
    printSubBlocksInfo(sbs, nblocks, mem_b_size);
#endif

    // Deep copy: build a host-resident mirror whose pointer members point
    // at device buffers, then upload the mirror. Without this, the kernels
    // dereference host pointers and fault with an illegal address.
    SubBlock *sbs_dev = (SubBlock *) malloc(nblocks * sizeof(SubBlock));
    for (int i = 0; i < nblocks; i++) {
        int nnz = sbs[i].nnz;
        sbs_dev[i].nnz = nnz;
        cudaMalloc((void **) &sbs_dev[i].nnz_global_i_idx, mem_b_size * sizeof(int));
        cudaMalloc((void **) &sbs_dev[i].nnz_global_o_idx, mem_b_size * sizeof(int));
        cudaMalloc((void **) &sbs_dev[i].nnz_local_r_idx, nnz * sizeof(int));
        cudaMalloc((void **) &sbs_dev[i].nnz_local_c_idx, nnz * sizeof(int));
        cudaMalloc((void **) &sbs_dev[i].nnz_values, nnz * sizeof(float));
        cudaMemcpy(sbs_dev[i].nnz_global_i_idx, sbs[i].nnz_global_i_idx, mem_b_size * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(sbs_dev[i].nnz_global_o_idx, sbs[i].nnz_global_o_idx, mem_b_size * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(sbs_dev[i].nnz_local_r_idx, sbs[i].nnz_local_r_idx, nnz * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(sbs_dev[i].nnz_local_c_idx, sbs[i].nnz_local_c_idx, nnz * sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(sbs_dev[i].nnz_values, sbs[i].nnz_values, nnz * sizeof(float), cudaMemcpyHostToDevice);
    }
    SubBlock *d_sbs = NULL;
    cudaMalloc((void **) &d_sbs, nblocks * sizeof(SubBlock));
    cudaMemcpy(d_sbs, sbs_dev, nblocks * sizeof(SubBlock), cudaMemcpyHostToDevice);

    cudaMemcpy(d_x, x, ncols * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, nrows * sizeof(float), cudaMemcpyHostToDevice);

    // Per-block partial results: nblocks rows of mem_b_size floats.
    float *d_sub_y_arr = NULL;
    cudaMalloc((void **) &d_sub_y_arr, nblocks * mem_b_size * sizeof(float));

    CudaCompute<<<30, 1>>>(d_sbs, d_x, d_y, nblocks, mem_b_size, nrows, ncols, d_sub_y_arr);
    cout << "CudaCompute launch: " << cudaGetErrorString(cudaGetLastError()) << endl;
    CudaMergeResults<<<1, 1>>>(d_sbs, d_x, d_y, nblocks, mem_b_size, nrows, ncols, d_sub_y_arr);
    cout << "CudaMergeResults launch: " << cudaGetErrorString(cudaGetLastError()) << endl;

    // Blocking copy also synchronizes with the kernels above.
    cudaMemcpy(y, d_y, nrows * sizeof(float), cudaMemcpyDeviceToHost);
    //displayFloatVector(x, ncols);
    //displayFloatVector(y, nrows);

    // Release everything (the original leaked every subblock buffer).
    for (int i = 0; i < nblocks; i++) {
        cudaFree(sbs_dev[i].nnz_global_i_idx);
        cudaFree(sbs_dev[i].nnz_global_o_idx);
        cudaFree(sbs_dev[i].nnz_local_r_idx);
        cudaFree(sbs_dev[i].nnz_local_c_idx);
        cudaFree(sbs_dev[i].nnz_values);
        free(sbs[i].nnz_global_i_idx);
        free(sbs[i].nnz_global_o_idx);
        free(sbs[i].nnz_local_r_idx);
        free(sbs[i].nnz_local_c_idx);
        free(sbs[i].nnz_values);
    }
    free(sbs_dev);
    free(sbs);
    free(x);
    free(y);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_sub_y_arr);
    cudaFree(d_sbs);
    datafile.close();
    printf("Hello!\n");
    return 0;
}
|
6,903 | extern "C"{
// must include this header, because we need some built-in math functions and structure definitions.
//#include <__nv_nvrtc_builtin_header.h>
// One voxel, packed into 4 bytes. x is the opacity/alpha (0-255, see
// trace()); y/z/w are used as the R/G/B reflectance (see render()).
struct vox_decl
{
unsigned char x,y,z,w;
};
// Per-ray DDA (voxel-grid traversal) state.
struct dda_trav
{
// constant during ray-traversal
float oxyz[3];    // ray origin
float dxyz[3];    // ray direction (normalized by dda_trav_init)
float invdxyz[3]; // 1 / dxyz, per axis
int signxyz[3];   // +1 / -1 step direction per axis
// variables during ray-traversal
float pxyz[3];    // current point along the ray
float nxyz[3];    // surface normal at the last hit
int voxijk[3];    // current voxel coordinates
vox_decl vd;      // voxel data at the last visited voxel
};
// Index (0..2) of the largest of the three values in arr; ties keep the
// earliest index.
__device__ int argmax3(float* arr)
{
    int best = 0;
    if (arr[1] > arr[best]) best = 1;
    if (arr[2] > arr[best]) best = 2;
    return best;
}
// Normalize a 3-vector; calling with in == out (in-place) is allowed.
// Uses sqrtf to make the single-precision overload explicit (avoids any
// accidental double-precision math in device code).
__device__ void norm3(float *in, float *out)
{
    float invl = 1.0f / sqrtf(in[0] * in[0] + in[1] * in[1] + in[2] * in[2]);
    out[0] = in[0] * invl;
    out[1] = in[1] * invl;
    out[2] = in[2] * invl;
}
// Squared Euclidean length of a 3-vector.
__device__ float lensq3(float* in)
{
    float sum = 0.0f;
    for (int k = 0; k < 3; ++k)
        sum += in[k] * in[k];
    return sum;
}
// out = a x b (cross product).
// Robustness fix: both inputs are loaded into locals before any store to
// out, so the result is correct even when out aliases a or b.
__device__ void cross3(float* out,float* a, float* b)
{
    float ax = a[0], ay = a[1], az = a[2];
    float bx = b[0], by = b[1], bz = b[2];
    out[0] = ay * bz - by * az;
    out[1] = az * bx - bz * ax;
    out[2] = ax * by - bx * ay;
}
// state used for PRNG
// state used for PRNG (xorwow; see the Wikipedia reference below).
// Must not be initialized with a..d all zero.
struct xorwow_state {
unsigned int a, b, c, d;
unsigned int counter;
};
// Integer bit-mixing hash used to decorrelate PRNG seeds.
__device__ unsigned int hash(unsigned int a) {
    a ^= 61u ^ (a >> 16);
    a += a << 3;
    a ^= a >> 4;
    a *= 0x27d4eb2d;
    a ^= a >> 15;
    return a;
}
// Seed the xorwow state from (thread_id, pass), hashing each field with a
// distinct XOR constant so neighbouring threads/passes decorrelate.
// Hashing makes an all-zero a..d state (forbidden by xorwow) essentially
// impossible but not strictly ruled out.
__device__ void xorwow_init(xorwow_state* state, int thread_id, int pass)
{
state->a = hash((thread_id^0x7123bbcc)+(pass^0x0baabfcb));
state->b = hash((thread_id^0xfabbcddc)+7+(pass^0xa30fb67a));
state->c = hash((thread_id^0x0078ddcc)-23+(pass^0xffaabccb));
state->d = hash((thread_id^0x78633ff0)+1001+(pass^0x98ab47f1));
state->counter = hash((thread_id^0x0893ff87)-(pass^0x19d86b2d));
// state->a = hash(thread_id+pass);
// state->b = hash(thread_id+7+pass);
// state->c = hash(thread_id-23+pass);
// state->d = hash(thread_id+1001+pass);
// state->counter = hash(thread_id+11+pass);
}
// From wikipedia
// https://en.wikipedia.org/wiki/Xorshift
// xorwow is used as default PRNG in CUDA Toolkit.
/* The state array must be initialized to not be all zero in the first four words */
// Advance the state and return the next 32-bit pseudo-random value.
// Kept byte-for-byte as the published reference implementation; the shift
// and assignment order is significant.
__device__ unsigned int xorwow(xorwow_state* state)
{
/* Algorithm "xorwow" from p. 5 of Marsaglia, "Xorshift RNGs" */
unsigned int t = state->d;
unsigned int s = state->a;
state->d = state->c;
state->c = state->b;
state->b = s;
t ^= t >> 2;
t ^= t << 1;
t ^= s ^ (s << 4);
state->a = t;
state->counter += 362437;
return t + state->counter;
}
// Uniform float in [0, 1): take 31 random bits and divide by 2^31.
__device__ float xorwow_f(xorwow_state* state)
{
    unsigned int bits = xorwow(state) & 0x7fffffff;
    return float(bits) / float(0x80000000);
}
// generate random points on a hemisphere
// generate random points on a hemisphere
// Rejection-sample the unit ball from the cube [-1,1]^3 (rejecting
// near-zero vectors that would make normalization unstable), flip z
// non-negative to land on the upper hemisphere, then normalize.
__device__ void xorwow_hs(xorwow_state* state,float* result)
{
float l;
do{
result[0] = (xorwow_f(state)-0.5f)*2;
result[1] = (xorwow_f(state)-0.5f)*2;
result[2] = (xorwow_f(state)-0.5f)*2;
l=lensq3(result);
}while(l>1.0f||l<0.0001f); // reject sampling
result[2] = fabsf(result[2]);
norm3(result,result);
}
// Sample a random direction on the hemisphere oriented around `normal`:
// build an orthonormal basis (u, v, normal), sample the canonical z-up
// hemisphere, then rotate the sample into that basis.
__device__ void xorwow_hsn(xorwow_state* state,float* normal,float* result)
{
// 3D coordinate axis
float u[3] = {0.0f,0.0f,1.0f};
float v[3];
float t[3];
// If normal is nearly parallel to +z the cross product degenerates;
// fall back to +y as the helper axis.
cross3(t,normal,u);
if(lensq3(t)<0.001f) { u[0]=0.0f;u[1]=1.0f;u[2]=0.0f;}
cross3(v,normal,u);
norm3(v,v);
cross3(u,v,normal);
norm3(u,u);
// sample from hemisphere
xorwow_hs(state,t);
// transform t to new coordinate
result[0] = u[0]*t[0]+v[0]*t[1]+normal[0]*t[2];
result[1] = u[1]*t[0]+v[1]*t[1]+normal[1]*t[2];
result[2] = u[2]*t[0]+v[2]*t[1]+normal[2]*t[2];
}
// ray-box intersect
// ray-box intersect: slab test against the axis-aligned cube [0,n]^3.
// On success returns 1 with the entry/exit parameters in *t0/*t1
// (clamped to [0, 1e20]); returns 0 on a miss. dxyz itself is unused —
// only the origin and the per-axis reciprocal direction are needed.
__device__ int rbi(
float *oxyz, float *dxyz, float *dinvxyz,
int n, float *t0, float *t1)
{
    *t0 = 0;
    *t1 = 1e20f;
    for (int axis = 0; axis < 3; ++axis) {
        float near_t = -oxyz[axis] * dinvxyz[axis];
        float far_t = (n - oxyz[axis]) * dinvxyz[axis];
        if (near_t > far_t) { float tmp = near_t; near_t = far_t; far_t = tmp; }
        if (near_t > *t0) *t0 = near_t;
        if (far_t < *t1) *t1 = far_t;
        if (*t0 > *t1) return 0;   // slabs no longer overlap: miss
    }
    return 1;
}
// Smallest of (*x, *y, *z) -> *f, and its axis index (0/1/2) -> *idx.
// Ties keep the earliest axis.
__device__ void minxyz(
float *x, float *y, float *z,
float *f, int *idx)
{
    float best = *x;
    int which = 0;
    if (*y < best) { best = *y; which = 1; }
    if (*z < best) { best = *z; which = 2; }
    *f = best;
    *idx = which;
}
// Largest of (*x, *y, *z) -> *f, and its axis index (0/1/2) -> *idx.
// Ties keep the earliest axis.
__device__ void maxxyz(
float *x, float *y, float *z,
float *f, int *idx)
{
    float best = *x;
    int which = 0;
    if (*y > best) { best = *y; which = 1; }
    if (*z > best) { best = *z; which = 2; }
    *f = best;
    *idx = which;
}
// Precompute per-ray constants once oxyz/dxyz are filled in: normalize the
// direction, cache its reciprocal, and record the step sign per axis.
// NOTE(review): a direction component of exactly 0 produces an infinite
// invdxyz; the slab/DDA code appears to rely on IEEE inf semantics here —
// confirm.
__device__ void dda_trav_init(dda_trav *trav)
{
norm3(trav->dxyz, trav->dxyz);
trav->invdxyz[0] = 1.0f / trav->dxyz[0];
trav->invdxyz[1] = 1.0f / trav->dxyz[1];
trav->invdxyz[2] = 1.0f / trav->dxyz[2];
trav->signxyz[0] = trav->dxyz[0] > 0 ? 1 : -1;
trav->signxyz[1] = trav->dxyz[1] > 0 ? 1 : -1;
trav->signxyz[2] = trav->dxyz[2] > 0 ? 1 : -1;
}
// Enter the n^3 voxel grid: intersect the ray with the grid's bounding
// box, move the current point to the entry position, and clamp the entry
// voxel coordinates into [0, n-1]. Returns 0 when the ray misses the grid.
__device__ int dda_trav_begin(int n, dda_trav *trav)
{
    float t0, t1;
    if (!rbi(trav->oxyz, trav->dxyz, trav->invdxyz, n, &t0, &t1)) return 0;
    for (int axis = 0; axis < 3; ++axis) {
        trav->pxyz[axis] = trav->oxyz[axis] + t0 * trav->dxyz[axis];
        int v = int(trav->pxyz[axis]);
        if (v < 0) v = 0;
        if (v > n - 1) v = n - 1;
        trav->voxijk[axis] = v;
    }
    return 1;
}
// One 3D-DDA step: compute the ray distance to the next voxel boundary on
// each axis, step into the neighbouring voxel along the nearest one, and
// advance the current point by that distance. Returns 0 once the ray
// leaves the n^3 grid.
__device__ int dda_trav_next(int n, dda_trav *trav)
{
// Center of the current voxel (voxels are unit cubes).
float voxcx = trav->voxijk[0] + 0.5f;
float voxcy = trav->voxijk[1] + 0.5f;
float voxcz = trav->voxijk[2] + 0.5f;
// Distance along the ray to the exit face on each axis.
float tx = fabsf((voxcx + 0.5f * trav->signxyz[0] - trav->pxyz[0]) * trav->invdxyz[0]);
float ty = fabsf((voxcy + 0.5f * trav->signxyz[1] - trav->pxyz[1]) * trav->invdxyz[1]);
float tz = fabsf((voxcz + 0.5f * trav->signxyz[2] - trav->pxyz[2]) * trav->invdxyz[2]);
float tmin;
int id;
minxyz(&tx, &ty, &tz, &tmin, &id);
// Step one voxel along the axis whose boundary is reached first.
trav->voxijk[id] += trav->signxyz[id];
if (trav->voxijk[0] < 0 || trav->voxijk[0] > n-1) return 0;
if (trav->voxijk[1] < 0 || trav->voxijk[1] > n-1) return 0;
if (trav->voxijk[2] < 0 || trav->voxijk[2] > n-1) return 0;
trav->pxyz[0] += tmin * trav->dxyz[0];
trav->pxyz[1] += tmin * trav->dxyz[1];
trav->pxyz[2] += tmin * trav->dxyz[2];
return 1;
}
// Generate a primary ray through pixel (px, py) of a w x h image for a
// pinhole camera. The virtual canvas is 1 unit wide; its distance from the
// eye is derived from the field-of-view angle `fov` (degrees).
// Assumes camera_heading and camera_up are unit length and orthogonal
// (see regularize_camera_vectors).
__device__ void emit_ray(
float* camera_position, float* camera_heading, float* camera_up,
// float camera_depth, float canvas_width, float canvas_height,
float fov, float px, float py, int w, int h,
float* ray_origin, float* ray_direction)
{
float canvas_width = 1.0f;
float canvas_height = float(h)/float(w);
// Half the fov, in radians.
float theta = fov/360.0f*3.1415926f;
float camera_depth = canvas_width*0.5f / tan(theta);
//
ray_origin[0] = camera_position[0];
ray_origin[1] = camera_position[1];
ray_origin[2] = camera_position[2];
float canvas_center[3];
canvas_center[0] = camera_position[0] + camera_heading[0]*camera_depth;
canvas_center[1] = camera_position[1] + camera_heading[1]*camera_depth;
canvas_center[2] = camera_position[2] + camera_heading[2]*camera_depth;
float camera_right[3];
cross3(camera_right,camera_heading,camera_up);
// Pixel offset from the canvas center; py grows downward on screen.
float dx = (px/w-0.5f)*canvas_width;
float dy = (0.5f-py/h)*canvas_height;
float target[3];
target[0] = canvas_center[0] + camera_right[0]*dx + camera_up[0]*dy;
target[1] = canvas_center[1] + camera_right[1]*dx + camera_up[1]*dy;
target[2] = canvas_center[2] + camera_right[2]*dx + camera_up[2]*dy;
ray_direction[0] = target[0] - ray_origin[0];
ray_direction[1] = target[1] - ray_origin[1];
ray_direction[2] = target[2] - ray_origin[2];
norm3(ray_direction,ray_direction);
}
// Re-orthonormalize the camera frame in place: front is normalized and up
// is rebuilt (via two cross products) to be unit length and perpendicular
// to front.
__device__ void regularize_camera_vectors(float* front,float* up)
{
float right[3];
norm3(front,front);
cross3(right,front,up);
norm3(right,right);
cross3(up,right,front);
norm3(up,up);
}
// Face normal of the voxel just entered: the axis on which the hit point
// is farthest from the voxel center identifies the entry face, and the
// normal points against the ray's step direction on that axis.
__device__ void calculate_normal(int* signxyz,float* pxyz,int* voxijk,float* normal)
{
float voxel_center[3];
voxel_center[0] = float(voxijk[0])+0.5f;
voxel_center[1] = float(voxijk[1])+0.5f;
voxel_center[2] = float(voxijk[2])+0.5f;
float delta[3];
delta[0] = fabsf(pxyz[0]-voxel_center[0]);
delta[1] = fabsf(pxyz[1]-voxel_center[1]);
delta[2] = fabsf(pxyz[2]-voxel_center[2]);
int axis = argmax3(delta);
normal[0] = 0.0f;
normal[1] = 0.0f;
normal[2] = 0.0f;
normal[axis] = -signxyz[axis];
}
// Walk the ray through the n^3 voxel grid. vd.x is the voxel's alpha
// (0-255): each visited voxel registers a hit with probability
// proportional to its opacity. On a hit, fills trav->vd and trav->nxyz
// and returns 1; returns 0 when the ray exits the grid without hitting.
__device__ int trace(int n, dda_trav* trav, vox_decl* voxdata,xorwow_state* state)
{
    if (dda_trav_begin(n, trav) == 0) return 0;
    do {
        int voxid = trav->voxijk[2]*n*n + trav->voxijk[1]*n + trav->voxijk[0];
        trav->vd = voxdata[voxid];
        // trav->vd.x resembles alpha value
        if ((xorwow(state) % 255) < trav->vd.x) {
            calculate_normal(
                trav->signxyz,
                trav->pxyz,
                trav->voxijk,
                trav->nxyz);
            return 1;
        }
    } while (dda_trav_next(n, trav));
    return 0;
}
// One thread per output pixel: path-trace up to max_depth bounces through
// the voxel grid and ACCUMULATE (+=) the resulting RGB energy into the
// rcomp/gcomp/bcomp buffers — callers average over `pass` invocations.
// Expected launch: a 2D grid/block covering at least w x h threads
// (excess threads return at the bounds check).
__global__ void render(
int pass,
int w,int h, // render buffer size
float* rcomp, float* gcomp, float* bcomp, // target render buffer
int n, vox_decl* voxdata, // voxel data
float* position, float* heading, float* up, // camera settings
float fov) // field of view
{
int ipx = blockIdx.x*blockDim.x + threadIdx.x;
int ipy = blockIdx.y*blockDim.y + threadIdx.y;
float px = float(ipx);
float py = float(ipy);
if (px>=w||py>=h) return;
// global thread_id, see richiesams blogspot
int thread_id = (blockIdx.x+blockIdx.y*gridDim.x)*(blockDim.x*blockDim.y)
+(threadIdx.y*blockDim.x)+threadIdx.x;
// initializing random seed, we use "xorwow" algorithm.
xorwow_state state;
xorwow_init(&state,thread_id,pass);
// jitter pixel location for anti-aliasing
px = px+xorwow_f(&state)-0.5f;
py = py+xorwow_f(&state)-0.5f;
// copy camera configurations
float camera_position[3] = {position[0],position[1],position[2]};
float camera_heading[3] = {heading[0],heading[1],heading[2]};
float camera_up[3] = {up[0],up[1],up[2]};
regularize_camera_vectors(camera_heading,camera_up);
// emit initial ray from camera
dda_trav trav;
emit_ray(camera_position, camera_heading, camera_up,
fov, px, py, w, h, trav.oxyz, trav.dxyz);
// initialize traversal structure
dda_trav_init(&trav);
// initialize rendering context
float energy[3]={1.0f,1.0f,1.0f};
int max_depth = 12;
int hit_anything = 0;
// start raytrace
for (int depth=0; depth<max_depth;depth++){
int trace_result = trace(n,&trav,voxdata,&state);
if(trace_result==0){
// Ray escaped: black if it never hit anything or leaves downward;
// otherwise the accumulated energy is kept (acts as the light source).
if(hit_anything==0) {energy[0]=0;energy[1]=0;energy[2]=0;}
else if(trav.dxyz[2]<0) {energy[0]=0;energy[1]=0;energy[2]=0;}
else {/*...*/}
break;
}
else{
hit_anything=1;
// Attenuate by the voxel's RGB reflectance (vd.y/z/w in 0-255).
energy[0]=energy[0]*trav.vd.y/255.0f;
energy[1]=energy[1]*trav.vd.z/255.0f;
energy[2]=energy[2]*trav.vd.w/255.0f;
// slightly move intersection point outward to avoid self-intersection
trav.pxyz[0] = trav.pxyz[0]+trav.nxyz[0]*0.001f;
trav.pxyz[1] = trav.pxyz[1]+trav.nxyz[1]*0.001f;
trav.pxyz[2] = trav.pxyz[2]+trav.nxyz[2]*0.001f;
// fill in info to prepare for next raytrace
trav.oxyz[0] = trav.pxyz[0];trav.oxyz[1] = trav.pxyz[1];trav.oxyz[2] = trav.pxyz[2];
xorwow_hsn(&state,trav.nxyz,trav.dxyz);
dda_trav_init(&trav);
}
}
rcomp[ipy*w+ipx]+=energy[0];
gcomp[ipy*w+ipx]+=energy[1];
bcomp[ipy*w+ipx]+=energy[2];
}
} |
6,904 | #include "includes.h"
// Demo kernel: each thread stages its own index through dynamic shared
// memory, writes it to a[i] and b[i], then stores their sum into c[i].
// Launch must provide blockDim.x * sizeof(int) bytes of dynamic shared
// memory; every thread touches only its own slot, so no barrier is needed.
__global__ void add(int *a, int *b, int *c)
{
    extern __shared__ int shared_mem[];
    const int tid = threadIdx.x;
    shared_mem[tid] = tid;
    a[tid] = shared_mem[tid];
    b[tid] = shared_mem[tid];
    c[tid] = a[tid] + b[tid];
}
6,905 | #define BLOCK_SIZE 32
#define PADDING 2
// Tiled 2D convolution of i_arr1d (rows x cols) with a
// (2*PADDING+1)^2 filter, writing the weight-normalized result to
// o_arr1d. Expects a BLOCK_SIZE x BLOCK_SIZE thread block.
// NOTE(review): the halo-loading code is commented out, so for any
// radius > 0 the border rows/columns of `tile` are read UNINITIALIZED.
// NOTE(review): requires radius <= PADDING (tile indices overflow
// otherwise) and has no guard for xidx/yidx beyond the image — confirm
// callers pad the image to multiples of BLOCK_SIZE.
__global__ void gaborFilterKernel(float* o_arr1d, float* i_arr1d, int rows, int cols, float* filter1d, int radius) {
// Input tile with a PADDING-wide apron on all four sides.
__shared__ float tile[BLOCK_SIZE+PADDING*2][BLOCK_SIZE+PADDING*2];
// Filter coefficients cached once per block.
__shared__ float filter_buf[2*PADDING+1][2*PADDING+1];
int tx = threadIdx.x;
int ty = threadIdx.y;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
tile[ty+PADDING][tx+PADDING] = i_arr1d[yidx * cols + xidx];
if(tx < 2*PADDING+1 && ty < 2*PADDING+1)
filter_buf[ty][tx] = filter1d[ty * (2*PADDING+1) + tx];
/*if(tx < PADDING)
tile[ty+PADDING][tx] = i_arr1d[yidx * cols + (xidx - tx)];
if(tx >= BLOCK_SIZE - PADDING)
tile[ty+PADDING][tx+PADDING*2] = i_arr1d[yidx * cols + (xidx - tx + BLOCK_SIZE - 1)];
if(ty < PADDING)
tile[ty][tx+PADDING] = i_arr1d[(yidx - ty) * cols + xidx];
if(ty >= BLOCK_SIZE - PADDING)
tile[ty+PADDING*2][tx+PADDING] = i_arr1d[(yidx - ty + BLOCK_SIZE - 1) * cols + xidx];
if(tx < PADDING && ty < PADDING)
tile[ty][tx] = i_arr1d[(yidx - ty) * cols + xidx - tx];
if(tx >= BLOCK_SIZE - PADDING && ty < PADDING)
tile[ty][tx+PADDING*2] = i_arr1d[(yidx - ty) * cols + (xidx - tx + BLOCK_SIZE - 1)];
if(tx < PADDING && ty >= BLOCK_SIZE - PADDING)
tile[ty+PADDING*2][tx] = i_arr1d[(yidx - ty + BLOCK_SIZE - 1) * cols + xidx - tx];
if(tx >= BLOCK_SIZE - PADDING && ty >= BLOCK_SIZE - PADDING)
tile[ty+PADDING*2][tx+PADDING*2] = i_arr1d[(yidx - ty + BLOCK_SIZE - 1) * cols + xidx - tx + BLOCK_SIZE - 1];*/
// Barrier: all shared-memory loads must complete before any thread reads.
__syncthreads();
float Io = 0.0f;
float accumWeight = 0.0f;
// Accumulate the weighted neighborhood and the total weight.
for(int dy = -radius; dy <= radius; dy++) {
for(int dx = -radius; dx <= radius; dx++) {
float weight = filter_buf[dy + radius][dx + radius];
//float weight = filter1d[(dy + radius) * radius + (dx + radius)];
float I = tile[ty + dy + PADDING][tx + dx + PADDING];
Io += I * weight;
accumWeight += weight;
}
}
// normalize weight
Io /= accumWeight;
o_arr1d[yidx * cols + xidx] = Io;
//o_arr1d[yidx * cols + xidx] = tile[ty+PADDING][tx+PADDING];
}
|
6,906 |
#include <stdio.h>
#include <math.h>
#include <float.h>
// +1 for strictly positive values, -1 otherwise (note: zero maps to -1).
__device__ int sign(float val) {
    if (0.0 < val) return 1;
    return -1;
}
// Seed the fast-marching velocity extension near the zero level set of phi:
// cells within half a grid-diagonal of the interface become Accepted (+1),
// cells adjacent to one become Tentative (-1), everything else 0; Fext is
// seeded with F(x,y) = x at both kinds of cells.
// NOTE(review): rows/cols are derived from idxA/Apitch (row = index_x,
// col = index_y), mixing the phi pitch and the Accept pitch — confirm the
// two pitches are intended to address the same logical grid.
__global__ void FastMarchInit(float* phi, int pitch, int* Accept, int Apitch, float* Fext, int Nx, int Ny, float dx, float dy, float Xmin, float Ymin)
//float* XXo, float* YYo) // testing that the grid coord calculated right
{
// This will initialize the Velocity field near the interface
// ASSUMES THAT THE SIGNED DISTANCE FUNCTION DOES NOT NEED REINITIALIZATION
// Phi assumed accurate throughout domain. Only calculating Velocity extension Fext
int index_x = blockIdx.x * blockDim.x + threadIdx.x;
int index_y = blockIdx.y * blockDim.y + threadIdx.y;
int idx = index_x * pitch + index_y;
int idxA = index_x * Apitch + index_y;
// int row = idx/pitch;
// int col = idx%pitch;
int row = idxA/Apitch;
int col = idxA%Apitch;
// Half the grid-cell diagonal: "the interface passes through this cell".
float dd = sqrt(dx*dx/4+dy*dy/4); //to check if interface is nearby
int Aval = 0;
float Fval = 0;
// Interior cells only; the one-cell border is left untouched.
if (col<Ny-1 && row<Nx-1 && col>1 && row>1){
if (abs(phi[idx])<dd){
Aval = 1;
Fval = Xmin + row * dx;
//Accept[idxA] = 1;// mark as on interface
//Fext[idx] = Xmin + row * dx; // NOTE TO DO LATER: build more flexible function than F(x,y)=x
}
else if ( abs(phi[idx - pitch])<dd || abs(phi[idx + pitch])<dd || abs(phi[idx - 1])<dd || abs(phi[idx + 1])<dd ){
Aval = -1;
Fval = Xmin + row * dx;
//Accept[idxA] = -11; // mark as tentative because it is a neigbor of interface
//Fext[idx] = Xmin + row * dx;
//printf("\nWHY!!!??? \t (r,c)=%d,%d\t , %d, %d, \t F=%3.3f, A=%d \n", row,col, idxA, idx, Fval, Aval );
}
Accept[idxA] = Aval;
Fext[idx] = Fval;
//if (Aval>2){
// printf("\nWHY!!!??? \t (r,c)=%d,%d\t , %d, %d, \t F=%3.3f, A=%d \n", row,col, idxA, idx, Fval, Aval );
//}
}
}
// One sweep of the fast-marching velocity extension: for every cell that
// is not yet Accepted but has at least one usable neighbor, extend Fext by
// upwind-weighted averaging of the neighboring values, and mark the cell
// Tentative (-1). Results go to AcceptNew/FextNew (double-buffered).
// Neighbor layout throughout: {center, row-1, col+1, row+1, col-1}.
// BUG FIX: the `tentative` contribution of A[2] (the col+1 neighbor) was
// guarded by (row==Nx-1); it must mirror the Dyp guard and test
// (col==Ny-1).
__global__ void FastMarchVelocity(int count, float* phi, int pitch, int* Accept, int Apitch, float* Fext, int* AcceptNew, float* FextNew, int Nx, int Ny, float dx, float dy)
{
    int index_x = blockIdx.x * blockDim.x + threadIdx.x;
    int index_y = blockIdx.y * blockDim.y + threadIdx.y;
    int idx = index_x * pitch + index_y;
    int idxA = index_x * Apitch + index_y;
    int row = idx/pitch;
    int col = idx%pitch;
    float Dxp, Dxm, Dyp, Dym;
    float Dx, Dy, FDx, FDy;
    // Load the 5-point stencil into registers: {center, top, right, bottom, left}.
    int A[5] = { Accept[idxA], Accept[idxA - Apitch], Accept[idxA + 1], Accept[idxA + Apitch], Accept[idxA - 1] } ;
    float F[5] = { Fext[idx], Fext[idx-pitch], Fext[idx+1], Fext[idx+pitch], Fext[idx-1] } ;
    float P[5] = { phi[idx], phi[idx-pitch], phi[idx+1], phi[idx+pitch], phi[idx-1] } ;
    // One-sided differences of phi, masked by |A| so only accepted or
    // tentative neighbors contribute; domain edges contribute 0.
    Dxm = (row==0) ? 0 : abs(A[1]) * (P[0] - P[1])/dx ;
    Dxp = (row==Nx-1) ? 0 : abs(A[3]) * (P[3] - P[0])/dx ;
    Dym = (col==0) ? 0 : abs(A[4]) * (P[0] - P[4])/dy ;
    Dyp = (col==Ny-1) ? 0 : abs(A[2]) * (P[2] - P[0])/dy ;
    // Count usable neighbors (same guards as the differences above).
    int tentative = 0;
    tentative += (row==0) ? 0 : abs(A[1]);
    tentative += (row==Nx-1) ? 0 : abs(A[3]);
    tentative += (col==0) ? 0 : abs(A[4]);
    tentative += (col==Ny-1) ? 0 : abs(A[2]);   // fixed: was (row==Nx-1)
    // Upwind selection on each axis: pick the steeper admissible direction
    // and pair it with that neighbor's extended value.
    if ((-Dxp>Dxm) && (-Dxp>0)){
        FDx = F[3] * (-Dxp);
        Dx = -Dxp;
    }else if ((Dxm>-Dxp) &&(Dxm>0)){
        FDx = F[1] * (Dxm);
        Dx = Dxm;
    }else{
        FDx = 0;
        Dx = 0;
    }
    if ((-Dyp>Dym) && (-Dyp>0)){
        FDy = F[2] * (-Dyp);
        Dy = -Dyp;
    }else if ((Dym>-Dyp) &&(Dym>0)){
        FDy = F[4] * (Dym);
        Dy = Dym;
    }else{
        FDy = 0;
        Dy = 0;
    }
    if ( row<Nx && col<Ny && A[0]!=1 && tentative>0 ) {
        // Weighted average of upwind neighbor values; the epsilon guards a
        // 0/0 when both gradients vanish (DBL_EPSILON kept for bit-for-bit
        // compatibility with prior results).
        F[0] = (FDx + FDy)/(Dx + Dy + DBL_EPSILON);
        A[0] = -1;
    }
    // Write-through even when unchanged, so the new buffers are complete.
    AcceptNew[idxA] = A[0];
    FextNew[idx] = F[0];
}
|
6,907 | #include <stdio.h>
// Abort the whole program with a readable message on any CUDA failure.
static void CUDA_ERROR( cudaError_t err)
{
    if (err == cudaSuccess) return;
    printf("CUDA ERROR: %s, exiting\n", cudaGetErrorString(err));
    exit(-1);
}
// A pair of ints processed together by the increment kernel.
struct two_numbers
{
int i;
int j;
};
// Owns the host-side array and the matching device-side allocation.
struct container
{
two_numbers *numbers;    // host buffer
two_numbers *d_numbers;  // device buffer (cudaMalloc'd)
};
// One thread per element: double .i and multiply .j by -3.
// The guard handles launches whose grid overshoots N.
__global__ void increment(two_numbers *d_numbers, int N)
{
    int idx = blockDim.x*blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    d_numbers[idx].i *= 2;
    d_numbers[idx].j *= -3;
}
// Round-trip demo: initialize N pairs on the host, transform them on the
// device, print before/after.
// Fixes vs. the original: removed the dead copy of an uninitialized local
// pointer into theBox.d_numbers (it was immediately overwritten by
// cudaMalloc), added a launch-error check, freed the new[]'d host array,
// and return 0 explicitly.
int main()
{
    const int N = 10;
    container theBox;
    theBox.numbers = new two_numbers[N];
    for (int i = 0; i < N; i++)
    {
        theBox.numbers[i].i = i;
        theBox.numbers[i].j = -i;
        printf("%d %d \n", theBox.numbers[i].i, theBox.numbers[i].j);
    }
    // Allocate the device buffer directly into the container.
    CUDA_ERROR(cudaMalloc((void**)&(theBox.d_numbers), N*sizeof(two_numbers)));
    printf("d_i mallocated\n");
    CUDA_ERROR(cudaMemcpy(theBox.d_numbers, theBox.numbers, N*sizeof(two_numbers), cudaMemcpyHostToDevice));
    printf("d_i memcpied\n");
    increment<<<1,16>>>(theBox.d_numbers, N);
    CUDA_ERROR(cudaGetLastError());   // surface launch-configuration errors
    // Blocking copy also synchronizes with the kernel.
    CUDA_ERROR(cudaMemcpy(theBox.numbers, theBox.d_numbers, N*sizeof(two_numbers), cudaMemcpyDeviceToHost));
    for (int i = 0; i < N; i++)
    {
        printf("%d %d \n", theBox.numbers[i].i, theBox.numbers[i].j);
    }
    CUDA_ERROR(cudaFree(theBox.d_numbers));
    delete[] theBox.numbers;   // fix: host array was leaked
    return 0;
}
|
6,908 | // A naive matrix multiplication program without tiling
// Uses a single block
#include "stdio.h"
#include "stdlib.h"
// this is a naive code that only uses a single block
// since the GTX 480 is limited to 32*32 = 1024 threads
// per block, the max value of SIZE is 32. Anything
// that exceeds 32 will not yield correct values
#define SIZE 32
// kernel definition
// Naive single-block matrix multiply: thread (x, y) computes
// C[row][col] = dot(row of A, column of B) for len x len row-major
// matrices. Assumes a single block of at least len x len threads.
__global__ void MatrixMulKernel(float * A,float * B,float * C,int len)
{
    const int col = threadIdx.x;
    const int row = threadIdx.y;
    float acc = 0.0f;
    for (int k = 0; k < len; k++) {
        acc += A[row*len + k] * B[k*len + col];
    }
    C[row*len + col] = acc;
}
// Times the single-block naive matrix multiply (SIZE x SIZE, SIZE <= 32 —
// see the file-header comment about the per-block thread limit).
// Fixes vs. the original: the timing events are destroyed (they leaked),
// and the kernel launch is error-checked.
int main(int argc, char ** argv)
{
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float h_A[SIZE*SIZE],h_B[SIZE*SIZE],h_C[SIZE*SIZE];
    float * d_A, * d_B, * d_C;
    // initialize host matrices with arbitrary (deterministic) data
    for (int i = 0; i < SIZE*SIZE; i++) {
        h_A[i] = (float)i;
        h_B[i] = (float)SIZE * (float)SIZE - (float)i - 1.00;
        h_C[i] = 0.0;
    }
    // allocate space on device
    size_t size = SIZE*SIZE*sizeof(float);
    cudaMalloc(&d_A,size);
    cudaMalloc(&d_B,size);
    cudaMalloc(&d_C,size);
    // copy data to device
    cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_C,h_C,size,cudaMemcpyHostToDevice);
    dim3 blocksPerGrid(1);              // 1 x 1 x 1
    dim3 threadsPerBlock(SIZE,SIZE);    // SIZE x SIZE x 1
    cudaEventRecord(start);
    MatrixMulKernel<<< blocksPerGrid, threadsPerBlock >>>(d_A,d_B,d_C,SIZE);
    // Surface launch errors (e.g. SIZE*SIZE exceeding the block limit).
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("kernel time (ms) : %7.5f\n",milliseconds);
    // copy results back to host
    cudaMemcpy(h_C,d_C,size,cudaMemcpyDeviceToHost);
    // Free up device memory and the timing events
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
6,909 | #include <stdio.h>
#define N 64
#define TPB 32
// Map i in [0, n-1] linearly onto [0.0, 1.0].
__device__ float scale(int i,int n)
{
    return (float)i / (n - 1);
}
// |x2 - x1| in single precision.
// Fix: fabsf instead of sqrt((x2-x1)^2) — squaring a large difference
// overflows to inf and a tiny one underflows to 0, both giving wrong
// answers where fabsf is exact.
__device__ float distance(float x1,float x2)
{
    return fabsf(x2 - x1);
}
// One thread per sample point: d_out[i] = |scale(i,len) - ref|, with a
// device-side printf for inspection.
// Fix: tail guard added so launches whose grid overshoots len (len not a
// multiple of the block size) do not write out of bounds.
__global__ void distanceKernel(float *d_out,float ref,int len) {
    const int i = blockIdx.x*blockDim.x+threadIdx.x;
    if (i >= len) return;
    const float x = scale(i,len);
    d_out[i]=distance(x,ref);
    printf("i = %2d:dist from %f to %f is %f.\n", i,ref,x,d_out[i]);
}
// Launch the distance kernel over N points and tear down.
// Fix: synchronize before exit — without it the program may quit before
// the device-side printf buffer is flushed, and any kernel execution
// error would never surface.
int main()
{
    const float ref=0.5f;
    float *d_out = 0;
    cudaMalloc(&d_out,N*sizeof(float));
    distanceKernel<<<N/TPB,TPB>>>(d_out,ref,N);
    cudaDeviceSynchronize();
    cudaFree(d_out);
    return 0;
}
|
6,910 | #include <stdio.h>
// Only thread 5 of the block prints — demonstrates per-thread control flow.
__global__ void helloFromGPU(void){
    if(threadIdx.x != 5) return;
    printf("Hello World from GPU! thread: %d \n", threadIdx.x);
}
// Print from the host, launch 10 GPU threads (only thread 5 prints), and
// synchronize so the device output is flushed before exit.
int main(void){
    printf("Hello World from CPU! \n");
    helloFromGPU<<<1, 10>>>();
    cudaDeviceSynchronize();
    return 0;
}
6,911 | #include <iostream>
#include <stdio.h>
#include <assert.h>
#include <vector>
#include <queue>
#include <fstream>
#include <time.h>
#include <cmath>
#include <algorithm>
#include <string>
#include <set>
#include <climits>
#include <stack>
#include <sstream>
#include <chrono>
// Render any ostream-streamable value to a std::string.
template <typename T> std::string tostr(const T& t)
{
    std::ostringstream stream;
    stream << t;
    return stream.str();
}
#define N 5
#define N2 25
#define PDB_TABLESIZE 244140625
#define STACK_LIMIT 64 * 8
#define CORE_NUM 768
#define WARP_SIZE 32
#define BLOCK_NUM 24
#define DEBUG
using namespace std;
// Report a CUDA failure with its source location and exit.
// Invoked through the HANDLE_ERROR macro below.
static void HandleError( cudaError_t err,
                         const char *file,
                         int line ) {
    if (err == cudaSuccess) return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
            file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
#define HANDLE_NULL( a ) {if (a == NULL) { \
printf( "Host memory failed in %s at line %d\n", \
__FILE__, __LINE__ ); \
exit( EXIT_FAILURE );}}
// Blank-tile move deltas on the 5x5 board, indexed by direction:
// 0 = right, 1 = up, 2 = left, 3 = down (see the commented dir[] below).
static const int dx[4] = {0, -1, 0, 1};
static const int dy[4] = {1, 0, -1, 0};
// static const char dir[4] = {'r', 'u', 'l', 'd'};
// Move expansion order: up first, then right, left, down.
static const int order[4] = {1, 0, 2, 3};
// Board-symmetry permutation tables (device copies in __constant__ memory,
// host copies below): reflection about the main diagonal (rf) and 90/180
// degree rotations plus their reflected variants. They let one pattern
// database table serve several symmetric tile groups.
static __device__ __constant__ const int dev_rf[] = {0,5,10,15,20,1,6,11,16,21,2,7,12,17,22,3,8,13,18,23,4,9,14,19,24};
static __device__ __constant__ const int dev_rot90[] = {20,15,10,5,0,21,16,11,6,1,22,17,12,7,2,23,18,13,8,3,24,19,14,9,4};
static __device__ __constant__ const int dev_rot90rf[] = {20,21,22,23,24,15,16,17,18,19,10,11,12,13,14,5,6,7,8,9,0,1,2,3,4};
static __device__ __constant__ const int dev_rot180[] = {24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
static __device__ __constant__ const int dev_rot180rf[] = {24,19,14,9,4,23,18,13,8,3,22,17,12,7,2,21,16,11,6,1,20,15,10,5,0};
static const int rf[] = {0,5,10,15,20,1,6,11,16,21,2,7,12,17,22,3,8,13,18,23,4,9,14,19,24};
static const int rot90[] = {20,15,10,5,0,21,16,11,6,1,22,17,12,7,2,23,18,13,8,3,24,19,14,9,4};
static const int rot90rf[] = {20,21,22,23,24,15,16,17,18,19,10,11,12,13,14,5,6,7,8,9,0,1,2,3,4};
static const int rot180[] = {24,23,22,21,20,19,18,17,16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
static const int rot180rf[] = {24,19,14,9,4,23,18,13,8,3,22,17,12,7,2,21,16,11,6,1,20,15,10,5,0};
// Pattern-database cost tables (one byte per 6-tile placement):
// dev_h0/dev_h1 live in GPU global memory, h0/h1 are the host copies
// loaded from disk and uploaded with cudaMemcpyToSymbol in main().
__device__ unsigned char dev_h0[PDB_TABLESIZE];
__device__ unsigned char dev_h1[PDB_TABLESIZE];
unsigned char h0[PDB_TABLESIZE];
unsigned char h1[PDB_TABLESIZE];
// One 24-puzzle search state.
struct Node
{
int puzzle[N2];      // tile value at each board position
int inv_puzzle[N2];  // board position of each tile value (inverse permutation)
int space;           // index of the blank (tile 0)
// int md;
int h;     // pattern-database heuristic value
int depth; // moves from the start state (g-value)
int pre;   // last move applied; used to forbid the immediate inverse move
// Order nodes by f = g + h for the best-first priority queue.
bool operator < (const Node& n) const {
return depth + h < n.depth + n.h;
}
bool operator > (const Node& n) const {
return depth + h > n.depth + n.h;
}
};
// Fixed-capacity LIFO stack usable from device code (no dynamic allocation,
// no bounds checking: callers must respect full()/empty()).
template<class T, int NUM>
class local_stack
{
private:
    T storage[NUM];  // backing array
    int top_index;   // index of the current top element; -1 when empty
public:
    __device__ local_stack() : top_index(-1)
    {
    }
    // Read-only access to the top element (undefined when empty).
    __device__ T const & top() const
    {
        return storage[top_index];
    }
    // Mutable access to the top element (undefined when empty).
    __device__ T & top()
    {
        return storage[top_index];
    }
    // Copy v onto the stack (undefined when full).
    __device__ void push(T const & v)
    {
        storage[++top_index] = v;
    }
    // Remove and return the top element (undefined when empty).
    __device__ T pop()
    {
        return storage[top_index--];
    }
    __device__ bool full()
    {
        return top_index == (NUM - 1);
    }
    __device__ bool empty()
    {
        return top_index == -1;
    }
};
// Start state parsed from the current input file.
Node s_node;
// Solution length, set when a goal node is found.
int ans;
// Min-heap on f = depth + h; used to grow the GPU root set.
priority_queue<Node, vector<Node>, greater<Node> > pq;
// Host-side additive pattern database for the 24-puzzle: two 6-tile
// pattern tables (h0, h1) combined through board symmetries.
class PatternDataBase
{
private:
// unsigned char h0[PDB_TABLESIZE];
// unsigned char h1[PDB_TABLESIZE];
/* the position of each tile in order, reflected about the main diagonal */
public:
PatternDataBase();
// Load both pattern tables from their fixed on-disk paths.
void init();
void input_h0(const char *filename);
void input_h1(const char *filename);
// Partial heuristic lookups; `inv` maps tile value -> board position.
unsigned int hash0(const int *inv);
unsigned int hash1(const int *inv);
unsigned int hash2(const int *inv);
unsigned int hash3(const int *inv);
// Same four patterns evaluated on the diagonally reflected board.
unsigned int hashref0(const int *inv);
unsigned int hashref1(const int *inv);
unsigned int hashref2(const int *inv);
unsigned int hashref3(const int *inv);
// Full heuristic: max of the direct and reflected additive pattern sums.
unsigned int get_hash_value(const int *inv);
// unsigned char get_h0_value(int i);
// unsigned char get_h1_value(int i);
};
PatternDataBase::PatternDataBase() {}
void PatternDataBase::init() {
const char *c0 = "../pdb/pat24.1256712.tab";
const char *c1 = "../pdb/pat24.34891314.tab";
cout << "pattern 1 2 5 6 7 12 read in" << endl;
input_h0(c0);
cout << "pattern 3 4 8 9 13 14 read in" << endl;
input_h1(c1);
}
// Load one 6-tile pattern-database table from disk into `table`.
//
// The file stores one cost byte per ordered placement of 6 distinct
// positions out of N2 = 25; the nested loops regenerate the exact index
// order the table was written in, so bytes are consumed sequentially
// with getc(). Aborts if the file cannot be opened (the original
// continued with a NULL FILE*, which is undefined behavior).
static void load_pdb_table(const char *filename, unsigned char *table) {
    FILE *infile = fopen(filename, "rb");
    if (infile == NULL) {
        fprintf(stderr, "cannot open pattern database file %s\n", filename);
        exit(EXIT_FAILURE);
    }
    int index;
    int s[6];
    for (s[0] = 0; s[0] < N2; s[0]++) {
        for (s[1] = 0; s[1] < N2; s[1]++) {
            if (s[1] == s[0]) continue;
            for (s[2] = 0; s[2] < N2; s[2]++) {
                if (s[2] == s[0] || s[2] == s[1]) continue;
                for (s[3] = 0; s[3] < N2; s[3]++) {
                    if (s[3] == s[0] || s[3] == s[1] || s[3] == s[2]) continue;
                    for (s[4] = 0; s[4] < N2; s[4]++) {
                        if (s[4] == s[0] || s[4] == s[1] || s[4] == s[2] || s[4] == s[3]) continue;
                        for (s[5] = 0; s[5] < N2; s[5]++) {
                            if (s[5] == s[0] || s[5] == s[1] || s[5] == s[2] || s[5] == s[3] || s[5] == s[4]) continue;
                            // Base-25 packing of the six positions.
                            index = ((((s[0]*25+s[1])*25+s[2])*25+s[3])*25+s[4])*25+s[5];
                            table[index] = getc(infile);
                        }
                    }
                }
            }
        }
    }
    fclose(infile);
}
// Load the tiles-{1,2,5,6,7,12} pattern table into h0.
void PatternDataBase::input_h0(const char *filename) {
    load_pdb_table(filename, h0);
}
// Load the tiles-{3,4,8,9,13,14} pattern table into h1.
void PatternDataBase::input_h1(const char *filename) {
    load_pdb_table(filename, h1);
}
// Each hash*() packs one 6-tile pattern's board positions (read from
// `inv`: tile value -> position) as base-25 digits into a table index and
// returns the stored move-count byte.
unsigned int PatternDataBase::hash0(const int *inv) {
int hashval;
hashval = ((((inv[1]*N2+inv[2])*N2+inv[5])*N2+inv[6])*N2+inv[7])*N2+inv[12];
return h0[hashval];
}
unsigned int PatternDataBase::hash1(const int *inv) {
int hashval;
hashval = ((((inv[3]*N2+inv[4])*N2+inv[8])*N2+inv[9])*N2+inv[13])*N2+inv[14];
return (h1[hashval]);
}
// hash2/hash3 reuse table h1 for the remaining two tile groups by mapping
// their positions through 180- and 90-degree board rotations.
unsigned int PatternDataBase::hash2(const int *inv) {
int hashval;
hashval = ((((rot180[inv[21]] * N2
+ rot180[inv[20]]) * N2
+ rot180[inv[16]]) * N2
+ rot180[inv[15]]) * N2
+ rot180[inv[11]]) * N2
+ rot180[inv[10]];
return (h1[hashval]);
}
unsigned int PatternDataBase::hash3(const int *inv) {
int hashval;
hashval = ((((rot90[inv[19]] * N2
+ rot90[inv[24]]) * N2
+ rot90[inv[18]]) * N2
+ rot90[inv[23]]) * N2
+ rot90[inv[17]]) * N2
+ rot90[inv[22]];
return (h1[hashval]);
}
// hashref*() evaluate the same four patterns on the board reflected about
// the main diagonal (tables rf / rot90rf / rot180rf).
unsigned int PatternDataBase::hashref0(const int *inv) {
int hashval;
hashval = (((((rf[inv[5]] * N2
+ rf[inv[10]]) * N2
+ rf[inv[1]]) * N2
+ rf[inv[6]]) * N2
+ rf[inv[11]]) * N2
+ rf[inv[12]]);
return (h0[hashval]);
}
unsigned int PatternDataBase::hashref1(const int *inv) {
int hashval;
hashval = (((((rf[inv[15]] * N2
+ rf[inv[20]]) * N2
+ rf[inv[16]]) * N2
+ rf[inv[21]]) * N2
+ rf[inv[17]]) * N2
+ rf[inv[22]]);
return (h1[hashval]);
}
unsigned int PatternDataBase::hashref2(const int *inv) {
int hashval;
hashval = (((((rot180rf[inv[9]] * N2
+ rot180rf[inv[4]]) * N2
+ rot180rf[inv[8]]) * N2
+ rot180rf[inv[3]]) * N2
+ rot180rf[inv[7]]) * N2
+ rot180rf[inv[2]]);
return (h1[hashval]);
}
unsigned int PatternDataBase::hashref3(const int *inv) {
int hashval;
hashval = (((((rot90rf[inv[23]] * N2
+ rot90rf[inv[24]]) * N2
+ rot90rf[inv[18]]) * N2
+ rot90rf[inv[19]]) * N2
+ rot90rf[inv[13]]) * N2
+ rot90rf[inv[14]]);
return (h1[hashval]);
}
// Heuristic value: additive 6-6-6-6 pattern sum, maximized over the
// identity and reflected orientations.
unsigned int PatternDataBase::get_hash_value(const int *inv) {
return max( hash0(inv) + hash1(inv) + hash2(inv) + hash3(inv),
hashref0(inv) + hashref1(inv) + hashref2(inv) + hashref3(inv) );
}
// unsigned char PatternDataBase::get_h0_value(int i) {
// return h0[i];
// }
// unsigned char PatternDataBase::get_h1_value(int i) {
// return h1[i];
// }
// Global host-side pattern database instance.
PatternDataBase pd;
// Device-side counterpart of PatternDataBase: identical hash logic, but
// reading the __device__ tables dev_h0/dev_h1 and the __constant__
// symmetry permutation tables.
class local_pdb
{
// private:
// unsigned char h0[PDB_TABLESIZE];
// unsigned char h1[PDB_TABLESIZE];
public:
local_pdb();
__device__ unsigned int hash0(const int *inv);
__device__ unsigned int hash1(const int *inv);
__device__ unsigned int hash2(const int *inv);
__device__ unsigned int hash3(const int *inv);
__device__ unsigned int hashref0(const int *inv);
__device__ unsigned int hashref1(const int *inv);
__device__ unsigned int hashref2(const int *inv);
__device__ unsigned int hashref3(const int *inv);
__device__ unsigned int get_hash_value(const int *inv);
};
// Intentionally empty: the cost tables are uploaded to the device
// symbols in main() via cudaMemcpyToSymbol, not here (see the
// commented-out copies below).
local_pdb::local_pdb() {
// HANDLE_ERROR(cudaMemcpy(dev_h0, h0, PDB_TABLESIZE * sizeof(unsigned char), cudaMemcpyHostToDevice) );
// HANDLE_ERROR(cudaMemcpy(dev_h1, h1, PDB_TABLESIZE * sizeof(unsigned char), cudaMemcpyHostToDevice) );
}
// Base-25 packing of pattern {1,2,5,6,7,12}; `inv`: tile value -> position.
__device__ unsigned int local_pdb::hash0(const int *inv) {
int hashval;
hashval = ((((inv[1]*N2+inv[2])*N2+inv[5])*N2+inv[6])*N2+inv[7])*N2+inv[12];
return dev_h0[hashval];
}
__device__ unsigned int local_pdb::hash1(const int *inv) {
int hashval;
hashval = ((((inv[3]*N2+inv[4])*N2+inv[8])*N2+inv[9])*N2+inv[13])*N2+inv[14];
return (dev_h1[hashval]);
}
// hash2/hash3 reuse dev_h1 for the other tile groups via board rotations.
__device__ unsigned int local_pdb::hash2(const int *inv) {
int hashval;
hashval = ((((dev_rot180[inv[21]] * N2
+ dev_rot180[inv[20]]) * N2
+ dev_rot180[inv[16]]) * N2
+ dev_rot180[inv[15]]) * N2
+ dev_rot180[inv[11]]) * N2
+ dev_rot180[inv[10]];
return (dev_h1[hashval]);
}
__device__ unsigned int local_pdb::hash3(const int *inv) {
int hashval;
hashval = ((((dev_rot90[inv[19]] * N2
+ dev_rot90[inv[24]]) * N2
+ dev_rot90[inv[18]]) * N2
+ dev_rot90[inv[23]]) * N2
+ dev_rot90[inv[17]]) * N2
+ dev_rot90[inv[22]];
return (dev_h1[hashval]);
}
// hashref*() are the diagonally reflected variants of the four patterns.
__device__ unsigned int local_pdb::hashref0(const int *inv) {
int hashval;
hashval = (((((dev_rf[inv[5]] * N2
+ dev_rf[inv[10]]) * N2
+ dev_rf[inv[1]]) * N2
+ dev_rf[inv[6]]) * N2
+ dev_rf[inv[11]]) * N2
+ dev_rf[inv[12]]);
return (dev_h0[hashval]);
}
__device__ unsigned int local_pdb::hashref1(const int *inv) {
int hashval;
hashval = (((((dev_rf[inv[15]] * N2
+ dev_rf[inv[20]]) * N2
+ dev_rf[inv[16]]) * N2
+ dev_rf[inv[21]]) * N2
+ dev_rf[inv[17]]) * N2
+ dev_rf[inv[22]]);
return (dev_h1[hashval]);
}
__device__ unsigned int local_pdb::hashref2(const int *inv) {
int hashval;
hashval = (((((dev_rot180rf[inv[9]] * N2
+ dev_rot180rf[inv[4]]) * N2
+ dev_rot180rf[inv[8]]) * N2
+ dev_rot180rf[inv[3]]) * N2
+ dev_rot180rf[inv[7]]) * N2
+ dev_rot180rf[inv[2]]);
return (dev_h1[hashval]);
}
__device__ unsigned int local_pdb::hashref3(const int *inv) {
int hashval;
hashval = (((((dev_rot90rf[inv[23]] * N2
+ dev_rot90rf[inv[24]]) * N2
+ dev_rot90rf[inv[18]]) * N2
+ dev_rot90rf[inv[19]]) * N2
+ dev_rot90rf[inv[13]]) * N2
+ dev_rot90rf[inv[14]]);
return (dev_h1[hashval]);
}
// Heuristic: max of the direct and reflected additive pattern sums.
__device__ unsigned int local_pdb::get_hash_value(const int *inv) {
return max( hash0(inv) + hash1(inv) + hash2(inv) + hash3(inv),
hashref0(inv) + hashref1(inv) + hashref2(inv) + hashref3(inv) );
}
// Host pointer to the device-resident local_pdb passed to dfs_kernel.
local_pdb *dev_pd;
// Parse one 5x5 puzzle instance from `input_file` into the global start
// node: 25 whitespace-separated integers, 0 marking the blank. Fills
// both the tile-at-position array (puzzle) and its inverse (inv_puzzle:
// tile value -> position), then evaluates the start heuristic.
void input_table(char *input_file) {
    s_node = Node();
    fstream ifs(input_file);
    if (!ifs) {
        // Fail fast: reading from a failed stream would leave the node
        // full of garbage and corrupt the heuristic lookup below.
        cout << "cannot open input file: " << input_file << endl;
        exit(EXIT_FAILURE);
    }
    for (int i = 0; i < N2; ++i)
    {
        int tmp;
        ifs >> tmp;
        if (tmp == 0) {
            s_node.space = i;
        }
        s_node.puzzle[i] = tmp;
        s_node.inv_puzzle[tmp] = i;
    }
    s_node.h = pd.get_hash_value(s_node.inv_puzzle);
    s_node.depth = 0;
    s_node.pre = -10;  // sentinel: no previous move to forbid undoing
}
// Host-side iterative depth-first search bounded by `limit` (one IDA*
// round). Returns true and stores the solution depth in the global
// `ans` when a goal node (h == 0) is reached.
//
// Fixes relative to the original:
//  * inv_puzzle was swapped by board position instead of by tile value,
//    corrupting the tile->position map the PDB hashes read
//    (create_root_set already performs the swap correctly);
//  * the child's heuristic was computed from the parent's inv_puzzle;
//  * the f-cutoff now tests the child after depth++, matching dfs_kernel.
bool dfs(int limit, Node s_n) {
    stack<Node> st;
    st.push(s_n);
    while (!st.empty()) {
        Node cur_n = st.top();
        st.pop();
        if (cur_n.h == 0) {
            ans = cur_n.depth;
            return true;
        }
        if (cur_n.depth + cur_n.h > limit) continue;
        int s_x = cur_n.space / N;
        int s_y = cur_n.space % N;
        for (int operator_order = 0; operator_order < 4; ++operator_order)
        {
            int i = order[operator_order];
            Node next_n = cur_n;
            int new_x = s_x + dx[i];
            int new_y = s_y + dy[i];
            if (new_x < 0 || new_y < 0 || new_x >= N || new_y >= N) continue;
            // Never undo the previous move (opposite directions differ by 2).
            if (max(cur_n.pre, i) - min(cur_n.pre, i) == 2) continue;
            // Update the inverse permutation by tile value first, then the board.
            swap(next_n.inv_puzzle[next_n.puzzle[new_x * N + new_y]], next_n.inv_puzzle[next_n.puzzle[s_x * N + s_y]]);
            swap(next_n.puzzle[new_x * N + new_y], next_n.puzzle[s_x * N + s_y]);
            next_n.space = new_x * N + new_y;
            next_n.h = pd.get_hash_value(next_n.inv_puzzle);
            next_n.depth++;
            if (next_n.depth + next_n.h > limit) continue;
            next_n.pre = i;
            st.push(next_n);
            if (next_n.h == 0) {
                ans = next_n.depth;
                return true;
            }
        }
    }
    return false;
}
// Expand best-first from the start node until the frontier holds
// CORE_NUM nodes (one DFS root per GPU thread). Returns true — with the
// answer in `ans` — if a goal node is found already during expansion.
bool create_root_set() {
pq.push(s_node);
while(pq.size() < CORE_NUM) {
Node cur_n = pq.top();
pq.pop();
if(cur_n.h == 0 ) {
ans = cur_n.depth;
return true;
}
int s_x = cur_n.space / N;
int s_y = cur_n.space % N;
for (int operator_order = 0; operator_order < 4; ++operator_order)
{
int i = order[operator_order];
Node next_n = cur_n;
int new_x = s_x + dx[i];
int new_y = s_y + dy[i];
if(new_x < 0 || new_y < 0 || new_x >= N || new_y >= N) continue;
// Never undo the previous move (opposite directions differ by 2).
if(max(cur_n.pre, i) - min(cur_n.pre, i) == 2) continue;
// Update the inverse permutation by tile value first, then the board.
swap(next_n.inv_puzzle[next_n.puzzle[new_x * N + new_y]], next_n.inv_puzzle[next_n.puzzle[s_x * N + s_y]]);
swap(next_n.puzzle[new_x * N + new_y], next_n.puzzle[s_x * N + s_y]);
// swap(next_n.puzzle[new_x * N + new_y], next_n.puzzle[s_x * N + s_y]);
// swap(next_n.inv_puzzle[new_x * N + new_y], next_n.inv_puzzle[s_x * N + s_y]);
next_n.space = new_x * N + new_y;
next_n.h = pd.get_hash_value(next_n.inv_puzzle);
next_n.depth++;
next_n.pre = i;
if(next_n.h == 0) {
ans = next_n.depth;
return true;
}
pq.push(next_n);
// Stop as soon as one root per GPU thread is available.
if(pq.size() >= CORE_NUM){
return false;
}
}
}
return false;
}
// One bounded DFS per GPU thread: thread `idx` expands root_set[idx]
// with an explicit fixed-size stack (no recursion on the device).
// When any thread reaches a goal it stores the solution depth in
// *dev_flag; concurrent writers each store a depth within `limit`, so
// the unordered writes still signal "found" correctly.
__global__ void dfs_kernel(int limit, Node *root_set, int *dev_flag, local_pdb *dev_pdb) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
local_stack<Node, STACK_LIMIT> st;
st.push(root_set[idx]);
// Thread-local copies of the host-side move tables.
int order[4] = {1, 0, 2, 3};
int dx[4] = {0, -1, 0, 1};
int dy[4] = {1, 0, -1, 0};
while(!st.empty()) {
Node cur_n = st.top();
st.pop();
if(cur_n.h == 0 ) {
// ans = cur_n.depth;
*dev_flag = cur_n.depth;
return;
}
// IDA* cutoff on the parent's f-value.
if(cur_n.depth + cur_n.h > limit) continue;
int s_x = cur_n.space / N;
int s_y = cur_n.space % N;
for (int operator_order = 0; operator_order < 4; ++operator_order)
{
int i = order[operator_order];
Node next_n = cur_n;
int new_x = s_x + dx[i];
int new_y = s_y + dy[i];
if(new_x < 0 || new_y < 0 || new_x >= N || new_y >= N) continue;
// Never undo the previous move (opposite directions differ by 2).
if(max(cur_n.pre, i) - min(cur_n.pre, i) == 2) continue;
// Hand-rolled swaps: first the inverse permutation (by tile value),
// then the board itself.
int a = next_n.inv_puzzle[next_n.puzzle[new_x * N + new_y]];
int b = next_n.inv_puzzle[next_n.puzzle[s_x * N + s_y]];
next_n.inv_puzzle[next_n.puzzle[new_x * N + new_y]] = b;
next_n.inv_puzzle[next_n.puzzle[s_x * N + s_y]] = a;
int c = next_n.puzzle[new_x * N + new_y];
int d = next_n.puzzle[s_x * N + s_y];
next_n.puzzle[new_x * N + new_y] = d;
next_n.puzzle[s_x * N + s_y] = c;
next_n.space = new_x * N + new_y;
next_n.h = dev_pdb->get_hash_value(next_n.inv_puzzle);
next_n.depth++;
// Cutoff on the child's f-value before pushing.
if(next_n.depth + next_n.h > limit) continue;
next_n.pre = i;
st.push(next_n);
if(next_n.h == 0) {
// ans = next_n.depth;
*dev_flag = next_n.depth;
return;
}
}
}
return;
}
// Parallel IDA*: build a root set of CORE_NUM nodes on the host, then
// repeatedly run the bounded GPU DFS over all roots, raising the cost
// limit by 2 each round (solution depth parity is fixed for a given
// start state, hence the double increment).
void ida_star() {
    pq = priority_queue<Node, vector<Node>, greater<Node> >();
    if (create_root_set()) {
        printf("%d\n", ans);
        return;
    }
    int pq_size = pq.size();
    Node root_set[CORE_NUM];
    int i = 0;
    // Drain the frontier into a flat array for the device.
    while (!pq.empty()) {
        Node n = pq.top();
        pq.pop();
        root_set[i] = n;
        i++;
    }
    // Device-side copy of the root set, uploaded once and reused by
    // every deepening round.
    Node *dev_root_set;
    HANDLE_ERROR(cudaMalloc((void**)&dev_root_set, pq_size * sizeof(Node)));
    HANDLE_ERROR(cudaMemcpy(dev_root_set, root_set, pq_size * sizeof(Node), cudaMemcpyHostToDevice));
    // Solution flag: allocated once and reset per round (the original
    // cudaMalloc'd/cudaFree'd it inside the loop every iteration).
    int *dev_flag;
    HANDLE_ERROR(cudaMalloc((void**)&dev_flag, sizeof(int)));
    for (int limit = s_node.h; limit < 100; ++limit, ++limit)
    {
        int flag = -1;
        HANDLE_ERROR(cudaMemcpy(dev_flag, &flag, sizeof(int), cudaMemcpyHostToDevice));
        dfs_kernel<<<BLOCK_NUM, WARP_SIZE>>>(limit, dev_root_set, dev_flag, dev_pd);
        HANDLE_ERROR(cudaGetLastError());
        HANDLE_ERROR(cudaDeviceSynchronize());
        HANDLE_ERROR(cudaMemcpy(&flag, dev_flag, sizeof(int), cudaMemcpyDeviceToHost));
        if (flag != -1) {
            // A thread reached the goal; flag holds the solution depth.
            cout << flag << endl;
            break;
        }
    }
    HANDLE_ERROR(cudaFree(dev_flag));
    HANDLE_ERROR(cudaFree(dev_root_set));
}
// Driver: load the pattern databases, upload them to the GPU, then solve
// benchmark instances prob000..prob050, timing each run.
int main() {
#ifndef DEBUG
FILE *output_file;
output_file = fopen("../result/yama24_med_psimple_with_pdb_result.csv","w");
#endif
// set_md();
// pattern database
pd = PatternDataBase();
pd.init();
// Allocate the device-side PDB object.
HANDLE_ERROR(cudaMalloc((void**)&dev_pd, sizeof(local_pdb) ) );
local_pdb *lpdb = new local_pdb();
// Copy the (stateless) local_pdb object to the GPU.
HANDLE_ERROR(cudaMemcpy(dev_pd, lpdb, sizeof(local_pdb), cudaMemcpyHostToDevice) );
// Upload both cost tables into the __device__ symbols read by the kernel.
HANDLE_ERROR(cudaMemcpyToSymbol(dev_h0, &h0, PDB_TABLESIZE * sizeof(unsigned char)));
HANDLE_ERROR(cudaMemcpyToSymbol(dev_h1, &h1, PDB_TABLESIZE * sizeof(unsigned char)));
for (int i = 0; i <= 50; ++i)
{
// string input_file = "../benchmarks/yama24_50_easy/prob";
// string input_file = "../benchmarks/yama24_50/prob";
string input_file = "../benchmarks/yama24_50_med/prob";
// string input_file = "../benchmarks/korf100/prob";
// Zero-pad the instance number to three digits.
if(i < 10) {
input_file += "00";
} else if(i < 100) {
input_file += "0";
}
input_file += tostr(i);
cout << input_file << " ";
// set_md();
// clock_t start = clock();
auto start = std::chrono::system_clock::now();
input_table(const_cast<char*>(input_file.c_str()));
ida_star();
// clock_t end = clock();
auto end = std::chrono::system_clock::now();
auto diff = end - start;
#ifndef DEBUG
fprintf(output_file,"%f\n", std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count() / (double)1000000000.0);
#endif
#ifdef DEBUG
printf("%f\n", std::chrono::duration_cast<std::chrono::nanoseconds>(diff).count() / (double)1000000000.0);
#endif
// writing_file << (double)(end - start) / CLOCKS_PER_SEC << endl;
}
HANDLE_ERROR(cudaFree(dev_pd));
// HANDLE_ERROR(cudaFree(dev_h0));
// HANDLE_ERROR(cudaFree(dev_h1));
#ifndef DEBUG
fclose(output_file);
#endif
cudaDeviceReset();
}
|
6,912 |
#include "PathsVAO_cuda_kernel.cuh"
#define VCOUNT (9)
#define VCOUNT2 (18)
#define INDEXCOUNT (4)
#define INDEX_BEGIN (0)
#define INDEX_PAST (1)
#define INDEX_NOW (2)
#define INDEX_ELEMENTBEGIN (3)
//インターリーブ配列としてのインデックスと
//頂点配列としてのインデックスを区別するため、
//接尾辞を下記にする
//_index : 頂点配列としてのインデックス
//_iindex : インターリーブ配列としてのインデックス
// Returns true when `time` falls inside the time interval that starts at
// the vertex pair at vertex_iindex (interleaved-array index) and ends at
// the next pair, and the vertex lies inside [begin_iindex, end_iindex).
__host__
__device__
bool PathsVAO_isInInterval
(
    float time,
    const float* const vertex_d,
    unsigned int begin_iindex,
    unsigned int end_iindex,
    unsigned int vertex_iindex
)
{
    // Offset of the t component within one interleaved vertex.
    const unsigned int it = 3;
    // Vertices outside the path's range can never match.
    if (vertex_iindex < begin_iindex || end_iindex <= vertex_iindex)
    {
        return false;
    }
    // Half-open interval [this pair's t, next pair's t).
    const float interval_start = vertex_d[vertex_iindex + it];
    const float interval_end = vertex_d[vertex_iindex + VCOUNT2 + it];
    return (interval_start <= time) && (time < interval_end);
}
// Write into time_vertex the vertex pair (upper + lower point) linearly
// interpolated at `time` between the pair at v_iindex and the next pair,
// clamped to the path's interleaved range [begin_iindex, end_iindex).
// Each interleaved vertex is 8 used floats: x,y,z,t,r,g,b,a.
__host__
__device__
void PathsVAO_updateTimeVertex
(
    float time,
    const float* const vertex_d,
    unsigned int begin_iindex,
    unsigned int end_iindex,
    unsigned int v_iindex,
    float* const time_vertex
)
{
    // Offset of the t component within one interleaved vertex.
    const unsigned int it = 3;
    // Clamp the segment endpoints [from, to] into the valid range.
    unsigned int from_iindex = (v_iindex < begin_iindex) ? begin_iindex : v_iindex;
    unsigned int to_iindex = (v_iindex < begin_iindex) ? begin_iindex : (v_iindex + VCOUNT2);
    if (end_iindex <= from_iindex)
    {
        from_iindex = end_iindex - VCOUNT2;
    }
    if (end_iindex <= to_iindex)
    {
        to_iindex = end_iindex - VCOUNT2;
    }
    const float* const from = &vertex_d[from_iindex];
    const float* const to = &vertex_d[to_iindex];
    const float from_time = from[it];
    const float to_time = to[it];
    // Interpolation weights between the two segment endpoints.
    const float ratio_from = 1.0f - ((float)(time - from_time)) / ((float)(to_time - from_time));
    const float ratio_to = 1.0f - ratio_from;
    // Interpolate the upper point (offset 0) and lower point (offset
    // VCOUNT). Components 0..7 are x,y,z,t,r,g,b,a; the t component is
    // then overwritten with the query time itself.
    for (unsigned int half = 0; half < 2; half++)
    {
        const unsigned int base = half * VCOUNT;
        for (unsigned int c = 0; c < 8; c++)
        {
            time_vertex[base + c] = ratio_from * from[base + c] + ratio_to * to[base + c];
        }
        time_vertex[base + it] = time;
    }
}
// Advance *time_index (a vertex index; multiply by VCOUNT for the
// interleaved index) to the interval containing `time`, scanning forward
// from its current value, and write the interpolated vertex pair for
// that time into time_vertex.
__host__
__device__
void PathsVAO_updateTimeIndex
(
float time,
const float* const vertex_d,
unsigned int begin_index,
unsigned int end_index,
unsigned int* const time_index,
float* const time_vertex
)
{
// Suffix convention, to distinguish the two index spaces:
// _index : index into the vertex array
// _iindex : index into the interleaved float array
// Index whose interval membership is checked first.
unsigned int v_iindex = VCOUNT * (*time_index);
unsigned int begin_iindex = VCOUNT * begin_index;
unsigned int end_iindex = VCOUNT * end_index;
// Case: current index lies before the first interval.
if(v_iindex < begin_iindex)
{
// If the time is also before the first interval's start time,
// return the index one pair before the first interval.
const unsigned int it = 3;
// Start time of the first interval.
float time_first = vertex_d[begin_iindex + it];
if(time < time_first)
{
*time_index = (begin_iindex - VCOUNT2) / VCOUNT;
return;
}
}
// Case: past the last interval — leave the index unchanged.
if(v_iindex >= end_iindex)
{
return;
}
// Scan forward from time_index; on the first interval containing
// `time`, record it and emit the interpolated vertex pair.
for(; v_iindex < end_iindex; v_iindex+=VCOUNT2)
{
if(PathsVAO_isInInterval(time, vertex_d, begin_iindex, end_iindex, v_iindex))
{
*time_index = v_iindex / VCOUNT;
PathsVAO_updateTimeVertex(time, vertex_d, begin_iindex, end_iindex, v_iindex, time_vertex);
return;
}
}
// No interval matched: clamp to the last index.
*time_index = end_iindex / VCOUNT;
}
// Rebuild the element (index) buffer slice for path `path_index` so that
// only the vertices between the past and now time indices are drawn.
// Unused element slots are padded by duplicating the first/last drawn
// vertex index, keeping the slice length constant.
__host__
__device__
void PathsVAO_updateElement
(
const unsigned int* const index_list_d,
unsigned int path_index,
unsigned int* const element_d
)
{
unsigned int begin_index = index_list_d[INDEXCOUNT * path_index + INDEX_BEGIN ];
unsigned int past_index = index_list_d[INDEXCOUNT * path_index + INDEX_PAST ];
unsigned int now_index = index_list_d[INDEXCOUNT * path_index + INDEX_NOW ];
unsigned int element_begin_index = index_list_d[INDEXCOUNT * path_index + INDEX_ELEMENTBEGIN];
// First 4 vertex slots hold the interpolated past/now pairs; the main
// path vertices start right after them.
unsigned int begin_index_main = begin_index + 4;
unsigned int end_index = index_list_d[INDEXCOUNT * (path_index + 1) + INDEX_BEGIN];
unsigned int element_end_index = index_list_d[INDEXCOUNT * (path_index + 1) + INDEX_ELEMENTBEGIN];
unsigned int past_vertex_index = begin_index;
unsigned int now_vertex_index = begin_index + 2;
// Booleans describing where past/now fall relative to the vertex range.
// past lies before the main vertex run
bool past_before = (past_index < begin_index_main);
// past lies after the main vertex run
bool past_after = (end_index <= past_index );
// now lies before the main vertex run
bool now_before = (now_index < begin_index_main);
// now lies after the main vertex run
bool now_after = (end_index <= now_index );
// whether the interpolated past pair is drawn
bool past_draw = !(past_before || past_after);
// whether the interpolated now pair is drawn
bool now_draw = !(now_before || now_after);
// Start/end vertex indices of the main run between past and now.
unsigned int main_begin_vertex_index = past_before ? begin_index_main : (past_after ? end_index : (past_index + 2));
unsigned int main_end_vertex_index = now_before ? begin_index_main : (now_after ? end_index : (now_index + 2));
// Write cursor into the element (index) array.
unsigned int ei = element_begin_index;
// The first drawn vertex index, also written twice up front as dummy
// (degenerate) indices.
unsigned int first_vertex_index = past_draw ? past_vertex_index : main_begin_vertex_index;
// Write the leading dummy indices.
element_d[ei] = first_vertex_index; ei++;
element_d[ei] = first_vertex_index; ei++;
// Write the indices of the drawn vertices.
if(past_draw)
{
element_d[ei] = past_vertex_index ; ei++;
element_d[ei] = past_vertex_index + 1; ei++;
}
unsigned int sei = main_begin_vertex_index;
for(;(ei < element_end_index) && (sei < main_end_vertex_index); ei++, sei++)
{
element_d[ei] = sei;
}
if(now_draw)
{
element_d[ei] = now_vertex_index ; ei++;
element_d[ei] = now_vertex_index + 1; ei++;
}
// Pad the remainder with the last drawn vertex index as dummies.
unsigned int last_vertex_index = element_d[ei -1];
for(;ei < element_end_index; ei++)
{
element_d[ei] = last_vertex_index;
}
/*
element_d[ei] = past_vertex_index; ei++;
element_d[ei] = past_vertex_index; ei++;
element_d[ei] = past_vertex_index; ei++;
element_d[ei] = past_vertex_index + 1; ei++;
unsigned int sei = past_index + 2;
for(; (ei < element_end_index) && (sei < now_index + 2); ei++, sei++)
{ element_d[ei] = sei;
}
if(ei < end_index)
{
element_d[ei] = now_vertex_index;
ei++;
}
for(;ei < element_end_index;ei++)
{
element_d[ei] = now_vertex_index + 1;
}
*/
}
/**
 * @brief Update one path's VAO data (time indices, interpolated past/now
 *        vertex pairs, and element buffer) for the current time window.
 *
 * One CUDA block corresponds to one path.
 *
 * @param now          leading edge (current time) of the drawn window
 * @param past         trailing edge of the drawn window
 * @param vertex_d     interleaved vertex buffer (device)
 * @param element_d    element/index buffer (device)
 * @param index_list_d per-path index table (device)
 * @param path_index   which path to update
 */
__host__
__device__
void PathsVAO_updateDeviceDataCUDAMain
(
float now,
float past,
float* const vertex_d,
unsigned int* const element_d,
unsigned int* const index_list_d,
unsigned int path_index
)
{
// Indices belonging to path[path_index].
unsigned int begin_index = index_list_d[INDEXCOUNT * path_index + INDEX_BEGIN];
unsigned int* const past_index = &index_list_d[INDEXCOUNT * path_index + INDEX_PAST ];
unsigned int* const now_index = &index_list_d[INDEXCOUNT * path_index + INDEX_NOW ];
// One-past-the-end index of this path's vertices.
unsigned int end_index = index_list_d[INDEXCOUNT * (path_index + 1) + INDEX_BEGIN];
// vertex count of path[path_index]
//unsigned int vertex_count = end_index - begin_index;
// Interpolated vertex pair for `past` (stored at the path's head).
float* const past_vertex = &vertex_d[VCOUNT * begin_index];
// Interpolated vertex pair for `now` (stored right after it).
float* const now_vertex = &vertex_d[VCOUNT * begin_index + VCOUNT2];
// Update pastIndex; main path vertices start at begin_index + 4
// (the first 4 slots hold the past/now pairs).
PathsVAO_updateTimeIndex(past, vertex_d, begin_index + 4, end_index, past_index, past_vertex);
// Update nowIndex.
PathsVAO_updateTimeIndex( now, vertex_d, begin_index + 4, end_index, now_index, now_vertex);
// Rebuild the element buffer from the updated indices.
PathsVAO_updateElement(index_list_d, path_index, element_d);
}
// Kernel entry point: one block per path; blockIdx.x selects the path
// whose VAO data is refreshed for the [past, now] window.
__global__
void PathsVAO_updateDeviceDataCUDA
(
    float now,
    float past,
    float* const vertex_d,
    unsigned int* const element_d,
    unsigned int* const index_list_d
)
{
    PathsVAO_updateDeviceDataCUDAMain(now, past, vertex_d, element_d, index_list_d, blockIdx.x);
}
|
6,913 | //#include "gpu_boundary_matrix.h"
// Skeleton of a chunk-based boundary-matrix reduction algorithm; the
// gpu_boundary_matrix member and the method body are still commented out
// (see below/above).
// NOTE(review): local_chunk_reduction() is declared but its definition is
// commented out — calling it will fail at link time. Confirm before use.
class chunk_reduction_algorithm {
public:
//gpu_boundary_matrix gpu_matrix;
public:
void local_chunk_reduction();
};
/*void chunk_reduction_algorithm::local_chunk_reduction() {
for (int cur_dim = gpu_matrix.get_max_dim(); cur_dim >= 1; cur_dim--) {
}
}*/
|
6,914 | #include<stdio.h>
#include<math.h>
#include<cuda.h>
#define N 256
// Dense matrix-vector product A = B * C for an N x N matrix B.
//
// Launch contract (see main): 1 block of 2 threads; each thread computes
// one contiguous half of the N output rows. The per-thread row count is
// now derived from N instead of the original hard-coded 128, and each
// dot product accumulates in a register instead of read-modify-writing
// A_d[j] in global memory on every term (same summation order, so the
// floating-point result is identical).
__global__ void matrix_vector_multi_gpu_1_2(float *A_d, float *B_d, float *C_d){
    const int rows_per_thread = N / 2;  // two threads share the N rows
    const int row_begin = threadIdx.x * rows_per_thread;
    for (int j = row_begin; j < row_begin + rows_per_thread; j++){
        float sum = 0.0f;
        for (int i = 0; i < N; i++){
            sum += B_d[j*N+i] * C_d[i];
        }
        A_d[j] = sum;
    }
}
// Host driver: builds B (every element of row j equals j/256) and the
// all-ones vector C, runs the 2-thread kernel, and prints A = B*C
// (expected A[j] = N*j/256).
int main(){
    int i, j;
    float A[N], B[N*N], C[N];
    float *A_d, *B_d, *C_d;
    // Fixed launch: 1 block of 2 threads, matching the kernel's
    // hard-wired split of the N rows between two threads.
    dim3 blocks(1, 1, 1);
    dim3 threads(2, 1, 1);
    for(j = 0; j < N; j++){
        for(i = 0; i < N; i++){
            // Float literal avoids the original's double division
            // (((float)j)/256.0); j/256 is exact either way.
            B[j*N+i] = ((float)j) / 256.0f;
        }
    }
    for(j = 0; j < N; j++){
        C[j] = 1.0F;
    }
    cudaMalloc((void**)&A_d, N*sizeof(float));
    cudaMalloc((void**)&B_d, N*N*sizeof(float));
    cudaMalloc((void**)&C_d, N*sizeof(float));
    // A is produced entirely on the device, so only B and C are uploaded
    // (the original also copied the uninitialized A, which was wasted work
    // and read indeterminate host memory).
    cudaMemcpy(B_d, B, N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(C_d, C, N*sizeof(float), cudaMemcpyHostToDevice);
    matrix_vector_multi_gpu_1_2<<<blocks,threads>>>(A_d, B_d, C_d);
    // Blocking copy: also synchronizes with the kernel on the default stream.
    cudaMemcpy(A, A_d, N*sizeof(float), cudaMemcpyDeviceToHost);
    for(j = 0; j < N; j++){
        printf("A[ %d ]=%f \n", j, A[j]);
    }
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return 0;
}
|
// Star record layout: 7 consecutive floats per star
// [x, y, z, vx, vy, vz, mass]; `id`/`star_id` below is the index of the
// record's first float, so fields live at id+0 .. id+6.
#define getMass(id) stars[id+6]
// Component-wise 3-vector helpers.
// NOTE(review): these are multi-statement macros without a do{...}while(0)
// guard, so they must never be the lone statement of an un-braced if/else.
#define vec3fSub(a,b,out) out.x = a.x-b.x; out.y = a.y-b.y; out.z = a.z-b.z;
#define vec3fAdd(a,b,out) out.x = a.x+b.x; out.y = a.y+b.y; out.z = a.z+b.z;
#define vec3fDiv(a,b,out) out.x = a.x/b.x; out.y = a.y/b.y; out.z = a.z/b.z;
#define vec3fDivConstant(a,b,out) out.x = a.x/b; out.y = a.y/b; out.z = a.z/b;
#define vec3fMul(a,b,out) out.x = a.x*b.x; out.y = a.y*b.y; out.z = a.z*b.z;
#define vec3fMulConstant(a,b,out) out.x = a.x*b; out.y = a.y*b; out.z = a.z*b;
// Euclidean length of a 3-vector.
#define vec3fLen(a) sqrtf(powf(a.x,2)+powf(a.y,2)+powf(a.z,2));
#define getPosition(star_id,struct) struct.x = stars[star_id]; struct.y = stars[star_id+1]; struct.z = stars[star_id+2];
#define getVelocity(star_id,struct) struct.x = stars[star_id+3]; struct.y = stars[star_id+4]; struct.z = stars[star_id+5];
// Simple 3-component float vector.
struct vec3f{
float x;
float y;
float z;
};
typedef struct vec3f vec3f;
// All-pairs N-body step kernel: for `steps` iterations, accumulate the
// pairwise force on each star and advance its velocity and position.
// `stars` is a flat array of 7-float records [x,y,z, vx,vy,vz, mass];
// `numstars` is the length of that flat array (record starts step by 7).
__global__ void sim(float* stars, int numstars, int stride, float timestep, int* stars_complete, int steps)
{
    float mass, len, force;
    int this_id, that_id;
    vec3f ThisPos, ThatPos, NewVelocity, dir;
    int step;
    for (step = 0; step < steps; step++)
    {
        // Fix: start each block on a record boundary (blockIdx.x * 7); the
        // original started at blockIdx.x, so every block except block 0
        // indexed into the middle of a star record.
        for (this_id = blockIdx.x * 7; this_id < numstars; this_id += stride * 7)
        {
            getVelocity(this_id, NewVelocity);
            getPosition(this_id, ThisPos);
            // Accumulate the interaction with every other star.
            for (that_id = 0; that_id < numstars; that_id += 7)
            {
                if (that_id != this_id)
                {
                    // F = (m1*m2)/distance^2 (G folded into the units).
                    getPosition(that_id, ThatPos);
                    vec3fSub(ThisPos, ThatPos, dir);
                    mass = getMass(this_id) * getMass(that_id);
                    len = vec3fLen(dir);
                    force = mass / powf(len, 2);
                    vec3fDivConstant(dir, len, dir);      // unit direction
                    vec3fMulConstant(dir, force, dir);    // scale by |F|
                    vec3fMulConstant(dir, timestep, dir); // integrate over dt
                    vec3fAdd(NewVelocity, dir, NewVelocity);
                }
            }
            // Fix: commit velocity and position ONCE per star after the full
            // accumulation. The original stored inside the inner loop, so the
            // position advanced once per interaction instead of once per step.
            stars[this_id + 3] = NewVelocity.x;
            stars[this_id + 4] = NewVelocity.y;
            stars[this_id + 5] = NewVelocity.z;
            stars[this_id] = stars[this_id] + NewVelocity.x;
            stars[this_id + 1] = stars[this_id + 1] + NewVelocity.y;
            stars[this_id + 2] = stars[this_id + 2] + NewVelocity.z;
        }
        // Fix: the completion counter is shared by every thread, so the
        // increment must be atomic (the original `(*stars_complete)++` raced).
        atomicAdd(stars_complete, 1);
        // NOTE(review): this spin-wait is not a reliable inter-block barrier:
        // it compares a completion count against numstars (an element count),
        // never resets the counter between steps, and reads non-volatile
        // global memory. A cooperative-launch grid sync would be the robust
        // replacement — confirm the intended synchronization.
        while ((*stars_complete) < numstars) {
        }
    }
}
6,916 | #include "includes.h"
// Exercise stub: intended to compute the element-wise sum d_c = d_a + d_b.
// NOTE(review): body intentionally left blank; matrix dimensions and launch
// geometry are not visible here — presumably defined in includes.h. TODO:
// implement with a bounds-checked global-index store.
__global__ void additionMatricesKernel(int* d_a, int* d_b, int* d_c) {
// -:YOUR CODE HERE:-
}
6,917 | // Checks that cuda compilation does the right thing when passed
// -fcuda-flush-denormals-to-zero. This should be translated to
// -fdenormal-fp-math-f32=preserve-sign
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=sm_20 -fcuda-flush-denormals-to-zero -nocudainc -nocudalib %s 2>&1 | FileCheck -check-prefix=FTZ %s
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=sm_20 -fno-cuda-flush-denormals-to-zero -nocudainc -nocudalib %s 2>&1 | FileCheck -check-prefix=NOFTZ %s
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=sm_10 -fcuda-flush-denormals-to-zero -nocudainc -nocudalib %s 2>&1 | FileCheck -check-prefix=FTZ %s
// RUN: %clang -no-canonical-prefixes -### -target x86_64-linux-gnu -c -march=haswell --cuda-gpu-arch=sm_10 -fno-cuda-flush-denormals-to-zero -nocudainc -nocudalib %s 2>&1 | FileCheck -check-prefix=NOFTZ %s
// CPUFTZ-NOT: -fdenormal-fp-math
// FTZ: "-fdenormal-fp-math-f32=preserve-sign,preserve-sign"
// NOFTZ: "-fdenormal-fp-math=ieee,ieee"
|
6,918 | #include "includes.h"
// Empty kernel. NOTE(review): presumably launched only as a stream
// serialization/timing marker — it performs no work; confirm at the call site.
__global__ void sync_ssim_conv_groups() { }
6,919 | #include "includes.h"
// Convert a TGV disparity field into 3-D points X and Euclidean depth
// for a Kannala-Brandt (fisheye) distortion model.
//
// Per pixel: undistort both the reference pixel (u0,v0) and the matched
// pixel (u0+u, v0+v) by inverting the KB polynomial
// thetad = theta + d1*theta^3 + d2*theta^5 + d3*theta^7 + d4*theta^9
// with 5 Newton-Raphson iterations, then triangulate Z from the
// baseline translation (t1,t2,t3).
//
// Launch: 2-D grid covering width x height; `stride` is the row pitch in
// elements. Out-of-image threads return before touching memory.
// NOTE(review): the usage (u0 - focalx) / cx suggests focalx/focaly hold
// the principal point and cx/cy the focal lengths — confirm the naming
// at the call site.
// Fix vs. original: the final depth used double-precision sqrt() in an
// otherwise all-float kernel; it now uses sqrtf().
__global__ void TgvConvertKBKernel(float2* disparity, float focalx, float focaly, float cx, float cy, float d1, float d2, float d3, float d4, float t1, float t2, float t3, float3* X, float* depth, int width, int height, int stride)
{
    const int ix = threadIdx.x + blockIdx.x * blockDim.x;
    const int iy = threadIdx.y + blockIdx.y * blockDim.y;
    const int pos = ix + iy * stride;
    if (ix >= width || iy >= height) return;
    float u0 = (float)ix;
    float v0 = (float)iy;
    float xprime0 = (u0 - focalx) / cx;
    float yprime0 = (v0 - focaly) / cy;
    // Matched pixel in the second frame = reference pixel + disparity.
    float u = disparity[pos].x;
    float v = disparity[pos].y;
    float u1 = u0 + u;
    float v1 = v0 + v;
    float xprime1 = (u1 - focalx) / cx;
    float yprime1 = (v1 - focaly) / cy;
    // Newton-Raphson undistortion, frame 0: solve thetad(theta) = ru0.
    float ru0 = sqrtf(xprime0 * xprime0 + yprime0 * yprime0);
    float theta0 = 0.0f;
    for (int iter = 0; iter < 5; iter++) {
        float thetad0 = theta0 + d1 * powf(theta0, 3.0f) + d2 * powf(theta0, 5.0f) + d3 * powf(theta0, 7.0f)
            + d4 * powf(theta0, 9.0f);
        float Dthetad0 = 1.0f + 3.0f * d1 * powf(theta0, 2.0f) + 5.0f * d2 * powf(theta0, 4.0f)
            + 7.0f * d3 * powf(theta0, 6.0f) + 9.0f * d4 * powf(theta0, 8.0f);
        float f0 = ru0 - thetad0;
        float f0prime = -Dthetad0;// 2 * (ru0 - thetad0).*(-Dthetad0);
        theta0 = theta0 - f0 / f0prime;
    }
    float x0out = tanf(theta0) * xprime0 / ru0;
    float y0out = tanf(theta0) * yprime0 / ru0;
    // Newton-Raphson undistortion, frame 1.
    float ru1 = sqrtf(xprime1 * xprime1 + yprime1 * yprime1);
    float theta1 = 0.0f;
    for (int iter = 0; iter < 5; iter++) {
        float thetad1 = theta1 + d1 * powf(theta1, 3.0f) + d2 * powf(theta1, 5.0f) + d3 * powf(theta1, 7.0f)
            + d4 * powf(theta1, 9.0f);
        float Dthetad1 = 1.0f + 3.0f * d1 * powf(theta1, 2.0f) + 5.0f * d2 * powf(theta1, 4.0f)
            + 7.0f * d3 * powf(theta1, 6.0f) + 9.0f * d4 * powf(theta1, 8.0f);
        float f1 = ru1 - thetad1;// % (ru1 - thetad1). ^ 2;
        float f1prime = -Dthetad1;// % 2 * (ru1 - thetad1).*(-Dthetad1);
        theta1 = theta1 - f1 / f1prime;
    }
    float x1out = tanf(theta1) * xprime1 / ru1;
    float y1out = tanf(theta1) * yprime1 / ru1;
    // Triangulation: Z from the x-parallax (Zy kept for reference).
    float Zx = (t1 - x1out * t3) / (x1out - x0out);
    float Zy = (t2 - y1out * t3) / (y1out - y0out);
    float Z = Zx;
    X[pos].x = x0out * Z;
    X[pos].y = y0out * Z;
    X[pos].z = Z;
    // Euclidean (ray) depth, all in single precision.
    depth[pos] = sqrtf(X[pos].x * X[pos].x + X[pos].y * X[pos].y + X[pos].z * X[pos].z);
    //depth[pos] = Z;
}
6,920 | #include "includes.h"
// BFS frontier-update kernel: promotes every node flagged in the updating
// mask into the active frontier for the next level, marks it visited, and
// raises *g_over so the host knows another iteration is required.
// Expects a 1-D launch with MAX_THREADS_PER_BLOCK threads per block.
__global__ void Kernel2(bool *g_graph_mask, bool *g_updating_graph_mask, bool *g_graph_visited, bool *g_over, int no_of_nodes) {
const int node = blockIdx.x * MAX_THREADS_PER_BLOCK + threadIdx.x;
if (node >= no_of_nodes) return;          // tail guard for the last block
if (!g_updating_graph_mask[node]) return; // node untouched this level
g_graph_mask[node] = true;                // re-enter the frontier
g_graph_visited[node] = true;             // permanently visited
*g_over = true;                           // host must run another level
g_updating_graph_mask[node] = false;      // consume the update flag
} |
6,921 | /*
Collatz code for CS 4380 / CS 5351
Copyright (c) 2019 Texas State University. All rights reserved.
Redistribution in source or binary form, with or without modification,
is *not* permitted. Use in source and binary forms, with or without
modification, is only permitted for academic use in CS 4380 or CS 5351
at Texas State University.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <cstdio>
#include <algorithm>
#include <cuda.h>
#include <sys/time.h>
static const int ThreadsPerBlock = 512;
//req (4),(3),(2),(8)
// Computes the Collatz sequence length for each start value in [1, range]
// and records the global maximum in *maxlen via atomicMax.
// One thread per start value; launch with ceil(range / blockDim.x) blocks.
//req (4),(3),(2),(8)
static __global__ void collatz(const long range, int* maxlen)
{
  // compute sequence lengths
  //req (7), (5).
  const long idx = threadIdx.x + blockIdx.x * (long)blockDim.x;
  // FIX: out-of-range tail threads previously fell through to the final
  // check and issued a pointless atomicMax(maxlen, 1); exit early instead.
  if (idx >= range) return;
  long val = idx + 1; // start value of this thread's sequence
  int len = 1;
  while (val != 1) {
    len++;
    if ((val % 2) == 0) {
      val = val / 2; // even
    } else {
      val = 3 * val + 1; // odd
    }
  }
  // Cheap unsynchronized pre-read filters most atomic traffic; atomicMax
  // remains the authoritative, race-free update. //req (9).
  if (*maxlen < len) { atomicMax(maxlen, len); }
}
// Blocks until the device is idle, then aborts the program if any CUDA
// error (from a prior kernel launch or API call) is pending.
static void CheckCuda(){
cudaError_t e;
cudaDeviceSynchronize();
if(cudaSuccess != (e = cudaGetLastError()))
// FIX: "%D" is not a valid printf conversion specifier; the error code
// prints as a plain int with "%d".
{fprintf(stderr, "CUDA error %d: %s\n", e, cudaGetErrorString(e)); exit(-1);}
}
// Driver: parses the range argument, launches the collatz kernel over
// [1, range], times the GPU computation, and prints the longest sequence.
int main(int argc, char *argv[])
{
printf("Collatz v1.1\n");
// check command line
if (argc != 2) {fprintf(stderr, "USAGE: %s range\n", argv[0]); exit(-1);}
const long range = atol(argv[1]);
if (range < 3) {fprintf(stderr, "ERROR: range must be at least 3\n"); exit(-1);}
printf("range bound: %ld\n", range);
//allocate mem for deviceMaxlen.
// NOTE(review): cudaMalloc's return value is unchecked here; a failure
// would surface later via CheckCuda().
int* d_maxlen;
const int size = sizeof(int);
cudaMalloc((void **)&d_maxlen,size); //initialize hostMaxlen
int *h_maxlen = new int;
*h_maxlen = 0;
//copy to gpu (Device)
if (cudaSuccess != cudaMemcpy(d_maxlen, h_maxlen, size, cudaMemcpyHostToDevice)){
fprintf(stderr, "Copy operation to GPU failed");
exit(-1);
}
// start time
timeval start, end;
gettimeofday(&start, NULL);
// call timed function; grid = ceil(range / ThreadsPerBlock) blocks
collatz<<<(ThreadsPerBlock + range - 1)/ThreadsPerBlock,ThreadsPerBlock>>>(range, d_maxlen);
cudaDeviceSynchronize();
// end time
gettimeofday(&end, NULL);
const double runtime = end.tv_sec - start.tv_sec + (end.tv_usec - start.tv_usec) / 1000000.0;
printf("compute time: %.4f s\n", runtime);
CheckCuda();
//copy data back to host/cpu
if (cudaSuccess != cudaMemcpy(h_maxlen, d_maxlen, size, cudaMemcpyDeviceToHost))
{fprintf(stderr, "copy from gpu to cpu failed!\n"); exit(-1);}
// print result
printf("longest sequence: %d elements\n", *h_maxlen);
//deleting memory
delete h_maxlen;
cudaFree(d_maxlen);
return 0;
}
|
6,922 |
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
// Returns x squared after verifying that the CUDA runtime can enumerate
// devices; returns 1 as an error sentinel if cudaGetDeviceCount fails.
int __host__ cu2_sq_func(int x)
{
    int deviceCount = 0;
    const cudaError_t status = cudaGetDeviceCount(&deviceCount);
    if (status != cudaSuccess) {
        std::cerr << "nDevices: " << deviceCount << std::endl;
        std::cerr << "err: " << status << std::endl;
        return 1;
    }
    return x * x;
}
|
6,923 | #include "includes.h"
// 19-point stencil update over a 129 x 65 x 65 grid, one thread per cell.
// Boundary cells (any neighbor out of the grid) are zeroed.
// param_a carries 4 coefficients per cell; param_b/param_c carry 3 each;
// param_wrk/param_bnd carry 1 each.
__global__ void kernel_stencil(float *new_data, float *data, float *param_a, float *param_b, float *param_c, float *param_wrk, float *param_bnd) {
int _tid_ = threadIdx.x + blockIdx.x * blockDim.x;
if (_tid_ >= 129 * 65 * 65) return;
int idx_0 =_tid_ / 65 / 65;
int idx_1 = (_tid_ / 65) % 65;
int idx_2 = (_tid_ / 1) % 65;
// BUG FIX: the original tested (idx_1 - 1 < 0 || idx_2 + 1 >= 65) twice,
// so idx_1 == 64 and idx_2 == 0 slipped through the guard and the
// neighbor loads below read out of bounds. Each axis now gets its own
// two-sided boundary check.
if (idx_0 - 1 < 0 || idx_0 + 1 >= 129) { new_data[_tid_] = 0.0; return; }
if (idx_1 - 1 < 0 || idx_1 + 1 >= 65) { new_data[_tid_] = 0.0; return; }
if (idx_2 - 1 < 0 || idx_2 + 1 >= 65) { new_data[_tid_] = 0.0; return; }
float v000 = data[(idx_0) * 65 * 65 + (idx_1) * 65 + (idx_2)];
float v100 = data[(idx_0 + 1) * 65 * 65 + (idx_1) * 65 + (idx_2)];
float v010 = data[(idx_0) * 65 * 65 + (idx_1 + 1) * 65 + (idx_2)];
float v001 = data[(idx_0) * 65 * 65 + (idx_1) * 65 + (idx_2 + 1)];
float v110 = data[(idx_0 + 1) * 65 * 65 + (idx_1 + 1) * 65 + (idx_2)];
float v120 = data[(idx_0 + 1) * 65 * 65 + (idx_1 - 1) * 65 + (idx_2)];
float v210 = data[(idx_0 - 1) * 65 * 65 + (idx_1 + 1) * 65 + (idx_2)];
float v220 = data[(idx_0 - 1) * 65 * 65 + (idx_1 - 1) * 65 + (idx_2)];
float v011 = data[(idx_0) * 65 * 65 + (idx_1 + 1) * 65 + (idx_2 + 1)];
float v021 = data[(idx_0) * 65 * 65 + (idx_1 - 1) * 65 + (idx_2 + 1)];
float v012 = data[(idx_0) * 65 * 65 + (idx_1 + 1) * 65 + (idx_2 - 1)];
float v022 = data[(idx_0) * 65 * 65 + (idx_1 - 1) * 65 + (idx_2 - 1)];
float v101 = data[(idx_0 + 1) * 65 * 65 + (idx_1) * 65 + (idx_2 + 1)];
float v201 = data[(idx_0 - 1) * 65 * 65 + (idx_1) * 65 + (idx_2 + 1)];
float v102 = data[(idx_0 + 1) * 65 * 65 + (idx_1) * 65 + (idx_2 - 1)];
float v202 = data[(idx_0 - 1) * 65 * 65 + (idx_1) * 65 + (idx_2 - 1)];
float v200 = data[(idx_0 - 1) * 65 * 65 + (idx_1) * 65 + (idx_2)];
float v020 = data[(idx_0) * 65 * 65 + (idx_1 - 1) * 65 + (idx_2)];
float v002 = data[(idx_0) * 65 * 65 + (idx_1) * 65 + (idx_2 - 1)];
// Relaxation step: blend the weighted neighbor sum back into v000,
// gated by the per-cell boundary factor param_bnd.
new_data[_tid_] =
v000 + 0.8 * (((
param_a[65 * 65 * 4 * idx_0 + 65 * 4 * idx_1 + 4 * idx_2 + 0] * v100 +
param_a[65 * 65 * 4 * idx_0 + 65 * 4 * idx_1 + 4 * idx_2 + 1] * v010 +
param_a[65 * 65 * 4 * idx_0 + 65 * 4 * idx_1 + 4 * idx_2 + 2] * v001 +
param_b[65 * 65 * 3 * idx_0 + 65 * 3 * idx_1 + 3 * idx_2 + 0] *
(v110 - v120 - v210 + v220) +
param_b[65 * 65 * 3 * idx_0 + 65 * 3 * idx_1 + 3 * idx_2 + 1] *
(v011 - v021 - v012 + v022) +
param_b[65 * 65 * 3 * idx_0 + 65 * 3 * idx_1 + 3 * idx_2 + 2] *
(v101 - v201 - v102 + v202) +
param_c[65 * 65 * 3 * idx_0 + 65 * 3 * idx_1 + 3 * idx_2 + 0] * v200 +
param_c[65 * 65 * 3 * idx_0 + 65 * 3 * idx_1 + 3 * idx_2 + 1] * v020 +
param_c[65 * 65 * 3 * idx_0 + 65 * 3 * idx_1 + 3 * idx_2 + 2] * v002 +
param_wrk[65 * 65 * idx_0 + 65 * idx_1 + idx_2]) *
param_a[65 * 65 * 4 * idx_0 + 65 * 4 * idx_1 + 4 * idx_2 + 3] -
v000) * param_bnd[65 * 65 * idx_0 + 65 * idx_1 + idx_2]);
} |
6,924 | #include "includes.h"
// Writes incBuffer - divBuffer + labelBuffer * iCC into sinkBuffer for each
// of the first `size` elements. CUDASTDOFFSET is the project-wide flat
// thread-index macro (from includes.h).
__global__ void kern_FindLeafSinkPotential(float* sinkBuffer, float* incBuffer, float* divBuffer, float* labelBuffer, float iCC, int size)
{
int idx = CUDASTDOFFSET;
if( idx < size )
{
// FIX: the loads were previously issued before the bounds check, reading
// past the end of the buffers in the final partial block.
sinkBuffer[idx] = incBuffer[idx] - divBuffer[idx] + labelBuffer[idx] * iCC;
}
} |
6,925 | #include "includes.h"
// Element-wise vector addition: dest[i] = a[i] + b[i] for i in [0, length).
// Grid-stride loop: correct under any launch configuration, including a
// single block/thread.
__global__ void add(const int *a, const int *b, int *dest, const size_t length) {
  const size_t stride = blockDim.x * gridDim.x;
  size_t i = threadIdx.x + blockIdx.x * blockDim.x;
  while (i < length) {
    dest[i] = a[i] + b[i];
    i += stride;
  }
} |
6,926 | #include <cuda_runtime.h>
#include <stdio.h>
// Debug kernel: every thread prints its full 3-D thread/block coordinates
// together with the launch geometry (device printf output is buffered and
// flushed at the next synchronization point).
__global__ void checkIndex(void)
{
printf("threadIdx: (%d, %d, %d), blockIdx: (%d, %d, %d), blockDim: (%d, %d, %d), gridDim: (%d, %d, %d)\n",
threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z,
blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
// Host driver: splits 6 elements into blocks of 3 threads, prints the
// resulting launch geometry, and runs the checkIndex kernel.
int main()
{
    const int nElem = 6;
    dim3 block(3);
    dim3 grid((nElem + block.x - 1) / block.x); // ceil-divide elements by block size
    printf("grid: (%d, %d, %d)\n", grid.x, grid.y, grid.z);
    printf("block: (%d, %d, %d)\n", block.x, block.y, block.z);
    checkIndex<<<grid, block>>>();
    cudaDeviceReset(); // tears down the context (flushes device-side printf)
    return 0;
}
|
6,927 | #include <stdio.h>
#include <cuda.h>
// Small demo struct stored in unified (managed) memory and accessed from
// both host and device.
struct some_data {
// Host-only constructor (not marked __device__): objects are built on the
// host and later read/written by the kernel through managed memory.
some_data(float a_, int b_) {
a = a_;
b = b_;
}
float a;
int b;
// Device-only member: product of the two fields (b converted to float).
__device__ float other_func() {
return a * (float)b;
}
};
// Kernel that executes on the CUDA device
// Kernel: squares each of the first N elements of a in place.
// One thread per element; tail threads beyond N exit immediately.
__global__ void square_array(float *a, int N)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;
    a[i] *= a[i];
}
// Kernel overload for some_data: replaces each element's 'a' field with the
// value computed by the device member function other_func() (a * b).
__global__ void square_array(some_data* a, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < N) {
//a[idx].a = a[idx].a * a[idx].a;
//a[idx].b = a[idx].b * a[idx].b;
a[idx].a = a[idx].other_func();
}
}
// main routine that executes on the host
// main routine that executes on the host
// Allocates N some_data elements in unified memory, fills them on the host,
// runs the square_array(some_data*) kernel, and prints the results.
int main(void)
{
some_data *a_h; // Pointer to host & device arrays
const int N = 10; // Number of elements in arrays
size_t size = N * sizeof(some_data);
cudaMallocManaged((void **) &a_h, size); // Allocate array on device
cudaDeviceSynchronize();
// Initialize host array and copy it to CUDA device
for (int i=0; i<N; i++){
//a_h[i].a = (float)i;
//a_h[i].b = i+2;
a_h[i] = some_data((float)i, i + 2);
}
// Do calculation on device:
int block_size = 4;
int n_blocks = N/block_size + (N%block_size == 0 ? 0:1); // ceil(N / block_size)
square_array <<< n_blocks, block_size >>> (a_h, N);
cudaDeviceSynchronize(); // required before the host touches managed memory again
// Print results
for (int i=0; i<N; i++){
printf("%d %f %d\n", i, a_h[i].a, a_h[i].b);
}
// Cleanup
cudaFree(a_h);
}
|
6,928 | #pragma once
#define FLT_MAX 3.402823466e+38F
#define UINT_MAX 0xffffffff
#define INT_MAX 2147483647
// State for a 128-bit xorshift PRNG: four 32-bit words of generator state.
struct stateRNG_xorShift128{
unsigned int x;
unsigned int y;
unsigned int z;
unsigned int w;
}; |
6,929 | // arrayMult.cu
// Andrew Krepps
// Module 3 Assignment
// 2/19/2018
#include <stdio.h>
#define MAX_ARRAY_SIZE 65536
// element-wise multiplication of n values from
// in1 and in2, with the result stored in out
// Element-wise product: out[i] = in1[i] * in2[i] for the first n elements.
// One thread per element with a tail guard.
__global__
void arrayMult(const float* in1, const float* in2, float* out, const unsigned int n)
{
	const unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
	if (i >= n)
		return;
	out[i] = in1[i]*in2[i];
}
int main(int argc, char** argv)
{
// read command line arguments
int totalThreads = 64;
int blockSize = 16;
if (argc >= 2) {
totalThreads = atoi(argv[1]);
}
if (argc >= 3) {
blockSize = atoi(argv[2]);
}
int numBlocks = totalThreads/blockSize;
// validate command line arguments
if (totalThreads % blockSize != 0) {
++numBlocks;
totalThreads = numBlocks*blockSize;
printf("Warning: Total thread count is not evenly divisible by the block size\n");
printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
if (totalThreads > MAX_ARRAY_SIZE) {
printf("Warning: Total thread count is greater than MAX_ARRAY_SIZE\n");
}
// initialize data
float in1[MAX_ARRAY_SIZE];
float in2[MAX_ARRAY_SIZE];
for (int i = 0; i < totalThreads; ++i) {
in1[i] = (float)i;
in2[i] = 0.25f*i;
}
// allocate device memory
const int dataSize = totalThreads*sizeof(float);
float* in1d;
float* in2d;
float* outd;
cudaMalloc((void**)&in1d, dataSize);
cudaMalloc((void**)&in2d, dataSize);
cudaMalloc((void**)&outd, dataSize);
// copy data to device
cudaMemcpy(in1d, in1, dataSize, cudaMemcpyHostToDevice);
cudaMemcpy(in2d, in2, dataSize, cudaMemcpyHostToDevice);
// execute kernel
arrayMult<<<numBlocks, blockSize>>>(in1d, in2d, outd, totalThreads);
// copy results to host
float out[MAX_ARRAY_SIZE];
cudaMemcpy(out, outd, dataSize, cudaMemcpyDeviceToHost);
// display results
for (int i = 0; i < totalThreads; ++i) {
printf("%f * %f = %f\n", in1[i], in2[i], out[i]);
}
// clean up device memory
cudaFree(in1d);
cudaFree(in2d);
cudaFree(outd);
return EXIT_SUCCESS;
}
|
6,930 | #define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
#include <device_functions.h>
#include <stdint.h>
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <sys/types.h>
#include <iostream>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <iomanip>
#include <unistd.h>
#pragma pack(1)
typedef struct BITMAPFILEHEADER
{
uint16_t bfType;
uint32_t bfSize;
uint16_t bfReserved1;
uint16_t bfReserved2;
uint32_t bfOffBits;
} BITMAPFILEHEADER;
typedef struct BITMAPINFOHEADER
{
uint32_t biSize;
uint32_t biWidth;
uint32_t biHeight;
uint16_t biPlanes;
uint16_t biBitCount;
uint32_t biCompression;
uint32_t biSizeImage;
uint32_t biXPelsPerMeter;
uint32_t biYPelsPerMeter;
uint32_t biClrUsed;
uint32_t biClrImportant;
} BITMAPINFOHEADER;
#define MASK_WIDTH 5
int filter_size = MASK_WIDTH;
int arr_height = 4096;
int arr_width;
int arr_size=4096;
int res_size = arr_height;
#define O_TILE_WIDTH 64
#define BLOCK_WIDTH (O_TILE_WIDTH + MASK_WIDTH - 1)
#define BMP_FILE_NAME "./img/timg.bmp"
using namespace std;
float GsCore[25]={
0.01441881,0.02808402,0.03507270,0.02808402,0.01441881,
0.02808402,0.0547002, 0.06831229,0.0547002 ,0.02808402,
0.0350727 ,0.06831229,0.08531173,0.06831229,0.03507270,
0.02808402,0.0547002 ,0.06831229,0.0547002 ,0.02808402,
0.01441881,0.02808402,0.03507270,0.02808402,0.01441881
};
// CPU reference: 2-D "same" convolution of arr (arr_size x arr_size) with
// filter (filter_size x filter_size). Taps falling outside the array are
// treated as zero (zero padding). Result written to res.
void Conv2(float **filter, float **arr, float **res, int filter_size, int arr_size)
{
	// BUG FIX: the accumulator was declared int, truncating every partial
	// sum of float products before it reached res; it must be float.
	float acc;
	for (int i = 0; i < arr_size; i++)
	{
		for (int j = 0; j < arr_size; j++)
		{
			acc = 0.0f;
			// top-left corner of the filter window centered on (i, j)
			int starti = i - filter_size / 2;
			int startj = j - filter_size / 2;
			for (int m = starti; m < starti + filter_size; m++)
			{
				for (int n = startj; n < startj + filter_size; n++)
				{
					if (m >= 0 && m < arr_size && n >= 0 && n < arr_size)
					{
						acc += filter[m - starti][n - startj] * arr[m][n];
					}
				}
			}
			res[i][j] = acc;
		}
	}
}
//kernel function
// Direct 2-D convolution: one thread per output pixel. mask is a
// maskwidth x maskwidth row-major kernel; in/out are w x h row-major
// images. Taps outside the image are skipped (zero padding).
__global__ void convolution_2D_basic(float *in, float *out, float *mask, int maskwidth, int w, int h)
{
	const int col = blockIdx.x * blockDim.x + threadIdx.x;
	const int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (row >= h || col >= w)
		return;

	// top-left corner of the mask window centered on (row, col)
	const int row0 = row - maskwidth / 2;
	const int col0 = col - maskwidth / 2;
	float acc = 0.0f;
	for (int i = 0; i < maskwidth; i++)
	{
		const int r = row0 + i;
		if (r <= -1 || r >= h)
			continue; // whole mask row outside the image
		for (int j = 0; j < maskwidth; j++)
		{
			const int c = col0 + j;
			if (c > -1 && c < w)
				acc += mask[i * maskwidth + j] * in[r * w + c];
		}
	}
	out[row * w + col] = acc;
}
//kernel function
// Tiled 2-D convolution — shared-memory implementation currently disabled
// (commented out below); as written the kernel only copies in -> out.
// Launched with BLOCK_WIDTH x BLOCK_WIDTH threads covering
// O_TILE_WIDTH x O_TILE_WIDTH output tiles, so tx/ty run past the tile.
__global__ void convolution_2D_shared(float *in, float *out, float *mask, int maskwidth, int w, int h)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int row_o = blockIdx.y * O_TILE_WIDTH + ty;
int col_o = blockIdx.x * O_TILE_WIDTH + tx;
int row_i = row_o - maskwidth / 2; // kept for the disabled tiled path below
int col_i = col_o - maskwidth / 2;
// BUG FIX: the copy was unguarded; with the grid rounded up and the halo
// threads (tx/ty >= O_TILE_WIDTH), row_o/col_o can exceed the image
// bounds, and the unconditional write corrupted memory past the buffers.
if (row_o < h && col_o < w)
out[row_o * w + col_o] = in[row_o * w + col_o];
// __shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH];
// if ((row_i >= 0) && (row_i < h) &&
// (col_i >= 0) && (col_i < w))
// {
// Ns[ty][tx] = in[row_i * w + col_i];
// }
// else
// {
// Ns[ty][tx] = 0.0f;
// }
// float output = 0.0f;
// if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH)
// {
// for (int i = 0; i < maskwidth; i++)
// {
// for (int j = 0; j < maskwidth; j++)
// {
// output += mask[i * maskwidth + j] * Ns[i + ty][j + tx];
// }
// }
// if (row_o < h && col_o < w)
// {
// out[row_o * w + col_o] = output;
// }
// }
}
// Compares two length-arr_size_1D float arrays; returns 1 when they match
// within a small total tolerance, 0 otherwise.
int check(float *a, float *b, int arr_size_1D)
{
	float res = 0;
	for (int i = 0; i < arr_size_1D; i++)
	{
		// BUG FIX: signed differences were summed, so errors of opposite
		// sign cancelled and mismatched arrays passed; accumulate the
		// absolute differences instead.
		res += fabsf(a[i] - b[i]);
	}
	if (res < 1e-7)
		return 1;
	return 0;
}
// Debug kernel: each thread prints its (row, col) position plus the block
// and grid dimensions. (The stray "]" / ")" in the formats are preserved.)
__global__ void test()
{
int Col = blockIdx.x * blockDim.x + threadIdx.x;
int Row = blockIdx.y * blockDim.y + threadIdx.y;
printf("%d,%d]\n", Row, Col);
printf("%d,%d,%d)\n", blockDim.y, blockDim.x, blockDim.z);
printf("%d,%d,%d)\n", gridDim.x, gridDim.y, gridDim.z);
}
void readBmp(FILE *fp, unsigned char *&pBmpBuf, int BmpWidth, int BmpHeight,int BiBitCount, int startx, int endx)
{
/**
* 灰度图像有颜色表,且颜色表表项为256
* (可以理解为lineByte是对bmpWidth的以4为步长的向上取整)
*/
int lineByte = (BmpWidth * BiBitCount / 8 + 3) / 4 * 4;
//申请位图数据所需要的空间,读位图数据进内存
pBmpBuf = new (nothrow) unsigned char[lineByte * BmpHeight];
if (pBmpBuf == NULL)
{
cerr << "Mem alloc failed." << endl;
exit(-1);
}
if (startx - 2 > 0)
startx = startx - 2;
if (endx + 2 < BmpHeight)
endx = endx + 2;
fseek(fp, startx * lineByte, SEEK_CUR);
cerr<<fread(pBmpBuf + startx * lineByte, lineByte * (endx - startx + 1), 1, fp)<<endl;
return;
}
//给定一个图像位图数据、宽、高、颜色表指针及每像素所占的位数等信息,将其写到指定文件中
bool saveBmp(const char *bmpName, unsigned char *imgBuf, int width, int height,
int biBitCount)
{
//如果位图数据指针为0,则没有数据传入,函数返回
if (!imgBuf)
return 0;
//颜色表大小,以字节为单位,灰度图像颜色表为1024字节,彩色图像颜色表大小为0
int colorTablesize = 0;
if (biBitCount == 8)
colorTablesize = 1024; // 8*128
//待存储图像数据每行字节数为4的倍数
int lineByte = (width * biBitCount / 8 + 3) / 4 * 4;
//以二进制写的方式打开文件
FILE *fp = fopen(bmpName, "wb");
if (fp == 0)
{
cerr << "Open file error." << endl;
return 0;
}
//申请位图文件头结构变量,填写文件头信息
BITMAPFILEHEADER fileHead;
fileHead.bfType = 0x4D42; // bmp类型
// bfSize是图像文件4个组成部分之和
fileHead.bfSize = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) +
colorTablesize + lineByte * height;
fileHead.bfReserved1 = 0;
fileHead.bfReserved2 = 0;
// bfOffBits是图像文件前3个部分所需空间之和
fileHead.bfOffBits = 54 + colorTablesize;
//写文件头进文件
fwrite(&fileHead, sizeof(BITMAPFILEHEADER), 1, fp);
//申请位图信息头结构变量,填写信息头信息
BITMAPINFOHEADER head;
head.biBitCount = biBitCount;
head.biClrImportant = 0;
head.biClrUsed = 0;
head.biCompression = 0;
head.biHeight = height;
head.biPlanes = 1;
head.biSize = 40;
head.biSizeImage = lineByte * height;
head.biWidth = width;
head.biXPelsPerMeter = 0;
head.biYPelsPerMeter = 0;
//写位图信息头进内存
fwrite(&head, sizeof(BITMAPINFOHEADER), 1, fp);
//写位图数据进文件
fwrite(imgBuf, height * lineByte, 1, fp);
//关闭文件
fclose(fp);
return 1;
}
int main()
{
unsigned char *bmpBuf;
unsigned char *result;
int BmpWidth; //图像的宽
int BmpHeight; //图像的高
int BiBitCount; //图像类型,每像素位数 8-灰度图 24-彩色图
BITMAPFILEHEADER BmpHead;
BITMAPINFOHEADER BmpInfo;
FILE *fp = fopen(BMP_FILE_NAME, "rb"); //二进制读方式打开指定的图像文件
if (fp == 0)
{
cerr << "Can not open " << BMP_FILE_NAME << endl;
return 0;
}
//获取位图文件头结构BITMAPFILEHEADER
cerr <<fread(&BmpHead, sizeof(BITMAPFILEHEADER), 1, fp);
//获取图像宽、高、每像素所占位数等信息
cerr <<fread(&BmpInfo, sizeof(BITMAPINFOHEADER), 1, fp);
BmpWidth = BmpInfo.biWidth; //宽度用来计算每行像素的字节数
BmpHeight = BmpInfo.biHeight; // 像素的行数
//计算图像每行像素所占的字节数(必须是4的倍数)
BiBitCount = BmpInfo.biBitCount;
int lineByte = (BmpWidth * BiBitCount / 8 + 3) / 4 * 4;
// 将图片读取到内存中
result = new(nothrow) unsigned char[BmpHeight * lineByte];
readBmp(fp, bmpBuf, BmpWidth, BmpHeight, BiBitCount, 0, BmpHeight - 1);
printf("the mask(filter) size is :%d X %d.\n", filter_size, filter_size);
printf("the matrix size is :%d X %d.\n", BmpWidth, BmpHeight);
clock_t start_CPU, end_CPU;
//arr res pFilter
int arr_size_1D = BmpWidth * BmpHeight;
int filter_size_1D = filter_size * filter_size;
float *arr_1Dr = (float *)malloc(arr_size_1D * sizeof(float));
float *arr_1Dg = (float *)malloc(arr_size_1D * sizeof(float));
float *arr_1Db = (float *)malloc(arr_size_1D * sizeof(float));
float *res_1Dr = (float *)malloc(arr_size_1D * sizeof(float));
float *res_1Dg = (float *)malloc(arr_size_1D * sizeof(float));
float *res_1Db = (float *)malloc(arr_size_1D * sizeof(float));
int i=0;
while(i<arr_size_1D)
{
arr_1Dr[i]=bmpBuf[i*3];
arr_1Dg[i]=bmpBuf[i*3+1];
arr_1Db[i]=bmpBuf[i*3+2];
i++;
}
// ############################初始化完毕############################
// ############################以下是卷积部分############################
//allocate mem
float *inD, *outD, *maskD;
//malloc 分配cuda内存
cudaMalloc((void **)&inD, sizeof(float) * arr_size_1D);
cudaMalloc((void **)&outD, sizeof(float) * arr_size_1D);
cudaMalloc((void **)&maskD, sizeof(float *) * filter_size_1D);
//复制图片
cudaMemcpy(inD, arr_1Dr, sizeof(float) * arr_size_1D, cudaMemcpyHostToDevice);
//cudaMemcpy(outD, arr_1Dr, sizeof(float) * arr_size_1D, cudaMemcpyHostToDevice);
//复制卷积核
cudaMemcpy(maskD, GsCore, sizeof(float) * filter_size_1D, cudaMemcpyHostToDevice);
//kerner function void convolution_2D_basic(float *in,float *out,float *mask,int maskwidth,int w,int h)
// int threadPerBlockX = 16;
// int threadPerBlockY = 16;
// dim3 grid((arr_size - 1) / threadPerBlockX + 1,(arr_size - 1) / threadPerBlockY + 1,1);
// dim3 block(threadPerBlockX, threadPerBlockY);
// convolution_2D_basic << <grid, block >>>(inD, outD, maskD, filter_size, arr_size, arr_size);
dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
dim3 dimGrid((arr_size - 1) / O_TILE_WIDTH + 1, (arr_size - 1) / O_TILE_WIDTH + 1, 1);
start_CPU = clock();
convolution_2D_shared<<<dimGrid, dimBlock>>>(inD, outD, maskD, filter_size, BmpWidth, BmpHeight);
//copy back
cudaMemcpy(res_1Dr, outD, sizeof(float) * arr_size_1D, cudaMemcpyDeviceToHost);
// cudaMemcpy(inD, arr_1Dg, sizeof(float) * arr_size_1D, cudaMemcpyHostToDevice);
// convolution_2D_shared<<<dimGrid, dimBlock>>>(inD, outD, maskD, filter_size, BmpWidth, BmpHeight);
// //copy back
// cudaMemcpy(res_1Dg, outD, sizeof(float) * arr_size_1D, cudaMemcpyDeviceToHost);
// cudaMemcpy(inD, arr_1Db, sizeof(float) * arr_size_1D, cudaMemcpyHostToDevice);
// convolution_2D_shared<<<dimGrid, dimBlock>>>(inD, outD, maskD, filter_size, BmpWidth, BmpHeight);
// //copy back
// cudaMemcpy(res_1Db, outD, sizeof(float) * arr_size_1D, cudaMemcpyDeviceToHost);
i=0;
while(i<arr_size_1D)
{
result[i*3]=res_1Dr[i];
result[i*3+1]=res_1Dg[i];
result[i*3+2]=res_1Db[i];
i=i+1;
}
printf("-------------------GPU version Done!------------------\n");
end_CPU = clock();
float time2 = (float)(end_CPU - start_CPU) / CLOCKS_PER_SEC;
printf("GPU time:%f ms\n", time2 * 1000);
saveBmp("cudaresult.bmp", result, BmpWidth, BmpHeight, BiBitCount);
cudaFree(inD);
cudaFree(outD);
cudaFree(maskD);
//check the res;
//check(arr_1D,res_1D,arr_size_1D);
//printf("the check result is : %d\n", check(res_1D, arr_1D_Cpu, arr_size_1D));
// printf("the speed up ratio is :%.2f\n", time*1000/ ((end_CPU - start_CPU) * 1000 * 1.0 / CLOCKS_PER_SEC));
for (int i = 0; i < arr_size_1D; i++)
{
// printf("%.2f ", arr_1Dr[i]);
}
} |
6,931 |
#define _CRT_SECURE_NO_WARNINGS
#define _USE_MATH_DEFINES
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <random>
#include <cassert>
#include <tuple>
#include <omp.h>
#include <time.h>
#include <math.h>
#define UNREACHABLE() assert(0)
#define Pi 3.14159265358979323846
#define MAX_Sphere 10
// Gamma-corrects (gamma 2.2) a linear radiance value and quantizes it to
// an 8-bit channel value clamped to [0, 255].
int tonemap(double v) {
	const int quantized = static_cast<int>(std::pow(v, 1 / 2.2) * 255);
	if (quantized < 0)
		return 0;
	if (quantized > 255)
		return 255;
	return quantized;
}
//\
struct Ray {
double3 o;
double3 d;
};
struct Sphere;
struct Hit {
double t;
double3 p;
double3 n;
const Sphere* sphere;
bool F;
};
enum class SurfaceType {
Diffuse,
Mirror,
Fresnel,
};
struct Sphere {
double3 p;
double r;
SurfaceType type;
double3 R;//˗@@F
double3 Le;
double ior = 1.5168;
};
struct Scene {
Sphere spheres[MAX_Sphere]
{
{ double3{1e5 + 1, 40.8, 81.6}, 1e5 , SurfaceType::Diffuse, double3{.99,0.,0.} },//̕
{ double3{-1e5 + 99, 40.8, 81.6}, 1e5 , SurfaceType::Diffuse, double3{0.,.99,0.} },//E̕
{ double3{50, 40.8, 1e5}, 1e5 , SurfaceType::Diffuse, double3{.75,.75,.75} },//̕
{ double3{50, 1e5, 81.6}, 1e5 , SurfaceType::Diffuse, double3{.75,.75,.75} },//V
{ double3{50, -1e5 + 81.6, 81.6}, 1e5 , SurfaceType::Diffuse, double3{.75,.75,.75} },//
{ double3{37, 16.5, 47}, 16.5, SurfaceType::Mirror, double3{.999,.999,.999} },//̋
{ double3{37, 49.5, 47}, 16.5, SurfaceType::Mirror, double3{.999,.999,.999} },//̋
{ double3{73, 16.5, 78}, 16.5, SurfaceType::Fresnel,double3{.999,.999,.999} },//E̋
{ double3{73, 49.5, 78}, 16.5, SurfaceType::Fresnel,double3{.999,.999,.999} },//E̋
{ double3{50, 681.6 - .27, 81.6}, 600 , SurfaceType::Diffuse, double3{0,0,0}, double3{12,12,12} },//Cg
};
};
//O[oϐ
const int width = 1200; //摜
const int height = 800; //摜
const int spp = 10; //sNZƂ̃Tv
const int depth = 10; //C̔ː
//zXg
double3 h_Result[width*height];
//foCX
double3 *d_Result;
//foCX
// Device helper: standard 3-D dot product of two double3 vectors.
__device__ double dot(double3 a, double3 b) {
return a.x * b.x + a.y * b.y + a.z * b.z;
}
// Device helper: returns v scaled to unit length.
__device__ double3 Normalize(double3 v) {
// Hoisted: the original recomputed sqrt(dot(v, v)) once per component
// (three dot products and three square roots for one normalization).
const double len = sqrt(dot(v, v));
return double3{ v.x / len, v.y / len, v.z / len };
}
// Device helper: right-handed 3-D cross product a x b.
__device__ double3 cross(double3 a, double3 b) {
return double3{ a.y * b.z - a.z * b.y,
a.z * b.x - a.x * b.z,
a.x * b.y - a.y * b.x };
}
//J[l
// GPUŌvZۂ̊
__global__ void gpu_function(double3 *d_Result)
{
int k_x = blockIdx.x * blockDim.x + threadIdx.x;//J[lXW
int k_y = blockIdx.y*blockDim.y + threadIdx.y;//J[lYW
int xsize = gridDim.x*blockDim.x;
int id = k_x + k_y * xsize;
d_Result[id] = { 0,0,0 };//{r,g,b}
curandStateXORWOW_t rands;
curand_init(1234, id, 0, &rands);
/*camera parameter*/
//ʒu
const double3 eye{ 50, 52, 295.6 };
//_
const double3 center = double3{ eye.x + 0, eye.y - 0.042612, eye.z - 1 };
//J̏\xNg
const double3 up{ 0, 1, 0 };
//p
const double fov = 30 * Pi / 180;
//ʂ̃AXyNg
const double aspect = double(width) / height;
// Basis vectors for camera coordinates
//JWn̊xNg
const auto wE = Normalize({ eye.x - center.x, eye.y - center.y, eye.z - center.z });
const auto uE = Normalize(cross(up, wE));
const auto vE = cross(wE, uE);
for (int j = 0; j < spp; j++) {
const int x = id % width;
const int y = height - id / width;
Ray ray;
ray.o = eye;
ray.d = [&]() {
const double tf = tan(fov * .5);
const double rpx = 2. * (x + curand_uniform_double(&rands)) / width - 1;
const double rpy = 2. * (y + curand_uniform_double(&rands)) / height - 1;
const double3 w = Normalize(double3{ aspect * tf * rpx, tf * rpy, -1 });
return double3{ uE.x * w.x + vE.x * w.y + wE.x * w.z,
uE.y * w.x + vE.y * w.y + wE.y * w.z,
uE.z * w.x + vE.z * w.y + wE.z * w.z };// uE*w.x + vE * w.y + wE * w.z;
}();
double3 L{ 0,0,0 };
double3 th{ 1.,1.,1. };
for (int k = 0; k < depth; k++) {//ˉH@@ˉPƂC̐łQ
// _EJ̐ݒ
Scene scene;
Hit minh;
int num;
double tmin = 1e-4;
double tmax = 1e+10;
for (int i = 0; i < MAX_Sphere; i++) {
Hit hit;
const double3 op = { scene.spheres[i].p.x - ray.o.x , scene.spheres[i].p.y - ray.o.y , scene.spheres[i].p.z - ray.o.z };
const double b = op.x*ray.d.x + op.y*ray.d.y + op.z*ray.d.z;
const double det = b * b - (op.x*op.x + op.y*op.y + op.z*op.z) + scene.spheres[i].r * scene.spheres[i].r;
if (det < 0) {
hit = Hit{ 0,{0,0,0},{0,0,0},nullptr,false };
}
else {
const double t1 = b - sqrt(det);
if (tmin < t1 && t1 < tmax) {
hit = Hit{ t1, {}, {}, &scene.spheres[i] ,true };
}
else {
const double t2 = b + sqrt(det);
if (tmin < t2 && t2 < tmax) {
num = 11;
hit = Hit{ t2, {}, {}, &scene.spheres[i] ,true };
}
else {
hit = Hit{ 0,{0,0,0},{0,0,0},nullptr,false };//KHIT̒lԂ
}
}
}
if (!hit.F) { continue; };
//num = i;
minh = hit;
minh.F = true;//lj
tmax = minh.t;
}
if (minh.F) {
const Sphere* s = minh.sphere;
minh.p = double3{ ray.o.x + ray.d.x * minh.t, ray.o.y + ray.d.y * minh.t, ray.o.z + ray.d.z * minh.t };
minh.n = double3{ (minh.p.x - s->p.x) / s->r ,(minh.p.y - s->p.y) / s->r ,(minh.p.z - s->p.z) / s->r };
}
//return minh;
// Intersection
const Hit h = minh;
if (!h.F) {
break;
}
// Add contribution
L = double3{ L.x + th.x * h.sphere->Le.x, L.y + th.y * h.sphere->Le.y, L.z + th.z * h.sphere->Le.z };
// Update next direction
ray.o = h.p;
ray.d = [&]() {
if (h.sphere->type == SurfaceType::Diffuse) {
// Sample direction in local coordinates
const double3 n = dot(h.n, double3{ -ray.d.x,-ray.d.y ,-ray.d.z }) > 0 ? double3{ h.n.x,h.n.y,h.n.z } : double3{ -h.n.x,-h.n.y,-h.n.z };
double3 u{ 0,0,0 }, v{ 0,0,0 };
const double s = n.z >= 0 ? 1 : -1;
const double a = -1 / (s + n.z);
const double b = n.x * n.y * a;
u = double3{ 1 + s * n.x * n.x * a, s * b, -s * n.x };
v = double3{ b, s + n.y * n.y * a, -n.y };
const double3 d = [&]() {
const double r = sqrt(curand_uniform_double(&rands));
const double t = 2 * Pi * curand_uniform_double(&rands);
const double x = r * cos(t);
const double y = r * sin(t);
if (0.0 > 1 - x * x - y * y) {
return double3{ x, y,
sqrt(0.0) };
}
else {
return double3{ x, y,
sqrt(1 - x * x - y * y) };
}
}();
// Convert to world coordinates
return double3{ u.x * d.x + v.x * d.y + n.x * d.z,
u.y * d.x + v.y * d.y + n.y * d.z,
u.z * d.x + v.z * d.y + n.z * d.z };
}
else if (h.sphere->type == SurfaceType::Mirror) {
const double3 wi = double3{ -ray.d.x, -ray.d.y, -ray.d.z };//-ray.d
return double3{ 2 * dot(wi,h.n) * h.n.x - wi.x,
2 * dot(wi,h.n) * h.n.y - wi.y,
2 * dot(wi,h.n) * h.n.z - wi.z };
}
else if (h.sphere->type == SurfaceType::Fresnel) {
const double3 wi = double3{ -ray.d.x, -ray.d.y, -ray.d.z };//-ray.d;
const bool into = dot(wi, h.n) > 0;
const double3 n = into ? h.n : double3{ -h.n.x,-h.n.y,-h.n.z };
const double ior = h.sphere->ior;
const double eta = into ? 1 / ior : ior;
bool F;
const double3 wt = [&]() -> double3 {
// Snell's law (vector form)
const double t = dot(wi, n);
const double t2 = 1 - eta * eta * (1 - t * t);
if (t2 < 0) {
F = false;
return double3{ 0,0,0 };
}
F = true;
return double3{ eta * (n.x * t - wi.x) - n.x * sqrt(t2),
eta * (n.y * t - wi.y) - n.y * sqrt(t2),
eta * (n.z * t - wi.z) - n.z * sqrt(t2) }; //eta * (n * t - wi) - n * sqrt(t2);
}();
if (!F) {
// Total internal reflection
return double3{ 2 * dot(wi,h.n) * h.n.x - wi.x,
2 * dot(wi,h.n) * h.n.y - wi.y,
2 * dot(wi,h.n) * h.n.z - wi.z };// 2 * dot(wi, h.n) * h.n - wi;
}
const double Fr = [&]() {
// Schlick's approximation
const double cos = into
? dot(wi, h.n)
: dot(wt, h.n);
const double r = (1 - ior) / (1 + ior);
return r * r + (1 - r * r) * pow(1 - cos, 5);
}();
// Select reflection or refraction
// according to the fresnel term
return curand_uniform_double(&rands) < Fr
? double3{ 2 * dot(wi, h.n) * h.n.x - wi.x,
2 * dot(wi, h.n) * h.n.y - wi.y,
2 * dot(wi, h.n) * h.n.z - wi.z }
: wt;
}
//UNREACHABLE();
return double3{ 0,0,0 };
}();
// Update throughput
th = double3{ th.x*h.sphere->R.x, th.y*h.sphere->R.y ,th.z*h.sphere->R.z };
if (th.x > th.y&&th.x > th.z&&th.x == 0) {
break;
}
if (th.y > th.x&&th.y > th.z&&th.y == 0) {
break;
}
if (th.z > th.x&&th.z > th.y&&th.z == 0) {
break;
}
}
d_Result[id] = double3{ (d_Result[id].x + L.x / spp), (d_Result[id].y + L.y / spp), (d_Result[id].z + L.z / spp) };
}
}
// main function
int cudafunction(void)
{
int start = clock();
// foCX(GPU)̗̈m
cudaMalloc(&d_Result, width*height * sizeof(double3));
// CPUGPŨf[^Rs[
cudaMemcpy(d_Result, h_Result, width*height * sizeof(double3), cudaMemcpyHostToDevice);
dim3 grid(75, 50);//Obh
dim3 block(16, 16, 1);//ubN 16̔{炵
// GPUŌvZ
gpu_function << <grid, block >> > (d_Result);
// GPUCPŨf[^Rs[
cudaMemcpy(h_Result, d_Result, width*height * sizeof(double3), cudaMemcpyDeviceToHost);
int end = clock();
cudaFree(d_Result);
std::ofstream ofs("result.ppm");
ofs << "P3\n" << width << " " << height << "\n255\n";
for (const auto& i : h_Result) {
ofs << tonemap(i.x) << " "
<< tonemap(i.y) << " "
<< tonemap(i.z) << "\n";
}
return end-start;
}
|
6,932 | #define REAL double
// 3-D 7-point diffusion stencil, one thread per (i, j) column. Each thread
// sweeps k = 0..nz-1, rotating the below/center/above plane values through
// registers t1/t2/t3 so every cell is loaded from global memory only once.
// x/y boundaries are clamped (the cell's own index substitutes for the
// missing neighbor); k = 0 reuses its own plane below (t1 == t2) and
// k = nz-1 reuses the previous t3 above.
__global__ void diffusion_kernel(
const REAL * __restrict__ gf1, REAL *gf2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct, REAL cb, REAL cc){
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j = blockDim.y * blockIdx.y + threadIdx.y;
int c = i + j*nx;
int xy = nx*ny;
// clamped west/east/north/south neighbor indices
int w = (i == 0) ? c : c - 1;
int e = (i == nx-1) ? c : c + 1;
int n = (j == 0) ? c : c - nx;
int s = (j == ny-1) ? c : c + nx;
REAL t1, t2, t3;
t1 = t2 = gf1[c];
t3 = gf1[c + xy];
// k = 0 plane
gf2[c] = cc*t2 + cw*gf1[w] + ce*gf1[e] + cs*gf1[s]
+ cn*gf1[n] + cb*t1 + ct*t3;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
// interior planes: rotate registers, load only the plane above
for(int k=1; k<nz-1; k++){
t1 = t2;
t2 = t3;
t3 = gf1[c+xy];
gf2[c] = cc*t2 + cw*gf1[w] + ce*gf1[e] + cs*gf1[s]
+ cn*gf1[n] + cb*t1 + ct*t3;
c += xy;
w += xy;
e += xy;
n += xy;
s += xy;
}
// k = nz-1 plane (t3 retains the previous top value)
t1 = t2;
t2 = t3;
gf2[c] = cc*t2 + cw*gf1[w] + ce*gf1[e] + cs*gf1[s]
+ cn*gf1[n] + cb*t1 + ct*t3;
return;
}
extern "C" {
// Host driver: repeatedly applies diffusion_kernel with 128x1x1 thread
// blocks, ping-ponging f1/f2 between iterations, until simulated time
// reaches ~0.1. Reports elapsed simulated time and iteration count.
void diffusion_cuda_host(REAL *f1, REAL *f2, int nx, int ny, int nz,
REAL ce, REAL cw, REAL cn, REAL cs, REAL ct,
REAL cb, REAL cc, REAL dt,
REAL **f_ret, REAL *time_ret, int *count_ret) {
REAL time = 0.0;
int count = 0;
int blockDim_x = 128;
int blockDim_y = 1;
int blockDim_z = 1;
int grid_x = (nx-1) / blockDim_x + 1;
int grid_y = (ny-1) / blockDim_y + 1;
int grid_z = 1;
dim3 grid(grid_x, grid_y, grid_z);
dim3 threads(blockDim_x, blockDim_y, blockDim_z);
//cudaFuncSetCacheConfig(diffusion_kernel, cudaFuncCachePreferL1);
do {
diffusion_kernel<<<grid, threads>>>(f1, f2, nx, ny, nz, ce, cw, cn, cs, ct, cb, cc);
REAL *t = f1;
f1 = f2;
f2 = t;
time += dt;
count++;
} while (time + 0.5*dt < 0.1);
// NOTE(review): *f_ret is never written (the assignment below is commented
// out), so the caller cannot tell which buffer holds the final result —
// confirm this is intentional.
//*f_ret = f1;
*time_ret = time;
*count_ret = count;
return;
}
}
|
6,933 | #include <cstdlib>
#include <cstdio>
#include <cassert>
#define SIZE 234
/* Abort the program with a diagnostic if a CUDA API call failed.
 * 'line' is the caller's __LINE__, reported together with this file name. */
void print_err_msg(cudaError_t err, int line) {
  if (err == cudaSuccess)
    return;   // nothing to report
  printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, line);
  exit(EXIT_FAILURE);
}
__global__ void matrix_vec_product(float* d_mat, float* d_in_vec, float* d_out_vec, size_t size) {
    /*
       Matrix-vector product kernel: each thread computes one output row of
       the size x size matrix (stored as a flat row-major 1D array).
       FIX: the original derived both the thread's row and the row length
       from gridDim.x, which was only correct for the specific launch
       <<<size, 1>>> (it also wrote d_out_vec[blockIdx.x], wrong for more
       than one thread per block). Index with the standard
       blockIdx.x*blockDim.x + threadIdx.x, use 'size' as the row length,
       and guard the grid tail so any 1D launch with >= size threads works.
    */
    size_t row = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= size) return;
    float acc = 0.0f;
    for (size_t j = 0; j < size; ++j) {
        acc += d_mat[row * size + j] * d_in_vec[j];
    }
    d_out_vec[row] = acc;
}
int main() {
    // Host buffers.
    float* h_mat;
    float* h_in_vec;
    float* h_out_vec;
    // Device buffers.
    float* d_mat;
    float* d_in_vec;
    float* d_out_vec;
    h_mat = new float[SIZE*SIZE];
    h_in_vec = new float[SIZE];
    h_out_vec = new float[SIZE];
    cudaError_t err;
    err = cudaMalloc((void **) &d_mat, SIZE*SIZE*sizeof(float));
    print_err_msg(err, __LINE__);
    err = cudaMalloc((void **) &d_in_vec, SIZE*sizeof(float));
    print_err_msg(err, __LINE__);
    err = cudaMalloc((void **) &d_out_vec, SIZE*sizeof(float));
    print_err_msg(err, __LINE__);
    // Matrix of all ones, input vector 1..SIZE: every output component
    // should equal SIZE*(SIZE+1)/2.
    for(size_t i = 0; i < SIZE; ++i) {
        for(size_t j = 0; j < SIZE; ++j) {
            h_mat[j+i*SIZE] = 1.0f;
        }
        h_in_vec[i] = static_cast<float>(i+1);
    }
    err = cudaMemcpy(d_mat, h_mat, SIZE*SIZE*sizeof(float), cudaMemcpyHostToDevice);
    print_err_msg(err, __LINE__);
    err = cudaMemcpy(d_in_vec, h_in_vec, SIZE*sizeof(float), cudaMemcpyHostToDevice);
    print_err_msg(err, __LINE__);
    // (FIX: removed the copy of the uninitialized h_out_vec to the device —
    // it read indeterminate host memory and the data was overwritten anyway.)
    // One thread per matrix row.
    // FIX: the original wrapped an already-truncating integer division in
    // ceil(); use exact integer ceil-division instead.
    int threads = 1;
    int blocks = (SIZE + threads - 1) / threads;
    matrix_vec_product<<<blocks, threads>>>(d_mat, d_in_vec, d_out_vec, SIZE);
    // FIX: surface launch-configuration errors; they were never checked.
    print_err_msg(cudaGetLastError(), __LINE__);
    err = cudaMemcpy(h_out_vec, d_out_vec, SIZE*sizeof(float), cudaMemcpyDeviceToHost);
    print_err_msg(err, __LINE__);
    for(size_t i = 0; i < SIZE; ++i) {
        printf("out_vec[i]: %f, i: %zu\n", h_out_vec[i], i);
        /* assert(out_vec[i] == i); */
    }
    delete[] h_mat;
    delete[] h_in_vec;
    delete[] h_out_vec;
    err = cudaFree(d_mat);
    print_err_msg(err, __LINE__);
    err = cudaFree(d_in_vec);
    print_err_msg(err, __LINE__);
    err = cudaFree(d_out_vec);
    print_err_msg(err, __LINE__);
    return 0;
}
|
6,934 | #include "includes.h"
/* Energy sub-step with heating/cooling source terms for a polar disk grid.
 * One thread per cell; 2D launch with x -> azimuthal sector j (nsec) and
 * y -> radial ring i (nrad).
 * Step 1: the innermost ring (i == 0) gets its heating term Qplus filled by
 * power-law extrapolation from rings 1 and 2 (viscous heating is undefined
 * at the inner edge). The same thread then reads that value in step 2, so
 * no cross-thread synchronization is needed.
 * Step 2: implicit update of the energy — adiabatic when Cooling == 0,
 * otherwise with the prescribed per-ring cooling time CoolingTimeMed. */
__global__ void Substep3Kernel2 (double *Dens, double *Qplus, double *viscosity_array, double *TAURR, double *TAURP,double *TAUPP, double *DivergenceVelocity, int nrad, int nsec, double *Rmed, int Cooling, double *EnergyNew, double dt, double *EnergyMed, double *SigmaMed, double *CoolingTimeMed, double *EnergyInt, double ADIABATICINDEX, double *QplusMed)
{
  int j = threadIdx.x + blockDim.x*blockIdx.x;
  int i = threadIdx.y + blockDim.y*blockIdx.y;
  double den, num;
  if (i==0 && j<nsec){
    /* We calculate the heating source term Qplus for i=0 */
    if (viscosity_array[nrad-1] != 0.0) {
      /* power-law extrapolation */
      Qplus[i*nsec + j] = Qplus[(i+1)*nsec + j]*exp(log(Qplus[(i+1)*nsec + j]/Qplus[(i+2)*nsec + j]) * \
        log(Rmed[i]/Rmed[i+1]) / log(Rmed[i+1]/Rmed[i+2]));
    }
    else
      Qplus[i*nsec + j] = 0.0;
  }
  /* Now we can update energy with source terms from i=0 */
  if (i<nrad && j<nsec){
    if (!Cooling){
      /* Adiabatic case: implicit in the PdV (divergence) term. */
      num = dt*Qplus[i*nsec + j] + EnergyInt[i*nsec + j];
      den = 1.0+(ADIABATICINDEX-1.0)*dt*DivergenceVelocity[i*nsec + j];
      EnergyNew[i*nsec + j] = num/den;
    }
    else{
      /* Cooling case: relax toward EnergyMed scaled by the local density. */
      num = EnergyMed[i]*dt*Dens[i*nsec + j]/SigmaMed[i] + CoolingTimeMed[i]*EnergyInt[i*nsec + j] + \
        dt*CoolingTimeMed[i]*(Qplus[i*nsec + j]-QplusMed[i]*Dens[i*nsec + j]/SigmaMed[i]);
      den = dt + CoolingTimeMed[i] + (ADIABATICINDEX-1.0)*dt*CoolingTimeMed[i]*DivergenceVelocity[i*nsec + j];
      EnergyNew[i*nsec + j] = num/den;
    }
  }
}
6,935 |
/*************************************
* Matrix-Vector product CUDA kernel *
* V1: Without Shared memory *
*************************************/
#include <stdio.h>
#define CUDA_SAFE_CALL( call ) { \
cudaError_t err = call; \
if( cudaSuccess != err ) { \
fprintf(stderr,"CUDA: error occurred in cuda routine. Exiting...\n"); \
exit(err); \
} }
#define A(i,j) A[ (j) + ((i)*(n)) ]
#define x(i) x[ (i) ]
#define y(i) y[ (i) ]
#define y_gpu(i) y_gpu[ (i) ]
#define y_cpu(i) y_cpu[ (i) ]
#define d_A(i,j) d_A[ (j) + ((i)*(n)) ]
#define d_x(i) d_x[ (i) ]
#define d_y(i) d_y[ (i) ]
#define BLOCKSIZE 32
__global__ void compute_kernel( unsigned int m, unsigned int n, float *d_A, float *d_x, float *d_y ) {
    /* Each thread computes one component of d_y: the dot product of one row
       of the m-by-n matrix d_A with the n-element vector d_x. */
    float Cvalue = 0;
    int row = blockIdx.x * BLOCKSIZE + threadIdx.x;
    /* FIX: the guard must compare against the number of rows m (d_y has m
       entries), not the number of columns n; and the dot-product loop must
       run over the n columns, not m. The original had both swapped, which
       skipped rows or read/wrote out of bounds whenever m != n. */
    if(row<m){
        for (int e = 0; e < n; ++e)
            Cvalue += d_A(row,e) * d_x(e);
        d_y(row) = Cvalue;
    }
}
/* GPU path for y = A*x: allocate device buffers, copy the m x n matrix and
 * the n-vector over, launch one thread per output row, and copy the
 * m-element result back into h_y. */
int cu_matrix_vector( unsigned int m, unsigned int n, float *h_A, float *h_x, float *h_y ) {
    float *d_A, *d_x, *d_y;
    const size_t bytesA = (size_t)m * n * sizeof(float);
    const size_t bytesX = (size_t)n * sizeof(float);
    const size_t bytesY = (size_t)m * sizeof(float);
    CUDA_SAFE_CALL( cudaMalloc((void **) &d_A, bytesA) );
    CUDA_SAFE_CALL( cudaMalloc((void **) &d_x, bytesX) );
    CUDA_SAFE_CALL( cudaMalloc((void **) &d_y, bytesY) );
    CUDA_SAFE_CALL( cudaMemcpy( d_A, h_A, bytesA, cudaMemcpyHostToDevice ) );
    CUDA_SAFE_CALL( cudaMemcpy( d_x, h_x, bytesX, cudaMemcpyHostToDevice ) );
    /* Ceil-division: enough BLOCKSIZE-wide blocks to cover all m rows. */
    dim3 dimGrid( (m + BLOCKSIZE - 1) / BLOCKSIZE );
    dim3 dimBlock( BLOCKSIZE );
    compute_kernel<<< dimGrid, dimBlock >>>( m, n, d_A, d_x, d_y );
    /* Blocking copy also synchronizes with the kernel above. */
    CUDA_SAFE_CALL( cudaMemcpy( h_y, d_y, bytesY, cudaMemcpyDeviceToHost ) );
    CUDA_SAFE_CALL( cudaFree(d_A) );
    CUDA_SAFE_CALL( cudaFree(d_x) );
    CUDA_SAFE_CALL( cudaFree(d_y) );
    return EXIT_SUCCESS;
}
/* CPU reference: y = A*x for a row-major m-by-n matrix A. */
int matrix_vector( unsigned int m, unsigned int n, float *A, float *x, float *y ) {
    for( unsigned int i = 0; i < m; i++ ) {
        float acc = 0.0f;
        for( unsigned int j = 0; j < n; j++ ) {
            acc += A[ j + i*n ] * x[ j ];
        }
        y[ i ] = acc;
    }
    return EXIT_SUCCESS;
}
/* Driver: times y = A*x on CPU and GPU for a random m x n system, reports
 * the normalized L1 difference and the GFLOP/s of each path. */
int main( int argc, char *argv[] ) {
    unsigned int m, n;
    unsigned int i, j;
    /* Parse matrix dimensions from the command line. */
    if( argc<3 ) {
        printf("Usage: %s n_rows n_cols \n",argv[0]);
        exit(-1);
    }
    sscanf(argv[1],"%d",&m);
    sscanf(argv[2],"%d",&n);
    /* Random matrix and vector, entries uniform in [-1, 1). */
    float *A = (float *) malloc( m*n*sizeof(float) );
    float *x = (float *) malloc( n*sizeof(float) );
    printf("%s: Generating a random matrix of size %dx%d and a vector of size %d...\n",argv[0],m,n,n);
    for( i=0; i<m; i++ ) {
        for( j=0; j<n; j++ ) {
            A( i, j ) = 2.0f * ( (float) rand() / RAND_MAX ) - 1.0f;
        }
    }
    for( j=0; j<n; j++ ) {
        x( j ) = 2.0f * ( (float) rand() / RAND_MAX ) - 1.0f;
    }
    /* CUDA events used to time both the CPU and the GPU paths. */
    cudaEvent_t start, stop;
    CUDA_SAFE_CALL( cudaEventCreate(&start) );
    CUDA_SAFE_CALL( cudaEventCreate(&stop) );
    printf("%s: y=A*x in CPU...\n",argv[0]);
    float *y_cpu = (float *) malloc( m*sizeof(float) );
    CUDA_SAFE_CALL( cudaEventRecord(start, NULL) ); // Record the start event
    matrix_vector( m, n, A, x, y_cpu );
    CUDA_SAFE_CALL( cudaEventRecord(stop, NULL) );  // Record the stop event
    CUDA_SAFE_CALL( cudaEventSynchronize(stop) );   // Wait for the stop event to complete
    float msecCPU = 0.0f;
    CUDA_SAFE_CALL( cudaEventElapsedTime(&msecCPU, start, stop) );
    printf("%s: y=A*x in GPU...\n",argv[0]);
    float *y_gpu = (float *) malloc( m*sizeof(float) );
    CUDA_SAFE_CALL( cudaEventRecord(start, NULL) ); // Record the start event
    cu_matrix_vector( m, n, A, x, y_gpu );
    CUDA_SAFE_CALL( cudaEventRecord(stop, NULL) );  // Record the stop event
    CUDA_SAFE_CALL( cudaEventSynchronize(stop) );   // Wait for the stop event to complete
    float msecGPU = 0.0f;
    CUDA_SAFE_CALL( cudaEventElapsedTime(&msecGPU, start, stop) );
    /* Correctness check: L1 error normalized by the largest |y_cpu|.
       FIX: the original compared the *signed* y_cpu(i) against the running
       maximum and applied fabs() to the result of the ternary, so a vector
       whose largest-magnitude entry was negative gave the wrong
       normalization. Take the absolute value before comparing. */
    float maxAbs = fabs( y_cpu( 0 ) );
    for( i=1; i<m; i++ ) {
        float v = fabs( y_cpu( i ) );
        if( v > maxAbs ) maxAbs = v;
    }
    float error = 0.0f;
    for( i=0; i<m; i++ ) {
        error += fabs( y_gpu( i ) - y_cpu( i ) );
    }
    printf("Error CPU/GPU = %.3e\n",error/maxAbs);
    double flops = 2.0 * (double) m * (double) n;
    double gigaFlopsCPU = (flops * 1.0e-9f) / (msecCPU / 1000.0f);
    double gigaFlopsGPU = (flops * 1.0e-9f) / (msecGPU / 1000.0f);
    printf("CPU time = %.2f msec.\n",msecCPU);
    printf("GPU time = %.2f msec.\n",msecGPU);
    printf("Gflops CPU = %.2f \n",gigaFlopsCPU);
    printf("Gflops GPU = %.2f \n",gigaFlopsGPU);
    /* FIX: destroy the timing events and add the missing return. */
    CUDA_SAFE_CALL( cudaEventDestroy(start) );
    CUDA_SAFE_CALL( cudaEventDestroy(stop) );
    free(A);
    free(x);
    free(y_cpu);
    free(y_gpu);
    return 0;
}
|
6,936 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
using namespace std;
// Map each sample in place: result[tid] <- result[tid]^2 * step, i.e. the
// integrand f(x) = x^2 weighted by the integration step. One thread per
// element, single block (indexing uses threadIdx.x only).
__global__ void init(double* result, double step) {
    const int tid = threadIdx.x;
    const double x = result[tid];
    result[tid] = x * x * step;
}
// In-place tree reduction of 2*blockDim.x elements into input[0].
// Works only when the element count is a power of two; launch with a single
// block of count/2 threads.
__global__ void sum(double* input) {
    const int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;
    while (number_of_threads > 0) {
        if (tid < number_of_threads) {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size;
            input[fst] += input[snd];
        }
        // FIX: without a barrier here, threads in later passes could read
        // partial sums not yet written by threads in other warps (data
        // race). The barrier is outside the divergent 'if', so every
        // thread of the block reaches it.
        __syncthreads();
        step_size <<= 1;
        number_of_threads >>= 1;
    }
}
/* Left-Riemann integration of f(x) = x^2 over [0, 8] on the GPU:
 * 'init' maps each sample x to x^2*step, 'sum' tree-reduces the 128
 * samples in place, and element 0 holds the result.
 * NOTE(review): `double input[steps]` is a variable-length array — a
 * compiler extension in C++ (steps is not a compile-time constant);
 * accepted by gcc/nvcc but not portable. */
int main() {
    const double start = 0;
    const double finish = 8;
    const double step = 0.0625;
    int steps = (finish - start) / step;   // 128 — a power of two, as 'sum' requires
    const int size = steps * sizeof(double);
    double input[steps];
    for (int i = 0; i < steps; ++i) {
        input[i] = i * step;   // left endpoint of each sub-interval
    }
    double* d;
    cudaMalloc(&d, size);
    cudaMemcpy(d, input, size, cudaMemcpyHostToDevice);
    init <<<1, steps>>>(d, step);
    // steps/2 threads: each reduction thread adds one pair per pass.
    sum <<<1, steps / 2 >>>(d);
    double result;
    // The blocking memcpy also synchronizes with the kernels above.
    cudaMemcpy(&result, d, sizeof(double), cudaMemcpyDeviceToHost);
    cout << "Sum is " << result << endl;
    cudaFree(d);
    return 0;
}
|
6,937 | #include "stdio.h"
// Element-wise affine transform on the GPU: A[i] <- (A[i] + a) * b + c.
// Single-block launch, one thread per element (indexes with threadIdx.x
// only, so N must not exceed the block size).
__global__ void add (int N, float a, float b, float c, float* A)
{
    const int idx = threadIdx.x;
    const float shifted = A[idx] + a;
    A[idx] = shifted * b + c;
}
// Fill A[0..N-1] with the ramp 0.0f, 1.0f, ..., N-1.
void init (float* A, int N)
{
    int i = 0;
    while (i < N)
    {
        A[i] = static_cast<float>(i);
        ++i;
    }
}
// Main function on the host: initializes N floats, applies the affine
// transform (A[i]+3)*4-2 on the GPU, and prints the result.
int main()
{
    int N = 20;
    float *A, *dev_A;
    A = (float*) malloc(sizeof(float) * N);
    printf ("Initializing array\n");
    init(A, N); // Initialize the array
    printf ("Initialization complete\n");
    cudaMalloc((void **) &dev_A, sizeof(float) * N);
    printf ("Device memory allocated\n");
    cudaMemcpy(dev_A, A, sizeof(float) * N, cudaMemcpyHostToDevice);
    printf ("Data moved to device\n");
    add <<< 1, N >>> (N, 3.0f, 4.0f, -2.0f, dev_A);
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(A, dev_A, sizeof(float) * N, cudaMemcpyDeviceToHost);
    printf ("Data moved to host\n");
    cudaFree(dev_A);
    printf ("Device memory released\n");
    for (int i = 0; i < N; i++)
    {
        printf ("%.2f ", A[i]);
    }
    printf ("\n");
    free(A);   // FIX: the malloc'd host buffer was never released
    return 0;
}
6,938 | #include "includes.h"
// Element-wise (Hadamard) scaling: x[i] *= wt[i] for i in [0, N).
__global__ void kernel_hadamard(int N, double *wt, double *x){
    const unsigned int idx = blockIdx.x*blockDim.x + threadIdx.x;
    // Guard the grid tail so only N threads do work.
    if (idx >= N) return;
    x[idx] = x[idx] * wt[idx];
}
6,939 | #include <stdio.h>
#include <stdlib.h>
// Enumerate CUDA devices and print the compute capability of each.
int main(void){
    int deviceCount = 0;
    // FIX: cudaGetDeviceCount's error status was ignored; on failure
    // (no driver / no device) deviceCount would be used unchecked.
    cudaError_t err = cudaGetDeviceCount(&deviceCount);
    if (err != cudaSuccess) {
        printf("cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    for (int device = 0; device < deviceCount; ++device) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, device);
        printf("Device %d has compute capability %d.%d.\n",
               device, deviceProp.major, deviceProp.minor);
    }
    return EXIT_SUCCESS;
}
|
6,940 | extern "C" __device__
int bar(int* out, int a) {
  // Explicitly placed to generate a warning for testing the NVRTC program log
  // (do not remove: the unused local is the point of this function's test).
  int unused;
  // Write twice the input through the out-parameter.
  *out = a * 2;
  // Status code: always 0 (no failure path exists).
  return 0;
}
|
6,941 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#define ThreadsPerBlock 512
__global__
// Per-pixel gamma correction: out = (in/255)^(1/gamma) * 255 on the r/g/b
// channels (the alpha channel of outputImage is left untouched, as before).
// One thread per pixel over the flat numRows*numCols image.
void GammaKernel(const uchar4* const rgbaImage, uchar4* const outputImage, int numRows, int numCols, float gamma)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < numRows * numCols) {
        uchar4 px = rgbaImage[i];
        // FIX: the original narrowed to unsigned char first and then
        // compared that value against 255.0f — a clamp that could never
        // trigger (and any overflow had already wrapped). Clamp the float
        // result BEFORE truncating to unsigned char.
        float r = roundf(powf(px.x / 255.0f, 1.0f / gamma) * 255.0f);
        float g = roundf(powf(px.y / 255.0f, 1.0f / gamma) * 255.0f);
        float b = roundf(powf(px.z / 255.0f, 1.0f / gamma) * 255.0f);
        outputImage[i].x = (unsigned char)(r > 255.0f ? 255.0f : r);
        outputImage[i].y = (unsigned char)(g > 255.0f ? 255.0f : g);
        outputImage[i].z = (unsigned char)(b > 255.0f ? 255.0f : b);
    }
}
// Host wrapper: launches GammaKernel with one thread per pixel and waits
// for completion.
void CorreccionGamma(uchar4* const d_rgbaImage, uchar4* const d_outputImage, size_t numRows, size_t numCols, float gamma)
{
    long long int total_px = numRows * numCols;
    // FIX: total_px / ThreadsPerBlock is *integer* division, so the
    // original ceil() received an already-truncated value and the last
    // partial block of pixels was never processed whenever
    // total_px % ThreadsPerBlock != 0. Use exact integer ceil-division.
    long long int grids_n = (total_px + ThreadsPerBlock - 1) / ThreadsPerBlock;
    const dim3 blockSize(ThreadsPerBlock, 1, 1);
    const dim3 gridSize(grids_n, 1, 1);
    GammaKernel <<<gridSize, blockSize >>> (d_rgbaImage, d_outputImage, numRows, numCols, gamma);
    cudaDeviceSynchronize();
}
6,942 | #include <stdio.h>
#include <cuda_runtime.h>
// Device-side hello: every launched thread prints one line.
__global__ void mykernel(void){
    printf("Oi GPU\n");
}
int main(void)
{
    // 2 blocks x 5 threads -> 10 device-side greetings.
    mykernel<<<2,5>>>();
    // Wait for the kernel so its printf buffer is flushed before ours.
    cudaDeviceSynchronize();
    printf("Hello World !!\n");
    return 0;
}
|
6,943 | ////////Dense Pyramidal Lucas and Kanade Optical Flow////////
////////Omid Rezai - omid.rezai@uwaterloo.ca///////////////
#include <math.h>
/* Dense pyramidal Lucas-Kanade optical flow, one pyramid level.
 * One thread per pixel. Note the unusual convention: x indexes rows
 * (bounded by Height) and y indexes columns (bounded by Width).
 * Starting from the flow (r_vx, r_vy) propagated from the coarser level,
 * each thread runs 'iter' Gauss-Newton refinements over a patch of
 * (window-2)^2 pixels: it accumulates the 2x2 structure tensor G
 * (G11, G22, G12) and the mismatch vector b from finite-difference image
 * gradients of img1/img2, then solves the 2x2 system to update the local
 * flow. 'alpha' is a Tikhonov regularizer added to G's diagonal; 'hw' is
 * the patch half-width. Results go to (wr_vx, wr_vy).
 * NOTE(review): 'Pitch' is used here as an element stride (elements per
 * row), not a byte pitch — confirm against the allocation. */
__global__ void LKPYRCUDA(float* wr_vx, float* wr_vy,
    float* r_vx, float* r_vy, float* img1, float* img2,
    int window,
    int iter,
    int hw,
    float alpha,
    size_t Pitch,
    unsigned int Width,
    unsigned int Height)
{
    // calculate normalized texture coordinates
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if( (x <Height)&&(y< Width ))
    {
        //wr_vx[y*Pitch + x] = lrintf( r_vx[x+Pitch*y]);
        //wr_vy[y*Pitch + x] = lrintf( r_vy[x+Pitch*y]);
        // Seed from the coarser level, rounded to whole pixels.
        float vxPrec = lrintf( r_vx[x+Pitch*y]); // tex2D(_levelTexture7,x,y);
        float vyPrec = lrintf( r_vy[x+Pitch*y]); //tex2D(_levelTexture8,x,y);
        for (int r=0 ; r<iter ; ++r)
        {
            // Patch bounds in img2, displaced by the current flow estimate.
            int lr = x - hw + vyPrec;
            int hr = x + hw + vyPrec;
            int lc = y - hw + vxPrec;
            int hc = y + hw + vxPrec;
            if ( (lr<0)||(hr>=Height)||(lc<0)||(hc>=Width))
            {
                //if indices outside image, last value keeped
            }
            else
            {
                float Ix;
                float Iy;
                float It;
                float G11=.0f;
                float G22=.0f;
                float G12=.0f;
                float bx=.0f;
                float by=.0f;
                //Computation of matrices A and b
                for(int i=0;i<window-2;++i)
                    for(int j=0;j<window-2;++j)
                    {
                        if ((i + lr < Height ) && (j + lc < Width )){
                            //interpolation by texture
                            // Spatial/temporal derivatives averaged over a
                            // 2x2 cell of each image (central differences).
                            Ix = (img1[x-hw+i+(y-hw+j)*Pitch]- img1[x-hw+i+(y+1-hw+j)*Pitch] + img1[x+1-hw+i+(y-hw+j)*Pitch]- img1[x+1-hw+i+(y+1-hw+j)*Pitch])*0.25 + (img2[lr+i+(lc+j)*Pitch]- img2[lr+i+(lc+j+1)*Pitch]+ img2[lr+i+1+(lc+j)*Pitch] - img2[lr+i+1+(lc+j+1)*Pitch])*0.25;
                            //Ix = Drx1[x-hw+i+(y-hw+j)*Pitch]; + Drx2[lr+i+(lc+j)*Pitch];
                            Iy = (img1[x-hw+i+(y-hw+j)*Pitch] + img1[x-hw+i+(y+1-hw+j)*Pitch] - img1[x+1-hw+i+(y-hw+j)*Pitch] - img1[x+1-hw+i+(y+1-hw+j)*Pitch] )*0.25+ (img2[lr+i+(lc+j)*Pitch] + img2[lr+i+(lc+j+1)*Pitch] - img2[lr+i+1+(lc+j)*Pitch] - img2[lr+i+1+(lc+j+1)*Pitch])*0.25;
                            //Iy = Dry1[x-hw+i+(y-hw+j)*Pitch] + Dry2[lr+i+(lc+j)*Pitch];
                            It = (img1[x-hw+i+(y-hw+j)*Pitch] + img1[x-hw+i+(y+1-hw+j)*Pitch] + img1[x+1-hw+i+(y-hw+j)*Pitch] + img1[x+1-hw+i+(y+1-hw+j)*Pitch])*0.25 + (img2[lr+i+(lc+j)*Pitch] + img2[lr+i+(lc+j+1)*Pitch] + img2[lr+i+1+(lc+j)*Pitch] + img2[lr+i+1+(lc+j+1)*Pitch])*-0.25;
                            //It = Drt1[x-hw+i+(y-hw+j)*Pitch] + Drt2[lr+i+(lc+j)*Pitch];
                            //wr_vx [j*Pitch + i] = It;
                            G11 += Ix*Ix;
                            G22 += Iy*Iy;
                            G12 += Ix*Iy;
                            bx -= (It*Ix);
                            by -= (It*Iy);
                        }
                    }
                // Regularize the diagonal so G stays invertible.
                G11 += alpha;
                G22 += alpha;
                //wr_vy [8] = G11;
                //wr_vy [9] = G12;
                //wr_vy [488] = G12;
                //wr_vy [489] = G22;
                //determinant and inverse
                //variable Ix is reused to store the determinant
                Ix = (float)(1./(G11 * G22 - G12 * G12));
                //wr_vy [11] = Ix;
                //wr_vy [12] = bx;
                //wr_vy [13] = by;
                //speed vectors update
                vxPrec += (Ix * (G22*bx - G12*by));
                vyPrec += (Ix * (G11*by - G12*bx));
            }
        }
        //speed vectors final update
        wr_vx[y*Pitch + x] = vxPrec;
        wr_vy[y*Pitch + x] = vyPrec;
    }
}
|
6,944 | #define LENGTH_V 1024*1024
#define LENGTH_SHOW 10
#include <stdio.h>
#include <stdlib.h>
#include <limits.h>
#include <time.h>
// Kernel: naive inclusive scan (Hillis-Steele), single block.
// Requires one thread per element (ndata == number of launched threads)
// and dynamic shared memory of 2*ndata*sizeof(int) for the double buffer.
// O(n log n) additions: each pass adds the value 'offset' positions back.
__global__ void scan_incl(int *g_odata, int *g_idata, int ndata) {
    int thid = threadIdx.x;
    extern __shared__ int temp[];
    // Double buffer dynamically reserved: pout is the half being written,
    // pin the half being read; the two halves swap on every pass.
    int pout = 0, pin = 1;
    temp[pout * ndata + thid] = g_idata[thid];
    __syncthreads();
    for (int offset = 1; offset < ndata; offset *= 2)
    {
        pout = 1 - pout; // swap double buffer indices
        pin = 1 - pout;
        // Copy the previous partial sum forward, then widen it by 'offset'.
        temp[pout * ndata + thid] = temp[pin * ndata + thid];
        if (thid >= offset) {
            temp[pout * ndata + thid] += temp[pin * ndata + thid - offset];
        }
        __syncthreads();
    }
    g_odata[thid] = temp[pout * ndata + thid]; // write output
}
// Print a labeled preview of a LENGTH_V-sized int vector: the title, an
// underline of 'lengthMyString' dashes, then either the whole vector (when
// short) or the first and last LENGTH_SHOW elements with "..." in between.
void show_vector(char *myString, int lengthMyString, int *vector) {
    printf("\n%s\n", myString);
    for (int k = 0; k < lengthMyString; k++) {
        printf("-");
    }
    printf("\n");
    if (LENGTH_V > 2 * LENGTH_SHOW) {
        for (int k = 0; k < LENGTH_SHOW; k++)
            printf(" %d", vector[k]);
        printf(" ...");
        for (int k = LENGTH_V - LENGTH_SHOW; k < LENGTH_V; k++)
            printf(" %d", vector[k]);
        printf("\n");
    } else {
        for (int k = 0; k < LENGTH_V; k++)
            printf(" %d", vector[k]);
        printf("\n");
    }
}
// CPU baseline: fill a random vector, compute its inclusive prefix sums,
// and report the elapsed time (the GPU scan kernel above is not launched).
int main(void)
{
    // FIX: LENGTH_V is 1M ints, so two stack arrays totalled 8 MB — well
    // past a typical 1-8 MB stack limit. Give them static storage instead.
    static int Vector[LENGTH_V], VectorScan[LENGTH_V];
    int j;
    clock_t start, end;
    double time_used;
    // -----------------------
    // Pseudo-random ints in [-50, 49].
    srand(time(NULL));
    for (j = 0; j < LENGTH_V; j++) {
        Vector[j] = (int)(rand() % 100) - 50;
    }
    char msg1[] = "Vector original";
    // FIX: strlen() was used without including <string.h>; for a
    // string-literal-initialized array, sizeof-1 is the same length.
    show_vector(msg1, (int)(sizeof(msg1) - 1), Vector);
    // -----------------------
    // Vector scan (CPU)
    // -----------------------
    start = clock();
    VectorScan[0] = Vector[0];
    for (j = 1; j < LENGTH_V; j++) {
        VectorScan[j] = Vector[j] + VectorScan[j - 1];
    }
    end = clock();
    // -----------------------
    char msg2[] = "Vector scan (CPU)";
    show_vector(msg2, (int)(sizeof(msg2) - 1), VectorScan);
    time_used = 1000.0*((double)(end-start)) / CLOCKS_PER_SEC;
    printf("CPU scan kernel processing time: %f millisec. (nº elements %d)\n",time_used, LENGTH_V);
    return 0;
}
|
6,945 | #include <stdint.h>
#define WARP_SIZE 32
// -------------------------------------------------------------------
// helper functions
// -------------------------------------------------------------------
// Get largest memory address that is aligned to a warp worth of floats
// and smaller than x.
// Round an address DOWN to the nearest boundary aligned to a warp's worth
// of floats (WARP_SIZE * sizeof(float) = 128 bytes), for coalesced access.
__forceinline__ __device__ uintptr_t getBlockBeginning(void const * x)
{
    uintptr_t const alignment = (uintptr_t)(WARP_SIZE * sizeof(float));
    return ((uintptr_t)x) & ~(alignment - 1);
}
// Call this kernel like compute_moments, but it does not need a scratch space
// Normalizes each image plane in place: data <- (data - mean) / sigma, with
// the per-channel mean in moments[channel] and the standard deviation in
// moments[channel + numChannels].
// Blocks grid-stride over planes (block b handles planes b, b+gridDim.x, ...).
// Within a plane, threads start from the warp-aligned address at or below
// the plane start (for coalesced access) and skip addresses before
// planeBegin.
// NOTE(review): 'channel' is derived once from blockIdx.x, but 'plane' later
// advances by gridDim.x; the channel only stays correct for every visited
// plane if gridDim.x is a multiple of numChannels — confirm the launch
// configuration.
__global__ void normalize_data(float * data,
                               float const * moments,
                               int planeArea,
                               int numPlanes,
                               int numChannels)
{
    int tid = threadIdx.x ;
    int plane = blockIdx.x ;
    int blockSize = blockDim.x ;
    int planeStride = gridDim.x ;
    int channel = blockIdx.x % numChannels ;
    float mean = moments[channel];
    float sigma = moments[channel+numChannels];
    /*float multiplier = multipliers[channel];*/
    /*float bias = biases[channel];*/
    /*float coefficient = 1.0 multiplier / sigma ;*/
    // NOTE(review): 1.0 is a double literal, forcing a double divide here.
    float coefficient = 1.0 / sigma ;
    while (plane < numPlanes) {
        float const * planeBegin = data + plane * planeArea ;
        float const * planeEnd = planeBegin + planeArea ;
        float const * block = (float const*) getBlockBeginning(planeBegin) + tid ;
        float * oblock = data + (block - data) ;
        while (block < planeEnd) {
            if (block >= planeBegin) {
                /**oblock = coefficient * (*block - mean) + bias ;*/
                *oblock = coefficient * (*block - mean) ;
            }
            block += blockSize ;
            oblock += blockSize ;
        }
        plane += planeStride ;
    }
}
|
6,946 | #include <cmath>
#include <iostream>
#include <vector>
// Thin type-safe wrapper around a raw device pointer. The private
// constructor plus the named factory makes wrapping explicit at call
// sites; dereference operators are device-only, while the raw-pointer
// conversion also works on the host (e.g. for cudaMemcpy / cudaFree).
template<typename T>
class DevicePtr
{
    T *ptr = nullptr;
    // Private: force construction through fromRaw().
    __device__ __host__ __inline__ DevicePtr(T *ptr) : ptr(ptr) { }
public:
    // Wrap a raw device pointer.
    static DevicePtr<T> fromRaw(T *ptr)
    {
        return { ptr };
    }
    // Device-only member access through the wrapped pointer.
    __device__ __inline__ T* operator->() const
    {
        return ptr;
    }
    // Device-only dereference.
    __device__ __inline__ T& operator*() const
    {
        return *ptr;
    }
    // Implicit conversion back to the raw pointer (host or device).
    __device__ __host__ __inline__ operator T*() const
    {
        return ptr;
    }
};
// Element-wise vector addition c = a + b over the first n elements.
// One thread per element, with a guard for the partial block at the tail.
template<typename T>
__global__ void vecAdd(
    DevicePtr<T> a, DevicePtr<T> b, DevicePtr<T> c,
    size_t n)
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) {
        return;
    }
    c[i] = a[i] + b[i];
}
// Demo driver: c = a + b where a[i] = sin^2(i) and b[i] = cos^2(i), so
// every element of c is 1.0 and the reduced sum should be ~n (1000).
int main()
{
    const size_t n = 1000;
    const size_t bytes = n * sizeof(double);
    std::vector<double> h_a(n);
    std::vector<double> h_b(n);
    // NOTE(review): 'double' loop counters work here (all values up to
    // n = 1000 are exactly representable) but size_t is conventional.
    for (double i = 0; i < n; i++) {
        h_a[i] = sin(i) * sin(i);
        h_b[i] = cos(i) * cos(i);
    }
    double *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c, bytes);
    cudaMemcpy(d_a, h_a.data(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b.data(), bytes, cudaMemcpyHostToDevice);
    // Ceil-division so the grid covers all n elements.
    const size_t blockDim = 1024;
    const size_t gridDim = static_cast<size_t>(
        ceil(static_cast<float>(n) / blockDim));
    vecAdd<double><<<gridDim, blockDim>>>(
        DevicePtr<double>::fromRaw(d_a),
        DevicePtr<double>::fromRaw(d_b),
        DevicePtr<double>::fromRaw(d_c),
        n);
    std::vector<double> h_c(n);
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(h_c.data(), d_c, bytes, cudaMemcpyDeviceToHost);
    double sum = 0;
    for (double i = 0; i < n; i++) {
        sum += h_c[i];
    }
    std::cout << "final result: " << sum << std::endl;
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
// Element-wise sum: result[i] = x[i] + y[i], one thread per element.
// NOTE: no bounds guard — the launch must supply exactly one thread per
// element (array_size divisible by the block size), as main() does.
__global__ void SumV0(int* x, int* y, int* result) {
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    // FIX: removed the unused local 'stride' (computed but never read).
    result[tid] = x[tid] + y[tid];
}
// Two elements per thread, one block-width apart: the grid needs only
// half as many blocks as SumV0 for the same array (no bounds guard, so
// the array length must be a multiple of 2*blockDim.x).
__global__ void SumV1(int *x, int* y, int* result) {
    const int first = threadIdx.x + 2 * blockDim.x * blockIdx.x;
    const int second = first + blockDim.x;
    result[first] = x[first] + y[first];
    result[second] = x[second] + y[second];
}
// Driver: fills two 2^26-element arrays (h_x[i] = i, h_y[i] = 2i) and runs
// both kernels back to back. SumV0 overwrites SumV1's output in d_result,
// so the copied-back h_result reflects SumV0. Results are never verified
// or printed — this is a launch/benchmark scaffold.
int main() {
    int array_size = 1 << 26;
    int *h_x = new int[array_size];
    int *h_y = new int[array_size];
    for (int i = 0; i < array_size; ++i) {
        h_x[i] = i;
        h_y[i] = 2 * i;
    }
    int* d_x;
    int* d_y;
    int* d_result;
    int num_bytes = sizeof(*h_x) * array_size;
    cudaMalloc(&d_x, num_bytes);
    cudaMalloc(&d_y, num_bytes);
    cudaMalloc(&d_result, num_bytes);
    cudaMemcpy(d_x, h_x, num_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, num_bytes, cudaMemcpyHostToDevice);
    // array_size is a multiple of block_size, so the grids divide exactly
    // and the guard-free kernels stay in bounds.
    int block_size = 512;
    int num_blocks = (array_size + block_size - 1) / block_size;
    SumV1<<<num_blocks / 2, block_size>>>(d_x, d_y, d_result);
    SumV0<<<num_blocks, block_size>>>(d_x, d_y, d_result);
    int *h_result = new int[array_size];
    // Blocking copy synchronizes with both kernel launches.
    cudaMemcpy(h_result, d_result, num_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_result);
    delete[] h_x;
    delete[] h_y;
    delete[] h_result;
    return 0;
}
|
6,948 | #ifndef _GENSPARSEMAT_
#define _GENSPARSEMAT_
void GenSparseMat(int *conVec, int rows, int clms, int* sparseVec, int* idxVec, int* nPostNeurons ) {
  /* Build a compressed (CSR-like) representation of the connectivity
     matrix in conVec, where entry (i, j) lives at conVec[i + clms*j].
     sparseVec receives the column indices of each row's non-zero entries,
     back to back; nPostNeurons[i] counts row i's non-zeros; idxVec[i] is
     the offset of row i's first entry within sparseVec. */
  int counter = 0;
  for (int i = 0; i < rows; ++i) {
    int nPost = 0;
    for (int j = 0; j < clms; ++j) {
      if (conVec[i + clms * j]) { /* connection i --> j */
        sparseVec[counter++] = j;
        ++nPost;
      }
    }
    nPostNeurons[i] = nPost;
  }
  /* Exclusive prefix sum of the per-row counts gives the row offsets. */
  idxVec[0] = 0;
  for (int i = 1; i < rows; ++i) {
    idxVec[i] = idxVec[i - 1] + nPostNeurons[i - 1];
  }
}
#endif
|
6,949 | // 03 Matrix Mulplication
#include <stdio.h>
// Naive dense matrix multiply P = M * N for square width x width matrices
// of ints. 2D launch, one thread per output element. No bounds guard, so
// width must be an exact multiple of both block dimensions (main uses
// 16x16 blocks with width 1024).
__global__ void MatrixMul(int *M, int *N, int *P, int width)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    int accu = 0;
    for (int k = 0; k < width; ++k)
    {
        accu += M[row * width + k] * N[k * width + col];
    }
    P[row * width + col] = accu;
}
// Driver: computes a 1024x1024 int matrix product on the CPU, repeats it
// on the GPU with 16x16 thread blocks, and compares every element.
// Exits 1 on the first mismatch, 0 on success.
int main(void)
{
    int i, j, k;
    int size=1024;   // must be a multiple of 16 (the block edge below)
    int *h_A, *h_B, *h_C, *h_gC;
    int *d_A, *d_B, *d_C;
    int sizeByte = sizeof(int)*size*size;
    h_A = (int *) malloc(sizeByte);
    h_B = (int *) malloc(sizeByte);
    h_C = (int *) malloc(sizeByte);
    h_gC = (int *) malloc(sizeByte);
    // Constant inputs: every product element should equal 2*size.
    for(i = 0; i < size*size; i++) h_A[i] = 1;
    for(i = 0; i < size*size; i++) h_B[i] = 2;
    printf("Host Computing Statrs !\n");
    // Reference triple loop on the CPU.
    for(i = 0; i < size; i++)
        for(j = 0; j < size; j++) {
            h_C[i*size+j] = 0;
            for(k = 0; k < size; k++)
                h_C[i*size+j] += h_A[i*size+k]*h_B[k*size+j];
        }
    printf("Host Computing Finished !\n");
    // for(i = 0; i < size; i++) {
    //     for(j = 0; j < size; j++)
    //         printf("%d ", h_C[i*size+j]);
    //     printf("\n");
    // }
    cudaMalloc(&d_A, sizeByte);
    cudaMalloc(&d_B, sizeByte);
    cudaMalloc(&d_C, sizeByte);
    cudaMemcpy(d_A, h_A, sizeByte, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeByte, cudaMemcpyHostToDevice);
    printf("GPU Computing Statrs !\n");
    // One 16x16 block per output tile; size is divisible by 16, so the
    // guard-free kernel stays in bounds.
    dim3 blocks(size/16, size/16);
    dim3 threads(16, 16);
    MatrixMul<<<blocks, threads >>>(d_A, d_B, d_C, size);
    cudaDeviceSynchronize();
    printf("GPU Computing Finished !\n");
    cudaMemcpy(h_gC, d_C, sizeByte, cudaMemcpyDeviceToHost);
    // for(i = 0; i < size; i++) {
    //     for(j = 0; j < size; j++)
    //         printf("%d ", h_gC[i*size+j]);
    //     printf("\n");
    // }
    // Element-wise comparison; clean up and fail fast on any mismatch.
    for(i = 0; i < size; i++)
        for(j = 0; j < size; j++)
            if( h_C[i*size+j] != h_gC[i*size+j] ) {
                printf("Error !\n");
                cudaFree(d_A);
                cudaFree(d_B);
                cudaFree(d_C);
                free(h_A);
                free(h_B);
                free(h_C);
                free(h_gC);
                exit(1);
            }
    printf("Success ! \n");
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_gC);
    exit(0);
}
|
6,950 | #include <iostream>
#include <stdlib.h>
#include <vector>
#include <math.h>
#include <time.h>
#include <cstdlib>
#include <cstdio>
#include <fstream>
#include <string>
#include <sstream>
#include <cuda_runtime.h>
using namespace std;
#define DATA_SIZE 150 // Numero de datos usados
#define TRAIN_DSIZE 120 // Numero de datos de entrenamiento
#define TEST_DSIZE 30 // Numero de datos de testeo
#define INPUT_SIZE 15 // Numero de neuronas en capa de entrada
#define HIDDEN_LAYERS 1 // Numero de capas ocultas
#define HIDDEN_SIZE 15 // Numero de neuronas en capas ocultas
#define OUTPUT_SIZE 15
#define EPOCHS 200000
#define BATCHSIZE 16
#define N 48
float error;
int topology[HIDDEN_LAYERS+2];
float X_TRAIN[TRAIN_DSIZE*INPUT_SIZE];
float Y_TRAIN[TRAIN_DSIZE*OUTPUT_SIZE];
float X_TEST[TEST_DSIZE*INPUT_SIZE];
float Y_TEST[TEST_DSIZE*OUTPUT_SIZE]={0};
//// Forward activation: logistic sigmoid 1/(1 + e^-x).
//// NOTE(review): exp() promotes the argument to double here; expf() would
//// stay in single precision (slightly different results, faster).
__device__ float sigmoid(float x) {
    return (1.0f / (1.0f + exp(-x)));
}
//// Backward activation derivative. Expects x to be a sigmoid OUTPUT, for
//// which sigma'(z) = sigma(z) * (1 - sigma(z)) = x * (1 - x).
__device__ float dsigmoid(float x) {
    return (x*(1-x));
}
/* Data-parallel MLP trainer: N device threads each process a strided
 * subset of the TRAIN_DSIZE samples, sharing one global weight array
 * dev_W that they update with atomicAdd after every sample
 * (Hogwild-style; updates from different threads interleave freely).
 * Layer sizes come from 'topology' (INPUT, HIDDEN..., OUTPUT).
 * NOTE(review): W[l][i] aliases dev_W with stride
 * l*topology[l]*topology[l+1] + i*topology[l+1], which ignores the +1
 * bias row the host allocation uses ((topology[l]+1)*topology[l+1] per
 * layer) — consecutive layers' rows may overlap; confirm the layout.
 * NOTE(review): m_data = m*BATCHSIZE + offset can greatly exceed
 * TRAIN_DSIZE, indexing X_TRAIN/Y_TRAIN out of bounds — verify the
 * intended batching.
 * NOTE(review): the __syncthreads() inside the sample loop is divergent
 * when TRAIN_DSIZE is not a multiple of N (threads run different trip
 * counts); with TRAIN_DSIZE=120 and N=48 this is a real hazard.
 * NOTE(review): per-thread buffers come from device-side 'new' and are
 * never freed, so the device heap grows with each launch. */
__global__ void training(float *dev_W,int *topology,float *X_TRAIN,float *Y_TRAIN) {
  int offset=threadIdx.x+blockIdx.x*blockDim.x;
  float LEARNING_RATE=0.5;
  ///////////////////////////////////////////////////////
  /*INITIALIZATION*/
  //////////////////////////////////////////////////////
  if(offset<N){
    // W: per-layer row pointers aliasing the shared weight buffer dev_W.
    float ***W;
    W = new float**[HIDDEN_LAYERS+1];
    for(int l = 0; l < HIDDEN_LAYERS+1; l++){
      W[l] = new float*[topology[l]+1];//+1 for bias
      for (int i = 0; i < topology[l]+1; i++) {
        W[l][i]=&dev_W[l*topology[l]*topology[l+1]+i*topology[l+1]];//+1 Next layer
      }
    }
    // DW: per-thread gradient accumulators, zero-initialized.
    float ***DW;
    DW = new float**[HIDDEN_LAYERS+1];
    for(int l = 0; l < HIDDEN_LAYERS+1; l++){
      DW[l] = new float*[topology[l]+1];//+1 for bias
      for (int i = 0; i < topology[l]+1; i++) {
        DW[l][i]=new float[topology[l+1]];//+1 Next layer
        for(int j=0;j<topology[l+1];j++){
          DW[l][i][j]=0;
        }
      }
    }
    float **l_net;//after activation
    // Pre-activation outputs, one array per layer.
    l_net = new float*[HIDDEN_LAYERS+1];
    for (int i = 0; i < HIDDEN_LAYERS+1; i++) {
      l_net[i]=new float[topology[i+1]];
    }
    float **l_out;//before activation
    // Post-activation outputs; element 0 will alias the training sample,
    // hence one extra slot.
    l_out = new float*[HIDDEN_LAYERS+2];
    for (int i = 0; i < HIDDEN_LAYERS+2; i++) {
      l_out[i]=new float[topology[i]];
    }
    ///////////////////////////////////////////////////////
    /*TRAINING*/
    //////////////////////////////////////////////////////
    float error_epoch;
    for (int ep = 0; ep < EPOCHS; ep++) {
      if (ep%100==0 && offset==0) {
        printf("Epoca %u ------",ep);
        //cout<<"Epoca "<<ep<<"------Error: "<<error_epoch<<endl;
      }
      error_epoch=0;
      // Reset the accumulated weight updates.
      for (int m = offset; m < TRAIN_DSIZE; m+=N) {
        for (int l = 0; l < HIDDEN_LAYERS+1; l++) {
          for (int i = 0; i < topology[l]+1; i++) {
            for (int j = 0; j < topology[l+1]; j++) {
              DW[l][i][j]=0;
            }
          }
        }
        int m_data=m*BATCHSIZE+offset;
        ///////////////////////////////////////////////////////
        /*FORWARD_TRAINING*/
        //////////////////////////////////////////////////////
        l_out[0]=&X_TRAIN[m_data*INPUT_SIZE];
        //l_out[0]=X_TRAIN[m];
        for (int l = 0; l < HIDDEN_LAYERS+1; l++) {
          for (int j = 0; j < topology[l+1]; j++) {
            // W[l][0][j] is the bias; rows 1.. are the input weights.
            l_net[l][j]=W[l][0][j];
            /// The bias is skipped here; the last element shifts the index by 1.
            for (int i = 0; i < topology[l]; i++) {
              l_net[l][j]+=W[l][i+1][j]*l_out[l][i];
            }
            l_out[l+1][j]=sigmoid(l_net[l][j]);
          }
        }
        /////////// Squared error for this sample / running epoch error
        float error=0;
        for (int i = 0; i < OUTPUT_SIZE; i++) {
          error+=0.5*pow(Y_TRAIN[m*OUTPUT_SIZE+i]-l_out[HIDDEN_LAYERS+1][i],2);
        }
        //atomicAdd(&error_epoch,error);
        error_epoch+=error;
        ///////////////////////////////////////////////////////
        /*BACKWARD_TRAINING*/
        //////////////////////////////////////////////////////
        /////////// Output-layer deltas (DW[...][0][j] holds the delta/bias term)
        for (int j = 0; j < topology[HIDDEN_LAYERS+1]; j++) {
          float prediccion=l_out[HIDDEN_LAYERS+1][j];
          //printf("Calculo Bias %u:\n",j);
          DW[HIDDEN_LAYERS][0][j]=prediccion-Y_TRAIN[m_data*OUTPUT_SIZE+j];
          DW[HIDDEN_LAYERS][0][j]*=dsigmoid(prediccion);
          for (int i = 0; i < topology[HIDDEN_LAYERS]; i++) {
            DW[HIDDEN_LAYERS][i+1][j]=DW[HIDDEN_LAYERS][0][j]*l_out[HIDDEN_LAYERS][i];
          }
        }
        if(HIDDEN_LAYERS>0){
          /////////// Hidden-layer deltas, back-propagated layer by layer
          for (int l = HIDDEN_LAYERS-1; l >-1; l--) {
            for (int j = 0; j < topology[l+1]; j++) {
              ////// Bias update: weighted sum of the next layer's deltas //////
              float sum=0;
              for (int k = 0; k < topology[l+2]; k++) {
                sum+=W[l+1][j+1][k]*DW[l+1][0][k];
              }
              //printf("SUMA: %f\n",sum);
              DW[l][0][j]=sum*dsigmoid(l_out[l+1][j]);
              //printf("Dsigmoid: %f\n",dsigmoid(this->l_out[l+1][j]));
              //printf("Bias %u: %f\n",j,this->theta[l].DW[0][j]);
              for (int i = 0; i < topology[l]; i++) {
                DW[l][i+1][j]=DW[l][0][j]*l_out[l][i];
                //printf("DW%u%u: %f\n",i-1,j,this->theta[l].DW[i][j]);
              }
            }
          }
        }
        __syncthreads();
        // Apply this sample's gradients to the shared weights atomically.
        for (int l = 0; l < HIDDEN_LAYERS+1; l++) {
          for (int i = 0; i < topology[l]+1; i++) {
            for (int j = 0; j < topology[l+1]; j++) {
              atomicAdd(&W[l][i][j],-LEARNING_RATE*DW[l][i][j]);
            }
          }
        }
        //atomicAdd(&error_epoch,error);
      }
      if (ep%100==0 && offset==0) {
        printf("Error: %f\n",error_epoch);
        //cout<<"Epoca "<<ep<<"------Error: "<<error_epoch<<endl;
      }
    }
  }
}
// Entry point: loads the labelled dataset from "weights.txt", splits it 1:4
// into test/train, builds the MLP topology, initializes the flattened weight
// array, uploads everything to the GPU and launches the training kernel.
int main() {
///////////////////////////////////////////////////////
/* READING DATA */
//////////////////////////////////////////////////////
//float X_TRAIN[TRAIN_DSIZE][INPUT_SIZE];
//float Y_TRAIN[TRAIN_DSIZE][OUTPUT_SIZE]={0};
string row;
string data_aux;
ifstream file("weights.txt");
printf("Abriendo...\n");
// aux_img1 counts test rows, aux_img2 counts training rows.
int aux_img1=0;
int aux_img2=0;
for (int img = 0; img < DATA_SIZE; img++) {
getline (file,row,'\n');
stringstream ss(row);
// Every 5th row goes to the test set, the rest to the training set.
if (img%5==0) {
for (int w = 0; w < INPUT_SIZE; w++) {
getline (ss,data_aux,',');
// Inputs are scaled by a fixed 40000 normalization constant.
X_TEST[aux_img1*INPUT_SIZE+w]=stod(data_aux)/40000;
}
getline(ss,data_aux,',');
// One-hot encode the 1-based class label.
// NOTE(review): the row stride here is INPUT_SIZE, but Y rows are read back
// below with an OUTPUT_SIZE stride — this looks like it should be
// OUTPUT_SIZE; confirm against the global Y_TEST declaration.
Y_TEST[aux_img1*INPUT_SIZE+stoi(data_aux)-1]=1;
//cout<<"testing output "<<stoi(data_aux)-1<<" : "<<Y_TEST[aux_img1][stoi(data_aux)-1]<<endl;
aux_img1++;
}
else{
for (int w = 0; w < INPUT_SIZE; w++) {
getline (ss,data_aux,',');
X_TRAIN[aux_img2*INPUT_SIZE+w]=stod(data_aux)/40000;
}
getline(ss,data_aux,',');
// NOTE(review): same INPUT_SIZE-vs-OUTPUT_SIZE stride concern as Y_TEST above.
Y_TRAIN[aux_img2*INPUT_SIZE+stoi(data_aux)-1]=1;
aux_img2++;
//cout<<"training output "<<stoi(data_aux)-1<<" : "<<Y_TRAIN[aux_img2][stoi(data_aux)-1]<<endl;
}
}
// Debug dump of the one-hot training targets (read with OUTPUT_SIZE stride).
cout<<"Y_train=[ ";
for (int i = 0; i < TRAIN_DSIZE; i++) {
for (int j = 0; j < OUTPUT_SIZE; j++) {
cout<<Y_TRAIN[i*OUTPUT_SIZE+j]<<" ";
}
cout<<"fila "<<i<<endl;
}
cout<<"]"<<endl;
///////////////////////////////////////////////////////
/* GENERATING THE TOPOLOGY: input layer, HIDDEN_LAYERS hidden layers, output layer */
//////////////////////////////////////////////////////
topology[0]=INPUT_SIZE;
for (int i = 1; i < HIDDEN_LAYERS+1; i++) {
topology[i]=HIDDEN_SIZE;
}
topology[HIDDEN_LAYERS+1]=OUTPUT_SIZE;
printf("Topology: ");
for (int i = 0; i < HIDDEN_LAYERS+2; i++) {
printf("%u ",topology[i] );
}
printf("\n");
printf("TOPOLOGIA GENERADA\n");
///////////////////////////////////////////////////////
/* BUILDING THE NETWORK: one flat weight array; layer l holds
   (topology[l]+1) x topology[l+1] weights (the +1 is the bias row). */
//////////////////////////////////////////////////////
int size3=0;
for(int i=0;i<HIDDEN_LAYERS+1;i++)
size3+=(topology[i]+1)*topology[i+1];
cout<<"size3: "<<size3<<endl;
float* W = new float[size3];
// Small random init in [0, 0.1).
for (int i=0;i<size3;i++){
W[i]=0.2*(float(rand()) / float(RAND_MAX))/2.f;;
}
cout<<"Red creada"<<endl;
///////////////////////////////////////////////////////
/* CHECKING THE NETWORK (debug print of each weight matrix) */
//////////////////////////////////////////////////////
for (int l = 0; l < HIDDEN_LAYERS+1; l++) {
printf("THETA %u:\n",l);
printf("Size i: %u x o: %u\n", topology[l], topology[l+1]);
for (int row = 0; row < topology[l]; row++) {
for (int col = 0; col < topology[l+1]; col++) {
// NOTE(review): this index mixes a per-layer offset with the *current*
// layer's dimensions; it is only a debug print, but verify it matches the
// layout the training kernel uses.
printf("%f ",W[l*(topology[l]+1)*topology[l+1]+row*(topology[l]+1)+col]);
}
printf("\n");
}
}
///////////////////////////////////////////////////////
/* ALLOCATING AND UPLOADING DEVICE BUFFERS */
//////////////////////////////////////////////////////
float *dev_W;
cudaMalloc(&dev_W, size3*sizeof(float));
cudaMemcpy( dev_W, W, size3*sizeof(float), cudaMemcpyHostToDevice);
int *dev_topology;
cudaMalloc(&dev_topology,(HIDDEN_LAYERS+2)*sizeof(int));
cudaMemcpy( dev_topology, topology, (HIDDEN_LAYERS+2)*sizeof(int), cudaMemcpyHostToDevice);
float *dev_X_TRAIN;
cudaMalloc(&dev_X_TRAIN,INPUT_SIZE*TRAIN_DSIZE*sizeof(float));
cudaMemcpy( dev_X_TRAIN, X_TRAIN, INPUT_SIZE*TRAIN_DSIZE*sizeof(float), cudaMemcpyHostToDevice);
float *dev_Y_TRAIN;
// NOTE(review): Y buffer sized with INPUT_SIZE — if Y_TRAIN is
// TRAIN_DSIZE x OUTPUT_SIZE this over-allocates/over-copies; confirm.
cudaMalloc(&dev_Y_TRAIN,INPUT_SIZE*TRAIN_DSIZE*sizeof(float));
cudaMemcpy( dev_Y_TRAIN, Y_TRAIN, INPUT_SIZE*TRAIN_DSIZE*sizeof(float), cudaMemcpyHostToDevice);
float *c,*dev_c;
// NOTE(review): host buffer sized with sizeof(int) but used as float[4], and
// it is copied to the device uninitialized; dev_c is never passed to the
// kernel, so this block appears vestigial.
c = (float*)malloc(4*sizeof(int));
cudaMalloc(&dev_c, 4*sizeof(float));
cudaMemcpy( dev_c, c, 4*sizeof(float), cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////
/* TRAINING */
//////////////////////////////////////////////////////
int blocksize= BATCHSIZE;
int gridsize= (N-1+blocksize)/blocksize;
//////////// Iterations ///////////////////////////
//float start = omp_get_wtime( );
//void training(float *dev_W,int *topology,float *X_TRAIN,float *Y_TRAIN)
cout<<"Llega kernel"<<endl;
training<<<gridsize,blocksize>>>(dev_W,dev_topology,dev_X_TRAIN,dev_Y_TRAIN);
// The blocking cudaMemcpy below implicitly synchronizes with the kernel;
// no cudaGetLastError() check is performed, so launch failures are silent.
cudaMemcpy( c, dev_c, 4*sizeof(float), cudaMemcpyDeviceToHost );
//float end = omp_get_wtime( );
//printf("time = %f s\n",(end-start));
//testing();
// NOTE(review): W, c and all device buffers are leaked here (freed by
// process exit only).
printf("TESTEO TERMINADO\n");
}
|
6,951 | #include <iostream>
using namespace std;
// Row-major dense matrix; `elements` holds width*height floats and entry
// (row, col) lives at elements[row * width + col].
typedef struct Matrix {
int width;
int height;
float *elements;
} Mat;
#define BLOCK_SIZE 16
#define w 4096
#define h 4096
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
void MatMul(const Matrix A, const Matrix B, Matrix C);
// Driver: multiplies two 4096x4096 all-ones matrices on the GPU and reports
// the accumulated absolute error against the analytic result (every entry
// of C must equal w).
int main() {
    // Builds an h x w host matrix with every element set to `fill`.
    auto makeMat = [](float fill) {
        Mat m;
        m.width = w;
        m.height = h;
        m.elements = (float *)malloc(sizeof(float) * m.width * m.height);
        for (int i = 0; i < m.height; ++i)
            for (int j = 0; j < m.width; ++j)
                m.elements[i * m.width + j] = fill;
        return m;
    };
    Mat h_A = makeMat(1);
    Mat h_B = makeMat(1);
    Mat h_C = makeMat(0);
    MatMul(h_A, h_B, h_C);
    // ones * ones => every output element equals the inner dimension w.
    const float expected = w;
    float sum_error = 0;
    for (int i = 0; i < h_C.height; ++i)
        for (int j = 0; j < h_C.width; ++j)
            sum_error += fabs(expected - h_C.elements[i * h_C.width + j]);
    cout << "sum error : " << sum_error << endl;
    free(h_A.elements);
    free(h_B.elements);
    free(h_C.elements);
    return 0;
}
// Host wrapper: copies A and B to the device, launches MatMulKernel and
// copies the product back into C. The caller owns all three host buffers.
// Improvements: the grid is now derived from C's dimensions instead of the
// hard-coded 256x256 (which only matched the w=h=4096, BLOCK_SIZE=16 case);
// since the kernel uses grid-stride loops any grid is correct, and this one
// gives one thread per output element. A post-launch error check was added
// because kernel launches fail silently otherwise.
void MatMul(const Matrix A, const Matrix B, Matrix C) {
    Mat d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc((void **)&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    Mat d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc((void **)&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    Mat d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc((void **)&d_C.elements, size);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Ceil-div grid sized to the output matrix.
    dim3 dimGrid((C.width + BLOCK_SIZE - 1) / BLOCK_SIZE,
                 (C.height + BLOCK_SIZE - 1) / BLOCK_SIZE);
    cout << dimGrid.x << " " << dimGrid.y << endl;
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Surface launch-configuration errors immediately.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        cout << "MatMulKernel launch failed: " << cudaGetErrorString(err) << endl;
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Naive grid-stride GEMM: C = A * B. Each thread walks output elements with
// strides of gridDim*blockDim along both axes, so any 2D launch geometry
// covers the whole of C.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
    const int rowStride = gridDim.y * blockDim.y;
    const int colStride = gridDim.x * blockDim.x;
    const int row0 = blockIdx.y * blockDim.y + threadIdx.y;
    const int col0 = blockIdx.x * blockDim.x + threadIdx.x;
    for (int row = row0; row < C.height; row += rowStride) {
        for (int col = col0; col < C.width; col += colStride) {
            float acc = 0;
            for (int k = 0; k < A.width; ++k)
                acc += A.elements[row * A.width + k] * B.elements[k * B.width + col];
            C.elements[row * C.width + col] = acc;
        }
    }
}
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <fcntl.h>
#include <unistd.h>
#define get_local_id() (threadIdx.x)
#define get_local_size() (blockDim.x)
#define get_global_id() (threadIdx.x + blockIdx.x * blockDim.x)
#define get_global_size() (blockDim.x * gridDim.x)
#define ELOG(rc,fmt,...) \
do { \
if (rc != cudaSuccess) \
{ \
fprintf(stderr, "(%s:%d, %s) " fmt "\n", \
__FUNCTION__, __LINE__, \
cudaGetErrorName(rc), ##__VA_ARGS__); \
exit(1); \
} \
} while(0)
#define USE_SYSV_SHMEM 0
// Sums the first `nitems` bytes of x into *p_sum.
// Each thread accumulates a private partial sum over a grid-stride loop,
// folds it into a block-shared total, and lane 0 of each block issues a
// single global atomicAdd — one global atomic per block instead of per thread.
__global__ void makeSumKernel(const unsigned char *x, size_t nitems,
                              unsigned long long *p_sum)
{
    __shared__ unsigned long long block_total;
    if (get_local_id() == 0)
        block_total = 0;
    __syncthreads();
    unsigned long long my_total = 0;
    for (size_t i = get_global_id(); i < nitems; i += get_global_size())
        my_total += x[i];
    atomicAdd(&block_total, my_total);
    __syncthreads();
    if (get_local_id() == 0)
        atomicAdd(p_sum, block_total);
}
// Prints the command-line synopsis (using the basename of argv0) and exits.
static void usage(const char *argv0)
{
    const char *slash = strrchr(argv0, '/');
    const char *command = slash ? slash + 1 : argv0;
    fprintf(stderr, "usage: %s [options]\n"
            " -n <num of processes> (default: 1)\n"
            " -s <buffer size in MB> (default: 256)\n\n"
            " -d <dir of temporary file> (default: /dev/shm)\n",
            command);
    exit(1);
}
/*
 * Worker process body: imports the shared file descriptor as CUDA external
 * memory, maps it to a device buffer, sums all its bytes on the GPU and
 * prints the total.
 *
 * Fixes: the result was printed with "%lu" although `sum` is
 * unsigned long long (wrong on LLP64, only accidentally right on LP64) —
 * now "%llu"; and the mapped buffer, the external-memory handle and the
 * managed result buffer are released before returning.
 */
static int child_main(int fdesc, size_t length)
{
    cudaExternalMemoryHandleDesc mdesc;
    cudaExternalMemoryBufferDesc bdesc;
    cudaExternalMemory_t extMem;
    cudaError_t rc;
    void *buf;
    unsigned long long *sum;
    int gridSz, blockSz = 1024;
    /* cudaImportExternalMemory: wrap the fd as an opaque external allocation */
    memset(&mdesc, 0, sizeof(mdesc));
    mdesc.type = cudaExternalMemoryHandleTypeOpaqueFd;
    mdesc.handle.fd = fdesc;
    mdesc.size = length;
    mdesc.flags = 0;
    rc = cudaImportExternalMemory(&extMem, &mdesc);
    ELOG(rc, "failed on cudaImportExternalMemory");
    /* cudaExternalMemoryGetMappedBuffer: device pointer over the whole file */
    memset(&bdesc, 0, sizeof(bdesc));
    bdesc.offset = 0;
    bdesc.size = length;
    bdesc.flags = 0;
    rc = cudaExternalMemoryGetMappedBuffer(&buf, extMem, &bdesc);
    ELOG(rc, "failed on cudaExternalMemoryGetMappedBuffer");
    /* managed result buffer, zero-initialized */
    rc = cudaMallocManaged(&sum, sizeof(unsigned long long),
                           cudaMemAttachHost);
    ELOG(rc, "failed on cudaMallocManaged");
    memset(sum, 0, sizeof(unsigned long long));
    /* kernel invocation: one thread per byte (grid-stride kernel tolerates any size) */
    blockSz = 1024;
    gridSz = (length + blockSz - 1) / blockSz;
    makeSumKernel<<<gridSz, blockSz>>>((const unsigned char *)buf,
                                       length, sum);
    rc = cudaStreamSynchronize(NULL);
    ELOG(rc, "failed on cudaStreamSynchronize");
    printf("sum = %llu\n", sum[0]);
    sleep(10);
    /* release GPU resources before exiting */
    rc = cudaFree(buf);
    ELOG(rc, "failed on cudaFree (mapped buffer)");
    rc = cudaDestroyExternalMemory(extMem);
    ELOG(rc, "failed on cudaDestroyExternalMemory");
    rc = cudaFree(sum);
    ELOG(rc, "failed on cudaFree (sum)");
    return 0;
}
// Creates a temporary file in `dirname`, sizes it to `length` bytes, forks
// `nprocs` workers that each run child_main() over the shared fd, waits for
// them all, then unlinks the file.
int main(int argc, char * const argv[])
{
    int nprocs = 1;
    size_t length = (256 << 20);
    const char *dirname = "/dev/shm";
    char path[1024];
    int fdesc, c, _len;

    /* command-line parsing */
    while ((c = getopt(argc, argv, "n:s:d:")) >= 0)
    {
        if (c == 'n')
        {
            nprocs = atoi(optarg);
            if (nprocs < 1)
                usage(argv[0]);
        }
        else if (c == 's')
        {
            _len = atoi(optarg);
            if (_len < 0)
                usage(argv[0]);
            length = (size_t)_len << 20;
        }
        else if (c == 'd')
        {
            dirname = optarg;
        }
        else
        {
            usage(argv[0]);
        }
    }
    /* create and size the shared temporary file */
    snprintf(path, sizeof(path), "%s/hogeXXXXXX", dirname);
    fdesc = mkstemp(path);
    if (fdesc < 0)
    {
        fprintf(stderr, "failed on mkstemp: %m\n");
        return 1;
    }
    if (ftruncate(fdesc, length))
    {
        fprintf(stderr, "failed on ftruncate: %m\n");
        return 1;
    }
    if (nprocs == 0)
        child_main(fdesc, length);   /* unreachable: -n rejects values < 1 */
    else
    {
        pid_t child;
        int status;
        /* fork one worker per requested process... */
        for (int i = 1; i <= nprocs; i++)
        {
            child = fork();
            if (child == 0)
                return child_main(fdesc, length);
            else if (child < 0)
            {
                fprintf(stderr, "failed on fork: %m\n");
                return 1;
            }
        }
        /* ...then reap them all */
        for (int i = 1; i <= nprocs; i++)
            child = wait(&status);
    }
    if (unlink(path))
    {
        fprintf(stderr, "failed on unlink: %m\n");
        return 1;
    }
    return 0;
}
|
6,953 | #include <vector>
#include <random>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
void PrintDeviceInfo();
void GenerateBgra8K(uint8_t* buffer, int dataSize);
void convertPixelFormatCpu(uint8_t* inputBgra, uint8_t* outputYuv, int numPixels);
__global__ void convertPixelFormat(uint8_t* inputBgra, uint8_t* outputYuv, int numPixels);
// Benchmark driver: converts an 8K BGRA image to packed YUV on the CPU, on
// the GPU via the default stream, and on the GPU split across 24 streams,
// comparing results and throughput.
//
// Fixes over the previous version:
//  * bgraBuffer/yuvBuffer were allocated with `new[]` but released with
//    cudaFreeHost() (invalid free), and cudaMemcpyAsync on pageable memory
//    is not truly asynchronous — both buffers are now pinned via
//    cudaMallocHost, matching the frees and enabling real copy/compute overlap.
//  * foundMistake is reset before the multi-stream comparison so a failure
//    in the first check no longer falsifies the second.
//  * streams and events are destroyed before exit.
int main()
{
    PrintDeviceInfo();
    uint8_t* bgraBuffer;
    uint8_t* yuvBuffer;
    uint8_t* deviceBgraBuffer;
    uint8_t* deviceYuvBuffer;
    const int dataSizeBgra = 7680 * 4320 * 4;
    const int dataSizeYuv = 7680 * 4320 * 3;
    // Pinned host memory: required for async copies, freed with cudaFreeHost.
    CUDA_CALL(cudaMallocHost(&bgraBuffer, dataSizeBgra));
    CUDA_CALL(cudaMallocHost(&yuvBuffer, dataSizeYuv));
    CUDA_CALL(cudaMalloc(&deviceBgraBuffer, dataSizeBgra));
    CUDA_CALL(cudaMalloc(&deviceYuvBuffer, dataSizeYuv));
    std::vector<uint8_t> yuvCpuBuffer(dataSizeYuv);
    cudaEvent_t start, stop;
    float elapsedTime;
    float elapsedTimeTotal;
    float dataRate;
    CUDA_CALL(cudaEventCreate(&start));
    CUDA_CALL(cudaEventCreate(&stop));
    std::cout << " " << std::endl;
    std::cout << "Generating 7680 x 4320 BRGA8888 image, data size: " << dataSizeBgra << std::endl;
    GenerateBgra8K(bgraBuffer, dataSizeBgra);
    // ---- CPU reference ----
    std::cout << " " << std::endl;
    std::cout << "Computing results using CPU." << std::endl;
    std::cout << " " << std::endl;
    CUDA_CALL(cudaEventRecord(start, 0));
    convertPixelFormatCpu(bgraBuffer, yuvCpuBuffer.data(), 7680*4320);
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    std::cout << " Whole process took " << elapsedTime << "ms." << std::endl;
    // ---- GPU, default stream ----
    std::cout << " " << std::endl;
    std::cout << "Computing results using GPU, default stream." << std::endl;
    std::cout << " " << std::endl;
    std::cout << " Move data to GPU." << std::endl;
    CUDA_CALL(cudaEventRecord(start, 0));
    CUDA_CALL(cudaMemcpy(deviceBgraBuffer, bgraBuffer, dataSizeBgra, cudaMemcpyHostToDevice));
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    dataRate = dataSizeBgra/(elapsedTime/1000.0)/1.0e9;
    elapsedTimeTotal = elapsedTime;
    std::cout << " Data transfer took " << elapsedTime << "ms." << std::endl;
    std::cout << " Performance is " << dataRate << "GB/s." << std::endl;
    std::cout << " Convert 8-bit BGRA to 8-bit YUV." << std::endl;
    CUDA_CALL(cudaEventRecord(start, 0));
    convertPixelFormat<<<32400, 1024>>>(deviceBgraBuffer, deviceYuvBuffer, 7680*4320);
    CUDA_CHECK();
    CUDA_CALL(cudaDeviceSynchronize());
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    dataRate = dataSizeBgra/(elapsedTime/1000.0)/1.0e9;
    elapsedTimeTotal += elapsedTime;
    std::cout << " Processing of 8K image took " << elapsedTime << "ms." << std::endl;
    std::cout << " Performance is " << dataRate << "GB/s." << std::endl;
    std::cout << " Move data to CPU." << std::endl;
    CUDA_CALL(cudaEventRecord(start, 0));
    CUDA_CALL(cudaMemcpy(yuvBuffer, deviceYuvBuffer, dataSizeYuv, cudaMemcpyDeviceToHost));
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    dataRate = dataSizeYuv/(elapsedTime/1000.0)/1.0e9;
    elapsedTimeTotal += elapsedTime;
    std::cout << " Data transfer took " << elapsedTime << "ms." << std::endl;
    std::cout << " Performance is " << dataRate << "GB/s." << std::endl;
    std::cout << " Whole process took " << elapsedTimeTotal << "ms." <<std::endl;
    std::cout << " Compare CPU and GPU results ..." << std::endl;
    bool foundMistake = false;
    for(int i=0; i<dataSizeYuv; i++){
        if(yuvCpuBuffer[i]!=yuvBuffer[i]){
            foundMistake = true;
            break;
        }
    }
    if(foundMistake){
        std::cout << " Results are NOT the same." << std::endl;
    } else {
        std::cout << " Results are the same." << std::endl;
    }
    // ---- GPU, chunked across multiple streams ----
    const int nStreams = 24;
    std::cout << " " << std::endl;
    std::cout << "Computing results using GPU, using "<< nStreams <<" streams." << std::endl;
    std::cout << " " << std::endl;
    cudaStream_t streams[nStreams];
    std::cout << " Creating " << nStreams << " CUDA streams." << std::endl;
    for (int i = 0; i < nStreams; i++) {
        CUDA_CALL(cudaStreamCreate(&streams[i]));
    }
    int brgaOffset = 0;
    int yuvOffset = 0;
    const int brgaChunkSize = dataSizeBgra / nStreams;
    const int yuvChunkSize = dataSizeYuv / nStreams;
    CUDA_CALL(cudaEventRecord(start, 0));
    for(int i=0; i<nStreams; i++)
    {
        std::cout << " Launching stream " << i << "." << std::endl;
        brgaOffset = brgaChunkSize*i;
        yuvOffset = yuvChunkSize*i;
        CUDA_CALL(cudaMemcpyAsync( deviceBgraBuffer+brgaOffset,
                                   bgraBuffer+brgaOffset,
                                   brgaChunkSize,
                                   cudaMemcpyHostToDevice,
                                   streams[i] ));
        convertPixelFormat<<<4096, 1024, 0, streams[i]>>>(deviceBgraBuffer+brgaOffset, deviceYuvBuffer+yuvOffset, brgaChunkSize/4);
        CUDA_CALL(cudaMemcpyAsync( yuvBuffer+yuvOffset,
                                   deviceYuvBuffer+yuvOffset,
                                   yuvChunkSize,
                                   cudaMemcpyDeviceToHost,
                                   streams[i] ));
    }
    CUDA_CHECK();
    CUDA_CALL(cudaDeviceSynchronize());
    CUDA_CALL(cudaEventRecord(stop, 0));
    CUDA_CALL(cudaEventSynchronize(stop));
    CUDA_CALL(cudaEventElapsedTime(&elapsedTime, start, stop));
    std::cout << " Whole process took " << elapsedTime << "ms." << std::endl;
    std::cout << " Compare CPU and GPU results ..." << std::endl;
    foundMistake = false;   // reset: do not inherit the first check's verdict
    for(int i=0; i<dataSizeYuv; i++){
        if(yuvCpuBuffer[i]!=yuvBuffer[i]){
            foundMistake = true;
            break;
        }
    }
    if(foundMistake){
        std::cout << " Results are NOT the same." << std::endl;
    } else {
        std::cout << " Results are the same." << std::endl;
    }
    // cleanup
    for (int i = 0; i < nStreams; i++) {
        CUDA_CALL(cudaStreamDestroy(streams[i]));
    }
    CUDA_CALL(cudaEventDestroy(start));
    CUDA_CALL(cudaEventDestroy(stop));
    CUDA_CALL(cudaFreeHost(bgraBuffer));
    CUDA_CALL(cudaFreeHost(yuvBuffer));
    CUDA_CALL(cudaFree(deviceBgraBuffer));
    CUDA_CALL(cudaFree(deviceYuvBuffer));
    return 0;
}
// Enumerates the CUDA devices and prints their main properties.
// Fixes: user-facing typo "Glocbal memory" -> "Global memory"; a failing
// cudaGetDeviceCount (e.g. no driver) now cleanly reports zero devices
// instead of reading an indeterminate count.
void PrintDeviceInfo(){
    int deviceCount = 0;
    if (cudaGetDeviceCount(&deviceCount) != cudaSuccess) {
        deviceCount = 0;
    }
    std::cout << "Number of device(s): " << deviceCount << std::endl;
    if (deviceCount == 0) {
        std::cout << "There is no device supporting CUDA" << std::endl;
        return;
    }
    cudaDeviceProp info;
    for(int i=0; i<deviceCount; i++){
        cudaGetDeviceProperties(&info, i);
        std::cout << "Device " << i << std::endl;
        std::cout << " Name: " << std::string(info.name) << std::endl;
        std::cout << " Global memory: " << info.totalGlobalMem/1024.0/1024.0 << " MB"<< std::endl;
        std::cout << " Shared memory per block: " << info.sharedMemPerBlock/1024.0 << " KB"<< std::endl;
        std::cout << " Warp size: " << info.warpSize<< std::endl;
        std::cout << " Max thread per block: " << info.maxThreadsPerBlock<< std::endl;
        std::cout << " Thread dimension limits: " << info.maxThreadsDim[0]<< " x "
                  << info.maxThreadsDim[1]<< " x "
                  << info.maxThreadsDim[2]<< std::endl;
        std::cout << " Max grid size: " << info.maxGridSize[0]<< " x "
                  << info.maxGridSize[1]<< " x "
                  << info.maxGridSize[2]<< std::endl;
        std::cout << " Compute capability: " << info.major << "." << info.minor << std::endl;
    }
}
// Fills `buffer` with random opaque BGRA pixels: the B, G and R bytes are
// uniform in [0, 255] and every 4th byte (alpha) is forced to 255.
// `dataSize` is a byte count and is expected to be a multiple of 4.
void GenerateBgra8K(uint8_t* buffer, int dataSize){
    std::random_device seed;
    std::mt19937 rng(seed());
    std::uniform_int_distribution<> channel(0, 255);
    const int numPixels = dataSize / 4;
    for (int p = 0; p < numPixels; ++p) {
        uint8_t* px = buffer + 4 * p;
        px[0] = channel(rng);
        px[1] = channel(rng);
        px[2] = channel(rng);
        px[3] = 255;
    }
}
// CPU reference conversion: BGRA8888 -> packed 3-byte YUV per pixel.
// Mirrors the GPU kernel exactly, including the truncation of the weighted
// sums to 16 bits before the >>8 shift (harmless: the final store is mod 256,
// and 65536 >> 8 == 256 ≡ 0 mod 256, so the truncation never changes a byte).
void convertPixelFormatCpu(uint8_t* inputBgra, uint8_t* outputYuv, int numPixels){
    for (int px = 0; px < numPixels; ++px) {
        const int b = inputBgra[px*4];
        const int g = inputBgra[px*4+1];
        const int r = inputBgra[px*4+2];
        // Fixed-point BT.601-style weights, truncated to short as before.
        const short y16 = (short)( 66*r + 129*g +  25*b);
        const short u16 = (short)(-38*r -  74*g + 112*b);
        const short v16 = (short)(112*r -  94*g -  18*b);
        outputYuv[px*3]   = (uint8_t)((y16 >> 8) + 16);
        outputYuv[px*3+1] = (uint8_t)((u16 >> 8) + 128);
        outputYuv[px*3+2] = (uint8_t)((v16 >> 8) + 128);
    }
}
// GPU conversion: BGRA8888 -> packed 3-byte YUV, one pixel per thread via a
// grid-stride loop so any launch configuration covers all pixels.
// Fix: the old `while (idx <= numPixels)` with an inner `if (idx < numPixels)`
// guard made every thread execute one wasted, doubly-checked extra iteration;
// a plain `<` grid-stride loop produces identical output without it.
__global__ void convertPixelFormat(uint8_t* inputBgra, uint8_t* outputYuv, int numPixels){
    const int stride = gridDim.x * blockDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < numPixels; idx += stride) {
        short3 yuv16;
        char3 yuv8;
        // Fixed-point weighted sums (B at +0, G at +1, R at +2).
        yuv16.x = 66*inputBgra[idx*4+2] + 129*inputBgra[idx*4+1] + 25*inputBgra[idx*4];
        yuv16.y = -38*inputBgra[idx*4+2] + -74*inputBgra[idx*4+1] + 112*inputBgra[idx*4];
        yuv16.z = 112*inputBgra[idx*4+2] + -94*inputBgra[idx*4+1] + -18*inputBgra[idx*4];
        yuv8.x = (yuv16.x>>8)+16;
        yuv8.y = (yuv16.y>>8)+128;
        yuv8.z = (yuv16.z>>8)+128;
        // Single 3-byte store per pixel.
        *(reinterpret_cast<char3*>(&outputYuv[idx*3])) = yuv8;
    }
}
|
6,954 | #include <iostream>
#include <fstream>
#include <cuda_runtime.h>
#include <stdlib.h>
#define check_cuda_call(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call (file/line + error string) and, unless `abort`
// is false, terminates the process with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Tiled matrix multiply C = A * B using num_threads x num_threads shared
// memory tiles. Requires blockDim == (num_threads, num_threads); out-of-range
// tile entries are zero-padded so partial edge tiles work.
template<typename T, int num_threads>
__global__ void matrixMultiply(T * C, T * A, T * B,
                               int Ah, int Aw,
                               int numBRows, int numBColumns) {
    __shared__ T tileA[num_threads][num_threads];
    __shared__ T tileB[num_threads][num_threads];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int row = blockIdx.y * num_threads + ty;
    const int col = blockIdx.x * num_threads + tx;
    T acc = 0;
    const int numTiles = (Aw - 1) / num_threads + 1;
    for (int t = 0; t < numTiles; ++t) {
        const int aCol = t * num_threads + tx;
        const int bRow = t * num_threads + ty;
        tileA[ty][tx] = (row < Ah && aCol < Aw) ? A[row * Aw + aCol] : T(0);
        tileB[ty][tx] = (col < numBColumns && bRow < numBRows)
                            ? B[bRow * numBColumns + col] : T(0);
        __syncthreads();   // both tiles fully loaded before anyone reads them
        for (int k = 0; k < num_threads; ++k)
            acc += tileA[ty][k] * tileB[k][tx];
        __syncthreads();   // everyone done reading before the next overwrite
    }
    if (row < Ah && col < numBColumns)
        C[row * numBColumns + col] = acc;
}
// Driver: random 300x400 * 400x200 multiply on the GPU, verified against a
// CPU reference for the first 10 elements.
// BUG FIX: the grid was built as ((Aw-1)/T+1, (Bw-1)/T+1) = (13, 7), but the
// (non-grid-stride) kernel maps blockIdx.x to C's columns (Bw) and
// blockIdx.y to C's rows (Ah); grid.y = 7 covered only rows 0..223, leaving
// rows 224..299 of C uncomputed. Also added: freeing of all host and device
// buffers.
int main(int argc, char const *argv[]) {
    const int Ah = 300;
    const int Aw = 400;
    const int Bh = Aw;
    const int Bw = 200;
    // prepare host memory
    float *A = new float[Ah * Aw];
    float *B = new float[Bh * Bw];
    float *C = new float[Ah * Bw];
    for (int i = 0; i < Ah * Aw; ++i) A[i] = rand() / (float)RAND_MAX;
    for (int i = 0; i < Bh * Bw; ++i) B[i] = rand() / (float)RAND_MAX;
    // device buffers
    float *dA; check_cuda_call(cudaMalloc(&dA, Ah * Aw * sizeof(float)));
    float *dB; check_cuda_call(cudaMalloc(&dB, Bh * Bw * sizeof(float)));
    float *dC; check_cuda_call(cudaMalloc(&dC, Ah * Bw * sizeof(float)));
    check_cuda_call(cudaMemcpy(dA, A, Ah * Aw * sizeof(float), cudaMemcpyHostToDevice));
    check_cuda_call(cudaMemcpy(dB, B, Bh * Bw * sizeof(float), cudaMemcpyHostToDevice));
    const int num_threads = 32;
    dim3 threads(num_threads, num_threads);
    // grid.x covers C's columns (Bw), grid.y covers C's rows (Ah).
    dim3 grid((Bw - 1) / num_threads + 1, (Ah - 1) / num_threads + 1);
    matrixMultiply<float, 32> <<< grid, threads>>>(dC, dA, dB, Ah, Aw, Bh, Bw);
    check_cuda_call(cudaPeekAtLastError());
    check_cuda_call(cudaGetLastError());
    check_cuda_call(cudaDeviceSynchronize());
    check_cuda_call(cudaMemcpy(C, dC, Ah * Bw * sizeof(float), cudaMemcpyDeviceToHost));
    // CPU reference
    float *C_cpu = new float[Ah * Bw];
    for (int a = 0; a < Ah; ++a) {
        for (int b = 0; b < Bw; ++b) {
            float sum = 0;
            for (int k = 0; k < Aw; ++k)
                sum += A[a * Aw + k] * B[k * Bw + b];
            C_cpu[a * Bw + b] = sum;
        }
    }
    for (int i = 0; i < 10; ++i) {
        std::cout << C[i] << " " << C_cpu[i] << std::endl;
    }
    // cleanup
    check_cuda_call(cudaFree(dA));
    check_cuda_call(cudaFree(dB));
    check_cuda_call(cudaFree(dC));
    delete[] A;
    delete[] B;
    delete[] C;
    delete[] C_cpu;
    return 0;
}
|
6,955 | #include "tanh-grad.hh"
#include "graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Backward op for tanh: given the forward activation output and the upstream
// gradient, produces the gradient with respect to the tanh input.
// NOTE(review): the first parameter is named sig_out (sigmoid?) but the op is
// registered as "tanh_grad" — presumably the tanh forward output; confirm
// against the callers.
TanhGrad::TanhGrad(Op* sig_out, Op* dout)
: Op("tanh_grad", sig_out->shape_get(), {sig_out, dout})
{}
// Lowers this op into a runtime node: looks up the compiled forms of both
// predecessors, allocates an output tensor with the same total element count
// as the forward output, and registers an op_tanh_grad node that depends on
// both predecessor nodes.
void TanhGrad::compile()
{
auto& g = Graph::instance();
auto& csig_out = g.compiled(preds()[0]);
auto& cdout = g.compiled(preds()[1]);
std::size_t len = csig_out.out_shape.total();
Shape out_shape = csig_out.out_shape;
// tensor_alloc takes an element count; ownership is handed to the graph.
dbl_t* out_data = tensor_alloc(len);
auto out_node = rt::Node::op_tanh_grad(csig_out.out_data, cdout.out_data, out_data,
len,
{csig_out.out_node, cdout.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
}
|
6,956 | #include "includes.h"
// Crossing-number point-in-polygon test, one point per thread.
// Polygon data (d_vertices, VERTICES) comes from includes.h.
// bitmap[i] = 1 when points[i] is inside (odd crossings), 0 otherwise.
__global__ void cn_pnpoly_naive(int* bitmap, float2* points, int n) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    const float2 p = points[tid];
    int inside = 0;
    // Walk edges (v[k], v[j]) with k trailing j by one, wrapping around.
    for (int j = 0, k = VERTICES - 1; j < VERTICES; k = j++) {
        const float2 vj = d_vertices[j];
        const float2 vk = d_vertices[k];
        const float slope = (vk.x - vj.x) / (vk.y - vj.y);
        // Edge straddles p vertically AND p lies left of the crossing point
        // when shooting a ray in +x; each such crossing toggles the parity.
        if (((vj.y > p.y) != (vk.y > p.y)) &&
            (p.x < slope * (p.y - vj.y) + vj.x)) {
            inside = !inside;
        }
    }
    bitmap[tid] = inside;
}
6,957 | #include "includes.h"
// STREAM "scale" kernel: b[i] = scale * a[i] over a grid-stride loop, so any
// launch configuration covers all `len` elements.
__global__ void STREAM_Scale(float *a, float *b, float scale, size_t len)
{
    const size_t stride = blockDim.x * gridDim.x;
    for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < len; i += stride)
        b[i] = scale * a[i];
}
6,958 | // Plain C
// To show nvcc can compile this
# include <stdio.h>
/* Minimal plain-C program demonstrating that nvcc accepts C sources. */
int main(void) {
    puts("Hello! ");   /* puts appends the newline the printf version had */
    return 0;
}
|
6,959 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <cuda.h>
#define THREADS 128
#define BLOCKS 16
#define SIZE 2048
/*
 * One step of a circular 3-point neighbour sum: each cell becomes the sum of
 * itself and its two ring neighbours (indices wrap at the SIZE boundary).
 *
 * Fixes: the guard was `idx > SIZE`, which let idx == SIZE through and read
 * array[SIZE] out of bounds (latent — the shipped launch config produces
 * exactly SIZE threads); and the early `return` before __syncthreads() was a
 * divergent-barrier pattern. Restructured so every thread reaches the
 * barrier. NOTE(review): the barrier is block-scoped only, so reads and
 * writes that straddle a block boundary still race across blocks; a
 * two-buffer scheme would be needed for full correctness.
 */
__global__ void add(int *array) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int temp = 0;
    if (idx < SIZE) {
        int before = (idx + 1) % SIZE;           /* right neighbour, wrapping */
        int after = idx - 1;                     /* left neighbour, wrapping  */
        if (after < 0) after = SIZE - 1;
        temp = array[idx] + array[before] + array[after];
    }
    __syncthreads(); /* all in-block reads complete before any in-block write */
    if (idx < SIZE)
        array[idx] = temp;
}
/* Fills all SIZE entries of h_v with the value numb. */
void init(int* h_v, int numb) {
    int i = 0;
    while (i < SIZE) {
        h_v[i++] = numb;
    }
}
/* Host driver: fills an array with 3s, runs one GPU neighbour-sum step and
 * prints the result (every cell becomes 9 after one pass over all-3s input). */
int main( void ) {
    const int bytes = SIZE * sizeof(int);
    int *result = (int*) malloc( bytes );
    int *h_a = (int*) malloc( bytes );
    if (h_a == NULL || result == NULL) {
        fprintf(stderr, "Error allocating memory... %s\n", strerror(errno));
        exit(1);
    }
    memset(result, 0, bytes);
    init(h_a, 3);
    int *dev_a;
    cudaMalloc(&dev_a, bytes);
    /* copy the input into device memory... */
    cudaMemcpy(dev_a, h_a, bytes, cudaMemcpyHostToDevice);
    add<<<BLOCKS, THREADS>>>(dev_a);
    /* ...and fetch the result back to the host */
    cudaMemcpy(result, dev_a, bytes, cudaMemcpyDeviceToHost);
    fprintf(stdout, "Result %s\n", "");
    for (int i = 0; i < SIZE; i++) {
        fprintf(stderr, " %d ", result[i]);  /* values go to stderr, line breaks to stdout, as before */
        if ((i + 1) % 10 == 0)
            fprintf(stdout, "%s\n", "");
    }
    fprintf(stdout, "%s\n", "");
    free(h_a);
    free(result);
    cudaFree(dev_a);
    return 0;
}
|
6,960 | #include <cmath>
// In-place base-10 logarithm, one element per thread.
// Expects a single block whose blockDim.x equals the array length.
__global__ void mylog10(double* value)
{
    const unsigned int i = threadIdx.x;
    const double v = value[i];
    value[i] = std::log10(v);
}
|
6,961 | // JCudaSimpleKernel.java から呼び出される CUDA カーネル関数
// CUDA kernel invoked from JCudaSimpleKernel.java.
// Parallel over x only; each thread sequentially sums input[x][0..ysize)
// into output[x], so no two threads ever touch the same output element.
extern "C"
__global__ void simpleKernel(float** input, int xsize, int ysize, float* output)
{
    const unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
    if (x < xsize) {
        for (int y = 0; y < ysize; ++y)
            output[x] += input[x][y];
    }
    __syncthreads();  // kept from the original; no shared state actually requires it
}
|
6,962 | /*
Single Author info:
yjkamdar Yash J Kamdar
Group info:
vphadke Vandan V Phadke
angodse Anupam N Godse
*/
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0
#define CUDA_CALL( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__,__LINE__)
/**************************************
* void __cudaSafeCall(cudaError err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
// Debug-build guard for CUDA API calls: prints the failing call's location
// and error string, then exits. Compiles to a no-op unless __DEBUG is set.
inline void __cudaSafeCall( cudaError err, const char *file, const int line )
{
#ifdef __DEBUG
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif // __DEBUG
    return;
}
// Debug-build check of the CUDA sticky error state (typically placed after a
// kernel launch): reports the last error with location and exits on failure.
// The heavier cudaThreadSynchronize-based variant is intentionally omitted,
// as in the original, because it hurts performance.
inline void __cudaCheckError( const char *file, const int line )
{
#ifdef __DEBUG
    cudaError_t err = cudaGetLastError();
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s.\n",
                 file, line, cudaGetErrorString( err ) );
        exit( -1 );
    }
#endif // __DEBUG
    return;
}
/* Advances the simulation clock *t by dt unless that would pass the final
 * time tf. Returns 1 (and updates *t) while the simulation should continue,
 * 0 (leaving *t untouched) once it is finished. */
int tpdt(double *t, double dt, double tf)
{
    const double next = *t + dt;
    if (next > tf)
        return 0;
    *t = next;
    return 1;
}
/*9-point evolution of the grid using the GPU*/
__global__ void evolve9ptgpu(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t){
/*Calculate the index of the current grid point calculation*/
int idx = blockIdx.x * blockDim.x + threadIdx.x;
int totalLength = n*n;
/*Boudary conditions for the grid*/
if (idx >= 0 && idx < totalLength) {
if((idx % n == 0) || ((idx + 1) % n == 0) || idx < n || idx > n*(n-1) - 1)
{
un[idx] = 0;
}
/*Calculate grid point value using the 9-point scale*/
else
{
un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] +
uc[idx + n] + uc[idx - n] + 0.25 * (uc[idx -n - 1] + uc[idx - n + 1] +
uc[idx -1 + n] + uc[idx + 1 + n]) - 5 * uc[idx])/(h * h)
+ (-1 * __expf(-TSCALE * t) * pebbles[idx]));
}
}
}
/* 5-point finite-difference update of the lake surface (simpler counterpart
 * of evolve9ptgpu). One thread per cell; edge cells are clamped to zero. */
__global__ void evolvegpu(double *un, double *uc, double *uo, double *pebbles, int n, double h, double dt, double t){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < 0 || idx >= n * n)
        return;
    /* left column, right column, top row, bottom row */
    const bool onBoundary = (idx % n == 0) || ((idx + 1) % n == 0) ||
                            (idx < n) || (idx > n * (n - 1) - 1);
    if (onBoundary) {
        un[idx] = 0;
        return;
    }
    /* 5-point Laplacian plus time-decaying pebble source term. */
    un[idx] = 2*uc[idx] - uo[idx] + VSQR *(dt * dt) *((uc[idx-1] + uc[idx+1] +
        uc[idx + n] + uc[idx - n] - 4 * uc[idx])/(h * h) + (__expf(-TSCALE * t) * pebbles[idx]));
}
/* Runs the lake simulation on the GPU until end_time and copies the final
 * surface into u. u0/u1 seed the previous/current timesteps.
 *
 * Fixes over the previous version: the loop bounced both full grids
 * host<->device every iteration (four n*n copies per step) purely to rotate
 * them; the data now stays resident on the device and the three buffers are
 * rotated by pointer swap — numerically identical, far fewer transfers.
 * Also: pebbles_d was never freed (leak). The timed region now excludes the
 * initial uploads, which the old version (incidentally) re-did inside the
 * timed loop. */
void run_gpu(double *u, double *u0, double *u1, double *pebbles, int n, double h, double end_time, int nthreads)
{
  cudaEvent_t kstart, kstop;
  float ktime;
  int nBlocks = n / nthreads;
  double t, dt;
  double *un_d, *uc_d, *uo_d, *pebbles_d;
  t = 0.;
  dt = h / 2.;
  /* Set up device timers */
  CUDA_CALL(cudaSetDevice(0));
  CUDA_CALL(cudaEventCreate(&kstart));
  CUDA_CALL(cudaEventCreate(&kstop));
  /* device buffers: next / current / previous grids plus the pebble map */
  cudaMalloc((void **) &un_d, sizeof(double) * n * n);
  cudaMalloc((void **) &uc_d, sizeof(double) * n * n);
  cudaMalloc((void **) &uo_d, sizeof(double) * n * n);
  cudaMalloc((void **) &pebbles_d, sizeof(double) * n * n);
  cudaMemcpy(pebbles_d, pebbles, sizeof(double) * n * n, cudaMemcpyHostToDevice);
  /* seed device state once from the host initial conditions */
  cudaMemcpy(uo_d, u0, sizeof(double) * n * n, cudaMemcpyHostToDevice);
  cudaMemcpy(uc_d, u1, sizeof(double) * n * n, cudaMemcpyHostToDevice);
  /* Start GPU computation timer */
  CUDA_CALL(cudaEventRecord(kstart, 0));
  /* main simulation loop: compute un from (uc, uo), then rotate buffers */
  while(1)
  {
    evolve9ptgpu<<<nBlocks*nBlocks, nthreads*nthreads>>>(un_d, uc_d, uo_d, pebbles_d, n, h, dt, t);
    //evolvegpu<<<nBlocks*nBlocks, nthreads*nthreads>>>(un_d, uc_d, uo_d, pebbles_d, n, h, dt, t);
    double *tmp = uo_d;   /* old "previous" becomes the scratch for next step */
    uo_d = uc_d;
    uc_d = un_d;
    un_d = tmp;
    if(!tpdt(&t,dt,end_time)) break;
  }
  /* single copy of the final surface back to the host */
  CUDA_CALL(cudaMemcpy(u, uc_d, sizeof(double) * n * n, cudaMemcpyDeviceToHost));
  /* Stop GPU computation timer */
  CUDA_CALL(cudaEventRecord(kstop, 0));
  CUDA_CALL(cudaEventSynchronize(kstop));
  CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
  printf("GPU computation: %f msec\n", ktime);
  /* cleanup (all three rotated pointers still name distinct allocations) */
  cudaFree(un_d);
  cudaFree(uc_d);
  cudaFree(uo_d);
  cudaFree(pebbles_d);
  /* timer cleanup */
  CUDA_CALL(cudaEventDestroy(kstart));
  CUDA_CALL(cudaEventDestroy(kstop));
}
|
6,963 | // included dependencies
#include <map>
#include <fstream>
#include <sstream>
//#include <boost/lexical_cast.hpp>
#include <iostream>
#include <iomanip>
#include <cstdlib>
#include <exception>
#include <iterator>
#include <vector>
#include "ReadInput.cuh"
#include <math.h>
#include <numeric>
#include <algorithm>
namespace READINPUT {
// Parses a whitespace-separated "key value(s)" configuration file and routes
// each recognized key into the map matching its declared type. The key lists
// (boolKeys, intKeys, doubleKeys, stringKeys, vectorKeys) are declared in
// ReadInput.cuh. A key may appear in several lists; each matching list is
// handled in turn, consuming tokens from the same line, as before.
//
// Fixes over the previous version: the `while (!eof())` loop processed the
// final line twice (and an empty trailing read), and because `key` lived
// outside the loop, blank/malformed lines silently reused the previous key
// and stored stale values. Lines are now consumed with getline() and the key
// is re-extracted (and validated) per line.
void ReadInputFile(std::string FileName,
std::map<std::string, bool> &inputMapBool,
std::map<std::string,int> &inputMapInt,
std::map<std::string,double> &inputMapDouble,
std::map<std::string,std::string> &inputMapString,
std::map<std::string,std::vector<double>> &inputMapVector
){
    std::string line;
    std::ifstream inputFile;
    // open file stream (streams don't throw by default; kept for parity)
    try{
        inputFile.open(FileName.c_str());
    }
    catch(std::exception const& e) {
        std::cout << FileName << ": " << e.what() << "\n";
    };
    if (inputFile.is_open())
    {
        while (std::getline(inputFile, line)){
            std::stringstream iss(line);
            std::string key;
            if (!(iss >> key))
                continue;   // blank line: nothing to route
            // bool-typed keys
            if (find(boolKeys.begin(), boolKeys.end(), key) != boolKeys.end()){
                bool valueBool;
                if (iss >> valueBool)
                    inputMapBool[key] = valueBool;
            }
            // int-typed keys
            if (find(intKeys.begin(), intKeys.end(), key) != intKeys.end()){
                int valueInt;
                if (iss >> valueInt)
                    inputMapInt[key] = valueInt;
            }
            // double-typed keys
            if (find(doubleKeys.begin(), doubleKeys.end(), key) != doubleKeys.end()){
                double valueDouble;
                if (iss >> valueDouble)
                    inputMapDouble[key] = valueDouble;
            }
            // vector<double>-typed keys: consume every remaining number
            if (find(vectorKeys.begin(), vectorKeys.end(), key) != vectorKeys.end()){
                double element;
                while (iss >> element){
                    inputMapVector[key].push_back(element);
                }
            }
            // string-typed keys
            if (find(stringKeys.begin(), stringKeys.end(), key) != stringKeys.end()){
                std::string valueString;
                if (iss >> valueString)
                    inputMapString[key] = valueString;
            }
        }
    }
}
// Reads a per-bunch description file. The first line is a human-readable
// header and is skipped; every following line is
//   <bucket> <realnumberparticles> <emitx> <emity> <sigs>
// and is stored in bunchMap as bucket -> {nReal, ex, ey, sigs}.
// Lines that fail to parse are silently ignored.
void readBunchFile(std::string filename, std::map<int, std::vector<double>> &bunchMap){
    std::ifstream in;
    // open file stream (streams don't throw by default; kept for parity)
    try {
        in.open(filename.c_str());
    }
    catch (std::exception const& e) {
        std::cout << filename << ": " << e.what() << "\n";
    };
    if (in.is_open()){
        std::string line;
        std::getline(in, line);   // discard the header row
        while (std::getline(in, line)){
            std::stringstream fields(line);
            int bucket;
            double nReal, ex, ey, sigs;
            if (fields >> bucket >> nReal >> ex >> ey >> sigs){
                std::vector<double> bunchRow;
                bunchRow.push_back(nReal);
                bunchRow.push_back(ex);
                bunchRow.push_back(ey);
                bunchRow.push_back(sigs);
                bunchMap[bucket] = bunchRow;
            }
        }
    }
    in.close();
}
// Print each key/value pair of a bool-valued input map, one per line.
void PrintInputBoolMap(std::map<std::string, bool> inputMapBool){
    for (const auto &entry : inputMapBool) {
        std::printf("%-30s %i\n", entry.first.c_str(), entry.second);
    }
}
// Print each key/value pair of an int-valued input map, one per line.
void PrintInputIntMap(std::map<std::string, int> inputMapInt){
    for (const auto &entry : inputMapInt) {
        std::printf("%-30s %i\n", entry.first.c_str(), entry.second);
    }
}
// Print each key/value pair of a string-valued input map, one per line.
void PrintInputStringMap(std::map<std::string, std::string> inputMapString){
    for (const auto &entry : inputMapString) {
        std::printf("%-30s %s\n", entry.first.c_str(), entry.second.c_str());
    }
}
// Print each key/value pair of a double-valued input map in scientific
// notation, one per line.
void PrintInputDoubleMap(std::map<std::string, double> inputMapDouble){
    for (const auto &entry : inputMapDouble) {
        std::printf("%-30s %12.8e\n", entry.first.c_str(), entry.second);
    }
}
void PrintInputVectorMap(std::map<std::string, std::vector<double>> inputMapVector){
for(std::map<std::string, std::vector<double>>::iterator it=inputMapVector.begin();
it!=inputMapVector.end();it++){
std::cout << std::setw(31) << std::left << it->first;
std::cout << it->second;
std::cout << std::endl;
}
}
void PrintInputBunch(std::map<int, std::vector<double>> bunchMap){
std::cout << std::setw(8) << std::left << "Bucket";
std::cout << std::setw(12) << std::right << "nReal";
std::cout << std::setw(15) << std::right << "ex";
std::cout << std::setw(15) << "ey";
std::cout << std::setw(15) << "sigs[m]" << std::endl;
for(std::map<int, std::vector<double>>::iterator it=bunchMap.begin();
it!=bunchMap.end();it++){
std::cout << std::setw(8) << std::left << it->first;
std::cout << it->second;
std::cout << std::endl;
}
}
}
|
6,964 |
// PPCG initialisation: sd = r / theta over each cell of the x_inner*y_inner
// interior of a grid padded by halo_depth cells on every side.
// One thread per interior cell; launch with >= x_inner*y_inner threads.
__global__ void ppcg_init(
        const int x_inner,
        const int y_inner,
        const int halo_depth,
        const double theta,
        const double* r,
        double* sd)
{
    const int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if(tid < x_inner*y_inner)
    {
        // full row length including both halos
        const int row_len = x_inner + 2*halo_depth;
        // skip halo_depth rows plus halo_depth columns, then address the
        // interior cell (tid % x_inner, tid / x_inner)
        const int idx = halo_depth*(row_len + 1)
                      + (tid % x_inner)
                      + (tid / x_inner)*row_len;
        sd[idx] = r[idx] / theta;
    }
}
// PPCG update of u and r: apply the 5-point stencil operator to sd, then
// r -= A*sd and u += sd, over the interior region only.
// One thread per interior cell; launch with >= x_inner*y_inner threads.
__global__ void ppcg_calc_ur(
        const int x_inner,
        const int y_inner,
        const int halo_depth,
        const double* kx,
        const double* ky,
        const double* sd,
        double* u,
        double* r)
{
    const int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if(tid >= x_inner*y_inner) return;
    const int row_len = x_inner + 2*halo_depth;
    const int idx = halo_depth*(row_len + 1)
                  + (tid % x_inner)
                  + (tid / x_inner)*row_len;
    // 5-point stencil matrix-vector product at this cell
    const double smvp = (1.0
        + (kx[idx+1]+kx[idx])
        + (ky[idx+row_len]+ky[idx]))*sd[idx]
        - (kx[idx+1]*sd[idx+1]+kx[idx]*sd[idx-1])
        - (ky[idx+row_len]*sd[idx+row_len]+ky[idx]*sd[idx-row_len]);
    r[idx] -= smvp;
    u[idx] += sd[idx];
}
// PPCG search-direction update: sd = alpha*sd + beta*r over the interior.
// One thread per interior cell; launch with >= x_inner*y_inner threads.
__global__ void ppcg_calc_sd(
        const int x_inner,
        const int y_inner,
        const int halo_depth,
        const double alpha,
        const double beta,
        const double* r,
        double* sd)
{
    const int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if(tid >= x_inner*y_inner) return;
    const int row_len = x_inner + 2*halo_depth;
    const int idx = halo_depth*(row_len + 1)
                  + (tid % x_inner)
                  + (tid / x_inner)*row_len;
    sd[idx] = alpha*sd[idx] + beta*r[idx];
}
|
6,965 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<cuda.h>
#define BLOCK_DIM 16
__global__ void matrixMultKernel(int *a,int *b,int *c,int width);
// Benchmark the tiled GPU matrix multiply against a naive CPU multiply for
// square sizes N = 32..256 (multiples of BLOCK_DIM), timing H2D copy,
// kernel, and D2H copy separately and verifying the two results match.
int main(){
    int curr=2;
    int N=BLOCK_DIM*curr;
    printf("------------------------------------------\n");
    while(N<=BLOCK_DIM*16){
        // NOTE(review): variable-length arrays on the stack — at N=256 this
        // is ~1 MB total, close to typical stack limits; heap allocation
        // would be safer.
        int a[N][N], b[N][N], gpu_mul[N][N],cpu_mul[N][N];
        int *dev_a, *dev_b, *dev_c;
        float time_gpu,time_cpu,timeindex,timeinit;
        // fill inputs with deterministic values
        for(int i=0;i<N;i++){
            for(int j=0;j<N;j++){
                a[i][j]=i+j;
                b[i][j]=i*j;
            }
        }
        int size=N*N*sizeof(int);
        cudaMalloc((void**) &dev_a,size);
        cudaMalloc((void**) &dev_b,size);
        cudaMalloc((void**) &dev_c,size);
        // time host-to-device transfer with CUDA events
        cudaEvent_t startinit,endinit;
        cudaEventCreate(&startinit);
        cudaEventCreate(&endinit);
        cudaEventRecord(startinit, 0);
        cudaMemcpy(dev_a,a,size,cudaMemcpyHostToDevice);
        cudaMemcpy(dev_b,b,size,cudaMemcpyHostToDevice);
        cudaEventRecord(endinit, 0);
        cudaEventSynchronize(endinit);
        cudaEventElapsedTime(&timeinit, startinit, endinit);
        // time the kernel itself
        cudaEvent_t gpu_start,gpu_end;
        cudaEventCreate(&gpu_start);
        cudaEventCreate(&gpu_end);
        cudaEventRecord(gpu_start, 0);
        dim3 dimBlock(BLOCK_DIM,BLOCK_DIM);
        // NOTE(review): ceil() on an already-integer division is a no-op;
        // this is correct only because N is always a multiple of BLOCK_DIM.
        dim3 dimGrid((int)ceil(N/dimBlock.x),(int)ceil(N/dimBlock.y));
        matrixMultKernel<<<dimGrid,dimBlock>>>(dev_a,dev_b,dev_c,N);
        cudaDeviceSynchronize();
        cudaEventRecord(gpu_end, 0);
        cudaEventSynchronize(gpu_end);
        cudaEventElapsedTime(&time_gpu, gpu_start, gpu_end);
        // time device-to-host transfer of the result
        cudaEvent_t startindex,endindex;
        cudaEventCreate(&startindex);
        cudaEventCreate(&endindex);
        cudaEventRecord(startindex, 0);
        cudaMemcpy(gpu_mul,dev_c,size,cudaMemcpyDeviceToHost);
        cudaEventRecord(endindex, 0);
        cudaEventSynchronize(endindex);
        cudaEventElapsedTime(&timeindex, startindex, endindex);
        // CPU reference multiply (timed with clock())
        clock_t cpu_start,cpu_end;
        cpu_start=clock();
        for(int i=0;i<N;i++)
        {
            for(int j=0;j<N;j++)
            {
                // NOTE(review): float accumulator for integer products loses
                // precision beyond 2^24; it matches the kernel's float
                // accumulation, which is why the comparison below still passes.
                float inter_sum=0;
                for(int k=0;k<N;k++)
                {
                    inter_sum+=a[i][k]*b[k][j];
                }
                cpu_mul[i][j]=inter_sum;
            }
        }
        cpu_end=clock();
        // convert milliseconds to seconds for reporting
        timeinit/=1000;
        timeindex/=1000;
        time_gpu/=1000;
        time_cpu=float(cpu_end-cpu_start)/float(CLOCKS_PER_SEC);
        printf("Time for sending initial data from host to device : %f\t sec\n",timeinit);
        printf("Cuda program launched with %d blocks and %d threads\n",(int)ceil(N/dimBlock.x)*(int)ceil(N/dimBlock.y),BLOCK_DIM*BLOCK_DIM);
        printf("Time for sending calculated data from device to host : %f\t sec\n",timeindex);
        printf("GPU Time:%f seconds\n",time_gpu);
        printf("CPU Time:%f seconds\n",time_cpu);
        // element-wise comparison of GPU vs CPU results
        int flag=1;
        for(int i=0;i<N;i++){
            for(int j=0;j<N;j++){
                if(gpu_mul[i][j]!=cpu_mul[i][j]){
                    flag=0;
                    break;
                }
            }
        }
        if(flag){
            printf("TEST PASSED\n");
            printf("SPEED UP:%f\n",time_cpu/time_gpu);
        }
        else{
            printf("TEST FAILED\n");
        }
        cudaFree(dev_a);
        cudaFree(dev_b);
        cudaFree(dev_c);
        printf("------------------------------------------\n");
        curr++;
        N=BLOCK_DIM*curr;
    }
}
// Tiled matrix multiply: c = a * b for width x width row-major int matrices.
// Launch with BLOCK_DIM x BLOCK_DIM thread blocks covering the whole output;
// assumes width is an exact multiple of BLOCK_DIM (no bounds guard).
// NOTE(review): the products and sums are accumulated in float shared
// tiles / a float register, which silently loses precision for integer
// sums beyond 2^24.  The CPU reference in main() accumulates in float the
// same way, so the verification still matches — fix both together if
// exact integer results are wanted.
__global__ void matrixMultKernel(int *a,int *b,int *c,int width){
    __shared__ float tile_a[BLOCK_DIM][BLOCK_DIM];
    __shared__ float tile_b[BLOCK_DIM][BLOCK_DIM];
    int bx=blockIdx.x; int by=blockIdx.y;
    int tx=threadIdx.x; int ty=threadIdx.y;
    int row=by*BLOCK_DIM+ty;
    int col=bx*BLOCK_DIM+tx;
    float Pvalue=0;
    // march tile-by-tile along the shared dimension
    for(int m=0;m<width/BLOCK_DIM;m++){
        tile_a[ty][tx]=a[row*width+(m*BLOCK_DIM+tx)];
        tile_b[ty][tx]=b[col+(m*BLOCK_DIM+ty)*width];
        // wait until the whole tile is loaded before reading it
        __syncthreads();
        for(int k=0;k<BLOCK_DIM;k++)
            Pvalue+=tile_a[ty][k]*tile_b[k][tx];
        // wait until all reads are done before the next iteration overwrites
        __syncthreads();
    }
    c[row*width+col]=Pvalue;
}
|
6,966 | #include <cstdio>
#include <cstdlib>
const int DIM = 128;
const int N = DIM*DIM;
const int CUDA_DIM = 128;
const int MOD = 5;
// Print an N-element matrix as DIM-wide rows; compiled out when NO_PRINT
// is defined.
void printMat(int *m) {
#ifndef NO_PRINT
    for (int row = 0; row < DIM; ++row) {
        for (int col = 0; col < DIM; ++col)
            printf("%4d", m[row*DIM + col]);
        printf("\n");
    }
#endif
}
// Base pointer of a global arena (device memory allocated once in main)
// from which `mat` objects carve their scratch space.
int *cache;
// Bump allocator over the global `cache` buffer: constructing a mat claims
// n rows (n*DIM ints) and advances `cache`; the destructor rolls `cache`
// back to this object's base.  Because C++ destroys locals in reverse
// construction order, scoped mats behave like a stack.
// NOTE(review): not thread-safe, assumes strictly nested lifetimes, and
// performs no bounds check against the arena size.
struct mat {
int *p;
mat(int n) {
// claim n rows of the backing buffer
p = cache;
cache += n*DIM;
}
// element access; p points into the cudaMalloc'd arena, hence __device__
__device__ int& operator[](int n) {
return p[n];
}
// pointer arithmetic into this allocation (host-side, used for sub-views)
int* operator+(int n) {
return p+n;
}
~mat() {
// release everything allocated since this object was constructed
cache = p;
}
};
// Elementwise c = a + b (add == true) or c = a - b over a submatrix
// embedded in a DIM-wide row-major layout.  Launched as <<<rows, cols>>>
// with one thread per element.
__global__ void matAddOrSub(int *a, int *b, int *c, bool add) {
    int idx = blockIdx.x*DIM + threadIdx.x;
    if (add)
        c[idx] = a[idx] + b[idx];
    else
        c[idx] = a[idx] - b[idx];
}
// Naive dim x dim multiply of submatrices embedded in a DIM-wide layout;
// dim is taken from gridDim.x (launched as <<<dim, dim>>>, one thread per
// output element).
__global__ void matMulCuda(int *a, int *b, int *c) {
    int row = blockIdx.x;
    int col = threadIdx.x;
    int n = gridDim.x;
    int acc = 0;
    for (int k = 0; k < n; ++k)
        acc += a[row*DIM + k] * b[k*DIM + col];
    c[row*DIM + col] = acc;
}
// Strassen recursive multiply of dim x dim submatrices embedded in
// DIM-wide row-major device buffers; falls back to the plain CUDA kernel
// once dim <= CUDA_DIM.  Scratch space comes from the global `cache` bump
// allocator (struct mat).  All launches go to the default stream, so the
// sequential kernel launches are implicitly ordered.
// NOTE(review): with DIM == CUDA_DIM == 128 the recursive branch is
// currently dead code; it only runs if CUDA_DIM is lowered.
void matMul(int *a, int *b, int *c, int dim) {
    if (dim <= CUDA_DIM) {
        matMulCuda<<<dim, dim>>>(a, b, c);
        return;
    }
    // scratch for the ten Strassen sum/difference terms
    mat p1(dim), p2(dim), p3(dim);
    int half = dim/2;
    // quadrant views: 11 = top-left, 12 = top-right, 21 = bottom-left, 22 = bottom-right
    int *a11=a, *a12=a+half, *a21=a+half*DIM, *a22=a+half*DIM+half;
    int *b11=b, *b12=b+half, *b21=b+half*DIM, *b22=b+half*DIM+half;
    int *s1=p1+0, *s2=p1+half, *s3=p1+half*DIM, *s4=p1+half*DIM+half;
    int *s5=p2+0, *s6=p2+half, *s7=p2+half*DIM, *s8=p2+half*DIM+half;
    int *s9=p3+0, *s10=p3+half;
    matAddOrSub<<<half, half>>>(a11, a22, s1, true);   // s1 = A11+A22
    matAddOrSub<<<half, half>>>(b11, b22, s2, true);   // s2 = B11+B22
    matAddOrSub<<<half, half>>>(a21, a22, s3, true);   // s3 = A21+A22
    matAddOrSub<<<half, half>>>(b12, b22, s4, false);  // s4 = B12-B22
    matAddOrSub<<<half, half>>>(b21, b11, s5, false);  // s5 = B21-B11
    matAddOrSub<<<half, half>>>(a11, a12, s6, true);   // s6 = A11+A12
    matAddOrSub<<<half, half>>>(a21, a11, s7, false);  // s7 = A21-A11
    matAddOrSub<<<half, half>>>(b11, b12, s8, true);   // s8 = B11+B12
    matAddOrSub<<<half, half>>>(a12, a22, s9, false);  // s9 = A12-A22
    matAddOrSub<<<half, half>>>(b21, b22, s10, true);  // s10 = B21+B22
    // the seven Strassen products M1..M7
    mat q1(dim), q2(dim);
    int *m1=q1+0, *m2=q1+half, *m3=q1+half*DIM, *m4=q1+half*DIM+half;
    int *m5=q2+0, *m6=q2+half, *m7=q2+half*DIM;
    matMul(s1, s2, m1, half);   // M1 = (A11+A22)(B11+B22)
    matMul(s3, b11, m2, half);  // M2 = (A21+A22)B11
    matMul(a11, s4, m3, half);  // M3 = A11(B12-B22)
    matMul(a22, s5, m4, half);  // M4 = A22(B21-B11)
    matMul(s6, b22, m5, half);  // M5 = (A11+A12)B22
    matMul(s7, s8, m6, half);   // M6 = (A21-A11)(B11+B12)
    matMul(s9, s10, m7, half);  // M7 = (A12-A22)(B21+B22)
    // recombine into the four quadrants of C
    int *c11=c, *c12=c+half, *c21=c+half*DIM, *c22=c+half*DIM+half;
    matAddOrSub<<<half, half>>>(m1, m4, c11, true);    // C11 = M1+M4-M5+M7
    matAddOrSub<<<half, half>>>(c11, m5, c11, false);
    matAddOrSub<<<half, half>>>(c11, m7, c11, true);
    matAddOrSub<<<half, half>>>(m3, m5, c12, true);    // C12 = M3+M5
    matAddOrSub<<<half, half>>>(m2, m4, c21, true);    // C21 = M2+M4
    matAddOrSub<<<half, half>>>(m1, m2, c22, false);   // C22 = M1-M2+M3+M6
    matAddOrSub<<<half, half>>>(c22, m3, c22, true);
    matAddOrSub<<<half, half>>>(c22, m6, c22, true);
}
// Multiply two random DIM x DIM matrices with the (Strassen-capable) GPU
// path, time the whole copy+compute with CUDA events, and verify the
// result against a naive CPU multiply.
int main() {
    int a[N], b[N], c[N];
    // random inputs in [0, MOD); rand() is unseeded, so runs are repeatable
    for (int i=0; i<N; ++i) {
        a[i] = rand()%MOD;
        b[i] = rand()%MOD;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // arena backing the `mat` bump allocator used by matMul's recursion
    // NOTE(review): sized for one recursion level (5*N ints); with
    // DIM == CUDA_DIM the recursion never triggers so this is unused.
    cudaMalloc(&cache, sizeof(int[N*5]));
    int *devA, *devB, *devC;
    cudaMalloc(&devA, sizeof(int[N]));
    cudaMalloc(&devB, sizeof(int[N]));
    cudaMalloc(&devC, sizeof(int[N]));
    cudaMemcpy(devA, a, sizeof(int[N]), cudaMemcpyHostToDevice);
    cudaMemcpy(devB, b, sizeof(int[N]), cudaMemcpyHostToDevice);
    matMul(devA, devB, devC, DIM);
    // blocking copy-back also synchronizes with the queued kernels
    cudaMemcpy(c, devC, sizeof(int[N]), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    printf("a:\n");
    printMat(a);
    printf("\nb:\n");
    printMat(b);
    printf("\nc:\n");
    printMat(c);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("\nTime taken for matrix multiplication: %.3f ms", elapsedTime);
    // CPU reference result
    int ans[N];
    for (int i=0; i<DIM; ++i) {
        for (int j=0; j<DIM; ++j) {
            int offset = i*DIM+j;
            ans[offset] = 0;
            for (int k=0; k<DIM; ++k) {
                ans[offset] += a[i*DIM+k]*b[k*DIM+j];
            }
        }
    }
    // compare element-wise; throw/catch used as an early-exit mechanism
    try {
        for (int i=0; i<N; ++i) {
            if (ans[i]!=c[i]) {
                throw 1;
            }
        }
        printf("\nSuccess!\n");
    } catch (int) {
        printf("\nFailed\n");
    }
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    cudaFree(cache);
}
|
6,967 | #include <stdint.h>
//#include <stdio.h>
// Grid-stride fill: set every element of a contiguous array of `len`
// elements to `val`.  Works for any grid/block configuration.
// Fix: the original used an `int` loop index against a size_t `len`,
// which overflows (UB) for len > INT_MAX; all indexing is now size_t.
template <typename T>
__device__ void fill_contiguous(T *data, size_t len, T val) {
    const size_t stride = (size_t)gridDim.x * blockDim.x;
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < len; i += stride) {
        data[i] = val;
    }
}
// Note that pitch must be expressed in elements, not bytes!
// Fill a pitched 3-D array with `val` using a 3-D grid-stride pattern.
// Note that pitch must be expressed in elements, not bytes!
// Fix: the original used `int` indices and computed the row offset
// (height*offsetz + iy) in int, overflowing for extents/pitches whose
// products exceed INT_MAX; all index arithmetic is now size_t.
template <typename T>
__device__ void fill_pitched(T *data, size_t width, size_t height, size_t depth, size_t pitch, T val) {
    const size_t sx = (size_t)gridDim.x * blockDim.x;
    const size_t sy = (size_t)gridDim.y * blockDim.y;
    const size_t sz = (size_t)gridDim.z * blockDim.z;
    for (size_t iz = (size_t)blockIdx.z * blockDim.z + threadIdx.z; iz < depth; iz += sz) {
        for (size_t iy = (size_t)blockIdx.y * blockDim.y + threadIdx.y; iy < height; iy += sy) {
            // row index within the flattened (depth*height) x pitch layout
            const size_t rowoff = height*iz + iy;
            for (size_t ix = (size_t)blockIdx.x * blockDim.x + threadIdx.x; ix < width; ix += sx) {
                data[pitch*rowoff + ix] = val;
            }
        }
    }
}
// C-linkage kernel entry points instantiating the fill templates for each
// supported element type (so they can be looked up by name, e.g. via the
// driver API / FFI).  `len` is in elements; `pitch` is in elements too.
extern "C"
{
void __global__ fill_contiguous_double(double *data, size_t len, double val) {fill_contiguous(data, len, val);}
void __global__ fill_contiguous_float(float *data, size_t len, float val) {fill_contiguous(data, len, val);}
void __global__ fill_contiguous_int64(int64_t *data, size_t len, int64_t val) {fill_contiguous(data, len, val);}
void __global__ fill_contiguous_uint64(uint64_t *data, size_t len, uint64_t val) {fill_contiguous(data, len, val);}
void __global__ fill_contiguous_int32(int32_t *data, size_t len, int32_t val) {fill_contiguous(data, len, val);}
void __global__ fill_contiguous_uint32(uint32_t *data, size_t len, uint32_t val) {fill_contiguous(data, len, val);}
void __global__ fill_contiguous_int16(int16_t *data, size_t len, int16_t val) {fill_contiguous(data, len, val);}
void __global__ fill_contiguous_uint16(uint16_t *data, size_t len, uint16_t val) {fill_contiguous(data, len, val);}
void __global__ fill_contiguous_int8(int8_t *data, size_t len, int8_t val) {fill_contiguous(data, len, val);}
void __global__ fill_contiguous_uint8(uint8_t *data, size_t len, uint8_t val) {fill_contiguous(data, len, val);}
void __global__ fill_pitched_double(double *data, size_t width, size_t height, size_t depth, size_t pitch, double val) {
fill_pitched(data, width, height, depth, pitch, val);
}
void __global__ fill_pitched_float(float *data, size_t width, size_t height, size_t depth, size_t pitch, float val) {
fill_pitched(data, width, height, depth, pitch, val);
}
void __global__ fill_pitched_int64(int64_t *data, size_t width, size_t height, size_t depth, size_t pitch, int64_t val) {
fill_pitched(data, width, height, depth, pitch, val);
}
void __global__ fill_pitched_uint64(uint64_t *data, size_t width, size_t height, size_t depth, size_t pitch, uint64_t val) {
fill_pitched(data, width, height, depth, pitch, val);
}
void __global__ fill_pitched_int32(int32_t *data, size_t width, size_t height, size_t depth, size_t pitch, int32_t val) {
fill_pitched(data, width, height, depth, pitch, val);
}
void __global__ fill_pitched_uint32(uint32_t *data, size_t width, size_t height, size_t depth, size_t pitch, uint32_t val) {
fill_pitched(data, width, height, depth, pitch, val);
}
void __global__ fill_pitched_int16(int16_t *data, size_t width, size_t height, size_t depth, size_t pitch, int16_t val) {
fill_pitched(data, width, height, depth, pitch, val);
}
void __global__ fill_pitched_uint16(uint16_t *data, size_t width, size_t height, size_t depth, size_t pitch, uint16_t val) {
fill_pitched(data, width, height, depth, pitch, val);
}
void __global__ fill_pitched_int8(int8_t *data, size_t width, size_t height, size_t depth, size_t pitch, int8_t val) {
fill_pitched(data, width, height, depth, pitch, val);
}
void __global__ fill_pitched_uint8(uint8_t *data, size_t width, size_t height, size_t depth, size_t pitch, uint8_t val) {
fill_pitched(data, width, height, depth, pitch, val);
}
}
// For implementing sleep, from http://stackoverflow.com/questions/11217117/equivalent-of-usleep-in-cuda-kernel
// Busy-wait kernel: spins until `clock_count` device clock ticks have
// elapsed (GPU-side sleep emulation; see the StackOverflow reference in
// the file comment above this block).
// NOTE(review): clock64() counts per-multiprocessor ticks, so the real
// wall-clock delay depends on the GPU clock rate — confirm callers scale
// clock_count accordingly.
extern "C" {
// __global__ void clock_block(int64_t *d_o, int64_t clock_count)
__global__ void clock_block(int64_t clock_count)
{
int64_t start_clock = clock64();
int64_t clock_offset = 0;
// spin until the requested number of ticks has elapsed
while (clock_offset < clock_count)
{
clock_offset = clock64() - start_clock;
}
// d_o[0] = (int64_t) clock_offset;
}
}
/*// Debugging
int main()
{
size_t width = 5;
size_t height = 3;
size_t depth = 1;
size_t pitch = sizeof(double)*width;
cudaSetDevice(0);
cudaExtent extent = make_cudaExtent(pitch, height, depth);
cudaPitchedPtr devpp;
cudaMalloc3D(&devpp, extent);
pitch = devpp.pitch;
fill_pitched_double<<<32,16>>>((double *) devpp.ptr, width, height, depth, pitch/sizeof(double), (double) 8.4);
cudaDeviceSynchronize();
double *host = new double[width*height];
cudaPos dstpos = make_cudaPos(0, 0, 0);
cudaPos srcpos = make_cudaPos(0, 0, 0);
cudaPitchedPtr hostpp = make_cudaPitchedPtr(host, sizeof(double)*width, width, height);
cudaMemcpy3DParms mcpp = {0};
mcpp.srcPos = srcpos;
mcpp.dstPos = dstpos;
mcpp.srcPtr = devpp;
mcpp.dstPtr = hostpp;
mcpp.extent = extent;
mcpp.kind = cudaMemcpyDeviceToHost;
cudaMemcpy3D(&mcpp);
cudaDeviceSynchronize();
for (int i = 0; i < width*height; i++)
printf("%g\n", host[i]);
cudaDeviceReset();
}*/ |
6,968 | #define W 500
#define H 500
#define TX 32
#define TY 32
// Per-pixel Euclidean distance from `pos` over a w x h row-major grid.
__global__
void distanceKernel(float *d_out, int w,int h, float2 pos){
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (col >= w || row >= h) return;   // guard the grid tail
    const float dx = c_to_f(col) - pos.x;
    const float dy = c_to_f(row) - pos.y;
    d_out[row*w + col] = sqrtf(dx*dx + dy*dy);
}
// Compute a W x H field of distances from the origin on the GPU.
int main(){
    float *out = (float *)calloc(W*H, sizeof(float));
    float *d_out;
    cudaMalloc(&d_out, W*H*sizeof(float));
    const float2 pos = {0.0f, 0.0f};
    const dim3 blockSize(TX, TY);
    const int bx = (W + TX - 1)/TX;
    // fix: grid height must be derived from H, not W (the original used W;
    // it was only harmless because W == H here)
    const int by = (H + TY - 1)/TY;
    const dim3 gridSize = dim3(bx, by);
    distanceKernel<<<gridSize, blockSize>>>(d_out, W, H, pos);
    // copy the result back so the computation is observable on the host
    // (this blocking copy also synchronizes with the kernel)
    cudaMemcpy(out, d_out, W*H*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    free(out);
    return 0;
}
|
6,969 | /* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <cuda_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
/**
* This kernel uses each node's (and its neighbors') y-value(s) at times t and
* t - 1 to calculate it at t + 1, for nodes 1 through length - 2 (where the
* nodes are 0-indexed).
*/
/**
 * Advance the interior nodes (1 .. length-2) of the 1-D wave by one time
 * step: uses each node's (and its neighbours') y-values at t and t-1 to
 * compute t+1, writing the result into `old` so the host can simply swap
 * the two buffers afterwards.  Grid-stride loop: any launch config works.
 */
__global__
void update_nodes_kernel(float *old, float *current, int length,
                         float courant_squared) {
    unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int n = blockIdx.x * blockDim.x + threadIdx.x + 1;
         n < length - 1; n += stride) {
        // cache the centre value: it is read twice below
        float y_n = current[n];
        // discrete wave equation update rule
        old[n] = 2 * y_n - old[n] +
                 courant_squared * (current[n + 1] - 2 * y_n +
                                    current[n - 1]);
    }
}
/**
* This kernel updates the boundary nodes, namely those at indices 0 and
* length - 1. The former's value is calculated on the CPU and passed in as an
* argument, while the latter's is always 0.0.
*/
/**
 * Update the two boundary nodes: index 0 takes the host-computed
 * left_boundary_value, index length-1 is clamped to 0.  Only the first two
 * threads of the launch do anything.
 */
__global__
void update_boundaries_kernel(float *current, float left_boundary_value,
                              int length) {
    switch (blockIdx.x * blockDim.x + threadIdx.x) {
        case 0:
            current[0] = left_boundary_value;
            break;
        case 1:
            current[length - 1] = 0.0;
            break;
    }
}
/**
* This function calls the general update kernel with the desired grid and block
* size. The kernel itself supports arbitrary grid and block sizes.
*/
// Host-side launcher: runs update_nodes_kernel with the caller-supplied
// grid/block sizes on the default stream (asynchronous; no error check).
void call_update_nodes_kernel(unsigned int grid_size, unsigned int block_size,
float *old, float *current, int length,
float courant_squared) {
update_nodes_kernel<<<grid_size, block_size>>>(old, current, length,
courant_squared);
}
/**
* This function calls the boundary node update kernel with two blocks of one
* thread each; we only need two threads, but if they were in one block they
* be de facto in the same warp, and would diverge, leading to poorer
* performance than just putting them each in their own block. The kernel
* itself supports arbitrary grid and block sizes.
*/
// Host-side launcher: two single-thread blocks, one per boundary node,
// so the two divergent updates never share a warp.
void call_update_boundaries_kernel(float *current, float left_boundary_value,
int length) {
update_boundaries_kernel<<<2, 1>>>(current, left_boundary_value, length);
}
6,970 | #include <cuda.h>
#include <stdio.h>
// Enumerate the CUDA devices and print their main properties.
int main(int argc, char ** argv) {
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; dev++) {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, dev);
        if (dev == 0) {
            if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
                // CUDA not detected
                return -1;
            }else{
                // report how many devices were found
                if (deviceCount == 0) {
                    printf("There is no device supporting CUDA.\n");
                    exit (0);
                }
                else{
                    printf("Number of device : %d\n",deviceCount);
                }
            }
        }
        // device name
        printf("Device Name : %s\n", deviceProp.name);
        // compute capability (major.minor)
        printf("Major : %d, Minor : %d\n",deviceProp.major, deviceProp.minor);
        // global memory size
        printf("Global Memory : %lu\n", deviceProp.totalGlobalMem);
        // constant memory size
        printf("Constant Memory : %lu\n", deviceProp.totalConstMem);
        // shared memory per block
        printf("Shared Memory : %lu\n", deviceProp.sharedMemPerBlock);
        // fix: maxThreadsDim and maxGridSize each have exactly 3 entries;
        // the original loop ran to i < 4 and read one element past the end
        // of both arrays.
        for (int i = 0; i < 3; i++) {
            // max threads per block in each dimension
            printf("Number Thread Max %d: %d\n", i, deviceProp.maxThreadsDim[i]);
            // max grid size in each dimension
            printf("Size Max Grid : %d %d\n", i, deviceProp.maxGridSize[i]);
        }
        // warp size
        printf("Warp Size : %d\n", deviceProp.warpSize);
    }
    return 0;
}
|
6,971 | /*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#define blockSize 16
__global__ void multiply(int *d_a,int *d_b,int *d_c,int n)
{
int i;
int temp=0;
int row=blockIdx.y*blockDim.y + threadIdx.y;
int column=blockIdx.x*blockDim.x + threadIdx.x;
for(i=0;i<n;i++)
temp+=d_a[row*n+i]*d_b[i*n+column];
d_c[row*n+column]=temp;
}
int main()
{
int n,i,j;
int *h_a,*h_b,*h_c;
int *d_a,*d_b,*d_c;
int size;
int temp=0;
printf("Enter the size of the matrix :\n");
scanf("%d",&n);
h_a=(int *)malloc((size=(sizeof(int)*n*n)));
h_b=(int *)malloc(size);
h_c=(int *)malloc(size);
dim3 grid(blockSize,blockSize);
dim3 block((n-1)/blockSize +1,(n-1)/blockSize +1);
cudaMalloc((void **)&d_a,size);
cudaMalloc((void **)&d_b,size);
cudaMalloc((void **)&d_c,size);
for(i=0;i<n;i++)
{
for(j=0;j<n;j++)
{
h_a[i*n+j]=++temp;
h_b[i*n+j]=1;
}
}
cudaMemcpy(d_a,h_a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,size,cudaMemcpyHostToDevice);
multiply<<<grid,block>>>(d_a,d_b,d_c,n);
cudaMemcpy(h_c,d_c,size,cudaMemcpyDeviceToHost);
for(i=0;i<n;i++)
{
for(j=0;j<n;j++)
printf("%d \n",h_c[i*n+j]);
}
free(h_a);
free(h_b);
free(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
*/ |
6,972 | #include <chrono>
#include <iostream>
#include <cuda.h>
using namespace std::chrono;
using namespace std;
#define RGB_COMPONENT_COLOR 255
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
// One pixel of an ASCII PPM image; ints because operator>> parses the
// textual sample values directly.
struct PPMPixel {
int red;
int green;
int blue;
};
// Image container: x/y dimensions, all = x*y, and a heap array of pixels
// (allocated by readPPM with new[]; never freed in this program).
typedef struct{
int x, y, all;
PPMPixel * data;
} PPMImage;
// Load an ASCII ("P3") PPM image into img.  Exits the process on a wrong
// magic number; on a missing file it only prints a message and leaves img
// untouched.  img.data is allocated with new[]; the caller owns it.
void readPPM(const char *filename, PPMImage& img){
    std::ifstream file (filename);
    if (!file){
        std::cout << "the file:" << filename << "was not found" << std::endl;
    } else {
        std::string magic;
        int max_component;
        file >> magic;
        if (magic!="P3") {std::cout<< "error in format"<<std::endl; exit(9);}
        file >> img.x >>img.y;
        file >> max_component;   // read and discard the max colour value
        img.all = img.x*img.y;
        std::cout << magic << std::endl;
        std::cout << "x=" << img.x << " y=" << img.y << " all=" <<img.all << std::endl;
        img.data = new PPMPixel[img.all];
        for (int i=0; i<img.all; i++){
            file >> img.data[i].red >>img.data[i].green >> img.data[i].blue;
        }
    }
    file.close();
}
// Write img as an ASCII ("P3") PPM file, one image row per text line.
void writePPM(const char *filename, PPMImage & img){
    std::ofstream file (filename, std::ofstream::out);
    file << "P3"<<std::endl;
    file << img.x << " " << img.y << " "<< std::endl;
    file << RGB_COMPONENT_COLOR << std::endl;
    for(int i=0; i<img.all; i++){
        // newline after the last pixel of each image row
        const char *sep = (((i+1)%img.x ==0)? "\n" : " ");
        file << img.data[i].red << " " << img.data[i].green << " " << img.data[i].blue << sep;
    }
    file.close();
}
// Fill a rows x cols matrix with pseudo-random digits 0..9
// (uses the global rand() state; unseeded runs are repeatable).
void gen_matrix(int *matrix, int rows, int cols) {
    const int total = rows * cols;
    for (int i = 0; i < total; ++i)
        matrix[i] = rand() % 10;
}
// Print a rows x cols matrix, space-separated, one row per line.
void print_matrix(int *matrix, int rows, int cols) {
    for (int r = 0; r < rows; ++r) {
        for (int c = 0; c < cols; ++c)
            cout << matrix[r * cols + c] << " ";
        cout << endl;
    }
}
#define BLOCK_SIZE 16
#define FILTER_SIZE 3
#define IDX(row, col, len) ((row)*(len)+(col))
// 3x3 weighted blur for interior pixels of an X x Y image: weighted sum of
// the neighbourhood divided by `divisor`.  The 1-pixel border is left
// unwritten.  (Caller sets the centre filter weight to 0.)
__global__ void blur(int *from, int *to, int *filter, int divisor, int X, int Y) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row <= 0 || col <= 0 || row >= Y - 1 || col >= X - 1) return;
    int acc = 0;
    for (int dy = -1; dy <= 1; ++dy)
        for (int dx = -1; dx <= 1; ++dx)
            acc += from[(row + dy) * X + (col + dx)]
                 * filter[(dy + 1) * FILTER_SIZE + dx + 1];
    to[row * X + col] = acc / divisor;
}
// 3x3 weighted median blur for interior pixels: multiply each neighbour by
// its filter weight, sort the nine products, and keep the middle one.
// The 1-pixel border of `to` is left unwritten.
__global__ void blur_median(int *from, int *to, int *filter, int X, int Y) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int filtered[9] = {0,0,0,0,0,0,0,0,0};
    if (col > 0 && row > 0 && col < X - 1 && row < Y - 1) {
        // gather the weighted 3x3 neighbourhood
        for (int i = -1; i < 2; ++i) {
            for (int j = -1; j < 2; ++j) {
                // assume that middle element in filter equals to 0
                filtered[(i + 1) * FILTER_SIZE + j + 1] = from[(row + i) * X + (col + j)] * filter[(i + 1) * FILTER_SIZE + j + 1];
            }
        }
        // in-place selection sort of the nine weighted samples
        for (int i = 0; i < FILTER_SIZE * FILTER_SIZE; ++i) {
            for (int j = i + 1; j < FILTER_SIZE * FILTER_SIZE; ++j) {
                if (filtered[i] > filtered[j]) {
                    int tmp = filtered[i];
                    filtered[i] = filtered[j];
                    filtered[j] = tmp;
                }
            }
        }
        // middle element of the sorted window is the median
        to[row * X + col] = filtered[4];
    }
}
// Load dogs.ppm, run the 3x3 median blur over each colour channel on the
// GPU, and write the result to dogs_after.ppm.
int main(int argc, char* argv[]) {
    PPMImage image;
    readPPM("dogs.ppm", image);
    // writePPM("nature_before.ppm", image);
    int X = image.x;
    int Y = image.y;
    int *from = new int[X * Y];
    int *to = new int[X * Y];
    int *filter = new int[FILTER_SIZE * FILTER_SIZE];
    // all-ones kernel with a zero centre (the median kernel assumes this)
    for (int i = 0; i != FILTER_SIZE; ++i) {
        for (int j = 0; j != FILTER_SIZE; ++j) {
            filter[i * FILTER_SIZE + j] = 1;
        }
    }
    filter[4] = 0;
    print_matrix(filter, FILTER_SIZE, FILTER_SIZE);
    // process the three channels one at a time through the same buffers
    for (int channel = 0; channel != 3; ++channel) {
        // extract one channel into the flat working buffer
        for (int i = 0; i != image.all; ++i) {
            if (channel == 0) {
                from[i] = image.data[i].blue;
            } else if (channel == 1) {
                from[i] = image.data[i].red;
            } else {
                from[i] = image.data[i].green;
            }
        }
        high_resolution_clock::time_point total_start = high_resolution_clock::now();
        int *new_from, *new_to, *new_filter;
        cudaMalloc((void**) &new_from, sizeof(int) * X * Y);
        cudaMalloc((void**) &new_to, sizeof(int) * X * Y);
        cudaMalloc((void**) &new_filter, sizeof(int) * FILTER_SIZE * FILTER_SIZE);
        cudaMemcpy(new_from, from, sizeof(int) * X * Y, cudaMemcpyHostToDevice);
        // fix: seed the output with the input so the 1-pixel border the
        // kernel never writes keeps its original values instead of
        // uninitialized device memory
        cudaMemcpy(new_to, from, sizeof(int) * X * Y, cudaMemcpyHostToDevice);
        cudaMemcpy(new_filter, filter, sizeof(int) * FILTER_SIZE * FILTER_SIZE, cudaMemcpyHostToDevice);
        dim3 grid((X + BLOCK_SIZE - 1) / BLOCK_SIZE, (Y + BLOCK_SIZE - 1) / BLOCK_SIZE);
        dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        // blur<<<grid, block>>>(new_from, new_to, new_filter, 9, X, Y);
        blur_median<<<grid, block>>>(new_from, new_to, new_filter, X, Y);
        // fix: synchronize before the copy-back instead of calling the
        // deprecated cudaThreadSynchronize() after it
        cudaDeviceSynchronize();
        cudaMemcpy(to, new_to, sizeof(int) * X * Y, cudaMemcpyDeviceToHost);
        cudaFree(new_from);
        cudaFree(new_to);
        cudaFree(new_filter);
        high_resolution_clock::time_point total_end = high_resolution_clock::now();
        double total_time = duration_cast<duration<double>>(total_end - total_start).count();
        cout << "Total (kernel+copy) time: " << total_time << endl;
        cout << "----------------" << endl;
        // write the blurred channel back into the image
        for (int i = 0; i != image.all; ++i) {
            if (channel == 0) {
                image.data[i].blue = to[i];
            } else if (channel == 1) {
                image.data[i].red = to[i];
            } else {
                image.data[i].green = to[i];
            }
        }
    }
    writePPM("dogs_after.ppm", image);
    // fix: these were allocated with new[], so they must be released with
    // delete[] (plain delete on an array is undefined behaviour)
    delete[] from;
    delete[] to;
    delete[] filter;
    return 0;
}
|
6,973 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <string>
#include <fstream>
#include <cstdint>
#include <time.h>
#include <ctime>
using namespace std;
// Canonical 44-byte PCM WAV header, read/written verbatim with fread/fwrite.
// NOTE(review): relies on the compiler not padding the struct; holds for
// common compilers given these field sizes, but a static_assert on
// sizeof(wav_hdr) == 44 would make it explicit.
typedef struct WAV_HEADER
{
/* RIFF Chunk Descriptor */
uint8_t RIFF[4]; // RIFF Header Magic header
uint32_t ChunkSize; // RIFF Chunk Size
uint8_t WAVE[4]; // WAVE Header
/* "fmt" sub-chunk */
uint8_t fmt[4]; // FMT header
uint32_t Subchunk1Size; // Size of the fmt chunk
uint16_t AudioFormat; // Audio format 1=PCM,6=mulaw,7=alaw, 257=IBM Mu-Law, 258=IBM A-Law, 259=ADPCM
uint16_t NumOfChan; // Number of channels 1=Mono 2=Sterio
uint32_t SamplesPerSec; // Sampling Frequency in Hz
uint32_t bytesPerSec; // bytes per second
uint16_t blockAlign; // 2=16-bit mono, 4=16-bit stereo
uint16_t bitsPerSample; // Number of bits per sample
/* "data" sub-chunk */
uint8_t Subchunk2ID[4]; // "data" string
uint32_t Subchunk2Size; // Sampled data length
} wav_hdr;
// Function prototypes
int getFileSize(FILE* inFile);
double* filter(wav_hdr wavHeader, double limit_freq);
// One output sample per thread: apply the 31-tap FIR filter `filtr` to the
// magnitudes of the 8-bit input samples (convolution sum).
// The launch configuration guarantees i < BUFFER_SIZE (blocks*1024 never
// exceeds the buffer length as computed in main).
// NOTE(review): the result narrows double -> int8_t; values outside the
// int8_t range are implementation-defined — confirm this is intended.
__global__ void filterr(int8_t* buffer_d, int8_t* buffer_dd, double* filtr)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    double suma = 0;
    if (i >= 30)
    {
        // full 31-tap window is available
        // (fix: original tested i > 30, so sample 30 — the first with a
        // complete window — fell into the truncated branch)
        for (int j = 0; j < 31; j++)
            suma += filtr[j] * abs(buffer_d[i - j]);
    }
    else
    {
        // ramp-in near the start: only taps 0..i have input samples
        // (fix: original looped k < i, dropping the k == i tap entirely)
        for (int k = 0; k <= i; k++)
            suma += filtr[k] * abs(buffer_d[i - k]);
    }
    buffer_dd[i] = suma;
}
// Read a WAV file (path from argv[1] or prompted), low-pass filter its
// 8-bit samples on the GPU, and write the original header plus the
// filtered data to output.wav, then dump the header fields.
int main(int argc, char* argv[])
{
    wav_hdr wavHeader;
    wav_hdr* wavHeader_d;
    wav_hdr* wavHeaderPtr = &wavHeader;
    wav_hdr* wavHeaderPtr1 = &wavHeader;
    int headerSize = sizeof(wav_hdr);
    int filelength = 0;
    const char* filePath;
    string input;
    // take the file name from the command line or prompt for it
    if (argc <= 1)
    {
        cout << "Input wave file name: ";
        cin >> input;
        cin.get();
        filePath = input.c_str();
    }
    else
    {
        filePath = argv[1];
        cout << "Input wave file name: " << filePath << endl;
    }
    // NOTE(review): WAV is binary; "r"/"w" should be "rb"/"wb" on platforms
    // that translate line endings.  wavFile_d is opened but never used or
    // closed.
    FILE* wavFile = fopen(filePath, "r");
    FILE* wavFile_d = fopen(filePath, "r");
    FILE* output = fopen("output.wav", "w");
    if (wavFile == nullptr)
    {
        fprintf(stderr, "Unable to open wave file: %s\n", filePath);
        return 1;
    }
    //Read the header and copy it verbatim to the output file
    size_t bytesRead = fread(&wavHeader, 1, headerSize, wavFile);
    size_t bytesWritten = fwrite(wavHeaderPtr1, sizeof(wav_hdr), 1, output);
    static const uint64_t BUFFER_SIZE = wavHeader.Subchunk2Size;
    /*cudaMalloc((void**)&wavHeader_d, sizeof(wav_hdr));
    cudaMemcpy(wavHeader_d, wavHeaderPtr, BUFFER_SIZE * sizeof(int8_t), cudaMemcpyHostToDevice);*/
    cout << "Header Read " << bytesRead << " bytes." << endl;
    if (bytesRead > 0)
    {
        //Read the data
        int8_t* buffer = new int8_t[BUFFER_SIZE];
        // NOTE(review): buffer_d, buffer_dd and filtr_gpu are allocated with
        // new[] and immediately overwritten by cudaMalloc below, leaking the
        // host arrays; filtr_cpu's first allocation is likewise leaked when
        // filter() returns a fresh array.
        int8_t* buffer_d = new int8_t[BUFFER_SIZE];
        int8_t* buffer_dd = new int8_t[BUFFER_SIZE];
        double* filtr_cpu = new double[31];
        double* filtr_gpu = new double[31];
        filtr_cpu = filter(wavHeader, 10000);
        cudaMalloc((void**)&buffer_d, BUFFER_SIZE * sizeof(int8_t));
        cudaMalloc((void**)&buffer_dd, BUFFER_SIZE * sizeof(int8_t));
        cudaMalloc((void**)&filtr_gpu, 31 * sizeof(double));
        // slurp the sample data into the host buffer
        while ((bytesRead = fread(buffer, sizeof buffer[0], BUFFER_SIZE, wavFile)) > 0)
        {
            cout << "data bytes read: " << bytesRead << endl;
        }
        //allocate memory on GPU
        cudaMemcpy(buffer_d, buffer, BUFFER_SIZE * sizeof(int8_t), cudaMemcpyHostToDevice);
        cudaMemcpy(filtr_gpu, filtr_cpu, 31 * sizeof(double), cudaMemcpyHostToDevice);
        const int size_blocks = 1024;
        // NOTE(review): this undercounts by one block (and yields <= 0
        // blocks for BUFFER_SIZE < 2048, which makes the launch fail),
        // leaving the tail samples unfiltered.
        int num_blocks = BUFFER_SIZE / size_blocks - 1;
        //calling out GPU function(kernel)
        filterr <<<num_blocks, size_blocks >>> (buffer_d, buffer_dd, filtr_gpu);
        //transfering data from device to host
        cudaMemcpy(buffer, buffer_dd, BUFFER_SIZE * sizeof(int8_t), cudaMemcpyDeviceToHost);
        //writing calculated data to a file
        fwrite(buffer, wavHeader.Subchunk2Size, 1, output);
        cudaFree(buffer_d);
        cudaFree(buffer_dd);
        cudaFree(filtr_gpu);
        delete[] buffer;
        buffer = nullptr;
        filelength = getFileSize(wavFile);
        // dump the parsed header fields for inspection
        cout << "File is :" << filelength << " bytes." << endl;
        cout << "RIFF header :" << wavHeader.RIFF[0] << wavHeader.RIFF[1] << wavHeader.RIFF[2] << wavHeader.RIFF[3] << endl;
        cout << "WAVE header :" << wavHeader.WAVE[0] << wavHeader.WAVE[1] << wavHeader.WAVE[2] << wavHeader.WAVE[3] << endl;
        cout << "FMT :" << wavHeader.fmt[0] << wavHeader.fmt[1] << wavHeader.fmt[2] << wavHeader.fmt[3] << endl;
        cout << "Data size :" << wavHeader.ChunkSize << endl;
        cout << "Sampling Rate :" << wavHeader.SamplesPerSec << endl;
        cout << "Number of bits used :" << wavHeader.bitsPerSample << endl;
        cout << "Number of channels :" << wavHeader.NumOfChan << endl;
        cout << "Number of bytes per second :" << wavHeader.bytesPerSec << endl;
        cout << "Data length :" << wavHeader.Subchunk2Size << endl;
        cout << "Audio Format :" << wavHeader.AudioFormat << endl;
        cout << "Block align :" << wavHeader.blockAlign << endl;
        cout << "Data string :" << wavHeader.Subchunk2ID[0] << wavHeader.Subchunk2ID[1] << wavHeader.Subchunk2ID[2] << wavHeader.Subchunk2ID[3] << endl;
    }
    fclose(wavFile);
    fclose(output);
    return 0;
}
// find the file size
// Return the size of an already-open file in bytes, restoring the
// read position to the start of the file before returning.
int getFileSize(FILE* inFile)
{
    fseek(inFile, 0, SEEK_END);
    int fileSize = (int)ftell(inFile);
    fseek(inFile, 0, SEEK_SET);
    return fileSize;
}
//calculating lowpass filter impulse response
// Build a 31-tap low-pass FIR (truncated sinc) for the requested cutoff
// frequency. The caller owns the returned buffer (release with delete[]).
double* filter(wav_hdr wavHeader, double limit_freq)
{
    const int taps = 31;
    double sampling_freq = wavHeader.SamplesPerSec;
    // Normalized cutoff used by the sinc terms below.
    double fc = limit_freq / sampling_freq / 2;
    double* coeff = new double[taps];
    int idx = 0;
    // Left half of the (symmetric) impulse response: k = -15 .. -1.
    for (int k = -taps / 2; k < 0; ++k, ++idx)
        coeff[idx] = sin(2 * 3.1415 * fc * k) / (3.1415 * k);
    // Center tap: limit of sin(2*pi*fc*k)/(pi*k) as k -> 0.
    coeff[idx++] = 2 * fc;
    // Right half: k = 1 .. 15.
    for (int k = 1; k <= taps / 2; ++k, ++idx)
        coeff[idx] = sin(2 * 3.1415 * fc * k) / (3.1415 * k);
    return coeff;
}
6,974 | /*
* initialization of the change flag
*/
namespace nscale { namespace gpu {
// Single-thread kernel that clears the device-side change flag before an
// iterative pass; intended to be launched as <<<1,1>>>.
__global__ void init_change( bool *change ) {
*change = false;
}
}}
|
6,975 |
// Empty placeholder namespaces (no declarations in this translation unit).
namespace Mochimazui {
namespace Rasterizer {
} // end of namespace Rasterizers
} // end of namespace Mochimazui
|
6,976 | #include "includes.h"
// One Jacobi-style relaxation step on an originalMatrixWidth^2 grid:
// interior cells become the average of their four neighbors, edge cells
// are copied unchanged. Assumes a blockDim of (TILE_WIDTH, TILE_WIDTH)
// and row-major storage; startingIndex offsets the flat cell index so a
// large matrix can be processed in slices — TODO confirm with callers.
__global__ void iterate(float* originalMatrixD, float* solutionD, int originalMatrixWidth, int startingIndex) {
// __shared__ float originalMatrixDS [TILE_WIDTH][TILE_WIDTH];
__shared__ float originalMatrixDS [TILE_WIDTH * TILE_WIDTH];
int tx = threadIdx.x;
int ty = threadIdx.y;
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
// Flat cell index: block-linear layout, one cell per thread.
int currentMatrixIndex = blockId * (blockDim.x * blockDim.y) +
(threadIdx.y * blockDim.x) + threadIdx.x;
currentMatrixIndex += startingIndex;
// Stage this thread's cell into shared memory.
// NOTE(review): no bounds check — every thread must map to a valid cell.
originalMatrixDS[ty * TILE_WIDTH + tx] = originalMatrixD[currentMatrixIndex];
// Sync up w/ shared data set up
__syncthreads();
float replaceAmount;
bool onEdge = false;
int XEdgeCheckMod = currentMatrixIndex % originalMatrixWidth;
// X = 0 edge
if ( XEdgeCheckMod == 0) {
onEdge = true;
}
// X = N - 1
else if ( XEdgeCheckMod == (originalMatrixWidth - 1)) {
onEdge = true;
}
// Y = 0
else if (currentMatrixIndex < originalMatrixWidth) {
onEdge = true;
}
// Y = N - 1
else if (currentMatrixIndex >= (originalMatrixWidth * originalMatrixWidth
- originalMatrixWidth)) {
onEdge = true;
}
if (onEdge) {
// Boundary condition: edges keep their current value.
replaceAmount = originalMatrixDS[ty * TILE_WIDTH + tx];
}
else {
// Top and Bottom come from Global memory
float top = originalMatrixD[currentMatrixIndex - originalMatrixWidth];
float bottom = originalMatrixD[currentMatrixIndex + originalMatrixWidth];
float left;
float right;
// Left and right edge come from Global memory
if (tx == 0 && ty == 0) {
left = originalMatrixD[currentMatrixIndex - 1];
}
else {
left = originalMatrixDS[ty * TILE_WIDTH + tx - 1];
}
if ((ty == TILE_WIDTH - 1) && (tx == TILE_WIDTH - 1)) {
right = originalMatrixD[currentMatrixIndex + 1];
}
else {
right = originalMatrixDS[ty * TILE_WIDTH + tx + 1];
}
// 4-point stencil average (Jacobi update).
replaceAmount = (left + right + top + bottom) / 4;
}
solutionD[currentMatrixIndex] = replaceAmount;
}
6,977 | #include <cuda.h>
#include <curand_kernel.h>
#include <stdio.h>
// SAXPY kernel: y[i] = a * x[i] + y[i] for every i in [0, n).
// One element per thread; the guard handles the grid tail when n is not
// a multiple of the block size.
__global__
void saxpy(int n, float a, float *x, float *y)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    y[i] = a*x[i] + y[i];
}
// Fill `random` with `size` pseudo-random values in [0, 1], drawn with
// rand() (seed with srand() beforehand for reproducibility).
void random_float(float* random, int size)
{
    int i = 0;
    while (i < size)
    {
        random[i] = (float)rand() / (float)(RAND_MAX);
        ++i;
    }
}
// Interactive SAXPY driver: queries device limits, reads N and A from
// stdin, allocates and fills host/device vectors, launches the kernel,
// and copies the result back with full error checking on every CUDA call.
int main(void)
{
int N;
float A;
int nDevices;
int max_threads_per_blok = 0;
int max_grid_size = 0;
int max_thread_blocks = 0;
// Error code to check return values for CUDA calls
cudaError_t err = cudaSuccess;
cudaGetDeviceCount(&nDevices);
printf("cudaGetDeviceCount: %d\n", nDevices);
printf("There are %d CUDA devices.\n", nDevices);
for (int i = 0; i < nDevices; i++)
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, i);
printf("Device Number: %d \n", i);
printf("Device name: %s \n ", prop.name);
printf("Block dimensions: %d x %d x %d \n", prop.maxThreadsDim[0],prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Maximum number of threads per block: %d\n", prop.maxThreadsPerBlock);
// NOTE(review): with several devices only the last one's limits survive.
max_threads_per_blok= prop.maxThreadsPerBlock;
printf ("Grid dimensions: %d x %d x %d \n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
max_grid_size = prop.maxGridSize[0];
if (max_grid_size < prop.maxGridSize[1])
{
max_grid_size = prop.maxGridSize[1];
}
else if (max_grid_size < prop.maxGridSize[2])
{
max_grid_size = prop.maxGridSize[2];
}
// grid size give threads number in grid
max_thread_blocks = max_grid_size / max_threads_per_blok; // prop.maxGridSize[0] / prop.maxThreadsDim[0] for this operation used x dimension
printf (" Maximum number of thread blocks for x = %d \n", max_thread_blocks);
}
printf("Please input an N value: ");
scanf("%d", &N);
printf("Please input an A value: ");
scanf("%f", &A);
float *h_x, *h_y, *d_x, *d_y;
size_t size = N * sizeof(float);
// Allocate the host input x
h_x = (float *)malloc(size);
// Allocate the host input y
h_y = (float *)malloc(size);
// Verify that allocations succeeded
if (h_x == NULL || h_y == NULL)
{
fprintf(stderr, "Failed to allocate host x and y\n");
exit(EXIT_FAILURE);
}
random_float(h_x, N);
random_float(h_y, N);
d_x = NULL;
err = cudaMalloc((void **)&d_x, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device x (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
d_y = NULL;
err = cudaMalloc((void **)&d_y, size);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device y (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy x from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy y from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// NOTE(review): block count assumes 1024 threads/block here, while the
// per-block thread count below is derived as N/number_of_blocks — the two
// are consistent only when max_threads_per_blok is also 1024.
int number_of_blocks = (N+1023)/1024;
printf("Print max_threads %d \n " , (max_thread_blocks * max_threads_per_blok) );
if( N <= (max_thread_blocks * max_threads_per_blok)) // cannot be greater than the total number of threads
{
int number_of_threads_per_block = (N/number_of_blocks);
//This control is added to avoid missing the number of threads when integer does not give value when number is divided.
if (N % number_of_blocks != 0 && number_of_threads_per_block < 1024)
{
number_of_threads_per_block = number_of_threads_per_block+1;
}
if (number_of_blocks <= max_thread_blocks )
{
printf (" saxpy <<<number_of_blocks = %d , number_of_threads_per_block = %d >>>\n ",number_of_blocks ,number_of_threads_per_block);
saxpy<<<number_of_blocks ,number_of_threads_per_block >>>(N, A, d_x, d_y);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch saxpy kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
}
else
{
// NOTE(review): when N is too large the kernel is skipped but the
// device-to-host copy below still runs, returning the unmodified y.
printf ("N number is too large, please enter a smaller number\n");
}
err = cudaMemcpy(h_y, d_y, size, cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy y from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_x);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device x (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_y);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free device y (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(h_x);
free(h_y);
}
|
6,978 | /*
Parallel reduction device function written using Cooperative Groups. When the threads of a group
call it, they cooperatively compute the sum of the values passed by each thread in the group
(through the val argument).
*/
#include <iostream>/* cout */
#include <cooperative_groups.h> /* thread_groups */
#include <stdio.h>/* printf */
using namespace cooperative_groups;
using u64_t = unsigned long long int;
// Print a formatted summary of every CUDA device visible to the runtime:
// compute capability, memory clock/bus/bandwidth, SM counts and the
// launch-related limits. Output goes to stdout; purely informational.
void get_device_properties(){
int32_t device_cnt = 0;
cudaGetDeviceCount(&device_cnt);
cudaDeviceProp device_prop;
for (int i = 0; i < device_cnt; i++) {
cudaGetDeviceProperties(&device_prop, i);
std::cout << "+-------------------------------------------------------------------------------+\n";
printf("| Device id: %d\t", i);
printf(" Device name: %s\t", device_prop.name);
printf(" Compute capability: %d.%d\n", device_prop.major, device_prop.minor);
std::cout << std::endl;
printf("| Memory Clock Rate [KHz]: %d\n",
device_prop.memoryClockRate);
printf("| Memory Bus Width [bits]: %d\n",
device_prop.memoryBusWidth);
// Theoretical peak: 2 (DDR) * clock * bus-width-in-bytes.
printf("| Peak Memory Bandwidth [GB/s]: %f\n",
2.0*device_prop.memoryClockRate*(device_prop.memoryBusWidth/8)/1.0e6);
printf("| L2 size [KB]: %d\n",
device_prop.l2CacheSize/1024);
std::cout << std::endl;
printf("| Number of SMs: %d\n",
device_prop.multiProcessorCount);
printf("| Max. number of threads per SM: %d\n",
device_prop.maxThreadsPerMultiProcessor);
printf("| Concurrent kernels: %d\n",
device_prop.concurrentKernels);
printf("| warpSize: %d\n",
device_prop.warpSize);
printf("| maxThreadsPerBlock: %d\n",
device_prop.maxThreadsPerBlock);
printf("| maxThreadsDim[0]: %d\n",
device_prop.maxThreadsDim[0]);
printf("| maxGridSize[0]: %d\n",
device_prop.maxGridSize[0]);
printf("| pageableMemoryAccess: %d\n",
device_prop.pageableMemoryAccess);
printf("| concurrentManagedAccess: %d\n",
device_prop.concurrentManagedAccess);
printf("| Number of async. engines: %d\n",
device_prop.asyncEngineCount);
std::cout << "+-------------------------------------------------------------------------------+\n";
}
}
// Cooperative tree reduction within a thread group (typically a warp
// tile). `temp` must provide g.size() ints of shared scratch space, and
// g.size() should be a power of two for full coverage.
template <typename group_t>
__device__
int reduce_sum_warp(group_t g, int *temp, int val)
{
int lane = g.thread_rank();
// Each iteration halves the number of active threads
// Each thread adds its partial sum[i] to sum[lane+i]
#pragma unroll
for (int i = g.size() / 2; i > 0; i /= 2)
{
temp[lane] = val;
g.sync(); // wait for all threads to store
if (lane < i) val += temp[lane + i];
g.sync(); // wait for all threads to load
}
return val; // note: only thread 0 will return full sum
}
// Cooperative tree reduction over an arbitrary thread_group. `temp` must
// hold t_group.size() ints of shared memory; only the rank-0 thread
// returns the complete group sum.
__device__
int reduce_sum(thread_group t_group, int *temp, int value){
    const int rank = t_group.thread_rank();
    for(size_t active = t_group.size() / 2; active > 0; active /= 2){
        temp[rank] = value;          // publish this thread's partial
        t_group.sync();              // wait for all threads to store
        if(rank < active)
            value += temp[rank + active];
        t_group.sync();              // all loads done before the next store
    }
    return value;
}
// Per-thread partial sum over input[0..n). The body is read as int4
// vectors (16-byte coalesced loads) with a grid-stride loop; the global
// thread 0 additionally picks up the scalar tail so elements are not
// lost when n is not a multiple of 4 (the original silently dropped them).
__device__
int thread_sum(int *input, int n){
    int sum = 0;
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for(int i = index; i < n / 4; i += stride){
        int4 in = ((int4*)input)[i];
        sum += in.x + in.y + in.z + in.w;
    }
    // Tail fix: fold in the last n % 4 elements exactly once.
    if (index == 0)
        for (int i = n - (n % 4); i < n; ++i)
            sum += input[i];
    return sum;
}
// Block-level sum kernel: each thread computes a strided partial sum,
// the block reduces it in dynamic shared memory (blockDim.x ints must be
// provided at launch), and rank 0 atomically adds the block total to *sum.
__global__
void sum_kernel_block(int *sum, int *input, int n){
u64_t my_sum = thread_sum(input, n);
extern __shared__ int temp[];
auto group = this_thread_block();
//auto group = tiled_partition<32>(this_thread_block());
// NOTE(review): reduce_sum takes/returns int, and atomicAdd targets an
// int — the u64_t locals narrow silently, so large sums can overflow.
u64_t block_sum = reduce_sum(group, temp, my_sum);
if(group.thread_rank() == 0)
atomicAdd(sum, block_sum);
}
// Driver: sums 2^23 ones with sum_kernel_block using managed memory,
// prefetching the data to the GPU before the launch.
int main(){
int n = 1 << 23;
std::cout << n << std::endl;
int blockSize = 256;
int numBlocks = (n + blockSize - 1) / blockSize;
// Dynamic shared memory: one int per thread for the block reduction.
int sharedBytes = blockSize * sizeof(int);
int *sum; int *data;
cudaMallocManaged(&sum, sizeof(int));
cudaMallocManaged(&data, n * sizeof(int));
// NOTE(review): std::fill_n needs <algorithm>; it compiles here only via
// transitive includes.
std::fill_n(data, n, 1);
cudaMemset(sum, 0, sizeof(int));
// Prefetch the data to the GPU
int device = -1;
cudaGetDevice(&device);
cudaMemPrefetchAsync(data, n * sizeof(int), device, NULL);
std::cout << "Launching kernel <<<" << numBlocks << ", " << blockSize << ">>>" << std::endl;
sum_kernel_block<<<numBlocks, blockSize, sharedBytes>>>(sum, data, n);
// Synchronize before the host reads the managed result.
cudaDeviceSynchronize();
std::cout << sum << " - " << *sum << " - " << &sum << std::endl;
}
6,979 | #include "includes.h"
// C = A * B where A is (row_C x col_A), B is (col_A x col_B) and C is
// (row_C x col_C). One output element per thread; the guard handles
// partial edge blocks. Fix: C is now indexed with its own pitch col_C
// (the original used col_B, which mis-addresses C whenever col_B != col_C;
// identical otherwise, since C = A*B requires col_B == col_C).
__global__ void matrix_mul_matrix(float *A, float *B, float *C, int col_A, int col_B, int row_C, int col_C){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < row_C && col < col_C) {
        float sum = 0.0f;
        // Dot product of row `row` of A with column `col` of B.
        for (int k = 0; k < col_A; ++k) {
            sum += A[row * col_A + k] * B[k * col_B + col];
        }
        C[row * col_C + col] = sum;
    }
}
6,980 | #define TILE_DIM 32
#define BLOCK_ROWS 4
// Tiled matrix transpose: result (cols x rows) = matrix (rows x cols)^T.
// Assumes a blockDim of (TILE_DIM, BLOCK_ROWS) with each thread handling
// TILE_DIM/BLOCK_ROWS rows of the tile — TODO confirm launch config at
// the call site. The +1 padding on the tile avoids shared-memory bank
// conflicts on the transposed (column-wise) reads.
template<typename T>
__device__ void transpose(const T* matrix, T* result,
const int rows, const int cols) {
__shared__ T tile[TILE_DIM][TILE_DIM + 1];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// (row, col) index into the transposed output; (srcRow, srcCol) into the input.
int row = by * TILE_DIM + ty;
int col = bx * TILE_DIM + tx;
int srcRow = bx * TILE_DIM + ty;
int srcCol = by * TILE_DIM + tx;
if (srcCol < cols) {
#pragma unroll
for (int i = 0; i < TILE_DIM && srcRow + i < rows; i += BLOCK_ROWS) {
tile[ty + i][tx] = matrix[(srcRow + i) * cols + srcCol];
}
}
__syncthreads();
if (col < rows) {
#pragma unroll
for (int i = 0; i < TILE_DIM && row + i < cols; i += BLOCK_ROWS) {
result[(row + i) * rows + col] = tile[tx][ty + i];
}
}
}
6,981 | #include <cuda.h>
#include <stdio.h>
// main routine
// Benchmark: for exponentially growing buffers, time cudaMalloc plus the
// host-to-device copy and print the elapsed time with a human-readable size.
int main() {
    float time;
    cudaEvent_t start, stop;
    for (size_t size = 32; size < 1024 * 1024 * 1024; size *= 2) {
        float* d_Data;
        float* h_Data = new float[size];
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        cudaMalloc((void**)&d_Data, size * sizeof(float));
        // Bug fix: the copy length was `size` (the element count), so only a
        // quarter of the buffer was transferred; the byte count is
        // size * sizeof(float).
        cudaMemcpy(d_Data, h_Data, size * sizeof(float), cudaMemcpyHostToDevice);
        cudaDeviceSynchronize();
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time, start, stop);
        if (size > 1024 * 1024) {
            printf("Allocation to device: %fms with size %dMB\n", time,
                   static_cast<int>((size * sizeof(float)) / (1024 * 1024)));
        } else if (size > 1024) {
            printf("Allocation to device: %fms with size %dKB\n", time,
                   static_cast<int>((size * sizeof(float)) / 1024));
        } else {
            printf("Allocation to device: %fms with size %dB\n", time,
                   static_cast<int>(size * sizeof(float)));
        }
        // Bug fix: destroy the per-iteration events (they leaked before).
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        delete[] h_Data;
        cudaFree(d_Data);
    }
    return 0;
}
6,982 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <fstream>
using namespace std;
// Write basic properties of every visible CUDA device to gpuinfo.dat.
int main()
{
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    ofstream fout;
    fout.open("gpuinfo.dat");
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        fout << "$Device Number:\t" << dev << endl
             << "$Device name:\t" << prop.name << endl
             << "$Memory Clock Rate (KHz):\t" << prop.memoryClockRate << endl
             << "$Memory Bus Width (bits):\t" << prop.memoryBusWidth << endl
             // Theoretical peak: 2 (DDR) * clock * bus width in bytes.
             << "$Peak Memory Bandwidth (GB/s):\t" << 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6;
    }
    fout.close();
}
6,983 | #include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include <time.h>
#define HUGE_PRIME 1000033
//#define HUGE_PRIME 233
// MinHash finalization: one thread per signature slot takes the minimum
// over the bucket's candidate hashes. tmpRes is laid out [point][sig]
// (stride sigSize); every stored hash is < HUGE_PRIME, so HUGE_PRIME is
// a safe identity for the minimum.
__global__ void signReduce(
int bucketSize,
int sigSize,
unsigned int *tmpRes,
int *d_sig) {
    int slot = blockDim.x*blockIdx.x + threadIdx.x;
    if (slot >= sigSize)
        return;
    int best = HUGE_PRIME;
    for (int i = slot; i < (bucketSize-1)*sigSize; i += sigSize) {
        if (tmpRes[i] < best)
            best = tmpRes[i];
    }
    d_sig[slot] = best;
}
// MinHash candidate hashes: h_sig(point) = (a*point + b) mod HUGE_PRIME
// for every (point, signature) pair in the bucket. bucket[0] is a length
// header, so points start at bucket[1]; one thread per (point, sig) pair.
__global__ void signCurrent(
int bucketSize,
int sigSize,
const int* coeffA,
const int* coeffB,
const int *bucket,
unsigned int *tmpRes) {
    int tid = blockDim.x*blockIdx.x + threadIdx.x;
    if (tid >= sigSize*(bucketSize-1))
        return;
    int pointID = bucket[(tid/sigSize)+1];
    int sigID = tid % sigSize;
    // 64-bit intermediate keeps a*point from overflowing before the mod.
    long long scaled = (coeffA[sigID] * (long long)pointID) % HUGE_PRIME;
    tmpRes[tid] = (int)((scaled + coeffB[sigID]) % HUGE_PRIME);
}
// Mark signature matches against bucket `curbucket`. d_signatures is laid
// out [sig][bucket] (stride numOfbucket); thread tid compares bucket
// (tid % numOfbucket) with curbucket on signature row (tid / numOfbucket)
// and sets tmpRes[tid] on a match. tmpRes must be zeroed by the caller.
__global__ void sigCompare(
int numOfbucket,
int sigSize,
const int curbucket,
const int* d_signatures,
unsigned int *tmpRes) {
    int tid = blockDim.x*blockIdx.x + threadIdx.x;
    if (tid >= numOfbucket*sigSize)
        return;
    int row = tid / numOfbucket;
    if (d_signatures[curbucket + row*numOfbucket] == d_signatures[tid])
        tmpRes[tid] = 1;
}
// Per-bucket match count: one thread per bucket sums that bucket's match
// flags across all signature rows (stride numOfbucket) into count[bucket].
// The caller zeroes `count` before the launch.
__global__ void sigComReduce(
int numOfbucket,
int sigSize,
int* count,
unsigned int *tmpRes) {
    int bucket = blockDim.x*blockIdx.x + threadIdx.x;
    if (bucket >= numOfbucket)
        return;
    int matches = count[bucket];
    for (int i = bucket; i < sigSize*numOfbucket; i += numOfbucket)
        matches += tmpRes[i];
    count[bucket] = matches;
}
// MinHash/LSH pipeline: reads hash coefficients and buckets of point ids
// from text files, computes a 50-wide MinHash signature per bucket on the
// GPU, writes the signatures to output.txt, then compares every bucket's
// signature against all others and prints pairs sharing > 10 slots.
int main(int argc, char **argv) {
FILE * fin=fopen("bucket.txt","r");
FILE * fhash=fopen("hashcoeff.txt","r");
FILE * fout=fopen("output.txt","w");
// sscanf(argv[1],"%d",&k);
// NOTE(review): fopen results are not checked; a missing input file
// crashes on the first fscanf.
clock_t start,end;
int sigSize = 50;
int numOfBucket;
//sscanf(argv[1],"%d",&sigSize);
//printf("sigSize=%d\n",sigSize);
int **Bucket,*coeffA,*coeffB,tmp,*signatures;
// srand((unsigned)time(NULL));
coeffA = (int *)calloc(sigSize,sizeof(int));
coeffB = (int *)calloc(sigSize,sizeof(int));
// data = (float *)calloc(nums*dim,sizeof(float));
for (int i=0; i<sigSize; i++) {
fscanf(fhash,"%d",&tmp);
coeffA[i] = tmp;
}
for (int i=0; i<sigSize; i++) {
fscanf(fhash,"%d",&tmp);
coeffB[i] = tmp;
}
// Bucket[i][0] holds the entry count + 1; point ids follow from index 1.
fscanf(fin,"%d",&numOfBucket);
Bucket = (int **)calloc(numOfBucket,sizeof(int*));
for (int i=0; i<numOfBucket; i++) {
fscanf(fin,"%d",&tmp);
Bucket[i] = (int *)calloc(tmp+1,sizeof(int));
Bucket[i][0] = tmp+1;
for (int j=1; j<Bucket[i][0]; j++) {
fscanf(fin,"%d",&tmp);
Bucket[i][j] = tmp;
}
}
// Caculate the signatures
signatures = (int *)calloc(sigSize*numOfBucket,sizeof(int));
// NOTE(review): sizeof(signatures) is the pointer size (8 bytes), not the
// array size — harmless only because calloc already zeroed the buffer.
memset(signatures,0,sizeof(signatures));
int *d_bucket,*d_cA,*d_cB,*d_sig,*tmpSig;
unsigned int *tmpRes;
tmpSig = (int *)calloc(sigSize,sizeof(int));
cudaMalloc((void**)&d_sig, sigSize*sizeof(int));
cudaMalloc((void**)&d_cA, sigSize*sizeof(int));
cudaMalloc((void**)&d_cB, sigSize*sizeof(int));
cudaMemcpy(d_cA, coeffA, sigSize*sizeof(int), cudaMemcpyDefault);
cudaMemcpy(d_cB, coeffB, sigSize*sizeof(int), cudaMemcpyDefault);
start = clock();
// Phase 1: per-bucket MinHash signatures (signCurrent + signReduce).
for (int i=0; i<numOfBucket; i++) {
// printf("current bucket: %d %d \n",i,Bucket[i][0]);
cudaMemset(d_sig, 0, sigSize*sizeof(int));
// printf("test 1 %d %d \n",i,Bucket[i][0]);
// NOTE(review): per-iteration cudaMalloc/cudaFree is slow; a single
// max-sized allocation reused across buckets would be cheaper.
cudaMalloc((void**)&tmpRes, sigSize*(Bucket[i][0]-1)*sizeof(int));
cudaMalloc((void**)&d_bucket, Bucket[i][0]*sizeof(int));
cudaMemcpy(d_bucket, Bucket[i], Bucket[i][0]*sizeof(int), cudaMemcpyDefault);
// printf("test 2 %d %d \n",i,Bucket[i][0]);
int blockSize = 256;
int gridSize = ((Bucket[i][0]-1)*sigSize+blockSize-1)/blockSize;
signCurrent<<<gridSize,blockSize>>>(Bucket[i][0],sigSize,d_cA,d_cB,d_bucket,tmpRes);
signReduce<<<1,blockSize>>>(Bucket[i][0],sigSize,tmpRes,d_sig);
// printf("test 3 %d %d \n",i,Bucket[i][0]);
cudaMemcpy(tmpSig, d_sig, sigSize*sizeof(int), cudaMemcpyDeviceToHost);
//cudaDeviceSynchronize();
for (int j=0; j<sigSize; j++) {
fprintf(fout,"%d ",tmpSig[j]);
signatures[i+j*numOfBucket] = tmpSig[j];
}
fprintf(fout,"\n");
// do something with tmpSig
cudaFree(d_bucket);
cudaFree(tmpRes);
}
cudaFree(d_cA);
cudaFree(d_cB);
cudaFree(d_sig);
end = clock();
printf("running time for signatures: %.2f\n", (double)(end-start)/CLOCKS_PER_SEC);
fprintf(fout,"running time for signatures: %.2f\n", (double)(end-start)/CLOCKS_PER_SEC);
printf("\n\n");
// for (int i=0; i<sigSize*numOfBucket; i++)
// printf("%d ",signatures[i]);
// printf("\n\n");
// Compare the signatures
// Phase 2: all-pairs signature comparison (sigCompare + sigComReduce).
int *d_signatures,*count,*d_count;
count = (int *)calloc(numOfBucket,sizeof(int));
cudaMalloc((void**)&d_signatures, sigSize*numOfBucket*sizeof(int));
cudaMemcpy(d_signatures, signatures, sigSize*numOfBucket*sizeof(int), cudaMemcpyDefault);
cudaMalloc((void**)&tmpRes, sigSize*numOfBucket*sizeof(int));
cudaMalloc((void**)&d_count, numOfBucket*sizeof(int));
for (int i=0; i<numOfBucket; i++) {
// printf("\n");
// for (int j=0; j<sigSize; j++) {
// printf("%d ",signatures[i+j*numOfBucket]);
// }
// printf("\n");
// cudaMemcpy(d_sig, tmpSig, sigSize*sizeof(int), cudaMemcpyDefault);
cudaMemset(tmpRes, 0, sigSize*numOfBucket*sizeof(int));
cudaMemset(d_count, 0, numOfBucket*sizeof(int));
int blockSize = 256;
int gridSize = (numOfBucket*sigSize+blockSize-1)/blockSize;
//printf("CudaINFO:\n");
sigCompare<<<gridSize,blockSize>>>(numOfBucket,sigSize,i,d_signatures,tmpRes);
gridSize = (numOfBucket+blockSize-1)/blockSize;
sigComReduce<<<gridSize,blockSize>>>(numOfBucket,sigSize,d_count,tmpRes);
cudaMemcpy(count, d_count, numOfBucket*sizeof(int), cudaMemcpyDefault);
//printf("CountINFO:\n");
// Report only pairs (i, j) with j > i sharing more than 10 slots.
for (int j=i+1; j<numOfBucket; j++)
if (count[j]>10)
printf("%d %d %d \n",i,j,count[j]);
}
cudaFree(tmpRes);
// NOTE(review): d_sig was already freed above — this is a double free;
// d_signatures and d_count are never freed, and the FILE*s below are the
// only resources released explicitly.
cudaFree(d_sig);
// cudaFree(d_signatures);
// free(countM);
free(coeffA);
free(coeffB);
fclose(fin);
fclose(fhash);
fclose(fout);
return 0;
}
|
6,984 | //
// Created by depaulsmiller on 9/9/20.
//
#include <chrono>
#include <algorithm>
#include <vector>
#include <iostream>
#include <functional>
#include <unordered_map>
#include <set>
#include <atomic>
#include <thread>
int main() {
int size = 136 * 512;
std::vector<std::pair<unsigned, unsigned>> vec;
vec.reserve(size);
for (int i = 0; i < size; i++) {
vec.push_back({rand(), 1});
}
std::vector<std::pair<unsigned, unsigned>> vec2;
vec2.reserve(size);
float *prob = new float[10000];
std::atomic_bool go{false};
std::atomic_int done{0};
std::vector<std::thread> threads;
int num_threads = 12;
threads.reserve(num_threads);
std::atomic_int caught;
for (int i = 0; i < num_threads; i++) {
threads.push_back(std::thread([&go, &done, &prob, &caught](std::pair<unsigned, unsigned> *data, int startidx, int endidx) {
while (!go);
int tmp = 0;
for (int i = startidx; i < endidx; i++) {
if (prob[data[i].first % 10000] < 0.01)
data[i].second = 1;
}
//caught.fetch_add(tmp);
std::atomic_thread_fence(std::memory_order_seq_cst);
done++;
//std::cerr << tmp << std::endl;
}, vec.data(), i * size / num_threads, i * size / num_threads + size / num_threads));
}
auto start = std::chrono::high_resolution_clock::now();
go = true;
while (done != num_threads);
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double> dur = end - start;
for(auto& t : threads){
t.join();
}
std::cerr << dur.count() * 1e3 << " ms" << std::endl;
std::cerr << vec.size() / dur.count() / 1e6 << "Mops" << std::endl;
} |
6,985 | #include "Arrays.cuh"
#include "CrackingDES.cuh"
// Abort with file/line context when a CUDA call fails; intended to be
// wrapped in a gpuErrchk-style macro passing __FILE__ / __LINE__.
// Fix: `file` is now const char* — string literals (e.g. __FILE__) do not
// convert to char* in modern C++.
inline void gpuAssert(cudaError_t code, const char *file, int line)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        exit(code);
    }
}
// Outcome of a cracking run, written by the GPU thread whose key matched.
struct result
{
bool isCracked; // true once some candidate key reproduced the cipher text
int keyNumber; // enumeration index of that key (valid only if isCracked)
};
// Host-side DES encryption over bit arrays: `message` and `key` are
// MSGBITLEN-element arrays of 0/1 shorts; the ciphertext bits are written
// to `cipherMessage`. Follows the classic DES schedule: PC-1/PC-2 key
// schedule, IP, 16 Feistel rounds with E-expansion and S-boxes, final IP^-1.
void encipherTextCPU(short * message, short * key, short * cipherMessage)
{
short C[SHIFTSLEN+1][BLOCKSLEN];
short D[SHIFTSLEN+1][BLOCKSLEN];
short L[IPMSGCOUNT+1][MSGBITLEN/2];
short R[IPMSGCOUNT+1][MSGBITLEN/2];
short expandedR[EXTENDEDLEN];
short sboxes[SBOXCOUNT][SBOXSIZE];
short keys[KEYCOUNT][PC2LEN];
// Key schedule: PC-1 splits the key into halves C and D.
for(int i = 0; i < BLOCKSLEN; i++)
{
C[0][i] = key[PC1[i]-1];
D[0][i] = key[PC1[BLOCKSLEN + i]-1];
}
// Rotate each half per the shift table, then PC-2 selects the round keys.
for(int i = 1; i < SHIFTSLEN+1; i++)
{
for(int j = 0; j < BLOCKSLEN - leftShifts[i]; j++)
{
C[i][j] = C[i-1][j + leftShifts[i]];
D[i][j] = D[i-1][j + leftShifts[i]];
}
for(int j = 0; j < leftShifts[i]; j++)
{
C[i][j + BLOCKSLEN - leftShifts[i]] = C[i-1][j];
D[i][j + BLOCKSLEN - leftShifts[i]] = D[i-1][j];
}
for(int j = 0; j < PC2LEN; j++)
{
if(PC2[j] - 1 < BLOCKSLEN)
keys[i-1][j] = C[i][PC2[j]-1];
else
keys[i-1][j] = D[i][PC2[j]-BLOCKSLEN-1];
}
}
// Initial permutation splits the message into L0 and R0.
for(int i = 0; i < MSGBITLEN/2; i++)
{
L[0][i] = message[IP[i]-1];
R[0][i] = message[IP[MSGBITLEN/2 + i]-1];
}
// 16 Feistel rounds.
for(int i = 1; i < IPMSGCOUNT+1; i++)
{
// E-expansion of R XORed with the round key.
for(int j = 0; j < EXTENDEDLEN; j++)
expandedR[j] = R[i-1][selectionTable[j] - 1] ^ keys[i-1][j];
// S-box substitution: 6 input bits -> 4 output bits per box,
// unpacked here into individual bits (MSB first).
for(int j = 0; j < SBOXCOUNT; j++)
{
short row = 2 * expandedR[j*SBLOCKSIZE] + expandedR[j*SBLOCKSIZE + 5];
short column = 8 * expandedR[j*SBLOCKSIZE + 1]
+ 4 * expandedR[j*SBLOCKSIZE + 2] + 2 * expandedR[j*SBLOCKSIZE + 3]
+ expandedR[j*SBLOCKSIZE + 4];
short sValue = S[j][row*SCOLUMNS + column];
short mask = 1;
for(int k = 0; k < SBOXSIZE; k++)
sboxes[j][SBOXSIZE - k -1] = (sValue & (mask << k)) >> k;
}
// L_i = R_{i-1}; R_i = L_{i-1} XOR P(sbox output) — the (+ ... ) % 2
// below is XOR on 0/1 values, with P applied via the index arithmetic.
for(int j = 0; j < MSGBITLEN/2; j++)
{
L[i][j] = R[i-1][j];
R[i][j] = (L[i-1][j] + sboxes[(P[j]-1) / SBOXSIZE][(P[j]-1) % SBOXSIZE]) % 2;
}
}
// Final permutation IP^-1 over the swapped halves (R16, L16).
for(int i = 0; i < MSGBITLEN; i++)
{
if(reverseIP[i] < MSGBITLEN/2)
cipherMessage[i] = R[16][reverseIP[i] - 1];
else
cipherMessage[i] = L[16][reverseIP[i] - 1 - MSGBITLEN/2];
}
}
// Device-side DES check: runs the same DES rounds as encipherTextCPU
// (using the d_* constant tables) but, instead of emitting ciphertext,
// compares the final bits against `cipherMessage` and sets *result to
// true only if every bit matches (early-exits on the first mismatch).
__device__ void encipherTextGPU(short * message, short * key, short * cipherMessage, bool * result)
{
short C[SHIFTSLEN+1][BLOCKSLEN];
short D[SHIFTSLEN+1][BLOCKSLEN];
short L[IPMSGCOUNT+1][MSGBITLEN/2];
short R[IPMSGCOUNT+1][MSGBITLEN/2];
short expandedR[EXTENDEDLEN];
short sboxes[SBOXCOUNT][SBOXSIZE];
short keys[KEYCOUNT][PC2LEN];
// Key schedule: PC-1 splits the key into halves C and D.
for(int i = 0; i < BLOCKSLEN; i++)
{
C[0][i] = key[d_PC1[i]-1];
D[0][i] = key[d_PC1[BLOCKSLEN + i]-1];
}
// Rotate each half per the shift table, then PC-2 selects the round keys.
for(int i = 1; i < SHIFTSLEN+1; i++)
{
for(int j = 0; j < BLOCKSLEN - d_leftShifts[i]; j++)
{
C[i][j] = C[i-1][j + d_leftShifts[i]];
D[i][j] = D[i-1][j + d_leftShifts[i]];
}
for(int j = 0; j < d_leftShifts[i]; j++)
{
C[i][j + BLOCKSLEN - d_leftShifts[i]] = C[i-1][j];
D[i][j + BLOCKSLEN - d_leftShifts[i]] = D[i-1][j];
}
for(int j = 0; j < PC2LEN; j++)
{
if(d_PC2[j] - 1 < BLOCKSLEN)
keys[i-1][j] = C[i][d_PC2[j]-1];
else
keys[i-1][j] = D[i][d_PC2[j]-BLOCKSLEN-1];
}
}
// Initial permutation splits the message into L0 and R0.
for(int i = 0; i < MSGBITLEN/2; i++)
{
L[0][i] = message[d_IP[i]-1];
R[0][i] = message[d_IP[MSGBITLEN/2 + i]-1];
}
// 16 Feistel rounds (see encipherTextCPU for the per-step commentary).
for(int i = 1; i < IPMSGCOUNT+1; i++)
{
for(int j = 0; j < EXTENDEDLEN; j++)
expandedR[j] = R[i-1][d_selectionTable[j] - 1] ^ keys[i-1][j];
for(int j = 0; j < SBOXCOUNT; j++)
{
short row = 2 * expandedR[j*SBLOCKSIZE] + expandedR[j*SBLOCKSIZE + 5];
short column = 8 * expandedR[j*SBLOCKSIZE + 1]
+ 4 * expandedR[j*SBLOCKSIZE + 2] + 2 * expandedR[j*SBLOCKSIZE + 3]
+ expandedR[j*SBLOCKSIZE + 4];
short sValue = d_S[j][row*SCOLUMNS + column];
short mask = 1;
for(int k = 0; k < SBOXSIZE; k++)
sboxes[j][SBOXSIZE - k -1] = (sValue & (mask << k)) >> k;
}
for(int j = 0; j < MSGBITLEN/2; j++)
{
L[i][j] = R[i-1][j];
R[i][j] = (L[i-1][j] + sboxes[(d_P[j]-1) / SBOXSIZE][(d_P[j]-1) % SBOXSIZE]) % 2;
}
}
// Compare against the target ciphertext through IP^-1 instead of storing it.
*result = true;
for(int i = 0; i < MSGBITLEN; i++)
{
if(d_reverseIP[i] < MSGBITLEN/2)
{
if(R[16][d_reverseIP[i] - 1] != cipherMessage[i])
{
*result = false;
break;
}
}
else if(L[16][d_reverseIP[i] - 1 - MSGBITLEN/2] != cipherMessage[i])
{
*result = false;
break;
}
}
if(*result)
return;
}
// Expand one character into SIGN_SIZE bits (least-significant bit first),
// writing each bit as a 0/1 short into resultArray.
__host__ __device__ void convertSignToBitArray(char sign, short * resultArray)
{
	//memset(resultArray, 0 ,SIGN_SIZE);
	for(int bit = 0; bit < SIGN_SIZE; bit++)
	{
		char mask = 1;
		resultArray[bit] = (sign & (mask << bit)) >> bit;
	}
}
// Encode a string as a fixed-width bit array: the first `length` characters
// of `text` are expanded to SIGN_SIZE bits each; any remaining positions up
// to MAX_TEXT_LEN are padded with the bits of 'a' so the output width is
// always MAX_TEXT_LEN * SIGN_SIZE.
__host__ __device__ void convertTextToBitArray(char * text, int length, short * resultArray)
{
	for(int pos = 0; pos < MAX_TEXT_LEN; pos++)
	{
		char c = (pos < length) ? text[pos] : 'a';
		convertSignToBitArray(c, resultArray + pos*SIGN_SIZE);
	}
}
// Fill resultArray with `length` random letters drawn uniformly from
// {'a', ..., 'a' + signsCount - 1} using rand().
void generateRandomPermutation(int signsCount, int length, char *resultArray)
{
	int i = 0;
	while (i < length)
	{
		resultArray[i] = (char)('a' + rand() % signsCount);
		++i;
	}
}
// Decode `combination` as a base-`signsCount` number, least-significant
// digit first, mapping each digit to a letter starting at 'a'. Exactly
// `length` characters are written (high digits beyond the value are 'a').
__host__ __device__ void generatePermutation(unsigned long long combination, int signsCount, int length, char * resultArray)
{
	for(int pos = 0; pos < length; pos++)
	{
		resultArray[pos] = (char)('a' + (int)(combination % signsCount));
		combination /= signsCount;
	}
}
// Brute-force DES kernel: each thread tests one candidate key, identified
// by its global `position` in the enumeration; `group` offsets the block
// index when the key space needs more than MAXBLOCKCOUNT blocks per launch.
// Launch layout: <<<blocksCount, BLOCKSIZE>>>; the first MSGBITLEN threads
// of each block stage the plain/cipher bit arrays into shared memory.
// Fix: the per-thread scratch buffers are now fixed-size locals instead of
// device-heap new/delete, which was slow and could silently return NULL.
__global__ void CrackingDESKernel(short * _cipherText, short * _plainText, int signsCount, unsigned long long threadsCount, int group, int keyLength, struct result * result)
{
	__shared__ short cipherText[MSGBITLEN];
	__shared__ short plainText[MSGBITLEN];
	unsigned long long position = (blockIdx.x + group * MAXBLOCKCOUNT) * BLOCKSIZE + threadIdx.x;
	if(threadIdx.x < MSGBITLEN)
	{
		cipherText[threadIdx.x] = _cipherText[threadIdx.x];
		plainText[threadIdx.x] = _plainText[threadIdx.x];
	}
	__syncthreads(); // staging must complete before any thread reads shared data
	if(position >= threadsCount)
		return;
	char code[MSGLEN];
	short key[MSGBITLEN];
	bool res = false;
	generatePermutation(position, signsCount, MSGLEN, code);
	convertTextToBitArray(code, keyLength, key);
	encipherTextGPU(plainText, key, cipherText, &res);
	if(res)
	{
		// At most one key reproduces the ciphertext, so this unsynchronized
		// write is race-free in practice.
		result->isCracked = true;
		result->keyNumber = position;
	}
}
// Print an error message to stderr and terminate with exit status 1.
// Fix: `msg` is now const char* so string-literal arguments are well-formed
// in modern C++.
void ERR(const char *msg)
{
	fprintf(stderr,"Error: %s\n", msg);
	exit(1);
}
// Interactive DES-cracking driver: reads an alphabet size, a plaintext and
// a key, encrypts on the CPU to produce the target ciphertext, then
// brute-forces candidate (message, key) combinations on the GPU until the
// ciphertext is reproduced, timing the whole search with CUDA events.
int main()
{
char * plainText = new char[MSGLEN+1];
char * key = new char[MSGLEN+1];
short * plainBitText = new short[MSGBITLEN];
short * cipherBitText = new short[MSGBITLEN];
short * keyBit = new short[MSGBITLEN];
cudaEvent_t timerStart, timerStop;
float timer;
short * d_cipherText, * d_plainText;
int signsCount = 0;
printf("Enter the alphabet size (from 1 to 26).\n");
scanf("%d", &signsCount);
printf("Enter the plain text (maximum 8 signs).\n");
scanf("%s", plainText);
convertTextToBitArray(plainText,8,plainBitText);
printf("Enter the key text (maximum 8 signs).\n");
scanf("%s", key);
int keyLength = strlen(key);
int option = 0;
printf("Choose cracking type: 0 - sequentialy, 1 - randomize.\n");
scanf("%d", &option);
// Produce the reference ciphertext on the host.
convertTextToBitArray(key,keyLength,keyBit);
encipherTextCPU(plainBitText, keyBit, cipherBitText);
printf("Cipher text generated from given text and key, now lets try to crack it.\n");
if(cudaMalloc((void**) &d_cipherText, sizeof(short)*MSGBITLEN) != cudaSuccess)
ERR("cudaMalloc");
if(cudaMemcpy(d_cipherText, cipherBitText, sizeof(short)*MSGBITLEN, cudaMemcpyHostToDevice) != cudaSuccess)
ERR("cudaMemcpy");
if(cudaMalloc((void**) &d_plainText, sizeof(short)*MSGBITLEN) != cudaSuccess)
ERR("cudaMalloc");
char * code = new char[MSGLEN];
struct result * result = new struct result;
result->isCracked = false;
result->keyNumber = -1;
struct result * d_result;
if(cudaMalloc((void**) &d_result, sizeof(struct result)) != cudaSuccess)
ERR("cudaMalloc");
if(cudaMemcpy(d_result, result, sizeof(struct result), cudaMemcpyHostToDevice) != cudaSuccess)
ERR("cudaMemcpy");
// One GPU thread per candidate key: signsCount^keyLength combinations,
// split into groups when the grid would exceed MAXBLOCKCOUNT blocks.
unsigned long long threadsCount = 1;
for(int i = 0; i < keyLength; i++)
threadsCount *= signsCount;
int blocksCount = threadsCount / BLOCKSIZE + 1;
int groupsCount = 1;
if(blocksCount > MAXBLOCKCOUNT)
{
groupsCount = blocksCount / MAXBLOCKCOUNT + 1;
blocksCount = MAXBLOCKCOUNT;
}
unsigned long long messageCombination = 0;
unsigned long long textsCount = 1;
for(int i = 0; i < MSGLEN; i++)
textsCount *= signsCount;
srand(time(NULL));
cudaEventCreate(&timerStart, 0);
cudaEventCreate(&timerStop, 0);
cudaEventRecord(timerStart, 0);
// Outer loop enumerates (or randomizes) candidate plaintexts; the kernel
// enumerates all keys for each one.
// NOTE(review): with option == 1 this loop only terminates on a match.
while(messageCombination < textsCount || option)
{
printf("Cracking iteration %lld of %lld\n",messageCombination, textsCount);
if(!option)
generatePermutation(messageCombination, signsCount, MSGLEN, code);
else
generateRandomPermutation(signsCount, MSGLEN, code);
convertTextToBitArray(code,MSGLEN,plainBitText);
messageCombination++;
if(cudaMemcpy(d_plainText, plainBitText, sizeof(short)*MSGBITLEN, cudaMemcpyHostToDevice) != cudaSuccess)
ERR("cudaMemcpy");
for(int group = 0; group < groupsCount; group++)
{
CrackingDESKernel<<<blocksCount,BLOCKSIZE>>>(d_cipherText, d_plainText, signsCount, threadsCount, group, keyLength, d_result);
gpuErrchk(cudaPeekAtLastError());
if(cudaDeviceSynchronize() != cudaSuccess)
ERR("cudaDeviceSynchronize");
if(cudaMemcpy(result, d_result, sizeof(struct result), cudaMemcpyDeviceToHost) != cudaSuccess)
ERR("cudaMemcpy");
if(result->isCracked)
break;
}
if(result->isCracked)
{
printf("MESSAGE CRACKED\n");
printf("MSG: ");
for(int i=0; i < MSGLEN; i++)
printf("%c",code[i]);
printf("\n");
// Re-derive the matching key text from its enumeration index.
generatePermutation(result->keyNumber, signsCount, MSGLEN, code);
printf("KEY: ");
for(int i=0; i < keyLength; i++)
printf("%c",code[i]);
printf("\n");
break;
}
}
if(cudaEventRecord(timerStop, 0) != cudaSuccess)
ERR("cudaEventRecord");
if(cudaEventSynchronize(timerStop) != cudaSuccess)
ERR("cudaEventSynchronize");
if(cudaDeviceSynchronize() != cudaSuccess)
ERR("cudaDeviceSynchronize");
cudaEventElapsedTime(&timer, timerStart, timerStop);
printf("\n");
printf("TIME = %d s %d ms\n", ((int)timer) / 1000, ((int)timer) % 1000);
cudaEventDestroy(timerStart);
cudaEventDestroy(timerStop);
// NOTE(review): d_result is never freed, and code/result/the five host
// buffers below code are only partially released.
if(cudaFree(d_cipherText) != cudaSuccess)
ERR("cudaFree");
if(cudaFree(d_plainText) != cudaSuccess)
ERR("cudaFree");
delete[] plainText;
delete[] key;
delete[] plainBitText;
delete[] cipherBitText;
delete[] keyBit;
}
6,986 | #include "includes.h"
// Per-cell infection probability on a width x height grid (row-major):
// alpha * (infectious density) + beta * (exposed density), where densities
// are the respective counts divided by the cell's total rat count; cells
// with no rats get probability 0. One thread per cell, 2D launch.
__global__ void compute_infection_prob_kernel(double alpha, double beta, int *infectious_rat_count, int *exposed_rat_count, int *rat_count, double *infection_prob_result, int width, int height) {
	int x = blockIdx.x * blockDim.x + threadIdx.x;
	int y = blockIdx.y * blockDim.y + threadIdx.y;
	if (x >= width || y >= height)
		return;
	int nid = y * width + x;
	int total = rat_count[nid];
	if (total == 0) {
		infection_prob_result[nid] = 0.0;
		return;
	}
	double density_of_exposed = (double)(exposed_rat_count[nid]) / (double)(total);
	double density_of_infectious = (double)(infectious_rat_count[nid]) / (double)(total);
	infection_prob_result[nid] = alpha * density_of_infectious + beta * density_of_exposed;
}
6,987 | #include "includes.h"
__global__ void windowHamming2d(float* idata, int length, int height)
{
    // Fills a length x height row-major buffer with a separable 2D Hamming
    // window: w(x, y) = hamming(y, height) * hamming(x, length).
    // One thread per element; launch a 2D grid covering the buffer.
    // Precondition: length >= 2 and height >= 2 (the denominators below are
    // length-1 / height-1, so a 1-wide dimension would divide by zero — the
    // original code had the same requirement).
    int tidx = threadIdx.x + blockIdx.x*blockDim.x;
    int tidy = threadIdx.y + blockIdx.y*blockDim.y;
    if (tidx < length && tidy < height)
    {
        // FIX: the original mixed double-precision cos() and double literals
        // (0.54, 0.46, 2) into a float kernel, silently promoting the whole
        // expression to double — slow on most GPUs with no accuracy benefit
        // for a window function. Keep everything in float (cosf, f-suffixed
        // literals).
        float wy = 0.54f - 0.46f * cosf(2.0f * tidy * PI_F / (height - 1));
        float wx = 0.54f - 0.46f * cosf(2.0f * tidx * PI_F / (length - 1));
        idata[tidy * length + tidx] = wy * wx;
    }
}
6,988 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <iostream>
//#include <time.h>
//
//#include "neural_network.hh"
//#include "layers/linear_layer.hh"
//#include "layers/relu_activation.hh"
//#include "layers/sigmoid_activation.hh"
//#include "nn_utils/nn_exception.hh"
//#include "nn_utils/bce_cost.hh"
//
//#include "coordinates_dataset.hh"
//
//float computeAccuracy(const Matrix& predictions, const Matrix& targets);
//
//int main() {
//
// srand( time(NULL) );
//
// //batch_size=100, number of batches=21, use 20 batches for training and 1 batch for testing(get accuracy score)
// CoordinatesDataset dataset(100, 21);
// BCECost bce_cost;
//
// NeuralNetwork nn;
// //linear layer with 2 input neuron and 30 output/hidden neurons
// nn.addLayer(new LinearLayer("linear_1", Shape(2, 30)));
// nn.addLayer(new ReLUActivation("relu_1"));
// //linear layer with 30 input neurons and 1 output neuron
// nn.addLayer(new LinearLayer("linear_2", Shape(30, 1)));
// nn.addLayer(new SigmoidActivation("sigmoid_output"));
//
// // network training
// Matrix Y;
// for (int epoch = 0; epoch < 1001; epoch++) {
// float cost = 0.0;
//
// for (int batch = 0; batch < dataset.getNumOfBatches() - 1; batch++) {
// //get training batches as input for forward
// Y = nn.forward(dataset.getBatches().at(batch));
// //get label/target batches as input for backprop
// nn.backprop(Y, dataset.getTargets().at(batch));
// cost += bce_cost.cost(Y, dataset.getTargets().at(batch));
// }
//
// if (epoch % 100 == 0) {
// std::cout << "Epoch: " << epoch
// << ", Cost: " << cost / dataset.getNumOfBatches()
// << std::endl;
// }
// }
//
// // compute accuracy
// Y = nn.forward(dataset.getBatches().at(dataset.getNumOfBatches() - 1));
// Y.copyDeviceToHost();
//
// float accuracy = computeAccuracy(
// Y, dataset.getTargets().at(dataset.getNumOfBatches() - 1));
// std::cout << "Accuracy: " << accuracy << std::endl;
//
// return 0;
//}
//
////count number of correctly predicted values and divide it by the size of output vector
//float computeAccuracy(const Matrix& predictions, const Matrix& targets) {
// int m = predictions.shape.x;
// int correct_predictions = 0;
//
// for (int i = 0; i < m; i++) {
// float prediction = predictions[i] > 0.5 ? 1 : 0;
// if (prediction == targets[i]) {
// correct_predictions++;
// }
// }
//
// return static_cast<float>(correct_predictions) / m;
//}
|
6,989 | #include "cuda_runtime.h"
#include <stdint.h>
__global__ void row_contiguous_kernel(float* dst, const uint8_t* src,
int width, int height)
{
    // Converts an interleaved HWC uint8 image into a planar CHW float image
    // scaled to [0, 1]. The channel index comes from blockIdx.z and the
    // channel count from gridDim.z (the launcher sets grid.z = channels).
    const int px = blockDim.x * blockIdx.x + threadIdx.x;
    const int py = blockDim.y * blockIdx.y + threadIdx.y;
    if (px >= width || py >= height)
        return;  // tail guard

    const int ch = blockIdx.z;
    const int nch = gridDim.z;
    const int pixel = py * width + px;
    dst[(ch * height + py) * width + px] = src[pixel * nch + ch] * (1.f / 255.f);
}
void make_row_contiguous(float* dst, const uint8_t* src,
int width, int height, int channels)
{
    // Host launcher for row_contiguous_kernel: 32x32 thread tiles over the
    // image plane, one grid layer (grid.z) per channel — the kernel reads
    // the channel count back out of gridDim.z.
    const int tile = 32;
    const dim3 threads(tile, tile, 1);
    const dim3 blocks((width + tile - 1) / tile,
                      (height + tile - 1) / tile,
                      channels);
    row_contiguous_kernel<<<blocks, threads>>>(dst, src, width, height);
}
|
6,990 | // a simple code to understand the grid and block layout
// and thread numbering scheme
#include <cuda_runtime.h>
#include <stdio.h>
__global__ void checkIndex(void) {
    // Debug kernel: every thread prints its own thread/block coordinates and
    // the block/grid dimensions (device-side printf — debugging only, output
    // is serialized and slow).
    const uint3 t = threadIdx;
    const uint3 b = blockIdx;
    const dim3 bd = blockDim;
    const dim3 gd = gridDim;
    printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) "
           "gridDim:(%d, %d, %d)\n", t.x, t.y, t.z,
           b.x, b.y, b.z, bd.x, bd.y, bd.z,
           gd.x, gd.y, gd.z);
}
__global__ void
vectorAdd(int *A, int numElements)
{
    // Doubles each element of A in place. One thread per element, with a
    // tail guard for grids whose thread count overshoots numElements.
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= numElements)
        return;
    A[gid] *= 2;
}
int main(int argc, char **argv) {
    // Fills a host vector with random ints, doubles every element on the GPU
    // via vectorAdd, and copies the result back.
    int nElem = 102173;
    size_t bytes = nElem * sizeof(int);

    int *h_vect = (int *)malloc(bytes);
    if (h_vect == NULL)
    {
        fprintf(stderr, "host malloc failed\n");
        return 1;
    }
    for (int i = 0; i < nElem; ++i)
    {
        h_vect[i] = rand();
    }

    int *d_vect = NULL;
    if (cudaMalloc((void **)&d_vect, bytes) != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed\n");
        free(h_vect);
        return 1;
    }

    // FIX: the original set threadsPerBlock = 8388608, far above the
    // 1024-threads-per-block hardware limit, and then launched with the
    // arguments swapped (<<<threadsPerBlock, blocksPerGrid>>> instead of
    // <<<blocks, threads>>>). Use a legal block size and the conventional
    // <<<blocksPerGrid, threadsPerBlock>>> order (ceil-div for the grid).
    int threadsPerBlock = 256;
    int blocksPerGrid = (nElem + threadsPerBlock - 1) / threadsPerBlock;

    cudaMemcpy(d_vect, h_vect, bytes, cudaMemcpyHostToDevice);
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_vect, nElem);

    // Kernel launches don't return errors directly — check explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    }
    cudaDeviceSynchronize();
    cudaMemcpy(h_vect, d_vect, bytes, cudaMemcpyDeviceToHost);

    // FIX: the original called cudaDeviceReset() *before* cudaFree(d_vect);
    // freeing a device pointer after the context is torn down is invalid.
    // Release resources first, reset last.
    cudaFree(d_vect);
    free(h_vect);
    cudaDeviceReset();
    return (0);
}
|
6,991 |
#define DISP_MIN 0
#define DISP_MAX 63
// Packed sum-of-absolute-differences over the four bytes of A and B, plus an
// optional accumulator C: returns C + sum_{k=0..3} |A.byte[k] - B.byte[k]|.
// On SM 3.x+ this is a single vabsdiff4 SIMD instruction; on Fermi it is
// emulated with four per-byte vabsdiff instructions chained through `result`.
__device__ unsigned int __usad4(unsigned int A, unsigned int B, unsigned int C=0)
{
unsigned int result;
#if (__CUDA_ARCH__ >= 300) // Kepler (SM 3.x) supports a 4 vector SAD SIMD
asm("vabsdiff4.u32.u32.u32.add" " %0, %1, %2, %3;": "=r"(result):"r"(A), "r"(B), "r"(C));
#else // SM 2.0 // Fermi (SM 2.x) supports only 1 SAD SIMD, so there are 4 instructions
// Each step adds |A.bk - B.bk| to the running total; the first seeds with C.
asm("vabsdiff.u32.u32.u32.add" " %0, %1.b0, %2.b0, %3;": "=r"(result):"r"(A), "r"(B), "r"(C));
asm("vabsdiff.u32.u32.u32.add" " %0, %1.b1, %2.b1, %3;": "=r"(result):"r"(A), "r"(B), "r"(result));
asm("vabsdiff.u32.u32.u32.add" " %0, %1.b2, %2.b2, %3;": "=r"(result):"r"(A), "r"(B), "r"(result));
asm("vabsdiff.u32.u32.u32.add" " %0, %1.b3, %2.b3, %3;": "=r"(result):"r"(A), "r"(B), "r"(result));
#endif
return result;
}
__device__ unsigned short absus(short a)
{
    // Absolute value of a 16-bit value, returned as unsigned short.
    // Equivalent to the max(-a, a) form: both negate in int width before
    // narrowing, so even SHRT_MIN yields 32768.
    return (a < 0) ? (unsigned short)(-a) : (unsigned short)a;
}
// Per-thread SAD accumulation for an 8-wide sliding match of a 16x16 pattern
// against a (64+16)x16 search strip. Each thread (threadIdx.x = horizontal
// offset 0..63, threadIdx.y = row 0..15) sums |pattern - search| over 8
// consecutive columns. `result` is treated as two 1024-entry planes: the
// first plane accumulates pattern columns 0..7 (left half), result2 (offset
// 1024) accumulates columns 8..15 (right half). *idx is the thread's slot
// (threadIdx.y*64 + threadIdx.x, precomputed by the caller). Accumulation is
// private per thread; the caller must __syncthreads() before reducing.
__device__ void match8x64w16(unsigned char pattern[16][16], unsigned char search[64+16][16], unsigned short* result, const unsigned short* idx){
unsigned short* result2 = (unsigned short*)&result[1024];
result[*idx] = absus((short)pattern[0][threadIdx.y] - search[threadIdx.x ][threadIdx.y]) ;
result[*idx] += absus((short)pattern[1][threadIdx.y] - search[threadIdx.x+1][threadIdx.y]) ;
result[*idx] += absus((short)pattern[2][threadIdx.y] - search[threadIdx.x+2][threadIdx.y]) ;
result[*idx] += absus((short)pattern[3][threadIdx.y] - search[threadIdx.x+3][threadIdx.y]) ;
result[*idx] += absus((short)pattern[4][threadIdx.y] - search[threadIdx.x+4][threadIdx.y]) ;
result[*idx] += absus((short)pattern[5][threadIdx.y] - search[threadIdx.x+5][threadIdx.y]) ;
result[*idx] += absus((short)pattern[6][threadIdx.y] - search[threadIdx.x+6][threadIdx.y]) ;
result[*idx] += absus((short)pattern[7][threadIdx.y] - search[threadIdx.x+7][threadIdx.y]) ;
// Right half
result2[*idx] = absus((short)pattern[8][threadIdx.y] - search[threadIdx.x+8][threadIdx.y]) ;
result2[*idx] += absus((short)pattern[9][threadIdx.y] - search[threadIdx.x+9][threadIdx.y]) ;
result2[*idx] += absus((short)pattern[10][threadIdx.y] - search[threadIdx.x+10][threadIdx.y]) ;
result2[*idx] += absus((short)pattern[11][threadIdx.y] - search[threadIdx.x+11][threadIdx.y]) ;
result2[*idx] += absus((short)pattern[12][threadIdx.y] - search[threadIdx.x+12][threadIdx.y]) ;
result2[*idx] += absus((short)pattern[13][threadIdx.y] - search[threadIdx.x+13][threadIdx.y]) ;
result2[*idx] += absus((short)pattern[14][threadIdx.y] - search[threadIdx.x+14][threadIdx.y]) ;
result2[*idx] += absus((short)pattern[15][threadIdx.y] - search[threadIdx.x+15][threadIdx.y]) ;
}
// Tree reduction over the 16 rows of the two 1024-entry SAD planes written by
// match8x64w16, producing two independent 8-row sums: rows 0..7 accumulate
// into row 0 (read by threadIdx.y == 0) and rows 8..15 into row 8 (read by
// threadIdx.y == 8). At each stage the reader/writer row ranges are disjoint,
// so only the between-stage __syncthreads() barriers are needed. The caller
// must __syncthreads() before calling (after match8x64w16) and after
// returning, before consuming rows 0 and 8.
__device__ void sum8x64w16(unsigned short *result, const unsigned short* idx){
unsigned short* result2 = (unsigned short*)&result[1024];
if(threadIdx.y < 4){
result[*idx] += result[threadIdx.x+(threadIdx.y+4)*64];
result2[*idx] += result2[threadIdx.x+(threadIdx.y+4)*64];
}
if(threadIdx.y > 7 && threadIdx.y < 12) {
result[*idx] += result[threadIdx.x+(threadIdx.y+4)*64];
result2[*idx] += result2[threadIdx.x+(threadIdx.y+4)*64];
}
__syncthreads();
if(threadIdx.y < 2){
result[*idx] += result[threadIdx.x+(threadIdx.y+2)*64];
result2[*idx] += result2[threadIdx.x+(threadIdx.y+2)*64];
}
if(threadIdx.y > 7 && threadIdx.y < 10){
result[*idx] += result[threadIdx.x+(threadIdx.y+2)*64];
result2[*idx] += result2[threadIdx.x+(threadIdx.y+2)*64];
}
__syncthreads();
if(threadIdx.y == 0){
result[*idx] += result[threadIdx.x+(threadIdx.y+1)*64];
result2[*idx] += result2[threadIdx.x+(threadIdx.y+1)*64];
}
if(threadIdx.y == 8){
result[*idx] += result[threadIdx.x+(threadIdx.y+1)*64];
result2[*idx] += result2[threadIdx.x+(threadIdx.y+1)*64];
}
}
// Block-matching kernel over edge images: each block loads a 16-column
// pattern tile from edgeL and a (64+16)-column search strip from edgeR into
// shared memory, computes 64 sliding SAD scores per 8x8 sub-block via
// match8x64w16/sum8x64w16, and writes four 64-entry score rows into `out`
// (the hard-coded 80 is the output row stride in 8px blocks — presumably
// cols/8 for a 640px image; confirm against the caller). Blocks that fall in
// the unmatchable right/bottom margin write sentinel scores of 60000 instead.
// NOTE(review): the early `return` for out-of-range threads happens before
// the __syncthreads() calls below, and x depends on threadIdx.x (0..63) while
// blocks advance by only 16 columns — so a block straddling the margin can
// have some threads return while others reach the barrier. That is undefined
// behavior per the CUDA model; verify blocks can never straddle the boundary,
// or restructure so all threads reach the barriers.
__global__ void edgeMatch8w16(const int rows, const int cols, unsigned char *edgeL, unsigned char *edgeR, unsigned short *out) {
__shared__ unsigned char pattern[16][16];
__shared__ unsigned char search[64+16][16];
extern __shared__ unsigned short results[];
const int x = blockIdx.x * 16 + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int t = y*cols+x;
const unsigned short idx = threadIdx.y*64+threadIdx.x;
//result[threadIdx.y*64+threadIdx.x] = 0; possible setting max uchar
if(x < 0 || x >= cols-64-16 || y < 0 || y >= rows){
// Margin: emit sentinel 60000 for all four output rows of this block.
if(threadIdx.y == 0){
out[(2*blockIdx.y*(80)+2*blockIdx.x)*64+threadIdx.x] = 60000;
out[(2*blockIdx.y*(80)+(2*blockIdx.x+1))*64+threadIdx.x] = 60000;
}
if(threadIdx.y == 8){
out[((2*(blockIdx.y)+1)*(80)+2*blockIdx.x)*64+threadIdx.x] = 60000;
out[((2*(blockIdx.y)+1)*(80)+2*blockIdx.x+1)*64+threadIdx.x] = 60000;
}
return;
}
// Stage tiles into shared memory (threads with x < 16 also load the pattern
// tile and the strip's trailing 16 columns).
search[threadIdx.x][threadIdx.y] = edgeR[t];
results[idx] = 0;
if(threadIdx.x < 16){
pattern[threadIdx.x][threadIdx.y] = edgeL[t];
search[threadIdx.x+64][threadIdx.y] = edgeR[t+64];
}
__syncthreads();
match8x64w16(pattern, search, results, &idx);
__syncthreads();
sum8x64w16(results, &idx);
__syncthreads();
// Rows 0 and 8 now hold the reduced scores for the top and bottom 8x8
// sub-blocks; plane 0 = left half, plane at offset 1024 = right half.
if(threadIdx.y == 0){
out[(2*blockIdx.y*(80)+2*blockIdx.x)*64+threadIdx.x] = results[idx];
out[(2*blockIdx.y*(80)+(2*blockIdx.x+1))*64+threadIdx.x] = results[1024+idx];
}
if(threadIdx.y == 8){
out[((2*(blockIdx.y)+1)*(80)+2*blockIdx.x)*64+threadIdx.x] = results[idx];
out[((2*(blockIdx.y)+1)*(80)+2*blockIdx.x+1)*64+threadIdx.x] = results[1024+idx];
}
}
// Parallel argmax reduction over kernel[0..63] along threadIdx.z: folds the
// 64 scores down pairwise, tracking the winning index in shared idx[32].
// Thread z == 0 finally writes the winning index to out[0] if the winning
// score is below *th and the index is < 50, else writes 0.
// NOTE(review): `out[1] = 0;` writes one past the declared out[1] — the
// parameter decays to a pointer, so this presumably relies on callers passing
// a >= 2-byte buffer (findMin64 does the same); confirm against call sites.
// BUG FIX: the original initialized `idx[threadIdx.z] = threadIdx.z;`
// unconditionally, but idx has only 32 slots, so any threadIdx.z >= 32 wrote
// out of shared-array bounds. The init is now guarded to z < 32, matching the
// sibling findMin64. (The reduction itself is unchanged — kernel[] is
// destroyed in the process, as before.)
__device__ void findBest64(unsigned int kernel[64], unsigned char out[1], int *th){
    __shared__ unsigned char idx[32];
    out[1] = 0;
    if(threadIdx.z < 32){
        idx[threadIdx.z] = threadIdx.z;
    }
    __syncthreads();
    // Stage 1: fold 64 -> 32, recording which half won.
    if(threadIdx.z < 32){
        if(kernel[threadIdx.z+32] > kernel[threadIdx.z]){
            kernel[threadIdx.z] = kernel[threadIdx.z+32];
            idx[threadIdx.z] = threadIdx.z+32;
        }else
            idx[threadIdx.z] = threadIdx.z;
    }
    __syncthreads();
    // Stages 2..6: fold 32 -> 16 -> 8 -> 4 -> 2 -> 1, carrying indexes along.
    if(threadIdx.z < 16){
        if(kernel[threadIdx.z+16] > kernel[threadIdx.z]){
            kernel[threadIdx.z] = kernel[threadIdx.z+16];
            idx[threadIdx.z] = idx[threadIdx.z+16];
        }
    }
    __syncthreads();
    if(threadIdx.z < 8){
        if(kernel[threadIdx.z+8] > kernel[threadIdx.z]){
            kernel[threadIdx.z] = kernel[threadIdx.z+8];
            idx[threadIdx.z] = idx[threadIdx.z+8];
        }
    }
    __syncthreads();
    if(threadIdx.z < 4){
        if(kernel[threadIdx.z+4] > kernel[threadIdx.z]){
            kernel[threadIdx.z] = kernel[threadIdx.z+4];
            idx[threadIdx.z] = idx[threadIdx.z+4];
        }
    }
    __syncthreads();
    if(threadIdx.z < 2){
        if(kernel[threadIdx.z+2] > kernel[threadIdx.z]){
            kernel[threadIdx.z] = kernel[threadIdx.z+2];
            idx[threadIdx.z] = idx[threadIdx.z+2];
        }
    }
    __syncthreads();
    if(threadIdx.z < 1){
        if(kernel[threadIdx.z+1] > kernel[threadIdx.z]){
            kernel[threadIdx.z] = kernel[threadIdx.z+1];
            idx[threadIdx.z] = idx[threadIdx.z+1];
        }
        // Accept only sub-threshold winners with index < 50 (domain-specific
        // cutoff — presumably excludes the rightmost disparities; confirm).
        if(kernel[threadIdx.z] < *th && idx[threadIdx.z] < 50)
            out[0] = idx[threadIdx.z];
        else
            out[0] = 0;
    }
}
// Parallel argmin reduction over kernel[0..63] along threadIdx.z, the minimum
// counterpart of findBest64: folds 64 scores down pairwise while carrying the
// winning index in shared idx[32], then thread z == 0 writes the winning
// index to out[0] if the minimum is below *th (the idx < 64 test is always
// true for an 8-bit index into 64 entries), else 0. kernel[] is destroyed in
// the process.
// NOTE(review): `out[1] = 0;` writes one past the declared out[1] — the
// parameter decays to a pointer, so this presumably relies on callers passing
// a >= 2-byte buffer; confirm against call sites.
__device__ void findMin64(unsigned int kernel[64], unsigned char out[1], int *th){
__shared__ unsigned char idx[32];
out[1] = 0;
if(threadIdx.z < 32){
idx[threadIdx.z] = threadIdx.z;
}
__syncthreads();
// Stage 1: fold 64 -> 32, recording which half won.
if(threadIdx.z < 32){
if(kernel[threadIdx.z+32] < kernel[threadIdx.z]){
kernel[threadIdx.z] = kernel[threadIdx.z+32];
idx[threadIdx.z] = threadIdx.z+32;
}else
idx[threadIdx.z] = threadIdx.z;
}
__syncthreads();
// Stages 2..6: fold 32 -> 16 -> 8 -> 4 -> 2 -> 1, carrying indexes along.
if(threadIdx.z < 16){
if(kernel[threadIdx.z+16] < kernel[threadIdx.z]){
kernel[threadIdx.z] = kernel[threadIdx.z+16];
idx[threadIdx.z] = idx[threadIdx.z+16];
}
}
__syncthreads();
if(threadIdx.z < 8){
if(kernel[threadIdx.z+8] < kernel[threadIdx.z]){
kernel[threadIdx.z] = kernel[threadIdx.z+8];
idx[threadIdx.z] = idx[threadIdx.z+8];
}
}
__syncthreads();
if(threadIdx.z < 4){
if(kernel[threadIdx.z+4] < kernel[threadIdx.z]){
kernel[threadIdx.z] = kernel[threadIdx.z+4];
idx[threadIdx.z] = idx[threadIdx.z+4];
}
}
__syncthreads();
if(threadIdx.z < 2){
if(kernel[threadIdx.z+2] < kernel[threadIdx.z]){
kernel[threadIdx.z] = kernel[threadIdx.z+2];
idx[threadIdx.z] = idx[threadIdx.z+2];
}
}
__syncthreads();
if(threadIdx.z < 1){
if(kernel[threadIdx.z+1] < kernel[threadIdx.z]){
kernel[threadIdx.z] = kernel[threadIdx.z+1];
idx[threadIdx.z] = idx[threadIdx.z+1];
}
if(kernel[threadIdx.z] < *th && idx[threadIdx.z] < 64)
out[0] = idx[threadIdx.z];
else
out[0] = 0;
}
}
__device__ int abss(int a)
{
    // Integer absolute value (branch form of the original max(-a, a);
    // identical behavior, including UB for INT_MIN in both variants).
    return (a < 0) ? -a : a;
}
// 3x3-neighborhood SAD match of an 18x18 (padded 16x16) pattern tile against
// a (64+18)x18 search strip, writing per-offset costs into block2 (laid out
// as [row*16 + column] * 64 + offset, 16x16x64 entries total). Each call
// handles pattern columns `shift + threadIdx.z` (threadIdx.z in {0,1}); the
// caller invokes it with shift = 0,2,...,14 to cover all 16 columns. Every
// thread caches its 3x3 pattern neighborhood in registers (p[0..8]) and
// accumulates |p - search| for four search offsets (idx, +16, +32, +48).
// NOTE(review): p[0..2] (the top row) are compared against search rows
// threadIdx.y / threadIdx.y rather than iY-1 / iY as the symmetric pattern
// would suggest — threadIdx.y == iY-1, so this is consistent, but the mixed
// naming is easy to misread.
// CLEANUP: two abandoned commented-out implementations (~85 lines) that
// previously preceded this code were removed; behavior is unchanged.
__device__ void match2extend_16x16x2(unsigned char pattern[18][18], unsigned char search[64+18][18], unsigned short *block2, const unsigned short shift){
    const short iX = shift+threadIdx.z+1;
    const short iY = threadIdx.y+1;
    const int idx = ((threadIdx.y * 16 + (shift+threadIdx.z))*64) + threadIdx.x;
    short p[9];
    // Cache the 3x3 pattern neighborhood once; it is reused for all offsets.
    p[0] = (short)pattern[iX-1][iY-1];
    p[1] = (short)pattern[iX ][iY-1];
    p[2] = (short)pattern[iX+1][iY-1];
    p[3] = (short)pattern[iX-1][iY ];
    p[4] = (short)pattern[iX ][iY ];
    p[5] = (short)pattern[iX+1][iY ];
    p[6] = (short)pattern[iX-1][iY+1];
    p[7] = (short)pattern[iX ][iY+1];
    p[8] = (short)pattern[iX+1][iY+1];
    // Accumulate SAD for four horizontal search offsets per thread
    // (threadIdx.x, +16, +32, +48), one pattern pixel at a time.
    block2[idx] = absus(p[0] - search[iX+threadIdx.x-1 ][threadIdx.y]);
    block2[idx+16] = absus(p[0] - search[iX+threadIdx.x+15][threadIdx.y]);
    block2[idx+32] = absus(p[0] - search[iX+threadIdx.x+31][threadIdx.y]);
    block2[idx+48] = absus(p[0] - search[iX+threadIdx.x+47][threadIdx.y]);
    block2[idx] += absus(p[1] - search[iX+threadIdx.x ][threadIdx.y]);
    block2[idx+16] += absus(p[1] - search[iX+threadIdx.x+16][threadIdx.y]);
    block2[idx+32] += absus(p[1] - search[iX+threadIdx.x+32][threadIdx.y]);
    block2[idx+48] += absus(p[1] - search[iX+threadIdx.x+48][threadIdx.y]);
    block2[idx] += absus(p[2] - search[iX+threadIdx.x+1 ][threadIdx.y]);
    block2[idx+16] += absus(p[2] - search[iX+threadIdx.x+17][threadIdx.y]);
    block2[idx+32] += absus(p[2] - search[iX+threadIdx.x+33][threadIdx.y]);
    block2[idx+48] += absus(p[2] - search[iX+threadIdx.x+49][threadIdx.y]);
    block2[idx] += absus(p[3] - search[iX+threadIdx.x-1 ][iY]);
    block2[idx+16] += absus(p[3] - search[iX+threadIdx.x+15][iY]);
    block2[idx+32] += absus(p[3] - search[iX+threadIdx.x+31][iY]);
    block2[idx+48] += absus(p[3] - search[iX+threadIdx.x+47][iY]);
    block2[idx] += absus(p[4] - search[iX+threadIdx.x ][iY]);
    block2[idx+16] += absus(p[4] - search[iX+threadIdx.x+16][iY]);
    block2[idx+32] += absus(p[4] - search[iX+threadIdx.x+32][iY]);
    block2[idx+48] += absus(p[4] - search[iX+threadIdx.x+48][iY]);
    block2[idx] += absus(p[5] - search[iX+threadIdx.x+1 ][iY]);
    block2[idx+16] += absus(p[5] - search[iX+threadIdx.x+17][iY]);
    block2[idx+32] += absus(p[5] - search[iX+threadIdx.x+33][iY]);
    block2[idx+48] += absus(p[5] - search[iX+threadIdx.x+49][iY]);
    block2[idx] += absus(p[6] - search[iX+threadIdx.x-1 ][iY+1]);
    block2[idx+16] += absus(p[6] - search[iX+threadIdx.x+15][iY+1]);
    block2[idx+32] += absus(p[6] - search[iX+threadIdx.x+31][iY+1]);
    block2[idx+48] += absus(p[6] - search[iX+threadIdx.x+47][iY+1]);
    block2[idx] += absus(p[7] - search[iX+threadIdx.x ][iY+1]);
    block2[idx+16] += absus(p[7] - search[iX+threadIdx.x+16][iY+1]);
    block2[idx+32] += absus(p[7] - search[iX+threadIdx.x+32][iY+1]);
    block2[idx+48] += absus(p[7] - search[iX+threadIdx.x+48][iY+1]);
    block2[idx] += absus(p[8] - search[iX+threadIdx.x+1 ][iY+1]);
    block2[idx+16] += absus(p[8] - search[iX+threadIdx.x+17][iY+1]);
    block2[idx+32] += absus(p[8] - search[iX+threadIdx.x+33][iY+1]);
    block2[idx+48] += absus(p[8] - search[iX+threadIdx.x+49][iY+1]);
}
__device__ int roundff(float a)
{
    // Round-half-up to int (0.5 -> 1, -0.5 -> 0), preserving the original
    // floor(a + 0.5) semantics (note: NOT roundf, which rounds half away
    // from zero and would change negative half-values).
    // FIX: use floorf and a 0.5f literal so the computation stays in single
    // precision — the original promoted through double floor()/0.5, which is
    // slow on most GPUs for no benefit here. Ties that straddle a float
    // rounding boundary may differ by 1 ulp from the double path.
    return (int)floorf(a + 0.5f);
}
// Two-pass shared-memory argmin over a 64-entry cost row per threadIdx.y
// (costs at in[y*64 .. y*64+63]), run by 32 reducer threads (inIdx =
// threadIdx.x + threadIdx.z*16, 0..31). Pass 1 finds the best (minimum)
// cost and its index, refines the index with sub-sample parabola
// interpolation (interMinX), masks the winner and its immediate neighbors
// with a large sentinel, then pass 2 finds the second-best cost. Thread
// inIdx == 0 accepts the original winner only if the second-best is not
// "too close": beyond the relative-margin thresholds (0.6 for far-apart
// indices, 0.15 for moderately-apart), otherwise the match is ambiguous and
// index 0 (no disparity) is reported. Results: indexes[idxIdx] = winning
// index (or 0), indexes[idxIdx+1] = rounded sub-sample offset + 4, and
// in[y*64] is overwritten with the winning cost. in[] is destroyed.
// NOTE(review): thresholds 0.6 / 0.15 and the +-4 index-distance split are
// empirical tuning constants — confirm against the calling kernel before
// changing. CLEANUP: a ~50-line abandoned commented-out masking variant was
// removed; behavior is unchanged.
__device__ void findBestDispXX(unsigned int *in, unsigned char *indexes, unsigned int *minTemp){
    const int inIdx = threadIdx.x + threadIdx.z*16; // after optimalisation
    const int idxRes = threadIdx.y * 64 + inIdx;
    const int idxIdx = threadIdx.y * 32 + inIdx;
    int tmpIdx = 0;
    int tmpVal = 0;
    float interMinX =0.0f;
    // ---- Pass 1: fold 64 -> 32 -> ... -> 1, carrying the argmin index ----
    if(in[idxRes+32] < in[idxRes]){
        minTemp[idxIdx] = in[idxRes+32];
        indexes[idxIdx] = inIdx+32;
    }else{
        indexes[idxIdx] = inIdx;
        minTemp[idxIdx] = in[idxRes];
    }
    __syncthreads();
    if(inIdx < 16){
        if(minTemp[idxIdx+16] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+16];
            indexes[idxIdx] = indexes[idxIdx+16];
        }
    }
    __syncthreads();
    if(inIdx < 8){
        if(minTemp[idxIdx+8] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+8];
            indexes[idxIdx] = indexes[idxIdx+8];
        }
    }
    __syncthreads();
    if(inIdx < 4){
        if(minTemp[idxIdx+4] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+4];
            indexes[idxIdx] = indexes[idxIdx+4];
        }
    }
    __syncthreads();
    if(inIdx < 2){
        if(minTemp[idxIdx+2] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+2];
            indexes[idxIdx] = indexes[idxIdx+2];
        }
    }
    __syncthreads();
    if(inIdx == 0){
        if(minTemp[idxIdx+1] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+1];
            indexes[idxIdx] = indexes[idxIdx+1];
        }
        tmpVal = in[threadIdx.y * 64+indexes[idxIdx]];
        tmpIdx = indexes[idxIdx];
        // Sub-sample refinement: parabola through the winner and its two
        // neighbors (only when both neighbors exist).
        if(tmpIdx != 0 && tmpIdx != 63 ){
            interMinX = 4*(float)((int)in[threadIdx.y * 64+tmpIdx-1]-(int)in[threadIdx.y * 64+tmpIdx+1])/(2*(((int)in[threadIdx.y * 64+tmpIdx-1])-2*(int)in[threadIdx.y * 64+tmpIdx]+(int)in[threadIdx.y * 64+tmpIdx+1]));
        }
        // Mask the winner and its immediate neighbors so pass 2 finds a
        // genuinely different second-best candidate.
        in[threadIdx.y * 64+tmpIdx] = 99999999;
        if(indexes[idxIdx] == 0){
            in[threadIdx.y * 64+tmpIdx+1] = 99999999;
        }
        else if(indexes[idxIdx] == 63){
            in[threadIdx.y * 64+tmpIdx-1] = 99999999;
        }else{
            in[threadIdx.y * 64+tmpIdx-1] = 99999999;
            in[threadIdx.y * 64+tmpIdx+1] = 99999999;
        }
    }
    __syncthreads();
    // ---- Pass 2: identical reduction over the masked costs ----
    if(in[idxRes+32] < in[idxRes]){
        minTemp[idxIdx] = in[idxRes+32];
        indexes[idxIdx] = inIdx+32;
    }else{
        indexes[idxIdx] = inIdx;
        minTemp[idxIdx] = in[idxRes];
    }
    __syncthreads();
    if(inIdx < 16){
        if(minTemp[idxIdx+16] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+16];
            indexes[idxIdx] = indexes[idxIdx+16];
        }
    }
    __syncthreads();
    if(inIdx < 8){
        if(minTemp[idxIdx+8] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+8];
            indexes[idxIdx] = indexes[idxIdx+8];
        }
    }
    __syncthreads();
    if(inIdx < 4){
        if(minTemp[idxIdx+4] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+4];
            indexes[idxIdx] = indexes[idxIdx+4];
        }
    }
    __syncthreads();
    if(inIdx < 2){
        if(minTemp[idxIdx+2] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+2];
            indexes[idxIdx] = indexes[idxIdx+2];
        }
    }
    __syncthreads();
    if(inIdx == 0){
        if(minTemp[idxIdx+1] < minTemp[idxIdx]){
            minTemp[idxIdx] = minTemp[idxIdx+1];
            indexes[idxIdx] = indexes[idxIdx+1];
        }
        // Ambiguity check: accept the pass-1 winner only if the second-best
        // cost exceeds it by a sufficient relative margin.
        if(abs(tmpIdx-indexes[idxIdx]) > 4){
            if(0.6f > (float)(in[threadIdx.y * 64+indexes[idxIdx]]-tmpVal)/(tmpVal)){
                indexes[idxIdx] = 0;
                indexes[idxIdx+1] = 4;
            }else{
                indexes[idxIdx] = tmpIdx;
                indexes[idxIdx+1] = (int)(round(interMinX))+4;
                in[threadIdx.y * 64] = tmpVal;
            }
        }else if(abs(tmpIdx-indexes[idxIdx]) > 1){
            if(0.15f > (float)(in[threadIdx.y * 64+indexes[idxIdx]]-tmpVal)/(tmpVal)){
                indexes[idxIdx] = 0;
                indexes[idxIdx+1] = 4;
            }else{
                indexes[idxIdx] = tmpIdx;
                indexes[idxIdx+1] = (int)(round(interMinX))+4;
                in[threadIdx.y * 64] = tmpVal;
            }
        }else{
            indexes[idxIdx] = tmpIdx;
            indexes[idxIdx+1] = (int)(round(interMinX))+4;
            in[threadIdx.y * 64] = tmpVal;
        }
    }
}
__global__ void brain3(const int rows, const int cols, unsigned char *left, unsigned char *right, unsigned char *edgeL, unsigned short* in8, unsigned short *weights, unsigned char *disp, int mode, int maxErr){
__shared__ unsigned char pattern[18][18];
__shared__ unsigned char search[64+18][18];
__shared__ unsigned short w[32];
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int t = y*cols+x;
extern __shared__ unsigned int extt[];
unsigned int *block16 = (unsigned int*)&extt[0];
//size of block 16 is 2x2x64
unsigned short *block8 = (unsigned short*)&block16[3*3*64];
//size of block8 is 6x6x64
unsigned short *block2 = (unsigned short*)&block8[6*6*64];
//size of block2 is 16x16x64
unsigned int *res = (unsigned int*)&block2[16*16*64];
unsigned char *indexes = (unsigned char*)&res[4*4*64];
unsigned int *minTemp = (unsigned int*)&indexes[4*4*32];
if(threadIdx.y == 0){
w[threadIdx.x+16*threadIdx.z] = weights[threadIdx.x+16*threadIdx.z];
}
if(blockIdx.x < 1 || blockIdx.x >= (cols/16)-1 || blockIdx.y < 1 || blockIdx.y >= (rows/16)-1) return;
//------fill pattern---------//
const int iX = threadIdx.x+1;
const int iY = threadIdx.y+1;
if(threadIdx.z == 0){
pattern[iX][iY] = left[t];
if(iX < 2){
if(x < 1){
pattern[iX-1][iY] = 0;
if(iY < 2) pattern[iX-1][iY-1] = 0;
}else{
pattern[iX-1][iY] = left[y*cols+x-1];
if(y < 1) pattern[iX-1][iY-1] = 0;
else pattern[iX-1][iY-1] = left[(y-1)*cols+(x-1)];
}
}
if(iX >= blockDim.x){
if(x >= cols-1){
pattern[iX+1][iY] = 0;
if(iY >= blockDim.y) pattern[iX+1][iY+1] = 0;
}else{
pattern[iX+1][iY] = left[y*cols+(x+1)];
if(y >= rows-1) pattern[iX+1][iY+1] = 0;
else pattern[iX+1][iY+1] = left[(y+1)*cols+x+1];
}
}
if(iY < 2){
if(y < 1){
pattern[iX][iY-1] = 0;
if(iX >= blockDim.x) pattern[iX+1][iY-1] = 0;
}else{
pattern[iX][iY-1] = left[(y-1)*cols+x];
if(x >= cols-1) pattern[iX+1][iY-1] = 0;
else pattern[iX+1][iY-1] = left[(y-1)*cols+x+1];
}
}
if(iY >= blockDim.y){
if(y >= rows-1){
pattern[iX][iY+1] = 0;
if(iX < 2) pattern[iX-1][iY+1] = 0;
}else{
pattern[iX][iY+1] = left[(y+1)*cols+x];
if(x < 1) pattern[iX-1][iY+1] = 0;
else pattern[iX-1][iY+1] = left[(y+1)*cols+(x-1)];
}
}
}
//------fill search----------//
if(threadIdx.z == 0){
search[iX][iY] = right[t];
search[iX+16][iY] = right[t+16];
if(threadIdx.x == 0){
search[0][iY] = right[t-1];
}
if(threadIdx.y == 0){
search[iX][0] = right[t-cols];
search[iX+16][0] = right[t-cols+16];
if(threadIdx.x == 0){
search[0][0] = right[t-cols-1];
}
if(threadIdx.x == 15){
search[iX+64+1][0] = right[t-cols+64+1];
}
}
if(threadIdx.y == 15){
search[iX][17] = right[t+cols];
search[iX+16][17] = right[t+cols+16];
if(threadIdx.x == 0){
search[0][17] = right[t+cols-1];
}
if(threadIdx.x == 15){
search[iX+64+1][17] = right[t+cols+64+1];
}
}
}
if(threadIdx.z == 1){
search[iX+32][iY] = right[t+32];
search[iX+48][iY] = right[t+48];
search[iX+64][iY] = right[t+64];
if(threadIdx.x == 15){
search[64+17][iY] = right[t+64+1];
}
if(threadIdx.y == 0){
search[iX+32][0] = right[t-cols+32];
search[iX+48][0] = right[t-cols+48];
search[iX+64][0] = right[t-cols+64];
}
if(threadIdx.y == 15){
search[iX+32][17] = right[t+cols+32];
search[iX+48][17] = right[t+cols+48];
search[iX+64][17] = right[t+cols+64];
}
}
//------fill block8----------//
int block8idx = 0;
int thread8idx = 0;
//spliting coping z axis between x and y threads
if(threadIdx.x < 6 && threadIdx.y < 6){
block8idx = ((2*blockIdx.y+(((int)threadIdx.y)-2))*(cols/8)+2*blockIdx.x+(((int)threadIdx.x)-2))*64+threadIdx.z;
thread8idx = 64*(threadIdx.y*6+threadIdx.x)+threadIdx.z;
}
if(threadIdx.x >= 6 && threadIdx.x < 12 && threadIdx.y < 6){
block8idx = ((2*blockIdx.y+(((int)threadIdx.y)-2))*(cols/8)+2*blockIdx.x+((((int)threadIdx.x)-6)-2))*64+threadIdx.z+16;
thread8idx = 64*(threadIdx.y*6+((int)threadIdx.x)-6)+threadIdx.z+16;
}
if(threadIdx.x < 6 && threadIdx.y >= 6 && threadIdx.y < 12){
block8idx = ((2*blockIdx.y+(((int)threadIdx.y)-8))*(cols/8)+2*blockIdx.x+(((int)threadIdx.x)-2))*64+threadIdx.z+32;
thread8idx = 64*((((int)threadIdx.y)-6)*6+threadIdx.x)+threadIdx.z+32;
}
if(threadIdx.x >= 6 && threadIdx.x < 12 && threadIdx.y >= 6 && threadIdx.y < 12){
block8idx = ((2*blockIdx.y+(((int)threadIdx.y)-8))*(cols/8)+2*blockIdx.x+((((int)threadIdx.x)-6)-2))*64+threadIdx.z+48;
thread8idx = 64*((((int)threadIdx.y)-6)*6+((int)threadIdx.x)-6)+threadIdx.z+48;
}
if(threadIdx.x < 12 && threadIdx.y < 12){
block8[thread8idx ] = in8[block8idx ];
block8[thread8idx+2 ] = in8[block8idx+2 ];
block8[thread8idx+4 ] = in8[block8idx+4 ];
block8[thread8idx+6 ] = in8[block8idx+6 ];
block8[thread8idx+8 ] = in8[block8idx+8 ];
block8[thread8idx+10] = in8[block8idx+10];
block8[thread8idx+12] = in8[block8idx+12];
block8[thread8idx+14] = in8[block8idx+14];
}
__syncthreads();
//------calculate 2x2 extended blocks-----------//
if(mode){
match2extend_16x16x2(pattern, search, block2, 0);
match2extend_16x16x2(pattern, search, block2, 2);
match2extend_16x16x2(pattern, search, block2, 4);
match2extend_16x16x2(pattern, search, block2, 6);
match2extend_16x16x2(pattern, search, block2, 8);
match2extend_16x16x2(pattern, search, block2, 10);
match2extend_16x16x2(pattern, search, block2, 12);
match2extend_16x16x2(pattern, search, block2, 14);
}
//----copy edgeDistanceTransform to pattern-----------//
if(threadIdx.z == 0)
search[threadIdx.x][threadIdx.y] = edgeL[t];
//------calculate 16x16 blocks----------//
const int b16idx = ((threadIdx.y/4)*3+(threadIdx.x/4))*64;
const int b8idx = ((threadIdx.y/4)*6+(threadIdx.x/4))*64*2;
const int zidx2 = 2*(4*(threadIdx.y%4) + (threadIdx.x%4))+threadIdx.z;
if(threadIdx.x < 12 && threadIdx.y < 12){
block16[b16idx+zidx2] = block8[b8idx+zidx2];
block16[b16idx+zidx2+32] = block8[b8idx+zidx2+32];
block16[b16idx+zidx2] += block8[b8idx+zidx2+64];
block16[b16idx+zidx2+32] += block8[b8idx+zidx2+32+64];
block16[b16idx+zidx2] += block8[b8idx+zidx2+64*6];
block16[b16idx+zidx2+32] += block8[b8idx+zidx2+32+64*6];
block16[b16idx+zidx2] += block8[b8idx+zidx2+64*7];
block16[b16idx+zidx2+32] += block8[b8idx+zidx2+32+64*7];
}
__syncthreads();
//----calculate(3x3) 16x16 blocks into (3x) 32x32 blocks with results in corners
const int zidx = (2*(4*(threadIdx.y%8)+(threadIdx.x%4)))+threadIdx.z;
if(threadIdx.x < 4 && threadIdx.y < 8){
block16[0+zidx] += block16[64+zidx];
block16[0+zidx] += block16[64*3+zidx];
block16[0+zidx] += block16[64*4+zidx];
}
if(threadIdx.x < 4 && threadIdx.y >= 8 && threadIdx.y < 16 ){
block16[64*2+zidx] += block16[64+zidx];
block16[64*2+zidx] += block16[64*4+zidx];
block16[64*2+zidx] += block16[64*5+zidx];
}
if(threadIdx.x >= 4 && threadIdx.x < 8 && threadIdx.y < 8){
block16[64*6+zidx] += block16[64*3+zidx];
block16[64*6+zidx] += block16[64*4+zidx];
block16[64*6+zidx] += block16[64*7+zidx];
}
if(threadIdx.x >= 4 && threadIdx.x < 8 && threadIdx.y >= 8 && threadIdx.y < 16 ){
block16[64*8+zidx] += block16[64*4+zidx];
block16[64*8+zidx] += block16[64*5+zidx];
block16[64*8+zidx] += block16[64*7+zidx];
}
__syncthreads();
//int shift = 2*(4*(threadIdx.y%4)+(threadIdx.x%4))+threadIdx.z;
//int idxRes = (4*(threadIdx.y/4)+(threadIdx.x/4))*64+shift;
const int shift = 16*threadIdx.z + threadIdx.x;
const int idxRes = threadIdx.y*64 + shift;
const int idxB32_1 = 0*64+shift;
const int idxB32_2 = 2*64+shift;
const int idxB32_3 = 6*64+shift;
const int idxB32_4 = 8*64+shift;
const int idxB16_1 = 3*64+shift;
const int idxB16_2 = 1*64+shift;
const int idxB16_3 = 7*64+shift;
const int idxB16_4 = 5*64+shift;
if(blockIdx.x < 1 || blockIdx.x >= (cols/16)-4 || blockIdx.y < 1 || blockIdx.y >= (rows/16)-1) return;
int ix, iy, ixx, iyy;
const int thYmod = threadIdx.y%4;
const int thYdiv = threadIdx.y/4;
float xx32;// for next iter 0 change to 4/8/12
float yy32;// for next iter 0 change to 4/8/12
float xx16;// for next iter 0 change to 4/8/12
float yy16;// for next iter 0 change to 4/8/12
float x32;
float y32;
float x16;
float y16;
unsigned short idx2;
unsigned short weight;
#pragma unroll
for(ix = 0 ; ix < 2 ; ix++){
#pragma unroll
for(iy = 0 ; iy < 2; iy++){
if(threadIdx.x < 4 && threadIdx.y < 8){
block16[1*64+zidx] = block8[(8+6*iy+ix)*64+zidx];
block16[1*64+zidx] += block8[(9+6*iy+ix)*64+zidx];
block16[1*64+zidx] += block8[(14+6*iy+ix)*64+zidx];
block16[1*64+zidx] += block8[(15+6*iy+ix)*64+zidx];
}
if(threadIdx.x < 4 && threadIdx.y >= 8 && threadIdx.y < 16 ){
block16[3*64+zidx] = block8[(7+6*iy+ix)*64+zidx];
block16[3*64+zidx] += block8[(8+6*iy+ix)*64+zidx];
block16[3*64+zidx] += block8[(13+6*iy+ix)*64+zidx];
block16[3*64+zidx] += block8[(14+6*iy+ix)*64+zidx];
}
if(threadIdx.x >= 4 && threadIdx.x < 8 && threadIdx.y < 8){
block16[64*5+zidx] = block8[64*(14+6*iy+ix)+zidx];
block16[64*5+zidx] += block8[64*(15+6*iy+ix)+zidx];
block16[64*5+zidx] += block8[64*(20+6*iy+ix)+zidx];
block16[64*5+zidx] += block8[64*(21+6*iy+ix)+zidx];
}
if(threadIdx.x >= 4 && threadIdx.x < 8 && threadIdx.y >= 8 && threadIdx.y < 16 ){
block16[64*7+zidx] = block8[64*(13+6*iy+ix)+zidx];
block16[64*7+zidx] += block8[64*(14+6*iy+ix)+zidx];
block16[64*7+zidx] += block8[64*(19+6*iy+ix)+zidx];
block16[64*7+zidx] += block8[64*(20+6*iy+ix)+zidx];
}
__syncthreads();
#pragma unroll
for(ixx = 8*ix ; ixx < 8+8*ix ; ixx+=4){
#pragma unroll
for(iyy = 8*iy ; iyy < 8+8*iy; iyy+=4){
xx32 = ((float)(ixx+(thYmod)))/16.0;// for next iter 0 change to 4/8/12
yy32 = ((float)(iyy+(thYdiv)))/16.0;// for next iter 0 change to 4/8/12
xx16 = ((float)(ixx-8*ix+(thYmod)))/8.0;// for next iter 0 change to 4/8/12
yy16 = ((float)(iyy-8*iy+(thYdiv)))/8.0;// for next iter 0 change to 4/8/12
x32 = 1-xx32;
y32 = 1-yy32;
x16 = 1-xx16;
y16 = 1-yy16;
res[idxRes] = (x32*y32*block16[idxB32_1])+(xx32*y32*block16[idxB32_2])+(x32*yy32*block16[idxB32_3])+(xx32*yy32*block16[idxB32_4]);
res[idxRes+32] = (x32*y32*block16[idxB32_1+32])+(xx32*y32*block16[idxB32_2+32])+(x32*yy32*block16[idxB32_3+32])+(xx32*yy32*block16[idxB32_4+32]);
res[idxRes] += 4*((x16*y16*block16[idxB16_1])+(xx16*y16*block16[idxB16_2])+(x16*yy16*block16[idxB16_3])+(xx16*yy16*block16[idxB16_4]));
res[idxRes+32] += 4*((x16*y16*block16[idxB16_1+32])+(xx16*y16*block16[idxB16_2+32])+(x16*yy16*block16[idxB16_3+32])+(xx16*yy16*block16[idxB16_4+32]));
if(mode){
idx2 = ((iyy+(thYdiv))*16+(ixx+(thYmod)))*64+shift;
weight = w[search[ixx+(thYmod)][iyy+(thYdiv)]];
res[idxRes] += weight*block2[idx2];
res[idxRes+32] += weight*block2[idx2+32];
}
__syncthreads();
//------find max of sums -------------//
findBestDispXX(res, indexes, minTemp);
__syncthreads();
//------save best results into the file-----//
if(threadIdx.x >= ixx && threadIdx.x < ixx+4 && threadIdx.y >= iyy && threadIdx.y < iyy+4 && threadIdx.z == 0){
if(res[(4*(threadIdx.y%4)+(threadIdx.x%4))*64] < maxErr && pattern[threadIdx.x+1][threadIdx.y+1] < 255)
disp[t] = 4*(int)(indexes[(4*(threadIdx.y%4)+(threadIdx.x%4))*32])+(((int)indexes[(4*(threadIdx.y%4)+(threadIdx.x%4))*32+1])-4);
else
disp[t] = 0;
}
__syncthreads();
}
}
}
}
}
// Accumulate a U-disparity histogram: each pixel's disparity value selects a
// row of the (256 x cols) accumulator `udisp`, bucketed per image column.
// Expected launch: 2D grid covering at least cols x rows threads.
__global__ void udisp(const int rows, const int cols, unsigned char *disp, unsigned char *udisp){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // BUGFIX: guard against the partial tail of the grid; the original read
    // and wrote out of bounds whenever the grid did not divide the image.
    if (x >= cols || y >= rows) return;
    // NOTE(review): this increment races when several rows of one column share
    // a disparity, and the unsigned char bucket can wrap past 255. CUDA has no
    // byte-wide atomicAdd; a proper fix needs an int accumulator -- confirm
    // whether the approximate counts are acceptable to the callers.
    udisp[disp[y*cols+x]*cols+x]++;
}
// Project a disparity-space histogram into a metric top-view ("u-depth")
// image: each (x, y) cell of udisp maps to depth Zw = 40 / y and lateral
// position Xw, and its count is painted over the depth span it covers.
// NOTE(review): relies on a device helper roundff() defined elsewhere in
// this file (presumably float rounding) -- confirm; the X axis uses round().
__global__ void udispToUdepth(const int rows, const int cols, unsigned char *udisp, unsigned char *udepth){
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    const double v0 = 327.9689445495605;           // principal point (pixels)
    if (y < 2) return;                             // avoid division by 0 for y and y-1
    const float Zw     = 10.0f*4.0f/(y);           // depth of this row (meters)
    const float ZwNext = 10.0f*4.0f/(y-1);         // depth of the next (closer) row
    if (ZwNext > 4.0f) return;                     // clip everything nearer than 4 m
    const float Xw = (float)((x - v0) * Zw / 333.333);   // lateral offset (meters)
    const int Z     = 480 - (int)roundff(Zw*100);        // output row: 1 px == 1 cm
    const int Znext = 480 - (int)roundff(ZwNext*100);
    const int X     = 320 + round(Xw*100);               // output column, centered
    if (Z < 0 || Z >= 480 || X < 0 || X >= 640) return;
    // Fill the span (Znext, Z] so consecutive disparity rows tile without gaps.
    for (int yi = Znext + 1; yi <= Z; yi++)
        udepth[yi*cols+X] = udisp[y*cols+x];
}
|
6,992 | extern "C" void gvectorAdd(double *A, double *B, double *C, int *n);
/*
 * Element-wise vector sum: C[i] = A[i] + B[i] for i in [0, numElements).
 * Expected launch: 1D grid with at least numElements threads; the guard
 * makes the partial tail block a no-op.
 */
__global__ void
vectorAdd(const double *A, const double *B, double *C, int numElements) {
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= numElements)
        return;                  // thread past the end of the data
    C[idx] = A[idx] + B[idx];
}
/*
 * Host wrapper: computes C = A + B on the GPU for three host buffers of
 * *n doubles each. Blocking: returns only after C has been filled (the
 * final cudaMemcpy synchronizes with the kernel on the default stream).
 *
 * Fixes over the original: rejects NULL/non-positive sizes (the old code
 * launched a 0-block grid for *n == 0, an invalid configuration), and checks
 * every CUDA call instead of silently continuing after a failure; on any
 * error the device buffers are still released and C is left untouched.
 */
void gvectorAdd(double *A, double *B, double *C, int *n) {
    if (n == NULL || *n <= 0) return;          // nothing to do / invalid size
    const size_t bytes = (size_t)(*n) * sizeof(double);
    double *d_A = NULL, *d_B = NULL, *d_C = NULL;
    cudaError_t err = cudaSuccess;
    // Execution configuration: 256 threads per block, ceil-div grid.
    dim3 blockSize(256, 1, 1);
    dim3 gridSize((*n + blockSize.x - 1) / blockSize.x, 1, 1);
    do {
        if ((err = cudaMalloc((void**)&d_A, bytes)) != cudaSuccess) break;
        if ((err = cudaMalloc((void**)&d_B, bytes)) != cudaSuccess) break;
        if ((err = cudaMalloc((void**)&d_C, bytes)) != cudaSuccess) break;
        if ((err = cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice)) != cudaSuccess) break;
        if ((err = cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice)) != cudaSuccess) break;
        vectorAdd<<<gridSize, blockSize>>>(d_A, d_B, d_C, *n);
        if ((err = cudaGetLastError()) != cudaSuccess) break;  // launch-config errors
        // Blocking D2H copy; also surfaces asynchronous kernel failures.
        err = cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);
    } while (0);
    // err != cudaSuccess here means C was not (fully) written; callers that
    // need diagnostics can query cudaGetLastError themselves.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
|
6,993 | #include <assert.h>
#include <stdio.h>
#include <stdlib.h>
/*
 * Write the node x ndist matrix pmat (row-major) to the text file
 * "suma_de_coliciones.txt": one row per line, values tab-separated with %f.
 * Exits the whole process if the file cannot be opened.
 */
void guardar_suma(float *pmat, int node, int ndist) {
    FILE *f = fopen("suma_de_coliciones.txt", "w");
    if (f == NULL) {
        printf("Error opening file!\n");
        exit(1);
    }
    for (int i = 0; i < node; i++) {
        const float *fila = pmat + (size_t)i * ndist;  // start of row i
        for (int j = 0; j < ndist; j++)
            fprintf(f, "%f\t", fila[j]);
        fprintf(f, "\n");
    }
    fclose(f);
}
|
6,994 | #include "includes.h"
/*
 * Normalize a planar CIE Lab image into [0, 1] in place: L is divided by
 * 100, a and b are mapped linearly from [-73, 95], and all three channels
 * are clamped. One thread per pixel; threads outside the image exit early.
 */
__global__ void normalizeLab_kernel(uint width, uint height, float* devL, float* devA, float* devB) {
    const int px = blockDim.x * blockIdx.x + threadIdx.x;
    const int py = blockDim.y * blockIdx.y + threadIdx.y;
    if (px >= width || py >= height)
        return;
    const int idx = py * width + px;
    const float ab_min = -73;
    const float ab_max = 95;
    const float ab_range = ab_max - ab_min;
    // fminf/fmaxf implement exactly the same clamp as an if/else chain.
    devL[idx] = fminf(fmaxf(devL[idx] / 100.0f, 0.0f), 1.0f);
    devA[idx] = fminf(fmaxf((devA[idx] - ab_min) / ab_range, 0.0f), 1.0f);
    devB[idx] = fminf(fmaxf((devB[idx] - ab_min) / ab_range, 0.0f), 1.0f);
}
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <algorithm>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
int main(int argc, char* argv[])
{
size_t N = 10000; // Default value
cudaEvent_t start;
cudaEvent_t end;
float elapsed_time;
cudaEventCreate(&start);
cudaEventCreate(&end);
// generate 32M random numbers serially
if (argc > 1) {
N = atoi(argv[1]);
std::cout << "Using number of elements = " << N << std::endl;
}
thrust::host_vector<int> h_vec(N);
std::generate(h_vec.begin(), h_vec.end(), rand);
cudaEventRecord(start,0);
// A new device vector initialized to same values
thrust::device_vector<int> d_vec(h_vec.begin(),h_vec.end());
thrust::sort(d_vec.begin(),d_vec.end());
// Copy back
thrust::copy(d_vec.begin(),d_vec.end(),h_vec.begin());
cudaEventSynchronize(end);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&elapsed_time, start, end);
std::cout << "device sort took " << elapsed_time << " milliseconds" << std::endl;
// output smallest/largest value
std::cout << "Smallest value is\n" << h_vec[0] << std::endl;
std::cout << "Largest value is\n" << h_vec[h_vec.size()-1] << std::endl;
return 0;
}
|
6,996 | #include "includes.h"
// ïîäêëþ÷åíèå áèáëèîòåêè cuBLAS
#define IDX2C(i,j,ld) (((i)*(ld))+(j))
/*
 * Naive dense matrix multiply C = A * B for square N x N row-major matrices.
 * One thread computes one element of C; launch a 2D grid covering N x N.
 */
__global__ void matrixMultiplicationKernel(float* A, float* B, float* C, int N)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= N || c >= N)
        return;                          // outside the matrix: nothing to do
    float acc = 0;
    for (int k = 0; k < N; k++)          // dot product of row r and column c
        acc += A[r * N + k] * B[k * N + c];
    C[r * N + c] = acc;
}
6,997 | #include "includes.h"
/*
 * Matrix-vector multiply-accumulate: C[row] += dot(A[row, :], g) for a
 * hard-coded 1000 x 1000 row-major matrix A. One thread per output row
 * (y dimension of the launch); excess threads exit.
 * BUGFIX: the original guard was `row > N`, which let row == N through and
 * read/wrote one full row past the end of A and one element past C.
 */
__global__ void Matrix_Product (double *A, double *g, double *C)
{
    const int N = 1000;                       // matrix dimension (fixed by callers)
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= N) return;                     // was `row > N`: off-by-one OOB
    double Cvalue = 0.00;
    for (int e = 0; e < N; e++)
        Cvalue += A[N * row + e] * g[e];
    C[row] += Cvalue;                         // accumulate into the existing C
}
6,998 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Reduce the 64-bit value n modulo p and return it as an int.
// Values already in [0, p) pass through; negatives are shifted up by p
// after a truncated division.
__device__ int mod_long_GPU(long long n, long long p) {
    long long r = n;
    if (r >= p) {
        r = n % p;
    } else if (r < 0) {
        r = n - (n / p) * p;   // truncated remainder, in (-p, 0]
        r += p;                // shift into the canonical range
    }
    return (int)r;
}
// Swap rows i and j of the row-major (row x col) matrix m, element by
// element. No-op when i == j. Serial helper: the calling thread does the
// whole swap itself.
__device__ void swap_rows_GPU(int *m, int row, int col, int j, int i) {
    if (i == j)
        return;
    for (int k = 0; k < col; k++) {
        long long held = m[i * col + k];
        m[i * col + k] = m[j * col + k];
        m[j * col + k] = held;
    }
}
// Reduce n modulo p for plain ints (32-bit counterpart of mod_long_GPU):
// values in [0, p) pass through, negatives are lifted into range.
__device__ int mod_GPU(int n, int p) {
    int r = n;
    if (r >= p) {
        r = n % p;
    } else if (r < 0) {
        r = n - (n / p) * p;   // truncated remainder, in (-p, 0]
        r += p;                // shift into the canonical range
    }
    return r;
}
// Multiplicative inverse of n modulo p (p prime), computed with the
// iterative extended Euclidean algorithm. Returns a value in [0, p).
__device__ int invers_GPU(int n, int p) {
    const int p0 = p;        // original modulus, for the final lift
    int x0 = 0, x1 = 1;      // Bezout coefficient pair
    if (p == 1) return 1;
    while (n > 1) {
        const int q = n / p;
        int held = p;        // (n, p) <- (p, n mod p)
        p = n % p;
        n = held;
        held = x0;           // (x1, x0) <- (x0, x1 - q*x0)
        x0 = x1 - q * x0;
        x1 = held;
    }
    if (x1 < 0) x1 += p0;    // lift a negative coefficient into [0, p)
    return x1;
}
// (a + b) mod p, folded into [0, p) via mod_GPU.
__device__ int add_mod_GPU(int a, int b, int p) {
    return mod_GPU(a + b, p);
}
// (a - b) mod p; the subtraction is widened to 64 bits so it cannot
// overflow before the reduction.
__device__ int sub_mod_GPU(int a, int b, int p) {
    long long wide_a = a;
    long long wide_b = b;
    return mod_long_GPU(wide_a - wide_b, p);
}
// (a * b) mod p; the product is formed in 64 bits so two int factors
// cannot overflow before the reduction.
__device__ int mul_mod_GPU(int a, int b, int p) {
    long long wide_a = a;
    long long wide_b = b;
    return mod_long_GPU(wide_a * wide_b, p);
}
// Zero the pivot column below the pivot row of a row x col matrix.
// Each thread clears a vertical strip of `thread_height` consecutive rows;
// `block_dim` is the launch's threads-per-block, used to size each block's
// share of the rows.
__global__ void reset_pivot_col(int *matrix, int row, int col, int pivot_row, int pivot_col, int thread_height, int block_dim) {
    const int strip = blockIdx.x * (thread_height * block_dim) + threadIdx.x * thread_height;
    const int start_row = (pivot_row + 1) + strip;
    const int end_row = start_row + thread_height;   // one past this thread's strip
    int count = thread_height;
    if (end_row > row) {
        // Trim strips that stick out past the last matrix row.
        count = thread_height - (end_row - row);
        if (count > thread_height)
            count = 0;
    }
    for (int i = 0; i < count; i++)
        matrix[(start_row + i) * col + pivot_col] = 0;
}
// Parallel row swap: thread tid exchanges column tid of rows i and j.
// Launch with at least `col` threads in total; extra threads exit.
__global__ void swap_rows(int *matrix, int row, int col, int j, int i) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < col) {
        const int a = i * col + tid;
        const int b = j * col + tid;
        const int held = matrix[a];
        matrix[a] = matrix[b];
        matrix[b] = held;
    }
}
|
6,999 | #define ACCUM_N 1024
/*
 * Direct-sum N-body gravitational accelerations (unit G):
 *   a[i] = sum_j mass[j] * (r_j - r_i) / |r_j - r_i|^3
 * pos and a hold packed xyz triples. One block per body (grid-stride over
 * bodies); ACCUM_N shared-memory partial sums per block, tree-reduced.
 * Requires blockDim.x <= ACCUM_N and ACCUM_N a power of two.
 *
 * Fixes over the original:
 *  - the reduction accumulated .z components into .x (typo), corrupting a.x;
 *  - accumResult slots with index >= n were never initialized when
 *    n < ACCUM_N, feeding garbage into the reduction;
 *  - no barrier separated one body's final read from the next body's
 *    overwrite of accumResult;
 *  - |r|^3 was computed with double pow() inside a float kernel.
 */
__global__ void get_accelerations(float *a, float *pos, float * mass, int n)
{
    __shared__ float3 accumResult[ACCUM_N];
    // Loop over all the bodies, one per block, grid-stride.
    for (int vec = blockIdx.x; vec < n; vec += gridDim.x) {
        float3 c_pos = make_float3(pos[3*vec], pos[3*vec+1], pos[3*vec+2]);
        // Each accumulator strides through the bodies with stride ACCUM_N.
        for (int iAccum = threadIdx.x; iAccum < ACCUM_N; iAccum += blockDim.x) {
            float3 accel = make_float3(0, 0, 0);
            for (int pos_index = iAccum; pos_index < n; pos_index += ACCUM_N) {
                float3 r;
                r.x = pos[3*pos_index  ] - c_pos.x;
                r.y = pos[3*pos_index+1] - c_pos.y;
                r.z = pos[3*pos_index+2] - c_pos.z;
                float r2 = r.x*r.x + r.y*r.y + r.z*r.z;
                float r_3 = r2 * sqrtf(r2);          // |r|^3 in single precision
                if (r_3 > 0) {                       // skip the self-interaction
                    accel.x += r.x / r_3 * mass[pos_index];
                    accel.y += r.y / r_3 * mass[pos_index];
                    accel.z += r.z / r_3 * mass[pos_index];
                }
            }
            // Store unconditionally so slots whose inner loop never ran
            // (iAccum >= n) contribute zero instead of stale shared memory.
            accumResult[iAccum] = accel;
        }
        // Tree-like reduction of the partial sums (ACCUM_N power of two).
        for (int stride = ACCUM_N / 2; stride > 0; stride >>= 1) {
            __syncthreads();
            for (int iAccum = threadIdx.x; iAccum < stride; iAccum += blockDim.x) {
                accumResult[iAccum].x += accumResult[stride + iAccum].x;
                accumResult[iAccum].y += accumResult[stride + iAccum].y;
                accumResult[iAccum].z += accumResult[stride + iAccum].z;  // was += .z into .x
            }
        }
        __syncthreads();          // reduction done before thread 0 reads slot 0
        if (threadIdx.x == 0) {
            a[ 3*vec ] = accumResult[0].x;
            a[3*vec+1] = accumResult[0].y;
            a[3*vec+2] = accumResult[0].z;
        }
        __syncthreads();          // don't overwrite accumResult for the next body early
    }
}
|
7,000 | /*
*
* Auth: Kapil Ashok Melwani
* Email: alu0100883473@ull.edu.es
* CUDA C Programming
* Histograma
* Arquitecturas avanzadas y de Propósito Específico
* 29 - Jan - 2018
*
*/
///////////////////////////////////////////////////////////////////////////
// includes
///////////////////////////////////////////////////////////////////////////
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
///////////////////////////////////////////////////////////////////////////
// defines
///////////////////////////////////////////////////////////////////////////
#define N 5000000
#define M 8
///////////////////////////////////////////////////////////////////////////
// declaracion de funciones
///////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////
// Histogram accumulation kernel: one thread per input element; the bucket
// is (value mod M) and the count is bumped atomically because many threads
// land in the same bin concurrently.
///////////////////////////////////////////////////////////////////////////
__global__
void kernel(int *vector, int *histograma)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return;                      // thread past the end of the input
    atomicAdd(&histograma[vector[idx] % M], 1);
}
///////////////////////////////////////////////////////////////////////////
// Histogram initialization kernel: zero the M bins before accumulation.
// Improvement: the original had thread 0 clear every bin in a serial loop;
// here each of the first M threads clears one bin. The observable result
// is identical (histo[0..M-1] == 0) but the work is parallel and the
// single-thread bottleneck is gone.
///////////////////////////////////////////////////////////////////////////
__global__
void histograma(int *histo)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < M)
        histo[i] = 0;
}
///////////////////////////////////////////////////////////////////////////
// Print the final histogram as a table: one column per bin plus a total.
// NOTE(review): the header row labels the bins 1..M while the counts are
// for residues 0..M-1 -- the labels look off by one; confirm intent.
///////////////////////////////////////////////////////////////////////////
void mostrar_histograma(int *hst_vector,int suma)
{
    int idx;
    printf("\n\t\t\t\t\tHISTOGRAMA\n\n");
    printf("\n=====================================================================================================\n");
    for (idx = 1; idx <= M; idx++)
        printf("| %d | ",idx);
    printf("| TOTAL | ");
    printf("\n");
    for (idx = 0; idx < M; idx++)
        printf("| %d | ",hst_vector[idx]);
    printf("| %d |",suma);
    printf("\n=====================================================================================================");
}
///////////////////////////////////////////////////////////////////////////
// main: fills a vector with N random values, builds an M-bin histogram of
// (value mod M) on the GPU, times the kernels with CUDA events, and checks
// that the bin counts sum to N.
// Fixes over the original: the elapsed time is labeled ms (that is what
// cudaEventElapsedTime returns), the upload of the *uninitialized* host
// histogram is dropped (the histograma kernel zeroes it on the device),
// the pointless N-element copy-back of the unmodified input is dropped,
// and both host buffers are freed before exit.
///////////////////////////////////////////////////////////////////////////
int main(void)
{
    // number vectors on host and device
    int *hst_vector;
    int *dev_vector;
    // histogram bins on host and device
    int *hst_histograma;
    int *dev_histograma;
    // CUDA error state
    cudaError_t error = cudaSuccess;
    // launch configuration
    int threadperBlock;
    int blockperGrid;
    // timers
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;
    // final consistency check
    int sum_elements = 0;
    printf("Info: Reservando memoria para los vectores\n");
    // allocate the host number vector
    hst_vector = (int*)malloc(N*sizeof(int));
    if(hst_vector == NULL){
        printf("\nError en reserva de memoria de hst_vector");
        exit(EXIT_FAILURE);
    }
    // allocate the host histogram
    hst_histograma = (int*)malloc(M*sizeof(int));
    if(hst_histograma == NULL){
        printf("\nError en reserva de memoria de hst_histograma");
        exit(EXIT_FAILURE);
    }
    // allocate the device number vector
    error = cudaMalloc((void**)&dev_vector,N*sizeof(int));
    if(error != cudaSuccess){
        printf("\nError en reserva de memoria de dev_vector. (Code Error: %s)\n",cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // allocate the device histogram
    error = cudaMalloc((void**)&dev_histograma,M*sizeof(int));
    if(error != cudaSuccess){
        printf("\nError en reserva de memoria de dev_histograma. (Code Error: %s)\n",cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // fill the host vector with random values in [1, 255]
    // (the original comment claimed [1, 8], but 1+rand()%255 is what runs)
    srand ((int)time(NULL));
    for(int i=0; i<N; i++)
        hst_vector[i] = (int)(1+rand()%(256-1));
    // copy the input vector to the device
    printf("Info: Copiando elementos de HOST -> DEVICE");
    error = cudaMemcpy(dev_vector,hst_vector,N*sizeof(int),cudaMemcpyHostToDevice);
    if(error != cudaSuccess){
        printf("\nError en la copia de elementos de hst_vector a dev_vector. (Code Error: %s)\n",cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // No upload of hst_histograma here: the original copied uninitialized
    // host memory to the device only for the histograma kernel to zero it.
    threadperBlock = 512;
    blockperGrid = (N + threadperBlock-1)/threadperBlock;
    printf("Info: CUDA ejecutara %d hilos y %d bloques\n",threadperBlock,blockperGrid);
    printf("Info: Ejecutamos Kernel e iniciamos temporizadores\n");
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    printf("Info: Temporizador iniciado\n");
    cudaEventRecord(start,0);
    // Both kernels go to the default stream, so the bin reset is guaranteed
    // to finish before the accumulation kernel starts.
    histograma<<<blockperGrid,threadperBlock>>>(dev_histograma);
    kernel<<<blockperGrid,threadperBlock>>>(dev_vector,dev_histograma);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);
    error = cudaGetLastError();
    if(error != cudaSuccess){
        printf("Error durante las llamadas a kernel. (Code Error: %s)\n",cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // BUGFIX: cudaEventElapsedTime reports milliseconds, not seconds
    printf("Info: Duración de la creación del histograma completo %.2f ms\n",elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("Info: Copiamos elementos de vectores dev -> host\n");
    // Only the histogram needs to come back; dev_vector was never modified,
    // so copying it back (as the original did) was wasted bandwidth.
    error = cudaMemcpy(hst_histograma,dev_histograma,M*sizeof(int),cudaMemcpyDeviceToHost);
    if(error != cudaSuccess){
        printf("\nError en la copia de elementos de dev_histograma a hst_histograma. (Code Error: %s)\n",cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // the bin counts must add up to the number of input elements
    for(int x=0;x<M;x++)
        sum_elements += hst_histograma[x];
    if(sum_elements != N){
        printf("Error, en el histograma hay %d elementos de 5 millones\n",sum_elements);
    }else{
        printf("\n\nInfo: Ejecución finalizada correctamente :) \n");
        mostrar_histograma(hst_histograma,sum_elements);
    }
    error = cudaFree(dev_vector);
    if(error != cudaSuccess){
        printf("Error, fallo librando el vector dev_vector (error code %s)\n",cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    error = cudaFree(dev_histograma);
    if(error != cudaSuccess){
        printf("Error, fallo librando el vector dev_local_histograma (error code %s)\n",cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
    // release host memory (the original leaked both buffers)
    free(hst_vector);
    free(hst_histograma);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.