serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
20,901 | #include <vector>
#include <iostream>
#include <cmath>
__global__ void vector_add_kernel(float * r, float * v1, float * v2, int size)
{
    // Element-wise vector sum r = v1 + v2; one thread per element.
    int const idx = threadIdx.x + blockIdx.x * blockDim.x;
    if(idx >= size)
        return;
    r[idx] = v1[idx] + v2[idx];
}
void vector_add_cpu(float * r, float * v1, float * v2, int size)
{
    // CPU reference implementation of element-wise vector addition.
    // BUG FIX: the loop index is now int to match the signed `size`
    // parameter; the original compared std::size_t against int
    // (sign-compare), and a negative size would wrap to a huge unsigned
    // bound and run off the end of the arrays.
    for(int i = 0; i < size; ++i)
    {
        r[i] = v1[i] + v2[i];
    }
}
void fill_vector(std::vector<float> & v)
{
    // Fill v in place with its own indices: v[i] == i.
    std::size_t const n = v.size();
    for(std::size_t idx = 0; idx != n; ++idx)
        v[idx] = static_cast<float>(idx);
}
bool compare_vectors(std::vector<float> const& v1, std::vector<float> const& v2)
{
    // Element-wise comparison with absolute tolerance 1e-5.
    // Logs every mismatching element before returning the overall verdict.
    if(v1.size() != v2.size())
    {
        std::cout << "ERROR: Vector sizes mismatch!" << std::endl;
        return false;
    }
    bool all_match = true;
    for(std::size_t idx = 0; idx < v1.size(); ++idx)
    {
        if(std::abs(v1[idx]-v2[idx]) > 1e-5)
        {
            std::cout << "ERROR: element " << idx << " mismatch: " << v1[idx] << " != " << v2[idx] << std::endl;
            all_match = false;
        }
    }
    return all_match;
}
int main()
{
// Vector-add demo: computes a+b on the CPU, repeats it on the GPU, and
// verifies that the two results agree within tolerance.
int const N = 1000;
std::vector<float> a(N);
std::vector<float> b(N);
std::vector<float> c(N);
fill_vector(a);
fill_vector(b);
// CPU reference result in c.
vector_add_cpu(&c[0], &a[0], &b[0], N);
// Create copies on GPU device
// Allocate memory
float * d_a;
float * d_b;
float * d_c;
// NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked here.
cudaMalloc(&d_a, N*sizeof(float));
cudaMalloc(&d_b, N*sizeof(float));
cudaMalloc(&d_c, N*sizeof(float));
// Copy
cudaMemcpy(d_a, &a[0], N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b[0], N*sizeof(float), cudaMemcpyHostToDevice);
//
// Kernel call
//
// 8 blocks x 128 threads = 1024 threads; the kernel's i < size guard
// handles the 24 surplus threads.
vector_add_kernel<<<8,128>>>(d_c, d_a, d_b, N); // (8 * 128) > 1000
// Get result
std::vector<float> c_from_gpu(N);
// Blocking D2H copy; also synchronizes with the kernel on the default stream.
cudaMemcpy(&c_from_gpu[0], d_c, N*sizeof(float), cudaMemcpyDeviceToHost);
bool ok = compare_vectors(c, c_from_gpu);
// Free the device memory
cudaFree(d_c);
cudaFree(d_b);
cudaFree(d_a);
if(ok)
std::cout << "Results match... It works!" << std::endl;
return ok ? 0 : 1;
}
|
20,902 | #pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcpp"
#include <thrust/device_vector.h>
__global__ void
_cu_vertdegree(int numpts, int colsize, float eps, float* d_data, int* d_Va)
{
    // For each point i, count the points within Euclidean distance eps
    // (i itself included, distance 0) and store the degree in d_Va[i].
    // d_data is row-major: numpts rows of colsize coordinates.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= numpts)
        return;
    int degree = 0;
    for (int j = 0; j < numpts; ++j) {
        float accum = 0.0f; // float literal (original used a double 0.0)
        for (int cs = 0; cs < colsize; ++cs) {
            // Hoist the difference: the original evaluated it twice.
            float diff = d_data[i * colsize + cs] - d_data[j * colsize + cs];
            accum += diff * diff;
        }
        if (sqrtf(accum) < eps) {
            ++degree;
        }
    }
    // Single global store instead of a read-modify-write per candidate j.
    d_Va[i] = degree;
}
__global__ void
_cu_asmadjlist(int numpts, int colsize, float eps, float* d_data, int* d_Va1, int* d_Ea)
{
    // Assemble the adjacency list: point i's neighbours (distance < eps,
    // self included) are appended to d_Ea starting at offset d_Va1[i], the
    // exclusive scan of the degrees produced by _cu_vertdegree. The
    // distance test must match _cu_vertdegree exactly or d_Ea overflows.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= numpts)
        return;
    int basei = d_Va1[i];
    for (int j = 0; j < numpts; ++j) {
        float accum = 0.0f; // float literal (original used a double 0.0)
        for (int cs = 0; cs < colsize; ++cs) {
            // Hoist the difference: the original evaluated it twice.
            float diff = d_data[i * colsize + cs] - d_data[j * colsize + cs];
            accum += diff * diff;
        }
        if (sqrtf(accum) < eps) {
            d_Ea[basei] = j;
            ++basei;
        }
    }
}
__global__ void
_cu_breadth_first_search_kern(int numpts, int* d_Ea, int* d_Va0, int* d_Va1, int* d_Fa, int* d_Xa)
{
    // One BFS frontier-expansion step.
    // d_Fa: frontier flags, d_Xa: visited flags, d_Va1: adjacency-list
    // offsets, d_Va0: vertex degrees, d_Ea: flattened adjacency list.
    int node = blockIdx.x * blockDim.x + threadIdx.x;
    if (node >= numpts)
        return;
    if (!d_Fa[node])
        return;
    // Move this node from the frontier into the visited set.
    d_Fa[node] = 0;
    d_Xa[node] = 1;
    int const first = d_Va1[node];
    int const last = first + d_Va0[node];
    for (int e = first; e < last; ++e)
    {
        int const neighbour = d_Ea[e];
        if (!d_Xa[neighbour])
            d_Fa[neighbour] = 1;
    }
}
namespace clustering {
// Host wrappers: each launches its kernel with 256-thread blocks and then
// blocks on cudaDeviceSynchronize so callers observe completed results.
// Computes the eps-neighbourhood degree of each of the N points into d_Va.
void vertdegree(int N, int colsize, float eps, float* d_data, int* d_Va)
{
_cu_vertdegree<<<(N + 255) / 256, 256>>>(N, colsize, eps, d_data, d_Va);
cudaDeviceSynchronize();
}
// Exclusive prefix scan of the degrees in Va0 into Va1, producing the start
// offset of each vertex's adjacency list. Va0/Va1 are device pointers.
void adjlistsind(int N, int* Va0, int* Va1)
{
thrust::device_ptr<int> va0_ptr(Va0);
thrust::device_ptr<int> va1_ptr(Va1);
thrust::exclusive_scan(va0_ptr, va0_ptr+N, va1_ptr);
cudaDeviceSynchronize();
}
// Fills the adjacency list d_Ea using the offsets previously scanned into d_Va1.
void asmadjlist(int N, int colsize, float eps, float* d_data, int* d_Va1, int* d_Ea)
{
_cu_asmadjlist<<<(N + 255) / 256, 256>>>(N, colsize, eps, d_data, d_Va1, d_Ea);
cudaDeviceSynchronize();
}
// Runs one frontier-expansion step of the breadth-first search.
void breadth_first_search_kern(int N, int* d_Ea, int* d_Va0, int* d_Va1, int* d_Fa, int* d_Xa)
{
_cu_breadth_first_search_kern<<<(N + 255) / 256, 256>>>(N, d_Ea, d_Va0, d_Va1, d_Fa, d_Xa);
cudaDeviceSynchronize();
}
}
#pragma GCC diagnostic pop
|
20,903 | __global__ void
naiveKernel(float *A,float *A_out,const int n){
    // Interleaved-addressing reduction leaving 4 partial sums per block,
    // written to A_out[4*blockIdx.x .. 4*blockIdx.x+3].
    // Requires blockDim.x floats of dynamic shared memory.
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    sdata[tid] = 0;
    if(i<n)
        sdata[tid] = A[i];
    // BUG FIX: a barrier is required between the shared-memory load and the
    // first cross-thread read; the first loop iteration (s=4) reads values
    // that threads in other warps may not have written yet.
    __syncthreads();
    for(unsigned int s=4;s<blockDim.x;s*=2){
        if(tid%(2*s)==0)
            if((tid+s)<blockDim.x){
                sdata[tid] += sdata[tid+s];
                sdata[tid+1] += sdata[tid+1+s];
                sdata[tid+2] += sdata[tid+2+s];
                sdata[tid+3] += sdata[tid+3+s];
            }
        __syncthreads();
    }
    if(tid<4)
        A_out[4*blockIdx.x+tid] = sdata[tid];
}
__global__ void
optimKernel1(float *A,float *A_out,const int n){
    //Getting rid of the divergent threads
    // Strided-index variant of naiveKernel: same 4-partial-sums-per-block
    // output contract, but active threads are packed to reduce divergence.
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    sdata[tid] = 0;
    if(i<n)
        sdata[tid] = A[i];
    // BUG FIX: barrier needed between the load and the first cross-thread
    // read in the reduction loop below.
    __syncthreads();
    for(unsigned int s=4;s<blockDim.x;s*=2){
        int index = 2*s*(tid/4);
        if(index< blockDim.x)
            sdata[index + tid%4] += sdata[index+ s + tid%4];
        __syncthreads();
    }
    if(tid<4)
        A_out[4*blockIdx.x+tid] = sdata[tid];
}
__global__ void
optimKernel2(float *A,float *A_out,const int n){
    //Sequential Addressing
    //NOTE : This thing works since blockDim is 1024
    // Sequential-addressing reduction; final step s=4 leaves the 4 partial
    // sums in sdata[0..3].
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int i = blockDim.x*blockIdx.x + threadIdx.x;
    sdata[tid] = 0;
    if(i<n)
        sdata[tid] = A[i];
    // BUG FIX: barrier needed between the load and the first reduction step.
    __syncthreads();
    for(unsigned int s=blockDim.x/2;s>=4;s/=2){
        if(tid<s){
            sdata[tid] += sdata[tid+s];
        }
        __syncthreads();
    }
    if(tid<4)
        A_out[4*blockIdx.x+tid] = sdata[tid];
}
__global__ void
optimKernel3(float *A,float *A_out,const int n){
    //Idle Threads
    //NOTE : This thing works since blockDim is 1024
    // Each block consumes 2*blockDim.x inputs: thread tid adds elements
    // i and i+blockDim.x during the load to halve idle threads.
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int i = blockDim.x*(blockIdx.x*2)+threadIdx.x;
    sdata[tid] = 0;
    if(i<n){
        // BUG FIX: only read A[i+blockDim.x] when it is in range. The
        // original multiplied the out-of-range element by 0 but still
        // dereferenced it, reading past the end of A in the last block.
        float second = (i + blockDim.x < n) ? A[i + blockDim.x] : 0.0f;
        sdata[tid] = A[i] + second;
    }
    // BUG FIX: barrier needed between the load and the first reduction step.
    __syncthreads();
    for(unsigned int s=blockDim.x/2;s>2;s/=2){
        if(tid<s){
            sdata[tid] += sdata[tid+s];
        }
        __syncthreads();
    }
    if(tid<4)
        A_out[4*blockIdx.x+tid] = sdata[tid];
}
__global__ void
optimKernel4(float *A,float *A_out,const int n){
    //Unwrapping the last roll
    //NOTE : This thing works since blockDim is 1024
    // Like optimKernel3, but the last warp-sized steps are unrolled.
    // (The original was annotated "Code incorrect, doesn't work"; the fixes
    // are marked below.)
    extern __shared__ float sdata[];
    int tid = threadIdx.x;
    int i = blockDim.x*(blockIdx.x*2)+threadIdx.x;
    sdata[tid] = 0;
    if(i<n){
        // BUG FIX: guard the second load instead of reading out of bounds
        // and multiplying by zero.
        float second = (i + blockDim.x < n) ? A[i + blockDim.x] : 0.0f;
        sdata[tid] = A[i] + second;
    }
    // BUG FIX: barrier between the load and the first reduction read.
    __syncthreads();
    for(unsigned int s=blockDim.x/2;s>32;s/=2){
        if(tid<s){
            sdata[tid] += sdata[tid+s];
        }
        __syncthreads();
    }
    __syncthreads();
    // Final warp: unrolled steps 32,16,8,4 leave the 4 partial sums in
    // sdata[0..3]. BUG FIX: use a volatile view plus __syncwarp() so the
    // compiler cannot cache shared values in registers and lanes remain
    // synchronized between dependent steps (required on Volta and later).
    if(tid<32){
        volatile float *vsmem = sdata;
        vsmem[tid] += vsmem[tid + 32]; __syncwarp();
        vsmem[tid] += vsmem[tid + 16]; __syncwarp();
        vsmem[tid] += vsmem[tid + 8];  __syncwarp();
        vsmem[tid] += vsmem[tid + 4];  __syncwarp();
    }
    // BUG FIX: the original stored to A_out[4*blockDim.x + tid] — the same
    // four slots for every block; index by blockIdx.x like the other kernels.
    if(tid<4)
        A_out[4*blockIdx.x + tid] = sdata[tid];
}
|
20,904 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
#define THREADNUM 4
#define BLOCKNUM 4
__device__ float G_rand(curandState *states, int ind){
    // Draw one uniform float from the generator stored at states[ind].
    curandState local_state = states[ind];
    float rand_num = curand_uniform(&local_state);
    // BUG FIX: persist the advanced state. The original left this write-back
    // commented out, so every call replayed the generator from the same
    // state and returned the same value.
    states[ind] = local_state;
    return rand_num;
}
__global__ void G_srand(curandState *states, unsigned long seed){
    // Initialise one curand generator per thread: identical seed, the
    // thread index as the subsequence id, zero offset.
    int const tid = threadIdx.x;
    curand_init(seed, tid, 0, &states[tid]);
}
__global__ void G_testRand(double *tmp_space, curandState *states){
    // Each thread writes one uniform sample into its slot of the
    // BLOCKNUM x THREADNUM output array. Note the generator is indexed by
    // threadIdx.x only, so threads with equal threadIdx share a state.
    int const slot = (blockIdx.x * THREADNUM) + threadIdx.x;
    tmp_space[slot] = G_rand(states, threadIdx.x);
}
int main(){
// Seeds THREADNUM per-thread generators, draws BLOCKNUM*THREADNUM uniform
// samples on the GPU, and prints them as a BLOCKNUM x THREADNUM table.
// initialize for parallel computation
curandState *dev_states;
// NOTE(review): only THREADNUM states are allocated, but BLOCKNUM blocks run
// G_testRand; all blocks share the per-threadIdx state (see G_testRand).
cudaMalloc((void**) &dev_states, sizeof(curandState) * THREADNUM);
G_srand<<<BLOCKNUM, THREADNUM>>>(dev_states, unsigned(time(NULL)));
// prepering for args space
// NOTE(review): this local `G_rand` shadows the __device__ function G_rand.
double *G_rand, *C_rand;
cudaMalloc((void**) &G_rand, sizeof(double) * BLOCKNUM * THREADNUM);
C_rand = (double*)malloc(sizeof(double) * BLOCKNUM * THREADNUM);
// calculation
G_testRand<<<BLOCKNUM, THREADNUM>>>(G_rand, dev_states);
// copy back to MainMemory
// Blocking copy; also synchronizes with the kernel on the default stream.
cudaMemcpy(C_rand, G_rand, sizeof(double) * BLOCKNUM * THREADNUM, cudaMemcpyDeviceToHost);
// output result
int i, j;
printf("Result: ----------------\n");
for (i = 0; i < BLOCKNUM; i++) {
for (j = 0; j < THREADNUM; j++) {
printf("%lf\t", C_rand[(i * THREADNUM) + j]);
}
printf("\n");
}
// delete used memory
cudaFree(dev_states);
cudaFree(G_rand);
free(C_rand);
return 0;
}
|
20,905 | #include <iostream>
#include <cuda.h>
#define WIDTH 3833
#define HEIGHT 2160
bool checkResults(uchar4* rgba, uchar3* bgr, int size) {
    // Verify the BGR->RGBA conversion: red/blue channels swapped,
    // green preserved, alpha forced to 255.
    for (int i = 0; i < size; ++i) {
        bool const pixel_ok = (rgba[i].x == bgr[i].z) &&
                              (rgba[i].y == bgr[i].y) &&
                              (rgba[i].z == bgr[i].x) &&
                              (rgba[i].w == 255);
        if (!pixel_ok) {
            return false;
        }
    }
    return true;
}
__global__ void convertBGR2RGBA(uchar3 *bgr, uchar4* rgba, int width, int height) {
    // One thread per pixel over a flattened 1D launch.
    // BUG FIX: the position was hard-coded to 0 and all four channel writes
    // used '==' (comparison) instead of '=' (assignment), so the kernel
    // never wrote anything.
    int position = blockIdx.x * blockDim.x + threadIdx.x;
    // Protection to avoid segmentation fault
    if (position < width * height) {
        rgba[position].x = bgr[position].z;
        rgba[position].y = bgr[position].y;
        rgba[position].z = bgr[position].x;
        rgba[position].w = 255;
    }
}
int main() {
    // Generates a BGR test image, converts it to RGBA on the GPU, and
    // verifies the conversion on the host.
    uchar3 *h_bgr, *d_bgr;
    uchar4 *h_rgba, *d_rgba;
    int bar_widht = HEIGHT/3;
    // Alloc and generate BGR bars.
    h_bgr = (uchar3*)malloc(sizeof(uchar3)*WIDTH*HEIGHT);
    for (int i=0; i < WIDTH * HEIGHT; ++i) {
        if (i < bar_widht) { h_bgr[i] = { 255, 0, 0 }; }
        else if (i < bar_widht*2) { h_bgr[i] = { 0, 255, 0 }; }
        else { h_bgr[i] = { 0, 0, 255 }; }
    }
    // Alloc RGBA pointers
    h_rgba = (uchar4*)malloc(sizeof(uchar4)*WIDTH*HEIGHT);
    // Alloc gpu pointers
    cudaError_t error = cudaMalloc(&d_bgr, sizeof(uchar3) * WIDTH * HEIGHT);
    if (error != cudaSuccess) {
        std::cout << "Error in cudaMalloc" << std::endl;
    }
    error = cudaMalloc(&d_rgba, sizeof(uchar4) * WIDTH * HEIGHT);
    if (error != cudaSuccess) {
        std::cout << "Error in cudaMalloc" << std::endl;
    }
    // Copy data to GPU
    error = cudaMemcpy(d_bgr, h_bgr, sizeof(uchar3) * WIDTH * HEIGHT, cudaMemcpyHostToDevice);
    if (error != cudaSuccess) {
        std::cout << "Error in cudaMemcpy" << std::endl;
    }
    // One thread per pixel over the flattened image (ceil-division grid).
    dim3 block(512, 1, 1);
    dim3 grid(ceil(WIDTH*HEIGHT/(float)block.x), 1, 1);
    convertBGR2RGBA<<<grid, block, 0, 0>>>(d_bgr, d_rgba, WIDTH, HEIGHT);
    // Blocking copy: also synchronizes with the kernel on the default stream.
    error = cudaMemcpy(h_rgba, d_rgba, sizeof(uchar4) * WIDTH * HEIGHT, cudaMemcpyDeviceToHost);
    if (error != cudaSuccess) {
        std::cout << "Error in cudaMemcpy" << std::endl;
    }
    bool ok = checkResults(h_rgba, h_bgr, WIDTH*HEIGHT);
    if (ok) {
        std::cout << "Executed!! Results OK." << std::endl;
    } else {
        std::cout << "Executed!! Results NOT OK." << std::endl;
    }
    // BUG FIX: release device and host buffers — the original leaked all four.
    cudaFree(d_bgr);
    cudaFree(d_rgba);
    free(h_bgr);
    free(h_rgba);
    return 0;
}
|
20,906 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel: `comp` is chained through
   transcendental intrinsics and printed with full precision. The exact
   expression order is significant for reproducing results, so the
   arithmetic is left untouched. Launched <<<1,1>>> from main. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21) {
// -0.0f / +0.0f evaluates to NaN, which then propagates through comp.
comp = -0.0f / +0.0f - -0.0f / (var_2 + var_3);
float tmp_1 = log10f((-1.9931E36f / (+1.2540E-37f - -1.7856E28f)));
comp += tmp_1 - logf((var_4 / var_5 / var_6));
for (int i=0; i < var_1; ++i) {
float tmp_2 = fmodf(var_7 / -1.7238E-36f, (var_8 / -1.7777E-15f));
comp += tmp_2 + -1.8309E-36f - (var_9 + (-1.7789E-44f - var_10));
// Note: this assignment overwrites the accumulation from the lines above.
comp = (-1.5996E-9f / (var_11 + (-1.3240E36f * var_12 - var_13 - var_14)));
comp += +1.3123E-35f - var_15 + var_16;
}
if (comp == powf(-1.4214E-28f / var_17 - (-1.7691E-14f / var_18 * +1.7414E-42f), (var_19 - +1.8289E36f - +1.6617E-36f))) {
comp += atan2f(-1.0248E35f, -1.0499E-11f - var_20 / var_21 * -1.0678E-37f * +1.5149E-44f);
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
    // Allocate a 10-element float array filled with v; caller owns and
    // must free() the returned buffer.
    float *buf = (float*) malloc(sizeof(float)*10);
    for(int idx = 0; idx < 10; ++idx)
        buf[idx] = v;
    return buf;
}
int main(int argc, char** argv) {
/* Program variables */
// NOTE(review): argc is never validated — fewer than 23 argv entries make
// the atof/atoi calls below read past the end of argv (undefined
// behaviour). The file header says "Do not modify", so this is flagged
// rather than fixed.
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22);
// Wait for the kernel so its device-side printf output is flushed.
cudaDeviceSynchronize();
return 0;
}
|
20,907 | #include "includes.h"
__global__ void build_hll(int n, unsigned int *in, unsigned int *out) {
    // HyperLogLog-style register update (presumably — name suggests HLL):
    // each input word packs a bucket id in the high bits and a position in
    // the low HLL_BUCKET_WIDTH bits; keep the maximum position per bucket.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    // Extract the parts.
    unsigned int const val = in[idx];
    int const bucket = val >> HLL_BUCKET_WIDTH;
    int const pos = val & ((1 << HLL_BUCKET_WIDTH) - 1);
    // Atomically keep the largest position observed for this bucket.
    atomicMax(&out[bucket], pos);
}
20,908 | #include "includes.h"
__global__ void histo_kernel(unsigned char *buffer1, long size1, unsigned int *histo1){
// 256-bin byte histogram with a per-block privatized copy in shared memory.
// Launch contract implied by the indexing: blockDim.x == 256, so that
// temp[threadIdx.x] covers every bin in both the clear and merge phases.
// Phase 1 ------------------------------------------------------------
__shared__ unsigned int temp[256];
temp[threadIdx.x] = 0;
__syncthreads();
// Grid-stride loop over the input; each byte increments its private bin.
int i = threadIdx.x + blockDim.x * blockIdx.x;
int stride = blockDim.x * gridDim.x;
while (i < size1){
atomicAdd(&(temp[buffer1[i]]),1);
i += stride;
}
__syncthreads();
//---------------------------------------------------------------------
// Phase 2 ------------------------------------------------------------
// One atomic per bin merges this block's private histogram into histo1.
atomicAdd(&(histo1[threadIdx.x]), temp[threadIdx.x]);
//---------------------------------------------------------------------
}
20,909 | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define BLOCK_SIZE 512
// Define your kernels in this file you may use more than one kernel if you
// need to
__global__ void scan(float *sums, float *out, float *in, unsigned size)
{
/********************************************************************
Load a segment of the input vector into shared memory
Traverse the reduction tree
Write the computed sum to the output vector at the correct index
********************************************************************/
// Work-efficient (up-sweep / down-sweep) exclusive scan of one
// 2*BLOCK_SIZE-element segment. Each block scans its segment into `out`
// and stores the segment total in sums[blockIdx.x].
// Launch contract: blockDim.x == BLOCK_SIZE.
__shared__ float partialSum[2 * BLOCK_SIZE];
unsigned int tx = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Each thread loads two elements, zero-padding past the end of `in`.
if ((start + tx) < size)
partialSum[tx] = in[start + tx];
else
partialSum[tx] = 0.0f;
if ((start + blockDim.x + tx) < size)
partialSum[blockDim.x + tx] = in[start + blockDim.x + tx];
else
partialSum[blockDim.x + tx] = 0.0f;
__syncthreads();
// Up-sweep (reduction) phase: build partial sums in place.
int stride = 1;
while (stride < 2 * BLOCK_SIZE)
{
int index = (tx + 1) * stride * 2 - 1;
if (index < 2 * BLOCK_SIZE)
{
partialSum[index] += partialSum[index - stride];
}
stride *= 2;
__syncthreads();
}
// Record the segment total, then clear the root for the exclusive
// down-sweep. (Only thread 0 touches the root here, and thread 0 is also
// the only thread active in the first down-sweep iteration, so no extra
// barrier is needed between these two steps.)
if (tx == 0)
{
sums[blockIdx.x] = partialSum[2 * BLOCK_SIZE - 1];
partialSum[2 * BLOCK_SIZE - 1] = 0;
}
// Down-sweep phase: distribute prefixes back down the tree.
stride = BLOCK_SIZE;
while(stride > 0)
{
int index = (tx + 1) * stride * 2 - 1;
if (index < 2 * BLOCK_SIZE)
{
float temp = partialSum[index];
partialSum[index] += partialSum[index - stride];
partialSum[index - stride] = temp;
}
stride /= 2;
__syncthreads();
}
// Write back both halves of the scanned segment.
if ((start + tx) < size)
out[start + tx] = partialSum[tx];
if ((start + blockDim.x + tx) < size)
out[start + blockDim.x + tx] = partialSum[blockDim.x + tx];
}
__global__ void addSums(float *sums, float *out, unsigned size)
{
    // Uniformly add this block's scanned segment-sum to both halves of the
    // 2*blockDim.x-wide segment produced during the scan pass.
    unsigned int const tx = threadIdx.x;
    unsigned int const base = 2 * blockIdx.x * blockDim.x;
    unsigned int const lo = base + tx;
    unsigned int const hi = lo + blockDim.x;
    if (lo < size)
        out[lo] += sums[blockIdx.x];
    if (hi < size)
        out[hi] += sums[blockIdx.x];
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
// Recursive full exclusive scan: scans each 2*BLOCK_SIZE segment, then
// recursively scans the per-segment totals and adds them back into `out`.
// Recursion depth is logarithmic in in_size / (2*BLOCK_SIZE).
// NOTE(review): cudaMalloc and kernel launches are unchecked; an early
// failure cascades silently into the later calls.
void preScan(float *out, float *in, unsigned in_size)
{
float * sums;
dim3 dim_grid, dim_block;
unsigned out_elements;
// Number of 2*BLOCK_SIZE segments (ceil division).
out_elements = in_size / (BLOCK_SIZE << 1);
if (in_size % (BLOCK_SIZE<<1)) out_elements++;
cudaMalloc((void**)&sums, out_elements*sizeof(float));
dim_block.x = BLOCK_SIZE;
dim_block.y = dim_block.z = 1;
dim_grid.x = out_elements;
dim_grid.y = dim_grid.z = 1;
if (in_size <= 2 * BLOCK_SIZE)
// Single segment: one scan pass suffices (the lone block sum is unused).
scan<<<dim_grid, dim_block>>>(sums, out, in, in_size);
else
{
scan<<<dim_grid, dim_block>>>(sums, out, in, in_size);
float * sums_scanned;
cudaMalloc((void**)&sums_scanned, out_elements*sizeof(float));
preScan(sums_scanned, sums, out_elements);
addSums<<<dim_grid, dim_block>>>(sums_scanned, out, in_size);
cudaFree(sums_scanned);
}
cudaFree(sums);
}
|
20,910 | #include <stdlib.h>
#include <stdio.h>
void fill_matrix(double *mat, unsigned numRows, unsigned numCols)
{
    // Deterministic fill: element (r,c) = r*2.1f + c*3.2f
    // (computed in float, widened to double on store).
    for(unsigned r = 0; r < numRows; r++)
    {
        for(unsigned c = 0; c < numCols; c++)
            mat[r*numCols + c] = r*2.1f + c*3.2f;
    }
}
void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols)
{
    // Dumps the matrix as space-separated rows to a fixed output file.
    const char *fname = "assignment2_7_out";
    FILE *f = fopen(fname, "w");
    // BUG FIX: the original dereferenced f without checking fopen's result,
    // crashing when the file could not be created (e.g. read-only cwd).
    if (f == NULL)
    {
        fprintf(stderr, "ERROR: could not open %s for writing\n", fname);
        return;
    }
    for(unsigned i=0; i < numRows; i++)
    {
        for(unsigned j=0; j < numCols; j++)
            fprintf(f,"%4.4f ", mat[i*numCols + j]);
        fprintf(f,"\n");
    }
    fclose(f); }
__global__ void MatrixMulKernel_col_maj(double* M, double* N, double* P, int M_r, int N_c, int M_c, int TILE_WIDTH) {
    // Tiled double-precision multiply P = M * N (row-major storage, despite
    // the function name). M is M_r x M_c, N is M_c x N_c, P is M_r x N_c.
    // Launch contract: blockDim == (TILE_WIDTH, TILE_WIDTH) and dynamic
    // shared memory of 2*TILE_WIDTH*TILE_WIDTH doubles.
    extern __shared__ double buffer[];
    double *ds_M = &buffer[0];
    double *ds_N = &buffer[TILE_WIDTH*TILE_WIDTH];
    double Pvalue=0;
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    int Row = by * blockDim.y + ty;
    int Col = bx * blockDim.x + tx;
    // Loop over the M and N tiles required to compute the P element.
    // BUG FIX: ceil-divide the tile count — the original used M_c/TILE_WIDTH,
    // silently dropping the final partial tile whenever M_c is not a
    // multiple of TILE_WIDTH (the zero-padding branches below already
    // handle the out-of-range edges).
    for (int p = 0; p < (M_c + TILE_WIDTH - 1)/TILE_WIDTH; ++p) {
        // Collaborative loading of the M and N tiles into shared memory,
        // zero-padding elements that fall outside the matrices.
        if ( (Row < M_r) && (tx + p*TILE_WIDTH) < M_c){
            ds_M[ty*TILE_WIDTH + tx] = M[Row*M_c + p*TILE_WIDTH+tx];
        }
        else{
            ds_M[ty*TILE_WIDTH + tx]=0.0;
        }
        if ( (Col < N_c) && (ty + p*TILE_WIDTH) < M_c){
            ds_N[ty*TILE_WIDTH + tx] = N[(p*TILE_WIDTH+ty)*N_c + Col];
        }
        else{
            ds_N[ty*TILE_WIDTH + tx]=0.0;
        }
        __syncthreads(); // both tiles fully loaded before use
        for (int i = 0; i < TILE_WIDTH; ++i){
            Pvalue += ds_M[ty*TILE_WIDTH + i] * ds_N[i*TILE_WIDTH + tx];
        }
        __syncthreads(); // finish reads before the next iteration's loads
    }
    if ((Row < M_r) && (Col < N_c)){
        P[Row*N_c+Col] = Pvalue;
    }
}
int main(int argc,char **argv) {
    // Benchmarks the tiled double-precision multiply C = A * B
    // (A: M_r x M_c, B: M_c x N_c) for tile widths 4, 8, 16 and 32,
    // reports the fastest configuration, and writes C to a file.
    int M_r,M_c,N_c;
    int TILE_WIDTH_ll[4], TILE_WIDTH;
    float time_spent_ll[4], time_spent;
    int loop,loop1, loop2,min; // loop variables
    M_r=8192;
    M_c=16384;
    N_c=32768;
    size_t size1 = M_r *M_c* sizeof(double);
    size_t size2 = M_c *N_c* sizeof(double);
    size_t size3 = M_r *N_c* sizeof(double);
    double*h_matA = (double*)malloc(size1);
    double*h_matB = (double*)malloc(size2);
    double*h_matC = (double*)malloc(size3); // result
    fill_matrix(h_matA,M_r,M_c);
    fill_matrix(h_matB,M_c,N_c);
    for (loop = 0; loop<4; loop++){
        TILE_WIDTH_ll[loop]=pow(2,2+loop); // tile widths 4, 8, 16, 32
    }
    printf("\nMatrix A (first 10*10 inputs)\n");
    for(loop1 = 0; loop1 < 10; loop1++){
        for (loop2=0;loop2 < 10; loop2++)
            printf("%f ", *(h_matA + M_c*loop1 + loop2));
        printf("\n");
    }
    printf("\n\nMatrix B (first 10*10 inputs)\n");
    for(loop1 = 0; loop1 < 10; loop1++){
        for (loop2=0;loop2 < 10; loop2++)
            printf("%f ", *(h_matB + N_c*loop1 + loop2));
        printf("\n");
    }
    double* d_matA; cudaMalloc(&d_matA, size1);
    double* d_matB; cudaMalloc(&d_matB, size2);
    double* d_matC; cudaMalloc(&d_matC, size3);
    //GPU timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Copy vectors from host memory to device memory
    cudaMemcpy(d_matA, h_matA, size1,cudaMemcpyHostToDevice);
    cudaMemcpy(d_matB, h_matB, size2,cudaMemcpyHostToDevice);
    for (loop =0;loop < 4; loop++){
        TILE_WIDTH=TILE_WIDTH_ll[loop];
        // Invoke kernel
        dim3 threadsPerBlock (TILE_WIDTH,TILE_WIDTH,1);
        // BUG FIX: the kernel maps blockIdx.x to columns (Col) and
        // blockIdx.y to rows (Row), but the original grid sized .x from the
        // ROW count M_r and .y from the COLUMN count N_c. With N_c > M_r,
        // output columns beyond ~M_r were never computed. Size the grid to
        // match the kernel's indexing, using ceil division.
        dim3 blocksPerGrid ((N_c + threadsPerBlock.x - 1) /threadsPerBlock.x,(M_r + threadsPerBlock.y - 1) /threadsPerBlock.y,1);
        size_t blocksize = 2 * TILE_WIDTH * TILE_WIDTH; // two shared tiles per block
        cudaEventRecord(start, 0);
        MatrixMulKernel_col_maj<<<blocksPerGrid, threadsPerBlock, sizeof(double)*blocksize>>>(d_matA,d_matB, d_matC, M_r,N_c,M_c, TILE_WIDTH);
        //cudaDeviceSynchronize();//To synchronize the device
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time_spent, start, stop);
        printf("\nTime spent in col maj for tile %d x %d %f\n",TILE_WIDTH,TILE_WIDTH,time_spent);
        time_spent_ll[loop]=time_spent;
        // h_C contains the result in host memory
        cudaMemcpy(h_matC, d_matC, size3,cudaMemcpyDeviceToHost);
        printf("\n\nMatrix C (first 10*10 outputs)\n");
        for(loop1 = 0; loop1 < 10; loop1++){
            for (loop2=0;loop2 < 10; loop2++)
                printf("%f ", *(h_matC + N_c*loop1 + loop2));
            printf("\n");
        }
    }
    // Select the fastest tile width.
    min=0;
    for ( loop = 1 ; loop < 4 ; loop++ )
    {
        if ( time_spent_ll[loop] < time_spent_ll[min] )
        {
            min = loop;
        }
    }
    float min_time;
    int tile;
    min_time=time_spent_ll[min];
    tile=TILE_WIDTH_ll[min];
    printf("For the configuration of %d x %d multiplied by %d x %d \n",M_r,M_c,M_c,N_c);
    printf("Optimal time is %f, threads per block is %d x %d, tile size is %d x %d blocks per grid is %d x %d.",
    min_time,tile,tile,tile,tile,(N_c + tile - 1) / tile,(M_r + tile - 1) /tile);
    // Log outputs
    printf("\nWritting to file assignment_2_1_out as Mat C");
    print_matrix_to_file(h_matC,M_r,N_c);
    // Free device memory
    cudaFree(d_matA);
    cudaFree(d_matB);
    cudaFree(d_matC);
    // Free host memory
    free(h_matA);
    free(h_matB);
    free(h_matC);
    return 0;
}
20,911 | __global__ void smallKernel(int *offset, int *col_id, int *small, int sizeSmall, int *color, int currentColor)
{
    // Greedy graph-colouring step over the "small" vertex list (CSR graph:
    // offset/col_id). A vertex takes currentColor unless some neighbour
    // with index >= node is still uncoloured or already holds currentColor.
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx >= sizeSmall)
        return;
    int node = small[idx];
    if (color[node] != 0)
        return;
    int first = offset[node];
    int neighLen = offset[node+1] - first;
    bool set = true;
    for (int i = 0; i < neighLen; i++)
    {
        int item = col_id[first + i];
        if (item >= node && (color[item] == 0 || color[item] == currentColor))
        {
            set = false;
            break;
        }
    }
    // BUG FIX: the original called __syncthreads() here, inside divergent
    // control flow (threads that failed the bounds or colour checks never
    // reached it), which is undefined behaviour. The barrier provided no
    // ordering the algorithm relies on, so it is removed rather than moved.
    if (set)
        color[node] = currentColor;
}
|
20,912 | #include "includes.h"
__global__ void bcnn_cuda_axpy_strided_kernel(int n, int num_batches, float a, float *x, float *y, int dst_stride, int src_stride, int x_c, int x_h, int x_w, int y_c, int y_h, int y_w, int min_c, int min_h, int min_w) {
    // Strided y += a*x over the overlapping (min_c, min_h, min_w) region of
    // two batched tensors with independent shapes/strides; one thread per
    // element, flat id spread across a 2D grid.
    int gid = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (gid >= n) {
        return;
    }
    // Decompose the flat id into (col i, row j, channel k, batch b).
    int rem = gid;
    int i = rem % min_w; rem /= min_w;
    int j = rem % min_h; rem /= min_h;
    int k = rem % min_c; rem /= min_c;
    int b = rem % num_batches;
    int dst_idx = i * dst_stride + y_w * (j * dst_stride + y_h * (y_c * b + k));
    int src_idx = i * src_stride + x_w * (j * src_stride + x_h * (x_c * b + k));
    y[dst_idx] += a * x[src_idx];
}
20,913 | //Kelvin silva
//matrix vector parallel naive
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
//matrix vector -> y = A*x
__global__ void simpleMxv(int width, int height, float *matrix, float *vector, float * result_vector) {
    // Naive matrix-vector multiply y = A*x, one thread per output row.
    // BUG FIXES vs the original:
    //  * a row's elements start at row*width — the old code summed `width`
    //    consecutive elements starting at the global thread index itself;
    //  * the result was stored at blockIdx.x*blockDim.x, which equals the
    //    row index only when blockDim.x == 1; index by the row directly;
    //  * added a bounds guard for launches that over-provision threads.
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= height)
        return;
    float accumulate = 0.0f;
    for (int col = 0; col < width; col++) {
        accumulate += matrix[row * width + col] * vector[col];
    }
    result_vector[row] = accumulate;
}
typedef unsigned long long timestamp_t;
static timestamp_t get_timestamp();
int main(void) {
// Times a naive GPU matrix-vector multiply y = A*x for a 32768x32768
// matrix of 2.5s against a vector of 2.3s, then reports elapsed time.
int width = 32768;
int height = 32768;
float *matrix, *vector, *result_vector;
float *d_matrix, *d_vector, *d_result_vector;
// allocate host
matrix = (float * ) malloc(width*height * sizeof(float));
vector = (float * ) malloc(width * sizeof(float));
result_vector = (float *) calloc(width, sizeof(float));
// allocate device mem
cudaMalloc( &d_matrix, width*height * sizeof(float));
cudaMalloc( &d_vector, height * sizeof(float));
cudaMalloc( &d_result_vector, width * sizeof(float));
// initialize host mem
for (int i = 0; i < width*height; i++) {
matrix[i] = 2.5;
}
for (int i = 0; i < width; i++){
vector[i] = 2.3;
}
// transfer from host to dev
cudaMemcpy(d_matrix, matrix, width*height * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_vector, vector, width * sizeof(float), cudaMemcpyHostToDevice);
timestamp_t t0 = get_timestamp();
// Perform SAXPY on 1M elements
// device configuration, <<< number of blocks ,num threads in block, size of shared memory >>>
// One single-thread block per output row.
simpleMxv <<<width, 1 >>> (width, height, d_matrix, d_vector, d_result_vector);
// Blocking copy: also waits for the kernel to finish.
cudaMemcpy(result_vector, d_result_vector, width * sizeof(float), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
timestamp_t t1 = get_timestamp();
double diff = ((double)t1 - (double)t0);
printf("Completed in: %lf microseconds\n", diff);
printf("Result:\n");
/*for (int i = 0; i < width; i++){
printf("%f\n", result_vector[i]);
}
*/
// NOTE(review): maxError is a placeholder — no comparison against a
// reference result is actually performed here.
float maxError = 0.0f;
printf("Max error: %f\n", maxError);
cudaFree(d_matrix);
cudaFree(d_vector);
cudaFree(d_result_vector);
free(matrix);
free(vector);
free(result_vector);
}
static timestamp_t get_timestamp(){
    // Wall-clock time in microseconds since the Unix epoch.
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (timestamp_t)tv.tv_sec * 1000000 + tv.tv_usec;
}
|
20,914 | #include "includes.h"
__global__ void kernel_euclidean_norm(const double *vec, int numElements, double *answer)
{
    // Euclidean (L2) norm of vec, serial-summed by thread 0.
    // Launch contract: a single block with blockDim.x >= numElements and
    // dynamic shared memory of blockDim.x doubles.
    extern __shared__ double square[]; // one element per thread
    int i = threadIdx.x; // numElements assumed to fit into one block
    // BUG FIX: guard the load — the original read vec[i] for every thread,
    // which is out of bounds whenever blockDim.x > numElements.
    square[i] = (i < numElements) ? vec[i] * vec[i] : 0.0;
    __syncthreads();
    if (i == 0) {
        double sum = 0;
        for (int j = 0; j < numElements; ++j) {
            sum += square[j];
        }
        *answer = sqrt(sum);
    }
}
20,915 | #include <cuda.h>
#include <stdio.h>
#include <random>
__global__ void randSumKernel(int *arr, int a) {
    // Each thread writes a*threadIdx.x + blockIdx.x into its flattened slot.
    int const slot = blockIdx.x * blockDim.x + threadIdx.x;
    arr[slot] = a * threadIdx.x + blockIdx.x;
}
// reference is https://github.com/DanNegrut/ME759/blob/main/2021Spring/GPU/setArray.cu
int main(){
// Fills a 2x8 device array with a*threadIdx.x + blockIdx.x for a randomly
// chosen a in [0,10], copies it back, and prints the 16 values.
const int numBlocks = 2;
const int numThreads = 8;
const int numElement = 16;
// set up random number generator
std::random_device entropy_source;
std::mt19937_64 generator(entropy_source());
const int min = 0, max = 10; // The range for the random number generator is 0 to 10
// Other distributions are listed at https://en.cppreference.com/w/cpp/header/random
std::uniform_int_distribution<> dist(min, max);
// use random number generator to generate integer a
int a = dist(generator);
// initialize device array dA
int *dA;
// allocate memory on the device; zero out all entries in this device array
cudaMalloc((void **)&dA, sizeof(int) * numElement);
cudaMemset(dA, 0, numElement * sizeof(int));
// initialize host array hA
int hA[numElement];
// invoke GPU kernel with 2 blocks that has eight threads
randSumKernel<<<numBlocks, numThreads>>>(dA, a);
cudaDeviceSynchronize();
// bring the result back from the GPU into the hostArray
cudaMemcpy(hA, dA, sizeof(int) * numElement, cudaMemcpyDeviceToHost);
// free array
cudaFree(dA);
// print out element
for (int i = 0; i < numElement - 1; i++) {
std::printf("%d ", hA[i]);
}
std::printf("%d\n", hA[numElement - 1]);
return 0;
}
|
20,916 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <string>
#include <vector>
#include <iostream>
using namespace std;
int main() {
// Attempts a Burrows-Wheeler transform of "\002banana\003": builds all
// cyclic rotations, sorts them on the GPU, and emits the last column.
string str = "\002banana\003";
vector<string> table;
// Build the rotation table on the host.
for (int i = 0; i < str.length(); i++) {
string temp = str.substr(i, str.length()) + str.substr(0, i);
table.push_back(temp);
}
// Copy each rotation into its own device buffer; the device_vector holds
// only the raw device pointers.
thrust::device_vector<char*> device_table;
for (int i = 0; i < table.size(); i++) {
char* temp;
cudaMalloc((void**)&temp, sizeof(char) * (str.length() + 1));
cudaMemcpy(temp, table[i].c_str(), sizeof(char) * (str.length() + 1), cudaMemcpyHostToDevice);
device_table.push_back(temp);
}
// NOTE(review): this sorts the raw char* values, i.e. orders the rotations
// by device ADDRESS rather than by string contents, so the printed result
// is not a correct BWT. Fixing it needs a device-side lexicographic
// comparator (or sorting rotation indices on the host); left unchanged
// pending that redesign.
thrust::sort(device_table.begin(), device_table.end());
char* result;
cudaMallocHost((void**)&result, sizeof(char) * (device_table.size() + 1));
// Last column: the final character of each (supposedly) sorted rotation.
for (int i = 0; i < device_table.size(); i++) {
char* temp;
cudaMallocHost((void**)&temp, sizeof(char) * (str.length() + 1));
cudaMemcpy(temp, device_table[i], sizeof(char) * (str.length() + 1), cudaMemcpyDeviceToHost);
result[i] = temp[str.length() - 1];
cudaFreeHost(temp);
}
// NOTE(review): result[device_table.size()] is never set to '\0';
// cudaMallocHost does not guarantee zeroed memory, so this cout may read
// past the written characters.
cout << result << endl;
for (int i = 0; i < device_table.size(); i++) {
cudaFree(device_table[i]);
}
cudaFreeHost(result);
return 0;
}
|
20,917 | #include "includes.h"
/*
* Find BLANK and replace your own code.
* And submit report why do you replace the blank that way.
*/
/* 2015004693_YangSangheon */
#define TILE_WIDTH 24 /* set TILE_WIDTH 16 for the evaluation! */
#define MAXPOOL_INPUT_FILENAME "input.txt"
#define A_FILENAME "a.txt"
#define B_FILENAME "b.txt"
#define C_FILENAME "c.txt"
using namespace std;
__global__ void maxpool(float *input, float *output, const int input_size, const int filter_size) {
    // input : input_matrix address
    // output : output buffer address
    // input_size : width, height of input matrix
    // filter_size : filter_size of maxpolling
    // all input, output matrices are vectorized
    // One thread per output cell of the (input_size/filter_size)^2 result.
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int const out_size = input_size / filter_size;
    // BUG FIX: reject out-of-range threads BEFORE touching `input`; the
    // original performed all the window reads unconditionally and only
    // guarded the final store, reading out of bounds for surplus threads.
    if (col >= out_size || row >= out_size)
        return;
    float tmp = 0.0;
    float Max = -999999.9;
    // Scan the filter_size x filter_size window and keep the maximum.
    for(int i = 0; i < filter_size; i++){
        for(int j = 0; j < filter_size; j++){
            tmp = input[(input_size*filter_size*row)+(filter_size*col)+(input_size*j)+i];
            if(Max<tmp)
                Max = tmp;
        }
    }
    output[(out_size*row)+col] = Max;
}
20,918 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>;
using namespace std;
__global__ void AddIntsCuda(int *a, int *b)
{
    // In-place element-wise add a[i] += b[i]; one element per thread,
    // single-block launch (threadIdx.x only).
    int const idx = threadIdx.x;
    a[idx] = a[idx] + b[idx];
}
__global__ void InterChangeCuda(int *a, int *b)
{
    // Swap a[i] and b[i], one element per thread (single-block launch).
    // BUG FIX: the original wrote b[i] = a[i] AFTER a[i] had already been
    // overwritten with b[i], leaving both slots equal to b's old value;
    // write the saved temporary instead.
    int i = threadIdx.x;
    int temp = a[i];
    a[i] = b[i];
    b[i] = temp;
}
int main() {
// Adds two single ints on the GPU and prints the sum (expected 14).
int a = 5, b = 9;
int *d_a, *d_b;
cudaMalloc(&d_a, sizeof(int));
cudaMalloc(&d_b, sizeof(int));
cudaMemcpy(d_a, &a, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, sizeof(int), cudaMemcpyHostToDevice);
// Single thread: AddIntsCuda performs d_a[0] += d_b[0].
// NOTE(review): InterChangeCuda is defined above but never launched.
AddIntsCuda<<< 1, 1 >>>(d_a, d_b);
// Blocking copy; also synchronizes with the kernel on the default stream.
cudaMemcpy(&a, d_a, sizeof(int), cudaMemcpyDeviceToHost);
cout << "The result is: " << a << endl;
cudaFree(d_a);
cudaFree(d_b);
return 0;
}
|
20,919 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#define SIZE 1024*1024*1000
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat),__LINE__, __FILE__);\
exit(1);\
}\
}
__host__ int main()
{
    // Prints properties of device 0, times a host-side vector fill between
    // two CUDA events, and round-trips the buffer through device memory.
    int dev = 0; // BUG FIX: was an uninitialized char handed to cudaSetDevice
    CUDA_CHECK_RETURN(cudaSetDevice(dev));
    cudaDeviceProp deviceProp;
    CUDA_CHECK_RETURN(cudaGetDeviceProperties(&deviceProp, dev));
    printf(" Total amount of constant memory: %lu bytes\n", deviceProp.totalConstMem);
    printf(" Total amount of shared memory per block: %lu bytes\n", deviceProp.sharedMemPerBlock);
    printf(" Total number of registers available per block: %d\n", deviceProp.regsPerBlock);
    printf(" Warp size: %d\n", deviceProp.warpSize);
    printf(" Maximum number of threads per multiprocessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
    printf(" Maximum number of threads per block: %d\n", deviceProp.maxThreadsPerBlock);
    float elapsedTime = 0.0f;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float* vec1 = new float[SIZE];
    // Time the host-side fill between two events on the default stream.
    cudaEventRecord(start, 0);
    for (int i = 0; i < SIZE; i++)
    {
        vec1[i] = i;
    }
    cudaEventRecord(stop, 0);
    // BUG FIX: the original printed elapsedTime without ever calling
    // cudaEventSynchronize/cudaEventElapsedTime, so it printed an
    // uninitialized value (and declared an unused cudaEvent_t syncEvent).
    CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
    CUDA_CHECK_RETURN(cudaEventElapsedTime(&elapsedTime, start, stop));
    printf("%g", elapsedTime);
    float* devVec1;
    CUDA_CHECK_RETURN(cudaMalloc((void**)&devVec1, sizeof(float) * SIZE));
    CUDA_CHECK_RETURN(cudaMemcpy(devVec1, vec1, sizeof(float) * SIZE, cudaMemcpyHostToDevice));
    cudaFree(devVec1);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    delete[] vec1; vec1 = 0;
    return 0;
}
|
20,920 | #include <iostream>
int main(){
    // List every CUDA device visible to the runtime by name.
    std::cout << "hi" << std::endl;
    int count;
    cudaGetDeviceCount(&count);
    for(int idx = 0; idx < count; ++idx)
    {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, idx);
        std::cout << props.name << std::endl;
    }
    return 0;
}
20,921 | #include <cuda.h>
// In-place element-wise addition: a[i] += b[i] for i in [0, len).
// One element per thread; the tail guard keeps threads past `len`
// from writing out of bounds (the original ignored `len` entirely).
__global__ void vecadd(int * a, int * b, int len) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < len) {
        a[idx] += b[idx];
    }
}
|
20,922 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
using namespace std;
__global__
void sumaMatrixKernel(float* A, float* B, float* C, int n)
{
    // Element-wise sum of two flattened n x n matrices, one element per thread.
    int idx = threadIdx.x + (blockDim.x * blockIdx.x);
    if (idx < n * n)
    {
        C[idx] = A[idx] + B[idx];
    }
}
__global__
void sumaMatrixKernelRow(float* A, float* B, float* C, int n)
{
    // One thread per row: thread `row` sums the entire row.
    int row = threadIdx.x + (blockDim.x * blockIdx.x);
    if (row >= n)
        return;
    for (int col = 0; col < n; ++col)
    {
        int k = row * n + col;
        C[k] = A[k] + B[k];
    }
}
__global__
void sumaMatrixKernelColumn(float* A, float* B, float* C, int n)
{
    // One thread per column: thread `col` sums the entire column
    // (stride-n accesses, so this is the least coalesced variant).
    int col = threadIdx.x + (blockDim.x * blockIdx.x);
    if (col >= n)
        return;
    for (int row = 0; row < n; ++row)
    {
        int k = col + row * n;
        C[k] = A[k] + B[k];
    }
}
void sumaMatrix(float* A, float* B, float* C, int tam)
{
    // Host wrapper: copy both tam x tam operands to the device, launch the
    // element-per-thread kernel, and copy the result back into C.
    const int bytes = (tam * tam) * sizeof(float);
    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, bytes);
    cudaMalloc((void**)&d_B, bytes);
    cudaMalloc((void**)&d_C, bytes);
    cudaMemcpy(d_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, bytes, cudaMemcpyHostToDevice);
    // ceil(tam^2 / 256) blocks of 256 threads cover every element.
    sumaMatrixKernel<<<ceil(tam*tam/256.0),256>>>(d_A, d_B, d_C, tam);
    cudaMemcpy(C, d_C, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
void datosRandom(float *array,int n){
    // Despite the name, no randomness is used (the srand call was already
    // disabled): every cell of the n x n matrix is set to 1.
    for (int k = 0; k < n * n; ++k)
        array[k] = 1;
}
void printMatrix(float *array,int n){
    // Prints only the first n entries (one row's worth of an n x n
    // matrix) followed by a newline.
    for (int k = 0; k < n; ++k)
        printf("%f ", array[k]);
    printf("\n");
}
int main()
{
    // Build two constant 10x10 matrices, add them on the GPU, and print
    // the first row of the result.
    int n = 10;
    float *h_A = new float[n*n];
    float *h_B = new float[n*n];
    float *h_C = new float[n*n];
    datosRandom(h_A, n);
    datosRandom(h_B, n);
    sumaMatrix(h_A, h_B, h_C, n);
    printMatrix(h_C, n);
    // Release the host buffers (the original leaked all three).
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    return 0;
}
20,923 | #include <iostream>
using namespace std;
int main() {
    // Report how many CUDA devices are visible.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    cout << "Number of Devices: " << deviceCount << endl;
    // Query device 0 and print the properties of interest.
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    cout << "Name of Device: " << props.name << endl;
    cout << "Global Memory Capacity: " << props.totalGlobalMem << endl;
    cout << "Shared Memory Maximum: " << props.sharedMemPerBlock << endl;
    cout << "Warp Size: " << props.warpSize << endl;
    cout << "Maximum Threads per Block: " << props.maxThreadsPerBlock << endl;
    cout << "Maximum Dimensions for Thread Blocks: (" << props.maxThreadsDim[0];
    cout << ", " << props.maxThreadsDim[1] << ", " << props.maxThreadsDim[2] << ")" << endl;
    cout << "Maximum Dimensions for Grid of Blocks: (" << props.maxGridSize[0];
    cout << ", " << props.maxGridSize[1] << ", " << props.maxGridSize[2] << ")" << endl;
    cout << "Compute Capability: " << props.major << "." << props.minor << endl;
    cout << "Clock Speed in KHz: " << props.memoryClockRate << endl;
    cout << "Width of Memory Bus: " << props.memoryBusWidth << endl;
}
20,924 | #include "includes.h"
//#define DEPTH 2
// dp - cost aggregation array
// cost_image - m x n x D array
// d - use every d channels of input to conserve register memory
// m - image rows
// n - image columns
// D - depth
// depth_stride - pitch along depth dimension
// row_stride - pitch along row dimension
// SGM-style cost-aggregation criterion for one depth level.
// Reads the three relevant previous-column costs around depth `d`:
//   *d_zero <- same-depth cost dp[ind]
//   *d_one  <- depth-1 cost + P_one (sentinel 1e7 at the d==0 boundary)
//   *d_two  <- depth+1 cost + P_one (sentinel 1e7 at the d==D-1 boundary)
// *d_three is an input: the caller precomputes (previous min over all
// depths) + P_two, and it is subtracted back out so the returned value is
// min(d0, d1, d2, d3) - prev_min.
// NOTE(review): D must be a compile-time macro defined elsewhere in the
// file; P_one/P_two are the usual SGM smoothness penalties — confirm.
__device__ float dp_criteria(float *dp, int ind, int depth_dim_size, int d, float P_one, float P_two, float * d_zero, float * d_one, float * d_two, float * d_three){
*d_zero = dp[ind];
if (d > 0)
*d_one = dp[ind - depth_dim_size] + P_one;
else
*d_one = 10000000;
if (d < D-1)
*d_two = dp[ind + depth_dim_size] + P_one;
else
*d_two = 10000000;
return fminf(fminf(*d_zero, *d_one), fminf(*d_two, *d_three)) - *d_three + P_two;
}
// Leftward (right-to-left) cost aggregation over an m x n x D volume.
// Each iteration of the outer while loop: all threads cooperatively load a
// SHMEM_SIZE-wide strip of per-pixel depth minima into shared memory, then a
// single row of threads (threadIdx.y == 0) performs the inherently
// sequential column-by-column aggregation across that strip.
// NOTE(review): SHMEM_SIZE, D, D_STEP, P1, P2 are macros defined outside
// this chunk — their values determine correctness of the tiling.
__global__ void __l_aggregate(float *dp, float *cost_image, int m, int n)
{
int row = threadIdx.y + blockIdx.y * blockDim.y;
int col = n - 1 - threadIdx.x;
int depth_dim_size = m*n;
__shared__ float MinArray[SHMEM_SIZE][SHMEM_SIZE];
int K = n-1; // this variable keeps track of the progress in aggregating
// across the columns of the image
while ((col >= 0) & (row < m))
{
// Per-pixel minimum over the depth dimension (strided by D_STEP).
int ind = row * n + col;
float prev_min = 100000000.0;
for (int depth = 0; depth < D; depth+=D_STEP){
prev_min = fminf(dp[ind], prev_min);
ind += (depth_dim_size * D_STEP);
}
// Stage the minima so the single aggregating warp can read them.
MinArray[threadIdx.y][SHMEM_SIZE - 1 - threadIdx.x] = prev_min;
__syncthreads();
float d0 = 0;
float d1 = 0;
float d2 = 0;
// when processing a video stream, need to make sure that processing of multiple
// frames can overlap, since after this point only one warp of threads is executing
// threads from only one warp will handle rightward aggregation across the
// region that has been loaded into shared memory
// for threads where threadIdx.y is 0, now threadIdx.x will index the rows
if (threadIdx.y == 0)
{
int agg_row = threadIdx.x + blockIdx.y * blockDim.y;
int start_K = K;
int local_K = SHMEM_SIZE - 1;
if (agg_row < m)
{
// Sequentially aggregate across the strip, column K-1 from column K.
for(; (K > 0) && (K > (start_K - SHMEM_SIZE)); K--)
{
float d3 = MinArray[threadIdx.x][local_K] + (float) P2;
int ind = agg_row * n + K - 1;
for (int d = 0; d < D; d+=D_STEP){
dp[ind] += cost_image[ind] + dp_criteria(dp, ind+1, depth_dim_size, d, (float) P1, (float) P2, &d0, &d1, &d2, &d3);
//dp[ind] = cost_image[ind] + dp[ind - 1];
ind += (depth_dim_size * D_STEP);
}
local_K--;
}
}
}
__syncthreads();
col-=blockDim.x;
}
}
20,925 | #include <stdio.h>
#include <stdlib.h>
#define DEBUGG 1
//static const int N = 16; //Siempre matrices cuadradas
static const int N = 36; //Siempre matrices cuadradas
//...
//Kernel que distribueix la l'execució a la grid
// Flattens the 2-D grid of 2-D blocks into one linear index per thread and
// stores (2 * blockIdx.y + blockIdx.x) — the block's label — at that slot.
__global__ void organitza_grid(int *array) {
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx.x;
    int by = blockIdx.y;
    // Total threads along x across the whole grid.
    int rowWidth = gridDim.x * blockDim.x;
    // Linear position: thread offset inside the block plus the block's
    // origin in the flattened grid.
    int flat = (ty * rowWidth + tx) + (bx * blockDim.x) + (by * rowWidth * blockDim.y);
    array[flat] = (2 * by) + bx;
}
__host__ void printa(int *array,int sizex,int sizey)
{
    // Dump the sizey x sizex grid row by row, space-separated.
    for (int row = 0; row < sizey; ++row) {
        for (int col = 0; col < sizex; ++col) {
            printf("%d ", array[row * sizex + col]);
        }
        printf("\n");
    }
}
int main(void) {
    int *dev_a, gridsizex, gridsizey;
    int *array;
    int size = N*sizeof(int);
    // Allocate host and device buffers of N ints.
    array = (int *)malloc(size);
    cudaMalloc((void **)&dev_a, size);
    // Zero the whole buffer: memset counts bytes, not elements — the
    // original `memset(array, 0, N)` cleared only the first N bytes.
    memset(array, 0, size);
    cudaMemcpy(dev_a, array, size, cudaMemcpyHostToDevice);
    // 2-D blocks of sqrt(N)/2 x sqrt(N)/2 threads...
    dim3 block_dim(sqrt(N)/2, sqrt(N)/2);
    // ...and a 2-D grid sized so the grid exactly covers sqrt(N) x sqrt(N).
    dim3 grid_dim(sqrt(N)/block_dim.x, sqrt(N)/block_dim.y);
    gridsizex = grid_dim.x*block_dim.x;
    gridsizey = grid_dim.y*block_dim.y;
#if DEBUGG
    printf("Dim block (x,y) %d-%d",block_dim.x,block_dim.y);
    printf("\nDim Grid (blocks)(x,y) %d-%d",grid_dim.x,grid_dim.y);
    printf("\ngrid size (threads)(x,y) %d-%d\n",gridsizex,gridsizey);
#endif
    organitza_grid<<<grid_dim, block_dim>>>(dev_a);
    cudaMemcpy(array,dev_a,size,cudaMemcpyDeviceToHost);
    // Print the per-thread block labels computed on the device.
    printa(array,gridsizex,gridsizey);
    // Release both buffers (the original leaked them).
    cudaFree(dev_a);
    free(array);
    return 0;
}
|
20,926 | #include <cstdlib>
#include <cstdio>
#include <cooperative_groups.h>
#define P(i, j) ((i) * nx + (j))
// Allocates an nx*ny float grid in CUDA unified (managed) memory,
// returning the pointer through the reference parameter.
void allocate_2d(float *&a, int nx, int ny){
    cudaMallocManaged(&a, sizeof(float) * nx * ny);
}
// Builds the source term b of the pressure Poisson equation from the
// current velocity field (central differences on interior points only).
// Grid layout: i indexes rows (y), j indexes columns (x), via the
// file-level macro P(i, j) = i*nx + j.
__global__ void build_up_b(float *b, float rho, float dt, float *u, float *v, float dx, float dy, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
// Skip the boundary ring; b there is never used by the interior stencil.
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
// b = rho * ( (1/dt)*(du/dx + dv/dy) - (du/dx)^2
//             - 2*(du/dy)*(dv/dx) - (dv/dy)^2 )
b[P(i, j)] = (
rho * (
1 / dt * (
(
u[P(i, j+1)] - u[P(i, j-1)]
) / (
2 * dx
) + (
v[P(i+1, j)] - v[P(i-1, j)]
) / (
2 * dy
)
) - (
(
u[P(i, j+1)] - u[P(i, j-1)]
) / (
2 * dx
)
) * (
(
u[P(i, j+1)] - u[P(i, j-1)]
) / (
2 * dx
)
) - 2 * (
(
u[P(i+1, j)] - u[P(i-1, j)]
) / (
2 * dy
) * (
v[P(i, j+1)] - v[P(i, j-1)]
) / (
2 * dx
)
) - (
(
v[P(i+1, j)] - v[P(i-1, j)]
) / (
2 * dy
)
) * (
(
v[P(i+1, j)] - v[P(i-1, j)]
) / (
2 * dy
)
)
)
);
}
// Grid copy b <- a, one cell per thread (x dim indexes rows, y columns).
__global__ void copy(float *a, float *b, int nx, int ny){
    int row = blockIdx.x*blockDim.x + threadIdx.x;
    int col = blockIdx.y*blockDim.y + threadIdx.y;
    // Guard the over-provisioned launch (indices are never negative).
    if (row >= ny || col >= nx)
        return;
    b[P(row, col)] = a[P(row, col)];
}
// One Jacobi sweep of the pressure Poisson equation: writes interior p
// from the previous iterate pn and the source term b. Boundary cells are
// handled on the host in pressure_poisson().
__global__ void pressure_poisson_step(float *p, float *pn, float dx, float dy, float *b, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
// p = (weighted neighbor average) - dx^2*dy^2/(2*(dx^2+dy^2)) * b
p[P(i, j)] = (
(
(
pn[P(i, j+1)] + pn[P(i, j-1)]
) * dy * dy + (
pn[P(i+1, j)] + pn[P(i-1, j)]
) * dx * dx
) / (
2 * (
dx * dx + dy * dy
)
) - dx * dx * dy * dy / (
2 * (
dx * dx + dy * dy
)
) * b[P(i, j)]
);
}
// Iterates the Jacobi pressure solve nit times. Each iteration snapshots
// p into pn on the device, runs one sweep, then applies the boundary
// conditions (dp/dx=0 at left/right, dp/dy=0 at the bottom, p=0 at the
// top lid) on the host — valid because the arrays are managed memory and
// cudaDeviceSynchronize() precedes the host writes.
// NOTE(review): the inner boundary loops shadow the outer iteration
// variable `i`; harmless but easy to misread.
void pressure_poisson(float *p, float *pn, float dx, float dy, float *b, int nit, int nx, int ny){
for(int i = 0;i < nit;i++){
// Fixed 2x2 grid of 32x32 blocks (64x64 threads) covers the 41x41 field.
dim3 grid = dim3(2, 2);
dim3 block = dim3(32, 32);
copy<<<grid,block>>>(p, pn, nx, ny);
pressure_poisson_step<<<grid,block>>>(p, pn, dx, dy, b, nx, ny);
cudaDeviceSynchronize();
for(int i = 0;i < ny;i++){
p[P(i, nx-1)] = p[P(i, nx-2)];
}
for(int i = 0;i < nx;i++){
p[P(0, i)] = p[P(1, i)];
}
for(int i = 0;i < ny;i++){
p[P(i, 0)] = p[P(i, 1)];
}
for(int i = 0;i < nx;i++){
p[P(ny-1, i)] = 0;
}
}
}
// Momentum update for the u (x) velocity component on interior points:
// explicit upwind convection, central pressure gradient, and central
// diffusion, all from the previous-step fields un/vn and pressure p.
__global__ void update_u(float *u, float *v, float *un, float *vn, float dt, float dx, float dy, float rho, float nu, float *p, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
u[P(i, j)] = (
un[P(i, j)] - un[P(i, j)] * dt / dx * (
un[P(i, j)] - un[P(i, j-1)]
) - vn[P(i, j)] * dt / dy * (
un[P(i, j)] - un[P(i-1, j)]
) - dt / (
2 * rho * dx
) * (
p[P(i, j+1)] - p[P(i, j-1)]
) + nu * (
dt / (dx * dx) * (
un[P(i, j+1)] - 2 * un[P(i, j)] + un[P(i, j-1)]
) + dt / (dy * dy) * (
un[P(i+1, j)] - 2 * un[P(i, j)] + un[P(i-1, j)]
)
)
);
}
// Momentum update for the v (y) velocity component; mirrors update_u with
// the pressure gradient taken in the y direction.
__global__ void update_v(float *u, float *v, float *un, float *vn, float dt, float dx, float dy, float rho, float nu, float *p, int nx, int ny){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
if(i < 1 || i >= ny-1 || j < 1 || j >= nx-1){
return;
}
v[P(i, j)] = (
vn[P(i, j)] - un[P(i, j)] * dt / dx * (
vn[P(i, j)] - vn[P(i, j-1)]
) - vn[P(i, j)] * dt / dy * (
vn[P(i, j)] - vn[P(i-1, j)]
) - dt / (
2 * rho * dy
) * (
p[P(i+1, j)] - p[P(i-1, j)]
) + nu * (
dt / (dx * dx) * (
vn[P(i, j+1)] - 2 * vn[P(i, j)] + vn[P(i, j-1)]
) + dt / (dy * dy) * (
vn[P(i+1, j)] - 2 * vn[P(i, j)] + vn[P(i-1, j)]
)
)
);
}
// Applies the cavity-flow wall conditions on the host: no-slip (0) on
// three walls and u = 1 along the i = ny-1 row (the driven lid).
// The loop ORDER is significant at the corners — later assignments win,
// so the lid row of u is written last and keeps 1 at both lid corners.
void boarder(float *u, float *v, int nx, int ny){
    for (int k = 0; k < nx; ++k) u[P(0, k)] = 0;       // u: bottom row
    for (int k = 0; k < ny; ++k) u[P(k, 0)] = 0;       // u: left column
    for (int k = 0; k < ny; ++k) u[P(k, nx-1)] = 0;    // u: right column
    for (int k = 0; k < nx; ++k) u[P(ny-1, k)] = 1;    // u: moving lid
    for (int k = 0; k < nx; ++k) v[P(0, k)] = 0;       // v: bottom row
    for (int k = 0; k < nx; ++k) v[P(ny-1, k)] = 0;    // v: top row
    for (int k = 0; k < ny; ++k) v[P(k, 0)] = 0;       // v: left column
    for (int k = 0; k < ny; ++k) v[P(k, nx-1)] = 0;    // v: right column
}
// Prints every cell of the ny x nx grid, one value per line, row-major.
void print2d(float *a, int nx, int ny){
    for (int row = 0; row < ny; ++row)
        for (int col = 0; col < nx; ++col)
            printf("%g\n", a[P(row, col)]);
}
// Advances the cavity-flow solution one time step: build the Poisson
// source, solve for pressure, snapshot u/v into un/vn, update both
// momentum components, then reapply the wall boundary conditions.
// All kernels use the same fixed 2x2 grid of 32x32 blocks (covers 41x41).
void cavity_flow_step(float *u, float *un, float *v, float *vn, float dt, float dx, float dy, float *p, float *pn, float *b, float rho, float nu, int nx, int ny, int nit){
dim3 grid = dim3(2, 2);
dim3 block = dim3(32, 32);
build_up_b<<<grid,block>>>(b, rho, dt, u, v, dx, dy, nx, ny);
cudaDeviceSynchronize();
pressure_poisson(p, pn, dx, dy, b, nit, nx, ny);
cudaDeviceSynchronize();
// un/vn hold the previous-step velocities read by the update kernels.
copy<<<grid,block>>>(u, un, nx, ny);
copy<<<grid,block>>>(v, vn, nx, ny);
update_u<<<grid,block>>>(u, v, un, vn, dt, dx, dy, rho, nu, p, nx, ny);
update_v<<<grid,block>>>(u, v, un, vn, dt, dx, dy, rho, nu, p, nx, ny);
cudaDeviceSynchronize();
// boarder() writes managed memory from the host, hence the sync above.
boarder(u, v, nx, ny);
cudaDeviceSynchronize();
}
// Runs the solver for nt time steps.
void cavity_flow(int nt, float *u, float *un, float *v, float *vn, float dt, float dx, float dy, float *p, float *pn, float *b, float rho, float nu, int nx, int ny, int nit){
    for (int step = 0; step < nt; ++step) {
        cavity_flow_step(u, un, v, vn, dt, dx, dy, p, pn, b, rho, nu, nx, ny, nit);
    }
}
int main(){
    // 41x41 lid-driven-cavity solve over a 2x2 domain, 700 time steps
    // with 50 pressure iterations per step; results printed to stdout.
    int nx = 41;
    int ny = 41;
    int nt = 700;
    int nit = 50;
    float cx = 2;
    float cy = 2;
    float dx = cx / (nx - 1);
    float dy = cy / (ny - 1);
    float rho = 1;
    float nu = 0.1;
    float dt = 0.001;
    // Managed-memory fields: velocities (u, v), pressure p, Poisson
    // source b, plus previous-step copies (un, vn, pn).
    float *u;
    allocate_2d(u, nx, ny);
    float *v;
    allocate_2d(v, nx, ny);
    float *p;
    allocate_2d(p, nx, ny);
    float *b;
    allocate_2d(b, nx, ny);
    float *un;
    allocate_2d(un, nx, ny);
    float *vn;
    allocate_2d(vn, nx, ny);
    float *pn;
    allocate_2d(pn, nx, ny);
    // Zero the primary fields; un/vn/pn are filled by copy() before use.
    for(int i = 0;i < ny;i++){
        for(int j = 0;j < nx;j++){
            u[P(i, j)] = v[P(i, j)] = p[P(i, j)] = b[P(i, j)] = 0;
        }
    }
    cavity_flow(nt, u, un, v, vn, dt, dx, dy, p, pn, b, rho, nu, nx, ny, nit);
    cudaDeviceSynchronize();
    print2d(u, nx, ny);
    print2d(v, nx, ny);
    print2d(p, nx, ny);
    // Release the managed allocations (the original leaked all seven).
    cudaFree(u);
    cudaFree(v);
    cudaFree(p);
    cudaFree(b);
    cudaFree(un);
    cudaFree(vn);
    cudaFree(pn);
}
|
20,927 | #include "includes.h"
// Soft-thresholding (shrinkage): target[i] = sign(f) * max(|f| - alpha, 0)
// for f = mat[i]. Grid-stride loop, so any launch config covers len items.
__global__ void kApplySoftThreshold(float* mat, float alpha, float* target, unsigned int len) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int numThreads = blockDim.x * gridDim.x;
    for (unsigned int i = idx; i < len; i += numThreads) {
        float f = mat[i];
        // Use single-precision fmaxf/fminf: the original's `0.` literals
        // promoted every clamp to double arithmetic.
        target[i] = f > 0 ? fmaxf(0.0f, f - alpha) : fminf(0.0f, f + alpha);
    }
}
20,928 | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <cuda_runtime.h>
#include "device_launch_parameters.h"
#include <assert.h>
#include <chrono>
// Per-element interval length: output[i] = stopper[i + stopsoffset] -
// starter[i + startsoffset], one element per thread.
template <typename T, typename C>
__global__
void sub(T* output, const C* starter, const C* stopper, int64_t startsoffset, int64_t stopsoffset, int64_t n) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n)
        return;
    C lo = starter[tid + startsoffset];
    C hi = stopper[tid + stopsoffset];
    // Intervals must be well-formed; traps on the device otherwise.
    assert(lo <= hi);
    output[tid] = hi - lo;
}
// GPU offsets builder: computes per-interval lengths with the `sub` kernel,
// then an exclusive scan via thrust, writing length+1 entries into `output`
// (output[0] == 0, output[length] == total). `output` must therefore have
// room for length+1 elements.
// NOTE(review): assumes length >= 1 — data.back() on an empty vector, and
// a zero-thread launch, would misbehave for length == 0. No CUDA error
// checking is performed. Confirm callers guarantee length > 0.
template <typename T, typename C>
void prefix_sum(T* output, const C* arr, const C* arr2, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
// Launch config: one thread per interval, capped at 1024 threads/block.
int block, thread;
if (length > 1024) {
block = (length / 1024) + 1;
thread = 1024;
}
else {
thread = length;
block = 1;
}
T* d_output;
C* d_arr, * d_arr2;
cudaMalloc((void**)&d_output, length * sizeof(T));
cudaMalloc((void**)&d_arr, length * sizeof(C));
cudaMemcpy(d_arr, arr, length * sizeof(C), cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_arr2, length * sizeof(C));
cudaMemcpy(d_arr2, arr2, length * sizeof(C), cudaMemcpyHostToDevice);
sub<T, C> <<<block, thread>>>(d_output, d_arr, d_arr2, startsoffset, stopsoffset, length);
cudaDeviceSynchronize();
// Exclusive scan of the lengths; append the inclusive total as the last entry.
thrust::device_vector<T> data(d_output, d_output + length);
thrust::device_vector<T> temp(data.size() + 1);
thrust::exclusive_scan(data.begin(), data.end(), temp.begin());
temp[data.size()] = data.back() + temp[data.size() - 1];
thrust::copy(temp.begin(), temp.end(), output);
cudaFree(d_output);
cudaFree(d_arr);
cudaFree(d_arr2);
}
// CPU reference for prefix_sum: running sum of interval lengths.
// Writes length+1 offsets starting at 0; tooffsets[k+1] - tooffsets[k]
// is the length of interval k.
template <typename C, typename T>
void foo(T* tooffsets, const C* fromstarts, const C* fromstops, int64_t startsoffset, int64_t stopsoffset, int64_t length) {
    T running = 0;
    tooffsets[0] = running;
    for (int64_t k = 0; k < length; k++) {
        C lo = fromstarts[startsoffset + k];
        C hi = fromstops[stopsoffset + k];
        assert(lo <= hi);   // intervals must be well-formed
        running += (T)(hi - lo);
        tooffsets[k + 1] = running;
    }
}
// True iff the first n elements of the two arrays match pairwise.
template <typename T>
bool compare(T* arr1, T* arr2, int n) {
    int k = 0;
    while (k < n) {
        if (!(arr1[k] == arr2[k]))
            return false;
        ++k;
    }
    return true;
}
int main() {
    int const size = 60000;
    // Heap-allocate: the original put four ~60k int arrays (~1 MB) on the
    // stack, which risks overflowing the default stack limit.
    int* starter = new int[size];
    int* stopper = new int[size];
    int* output = new int[size + 1];
    int* output2 = new int[size + 1];
    for (int i = 0; i < size; i++) {
        starter[i] = i;
        stopper[i] = i + 1;
    }
    prefix_sum<int, int>(output, starter, stopper, 0, 0, size); // GPU Warm up
    cudaDeviceSynchronize();
    // Time the GPU path.
    auto start1 = std::chrono::high_resolution_clock::now();
    prefix_sum<int, int>(output, starter, stopper, 0, 0, size);
    cudaDeviceSynchronize();
    auto stop1 = std::chrono::high_resolution_clock::now();
    auto time1 = std::chrono::duration_cast<std::chrono::microseconds>(stop1 - start1);
    std::cout << "Time taken for GPU = " << time1.count() << "\n";
    // Time the CPU reference.
    auto start2 = std::chrono::high_resolution_clock::now();
    foo<int, int>(output2, starter, stopper, 0, 0, size);
    auto stop2 = std::chrono::high_resolution_clock::now();
    auto time2 = std::chrono::duration_cast<std::chrono::microseconds>(stop2 - start2);
    std::cout << "Time taken for CPU = " << time2.count() << "\n";
    // Report the first mismatch, if any, then clean up.
    bool matches = true;
    for (int i = 0; i < size && matches; i++) {
        if (output2[i] != output[i]) {
            std::cout << "FALSE" << std::endl;
            matches = false;
        }
    }
    delete[] starter;
    delete[] stopper;
    delete[] output;
    delete[] output2;
    return 0;
}
|
20,929 | #include<cuda.h>
#include<iostream>
#include <unistd.h>
using namespace std;
const int numElems =2;
// Repeatedly bumps data[threadIdx.x] by 0.1, spinning ~5000 clock cycles
// between increments so a concurrently running monitor kernel can observe
// intermediate values.
__global__ void dataKernel( double* data, int nsteps){
    int thid = threadIdx.x;
    for (int step = 0; step < nsteps; ++step) {
        data[thid] = data[thid] + .1;
        // Use clock64() for both the stamp and the poll: the original took
        // the start with clock64() but polled with 32-bit clock(), so the
        // elapsed computation compared mismatched counters (it also
        // shadowed an unused outer `start`). clock64() is monotonic, so no
        // wrap-around correction is needed.
        long long start = clock64();
        while (clock64() - start <= 5000) {
            // busy-wait ~5000 cycles
        }
        __syncthreads();
    }
}
// Single-thread snapshot kernel: copies one double out of the buffer
// being mutated by dataKernel so the host can sample its progress.
__global__ void monitorKernel(double * write_2_ptr, double * read_in_ptr){
    write_2_ptr[0] = read_in_ptr[0];
}
int main()
{
    // Launch a long-running kernel on the default stream, then sample its
    // output twice via a non-blocking stream while it runs.
    cout <<"Running CUDA init" << endl;
    double *dArray;
    int i = 0;
    double *h_data;           // pinned host buffer for async copies
    double *monitor_data_dev; // device staging buffer for the monitor
    cudaMalloc((void**)&dArray, sizeof(double)*numElems);
    cudaMemset(dArray, 0, numElems*sizeof(double));
    cudaMallocHost((void**)&h_data, sizeof(double)*numElems);
    cudaStream_t stream1;
    cudaStreamCreateWithFlags(&stream1, cudaStreamNonBlocking);
    cudaMalloc((void**)&monitor_data_dev, sizeof(double)*numElems);
    cout <<"Launching Helper Kernel" << endl;
    dataKernel<<<1,numElems>>>(dArray, 1000);
    // sleep() takes whole seconds, so the original sleep(.4) truncated to
    // sleep(0) and never waited; use usleep with microseconds.
    usleep(400000);
    cout <<"Launching Monitor Kernel" << endl;
    monitorKernel<<<1, 1,0, stream1>>>(monitor_data_dev, dArray);
    cout <<"Launching Async Mem Cpy" << endl;
    cudaMemcpyAsync(h_data, monitor_data_dev, numElems*sizeof(double), cudaMemcpyDeviceToHost, stream1);
    cudaStreamSynchronize(stream1);
    for(i = 0; i < numElems; i++)
        cout << "Value copied over: " << h_data[i] << endl;
    usleep(300000);  // was sleep(.3), which also truncated to 0
    cout <<"Launching Monitor Kernel" << endl;
    monitorKernel<<<1, 1,0, stream1>>>(monitor_data_dev, dArray);
    cout <<"Launching Async Mem Cpy" << endl;
    cudaMemcpyAsync(h_data, monitor_data_dev, numElems*sizeof(double), cudaMemcpyDeviceToHost, stream1);
    cudaStreamSynchronize(stream1);
    for(i = 0; i < numElems; i++)
        cout << "Value copied over: " << h_data[i] << endl;
    // Final blocking copy also waits for dataKernel to finish.
    cudaMemcpy(h_data, dArray, sizeof(double)*numElems, cudaMemcpyDeviceToHost);
    for(i = 0; i < numElems; i++)
        cout << "Value copied over: " << h_data[i] << endl;
    // Release everything (the original leaked the stream and pinned buffer).
    cudaStreamDestroy(stream1);
    cudaFreeHost(h_data);
    cudaFree(dArray);
    cudaFree(monitor_data_dev);
    return 0;
}
|
20,930 | #include "includes.h"
// Placeholder kernel intended to initialize the n-body state arrays
// (positions x/y/z, velocities vx/vy/vz, and per-body mass); the body is
// not yet implemented.
__global__ void initialConditions(int n, double* x, double* y, double* z, double* vx, double* vy, double* vz, double* mass){
/* TODO */
}
20,931 | #include <math.h>
#include <stdio.h>
#include <stdlib.h>
// One generation of Conway's Game of Life on a toroidal (wrap-around)
// grid. Each block loads a (blockDim.y+2) x (blockDim.x+2) tile — its
// cells plus a one-cell halo — into dynamically sized shared memory,
// then applies the standard B3/S23 rules and writes the new state back
// into gridIn in place.
// Launch expectations: 2-D blocks; dynamic shared memory of
// (blockDim.x+2)*(blockDim.y+2)*sizeof(int) bytes; intpitch is the row
// pitch of gridIn in ints.
// NOTE(review): the update writes gridIn in place while neighboring
// BLOCKS may still be reading those same global cells into their halos —
// there is no inter-block synchronization, so results can depend on block
// scheduling at tile borders. Confirm whether a separate output buffer
// (double buffering) was intended.
__global__ void playGame(int *gridIn, int intpitch, int width, int height){
unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y; //row index
unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x; //column index
int tx = threadIdx.x; // For shared memory
int ty = threadIdx.y;
extern __shared__ int s_data[];
int sharedMemDim_x = blockDim.x+2;
/*
s_data[tx] = gridIn[(height-1)*intpitch + width - 1];//NW1 iy = 0, ix = 0, tx = 0
s_data[tx] = gridIn[(height-1)*intpitch + ix - 1]; //NW2 iy = 0, ix > 0, tx = 0
s_data[tx] = gridIn[(iy-1)*intpitch + width - 1]; //NW3 iy > 0, ix = 0, tx = 0
s_data[tx] = gridIn[(iy-1)*intpitch + ix - 1]; //NW4 iy > 0, ix > 0, tx = 0
s_data[tx + 1] = gridIn[(height-1)*intpitch + ix]; //N1 iy = 0, ix >= 0, tx >= 0
s_data[tx + 1] = gridIn[(iy-1)*intpitch + ix]; //N2 iy > 0, ix >= 0, tx >= 0
s_data[tx + 2] = gridIn[(height-1)*intpitch]; //NE1 iy = 0, ix = l, tx >= 0
s_data[tx + 2] = gridIn[(height-1)*intpitch + ix + 1]; //NE2 iy = 0, ix < l, tx = l
s_data[tx + 2] = gridIn[(iy-1)*intpitch]; //NE3 iy > 0, ix = l, tx >= 0
s_data[tx + 2] = gridIn[(iy-1)*intpitch + ix + 1]; //NE4 iy > 0, ix < l, tx = l
s_data[(ty+1)*sharedMemDim_x + tx] = gridIn[iy*intpitch + width - 1]; //W1 iy >= 0, ix = 0, tx = 0
s_data[(ty+1)*sharedMemDim_x + tx] = gridIn[iy*intpitch + ix - 1]; //W2 iy >= 0, ix > 0, tx = 0
s_data[(ty+1)*sharedMemDim_x + tx + 1] = gridIn[iy*intpitch + ix]; //itself
s_data[(ty+1)*sharedMemDim_x + tx + 2] = gridIn[iy*intpitch]; //E1 iy >= 0, ix = l, tx >= 0
s_data[(ty+1)*sharedMemDim_x + tx + 2] = gridIn[iy*intpitch + ix + 1]; //E2 iy >= 0, ix < l, tx = l
s_data[(ty+2)*sharedMemDim_x + tx + 2] = gridIn[0]; //SE1 iy = l, ix = l, tx >= 0
s_data[(ty+2)*sharedMemDim_x + tx + 2] = gridIn[ix + 1]; //SE2 iy = l, ix < l, tx = l
s_data[(ty+2)*sharedMemDim_x + tx + 2] = gridIn[(iy+1)*intpitch]; //SE3 iy < l, ix = l, tx >= 0
s_data[(ty+2)*sharedMemDim_x + tx + 2] = gridIn[(iy+1)*intpitch + ix + 1]; //SE4 iy < l, ix < l, tx = l
s_data[(ty+2)*sharedMemDim_x + tx + 1] = gridIn[ix]; //S1 iy = l, ix >= 0, tx >= 0
s_data[(ty+2)*sharedMemDim_x + tx + 1] = gridIn[(iy + 1)*intpitch + ix]; //S2 iy < l, ix >= 0, tx >= 0
s_data[(ty+2)*sharedMemDim_x + tx] = gridIn[width - 1]; //SW1 iy = l, ix = 0, tx = 0
s_data[(ty+2)*sharedMemDim_x + tx] = gridIn[ix - 1]; //SW2 iy = l, ix > 0, tx = 0
s_data[(ty+2)*sharedMemDim_x + tx] = gridIn[(iy+1)*intpitch + width - 1]; //SW3 iy < l, ix = 0, tx = 0
s_data[(ty+2)*sharedMemDim_x + tx] = gridIn[(iy+1)*intpitch + ix - 1]; //SW4 iy < l, ix > 0, tx = 0
2D block logic
NW: ((ty == 0) && ((tx == 0) || (tx == 1)))
N: ((ty == 0) && (((tx != 0) && (tx != blockDim.x-1) && (ix != width-1)) || ((ix == width - 1) && (tx = 0)) || (blockDim.x == 1))
NE: ((ty == 0) && ((tx == blockDim.x-1) || (tx == blockDim.x-2) || (ix == width-1) || (ix == width-2)))
W: (tx == 0)
SW: ((ty == height-1) && ((tx == 0) || (tx == 1)))
S: ((ty == height-1) && (((tx != 0) && (tx != blockDim.x-1) && (ix != width-1)) || ((ix == width-1) && (tx == 0)) || (blockDim.x == 1))
SE: ((ty == height-1) && ((tx == blockDim.x-1) || (tx == blockDim.x-2) || (ix == width-1) || (ix == width-2)))
E: ((tx == blockDim.x-1) || (ix = width-1))
*/
// Cooperative tile load: edge threads additionally fetch the halo cells,
// wrapping around the grid edges (torus topology).
if ((iy < height) && (ix < width)){
if (ty == 0){
// Top halo row (N / NW / NE), wrapping to the bottom grid row when iy == 0.
if (iy == 0){
if (((tx != 0) && (tx != blockDim.x-1) && (ix != width-1)) || ((ix == width - 1) && (tx == 0)) || (blockDim.x == 1))
s_data[tx + 1] = gridIn[(height-1)*intpitch + ix]; //N1
if ((tx==0) || (tx == 1)){
if (ix == 0)
s_data[tx] = gridIn[(height-1)*intpitch + width - 1]; //NW1
else
s_data[tx] = gridIn[(height-1)*intpitch + ix - 1]; //NW2
}
if ((tx == blockDim.x-1) || (tx == blockDim.x-2) || (ix == width-1) || (ix == width-2)){
if (ix == width-1)
s_data[tx + 2] = gridIn[(height-1)*intpitch]; //NE1
else
s_data[tx + 2] = gridIn[(height-1)*intpitch + ix + 1]; //NE2
}
}
else{
if (((tx != 0) && (tx != blockDim.x-1) && (ix != width-1)) || ((ix == width - 1) && (tx == 0)) || (blockDim.x == 1))
s_data[tx + 1] = gridIn[(iy-1)*intpitch + ix]; //N2
if ((tx==0) || (tx == 1)){
if (ix == 0)
s_data[tx] = gridIn[(iy-1)*intpitch + width - 1]; //NW3
else
s_data[tx] = gridIn[(iy-1)*intpitch + ix - 1]; //NW4
}
if ((tx == blockDim.x-1) || (tx == blockDim.x-2) || (ix == width-1) || (ix == width-2)){
if (ix == width-1)
s_data[tx + 2] = gridIn[(iy-1)*intpitch]; //NE3
else
s_data[tx + 2] = gridIn[(iy-1)*intpitch + ix + 1]; //NE4
}
}
}
// Bottom halo row (S / SW / SE), wrapping to the top grid row at iy == height-1.
if ((ty == blockDim.y-1) || (iy == height-1)){
if (iy == height-1){
if (((tx != 0) && (tx != blockDim.x-1) && (ix != width-1)) || ((ix == width-1) && (tx == 0)) || (blockDim.x == 1))
s_data[(ty+2)*sharedMemDim_x + tx + 1] = gridIn[ix]; //S1
if ((tx == 0) || (tx == 1)){
if (ix == 0)
s_data[(ty+2)*sharedMemDim_x + tx] = gridIn[width - 1]; //SW1
else
s_data[(ty+2)*sharedMemDim_x + tx] = gridIn[ix - 1]; //SW2
}
if ((tx == blockDim.x-1) || (tx == blockDim.x-2) || (ix == width-1) || (ix == width-2)){
if (ix == width-1)
s_data[(ty+2)*sharedMemDim_x + tx + 2] = gridIn[0]; //SE1
else
s_data[(ty+2)*sharedMemDim_x + tx + 2] = gridIn[ix + 1]; //SE2
}
}
else{
if (((tx != 0) && (tx != blockDim.x-1) && (ix != width-1)) || ((ix == width-1) && (tx == 0)) || (blockDim.x == 1))
s_data[(ty+2)*sharedMemDim_x + tx + 1] = gridIn[(iy + 1)*intpitch + ix]; //S2
if ((tx == 0) || (tx == 1)){
if (ix == 0)
s_data[(ty+2)*sharedMemDim_x + tx] = gridIn[(iy+1)*intpitch + width - 1]; //SW3
else
s_data[(ty+2)*sharedMemDim_x + tx] = gridIn[(iy+1)*intpitch + ix - 1]; //SW4
}
if ((tx == blockDim.x-1) || (tx == blockDim.x-2) || (ix == width-1) || (ix == width-2)){
if (ix == width-1)
s_data[(ty+2)*sharedMemDim_x + tx + 2] = gridIn[(iy+1)*intpitch]; //SE3
else
s_data[(ty+2)*sharedMemDim_x + tx + 2] = gridIn[(iy+1)*intpitch + ix + 1]; //SE4
}
}
}
// Left and right halo columns (W / E), wrapping horizontally.
if (tx == 0){
if (ix == 0)
s_data[(ty+1)*sharedMemDim_x] = gridIn[iy*intpitch + width - 1]; //W1
else
s_data[(ty+1)*sharedMemDim_x] = gridIn[iy*intpitch + ix - 1]; //W2
}
if ((tx == blockDim.x-1) || (ix == width-1)){
if (ix == width-1)
s_data[(ty+1)*sharedMemDim_x + tx + 2] = gridIn[iy*intpitch]; //E1
else
s_data[(ty+1)*sharedMemDim_x + tx + 2] = gridIn[iy*intpitch + ix + 1]; //E2
}
s_data[(ty+1)*sharedMemDim_x + tx + 1] = gridIn[iy*intpitch + ix]; //itself
}
__syncthreads();
/*if ((blockIdx.y == 0) && (blockIdx.x == 0) && (threadIdx.x == 0) && (threadIdx.y == 0)){
int sharedMemDim_y = blockDim.y+2;
printf("block_y: %d, block_x: %d\n", 0, 0);
for(int i=0; i<sharedMemDim_y; i++){
for(int j=0; j<sharedMemDim_x; j++){
printf("%d ", s_data[i*sharedMemDim_x + j]);
}
printf("\n");
}
printf("\n");
}*/
// Apply the Life rules from the shared tile: a live cell survives with
// 2 or 3 neighbors; a dead cell is born with exactly 3.
if ((ix<width) && (iy<height)){
int sum = (
s_data[ty*sharedMemDim_x + tx ] + //NW
s_data[ty*sharedMemDim_x + tx + 1 ] + //N
s_data[ty*sharedMemDim_x + tx + 2 ] + //NE
s_data[(ty+1)*sharedMemDim_x + tx ] + //W
s_data[(ty+1)*sharedMemDim_x + tx + 2 ] + //E
s_data[(ty+2)*sharedMemDim_x + tx ] + //SW
s_data[(ty+2)*sharedMemDim_x + tx + 1 ] + //S
s_data[(ty+2)*sharedMemDim_x + tx + 2 ] //SE
);
if ((s_data[(ty+1)*sharedMemDim_x + tx + 1] == 1) && (sum != 2) && (sum != 3))
gridIn[iy*intpitch + ix] = 0;
else if ((s_data[(ty+1)*sharedMemDim_x + tx + 1] == 0) && (sum == 3))
gridIn[iy*intpitch + ix] = 1;
}
}
|
20,932 | #include "includes.h"
static unsigned int GRID_SIZE_N;
static unsigned int GRID_SIZE_4N;
static unsigned int MAX_STATE_VALUE;
// Phylogenetic likelihood inner kernel (4-state model, 4 gamma rate
// categories). Each block of 4x4x4 = 64 threads processes 16 doubles of
// x1/x2, reducing left/right partial products in shared memory and
// writing the combined conditional likelihoods to x3.
// NOTE(review): the shared arrays are `volatile` and the tree-reductions
// rely on __syncthreads() between phases rather than warp intrinsics;
// the fixed dimensions (4 states, 16 values per block) are baked into
// every index expression — confirm launch uses blockDim (4,4,4).
__global__ static void cudaIIGammaKernel(double *extEV, double *x1, double *x2, double *x3, double *left, double *right) {
__shared__ volatile double al[64], ar[64], v[64], x1px2[16];
// Flat thread id within the 4x4x4 block.
const int tid = (threadIdx.z * 16) + (threadIdx.y * 4) + threadIdx.x;
const int offset = 16 * blockIdx.x + 4 * threadIdx.z;
// Per-lane products of child likelihoods with the left/right P-matrices.
al[tid] = x1[offset + threadIdx.x] * left[tid];
ar[tid] = x2[offset + threadIdx.x] * right[tid];
__syncthreads();
// Two-step tree reduction over the 4 states (threadIdx.x dimension).
if (threadIdx.x <= 1) {
al[tid] += al[tid + 2];
ar[tid] += ar[tid + 2];
}
__syncthreads();
if (threadIdx.x == 0) {
al[tid] += al[tid + 1];
ar[tid] += ar[tid + 1];
x1px2[(threadIdx.z * 4) + threadIdx.y] = al[tid] * ar[tid];
}
__syncthreads();
// Project through the eigenvector matrix and reduce over threadIdx.y.
v[tid] = x1px2[threadIdx.y + (threadIdx.z * 4)] *
extEV[threadIdx.y * 4 + threadIdx.x];
__syncthreads();
if (threadIdx.y <= 1) {
v[tid] += v[tid + 8];
}
__syncthreads();
if (threadIdx.y == 0) {
v[tid] += v[tid + 4];
x3[offset + threadIdx.x] = v[tid];
}
}
20,933 | /****************************************************************************************************
* Tyler Griffith *
* October 25th, 2018 *
* Project 7: Matrix Mult on GPU *
* CSC-4310-01 PROF: R. Shore *
* Desc: Use one thread to compute each element of the solution *
matrix *
* To Compile: nvcc p6.cu -o cuda_mult_v1 *
* To Run: ./cuda_mult_v1 <device #> <tile width> <matrix A file> <matrix B file> <matrix A*B file> *
****************************************************************************************************/
#include <stdio.h>
#include <stdlib.h>
int getN(char* fileName);
int* readMatrix(char* fileName);
void writeMatrix(int *x, int n, char* fileName);
__global__
void MatrixMultKernel(int *a, int *b, int *ab, int size){
    // One thread computes one element of ab = a * b
    // (size x size integer matrices, row-major).
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= size || c >= size)
        return;
    int acc = 0;
    for (int k = 0; k < size; ++k)
        acc += a[r * size + k] * b[k * size + c];
    ab[r * size + c] = acc;
}
int main(int argc, char *argv[]){
    // Square matrix size read from the input file.
    int n;
    // Make sure the correct syntax is used.
    if(argc != 6){
        printf("Error! You do not have 5 elements to your command!\n");
        printf("To multiply 2 matricies please use the following syntax:\n");
        printf("./cuda_mult_v1 <device #> <tile width> <matrix A file> <matrix B file> <matrix A*B file>\n");
        exit(1);
    }
    // Parse the device number and tile width.
    long dNum = strtol(argv[1], NULL, 10);
    int deviceNum = int(dNum);
    long width = strtol(argv[2], NULL, 10);
    const int tileWidth = (int)width;
    int *matrixA, *matrixB, *matrixC, *d_a, *d_b, *d_c;
    cudaSetDevice(deviceNum);
    n = getN(argv[3]);
    // File I/O (readMatrix returns malloc'd buffers).
    matrixA = readMatrix(argv[3]);
    matrixB = readMatrix(argv[4]);
    // Allocate and zero-initialize the result.
    matrixC = new int[n*n];
    for (int i = 0; i < n*n; ++i) {
        matrixC[i] = 0;
    }
    // CUDA event timing around the kernel.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int size = n*n*sizeof(int);
    // Allocate device memory and copy the operands.
    cudaMalloc((void **)&d_a, size);
    cudaMemcpy(d_a, matrixA, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_b, size);
    cudaMemcpy(d_b, matrixB, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_c, size);
    cudaMemset(d_c, 0, size);
    dim3 dimGrid(tileWidth, tileWidth);
    dim3 dimBlock(tileWidth>>1, tileWidth>>1);
    cudaEventRecord(start);
    MatrixMultKernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    cudaEventRecord(stop);
    // Copy the answer back to the CPU (also synchronizes with the kernel).
    cudaMemcpy(matrixC, d_c, n*n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    // Surface any launch/execution error.
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess){
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    printf("Computation completed in %fms", ms);
    writeMatrix(matrixC, n, argv[5]);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Release host buffers and events (the original leaked all of them).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(matrixA);
    free(matrixB);
    delete[] matrixC;
    return 0;
}
// Reads the matrix dimension (the first integer) from the given file.
// Exits with an error message if the file cannot be opened or does not
// start with an integer (the original dereferenced a NULL FILE* on a
// missing file).
int getN(char* fileName){
    FILE *inFile = fopen(fileName, "r");
    if (inFile == NULL) {
        fprintf(stderr, "Error: cannot open %s\n", fileName);
        exit(1);
    }
    int n = 0;
    if (fscanf(inFile, "%d", &n) != 1) {
        fprintf(stderr, "Error: no dimension found in %s\n", fileName);
        fclose(inFile);
        exit(1);
    }
    fclose(inFile);
    return n;
}
// Loads an n x n integer matrix stored as the dimension n followed by
// n*n whitespace-separated values. Returns a malloc'd row-major buffer
// the caller must free().
int* readMatrix(char* fileName){
    FILE *inFile = fopen(fileName, "r");
    int n;
    fscanf(inFile, "%d", &n);
    int *x = (int*)malloc(n * n * sizeof(int));
    // fscanf skips whitespace/newlines, so a single flat loop suffices.
    for (int k = 0; k < n * n; ++k) {
        fscanf(inFile, "%d", &x[k]);
    }
    fclose(inFile);
    return x;
}
// Writes the n x n matrix to the named file, one row per line with
// values separated (and trailed) by a single space.
void writeMatrix(int *x, int n, char* fileName){
    FILE *outFile = fopen(fileName, "w");
    for (int r = 0; r < n; ++r) {
        for (int c = 0; c < n; ++c) {
            fprintf(outFile, "%d ", x[r * n + c]);
        }
        fprintf(outFile, "\n");
    }
    fclose(outFile);
}
20,934 |
#include <sys/time.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
// this is the max iterations decide to do in the loop
unsigned long MAX_OPS = 20000000;
// Integer-throughput microbenchmark: each thread performs 24 independent
// integer adds/multiplies per loop iteration (24 * max_ops ops total).
// The accumulators exist only to create independent instruction streams.
// NOTE(review): nothing is ever stored to memory, so the compiler is free
// to eliminate the whole loop as dead code — inspect the SASS (or write
// one accumulator out) before trusting timings derived from this kernel.
__global__ void gpu_iops(unsigned long max_ops) {
int ab=1;
int bb=1;
int cb=1;
int db=1;
int eb=1;
int fb=1;
int gb=1;
int hb=1;
int ib=1;
int jb=1;
int kb=1;
int lb=1;
int mb=1;
int nb=1;
int ob=1;
int pb=1;
int qb=1;
int rb=1;
int sb=1;
int tb=1;
int ub=1;
int vb=1;
int wb=1;
int xb=1;
// 24 interger calculation
for(unsigned long count=0; count<max_ops; count++)
{
ab=ab+12;
bb=bb+22;
cb=cb+32;
db=db+42;
eb=eb+52;
fb=fb+62;
gb=gb+72;
hb=hb+82;
ib=ib+92;
jb=jb+10;
kb=kb+11;
lb=lb+12;
mb=mb*13;
nb=nb*14;
ob=ob*15;
pb=pb*16;
qb=qb*17;
rb=rb*18;
sb=sb*19;
tb=tb*20;
ub=ub*21;
vb=vb*22;
wb=wb*23;
xb=xb*24;
}
}
// Floating-point-throughput micro-benchmark kernel: every thread runs
// max_ops iterations of 24 independent single-precision ops (12 adds,
// 12 multiplies) across 24 accumulators.
// Fix: the original used unsuffixed literals (1.11, 13.13, ...), which are
// doubles, so each op promoted to double precision and measured DP, not
// SP, throughput. All literals now carry the f suffix.
// NOTE(review): results are never stored, so the compiler may eliminate
// the loop entirely; check the generated code before trusting the number.
__global__ void gpu_flops(unsigned long max_ops) {
float ae=0.2f;
float be=0.2f;
float ce=0.2f;
float de=0.2f;
float ee=0.2f;
float fe=0.2f;
float ge=0.2f;
float he=0.2f;
float ie=0.2f;
float je=0.2f;
float ke=0.2f;
float le=0.2f;
float me=0.2f;
float ne=0.2f;
float oe=0.2f;
float pe=0.2f;
float qe=0.2f;
float re=0.2f;
float se=0.2f;
float te=0.2f;
float ue=0.2f;
float ve=0.2f;
float we=0.2f;
float xe=0.2f;
// 24 single-precision operations per iteration
for(unsigned long count=0; count<max_ops; count++)
{
ae=ae+1.11f;
be=be+2.22f;
ce=ce+3.33f;
de=de+4.44f;
ee=ee+5.52f;
fe=fe+6.61f;
ge=ge+7.72f;
he=he+8.83f;
ie=ie+9.94f;
je=je+10.10f;
ke=ke+11.11f;
le=le+12.12f;
me=me*13.13f;
ne=ne*14.14f;
oe=oe*15.15f;
pe=pe*16.16f;
qe=qe*17.17f;
re=re*18.18f;
se=se*19.19f;
te=te*20.20f;
ue=ue*21.21f;
ve=ve*22.22f;
we=we*23.23f;
xe=xe*24.24f;
}
}
// Parse options (-n total threads, -l loop count, -t I|F), launch the
// selected micro-benchmark kernel with 1024-thread blocks, and report
// elapsed wall time and throughput.
int main(int argc, char *argv[]) {
// getopt() returns int; the original used char, which never compares
// equal to -1 on platforms where char is unsigned.
int c;
// total number of GPU threads requested
int threads = 1024;
char test = 'I';
// parse the command line
while ( (c = getopt(argc, argv, "n:l:t:") ) != -1)
{
switch (c)
{
case 'n':
threads = atoi(optarg);
break;
case 'l':
MAX_OPS = atol(optarg);
break;
case 't':
test = optarg[0];
break;
default:
printf("Usage\n");
return -1;
}
}
// Ceil-divide so threads < 1024 still gets one block; the original
// ceil(threads/1024) used integer division and launched 0 blocks.
int blocks = (threads + 1023) / 1024;
// timing state
struct timeval tv;
long long start, stop;
double secs;
if(test == 'I')
{
gettimeofday(&tv, NULL);
// start time in microseconds
start = tv.tv_sec*1000000LL + tv.tv_usec;
gpu_iops<<< blocks, 1024 >>>(MAX_OPS);
// cudaThreadSynchronize() is deprecated; wait for the kernel here
cudaDeviceSynchronize();
gettimeofday(&tv, NULL);
// stop time in microseconds
stop = tv.tv_sec*1000000LL + tv.tv_usec;
// total elapsed seconds
secs = (stop-start)/1000000.0;
printf("Time taken: %lf\n", secs);
// NOTE(review): the 1e10 divisor preserves the original output scale
printf("IOPS\t%lf\n", (MAX_OPS*24.*threads)/(secs*10000000000.));
}
else if(test == 'F')
{
gettimeofday(&tv, NULL);
// start time in microseconds
start = tv.tv_sec*1000000LL + tv.tv_usec;
gpu_flops<<< blocks, 1024 >>>(MAX_OPS);
cudaDeviceSynchronize();
// stop time in microseconds
gettimeofday(&tv, NULL);
stop = tv.tv_sec*1000000LL + tv.tv_usec;
// total elapsed seconds
secs = (stop-start)/1000000.0;
printf("Time taken: %lf\n", secs);
// NOTE(review): the 1e10 divisor preserves the original output scale
printf("GFLOPS\t%lf\n", (MAX_OPS*24.*threads)/(secs*10000000000.));
}
}
|
20,935 | // input: in_data (b,g,c), in_grid (b,n)
// output: out_data (b,n,c)
// input: in_data (b,g,c), in_grid (b,n) -> output: out_data (b,n,c)
// Each thread owns one (channel, point) column via a grid-stride loop and,
// for every batch, gathers the grid cell that in_grid selects for that point.
__global__ void grid_upsampling_gpu(int b,int n,int c,int g,const float * in_data,const int * in_grid,float * out_data){
    const int step = blockDim.x * gridDim.x;
    for (int work = blockIdx.x * blockDim.x + threadIdx.x; work < c*n; work += step)
    {
        const int ch = work / n;            // channel index
        const int pt = work - ch * n;       // point index (work % n)
        for (int bi = 0; bi < b; ++bi)
        {
            const int cell = in_grid[bi*n + pt];                       // in_grid[b, n]
            out_data[bi*n*c + pt*c + ch] = in_data[bi*g*c + cell*c + ch]; // out[b,n,c] <- in[b,g,c]
        }
    }
}
// Backward pass of grid upsampling: scatter-add grad_out (b,n,c) into
// grad_in (b,g,c) through the in_grid (b,n) indirection.
// Each thread owns one (batch, channel) pair, so all += updates to a given
// grad_in entry happen inside a single thread — no atomics are needed.
// grad_in is expected to be zeroed by the launcher before this runs.
__global__ void grid_upsampling_grad_gpu(int b,int n,int c,int g,const int * in_grid,const float * grad_out,float * grad_in){
//int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// grid-stride loop over all (batch, channel) pairs
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < b*c; index += stride)
{
int index_batch = index / c;
int index_channel = index % c;
/*for (int index_grid = 0; index_grid < g; index_grid++)
{
int index_grad_in = index_batch*g*c + index_grid*c + index_channel;
grad_in[index_grad_in] = .0;
}*/
// accumulate the gradient of every point that mapped to each grid cell
for (int index_point = 0; index_point < n; index_point++)
{
int index_grid = in_grid[index_batch*n + index_point]; // in_grid[b, n]
int index_grad_out = index_batch*n*c + index_point*c + index_channel; // grad_out[b,n,c]
int index_grad_in = index_batch*g*c + index_grid*c + index_channel; // grad_in[b,g,c]
grad_in[index_grad_in] += 1 * grad_out[index_grad_out];
}
}
}
// Host launcher for grid_upsampling_gpu. Zeroes out_data (b*n*c floats)
// and launches one thread per (channel, point) column.
// Fix: the grid was hard-coded to 8 blocks (the proper computation sat
// commented out), underutilizing the GPU for large inputs; the kernel's
// grid-stride loop keeps any block count correct, so we size the grid to
// the work with a cap.
void gridupsamplingLauncher(int b,int n,int c,int g,const float * in_data,const int * in_grid,float * out_data){
const int threads_per_block = 128;
int number_of_blocks = (c*n + threads_per_block - 1)/threads_per_block;
if (number_of_blocks < 1) number_of_blocks = 1;
if (number_of_blocks > 32768) number_of_blocks = 32768; // cap; grid-stride covers the rest
cudaMemset(out_data,0,(size_t)b*n*c*sizeof(float));
grid_upsampling_gpu<<<number_of_blocks, threads_per_block>>>(b,n,c,g,in_data,in_grid,out_data);
}
// Host launcher for grid_upsampling_grad_gpu. Zeroes grad_in (b*g*c
// floats) — the kernel accumulates with += — and launches one thread per
// (batch, channel) pair.
// Fix: the grid was hard-coded to 8 blocks (the proper computation sat
// commented out); the kernel's grid-stride loop keeps any block count
// correct, so size the grid to the work with a cap.
void gridupsamplinggradLauncher(int b,int n,int c,int g,const int * in_grid,const float * grad_out,float * grad_in){
const int threads_per_block = 128;
int number_of_blocks = (b*c + threads_per_block - 1)/threads_per_block;
if (number_of_blocks < 1) number_of_blocks = 1;
if (number_of_blocks > 32768) number_of_blocks = 32768; // cap; grid-stride covers the rest
cudaMemset(grad_in,0,(size_t)b*g*c*sizeof(float));
grid_upsampling_grad_gpu<<<number_of_blocks, threads_per_block>>>(b,n,c,g,in_grid,grad_out,grad_in);
}
|
20,936 | #include <iostream>
#include <cmath>
#include <cstdlib>
#include <cstdio>
#include <cuda.h>
#include <ctime>
using namespace std;
// Device-side universal hash: ((a*element + b) mod p) mod n.
// The c coefficient is accepted but unused (kept for interface symmetry
// with call sites that pass all three coefficient arrays).
__device__ int hashing_d (int element, int a, int b, int c, int p, int n){
return (unsigned int)(a * element + b) % p % n;
}
// Host-side twin of hashing_d: ((a*element + b) mod p) mod n, computed in
// unsigned arithmetic. The c coefficient is unused (interface symmetry).
int hashing (int element, int a, int b, int c, int p, int n){
    unsigned int mixed = (unsigned int)(a * element + b);
    return (int)(mixed % p % n);
}
// One thread per key: compute the key's three candidate slots (hash
// functions 0..2; assumes t >= 3) and atomically bump a per-slot occupancy
// counter in hash_table2. hash_table2 must be zeroed before the launch.
// Fix: added the i < N grid-tail guard the original lacked.
__global__ void add_two(int p, int n, int N, int t, int* hash_table, int* hash_table2, int* hash_elements, int* func_table, int* a, int* b, int* c){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= N) return; // guard the grid tail
int hash_element = hash_elements[i];
unsigned int loca1 = hashing_d(hash_element, a[0], b[0], c[0], p, n);
unsigned int loca2 = hashing_d(hash_element, a[1], b[1], c[1], p, n);
unsigned int loca3 = hashing_d(hash_element, a[2], b[2], c[2], p, n);
atomicAdd(&hash_table2[loca1], 1);
atomicAdd(&hash_table2[loca2], 1);
atomicAdd(&hash_table2[loca3], 1);
}
// One thread per key: if any of the key's three candidate slots is wanted
// by exactly one key (occupancy counter == 1), assign that hash function
// to the key. Later functions win when several slots are conflict-free.
// Fix: added the i < N grid-tail guard the original lacked.
__global__ void fix_location(int p, int n, int N, int t, int* hash_table, int* hash_table2, int* hash_elements, int* func_table, int* a, int* b, int* c){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= N) return; // guard the grid tail
int hash_element = hash_elements[i];
unsigned int loca1 = hashing_d(hash_element, a[0], b[0], c[0], p, n);
unsigned int loca2 = hashing_d(hash_element, a[1], b[1], c[1], p, n);
unsigned int loca3 = hashing_d(hash_element, a[2], b[2], c[2], p, n);
if (hash_table2[loca1] == 1){
func_table[i] = 0;
}
if (hash_table2[loca2] == 1){
func_table[i] = 1;
}
if (hash_table2[loca3] == 1){
func_table[i] = 2;
}
}
// One thread per key: verify the key is stored at the slot of its assigned
// hash function; set *indicator = -1 on any miss (write-only flag, benign
// race — every writer stores the same value).
// Fix: added the i < N grid-tail guard the original lacked.
__global__ void check(int p, int n, int N, int t, int* hash_table, int* hash_elements, int* func_table, int* a, int* b, int* c, int max_count, int *indicator){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= N) return; // guard the grid tail
int curr_func = func_table[i];
int hash_element = hash_elements[i];
unsigned int loca = hashing_d(hash_element, a[curr_func], b[curr_func], c[curr_func], p, n);
if (hash_table[loca] != hash_element){
*indicator = -1;
}
}
// One eviction step of parallel cuckoo insertion: each key whose current
// slot does not hold it advances to the next hash function (mod t) and
// writes itself there. Concurrent writes to the same slot race; the race
// is benign in the "last writer wins" sense, and the host re-runs
// insert/check rounds until check() reports no misses.
__global__ void insert(int p, int n, int N, int t, int* hash_table, int* hash_elements, int* func_table, int* a, int* b, int* c, int max_count){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i < N){
int curr_func = func_table[i];
int hash_element = hash_elements[i];
unsigned int loca = hashing_d(hash_element, a[curr_func], b[curr_func], c[curr_func], p, n);
//hash_table[(unsigned int)loca] = hash_element;
// displaced (or never-inserted) key: move to the next function's slot
if (hash_table[(unsigned int)loca] != hash_element){
func_table[i] = (curr_func + 1) % t;
curr_func = (curr_func + 1) % t;
loca = hashing_d(hash_element, a[curr_func], b[curr_func], c[curr_func], p, n);
hash_table[(unsigned int)loca] = hash_element;
}
/*for (int j = 0; j < max_count; j++){
if (hash_table[(unsigned int)loca] != hash_element){
func_table[i] = (curr_func + 1) % t;
curr_func = (curr_func + 1) % t;
loca = ((unsigned int)(a[curr_func] * hash_element + b[curr_func]) % p % n);
hash_table[(unsigned int)loca] = hash_element;
}
__syncthreads();
}*/
}
}
// One thread per query: probe all t hash functions and record 1 in
// find_result[i] if any slot holds the queried value, else 0.
// Fix: added the i < N grid-tail guard the original lacked.
__global__ void find(int p, int n, int N, int t, int* hash_table, int* find_elements, int* a, int* b, int* c, int* find_result){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= N) return; // guard the grid tail
int found = 0;
for (int j = 0; j < t; j++){
if (hash_table[hashing_d(find_elements[i], a[j], b[j], c[j], p, n)] == find_elements[i]){
found = 1;
}
}
find_result[i] = found;
}
// One thread per deletion: probe all t hash functions and clear every slot
// holding the value (slot value 0 marks "empty"; fun_index_table gets the
// sentinel t). NOTE(review): 0 is also a legal key value here — confirm
// keys are always nonzero, otherwise deletion and emptiness are ambiguous.
// Fix: added the i < N grid-tail guard the original lacked.
__global__ void delete_ele(int p, int n, int N, int t, int* hash_table, int* fun_index_table,int* delete_elements, int* a, int* b, int* c, int* find_result){
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i >= N) return; // guard the grid tail
for (int j = 0; j < t; j++){
if (hash_table[hashing_d(delete_elements[i], a[j], b[j], c[j], p, n)] == delete_elements[i]){
hash_table[hashing_d(delete_elements[i], a[j], b[j], c[j], p, n)] = 0;
fun_index_table[hashing_d(delete_elements[i], a[j], b[j], c[j], p, n)] = t;
}
}
}
// Draw random coefficients for t hash functions into a, b, c.
// Uses rand(), so call srand() beforehand for reproducible runs.
void random_hash_fun(int t, int* a, int* b, int* c){
    int* coeff[3] = {a, b, c};
    for (int i = 0; i < t; ++i){
        for (int k = 0; k < 3; ++k){
            coeff[k][i] = rand();   // same draw order as before: a[i], b[i], c[i]
        }
    }
}
// Fill hash_elements with N random keys uniform in [0, 2^27).
// Masking with 2^27-1 is identical to % (1<<27) for non-negative rand().
void random_hash_elements(int N, int* hash_elements){
    const int mask = (1 << 27) - 1;
    for (int i = 0; i < N; ++i)
        hash_elements[i] = rand() & mask;
}
// Reset state before a build: zero both hash tables, record each key's
// function-0 slot in hash_value (used only for the pre-sort), and assign
// every key hash function 0.
void initialize(int n, int N, int* hash_table, int* hash_table2, int* func_table, int* hash_value, int* hash_elements, int a, int b, int c, int p ){
    for (int i = 0; i < n; ++i){
        hash_table[i] = 0;
        hash_table2[i] = 0;
    }
    for (int i = 0; i < N; ++i){
        hash_value[i] = hashing(hash_elements[i], a, b, c, p, n);
        func_table[i] = 0;
    }
}
// Build a lookup workload: the first N*partial queries are known keys
// (guaranteed hits), the remainder are fresh random values (likely misses).
void generating_find_element(int N, int* find_elements, int* hash_element, float partial){
    const int hits = (int)(N * partial);
    int i = 0;
    for (; i < hits; ++i)
        find_elements[i] = hash_element[i];
    for (; i < N; ++i)
        find_elements[i] = rand() % (1 << 27);
}
// In-place quicksort of hash_value[low..high] that carries hash_elements
// along as a paired payload (Hoare-style hole-filling partition around the
// first element as pivot).
// NOTE(review): recursion depth is O(n) in the worst case (already-sorted
// or all-equal input) — fine for the randomized keys used here.
void quicksort(int * hash_value, int* hash_elements, int low, int high)
{
if(low >= high){
return;
}
int first = low;
int last = high;
int key = hash_value[first];    // pivot value
int key1 = hash_elements[first]; // pivot's paired payload
while(first < last){
// scan from the right for an element below the pivot
while(first < last && hash_value[last] >= key){
--last;
}
hash_value[first] = hash_value[last];
hash_elements[first] = hash_elements[last];
// scan from the left for an element above the pivot
while(first < last && hash_value[first] <= key){
++first;
}
hash_value[last] = hash_value[first];
hash_elements[last] = hash_elements[first];
}
// drop the pivot into its final slot, then recurse on both halves
hash_value[first] = key;
hash_elements[first] = key1;
quicksort(hash_value, hash_elements, low, first-1);
quicksort(hash_value, hash_elements, first+1, high);
}
// Run one full cuckoo-hash trial: build the table for N random keys
// (retrying with fresh hash functions up to 1000 times on failure), run a
// timed batch lookup of N queries (fraction `partial` are hits), print
// diagnostics, and return the lookup time in seconds.
// NOTE(review): `ceil(N / thread_per_block)` divides two ints first, so
// ceil() is a no-op; when thread_per_block does not divide N the tail keys
// are never processed. It works here only because main() picks both as
// powers of two.
double once(int p, int n, int t, int N, int max_count, int trail, float partial, int thread_per_block){
cout << "Trail: " << trail << endl;
int* hash_table;
int* hash_table2;
int* hash_elements;
int* func_table;
int* a;
int* b;
int* c;
int* find_result;
int* find_elements;
int* indicator;
int count = 0;
// NOTE(review): hash_value (new[]) and find_elements are never freed.
int* hash_value = new int [N];
cudaMallocManaged(&hash_table, n * sizeof(int));
cudaMallocManaged(&hash_table2, n * sizeof(int));
cudaMallocManaged(&hash_elements, N * sizeof(int));
cudaMallocManaged(&func_table, N * sizeof(int));
cudaMallocManaged(&a, t * sizeof(int));
cudaMallocManaged(&b, t * sizeof(int));
cudaMallocManaged(&c, t * sizeof(int));
cudaMallocManaged(&find_elements, N * sizeof(int));
cudaMallocManaged(&find_result, N * sizeof(int));
cudaMallocManaged(&indicator, sizeof(int));
*indicator = 0;
random_hash_fun(t, a, b, c);
random_hash_elements(N, hash_elements);
initialize(n, N, hash_table, hash_table2, func_table, hash_value, hash_elements, a[0], b[0], c[0], p);
// sort keys by their function-0 slot to improve memory locality
quicksort(hash_value, hash_elements, 0, N - 1);
// count per-slot demand, then pre-assign conflict-free slots
add_two<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_table2, hash_elements, func_table, a, b, c);
cudaDeviceSynchronize();
fix_location<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_table2, hash_elements, func_table, a, b, c);
cudaDeviceSynchronize();
// eviction rounds: insert then verify, stop when everything landed
for (count = 0; count < max_count; count ++){
*indicator = 0;
insert<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_elements, func_table, a, b, c, max_count);
cudaDeviceSynchronize();
check<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_elements, func_table, a, b, c, max_count, indicator);
cudaDeviceSynchronize();
if (*indicator == 0){
break;
}
}
check<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_elements, func_table, a, b, c, max_count, indicator);
cudaDeviceSynchronize();
int count1 = 0;
// build failed: rehash with new functions, up to 1000 attempts
while (*indicator == -1 && count1 < 1000){
random_hash_fun(t, a, b, c);
add_two<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_table2, hash_elements, func_table, a, b, c);
cudaDeviceSynchronize();
fix_location<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_table2, hash_elements, func_table, a, b, c);
cudaDeviceSynchronize();
for (count = 0; count < max_count; count ++){
*indicator = 0;
insert<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_elements, func_table, a, b, c, max_count);
cudaDeviceSynchronize();
check<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_elements, func_table, a, b, c, max_count, indicator);
cudaDeviceSynchronize();
if (*indicator == 0){
break;
}
}
check<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, hash_elements, func_table, a, b, c, max_count, indicator);
cudaDeviceSynchronize();
count1 ++;
}
generating_find_element(N, find_elements, hash_elements, partial);
// timed section: the batch lookup only
clock_t start = clock();
find<<< ceil(N / thread_per_block), thread_per_block>>>(p, n, N, t, hash_table, find_elements, a, b, c, find_result);
cudaDeviceSynchronize();
double duration = (clock() - start) / (double) CLOCKS_PER_SEC;
cout << "Time:" << duration << endl;
// NOTE(review): src == dst, so these three copies are no-ops; the buffers
// are managed memory and already host-visible after the sync above.
cudaMemcpy(hash_table, hash_table, n * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(hash_elements, hash_elements, N * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(find_result, find_result, N * sizeof(int), cudaMemcpyDeviceToHost);
int val_count = 0;
val_count = 0;
// count hits; with partial == 1.0 every query should be found
for (int i = 0 ; i < N; i++){
if(find_result[i] == 1){
val_count ++;
}
}
cout << "Difference between insertion and find result: " << N - val_count << endl;
cudaFree(hash_table);
cudaFree(hash_elements);
cudaFree(find_result);
cudaFree(a);
cudaFree(b);
cudaFree(c);
cudaFree(hash_table2);
cudaFree(func_table);
cudaFree(indicator);
return duration;
}
// Interactive driver: read the workload parameters from stdin, run five
// cuckoo-hash trials, and report the average lookup time and throughput.
int main(void){
// large prime for the universal hash family
int p = 99984923;
int count_bit;
float relative;
float partial;
int n;
int c;
cout << "Data Bit: " ;
cin >> count_bit;
cout << "Find partial: ";
cin >> partial;
cout << "Hash table size (0 for 2^25): ";
cin >> relative;
cout << "Evict Chain Constant: ";
cin >> c;
// number of keys; a power of two, so N is divisible by thread_per_block
int N = (1<<count_bit);
if (relative == 0){
n = 1 << 25;
}else{
n = relative * N;
}
// three hash functions (cuckoo width)
int t = 3;
// max eviction rounds = c * ln(n)
// NOTE(review): the (int) cast binds to c (already an int), not to the
// product; the double result is then implicitly truncated on assignment.
int max_count = (int) c * log(n);
int thread_per_block;
if (count_bit >= 18){
thread_per_block = 1024;
}else{
thread_per_block = 64;
}
double Time = 0;
Time += once(p, n, t, N, max_count, 1, partial, thread_per_block);
Time += once(p, n, t, N, max_count, 2, partial, thread_per_block);
Time += once(p, n, t, N, max_count, 3, partial, thread_per_block);
Time += once(p, n, t, N, max_count, 4, partial, thread_per_block);
Time += once(p, n, t, N, max_count, 5, partial, thread_per_block);
Time = Time / 5;
cout << "Average Time: " << Time << endl;
cout << "Million of insertion per second: " << (N / Time) / 1000000 << endl;
return 0;
}
|
20,937 |
#include <stdio.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cooperative_groups.h>
//#include <helper_cuda.h>
#define N_INPUTS 256
#define N_ARITH 4096
// Arithmetic-chain benchmark kernel: each thread evaluates a random DAG of
// n_arith add/mul nodes `iter` times over its own n_inputs-wide slice of A,
// then writes the final node's value back to its slice's first element.
// Node j < thresh reads two inputs from A; node j >= thresh reads earlier
// nodes (host guarantees B[j], C[j] < j there, so temp reads are defined).
// NOTE(review): temp is N_ARITH (4096) floats per thread — far beyond
// register capacity, so it lives in (slow) local memory.
// NOTE(review): no i-bounds guard; the launch must cover exactly
// batch_size threads (true in main: 512 threads, 64 per block).
__global__ void
ac(float *A, const int *B, const int *C, const int *op_sel, int n_inputs, const int n_arith, int thresh, int iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
int idx_off= i*n_inputs;  // start of this thread's input slice in A
float temp[N_ARITH];
for (int k=0; k<iter; k++) {
for (int j=0; j <n_arith; j++ ) {
if (op_sel[j] == 0) {
if (j < thresh)
temp[j] = A[idx_off + B[j]] + A[idx_off + C[j]];
else
temp[j] = temp[B[j]] + temp[C[j]];
}
else {
if (j < thresh)
temp[j] = A[idx_off + B[j]] * A[idx_off + C[j]];
else
temp[j] = temp[B[j]] * temp[C[j]];
}
}
}
// publish the last node so the computation cannot be optimized away
A[i*n_inputs]= temp[n_arith-1];
}
// Abort with a descriptive message if a CUDA call failed.
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to %s (error code %s)!\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Build a random arithmetic DAG, run the ac kernel (one warmup + one timed
// pass), and report kernel time and throughput.
// Fixes vs. original: d_op_sel and h_op_sel are now freed, host mallocs are
// checked, and the copy-pasted "vector A" error messages name the right
// buffer; repetitive error checks are factored into checkCuda().
int
main(void)
{
    const int n_inputs= N_INPUTS;
    const int n_arith= N_ARITH;
    const int batch_size= 512;
    const int iter= 4096;
    // nodes below thresh read the inputs; nodes above read earlier nodes
    const int thresh= n_arith/3;
    size_t size= batch_size * (n_inputs) * sizeof(float);
    size_t size_idx= n_arith * sizeof(int);
    float *h_A= (float *)malloc(size);
    int *h_B= (int *)malloc(size_idx);
    int *h_C= (int *)malloc(size_idx);
    int *h_op_sel= (int *) malloc(size_idx);
    if (h_A == NULL || h_B == NULL || h_C == NULL || h_op_sel == NULL)
    {
        fprintf(stderr, "Failed to allocate host buffers!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize the host input vectors: operand indices and op selectors
    for (int i = 0; i < n_arith; ++i)
    {
        if (i < thresh) {
            h_B[i] = rand() % (n_inputs);   // read from the input slice
            h_C[i] = rand() % (n_inputs);
        }
        else{
            h_B[i] = rand() % (i);          // read an earlier DAG node (< i)
            h_C[i] = rand() % (i);
        }
        h_op_sel[i]= rand() % 2;            // 0 = add, 1 = mul
    }
    for (int i= 0; i < n_inputs; ++i) {
        for (int b =0; b< batch_size; ++b) {
            h_A[b* n_inputs + i]= float(rand());
        }
    }
    // Allocate the device buffers
    float *d_A = NULL;
    checkCuda(cudaMalloc((void **)&d_A, size), "allocate device vector A");
    int *d_B = NULL;
    checkCuda(cudaMalloc((void **)&d_B, size_idx), "allocate device vector B");
    int *d_C = NULL;
    checkCuda(cudaMalloc((void **)&d_C, size_idx), "allocate device vector C");
    int *d_op_sel = NULL;
    checkCuda(cudaMalloc((void **)&d_op_sel, size_idx), "allocate device vector op_sel");
    // Copy the host inputs to the device
    printf("Copy input data from the host memory to the CUDA device\n");
    checkCuda(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice), "copy vector A from host to device");
    checkCuda(cudaMemcpy(d_B, h_B, size_idx, cudaMemcpyHostToDevice), "copy vector B from host to device");
    checkCuda(cudaMemcpy(d_C, h_C, size_idx, cudaMemcpyHostToDevice), "copy vector C from host to device");
    checkCuda(cudaMemcpy(d_op_sel, h_op_sel, size_idx, cudaMemcpyHostToDevice), "copy vector op_sel from host to device");
    // One thread per batch element
    int threadsPerBlock = 64;
    int blocksPerGrid= (batch_size + threadsPerBlock -1)/ threadsPerBlock;
    struct timeval t1, t2;
    // Warmup launch (excluded from timing)
    ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
    checkCuda(cudaDeviceSynchronize(), "synchronize after warmup");
    gettimeofday(&t1, 0);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
    checkCuda(cudaGetLastError(), "launch ac kernel");
    // Wait for the timed kernel to finish
    checkCuda(cudaDeviceSynchronize(), "synchronize after timed run");
    gettimeofday(&t2, 0);
    double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
    printf("Time of kernel: %3.4f ms \n", time);
    printf("Throughput: %.3f Gops/sec\n", (((1.0*batch_size*iter*n_arith))/time)/10E6);
    // Copy the result back and print the first few values
    printf("Copy output data from the CUDA device to the host memory\n");
    checkCuda(cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost), "copy vector A from device to host");
    for (int i=0; i<8; i++) {
        printf("%d , %f\n", i, h_A[i]);
    }
    // Free everything (the original leaked d_op_sel and h_op_sel)
    checkCuda(cudaFree(d_A), "free device vector A");
    checkCuda(cudaFree(d_B), "free device vector B");
    checkCuda(cudaFree(d_C), "free device vector C");
    checkCuda(cudaFree(d_op_sel), "free device vector op_sel");
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_op_sel);
    printf("Done!\n");
    return 0;
}
|
20,938 | /*
* FILE: isingV3.cu
* THMMY, 7th semester, Parallel and Distributed Systems: 3rd assignment
* Parallel Implementation with shared memory of the Ising Model
* Authors:
* Moustaklis Apostolos, 9127, amoustakl@ece.auth.gr
* Papadakis Charis , 9128, papadakic@ece.auth.gr
* Compile command with :
* make all
* Run command example:
* ./src/isingV3
* It will calculate the evolution of the ising Model
* for a given number n of points and k steps
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Defines for the block and grid calculation
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
//The dimensions of the lattice
#define N_X 517
#define N_Y 517
// The size of the weights and the radius of the "stencil"
#define WSIZE 5
#define RADIUS 2
//Helper Defines to access easier the arrays
#define old(i,j,n) *(old+(i)*n+j)
#define current(i,j,n) *(current+(i)*n+j)
#define w(i,j) *(w+(i)*5+j)
#define d_w(i,j) *(d_w+(i)*5+j)
#define s_w(i,j) *(s_w+(i)*5+j)
#define d_current(i,j,n) *(d_current+(i)*n+j)
#define d_old(i,j,n) *(d_old+(i)*n+j)
#define G(i,j,n) *(G+(i)*n+j)
#define s_old(i,j) *(s_old + (i)*(BLOCK_SIZE_X+2*RADIUS) + j)
//Functions Declaration
void swapElement(int ** one, int ** two);
__global__
void kernel2D(int *d_current, int *d_old, double *d_w, int n , int * d_flag);
void ising( int *G, double *w, int k, int n);
//! Ising model evolution
/*!
\param G Spins on the square lattice [n-by-n]
\param w Weight matrix [5-by-5]
\param k Number of iterations [scalar]
\param n Number of lattice points per dim [scalar]
NOTE: Both matrices G and w are stored in row-major format.
*/
// Evolve the n-by-n Ising lattice G for k steps using the 5x5 weight
// stencil w (both row-major). After each step the updated lattice is
// copied back into G on the host.
// NOTE(review): d_flag is never initialized on the device and the kernel
// only ever writes 0 to it, so the early-termination test below reads
// indeterminate data and the "no changes" detection cannot work as coded.
// NOTE(review): the grid is sized from the compile-time N_X/N_Y, not the
// runtime n — confirm callers always pass n == N_X == N_Y.
void ising( int *G, double *w, int k, int n){
//Grid and block construction
dim3 block(BLOCK_SIZE_X,BLOCK_SIZE_Y);
int grid_size_x = (N_X + BLOCK_SIZE_X - 1) / BLOCK_SIZE_X;
int grid_size_y = (N_Y + BLOCK_SIZE_Y - 1) / BLOCK_SIZE_Y;
dim3 grid(grid_size_x,grid_size_y);
//Device memory allocation
int * old = (int*) malloc(n*n*(size_t)sizeof(int)); // old spin lattice
int * current = (int*) malloc(n*n*(size_t)sizeof(int)); // current spin lattice
//Leak check
if(old==NULL || current == NULL){
printf("Problem at memory allocation at host \n");
exit(0);
}
int * d_old;
int * d_current;
double * d_w;
int *d_flag ;
int flag ;
//Host memory allocation and leak check
if( cudaMalloc((void **)&d_old ,n*n*(size_t)sizeof(int)) != cudaSuccess || cudaMalloc((void **)&d_current,n*n*(size_t)sizeof(int)) != cudaSuccess || cudaMalloc((void **)&d_w, WSIZE*WSIZE*(size_t)sizeof(double)) != cudaSuccess || cudaMalloc(&d_flag,(size_t)sizeof(int)) !=cudaSuccess){
printf("Problem at memory allocation");
exit(0);
}
//Copy memory from host to device
cudaMemcpy(d_w, w, WSIZE*WSIZE*sizeof(double), cudaMemcpyHostToDevice );
cudaMemcpy(d_old, G, n*n*sizeof(int), cudaMemcpyHostToDevice );
// run for k steps
for(int l=0; l<k; l++){
flag = 0;
kernel2D<<<grid,block>>>(d_current, d_old, d_w, n , d_flag );
cudaDeviceSynchronize();
// cudaMemcpy(old, d_old, n*n*sizeof(int), cudaMemcpyDeviceToHost );
// NOTE(review): copying the full lattice to the host every step is only
// needed so G holds the final state; it dominates runtime for large k.
cudaMemcpy(current, d_current, n*n*sizeof(int), cudaMemcpyDeviceToHost );
// save result in G
memcpy(G , current , n*n*sizeof(int));
// swap the pointers for the next iteration
swapElement(&d_old,&d_current);
cudaMemcpy(&flag , d_flag , (size_t)sizeof(int), cudaMemcpyDeviceToHost);
// terminate if no changes are made
// NOTE(review): exit(0) here skips the frees below and kills the caller.
if(flag){
printf("terminated: spin values stay same (step %d)\n" , l);
exit(0);
}
}
//Memory deallocation
free(old);
free(current);
cudaFree(d_old);
cudaFree(d_current);
cudaFree(d_w);
}
//Helper function to swap the pointers of the arrays
// Exchange the two int pointers referenced by one and two; used to
// ping-pong the device lattice buffers between iterations.
void swapElement(int ** one, int ** two) {
  int * held = *two;
  *two = *one;
  *one = held;
}
//The kernel function that updates the values of the ising model
// One Ising update step: each thread loads its lattice site plus a
// 2-cell halo (periodic boundary via the (i+n)%n wrap) into shared memory,
// computes the weighted neighbor influence with the shared 5x5 stencil,
// and writes the sign of that influence to d_current.
// NOTE(review): d_flag is only ever written with 0 here; nothing sets it
// nonzero, so the host-side "no changes" termination cannot trigger.
// NOTE(review): the __syncthreads() calls sit inside loops whose trip
// counts depend on r and c, which differ across threads of a block when
// n+RADIUS is not a multiple of the stride — a divergent-barrier hazard.
__global__
void kernel2D(int *d_current, int *d_old, double *d_w, int n , int * d_flag){
double influence = 0;
// Compute global column and row indices
int r = blockIdx.x * blockDim.x + threadIdx.x;
int c = blockIdx.y * blockDim.y + threadIdx.y;
// Compute local column and row indices (offset by the halo radius)
int lxindex = threadIdx.x + RADIUS;
int lyindex = threadIdx.y + RADIUS;
//The strides
int strideX = blockDim.x*gridDim.x ;
int strideY = blockDim.y*gridDim.y ;
__shared__ double s_w[WSIZE*WSIZE];
__shared__ int s_old[(BLOCK_SIZE_X + 2*RADIUS)*(BLOCK_SIZE_Y + 2*RADIUS)];
//Fully Parallel Shared weights
//incase we have sufficient amount of threads ( threads at X > WSIZE AND threads at Y > WSIZE)
if(blockDim.x > WSIZE && blockDim.y > WSIZE ){
if(threadIdx.x < WSIZE && threadIdx.y < WSIZE){
s_w(threadIdx.x, threadIdx.y) = d_w(threadIdx.x, threadIdx.y);
}
}
//Otherwise
//threadIdx.x == 0 , threadIdx.y == 0 will copy
else{
if(threadIdx.x == 0 && threadIdx.y == 0){
for(int i = 0 ; i<WSIZE ; i++){
for(int j =0; j<WSIZE ; j++){
s_w(i,j)=d_w(i,j);
}
}
}
}
__syncthreads();
// // Read input elements into shared memory
for(int i=r; i<n+RADIUS; i+= strideX){
for(int j=c; j<n+RADIUS ; j+= strideY ){
// center of the tile: this thread's own site (periodic wrap)
s_old(lxindex,lyindex) = d_old((i+n)%n,(j+n)%n,n);
__syncthreads();
// left/right halo columns, loaded by the first RADIUS rows of threads
if( threadIdx.y < RADIUS){
s_old( lxindex , lyindex - RADIUS ) = d_old((i + n)%n , (j-RADIUS+n)%n , n);
s_old(lxindex , lyindex + BLOCK_SIZE_Y ) = d_old( (i + n )%n , (j+BLOCK_SIZE_X + n)%n , n);
}
// top/bottom halo rows, loaded by the first RADIUS columns of threads
if( threadIdx.x < RADIUS){
s_old(lxindex - RADIUS,lyindex ) = d_old( (i-RADIUS+n)%n , (j+n)%n , n );
s_old( lxindex + BLOCK_SIZE_Y,lyindex ) = d_old(( i +BLOCK_SIZE_X +n)%n , (j + n)%n , n );
}
// the four RADIUS x RADIUS corner patches of the halo
if(threadIdx.x < RADIUS && threadIdx.y < RADIUS ){
// top-left
s_old(lxindex - RADIUS , lyindex - RADIUS ) = d_old ( ( i - RADIUS + n)%n , (j-RADIUS + n )%n , n );
// bottom-right
s_old(lxindex + BLOCK_SIZE_X , lyindex + BLOCK_SIZE_Y ) = d_old( (i + BLOCK_SIZE_X + n )%n , (j + BLOCK_SIZE_Y +n )%n , n);
// bottom-left
s_old(lxindex - RADIUS, lyindex + BLOCK_SIZE_Y )= d_old ((i - RADIUS + n)%n , (j + BLOCK_SIZE_X )%n, n);
// top-right
s_old(lxindex + BLOCK_SIZE_X,lyindex - RADIUS ) = d_old ((i + BLOCK_SIZE_X+n )%n , (j -RADIUS + n )%n , n );
}
__syncthreads();
if((i<n)&&(j<n)){
influence = 0;
for(int ii=0; ii<WSIZE ; ii++){
for(int jj=0 ; jj<WSIZE ; jj++){
// skip the center of the stencil (the site itself)
if((ii==2) && (jj==2))
continue;
//influence calculation
influence += s_w(ii,jj) * s_old( threadIdx.x + ii,threadIdx.y + jj );
}
}
// // magnetic moment gets the value of the SIGN of the weighted influence of its neighbors
if(fabs(influence) < 10e-7){
d_current(i,j,n) = s_old(lxindex, lyindex ); // remains the same in the case that the weighted influence is zero
}
else if(influence > 10e-7){
d_current(i,j,n) = 1;
*d_flag = 0;
}
else if(influence < 0){
d_current(i,j,n) = -1;
*d_flag = 0;
}
}
__syncthreads();
}
}
}
|
20,939 |
#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <string>
#include <vector>
#include <set>
using namespace std;
// Split str on any of the delimiter characters. An empty string is emitted
// for a leading delimiter or for consecutive delimiters, and the final
// segment (possibly empty) is always appended.
vector<string> splitpath( const string& str, const set<char> delimiters)
{
    vector<string> parts;
    const char* segStart = str.c_str();
    const char* cur = segStart;
    while (*cur)
    {
        if (delimiters.count(*cur) != 0)
        {
            // close the current segment ("" when two delimiters are adjacent)
            parts.push_back(segStart == cur ? string() : string(segStart, cur));
            segStart = cur + 1;
        }
        ++cur;
    }
    parts.push_back(segStart);   // trailing segment
    return parts;
}
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
cout << cudaGetErrorString(error) << endl; \
} \
} while (0)
// Sumar cada columna(pixel) de las imagenes en paralelo
// Swap adjacent bands of x pixels in each RGB channel: elements in an
// even-numbered band (Idx/x even) take the value x positions ahead; odd
// bands take the value x positions behind.
// Fix: the original read r_in[Idx + x] unguarded, which runs past the end
// of the buffers whenever size is not a multiple of 2*x; out-of-range
// partners now keep their own value.
__global__ void kernel_swapArray(float *r_in, float *g_in, float *b_in,
float *r_result, float *g_result, float *b_result , int size, int x) {
int Idx = threadIdx.x + blockIdx.x * blockDim.x;
if (Idx < size) {
int partner = ((Idx/x)%2 == 0) ? Idx + x : Idx - x;
if (partner < 0 || partner >= size) partner = Idx; // unpaired tail band: copy through
r_result[Idx] = r_in[partner];
g_result[Idx] = g_in[partner];
b_result[Idx] = b_in[partner];
}
}
// Read an M x N RGB image (three float channels) from the file named in
// argv[1], run kernel_swapArray for ten band widths X, and write timings
// plus each result image to the resultados/ directory.
// NOTE(review): the input stream is never checked after open — a missing
// file silently yields garbage M, N and uninitialized channels.
int main(int argc, char *argv[]){
string input_file_name;
if (argc > 1) {
input_file_name = argv[1];
}
ifstream infile;
cout << input_file_name.c_str() << endl;
infile.open(input_file_name.c_str());
int M,N, size;
float *r_in_host, *g_in_host, *b_in_host, *r_out_host, *g_out_host, *b_out_host;
float *r_in_dev, *g_in_dev, *b_in_dev, *r_out_dev, *g_out_dev, *b_out_dev;
infile >> M >> N;
cout << M << N << endl;
size = M*N;
// Allocating arrays
r_in_host = (float *)malloc(size * sizeof(float));
g_in_host = (float *)malloc(size * sizeof(float));
b_in_host = (float *)malloc(size * sizeof(float));
r_out_host = (float *)malloc(size * sizeof(float));
g_out_host = (float *)malloc(size * sizeof(float));
b_out_host = (float *)malloc(size * sizeof(float));
// Reading channels (R, then G, then B, each size floats)
for (int i = 0; i < size; i++)
{
infile >> r_in_host[i];
}
for (int i = 0; i < size; i++)
{
infile >> g_in_host[i];
}
for (int i = 0; i < size; i++)
{
infile >> b_in_host[i];
}
// Prepare the output files (timings appended, results per X)
set<char> delims{'/'};
vector<string> path = splitpath(input_file_name, delims);
ofstream times_file, result_file;
times_file.open("resultados/times_cuda_pregunta2.txt", ios_base::app);
int x_to_test[] = {1, 2, 4, 8, 16, 32, 64, 128, 256, 512};
for (int i = 0; i < 10; i++)
{
int X = x_to_test[i];
cudaEvent_t ct1, ct2;
float dt;
// NOTE(review): device buffers are allocated and freed on every loop
// iteration; hoisting them outside the loop would avoid 10x alloc cost.
// Input in device
CUDA_CHECK(cudaMalloc((void**)&r_in_dev, size * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&g_in_dev, size * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&b_in_dev, size * sizeof(float)));
// Copy
CUDA_CHECK(cudaMemcpy(r_in_dev, r_in_host, size * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(g_in_dev, g_in_host, size * sizeof(float), cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(b_in_dev, b_in_host, size * sizeof(float), cudaMemcpyHostToDevice));
// Output in device
CUDA_CHECK(cudaMalloc((void**)&r_out_dev, size * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&g_out_dev, size * sizeof(float)));
CUDA_CHECK(cudaMalloc((void**)&b_out_dev, size * sizeof(float)));
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
cudaEventRecord(ct1);
// Launch the kernel: one thread per pixel
// NOTE(review): no cudaGetLastError() after the launch — a failed launch
// would go unnoticed and the timing below would be meaningless.
int grid_size, block_size = 256;
grid_size = (int)ceil((float) size / block_size);
kernel_swapArray<<<grid_size, block_size>>>(r_in_dev, g_in_dev, b_in_dev, r_out_dev, g_out_dev, b_out_dev, size, X);
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
float duration;
duration = dt;
std::cout << "Tiempo GPU: " << duration << "[ms]" << std::endl;
CUDA_CHECK(cudaMemcpy(r_out_host, r_out_dev, size * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(g_out_host, g_out_dev, size * sizeof(float), cudaMemcpyDeviceToHost));
CUDA_CHECK(cudaMemcpy(b_out_host, b_out_dev, size * sizeof(float), cudaMemcpyDeviceToHost));
// Append the timing for this X
times_file << "X = "<< X << " " << path.back() << " " << duration << "[ms]" << endl;
// Write the result image for this X
string result_file_name = "resultados/result_cuda_pregunta3_x"+to_string(X)+"_"+path.back();
cout << result_file_name << endl;
result_file.open(result_file_name);
result_file << M << " " << N << endl;
for (int j = 0; j < size-1; j++)
{
result_file << r_out_host[j] << " ";
}
result_file << r_out_host[size-1] << endl;
for (int j = 0; j < size-1; j++)
{
result_file << g_out_host[j] << " ";
}
result_file << g_out_host[size-1] << endl;
for (int j = 0; j < size-1; j++)
{
result_file << b_out_host[j] << " ";
}
result_file << b_out_host[size-1];
result_file.close();
CUDA_CHECK(cudaFree(r_in_dev));
CUDA_CHECK(cudaFree(g_in_dev));
CUDA_CHECK(cudaFree(b_in_dev));
CUDA_CHECK(cudaFree(r_out_dev));
CUDA_CHECK(cudaFree(g_out_dev));
CUDA_CHECK(cudaFree(b_out_dev));
}
// Free host memory
free(r_in_host);
free(g_in_host);
free(b_in_host);
free(r_out_host);
free(g_out_host);
free(b_out_host);
times_file.close();
infile.close();
return 0;
}
|
20,940 | /*
============================================================================
Name : review_chp3_2_bhd.cu
Author : freshield
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <stdio.h>
__global__ void add(int a, int b, int *c){
    // Single-thread demo kernel: store the sum of two host-supplied
    // integers into the device location *c.
    c[0] = a + b;
}
// Dereference helper used by the pointer-printing demo: returns the value
// the pointer refers to.
int find(int *a){
    return a[0];
}
int main(){
    // Demo: inspect a variable through pointers, then run a trivial
    // one-thread kernel that adds 2 + 7 on the device.
    int c;
    int *dev_c;
    int b = 1;
    int * b_ad;
    int find_b;
    b_ad = &b;
    find_b = find(&b);
    printf("b is %d\n", b);
    // Fix: pointers/addresses must be printed with %p (printing them with
    // %d is undefined behavior and truncates on 64-bit platforms).
    printf("b address is %p\n", (void*)&b);
    printf("b_ad is %p\n", (void*)b_ad);
    printf("&b_ad is %p\n", (void*)&b_ad);
    printf("*b_ad is %d\n", *b_ad);
    printf("find_b is %d\n", find_b);
    printf("find_b address is %p\n", (void*)&find_b);
    cudaMalloc((void**)&dev_c, sizeof(int));
    add<<<1,1>>>(2, 7, dev_c);
    // Blocking copy: also synchronizes with the kernel before reading c.
    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_c);
    printf("2 + 7 = %d", c);
    return 0;  // fix: main previously fell off the end
}
|
20,941 | #include<stdio.h>
#include"cuda_runtime.h"
#include"device_launch_parameters.h"
__global__ void add(int *a,int *b,int *c)
{
    // Element-wise vector add: thread i of the (single) block handles
    // element i.
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
int main()
{
    // Fixed-capacity host buffers; n is validated against their size below.
    int n, a[10], b[10], c[10];
    printf("\nValue of N:");
    scanf("%d", &n);
    // Fix: reject sizes outside [1,10] — the original wrote past the
    // 10-element stack arrays for any larger n (buffer overflow).
    if (n < 1 || n > 10) {
        printf("\nN must be between 1 and 10\n");
        return 1;
    }
    printf("\n Enter the Values of array A:");
    for (int i = 0; i < n; ++i)
    {
        scanf("%d", &a[i]);
    }
    printf("\n Enter the Value of array B :");
    for (int i = 0; i < n; ++i)
    {
        scanf("%d", &b[i]);
    }
    // Device mirrors of a, b and the result.
    int *d_a, *d_b, *d_c;
    int size = sizeof(int);
    cudaMalloc((void**)&d_a, size*n);
    cudaMalloc((void**)&d_b, size*n);
    cudaMalloc((void**)&d_c, size*n);
    cudaMemcpy(d_a, a, size*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size*n, cudaMemcpyHostToDevice);
    // One block of n threads; kernel indexes by threadIdx.x only.
    add<<<1,n>>>(d_a, d_b, d_c);
    cudaMemcpy(c, d_c, size*n, cudaMemcpyDeviceToHost);
    printf("The Sum of the array is :");
    for (int i = 0; i < n; ++i)
    {
        printf("%d ", c[i]);
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Fix: return 0 on success (the original returned 1, which signals
    // failure to the shell).
    return 0;
} |
20,942 | /*
============================================================================
Filename : algorithm.c
Author : Your name goes here
SCIPER : Your SCIPER number
============================================================================
*/
#include <iostream>
#include <iomanip>
#include <sys/time.h>
#include <cuda_runtime.h>
using namespace std;
// CPU Baseline
// CPU reference: `iterations` passes of a 3x3 box average over the interior
// cells of a length x length grid; after each pass the four cells around the
// grid centre are re-pinned to 1000.
// NOTE(review): the input/output pointer swap below is local to this
// function, so after an odd number of iterations the newest values live in
// the caller's *input* buffer rather than *output* — confirm callers
// account for this parity.
void array_process(double *input, double *output, int length, int iterations)
{
double *temp;
for(int n=0; n<(int) iterations; n++)
{
// Interior cells only; borders are never written.
for(int i=1; i<length-1; i++)
{
for(int j=1; j<length-1; j++)
{
// Average of the 3x3 neighbourhood (double division: /9 promotes).
output[(i)*(length)+(j)] = (input[(i-1)*(length)+(j-1)] +
input[(i-1)*(length)+(j)] +
input[(i-1)*(length)+(j+1)] +
input[(i)*(length)+(j-1)] +
input[(i)*(length)+(j)] +
input[(i)*(length)+(j+1)] +
input[(i+1)*(length)+(j-1)] +
input[(i+1)*(length)+(j)] +
input[(i+1)*(length)+(j+1)] ) / 9;
}
}
// Force the four centre cells back to 1000 each pass.
output[(length/2-1)*length+(length/2-1)] = 1000;
output[(length/2)*length+(length/2-1)] = 1000;
output[(length/2-1)*length+(length/2)] = 1000;
output[(length/2)*length+(length/2)] = 1000;
// Swap roles so the next pass reads this pass's result.
temp = input;
input = output;
output = temp;
}
}
__global__
void gpu_computation_row(double* input, double* output, int length);
__global__
void gpu_computation_col(double* input, double* output, int length);
// GPU Optimized function
// GPU version of array_process: each iteration runs a row pass
// (gpu_computation_row) into gpu_output, then a column pass
// (gpu_computation_col) back into gpu_input, so gpu_input always holds the
// latest completed iteration. H2D copy, compute and D2H copy are timed with
// CUDA events and printed.
void GPU_array_process(double *input, double *output, int length, int iterations)
{
    // CUDA events for elapsed-time measurement.
    cudaEvent_t cpy_H2D_start, cpy_H2D_end, comp_start, comp_end, cpy_D2H_start, cpy_D2H_end;
    cudaEventCreate(&cpy_H2D_start);
    cudaEventCreate(&cpy_H2D_end);
    cudaEventCreate(&cpy_D2H_start);
    cudaEventCreate(&cpy_D2H_end);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_end);
    /* Preprocessing */
    // Fix: promote before multiplying so length*length*sizeof(double)
    // cannot overflow int for large grids.
    const long SIZE = (long)length * length * sizeof(double);
    double* gpu_input;
    double* gpu_output;
    dim3 threadsPerBlock(128);
    // Enough 1D blocks to cover every cell (kernels guard the tail).
    dim3 nbBlocks(length * length / threadsPerBlock.x + 1);
    cudaSetDevice(0);
    if(cudaMalloc((void**)&gpu_input, SIZE) != cudaSuccess){
        cerr << "Error allocating input" << endl;
    }
    if(cudaMalloc((void**)&gpu_output, SIZE) != cudaSuccess){
        cerr << "Error allocating output" << endl;
    }
    cudaEventRecord(cpy_H2D_start);
    /* Host-to-device copies */
    if(cudaMemcpy(gpu_input, input, SIZE, cudaMemcpyHostToDevice) != cudaSuccess){
        cerr << "Error copying input to gpu" << endl;
    }
    if(cudaMemcpy(gpu_output, output, SIZE, cudaMemcpyHostToDevice) != cudaSuccess){
        cerr << "Error copying output to gpu" << endl;
    }
    cudaEventRecord(cpy_H2D_end);
    cudaEventSynchronize(cpy_H2D_end);
    cudaEventRecord(comp_start);
    /* GPU computation: ping-pong between the two device buffers. */
    for(int iter(0); iter < iterations; iter++){
        gpu_computation_row <<< nbBlocks, threadsPerBlock >>> (gpu_input, gpu_output, length);
        gpu_computation_col <<< nbBlocks, threadsPerBlock >>> (gpu_output, gpu_input, length);
        // Fix: cudaThreadSynchronize() is deprecated (removed in newer CUDA
        // toolkits); cudaDeviceSynchronize() is the supported equivalent.
        cudaDeviceSynchronize();
    }
    cudaEventRecord(comp_end);
    cudaEventSynchronize(comp_end);
    cudaEventRecord(cpy_D2H_start);
    // Due to the ping-pong above, the finished data lives in gpu_input.
    if(cudaMemcpy(output, gpu_input, SIZE, cudaMemcpyDeviceToHost) != cudaSuccess){
        cerr << "failed to retrieve gpu_output from GPU" << endl;
    }
    cudaEventRecord(cpy_D2H_end);
    cudaEventSynchronize(cpy_D2H_end);
    // Fix: cudaFree takes the device pointer itself — the original passed
    // &gpu_input / &gpu_output (addresses of the host-side pointer
    // variables), which is invalid and leaked both allocations.
    cudaFree(gpu_input);
    cudaFree(gpu_output);
    float time;
    cudaEventElapsedTime(&time, cpy_H2D_start, cpy_H2D_end);
    cout<<"Host to Device MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, comp_start, comp_end);
    cout<<"Computation takes "<<setprecision(4)<<time/1000<<"s"<<endl;
    cudaEventElapsedTime(&time, cpy_D2H_start, cpy_D2H_end);
    cout<<"Device to Host MemCpy takes "<<setprecision(4)<<time/1000<<"s"<<endl;
}
// Row pass of the separable 3x3 box sum: writes the horizontal
// 3-neighbour sum for each interior cell. Pairs with gpu_computation_col,
// which adds the vertical neighbours and applies the division by 9.
__global__
void gpu_computation_row(double* input, double* output, int length){
// Element ids are offset by +1, so thread 0 handles cell 1; the border
// guard below keeps every access (element_id +/- 1) in range.
int element_id = blockIdx.x * blockDim.x + threadIdx.x + 1;
int x_glob = element_id % length;
int y_glob = element_id / length;
if(x_glob <= 0 || y_glob <= 0 || x_glob >= length - 1 || y_glob >= length-1){
return;
}
output[element_id] = input[element_id - 1] +
input[element_id] +
input[element_id + 1];
}
// Column pass: sums the row-sums above/at/below each interior cell and
// divides by 9.0, completing the 3x3 average. The four cells around the
// grid centre are forced to 1000 instead, matching the CPU baseline.
__global__
void gpu_computation_col(double* input, double* output, int length){
int element_id = blockIdx.x * blockDim.x + threadIdx.x + 1;
int x_glob = element_id % length;
int y_glob = element_id / length;
// True for the 2x2 block of cells at the grid centre.
bool isCenter = ((x_glob == length/2-1) || (x_glob == length/2)) && ((y_glob == length/2-1) || (y_glob == length/2));
if(x_glob <= 0 || y_glob <= 0 || x_glob >= length - 1 || y_glob >= length-1){
return;
}
output[element_id] = isCenter ? 1000 : ((input[element_id - length] +
input[element_id] +
input[element_id + length]) / 9.0);
} |
20,943 | // Matrix addition program MatrixMult.cu, Barry Wilkinson, Dec. 28, 2010.
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
// One thread computes one element of C = A * B (row-major, N x N).
__global__ void gpu_matrixmult(int *gpu_a, int *gpu_b, int *gpu_c, int N) {
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    // Guard: the grid may overshoot the matrix in either dimension.
    if (col >= N || row >= N) return;
    int acc = 0;
    for (int k = 0; k < N; ++k)
        acc += gpu_a[row * N + k] * gpu_b[k * N + col];
    gpu_c[row * N + col] = acc;
}
// Host reference implementation: C = A * B for row-major N x N matrices.
void cpu_matrixmult(int *cpu_a, int *cpu_b, int *cpu_c, int N) {
    for (int row = 0; row < N; ++row) {        // row of A
        for (int col = 0; col < N; ++col) {    // column of B
            int acc = 0;
            for (int k = 0; k < N; ++k)
                acc += cpu_a[row * N + k] * cpu_b[k * N + col];
            cpu_c[row * N + col] = acc;
        }
    }
}
// Interactive driver: multiply two random N x N matrices on GPU and CPU,
// compare results, and report the speedup; loops until the user declines.
int main(int argc, char *argv[]) {
    int Grid_Dim_x = 1, Grid_Dim_y = 1;   // grid structure values
    int Block_Dim_x = 1, Block_Dim_y = 1; // block structure values
    int N = 10;  // size of the matrices in each dimension
    int B;       // requested number of blocks (per grid dimension)
    int T;       // requested number of threads per block (x dimension)
    int *a, *b, *c, *d;
    int *dev_a, *dev_b, *dev_c;
    int size;    // number of bytes per matrix
    cudaEvent_t start, stop; // CUDA events for timing (valid for async code)
    float elapsed_time_ms;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int repeat = 1;
    while (repeat == 1) {
        /* ---------------- ENTER INPUT PARAMETERS AND ALLOCATE DATA ---------------- */
        printf("Enter the value for N: ");
        scanf("%d", &N);
        int valid = 0;
        while (valid == 0) {
            printf("Enter the number of blocks: ");
            scanf("%d", &B);
            printf("Enter the number of threads: ");
            scanf("%d", &T);
            // NOTE(review): this mirrors the original validation; B is a
            // grid dimension (not limited to 1024 like threads), and full
            // coverage of N x N by a B x B grid of T x 1 blocks is not
            // actually guaranteed by this check — confirm intent.
            if (B > 1024 || T > 1024 || B * T < N * N) {
                printf("Invalid input entered."); // fix: typo "Invlaid"
            } else {
                valid = 1;
                Grid_Dim_x = B;
                Block_Dim_x = T;
            }
        }
        dim3 Grid(Grid_Dim_x, Grid_Dim_x);     // square grid of blocks
        dim3 Block(Block_Dim_x, Block_Dim_y);  // T x 1 threads per block
        size = N * N * sizeof(int);
        a = (int*) malloc(size);  // host inputs
        b = (int*) malloc(size);
        c = (int*) malloc(size);  // results from GPU
        d = (int*) malloc(size);  // results from CPU
        // Load arrays with some numbers (fixed seed: reproducible runs).
        int row, col;
        srand(2);
        for (row = 0; row < N; row++) {
            for (col = 0; col < N; col++) {
                a[row * N + col] = rand() % 10;
                b[row * N + col] = rand() % 10;
            }
        }
        cudaMalloc((void**)&dev_a, size); // allocate memory on device
        cudaMalloc((void**)&dev_b, size);
        cudaMalloc((void**)&dev_c, size);
        cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
        cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
        cudaEventRecord(start, 0); // start timing after the uploads
        gpu_matrixmult<<<Grid, Block>>>(dev_a, dev_b, dev_c, N);
        cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed_time_ms, start, stop);
        printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms);
        double gpuTime = elapsed_time_ms;
        /* ------------- COMPUTATION DONE ON HOST CPU ---------------- */
        cudaEventRecord(start, 0); // reuse the same events for CPU timing
        cpu_matrixmult(a, b, d, N);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed_time_ms, start, stop);
        printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms);
        double cpuTime = elapsed_time_ms;
        /* ------------- check device creates correct results -------------- */
        printf("\n");
        int error = 0;
        int k;
        for (k = 0; k < N * N; k++) {
            if (d[k] != c[k]) {
                error = 1;
                break;
            }
        }
        if (error == 1)
            printf("There is an error.\n");
        else
            printf("Sequential and parallel produce the same results.\n");
        double speedupFactor = cpuTime / gpuTime;
        printf("Speedup Factor: %lf\n", speedupFactor);
        // Fix: release the per-iteration buffers inside the loop — the
        // original reallocated them on every repeat but freed only a/b/c
        // once after the loop, and never freed d at all.
        free(a); free(b); free(c); free(d);
        cudaFree(dev_a);
        cudaFree(dev_b);
        cudaFree(dev_c);
        printf("Would you like to repeat? Enter 1 for yes or 0 for no.\n");
        scanf("%d", &repeat);
    }
    /* -------------- clean up --------------------------------------- */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
20,944 | #include "includes.h"
// True when (threadIdx, blockIdx) maps to a border cell of the 2D field:
// first/last column of the row (x) or the first row / row 479 (y).
// NOTE(review): the parameters shadow the CUDA built-in names blockIdx /
// blockDim / threadIdx, and the bottom row is hard-coded as 479 (i.e. a
// 480-row field) — confirm this matches the launch configuration.
__device__ bool checkBoundary(int blockIdx, int blockDim, int threadIdx){
int x = threadIdx;
int y = blockIdx;
return (x == 0 || x == (blockDim-1) || y == 0 || y == 479);
}
// Fill the border of a row-major 2D field (one block per row: y = blockIdx.x,
// x = threadIdx.x): each border cell takes its nearest interior neighbour's
// value times `scalar`; corners use the diagonal interior neighbour.
// NOTE(review): checkBoundary treats row 479 as the bottom border while the
// writes below test y == blockDim.x-1 — these agree only for a square
// 480x480 launch; confirm the launch configuration.
__global__ void mSetFieldBoundary(float *field, float scalar) {
if(checkBoundary(blockIdx.x, blockDim.x, threadIdx.x)) {
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
int x = threadIdx.x;
int y = blockIdx.x;
if(x == 0 && y == 0) {
// top-left corner <- diagonal neighbour (down-right)
field[Idx] = field[Idx+blockDim.x+1]*scalar;
} else if(x == 0 && y == blockDim.x-1) {
// bottom-left corner <- diagonal neighbour (up-right)
field[Idx] = field[Idx-blockDim.x+1]*scalar;
} else if (x == blockDim.x-1 && y == 0) {
// top-right corner <- diagonal neighbour (down-left)
field[Idx] = field[Idx+blockDim.x-1]*scalar;
} else if (x == blockDim.x-1 && y == blockDim.x-1) {
// bottom-right corner <- diagonal neighbour (up-left)
field[Idx] = field[Idx-blockDim.x-1]*scalar;
} else if (x == 0) {
// left edge <- right neighbour
field[Idx] = field[Idx+1]*scalar;
} else if(x == blockDim.x-1) {
// right edge <- left neighbour
field[Idx] = field[Idx-1]*scalar;
} else if(y == 0) {
// top edge <- cell below
field[Idx] = field[Idx+blockDim.x]*scalar;
} else field[Idx] = field[Idx-blockDim.x]*scalar; // bottom edge <- cell above
} else return;
} |
20,945 | #include "device_launch_parameters.h"
#include "curand_kernel.h"
#include "dev_noise.cuh"
// Sum-of-sinusoids noise generator: each group of `path_num` threads (one
// per oscillator) evaluates its I/Q sinusoid for a time sample y, reduces
// the group in shared memory, and writes the scaled sum to the output.
// Dynamic shared memory must hold 2 * path_num floats; path_num is assumed
// to be a power of two (required by the tree reduction).
__global__ void cudaNoiseGeneWithSoS(float *dev_cos_value, float *dev_sin_value, unsigned int length, unsigned int path_num,
unsigned long long uniform_seed, float omega_amp, float delta_alpha, float delta_omega, float delta_t, float sum_amp){
    extern __shared__ float _sha[];
    unsigned int tidx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int tidy = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int x = tidx % path_num;   // oscillator index within the group
    unsigned int y = (tidy * blockDim.x * gridDim.x + tidx) / path_num; // first time sample
    float omega_n_I = omega_amp * cosf(delta_alpha * x) + delta_omega;
    float omega_n_Q = omega_amp * sinf(delta_alpha * x) + delta_omega;
    // Per-oscillator random initial phases (deterministic per seed).
    curandState_t rand_status;
    curand_init(uniform_seed, x, 0, &rand_status);
    float phi_n_I = curand_uniform(&rand_status);
    float phi_n_Q = curand_uniform(&rand_status);
    float *cos_value = _sha, *sin_value = _sha + path_num;
    unsigned int y_step = gridDim.y * blockDim.x * gridDim.x / path_num;
    // NOTE(review): threads can leave this loop at different y, which makes
    // the __syncthreads() below divergent on the final pass — confirm the
    // launch configuration makes length a multiple of y_step.
    for (; y < length; y += y_step){
        cos_value[x] = cosf(omega_n_I * delta_t*y + 2 * CR_CUDART_PI*phi_n_I);
        sin_value[x] = sinf(omega_n_Q * delta_t*y + 2 * CR_CUDART_PI*phi_n_Q);
        __syncthreads();
        // Fix: reduce over a local stride instead of shifting path_num
        // itself — the original loop ("for (path_num >>= 1; ...)") zeroed
        // path_num after the first time sample, so every later sample was
        // written with an unreduced (wrong) sum.
        for (unsigned int stride = path_num >> 1; stride > 0; stride >>= 1){
            if (x < stride){
                cos_value[x] += cos_value[x + stride];
                sin_value[x] += sin_value[x + stride];
            }
            __syncthreads();
        }
        if (x == 0){
            dev_cos_value[y] = sum_amp * cos_value[x];
            dev_sin_value[y] = sum_amp * sin_value[x];
        }
        __syncthreads();  // keep the shared tiles stable until all lanes are done
    }
}
// Precompute the per-oscillator I/Q angular frequencies:
// omega_n = omega_amp * cos/sin(delta_alpha * n) + delta_omega.
__global__ void cudaNoiseOmegaCulc(float *dev_omega_n_I, float *dev_omega_n_Q, unsigned int length,
float omega_amp, float delta_alpha, float delta_omega){
    unsigned int n = blockIdx.x * blockDim.x + threadIdx.x;
    if (n >= length) return;  // tail guard
    float angle = delta_alpha * n;
    dev_omega_n_I[n] = omega_amp * cosf(angle) + delta_omega;
    dev_omega_n_Q[n] = omega_amp * sinf(angle) + delta_omega;
}
// Sum-of-sinusoids partial evaluation: each block reduces BLOCK_DIM_Y
// oscillators (rows) per time sample (column) into one partial sum and
// writes it to row blockIdx.y of the pitched output buffers; a later pass
// (cudaNoiseSoSSum) folds the block rows together.
__global__ void cudaNoiseSoSCulc(float *dev_cos_value, float *dev_sin_value,
unsigned int pitch_width, unsigned int width, unsigned int heigth, float delta_t,
float *dev_omega_n_I, float *dev_omega_n_Q, float *dev_phi_n){
unsigned int x = threadIdx.x,
y = threadIdx.y,
tidy = blockIdx.y * blockDim.y + y;
__shared__ float sha_cos_value[BLOCK_DIM_Y][BLOCK_DIM_X],
sha_sin_value[BLOCK_DIM_Y][BLOCK_DIM_X];
// Zero the tiles so oscillator rows past `heigth` contribute nothing.
sha_cos_value[y][x] = 0;
sha_sin_value[y][x] = 0;
__syncthreads();
// NOTE(review): the __syncthreads() calls below sit inside this divergent
// branch — safe only if all threads of a block agree on (tidy < heigth);
// confirm heigth is a multiple of BLOCK_DIM_Y.
if (tidy < heigth){
__shared__ float sha_omega_n_I[BLOCK_DIM_Y], sha_omega_n_Q[BLOCK_DIM_Y],
sha_phi_n_I[BLOCK_DIM_Y], sha_phi_n_Q[BLOCK_DIM_Y];
// Column 0 stages this row's frequency and the two phases (phi_n holds
// the I phases in its first `heigth` entries, Q phases after that).
if (x == 0){
sha_omega_n_I[y] = dev_omega_n_I[tidy];
sha_omega_n_Q[y] = dev_omega_n_Q[tidy];
sha_phi_n_I[y] = dev_phi_n[tidy];
sha_phi_n_Q[y] = dev_phi_n[heigth + tidy];
}
__syncthreads();
// Grid-stride over time samples (columns).
for (unsigned int tidx = blockIdx.x * blockDim.x + x;
tidx < width; tidx += gridDim.x*blockDim.x){
sha_cos_value[y][x] = cosf(sha_omega_n_I[y] * delta_t*tidx + 2 * CR_CUDART_PI*sha_phi_n_I[y]);
sha_sin_value[y][x] = sinf(sha_omega_n_Q[y] * delta_t*tidx + 2 * CR_CUDART_PI*sha_phi_n_Q[y]);
__syncthreads();
// Column-wise tree reduction over y; `extra` carries the odd element
// when the active height is not even.
for (unsigned int heigth_ii = blockDim.y / 2, extra = blockDim.y % 2;
heigth_ii > 0; extra = heigth_ii % 2, heigth_ii /= 2){
if (y < heigth_ii){
sha_cos_value[y][x] += sha_cos_value[heigth_ii + extra + y][x];
sha_sin_value[y][x] += sha_sin_value[heigth_ii + extra + y][x];
}
heigth_ii += extra;
__syncthreads();
}
// Row 0 holds the block's partial sum for this time sample.
if (y == 0){
unsigned int loc = blockIdx.y*pitch_width + tidx;
dev_cos_value[loc] = sha_cos_value[0][x];
dev_sin_value[loc] = sha_sin_value[0][x];
}
}
}
}
// Final reduction: for each time sample (column), sum the `heigth` partial
// rows of the pitched buffers and store the result, scaled by sum_amp, back
// into row 0. Columns are covered with a grid-stride loop.
__global__ void cudaNoiseSoSSum(float *dev_cos_value, float *dev_sin_value,
unsigned int pitch_width, unsigned int width, unsigned int heigth, float sum_amp){
    for (unsigned int col = blockIdx.x*blockDim.x + threadIdx.x;
         col < width; col += gridDim.x*blockDim.x){
        float acc_cos = 0;
        float acc_sin = 0;
        for (unsigned int row = 0; row < heigth; row++){
            unsigned int at = row*pitch_width + col;
            acc_cos += dev_cos_value[at];
            acc_sin += dev_sin_value[at];
        }
        dev_cos_value[col] = sum_amp * acc_cos;
        dev_sin_value[col] = sum_amp * acc_sin;
    }
} |
20,946 | #include "includes.h"
// Write each cell's own coordinate along `axis` (0 = x, 1 = y, otherwise z)
// into a w x h x d volume laid out as ((z * h + y) * w + x).
__global__ void initGridKernel ( float *d_grid, int axis, int w, int h, int d ) {
    const int baseX = blockIdx.x * IG_BLOCKDIM_X + threadIdx.x;
    const int baseY = blockIdx.y * IG_BLOCKDIM_Y + threadIdx.y;
    const int baseZ = blockIdx.z * IG_BLOCKDIM_Z + threadIdx.z;
    // Fix: guard against partial blocks — without this, any grid that
    // overshoots the volume wrote out of bounds.
    if (baseX >= w || baseY >= h || baseZ >= d) return;
    const int idx = (baseZ * h + baseY) * w + baseX;
    if(axis == 0) {
        d_grid[idx] = (float)baseX;
    } else if(axis == 1) {
        d_grid[idx] = (float)baseY;
    } else {
        d_grid[idx] = (float)baseZ;
    }
} |
20,947 | #include "includes.h"
// One phase of odd-even ("brick") transposition sort: on even phases p the
// even-indexed threads compare-and-swap (idx, idx+1); on odd phases the
// odd-indexed threads do, so no two active threads touch the same pair.
__global__ void brickSort(int* array, int arrayLen, int p) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= arrayLen - 1)
        return;
    if ((p % 2 == 0) && (idx % 2 == 1))
        return;
    if ((p % 2 == 1) && (idx % 2 == 0))
        return;
    int lhs = array[idx];
    int rhs = array[idx + 1];
    if (lhs > rhs) {
        array[idx] = rhs;
        array[idx + 1] = lhs;
    }
} |
20,948 | #include <cuda.h>
#include <cuda_runtime.h>
/* initialize grid
* we'll distribute all particles evenly on the screen
*/
/* initialize grid: spread all particles evenly over the ww x wh window
 * (centered on the origin) and zero their state vectors. Grid-stride
 * loops cover the whole mesh for any launch configuration. */
__global__ void d_reset( float4* verts, float4* states,
                         float ww, float wh,
                         int mesh_width, int mesh_height
                       )
{
    for (int row = blockIdx.y * blockDim.y + threadIdx.y;
         row < mesh_height;
         row += blockDim.y * gridDim.y)
    {
        for (int col = blockIdx.x * blockDim.x + threadIdx.x;
             col < mesh_width;
             col += blockDim.x * gridDim.x)
        {
            int idx = row * mesh_width + col;
            verts[idx].x = ww * (-0.5f + col * 1.f / mesh_width);
            verts[idx].y = wh * (-0.5f + row * 1.f / mesh_height);
            verts[idx].z = 0.0f;
            states[idx].x = 0.0f;
            states[idx].y = 0.0f;
            states[idx].z = 0.0f;
            /* color information is uploaded by the host; nothing to init */
        }
    }
}
/*
* calculate particle forces & new positions & colors
*/
/*
 * calculate particle forces & new positions & colors.
 * One grid-stride loop over all particles: steer each particle's state
 * toward/away from the mouse point (mx, my), move it, and recolor it by
 * how fast it moved. states[vi].w is used as the particle mass.
 */
__global__ void d_advance( float4* verts, float4* states,
float mx, float my,
int mesh_count,
float speed,
float delta)
{
int vi;
for (vi = blockIdx.x * blockDim.x + threadIdx.x;
vi < mesh_count;
vi += blockDim.x * gridDim.x)
{
// colors are stored after all vertices
unsigned int ci = mesh_count + vi;
float3 state = {states[vi].x, states[vi].y, 0.f/*states[vi].z*/};
// calculate vector between mouse and particle
float3 dir_force = {verts[vi].x-mx, verts[vi].y-my, 0.f};
/* steering:
* the new particle state vector is given by current directional force
* and the old state scaled by the particle mass
*/
// rsqrtf gives the RECIPROCAL length, so the multiplies below normalize.
float flen = rsqrtf( dir_force.x*dir_force.x
+dir_force.y*dir_force.y
/*+dir_force.z*dir_force.z*/);
dir_force.x *= flen;
dir_force.y *= flen;
/*dir_force.z *= flen;*/
state.x += delta * dir_force.x;
state.y += delta * dir_force.y;
/*state.z += dir_force.z;*/
// speed: base speed scaled by particle mass (stored in states[vi].w)
float v = speed / states[vi].w;
/* update particle position:
* new_position = old_position - state_vector*speed;
*/
float3 dv = {state.x * v, state.y * v, 0.f/*state.z * v*/};
verts[vi].x -= delta * dv.x;
verts[vi].y -= delta * dv.y;
/*verts[vi].z -= delta * dv.z;*/
// update colors depending on particle's displacement this step
float dx = 0.5f*dv.x;
float dy = 0.5f*dv.y;
verts[ci].x = 0.1f+fabs(dx);
verts[ci].y = 0.1f+fabs(dy);
verts[ci].z = 0.1f+fabs(dx+dy);
// persist the steered state for the next frame
states[vi].x = state.x;
states[vi].y = state.y;
/*states[vi].z = state.z;*/
}
}
// Host wrapper: launch d_reset with 16x16 thread blocks and a 1D grid of
// 16 blocks per SM; d_reset's grid-stride loops cover the full mesh
// regardless of this configuration.
void kernel_reset(float4* verts, float4* states,
                  int ww, int wh,
                  int mesh_width, int mesh_height,
                  int numSMs)
{
    dim3 blockDims(16, 16);
    dim3 gridDims(16 * numSMs);
    d_reset<<<gridDims, blockDims>>>(verts, states,
                                     ww, wh,
                                     mesh_width, mesh_height);
}
// Host wrapper: launch d_advance with 128-thread blocks, 16 blocks per SM,
// narrowing the double time step to the float the kernel expects.
void kernel_advance(float4* verts, float4* states,
                    float mx, float my,
                    int mesh_count,
                    float speed,
                    int numSMs,
                    double delta)
{
    dim3 blockDims(128);
    dim3 gridDims(16 * numSMs);
    d_advance<<<gridDims, blockDims>>>(verts, states,
                                       mx, my,
                                       mesh_count,
                                       speed,
                                       static_cast<float>(delta));
}
|
20,949 | #include <iostream>
#include <stdlib.h>
#include <set>
#include <fstream>
#include <stdio.h>
#include <malloc.h>
#include <time.h>
#include <math.h>
#include <random>
#include <chrono>
#include <ratio>
#include <thread>
#include <mutex>
//#define MODULUS_PRIME 1073741827// 30 bit prime
#define MODULUS_PRIME 536870909 //29 bit prime
//#define MODULUS_PRIME 9973 //14 bit prime
//#define MODULUS_PRIME 11 //4 bit prime
void runKernel(unsigned *queryVector,int p, unsigned *queryVector_d, int NDVSM, int THREADSPERBLOCKDVSM, int lengthOfResultVectorReduced,int *columnPtr_d,int *rowIndex_d,unsigned *valueArray_d,unsigned long long int *resultSparseVectorValueArray_d, unsigned long long int *resultSparseVectorValueArrayDD);
static const int numberOfThreads = 500;
std::mutex mtxKernel;
// Fill the query vector with p random values in [1, MODULUS_PRIME].
void generateQVector(unsigned *queryVector, int p){
    for(int idx = 0; idx < p; ++idx)
        queryVector[idx] = rand() % MODULUS_PRIME + 1;
}
// Print an r x c matrix row by row, followed by a trailing blank line.
void printMatrix(int **a, int r, int c) {
    for (int row = 0; row < r; ++row) {
        for (int col = 0; col < c; ++col) {
            printf("%d ", a[row][col]);
        }
        printf("\n");
    }
    printf("\n");
}
// Print c unsigned values separated by spaces, then a newline.
void printVector(unsigned *a,int c) {
    // Fix: use %u for unsigned — %d misprints values above INT_MAX.
    for (int j = 0; j < c; ++j) {
        printf("%u ", a[j]);
    }
    printf("\n");
}
// Print c unsigned long long values separated by spaces, then a newline.
void printVector2(unsigned long long int *a,int c) {
    // Fix: %d with an unsigned long long argument is undefined behavior
    // and truncates on every common ABI — the correct specifier is %llu.
    for (int j = 0; j < c; ++j) {
        printf("%llu ", a[j]);
    }
    printf("\n");
}
// Compare two arrays of length `length`; returns 1 when every entry
// matches, otherwise the index of the first mismatch.
// NOTE(review): this encoding is ambiguous — a mismatch at index 1 also
// returns 1, and a mismatch at index 0 returns 0, so callers cannot
// reliably distinguish success from those two failures.
int checkIfEqual(unsigned long long int *resultSparseVectorValueArray, unsigned long long int *resultSparseVectorValueArrayDD, int length) {
int i;
for(i=0;i<length;i++){
if(resultSparseVectorValueArray[i]!=resultSparseVectorValueArrayDD[i]) {
return i;
}
}
return 1;
}
// Compare two unsigned arrays of length `length`; returns 1 when every
// entry matches, otherwise the index of the first mismatch. NOTE: a
// mismatch at index 1 is indistinguishable from success — same encoding
// as checkIfEqual, kept for interface compatibility.
int checkIfEqual2(unsigned *resultSparseVectorValueArray, unsigned *resultSparseVectorValueArrayDD, int length) {
    for (int idx = 0; idx < length; ++idx) {
        if (resultSparseVectorValueArray[idx] != resultSparseVectorValueArrayDD[idx])
            return idx;
    }
    return 1;
}
// CSC sparse-matrix / dense-vector product modulo MODULUS_PRIME.
// One thread per (non-empty) column: accumulates value[j] * query[rowIndex[j]]
// over the column's nonzeros in a 64-bit register, then reduces mod
// MODULUS_PRIME on the final store.
__global__ void dvsmMultKernel(int lengthOfResultVectorReduced, int *columnPtr,int *rowIndex,unsigned *valueArray,
unsigned *queryVector,unsigned long long int *resultSparseVectorValueArray){
int col = blockDim.x * blockIdx.x + threadIdx.x;
if(col<lengthOfResultVectorReduced){
unsigned long long int temp = 0;
int j;
// columnPtr[col] .. columnPtr[col+1] brackets this column's nonzeros.
for(j=columnPtr[col];j<columnPtr[col+1];j++) {
temp += valueArray[j]*queryVector[rowIndex[j]];
}
resultSparseVectorValueArray[col]= temp % MODULUS_PRIME; //mul_m(1,temp,MODULUS_PRIME,INVK);//
}
}
// Benchmark driver: for each matrix height p (powers of two up to r) and
// per-column density u, build a random CSC matrix with exactly u entries
// per non-empty column, then time `numberOfThreads` sequential GPU
// mat-vec operations, logging results to testDataStatNOPsE.txt.
int main()
{
    std::ofstream myfile;
    myfile.open ("testDataStatNOPsE.txt");
    srand (time(NULL));
    const long max_u = 16, r = 1L << 18;
    for (long p = 2; p <= r; p <<= 1)
    {
        for (long u = 1; u <= max_u; ++u)
        {
            std::cout << "************************************************************************\n";
            std::cout << "p: " << p << "; r: " << r << "; u: " << u << "\n";
            myfile << "************************************************************************\n";
            myfile << "p: " << p << "; r: " << r << "; u: " << u << "\n";
            // Randomly distribute each of the p row indices into u distinct
            // columns while keeping every column's size <= u; cols2 tracks
            // the columns that still have room, displacing entries between
            // columns when a direct insert fails.
            std::vector<std::set<long>> cols;
            std::vector<std::set<long>*> cols2;
            cols.resize(r);
            for (auto it = begin(cols); it != end(cols); ++it) cols2.push_back(&(*it));
            for (long i = 1; i <= p; ++i)
            {
                for (long j = 1; j <= u; )
                {
                    long c = rand() % cols2.size();
                    if (cols2[c]->size() < u && cols2[c]->insert(i).second)
                    {
                        j++;
                    }
                    else
                    {
                        // Try to displace an entry from a random column into
                        // the chosen one to make room for row i.
                        long a = rand() % r;
                        if (cols[a].size() > 0 && cols[a].find(i) == end(cols[a]))
                        {
                            auto elt = begin(cols[a]);
                            std::advance(elt, rand() % cols[a].size());
                            long tmp = *elt;
                            if (cols2[c]->find(tmp) == end(*(cols2[c])))
                            {
                                cols[a].erase(elt);
                                cols[a].insert(i);
                                cols2[c]->insert(tmp);
                                j++;
                            }
                        }
                    }
                    if (cols2[c]->size() == u) cols2.erase(begin(cols2) + c);
                }
            }
            // Build the CSC arrays; empty columns are skipped, so columnPtr
            // is "reduced" to the non-empty columns only.
            int numberOfNonZeroElements = p*u;
            int lengthOfColumnPtr = r+1;
            unsigned *valueArray = (unsigned*)malloc(sizeof(unsigned)*numberOfNonZeroElements);
            int *rowIndex = (int*)malloc(sizeof(int)*numberOfNonZeroElements);
            int *columnPtr = (int*)malloc(sizeof(int)*(lengthOfColumnPtr));
            for (long i = 0; i < p * u; i++) {
                valueArray[i] = (rand() % MODULUS_PRIME);
            }
            int t=0;
            columnPtr[0] = 0;
            int lengthOfCPReduced = 0;
            for (int i = 0; i < r; ++i)
            {
                for (auto it = begin(cols[i]); it != end(cols[i]); ++it)
                {
                    rowIndex[t++] = (*it)-1; // rows are 1-based above; store 0-based
                }
                if (cols[i].size())
                {
                    columnPtr[lengthOfCPReduced+1]=columnPtr[lengthOfCPReduced]+cols[i].size();
                    lengthOfCPReduced++;
                }
            }
            /*
             * CUDA section
             */
            // NOTE(review): as in the original, only lengthOfCPReduced-1
            // columns are processed, i.e. the last non-empty column is
            // dropped — confirm that is intended.
            int lengthOfResultVectorReduced = lengthOfCPReduced-1;
            int THREADSPERBLOCKDVSM = lengthOfResultVectorReduced < 1024 ? lengthOfResultVectorReduced : 1024;
            int NDVSM = (lengthOfResultVectorReduced+THREADSPERBLOCKDVSM-1) / THREADSPERBLOCKDVSM;
            unsigned long long int *resultSparseVectorValueArrayDD = (unsigned long long int *)malloc(sizeof(unsigned long long int)*lengthOfResultVectorReduced*numberOfThreads);
            unsigned long long int *resultSparseVectorValueArray_d;
            unsigned *queryVector = (unsigned*)malloc(sizeof(unsigned)*p*numberOfThreads);
            int *rowIndex_d, *columnPtr_d;
            unsigned *valueArray_d, *queryVector_d;
            cudaMalloc((void**)&valueArray_d,(numberOfNonZeroElements*sizeof(unsigned)));
            cudaMalloc((void**)&rowIndex_d,(numberOfNonZeroElements*sizeof(int)));
            cudaMalloc((void**)&columnPtr_d,(lengthOfCPReduced)*sizeof(int));
            cudaMalloc((void**)&queryVector_d,numberOfThreads*p*sizeof(unsigned));
            cudaMalloc((void**)&resultSparseVectorValueArray_d,(numberOfThreads*lengthOfResultVectorReduced*sizeof(unsigned long long int)));
            cudaMemcpy( valueArray_d, valueArray, numberOfNonZeroElements*sizeof(unsigned), cudaMemcpyHostToDevice );
            cudaMemcpy( rowIndex_d, rowIndex, numberOfNonZeroElements*sizeof(int), cudaMemcpyHostToDevice );
            cudaMemcpy( columnPtr_d, columnPtr, lengthOfCPReduced*sizeof(int), cudaMemcpyHostToDevice );
            unsigned long numberOfOps;
            generateQVector(queryVector,p*numberOfThreads);
            std::chrono::duration<int,std::nano> timeSpend;
            std::chrono::nanoseconds zeroSec{0};
            timeSpend = zeroSec;
            std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
            for(numberOfOps=0; numberOfOps < numberOfThreads; numberOfOps++){
                // Fix: step through the packed buffers by whole vectors —
                // the original advanced every pointer by a single element
                // (+numberOfOps), so successive operations used
                // overlapping, misaligned query/result windows.
                runKernel(queryVector + numberOfOps*p, p,
                          queryVector_d + numberOfOps*p,
                          NDVSM, THREADSPERBLOCKDVSM, lengthOfResultVectorReduced,
                          columnPtr_d, rowIndex_d, valueArray_d,
                          resultSparseVectorValueArray_d + numberOfOps*lengthOfResultVectorReduced,
                          resultSparseVectorValueArrayDD + numberOfOps*lengthOfResultVectorReduced);
            }
            std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
            std::chrono::duration<int,std::nano> time_span = std::chrono::duration_cast<std::chrono::duration<int,std::nano>>(t2 - t1);
            timeSpend = timeSpend + time_span ;
            // NOTE(review): count() is nanoseconds, so /1000 yields
            // microseconds although the label says "ms" — confirm the
            // intended unit before relying on these numbers.
            std::cout << "Number of "<< numberOfThreads <<" operations at GPU takes: "<< (double)timeSpend.count()/1000 << "ms\n";
            myfile << "Number of "<< numberOfThreads <<" operations at GPU takes: "<< (double)timeSpend.count()/1000 << "ms\n";
            cudaFree( valueArray_d );
            cudaFree( rowIndex_d );
            cudaFree( columnPtr_d );
            cudaFree( queryVector_d );
            cudaFree( resultSparseVectorValueArray_d );
            free(queryVector);
            free(resultSparseVectorValueArrayDD);
            free(valueArray);
            free(rowIndex);
            free(columnPtr);
        }
    }
    myfile.close();
    return 0;
}
// Upload one p-element query vector, launch the CSC mat-vec kernel, and
// copy the reduced result vector back to host memory. The blocking
// cudaMemcpy calls serialize each operation (kernel included).
// NOTE(review): the caller is responsible for offsetting queryVector,
// queryVector_d, resultSparseVectorValueArray_d and
// resultSparseVectorValueArrayDD to the current operation's slot.
void runKernel(unsigned *queryVector,int p, unsigned *queryVector_d, int NDVSM, int THREADSPERBLOCKDVSM, int lengthOfResultVectorReduced,int *columnPtr_d,int *rowIndex_d,unsigned *valueArray_d,unsigned long long int *resultSparseVectorValueArray_d, unsigned long long int *resultSparseVectorValueArrayDD) {
cudaMemcpy( queryVector_d, queryVector, p*sizeof(unsigned), cudaMemcpyHostToDevice );
// mtxKernel.lock();
dvsmMultKernel<<< NDVSM, THREADSPERBLOCKDVSM>>>(lengthOfResultVectorReduced,columnPtr_d, rowIndex_d, valueArray_d, queryVector_d, resultSparseVectorValueArray_d);
// mtxKernel.unlock();
cudaMemcpy( resultSparseVectorValueArrayDD, resultSparseVectorValueArray_d, (lengthOfResultVectorReduced*sizeof(unsigned long long int)), cudaMemcpyDeviceToHost );
}
|
20,950 | #include <iostream>
#define imin(a, b) (a < b ? a : b)
const int N = 33 * 1024;
const int threadsPerBlock = 256;
// blocksPerGrid is smart. We won't use a constant number
// of blocks 'cause it's unnecessary. We will use the right amount,
// which comes from this simple formula, meaning, the max number
// of blocks will be 32. If we have a number smaller than that,
// do not create 32 blocks, create only the right amount of blocks.
const int blocksPerGrid =
imin(32, (N+threadsPerBlock-1)/threadsPerBlock);
// Block-level partial dot product: each thread accumulates a grid-stride
// partial sum, the block tree-reduces those in shared memory, and thread 0
// writes one partial result to c[blockIdx.x]; the host sums the per-block
// partials. Requires threadsPerBlock to be a power of 2.
__global__ void dot(float *a, float *b, float *c){
    __shared__ float cache[threadsPerBlock];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    float temp = 0;
    while(tid < N){
        // Fix: a dot product multiplies the paired elements — the original
        // added them (a[tid] + b[tid]), computing sum(a) + sum(b) instead.
        temp += a[tid] * b[tid];
        tid += blockDim.x * gridDim.x;  // grid-stride step
    }
    cache[cacheIndex] = temp;
    // All partial sums must be visible before the reduction reads them.
    __syncthreads();
    // Tree reduction: halve the active range each pass.
    int i = blockDim.x / 2;
    while(i != 0){
        if(cacheIndex < i){
            // Fix: pair with the element half-way across the active range —
            // the original read cache[cacheIndex + 1], which produced
            // garbage for every block wider than 2 threads.
            cache[cacheIndex] += cache[cacheIndex + i];
        }
        // Must be outside the if: every thread in the block has to reach it.
        __syncthreads();
        i /= 2;
    }
    if(cacheIndex == 0){
        // cache[0] now holds this block's partial dot product.
        c[blockIdx.x] = cache[0];
    }
}
int main( void ){
    // Host buffers, scalar result, and their device counterparts.
    float *a, *b, c, *partial_c;
    float *dev_a, *dev_b, *dev_partial_c;

    // Host allocations: two N-vectors plus one slot per block.
    a = (float*) malloc(N * sizeof(float));
    b = (float*) malloc(N * sizeof(float));
    partial_c = (float*) malloc(blocksPerGrid * sizeof(float));

    // Device allocations.
    cudaMalloc( (void**) &dev_a, N * sizeof(float));
    cudaMalloc( (void**) &dev_b, N * sizeof(float));
    cudaMalloc( (void**) &dev_partial_c, blocksPerGrid * sizeof(float));

    // Fill the inputs: a[i] = i, b[i] = 2i.
    for(int i = 0; i < N; ++i){
        a[i] = i;
        b[i] = i * 2;
    }

    // Upload, launch, and fetch the per-block partial sums.
    cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice);
    dot<<<blocksPerGrid,threadsPerBlock>>>(dev_a, dev_b, dev_partial_c);
    cudaMemcpy(partial_c, dev_partial_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);

    // Final reduction of the block partials happens on the CPU.
    c = 0;
    for(int i = 0; i < blocksPerGrid; ++i){
        c += partial_c[i];
    }
    printf("The final result is: %.6g\n", c);

    // Release device memory, then host memory.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_partial_c);
    free(a);
    free(b);
    free(partial_c);
} |
20,951 | #include "includes.h"
// Intentionally empty kernel: launches and returns without touching memory.
__global__ void donothing()
{
} |
20,952 | /*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Forward pass of relative-position-encoded attention aggregation:
//   output[q, h, :] += attn_weight[q, k, h] * (value[k, h, :] + table[bucket(relpos[q,k]), h, :])
// accumulated with atomicAdd over each query's local neighborhood. One
// thread handles one (query-key pair, head, channel) triple.
__global__ void rpe_v_forward(
int b, int total_query_num, int local_size,
int total_key_num, int nhead, int hdim, int l,
const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
const int *index_pair,
const float *attn_weight, const float* value_features,
const float *relpos, const float *lookup_table,
float *output) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params key_batch_cnt: [b]
// params index_pair_batch: [total_query_num]
// params index_pair: [total_query_num, local_size]
// params attn_weight: [total_query_num, local_size, nhead]
// params value_features: [total_key_num, nhead, hdim]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params output: [total_query_num, nhead, hdim]
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
if (index_pair[index] == -1){
// -1 marks a padded/invalid neighbor; contribute nothing.
return;
}
int query_idx = index / local_size;
int batch_idx = index_pair_batch[query_idx];
// Key indices in index_pair are batch-local; convert to a global offset by
// summing the key counts of all preceding batches.
int key_start_idx = 0;
for (int i = 0; i < batch_idx; i++){
key_start_idx += key_batch_cnt[i];
}
// 1. Obtain value features.
key_start_idx += index_pair[index];
value_features += key_start_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain attention weight.
attn_weight += index * nhead + head_idx;
// 3. Obtain rpe value: floor the relative position and clamp to [0, l-1]
// to pick a lookup-table bucket.
relpos += index;
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 4. Accumulate weight * (value + rpe) into the output; atomicAdd because
// all local_size neighbors of a query write the same output element.
output += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
atomicAdd(
output,
attn_weight[0] * (value_features[0] + lookup_table[0]));
}
// Host launcher for rpe_v_forward.  Grid: x covers the flattened
// (query, neighbour) pairs, y = heads, z = channels; THREADS_PER_BLOCK
// threads per block.  Reports (but does not abort on) launch failures.
void rpe_v_launcher(
int b, int total_query_num, int local_size,
int total_key_num, int nhead, int hdim, int l,
const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
const int *index_pair,
const float *attn_weight, const float* value_features,
const float *relpos, const float *lookup_table,
float *output){
// params query_batch_cnt: [b]
// params key_batch_cnt: [b]
// params index_pair_batch: [total_query_num]
// params index_pair: [total_query_num, local_size]
// params attn_weight: [total_query_num, local_size, nhead]
// params value_features: [total_key_num, nhead, hdim]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params output: [total_query_num, nhead, hdim]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_v_forward<<<blocks, threads>>>(
b, total_query_num, local_size, total_key_num, nhead, hdim, l,
query_batch_cnt, key_batch_cnt, index_pair_batch,
index_pair, attn_weight, value_features,
relpos, lookup_table,
output);
// Launch-configuration errors are otherwise silent; surface them here.
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "rpe_v_launcher kernel launch failed: %s\n", cudaGetErrorString(err));
}
}
// Backward pass of rpe_v_forward.  Forward: out = attn_weight * (value + table),
// so d/d(attn_weight) = grad_out * (value + table), d/d(value) = d/d(table) =
// grad_out * attn_weight.  Same thread layout as the forward kernel; atomics
// because several cells accumulate into the same gradient slots.
__global__ void rpe_v_backward(
int b, int total_query_num, int local_size,
int total_key_num, int nhead, int hdim, int l,
const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
const int *index_pair,
const float *attn_weight, const float* value_features,
const float* relpos, const float* lookup_table,
float *grad_out, float * grad_attn_weight, float * grad_value_features,
float *grad_lookup_table) {
// dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
// params query_batch_cnt: [b]
// params key_batch_cnt: [b]
// params index_pair_batch: [total_query_num]
// params index_pair: [total_query_num, local_size]
// params attn_weight: [total_query_num, local_size, nhead]
// params value_features: [total_key_num, nhead, hdim]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params grad_out: [total_query_num, nhead, hdim]
// params grad_attn_weight: [total_query_num, local_size, nhead]
// params grad_value_features: [total_key_num, nhead, hdim]
// params grad_lookup_table: [l, nhead, hdim]
int index = blockIdx.x * blockDim.x + threadIdx.x;
int head_idx = blockIdx.y;
int hdim_idx = blockIdx.z;
if (index >= total_query_num * local_size ||
head_idx >= nhead ||
hdim_idx >= hdim) return;
// -1 marks a padded/invalid neighbour slot.
if (index_pair[index] == -1){
// Ignore index.
return;
}
int query_idx = index / local_size;
int batch_idx = index_pair_batch[query_idx];
// Offset of this batch's keys within the flat key array.
int key_start_idx = 0;
for (int i = 0; i < batch_idx; i++){
key_start_idx += key_batch_cnt[i];
}
// 1. Obtain value features.
key_start_idx += index_pair[index];
value_features += key_start_idx * nhead * hdim + head_idx * hdim + hdim_idx;
grad_value_features += key_start_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// 2. Obtain attention weight.
attn_weight += index * nhead + head_idx;
grad_attn_weight += index * nhead + head_idx;
// 3. Obtain rpe value.
relpos += index;
// Same quantization as the forward pass, so gradients hit the same table row.
int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
// 3. Obtain grad out.
grad_out += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
// out = atten_weight * (value + lookup_table)
atomicAdd(
grad_attn_weight,
grad_out[0] * (value_features[0] + lookup_table[0]));
atomicAdd(
grad_value_features,
grad_out[0] * attn_weight[0]);
atomicAdd(
grad_lookup_table,
grad_out[0] * attn_weight[0]);
}
// Host launcher for rpe_v_backward.  Same grid layout as rpe_v_launcher.
// Reports (but does not abort on) launch failures.
void rpe_v_grad_launcher(
int b, int total_query_num, int local_size,
int total_key_num, int nhead, int hdim, int l,
const int *query_batch_cnt, const int *key_batch_cnt, const int* index_pair_batch,
const int *index_pair,
const float *attn_weight, const float* value_features,
const float* relpos, const float* lookup_table,
float *grad_out, float* grad_attn_weight, float* grad_value_features,
float *grad_lookup_table){
// params query_batch_cnt: [b]
// params key_batch_cnt: [b]
// params index_pair_batch: [total_query_num]
// params index_pair: [total_query_num, local_size]
// params attn_weight: [total_query_num, local_size, nhead]
// params value_features: [total_key_num, nhead, hdim]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params grad_out: [total_query_num, nhead, hdim]
// params grad_attn_weight: [total_query_num, local_size, nhead]
// params grad_value_features: [total_key_num, nhead, hdim]
// params grad_lookup_table: [l, nhead, hdim]
dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
dim3 threads(THREADS_PER_BLOCK);
rpe_v_backward<<<blocks, threads>>>(
b, total_query_num, local_size, total_key_num, nhead, hdim, l,
query_batch_cnt, key_batch_cnt, index_pair_batch,
index_pair, attn_weight, value_features,
relpos, lookup_table,
grad_out, grad_attn_weight, grad_value_features, grad_lookup_table);
// Launch-configuration errors are otherwise silent; surface them here.
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "rpe_v_grad_launcher kernel launch failed: %s\n", cudaGetErrorString(err));
}
}
|
20,953 | #include <iostream>
using namespace std;
#define N 65536
#define A 2
#define blockSize 65
// SAXPY Kernel
// Performs A*X+Y
// Assumes single N blocks with 32 threads each
// SAXPY kernel: Z[i] = A * X[i] + Y[i], one thread per element.
// The bounds guard handles the tail when N is not a multiple of the block size.
// (A and N are compile-time #defines from the top of this file.)
__global__ void saxpy(int *X, int *Y, int *Z){
	int idx = threadIdx.x + blockDim.x * blockIdx.x;
	if(idx >= N) return;
	Z[idx] = A * X[idx] + Y[idx];
}
// Host driver: runs SAXPY on the GPU, times it with CUDA events, and verifies
// against a CPU recomputation.  Fixes over the original: the CUDA events are
// destroyed (they were leaked), and main returns an explicit status.
int main(){
// Host data: X,Y input data, Z output data.
// NOTE(review): 3 * 65536 ints (~768 KB) live on the stack; move to static or
// heap storage if N grows.
int X[N], Y[N], Z[N];
int *dev_X, *dev_Y, *dev_Z; // Device data pointers
// Allocate memory on the device/GPU
cudaMalloc((void**)&dev_X, N*sizeof(int));
cudaMalloc((void**)&dev_Y, N*sizeof(int));
cudaMalloc((void**)&dev_Z, N*sizeof(int));
// Fill input arrays
for(int i = 0; i<N; i++){
X[i] = i;
Y[i] = i*i;
}
// Copy data to the device
cudaMemcpy(dev_X,X,N*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(dev_Y,Y,N*sizeof(int),cudaMemcpyHostToDevice);
// Create timing events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Ceiling division so every element gets a thread
int gridSize = (N+(blockSize-1)) / blockSize;
// Time the kernel launch with events
cudaEventRecord(start);
saxpy<<<gridSize,blockSize>>>(dev_X,dev_Y,dev_Z);
cudaEventRecord(stop);
// Copy memory off of the device (blocking, so the kernel has finished)
cudaMemcpy(Z,dev_Z,N*sizeof(int),cudaMemcpyDeviceToHost);
// Wait for the stop event before reading the elapsed time
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
cout << milliseconds << endl;
// Verify every output element against a CPU recomputation
for(int i = 0; i<N; i++){
int checkValue = A * X[i] + Y[i];
if (Z[i] != checkValue) {
cout << "Mismatch " << i << endl;
}
}
// Destroy the timing events (leaked in the original version)
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Free up memory
cudaFree(dev_X);
cudaFree(dev_Y);
cudaFree(dev_Z);
return 0;
} |
20,954 | #include "cuda.cuh"
// Minimal empty kernel; exists only so `run` has something to launch.
__global__ void kernel(void)
{
}
// Launch `kernel` with a single block of a single thread.
void run()
{
    kernel<<<1, 1>>>();
}
|
20,955 | #include <iostream>
// Empty placeholder kernel; launched once from main() below and does no work.
__global__ void helloWorld(){
}
// Launches the (empty) helloWorld kernel once.
int main(int argc, char const *argv[]){
	helloWorld<<< 1,1 >>>();
	// Kernel launches are asynchronous; without this sync the process can
	// exit before the launch ever executes, and any launch error is lost.
	cudaDeviceSynchronize();
	return 0;
} |
20,956 | #include <stdlib.h>
#include <stdio.h>
#define BLOCKS size
#define THREADS 1
#define T 100000
#define H 0.01
#define R 1.0
#define K 2.0
#define ALPHA 9.96
#define BETA 1.0
#define M 0.28
#define DN 0.5
#define DP 0.5
// One explicit-Euler step of the diffusive Rosenzweig-MacArthur
// predator-prey model on a periodic 1-D ring: reads (n0, p0), writes (n1, p1).
// Launched as <<<size, 1>>> (one block per lattice site), so the site index is
// blockIdx.x and the ring length is gridDim.x.
__global__ void rosmac(float *n0, float *n1, float *p0, float *p1)
{
// Better integration:
const int tid = blockIdx.x;
// BUG FIX: the wrap-around must use gridDim.x (number of sites).  The
// original used blockDim.x, which equals 1 under the <<<size,1>>> launch,
// so both neighbours of the boundary sites collapsed to index 0 and the
// periodic boundary (and the diffusion term there) was wrong.
const int left = (tid == 0)? gridDim.x - 1 : tid - 1;
const int right = (tid == gridDim.x - 1)? 0 : tid + 1;
const float dn = R * n0[tid] * (1.0f - n0[tid] / K) - (ALPHA * n0[tid] * p0[tid]) / (1.0f + BETA * n0[tid]) - DN * (n0[tid] - n0[left] / 2.0f - n0[right] / 2.0f);
const float dp = (ALPHA * n0[tid] * p0[tid]) / (1.0f + BETA * n0[tid]) - M * p0[tid] - DP * (p0[tid] - p0[left] / 2.0f - p0[right] / 2.0f);
n1[tid] = n0[tid] + H * dn;
p1[tid] = p0[tid] + H * dp;
}
// Host driver: initializes two random fields, then ping-pongs the rosmac
// kernel between the (n0,p0) and (n1,p1) buffers, dumping the predator field
// every 100 steps.  NOTE: the BLOCKS macro (#define BLOCKS size) expands to
// the local variable `size`, so the launches below read <<<size, 1>>>.
int main(int argc, char **argv)
{
// NOTE(review): atof returns a double that is truncated into an unsigned int
// here; atoi/strtoul would express the intent directly — confirm and change.
const unsigned int size = (argc == 2)? atof(argv[1]) : 1000;
const unsigned int bytes = size * sizeof(float);
float *h_n = (float*)malloc(bytes);
float *h_p = (float*)malloc(bytes);
// Double-buffered device state: step reads *0 and writes *1, then swaps.
float *d_n0, *d_n1, *d_p0, *d_p1;
cudaMalloc((void**)&d_n0, bytes);
cudaMalloc((void**)&d_n1, bytes);
cudaMalloc((void**)&d_p0, bytes);
cudaMalloc((void**)&d_p1, bytes);
// Use gsl:
// Fixed seed for reproducible initial conditions.
srand(42);
rand();
for (int i = 0; i < size; ++i)
{
h_n[i] = ((float)rand() / RAND_MAX);
h_p[i] = ((float)rand() / RAND_MAX);
}
cudaMemcpy(d_n0, h_n, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_p0, h_p, bytes, cudaMemcpyHostToDevice);
// Two Euler steps per loop iteration (hence t += 2): the second launch
// writes back into the *0 buffers, so no explicit pointer swap is needed.
for (int t = 0; t < T; t += 2)
{
rosmac<<<BLOCKS,THREADS>>>(d_n0, d_n1, d_p0, d_p1);
rosmac<<<BLOCKS,THREADS>>>(d_n1, d_n0, d_p1, d_p0);
if (t % 100 == 0)
{
//printf("%16d -> ", t);
// Blocking copies also synchronize with the kernels above.
cudaMemcpy(h_n, d_n1, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(h_p, d_p1, bytes, cudaMemcpyDeviceToHost);
for (int i = 0; i < size; ++i)
{
printf("%.4f\t", h_p[i]);
}
printf("\n");
}
}
cudaFree(d_n0);
cudaFree(d_n1);
cudaFree(d_p0);
cudaFree(d_p1);
free(h_n);
free(h_p);
return EXIT_SUCCESS;
}
|
20,957 | // ================================================================================================
// A simple script to get memory usage & properties of CUDA supported NVIDIA devices
//
// Author: Sivagnanam Namasivayamurthy
//
// ================================================================================================
#include <stdio.h>
#include <time.h>
// CUDA C headers
#include <cuda.h>
#include <cuda_runtime.h>
/*
*
*/
/*
 * Print the current local date and time, preceded by a newline.
 */
void printCurrentTime(){
    time_t now = time(NULL);
    struct tm local = *localtime(&now);
    printf("\n%d-%d-%d %d:%d:%d", local.tm_year + 1900, local.tm_mon + 1, local.tm_mday, local.tm_hour, local.tm_min, local.tm_sec);
}
/*
* Get number of CUDA supported devices available
*
* returns: the number of CUDA supported devices
*/
/*
 * Query the CUDA runtime for the number of visible devices.
 * Terminates the process if the query itself fails.
 *
 * returns: the number of CUDA supported devices
 */
int getCudaDevicesCount(){
    int count = 0;
    cudaError_t status = cudaGetDeviceCount(&count);
    if(status != cudaSuccess){
        printf("Unable to get cudaGetDeviceCount : error num %d - %s\n", (int) status, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
    return count;
}
/*
* Print the number of CUDA supported devices available
*/
/*
 * Report how many CUDA supported devices were found; exit when there are none.
 */
void printDevicesCount(){
    int count = getCudaDevicesCount();
    if(count == 0){
        printf("No CUDA supported device(s) found !!! \n");
        exit(EXIT_FAILURE);
    }
    printf("Found %d CUDA supported device(s)\n", count);
}
/*
* Get device properties
*
* deviceId : ID of the CUDA supported device
* returns: the cudaDeviceProp struct that contains the CUDA device properties
*/
/*
 * Fetch the property struct for one CUDA device, exiting on failure.
 *
 * deviceId : ID of the CUDA supported device
 * returns: the cudaDeviceProp struct describing the device
 */
cudaDeviceProp getCudaDeviceProps(int deviceId){
    cudaDeviceProp props;
    cudaError_t status = cudaGetDeviceProperties(&props, deviceId);
    if(status != cudaSuccess){
        printf("Unable to get cudaGetDeviceProperties for device ID %d : error num %d - %s\n", deviceId, (int) status, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
    return props;
}
/*
* Print the CUDA device properties
*
* deviceId: ID of the CUDA supported device
*/
/*
 * Pretty-print the property sheet of one CUDA device.
 *
 * deviceId: ID of the CUDA supported device
 */
void printCudaDeviceProps(int deviceId)
{
    cudaDeviceProp props = getCudaDeviceProps(deviceId);
    printf("\n Device ID: %d Name: %s\n", deviceId, props.name);
    printf("--------------------------------------------------------------\n");
    printf("CUDA capability Major/Minor version number: %d.%d\n", props.major, props.minor);
    printf("Total global memory: %0.f MB\n", (float)props.totalGlobalMem/(1048576.0f));
    printf("Total shared memory per block: %lu bytes\n", props.sharedMemPerBlock);
    printf("Total registers per block: %d\n", props.regsPerBlock);
    printf("Warp size: %d\n", props.warpSize);
    printf("Maximum memory pitch: %lu bytes\n", props.memPitch);
    printf("Maximum threads per block: %d\n", props.maxThreadsPerBlock);
    printf("Maximum sizes of each dimension of a block: %d x %d x %d \n", props.maxThreadsDim[0], props.maxThreadsDim[1], props.maxThreadsDim[2]);
    printf("Maximum sizes of each dimension of a grid: %d x %d x %d \n", props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]);
    printf("Clock rate: %d\n", props.clockRate);
    printf("Total constant memory: %lu\n", props.totalConstMem);
    printf("Texture alignment: %lu bytes\n", props.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (props.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors: %d\n", props.multiProcessorCount);
    printf("Kernel execution timeout: %s\n", (props.kernelExecTimeoutEnabled ? "Yes" : "No"));
    printf("--------------------------------------------------------------\n");
}
/*
* Find all CUDA supported devices & print their properties
*/
/*
 * Enumerate every CUDA supported device and print its property sheet.
 */
void printAllCudaDeviceProps(){
    int count = getCudaDevicesCount();
    printCurrentTime();
    printf("\nFound %d CUDA supported device(s)\n", count);
    for (int id = 0; id < count; ++id){
        printCudaDeviceProps(id);
    }
}
/*
* Print the free & used memory information of all NVIDIA CUDA supported devices
*/
/*
 * Print driver/runtime versions plus the free and total memory of every
 * CUDA supported device.  (cudaMemGetInfo reports free bytes first, then
 * the total capacity of the currently selected device.)
 */
void printNvidiaDevicesMemoryInfo(){
    int driverVersion, runTimeVersion;
    size_t mem_total, mem_free;
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runTimeVersion);
    int deviceCount = getCudaDevicesCount();
    printCurrentTime();
    printf("\n+----------------------------------------------------------------------+\n");
    printf("| CUDA Driver version: %d.%d Runtime Version: %d.%d |\n", driverVersion/1000, (driverVersion % 100)/10, runTimeVersion/1000, (runTimeVersion%100)/10);
    for (int deviceId = 0; deviceId < deviceCount; ++deviceId){
        // cudaMemGetInfo reports on the device selected here.
        cudaSetDevice(deviceId);
        cudaDeviceProp deviceProps = getCudaDeviceProps(deviceId);
        cudaMemGetInfo(&mem_free, &mem_total);
        printf("+----------------------------------------------------------------------------------------+\n");
        printf("| Device ID: %d Name: %s %.0f MB (free) / %.0f MB (total) |\n", deviceId, deviceProps.name, (float)mem_free/(1024*1024.), (float)mem_total/(1024*1024.));
        printf("+----------------------------------------------------------------------------------------+\n");
    }
}
// Entry point: no arguments -> memory report; "-mem" -> memory report;
// "-props" -> full property sheets.  Any other argument prints nothing.
int main(int argc, char *argv[])
{
// NOTE(review): strcmp is used below but <string.h> is not among the visible
// includes; this presumably compiles via a transitive include — confirm.
if(argc < 2) {
printNvidiaDevicesMemoryInfo(); // Print memory information if not argument is passed
} else {
if (0 == strcmp(argv[1], "-mem")){
printNvidiaDevicesMemoryInfo();
} else if (0 == strcmp(argv[1], "-props")) {
printAllCudaDeviceProps();
}
}
return 0;
}
|
20,958 | /* written by Xin Liu
* Dec 2017
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// deposition_sim.cpp : Defines the entry point for the console application.
//
#include<iostream>
#include<fstream>
#include<stdio.h>
#include<random>
#include<math.h>
#include <time.h>
// Nondimensionalization length
#define C_bar (0.000027)
// x left boundary
#define boundary_left (0)
// x right boundary, length
#define boundary_right (0.001)
// y lower boundary
#define boundary_front (0)
// y upper boundary width
#define boundary_rear (0.0005)
// z upper boundary, never used
#define boundary_up (0.003)
// z lower boundary, the substrate height
#define boundary_bottom (0.000)
// total particle number
#define N_PARTICLES (512)
//initial particle height arange
#define MAX_HEIGHT (0.00020)
#define MIN_HEIGHT (0.00004)
//mean radius
#define MU (0.000027)
//radius half bindwidth
#define BIND (0.000010)
#define zeta (0.1)
#define mu_d (0.1)
#define mu_f (2.1e-3)
#define g_grav (-9.8)
#define rho (7800)
#define E_modulus (193e9)
#define nu (0.26)
#define length boundary_right
#define width boundary_rear
#define E_aster (E_modulus/(2-2*nu*nu))
#define R_aster Rs[i]*Rs[j]/(Rs[i]+Rs[j])
// These two are just for simplifying code
#define norm_x_ij sqrt((x_j[0]-x_i[0])*(x_j[0]-x_i[0])+(x_j[1]-x_i[1])*(x_j[1]-x_i[1])+(x_j[2]-x_i[2])*(x_j[2]-x_i[2]))
#define norm_dxij sqrt((dx_j[0]-dx_i[0])*(dx_j[0]-dx_i[0])+(dx_j[1]-dx_i[1])*(dx_j[1]-dx_i[1])+(dx_j[2]-dx_i[2])*(dx_j[2]-dx_i[2]))
// never used
#define C_factor 0.0055
//sqrt(1/(4/3*rho*pi))
// Nondimensionalization mass
#define m_bar (4.0/3*M_PI*C_bar*C_bar*C_bar)
// Nondimensionalization temporal coefficient
#define omega_bar (sqrt(E_modulus/(2-2*nu*nu)*C_bar/m_bar))
// coefficient to bound the contact force
#define MAX_R 10
// particle initial velocity
#define v0 (0)
// time step, actual time step is 5e-8, multiply omega_bar for nondimensionalization
#define TIME_STEP (5e-8*omega_bar)
// total simulation time
#define TIME 0.1
// size of a block
#define BLOCKDIM 128
typedef double float_value_t;
// Particle system in structure-of-arrays layout; the pointers are allocated
// and filled elsewhere (host copy in main, device copy via cudaMalloc).
typedef struct
{
int n_particles; // number of particles described by the arrays below
float_value_t* xyzs; // positions, 3 values per particle
float_value_t* rs; // per-particle radii (passed to kernels as Rs)
float_value_t* ms; // per-particle masses (passed to kernels as Ms)
float_value_t* fs; // forces, 3 per particle — presumably scratch; verify against callers
float_value_t* vs; // velocities, 3 per particle
float_value_t* as; // accelerations, 3 per particle
float_value_t* E; // per-particle elastic modulus (passed to kernels as Es)
float_value_t* mu; // per-particle friction coefficient (passed as Mus)
// float_value_t* temperatures;
} particles_t;
void readParticles(particles_t * particles);
void writeFile(float_value_t* array, int row, int col, std::string filename);
void initParticles(particles_t * particles);
float_value_t* initFloatArray(int num_elements);
void freeParticles(particles_t * particles, particles_t * dp);
// void updateAcceleration(particles_t * particles);
// Unimplemented placeholder: the host-to-device particle copies are done
// inline in main() with individual cudaMemcpy calls instead.
void copyParticles(particles_t * particles, particles_t * dp)
{
}
// norm of r
// Euclidean length of the 3-vector r.
__device__ float_value_t norm(float_value_t* r)
{
    float_value_t sq = r[0] * r[0];
    sq += r[1] * r[1];
    sq += r[2] * r[2];
    return sqrt(sq);
}
// dot product of r*r1
// Dot product of the 3-vectors r and r1.
__device__ float_value_t dot(float_value_t* r, float_value_t* r1)
{
    float_value_t acc = r[0] * r1[0];
    acc += r[1] * r1[1];
    acc += r[2] * r1[2];
    return acc;
}
// dot product of r*(r2-r1)
// Dot product of r with the difference vector (r2 - r1).
__device__ float_value_t dot3(float_value_t* r, float_value_t* r1, float_value_t* r2)
{
    float_value_t acc = r[0] * (r2[0] - r1[0]);
    acc += r[1] * (r2[1] - r1[1]);
    acc += r[2] * (r2[2] - r1[2]);
    return acc;
}
// addition for C = A + factor*B
__global__ void vector_add(float_value_t* A, float_value_t *B, float_value_t *C, float_value_t factor, int N)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < N)
{
C[i] = A[i] + factor*B[i];
}
}
// addition for x = x+2.0 / 6 * TIME_STEP*fy2 + 2.0 / 6 * TIME_STEP*fy3 + 1.0 / 6 * TIME_STEP*fy4_1;
__global__ void vector_add3(float_value_t* x, float_value_t *fy2, float_value_t *fy3, float_value_t *fy4_1, int N)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < N)
{
x[i] = x[i] + 2.0 / 6 * TIME_STEP*fy2[i] + 2.0 / 6 * TIME_STEP*fy3[i] + 1.0 / 6 * TIME_STEP*fy4_1[i];
}
}
// intent to control energy not to increase, but actually never used
__global__ void vel_checker(float_value_t* x, float_value_t* y, int N)
{
int i = blockDim.x*blockIdx.x + threadIdx.x;
if (i < N)
{
x[3 * i] = y[3 * i];
x[3 * i + 1] = y[3 * i + 1];
x[3 * i + 2] = y[3 * i + 2];
float_value_t vx = x[3 * i + 3 * N_PARTICLES];
float_value_t vy = x[3 * i + 1 + 3 * N_PARTICLES];
float_value_t vz = x[3 * i + 2 + 3 * N_PARTICLES];
float_value_t vn = 1 / 2 * (vx*vx + vy*vy + vz*vz)*omega_bar*C_bar*omega_bar*C_bar +(-g_grav)*x[3 * i + 2] * C_bar;
float_value_t vx1 = y[3 * i + 3 * N_PARTICLES];
float_value_t vy1 = y[3 * i + 1 + 3 * N_PARTICLES];
float_value_t vz1 = y[3 * i + 2 + 3 * N_PARTICLES];
float_value_t vn1 = 1 / 2 *(vx1*vx1 + vy1*vy1 + vz1*vz1)*omega_bar*C_bar *omega_bar*C_bar +(-g_grav)*y[3 * i + 2] * C_bar;
if (abs(vn1) >abs(vn) && vn1>1e-6 )
{
x[3 * i + 3 * N_PARTICLES] = vx1*vn / vn1;
x[3 * i + 1 + 3 * N_PARTICLES] = vy1*vn / vn1;
x[3 * i + 2 + 3 * N_PARTICLES] = vz1*vn / vn1;
}
else
{
x[3 * i + 3 * N_PARTICLES] = vx1;
x[3 * i + 1 + 3 * N_PARTICLES] = vy1;
x[3 * i + 2 + 3 * N_PARTICLES] = vz1;
}
}
}
// calculate velocity and acceleration
__global__ void f(float_value_t* res, float_value_t* res1, float_value_t coeff, float_value_t *Es, float_value_t *Rs, float_value_t *Ms, float_value_t *Mus, float_value_t *v3s, float_value_t* x_input, float_value_t delta_t)
{
int j = threadIdx.x;//blockDim.x*blockIdx.x+
int i = blockIdx.x;
// shared memory for reduction of particle contact force
__shared__ float_value_t s_fcon[3 * N_PARTICLES];
// shared memory for reduction of particle friction force
__shared__ float_value_t s_ffric[3 * N_PARTICLES];
// position of particle-i, multiply C_bar so now x_i is the dimensionalized coordinate
float_value_t x_i[3];
x_i[0] = x_input[3 * i]*C_bar;
x_i[1] = x_input[3 * i + 1] * C_bar;
x_i[2] = x_input[3 * i + 2] * C_bar;
// velocity of particle-i, multiply omega_bar*C_bar so now dx_i is the dimensionalized velocity
float_value_t dx_i[3];
dx_i[0] = x_input[3 * i + 3 * N_PARTICLES] *omega_bar* C_bar;
dx_i[1] = x_input[3 * i + 1 + 3 * N_PARTICLES] * omega_bar* C_bar;
dx_i[2] = x_input[3 * i + 2 + 3 * N_PARTICLES] * omega_bar* C_bar;
// environmental drag
float_value_t F_env[3] = { 0 };
// total contact force (particle+wall)
float_value_t F_con[3] = { 0 };
// total friction force (particle+wall)
float_value_t F_fric[3] = { 0 };
// wall contact force
float_value_t F_con_iw[3] = { 0 };
// wall friction force
float_value_t F_fric_iw[3] = { 0 };
// environmental drag evaluation
F_env[0] = -6 * M_PI*mu_f*Rs[i] *dx_i[0];
F_env[1] = -6 * M_PI*mu_f*Rs[i] *dx_i[1];
F_env[2] = -6 * M_PI*mu_f*Rs[i] *dx_i[2];
// in every block, the N_PARTICLES-thread calculate the wall-interaction, including left wall, right wall, front wall, back wall, bottom wall,
if (j == N_PARTICLES)
{
float_value_t delta_iw = 0.0;
if (x_i[0] - Rs[i] <= boundary_left)
{
delta_iw = x_i[0] - boundary_left - (Rs[i]);
F_con_iw[0] = 4.0 / 3 * sqrt(Rs[i]) * E_aster*pow(abs(delta_iw), 1.5) + 2 * zeta*sqrt(2 * E_aster* Ms[i] *sqrt(Rs[i]))*pow(abs(delta_iw), (0.25))* (-dx_i[0]);
if (F_con_iw[0] < 0)
F_con_iw[0] = 0;
F_con_iw[1] = 0;
F_con_iw[2] = 0;
}
if (x_i[0] + Rs[i] >= boundary_right)
{
delta_iw = x_i[0] - boundary_right + (Rs[i]);
F_con_iw[0] = -4.0 / 3 * sqrt(Rs[i]) * E_aster*pow(abs(delta_iw), 1.5) - 2 * zeta*sqrt(2 * E_aster* Ms[i] *sqrt(Rs[i]))*pow(abs(delta_iw), (0.25))* dx_i[0] ;
if (F_con_iw[0] > 0)
F_con_iw[0] = 0;
F_con_iw[1] = 0;
F_con_iw[2] = 0;
}
if (x_i[1] - Rs[i] <= boundary_front)
{
delta_iw = x_i[1] - boundary_front - (Rs[i]);
F_con_iw[0] = 0;
F_con_iw[1] = 4.0 / 3 * sqrt(Rs[i]) * E_aster*pow(abs(delta_iw), 1.5) + 2 * zeta*sqrt(2 * E_aster* Ms[i] *sqrt(Rs[i]))*pow(abs(delta_iw), (0.25))* (-dx_i[1]);
F_con_iw[2] = 0;
if (F_con_iw[1] < 0)
F_con_iw[1] = 0;
}
if (x_i[1] + Rs[i] >= boundary_rear)
{
delta_iw = x_i[1] - boundary_rear + (Rs[i]);
F_con_iw[0] = 0;
F_con_iw[1] = -4.0 / 3 * sqrt(Rs[i]) * E_aster*pow(abs(delta_iw), 1.5) - 2 * zeta*sqrt(2 * E_aster* Ms[i] *sqrt(Rs[i]))*pow(abs(delta_iw), (0.25))* dx_i[1] ;
F_con_iw[2] = 0;
if (F_con_iw[1] > 0)
F_con_iw[1] = 0;
}
if (x_i[2] - Rs[i] <= boundary_bottom)
{
delta_iw = x_i[2] - boundary_bottom - (Rs[i]);
F_con_iw[0] = 0;
F_con_iw[1] = 0;
F_con_iw[2] = 4.0 / 3 * sqrt(Rs[i]) * E_aster*pow(abs(delta_iw), 1.5) + 2 * zeta*sqrt(2 * E_aster* Ms[i] *sqrt(Rs[i]))*pow(abs(delta_iw), (0.25))* (-dx_i[2]);
if (F_con_iw[2] < 0)
F_con_iw[2] = 0;
//float_value_t normdxi = sqrt(dx_i[0] * dx_i[0] + dx_i[1] * dx_i[1] + dx_i[2] * dx_i[2])
}
// evluation of wall-friction, ensure velocity not too small causing numeric singular value
if (norm(dx_i) <= 1e-30)
{
F_fric_iw[0] = 0;
F_fric_iw[1] = 0;
F_fric_iw[2] = 0;
}
else
{
F_fric_iw[0] = mu_d*norm(F_con_iw)*(-dx_i[0]) / norm(dx_i);
F_fric_iw[1] = mu_d*norm(F_con_iw)*(-dx_i[1]) / norm(dx_i);
F_fric_iw[2] = mu_d*norm(F_con_iw)*(-dx_i[2]) / norm(dx_i);
}
}
// for 0,...,j,...,N_PARTICLES-1 thread, every thread calculate its interaction with i-particle(if exists)
if (j < N_PARTICLES)
{
// i-particle should not interact with itself
if (i == j)
{
s_fcon[3 * j + 0] = 0;
s_fcon[3 * j + 1] = 0;
s_fcon[3 * j + 2] = 0;
s_ffric[3 * j + 0] = 0;
s_ffric[3 * j + 1] = 0;
s_ffric[3 * j + 2] = 0;
}
else
{
// position for j-particle
float_value_t x_j[3];
x_j[0] = x_input[3 * j] * C_bar;
x_j[1] = x_input[3 * j + 1] * C_bar;
x_j[2] = x_input[3 * j + 2] * C_bar;
// velocity for j-particle
float_value_t dx_j[3];
dx_j[0] = x_input[3 * j + 3 * N_PARTICLES] * omega_bar* C_bar;
dx_j[1] = x_input[3 * j + 1 + 3 * N_PARTICLES] * omega_bar* C_bar;
dx_j[2] = x_input[3 * j + 2 + 3 * N_PARTICLES] * omega_bar* C_bar;
// overlap between i-particle and j-particle
float_value_t delta_ij = norm_x_ij - (Rs[i] + Rs[j]);
// negative overlap indicates a collision
if (delta_ij < 0)
{
float_value_t v_delta_ij = ((x_j[0] - x_i[0])*(dx_j[0] - dx_i[0])+(x_j[1] - x_i[1])*(dx_j[1] - dx_i[1])+(x_j[2] - x_i[2])*(dx_j[2] - dx_i[2])) / (delta_ij + (Rs[i] + Rs[j]));
s_fcon[3 * j] = -4.0 / 3 * sqrt(R_aster)*E_aster*pow(abs(delta_ij), 1.5)*(x_j[0] - x_i[0]) / (delta_ij+ (Rs[i] + Rs[j])) -2 * zeta*sqrt(2 * E_aster* Ms[i] * Ms[j] / (Ms[i] + Ms[j])*sqrt(R_aster))*pow(abs(delta_ij), 0.25)*v_delta_ij*(x_j[0] - x_i[0]) / (delta_ij + (Rs[i] + Rs[j]));
if (s_fcon[3 * j] * (x_j[0] - x_i[0]) > 0)
s_fcon[3 * j] = 0;
s_fcon[3 * j + 1] = -4.0 / 3 * sqrt(R_aster)*E_aster*pow(abs(delta_ij), 1.5)*(x_j[1] - x_i[1]) / (delta_ij + (Rs[i] + Rs[j])) -2 * zeta*sqrt(2 * E_aster* Ms[i] * Ms[j] / (Ms[i] + Ms[j])*sqrt(R_aster))*pow(abs(delta_ij), 0.25)*v_delta_ij*(x_j[1] - x_i[1]) / (delta_ij + (Rs[i] + Rs[j]));
if (s_fcon[3 * j + 1] * (x_j[1] - x_i[1]) > 0)
s_fcon[3 * j + 1] = 0;
s_fcon[3 * j + 2] = -4.0 / 3 * sqrt(R_aster)*E_aster*pow(abs(delta_ij), 1.5)*(x_j[2] - x_i[2]) / (delta_ij + (Rs[i] + Rs[j])) -2 * zeta*sqrt(2 * E_aster*Ms[i] * Ms[j] / (Ms[i] + Ms[j])*sqrt(R_aster))*pow(abs(delta_ij), 0.25)*v_delta_ij*(x_j[2] - x_i[2]) / (delta_ij + (Rs[i] + Rs[j]));
if (s_fcon[3 * j + 2] * (x_j[2] - x_i[2]) > 0)
s_fcon[3 * j + 2] = 0;
float_value_t vtij[3];
vtij[0] = x_j[0] - x_i[0] - ((dot3(dx_j, x_i, x_j) - dot3(dx_i, x_i, x_j)) / (delta_ij + (Rs[i] + Rs[j]))) *((x_j[0] - x_i[0]) / (delta_ij + (Rs[i] + Rs[j])));
vtij[1] = x_j[1] - x_i[1] - ((dot3(dx_j, x_i, x_j) - dot3(dx_i, x_i, x_j)) / (delta_ij + (Rs[i] + Rs[j]))) *((x_j[1] - x_i[1]) / (delta_ij + (Rs[i] + Rs[j])));
vtij[2] = x_j[2] - x_i[2] - ((dot3(dx_j, x_i, x_j) - dot3(dx_i, x_i, x_j)) / (delta_ij + (Rs[i] + Rs[j]))) *((x_j[2] - x_i[2]) / (delta_ij + (Rs[i] + Rs[j])));
s_ffric[3 * j + 0] = mu_d*norm((float_value_t*)&(s_fcon[3 * j]))*(vtij[0] - vtij[0]) / sqrt(vtij[0] * vtij[0] + vtij[1] * vtij[1] + vtij[2] * vtij[2]);
s_ffric[3 * j + 1] = mu_d*norm((float_value_t*)&(s_fcon[3 * j]))*(vtij[1] - vtij[1]) / sqrt(vtij[0] * vtij[0] + vtij[1] * vtij[1] + vtij[2] * vtij[2]);
s_ffric[3 * j + 2] = mu_d*norm((float_value_t*)&(s_fcon[3 * j]))*(vtij[2] - vtij[2]) / sqrt(vtij[0] * vtij[0] + vtij[1] * vtij[1] + vtij[2] * vtij[2]);
}
else
{
s_fcon[3 * j + 0] = 0;
s_fcon[3 * j + 1] = 0;
s_fcon[3 * j + 2] = 0;
s_ffric[3 * j + 0] = 0;
s_ffric[3 * j + 1] = 0;
s_ffric[3 * j + 2] = 0;
}
}
}
__syncthreads();
// after syncronization it begins reduction to have the sum of s_fcon and s_ffric
if (j < N_PARTICLES / 2)
{
s_fcon[3 * j + 0] += s_fcon[3 * (j + N_PARTICLES / 2) + 0];
s_fcon[3 * j + 1] += s_fcon[3 * (j + N_PARTICLES / 2) + 1];
s_fcon[3 * j + 2] += s_fcon[3 * (j + N_PARTICLES / 2) + 2];
s_ffric[3 * j + 0] += s_ffric[3 * (j + N_PARTICLES / 2) + 0];
s_ffric[3 * j + 1] += s_ffric[3 * (j + N_PARTICLES / 2) + 1];
s_ffric[3 * j + 2] += s_ffric[3 * (j + N_PARTICLES / 2) + 2];
}
else if (j<N_PARTICLES)
{
s_fcon[3 * j + 0] = 0;
s_fcon[3 * j + 1] = 0;
s_fcon[3 * j + 2] = 0;
s_ffric[3 * j + 0] = 0;
s_ffric[3 * j + 1] = 0;
s_ffric[3 * j + 2] = 0;
}
__syncthreads(); //
for (unsigned int s = (1 << int(log10f(N_PARTICLES / 2.0) / log10f(2.0))); s > 0; s >>= 1)
{
if (j < s)
{
s_fcon[3 * j + 0] += s_fcon[3 * (j + s) + 0];
s_fcon[3 * j + 1] += s_fcon[3 * (j + s) + 1];
s_fcon[3 * j + 2] += s_fcon[3 * (j + s) + 2];
s_ffric[3 * j + 0] += s_ffric[3 * (j + s) + 0];
s_ffric[3 * j + 1] += s_ffric[3 * (j + s) + 1];
s_ffric[3 * j + 2] += s_ffric[3 * (j + s) + 2];
}
__syncthreads();
}
__syncthreads();
// reduction finishes
// the last thread sum forces of particle and wall
if (j == N_PARTICLES)
{
F_con[0] = s_fcon[0] + F_con_iw[0];
F_con[1] = s_fcon[1] + F_con_iw[1];
F_con[2] = s_fcon[2] + F_con_iw[2];
F_fric[0] = s_ffric[0] + F_fric_iw[0];
F_fric[1] = s_ffric[1] + F_fric_iw[1];
F_fric[2] = s_ffric[2] + F_fric_iw[2];
// bound the contact force not to go wild
float_value_t r = norm(F_con) / (Ms[i] * 9.8);
if (r>MAX_R)
{
F_con[0] = F_con[0] / r * MAX_R;
F_con[1] = F_con[1] / r * MAX_R;
F_con[2] = F_con[2] / r * MAX_R;
F_fric[0] = F_fric[0] / r * MAX_R;
F_fric[1] = F_fric[1] / r * MAX_R;
F_fric[2] = F_fric[2] / r * MAX_R;
}
// velocity
res[3 * i] = v3s[3 * i + 0];
res[3 * i + 1] = v3s[3 * i + 1];
res[3 * i + 2] = v3s[3 * i + 2];
// acceleration
res[3 * i + 3 * N_PARTICLES] = (F_env[0] + F_con[0] + F_fric[0]) / (Ms[i] * C_bar* omega_bar *omega_bar);
res[3 * i + 1 + 3 * N_PARTICLES] = (F_env[1] + F_con[1] + F_fric[1]) / (Ms[i] * C_bar* omega_bar *omega_bar);
res[3 * i + 2 + 3 * N_PARTICLES] = (F_env[2] + F_con[2] + F_fric[2]+ g_grav*Ms[i]) / (Ms[i] * C_bar* omega_bar *omega_bar);
//v3s[3 * i + 0] = v3s[3 * i] + delta_t*res[3 * i + 3 * N_PARTICLES];
//v3s[3 * i + 1] = v3s[3 * i + 1] + delta_t*res[3 * i + 1 + 3 * N_PARTICLES];
//v3s[3 * i + 2] = v3s[3 * i + 2] + delta_t*res[3 * i + 2 + 3 * N_PARTICLES];
}
__syncthreads();
// this actually acts as vector_add, so we save a kernel launch
if (i < N_PARTICLES)
{
if (j < 3)
{
res1[3 * i + j] = coeff*res[3 * i + j] + x_input[3 * i + j];
}
else if(j>=3 && j<6)
res1[3 * i + j-3 + 3 * N_PARTICLES] = coeff*res[3 * i + j - 3 + 3 * N_PARTICLES] + x_input[3 * i + j - 3 + 3 * N_PARTICLES];
}
}
int main(void)
{
particles_t* particles = (particles_t *)malloc(sizeof(particles_t));
initParticles(particles);
//printf("vel_max: %e\n",g_grav / (6 * M_PI*mu_f*MU/C_bar/C_bar / (4.0 / 3 * M_PI*MU*MU*MU)));
// simulation of the deposition of powder particles
float_value_t *x, *x1, *y2, *y3, *y4, *fy1, *fy2, *fy3, *fy4_1;
particles_t dp[1];
cudaMalloc((void**)&(dp->xyzs), 3 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&(dp->rs), 1 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&(dp->ms), 1 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&(dp->fs), 3 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&(dp->vs), 3 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&(dp->as), 3 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&(dp->E), 1 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&(dp->mu), 1 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&(dp->vs), 3 * sizeof(float_value_t)*N_PARTICLES);
cudaMemcpy((dp->xyzs), (particles->xyzs), 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMemcpy((dp->vs), (particles->vs), 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMemcpy((dp->as), (particles->as), 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMemcpy((dp->fs), (particles->fs), 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMemcpy((dp->rs), (particles->rs), 1 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMemcpy((dp->E), (particles->E), 1 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMemcpy((dp->ms), (particles->ms), 1 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMemcpy((dp->mu), (particles->mu), 1 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMemcpy((dp->vs), (particles->vs), 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMalloc((void**)&x, 6 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&x1, 6 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&y2, 6 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&y3, 6 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&y3, 6 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&y4, 6 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&fy1, 6 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&fy2, 6 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&fy3, 6 * sizeof(float_value_t)*N_PARTICLES);
cudaMalloc((void**)&fy4_1, 6 * sizeof(float_value_t)*N_PARTICLES);
//float_value_t *fy2_2 = (float_value_t*)malloc(6 * sizeof(float_value_t)*N_PARTICLES);
//float_value_t *fy3_2 = (float_value_t*)malloc(6 * sizeof(float_value_t)*N_PARTICLES);
std::string coord0_file = "xyzs0.txt";
writeFile(particles->xyzs, particles->n_particles, 3, coord0_file);
cudaEvent_t startEvent_inc, stopEvent_inc;
cudaEventCreate(&startEvent_inc);
cudaEventCreate(&stopEvent_inc);
for (int k = 0; k < N_PARTICLES; k++)
{
particles->xyzs[3 * k] = particles->xyzs[3 * k] / C_bar;
particles->xyzs[3 * k + 1] = particles->xyzs[3 * k+1] / C_bar;
particles->xyzs[3 * k + 2] = particles->xyzs[3 * k + 2] / C_bar;
}
cudaEventRecord(startEvent_inc, 0); // starting timing for inclusive
cudaMemcpy(x, particles->xyzs, 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
cudaMemcpy(&(x[3 * N_PARTICLES]), particles->vs, 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyHostToDevice);
float elapsedTime_inc;
printf("timestep: %e\nomega: %e\n", TIME_STEP, omega_bar);
double current_time = 0.0f;
// when step++, one more percent of total time simulated
int step = 0;
// nondimensionalized total time
double T_total = TIME*omega_bar;
// this should never be true if the solution is stable
bool b_blowup = false;
while (current_time < T_total &&!b_blowup)
{
// RK-4, invoke f() four times, then an addition (vector_add3)
f <<< N_PARTICLES, N_PARTICLES + 1 >>> (fy1, y2, 0.5*TIME_STEP, (dp->E), (dp->rs), (dp->ms), (dp->mu), &(x[3 * N_PARTICLES]), x, 0);
//vector_add <<< (6 * N_PARTICLES + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >>> (x, fy1, y2, 0.5*TIME_STEP, 6 * N_PARTICLES);
f <<< N_PARTICLES, N_PARTICLES+1 >>> (fy2, y3, 0.5*TIME_STEP, (dp->E), (dp->rs), (dp->ms), (dp->mu), &(x[3 * N_PARTICLES]), y2, 0.5 * TIME_STEP);
//vector_add <<< (6 * N_PARTICLES + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >>> (x, fy2, y3, 0.5*TIME_STEP, 6 * N_PARTICLES);
f <<< N_PARTICLES, N_PARTICLES + 1 >>> (fy3, y4, TIME_STEP, (dp->E), (dp->rs), (dp->ms), (dp->mu), &(x[3 * N_PARTICLES]), y3, 0.5 * TIME_STEP);
//vector_add <<< (6 * N_PARTICLES + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >>> (x, fy3, y4, TIME_STEP, 6 * N_PARTICLES);
f <<< N_PARTICLES, N_PARTICLES + 1 >>> (fy4_1, x1, 1.0 / 6 * TIME_STEP, (dp->E), (dp->rs), (dp->ms), (dp->mu), &(x[3 * N_PARTICLES]), y4, TIME_STEP);
// vector_add3 is equivalent to several vector_add
//vector_add <<< (6 * N_PARTICLES + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >>>(x, fy1, x, 1.0 / 6 * TIME_STEP, 6 * N_PARTICLES);
//vector_add <<< (6 * N_PARTICLES + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >>>(x, fy2, x, 2.0 / 6 * TIME_STEP, 6 * N_PARTICLES);
//vector_add <<< (6 * N_PARTICLES + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >>>(x, fy3, x, 2.0 / 6 * TIME_STEP, 6 * N_PARTICLES);
//vector_add <<< (6 * N_PARTICLES + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >>>(x, fy4_1, x, 1.0 / 6 * TIME_STEP, 6 * N_PARTICLES);
vector_add3 <<< (6 * N_PARTICLES + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >>> (x, fy2, fy3, fy4_1, 6 * N_PARTICLES);
//vel_checker <<< (N_PARTICLES + BLOCKDIM - 1) / BLOCKDIM, BLOCKDIM >>> (x, x1, N_PARTICLES);
current_time += TIME_STEP;
// this should excute 100 times independent of T_total
if (current_time / T_total > step*0.01)
{
cudaMemcpy(particles->xyzs, x, 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyDeviceToHost);
cudaMemcpy(particles->vs, &(x[3 * N_PARTICLES]), 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyDeviceToHost);
float_value_t min_z = boundary_up, min_vz;
for (int k = 0; k < N_PARTICLES; k++)
{
particles->xyzs[3 * k] = particles->xyzs[3 * k] * C_bar;
particles->xyzs[3 * k + 1] = particles->xyzs[3 * k + 1] * C_bar;
particles->xyzs[3 * k + 2] = particles->xyzs[3 * k + 2] * C_bar;
particles->vs[3 * k] = particles->vs[3 * k] * C_bar*omega_bar;
particles->vs[3 * k + 1] = particles->vs[3 * k + 1] * C_bar*omega_bar;
particles->vs[3 * k + 2] = particles->vs[3 * k + 2] * C_bar*omega_bar;
if (particles->xyzs[3 * k + 2] < min_z)
{
min_z = particles->xyzs[3 * k + 2];
min_vz = particles->vs[3 * k + 2];
}
// check if the solution is stable, should never excute
if (abs(particles->xyzs[3 * k]) > 0.01 || abs(particles->xyzs[3 * k + 1]) > 0.01 || abs(particles->xyzs[3 * k + 2]) > 0.01)
{
printf("blow up!!! time: %f\n", current_time);
b_blowup = true;
break;
}
// check if the solution is stable, should never excute
if (isnan(particles->xyzs[3 * k]) || isnan(particles->xyzs[3 * k]) || isnan(particles->xyzs[3 * k]))
{
printf("blowup!\n");
getchar();
exit(1);
}
printf("%d paritcle: z = %e, r = %e, v = [%e %e %e]\n", k, particles->xyzs[3 * k + 2], particles->rs[k] , particles->vs[3 * k + 0], particles->vs[3 * k + 1], particles->vs[3 * k + 2]);
}
// out put xyzs.txt, vxyzs.txt, and radius.txt for position, velocity and radius
// char p_step[10];
// sprintf(p_step,"%03d",step);
// std::string coord_file = std::string("xyzs") + std::string(p_step) + std::string(".txt");
// std::string v_file = std::string("vxyzs") + std::string(p_step) + std::string(".txt");
std::string coord_file = "xyzs.txt";
std::string v_file = "cxyzs.txt";
std::string radius_file = "radius.txt";
printf("writing coordinates to %s\n", coord_file.c_str());
writeFile(particles->xyzs, particles->n_particles, 3, coord_file);
writeFile(particles->rs, particles->n_particles, 1, radius_file);
// writeFile(particles->vs, particles->n_particles, 3, v_file);
printf("%d%% has been completed. min z = %e\t vz = %e\n", step,min_z, min_vz);
step++;
}
}
cudaMemcpy(particles->xyzs, x, 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyDeviceToHost);
cudaMemcpy(particles->vs, &(x[3 * N_PARTICLES]), 3 * sizeof(float_value_t)*N_PARTICLES, cudaMemcpyDeviceToHost);
cudaEventRecord(stopEvent_inc, 0);
cudaEventSynchronize(stopEvent_inc);
cudaEventElapsedTime(&elapsedTime_inc, startEvent_inc, stopEvent_inc);
printf("total time: %f\n", elapsedTime_inc);
// write to file
// simulation of temperature evolution
//
// free memory
freeParticles(particles, dp);
cudaFree(x);
cudaFree(x1);
cudaFree(y2);
cudaFree(y3);
cudaFree(y4);
cudaFree(fy1);
cudaFree(fy2);
cudaFree(fy3);
cudaFree(fy4_1);
return 0;
}
// Releases every host-side buffer owned by `particles`, then the struct
// itself, and every device-side buffer referenced by `dp`.
// NOTE: the `dp` struct itself is intentionally not freed here, matching
// the original ownership model.
void freeParticles(particles_t * particles, particles_t * dp)
{
    // Host buffers: free each and null the owning pointer.
    float_value_t ** hostBuffers[] = {
        &particles->xyzs, &particles->rs, &particles->ms, &particles->fs,
        &particles->vs, &particles->as, &particles->E, &particles->mu
    };
    for (size_t k = 0; k < sizeof(hostBuffers) / sizeof(hostBuffers[0]); ++k)
    {
        free(*hostBuffers[k]);
        *hostBuffers[k] = NULL;
    }
    free(particles);
    particles = NULL; // local-only assignment; kept from the original

    // Device buffers.
    cudaFree(dp->xyzs);
    cudaFree(dp->rs);
    cudaFree(dp->ms);
    cudaFree(dp->fs);
    cudaFree(dp->vs);
    cudaFree(dp->as);
    cudaFree(dp->E);
    cudaFree(dp->mu);
}
// Writes `array` (row x col, row-major) to `filename` as whitespace-separated
// text, one row per line.
// Fix: the original never checked whether the file opened; a bad path made
// every insertion silently fail. Now reports the error to stderr and returns.
void writeFile(float_value_t* array, int row, int col, std::string filename)
{
    std::ofstream myfile;
    myfile.open(filename);
    if (!myfile.is_open())
    {
        std::cerr << "ERROR: could not open " << filename << " for writing.\n";
        return;
    }
    for (int i = 0; i < row; i++)
    {
        for (int j = 0; j < col; j++)
        {
            myfile << array[i * col + j] << " ";
        }
        myfile << "\n";
    }
    std::cout << "Writing data to " << filename << ".\n";
    myfile.close();
}
// Initializes N_PARTICLES particles with random, non-overlapping positions
// inside the [0,length] x [0,width] x [MIN_HEIGHT,MAX_HEIGHT] box, random
// radii, uniform material properties, and a common initial z-velocity
// (nondimensionalized by omega_bar and C_bar).
void initParticles(particles_t * particles)
{
particles->n_particles = N_PARTICLES;
particles->xyzs = initFloatArray(particles->n_particles * 3);
particles->rs = initFloatArray(particles->n_particles);
particles->ms = initFloatArray(particles->n_particles);
particles->fs = initFloatArray(particles->n_particles * 3);
particles->vs = initFloatArray(particles->n_particles * 3); // zero init
// Every particle starts at rest in x/y with a common nondimensional vz.
for (int i = 0; i < N_PARTICLES; i++)
{
(particles->vs)[i * 3] = 0;
(particles->vs)[i * 3+1] = 0;
(particles->vs)[i * 3 +2] = v0/omega_bar/C_bar;
}
particles->as = initFloatArray(particles->n_particles * 3);
// NOTE(review): E and mu hold one scalar per particle below but are
// allocated with 3*N entries — looks over-allocated; confirm intent.
particles->E = initFloatArray(particles->n_particles * 3);
particles->mu = initFloatArray(particles->n_particles * 3);
bool find_particle = false;
float_value_t x, y, z, radius;
srand(time(NULL));
// Rejection sampling: draw a candidate sphere until it fits inside the
// x/y bounds and does not overlap any previously placed particle.
// NOTE(review): can loop indefinitely if the box is too crowded for
// N_PARTICLES spheres of this size range — no retry limit.
for (int i = 0; i < particles->n_particles; i++)
{
while (!find_particle)
{
while (true)
{
x = (float_value_t)rand()/RAND_MAX*length;
y = (float_value_t)rand() / RAND_MAX*width;
z = (float_value_t)rand() / RAND_MAX*(MAX_HEIGHT - MIN_HEIGHT)+ MIN_HEIGHT;
// Radius drawn uniformly in [MU-BIND, MU+BIND]; presumably MU is the
// mean radius and BIND the half-range — TODO confirm constants.
radius = (float_value_t)rand() / RAND_MAX*2* BIND+(MU- BIND);
// Only x/y containment is enforced; z may touch the height bounds.
if (x + radius <= length && x - radius >= 0 && y + radius <= width && y - radius >= 0) break;
}
find_particle = true;
// Overlap check against all previously accepted particles.
for (int j = 0; j < i; j++)
{
float_value_t dist = 0.0f;
dist += (x - particles->xyzs[j * 3])*(x - particles->xyzs[j * 3]);
dist += (y - particles->xyzs[j * 3 + 1])*(y - particles->xyzs[j * 3 + 1]);
dist += (z - particles->xyzs[j * 3 + 2])*(z - particles->xyzs[j * 3 + 2]);
dist = sqrt(dist);
if (dist < radius + particles->rs[j])
{
find_particle = false;
break;
}
}
}
find_particle = false;
particles->xyzs[i * 3 + 0] = x;
particles->xyzs[i * 3 + 1] = y;
particles->xyzs[i * 3 + 2] = z;
particles->rs[i] = radius;
// Mass of a sphere of density rho: (4/3) * pi * r^3 * rho.
particles->ms[i] = 4.0 / 3.0*M_PI*radius*radius*radius*rho;
particles->E[i] = E_modulus;
particles->mu[i] = nu;
// if(i == 0) printf("%0.15f\n", particles->ms[i]);
}
}
// Allocates an uninitialized array of `num_elements` float_value_t values.
// Aborts the program on allocation failure, so the returned pointer is
// always valid.
float_value_t* initFloatArray(int num_elements)
{
    float_value_t * buffer =
        (float_value_t *)malloc(sizeof(float_value_t) * num_elements);
    if (buffer != NULL)
    {
        return buffer;
    }
    fprintf(stderr, "malloc fails\n");
    exit(1);
}
// Loads particle velocities, positions and radii for N_PARTICLES particles
// from vxyzs.txt / xyzs.txt / radius.txt (whitespace-separated text, as
// produced by writeFile), nondimensionalizing velocities and positions by
// omega_bar / C_bar, and recomputing masses from the radii.
// Fixes: fopen results are now NULL-checked, and each fscanf is required to
// convert exactly one field — the old `rc *= fscanf(...)` test missed EOF,
// because fscanf returns -1 at end of file and the product stayed nonzero.
// NOTE(review): the "%lf" format requires float_value_t to be double.
void readParticles(particles_t * particles)
{
    FILE* fr = fopen("radius.txt", "r");
    FILE* fv = fopen("vxyzs.txt", "r");
    FILE* fx = fopen("xyzs.txt", "r");
    if (fr == NULL || fv == NULL || fx == NULL)
    {
        fprintf(stderr, "fopen fails\n");
        exit(1);
    }
    particles->n_particles = N_PARTICLES;
    particles->xyzs = initFloatArray(particles->n_particles * 3);
    particles->rs = initFloatArray(particles->n_particles);
    particles->ms = initFloatArray(particles->n_particles);
    particles->fs = initFloatArray(particles->n_particles * 3);
    particles->vs = initFloatArray(particles->n_particles * 3); // zero init
    particles->as = initFloatArray(particles->n_particles * 3);
    // NOTE(review): E and mu hold one scalar per particle but are allocated
    // with 3*N entries, matching the original code; kept for compatibility.
    particles->E = initFloatArray(particles->n_particles * 3);
    particles->mu = initFloatArray(particles->n_particles * 3);
    int rc;
    for (int i = 0; i < N_PARTICLES; i++)
    {
        rc = 1;
        rc &= (fscanf(fv, "%lf", &(particles->vs[i * 3])) == 1);
        rc &= (fscanf(fv, "%lf", &(particles->vs[i * 3 + 1])) == 1);
        rc &= (fscanf(fv, "%lf", &(particles->vs[i * 3 + 2])) == 1);
        (particles->vs)[i * 3] = (particles->vs)[i * 3]/omega_bar/C_bar;
        (particles->vs)[i * 3 + 1] = (particles->vs)[i * 3 + 1] / omega_bar/C_bar;
        (particles->vs)[i * 3 + 2] = (particles->vs)[i * 3 + 2] / omega_bar/C_bar;
        rc &= (fscanf(fx, "%lf", &(particles->xyzs[i * 3])) == 1);
        rc &= (fscanf(fx, "%lf", &(particles->xyzs[i * 3 + 1])) == 1);
        rc &= (fscanf(fx, "%lf", &(particles->xyzs[i * 3 + 2])) == 1);
        (particles->xyzs)[i * 3] = (particles->xyzs)[i * 3] / C_bar;
        (particles->xyzs)[i * 3 + 1] = (particles->xyzs)[i * 3 + 1] / C_bar;
        (particles->xyzs)[i * 3 + 2] = (particles->xyzs)[i * 3 + 2] / C_bar;
        rc &= (fscanf(fr, "%lf", &(particles->rs[i])) == 1);
        if (rc == 0)
        {
            fprintf(stderr, "fscanf fails\n");
            exit(1);
        }
        // Mass of a sphere of density rho: (4/3) * pi * r^3 * rho.
        particles->ms[i] = 4.0 / 3.0*M_PI*particles->rs[i] * particles->rs[i] * particles->rs[i] *rho;
        particles->E[i] = E_modulus;
        particles->mu[i] = nu;
    }
    fclose(fr);
    fclose(fv);
    fclose(fx);
}
|
20,959 | //pass
//--gridDim=40 --blockDim=256
typedef unsigned char Bool;
typedef unsigned int uint;
// One thread per angle: angle i is marked visible exactly when its
// prefix-scanned value does not exceed its original value.
__global__ void computeVisibilities_kernel(const float *angles,
                                           const float *scannedAngles,
                                           int numAngles,
                                           Bool *visibilities)
{
    const uint idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= (uint)numAngles)
        return;
    visibilities[idx] = (scannedAngles[idx] <= angles[idx]) ? 1 : 0;
}
|
20,960 | #include "cuda.h"
// Writes y[i] = 2 * x[i] for every index below n; surplus threads exit early.
__global__ void multiply_by_two(double *y, const double *x, int n){
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    y[idx] = x[idx] * 2.0;
}
// Host wrapper: doubles n elements of x into y
// (64 threads per block, ceil(n/64) blocks).
void multiply_by_two_forward(double *y, const double *x, int n){
    const int threads = 64;
    const int blocks = (n - 1) / threads + 1;
    multiply_by_two<<< blocks, threads >>>(y, x, n);
}
// Backward pass: d(2x)/dx = 2, so the gradient flows through the same kernel.
void multiply_by_two_backward(double *grad_x, const double *grad_y, int n){
    const int threads = 64;
    const int blocks = (n - 1) / threads + 1;
    multiply_by_two<<< blocks, threads >>>(grad_x, grad_y, n);
} |
20,961 | #include "includes.h"
// Twiddle pre-multiplication over the interleaved (re,im) array `a` of a
// real-valued transform of size n, using the cosine table `ct` (nc = n/4
// entries). Each thread j2 > 0 combines the symmetric pair a[2*j2] and
// a[n - 2*j2]; thread 0 (the DC/Nyquist pair) is deliberately skipped.
// Presumably the rdft pre/post-processing step of an Ooura-style real FFT —
// TODO confirm against the host-side transform.
// NOTE(review): there is no upper bound check on j2 — a thread with
// j2 >= n/4 would index ct[nc - j2] out of range, so the caller must launch
// exactly the required number of threads. Confirm launch configuration.
__global__ void pre_mul_kernel(int n, double *a, double *ct) {
const int j2 = blockIdx.x * blockDim.x + threadIdx.x;
double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki;
const int nc = n >> 2;
const int j = j2 << 1;
if (j2) {
int nminusj = n - j;
// Twiddle factor for this pair, read from the quarter-wave cosine table.
wkr = 0.5 - ct[nc - j2];
wki = ct[j2];
// Load the symmetric complex pair.
ajr = a[j];
aji = a[1 + j];
akr = a[nminusj];
aki = a[1 + nminusj];
// Butterfly: difference/sum, rotate by the twiddle, then redistribute.
xr = ajr - akr;
xi = aji + aki;
yr = wkr * xr - wki * xi;
yi = wkr * xi + wki * xr;
ajr -= yr;
aji -= yi;
akr += yr;
aki -= yi;
a[j] = ajr;
a[1 + j] = aji;
a[nminusj] = akr;
a[1 + nminusj] = aki;
}
} |
20,962 | // This is a generated file, do not edit it!
#pragma once
#include <stdint.h>
#define Constants_NumThreadsPerBlock 128
#define Constants_LargeBlock 1024
#define Constants_MaxClasses 32
#define Constants_MaxLevels 16
#define Constants_MaxAttributeAxes 40
#define Constants_MaxCategoricalAxes 8
#define Constants_MaxCategories 32
#define Constants_SplitType_Null 0xFF
#define Constants_SplitType_None 0
#define Constants_SplitType_Attribute 1
#define Constants_SplitType_Categorical 2
#define Constants_MaxSplits 48
#define Constants_MaxNodesAtSingleLevel 32768
#define Constants_MaxTotalNodes 65535
#define Constants_RequiredImprovementToSplit 0.001f
typedef struct Constants {
} Constants;
|
20,963 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <time.h>
#include "cuda_fp16.h"
#define L1_SIZE 65536
#define FP_TYPE double
#define FP_DEV_TYPE double
/* Kernel for vector addition */
/* Despite the name, this is a latency microbenchmark: each thread runs a
 * 1000-iteration dependent multiply-add chain on registers and records the
 * SM clock before and after via inline PTX, writing per-thread cycle counts
 * to startClk/stopClk and the numeric result to z.
 * NOTE(review): the temporaries are float while the arrays are FP_DEV_TYPE
 * (double) — presumably intentional to time FP32 ops; confirm.
 * NOTE(review): "bar.sync 0" sits inside the divergent `if (tid < n)`
 * branch; if n is not a multiple of blockDim.x the tail block's inactive
 * threads never reach the barrier — undefined behavior. Confirm n is always
 * a multiple of the block size.
 * The `lookup` parameter is unused in this kernel. */
__global__ void Vec_add(FP_DEV_TYPE x[], FP_DEV_TYPE y[], FP_DEV_TYPE z[], int n, FP_DEV_TYPE lookup[], uint32_t startClk[], uint32_t stopClk[]) {
/* blockDim.x = threads_per_block */
/* First block gets first threads_per_block components. */
/* Second block gets next threads_per_block components, etc. */
int tid = blockDim.x * blockIdx.x + threadIdx.x;
/* block_count*threads_per_block may be >= n */
// a register to avoid compiler optimization
//float sink = 0;
if (tid < n) {
float temp_x = x[tid];
float temp_y = y[tid];
float temp_z = 1e-8;
// synchronize all threads
asm volatile ("bar.sync 0;");
uint32_t start = 0;
// start timing: read the per-SM clock register
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
// dependent FMA chain; temp_z accumulates so iterations cannot be fused away
for (int i =0;i < 1000; i++)
temp_z += temp_y * temp_x * temp_x;
// synchronize all threads
//asm volatile ("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory (also keeps the loop live)
startClk[tid] = start;
stopClk[tid] = stop;
// dsink[tid] = sink;
z[tid] = temp_z;///1000.0;
}
} /* Vec_add */
/* Host code */
/* Host code: sets up the latency microbenchmark, runs the kernel 100 times,
 * and reports average wall time per launch, average per-thread clock-cycle
 * latency, and the average numerical error against the analytic result.
 * Fixes: d_lookup/d_startClk/d_stopClk were never cudaFree'd,
 * h_lookup/h_startClk/h_stopClk were never free'd, and the CUDA events were
 * never destroyed — all cleaned up now. */
int main(int argc, char* argv[]) {
    int n, i;
    FP_TYPE *h_x, *h_y, *h_z, *h_lookup;
    FP_DEV_TYPE *d_x, *d_y, *d_z, *d_lookup ;
    uint32_t *h_startClk, *h_stopClk;
    uint32_t *d_startClk, *d_stopClk;
    int threads_per_block;
    int block_count;
    size_t size, size_clock;
    cudaEvent_t start, stop;
    float elapsedTime;
    srand(time(0));
    /* Get number of components in vector */
    if (argc != 2) {
        fprintf(stderr, "usage: %s <vector order>\n", argv[0]);
        exit(0); /* NOTE(review): a nonzero exit code would be conventional */
    }
    n = strtol(argv[1], NULL, 10); // half2 = 2x half , reduce size
    size = n*sizeof(FP_TYPE);
    size_clock = n*sizeof(uint32_t);
    /* Allocate input vectors in host memory */
    h_x = (FP_TYPE*) malloc(size);
    h_y = (FP_TYPE*) malloc(size);
    h_z = (FP_TYPE*) malloc(size);
    h_startClk = (uint32_t*) malloc(size_clock);
    h_stopClk = (uint32_t*) malloc(size_clock);
    h_lookup = (FP_TYPE*) malloc(L1_SIZE*sizeof(FP_TYPE));
    /* Initialize input vectors */
    for (i = 0; i < n; i++) {
        h_x[i] = 0.5641895835477563; //R of circle with area = 10
        h_y[i] = 3.14159265358979;
    }
    for (i=0;i<L1_SIZE;i++)
        h_lookup[i] = (i*5)%L1_SIZE;
    /* Allocate vectors in device memory */
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    cudaMalloc(&d_z, size);
    cudaMalloc(&d_lookup, L1_SIZE*sizeof(FP_TYPE));
    cudaMalloc(&d_stopClk, size_clock);
    cudaMalloc(&d_startClk, size_clock);
    /* Copy vectors from host memory to device memory */
    cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_lookup, h_lookup, L1_SIZE*sizeof(FP_TYPE), cudaMemcpyHostToDevice);
    /* Define block size */
    threads_per_block = 256;
    block_count = (n + threads_per_block - 1)/threads_per_block; // ceil-div
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    /* 100 timed launches, each synchronized, to average out launch noise */
    for (int i =0; i<100 ; i++){
        Vec_add<<<block_count, threads_per_block>>>(d_x, d_y, d_z, n, d_lookup, d_startClk,d_stopClk);
        cudaDeviceSynchronize();
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start,stop);
    printf("Elapsed time : %f ms\n" ,elapsedTime/100);
    cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_startClk, d_startClk, size_clock, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_stopClk, d_stopClk, size_clock, cudaMemcpyDeviceToHost);
    uint32_t sum = 0;
    printf("clk cycles spent on each thread \n");
    for (i = 0; i < n; i++) {
        sum += h_stopClk[i] - h_startClk[i];
    }
    printf("\n -------- \n average latency (cycles) %f \n",float(sum)/n);
    /* Expected result: temp_z starts at 1e-8 and gains 1000 * (pi * r^2)
     * = 1000 * 10 / 10 per the chosen constants */
    double sum_err = 0.0;
    for (i = 0; i < n; i++) {
        sum_err += fabs(1000 + 1e-8 - h_z[i]);
    }
    printf("\n -------- \n avg error %.10f \n",sum_err/n);
    /* Free device memory */
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    cudaFree(d_lookup);
    cudaFree(d_startClk);
    cudaFree(d_stopClk);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* Free host memory */
    free(h_x);
    free(h_y);
    free(h_z);
    free(h_startClk);
    free(h_stopClk);
    free(h_lookup);
    return 0;
} /* main */
|
20,964 | #include"tracker.cuh"
using namespace std::chrono;
LaserScan * d_scan=NULL;
LaserScan h_scan;
EgoMotion h_egomotion;
ObjectState * d_particle=NULL;
ObjectState h_particle[RQPN];
ObjectState * d_tmpparticle=NULL;
ObjectState h_tmpparticle[MAXPN];
bool h_flag[MAXPN];
int h_seed[MAXPN];
thrust::minstd_rand * d_rng=NULL;
#define PI 3.14159265359
//==============================================================================
// Rebuilds the derived geometry of a rectangular object model from its pose
// (x, y, theta) and half-extents (lf front, lb back, wl left, wr right):
//  - (ox, oy): sensor origin expressed in the object frame
//  - cx/cy[0..3]: corner coordinates (0 front-left, 1 front-right,
//    2 back-right, 3 back-left, per the sign pattern below)
//  - cl[k]: length of edge k (edges 0/2 span the width, 1/3 the length)
//  - bid[k]: laser-beam index of corner k's bearing (angular resolution
//    `density` radians per beam)
//  - eid[0..1]: up to two edges facing the sensor, chosen by which side of
//    the rectangle the origin lies on; -1 marks "no edge".
__host__ __device__
void deviceBuildModel(ObjectState & state, double & density)
{
double c=cos(state.theta);
double s=sin(state.theta);
// Inverse rigid transform: sensor origin in the object frame.
state.ox=-c*state.x-s*state.y;
state.oy=s*state.x-c*state.y;
// The four corners in the world frame.
state.cx[0]=c*state.lf-s*state.wl+state.x; state.cy[0]=s*state.lf+c*state.wl+state.y;
state.cx[1]=c*state.lf+s*state.wr+state.x; state.cy[1]=s*state.lf-c*state.wr+state.y;
state.cx[2]=-c*state.lb+s*state.wr+state.x; state.cy[2]=-s*state.lb-c*state.wr+state.y;
state.cx[3]=-c*state.lb-s*state.wl+state.x; state.cy[3]=-s*state.lb+c*state.wl+state.y;
state.cl[0]=state.cl[2]=state.wl+state.wr;
state.cl[1]=state.cl[3]=state.lf+state.lb;
// Beam index of each corner: bearing shifted from [-pi,pi] to [0,2*pi].
state.bid[0]=(atan2(state.cy[0],state.cx[0])+PI)/density;
state.bid[1]=(atan2(state.cy[1],state.cx[1])+PI)/density;
state.bid[2]=(atan2(state.cy[2],state.cx[2])+PI)/density;
state.bid[3]=(atan2(state.cy[3],state.cx[3])+PI)/density;
// Visible-edge selection: a corner region of the object frame exposes two
// edges, a side region exposes one, and the interior exposes none.
if(state.ox>state.lf)
{
if(state.oy>state.wl)
{
state.eid[0]=0;state.eid[1]=3;
}
else if(state.oy<-state.wr)
{
state.eid[0]=0;state.eid[1]=1;
}
else
{
state.eid[0]=0;state.eid[1]=-1;
}
}
else if(state.ox<-state.lb)
{
if(state.oy>state.wl)
{
state.eid[0]=2;state.eid[1]=3;
}
else if(state.oy<-state.wr)
{
state.eid[0]=2;state.eid[1]=1;
}
else
{
state.eid[0]=2;state.eid[1]=-1;
}
}
else
{
if(state.oy>state.wl)
{
state.eid[0]=3;state.eid[1]=-1;
}
else if(state.oy<-state.wr)
{
state.eid[0]=1;state.eid[1]=-1;
}
else
{
state.eid[0]=-1;state.eid[1]=-1;
}
}
return;
}
// Scores one visible edge of the object model against the laser scan,
// accumulating an annealed log-likelihood into state.weight and a supporting
// beam count into state.count. For each beam between the edge's two corner
// bearings, the expected range P[2] is computed by ray/edge intersection and
// bracketed into bands P[0..3]; the measured range's band selects the
// log-weight contribution. If beamnum/beamid are non-NULL, the indices of
// beams that hit the edge are appended for the caller.
// `uncertainflag` skips edges whose size uncertainty (dlf/dwr/dlb/dwl)
// exceeds UNCERTAINTHRESH.
__host__ __device__
void deviceMeasureEdge(ObjectState & state, int edgeid, LaserScan * scan, double anneal, int * beamnum, int * beamid, bool uncertainflag)
{
if(state.eid[edgeid]<0)
{
return;
}
if(uncertainflag)
{
// Skip edges whose corresponding extent is still too uncertain.
switch(state.eid[edgeid])
{
case 0:
if(state.dlf>UNCERTAINTHRESH)
{
return;
}
break;
case 1:
if(state.dwr>UNCERTAINTHRESH)
{
return;
}
break;
case 2:
if(state.dlb>UNCERTAINTHRESH)
{
return;
}
break;
case 3:
if(state.dwl>UNCERTAINTHRESH)
{
return;
}
break;
default:
break;
}
}
// Beam range subtended by this edge, unwrapped across the 2*pi seam.
int starteid=state.eid[edgeid];
int endeid=(state.eid[edgeid]+1)%4;
int startbid=state.bid[starteid];
int endbid=state.bid[endeid];
if(startbid>endbid)
{
endbid+=scan->beamnum;
}
int totalbeam=(endbid-startbid)+1;
// NOTE(review): an edge covering <= 3 beams is invalidated here, but the
// loop below still runs using the now -1 eid slot — possibly a missing
// early return; confirm intended behavior.
if(totalbeam<=3)
{
state.eid[edgeid]=-1;
}
// Edge direction (dx1, dy1) and its unit normal (dx2, dy2).
double dx1=state.cx[endeid]-state.cx[starteid];
double dy1=state.cy[endeid]-state.cy[starteid];
double dx2=-dy1/state.cl[starteid];
double dy2=dx1/state.cl[starteid];
double density=2*PI/scan->beamnum;
// int midbid=(startbid+endbid)/2;
for(int i=startbid;i<=endbid;i++)
{
double P[4]={MAXBEAM,MAXBEAM,MAXBEAM,MAXBEAM};
int tmpid=i%scan->beamnum;
// double weightsigma=abs(i-midbid)*2.0/totalbeam*0.09+0.01;
double bear=tmpid*density-PI;
double c=cos(bear);
double s=sin(bear);
// Edge direction expressed in the beam's rotated frame; tmpy == 0 means
// the beam is parallel to the edge (no intersection).
double tmpx=c*dx1+s*dy1;
double tmpy=s*dx1-c*dy1;
if(tmpy!=0)
{
// Expected range along this beam to the edge line.
double beta=tmpx/tmpy*(c*state.cy[starteid]-s*state.cx[starteid])+(c*state.cx[starteid]+s*state.cy[starteid]);
if(beta>=MINBEAM&&beta<=MAXBEAM)
{
P[2]=beta;
// gamma0/1/2: range tolerances from offsetting the edge along its
// normal by the MARGIN constants (or distance-scaled when close).
double gamma0,gamma1,gamma2;
if(beta<NEARESTRING)
{
// NOTE(review): gamma0 and gamma2 here are computed from identical
// expressions (both offset by `beta`), unlike the else branch which
// uses three distinct margins — looks like a copy-paste slip; confirm.
gamma0=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*beta)-s*(state.cx[starteid]+dx2*beta))+c*(state.cx[starteid]+dx2*beta)+s*(state.cy[starteid]+dy2*beta)));
gamma1=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*2)-s*(state.cx[starteid]+dx2*2))+c*(state.cx[starteid]+dx2*2)+s*(state.cy[starteid]+dy2*2)));
gamma2=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*beta)-s*(state.cx[starteid]+dx2*beta))+c*(state.cx[starteid]+dx2*beta)+s*(state.cy[starteid]+dy2*beta)));
}
else
{
gamma0=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*MARGIN0)-s*(state.cx[starteid]+dx2*MARGIN0))+c*(state.cx[starteid]+dx2*MARGIN0)+s*(state.cy[starteid]+dy2*MARGIN0)));
gamma1=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*MARGIN1)-s*(state.cx[starteid]+dx2*MARGIN1))+c*(state.cx[starteid]+dx2*MARGIN1)+s*(state.cy[starteid]+dy2*MARGIN1)));
gamma2=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*MARGIN2)-s*(state.cx[starteid]+dx2*MARGIN2))+c*(state.cx[starteid]+dx2*MARGIN2)+s*(state.cy[starteid]+dy2*MARGIN2)));
}
// Range bands around the expected hit, clamped to the sensor limits:
// P[0] < P[1] < P[2] (expected) < P[3].
P[1]=P[2]-gamma0>=MINBEAM?P[2]-gamma0:MINBEAM;
P[3]=P[2]+gamma1<=MAXBEAM?P[2]+gamma1:MAXBEAM;
P[0]=P[2]-gamma2>=MINBEAM?P[2]-gamma2:MINBEAM;
double tmplogweight;
if(scan->length[tmpid]<=P[0])
{
// Measurement far short of the edge: neutral evidence.
tmplogweight=0;
// double delta=scan->length[tmpid]-P[0];
// double w1=WEIGHT0-WEIGHT0;
// double w2=WEIGHT1-WEIGHT0;
// tmplogweight=w1+(w2-w1)*exp(-delta*delta/0.01);
}
else if(scan->length[tmpid]<=P[1])
{
tmplogweight=WEIGHT1-WEIGHT0;
// double delta=scan->length[tmpid]-P[1];
// double w1=WEIGHT1-WEIGHT0;
// double w2=WEIGHT2-WEIGHT0;
// tmplogweight=w1+(w2-w1)*exp(-delta*delta/0.01);
}
else if(scan->length[tmpid]<=P[3])
{
// Measurement within the hit band: record the supporting beam.
if(beta>=NEARESTRING)
{
if(beamnum!=NULL&&beamid!=NULL&&totalbeam>3)
{
beamid[*beamnum]=tmpid;
(*beamnum)++;
}
state.count++;
}
// tmplogweight=WEIGHT2-WEIGHT0;
// Gaussian-shaped reward peaking at the expected range.
double delta=scan->length[tmpid]-P[2];
double w1=WEIGHT2-WEIGHT0;
double w2=2*w1;
tmplogweight=(w1+(w2-w1)*exp(-delta*delta/0.01));
}
else
{
// Measurement passes beyond the edge: penalized.
tmplogweight=WEIGHT3-WEIGHT0;
// double delta=scan->length[tmpid]-P[3];
// double w1=WEIGHT3-WEIGHT0;
// double w2=WEIGHT2-WEIGHT0;
// tmplogweight=w1+(w2-w1)*exp(-delta*delta/0.01);
}
state.weight+=tmplogweight/anneal;
}
}
}
}
// Re-expresses the object state in the new ego frame: rotate the position by
// the ego rotation dtheta, translate by (dx, dy), and advance the heading.
__host__ __device__
void deviceEgoMotion(ObjectState & state, EgoMotion & egomotion)
{
    const double ct = cos(egomotion.dtheta);
    const double st = sin(egomotion.dtheta);
    const double movedX = ct * state.x - st * state.y + egomotion.dx;
    const double movedY = st * state.x + ct * state.y + egomotion.dy;
    state.x = movedX;
    state.y = movedY;
    state.theta += egomotion.dtheta;
}
// Propagates state0 one time step (egomotion.dt) with an Ackermann-style
// motion model into state1, then folds in the ego motion:
//  - zero velocity: pose unchanged, curvature/slip reset
//  - zero curvature: straight-line motion along the heading
//  - otherwise: circular-arc motion of radius 1/k with slip angle a.
// Aliasing-safe: callers may pass the same object for state0 and state1
// (the first statement copies state0 into state1 and only state1 is used
// afterwards).
__host__ __device__
void deviceAckermannModel(ObjectState & state0, ObjectState & state1, EgoMotion & egomotion)
{
state1=state0;
if(state1.v==0)
{
state1.k=0;
state1.a=0;
deviceEgoMotion(state1,egomotion);
return;
}
// c/s are only needed by the straight-line branch below.
double c=cos(state1.theta);
double s=sin(state1.theta);
if(state1.k==0)
{
state1.x=state1.x+c*state1.v*egomotion.dt;
state1.y=state1.y+s*state1.v*egomotion.dt;
state1.a=0;
deviceEgoMotion(state1,egomotion);
return;
}
// Arc motion: heading + slip angle before and after the turn.
double c0=cos(state1.theta+state1.a);
double s0=sin(state1.theta+state1.a);
state1.omega=state1.v*state1.k;
double dtheta=state1.omega*egomotion.dt;
state1.theta+=dtheta;
double c1=cos(state1.theta+state1.a);
double s1=sin(state1.theta+state1.a);
// Chord of the circular arc of radius R = 1/k.
double R=1/state1.k;
state1.x=state1.x+R*(-s0+s1);
state1.y=state1.y+R*(c0-c1);
deviceEgoMotion(state1,egomotion);
return;
}
//==============================================================================
// One thread per RNG slot: constructs rng[id] from its per-thread seed.
__global__
void kernelSetRandomSeed(int * seed, thrust::minstd_rand * rng, int tmppnum)
{
    GetThreadID_1D(id);
    if (id < tmppnum)
    {
        rng[id] = thrust::minstd_rand(seed[id]);
    }
}
// Measurement-update kernel: each of the tmppnum threads clones particle
// [id/SPN] (SPN samples per particle), perturbs its shape parameters
// (theta, wl, wr, lf, lb) uniformly within the offset window clamped to the
// state constraints, rebuilds the box model, and scores both visible edges
// against the scan.
// NOTE: each uniform_real_distribution draw advances rng[id]; the order of
// the draws below is part of the reproducible behavior.
__global__
void kernelMeasureModel(LaserScan * scan, ObjectState * particle, ObjectState * tmpparticle, int tmppnum, thrust::minstd_rand * rng, ObjectStateOffset objectstateoffset, StateConstrain stateconstrain, EgoMotion egomotion)
{
GetThreadID_1D(id);
if(id>=tmppnum)
{
return;
}
int pid=id/SPN;
tmpparticle[id]=particle[pid];
// Heading is only perturbed while its search window exceeds the target
// precision; the shape extents are always perturbed.
if(objectstateoffset.thetaoff>objectstateoffset.thetaprec)
{
double thetamin=tmpparticle[id].theta-objectstateoffset.thetaoff;thetamin=thetamin>stateconstrain.thetamin?thetamin:stateconstrain.thetamin;
double thetamax=tmpparticle[id].theta+objectstateoffset.thetaoff;thetamax=thetamax<stateconstrain.thetamax?thetamax:stateconstrain.thetamax;
tmpparticle[id].theta=thrust::random::uniform_real_distribution<double>(thetamin,thetamax)(rng[id]);
}
double wlmin=tmpparticle[id].wl-objectstateoffset.wloff;wlmin=wlmin>stateconstrain.wlmin?wlmin:stateconstrain.wlmin;
double wlmax=tmpparticle[id].wl+objectstateoffset.wloff;wlmax=wlmax<stateconstrain.wlmax?wlmax:stateconstrain.wlmax;
tmpparticle[id].wl=thrust::random::uniform_real_distribution<double>(wlmin,wlmax)(rng[id]);
double wrmin=tmpparticle[id].wr-objectstateoffset.wroff;wrmin=wrmin>stateconstrain.wrmin?wrmin:stateconstrain.wrmin;
double wrmax=tmpparticle[id].wr+objectstateoffset.wroff;wrmax=wrmax<stateconstrain.wrmax?wrmax:stateconstrain.wrmax;
tmpparticle[id].wr=thrust::random::uniform_real_distribution<double>(wrmin,wrmax)(rng[id]);
double lfmin=tmpparticle[id].lf-objectstateoffset.lfoff;lfmin=lfmin>stateconstrain.lfmin?lfmin:stateconstrain.lfmin;
double lfmax=tmpparticle[id].lf+objectstateoffset.lfoff;lfmax=lfmax<stateconstrain.lfmax?lfmax:stateconstrain.lfmax;
tmpparticle[id].lf=thrust::random::uniform_real_distribution<double>(lfmin,lfmax)(rng[id]);
double lbmin=tmpparticle[id].lb-objectstateoffset.lboff;lbmin=lbmin>stateconstrain.lbmin?lbmin:stateconstrain.lbmin;
double lbmax=tmpparticle[id].lb+objectstateoffset.lboff;lbmax=lbmax<stateconstrain.lbmax?lbmax:stateconstrain.lbmax;
tmpparticle[id].lb=thrust::random::uniform_real_distribution<double>(lbmin,lbmax)(rng[id]);
// Score the perturbed hypothesis against the scan.
deviceBuildModel(tmpparticle[id],egomotion.density);
tmpparticle[id].weight=0;
tmpparticle[id].count=0;
deviceMeasureEdge(tmpparticle[id],0,scan,objectstateoffset.anneal,NULL,NULL,0);
deviceMeasureEdge(tmpparticle[id],1,scan,objectstateoffset.anneal,NULL,NULL,0);
return;
}
// Motion-update kernel: each thread clones one of the pnum input particles
// (spread evenly over tmppnum output slots), perturbs its motion parameters
// (v, omega -> curvature k, slip angle a), propagates a copy through the
// Ackermann model, and scores it against the scan. The weight/count of the
// propagated copy are written back onto the *unpropagated* sample so that
// resampling keeps pre-motion states.
// `egomotion.motionflag` selects Gaussian perturbation (tracking mode) vs.
// uniform windows (initialization/search mode).
// NOTE: each distribution draw advances rng[id]; draw order is part of the
// reproducible behavior.
__global__
void kernelMotionModel(LaserScan * scan, ObjectState * particle, int pnum, ObjectState * tmpparticle, int tmppnum, thrust::minstd_rand * rng, ObjectStateOffset objectstateoffset, StateConstrain stateconstrain, EgoMotion egomotion)
{
GetThreadID_1D(id);
if(id>=tmppnum)
{
return;
}
// Map output slot id onto a source particle index.
double index=double(pnum)/double(tmppnum);
int pid=int(id*index);
tmpparticle[id]=particle[pid];
if(egomotion.motionflag)
{
// Gaussian jitter, clamped to the velocity / turn-rate constraints.
tmpparticle[id].v=thrust::random::normal_distribution<double>(tmpparticle[id].v,objectstateoffset.voff)(rng[id]);
tmpparticle[id].v=tmpparticle[id].v>stateconstrain.vmin?tmpparticle[id].v:stateconstrain.vmin;
tmpparticle[id].v=tmpparticle[id].v<stateconstrain.vmax?tmpparticle[id].v:stateconstrain.vmax;
tmpparticle[id].omega=thrust::random::normal_distribution<double>(tmpparticle[id].omega,objectstateoffset.omegaoff)(rng[id]);
tmpparticle[id].omega=tmpparticle[id].omega>stateconstrain.omegamin?tmpparticle[id].omega:stateconstrain.omegamin;
tmpparticle[id].omega=tmpparticle[id].omega<stateconstrain.omegamax?tmpparticle[id].omega:stateconstrain.omegamax;
}
else
{
// Uniform jitter within the offset window, clamped to the constraints.
double vmin=tmpparticle[id].v-objectstateoffset.voff;vmin=vmin>stateconstrain.vmin?vmin:stateconstrain.vmin;
double vmax=tmpparticle[id].v+objectstateoffset.voff;vmax=vmax<stateconstrain.vmax?vmax:stateconstrain.vmax;
tmpparticle[id].v=thrust::random::uniform_real_distribution<double>(vmin,vmax)(rng[id]);
double omegamin=tmpparticle[id].omega-objectstateoffset.omegaoff;omegamin=omegamin>stateconstrain.omegamin?omegamin:stateconstrain.omegamin;
double omegamax=tmpparticle[id].omega+objectstateoffset.omegaoff;omegamax=omegamax<stateconstrain.omegamax?omegamax:stateconstrain.omegamax;
tmpparticle[id].omega=thrust::random::uniform_real_distribution<double>(omegamin,omegamax)(rng[id]);
}
// Derive curvature k = omega / v, clamped; then make omega consistent.
if(tmpparticle[id].v==0)
{
tmpparticle[id].k=(stateconstrain.kmin+stateconstrain.kmax)/2;
}
else
{
tmpparticle[id].k=tmpparticle[id].omega/tmpparticle[id].v;
if(tmpparticle[id].k<stateconstrain.kmin)
{
tmpparticle[id].k=stateconstrain.kmin;
}
if(tmpparticle[id].k>stateconstrain.kmax)
{
tmpparticle[id].k=stateconstrain.kmax;
}
}
tmpparticle[id].omega=tmpparticle[id].v*tmpparticle[id].k;
// Bound the slip angle by the turn geometry.
// NOTE(review): the 4.0 in atan2(4.0, R) is a hard-coded length
// (presumably a wheelbase in meters) — confirm and consider naming it.
double R,phi=stateconstrain.amax;
if(tmpparticle[id].k!=0)
{
R=1/fabs(tmpparticle[id].k);
phi=atan2(4.0,R);
}
// Tighten the local copy of the slip-angle constraints by turn direction
// (stateconstrain is passed by value, so this is thread-local).
if(tmpparticle[id].omega>0)
{
stateconstrain.amin=DEG2RAD(-20);
stateconstrain.amax=phi;
stateconstrain.amax=stateconstrain.amax>stateconstrain.amin?stateconstrain.amax:stateconstrain.amin;
}
else if(tmpparticle[id].omega<0)
{
stateconstrain.amax=DEG2RAD(20);
stateconstrain.amin=-phi;
stateconstrain.amin=stateconstrain.amin<stateconstrain.amax?stateconstrain.amin:stateconstrain.amax;
}
else if(tmpparticle[id].omega==0)
{
stateconstrain.amin=0;
stateconstrain.amax=0;
}
if(egomotion.motionflag)
{
tmpparticle[id].a=thrust::random::normal_distribution<double>(tmpparticle[id].a,objectstateoffset.aoff)(rng[id]);
tmpparticle[id].a=tmpparticle[id].a>stateconstrain.amin?tmpparticle[id].a:stateconstrain.amin;
tmpparticle[id].a=tmpparticle[id].a<stateconstrain.amax?tmpparticle[id].a:stateconstrain.amax;
}
else
{
double amin=tmpparticle[id].a-objectstateoffset.aoff;amin=amin>stateconstrain.amin?amin:stateconstrain.amin;
double amax=tmpparticle[id].a+objectstateoffset.aoff;amax=amax<stateconstrain.amax?amax:stateconstrain.amax;
tmpparticle[id].a=thrust::random::uniform_real_distribution<double>(amin,amax)(rng[id]);
}
// tmpparticle[id].a=0;
// Propagate a copy and score it; write only weight/count back.
ObjectState movedparticle=tmpparticle[id];
deviceAckermannModel(movedparticle,movedparticle,egomotion);
deviceBuildModel(movedparticle,egomotion.density);
movedparticle.weight=0;
movedparticle.count=0;
deviceMeasureEdge(movedparticle,0,scan,objectstateoffset.anneal,NULL,NULL,0);
deviceMeasureEdge(movedparticle,1,scan,objectstateoffset.anneal,NULL,NULL,0);
tmpparticle[id].weight=movedparticle.weight;
tmpparticle[id].count=movedparticle.count;
return;
}
// Advances each particle one time step with the Ackermann motion model and
// rebuilds its box geometry afterwards (in place).
__global__
void kernelMotionUpdate(ObjectState * particle, int pnum, EgoMotion egomotion)
{
    GetThreadID_1D(id);
    if (id < pnum)
    {
        ObjectState & p = particle[id];
        deviceAckermannModel(p, p, egomotion); // aliasing-safe: copies first
        deviceBuildModel(p, egomotion.density);
    }
}
//==============================================================================
// Systematic (low-variance) resampling of the tmppnum weighted candidates in
// d_tmpparticle into h_particle/d_particle.
// - Log-weights are first rescaled into [-30,30] so exp() stays finite, then
//   turned into an in-place cumulative sum (CDF).
// - planpnum equally spaced samples with a common random offset pick
//   candidates; repeated hits of the same candidate only add weight to it.
// - With rejectflag set, a candidate falling inside the current search box of
//   an already accepted particle is rejected.
//   NOTE(review): a rejected sample's weight share is silently dropped, so
//   average.weight can end below 1; the final normalisation compensates.
// Fills `average` with the weighted mean plus per-dimension spread (d*
// fields), uploads the kept particles to d_particle, and returns their count.
int sampleParticle(int tmppnum, ObjectState & average, ObjectStateOffset & objectstateoffset, bool rejectflag)
{
cudaMemcpy(h_tmpparticle,d_tmpparticle,sizeof(ObjectState)*tmppnum,cudaMemcpyDeviceToHost);
// Log-weight range; h_flag marks candidates not yet accepted.
double maxlogweight=h_tmpparticle[0].weight;
double minlogweight=h_tmpparticle[0].weight;
for(int j=0;j<tmppnum;j++)
{
if(maxlogweight<h_tmpparticle[j].weight)
{
maxlogweight=h_tmpparticle[j].weight;
}
if(minlogweight>h_tmpparticle[j].weight)
{
minlogweight=h_tmpparticle[j].weight;
}
h_flag[j]=1;
}
// Largest scale that maps all log-weights into [-30,30] before exp().
double maxscale=maxlogweight<=30?1:30/maxlogweight;
double minscale=minlogweight>=-30?1:-30/minlogweight;
double scale=maxscale<minscale?maxscale:minscale;
// Exponentiate and accumulate the CDF in place.
for(int j=0;j<tmppnum;j++)
{
h_tmpparticle[j].weight=exp(h_tmpparticle[j].weight*scale);
if(j>0)
{
h_tmpparticle[j].weight+=h_tmpparticle[j-1].weight;
}
}
int planpnum=tmppnum<RQPN?tmppnum:RQPN;
double step=1.0/planpnum;
int accuracy=1000000;
double samplebase=(rand()%accuracy)*step/accuracy;
double weightsum=h_tmpparticle[tmppnum-1].weight;
int pnum=0;
// Reset the weighted-mean accumulators.
average.weight=0;
average.x=0;average.y=0;average.theta=0;
average.wl=0;average.wr=0;average.lf=0;average.lb=0;
average.a=0;average.v=0;average.k=0;
// BUG FIX: omega is accumulated below but was never reset here, so stale
// caller data leaked into the mean.
average.omega=0;
average.count=0;
ObjectState minstate,maxstate;
double weight=1.0/planpnum;
for(int j=0, k=0;j<planpnum;j++)
{
double sample=samplebase+j*step;
// Advance k to the first candidate whose CDF value covers `sample`.
while(k<tmppnum)
{
if(sample>h_tmpparticle[k].weight/weightsum)
{
k++;
continue;
}
if(h_flag[k])
{
h_flag[k]=0;
if(rejectflag)
{
// Reject candidates inside the search box of an accepted particle
// (keeps the surviving particle set diverse).
bool flag=0;
for(int l=0;l<pnum;l++)
{
if(h_tmpparticle[k].wl>=h_particle[l].wl-objectstateoffset.wloff&&h_tmpparticle[k].wl<=h_particle[l].wl+objectstateoffset.wloff
&&h_tmpparticle[k].wr>=h_particle[l].wr-objectstateoffset.wroff&&h_tmpparticle[k].wr<=h_particle[l].wr+objectstateoffset.wroff
&&h_tmpparticle[k].lf>=h_particle[l].lf-objectstateoffset.lfoff&&h_tmpparticle[k].lf<=h_particle[l].lf+objectstateoffset.lfoff
&&h_tmpparticle[k].lb>=h_particle[l].lb-objectstateoffset.lboff&&h_tmpparticle[k].lb<=h_particle[l].lb+objectstateoffset.lboff
&&h_tmpparticle[k].a>=h_particle[l].a-objectstateoffset.aoff&&h_tmpparticle[k].a<=h_particle[l].a+objectstateoffset.aoff
&&h_tmpparticle[k].v>=h_particle[l].v-objectstateoffset.voff&&h_tmpparticle[k].v<=h_particle[l].v+objectstateoffset.voff
&&h_tmpparticle[k].k>=h_particle[l].k-objectstateoffset.koff&&h_tmpparticle[k].k<=h_particle[l].k+objectstateoffset.koff)
{
flag=1;
break;
}
}
if(flag)
{
break;
}
}
// Accept as a new distinct particle; track per-dimension extrema for
// the spread (d*) fields computed at the end.
h_particle[pnum]=h_tmpparticle[k];
h_particle[pnum].weight=weight;
if(pnum==0)
{
minstate.x=h_particle[pnum].x;maxstate.x=h_particle[pnum].x;
minstate.y=h_particle[pnum].y;maxstate.y=h_particle[pnum].y;
minstate.theta=h_particle[pnum].theta;maxstate.theta=h_particle[pnum].theta;
minstate.wl=h_particle[pnum].wl;maxstate.wl=h_particle[pnum].wl;
minstate.wr=h_particle[pnum].wr;maxstate.wr=h_particle[pnum].wr;
minstate.lf=h_particle[pnum].lf;maxstate.lf=h_particle[pnum].lf;
minstate.lb=h_particle[pnum].lb;maxstate.lb=h_particle[pnum].lb;
minstate.a=h_particle[pnum].a;maxstate.a=h_particle[pnum].a;
minstate.v=h_particle[pnum].v;maxstate.v=h_particle[pnum].v;
minstate.k=h_particle[pnum].k;maxstate.k=h_particle[pnum].k;
minstate.omega=h_particle[pnum].omega;maxstate.omega=h_particle[pnum].omega;
}
else
{
minstate.x=minstate.x<h_particle[pnum].x?minstate.x:h_particle[pnum].x;
maxstate.x=maxstate.x>h_particle[pnum].x?maxstate.x:h_particle[pnum].x;
minstate.y=minstate.y<h_particle[pnum].y?minstate.y:h_particle[pnum].y;
maxstate.y=maxstate.y>h_particle[pnum].y?maxstate.y:h_particle[pnum].y;
minstate.theta=minstate.theta<h_particle[pnum].theta?minstate.theta:h_particle[pnum].theta;
maxstate.theta=maxstate.theta>h_particle[pnum].theta?maxstate.theta:h_particle[pnum].theta;
minstate.wl=minstate.wl<h_particle[pnum].wl?minstate.wl:h_particle[pnum].wl;
maxstate.wl=maxstate.wl>h_particle[pnum].wl?maxstate.wl:h_particle[pnum].wl;
minstate.wr=minstate.wr<h_particle[pnum].wr?minstate.wr:h_particle[pnum].wr;
maxstate.wr=maxstate.wr>h_particle[pnum].wr?maxstate.wr:h_particle[pnum].wr;
minstate.lf=minstate.lf<h_particle[pnum].lf?minstate.lf:h_particle[pnum].lf;
maxstate.lf=maxstate.lf>h_particle[pnum].lf?maxstate.lf:h_particle[pnum].lf;
minstate.lb=minstate.lb<h_particle[pnum].lb?minstate.lb:h_particle[pnum].lb;
maxstate.lb=maxstate.lb>h_particle[pnum].lb?maxstate.lb:h_particle[pnum].lb;
minstate.a=minstate.a<h_particle[pnum].a?minstate.a:h_particle[pnum].a;
maxstate.a=maxstate.a>h_particle[pnum].a?maxstate.a:h_particle[pnum].a;
minstate.v=minstate.v<h_particle[pnum].v?minstate.v:h_particle[pnum].v;
maxstate.v=maxstate.v>h_particle[pnum].v?maxstate.v:h_particle[pnum].v;
minstate.k=minstate.k<h_particle[pnum].k?minstate.k:h_particle[pnum].k;
maxstate.k=maxstate.k>h_particle[pnum].k?maxstate.k:h_particle[pnum].k;
minstate.omega=minstate.omega<h_particle[pnum].omega?minstate.omega:h_particle[pnum].omega;
maxstate.omega=maxstate.omega>h_particle[pnum].omega?maxstate.omega:h_particle[pnum].omega;
}
pnum++;
}
else
{
// Candidate already kept: merge this sample's weight share into it.
h_particle[pnum-1].weight+=weight;
}
// Accumulate the weighted mean over the particle just touched.
average.weight+=weight;
average.x+=h_particle[pnum-1].x*weight;
average.y+=h_particle[pnum-1].y*weight;
average.theta+=h_particle[pnum-1].theta*weight;
average.wl+=h_particle[pnum-1].wl*weight;
average.wr+=h_particle[pnum-1].wr*weight;
average.lf+=h_particle[pnum-1].lf*weight;
average.lb+=h_particle[pnum-1].lb*weight;
average.a+=h_particle[pnum-1].a*weight;
average.v+=h_particle[pnum-1].v*weight;
average.k+=h_particle[pnum-1].k*weight;
average.omega+=h_particle[pnum-1].omega*weight;
average.count+=h_particle[pnum-1].count*weight;
break;
}
}
// Normalise the mean by the total accepted weight.
average.x/=average.weight;
average.y/=average.weight;
average.theta/=average.weight;
average.wl/=average.weight;
average.wr/=average.weight;
average.lf/=average.weight;
average.lb/=average.weight;
average.a/=average.weight;
average.v/=average.weight;
average.k/=average.weight;
average.omega/=average.weight;
average.count/=average.weight;
average.weight/=average.weight;
// Per-dimension spread: largest deviation of any kept particle from the mean.
average.dx=std::max(average.x-minstate.x,maxstate.x-average.x);
average.dy=std::max(average.y-minstate.y,maxstate.y-average.y);
average.dtheta=std::max(average.theta-minstate.theta,maxstate.theta-average.theta);
average.dwl=std::max(average.wl-minstate.wl,maxstate.wl-average.wl);
average.dwr=std::max(average.wr-minstate.wr,maxstate.wr-average.wr);
average.dlf=std::max(average.lf-minstate.lf,maxstate.lf-average.lf);
average.dlb=std::max(average.lb-minstate.lb,maxstate.lb-average.lb);
average.da=std::max(average.a-minstate.a,maxstate.a-average.a);
average.dv=std::max(average.v-minstate.v,maxstate.v-average.v);
average.dk=std::max(average.k-minstate.k,maxstate.k-average.k);
average.domega=std::max(average.omega-minstate.omega,maxstate.omega-average.omega);
cudaMemcpy(d_particle,h_particle,sizeof(ObjectState)*pnum,cudaMemcpyHostToDevice);
return pnum;
}
// CALRATIO: set ratio = maxrange/minrange for one state dimension, fold it
// into the running search-volume product vratio, and track the largest
// single-dimension ratio in maxratio.
#define CALRATIO(ratio, vratio, maxratio, maxrange, minrange) \
ratio=maxrange/minrange; vratio*=ratio; maxratio=ratio>maxratio?ratio:maxratio;
// CALZOOM: per-iteration shrink factor so the window contracts from maxrange
// to minrange over N iterations: zoom = (minrange/maxrange)^(1/N).
#define CALZOOM(zoom, maxrange, minrange, N) \
zoom=log(maxrange/minrange)/log(2)/N;zoom=1/pow(2,zoom);
// Scaling-Series refinement of the geometric state (theta, wl, wr, lf, lb):
// N annealed iterations shrink each search window from *off towards *prec,
// then a final pass runs at full precision with anneal=1. `particles` is both
// input and output; `average` receives the weighted mean of the final set.
// Relies on the file-scope device buffers and h_egomotion.
void SSPF_MeasureModel(ObjectState * particles, int & pnum, ObjectState & average, ObjectStateOffset & objectstateoffset)
{
cudaMemcpy(d_particle,particles,sizeof(ObjectState)*pnum,cudaMemcpyHostToDevice);
// Window/precision ratios: maxratio^2 sets the initial annealing factor,
// the volume product vratio sets the iteration count N = log2(vratio).
double ratio=1,vratio=1,maxratio=1;
CALRATIO(ratio,vratio,maxratio,objectstateoffset.thetaoff,objectstateoffset.thetaprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.wloff,objectstateoffset.wlprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.wroff,objectstateoffset.wrprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.lfoff,objectstateoffset.lfprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.lboff,objectstateoffset.lbprec);
objectstateoffset.anneal=maxratio*maxratio;
double N=log(vratio)/log(2);
CALZOOM(objectstateoffset.thetazoom,objectstateoffset.thetaoff,objectstateoffset.thetaprec,N);
CALZOOM(objectstateoffset.wlzoom,objectstateoffset.wloff,objectstateoffset.wlprec,N);
CALZOOM(objectstateoffset.wrzoom,objectstateoffset.wroff,objectstateoffset.wrprec,N);
CALZOOM(objectstateoffset.lfzoom,objectstateoffset.lfoff,objectstateoffset.lfprec,N);
CALZOOM(objectstateoffset.lbzoom,objectstateoffset.lboff,objectstateoffset.lbprec,N);
objectstateoffset.annealratio=pow(objectstateoffset.anneal,-1/N);
// Heading is constrained to a window around the seed particle.
StateConstrain stateconstrain;
stateconstrain.thetamin=particles[0].theta-objectstateoffset.thetaoff;
stateconstrain.thetamax=particles[0].theta+objectstateoffset.thetaoff;
int tmppnum;
for(int i=1;i<=N;i++)
{
// Spawn SPN candidates per particle, measure, then shrink the windows
// and relax the annealing factor before resampling (with rejection).
tmppnum=pnum*SPN;
GetKernelDim_1D(blocknum,threadnum,tmppnum);
kernelMeasureModel<<<blocknum,threadnum>>>(d_scan,d_particle,d_tmpparticle,tmppnum,d_rng,objectstateoffset,stateconstrain,h_egomotion);
objectstateoffset.thetaoff*=objectstateoffset.thetazoom;
objectstateoffset.wloff*=objectstateoffset.wlzoom;
objectstateoffset.wroff*=objectstateoffset.wrzoom;
objectstateoffset.lfoff*=objectstateoffset.lfzoom;
objectstateoffset.lboff*=objectstateoffset.lbzoom;
objectstateoffset.anneal*=objectstateoffset.annealratio;
pnum=sampleParticle(tmppnum,average,objectstateoffset,REJECTFLAG);
}
{
// Final pass at full precision, no annealing, no rejection.
objectstateoffset.thetaoff=objectstateoffset.thetaprec;
objectstateoffset.wloff=objectstateoffset.wlprec;
objectstateoffset.wroff=objectstateoffset.wrprec;
objectstateoffset.lfoff=objectstateoffset.lfprec;
objectstateoffset.lboff=objectstateoffset.lbprec;
objectstateoffset.anneal=1;
tmppnum=pnum*SPN;
GetKernelDim_1D(blocknum,threadnum,tmppnum);
kernelMeasureModel<<<blocknum,threadnum>>>(d_scan,d_particle,d_tmpparticle,tmppnum,d_rng,objectstateoffset,stateconstrain,h_egomotion);
pnum=sampleParticle(tmppnum,average,objectstateoffset,0);
}
{
// Download the surviving particles and rebuild the model of the mean.
cudaMemcpy(particles,d_particle,sizeof(ObjectState)*pnum,cudaMemcpyDeviceToHost);
deviceBuildModel(average,h_egomotion.density);
}
}
// Scaling-Series update of the motion state (a, v, omega).
// If ego motion was just detected (h_egomotion.motionflag) or SSPF is
// disabled (SSPFFLAG==0), a single full-budget pass with anneal=1 is used;
// otherwise N annealed iterations shrink each motion window from *off to
// *prec before a final full-precision pass that actually advances the
// particles (kernelMotionUpdate). `particles` is input and output; `average`
// receives the weighted mean. Relies on the file-scope device buffers and on
// h_egomotion being current.
void SSPF_MotionModel(ObjectState * particles, int & pnum, ObjectState & average, ObjectStateOffset & objectstateoffset)
{
    cudaMemcpy(d_particle,particles,sizeof(ObjectState)*pnum,cudaMemcpyHostToDevice);
    StateConstrain stateconstrain;
    int tmppnum;
    if(h_egomotion.motionflag||SSPFFLAG==0)
    {
        objectstateoffset.anneal=1;
        // Both branches of the former motionflag if/else assigned the same
        // budget (MAXPN), so the redundant conditional was collapsed.
        tmppnum=MAXPN;
        GetKernelDim_1D(blocknum,threadnum,tmppnum);
        kernelMotionModel<<<blocknum,threadnum>>>(d_scan,d_particle,pnum,d_tmpparticle,tmppnum,d_rng,objectstateoffset,stateconstrain,h_egomotion);
        kernelMotionUpdate<<<blocknum,threadnum>>>(d_tmpparticle,tmppnum,h_egomotion);
        pnum=sampleParticle(tmppnum,average,objectstateoffset,0);
    }
    else
    {
        // Annealing schedule from the window/precision ratios (see CALRATIO
        // and CALZOOM above): N = log2 of the search-volume ratio.
        double ratio=1,vratio=1,maxratio=1;
        CALRATIO(ratio,vratio,maxratio,objectstateoffset.aoff,objectstateoffset.aprec);
        CALRATIO(ratio,vratio,maxratio,objectstateoffset.voff,objectstateoffset.vprec);
        CALRATIO(ratio,vratio,maxratio,objectstateoffset.omegaoff,objectstateoffset.omegaprec);
        objectstateoffset.anneal=maxratio*maxratio;
        double N=log(vratio)/log(2);
        CALZOOM(objectstateoffset.azoom,objectstateoffset.aoff,objectstateoffset.aprec,N);
        CALZOOM(objectstateoffset.vzoom,objectstateoffset.voff,objectstateoffset.vprec,N);
        CALZOOM(objectstateoffset.omegazoom,objectstateoffset.omegaoff,objectstateoffset.omegaprec,N);
        objectstateoffset.annealratio=pow(objectstateoffset.anneal,-1/N);
        for(int i=1;i<=N;i++)
        {
            tmppnum=pnum*SPN;
            GetKernelDim_1D(blocknum,threadnum,tmppnum);
            kernelMotionModel<<<blocknum,threadnum>>>(d_scan,d_particle,pnum,d_tmpparticle,tmppnum,d_rng,objectstateoffset,stateconstrain,h_egomotion);
            // Shrink each window and relax annealing for the next pass.
            objectstateoffset.aoff*=objectstateoffset.azoom;
            objectstateoffset.voff*=objectstateoffset.vzoom;
            objectstateoffset.omegaoff*=objectstateoffset.omegazoom;
            objectstateoffset.anneal*=objectstateoffset.annealratio;
            pnum=sampleParticle(tmppnum,average,objectstateoffset,REJECTFLAG);
        }
        {
            // Final pass at full precision, no annealing, no rejection; this
            // pass also advances the particles via kernelMotionUpdate.
            objectstateoffset.aoff=objectstateoffset.aprec;
            objectstateoffset.voff=objectstateoffset.vprec;
            objectstateoffset.omegaoff=objectstateoffset.omegaprec;
            objectstateoffset.anneal=1;
            tmppnum=pnum*SPN;
            GetKernelDim_1D(blocknum,threadnum,tmppnum);
            kernelMotionModel<<<blocknum,threadnum>>>(d_scan,d_particle,pnum,d_tmpparticle,tmppnum,d_rng,objectstateoffset,stateconstrain,h_egomotion);
            kernelMotionUpdate<<<blocknum,threadnum>>>(d_tmpparticle,tmppnum,h_egomotion);
            pnum=sampleParticle(tmppnum,average,objectstateoffset,0);
        }
        // (A dead `count` accumulator that was never read has been removed.)
        std::cerr<<N<<"\n";
    }
    {
        cudaMemcpy(particles,d_particle,sizeof(ObjectState)*pnum,cudaMemcpyDeviceToHost);
        deviceBuildModel(average,h_egomotion.density);
    }
}
//==============================================================================
// Allocate the device-side buffers used by the tracker and seed one RNG state
// per potential particle. Calls stopTracker() first, so repeated calls do not
// leak the previous buffers. NOTE(review): cudaMalloc/cudaMemcpy return codes
// are not checked here.
extern "C" void startTracker()
{
stopTracker();
cudaMalloc(&(d_scan),sizeof(LaserScan));
cudaMalloc(&(d_particle),sizeof(ObjectState)*RQPN);
cudaMalloc(&(d_tmpparticle),sizeof(ObjectState)*MAXPN);
cudaMalloc(&(d_rng),sizeof(thrust::minstd_rand)*MAXPN);
// Host-generated seeds are uploaded once and used to initialise the
// per-thread thrust RNG states on the device.
thrust::generate(h_seed,h_seed+MAXPN,rand);
int * d_seed;
cudaMalloc(&(d_seed),sizeof(int)*MAXPN);
cudaMemcpy(d_seed,h_seed,sizeof(int)*MAXPN,cudaMemcpyHostToDevice);
GetKernelDim_1D(blocks,threads,MAXPN);
kernelSetRandomSeed<<<blocks,threads>>>(d_seed,d_rng,MAXPN);
CUDAFREE(d_seed);
}
// Release every device buffer owned by the tracker.
// NOTE(review): assumes the CUDAFREE macro is safe on never-allocated
// pointers (startTracker calls this before allocating) -- confirm against
// its definition.
extern "C" void stopTracker()
{
CUDAFREE(d_scan);
CUDAFREE(d_particle);
CUDAFREE(d_tmpparticle);
CUDAFREE(d_rng);
}
// Store the new scan on host (h_scan) and device (d_scan) and update the
// ego-motion estimate: the pose change since the previous scan is rotated
// into the new scan's frame (dx/dy) together with dtheta and dt.
extern "C" void setLaserScan(LaserScan & scan)
{
cudaMemcpy(d_scan,&scan,sizeof(LaserScan),cudaMemcpyHostToDevice);
h_scan=scan;
if(h_egomotion.validflag)
{
// Displacement of the previous pose relative to the new one, expressed
// in the new scan's coordinate frame (rotation by -scan.theta).
double tmpdx=h_egomotion.x-scan.x;
double tmpdy=h_egomotion.y-scan.y;
double c=cos(scan.theta);
double s=sin(scan.theta);
h_egomotion.dx=c*tmpdx+s*tmpdy;
h_egomotion.dy=-s*tmpdx+c*tmpdy;
h_egomotion.dtheta=h_egomotion.theta-scan.theta;
h_egomotion.dt=scan.timestamp-h_egomotion.timestamp;
}
// Remember the current pose/timestamp for the next delta computation.
h_egomotion.x=scan.x;
h_egomotion.y=scan.y;
h_egomotion.theta=scan.theta;
h_egomotion.timestamp=scan.timestamp;
h_egomotion.validflag=1;
// Angular spacing between consecutive laser beams.
h_egomotion.density=2*PI/scan.beamnum;
// std::cerr<<h_egomotion.timestamp<<"\t"<<h_egomotion.x<<"\t"<<h_egomotion.y<<"\t"<<h_egomotion.theta<<"\n";
}
// Initialise the tracker from a rough detection passed in `average`: run one
// geometry SSPF from a single seed particle, clamp the resulting
// uncertainties, symmetrise the width when both half-widths are confident,
// and report the scan beams that hit the model edges.
extern "C" void initTracker(ObjectState * particles, int & pnum, ObjectState & average, int & beamnum, int * beamid)
{
ObjectStateOffset objectstateoffset;
pnum=1;
// Seed with the detection's pose/shape/motion (field-wise copy leaves the
// remaining particle fields untouched).
// particles[0]=average;
particles[0].x=average.x;particles[0].y=average.y;particles[0].theta=average.theta;
particles[0].wl=average.wl;particles[0].wr=average.wr;particles[0].lf=average.lf;particles[0].lb=average.lb;
particles[0].a=average.a;particles[0].v=average.v;particles[0].k=average.k;particles[0].omega=average.v*average.k;
SSPF_MeasureModel(particles,pnum,average,objectstateoffset);
cudaDeviceSynchronize();
// Clamp geometry spreads: MINSIGMA floor, and anything at or above
// UNCERTAINTHRESH is treated as unknown (MAXSIGMA).
average.dwl=average.dwl>MINSIGMA?average.dwl:MINSIGMA;
average.dwr=average.dwr>MINSIGMA?average.dwr:MINSIGMA;
average.dlf=average.dlf>MINSIGMA?average.dlf:MINSIGMA;
average.dlb=average.dlb>MINSIGMA?average.dlb:MINSIGMA;
average.dwl=average.dwl<UNCERTAINTHRESH?average.dwl:MAXSIGMA;
average.dwr=average.dwr<UNCERTAINTHRESH?average.dwr:MAXSIGMA;
average.dlf=average.dlf<UNCERTAINTHRESH?average.dlf:MAXSIGMA;
average.dlb=average.dlb<UNCERTAINTHRESH?average.dlb:MAXSIGMA;
if(average.dwl<UNCERTAINTHRESH&&average.dwr<UNCERTAINTHRESH)
{
// Both half-widths confident: recentre laterally and make them equal.
double s=sin(average.theta);
double c=cos(average.theta);
double offset=(average.wl-average.wr)/2;
average.x=average.x-s*offset;
average.y=average.y+c*offset;
average.wl=average.wr=(average.wl+average.wr)/2;
}
deviceBuildModel(average,h_egomotion.density);
// Collect the beam ids that measure the model's two edges (sides 0 and 1).
beamnum=0;
deviceMeasureEdge(average,0,&h_scan,1,&beamnum,beamid,1);
deviceMeasureEdge(average,1,&h_scan,1,&beamnum,beamid,1);
}
// Bootstrap the motion state right after initTracker: run one motion SSPF and
// one measurement SSPF from the previous average, then fuse the refreshed
// geometry (wl, wr, lf, lb) with the previous estimate by inverse-variance
// weighting (product of Gaussians).
extern "C" void initMotion(ObjectState * particles, int & pnum, ObjectState & average, int & beamnum, int * beamid)
{
ObjectState preaverage=average;
ObjectState curaverage;
ObjectStateOffset objectstateoffset;
objectstateoffset.thetaoff=objectstateoffset.thetaprec;
// Dimensions already known precisely keep their tight search window.
if(average.dwl<objectstateoffset.wlprec)
{
objectstateoffset.wloff=objectstateoffset.wlprec;
}
if(average.dwr<objectstateoffset.wrprec)
{
objectstateoffset.wroff=objectstateoffset.wrprec;
}
if(average.dlf<objectstateoffset.lfprec)
{
objectstateoffset.lfoff=objectstateoffset.lfprec;
}
if(average.dlb<objectstateoffset.lbprec)
{
objectstateoffset.lboff=objectstateoffset.lbprec;
}
h_egomotion.motionflag=0;
pnum=1;
// Seed the motion SSPF with the previous average (field-wise copy).
// particles[0]=preaverage;
particles[0].x=preaverage.x;particles[0].y=preaverage.y;particles[0].theta=preaverage.theta;
particles[0].wl=preaverage.wl;particles[0].wr=preaverage.wr;particles[0].lf=preaverage.lf;particles[0].lb=preaverage.lb;
particles[0].a=preaverage.a;particles[0].v=preaverage.v;particles[0].k=preaverage.k;particles[0].omega=preaverage.v*preaverage.k;
SSPF_MotionModel(particles,pnum,curaverage,objectstateoffset);
// Keep the pose spread from the motion step (the geometry step below
// overwrites curaverage's d* fields).
double dx=curaverage.dx;
double dy=curaverage.dy;
double dtheta=curaverage.dtheta;
pnum=1;
// Re-seed with the motion result and refine the geometry.
// particles[0]=curaverage;
particles[0].x=curaverage.x;particles[0].y=curaverage.y;particles[0].theta=curaverage.theta;
particles[0].wl=curaverage.wl;particles[0].wr=curaverage.wr;particles[0].lf=curaverage.lf;particles[0].lb=curaverage.lb;
particles[0].a=curaverage.a;particles[0].v=curaverage.v;particles[0].k=curaverage.k;particles[0].omega=curaverage.v*curaverage.k;
SSPF_MeasureModel(particles,pnum,curaverage,objectstateoffset);
cudaDeviceSynchronize();
average=curaverage;
// Clamp geometry spreads before fusion (MINSIGMA floor, MAXSIGMA when the
// new measurement is too uncertain to trust).
curaverage.dwl=curaverage.dwl>MINSIGMA?curaverage.dwl:MINSIGMA;
curaverage.dwr=curaverage.dwr>MINSIGMA?curaverage.dwr:MINSIGMA;
curaverage.dlf=curaverage.dlf>MINSIGMA?curaverage.dlf:MINSIGMA;
curaverage.dlb=curaverage.dlb>MINSIGMA?curaverage.dlb:MINSIGMA;
curaverage.dwl=curaverage.dwl<UNCERTAINTHRESH?curaverage.dwl:MAXSIGMA;
curaverage.dwr=curaverage.dwr<UNCERTAINTHRESH?curaverage.dwr:MAXSIGMA;
curaverage.dlf=curaverage.dlf<UNCERTAINTHRESH?curaverage.dlf:MAXSIGMA;
curaverage.dlb=curaverage.dlb<UNCERTAINTHRESH?curaverage.dlb:MAXSIGMA;
average.dx=dx;average.dy=dy;average.dtheta=dtheta;
// Product-of-Gaussians fusion per dimension: mean weighted by the other
// estimate's variance, combined sigma = sqrt(s1^2*s2^2/(s1^2+s2^2)).
average.wl=(preaverage.wl*curaverage.dwl*curaverage.dwl+curaverage.wl*preaverage.dwl*preaverage.dwl)/(preaverage.dwl*preaverage.dwl+curaverage.dwl*curaverage.dwl);
average.dwl=sqrt((preaverage.dwl*preaverage.dwl*curaverage.dwl*curaverage.dwl)/(preaverage.dwl*preaverage.dwl+curaverage.dwl*curaverage.dwl));
average.dwl=average.dwl>MINSIGMA?average.dwl:MINSIGMA;
average.wr=(preaverage.wr*curaverage.dwr*curaverage.dwr+curaverage.wr*preaverage.dwr*preaverage.dwr)/(preaverage.dwr*preaverage.dwr+curaverage.dwr*curaverage.dwr);
average.dwr=sqrt((preaverage.dwr*preaverage.dwr*curaverage.dwr*curaverage.dwr)/(preaverage.dwr*preaverage.dwr+curaverage.dwr*curaverage.dwr));
average.dwr=average.dwr>MINSIGMA?average.dwr:MINSIGMA;
average.lf=(preaverage.lf*curaverage.dlf*curaverage.dlf+curaverage.lf*preaverage.dlf*preaverage.dlf)/(preaverage.dlf*preaverage.dlf+curaverage.dlf*curaverage.dlf);
average.dlf=sqrt((preaverage.dlf*preaverage.dlf*curaverage.dlf*curaverage.dlf)/(preaverage.dlf*preaverage.dlf+curaverage.dlf*curaverage.dlf));
average.dlf=average.dlf>MINSIGMA?average.dlf:MINSIGMA;
average.lb=(preaverage.lb*curaverage.dlb*curaverage.dlb+curaverage.lb*preaverage.dlb*preaverage.dlb)/(preaverage.dlb*preaverage.dlb+curaverage.dlb*curaverage.dlb);
average.dlb=sqrt((preaverage.dlb*preaverage.dlb*curaverage.dlb*curaverage.dlb)/(preaverage.dlb*preaverage.dlb+curaverage.dlb*curaverage.dlb));
average.dlb=average.dlb>MINSIGMA?average.dlb:MINSIGMA;
deviceBuildModel(average,h_egomotion.density);
// Report the beam ids measuring the model's two edges.
beamnum=0;
deviceMeasureEdge(average,0,&h_scan,1,&beamnum,beamid,1);
deviceMeasureEdge(average,1,&h_scan,1,&beamnum,beamid,1);
}
// Per-scan tracker update.
// 1. If the previous estimate is tight (small dx/dy/dtheta, enough edge
//    support) seed a single particle and use the SSPF motion model;
//    otherwise fall back to a plain particle-filter pass (motionflag=1)
//    with narrower motion windows.
// 2. If the motion result is well supported, refine the geometry with the
//    measurement SSPF and fuse wl/wr/lf/lb with the previous estimate by
//    inverse-variance weighting; otherwise keep the previous geometry.
// 3. Rebuild the model of the fused mean and report the edge beam ids.
extern "C" void updateTracker(ObjectState * particles, int & pnum, ObjectState & average, bool & pfflag, int & beamnum, int * beamid)
{
ObjectState preaverage=average;
ObjectState curaverage;
ObjectStateOffset objectstateoffset;
objectstateoffset.thetaoff=objectstateoffset.thetaprec;
// Dimensions already known precisely keep their tight search window.
if(average.dwl<objectstateoffset.wlprec)
{
objectstateoffset.wloff=objectstateoffset.wlprec;
}
if(average.dwr<objectstateoffset.wrprec)
{
objectstateoffset.wroff=objectstateoffset.wrprec;
}
if(average.dlf<objectstateoffset.lfprec)
{
objectstateoffset.lfoff=objectstateoffset.lfprec;
}
if(average.dlb<objectstateoffset.lbprec)
{
objectstateoffset.lboff=objectstateoffset.lbprec;
}
// Tight, well-supported previous estimate -> SSPF from a single seed.
if(preaverage.dx<=0.5&&preaverage.dy<=0.5&&preaverage.dtheta<=DEG2RAD(10)&&preaverage.count>=3)
{
h_egomotion.motionflag=0;
pnum=1;
//particles[0]=preaverage;
particles[0].x=preaverage.x;particles[0].y=preaverage.y;particles[0].theta=preaverage.theta;
particles[0].wl=preaverage.wl;particles[0].wr=preaverage.wr;particles[0].lf=preaverage.lf;particles[0].lb=preaverage.lb;
particles[0].a=preaverage.a;particles[0].v=preaverage.v;particles[0].k=preaverage.k;particles[0].omega=preaverage.v*preaverage.k;
}
else
{
h_egomotion.motionflag=1;
}
// Motion search windows: narrow for the plain PF, wide for the SSPF path.
if(h_egomotion.motionflag)
{
objectstateoffset.aoff=DEG2RAD(10);
objectstateoffset.voff=3;
objectstateoffset.koff=0.05;
objectstateoffset.omegaoff=DEG2RAD(10);
}
else
{
objectstateoffset.aoff=DEG2RAD(30);
objectstateoffset.voff=10;
objectstateoffset.koff=0.15;
objectstateoffset.omegaoff=DEG2RAD(30);
}
// Tell the caller which mode was used.
pfflag=h_egomotion.motionflag;
// std::cerr<<"Before Motion\n";
// std::cerr<<preaverage.x<<"\t"<<preaverage.y<<"\t"<<preaverage.theta<<"\t"
// <<preaverage.wl<<"\t"<<preaverage.wr<<"\t"<<preaverage.lf<<"\t"<<preaverage.lb<<"\t"
// <<preaverage.a<<"\t"<<preaverage.v<<"\t"<<preaverage.k<<"\t"<<preaverage.v*preaverage.k<<"\t"
// <<preaverage.count<<"\n";
SSPF_MotionModel(particles,pnum,curaverage,objectstateoffset);
// std::cerr<<"After Motion\n";
// std::cerr<<curaverage.x<<"\t"<<curaverage.y<<"\t"<<curaverage.theta<<"\t"
// <<curaverage.wl<<"\t"<<curaverage.wr<<"\t"<<curaverage.lf<<"\t"<<curaverage.lb<<"\t"
// <<curaverage.a<<"\t"<<curaverage.v<<"\t"<<curaverage.k<<"\t"<<curaverage.v*curaverage.k<<"\t"
// <<curaverage.count<<"\n";
// Motion result confident enough -> refine the geometry from its mean.
if(curaverage.count>=10||(curaverage.dx<=0.5&&curaverage.dy<=0.5&&curaverage.dtheta<=DEG2RAD(10)&&curaverage.count>=3))
{
// Keep the pose spread of the motion step; the geometry step below
// overwrites curaverage's d* fields.
double dx=curaverage.dx;
double dy=curaverage.dy;
double dtheta=curaverage.dtheta;
pnum=1;
// particles[0]=curaverage;
particles[0].x=curaverage.x;particles[0].y=curaverage.y;particles[0].theta=curaverage.theta;
particles[0].wl=curaverage.wl;particles[0].wr=curaverage.wr;particles[0].lf=curaverage.lf;particles[0].lb=curaverage.lb;
particles[0].a=curaverage.a;particles[0].v=curaverage.v;particles[0].k=curaverage.k;particles[0].omega=curaverage.v*curaverage.k;
SSPF_MeasureModel(particles,pnum,curaverage,objectstateoffset);
// std::cerr<<"After Geometry\n";
// std::cerr<<curaverage.x<<"\t"<<curaverage.y<<"\t"<<curaverage.theta<<"\t"
// <<curaverage.wl<<"\t"<<curaverage.wr<<"\t"<<curaverage.lf<<"\t"<<curaverage.lb<<"\t"
// <<curaverage.a<<"\t"<<curaverage.v<<"\t"<<curaverage.k<<"\t"<<curaverage.v*curaverage.k<<"\t"
// <<curaverage.count<<"\n";
cudaDeviceSynchronize();
average=curaverage;
// Clamp geometry spreads before fusion (MINSIGMA floor, MAXSIGMA when
// the new measurement is too uncertain to trust).
curaverage.dwl=curaverage.dwl>MINSIGMA?curaverage.dwl:MINSIGMA;
curaverage.dwr=curaverage.dwr>MINSIGMA?curaverage.dwr:MINSIGMA;
curaverage.dlf=curaverage.dlf>MINSIGMA?curaverage.dlf:MINSIGMA;
curaverage.dlb=curaverage.dlb>MINSIGMA?curaverage.dlb:MINSIGMA;
curaverage.dwl=curaverage.dwl<UNCERTAINTHRESH?curaverage.dwl:MAXSIGMA;
curaverage.dwr=curaverage.dwr<UNCERTAINTHRESH?curaverage.dwr:MAXSIGMA;
curaverage.dlf=curaverage.dlf<UNCERTAINTHRESH?curaverage.dlf:MAXSIGMA;
curaverage.dlb=curaverage.dlb<UNCERTAINTHRESH?curaverage.dlb:MAXSIGMA;
average.dx=dx;average.dy=dy;average.dtheta=dtheta;
// Product-of-Gaussians fusion per geometry dimension: mean weighted by
// the other estimate's variance, sigma = sqrt(s1^2*s2^2/(s1^2+s2^2)).
average.wl=(preaverage.wl*curaverage.dwl*curaverage.dwl+curaverage.wl*preaverage.dwl*preaverage.dwl)/(preaverage.dwl*preaverage.dwl+curaverage.dwl*curaverage.dwl);
average.dwl=sqrt((preaverage.dwl*preaverage.dwl*curaverage.dwl*curaverage.dwl)/(preaverage.dwl*preaverage.dwl+curaverage.dwl*curaverage.dwl));
average.dwl=average.dwl>MINSIGMA?average.dwl:MINSIGMA;
average.wr=(preaverage.wr*curaverage.dwr*curaverage.dwr+curaverage.wr*preaverage.dwr*preaverage.dwr)/(preaverage.dwr*preaverage.dwr+curaverage.dwr*curaverage.dwr);
average.dwr=sqrt((preaverage.dwr*preaverage.dwr*curaverage.dwr*curaverage.dwr)/(preaverage.dwr*preaverage.dwr+curaverage.dwr*curaverage.dwr));
average.dwr=average.dwr>MINSIGMA?average.dwr:MINSIGMA;
average.lf=(preaverage.lf*curaverage.dlf*curaverage.dlf+curaverage.lf*preaverage.dlf*preaverage.dlf)/(preaverage.dlf*preaverage.dlf+curaverage.dlf*curaverage.dlf);
average.dlf=sqrt((preaverage.dlf*preaverage.dlf*curaverage.dlf*curaverage.dlf)/(preaverage.dlf*preaverage.dlf+curaverage.dlf*curaverage.dlf));
average.dlf=average.dlf>MINSIGMA?average.dlf:MINSIGMA;
average.lb=(preaverage.lb*curaverage.dlb*curaverage.dlb+curaverage.lb*preaverage.dlb*preaverage.dlb)/(preaverage.dlb*preaverage.dlb+curaverage.dlb*curaverage.dlb);
average.dlb=sqrt((preaverage.dlb*preaverage.dlb*curaverage.dlb*curaverage.dlb)/(preaverage.dlb*preaverage.dlb+curaverage.dlb*curaverage.dlb));
average.dlb=average.dlb>MINSIGMA?average.dlb:MINSIGMA;
}
else
{
// Motion result weak: keep the previous geometry and its spreads.
cudaDeviceSynchronize();
average=curaverage;
average.wl=preaverage.wl;average.dwl=preaverage.dwl;
average.wr=preaverage.wr;average.dwr=preaverage.dwr;
average.lf=preaverage.lf;average.dlf=preaverage.dlf;
average.lb=preaverage.lb;average.dlb=preaverage.dlb;
}
deviceBuildModel(average,h_egomotion.density);
// Report the beam ids measuring the model's two edges.
beamnum=0;
deviceMeasureEdge(average,0,&h_scan,1,&beamnum,beamid,1);
deviceMeasureEdge(average,1,&h_scan,1,&beamnum,beamid,1);
}
|
20,965 | /*
* This program uses the host CURAND API to generate 100
* pseudorandom floats.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
#include <time.h>
#include <sys/time.h>
// Error-check helpers: on failure print the failing file/line and return
// EXIT_FAILURE from the *calling* function (so only usable where returning
// int makes sense, e.g. main).
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
/*
 * Generate n pseudorandom floats with the host CURAND API, then time a
 * host->device and a device->host copy of an equally sized scratch buffer.
 * Usage: prog <kilobytes>  (256 floats per 1 KB).
 * Prints "<h2d microseconds>, <d2h microseconds>".
 */
int main(int argc, char *argv[])
{
    struct timeval start_h2d, end_h2d, start_d2h, end_d2h;
    int n;
    /* Bug fix: argv[1] was dereferenced without checking argc. */
    if (argc < 2) {
        fprintf(stderr, "usage: %s <kilobytes>\n", argv[0]);
        return EXIT_FAILURE;
    }
    n = 256 * atof(argv[1]);/* 256 floats per 1 KB */
    curandGenerator_t gen;
    float *devData, *hostData;
    /* Allocate n floats on host */
    hostData = (float *)calloc(n, sizeof(float));
    if (hostData == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    /* Allocate n floats on device */
    CUDA_CALL(cudaMalloc((void **)&devData, n*sizeof(float)));
    /* Create pseudo-random number generator */
    CURAND_CALL(curandCreateGenerator(&gen,
                CURAND_RNG_PSEUDO_DEFAULT));
    /* Fixed seed for reproducibility */
    CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen,
                1234ULL));
    /* Generate n floats on device */
    CURAND_CALL(curandGenerateUniform(gen, devData, n));
    /* Copy device memory to host */
    CUDA_CALL(cudaMemcpy(hostData, devData, n * sizeof(float),
        cudaMemcpyDeviceToHost));
    /* Show result
    for(size_t i = 0; i < n; i++) {
        printf("%1.4f ", hostData[i]);
    }
    printf("\n");
    */
    /* Scratch buffer used only to time the transfers (contents irrelevant). */
    float* rando = (float*)malloc(n*sizeof(float));
    if (rando == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    float* d_rand;
    /* Bug fix: these CUDA calls were previously unchecked. */
    CUDA_CALL(cudaMalloc((void **)&d_rand,sizeof(float)*n));
    gettimeofday(&start_h2d, NULL);
    CUDA_CALL(cudaMemcpy(d_rand, rando,sizeof(float)*n,cudaMemcpyHostToDevice));
    gettimeofday(&end_h2d, NULL);
    gettimeofday(&start_d2h, NULL);
    CUDA_CALL(cudaMemcpy(rando, d_rand,sizeof(float)*n,cudaMemcpyDeviceToHost));
    gettimeofday(&end_d2h, NULL);
    printf("%ld, %ld\n", ((end_h2d.tv_sec * 1000000 + end_h2d.tv_usec) - (start_h2d.tv_sec * 1000000 + start_h2d.tv_usec)),((end_d2h.tv_sec * 1000000 + end_d2h.tv_usec) - (start_d2h.tv_sec * 1000000 + start_d2h.tv_usec)));
    /* Cleanup (bug fix: d_rand and rando were previously leaked) */
    CUDA_CALL(cudaFree(d_rand));
    free(rando);
    CURAND_CALL(curandDestroyGenerator(gen));
    CUDA_CALL(cudaFree(devData));
    free(hostData);
    return EXIT_SUCCESS;
} |
20,966 | #include "includes.h"
/* In-place element-wise square root of the first N weights.
 * One thread per element; threads past the end of the array do nothing. */
__global__ void kernel_sqrtweights_fl(int N, float *wt){
    unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
    /* guard the grid tail (same unsigned comparison as before) */
    if (idx >= (unsigned int)N) return;
    wt[idx] = sqrtf(wt[idx]);
} |
20,967 | /*
* MAC0431 - Introducao a Programacao Paralela e Distribuida
*
* Fisica Alternativa
*
* Bruno Endo - 7990982
* Danilo Aleixo - 7972370
* Gustavo Caparica - 7991020
*
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Image dimensions: WIDTH/HEIGHT live in device constant memory (written from
// the host in readPPM); WIDTHH/HEIGHTH are the host-side mirrors.
__constant__ int WIDTH, HEIGHT;
int WIDTHH, HEIGHTH;
// *H variants expand to the host mirrors; the others to the device values.
#define SIZE (WIDTH*HEIGHT)
#define SIZEH (WIDTHH*HEIGHTH)
// The working grids carry a one-cell border on every side.
#define BORDERW (WIDTH+2)
#define BORDERH (HEIGHT+2)
#define FAKE_SIZE (BORDERW*BORDERH)
#define FAKE_SIZEH ((WIDTHH+2)*(HEIGHTH+2))
#define MAXLINE 128
// Float pi; note this expands to an acosf call at every use site.
#define PI acosf(-1)
// Decompose the magnitude stored at (x, y) in V into components along
// direction theta; by this file's convention the x component uses sinf and
// the y component uses cosf.
__device__ void get_components(int x, int y, float theta, float *V, float *outx, float *outy) {
    float mag = V[y*BORDERW + x];
    *outx = mag * sinf(theta);
    *outy = mag * cosf(theta);
}
// Map the [0,1] value stored at (x, y) in V to an angle in [0, 2*pi).
__device__ float get_theta(int x, int y, float *V) {
    return V[y*BORDERW + x] * 2 * PI;
}
// For each interior cell: split R along the G-derived angle and B along the
// opposite direction, then atomically exchange a quarter of each component
// with the neighbour it points at, damped by how full that neighbour already
// is (the (1 - neighbour) factor). One single-thread block per cell; the +1
// offsets skip the one-cell border.
__global__ void calc_contributions(float *R, float *G, float *B, float *Rx, float *Ry, float *Bx, float *By) {
int x = blockIdx.x + 1;
int y = blockIdx.y + 1;
// Flow direction comes from the green channel.
float theta = get_theta(x, y, G);
float contribRx, contribRy;
get_components(x, y, theta, R, &contribRx, &contribRy);
float contribBx, contribBy;
get_components(x, y, theta, B, &contribBx, &contribBy);
// Blue flows opposite to red.
contribBx *= -1;
contribBy *= -1;
// Step direction of the red flow. NOTE(review): xx/yy are always set to
// +/-1 below (never left 0), so the `!= 0` guards further down can never
// be false -- confirm whether a zero-component case was intended.
int xx = 0;
int yy = 0;
if (contribRx > 0) {
xx = 1;
} else {
xx = -1;
}
if (contribRy > 0) {
yy = 1;
} else {
yy = -1;
}
// Quarter of each component, damped by the destination cell's occupancy.
float deltaRx = (1 - R[y*BORDERW + (x + xx)])*contribRx/4.0;
float deltaRy = (1 - R[(y + yy)*BORDERW + x])*contribRy/4.0;
float deltaBx = (1 - B[y*BORDERW + (x - xx)])*contribBx/4.0;
float deltaBy = (1 - B[(y - yy)*BORDERW + x])*contribBy/4.0;
if (xx != 0) {
atomicAdd(&Rx[y*BORDERW + (x + xx)], deltaRx);
atomicAdd(&Rx[y*BORDERW + x], -deltaRx);
atomicAdd(&Bx[y*BORDERW + (x - xx)], deltaBx);
atomicAdd(&Bx[y*BORDERW + x], -deltaBx);
}
if (yy != 0) {
atomicAdd(&Ry[(y + yy)*BORDERW + x], deltaRy);
atomicAdd(&Ry[y*BORDERW + x], -deltaRy);
atomicAdd(&By[(y - yy)*BORDERW + x], deltaBy);
atomicAdd(&By[y*BORDERW + x], -deltaBy);
}
}
// Split the R and B magnitudes of each interior cell into x/y components
// using each channel's own angle; blue points the opposite way (both of its
// components negated). One single-thread block per interior cell.
__global__ void calc_components(float *R, float *G, float *B, float *Rx, float *Ry, float *Bx, float *By) {
    int x = blockIdx.x + 1;
    int y = blockIdx.y + 1;
    int idx = y*BORDERW + x;
    get_components(x, y, get_theta(x, y, R), R, &Rx[idx], &Ry[idx]);
    get_components(x, y, get_theta(x, y, B), B, &Bx[idx], &By[idx]);
    Bx[idx] = -Bx[idx];
    By[idx] = -By[idx];
}
// Recompute the R and B magnitudes from their updated component fields and
// mirror the results into Rx/Bx (which the redistribution step reads next).
__global__ void recalc_magnitudes(float *Rx, float *Ry, float *Bx, float *By, float *R, float *B) {
    int idx = (blockIdx.y + 1)*BORDERW + (blockIdx.x + 1);
    float rmag = sqrtf((Rx[idx]*Rx[idx])+(Ry[idx]*Ry[idx]));
    float bmag = sqrtf((Bx[idx]*Bx[idx])+(By[idx]*By[idx]));
    R[idx] = rmag;
    B[idx] = bmag;
    Rx[idx] = rmag;
    Bx[idx] = bmag;
}
// Overflow handling: where R (resp. B) exceeds 1, clamp the cell to 1 in the
// output grid nR (nB) and add tmp/4 to each 4-neighbour.
// NOTE(review): tmp = 1 - R is NEGATIVE when R > 1, so the neighbours are
// decremented rather than receiving the excess -- confirm this sign is the
// intended behaviour.
__global__ void redist(float *nR, float *nB, float *R, float *B) {
int x = blockIdx.x + 1;
int y = blockIdx.y + 1;
if (R[y*BORDERW + x] > 1) {
float tmp = 1 - R[y*BORDERW + x];
nR[y*BORDERW + x] = 1;
atomicAdd(&nR[y*BORDERW + x + 1], tmp/4);
atomicAdd(&nR[y*BORDERW + x - 1], tmp/4);
atomicAdd(&nR[(y+1)*BORDERW + x], tmp/4);
atomicAdd(&nR[(y-1)*BORDERW + x], tmp/4);
}
if (B[y*BORDERW + x] > 1) {
float tmp = 1 - B[y*BORDERW + x];
nB[y*BORDERW + x] = 1;
atomicAdd(&nB[y*BORDERW + x + 1], tmp/4);
atomicAdd(&nB[y*BORDERW + x - 1], tmp/4);
atomicAdd(&nB[(y+1)*BORDERW + x], tmp/4);
atomicAdd(&nB[(y-1)*BORDERW + x], tmp/4);
}
}
// Fold mass that leaked into the top and bottom border rows back into the
// adjacent interior rows, then clear the border cells. One block per column
// (launched with WIDTHH blocks); the bottom border cell is addressed from
// the end of the buffer (FAKE_SIZE - 1 - x).
__global__ void re_redist_w(float *R, float *B) {
int x = blockIdx.x + 1;
atomicAdd(&R[x + BORDERW], R[x]);
atomicAdd(&R[FAKE_SIZE - 1 - x - BORDERW], R[FAKE_SIZE - 1 - x]);
atomicAdd(&B[x + BORDERW], B[x]);
atomicAdd(&B[FAKE_SIZE - 1 - x - BORDERW], B[FAKE_SIZE - 1 - x]);
R[x] = 0;
R[FAKE_SIZE - 1 - x] = 0;
B[x] = 0;
B[FAKE_SIZE - 1 - x] = 0;
}
// Fold mass from the left and right border columns back into the adjacent
// interior columns of the same row, then clear the border cells. One block
// per row (launched with HEIGHTH blocks); here x is a row index.
__global__ void re_redist_h(float *R, float *B) {
int x = blockIdx.x + 1;
atomicAdd(&R[x * BORDERW + 1], R[x * BORDERW]);
atomicAdd(&R[x * BORDERW + BORDERW - 2], R[x * BORDERW + BORDERW - 1]);
atomicAdd(&B[x * BORDERW + 1], B[x * BORDERW]);
atomicAdd(&B[x * BORDERW + BORDERW - 2], B[x * BORDERW + BORDERW - 1]);
R[x * BORDERW] = 0;
R[x * BORDERW + BORDERW - 1] = 0;
B[x * BORDERW] = 0;
B[x * BORDERW + BORDERW - 1] = 0;
}
// Clamp R and B to [0,1], then fold the local B/R angle (normalised by 2*pi)
// into G and clamp it. NOTE(review): G > 1 resets to 0 rather than clamping
// to 1 -- looks like an intentional wrap of the angle fraction; confirm.
__global__ void finalize(float *R, float *G, float *B) {
int x = blockIdx.x + 1;
int y = blockIdx.y + 1;
if (R[y*BORDERW + x] > 1) R[y*BORDERW + x] = 1;
if (B[y*BORDERW + x] > 1) B[y*BORDERW + x] = 1;
if (R[y*BORDERW + x] < 0) R[y*BORDERW + x] = 0;
if (B[y*BORDERW + x] < 0) B[y*BORDERW + x] = 0;
atomicAdd(&G[y*BORDERW + x], atan2f(B[y*BORDERW + x], R[y*BORDERW + x])/(2*PI));
if (G[y*BORDERW + x] > 1) G[y*BORDERW + x] = 0;
if (G[y*BORDERW + x] < 0) G[y*BORDERW + x] = 0;
}
// the wrapper around the kernel call for main program to call.
// Run one full simulation step on the GPU: component split, neighbour
// contributions, magnitude recomputation, overflow redistribution, border
// fold-back, and final clamping. Launch layout is one single-thread block
// per interior cell (grid WIDTHH x HEIGHTH); kernels derive the cell from
// blockIdx. The asserts only catch launch-configuration errors reported by
// cudaGetLastError, not asynchronous execution failures.
extern "C" void kernel_wrapper(float *R, float *G, float *B, float *Rx, float *Ry, float *Bx, float *By) {
dim3 image_size(WIDTHH, HEIGHTH);
calc_components<<<image_size, 1>>>(R, G, B, Rx, Ry, Bx, By);
assert(cudaGetLastError() == cudaSuccess);
calc_contributions<<<image_size, 1>>>(R, G, B, Rx, Ry, Bx, By);
assert(cudaGetLastError() == cudaSuccess);
recalc_magnitudes<<<image_size, 1>>>(Rx, Ry, Bx, By, R, B);
assert(cudaGetLastError() == cudaSuccess);
// redist reads the magnitudes mirrored into Rx/Bx and writes into R/B.
redist<<<image_size, 1>>>(R, B, Rx, Bx);
assert(cudaGetLastError() == cudaSuccess);
re_redist_w<<<WIDTHH, 1>>>(R, B);
assert(cudaGetLastError() == cudaSuccess);
re_redist_h<<<HEIGHTH, 1>>>(R, B);
assert(cudaGetLastError() == cudaSuccess);
finalize<<<image_size, 1>>>(R, G, B);
assert(cudaGetLastError() == cudaSuccess);
}
// Write the interior WIDTHH x HEIGHTH region of the padded R/G/B
// planes (row pitch WIDTHH+2, one border pixel on each side) to `file`
// as an ASCII PPM (P3), scaling each float sample to [0, 255].
void writePPM(const char* file, float *R, float *G, float *B) {
    FILE *fp = fopen(file, "w");
    if (fp == NULL) {
        fprintf(stderr, "Erro ao escrever %s\n", file);
        return;
    }
    fprintf(fp, "P3\n%d %d\n255\n", WIDTHH, HEIGHTH);
    for (int i = 1; i <= HEIGHTH; i++) {
        int rowBase = i * (WIDTHH + 2);   // skip border column at j == 0
        for (int j = 1; j <= WIDTHH; j++) {
            int idx = rowBase + j;
            fprintf(fp, "%d %d %d ", (int)(R[idx]*255), (int)(G[idx]*255), (int)(B[idx]*255));
        }
        fprintf(fp, "\n");
    }
    fclose(fp);
}
// Read an ASCII PPM (P3) file into freshly allocated, border-padded
// R/G/B planes (row pitch w+2, one zeroed border pixel on every side),
// normalizing each sample by the file's max value k. Also publishes
// the dimensions to the host globals WIDTHH/HEIGHTH and to the device
// symbols WIDTH/HEIGHT. Returns 1 on success, 0 on failure.
int readPPM(const char* file, float **R, float **G, float **B) {
FILE *fp = fopen(file, "rb");
if (fp == NULL) {
fprintf(stderr, "Erro ao abrir %s\n", file);
return 0;
}
int w, h, k;
int red, green, blue;
int *gw, *gh;
char c;
char tmp[MAXLINE];
// Magic number: only "P3" / "p3" is supported.
c = getc(fp);
if (c == 'P' || c == 'p') {
c = getc(fp);
}
if (c != '3') {
fprintf(stderr, "Erro: formato do PPM (P%c) nao suportado\n", c);
fclose(fp); // BUG FIX: original leaked the FILE handle on this path
return 0;
}
// Skip '#' comment lines between the magic number and the header.
c = getc(fp);
if (c == '\n' || c == '\r') {
c = getc(fp);
while(c == '#') {
fscanf(fp, "%[^\n\r] ", tmp);
c = getc(fp);
}
ungetc(c,fp);
}
fscanf(fp, "%d %d %d", &w, &h, &k); // width, height, max sample value
WIDTHH = w;
HEIGHTH = h;
// Mirror the dimensions into the device-side WIDTH/HEIGHT symbols.
cudaGetSymbolAddress((void**)&gw, WIDTH);
assert(cudaGetLastError() == cudaSuccess);
cudaGetSymbolAddress((void**)&gh, HEIGHT);
assert(cudaGetLastError() == cudaSuccess);
cudaMemcpy(gw, &w, sizeof(int), cudaMemcpyHostToDevice);
assert(cudaGetLastError() == cudaSuccess);
cudaMemcpy(gh, &h, sizeof(int), cudaMemcpyHostToDevice);
assert(cudaGetLastError() == cudaSuccess);
// NOTE(review): FAKE_SIZEH presumably expands in terms of the WIDTHH /
// HEIGHTH globals set above -- confirm; malloc results are unchecked.
*R = (float*)malloc(FAKE_SIZEH*sizeof(float));
*G = (float*)malloc(FAKE_SIZEH*sizeof(float));
*B = (float*)malloc(FAKE_SIZEH*sizeof(float));
// Interior pixels are 1-based in both directions because of the border.
for (int i = 1; i <= h; i++) {
for (int j = 1; j <= w; j++) {
fscanf(fp, "%d %d %d", &red, &green, &blue );
(*R)[i*(w+2) + j] = (float)red/(float)k;
(*G)[i*(w+2) + j] = (float)green/(float)k;
(*B)[i*(w+2) + j] = (float)blue/(float)k;
}
}
// Zero the top and bottom border rows...
for (int i = 0; i < w + 2; i++) {
(*R)[i] = (*G)[i] = (*B)[i] = 0;
(*R)[(h+1)*(w+2) + i] = (*G)[(h+1)*(w+2) + i] = (*B)[(h+1)*(w+2) + i] = 0;
}
// ...and the left and right border columns.
for (int i = 0; i < h + 2; i++) {
(*R)[i*(w+2)] = (*G)[i*(w+2)] = (*B)[i*(w+2)] = 0;
(*R)[i*(w+2) + w + 1] = (*G)[i*(w+2) + w + 1] = (*B)[i*(w+2) + w + 1] = 0;
}
fclose(fp); // BUG FIX: original never closed the file on success either
return 1;
}
// Entry point: load a PPM, run `iters` iterations of the GPU pipeline
// (kernel_wrapper), and write the result back out as a PPM.
// Usage: <prog> input output num_iters [num_procs]  (num_procs ignored).
int main(int argc, char const *argv[]) {
if (argc < 4) {
printf("Uso:\n");
printf("%s entrada saida num_iters [num_procs]\n", argv[0]);
printf("Nota: num_procs é ignorado pois usamos CUDA\n");
return 0;
}
int iters = atoi(argv[3]);
srand(time(NULL));
float *R, *G, *B, *gR, *gG, *gB, *nR, *nG, *nB, *nN;
if (!readPPM(argv[1], &R, &G, &B)) {
fprintf(stderr, "Erro durante a leitura\n");
return 1;
}
// Device buffers: gR/gG/gB mirror the color planes, nR/nG/nB/nN are
// scratch planes consumed by kernel_wrapper.
cudaMalloc((void**)&gR, FAKE_SIZEH*sizeof(float));
assert(cudaGetLastError() == cudaSuccess);
cudaMalloc((void**)&gG, FAKE_SIZEH*sizeof(float));
assert(cudaGetLastError() == cudaSuccess);
cudaMalloc((void**)&gB, FAKE_SIZEH*sizeof(float));
assert(cudaGetLastError() == cudaSuccess);
cudaMalloc((void**)&nR, FAKE_SIZEH*sizeof(float));
assert(cudaGetLastError() == cudaSuccess);
cudaMalloc((void**)&nG, FAKE_SIZEH*sizeof(float));
assert(cudaGetLastError() == cudaSuccess);
cudaMalloc((void**)&nB, FAKE_SIZEH*sizeof(float));
assert(cudaGetLastError() == cudaSuccess);
cudaMalloc((void**)&nN, FAKE_SIZEH*sizeof(float));
assert(cudaGetLastError() == cudaSuccess);
cudaMemcpy(gR, R, FAKE_SIZEH*sizeof(float), cudaMemcpyHostToDevice);
assert(cudaGetLastError() == cudaSuccess);
cudaMemcpy(gG, G, FAKE_SIZEH*sizeof(float), cudaMemcpyHostToDevice);
assert(cudaGetLastError() == cudaSuccess);
cudaMemcpy(gB, B, FAKE_SIZEH*sizeof(float), cudaMemcpyHostToDevice);
assert(cudaGetLastError() == cudaSuccess);
// Time only the kernel iterations; the sync before stop ensures the
// asynchronous launches have actually finished.
// NOTE(review): clock() measures CPU time, not wall time -- for GPU
// timing cudaEvent timers would be more accurate.
clock_t start, stop;
start = clock();
for(int i = 0; i < iters; i++) {
kernel_wrapper(gR, gG, gB, nR, nG, nB, nN);
}
cudaDeviceSynchronize();
stop = clock();
cudaMemcpy(R, gR, FAKE_SIZEH*sizeof(float), cudaMemcpyDeviceToHost);
assert(cudaGetLastError() == cudaSuccess);
cudaMemcpy(G, gG, FAKE_SIZEH*sizeof(float), cudaMemcpyDeviceToHost);
assert(cudaGetLastError() == cudaSuccess);
cudaMemcpy(B, gB, FAKE_SIZEH*sizeof(float), cudaMemcpyDeviceToHost);
assert(cudaGetLastError() == cudaSuccess);
float tempo = (float)(stop - start) / CLOCKS_PER_SEC;
printf("Tempo total: %fs (tempo medio por iteracao: %fs)\n", tempo, tempo/iters);
printf("Nota: tempo nao inclui tempo de copia de/para a GPU\n");
writePPM(argv[2], R, G, B);
// NOTE(review): host and device allocations are not freed; they are
// reclaimed at process exit.
return 0;
}
|
20,968 | #ifdef __cplusplus
extern "C" {
#endif
// 3x3 Sobel convolution masks in constant memory: sobx responds to
// horizontal intensity changes (x derivative), soby to vertical ones
// (y derivative).
__constant__ int sobx[3][3] = { {-1, 0, 1},
{-2, 0, 2},
{-1, 0, 1} };
__constant__ int soby[3][3] = { {-1,-2,-1},
{ 0, 0, 0},
{ 1, 2, 1} };
// Sobel kernel. Applies the sobx and soby 3x3 masks to each pixel,
// writes the clamped gradient magnitude to `out` and a quantized
// gradient direction to `theta`.
// data:  input image, one int per pixel
// out:   gradient magnitude, clamped to [0, 255]
// theta: gradient direction buffer
// Launch assumption (hard-coded via the 16/17/18 constants below):
// blockDim is 16x16 so the 18x18 shared tile holds one block plus a
// one-pixel halo ring.
// NOTE(review): halo loads such as data[pos-cols-1] are not bounds
// checked, so blocks touching the image border read out of range --
// confirm the caller pads the image or excludes border blocks.
__global__ void sobel_kernel( int *data,
int *out,
float *theta,
int rows,
int cols)
{
const float PI = 3.14159265;
// Tile-local coordinates, offset by 1 to leave room for the halo.
int l_row = threadIdx.y + 1;
int l_col = threadIdx.x + 1;
// Global pixel coordinates (stray ";;" from the original removed).
int g_row = threadIdx.y + (blockIdx.y * blockDim.y);
int g_col = threadIdx.x + (blockIdx.x * blockDim.x);
int pos = g_row * cols + g_col;
__shared__ int l_data[18][18];
// Stage this thread's pixel, plus the halo for edge threads.
l_data[l_row][l_col] = data[pos];
// top most row
if (l_row == 1)
{
l_data[0][l_col] = data[pos-cols];
// top left
if (l_col == 1)
l_data[0][0] = data[pos-cols-1];
// top right
else if (l_col == 16)
l_data[0][17] = data[pos-cols+1];
}
// bottom most row
else if (l_row == 16)
{
l_data[17][l_col] = data[pos+cols];
// bottom left
if (l_col == 1)
l_data[17][0] = data[pos+cols-1];
// bottom right
else if (l_col == 16)
l_data[17][17] = data[pos+cols+1];
}
// left
if (l_col == 1)
l_data[l_row][0] = data[pos-1];
// right
else if (l_col == 16)
l_data[l_row][17] = data[pos+1];
// BUG FIX: the convolution below reads tile cells written by OTHER
// threads, so the block must synchronize after staging; the original
// kernel had no barrier here (shared-memory race).
__syncthreads();
float sumx = 0, sumy = 0, angle = 0;
// Accumulate x and y derivatives over the 3x3 neighborhood.
for (int i = 0; i < 3; i++)
{
for (int j = 0; j < 3; j++)
{
sumx += sobx[i][j] * l_data[i+l_row-1][j+l_col-1];
sumy += soby[i][j] * l_data[i+l_row-1][j+l_col-1];
}
}
// Magnitude sqrt(sumx^2 + sumy^2), clamped to [0, 255].
out[pos] = min(255,max(0, (int)hypot(sumx,sumy) ));
// atan2 yields (-PI, PI]; shift negative angles into [0, 2PI).
angle = atan2(sumy,sumx);
if (angle < 0)
{
angle = fmod((angle + 2*PI),(2*PI));
}
// Quantize the direction to 0/45/90/135 degrees.
// NOTE(review): multiplying `angle` by PI/8 before the rad-to-deg
// factor 57.2957... looks suspicious for that quantization -- verify
// the intended formula.
theta[pos] = ((int)(57.29577951 * (angle * (PI/8) + PI/8-0.0001) / 45) * 45) % 180;
}
#ifdef __cplusplus
}
#endif
|
20,969 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
const int Array = 1024 * 1024 * 8;
const int threadsPerBlock = 512;
// Dot product in two phases: (1) element-wise multiply d_a and d_b
// into d_c with a block-stride loop, then (2) an in-place pairwise
// reduction of d_c so the final sum ends up in d_c[0].
// NOTE(review): __syncthreads() only synchronizes one block, and the
// reduction reads elements produced by arbitrary threads, so this is
// only correct for a single-block launch (the host below launches it
// as dot<<<1, threadsPerBlock>>>). `Array` must be a power of two for
// the halving loop to cover every element.
__global__ void dot(int *d_a, int *d_b, int *d_c){
int tid = threadIdx.x;
int tidTemp = tid ;
// Phase 1: each thread handles elements tid, tid+blockDim.x, ...
while(tidTemp < Array){
d_c[tidTemp] = d_a[tidTemp] * d_b[tidTemp];
tidTemp += blockDim.x;
}
__syncthreads();
// Phase 2: halve the active range each pass, folding the upper half
// onto the lower half.
int i = Array / 2;
while(i != 0){
tid = threadIdx.x;
while(tid < i){
d_c[tid] += d_c[tid + i];
tid += blockDim.x;
}
__syncthreads();
i /= 2;
}
}
// Entry point: compute the dot product of two random int vectors on
// the CPU (timed with clock_gettime) and on the GPU (timed with CUDA
// events), then compare d_c[0] against the CPU sum.
int main(){
int *a, *b, *c, *gold_c;
int *d_a, *d_b, *d_c;
int i;
int pass = 1;
a = (int*)malloc(Array*sizeof(int));
b = (int*)malloc(Array*sizeof(int));
c = (int*)malloc(Array*sizeof(int));
gold_c = (int*)malloc(Array*sizeof(int));
for(i = 0; i < Array; i++){
a[i] = rand()%100;
b[i] = rand()%100;
}
struct timespec t_start, t_end;
double elapsedTimeCPU;
clock_gettime(CLOCK_REALTIME, &t_start);
// CPU reference: plain accumulation.
// NOTE(review): `sum` can overflow int for large Array; the GPU result
// overflows identically, so the comparison still holds.
int sum = 0;
for(i = 0; i < Array; i++){
//gold_c[i] = a[i] * b[i];
sum += a[i] * b[i];
}
clock_gettime(CLOCK_REALTIME, &t_end);
elapsedTimeCPU = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
elapsedTimeCPU += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
printf("CPU elapsedTime: %lf ms\n", elapsedTimeCPU);
cudaMalloc((void**)&d_a, Array * sizeof(int));
cudaMalloc((void**)&d_b, Array * sizeof(int));
cudaMalloc((void**)&d_c, Array * sizeof(int));
cudaMemcpy(d_a, a, Array*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, Array*sizeof(int), cudaMemcpyHostToDevice);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Single-block launch: the dot kernel's reduction relies on gridDim.x == 1.
dot<<<1, threadsPerBlock>>>(d_a, d_b, d_c);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float cudaelapsedTime;
cudaEventElapsedTime(&cudaelapsedTime, start, stop);
printf("GPU elapsedTime: %lf ms\n", cudaelapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaMemcpy(c, d_c, Array*sizeof(int), cudaMemcpyDeviceToHost);
/*for(i = 0; i < Array; i++){
if(gold_c[i] != c[i]){
pass = 0;
break;
}
}*/
// After the in-place reduction the dot product lives in element 0.
if(c[0] != sum){
pass =0;
}
if(pass==1)
printf("test pass!\n");
else
printf("error...\n");
// BUG FIX: the original leaked every allocation; release them before exit.
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(a);
free(b);
free(c);
free(gold_c);
return 0;
}
|
20,970 | #include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
#include <unistd.h>
#define BLOCKSIZEX 8
#define BLOCKSIZEY 8
#define BLOCKSIZEZ 8
void checkCUDAError (const char *msg);
void dprint (float *campo, int x, int y, int Lx, int Ly, int Lz);
void mprint (float *campo, int x, int y, int Lx, int Ly, int Lz);
void midprint (float *campo, int Lx, int Ly, int Lz);
void cria_arquivo(float *campo, int x, int y,int t, int Lx, int Ly, int Lz);
// FDTD H-field half-step: advance Hx, Hy, Hz from the discrete curl of
// the E field. Each thread updates 4 consecutive x-positions
// (indices 4*i .. 4*i+3) -- the kernel is manually unrolled 4x in x,
// matching the launch grid in main, which divides Lx by 4*blockDim.x.
// mx is the update coefficient dt/(mu*dx); `my` and `out` are unused.
// NOTE(review): neighbor accesses with (j+1), (k+1) and (4*i+4) are not
// clamped, so threads on the upper domain edge read past the arrays --
// confirm whether boundary cells are meant to be excluded.
__global__ void WaveStepH (float *Hx, float *Hy, float *Hz, float *Ex, float *Ey, float *Ez, int Lx, int Ly, int Lz, float mx, float my, float *out)
{
int i = blockIdx.x * (blockDim.x) + threadIdx.x;
int j = blockIdx.y * (blockDim.y) + threadIdx.y;
int k = blockIdx.z * (blockDim.z) + threadIdx.z;
if( j< Ly && i<Lx && k<Lz)
{
Hx[4*i+j*Lx+k*Lx*Ly] = Hx[4*i+j*Lx+k*Lx*Ly] + mx * (Ey[4*i+j*Lx+(k+1)*Lx*Ly] - Ey[4*i+j*Lx+k*Lx*Ly] - Ez[4*i+(j+1)*Lx+k*Lx*Ly] + Ez[4*i+j*Lx+k*Lx*Ly] );
Hx[(4*i+1)+j*Lx+k*Lx*Ly] = Hx[(4*i+1)+j*Lx+k*Lx*Ly] + mx * (Ey[(4*i+1)+j*Lx+(k+1)*Lx*Ly] - Ey[(4*i+1)+j*Lx+k*Lx*Ly] - Ez[(4*i+1)+(j+1)*Lx+k*Lx*Ly] + Ez[(4*i+1)+j*Lx+k*Lx*Ly] );
Hx[(4*i+2)+j*Lx+k*Lx*Ly] = Hx[(4*i+2)+j*Lx+k*Lx*Ly] + mx * (Ey[(4*i+2)+j*Lx+(k+1)*Lx*Ly] - Ey[(4*i+2)+j*Lx+k*Lx*Ly] - Ez[(4*i+2)+(j+1)*Lx+k*Lx*Ly] + Ez[(4*i+2)+j*Lx+k*Lx*Ly] );
Hx[(4*i+3)+j*Lx+k*Lx*Ly] = Hx[(4*i+3)+j*Lx+k*Lx*Ly] + mx * (Ey[(4*i+3)+j*Lx+(k+1)*Lx*Ly] - Ey[(4*i+3)+j*Lx+k*Lx*Ly] - Ez[(4*i+3)+(j+1)*Lx+k*Lx*Ly] + Ez[(4*i+3)+j*Lx+k*Lx*Ly] );
Hy[4*i+j*Lx+k*Lx*Ly] = Hy[4*i+j*Lx+k*Lx*Ly] + mx * (Ez[4*i+1+j*Lx+k*Lx*Ly] - Ez[4*i+j*Lx+k*Lx*Ly] - Ex[4*i+j*Lx+(k+1)*Lx*Ly] + Ex[4*i+j*Lx+k*Lx*Ly] );
Hy[(4*i+1)+j*Lx+k*Lx*Ly] = Hy[(4*i+1)+j*Lx+k*Lx*Ly] + mx * (Ez[(4*i+2)+j*Lx+k*Lx*Ly] - Ez[(4*i+1)+j*Lx+k*Lx*Ly] - Ex[(4*i+1)+j*Lx+(k+1)*Lx*Ly] + Ex[(4*i+1)+j*Lx+k*Lx*Ly] );
Hy[(4*i+2)+j*Lx+k*Lx*Ly] = Hy[(4*i+2)+j*Lx+k*Lx*Ly] + mx * (Ez[(4*i+3)+j*Lx+k*Lx*Ly] - Ez[(4*i+2)+j*Lx+k*Lx*Ly] - Ex[(4*i+2)+j*Lx+(k+1)*Lx*Ly] + Ex[(4*i+2)+j*Lx+k*Lx*Ly] );
Hy[(4*i+3)+j*Lx+k*Lx*Ly] = Hy[(4*i+3)+j*Lx+k*Lx*Ly] + mx * (Ez[(4*i+4)+j*Lx+k*Lx*Ly] - Ez[(4*i+3)+j*Lx+k*Lx*Ly] - Ex[(4*i+3)+j*Lx+(k+1)*Lx*Ly] + Ex[(4*i+3)+j*Lx+k*Lx*Ly] );
Hz[4*i+j*Lx+k*Lx*Ly] = Hz[4*i+j*Lx+k*Lx*Ly] + mx * (Ex[4*i+(j+1)*Lx+k*Lx*Ly] - Ex[4*i+j*Lx+k*Lx*Ly] - Ey[4*i+1+j*Lx+k*Lx*Ly] + Ey[4*i+j*Lx+k*Lx*Ly] );
Hz[(4*i+1)+j*Lx+k*Lx*Ly] = Hz[(4*i+1)+j*Lx+k*Lx*Ly] + mx * (Ex[(4*i+1)+(j+1)*Lx+k*Lx*Ly] - Ex[(4*i+1)+j*Lx+k*Lx*Ly] - Ey[(4*i+2)+j*Lx+k*Lx*Ly] + Ey[(4*i+1)+j*Lx+k*Lx*Ly] );
Hz[(4*i+2)+j*Lx+k*Lx*Ly] = Hz[(4*i+2)+j*Lx+k*Lx*Ly] + mx * (Ex[(4*i+2)+(j+1)*Lx+k*Lx*Ly] - Ex[(4*i+2)+j*Lx+k*Lx*Ly] - Ey[(4*i+3)+j*Lx+k*Lx*Ly] + Ey[(4*i+2)+j*Lx+k*Lx*Ly] );
Hz[(4*i+3)+j*Lx+k*Lx*Ly] = Hz[(4*i+3)+j*Lx+k*Lx*Ly] + mx * (Ex[(4*i+3)+(j+1)*Lx+k*Lx*Ly] - Ex[(4*i+3)+j*Lx+k*Lx*Ly] - Ey[(4*i+4)+j*Lx+k*Lx*Ly] + Ey[(4*i+3)+j*Lx+k*Lx*Ly] );
}
}
// FDTD E-field half-step: advance Ex, Ey, Ez from the discrete curl of
// the H field, scaled by the per-cell coefficient array `ez`
// (dt/(eps*dx), filled in main). Same 4x manual unrolling in x as
// WaveStepH. After updating, every active thread (a) overwrites the
// domain-center Ez cell with the source value `field`, and (b) mirrors
// its Ez values into `out` for host-side inspection.
// NOTE(review): the guard only checks the lower bounds (i,j,k > 0), so
// upper-edge threads read/write out of range; the center source write
// is performed redundantly by every active thread (all write the same
// value). Confirm both are intended.
__global__ void WaveStepE (float *Hx, float *Hy, float *Hz, float *Ex, float *Ey, float *Ez, int Lx, int Ly,int Lz, float field, float *ez, float *out)
{
int i = blockIdx.x * (blockDim.x) + threadIdx.x;
int j = blockIdx.y * (blockDim.y) + threadIdx.y;
int k = blockIdx.z * (blockDim.z) + threadIdx.z;
// if( j>1 && i>1 && k>1 && j<Ly-1 && i<Lx-1 && k<Lz-1)
if( j>0 && i>0 && k>0 )
{
Ex[4*i+j*Lx+k*Lx*Ly] = Ex[4*i+j*Lx+k*Lx*Ly] + ez[4*i+j*Lx+k*Lx*Ly] * (Hz[4*i+j*Lx+k*Lx*Ly] - Hz[4*i+(j-1)*Lx+k*Lx*Ly] - Hy[4*i+j*Lx+k*Lx*Ly] + Hy[4*i+j*Lx+(k-1)*Lx*Ly] );
Ex[(4*i+1)+j*Lx+k*Lx*Ly] = Ex[(4*i+1)+j*Lx+k*Lx*Ly] + ez[(4*i+1)+j*Lx+k*Lx*Ly] * (Hz[(4*i+1)+j*Lx+k*Lx*Ly] - Hz[(4*i+1)+(j-1)*Lx+k*Lx*Ly] - Hy[(4*i+1)+j*Lx+k*Lx*Ly] + Hy[(4*i+1)+j*Lx+(k-1)*Lx*Ly] );
Ex[(4*i+2)+j*Lx+k*Lx*Ly] = Ex[(4*i+2)+j*Lx+k*Lx*Ly] + ez[(4*i+2)+j*Lx+k*Lx*Ly] * (Hz[(4*i+2)+j*Lx+k*Lx*Ly] - Hz[(4*i+2)+(j-1)*Lx+k*Lx*Ly] - Hy[(4*i+2)+j*Lx+k*Lx*Ly] + Hy[(4*i+2)+j*Lx+(k-1)*Lx*Ly] );
Ex[(4*i+3)+j*Lx+k*Lx*Ly] = Ex[(4*i+3)+j*Lx+k*Lx*Ly] + ez[(4*i+3)+j*Lx+k*Lx*Ly] * (Hz[(4*i+3)+j*Lx+k*Lx*Ly] - Hz[(4*i+3)+(j-1)*Lx+k*Lx*Ly] - Hy[(4*i+3)+j*Lx+k*Lx*Ly] + Hy[(4*i+3)+j*Lx+(k-1)*Lx*Ly] );
Ey[4*i+j*Lx+k*Lx*Ly] = Ey[4*i+j*Lx+k*Lx*Ly] + ez[4*i+j*Lx+k*Lx*Ly] * (Hx[4*i+j*Lx+k*Lx*Ly] - Hx[4*i+j*Lx+(k-1)*Lx*Ly] - Hz[4*i+j*Lx+k*Lx*Ly] + Hz[4*i-1+j*Lx+k*Lx*Ly] );
Ey[(4*i+1)+j*Lx+k*Lx*Ly] = Ey[(4*i+1)+j*Lx+k*Lx*Ly] + ez[(4*i+1)+j*Lx+k*Lx*Ly] * (Hx[(4*i+1)+j*Lx+k*Lx*Ly] - Hx[(4*i+1)+j*Lx+(k-1)*Lx*Ly] - Hz[(4*i+1)+j*Lx+k*Lx*Ly] + Hz[(4*i)+j*Lx+k*Lx*Ly] );
Ey[(4*i+2)+j*Lx+k*Lx*Ly] = Ey[(4*i+2)+j*Lx+k*Lx*Ly] + ez[(4*i+2)+j*Lx+k*Lx*Ly] * (Hx[(4*i+2)+j*Lx+k*Lx*Ly] - Hx[(4*i+2)+j*Lx+(k-1)*Lx*Ly] - Hz[(4*i+2)+j*Lx+k*Lx*Ly] + Hz[(4*i+1)+j*Lx+k*Lx*Ly] );
Ey[(4*i+3)+j*Lx+k*Lx*Ly] = Ey[(4*i+3)+j*Lx+k*Lx*Ly] + ez[(4*i+3)+j*Lx+k*Lx*Ly] * (Hx[(4*i+3)+j*Lx+k*Lx*Ly] - Hx[(4*i+3)+j*Lx+(k-1)*Lx*Ly] - Hz[(4*i+3)+j*Lx+k*Lx*Ly] + Hz[(4*i+2)+j*Lx+k*Lx*Ly] );
Ez[4*i+j*Lx+k*Lx*Ly] = Ez[4*i+j*Lx+k*Lx*Ly] + ez[4*i+j*Lx+k*Lx*Ly] * (Hy[4*i+j*Lx+k*Lx*Ly] - Hy[4*i-1+j*Lx+k*Lx*Ly] - Hx[4*i+j*Lx+k*Lx*Ly] + Hx[4*i+(j-1)*Lx+k*Lx*Ly] );
Ez[(4*i+1)+j*Lx+k*Lx*Ly] = Ez[(4*i+1)+j*Lx+k*Lx*Ly] + ez[(4*i+1)+j*Lx+k*Lx*Ly] * (Hy[(4*i+1)+j*Lx+k*Lx*Ly] - Hy[(4*i)+j*Lx+k*Lx*Ly] - Hx[(4*i+1)+j*Lx+k*Lx*Ly] + Hx[(4*i+1)+(j-1)*Lx+k*Lx*Ly] );
Ez[(4*i+2)+j*Lx+k*Lx*Ly] = Ez[(4*i+2)+j*Lx+k*Lx*Ly] + ez[(4*i+2)+j*Lx+k*Lx*Ly] * (Hy[(4*i+2)+j*Lx+k*Lx*Ly] - Hy[(4*i+1)+j*Lx+k*Lx*Ly] - Hx[(4*i+2)+j*Lx+k*Lx*Ly] + Hx[(4*i+2)+(j-1)*Lx+k*Lx*Ly] );
Ez[(4*i+3)+j*Lx+k*Lx*Ly] = Ez[(4*i+3)+j*Lx+k*Lx*Ly] + ez[(4*i+3)+j*Lx+k*Lx*Ly] * (Hy[(4*i+3)+j*Lx+k*Lx*Ly] - Hy[(4*i+2)+j*Lx+k*Lx*Ly] - Hx[(4*i+3)+j*Lx+k*Lx*Ly] + Hx[(4*i+3)+(j-1)*Lx+k*Lx*Ly] );
Ez[Lx*(Ly/2)+Lx/2+(Lz/2)*Lx*Ly]=field; ////// source injection at the domain center
out[4*i+j*Lx+k*Lx*Ly]=Ez[4*i+j*Lx+k*Lx*Ly];
out[(4*i+1)+j*Lx+k*Lx*Ly]=Ez[(4*i+1)+j*Lx+k*Lx*Ly];
out[(4*i+2)+j*Lx+k*Lx*Ly]=Ez[(4*i+2)+j*Lx+k*Lx*Ly];
out[(4*i+3)+j*Lx+k*Lx*Ly]=Ez[(4*i+3)+j*Lx+k*Lx*Ly];
}
}
// Entry point of the 3D FDTD demo: builds a 32^3 domain (rounded up to
// the launch grid), initializes fields to zero, then leapfrogs
// WaveStepH/WaveStepE for T steps while injecting a differentiated
// Gaussian pulse at the domain center and printing the central slice.
int main (int argc, char **argv)
{
int i,j,k;
/////////// Set Domain Sizes ////////////
int Lx=32; //computational x size set by user
int Ly=32; //computational y size set by user
int Lz=32;
dim3 dimBlock (BLOCKSIZEX, BLOCKSIZEY,BLOCKSIZEZ); //dimensions of threads block
// x is divided by 4*blockDim.x because the kernels are unrolled 4x in x.
dim3 dimGrid (( Lx / (4*dimBlock.x) + ( Lx % (4*dimBlock.x) == 0?0:1)), ( Ly / (dimBlock.y) + ( Ly % (dimBlock.y) == 0?0:1)), ( Lz / (dimBlock.z) + ( Lz % (dimBlock.z) == 0?0:1))); //grid size that fits the user domain
Lx=4*dimBlock.x*dimGrid.x; //computational x size
Ly=dimBlock.y*dimGrid.y; //computational y size
Lz=dimBlock.z*dimGrid.z;
int D=Lx*Ly*Lz; //total number of computational cells
int Dsize=D*sizeof(float);
//////////////////////////////////////////
///////////////////////////Physical Quantities/////////////////////////////////
float pi=4.0*atan(1.0);
float muo=4.0*pi*1.0e-7; // Permeability of free space
float epso=8.854e-12; // Permittivity of free space
float co=1.0/sqrt(muo*epso); // Speed of light in free space
float dx=0.0001; // FDTD cell size
float dt=dx/co/sqrt(3.0); // Time step size (3D Courant limit)
////////// electrical permittivity coefficient per cell ////////
float *ez_h;
ez_h = (float *)malloc(Dsize);
float *ez;
cudaMalloc ((void **) &ez, Dsize);
for(i=0;i<Lx;i++)
{
for(j=0;j<Ly;j++)
{
for(k=0;k<Lz;k++)
{
ez_h[i+Lx*j+Lx*Ly*k]=dt/(epso*dx);
}
}
}
float mx = dt/muo/dx;
float my = mx;
////////// Allocate E and H field arrays (host + device) ////////
float *Ex_h;
Ex_h = (float *)malloc(Dsize);
float *Ex;
cudaMalloc ((void **) &Ex, Dsize);
float *Ey_h;
Ey_h = (float *)malloc(Dsize);
float *Ey;
cudaMalloc ((void **) &Ey, Dsize);
float *Ez_h;
Ez_h = (float *)malloc(Dsize);
float *Ez;
cudaMalloc ((void **) &Ez, Dsize);
float *Hx_h;
Hx_h = (float *)malloc(Dsize);
float *Hx;
cudaMalloc ((void **) &Hx, Dsize);
float *Hy_h;
Hy_h = (float *)malloc(Dsize);
float *Hy;
cudaMalloc ((void **) &Hy, Dsize);
float *Hz_h;
Hz_h = (float *)malloc(Dsize);
float *Hz;
cudaMalloc ((void **) &Hz, Dsize);
////////// Debug output array ////////
float *out_h;
out_h = (float *)malloc(Dsize);
float *out;
cudaMalloc ((void **) &out, Dsize);
/////////// Null-field initial condition //////
// BUG FIX: the original loops ran to i<Lx+1, j<Ly+1, k<Lz+1, writing
// well past the D-element heap buffers (heap corruption). The arrays
// hold exactly Lx*Ly*Lz floats, so the loops must stop at Lx/Ly/Lz.
for(i=0;i<Lx;i++)
{
for(j=0;j<Ly;j++)
{
for(k=0;k<Lz;k++)
{
Ex_h[i+Lx*j+Lx*Ly*k]=0.f;
Ey_h[i+Lx*j+Lx*Ly*k]=0.f;
Ez_h[i+Lx*j+Lx*Ly*k]=0.f;
Hx_h[i+Lx*j+Lx*Ly*k]=0.f;
Hy_h[i+Lx*j+Lx*Ly*k]=0.f;
Hz_h[i+Lx*j+Lx*Ly*k]=0.f;
}
}
}
/////////////// Copy initial data to the device /////////////////////
cudaMemcpy (Ex, Ex_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Ey, Ey_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Ez, Ez_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Hx, Hx_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Hy, Hy_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (Hz, Hz_h, Dsize, cudaMemcpyHostToDevice);
cudaMemcpy (ez, ez_h, Dsize, cudaMemcpyHostToDevice);
//////////////////// Time iteration ////////////////////////
int T=4000;
int b = 25.0; // pulse width parameter (in time steps)
float dum,voltage,field;
for (int t = 0; t < T; t = t + 1)
{
// Differentiated Gaussian source injected at the domain center.
dum = (4.0/b/dt)*(t*dt-b*dt);
voltage = 2.0*dum*exp(-(pow(dum,2.f)));
field = voltage/dx;
WaveStepH <<< dimGrid, dimBlock >>> (Hx, Hy, Hz, Ex, Ey, Ez, Lx, Ly,Lz, mx, my,out);
WaveStepE <<< dimGrid, dimBlock >>> (Hx, Hy, Hz, Ex, Ey, Ez, Lx, Ly,Lz, field, ez,out);
checkCUDAError ("kernel invocation");
// The blocking memcpy also synchronizes with the launches above.
cudaMemcpy (out_h, out, Dsize, cudaMemcpyDeviceToHost);
checkCUDAError ("getting data from device");
dprint ( out_h, Lx , Ly , Lx , Ly , Lz );
}
// Release device and host memory before exiting.
cudaFree (ez); cudaFree (Ex); cudaFree (Ey); cudaFree (Ez);
cudaFree (Hx); cudaFree (Hy); cudaFree (Hz); cudaFree (out);
free (ez_h); free (Ex_h); free (Ey_h); free (Ez_h);
free (Hx_h); free (Hy_h); free (Hz_h); free (out_h);
return 0;
}
///////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////
// Abort the process with a descriptive message if the most recent CUDA
// API call or kernel launch left an error code behind.
void checkCUDAError (const char *msg)
{
    cudaError_t code = cudaGetLastError ();
    if (code != cudaSuccess)
    {
        fprintf (stderr, "Cuda error: %s: %s.\n", msg,
                 cudaGetErrorString (code));
        exit (EXIT_FAILURE);
    }
}
///////////////////////////////////////////////////////////
// Print the central z-slice (k = Lz/2) of `campo` as "i j value"
// triplets, one point per line, over an x-by-y window.
void dprint (float *campo, int x, int y, int Lx, int Ly, int Lz)
{
    int slice = (Lz / 2) * Lx * Ly;   // offset of the middle z-plane
    for (int row = 0; row < y; row++)
    {
        for (int col = 0; col < x; col++)
        {
            printf("%d %d %f\n", col, row, campo[col + row * Lx + slice]);
        }
    }
}
///////////////////////////////////////////////////////////////////
// Print the central z-slice (k = Lz/2) of `campo` as a matrix: one row
// per line, values separated by spaces, blank line at the end.
void mprint (float *campo, int x, int y, int Lx, int Ly, int Lz )
{
    int slice = (Lz / 2) * Lx * Ly;
    for (int row = 0; row < y; row++)
    {
        for (int col = 0; col < x; col++)
            printf("%g ", campo[col + row * Lx + slice]);
        printf("\n");
    }
    printf("\n");
}
//////////////////////////////////////////////////////////////////////
// Print the single value at the exact center of the Lx x Ly x Lz volume.
void midprint (float *campo, int Lx, int Ly, int Lz)
{
    int center = Lx / 2 + (Ly / 2) * Lx + (Lz / 2) * Lx * Ly;
    printf("%f ", campo[center]);
    printf("\n");
}
/////////////////////////////////////////////////////
// Append the central z-slice of `campo` to the file "DATOS_ONDA",
// tagged with the current time step t, one "i j value" line per point.
void cria_arquivo(float *campo, int x, int y, int t, int Lx, int Ly, int Lz)
{
    FILE *onda = fopen ("DATOS_ONDA", "a");
    fprintf(onda, "valor de t=%d ******************************************************************** \n",t);
    int slice = (Lz / 2) * Lx * Ly;
    for (int row = 0; row < y; row++)
    {
        for (int col = 0; col < x; col++)
        {
            fprintf(onda, " i=%d j=%d campo=%f \n",col,row,campo[col + row * Lx + slice]);
        }
    }
    fclose(onda);
}
|
20,971 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
// Device-side strcpy: copy `source` into `dest`, including the
// terminating '\0'. `dest` must be large enough.
__device__ void mystrcpy(char dest[], const char source[])
{
    int n = 0;
    do {
        dest[n] = source[n];
    } while (dest[n++] != '\0');
}
// Device-side strcmp restricted to {-1, 0, 1}: walk both strings until
// a mismatching byte or the end of string1 is found.
__device__ int mystrcmp(char string1[], char string2[] )
{
    int pos = 0;
    while (string1[pos] == string2[pos])
    {
        if (string1[pos] == '\0')
            return 0;           // equal up to and including the terminator
        pos++;
    }
    return string1[pos] < string2[pos] ? -1 : 1;
}
// Device-side strlen: number of bytes before the terminating '\0'.
__device__ size_t mystrlen(const char *str)
{
    size_t n = 0;
    while (str[n] != '\0')
        ++n;
    return n;
}
// Device-global found flag: set to 1 by the first thread whose candidate
// matches the password (read back on the host with cudaMemcpyFromSymbol).
__device__ int dResult = 0;
// Device-global buffer that receives the matching candidate (NUL-terminated).
__device__ char dPass[30] = "";
// Recursively extend `prefix` with every letter of `alphabet` until k
// more characters have been appended; when a candidate is complete
// (k == 0) compare it against `pwd` and, on a match, publish it through
// the device globals dResult/dPass.
// n is the alphabet length; the full candidate must stay under 100 chars.
__device__ void permuteKRec(char *alphabet, char *prefix, int n, int k, char *pwd)
{
    if(dResult == 1) return;   // another thread already found the password
    if (k == 0)
    {
        if(mystrcmp(prefix, pwd) == 0){
            dResult = 1;
            mystrcpy(dPass, prefix);
        }
        return;
    }
    for (int i = 0; i < n; ++i)
    {
        size_t len = mystrlen(prefix);
        char newPrefix[100];            // stack buffer, NOT heap-allocated
        mystrcpy(newPrefix, prefix);
        newPrefix[len] = alphabet[i];
        newPrefix[len + 1] = '\0';
        permuteKRec(alphabet, newPrefix, n, k - 1, pwd);
        // BUG FIX: the original called free(newPrefix) here, but
        // newPrefix is a stack array (the malloc is commented out), so
        // freeing it is undefined behavior. The call is removed.
    }
}
// Kernel entry for the brute force: each thread takes one precomputed
// 3-letter prefix from dPermut (3 chars per thread, no separators) and
// recursively explores all (k-3)-letter extensions of it.
__global__ void permuteK(char *dAlphabet, char *dPermut, int k, char *pwd){
    if(dResult == 1) return;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the host rounds the launch up to a multiple of the block
    // size, so threads beyond the 26^3 prefixes must exit instead of
    // reading past the end of the prefix table.
    if (i >= 26 * 26 * 26) return;
    int pos = i*3;
    char prefix[4];
    prefix[0] = dPermut[pos];
    prefix[1] = dPermut[pos+1];
    prefix[2] = dPermut[pos+2];
    prefix[3] = '\0';
    permuteKRec(dAlphabet, prefix, 26, k-3, pwd);
}
// Append all 26^3 three-letter lowercase combinations ("aaa", "aab",
// ..., "zzz") to `permutation`, 3 characters each with no separators.
// The buffer must have room for strlen(permutation) + 26*26*26*3 + 1
// bytes. BUG FIX (performance): the original called strcat() for each
// of the 17576 triples; strcat re-scans the whole destination string
// every time, making the loop accidentally O(n^2). A running write
// index keeps it linear with identical output.
void permute3(char *permutation)
{
    char alphabet[] = "abcdefghijklmnopqrstuvwxyz";
    int n = 26;
    size_t pos = strlen(permutation);   // keep appending after existing content
    for (int i = 0; i < n; ++i){
        for (int j = 0; j < n; ++j){
            for (int k = 0; k < n; ++k){
                permutation[pos++] = alphabet[i];
                permutation[pos++] = alphabet[j];
                permutation[pos++] = alphabet[k];
            }
        }
    }
    permutation[pos] = '\0';
}
// Brute-force every 1-letter (k == 1) or 2-letter (otherwise) lowercase
// word; if one equals `pwd`, copy it into `guessed` and return 1,
// else return 0.
int permute1and2(char *pwd, char *guessed, int k){
    char alphabet[] = "abcdefghijklmnopqrstuvwxyz";
    char candidate[3];
    for (int first = 0; first < 26; ++first){
        candidate[0] = alphabet[first];
        if(k == 1){
            candidate[1] = '\0';
            if(strcmp(candidate, pwd) == 0){
                strcpy(guessed, candidate);
                return 1;
            }
        }
        else{
            candidate[2] = '\0';
            for (int second = 0; second < 26; ++second){
                candidate[1] = alphabet[second];
                if(strcmp(candidate, pwd) == 0){
                    strcpy(guessed, candidate);
                    return 1;
                }
            }
        }
    }
    return 0;
}
// If the last CUDA call left an error behind, print it together with
// the caller-supplied tag `sms` and call site, then terminate.
void CheckCudaError(char sms[], int line) {
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        printf("(ERROR) %s - %s in %s at line %d\n", sms, cudaGetErrorString(status), __FILE__, line);
        exit(EXIT_FAILURE);
    }
}
// Entry point: brute-force the password argv[1]. Lengths 1-2 are tried
// on the CPU; from length 3 on, the 26^3 three-letter prefixes are
// precomputed on the host and each GPU thread explores one prefix.
int main(int argc, char** argv){
    printf("empezamos\n");
    // BUG FIX: argv[1] was used unconditionally; a missing argument
    // previously dereferenced NULL.
    if (argc < 2) {
        fprintf(stderr, "Uso: %s <password>\n", argv[0]);
        return EXIT_FAILURE;
    }
    char passwordGuessed[15];
    int result = 0;
    char alphabet[] = "abcdefghijklmnopqrstuvwxyz";
    char hPermut[26*26*26*4] = "";   // host-side 3-letter prefix table
    // Brute force, growing the candidate length k.
    int k = 1;
    unsigned long int numBytes;
    char *dPermut;
    char *dAlphabet;
    char *dPwd;
    int nThreads = 1024;
    unsigned long int nBlocks;
    cudaEvent_t E0, E1;
    float TiempoTotal;
    cudaEventCreate(&E0);
    cudaEventCreate(&E1);
    cudaEventRecord(E0, 0);
    cudaEventSynchronize(E0);
    while(result == 0 && k < 15){
        printf("Probando con palabras de longitud %d\n" , k);
        if(k < 3)
            result = permute1and2(argv[1], passwordGuessed, k);
        if(k == 3){
            // One-time device setup when we first need the GPU.
            permute3(hPermut);
            numBytes = 26*26*26*4*sizeof(char);
            cudaMalloc((char**)&dPermut, numBytes);
            cudaMalloc((char**)&dAlphabet, 26*sizeof(char));
            cudaMalloc((char**)&dPwd, 26*sizeof(char));
            CheckCudaError((char *) "Obtener Memoria en el device", __LINE__);
            // Copy host data to the device.
            cudaMemcpy(dPermut, hPermut, numBytes, cudaMemcpyHostToDevice);
            cudaMemcpy(dAlphabet, alphabet, 26*sizeof(char), cudaMemcpyHostToDevice);
            // BUG FIX: the original copied a fixed 26 bytes from
            // argv[1], reading past the end of shorter arguments. Copy
            // only the string (with terminator), capped to dPwd's size.
            size_t pwdBytes = strlen(argv[1]) + 1;
            if (pwdBytes > 26) pwdBytes = 26;
            cudaMemcpy(dPwd, argv[1], pwdBytes, cudaMemcpyHostToDevice);
            CheckCudaError((char *) "Copiar Datos Host --> Device", __LINE__);
        }
        if(k >= 3){
            unsigned long int N = pow(26, 3);
            nBlocks = (N + nThreads - 1)/nThreads;   // ceil-div over prefixes
            dim3 dimGrid(nBlocks, 1, 1);
            dim3 dimBlock(nThreads, 1, 1);
            permuteK<<<dimGrid, dimBlock>>>(dAlphabet, dPermut, k, dPwd);
            printf("permutao\n");
            CheckCudaError((char *) "Invocar Kernel", __LINE__);
            // Blocking copy of the found flag also synchronizes with the kernel.
            cudaMemcpyFromSymbol(&result, dResult, sizeof(int), 0, cudaMemcpyDeviceToHost);
            if(result == 1){
                cudaMemcpyFromSymbol(&passwordGuessed, dPass, k*sizeof(char), 0, cudaMemcpyDeviceToHost);
            }
        }
        ++k;
    }
    cudaEventRecord(E1, 0);
    cudaEventSynchronize(E1);
    cudaEventElapsedTime(&TiempoTotal, E0, E1);
    cudaEventDestroy(E0); cudaEventDestroy(E1);
    if(k >= 3){
        // Release device memory (only allocated once k reached 3).
        cudaFree(dPermut); cudaFree(dAlphabet); cudaFree(dPwd);
        cudaDeviceSynchronize();
    }
    if(result == 1){
        printf("Tiempo paralelo para encontrar la contraseña %s: %4.6f segundos\n", passwordGuessed, TiempoTotal/1000.0f);
    }
    else
        printf("Password not found...\n");
}
|
20,972 | #include "cuda.h"
#include "stdio.h"
#define threads_per_block 512
// Print one integer followed by a newline.
void printi(int i){
printf("%d\n", i);
}
// Fill the first n entries of `array` with the value 1.
void init_CPU_array(int* array, int n){
    int idx = 0;
    while (idx < n) {
        array[idx] = 1;
        ++idx;
    }
}
// Print the first n entries of `array`, one per line, via printi().
void print_CPU_array(int array[], int n){
    for(int idx = 0; idx < n; ++idx)
        printi(array[idx]);
}
// Block-wise tree reduction: each block sums up to threads_per_block
// consecutive elements of `arreglo` in shared memory and writes its
// partial sum to result[blockIdx.x]. N is the number of valid input
// elements (passed as float, as in the original interface).
__global__ void sumador(int* arreglo, int* result, float N)
{
    __shared__ int compartida[threads_per_block];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the original used `if (tid > N) return;`, which (a) let
    // tid == N load one element past the end of the array, and (b) made
    // out-of-range threads skip the __syncthreads() calls below -- a
    // divergent barrier is undefined behavior. Keep every thread alive
    // and guard only the load.
    if (tid < N)
        compartida[threadIdx.x] = arreglo[tid];
    __syncthreads();
    // Pairwise reduction, doubling the stride each pass. Powers of two
    // are computed with shifts instead of the original float pow().
    for (int i = 1; (1 << (i - 1)) < threads_per_block; i++)
    {
        int acceso = 1 << i;        // distance between surviving slots
        int offset = 1 << (i - 1);  // distance to the partner element
        // The second condition keeps the partner read inside the
        // block's count of valid elements (uninitialized shared slots
        // are never read as partners).
        if (threadIdx.x < ((float)threads_per_block / acceso) && (threadIdx.x * acceso + offset) < (N - blockIdx.x * blockDim.x))
        {
            compartida[threadIdx.x * acceso] = compartida[threadIdx.x * acceso] + compartida[threadIdx.x * acceso + offset];
        }
        __syncthreads();
    }
    // The first thread of each block publishes the block's partial sum.
    if (threadIdx.x == 0)
        result[blockIdx.x] = compartida[0];
}
// Buffers reused across reduction passes; the two device buffers are
// ping-ponged (input <-> output) after every kernel launch in main.
int* arreglo_suma1;    // host copy of the input array
int* d_arreglo_suma1;  // device input buffer for the current pass
int* arreglo_result;   // host buffer for the final result readback
int* d_arreglo_suma2;  // device output buffer for the current pass
// Entry point: sum N ones with repeated block-wise reductions, halving
// the element count by threads_per_block each pass and ping-ponging
// the two device buffers, until a single value remains in element 0.
int main(int argc, char** argv){
    int N = 1024000;
    //############################## INITIALIZATION ####################################
    arreglo_suma1 = (int*) malloc(N * sizeof(int));
    cudaMalloc(&d_arreglo_suma1, N * sizeof(int));
    arreglo_result = (int*) malloc(N * sizeof(int));
    cudaMalloc(&d_arreglo_suma2, N * sizeof(int));
    init_CPU_array(arreglo_suma1, N);
    cudaMemcpy(d_arreglo_suma1, arreglo_suma1, N * sizeof(int), cudaMemcpyHostToDevice);
    //################################ REDUCTION PASSES #####################################
    dim3 miBloque1D_1(threads_per_block,1);
    for(int i=0; pow(threads_per_block, i) < N ; i++)
    {
        int remaining_elements = ceil((float)N/pow(threads_per_block, i));
        int block_count = ceil((float)N/pow(threads_per_block, i+1));
        dim3 miGrid1D_1(block_count,1);
        sumador<<<miGrid1D_1, miBloque1D_1>>>(d_arreglo_suma1, d_arreglo_suma2, remaining_elements);
        // BUG FIX: cudaThreadSynchronize() has been deprecated for a
        // long time; cudaDeviceSynchronize() is the supported call.
        cudaDeviceSynchronize();
        // Swap input/output so the partial sums feed the next pass.
        int* tmp = d_arreglo_suma1;
        d_arreglo_suma1 = d_arreglo_suma2;
        d_arreglo_suma2 = tmp;
        printf("elementos restantes: %d \n", remaining_elements);
        printf("bloques usados: %d \n\n", block_count);
    }
    //################################### READ BACK #####################################
    // Only element 0 holds the final sum, but the full buffer is copied
    // back as in the original.
    cudaMemcpy(arreglo_result, d_arreglo_suma1, N * sizeof(int), cudaMemcpyDeviceToHost);
    printf("%s\n", "RESULTADO DE LA SUMA:");
    print_CPU_array(arreglo_result, 1);
    free(arreglo_suma1);
    cudaFree (d_arreglo_suma1);
    free(arreglo_result);
    cudaFree (d_arreglo_suma2);
}
20,973 | #include "includes.h"
using namespace std;
// Flatten a 2D grid of 2D blocks into one linear global thread index.
__device__ int getGlobalIdx_2D_2D()
{
    int blocksBefore = blockIdx.y * gridDim.x + blockIdx.x;
    int threadsPerBlock = blockDim.x * blockDim.y;
    int rankInBlock = threadIdx.y * blockDim.x + threadIdx.x;
    return blocksBefore * threadsPerBlock + rankInBlock;
}
// Square every element of the n*m input matrix (flattened) into `out`,
// one element per thread; the bounds check tolerates over-provisioned
// launches.
__global__ void matrixSquareElementWiseKernel(float* in, float* out, int n, int m){
    // NOTE: the original declared `extern __shared__ float Rs[];` but
    // never used it -- the unused declaration has been removed.
    int index = getGlobalIdx_2D_2D();
    if (index < n*m){
        out[index] = in[index] * in[index];
    }
}
20,974 | #include <cuda_runtime.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 2048
#define THREADS_PER_BLOCK 256
// Kernel: branchless final-grade computation. Exactly one of the four
// mutually exclusive predicates below evaluates to 1 and selects the
// result:
//   - media is an exact integer                  -> keep it as-is
//   - fractional and 4 < media < 5               -> 4
//   - fractional and media > 9                   -> 9
//   - fractional, media < 4 or 5 < media < 9     -> truncate and add 1
// NOTE(review): there is no `thread < N` guard; the launch in main uses
// exactly N/THREADS_PER_BLOCK blocks, which covers N only because N is
// a multiple of THREADS_PER_BLOCK.
__global__ void marks(float * media, int * final){
int thread = blockIdx.x*blockDim.x + threadIdx.x;
final[thread] = (media[thread] == (int)media[thread]) * (int)media[thread] +
(media[thread] != (int)media[thread] && media[thread] > 4 && media[thread] < 5)* 4 +
(media[thread] != (int)media[thread] && media[thread] > 9)* 9 +
(media[thread] != (int)media[thread] && (media[thread] < 4 || (media[thread] > 5 && media[thread] < 9))) * ((int)media[thread] + 1);
}
// Entry point: generate N random averages in [0, 10), compute the final
// grade for each on the GPU, and verify the results on the host.
int main(){
struct timeval t1, t2;
float *hMedia, *dMedia;
int *hFinal, *dFinal;
// Host allocations.
// BUG FIX: the float buffer was sized with sizeof(int); that only
// worked because int and float happen to share a size on common
// platforms. Use the element's own type.
hMedia = (float*)malloc(N*sizeof(float));
hFinal = (int*)malloc(N*sizeof(int));
// Random averages in [0, 10).
srand(time(NULL));
for (int i = 0; i < N; i++){
hMedia[i] = ((float)rand()/RAND_MAX)*10;
}
// Device allocations (same sizeof fix for the float buffer).
cudaMalloc((void **)&dMedia, N*sizeof(float));
cudaMalloc((void **)&dFinal, N*sizeof(int));
// Host -> device copy of the input averages.
cudaMemcpy(dMedia, hMedia, N*sizeof(float), cudaMemcpyHostToDevice);
int nblocks = N / THREADS_PER_BLOCK;   // N is a multiple of THREADS_PER_BLOCK
gettimeofday(&t1, 0);
// Kernel launch + sync so the timing covers the actual execution.
marks<<<nblocks, THREADS_PER_BLOCK>>>(dMedia, dFinal);
cudaDeviceSynchronize();
gettimeofday(&t2, 0);
// Device -> host copy of the computed grades.
cudaMemcpy(hFinal, dFinal, N*sizeof(int), cudaMemcpyDeviceToHost);
// Host-side verification mirroring the kernel's four cases.
bool error = false;
for(int i = 0; i < N; i++){
if (hMedia[i] == (int)hMedia[i]){
if(hFinal[i] != (int)hMedia[i]){
error = true;
printf("Media[%d] = %f -> Final[%d] = %d\n", i, hMedia[i], i, hFinal[i]);
break;
}
}else if (hMedia[i] > 4 && hMedia[i] < 5){
if (hFinal[i] != 4){
error = true;
printf("Media[%d] = %f -> Final[%d] = %d\n", i, hMedia[i], i, hFinal[i]);
break;
}
}else if(hMedia[i] > 9){
if (hFinal[i] != 9){
error = true;
printf("Media[%d] = %f -> Final[%d] = %d\n", i, hMedia[i], i, hFinal[i]);
break;
}
}else if(hFinal[i] != (int)hMedia[i] + 1){
error = true;
printf("Media[%d] = %f -> Final[%d] = %d\n", i, hMedia[i], i, hFinal[i]);
break;
}
}
if(error)
printf("La nota final no se ha calculado correctamente :(\n");
else
printf("La nota final se ha calculado correctamente! :D\n");
double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("Tiempo: %f ms\n", time);
// Release host and device memory.
free(hMedia);
free(hFinal);
cudaFree(dMedia);
cudaFree(dFinal);
}
20,975 |
extern "C" {
// Darken a pixel's RGB channels with a logistic (sigmoid) falloff
// driven by the iteration count i relative to maxiter and by the
// escape/mag ratio; pixels that never escaped (i >= maxiter) go black.
// pixel[3] (alpha) and the val/coord parameters are left untouched.
__device__
void colorize_pixel(double pixel[4], double mag, double escape, double i, double maxiter, double2 val, double2 coord) {
    double factor;
    if (i < maxiter) {
        double progress = i / maxiter;
        double shifted = ((escape / mag) / maxiter + progress) - 0.2;
        factor = 1 / (1.0 + exp(-15.0 * shifted));
    } else {
        factor = 0.0f;
    }
    pixel[0] *= factor;
    pixel[1] *= factor;
    pixel[2] *= factor;
}
}
|
20,976 | //#include <thrust\adjacent_difference.h>
//#include <thrust\execution_policy.h>
//#include <thrust\sort.h>
//#include <thrust\gather.h>
//#include <thrust\iterator\constant_iterator.h>
//#include <thrust\binary_search.h>
//
////__global__ void CircshiftKernel(double2 * __restrict out, //double2 __restrict * out,
//// const double2 * __restrict in,
//// const int xdim, const int ydim,
//// const int xshift, const int yshift)
////{
////
//// int i = threadIdx.x + blockIdx.x * blockDim.x;
//// int j = threadIdx.y + blockIdx.y * blockDim.y;
////
//// if ((i < xdim) && (j < ydim)) {
////
//// int ii = (i + xshift) % xdim;
//// int jj = (j + yshift) % ydim;
////
//// //out[jj * xdim + ii].x = in[j * xdim + i].x;
//// //out[jj * xdim + ii].y = in[j * xdim + i].y;
//// out[jj * xdim + ii].x = -(1. - 2 * ((i + j) & 1)) * in[j * xdim + i].x;
//// out[jj * xdim + ii].y = -(1. - 2 * ((i + j) & 1)) * in[j * xdim + i].y;
////
//// }
////
////}
////
////__global__ void FftshiftKernel(double2 * __restrict data,
//// const int N1,
//// const int N2) {
////
//// int i = threadIdx.x + blockIdx.x * blockDim.x;
//// int j = threadIdx.y + blockIdx.y * blockDim.y;
////
//// if ((i < N1) && (j < N2)) {
////
//// data[j*N1 + i].x *= -(1. - 2 * ((i + j) & 1));
//// data[j*N1 + i].y *= -(1. - 2 * ((i + j) & 1));
//// }
////
////}
//
///************************/
///* MORTON CODE ENCODING */
///************************/
//// "Insert" a 0 bit after each of the 16 low bits of x
//// --- ^ xor
//// --- & and
//__host__ __device__ uint32_t Part1By1(uint32_t x){
// x &= 0x0000ffff; // x = ---- ---- ---- ---- fedc ba98 7654 3210
// x = (x ^ (x << 8)) & 0x00ff00ff; // x = ---- ---- fedc ba98 ---- ---- 7654 3210
// x = (x ^ (x << 4)) & 0x0f0f0f0f; // x = ---- fedc ---- ba98 ---- 7654 ---- 3210
// x = (x ^ (x << 2)) & 0x33333333; // x = --fe --dc --ba --98 --76 --54 --32 --10
// x = (x ^ (x << 1)) & 0x55555555; // x = -f-e -d-c -b-a -9-8 -7-6 -5-4 -3-2 -1-0
// return x;
//}
//
//__host__ __device__ uint32_t encode_morton2d(uint32_t x, uint32_t y)
//{
// return (Part1By1(y) << 1) + Part1By1(x);
//}
//
///********************************************************/
///* SPATIAL CONVOLUTION KERNEL WITHOUT ATOMIC OPERATIONS */
///********************************************************/
////__host__ __device__ double2 addContributions(const double * __restrict__ d_x, const double * __restrict__ d_y, const double2 * __restrict d_f, double fact, double2 temp_sum, const double xb, const double yb, const double sb,
//// const double tb, const double Dx, const double Dy, const int msp, const double d_n_si, const double d_n_sj, const double t1, const double b, const int num_elements) {
////
//// for (int kk = 0; kk < num_elements; kk++) {
////
//// double x_temp = (d_x[kk] - xb) / Dx;
//// double y_temp = (d_y[kk] - yb) / Dy;
////
//// if ((abs(x_temp - d_n_si) <= msp) && (abs(y_temp - d_n_sj) <= msp)) {
////
//// double arg = sb * d_x[kk] + tb * d_y[kk];
////
//// double real_part_temp = d_f[kk].x * cos(arg) + d_f[kk].y * sin(arg);
//// double imag_part_temp = d_f[kk].y * cos(arg) - d_f[kk].x * sin(arg);
////
//// double exp_temp = fact * exp(b * (Dy * Dy * d_n_sj * d_n_sj + Dx * Dx * d_n_si * d_n_si) - t1 * (((x_temp - d_n_si) * (x_temp - d_n_si) + (y_temp - d_n_sj) * (y_temp - d_n_sj))));
////
//// temp_sum.x = temp_sum.x + real_part_temp * exp_temp;
//// temp_sum.y = temp_sum.y + imag_part_temp * exp_temp;
////
//// }
////
//// }
////
//// return temp_sum;
////
////}
//
//struct complex_sum {
//
// __device__ double2 operator()(const double2 &a, const double2 &b) {
//
// double2 result;
// result.x = a.x + b.x;
// result.y = a.y + b.y;
//
// return result;
// }
//};
//
//struct transf
//{
// int msp;
// double t1, d_n_si, d_n_sj;
//
// __device__ transf(double t1_, double d_n_si_, double d_n_sj_, int msp_) : t1(t1_),
// d_n_si(d_n_si_), d_n_sj(d_n_sj_), msp(msp_) { }
//
// __device__ double2 operator()(thrust::tuple<double, double, double2> t)
// {
// double x_temp = thrust::get<0>(t);
// double y_temp = thrust::get<1>(t);
//
// double exp_temp = exp(-t1 * (((x_temp - d_n_si) * (x_temp - d_n_si) + (y_temp - d_n_sj) * (y_temp - d_n_sj))));
//
// double2 temp_sum;
// temp_sum.x = thrust::get<2>(t).x;
// temp_sum.y = thrust::get<2>(t).y;
//
// double test = ((abs(x_temp - d_n_si) <= msp) && (abs(y_temp - d_n_sj) <= msp));
//
// temp_sum.x = test * temp_sum.x * exp_temp;
// temp_sum.y = test * temp_sum.y * exp_temp;
//
// return temp_sum;
//
// }
//
//};
//
//__device__ double2 addContributions(const double * __restrict__ d_x, const double * __restrict__ d_y, const double2 * __restrict d_f, double fact, double2 temp_sum, const double xb, const double yb, const double sb,
// const double tb, const double Dx, const double Dy, const int msp, const double d_n_si, const double d_n_sj, const double t1, const double b, const int num_elements) {
//
// //auto begin = thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(d_x), thrust::device_pointer_cast(d_y), thrust::device_pointer_cast(d_f)));
// //auto end = thrust::make_zip_iterator(thrust::make_tuple(thrust::device_pointer_cast(d_x), thrust::device_pointer_cast(d_y), thrust::device_pointer_cast(d_f))) + num_elements;
//
// //double2 initial_result;
// //initial_result.x = 0.;
// //initial_result.y = 0.;
//
// //double2 result = thrust::transform_reduce(thrust::device, begin, end, transf(t1, d_n_si, d_n_sj, msp), temp_sum, complex_sum());
// //
// //return result;
//
// for (int kk = 0; kk < num_elements; kk++) {
//
// double x_temp = d_x[kk];
// double y_temp = d_y[kk];
//
// if ((abs(x_temp - d_n_si) <= msp) && (abs(y_temp - d_n_sj) <= msp)) {
//
// double exp_temp = exp(-t1 * (((x_temp - d_n_si) * (x_temp - d_n_si) + (y_temp - d_n_sj) * (y_temp - d_n_sj))));
//
// temp_sum.x = temp_sum.x + d_f[kk].x * exp_temp;
// temp_sum.y = temp_sum.y + d_f[kk].y * exp_temp;
//
// }
//
// }
//
// return temp_sum;
//
//}
//
//__global__ void spatialConvolutionKernelNoAtomic(double2 * __restrict__ d_f_tau, const double2 * __restrict__ d_f, const double * __restrict__ d_x, const double * __restrict__ d_y, const int * __restrict__ d_cumsum,
// const double sb, const double tb, const int len_in,
// const int Mrx, const int Mry, const double b, const double t1, const double xb, const double yb, const double Dx, const double Dy, const double offsetx, const double offsety, const int num_bins, const int msp) {
//
// int i = threadIdx.x + blockIdx.x * blockDim.x;
// int j = threadIdx.y + blockIdx.y * blockDim.y;
//
// int tid = j * Mrx + i;
//
// if ((i < Mrx) && (j < Mry)) {
//
// double fact = 1. / (4.* PI *b);
// fact = fact * fact;
//
// double2 temp_sum;
//
// temp_sum.x = 0.;
// temp_sum.y = 0.;
//
// double d_n_si = i - (int)Mrx / 2;
// double d_n_sj = j - (int)Mry / 2;
//
// uint32_t indicesx = floor((d_n_si + offsetx) / (2 * msp + 1));
// uint32_t indicesy = floor((d_n_sj + offsety) / (2 * msp + 1));
//
// uint32_t d_code = encode_morton2d(indicesx, indicesy); // --- Center
// uint32_t d_code_W = encode_morton2d(indicesx - 1, indicesy); // --- West
// uint32_t d_code_E = encode_morton2d(indicesx + 1, indicesy); // --- East
// uint32_t d_code_S = encode_morton2d(indicesx, indicesy - 1); // --- South
// uint32_t d_code_N = encode_morton2d(indicesx, indicesy + 1); // --- North
// uint32_t d_code_NE = encode_morton2d(indicesx + 1, indicesy + 1); // --- North-East
// uint32_t d_code_NW = encode_morton2d(indicesx - 1, indicesy + 1); // --- North-West
// uint32_t d_code_SE = encode_morton2d(indicesx + 1, indicesy - 1); // --- South-East
// uint32_t d_code_SW = encode_morton2d(indicesx - 1, indicesy - 1); // --- South-West
//
// if ((d_code < num_bins) && ((d_cumsum[d_code] - d_cumsum[d_code - 1]) > 0)) {
// temp_sum = addContributions(d_x + d_cumsum[d_code - 1], d_y + d_cumsum[d_code - 1], d_f + d_cumsum[d_code - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code] - d_cumsum[d_code - 1]);
// }
// if ((d_code_W < num_bins) && ((d_cumsum[d_code_W] - d_cumsum[d_code_W - 1]) > 0) && (d_code_W > 0)) {
// temp_sum = addContributions(d_x + d_cumsum[d_code_W - 1], d_y + d_cumsum[d_code_W - 1], d_f + d_cumsum[d_code_W - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_W] - d_cumsum[d_code_W - 1]);
// }
// if ((d_code_E < num_bins) && ((d_cumsum[d_code_E] - d_cumsum[d_code_E - 1]) > 0)) {
// temp_sum = addContributions(d_x + d_cumsum[d_code_E - 1], d_y + d_cumsum[d_code_E - 1], d_f + d_cumsum[d_code_E - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_E] - d_cumsum[d_code_E - 1]);
// }
// if ((d_code_S < num_bins) && ((d_cumsum[d_code_S] - d_cumsum[d_code_S - 1]) > 0)) {
// temp_sum = addContributions(d_x + d_cumsum[d_code_S - 1], d_y + d_cumsum[d_code_S - 1], d_f + d_cumsum[d_code_S - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_S] - d_cumsum[d_code_S - 1]);
// }
// if ((d_code_N < num_bins) && ((d_cumsum[d_code_N] - d_cumsum[d_code_N - 1]) > 0)) {
// temp_sum = addContributions(d_x + d_cumsum[d_code_N - 1], d_y + d_cumsum[d_code_N - 1], d_f + d_cumsum[d_code_N - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_N] - d_cumsum[d_code_N - 1]);
// }
// if ((d_code_NE < num_bins) && ((d_cumsum[d_code_NE] - d_cumsum[d_code_NE - 1]) > 0)) {
// temp_sum = addContributions(d_x + d_cumsum[d_code_NE - 1], d_y + d_cumsum[d_code_NE - 1], d_f + d_cumsum[d_code_NE - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_NE] - d_cumsum[d_code_NE - 1]);
// }
// if ((d_code_NW < num_bins) && ((d_cumsum[d_code_NW] - d_cumsum[d_code_NW - 1]) > 0)) {
// temp_sum = addContributions(d_x + d_cumsum[d_code_NW - 1], d_y + d_cumsum[d_code_NW - 1], d_f + d_cumsum[d_code_NW - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_NW] - d_cumsum[d_code_NW - 1]);
// }
// if ((d_code_SE < num_bins) && ((d_cumsum[d_code_SE] - d_cumsum[d_code_SE - 1]) > 0)) {
// temp_sum = addContributions(d_x + d_cumsum[d_code_SE - 1], d_y + d_cumsum[d_code_SE - 1], d_f + d_cumsum[d_code_SE - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_SE] - d_cumsum[d_code_SE - 1]);
// }
// if ((d_code_SW < num_bins) && ((d_cumsum[d_code_SW] - d_cumsum[d_code_SW - 1]) > 0)) {
// temp_sum = addContributions(d_x + d_cumsum[d_code_SW - 1], d_y + d_cumsum[d_code_SW - 1], d_f + d_cumsum[d_code_SW - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_SW] - d_cumsum[d_code_SW - 1]);
// }
//
// d_f_tau[tid].x = fact * exp(b * (Dy * Dy * d_n_sj * d_n_sj + Dx * Dx * d_n_si * d_n_si)) * temp_sum.x;
// d_f_tau[tid].y = fact * exp(b * (Dy * Dy * d_n_sj * d_n_sj + Dx * Dx * d_n_si * d_n_si)) * temp_sum.y;
//
// }
//}
//
//
////void spatialConvolutionKernelNoAtomicCPU(double2 * __restrict__ d_f_tau, const double2 * __restrict d_f, const double * __restrict__ d_x, const double * __restrict__ d_y, const int * __restrict__ d_cumsum,
//// const double sb, const double tb, const int len_in,
//// const int Mrx, const int Mry, const double b, const double t1, const double xb, const double yb, const double Dx, const double Dy, const double offsetx, const double offsety, const int num_bins, const int msp) {
////
//// for (int j = 0; j < Mry; j++)
//// for (int i = 0; i < Mrx; i++) {
////
//// int tid = j * Mrx + i;
////
//// double fact = 1. / (4.* PI *b);
//// fact = fact * fact;
////
//// double2 temp_sum;
////
//// temp_sum.x = 0.;
//// temp_sum.y = 0.;
////
//// double2 temp_sum_partial;
////
//// double d_n_si = i - (int)Mrx / 2;
//// double d_n_sj = j - (int)Mry / 2;
////
//// uint32_t indicesx = floor((d_n_si + offsetx) / (2 * msp + 1));
//// uint32_t indicesy = floor((d_n_sj + offsety) / (2 * msp + 1));
////
//// uint32_t d_code = encode_morton2d(indicesx, indicesy); // --- Center
//// uint32_t d_code_W = encode_morton2d(indicesx - 1, indicesy); // --- West
//// uint32_t d_code_E = encode_morton2d(indicesx + 1, indicesy); // --- East
//// uint32_t d_code_S = encode_morton2d(indicesx, indicesy - 1); // --- South
//// uint32_t d_code_N = encode_morton2d(indicesx, indicesy + 1); // --- North
//// uint32_t d_code_NE = encode_morton2d(indicesx + 1, indicesy + 1); // --- North-East
//// uint32_t d_code_NW = encode_morton2d(indicesx - 1, indicesy + 1); // --- North-West
//// uint32_t d_code_SE = encode_morton2d(indicesx + 1, indicesy - 1); // --- South-East
//// uint32_t d_code_SW = encode_morton2d(indicesx - 1, indicesy - 1); // --- South-West
////
//// if ((d_code < num_bins) && ((d_cumsum[d_code] - d_cumsum[d_code - 1]) > 0)) {
//// temp_sum = addContributions(d_x + d_cumsum[d_code - 1], d_y + d_cumsum[d_code - 1], d_f + d_cumsum[d_code - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code] - d_cumsum[d_code - 1]);
//// }
//// if ((d_code_W < num_bins) && ((d_cumsum[d_code_W] - d_cumsum[d_code_W - 1]) > 0)) {
//// temp_sum = addContributions(d_x + d_cumsum[d_code_W - 1], d_y + d_cumsum[d_code_W - 1], d_f + d_cumsum[d_code_W - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_W] - d_cumsum[d_code_W - 1]);
//// }
//// if ((d_code_E < num_bins) && ((d_cumsum[d_code_E] - d_cumsum[d_code_E - 1]) > 0)) {
//// temp_sum = addContributions(d_x + d_cumsum[d_code_E - 1], d_y + d_cumsum[d_code_E - 1], d_f + d_cumsum[d_code_E - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_E] - d_cumsum[d_code_E - 1]);
//// }
//// if ((d_code_S < num_bins) && ((d_cumsum[d_code_S] - d_cumsum[d_code_S - 1]) > 0)) {
//// temp_sum = addContributions(d_x + d_cumsum[d_code_S - 1], d_y + d_cumsum[d_code_S - 1], d_f + d_cumsum[d_code_S - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_S] - d_cumsum[d_code_S - 1]);
//// }
//// if ((d_code_N < num_bins) && ((d_cumsum[d_code_N] - d_cumsum[d_code_N - 1]) > 0)) {
//// temp_sum = addContributions(d_x + d_cumsum[d_code_N - 1], d_y + d_cumsum[d_code_N - 1], d_f + d_cumsum[d_code_N - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_N] - d_cumsum[d_code_N - 1]);
//// }
//// if ((d_code_NE < num_bins) && ((d_cumsum[d_code_NE] - d_cumsum[d_code_NE - 1]) > 0)) {
//// temp_sum = addContributions(d_x + d_cumsum[d_code_NE - 1], d_y + d_cumsum[d_code_NE - 1], d_f + d_cumsum[d_code_NE - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_NE] - d_cumsum[d_code_NE - 1]);
//// }
//// if ((d_code_NW < num_bins) && ((d_cumsum[d_code_NW] - d_cumsum[d_code_NW - 1]) > 0)) {
//// temp_sum = addContributions(d_x + d_cumsum[d_code_NW - 1], d_y + d_cumsum[d_code_NW - 1], d_f + d_cumsum[d_code_NW - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_NW] - d_cumsum[d_code_NW - 1]);
//// }
//// if ((d_code_SE < num_bins) && ((d_cumsum[d_code_SE] - d_cumsum[d_code_SE - 1]) > 0)) {
//// temp_sum = addContributions(d_x + d_cumsum[d_code_SE - 1], d_y + d_cumsum[d_code_SE - 1], d_f + d_cumsum[d_code_SE - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_SE] - d_cumsum[d_code_SE - 1]);
//// }
//// if ((d_code_SW < num_bins) && ((d_cumsum[d_code_SW] - d_cumsum[d_code_SW - 1]) > 0)) {
//// temp_sum = addContributions(d_x + d_cumsum[d_code_SW - 1], d_y + d_cumsum[d_code_SW - 1], d_f + d_cumsum[d_code_SW - 1], fact, temp_sum, xb, yb, sb, tb, Dx, Dy, msp, d_n_si, d_n_sj, t1, b, d_cumsum[d_code_SW] - d_cumsum[d_code_SW - 1]);
//// }
////
//// d_f_tau[tid].x = temp_sum.x;
//// d_f_tau[tid].y = temp_sum.y;
////
//// }
////
////}
//
///***********************/
///* PARTITIONING KERNEL */
///***********************/
////__global__ void partitionKernel(const double * __restrict__ d_x, const double * __restrict__ d_y, uint32_t * __restrict__ d_code, const double offsetx, const double offsety, const double xb, const double yb,
//// const double Dx, const double Dy, const int msp, const int N) {
////
//// const int tid = threadIdx.x + blockIdx.x * blockDim.x;
////
//// if (tid > N) return;
////
//// double d_x_temp = (d_x[tid] - xb) / Dx + offsetx;
//// double d_y_temp = (d_y[tid] - yb) / Dy + offsety;
////
//// uint32_t indicesx = floor(d_x_temp / (2 * msp + 1));
//// uint32_t indicesy = floor(d_y_temp / (2 * msp + 1));
////
//// d_code[tid] = encode_morton2d(indicesx, indicesy);
////
////}
//
//__global__ void partitionShiftKernel(double * __restrict__ d_x, double * __restrict__ d_y, double2 * __restrict__ d_f, uint32_t * __restrict__ d_code, const double offsetx, const double offsety,
// const double xb, const double yb, const double sb, const double tb, const double Dx, const double Dy, const int msp, const int N) {
//
// const int tid = threadIdx.x + blockIdx.x * blockDim.x;
//
// if (tid > N) return;
//
// double d_x_temp1 = (d_x[tid] - xb) / Dx;
// double d_y_temp1 = (d_y[tid] - yb) / Dy;
//
// double d_x_temp = d_x_temp1 + offsetx;
// double d_y_temp = d_y_temp1 + offsety;
//
// uint32_t indicesx = floor(d_x_temp / (2 * msp + 1));
// uint32_t indicesy = floor(d_y_temp / (2 * msp + 1));
//
// d_code[tid] = encode_morton2d(indicesx, indicesy);
//
// double arg = sb * d_x[tid] + tb * d_y[tid];
//
// double real_part_temp = d_f[tid].x * cos(arg) + d_f[tid].y * sin(arg);
// double imag_part_temp = d_f[tid].y * cos(arg) - d_f[tid].x * sin(arg);
//
// d_f[tid].x = real_part_temp;
// d_f[tid].y = imag_part_temp;
//
// d_x[tid] = d_x_temp1;
// d_y[tid] = d_y_temp1;
//}
//
////gpuErrchk(cudaDeviceSetLimit(cudaLimitDevRuntimePendingLaunchCount, 32768));
//
////thrust::device_vector<double> d_x_sorted(len_in);
////thrust::device_vector<double> d_y_sorted(len_in);
////thrust::device_vector<double2> d_f_sorted(len_in);
//
////thrust::device_vector<uint32_t> d_code(len_in);
//
//// double offsetx = std::max(0.5 * Mrx, (Max_x - xb) / Dx) + 2. * (double)(2 * msp + 1);
//// double offsety = std::max(0.5 * Mry, (Max_y - yb) / Dy) + 2. * (double)(2 * msp + 1);
////
//// timerGPU.StartCounter();
//// //partitionKernel << <iDivUp(len_in, BLOCKSIZE), BLOCKSIZE >> >(d_x, d_y, thrust::raw_pointer_cast(d_code.data()), offsetx, offsety, xb, yb, Dx, Dy, msp, len_in);
//// partitionShiftKernel << <iDivUp(len_in, BLOCKSIZE), BLOCKSIZE >> >(d_x, d_y, d_f, thrust::raw_pointer_cast(d_code.data()), offsetx, offsety, xb, yb, sb, tb, Dx, Dy, msp, len_in);
////#ifdef DEBUG
//// gpuErrchk(cudaPeekAtLastError());
//// gpuErrchk(cudaDeviceSynchronize());
////#endif
//// //timingFile << "Partition kernel " << timerGPU.GetCounter() << "\n";
////
//// // --- Initialize indices vector to [0, 1, 2, ...]
//// //timerGPU.StartCounter();
//// thrust::counting_iterator<int> iter(0);
//// thrust::device_vector<int> indices(len_in);
//// thrust::copy(iter, iter + indices.size(), indices.begin());
////
//// // --- First, sort the keys and indices by the keys
//// thrust::sort_by_key(d_code.begin(), d_code.end(), indices.begin());
////
//// // --- Now reorder the ID arrays using the sorted indices
//// thrust::gather(indices.begin(), indices.end(), thrust::device_pointer_cast(d_x), d_x_sorted.begin());
//// thrust::gather(indices.begin(), indices.end(), thrust::device_pointer_cast(d_y), d_y_sorted.begin());
//// thrust::gather(indices.begin(), indices.end(), thrust::device_pointer_cast(d_f), d_f_sorted.begin());
////
//// thrust::device_vector<int> d_cumsum;
////
//// // --- The number of d_cumsum bins is equal to the maximum value plus one
//// int num_bins = d_code.back() + 1;
////
//// // --- Resize d_cumsum storage
//// d_cumsum.resize(num_bins);
////
//// // --- Find the end of each bin of values - Cumulative d_cumsum
//// thrust::counting_iterator<int> search_begin(0);
//// thrust::upper_bound(d_code.begin(), d_code.end(), search_begin, search_begin + num_bins, d_cumsum.begin());
//// //timingFile << "Parte thrust " << timerGPU.GetCounter() << "\n";
////
//// //timerGPU.StartCounter();
//// dim3 GridSize_SpatialScaling(iDivUp(Mrx, BLOCKSIZE_X_SPATIAL_SCALING), iDivUp(Mry, BLOCKSIZE_Y_SPATIAL_SCALING));
//// dim3 BlockSize_SpatialScaling(BLOCKSIZE_X_SPATIAL_SCALING, BLOCKSIZE_Y_SPATIAL_SCALING);
//// spatialConvolutionKernelNoAtomic << <GridSize_SpatialScaling, BlockSize_SpatialScaling >> >(d_f_tau, thrust::raw_pointer_cast(d_f_sorted.data()), thrust::raw_pointer_cast(d_x_sorted.data()), thrust::raw_pointer_cast(d_y_sorted.data()), thrust::raw_pointer_cast(d_cumsum.data()), sb, tb, len_in, Mrx, Mry, b, t1, xb, yb, Dx, Dy, offsetx, offsety, num_bins, msp);
////#ifdef DEBUG
//// gpuErrchk(cudaPeekAtLastError());
//// gpuErrchk(cudaDeviceSynchronize());
////#endif
//// //timingFile << "Parte kernel " << timerGPU.GetCounter() << "\n";
//
////timingFile << timerGPU.GetCounter();
//
|
20,977 | #include <stdio.h>
int main(int argc, char **argv) {
  // Query CUDA device 0 and print its capabilities.
  struct cudaDeviceProp prop;
  // Fix: check the API result instead of printing garbage from an
  // uninitialized struct when no device is present.
  cudaError_t err = cudaGetDeviceProperties(&prop, 0);
  if (err != cudaSuccess) {
    fprintf(stderr, "cudaGetDeviceProperties failed: %s\n",
            cudaGetErrorString(err));
    return 1;
  }
  printf("name: %s\n", prop.name);
  printf("totalGlobalMem: %zd\n", prop.totalGlobalMem);
  printf("sharedMemPerBlock: %zd\n", prop.sharedMemPerBlock);
  printf("regsPerBlock: %d\n", prop.regsPerBlock);
  printf("warpSize: %d\n", prop.warpSize);
  printf("memPitch: %zd\n", prop.memPitch);
  printf("maxThreadsPerBlock: %d\n", prop.maxThreadsPerBlock);
  printf("maxThreadsDim: %dx%dx%d\n", prop.maxThreadsDim[0],
                                      prop.maxThreadsDim[1],
                                      prop.maxThreadsDim[2]);
  printf("maxGridSize: %dx%dx%d\n", prop.maxGridSize[0],
                                    prop.maxGridSize[1],
                                    prop.maxGridSize[2]);
  printf("totalConstMem: %zd\n", prop.totalConstMem);
  printf("major: %d\n", prop.major);
  printf("minor: %d\n", prop.minor);
  printf("clockRate: %d\n", prop.clockRate);
  printf("textureAlignment: %zd\n", prop.textureAlignment);
  printf("deviceOverlap: %d\n", prop.deviceOverlap);
  printf("multiProcessorCount: %d\n", prop.multiProcessorCount);
  // Fix: the original returned EXIT_SUCCESS, which is declared in
  // <stdlib.h> — a header this program does not include.
  return 0;
}
|
20,978 | #include <stdio.h>
#include <cuda.h>
#include "mytime.h"
#define N 1024
// Increment every element of `a`. Each of the blockDim.x threads owns a
// contiguous slice of `wpt` elements and walks it in chunks of `chunksize`.
// Expected launch: <<<1, T>>> with `a` holding T * wpt elements (the caller
// uses T = 32, wpt = N / 32).
__global__ void dkernel(unsigned *a, unsigned wpt, unsigned chunksize) {
    for (unsigned ii = 0; ii < wpt; ii += chunksize) {
        // Fix: the original computed start = wpt * blockDim.x * threadIdx.x,
        // which ignored `ii` entirely and indexed past the end of the array
        // for every thread except thread 0 (e.g. tid=1 => start=1024 on a
        // 1024-element array).
        unsigned start = wpt * threadIdx.x + ii;
        // Clamp the chunk to this thread's slice so a chunksize that does
        // not evenly divide wpt cannot overrun into the next slice (or past
        // the end of the array for the last thread).
        unsigned stop = start + chunksize;
        unsigned limit = wpt * (threadIdx.x + 1);
        if (stop > limit) stop = limit;
        for (unsigned nn = start; nn < stop; ++nn) {
            a[nn]++;
        }
    }
}
int main() {
    // Benchmark dkernel over an N-element array for chunk sizes 1..32,
    // printing the wall-clock time of each run.
    unsigned *a;
    double start, end;
    int i;
    cudaMalloc(&a, sizeof(unsigned) * N);
    for (i = 1; i < 33; ++i) {
        start = rtclock();
        dkernel<<<1, 32>>>(a, N / 32, i);
        // Kernel launches are asynchronous; synchronize so the timestamp
        // below measures the kernel, not just the launch.
        cudaDeviceSynchronize();
        end = rtclock();
        printf("%3d: ", i);
        printtime("", start, end);
    }
    cudaFree(a);  // fix: the device allocation was leaked
    return 0;
}
|
20,979 | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
using namespace std;
#define N 1024
#define GRID_SIZE 128
#define BLOCK_SIZE 128
// Monte-Carlo estimate of pi: each thread draws N uniform points in the unit
// square and stores 4 * (fraction landing inside the quarter circle) in
// res[global thread index].
__global__ void PiCalcGPU(float* res, curandState* states) {
    unsigned long index = threadIdx.x + blockDim.x * blockIdx.x;
    // Per-thread cuRAND stream, seeded and sequenced by the thread's index.
    curand_init(index, index, 0, &states[index]);
    int inside = 0;
    float px, py;
    for (int sample = 0; sample < N; sample++) {
        // Draw an (x, y) pair and count it if it falls inside the circle.
        px = curand_uniform(&states[index]);
        py = curand_uniform(&states[index]);
        inside += (px * px + py * py <= 1.0f);
    }
    res[index] = 4.0f * inside / (float)N;
}
// CPU Monte-Carlo estimate of pi over n uniform points in the unit square.
// Uses rand(); call srand() beforehand for a reproducible result.
float PiCalcGPU(long n) {
    long inside = 0;
    for (long k = 0; k < n; k++) {
        float px = rand() / (float)RAND_MAX;
        float py = rand() / (float)RAND_MAX;
        if (px * px + py * py <= 1.0f)
            inside++;
    }
    return 4.0f * inside / n;
}
int main(int argc, char* argv[]) {
    // Compare a GPU Monte-Carlo estimate of pi against a CPU one, timing both.
    // NOTE(review): setlocale is declared in <clocale>/<locale.h>, which this
    // file does not include directly — confirm it arrives transitively.
    setlocale(LC_ALL, "Russian");
    // Wall-clock markers for both runs.
    clock_t start, stop;
    float host[GRID_SIZE * BLOCK_SIZE];   // per-thread estimates copied back
    float* device;
    curandState* curand;
    // --- GPU computation ---
    start = clock();
    // Device buffer for the per-thread results.
    cudaError_t cuerr = cudaMalloc((void**)&device, GRID_SIZE * BLOCK_SIZE * sizeof(float));
    if (cuerr != cudaSuccess)
    {
        fprintf(stderr, "Cannot allocate device: %s\n",
            cudaGetErrorString(cuerr));
        return 0;
    }
    // Per-thread cuRAND state.
    cuerr = cudaMalloc((void**)&curand, BLOCK_SIZE * GRID_SIZE * sizeof(curandState));
    if (cuerr != cudaSuccess)
    {
        fprintf(stderr, "Cannot allocate device: %s\n",
            cudaGetErrorString(cuerr));
        cudaFree(device);   // fix: do not leak the first allocation on failure
        return 0;
    }
    // Launch: each thread produces one independent pi estimate.
    PiCalcGPU <<< GRID_SIZE, BLOCK_SIZE >>> (device, curand);
    cuerr = cudaGetLastError();
    if (cuerr != cudaSuccess)
    {
        fprintf(stderr, "Cannot launch CUDA kernel: %s\n",
            cudaGetErrorString(cuerr));
        cudaFree(device);   // fix: free device buffers on the error path
        cudaFree(curand);
        return 0;
    }
    // Blocking copy back to the host (also synchronizes with the kernel).
    cuerr = cudaMemcpy(host, device, GRID_SIZE * BLOCK_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
    if (cuerr != cudaSuccess)
    {
        fprintf(stderr, "Cannot copy from device to host: %s\n",
            cudaGetErrorString(cuerr));
        cudaFree(device);   // fix: free device buffers on the error path
        cudaFree(curand);
        return 0;
    }
    // Average the per-thread estimates into the final value.
    float PI_GPU = 0;
    for (int i = 0; i < GRID_SIZE * BLOCK_SIZE; i++) {
        PI_GPU += host[i];
    }
    PI_GPU /= (GRID_SIZE * BLOCK_SIZE);
    stop = clock();
    printf("GPU PI = %f\n", PI_GPU);
    printf("Время работы на GPU %f c\n", (stop - start) / (float)CLOCKS_PER_SEC);
    cudaFree(device);   // fix: both device allocations were leaked
    cudaFree(curand);
    // --- CPU computation over the same total number of samples ---
    start = clock();
    float cpuPI = PiCalcGPU(GRID_SIZE * BLOCK_SIZE * N);
    stop = clock();
    printf("CPU PI = %f\n", cpuPI);
    printf("Время работы на СPU %f c.\n", (stop - start) / (float)CLOCKS_PER_SEC);
    return 0;
}
|
// Based on jsaes: AES in JavaScript (by B. Poettering), ported to C/CUDA.
// See http://point-at-infinity.org/jsaes/ — released under the GNU GPL.
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <cstring>
#include <fstream>
#include <iostream>
#include <sstream>
#define BYTE unsigned char
using namespace std;
// Holds one 16-byte AES block (a full 128-bit cipher/state block).
class aes_block {
public:
// 16 bytes = 128-bit AES block size.
BYTE block[16];
};
// Print `len` bytes to stdout as space-separated hex (no zero padding),
// followed by a newline.
void printBytes(BYTE b[], int len) {
    for (int idx = 0; idx < len; ++idx)
        printf("%x ", b[idx]);
    printf("\n");
}
// Write `len` bytes to `fp` as zero-padded two-digit hex, space separated,
// terminated with a newline.
void f1printBytes(BYTE b[], int len, FILE* fp) {
    for (int idx = 0; idx < len; ++idx)
        fprintf(fp, "%02x ", b[idx]);
    fprintf(fp, "\n");
}
int flag = 0;
// Dump `len` raw bytes to `fp`, incrementing the global newline counter
// `flag` for every '\n' written.
void f2printBytes(BYTE b[], int len, FILE* fp) {
    for (int idx = 0; idx < len; ++idx) {
        fprintf(fp, "%c", b[idx]);
        if (b[idx] == '\n')
            ++flag;
    }
}
// Like f2printBytes, but stops at the first NUL byte; newlines written are
// counted in the global `flag`.
void f3printBytes(BYTE b[], int len, FILE* fp) {
    for (int idx = 0; idx < len; ++idx) {
        if (b[idx] == '\0')
            return;
        fprintf(fp, "%c", b[idx]);
        if (b[idx] == '\n')
            ++flag;
    }
}
/******************************************************************************/
// The following lookup tables and functions are for internal use only!
BYTE AES_Sbox[] =
{
/*0 1 2 3 4 5 6 7 8 9 a b c d e f */
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, /*0*/
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, /*1*/
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, /*2*/
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, /*3*/
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, /*4*/
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, /*5*/
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, /*6*/
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, /*7*/
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, /*8*/
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, /*9*/
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, /*a*/
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, /*b*/
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, /*c*/
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, /*d*/
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, /*e*/
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 /*f*/
};
// Substitute each of the 16 state bytes through the given S-box lookup table
// (forward or inverse, depending on the table passed in).
__device__ void AES_SubBytes(BYTE state[], BYTE sbox[]) {
    for (int k = 0; k < 16; ++k)
        state[k] = sbox[state[k]];
}
// XOR the 16-byte round key into the state in place.
__device__ void AES_AddRoundKey(BYTE state[], BYTE rkey[]) {
    for (int k = 0; k < 16; ++k)
        state[k] ^= rkey[k];
}
// Permute the 16 state bytes according to `shifttab` (the ShiftRows table or
// its inverse). A snapshot of the state is taken first so the permutation
// reads original values only.
__device__ void AES_ShiftRows(BYTE state[], BYTE shifttab[]) {
    BYTE snapshot[16];
    for (int k = 0; k < 16; ++k)
        snapshot[k] = state[k];
    for (int k = 0; k < 16; ++k)
        state[k] = snapshot[shifttab[k]];
}
// MixColumns: mix each 4-byte column of the state as a polynomial over
// GF(2^8). `AES_xtime` is a 256-entry lookup table for multiplication by x
// (i.e. by 2) in GF(2^8).
__device__ void AES_MixColumns(BYTE state[], BYTE AES_xtime[]) {
int i;
// Process one 4-byte column per iteration.
for (i = 0; i < 16; i += 4) {
BYTE s0 = state[i + 0], s1 = state[i + 1];
BYTE s2 = state[i + 2], s3 = state[i + 3];
// h = s0^s1^s2^s3 is the term common to all four output bytes.
BYTE h = s0 ^ s1 ^ s2 ^ s3;
state[i + 0] ^= h ^ AES_xtime[s0 ^ s1];
state[i + 1] ^= h ^ AES_xtime[s1 ^ s2];
state[i + 2] ^= h ^ AES_xtime[s2 ^ s3];
state[i + 3] ^= h ^ AES_xtime[s3 ^ s0];
}
}
// Inverse MixColumns for decryption. Reuses the forward MixColumns structure
// plus the extra correction terms h1/h2 built from repeated xtime lookups
// (multiplications by x in GF(2^8)).
__device__ void AES_MixColumns_Inv(BYTE state[], BYTE AES_xtime[]) {
int i;
// Process one 4-byte column per iteration.
for (i = 0; i < 16; i += 4) {
BYTE s0 = state[i + 0], s1 = state[i + 1];
BYTE s2 = state[i + 2], s3 = state[i + 3];
// Common XOR of the column, shared by every output byte.
BYTE h = s0 ^ s1 ^ s2 ^ s3;
BYTE xh = AES_xtime[h];
// Correction terms distinguishing the inverse transform from the forward one.
BYTE h1 = AES_xtime[AES_xtime[xh ^ s0 ^ s2]] ^ h;
BYTE h2 = AES_xtime[AES_xtime[xh ^ s1 ^ s3]] ^ h;
state[i + 0] ^= h1 ^ AES_xtime[s0 ^ s1];
state[i + 1] ^= h2 ^ AES_xtime[s1 ^ s2];
state[i + 2] ^= h1 ^ AES_xtime[s2 ^ s3];
state[i + 3] ^= h2 ^ AES_xtime[s3 ^ s0];
}
}
// AES_Init: initialize the tables needed at runtime.
// Call this function before the (first) key expansion.
//
// Fills the caller-provided (typically __shared__) tables:
//   AES_Sbox            - forward S-box (FIPS-197)
//   AES_ShiftRowTab     - ShiftRows permutation (source index per state byte)
//   AES_Sbox_Inv        - inverse S-box, derived from AES_Sbox below
//   AES_ShiftRowTab_Inv - inverse ShiftRows permutation, derived below
//   AES_xtime           - GF(2^8) doubling table: v<<1, XOR 0x1b when v >= 0x80
//
// FIX: the previous version spelled out 270+ individual element assignments;
// the data now lives in local const tables and is copied in loops. The values
// written are byte-for-byte identical.
__device__ void AES_Init(BYTE AES_Sbox[], BYTE AES_ShiftRowTab[], BYTE AES_Sbox_Inv[], BYTE AES_xtime[], BYTE AES_ShiftRowTab_Inv[]) {
    const BYTE sbox_tab[256] = {
        0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
        0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
        0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
        0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
        0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
        0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
        0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
        0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
        0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
        0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
        0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
        0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
        0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
        0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
        0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
        0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
    };
    const BYTE shiftrow_tab[16] = { 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11 };
    int i;
    for (i = 0; i < 16; i++)
        AES_ShiftRowTab[i] = shiftrow_tab[i];
    for (i = 0; i < 256; i++)
        AES_Sbox[i] = sbox_tab[i];
    // Derive the inverse tables from the forward ones.
    for (i = 0; i < 256; i++) {
        AES_Sbox_Inv[AES_Sbox[i]] = i;
    }
    for (i = 0; i < 16; i++)
        AES_ShiftRowTab_Inv[AES_ShiftRowTab[i]] = i;
    // xtime[v]: multiply by x (i.e. by 2) in GF(2^8), reducing by 0x1b when
    // the shifted-out bit was set.
    for (i = 0; i < 128; i++) {
        AES_xtime[i] = i << 1;
        AES_xtime[128 + i] = (i << 1) ^ 0x1b;
    }
}
// AES_Init2: decryption-side table setup (used by AES_Decrypt).
// Unlike AES_Init, it writes the inverse S-box directly from a table and does
// NOT fill AES_Sbox (the parameter is kept for signature compatibility).
//
// FIX: the previous version spelled out 270+ individual element assignments;
// the data now lives in local const tables and is copied in loops. The values
// written are byte-for-byte identical.
__device__ void AES_Init2(BYTE AES_Sbox[], BYTE AES_ShiftRowTab[], BYTE AES_Sbox_Inv[], BYTE AES_xtime[], BYTE AES_ShiftRowTab_Inv[]) {
    const BYTE sbox_inv_tab[256] = {
        0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
        0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
        0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
        0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
        0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
        0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
        0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
        0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
        0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
        0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
        0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
        0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
        0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
        0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
        0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
        0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
    };
    const BYTE shiftrow_tab[16] = { 0, 5, 10, 15, 4, 9, 14, 3, 8, 13, 2, 7, 12, 1, 6, 11 };
    int i;
    for (i = 0; i < 16; i++)
        AES_ShiftRowTab[i] = shiftrow_tab[i];
    for (i = 0; i < 256; i++)
        AES_Sbox_Inv[i] = sbox_inv_tab[i];
    // Derive the inverse ShiftRows permutation.
    for (i = 0; i < 16; i++)
        AES_ShiftRowTab_Inv[AES_ShiftRowTab[i]] = i;
    // xtime[v]: multiply by x (i.e. by 2) in GF(2^8), reducing by 0x1b when
    // the shifted-out bit was set.
    for (i = 0; i < 128; i++) {
        AES_xtime[i] = i << 1;
        AES_xtime[128 + i] = (i << 1) ^ 0x1b;
    }
}
// AES_Done: release memory reserved by AES_Init.
// Call this function after the last encryption/decryption operation.
// Intentionally a no-op here: AES_Init only writes into caller-provided
// buffers and allocates nothing, so there is nothing to release.
void AES_Done() {}
/* AES_ExpandKey: expand a cipher key. Depending on the desired encryption
strength of 128, 192 or 256 bits 'key' has to be a byte array of length
16, 24 or 32, respectively. The key expansion is done "in place", meaning
that the array 'key' is modified.
Returns the expanded key length in bytes, or -1 if keyLen is invalid.
Host-side; reads the file-scope AES_Sbox table.
*/
int AES_ExpandKey(BYTE key[], int keyLen) {
    int kl = keyLen, ks, Rcon = 1, i, j;
    BYTE temp[4], temp2[4];
    // Expanded size is 16 * (rounds + 1).
    switch (kl) {
    case 16:
        ks = 16 * (10 + 1);
        break;
    case 24:
        ks = 16 * (12 + 1);
        break;
    case 32:
        ks = 16 * (14 + 1);
        break;
    default:
        printf("AES_ExpandKey: Only key lengths of 16, 24 or 32 bytes allowed!");
        // BUG FIX: previously fell through with 'ks' uninitialized and ran
        // the expansion loop anyway (undefined behavior). Bail out instead.
        return -1;
    }
    for (i = kl; i < ks; i += 4) {
        memcpy(temp, &key[i - 4], 4);
        if (i % kl == 0) {
            // First word of each key group: RotWord + SubWord + Rcon.
            temp2[0] = AES_Sbox[temp[1]] ^ Rcon;
            temp2[1] = AES_Sbox[temp[2]];
            temp2[2] = AES_Sbox[temp[3]];
            temp2[3] = AES_Sbox[temp[0]];
            memcpy(temp, temp2, 4);
            // Advance Rcon in GF(2^8).
            if ((Rcon <<= 1) >= 256)
                Rcon ^= 0x11b;
        } else if ((kl > 24) && (i % kl == 16)) {
            // 256-bit keys only: extra SubWord in the middle of the group.
            temp2[0] = AES_Sbox[temp[0]];
            temp2[1] = AES_Sbox[temp[1]];
            temp2[2] = AES_Sbox[temp[2]];
            temp2[3] = AES_Sbox[temp[3]];
            memcpy(temp, temp2, 4);
        }
        for (j = 0; j < 4; j++)
            key[i + j] = key[i + j - kl] ^ temp[j];
    }
    return ks;
}
// AES_Encrypt: encrypt the 16 byte array 'block' with the previously expanded key 'key'.
// One thread encrypts one 16-byte block (blocks are independent); the lookup
// tables are staged in shared memory, built once per thread block by thread 0.
__global__ void AES_Encrypt(aes_block aes_block_array[], BYTE key[], int keyLen, int block_number) {
    int global_thread_index = blockDim.x * blockIdx.x + threadIdx.x;
    __shared__ BYTE AES_ShiftRowTab[16];
    __shared__ BYTE AES_Sbox[256];
    __shared__ BYTE AES_ShiftRowTab_Inv[16];
    __shared__ BYTE AES_Sbox_Inv[256];
    __shared__ BYTE AES_xtime[256];
    if (threadIdx.x == 0) {
        AES_Init(AES_Sbox, AES_ShiftRowTab, AES_Sbox_Inv, AES_xtime, AES_ShiftRowTab_Inv);
    }
    // BUG FIX: this barrier used to sit inside the tail-guard branch below;
    // __syncthreads() in divergent control flow is undefined behavior when
    // threads with global_thread_index >= block_number skip it. The table
    // init and the barrier now run unconditionally for the whole block.
    __syncthreads();
    if (global_thread_index < block_number) {
        // Work on a thread-local copy of this thread's 16-byte block.
        BYTE block[16];
        for (int i = 0; i < 16; i++) {
            block[i] = aes_block_array[global_thread_index].block[i];
        }
        int l = keyLen, i;
        AES_AddRoundKey(block, &key[0]);
        // Middle rounds: SubBytes, ShiftRows, MixColumns, AddRoundKey.
        for (i = 16; i < l - 16; i += 16) {
            AES_SubBytes(block, AES_Sbox);
            AES_ShiftRows(block, AES_ShiftRowTab);
            AES_MixColumns(block, AES_xtime);
            AES_AddRoundKey(block, &key[i]);
        }
        // Final round omits MixColumns.
        AES_SubBytes(block, AES_Sbox);
        AES_ShiftRows(block, AES_ShiftRowTab);
        AES_AddRoundKey(block, &key[i]);
        for (int i = 0; i < 16; i++) {
            aes_block_array[global_thread_index].block[i] = block[i];
        }
    }
}
// AES_Decrypt: decrypt the 16 byte array 'block' with the previously expanded key 'key'.
// One thread decrypts one 16-byte block; inverse lookup tables are staged in
// shared memory, built once per thread block by thread 0 via AES_Init2.
__global__ void AES_Decrypt(aes_block aes_block_array[], BYTE key[], int keyLen, int block_number) {
    int global_thread_index = blockDim.x * blockIdx.x + threadIdx.x;
    __shared__ BYTE AES_ShiftRowTab[16];
    __shared__ BYTE AES_Sbox[256];
    __shared__ BYTE AES_ShiftRowTab_Inv[16];
    __shared__ BYTE AES_Sbox_Inv[256];
    __shared__ BYTE AES_xtime[256];
    if (threadIdx.x == 0) {
        AES_Init2(AES_Sbox, AES_ShiftRowTab, AES_Sbox_Inv, AES_xtime, AES_ShiftRowTab_Inv);
    }
    // BUG FIX: this barrier used to sit inside the tail-guard branch below;
    // __syncthreads() in divergent control flow is undefined behavior when
    // threads with global_thread_index >= block_number skip it. The table
    // init and the barrier now run unconditionally for the whole block.
    __syncthreads();
    if (global_thread_index < block_number) {
        // Work on a thread-local copy of this thread's 16-byte block.
        BYTE block[16];
        for (int i = 0; i < 16; i++) {
            block[i] = aes_block_array[global_thread_index].block[i];
        }
        int l = keyLen, i;
        // Rounds run in reverse, starting from the last round key.
        AES_AddRoundKey(block, &key[l - 16]);
        AES_ShiftRows(block, AES_ShiftRowTab_Inv);
        AES_SubBytes(block, AES_Sbox_Inv);
        for (i = l - 32; i >= 16; i -= 16) {
            AES_AddRoundKey(block, &key[i]);
            AES_MixColumns_Inv(block, AES_xtime);
            AES_ShiftRows(block, AES_ShiftRowTab_Inv);
            AES_SubBytes(block, AES_Sbox_Inv);
        }
        AES_AddRoundKey(block, &key[0]);
        for (int i = 0; i < 16; i++) {
            aes_block_array[global_thread_index].block[i] = block[i];
        }
    }
}
// ===================== test ============================================
// Driver for GCM-style encryption/decryption (work in progress).
// Usage: aes.exe <-e|-d> <input> <key> <IV> <auth> <tag> <output>
// Fixes in this revision (see inline BUG FIX notes):
//   - argc is validated BEFORE argv[1] is dereferenced
//   - argc check corrected to 8 (7 arguments + program name); previously the
//     check demanded 7 while argv[7] was read for the output file
//   - mode test read argv[2][1] (the input filename) instead of argv[1][1]
//   - auth-file length probe rewound input_file instead of auth_file
//   - IV read loop polled key_file.peek() instead of iv_file.peek()
//   - "too long" message emitted for a too-short IV file
//   - aes_block_array was leaked
int main(int argc, char* argv[]) {
/*
// File read (message)
ifstream ifs;
ifs.open(argv[1], std::ifstream::binary);
if (!ifs) {
cerr << "Cannot open the input file" << endl;
exit(1);
}
ifs.seekg(0, ios::end);
int infileLength = ifs.tellg();
ifs.seekg(0, ios::beg);
cout << "Length of input file: " << infileLength << endl;
int block_number = infileLength / 16;
int number_of_zero_pending = infileLength % 16;
aes_block* aes_block_array;
BYTE key[16 * (14 + 1)];
int keyLen = 0;
int blockLen = 16;
// File read (key)
ifstream key_fp;
key_fp.open(argv[2]);
while (key_fp.peek() != EOF) {
key_fp >> key[keyLen];
if (key_fp.eof())
break;
keyLen++;
}
// Key length check
cout << keyLen << endl;
switch (keyLen) {
case 16:
break;
default:
cerr << "ERROR : keyLen should be 128, 192, 256bits\n";
return 1;
}
int expandKeyLen = AES_ExpandKey(key, keyLen);
// TODO: Read IV; IV length check; Auth read; Auth zero padding;
// AES block array. Size might be changed.
if (number_of_zero_pending != 0)
aes_block_array = new aes_block[block_number + 1];
else
aes_block_array = new aes_block[block_number];
// Output file init
char temp[16];
FILE* en_fp;
FILE* de_fp;
en_fp = fopen(argv[3], "wb");
de_fp = fopen(argv[4], "wb");
for (int i = 0; i < block_number; i++) {
// Not true actually. It should generate from IV.
ifs.read(temp, 16);
for (int j = 0; j < 16; j++) {
aes_block_array[i].block[j] = (unsigned char)temp[j];
}
}
// Zero-padding for last block. (Should be removed.)
if (number_of_zero_pending != 0) {
ifs.read(temp, number_of_zero_pending);
for (int j = 0; j < 16; j++) {
aes_block_array[block_number].block[j] = (unsigned char)temp[j];
}
for (int j = 1; j <= 16 - number_of_zero_pending; j++)
aes_block_array[block_number].block[16 - j] = '\0';
block_number++;
}
// CUDA implementation. Will be modified.
cudaSetDevice(0);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
int num_sm = prop.multiProcessorCount;
aes_block* cuda_aes_block_array;
BYTE* cuda_key;
int thrdperblock = block_number / num_sm;
if (block_number % num_sm > 0)
thrdperblock++;
if (thrdperblock > 1024) {
thrdperblock = 1024;
num_sm = block_number / 1024;
if (block_number % 1024 > 0) {
num_sm++;
}
}
dim3 ThreadperBlock(thrdperblock);
printf("num of sms: %d\nThreads per block: %d\n", num_sm, thrdperblock);
dim3 BlockperGrid(num_sm);
cudaMalloc(&cuda_aes_block_array, block_number * sizeof(class aes_block));
cudaMalloc(&cuda_key, 16 * 15 * sizeof(BYTE));
cudaMemcpy(cuda_aes_block_array, aes_block_array, block_number * sizeof(class aes_block), cudaMemcpyHostToDevice);
cudaMemcpy(cuda_key, key, 16 * 15 * sizeof(BYTE), cudaMemcpyHostToDevice);
// Lots of changes here
AES_Encrypt<<<BlockperGrid, ThreadperBlock>>>(cuda_aes_block_array, cuda_key, expandKeyLen, block_number);
cudaMemcpy(aes_block_array, cuda_aes_block_array, block_number * sizeof(class aes_block), cudaMemcpyDeviceToHost);
for (int i = 0; i < block_number - 1; i++) {
f1printBytes(aes_block_array[i].block, blockLen, en_fp);
}
if (number_of_zero_pending == 0)
f1printBytes(aes_block_array[block_number - 1].block, blockLen, en_fp);
else
f1printBytes(aes_block_array[block_number - 1].block, blockLen, en_fp);
// Lots of changes here
AES_Decrypt<<<BlockperGrid, ThreadperBlock>>>(cuda_aes_block_array, cuda_key, expandKeyLen, block_number);
cudaMemcpy(aes_block_array, cuda_aes_block_array, block_number * sizeof(class aes_block), cudaMemcpyDeviceToHost);
for (int i = 0; i < block_number - 1; i++) {
f2printBytes(aes_block_array[i].block, blockLen, de_fp);
}
if (number_of_zero_pending == 0)
f2printBytes(aes_block_array[block_number - 1].block, blockLen, de_fp);
else
f3printBytes(aes_block_array[block_number - 1].block, blockLen, de_fp);
AES_Done();
fclose(en_fp);
fclose(de_fp);
return 0;
*/
    // aes.exe <-e/d> <input> <key> <IV> <auth> <tag> <output>
    // BUG FIX: validate argc before touching argv[1]; the correct count is 8
    // (program name + 7 arguments), and argv[7] is the output path below.
    if (argc != 8) {
        cerr << "Arg count wrong!" << endl;
        cerr << "Usage: aes.exe <-e|-d> <input_file> <key> <IV> <auth> <tag> <output>" << endl;
        return 1;
    }
    if (argv[1][0] != '-' || (argv[1][1] != 'e' && argv[1][1] != 'd')) {
        cerr << "Specify encrypt or decrypt first using -e or -d!" << endl;
        return 1;
    }
    // File input read
    ifstream input_file(argv[2], std::ifstream::in);
    if (!input_file.is_open()) {
        cerr << "Cannot open the input file" << endl;
        return 1;
    }
    // File length
    input_file.seekg(0, ios::end);
    int input_length = input_file.tellg();
    input_file.seekg(0, ios::beg);
    clog << "Input file length: " << input_length << endl;
    int input_block_number = input_length / 16,
        input_zero_padding_bytes = input_length % 16;
    aes_block* aes_block_array;
    // Extra slots: leading counter block, (optional) padded tail, GHASH block.
    if (input_zero_padding_bytes != 0)
        aes_block_array = new aes_block[1 + input_block_number + 1 + 1];
    else
        aes_block_array = new aes_block[1 + input_block_number + 1];
    // Auth read
    ifstream auth_file(argv[5], std::ifstream::in);
    if (!auth_file.is_open()) {
        cerr << "Cannot open the auth file" << endl;
        return 1;
    }
    // Auth length
    auth_file.seekg(0, ios::end);
    int auth_length = auth_file.tellg();
    // BUG FIX: was rewinding input_file here instead of auth_file.
    auth_file.seekg(0, ios::beg);
    clog << "Auth file length: " << auth_length << endl;
    int auth_block_number = auth_length / 16,
        auth_zero_padding_bytes = auth_length % 16;
    // Key read: 16 bytes, two hex characters per byte.
    ifstream key_file(argv[3], std::ifstream::in);
    if (!key_file.is_open()) {
        cerr << "Cannot open the key file" << endl;
        return 1;
    }
    BYTE key[16 * (10 + 1)];
    for (int i = 0; i < 16 && key_file.peek() != EOF; i++) {
        char temp[3];
        int temp_int;
        key_file.get(temp, 3);
        std::stringstream temp_stream;
        temp_stream << temp;
        temp_stream >> std::hex >> temp_int;
        key[i] = temp_int;
    }
    if (key_file.tellg() > 32) {
        clog << "Key file gives a key too long. Remaining parts omitted." << endl;
    } else if (key_file.tellg() < 32) {
        cerr << "Key file too short!" << endl;
        return 1;
    }
    int expandKeyLen = AES_ExpandKey(key, 16);
    // IV read: 12 bytes, two hex characters per byte.
    ifstream iv_file(argv[4], std::ifstream::in);
    if (!iv_file.is_open()) {
        cerr << "Cannot open the IV file" << endl;
        return 1;
    }
    BYTE IV[16];
    // BUG FIX: loop polled key_file.peek() instead of iv_file.peek().
    for (int i = 0; i < 12 && iv_file.peek() != EOF; i++) {
        int temp_int;
        char temp[3];
        iv_file.get(temp, 3);
        std::stringstream temp_stream;
        temp_stream << temp;
        temp_stream >> std::hex >> temp_int;
        IV[i] = temp_int;
    }
    if (iv_file.tellg() > 24) {
        clog << "IV file gives an IV too long. Remaining parts omitted." << endl;
    } else if (iv_file.tellg() < 24) {
        // BUG FIX: this branch reported "too long" for a too-short IV.
        cerr << "IV file too short!" << endl;
        return 1;
    }
    // 96-bit IV || 32-bit counter starting at 1 (GCM convention).
    IV[12] = IV[13] = IV[14] = 0; IV[15] = 1;
    // TODO:
    // GCM init (Compute H, multiplication table)
    // Generate counter blocks.
    // Encrypt counter blocks.
    // Encrypted counter blocks XOR message blocks.
    // Compute GHASH
    // Prepare for the output file
    ofstream output_file(argv[7], std::ofstream::trunc);
    // BUG FIX: mode flag lives in argv[1]; argv[2] is the input filename.
    if (argv[1][1] == 'e') {
        // Encrypt branch
        // Output XORed blocks to output
        // Output tag to tag file
    } else {
        // Decrypt branch
        // Read tag file & compare tag file with computed tag
        // If same, output decrypted result
        // Otherwise, return FAILED.
    }
    // BUG FIX: the block array was leaked.
    delete[] aes_block_array;
    return 0;
}
|
20,981 | /*
file name: matrix_mul.cu
*
* matrix.cu contains two implemention of matrix multiplication in class
* Each matrix size is 1024*1024
* In this program, the elapsed time covers only kernel execution. Time spent allocating CUDA memory, transferring data and freeing CUDA memory is not included. However, in your homework, you should include these overheads.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define BLOCK_SIZE 16
#define TILE_WIDTH 16
/*
*********************************************************************
function name: gpu_matrix_mult
description: simple implementation
*********************************************************************
*/
// Naive matrix multiply: C = A * B for square n x n row-major matrices.
// One thread computes one element of C. Launch with a 2-D grid covering n x n.
__global__ void gpu_matrix_mult(float *A, float *B, float *C, int n)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < n && col < n) {
        // BUG FIX: the old code did 'C[...] +=' for every i, accumulating
        // into device memory that was never zeroed (cudaMalloc does not
        // initialize). Accumulate in a register and overwrite instead.
        float sum = 0.0f;
        for (int i = 0; i < n; ++i) {
            sum += A[row * n + i] * B[i * n + col];
        }
        C[row * n + col] = sum;
    }
}
/*
*********************************************************************
function name: MatrixMul_tileKernel
description: Using a tiling strategy for matrix multiplication on the GPU
*********************************************************************
*/
// Tiled matrix multiply: Pd = Md * Nd for square Width x Width row-major
// matrices. Each TILE_WIDTH x TILE_WIDTH thread block computes one output
// tile, staging the needed tiles of Md and Nd in shared memory.
// NOTE(review): there is no bounds guard on Row/Col, so this is correct only
// when Width is an exact multiple of TILE_WIDTH and the grid exactly covers
// the matrix -- confirm at the call site.
__global__ void MatrixMul_tileKernel(float* Md, float* Nd, float* Pd, int Width){
int Row = blockIdx.y*TILE_WIDTH + threadIdx.y;
int Col = blockIdx.x*TILE_WIDTH + threadIdx.x;
int tx = threadIdx.x, ty = threadIdx.y;
// Shared-memory staging tiles for the current slice of Md and Nd.
__shared__ float a[TILE_WIDTH][TILE_WIDTH], b[TILE_WIDTH][TILE_WIDTH];
float Pvalue = 0;
//Each thread computes one element of the block sub-matrix
for(int k=0; k< Width/TILE_WIDTH; k++){
// Each thread loads one element of each tile (consecutive tx reads
// consecutive addresses, so the loads coalesce).
a[ty][tx] = Md[Row*Width+k*TILE_WIDTH+tx];
b[ty][tx] = Nd[Col+Width*(k*TILE_WIDTH + ty)];
__syncthreads(); //sync all threads in a block;
// Partial dot product over this tile.
for(int kk=0; kk<TILE_WIDTH; kk++)
Pvalue += a[ty][kk]*b[kk][tx];
__syncthreads(); //avoid memory hazards;
}
Pd[Row*Width+Col] = Pvalue;
}
/*
*********************************************************************
function name: main
description: test and compare
parameters:
none
return: none
*********************************************************************
*/
// Benchmark driver: times the naive and tiled matrix-multiply kernels on
// random 1024 x 1024 matrices. Only kernel time is measured via CUDA events.
int main(int argc, char const *argv[])
{
    int n = 1024;
    /* Fixed seed for illustration */
    srand(3333);
    // allocate pinned host memory (cudaMallocHost)
    float *h_a, *h_b, *h_c;
    cudaMallocHost((void **) &h_a, sizeof(float)*n*n);
    cudaMallocHost((void **) &h_b, sizeof(float)*n*n);
    cudaMallocHost((void **) &h_c, sizeof(float)*n*n);
    // generate matrix A and B
    for (int i = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 1024/2.3;
            h_b[i * n + j] = rand() % 24/3.3;
        }
    }
    float gpu_elapsed_time_ms;
    // events used to time the kernels
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Allocate memory space on the device
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, sizeof(float)*n*n);
    cudaMalloc((void **) &d_b, sizeof(float)*n*n);
    cudaMalloc((void **) &d_c, sizeof(float)*n*n);
    // copy matrix A and B from host to device memory
    cudaMemcpy(d_a, h_a, sizeof(float)*n*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sizeof(float)*n*n, cudaMemcpyHostToDevice);
    // BUG FIX: gpu_matrix_mult accumulates with '+=', so d_c must start at
    // zero; cudaMalloc leaves memory uninitialized.
    cudaMemset(d_c, 0, sizeof(float)*n*n);
    unsigned int grid_rows = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    unsigned int grid_cols = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // start to count execution time of GPU kernel
    cudaEventRecord(start, 0);
    // Launch simple matrix multiplication kernel
    gpu_matrix_mult<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, n);
    // time counting terminate
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Transfer results from device to host
    cudaMemcpy(h_c, d_c, sizeof(float)*n*n, cudaMemcpyDeviceToHost);
    // compute time elapsed on GPU computing
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on simple matrix multiplication on GPU: %f ms.\n\n", gpu_elapsed_time_ms);
    cudaEventRecord(start, 0);
    // Launch tile matrix multiplication kernel (it overwrites its output, so
    // no memset is needed before this launch).
    MatrixMul_tileKernel<<<dimGrid, dimBlock>>>( d_a, d_b, d_c, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Transfer results from device to host
    cudaMemcpy(h_c, d_c, sizeof(float)*n*n, cudaMemcpyDeviceToHost);
    // compute time elapsed on GPU kernel
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication with tiling strategy on GPU: %f ms.\n\n", gpu_elapsed_time_ms);
    // BUG FIX: destroy timing events (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // free memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    return 0;
}
|
20,982 | #include "includes.h"
extern "C"
// Element-wise A += B for 'size' floats, indexed by a fully flattened
// 3-D grid of 3-D blocks.
__global__ void add32(float* A, float *B, int size) {
    int blockLinear = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    int threadLinear = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    int idx = blockLinear * threadsPerBlock + threadLinear;
    if (idx < size) {
        A[idx] += B[idx];
    }
}
20,983 | #include "../include/Activation.cuh"
#include <vector>
/* ----------------------------------------------
maxGPU
Parameters:
a - double
b - double
Finds max of a and b and returns it
Returns:
max(a, b)
---------------------------------------------- */
/* ----------------------------------------------
maxGPU
Parameters:
a - double
b - double
Finds max of a and b and returns it
Returns:
max(a, b)
---------------------------------------------- */
__device__ double maxGPU(double a, double b)
{
    // BUG FIX: the old branchless blend sel*b + (1-sel)*a evaluated
    // 0 * (+/-infinity) = NaN whenever either input was infinite; a plain
    // select returns the same values for all finite inputs without that trap.
    return (a <= b) ? b : a;
}
/* ----------------------------------------------
binaryStep
Parameters:
x - vector to apply activation to, can be matrix in row-major form
len - length of x
Applies binaryStep to every element of x
---------------------------------------------- */
// Heaviside step applied element-wise: 1 for x >= 0, 0 for x < 0.
__global__ void binaryStep(double *x, int len)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < len) {
        x[tid] = (x[tid] >= 0.0) ? 1.0 : 0.0;
    }
}
/* ----------------------------------------------
binaryStepGPU
Parameters:
z - vector to apply activation to, can be matrix in row-major form
calls binaryStep cuda kernel on z.data()
Returns:
a - activated z, (f(z))
---------------------------------------------- */
/* ----------------------------------------------
binaryStepGPU
Parameters:
z - vector to apply activation to, can be matrix in row-major form
calls binaryStep cuda kernel on z.data()
Returns:
a - activated z, (f(z))
---------------------------------------------- */
std::vector<double> binaryStepGPU(std::vector<double>& z)
{
    std::vector<double> a(z.size());
    // BUG FIX: an empty input made BLOCKSIZE 0 and the grid computation
    // divided by zero; nothing to do in that case.
    if (z.empty())
        return a;
    double *d_z;
    int BLOCKSIZE = z.size() >= 512 ? 512 : z.size();
    cudaMalloc((void **) &d_z, z.size() * sizeof(double));
    cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice);
    dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE);
    dim3 BLOCK(BLOCKSIZE);
    binaryStep<<<GRID, BLOCK, 0>>>(d_z, z.size());
    cudaDeviceSynchronize();
    cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_z);
    return a;
}
/* ----------------------------------------------
sigmoid
Parameters:
x - vector to apply activation to, can be matrix in row-major form
len - length of x
Applies sigmoid (1/(1 + exp(-x))) to every element of x
---------------------------------------------- */
// exp(x) returns e^x ; its a cuda library function
// Logistic sigmoid 1 / (1 + e^{-x}), applied element-wise in place.
__global__ void sigmoid(double *x, int len)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < len) {
        x[tid] = 1.0 / (1.0 + exp(-x[tid]));
    }
}
/* ----------------------------------------------
sigmoid_prime
Parameters:
x - vector to apply activation to, can be matrix in row-major form
len - length of x
Applies sigmoidPrime (exp(-x)(1 + exp(-x))^(-2)) to every element of x
---------------------------------------------- */
// Sigmoid derivative e^{-x} / (1 + e^{-x})^2, applied element-wise in place.
__global__ void sigmoid_prime(double *x, int len)
{
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= len)
        return;
    double e = exp(-x[tid]);
    double denom = (1.0 + e) * (1.0 + e);
    x[tid] = e / denom;
}
/* ----------------------------------------------
sigmoidGPU
Parameters:
z - vector to apply activation to, can be matrix in row-major form
diff - bool determining whether to applu sig or sig_prime
calls sigmoid or sigmoid_prime cuda kernel on z.data()
Returns:
a - activated z, (f(z) or f'(z))
---------------------------------------------- */
/* ----------------------------------------------
sigmoidGPU
Parameters:
z - vector to apply activation to, can be matrix in row-major form
diff - bool determining whether to apply sigmoid or sigmoid_prime
calls sigmoid or sigmoid_prime cuda kernel on z.data()
Returns:
a - activated z, (f(z) or f'(z))
---------------------------------------------- */
std::vector<double> sigmoidGPU(std::vector<double>& z, bool diff)
{
    std::vector<double> a(z.size());
    // BUG FIX: an empty input made BLOCKSIZE 0 and the grid computation
    // divided by zero; nothing to do in that case.
    if (z.empty())
        return a;
    double *d_z;
    int BLOCKSIZE = z.size() >= 512 ? 512 : z.size();
    cudaMalloc((void **) &d_z, z.size() * sizeof(double));
    cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice);
    dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE);
    dim3 BLOCK(BLOCKSIZE);
    if (!diff)
        sigmoid<<<GRID, BLOCK, 0>>>(d_z, z.size());
    else
        sigmoid_prime<<<GRID, BLOCK, 0>>>(d_z, z.size());
    cudaDeviceSynchronize();
    cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_z);
    return a;
}
/* ----------------------------------------------
relu
Parameters:
x - vector to apply activation to, can be matrix in row-major form
len - length of x
Applies relu (x if x > 0, else 0) to every element of x
---------------------------------------------- */
__global__ void relu(double *x, int len)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < len)
    x[i] = maxGPU(x[i], 0);   // keep positive values, clamp the rest to 0
} // end relu
/* ----------------------------------------------
relu_prime
Parameters:
x - vector to apply activation to, can be matrix in row-major form
len - length of x
Applies relu_prime (1 if x > 0, else 0) to every element of x
---------------------------------------------- */
__global__ void relu_prime(double *x, int len)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < len)
    x[i] = (x[i] > 0) ? 1.0 : 0.0;   // derivative: 1 for positive input, else 0
} // end relu
/* ----------------------------------------------
reluGPU
Parameters:
z - vector to apply activation to, can be matrix in row-major form
diff - bool determining whether to applu sig or sig_prime
calls relu or relu_prime cuda kernel on z.data()
Returns:
a - activated z, (f(z) or f'(z))
---------------------------------------------- */
std::vector<double> reluGPU(std::vector<double>& z, bool diff)
{
  std::vector<double> a(z.size());
  // Guard: an empty input would make BLOCKSIZE 0 and the grid-size
  // computation below divide by zero.
  if (z.empty())
    return a;
  double *d_z;
  int BLOCKSIZE = z.size() >= 512 ? 512 : z.size();
  cudaMalloc((void **) &d_z, z.size() * sizeof(double));
  cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice);
  dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE);
  dim3 BLOCK(BLOCKSIZE);
  if (!diff)
    relu<<<GRID, BLOCK, 0>>>(d_z, z.size());        // f(z)
  else
    relu_prime<<<GRID, BLOCK, 0>>>(d_z, z.size());  // f'(z)
  cudaDeviceSynchronize();
  cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree(d_z);
  return a;
} // end reluGPU
/* ----------------------------------------------
leakyRelu
Parameters:
x - vector to apply activation to, can be matrix in row-major form
len - length of x
Applies leakyRelu (x if x > 0, else 0.05x) to every element of x
---------------------------------------------- */
__global__ void leakyRelu(double *x, int len)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < len)
    x[i] = maxGPU(x[i], 0.05 * x[i]);   // x for x>0, 0.05*x otherwise
} // end leakyRelu
/* ----------------------------------------------
leakyRelu_prime
Parameters:
x - vector to apply activation to, can be matrix in row-major form
len - length of x
Applies leakyRelu_prime (1 if x > 0, else 0.05) to every element of x
---------------------------------------------- */
__global__ void leakyRelu_prime(double *x, int len)
{
  int idx = blockDim.x * blockIdx.x + threadIdx.x;
  if (idx >= len)
    return;
  // Derivative of leaky ReLU: 1 for positive inputs, 0.05 otherwise.
  // Bug fix: the previous code computed `size * x[idx] + (1-size) * 0.05`,
  // which wrongly produced x itself (not 1) for positive inputs — contradicting
  // the documented contract "1 if x > 0, else 0.05".
  x[idx] = (x[idx] > 0) ? 1.0 : 0.05;
} // end leakyRelu_prime
/* ----------------------------------------------
leakyReluGPU
Parameters:
z - vector to apply activation to, can be matrix in row-major form
diff - bool determining whether to applu sig or sig_prime
calls leakyRelu or leakyRelu_prime cuda kernel on z.data()
Returns:
a - activated z, (f(z) or f'(z))
---------------------------------------------- */
std::vector<double> leakyReluGPU(std::vector<double>& z, bool diff)
{
  std::vector<double> a(z.size());
  // Guard: an empty input would make BLOCKSIZE 0 and the grid-size
  // computation below divide by zero.
  if (z.empty())
    return a;
  double *d_z;
  int BLOCKSIZE = z.size() >= 512 ? 512 : z.size();
  cudaMalloc((void **) &d_z, z.size() * sizeof(double));
  cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice);
  dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE);
  dim3 BLOCK(BLOCKSIZE);
  if (!diff)
    leakyRelu<<<GRID, BLOCK, 0>>>(d_z, z.size());        // f(z)
  else
    leakyRelu_prime<<<GRID, BLOCK, 0>>>(d_z, z.size());  // f'(z)
  cudaDeviceSynchronize();
  cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree(d_z);
  return a;
} // end leakyReluGPU
/* ----------------------------------------------
   exponential
   Applies e^x to every element of x in place.
   ---------------------------------------------- */
__global__ void exponential(double *x, int len)
{
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < len)
    x[i] = exp(x[i]);
} // end exponential
/* ----------------------------------------------
   exponentialGPU
   Parameters:
     z    - vector to apply activation to, can be matrix in row-major form
     diff - kept for signature parity with the other wrappers but ignored:
            d/dx e^x = e^x, so both branches launched the same kernel and
            the redundant if/else has been collapsed.
   Returns:
     a - e^z, elementwise
   ---------------------------------------------- */
std::vector<double> exponentialGPU(std::vector<double>& z, bool diff)
{
  (void)diff;  // unused by design, see header comment
  std::vector<double> a(z.size());
  // Guard: an empty input would make BLOCKSIZE 0 and the grid-size
  // computation below divide by zero.
  if (z.empty())
    return a;
  double *d_z;
  int BLOCKSIZE = z.size() >= 512 ? 512 : z.size();
  cudaMalloc((void **) &d_z, z.size() * sizeof(double));
  cudaMemcpy(d_z, z.data(), z.size() * sizeof(double), cudaMemcpyHostToDevice);
  dim3 GRID((z.size() + BLOCKSIZE - 1) / BLOCKSIZE);
  dim3 BLOCK(BLOCKSIZE);
  exponential<<<GRID, BLOCK, 0>>>(d_z, z.size());
  cudaDeviceSynchronize();
  cudaMemcpy(a.data(), d_z, z.size() * sizeof(double), cudaMemcpyDeviceToHost);
  cudaFree(d_z);
  return a;
} // end exponentialGPU
|
20,984 | #include "includes.h"
// Vertical (column-direction) 1-D convolution of a single-channel image.
// One thread per pixel; rows outside the image are clamped to the edge.
__global__ void kernel_image2D1C_ConvolveColumn(float* img, int n_x, int n_y, short k, float *kernel, float* out)
{
    // Global pixel coordinates of this thread.
    const int px = blockIdx.x * blockDim.x + threadIdx.x;
    const int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= n_x || py >= n_y)
        return;
    // (2k+1)-tap convolution down the column.
    float acc = 0;
    for (short t = -k; t <= k; t++)
    {
        short row = py + t;
        if (row < 0)     row = 0;          // clamp above the top edge
        if (row >= n_y)  row = n_y - 1;    // clamp below the bottom edge
        acc += kernel[t + k] * img[row * n_x + px];
    }
    out[py * n_x + px] = acc;
}
20,985 | #include <iostream>
#include <fstream>
#include <vector>
#include <stdlib.h>
//#include <common\book.h>
#define DIM 512
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print the decoded CUDA error with its source location; optionally abort.
// `file` is now const-qualified: the gpuErrchk macro passes __FILE__, a
// string literal, which may not bind to a non-const char* in C++.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
   if (code != cudaSuccess)
   {
      fprintf(stderr,"\nGPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
      if (abort) exit(code);
   }
}
using namespace std;
#define min(a,b) (a<b)?a:b
// Per-block argmin reduction: each block loads DIM values into shared
// memory, reduces pairwise while tracking the INDEX of the minimum, and
// thread 0 writes that block-local index to min_holder[blockIdx.x].
// NOTE(review): assumes blockDim.x == DIM (512); a smaller block would
// leave part of tmp[]/min_index[] uninitialized — confirm launch config.
// NOTE(review): index[idx] is read without a bounds check; the caller
// must guarantee the array covers gridDim.x * DIM elements.
__global__ void kernel(float *index,int *min_holder)
{
__shared__ float tmp[DIM];
int idx = threadIdx.x+blockIdx.x*blockDim.x;  // global element for this thread
int local_index = threadIdx.x;
int row_idx = blockIdx.x;                     // one output slot per block
__shared__ int min_index[DIM];                // argmin candidate per lane
int size = DIM/2;
tmp[local_index] = index[idx];
min_index[local_index] = local_index;
__syncthreads();                              // shared arrays fully populated
while(size)
{
if(local_index<size)
{
// Keep the smaller of the pair (value and its originating index).
if(tmp[local_index+size]<tmp[local_index])
{
tmp[local_index]= tmp[local_index+size];
min_index[local_index] = min_index[local_index+size];
}
}
size/=2;
__syncthreads();                              // barrier before the next halving
}
if(local_index==0)
{
min_holder[row_idx] = min_index[0];           // block winner's local index
}
}
// Reads a distance matrix from a text file and repeatedly finds the global
// minimum entry on the GPU (one block per row chunk), merging the winning
// row/column pair each round — a hierarchical-clustering-style loop.
int main()
{
char file_name[255];// = "in.txt";
ofstream fout("out.txt");
cout<<"Please enter the file path to the distance matrix: ";
cin.getline(file_name,255);
// 64 MiB stream buffer to speed up reading large matrices.
std::vector<char> buffer(64 * 1024 * 1024);
fstream fin;
fin.rdbuf()->pubsetbuf(&buffer[0],buffer.size());
fin.open(file_name);
//cudaDeviceProp deviceProp;
//cudaGetDeviceProperties(&deviceProp, 0);
//cout<<deviceProp.name<<" has compute capability "<<deviceProp.major<<","<< deviceProp.minor<<endl;
int size = INT_MIN;
int r=0,c=0;
fin>>size;
// Number of DIM-sized chunks needed to cover one row.
int pitch=ceil((double)size/DIM);
float *indexs=new float[size*size];
int *min_holder = new int[size*pitch];
float *indexes_d;
int *min_holder_d;
cudaMalloc(&indexes_d,size*size*sizeof(float));
cudaMalloc(&min_holder_d,(size*pitch)*sizeof(int));
bool *mark = new bool[size+1];
// NOTE(review): hard-coded 2000 — initializes only the first 2000 cells,
// not size*size; confirm this matches the intended input sizes.
for(int i=0; i<2000; i++)
{
indexs[i]=INT_MAX;
}
for(int i=0; i<size+1; i++)
mark[i]=true;
r=c=0;
char tmp[255];
cout<<"Reading input file";
// The first token of each row appears to be a label that is skipped.
fin>>tmp;
//cout<<tmp;
while(1)
{
/*fin>>r>>c;
r--;
c--;*/
fin>>indexs[r*size+c];
c++; //:D
//cout<<".";
if(c==size)
{
mark[r]=false;
r++;
c=0;
//cout<<endl;
if(r<size)
{
fin>>tmp;
}
else
break;
}
}
cout<<" ..."<<endl;
//cout<<size<<endl;
//size--;
int index=0;
int handler=size;
float min;
float time;
float time_total=0;
cout<<"Working ";
dim3 blocks(size*pitch);
dim3 threads(512);
// One merge per iteration; `handler` counts the remaining rounds.
while(handler)
{
cout<<".";
min= INT_MAX;
// NOTE(review): events are created every iteration but never destroyed —
// this leaks event handles over a long run.
cudaEvent_t start,stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
//GPU code
cudaMemcpy(indexes_d,indexs,size*size*sizeof(float),cudaMemcpyHostToDevice);
kernel<<<blocks,threads>>>(indexes_d,min_holder_d);
gpuErrchk(cudaMemcpy(min_holder,min_holder_d,(size*pitch)*sizeof(int),cudaMemcpyDeviceToHost));// end of GPU code
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time,start,stop);
time_total+=time;
if(time==0)
{
cout<<"\nSomething went wrong on GPU."<<endl;
exit(0);
}
//cout<<"Time this round: "<<time<<endl;
//for(int i=0; i<size*size ; i ++ )
//cout<<i<<": "<<indexs[i]<<" ";
//cout<<endl;
//getwchar();
// Host-side pass over the per-block winners to find the global minimum.
bool flag=false;
int trow=-1;
int row=0;
int col=0;
for(int k=0; k<size*pitch; k++)
{
if((k%(pitch))==0)
trow++;
int i = trow*size + min_holder[k];
if(indexs[i]<min)
{
min=indexs[i];
// NOTE(review): `pitch*DIM` is constant here — the chunk offset
// (k%pitch)*DIM looks intended instead; verify column recovery.
col = pitch*DIM+min_holder[k];
row = trow;
flag=true;
}
}
//cout<<min<<endl;
if(flag)
{
//cout<<row+1<<endl;
fout<<row+1<<endl;
//cout<<col+1<<endl;
fout<<col+1<<endl;
}
//merging two rows and columns
for(int i=0; i<size; i++)
{
indexs[col*size+i]= indexs[row*size+i]=(indexs[row*size+i]+indexs[col*size+i])/2;
indexs[i*size+row]= indexs[i*size+col]=(indexs[i*size+row]+indexs[i*size+col])/2;
indexs[i*size+i]=INT_MAX;
}
indexs[row*size+col] = indexs[col*size+row] = INT_MAX;
handler--;
}
cout<<"\nTime: "<<time_total<<"ms"<<endl;
cout<<"Press Enter to exit.";
getchar();
return 0;
}
|
20,986 | // input: radius (1), nsample (1), xyz1 (b,n,3), xyz2 (b,m,3)
// output: idx (b,m,nsample), pts_cnt (b,m)
// Ball query: for each of the m query points (xyz2) find up to nsample
// points of xyz1 within radius[0]. One block per batch element; threads
// stride over query points.
__global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
    int batch_index = blockIdx.x;
    xyz1 += n*3*batch_index;
    xyz2 += m*3*batch_index;
    idx += m*nsample*batch_index;
    pts_cnt += m*batch_index; // counting how many unique points selected in local region
    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int j=index;j<m;j+=stride) {
        // Hoisted out of the k-loop: the query point is loop-invariant, so
        // there is no need to reload its three coordinates per candidate.
        float x2=xyz2[j*3+0];
        float y2=xyz2[j*3+1];
        float z2=xyz2[j*3+2];
        int cnt = 0;
        for (int k=0;k<n;++k) {
            if (cnt == nsample)
                break; // only pick the FIRST nsample points in the ball
            float x1=xyz1[k*3+0];
            float y1=xyz1[k*3+1];
            float z1=xyz1[k*3+2];
            float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
            if (d<radius[0]) {
                if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
                    for (int l=0;l<nsample;++l)
                        idx[j*nsample+l] = k;
                }
                idx[j*nsample+cnt] = k;
                cnt+=1;
            }
        }
        pts_cnt[j] = cnt;
    }
}
// input: points (b,n,c), idx (b,m,nsample)
// output: out (b,m,nsample,c)
// Gather: out[j][k][l] = points[idx[j][k]][l]. One block per batch
// element; threads stride over the m groups.
__global__ void group_point_gpu(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out) {
    int batch = blockIdx.x;
    const float *pts = points + n*c*batch;         // this batch's point slab
    const int   *ind = idx + m*nsample*batch;      // this batch's index table
    float       *dst = out + m*nsample*c*batch;    // this batch's output slab
    for (int j = threadIdx.x; j < m; j += blockDim.x) {
        for (int k = 0; k < nsample; ++k) {
            int src = ind[j*nsample + k];
            for (int l = 0; l < c; ++l)
                dst[(j*nsample + k)*c + l] = pts[src*c + l];
        }
    }
}
// input: grad_out (b,m,nsample,c), idx (b,m,nsample),
// output: grad_points (b,n,c)
// Scatter-add of grouped gradients back onto the input points. atomicAdd
// is required because several (j,k) slots may reference the same point.
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
    int batch = blockIdx.x;
    const int   *ind = idx + m*nsample*batch;
    const float *g   = grad_out + m*nsample*c*batch;
    float       *gp  = grad_points + n*c*batch;
    for (int j = threadIdx.x; j < m; j += blockDim.x) {
        for (int k = 0; k < nsample; ++k) {
            int src = ind[j*nsample + k];
            for (int l = 0; l < c; ++l)
                atomicAdd(&gp[src*c + l], g[(j*nsample + k)*c + l]);
        }
    }
}
// Host launcher: one block per batch element, 256 threads per block
// striding over the m query points.
void queryBallPointLauncher(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx, int *pts_cnt) {
    query_ball_point_gpu<<<b,256>>>(b,n,m,radius,nsample,xyz1,xyz2,idx,pts_cnt);
}
// Host launcher for the gather kernel: one block per batch element,
// 256 threads per block.
void groupPointLauncher(int b, int n, int c, int m, int nsample, const float *points, const int *idx, float *out){
    group_point_gpu<<<b,256>>>(b,n,c,m,nsample,points,idx,out);
}
// Host launcher for the gradient scatter-add kernel: one block per batch
// element, 256 threads per block.
void groupPointGradLauncher(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points){
    group_point_grad_gpu<<<b,256>>>(b,n,c,m,nsample,grad_out,idx,grad_points);
}
|
// Each thread hashes one key of R into a table of R_size slots.
// R_size must be a power of two for the `& (R_size-1)` mask to be a
// valid modulo; colliding keys simply overwrite each other.
extern "C" __global__ void build_hashtable(int *R, int R_size, int *hash_table) {
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    if (offset < R_size) {          // guard BEFORE the load: the old code read
        int key = R[offset];        // R[offset] out of bounds for tail threads
        int hash = key & (R_size - 1);
        hash_table[hash] = key;
    }
}
20,988 | // one dimension of mean filter designed and coded by neo
/*
  Goal: one-dimensional mean filter.
  For every element, take the 2*r+1 values centred on it (radius r,
  wrapping circularly at the array boundaries) and store their average.
  Requirements:
  1. plain C implementation
  2. CUDA implementation
  3. use of shared memory
  4. handling of boundary / out-of-range accesses
  5. timing comparison
  6. support for data larger than one launch can cover
  thinking:
  the data at the margins can be handled with (i-j+n)%n,
  shaping the array like a circle.
  Within one block the threads visit data in the range [i-r, i, i+r],
  so copy global memory to shared memory to boost the speed.
  The shared memory budget is 48KB,
  so the radius limit is 3*r <= 48KB/4B  =>  r <= 4K.
  test data:
  10 3 1
  100000 100 0
  100000 50 0
*/
#include "cuda_runtime.h"
#include "cuda.h"
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <fstream>
#define MIN(a,b) (a<b?a:b)
#define MAX_BLOCK 1024 // biggest block numbers
using namespace std;
//just with gpu
// Plain-global-memory CUDA mean filter: each block covers r consecutive
// outputs, each thread averages its circular 2r+1 window directly from a.
__global__ void calcWithGPU_filter(float *b, const float *a, const int n, const int r){
    int pos = blockIdx.x * r + threadIdx.x;   // output element for this thread
    if (pos >= n)                             // tail guard past the data
        return;
    float acc = 0;
    for (int d = -r; d <= r; d++)             // circular window via modulo wrap
        acc += a[(pos + d + n) % n];
    b[pos] = acc / (2 * r + 1);
}
//using the shared memory to save time
// Shared-memory variant: launched with blockDim.x == r and 3*r floats of
// dynamic shared memory (see process_gpu). Each thread stages three samples
// at offsets -r, 0 and +r, so cache[] holds the 3r-wide window the whole
// block needs; the averaging loop then reads only shared memory.
__global__ void calcWithGPU_filter_shared(float *b, const float *a, const int n,const int r){
int g = blockIdx.x;
int t = threadIdx.x;
int i_global,i_inner;
extern __shared__ float cache[]; //dynamic shared memory allocation
i_inner=g*r+t;                   // output element for this thread
// Stage the left, centre and right sample for this lane (circular wrap).
i_global=(i_inner-r+n)%n;
cache[t]=a[i_global];
i_global=(i_inner+n)%n;
cache[t+r]=a[i_global];
i_global=(i_inner+r+n)%n;
cache[t+2*r]=a[i_global];
__syncthreads();                 // whole window staged before anyone reads
// Tail threads exit only after the barrier, so the staging stays complete.
if(i_inner>=n){
return ;
}
float sum=0;
for(int i=-r;i<=r;i++){          // 2r+1-wide average, served from shared mem
sum+=cache[r+t+i];
}
b[i_inner]=sum/(2*r+1);
}
//using the shared memory to save time
// Big-data variant: the fixed grid of MAX_BLOCK blocks sweeps the array in
// strides of r*MAX_BLOCK elements, restaging the shared window each pass.
// NOTE(review): the early `return` for i_inner>=n is divergent — threads
// that exit stop participating while siblings still hit the trailing
// __syncthreads() of the loop; that is undefined behaviour per the CUDA
// programming model and should be restructured (e.g. guard the work but
// keep all threads in the loop). Confirm before relying on this kernel.
__global__ void calcWithGPU_filter_shared_bd(float *b, const float *a, const int n,const int r){
int g = blockIdx.x;
int t = threadIdx.x;
int i_global,i_inner;
extern __shared__ float cache[]; //dynamic shared memory allocation
int blocknum=r*MAX_BLOCK;        // elements covered per sweep of the grid
for(i_inner=g*r+t;i_inner<n+blocknum;i_inner=i_inner+blocknum){
// Stage the left, centre and right sample for this lane (circular wrap).
i_global=(i_inner-r+n)%n;
cache[t]=a[i_global];
i_global=(i_inner+n)%n;
cache[t+r]=a[i_global];
i_global=(i_inner+r+n)%n;
cache[t+2*r]=a[i_global];
__syncthreads();
if(i_inner>=n){
return ;
}
float sum=0;
for(int i=-r;i<=r;i++){ //calc the sum of 2*r+1 data
sum+=cache[r+t+i];
}
b[i_inner]=sum/(2*r+1); // get the result of mean filter
__syncthreads();        // window fully consumed before the next restage
}
}
//cpu mean filter process in detail
// Reference CPU implementation of the circular mean filter: b[i] is the
// average of the 2r+1 values of a centred on i, wrapping at both ends.
void calcWithCPU_filter(float *b, const float *a, const int n, const int r){
    if (r > n)                       // window larger than the data: no-op
        return;
    for (int i = 0; i < n; i++) {
        float acc = 0;
        for (int d = -r; d <= r; d++)
            acc += a[(i + d + n) % n];   // modulo wraps the window circularly
        b[i] = acc / (2 * r + 1);
    }
}
//calc the error between cpu and gpu data
// Sums the signed differences b[i]-a[i] and, for type 1/2, dumps both
// series to result_nobd.txt / result_bd.txt respectively.
// NOTE(review): positive and negative differences cancel in this sum —
// confirm whether an absolute-error total was intended.
void calcErrorBetweenData(const float *b, const float *a, const int n,const int type){
    ofstream file;
    if (type == 1) {
        file.open("result_nobd.txt");
        file.clear();
    }
    if (type == 2) {
        file.open("result_bd.txt");
        file.clear();
    }
    float total = 0.0f;
    for (int i = 0; i < n; i++) {
        float diff = b[i] - a[i];
        total += diff;
        if (type > 0)
            file << i << " " << b[i] << " " << a[i] << endl;
    }
    if (type > 0)
        file.close();
    printf(" the error between two data : %.3f \n", total);
}
//data to print
// Prints each input value next to its filtered value, one pair per line.
// `r` is unused but kept for signature compatibility with the callers.
void print_data(float *b, float *a, const int n, const int r){
    for (int i = 0; i < n; i++)
        printf(" %f %f \n", a[i], b[i]);
}
//cpu block to filter data
// Runs the CPU reference filter on a into b, reporting wall time (ms)
// through *time and optionally printing the data.
void process_cpu(float *b, float *a, const int n, const int r, double *time, const bool print=true){
    clock_t t0 = clock();
    calcWithCPU_filter(b, a, n, r);
    clock_t t1 = clock();
    double ms = (double)(t1 - t0) * 1000 / CLOCKS_PER_SEC;
    printf(" cpu mean_filter result: \n");
    (*time) = ms;
    if (print) {
        printf(" [data] [filter] \n");
        print_data(b, a, n, r);
    }
    printf(" Time elapsed : %3.3f ms \n", ms);
}
// gpu block to filter data contains two branches the filter without shared memory or without shared memory
// Runs one of the three GPU filter kernels (type 1: plain, 2: shared
// memory, 3: shared memory + big-data sweep) and reports elapsed time (ms)
// through *time. The event span covers allocation, copies, the kernel and
// the frees, so *time measures the full round trip, not just the kernel.
void process_gpu(float *b, float *a, const int n,const int r ,int type, double *time,const bool print=true){
float *dev_a,*dev_b;
cudaEvent_t start, stop;
cudaEventCreate( &start );
cudaEventCreate( &stop );
cudaEventRecord( start, 0 );
cudaMalloc((void**)&dev_a, n * sizeof(float));
cudaMalloc((void**)&dev_b, n * sizeof(float));
cudaMemcpy(dev_a, a, n * sizeof(float), cudaMemcpyHostToDevice);
// Launch: ceil(n/r) blocks (capped at MAX_BLOCK), r threads per block;
// the shared variants get 3*r floats of dynamic shared memory.
if(type==1)
calcWithGPU_filter<<<MIN(((n+r-1)/r),MAX_BLOCK),r>>>(dev_b, dev_a, n, r);
if(type==2)
calcWithGPU_filter_shared<<<MIN(((n+r-1)/r),MAX_BLOCK),r,r*3*sizeof(float)>>>(dev_b, dev_a, n, r);
if(type==3)
calcWithGPU_filter_shared_bd<<<MIN(((n+r-1)/r),MAX_BLOCK),r,r*3*sizeof(float)>>>(dev_b, dev_a, n, r);
// Blocking copy back doubles as synchronization with the kernel.
cudaMemcpy(b, dev_b, n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(dev_a);
cudaFree(dev_b);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
float elapsedTime;
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
printf(" cuda mean_filter result: \n");
(*time)=elapsedTime;
if(print){
printf(" [data] [filter] \n");
print_data(b,a,n,r);
}
printf( " Time elapsed : %3.3f ms \n", elapsedTime );
}
// Driver: reads n (array size), r (filter radius) and a debug flag, runs
// the CPU filter plus all three GPU variants, compares results and prints
// speedup ratios.
int main(){
int n,r,debug_status;
bool debug=false;
printf("input the n and r [or is_debug(1)]to filter data : ");
scanf(" %d %d %d",&n,&r,&debug_status); //n sizes of array r the radius of filter
float *a,*b,*c,*d,*e;
double time_cpu,time_gpu_1,time_gpu_2,time_gpu_3;
a=(float*)malloc(sizeof(float)*n);
b=(float*)malloc(sizeof(float)*n);
c=(float*)malloc(sizeof(float)*n);
d=(float*)malloc(sizeof(float)*n);
e=(float*)malloc(sizeof(float)*n);
//srand((unsigned)time(NULL));/**/
// Debug mode fills a deterministic ramp; otherwise random values in [0,100).
for(int i=0;i<n;i++){
if(debug_status>0){
a[i]=(float)i;
}else{
a[i]=rand()%RAND_MAX %100 ;
}
//a[i]=powf(a[i],7);
}
debug=(debug_status>0)?true:false;
//CPU===============================================================
process_cpu(b,a,n,r,&time_cpu,debug);
//GPU===============================================================
process_gpu(c,a,n,r,1,&time_gpu_1,debug); // without shared_memory
process_gpu(d,a,n,r,2,&time_gpu_2,debug); // with shared_memory
process_gpu(e,a,n,r,3,&time_gpu_3,debug); // with shared_memory big data
//printf( "\n %f %f %f \n",time_cpu,time_gpu_1,time_gpu_2);
//printf( "\n the error: [ cpu mean filter data / cuda mf data without bd ] \n");
//calcErrorBetweenData(b,d,n);
printf( "\n the error: [ cpu mean filter data / cuda mf data without bd] \n");
calcErrorBetweenData(b,d,n,1);
printf( "\n the error: [ cpu mean filter data / cuda mf data with bd] \n");
calcErrorBetweenData(b,e,n,2);
printf("\n speedup rate:\n");
if(time_cpu>time_gpu_1 &&time_cpu!=0&&time_gpu_1!=0){
printf( " cuda without shared memory => speedup rate : %d:1 \n", (int)ceil(time_cpu/time_gpu_1 ));
}else{
printf( " cuda without shared memory => no speed up \n");
}
// NOTE(review): these two branches test time_gpu_2 but compute the ratio
// with time_gpu_3 — likely a copy-paste slip; confirm intended variant.
if(time_cpu>time_gpu_2 &&time_cpu!=0&&time_gpu_2!=0){
printf( " cuda with shared memory => speedup rate : %d:1 \n", (int)ceil(time_cpu/time_gpu_3 ));
}else{
printf( " cuda with shared memory => no speed up \n");
}
if(time_gpu_1>time_gpu_2 &&time_gpu_1!=0&&time_gpu_2!=0){
printf( " cuda without shared memory / cuda with shared memory => speedup rate : %d:1 \n", (int)ceil(time_gpu_1/time_gpu_3 ));
}else{
printf( " cuda without shared memory / cuda with shared memory => no speed up \n");
}
return 0;
}
|
// Elementwise ab = a + b, one thread per entry.
// The global index now uses blockDim.x instead of the hard-coded 16, so
// the kernel is correct for any block size (identical behaviour for the
// 16-thread blocks the literal implied).
extern "C" __global__ void addVectors(const int entries,
                                      const float *a,
                                      const float *b,
                                      float *ab) {
  const int N = threadIdx.x + (blockDim.x * blockIdx.x);
  if (N < entries) {       // tail guard for partial final block
    ab[N] = a[N] + b[N];
  }
}
|
20,990 | #include "includes.h"
// Grid-stride maximum: each thread folds its strided elements into `temp`,
// the block tree-reduces in shared memory, and thread 0 merges the block
// result into *max under a spin-lock mutex.
// `threadsPerBlock` is a compile-time constant defined by the including file.
__global__ void find_maximum(double *array, double *max, int dSize, int *d_mutex){
	int index = threadIdx.x + blockIdx.x*blockDim.x;
	int stride = gridDim.x*blockDim.x;
	int offset = 0;
	__shared__ double cache[threadsPerBlock];
	double temp = -999999999.0;
	while(index + offset < dSize){
		// Bug fix: fmax (double) instead of fmaxf — fmaxf silently rounded
		// the doubles to float, losing precision and possibly the true max.
		temp = fmax(temp, array[index + offset]);
		offset += stride;
	}
	cache[threadIdx.x] = temp;
	__syncthreads();
	// Pairwise tree reduction over the block's shared cache.
	unsigned int i = blockDim.x/2;
	while(i != 0){
		if(threadIdx.x < i){
			cache[threadIdx.x] = fmax(cache[threadIdx.x], cache[threadIdx.x + i]);
		}
		__syncthreads();
		i /= 2;
	}
	if(threadIdx.x == 0){
		while(atomicCAS(d_mutex,0,1) != 0); //lock
		*max = fmax(*max, cache[0]);
		__threadfence();                    // publish *max before releasing the lock
		atomicExch(d_mutex, 0); //unlock
	}
}
20,991 |
#include <cassert>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Enumerates CUDA devices and prints a few capability facts.
// NOTE: as before, only the properties of the LAST enumerated device are
// reported, since each cudaGetDeviceProperties overwrites `prop`.
int main()
{
	int dev_count;
	cudaDeviceProp prop;
	cudaGetDeviceCount( &dev_count);
	// Bug fix: without this guard, `prop` below was read uninitialized
	// when no CUDA device is present.
	if (dev_count == 0) {
		printf("No CUDA devices found\n");
		return 0;
	}
	for (int i = 0; i < dev_count; i++) {
		cudaGetDeviceProperties(&prop, i);
	}
	if (prop.deviceOverlap){
		printf("Device support CUDA streams\n");
	}
	if (prop.canMapHostMemory==1){
		printf("Device supports zero-copying");
	}
	printf("Device has %d SMs\n",prop.multiProcessorCount);
	printf("Device has %d threads per SMs",prop.maxThreadsPerMultiProcessor);
	printf("Device has %d threads per block",prop.maxThreadsPerBlock);
	return 0;
}
20,992 | #include <stdio.h>
#include <cuda_runtime.h>
// Device-side printf demo: every thread in the launch prints its own id.
__global__ void helloFromGPU()
{
    printf("Hello from GPU thread %d!\n", threadIdx.x);
}
// Prints from the host, then launches 10 device threads that each print.
int main(int argc, char **argv)
{
    printf("Hello from CPU\n");
    helloFromGPU <<<1, 10>>>();   // 1 block of 10 threads
    // cudaDeviceSynchronize();
    // cudaDeviceReset destroys the context, which is one of the points at
    // which the device-side printf buffer is flushed — so the GPU output
    // still appears even with the explicit synchronize commented out.
    cudaDeviceReset();
    return 0;
}
|
20,993 | //
// Created by root on 2020/11/11.
//
#include "cuda_runtime.h"
#include <stdio.h>
// Prints the global 3-D coordinate of every thread in the launch.
__global__ void LocateThreadIdKernel() {
    int gx = blockDim.x * blockIdx.x + threadIdx.x;
    int gy = blockDim.y * blockIdx.y + threadIdx.y;
    int gz = blockDim.z * blockIdx.z + threadIdx.z;
    printf("Thread coordinate: (%d, %d, %d)\n", gx, gy, gz);
}
int main () {
    // Problem extent 10 x 15 x 20, tiled by 2 x 3 x 4 thread blocks
    // (ceil-div in each dimension).
    const int nx = 10, ny = 15, nz = 20;
    dim3 block(2, 3, 4);
    dim3 grid((nx + block.x - 1) / block.x,
              (ny + block.y - 1) / block.y,
              (nz + block.z - 1) / block.z);
    LocateThreadIdKernel<<<grid, block>>>();
    cudaDeviceSynchronize();   // wait so the device printf output is flushed
    return 0;
}
20,994 | #include "includes.h"
__global__ void gpu_array_scale_r8__(size_t tsize, double *arr, double val)
/** arr(:)*=val **/
{
  // Grid-stride loop: each thread scales every gridDim*blockDim-th element,
  // so any launch configuration covers the whole array.
  const size_t first = blockIdx.x*blockDim.x + threadIdx.x;
  const size_t step  = gridDim.x*blockDim.x;
  for (size_t i = first; i < tsize; i += step)
    arr[i] *= val;
  return;
}
20,995 | //#include "cuda_hamming_distance.cuh"
//#include <cuda.h>
//#include <cuda_runtime.h>
//
//
//#include <stdio.h>
//
//namespace dce {
// namespace metrics {
// namespace cuda {
// namespace hamming {
//
// template<typename TValue>
// __global__ void distance(size_t size, TValue *vec_1, TValue *vec_2, TValue *distance) {
// int index = threadIdx.x + blockIdx.x * blockDim.x;
//
// TValue count = 0.0f;
//
// if (vec_1[index] != vec_2[index]) {
// count++;
// }
//
// *distance = count / size;
// }
//
// template<typename TValue, typename TVectorIter>
// TValue cuda_distance(size_t size, TVectorIter iter1, TVectorIter iter2) {
// TValue* host_vec_1, host_vec_2, value;
// host_vec_1 = (TValue *)malloc(size);
// host_vec_2 = (TValue *)malloc(size);
// value = (TValue *)malloc(sizeof(TValue));
//
//
// TValue *device_vec_1, device_vec_2, distance;
// cudaMalloc((void **) &device_vec_1, size);
// cudaMalloc((void **) &device_vec_1, size);
// cudaMalloc((void **) &distance, sizeof(TValue));
//
// for (int i = 0; i < size; ++i) {
// host_vec_1[i] = iter1[i];
// host_vec_2[i] = iter2[i];
// }
//
// // Copy inputs to device
// cudaMemcpy(device_vec_1, host_vec_1, size, cudaMemcpyHostToDevice);
// cudaMemcpy(device_vec_2, host_vec_2, size, cudaMemcpyHostToDevice);
//
// int THREADS_PER_BLOCK = 512;
//
// distance<TValue><<<size /
// THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(size, device_vec_1, device_vec_2, distance);
//
// cudaMemcpy(value, distance, sizeof(TValue), cudaMemcpyDeviceToHost);
//
//
// free(host_vec_1);
// free(host_vec_2);
// free(value);
//
// cudaFree(device_vec_1);
// cudaFree(device_vec_2);
// cudaFree(distance);
//
// return *value;
// }
// }
// }
// }
//} |
20,996 | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime_api.h>
// Prints the compute capability (major*10+minor) and SM count of the
// device whose index is given as argv[1] (default 0).
int main(int argc, char *argv[]) {
  cudaDeviceProp prop;
  cudaError_t status;
  int device_count;
  int device_index = 0;
  if (argc > 1) {
    device_index = atoi(argv[1]);
  }
  status = cudaGetDeviceCount(&device_count);
  if (status != cudaSuccess) {
    fprintf(stderr,"cudaGetDeviceCount() failed: %s\n", cudaGetErrorString(status));
    return -1;
  }
  if (device_index >= device_count) {
    fprintf(stderr, "Specified device index %d exceeds the maximum (the device count on this system is %d)\n", device_index, device_count);
    return -1;
  }
  status = cudaGetDeviceProperties(&prop, device_index);
  if (status != cudaSuccess) {
    // Bug fix: the message used to print the literal text "device_index";
    // the actual index is now interpolated.
    fprintf(stderr,"cudaGetDeviceProperties() for device %d failed: %s\n", device_index, cudaGetErrorString(status));
    return -1;
  }
  int v = prop.major * 10 + prop.minor;   // e.g. 75 for compute capability 7.5
  printf("%d\n", v);
  printf("%d\n", prop.multiProcessorCount);
  return 0;
}
|
20,997 | // cuda_example3.cu : Defines the entry point for the console application.
//
#include <stdio.h>
#include <string.h>
#include <cuda.h>
#define N_h(x,y) N_h[(dimension)*(x-1)+(y-1)]
#define N_d(x,y) N_d[dimension*(x-1)+(y-1)]
#define MAX 100
#define ZERO 0
#define ONE 1
#define INICIO 1
#define TRUE 1
#define INFINITO 999999
void checkCUDAError(const char* msg);
struct nodo{
int nivel;
int index;
int custo;
int nodo_pai;
int vflag[MAX];
};
typedef struct nodo nodo_t;
int vflag[MAX];
int N_h[] = {999999, 436, 636, 119, 131, 150, 999999, 668, 224, 305, 386, 802, 999999, 906, 31, 756, 226, 131, 999999, 602, 440, 107, 915, 275, 999999};
int dimension = 5;
// Kernel that executes on the CUDA device
// Per-thread depth-first search over the TSP node tree, one thread per
// root node. NOTE(review): the entire DFS loop is commented out below, so
// the kernel currently only initializes a private stack and returns —
// matriz_de_solucoes is never written by device code.
__global__ void dfs(int *N_d, nodo_t *matriz_de_nodos, int *matriz_de_solucoes,int dimension ){
/*
@TODO: make the solutions matrix shared.
*/
int idx = blockIdx.x * blockDim.x + threadIdx.x;
nodo_t auxiliar;          // current node being expanded
nodo_t pilha[25];         // per-thread DFS stack (fixed capacity 25)
int topo = 0;             // stack top index
//int contador = 0;
int verificados = 0;      // count of already-visited neighbours
int posicao_solucao = idx; // next free slot in the solutions matrix
pilha[topo] = matriz_de_nodos[idx];   // seed the stack with this thread's root
/*while(topo>=ZERO){
auxiliar = pilha[topo];
if(auxiliar.nivel == dimension){
matriz_de_solucoes[posicao_solucao] = auxiliar.custo;
++posicao_solucao;
topo--;
}
else{
verificados = ZERO;
for(int i = 1; i<=dimension; ++i){
if(auxiliar.vflag[i] == TRUE){
++verificados;
continue;
}
else{
auxiliar.custo+=N_d(auxiliar.index, i);
auxiliar.index = i;
auxiliar.nivel++;
auxiliar.vflag[i] = TRUE;
++topo;
pilha[topo] = auxiliar;
break;
}
}
if(verificados == dimension){
topo--; //pop
}
}//else
}//while*/
}
// Factorial placeholder stub: always returns 1 regardless of `a`.
inline int fat(int a){
    return 1;
}
nodo_t matriz[MAX];
// main routine that executes on the host
// Host driver: expands the first level of the TSP search tree on the CPU,
// copies the frontier nodes to the device and launches dfs over them,
// then prints the (currently never-filled — see dfs) solutions array.
int main( void )
{
int contador = 0;           // number of first-level nodes generated
//int custo = 0;
int nivel;
nodo_t *matriz_de_nodos_d;
int *matriz_solucao_d;
int *matriz_solucao_h;
int *N_d;
int n_blocks;
const int block_size = 32;
size_t size_nodos = (24)*sizeof(nodo_t);
size_t size = (dimension)*(dimension)*(sizeof(int));
size_t size_mat_sols = (24)*sizeof(int);
cudaMalloc( (void **)&N_d, size );
cudaMalloc( (void **)&matriz_de_nodos_d, size_nodos );
cudaMalloc( (void **)&matriz_solucao_d,size_mat_sols );
cudaMemcpy( N_d, N_h, size, cudaMemcpyHostToDevice );// copying the cost matrix to the GPU
// Initialize the visited flags and the solutions matrix.
memset(vflag,ZERO, sizeof(vflag));
matriz_solucao_h = (int *)malloc(size_mat_sols);
for(int i = 0;i<24; ++i)
matriz_solucao_h[i] = INFINITO;
/*
start of the DFS
*/
/*
*/
vflag[INICIO] = TRUE;
nivel = INICIO;
// Expand every unvisited city reachable from the start into a root node.
for(int i = 1; i<=dimension; ++i){
if(vflag[i] == TRUE){
continue;}
else{
matriz[contador].index = i;
matriz[contador].custo = N_h(INICIO,i);
matriz[contador].nivel = nivel+1;
matriz[contador].nodo_pai = INICIO;
memcpy(matriz[contador].vflag, vflag, sizeof(vflag));
matriz[contador].vflag[i] = TRUE;
++contador;
}
}
/*for(int i = 0; i<contador; ++i){
printf("\n\t Nodo de numero %d:", i);
printf("\nIndice: %d",matriz[i].index);
printf("\nNivel: %d",matriz[i].nivel);
printf("\nCusto: %d",matriz[i].custo);
printf("\nFlag do nodo %d: ",i);
for(int j = 1; j<=dimension; ++j){
printf("%d",matriz[i].vflag[j]);
}
}
node = matriz[0];
printf("\n\nNodo dps da copia:");
printf("\nIndice: %d",node.index);
printf("\nNivel: %d",node.nivel);
printf("\nCusto: %d",node.custo);
printf("\nFlag do nodo:");
for(int j = 1; j<=dimension; ++j){
printf("%d",node.vflag[j]);
}*/
cudaMemcpy(matriz_de_nodos_d, matriz, contador*(sizeof(nodo_t)), cudaMemcpyHostToDevice );
checkCUDAError("memcpy1");
cudaMemcpy(matriz_solucao_d, matriz_solucao_h, size_mat_sols, cudaMemcpyHostToDevice );
checkCUDAError("memcpy2");
n_blocks = contador / block_size + ( contador % block_size == 0 ? 0 : 1 );  // ceil-div
// NOTE(review): cudaThreadSynchronize is deprecated in favour of
// cudaDeviceSynchronize.
cudaThreadSynchronize();
dfs<<<n_blocks, block_size>>>(N_d, matriz_de_nodos_d, matriz_solucao_d, dimension);
checkCUDAError("kernel invocation");
cudaThreadSynchronize();
cudaMemcpy( matriz_solucao_h, matriz_solucao_d, size_mat_sols, cudaMemcpyDeviceToHost );
checkCUDAError("memcpy-d-h");
puts("\nSolucoes");
for(int i = 0; i<24; ++i){
printf("\nSulcao %d: %d", i, matriz_solucao_h[i]);
}
cudaFree( N_d );
/*
TODO: free the remaining device/host allocations
*/
return 0;
}
// Abort with a labelled, human-readable message if any preceding CUDA
// call or launch has left an error pending.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg,
                cudaGetErrorString( err) );
        exit(EXIT_FAILURE);
    }
}
|
20,998 | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <errno.h>
using namespace std;
// Selects a CUDA device (optionally via the CUDA_DEVICE environment
// variable) and prints its name.
int
main(int argc, char **argv)
{
	int num_devices, use_device;
	cudaDeviceProp device_prop;

	cudaGetDeviceCount(&num_devices);
	printf("number of devices: %d\n", num_devices);

	const char *device_pick = getenv("CUDA_DEVICE");
	use_device = 0;
	if(device_pick) {
		errno = 0;
		use_device = strtol(device_pick, NULL, 10);
		// Bug fix: also reject negative indices — strtol happily parses
		// them and cudaGetDeviceProperties would then be handed an
		// invalid device ordinal.
		if(errno || use_device < 0 || use_device >= num_devices) {
			printf("invalid device number\n");
			exit(EXIT_FAILURE);
		}
	}
	cudaGetDeviceProperties(&device_prop, use_device);
	fprintf(stdout, "using dev %d: %s\n", use_device, device_prop.name);
	return 0;
}
|
20,999 | #include<iostream>
using namespace std;
// Classic "volatile warp reduction": the first 32 threads fold
// data[0..63] down by strided adds until data[0] holds the total.
// NOTE(review): this pattern relies on implicit warp-synchronous
// execution; on Volta+ (independent thread scheduling) it needs
// __syncwarp() between steps or __shfl_down_sync — confirm target arch.
__global__ void test(float *data) {
    unsigned int tid = threadIdx.x;
    if(tid < 32) {
        // volatile forces each partial sum back to memory instead of a register
        volatile float *in = data;
        in[tid] += in[tid + 32];
        in[tid] += in[tid + 16];
        in[tid] += in[tid + 8];
        in[tid] += in[tid + 4];
        in[tid] += in[tid + 2];
        in[tid] += in[tid + 1];
    }
}
int main() {
    // 64 ones on the host; after the reduction kernel, element 0 holds 64.
    float host[64];
    for (int i = 0; i < 64; ++i)
        host[i] = 1;
    float *dev;
    cudaMalloc((void**)&dev, sizeof(host));
    cudaMemcpy(dev, host, sizeof(host), cudaMemcpyHostToDevice);
    dim3 grid(1, 1);
    dim3 block(32, 1);
    test<<<grid, block>>>(dev);
    // The blocking copy back also synchronizes with the kernel.
    cudaMemcpy(host, dev, sizeof(host), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    for (int i = 0; i < 64; ++i)
        cout << host[i] << " ";
    return 0;
}
|
21,000 | // file esempio tantiprint
#include "stdio.h"
// Each thread prints its flat global id.
__global__ void miokernel(void){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    printf("Sono il thread %d!\n", gid);
}
int main() {
    // Launch a single block of 8 threads; the host prints immediately,
    // and the synchronize below flushes the device-side printf output.
    miokernel<<<1,8>>>();
    printf("Hello World!\n");
    cudaDeviceSynchronize();
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.