serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
12,601 | #include "includes.h"
#define tileSize 32
//function for data initialization
void initialization( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the input data
void printInput( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the output data
void printOutput( double *P_C, double *P_G, int arow, int bcol);
//GPU kernels
//Transpose a width x height row-major matrix: P(height x width) = M^T.
//One thread per element; threads outside the matrix do nothing.
//Fix: the original had a stray duplicate __global__ qualifier line before
//the definition, which is invalid.
__global__ void matrixTransposeSqr(double *P, double* M, int width, int height)
{
unsigned int xIdx = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int yIdx = blockDim.y * blockIdx.y + threadIdx.y;
if (xIdx < width && yIdx < height)
{
//Read element (yIdx, xIdx) of M, store it at (xIdx, yIdx) of P.
unsigned int inIdx = xIdx + width * yIdx;
unsigned int outIdx= yIdx + height * xIdx;
P[outIdx] = M[inIdx];
}
} |
12,602 | #define BLOCK_SIZE 256
// One Floyd-Warshall relaxation round for pivot k.
// Each thread owns one flattened cell ij of the N x N distance matrix;
// -1 encodes "no path". In-place update is safe because row k and column k
// are fixed points of round k.
__global__ void apspKernel(const int N, const int k, int *global_mat) {
  const int ij = blockIdx.x * BLOCK_SIZE + threadIdx.x;
  if (ij >= N * N) return;          // guard the grid tail (ceil-div launch)
  const int i = ij / N;
  const int ik = i * N + k;         // dist(i, k)
  const int kj = ij + (k - i) * N;  // == k*N + j, i.e. dist(k, j)
  const int dik = global_mat[ik];
  const int dkj = global_mat[kj];
  if (dik != -1 && dkj != -1) {
    const int sum = dik + dkj;
    int& dij = global_mat[ij];
    if (dij == -1 || dij > sum)
      dij = sum;
  }
}
// Host driver: copy mat to the device, run N relaxation rounds in place,
// copy the result back. mat is an N*N row-major matrix with -1 = infinity.
// Fixes: the old truncating grid division silently skipped up to
// BLOCK_SIZE-1 cells per round when N*N was not a multiple of BLOCK_SIZE,
// and the device buffer was never freed.
void par_apsp(int N, int *mat) {
  int *global_mat, size = sizeof(int) * N * N;
  cudaMalloc(&global_mat, size);
  cudaMemcpy(global_mat, mat, size, cudaMemcpyHostToDevice);
  dim3 dimGrid((N * N + BLOCK_SIZE - 1) / BLOCK_SIZE, 1, 1);
  dim3 dimBlock(BLOCK_SIZE, 1, 1);
  for (int k = 0; k < N; k++)
    apspKernel<<<dimGrid, dimBlock>>>(N, k, global_mat);
  cudaMemcpy(mat, global_mat, size, cudaMemcpyDeviceToHost);
  cudaFree(global_mat);
}
// __global__ void apspKernel(int N, int k, int *g_idata, int *g_odata) {
// // access thread id
// const unsigned int tid = threadIdx.x;
// // access block id
// const unsigned int bid = blockIdx.x;
// // access number of threads in this block
// const unsigned int bdim = blockDim.x;
// const unsigned int i = (bid * bdim + tid)/N;
// const unsigned int j = (bid * bdim + tid)%N;
// if (g_idata[i*N+k] == -1 || g_idata[k*N+j] == -1) g_odata[i*N+j] = g_idata[i*N+j];
// else if (g_idata[i*N+j] == -1) g_odata[i*N+j] = g_idata[i*N+k]+g_idata[k*N+j];
// else g_odata[i*N+j] = min(g_idata[i*N+j], g_idata[i*N+k]+g_idata[k*N+j]);
// }
// void par_apsp(int N, int *mat) {
// //copy mat from host to device memory d_mat
// int* d_mat;
// int* d_mat_out;
// int size = sizeof(int) * N * N;
// cudaMalloc((void**) &d_mat, size);
// cudaMemcpy(d_mat, mat, size, cudaMemcpyHostToDevice);
// //allocate matrix to hold temporary result of each iteration to avoid race condition.
// cudaMalloc((void**) &d_mat_out, size);
// for (int k = 0; k < N; k++) {
// apspKernel<<<ceil(N*N/256), 256>>>(N, k, d_mat, d_mat_out);
// //copy the temporary result back to the matrix
// cudaMemcpy(d_mat, d_mat_out, size, cudaMemcpyDeviceToDevice);
// }
// cudaMemcpy(mat, d_mat, size, cudaMemcpyDeviceToHost);
// }
// __global__ void apspKernel(int N, int k, int *g_data) {
// const int tid = threadIdx.x;
// const int bid = blockIdx.x;
// const int bdim = blockDim.x;
// const int ij = bid * bdim + tid;
// const int i = ij / N;
// const int j = ij % N;
// const int ik = i * N + k;
// const int kj = k * N + j;
// const int dik = g_data[ik];
// const int dkj = g_data[kj];
// if (dik != -1 && dkj != -1) {
// const int sum = dik + dkj;
// int& dij = g_data[ij];
// if (dij == -1 || dij > sum)
// dij = sum;
// }
// }
// void par_apsp(int N, int *mat) {
// int* d_mat;
// int size = sizeof(int) * N * N;
// cudaMalloc((void**) &d_mat, size);
// cudaMemcpy(d_mat, mat, size, cudaMemcpyHostToDevice);
// for (int k = 0; k < N; k++)
// apspKernel<<<ceil(N*N/256), 256>>>(N, k, d_mat);
// cudaMemcpy(mat, d_mat, size, cudaMemcpyDeviceToHost);
// } |
12,603 | #include "includes.h"
//In-place Cholesky decomposition of the MATRIX_SIZE x MATRIX_SIZE matrix
//held in U (row-major). Each outer iteration k: thread 0 computes the
//diagonal sqrt and scales row k; then all threads share the elimination of
//the trailing submatrix, ops_per_thread rows each. Finally the strictly
//lower triangle is zeroed.
//NOTE(review): the __syncthreads() barriers only synchronize within one
//block — this kernel is only correct when launched with a single block
//(as the original comments state). Confirm the launch configuration.
//NOTE(review): istart/iend row ranges are not clamped to num_rows; this
//presumably relies on ops_per_thread * num_threads == num_rows exactly —
//verify against the caller.
__global__ void chol_kernel(float * U, int ops_per_thread) {
//Determine the boundaries for this thread
//Get a thread identifier (global, but effectively block-local: one block)
int tx = blockIdx.x * blockDim.x + threadIdx.x;
//Iterators
unsigned int i, j, k;
//unsigned int size = MATRIX_SIZE*MATRIX_SIZE;
unsigned int num_rows = MATRIX_SIZE;
//Contents of the A matrix should already be in U
//Perform the Cholesky decomposition in place on the U matrix
for (k = 0; k < num_rows; k++) {
//Only one thread does the square root and division
if (tx == 0) {
// Take the square root of the diagonal element
U[k * num_rows + k] = sqrt(U[k * num_rows + k]);
//No check for a non-positive diagonal (matrix assumed positive definite)
// Division step: scale the remainder of row k by the new diagonal
for (j = (k + 1); j < num_rows; j++) {
U[k * num_rows + j] /= U[k * num_rows + k]; // Division step
}
}
//Barrier: row k must be finalized before elimination uses it
__syncthreads();
//Elimination step: each thread updates its contiguous slice of rows
int istart = ( k + 1 ) + tx * ops_per_thread;
int iend = istart + ops_per_thread;
for (i = istart; i < iend; i++) {
//Subtract the rank-1 update contributed by row k (upper triangle only)
for (j = i; j < num_rows; j++) {
U[i * num_rows + j] -= U[k * num_rows + i] * U[k * num_rows + j];
}
}
//Barrier: all updates for iteration k must land before k+1 starts
__syncthreads();
}
//Barrier before the final clean-up pass
__syncthreads();
//As the final step, zero out the lower triangular portion of U
//for(i = 0; i < U.num_rows; i++)
//Each thread zeroes ops_per_thread rows of the strict lower triangle
//Starting index for this thread
int istart = tx * ops_per_thread;
//Ending index for this thread
int iend = istart + ops_per_thread;
//Check boundaries, else do nothing
for (i = istart; i < iend; i++) {
//Zero columns 0..i-1 of row i
for (j = 0; j < i; j++) {
U[i * num_rows + j] = 0.0;
}
}
//Don't sync, will sync outside here
} |
12,604 | #define LIMIT -999
#define BLOCK_SIZE 16
#define MAX_SEQ_LEN 2100
#define MAX_SEQ_NUM 1024
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <sys/time.h>
// Abort with a diagnostic (source line + CUDA error string) when a CUDA
// runtime call did not return cudaSuccess; no-op otherwise.
inline void cudaCheckError(int line, cudaError_t ce)
{
    if (ce == cudaSuccess)
        return;
    printf("Error: line %d %s\n", line, cudaGetErrorString(ce));
    exit(1);
}
// HACK Huan's hack
// this is not the updated validation code
// Compare a CPU score matrix against GPU scores (stored pre-shifted left
// by 2 bits). Returns 1 when every one of `length` entries matches,
// 0 at the first mismatch (which is reported on stdout).
int validation(int *score_matrix_cpu, int *score_matrix, unsigned int length)
{
    for (unsigned int i = 0; i < length; ++i) {
        int gpu_score = score_matrix[i] >> 2;
        if (score_matrix_cpu[i] != gpu_score) {
            printf("i = %d, expected %d, got %d.\n",i, score_matrix_cpu[i], score_matrix[i] >> 2);
            return 0;
        }
    }
    return 1;
}
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
// Wall-clock time in seconds since the epoch (microsecond resolution).
double gettime() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return tv.tv_sec + tv.tv_usec * 1e-6;
}
// Busy-work kernel used purely for timing: nine grid-stride sweeps, each
// adding the thread's global id to every element it owns.
__global__ void dummy_function(int * array, unsigned int howlarge)
{
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    for (int pass = 0; pass < 9; pass++) {
        for (unsigned int base = 0; base < howlarge; base += blockDim.x*gridDim.x) {
            unsigned int idx = tid + base;
            if (idx < howlarge)
                array[idx] = array[idx] + tid;
        }
    }
}
// Micro-benchmark: times a large kernel, a large D->H copy, one combined
// iteration, and then TIMES iterations of a two-stream pipeline in both
// synchronous and asynchronous copy modes. Host buffers are pinned
// (cudaMallocHost) so the async copies can actually overlap.
// NOTE(review): none of the allocations are freed and no return codes are
// checked; acceptable for a throwaway benchmark, flagged for completeness.
void runTest()
{
double start, end, now;
unsigned int nints = 100 * 1024 * 1024;
unsigned int sz = nints * sizeof(int);
unsigned int nints_small = 1 * 1024 * 1024;
unsigned int sz_small = nints_small * sizeof(int);
#ifdef _LP64
// NOTE(review): the trailing 0 argument is unused by this format string
printf ("Running on a 64-bit platform!\n", 0);
#else
#endif
printf("Big Chunk of memory allocated on host & device = %d\n", sz / 1024 /1024);
printf("Small Chunk of memory allocated on host & device = %d\n", sz_small / 1024 /1024);
// Pinned host buffers (large + small, one pair per stream)
int * dummy_cpu, * dummy_cpu2, * dummy_small_cpu, * dummy_small_cpu2;
cudaMallocHost( (void**) &dummy_cpu, sz );
cudaMallocHost( (void**) &dummy_cpu2, sz );
cudaMallocHost ( (void**) &dummy_small_cpu, sz_small);
cudaMallocHost ( (void**) &dummy_small_cpu2, sz_small);
// Device buffers mirroring the host ones
int * dummy_gpu, * dummy_gpu2, * dummy_small_gpu, * dummy_small_gpu2;
cudaMalloc( (void**) &dummy_gpu, sz );
cudaMalloc( (void**) &dummy_gpu2, sz );
cudaMalloc( (void**) &dummy_small_gpu, sz_small );
cudaMalloc( (void**) &dummy_small_gpu2, sz_small );
double kernelt = 0, memcpyt = 0, st = 0, ast = 0;
#define TIMES 5
// Baseline: kernel alone
start = gettime();
dummy_function<<<100,512>>>(dummy_gpu, nints);
cudaDeviceSynchronize();
end = gettime();
printf("time for kernel call = %f\n", end-start);
// Baseline: large device-to-host copy alone
start = gettime();
cudaMemcpy(dummy_cpu, dummy_gpu, sz, cudaMemcpyDeviceToHost );
cudaDeviceSynchronize();
end = gettime();
printf("time for memcopy D-H = %f\n", end-start);
// Baseline: one full iteration (H->D copy, kernel, D->H copy)
start = gettime();
cudaMemcpy(dummy_small_gpu, dummy_small_cpu, sz_small, cudaMemcpyHostToDevice);
dummy_function<<<100,512>>>(dummy_gpu, nints);
cudaMemcpy(dummy_cpu, dummy_gpu, sz, cudaMemcpyDeviceToHost );
cudaDeviceSynchronize();
end = gettime();
printf("time for one iteration = %f\n", end-start);
cudaStream_t stream1;
cudaStreamCreate(&stream1);
cudaStream_t stream2;
cudaStreamCreate(&stream2);
#define DEBUG 1
// Pass 0: async large copies; pass 1: sync large copies
for (int sync=0; sync<2; sync++){
start = gettime();
for (int i = 0; i< TIMES; i++) {
// small sync copy H->D
cudaMemcpyAsync(dummy_small_gpu, dummy_small_cpu, sz_small, cudaMemcpyHostToDevice, stream1);
//kernel function
dummy_function<<<100,512, 0, stream1>>>(dummy_gpu, nints);
cudaDeviceSynchronize();
//large copy D->H can be sync or async
#ifdef DEBUG
now = gettime();
#endif
if (sync){
cudaMemcpy(dummy_cpu, dummy_gpu, sz, cudaMemcpyDeviceToHost);
}else{
cudaMemcpyAsync(dummy_cpu, dummy_gpu, sz, cudaMemcpyDeviceToHost, stream1 );
}
#ifdef DEBUG
// The async variant should return almost immediately; sync blocks here
printf("(A)sync call took %f\n", gettime() - now);
#endif
// small sync copy H->D
cudaMemcpyAsync(dummy_small_gpu2, dummy_small_cpu2, sz_small, cudaMemcpyHostToDevice, stream2);
//kernel function
dummy_function<<<100,512, 0, stream2>>>(dummy_gpu2, nints);
cudaDeviceSynchronize();
//large copy D->H can be sync or async
if (sync){
cudaMemcpy(dummy_cpu2, dummy_gpu2, sz, cudaMemcpyDeviceToHost);
}else{
cudaMemcpyAsync(dummy_cpu2, dummy_gpu2, sz, cudaMemcpyDeviceToHost , stream2);
}
}
// Drain both streams before reading the clock
cudaDeviceSynchronize();
end = gettime();
if (!sync)
printf("%d iterations: time for ASYNC calls = %f\n",TIMES,end-start);
else
printf("%d iterations: time for SYNC calls = %f\n",TIMES,end-start);
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Entry point; command-line arguments are accepted but unused.
int main( int argc, char** argv)
{
runTest();
return EXIT_SUCCESS;
}
|
12,605 | #include <stdio.h>
// Matrix-vector product c = A*b for a column-major m x n matrix A:
// c[row] = sum_j A[j*m + row] * b[j]. One thread per output row; threads
// with row >= m do nothing. The printf is a debugging probe.
__global__ void mxv(int m, int n, double *a, double *b, double *c){
    printf("Have we at least entered the function?\n");
    int row = threadIdx.x + blockIdx.x*blockDim.x;
    if (row < m) {
        double acc = 0.0;
        for (int j = 0; j < n; j++)
            acc += a[m*j + row]*b[j];
        c[row] = acc;
    }
}
|
12,606 | #include "includes.h"
// Element-wise a = scaleA*a + scaleB*b + scaleC*c over numEls entries.
// Grid-stride loop: any launch configuration covers the whole range.
__global__ void kAdd3(float* a, const float* b, const float* c, const unsigned int numEls, const float scaleA, const float scaleB, const float scaleC) {
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < numEls; i += stride) {
        a[i] = scaleA * a[i] + scaleB * b[i] + scaleC * c[i];
    }
} |
12,607 | // Kernel for forward elimination in Gauss elimination
// #include <cuda.h>
// Forward elimination for Gauss elimination over an augmented system stored
// as a 10x11 grid of pointers to individual doubles; (i, j) comes from the
// 2-D thread index.
// NOTE(review): __syncthreads() only synchronizes within one block — if this
// is launched with more than one block, the read of *d_A[k][k] races with
// other blocks' updates. Confirm it is launched as a single block.
// NOTE(review): the pivot write on the first line of the loop is not guarded
// by the (i < nDim) bounds check used below — verify the launch never
// creates i >= 10, otherwise d_piv[i] is out of bounds.
__global__ void ForwardElimKernel(double *d_A[10][11], double *d_piv[10], int nDim)
{
// Assign matrix elements to blocks and threads
int i = blockDim.y*blockIdx.y + threadIdx.y;
int j = blockDim.x*blockIdx.x + threadIdx.x;
// Parallel forward elimination: one pivot column per outer iteration
for (int k = 0; k <= nDim-2; k++)
{
// Multiplier for row i relative to pivot row k
*d_piv[i] = *d_A[i][k]/(*d_A[k][k]);
__syncthreads();
// Update rows below the pivot across the augmented columns
if ((i>k) && (i<nDim) && (j>=k) && (j<=nDim))
*d_A[i][j] -= *d_piv[i]*(*d_A[k][j]);
__syncthreads();
}
}
|
12,608 |
#include "complex.h"
extern "C" {
// Returns the fixed starting point (1, 1) for every orbit index.
__device__
double2 fetch_initial_point(unsigned long i) {
return (double2){1.0,1.0};
}
// Lorenz-system parameters: step size h, sigma, rho, beta; _z carries the
// third coordinate between successive iterate_point calls.
// NOTE(review): _z is a single device-global shared by all threads — this
// is only well-defined if exactly one thread iterates; confirm the launch.
__device__
static double _z = 1.0, h = 0.0001, sigma = 10.0, rho = 28.0, beta = 2.6666666667;
// One forward-Euler step of the Lorenz system:
//   x' = sigma*(y - x);  y' = x*(rho - z) - y;  z' = x*y - beta*z
// Returns the new (x, y); the new z is stashed in _z for the next call.
// i, ipnt and func_n are unused here (presumably part of a generic
// iterator interface — verify against the caller).
__device__
double2 iterate_point(double2 val, unsigned long i, double2 ipnt, unsigned long func_n) {
double x = val.x, y = val.y, z = _z;
//double x = _z, y = val.x, z = val.y;
double nx, ny, nz;
nx = x + h * sigma * y - h * sigma * x;
ny = y + h * x * rho - h * x * z - h * y;
nz = z + h * x * y - h * beta * z;
_z = nz;
return (double2){nx, ny};
//_z = nx;
//return (double2){ny, nz};
}
}
|
12,609 | #include "includes.h"
// Forward-Euler LIF membrane update for every neuron:
//   V <- kc*(V0 + R*I) + (1 - kc)*V,  with kc = timestep / tau_m.
// Grid-stride loop: each thread integrates a disjoint set of neurons.
__global__ void lif_update_membrane_potentials(float *d_membrane_potentials_v, float * d_membrane_resistances_R, float * d_membrane_time_constants_tau_m, float * d_resting_potentials, float* d_current_injections, float timestep, size_t total_number_of_neurons){
    for (int neuron = threadIdx.x + blockIdx.x * blockDim.x;
         neuron < total_number_of_neurons;
         neuron += blockDim.x * gridDim.x) {
        float kc = timestep / d_membrane_time_constants_tau_m[neuron];
        float v_old = d_membrane_potentials_v[neuron];
        float drive = d_resting_potentials[neuron]
                      + d_membrane_resistances_R[neuron] * d_current_injections[neuron];
        d_membrane_potentials_v[neuron] = kc * drive + (1 - kc) * v_old;
    }
    // Barrier kept from the original; no shared state is exchanged after it.
    __syncthreads();
} |
12,610 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define R 4096
#define C 4096
#define BLOCK_W 32
#define BLOCK_H 32
float *a, *b, *result;
// res = A * B for square C x C row-major matrices; one thread per output
// element. The original split the k-loop into BLOCK_W-sized chunks purely
// as loop tiling — the summation order (k = 0..C-1) is unchanged here.
__global__ void multiple(float *A, float *B, float *res){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float acc = 0;
    for (int k = 0; k < C; k++)
        acc += A[row * C + k] * B[k * C + col];
    res[row * C + col] = acc;
}
// Benchmark driver: fill two 4096x4096 matrices with pseudo-random values,
// multiply them on the GPU and report the elapsed time (which deliberately
// includes both H->D and D->H transfers).
// Fix: the host buffers a, b and result were never freed.
int main(){
    float *d_a, *d_b, *d_r;
    cudaEvent_t start, end;
    float etime;
    dim3 block(BLOCK_W, BLOCK_H);
    dim3 grid(C/BLOCK_W, R/BLOCK_H);  // exact fit: R and C are multiples of 32
    a = (float*)malloc(sizeof(float)*R*C);
    b = (float*)malloc(sizeof(float)*R*C);
    result = (float*)malloc(sizeof(float)*R*C);
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    srand(time(NULL));
    // random number creation: values in [0, 100)
    for (int i = 0; i < R; i++)
        for (int j = 0; j < C; j++){
            a[i * C + j] = (rand() % 1000000) / (float)10000;
            b[i * C + j] = (rand() % 1000000) / (float)10000;
        }
    // cuda var initialization
    cudaMalloc((void**)&d_a, sizeof(float)*R*C);
    cudaMalloc((void**)&d_b, sizeof(float)*R*C);
    cudaMalloc((void**)&d_r, sizeof(float)*R*C);
    // kernel call & exec time check (timed region spans copies + kernel)
    cudaEventRecord(start, 0);
    cudaMemcpy(d_a, a, sizeof(float)*R*C, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float)*R*C, cudaMemcpyHostToDevice);
    multiple<<<grid, block>>>(d_a, d_b, d_r);
    cudaMemcpy(result, d_r, sizeof(float)*R*C, cudaMemcpyDeviceToHost);
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&etime, start, end);
    printf("EXEC TIME : %f ms\n", etime);
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_r);
    // release host buffers (previously leaked)
    free(a);
    free(b);
    free(result);
    return 0;
} |
12,611 | #include <array>
#include <experimental/tuple>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <tuple>
#include <type_traits>
#include <utility>
namespace gpu {
// Minimal fixed-size array usable from both host and device code
// (std::array is not available in device code in this codebase's setup).
// Provides begin/end for iteration, size(), and element access.
template <typename T, std::size_t s>
class cuArray {
public:
__host__ __device__ const T*
begin() const
{
return &data[0];
}
__host__ __device__ const T*
end() const
{
return (&data[0] + s);
}
__host__ __device__ constexpr std::size_t
size() const
{
return s;
}
__host__ __device__ T& operator[](std::size_t i) { return data[i]; }
__host__ __device__ T const& operator[](std::size_t i) const
{
return data[i];
}
// Public storage keeps the type an aggregate (brace-initializable,
// trivially copyable to the device with cudaMemcpy).
T data[s];
};
};
// Debug helper: prints its three arguments and returns their sum.
__host__ __device__ double
foo(double x, double y, double z)
{
    double sum = x + y + z;
    printf("x:%f y:%f z:%f\n", x, y, z);
    return sum;
}
// class Test {
// public:
// __host__ __device__
// Test()
// {}
// __host__ __device__ // Test(int r)
// {
// rows = r;
// }
// __host__ __device__ double
// operator()(double x, double y, double z) const
// {
// printf("rows:%i\n", rows);
// return 0.0;
// }
// int rows;
// };
namespace gpu {
namespace detail {
// Expands the cuArray into individual arguments f(data[0], ..., data[N-1])
// using the index pack I... — device-side analogue of std::apply.
template <class F, size_t N, std::size_t... I>
__device__ double
apply_impl(F&& f,
gpu::cuArray<double, N> const& data,
std::index_sequence<I...>)
{
return f(data[I]...);
};
}
// Calls f with the N doubles stored in data, unpacked as separate arguments.
template <class F, size_t N>
__device__ double
// Unsure if we need to pass 'f' by value, for GPU execution
apply(F&& f, gpu::cuArray<double, N> const& data)
{
return detail::apply_impl(
std::forward<F>(f), data, std::make_index_sequence<N>());
}
}
// Single-thread kernel: unpacks the 3 argument values into a call to foo
// and stores the scalar result. Intended launch: <<<1, 1>>>.
__global__ void
toyKernel(gpu::cuArray<double, 3> const* args, double* result)
{
*result = gpu::apply(foo, *args);
}
// Copies a 3-element argument pack to the device, runs toyKernel on a
// single thread, and returns the scalar it computed (-1.0 sentinel is
// overwritten by the device-to-host copy).
double
execute_sample(std::array<double, 3> const& args)
{
    std::size_t const nbytes = sizeof(double) * args.size();
    gpu::cuArray<double, 3>* d_args = nullptr;
    double* d_answer = nullptr;
    cudaMalloc((void**)&d_args, nbytes);
    cudaMalloc((void**)&d_answer, sizeof(double));
    cudaMemcpy(d_args, args.data(), nbytes, cudaMemcpyHostToDevice);
    toyKernel<<<1, 1>>>(d_args, d_answer);
    cudaDeviceSynchronize();
    cudaFree(d_args);
    double answer = -1.0;
    cudaMemcpy(&answer, d_answer, sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_answer);
    return answer;
}
// Runs the same 3-argument sum once on the GPU (printed) and once on the
// host via std::experimental::apply.
int
main()
{
    std::array<double, 3> host_args = {1, 2, 3};
    std::cout << execute_sample(host_args) << '\n';
    std::experimental::apply(foo, host_args);
    return 0;
}
|
12,612 | #include <math.h>
#include <iostream>
#include <fstream>
#include <chrono>
#define DIMENSION 512
// CUDA kernel to add elements of two arrays
// Fill x[0..n) with 500.0f, one element per thread via a grid-stride loop,
// so any launch configuration (including the caller's <<<1, 1>>>) covers
// the whole array.
// Fix: the old body wrote x[n] unconditionally — with the caller passing
// n == N that is one element past the end of the N-float allocation.
__global__
void render(int n, float *x, float *y, float *pixels)
{
    int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        x[i] = 500.0f;
        // pixels[i*3+0] = 0.0f;
        // pixels[i*3+1] = 0.5f;
        // pixels[i*3+2] = 1.0f;
    }
}
// Print the current wall-clock time in milliseconds since the Unix epoch.
// Fix: the millisecond epoch count (~1.7e12) does not fit in `int`; use the
// duration's native 64-bit representation to avoid truncation/overflow.
void time()
{
    auto now = std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count();
    std::cout << now << std::endl;
}
// Managed-memory demo: allocates three unified-memory arrays, runs the
// render kernel once, and prints the results. Error state is polled with
// cudaGetLastError() at each phase.
// NOTE(review): the kernel is launched as <<<1, 1>>> and receives N as its
// `n` argument — verify that render only writes indices in [0, N); the
// arrays hold exactly N floats (pixels holds 3*N).
int main(void)
{
cudaError_t error = cudaGetLastError();
if (error != 0) {
std::cout << "ERROR: could not start program CUDA gave error: " << cudaGetErrorString(error) << std::endl;
return 1;
}
int N = 1 << 18;
float *x, *y, *pixels;
// Unified memory: accessible from both host and device
cudaMallocManaged(&x, N*sizeof(float));
cudaMallocManaged(&y, N*sizeof(float));
cudaMallocManaged(&pixels, 3*N*sizeof(float));
error = cudaGetLastError();
if (error != 0) {
std::cout << "ERROR: this happened: " << cudaGetErrorString(error) << std::endl;
return 1;
}
// Host-side initialization (note: only the first N pixels floats are set)
for (int i = 0; i < N; i++) {
x[i] = 2.0f;
y[i] = 1.0f;
pixels[i] = 0.0f;
}
time();
render<<<1, 1>>>(N, x, y, pixels);
cudaDeviceSynchronize();
time();
error = cudaGetLastError();
if (error != 0) {
std::cout << "ERROR: this happened: " << cudaGetErrorString(error) << std::endl;
return 1;
}
for (int i = 0; i < N; i++) {
std::cout << x[i] << " " << y[i] << std::endl;
}
// std::ofstream file;
// file.open("out.ppm");
// file << "P3\n" << DIMENSION << " " << DIMENSION << "\n255\n";
// std::cout << "writing file\n";
// for (int i = DIMENSION*DIMENSION -1; i > 0; i--)
// {
// int red = int(255.99 * pixels[i*3+0]);
// int green = int(255.99 * pixels[i*3+1]);
// int blue = int(255.99 * pixels[i*3+2]);
// file << red << " " << green << " " << blue << "\n";
// }
// file.close();
cudaFree(x);
cudaFree(y);
cudaFree(pixels);
}
|
12,613 | #include "includes.h"
// Increment one array element per *block* (blockIdx.x is the element index).
// NOTE: every thread of a block touches the same slot, so this presumably
// expects one-thread blocks — verify the launch configuration.
__global__ void addOne(int* array, int size) {
    if ( blockIdx.x >= size )
        return;
    array[blockIdx.x]++;
} |
12,614 | #include "includes.h"
// Forward pass of the tanh activation: A[i] = tanh(Z[i]) over a
// Z_x_dim x Z_y_dim matrix, one element per thread (tail guarded).
// Fix: std::tanh is not guaranteed to be callable in device code and
// promotes the float argument to double; tanhf is the single-precision,
// device-callable CUDA math function.
__global__ void tanhActivationForward(float* Z, float* A, int Z_x_dim, int Z_y_dim) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < Z_x_dim * Z_y_dim) {
        A[index] = tanhf(Z[index]);
    }
} |
12,615 | #include<stdio.h>
#include<cuda_runtime.h>
#define SIZE 1000
int h_M[SIZE*SIZE],h_N[SIZE*SIZE],h_S[SIZE*SIZE];
// Element-wise S = M + N for SIZE x SIZE matrices, one thread per element.
// The bounds guard lets the host launch a ceil-div grid safely.
__global__ void addition_kernel(int *d_M,int *d_N,int *d_S)
{
    int tIdx = blockDim.x*blockIdx.x + threadIdx.x;
    int tIdy = blockDim.y*blockIdx.y + threadIdx.y;
    if (tIdx >= SIZE || tIdy >= SIZE) return;  // guard the grid tail
    int s= d_M[tIdy*SIZE+tIdx] + d_N[tIdy*SIZE+tIdx];
    d_S[tIdy*SIZE+tIdx] = s;
}
// Launch helper. Fix: SIZE (1000) is not a multiple of 32, and the old
// truncating division (SIZE/32 = 31 blocks = 992 threads) left rows and
// columns 992..999 uncomputed; round the grid up instead.
void matrix_addition(int *d_M,int *d_N,int *d_S)
{
    dim3 dimBlock(32,32,1);
    dim3 dimGrid((SIZE+dimBlock.x-1)/dimBlock.x,(SIZE+dimBlock.y-1)/dimBlock.y);
    addition_kernel<<<dimGrid,dimBlock>>>(d_M,d_N,d_S);
}
// Print a SIZE x SIZE row-major matrix, one row per line.
void display_matrix(int mat[])
{
    for (int r = 0; r < SIZE; r++)
    {
        for (int c = 0; c < SIZE; c++)
            printf("%d ",mat[r*SIZE+c]);
        printf("\n");
    }
}
// Driver: checks for a CUDA device, builds two SIZE x SIZE matrices
// (h_M[i][j] = i*SIZE+j, h_N = its transpose-pattern), adds them on the
// GPU and reports the kernel's event-timed duration.
// NOTE(review): only the matrix_addition launch sits between the events,
// but the kernel is asynchronous — the following cudaEventSynchronize
// makes the measurement valid.
int main()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if(!deviceCount){
fprintf(stderr,"No devices supporting cuda\n");
exit(EXIT_FAILURE);
}
int deviceId = 0;
cudaSetDevice(deviceId);
const int ARRAY_BYTES = SIZE*SIZE*sizeof(int);
int *d_M,*d_N,*d_S;
cudaMalloc((void**)&d_M,ARRAY_BYTES);
cudaMalloc((void**)&d_N,ARRAY_BYTES);
cudaMalloc((void**)&d_S,ARRAY_BYTES);
// Fill host matrices: M is row-major ramp, N is its mirrored pattern
int i,j;
for(i=0;i<SIZE;i++)
{
for(j=0;j<SIZE;j++)
{
h_M[i*SIZE+j] = i*SIZE+j;
h_N[i*SIZE+j] = j*SIZE+i;
}
}
cudaMemcpy(d_M,h_M,ARRAY_BYTES,cudaMemcpyHostToDevice);
cudaMemcpy(d_N,h_N,ARRAY_BYTES,cudaMemcpyHostToDevice);
// Time the kernel with CUDA events
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
matrix_addition(d_M,d_N,d_S);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
cudaMemcpy(h_S,d_S,ARRAY_BYTES,cudaMemcpyDeviceToHost);
/*printf("M is \n");
display_matrix(h_M);
printf("N is \n");
display_matrix(h_N);
printf("Addition of M and N is \n");
display_matrix(h_S);*/
printf("Elapsed time is %f\n",elapsedTime);
cudaFree(d_M);
cudaFree(d_N);
cudaFree(d_S);
return 0;
} |
12,616 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Seed the three vectors on the device: a[i] = 1+i, b[i] = 1-i, c zeroed.
// One element per thread; the tail of the grid is guarded.
__global__ void initialize(int N, float *a, float *b, float *c) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    c[idx] = 0;
    a[idx] = 1 + idx;
    b[idx] = 1 - idx;
}
// Element-wise vector addition c = a + b; threads past N do nothing.
__global__ void addVectors(int N, float *a, float *b, float *c) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    c[idx] = a[idx] + b[idx];
}
// Driver: takes the vector length N on the command line, initializes and
// adds two vectors in unified memory, then prints the first few results.
// Fix: the print loop read c[0..4] unconditionally, running past the
// allocation whenever N < 5.
int main (int argc, char **argv){
    if (argc != 2) exit (1);
    int N = atoi(argv[1]);
    int block_size = 512;
    int grid_size = (N-1) / block_size + 1;   // ceil-div launch
    float *a, *b, *c;
    cudaMallocManaged (&a, N*sizeof(float));
    cudaMallocManaged (&b, N*sizeof(float));
    cudaMallocManaged (&c, N*sizeof(float));
    initialize<<<grid_size, block_size>>>(N,a,b,c);
    cudaDeviceSynchronize();
    addVectors<<<grid_size, block_size>>>(N,a,b,c);
    cudaDeviceSynchronize();
    // Print at most the first five results, never past the end of c
    for (int i = 0; i < 5 && i < N; i++) {
        printf("%f\n", c[i]);
    }
    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
}
|
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void initData(void);
void initValues(void);
void initValuesLin(void);
void calculateMovingAverage(void);
void calculateMovingAverageCuda(void);
__global__ void kernelMovingAverage(double *MOV_LIN_D, double *DATA_LIN_D, int *ROWS);
void calculateColumnAverage(void);
void calculateColumnAverageCuda(void);
__global__ void kernelColumnAverage(double *AVG_D, double *DATA_LIN_D, int *COLUMNS, int *ELEMS_AVG);
void showData(void);
void showMov(void);
void showColAverage(void);
void showMovCuda(void);
void showColAverageCuda(void);
void checkResults(void);
void freeAll(void);
double **DATA;
double **MOV;
double *DATA_LIN;
double *DATA_TRANSPOSED_LIN;
double *MOV_LIN;
double *DATA_TRANSPOSED_LIN_D;
double *AVG_D;
double *AVG;
double *AVG_CUDA;
const int OK = 1;
const int KO = 0;
int COLUMNS;
int ROWS;
int ELEMS_AVG;
int TOTAL_CELLS;
// Allocate every host-side buffer (2-D matrices plus their linearised and
// transposed copies) and delegate value initialization to initValues().
void initData(void) {
    AVG = (double *) malloc(COLUMNS * sizeof(double));
    AVG_CUDA = (double *) malloc(COLUMNS * sizeof(double));
    DATA = (double **) malloc(ROWS * sizeof(double *));
    MOV = (double **) malloc(ROWS * sizeof(double *));
    DATA_LIN = (double *) malloc(TOTAL_CELLS * sizeof(double));
    DATA_TRANSPOSED_LIN = (double *) malloc(TOTAL_CELLS * sizeof(double));
    MOV_LIN = (double *) malloc(TOTAL_CELLS * sizeof(double));
    for (int row = 0; row < ROWS; row++) {
        DATA[row] = (double *) malloc(COLUMNS * sizeof(double));
        MOV[row] = (double *) malloc(COLUMNS * sizeof(double *));
    }
    initValues();
}
// Fill DATA with random values in [0, 100); zero MOV and both averages.
void initValues(void) {
    srand(time(NULL));
    for (int r = 0; r < ROWS; r++) {
        for (int c = 0; c < COLUMNS; c++) {
            DATA[r][c] = (100.0 * rand()) / ((double) RAND_MAX);
            MOV[r][c] = 0.0;
        }
    }
    for (int c = 0; c < COLUMNS; c++) {
        AVG[c] = 0.0;
        AVG_CUDA[c] = 0.0;
    }
    initValuesLin();
}
// Build the linearised row-major copies (DATA_LIN, MOV_LIN) and the
// column-major copy (DATA_TRANSPOSED_LIN) of the 2-D matrices.
void initValuesLin(void) {
    srand(time(NULL));
    for (int r = 0; r < ROWS; r++) {
        for (int c = 0; c < COLUMNS; c++) {
            DATA_LIN[r * COLUMNS + c] = DATA[r][c];
            MOV_LIN[r * COLUMNS + c] = MOV[r][c];
            DATA_TRANSPOSED_LIN[c * ROWS + r] = DATA[r][c];
        }
    }
}
// Matrices are only printed when small enough to fit on screen.
int canShow() {
    return (ROWS <= 15 && COLUMNS <= 15) ? 1 : 0;
}
// Print the DATA matrix (skipped for large matrices; see canShow).
void showData(void) {
    if (!canShow()) return;
    for (int r = 0; r < ROWS; r++) {
        for (int c = 0; c < COLUMNS; c++)
            printf("%07.4f ", DATA[r][c]);
        printf("\n");
    }
    printf("\n");
}
// Print the CPU moving-average matrix (skipped for large matrices).
void showMov(void) {
    if (!canShow()) return;
    for (int r = 0; r < ROWS; r++) {
        for (int c = 0; c < COLUMNS; c++)
            printf("%07.4f ", MOV[r][c]);
        printf("\n");
    }
    printf("\n");
}
// Print the CUDA moving-average result from its linearised buffer.
void showMovCuda(void) {
    if (!canShow()) return;
    for (int r = 0; r < ROWS; r++) {
        for (int c = 0; c < COLUMNS; c++)
            printf("%07.4f ", MOV_LIN[r * COLUMNS + c]);
        printf("\n");
    }
    printf("\n");
}
// Print the CPU per-column averages (skipped for large matrices).
void showColAverage(void) {
    if (!canShow()) return;
    for (int c = 0; c < COLUMNS; c++)
        printf("%07.4f ", AVG[c]);
    printf("\n\n");
}
// Print the CUDA per-column averages (skipped for large matrices).
void showColAverageCuda(void) {
    if (!canShow()) return;
    for (int c = 0; c < COLUMNS; c++)
        printf("%07.4f ", AVG_CUDA[c]);
    printf("\n\n");
}
// CPU reference: 9-point trailing moving average along each row (the
// current cell plus up to 8 predecessors, always divided by ELEMS_AVG
// even near the left edge).
void calculateMovingAverage(void) {
    for (int y = 0; y < ROWS; y++) {
        for (int x = 0; x < COLUMNS; x++) {
            double acc = DATA[y][x];
            for (int back = 1; back <= 8; back++) {
                if (x - back >= 0) { acc = acc + DATA[y][x - back]; }
            }
            MOV[y][x] = acc / ELEMS_AVG;
        }
    }
}
// CPU reference: mean of each column of DATA, accumulated into AVG
// (which initValues zeroed beforehand).
void calculateColumnAverage(void) {
    for (int x = 0; x < COLUMNS; x++) {
        double total = AVG[x];
        for (int y = 0; y < ROWS; y++)
            total = total + DATA[y][x];
        AVG[x] = total / ROWS;
    }
}
// One thread per cell: 9-point trailing moving average along the row.
// Launch layout: blockIdx.x = row, threadIdx.x = column (COLUMNS threads
// per block). COLUMNS and ELEMS_AVG arrive as single-int device buffers.
__global__ void kernelMovingAverage(double *MOV_LIN_D, double *DATA_LIN_D, int *COLUMNS, int *ELEMS_AVG) {
    const int columns = *COLUMNS;
    const int elemsAvg = *ELEMS_AVG;
    const int row = blockIdx.x;
    const int column = threadIdx.x;
    const int pos = row * columns + column;
    double acc = DATA_LIN_D[pos];
    for (int back = 1; back <= 8; back++) {
        if (column - back >= 0) {
            acc = acc + DATA_LIN_D[pos - back];
        }
    }
    MOV_LIN_D[pos] = acc / elemsAvg;
}
// Host wrapper: stage DATA_LIN/MOV_LIN and the scalar parameters on the
// device, run kernelMovingAverage (one block per row, one thread per
// column), and copy the result back into MOV_LIN.
void calculateMovingAverageCuda() {
    double *dataDev;
    double *movDev;
    int *columnsDev;
    int *elemsAvgDev;
    cudaMalloc((void **) &dataDev, TOTAL_CELLS * sizeof(double));
    cudaMalloc((void **) &movDev, TOTAL_CELLS * sizeof(double));
    cudaMalloc((void **) &columnsDev, sizeof(int));
    cudaMalloc((void **) &elemsAvgDev, sizeof(int));
    cudaMemcpy(dataDev, DATA_LIN, TOTAL_CELLS * sizeof(double), cudaMemcpyHostToDevice );
    cudaMemcpy(movDev, MOV_LIN, TOTAL_CELLS * sizeof(double), cudaMemcpyHostToDevice );
    cudaMemcpy(columnsDev, &COLUMNS, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(elemsAvgDev, &ELEMS_AVG, sizeof(int), cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(COLUMNS, 1);
    dim3 blocks(ROWS, 1);
    kernelMovingAverage<<<blocks, threadsPerBlock>>>(movDev, dataDev, columnsDev, elemsAvgDev);
    cudaMemcpy(MOV_LIN, movDev, TOTAL_CELLS * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(columnsDev);
    cudaFree(elemsAvgDev);
    cudaFree(movDev);
    cudaFree(dataDev);
}
// One thread per column; the input is the transposed matrix, so each
// column's ROWS values are contiguous starting at threadIdx.x * rows.
// Launched as a single block of COLUMNS threads.
__global__ void kernelColumnAverage(double *AVG_D, double *DATA_LIN_D, int *ROWS) {
    const int rows = *ROWS;
    const int first = threadIdx.x * rows;
    const int last = first + rows;
    double acc = 0.0;
    for (int p = first; p < last; p++) {
        acc = acc + DATA_LIN_D[p];
    }
    AVG_D[threadIdx.x] = acc / rows;
}
// Host wrapper: stage the transposed matrix and ROWS on the device, run
// kernelColumnAverage (single block, one thread per column), and collect
// the per-column means into AVG_CUDA.
void calculateColumnAverageCuda(void) {
    double *transposedDev;
    double *avgDev;
    int *rowsDev;
    cudaMalloc((void **) &transposedDev, TOTAL_CELLS * sizeof(double));
    cudaMalloc((void **) &avgDev, COLUMNS * sizeof(double));
    cudaMalloc((void **) &rowsDev, sizeof(int));
    cudaMemcpy(transposedDev, DATA_TRANSPOSED_LIN, TOTAL_CELLS * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(avgDev, AVG, COLUMNS * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(rowsDev, &ROWS, sizeof(int), cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(COLUMNS, 1);
    dim3 blocks(1, 1);
    kernelColumnAverage<<<blocks, threadsPerBlock>>>(avgDev, transposedDev, rowsDev);
    cudaMemcpy(AVG_CUDA, avgDev, COLUMNS * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(rowsDev);
    cudaFree(transposedDev);
    cudaFree(avgDev);
}
// Compare the CPU and CUDA results element-wise within a small tolerance
// and print OK (1) / KO (0) for each of the two computations.
// Fix: the old code called integer abs() on double differences — any
// difference below 1.0 truncated to 0 and the epsilon test always passed.
// fabs() keeps the fractional part.
void checkResults(void) {
    int y, x, k;
    double diff;
    double epsilon;
    int result;
    result = OK;
    epsilon = 0.0000001;
    for (y = 0; y < ROWS; y++) {
        for (x = 0; x < COLUMNS; x++) {
            k = y * COLUMNS + x;
            diff = fabs(MOV[y][x] - MOV_LIN[k]);
            if (diff > epsilon) {
                result = KO;
                break;
            }
        }
    }
    printf("\nCheck result moving average : %d\n\n", result);
    result = OK;
    for (x = 0; x < COLUMNS; x++) {
        diff = fabs(AVG[x] - AVG_CUDA[x]);
        if (diff > epsilon) {
            result = KO;
            break;
        }
    }
    printf("Check result column average : %d\n\n", result);
}
// Release every host buffer allocated in initData.
// Fix: DATA_LIN, DATA_TRANSPOSED_LIN, MOV_LIN and AVG_CUDA were leaked.
void freeAll(void) {
    int y;
    free(AVG);
    free(AVG_CUDA);
    free(DATA_LIN);
    free(DATA_TRANSPOSED_LIN);
    free(MOV_LIN);
    for (y = 0; y < ROWS; y++) {
        free(DATA[y]);
        free(MOV[y]);
    }
    free(DATA);
    free(MOV);
}
// Driver: matrix size comes from the command line (falls back to 5x10),
// then the moving-average and column-average computations are run on the
// CPU and on the GPU and the two result sets are compared.
int main(int argc, char**argv) {
if (argc != 3) {
printf("usage: exercici_cuda N M\n N : number of rows\n M number of columns\n\n");
ROWS = 5;
COLUMNS = 10;
} else {
ROWS = atoi(argv[1]);
COLUMNS = atoi(argv[2]);
}
// 9-point window: the current cell plus up to 8 predecessors
ELEMS_AVG = 9;
TOTAL_CELLS = ROWS * COLUMNS;
initData();
showData();
calculateMovingAverage();
showMov();
calculateColumnAverage();
showColAverage();
calculateMovingAverageCuda();
showMovCuda();
calculateColumnAverageCuda();
showColAverageCuda();
checkResults();
freeAll();
}
|
12,618 | #include "includes.h"
// Bulk device-to-device copy of float4 elements, one element per thread;
// threads past elem_count do nothing.
__global__ void copy_buffer_util_kernel( const float4 * __restrict input_buf, float4 * __restrict output_buf, int elem_count)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= elem_count)
        return;
    output_buf[idx] = input_buf[idx];
} |
// Symmetrize a (3*natoms)^2 matrix in place by averaging h with its
// transpose. One thread per element; only the upper triangle (r <= c)
// does work, writing both (r,c) and (c,r) once.
__global__ void symmetrize2D( float *h, int natoms ) {
    const int dof = 3 * natoms;
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if ( idx >= dof * dof )
        return;
    const int r = idx / dof;
    const int c = idx % dof;
    if ( r <= c ) {
        const float avg = 0.5 * ( h[r * dof + c] + h[c * dof + r] );
        h[r * dof + c] = avg;
        h[c * dof + r] = avg;
    }
}
|
12,620 | /*
* purpose: testing/verifying CUDA Unified Memory for >=pascal
* architectures;
* n.b. the only question here is whether we are able to
* let the GPU work on data arrays much larger than what
* is available on-board of the device, say with the
* 8GB onboard memory of the gtx1080 master an array
* of size 16GB allocated as cudaMallocManaged() on
* the host
* n.b.2. the other real/unusual question is what could be a
* reasonable one-dim array consuming 16GB ? and what
* would be a tractable correctness test corresponding
* to it ?
* result: yes, working great !
* running 'watch nvidia-smi' in a 2nd terminal,
* we really see the 8111MiB resident for a long time
* even for times when the kernel is already back and
* we just wait for function check_array() to complete !
* compilation: nvcc ./unified_memory_oversubscription.cu
* usage: ./a.out
*/
#include <stdio.h>
#define DBLEARRAY16GB 2147483648
/*
* GPU kernel: working on managed unified memory
* of really huge size, say 16GB, which is twice the
* amount physically available on the device
* n.b. threadIdx.x is of type unsigned int !
* so we should have access to 2^32 = 4294967296
* different elements in terms of indices, hence
* sufficient to service the 2147483648 needed
* within our huge-sized array but only if we
* drop the sign !
*/
/*
 * GPU kernel: each thread writes (global index + 5.0) into its slot of the
 * managed array. Unsigned 32-bit index arithmetic covers all 2^31 elements
 * of the oversubscription test without overflow.
 */
__global__ void KrnlDmmy(double *a)
{
    const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    a[idx] = (double) idx + 5.0e00;
    return;
}
/*
* host: array check
* this is actually all but trivial in terms of achievable/required
* numerical accuracy ! theoretically for our regular array, a[0]=5, a[1]=6,
* a[2]=7 ... a[2147483647]=2147483652 with the requested number of
* DBLEARRAY16GB elements, we expect a total sum of
* (5+2147483652)*2147483648/2=2305843018877370368
* which turns out to be really hard to compute :-)
*/
/*
 * Host-side validation: sums all DBLEARRAY16GB elements with long-double
 * accumulation and flags any element whose marginal contribution exceeds
 * the expected value i+5 (numerically this sum sits at the edge of what
 * long double can resolve, as the original author noted).
 * Fix: printf format specifiers — %u for the unsigned int index (was %ld)
 * and %Lf for the long double delta (was %lf); both mismatches were UB.
 */
long double check_array(double *a)
{
unsigned int i;
long double rslt, prvs_rslt, dlta;
/*
 * receives the array a[] after it had been modified on the GPU
 * and so we just want to run a sum over all its elements to
 * check whether the GPU kernel had been working correctly;
 */
rslt = 0.0e00;
for (i=0; i<DBLEARRAY16GB; i++) {
prvs_rslt = rslt;
rslt += a[i];
dlta = rslt - prvs_rslt;
if (dlta > (i + 5)) {
printf("element %u, of value %lf causes dlta %Lf\n", i, a[i], dlta);
}
}
return(rslt);
}
/*
 * host: main
 * Allocates a 16GB managed array (oversubscribing an 8GB device), fills it
 * on the GPU and verifies the content on the host via check_array().
 */
int main()
{
    long int huge;
    dim3 thrds_per_block, blcks_per_grid;
    double *c;
    long double sgnl;
    cudaError_t err;
    /*
     * call a simple kernel that writes into CUDA unified memory; the
     * specific challenge is whether a 16GB array still fits on a device
     * whose physical memory is limited to 8GB.
     */
    huge = (long int) DBLEARRAY16GB * sizeof(double);
    /* %zu is the correct specifier for sizeof (size_t); %d was UB on LP64 */
    printf("huge in Bytes: %ld sizeof(double): %zu\n", huge, sizeof(double));
    c = NULL;
    err = cudaMallocManaged(&c, huge);
    if (err != cudaSuccess || c == NULL) {
        /* oversubscription this large can legitimately fail — bail out
         * instead of dereferencing NULL below */
        printf("cudaMallocManaged failed: %s\n", cudaGetErrorString(err));
        return (1);
    }
    thrds_per_block.x = 512;
    blcks_per_grid.x = ((long int) DBLEARRAY16GB) / thrds_per_block.x;
    printf("working with blcks_per_grid.x: %u\n", blcks_per_grid.x);
    KrnlDmmy<<<blcks_per_grid, thrds_per_block>>>(c);
    cudaDeviceSynchronize();
    printf("back from GPU kernel\n");
    /*
     * if everything has gone well we should end up with 2305843018877370368
     * when adding together all content of all individual array elements, c[]
     */
    printf("c[0] %lf\n", c[0]);
    printf("c[9] %lf\n", c[9]);
    printf("c[99] %lf\n", c[99]);
    printf("c[999] %lf\n", c[999]);
    printf("c[9999] %lf\n", c[9999]);
    printf("c[99999] %lf\n", c[99999]);
    printf("c[999999] %lf\n", c[999999]);
    printf("c[9999999] %lf\n", c[9999999]);
    printf("c[99999999] %lf\n", c[99999999]);
    printf("c[999999999] %lf\n", c[999999999]);
    printf("c[2147483647] %lf\n", c[2147483647]);
    printf("c[DBLEARRAY16GB] %lf\n", c[DBLEARRAY16GB-1]);
    sgnl = check_array(c);
    /* %Lf is the long double specifier — the original %llf was invalid */
    printf("expecting 2305843018877370368 while receiving %Lf\n", sgnl);
    cudaFree(c);
    return (0);
}
|
12,621 | #include "includes.h"
# define MAX(a, b) ((a) > (b) ? (a) : (b))
# define GAUSSIAN_KERNEL_SIZE 3
# define SOBEL_KERNEL_SIZE 5
# define TILE_WIDTH 32
# define SMEM_SIZE 128
// Computes the per-pixel gradient magnitude sqrt(gx^2 + gy^2) from the X
// and Y gradient images. One thread per pixel; expects a 2D launch
// covering a width x height image.
__global__ void magnitudeImage(float *d_gradientX, float *d_gradientY, float *d_gradientMag, int width, int height) {
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    if (ix < width && iy < height) {
        int idx = iy * width + ix;
        // plain multiplies instead of powf(x, 2.0f): powf is a costly
        // general-purpose routine and is wasteful for squaring
        float gx = d_gradientX[idx];
        float gy = d_gradientY[idx];
        d_gradientMag[idx] = sqrtf(gx * gx + gy * gy);
    }
}
12,622 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (see the "do not modify"
// banner above): iterates var_1 times over a fixed cascade of comparisons
// and single-precision updates of `comp`, then prints the final value with
// 17 significant digits. main() below launches it with <<<1,1>>>.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
for (int i=0; i < var_1; ++i) {
if (comp < (var_2 + +1.1621E-35f + -0.0f)) {
if (comp <= +0.0f - var_3 + sinf(-1.8109E1f)) {
if (comp < (+1.6534E-43f - var_4 * var_5)) {
float tmp_1 = -0.0f - (var_6 + logf(var_7 * var_8 * cosf(var_9 - (var_10 + (var_11 / var_12)))));
comp = tmp_1 - (+1.1201E36f + (var_13 * +1.2268E-36f * -1.3791E36f * -1.2918E-12f));
if (comp >= (-1.6497E-30f + -1.5878E-44f / var_14 / (var_15 + (var_16 / var_17)))) {
float tmp_2 = -1.9083E35f;
comp += tmp_2 / +1.2917E34f / -1.0018E-16f + (-1.3099E-42f / +1.1907E34f * var_18 / var_19);
comp = (+1.1179E-28f + (var_20 - (-0.0f + (var_21 - (-1.5491E-42f / var_22)))));
}
if (comp == (var_23 / (+1.8109E35f - (var_24 * var_25)))) {
comp += cosf(-1.0381E-11f + (-1.4237E-41f * var_26));
comp += (+1.2706E36f * var_27);
comp += (-1.6229E-37f - (+1.9955E-41f + -1.1126E5f));
}
}
}
}
}
printf("%.17g\n", comp);
}
// Allocates a 10-element float array on the heap and fills every slot with
// the value v. The caller owns the returned buffer and must free() it.
float* initPointer(float v) {
  float *buf = (float*) malloc(sizeof(float) * 10);
  int i = 0;
  while (i < 10) {
    buf[i] = v;
    ++i;
  }
  return buf;
}
// Parses 28 numeric command-line values and launches the generated kernel
// once on a single thread.
int main(int argc, char** argv) {
    /* Guard: the generated kernel needs 28 arguments; the original read
     * argv[1..28] unconditionally, which is undefined behavior when fewer
     * are supplied. */
    if (argc < 29) {
        printf("usage: %s <28 numeric arguments>\n", argv[0]);
        return 1;
    }
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    float tmp_28 = atof(argv[28]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
    cudaDeviceSynchronize();
    return 0;
}
|
12,623 | #include <cstdio>
#include <cstring>
#include <iomanip>
#include <iostream>
int const MARKS = 256;
int const ROWS = 128;
// One thread per row: computes the knot hash of "<input>-<row>" and stores
// the number of set bits of the 128-bit dense hash in usedSquares[row].
// NOTE(review): lengths[] is a fixed 64-byte buffer that must hold
// inputSize + 1 ('-') + up to 3 row digits + 5 suffix bytes; any input
// longer than ~55 bytes overflows it — confirm the caller bounds the input.
__global__
void knotHash(unsigned char const *input, int inputSize, int *usedSquares) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
// Build the length sequence: input bytes, '-', the decimal digits of the
// row number, then the standard suffix bytes 17,31,73,47,23.
unsigned char lengths[64];
int numLengths = 0;
for (int i = 0; i < inputSize; i++) {
lengths[numLengths++] = input[i];
}
lengths[numLengths++] = '-';
if (row >= 100) {
lengths[numLengths++] = '0' + (row / 100);
}
if (row >= 10) {
lengths[numLengths++] = '0' + ((row % 100) / 10);
}
lengths[numLengths++] = '0' + (row % 10);
unsigned char const APPEND[] = { 17, 31, 73, 47, 23 };
int const APPEND_SIZE = sizeof(APPEND) / sizeof(unsigned char);
for (int i = 0; i < APPEND_SIZE; i++) {
lengths[numLengths++] = APPEND[i];
}
// Sparse hash: 64 rounds of reversing length-sized circular spans of
// items[], advancing by length + skip each step.
int items[MARKS];
for (int i = 0; i < MARKS; i++) {
items[i] = i;
}
int start = 0;
int skip = 0;
for (int round = 0; round < 64; round++) {
for (int i = 0; i < numLengths; i++) {
int length = lengths[i];
for (int j = 0; j < length / 2; j++) {
int a = (start + j) % MARKS;
int b = (start + length - 1 - j) % MARKS;
unsigned char t = items[a];
items[a] = items[b];
items[b] = t;
}
start = (start + length + skip) % 256;
skip = (skip + 1) % 256;
}
}
// Dense hash: XOR each 16-element group down to a single byte.
unsigned char hash[16];
for (int i = 0; i < 16; i++) {
unsigned char xored = 0;
for (int j = 0; j < 16; j++) {
xored ^= items[16 * i + j];
}
hash[i] = xored;
}
// Population count of the 128-bit dense hash = used squares in this row.
int bitCount = 0;
for (int i = 0; i < 16; i++) {
for (int j = 1; j < 0x100; j <<= 1) {
if (hash[i] & j) {
bitCount++;
}
}
}
usedSquares[row] = bitCount;
}
// Reads the key string from stdin, computes ROWS knot hashes on the GPU
// (one thread per row) and prints the total number of set bits.
int main(void) {
    std::string inputStr;
    std::getline(std::cin, inputStr);
    int n = inputStr.size();
    // The kernel appends '-', up to 3 digits and a 5-byte suffix to the
    // input inside a fixed 64-byte buffer — reject inputs that would
    // overflow it (64 - 1 - 3 - 5 = 55).
    if (n > 55) {
        std::cerr << "input too long (max 55 characters)\n";
        return 1;
    }
    unsigned char *input;
    cudaMallocManaged(&input, n);
    memcpy(input, inputStr.data(), n);
    int *usedSquares;
    cudaMallocManaged(&usedSquares, ROWS * sizeof(int));
    knotHash<<<1, ROWS>>>(input, n, usedSquares);
    cudaDeviceSynchronize();
    // host-side reduction over the per-row bit counts
    int totalUsedSquares = 0;
    for (int i = 0; i < ROWS; i++) {
        totalUsedSquares += usedSquares[i];
    }
    std::cout << totalUsedSquares << '\n';
    cudaFree(input);
    cudaFree(usedSquares);
    return 0;
}
|
12,624 | #include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <chrono>
//#define N 1000
//#define M 512
//nvcc testing.cu -o test
#define BLOCK_SIZE 128
#define Rd 4
// Shared-memory 1D stencil: each block stages BLOCK_SIZE elements plus a
// RADIUS-wide halo on each side into `temp`, then sums the 2*RADIUS+1
// window around its element.
// NOTE(review): temp[] is sized with the compile-time constant Rd (4) but
// indexed with the runtime parameter RADIUS — any launch with RADIUS > 4
// overflows shared memory; confirm callers never pass a larger radius.
// NOTE(review): there is no `gindex < n` guard, and the right-halo load
// in[gindex + BLOCK_SIZE] reads past the end of the input in the last
// block — the caller must size the grid/array accordingly.
__global__ void stencil_1d(int *in, int *out, int RADIUS) {
__shared__ int temp[BLOCK_SIZE + 2 * Rd];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + RADIUS;
// Read input elements into shared memory
temp[lindex] = in[gindex];
// First RADIUS threads also load the halos: interior blocks read a real
// left halo, block 0 zero-fills it (there is nothing left of element 0).
if (threadIdx.x < RADIUS && blockIdx.x>0) {
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
else if(threadIdx.x < RADIUS && blockIdx.x<=0) {
temp[lindex - RADIUS] = 0;
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
// Synchronize (ensure all the data is available)
__syncthreads();
// Apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++) {
result += temp[lindex + offset];
}
// Store the result
out[gindex] = result;
}
// 1D stencil without shared memory: out[g] = sum of in[g-RADIUS..g+RADIUS],
// with the window clamped to the valid index range [0, n).
__global__ void stencil_noMem(int *in, int *out, int RADIUS, int n) {
    int gindex = threadIdx.x + blockIdx.x * blockDim.x;
    // guard the tail block: threads past n used to write out of bounds
    if (gindex >= n) return;
    int result = 0;
    for (int offset = -RADIUS ; offset <= RADIUS ; offset++) {
        int idx = gindex + offset;
        // idx >= 0 (the original `idx > 0` wrongly excluded element 0
        // from every window, skewing results near the left edge)
        if (idx >= 0 && idx < n) {
            result += in[idx];
        }
    }
    // Store the result
    out[gindex] = result;
}
// CPU reference stencil: out[i] += sum of in[i-RADIUS..i+RADIUS], with the
// window clamped to [0, n). The original indexed in[i + offset] without
// bounds checks and read out of bounds near both edges.
// Note: results are ADDED to out[] (the caller initializes it).
void cpuStencil(int *in, int *out, int RADIUS, int n) {
    for (int i = 0; i < n; i++) {
        for (int offset = -RADIUS ; offset <= RADIUS ; offset++) {
            int idx = i + offset;
            if (idx >= 0 && idx < n) {
                out[i] += in[idx];
            }
        }
    }
}
// Fills x[0..size) with pseudo-random digits in [0, 10).
void random_ints(int* x, int size)
{
    for (int k = 0; k < size; ++k)
        x[k] = rand() % 10;
}
// Runs a 1D stencil of radius R over N random ints, either on the GPU
// (no-shared-memory kernel) or on the CPU reference, and prints the timing.
// usage: <prog> N R gpu|cpu
int main(int argc, char* argv[]) {
    // Guard: N, R and the mode string are required; the original read
    // argv[1..3] unconditionally (undefined behavior when missing).
    if (argc < 4) {
        std::cerr << "usage: " << argv[0] << " <N> <R> <gpu|cpu>" << std::endl;
        return 1;
    }
    int N = atoi(argv[1]);
    int R = atoi(argv[2]);
    int *in, *out;
    int size = N * sizeof(int);
    // Setup input values (note: out is also pre-filled with random values,
    // so the stencil sums land on top of that initial content)
    in = (int*)malloc(size); random_ints(in, N);
    out = (int*)malloc(size); random_ints(out, N);
    if (strcmp(argv[3],"gpu")==0) {
        int *d_in, *d_out;
        float milli;
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaEventRecord( start, 0 );
        // Allocate space for device copies
        cudaMalloc((void **)&d_in, size);
        cudaMalloc((void **)&d_out, size);
        // Copy inputs to device
        cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
        cudaMemcpy(d_out, out, size, cudaMemcpyHostToDevice);
        // Launch kernel on GPU (ceil-div grid covers all N elements)
        //stencil_1d<<<(N + BLOCK_SIZE-1) / BLOCK_SIZE,BLOCK_SIZE>>>(d_in, d_out, R);
        stencil_noMem<<<(N + BLOCK_SIZE-1) / BLOCK_SIZE,BLOCK_SIZE>>>(d_in, d_out, R, N);
        cudaEventRecord( stop, 0 );
        cudaEventSynchronize( stop );
        cudaEventElapsedTime( &milli, start, stop );
        cudaEventDestroy( start );
        cudaEventDestroy( stop );
        // Copy result back to host
        cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
        cudaFree(d_in); cudaFree(d_out);
        float nanosec = (milli)*1000000;
        std::cout << "N: " << N << " R: " << R << " GPU time: " << nanosec << " ns" << std::endl;
    }
    else {
        auto t1 = std::chrono::high_resolution_clock::now();
        cpuStencil(in,out,R, N);
        auto t2 = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::nanoseconds>( t2 - t1 ).count();
        std::cout << "N: " << N << " R: " << R << " CPU time: " << duration << " ns" << std::endl;
    }
    // Cleanup
    free(in); free(out);
    return 0;
}
|
12,625 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <iostream>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// Fills in[0..size) with pseudo-random floats in [0.0, 25.5]
// (a random byte scaled down by 10).
void initialData_1(float *in, const int size)
{
    for (int k = 0; k < size; ++k)
        in[k] = (float)(rand() & 0xFF) / 10.0f;
}
// case 0 copy kernel: row-major element copy, one element per thread.
// Expects a 2D launch covering an nx x ny matrix.
__global__ void copyRow_1(float *out, float *in, const int nx, const int ny)
{
    const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col < nx && row < ny)
    {
        const unsigned int idx = row * nx + col;
        out[idx] = in[idx];
    }
}
// main functions
//int main(int argc, char **argv)
//{
// // set up device
// int dev = 0;
// cudaDeviceProp deviceProp;
// cudaError error;
//
// error = cudaGetDeviceProperties(&deviceProp, dev);
// printf("%s starting transpose at ", argv[0]);
// printf("device %d: %s ", dev, deviceProp.name);
// printf("allowed memory size : %d",(int)deviceProp.totalGlobalMem);
// error = cudaSetDevice(dev);
//
// // set up array size 2048
// int nx = 1 << 5;
// int ny = 1 << 5;
//
// // select a kernel and block size
// int iKernel = 1;
// int blockx = 16;
// int blocky = 16;
//
// if (argc > 1) blockx = atoi(argv[1]);
//
// if (argc > 2) blocky = atoi(argv[2]);
//
// if (argc > 3) nx = atoi(argv[3]);
//
// if (argc > 4) ny = atoi(argv[4]);
//
//
// size_t nBytes = nx * ny * sizeof(float);
// float bytes = nBytes / (1024 * 1024);
// printf(" with matrix nx %d ny %d with kernel %d with %.2f MB memory\n", nx, ny, iKernel, bytes);
//
// // execution configuration
// dim3 block(blockx, blocky);
// dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
//
// // allocate host memory
// float *h_A = (float *)malloc(nBytes);
// float *hostRef = (float *)malloc(nBytes);
// float *gpuRef = (float *)malloc(nBytes);
//
// // initialize host array
// initialData_1(h_A, nx * ny);
//
// // allocate device memory
// float *d_A, *d_C;
// error = cudaMalloc((float**)&d_A, nBytes);
// error = cudaMalloc((float**)&d_C, nBytes);
//
// // copy data from host to device
// error = cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice);
// error = cudaGetLastError();
// printf("%s - %s \n", cudaGetErrorName(error),cudaGetErrorString(error));
//
// copyRow_1 << <grid, block >> >(d_C, d_A, nx, ny);
// error = cudaDeviceSynchronize();
//
// printf("%s <<< grid (%d,%d) block (%d,%d)>>> ", "CopyRow", grid.x, grid.y, block.x,
// block.y);
// cudaGetLastError();
//
// // check kernel results
// if (iKernel > 1)
// {
// error = cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost);
// }
//
// // free host and device memory
// error = cudaFree(d_A);
// error = cudaFree(d_C);
// free(h_A);
// free(hostRef);
// free(gpuRef);
//
// // reset device
// error = cudaDeviceReset();
// system("pause");
// return EXIT_SUCCESS;
//}
|
12,626 | #include <iostream>
#include <cuda.h>
using namespace std;
// Element-wise vector addition C = A + B for N elements.
// Uses the global thread index and the N parameter as the bound — the
// original ignored blockIdx and hard-coded 5 instead of using N.
__global__ void MatAdd (float* A, float* B, float* C, int N){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < N){
        C[index] = A[index] + B[index];
    }
}
/* Adds two 5-element vectors on the GPU and copies the result back. */
int main(void){
    /* problem size */
    const int N = 5;
    const int bytes = N * sizeof(float);

    /* host buffers, A[i] = B[i] = i, C zeroed */
    float* h_A = (float *)malloc(bytes);
    float* h_B = (float *)malloc(bytes);
    float* h_C = (float *)malloc(bytes);
    for (int i = 0; i < N; ++i){
        h_A[i] = i;
        h_B[i] = i;
        h_C[i] = 0;
    }

    /* device buffers */
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, bytes);
    cudaMalloc((void **)&d_B, bytes);
    cudaMalloc((void **)&d_C, bytes);

    /* host -> device */
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, h_C, bytes, cudaMemcpyHostToDevice);

    /* launch a single 16x16 block; the kernel guards against N */
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks(1, 1, 1);
    MatAdd<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C, N);
    cudaDeviceSynchronize();

    /* device -> host */
    cudaMemcpy(h_C, d_C, bytes, cudaMemcpyDeviceToHost);

    /* release device memory */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);

    /* (optional debug printing of h_A / h_B / h_C was left disabled here) */
    return 0;
}
|
12,627 | #include "includes.h"
// Zero-fills buf[0..size) using a grid-stride loop, so any launch
// configuration covers the whole buffer.
__global__ void fillZeros(float* buf, int size) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride)
        buf[i] = 0.0f;
}
12,628 | #include "ConcStadiumHash.cuh"
#include <stdint.h>
#define TICKET_SIZE 4
#define NUM_POS_BITS 3 // log(32/TICKET_SIZE)
// Generic hash function: nRounds of xorshift-multiply mixing, with the
// round counter added each round; the low 32 bits of the state are returned.
template<typename keyT, typename valueT>
template <uint nRounds, uint rShift, uint mulOp>
__device__ uint ConcStadiumHash<keyT, valueT>::hash(const keyT key)
{
    keyT state = key;
    for (uint round = nRounds; round > 0; --round)
        state = ((state >> rShift) ^ state) * mulOp + round;
    return (uint)state;
}
// First Hash: mixes the key and maps the 32-bit result onto [0, tableSize)
// via the high half of the 64-bit product.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::initHash(keyT key)
{
    return __umulhi(hash<sizeof(keyT), 8, 0xE9D58A6B>(key), tableSize);
}
// Second Hash (double hashing): advances from `hashed` by a key-dependent
// step in [1, tableSize - 1], wrapping modulo tableSize.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::rehash(uint hashed, const keyT key)
{
    const uint step = 1 + __umulhi(hash<sizeof(keyT), 8, 0x6E5B9D8A>(key), tableSize - 1);
    const uint next = hashed + step;
    return (next >= tableSize) ? (next - tableSize) : next;
}
// Info Hash: the top (TICKET_SIZE - 2) bits of a third, independent hash
// of the key; stored in the ticket as a cheap pre-filter before full
// key comparison. (mulOp was chosen randomly.)
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::infoHash(keyT key)
{
    const uint h = hash<sizeof(keyT), 8, 0xCA34BE7D>(key);
    return h >> (32 - (TICKET_SIZE - 2));
}
// First Array Hash: 31-based polynomial combination of the per-element
// hashes, mapped onto [0, tableSize).
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::arrayHash(keyT* arr, uint length)
{
    uint acc = 0;
    for (uint k = 0; k < length; k++)
        acc = acc * 31 + hash<sizeof(keyT), 8, 0xE9D58A6B>(arr[k]);
    return __umulhi(acc, tableSize);
}
// Second Array Hash (double hashing for array keys): key-dependent step in
// [1, tableSize - 1] added to the previous position, wrapping modulo
// tableSize.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::arrayRehash(uint hashed, keyT* arr, uint length)
{
    uint acc = 0;
    for (uint k = 0; k < length; k++)
        acc = acc * 31 + hash<sizeof(keyT), 8, 0x6E5B9D8A>(arr[k]);
    const uint next = hashed + 1 + __umulhi(acc, tableSize - 1);
    return (next >= tableSize) ? (next - tableSize) : next;
}
// Info Array Hash: polynomial combination of the per-element info hashes,
// reduced to the top (TICKET_SIZE - 2) bits for storage in the ticket.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::arrayInfoHash(keyT* arr, uint length)
{
    uint acc = 0;
    for (uint k = 0; k < length; k++)
        acc = acc * 31 + hash<sizeof(keyT), 8, 0xCA34BE7D>(arr[k]);
    return acc >> (32 - (TICKET_SIZE - 2));
}
// Allocates the device-side ticket board (TICKET_SIZE bits per table slot,
// packed into 32-bit words) and initializes every ticket to "free".
template<typename keyT, typename valueT>
__host__ void ConcStadiumHash<keyT, valueT>::allocTicketBoard()
{
    const int ticketBoardSize = tableSize / (32 / TICKET_SIZE) + 1;
    if (cudaMalloc((void**)&ticketBoard, ticketBoardSize * sizeof(uint)) != cudaSuccess) {
        fprintf(stderr, "Ticket Board: cudaMalloc failed!\n");
    }
    // all bits set == every seat available and unlocked
    clearTicketBoard();
}
// Resets the ticket board to all 1s (every seat available and unlocked).
template<typename keyT, typename valueT>
__host__ void ConcStadiumHash<keyT, valueT>::clearTicketBoard()
{
    const int ticketBoardSize = tableSize / (32 / TICKET_SIZE) + 1;
    if (cudaMemset((void*)ticketBoard, 0xFF, ticketBoardSize * sizeof(uint)) != cudaSuccess) {
        fprintf(stderr, "Ticket Board: cudaMemset failed!\n");
    }
}
// Deletion of the ticket board
// Releases the device memory backing the ticket board.
template<typename keyT, typename valueT>
__host__ void ConcStadiumHash<keyT, valueT>::freeTicketBoard()
{
cudaFree((void*)ticketBoard);
}
// Builds the AND-mask used by insertTicketInfo: the (TICKET_SIZE - 2) info
// bits placed at infoStart, every other bit set to 1 (so an atomicAnd
// leaves the rest of the word untouched).
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::prepareTicket(uint info, uint infoStart)
{
    const uint infoBits = (1u << (TICKET_SIZE - 2)) - 1;
    return ~(infoBits << infoStart) | (info << infoStart);
}
// Reads the (TICKET_SIZE - 2) info bits stored at infoStart in a ticket word.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::extractInfo(uint ticket, uint infoStart)
{
    const uint infoBits = (1u << (TICKET_SIZE - 2)) - 1;
    return (ticket >> infoStart) & infoBits;
}
// Calculates Ticket Board Index from the hashed key
// Each 32-bit board word holds 32/TICKET_SIZE tickets, so the word index
// is the slot index shifted down by NUM_POS_BITS.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::getTbIndex(uint hashed)
{
return hashed >> NUM_POS_BITS;
}
// Bit offset of the ticket inside its 32-bit ticket-board word:
// (slot within word) * TICKET_SIZE.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::getPosInInt(uint hashed)
{
    const uint slot = hashed & ((1u << NUM_POS_BITS) - 1);
    return slot << (5 - NUM_POS_BITS);
}
// Entry Reservation: atomically tries to claim the bucket whose ticket
// lives at (tbIndex, posInInt). Returns ~0 on success, 0 if the seat was
// taken (or lost to a racing thread).
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::tryBookASeat(uint tbIndex, uint posInInt)
{
    uint availability = 1 << posInInt;       // "seat free" bit
    uint access = 1 << (posInInt + 1);       // "seat unlocked" bit
    uint permit = availability | access;
    // check if availability bit is set
    if (availability & ticketBoard[tbIndex])
    {
        // try to reserve ticket: clear both bits, inspect the old word
        uint auth = atomicAnd((uint*)(ticketBoard + tbIndex), ~permit);
        if (auth & availability)
        {
            // ticket was booked succesfully
            return (~0);
        }
        else if ((auth & permit) == 0)
        {
            // BUGFIX: this condition was `auth & permit == 0`, which parses
            // as `auth & (permit == 0)` and is always false — the losing
            // thread then fell into the branch below and re-set the access
            // bit while the winning thread still held the seat locked.
            // Both bits were already clear: another thread booked the seat
            // first and holds it; leave the board untouched.
            return 0;
        }
        else
        {
            // ticket is accessed by another thread
            // set access bit (necessary if bit was unset without obtaining the ticket)
            atomicOr((uint*)(ticketBoard + tbIndex), access);
            return 0;
        }
    }
    else
    {
        return 0;
    }
}
// Entry Search (array-key overload)
// Probes the ticket at (tbIndex, posInInt):
//  - availability bit still set -> slot was never occupied, so the key
//    cannot be anywhere on this probe chain (KEY_NOT_INSIDE_TABLE);
//  - otherwise spin until the writer releases the ticket, compare the
//    stored info bits, then the full key. Returns ~0 on match, 0 to keep
//    probing.
// NOTE(review): the `hashed` parameter is unused here (the caller already
// offsets `keys` by hashed*keyLength) — presumably kept for symmetry with
// the scalar overload; confirm before removing.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::tryFindTheSeat(keyT* key, uint hashed, uint tbIndex, uint posInInt, uint info, keyT* keys, uint keyLength)
{
uint permit = 1 << posInInt;
if (permit & ticketBoard[tbIndex]) {
return KEY_NOT_INSIDE_TABLE;
}
else {
// wait until ticket can be accessed
while (isAccessed(tbIndex, posInInt)) {};
// get and compare info from ticket (info bits start 2 above the flag pair)
uint retrievedInfo = extractInfo(ticketBoard[tbIndex], posInInt + 2);
if (info != retrievedInfo) return 0;
// compare keys element by element
for (int i = 0; i < keyLength; i++)
{
if (keys[i] != key[i]) return 0;
}
return (~0);
}
}
// Entry Search (scalar-key overload)
// Same protocol as the array overload: an available ticket terminates the
// probe chain (KEY_NOT_INSIDE_TABLE); otherwise wait for the writer, check
// the info bits, then compare against the key stored at slot `hashed`.
// Returns ~0 on match, 0 to keep probing.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::tryFindTheSeat(keyT key, uint hashed, uint tbIndex, uint posInInt, uint info, keyT* keys)
{
uint permit = 1 << posInInt;
if (permit & ticketBoard[tbIndex]) {
return KEY_NOT_INSIDE_TABLE;
}
else {
// wait until ticket can be accessed
while (isAccessed(tbIndex, posInInt)) {};
// get info from ticket
uint retrievedInfo = extractInfo(ticketBoard[tbIndex], posInInt + 2); // ACCESS CHANGE
if (info != retrievedInfo) return 0;
// compare keys
if (key != keys[hashed]) return 0;
return (~0);
}
}
// Writes the info bits into a freshly reserved ticket (the info field
// starts 2 bits above the availability/access pair) with one atomic AND.
template<typename keyT, typename valueT>
__device__ void ConcStadiumHash<keyT, valueT>::insertTicketInfo(uint info, uint tbIndex, uint posInInt)
{
    atomicAnd((uint*)ticketBoard + tbIndex, prepareTicket(info, posInInt + 2));
}
// Returns ~0 while the ticket's access bit is CLEAR (a writer holds the
// seat locked), 0 once the ticket may be read.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::isAccessed(uint tbIndex, uint posInInt)
{
    const uint accessBit = 1u << (posInInt + 1);
    return (ticketBoard[tbIndex] & accessBit) ? 0 : (~0);
}
// Publishes a finished insert: the fence makes the key/value stores
// visible device-wide before the access bit is atomically re-set.
template<typename keyT, typename valueT>
__device__ void ConcStadiumHash<keyT, valueT>::unlockAccess(uint tbIndex, uint posInInt)
{
    __threadfence();
    atomicOr((uint*)ticketBoard + tbIndex, 1u << (posInInt + 1));
}
// Constructor: records the table size only; the ticket board itself is
// allocated separately via allocTicketBoard().
template<typename keyT, typename valueT>
ConcStadiumHash<keyT, valueT>::ConcStadiumHash(uint tableSize)
    : tableSize(tableSize)
{
}
// Destructor
// Releases the device-side ticket board.
// NOTE(review): assumes the ticket board was allocated (or that the
// pointer is zero-initialized in the class declaration, making cudaFree a
// no-op) — verify ConcStadiumHash.cuh.
template<typename keyT, typename valueT>
ConcStadiumHash<keyT, valueT>::~ConcStadiumHash()
{
freeTicketBoard();
}
// Inserts a given key value pair into the table.
// Probes with double hashing until tryBookASeat wins a free bucket, writes
// the info bits into the ticket, stores the key/value at slot `hashed`,
// then unlockAccess() publishes the entry (fence + access bit). A
// device-side assert fires once more probes than tableSize were made
// (table full).
template<typename keyT, typename valueT>
__device__ void ConcStadiumHash<keyT, valueT>::insert(keyT* key, valueT value, keyT* keys, valueT* values, uint keyLength)
{
// Initial Hash
uint hashed = arrayHash(key, keyLength);
// Get ticket board index and the position in the integer from hashed value
uint tbIndex = getTbIndex(hashed);
uint posInInt = getPosInInt(hashed);
uint tryCounter = 0;
uint gotSeat;
// loop until a bucket is succesfully reserved
do
{
// try to reserve a free bucket
gotSeat = tryBookASeat(tbIndex, posInInt);
// check if bucket was already reserved
if (!gotSeat)
{
// rehash the key with the second hash function
hashed = arrayRehash(hashed, key, keyLength);
// Get ticket board index and the position in the integer from hashed value
tbIndex = getTbIndex(hashed);
posInInt = getPosInInt(hashed);
}
// if the number of tries is bigger than the table size, the table is already full
assert(++tryCounter < tableSize);
//throw std::runtime_error("INSERT FAILED - Table is full!");
} while (!gotSeat);
// Generate Info from key
uint info = arrayInfoHash(key, keyLength);
// Insert info into the reserved bucket
insertTicketInfo(info, tbIndex, posInInt);
// Insert key value pair into the table (keys are stored flattened,
// keyLength elements per slot)
for (int i = 0; i < keyLength; i++)
{
keys[hashed * keyLength + i] = key[i];
}
values[hashed] = value;
unlockAccess(tbIndex, posInInt);
}
// Searches for a key in the table; returns its slot index, or tableSize
// if the key is not present. Follows the same double-hashing probe chain
// as insert(), using tryFindTheSeat to short-circuit on never-occupied
// tickets and to filter with the info bits before full key comparison.
template<typename keyT, typename valueT>
__device__ uint ConcStadiumHash<keyT, valueT>::find(keyT* key, keyT* keys, uint keyLength)
{
// Intial Hash
uint hashed = arrayHash(key, keyLength); // initHash(key);
// printf("Key: %u Hashed: %u\n", key, hashed);
// Generate Info from key
uint info = arrayInfoHash(key, keyLength);
uint tbIndex = getTbIndex(hashed);
uint posInInt = getPosInInt(hashed);
uint tryCounter = 0;
uint seatFound;
// loop until the entry with the searched key is found
do {
// search for the key in the ticket board and table
seatFound = tryFindTheSeat(key, hashed, tbIndex, posInInt, info, keys + (hashed*keyLength), keyLength);
// if no entry entry was found, rehash the key
if (!seatFound)
{
hashed = arrayRehash(hashed, key, keyLength);
tbIndex = getTbIndex(hashed);
posInInt = getPosInInt(hashed);
}
// if the number of tries is bigger than the table size, the key isn't included in the table
if (++tryCounter == tableSize)
seatFound = KEY_NOT_INSIDE_TABLE;
} while (!seatFound && seatFound != KEY_NOT_INSIDE_TABLE);
if (seatFound == KEY_NOT_INSIDE_TABLE)
{
// if key wasn't found in the table return tableSize as index
hashed = tableSize;
}
return hashed;
}
template class ConcStadiumHash<unsigned int, unsigned int>;
template class ConcStadiumHash<uint64_t, uint32_t>;
|
12,629 | #include <stdio.h>
#define HANDLE_ERROR( err ) ( HandleError( err, __FILE__, __LINE__ ) )
// Prints the CUDA error with its source location and aborts the program;
// no-op on cudaSuccess. Used via the HANDLE_ERROR macro above.
static void HandleError( cudaError_t err, const char *file, int line )
{
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
const int N = 2048;
// CUDA kernel: element-wise vector addition over the file-scope size N,
// written as a grid-stride loop so any grid size covers all elements.
__global__ void
Vector_Addition ( const int *dev_a , const int *dev_b , int *dev_c)
{
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
    {
        dev_c[i] = dev_a[i] + dev_b[i];
    }
}
// Adds two N-element vectors on the GPU and prints every result triple.
// All CUDA calls are checked via HANDLE_ERROR.
int
main (void)
{
    //Host array
    int Host_a[N], Host_b[N], Host_c[N];
    //Device array
    int *dev_a , *dev_b, *dev_c ;
    //Grid and Block dimensions (ceil-div so every element is covered)
    dim3 grid((N+127)/128);
    dim3 block(128);
    //Allocate the memory on the GPU
    HANDLE_ERROR ( cudaMalloc((void **)&dev_a , N*sizeof(int) ) );
    HANDLE_ERROR ( cudaMalloc((void **)&dev_b , N*sizeof(int) ) );
    HANDLE_ERROR ( cudaMalloc((void **)&dev_c , N*sizeof(int) ) );
    //fill the Host array with random elements on the CPU
    for ( int i = 0; i <N ; i++ )
    {
        Host_a[i] = -i ;
        Host_b[i] = i*i ;
    }
    //Copy Host array to Device array
    HANDLE_ERROR (cudaMemcpy (dev_a , Host_a , N*sizeof(int) , cudaMemcpyHostToDevice));
    HANDLE_ERROR (cudaMemcpy (dev_b , Host_b , N*sizeof(int) , cudaMemcpyHostToDevice));
    //Make a call to GPU kernel
    Vector_Addition <<< grid, block >>> (dev_a , dev_b , dev_c ) ;
    //Kernel launches return no status themselves; this was the one
    //unchecked CUDA operation in the program.
    HANDLE_ERROR (cudaGetLastError());
    //Copy back to Host array from Device array (blocking copy also
    //synchronizes with the kernel)
    HANDLE_ERROR (cudaMemcpy(Host_c , dev_c , N*sizeof(int) , cudaMemcpyDeviceToHost));
    //Display the result
    for ( int i = 0; i<N; i++ )
        printf ("%d + %d = %d\n", Host_a[i] , Host_b[i] , Host_c[i] ) ;
    //Free the Device array memory
    cudaFree (dev_a) ;
    cudaFree (dev_b) ;
    cudaFree (dev_c) ;
    return 0 ;
}
|
12,630 | #include "includes.h"
__global__ void read_G_matrix_kernel(int S, int vertex_index, int* i_index, int* j_index, bool* is_Bennett, double* exp_Vj, double* N_ptr, int LD_N, double* G_ptr, int LD_G, double* result_ptr, int incr) {
int l = threadIdx.x;
double result, delta;
if (j_index[l] < vertex_index) {
delta = i_index[l] == j_index[l] ? 1. : 0.;
result = (N_ptr[i_index[l] + LD_N * j_index[l]] * exp_Vj[l] - delta) / (exp_Vj[l] - 1.);
}
else
result = G_ptr[i_index[l] + LD_G * (j_index[l] - vertex_index)];
result_ptr[l * incr] = is_Bennett[l] ? 0. : result;
} |
12,631 | #include <stdio.h>
// Enumerates all CUDA devices and prints their basic properties.
int main (void)
{
    int deviceCount = 0;
    cudaGetDeviceCount (&deviceCount);

    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaDeviceProp prop;
        cudaGetDeviceProperties (&prop, dev);
        printf ("General Information for device %d\n", dev);
        printf ("Name :\t\t\t%s\n", prop.name);
        printf ("CUDA Ver :\t\t%d.%d\n", prop.major, prop.minor);
        printf ("Clock rate :\t\t%d\n", prop.clockRate);
        printf ("Max Threads Per Block :\t%d\n", prop.maxThreadsPerBlock);
        printf ("Max Threads Dim :\t[%d %d %d]\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf ("Max Grid Size :\t\t[%d %d %d]\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    }
    return 0;
}
|
12,632 | #include "includes.h"
// filename: eeTanh.cu
// a simple CUDA kernel to square the elements of a matrix
extern "C" // ensure function name to be exactly "eeTanh"
{
}
// Per-element derivative for a doubled-row output layout: A holds two
// stacked N x M halves (A1 = A[0..L), A2 = A[L..2L)), Y holds the N x M
// targets, and out receives both derivative halves in A's layout:
//   out1 = (A1 - Y) * exp(2 * A2)
//   out2 = out1 * (A1 - Y) - 1
__global__ void normLogErrDeriv(int N, int M, float *A, float *Y, float *out)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int index = j*N + i;
    int L = N*M;   // offset of the second half (A2) inside A and out
    if (i < N && j < M)
    {
        // float literals (2.0f / 1.0f): the original double constants
        // forced implicit conversions around single-precision intrinsics
        float a = __expf(__fmul_rn(2.0f, A[index+L]));
        float b = __fsub_rn(A[index], Y[index]);
        out[index] = __fmul_rn(b, a);
        out[index+L] = __fsub_rn(__fmul_rn(out[index], b), 1.0f);
    }
}
12,633 | #include "includes.h"
// Maps an (i, j) pair to its index in a packed triangular array;
// symmetric in i and j (the larger coordinate selects the row).
__device__ int GPUKernel_Position(int i,int j) {
    const int lo = (i < j) ? i : j;
    const int hi = (i < j) ? j : i;
    return hi * (hi + 1) / 2 + lo;
}
// One thread per linear id in [0, v^3), decoded as id = c*v^2 + b*v + d;
// writes the antisymmetrized difference in[.., d, c] - in[.., c, d] into a
// triangularly packed output (only b >= a and d <= c survive the guards).
// NOTE(review): the decode assumes the launcher composed the 2D grid so
// that blockid*blockDim.x + threadIdx.x enumerates c*v^2 + b*v + d —
// verify against the host-side launch configuration.
__global__ void GPUKernel_Vm(int a, int v,double * in,double * out) {
// flatten the 2D grid plus threads into a single linear id
int blockid = blockIdx.x*gridDim.y + blockIdx.y;
int id = blockid*blockDim.x + threadIdx.x;
if ( id >= v*v*v ) return;
// decode id into (c, b, d), each in [0, v)
int d = id%v;
int b = (id-d)%(v*v)/v;
int c = (id-d-b*v)/(v*v);
if ( b < a ) return;
if ( d > c ) return;
// cd indexes the packed triangle of the (c, d) pair
int cd = GPUKernel_Position(c,d);
int vtri = v*(v+1)/2;
out[(b-a)*vtri+cd] = in[(b-a)*v*v+d*v+c] - in[(b-a)*v*v+c*v+d];
}
12,634 | #include "includes.h"
// Uniform-add phase of a block scan: adds this block's scanned partial sum
// (uniforms[blockIdx.x + blockOffset]) to the two elements each thread owns
// in g_data (each block covers 2*blockDim.x elements starting at baseIndex).
// NOTE(review): only the second add is guarded against n; the first add
// assumes every block owns a full 2*blockDim.x chunk — confirm against the
// scan driver that sizes the launch.
__global__ void uniformAdd(int *g_data, int *uniforms, int n, int blockOffset, int baseIndex)
{
// one copy of the block's uniform, broadcast via shared memory
__shared__ int uni;
if (threadIdx.x == 0)
uni = uniforms[blockIdx.x + blockOffset];
unsigned int address = __mul24(blockIdx.x, (blockDim.x << 1)) + baseIndex + threadIdx.x;
__syncthreads();
// note two adds per thread
g_data[address] += uni;
// branch-free tail guard: the comparison yields 0/1 and zeroes the add
// when the second element would fall outside n
g_data[address + blockDim.x] += (threadIdx.x + blockDim.x < n) * uni;
}
12,635 | #include <iostream>
#include <cstdlib>
#include <cassert>
#include <zlib.h>
#include <png.h>
#define MASK_N 2
#define MASK_X 5
#define MASK_Y 5
#define SCALE 8
__device__ int mask[MASK_N][MASK_X][MASK_Y] = {
{{ -1, -4, -6, -4, -1},
{ -2, -8,-12, -8, -2},
{ 0, 0, 0, 0, 0},
{ 2, 8, 12, 8, 2},
{ 1, 4, 6, 4, 1}},
{{ -1, -2, 0, 2, 1},
{ -4, -8, 0, 8, 4},
{ -6,-12, 0, 12, 6},
{ -4, -8, 0, 8, 4},
{ -1, -2, 0, 2, 1}}
};
/* Reads an 8-bit PNG into a freshly malloc'd buffer (*image) and reports
 * its height/width/channel count.
 * Returns 0 on success, 1 on bad signature/short read, 2 if the file
 * cannot be opened, 3 on image-buffer OOM, 4 on libpng-struct OOM.
 * The FILE handle is now closed on every path (the original leaked it),
 * and the libpng read structs are destroyed on success. */
int read_png(const char* filename, unsigned char** image, unsigned* height,
unsigned* width, unsigned* channels) {
    unsigned char sig[8];
    FILE* infile;
    infile = fopen(filename, "rb");
    if (!infile)
        return 2; /* cannot open file */
    if (fread(sig, 1, 8, infile) != 8 || !png_check_sig(sig, 8)) {
        fclose(infile);
        return 1; /* bad signature or truncated file */
    }
    png_structp png_ptr;
    png_infop info_ptr;
    png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    if (!png_ptr) {
        fclose(infile);
        return 4; /* out of memory */
    }
    info_ptr = png_create_info_struct(png_ptr);
    if (!info_ptr) {
        png_destroy_read_struct(&png_ptr, NULL, NULL);
        fclose(infile);
        return 4; /* out of memory */
    }
    png_init_io(png_ptr, infile);
    png_set_sig_bytes(png_ptr, 8); /* signature already consumed above */
    png_read_info(png_ptr, info_ptr);
    int bit_depth, color_type;
    png_get_IHDR(png_ptr, info_ptr, width, height, &bit_depth, &color_type, NULL, NULL, NULL);
    png_uint_32 i, rowbytes;
    png_bytep row_pointers[*height];
    png_read_update_info(png_ptr, info_ptr);
    rowbytes = png_get_rowbytes(png_ptr, info_ptr);
    *channels = (int) png_get_channels(png_ptr, info_ptr);
    if ((*image = (unsigned char *) malloc(rowbytes * *height)) == NULL) {
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        fclose(infile);
        return 3;
    }
    /* rows point directly into the single contiguous image buffer */
    for (i = 0; i < *height; ++i)
        row_pointers[i] = *image + i * rowbytes;
    png_read_image(png_ptr, row_pointers);
    png_read_end(png_ptr, NULL);
    png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
    fclose(infile);
    return 0;
}
// Write `image` (height x width, RGB, 8 bits per channel, tightly packed)
// to `filename` as a PNG, using low compression and no filtering for speed.
// NOTE(review): fopen / png_create_* results are unchecked -- a bad path
// segfaults inside libpng; confirm whether callers guarantee writability.
void write_png(const char* filename, png_bytep image, const unsigned height, const unsigned width,
               const unsigned channels) {
    FILE* fp = fopen(filename, "wb");
    png_structp png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING, NULL, NULL, NULL);
    png_infop info_ptr = png_create_info_struct(png_ptr);
    png_init_io(png_ptr, fp);
    png_set_IHDR(png_ptr, info_ptr, width, height, 8,
                 PNG_COLOR_TYPE_RGB, PNG_INTERLACE_NONE,
                 PNG_COMPRESSION_TYPE_DEFAULT, PNG_FILTER_TYPE_DEFAULT);
    png_set_filter(png_ptr, 0, PNG_NO_FILTERS);       // no row filtering (fastest)
    png_write_info(png_ptr, info_ptr);
    png_set_compression_level(png_ptr, 1);            // fastest zlib level
    png_bytep row_ptr[height];
    // rows are assumed tightly packed at width*channels bytes each
    for (int i = 0; i < height; ++ i) {
        row_ptr[i] = image + i * width * channels * sizeof(unsigned char);
    }
    png_write_image(png_ptr, row_ptr);
    png_write_end(png_ptr, NULL);
    png_destroy_write_struct(&png_ptr, &info_ptr);
    fclose(fp);
}
// Sobel edge-detection kernel using the two 5x5 masks in `mask`.
// Expected launch layout: <<<height, 256>>> -- one block per image row
// (y = blockIdx.x), 256 threads striding across the columns. The stride in
// the x-loop is hard-coded to 256 and must match blockDim.x of the launch.
// s: source pixels, t: destination, both height*width*channels bytes,
// channel order B,G,R at offsets 0,1,2.
__global__ void sobel (unsigned char* s, unsigned char* t, unsigned height, unsigned width, unsigned channels) {
    int x, y, i, v, u;
    int R, G, B;
    double val[MASK_N*3] = {0.0};
    int adjustX, adjustY, xBound, yBound;
    //y = threadIdx.x + blockIdx.x * blockDim.x;
    y = blockIdx.x; // each block handles exactly one row
    // for (y = 0; y < height; ++y) {
    for (x = threadIdx.x; x < width; x+=256) { // NOTE: stride assumes 256 threads/block
        for (i = 0; i < MASK_N; ++i) {
            // include the center row/column when the mask size is odd
            adjustX = (MASK_X % 2) ? 1 : 0;
            adjustY = (MASK_Y % 2) ? 1 : 0;
            xBound = MASK_X /2;
            yBound = MASK_Y /2;
            // reset the per-channel accumulators for this mask
            val[i*3+2] = 0.0;
            val[i*3+1] = 0.0;
            val[i*3] = 0.0;
            for (v = -yBound; v < yBound + adjustY; ++v) {
                for (u = -xBound; u < xBound + adjustX; ++u) {
                    // taps outside the image are skipped (zero padding)
                    if ((x + u) >= 0 && (x + u) < width && y + v >= 0 && y + v < height) {
                        R = s[channels * (width * (y+v) + (x+u)) + 2];
                        G = s[channels * (width * (y+v) + (x+u)) + 1];
                        B = s[channels * (width * (y+v) + (x+u)) + 0];
                        val[i*3+2] += R * mask[i][u + xBound][v + yBound];
                        val[i*3+1] += G * mask[i][u + xBound][v + yBound];
                        val[i*3+0] += B * mask[i][u + xBound][v + yBound];
                    }
                }
            }
        }
        // gradient magnitude: sqrt of the sum of squared mask responses
        double totalR = 0.0;
        double totalG = 0.0;
        double totalB = 0.0;
        for (i = 0; i < MASK_N; ++i) {
            totalR += val[i * 3 + 2] * val[i * 3 + 2];
            totalG += val[i * 3 + 1] * val[i * 3 + 1];
            totalB += val[i * 3 + 0] * val[i * 3 + 0];
        }
        totalR = sqrt(totalR) / SCALE;
        totalG = sqrt(totalG) / SCALE;
        totalB = sqrt(totalB) / SCALE;
        // clamp to the 8-bit range before writing back
        const unsigned char cR = (totalR > 255.0) ? 255 : totalR;
        const unsigned char cG = (totalG > 255.0) ? 255 : totalG;
        const unsigned char cB = (totalB > 255.0) ? 255 : totalB;
        t[channels * (width * y + x) + 2] = cR;
        t[channels * (width * y + x) + 1] = cG;
        t[channels * (width * y + x) + 0] = cB;
    }
    // }
}
// Entry point: ./sobel <input.png> <output.png>
// Reads the input PNG, runs the Sobel kernel with one block per image row,
// writes the filtered result, and releases all host and device memory.
int main(int argc, char** argv) {
    assert(argc == 3);
    unsigned height, width, channels;
    unsigned char* host_s = NULL;
    // BUG FIX: the read_png return code was ignored; a bad input file would
    // have led to using an uninitialized buffer and dimensions below.
    if (read_png(argv[1], &host_s, &height, &width, &channels) != 0) {
        fprintf(stderr, "failed to read PNG file %s\n", argv[1]);
        return 1;
    }
    unsigned char* device_s;
    unsigned char* device_t;
    // device buffers for the source and destination pixels
    cudaMalloc(&device_s, height * width * channels);
    cudaMalloc(&device_t, height * width * channels);
    unsigned char* host_t = (unsigned char*) malloc(height * width * channels * sizeof(unsigned char));
    cudaMemcpy(device_s, host_s, height * width * channels, cudaMemcpyHostToDevice);
    // one block per row, 256 threads striding the columns; the kernel's
    // x-stride is hard-coded to 256 and must stay in sync with this launch
    sobel<<<height, 256>>>(device_s, device_t, height, width, channels);
    cudaMemcpy(host_t, device_t, height * width * channels, cudaMemcpyDeviceToHost);
    write_png(argv[2], host_t, height, width, channels);
    // BUG FIX: all four buffers were leaked before
    cudaFree(device_s);
    cudaFree(device_t);
    free(host_s);
    free(host_t);
    return 0;
}
|
12,636 | //function kernel
// SQUARED Euclidean norm |r|^2 of a float3.
// NOTE: despite the name this does NOT take a square root; every caller in
// this file compensates with sqrt(length(r)) where the true norm is needed.
// Renaming would break those callers, so the quirk is only documented here.
__device__ float length(float3 r) {
    return r.x*r.x + r.y*r.y + r.z*r.z;
}
// Component-wise product of two float3 vectors.
__device__ float3 mul_float3(float3 r1, float3 r2) {
    return make_float3(r1.x * r2.x, r1.y * r2.y, r1.z * r2.z);
}
// Component-wise sum r1 + r2.
__device__ float3 add_float3(float3 r1, float3 r2) {
    return make_float3(r1.x + r2.x, r1.y + r2.y, r1.z + r2.z);
}
// Component-wise difference r1 - r2.
__device__ float3 dif_float3(float3 r1, float3 r2) {
    return make_float3(r1.x - r2.x, r1.y - r2.y, r1.z - r2.z);
}
// Vector r scaled by the scalar s (operates on the by-value copy).
__device__ float3 scale_float3(float s, float3 r) {
    r.x *= s;
    r.y *= s;
    r.z *= s;
    return r;
}
// Poly6 SPH smoothing kernel: W(r,h) = 315/(64*pi*h^9) * (h^2 - |r|^2)^3.
// `length` (above) already returns the SQUARED norm, so no extra squaring.
// No support check is performed (matches the original; callers cull |r|>h).
__device__ float Kernel_Poly6(float3 r, float h) {
    const float PI = 3.14159f;
    // use float literals and powf: the original's double `pow`/literals
    // silently promoted this float kernel to double precision
    return 315.0f / (64.0f * PI * powf(h, 9.0f)) * powf(h * h - length(r), 3.0f);
}
// Gradient of the poly6 kernel: -945/(32*pi*h^9) * (h^2 - |r|^2)^2 * r.
__device__ float3 Gradient_Kernel_Poly6(float3 r, float h) {
    const float PI = 3.14159f;
    // hoist the scalar factor: the original recomputed pow(h,9) and the
    // squared difference three times (once per component) in double precision
    float common = -945.0f / (32.0f * PI * powf(h, 9.0f)) * powf(h * h - length(r), 2.0f);
    return make_float3(r.x * common, r.y * common, r.z * common);
}
// Laplacian of the poly6 kernel:
// 945/(8*pi*h^9) * (h^2 - |r|^2) * (|r|^2 - (3/4)*(h^2 - |r|^2)).
__device__ float Lap_Kernel_Poly6(float3 r, float h) {
    const float PI = 3.14159f;
    float diff = h * h - length(r); // h^2 - |r|^2 (length returns the squared norm)
    // BUG FIX: the original wrote `3 / 4`, which is INTEGER division and
    // evaluates to 0, silently deleting the whole second term.
    return 945.0f / (8.0f * PI * powf(h, 9.0f)) * diff * (length(r) - 0.75f * diff);
}
// Gradient of the spiky kernel: -45/(pi*h^6) * (h - |r|)^2 * r/|r|.
// NOTE(review): divides by |r|, so r == (0,0,0) (coincident particles)
// produces Inf/NaN -- presumably callers guarantee nonzero separation; verify.
__device__ float3 Gradient_Kernel_Spiky(float3 r, float h) {
    float PI = 3.14159;
    float _r = sqrt(length(r)); // true norm; length() returns the squared norm
    float v = -45.0f / (PI * pow(h, 6) * _r) * pow(h - _r, 2);
    return make_float3(r.x*v, r.y*v, r.z*v);
}
// Laplacian of the viscosity kernel: 45/(pi*h^6) * (h - |r|),
// written here as 45/(pi*h^5) * (1 - |r|/h), which is algebraically equal.
__device__ float Lap_Kernel_Viscosity(float3 r, float h) {
    float PI = 3.14159;
    return 45.0f / (PI * pow(h, 5)) * (1 - sqrt(length(r)) / h);
}
//PBF particle struct
// Position-based-fluids particle state.
struct pPBF {
    float3 pos;   // position
    float3 vel;   // velocity
    float m;      // mass
    float rho;    // density
    float lambda; // constraint (lambda) multiplier
    float col;    // NOTE(review): not referenced in this file -- confirm meaning (color field?)
};
// Position-based-dynamics distance-constraint pass.
// pos:      particle positions (N entries)
// delta_p:  output position corrections (N entries)
// M_index:  per-particle neighbor index lists, NP slots per particle,
//           a 0 entry terminates the row
// M:        per-pair rest distances, same layout as M_index
// One thread per particle.
extern "C" __global__ void
PBD_2(float3 *pos, float3 *delta_p, int *M_index, float *M, const int N, const int NP)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // BUG FIX: the guard was `idx > N`, which let thread idx == N read and
    // write one element past the end of every array.
    if (idx >= N) return;
    float3 _pos = pos[idx];
    float3 _delta_p = make_float3(0, 0, 0);
    float Sigma_i = 0, Sigma_j = 0;
    int i;
    // Policy: H is not used; M/M_index carry the connectivity.
    // First pass: accumulate the normalization term Sigma_j.
    for (i = 0; i < NP; ++i)
    {
        int index = M_index[i + idx * NP];
        if (index == 0) break; // end of this particle's neighbor list
        float3 __pos = pos[index];
        float3 r = dif_float3(_pos, __pos);
        Sigma_j += length(scale_float3(1.0f / sqrtf(length(r)), r));
    }
    // Second pass: accumulate the correction delta_p.
    for (i = 0; i < NP; ++i)
    {
        int index = M_index[i + idx * NP];
        if (index == 0) break;
        float3 __pos = pos[index];
        float3 r = dif_float3(_pos, __pos);
        float d = M[i + idx * NP]; // rest distance for this pair
        Sigma_i = sqrtf(length(r)) - d; // constraint violation
        float s = Sigma_i / Sigma_j;
        _delta_p = add_float3(_delta_p, scale_float3(-1.0f * s / sqrtf(length(r)), r));
    }
    // Guard against NaN AND Inf: coincident particles give r = 0 and an
    // infinite correction, which the original isnan() check let through.
    if (!isfinite(length(_delta_p))) _delta_p = make_float3(0, 0, 0);
    delta_p[idx] = _delta_p;
    return;
}
/*
// S
for (i = 0; i < NP; ++i)
{
int index = M[i + idx * NP];
if (index == 0) break;
float3 __pos = pos[index];
float3 r = dif_float3(_pos, __pos);
Sigma_i += sqrt(length(r)) - d;
Sigma_j += length(scale_float3(1.0f/sqrt(length(r)), r));
}
float s = Sigma_i / Sigma_j;
if (s >= 1.0f) s = 1.0f;
// delta_p
for (i = 0; i < NP; ++i)
{
int index = M[i + idx * NP];
if (index == 0) break;
float3 __pos = pos[index];
float3 r = dif_float3(_pos, __pos);
_delta_p = add_float3(_delta_p, scale_float3(-1.0f*s/sqrt(length(r)), r));
}
if (isnan(length(_delta_p))) _delta_p = make_float3(0,0,0);
delta_p[idx] = _delta_p;
return;
*/
|
12,637 | #include "noise_perlin.cuh"
// Construct a Perlin noise module with the library defaults
// (the DEFAULT_PERLIN_* constants come from noise_perlin.cuh).
Perlin::Perlin() :
    NoiseModule(),
    m_frequency (DEFAULT_PERLIN_FREQUENCY ),
    m_lacunarity (DEFAULT_PERLIN_LACUNARITY ),
    m_octaveCount (DEFAULT_PERLIN_OCTAVE_COUNT),
    m_persistence (DEFAULT_PERLIN_PERSISTENCE ),
    m_seed (DEFAULT_PERLIN_SEED) {
}
// Base frequency of the first octave.
void Perlin::SetFrequency(double frequency) {
    m_frequency = frequency;
}
// Frequency multiplier applied between successive octaves.
void Perlin::SetLacunarity(double lacunarity) {
    m_lacunarity = lacunarity;
}
// Number of octaves summed by GetValue().
void Perlin::SetOctaveCount(int octaveCount) {
    m_octaveCount = octaveCount;
}
// Amplitude multiplier applied between successive octaves.
void Perlin::SetPersistence(double persistence) {
    m_persistence = persistence;
}
// Base seed for the coherent-noise generator (offset by octave index).
void Perlin::SetSeed(int seed) {
    m_seed = seed;
}
// Sum m_octaveCount octaves of gradient coherent noise at (x, y, z).
// Octave i scales the inputs by m_lacunarity^i, its contribution by
// m_persistence^i, and uses seed m_seed + i.
double Perlin::GetValue(double x, double y, double z) {
    double value = 0.0;
    double signal = 0.0;
    double curPersistence = 1.0;
    double nx, ny, nz;
    int seed;
    x *= m_frequency;
    y *= m_frequency;
    z *= m_frequency;
    for (int curOctave = 0; curOctave < m_octaveCount; curOctave++) {
        // Make sure that these floating-point values have the same range as a 32-
        // bit integer so that we can pass them to the coherent-noise functions.
        nx = MakeInt32Range(x);
        ny = MakeInt32Range(y);
        nz = MakeInt32Range(z);
        // Get the coherent-noise value from the input value and add it to the
        // final result. (The & 0xffffffff mask is a no-op on a 32-bit int,
        // kept from the original libnoise formulation.)
        seed = (m_seed + curOctave) & 0xffffffff;
        signal = GradientCoherentNoise3D (nx, ny, nz, seed);
        value += signal * curPersistence;
        // Prepare the next octave.
        x *= m_lacunarity;
        y *= m_lacunarity;
        z *= m_lacunarity;
        curPersistence *= m_persistence;
    }
    return value;
}
|
12,638 | #include <stdio.h>
// Copy the current particle positions r into rnew so both buffers start from
// the same state. One thread per particle; threads with index >= N are idle.
__global__ void initializePositionsKernel(double2* r, double2* rnew, int N){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    rnew[idx] = r[idx]; // double2 assignment copies both .x and .y
}
|
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/functional.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <iostream>
#include <iterator>
using namespace std;
#define N 10
typedef thrust::device_vector<float> dVecFloat;
// Compute Y <- a*X + Y using a temporary vector and three device passes.
// Kept as the slow reference implementation; saxpy_fast fuses everything
// into a single transform.
void saxpy_slow(float a, dVecFloat &X, dVecFloat &Y)
{
    dVecFloat temp(X.size());
    thrust::fill(temp.begin(), temp.end(), a); // temp <- a
    thrust::transform(X.begin(), X.end(), temp.begin(), temp.begin(), thrust::multiplies<float>()); // temp <- temp * x
    thrust::transform(temp.begin(), temp.end(), Y.begin(), Y.begin(), thrust::plus<float>()); // y <- temp + y
}
// better way
// Binary functor computing a*x + y for thrust::transform. The scale factor
// is fixed at construction so device code sees it as a captured constant.
struct saxpy_functor
{
    const float a; // scale factor applied to x
    saxpy_functor(float _a) : a(_a) {}
    __host__ __device__ float operator()(const float &x, const float &y) const
    {
        float scaled = a * x; // same multiply-then-add order as the original
        return scaled + y;
    }
};
// Compute Y <- a*X + Y in a single fused device pass using saxpy_functor.
void saxpy_fast(float a, dVecFloat &X, dVecFloat &Y)
{
    thrust::transform(X.begin(), X.end(), Y.begin(), Y.begin(), saxpy_functor(a));
}
// Demo: Y = a*X + Y with X = 0..N-1 and Y = 5, then print Y.
int main()
{
    dVecFloat X(N), Y(N);
    float a = 3.4;
    thrust::sequence(X.begin(), X.end()); // X = 0, 1, 2, ..., N-1
    thrust::fill(Y.begin(), Y.end(), 5);  // Y = 5, 5, ..., 5
    saxpy_fast(a, X, Y);
    // saxpy_slow(a, X, Y);
    cout << "Y: ";
    // NOTE(review): ostream_iterator is declared in <iterator>, which is not
    // included above -- this may only compile via transitive includes; confirm.
    thrust::copy(Y.begin(), Y.end(), ostream_iterator<float>(cout, " "));
    cout << endl;
    return 0;
}
|
12,640 |
#include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define TILE_SIZE 16
// CUDA Kernel
// Tiled matrix multiply: C = A * B for square TM x TM row-major matrices.
// Preconditions (not checked here): blockDim.x == blockDim.y == TILE_SIZE
// and TM is a multiple of TILE_SIZE (main() enforces the latter); otherwise
// the shared-memory loads run out of bounds.
__global__ void matrixMul( float* C, float* A, float* B, int TM)
{
    __shared__ float As [TILE_SIZE][TILE_SIZE];
    __shared__ float Bs [TILE_SIZE][TILE_SIZE];
    // each thread computes one element C[i][j]
    // absolute coordinates of the thread: indices i and j
    int j = blockIdx.x * blockDim.x+ threadIdx.x;
    int i = blockIdx.y * blockDim.y+ threadIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    float value = 0;
    // walk the shared K dimension one tile at a time
    for(int ke=0; ke<TM; ke += TILE_SIZE) {
        // stage one tile of A and one tile of B into shared memory
        As[ty][tx] = A[i * TM + ke + tx];
        Bs[ty][tx] = B[(ke + ty) * TM + j];
        __syncthreads(); // both tiles must be fully loaded before use
        // partial dot product over this tile
        for (int k = 0; k < TILE_SIZE; k++) {
            value += As[ty][k] * Bs[k][tx];
        }
        __syncthreads(); // keep the tiles alive until every thread is done
    }
    C[i * TM + j] = value;
}
/////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////
// Host driver: reads the matrix size from argv[1] (default 2048), builds
// A and B (ones with (i+1) on the diagonal), multiplies on the GPU with
// matrixMul, times the full transfer+compute sequence with CUDA events,
// and checks every element of C against the closed-form expected value.
int main(int argc, char** argv) {
    int i, j, TM, BLOCK_SIZE_X, BLOCK_SIZE_Y;
    unsigned int M_size;
    float *h_A, *h_B, *h_C;
    float *d_A, *d_B, *d_C;
    cudaError_t cerror;
    float elapsedTime ;
    cudaEvent_t start , stop ;
    // default values
    TM=2048;
    BLOCK_SIZE_X = TILE_SIZE;
    BLOCK_SIZE_Y = TILE_SIZE;
    // optionally read TM from arg1 (block sizes from arg2/arg3 are disabled)
    if (argc>1) {
        TM=atoi(argv[1]);
    }
    // if (argc>3) {
    // BLOCK_SIZE_X =atoi(argv[2]);
    // BLOCK_SIZE_Y =atoi(argv[3]);
    // }
    // check that TM is a multiple of the block dimensions
    // (matrixMul has no bounds checks and relies on this)
    if ((TM % BLOCK_SIZE_X) !=0) {
        printf("Taille matrice non multiple de taille bloc X %d \n", BLOCK_SIZE_X);
        exit(1);
    }
    if ((TM % BLOCK_SIZE_Y) !=0) {
        printf("Taille matrice non multiple de taille bloc Y %d \n", BLOCK_SIZE_Y);
        exit(1);
    }
    // host memory allocation
    M_size = TM*TM*sizeof(float);
    h_A = (float*) malloc(M_size);
    h_B = (float*) malloc(M_size);
    h_C = (float*) malloc(M_size);
    // initialize the matrices with values that make the result checkable:
    // all ones, except (i+1) on the diagonal of both A and B
    for(i = 0; i < TM; i++){
        for(j = 0; j < TM; j++){
            h_A[i*TM+j] = 1.0;
            h_B[i*TM+j] = 1.0;
            h_C[i*TM+j] = 0.0;
            if (i==j) {
                h_A[i*TM+j]=(float) (i+1);
                h_B[i*TM+j]=(float) (i+1);
            }
        }
    }
    // device memory allocation
    cudaMalloc((void**) &d_A, M_size);
    cudaMalloc((void**) &d_B, M_size);
    cudaMalloc((void**) &d_C, M_size);
    // timing: start (the measured window includes the copies)
    cudaEventCreate (&start ) ;
    cudaEventCreate (&stop ) ;
    cudaEventRecord ( start , 0 ) ;
    // copy input data from CPU to GPU
    cudaMemcpy(d_A, h_A, M_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, M_size, cudaMemcpyHostToDevice);
    // launch structure: grid and blocks
    dim3 threads(BLOCK_SIZE_X, BLOCK_SIZE_Y);
    dim3 grid(TM / threads.x, TM / threads.y);
    printf("bloc %d %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
    printf("grille %d %d \n", TM / threads.x, TM / threads.y);
    // launch the kernel
    matrixMul<<< grid, threads >>>(d_C, d_A, d_B, TM);
    // report launch errors, if any
    cerror=cudaGetLastError();
    if ((int)cerror !=0) {
        printf("Erreur appel kernel %d \n", (int) cerror);
        exit(cerror);
    }
    // copy the result back from the GPU (synchronizes with the kernel)
    cudaMemcpy(h_C, d_C, M_size, cudaMemcpyDeviceToHost);
    // timing: stop and report elapsed time
    cudaEventRecord ( stop , 0 ) ;
    cudaEventSynchronize ( stop ) ;
    cudaEventElapsedTime ( &elapsedTime , start , stop ) ;
    cudaEventDestroy ( start ) ;
    cudaEventDestroy ( stop ) ;
    printf ( "Temps consomme: %f secondes\n" , elapsedTime / 1000.0 ) ;
    // verify: C[i][j] = (i+1)*(i+1) + TM - 1 on the diagonal, i+j+TM elsewhere
    for(i = 0; i < TM; i++){
        for(j = 0; j < TM; j++){
            if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
                printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] ); exit(1);
            }
            else if ((i!=j) && (h_C[i*TM+j] != (float)(i+j+TM))){
                printf("Erreur i: %d j: %d\n", i, j);
                exit(1);
            }
        }
    }
    // release memory
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
|
12,641 |
#include <stdio.h>
#include <stdlib.h>
#define N 128*256
#define THREADS_PER_BLOCK 256
#define N_BLOCKS N/THREADS_PER_BLOCK
// Kernel to add N integers using threads and blocks
// Element-wise vector add: c[i] = a[i] + b[i], one thread per element.
// The launch in main() uses exactly N threads, but the bounds guard below
// makes the kernel safe for any launch that rounds the thread count up.
__global__ void add(int *a, int *b, int *c){
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < N)
        c[index] = a[index] + b[index];
}
// Main program
// Main program: allocates N integers on host and device, fills the inputs,
// runs the add kernel with one thread per element, prints every result,
// and frees all memory. NOTE(review): cudaMalloc/cudaMemcpy return codes
// are unchecked -- failures would surface as garbage output.
int main(void){
    int *a,*b,*c; // Host copies
    int *a_dev,*b_dev,*c_dev; // Device copies
    int size = N*sizeof(int); // Size of N integer
    // Allocate host memory
    a = (int *) malloc (size);
    b = (int *) malloc (size);
    c = (int *) malloc (size);
    // Allocate device memory
    cudaMalloc( (void**)&a_dev, size);
    cudaMalloc( (void**)&b_dev, size);
    cudaMalloc( (void**)&c_dev, size);
    // Initialize inputs: a[i] = i, b[i] = i
    for (int i=0; i<N; i++){
        a[i] = i;
        b[i] = i;
    }
    // Copy inputs to device
    cudaMemcpy( a_dev, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( b_dev, b, size, cudaMemcpyHostToDevice );
    // Launch kernel on device (N is a multiple of THREADS_PER_BLOCK,
    // so the grid covers the data exactly)
    add <<< N_BLOCKS , THREADS_PER_BLOCK >>> (a_dev,b_dev,c_dev);
    // Copy device result back to host (blocks until the kernel finishes)
    cudaMemcpy( c, c_dev, size, cudaMemcpyDeviceToHost );
    // Print result
    for (int i=0; i<N; i++)
        printf("%d\n",c[i]);
    // Free device memory
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(c_dev);
    // Free host memory
    free(a);
    free(b);
    free(c);
    return 0;
}
|
12,642 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <time.h>
// hard parameters
#define L 150
#define MULTISPIN unsigned int
#define MULTISIZE 32
// #define MULTISPIN unsigned long int
// #define MULTISIZE 64
#define STEPS_REPEAT 3
const int AREA = L*L;
const int NTOT = (L-2)*(L-2);
//external parameters
// Simulation parameters read from params.txt by main().
struct parameters {
    float t;                // temperature
    float t_start;          // starting temperature
    float j;                // coupling constant
    int steps_repeat;       // NOTE(review): never read; repetitions use the STEPS_REPEAT macro instead
    int t_max_sim;          // total simulation steps per run
    int t_measure_wait;     // thermalization steps before measuring starts
    int t_measure_interval; // steps between consecutive measurements
    int seed;               // base RNG seed (offset per repetition)
};
// average tracker struct
struct avg_tr {
double sum;
double sum_squares;
int n;
};
static inline struct avg_tr new_avg_tr(int locn) {
struct avg_tr a = { .sum = 0, .sum_squares = 0, .n = locn};
return a;
}
// if the numbers overflow, then it would be necessary to divide by N before summing
// however it's faster the other way
static inline void update_avg(struct avg_tr * tr_p, double newval) {
tr_p->sum += newval;
tr_p->sum_squares += (newval*newval);
}
static inline double average( struct avg_tr tr) {
return (tr.sum)/((double) tr.n) ;
}
static inline double stdev( struct avg_tr tr) {
return sqrt( ( tr.sum_squares)/((double) tr.n) - pow(( (tr.sum)/((double) tr.n) ),2) );
}
// static inline double variance( struct avg_tr tr) {
// return ( ( tr.sum_squares)/((double) tr.n) - pow(( (tr.sum)/((double) tr.n) ),2) );
// }
//multispin averages, hard-coded to track a number MULTISPIN * STEPS_REPEAT of values
struct multiavg_tr {
    double sum[MULTISIZE * STEPS_REPEAT];         // per-slot running sums
    double sum_squares[MULTISIZE * STEPS_REPEAT]; // per-slot sums of squares
    int n; // number of terms in the avg
};
// localn is not multisize*steps_repeat, it's the number of terms that will contribute to each avg ...
struct multiavg_tr new_multiavg_tr(int localn) {
    struct multiavg_tr a;
    for(int k=0; k<MULTISIZE * STEPS_REPEAT; k++ ) {
        a.sum[k] = 0;
        a.sum_squares[k] = 0;
    }
    a.n = localn;
    return a;
}
// Fold newval into slot k. must be 0 =< k < MULTISIZE * STEPS_REPEAT
static inline void update_multiavg(struct multiavg_tr * tr_p, double newval, int k) {
    tr_p->sum[k] += newval;
    tr_p->sum_squares[k] += (newval*newval);
}
// Mean of slot k: sum[k] / n.
static inline double multiaverage( struct multiavg_tr tr, int k) {
    return (tr.sum[k])/((double) tr.n) ;
}
// Population standard deviation of slot k: sqrt(E[x^2] - E[x]^2).
static inline double multistdev( struct multiavg_tr tr, int k) {
    return sqrt( ( tr.sum_squares[k])/((double) tr.n) - pow(( (tr.sum[k])/((double) tr.n) ),2) );
}
// static inline double multivariance( struct multiavg_tr tr, int k) {
// return ( ( tr.sum_squares[k])/((double) tr.n) - pow(( (tr.sum[k])/((double) tr.n) ),2) );
// }
// Uniform double in [0, 1], driven by the C library rand() stream
// (seeded elsewhere via srand()).
double unitrand(){
    double draw = rand();
    return draw / RAND_MAX;
}
// index has to be less that MULTISIZE
// Set bit `index` of the multispin word to 1. index must be < MULTISIZE.
// NOTE(review): the literal 1 is an int, so `1 << index` shifts at int
// width; if MULTISPIN is switched to the 64-bit variant this needs to become
// ((MULTISPIN)1) << index -- confirm before changing MULTISIZE to 64.
static inline void set_spin_1 (MULTISPIN * multi, int index) {
    *multi |= 1 << index;
}
// Clear bit `index` of the multispin word.
static inline void set_spin_0 (MULTISPIN * multi, int index) {
    *multi &= ~(1 << index);
}
// Return bit `index` (0 or 1) of the multispin word.
static inline MULTISPIN read_spin(MULTISPIN multi, int index) {
    return ((multi >> index) & 1);
}
// each bit exp8 and exp8 describes the Metropolis RNG result for that bit,
// specifying if the random r is bigger or smaller than the relevant values e^(4J/kT) and e^(8J/kT) (passed from outside)
static inline MULTISPIN generate_exp4_mask(double exp4, double exp8, double random_number) {
    // BUG FIX: `res` was used uninitialized -- set_spin_1/set_spin_0
    // read-modify-write the word, so the first |= / &= operated on
    // indeterminate bits (undefined behavior in C).
    MULTISPIN res = 0;
    for(int k=0; k<MULTISIZE; k++) {
        // NOTE: the condition does not depend on k, so every bit ends up
        // identical; kept per-bit to match the article's formulation.
        if( exp4 > random_number && random_number > exp8) { // this is taken from the article and works. the version below might not but simplifies some things
        // if( exp4 > random_number) {
            set_spin_1(&res, k);
        } else {
            set_spin_0(&res, k);
        }
    }
    return res;
}
// Per-bit Metropolis acceptance mask for the deltaE = +8J case:
// bit k is 1 when random_number < exp8 (the e^(-8J/kT) threshold).
static inline MULTISPIN generate_exp8_mask(double exp8, double random_number) {
    // BUG FIX: `res` was used uninitialized (same issue as generate_exp4_mask).
    MULTISPIN res = 0;
    for(int k=0; k<MULTISIZE; k++) {
        if( random_number < exp8 ) {
            set_spin_1(&res, k);
        } else {
            set_spin_0(&res, k);
        }
    }
    return res;
}
// One multispin word with random bits.
// NOTE(review): rand() only guarantees 15 random bits (RAND_MAX >= 32767),
// so the high bits of the word may be far from uniform -- confirm acceptable.
MULTISPIN init_random_multispin() {
    return (MULTISPIN) rand(); // just spam random bits
}
// Fill the whole L x L grid with random multispin words.
void init_random_grid(MULTISPIN grid[L*L]) {
    for(int x = 0; x<L; x++) {
        for(int y = 0; y<L; y++) {
            grid[x+y*L] = init_random_multispin();
        }
    }
}
// The all-zeros word: every bit-plane starts in the same aligned state.
MULTISPIN init_t0_multispin() {
    return (MULTISPIN) 0; // should be all zeros for all sensible multispin types
}
// Fill the whole L x L grid with the t=0 (all-aligned) state.
void init_t0_grid(MULTISPIN grid[L*L]) {
    for(int x = 0; x<L; x++) {
        for(int y = 0; y<L; y++) {
            grid[x+y*L] = init_t0_multispin();
        }
    }
}
// static inline void flip(MULTISPIN grid[L*L], int x, int y) {
// grid[x+y*L] = ~grid[x+y*L];
// }
// can segfault
// Fetch the neighbor of (x, y) displaced by (xstep, ystep).
// No bounds checks (can segfault on edge spins, per the comment above);
// the update loops below only visit the inner (L-2)x(L-2) region.
static inline MULTISPIN grid_step(MULTISPIN grid[L*L], int x, int y, int xstep, int ystep) {
    return grid[(x+xstep) + (y+ystep)*L];
}
// segfault if applied to an edge spin, must be called only on the inner L-1 grid
// One Metropolis update of spin (x, y), applied to all MULTISIZE bit-plane
// replicas at once via bitwise logic. A single random number drives the
// accept masks for every bit (the replicas share the RNG roll per site).
// segfault if applied to an edge spin, must be called only on the inner L-1 grid
void update_multispin(MULTISPIN grid[L*L], int x, int y, double exp4, double exp8 ) {
    MULTISPIN s0 = grid[x+y*L];
    double ur = unitrand();
    MULTISPIN exp4_mask = generate_exp4_mask(exp4, exp8, ur); // here
    MULTISPIN exp8_mask = generate_exp8_mask(exp8, ur);
    // "energy variables" indicating whether s0 is equal or opposite to each of its 4 neighbours
    MULTISPIN i1 = s0 ^ grid_step(grid, x, y, 1, 0);
    MULTISPIN i2 = s0 ^ grid_step(grid, x, y, -1, 0);
    MULTISPIN i3 = s0 ^ grid_step(grid, x, y, 0, 1);
    MULTISPIN i4 = s0 ^ grid_step(grid, x, y, 0, -1);
    // bit sums with carry over between the i variables
    MULTISPIN j1 = i1 & i2;
    MULTISPIN j2 = i1 ^ i2;
    MULTISPIN j3 = i3 & i4;
    MULTISPIN j4 = i3 ^ i4;
    // logic for deciding whether to flip s0 or not (see the truth table below)
    MULTISPIN flip_mask = ( ((j1 | j3) | (~(j1^j3) & (j2&j4)) ) | ((j2 | j4) & exp4_mask ) | exp8_mask );
    grid[x+y*L] = grid[x+y*L] ^ flip_mask;
    // explanation:
    // spins | i1234 | deltaE | j1 j2 j3 j4 |
    // 1 | 1 | | |
    // 101 | 1 1 | -8 | 1 0 1 0 |
    // 1 | 1 | | |
    //
    // 0 | 0 | | |
    // 101 | 1 1 | -4 | 0 1 1 0 | (j1 | j3)
    // 1 | 1 | | |
    //
    // 0 | 0 | | 0 0 1 0 |
    // 001 | 0 1 | 0 | or |-------------------------
    // 1 | 1 | | 0 1 0 1 | ~(j1^j3) & (j2&j4))
    //------------------------------------------------------------------
    //
    // 0 | 0 | | |
    // 000 | 0 0 | +4 | | (j2 | j4) & exp4
    // 1 | 1 | | |
    //------------------------------------------------------------------
    //
    // 0 | 0 | | |
    // 000 | 0 0 | +8 | 0 0 0 0 | exp8
    // 0 | 0 | | |
    // the first 2 cases are detected by (j1 | j3) and lead to the spin flip regardless of the RNG roll.
    // the deltaH = 0 case can result in two different forms for the j's depending on ho the spins are paired.
    // the first of these is correctly picked up by (j1 | j3), while the second needs its own expression ~(j1^j3) & (j2&j4))
    // in the 4th case, detected by (j2 | j4), the spin is flipped only if the RNG roll is lucky enough (exp4 = 1)
    // if we still haven't flipped, we get to the last case. here the spin is flipped only if the RNG roll gives the luckiest result (exp8 = 1).
}
// checkerboard patterns
// Update the "white" sites of a checkerboard sweep: the column parity
// alternates per row, so no two updated spins are nearest neighbors and the
// updates within one color are independent. Only the inner (L-2)x(L-2)
// region is touched (the boundary acts as fixed spins).
void update_grid_white(MULTISPIN grid[L*L], double exp4, double exp8 ) {
    for(int x = 1; x<L-1; x+=1) {
        for(int y = (1 + x%2) ; y<L-1; y+=2) {
            update_multispin(grid, x, y, exp4, exp8);
        }
    }
}
// Update the complementary "black" sites (opposite column parity).
void update_grid_black(MULTISPIN grid[L*L], double exp4, double exp8 ) {
    for(int x = 1; x<L-1; x+=1) {
        for(int y = (1 + (x+1)%2) ; y<L-1; y+=2) {
            update_multispin(grid, x, y, exp4, exp8);
        }
    }
}
// non GPU function
void multidump_first(MULTISPIN grid[L*L]) {
// printf("first bit grid (out of %i):\n", MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & 1 ) == 0) printf(" ");
else printf("█");
}
printf("\n");
}
printf("\n");
}
// non GPU function
void multidump_a_few(MULTISPIN grid[L*L]) {
for(int k=0; k<5; k++) {
printf("grid on bit %i (out of %i):\n", k+1, MULTISIZE);
for(int x = 0; x<L; x++) {
for(int y = 0; y<L; y++) {
if(( grid[x+y*L] & (1 << k) ) == 0) printf(" ");
else printf("█");
}
printf("\n");
}
printf("\n");
}
}
// non GPU
// Magnetization per spin of bit-plane `multi_index`, averaged over the
// inner (L-2)x(L-2) region (NTOT spins); bits {0,1} map to spins {-1,+1}.
double measure_1bit_magnetization(MULTISPIN grid[L*L], int multi_index) {
    double lmagn = 0;
    for(int x = 1; x<L-1; x++) {
        for(int y = 1; y<L-1; y++) {
            double bit = (double) read_spin(grid[x+y*L], multi_index);
            lmagn = lmagn + ((bit*2.) -1.); // 0 -> -1, 1 -> +1
        }
    }
    return (((double) lmagn ) / (double) NTOT) ;
}
// as usual, the number of independent measures is hardcoded to MULTISIZE * STEPS_REPEAT.
// this function measures a single multispin, updating a chunk of the tracker. (for example the first half if rep_steps is 2)
// Measure the magnetization of every bit-plane of one grid and fold each
// value into the tracker chunk belonging to repetition `block_count`
// (slots [block_count*MULTISIZE, (block_count+1)*MULTISIZE)).
void update_magnetization_tracker( struct multiavg_tr * tr_p, MULTISPIN grid[L*L], int block_count) {
    for( int k=0; k < MULTISIZE; k++) {
        double mag = measure_1bit_magnetization(grid, k);
        update_multiavg(tr_p, mag, k + MULTISIZE*block_count );
    }
}
// static inline void update_multiavg(struct multiavg_tr * tr_p, double newval, int k) {
// tr_p->sum[k] += newval;
// tr_p->sum_squares[k] += (newval*newval);
// }
// Entry point: reads params.txt, runs STEPS_REPEAT independent multispin
// simulations (MULTISIZE bit-plane replicas each) from the same t=0 start,
// measures magnetization after thermalization, and writes per-replica and
// overall averages to results.txt.
int main() {
    // read params
    // NOTE(review): fopen/fscanf results are unchecked -- a missing or
    // malformed params.txt leaves `par` (partially) uninitialized.
    FILE *param_f = fopen("params.txt", "r");
    struct parameters par;
    fscanf(param_f, "temperature %f\ntemperature_start %f\ncoupling %f\nsimulation_t_max %i\nthermalization_time %i\ntime_between_measurements %i\nbase_random_seed %i\n",
    &(par.t), &(par.t_start), &(par.j), &(par.t_max_sim), &(par.t_measure_wait), &(par.t_measure_interval), &(par.seed));
    printf("%f\n", par.t);
    printf("%f\n", par.t_start);
    printf("%f\n", par.j);
    printf("%i\n", par.t_max_sim);
    printf("%i\n", par.t_measure_wait);
    printf("%i\n", par.t_measure_interval);
    printf("%i\n", par.seed);
    fclose(param_f);
    srand(par.seed);
    MULTISPIN startgrid[L*L];
    init_t0_grid(startgrid); // shared all-aligned starting configuration
    // Metropolis acceptance thresholds for deltaE = +4J and +8J
    const double EXP4 = exp( -(4.*par.j) / par.t);
    const double EXP8 = exp( -(8.*par.j) / par.t);
    // measure cycle: write a self-describing header to the results file
    FILE *resf = fopen("results.txt", "w");
    fprintf(resf, "# cpu2\n");
    fprintf(resf, "# hard-coded parameters:\n# linear_size: %i\n# spin_coding_size: %i\n", L, MULTISIZE);
    fprintf(resf, "# parameters:\n# temperature: %f\n#temp_start: %f\n# coupling: %f\n# repetitions: %i\n", par.t, par.t_start, par.j, STEPS_REPEAT);
    fprintf(resf, "# simulation_t_max: %i\n# thermalization_time: %i\n# time_between_measurements: %i\n# base_random_seed: %i\n", par.t_max_sim, par.t_measure_wait, par.t_measure_interval, par.seed);
    fprintf(resf, "# extra:\n# area: %i\n# active_spins_excluding_boundaries:%i\n# total_independent_sims: %i\n", AREA, NTOT, MULTISIZE*STEPS_REPEAT);
    MULTISPIN grid[L*L];
    // NOTE(review): this double is narrowed to int by new_multiavg_tr below
    double n_measures_per_sim = (double) ((par.t_max_sim - par.t_measure_wait)/par.t_measure_interval);
    //OUTER REP LOOP
    struct multiavg_tr single_run_avgs = new_multiavg_tr(n_measures_per_sim);
    for( int krep=0; krep< STEPS_REPEAT; krep++) {
        srand(par.seed + krep); // distinct RNG stream per repetition
        memcpy(grid, startgrid, L*L*sizeof(MULTISPIN) );
        // INNER SIM LOOPS
        printf("# simulation %i\n", krep+1);
        printf("# waiting thermalization for the first %i sim steps.\n", par.t_measure_wait);
        int ksim=0;
        // thermalize without measuring
        for( ; ksim<par.t_measure_wait; ksim++) {
            update_grid_black(grid, EXP4, EXP8);
            update_grid_white(grid, EXP4, EXP8);
        }
        printf("# finished thermalization. running %i more simulation steps and performing %f measures.\n",(par.t_max_sim - par.t_measure_wait), n_measures_per_sim);
        // production phase: measure every t_measure_interval steps
        for( ; ksim<par.t_max_sim; ksim++) {
            update_grid_black(grid, EXP4, EXP8);
            update_grid_white(grid, EXP4, EXP8);
            if( ksim % par.t_measure_interval == 0) {
                update_magnetization_tracker(&single_run_avgs, grid, krep);
            }
        }
        // END INNER SIM LOOPS
        printf("# end simulation %i\n", krep+1);
    }
    // END OUTER REPETITION LOOP
    // aggregate the per-replica averages into one overall average
    struct avg_tr avg_of_blocks = new_avg_tr( MULTISIZE * STEPS_REPEAT );
    for(int k=0; k < MULTISIZE * STEPS_REPEAT; k++) {
        double lres = multiaverage(single_run_avgs, k);
        double lstdev = multistdev(single_run_avgs, k);
        fprintf(resf, "# average of simulation %i\n: %f +- %f\n", k+1, lres, lstdev);
        update_avg(&avg_of_blocks, lres);
    }
    fprintf(resf, "# overall average \n: %f +- %f\n", average(avg_of_blocks), stdev(avg_of_blocks));
    // dump a few bit-planes of the final grid for a visual sanity check
    // NOTE(review): resf is never fclose()'d; the OS flushes it at exit
    multidump_a_few(grid);
    return 0;
}
|
12,643 | //
// Created by saleh on 7/16/18.
//
#define BDIMX 16
#define BDIMY 16
#define IPAD 0
// Batched 2D transpose: for each of the dim0 batches, out[b] = transpose of
// in[b], where in[b] is dim1 x dim2 (row-major) and out[b] is dim2 x dim1.
// Launch: 2D grid covering one (dim1 x dim2) slice; each in-bounds thread
// copies its element for every batch.
__global__ void transposeBatch_try01 (const float * __restrict__ in, float * __restrict__ out, const int dim0, const int dim1, const int dim2)
{
    unsigned int ix = blockDim.x * blockIdx.x + threadIdx.x; // column in the input slice
    unsigned int iy = blockDim.y * blockIdx.y + threadIdx.y; // row in the input slice
    unsigned long batch_offset = dim1 * dim2; // elements per slice
    if (ix < dim2 && iy < dim1) {
        unsigned int ti = iy * dim2 + ix; // input is of shape (dim1, dim2)
        unsigned int to = ix * dim1 + iy; // output is of shape (dim2, dim1)
        for (int b = 0; b < dim0; b++) {
            // BUG FIX: the original staged each value through shared memory
            // and called __syncthreads() INSIDE this divergent branch --
            // undefined behavior, since threads failing the bounds check
            // never reach the barrier. Each thread only ever read back the
            // element it had itself written, so the shared tile added no
            // value; a direct copy is equivalent and removes the bad barrier.
            out[b * batch_offset + to] = in[b * batch_offset + ti];
        }
    }
}
// This is the wrapper function that will be used in cpp files!
// This is the wrapper function that will be used in cpp files!
// Transposes each of the dim0 slices of g_i (dim1 x dim2, row-major) into
// g_o (dim2 x dim1). The grid covers one slice; the kernel loops over the
// batch dimension. The third launch argument reserves (BDIMX+IPAD)*BDIMY
// floats of dynamic shared memory for the kernel's staging tile.
void transpose(
    const float* g_i,
    float* g_o,
    unsigned int dim0,
    unsigned int dim1,
    unsigned int dim2){
    dim3 blocksize (BDIMX, BDIMY);
    // ceil-divide so partial tiles at the right/bottom edges are covered
    dim3 gridsize ((dim2 + blocksize.x - 1) / blocksize.x, (dim1 + blocksize.y - 1) / blocksize.y);
    transposeBatch_try01<<<gridsize, blocksize, (BDIMX + IPAD) * BDIMY * sizeof(float)>>>(g_i, g_o, dim0,dim1,dim2);
}
|
12,644 | #include "includes.h"
//#define DEPTH 2
// dp - cost aggregation array
// cost_image - m x n x D array
// d - use every d channels of input to conserve register memory
// m - image rows
// n - image columns
// D - depth
// depth_stride - pitch along depth dimension
// row_stride - pitch along row dimension
// Per-pixel arg-min over the depth axis of a cost volume `dp`
// (depth-major: plane d starts at offset d*m*n), writing the winning depth
// index into stereo_im (m x n). Ties keep the first (smallest) depth.
// NOTE(review): `D` is not defined in this listing (a `#define DEPTH 2` is
// commented out at the top) -- this cannot compile unless the build supplies
// D; confirm where it comes from.
// NOTE(review): the outer loops stride by blockDim.x/blockDim.y rather than
// gridDim*blockDim, so with more than one block the blocks re-scan
// overlapping pixels. The writes are idempotent so the result is still
// correct, but work is duplicated -- presumably meant for a small grid; verify.
__global__ void argmin_3d_mat(float * dp, int * stereo_im, int m, int n)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int imsize = m*n;          // elements per depth plane
    int loop_limit = D*m*n;    // total elements in the volume
    while(col < n)
    {
        int row = blockDim.y * blockIdx.y + threadIdx.y;
        while(row < m)
        {
            int min_ind = -1;
            float current_min = 100000000.0; // sentinel above any expected cost
            int current_val = row * n + col; // flat pixel index within a plane
            int v = 0; // depth index tracked alongside the byte offset
            for (int depth = 0; depth < loop_limit; depth+=imsize){
                if (dp[depth + current_val] < current_min)
                {
                    min_ind = v;
                    current_min = dp[depth + current_val];
                }
                v++;
            }
            stereo_im[current_val] = min_ind;
            row+=blockDim.y;
        }
        col+=blockDim.x;
    }
}
12,645 | #include <iostream>
#include <cstdlib>
#include <cmath>
#include <ctime>
using namespace std;
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
static const long long SIZE = 4096*4096*4;
cudaError_t calaAddArray(const double* a, const double* b, double* res, const long SIZE);
// Compute c[i] = (a[i]*b[i] - b[i]*pi)/a[i] * b[i]*b[i] - a[i]*b[i] for all
// SIZE elements, using a grid-stride loop so any launch size covers the data.
__global__ void addArray(const double* a,const double *b, double* c)
{
    const int stride = gridDim.x * blockDim.x; // total threads in the grid
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < SIZE; idx += stride)
    {
        c[idx] = (a[idx] * b[idx] - b[idx] * 3.1415926) / a[idx] * b[idx] * b[idx] - a[idx] * b[idx];
    }
}
// Benchmark: evaluates res[i] = (a*b - b*pi)/a * b*b - a*b over SIZE elements
// on the CPU and on the GPU (calaAddArray), verifying and timing both paths.
// Exact floating-point equality in the checks is valid only because the
// verification re-evaluates the identical expression in the identical order.
int main4444()
{
    cout<<"Begin : "<<endl;
    srand(time(0));
    double* a = new double[SIZE];
    double* b = new double[SIZE];
    for (long i = 0; i < SIZE; i++)
    {
        a[i] = (double)rand() * (double)rand();
        b[i] = (double)rand() * (double)rand();
    }
    double* res = new double[SIZE];
    int count=2; // run each path twice
    clock_t start, finish;
    start = clock();
    for(int i=0;i<count;i++)
    {
        for (long long i = 0; i < SIZE; i++)
            res[i] = (a[i] * b[i]-b[i]*3.1415926)/a[i] * b[i]*b[i]-a[i]*b[i];
    }
    finish = clock();
    for (long long i = 0; i < SIZE; i++)
    {
        if (res[i] != (a[i] * b[i]-b[i]*3.1415926)/a[i] * b[i]*b[i]-a[i]*b[i] )
        {
            cout << "*********** Failed CPU" << endl;
            break;
        }
    }
    cout << start << " " << finish << endl;
    cout << "By CPU Array Mult: " << (double)(finish - start) / CLOCKS_PER_SEC << endl;
    start = clock();
    for(int i=0;i<count;i++)
    {
        cudaError_t cudaErrorRes = calaAddArray(a, b, res, SIZE);
        if (cudaErrorRes != cudaSuccess)
        {
            cout << "************* CUDA Return Result Failed " << endl;
        }
    }
    finish = clock();
    for (long long i = 0; i < SIZE; i++)
    {
        if (res[i] != (a[i] * b[i]-b[i]*3.1415926)/a[i] * b[i]*b[i]-a[i]*b[i])
        {
            cout << "************* Failed GPU" << endl;
            break;
        }
    }
    cout << endl;
    cout << start << " " << finish << endl;
    cout << "By CUDA Array Mult: " << (double)(finish - start)/CLOCKS_PER_SEC << endl;
    cout<<"Game Over!!!!"<<endl;
    // BUG FIX: the three ~512 MB arrays were leaked; release them.
    delete[] a;
    delete[] b;
    delete[] res;
    return 1;
}
// Host wrapper: allocates device buffers, copies a and b over, launches the
// addArray kernel, and copies the result into c. Returns the first CUDA
// error encountered (cudaSuccess on success); all device buffers are freed
// through the shared Error: exit path.
// NOTE(review): the `SIZE` parameter shadows the file-scope
// `static const long long SIZE`; the kernel iterates over the GLOBAL
// constant while the copies below use the parameter. The only caller passes
// the global, so the values agree -- confirm before reusing this function.
cudaError_t calaAddArray(const double* a, const double* b, double* c, const long SIZE)
{
    double* dev_a=0;
    double* dev_b=0;
    double* dev_c=0;
    cudaError_t cudaStatus=cudaSetDevice(0);
    if (cudaStatus != cudaSuccess)
    {
        cout << "CUDA ADD Failed In Function cudaError_t calaAddArray " << endl;
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, sizeof(double)*SIZE);
    if (cudaStatus != cudaSuccess)
    {
        cout << "cudaMalloc decvA failed!" << endl;
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, sizeof(double)*SIZE);
    if (cudaStatus != cudaSuccess)
    {
        cout << "cudaMalloc decvB failed!" << endl;
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_c, sizeof(double)*SIZE);
    if (cudaStatus != cudaSuccess)
    {
        cout << "cudaMalloc decvC failed!" << endl;
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_a,a,SIZE * sizeof(double),cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        cout << "cudaMemcpy decvA failed!" << endl;
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, SIZE * sizeof(double), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess)
    {
        cout << "cudaMemcpy decvB failed!" << endl;
        goto Error;
    }
    // fixed launch shape; the kernel's grid-stride loop covers any remainder
    addArray <<<4096,1024>>>(dev_a, dev_b, dev_c);
    cudaStatus = cudaGetLastError(); // catches launch-configuration errors
    if (cudaStatus != cudaSuccess)
    {
        cout << "addKernel launch failed: " <<cudaGetErrorString(cudaStatus)<<endl;
        goto Error;
    }
    cudaStatus = cudaDeviceSynchronize(); // catches asynchronous execution errors
    if (cudaStatus != cudaSuccess)
    {
        cout<<cudaStatus<<" cudaDeviceSynchronize returned error code %d after launching addKernel!"<<endl;
        goto Error;
    }
    cudaStatus = cudaMemcpy(c, dev_c, SIZE * sizeof(double), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess)
    {
        cout << "cudaMemcpy decvC failed!" << endl;
        goto Error;
    }
Error:
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return cudaStatus;
}
|
12,646 | /*
使用CUDA实现数组对应值相加,算是CUDA的hello world。
这段代码在一定程度上进行了优化:可以计算任意长度数组的加和,不限于GPU上block和thread的限制
*/
#include <stdio.h>
#define N 128
__global__ void add(int *a, int *b, int *c){
    // Element-wise vector add: c[i] = a[i] + b[i] for i in [0, N).
    // Grid-stride loop: each thread starts at its global index and advances
    // by the total number of launched threads, so any launch configuration
    // (even a single block) covers all N elements.
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    while (tid < N) { // fix: was `if`, which made the stride below dead code
        c[tid] = a[tid] + b[tid];
        tid += gridDim.x * blockDim.x;
    }
}
int main(){
// Host driver: initialise two arrays, add them on the GPU, print the result.
int a[N], b[N], c[N]; // host arrays
int *dev_a, *dev_b, *dev_c; // device memory pointers
// allocate device memory for the three arrays
cudaMalloc((void**)&dev_a, N*sizeof(int));
cudaMalloc((void**)&dev_b, N*sizeof(int));
cudaMalloc((void**)&dev_c, N*sizeof(int));
// initialise the host arrays
for(int i = 0;i<N;i++){
a[i] = i;
b[i] = 2*i-3;
}
// copy the host arrays into device memory
cudaMemcpy(dev_a, a, N*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
// launch (N+127)/128 blocks of 128 threads; the +127 rounds up so that
// N < 128 still launches at least one block
add<<<(N+127)/128, 128>>>(dev_a, dev_b, dev_c);
// copy the result back to the host (cudaMemcpy blocks until the kernel
// has finished, so no explicit synchronisation is needed)
cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
for(int i = 0;i<N;i++)
printf("%d + %d = %d\n", a[i], b[i], c[i]);
// release device memory
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
|
12,647 | // CUDA to start with...
// Operation: B = parMatTranspose(A)
#include <stdio.h>
#include <unistd.h>
#define BLOCK_SIZE 32
__global__ void parMatTranspose (float *B, float *A, int ndim) {
  // Tiled matrix transpose: B = A^T for a square ndim x ndim matrix.
  // Expects a (blockdim x blockdim) grid of BLOCK_SIZE x BLOCK_SIZE threads,
  // blockdim = ceil(ndim / BLOCK_SIZE).
  //
  // Fix vs. the previous version: the tile is loaded with coalesced reads
  // from A and stored with coalesced writes to B, transposing via swapped
  // tile indices in shared memory. The old code read A with a stride-ndim
  // access pattern and used shared memory only as a per-thread round trip
  // (each thread read back the value it had just written), gaining nothing.
  __shared__ float tile[BLOCK_SIZE][BLOCK_SIZE + 1]; // +1 pad avoids bank conflicts

  int xIn = blockIdx.x * blockDim.x + threadIdx.x;   // column in A
  int yIn = blockIdx.y * blockDim.y + threadIdx.y;   // row in A
  if ((xIn < ndim) && (yIn < ndim)) {
    tile[threadIdx.y][threadIdx.x] = A[yIn * ndim + xIn];
  }
  // Synchronize so the whole tile is loaded before any thread reads it.
  __syncthreads();

  // The output block is the mirror block: swap blockIdx.x and blockIdx.y.
  int xOut = blockIdx.y * blockDim.y + threadIdx.x;  // column in B
  int yOut = blockIdx.x * blockDim.x + threadIdx.y;  // row in B
  if ((xOut < ndim) && (yOut < ndim)) {
    B[yOut * ndim + xOut] = tile[threadIdx.x][threadIdx.y];
  }
}
int main(int argc, char **argv) {
// Repeatedly allocates device buffers, runs the tiled transpose, and
// verifies the result. The loop never frees d_A/d_B (the cudaFree calls
// are commented out) and prints each freshly returned d_A address --
// this looks like a deliberate device-memory-leak demonstration: each
// iteration leaks 2*ndim*ndim floats until cudaMalloc fails.
// NOTE(review): confirm the leak is intentional before "fixing" it.
int ndim;
int threaddim=BLOCK_SIZE;
int blockdim;
float *h_A, *h_B;
float *d_A, *d_B;
int i,j,k;
// matrix dimension from argv[1], default 1024
if(argc>1) {
ndim=atoi(argv[1]);
}
else {
ndim=1024;
}
blockdim=(ndim+BLOCK_SIZE-1)/BLOCK_SIZE; // ceil-div: blocks per grid dimension
while (1)
{
h_A=(float *)malloc(ndim*ndim*sizeof(float));
h_B=(float *)malloc(ndim*ndim*sizeof(float));
// fill A with its own linear index so the transpose is easy to check
for(k=0; k<ndim*ndim; k++)
h_A[k]=(float)k;
cudaMalloc((void **)&d_A,ndim*ndim*sizeof(float));
printf("%lX\n", (unsigned long)d_A); // print the (fresh) device address each pass
cudaMalloc((void **)&d_B,ndim*ndim*sizeof(float));
cudaMemcpy(d_A,h_A, ndim*ndim*sizeof(float), cudaMemcpyHostToDevice);
dim3 grid(blockdim,blockdim);
dim3 threads(threaddim,threaddim);
parMatTranspose <<<grid, threads>>>(d_B, d_A, ndim);
// blocking D2H copy also synchronises with the kernel
cudaMemcpy(h_B,d_B, ndim*ndim*sizeof(float), cudaMemcpyDeviceToHost);
// count mismatches: element (j,i) of B must equal A's value ndim*i+j
int count=0;
for(i=0; i<ndim; i++) {
for(j=0; j<ndim; j++) {
if(h_B[ndim*j+i]!=(float)(ndim*i+j)) count++;
}
}
printf("Count=%d\n", count);
if(ndim<10) {
for(i=0; i<ndim*ndim; i++)
printf("a[%d]=%g b[%d]=%g\n", i, h_A[i], i, h_B[i]);
}
sleep (1);
//cudaFree(d_A);
//cudaFree(d_B);
free(h_A);
free(h_B);
}
}
/* Major mode settings for GNU Emacs */
/* This coerces the editor to treat this as C code. */
/* Local Variables: */
/* mode:c */
/* End: */
|
12,648 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
__global__ void VMF_GPU_GLOBAL(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m) {
	// Vector Median Filter over a 3x3 neighbourhood, one thread per pixel.
	// d_Pin/d_Pout are interleaved RGB images; n is used as the row stride
	// and m as the row count (presumably n = width, m = height -- confirm
	// against the host launch code).
	int Row = blockIdx.y * blockDim.y + threadIdx.y;
	int Col = blockIdx.x * blockDim.x + threadIdx.x;
	float vectR[9], vectG[9], vectB[9]; // float (not double) to stay within GPU resources
	int posicion[9];
	float disteucl1[9];
	// Fix: guard both borders -- the old code only checked the upper bounds,
	// so Row == 0 or Col == 0 read out of bounds via (Row-1)/(Col-1).
	if (Row >= 1 && Col >= 1 && Row < m - 1 && Col < n - 1) {
		// Gather the 3x3 neighbourhood.
		int F = 0;
		for (int i = -1; i <= 1; i++) {
			for (int j = -1; j <= 1; j++) {
				vectR[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 0];
				vectG[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 1];
				vectB[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 2];
				posicion[F] = F;
				F++;
			}
		}
		// Aggregated L1 distance of each neighbour to all the others.
		for (F = 0; F <= 8; F++) {
			float disteucl = 0;
			for (int x = 0; x <= 8; x++) {
				disteucl += abs(vectB[F] - vectB[x]) + abs(vectG[F] - vectG[x]) + abs(vectR[F] - vectR[x]);
			}
			disteucl1[F] = disteucl;
		}
		// Bubble-sort the distances, carrying the original positions along.
		for (F = 0; F <= 8; F++) {
			for (int x = 0; x <= 7; x++) {
				if (disteucl1[x] > disteucl1[x + 1]) {
					float hold = disteucl1[x];
					int hold2 = posicion[x];
					disteucl1[x] = disteucl1[x + 1];
					posicion[x] = posicion[x + 1];
					disteucl1[x + 1] = hold;
					posicion[x + 1] = hold2;
				}
			}
		}
		// The vector median is the neighbour with the minimal aggregated distance.
		d_Pout[(Row * n + Col) * 3 + 0] = vectR[posicion[0]];
		d_Pout[(Row * n + Col) * 3 + 1] = vectG[posicion[0]];
		d_Pout[(Row * n + Col) * 3 + 2] = vectB[posicion[0]];
	}
}
#define maxCUDA( a, b ) ( ((a) > (b)) ? (a) : (b) )
#define minCUDA( a, b ) ( ((a) < (b)) ? (a) : (b) )
__global__ void Detection_FuzzyMetric(unsigned char* Noise, const unsigned char* d_Pin, int n, int m)
{
// Impulse-noise detector using a fuzzy similarity metric: marks a pixel as
// noisy (Noise = 255) when at most q+1 of its 3x3 neighbours are similar to
// it, clean (Noise = 0) otherwise.
// NOTE(review): pixel reads use n as the row stride while the Noise map is
// indexed with m -- these only agree for square images; confirm the layout.
// NOTE(review): only the upper bounds are guarded, so Row == 0 or Col == 0
// reads out of bounds via (Row-1)/(Col-1).
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int x = 0, posicion[9], hold2 = 0, F = 0;
float vectR[9], vectG[9], vectB[9]; // float (not double): double exhausts GPU resources here
float disteucl, disteucl1[9], hold;
float valAngulo = 0.0, r = 0.0;
float arriva = 0.0, abajo = 0.0, val1, val2, val3, dist_M = 0;
unsigned int P = 0; // votes: neighbours similar to the centre
const unsigned int K = 1024, q = 1; // K smooths the min/max ratio; q = vote threshold
const float d = .95; // fuzzy similarity threshold
if ((Row < m - 1) && (Col < n - 1)) {
// gather the 3x3 neighbourhood
F = 0;
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
vectR[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 0];
vectG[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 1];
vectB[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 2];
posicion[F] = F;
F++;
}
}
// per-channel fuzzy similarity of each neighbour to the centre (index 4):
// (min+K)/(max+K) per channel, combined by taking the minimum of the three
for (F = 0; F <= 8; F++) {
arriva = minCUDA(vectR[F], vectR[4]) + K;
abajo = maxCUDA(vectR[F], vectR[4]) + K;
val1 = arriva / abajo;
arriva = minCUDA(vectG[F], vectG[4]) + K;
abajo = maxCUDA(vectG[F], vectG[4]) + K;
val2 = arriva / abajo;
arriva = minCUDA(vectB[F], vectB[4]) + K;
abajo = maxCUDA(vectB[F], vectB[4]) + K;
val3 = arriva / abajo;
dist_M = minCUDA(minCUDA(val1, val2), val3);
if (dist_M>d) P++;
}
if (P <= (q + 1)) {
Noise[(Row * m + Col)] = 255; // too few similar neighbours: impulsive
}
else {
Noise[(Row * m + Col)] = 0;
}
}
}
__device__ float Magnitud(unsigned char* VectR, unsigned char* VectG, unsigned char* VectB, unsigned int i, unsigned int j) {
	// L1 (city-block) distance between the RGB vectors at indices i and j.
	// The unsigned char operands promote to int before subtraction, so the
	// differences cannot wrap around.
	int dr = VectR[i] - VectR[j];
	int dg = VectG[i] - VectG[j];
	int db = VectB[i] - VectB[j];
	return (float)(abs(dr) + abs(dg) + abs(db));
}
__global__ void Detection_Euclidean(unsigned char* Noise, const unsigned char* d_Pin, int n, int m)
{
// Impulse-noise detector using L1 distance: marks a pixel as noisy
// (Noise = 255) when at most q+1 of its 3x3 neighbours are within the
// distance threshold of the centre, clean (Noise = 0) otherwise.
// NOTE(review): pixel reads use n as the row stride while the Noise map is
// indexed with m -- these only agree for square images; confirm the layout.
// NOTE(review): only the upper bounds are guarded, so Row == 0 or Col == 0
// reads out of bounds via (Row-1)/(Col-1).
// NOTE(review): dEuclidiana (45) is declared but the literal 45 is used in
// the comparison below -- presumably the same constant; consider unifying.
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int x = 0, posicion[9], hold2 = 0;
unsigned int F = 0;
unsigned char vectR[9], vectG[9], vectB[9]; // float/double would exhaust GPU resources here
float disteucl, disteucl1[9], hold;
float valAngulo = 0.0, r = 0.0;
float arriva = 0.0, abajo = 0.0, val1, val2, val3, dist_M = 0;
unsigned int P = 0; // count of neighbours farther than the threshold
const unsigned int K = 1024, q = 1, dEuclidiana = 45;
const float d = .95;
if ((Row < m - 1) && (Col < n - 1)) {
// gather the 3x3 neighbourhood
F = 0;
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
vectR[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 0];
vectG[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 1];
vectB[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 2];
posicion[F] = F;
F++;
}
}
// L1 distance of each neighbour to the centre pixel (index 4)
for (F = 0; F <= 8; F++) {
dist_M = Magnitud(vectR, vectG, vectB, F, 4);
if (dist_M>45) P++;
}
if (P <= (q + 1)) {
Noise[(Row * m + Col)] = 255; // few distant neighbours: flagged noisy
}
else {
Noise[(Row * m + Col)] = 0;
}
}
}
__global__ void AMF_Filtering(unsigned char* d_Pout, const unsigned char* d_Pin, unsigned char* Noise, int n, int m)
{
	// Arithmetic Mean Filter applied only at pixels flagged noisy
	// (Noise == 255): the pixel becomes the mean of its non-noisy 3x3
	// neighbours; clean pixels are copied through unchanged.
	// NOTE(review): this kernel uses m as the row stride for the pixel
	// arrays while the detection kernels use n -- only equivalent for
	// square images; confirm the intended layout.
	int Row = blockIdx.y * blockDim.y + threadIdx.y;
	int Col = blockIdx.x * blockDim.x + threadIdx.x;
	if ((Row > 1) && (Col > 1) && (Row < m - 1) && (Col < n - 1)) {
		if (Noise[(Row * m + Col)] == 255) {
			float sumR = 0.0, sumG = 0.0, sumB = 0.0;
			unsigned int Div = 0;
			for (int i = -1; i <= 1; i++) {
				for (int j = -1; j <= 1; j++) {
					if (Noise[((Row + i) * m + (Col + j))] == 0) { // only non-noisy neighbours
						Div++;
						sumR += d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
						sumG += d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
						sumB += d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
					}
				}
			}
			if (Div > 0) {
				d_Pout[((Row * m) + Col) * 3 + 0] = sumR / Div;
				d_Pout[((Row * m) + Col) * 3 + 1] = sumG / Div;
				d_Pout[((Row * m) + Col) * 3 + 2] = sumB / Div;
			}
			else {
				// Fix: every neighbour was flagged noisy -> the old code
				// divided by zero; fall back to the original pixel.
				d_Pout[((Row * m) + Col) * 3 + 0] = d_Pin[((Row * m) + Col) * 3 + 0];
				d_Pout[((Row * m) + Col) * 3 + 1] = d_Pin[((Row * m) + Col) * 3 + 1];
				d_Pout[((Row * m) + Col) * 3 + 2] = d_Pin[((Row * m) + Col) * 3 + 2];
			}
		}
		else {
			// Clean pixel: pass it through unchanged.
			d_Pout[((Row * m) + Col) * 3 + 0] = d_Pin[((Row * m) + Col) * 3 + 0];
			d_Pout[((Row * m) + Col) * 3 + 1] = d_Pin[((Row * m) + Col) * 3 + 1];
			d_Pout[((Row * m) + Col) * 3 + 2] = d_Pin[((Row * m) + Col) * 3 + 2];
		}
	}
}
__global__ void VMF_Filtering(unsigned char* d_Pout, const unsigned char* d_Pin, unsigned char* Noise, int n, int m)
{
	// Vector Median Filter applied only at pixels flagged noisy (Noise == 255):
	// the pixel is replaced by the non-noisy 3x3 neighbour with the minimal
	// aggregated L1 distance to the other non-noisy neighbours; clean pixels
	// are copied through unchanged. Uses m as the row stride for all arrays
	// (confirm the intended layout at the call site).
	int Row = blockIdx.y * blockDim.y + threadIdx.y;
	int Col = blockIdx.x * blockDim.x + threadIdx.x;
	int posicion[9];
	unsigned char arrayFiltradoR[9], arrayFiltradoG[9], arrayFiltradoB[9];
	float disteucl1[9];
	if ((Row > 1) && (Col > 1) && (Row < m - 1) && (Col < n - 1)) {
		if (Noise[(Row * m + Col)] == 255) {
			// Collect only the non-noisy neighbours.
			int c = 0;
			for (int i = -1; i <= 1; i++) {
				for (int j = -1; j <= 1; j++) {
					if (Noise[((Row + i) * m + (Col + j))] == 0) {
						arrayFiltradoR[c] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
						arrayFiltradoG[c] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
						arrayFiltradoB[c] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
						posicion[c] = c;
						c++;
					}
				}
			}
			if (c == 0) {
				// Fix: every neighbour was flagged noisy -> the old code read
				// the uninitialised distance/position arrays (undefined
				// behaviour); fall back to the original pixel instead.
				d_Pout[((Row * m) + Col) * 3 + 0] = d_Pin[((Row * m) + Col) * 3 + 0];
				d_Pout[((Row * m) + Col) * 3 + 1] = d_Pin[((Row * m) + Col) * 3 + 1];
				d_Pout[((Row * m) + Col) * 3 + 2] = d_Pin[((Row * m) + Col) * 3 + 2];
				return;
			}
			// Aggregated L1 distance of each candidate to all candidates.
			for (int i = 0; i <= c - 1; i++) {
				float disteucl = 0;
				for (int j = 0; j <= c - 1; j++) {
					float distR = abs(arrayFiltradoR[i] - arrayFiltradoR[j]);
					float distG = abs(arrayFiltradoG[i] - arrayFiltradoG[j]);
					float distB = abs(arrayFiltradoB[i] - arrayFiltradoB[j]);
					disteucl += distR + distB + distG;
				}
				disteucl1[i] = disteucl;
			}
			// Candidate with the minimal aggregated distance is the vector median.
			float mn = disteucl1[0];
			int posMin = 0;
			for (int i = 1; i <= c - 1; i++) {
				if (mn > disteucl1[i]) {
					mn = disteucl1[i];
					posMin = posicion[i];
				}
			}
			d_Pout[(Row * m + Col) * 3 + 0] = arrayFiltradoR[posMin];
			d_Pout[(Row * m + Col) * 3 + 1] = arrayFiltradoG[posMin];
			d_Pout[(Row * m + Col) * 3 + 2] = arrayFiltradoB[posMin];
		}
		else {
			// Clean pixel: pass it through unchanged.
			d_Pout[((Row * m) + Col) * 3 + 0] = d_Pin[((Row * m) + Col) * 3 + 0];
			d_Pout[((Row * m) + Col) * 3 + 1] = d_Pin[((Row * m) + Col) * 3 + 1];
			d_Pout[((Row * m) + Col) * 3 + 2] = d_Pin[((Row * m) + Col) * 3 + 2];
		}
	}
}
__global__ void FiltradoPropuesta(unsigned char* d_Pout, const unsigned char* d_Pin, unsigned char* Noise, int n, int m)
{
	// Proposed filter: at pixels flagged noisy (Noise == 255) apply a vector
	// median over the full 3x3 neighbourhood (the neighbour with the minimal
	// aggregated L1 distance to all the others); clean pixels are copied
	// through unchanged.
	// NOTE(review): neighbourhood reads use n as the row stride while the
	// output writes use m -- only equivalent for square images; confirm the
	// intended layout.
	int Row = blockIdx.y * blockDim.y + threadIdx.y;
	int Col = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned char vectR[9], vectG[9], vectB[9];
	int posicion[9];
	float disteucl1[9];
	// Fix: guard both borders -- the old code only checked the upper bounds,
	// so Row == 0 or Col == 0 read out of bounds via (Row-1)/(Col-1).
	if (Row >= 1 && Col >= 1 && Row < m - 1 && Col < n - 1) {
		if (Noise[(Row * m + Col)] == 255) {
			// Gather the 3x3 neighbourhood.
			int c = 0;
			for (int i = -1; i <= 1; i++) {
				for (int j = -1; j <= 1; j++) {
					vectR[c] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 0];
					vectG[c] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 1];
					vectB[c] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 2];
					posicion[c] = c;
					c++;
				}
			}
			// Aggregated L1 distance of each neighbour to all the others.
			// (Replaces the hand-unrolled D[0..35] table: Magnitud(k,k) == 0
			// and every term is a small integer exactly representable in
			// float, so the summation order cannot change the result.)
			for (int k = 0; k < 9; k++) {
				float acc = 0.0f;
				for (int x = 0; x < 9; x++)
					acc += Magnitud(vectR, vectG, vectB, k, x);
				disteucl1[k] = acc;
			}
			// Pick the neighbour with the minimal aggregated distance.
			// Fix: the old loop ran i < 8 and never considered element 8
			// (the bottom-right neighbour).
			float mn = disteucl1[0];
			int posMin = 0;
			for (int i = 1; i < 9; i++) {
				if (mn > disteucl1[i]) {
					mn = disteucl1[i];
					posMin = posicion[i];
				}
			}
			d_Pout[(Row * m + Col) * 3 + 0] = vectR[posMin];
			d_Pout[(Row * m + Col) * 3 + 1] = vectG[posMin];
			d_Pout[(Row * m + Col) * 3 + 2] = vectB[posMin];
		}
		else {
			// Clean pixel: pass it through unchanged.
			d_Pout[((Row * m) + Col) * 3 + 0] = d_Pin[((Row * m) + Col) * 3 + 0];
			d_Pout[((Row * m) + Col) * 3 + 1] = d_Pin[((Row * m) + Col) * 3 + 1];
			d_Pout[((Row * m) + Col) * 3 + 2] = d_Pin[((Row * m) + Col) * 3 + 2];
		}
	}
}
__global__ void FiltradoPropuesta2(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m) {
	// Fused detection + filtering: classify the centre pixel with the fuzzy
	// similarity metric, then apply the vector median of the 3x3 window only
	// when it is noisy; otherwise the centre pixel passes through unchanged.
	// NOTE(review): neighbourhood reads use n as the row stride while the
	// output writes use m -- only equivalent for square images; confirm the
	// intended layout.
	int Row = blockIdx.y * blockDim.y + threadIdx.y;
	int Col = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned char vectR[9], vectG[9], vectB[9];
	int posicion[9];
	float disteucl1[9];
	const unsigned int K = 1024, q = 1; // K smooths the min/max ratio; q = vote threshold
	const float d = .95;                // fuzzy similarity threshold
	// Fix: guard both borders -- the old code only checked the upper bounds,
	// so Row == 0 or Col == 0 read out of bounds via (Row-1)/(Col-1).
	if (Row >= 1 && Col >= 1 && Row < m - 1 && Col < n - 1) {
		// Gather the 3x3 neighbourhood.
		int F = 0;
		for (int i = -1; i <= 1; i++) {
			for (int j = -1; j <= 1; j++) {
				vectR[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 0];
				vectG[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 1];
				vectB[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 2];
				posicion[F] = F;
				F++;
			}
		}
		// Count neighbours fuzzy-similar to the centre (index 4).
		float arriva, abajo, val1, val2, val3, dist_M;
		unsigned int P = 0;
		for (F = 0; F <= 8; F++) {
			arriva = minCUDA(vectR[F], vectR[4]) + K;
			abajo = maxCUDA(vectR[F], vectR[4]) + K;
			val1 = arriva / abajo;
			arriva = minCUDA(vectG[F], vectG[4]) + K;
			abajo = maxCUDA(vectG[F], vectG[4]) + K;
			val2 = arriva / abajo;
			arriva = minCUDA(vectB[F], vectB[4]) + K;
			abajo = maxCUDA(vectB[F], vectB[4]) + K;
			val3 = arriva / abajo;
			dist_M = minCUDA(minCUDA(val1, val2), val3);
			if (dist_M > d) P++;
		}
		if (P <= (q + 1)) {
			// Noisy centre: vector median. Aggregated L1 distance of each
			// neighbour to all the others (replaces the hand-unrolled
			// D[0..35] table: Magnitud(k,k) == 0 and every term is a small
			// integer exactly representable in float, so summation order
			// cannot change the result).
			for (int k = 0; k < 9; k++) {
				float acc = 0.0f;
				for (int x = 0; x < 9; x++)
					acc += Magnitud(vectR, vectG, vectB, k, x);
				disteucl1[k] = acc;
			}
			// Fix: the old minimum search ran i < 8 and never considered
			// element 8 (the bottom-right neighbour).
			float mn = disteucl1[0];
			int posMin = 0;
			for (int i = 1; i < 9; i++) {
				if (mn > disteucl1[i]) {
					mn = disteucl1[i];
					posMin = posicion[i];
				}
			}
			d_Pout[(Row * m + Col) * 3 + 0] = vectR[posMin];
			d_Pout[(Row * m + Col) * 3 + 1] = vectG[posMin];
			d_Pout[(Row * m + Col) * 3 + 2] = vectB[posMin];
		}
		else {
			// Clean centre: pass it through unchanged.
			d_Pout[((Row * m) + Col) * 3 + 0] = vectR[4];
			d_Pout[((Row * m) + Col) * 3 + 1] = vectG[4];
			d_Pout[((Row * m) + Col) * 3 + 2] = vectB[4];
		}
	}
}
__device__ inline void s(unsigned char* a, unsigned char* b)
{
	// Compare-and-swap: after the call, *a <= *b.
	if (*a > *b) {
		unsigned char t = *a;
		*a = *b;
		*b = t;
	}
}
// Sorting-network building blocks built from the compare-and-swap s():
//   min3    -> a holds the minimum of {a,b,c}
//   max3    -> c holds the maximum of {a,b,c}
//   minmaxN -> first argument holds the minimum, last holds the maximum,
//              while the ordering of the middle elements is "forgotten"
// NOTE: these are multi-statement macros without do{}while(0) -- they are
// only safe when expanded as complete statements, as the kernels below do.
#define min3(a,b,c) s(a, b); s(a,c);
#define max3(a,b,c) s(b, c); s(a,c);
#define minmax3(a,b,c) max3(a, b, c); s(a,b);
#define minmax4(a,b,c,d) s(a, b); s(c,d);s(a, c); s(b,d);
#define minmax5(a,b,c,d,e) s(a, b); s(c,d);min3(a,c,e);max3(b,d,e);
#define minmax6(a,b,c,d,e,f) s(a,d);s(b,e);s(c,f);min3(a,b,c);max3(d,e,f);
__global__ void MarginalMedianFilter_Global_Forgetfull(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m) {
// Marginal (per-channel) 3x3 median filter using a "forgetful" sorting
// network: only six values are held at a time. Each minmaxN step pins the
// running minimum and maximum at the ends; both extremes can then be
// discarded (they cannot be the median), and the next pixel is streamed
// into slot 5. After the final minmax3, vect*[4] holds the median of the
// nine window samples for that channel.
// NOTE(review): reads use n as the row stride but the writes use m -- only
// equivalent for square images; confirm the intended layout.
// NOTE(review): only the upper bounds are guarded; Row == 0 or Col == 0
// reads out of bounds via (Row-1)/(Col-1).
//Calculate the row # of the d_Pin and d_Pout element to process
int Row = blockIdx.y*blockDim.y + threadIdx.y;
// Calculate the column # of the d_Pin and d_Pout element to process
int Col = blockIdx.x*blockDim.x + threadIdx.x;
// each thread computes one element of d_Pout if in range
// (should also verify pixels stay within the 8-bit range)
int x = 0, c = 0, d = 0, F, canal;
int i, j;
unsigned char vectR[9], vectG[9], vectB[9]; // only slots 0..5 are used (forgetful network)
unsigned char swap;
unsigned char a0, a1, a2, a3, a4, a5;
if ((Row < m - 1) && (Col < n - 1)) {
// load the first six samples: the top and middle rows of the window
vectR[0] = d_Pin[((Row - 1) * n + (Col - 1)) * 3 + 0];
vectG[0] = d_Pin[((Row - 1) * n + (Col - 1)) * 3 + 1];
vectB[0] = d_Pin[((Row - 1) * n + (Col - 1)) * 3 + 2];
vectR[1] = d_Pin[((Row - 1) * n + (Col + 0)) * 3 + 0];
vectG[1] = d_Pin[((Row - 1) * n + (Col + 0)) * 3 + 1];
vectB[1] = d_Pin[((Row - 1) * n + (Col + 0)) * 3 + 2];
vectR[2] = d_Pin[((Row - 1) * n + (Col + 1)) * 3 + 0];
vectG[2] = d_Pin[((Row - 1) * n + (Col + 1)) * 3 + 1];
vectB[2] = d_Pin[((Row - 1) * n + (Col + 1)) * 3 + 2];
vectR[3] = d_Pin[((Row + 0) * n + (Col - 1)) * 3 + 0];
vectG[3] = d_Pin[((Row + 0) * n + (Col - 1)) * 3 + 1];
vectB[3] = d_Pin[((Row + 0) * n + (Col - 1)) * 3 + 2];
vectR[4] = d_Pin[((Row + 0) * n + (Col + 0)) * 3 + 0];
vectG[4] = d_Pin[((Row + 0) * n + (Col + 0)) * 3 + 1];// central pixel
vectB[4] = d_Pin[((Row + 0) * n + (Col + 0)) * 3 + 2];
vectR[5] = d_Pin[((Row + 0) * n + (Col + 1)) * 3 + 0];
vectG[5] = d_Pin[((Row + 0) * n + (Col + 1)) * 3 + 1];
vectB[5] = d_Pin[((Row + 0) * n + (Col + 1)) * 3 + 2];
// pin min at slot 0 / max at slot 5; both are dropped (slot 5 is reloaded)
minmax6(&vectR[0], &vectR[1], &vectR[2], &vectR[3], &vectR[4], &vectR[5]);
minmax6(&vectG[0], &vectG[1], &vectG[2], &vectG[3], &vectG[4], &vectG[5]);
minmax6(&vectB[0], &vectB[1], &vectB[2], &vectB[3], &vectB[4], &vectB[5]);
// stream in bottom-left sample, repeat over the surviving 5 values
vectR[5] = d_Pin[((Row + 1) * n + (Col - 1)) * 3 + 0];
vectG[5] = d_Pin[((Row + 1) * n + (Col - 1)) * 3 + 1];
vectB[5] = d_Pin[((Row + 1) * n + (Col - 1)) * 3 + 2];
minmax5(&vectR[1], &vectR[2], &vectR[3], &vectR[4], &vectR[5]);
minmax5(&vectG[1], &vectG[2], &vectG[3], &vectG[4], &vectG[5]);
minmax5(&vectB[1], &vectB[2], &vectB[3], &vectB[4], &vectB[5]);
// stream in bottom-centre sample
vectR[5] = d_Pin[((Row + 1) * n + (Col + 0)) * 3 + 0];
vectG[5] = d_Pin[((Row + 1) * n + (Col + 0)) * 3 + 1];
vectB[5] = d_Pin[((Row + 1) * n + (Col + 0)) * 3 + 2];
minmax4(&vectR[2], &vectR[3], &vectR[4], &vectR[5]);
minmax4(&vectG[2], &vectG[3], &vectG[4], &vectG[5]);
minmax4(&vectB[2], &vectB[3], &vectB[4], &vectB[5]);
// stream in bottom-right sample; the survivor of minmax3 is the median
vectR[5] = d_Pin[((Row + 1) * n + (Col + 1)) * 3 + 0];
vectG[5] = d_Pin[((Row + 1) * n + (Col + 1)) * 3 + 1];
vectB[5] = d_Pin[((Row + 1) * n + (Col + 1)) * 3 + 2];
minmax3(&vectR[3], &vectR[4], &vectR[5]);
minmax3(&vectG[3], &vectG[4], &vectG[5]);
minmax3(&vectB[3], &vectB[4], &vectB[5]);
d_Pout[(Row * m + Col) * 3 + 0] = vectR[4]; // per-channel median of the 9 samples
d_Pout[(Row * m + Col) * 3 + 1] = vectG[4];
d_Pout[(Row * m + Col) * 3 + 2] = vectB[4];
}
}
//este es la propuesta
__global__ void FiltradoPropuesta_MMF(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m) {
// Fused detection + marginal median: classify the centre pixel with the
// fuzzy similarity metric; if noisy, compute the per-channel median of the
// 3x3 window with the forgetful sorting network (consuming vect*[6..8] one
// at a time through slot 5), otherwise pass the centre pixel through.
// NOTE(review): this kernel uses m as the row stride for the reads (the
// other gather loops in this file use n) -- only equivalent for square
// images; confirm the intended layout.
// NOTE(review): only the upper bounds are guarded; Row == 0 or Col == 0
// reads out of bounds via (Row-1)/(Col-1).
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int x = 0, posicion[9], hold2 = 0, F = 0;
unsigned char vectR[9], vectG[9], vectB[9];
float disteucl, disteucl1[9], hold, D[40];
float valAngulo = 0.0, r = 0.0;
float arriva = 0.0, abajo = 0.0, val1, val2, val3, dist_M = 0, Noise = 0.0;
unsigned int P = 0; // votes: neighbours similar to the centre
const unsigned int K = 1024, q = 1; // K smooths the min/max ratio; q = vote threshold
const float d = .95; // fuzzy similarity threshold
float mn, mx;
int posMin = 0;
if ((Row < m - 1) && (Col < n - 1)) {
// gather the 3x3 neighbourhood
F = 0;
for (int i = -1; i <= 1; i++) {
for (int j = -1; j <= 1; j++) {
vectR[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
vectG[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
vectB[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
posicion[F] = F;
F++;
}
}
// per-channel fuzzy similarity to the centre (index 4): (min+K)/(max+K)
// per channel, combined by taking the minimum of the three ratios
for (F = 0; F <= 8; F++) {
arriva = minCUDA(vectR[F], vectR[4]) + K;
abajo = maxCUDA(vectR[F], vectR[4]) + K;
val1 = arriva / abajo;
arriva = minCUDA(vectG[F], vectG[4]) + K;
abajo = maxCUDA(vectG[F], vectG[4]) + K;
val2 = arriva / abajo;
arriva = minCUDA(vectB[F], vectB[4]) + K;
abajo = maxCUDA(vectB[F], vectB[4]) + K;
val3 = arriva / abajo;
dist_M = minCUDA(minCUDA(val1, val2), val3);
if (dist_M>d) P++;
}
if (P <= (q + 1)) {
Noise = 255; // too few similar neighbours: impulsive
}
else {
Noise = 0;
}
if (Noise == 255) {
// forgetful median network over the 9 gathered samples: each minmaxN
// pins min/max at the ends, the extremes are dropped, and the next
// unconsumed sample (6, 7, then 8) is copied into slot 5
minmax6(&vectR[0], &vectR[1], &vectR[2], &vectR[3], &vectR[4], &vectR[5]);
minmax6(&vectG[0], &vectG[1], &vectG[2], &vectG[3], &vectG[4], &vectG[5]);
minmax6(&vectB[0], &vectB[1], &vectB[2], &vectB[3], &vectB[4], &vectB[5]);
vectR[5] = vectR[6];
vectG[5] = vectG[6];
vectB[5] = vectB[6];
minmax5(&vectR[1], &vectR[2], &vectR[3], &vectR[4], &vectR[5]);
minmax5(&vectG[1], &vectG[2], &vectG[3], &vectG[4], &vectG[5]);
minmax5(&vectB[1], &vectB[2], &vectB[3], &vectB[4], &vectB[5]);
vectR[5] = vectR[7];
vectG[5] = vectG[7];
vectB[5] = vectB[7];
minmax4(&vectR[2], &vectR[3], &vectR[4], &vectR[5]);
minmax4(&vectG[2], &vectG[3], &vectG[4], &vectG[5]);
minmax4(&vectB[2], &vectB[3], &vectB[4], &vectB[5]);
vectR[5] = vectR[8];
vectG[5] = vectG[8];
vectB[5] = vectB[8];
minmax3(&vectR[3], &vectR[4], &vectR[5]);
minmax3(&vectG[3], &vectG[4], &vectG[5]);
minmax3(&vectB[3], &vectB[4], &vectB[5]);
d_Pout[(Row * m + Col) * 3 + 0] = vectR[4]; // per-channel median of the 9 samples
d_Pout[(Row * m + Col) * 3 + 1] = vectG[4];
d_Pout[(Row * m + Col) * 3 + 2] = vectB[4];
}
else {
// clean pixel: pass the centre through unchanged
d_Pout[((Row*m) + Col) * 3 + 0] = vectR[4];
d_Pout[((Row*m) + Col) * 3 + 1] = vectG[4];
d_Pout[((Row*m) + Col) * 3 + 2] = vectB[4];
}
}//if de Row y Col
}// end of kernel
//propuesta con filtrado VMF
__global__ void FiltradoPropuesta_VMF(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m) {
	// Fused detection + VMF: classify the centre pixel with the fuzzy
	// similarity metric; if noisy, replace it with the vector median of the
	// 3x3 neighbourhood, otherwise write the centre pixel back unchanged.
	// Uses m as the row stride throughout (confirm the intended layout at
	// the call site).
	int Row = blockIdx.y * blockDim.y + threadIdx.y;
	int Col = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned char vectR[9], vectG[9], vectB[9];
	int posicion[9];
	float disteucl1[9];
	const unsigned int K = 1024, q = 1; // K smooths the min/max ratio; q = vote threshold
	const float d = .95;                // fuzzy similarity threshold
	// Fix: guard both borders -- the old code only checked the upper bounds,
	// so Row == 0 or Col == 0 read out of bounds via (Row-1)/(Col-1).
	if (Row >= 1 && Col >= 1 && Row < m - 1 && Col < n - 1) {
		// Gather the 3x3 neighbourhood.
		int F = 0;
		for (int i = -1; i <= 1; i++) {
			for (int j = -1; j <= 1; j++) {
				vectR[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
				vectG[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
				vectB[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
				posicion[F] = F;
				F++;
			}
		}
		// Count neighbours fuzzy-similar to the centre (index 4).
		float arriva, abajo, val1, val2, val3, dist_M;
		unsigned int P = 0;
		for (F = 0; F <= 8; F++) {
			arriva = minCUDA(vectR[F], vectR[4]) + K;
			abajo = maxCUDA(vectR[F], vectR[4]) + K;
			val1 = arriva / abajo;
			arriva = minCUDA(vectG[F], vectG[4]) + K;
			abajo = maxCUDA(vectG[F], vectG[4]) + K;
			val2 = arriva / abajo;
			arriva = minCUDA(vectB[F], vectB[4]) + K;
			abajo = maxCUDA(vectB[F], vectB[4]) + K;
			val3 = arriva / abajo;
			dist_M = minCUDA(minCUDA(val1, val2), val3);
			if (dist_M > d) P++;
		}
		if (P <= (q + 1)) {
			// Noisy centre: aggregated L1 distance of each neighbour to all
			// the others.
			for (F = 0; F <= 8; F++) {
				float disteucl = 0;
				for (int x = 0; x <= 8; x++) {
					float distR = abs(vectR[F] - vectR[x]);
					float distG = abs(vectG[F] - vectG[x]);
					float distB = abs(vectB[F] - vectB[x]);
					disteucl += distR + distB + distG;
				}
				disteucl1[F] = disteucl;
			}
			// Fix: the old minimum search ran i < 8 and never considered
			// element 8 (the bottom-right neighbour).
			float mn = disteucl1[0];
			int posMin = 0;
			for (int i = 1; i < 9; i++) {
				if (mn > disteucl1[i]) {
					mn = disteucl1[i];
					posMin = posicion[i];
				}
			}
			d_Pout[(Row * m + Col) * 3 + 0] = vectR[posMin];
			d_Pout[(Row * m + Col) * 3 + 1] = vectG[posMin];
			d_Pout[(Row * m + Col) * 3 + 2] = vectB[posMin];
		}
		else {
			// Clean centre: pass it through unchanged.
			d_Pout[((Row * m) + Col) * 3 + 0] = vectR[4];
			d_Pout[((Row * m) + Col) * 3 + 1] = vectG[4];
			d_Pout[((Row * m) + Col) * 3 + 2] = vectB[4];
		}
	}
}
__global__ void FiltradoPropuesta_AMF(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m) {
	// Fused detection + AMF: classify the centre pixel with the fuzzy
	// similarity metric; if noisy, replace it with the plain 3x3 arithmetic
	// mean, otherwise write the centre pixel back unchanged. Uses m as the
	// row stride throughout (confirm the intended layout at the call site).
	// Cleanup: removed the many unused locals and the redundant Noise flag
	// (the classification now branches directly); behaviour is unchanged.
	int Row = blockIdx.y * blockDim.y + threadIdx.y;
	int Col = blockIdx.x * blockDim.x + threadIdx.x;
	unsigned char vectR[9], vectG[9], vectB[9];
	const unsigned int K = 1024, q = 1; // K smooths the min/max ratio; q = vote threshold
	const float d = .95;                // fuzzy similarity threshold
	float sumR = 0.0, sumG = 0.0, sumB = 0.0;
	if ((Row > 1) && (Col > 1) && (Row < m - 1) && (Col < n - 1)) {
		// Gather the 3x3 neighbourhood.
		int F = 0;
		for (int i = -1; i <= 1; i++) {
			for (int j = -1; j <= 1; j++) {
				vectR[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
				vectG[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
				vectB[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
				F++;
			}
		}
		// Count neighbours fuzzy-similar to the centre (index 4).
		float arriva, abajo, val1, val2, val3, dist_M;
		unsigned int P = 0;
		for (F = 0; F <= 8; F++) {
			arriva = minCUDA(vectR[F], vectR[4]) + K;
			abajo = maxCUDA(vectR[F], vectR[4]) + K;
			val1 = arriva / abajo;
			arriva = minCUDA(vectG[F], vectG[4]) + K;
			abajo = maxCUDA(vectG[F], vectG[4]) + K;
			val2 = arriva / abajo;
			arriva = minCUDA(vectB[F], vectB[4]) + K;
			abajo = maxCUDA(vectB[F], vectB[4]) + K;
			val3 = arriva / abajo;
			dist_M = minCUDA(minCUDA(val1, val2), val3);
			if (dist_M > d) P++;
		}
		if (P <= (q + 1)) {
			// Noisy centre: arithmetic mean over the full 3x3 window.
			for (int i = -1; i <= 1; i++) {
				for (int j = -1; j <= 1; j++) {
					sumR += d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
					sumG += d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
					sumB += d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
				}
			}
			d_Pout[((Row * m) + Col) * 3 + 0] = sumR / 9;
			d_Pout[((Row * m) + Col) * 3 + 1] = sumG / 9;
			d_Pout[((Row * m) + Col) * 3 + 2] = sumB / 9;
		}
		else {
			// Clean centre: pass it through unchanged.
			d_Pout[((Row * m) + Col) * 3 + 0] = vectR[4];
			d_Pout[((Row * m) + Col) * 3 + 1] = vectG[4];
			d_Pout[((Row * m) + Col) * 3 + 2] = vectB[4];
		}
	}
}
// Vector Median Filter (VMF) over a 3x3 window.  Each unordered pair of
// window pixels is measured exactly once (D holds the upper triangle of the
// 9x9 distance matrix, laid out row by row), and the output is the window
// member whose summed distance to the other eight members is minimal.
// Launch: 2D grid/block, one thread per pixel; 3 interleaved channels, row
// stride m.
// Fixes vs. the original: (1) Row==0 / Col==0 threads are now skipped -- the
// old guard let them read d_Pin at index Row-1/Col-1, out of bounds; (2) the
// minimum search also considers the last candidate (index 8), which the old
// `i < 8` loop silently ignored.
__global__ void VMF_Global_Forgetfull_Reuse(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m)
{
    int Row = blockIdx.y*blockDim.y + threadIdx.y;
    int Col = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned char vectR[9], vectG[9], vectB[9];   // 3x3 window per channel
    float D[36];                                  // upper-triangular pairwise distances
    float disteucl1[9];                           // aggregate distance of each candidate
    if ((Row > 0) && (Col > 0) && (Row < m - 1) && (Col < n - 1)) {
        int F = 0;
        for (int i = -1; i <= 1; i++) {
            for (int j = -1; j <= 1; j++) {
                vectR[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
                vectG[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
                vectB[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
                F++;
            }
        }
        // Fill the triangle: for i < j, D index = i*8 - i*(i-1)/2 + (j-i-1),
        // identical to the original hand-unrolled layout.
        int k = 0;
        for (int i = 0; i < 9; i++)
            for (int j = i + 1; j < 9; j++)
                D[k++] = Magnitud(vectR, vectG, vectB, i, j);
        // Aggregate: distance of candidate c to every other member, summed in
        // increasing index order (same float addition order as the original).
        for (int c = 0; c < 9; c++) {
            float acc = 0.0f;
            for (int other = 0; other < 9; other++) {
                if (other == c) continue;
                int lo = (c < other) ? c : other;
                int hi = (c < other) ? other : c;
                acc += D[lo * 8 - lo * (lo - 1) / 2 + (hi - lo - 1)];
            }
            disteucl1[c] = acc;
        }
        // Vector median = candidate with the minimal aggregate distance.
        float mn = disteucl1[0];
        int posMin = 0;
        for (int i = 1; i < 9; i++) {
            if (mn > disteucl1[i]) {
                mn = disteucl1[i];
                posMin = i;
            }
        }
        d_Pout[(Row * m + Col) * 3 + 0] = vectR[posMin];
        d_Pout[(Row * m + Col) * 3 + 1] = vectG[posMin];
        d_Pout[(Row * m + Col) * 3 + 2] = vectB[posMin];
    }
}
/*
__global__ void Idea_VMF_FuzzyPeer(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m)
{
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int x = 0, posicion[9], hold2 = 0, F = 0;
unsigned char vectR[9], vectG[9], vectB[9];
float disteucl = 0.0, disteucl1[9], hold;
float D[40];
float mn, mx;
int posMin = 0;
float arriva = 0.0, abajo = 0.0, val1, val2, val3, dist_M = 0;
if ((Row < m - 1) && (Col < n - 1)){
F = 0;
for (int i = -1; i <= 1; i++){
for (int j = -1; j <= 1; j++){
vectR[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
vectG[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
vectB[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
posicion[F] = F;
F++;
}
}
//D[0]=Magnitud(vectR, vectG, vectB, i, j//i==0 y j==0 no se hace
D[0] = (Magnitud(vectR, vectG, vectB, 0, 1));
D[1] = (Magnitud(vectR, vectG, vectB, 0, 2));
D[2] = (Magnitud(vectR, vectG, vectB, 0, 3));
D[3] = (Magnitud(vectR, vectG, vectB, 0, 4));
D[4] = (Magnitud(vectR, vectG, vectB, 0, 5));
D[5] = (Magnitud(vectR, vectG, vectB, 0, 6));
D[6] = (Magnitud(vectR, vectG, vectB, 0, 7));
D[7] = (Magnitud(vectR, vectG, vectB, 0, 8));
disteucl1[0] = D[0] + D[1] + D[2] + D[3] + D[4] + D[5] + D[6] + D[7];
//i=1,j=0 ya esta es D[0]
//i=1,j=1 No se hace
D[8] = (Magnitud(vectR, vectG, vectB, 1, 2));
D[9] = (Magnitud(vectR, vectG, vectB, 1, 3));
D[10] = (Magnitud(vectR, vectG, vectB, 1, 4));
D[11] = (Magnitud(vectR, vectG, vectB, 1, 5));
D[12] = (Magnitud(vectR, vectG, vectB, 1, 6));
D[13] = (Magnitud(vectR, vectG, vectB, 1, 7));
D[14] = (Magnitud(vectR, vectG, vectB, 1, 8));
disteucl1[1] = D[0] + D[8] + D[9] + D[10] + D[11] + D[12] + D[13] + D[14];
//i=2,j=0 ya esta es D[1]
//i=2,j=1 ya esta es D[8]
//i=2,j=2 No se hace
D[15] = (Magnitud(vectR, vectG, vectB, 2, 3));
D[16] = (Magnitud(vectR, vectG, vectB, 2, 4));
D[17] = (Magnitud(vectR, vectG, vectB, 2, 5));
D[18] = (Magnitud(vectR, vectG, vectB, 2, 6));
D[19] = (Magnitud(vectR, vectG, vectB, 2, 7));
D[20] = (Magnitud(vectR, vectG, vectB, 2, 8));
disteucl1[2] = D[1] + D[8] + D[15] + D[16] + D[17] + D[18] + D[19] + D[20];
//i=3,j=0 ya esta es D[2]
//i=3,j=1 ya esta es D[9]
//i=3,j=2 ya esta es D[15]
//i=3,j=3 No se hace
D[21] = (Magnitud(vectR, vectG, vectB, 3, 4));
D[22] = (Magnitud(vectR, vectG, vectB, 3, 5));
D[23] = (Magnitud(vectR, vectG, vectB, 3, 6));
D[24] = (Magnitud(vectR, vectG, vectB, 3, 7));
D[25] = (Magnitud(vectR, vectG, vectB, 3, 8));
disteucl1[3] = D[2] + D[9] + D[15] + D[21] + D[22] + D[23] + D[24] + D[25];
//i=4,j=0 ya esta es D[3]
//i=4,j=1 ya esta es D[10]
//i=4,j=2 ya esta es D[16]
//i=4,j=3 ya esta es D[21]
//i=4,j=4 No se hace
D[26] = (Magnitud(vectR, vectG, vectB, 4, 5));
D[27] = (Magnitud(vectR, vectG, vectB, 4, 6));
D[28] = (Magnitud(vectR, vectG, vectB, 4, 7));
D[29] = (Magnitud(vectR, vectG, vectB, 4, 8));
disteucl1[4] = D[3] + D[10] + D[16] + D[21] + D[26] + D[27] + D[28] + D[29];
//i=5,j=0 ya esta es D[4]
//i=5,j=1 ya esta es D[11]
//i=5,j=2 ya esta es D[17]
//i=5,j=3 ya esta es D[22]
//i=5,j=4 ya esta es D[26]
//i=5,j=5 No se hace
D[30] = (Magnitud(vectR, vectG, vectB, 5, 6));
D[31] = (Magnitud(vectR, vectG, vectB, 5, 7));
D[32] = (Magnitud(vectR, vectG, vectB, 5, 8));
disteucl1[5] = D[4] + D[11] + D[17] + D[22] + D[26] + D[30] + D[31] + D[32];
//i=6,j=0 ya esta es D[5]
//i=6,j=1 ya esta es D[12]
//i=6,j=2 ya esta es D[18]
//i=6,j=3 ya esta es D[23]
//i=6,j=4 ya esta es D[27]
//i=6,j=5 ya esta es D[30]
//i=6,j=6 No se hace
D[33] = (Magnitud(vectR, vectG, vectB, 6, 7));
D[34] = (Magnitud(vectR, vectG, vectB, 6, 8));
disteucl1[6] = D[5] + D[12] + D[18] + D[23] + D[27] + D[30] + D[33] + D[34];
//i=7,j=0 ya esta es D[6]
//i=7,j=1 ya esta es D[13]
//i=7,j=2 ya esta es D[19]
//i=7,j=3 ya esta es D[24]
//i=7,j=4 ya esta es D[28]
//i=7,j=5 ya esta es D[31]
//i=7,j=6 ya esta es D[33]
//i=7,j=7 No se hace
D[35] = (Magnitud(vectR, vectG, vectB, 7, 8));
disteucl1[7] = D[6] + D[13] + D[19] + D[24] + D[28] + D[31] + D[33] + D[35];
//i=8,j=0 ya esta es D[7]
//i=8,j=1 ya esta es D[14]
//i=8,j=2 ya esta es D[20]
//i=8,j=3 ya esta es D[25]
//i=8,j=4 ya esta es D[29]
//i=8,j=5 ya esta es D[32]
//i=8,j=6 ya esta es D[34]
//i=8,j=7 ya esta es D[35]
//i=8,j=8 No se hace
disteucl1[8] = D[7] + D[14] + D[20] + D[25] + D[29] + D[32] + D[34] + D[35];
mn = disteucl1[0];
mx = disteucl1[0];
posMin = 0;
for (int i = 0; i<8; i++)
{
if (mn>disteucl1[i])
{
mn = disteucl1[i];
posMin = posicion[i];
}
else if (mx<disteucl1[i])
{
}
}
vectR[4] = vectR[posMin];
vectG[4] = vectG[posMin];
vectB[4] = vectB[posMin];
for (F = 0; F <= 8; F++){
arriva = minCUDA(vectR[F], vectR[4]) + K;
abajo = maxCUDA(vectR[F], vectR[4]) + K;
val1 = arriva / abajo;
arriva = minCUDA(vectG[F], vectG[4]) + K;
abajo = maxCUDA(vectG[F], vectG[4]) + K;
val2 = arriva / abajo;
arriva = minCUDA(vectB[F], vectB[4]) + K;
abajo = maxCUDA(vectB[F], vectB[4]) + K;
val3 = arriva / abajo;
dist_M = minCUDA(minCUDA(val1, val2), val3);
if (dist_M>d) P++;
}
if (P <= (q + 1)){
Noise[(Row * m + Col)] = 255;
}
else{
Noise[(Row * m + Col)] = 0;
}
d_Pout[(Row * m + Col) * 3 + 0] = vectR[posMin];
d_Pout[(Row * m + Col) * 3 + 1] = vectG[posMin];
d_Pout[(Row * m + Col) * 3 + 2] = vectB[posMin];
}
}
*/
// L1 (city-block) distance between window pixels i and j, whose three colour
// channels are stored in the parallel arrays VectR/VectG/VectB.
// fabsf is used explicitly so the float overload is always selected (a bare
// `abs` can bind to the int overload in device code and truncate).
__device__ float MagnitudL1(float* VectR, float* VectG, float* VectB, unsigned int i, unsigned int j) {
    float distR = fabsf(VectR[i] - VectR[j]);
    float distG = fabsf(VectG[i] - VectG[j]);
    float distB = fabsf(VectB[i] - VectB[j]);
    return distR + distB + distG;
}
// "Large" fuzzy membership (S-shaped curve): 0 below a, 1 above b, quadratic
// transition in between, split at the integer midpoint (a+b)/2 exactly as in
// the original.  Assumes a < b (b == a would divide by zero).
// Fix vs. the original: every path now returns a value -- the old chain of
// independent ifs could fall off the end of a non-void function (e.g. for a
// NaN input), which is undefined behaviour.
__device__ float S_shape(float Nabla, unsigned int a, unsigned int b) {
    if (Nabla <= a) return 0;
    if (Nabla >= b) return 1;   // also guarantees a return on every path
    if (Nabla <= ((a + b) / 2)) {
        float aux = (Nabla - a) / (b - a);
        return 2 * aux*aux;
    }
    float aux = ((Nabla - b) / (b - a));
    return 1 - (2 * aux*aux);
}
// "Small" fuzzy membership (Z-shaped curve): 1 below a, 0 above b, quadratic
// transition in between, split at the integer midpoint (a+b)/2 exactly as in
// the original.  Assumes a < b (b == a would divide by zero).
// Fix vs. the original: every path now returns a value -- the old chain of
// independent ifs could fall off the end of a non-void function (e.g. for a
// NaN input), which is undefined behaviour.
__device__ float Z_shape(float Nabla, unsigned int a, unsigned int b) {
    if (Nabla <= a) return 1;
    if (Nabla >= b) return 0;   // also guarantees a return on every path
    if (Nabla <= ((a + b) / 2)) {
        float aux = (Nabla - a) / (b - a);
        return 1 - (2 * aux*aux);
    }
    float aux = (Nabla - b) / (b - a);
    return 2 * aux*aux;
}
// FTSCF: fuzzy two-stage colour filter over a 5x5 window.
// Stage 1 (detection): eight directional masks (NW, N, NE, E, SE, S, SW, W)
// around the centre combine S-shaped ("large gradient") and Z-shaped ("small
// gradient") memberships of L1 colour differences into per-direction degrees
// rs[]; r is their maximum (noisiness of the centre).
// Stage 2 (correction): if r > THS, the centre is replaced by the vector
// median of the inner 3x3 (5x5 indices 6,7,8,11,12,13,16,17,18); otherwise
// the centre pixel (index 12) passes through.
// Launch: 2D grid/block, one thread per pixel; 3 interleaved channels.
// a, b parameterise the S_shape/Z_shape membership curves.
// NOTE(review): the window is read with row stride n but the output is
// written with stride m -- confirm which of n/m is the row stride; sibling
// kernels in this file use m for both.
// NOTE(review): there is no lower-bound guard, so threads with Row < 2 or
// Col < 2 read d_Pin out of bounds at (Row-2)/(Col-2) -- confirm the launch
// excludes those threads.
__global__ void FTSCF_GPU
(unsigned char* d_Pout, const unsigned char* d_Pin, const unsigned int a,
const unsigned int b, const unsigned int THS, int n, int m) {
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int x = 0, posicion[9], posMin = 6, F = 0, i = 0, j = 0;
// 5x5 window, one array per colour channel (index 12 = centre).
float vectR[25], vectG[25], vectB[25];
// D: cached pairwise L1 distances (each computed once, reused across masks);
// rs: per-direction noisiness; uGran/uPeque: "large"/"small" memberships.
float D[45], disteucl1[9], uGran[3], uPeque[3], rs[9], r = 0;
float mn, mx;
// posicion maps median candidates 0..8 onto the inner-3x3 indices of the 5x5.
posicion[0] = 6; posicion[1] = 7; posicion[2] = 8; posicion[3] = 11; posicion[4] = 12;
posicion[5] = 13; posicion[6] = 16; posicion[7] = 17; posicion[8] = 18;
if ((Row < m - 3) && (Col < n - 3)) {
// Load the 5x5 neighbourhood row by row.
for (i = -2; i <= 2; i++) {
for (j = -2; j <= 2; j++) {
vectR[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 0];
vectG[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 1];
vectB[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 2];
F++;
}
}
//NW
D[0] = (MagnitudL1(vectR, vectG, vectB, 12, 6));
uGran[0] = S_shape(D[0], a, b);
D[1] = (MagnitudL1(vectR, vectG, vectB, 12, 8));
uGran[1] = S_shape(D[1], a, b);
D[2] = (MagnitudL1(vectR, vectG, vectB, 12, 16));
uGran[2] = S_shape(D[2], a, b);
D[3] = (MagnitudL1(vectR, vectG, vectB, 16, 10));
uPeque[0] = Z_shape(D[3], a, b);
D[4] = (MagnitudL1(vectR, vectG, vectB, 2, 8));
uPeque[1] = Z_shape(D[4], a, b);
// Direction degree = product (fuzzy AND) of all five memberships.
rs[0] = uGran[0] * uGran[1] * uGran[2] * uPeque[0] * uPeque[1];
//N
D[5] = (MagnitudL1(vectR, vectG, vectB, 12, 7));
uGran[0] = S_shape(D[5], a, b);
D[6] = (MagnitudL1(vectR, vectG, vectB, 12, 13));
uGran[1] = S_shape(D[6], a, b);
D[7] = (MagnitudL1(vectR, vectG, vectB, 12, 11));
uGran[2] = S_shape(D[7], a, b);
D[8] = (MagnitudL1(vectR, vectG, vectB, 11, 6));
uPeque[0] = Z_shape(D[8], a, b);
D[9] = (MagnitudL1(vectR, vectG, vectB, 8, 13));
uPeque[1] = Z_shape(D[9], a, b);
rs[1] = uGran[0] * uGran[1] * uGran[2] * uPeque[0] * uPeque[1];
//NE
//D[10] = (MagnitudL1(vectR, vectG, vectB, 12, 8));
// same as D[1]
uGran[0] = S_shape(D[1], a, b);
//D[11] = (MagnitudL1(vectR, vectG, vectB, 12, 6));
// same as D[0]
uGran[1] = S_shape(D[0], a, b);
D[10] = (MagnitudL1(vectR, vectG, vectB, 12, 18));
uGran[2] = S_shape(D[10], a, b);
D[11] = (MagnitudL1(vectR, vectG, vectB, 18, 14));
uPeque[0] = Z_shape(D[11], a, b);
D[12] = (MagnitudL1(vectR, vectG, vectB, 6, 2));
uPeque[1] = Z_shape(D[12], a, b);
rs[2] = uGran[0] * uGran[1] * uGran[2] * uPeque[0] * uPeque[1];
//E
//D[15] = (MagnitudL1(vectR, vectG, vectB, 12, 13));
// same as D[6]
uGran[0] = S_shape(D[6], a, b);
//D[16] = (MagnitudL1(vectR, vectG, vectB, 12, 7));
// same as D[5]
uGran[1] = S_shape(D[5], a, b);
D[13] = (MagnitudL1(vectR, vectG, vectB, 12, 17));
uGran[2] = S_shape(D[13], a, b);
D[14] = (MagnitudL1(vectR, vectG, vectB, 7, 8));
uPeque[0] = Z_shape(D[14], a, b);
D[15] = (MagnitudL1(vectR, vectG, vectB, 17, 18));
uPeque[1] = Z_shape(D[15], a, b);
rs[3] = uGran[0] * uGran[1] * uGran[2] * uPeque[0] * uPeque[1];
//SE
//D[20] = (MagnitudL1(vectR, vectG, vectB, 12, 18));
// same as D[10]
uGran[0] = S_shape(D[10], a, b);
// same as D[2]
//D[21] = (MagnitudL1(vectR, vectG, vectB, 12, 16));
uGran[1] = S_shape(D[2], a, b);
// same as D[1]
//D[22] = (MagnitudL1(vectR, vectG, vectB, 12, 8));
uGran[2] = S_shape(D[1], a, b);
D[16] = (MagnitudL1(vectR, vectG, vectB, 16, 22));
uPeque[0] = Z_shape(D[16], a, b);
D[17] = (MagnitudL1(vectR, vectG, vectB, 8, 14));
uPeque[1] = Z_shape(D[17], a, b);
rs[4] = uGran[0] * uGran[1] * uGran[2] * uPeque[0] * uPeque[1];
//S
//D[18] = (MagnitudL1(vectR, vectG, vectB, 12, 17));
// same as D[13]
uGran[0] = S_shape(D[13], a, b);
// same as D[7]
//D[26] = (MagnitudL1(vectR, vectG, vectB, 12, 11));
uGran[1] = S_shape(D[7], a, b);
// same as D[6]
//D[27] = (MagnitudL1(vectR, vectG, vectB, 12, 13));
uGran[2] = S_shape(D[6], a, b);
D[18] = (MagnitudL1(vectR, vectG, vectB, 11, 16));
uPeque[0] = Z_shape(D[18], a, b);
D[19] = (MagnitudL1(vectR, vectG, vectB, 13, 18));
uPeque[1] = Z_shape(D[19], a, b);
rs[5] = uGran[0] * uGran[1] * uGran[2] * uPeque[0] * uPeque[1];
//SW
// same as D[2]
//D[30] = (MagnitudL1(vectR, vectG, vectB, 12, 16));
uGran[0] = S_shape(D[2], a, b);
// same as D[0]
//D[31] = (MagnitudL1(vectR, vectG, vectB, 12, 6));
uGran[1] = S_shape(D[0], a, b);
// same as D[10]
//D[32] = (MagnitudL1(vectR, vectG, vectB, 12, 18));
uGran[2] = S_shape(D[10], a, b);
D[20] = (MagnitudL1(vectR, vectG, vectB, 6, 10));
uPeque[0] = Z_shape(D[20], a, b);
D[21] = (MagnitudL1(vectR, vectG, vectB, 18, 22));
uPeque[1] = Z_shape(D[21], a, b);
rs[6] = uGran[0] * uGran[1] * uGran[2] * uPeque[0] * uPeque[1];
//W
// same as D[7]
//D[35] = (MagnitudL1(vectR, vectG, vectB, 12, 11));
uGran[0] = S_shape(D[7], a, b);
// same as D[5]
//D[36] = (MagnitudL1(vectR, vectG, vectB, 12, 7));
uGran[1] = S_shape(D[5], a, b);
// same as D[13]
//D[37] = (MagnitudL1(vectR, vectG, vectB, 12, 17));
uGran[2] = S_shape(D[13], a, b);
// NOTE: D[21] is intentionally overwritten here; its SW value (dist 18-22)
// was already consumed into rs[6] above, and the VMF stage below expects
// D[21] to hold the 6-7 distance.
D[21] = (MagnitudL1(vectR, vectG, vectB, 6, 7));
uPeque[0] = Z_shape(D[21], a, b);
D[22] = (MagnitudL1(vectR, vectG, vectB, 16, 17));
uPeque[1] = Z_shape(D[22], a, b);
rs[7] = uGran[0] * uGran[1] * uGran[2] * uPeque[0] * uPeque[1];
// r = max of the eight directional degrees (rs[8] is never written or read).
mn = rs[0];
r = rs[0];
for (i = 0; i <= 7; i++)
{
if (r<rs[i])
{
r = rs[i];
}
}
// VMF filter stage: runs only when the centre is judged noisy.
if (r > THS) {
// Complete the pairwise distances of the inner 3x3; distances already in
// D[] from the detection stage are reused rather than recomputed.
D[23] = (MagnitudL1(vectR, vectG, vectB, 6, 8));
D[24] = (MagnitudL1(vectR, vectG, vectB, 6, 13));
D[25] = (MagnitudL1(vectR, vectG, vectB, 6, 16));
D[26] = (MagnitudL1(vectR, vectG, vectB, 6, 17));
D[27] = (MagnitudL1(vectR, vectG, vectB, 6, 18));
disteucl1[0] = D[0] + D[8] + D[21] + D[23] + D[24] + D[25] + D[26] + D[27];
D[28] = (MagnitudL1(vectR, vectG, vectB, 7, 11));
D[29] = (MagnitudL1(vectR, vectG, vectB, 7, 13));
D[30] = (MagnitudL1(vectR, vectG, vectB, 7, 16));
D[31] = (MagnitudL1(vectR, vectG, vectB, 7, 17));
D[32] = (MagnitudL1(vectR, vectG, vectB, 7, 18));
disteucl1[1] = D[21] + D[5] + D[14] + D[28] + D[29] + D[30] + D[31] + D[32];
// same as D[26]: D[33] = (MagnitudL1(vectR, vectG, vectB, 8, 6));
D[33] = (MagnitudL1(vectR, vectG, vectB, 8, 11));
D[34] = (MagnitudL1(vectR, vectG, vectB, 8, 16));
D[35] = (MagnitudL1(vectR, vectG, vectB, 8, 17));
D[36] = (MagnitudL1(vectR, vectG, vectB, 8, 18));
disteucl1[2] = D[14] + D[1] + D[9] + D[26] + D[33] + D[34] + D[35] + D[36];
// same as D[28]: D[37] = (MagnitudL1(vectR, vectG, vectB, 11, 7));
// same as D[33]: D[38] = (MagnitudL1(vectR, vectG, vectB, 11, 8));
D[37] = (MagnitudL1(vectR, vectG, vectB, 11, 13));
D[38] = (MagnitudL1(vectR, vectG, vectB, 11, 17));
D[39] = (MagnitudL1(vectR, vectG, vectB, 11, 18));
disteucl1[3] = D[7] + D[8] + D[18] + D[28] + D[33] + D[37] + D[38] + D[39];
// Centre pixel: every needed distance is already available.
disteucl1[4] = D[0] + D[5] + D[1] + D[7] + D[6] + D[2] + D[13] + D[10];
D[40] = (MagnitudL1(vectR, vectG, vectB, 13, 16));
D[41] = (MagnitudL1(vectR, vectG, vectB, 13, 17));
disteucl1[5] = D[6] + D[19] + D[9] + D[24] + D[29] + D[37] + D[40] + D[41];
D[42] = (MagnitudL1(vectR, vectG, vectB, 16, 18));
disteucl1[6] = D[18] + D[2] + D[22] + D[25] + D[30] + D[34] + D[40] + D[42];
disteucl1[7] = D[22] + D[13] + D[15] + D[26] + D[31] + D[35] + D[38] + D[41];
disteucl1[8] = D[19] + D[10] + D[15] + D[27] + D[32] + D[36] + D[39] + D[42];
// Pick the candidate with minimal aggregate distance; posMin defaults to
// posicion[0] (= 6), matching mn's initial value.
// NOTE(review): the loop bound `i <= 7` never examines disteucl1[8]
// (candidate 18) -- looks like an off-by-one; confirm against the CPU
// reference before changing.
posMin = 6;
mn = disteucl1[0];
for (i = 0; i <= 7; i++) {
if (mn>disteucl1[i]) {
mn = disteucl1[i];
posMin = posicion[i];
}
}
d_Pout[(Row * m + Col) * 3 + 0] = vectR[posMin];
d_Pout[(Row * m + Col) * 3 + 1] = vectG[posMin];
d_Pout[(Row * m + Col) * 3 + 2] = vectB[posMin];
/*
d_Pout[(Row * m + Col) * 3 + 0] = 255;
d_Pout[(Row * m + Col) * 3 + 1] = 255;
d_Pout[(Row * m + Col) * 3 + 2] = 255;
*/
}
else {
// Not noise: output the central pixel of the window unchanged.
d_Pout[(Row * m + Col) * 3 + 0] = vectR[12];
d_Pout[(Row * m + Col) * 3 + 1] = vectG[12];
d_Pout[(Row * m + Col) * 3 + 2] = vectB[12];
}
}
}
// Vector Median Filter (VMF) over a 3x3 window (same algorithm as
// VMF_Global_Forgetfull_Reuse in this file).  Each unordered pixel pair is
// measured once into the upper-triangular cache D, and the output is the
// window member with the smallest summed distance to the other members.
// Launch: 2D grid/block, one thread per pixel; 3 interleaved channels, row
// stride m.
// Fixes vs. the original: (1) Row==0 / Col==0 threads are now skipped -- the
// old guard let them read d_Pin at index Row-1/Col-1, out of bounds; (2) the
// minimum search also considers the last candidate (index 8), which the old
// `i < 8` loop silently ignored.
__global__ void VMF_Global_TwoPixels(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m)
{
    int Row = blockIdx.y*blockDim.y + threadIdx.y;
    int Col = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned char vectR[9], vectG[9], vectB[9];   // 3x3 window per channel
    float D[36];                                  // upper-triangular pairwise distances
    float disteucl1[9];                           // aggregate distance of each candidate
    if ((Row > 0) && (Col > 0) && (Row < m - 1) && (Col < n - 1)) {
        int F = 0;
        for (int i = -1; i <= 1; i++) {
            for (int j = -1; j <= 1; j++) {
                vectR[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
                vectG[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
                vectB[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
                F++;
            }
        }
        // Fill the triangle: for i < j, D index = i*8 - i*(i-1)/2 + (j-i-1),
        // identical to the original hand-unrolled layout.
        int k = 0;
        for (int i = 0; i < 9; i++)
            for (int j = i + 1; j < 9; j++)
                D[k++] = Magnitud(vectR, vectG, vectB, i, j);
        // Aggregate: distance of candidate c to every other member, summed in
        // increasing index order (same float addition order as the original).
        for (int c = 0; c < 9; c++) {
            float acc = 0.0f;
            for (int other = 0; other < 9; other++) {
                if (other == c) continue;
                int lo = (c < other) ? c : other;
                int hi = (c < other) ? other : c;
                acc += D[lo * 8 - lo * (lo - 1) / 2 + (hi - lo - 1)];
            }
            disteucl1[c] = acc;
        }
        // Vector median = candidate with the minimal aggregate distance.
        float mn = disteucl1[0];
        int posMin = 0;
        for (int i = 1; i < 9; i++) {
            if (mn > disteucl1[i]) {
                mn = disteucl1[i];
                posMin = i;
            }
        }
        d_Pout[(Row * m + Col) * 3 + 0] = vectR[posMin];
        d_Pout[(Row * m + Col) * 3 + 1] = vectG[posMin];
        d_Pout[(Row * m + Col) * 3 + 2] = vectB[posMin];
    }
}
// Directional-style vector filter: every 3x3 window pixel is normalised to a
// unit vector, and the candidate whose summed Euclidean distance to the
// other unit vectors is minimal is written out (the raw, un-normalised pixel
// is what gets emitted).
// Launch: 2D grid/block, one thread per pixel; 3 interleaved channels, row
// stride m.
// Fixes vs. the original: sqrtf replaces the double-precision sqrt in the
// inner loop, and the minimum search also considers the last candidate
// (index 8), which the old `i < 8` loop silently ignored.
__global__ void VectorUnit_GPU_Global(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m)
{
    int Row = blockIdx.y*blockDim.y + threadIdx.y;
    int Col = blockIdx.x*blockDim.x + threadIdx.x;
    int F = 0;
    float pixel_UR[9], pixel_UG[9], pixel_UB[9];  // unit-normalised window pixels
    unsigned char vectR[9], vectG[9], vectB[9];   // raw window pixels (output source)
    float disteucl = 0.0f, disteucl1[9];          // per-candidate aggregate distances
    float valMag;
    float aux1 = 0.0f, aux2 = 0.0f, aux3 = 0.0f;
    if ((Row > 1) && (Col > 1) && (Row < m - 1) && (Col < n - 1)) {
        for (int i = -1; i <= 1; i++) {
            for (int j = -1; j <= 1; j++) {
                vectR[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
                vectG[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
                vectB[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
                pixel_UR[F] = vectR[F];
                pixel_UG[F] = vectG[F];
                pixel_UB[F] = vectB[F];
                if (pixel_UR[F] == 0 && pixel_UG[F] == 0 && pixel_UB[F] == 0) {
                    // A black pixel has no direction; keep the original's
                    // sentinel (10,10,10) instead of dividing by zero.
                    pixel_UR[F] = 10;
                    pixel_UG[F] = 10;
                    pixel_UB[F] = 10;
                }
                else {
                    valMag = sqrtf((pixel_UR[F] * pixel_UR[F]) + (pixel_UG[F] * pixel_UG[F]) + (pixel_UB[F] * pixel_UB[F]));
                    pixel_UR[F] = pixel_UR[F] / valMag;
                    pixel_UG[F] = pixel_UG[F] / valMag;
                    pixel_UB[F] = pixel_UB[F] / valMag;
                }
                F++;
            }
        }
        // Aggregate Euclidean distance of each unit vector to all the others
        // (the F == x term contributes zero, so it is harmless to include).
        for (F = 0; F <= 8; F++) {
            disteucl = 0.0f;
            for (int x = 0; x <= 8; x++) {
                aux1 = pixel_UR[F] - pixel_UR[x];
                aux2 = pixel_UG[F] - pixel_UG[x];
                aux3 = pixel_UB[F] - pixel_UB[x];
                disteucl += sqrtf((aux1*aux1) + (aux2*aux2) + (aux3*aux3));
            }
            disteucl1[F] = disteucl;
        }
        // Output the raw pixel whose direction is the "median" direction.
        float mn = disteucl1[0];
        int posMin = 0;
        for (int i = 1; i <= 8; i++) {
            if (mn > disteucl1[i]) {
                mn = disteucl1[i];
                posMin = i;
            }
        }
        d_Pout[(Row * m + Col) * 3 + 0] = vectR[posMin];
        d_Pout[(Row * m + Col) * 3 + 1] = vectG[posMin];
        d_Pout[(Row * m + Col) * 3 + 2] = vectB[posMin];
    }
}
// Basic Vector Directional Filter (BVDF): ranks the 3x3 window pixels by the
// sum of angles (acos of the normalised dot product) to every other window
// pixel and outputs the pixel with the smallest aggregate angle.  Black
// (0,0,0) pixels have no direction and get a fixed penalty of 1000 per pair.
// Launch: 2D grid/block, one thread per pixel; 3 interleaved channels, row
// stride n (this kernel's original indexing; siblings use m).
// Change vs. the original: sqrtf/acosf replace the double-precision
// sqrt/acos -- the original's own comments note that double precision
// exhausts device resources here.
// NOTE(review): rounding can push arriva/abajo slightly above 1, making
// acosf return NaN for near-identical vectors; the original acos had the
// same exposure -- confirm whether clamping to [-1,1] is wanted.
__global__ void BVDF_GPU_Global(unsigned char* d_Pout, unsigned char* d_Pin, int n, int m) {
    int Row = blockIdx.y*blockDim.y + threadIdx.y;
    int Col = blockIdx.x*blockDim.x + threadIdx.x;
    int posicion[9], hold2 = 0;
    float vectR[9], vectG[9], vectB[9];
    float disteucl1[9], hold;
    float arriva = 0.0f, abajo = 0.0f, valAngulo = 0.0f;
    if ((Row > 1) && (Col > 1) && (Row < m - 1) && (Col < n - 1)) {
        // Load the 3x3 neighbourhood.
        int F = 0;
        for (int i = -1; i <= 1; i++) {
            for (int j = -1; j <= 1; j++) {
                vectR[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 0];
                vectG[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 1];
                vectB[F] = d_Pin[((Row + i) * n + (Col + j)) * 3 + 2];
                posicion[F] = F;
                F++;
            }
        }
        // Aggregate angular distance of each candidate to the whole window.
        for (F = 0; F <= 8; F++) {
            valAngulo = 0.0f;
            for (int x = 0; x <= 8; x++) {
                if ((vectR[F] == 0 && vectG[F] == 0 && vectB[F] == 0) || (vectR[x] == 0 && vectG[x] == 0 && vectB[x] == 0)) {
                    valAngulo += 1000;   // zero vector: fixed penalty instead of an undefined angle
                }
                else {
                    arriva = (vectR[F] * vectR[x]) + (vectG[F] * vectG[x]) + (vectB[F] * vectB[x]);
                    abajo = sqrtf((vectR[F] * vectR[F]) + (vectG[F] * vectG[F]) + (vectB[F] * vectB[F])) * sqrtf((vectR[x] * vectR[x]) + (vectG[x] * vectG[x]) + (vectB[x] * vectB[x]));
                    valAngulo += acosf(arriva / abajo);
                }
            }
            disteucl1[F] = valAngulo;
        }
        // Bubble sort the aggregate angles, carrying the window index along;
        // after sorting, posicion[0] names the directional median.
        for (F = 0; F <= 8; F++) {
            for (int x = 0; x <= 7; x++) {
                if (disteucl1[x] > disteucl1[x + 1]) {
                    hold = disteucl1[x];
                    hold2 = posicion[x];
                    disteucl1[x] = disteucl1[x + 1];
                    posicion[x] = posicion[x + 1];
                    disteucl1[x + 1] = hold;
                    posicion[x + 1] = hold2;
                }
            }
        }
        d_Pout[(Row * n + Col) * 3 + 0] = vectR[posicion[0]];
        d_Pout[(Row * n + Col) * 3 + 1] = vectG[posicion[0]];
        d_Pout[(Row * n + Col) * 3 + 2] = vectB[posicion[0]];
    }
}
#define min(a, b) ((a < b) ? a : b)
#define max(a, b) ((a > b) ? a : b) //estas dos funciones estan repetidas con minCUDA y maxCUDA
__global__ void FTSCF_GPU_Original
(unsigned char* d_Pout, const unsigned char* d_Pin, int n, int m) {
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int M = 0, j = 0, x = 0;
float vectR[9], vectG[9], vectB[9], hold;
float gam_small_1[18] = { 0 }, med_1, med_2, var_1, gam_big_1[18] = { 0 };
float gam_small_2[18] = { 0 }, med1, med2, var1, gam_big_2[18] = { 0 };
float array_R[25];
float array_G[25];
float array_B[25];
int F = 0, i = 0;
const int channels = 3;
if ((Row>1) && (Col>1) && (Row < m - 1) && (Col < n - 1)) {
//int tid = omp_get_thread_num();
//hacer el arreglo
F = 0;
for (i = -2; i <= 2; i++) {
for (j = -2; j <= 2; j++) {
array_R[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
array_G[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
array_B[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
F++;
}
}
// se copia a continuacion solo los 8-vecinos
M = 0;
for (F = 6; F <= 8; F++) {
vectG[M] = (array_G[F]);
vectR[M] = (array_R[F]);
vectB[M] = (array_B[F]);
M++;
}
for (F = 11; F <= 13; F++) {
vectG[M] = (array_G[F]);
vectR[M] = (array_R[F]);
vectB[M] = (array_B[F]);
M++;
}
for (F = 16; F <= 18; F++) {
vectG[M] = (array_G[F]);
vectR[M] = (array_R[F]);
vectB[M] = (array_B[F]);
M++;
}
float noreste_C_R, noreste_N1_R, noreste_N2_R, sur_C_R, sur_N1_R, sur_N2_R, noroeste_C_R, noroeste_N1_R, noroeste_N2_R;
float este_C_R, este_N1_R, este_N2_R, oeste_C_R, oeste_N1_R, oeste_N2_R, sureste_C_R, sureste_N1_R, sureste_N2_R;
float norte_C_R, norte_N1_R, norte_N2_R, suroeste_C_R, suroeste_N1_R, suroeste_N2_R;
float suroeste_NW_R, suroeste_SE_R, sur_W_R, sur_E_R, sureste_SW_R, sureste_NE_R, este_S_R, este_N_R, noreste_SE_R, noreste_NW_R;
float norte_W_R, norte_E_R, noroeste_NE_R, noroeste_SW_R, oeste_S_R, oeste_N_R;
float noreste_C_G, noreste_N1_G, noreste_N2_G, sur_C_G, sur_N1_G, sur_N2_G, noroeste_C_G, noroeste_N1_G, noroeste_N2_G;
// ---------------------------------------------------------------------------
// Per-direction edge metrics over a flattened 5x5 neighborhood (center at
// index 12) of the three color planes array_R / array_G / array_B.
// Direction names are Spanish: norte=N(orth), sur=S(outh), este=E(ast),
// oeste=W(est), plus the four diagonals (noreste=NE, noroeste=NW,
// sureste=SE, suroeste=SW).  Suffix _R/_G/_B selects the color channel.
// NOTE(review): this span is the interior of a much larger function; its
// signature and remainder are outside this chunk, so code is annotated only.
// Angle accumulators (float) per direction, green channel:
float este_C_G, este_N1_G, este_N2_G, oeste_C_G, oeste_N1_G, oeste_N2_G, sureste_C_G, sureste_N1_G, sureste_N2_G;
float norte_C_G, norte_N1_G, norte_N2_G, suroeste_C_G, suroeste_N1_G, suroeste_N2_G;
float suroeste_NW_G, suroeste_SE_G, sur_W_G, sur_E_G, sureste_SW_G, sureste_NE_G, este_S_G, este_N_G, noreste_SE_G, noreste_NW_G;
float norte_W_G, norte_E_G, noroeste_NE_G, noroeste_SW_G, oeste_S_G, oeste_N_G;
// Angle accumulators per direction, blue channel:
float noreste_C_B, noreste_N1_B, noreste_N2_B, sur_C_B, sur_N1_B, sur_N2_B, noroeste_C_B, noroeste_N1_B, noroeste_N2_B;
float este_C_B, este_N1_B, este_N2_B, oeste_C_B, oeste_N1_B, oeste_N2_B, sureste_C_B, sureste_N1_B, sureste_N2_B;
float norte_C_B, norte_N1_B, norte_N2_B, suroeste_C_B, suroeste_N1_B, suroeste_N2_B;
float suroeste_NW_B, suroeste_SE_B, sur_W_B, sur_E_B, sureste_SW_B, sureste_NE_B, este_S_B, este_N_B, noreste_SE_B, noreste_NW_B;
float norte_W_B, norte_E_B, noroeste_NE_B, noroeste_SW_B, oeste_S_B, oeste_N_B;
// Scratch arrays and per-channel noise terms; not referenced in this chunk —
// presumably filled/used later in the function (outside this view): confirm.
float largo[9], largo_1[9], largo_2[9], LARGO[9], LARGO_1[9], LARGO_2[9];
float noise_R_R, noise_G_G, noise_B_B;
// Integer absolute-difference (gradient magnitude) accumulators, blue:
int SW_C_B, SW_N1_B, SW_N2_B, SW_NW_B, SW_SE_B, S_C_B, S_N1_B, S_N2_B, S_W_B, S_E_B, SE_C_B, SE_N1_B, SE_N2_B, SE_SW_B, SE_NE_B;
int E_C_B, E_N1_B, E_N2_B, E_S_B, E_N_B, NE_C_B, NE_N1_B, NE_N2_B, NE_SE_B, NE_NW_B, N_C_B, N_N1_B, N_N2_B, N_W_B, N_E_B;
int NW_C_B, NW_N1_B, NW_N2_B, NW_NE_B, NW_SW_B, W_C_B, W_N1_B, W_N2_B, W_S_B, W_N_B;
// Same, red:
int SW_C_R, SW_N1_R, SW_N2_R, SW_NW_R, SW_SE_R, S_C_R, S_N1_R, S_N2_R, S_W_R, S_E_R, SE_C_R, SE_N1_R, SE_N2_R, SE_SW_R, SE_NE_R;
int E_C_R, E_N1_R, E_N2_R, E_S_R, E_N_R, NE_C_R, NE_N1_R, NE_N2_R, NE_SE_R, NE_NW_R, N_C_R, N_N1_R, N_N2_R, N_W_R, N_E_R;
int NW_C_R, NW_N1_R, NW_N2_R, NW_NE_R, NW_SW_R, W_C_R, W_N1_R, W_N2_R, W_S_R, W_N_R;
// Same, green:
int SW_C_G, SW_N1_G, SW_N2_G, SW_NW_G, SW_SE_G, S_C_G, S_N1_G, S_N2_G, S_W_G, S_E_G, SE_C_G, SE_N1_G, SE_N2_G, SE_SW_G, SE_NE_G;
int E_C_G, E_N1_G, E_N2_G, E_S_G, E_N_G, NE_C_G, NE_N1_G, NE_N2_G, NE_SE_G, NE_NW_G, N_C_G, N_N1_G, N_N2_G, N_W_G, N_E_G;
int NW_C_G, NW_N1_G, NW_N2_G, NW_NE_G, NW_SW_G, W_C_G, W_N1_G, W_N2_G, W_S_G, W_N_G;
// Constant components of the 3-vector (cons1, cons2, pixel) used by the
// angle computation below; 255 matches an 8-bit channel maximum.
float cons1 = 255, cons2 = 255;
// ---- Absolute pixel differences, blue channel ----
// X_C_* : neighbor-vs-center (index 12) difference along direction X;
// X_N1_* / X_N2_* : the parallel differences one step to either side;
// remaining pairs (e.g. SW_NW/SW_SE) are the orthogonal differences.
// NOTE(review): abs() on int operands here resolves to the integer overload;
// values come from 8-bit-range planes so overflow is not a concern.
SW_C_B = abs(array_B[6] - array_B[12]);
SW_N1_B = abs(array_B[10] - array_B[16]);
SW_N2_B = abs(array_B[2] - array_B[8]);
SW_NW_B = abs(array_B[12] - array_B[16]);
SW_SE_B = abs(array_B[12] - array_B[8]);
S_C_B = abs(array_B[7] - array_B[12]);
S_N1_B = abs(array_B[6] - array_B[11]);
S_N2_B = abs(array_B[8] - array_B[13]);
S_W_B = abs(array_B[12] - array_B[11]);
S_E_B = abs(array_B[12] - array_B[13]);
SE_C_B = abs(array_B[8] - array_B[12]);
SE_N1_B = abs(array_B[2] - array_B[6]);
SE_N2_B = abs(array_B[14] - array_B[18]);
SE_SW_B = abs(array_B[12] - array_B[6]);
SE_NE_B = abs(array_B[12] - array_B[18]);
E_C_B = abs(array_B[13] - array_B[12]);
E_N1_B = abs(array_B[8] - array_B[7]);
E_N2_B = abs(array_B[18] - array_B[17]);
E_S_B = abs(array_B[12] - array_B[7]);
E_N_B = abs(array_B[12] - array_B[17]);
NE_C_B = abs(array_B[18] - array_B[12]);
NE_N1_B = abs(array_B[14] - array_B[8]);
NE_N2_B = abs(array_B[22] - array_B[16]);
NE_SE_B = abs(array_B[12] - array_B[8]);
NE_NW_B = abs(array_B[12] - array_B[16]);
N_C_B = abs(array_B[17] - array_B[12]);
N_N1_B = abs(array_B[18] - array_B[13]);
N_N2_B = abs(array_B[16] - array_B[11]);
N_W_B = abs(array_B[12] - array_B[11]);
N_E_B = abs(array_B[12] - array_B[13]);
NW_C_B = abs(array_B[16] - array_B[12]);
NW_N1_B = abs(array_B[22] - array_B[18]);
NW_N2_B = abs(array_B[10] - array_B[6]);
NW_NE_B = abs(array_B[12] - array_B[18]);
NW_SW_B = abs(array_B[12] - array_B[6]);
W_C_B = abs(array_B[11] - array_B[12]);
W_N1_B = abs(array_B[16] - array_B[17]);
W_N2_B = abs(array_B[6] - array_B[7]);
W_S_B = abs(array_B[12] - array_B[7]);
W_N_B = abs(array_B[12] - array_B[17]);
// ---- Absolute pixel differences, green channel (same index pattern) ----
SW_C_G = abs(array_G[6] - array_G[12]);
SW_N1_G = abs(array_G[10] - array_G[16]);
SW_N2_G = abs(array_G[2] - array_G[8]);
SW_NW_G = abs(array_G[12] - array_G[16]);
SW_SE_G = abs(array_G[12] - array_G[8]);
S_C_G = abs(array_G[7] - array_G[12]);
S_N1_G = abs(array_G[6] - array_G[11]);
S_N2_G = abs(array_G[8] - array_G[13]);
S_W_G = abs(array_G[12] - array_G[11]);
S_E_G = abs(array_G[12] - array_G[13]);
SE_C_G = abs(array_G[8] - array_G[12]);
SE_N1_G = abs(array_G[2] - array_G[6]);
SE_N2_G = abs(array_G[14] - array_G[18]);
SE_SW_G = abs(array_G[12] - array_G[6]);
SE_NE_G = abs(array_G[12] - array_G[18]);
E_C_G = abs(array_G[13] - array_G[12]);
E_N1_G = abs(array_G[8] - array_G[7]);
E_N2_G = abs(array_G[18] - array_G[17]);
E_S_G = abs(array_G[12] - array_G[7]);
E_N_G = abs(array_G[12] - array_G[17]);
NE_C_G = abs(array_G[18] - array_G[12]);
NE_N1_G = abs(array_G[14] - array_G[8]);
NE_N2_G = abs(array_G[22] - array_G[16]);
NE_SE_G = abs(array_G[12] - array_G[8]);
NE_NW_G = abs(array_G[12] - array_G[16]);
N_C_G = abs(array_G[17] - array_G[12]);
N_N1_G = abs(array_G[18] - array_G[13]);
N_N2_G = abs(array_G[16] - array_G[11]);
N_W_G = abs(array_G[12] - array_G[11]);
N_E_G = abs(array_G[12] - array_G[13]);
NW_C_G = abs(array_G[16] - array_G[12]);
NW_N1_G = abs(array_G[22] - array_G[18]);
NW_N2_G = abs(array_G[10] - array_G[6]);
NW_NE_G = abs(array_G[12] - array_G[18]);
NW_SW_G = abs(array_G[12] - array_G[6]);
W_C_G = abs(array_G[11] - array_G[12]);
W_N1_G = abs(array_G[16] - array_G[17]);
W_N2_G = abs(array_G[6] - array_G[7]);
W_S_G = abs(array_G[12] - array_G[7]);
W_N_G = abs(array_G[12] - array_G[17]);
// ---- Absolute pixel differences, red channel (same index pattern) ----
SW_C_R = abs(array_R[6] - array_R[12]);
SW_N1_R = abs(array_R[10] - array_R[16]);
SW_N2_R = abs(array_R[2] - array_R[8]);
SW_NW_R = abs(array_R[12] - array_R[16]);
SW_SE_R = abs(array_R[12] - array_R[8]);
S_C_R = abs(array_R[7] - array_R[12]);
S_N1_R = abs(array_R[6] - array_R[11]);
S_N2_R = abs(array_R[8] - array_R[13]);
S_W_R = abs(array_R[12] - array_R[11]);
S_E_R = abs(array_R[12] - array_R[13]);
SE_C_R = abs(array_R[8] - array_R[12]);
SE_N1_R = abs(array_R[2] - array_R[6]);
SE_N2_R = abs(array_R[14] - array_R[18]);
SE_SW_R = abs(array_R[12] - array_R[6]);
SE_NE_R = abs(array_R[12] - array_R[18]);
E_C_R = abs(array_R[13] - array_R[12]);
E_N1_R = abs(array_R[8] - array_R[7]);
E_N2_R = abs(array_R[18] - array_R[17]);
E_S_R = abs(array_R[12] - array_R[7]);
E_N_R = abs(array_R[12] - array_R[17]);
NE_C_R = abs(array_R[18] - array_R[12]);
NE_N1_R = abs(array_R[14] - array_R[8]);
NE_N2_R = abs(array_R[22] - array_R[16]);
NE_SE_R = abs(array_R[12] - array_R[8]);
NE_NW_R = abs(array_R[12] - array_R[16]);
N_C_R = abs(array_R[17] - array_R[12]);
N_N1_R = abs(array_R[18] - array_R[13]);
N_N2_R = abs(array_R[16] - array_R[11]);
N_W_R = abs(array_R[12] - array_R[11]);
N_E_R = abs(array_R[12] - array_R[13]);
NW_C_R = abs(array_R[16] - array_R[12]);
NW_N1_R = abs(array_R[22] - array_R[18]);
NW_N2_R = abs(array_R[10] - array_R[6]);
NW_NE_R = abs(array_R[12] - array_R[18]);
NW_SW_R = abs(array_R[12] - array_R[6]);
W_C_R = abs(array_R[11] - array_R[12]);
W_N1_R = abs(array_R[16] - array_R[17]);
W_N2_R = abs(array_R[6] - array_R[7]);
W_S_R = abs(array_R[12] - array_R[7]);
W_N_R = abs(array_R[12] - array_R[17]);
// ---- Vector angles between pixel pairs, red channel ----
// Each pixel p is lifted to the 3-vector (cons1, cons2, p) and the value is
// the angle acos(a.b / (|a| * |b|)) between the two lifted vectors.
// NOTE(review): the numerator uses (cons1 + cons1) where the dot product of
// (cons1, cons2, p) vectors would be cons1*cons1 + cons2*cons2; with
// cons1 == cons2 == 255 this is inconsistent with the denominator's
// pow(cons1, 2) terms — confirm whether "+" is intentional or a typo for "*".
// NOTE(review): the "== 0" guards can never fire while cons1/cons2 are 255,
// and the acos argument is not clamped to [-1, 1] — TODO confirm NaN safety.
// NOTE(review): acos/pow/sqrt are the double-precision overloads applied to
// float data; acosf/powf/sqrtf would avoid double math on the device.
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[12])) == 0) suroeste_C_R = 0;
else suroeste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[10] * array_R[16])) == 0) suroeste_N1_R = 0;
else suroeste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[10] * array_R[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[2] * array_R[8])) == 0) suroeste_N2_R = 0;
else suroeste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[2] * array_R[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[2], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[16])) == 0) suroeste_NW_R = 0;
else suroeste_NW_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[8])) == 0) suroeste_SE_R = 0;
else suroeste_SE_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[7] * array_R[12])) == 0) sur_C_R = 0;
else sur_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[7] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[11])) == 0) sur_N1_R = 0;
else sur_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[13])) == 0) sur_N2_R = 0;
else sur_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[11])) == 0) sur_W_R = 0;
else sur_W_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[13])) == 0) sur_E_R = 0;
else sur_E_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[12])) == 0) sureste_C_R = 0;
else sureste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[2])) == 0) sureste_N1_R = 0;
else sureste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[2])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[2], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[14] * array_R[18])) == 0) sureste_N2_R = 0;
else sureste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[14] * array_R[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[6])) == 0) sureste_SW_R = 0;
else sureste_SW_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[6])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[18])) == 0) sureste_NE_R = 0;
else sureste_NE_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[13] * array_R[12])) == 0) este_C_R = 0;
else este_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[13] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[7])) == 0) este_N1_R = 0;
else este_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[17])) == 0) este_N2_R = 0;
else este_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[7])) == 0) este_S_R = 0;
else este_S_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[17])) == 0) este_N_R = 0;
else este_N_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[12])) == 0) noreste_C_R = 0;
else noreste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[14] * array_R[8])) == 0) noreste_N1_R = 0;
else noreste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[14] * array_R[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[22] * array_R[16])) == 0) noreste_N2_R = 0;
else noreste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[22] * array_R[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[8])) == 0) noreste_SE_R = 0;
else noreste_SE_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[16])) == 0) noreste_NW_R = 0;
else noreste_NW_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[17] * array_R[12])) == 0) norte_C_R = 0;
else norte_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[17] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[13])) == 0) norte_N1_R = 0;
else norte_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[11])) == 0) norte_N2_R = 0;
else norte_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[13])) == 0) norte_E_R = 0;
else norte_E_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[11])) == 0) norte_W_R = 0;
else norte_W_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[12])) == 0) noroeste_C_R = 0;
else noroeste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[22] * array_R[18])) == 0) noroeste_N1_R = 0;
else noroeste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[22] * array_R[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[10])) == 0) noroeste_N2_R = 0;
else noroeste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[10])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[18])) == 0) noroeste_NE_R = 0;
else noroeste_NE_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[12])) == 0) noroeste_SW_R = 0;
else noroeste_SW_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[11] * array_R[12])) == 0) oeste_C_R = 0;
else oeste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[11] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[17])) == 0) oeste_N1_R = 0;
else oeste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[7])) == 0) oeste_N2_R = 0;
else oeste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[17])) == 0) oeste_N_R = 0;
else oeste_N_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[7])) == 0) oeste_S_R = 0;
else oeste_S_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))))));
// ---- Vector angles, green channel (same pattern as red above) ----
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[12])) == 0) suroeste_C_G = 0;
else suroeste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[10] * array_G[16])) == 0) suroeste_N1_G = 0;
else suroeste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[10] * array_G[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[2] * array_G[8])) == 0) suroeste_N2_G = 0;
else suroeste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[2] * array_G[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[2], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[16])) == 0) suroeste_NW_G = 0;
else suroeste_NW_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[8])) == 0) suroeste_SE_G = 0;
else suroeste_SE_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[7] * array_G[12])) == 0) sur_C_G = 0;
else sur_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[7] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[11])) == 0) sur_N1_G = 0;
else sur_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[13])) == 0) sur_N2_G = 0;
else sur_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[11])) == 0) sur_W_G = 0;
else sur_W_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[13])) == 0) sur_E_G = 0;
else sur_E_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[12])) == 0) sureste_C_G = 0;
else sureste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[2])) == 0) sureste_N1_G = 0;
else sureste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[2])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[2], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[14] * array_G[18])) == 0) sureste_N2_G = 0;
else sureste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[14] * array_G[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[6])) == 0) sureste_SW_G = 0;
else sureste_SW_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[6])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[18])) == 0) sureste_NE_G = 0;
else sureste_NE_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[13] * array_G[12])) == 0) este_C_G = 0;
else este_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[13] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[7])) == 0) este_N1_G = 0;
else este_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[17])) == 0) este_N2_G = 0;
else este_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[7])) == 0) este_S_G = 0;
else este_S_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[17])) == 0) este_N_G = 0;
else este_N_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[12])) == 0) noreste_C_G = 0;
else noreste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[14] * array_G[8])) == 0) noreste_N1_G = 0;
else noreste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[14] * array_G[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[22] * array_G[16])) == 0) noreste_N2_G = 0;
else noreste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[22] * array_G[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[8])) == 0) noreste_SE_G = 0;
else noreste_SE_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[16])) == 0) noreste_NW_G = 0;
else noreste_NW_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[17] * array_G[12])) == 0) norte_C_G = 0;
else norte_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[17] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[13])) == 0) norte_N1_G = 0;
else norte_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[11])) == 0) norte_N2_G = 0;
else norte_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[13])) == 0) norte_E_G = 0;
else norte_E_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[11])) == 0) norte_W_G = 0;
else norte_W_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[12])) == 0) noroeste_C_G = 0;
else noroeste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[22] * array_G[18])) == 0) noroeste_N1_G = 0;
else noroeste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[22] * array_G[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[10])) == 0) noroeste_N2_G = 0;
else noroeste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[10])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[18])) == 0) noroeste_NE_G = 0;
else noroeste_NE_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[12])) == 0) noroeste_SW_G = 0;
else noroeste_SW_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[11] * array_G[12])) == 0) oeste_C_G = 0;
else oeste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[11] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[17])) == 0) oeste_N1_G = 0;
else oeste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[7])) == 0) oeste_N2_G = 0;
else oeste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[17])) == 0) oeste_N_G = 0;
else oeste_N_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[7])) == 0) oeste_S_G = 0;
else oeste_S_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))))));
// ---- Vector angles, blue channel (same pattern; continues past this chunk) ----
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[12])) == 0) suroeste_C_B = 0;
else suroeste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[10] * array_B[16])) == 0) suroeste_N1_B = 0;
else suroeste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[10] * array_B[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[2] * array_B[8])) == 0) suroeste_N2_B = 0;
else suroeste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[2] * array_B[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[2], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[16])) == 0) suroeste_NW_B = 0;
else suroeste_NW_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[8])) == 0) suroeste_SE_B = 0;
else suroeste_SE_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[7] * array_B[12])) == 0) sur_C_B = 0;
else sur_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[7] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[11])) == 0) sur_N1_B = 0;
else sur_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[13])) == 0) sur_N2_B = 0;
else sur_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[11])) == 0) sur_W_B = 0;
else sur_W_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[13])) == 0) sur_E_B = 0;
else sur_E_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[12])) == 0) sureste_C_B = 0;
else sureste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[2])) == 0) sureste_N1_B = 0;
else sureste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[2])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[2], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[14] * array_B[18])) == 0) sureste_N2_B = 0;
else sureste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[14] * array_B[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[6])) == 0) sureste_SW_B = 0;
else sureste_SW_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[6])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[18])) == 0) sureste_NE_B = 0;
else sureste_NE_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[13] * array_B[12])) == 0) este_C_B = 0;
else este_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[13] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[7])) == 0) este_N1_B = 0;
else este_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[17])) == 0) este_N2_B = 0;
else este_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[7])) == 0) este_S_B = 0;
else este_S_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[17])) == 0) este_N_B = 0;
else este_N_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[12])) == 0) noreste_C_B = 0;
else noreste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[14] * array_B[8])) == 0) noreste_N1_B = 0;
else noreste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[14] * array_B[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[22] * array_B[16])) == 0) noreste_N2_B = 0;
else noreste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[22] * array_B[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[8])) == 0) noreste_SE_B = 0;
else noreste_SE_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[16])) == 0) noreste_NW_B = 0;
else noreste_NW_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[17] * array_B[12])) == 0) norte_C_B = 0;
else norte_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[17] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[13])) == 0) norte_N1_B = 0;
else norte_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[11])) == 0) norte_N2_B = 0;
else norte_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[13])) == 0) norte_E_B = 0;
else norte_E_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[11])) == 0) norte_W_B = 0;
else norte_W_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[12])) == 0) noroeste_C_B = 0;
else noroeste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[22] * array_B[18])) == 0) noroeste_N1_B = 0;
else noroeste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[22] * array_B[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[10])) == 0) noroeste_N2_B = 0;
else noroeste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[10])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[18])) == 0) noroeste_NE_B = 0;
else noroeste_NE_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[12])) == 0) noroeste_SW_B = 0;
else noroeste_SW_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[11] * array_B[12])) == 0) oeste_C_B = 0;
else oeste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[11] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[17])) == 0) oeste_N1_B = 0;
else oeste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[7])) == 0) oeste_N2_B = 0;
else oeste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[17])) == 0) oeste_N_B = 0;
else oeste_N_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[7])) == 0) oeste_S_B = 0;
else oeste_S_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))))));
// SUROESTE
// Parameters of the first fuzzy rule set (largo/largo_1/largo_2):
// med_1 = center of the "big angle" Gaussian membership (threshold 1 rad),
// var_1 = shared variance of all Gaussians in this rule set,
// med_2 = center of the "small angle" Gaussian membership (0.1 rad).
// NOTE(review): distinct from med1/med2/var1 used later by the LARGO rule set.
med_1 = 1, var_1 = 0.8;
med_2 = 0.1;
if (suroeste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((suroeste_C_R)-med_1), 2) / (2 * var_1))));
if (suroeste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((suroeste_N1_R)-med_2), 2) / (2 * var_1))));
if (suroeste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((suroeste_N2_R)-med_2), 2) / (2 * var_1))));
if (suroeste_NW_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((suroeste_NW_R)-med_1), 2) / (2 * var_1))));
if (suroeste_SE_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((suroeste_SE_R)-med_1), 2) / (2 * var_1))));
largo[0] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sur_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sur_C_R)-med_1), 2) / (2 * var_1))));
if (sur_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sur_N1_R)-med_2), 2) / (2 * var_1))));
if (sur_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sur_N2_R)-med_2), 2) / (2 * var_1))));
if (sur_W_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sur_W_R)-med_1), 2) / (2 * var_1))));
if (sur_E_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sur_E_R)-med_1), 2) / (2 * var_1))));
largo[1] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sureste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sureste_C_R)-med_1), 2) / (2 * var_1))));
if (sureste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sureste_N1_R)-med_2), 2) / (2 * var_1))));
if (sureste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sureste_N2_R)-med_2), 2) / (2 * var_1))));
if (sureste_NE_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sureste_NE_R)-med_1), 2) / (2 * var_1))));
if (sureste_SW_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sureste_SW_R)-med_1), 2) / (2 * var_1))));
largo[2] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (este_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((este_C_R)-med_1), 2) / (2 * var_1))));
if (este_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((este_N1_R)-med_2), 2) / (2 * var_1))));
if (este_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((este_N2_R)-med_2), 2) / (2 * var_1))));
if (este_N_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((este_N_R)-med_1), 2) / (2 * var_1))));
if (este_S_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((este_S_R)-med_1), 2) / (2 * var_1))));
largo[3] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noreste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noreste_C_R)-med_1), 2) / (2 * var_1))));
if (noreste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noreste_N1_R)-med_2), 2) / (2 * var_1))));
if (noreste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noreste_N2_R)-med_2), 2) / (2 * var_1))));
if (noreste_NW_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noreste_NW_R)-med_1), 2) / (2 * var_1))));
if (noreste_SE_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noreste_SE_R)-med_1), 2) / (2 * var_1))));
largo[4] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (norte_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((norte_C_R)-med_1), 2) / (2 * var_1))));
if (norte_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((norte_N1_R)-med_2), 2) / (2 * var_1))));
if (norte_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((norte_N2_R)-med_2), 2) / (2 * var_1))));
if (norte_W_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((norte_W_R)-med_1), 2) / (2 * var_1))));
if (norte_E_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((norte_E_R)-med_1), 2) / (2 * var_1))));
largo[5] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noroeste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noroeste_C_R)-med_1), 2) / (2 * var_1))));
if (noroeste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noroeste_N1_R)-med_2), 2) / (2 * var_1))));
if (noroeste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noroeste_N2_R)-med_2), 2) / (2 * var_1))));
if (noroeste_NE_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noroeste_NE_R)-med_1), 2) / (2 * var_1))));
if (noroeste_SW_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noroeste_SW_R)-med_1), 2) / (2 * var_1))));
largo[6] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (oeste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((oeste_C_R)-med_1), 2) / (2 * var_1))));
if (oeste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((oeste_N1_R)-med_2), 2) / (2 * var_1))));
if (oeste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((oeste_N2_R)-med_2), 2) / (2 * var_1))));
if (oeste_N_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((oeste_N_R)-med_1), 2) / (2 * var_1))));
if (oeste_S_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((oeste_S_R)-med_1), 2) / (2 * var_1))));
largo[7] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (suroeste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((suroeste_C_G)-med_1), 2) / (2 * var_1))));
if (suroeste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((suroeste_N1_G)-med_2), 2) / (2 * var_1))));
if (suroeste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((suroeste_N2_G)-med_2), 2) / (2 * var_1))));
if (suroeste_NW_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((suroeste_NW_G)-med_1), 2) / (2 * var_1))));
if (suroeste_SE_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((suroeste_SE_G)-med_1), 2) / (2 * var_1))));
largo_1[0] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sur_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sur_C_G)-med_1), 2) / (2 * var_1))));
if (sur_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sur_N1_G)-med_2), 2) / (2 * var_1))));
if (sur_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sur_N2_G)-med_2), 2) / (2 * var_1))));
if (sur_W_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sur_W_G)-med_1), 2) / (2 * var_1))));
if (sur_E_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sur_E_G)-med_1), 2) / (2 * var_1))));
largo_1[1] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sureste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sureste_C_G)-med_1), 2) / (2 * var_1))));
if (sureste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sureste_N1_G)-med_2), 2) / (2 * var_1))));
if (sureste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sureste_N2_G)-med_2), 2) / (2 * var_1))));
if (sureste_NE_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sureste_NE_G)-med_1), 2) / (2 * var_1))));
if (sureste_SW_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sureste_SW_G)-med_1), 2) / (2 * var_1))));
largo_1[2] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (este_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((este_C_G)-med_1), 2) / (2 * var_1))));
if (este_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((este_N1_G)-med_2), 2) / (2 * var_1))));
if (este_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((este_N2_G)-med_2), 2) / (2 * var_1))));
if (este_N_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((este_N_G)-med_1), 2) / (2 * var_1))));
if (este_S_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((este_S_G)-med_1), 2) / (2 * var_1))));
largo_1[3] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noreste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noreste_C_G)-med_1), 2) / (2 * var_1))));
if (noreste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noreste_N1_G)-med_2), 2) / (2 * var_1))));
if (noreste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noreste_N2_G)-med_2), 2) / (2 * var_1))));
if (noreste_NW_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noreste_NW_G)-med_1), 2) / (2 * var_1))));
if (noreste_SE_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noreste_SE_G)-med_1), 2) / (2 * var_1))));
largo_1[4] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (norte_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((norte_C_G)-med_1), 2) / (2 * var_1))));
if (norte_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((norte_N1_G)-med_2), 2) / (2 * var_1))));
if (norte_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((norte_N2_G)-med_2), 2) / (2 * var_1))));
if (norte_W_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((norte_W_G)-med_1), 2) / (2 * var_1))));
if (norte_E_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((norte_E_G)-med_1), 2) / (2 * var_1))));
largo_1[5] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noroeste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noroeste_C_G)-med_1), 2) / (2 * var_1))));
if (noroeste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noroeste_N1_G)-med_2), 2) / (2 * var_1))));
if (noroeste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noroeste_N2_G)-med_2), 2) / (2 * var_1))));
if (noroeste_NE_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noroeste_NE_G)-med_1), 2) / (2 * var_1))));
if (noroeste_SW_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noroeste_SW_G)-med_1), 2) / (2 * var_1))));
largo_1[6] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (oeste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((oeste_C_G)-med_1), 2) / (2 * var_1))));
if (oeste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((oeste_N1_G)-med_2), 2) / (2 * var_1))));
if (oeste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((oeste_N2_G)-med_2), 2) / (2 * var_1))));
if (oeste_N_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((oeste_N_G)-med_1), 2) / (2 * var_1))));
if (oeste_S_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((oeste_S_G)-med_1), 2) / (2 * var_1))));
largo_1[7] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
// Fuzzy membership for the suroeste (southwest) direction, B channel:
// saturate each Gaussian membership to 1 past its threshold, otherwise
// evaluate exp(-(x-med)^2 / (2*var)); the rule strength is the product.
if (suroeste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((suroeste_C_B)-med_1), 2) / (2 * var_1))));
if (suroeste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((suroeste_N1_B)-med_2), 2) / (2 * var_1))));
if (suroeste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((suroeste_N2_B)-med_2), 2) / (2 * var_1))));
if (suroeste_NW_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((suroeste_NW_B)-med_1), 2) / (2 * var_1))));
if (suroeste_SE_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((suroeste_SE_B)-med_1), 2) / (2 * var_1))));
// BUG FIX: this line previously multiplied gam_big_2[0] and gam_big_2[2],
// which belong to the second (LARGO) rule set and are not assigned until
// later in this function, so stale values were read. All 23 parallel
// direction blocks (largo[0..7], largo_1[0..7], largo_2[1..7]) use
// gam_big_1[0] and gam_big_1[2]; made this one consistent.
largo_2[0] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sur_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sur_C_B)-med_1), 2) / (2 * var_1))));
if (sur_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sur_N1_B)-med_2), 2) / (2 * var_1))));
if (sur_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sur_N2_B)-med_2), 2) / (2 * var_1))));
if (sur_W_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sur_W_B)-med_1), 2) / (2 * var_1))));
if (sur_E_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sur_E_B)-med_1), 2) / (2 * var_1))));
largo_2[1] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sureste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sureste_C_B)-med_1), 2) / (2 * var_1))));
if (sureste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sureste_N1_B)-med_2), 2) / (2 * var_1))));
if (sureste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sureste_N2_B)-med_2), 2) / (2 * var_1))));
if (sureste_NE_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sureste_NE_B)-med_1), 2) / (2 * var_1))));
if (sureste_SW_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sureste_SW_B)-med_1), 2) / (2 * var_1))));
largo_2[2] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (este_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((este_C_B)-med_1), 2) / (2 * var_1))));
if (este_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((este_N1_B)-med_2), 2) / (2 * var_1))));
if (este_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((este_N2_B)-med_2), 2) / (2 * var_1))));
if (este_N_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((este_N_B)-med_1), 2) / (2 * var_1))));
if (este_S_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((este_S_B)-med_1), 2) / (2 * var_1))));
largo_2[3] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noreste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noreste_C_B)-med_1), 2) / (2 * var_1))));
if (noreste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noreste_N1_B)-med_2), 2) / (2 * var_1))));
if (noreste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noreste_N2_B)-med_2), 2) / (2 * var_1))));
if (noreste_NW_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noreste_NW_B)-med_1), 2) / (2 * var_1))));
if (noreste_SE_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noreste_SE_B)-med_1), 2) / (2 * var_1))));
largo_2[4] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (norte_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((norte_C_B)-med_1), 2) / (2 * var_1))));
if (norte_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((norte_N1_B)-med_2), 2) / (2 * var_1))));
if (norte_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((norte_N2_B)-med_2), 2) / (2 * var_1))));
if (norte_W_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((norte_W_B)-med_1), 2) / (2 * var_1))));
if (norte_E_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((norte_E_B)-med_1), 2) / (2 * var_1))));
largo_2[5] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noroeste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noroeste_C_B)-med_1), 2) / (2 * var_1))));
if (noroeste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noroeste_N1_B)-med_2), 2) / (2 * var_1))));
if (noroeste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noroeste_N2_B)-med_2), 2) / (2 * var_1))));
if (noroeste_NE_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noroeste_NE_B)-med_1), 2) / (2 * var_1))));
if (noroeste_SW_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noroeste_SW_B)-med_1), 2) / (2 * var_1))));
largo_2[6] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (oeste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((oeste_C_B)-med_1), 2) / (2 * var_1))));
if (oeste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((oeste_N1_B)-med_2), 2) / (2 * var_1))));
if (oeste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((oeste_N2_B)-med_2), 2) / (2 * var_1))));
if (oeste_N_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((oeste_N_B)-med_1), 2) / (2 * var_1))));
if (oeste_S_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((oeste_S_B)-med_1), 2) / (2 * var_1))));
largo_2[7] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
// Parameters of the second fuzzy rule set (LARGO/LARGO_1), which operates
// on the gradient-magnitude features (SW_C_R, S_C_R, ...) rather than the
// angle features above, hence the much larger centers and variance:
// med1 = center/threshold of the "big" Gaussian membership,
// med2 = center/threshold of the "small" Gaussian membership,
// var1 = shared variance of all Gaussians in this rule set.
med1 = 60;
med2 = 10;
var1 = 1000;
if (SW_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SW_C_R)-med1), 2) / (2 * var1))));
if (SW_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SW_N1_R)-med2), 2) / (2 * var1))));
if (SW_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SW_N2_R)-med2), 2) / (2 * var1))));
if (SW_NW_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SW_NW_R)-med1), 2) / (2 * var1))));
if (SW_SE_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SW_SE_R)-med1), 2) / (2 * var1))));
LARGO[0] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (S_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((S_C_R)-med1), 2) / (2 * var1))));
if (S_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((S_N1_R)-med2), 2) / (2 * var1))));
if (S_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((S_N2_R)-med2), 2) / (2 * var1))));
if (S_W_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((S_W_R)-med1), 2) / (2 * var1))));
if (S_E_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((S_E_R)-med1), 2) / (2 * var1))));
LARGO[1] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (SE_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SE_C_R)-med1), 2) / (2 * var1))));
if (SE_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SE_N1_R)-med2), 2) / (2 * var1))));
if (SE_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SE_N2_R)-med2), 2) / (2 * var1))));
if (SE_NE_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SE_NE_R)-med1), 2) / (2 * var1))));
if (SE_SW_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SE_SW_R)-med1), 2) / (2 * var1))));
LARGO[2] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (E_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((E_C_R)-med1), 2) / (2 * var1))));
if (E_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((E_N1_R)-med2), 2) / (2 * var1))));
if (E_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((E_N2_R)-med2), 2) / (2 * var1))));
if (E_N_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((E_N_R)-med1), 2) / (2 * var1))));
if (E_S_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((E_S_R)-med1), 2) / (2 * var1))));
LARGO[3] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NE_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NE_C_R)-med1), 2) / (2 * var1))));
if (NE_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NE_N1_R)-med2), 2) / (2 * var1))));
if (NE_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NE_N2_R)-med2), 2) / (2 * var1))));
if (NE_NW_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NE_NW_R)-med1), 2) / (2 * var1))));
if (NE_SE_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NE_SE_R)-med1), 2) / (2 * var1))));
LARGO[4] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (N_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((N_C_R)-med1), 2) / (2 * var1))));
if (N_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((N_N1_R)-med2), 2) / (2 * var1))));
if (N_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((N_N2_R)-med2), 2) / (2 * var1))));
if (N_W_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((N_W_R)-med1), 2) / (2 * var1))));
if (N_E_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((N_E_R)-med1), 2) / (2 * var1))));
LARGO[5] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NW_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NW_C_R)-med1), 2) / (2 * var1))));
if (NW_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NW_N1_R)-med2), 2) / (2 * var1))));
if (NW_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NW_N2_R)-med2), 2) / (2 * var1))));
if (NW_NE_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NW_NE_R)-med1), 2) / (2 * var1))));
if (NW_SW_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NW_SW_R)-med1), 2) / (2 * var1))));
LARGO[6] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (W_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((W_C_R)-med1), 2) / (2 * var1))));
if (W_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((W_N1_R)-med2), 2) / (2 * var1))));
if (W_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((W_N2_R)-med2), 2) / (2 * var1))));
if (W_N_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((W_N_R)-med1), 2) / (2 * var1))));
if (W_S_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((W_S_R)-med1), 2) / (2 * var1))));
LARGO[7] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (SW_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SW_C_G)-med1), 2) / (2 * var1))));
if (SW_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SW_N1_G)-med2), 2) / (2 * var1))));
if (SW_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SW_N2_G)-med2), 2) / (2 * var1))));
if (SW_NW_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SW_NW_G)-med1), 2) / (2 * var1))));
if (SW_SE_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SW_SE_G)-med1), 2) / (2 * var1))));
LARGO_1[0] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (S_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((S_C_G)-med1), 2) / (2 * var1))));
if (S_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((S_N1_G)-med2), 2) / (2 * var1))));
if (S_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((S_N2_G)-med2), 2) / (2 * var1))));
if (S_W_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((S_W_G)-med1), 2) / (2 * var1))));
if (S_E_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((S_E_G)-med1), 2) / (2 * var1))));
LARGO_1[1] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (SE_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SE_C_G)-med1), 2) / (2 * var1))));
if (SE_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SE_N1_G)-med2), 2) / (2 * var1))));
if (SE_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SE_N2_G)-med2), 2) / (2 * var1))));
if (SE_NE_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SE_NE_G)-med1), 2) / (2 * var1))));
if (SE_SW_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SE_SW_G)-med1), 2) / (2 * var1))));
LARGO_1[2] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (E_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((E_C_G)-med1), 2) / (2 * var1))));
if (E_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((E_N1_G)-med2), 2) / (2 * var1))));
if (E_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((E_N2_G)-med2), 2) / (2 * var1))));
if (E_N_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((E_N_G)-med1), 2) / (2 * var1))));
if (E_S_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((E_S_G)-med1), 2) / (2 * var1))));
LARGO_1[3] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NE_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NE_C_G)-med1), 2) / (2 * var1))));
if (NE_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NE_N1_G)-med2), 2) / (2 * var1))));
if (NE_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NE_N2_G)-med2), 2) / (2 * var1))));
if (NE_NW_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NE_NW_G)-med1), 2) / (2 * var1))));
if (NE_SE_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NE_SE_G)-med1), 2) / (2 * var1))));
LARGO_1[4] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (N_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((N_C_G)-med1), 2) / (2 * var1))));
if (N_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((N_N1_G)-med2), 2) / (2 * var1))));
if (N_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((N_N2_G)-med2), 2) / (2 * var1))));
if (N_W_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((N_W_G)-med1), 2) / (2 * var1))));
if (N_E_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((N_E_G)-med1), 2) / (2 * var1))));
LARGO_1[5] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NW_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NW_C_G)-med1), 2) / (2 * var1))));
if (NW_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NW_N1_G)-med2), 2) / (2 * var1))));
if (NW_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NW_N2_G)-med2), 2) / (2 * var1))));
if (NW_NE_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NW_NE_G)-med1), 2) / (2 * var1))));
if (NW_SW_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NW_SW_G)-med1), 2) / (2 * var1))));
LARGO_1[6] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (W_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((W_C_G)-med1), 2) / (2 * var1))));
if (W_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((W_N1_G)-med2), 2) / (2 * var1))));
if (W_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((W_N2_G)-med2), 2) / (2 * var1))));
if (W_N_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((W_N_G)-med1), 2) / (2 * var1))));
if (W_S_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((W_S_G)-med1), 2) / (2 * var1))));
LARGO_1[7] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (SW_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SW_C_B)-med1), 2) / (2 * var1))));
if (SW_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SW_N1_B)-med2), 2) / (2 * var1))));
if (SW_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SW_N2_B)-med2), 2) / (2 * var1))));
if (SW_NW_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SW_NW_B)-med1), 2) / (2 * var1))));
if (SW_SE_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SW_SE_B)-med1), 2) / (2 * var1))));
LARGO_2[0] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (S_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((S_C_B)-med1), 2) / (2 * var1))));
if (S_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((S_N1_B)-med2), 2) / (2 * var1))));
if (S_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((S_N2_B)-med2), 2) / (2 * var1))));
if (S_W_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((S_W_B)-med1), 2) / (2 * var1))));
if (S_E_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((S_E_B)-med1), 2) / (2 * var1))));
LARGO_2[1] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (SE_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SE_C_B)-med1), 2) / (2 * var1))));
if (SE_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SE_N1_B)-med2), 2) / (2 * var1))));
if (SE_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SE_N2_B)-med2), 2) / (2 * var1))));
if (SE_NE_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SE_NE_B)-med1), 2) / (2 * var1))));
if (SE_SW_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SE_SW_B)-med1), 2) / (2 * var1))));
LARGO_2[2] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (E_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((E_C_B)-med1), 2) / (2 * var1))));
if (E_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((E_N1_B)-med2), 2) / (2 * var1))));
if (E_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((E_N2_B)-med2), 2) / (2 * var1))));
if (E_N_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((E_N_B)-med1), 2) / (2 * var1))));
if (E_S_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((E_S_B)-med1), 2) / (2 * var1))));
LARGO_2[3] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NE_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NE_C_B)-med1), 2) / (2 * var1))));
if (NE_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NE_N1_B)-med2), 2) / (2 * var1))));
if (NE_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NE_N2_B)-med2), 2) / (2 * var1))));
if (NE_NW_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NE_NW_B)-med1), 2) / (2 * var1))));
if (NE_SE_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NE_SE_B)-med1), 2) / (2 * var1))));
LARGO_2[4] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (N_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((N_C_B)-med1), 2) / (2 * var1))));
if (N_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((N_N1_B)-med2), 2) / (2 * var1))));
if (N_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((N_N2_B)-med2), 2) / (2 * var1))));
if (N_W_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((N_W_B)-med1), 2) / (2 * var1))));
if (N_E_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((N_E_B)-med1), 2) / (2 * var1))));
LARGO_2[5] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NW_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NW_C_B)-med1), 2) / (2 * var1))));
if (NW_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NW_N1_B)-med2), 2) / (2 * var1))));
if (NW_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NW_N2_B)-med2), 2) / (2 * var1))));
if (NW_NE_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NW_NE_B)-med1), 2) / (2 * var1))));
if (NW_SW_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NW_SW_B)-med1), 2) / (2 * var1))));
LARGO_2[6] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (W_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((W_C_B)-med1), 2) / (2 * var1))));
if (W_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((W_N1_B)-med2), 2) / (2 * var1))));
if (W_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((W_N2_B)-med2), 2) / (2 * var1))));
if (W_N_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((W_N_B)-med1), 2) / (2 * var1))));
if (W_S_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((W_S_B)-med1), 2) / (2 * var1))));
LARGO_2[7] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
float mu_R_R[8], mu_G_G[8], mu_B_B[8];
mu_R_R[0] = min(largo[0], LARGO[0]);
mu_R_R[1] = min(largo[1], LARGO[1]);
mu_R_R[2] = min(largo[2], LARGO[2]);
mu_R_R[3] = min(largo[3], LARGO[3]);
mu_R_R[4] = min(largo[4], LARGO[4]);
mu_R_R[5] = min(largo[5], LARGO[5]);
mu_R_R[6] = min(largo[6], LARGO[6]);
mu_R_R[7] = min(largo[7], LARGO[7]);
mu_G_G[0] = min(largo_1[0], LARGO_1[0]);
mu_G_G[1] = min(largo_1[1], LARGO_1[1]);
mu_G_G[2] = min(largo_1[2], LARGO_1[2]);
mu_G_G[3] = min(largo_1[3], LARGO_1[3]);
mu_G_G[4] = min(largo_1[4], LARGO_1[4]);
mu_G_G[5] = min(largo_1[5], LARGO_1[5]);
mu_G_G[6] = min(largo_1[6], LARGO_1[6]);
mu_G_G[7] = min(largo_1[7], LARGO_1[7]);
mu_B_B[0] = min(largo_2[0], LARGO_2[0]);
mu_B_B[1] = min(largo_2[1], LARGO_2[1]);
mu_B_B[2] = min(largo_2[2], LARGO_2[2]);
mu_B_B[3] = min(largo_2[3], LARGO_2[3]);
mu_B_B[4] = min(largo_2[4], LARGO_2[4]);
mu_B_B[5] = min(largo_2[5], LARGO_2[5]);
mu_B_B[6] = min(largo_2[6], LARGO_2[6]);
mu_B_B[7] = min(largo_2[7], LARGO_2[7]);
noise_R_R = max(max(max(max(max(max(max(mu_R_R[0], mu_R_R[1]), mu_R_R[2]), mu_R_R[3]), mu_R_R[4]), mu_R_R[5]), mu_R_R[6]), mu_R_R[7]);
noise_G_G = max(max(max(max(max(max(max(mu_G_G[0], mu_G_G[1]), mu_G_G[2]), mu_G_G[3]), mu_G_G[4]), mu_G_G[5]), mu_G_G[6]), mu_G_G[7]);
noise_B_B = max(max(max(max(max(max(max(mu_B_B[0], mu_B_B[1]), mu_B_B[2]), mu_B_B[3]), mu_B_B[4]), mu_B_B[5]), mu_B_B[6]), mu_B_B[7]);
//printf( "%f",noise_B_B);
if ((noise_B_B >= 0.3))
{
float weights[9], sum_weights = 0, hold2, suma = 0;
for (j = 0; j <= 7; j++)
{
sum_weights += (1 - mu_B_B[j]);
}
sum_weights = (sum_weights + 3 * sqrt(1 - noise_B_B)) / 2;
weights[0] = (1 - mu_B_B[0]);
weights[1] = (1 - mu_B_B[1]);
weights[2] = (1 - mu_B_B[2]);
weights[3] = (1 - mu_B_B[7]);
weights[4] = 3 * sqrt(1 - noise_B_B);
weights[5] = (1 - mu_B_B[3]);
weights[6] = (1 - mu_B_B[6]);
weights[7] = (1 - mu_B_B[5]);
weights[8] = (1 - mu_B_B[4]);
for (j = 0; j <= 8; j++)
{
for (x = 0; x <= 7; x++)
{
if (vectB[x] > vectB[x + 1])
{
hold = vectB[x];
hold2 = weights[x];
vectB[x] = vectB[x + 1];
weights[x] = weights[x + 1];
vectB[x + 1] = hold;
weights[x + 1] = hold2;
}
}
}
for (j = 8; j >= 0; j--)
{
suma += weights[j];
if (suma >= sum_weights)
{
if (j < 2)
{
sum_weights = sum_weights - (weights[0] + weights[1]);
sum_weights = sum_weights / 2;
suma = 0;
for (F = 8; F >= 2; F--)
{
suma += weights[F];
if (suma > sum_weights)
{
d_Pout[(Row * m + Col) * channels + 2] = vectB[F];
F = -1;
}
}
j = -1;
}
else
{
d_Pout[(Row * m + Col) * channels + 2] = vectB[j];
//d_Pout[(Row * m + Col) * channels + 0] = d_Pout[(Row * m + Col) * channels + 0];
j = -1;
}
suma = -1;
}
}
// fwrite (&CCC, 1, 1, header_file);
}
else
{
d_Pout[(Row * m + Col) * channels + 2] = vectB[4];
//d_Pout[(Row * m + Col) * channels + 0] = 0;
// fwrite (&CCC, 1, 1, header_file);
}
if (noise_G_G >= 0.3)
{
float weights[9], sum_weights = 0, hold2, suma = 0;
for (j = 0; j <= 7; j++)
{
sum_weights += (1 - mu_G_G[j]);
}
sum_weights = (sum_weights + 3 * sqrt(1 - noise_G_G)) / 2;
weights[0] = (1 - mu_G_G[0]);
weights[1] = (1 - mu_G_G[1]);
weights[2] = (1 - mu_G_G[2]);
weights[3] = (1 - mu_G_G[7]);
weights[4] = 3 * sqrt(1 - noise_G_G);
weights[5] = (1 - mu_G_G[3]);
weights[6] = (1 - mu_G_G[6]);
weights[7] = (1 - mu_G_G[5]);
weights[8] = (1 - mu_G_G[4]);
for (j = 0; j <= 8; j++)
{
for (x = 0; x <= 7; x++)
{
if (vectG[x] > vectG[x + 1])
{
hold = vectG[x];
hold2 = weights[x];
vectG[x] = vectG[x + 1];
weights[x] = weights[x + 1];
vectG[x + 1] = hold;
weights[x + 1] = hold2;
}
}
}
for (j = 8; j >= 0; j--)
{
suma += weights[j];
if (suma >= sum_weights)
{
if (j < 2)
{
sum_weights = sum_weights - (weights[0] + weights[1]);
sum_weights = sum_weights / 2;
suma = 0;
for (F = 8; F >= 2; F--)
{
suma += weights[F];
if (suma >= sum_weights)
{
d_Pout[(Row * m + Col) * channels + 1] = vectG[F];
F = -1;
}
}
j = -1;
}
else
{
d_Pout[(Row * m + Col) * channels + 1] = vectG[j];
j = -1;
}
suma = -1;
}
}
// fwrite (&BBB, 1, 1, header_file);
}
else
{
d_Pout[(Row * m + Col) * channels + 1] = vectG[4];
// fwrite (&BBB, 1, 1, header_file);
}
if (noise_R_R >= 0.3)
{
float weights[9], sum_weights = 0, hold2, suma = 0;
for (j = 0; j <= 7; j++)
{
sum_weights += (1 - mu_R_R[j]);
}
sum_weights = (sum_weights + 3 * sqrt(1 - noise_R_R)) / 2;
weights[0] = (1 - mu_R_R[0]);
weights[1] = (1 - mu_R_R[1]);
weights[2] = (1 - mu_R_R[2]);
weights[3] = (1 - mu_R_R[7]);
weights[4] = 3 * sqrt(1 - noise_R_R);
weights[5] = (1 - mu_R_R[3]);
weights[6] = (1 - mu_R_R[6]);
weights[7] = (1 - mu_R_R[5]);
weights[8] = (1 - mu_R_R[4]);
for (j = 0; j <= 8; j++)
{
for (x = 0; x <= 7; x++)
{
if (vectR[x] > vectR[x + 1])
{
hold = vectR[x];
hold2 = weights[x];
vectR[x] = vectR[x + 1];
weights[x] = weights[x + 1];
vectR[x + 1] = hold;
weights[x + 1] = hold2;
}
}
}
for (j = 8; j >= 0; j--)
{
suma += weights[j];
if (suma >= sum_weights)
{
if (j < 2)
{
sum_weights = sum_weights - (weights[0] + weights[1]);
sum_weights = sum_weights / 2;
suma = 0;
for (F = 8; F >= 2; F--)
{
suma += weights[F];
if (suma > sum_weights)
{
d_Pout[(Row * m + Col) * channels + 0] = vectR[F];
F = -1;
}
}
j = -1;
}
else
{
d_Pout[(Row * m + Col) * channels + 0] = vectR[j];
j = -1;
}
suma = -1;
}
}
// fwrite (&AAA, 1, 1, header_file);
}
else
{
d_Pout[(Row * m + Col) * channels + 0] = vectR[4];
//d_Pout[(Row * m + Col) * channels + 0] = 255;
// fwrite (&AAA, 1, 1, header_file);
}
//d_Pout[(Row * m + Col) * channels + 0] = 255;
}
}
__global__ void FTSCF_GPU_Original_Params
(unsigned char* d_Pout, const unsigned char* d_Pin, int n, int m,
float med_1, float var_1, float med_2, float med1, float med2, float var1, float THS) {
int Row = blockIdx.y*blockDim.y + threadIdx.y;
int Col = blockIdx.x*blockDim.x + threadIdx.x;
int M = 0, j = 0, x = 0;
float vectR[9], vectG[9], vectB[9], hold;
float gam_small_1[18] = { 0 }, gam_big_1[18] = { 0 };
float gam_small_2[18] = { 0 }, gam_big_2[18] = { 0 };
float array_R[25];
float array_G[25];
float array_B[25];
int F = 0, i = 0;
const int channels = 3;
if ((Row>1) && (Col>1) && (Row < m - 1) && (Col < n - 1)) {
//int tid = omp_get_thread_num();
//hacer el arreglo
F = 0;
for (i = -2; i <= 2; i++) {
for (j = -2; j <= 2; j++) {
array_R[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 0];
array_G[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 1];
array_B[F] = d_Pin[((Row + i) * m + (Col + j)) * 3 + 2];
F++;
}
}
// se copia a continuacion solo los 8-vecinos
M = 0;
for (F = 6; F <= 8; F++) {
vectG[M] = (array_G[F]);
vectR[M] = (array_R[F]);
vectB[M] = (array_B[F]);
M++;
}
for (F = 11; F <= 13; F++) {
vectG[M] = (array_G[F]);
vectR[M] = (array_R[F]);
vectB[M] = (array_B[F]);
M++;
}
for (F = 16; F <= 18; F++) {
vectG[M] = (array_G[F]);
vectR[M] = (array_R[F]);
vectB[M] = (array_B[F]);
M++;
}
float noreste_C_R, noreste_N1_R, noreste_N2_R, sur_C_R, sur_N1_R, sur_N2_R, noroeste_C_R, noroeste_N1_R, noroeste_N2_R;
float este_C_R, este_N1_R, este_N2_R, oeste_C_R, oeste_N1_R, oeste_N2_R, sureste_C_R, sureste_N1_R, sureste_N2_R;
float norte_C_R, norte_N1_R, norte_N2_R, suroeste_C_R, suroeste_N1_R, suroeste_N2_R;
float suroeste_NW_R, suroeste_SE_R, sur_W_R, sur_E_R, sureste_SW_R, sureste_NE_R, este_S_R, este_N_R, noreste_SE_R, noreste_NW_R;
float norte_W_R, norte_E_R, noroeste_NE_R, noroeste_SW_R, oeste_S_R, oeste_N_R;
float noreste_C_G, noreste_N1_G, noreste_N2_G, sur_C_G, sur_N1_G, sur_N2_G, noroeste_C_G, noroeste_N1_G, noroeste_N2_G;
float este_C_G, este_N1_G, este_N2_G, oeste_C_G, oeste_N1_G, oeste_N2_G, sureste_C_G, sureste_N1_G, sureste_N2_G;
float norte_C_G, norte_N1_G, norte_N2_G, suroeste_C_G, suroeste_N1_G, suroeste_N2_G;
float suroeste_NW_G, suroeste_SE_G, sur_W_G, sur_E_G, sureste_SW_G, sureste_NE_G, este_S_G, este_N_G, noreste_SE_G, noreste_NW_G;
float norte_W_G, norte_E_G, noroeste_NE_G, noroeste_SW_G, oeste_S_G, oeste_N_G;
float noreste_C_B, noreste_N1_B, noreste_N2_B, sur_C_B, sur_N1_B, sur_N2_B, noroeste_C_B, noroeste_N1_B, noroeste_N2_B;
float este_C_B, este_N1_B, este_N2_B, oeste_C_B, oeste_N1_B, oeste_N2_B, sureste_C_B, sureste_N1_B, sureste_N2_B;
float norte_C_B, norte_N1_B, norte_N2_B, suroeste_C_B, suroeste_N1_B, suroeste_N2_B;
float suroeste_NW_B, suroeste_SE_B, sur_W_B, sur_E_B, sureste_SW_B, sureste_NE_B, este_S_B, este_N_B, noreste_SE_B, noreste_NW_B;
float norte_W_B, norte_E_B, noroeste_NE_B, noroeste_SW_B, oeste_S_B, oeste_N_B;
float largo[9], largo_1[9], largo_2[9], LARGO[9], LARGO_1[9], LARGO_2[9];
float noise_R_R, noise_G_G, noise_B_B;
int SW_C_B, SW_N1_B, SW_N2_B, SW_NW_B, SW_SE_B, S_C_B, S_N1_B, S_N2_B, S_W_B, S_E_B, SE_C_B, SE_N1_B, SE_N2_B, SE_SW_B, SE_NE_B;
int E_C_B, E_N1_B, E_N2_B, E_S_B, E_N_B, NE_C_B, NE_N1_B, NE_N2_B, NE_SE_B, NE_NW_B, N_C_B, N_N1_B, N_N2_B, N_W_B, N_E_B;
int NW_C_B, NW_N1_B, NW_N2_B, NW_NE_B, NW_SW_B, W_C_B, W_N1_B, W_N2_B, W_S_B, W_N_B;
int SW_C_R, SW_N1_R, SW_N2_R, SW_NW_R, SW_SE_R, S_C_R, S_N1_R, S_N2_R, S_W_R, S_E_R, SE_C_R, SE_N1_R, SE_N2_R, SE_SW_R, SE_NE_R;
int E_C_R, E_N1_R, E_N2_R, E_S_R, E_N_R, NE_C_R, NE_N1_R, NE_N2_R, NE_SE_R, NE_NW_R, N_C_R, N_N1_R, N_N2_R, N_W_R, N_E_R;
int NW_C_R, NW_N1_R, NW_N2_R, NW_NE_R, NW_SW_R, W_C_R, W_N1_R, W_N2_R, W_S_R, W_N_R;
int SW_C_G, SW_N1_G, SW_N2_G, SW_NW_G, SW_SE_G, S_C_G, S_N1_G, S_N2_G, S_W_G, S_E_G, SE_C_G, SE_N1_G, SE_N2_G, SE_SW_G, SE_NE_G;
int E_C_G, E_N1_G, E_N2_G, E_S_G, E_N_G, NE_C_G, NE_N1_G, NE_N2_G, NE_SE_G, NE_NW_G, N_C_G, N_N1_G, N_N2_G, N_W_G, N_E_G;
int NW_C_G, NW_N1_G, NW_N2_G, NW_NE_G, NW_SW_G, W_C_G, W_N1_G, W_N2_G, W_S_G, W_N_G;
float cons1 = 255, cons2 = 255;
// blue
SW_C_B = abs(array_B[6] - array_B[12]);
SW_N1_B = abs(array_B[10] - array_B[16]);
SW_N2_B = abs(array_B[2] - array_B[8]);
SW_NW_B = abs(array_B[12] - array_B[16]);
SW_SE_B = abs(array_B[12] - array_B[8]);
S_C_B = abs(array_B[7] - array_B[12]);
S_N1_B = abs(array_B[6] - array_B[11]);
S_N2_B = abs(array_B[8] - array_B[13]);
S_W_B = abs(array_B[12] - array_B[11]);
S_E_B = abs(array_B[12] - array_B[13]);
SE_C_B = abs(array_B[8] - array_B[12]);
SE_N1_B = abs(array_B[2] - array_B[6]);
SE_N2_B = abs(array_B[14] - array_B[18]);
SE_SW_B = abs(array_B[12] - array_B[6]);
SE_NE_B = abs(array_B[12] - array_B[18]);
E_C_B = abs(array_B[13] - array_B[12]);
E_N1_B = abs(array_B[8] - array_B[7]);
E_N2_B = abs(array_B[18] - array_B[17]);
E_S_B = abs(array_B[12] - array_B[7]);
E_N_B = abs(array_B[12] - array_B[17]);
NE_C_B = abs(array_B[18] - array_B[12]);
NE_N1_B = abs(array_B[14] - array_B[8]);
NE_N2_B = abs(array_B[22] - array_B[16]);
NE_SE_B = abs(array_B[12] - array_B[8]);
NE_NW_B = abs(array_B[12] - array_B[16]);
N_C_B = abs(array_B[17] - array_B[12]);
N_N1_B = abs(array_B[18] - array_B[13]);
N_N2_B = abs(array_B[16] - array_B[11]);
N_W_B = abs(array_B[12] - array_B[11]);
N_E_B = abs(array_B[12] - array_B[13]);
NW_C_B = abs(array_B[16] - array_B[12]);
NW_N1_B = abs(array_B[22] - array_B[18]);
NW_N2_B = abs(array_B[10] - array_B[6]);
NW_NE_B = abs(array_B[12] - array_B[18]);
NW_SW_B = abs(array_B[12] - array_B[6]);
W_C_B = abs(array_B[11] - array_B[12]);
W_N1_B = abs(array_B[16] - array_B[17]);
W_N2_B = abs(array_B[6] - array_B[7]);
W_S_B = abs(array_B[12] - array_B[7]);
W_N_B = abs(array_B[12] - array_B[17]);
SW_C_G = abs(array_G[6] - array_G[12]);
SW_N1_G = abs(array_G[10] - array_G[16]);
SW_N2_G = abs(array_G[2] - array_G[8]);
SW_NW_G = abs(array_G[12] - array_G[16]);
SW_SE_G = abs(array_G[12] - array_G[8]);
S_C_G = abs(array_G[7] - array_G[12]);
S_N1_G = abs(array_G[6] - array_G[11]);
S_N2_G = abs(array_G[8] - array_G[13]);
S_W_G = abs(array_G[12] - array_G[11]);
S_E_G = abs(array_G[12] - array_G[13]);
SE_C_G = abs(array_G[8] - array_G[12]);
SE_N1_G = abs(array_G[2] - array_G[6]);
SE_N2_G = abs(array_G[14] - array_G[18]);
SE_SW_G = abs(array_G[12] - array_G[6]);
SE_NE_G = abs(array_G[12] - array_G[18]);
E_C_G = abs(array_G[13] - array_G[12]);
E_N1_G = abs(array_G[8] - array_G[7]);
E_N2_G = abs(array_G[18] - array_G[17]);
E_S_G = abs(array_G[12] - array_G[7]);
E_N_G = abs(array_G[12] - array_G[17]);
NE_C_G = abs(array_G[18] - array_G[12]);
NE_N1_G = abs(array_G[14] - array_G[8]);
NE_N2_G = abs(array_G[22] - array_G[16]);
NE_SE_G = abs(array_G[12] - array_G[8]);
NE_NW_G = abs(array_G[12] - array_G[16]);
N_C_G = abs(array_G[17] - array_G[12]);
N_N1_G = abs(array_G[18] - array_G[13]);
N_N2_G = abs(array_G[16] - array_G[11]);
N_W_G = abs(array_G[12] - array_G[11]);
N_E_G = abs(array_G[12] - array_G[13]);
NW_C_G = abs(array_G[16] - array_G[12]);
NW_N1_G = abs(array_G[22] - array_G[18]);
NW_N2_G = abs(array_G[10] - array_G[6]);
NW_NE_G = abs(array_G[12] - array_G[18]);
NW_SW_G = abs(array_G[12] - array_G[6]);
W_C_G = abs(array_G[11] - array_G[12]);
W_N1_G = abs(array_G[16] - array_G[17]);
W_N2_G = abs(array_G[6] - array_G[7]);
W_S_G = abs(array_G[12] - array_G[7]);
W_N_G = abs(array_G[12] - array_G[17]);
SW_C_R = abs(array_R[6] - array_R[12]);
SW_N1_R = abs(array_R[10] - array_R[16]);
SW_N2_R = abs(array_R[2] - array_R[8]);
SW_NW_R = abs(array_R[12] - array_R[16]);
SW_SE_R = abs(array_R[12] - array_R[8]);
S_C_R = abs(array_R[7] - array_R[12]);
S_N1_R = abs(array_R[6] - array_R[11]);
S_N2_R = abs(array_R[8] - array_R[13]);
S_W_R = abs(array_R[12] - array_R[11]);
S_E_R = abs(array_R[12] - array_R[13]);
SE_C_R = abs(array_R[8] - array_R[12]);
SE_N1_R = abs(array_R[2] - array_R[6]);
SE_N2_R = abs(array_R[14] - array_R[18]);
SE_SW_R = abs(array_R[12] - array_R[6]);
SE_NE_R = abs(array_R[12] - array_R[18]);
E_C_R = abs(array_R[13] - array_R[12]);
E_N1_R = abs(array_R[8] - array_R[7]);
E_N2_R = abs(array_R[18] - array_R[17]);
E_S_R = abs(array_R[12] - array_R[7]);
E_N_R = abs(array_R[12] - array_R[17]);
NE_C_R = abs(array_R[18] - array_R[12]);
NE_N1_R = abs(array_R[14] - array_R[8]);
NE_N2_R = abs(array_R[22] - array_R[16]);
NE_SE_R = abs(array_R[12] - array_R[8]);
NE_NW_R = abs(array_R[12] - array_R[16]);
N_C_R = abs(array_R[17] - array_R[12]);
N_N1_R = abs(array_R[18] - array_R[13]);
N_N2_R = abs(array_R[16] - array_R[11]);
N_W_R = abs(array_R[12] - array_R[11]);
N_E_R = abs(array_R[12] - array_R[13]);
NW_C_R = abs(array_R[16] - array_R[12]);
NW_N1_R = abs(array_R[22] - array_R[18]);
NW_N2_R = abs(array_R[10] - array_R[6]);
NW_NE_R = abs(array_R[12] - array_R[18]);
NW_SW_R = abs(array_R[12] - array_R[6]);
W_C_R = abs(array_R[11] - array_R[12]);
W_N1_R = abs(array_R[16] - array_R[17]);
W_N2_R = abs(array_R[6] - array_R[7]);
W_S_R = abs(array_R[12] - array_R[7]);
W_N_R = abs(array_R[12] - array_R[17]);
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[12])) == 0) suroeste_C_R = 0;
else suroeste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[10] * array_R[16])) == 0) suroeste_N1_R = 0;
else suroeste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[10] * array_R[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[2] * array_R[8])) == 0) suroeste_N2_R = 0;
else suroeste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[2] * array_R[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[2], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[16])) == 0) suroeste_NW_R = 0;
else suroeste_NW_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[8])) == 0) suroeste_SE_R = 0;
else suroeste_SE_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[7] * array_R[12])) == 0) sur_C_R = 0;
else sur_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[7] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[11])) == 0) sur_N1_R = 0;
else sur_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[13])) == 0) sur_N2_R = 0;
else sur_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[11])) == 0) sur_W_R = 0;
else sur_W_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[13])) == 0) sur_E_R = 0;
else sur_E_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[12])) == 0) sureste_C_R = 0;
else sureste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[2])) == 0) sureste_N1_R = 0;
else sureste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[2])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[2], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[14] * array_R[18])) == 0) sureste_N2_R = 0;
else sureste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[14] * array_R[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[6])) == 0) sureste_SW_R = 0;
else sureste_SW_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[6])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[18])) == 0) sureste_NE_R = 0;
else sureste_NE_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[13] * array_R[12])) == 0) este_C_R = 0;
else este_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[13] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[7])) == 0) este_N1_R = 0;
else este_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[8] * array_R[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[17])) == 0) este_N2_R = 0;
else este_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[7])) == 0) este_S_R = 0;
else este_S_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[17])) == 0) este_N_R = 0;
else este_N_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[12])) == 0) noreste_C_R = 0;
else noreste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[14] * array_R[8])) == 0) noreste_N1_R = 0;
else noreste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[14] * array_R[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[22] * array_R[16])) == 0) noreste_N2_R = 0;
else noreste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[22] * array_R[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[8])) == 0) noreste_SE_R = 0;
else noreste_SE_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[16])) == 0) noreste_NW_R = 0;
else noreste_NW_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[17] * array_R[12])) == 0) norte_C_R = 0;
else norte_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[17] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[13])) == 0) norte_N1_R = 0;
else norte_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[18] * array_R[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[11])) == 0) norte_N2_R = 0;
else norte_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[13])) == 0) norte_E_R = 0;
else norte_E_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[11])) == 0) norte_W_R = 0;
else norte_W_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[12])) == 0) noroeste_C_R = 0;
else noroeste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[22] * array_R[18])) == 0) noroeste_N1_R = 0;
else noroeste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[22] * array_R[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[10])) == 0) noroeste_N2_R = 0;
else noroeste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[10])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[18])) == 0) noroeste_NE_R = 0;
else noroeste_NE_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[12])) == 0) noroeste_SW_R = 0;
else noroeste_SW_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[11] * array_R[12])) == 0) oeste_C_R = 0;
else oeste_C_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[11] * array_R[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[17])) == 0) oeste_N1_R = 0;
else oeste_N1_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[16] * array_R[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[7])) == 0) oeste_N2_R = 0;
else oeste_N2_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[6] * array_R[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[17])) == 0) oeste_N_R = 0;
else oeste_N_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[17], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[7])) == 0) oeste_S_R = 0;
else oeste_S_R = acos(((cons1 + cons1) + (cons2*cons2) + (array_R[12] * array_R[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_R[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[12])) == 0) suroeste_C_G = 0;
else suroeste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[10] * array_G[16])) == 0) suroeste_N1_G = 0;
else suroeste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[10] * array_G[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[2] * array_G[8])) == 0) suroeste_N2_G = 0;
else suroeste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[2] * array_G[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[2], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[16])) == 0) suroeste_NW_G = 0;
else suroeste_NW_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[8])) == 0) suroeste_SE_G = 0;
else suroeste_SE_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[7] * array_G[12])) == 0) sur_C_G = 0;
else sur_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[7] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[11])) == 0) sur_N1_G = 0;
else sur_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[13])) == 0) sur_N2_G = 0;
else sur_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[11])) == 0) sur_W_G = 0;
else sur_W_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[13])) == 0) sur_E_G = 0;
else sur_E_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[12])) == 0) sureste_C_G = 0;
else sureste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[2])) == 0) sureste_N1_G = 0;
else sureste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[2])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[2], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[14] * array_G[18])) == 0) sureste_N2_G = 0;
else sureste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[14] * array_G[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[6])) == 0) sureste_SW_G = 0;
else sureste_SW_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[6])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[18])) == 0) sureste_NE_G = 0;
else sureste_NE_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[13] * array_G[12])) == 0) este_C_G = 0;
else este_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[13] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[7])) == 0) este_N1_G = 0;
else este_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[8] * array_G[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[17])) == 0) este_N2_G = 0;
else este_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[7])) == 0) este_S_G = 0;
else este_S_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[17])) == 0) este_N_G = 0;
else este_N_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[12])) == 0) noreste_C_G = 0;
else noreste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[14] * array_G[8])) == 0) noreste_N1_G = 0;
else noreste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[14] * array_G[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[22] * array_G[16])) == 0) noreste_N2_G = 0;
else noreste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[22] * array_G[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[8])) == 0) noreste_SE_G = 0;
else noreste_SE_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[16])) == 0) noreste_NW_G = 0;
else noreste_NW_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[17] * array_G[12])) == 0) norte_C_G = 0;
else norte_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[17] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[13])) == 0) norte_N1_G = 0;
else norte_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[18] * array_G[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[11])) == 0) norte_N2_G = 0;
else norte_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[13])) == 0) norte_E_G = 0;
else norte_E_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[11])) == 0) norte_W_G = 0;
else norte_W_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[12])) == 0) noroeste_C_G = 0;
else noroeste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[22] * array_G[18])) == 0) noroeste_N1_G = 0;
else noroeste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[22] * array_G[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[10])) == 0) noroeste_N2_G = 0;
else noroeste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[10])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[18])) == 0) noroeste_NE_G = 0;
else noroeste_NE_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[12])) == 0) noroeste_SW_G = 0;
else noroeste_SW_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[11] * array_G[12])) == 0) oeste_C_G = 0;
else oeste_C_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[11] * array_G[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[17])) == 0) oeste_N1_G = 0;
else oeste_N1_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[16] * array_G[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[7])) == 0) oeste_N2_G = 0;
else oeste_N2_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[6] * array_G[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[17])) == 0) oeste_N_G = 0;
else oeste_N_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[17], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[7])) == 0) oeste_S_G = 0;
else oeste_S_G = acos(((cons1 + cons1) + (cons2*cons2) + (array_G[12] * array_G[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_G[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[12])) == 0) suroeste_C_B = 0;
else suroeste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[10] * array_B[16])) == 0) suroeste_N1_B = 0;
else suroeste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[10] * array_B[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[2] * array_B[8])) == 0) suroeste_N2_B = 0;
else suroeste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[2] * array_B[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[2], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[16])) == 0) suroeste_NW_B = 0;
else suroeste_NW_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[8])) == 0) suroeste_SE_B = 0;
else suroeste_SE_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[7] * array_B[12])) == 0) sur_C_B = 0;
else sur_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[7] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2)))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2)))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[11])) == 0) sur_N1_B = 0;
else sur_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[13])) == 0) sur_N2_B = 0;
else sur_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[11])) == 0) sur_W_B = 0;
else sur_W_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[13])) == 0) sur_E_B = 0;
else sur_E_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[12])) == 0) sureste_C_B = 0;
else sureste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[2])) == 0) sureste_N1_B = 0;
else sureste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[2])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[2], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[14] * array_B[18])) == 0) sureste_N2_B = 0;
else sureste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[14] * array_B[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[6])) == 0) sureste_SW_B = 0;
else sureste_SW_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[6])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[18])) == 0) sureste_NE_B = 0;
else sureste_NE_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[13] * array_B[12])) == 0) este_C_B = 0;
else este_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[13] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[7])) == 0) este_N1_B = 0;
else este_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[8] * array_B[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[17])) == 0) este_N2_B = 0;
else este_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[7])) == 0) este_S_B = 0;
else este_S_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[17])) == 0) este_N_B = 0;
else este_N_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[12])) == 0) noreste_C_B = 0;
else noreste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[14] * array_B[8])) == 0) noreste_N1_B = 0;
else noreste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[14] * array_B[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[14], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[22] * array_B[16])) == 0) noreste_N2_B = 0;
else noreste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[22] * array_B[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[8])) == 0) noreste_SE_B = 0;
else noreste_SE_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[8])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[8], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[16])) == 0) noreste_NW_B = 0;
else noreste_NW_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[16])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[17] * array_B[12])) == 0) norte_C_B = 0;
else norte_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[17] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[13])) == 0) norte_N1_B = 0;
else norte_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[18] * array_B[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[11])) == 0) norte_N2_B = 0;
else norte_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[13])) == 0) norte_E_B = 0;
else norte_E_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[13])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[13], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[11])) == 0) norte_W_B = 0;
else norte_W_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[11])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[12])) == 0) noroeste_C_B = 0;
else noroeste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[22] * array_B[18])) == 0) noroeste_N1_B = 0;
else noroeste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[22] * array_B[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[22], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[10])) == 0) noroeste_N2_B = 0;
else noroeste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[10])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[10], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[18])) == 0) noroeste_NE_B = 0;
else noroeste_NE_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[18])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[18], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[12])) == 0) noroeste_SW_B = 0;
else noroeste_SW_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[11] * array_B[12])) == 0) oeste_C_B = 0;
else oeste_C_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[11] * array_B[12])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[11], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[17])) == 0) oeste_N1_B = 0;
else oeste_N1_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[16] * array_B[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[16], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[7])) == 0) oeste_N2_B = 0;
else oeste_N2_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[6] * array_B[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[6], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[17])) == 0) oeste_N_B = 0;
else oeste_N_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[17])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[17], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))))));
if (((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[7])) == 0) oeste_S_B = 0;
else oeste_S_B = acos(((cons1 + cons1) + (cons2*cons2) + (array_B[12] * array_B[7])) / ((sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[7], 2))*(sqrt(pow(cons1, 2) + pow(cons1, 2) + pow(array_B[12], 2))))));
// SUROESTE
/*
med_1 = 1, var_1 = 0.8;
med_2 = 0.1;
*/
if (suroeste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((suroeste_C_R)-med_1), 2) / (2 * var_1))));
if (suroeste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((suroeste_N1_R)-med_2), 2) / (2 * var_1))));
if (suroeste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((suroeste_N2_R)-med_2), 2) / (2 * var_1))));
if (suroeste_NW_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((suroeste_NW_R)-med_1), 2) / (2 * var_1))));
if (suroeste_SE_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((suroeste_SE_R)-med_1), 2) / (2 * var_1))));
largo[0] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sur_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sur_C_R)-med_1), 2) / (2 * var_1))));
if (sur_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sur_N1_R)-med_2), 2) / (2 * var_1))));
if (sur_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sur_N2_R)-med_2), 2) / (2 * var_1))));
if (sur_W_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sur_W_R)-med_1), 2) / (2 * var_1))));
if (sur_E_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sur_E_R)-med_1), 2) / (2 * var_1))));
largo[1] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sureste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sureste_C_R)-med_1), 2) / (2 * var_1))));
if (sureste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sureste_N1_R)-med_2), 2) / (2 * var_1))));
if (sureste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sureste_N2_R)-med_2), 2) / (2 * var_1))));
if (sureste_NE_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sureste_NE_R)-med_1), 2) / (2 * var_1))));
if (sureste_SW_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sureste_SW_R)-med_1), 2) / (2 * var_1))));
largo[2] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (este_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((este_C_R)-med_1), 2) / (2 * var_1))));
if (este_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((este_N1_R)-med_2), 2) / (2 * var_1))));
if (este_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((este_N2_R)-med_2), 2) / (2 * var_1))));
if (este_N_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((este_N_R)-med_1), 2) / (2 * var_1))));
if (este_S_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((este_S_R)-med_1), 2) / (2 * var_1))));
largo[3] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noreste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noreste_C_R)-med_1), 2) / (2 * var_1))));
if (noreste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noreste_N1_R)-med_2), 2) / (2 * var_1))));
if (noreste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noreste_N2_R)-med_2), 2) / (2 * var_1))));
if (noreste_NW_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noreste_NW_R)-med_1), 2) / (2 * var_1))));
if (noreste_SE_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noreste_SE_R)-med_1), 2) / (2 * var_1))));
largo[4] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (norte_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((norte_C_R)-med_1), 2) / (2 * var_1))));
if (norte_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((norte_N1_R)-med_2), 2) / (2 * var_1))));
if (norte_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((norte_N2_R)-med_2), 2) / (2 * var_1))));
if (norte_W_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((norte_W_R)-med_1), 2) / (2 * var_1))));
if (norte_E_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((norte_E_R)-med_1), 2) / (2 * var_1))));
largo[5] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noroeste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noroeste_C_R)-med_1), 2) / (2 * var_1))));
if (noroeste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noroeste_N1_R)-med_2), 2) / (2 * var_1))));
if (noroeste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noroeste_N2_R)-med_2), 2) / (2 * var_1))));
if (noroeste_NE_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noroeste_NE_R)-med_1), 2) / (2 * var_1))));
if (noroeste_SW_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noroeste_SW_R)-med_1), 2) / (2 * var_1))));
largo[6] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (oeste_C_R > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((oeste_C_R)-med_1), 2) / (2 * var_1))));
if (oeste_N1_R < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((oeste_N1_R)-med_2), 2) / (2 * var_1))));
if (oeste_N2_R < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((oeste_N2_R)-med_2), 2) / (2 * var_1))));
if (oeste_N_R > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((oeste_N_R)-med_1), 2) / (2 * var_1))));
if (oeste_S_R > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((oeste_S_R)-med_1), 2) / (2 * var_1))));
largo[7] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (suroeste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((suroeste_C_G)-med_1), 2) / (2 * var_1))));
if (suroeste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((suroeste_N1_G)-med_2), 2) / (2 * var_1))));
if (suroeste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((suroeste_N2_G)-med_2), 2) / (2 * var_1))));
if (suroeste_NW_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((suroeste_NW_G)-med_1), 2) / (2 * var_1))));
if (suroeste_SE_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((suroeste_SE_G)-med_1), 2) / (2 * var_1))));
largo_1[0] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sur_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sur_C_G)-med_1), 2) / (2 * var_1))));
if (sur_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sur_N1_G)-med_2), 2) / (2 * var_1))));
if (sur_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sur_N2_G)-med_2), 2) / (2 * var_1))));
if (sur_W_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sur_W_G)-med_1), 2) / (2 * var_1))));
if (sur_E_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sur_E_G)-med_1), 2) / (2 * var_1))));
largo_1[1] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sureste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sureste_C_G)-med_1), 2) / (2 * var_1))));
if (sureste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sureste_N1_G)-med_2), 2) / (2 * var_1))));
if (sureste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sureste_N2_G)-med_2), 2) / (2 * var_1))));
if (sureste_NE_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sureste_NE_G)-med_1), 2) / (2 * var_1))));
if (sureste_SW_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sureste_SW_G)-med_1), 2) / (2 * var_1))));
largo_1[2] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (este_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((este_C_G)-med_1), 2) / (2 * var_1))));
if (este_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((este_N1_G)-med_2), 2) / (2 * var_1))));
if (este_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((este_N2_G)-med_2), 2) / (2 * var_1))));
if (este_N_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((este_N_G)-med_1), 2) / (2 * var_1))));
if (este_S_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((este_S_G)-med_1), 2) / (2 * var_1))));
largo_1[3] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noreste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noreste_C_G)-med_1), 2) / (2 * var_1))));
if (noreste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noreste_N1_G)-med_2), 2) / (2 * var_1))));
if (noreste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noreste_N2_G)-med_2), 2) / (2 * var_1))));
if (noreste_NW_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noreste_NW_G)-med_1), 2) / (2 * var_1))));
if (noreste_SE_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noreste_SE_G)-med_1), 2) / (2 * var_1))));
largo_1[4] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (norte_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((norte_C_G)-med_1), 2) / (2 * var_1))));
if (norte_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((norte_N1_G)-med_2), 2) / (2 * var_1))));
if (norte_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((norte_N2_G)-med_2), 2) / (2 * var_1))));
if (norte_W_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((norte_W_G)-med_1), 2) / (2 * var_1))));
if (norte_E_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((norte_E_G)-med_1), 2) / (2 * var_1))));
largo_1[5] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noroeste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noroeste_C_G)-med_1), 2) / (2 * var_1))));
if (noroeste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noroeste_N1_G)-med_2), 2) / (2 * var_1))));
if (noroeste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noroeste_N2_G)-med_2), 2) / (2 * var_1))));
if (noroeste_NE_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noroeste_NE_G)-med_1), 2) / (2 * var_1))));
if (noroeste_SW_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noroeste_SW_G)-med_1), 2) / (2 * var_1))));
largo_1[6] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (oeste_C_G > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((oeste_C_G)-med_1), 2) / (2 * var_1))));
if (oeste_N1_G < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((oeste_N1_G)-med_2), 2) / (2 * var_1))));
if (oeste_N2_G < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((oeste_N2_G)-med_2), 2) / (2 * var_1))));
if (oeste_N_G > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((oeste_N_G)-med_1), 2) / (2 * var_1))));
if (oeste_S_G > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((oeste_S_G)-med_1), 2) / (2 * var_1))));
largo_1[7] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
// Blue channel, SUROESTE (southwest) direction: Gaussian membership grades
// (saturate to 1 past the threshold, else exp(-(x-med)^2 / (2*var))),
// combined into the fuzzy activation largo_2[0].
// NOTE(review): this appears to be part of a fuzzy impulse-noise detector
// replicated per direction and per RGB channel — confirm against the caller.
if (suroeste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((suroeste_C_B)-med_1), 2) / (2 * var_1))));
if (suroeste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((suroeste_N1_B)-med_2), 2) / (2 * var_1))));
if (suroeste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((suroeste_N2_B)-med_2), 2) / (2 * var_1))));
if (suroeste_NW_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((suroeste_NW_B)-med_1), 2) / (2 * var_1))));
if (suroeste_SE_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((suroeste_SE_B)-med_1), 2) / (2 * var_1))));
// FIX: this line previously read gam_big_2[0] and gam_big_2[2], which at this
// point hold stale values from an unrelated section — the memberships above
// were all stored in gam_big_1. Every sibling section (largo_2[1]..largo_2[7],
// and all largo[...] / largo_1[...] entries) multiplies gam_big_1 here.
largo_2[0] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sur_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sur_C_B)-med_1), 2) / (2 * var_1))));
if (sur_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sur_N1_B)-med_2), 2) / (2 * var_1))));
if (sur_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sur_N2_B)-med_2), 2) / (2 * var_1))));
if (sur_W_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sur_W_B)-med_1), 2) / (2 * var_1))));
if (sur_E_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sur_E_B)-med_1), 2) / (2 * var_1))));
largo_2[1] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (sureste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((sureste_C_B)-med_1), 2) / (2 * var_1))));
if (sureste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((sureste_N1_B)-med_2), 2) / (2 * var_1))));
if (sureste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((sureste_N2_B)-med_2), 2) / (2 * var_1))));
if (sureste_NE_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((sureste_NE_B)-med_1), 2) / (2 * var_1))));
if (sureste_SW_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((sureste_SW_B)-med_1), 2) / (2 * var_1))));
largo_2[2] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (este_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((este_C_B)-med_1), 2) / (2 * var_1))));
if (este_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((este_N1_B)-med_2), 2) / (2 * var_1))));
if (este_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((este_N2_B)-med_2), 2) / (2 * var_1))));
if (este_N_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((este_N_B)-med_1), 2) / (2 * var_1))));
if (este_S_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((este_S_B)-med_1), 2) / (2 * var_1))));
largo_2[3] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noreste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noreste_C_B)-med_1), 2) / (2 * var_1))));
if (noreste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noreste_N1_B)-med_2), 2) / (2 * var_1))));
if (noreste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noreste_N2_B)-med_2), 2) / (2 * var_1))));
if (noreste_NW_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noreste_NW_B)-med_1), 2) / (2 * var_1))));
if (noreste_SE_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noreste_SE_B)-med_1), 2) / (2 * var_1))));
largo_2[4] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (norte_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((norte_C_B)-med_1), 2) / (2 * var_1))));
if (norte_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((norte_N1_B)-med_2), 2) / (2 * var_1))));
if (norte_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((norte_N2_B)-med_2), 2) / (2 * var_1))));
if (norte_W_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((norte_W_B)-med_1), 2) / (2 * var_1))));
if (norte_E_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((norte_E_B)-med_1), 2) / (2 * var_1))));
largo_2[5] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (noroeste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((noroeste_C_B)-med_1), 2) / (2 * var_1))));
if (noroeste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((noroeste_N1_B)-med_2), 2) / (2 * var_1))));
if (noroeste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((noroeste_N2_B)-med_2), 2) / (2 * var_1))));
if (noroeste_NE_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((noroeste_NE_B)-med_1), 2) / (2 * var_1))));
if (noroeste_SW_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((noroeste_SW_B)-med_1), 2) / (2 * var_1))));
largo_2[6] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
if (oeste_C_B > med_1) gam_big_1[0] = 1;
else gam_big_1[0] = (exp(-(pow(((oeste_C_B)-med_1), 2) / (2 * var_1))));
if (oeste_N1_B < med_2) gam_small_1[0] = 1;
else gam_small_1[0] = (exp(-(pow(((oeste_N1_B)-med_2), 2) / (2 * var_1))));
if (oeste_N2_B < med_2) gam_small_1[1] = 1;
else gam_small_1[1] = (exp(-(pow(((oeste_N2_B)-med_2), 2) / (2 * var_1))));
if (oeste_N_B > med_1) gam_big_1[1] = 1;
else gam_big_1[1] = (exp(-(pow(((oeste_N_B)-med_1), 2) / (2 * var_1))));
if (oeste_S_B > med_1) gam_big_1[2] = 1;
else gam_big_1[2] = (exp(-(pow(((oeste_S_B)-med_1), 2) / (2 * var_1))));
largo_2[7] = (gam_big_1[0] * gam_small_1[0] * gam_small_1[1] * gam_big_1[1] * gam_big_1[2]);
/*
med1 = 60;
med2 = 10;
var1 = 1000;
*/
if (SW_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SW_C_R)-med1), 2) / (2 * var1))));
if (SW_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SW_N1_R)-med2), 2) / (2 * var1))));
if (SW_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SW_N2_R)-med2), 2) / (2 * var1))));
if (SW_NW_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SW_NW_R)-med1), 2) / (2 * var1))));
if (SW_SE_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SW_SE_R)-med1), 2) / (2 * var1))));
LARGO[0] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (S_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((S_C_R)-med1), 2) / (2 * var1))));
if (S_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((S_N1_R)-med2), 2) / (2 * var1))));
if (S_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((S_N2_R)-med2), 2) / (2 * var1))));
if (S_W_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((S_W_R)-med1), 2) / (2 * var1))));
if (S_E_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((S_E_R)-med1), 2) / (2 * var1))));
LARGO[1] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (SE_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SE_C_R)-med1), 2) / (2 * var1))));
if (SE_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SE_N1_R)-med2), 2) / (2 * var1))));
if (SE_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SE_N2_R)-med2), 2) / (2 * var1))));
if (SE_NE_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SE_NE_R)-med1), 2) / (2 * var1))));
if (SE_SW_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SE_SW_R)-med1), 2) / (2 * var1))));
LARGO[2] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (E_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((E_C_R)-med1), 2) / (2 * var1))));
if (E_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((E_N1_R)-med2), 2) / (2 * var1))));
if (E_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((E_N2_R)-med2), 2) / (2 * var1))));
if (E_N_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((E_N_R)-med1), 2) / (2 * var1))));
if (E_S_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((E_S_R)-med1), 2) / (2 * var1))));
LARGO[3] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NE_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NE_C_R)-med1), 2) / (2 * var1))));
if (NE_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NE_N1_R)-med2), 2) / (2 * var1))));
if (NE_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NE_N2_R)-med2), 2) / (2 * var1))));
if (NE_NW_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NE_NW_R)-med1), 2) / (2 * var1))));
if (NE_SE_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NE_SE_R)-med1), 2) / (2 * var1))));
LARGO[4] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (N_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((N_C_R)-med1), 2) / (2 * var1))));
if (N_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((N_N1_R)-med2), 2) / (2 * var1))));
if (N_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((N_N2_R)-med2), 2) / (2 * var1))));
if (N_W_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((N_W_R)-med1), 2) / (2 * var1))));
if (N_E_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((N_E_R)-med1), 2) / (2 * var1))));
LARGO[5] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NW_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NW_C_R)-med1), 2) / (2 * var1))));
if (NW_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NW_N1_R)-med2), 2) / (2 * var1))));
if (NW_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NW_N2_R)-med2), 2) / (2 * var1))));
if (NW_NE_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NW_NE_R)-med1), 2) / (2 * var1))));
if (NW_SW_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NW_SW_R)-med1), 2) / (2 * var1))));
LARGO[6] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (W_C_R > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((W_C_R)-med1), 2) / (2 * var1))));
if (W_N1_R < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((W_N1_R)-med2), 2) / (2 * var1))));
if (W_N2_R < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((W_N2_R)-med2), 2) / (2 * var1))));
if (W_N_R > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((W_N_R)-med1), 2) / (2 * var1))));
if (W_S_R > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((W_S_R)-med1), 2) / (2 * var1))));
LARGO[7] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (SW_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SW_C_G)-med1), 2) / (2 * var1))));
if (SW_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SW_N1_G)-med2), 2) / (2 * var1))));
if (SW_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SW_N2_G)-med2), 2) / (2 * var1))));
if (SW_NW_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SW_NW_G)-med1), 2) / (2 * var1))));
if (SW_SE_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SW_SE_G)-med1), 2) / (2 * var1))));
LARGO_1[0] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (S_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((S_C_G)-med1), 2) / (2 * var1))));
if (S_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((S_N1_G)-med2), 2) / (2 * var1))));
if (S_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((S_N2_G)-med2), 2) / (2 * var1))));
if (S_W_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((S_W_G)-med1), 2) / (2 * var1))));
if (S_E_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((S_E_G)-med1), 2) / (2 * var1))));
LARGO_1[1] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (SE_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SE_C_G)-med1), 2) / (2 * var1))));
if (SE_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SE_N1_G)-med2), 2) / (2 * var1))));
if (SE_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SE_N2_G)-med2), 2) / (2 * var1))));
if (SE_NE_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SE_NE_G)-med1), 2) / (2 * var1))));
if (SE_SW_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SE_SW_G)-med1), 2) / (2 * var1))));
LARGO_1[2] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (E_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((E_C_G)-med1), 2) / (2 * var1))));
if (E_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((E_N1_G)-med2), 2) / (2 * var1))));
if (E_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((E_N2_G)-med2), 2) / (2 * var1))));
if (E_N_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((E_N_G)-med1), 2) / (2 * var1))));
if (E_S_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((E_S_G)-med1), 2) / (2 * var1))));
LARGO_1[3] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NE_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NE_C_G)-med1), 2) / (2 * var1))));
if (NE_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NE_N1_G)-med2), 2) / (2 * var1))));
if (NE_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NE_N2_G)-med2), 2) / (2 * var1))));
if (NE_NW_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NE_NW_G)-med1), 2) / (2 * var1))));
if (NE_SE_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NE_SE_G)-med1), 2) / (2 * var1))));
LARGO_1[4] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (N_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((N_C_G)-med1), 2) / (2 * var1))));
if (N_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((N_N1_G)-med2), 2) / (2 * var1))));
if (N_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((N_N2_G)-med2), 2) / (2 * var1))));
if (N_W_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((N_W_G)-med1), 2) / (2 * var1))));
if (N_E_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((N_E_G)-med1), 2) / (2 * var1))));
LARGO_1[5] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NW_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NW_C_G)-med1), 2) / (2 * var1))));
if (NW_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NW_N1_G)-med2), 2) / (2 * var1))));
if (NW_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NW_N2_G)-med2), 2) / (2 * var1))));
if (NW_NE_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NW_NE_G)-med1), 2) / (2 * var1))));
if (NW_SW_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NW_SW_G)-med1), 2) / (2 * var1))));
LARGO_1[6] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (W_C_G > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((W_C_G)-med1), 2) / (2 * var1))));
if (W_N1_G < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((W_N1_G)-med2), 2) / (2 * var1))));
if (W_N2_G < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((W_N2_G)-med2), 2) / (2 * var1))));
if (W_N_G > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((W_N_G)-med1), 2) / (2 * var1))));
if (W_S_G > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((W_S_G)-med1), 2) / (2 * var1))));
LARGO_1[7] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
// Blue channel, SW direction, second feature set (parameters med1/med2/var1):
// Gaussian membership grades combined into the fuzzy activation LARGO_2[0].
// FIX: the first guard previously tested SW_C_G (green channel) while its else
// branch already used SW_C_B — a copy-paste slip; every other *_B section in
// this family tests the matching *_B variable.
if (SW_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SW_C_B)-med1), 2) / (2 * var1))));
if (SW_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SW_N1_B)-med2), 2) / (2 * var1))));
if (SW_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SW_N2_B)-med2), 2) / (2 * var1))));
if (SW_NW_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SW_NW_B)-med1), 2) / (2 * var1))));
if (SW_SE_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SW_SE_B)-med1), 2) / (2 * var1))));
LARGO_2[0] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (S_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((S_C_B)-med1), 2) / (2 * var1))));
if (S_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((S_N1_B)-med2), 2) / (2 * var1))));
if (S_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((S_N2_B)-med2), 2) / (2 * var1))));
if (S_W_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((S_W_B)-med1), 2) / (2 * var1))));
if (S_E_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((S_E_B)-med1), 2) / (2 * var1))));
LARGO_2[1] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (SE_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((SE_C_B)-med1), 2) / (2 * var1))));
if (SE_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((SE_N1_B)-med2), 2) / (2 * var1))));
if (SE_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((SE_N2_B)-med2), 2) / (2 * var1))));
if (SE_NE_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((SE_NE_B)-med1), 2) / (2 * var1))));
if (SE_SW_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((SE_SW_B)-med1), 2) / (2 * var1))));
LARGO_2[2] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (E_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((E_C_B)-med1), 2) / (2 * var1))));
if (E_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((E_N1_B)-med2), 2) / (2 * var1))));
if (E_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((E_N2_B)-med2), 2) / (2 * var1))));
if (E_N_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((E_N_B)-med1), 2) / (2 * var1))));
if (E_S_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((E_S_B)-med1), 2) / (2 * var1))));
LARGO_2[3] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NE_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NE_C_B)-med1), 2) / (2 * var1))));
if (NE_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NE_N1_B)-med2), 2) / (2 * var1))));
if (NE_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NE_N2_B)-med2), 2) / (2 * var1))));
if (NE_NW_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NE_NW_B)-med1), 2) / (2 * var1))));
if (NE_SE_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NE_SE_B)-med1), 2) / (2 * var1))));
LARGO_2[4] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (N_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((N_C_B)-med1), 2) / (2 * var1))));
if (N_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((N_N1_B)-med2), 2) / (2 * var1))));
if (N_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((N_N2_B)-med2), 2) / (2 * var1))));
if (N_W_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((N_W_B)-med1), 2) / (2 * var1))));
if (N_E_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((N_E_B)-med1), 2) / (2 * var1))));
LARGO_2[5] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (NW_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((NW_C_B)-med1), 2) / (2 * var1))));
if (NW_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((NW_N1_B)-med2), 2) / (2 * var1))));
if (NW_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((NW_N2_B)-med2), 2) / (2 * var1))));
if (NW_NE_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((NW_NE_B)-med1), 2) / (2 * var1))));
if (NW_SW_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((NW_SW_B)-med1), 2) / (2 * var1))));
LARGO_2[6] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
if (W_C_B > med1) gam_big_2[0] = 1;
else gam_big_2[0] = (exp(-(pow(((W_C_B)-med1), 2) / (2 * var1))));
if (W_N1_B < med2) gam_small_2[0] = 1;
else gam_small_2[0] = (exp(-(pow(((W_N1_B)-med2), 2) / (2 * var1))));
if (W_N2_B < med2) gam_small_2[1] = 1;
else gam_small_2[1] = (exp(-(pow(((W_N2_B)-med2), 2) / (2 * var1))));
if (W_N_B > med1) gam_big_2[1] = 1;
else gam_big_2[1] = (exp(-(pow(((W_N_B)-med1), 2) / (2 * var1))));
if (W_S_B > med1) gam_big_2[2] = 1;
else gam_big_2[2] = (exp(-(pow(((W_S_B)-med1), 2) / (2 * var1))));
LARGO_2[7] = (gam_big_2[0] * gam_small_2[0] * gam_small_2[1] * gam_big_2[1] * gam_big_2[2]);
float mu_R_R[8], mu_G_G[8], mu_B_B[8];
mu_R_R[0] = min(largo[0], LARGO[0]);
mu_R_R[1] = min(largo[1], LARGO[1]);
mu_R_R[2] = min(largo[2], LARGO[2]);
mu_R_R[3] = min(largo[3], LARGO[3]);
mu_R_R[4] = min(largo[4], LARGO[4]);
mu_R_R[5] = min(largo[5], LARGO[5]);
mu_R_R[6] = min(largo[6], LARGO[6]);
mu_R_R[7] = min(largo[7], LARGO[7]);
mu_G_G[0] = min(largo_1[0], LARGO_1[0]);
mu_G_G[1] = min(largo_1[1], LARGO_1[1]);
mu_G_G[2] = min(largo_1[2], LARGO_1[2]);
mu_G_G[3] = min(largo_1[3], LARGO_1[3]);
mu_G_G[4] = min(largo_1[4], LARGO_1[4]);
mu_G_G[5] = min(largo_1[5], LARGO_1[5]);
mu_G_G[6] = min(largo_1[6], LARGO_1[6]);
mu_G_G[7] = min(largo_1[7], LARGO_1[7]);
mu_B_B[0] = min(largo_2[0], LARGO_2[0]);
mu_B_B[1] = min(largo_2[1], LARGO_2[1]);
mu_B_B[2] = min(largo_2[2], LARGO_2[2]);
mu_B_B[3] = min(largo_2[3], LARGO_2[3]);
mu_B_B[4] = min(largo_2[4], LARGO_2[4]);
mu_B_B[5] = min(largo_2[5], LARGO_2[5]);
mu_B_B[6] = min(largo_2[6], LARGO_2[6]);
mu_B_B[7] = min(largo_2[7], LARGO_2[7]);
noise_R_R = max(max(max(max(max(max(max(mu_R_R[0], mu_R_R[1]), mu_R_R[2]), mu_R_R[3]), mu_R_R[4]), mu_R_R[5]), mu_R_R[6]), mu_R_R[7]);
noise_G_G = max(max(max(max(max(max(max(mu_G_G[0], mu_G_G[1]), mu_G_G[2]), mu_G_G[3]), mu_G_G[4]), mu_G_G[5]), mu_G_G[6]), mu_G_G[7]);
noise_B_B = max(max(max(max(max(max(max(mu_B_B[0], mu_B_B[1]), mu_B_B[2]), mu_B_B[3]), mu_B_B[4]), mu_B_B[5]), mu_B_B[6]), mu_B_B[7]);
//printf( "%f",noise_B_B);
if ((noise_B_B >= THS))
{
float weights[9], sum_weights = 0, hold2, suma = 0;
for (j = 0; j <= 7; j++)
{
sum_weights += (1 - mu_B_B[j]);
}
sum_weights = (sum_weights + 3 * sqrt(1 - noise_B_B)) / 2;
weights[0] = (1 - mu_B_B[0]);
weights[1] = (1 - mu_B_B[1]);
weights[2] = (1 - mu_B_B[2]);
weights[3] = (1 - mu_B_B[7]);
weights[4] = 3 * sqrt(1 - noise_B_B);
weights[5] = (1 - mu_B_B[3]);
weights[6] = (1 - mu_B_B[6]);
weights[7] = (1 - mu_B_B[5]);
weights[8] = (1 - mu_B_B[4]);
for (j = 0; j <= 8; j++)
{
for (x = 0; x <= 7; x++)
{
if (vectB[x] > vectB[x + 1])
{
hold = vectB[x];
hold2 = weights[x];
vectB[x] = vectB[x + 1];
weights[x] = weights[x + 1];
vectB[x + 1] = hold;
weights[x + 1] = hold2;
}
}
}
for (j = 8; j >= 0; j--)
{
suma += weights[j];
if (suma >= sum_weights)
{
if (j < 2)
{
sum_weights = sum_weights - (weights[0] + weights[1]);
sum_weights = sum_weights / 2;
suma = 0;
for (F = 8; F >= 2; F--)
{
suma += weights[F];
if (suma > sum_weights)
{
d_Pout[(Row * m + Col) * channels + 2] = vectB[F];
F = -1;
}
}
j = -1;
}
else
{
d_Pout[(Row * m + Col) * channels + 2] = vectB[j];
//d_Pout[(Row * m + Col) * channels + 0] = d_Pout[(Row * m + Col) * channels + 0];
j = -1;
}
suma = -1;
}
}
// fwrite (&CCC, 1, 1, header_file);
}
else
{
d_Pout[(Row * m + Col) * channels + 2] = vectB[4];
//d_Pout[(Row * m + Col) * channels + 0] = 0;
// fwrite (&CCC, 1, 1, header_file);
}
if (noise_G_G >= THS)
{
float weights[9], sum_weights = 0, hold2, suma = 0;
for (j = 0; j <= 7; j++)
{
sum_weights += (1 - mu_G_G[j]);
}
sum_weights = (sum_weights + 3 * sqrt(1 - noise_G_G)) / 2;
weights[0] = (1 - mu_G_G[0]);
weights[1] = (1 - mu_G_G[1]);
weights[2] = (1 - mu_G_G[2]);
weights[3] = (1 - mu_G_G[7]);
weights[4] = 3 * sqrt(1 - noise_G_G);
weights[5] = (1 - mu_G_G[3]);
weights[6] = (1 - mu_G_G[6]);
weights[7] = (1 - mu_G_G[5]);
weights[8] = (1 - mu_G_G[4]);
for (j = 0; j <= 8; j++)
{
for (x = 0; x <= 7; x++)
{
if (vectG[x] > vectG[x + 1])
{
hold = vectG[x];
hold2 = weights[x];
vectG[x] = vectG[x + 1];
weights[x] = weights[x + 1];
vectG[x + 1] = hold;
weights[x + 1] = hold2;
}
}
}
for (j = 8; j >= 0; j--)
{
suma += weights[j];
if (suma >= sum_weights)
{
if (j < 2)
{
sum_weights = sum_weights - (weights[0] + weights[1]);
sum_weights = sum_weights / 2;
suma = 0;
for (F = 8; F >= 2; F--)
{
suma += weights[F];
if (suma >= sum_weights)
{
d_Pout[(Row * m + Col) * channels + 1] = vectG[F];
F = -1;
}
}
j = -1;
}
else
{
d_Pout[(Row * m + Col) * channels + 1] = vectG[j];
j = -1;
}
suma = -1;
}
}
// fwrite (&BBB, 1, 1, header_file);
}
else
{
d_Pout[(Row * m + Col) * channels + 1] = vectG[4];
// fwrite (&BBB, 1, 1, header_file);
}
if (noise_R_R >= THS)
{
float weights[9], sum_weights = 0, hold2, suma = 0;
for (j = 0; j <= 7; j++)
{
sum_weights += (1 - mu_R_R[j]);
}
sum_weights = (sum_weights + 3 * sqrt(1 - noise_R_R)) / 2;
weights[0] = (1 - mu_R_R[0]);
weights[1] = (1 - mu_R_R[1]);
weights[2] = (1 - mu_R_R[2]);
weights[3] = (1 - mu_R_R[7]);
weights[4] = 3 * sqrt(1 - noise_R_R);
weights[5] = (1 - mu_R_R[3]);
weights[6] = (1 - mu_R_R[6]);
weights[7] = (1 - mu_R_R[5]);
weights[8] = (1 - mu_R_R[4]);
for (j = 0; j <= 8; j++)
{
for (x = 0; x <= 7; x++)
{
if (vectR[x] > vectR[x + 1])
{
hold = vectR[x];
hold2 = weights[x];
vectR[x] = vectR[x + 1];
weights[x] = weights[x + 1];
vectR[x + 1] = hold;
weights[x + 1] = hold2;
}
}
}
for (j = 8; j >= 0; j--)
{
suma += weights[j];
if (suma >= sum_weights)
{
if (j < 2)
{
sum_weights = sum_weights - (weights[0] + weights[1]);
sum_weights = sum_weights / 2;
suma = 0;
for (F = 8; F >= 2; F--)
{
suma += weights[F];
if (suma > sum_weights)
{
d_Pout[(Row * m + Col) * channels + 0] = vectR[F];
F = -1;
}
}
j = -1;
}
else
{
d_Pout[(Row * m + Col) * channels + 0] = vectR[j];
j = -1;
}
suma = -1;
}
}
// fwrite (&AAA, 1, 1, header_file);
}
else
{
d_Pout[(Row * m + Col) * channels + 0] = vectR[4];
//d_Pout[(Row * m + Col) * channels + 0] = 255;
// fwrite (&AAA, 1, 1, header_file);
}
//d_Pout[(Row * m + Col) * channels + 0] = 255;
}
}
|
12,649 | /*
* purpose: just a demo to show how matrix addition can be done on
* the GPU "the standard way" ie using as many thread blocks
* as needed to cover the considered dimension N x N
* n.b. N=256 was set on purpose to evenly divide by the
* number of x/y-threads, ie 16, without any remainder,
* so don't have to worry about these details at this point
* compilation: nvcc ./multiple_thread_blocks_matrix_addition.cu
* usage: ./a.out
*/
#include <stdio.h>
#define N 256
/*
* GPU kernel
*/
/*
 * Element-wise matrix addition C = A + B over row-pointer matrices.
 * One thread per element; no bounds guard, so the launch grid must
 * tile the N x N matrix exactly (guaranteed in this demo because N
 * divides evenly by the 16x16 block shape).
 */
__global__ void MatAdd(float **A, float **B, float **C)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    C[row][col] = A[row][col] + B[row][col];
}
/*
* host main
*/
/* Report-and-bail wrapper: the original ignored every CUDA return code,
 * so allocation or launch failures silently produced garbage output. */
#define CHECK(call) do { \
    cudaError_t err_ = (call); \
    if (err_ != cudaSuccess) { \
        fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__, \
                cudaGetErrorString(err_)); \
        return 1; \
    } } while (0)
int main()
{
    int i, j;
    dim3 threadsPerBlock, numBlocks;
    float **A, **B, **C;
    /*
     * using CUDA unified memory, first allocate
     * the memory in convenient 2D format, then
     * initialize with some dummy content
     */
    CHECK(cudaMallocManaged(&A, N * sizeof(float *)));
    CHECK(cudaMallocManaged(&B, N * sizeof(float *)));
    CHECK(cudaMallocManaged(&C, N * sizeof(float *)));
    for (i = 0; i < N; i++) {
        CHECK(cudaMallocManaged(&A[i], N * sizeof(float)));
        CHECK(cudaMallocManaged(&B[i], N * sizeof(float)));
        CHECK(cudaMallocManaged(&C[i], N * sizeof(float)));
        for (j = 0; j < N; j++) {
            A[i][j] = (float) ((i * N) + j);
            B[i][j] = (float) N - A[i][j];
            C[i][j] = (float) 0;
        }
    }
    /* set up GPU kernel execution configuration;
     * N (256) divides evenly by 16, so no tail blocks are needed */
    threadsPerBlock.x = 16;
    threadsPerBlock.y = 16;
    numBlocks.x = N / threadsPerBlock.x;
    numBlocks.y = N / threadsPerBlock.y;
    /* launch the GPU kernel; cudaGetLastError catches bad launch
     * configs, the synchronize surfaces in-kernel faults */
    MatAdd<<<numBlocks, threadsPerBlock>>>(A, B, C);
    CHECK(cudaGetLastError());
    CHECK(cudaDeviceSynchronize());
    /* print result */
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            printf("%d %d %f\n", i, j, C[i][j]);
        }
    }
    /* make clean */
    for (i = 0; i < N; i++) {
        CHECK(cudaFree(C[i]));
        CHECK(cudaFree(B[i]));
        CHECK(cudaFree(A[i]));
    }
    CHECK(cudaFree(C));
    CHECK(cudaFree(B));
    CHECK(cudaFree(A));
    return 0;
}
#undef CHECK
|
12,650 | #include "includes.h"
// Flatten a 2D lattice coordinate (x, y) into a row-major linear index;
// NX (a file-level constant) is the row pitch.
__device__ __forceinline__ size_t gpu_scalar_index(unsigned int x, unsigned int y)
{
    return x + y * NX;
}
// Initialize one lattice node per thread: uniform density rho0; zero
// charge, velocity, and electric field; and a potential that falls
// linearly from `voltage` at y = 0 to 0 at y = Ly.
// Launch layout: blockIdx.y picks the row, blockIdx.x*blockDim.x +
// threadIdx.x the column.  No bounds guard, so the grid must cover the
// domain exactly -- confirm against the launch site.
__global__ void gpu_initialization(double *r, double *c, double *fi, double *u, double *v, double *ex, double *ey)
{
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y;
    const size_t node = gpu_scalar_index(col, row);
    fi[node] = voltage * (Ly - dy * row) / Ly;  // linear potential drop
    r[node]  = rho0;
    c[node]  = 0.0;
    u[node]  = 0.0;
    v[node]  = 0.0;
    ex[node] = 0.0;
    ey[node] = 0.0;
}
12,651 | #include "includes.h"
// Exponentiate the first `nhalf` envelope samples in place:
// env[i] <- exp(env[i] / nhalf).
// Fix: use single-precision expf() with a float divisor.  The original
// called the double-precision exp() on a float buffer, silently
// promoting every operand to double (slow on consumer GPUs), and its
// env[i]/nhalf mixed float/int division relied on implicit promotion.
__global__ void expon(float* env, int nhalf) {
int i = threadIdx.x + blockDim.x*blockIdx.x;
if (i < nhalf) {
env[i] = expf(env[i] / (float)nhalf); // exponentiate in single precision
}
}
12,652 | #include <stdio.h>
#include <stdlib.h>
#include <cmath>
// CPU reference box blur over an RGBA8 image: each output pixel is the
// arithmetic mean (per channel) of every in-bounds pixel within a
// (2*radius+1) x (2*radius+1) window centered on it.  Window samples
// outside the image are skipped, and the divisor counts only the
// pixels actually visited, so edges blur correctly.
void box_blur(unsigned char* in_image, unsigned char* out_image, int width, int height, int radius) {
    fprintf(stdout, "Doing box blur\n");
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            float sum_r = 0, sum_g = 0, sum_b = 0, sum_a = 0;
            int count = 0;
            // same traversal order as the original: x-offset outer,
            // y-offset inner (keeps float summation order identical)
            for (int dx = -radius; dx <= radius; ++dx) {
                for (int dy = -radius; dy <= radius; ++dy) {
                    const int nx = col + dx;
                    const int ny = row + dy;
                    if (nx > -1 && nx < width && ny > -1 && ny < height) {
                        const unsigned char* src = in_image + 4 * (width * ny + nx);
                        sum_r += src[0];
                        sum_g += src[1];
                        sum_b += src[2];
                        sum_a += src[3];
                        ++count;
                    }
                }
            }
            unsigned char* dst = out_image + 4 * (width * row + col);
            dst[0] = sum_r / count;  // count >= 1: the center pixel always hits
            dst[1] = sum_g / count;
            dst[2] = sum_b / count;
            dst[3] = sum_a / count;
        }
    }
}
|
12,653 | #include <stdio.h>
#include <cuda.h>
//-----------------------------------------------------------------------------
// TheKernel: basic kernel containing a print statement.
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// TheKernel: burns cycles in a nested integer loop to keep its single
// thread busy for a while (the accumulated value is discarded), then
// prints a greeting from the device.
//-----------------------------------------------------------------------------
__global__ void TheKernel()
{
    int acc = 0;
    for (int a = 0; a < 1000; a++) {
        for (int b = 0; b < 1000; b++) {
            acc += a;
            // same arithmetic as the original if/else, written branchless
            acc += (acc > 2000) ? -4 * b : b;
        }
    }
    printf("This is the kernel saying hello world, from the GPU.\n");
}
//-----------------------------------------------------------------------------
// main
//-----------------------------------------------------------------------------
//-----------------------------------------------------------------------------
// main: print from the host, launch TheKernel on a single thread, then
// synchronize -- once to show the host blocks until the kernel is done,
// and once more before exit so no device work is left in flight.
//-----------------------------------------------------------------------------
int main()
{
    printf("This is the C layer saying hello world, from the host.\n");
    TheKernel<<<1, 1>>>();
    cudaDeviceSynchronize();
    printf("LOOK: device synchronization stops the host until the kernel is done.\n");
    /* final safety-net sync before exiting */
    cudaDeviceSynchronize();
    return 0;
}
|
12,654 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
// Placeholder kernel: the body is intentionally empty -- the Mandelbrot
// computation has not been implemented yet.  The parameters are
// presumably device buffers (iteration cap and per-pixel output);
// TODO confirm the intended layout once the kernel is written.
__global__ void mandelbrot(int *max_iter, int *output) {
}
/* function: main
* accepts a size of the data arrays.
*
*/
// Entry point for the (not-yet-implemented) Mandelbrot demo; currently
// just prints a smoke-test message and exits successfully.
int main(int argc, char ** argv) {
    (void)argc;
    (void)argv;
    fputs("testing 1 2\n", stdout);
    return 0;
}
|
12,655 | #include "includes.h"
// Tiled 1D convolution with the mask in constant memory (Global_Mask).
// Each block stages TILE_SIZE input elements in shared memory; halo
// elements outside the current tile are read straight from global
// memory instead.
//
// Fixes: the shared-memory load and the final store are now bounds
// guarded, so a grid that overshoots N_elements no longer reads or
// writes out of bounds for the tail block.
// NOTE(review): Pvalue is an int accumulator while N_ds holds floats,
// so in-tile products are truncated per element -- confirm Global_Mask's
// element type and whether float accumulation was intended.
__global__ void convolution1d_constant_simple_kernel(int *In, int *Out){
int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float N_ds[TILE_SIZE];
// guard: tail threads past the end of In stage a zero instead
N_ds[threadIdx.x] = (i < N_elements) ? In[i] : 0;
__syncthreads();
int This_tile_start_point = blockIdx.x * blockDim.x;
int Next_tile_start_point = (blockIdx.x + 1) * blockDim.x;
int N_start_point = i - (Mask_size/2);
int Pvalue = 0;
for (int j = 0; j < Mask_size; j ++){
int N_index = N_start_point + j;
if (N_index >= 0 && N_index < N_elements){
// in-tile taps come from shared memory, halo taps from global
if ((N_index >= This_tile_start_point) && (N_index < Next_tile_start_point)){
Pvalue += N_ds[threadIdx.x+j-(Mask_size/2)]*Global_Mask[j];
} else{ Pvalue += In[N_index] * Global_Mask[j]; }
}
}
// guard: tail threads write nothing
if (i < N_elements) Out[i] = Pvalue;
}
12,656 | #include "includes.h"
// Naive matrix transpose, one thread per element: the store
// out[row*nx + col] is coalesced along threadIdx.x while the load
// in[col*ny + row] strides through global memory column-wise.
// Bounds-guarded so ragged grids are safe.
__global__ void transposeNaiveCol(float *out, float *in, const int nx, const int ny)
{
    const unsigned int col = blockDim.x * blockIdx.x + threadIdx.x;
    const unsigned int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col >= nx || row >= ny)
        return;
    out[row * nx + col] = in[col * ny + row];
}
12,657 | #pragma once
// #include "../utils/__utils.h"
// #include "../expressions/expressions.h"
#include <iostream>
using namespace std;
// template <typename val_type>
// Minimal RAII wrapper around a fixed-size device buffer of `dim`
// elements of val_type.
//
// Fixes relative to the original:
//  * the default constructor now allocates (previously d_data_ was
//    left uninitialized, so the destructor cudaFree'd a garbage
//    pointer);
//  * copies transfer sizeof(val_type)*dim bytes (previously only
//    `dim` BYTES were copied);
//  * copy assignment allocates when the destination has no buffer
//    (e.g. it was moved-from);
//  * move operations steal the pointer instead of allocating a fresh
//    buffer first; the moved-from object is left empty and safe to
//    destroy.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are still ignored,
// matching the file's style -- consider checking them.
template <typename val_type, std::size_t dim>
class device_tensor1
{
public:
    val_type* d_data_;

    // Allocate an uninitialized device buffer of dim elements.
    device_tensor1()
        : d_data_{nullptr}
    {
        cudaMalloc((void**) &d_data_, sizeof(val_type)*dim);
    }

    // Deep copy: fresh allocation plus device-to-device memcpy.
    device_tensor1(device_tensor1 const& d_rhs_)
        : d_data_{nullptr}
    {
        cudaMalloc((void**) &d_data_, sizeof(val_type)*dim);
        cudaMemcpy(d_data_, d_rhs_.d_data_, sizeof(val_type)*dim, cudaMemcpyDeviceToDevice);
    }

    device_tensor1& operator=(device_tensor1 const& d_rhs_){
        if(this != &d_rhs_){
            if (d_data_ == nullptr)  // destination may have been moved-from
                cudaMalloc((void**) &d_data_, sizeof(val_type)*dim);
            cudaMemcpy(d_data_, d_rhs_.d_data_, sizeof(val_type)*dim, cudaMemcpyDeviceToDevice);
        }
        return *this;
    }

    // Move: take ownership of the source buffer; source becomes empty.
    device_tensor1(device_tensor1&& d_rhs_) noexcept
        : d_data_{nullptr}
    {
        std::swap(d_data_, d_rhs_.d_data_);
    }

    device_tensor1& operator=(device_tensor1&& d_rhs_) noexcept{
        if(this != &d_rhs_){
            std::swap(d_data_, d_rhs_.d_data_);
        }
        return *this;
    }

    ~device_tensor1(){
        if (d_data_!=nullptr){
            cudaFree(d_data_);
            d_data_=nullptr;
        }
    }
};
// Smoke test: default-construct a small float tensor and let RAII
// release it on scope exit.
int main(int argc, char const *argv[])
{
    (void)argc;
    (void)argv;
    device_tensor1<float, 3> test;
    return 0;
}
12,658 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <ctime>
void ReadArray(double* arr, int size);
void WriteArray(double* arr, int size);
void Operate(double* firstDevice, double* secondDevice, double* resultDevice, int vectorLength);
double Min(double a, double b);
// Reads a length and two vectors from stdin, computes their
// element-wise minimum, prints the elapsed CPU time and the result.
// Fixes: allocation failures are now detected, and all three buffers
// are freed (they previously leaked).
int main()
{
    int vectorLength;
    std::cin >> vectorLength;
    clock_t begin = clock();
    double* first = (double*)malloc(sizeof(double) * vectorLength);
    double* second = (double*)malloc(sizeof(double) * vectorLength);
    double* result = (double*)malloc(sizeof(double) * vectorLength);
    if (first == NULL || second == NULL || result == NULL)
    {
        std::cerr << "allocation failed" << std::endl;
        return 1;
    }
    ReadArray(first, vectorLength);
    ReadArray(second, vectorLength);
    Operate(first, second, result, vectorLength);
    clock_t end = clock();
    std::cout << double(end - begin) / CLOCKS_PER_SEC << std::endl;
    WriteArray(result, vectorLength);
    // release the buffers (previously leaked)
    free(result);
    free(second);
    free(first);
    return 0;
}
// Fill arr[0..size-1] from standard input, one value at a time.
void ReadArray(double* arr, int size)
{
    for (double* p = arr; p != arr + size; ++p)
    {
        std::cin >> *p;
    }
}
// Print the array space-separated on one line (no trailing separator).
void WriteArray(double* arr, int size)
{
    for (int idx = 0; idx < size; idx++)
    {
        if (idx > 0)
        {
            std::cout << ' ';
        }
        std::cout << arr[idx];
    }
}
// Smaller of two ints.
int Min(int a, int b)
{
    if (a > b)
        return b;
    return a;
}
// Larger of two ints.
int Max(int a, int b)
{
    if (a < b)
        return b;
    return a;
}
// Smaller of two doubles (defined first so Operate can call it without
// relying on the file-top prototype).
double Min(double a, double b)
{
    return (a > b)
        ? b
        : a;
}
// Element-wise minimum: resultDevice[i] = min(firstDevice[i], secondDevice[i]).
// Fix: the loop previously stopped at vectorLength - 1, leaving the
// LAST element of the result uninitialized.
void Operate(double* firstDevice, double* secondDevice, double* resultDevice, int vectorLength)
{
    for (int i = 0; i < vectorLength; i++)
    {
        resultDevice[i] = Min(firstDevice[i], secondDevice[i]);
    }
}
12,659 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
//Afegim un tamany per defecte
//Imprescindible que sigui potencia de 2
#define NUM_THREADS 1024
#define NUM_BLOCKS 32768
#define N NUM_THREADS*NUM_BLOCKS
//Macro per a swap
#define SWAP(_i, _ixj){\
int aux = vector[_i];\
vector[_i] = vector[_ixj];\
vector[_ixj] = aux;}
//Kernel per a bitonic sort
// One compare-exchange stage of the bitonic sorting network.  Each
// thread i pairs with partner i ^ j; bit k of i selects the direction
// (ascending when clear, descending when set).  Only the lower index
// of each pair performs the exchange.
__global__ void bitonicSortKernel(int *vector, int j, int k){
    const int i = threadIdx.x + blockDim.x * blockIdx.x;
    const int partner = i ^ j;
    if (partner > i){
        const bool ascending = ((i & k) == 0);
        const bool outOfOrder = ascending ? (vector[i] > vector[partner])
                                          : (vector[i] < vector[partner]);
        if (outOfOrder){
            int tmp = vector[i];
            vector[i] = vector[partner];
            vector[partner] = tmp;
        }
    }
}
//Funcio iterativa de bitonic sort
// Host driver: runs the full bitonic sorting network over the
// N-element device array, launching one kernel per (k, j) stage.
// `vector` must be a DEVICE pointer.
void bitonicSort(int *vector){
    dim3 blocs(NUM_BLOCKS, 1);
    dim3 fils(NUM_THREADS, 1);
    // outer loop doubles the sorted-run length; inner loop halves the
    // compare distance (shifts are divide/multiply by two)
    for(int k = 2; k <= N; k <<= 1){
        for(int j = k >> 1; j > 0; j >>= 1){
            bitonicSortKernel<<<blocs, fils>>>(vector, j, k);
        }
    }
}
// Returns 1 iff the N-element host array is in nondecreasing order.
int testOrdenacio(int *vector){
    for(int i = 0; i + 1 < N; ++i){
        if(vector[i] > vector[i + 1]) return 0;
    }
    return 1;
}
// Fills a pinned host buffer with random ints, sorts it on the GPU via
// bitonic sort, verifies the result, and reports total elapsed time.
// Fixes relative to the original:
//  * bitonicSort is now given the DEVICE pointer -- the original passed
//    the host pointer, so the device copy was never sorted;
//  * pinned memory is released with cudaFreeHost (cudaFree on a
//    cudaMallocHost allocation is invalid);
//  * the unused vectorAux allocation (previously leaked) is removed.
int main(int argc, char **argv) {
    int *vector, *vectorDevice;
    cudaEvent_t E0, E1, E2, E3;
    cudaEventCreate(&E0);
    cudaEventCreate(&E1);
    cudaEventCreate(&E2);
    cudaEventCreate(&E3);
    unsigned int NUM_BYTES = N*sizeof(int);
    // pinned host buffer for fast async-capable transfers
    cudaMallocHost(&vector, NUM_BYTES);
    // initialize with random values
    int i;
    srand(time(NULL));
    for(i = 0; i < N; ++i){
        vector[i] = rand();
    }
    cudaEventRecord(E0, 0);
    cudaEventSynchronize(E0);
    cudaMalloc((int **)&vectorDevice, NUM_BYTES);
    cudaEventRecord(E1, 0);
    cudaEventSynchronize(E1);
    // host -> device
    cudaMemcpy(vectorDevice, vector, NUM_BYTES, cudaMemcpyHostToDevice);
    // sort the DEVICE copy (fix: was called on the host pointer)
    bitonicSort(vectorDevice);
    cudaEventRecord(E2, 0);
    cudaEventSynchronize(E2);
    // device -> host
    cudaMemcpy(vector, vectorDevice, NUM_BYTES, cudaMemcpyDeviceToHost);
    // check the ordering
    if(testOrdenacio(vector)) printf("Agustin is happy\n");
    else printf("Agustin te deniega el curso PUMPS\n");
    // release memory: pinned buffer needs cudaFreeHost
    cudaFreeHost(vector);
    cudaFree(vectorDevice);
    cudaDeviceSynchronize();
    cudaEventRecord(E3, 0);
    cudaEventSynchronize(E3);
    // timing
    float tempsTotal;
    cudaEventElapsedTime(&tempsTotal, E0, E3);
    printf("Temps: %f", tempsTotal);
    // destroy events
    cudaEventDestroy(E0);
    cudaEventDestroy(E1);
    cudaEventDestroy(E2);
    cudaEventDestroy(E3);
    return 0;
}
|
12,660 | #include <cuda.h>
#include <float.h>
const float MAX_EXP = 80. ;
// Inclusive prefix sum (Hillis-Steele scan) of `probabilities` over
// independent windows.  Grid layout: blockIdx.x selects a signal of
// length signalLength; blockIdx.y selects a softMaxWidth-wide window
// within it.  Dynamic shared memory: 2 * softMaxWidth floats, used as
// ping-pong buffers.  maxLength is uniform across the block (the
// threadIdx.x terms cancel), so all __syncthreads() calls below are
// reached by every thread.
// NOTE(review): the branchless round loop assumes blockDim.x covers the
// window in blockDim.x-strided passes; confirm the reduction's shared
// region is sized for the largest launch.
__global__ void kCumulativeSum(
float *probabilities,
float *sumProbabilities,
int softMaxWidth,
int signalLength
)
{
extern __shared__ float sData[] ;
float *dataPrev = (float *) &sData[0] ;
float *dataNext = (float *) &sData[softMaxWidth] ;
// first copy data for current softMax
int signalIndex = signalLength * blockIdx.x ;
int dataIndex = blockIdx.y * softMaxWidth + threadIdx.x ;
// window length, clipped at the end of the signal
int maxLength = min(softMaxWidth, signalLength-dataIndex+threadIdx.x) ;
for (int index = threadIdx.x ; index < maxLength && dataIndex < signalLength ;
index+= blockDim.x, dataIndex += blockDim.x)
{
dataPrev[index] = probabilities[signalIndex+dataIndex] ;
}
__syncthreads() ;
// Now compute cumulative sum
dataIndex = blockIdx.y * softMaxWidth + threadIdx.x ;
// log2(maxLength) rounds; each round adds the value `round` slots back
for (int round = 1 ; round < maxLength ; round = round<<1)
{
for (int index = threadIdx.x ; index < maxLength ; index+= blockDim.x)
{
float val = dataPrev[index] ;
if (index >= round)
val += dataPrev[index-round] ;
dataNext[index] = val ;
}
// swap the ping-pong buffers, then barrier before the next round reads
float *temp = dataPrev ;
dataPrev = dataNext ;
dataNext = temp ;
__syncthreads() ;
}
// Write out the data.
for (int index = threadIdx.x ; index < maxLength && dataIndex < signalLength ;
index+= blockDim.x, dataIndex += blockDim.x)
{
sumProbabilities[signalIndex+dataIndex] = dataPrev[index] ;
}
}
// Draws one multinomial sample per window: scans the window's
// probabilities into a cumulative sum (same Hillis-Steele scheme as
// kCumulativeSum), then marks with 1 the single slot whose cumulative
// interval contains the pre-generated uniform random number for this
// (signal, window) pair; all other slots get 0.  `startShift` offsets
// the windows within the signal.  Dynamic shared memory:
// 2 * softMaxWidth floats.
// NOTE(review): assumes each window's probabilities sum to ~1 so that
// randNum in [0,1) always lands in some interval -- confirm upstream
// normalization.
__global__ void kMultinomialSample(
float *unifRandNums,
float *probabilities,
float *samples,
int softMaxWidth,
int startShift,
int signalLength
)
{
extern __shared__ float sData[] ;
float *dataPrev = (float *) &sData[0] ;
float *dataNext = (float *) &sData[softMaxWidth] ;
// first copy data for current softMax
int signalIndex = signalLength * blockIdx.x ;
int dataIndex = blockIdx.y * softMaxWidth + threadIdx.x + startShift ;
// one pre-generated uniform draw per (signal, window) pair
float randNum = unifRandNums[blockIdx.x*gridDim.y + blockIdx.y] ;
int maxLength = min(softMaxWidth, signalLength-dataIndex+threadIdx.x) ;
for (int index = threadIdx.x ; index < maxLength && dataIndex < signalLength ;
index+= blockDim.x, dataIndex += blockDim.x)
{
dataPrev[index] = probabilities[signalIndex+dataIndex] ;
}
__syncthreads() ;
// Now compute cumulative sum
dataIndex = blockIdx.y * softMaxWidth + threadIdx.x + startShift ;
for (int round = 1 ; round < maxLength ; round = round<<1)
{
for (int index = threadIdx.x ; index < maxLength ; index+= blockDim.x)
{
float val = dataPrev[index] ;
if (index >= round)
val += dataPrev[index-round] ;
dataNext[index] = val ;
}
float *temp = dataPrev ;
dataPrev = dataNext ;
dataNext = temp ;
__syncthreads() ;
}
// Find the appropriate index where cumulative[i-1]<= r < cumulative[i]
for (int index = threadIdx.x ; index < maxLength && dataIndex < signalLength ;
index+= blockDim.x, dataIndex += blockDim.x)
{
if (index == 0)
{
if (randNum < dataPrev[index])
samples[signalIndex+dataIndex] = 1 ;
else
samples[signalIndex+dataIndex] = 0 ;
}
else
{
if (randNum >= dataPrev[index-1] && randNum < dataPrev[index])
samples[signalIndex+dataIndex] = 1 ;
else
samples[signalIndex+dataIndex] = 0 ;
}
}
}
// Softmax-with-off-state ("stack approximation") over position-biased
// activations, also emitting per-slot standard deviations.  Grid:
// blockIdx.x = feature/signal, blockIdx.y = window.  Dynamic shared
// memory: softMaxWidth floats for the staged activations plus
// blockDim.x floats for the reduction scratch (threadStores).
// NOTE(review): both tree reductions assume blockDim.x is a power of
// two and that threadStores holds at least blockDim.x entries --
// confirm at the launch site.
__global__ void kSoftMaxStackApproxWithPositionBiases(
float *activations,
float *probabilities,
float *stdevs,
float *featureBiases,
float *positionBiases,
int softMaxWidth,
int signalLength
)
{
extern __shared__ float sData[] ;
float *arrData = (float *) &sData[0] ;
float *threadStores = (float *) &sData[softMaxWidth] ;
int signalIndex = blockIdx.x*signalLength ;
int dataIndex = blockIdx.y*softMaxWidth + threadIdx.x ;
float maxVal = -FLT_MAX ;
// stage position-biased activations in shared memory, tracking each
// thread's running max for numerical stability
for (int index = threadIdx.x ; index < softMaxWidth && dataIndex < signalLength ;
index += blockDim.x, dataIndex += blockDim.x)
{
arrData[index] = activations[signalIndex+dataIndex] + positionBiases[index] ;
if (maxVal < arrData[index])
maxVal = arrData[index] ;
}
threadStores[threadIdx.x] = maxVal ;
__syncthreads() ;
// do a reduction to find the max of all maxes.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (threadIdx.x < s)
{
// branchless max: a*(a>=b) + b*(b>a)
threadStores[threadIdx.x] =
threadStores[threadIdx.x+s]*(threadStores[threadIdx.x + s] >=threadStores[threadIdx.x]) +
threadStores[threadIdx.x] * (threadStores[threadIdx.x] > threadStores[threadIdx.x+s]) ;
}
__syncthreads();
}
// now we have max. Lets subract it from all elements, and compute intermediate logSumExp over all elements
// a thread is responsible for
float bias = featureBiases[blockIdx.x] ;
maxVal = threadStores[0] ;
__syncthreads();
float sumExp = 0. ;
dataIndex = blockIdx.y*softMaxWidth + threadIdx.x ;
for (int index = threadIdx.x ; index < softMaxWidth && dataIndex < signalLength ;
index += blockDim.x, dataIndex += blockDim.x)
{
arrData[index] = arrData[index] - maxVal ;
sumExp += __expf(arrData[index]) ;
}
threadStores[threadIdx.x] = sumExp ;
__syncthreads() ;
// compute normalization constant over sumExp by summing together all the intermediate values.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (threadIdx.x < s)
threadStores[threadIdx.x] += threadStores[threadIdx.x + s];
__syncthreads();
}
__syncthreads() ;
sumExp = threadStores[0] ;
// fold the off-state bias into the normalizer, re-stabilizing around
// the larger of (window max, bias)
float maxValNew = fmaxf(maxVal, bias) ;
bias = bias - maxValNew ;
float reluSumExp = sumExp * __expf(maxVal-maxValNew) + __expf(bias) ;
// notice, reusing maxValNew and maxVal variables for new causes.
// maxValNew is the negative delta from the previous one.
maxValNew = maxVal-maxValNew ;
// maxVal is -log(exp(biases)/(exp(biases)+sum(exp(x_i))))
maxVal = -bias + __logf(reluSumExp) ;
// write out probabilities and standard deviations
dataIndex = blockIdx.y*softMaxWidth + threadIdx.x ;
for (int index = threadIdx.x ; index < softMaxWidth && dataIndex < signalLength ;
index += blockDim.x, dataIndex += blockDim.x)
{
float r_x_i = __expf(arrData[index])/ sumExp ;
float relu_r_x_i = __expf(arrData[index]+maxValNew)/ reluSumExp ;
probabilities[signalIndex+dataIndex] = r_x_i * maxVal ;
float variance = r_x_i * ((1-r_x_i)*maxVal + relu_r_x_i) ;
if (variance < 0) // stupid overflows, underflows etc, causing a perfectly reasonable calculation to look negative..
variance = 0.0 ;
stdevs[signalIndex+dataIndex] = sqrtf(variance) ;
}
__syncthreads() ;
}
// Softmax over position-biased activations where each activation is
// first pushed through a softplus-stack transform (the while loop adds
// log(1+exp(x - 0.5 - t)) terms down to minExpForSum), with an
// off-state governed by the per-feature bias.  Also writes per-slot
// mean values (bias-shifted activations) and a sigmoid-derived stdev.
// Grid: blockIdx.x = feature, blockIdx.y = window; `shift` offsets the
// windows within the signal.  Dynamic shared memory:
// 2 * softMaxWidth + blockDim.x floats.
// NOTE(review): the reductions assume a power-of-two blockDim.x;
// confirm at the launch site.
__global__ void kSoftMaxReluWithPositionBiases(
float *activations,
float *probabilities,
float *meanValues,
float *featureStdevs,
float *featureBiases,
float *positionBiases,
int softMaxWidth,
int shift,
int signalLength,
float minExpForSum
)
{
extern __shared__ float sData[] ;
float *arrData = (float *) &sData[0] ;
float *arrActivation = (float *) &sData[softMaxWidth] ;
float *threadStores = (float *) &sData[2*softMaxWidth] ;
int signalIndex = blockIdx.x*signalLength ;
int dataIndex = blockIdx.y*softMaxWidth + threadIdx.x + shift ;
float maxVal = -FLT_MAX ;
for (int index = threadIdx.x ; index < softMaxWidth && dataIndex < signalLength ;
index += blockDim.x, dataIndex += blockDim.x)
{
float x_i = activations[signalIndex+dataIndex] + positionBiases[index] ;
arrActivation[index] = x_i ;
float val = x_i ;
x_i = x_i - .5 ;
// accumulate softplus terms at unit offsets until they vanish;
// above MAX_EXP the softplus is just x_i itself
while (x_i >= minExpForSum)
{
if (x_i > MAX_EXP)
val = val + x_i ;
else
val = val + __logf(1. + __expf(x_i)) ;
x_i = x_i - 1.0 ;
}
arrData[index] = val ;
if (maxVal < arrData[index])
maxVal = arrData[index] ;
}
threadStores[threadIdx.x] = maxVal ;
__syncthreads() ;
// do a reduction to find the max of all maxes.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (threadIdx.x < s)
{
// branchless max of the two candidates
threadStores[threadIdx.x] =
threadStores[threadIdx.x+s]*(threadStores[threadIdx.x + s] >=threadStores[threadIdx.x]) +
threadStores[threadIdx.x] * (threadStores[threadIdx.x] > threadStores[threadIdx.x+s]) ;
}
__syncthreads();
}
// now we have max. Lets subract it from all elements, and compute intermediate logSumExp over all elements
// a thread is responsible for
float bias = featureBiases[blockIdx.x] ;
maxVal = fmaxf(threadStores[0], bias) ;
__syncthreads();
float sumExp = 0. ;
dataIndex = blockIdx.y*softMaxWidth + threadIdx.x + shift ;
for (int index = threadIdx.x ; index < softMaxWidth && dataIndex < signalLength ;
index += blockDim.x, dataIndex += blockDim.x)
{
arrData[index] = arrData[index] - maxVal ;
arrActivation[index] = arrActivation[index] - bias ;
sumExp += __expf(arrData[index]) ;
}
threadStores[threadIdx.x] = sumExp ;
__syncthreads() ;
// compute normalization constant over sumExp by summing together all the intermediate values.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (threadIdx.x < s)
threadStores[threadIdx.x] += threadStores[threadIdx.x + s];
__syncthreads();
}
__syncthreads() ;
// off-state contributes exp(bias - maxVal) to the normalizer
sumExp = threadStores[0] + __expf(bias-maxVal) ;
// write out probabilities.
dataIndex = blockIdx.y*softMaxWidth + threadIdx.x + shift ;
for (int index = threadIdx.x ; index < softMaxWidth && dataIndex < signalLength ;
index += blockDim.x, dataIndex += blockDim.x)
{
float prob = __expf(arrData[index])/ sumExp ;
probabilities[signalIndex+dataIndex] = prob ;
// using the relu approximation. Remember to threshold
meanValues[signalIndex+dataIndex] = arrActivation[index] ;
featureStdevs[signalIndex+dataIndex] = sqrtf(1. / (1 + __expf(-arrActivation[index])));
}
__syncthreads() ;
}
// Numerically stable softmax with an extra "off" state over
// position-biased activations: probabilities are
// exp(x_i - m) / (sum_j exp(x_j - m) + exp(bias - m)), where m is the
// max over the window and the feature bias.  Grid: blockIdx.x =
// feature, blockIdx.y = window.  Dynamic shared memory:
// softMaxWidth + blockDim.x floats.
// NOTE(review): reductions assume a power-of-two blockDim.x -- confirm
// at the launch site.
__global__ void kSoftMaxWithOffAndPositionBiases(
float *activations,
float *probabilities,
float *featureBiases,
float *positionBiases,
int softMaxWidth,
int signalLength
)
{
extern __shared__ float sData[] ;
float *arrData = (float *) &sData[0] ;
float *threadStores = (float *) &sData[softMaxWidth] ;
int signalIndex = blockIdx.x*signalLength ;
int dataIndex = blockIdx.y*softMaxWidth + threadIdx.x ;
float maxVal = -FLT_MAX ;
// stage position-biased activations; track per-thread max
for (int index = threadIdx.x ; index < softMaxWidth && dataIndex < signalLength ;
index += blockDim.x, dataIndex += blockDim.x)
{
arrData[index] = activations[signalIndex+dataIndex] + positionBiases[index] ;
if (maxVal < arrData[index])
maxVal = arrData[index] ;
}
threadStores[threadIdx.x] = maxVal ;
__syncthreads() ;
// do a reduction to find the max of all maxes.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (threadIdx.x < s)
{
// branchless max of the two candidates
threadStores[threadIdx.x] =
threadStores[threadIdx.x+s]*(threadStores[threadIdx.x + s] >=threadStores[threadIdx.x]) +
threadStores[threadIdx.x] * (threadStores[threadIdx.x] > threadStores[threadIdx.x+s]) ;
}
__syncthreads();
}
// now we have max. Lets subract it from all elements, and compute intermediate logSumExp over all elements
// a thread is responsible for
float bias = featureBiases[blockIdx.x] ;
maxVal = fmaxf(threadStores[0], bias) ;
__syncthreads();
float sumExp = 0. ;
dataIndex = blockIdx.y*softMaxWidth + threadIdx.x ;
for (int index = threadIdx.x ; index < softMaxWidth && dataIndex < signalLength ;
index += blockDim.x, dataIndex += blockDim.x)
{
arrData[index] = arrData[index] - maxVal ;
sumExp += __expf(arrData[index]) ;
}
threadStores[threadIdx.x] = sumExp ;
__syncthreads() ;
// compute normalization constant over sumExp by summing together all the intermediate values.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (threadIdx.x < s)
threadStores[threadIdx.x] += threadStores[threadIdx.x + s];
__syncthreads();
}
__syncthreads() ;
// off-state contributes exp(bias - maxVal) to the normalizer
sumExp = threadStores[0] + __expf(bias-maxVal) ;
// write out probabilities.
dataIndex = blockIdx.y*softMaxWidth + threadIdx.x ;
for (int index = threadIdx.x ; index < softMaxWidth && dataIndex < signalLength ;
index += blockDim.x, dataIndex += blockDim.x)
{
probabilities[signalIndex+dataIndex] = __expf(arrData[index])/ sumExp ;
}
__syncthreads() ;
}
// Softmax with an "off" state where each thread owns a CONTIGUOUS run
// of numPtsPerThread elements of its window (blockIdx.y), rather than
// the strided layout used by the sibling kernels.  Shared layout:
// softMaxWidth staged activations + blockDim.x reduction slots.
//
// Fix: the max-tracking read used arrData[softMaxIndex+index] while the
// write used the window-rebased arrData[softMaxIndex+index-blockIndex],
// so every block with blockIdx.y > 0 compared against the wrong (and
// potentially out-of-bounds) shared slot.  Both sides now use the
// rebased index.
// NOTE(review): reductions assume a power-of-two blockDim.x -- confirm
// at the launch site.
__global__ void kSoftMaxWithOff(float *activations, float *probabilities, float *biases,
int softMaxWidth, int signalLength, int numPtsPerThread)
{
extern __shared__ float sData[] ;
float *arrData = (float *) &sData[0] ;
float *threadStores = (float *) &sData[softMaxWidth] ;
int signalIndex = blockIdx.x*signalLength ;
int blockIndex = blockIdx.y*softMaxWidth ;
int softMaxIndex = blockIndex + threadIdx.x*numPtsPerThread ;
// copy to local memory, clipping this thread's run at both the end of
// the signal and the end of the window
int numToCopy = numPtsPerThread ;
if (softMaxIndex + numToCopy > signalLength)
numToCopy = signalLength - softMaxIndex ;
if (softMaxIndex + numToCopy > blockIndex + softMaxWidth)
numToCopy = blockIndex + softMaxWidth - softMaxIndex ;
float maxVal = -FLT_MAX ;
for (int index = 0 ; index < numToCopy ; index++)
{
arrData[softMaxIndex+index-blockIndex] = activations[signalIndex+softMaxIndex+index] ;
if (maxVal < arrData[softMaxIndex+index-blockIndex])
maxVal = arrData[softMaxIndex+index-blockIndex] ;
}
threadStores[threadIdx.x] = maxVal ;
__syncthreads() ;
// do a reduction to find the max of all maxes.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (threadIdx.x < s)
{
// branchless max of the two candidates
threadStores[threadIdx.x] =
threadStores[threadIdx.x+s]*(threadStores[threadIdx.x + s] >=threadStores[threadIdx.x]) +
threadStores[threadIdx.x] * (threadStores[threadIdx.x] > threadStores[threadIdx.x+s]) ;
}
__syncthreads();
}
// now we have max. Lets subract it from all elements, and compute intermediate logSumExp over all elements
// a thread is responsible for
float bias = biases[blockIdx.x] ;
maxVal = threadStores[0] * (threadStores[0] > bias) + bias * (bias >= threadStores[0]) ;
__syncthreads();
float sumExp = 0. ;
for (int index = 0 ; index < numToCopy ; index++)
{
arrData[softMaxIndex+index-blockIndex] = arrData[softMaxIndex+index-blockIndex] - maxVal ;
sumExp += __expf(arrData[softMaxIndex+index-blockIndex]) ;
}
threadStores[threadIdx.x] = sumExp ;
__syncthreads() ;
// compute normalization constant over sumExp by summing together all the intermediate values.
for (unsigned int s=blockDim.x/2; s>0; s>>=1)
{
if (threadIdx.x < s)
threadStores[threadIdx.x] += threadStores[threadIdx.x + s];
__syncthreads();
}
__syncthreads() ;
// off-state contributes exp(bias - maxVal) to the normalizer
sumExp = threadStores[0] + __expf(bias-maxVal) ;
for (int index = 0 ; index < numToCopy ; index++)
{
probabilities[signalIndex+softMaxIndex+index] = __expf(arrData[softMaxIndex+index-blockIndex])/ sumExp ;
}
__syncthreads() ;
}
|
12,661 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#define PIXELSIZE 4
double* ClusterCore;
unsigned int* ClusterPixelCount;
double* ClusterColorSum;
__constant__ double DevClusterCore[4*32];
// K-means assignment step on an RGBA8 image: each thread walks pixels
// in a grid-stride loop (offset advances by 4 per pixel, matching
// PIXELSIZE), finds the nearest cluster center by squared RGB distance
// against the constant-memory centers DevClusterCore (4 doubles per
// cluster, up to 32 clusters), and stores the winning cluster id in
// the pixel's alpha byte.  If any pixel changes its label, *LastIter
// is cleared to 0 (all writers store the same value, so the race is
// benign) to signal that another iteration is needed.
// NOTE(review): a, b, c are ints, so the double differences against
// DevClusterCore are truncated before squaring is accumulated --
// confirm whether integer distances are intended.
__global__ void PixelToCluster(unsigned char* Image, int ClusterCount, int Width, int Height, int* LastIter)
{
int ElementsInStr=PIXELSIZE*Width;
int Distance=0;
int SelectedCluster=0;
int a,b,c;
int i,j;
// grid-stride over pixels: offset is the byte index of a pixel's R
for(int offset=4*(blockIdx.x*blockDim.x+threadIdx.x); offset<Height*ElementsInStr; offset+=4*(gridDim.x*blockDim.x))
{
i=offset/ElementsInStr;
j=offset%ElementsInStr;
// distance to cluster 0 seeds the search
a=(DevClusterCore[0]-Image[i*ElementsInStr+j])*(DevClusterCore[0]-Image[i*ElementsInStr+j]);
b=(DevClusterCore[1]-Image[i*ElementsInStr+j+1])*(DevClusterCore[1]-Image[i*ElementsInStr+j+1]);
c=(DevClusterCore[2]-Image[i*ElementsInStr+j+2])*(DevClusterCore[2]-Image[i*ElementsInStr+j+2]);
Distance=a+b+c;
SelectedCluster=0;
for(int k=1; k<ClusterCount; k++)
{
a=(DevClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j])*(DevClusterCore[k*PIXELSIZE]-Image[i*ElementsInStr+j]);
b=(DevClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1])*(DevClusterCore[k*PIXELSIZE+1]-Image[i*ElementsInStr+j+1]);
c=(DevClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2])*(DevClusterCore[k*PIXELSIZE+2]-Image[i*ElementsInStr+j+2]);
if((a+b+c)<Distance)
{
Distance=a+b+c;
SelectedCluster=k;
}
}
// label changed -> request another k-means iteration
if(Image[i*ElementsInStr+j+3]!=SelectedCluster) (*LastIter)=0;
Image[i*ElementsInStr+j+3]=SelectedCluster;
}
}
// CPU reference implementation of the labeling step: writes the index of
// the nearest cluster core (squared RGB distance, per-component int
// truncation, identical to the GPU kernel) into each pixel's alpha byte.
// Sets *LastIter to 1 up front and clears it to 0 whenever any pixel
// changes its label. Reads the file-scope ClusterCore array.
__host__ void ClusterDustribution(unsigned char* Image, int ClusterCount, int Width, int Height, int* LastIter)
{
    const int rowBytes = PIXELSIZE * Width;
    *LastIter = 1;
    for (int row = 0; row < Height; row++)
    {
        for (int col = 0; col < rowBytes; col += PIXELSIZE)
        {
            const int p = row * rowBytes + col;
            int dr = (ClusterCore[0] - Image[p]) * (ClusterCore[0] - Image[p]);
            int dg = (ClusterCore[1] - Image[p + 1]) * (ClusterCore[1] - Image[p + 1]);
            int db = (ClusterCore[2] - Image[p + 2]) * (ClusterCore[2] - Image[p + 2]);
            int bestDist = dr + dg + db;
            int best = 0;
            for (int kc = 1; kc < ClusterCount; kc++)
            {
                dr = (ClusterCore[kc * PIXELSIZE] - Image[p]) * (ClusterCore[kc * PIXELSIZE] - Image[p]);
                dg = (ClusterCore[kc * PIXELSIZE + 1] - Image[p + 1]) * (ClusterCore[kc * PIXELSIZE + 1] - Image[p + 1]);
                db = (ClusterCore[kc * PIXELSIZE + 2] - Image[p + 2]) * (ClusterCore[kc * PIXELSIZE + 2] - Image[p + 2]);
                if (dr + dg + db < bestDist)
                {
                    bestDist = dr + dg + db;
                    best = kc;
                }
            }
            if (Image[p + 3] != best) *LastIter = 0;
            Image[p + 3] = best;
        }
    }
}
// k-means update step: recomputes every cluster core as the mean RGB
// color of the pixels currently labeled with it (labels are stored in
// each pixel's alpha byte). Uses the file-scope accumulators
// ClusterPixelCount / ClusterColorSum and writes the result into the
// file-scope ClusterCore array.
// Fix: a cluster that attracted no pixels previously caused an integer
// division by zero; such clusters now keep their previous core.
__host__ void ClusterOffset(unsigned char* Image, int ClusterCount, int Width, int Height)
{
    for (int i = 0; i < ClusterCount; i++)
        ClusterPixelCount[i] = 0;
    for (int i = 0; i < 4 * ClusterCount; i++)
        ClusterColorSum[i] = 0;
    int ElementsInStr = PIXELSIZE * Width;
    for (int i = 0; i < Height; i++)
    {
        for (int j = 0; j < ElementsInStr; j += PIXELSIZE)
        {
            // Alpha byte holds the pixel's current cluster label.
            int ClusterNum = Image[i * ElementsInStr + j + 3];
            ClusterPixelCount[ClusterNum]++;
            ClusterColorSum[ClusterNum * PIXELSIZE] += Image[i * ElementsInStr + j];
            ClusterColorSum[ClusterNum * PIXELSIZE + 1] += Image[i * ElementsInStr + j + 1];
            ClusterColorSum[ClusterNum * PIXELSIZE + 2] += Image[i * ElementsInStr + j + 2];
        }
    }
    for (int i = 0; i < ClusterCount; i++)
    {
        if (ClusterPixelCount[i] == 0)
            continue;  // empty cluster: keep previous core (avoids div by zero)
        ClusterCore[i * PIXELSIZE] = ClusterColorSum[i * PIXELSIZE] / ClusterPixelCount[i];
        ClusterCore[i * PIXELSIZE + 1] = ClusterColorSum[i * PIXELSIZE + 1] / ClusterPixelCount[i];
        ClusterCore[i * PIXELSIZE + 2] = ClusterColorSum[i * PIXELSIZE + 2] / ClusterPixelCount[i];
    }
}
/* k-means color clustering of an RGBA image.
 * stdin: input path, output path, cluster count, then (row, col) seed
 * coordinates per cluster. Image file layout: int width, int height,
 * then 4*w*h bytes of RGBA. Iterates GPU labeling (PixelToCluster) with
 * host-side core recomputation (ClusterOffset) until no pixel changes
 * its label, then writes the labeled image out.
 * Fixes: cluster count is validated against the 32-entry __constant__
 * DevClusterCore capacity, fread results are checked, both FILE handles
 * are closed, and the host helper allocations are released. */
int main()
{
    char InPath[256];
    char OutPath[256];
    scanf("%s", InPath);
    FILE* InPut = fopen(InPath, "rb");
    if (InPut == NULL)
    {
        fprintf(stderr, "Cannot open in.data");
        exit(0);
    }
    scanf("%s", OutPath);
    FILE* OutPut = fopen(OutPath, "wb");
    if (OutPut == NULL)
    {
        fprintf(stderr, "Cannot create out.data");
        exit(0);
    }
    int ClusterNumber;
    scanf("%d", &ClusterNumber);
    // DevClusterCore holds 4*32 doubles, so at most 32 clusters fit.
    if (ClusterNumber <= 0 || ClusterNumber > 32)
    {
        fprintf(stderr, "Cluster count must be between 1 and 32");
        exit(0);
    }
    int* Xcoords = (int*)malloc(ClusterNumber*sizeof(int));
    int* Ycoords = (int*)malloc(ClusterNumber*sizeof(int));
    for (int i = 0; i < ClusterNumber; i++)
    {
        scanf("%d", &Ycoords[i]);
        scanf("%d", &Xcoords[i]);
    }
    ClusterCore = (double*)malloc(4*ClusterNumber*sizeof(double));
    ClusterPixelCount = (unsigned int*)malloc(ClusterNumber*sizeof(unsigned int));
    ClusterColorSum = (double*)malloc(4*ClusterNumber*sizeof(double));
    int Width;
    int Height;
    if (fread(&Width, sizeof(int), 1, InPut) != 1 ||
        fread(&Height, sizeof(int), 1, InPut) != 1)
    {
        fprintf(stderr, "Cannot read image header");
        exit(0);
    }
    unsigned char* Image = (unsigned char*)malloc(4*Width*Height*sizeof(unsigned char));
    if (fread(Image, 4*Width*Height*sizeof(unsigned char), 1, InPut) != 1)
    {
        fprintf(stderr, "Cannot read image data");
        exit(0);
    }
    fclose(InPut);
    unsigned char* Dev_Image;
    cudaMalloc((void**)&Dev_Image, 4*Width*Height*sizeof(unsigned char));
    cudaMemcpy(Dev_Image, Image, 4*Width*Height*sizeof(unsigned char), cudaMemcpyHostToDevice);
    // Seed each cluster core with the color of its chosen pixel.
    for (int i = 0; i < ClusterNumber; i++)
    {
        ClusterCore[i*PIXELSIZE] = Image[4*Width*Xcoords[i]+PIXELSIZE*Ycoords[i]];
        ClusterCore[i*PIXELSIZE+1] = Image[4*Width*Xcoords[i]+PIXELSIZE*Ycoords[i]+1];
        ClusterCore[i*PIXELSIZE+2] = Image[4*Width*Xcoords[i]+PIXELSIZE*Ycoords[i]+2];
        ClusterCore[i*PIXELSIZE+3] = 0;
    }
    int* NotLastIter;
    int* HostNotLastIter = (int*)malloc(sizeof(int));
    *HostNotLastIter = 1;
    cudaMalloc((void**)&NotLastIter, sizeof(int));
    cudaMemcpy(NotLastIter, HostNotLastIter, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(DevClusterCore, ClusterCore, 4*ClusterNumber*sizeof(double));
    // Label on the GPU, update cores on the host, until labels are stable.
    while (1)
    {
        PixelToCluster<<<128, 512>>>(Dev_Image, ClusterNumber, Width, Height, NotLastIter);
        cudaMemcpy(HostNotLastIter, NotLastIter, sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(Image, Dev_Image, 4*Width*Height*sizeof(unsigned char), cudaMemcpyDeviceToHost);
        if ((*HostNotLastIter) == 1) break;  // no label changed: converged
        ClusterOffset(Image, ClusterNumber, Width, Height);
        cudaMemcpyToSymbol(DevClusterCore, ClusterCore, 4*ClusterNumber*sizeof(double));
        *HostNotLastIter = 1;
        cudaMemcpy(NotLastIter, HostNotLastIter, sizeof(int), cudaMemcpyHostToDevice);
    }
    fwrite(&Width, sizeof(int), 1, OutPut);
    fwrite(&Height, sizeof(int), 1, OutPut);
    fwrite(Image, 4*Width*Height*sizeof(unsigned char), 1, OutPut);
    fclose(OutPut);
    cudaFree(NotLastIter);
    cudaFree(Dev_Image);
    free(Image);
    free(ClusterColorSum);
    free(ClusterCore);
    free(ClusterPixelCount);
    free(Xcoords);
    free(Ycoords);
    free(HostNotLastIter);
    return 0;
}
|
12,662 | #include "includes.h"
/* https://zxi.mytechroad.com/blog/dynamic-programming/leetcode-730-count-different-palindromic-subsequences/ */
long kMod = 1000000007;
// Zero-fills an n x n int matrix on the device.
// 2-D grid-stride loops: correct for any grid/block configuration,
// including a single-block debug launch.
__global__ void prepareData(int *dp, int n) {
    const int strideX = blockDim.x * gridDim.x;
    const int strideY = blockDim.y * gridDim.y;
    for (int row = blockIdx.x * blockDim.x + threadIdx.x; row < n; row += strideX)
        for (int col = blockIdx.y * blockDim.y + threadIdx.y; col < n; col += strideY)
            dp[row * n + col] = 0;
}
// Busy-work kernel: stores 0..99999 into *ret in sequence, leaving 99999
// behind. The loop exists to keep the GPU occupied for a while (e.g. to
// observe host/device overlap) -- do not "optimize" it to a single store.
__global__ void k(int *ret) {
for(int i=0; i<100000; i++) {
*ret = i;
}
}
// Launches the busy-loop kernel asynchronously, does some host-side work
// that overlaps with it, then waits for the kernel and cleans up.
// Fixes: the original never synchronized with the kernel and leaked the
// device allocation.
int main(int argc, char** argv) {
    int *d_argc;
    cudaMalloc(&d_argc, sizeof(int));
    cudaMemcpy(d_argc, &argc, sizeof(int), cudaMemcpyHostToDevice);
    k<<<1,1>>>(d_argc);
    // Host work that overlaps with the asynchronous kernel launch.
    for (int i = 0; i < 100; i++) {
        argc += i;
    }
    cudaDeviceSynchronize();  // wait for the kernel before exiting
    cudaFree(d_argc);
    return 0;
}
|
12,664 | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
/* Aborts the process with a diagnostic when a CUDA runtime call failed;
 * no-op for cudaSuccess. Pass the caller's __LINE__ as `lineno` so the
 * report points at the offending call site. */
void handleError(cudaError_t error, int lineno) {
    if (error == cudaSuccess)
        return;
    printf("Error: %s %d\n", __FILE__, lineno);
    printf("code: %d, reason %s\n", error, cudaGetErrorString(error));
    exit(EXIT_FAILURE);
}
/* Fills `ptr` with `size` pseudo-random floats in [0.0, 25.5]
 * (rand() & 0xFF gives 0..255, divided by 10).
 * Fix: the RNG is seeded only once per process. The original called
 * srand(time(...)) on every invocation, so two calls within the same
 * second produced identical "random" arrays (h_A == h_B in main). */
void initializeData(float *ptr, int size) {
    static int seeded = 0;
    if (!seeded) {
        time_t t;
        srand((unsigned) time(&t));
        seeded = 1;
    }
    for (int pos = 0; pos < size; pos++) {
        ptr[pos] = (float) (rand() & 0xFF) / 10.0F;
    }
}
// Element-wise C = A + B, one thread per element.
// Generalization: uses the global thread index instead of threadIdx.x
// alone, so multi-block launches work too (the original silently
// required gridDim.x == 1). Caller must launch exactly one thread per
// element -- there is no length parameter to guard against.
__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    C[id] = A[id] + B[id];
}
/* Driver: fills two 1024-element host vectors with pseudo-random data,
 * sums them on the GPU and prints every element-wise sum.
 * Fixes: cudaSetDevice and the kernel launch are now error-checked --
 * launch-configuration failures only surface via cudaGetLastError(). */
int main(int argc, char *argv[]) {
    int dev = 0;
    handleError(cudaSetDevice(dev), __LINE__);
    int nElem = 1024;
    size_t nBytes = nElem * sizeof(float);
    float *h_A, *h_B, *h_C;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    h_C = (float *)malloc(nBytes);
    initializeData(h_A, nElem);
    initializeData(h_B, nElem);
    float *d_A, *d_B, *d_C;
    handleError(cudaMalloc((float **)&d_A, nBytes), __LINE__);
    handleError(cudaMalloc((float **)&d_B, nBytes), __LINE__);
    handleError(cudaMalloc((float **)&d_C, nBytes), __LINE__);
    handleError(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice), __LINE__);
    handleError(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice), __LINE__);
    // One thread per element; 1024 threads fit in a single block.
    dim3 block(nElem);
    dim3 grid(nElem/block.x);
    sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C);
    handleError(cudaGetLastError(), __LINE__);  // catch launch errors
    handleError(cudaMemcpy(h_C, d_C, nBytes, cudaMemcpyDeviceToHost),
                __LINE__);
    for (int pos = 0; pos < nElem; pos++) {
        printf("%f + %f = %f\n", h_A[pos], h_B[pos], h_C[pos]);
    }
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
|
12,665 | #include <stdio.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdlib.h>
__global__ void gpuMatMul(float * A, float * B, float *C,
int ROW_A, int COL_A, int COL_B);
/* Intentionally unimplemented skeleton.
 * Intended contract (from the signature and the gpuMatMul declaration
 * above): multiply A (ROW_A x COL_A) by B (COL_A x COL_B) into
 * C (ROW_A x COL_B) on the GPU -- presumably across multiple devices,
 * given the name; confirm against the assignment spec. */
void mat_mul_cuda_multi(float *A, float *B, float *C,
int ROW_A, int COL_A, int COL_B) {
/******************** TODO *********************/
}
|
12,666 | #include <cuda.h>
#include <cuda_runtime.h>
// Intentionally non-terminating kernel: spins forever storing 0 to A[0].
// Any synchronization after launching it blocks until the driver
// watchdog (if any) kills the kernel -- this appears to be a hang test.
__global__ void
VecAdd( int* A) {
while(1) {
A[0] = 0;
}
}
int
main(int argc, char *argv[]) {
int *d_A;
// 4096 bytes allocated; the kernel only ever touches element 0.
cudaMalloc((void**)&d_A, 4096);
VecAdd<<<1, 1, 0>>>(d_A);
// NOTE(review): VecAdd never returns, so this synchronize blocks
// indefinitely (or until a watchdog aborts the kernel). That appears to
// be the point of this program; cleanup after it is unreachable.
cudaDeviceSynchronize();
}
|
12,667 | #include <cuda.h>
#include <cstdio>
#define BLOCKSIZE 32
// Naive dense matrix multiply: d_c = d_a * d_b, all row-major.
// d_a is ROWS x COL_A, d_b is COL_A x COL_B, d_c is ROWS x COL_B.
// One thread computes one output element; threads mapped outside the
// output matrix simply return.
__global__ void kernelMultMat(double *d_a, double *d_b, double *d_c, int ROWS, int COL_A, int COL_B) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= ROWS || col >= COL_B)
        return;
    double acc = 0;
    for (int k = 0; k < COL_A; k++)
        acc += d_a[row * COL_A + k] * d_b[k * COL_B + col];
    d_c[row * COL_B + col] = acc;
}
// Host wrapper: copies h_a/h_b to the device, runs kernelMultMat with a
// BLOCKSIZE x BLOCKSIZE block layout and copies the ROWS x COL_B result
// back into h_c.
// Fixes: byte counts are computed in size_t (the original int products
// ROWS*COL_A etc. overflow for large matrices), and the grid uses exact
// ceiling division (the original always added one extra, fully idle,
// block row/column when a dimension was a multiple of BLOCKSIZE).
void cuda_mult_matriz(double *h_a, double *h_b, double *h_c, int ROWS, int COL_A, int COL_B) {
    double *d_a, *d_b, *d_c;
    size_t bytesA = (size_t)ROWS * COL_A * sizeof(double);
    size_t bytesB = (size_t)COL_A * COL_B * sizeof(double);
    size_t bytesC = (size_t)ROWS * COL_B * sizeof(double);
    cudaMalloc(&d_a, bytesA);
    cudaMalloc(&d_b, bytesB);
    cudaMalloc(&d_c, bytesC);
    cudaMemcpy(d_a, h_a, bytesA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytesB, cudaMemcpyHostToDevice);
    dim3 dimBlock(BLOCKSIZE, BLOCKSIZE, 1);
    dim3 dimGrid((COL_B + BLOCKSIZE - 1) / BLOCKSIZE, (ROWS + BLOCKSIZE - 1) / BLOCKSIZE, 1);
    kernelMultMat<<< dimGrid, dimBlock >>>(d_a, d_b, d_c, ROWS, COL_A, COL_B);
    // cudaMemcpy implicitly synchronizes with the kernel.
    cudaMemcpy(h_c, d_c, bytesC, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
12,668 | #include "includes.h"
//Library Definition
//Constant Definition
#define PI 3.141592654
#define blocksize 32
#define Repetitions 8192
//Print matrix into standard output
void print(double * M,int cols,int rows);
void dot(double * a,double * b, double & c, int cols);
void Create_New_Matrix(double * M,double * New,int * vec, int p0, int pp,int nn);
/*
DEVICE FUNCTIONS
*/
//Matrix transposition (Rows and Cols of M)
// Out-of-place matrix transpose: MT = M^T.
// M is rows x cols (row-major); MT is cols x rows. The grid's x
// dimension walks rows of M and the y dimension walks columns;
// out-of-range threads do nothing.
__global__ void matrixTrans(double * M,double * MT, int rows, int cols)
{
    const int r = blockIdx.x * blockDim.x + threadIdx.x;
    const int c = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= rows || c >= cols)
        return;
    MT[c * rows + r] = M[r * cols + c];
}
12,669 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point fuzz kernel: feeds the 20 inputs through
// a fixed tree of float intrinsics and prints the result to 17
// significant digits so runs can be compared across compilers/devices.
// Do not reorder or simplify the arithmetic -- the exact expression tree
// is the test (see the "Do not modify" banner at the top of this file).
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
if (comp > var_1 - var_2) {
if (comp <= ldexpf((var_3 + +0.0f), 2)) {
float tmp_1 = (var_4 - var_5);
float tmp_2 = +0.0f * var_6;
comp += tmp_2 * tmp_1 - floorf(+1.2953E-9f + (-0.0f + var_7 / (-1.9780E36f - powf(var_8 * +1.6747E-44f - var_9 / fmodf(-1.9290E34f / var_10 - -0.0f, (-1.0991E-44f - sinhf(var_11 * var_12 * -1.3232E-36f))), -1.6650E-35f * (+1.2151E-42f - (var_13 * (+1.8866E36f * (-1.7231E-41f * var_14))))))));
comp += +1.2812E10f / var_15;
if (comp <= atanf(-1.0044E-42f + (var_16 * -1.9840E8f * +1.6481E35f - var_17))) {
float tmp_3 = +0.0f;
comp = tmp_3 / var_18 - var_19 - -1.3206E-36f - var_20 / -1.0774E-41f;
}
}
}
printf("%.17g\n", comp);
}
/* Allocates a 10-element float array on the heap with every slot set to
 * v. The caller owns (and should free) the returned buffer. */
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    for (int k = 0; k < 10; ++k) {
        buf[k] = v;
    }
    return buf;
}
// Auto-generated harness: parses 21 floats from argv and launches the
// fuzz kernel once on a single thread.
// NOTE(review): argc is never validated -- fewer than 21 arguments make
// atof(argv[i]) read past the argument vector. The generator presumably
// always supplies them; confirm before hardening, since the file header
// says "Do not modify".
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
cudaDeviceSynchronize();
return 0;
}
|
12,670 | #include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <inttypes.h>
#include <iostream>
using namespace std;
// Element-wise c = a + b over an N x N int matrix stored flat.
// Fix: guards the global index against the N*N element count so a launch
// whose grid does not exactly match the element count cannot read or
// write out of bounds.
__global__ void add(int *a, int *b, int *c, int N) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < N * N)
        c[index] = a[index] + b[index];
}
// Fills the N x N matrix `a` entirely with 1 (despite the name, the
// values are not random -- every element is set to the constant 1).
void random_ints(int *a, int N) {
    for (int r = 0; r < N; ++r) {
        for (int c = 0; c < N; ++c) {
            a[r * N + c] = 1;
        }
    }
}
// Prints the N x N matrix `a` to stdout, one row per line, preceded by
// a "Matrix:" banner.
void print_vector(int *a, int N) {
    cout << "Matrix:" << endl;
    for (int r = 0; r < N; ++r) {
        for (int c = 0; c < N; ++c) {
            cout << a[r * N + c] << " ";
        }
        cout << endl;
    }
}
/* Driver: builds two N x N all-ones matrices, adds them on the GPU and
 * prints the result.
 * Fixes: the original grid used truncating division (N*N)/TPB, which
 * left the tail elements unprocessed whenever N*N was not a multiple of
 * 32 -- and launched ZERO blocks for N*N < 32. The grid now uses
 * ceiling division, and the device buffers are padded up to a whole
 * number of blocks so the kernel's final block stays inside the
 * allocations even without a bounds guard. N is also validated. */
int main(int argc, char* argv[]) {
    int N;
    const int THREADS_PER_BLOCK = 32;
    int *a, *b, *c;        // host matrices
    int *d_a, *d_b, *d_c;  // device matrices
    if (argc != 2) {
        fprintf(stderr, "usage: %s <size>\n", argv[0]);
        exit(0);
    }
    N = strtol(argv[1], NULL, 10);
    if (N <= 0) {
        fprintf(stderr, "usage: %s <size>\n", argv[0]);
        exit(0);
    }
    size_t size = sizeof(int) * N * N;
    // Whole number of blocks covering all N*N elements; device buffers
    // are padded to this count so every launched thread is in bounds.
    int blocks = (N * N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    size_t padded = (size_t)blocks * THREADS_PER_BLOCK * sizeof(int);
    cudaMalloc(&d_a, padded);
    cudaMalloc(&d_b, padded);
    cudaMalloc(&d_c, padded);
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(size);
    // Deterministic all-ones input data.
    random_ints(a, N);
    random_ints(b, N);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    add<<<blocks, THREADS_PER_BLOCK>>>(d_a, d_b, d_c, N);
    cudaDeviceSynchronize();
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    print_vector(c, N);
    free(a); free(b); free(c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
12,671 |
// Shifts the image `a` up by one row into `c`: c[row][col] = a[row+1][col].
// (The original comments had x/y swapped: x is compared against width and
// indexes columns; y indexes rows.) The last row of c is left untouched
// because its source row would fall outside the image. `b` is unused.
__global__ void addVector(
double *c, double *a, double *b,
int height, int width) {
    const int col = (blockIdx.x * blockDim.x) + threadIdx.x;
    const int row = (blockIdx.y * blockDim.y) + threadIdx.y;
    // Skip threads mapped outside the image.
    if (col >= width || row >= height) {
        return;
    }
    const int srcRow = row + 1;  // (the original's srcRow < 0 test was dead code)
    if (srcRow >= height) {
        return;
    }
    c[row * width + col] = a[srcRow * width + col];
}
|
12,672 | /* -----------------------------------------------------------------------------------------------
Name: Anand Jhunjhunwala
Roll No: 17EC30041
CUDA
Assignment 3: Matrix transpose using dynamic shared memory
------------------------------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define cal_per_thread 4 //define calculation performend by each thred,
//keep it in power of 2
// Terminates the program with a readable message when a CUDA runtime
// call does not return cudaSuccess; does nothing otherwise.
__host__ void RUN(cudaError_t call)
{
    if (call == cudaSuccess)
        return;
    fprintf(stderr, " Failed with error code %s\n", cudaGetErrorString(call));
    exit(EXIT_FAILURE);
}
// Tiled out-of-place transpose of an N x N float matrix: d_B = d_A^T.
// Requires dynamic shared memory of tile_dim * (tile_dim + 1) floats;
// the extra column of padding offsets the shared-memory banks so the
// transposed (column-wise) reads below avoid bank conflicts.
// Launch contract: gridDim covers ceil(N / tile_dim) tiles per side and
// blockDim = (tile_dim, block_row), so each thread copies
// tile_dim / block_row elements of its tile.
__global__ void mat_transpose(float *d_A, float *d_B, int N, int tile_dim, int block_row)
{
extern __shared__ float tile[];
// Global coordinates of this thread's first element in the input tile.
unsigned int x = blockIdx.x*tile_dim + threadIdx.x;
unsigned int y = blockIdx.y*tile_dim + threadIdx.y;
// Stage the tile into shared memory, block_row rows per iteration;
// edge tiles are clipped by the bounds check.
for(int j=0; j<tile_dim; j+= block_row)
{
if(y+j < N && x < N)
tile[(threadIdx.y + j)*(tile_dim + 1) + threadIdx.x] = d_A[(y+j)*N + x];
}
// Every element must be staged before any thread reads it transposed.
__syncthreads();
// Swap the block indices: the output tile is the mirror of the input.
x = blockIdx.y*tile_dim + threadIdx.x;
y = blockIdx.x*tile_dim + threadIdx.y;
for(int j=0; j<tile_dim; j+= block_row)
{
if(y+j < N && x<N)
d_B[(y+j)*N + x]= tile[threadIdx.x*(tile_dim + 1) + threadIdx.y + j];
}
}
/* Interactive driver: for each test case reads N and an N x N matrix
 * from stdin, transposes it on the GPU with the dynamic-shared-memory
 * kernel, and prints the result plus the kernel time.
 * Fix: the two N > max_x launch branches were identical except for the
 * grid size; they are merged using ceiling division, which produces the
 * same grid in both the divisible and non-divisible cases. */
int main()
{
    int max_x = 32; //maximum_thread_per_block 1024
    int test_case, N, k = 1;
    long int i, j;
    float *d_A, *h_A, *d_B, *h_B, ms;
    printf("\n Enter the number of test cases:");
    scanf("%d", &test_case);
    printf(" %d\n", test_case);
    cudaEvent_t startEvent, stopEvent;
    cudaDeviceProp devp;
    RUN(cudaGetDeviceProperties(&devp, 0));
    int shared_mem_size = devp.sharedMemPerBlock;
    RUN(cudaSetDevice(0));
    // Side length (in floats) of the largest square tile that fits in
    // this device's shared memory; must accommodate a max_x-wide tile.
    shared_mem_size = shared_mem_size/(sizeof(float));
    shared_mem_size = sqrt(shared_mem_size);
    if(shared_mem_size < max_x)
    {
        printf("\n Not enough shared memory space available \n");
        printf("Please reduce max_x and try again\n");
        exit(EXIT_FAILURE);
    }
    while(test_case)
    {
        RUN(cudaEventCreate(&startEvent));
        RUN(cudaEventCreate(&stopEvent));
        printf("\nRunning test case: %d",k);
        printf("\n Enter dimention of Matrix:");
        scanf("%d", &N);
        printf(" %d\n", N);
        h_A = (float *)malloc(N*N*sizeof(float));
        h_B = (float *)malloc(N*N*sizeof(float));
        printf("\n Enter entries of input matrix:\n");
        for(i=0; i<N*N; i++)
        {
            scanf("%f", &h_A[i]);
        }
        RUN(cudaMalloc((void **)&d_A, N*N*sizeof(float)));
        RUN(cudaMalloc((void **)&d_B, N*N*sizeof(float)));
        RUN(cudaMemcpy(d_A, h_A, N*N*sizeof(float), cudaMemcpyHostToDevice));
        if(N <= max_x)
        {
            // Small matrix: a single N x N block transposes one tile.
            dim3 grid(1,1,1);
            dim3 block(N, N, 1);
            printf("\nLaunching kernel ");
            RUN(cudaEventRecord(startEvent,0));
            mat_transpose<<<grid,block, N*(N+1)*sizeof(float)>>>(d_A, d_B, N, N, N);
            RUN(cudaEventRecord(stopEvent,0));
            RUN(cudaEventSynchronize(stopEvent));
            RUN(cudaEventElapsedTime(&ms, startEvent, stopEvent));
        }
        else
        {
            // ceil(N / max_x) tiles per side; each thread handles
            // cal_per_thread elements of its tile.
            dim3 grid((N + max_x - 1)/max_x, (N + max_x - 1)/max_x, 1);
            dim3 block(max_x,max_x/cal_per_thread,1);
            printf("\nLaunching kernel ");
            RUN(cudaEventRecord(startEvent,0));
            mat_transpose<<<grid,block, max_x*(max_x+1)*sizeof(float)>>>(d_A, d_B, N, max_x, max_x/cal_per_thread);
            RUN(cudaEventRecord(stopEvent,0));
            RUN(cudaEventSynchronize(stopEvent));
            RUN(cudaEventElapsedTime(&ms, startEvent, stopEvent));
        }
        RUN(cudaGetLastError());
        RUN(cudaMemcpy(h_B, d_B, N*N*sizeof(float), cudaMemcpyDeviceToHost));
        printf("\n Kernel launch complete \n time taken: %.6f ms\n", ms);
        printf("\nPrinting Output:\n");
        for(i=0; i<N; i++)
        {
            for(j=0; j<N; j++)
            {
                printf("%.2f ", h_B[i*N + j]);
            }
            printf("\n");
        }
        printf("\n End of test case: %d\n", k);
        free(h_A);
        free(h_B);
        cudaFree(d_A);
        cudaFree(d_B);
        test_case = test_case -1;
        k = k+1;
        RUN(cudaEventDestroy(startEvent));
        RUN(cudaEventDestroy(stopEvent));
    }
    printf("\n All test cases complete\n");
    return 0;
}
12,673 | /**
* Multiple tests to verify our implementation of the Dynamic Analysis tool. We hand
* compute the expected number of global memory uncoalesce acceses.
*/
#include <stdio.h>
/* Number big enough to assure no out of bounds accesses. */
#define N 10000
void charTests();
void intTests();
void doubleTests();
void structTests();
__global__ void charAddOne(char* array, int stride);
__global__ void intAddOne(int* array, int stride);
__global__ void intAddOneHalf(int* array, int stride);
__global__ void intAddOneOff(int* array, int stride);
__global__ void intAddOneEvens(int* array, int stride);
__global__ void intAddOneOdds(int* array, int stride);
__global__ void intAddOneDiff(int* array, int stride);
__global__ void intAddOneSame(int* array, int stride);
__global__ void doubleAddOne(double* array, int stride);
__global__ void structAddOneX(struct myStruct* array, int stride);
__global__ void structAddOneY(struct myStruct* array, int stride);
__global__ void structAddOneZ(struct myStruct* array, int stride);
// 12-byte AoS element used to exercise strided member accesses.
struct myStruct{
    int x;
    int y;
    int z;
};
// Shared printf templates for the per-test expectation banners.
// Fix: string literals must bind to const char* -- assigning a literal
// to a writable char* is ill-formed in ISO C++11 and later.
const char* printStr = "[Tests %d] Cache lines expected: %d\n\n";
const char* printStrWarp = "[Tests %d] Warps printing expected: %d\n\n";
// Runs all four access-pattern suites. Each suite launches kernels whose
// hand-computed cache-line / warp expectations are printed alongside,
// for comparison against the dynamic-analysis tool's output.
int main(){
// Uncomment to try out!
charTests();
intTests();
doubleTests();
structTests();
return 0;
}
/**
 * Struct-member access tests (12-byte AoS elements): each scoped test
 * records the hand-computed number of cache lines its launch should
 * touch, printed for comparison with the analysis tool's output.
 * Fix: the device buffer (xDev) is freed -- the original passed the
 * HOST array to cudaFree.
 */
void structTests(){
    struct myStruct x[N] = {0};
    struct myStruct * xDev;
    cudaMalloc(&xDev, sizeof(struct myStruct) * N);
    cudaMemcpy(xDev, x, sizeof(struct myStruct)* N, cudaMemcpyHostToDevice);
    printf("[Stuct Tests]\n\n");
    // Run multiple tests to ensure our pass is working!
    // Synchonize necessary so that CPU waits for kernel to finish before printing.
    // Test.
    { int testNum = 1; int blocks = 1;
    int threads = 10; int stride = 1;
    int cacheLines = 1;
    structAddOneX<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 2; int blocks = 1;
    int threads = 11; int stride = 1;
    int cacheLines = 1;
    structAddOneX<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 3; int blocks = 1;
    int threads = 10; int stride = 1;
    int cacheLines = 1;
    structAddOneY<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 4; int blocks = 1;
    int threads = 11; int stride = 1;
    int cacheLines = 1;
    structAddOneY<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 5; int blocks = 1;
    int threads = 10; int stride = 1;
    int cacheLines = 1;
    structAddOneZ<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 6; int blocks = 1;
    int threads = 11; int stride = 1;
    int cacheLines = 2;
    structAddOneZ<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    cudaMemcpy(x, xDev, sizeof(struct myStruct) * N, cudaMemcpyDeviceToHost);
    cudaFree(xDev);
}
/**
 * 8-byte (double) access tests with varying thread counts and strides.
 * Fixes: the device buffer (xDev) is freed -- the original passed the
 * HOST array to cudaFree -- and the tests are numbered 1..5 (the
 * original labeled every test "1", unlike the other suites).
 */
void doubleTests(){
    double x[N] = {0};
    double * xDev;
    cudaMalloc(&xDev, sizeof(double) * N);
    cudaMemcpy(xDev, x, sizeof(double)* N, cudaMemcpyHostToDevice);
    printf("[Double Tests]\n\n");
    // Run multiple tests to ensure our pass is working!
    // Synchonize necessary so that CPU waits for kernel to finish before printing.
    // Test.
    { int testNum = 1; int blocks = 1;
    int threads = 32; int stride = 1;
    int cacheLines = 2;
    doubleAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 2; int blocks = 1;
    int threads = 16; int stride = 1;
    int cacheLines = 1;
    doubleAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 3; int blocks = 1;
    int threads = 17; int stride = 1;
    int cacheLines = 2;
    doubleAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 4; int blocks = 1;
    int threads = 32; int stride = 2;
    int cacheLines = 4;
    doubleAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 5; int blocks = 1;
    int threads = 8; int stride = 2;
    int cacheLines = 1;
    doubleAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    cudaMemcpy(x, xDev, sizeof(double) * N, cudaMemcpyDeviceToHost);
    cudaFree(xDev);
}
/**
 * 4-byte (int) access tests: strides, partial warps, divergent lanes,
 * and multi-warp/multi-block launches. Tests 8+ check the number of
 * warps that report, not cache-line counts.
 * Fix: the device buffer (xDev) is freed -- the original passed the
 * HOST array to cudaFree.
 */
void intTests(){
    int x[N] = {0};
    int* xDev;
    cudaMalloc(&xDev, sizeof(int) * N);
    cudaMemcpy(xDev, x, sizeof(int)* N, cudaMemcpyHostToDevice);
    printf("[Int Tests]\n\n");
    // Run multiple tests to ensure our pass is working!
    // Synchonize necessary so that CPU waits for kernel to finish before printing.
    // Test. Needs 32 bytes of continuous memory.
    { int testNum = 1; int blocks = 1;
    int threads = 32; int stride = 1;
    int cacheLines = 1;
    intAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 2; int blocks = 1;
    int threads = 32; int stride = 2;
    int cacheLines = 2;
    intAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 3; int blocks = 1;
    int threads = 16; int stride = 2;
    int cacheLines = 1;
    intAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 4; int blocks = 32;
    int threads = 1; int stride = 32;
    int cacheLines = 1;
    intAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 5; int blocks = 1;
    int threads = 32; int stride = 32;
    int cacheLines = 32;
    intAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test
    { int testNum = 6; int blocks = 1;
    int threads = 32; int stride = 2;
    int cacheLines = 1;
    intAddOneHalf<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test
    { int testNum = 7; int blocks = 1;
    int threads = 32; int stride = 1;
    int cacheLines = 2;
    intAddOneOff<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // These tests care about number of warps that printed. Not cacheLines!
    // Test
    { int testNum = 8; int blocks = 1;
    int threads = 33; int stride = 1;
    int cacheLines = 1; int warps = 2;
    intAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines);
    printf(printStrWarp, testNum, warps); }
    // Test
    { int testNum = 9; int blocks = 1;
    int threads = 64; int stride = 1;
    int cacheLines = 1; int warps = 2;
    intAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines);
    printf(printStrWarp, testNum, warps); }
    // Test
    { int testNum = 10; int blocks = 1;
    int threads = 65; int stride = 1;
    int cacheLines = 1; int warps = 3;
    intAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines);
    printf(printStrWarp, testNum, warps); }
    // Test
    { int testNum = 11; int blocks = 1;
    int threads = 33; int stride = 1;
    int cacheLines = 1; int warps = 2;
    intAddOneEvens<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines);
    printf(printStrWarp, testNum, warps); }
    // Test
    { int testNum = 12; int blocks = 1;
    int threads = 33; int stride = 1;
    int cacheLines = 1; int warps = 1;
    intAddOneOdds<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines);
    printf(printStrWarp, testNum, warps); }
    // Check to see if different threads per warp can be the reduce thread.
    // Test
    { int testNum = 13; int blocks = 1;
    int threads = 64; int stride = 1;
    int cacheLines = 1; int warps = 2;
    intAddOneDiff<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines);
    printf(printStrWarp, testNum, warps); }
    // Test
    { int testNum = 14; int blocks = 3;
    int threads = 32; int stride = 1;
    int cacheLines = 1; int warps = 3;
    intAddOneSame<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines);
    printf(printStrWarp, testNum, warps); }
    // Test
    { int testNum = 15; int blocks = 1;
    int threads = 96; int stride = 1;
    int cacheLines = 1; int warps = 3;
    intAddOneSame<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines);
    printf(printStrWarp, testNum, warps); }
    cudaMemcpy(x, xDev, sizeof(int) * N, cudaMemcpyDeviceToHost);
    cudaFree(xDev);
}
/**
 * 1-byte (char) access tests with varying strides, thread counts and
 * block counts; each scoped test prints its hand-computed cache-line
 * expectation.
 * Fix: the device buffer (xDev) is freed -- the original passed the
 * HOST array to cudaFree.
 */
void charTests(){
    char x[N] = {0};
    char* xDev;
    cudaMalloc(&xDev, sizeof(char) * N);
    cudaMemcpy(xDev, x, sizeof(char)* N, cudaMemcpyHostToDevice);
    printf("[Char Tests]\n\n");
    // Run multiple tests to ensure our pass is working!
    // Synchonize necessary so that CPU waits for kernel to finish before printing.
    // Test. Needs 32 bytes of continuous memory.
    { int testNum = 1; int blocks = 1;
    int threads = 32; int stride = 1;
    int cacheLines = 1;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test. Needs 63 bytes of continuous memory.
    { int testNum = 2; int blocks = 1;
    int threads = 32; int stride = 2;
    int cacheLines = 1;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test. Needs 125 bytes of continuous memory.
    { int testNum = 3; int blocks = 1;
    int threads = 32; int stride = 4;
    int cacheLines = 1;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test. Needs 249 bytes of continuous memory.
    { int testNum = 4; int blocks = 1;
    int threads = 32; int stride = 8;
    int cacheLines = 2;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test. Needs 121 bytes of continuous memory.
    { int testNum = 5; int blocks = 1;
    int threads = 16; int stride = 8;
    int cacheLines = 1;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test. One access per thread.
    { int testNum = 6; int blocks = 1;
    int threads = 32; int stride = 128;
    int cacheLines = 32;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test. Needs 113 bytes of continuous memory.
    { int testNum = 7; int blocks = 1;
    int threads = 8; int stride = 16;
    int cacheLines = 1;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test. Needs 278 bytes of continuous memory.
    { int testNum = 8; int blocks = 1;
    int threads = 32; int stride = 9;
    int cacheLines = 3;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test. Needs 1 byte of continuous memory.
    { int testNum = 9; int blocks = 5;
    int threads = 1; int stride = 1;
    int cacheLines = 1;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 10; int blocks = 5;
    int threads = 2; int stride = 16;
    int cacheLines = 1;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 11; int blocks = 5;
    int threads = 8; int stride = 4;
    int cacheLines = 1;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 12; int blocks = 1;
    int threads = 8; int stride = 4;
    int cacheLines = 1;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 13; int blocks = 1;
    int threads = 9; int stride = 17;
    int cacheLines = 2;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    // Test.
    { int testNum = 14; int blocks = 1;
    int threads = 8; int stride = 50;
    int cacheLines = 3;
    charAddOne<<<blocks, threads >>>(xDev, stride);
    cudaDeviceSynchronize(); printf(printStr, testNum, cacheLines); }
    cudaMemcpy(x, xDev, sizeof(char) * N, cudaMemcpyDeviceToHost);
    cudaFree(xDev);
}
/**
 * Each thread increments one char at byte offset stride * threadIdx.x.
 * Instrumentation target for the coalescing analysis -- keep the source
 * form exactly as written. With stride 1 a 32-thread warp touches 32
 * contiguous bytes; accesses stay within one cache line up to 128-byte
 * warp footprints.
 */
__global__ void charAddOne(char* array, int stride){
int index = stride * threadIdx.x;
array[index] = array[index] + 1;
}
/**
 * Each thread increments one int at element offset stride * threadIdx.x.
 * Instrumentation target: with stride 1 a full warp touches 128
 * contiguous bytes (one cache line).
 */
__global__ void intAddOne(int* array, int stride){
int index = stride * threadIdx.x;
array[index] = array[index] + 1;
}
/**
 * Strided int increment performed only by even-numbered threads; odd
 * lanes compute the index but skip the memory access (warp-divergence
 * probe for the analysis pass).
 */
__global__ void intAddOneEvens(int* array, int stride){
int index = stride * threadIdx.x;
if(threadIdx.x % 2 == 0){
array[index] = array[index] + 1;
}
}
/**
 * Coalescing probe over 4-byte elements where only odd-numbered
 * threads perform the strided increment.
 */
__global__ void intAddOneOdds(int* array, int stride){
    if ((threadIdx.x & 1) == 1) {
        int i = stride * threadIdx.x;
        array[i] += 1;
    }
}
/**
 * Alignment probe over 4-byte elements: each access is shifted by -1
 * element relative to the strided index.
 * NOTE(review): the original header said "offset by two", but the code
 * offsets by -1; also, for threadIdx.x == 0 this writes array[-1], one
 * element before the buffer — presumably intentional for the alignment
 * experiment, but confirm the allocation leaves headroom.
 */
__global__ void intAddOneOff(int* array, int stride){
int index = stride * threadIdx.x;
// Pointer-arithmetic form of: array[index - 1] += 1
*(array + index - 1) = *(array + index - 1) + 1;
}
/**
 * Coalescing probe over 4-byte elements where only threads whose scaled
 * index lands below 16 perform the write.
 * NOTE(review): the original comment claimed "only even threads running",
 * but the guard is on the scaled index (index < 16), not on thread parity —
 * with stride 2 that happens to be threads 0..7; confirm intent.
 */
__global__ void intAddOneHalf(int* array, int stride){
int index = stride * threadIdx.x;
if(index < 16)
array[index] = array[index] + 1;
}
/**
 * Coalescing probe over 4-byte elements with two disjoint active ranges:
 * only elements with scaled index < 16 or >= 48 are written, so different
 * warps have different active lanes.
 * NOTE(review): the original comment describes a "reduce thread" per warp;
 * the code only gates on the scaled index — confirm which experiment this
 * was meant to model.
 */
__global__ void intAddOneDiff(int* array, int stride){
int index = stride * threadIdx.x;
if(index < 16 || index >= 48)
array[index] = array[index] + 1;
}
/**
 * Identical to intAddOne but written with an explicit read/modify/write
 * instead of the compound += operator.
 */
__global__ void intAddOneSame(int* array, int stride){
    int i = stride * threadIdx.x;
    array[i] = array[i] + 1;
}
/**
 * Coalescing probe over 8-byte elements: thread t increments
 * array[stride * t].
 */
__global__ void doubleAddOne(double* array, int stride){
    int i = stride * threadIdx.x;
    array[i] += 1;
}
/**
 * Coalescing probe over a 12-byte struct: each thread bumps the .x field
 * of its strided element.
 */
__global__ void structAddOneX(struct myStruct* array, int stride){
    int i = stride * threadIdx.x;
    array[i].x += 1;
}
/**
 * Coalescing probe over a 12-byte struct: each thread bumps the .y field
 * of its strided element.
 */
__global__ void structAddOneY(struct myStruct* array, int stride){
    int i = stride * threadIdx.x;
    array[i].y += 1;
}
/**
 * Coalescing probe over a 12-byte struct: each thread bumps the .z field
 * of its strided element.
 */
__global__ void structAddOneZ(struct myStruct* array, int stride){
    int i = stride * threadIdx.x;
    array[i].z += 1;
}
|
12,674 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
extern "C"
{
// Truncated-normal sampler: thread idx draws one sample x[idx] from
// N(mu[idx], sigma[idx]^2) restricted to (lo[idx], hi[idx]).
// Strategy: naive accept/reject from the untruncated normal for up to
// `maxtries` draws; if that fails (the interval sits far in a tail), fall
// back to an exponential-proposal rejection sampler for the nearer tail.
// NOTE(review): pow/log/abs/sqrt below are the double-precision overloads
// applied to float data — consider powf/logf/fabsf if precision/speed is
// tuned later; left untouched here.
__global__ void
rtruncnorm_kernel(
float *x,       // out: accepted samples, length n
int n,          // number of samples (threads with idx >= n do nothing)
float *mu,      // per-sample means
float *sigma,   // per-sample standard deviations
float *lo,      // per-sample lower truncation bounds
float *hi,      // per-sample upper truncation bounds
int maxtries,   // naive-rejection attempts before the tail fallback
int rngnum) //number of the random seed
{
int accepted;
float sample;
int numtries;
float m;
float alpha;
// Variables for the tail (exponential-proposal) rejection sampler:
float rexp;
float z;
float phi;
float u;
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
// Map block/thread coordinates into a single flat index : idx
int idx = myblock * blocksize + subthread;
// Guard: only the first n flat indices produce a sample.
if (idx < n)
{
// Set up a per-thread RNG; the seed mixes idx so threads differ,
// rngnum selects the sequence.
curandState rng_state;
curand_init(9131+idx*17,rngnum,0,&rng_state);
// Phase 1: naive rejection — draw from the untruncated normal and
// keep the first draw that lands strictly inside (lo, hi).
accepted = 0;
numtries = 0;
while (accepted == 0 && numtries < maxtries)
{
sample = mu[idx]+sigma[idx]*curand_normal(&rng_state);
numtries = numtries+1;
if (sample>lo[idx] && sample<hi[idx])
{
accepted = 1;
x[idx] = sample;
//printf("rnumber = %f\n", sample);
} //end of if(small) loop
} // end of while loop
// Phase 2: if naive rejection never accepted, sample the nearer tail
// with an exponential proposal (loops until acceptance).
while (accepted == 0) //if accepted = 0 run rejection sampling.
{
// Pick the tail closer to the mean; sample z >= m from a shifted
// exponential with optimal rate alpha, accept with probability phi.
if(abs(lo[idx]-mu[idx]) < abs(hi[idx]-mu[idx])){ //right tail
m = abs((lo[idx]-mu[idx])/sigma[idx]);
alpha = (m+sqrt(pow(m,2)+4))/2;
rexp = -log(curand_uniform(&rng_state))/alpha;
z = m + rexp;
if (m<alpha){
phi = exp(-pow(alpha-z,2)/2);
}
else{
phi = exp(pow(m-alpha,2)/2-pow(alpha-z,2)/2);
} //decide phi
u = curand_uniform(&rng_state);
if (u<phi){
accepted = 1;
x[idx] = mu[idx]+sigma[idx]*z;
}
} else{ //left tail
m = abs((mu[idx]-hi[idx])/sigma[idx]);
alpha = (m+sqrt(pow(m,2)+4))/2;
rexp = -log(curand_uniform(&rng_state))/alpha;
z = m + rexp;
if (m<alpha){
phi = exp(-pow(alpha-z,2)/2);
}
else{
phi = exp(pow(m-alpha,2)/2-pow(alpha-z,2)/2);
} //decide phi
u = curand_uniform(&rng_state);
if (u<phi){
accepted = 1;
x[idx] = mu[idx]-sigma[idx]*z;
}
}
} // end of rejection sampling.
} // end of if loop
return;
} // end of function
} // END extern "C"
//#### More variables: ########################
//int mu_len,
//int sigma_len,
//int lo_len,
//int hi_len,
//int maxtries
//#############################################
|
12,675 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
/*
Complie
nvcc -o PasswordCrackingCuda PasswordCrackingCuda.cu
*/
/*
 * Compare `password` against the four hard-coded target passwords.
 * Returns 1 on an exact (full-string) match with any target, 0 otherwise.
 */
__device__ int pass(char *password){
    char pass1[]="CY9829";
    char pass2[]="PR5782";
    char pass3[]="DE9811";
    char pass4[]="SH9819";
    char *targets[4] = {pass1, pass2, pass3, pass4};
    for (int t = 0; t < 4; t++){
        char *cand = password;
        char *ref  = targets[t];
        // Walk both strings while they agree; reaching the terminator
        // together means an exact match.
        while (*cand == *ref){
            if (*cand == '\0')
                return 1;
            cand++;
            ref++;
        }
    }
    return 0;
}
/*
 * Brute-force search over passwords of the form LLDDDD (two letters, four
 * digits). The first letter is fixed by blockIdx.x and the second by
 * threadIdx.x; the four digits are enumerated exhaustively. Matches are
 * reported via device printf.
 */
__global__ void kernel() {
    char alphabet[26] = {'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z'};
    char number[10] = {'0','1','2','3','4','5','6','7','8','9'};
    char candidate[7];
    candidate[6] = '\0';
    // The two letters never change within a thread — set them once.
    candidate[0] = alphabet[blockIdx.x];
    candidate[1] = alphabet[threadIdx.x];
    for (int d0 = 0; d0 < 10; d0++) {
        for (int d1 = 0; d1 < 10; d1++) {
            for (int d2 = 0; d2 < 10; d2++) {
                for (int d3 = 0; d3 < 10; d3++) {
                    candidate[2] = number[d0];
                    candidate[3] = number[d1];
                    candidate[4] = number[d2];
                    candidate[5] = number[d3];
                    if (pass(candidate)) {
                        printf("Password successfully cracked: %s\n", candidate);
                    }
                }
            }
        }
    }
}
/*
 * Calculate the difference between two timespecs in nanoseconds.
 * Writes the result to *difference and returns 0 when the difference is
 * positive, nonzero otherwise.
 */
int time_difference(struct timespec *start, struct timespec *finish, long long int *difference)
{
    long long int sec  = finish->tv_sec  - start->tv_sec;
    long long int nsec = finish->tv_nsec - start->tv_nsec;
    // Borrow a second when the nanosecond part underflows.
    if (nsec < 0)
    {
        sec  -= 1;
        nsec += 1000000000;
    }
    *difference = sec * 1000000000 + nsec;
    return !(*difference > 0);
}
/*
 * Launch the brute-force kernel (26 blocks x 26 threads, one per letter
 * pair) and report the wall-clock time taken.
 */
int main(int argc, char *argv[])
{
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    kernel <<<26, 26>>>();
    // fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported way to wait for the kernel before stopping the clock.
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
           (time_elapsed/1.0e9));
    return 0;
}
|
12,676 | #include <stdio.h>
#include <cuda.h>
/* Matrices are stored in row-major order: */
/* M(row, col) = (M.width*row +col); */
typedef struct{
/* Only square matrices are used here, so one dimension suffices. */
int width;      /* number of rows == number of columns */
int *elements;  /* row-major data, width*width entries (host or device) */
} Matrix;
/* Thread block size */
#define BLOCK_SIZE 2
/*********************************************************************/
/* Kernel: one thread computes one element of C = A * B (square,     */
/* row-major matrices of dimension A.width).                         */
/*********************************************************************/
__global__ void MatMul(const Matrix A, const Matrix B, Matrix C){
    int size = A.width;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    /* fix: guard against threads outside the matrix — without this,
       any grid that overshoots the matrix reads/writes out of bounds. */
    if (row >= size || col >= size)
        return;
    int Cvalue = 0;
    for (int i = 0; i < size; ++i) {
        Cvalue += A.elements[row*size+i]*B.elements[i*size+col];
    }
    C.elements[row*size+col] = Cvalue;
}
void test(const Matrix C);
/*
 * Reads the matrix dimension and two size x size integer matrices from
 * stdin, multiplies them on the GPU, times the transfer+compute with CUDA
 * events, and prints the product via test().
 * Precondition: size is a multiple of BLOCK_SIZE (the grid uses exact
 * integer division).
 */
int main(int argc, char* argv[]){
    int i;
    /* Host and device matrices. */
    Matrix h_A, h_B, h_C;
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /*******************/
    /** READING INPUT **/
    /*******************/
    int size = 0; /* dimension of the square matrices */
    scanf("%d", &size);
    int full_size = sizeof(int)*size*size;
    h_A.width = size; h_B.width = size; h_C.width = size;
    /* Allocate host memory */
    h_A.elements = (int*)malloc(full_size);
    h_B.elements = (int*)malloc(full_size);
    h_C.elements = (int*)malloc(full_size);
    for(i=0;i<size*size;++i){ scanf("%d", &h_A.elements[i]);}
    for(i=0;i<size*size;++i){ scanf("%d", &h_B.elements[i]);}
    /********************/
    /** FINISHED INPUT **/
    /********************/
    /* Allocate device memory for A, B, C */
    Matrix d_A, d_B, d_C;
    d_A.width = size; d_B.width = size; d_C.width = size;
    cudaMalloc(&d_A.elements, full_size);
    cudaMalloc(&d_B.elements, full_size);
    cudaMalloc(&d_C.elements, full_size);
    cudaEventRecord(start,0);
    /* Copy inputs A and B to the device */
    cudaMemcpy(d_A.elements, h_A.elements, full_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B.elements, h_B.elements, full_size, cudaMemcpyHostToDevice);
    /* Launch the kernel: one thread per output element */
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(h_B.width/dimBlock.x, h_A.width/dimBlock.y);
    MatMul<<<dimGrid,dimBlock>>>(d_A, d_B, d_C);
    /* Copy the result back */
    cudaMemcpy(h_C.elements, d_C.elements, full_size, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    /* fix: cudaEventElapsedTime reports milliseconds; the old message
       labelled the value "(s)". */
    fprintf(stderr,"Elapsed time = %f (ms)\n",elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /*******************************************/
    /** Testing output, don't change anything! */
    /*******************************************/
    test(h_C);
    /* free device memory */
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
    /* free host memory */
    free(h_A.elements);
    free(h_B.elements);
    free(h_C.elements);
    return 0;
}
//function to test the input, don't change anything!
// Prints matrix C row by row: space-separated values, one row per line.
void test(const Matrix C){
int i,j;
//int size = C.width*C.width;
for(i=0;i<C.width;++i)
{
for(j=0;j<C.width;++j) printf("%d ", C.elements[i*C.width+j]);
printf("\n");
}
}
12,677 | #include <cstdlib>
#include <iostream>
#include <vector>
#include <algorithm>
#include <cassert>
#include <numeric>
using std::accumulate;
using std::generate;
using std::cout;
using std::vector;
#define SHMEM_SIZE 256
// Unrolled tail of the block reduction, run by the last 32 threads.
// `volatile` forces every partial sum to be re-read from shared memory
// rather than cached in a register, which is what made this barrier-free
// pattern valid on pre-Volta hardware.
// NOTE(review): under Volta+ independent thread scheduling this implicit
// warp synchrony is no longer guaranteed; __syncwarp() between steps (or
// __shfl_down_sync / cooperative groups) is the supported form — confirm
// the target architecture before relying on this.
// Precondition: shmem_ptr[t .. t+32] are valid (t < 32, array >= 64 ints).
__device__ void warpReduce(volatile int * shmem_ptr, int t)
{
shmem_ptr[t] += shmem_ptr[t + 32];
shmem_ptr[t] += shmem_ptr[t + 16];
shmem_ptr[t] += shmem_ptr[t + 8];
shmem_ptr[t] += shmem_ptr[t + 4];
shmem_ptr[t] += shmem_ptr[t + 2];
shmem_ptr[t] += shmem_ptr[t + 1];
}
// Block-wide integer sum reduction. Each block consumes 2*blockDim.x
// consecutive elements of v (first add happens during the load) and writes
// one partial sum to v_r[blockIdx.x].
// Preconditions: blockDim.x == SHMEM_SIZE (256) and v holds at least
// gridDim.x * 2 * blockDim.x elements.
__global__ void sum_reduction(int *v, int *v_r)
{
// Shared scratch for this block's partial sums.
__shared__ int partial_sum[SHMEM_SIZE];
// Global thread ID (unused below — kept for reference).
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Load elements AND do the first add of the reduction: the input region
// is twice as long as the block, so scale the load index accordingly.
int i = blockIdx.x * ( 2 * blockDim.x) + threadIdx.x;
// Store the first pairwise sum instead of a raw element.
partial_sum[threadIdx.x] = v[i] + v[i + blockDim.x];
__syncthreads();
// Tree reduction: halve the active range each pass, stopping early at 32
// so the warp-synchronous tail (warpReduce) can finish without barriers.
for(int s = blockDim.x/2; s > 32 ; s>>=1 )
{
if(threadIdx.x < s) // each thread does work unless it goes off the block
{
partial_sum[threadIdx.x] += partial_sum[threadIdx.x + s ];
}
__syncthreads();
}
// Last 32 partial sums are folded by the unrolled warp reduction.
if(threadIdx.x < 32)
{
warpReduce(partial_sum, threadIdx.x);
}
// Thread 0 publishes this block's result, indexed by block.
if(threadIdx.x == 0 )
{
v_r[blockIdx.x] = partial_sum[0];
}
}
/*
 * Fills a 2^16-element vector with small random ints, reduces it on the GPU
 * in two passes (blocks -> partials -> final sum), and checks the result
 * against std::accumulate.
 */
int main()
{
    // vector size
    int N = 1<<16;
    size_t bytes = N * sizeof(int);
    // host data
    vector<int> h_v(N);
    vector<int> h_v_r(N);
    // initialize vector with values in [0, 9]
    generate(begin(h_v), end(h_v), [](){return rand() % 10; });
    // device memory
    int *d_v, *d_v_r;
    cudaMalloc(&d_v, bytes);
    cudaMalloc(&d_v_r, bytes);
    // copy from host ( CPU ) to device ( GPU )
    cudaMemcpy(d_v, h_v.data(), bytes, cudaMemcpyHostToDevice);
    // Thread block size
    const int TB_SIZE = 256;
    // Grid size: each thread loads two elements, hence the /2
    int GRID_SIZE = N/TB_SIZE/2;
    // Pass 1: 65536 elements -> 128 partial sums; pass 2: final sum in v_r[0]
    sum_reduction<<<GRID_SIZE, TB_SIZE>>>(d_v, d_v_r);
    sum_reduction<<<1, TB_SIZE>>>(d_v_r, d_v_r);
    cudaMemcpy(h_v_r.data(), d_v_r, bytes, cudaMemcpyDeviceToHost);
    // check the result
    assert(h_v_r[0] == std::accumulate(begin(h_v), end(h_v), 0));
    // fix: release device memory (was leaked)
    cudaFree(d_v);
    cudaFree(d_v_r);
    cout<<"COMPLETED SUCCESSFULLY\n";
    return 0;
}
12,678 | #include "includes.h"
//#define DEBUG
//#define HANDLE_ERROR(x) if((x) != 0) cout << "Error!" << endl;
using namespace std;
// One sparse sub-block of the matrix, stored in coordinate (COO) form.
// NOTE(review): index semantics inferred from the disabled kernel below
// (which subtracts 1 before use, suggesting 1-based local indices) — confirm.
struct SubBlock{
int * nnz_global_i_idx;  // global input-vector indices gathered by this block
int * nnz_global_o_idx;  // global output-vector indices for this block
int nnz;                 // number of stored non-zeros
int * nnz_local_r_idx;   // local row index of each non-zero (1-based?)
int * nnz_local_c_idx;   // local column index of each non-zero (1-based?)
float * nnz_values;      // non-zero values
};
//void printSubBlocksInfo(SubBlock * sbs, int nsbs, int mem_b_size);
// Sparse matrix-vector multiply kernel: one SubBlock per CUDA block.
// NOTE(review): the entire body is commented out, so this kernel is
// currently a no-op. The disabled code below sketches the intended
// gather (d_x -> x_sub), multiply-accumulate, and scatter into
// sub_y_arr (nblocks rows x mem_b_size columns).
__global__ void CudaCompute(SubBlock * d_sbs, float * d_x, float * d_y, int nblocks, int mem_b_size, int nrows, int ncols , float * sub_y_arr){
/*
sub_y_arr stores float number, with nblocks rows, mem_b_size columns
*/
//#ifdef DEBUG
//printf("This is Cuda Block # %d: \n", blockIdx.x);
//#endif
//if(blockIdx.x >= nblocks)
// return;
//SubBlock * work_sb = &d_sbs[blockIdx.x];
//printSubBlocksInfo(work_sb, 1, mem_b_size);
/*
float * x_sub = (float *) malloc(mem_b_size * sizeof(float));
float * y_sub = (float *) malloc(mem_b_size * sizeof(float));
//float * x;
for(int i = 0; i < mem_b_size; i++){
if(work_sb->nnz_global_i_idx[i] > 0 && work_sb->nnz_global_i_idx[i] <= ncols){
// d_x indexing starts from '1'
// x_sub indexing starts from '0'
x_sub[i] = d_x[work_sb->nnz_global_i_idx[i] - 1];
}
else{
x_sub[i] = 0.0;
}
}
for(int i = 0; i < work_sb->nnz; i++){
int x_sub_idx = work_sb->nnz_local_c_idx[i] - 1;
int y_sub_idx = work_sb->nnz_local_r_idx[i] - 1;
y_sub[y_sub_idx] += work_sb->nnz_values[i] * x_sub[x_sub_idx];
//#ifdef DEBUG
// printf("This is Cuda Block # %d: Computing (%d, %d) product as (%f)\n", blockIdx.x, x_sub_idx, y_sub_idx, work_sb->nnz_values[i] * x_sub[x_sub_idx]);
//#endif
}
for(int i = 0; i < mem_b_size; i++){
sub_y_arr[blockIdx.x * mem_b_size + i] = y_sub[i];
}
*/
}
12,679 |
#include "parameter.cuh"
// Construct from the input-deck path: remember the path, open the reader,
// and load all parameter sections.
// NOTE(review): fixed 1024-byte buffer filled by strcpy — overflows if
// inputdir is longer; also never deleted (leak) — confirm lifetime/limits.
parameter::parameter(char* inputdir):rf()
{
input_file_name = new char [1024];
strcpy(input_file_name, inputdir);
rf.openinput( inputdir );
initial_particle();
initial_grid();
initial_static_potential();
inital_time_set();
};
//////////////////////////////////////////////////////////////////////////////////////////
void parameter::initial_particle( void )
// Read the &particle section of the input deck.
{
rf.openinput( input_file_name );
energy_e = atof( rf.setget( "&particle", "energy_e" ) );
energy_ion = atof( rf.setget( "&particle", "energy_ion" ) );
Ib = atof( rf.setget( "&particle", "Ib" ) );
// NOTE(review): both the member name ("wegith") and the key ("weigth") are
// misspellings of "weight"; the key must match the input files and the
// member is declared elsewhere, so neither is changed here.
wegith = atof( rf.setget( "&particle", "weigth" ) );
ni = atof( rf.setget( "&particle", "atom_density" ) );
rf.closeinput();
}
//////////////////////////////////////////////////////////////////////////////////////////
void parameter::initial_grid( void )
// Read the &box section: cell sizes, domain extents, and cathode geometry.
{
rf.openinput( input_file_name );
dr = atof( rf.setget( "&box", "cells_dr" ) );
dz = atof( rf.setget( "&box", "cells_dz" ) );
L = atof( rf.setget( "&box", "L" ) );
R = atof( rf.setget( "&box", "R" ) );
ca_rd = atof( rf.setget( "&box", "cathode_rad" ) );
ca_len = atof( rf.setget( "&box", "cathode_len" ) );
rf.closeinput();
}
void parameter::initial_static_potential( void )
// Read the &static_field section: fixed electric potentials of the
// wall (anode), cathode, and screen.
{
rf.openinput( input_file_name );
anode_p = atof( rf.setget( "&static_field", "wall_electric_potential" ) );
cathode_p = atof( rf.setget( "&static_field", "cathode_electric_potential" ) );
screen_p = atof( rf.setget( "&static_field", "screen_electric_potential" ) );
rf.closeinput();
}
void parameter::inital_time_set( void )
// Read the &time_set section: total simulated time and time step.
// NOTE(review): the method name misspells "initial"; it is declared
// elsewhere and called by the constructor, so it is kept as-is.
{
rf.openinput( input_file_name );
total_time = atof( rf.setget( "&time_set", "total_time" ) );
time_inter = atof( rf.setget( "&time_set", "time_interval" ) );
rf.closeinput();
}
//EOF
|
12,680 | #include<stdio.h>
#include<stdlib.h>
#include <cuda.h>
/*
 * Abort the program if a CUDA error is pending.
 * NOTE: __FILE__/__LINE__ expand here, not at the call site, so the
 * reported location is always this helper; a macro wrapper would report
 * the caller instead.
 */
void cudaCheckError() {
    cudaError_t error=cudaGetLastError();
    if(error!=cudaSuccess) {
        printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(error));
        /* fix: was exit(0), which reports success to the shell on failure */
        exit(1);
    }
}
/*
 * Naive matrix multiply P = M * N over SIZE x SIZE row-major int matrices;
 * one thread per output element, with a bounds guard for overshooting grids.
 */
__global__ void MatrixMulKernel(int *M,int *N,int *P,int SIZE){
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int col = blockIdx.y*blockDim.y+threadIdx.y;
    if (row >= SIZE || col >= SIZE)
        return;
    int acc = 0;
    for (int k = 0; k < SIZE; ++k)
        acc += M[row*SIZE+k] * N[k*SIZE+col];
    P[row*SIZE+col] = acc;
}
// Tiled matrix multiply P = M * N over Width x Width row-major matrices.
// Preconditions (not checked): Tile_Width == 16 (the shared tiles are
// hard-coded to 16x16), blockDim == (16,16), and Width is a multiple of
// Tile_Width — there is no bounds guard on loads or the final store.
// NOTE(review): the tiles are declared double although the data is int;
// values are converted on load and the products truncated back into the
// int accumulator — confirm this is intentional.
__global__ void MatrixMulKernelWithTiling(int *M,int *N,int *P,int Tile_Width,int Width){
__shared__ double ds_M[16][16];
__shared__ double ds_N[16][16];
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int Row=by*blockDim.y+ty;
int Col=bx*blockDim.x+tx;
int Pvalue=0;
// March the tile window across the shared dimension.
for(int p=0;p<Width/Tile_Width;p++){
// Cooperative load of one M tile and one N tile into shared memory.
ds_M[ty][tx]=M[Row*Width+p*Tile_Width+tx];
ds_N[ty][tx]=N[(p*Tile_Width+ty)*Width+Col];
__syncthreads();
for(int i=0;i<Tile_Width;i++)
Pvalue+=ds_M[ty][i]*ds_N[i][tx];//partial dot product
__syncthreads();
}
P[Row*Width+Col]=Pvalue;//final answer
}
/* Row-major dump of a size x size int matrix, one row per line. */
void print(int* mat, int size){
    for (int r = 0; r < size; r++){
        for (int c = 0; c < size; c++)
            printf("%d ", mat[r*size+c]);
        printf("\n");
    }
}
/*
 * Computes the average clustering coefficient of an undirected graph.
 * Input (argv[1]): first line gives the vertex count, then one "a b" edge
 * per line. The adjacency matrix A is padded up to a power-of-two multiple
 * of bufferSize; A^2 and then A^3 are formed tile-by-tile on the GPU, and
 * diag(A^3)/2 gives the triangle count through each vertex.
 */
int main(int argc, char** argv){
    /* fix: guard against a missing argument / unopenable file (the old code
       dereferenced a NULL FILE* in fscanf). */
    if (argc < 2) { printf("usage: %s <edge-list-file>\n", argv[0]); return 1; }
    FILE* fp = fopen(argv[1],"r");
    if (fp == NULL) { printf("could not open %s\n", argv[1]); return 1; }
    int *arr1_h,*arr1_d,*arr2_d,*arr3_d,*degree_h, *arr_f1, *arr_f2, *arr_tmp1;
    int a,b;
    int f=0;
    long long int vertices, original_vertices;
    printf("1\n");
    cudaStream_t stream1, stream2;
    printf("2\n");
    size_t bufferSize = 32;                 /* tile edge length */
    size_t offset = bufferSize*bufferSize;  /* elements per tile */
    printf("3\n");
    cudaStreamCreate(&stream1);
    printf("4\n");
    cudaError_t err = cudaGetLastError();
    printf("Error: %s\n", cudaGetErrorString(err));
    cudaStreamCreate(&stream2);
    printf("5\n");
    err = cudaGetLastError();
    printf("Error: %s\n", cudaGetErrorString(err));
    printf("10\n");
    cudaMalloc(&arr1_d,bufferSize*bufferSize*sizeof(int));
    err = cudaGetLastError();
    printf("Error: %s\n", cudaGetErrorString(err));
    printf("11\n");
    cudaMalloc(&arr2_d,bufferSize*bufferSize*sizeof(int));
    err = cudaGetLastError();
    printf("Error: %s\n", cudaGetErrorString(err));
    printf("12\n");
    cudaMalloc(&arr3_d,bufferSize*bufferSize*sizeof(int));
    err = cudaGetLastError();
    printf("Error: %s\n", cudaGetErrorString(err));
    printf("13\n");
    cudaHostAlloc(&arr_tmp1, bufferSize*bufferSize*sizeof(int),cudaHostAllocDefault);
    err = cudaGetLastError();
    printf("Error: %s\n", cudaGetErrorString(err));
    printf("14\n");
    while(fscanf(fp,"%d %d\n",&a,&b)!=EOF){
        if(f==0){
            /* First record carries the vertex count; pad up to a power-of-two
               multiple of bufferSize so the matrix tiles evenly. */
            vertices=a;
            original_vertices=a;
            if(vertices<bufferSize){
                vertices=bufferSize;
            }
            else{
                long long int temp=bufferSize;
                while(vertices>temp){
                    temp=temp*2;
                }
                vertices=temp;
                printf("\nVertices:%lld\n",vertices);
            }
            cudaHostAlloc(&arr1_h, vertices*vertices*sizeof(int),cudaHostAllocDefault);
            cudaHostAlloc(&arr_f1, vertices*vertices*sizeof(int),cudaHostAllocDefault);
            cudaHostAlloc(&arr_f2, vertices*vertices*sizeof(int),cudaHostAllocDefault);
            degree_h= (int*)malloc(original_vertices*sizeof(int));
            /* fix: degree_h holds only original_vertices entries — the old
               loop ran to `vertices` and wrote past the allocation. */
            for (long long int i = 0; i < original_vertices; ++i)
            {
                degree_h[i]=0;
            }
            for(long long int i=0;i<vertices;i++){
                for(long long int j=0;j<vertices;j++){
                    arr1_h[i*vertices+j]=0;
                    arr_f1[i*vertices+j]=0;
                    arr_f2[i*vertices+j]=0;
                }
            }
            f=1;
        }
        else{
            /* Symmetric adjacency entry plus degree bookkeeping. */
            arr1_h[a*vertices+b]=1;
            arr1_h[b*vertices+a]=1;
            degree_h[a]++;
            degree_h[b]++;
        }
    }
    fclose(fp);
    /* arr_f1 = A^2, computed tile-by-tile with two copy streams. */
    for(int i=0;i<vertices/bufferSize;i++){
        for(int j=0;j<vertices/bufferSize;j++){
            for(int k=0;k<vertices/bufferSize;k++){
                for(int k1=0;k1<bufferSize;k1++){
                    cudaMemcpyAsync(arr1_d+k1*bufferSize,arr1_h+i*vertices*bufferSize+k1*vertices+k*bufferSize,bufferSize*sizeof(int),cudaMemcpyHostToDevice,stream1);
                    cudaMemcpyAsync(arr2_d+k1*bufferSize,arr1_h+k*vertices*bufferSize+k1*vertices+j*bufferSize,bufferSize*sizeof(int),cudaMemcpyHostToDevice,stream2);
                }
                cudaStreamSynchronize(stream1);
                cudaStreamSynchronize(stream2);
                dim3 threadsPerBlock(16, 16);
                dim3 blocksPerGrid(bufferSize/16, bufferSize/16);
                MatrixMulKernelWithTiling<<<blocksPerGrid, threadsPerBlock,0, stream1>>>(arr1_d, arr2_d, arr3_d,16,bufferSize);
                cudaStreamSynchronize(stream1);
                cudaMemcpyAsync(arr_tmp1,arr3_d,offset*sizeof(int),cudaMemcpyDeviceToHost,stream2);
                cudaStreamSynchronize(stream2);
                /* Accumulate the tile product into the output block. */
                for(int k2=0;k2<bufferSize;k2++)
                    for(int k3=0;k3<bufferSize;k3++)
                        arr_f1[i*bufferSize*vertices+k2*vertices+j*bufferSize+k3] += arr_tmp1[k2*bufferSize+k3];
            }
        }
    }
    /* arr_f2 = A^3 = (A^2) * A, same tiling scheme. */
    for(int i=0;i<vertices/bufferSize;i++){
        for(int j=0;j<vertices/bufferSize;j++){
            for(int k=0;k<vertices/bufferSize;k++){
                for(int k1=0;k1<bufferSize;k1++){
                    cudaMemcpyAsync(arr1_d+k1*bufferSize,arr_f1+i*vertices*bufferSize+k1*vertices+k*bufferSize,bufferSize*sizeof(int),cudaMemcpyHostToDevice,stream1);
                    cudaMemcpyAsync(arr2_d+k1*bufferSize,arr1_h+k*vertices*bufferSize+k1*vertices+j*bufferSize,bufferSize*sizeof(int),cudaMemcpyHostToDevice,stream2);
                }
                cudaStreamSynchronize(stream1);
                cudaStreamSynchronize(stream2);
                dim3 threadsPerBlock(16, 16);
                dim3 blocksPerGrid(bufferSize/16, bufferSize/16);
                MatrixMulKernelWithTiling<<<blocksPerGrid, threadsPerBlock,0, stream1>>>(arr1_d, arr2_d, arr3_d,16,bufferSize);
                cudaStreamSynchronize(stream1);
                cudaMemcpyAsync(arr_tmp1,arr3_d,offset*sizeof(int),cudaMemcpyDeviceToHost,stream2);
                cudaStreamSynchronize(stream2);
                for(int k2=0;k2<bufferSize;k2++)
                    for(int k3=0;k3<bufferSize;k3++)
                        arr_f2[i*bufferSize*vertices+k2*vertices+j*bufferSize+k3] += arr_tmp1[k2*bufferSize+k3];
            }
        }
    }
    /* diag(A^3)/2 = triangles through each vertex; divide by the number of
       neighbour pairs and average over all original vertices. */
    float cc=0;
    for(int i=0;i<original_vertices;i++){
        if(degree_h[i]>=2){
            cc=cc+((float(arr_f2[i*vertices+i]/2))/((degree_h[i]*(degree_h[i]-1))/2));
        }
    }
    cc=cc/original_vertices;
    printf("%f\n",cc);
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    /* fix: the original passed &ptr (the address of the pointer variable) to
       cudaFreeHost/cudaFree, which frees nothing and raises invalid-argument
       errors; pass the pointers themselves, and release the buffers that
       were previously leaked. */
    cudaFreeHost(arr1_h);
    cudaFreeHost(arr_f1);
    cudaFreeHost(arr_f2);
    cudaFreeHost(arr_tmp1);
    cudaFree(arr1_d);
    cudaFree(arr2_d);
    cudaFree(arr3_d);
    free(degree_h);
    return 0;
}
|
12,681 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//
//void initialize_data(float * ip, int size)
//{
// time_t t;
// srand((unsigned) time(&t));
//
// for (size_t i = 0; i < size; i++)
// {
// ip[i] = (float)(rand() & 0xFF) / 10.0f;
// }
//}
//
//__global__ void sum_array(float * a, float * b, float * c)
//{
// int i = threadIdx.x;
// c[i] = a[i] + b[i];
// printf("a =%f b = %f c = %f \n",a[i],b[i],c[i]);
//}
//
////int main()
////{
//// int element_Count = 32;
////
//// size_t number_bytes = element_Count * sizeof(float);
////
//// float *h_a, *h_b, *host_ref, *gpu_ref;
////
//// h_a = (float *)malloc(number_bytes);
//// h_b = (float *)malloc(number_bytes);
//// host_ref = (float *)malloc(number_bytes);
//// gpu_ref = (float *)malloc(number_bytes);
////
//// initialize_data(h_a,element_Count);
//// initialize_data(h_b, element_Count);
////
//// memset(host_ref,0,number_bytes);
//// memset(gpu_ref,0,number_bytes);
////
//// float *d_a, *d_b, *d_c;
//// cudaMalloc((float **)&d_a,number_bytes);
//// cudaMalloc((float **)&d_b, number_bytes);
//// cudaMalloc((float **)&d_c, number_bytes);
////
//// cudaMemcpy(d_a,h_a,number_bytes,cudaMemcpyHostToDevice);
//// cudaMemcpy(d_b, h_b ,number_bytes, cudaMemcpyHostToDevice);
////
//// dim3 block(element_Count);
//// dim3 grid(element_Count/block.x);
////
//// sum_array << <grid,block >> > (d_a,d_b,d_c);
////
//// cudaMemcpy(gpu_ref,d_c,number_bytes,cudaMemcpyDeviceToHost);
////
//// cudaFree(d_a);
//// cudaFree(d_b);
//// cudaFree(d_c);
////
//// free(h_a);
//// free(h_b);
//// free(host_ref);
//// free(gpu_ref);
////
//// system("pause");
//// return 0;
////} |
12,682 | #include <iostream>
#include <cstring>
#include <math.h>
#include <fstream>
#include <vector>
#include <sys/time.h>
#include <emmintrin.h>
#define totalNumPeriods 1598
#define SR 44100
#define MINP 9
#define MAXP 1604
using namespace std;
typedef unsigned char uchar;
struct timeval before, after;
/* Read the entire file at `path` as raw bytes and append them to *wavdata.
   Exits the process on open/read failure. */
void readWavBytes(string path, vector<char> *wavdata) {
    ifstream inFile;
    inFile.open(path, ifstream::binary);
    if (!inFile) {
        cout << "Can't find file!\n";
        exit(1);
    }
    // Determine the file length by seeking to the end.
    inFile.seekg(0, inFile.end);
    int length = inFile.tellg();
    inFile.seekg(0, inFile.beg);
    char *buffer = new char[length];
    cout << "Reading " << length << " bytes from wave file... ";
    // Pull the whole file in one read.
    inFile.read(buffer, length);
    if (inFile) {
        cout << "all bytes read successfully.\n";
    } else {
        cout << "error: only " << inFile.gcount() << " could be read";
        inFile.close();
        exit(1);
    }
    inFile.close();
    // Append the raw bytes to the caller's vector in one shot.
    wavdata->insert(wavdata->end(), buffer, buffer + length);
    delete[] buffer;
}
// Decode 32-bit float PCM samples from a WAV file into `buffer`, and report
// the sample rate and channel count through the out-params. Stereo input
// keeps only the left channel.
// NOTE(review): header fields are read at fixed byte offsets (sample rate at
// 24-25, channels at 22, data size at 52-55, samples from byte 56 = 14*4),
// i.e. this assumes one extra chunk before "data" rather than the canonical
// 44-byte header — confirm against the files actually used.
void audioRead(string path, vector<float> *buffer, int &sampleRate, int &numChannels) {
//Get bytes from wave file
vector<char> wavdata;
readWavBytes(path, &wavdata);
sampleRate = (int)((uchar)wavdata[24]) + (int)((uchar)wavdata[25]*256);
numChannels = (int)wavdata[22];
// Data-chunk byte count / 4 bytes per float / channels = frames per channel.
int floatSize = (((int)((uchar)wavdata[52]) + (int)((uchar)wavdata[53])*256 +
(int)((uchar)wavdata[54])*256*256 + (int)((uchar)wavdata[55])*256*256*256)/4) / numChannels;
if (numChannels == 1) {
//If audio is mono:
for (int i = 0; i < floatSize; i++) {
int block = (14+i)*4;
uchar bytes[] = {(uchar)wavdata[block], (uchar)wavdata[block+1],
(uchar)wavdata[block+2], (uchar)wavdata[block+3]};
float sample;
// Reinterpret 4 little-endian bytes as an IEEE float.
memcpy(&sample, &bytes, sizeof(sample));
buffer->push_back(sample);
}
} else {
//Else, audio is stereo. Read left channel:
for (int i = 0; i/2 < floatSize; i+=2) {
int block = (14+i)*4;
uchar bytes[] = {(uchar)wavdata[block], (uchar)wavdata[block+1],
(uchar)wavdata[block+2], (uchar)wavdata[block+3]};
float sample;
memcpy(&sample, &bytes, sizeof(sample));
buffer->push_back(sample);
}
}
}
// Record the wall-clock start time into the global `before`.
void starttime() {
gettimeofday( &before, 0 );
}
// Print the wall-clock time (ms) elapsed since starttime(), labelled with c.
void endtime(const char* c) {
    gettimeofday(&after, 0);
    double elapsed = (after.tv_sec - before.tv_sec) * 1000.0
                     + (after.tv_usec - before.tv_usec) / 1000.0;
    printf("%s: %f ms\n", c, elapsed);
}
/*
 * This function is built from Beauregard's EstimatePeriod().
 * Estimates the fundamental frequency (Hz) of the audio frame of
 * MAXP*2 samples starting at index `start` in x, via normalized
 * autocorrelation over candidate periods MINP..MAXP. Returns 0 (or a
 * negated estimate) when the peak is unreliable.
 */
float detectFrequency(vector<float> *x, int start) {
vector<float> nac (MAXP + 2); // Normal Autocorrelation array
int frameSize = MAXP*2;
// Find the normalized autocorrelation value for each candidate period
// (one extra on each side for the interpolation step below).
for (int p = MINP - 1; p <= MAXP + 1; p++) {
float ac = 0.0; // Standard auto-correlation
float sumSqBeg = 0.0; // Sum of squares of beginning part
float sumSqEnd = 0.0; // Sum of squares of ending part
int audioShift = frameSize + start;
for (int i = start; i < audioShift - p; i++) {
ac += x->at(i) * x->at(i + p);
sumSqBeg += x->at(i) * x->at(i);
sumSqEnd += x->at(i + p) * x->at(i + p);
}
// Normalize so a perfect correlation is 1; guard the zero-energy case.
float sumSqrt = sqrt(sumSqBeg * sumSqEnd);
if (sumSqrt == 0) {sumSqrt = 1;}
nac[p] = ac / sumSqrt;
}
// Pick the period with the highest correlation.
int bestP = MINP;
for (int p = MINP; p <= MAXP; p++) {
if (nac[p] > nac[bestP]) {
bestP = p;
}
}
// Refine sub-sample accuracy with parabolic interpolation around the peak.
float mid = nac[bestP];
float left = nac[bestP - 1];
float right = nac[bestP + 1];
float div = 2 * mid - left - right;
int error = 1;
// A flat or concave neighbourhood means the peak is unreliable; the
// estimate is negated to signal the error to the caller.
if (div == 0 || (nac[bestP] < nac[bestP - 1] && nac[bestP] < nac[bestP + 1])) {
error = -1;
}
float shift = 0.5 * (right - left) / div;
float pEst = error * (bestP + shift);
// Check for octave multiple errors: if every integer submultiple of the
// period also correlates strongly, the true period is the submultiple.
const float k_subMulThreshold = 0.90f;
int maxMul = bestP / MINP;
bool found = false;
for (int mul = maxMul; !found && mul >= 1; mul--) {
bool subsAllStrong = true;
for (int k = 1; k < mul; k++) {
int subMulP = int (k * pEst / mul + 0.5);
if (nac[subMulP] < k_subMulThreshold * nac[bestP]) {
subsAllStrong = false;
break;
}
}
if (subsAllStrong == true) {
found = true;
pEst = pEst / mul;
}
}
// Convert the period estimate (samples) to frequency (Hz).
float fEst = 0;
if (pEst > 0) {
fEst = SR / pEst;
}
//returning frequency
return fEst;
}
/*
 * CPU pitch detection: estimates one frequency per non-overlapping frame
 * of MAXP*2 samples in x. Returns a malloc'd array of
 * x->size() / (MAXP*2) estimates; the caller owns (and frees) it.
 */
float *normalPitchDetection(vector<float> *x) {
    const int frameSize = MAXP * 2;
    int numFrames = (int)x->size() / frameSize;
    // Allocate the result array on the CPU.
    float *fEsts = (float *)malloc(numFrames * sizeof(float));
    // One frequency estimate per frame.
    for (int frame = 0; frame < numFrames; frame++) {
        fEsts[frame] = detectFrequency(x, frame * frameSize);
    }
    return fEsts;
}
/*
* This is the GPU implementation of the normal pitch detection.
* Each block in the gpu will detect the pitch of one frame of audio
* -frame of audio: a subsection (or "slice") of the audio. Its about MAXP*2 samples long
* (3,208 samples)
* For example: if an audio is 250ms long, the amount is audio frames being detected
* for pitch will be 250 / 70 (which is 3)
*/
__global__ void gpu_PitchDetection (float *gpu_audio, float *gpu_fEsts) {
// Storing blockIdx.x and threadIdx.x in variables for temporal locality
int bid = blockIdx.x;
int tid = threadIdx.x;
// The size of the "slice" of audio being detected for pitch
int frameSize = MAXP * 2;
/*
The Folling variables are for frequency detection calculations
The process for detecting a frequency goes as follows:
1. Sounds that have a pitch mean that there are sections in the waveform
that are periodic. In order to discover the size of this period, there
would have to be a one to one correlation a time t away.
2. My range of periods to search for are from notes A0 to C8. Each period's
correlation value are stored in an array called "nac" which stands for
"normal autocorrelation". A period of a waveform is determined if the nac
value close ot 1 (about 0.99880)
3. This method will also have a one to correlation of a multiple of the period.
So an additional part of the process will involve discovering which multiple
of the period will produce the most fundamental "period"
*/
// Each thread in a block will detect the correlation values for 4 periods
__shared__ float nac[MAXP + 2]; // Will store the normal autocorrelation values
int lowPThreshold = (tid * 4) + (MINP - 1); // Since each thread will compute 4 periods, this varible is the smallest period that the thread will compute for
int highPThreshold = lowPThreshold + 4; // This is the maximum period that a thread will compute for
/*
These variables are for loading an audio frame into a gpu block.
*/
// Each thread will load 8 samples into thier shared memory block
__shared__ float sharedAudioFrame[MAXP*2]; // The "slice" of audio that the gpu block will detect the pitch for
int loadMin = tid * 8; // This is the minimum sample that will be loaded by a thread
int loadMax = loadMin + 8; // The max sample that will load a thread
int audioIdx = bid * frameSize; // This varible holds the beginning index of the global audio to load the blocks frame
// This block loads the audio frame into shared memory
if (loadMin < frameSize) {
// Ensure that the threadId doesn't cause audio array to go out of bounds
// Only threads 0 - 400 should run this section
// Ensure loadMax doesn't index audio array out of bounds
if (loadMax > frameSize) {
loadMax = frameSize;
}
// Each thread will load 8 samples from the global audio
for (int i = loadMin; i < loadMax; i++) {
sharedAudioFrame[i] = gpu_audio[audioIdx + i];
}
}
__syncthreads(); // Wait for all threads to finish loading their samples into shared memory
// Each thread will find the correlation value of 4 periods
if (lowPThreshold <= totalNumPeriods) {
// If the highest period exceeds MAXP + 2, set highest period variable to MAXP + 2
if (highPThreshold > MAXP + 2) {
highPThreshold = MAXP + 2;
}
// This for loop is built from Beauregard's code for calculating the
// Normal autocorrelation value
// Each thread will calculate 4 period values
for (int p = lowPThreshold; p < highPThreshold; p++) {
float ac = 0.0; // Standard auto-correlation
float sumSqBeg = 0.0; // Sum of squares of beginning part
float sumSqEnd = 0.0; // Sum of squares of ending part
for (int i = 0; i < frameSize - p; i++) {
ac += sharedAudioFrame[i] * sharedAudioFrame[i + p];
sumSqBeg += sharedAudioFrame[i] * sharedAudioFrame[i];
sumSqEnd += sharedAudioFrame[i + p] * sharedAudioFrame[i + p];
}
float sumSqrt = sqrt(sumSqBeg * sumSqEnd);
if (sumSqrt == 0) {sumSqrt = 1;}
nac[p] = ac / sumSqrt;
}
}
__syncthreads(); // Wait for threads to finish calculating nac values
// The following block is built from Beauregard's code
// Use thread 0 to find the greatest value and store it in nac[0]
if (tid == 0) {
int bestP = MINP;
for (int p = MINP; p <= MAXP; p++) {
if (nac[p] > nac[bestP]) {bestP = p;}
}
nac[0] = (float)bestP;
}
__syncthreads(); // Wait for the largest number nac value to be found
// This entire section until the next __syncthreads() was built heavily from Beauregard's code
// This section finds the fundamental period be check each multiple's correlation value
int bestP = nac[0];
float mid = nac[bestP];
float left = nac[bestP - 1];
float right = nac[bestP + 1];
float div = 2 * mid - left - right;
// If error, terminate this block and return frequency 0
if (div == 0 || (nac[bestP] < nac[bestP - 1] && nac[bestP] < nac[bestP + 1])) {
if (tid == 0) {
gpu_fEsts[bid] = 0;
}
return;
}
float shift = 0.5 * (right - left) / div;
float pEst = (bestP + shift);
float k_subMulThreshold = 0.90f;
int maxMul = bestP / MINP;
__shared__ bool subsAllStrong[(MAXP/MINP)+1];
if (tid > 0 && tid <= maxMul) {
int mul = tid;
subsAllStrong[mul] = true;
for (int k = 1; k < mul; k++) {
int subMulP = int (k * pEst / mul + 0.5);
if (nac[subMulP] < k_subMulThreshold * nac[bestP]) {
subsAllStrong[mul] = false;
break;
}
}
}
__syncthreads(); // Wait for the subs to be calculated
// Thread 0 will scan the subsAllStrong array backward to find the strongest multiple
if (tid == 0) {
for (int mul = maxMul; mul >= 1; mul--) {
if (subsAllStrong[mul] == true) {
pEst = pEst / mul;
break;
}
}
float fEst = 0;
if (pEst > 0) {
fEst = SR / pEst;
}
gpu_fEsts[bid] = fEst;
}
}
/*
 * Runs the GPU pitch detector over the whole audio buffer.
 * One block is launched per full frame of MAXP*2 samples; any trailing
 * partial frame is dropped. Returns a malloc'd array of one frequency
 * estimate per frame (caller frees).
 */
float *gpuPitchDetection (vector<float> *x) {
    const int threadsPerBlock = 416; // enough threads to load MAXP*2 samples at 8 per thread
    int audioSize = x->size();
    int numBlocks = audioSize / (MAXP*2);                    // one block per complete frame
    int gpu_audioSize = audioSize - (audioSize % (MAXP*2));  // drop the trailing partial frame
    float *gpu_audio;
    float *gpu_fEsts;
    float *h_fEsts;
    // Allocate result buffer on the CPU
    h_fEsts = (float *)malloc(numBlocks*sizeof(float));
    // Allocate memory on the GPU
    cudaMalloc(&gpu_audio, gpu_audioSize*sizeof(float));
    cudaMalloc(&gpu_fEsts, numBlocks*sizeof(float));
    // Copy the audio into gpu_audio
    cudaMemcpy(gpu_audio, x->data(), gpu_audioSize*sizeof(float), cudaMemcpyHostToDevice);
    // Run kernel pitch detection on the entire audio (samples)
    gpu_PitchDetection <<< numBlocks, threadsPerBlock >>>(gpu_audio, gpu_fEsts);
    // Copy all frequency estimates back to the host (blocking copy also syncs with the kernel)
    cudaMemcpy(h_fEsts, gpu_fEsts, numBlocks*sizeof(float), cudaMemcpyDeviceToHost);
    // BUG FIX: cudaFree takes the device pointer itself, not the address of the
    // host pointer variable; the original cudaFree(&gpu_audio) freed nothing
    // valid and leaked the device allocations.
    cudaFree(gpu_audio);
    cudaFree(gpu_fEsts);
    return h_fEsts;
}
/*
 * Loads a wave file, runs pitch detection on the CPU and on the GPU, and
 * prints both sets of per-frame frequency estimates.
 */
int main(int argc, char** argv) {
    if (argc == 1) {
        printf("Must input wave file name!\n");
        return 1;
    }
    const string path = argv[1];
    vector<float> audioData;
    int sampleRate;
    int numChannels;
    audioRead (path, &audioData, sampleRate, numChannels);
    // This block just shows how the #define values above were obtained: MINP, MAXP, and totalNumPeriods (which is MAXP - MINP)
    const double minF = 27.5;   // Lowest pitch of interest (27.5 = A0, lowest note on piano.)
    const double maxF = 4186.0; // Highest pitch of interest (4186 = C8, highest note on piano.)
    const int minP = int (sampleRate / maxF - 1); // Minimum period
    const int maxP = int (sampleRate / minF + 1); // Maximum period
    const int numOfSamples = 2 * maxP; // Number of samples. For best results, should be at least 2 x maxP
    // BUG FIX: size() is unsigned; cast the signed bound so the comparison is
    // well-defined instead of mixing signed/unsigned operands.
    if (audioData.size() < (size_t)numOfSamples) {
        printf("Audio too small!\n");
        return 1;
    }
    if (sampleRate != SR) {
        printf("Sample rate must be exacly 44100!\n");
        return 1;
    }
    int n = audioData.size() / (MAXP*2); // Number of frequency estimations
    printf("Calculating pitches normally... ");
    float *pitchNormal;
    starttime();
    pitchNormal = normalPitchDetection(&audioData);
    endtime("Normal");
    printf("Estimated frequencies of %s by NORMAL:\n", path.c_str());
    for (int i = 0; i < n; i++) {
        printf("%f,\n", pitchNormal[i]);
    }
    printf("\n\n");
    printf("Calculating pitches using GPU... ");
    float *pitchGPU;
    starttime();
    pitchGPU = gpuPitchDetection(&audioData);
    endtime("GPU");
    printf("Estimated frequencies of %s by GPU:\n", path.c_str());
    for (int i = 0; i < n; i++) {
        printf("%f,\n", pitchGPU[i]);
    }
    printf("\n\n");
    // BUG FIX: both detectors return malloc'd arrays that were leaked.
    free(pitchNormal);
    free(pitchGPU);
    return 0;
}
12,683 | #include "includes.h"
/*
 * Shift (Caesar) cipher over an array of symbols.
 * Each thread maps one element: output[i] = (input[i] + shift_amount) mod alphabet_max.
 * NOTE(review): alphabet_max is treated as the modulus (symbols in [0, alphabet_max));
 * confirm against the assignment spec whether it is instead the largest symbol value.
 */
__global__ void shift_cypher(unsigned int *input_array, unsigned int *output_array, unsigned int shift_amount, unsigned int alphabet_max, unsigned int array_length)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < array_length)  // guard the tail block
    {
        output_array[i] = (input_array[i] + shift_amount) % alphabet_max;
    }
}
12,684 | #include <iostream>
#include <vector>
/*
 * Keeps every second element: out[idx] = in[2*idx] for idx < size.
 * `threads` is the per-block stride (the launched blockDim.x).
 */
__global__ void vecmabite( int *out, int *in, int threads, std::size_t size )
{
    std::size_t idx = threadIdx.x + static_cast<std::size_t>(threads) * blockIdx.x;
    // BUG FIX: the original wrote unconditionally, so any over-provisioned
    // thread wrote past the end of `out`. Use the (previously unused) size
    // parameter to guard the tail.
    if (idx < size)
        out[idx] = in[2 * idx];
}
/*
 * Reads a length from argv[1], fills in[0..2*len) with 0..2*len-1, keeps every
 * second element on the GPU, and prints the resulting len values.
 */
int main(int ac, char **av)
{
    if (ac < 2)
        return (-1);
    int len = atoi(av[1]);
    int * out_d = nullptr;
    int * in_d = nullptr;
    int thread_max = 0;
    int thread_x = 0;
    std::vector< int > out( len );
    std::vector< int > in( 2 * len );
    cudaDeviceGetAttribute(&thread_max, cudaDevAttrMaxThreadsPerBlock, 0);
    // BUG FIX: the original test was inverted (it hard-coded 1024 threads for
    // large inputs) and launched (2*len)/thread_max blocks, which is 0 for
    // small inputs and drops the remainder otherwise. Use min(len, thread_max)
    // threads and a ceiling division so every output element is covered.
    thread_x = (len < thread_max) ? len : thread_max;
    if (thread_x < 1)
        thread_x = 1;
    int blocks = (len + thread_x - 1) / thread_x;
    if (blocks < 1)
        blocks = 1;
    for( std::size_t i = 0 ; i < in.size() ; ++i )
    {
        in[ i ] = i;
    }
    // Device buffers are padded to a whole number of blocks so even an
    // unguarded tail thread cannot touch memory outside the allocations.
    cudaMalloc( &out_d, static_cast<std::size_t>(blocks) * thread_x * sizeof( int ) );
    cudaMalloc( &in_d, 2 * static_cast<std::size_t>(blocks) * thread_x * sizeof( int ) );
    cudaMemcpy( in_d, in.data(), in.size() * sizeof( int ), cudaMemcpyHostToDevice );
    // BUG FIX: pass thread_x (the actual blockDim.x) as the stride argument;
    // the original passed thread_max, corrupting the indexing whenever the two
    // differed.
    vecmabite<<< blocks, thread_x >>>( out_d, in_d, thread_x, out.size() );
    cudaMemcpy( out.data(), out_d, out.size() * sizeof( int ), cudaMemcpyDeviceToHost );
    for( auto const x: out )
    {
        std::cout << x << std::endl;
    }
    cudaFree( out_d );
    cudaFree( in_d );
    return 0;
}
12,685 | //
// Created by root on 2020/11/24.
//
#include "curand_kernel.h"
#include "cuda_runtime.h"
#include "stdio.h"
// Seeds one curandState per thread: every thread uses the same seed (9444)
// but a distinct sequence number, so the streams are independent.
__global__ void initialize_state(curandState* states) {
    const int id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(9444, id, 0, &states[id]);
}
// Fills dRand[0..N) with uniform samples via a grid-stride loop; each thread
// advances its own state in `states` in place.
__global__ void refill_randoms(float *dRand, int N, curandState* states) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = gridDim.x * blockDim.x;
    curandState *myState = &states[gid];
    for (int idx = gid; idx < N; idx += stride)
        dRand[idx] = curand_uniform(myState);
}
// Returns one uniform random float generated with the cuRAND *device* API.
// Generates 1,000,000 samples per kernel launch and caches them in a static
// host buffer; subsequent calls consume the cache until it is exhausted.
// NOTE(review): none of the CUDA allocations are ever freed (process-lifetime
// cache), and no CUDA errors are checked.
float cuda_device_rand() {
    // use device api to generate random numbers
    static curandState *states = NULL;
    static float *dRand = NULL, *hRand = NULL;
    // used == length forces a refill on the very first call
    static int dRand_length = 1000000, dRand_used = 1000000;
    int block = 256;
    int grid = 30;
    if (dRand == NULL) {
        // if dRand is null, then allocate memory and initialize states
        cudaMalloc(&dRand, sizeof(float ) * dRand_length);
        cudaMalloc(&states, sizeof(curandState) * block * grid);
        hRand = (float *) malloc(sizeof(float ) * dRand_length);
        initialize_state<<<grid, block>>>(states);
    }
    if (dRand_used == dRand_length) {
        // if all random data have been traversed, we should generate a new batch of data
        refill_randoms<<<grid, block>>>(dRand, dRand_length, states);
        // blocking copy: also synchronizes with the kernels launched above
        cudaMemcpy(hRand, dRand, sizeof(float ) * dRand_length, cudaMemcpyDeviceToHost);
        dRand_used = 0;
    }
    return hRand[dRand_used++];
}
// Returns one uniform random float generated with the cuRAND *host* API.
// A 1,000,000-element batch is generated into device memory, copied to a host
// cache, and consumed one value per call until exhausted.
// NOTE(review): the generator is never seeded explicitly and the allocations
// are never freed (process-lifetime cache); no status codes are checked.
float cuda_host_rand() {
    // generate random data with host api
    static curandGenerator_t randGen;
    static float *dRand = NULL, *hRand = NULL;
    // used == length forces generation on the very first call
    static int dRand_length = 1000000, dRand_used = 1000000;
    if (dRand == NULL) {
        // if dRand is null, then allocate memory and create generator
        cudaMalloc(&dRand, sizeof(float ) * dRand_length);
        hRand = (float *) malloc(sizeof(float ) * dRand_length);
        curandCreateGenerator(&randGen, CURAND_RNG_PSEUDO_DEFAULT);
    }
    if (dRand_used == dRand_length) {
        // if all random data generated have been traversed, we should generate a batch of new data
        curandGenerateUniform(randGen, dRand, dRand_length); // the new data are in device memory
        cudaMemcpy(hRand, dRand, sizeof(float ) * dRand_length, cudaMemcpyDeviceToHost);
        dRand_used = 0;
    }
    return hRand[dRand_used++];
}
// nvcc -lcurand ReplaceRand.cu -o ReplaceRand
// Prints 256 pairs of random values, one from the host-API path and one from
// the device-API path, to eyeball both generators.
int main() {
    for (int iter = 0; iter < 256; iter++) {
        const float hostVal = cuda_host_rand();
        const float devVal  = cuda_device_rand();
        printf("h = %.2f, d = %.2f\n", hostVal, devVal);
    }
    return 0;
}
12,686 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <cuda.h>
#define N 1500
#define TILE_SIZE 4
#define MILI 1000
#define NANO 1000000000
// Aborts the process with a diagnostic when a CUDA API call failed.
// IMPROVEMENT: the original printed only the numeric code; include the
// human-readable string from cudaGetErrorString as well.
void checkCudaError(cudaError_t errorCode)
{
    if (errorCode != cudaSuccess)
    {
        fprintf(stderr, "Error %d: %s\n", (int) errorCode, cudaGetErrorString(errorCode));
        exit(1);
    }
}
/* Allocates a size x size float matrix as one contiguous element buffer plus
 * an array of row pointers into it. Exits on allocation failure.
 * Free with freeSquareMatOnHost. */
float** createSquareMatOnHost(int size)
{
    float **rows = (float **) malloc(size * sizeof(float *));
    if (!rows)
    {
        fprintf(stderr, "error allocating row memory");
        exit(1);
    }
    /* single contiguous block holds all elements, row-major */
    rows[0] = (float *) malloc(size * size * sizeof(float));
    if (!rows[0])
    {
        fprintf(stderr, "error allocating col memory");
        exit(1);
    }
    for (int r = 1; r < size; r++)
        rows[r] = rows[0] + r * size;
    return rows;
}
/* Releases a matrix built by createSquareMatOnHost: the contiguous element
 * storage first, then the row-pointer array. */
void freeSquareMatOnHost(float **mat)
{
    float *storage = mat[0]; /* all rows alias this one block */
    free(storage);
    free(mat);
}
/* Prints a size x size matrix to stdout, one row per line. */
void printSquareMat(float **mat, int size)
{
    for (int r = 0; r < size; r++)
    {
        for (int c = 0; c < size; c++)
            printf(" %f", mat[r][c]);
        printf("\n");
    }
}
/* Reference CPU matrix multiply: C = A * B for size x size matrices.
 * Plain triple loop; accumulation is done in float. */
void multiplySquareMatOnHost(float **C, float **A, float **B, int size)
{
    for (int row = 0; row < size; row++)
    {
        for (int col = 0; col < size; col++)
        {
            float acc = 0.0f;
            for (int k = 0; k < size; k++)
                acc += A[row][k] * B[k][col];
            C[row][col] = acc;
        }
    }
}
// Naive GPU matrix multiply over row-major serialized size x size matrices:
// one thread computes one element of C = A * B. Out-of-range threads return.
__global__ void multiplySquareSerializedMatOnDevice(float *C, float *A, float *B, int size)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= size || col >= size)
        return;
    float acc = 0.0f;
    for (int k = 0; k < size; k++)
        acc += A[row * size + k] * B[k * size + col];
    C[row * size + col] = acc;
}
/* Collapses a timespec into a single nanosecond count (NANO ns per second). */
long long convertToNsec(struct timespec ts)
{
    return (long long) ts.tv_sec * NANO + ts.tv_nsec;
}
/* Benchmark driver: multiplies two random N x N matrices on the CPU and on
 * the GPU, times both paths, and asserts the results are identical. */
int main(void)
{
    float **ha, **hb, **hc, **hd; // host data
    float *da, *db, *dc; // device data
    int i, j;
    int nbytes = N * N * sizeof(float);
    // allocate memory in host
    ha = createSquareMatOnHost(N);
    hb = createSquareMatOnHost(N);
    hc = createSquareMatOnHost(N); // GPU result
    hd = createSquareMatOnHost(N); // CPU reference result
    // allocate memory in device
    checkCudaError(cudaMalloc((void **) &da, nbytes));
    checkCudaError(cudaMalloc((void **) &db, nbytes));
    checkCudaError(cudaMalloc((void **) &dc, nbytes));
    // set values in ha randomly
    // NOTE(review): srand is only called below, so ha is filled from the
    // default seed and is identical on every run.
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            ha[i][j] = rand() % 10;
    // set values in hb randomly
    srand(time(NULL));
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            hb[i][j] = rand() % 10;
    // copy from host to device; the rows share one contiguous buffer, so
    // copying nbytes starting at ha[0] transfers the whole matrix
    checkCudaError(cudaMemcpy(da, ha[0], nbytes, cudaMemcpyHostToDevice));
    checkCudaError(cudaMemcpy(db, hb[0], nbytes, cudaMemcpyHostToDevice));
    // multiply matrix on host
    struct timespec ts_start, ts_end;
    clock_gettime(CLOCK_MONOTONIC, &ts_start);
    multiplySquareMatOnHost(hd, ha, hb, N);
    clock_gettime(CLOCK_MONOTONIC, &ts_end);
    // compute elapsed time
    long long hostElapsedTime = convertToNsec(ts_end) - convertToNsec(ts_start);
    printf("CPU time: %lf\n", (double) hostElapsedTime / NANO);
    // multiply matrix on device
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    int gridSize = (N/TILE_SIZE) + (N%TILE_SIZE>0?1:0); // ceiling division
    dim3 grid(gridSize, gridSize), block(TILE_SIZE, TILE_SIZE);
    cudaEventRecord(start, 0);
    multiplySquareSerializedMatOnDevice<<<grid, block>>>(dc, da, db, N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // compute elapsed time (cudaEventElapsedTime reports milliseconds)
    float deviceElapsedTime;
    cudaEventElapsedTime(&deviceElapsedTime, start, stop);
    printf("CUDA time: %f\n", deviceElapsedTime / MILI);
    // copy from device to host
    checkCudaError(cudaMemcpy(hc[0], dc, nbytes, cudaMemcpyDeviceToHost));
    // assertion: exact float equality is expected here because both sides sum
    // the same small integer-valued products in the same k-order, and every
    // intermediate stays well below 2^24 (exactly representable in float)
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++)
            assert(hc[i][j] == hd[i][j]);
    // free memory
    // NOTE(review): the CUDA events are never destroyed (cudaEventDestroy)
    freeSquareMatOnHost(ha);
    freeSquareMatOnHost(hb);
    freeSquareMatOnHost(hc);
    freeSquareMatOnHost(hd);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
}
|
12,687 | #define FLOAT_TO_BITS(x) (*reinterpret_cast<unsigned int*>(x))
#define BITS_TO_FLOAT(x) (*reinterpret_cast<float*>(x))
// Returns the unbiased IEEE-754 single-precision exponent of *a, plus one
// (the +1 accounts for the implicit/virtual leading mantissa bit), obtained by
// bit-level reinterpretation of the float.
__device__ __forceinline__ unsigned int extract_exponent(float *a) {
  unsigned int temp = *(reinterpret_cast<unsigned int*>(a));
  // shift out the sign bit, then shift the 8 exponent bits down past the
  // 23 mantissa bits (single precision: 1 sign, 8 exponent, 23 mantissa)
  temp = (temp << 1 >> 24);
  return temp-127+1; // remove the 127 bias; +1 for the virtual bit
}
// Stochastic rounding of a float bit pattern to man_bits mantissa bits:
// add random bits (rand_prob) below the kept mantissa positions, then
// truncate. The value rounds up with probability proportional to the
// discarded fraction.
__device__ __forceinline__ unsigned int round_bitwise_stochastic(unsigned int target,
                                                                 unsigned int rand_prob,
                                                                 int man_bits) {
  unsigned int mask = (1 << (23-man_bits)) - 1; // bits that will be discarded
  unsigned int add_r = target+(rand_prob & mask); // inject randomness below the kept bits
  unsigned int quantized = add_r & ~mask; // truncate to man_bits mantissa bits
  return quantized;
}
// Round-to-nearest of a float bit pattern to man_bits mantissa bits:
// add half a unit in the last kept place, then truncate.
// NOTE(review): exact ties round away from zero (up in magnitude), not to even.
__device__ __forceinline__ unsigned int round_bitwise_nearest(unsigned int target,
                                                              int man_bits) {
  unsigned int mask = (1 << (23-man_bits)) - 1; // bits that will be discarded
  unsigned int rand_prob = 1 << (23-man_bits-1); // half ULP of the target precision
  unsigned int add_r = target+rand_prob;
  unsigned int quantized = add_r & ~mask; // truncate to man_bits mantissa bits
  return quantized;
}
// Clamps a quantized float bit pattern to the representable range of a target
// format with exp_bits exponent bits and man_bits mantissa bits (exponents
// stay biased against the IEEE-754 single bias of 127). Overflow saturates to
// the largest finite value with the original sign; underflow rounds to the
// smallest normal or flushes to zero (no subnormals in the target format).
__device__ __forceinline__ unsigned int clip_exponent(int exp_bits, int man_bits,
                                                      unsigned int old_num,
                                                      unsigned int quantized_num) {
  int quantized_exponent_store = quantized_num << 1 >> 1 >> 23; // drop the sign bit, keep the stored (biased) exponent
  int min_exponent_store = -((1 << (exp_bits-1))-1) + 127; // smallest target exponent, re-biased to 127
  int max_exponent_store = ((1 << (exp_bits-1))-1) + 127; // excluding the exponent for infinity
  if (quantized_exponent_store > max_exponent_store) {
    // overflow: saturate to max exponent with all man_bits mantissa bits set
    unsigned int max_man = (unsigned int ) -1 << 9 >> 9 >> (23-man_bits) << (23-man_bits); // 1 sign bit, 8 exponent bits, 1 virtual bit
    unsigned int max_num = ((unsigned int) max_exponent_store << 23) | max_man;
    unsigned int old_sign = old_num >> 31 << 31; // keep the sign of the unquantized input
    quantized_num = old_sign | max_num;
  } else if (quantized_exponent_store < min_exponent_store) {
    unsigned int min_num = ((unsigned int)min_exponent_store << 23); // smallest normal of the target format
    unsigned int middle_num = ((unsigned int)(min_exponent_store - 1) << 23); // halfway point between 0 and the smallest normal
    unsigned int unsigned_quantized_num = quantized_num << 1 >> 1; // magnitude only
    if (unsigned_quantized_num > middle_num)
    {
      // closer to the smallest normal: round up to it, keeping the sign
      unsigned int old_sign = old_num >> 31 << 31;
      quantized_num = old_sign | min_num;
    }
    else
    {
      // closer to zero: flush to zero
      quantized_num = 0;
    }
  }
  return quantized_num;
}
// Saturates a quantized float bit pattern whose (already shifted-into-place)
// exponent field exceeds max_exponent: replaces it with max_exponent and a
// full man_bits mantissa, preserving the sign. max_exponent is expected to be
// pre-positioned in bits [30:23] — NOTE(review): confirm against the callers.
__device__ __forceinline__ unsigned int clip_max_exponent(int man_bits,
                                                          unsigned int max_exponent,
                                                          unsigned int quantized_num) {
  unsigned int quantized_exponent = quantized_num << 1 >> 24 << 23; // isolate the exponent field in place (1 sign bit, 23 mantissa bits)
  if (quantized_exponent > max_exponent) {
    // largest representable mantissa for man_bits kept bits
    unsigned int max_man = (unsigned int ) -1 << 9 >> 9 >> (23-man_bits) << (23-man_bits); // 1 sign bit, 8 exponent bits
    unsigned int max_num = max_exponent | max_man;
    unsigned int old_sign = quantized_num >> 31 << 31; // preserve sign
    quantized_num = old_sign | max_num;
  }
  return quantized_num;
}
12,688 | #include <stdio.h>
#define N 1000
#define THREAD_X 4
// Per-element affine map: B[i] = A[i] * 1.23 + 2.34 for the N-element arrays.
__global__ void index(float *A, float *B){
    int i = blockDim.x*blockIdx.x+threadIdx.x;
    // IMPROVEMENT: guard against overshoot — harmless today (N is an exact
    // multiple of the launch size) but safe if the constants change.
    if (i < N) {
        float X = 1.23f; // float literals avoid a double round-trip on the device
        float Y = 2.34f;
        B[i] = A[i]*X + Y;
    }
}
// Fills A with even numbers, computes B[i] = A[i]*1.23 + 2.34 on the GPU,
// and prints the result.
int main(){
    float A[N], *A_d;
    float B[N], *B_d;
    int i;
    dim3 dimBlock(THREAD_X);
    dim3 dimGrid(N/THREAD_X); // exact: N (1000) is a multiple of THREAD_X (4)
    for(i = 0 ; i < N; i++){
        A[i] = i*2;
    }
    // BUG FIX: these are float buffers — size them with sizeof(float).
    // sizeof(int) merely happens to match on common platforms.
    cudaMalloc((void**)&A_d, sizeof(float)*N);
    cudaMalloc((void**)&B_d, sizeof(float)*N);
    cudaMemcpy(A_d, A, sizeof(float)*N, cudaMemcpyHostToDevice);
    // (the original also uploaded uninitialized B and downloaded unchanged A;
    // both transfers were redundant and have been removed)
    index<<<dimGrid, dimBlock>>>(A_d, B_d);
    cudaMemcpy(B, B_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
    for(i = 0; i < N; i++){
        printf("%f ",B[i]);
    }
    cudaFree(A_d);
    cudaFree(B_d); // BUG FIX: B_d was never freed
}
|
12,689 | #include "includes.h"
// Updates the "time" component (dimension 0) of each patch's energy vector:
// the patch whose index matches *idFocuser_focused gets a fixed boost, every
// other patch's time energy is divided by the decay parameter.
// (Kernel name typo "Udpate" is preserved — callers depend on it.)
__global__ void UdpateEnergyTerm_time( float* energy, int energy_dim, int nPatches, float * idFocuser_focused , float par_time_increase_energy_on_focus, float par_time_decrease_energy_in_time)
{
    const int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
    if (id >= energy_dim*nPatches)
        return; // out-of-range thread
    const int dim = id % energy_dim;
    if (dim != 0)
        return; // only the time dimension is touched
    const int patch = id / energy_dim;
    if (patch == (int)(*idFocuser_focused))
        energy[id] += par_time_increase_energy_on_focus; // just-focused patch gains energy
    else
        energy[id] /= par_time_decrease_energy_in_time ; // everyone else decays
}
12,690 | //pass
//--gridDim=[11377,1,1] --blockDim=[256,1,1]
#include "common.h"
// After vertex remapping, an edge whose mapped start and end coincide is a
// self-loop; mark it invalid by overwriting its endpoint with UINT_MAX.
__global__ void invalidateLoops(const uint *startpoints,
                                const uint *verticesMapping,
                                uint *edges,
                                uint edgesCount)
{
    const uint tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= edgesCount)
        return;
    const uint mappedStart = verticesMapping[startpoints[tid]];
    const uint mappedEnd = verticesMapping[edges[tid]];
    if (mappedStart == mappedEnd)
    {
        edges[tid] = UINT_MAX; // sentinel: edge collapsed to a loop
    }
}
|
12,691 | #include<stdio.h>
#include<cuda.h>
#include<math.h>
#include<sys/time.h>
// Element-wise vector addition: C[i] = A[i] + B[i] for i < N.
__global__
void Matadd(double* A, double* B, double* C, int N)
{
    int i = blockIdx.x* blockDim.x +threadIdx.x;
    if(i<N)
        C[i] = A[i] + B[i];
    // IMPROVEMENT: the trailing __syncthreads() was removed — it was the last
    // statement of a kernel that uses no shared memory, so it synchronized
    // nothing useful.
}
/* Timing study: for N = 2^10 .. 2^20, adds two random double vectors on the
 * GPU with 32 and then 1024 threads per block, recording inclusive
 * (transfers + kernel) and exclusive (kernel only) times, and validates the
 * result against a CPU reference. */
int main()
{
    for(int j=10;j<=20;j++)
    {
        cudaEvent_t start1,start2,start3,stop1,stop2,stop3,start4,stop4;
        float time1,time2,time3, time4;
        int i;
        int N = pow(2,j); // problem size for this iteration
        size_t size = N * sizeof(double);
        printf ("\n The value of N is %d",N);
        // NOTE(review): events are created every iteration but never
        // destroyed with cudaEventDestroy — a per-iteration resource leak.
        cudaEventCreate(&start1);
        cudaEventCreate(&stop1);
        cudaEventCreate(&start2);
        cudaEventCreate(&stop2);
        cudaEventCreate(&start3);
        cudaEventCreate(&stop3);
        cudaEventCreate(&start4);
        cudaEventCreate(&stop4);
        //allocate input matrices hA, hB, hC,refC in host memory
        double* hA = (double*)malloc(size);
        double* hB = (double*)malloc(size);
        double* hC = (double*)malloc(size);
        double* refC = (double*)malloc(size); // CPU reference result
        for(i=0;i<N;i++)
        {
            hA[i] = rand()%20-10; // values in [-10, 9]
            hB[i] = rand()%20-10;
            refC[i] = hA[i] + hB[i];
        }
        //allocate memory on the device (GPU)
        double* dA;
        cudaMalloc(&dA,size);
        double* dB;
        cudaMalloc(&dB,size);
        double* dC;
        cudaMalloc(&dC,size);
        //timing start for inclusive timing (includes host<->device copies)
        cudaEventRecord(start1, 0);
        //copy vectors from host memory to devie memory
        cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
        cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
        // first configuration: 32 threads per block
        int threadsperblock = 32;
        int blockspergrid = (N + threadsperblock - 1)/ threadsperblock; // ceiling division
        //timing start for exclusive timing (kernel only)
        cudaEventRecord(start2, 0);
        Matadd<<<blockspergrid,threadsperblock>>>(dA,dB,dC,N);
        //timing stop for exclusive timing
        cudaEventRecord(stop2, 0);
        cudaEventSynchronize(stop2);
        cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
        //timing stop for inclusive timing
        cudaEventRecord(stop1, 0);
        cudaEventSynchronize(stop1);
        //timing start for inclusive timing (second configuration)
        cudaEventRecord(start3, 0);
        //copy vectors from host memory to devie memory
        cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
        cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
        // second configuration: 1024 threads per block
        threadsperblock = 1024;
        blockspergrid = (N + threadsperblock - 1)/ threadsperblock;
        //timing start for exclusive timing
        cudaEventRecord(start4, 0);
        Matadd<<<blockspergrid,threadsperblock>>>(dA,dB,dC,N);
        //timing stop for exclusive timing
        cudaEventRecord(stop4, 0);
        cudaEventSynchronize(stop4);
        //bring the result back from the device memory into the host array
        cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
        cudaEventRecord(stop3, 0);
        cudaEventSynchronize(stop3);
        // validate against the CPU reference (integer-valued doubles: the
        // sums should match exactly, well within the 1e-12 tolerance)
        for (i=0;i<N;i++)
        {
            if(fabs(refC[i] - hC[i]) > 1e-12)
            {
                printf("Erratic Value \n");
                exit(1);
            }
        }
        cudaEventElapsedTime(&time1,start1,stop1);
        cudaEventElapsedTime(&time2,start2,stop2);
        // NOTE(review): cudaEventElapsedTime reports milliseconds, although
        // the message below says microseconds
        printf("\n The inclusive time and exclusive time for 32 threads in microseconds for 2 to power %d is %f and %f respectively \n",j,time1,time2);
        cudaEventElapsedTime(&time3,start3,stop3);
        cudaEventElapsedTime(&time4,start4,stop4);
        printf("\n The inclusive time and exclusive time for 1024 threads in microseconds for 2 to power %d is %f and %f respectively \n",j,time3,time4);
        free(hA);
        free(hB);
        free(hC);
        free(refC);
        cudaFree(dA);
        cudaFree(dB);
        cudaFree(dC);
    }
    return 0;
}
|
12,692 | #include "includes.h"
__global__ void addOneFloat(double* vals, int N, float *out)
{
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
if(idx < N) {
out[idx] = (float) vals[idx] + 1.0;
}
} |
12,693 | #include <iostream>
#include <math.h>
#define N 4096
#define BLOCK_DIM 32
// Element-wise addition of two row-major N x N matrices: c = a + b.
// One thread handles one element; out-of-range threads return early.
__global__ void matAdd(float *a, float *b, float *c){
    const int row = threadIdx.x + blockIdx.x * blockDim.x;
    const int col = threadIdx.y + blockIdx.y * blockDim.y;
    if (row >= N || col >= N)
        return;
    const int idx = row * N + col;
    c[idx] = a[idx] + b[idx];
}
/* Adds two N x N matrices of ones on the GPU and reports the maximum
 * deviation of the result from the expected value 2. */
int main(void){
    float *dev_x, *dev_y, *dev_z; // device buffers
    float *x, *y, *z;             // host buffers
    long long size = N * N * sizeof(float);
    cudaMalloc((void **)&dev_x, size);
    cudaMalloc((void **)&dev_y, size);
    cudaMalloc((void **)&dev_z, size);
    x = (float *)malloc(size);
    y = (float *)malloc(size);
    z = (float *)malloc(size);
    // fill both inputs with ones
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            x[i*N + j] = 1;
            y[i*N + j] = 1;
        }
    }
    cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_y, y, size, cudaMemcpyHostToDevice);
    // NOTE(review): z is uploaded uninitialized; the kernel overwrites every
    // element, so this transfer is redundant (but harmless).
    cudaMemcpy(dev_z, z, size, cudaMemcpyHostToDevice);
    // FIGURE OUT BLOCK_DIM
    dim3 block(BLOCK_DIM, BLOCK_DIM);
    // exact division: N (4096) is a multiple of BLOCK_DIM (32)
    dim3 grid((int)(N/block.x), (int)(N/block.y));
    matAdd<<<grid, block>>>(dev_x, dev_y, dev_z);
    // blocking copy also synchronizes with the kernel above
    cudaMemcpy(z, dev_z, size, cudaMemcpyDeviceToHost);
    printf("%f ", z[3]); // spot-check one element
    float maxError = 0;
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            maxError = fmax(maxError, fabs(z[i*N + j] - 2));
        }
    }
    std::cout << "Error: " << maxError << std::endl;
    cudaFree(dev_x);
    cudaFree(dev_y);
    cudaFree(dev_z);
    free(x);
    free(y);
    free(z);
    return 0;
}
12,694 | #include<cuda.h>
#include<stdio.h>
void initializeArray(int*,int);
void stampaArray(int*, int);
void equalArray(int*, int*, int);
void prodottoArrayCompPerCompCPU(int *, int *, int *, int);
__global__ void prodottoArrayCompPerCompGPU(int*, int*, int*, int );
int main(int argn, char * argv[])
{
//numero di blocchi e numero di thread per blocco
dim3 nBlocchi, nThreadPerBlocco;
int N; //numero totale di elementi dell'array
//array memorizzati sull'host
int *A_host, *B_host, *C_host;
//array memorizzati sul device
int *A_device, *B_device, *C_device;
int *copy; //array in cui copieremo i risultati di C_device
int size; //size in byte di ciascun array
int flag;
printf("***\t PRODOTTO COMPONENTE PER COMPONENTE DI DUE ARRAY \t***\n");
/* se l'utente non ha inserito un numero sufficiente di
parametri da riga di comando, si ricorre ai valori di
default per impostare il numero di thread per blocco, il
numero totale di elementi e il flag di stampa */
if(argn<4)
{
printf("Numero di parametri insufficiente!!!\n");
printf("Uso corretto: %s <NumElementi> <NumThreadPerBlocco> <flag per la Stampa>\n",argv[0]);
printf("Uso dei valori di default\n");
nThreadPerBlocco=4;
N=12;
flag=1;
}
else
{
N=atoi(argv[1]);
nThreadPerBlocco=atoi(argv[2]);
flag=atoi(argv[3]);
}
//determinazione esatta del numero di blocchi
nBlocchi=N/nThreadPerBlocco.x+
((N%nThreadPerBlocco.x)==0?0:1);
//size in byte di ogni array
size=N*sizeof(int);
//stampa delle info sull'esecuzione del kernel
printf("Numero di elementi = %d\n", N);
printf("Numero di thread per blocco = %d\n",
nThreadPerBlocco.x);
printf("Numero di blocchi = %d\n", nBlocchi.x);
//allocazione dati sull'host
A_host=(int*)malloc(size);
B_host=(int*)malloc(size);
C_host=(int*)malloc(size);
copy=(int*)malloc(size);
//allocazione dati sul device
cudaMalloc((void**)&A_device,size);
cudaMalloc((void**)&B_device,size);
cudaMalloc((void**)&C_device,size);
//inizializzazione dati sull'host
initializeArray(A_host, N);
initializeArray(B_host, N);
//copia dei dati dall'host al device
cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice);
cudaMemcpy(B_device, B_host, size, cudaMemcpyHostToDevice);
//azzeriamo il contenuto della matrice C
memset(C_host, 0, size);
cudaMemset(C_device, 0, size);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//invocazione del kernel
prodottoArrayCompPerCompGPU<<<nBlocchi, nThreadPerBlocco>>>
(A_device, B_device, C_device, N);
cudaEventRecord(stop);
cudaEventSynchronize(stop); // assicura che tutti siano arrivati all'evento stop prima di registrare il tempo
float elapsed;
// tempo tra i due eventi in millisecondi
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//copia dei risultati dal device all'host
cudaMemcpy(copy,C_device,size, cudaMemcpyDeviceToHost);
printf("tempo GPU=%f\n", elapsed);
// calcolo su CPU
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
//chiamata alla funzione seriale per il prodotto di due array
prodottoArrayCompPerCompCPU(A_host, B_host, C_host, N);
cudaEventRecord(stop);
cudaEventSynchronize(stop); // assicura che tutti siano arrivati all'evento stop prima di registrare il tempo
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("tempo CPU=%f\n", elapsed);
//stampa degli array e dei risultati
if(flag==1)
{
printf("array A\n"); stampaArray(A_host,N);
printf("array B\n"); stampaArray(B_host,N);
printf("Risultati host\n"); stampaArray(C_host, N);
printf("Risultati device\n"); stampaArray(copy,N);
}
//test di correttezza
equalArray(copy, C_host,N);
//de-allocazione host
free(A_host);
free(B_host);
free(C_host);
free(copy);
//de-allocazione device
cudaFree(A_device);
cudaFree(B_device);
cudaFree(C_device);
exit(0);
}
/* Fills array[0..n) with the ascending values 0, 1, ..., n-1. */
void initializeArray(int *array, int n)
{
    for (int k = 0; k < n; k++)
        array[k] = k;
}
/* Prints the n elements of the array on one line, space-separated. */
void stampaArray(int* array, int n)
{
    for (int k = 0; k < n; k++)
    {
        printf("%d ", array[k]);
    }
    printf("\n");
}
/* Compares the first n elements of a and b and prints whether the host and
 * device results agree. */
void equalArray(int* a, int*b, int n)
{
    int i=0;
    /* BUG FIX: the original loop had no bound check, so when all n elements
       matched it kept reading past the end of both arrays (undefined
       behavior) until stray memory happened to differ. */
    while(i<n && a[i]==b[i])
        i++;
    if(i<n)
        printf("I risultati dell'host e del device sono diversi\n");
    else
        printf("I risultati dell'host e del device coincidono\n");
}
//Seriale
/* Serial reference: element-wise product c[i] = a[i] * b[i] on the host. */
void prodottoArrayCompPerCompCPU
(int *a, int *b, int *c, int n)
{
    for (int k = 0; k < n; k++)
        c[k] = a[k] * b[k];
}
//Parallelo
// Parallel element-wise product: one thread computes c[i] = a[i] * b[i];
// threads past the end of the arrays do nothing.
__global__ void prodottoArrayCompPerCompGPU
(int* a, int* b, int* c, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    c[i] = a[i] * b[i];
}
|
12,695 | #include "includes.h"
// In-place element-wise addition: inA[i] += inB[i] for i < length.
__global__ void eladd(float * inA, float * inB, int length)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= length)
        return;
    inA[i] += inB[i];
}
12,696 | #include "includes.h"
// Element-wise product: destination[i] = sourceA[i] * sourceB[i] for i < size.
// (CUDASTDOFFSET is the project's global thread-index macro from includes.h.)
__global__ void CUDAkernel_multiply( float* sourceA, float* sourceB, float* destination, int size )
{
    int index = CUDASTDOFFSET;
    // BUG FIX: the original loaded sourceA[index] and sourceB[index] before
    // checking the bound, so every out-of-range thread performed
    // out-of-bounds global reads. Load only inside the guard.
    if( index < size )
    {
        destination[index] = sourceA[index] * sourceB[index];
    }
}
12,697 | #include <stdio.h>
#ifndef THREADS_PER_BLOCK
#define THREADS_PER_BLOCK 1024
#endif
// Increments each of the first N elements of dA by one.
__global__ void myKernel(int64_t *dA, size_t N) {
    // BUG FIX: compute the global index in size_t — the original int index
    // overflowed (UB) for N >= 2^31, and the int/size_t comparison was
    // sign-mismatched.
    size_t id = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (id < N) {
        dA[id] = dA[id] + 1;
    }
}
extern "C" {
// Host entry point: increments every element of ptr[0..N) on the GPU.
void kernel(int64_t *ptr, size_t N) {
    // BUG FIX: integer ceiling division. The original float ceil() loses
    // precision once N no longer fits exactly in a float (N > 2^24), which
    // could under- or over-count the blocks.
    size_t blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    myKernel<<<blocks, THREADS_PER_BLOCK>>>(ptr, N);
}
}
12,698 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define TILE_DIM 32
#define b_row 32
// Tiled matrix transpose: output = input^T, repeated n times (benchmark loop).
// Expects blockDim = (TILE_DIM, b_row) and gridDim = (width/TILE_DIM, height/TILE_DIM);
// width and height must be multiples of TILE_DIM (no tail guards).
__global__ void transpose(float *output, float *input, int width, int height, int n)
{
    __shared__ float tile[TILE_DIM][TILE_DIM+1]; // +1 pad avoids shared-memory bank conflicts
    int x_index = blockIdx.x * TILE_DIM + threadIdx.x;
    int y_index = blockIdx.y * TILE_DIM + threadIdx.y;
    int index_in = x_index + (y_index)*width;
    // transposed coordinates for the write-back
    x_index = blockIdx.y * TILE_DIM + threadIdx.x;
    y_index = blockIdx.x * TILE_DIM + threadIdx.y;
    int index_out = x_index + (y_index)*height;
    for (int r=0; r < n; r++)
    {
        for (int i=0; i<TILE_DIM; i+=b_row)
        {
            tile[threadIdx.y+i][threadIdx.x] = input[index_in+i*width];
        }
        __syncthreads(); // tile fully populated before transposed reads
        for (int i=0; i<TILE_DIM; i+=b_row)
        {
            output[index_out+i*height] = tile[threadIdx.x][threadIdx.y+i];
        }
        // BUG FIX: without this barrier, iteration r+1 could start overwriting
        // tile entries that other warps are still reading (a data race
        // whenever n > 1).
        __syncthreads();
    }
}
/* Reads t test cases; for each, reads an N x N matrix (N must be a multiple
 * of TILE_DIM), transposes it on the GPU and prints the result. */
int main(void)
{
    // Error code to check return values for CUDA calls
    int t;
    scanf("%d",&t);
    while(t--)
    {
        cudaError_t err = cudaSuccess;
        long long int N = 0;
        scanf("%lld ",&N);
        if (N % TILE_DIM != 0 )
        {
            printf("Matrix size must be integral multiple of tile size\nExiting...\n\n");
            cudaDeviceReset();
            exit(EXIT_FAILURE);
        }
        size_t mem_size = sizeof(float) * N * N;
        float *input = (float *) malloc(mem_size);
        float *output = (float *) malloc(mem_size);
        // allocate device memory
        float *d_input = NULL;
        err = cudaMalloc((void **) &d_input, mem_size);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        float *d_output = NULL;
        err = cudaMalloc((void **) &d_output, mem_size);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        for (int i = 0; i < (N*N); ++i)
        {
            scanf("%f",&input[i]);
        }
        // Copy the host input matrix to device memory
        printf("Copy input data from the host memory to the CUDA device\n");
        err = cudaMemcpy(d_input, input, mem_size, cudaMemcpyHostToDevice);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to copy vector input1 from host to device (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // Launch the transpose kernel.
        // BUG FIX: the original swapped the two dim3s — it used
        // (N/TILE_DIM, N/TILE_DIM) threads per block and (TILE_DIM, b_row)
        // blocks, which only accidentally works when N == 1024. The kernel
        // expects blockDim = (TILE_DIM, b_row) and gridDim = (N/TILE_DIM, N/TILE_DIM).
        dim3 threadsPerBlock1(TILE_DIM, b_row, 1);
        dim3 blocksPerGrid1(N/TILE_DIM, N/TILE_DIM, 1);
        transpose<<<blocksPerGrid1, threadsPerBlock1>>>(d_output, d_input, N, N, 1);
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to launch transpose kernel (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // Copy the device result back to host memory
        printf("Copy output data from the CUDA device to the host memory\n");
        err = cudaMemcpy(output, d_output, mem_size, cudaMemcpyDeviceToHost);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to copy vector output from device to host (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        // Print the transposed matrix
        printf("Results\n");
        for (long long int i = 0; i < (N*N); ++i)
        {
            printf("%0.2f", output[i]);
        }
        // Free device global memory
        err = cudaFree(d_input);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to free device vector input1 (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        err = cudaFree(d_output);
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to free device vector output1 (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        free(input);
        free(output);
        err = cudaDeviceReset();
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }
    return 0;
}
|
12,699 | //
// Created by auyar on 17.08.2021.
//
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <algorithm>
#include <string>
#include <math.h>
#include <cuda.h>
#include <curand_kernel.h>
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
// Initializes one curandState per thread. All threads share the seed 1234 but
// use their global id as the sequence number, so each stream is independent.
__global__ void setup_kernel(curandState *state) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(1234, tid, 0, &state[tid]);
}
// Linear membership scan: true iff `number` occurs in local_results[0..size).
__device__ bool exist(unsigned int* local_results, int size, unsigned int number) {
    for (int idx = 0; idx < size; idx++) {
        if (local_results[idx] == number)
            return true;
    }
    return false;
}
// Sampling-without-replacement kernel: each thread draws SAMPLES_PER_THREAD
// distinct random indices from its own window of the data
// ([0, data_size_per_thread), offset by id * data_size_per_thread) and writes
// them into `results`. The last producing thread handles whatever samples and
// data remain; extra threads in the last block exit immediately.
// NOTE(review): duplicates are rejected by an O(k^2) rescan of the thread's
// own results, and the scratch buffer comes from in-kernel `new` (per-thread
// heap) — its NULL-failure case is not checked.
__global__ void generate_uniform_kernel(curandState *state,
                                        unsigned int *results,
                                        const unsigned int sample_size,
                                        const unsigned int SAMPLES_PER_THREAD,
                                        const unsigned int data_size,
                                        const unsigned int data_size_per_thread) {
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    // this can be a thread that is started extra in the last block,
    // so it may not generate any random numbers
    // or this can be the last thread to generated random numbers,
    // it may generate random numbers less than SAMPLES_PER_THREAD in a smaller range
    unsigned int thread_sample_start = id * SAMPLES_PER_THREAD;
    unsigned int samples_this_thread = SAMPLES_PER_THREAD;
    unsigned int data_size_this_thread = data_size_per_thread;
    if (thread_sample_start >= sample_size) {
        // this is an extra thread started in the last block
        return;
    } else if(thread_sample_start >= sample_size - SAMPLES_PER_THREAD) {
        // this is the last thread to generate randoms
        samples_this_thread = sample_size - thread_sample_start;
        data_size_this_thread = data_size - data_size_per_thread * id;
    }
    /* Copy state to local memory for efficiency */
    curandState localState = state[id];
    unsigned int* local_results = new unsigned int[samples_this_thread];
    for (int i=0; i < samples_this_thread; i++) {
        // uniform draw in [0, data_size_this_thread)
        unsigned int rand_num = (unsigned int) ceil(curand_uniform_double(&localState) * data_size_this_thread) - 1;
        // check whether this number already generated; if so, retry this slot
        if (exist(local_results, i, rand_num)) {
            i--;
        } else {
            local_results[i] = rand_num;
        }
    }
    // transfer generated random numbers to the global array, shifting each
    // index into this thread's data window
    unsigned int data_start = id * data_size_per_thread;
    for (int i=0, j = thread_sample_start; i < samples_this_thread; i++, j++) {
        results[j] = local_results[i] + data_start;
    }
    /* Copy state back to global memory */
    state[id] = localState;
    delete [] local_results;
}
// Samples `dataSize * samplingRatio` random indices on the GPU using cuRAND,
// then verifies the result on the host (duplicate count, zero count) and
// reports kernel timings.
// Usage: <prog> <data_size> <sampling_ratio>
int main(int argc, char** argv) {
    if (argc != 3) {
        printf("You must specify the data_size and sampling ratio as parameters.\n");
        return 1;
    }
    std::string dataSizeStr = argv[1];
    std::string samplingRatioStr = argv[2];
    unsigned int dataSize = std::stoi(dataSizeStr);
    float samplingRatio = std::stof(samplingRatioStr);
    const unsigned int SAMPLES_PER_THREAD = 64;
    const unsigned int THREADS_PER_BLOCK = 256;
    unsigned int sampleSize = dataSize * samplingRatio;
    const unsigned int totalThreads = ceil((double)sampleSize / (double)SAMPLES_PER_THREAD);
    unsigned int blockCount = ceil((double)totalThreads / (double)THREADS_PER_BLOCK);
    unsigned int data_size_per_thread = (dataSize / sampleSize) * SAMPLES_PER_THREAD;
    printf("dataSize: %i\n", dataSize);
    printf("data_size_per_thread: %i\n", data_size_per_thread);
    printf("sampleSize: %i\n", sampleSize);
    printf("SAMPLES_PER_THREAD: %i\n", SAMPLES_PER_THREAD);
    printf("totalThreads: %i\n", totalThreads);
    printf("blockCount: %i\n", blockCount);
    cudaEvent_t start1, stop1, start2, stop2;
    CUDA_CALL(cudaEventCreate(&start1));
    CUDA_CALL(cudaEventCreate(&stop1));
    CUDA_CALL(cudaEventCreate(&start2));
    CUDA_CALL(cudaEventCreate(&stop2));
    unsigned int i;
    curandState *devStates;
    unsigned int *devResults, *hostResults;
    int device;
    /* Verify a CUDA device is usable before allocating anything */
    CUDA_CALL(cudaGetDevice(&device));
    /* Allocate space for results on host */
    // Fix: was calloc(..., sizeof(int)); use the actual element type
    hostResults = (unsigned int *) calloc(sampleSize, sizeof(unsigned int));
    if (hostResults == NULL) {
        printf("Failed to allocate host results buffer.\n");
        return EXIT_FAILURE;
    }
    /* Allocate space for results on device */
    CUDA_CALL(cudaMalloc((void **)&devResults, sampleSize * sizeof(unsigned int)));
    /* Set results to 0 */
    CUDA_CALL(cudaMemset(devResults, 0, sampleSize * sizeof(unsigned int)));
    /* Allocate space for prng states on device */
    CUDA_CALL(cudaMalloc((void **)&devStates, blockCount * THREADS_PER_BLOCK * sizeof(curandState)));
    /* Setup prng states */
    cudaEventRecord(start1);
    setup_kernel<<<blockCount, THREADS_PER_BLOCK>>>(devStates);
    // Fix: kernel launches return no status directly; check for launch errors
    CUDA_CALL(cudaGetLastError());
    cudaEventRecord(stop1);
    /* Generate the samples */
    cudaEventRecord(start2);
    generate_uniform_kernel<<<blockCount, THREADS_PER_BLOCK>>>(devStates,
                                                               devResults,
                                                               sampleSize,
                                                               SAMPLES_PER_THREAD,
                                                               dataSize,
                                                               data_size_per_thread);
    CUDA_CALL(cudaGetLastError());
    cudaEventRecord(stop2);
    /* Copy device memory to host (blocking; also waits for the kernels) */
    CUDA_CALL(cudaMemcpy(hostResults, devResults, sampleSize * sizeof(unsigned int), cudaMemcpyDeviceToHost));
    std::vector<int> randoms;
    for(i = 0; i < sampleSize; i++) {
        randoms.push_back(hostResults[i]);
    }
    /* Count adjacent equal values (only meaningful if sorted; kept as-is).
       Fix: `i < size() - 1` underflowed when the vector was empty; use i+1 < size. */
    int equalCount = 0;
    // std::sort(randoms.begin(), randoms.end());
    for (size_t k = 0; k + 1 < randoms.size(); k++) {
        if (randoms[k] == randoms[k+1]) {
            // printf("random numbers are equal at i: %i\n", i);
            equalCount++;
        }
    }
    /* Fix: original loop stopped at size()-1 and never examined the last element */
    int zeroCount = 0;
    for (size_t k = 0; k < randoms.size(); k++) {
        if (randoms[k] == 0) {
            zeroCount++;
        }
    }
    for(i = 0; i < sampleSize && i < 300; i++) {
        printf("%i: %i\n", i, randoms[i]);
    }
    printf("\n");
    /* Fix: ensure both timing events have completed before reading them */
    CUDA_CALL(cudaEventSynchronize(stop1));
    CUDA_CALL(cudaEventSynchronize(stop2));
    float initDelay = 0;
    cudaEventElapsedTime(&initDelay, start1, stop1);
    float genDelay = 0;
    cudaEventElapsedTime(&genDelay, start2, stop2);
    printf("number of equal random numbers: %i\n", equalCount);
    printf("number of zero random numbers: %i\n", zeroCount);
    printf("curand init delay: %f\n", initDelay);
    printf("curand generate delay: %f\n", genDelay);
    /* Cleanup — fix: events were never destroyed */
    cudaEventDestroy(start1);
    cudaEventDestroy(stop1);
    cudaEventDestroy(start2);
    cudaEventDestroy(stop2);
    free(hostResults);
    cudaFree(devStates);
    cudaFree(devResults);
    return 0;
}
|
12,700 |
#include <cstdint>
#include <stdexcept>
#include <iostream>
//------------------------------------------------------------------------------
// The kernel
//------------------------------------------------------------------------------
__global__
void transform(uint32_t *output, const uint32_t *input, uint32_t size) {
  // One element per thread; threads beyond the array bounds do nothing.
  const uint32_t gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid < size)
    output[gid] = input[gid] + 1;
}
//------------------------------------------------------------------------------
// Start the show
//------------------------------------------------------------------------------
// Copies a small array to the GPU, increments each element with `transform`,
// copies the result back, and prints input and output.
int main(int argc, char **argv) {
  const uint32_t size = 5;
  // Fix: was a hard-coded `size*4`; derive the byte count from the element type
  const uint32_t memsize = size * sizeof(uint32_t);
  uint32_t input[] = { 1, 2, 3, 4, 5 };
  uint32_t output[size];
  uint32_t *d_input = nullptr;
  uint32_t *d_output = nullptr;
  //----------------------------------------------------------------------------
  // Allocate memory
  //----------------------------------------------------------------------------
  auto status = cudaMalloc(&d_input, memsize);
  if(status != cudaSuccess)
    throw std::runtime_error("Unable to allocate GPU memory for input data");
  status = cudaMalloc(&d_output, memsize);
  if(status != cudaSuccess) {
    cudaFree(d_input);
    throw std::runtime_error("Unable to allocate GPU memory for output data");
  }
  //----------------------------------------------------------------------------
  // Do memory copies and run the kernel — fix: all three steps were unchecked,
  // so any failure silently produced garbage output
  //----------------------------------------------------------------------------
  status = cudaMemcpy(d_input, (void*)input, memsize, cudaMemcpyHostToDevice);
  if(status != cudaSuccess) {
    cudaFree(d_input);
    cudaFree(d_output);
    throw std::runtime_error("Unable to copy input data to the GPU");
  }
  // Fix: `size/1024 + 1` launched an extra empty block when size was a
  // multiple of 1024; use the standard ceiling division instead
  const uint32_t threads = 1024;
  const uint32_t blocks = (size + threads - 1) / threads;
  transform<<<blocks, threads>>>(d_output, d_input, size);
  status = cudaGetLastError();   // kernel launches report errors only this way
  if(status != cudaSuccess) {
    cudaFree(d_input);
    cudaFree(d_output);
    throw std::runtime_error("Kernel launch failed");
  }
  status = cudaMemcpy((void*)output, d_output, memsize, cudaMemcpyDeviceToHost);
  if(status != cudaSuccess) {
    cudaFree(d_input);
    cudaFree(d_output);
    throw std::runtime_error("Unable to copy output data from the GPU");
  }
  cudaFree(d_input);
  cudaFree(d_output);
  //----------------------------------------------------------------------------
  // Print the input and the output
  //----------------------------------------------------------------------------
  std::cout << "Input: ";
  for(auto a: input)
    std::cout << a << ", ";
  std::cout << std::endl;
  std::cout << "Output: ";
  for(auto a: output)
    std::cout << a << ", ";
  std::cout << std::endl;
  return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.