serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
12,701 | #include "includes.h"
#pragma comment(lib,"cublas.lib")
using namespace std;
//==============================Function Prototypes================================
double getRand();
// Output-layer delta for backprop: delta = (target - activation) * sigmoid'(activation).
// One thread per output neuron; assumes the launch covers exactly the output size
// (no bounds parameter is available in this signature) — TODO confirm with caller.
// NOTE(review): the derivative is evaluated as s*(1-s) with s = sigmoid(OutActivation);
// if OutActivation already holds sigmoid outputs, the usual form is a*(1-a) — confirm.
__global__ void deltaCalcOutput(float *OutActivation, float *Outputdelta, float *targets){
    int n = blockIdx.x*blockDim.x + threadIdx.x;
    // Fix: use float expf() instead of double exp(), and compute the sigmoid once
    // instead of four exp() evaluations per element.
    float s = 1.0f / (1.0f + expf(-OutActivation[n]));
    Outputdelta[n] = (targets[n] - OutActivation[n]) * (s * (1.0f - s));
}
12,702 | #include "includes.h"
using namespace std;
#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif
// Element-wise hard-tanh: clamp each in[i] to [min_val, max_val] and store in out[i].
// Grid-stride loop, so any launch configuration covers all `size` elements.
__global__ void hardTanh(float* in, float* out, float min_val, float max_val, int size) {
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    int step = gridDim.x * blockDim.x;
    for (int idx = gid; idx < size; idx += step) {
        float v = in[idx];
        if (v < min_val) {
            v = min_val;
        } else if (v > max_val) {
            v = max_val;
        }
        out[idx] = v;
    }
}
12,703 | /* CudaFuzzy Project - 2015
* Graduate Program in Computer Science - UFPel
*
* \file FuzzyLogic.cu
* This file contains the sequential implementations of fuzzy functions applied to arrays.
*/
#include "FuzzyLogic.cuh"
/*___Not___
* Function: Implement the sequential version of Fuzzy not operation (1-Input).
* Parameters:
* Input: double* x: Input of Basic Fuzzy operation
* Output: double : Response of Fuzzy operation
* Creation date: November, 2015.
* Exception case: -
*/
// Sequential fuzzy NOT (standard complement): N(x) = 1 - x.
double Not(double x) {
    const double complement = 1.0 - x;
    return complement;
}
/*___Not2___
* Function: Implement the sequential version of Fuzzy not operation sqrt(1 - pow(x, 2)).
* Parameters:
* Input: double* x: Input of Basic Fuzzy operation
* Output: double : Response of Fuzzy operation
* Creation date: November, 2015.
* Exception case: -
*/
// Sequential fuzzy NOT (circle complement): N(x) = sqrt(1 - x^2).
double Not2(double x) {
    const double squared = pow(x, 2);
    return sqrt(1.0 - squared);
}
/*___Not3___
* Function: Implement the sequential version of Fuzzy not operation pow(1 - pow(x, 3), 1.0 / 3).
* Parameters:
* Input: double* x: Input of Basic Fuzzy operation
* Output: double : Response of Fuzzy operation
* Creation date: November, 2015.
* Exception case: -
*/
// Sequential fuzzy NOT (cubic complement): N(x) = (1 - x^3)^(1/3).
double Not3(double x) {
    const double cubed = pow(x, 3);
    return pow(1.0 - cubed, 1.0 / 3);
}
/*___And___
* Function: Implement the sequential version of Fuzzy and operation (<).
* Parameters:
* Input: double* x: Input of Basic Fuzzy operation
* Input: double* y: Input of Basic Fuzzy operation
* Output: double : Response of Fuzzy operation
* Creation date: November, 2015.
* Exception case: -
*/
// Sequential fuzzy AND (minimum t-norm): returns the smaller of x and y.
double And(double x, double y) {
    if (x < y) {
        return x;
    }
    return y;
}
/*___And2___
* Function: Implement the sequential version of Fuzzy and operation (*).
* Parameters:
* Input: double* x: Input of Basic Fuzzy operation
* Input: double* y: Input of Basic Fuzzy operation
* Output: double : Response of Fuzzy operation
* Creation date: November, 2015.
* Exception case: -
*/
// Sequential fuzzy AND (product t-norm): returns x * y.
double And2(double x, double y) {
    const double product = x * y;
    return product;
}
/*___Or___
* Function: Implement the sequential version of Fuzzy not operation (>).
* Parameters:
* Input: double* x: Input of Basic Fuzzy operation
* Input: double* y: Input of Basic Fuzzy operation
* Output: double : Response of Fuzzy operation
* Creation date: November, 2015.
* Exception case: -
*/
// Sequential fuzzy OR (maximum s-norm): returns the larger of x and y.
double Or(double x, double y) {
    if (x > y) {
        return x;
    }
    return y;
}
/*___Or2___
* Function: Implement the sequential version of Fuzzy not operation (( x + y ) - ( x * y )).
* Parameters:
* Input: double* x: Input of Basic Fuzzy operation
* Input: double* y: Input of Basic Fuzzy operation
* Output: double : Response of Fuzzy operation
* Creation date: November, 2015.
* Exception case: -
*/
// Sequential fuzzy OR (probabilistic sum): (x + y) - (x * y).
double Or2(double x, double y) {
    const double s = x + y;
    const double p = x * y;
    return s - p;
}
|
12,704 | /*
* Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 16
#endif
/**
* This file contains the CUDA kernel for converting an image into
* a grayscale array of floats. Scaling factors used are:
* 0.299 r + 0.587 g + 0.114 b
*
* @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
* @version 0.1
*/
extern "C" {
// __global__ void grayscale(int h, int w, float* output, uchar3* input);
__global__ void grayscale(int h, int w, float* output, char* input);
}
/*
* Naive grayscale kernel
*
* Bytes go in, floats come out, alpha is ignored
*
* gridDim.x = w / block_size_x (ceiled)
* gridDim.y = h / block_size_y (ceiled)
*/
//__global__ void grayscale(int h, int w, float* output, uchar3* input) {
// Naive grayscale conversion: one thread per pixel.
// `input` is reinterpreted as packed uchar3; per the original commented-out
// byte indexing, component order is c.x = blue, c.y = green, c.z = red.
__global__ void grayscale(int h, int w, float* output, char* input) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    uchar3 *pixels = (uchar3 *)input;
    if (col < w && row < h) {
        uchar3 p = pixels[row*w + col];
        // Standard luma weights: 0.299 R + 0.587 G + 0.114 B.
        output[row*w + col] = 0.299f*p.z + 0.587f*p.y + 0.114f*p.x;
    }
}
|
12,705 |
/* Includes, system */
#include <stdio.h>
/* DEVICE CODE */
// Empty kernel: launched only to exercise the CUDA runtime ("hello world" launch).
__global__ void primer_kernel(){
}
/* HOST CODE*/
/* HOST CODE: launches the empty kernel once and prints a greeting. */
int main(int argc, char** argv)
{
    // One block, one thread, no shared memory, default stream.
    primer_kernel<<<1,1,0,0>>>();
    // Kernel launches are asynchronous; synchronize so the launch completes
    // (and any launch-configuration error would surface) before exiting.
    cudaDeviceSynchronize();
    printf("Para ser original -- HOLA MUNDO\n");
    return 0;
}
|
12,706 | /*==========================================================
* patch2hank.cu
*
* making block hankel matrix
*
* compile command
* nvcc('hank2patch_single.cu -arch sm_35')
*
* This is a MEX-file for MATLAB.
*
*========================================================*/
/* $created: 11-Mar-2015 $ */
// #define fmin(a,b) ((a) < (b) ? (a) : (b))
// #define fmax(a,b) ((a) > (b) ? (a) : (b))
// #include "mex.h"
// #include "cuda.h"
// Inverse of a patch->block-Hankel mapping: rebuilds a sy x sx x sz volume from
// the block-Hankel matrix y by summing every Hankel entry that maps to the same
// output element and dividing by the count m (averaging).
// One thread per output element; requires gridDim.x*blockDim.x >= sy*sx*sz.
// (firy, firx) is presumably the filter/patch size — TODO confirm with caller.
__global__ void hank2patch_single(float* out,float* y,int sy,int sx,int sz,int firy, int firx)
{
int idx = threadIdx.x+blockIdx.x*blockDim.x;
// Block-Hankel sub-dimensions: um x bm outer blocks of un x bn entries.
int um=sy-firy+1,bm=sx-firx+1;
int un=firy,bn=firx;
int ii=0,jj=0,zz=0,jid=0,iid=0,si=0,sj=0,ci=0,cj=0,k=0,m=0;
if ( idx < sy*sx*sz )
{
// Decompose the flat index into (row ii, column jj, slice zz).
zz=(int)(idx/(sy*sx));
k=idx%(sy*sx);
ii=k%(sy);
jj=(int)(k/sy);
out[ii + jj*sy + zz*sy*sx]=(float)0.0f;
// Enumerate all (jid, jj-jid) block splits and (iid, ii-iid) entry splits
// that land inside the Hankel matrix; accumulate their values.
for (jid=0;jid<=jj;jid++)
{
if ((jid<bm) && ((jj-jid) <bn))
{
si=jid*um;
sj=(jj-jid)*un;
for (iid=0;iid<=ii;iid++)
{
if ((iid<um) && ((ii-iid) <un))
{
ci=si+iid;
cj=sj+(ii-iid);
out[ii + jj*sy + zz*sy*sx]+=y[ci+cj*um*bm+zz*um*bm*un*bn];
m+=1;
}
}
}
}
// Average over the m contributing Hankel entries.
out[ii + jj*sy + zz*sy*sx]/=(float)m;
}
}
12,707 | #include "includes.h"
// BFS frontier rotation: publish the current frontier's tail count as the
// previous frontier's, then reset the current tail to zero for the next level.
// Presumably launched with a single thread (<<<1,1>>>) — no synchronization here.
__global__ void frontier_tail_swap_kernel(int* p_frontier_tail_d, int* c_frontier_tail_d) {
    int current_tail = *c_frontier_tail_d;
    *c_frontier_tail_d = 0;
    *p_frontier_tail_d = current_tail;
}
12,708 | #include "includes.h"
// 2D cross-correlation of `source` (width x height, laid out column-major:
// flat index = x*height + y, as implied by the x = i/height, y = i%height
// decomposition below) with a kwidth x kheight kernel centered on each pixel.
// Out-of-bounds taps are skipped (implicit zero padding). Grid-stride loop.
__global__ void kCorrelate(float* source, float* kernel, float* dest, int width, int height, int kwidth, int kheight) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < width * height; i += numThreads) {
float sum = 0;
for (int w = -kwidth/2; w <= kwidth/2; w++) {
for (int h = -kheight/2; h <= (kheight)/2; h++) {
// (x, y): 2D coordinate of the current tap; j: its flat index.
const int x = (i / height) + w;
const int y = (i % height) + h;
const int j = i + (w * height) + h;
if (x >= 0 && x < width && y >= 0 && y < height)
// kwidth*kheight/2 indexes the kernel's center element;
// w*kheight + h offsets from it (assumes odd dimensions — TODO confirm).
sum += source[j] * kernel[(kwidth * kheight / 2) + w * kheight + h];
}
}
dest[i] = sum;
}
}
12,709 | #include <cuda.h>
#include <math.h>
#include <iostream>
#include <thrust/device_vector.h>
static const size_t N = 102400;
// Element-wise vector add, C = A + B, for the first N elements.
// Note: the parameter N shadows the file-scope constant of the same name.
__global__ void kernel(const thrust::device_ptr<float> A, const thrust::device_ptr<float> B, thrust::device_ptr<float> C, int N)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N) {
        return;   // guard for the partially-filled last block
    }
    C[i] = A[i] + B[i];
}
// Fills two device vectors, adds them on the GPU, and reports the residual error.
int main() {
    // Thrust device vectors own the device allocations (freed automatically).
    thrust::device_vector<float> d_A, d_B, d_C;
    d_A.resize(N);
    d_B.resize(N);
    d_C.resize(N);
    // Per-element proxy writes (one tiny H2D copy each — fine for a demo).
    for (int i = 0; i < N; i++) {
        d_A[i] = i;
        d_B[i] = 0.5f * i - 2;
    }
    kernel<<<ceil(double(N) / 512), 512>>>(d_A.data(), d_B.data(), d_C.data(), N);
    // Fix: accumulate the SQUARED error. The original summed signed differences,
    // which can cancel each other out and can make sqrt() of a negative total NaN.
    // (The proxy reads below synchronize with the kernel implicitly.)
    double err = 0;
    for (int i = 0; i < N; i++) {
        double diff = double(d_A[i] + d_B[i]) - double(d_C[i]);
        err += diff * diff;
    }
    std::cout << "Cum error: " << sqrt(err) << std::endl;
    return 0;
}
|
// Returns tid + 1. The barrier forces every thread in the block to reach this
// point before any thread returns (callers must invoke it from uniform control flow).
__device__ int test(int tid) {
    const int incremented = tid + 1;
    __syncthreads();
    return incremented;
}
// Each thread writes test(threadIdx.x) into its own slot of A.
// Single-block indexing only (blockIdx is ignored).
__global__ void tfunction(int* A) {
    const int lane = threadIdx.x;
    A[lane] = test(lane);
}
|
12,711 | /* CUDA finite difference wave equation solver, written by
* Jeff Amelang, 2012
*
* Modified by Kevin Yuh, 2013-14 */
#include <cstdio>
#include <cuda_runtime.h>
#include "Cuda1DFDWave_cuda.cuh"
/* TODO: You'll need a kernel here, as well as any helper functions
to call it */
/* Finite-difference wave update for interior nodes idx in [1, numberOfNodes-2]:
 *   new[idx] = 2*cur[idx] - old[idx]
 *            + courantSquared * (cur[idx+1] - 2*cur[idx] + cur[idx-1])
 * Boundary nodes (0 and numberOfNodes-1) are left untouched.
 * Grid-stride loop: any launch configuration covers all nodes. */
__global__
void
cudaWaveKernel(const float* dev_old_data,
               const float* dev_cur_data,
               float* dev_new_data,
               const size_t numberOfNodes,
               const float courantSquared) {
    const size_t stride = (size_t)blockDim.x * gridDim.x;
    // Fixes vs. the original:
    //  - `idx >= 0` was always true (idx is unsigned);
    //  - `numberOfNodes - 2` underflowed to a huge size_t when numberOfNodes < 2;
    //    `idx + 1 < numberOfNodes` is the equivalent, underflow-free bound;
    //  - the idx == 0 case advanced idx by the stride and then wrote
    //    dev_new_data[idx] WITHOUT re-checking the bound (possible OOB write).
    for (size_t idx = threadIdx.x + (size_t)blockIdx.x * blockDim.x;
         idx + 1 < numberOfNodes;
         idx += stride) {
        if (idx == 0) {
            continue;   // node 0 is a boundary; skip but keep striding safely
        }
        dev_new_data[idx] = 2 * dev_cur_data[idx] - dev_old_data[idx]
            + courantSquared * (dev_cur_data[idx+1] - 2*dev_cur_data[idx] + dev_cur_data[idx-1]);
    }
}
/* Host wrapper: launches cudaWaveKernel with the given configuration on the
 * default stream. No synchronization or error check is performed here; callers
 * must synchronize (or issue a dependent copy) before reading dev_new_data. */
void cudaCallWaveKernel(const unsigned int blocks,
const unsigned int threadsPerBlock,
const float* dev_old_data,
const float* dev_cur_data,
float* dev_new_data,
const size_t numberOfNodes,
const float courantSquared) {
cudaWaveKernel<<<blocks, threadsPerBlock>>> (dev_old_data, dev_cur_data, dev_new_data, numberOfNodes, courantSquared);
}
12,712 | //Add GRID Vector Using GPU
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 50000000
#define MAX_ERR 1e-6
// One thread per element: out = a + b. The guard handles the tail when n is
// not a multiple of the block size.
__global__ void vector_add_grid(float *out, float *a, float *b, int n){
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n) {
        return;
    }
    out[gid] = a[gid] + b[gid];
}
/* Adds two N-element vectors on the GPU and verifies the result on the host. */
int main(int argc, char **argv){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;
    // Host allocations
    a = (float*)malloc(sizeof(float) * N);
    b = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    // Initialize inputs
    for (int i = 0; i < N; i++){
        a[i] = 29.0f;
        b[i] = 57.0f;
    }
    // Device allocations
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);
    // Host -> device transfers
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    // Kernel launch. Fix: proper ceiling division — the original
    // (N + block_size) / block_size allocated one extra (harmless but wasted)
    // block whenever N was an exact multiple of block_size.
    int block_size = 256;
    int grid_size = (N + block_size - 1) / block_size;
    vector_add_grid <<<grid_size, block_size>>> (d_out, d_a, d_b, N);
    // Device -> host transfer (blocking; synchronizes with the kernel)
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // Verification. Fix: the original printed PASSED with the check commented out.
    for (int i = 0; i < N; i++){
        assert(fabs(out[i] - a[i] - b[i]) < MAX_ERR);
    }
    printf("out[0] = %f\n", out[0]);
    printf("PASSED\n");
    // Free device memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    // Free host memory
    free(a);
    free(b);
    free(out);
    return 0;
}
12,713 | #include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <tiffio.h>
#include <stdint.h>
/*
# this program extracts the pixels from a tiff image and copies them to a new image
# the names of the original and copy should be the parameters
#
# to compile:
# gcc -std=c99 tiffcopy.c -o tiffcopy -ltiff
#
# to run:
# ./tiffcopy lena.tif lena.copy.tif
*/
/* Extracts the pixels from an RGB TIFF (argv[1]) and writes them to a new
 * deflate-compressed TIFF (argv[2]). Asserts document the supported format:
 * 8 bits/sample, RGB photometric, 3 samples/pixel. */
int main(int argc, char **argv)
{
    uint32_t width, length;
    TIFF *iimage;
    uint16_t bits_per_sample, photometric;
    uint16_t planar_config;
    uint16_t samples_per_pixel;
    int size;
    assert(argc == 3);
    iimage = TIFFOpen(argv[1], "r");
    assert(iimage);
    assert(TIFFGetField(iimage, TIFFTAG_IMAGEWIDTH, &width));
    assert(width > 0);
    assert(TIFFGetField(iimage, TIFFTAG_IMAGELENGTH, &length));
    assert(length > 0);
    assert(TIFFGetField(iimage, TIFFTAG_BITSPERSAMPLE, &bits_per_sample) != 0);
    assert(bits_per_sample == 8);
    assert(TIFFGetField(iimage, TIFFTAG_PHOTOMETRIC, &photometric));
    assert(photometric == PHOTOMETRIC_RGB);
    assert(TIFFGetField(iimage, TIFFTAG_PLANARCONFIG, &planar_config) != 0);
    assert(TIFFGetField(iimage, TIFFTAG_SAMPLESPERPIXEL, &samples_per_pixel));
    assert(samples_per_pixel == 3);
    size = width * length * samples_per_pixel * sizeof(char);
    printf("size is %d\n",size);
    printf("spp is %d\n",samples_per_pixel);
    char *idata = (char *) malloc(size);
    assert(idata != NULL);
    /* Read all strips back-to-back into idata. */
    char *curr = idata;
    int count = TIFFNumberOfStrips(iimage);
    for (int i = 0; i < count; ++i) {
        tsize_t in = TIFFReadEncodedStrip(iimage, i, curr, -1);
        assert(in != -1);
        curr += in;
    }
    TIFFClose(iimage);
    char *odata = (char *) malloc(size);
    /* Fix: check the allocation BEFORE writing into it — the original asserted
     * odata != NULL only after the copy loop had already dereferenced it. */
    assert(odata != NULL);
    // copy the image, could've used memcpy too
    // FIXME: of course, you have to do more than copy the image :)
    for (int i = 0; i < size; i++) {
        odata[i] = idata[i];
    }
    TIFF *oimage = TIFFOpen(argv[2], "w");
    assert(oimage);
    assert(TIFFSetField(oimage, TIFFTAG_IMAGEWIDTH, width));
    assert(TIFFSetField(oimage, TIFFTAG_IMAGELENGTH, length));
    assert(TIFFSetField(oimage, TIFFTAG_BITSPERSAMPLE, bits_per_sample));
    assert(TIFFSetField(oimage, TIFFTAG_COMPRESSION, COMPRESSION_DEFLATE));
    assert(TIFFSetField(oimage, TIFFTAG_PHOTOMETRIC, photometric));
    assert(TIFFSetField(oimage, TIFFTAG_SAMPLESPERPIXEL, samples_per_pixel));
    assert(TIFFSetField(oimage, TIFFTAG_PLANARCONFIG, planar_config));
    assert(TIFFSetField(oimage, TIFFTAG_ROWSPERSTRIP, length));
    /* Write the whole image as one strip. */
    tsize_t on = size;
    assert(TIFFWriteEncodedStrip(oimage, 0, odata, on) == on);
    TIFFClose(oimage);
    free(idata);
    free(odata);
    return 0;
}
|
12,714 | // mmm.cu
// Guide used: http://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html
#include <stdio.h>
#include <sys/time.h>
#include <stdlib.h>
#include <iostream>
using namespace std;
//----------------------------------- Structures and Globals---------------------------------------------
typedef struct {
int dimension1;
int dimension2;
} ArrayMetadata2D;
// Metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// Pointers for input and output arrays in the host memory
float *A, *B, *C, *C_CPU, *B_TRANS;
// Pointers for input and output arrays in the device memory (NVIDIA DRAM)
float *A_GPU, *B_GPU, *C_GPU;
// Pointers for padded A and B, and unpadded C_RES, for the GPU final result
float *A_PAD, *B_PAD, *C_RES;
// TODO: tweak these?????? LOL
//const int BLOCK_COUNT = 32;
//const int THREADS_PER_BLOCK = 256;
const int BLOCK_SIZE = 16;
// const int WARP_SIZE = 32;
int pad;
//----------------------------------- Host Function Definitions -----------------------------------------
void allocateAndInitializeAB();
void computeCpuMMM();
void computeGpuMMM();
void copyMatricesToGPU();
void copyResultFromGPU();
void compareHostAndGpuOutput();
void die(const char *error);
void check_error(cudaError e);
//----------------------------------- CUDA Function Definitions -----------------------------------------
// TODO: fix
__global__ void mmm_kernel(float *A, float *B, float *C, int Ax, int By);
//--------------------------------------------- CODE ----------------------------------------------------
// Driver: parses matrix dimensions from argv (defaults to 100x100 square),
// transposes B, zero-pads A and B^T up to the next multiple of BLOCK_SIZE,
// runs the tiled GPU MMM, then strips the padding from the result into C_RES.
// NOTE(review): several spots assume square matrices (the comments below admit
// this); the transpose/padding indexing is only correct in that case.
int main(int argc, char **argv) {
A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 100;
A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;
C_MD.dimension1 = A_MD.dimension1;
C_MD.dimension2 = B_MD.dimension2;
printf("Matrix A is %d-by-%d\n", A_MD.dimension1, A_MD.dimension2);
printf("Matrix B is %d-by-%d\n", B_MD.dimension1, B_MD.dimension2);
printf("Matrix C is %d-by-%d\n", C_MD.dimension1, C_MD.dimension2);
allocateAndInitializeAB();
// Matrix matrix multiplication in the CPU (disabled; timing scaffolding kept)
clock_t start = clock();
// computeCpuMMM();
clock_t end = clock();
// double elapsedCPU = (end - start) / (double) CLOCKS_PER_SEC;
// printf("Computation time in the CPU: %f seconds\n", elapsedCPU);
//---------- MY ADDED STUFF ----------//
// Transpose B into B_TRANS so the kernel reads both operands row-wise.
size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
B_TRANS = (float*) malloc(sizeofB);
// print trans of B
for (int j = 0; j < B_MD.dimension2; ++j) {
for (int i = 0; i < B_MD.dimension1; ++i) {
B_TRANS[i + j * B_MD.dimension1] = B[j + i * B_MD.dimension1];
//printf("%f ", B[j + i * B_MD.dimension1]);
}
//printf("\n");
}
// we could switch B's dimensions, but since we only ever input square matrices, we don't care :)
// Pad A and B
// We know they're the same size so do it at the same time
// pad = distance from Ax up to the next multiple of BLOCK_SIZE.
int Ax = A_MD.dimension1;
int count = Ax;
while (count % BLOCK_SIZE != 0) {
count++;
}
pad = count - Ax;
// Allocate PAD arrays
size_t sizeofPADs = (A_MD.dimension1 + pad) * (A_MD.dimension2 + pad) * sizeof(float);
A_PAD = (float*) malloc(sizeofPADs);
B_PAD = (float*) malloc(sizeofPADs);
// Copy A and B_TRANS row by row, appending `pad` zeros at the end of each row.
// (The i-- below re-processes the element that triggered the row boundary.)
int rowcount = 0;
int padindex = 0;
int p = 0;
for (int i = 0; i < A_MD.dimension1 * A_MD.dimension2; i++) {
// Do padding because we're at the end of a row
if (rowcount == Ax) {
// Add however much padding there is
while (p < pad) {
A_PAD[padindex] = 0.0;
B_PAD[padindex] = 0.0;
p++;
padindex++;
}
p = 0;
rowcount = 0;
i--;
}
else {
A_PAD[padindex] = A[i];
B_PAD[padindex] = B_TRANS[i];
padindex++;
rowcount++;
}
}
// Add however many rows of padding we need
for (int i = A_MD.dimension2; i < (A_MD.dimension2 + pad); i++) {
for (int j = 0; j < (A_MD.dimension1 + pad); j++) {
A_PAD[i * (A_MD.dimension1 + pad) + j] = 0.0;
B_PAD[i * (B_MD.dimension1 + pad) + j] = 0.0;
}
}
// print B first
// for (int i = 0; i < B_MD.dimension1 * B_MD.dimension2; i++) {
// printf("%f ", B_TRANS[i]);
// }
// printf("\n\n");
// // print B_PAD
// for (int i = 0; i < (A_MD.dimension1 + pad) * (A_MD.dimension2 + pad); i++) {
// printf("%f ", B_PAD[i]);
// }
// printf("\n\n");
// MMM on the GPU
start = clock();
computeGpuMMM();
end = clock();
double elapsedGPU = (end - start) / (double) CLOCKS_PER_SEC;
printf("Computation time in the GPU: %f seconds\n", elapsedGPU);
// Compute the speedup or slowdown
// if (elapsedGPU > elapsedCPU) {
// printf("\nCPU outperformed GPU by %.2fx\n", (float) elapsedGPU / (float) elapsedCPU);
// } else {
// printf("\nGPU outperformed CPU by %.2fx\n", (float) elapsedCPU / (float) elapsedGPU);
// }
// Print out CPU result
// for (int i = 0; i < C_MD.dimension1 * C_MD.dimension2; i++) {
// printf("%f ", C[i]);
// }
// printf("\n\n");
// // Print out GPU result, which is C_CPU
// for (int i = 0; i < (C_MD.dimension1 + pad) * C_MD.dimension2; i++) {
// printf("%f ", C_CPU[i]);
// }
// printf("\n\n");
// Copy C_CPU to a smaller C_RES
// Allocate C_RES to be normal size of C
// Strip the row padding: skip `pad` extra entries at the end of each padded row.
size_t sizeofCRES = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
C_RES = (float*) malloc(sizeofCRES);
int offset = 0;
for (int i = 0; i < C_MD.dimension1 * C_MD.dimension2; i++) {
if (i % C_MD.dimension1 == 0 && i != 0) {
offset += pad;
}
C_RES[i] = C_CPU[i + offset];
}
// Print C_REST
// for (int i = 0; i < C_MD.dimension1 * C_MD.dimension2; i++) {
// printf("%f ", C_RES[i]);
// }
// Check the correctness of the GPU results (disabled; needs computeCpuMMM above)
// compareHostAndGpuOutput();
return 0;
}
// Allocate and initialize A and B using a random number generator
// Allocates A and B (row-major) and fills both with pseudo-random values
// in [0, 0.999], seeding rand() from the wall clock.
void allocateAndInitializeAB() {
    size_t bytesA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
    A = (float*) malloc(bytesA);
    srand(time(NULL));
    for (int row = 0; row < A_MD.dimension1; row++) {
        for (int col = 0; col < A_MD.dimension2; col++) {
            A[row * A_MD.dimension2 + col] = (rand() % 1000) * 0.001;
        }
    }
    size_t bytesB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
    B = (float*) malloc(bytesB);
    for (int row = 0; row < B_MD.dimension1; row++) {
        for (int col = 0; col < B_MD.dimension2; col++) {
            B[row * B_MD.dimension2 + col] = (rand() % 1000) * 0.001;
        }
    }
}
// Allocate memory in the GPU for all matrices, and copy A and B content from the host CPU memory to the GPU memory
// Allocates device buffers for the padded matrices, uploads A_PAD and B_PAD,
// and allocates (but does not initialize) the padded output C_GPU.
void copyMatricesToGPU() {
    const size_t bytesA = (A_MD.dimension1 + pad) * (A_MD.dimension2 + pad) * sizeof(float);
    check_error(cudaMalloc((void **) &A_GPU, bytesA));
    check_error(cudaMemcpy(A_GPU, A_PAD, bytesA, cudaMemcpyHostToDevice));
    const size_t bytesB = (B_MD.dimension1 + pad) * (B_MD.dimension2 + pad) * sizeof(float);
    check_error(cudaMalloc((void **) &B_GPU, bytesB));
    check_error(cudaMemcpy(B_GPU, B_PAD, bytesB, cudaMemcpyHostToDevice));
    const size_t bytesC = (C_MD.dimension1 + pad) * (C_MD.dimension2 + pad) * sizeof(float);
    check_error(cudaMalloc((void **) &C_GPU, bytesC));
}
// Copy results from C_GPU which is in GPU card memory to C_CPU which is in the host CPU for result comparison
// Downloads the padded result matrix from C_GPU into freshly-allocated C_CPU.
void copyResultFromGPU() {
    size_t sizeofC = (C_MD.dimension1 + pad) * (C_MD.dimension2 + pad) * sizeof(float);
    C_CPU = (float*) malloc(sizeofC);
    // Fix: the original passed an unchecked malloc result straight to cudaMemcpy.
    if (C_CPU == NULL) die("Error allocating CPU memory for the GPU result\n");
    check_error(cudaMemcpy(C_CPU, C_GPU, sizeofC, cudaMemcpyDeviceToHost));
}
// Do a straightforward matrix-matrix multiplication in the CPU notice that this implementation can be massively improved in the CPU by doing proper cache blocking but we are not providing you the efficient CPU implementation as that reveals too much about the ideal GPU implementation
// Reference host implementation: C = A * B with the classic i-j-k triple loop
// (row-major, float accumulation — same rounding order as the original).
void computeCpuMMM() {
    // Allocate the result matrix for the CPU computation
    size_t bytesC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
    C = (float*) malloc(bytesC);
    for (int i = 0; i < A_MD.dimension1; i++) {
        const int rowA = i * A_MD.dimension2;
        const int rowC = i * C_MD.dimension2;
        for (int j = 0; j < B_MD.dimension2; j++) {
            float acc = 0;
            for (int k = 0; k < B_MD.dimension1; k++) {
                acc += A[rowA + k] * B[k * B_MD.dimension2 + j];
            }
            C[rowC + j] = acc;
        }
    }
}
// Function to determine if the GPU computation is done correctly by comparing the output from the GPU with that from the CPU
// Element-wise comparison of the CPU result C against the GPU result C_RES,
// with an absolute tolerance of 0.01; reports every mismatch and a summary.
void compareHostAndGpuOutput() {
    const int total = C_MD.dimension1 * C_MD.dimension2;
    int mismatches = 0;
    for (int i = 0; i < total; i++) {
        if (fabs(C[i] - C_RES[i]) > 0.01) {
            mismatches++;
            printf("mismatch at index %i: %f\t%f\n", i, C[i], C_RES[i]);
        }
    }
    if (mismatches > 0) {
        printf("Computation is incorrect: outputs do not match in %d indexes\n", mismatches);
    } else {
        printf("Computation is correct: CPU and GPU outputs match\n");
    }
}
// Prints the specified error message and then exits
// Prints the given error message and terminates the process with a
// nonzero (failure) exit status.
void die(const char *error) {
printf("%s", error);
exit(1);
}
// If the specified error code refers to a real error, report it and quit the program
// Aborts with a readable message when a CUDA API call returned an error;
// no-op on cudaSuccess.
void check_error(cudaError e) {
    if (e == cudaSuccess) {
        return;
    }
    printf("\nCUDA error: %s\n", cudaGetErrorString(e));
    exit(1);
}
//---------- MY ADDED STUFF ----------//
// TODO: MAKE THIS RIGHT
// KERNEL: A GPU kernel that does MMM
// __global__ void mmm_kernel(float *A, float *B, float *C, int Ax, int Ay, int Bx, int By, int pad) {
// // CURRENTLY THIS DOES A WEIRD VECTOR ADD IDK
// // Determine the index of the thread among all GPU threads
// int i = blockIdx.x * blockDim.x + threadIdx.x;
// int j = blockIdx.y * blockDim.y + threadIdx.y;
// //int threadCount = gridDim.x * blockDim.x;
// if (i < Ax && j < By) {
// // Compute C[i][j]
// // Multiply A row i with B row j and add it to sum
// float sum = 0.0;
// for (int x = 0; x < Ax; x++) {
// //sum += A[x+(i*Ay)] * B[(x*By)+j]; // for when B is NOT transposed
// sum += A[x+(i*(Ay+pad))] * B[x+(j*(By+pad))]; // for when B IS transposed
// }
// // Assign sum to C[i][j]
// C[j+(i*(By+pad))] = sum;
// }
// }
// Returns a pointer to the (r, c) BLOCK_SIZE x BLOCK_SIZE tile of the
// row-major matrix M whose row width (in elements) is w.
// NOTE: currently unused — mmm_kernel inlines the equivalent arithmetic.
__device__ float *getSubMatrix(float *M, int w, int r, int c) {
return &M[(w * BLOCK_SIZE * r) + (BLOCK_SIZE * c)];
}
// Tiled matrix-matrix multiply using BLOCK_SIZE x BLOCK_SIZE shared-memory
// tiles. B is expected PRE-TRANSPOSED (see the Bshared[threadIdx.x][k] read in
// the inner product) and both inputs padded so Ax and By are multiples of
// BLOCK_SIZE. Assumes blockDim == (BLOCK_SIZE, BLOCK_SIZE).
__global__ void mmm_kernel(float *A, float *B, float *C, int Ax, int By) {
// Get thread ID within the block
// int tid = threadIdx.x + threadIdx.y * blockDim.x;
// // Get warp ID
// int warpid = tid / WARP_SIZE;
// Store blocks in shared memory
__shared__ float Ashared[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bshared[BLOCK_SIZE][BLOCK_SIZE];
// Get submatrix of C
float *subC = &C[(Ax * BLOCK_SIZE * blockIdx.y) + (BLOCK_SIZE * blockIdx.x)]; //getSubMatrix(C, Ax, bRow, bCol);
float sum = 0.0;
// For each submatrix in A/B
// Multiply subA subB i,j and add to sum
int num_blocks = Ax / BLOCK_SIZE;
// Per-thread offsets into a tile of row width Ax / By respectively.
int rAxc = threadIdx.y * Ax + threadIdx.x;
int rByc = threadIdx.y * By + threadIdx.x;
float *subA, *subB;
for (int s = 0; s < num_blocks; s++) {
// Get submatrix of A
subA = &A[(Ax * BLOCK_SIZE * blockIdx.y) + (BLOCK_SIZE * s)]; //getSubMatrix(A, Ax, bRow, s);
// Get submatrix of B
subB = &B[(By * BLOCK_SIZE * blockIdx.x) + (BLOCK_SIZE * s)]; //getSubMatrix(B, By, bCol, s); // make sure this uses bCol!!! not bRow omg
// First warp loads values
// if (warpid == 0) {
// Each thread loads a few values in the block
Ashared[threadIdx.y][threadIdx.x] = subA[rAxc];
Bshared[threadIdx.y][threadIdx.x] = subB[rByc];
// }
// Synch to make sure everything is loaded before doing any computation
__syncthreads();
// Multiply this thread's row and column together
// (Bshared is read by column because B arrives transposed.)
for (int k = 0; k < BLOCK_SIZE; k++) {
sum += Ashared[threadIdx.y][k] * Bshared[threadIdx.x][k];
}
// Synch again so that everyone is done
__syncthreads();
}
// Write subC back out
subC[rAxc] = sum;
}
// DO IT TO IT
// MMM on GPU
// Orchestrates the GPU MMM: upload padded inputs, launch the tiled kernel,
// download the padded result, and free device memory — timing each phase.
void computeGpuMMM() {
// Transfer input to GPU
clock_t start = clock();
copyMatricesToGPU();
clock_t end = clock();
double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("GPU: Transfer to GPU: %f seconds\n", elapsed);
// Execute the kernel to compute the vector sum on the GPU
start = clock();
// Note that we are using a one dimensional grid in this calculation as that is ideal for this
// particular problem. For some other problem, a 2D or even a 3D grid may be appropriate. The
// dimensionality of the grid is supposed to help you decompose the algorithmic logic inside the
// GPU kernel. In particular, how you decide what thread should do what instruction. It does not
// affect the performance of the kernel.
//add_vectors_kernel <<<BLOCK_COUNT, THREADS_PER_BLOCK>>> (A_GPU, B_GPU, C_GPU, N);
// Padded dimensions are exact multiples of BLOCK_SIZE, so no ceil-div needed.
dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 numBlocks((A_MD.dimension1 + pad) / threadsPerBlock.x, (B_MD.dimension2 + pad) / threadsPerBlock.y);
mmm_kernel <<<numBlocks, threadsPerBlock>>> (A_GPU, B_GPU, C_GPU, A_MD.dimension1 + pad, B_MD.dimension2 + pad);
// Make the CPU main thread wait for the GPU kernel call to complete
// NOTE(review): cudaThreadSynchronize is deprecated; cudaDeviceSynchronize
// is the modern equivalent.
cudaThreadSynchronize(); // This is only needed for timing and error-checking purposes
end = clock();
elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("GPU: Kernel Execution: %f seconds\n", elapsed);
// Check for kernel errors
check_error(cudaGetLastError());
// Allocate CPU memory for the result
// size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
// float *C_CPU = (float *) malloc(sizeofC);
// if (C_CPU == NULL) die("Error allocating CPU memory");
// Transfer result back to CPU
start = clock();
copyResultFromGPU();
end = clock();
elapsed = (end - start) / (double) CLOCKS_PER_SEC;
printf("GPU: Transfer from GPU: %f seconds\n", elapsed);
// Free the GPU memory
check_error(cudaFree(A_GPU));
check_error(cudaFree(B_GPU));
check_error(cudaFree(C_GPU));
}
/*
TODO:
- transpose B: minimizes bank conflicts and has better memory access
- memory coalescing? chunks are aligned in either 32, 64, or 128 bytes (probs 128)
need to divide by size of float or something (like cache blocking)
- floats are 4 bytes
- warp size is 32
- all data structures need to be 128 (32*4) byte aligned (aka 32 float aligned)
- compute padding for A and B
+ find next value greater than or equal to Ax that's a multiple of 32, call it next32
+ take next32 - Ax to get how much padding you need
+ pad A by copying it and adding zeroes as padding to each row
+ same process for padding B, and we know its dimensions are the same as A
- find out warp size, block size should be a multiple of warp size (probs 32 or something)
- choose block_count and threads_per_block appropriately
*/
|
12,715 | #include <cuda.h>
#include <iostream>
#include <sys/time.h>
using namespace std;
// double precision atomic add function
// (there is no intrinsic double precision atomicAdd)
// double precision atomic add function
// (there is no intrinsic double precision atomicAdd)
// Canonical CAS loop: reinterpret the double's bits as a 64-bit integer,
// retry the compare-and-swap until no other thread changed *address between
// our read and our write. Returns the value at *address BEFORE the add.
// (Native atomicAdd(double*) exists on SM60+; this works everywhere.)
__device__ double atomicAdd_d(double* address, double val) {
unsigned long long int* address_as_ull = (unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val
+ __longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// Every launched thread atomically adds 1.0 to a[0]; the final value therefore
// equals the total thread count. The n parameter is unused.
__global__ void atomic(int n, double *a) {
//a[0] += 1.0; // gives wrong result
// instead use atomic function
atomicAdd_d(&a[0], 1.0);
}
// Demo: 1024 threads each atomically add 1.0 to data[0] (initially 0.0),
// so the expected printed result is data[0] = 1024.
int main() {
int n = 1024;
double *data = (double*) malloc(n * sizeof(double));
for (int i=0; i<n; i++) {
data[i] = (double)i;
}
double *data_dev;
cudaMalloc((void**) &data_dev, n * sizeof(double));
cudaMemcpy(data_dev, data, n * sizeof(double) , cudaMemcpyHostToDevice);
cudaError_t error = cudaGetLastError();
cout << "copy to device = " << error << " : " << cudaGetErrorString(error) << endl;
int nBlocks = 1;
int nThreads = 1024;
atomic <<< nBlocks, nThreads >>>(n, data_dev);
error = cudaGetLastError();
cout << "run kernel = " << error << " : " << cudaGetErrorString(error) << endl;
// Blocking memcpy also synchronizes with the kernel before reading data back.
cudaMemcpy(data, data_dev, n * sizeof(double) , cudaMemcpyDeviceToHost);
error = cudaGetLastError();
cout << "copy from device = " << error << " : " << cudaGetErrorString(error) << endl;
cudaFree(data_dev);
cout << "data[0] = " << data[0] << endl;
free(data);
}
|
12,716 | #ifndef VECTOR_GPU_H
#define VECTOR_GPU_H
#ifdef __DEVICE_EMULATION__
#include <assert.h>
#include <stdio.h>
#endif
//
// Minimal 3-component float vector for device code.
struct vect3d_gpu
{
float data[3];
// Default constructor: zero vector.
inline __device__
vect3d_gpu()
{
data[0] = 0;
data[1] = 0;
data[2] = 0;
}
// Component-wise constructor (x, y, z).
inline __device__
vect3d_gpu(float x, float y, float z)
{
data[0] = x;
data[1] = y;
data[2] = z;
}
// Unchecked component access; i must be in [0, 2].
inline __device__
float & operator [](int i)
{
return data[i];
}
};
// vVec = vEndPoint - vStartPoint (component-wise).
static inline __device__
void points2vec_gpu(vect3d_gpu &vStartPoint, vect3d_gpu &vEndPoint, vect3d_gpu &vVec)
{
    for (int k = 0; k < 3; ++k) {
        vVec.data[k] = vEndPoint.data[k] - vStartPoint.data[k];
    }
}
// Overload taking raw float[3] endpoints: vVec = vEndPoint - vStartPoint.
static inline __device__
void points2vec_gpu(float vStartPoint[3], float vEndPoint[3], vect3d_gpu &vVec)
{
    for (int k = 0; k < 3; ++k) {
        vVec.data[k] = vEndPoint[k] - vStartPoint[k];
    }
}
// Euclidean length of *vVec (single-precision sqrtf).
static inline __device__
float vecLen_gpu(vect3d_gpu *vVec)
{
    float sumsq = 0.0f;
    for (int k = 0; k < 3; ++k) {
        sumsq += vVec->data[k] * vVec->data[k];
    }
    return sqrtf(sumsq);
}
// vScaledVec = fScale * vOrigVec (component-wise).
static inline __device__
void vecScale_gpu(vect3d_gpu &vOrigVec, float fScale, vect3d_gpu &vScaledVec)
{
    for (int k = 0; k < 3; ++k) {
        vScaledVec.data[k] = fScale * vOrigVec.data[k];
    }
}
// vEndPoint = vStartPoint + vVec (component-wise; doubles as vector add).
static inline __device__
void point2point_gpu(vect3d_gpu &vStartPoint, vect3d_gpu &vVec, vect3d_gpu &vEndPoint)
{
    for (int k = 0; k < 3; ++k) {
        vEndPoint.data[k] = vStartPoint.data[k] + vVec.data[k];
    }
}
// vecr = vec1 x vec2 (right-handed cross product).
// NOTE(review): if vecr aliases vec1 or vec2, later components read values
// already overwritten — callers must pass a distinct output vector.
static inline __device__
void cross_product_gpu(vect3d_gpu &vec1, vect3d_gpu &vec2, vect3d_gpu &vecr)
{
vecr.data[0] = vec1.data[1] * vec2.data[2] - vec1.data[2] * vec2.data[1];
vecr.data[1] = vec1.data[2] * vec2.data[0] - vec1.data[0] * vec2.data[2];
vecr.data[2] = vec1.data[0] * vec2.data[1] - vec1.data[1] * vec2.data[0];
}
// Scalar (dot) product of vec1 and vec2.
static inline __device__
float dot_product_gpu(vect3d_gpu &vec1, vect3d_gpu &vec2)
{
    // Same left-to-right accumulation order as a single summed expression.
    float acc = vec1.data[0] * vec2.data[0];
    acc += vec1.data[1] * vec2.data[1];
    acc += vec1.data[2] * vec2.data[2];
    return acc;
}
// Scales vec in place to unit length; the zero vector is left untouched.
static inline __device__
void normalize_gpu(vect3d_gpu &vec)
{
float fLen = vecLen_gpu(&vec);
if(fLen == 0) return;
// Fix: a plain reciprocal division is both faster and more accurate than
// __powf(fLen, -1), which is a reduced-precision fast-math approximation.
float inv = 1.0f / fLen;
vec.data[0] *= inv;
vec.data[1] *= inv;
vec.data[2] *= inv;
}
// Copies a raw float[3] into destVec.
static inline __device__
void vecCopy_gpu(vect3d_gpu &destVec, float *srcVec)
{
    for (int c = 0; c < 3; ++c)
        destVec.data[c] = srcVec[c];
}
// Copies srcVec into destVec component-wise.
static inline __device__
void vecCopy_gpu(vect3d_gpu &destVec, vect3d_gpu &srcVec)
{
    for (int c = 0; c < 3; ++c)
        destVec.data[c] = srcVec.data[c];
}
///
/// Secondary functions
///
// Reflects vOrigViewVec about vNormal into vReflectViewVec:
// R = V + 2 * ((-V) . n / |n|) * n_hat. vNormal need not be unit length
// (it is normalized locally).
// NOTE(review): vNormalizedNormal starts from a default-constructed vector
// and is "copied" via point2point_gpu — this assumes the default ctor
// zero-initializes data[]; TODO confirm against the ctor above this chunk.
static inline __device__
void reflectVec_gpu(vect3d_gpu &vOrigViewVec, vect3d_gpu &vNormal, vect3d_gpu &vReflectViewVec)
{
vect3d_gpu vReverseViewVec; /*vReverseViewVec.init();*/ vecScale_gpu(vOrigViewVec, -1, vReverseViewVec);
vect3d_gpu vDiagonalNormalVec; /*vDiagonalNormalVec.init();*/
// Twice the projection length of the reversed view vector onto the normal.
float fLen = dot_product_gpu(vReverseViewVec, vNormal) / vecLen_gpu(&vNormal) * 2.0f;
vect3d_gpu vNormalizedNormal;/* vNormalizedNormal.init();*/
point2point_gpu(vNormalizedNormal, vNormal, vNormalizedNormal);
normalize_gpu(vNormalizedNormal);
vecScale_gpu(vNormalizedNormal, fLen, vDiagonalNormalVec);
point2point_gpu(vDiagonalNormalVec, vOrigViewVec, vReflectViewVec);
}
// Refracts vOrigViewVec through the surface with normal vNormal using
// Snell's law; refraK is the ratio n1/n2 of refractive indices.
// The incoming vector is normalized locally; the result is written to
// vRefractedVec. Total internal reflection (negative value under sqrtf)
// is NOT handled outside emulation builds — cos2 becomes NaN then.
static inline __device__
void refractVec_gpu(vect3d_gpu &vOrigViewVec, vect3d_gpu &vNormal, vect3d_gpu &vRefractedVec, float refraK)
{
// TODO: when view vec is very close to the plane.
// there'll be different behaviors of light
// Ref: http://en.wikipedia.org/wiki/Snell's_law
//
vect3d_gpu vOrigNormViewVec; /*vOrigNormViewVec.init();*/
// Copy-then-normalize (point2point from a default-constructed vector acts
// as a copy — assumes zero-initialized default ctor; TODO confirm).
point2point_gpu(vOrigNormViewVec, vOrigViewVec, vOrigNormViewVec);
normalize_gpu(vOrigNormViewVec);
vect3d_gpu vMinusL; /*vMinusL.init();*/
vecScale_gpu(vOrigNormViewVec, -1, vMinusL);
// cos1 = cos of the incident angle; cos2 = cos of the refracted angle.
float cos1 = dot_product_gpu(vNormal, vMinusL);
float cos2 = sqrtf(1 - refraK * refraK * (1 - cos1 * cos1)); // cuda sqrt
#ifdef __DEVICE_EMULATION__
// assert( (1 - refraK * refraK * (1 - cos1 * cos1)) >= 0 );
if( !((1 - refraK * refraK * (1 - cos1 * cos1)) >= 0 ) )
{
printf(".data[refractVec_gpu] : Man, sth. is wrong...\n");
return;
}
#endif
// (n1/n2)*l
vect3d_gpu tmp; /*tmp.init();*/
point2point_gpu(tmp, vOrigNormViewVec, tmp);
vecScale_gpu(tmp, refraK, tmp);
// (n1/n2*cos1 +- cos2) — sign chosen by which side of the surface we hit.
vecCopy_gpu(vRefractedVec, vNormal);
if(cos1 > 0)
{
vecScale_gpu(vRefractedVec, refraK * cos1 - cos2, vRefractedVec);
}
else
{
vecScale_gpu(vRefractedVec, refraK * cos1 + cos2, vRefractedVec);
}
// combined..
point2point_gpu(tmp, vRefractedVec, vRefractedVec);
}
// Ray evaluation: vTargetPoint = pEyePos + t * vViewVec.
// NOTE(review): relies on default-constructed vect3d_gpu being zeroed so
// that point2point_gpu acts as a copy — TODO confirm the default ctor.
static inline __device__
void projectPoint_gpu(vect3d_gpu &pEyePos, vect3d_gpu &vViewVec, float t, vect3d_gpu &vTargetPoint)
{
vect3d_gpu vStartPoint, tmp; /*vStartPoint.init(); tmp.init();*/
point2point_gpu(vStartPoint, pEyePos, vStartPoint);
point2point_gpu(tmp, vViewVec, tmp); vecScale_gpu(tmp, t, tmp);
point2point_gpu(vStartPoint, tmp, vTargetPoint);
}
// Distance from vPoint to the line pEyePos + t * pViewVec.
// The parameter t of the closest point on the line is returned via *pT.
static inline __device__
float point2line_gpu( vect3d_gpu & vPoint, vect3d_gpu &pEyePos, vect3d_gpu &pViewVec, float *pT)
{
vect3d_gpu k; /*k.init();*/
points2vec_gpu(pEyePos, vPoint, k);
// Projection parameter of vPoint onto the (non-normalized) direction.
float t = dot_product_gpu(k, pViewVec) / dot_product_gpu(pViewVec, pViewVec);
*pT = t;
vect3d_gpu vLen; /*vLen.init();*/
vect3d_gpu tmp;/* tmp.init();*/
vect3d_gpu tmp2; /*tmp2.init();*/
// vLen = t*dir - k: the perpendicular component from the point to the line.
vecScale_gpu(pViewVec, t, tmp);
vecScale_gpu(k, -1, tmp2);
point2point_gpu(tmp2, tmp, vLen);
return vecLen_gpu(&vLen);
}
// Unsigned distance from `point` to the plane through `planeCtr` whose
// unit normal is `normalizedPlaneNorm` (caller must pre-normalize it).
static inline __device__
float point2plane_gpu(float *point, float *planeCtr, vect3d_gpu &normalizedPlaneNorm)
{
    vect3d_gpu toVec;
    points2vec_gpu(planeCtr, point, toVec);
    // fabsf avoids the float->double->float round trip of fabs() in device code.
    return fabsf(dot_product_gpu(normalizedPlaneNorm, toVec));
}
#endif |
12,717 | #include "includes.h"
// Naive dense matrix multiply: C = A * B with row-major A (m x n),
// B (n x p), C (m x p). One thread per output element; threads outside
// the matrix exit early.
__global__ void gpuMatMul(float *a, float *b, float *c, int m, int n, int p) {
    uint row = blockDim.x * blockIdx.x + threadIdx.x;
    uint col = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= m || col >= p)
        return;
    float acc = 0;
    for (int k = 0; k < n; ++k)
        acc += a[row * n + k] * b[k * p + col];
    c[row * p + col] = acc;
}
12,718 | #include <iostream>
#include <assert.h>
#include <cstdlib>
#include <cuda.h>
#include <cuda_runtime.h>
const int SIZE = 1024;
// Reverses V[0..n-1] in place: thread Idx swaps V[Idx] with V[n-Idx-1].
// Guard is strict (< n/2): the old `<= n/2` made thread n/2 re-swap the
// pair already handled by thread n/2-1 for even n (a race that could
// leave the middle pair un-reversed).
__global__ void transpose(int *V, int n) {
    int Idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (Idx < n/2) {
        int tmp = V[Idx];
        V[Idx] = V[n-Idx-1];
        V[n-Idx-1] = tmp;
    }
}
// Reverses Vec[0..N-1] via a statically sized shared-memory staging array.
// Single-block kernel; requires N <= SIZE and N <= blockDim.x.
// Loads/stores are guarded by Idx < N (the launcher always starts 1024
// threads, so the old unguarded accesses read/wrote past Vec for N < 1024);
// the barrier itself stays unconditional so all threads reach it.
__global__ void static_trans(int *Vec, int N) {
    __shared__ int array[SIZE];
    int Idx = threadIdx.x; //we have only one block - condition
    if (Idx < N)
        array[Idx] = Vec[Idx];
    __syncthreads();
    if (Idx < N)
        Vec[Idx] = array[N-Idx-1];
}
// Reverses Vec[0..N-1] via dynamically sized shared memory. The launch
// must supply at least N * sizeof(int) bytes of dynamic shared memory.
// Accesses are guarded by Idx < N (the launcher starts a full 1024-thread
// block regardless of N); the barrier stays unconditional.
__global__ void dynamic_trans(int *Vec, int N) {
    extern __shared__ int array[];
    int Idx = threadIdx.x;
    if (Idx < N)
        array[Idx] = Vec[Idx];
    __syncthreads();
    if (Idx < N)
        Vec[Idx] = array[N-Idx-1];
}
// Benchmarks three reversal kernels (global, static shared, dynamic shared)
// on an n-element vector given on the command line. All kernels run in a
// single 1024-thread block, so n must not exceed SIZE.
int main(int argc, char **argv) {
    assert(argc==2);
    int n = atoi(argv[1]);
    assert(n > 0 && n <= SIZE);   // single-block kernels rely on n <= 1024
    size_t size = n * sizeof(int);
    int *V = (int*)malloc(size);
    int *V_t = (int*)malloc(size);
    for (int i = 0; i < n; i++) {
        V[i] = i;
    }
    int block = 1024;
    int grid = 1;
    // int grid = (n / 2 - 1) / block + 1;
    int *V_dev;
    cudaMalloc(&V_dev, size);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(V_dev, V, size, cudaMemcpyHostToDevice);
    transpose<<<grid, block>>>(V_dev, n);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaMemcpy(V_t, V_dev, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << "Common time is: " << ms << std::endl;
    for (int i = 0; i < n; i++) {
        std::cout << V_t[i] << " ";
    }
    std::cout << std::endl;
    /////////////////////////////////////////////////
    //STATIC SHARED
    cudaEventRecord(start);
    cudaMemcpy(V_dev, V, size, cudaMemcpyHostToDevice);
    // static_trans uses a static __shared__ array, so no dynamic bytes needed.
    static_trans<<<grid, block>>>(V_dev, n);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaMemcpy(V_t, V_dev, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << "Static sh m time is: " << ms << std::endl;
    for (int i = 0; i < n; i++) {
        std::cout << V_t[i] << " ";
    }
    std::cout << std::endl;
    /////////////////////////////////////////////////
    //DYNAMIC SHARED
    cudaEventRecord(start);
    cudaMemcpy(V_dev, V, size, cudaMemcpyHostToDevice);
    // Dynamic shared-memory size is in BYTES: pass n*sizeof(int)
    // (the previous launch passed bare `n`, i.e. 4x too few bytes).
    dynamic_trans<<<grid, block, size>>>(V_dev, n);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaMemcpy(V_t, V_dev, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    ms = 0;
    cudaEventElapsedTime(&ms, start, stop);
    std::cout << "Dynamic sh m time is: " << ms << std::endl;
    for (int i = 0; i < n; i++) {
        std::cout << V_t[i] << " ";
    }
    std::cout << std::endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(V);
    free(V_t);          // host buffer: free(), not cudaFree() as before
    cudaFree(V_dev);    // device buffer was previously leaked
    return 0;
}
|
12,719 | #include <stdio.h>
// #include <cuda.h>
// Single-thread scalar addition: *c = *a + *b.
__global__ void add(int* a, int* b, int* c)
{
    int lhs = *a;
    int rhs = *b;
    *c = lhs + rhs;
}
// __global__ void add(int* a, int* b, int* c)
// {
// c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
// }
// nvcc -o bin/test src/test.cu
// Adds two host integers on the device and prints the result.
int main(void)
{
    int a = 2, b = 7, c;
    int *devA, *devB, *devC;
    const int size = sizeof(int);
    cudaMalloc((void**)&devA, size);
    cudaMalloc((void**)&devB, size);
    cudaMalloc((void**)&devC, size);
    cudaMemcpy(devA, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, &b, size, cudaMemcpyHostToDevice);
    add<<<1,1>>>(devA, devB, devC);
    cudaMemcpy(&c, devC, size, cudaMemcpyDeviceToHost);
    printf("%d + %d = %d\n", a, b, c);
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    return 0;
}
12,720 | #include <iostream>
#include <fstream>
#include <vector>
#include <memory>
#include <limits>
#include <random>
#include "include/vec3.cuh"
#include "include/ray.cuh"
#include "include/sphere.cuh"
#include "include/hitable.cuh"
#include "include/hitable_list.cuh"
#include "include/camera.cuh"
#include "include/material.cuh"
#include "include/random_helpers.cuh"
#define RM(row,col,w) row*w+col
#define CM(row,col,h) col*h+row
// Writes `colors` as an ASCII (P3) PPM named "<filename>.ppm".
// P3 sample values must be integers in [0, maxval], so each scaled float
// is truncated to int (previously the raw floats were streamed, which
// produces an invalid PPM that many viewers reject).
void write_ppm_image(std::vector<rgb> colors, int h, int w, std::string filename) {
    std::ofstream myfile;
    myfile.open(filename + ".ppm");
    myfile << "P3\n" << w << " " << h << "\n255\n";
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            auto color = colors[RM(i, j, w)];
            myfile << int(color.r()*255.99) << " "
                   << int(color.g()*255.99) << " "
                   << int(color.b()*255.99) << std::endl;
        }
    }
    myfile.close();
}
// Classic "hello world" gradient: red ramps left-to-right, green ramps
// bottom-to-top, blue fixed at 0.2. All channels must stay in [0, 1].
// Fixed precedence bug: `h - i / float(h)` evaluated as h - (i/h), giving
// green values near 500; the intended ramp is (h - i) / h.
std::vector<rgb> hello_world_render(int h, int w) {
    auto colors = std::vector<rgb>(w*h);
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            colors[RM(i, j, w)].r(j / float(w));
            colors[RM(i, j, w)].g((h - i) / float(h));
            colors[RM(i, j, w)].b(0.2f);
        }
    }
    return colors;
}
// Recursive path tracing: returns the color carried by ray r.
// Hits within [0.001, inf) scatter off the surface material for up to 50
// bounces (0.001 avoids shadow acne from self-intersection); an absorbed
// ray contributes black. Rays that miss everything sample a vertical
// white-to-blue sky gradient.
rgb color(const ray& r, const std::shared_ptr<hitable>& world, int depth) {
hit_record rec;
if (world->hit(r, 0.001f, std::numeric_limits<float>::max(), rec)) {
ray scattered;
vec3 attenuation;
if (depth < 50 && rec.mat_ptr->scatter(r, rec, attenuation, scattered)) {
return attenuation * color(scattered, world, depth + 1);
}
else {
return rgb(0, 0, 0);
}
}
// Miss: blend white (t=0) to light blue (t=1) by the ray's vertical angle.
vec3 unit_direction = unit_vector(r.direction());
float t = 0.5f*(unit_direction.e[1] + 1.0f);
return vec3(1.0f, 1.0f, 1.0f)*(1.0f - t) + vec3(0.5f, 0.7f, 1.0f)*t;
}
// Renders the fixed five-sphere scene (two lambertian, one metal, one
// glass sphere with a negative-radius inner shell for a hollow look)
// at h x w with `samples` jittered rays per pixel. Rows are distributed
// across threads with OpenMP; each pixel is averaged then gamma-corrected
// (gamma 2 via per-channel sqrt).
std::vector<rgb> simple_ray_render(int h, int w, int samples) {
auto colors = std::vector<rgb>(w*h);
auto c = camera();
auto world = std::make_shared<hitable_list>();
world->add_hitable(std::make_shared<sphere>(vec3(0, 0, -1), 0.5f, std::make_shared<lambertian>(vec3(0.8f, 0.3f, 0.3f))));
world->add_hitable(std::make_shared<sphere>(vec3(0, -100.5, -1), 100.0f, std::make_shared<lambertian>(vec3(0.8f, 0.8f, 0.0f))));
world->add_hitable(std::make_shared<sphere>(vec3(1, 0, -1), 0.5f, std::make_shared<metal>(vec3(0.8f, 0.6f, 0.2f), 0.3f)));
world->add_hitable(std::make_shared<sphere>(vec3(-1, 0, -1), 0.5f, std::make_shared<dielectric>(1.5f)));
world->add_hitable(std::make_shared<sphere>(vec3(-1, 0, -1), -0.45f, std::make_shared<dielectric>(1.5f)));
#pragma omp parallel for
for (int i = 0; i < h; i++) {
for (int j = 0; j < w; j++) {
rgb pix(0, 0, 0);
for (int s = 0; s < samples; s++) {
// Jitter each sample inside the pixel; v flips the row axis so
// image row 0 maps to the top of the view.
float u = float(j + dis(gen)) / float(w);
float v = float(h - i + dis(gen)) / float(h);
ray r = c.get_ray(u, v);
pix += color(r, world, 0);
}
pix /= float(samples);
pix = pix.v_sqrt(); // gamma correct (gamma 2)
colors[RM(i, j, w)] = pix;
}
}
return colors;
}
// Renders the demo scene at 1000x500 with 100 samples/pixel into render.ppm.
int main() {
    const int height = 500;
    const int width = 1000;
    const int samples = 100;
    auto image = simple_ray_render(height, width, samples);
    write_ppm_image(image, height, width, "render");
    return 0;
}
12,721 | // Kyle Heaton
// U0517990
// Final Project
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#define DEFAULT_THRESHOLD 4000
#define DEFAULT_FILENAME "testing-image.ppm"
// Reads a binary (P6) PPM and returns a newly malloc'd array of one
// unsigned int per pixel containing only the RED channel (greyscale use).
// Width/height/maxval are returned through the out parameters.
// Returns NULL on any failure (message on stderr).
// NOTE(review): header parsing assumes the whole header fits in the first
// 1000 bytes and handles at most one comment line; `buf` is never freed;
// `bufsize` is an int but printed with %ld below — worth fixing upstream.
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp)
{
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
//int num = read(fd, chars, 1000);
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) bufsize *= 2;  // 16-bit samples take two bytes each
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// TODO really read
char duh[80];
char *line = chars;
// find the start of the pixel data. no doubt stupid
// Locates the width, height and maxval tokens in turn to skip the header.
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
// Keep only every third byte: the red channel of each RGB triple.
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++) pic[i] = (int) buf[3*i]; // red channel
return pic; // success
}
// Writes `pic` as a binary (P6) PPM, replicating each greyscale value
// into the R, G and B bytes of its pixel.
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic) {
    fprintf(stderr, "write_ppm( %s )\n", filename);
    FILE *fp = fopen(filename, "w");
    if (!fp)
    {
        fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
        exit(-1);
    }
    fprintf(fp, "P6\n");
    fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
    const int numpix = xsize * ysize;
    for (int i = 0; i < numpix; i++) {
        unsigned char uc = (unsigned char) pic[i];
        fputc(uc, fp);
        fputc(uc, fp);
        fputc(uc, fp);
    }
    fclose(fp);
}
// Sobel edge detector: writes 255/0 into `output` for interior pixels
// whose squared gradient magnitude exceeds *threshold. Border pixels are
// never touched (the caller pre-fills them).
// Rows are now distributed with a grid-stride loop: with the existing
// <<<1,1>>> launch this degenerates to the original serial scan, but the
// kernel also works (and parallelizes) for any other launch configuration.
__global__ void sobel(int* imageWidth, int* imageHeight, int* image, int* output, int* threshold) {
    int width = *imageWidth;
    int height = *imageHeight;
    int rowStride = gridDim.x * blockDim.x;
    for (int i = 1 + blockIdx.x * blockDim.x + threadIdx.x; i < height - 1; i += rowStride) {
        for (int j = 1; j < width - 1; j++) {
            int offset = i * width + j;
            // Horizontal (sum1) and vertical (sum2) 3x3 Sobel responses.
            int sum1 = image[width * (i - 1) + j + 1 ] - image[width * (i - 1) + j - 1] +
                2 * image[width * (i) + j + 1 ] - 2 * image[width * (i) + j - 1] +
                image[width * (i + 1) + j + 1 ] - image[width * (i + 1) + j - 1];
            int sum2 = image[width * (i - 1) + j - 1] + 2 * image[width * (i - 1) + j] +
                image[width * (i - 1) + j + 1] - image[width * (i + 1) + j - 1] -
                2 * image[width * (i + 1) + j] - image[width * (i + 1) + j + 1];
            int magnitude = sum1 * sum1 + sum2 * sum2;
            if (magnitude > *threshold) {
                output[offset] = 255;
            }
            else {
                output[offset] = 0;
            }
        }
    }
}
// Runs the Sobel kernel over a numbered sequence of PPM frames and times
// the whole batch with CUDA events.
int main( int argc, char **argv ) {
    int thresh = DEFAULT_THRESHOLD;
    int number_of_files = 20000;
    cudaEvent_t start_event, stop_event;
    float elapsed_time_gpu;
    if(argc > 1) {
        number_of_files = atoi(argv[1]);
    }
    cudaEventCreate(&start_event);
    cudaEventCreate(&stop_event);
    cudaEventRecord(start_event, 0);
    for(int i = 1; i <= number_of_files; i++) {
        char *in_filename = (char*)malloc(36 * sizeof(char));
        char *out_filename = (char*)malloc(36 * sizeof(char));
        sprintf(in_filename, "./sintel/sintel%03d.ppm", i);
        sprintf(out_filename, "./sintel-sobel/sintel-sobel%03d.ppm", i);
        int xsize, ysize, maxval;
        unsigned int *pic = read_ppm( in_filename, &xsize, &ysize, &maxval );
        if (!pic) {                 // read_ppm already reported the reason
            free(out_filename);
            free(in_filename);
            continue;
        }
        // One int per pixel: read_ppm returns a single (red) channel.
        // The previous numbytes included a factor of 3, so the H2D copy
        // of `pic` read 2/3 of a buffer past its allocation.
        int numbytes = xsize * ysize * sizeof( int );
        int *result = (int *) malloc( numbytes );
        if (!result) {
            fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
            exit(-1); // fail
        }
        // Preset to -1; the kernel never writes the 1-pixel border.
        for (int p = 0; p < xsize * ysize; p++) {
            result[p] = -1;
        }
        int *imageWidth, *imageHeight, *image, *output, *threshold;
        cudaMalloc((void **)&imageWidth, sizeof(int));
        cudaMalloc((void **)&imageHeight, sizeof(int));
        cudaMalloc((void **)&image, numbytes);
        cudaMalloc((void **)&output, numbytes);
        cudaMalloc((void **)&threshold, sizeof(int));
        cudaMemcpy(imageWidth, &xsize, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(imageHeight, &ysize, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(image, pic, numbytes, cudaMemcpyHostToDevice);
        cudaMemcpy(output, result, numbytes, cudaMemcpyHostToDevice);
        cudaMemcpy(threshold, &thresh, sizeof(int), cudaMemcpyHostToDevice);
        sobel<<<1,1>>>(imageWidth, imageHeight, image, output, threshold);
        cudaMemcpy(result, output, numbytes, cudaMemcpyDeviceToHost);
        cudaFree(imageWidth);
        cudaFree(imageHeight);
        cudaFree(image);
        cudaFree(output);
        cudaFree(threshold);
        write_ppm( out_filename, xsize, ysize, 255, result);
        free(pic);
        free(result);
        free(out_filename);
        free(in_filename);
    }
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize(stop_event);
    cudaEventElapsedTime(&elapsed_time_gpu,start_event, stop_event);
    printf("Parallel Time: %.2f msec\n", elapsed_time_gpu);
    fprintf(stderr, "sobel done\n");
}
// Sketch of the intended video pipeline; not wired up yet.
int main1(int argc, char** argv) {
    // Planned steps:
    //  1. optionally split the input video into frames with ffmpeg
    //  2. load the frame image files
    //  3. choose CUDA launch parameters (blocks / warps / streams)
    //     ref: https://devblogs.nvidia.com/parallelforall/how-overlap-data-transfers-cuda-cc/
    //  4. async H2D copy, process on the GPU, async D2H copy
    //  5. save the processed frames
    //  6. optionally stitch the frames back into a video with ffmpeg
    return 0;
}
12,722 | /////// Bom para quando maiores pq o discard n precisa contar varios.
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/transform.h>
#include <iostream>
#include <math.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/random/uniform_real_distribution.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random.h>
// Stateless per-index uniform(0,1) generator: each call seeds a fresh
// engine from i*SEED, so element i is reproducible regardless of the
// order threads evaluate it. discard(100) warms up the engine.
struct rng_gpu{
    int SEED;
    __device__ __host__
    double operator()(const int &i){
        thrust::default_random_engine engine(i * SEED);
        thrust::uniform_real_distribution<double> uniform01(0.0, 1.0);
        engine.discard(100);
        return uniform01(engine);
    }
};
// Prints ten host-side uniform(25,40) draws, then ten device-side
// uniform(0,1) draws produced by rng_gpu via thrust::transform.
int main() {
    thrust::default_random_engine hostEngine(10);
    thrust::uniform_real_distribution<double> hostDist(25, 40);
    for (int k = 0; k < 10; ++k) {
        std::cout << hostDist(hostEngine) << "\n";
    }
    rng_gpu rg = {.SEED = 10};
    thrust::device_vector<double> D(10);
    thrust::transform(thrust::make_counting_iterator<int>(0),
                      thrust::make_counting_iterator<int>(10),
                      D.begin(), rg);
    for (int k = 0; k < 10; ++k) {
        std::cout << D[k] << "\n";
    }
}
12,723 | #include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <cuda.h>
#define NTPB 64
#define NEPT 2
#define SIZE 128
#define MAX(x,y) ((x<=y)? y : x)
#define MIN(x,y) ((x<=y)? x : y)
// Function that catches the error
// Aborts with a diagnostic if a CUDA call failed. Now also prints the
// human-readable error string, which the previous message omitted —
// the file/line alone did not say WHAT went wrong.
void testCUDA(cudaError_t error, const char *file, int line) {
    if (error != cudaSuccess) {
        printf("There is an error in file %s at line %d: %s\n",
               file, line, cudaGetErrorString(error));
        exit(EXIT_FAILURE);
    }
}
// Has to be defined in the compilation in order to get the correct value of the macros
// __FILE__ and __LINE__
#define testCUDA(error) (testCUDA(error, __FILE__ , __LINE__))
// Dumps the device count and the properties of ONE device to stdout.
// NOTE(review): it queries device `count-1` (the last device), not
// necessarily the device the kernels will run on — confirm intent.
void printGPUCaracteristics(){
int count;
cudaDeviceProp prop;
testCUDA(cudaGetDeviceCount(&count));
printf("\n\nThe number of devices available is %i GPUs \n", count);
testCUDA(cudaGetDeviceProperties(&prop, count-1));
printf("Name: %s\n", prop.name);
printf("Global memory size in octet (bytes): %ld\n", prop.totalGlobalMem);
printf("Shared memory size per block: %ld\n", prop.sharedMemPerBlock);
printf("Number of registers per block: %i\n", prop.regsPerBlock);
printf("Number of threads in a warp: %i\n", prop.warpSize);
printf("Maximum number of threads that can be launched per block: %i\n",
prop.maxThreadsPerBlock);
printf("Maximum number of threads that can be launched: %i X %i X %i\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("Maximum grid size: %i X %i X %i\n", prop.maxGridSize[0],
prop.maxGridSize[1], prop.maxGridSize[2]);
printf("Total constant memory size: %ld\n", prop.totalConstMem);
printf("Major compute capability: %i\n", prop.major);
printf("Minor compute capability: %i\n", prop.minor);
printf("Clock rate: %i\n", prop.clockRate);
printf("Maximum 1D texture memory: %i\n", prop.maxTexture1D);
printf("Could we overlap?: %i\n", prop.deviceOverlap);
printf("Number of multiprocessors: %i\n", prop.multiProcessorCount);
printf("Is there a limit for kernel execution?: %i\n",
prop.kernelExecTimeoutEnabled);
printf("Is my GPU a chipset?: %i\n", prop.integrated);
printf("Can we map the host memory?: %i\n", prop.canMapHostMemory);
printf("Can we launch concurrent kernels?: %i\n", prop.concurrentKernels);
printf("Do we have ECC memory?: %i\n", prop.ECCEnabled);
}
/* Cette fonction fait :
* d'abord: chercher le point d'intersection de diagonal avec merge-path
* et puis merge.
*
* paramètre:
* @ A, B: 2 tableaux triés
* @ C: tableau output qui contient A et B et qui est trié
* @ size_A, size_B : la taille du tableau A et B
* @ thread_id : indentifiant de thread dans la grille
* @ tid : indentifiant de thread dans son block
* @ numThreads : le nombre de threads dans la grille
*/
// Merge-path merge: each thread first binary-searches the intersection of
// its diagonal with the merge path of sorted arrays A and B, then merges
// its slice of (A, B) into C. thread_id is the global thread index, tid
// the index within the block, numThreads the grid-wide thread count.
// NOTE(review): `diaglength` is an int, so `diaglength = 0.5` stores 0 and
// `while (diaglength > 0.5)` is just `diaglength >= 1` — the fractional
// constants look unintended; TODO confirm the termination logic.
__device__ void DiagonalIntersection_Merge(int *A, int size_A ,int * B, int size_B, int *C, int thread_id, int tid, int numThreads)
{
int diag, diaglength, a_top, b_top, a_bottom, b_bottom;
int offset, a_end=0, b_end=0, a_start, b_start;
int numEls = (size_A + size_B)/numThreads;
int c_start;
__shared__ int diagA[NTPB];
__shared__ int diagB[NTPB];
/* Each thread sets up its own search window whose diagonal contains
 * the merge-path intersection point. */
diag = (thread_id) * numEls;
a_top = (diag > size_A) ? size_A : diag;
b_top = (diag > size_A) ? diag - size_A : 0;
a_bottom = b_top;
b_bottom = diaglength = (diag > size_A) ? 2*size_A - diag : diag;
/* Initialize the shared diagA / diagB arrays. */
if (thread_id==0)
{
diaglength = 0.5;
a_start = diagA[tid] = a_bottom;
b_start = diagB[tid] = b_top;
}
else
{
diagA[tid] = a_bottom;
diagB[tid] = b_top + diaglength - 1;
}
/* Binary search along the diagonal. */
while(diaglength > 0.5)
{
offset = (a_top - a_bottom)/2;
a_start = a_top - offset ;
b_start = b_top + offset ;
/* Special cases: the intersection lies on the border of the A*B grid.
 * NOTE(review): this mixes bitwise `|` with `&&`; since `|` binds
 * tighter than `&&` in C, the expression does NOT parse as the
 * intended (X && Y) || (Z && W) || V — TODO verify. */
if (a_start == a_top && b_start == 0 | a_start == a_top && b_start == b_bottom | b_start == size_A)
{
diagB[tid] = b_start;
diagA[tid] = a_start;
break;
}
/* Regular case: the intersection is inside the grid. */
if(A[a_start] > B[b_start-1])
{
if(A[a_start-1] <= B[b_start])
{
/* The midpoint is the intersection. */
diagA[tid] = a_start;
diagB[tid] = b_start;
break;
}
else
{
/* Continue searching in the smaller half: shrink the window. */
a_top = a_start - 1;
b_top = b_start + 1;
}
}
else
{
/* Continue searching in the larger half: raise the lower bound. */
a_bottom = a_start;
}
diaglength /= 2;
}
/* Each thread will next read one element of diagA and one of diagB, so
 * all writes to those shared arrays must complete first. */
__syncthreads();
/* Each thread reads its right neighbor's intersection to determine its
 * own a_end / b_end. */
if (tid < NTPB -1)
{
a_end = diagA[tid+1];
b_end = diagB[tid+1];
}
else
{
/* Threads with tid == NTPB-1 cannot see their right neighbor (it is
 * in another block), so a_end / b_end are derived differently. */
if (thread_id < numThreads - 1)
{
a_end = a_start + numEls;
b_end = b_start + numEls;
}
else
{
a_end = size_A;
b_end = size_A;
}
}
__syncthreads();
/* MERGE phase: standard two-pointer merge of the thread's slices. */
c_start = thread_id * numEls;
int c_end = c_start + numEls;
while (a_start < a_end && b_start < b_end && c_start < c_end) {
if (A[a_start] <= B[b_start]) {
C[c_start] = A[a_start];
c_start++;
a_start++;
} else {
C[c_start] = B[b_start];
c_start++;
b_start++;
}
}
while(a_start < a_end && c_start < c_end) {
C[c_start] = A[a_start];
c_start++;
a_start++;
}
while(b_start < b_end && c_start < c_end) {
C[c_start] = B[b_start];
c_start++;
b_start++;
}
}
/*la fonction kernel de chaque thread
*
*paramètre:
*@ A,B : tableaux d'entrée
*@ size_A, size_B : taille de A et B
*@ numThreads : nombre de threads dans la grille
*/
// Entry kernel: each thread merges its merge-path slice of A and B into S.
__global__ void kernel(int *A, int *B, int *S,int size_A, int size_B, int numThreads){
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    DiagonalIntersection_Merge(A, size_A, B, size_B, S, gid, threadIdx.x, numThreads);
}
// Prints the array space-separated, followed by a blank line.
void printArray(int A[], int size){
    for (int i = 0; i < size; i++)
        printf("%d ", A[i]);
    printf("\n\n");
}
// Host wrapper: copies A and B to the device, launches the merge kernel
// with NEPT output elements per thread, times it with CUDA events, and
// copies the merged result back into S (size_A + size_B elements).
void wrapper(int *A, int *B, int *S, int size_A, int size_B){
int *A_GPU , *B_GPU,*S_GPU;
int size_S = size_A + size_B;
int tailleA = size_A*sizeof(int);
int tailleB = size_B*sizeof(int);
int tailleS = size_S*sizeof(int);
float TimerV; // GPU timer instructions
cudaEvent_t start, stop; // GPU timer instructions
testCUDA(cudaEventCreate(&start)); // GPU timer instructions
testCUDA(cudaEventCreate(&stop));
testCUDA(cudaMalloc(&A_GPU,tailleA));
testCUDA(cudaMalloc(&B_GPU,tailleB));
testCUDA(cudaMalloc(&S_GPU,tailleS));
testCUDA(cudaMemcpy(A_GPU,A, tailleA,cudaMemcpyHostToDevice));
testCUDA(cudaMemcpy(B_GPU,B, tailleB,cudaMemcpyHostToDevice));
// Grid sized so each thread handles NEPT elements of the output.
int NB = (size_S + (NTPB*NEPT) -1)/(NTPB*NEPT);
int numThreads = NB*NTPB;
testCUDA(cudaEventRecord(start,0));
kernel<<<NB,NTPB>>>(A_GPU, B_GPU, S_GPU, size_A, size_B, numThreads);
printf ("NB = %d, NTPB = %d, numthreads = %d\n", NB, NTPB, numThreads);
testCUDA(cudaEventRecord(stop,0));
testCUDA(cudaEventSynchronize(stop));
testCUDA(cudaEventElapsedTime(&TimerV,start, stop));
// The blocking D2H copy also guarantees the kernel has finished.
testCUDA(cudaMemcpy(S,S_GPU, tailleS,cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
printf("\nExecution time: %f ms\n", TimerV);
testCUDA(cudaFree(A_GPU));
testCUDA(cudaFree(B_GPU));
testCUDA(cudaFree(S_GPU));
}
// Reads two sorted arrays from the data file given on the command line,
// merges them on the GPU (larger array passed as A), and prints the result.
int main(int argc, char const *argv[]){
    FILE * f;
    int *A , *B, *S;
    int A_size, B_size, S_size;
    int i;
    /*================= Read the two input arrays from a file =================*/
    if (argc < 2) {
        fprintf( stderr,"Usage: <%s> <JeuxDeDonnees/fichier>\n", argv[0]);
        return 1;
    }
    if( (f=fopen(argv[1], "r"))==NULL) {
        fprintf(stderr,"erreur a la lecture du fichier %s\n", argv[1]);
        exit(1);
    }
    char ch_a[10] = {0};
    char ch_b[10] = {0};
    // Width-limited %9s prevents overflowing the 10-byte buffers.
    fscanf(f, "%9s %9s", ch_a,ch_b);
    A_size = atoi(ch_a);
    B_size = atoi(ch_b);
    S_size = A_size + B_size;
    printf("\nSize of A: %d\n",A_size);
    printf("Size of B: %d\n\n",B_size);
    if (SIZE != A_size || NTPB != A_size/NEPT)
    {
        printf ("Make sure : SIZE = %d, NTPB = %d\n", A_size, A_size/NEPT);
        printf ("Please retry !\n");
        fclose(f);
        return 0;
    }
    A = (int*) malloc((A_size)*sizeof(int));
    B = (int*) malloc((B_size)*sizeof(int));
    S = (int*) malloc((S_size)*sizeof(int));
    int max = MAX(A_size,B_size);
    int min = MIN(A_size,B_size);
    // %d matches the int destinations; the previous %ld with int* was UB.
    for(i = 0; i < max; i++){
        if(i < min){
            fscanf(f,"%d %d",&A[i],&B[i]);
        }
        else{
            if(min == A_size){
                fscanf(f,"%d",&B[i]);
            }
            else{
                fscanf(f,"%d",&A[i]);
            }
        }
    }
    fclose(f);   // was previously leaked
    /*===============================================================================*/
    printGPUCaracteristics();
    // The merge kernel expects the larger array in the A slot.
    if(A_size == max){
        wrapper(A, B, S, A_size, B_size);
    }
    else{
        wrapper(B, A, S, B_size, A_size);
    }
    printf("Given array are \n");
    printf("A: ");
    printArray(A, A_size);
    printf("B: ");
    printArray(B, B_size);
    printf("\nSorted array is \n");
    printArray(S, S_size);
    free(A);
    free(B);
    free(S);
    return 0;
}
|
12,724 | // JCudaNN から呼び出される CUDA カーネル関数
// *** デバイス定義の関数 ***
// 損失関数の微分
// Loss derivative: the difference between the network output x and target y.
__device__
static float loss_deriv(float x, float y) {
    float diff = x - y;
    return diff;
}
// sigmoid 関数
// Logistic sigmoid: 1 / (1 + e^-x).
__device__
static float sigmoid(float x) {
    const float e = expf(-x);
    return 1.0f / (1.0f + e);
}
// sigmoid 関数の微分
// Sigmoid derivative expressed via the already-computed output: s * (1 - s).
__device__
static float sigmoid_deriv(float outz) {
    const float s = outz;
    return s * (1.0f - s);
}
// 最大値を求める
// Returns the maximum of xn[0..n-1]. Requires n >= 1.
// Seeded from the first element rather than the old constant -1e+5,
// which returned the wrong maximum whenever every element was below it.
__device__
static float calc_max(float *xn, int n) {
    float xmax = xn[0];
    for (int i = 1; i < n; ++i) {
        if (xmax < xn[i]) {
            xmax = xn[i];
        }
    }
    return xmax;
}
// softmax 関数の分母を求める
// Softmax denominator: sum_i exp(xn[i] - xmax); xmax is subtracted
// beforehand for numerical stability.
__device__
static float calc_div(float *xn, int n, float xmax) {
    float total = 0.0f;
    for (int idx = 0; idx < n; ++idx)
        total += expf(xn[idx] - xmax);
    return total;
}
// softmax 関数
// Softmax component m: exp(xn[m] - xmax) / div.
__device__
static float calc_softmax(float *xn, float xmax, float div, int m) {
    const float numer = expf(xn[m] - xmax);
    return numer / div;
}
// *** SimpleNet.java からの呼び出し ***
// 1次元クリア
// Zero-fills the length-n vector b, one thread per element.
extern "C"
__global__ void clear1D(float *b, int n) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    b[idx] = 0.0f;
}
// 2次元クリア
// Zero-fills the xsize x ysize array-of-rows w, one thread per element.
extern "C"
__global__ void clear2D(float **w, int xsize, int ysize) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= xsize || y >= ysize)
        return;
    w[x][y] = 0.0f;
}
// 2次元コピー
// Element-wise copy of the xsize x ysize array src into dst.
extern "C"
__global__ void copy2D(float **dst, float **src, int xsize, int ysize) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= xsize || y >= ysize)
        return;
    dst[x][y] = src[x][y];
}
// 間接的2次元集約コピー
// Gathering copy: row x of dst is taken from row samples[x] of src.
extern "C"
__global__ void copy2DGather(float **dst, float **src, int xsize, int ysize, int *samples) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= xsize || y >= ysize)
        return;
    dst[x][y] = src[samples[x]][y];
}
// forward 演算
// Forward pass of one layer over a mini-batch.
// tmpz[k][y] receives the linear sum xin[k] . w[:,y] + b[y]; outz[k][y]
// receives the activation: softmax over the row when fmt == 1, sigmoid
// otherwise. y indexes output units, k indexes batch samples (bs total).
// The barrier separates the linear phase from the activation phase because
// softmax reads the whole tmpz row written by other threads.
// NOTE(review): every thread of a row recomputes the row max and the
// softmax denominator — correct but redundant work.
extern "C"
__global__ void calc_forward(float **outz, float **tmpz, float **w, float *b,
float **xin, int xsize, int ysize, int fmt, int bs) {
const int y = blockDim.x * blockIdx.x + threadIdx.x;
const int k = blockDim.y * blockIdx.y + threadIdx.y;
// Linear sum.
if (y < ysize && k < bs) {
float ztmp = 0.0f;
for (int x = 0; x < xsize; ++x) {
ztmp += xin[k][x] * w[x][y];
}
ztmp += b[y];
tmpz[k][y] = ztmp;
}
__syncthreads();
// Non-linear activation output.
if (y < ysize && k < bs) {
if (fmt == 1) {
// softmax over the row (max-shifted for stability)
float xmax = calc_max(tmpz[k], ysize);
float div = calc_div(tmpz[k], ysize, xmax);
outz[k][y] = calc_softmax(tmpz[k], xmax, div, y);
} else {
// sigmoid
outz[k][y] = sigmoid(tmpz[k][y]);
}
}
}
// 損失関数のベクトル計算
// Per-element loss derivative for a batch: out[k][i] = outz[k][i] minus the
// label of the k-th sampled example (samples maps batch slot -> example).
extern "C"
__global__ void loss_derivative(float **out, float **outz, float **label, int *samples, int outn, int bs) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    const int k = blockDim.y * blockIdx.y + threadIdx.y;
    if (i >= outn || k >= bs)
        return;
    out[k][i] = loss_deriv(outz[k][i], label[samples[k]][i]);
}
// 損失関数の b 方向の微分
// Backpropagated bias-direction derivative:
// db[k][x] = sigmoid'(outz[k][x]) * sum_y w[x][y] * bderiv2[k][y].
extern "C"
__global__ void calc_deriv_b_kernel(float **db, float **w, float **outz, float **bderiv2,
int xsize, int ysize, int bs) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int k = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= xsize || k >= bs)
        return;
    float acc = 0.0f;
    for (int y = 0; y < ysize; ++y)
        acc += w[x][y] * bderiv2[k][y];
    db[k][x] = acc * sigmoid_deriv(outz[k][x]);
}
// 損失関数の w 方向の微分
// Weight-direction derivative summed over the batch:
// dw[x][y] = sum_z in[z][x] * db[z][y].
extern "C"
__global__ void calc_deriv_w_kernel(float **dw, float **in, int xsize, float **db,
int ysize, int bs) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= xsize || y >= ysize)
        return;
    float acc = 0.0f;
    for (int z = 0; z < bs; ++z)
        acc += in[z][x] * db[z][y];
    dw[x][y] = acc;
}
// *** NeuralNet.java からの呼び出し ***
// 2次元のベクトル加算
// In-place 2-D accumulation: wout += win, element-wise.
extern "C"
__global__ void vec_add_2d(float **wout, float **win, int xsize, int ysize) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= xsize || y >= ysize)
        return;
    wout[x][y] += win[x][y];
}
// 1次元のベクトル加算(未使用)
// 1-D vector addition — intentionally empty stub; the original comment
// marks it as unused by the Java caller.
extern "C"
__global__ void vec_add_1d(float *bout, float *bin, int size, int bs) {
}
// w に関する学習
// Gradient-descent step on the weights:
// wout[x][y] -= lrate * deriv[x][y] / nsample.
extern "C"
__global__ void learn_2d(float **wout, float **deriv, float lrate, int xsize, int ysize, float nsample) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= xsize || y >= ysize)
        return;
    wout[x][y] -= lrate * deriv[x][y] / nsample;
}
// b に関する学習
// Gradient-descent step on the biases, accumulating the per-sample
// derivatives over the batch: bout[i] -= lrate * deriv[s][i] / nsample.
extern "C"
__global__ void learn_1d(float *bout, float **deriv, float lrate, int size, float nsample, int bs) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size)
        return;
    for (int s = 0; s < bs; ++s)
        bout[i] -= lrate * deriv[s][i] / nsample;
}
// 一様乱数によるノイズシェーピング
// Dropout-style masking with precomputed uniform randoms: out[x][y] is
// zeroed when rnd[x + xsize*y] < threshold, otherwise copied from in.
extern "C"
__global__ void noise_shape(float **out, float **in, float *rnd, int xsize, int ysize, float threshold) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x;
    const int y = blockDim.y * blockIdx.y + threadIdx.y;
    if (x >= xsize || y >= ysize)
        return;
    const bool drop = rnd[x + xsize * y] < threshold;
    out[x][y] = drop ? 0.0f : in[x][y];
}
// テスト用
// Test helper: dout[i] = sum_k |dw[k][i]| (k over inn) + sum_k |db[k][i]|
// (k over bs). Added the missing bounds guard — the old version wrote
// dout[i] for every launched thread. Each thread owns its own dout[i]
// and no shared memory is involved, so the old __syncthreads() (which
// synchronized nothing) is dropped; accumulation now happens in a
// register instead of repeated global read-modify-writes.
extern "C"
__global__ void test_sum(float *dout, float **dw, float **db, int inn, int outnn, int bs) {
    const int i = blockDim.x * blockIdx.x + threadIdx.x; // output index
    if (i >= outnn)
        return;
    float acc = 0.0f;
    for (int k = 0; k < inn; ++k) {
        acc += fabsf(dw[k][i]);
    }
    for (int k = 0; k < bs; ++k) {
        acc += fabsf(db[k][i]);
    }
    dout[i] = acc;
}
|
12,725 | #include "includes.h"
// Element-wise (Hadamard) product: target[i] = ma[i] * mb[i] for i < len.
__global__ void _mat_mul(float *ma, float *mb, float *target, int len){
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= len)
        return;
    target[idx] = ma[idx] * mb[idx];
}
12,726 | #include<stdio.h>
#include<cuda.h>
#define N 1024
#define BLOCKSIZE 64
__device__ unsigned binary[N];
// Demonstration kernel: each thread writes its own slot of the global
// array, then reads the "mirror" slot N-1-id, which (for most threads)
// belongs to a different block. __syncthreads() is only a block-local
// barrier, so the mirror slot may not have been written yet and the
// message fires — demonstrating that CUDA has no implicit grid-wide
// barrier. The check is racy by design.
__global__ void K() {
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
binary[id] = id;
__syncthreads(); // synchronizes this block only, not the whole grid
if (binary[N-1 - id] != N-1 - id)
printf("Error: There is no global barrier.\n");
}
// Launch the barrier demo over all N ids and wait for it to finish.
int main() {
    const int blocks = N / BLOCKSIZE;
    K<<<blocks, BLOCKSIZE>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
12,727 | #include "includes.h"
#define MAXR(sz) (((sz)+MAXSEQ-1)/MAXSEQ+1)
#define MAXT MAXR(MAXN)
int MAXN;
int MAXSEQ;
int THRN;
//===Definicion de estructuras y funciones utiles===
// A (sub)sequence of the array being partitioned: half-open range
// [start, end) plus the pivot value chosen for it.
typedef struct secuence{
int start,end,pivot;
}secuence;
// Work item assigned to one CUDA block: the sequence it partitions,
// the parent sequence it was split from, and bookkeeping ids.
typedef struct block{
secuence seq,parent;
int blockcount,id,bid;
}block;
// GPU quicksort, counting phase: block `id` counts how many elements of
// its sequence [start, end) are strictly less than (LT[id]) and strictly
// greater than (GT[id]) the pivot. Threads stride over the range with
// private counters and merge them with one atomicAdd each.
// Fix: the original read "atomicAdd(<[id],lt)" / "atomicAdd(>[id],gt)" —
// the operands "&LT[id]" / "&GT[id]" had been destroyed by HTML-entity
// decoding, leaving code that cannot compile; the address-of operands
// are restored here.
__global__ void gqsort1(block * blocks,int * d,int * LT,int * GT){
    int id = blockIdx.x, th = threadIdx.x, cth = blockDim.x;
    int gt = 0, lt = 0, pivot = blocks[id].seq.pivot;
    int start = blocks[id].seq.start, end = blocks[id].seq.end;
    // thread 0 zeroes this block's counters before anyone accumulates
    if (th == 0) {
        LT[id] = 0;
        GT[id] = 0;
    }
    __syncthreads();
    for (int j = start + th; j < end; j += cth) {
        if (d[j] < pivot) lt++;
        else if (d[j] > pivot) gt++;
    }
    atomicAdd(&LT[id], lt);
    atomicAdd(&GT[id], gt);
    return;
}
12,728 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#define EXPO 7
//the right way to add in cuda driver if you have an gpu
//http://askubuntu.com/questions/451221/ubuntu-14-04-install-nvidia-driver
// Back-substitution step k of cyclic reduction for a tridiagonal system.
// Each thread reconstructs one unknown at the odd positions of level k:
// x[(2i-1)*2^(k-1)] = (d - a*x[left] - c*x[right]) / b, where left/right
// are the already-solved neighbours at distance h = 2^(k-1).
// The -k+backhelper3-backhelper4+... offsets index into the level-k slab
// of the flattened per-level A/B/C/D storage laid out by the forward pass.
// NOTE(review): correctness of the offset arithmetic is assumed from the
// forward pass layout — verify against CalculatePArrayKernel if modified.
__global__ void BackwardKernel(int k,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD,float* deviceFinalX,float initialValue)
{
int bx1=blockIdx.x;
int by1=blockIdx.y;
int tx1=threadIdx.x;
int ty1=threadIdx.y;
//printf("inside of kernle %f \n",deviceFinalX[4]);
// linear thread id within the block, 1-based
int backhelper1=ty1*blockColumn+tx1+1;
int backhelper2=2*backhelper1-1;//(int((2*backhelper1-1)*pow(2.0,1.0*(k-1))))/(int)(pow(2.0,(k-1)*1.0));
int backhelper3=(int)pow(2.0,(EXPO+1)*1.0);
int backhelper4=(int)pow(2.0,(EXPO-k+2)*1.0);
int h=(int)(pow(2.0,1.0*(k-1))); // distance to the solved neighbours
float backhelperd=deviceD[-k+backhelper3-backhelper4+backhelper2];
float backhelpera=deviceA[-k+backhelper3-backhelper4+backhelper2];
float backhelperb=deviceB[-k+backhelper3-backhelper4+backhelper2];
float backhelperc=deviceC[-k+backhelper3-backhelper4+backhelper2];
int xindex1=backhelper2*pow(2.0,1.0*(k-1))-h; // left solved unknown
int xindex2=backhelper2*pow(2.0,1.0*(k-1))+h; // right solved unknown
//so thread i will be in charge of (2i-1)*2^(k-1) calculation
//printf("%d ",int((2*backhelper1-1)*pow(2.0,1.0*(k-1))));
deviceFinalX[(int)(backhelper2*pow(2.0,1.0*(k-1)))]=(backhelperd-backhelpera*deviceFinalX[xindex1]-backhelperc*deviceFinalX[xindex2])*1.0/backhelperb;
__syncthreads();
}
// Forward (reduction) step of cyclic reduction: from level step-1 it
// builds the level-step coefficient quadruple P = (a, b, c, d).
// Each active thread (1-based id helper3, limited to 2^(EXPO-step)-1)
// combines three consecutive rows 2*helper3-1, 2*helper3, 2*helper3+1
// of the previous level into one row of the new level. helper1/2/4 are
// offsets locating the per-level slabs inside the flattened arrays;
// step 1 reads the raw system, signalled by flag.
__global__ void CalculatePArrayKernel(int step,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD)
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int helper11=pow(2.0,(EXPO+1)*1.0);
int helper22=pow(2.0,(EXPO-step+1)*1.0);
int helper44=pow(2.0,(EXPO-step+2)*1.0);
int helper33=pow(2.0,EXPO*1.0)-1;
//printf("step is running: %d \n",step);
// if(helper3<pow(2.0,(EXPO-step)*1.0)-1)
//--step 1 is special case.
// NOTE(review): an earlier variant masked threads with
// if((tx!=(blockColumn-1))&&(ty!=(blockRow-1))) — branch divergence is a
// concern there; the current code bounds-checks with helper3 instead.
/*****calculate A******************/
int helper1=helper11;
int helper2=helper22;
int helper4=helper44;
int flag=0;//special for step1.
if(step==1)
{
helper1=0;
helper2=0;
helper4=0;
flag=1;
}
// 1-based linear thread id; only the first 2^(EXPO-step)-1 threads work
int helper3=ty*blockColumn+tx+1;
if(helper3<=(pow(2.0,1.0*(EXPO-step))-1.0))
{
float ahelperfora1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperfora2=deviceA[-step+helper1-helper4+2*(helper3)-1];
float bhelperfora1=deviceB[-step+helper1-helper4+2*(helper3)-1];
deviceA[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*(ahelperfora1)*ahelperfora2/bhelperfora1;
//*****calculate C******************/
float chelperforc1=deviceC[-step+helper1-helper4+2*(helper3)];
float chelperforc2=deviceC[-step+helper1-helper4+2*(helper3)+1];
float bhelperforc1=deviceB[-step+helper1-helper4+2*(helper3)+1];
deviceC[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*chelperforc1*chelperforc2/bhelperforc1;
//calculate B***********************************************//
float bhelperforb1=deviceB[-step+helper1-helper4+2*(helper3)];
float bhelperforb2=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperforb3=deviceB[-step+helper1-helper4+2*(helper3)+1];
float ahelperforb1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperforb2=deviceA[-step+helper1-helper4+2*(helper3)+1];
float chelperforb1=deviceC[-step+helper1-helper4+2*(helper3)-1];
float chelperforb2=deviceC[-step+helper1-helper4+2*(helper3)];
deviceB[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=bhelperforb1-ahelperforb1/bhelperforb2*chelperforb1-chelperforb2/bhelperforb3*ahelperforb2;
//calculate D***************************************************//
float dhelperford1=deviceD[-step+helper1-helper4+2*(helper3)];
float dhelperford2=deviceD[-step+helper1-helper4+2*(helper3)-1];
float dhelperford3=deviceD[-step+helper1-helper4+2*(helper3)+1];
float ahelperford1=deviceA[-step+helper1-helper4+2*(helper3)];
float bhelperford1=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperford2=deviceB[-step+helper1-helper4+2*(helper3)+1];
float chelperford1=deviceC[-step+helper1-helper4+2*(helper3)];
deviceD[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=dhelperford1-ahelperford1/bhelperford1*dhelperford2-chelperford1/bhelperford2*dhelperford3;
}
__syncthreads();
}
// Host driver for the cyclic-reduction tridiagonal solver.
// Builds the m = 2^EXPO - 1 system for a boundary-value ODE on [0,1]
// with spacing delta, runs EXPO-1 forward reduction steps on the GPU,
// seeds the middle unknown on the host, then runs the backward
// substitution steps and prints all intermediate arrays and the solution.
int main()
{
int m=pow(2,EXPO)-1; //think of our example as n=3 then m will be 7 here
/*printf("m value is %d",m);*/
int b=1;
int a=0;
float delta=(b-a)*1.0/(m+1.0); //this is correct , think of m as the number of inner
float *A;
float *B;
float *C;
float *D;
float *FinalX;
//by careful calculation, we figure out we need (2^n-1)*2
//so the orinal step need to store 2^n-1 value, then step 1 needs 2^(n-1)-1 value and the last one will be 2^1-1 value.
//so chuck size will be 2^n-1+2^(n-1)-1+....+2-1
//int chunkLength=(pow(2,EXPO)-1)*2;
//ad one for the extra thread that never going to use, so in this way it will not be out of index
int finalLengthX=(int)pow(2,EXPO)+1;
int chunkLength=(pow(2,EXPO)-1)*2+1;
int chunkSize=chunkLength*sizeof(float);
A=(float*)malloc(chunkSize);
B=(float*)malloc(chunkSize);
C=(float*)malloc(chunkSize);
D=(float*)malloc(chunkSize);
FinalX=(float*)malloc(finalLengthX*sizeof(float));
A[0]=0;
//int vectorLength=EXPO*m;
// fill the level-0 sub-diagonal (A), diagonal (B), super-diagonal (C)
// and right-hand side (D); the tail of each array is zero padding for
// the reduced levels written later by the kernels
for(int i=1;i<m;i++)
{
A[i]=1-delta*delta*0.5*(i+1);
}
//else will be 0
for(int i=m;i<chunkLength;i++)
{
A[i]=0;
}
for(int i=0;i<m;i++)
{
B[i]=-2+delta*delta*1.0;
}
for(int i=m;i<chunkLength;i++)
{
B[i]=0;
}
C[m-1]=0;
for(int i=0;i<m-1;i++)
{
C[i]=1+0.5*delta*delta*(i+1);
}
for(int i=m;i<chunkLength;i++)
{
C[i]=0;
}
/* D[0]=2*delta*delta*delta+0.5*delta*delta-1;*/
for(int i=0;i<m-1;i++)
{
D[i]=2*(i+1)*pow(delta,3);
}
D[m-1]=2*m*delta*delta*delta-1+3.5*delta*delta;
for(int i=m;i<chunkLength;i++)
{
D[i]=0;
}
clock_t begin,end;
begin=clock();
//so need to set up different grid dimension for different value of j,
//when j decrease the size of the thread using will decrease.
//dim3 dimGrid(1,4); //so we have 4 blocks each block will in charge a,b,c,d respectly.
//http://stackoverflow.com/questions/5029920/how-to-use-2d-arrays-in-cuda
//according to the above post, the following is the correct way to allocate 2D array on cuda devixe
/* float *deviceA, *deviceB, *deviceC, *deviceD;
size_t pitch;
cudaMallocPitch((void**)&deviceA,&pitch,m*sizeof(float),EXPO);
cudaMallocPitch((void**)&deviceB,&pitch,m*sizeof(float),EXPO);
cudaMallocPitch((void**)&deviceC,&pitch,m*sizeof(float),EXPO);
cudaMallocPitch((void**)&deviceD,&pitch,m*sizeof(float),EXPO);*/
float *deviceA, *deviceB, *deviceC, *deviceD,*deviceFinalX;
cudaMalloc((void**)&deviceA,chunkSize);
cudaMalloc((void**)&deviceB,chunkSize);
cudaMalloc((void**)&deviceC,chunkSize);
cudaMalloc((void**)&deviceD,chunkSize);
cudaMalloc((void**)&deviceFinalX,finalLengthX*sizeof(float));
//copy the host vector to device.
cudaMemcpy(deviceA,A,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceB,B,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceC,C,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceD,D,chunkSize,cudaMemcpyHostToDevice);
//deviceA, deviceB, deviceC, deviceD is designed to be the global memory of cuda.
//forward reduction: level j needs 2^(EXPO-j)-1 active threads
for(int j=1;j<EXPO;j++)
{
//the lock size should change, the first step it will need 2^(n-j)-1, so first step will be 3 if n=3
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-j)/2);
//printf("blockrow is :%d \n",blockRow);
int blockColumn=pow(2,EXPO-j-(EXPO-j)/2);
//printf("blockColumn is :%d \n",blockColumn);
dim3 dimBlock(blockColumn,blockRow);
//in each step the processor being used should decrease should be 2^(n-j)-1 in jth step
CalculatePArrayKernel<<<dimGrid,dimBlock>>>(j,blockRow,blockColumn,deviceA,deviceB,deviceC,deviceD);
}
//backward
//copy the device vector to host
cudaMemcpy(A,deviceA,chunkSize,cudaMemcpyDeviceToHost);
cudaMemcpy(B,deviceB,chunkSize,cudaMemcpyDeviceToHost);
cudaMemcpy(C,deviceC,chunkSize,cudaMemcpyDeviceToHost);
cudaMemcpy(D,deviceD,chunkSize,cudaMemcpyDeviceToHost);
// the fully-reduced 1x1 system yields the middle unknown directly
int lastIndex=(int)pow(2,EXPO+1)-EXPO-3;
float initialValue=D[lastIndex]/B[lastIndex];
FinalX[0]=0;
FinalX[(int)pow(2,EXPO-1)]=initialValue;
printf("the value in the middle is: %f and this suppose to close to 0.5 when n goes big! \n",FinalX[(int)pow(2,EXPO-1)]);
cudaMemcpy(deviceFinalX,FinalX,finalLengthX*sizeof(float),cudaMemcpyHostToDevice);
// back substitution: level k solves the odd multiples of 2^(k-1)
for(int k=EXPO-1;k>=1;k--)
{
//so the most one will use 2^(n-k) variable will be covered!
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-k)/2);
int blockColumn=pow(2,EXPO-k-(EXPO-k)/2);
dim3 dimBlock(blockColumn,blockRow);
BackwardKernel<<<dimGrid,dimBlock>>>(k,blockRow,blockColumn,deviceA,deviceB,deviceC,deviceD,deviceFinalX,initialValue);
}
cudaMemcpy(FinalX,deviceFinalX,finalLengthX*sizeof(float),cudaMemcpyDeviceToHost);
printf(" \n");
printf(" A \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",A[i]);
}
printf(" \n");
printf(" B \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",B[i]);
}
printf(" \n");
printf(" C \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",C[i]);
}
printf(" \n");
printf(" D \n");
for(int i=0;i<chunkLength;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",D[i]);
}
double time_spent;
end=clock();
time_spent=(double)(end-begin)/CLOCKS_PER_SEC;
printf("\n the following are the solutions.");
for(int i=0;i<finalLengthX;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%f ",FinalX[i]);
}
printf("\n time used to calculate this is :%f seconds \n",time_spent);
cudaFree(deviceA);
cudaFree(deviceB);
cudaFree(deviceC);
cudaFree(deviceD);
// NOTE(review): deviceFinalX and the host arrays A/B/C/D/FinalX are not
// released here; harmless at process exit but worth freeing.
return 0;
}
|
12,729 | /**
File name: bfs_gpu_multi.cu
Author: Yuede Ji
Last update: 9:54 10-03-2015
Description: Using multi thread to implent GPU version of bfs.
Calculate the shortest distance between each other
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define N 1024 //1024 vertex number
//Using arrays to implement queue
#define N_block 32
#define imax(a, b) (a>b?a:b)
char filein[] = "/home/yuede/dataset/kron_10_4.dat";
char fileout[] = "/home/yuede/dataset/kron_10_4.m_gpu";
//Using arrays to implement queue
//int q[N];
int edge[N][N];
//int visit[N];
int dist[N][N];
// One BFS per thread: thread `index` runs a full breadth-first search
// from source vertex `index` over the zero-terminated adjacency lists in
// edg (N x N, row per vertex) and writes hop counts into row `index` of
// dis. q/vis are thread-local (local memory) circular queue and visited
// flags.
// Fix: the original had no bounds guard, but the launch configuration
// can create more than N threads; indices >= N then wrote vis/dis out
// of bounds. Threads past N now exit immediately.
__global__ void bfs(int *edg, int *dis)
{
    int q[N];      // circular frontier queue
    int vis[N];    // visited flags
    memset(vis, 0, N*sizeof(int));
    memset(q, 0, N*sizeof(int));
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index >= N)
        return; // grid tail: no source vertex for this thread
    q[0] = index;
    vis[index] = 1;
    int l = 1;     // current queue size
    int front = 0; // index of the front element
    int end = 0;   // index of the last element
    printf("index = %d\n", index);
    while (l > 0)
    {
        int cur = q[front];
        ++front;
        --l;
        if (front >= N)
            front -= N;
        // adjacency list of cur is terminated by the first 0 entry
        for (int i = 0; edg[cur*N + i] != 0; ++i)
        {
            int v = edg[cur*N + i];
            printf("vis[%d] = %d\n", v, vis[v]);
            if (vis[v])
                continue;
            dis[index*N + v] = dis[index*N + cur] + 1;
            ++end;
            if (end >= N)
                end -= N;
            q[end] = v;
            vis[v] = 1;
            ++l;
        }
    }
    printf("index = %d finished\n", index);
}
// Host driver: load the adjacency-list graph file, launch one BFS per
// source vertex on the GPU, and write the symmetrized pairwise distance
// matrix to fileout.
int main()
{
    FILE *fp_in = fopen(filein, "r");
    int v, e;
    int num_v=0;
    memset(edge, 0, N*N*sizeof(int));
    // each record: "vertex degree" followed by `degree` neighbour ids
    while(fscanf(fp_in, "%d %d", &v, &e)!=EOF)
    {
        ++num_v;
        for(int i=0; i<e; ++i)
        {
            int v1;
            fscanf(fp_in, "%d", &v1);
            edge[v][i] = v1;//v->v1
        }
    }
    fclose(fp_in);
    int *dev_edge;
    int *dev_dist;
    //allocate memory on GPU
    cudaMalloc( (void **) &dev_edge, N*N*sizeof(int));
    cudaMalloc( (void **) &dev_dist, N*N*sizeof(int));
    //initialize GPU memory
    cudaMemset( dev_dist, 0, N*N*sizeof(int));
    //copy edge from CPU to GPU
    cudaMemcpy(dev_edge, edge, N*N*sizeof(int), cudaMemcpyHostToDevice);
    // Launch exactly N threads, one source vertex each. The original used
    // <<<N_block, (N+N_block-1)/N_block>>> = <<<32, 33>>>, i.e. 1056
    // threads, sending indices >= N out of bounds inside the kernel.
    bfs<<<N_block, N / N_block>>>(dev_edge, dev_dist);
    // cudaMemcpy synchronizes with the kernel on the default stream
    cudaMemcpy(dist, dev_dist, N*N*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_edge);
    cudaFree(dev_dist);
    FILE *fp_out = fopen(fileout, "w");
    for(int i=0; i<num_v; ++i)
    {
        fprintf(fp_out, "%d", i);
        for(int j=0; j<num_v; ++j)
            fprintf(fp_out, " %d", imax(dist[i][j], dist[j][i]));
        fprintf(fp_out, "\n");
    }
    fclose(fp_out);
    printf("Finished!\n");
    return 0;
}
|
12,730 | #include <bits/stdc++.h>
// Enumerate every CUDA device and print its key capability limits.
int main() {
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Number: %d\n", dev);
        printf("\tDevice name: %s\n", prop.name);
        printf("\tTotalGlobalMem: %lu\n", prop.totalGlobalMem);
        printf("\tConst Mem : %lu\n", prop.totalConstMem);
        printf("Max shared mem for blocks %lu\n", prop.sharedMemPerBlock);
        printf("Max regs per block %d\n", prop.regsPerBlock);
        printf("Max thread per block %d\n", prop.maxThreadsPerBlock);
        printf("MultiProcessorCount : %d\n", prop.multiProcessorCount);
        printf("MaxThreadsDim %d %d %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("MaxGridSize %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
    }
}
|
12,731 | #include <cuda.h>
// Stress the CUDA allocator: 100000 back-to-back cudaMalloc/cudaFree
// cycles of 1 KiB each.
int main() {
    void *buf;
    for (int iter = 0; iter < 100000; ++iter) {
        cudaMalloc(&buf, 1024);
        cudaFree(buf);
    }
}
|
12,732 | #include <iostream>
#include <cuda_runtime.h>
// Constante dx
#define dx 0.001
// Tiempos t
#define STEPS 10
/* Read the grid from the hard-coded file "initial.txt".
 * First line: "M N"; then M*N floats.
 * tipo selects the allocation/layout:
 *   tipo == 0 : plain new[] host buffer, values read in file order
 *   tipo == 1 : pinned host buffer (cudaMallocHost), file order
 *   tipo == 2 : pinned host buffer, values stored transposed
 *               (f1[n*M + m]) — presumably column-major for the
 *               vertical-stream path; TODO confirm against caller.
 * Outputs *f (caller owns the buffer; free with delete[] or
 * cudaFreeHost depending on tipo), *M and *N. */
void Read(float **f, int *M, int *N, int tipo=0) {
FILE *fp;
fp = fopen("initial.txt\0", "r");
fscanf(fp, "%d %d\n", M, N);
float *f1;
int size = (*M) * (*N);
if (tipo == 0)
f1 = new float[size];
else
cudaMallocHost(&f1, sizeof(float)* size); //pinned memory
if(tipo == 2){
// transposed fill: element (m, n) of the file lands at f1[n*M + m]
for(int m = 0; m < (*M); m++){
for(int n = 0; n < (*N); n++){
fscanf(fp, "%f ", &(f1[n*(*M) + m]));
// printf("%d \n", n*(*M) + m);
}
}
} else{
for(int i = 0; i < size; i++){
fscanf(fp, "%f ", &(f1[i]));
// printf("%d ", i*4 + x);
}
}
fclose(fp);
*f = f1;
}
/* Write an integer grid to `filename`: a "N M" header line followed by
 * the M*N values space-separated, newline-terminated. */
void Write(int *f, int M, int N, const char *filename) {
    FILE *fp = fopen(filename, "w");
    fprintf(fp, "%d %d\n", N, M);
    const int total = M * N;
    // all but the last value get a trailing space
    for (int i = 0; i + 1 < total; i++) {
        fprintf(fp, "%d ", f[i]);
    }
    fprintf(fp, "%d\n", f[total - 1]);
    fclose(fp);
}
// Mathematical modulus: returns a value in [0, b) for b > 0, even when
// a is negative. The original returned b + a for negative a, which is
// wrong whenever a < -b (e.g. modulo(-9, 4) gave -5); reducing with %
// first makes it correct for any negative a.
__host__ __device__ int modulo(int a, int b){
    int r = a % b;
    return (r < 0) ? r + b : r;
}
// Debug print of the grid with one decimal per value, M rows of N
// columns, followed by a separator line.
void imprimir_malla(float *f, int N , int M){
    for (int row = 0; row < M; ++row) {
        for (int col = 0; col < N; ++col) {
            printf("%.1f ", f[col + row*M]);
        }
        printf("\n");
    }
    printf("-----\n");
}
// Debug print of the grid, transposed indexing (f[col + row*M]), two
// decimals per value, N rows of M columns, then a separator line.
void imprimir_malla_t(float *f, int N , int M){
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < M; ++col) {
            printf("%.2f ", f[col + row*M]);
        }
        printf("\n");
    }
    printf("--------\n");
}
/* CPU reference for one derivative step: central difference along x with
 * periodic boundary, f_out = (f[x+1] - f[x-1]) / (2*dx), matching the GPU
 * kernels (kernel_1/kernel_2 compute (siguiente - anterior)/(2*dx)).
 * Fix: the original ADDED the two neighbours instead of subtracting them,
 * so the CPU path disagreed with every GPU path. */
void CPU_1_step(float *f_in, float *f_out, int N, int M){
    int x, y;
    for (int i = 0; i < N*M; i++){
        x = i % N; // column (periodic)
        y = i / N; // row
        f_out[i] = (f_in[modulo(x+1, N) + y*N] - f_in[modulo(x-1, N) + y*N]) /(2*dx);
    }
}
/* CPU driver: read the grid, apply CPU_1_step STEPS times with ping-pong
 * buffers, and report the elapsed time.
 * Fix: the original executed "f_out = f_in;" after the loop, aliasing the
 * two buffers, so "delete[] f_in; delete[] f_out;" double-freed one buffer
 * and leaked the other. After the final swap the result already lives in
 * f_in, so the aliasing assignment is simply dropped. */
void CPU(){
    int M, N;
    float *f_in, *f_out, *temp;
    clock_t t1, t2;
    double ms;
    Read(&f_in, &M, &N,0);
    f_out = new float[N*M];
    t1 = clock();
    for(int step = 0; step< STEPS; step++){
        CPU_1_step(f_in, f_out, N,M);
        // ping-pong: the next step reads what this step wrote
        temp = f_out;
        f_out = f_in;
        f_in = temp;
    }
    t2 = clock();
    ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
    printf("Tiempo CPU: %f[ms]\n", ms);
    delete[] f_in;
    delete[] f_out;
}
/* GPU processing, single stream.
 * One thread per row: walks the row keeping a sliding prev/curr/next
 * window and writes the periodic central difference
 * (next - prev) / (2*dx) for every column. */
__global__ void kernel_1(float *f, float *f_out, int N, int M){
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= M) {
        return; // one thread per row only
    }
    float prev = f[modulo(-1, N) + tid*N]; // wrap-around left neighbour
    float curr = f[0 + tid*N];
    float next;
    for (int col = 0; col < N; ++col) {
        next = f[modulo(col+1, N) + tid*N];
        f_out[col + tid*N] = (next - prev) / (2.0*dx);
        prev = curr;
        curr = next;
    }
}
/* Single-stream GPU driver: upload the grid, run kernel_1 STEPS times
 * with device-side ping-pong buffers, time with CUDA events, download
 * the result.
 * Fix: the original executed "f_out = f;" after the loop, aliasing the
 * two device buffers; the subsequent cudaFree(f) and cudaFree(f_out)
 * then freed the same pointer twice and leaked the other buffer. After
 * the final swap the result is already in f (which is what is copied
 * back), so the aliasing assignment is removed. */
void GPU_1_stream(){
    printf("gpu 1\n");
    cudaEvent_t ct1, ct2;
    float dt;
    int M, N;
    float *f_host, *f_hostout, *f, *f_out, *temp;
    int gs, bs = 256;
    Read(&f_host, &M, &N,0);
    gs = (int)ceil((float) M / bs); // one thread per row
    cudaMalloc((void**)&f, M * N * sizeof(float));
    cudaMemcpy(f, f_host, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&f_out, M * N * sizeof(float));
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    // kernel launches with device-side ping-pong
    for (int i = 0 ; i< STEPS; i++){
        kernel_1<<<gs, bs>>>(f, f_out, N, M);
        temp = f_out;
        f_out = f;
        f = temp;
    }
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    f_hostout = new float[M * N];
    cudaMemcpy(f_hostout, f, M * N * sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "Tiempo " << ": " << dt << "[ms]" << std::endl;
    cudaFree(f);
    cudaFree(f_out);
    delete[] f_host;
    delete[] f_hostout;
}
/* Stream variant of kernel_1: identical row-wise periodic central
 * difference, but guarded with tid < M/4 because each of the four
 * streams launches this kernel on one quarter of the rows (the pointer
 * passed in is already offset to that quarter's first row). */
__global__ void kernel_2(float *f, float *f_out, int N, int M){
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < M/4){ // one thread per row of this quarter
float anterior = f[modulo(-1, N) + tid*N]; // wrap-around left neighbour
float actual = f[0 + tid*N];
float siguiente;
for (int i = 0; i< N; i++){
siguiente = f[modulo(i+1, N) + tid*N];
f_out[i + tid*N] = (siguiente - anterior) / (2.0*dx); //dx
anterior = actual;
actual = siguiente;
}
}
}
/* GPU driver, 4 streams with a horizontal (row-band) split: each stream
 * copies, processes (kernel_2) and copies back one quarter of the rows.
 * Fixes vs. the original:
 *  - f_host comes from Read(..., 1), i.e. cudaMallocHost (pinned), so it
 *    must be released with cudaFreeHost, not cudaFree;
 *  - the host result buffer `out` was leaked;
 *  - the four streams were never destroyed. */
void GPU_4_stream_horizontal(){
    printf("stream horizontal \n");
    cudaEvent_t ct1, ct2;
    float dt;
    int M, N;
    float *f_host;
    float * f_in;
    float *f_out;
    int gs, bs = 256;
    // create the four streams
    cudaStream_t str1, str2, str3, str4;
    cudaStreamCreate(&str1);
    cudaStreamCreate(&str2);
    cudaStreamCreate(&str3);
    cudaStreamCreate(&str4);
    Read(&f_host, &M, &N,1); // pinned host buffer for async copies
    gs = (int)ceil((float) (M/4) / bs);
    int size = M/4 * N; // elements per stream (one row band)
    cudaMalloc(&f_in, M * N* sizeof(float));
    cudaMalloc(&f_out, M * N* sizeof(float));
    float *out = new float[N*M];
    float *temp;
    // host to device, one band per stream
    cudaMemcpyAsync(&f_in[size*0], &f_host[size*0], size * sizeof(float), cudaMemcpyHostToDevice, str1);
    cudaMemcpyAsync(&f_in[size*1], &f_host[size*1], size * sizeof(float), cudaMemcpyHostToDevice, str2);
    cudaMemcpyAsync(&f_in[size*2], &f_host[size*2], size * sizeof(float), cudaMemcpyHostToDevice, str3);
    cudaMemcpyAsync(&f_in[size*3], &f_host[size*3], size * sizeof(float), cudaMemcpyHostToDevice, str4);
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    // kernel launches: four bands per step, device-side ping-pong between steps
    for (int i = 0 ; i< STEPS; i++){
        kernel_2<<<gs, bs,0,str1>>>(f_in, f_out, N, M);
        kernel_2<<<gs, bs,0,str2>>>(&f_in[size*1], &f_out[size*1], N, M);
        kernel_2<<<gs, bs,0,str3>>>(&f_in[size*2], &f_out[size*2], N, M);
        kernel_2<<<gs, bs,0,str4>>>(&f_in[size*3], &f_out[size*3], N, M);
        temp = f_out;
        f_out = f_in;
        f_in = temp;
    }
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    // device to host: the latest result is in f_in after the final swap
    cudaMemcpyAsync(&out[size*0], &f_in[size*0], size * sizeof(float), cudaMemcpyDeviceToHost,str1);
    cudaMemcpyAsync(&out[size*1], &f_in[size*1], size * sizeof(float), cudaMemcpyDeviceToHost,str2);
    cudaMemcpyAsync(&out[size*2], &f_in[size*2], size * sizeof(float), cudaMemcpyDeviceToHost,str3);
    cudaMemcpyAsync(&out[size*3], &f_in[size*3], size * sizeof(float), cudaMemcpyDeviceToHost,str4);
    cudaDeviceSynchronize();
    std::cout << "Tiempo " << ": " << dt << "[ms]" << std::endl;
    cudaStreamDestroy(str1);
    cudaStreamDestroy(str2);
    cudaStreamDestroy(str3);
    cudaStreamDestroy(str4);
    cudaFreeHost(f_host); // pinned: allocated with cudaMallocHost
    cudaFree(f_in);
    cudaFree(f_out);
    delete[] out;
}
/* Column-band stream kernel: each of the four streams owns N/4 columns
 * of the transposed (column-major, f[col*M + row]) grid; `str` is the
 * stream index (0..3) used to recover the global column number.
 * One thread per local column; periodic wrap at the two grid borders is
 * handled by indexing across the stream boundary with (possibly
 * negative) offsets relative to this stream's base pointer.
 * NOTE(review): the border arithmetic (e.g. f[-col_ant*M + i]) assumes
 * the four bands are contiguous in one device allocation — confirm
 * against GPU_4_stream_vertical before changing launch layout. */
__global__ void kernel_vertical(float *f, float *f_out, int N, int M, int str){
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < N/4){ // one thread per column of this stream's band
int col, col_ant;
float anterior, siguiente;
col = str*N/4 + tid;          // global column index
col_ant = modulo(col-1, N);   // periodic left neighbour column
// printf("tid: %d\n", tid);
for (int i = 0; i< M; i++){
siguiente = f[(tid+1)*M + i];
// border handling: col % (N-1) == 0 iff col is 0 or N-1
if(!modulo(col, N-1)){
// Each stream treats f[0] as the first value of its own band
if(!col){ // left grid border: wrap to the last column
anterior = f[col_ant*M + i];
} else{ // right grid border: wrap forward to column 0
anterior = f[(tid-1)*M + i];
siguiente = f[-col_ant*M + i];
}
} else{
anterior = f[(tid-1)*M + i];
}
f_out[tid*M + i] = (siguiente - anterior) / (2.0*dx); //dx
}
}
}
/* GPU driver, 4 streams with a vertical (column-band) split over the
 * transposed grid read with Read(..., 2). Each stream uploads, processes
 * (kernel_vertical) and downloads one band of N/4 columns.
 * NOTE(review): the cudaDeviceSynchronize() after every async copy and
 * every step serializes the streams, defeating copy/compute overlap;
 * also the host buffer `out` is never freed and the streams are never
 * destroyed — worth fixing. */
void GPU_4_stream_vertical(){
printf("Stream vertical \n");
cudaEvent_t ct1, ct2;
float dt;
int M, N;
float *f_host;
float *f_in;
float *f_out;
int gs, bs = 256;
// create the four streams
cudaStream_t str1, str2, str3, str4;
cudaStreamCreate(&str1);
cudaStreamCreate(&str2);
cudaStreamCreate(&str3);
cudaStreamCreate(&str4);
Read(&f_host, &M, &N, 2); // pinned, transposed layout
// imprimir_malla_t(f_host, N, M);
gs = (int)ceil((float) (N/4) / bs);
int size = M * N/4; // elements per column band
cudaMalloc(&f_in, M * N * sizeof(float));
cudaMalloc(&f_out, M * N * sizeof(float));
float *out = new float[N*M];
float *temp;
clock_t t1, t2;
double ms;
t1 = clock();
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
cudaEventRecord(ct1);
// host to device, one band per stream (fully serialized by the syncs)
cudaMemcpyAsync(&f_in[size*0], &f_host[size*0], size * sizeof(float), cudaMemcpyHostToDevice, str1);
cudaDeviceSynchronize();
cudaMemcpyAsync(&f_in[size*1], &f_host[size*1], size * sizeof(float), cudaMemcpyHostToDevice, str2);
cudaDeviceSynchronize();
cudaMemcpyAsync(&f_in[size*2], &f_host[size*2], size * sizeof(float), cudaMemcpyHostToDevice, str3);
cudaDeviceSynchronize();
cudaMemcpyAsync(&f_in[size*3], &f_host[size*3], size * sizeof(float), cudaMemcpyHostToDevice, str4);
cudaDeviceSynchronize();
// kernel launches: four bands per step, ping-pong between steps
for (int i = 0 ; i< STEPS; i++){
kernel_vertical<<<gs, bs, 0, str1>>>(f_in, f_out, N, M, 0);
kernel_vertical<<<gs, bs, 0, str2>>>(&f_in[size*1], &f_out[size*1], N, M, 1);
kernel_vertical<<<gs, bs, 0, str3>>>(&f_in[size*2], &f_out[size*2], N, M, 2);
kernel_vertical<<<gs, bs, 0, str4>>>(&f_in[size*3], &f_out[size*3], N, M, 3);
cudaDeviceSynchronize();
temp = f_out;
f_out = f_in;
f_in = temp;
}
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
t2 = clock();
ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
std::cout << "Tiempo: " << ms << "[ms]" << std::endl;
// device to host: the latest result is in f_in after the final swap
cudaMemcpyAsync(&out[size*0], &f_in[size*0], size * sizeof(float), cudaMemcpyDeviceToHost,str1);
cudaMemcpyAsync(&out[size*1], &f_in[size*1], size * sizeof(float), cudaMemcpyDeviceToHost,str2);
cudaMemcpyAsync(&out[size*2], &f_in[size*2], size * sizeof(float), cudaMemcpyDeviceToHost,str3);
cudaMemcpyAsync(&out[size*3], &f_in[size*3], size * sizeof(float), cudaMemcpyDeviceToHost,str4);
//Write(out, M, N, "initial_S.txt\0");
cudaDeviceSynchronize();
// imprimir_malla_t(out, N, M);
std::cout << "Tiempo " << ": " << dt << "[ms]" << std::endl;
cudaFreeHost(f_host);
cudaFree(f_in);
cudaFree(f_out);
return;
}
//--------------------------------------------------------------------------------
/* Main program: run only the vertical 4-stream GPU variant. The other
 * implementations are kept available but commented out; the trailing
 * numbers record previously measured times in ms. */
int main(int argc, char **argv){
//ejecucion cpu
//CPU(); //212
// GPU_1_stream(); //23 1784
// GPU_4_stream_horizontal(); //23 1442
GPU_4_stream_vertical();
return 0;
}
12,733 | #include <cuda.h>
#include <stdio.h>
// Dense matrix multiply C = A * B (row-major): one thread computes one
// element of C by a dot product of row `row` of A with column `col` of B.
// Expects a 2-D launch covering NumberRows_A x NumberCols_B.
__global__ void multMatCUDA(double *d_a, double *d_b, double *d_c, int NumberRows_A, int NumberCols_A, int NumberCols_B)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= NumberRows_A || col >= NumberCols_B)
    {
        return; // grid tail: outside C
    }
    double acc = 0;
    for (int k = 0; k < NumberCols_A; k++)
    {
        acc += d_a[row * NumberCols_A + k] * d_b[k * NumberCols_B + col];
    }
    d_c[row * NumberCols_B + col] = acc;
}
// Host wrapper for multMatCUDA: allocates device buffers, uploads A and
// B, launches the kernel, downloads C, and frees the buffers.
// h_a is NumberRows_A x NumberCols_A, h_b is NumberCols_A x NumberCols_B,
// h_c receives NumberRows_A x NumberCols_B (all row-major).
void Mult_Matrix_Cuda(double *h_a, double *h_b, double *h_c, int NumberRows_A, int NumberCols_A, int NumberCols_B)
{
    int blocksize = 32; // 32x32 = 1024 threads per block
    double *d_a, *d_b, *d_c;
    // Allocate and populate device memory
    cudaMalloc(&d_a, sizeof(double) * NumberRows_A * NumberCols_A);
    cudaMalloc(&d_b, sizeof(double) * NumberCols_A * NumberCols_B);
    cudaMalloc(&d_c, sizeof(double) * NumberRows_A * NumberCols_B);
    cudaMemcpy(d_a, h_a, NumberRows_A * NumberCols_A * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, NumberCols_A * NumberCols_B * sizeof(double), cudaMemcpyHostToDevice);
    dim3 dimBlock(blocksize, blocksize, 1);
    // Ceil-division grid: the original "(x / blocksize) + 1" always
    // launched an extra (idle) block row/column when x was an exact
    // multiple of blocksize.
    dim3 dimGrid((NumberCols_B + blocksize - 1) / blocksize,
                 (NumberRows_A + blocksize - 1) / blocksize);
    multMatCUDA<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, NumberRows_A, NumberCols_A, NumberCols_B);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(h_c, d_c, NumberRows_A * NumberCols_B * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
12,734 | #include "includes.h"
// Block-wise minimum reduction in shared memory: each block reduces
// blockDim.x elements of d_in and writes its minimum to d_out[blockIdx.x].
// Preconditions (not checked): blockDim.x is a power of two (the halving
// loop otherwise skips elements), and d_in has at least
// gridDim.x * blockDim.x elements (no tail guard on the load).
__global__ void find_min_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
// (must be at least blockDim.x * sizeof(float))
extern __shared__ float sdata[];
const int threadGId = blockIdx.x * blockDim.x + threadIdx.x;
const int threadLId = threadIdx.x;
// load shared mem from global mem
sdata[threadLId] = d_in[threadGId];
__syncthreads(); // make sure entire block is loaded!
// tree reduction in shared mem: halve the active range each pass
for (unsigned int blockHalfSize = blockDim.x / 2; blockHalfSize > 0; blockHalfSize >>= 1) {
if (threadLId < blockHalfSize) {
sdata[threadLId] = min(sdata[threadLId], sdata[threadLId + blockHalfSize]);
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (threadLId == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
// Device-global counter used by the indirect-call demo below.
__device__ int val = 5;
// Bump the counter by one.
__device__ void inc_val()
{
    val += 1;
}
// Invoke inc_val through a type-erased function pointer; the volatile
// void* round-trip keeps the compiler from folding the indirect call
// into a direct one.
__device__ void do_stuff()
{
    volatile void* erased = (void*) inc_val;
    void (*call)() = (void (*)())erased;
    call();
}
// Read back the counter.
__device__ int get_val()
{
    return val;
}
|
12,736 | /*
Contributors: Yizhao Gao (yizhaotsccsj@gmail.com)
*/
#include <stdio.h>
#include <math.h>
#include "io.cuh"
#include "kde.cuh"
#include <sys/time.h>
//dpbKDE inputPopFile inputHHFile outputFileName outputCount xMin yMin xMax yMax cellSize bandwidth mapLogic
// Dual-population-based KDE driver: reads household (and optionally
// person) point files, computes case and population density surfaces on
// a nRow x nCol grid (via kde() when bandwidth > 1, or simple binning
// otherwise), masks cells containing no input point, and writes the raw
// case surface, a GeoTIFF of the case/population ratio, the population
// surface, and a count summary line.
// Fix: the hasValue mask was allocated with sizeof(float) instead of
// sizeof(bool), over-allocating by 4x; corrected below.
int dpbKDE(char * inputPopFileName, char * inputHHFileName, char * outputFileName, char * outputCount, float xMin, float yMin, float xMax, float yMax, float cellSize, float bandwidth, char * subPop, char * mapLogic, char * personColName, int epsgCode)
{
FILE * inputPopData;
FILE * inputHHData;
char outputRatioGTiffFile[200];
char outputPopulationFile[200];
bool hhOnly; // true: household-level only, no person file
float * xCol;
float * yCol;
float * pCount; // population weight per point
float * cCount; // case weight per point
int nRow, nCol, nHH, nPop, nCase = 0;
float * caseDen;
float * popDen;
struct timeval time1;
gettimeofday(&time1, NULL);
// "-HHO" sentinel means: no person file, household-only mode
if(strcmp(inputPopFileName, "-HHO") == 0)
{
hhOnly = true;
}
else
{
hhOnly = false;
if(NULL == (inputPopData = fopen(inputPopFileName, "r")))
{
printf("ERROR: Can't open input population file: %s\n", inputPopFileName);
exit(1);
}
}
if(NULL == (inputHHData = fopen(inputHHFileName, "r")))
{
printf("ERROR: Can't open input household file: %s\n", inputHHFileName);
exit(1);
}
sprintf(outputPopulationFile, "%s_P", outputFileName);
sprintf(outputRatioGTiffFile, "%s.tif", outputFileName);
// Grid dimensions; snap the upper-right corner to whole cells
nCol = ceil((xMax - xMin)/cellSize);
nRow = ceil((yMax - yMin)/cellSize);
xMax = xMin + cellSize * nCol;
yMax = yMin + cellSize * nRow;
//Points
nHH = getHHNum(inputHHData);
if(NULL == (xCol = (float *)malloc(sizeof(float) * nHH)))
{
printf("ERROR: Out of memory in line %d!\n", __LINE__);
exit(1);
}
if(NULL == (yCol = (float *)malloc(sizeof(float) * nHH)))
{
printf("ERROR: Out of memory in line %d!\n", __LINE__);
exit(1);
}
if(NULL == (pCount = (float *)malloc(sizeof(float) * nHH)))
{
printf("ERROR: Out of memory in line %d!\n", __LINE__);
exit(1);
}
if(NULL == (cCount = (float *)malloc(sizeof(float) * nHH)))
{
printf("ERROR: Out of memory in line %d!\n", __LINE__);
exit(1);
}
bool succeed;
// Load points; the reader variant depends on household-only mode and
// whether a sub-population filter is requested ("*" = everyone)
if(hhOnly)
{
if(strcmp(subPop, "*") == 0)
{
succeed = readPointsH(inputHHData, nHH, nCase, xCol, yCol, pCount, cCount, mapLogic);
}
else
{
succeed = readPointsHInSubPop(inputHHData, nHH, nCase, xCol, yCol, pCount, cCount, subPop, mapLogic);
}
if(!succeed)
{
printf("File involved: %s\n", inputHHFileName);
exit(1);
}
}
else
{
if(strcmp(subPop, "*") == 0)
{
succeed = readPointsP(inputHHData, inputPopData, nHH, nPop, nCase, xCol, yCol, pCount, cCount, mapLogic, personColName);
}
else
{
succeed = readPointsPInSubPop(inputHHData, inputPopData, nHH, nPop, nCase, xCol, yCol, pCount, cCount, subPop, mapLogic, personColName);
}
if(!succeed)
{
printf("File involved: %s and\\or %s\n", inputHHFileName, inputPopFileName);
exit(1);
}
}
fclose(inputHHData);
if(!hhOnly)
{
fclose(inputPopData);
}
struct timeval time2;
gettimeofday(&time2, NULL);
//KDE surfaces
if(NULL == (caseDen = (float *) malloc(sizeof(float) * nRow * nCol)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
if(NULL == (popDen = (float *) malloc(sizeof(float) * nRow * nCol)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
for(int i = 0; i < nRow * nCol; i++)
{
caseDen[i] = 0;
popDen[i] = 0;
}
int x, y;
if(bandwidth > 1)
{
kde(caseDen, popDen, nRow, nCol, cellSize, xMin, yMax, xCol, yCol, pCount, cCount, nHH, bandwidth);
// Mask out cells that contain no input point at all
bool * hasValue;
// sizeof(bool), not sizeof(float): the original over-allocated 4x
if(NULL == (hasValue = (bool *) malloc (sizeof(bool) * nRow * nCol)))
{
printf("ERROR: Out of memory in %d!\n", __LINE__);
exit(1);
}
for(int i = 0; i < nCol * nRow; i++)
{
hasValue[i] = false;
}
for(int i = 0; i < nHH; i++)
{
x = (xCol[i] - xMin) / cellSize;
y = (yMax - yCol[i]) / cellSize;
if(x > -1 && x < nCol && y > -1 && y < nRow && !hasValue[y * nCol + x])
hasValue[y * nCol + x] = true;
}
for(int i = 0; i < nRow * nCol; i++)
{
if(!hasValue[i])
{
caseDen[i] = 0;
popDen[i] = 0;
}
}
free(hasValue);
}
else
{
// Degenerate bandwidth: simple per-cell binning instead of KDE
for(int i = 0; i < nHH; i++)
{
x = (xCol[i] - xMin) / cellSize;
y = (yMax - yCol[i]) / cellSize;
if(x >= 0 && x < nCol && y >= 0 && y < nRow)
{
caseDen[y * nCol + x] += cCount[i];
popDen[y * nCol + x] += pCount[i];
}
}
}
struct timeval time3;
gettimeofday(&time3, NULL);
printf("Input time:\t%lfms\n", ((&time2)->tv_sec - (&time1)->tv_sec) * 1000 + (double)((&time2)->tv_usec - (&time1)->tv_usec) / 1000);
printf("KDE time:\t%lfms\n", ((&time3)->tv_sec - (&time2)->tv_sec) * 1000 + (double)((&time3)->tv_usec - (&time2)->tv_usec) / 1000);
// Raw case-density surface (binary floats)
FILE * outputFile;
if(NULL == (outputFile = fopen(outputFileName, "wb")))
{
printf("ERROR: Can't open output file");
exit(1);
}
fwrite(caseDen, sizeof(float), nRow * nCol, outputFile);
fclose(outputFile);
// GeoTIFF of the case/population ratio
writeGeoTiffRatio(outputRatioGTiffFile, caseDen, popDen, nRow, nCol, xMin, yMax, cellSize, epsgCode);
// Population-density surface (binary floats)
if(NULL == (outputFile = fopen(outputPopulationFile, "wb")))
{
printf("ERROR: Can't open output population file: %s\n", outputPopulationFile);
exit(1);
}
fwrite(popDen, sizeof(float), nRow * nCol, outputFile);
fclose(outputFile);
// Append a summary line (legacy likelihood bookkeeping)
if(NULL == (outputFile = fopen(outputCount, "a")))
{
printf("ERROR: Can't open output population and count file: %s\n", outputCount);
exit(1);
}
if(hhOnly)
{
fprintf(outputFile, "%s %d %d\n", outputFileName, nHH, nCase);
}
else
{
fprintf(outputFile, "%s %d %d\n", outputFileName, nPop, nCase);
}
fclose(outputFile);
//free
free(xCol);
free(yCol);
free(pCount);
free(cCount);
free(caseDen);
free(popDen);
return 0;
}
|
12,737 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define NUM pow(2,15)
typedef struct point {
float x;
float y;
}POINT;
typedef struct distance {
float da;
float db;
float dc;
}DISTANCE;
DISTANCE calculate_euclidean(POINT,POINT,POINT,POINT);
__global__ void calculate_trail(DISTANCE *d, POINT *point1, POINT *point2, POINT *point3, POINT *result) {
    // Trilateration kernel: from the distances (da, db, dc) of one sample to
    // the three anchors point1..point3, recover the sample's 2-D position.
    // The position is written back in place over d[index] (.da = x, .db = y);
    // every 4th thread then averages its group of 4 positions into result[index/4].
    // NOTE(review): assumes one thread per DISTANCE entry and that each group
    // of 4 lies inside one block (the __syncthreads() below only orders
    // threads within a block) -- confirm at the launch site.
    POINT ex, ey, temp;
    float d12, i, j, x, y, a, b, c, dp;
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    // Build an orthonormal frame anchored at point1: ex along point1->point2,
    // ey the perpendicular component of point1->point3.
    d12 = powf(powf(point2->x-point1->x,2) + powf(point2->y-point1->y,2),0.5);
    ex.x = (point2->x-point1->x)/d12;
    ex.y = (point2->y-point1->y)/d12;
    i = ex.x * (point3->x-point1->x) + ex.y * (point3->y-point1->y);
    temp.x = point3->x-point1->x-i*ex.x;
    temp.y = point3->y-point1->y-i*ex.y;
    ey.x = temp.x / powf(powf(temp.x,2)+powf(temp.y,2),0.5);
    ey.y = temp.y / powf(powf(temp.x,2)+powf(temp.y,2),0.5);
    j = ey.x * (point3->x-point1->x) + ey.y * (point3->y-point1->y);
    a = powf(d[index].da,2);
    b = powf(d[index].db,2);
    c = powf(d[index].dc,2);
    dp = powf(d12,2);
    x = ( a - b + dp)/ (2 * d12);
    // Fixed: the original mixed pow() (double) into this otherwise
    // single-precision expression; use powf consistently.
    y = (a - c + powf(i,2) + powf(j,2))/(2*j) - i*x/j;
    d[index].da = point1->x + x*ex.x + y*ey.x;   // world-space x
    d[index].db = point1->y + x*ex.y + y*ey.y;   // world-space y
    // Wait till all the positions in this block are calculated
    __syncthreads();
    if(index%4 == 0) {
        int temp_index = index/4;
        float x_sum = 0.0, y_sum = 0.0;
        // Average this thread's group of 4 positions.
        // (Loop variable renamed: the original reused `i`, shadowing the
        // float `i` used for the frame construction above.)
        for(int k=0;k<4;k++) {
            x_sum += d[index+k].da;
            y_sum += d[index+k].db;
        }
        result[temp_index].x = x_sum/4.0;
        result[temp_index].y = y_sum/4.0;
    }
}
int main()
{
// Host driver: builds NUM sample positions on a diagonal line, converts each
// to anchor distances, and asks the GPU to reconstruct positions from them.
float dx = 0.5, dy =0.5;
POINT a,b,c;
POINT *p = (POINT*) malloc(sizeof(POINT)*NUM);
POINT *result_p = (POINT*) malloc(sizeof(POINT)*NUM);
DISTANCE *d =(DISTANCE *) malloc(sizeof(DISTANCE)*NUM);
POINT *point1, *point2, *point3;
DISTANCE *cuda_d;
POINT *cuda_p;
struct timeval t1, t2;
// NOTE(review): U = 3/2 is integer division, so U == 1 and only U*V = 128 of
// the NUM (2^15) DISTANCE entries are ever processed by the kernel.  Looks
// unintended -- confirm whether U should be NUM/V.
int U=3/2,V=128;
// Allocate memory on GPU
cudaMalloc(&cuda_d, sizeof(DISTANCE)*NUM);
cudaMalloc((void **)&cuda_p, sizeof(POINT)*NUM);
cudaMalloc((void **)&point1, sizeof(POINT));
cudaMalloc((void **)&point2, sizeof(POINT));
cudaMalloc((void **)&point3, sizeof(POINT));
// Fixed anchor positions.
a.x = 4.0;
a.y = 4.0;
b.x = 9.0;
b.y = 7.0;
c.x = 9.0;
c.y = 1.0;
p[0].x = 2.5;
p[0].y = 1.0;
d[0] = calculate_euclidean(p[0],a,b,c);
// Generate sequence of positions by adding delta
for(int i=1;i<NUM;i++) {
p[i].x = p[i-1].x + dx;
p[i].y = p[i-1].y + dy;
d[i] = calculate_euclidean(p[i],a,b,c);
}
printf("\n\nResult from self-verification :\n");
for(int i=0;i<NUM;i++) {
printf("\n%.2f %.2f",p[i].x,p[i].y);
}
// Copy data to GPU memory
cudaMemcpy(cuda_d, d, sizeof(DISTANCE)*NUM, cudaMemcpyHostToDevice);
cudaMemcpy(point1, &a, sizeof(POINT), cudaMemcpyHostToDevice);
cudaMemcpy(point2, &b, sizeof(POINT), cudaMemcpyHostToDevice);
cudaMemcpy(point3, &c, sizeof(POINT), cudaMemcpyHostToDevice);
gettimeofday(&t1, 0);
// Calling Device Function
// NOTE(review): kernel launches are asynchronous and there is no sync before
// t2, so this interval measures launch overhead, not kernel execution; the
// following cudaMemcpy is what actually waits for the kernel.
calculate_trail<<<U,V>>>(cuda_d,point1,point2,point3,cuda_p);
gettimeofday(&t2, 0);
cudaMemcpy(result_p, cuda_p, sizeof(POINT)*NUM, cudaMemcpyDeviceToHost);
// Calculate time elapsed
double time = (1000000.0*(t2.tv_sec - t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
printf("\n\nResult from GPU :\n");
// Only (U*V)/4 averaged positions were produced (4 samples per average).
for(int i=0;i<(U*V)/4;i++) {
printf("\n%.2f %.2f",result_p[i].x,result_p[i].y);
}
printf("\n\nTime elapsed : %3.3f ms",time);
printf("\n");
// Free memory
free(p);
free(result_p);
free(d);
cudaFree(cuda_d);
cudaFree(cuda_p);
cudaFree(point1);
cudaFree(point2);
cudaFree(point3);
return 0;
}
// Euclidean distances from sample point p to each of the three anchors a, b, c.
DISTANCE calculate_euclidean(POINT p, POINT a, POINT b, POINT c) {
    DISTANCE dist;
    dist.da = sqrt(pow((a.x - p.x), 2) + pow((a.y - p.y), 2));
    dist.db = sqrt(pow((b.x - p.x), 2) + pow((b.y - p.y), 2));
    dist.dc = sqrt(pow((c.x - p.x), 2) + pow((c.y - p.y), 2));
    return dist;
}
|
12,738 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
// CUDA kernel. Each thread takes care of one element of c
// CUDA kernel. Despite the historical name, this SQUARES each element:
// c[i] = a[i] * a[i].  One element per thread; threads past n do nothing.
__global__ void vecAdd(double *a, double *c, long long n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] * a[idx];
}
int main( int argc, char* argv[] )
{
    // Benchmark: time element-wise squaring of vectors whose length doubles
    // from 2^8 up to (but excluding) 2^28; writes "n time_ms" rows to
    // parallel_square.txt.
    FILE *fptr = fopen("parallel_square.txt", "w");
    if (fptr == NULL) {            // fail loudly instead of crashing on fprintf
        fprintf(stderr, "Cannot open parallel_square.txt for writing\n");
        return 1;
    }
    long long minsize = pow(2,8);
    long long maxsize = pow(2,28);
    long long n;
    for (n = minsize; n < maxsize; n *= 2)
    {
        double *h_a;   // host input
        double *h_c;   // host output
        double *d_a;   // device input
        double *d_c;   // device output
        size_t bytes = n * sizeof(double);
        h_a = (double*)malloc(bytes);
        h_c = (double*)malloc(bytes);
        cudaMalloc(&d_a, bytes);
        cudaMalloc(&d_c, bytes);
        long long i;
        // Initialize input on host.
        for (i = 0; i < n; i++) {
            h_a[i] = sin(i);
        }
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
        int blockSize, gridSize;
        blockSize = 1024;                              // threads per block
        gridSize = (int)ceil((float)n / blockSize);    // ceil-div blocks
        cudaEventRecord(start);
        vecAdd<<<gridSize, blockSize>>>(d_a, d_c, n);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
        cudaFree(d_a);
        cudaFree(d_c);
        // Fixed: n is long long, so the matching format is %lld (was %ld).
        fprintf(fptr, "%lld %lf\n", n, milliseconds);
        // Fixed: the events were created on every iteration but never
        // destroyed, leaking event handles across the whole sweep.
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        free(h_a);
        free(h_c);
    }
    fclose(fptr);
    return 0;
}
|
12,739 | // © Arno Pähler, 2007-08
// gflops example: Simon Green (?)
// blsc from NVIDIA SDK
extern "C" {
typedef const float _F;
#define NUM_THREADS_PER_BLOCK 768
#define NUM_ITERATIONS 512
// 128 MAD instructions
#define FMAD128(a, b) \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
a = b * a + b; \
b = a * b + a; \
__shared__ float result[NUM_THREADS_PER_BLOCK];
// Synthetic FLOPS benchmark: each thread chains 16 x 128 dependent
// multiply-adds per iteration, NUM_ITERATIONS times.  The load from and
// final store to the shared `result` array keep the compiler from
// eliminating the arithmetic as dead code.
__global__ void gpuGFLOPS()
{
// this ensures the mads don't get compiled out
float a = result[threadIdx.x];
float b = 1.01f;
for (int i = 0; i < NUM_ITERATIONS; i++)
{
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
FMAD128(a, b);
}
result[threadIdx.x] = a + b;
}
////////////////////////////////////////////////////////////////////////
#define A1 0.31938153f
#define A2 -0.356563782f
#define A3 1.781477937f
#define A4 -1.821255978f
#define A5 1.330274429f
#define RSQRT2PI 0.3989422804f
// Cumulative normal distribution via a 5th-order polynomial approximation
// of the lower tail; reflected for positive arguments.
__device__ float CND(
float d){
    const float k = 1.0f / (1.0f + 0.2316419f * fabsf(d));
    const float poly = k * (A1 + k * (A2 + k * (A3 + k * (A4 + k * A5))));
    const float tail = RSQRT2PI * __expf(-0.5f * d * d) * poly;
    return (d > 0) ? 1.0f - tail : tail;
}
// Closed-form Black-Scholes price for one European call/put pair.
// S: spot, X: strike, T: time to expiry, R: risk-free rate, V: volatility.
// Results are written through the Call/Put references.
__device__ void BlackScholesBody(
float& Call, float& Put,
float S, float X, float T, float R, float V){
float sqrtT, expRT;
float d1, d2, CNDD1, CNDD2;
sqrtT = sqrtf(T);
d1 = (__logf(S / X) + (R + 0.5f * V * V) * T) / (V * sqrtT);
d2 = d1 - V * sqrtT;
CNDD1 = CND(d1);
CNDD2 = CND(d2);
expRT = __expf(- R * T);
Call = S * CNDD1 - X * expRT * CNDD2;
// Put price reuses the same CND evaluations (put-call relationship).
Put = X * expRT * (1.0f - CNDD2) - S * (1.0f - CNDD1);
}
// Prices OptN option pairs: one BlackScholesBody evaluation per option,
// distributed over all threads with a grid-stride loop.
__global__ void gpuBLSC(
float *d_Calls, float *d_Puts,
float *d_S, float *d_X, float *d_T,
float R, float V, int OptN){
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int opt = first; opt < OptN; opt += stride) {
        BlackScholesBody(d_Calls[opt], d_Puts[opt],
                         d_S[opt], d_X[opt], d_T[opt], R, V);
    }
}
////////////////////////////////////////////////////////////////////////
// Evaluates the fixed degree-4 polynomial (coefficients 1..5) at every input
// via Horner's rule; grid-stride over `size` elements.
__global__ void gpuPOLY5(
float *d_In1, float *d_Out1, int size ){
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    const float c0 = 1.f, c1 = 2.f, c2 = 3.f, c3 = 4.f, c4 = 5.f;
    for (int i = first; i < size; i += stride) {
        const float v = d_In1[i];
        float acc = c0;
        acc = (((acc * v + c1) * v + c2) * v + c3) * v + c4;
        d_Out1[i] = acc;
    }
}
// Like gpuPOLY5 but chains two Horner passes (the first pass's result feeds
// in as the leading coefficient of the second) -- 8 MADs per element.
__global__ void gpuPOLY10(
float *d_In1, float *d_Out1, int size ){
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    const float c0 = 1.f, c1 = 2.f, c2 = 3.f, c3 = 4.f, c4 = 5.f;
    for (int i = first; i < size; i += stride) {
        const float v = d_In1[i];
        float acc = c0;
        #pragma unroll
        for (int pass = 0; pass < 2; ++pass)
            acc = (((acc * v + c1) * v + c2) * v + c3) * v + c4;
        d_Out1[i] = acc;
    }
}
// Like gpuPOLY5 but chains four Horner passes -- 16 MADs per element.
__global__ void gpuPOLY20(
float *d_In1, float *d_Out1, int size ){
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    const float c0 = 1.f, c1 = 2.f, c2 = 3.f, c3 = 4.f, c4 = 5.f;
    for (int i = first; i < size; i += stride) {
        const float v = d_In1[i];
        float acc = c0;
        #pragma unroll
        for (int pass = 0; pass < 4; ++pass)
            acc = (((acc * v + c1) * v + c2) * v + c3) * v + c4;
        d_Out1[i] = acc;
    }
}
// Like gpuPOLY5 but chains eight Horner passes -- 32 MADs per element.
__global__ void gpuPOLY40(
float *d_In1, float *d_Out1, int size ){
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    const float c0 = 1.f, c1 = 2.f, c2 = 3.f, c3 = 4.f, c4 = 5.f;
    for (int i = first; i < size; i += stride) {
        const float v = d_In1[i];
        float acc = c0;
        #pragma unroll
        for (int pass = 0; pass < 8; ++pass)
            acc = (((acc * v + c1) * v + c2) * v + c3) * v + c4;
        d_Out1[i] = acc;
    }
}
////////////////////////////////////////////////////////////////////////
// In-place AXPY: d_In2[i] += Factor * d_In1[i], grid-stride over `size`.
__global__ void gpuSAXPY(
float Factor, float *d_In1, float *d_In2, int size ){
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int i = first; i < size; i += stride)
        d_In2[i] += d_In1[i] * Factor;
}
// Tiled single-precision matrix multiply C = A * B.
// wA/wB are the widths (number of columns) of A and B; each thread block
// computes one BLOCK_SIZE x BLOCK_SIZE tile of C, and each thread one
// element Cs.  Assumes blockDim = (BLOCK_SIZE, BLOCK_SIZE) and matrix
// dimensions that are multiples of BLOCK_SIZE (no edge guards).
__global__ void gpuSGEMM(
float* C, float* A, float* B, int wA, int wB ){
#define BLOCK_SIZE 16
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Walk A along its row-band and B along its column-band, one tile per step.
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Cs = 0;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Stage one tile of A and one tile of B in shared memory.
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
__syncthreads();
// Partial dot product over this tile.
for (int k = 0; k < BLOCK_SIZE; ++k)
Cs += As[ty][k] * Bs[k][tx];
// Barrier before the tiles are overwritten on the next iteration.
__syncthreads();
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Cs;
}
// Writes cosf(v) to d_Out1 and sinf(v) to d_Out2 for every input element.
__global__ void gpuTRIG(
float *d_Out1, float *d_Out2, float *d_In1, int size ){
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int i = first; i < size; i += stride) {
        const float v = d_In1[i];
        d_Out1[i] = cosf(v);
        d_Out2[i] = sinf(v);
    }
}
// d_Out1 = scale * d_In1, grid-stride over `size` elements.
__global__ void gpuScale(
float *d_Out1, _F *d_In1, _F scale, int size ){
    const int first  = blockDim.x * blockIdx.x + threadIdx.x;
    const int stride = blockDim.x * gridDim.x;
    for (int i = first; i < size; i += stride)
        d_Out1[i] = d_In1[i] * scale;
}
// for streams example: fills g_data with *factor, one element per thread.
// No bounds guard -- the launch must cover the array exactly.
__global__ void init_array(
int *g_data, int *factor){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    g_data[i] = *factor;
}
}
|
12,740 | #include<time.h>
#include<math.h>
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define NTPB 128 /* Number of Threads Per Block */
// Software atomic add for doubles, built from a 64-bit compare-and-swap
// retry loop (for GPUs without a native atomicAdd(double*)).
__device__ inline void myAtomicAdd(double *address, double value) //See CUDA official forum
{
unsigned long long oldval, newval, readback;
oldval = __double_as_longlong(*address);
newval = __double_as_longlong(__longlong_as_double(oldval) + value);
// Retry until no other thread modified *address between our read and the CAS;
// atomicCAS returns the value it observed, which seeds the next attempt.
while ((readback=atomicCAS((unsigned long long *)address, oldval, newval)) != oldval)
{
oldval = readback;
newval = __double_as_longlong(__longlong_as_double(oldval) + value);
}
}
// Monte-Carlo integrand evaluation: each thread computes
// exp(-x^2 - y^2 - z^2 - u^2 - v^2) for its sample, stages the value in the
// dynamically-sized shared buffer, then thread 0 of each block serially sums
// the block's values and folds them into *I1 with a single atomicAdd.
// NOTE(review): the reduction loop reads exactly NTPB entries, so the kernel
// assumes blockDim.x == NTPB and a shared-memory size of NTPB floats --
// confirm at the launch site.
__global__ void integrater(float *x, float *y, float *z, float *u, float *v, float *I1, int n){
int i;
int iglob = threadIdx.x + blockIdx.x*blockDim.x;
int iloc = threadIdx.x ;
extern __shared__ float block_cache[];
// Out-of-range threads contribute 0 so the serial sum below stays valid.
if (iglob < n)
block_cache[iloc] = expf(-x[iglob]*x[iglob] - y[iglob]*y[iglob]-z[iglob]*z[iglob]-v[iglob]*v[iglob]-u[iglob]*u[iglob]);/*main function eval*/
else
block_cache[iloc] = 0;
__syncthreads();
/* on the "master thread" of each block" sum the pairwise products
on that block into the block's portion of the global sum */
if (iloc == 0){
float sum = 0.0;
for (i=0;i<NTPB;++i)
sum += block_cache[i];
atomicAdd(I1,sum);
}
}
int main(int argc, char **argv){
    // Monte-Carlo estimate of the mean of exp(-x^2-y^2-z^2-u^2-v^2) over n
    // uniform samples in [0,1]^5; the kernel accumulates per-block partial
    // sums into I1 via atomicAdd, and the host divides by n.
    float *x, *y, *z, *u, *v, *I1;             /* host (pinned) pointers */
    float *x_d, *y_d, *z_d, *u_d, *v_d, *I1_d; /* device pointers */
    int i, n;                                  /* n = sample count */
    cudaEvent_t start, stop;                   /* timers */
    float times;                               /* elapsed kernel time, ms */
    float actual = .232322;                    /* reference for error estimate */
    /* Fixed: the original dereferenced argv[1] without checking argc. */
    if (argc < 2) {
        fprintf(stderr, "usage: %s n\n", argv[0]);
        return 1;
    }
    n = atoi(argv[1]);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /* allocate pinned host memory */
    assert (cudaMallocHost((void **) &x, n*sizeof(float)) == cudaSuccess);
    assert (cudaMallocHost((void **) &y, n*sizeof(float)) == cudaSuccess);
    assert (cudaMallocHost((void **) &z, n*sizeof(float)) == cudaSuccess);
    assert (cudaMallocHost((void **) &u, n*sizeof(float)) == cudaSuccess);
    assert (cudaMallocHost((void **) &v, n*sizeof(float)) == cudaSuccess);
    assert (cudaMallocHost((void **) &I1, 1*sizeof(float)) == cudaSuccess);
    srand((time(NULL)));
    /* uniform samples in [0,1] per coordinate */
    for (i=0;i<n;++i){
        x[i] = (float)rand()/(float)(RAND_MAX-1);
        y[i] = (float)rand()/(float)(RAND_MAX-1);
        z[i] = (float)rand()/(float)(RAND_MAX-1);
        u[i] = (float)rand()/(float)(RAND_MAX-1);
        v[i] = (float)rand()/(float)(RAND_MAX-1);
    }
    *I1 = 0.0;
    /* allocate memory on device */
    assert (cudaMalloc((void **) &x_d, n*sizeof(float)) == cudaSuccess);
    assert (cudaMalloc((void **) &y_d, n*sizeof(float)) == cudaSuccess);
    assert (cudaMalloc((void **) &z_d, n*sizeof(float)) == cudaSuccess);
    assert (cudaMalloc((void **) &u_d, n*sizeof(float)) == cudaSuccess);
    assert (cudaMalloc((void **) &v_d, n*sizeof(float)) == cudaSuccess);
    assert (cudaMalloc((void **) &I1_d, 1*sizeof(float)) == cudaSuccess);
    /* copy host data to device pointers */
    assert(cudaMemcpy(x_d,x,n*sizeof(float),cudaMemcpyHostToDevice) == cudaSuccess);
    assert(cudaMemcpy(y_d,y,n*sizeof(float),cudaMemcpyHostToDevice) == cudaSuccess);
    assert(cudaMemcpy(z_d,z,n*sizeof(float),cudaMemcpyHostToDevice) == cudaSuccess);
    assert(cudaMemcpy(u_d,u,n*sizeof(float),cudaMemcpyHostToDevice) == cudaSuccess);
    assert(cudaMemcpy(v_d,v,n*sizeof(float),cudaMemcpyHostToDevice) == cudaSuccess);
    assert(cudaMemcpy(I1_d,I1,1*sizeof(float),cudaMemcpyHostToDevice) == cudaSuccess);
    /* launch and time kernel; shared memory = NTPB floats, as the kernel expects */
    cudaEventRecord( start, 0 );
    integrater<<<(n+NTPB-1)/NTPB,NTPB,NTPB*sizeof(float)>>>(x_d,y_d, z_d, u_d, v_d, I1_d,n);
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &times, start, stop );
    assert(cudaMemcpy(I1,I1_d,1*sizeof(float),cudaMemcpyDeviceToHost) == cudaSuccess);
    *I1 = *I1/(float)n;
    /* Fixed: cudaEventElapsedTime reports milliseconds, not seconds. */
    printf("value: %f\nerror: %f\ntime elapsed: %f(ms)\n", *I1, fabs(*I1-actual)/actual, times);
    cudaFree(x_d); cudaFree(y_d); cudaFree(z_d);
    cudaFree(I1_d); cudaFree(u_d); cudaFree(v_d);
    /* Fixed: pinned host buffers were leaked; cudaMallocHost pairs with cudaFreeHost. */
    cudaFreeHost(x); cudaFreeHost(y); cudaFreeHost(z);
    cudaFreeHost(u); cudaFreeHost(v); cudaFreeHost(I1);
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    return 0;
}
|
12,741 | #include "includes.h"
__global__ void meansquare_kernal(const float * data, float * device_stats, const int size, const int num_calcs, const int num_threads, const int offset)
{
    // Mean of squares over one length-`size` column of a size x size tile.
    // Each thread owns one output slot; the index decomposition below keeps
    // consecutive threads reading consecutive addresses (coalescing).
    const int slot = threadIdx.x + blockIdx.x * num_threads + offset;
    const int lane = slot % size;                    // column within the tile
    const int tileBase = (slot / size) * size * size; // base of the owning tile
    float sumsq = 0.0f;
    for (int row = 0; row < size; row++) {
        const float v = data[row * size + lane + tileBase];
        sumsq += v * v;
    }
    device_stats[slot] = sumsq / size;
}
12,742 | #include <stdio.h>
#include <cuda_runtime_api.h>
#include <time.h>
//compile with: nvcc 2040511_Task3_A.cu -o 2040511_Task3_A
// Compares `attempt` against the four stored plaintext passwords; prints the
// password and returns 1 on any match, 0 otherwise.
// Fixed: the original returned 1 only when the FOURTH password matched --
// matches on the first three were printed but still reported as 0.
__device__ int is_a_match(char *attempt) {
    char plain_password1[] = "AR29";
    char plain_password2[] = "RR70";
    char plain_password3[] = "PP90";
    char plain_password4[] = "KO78";
    char *candidates[4] = { plain_password1, plain_password2,
                            plain_password3, plain_password4 };
    for (int pw = 0; pw < 4; pw++) {
        char *s = attempt;
        char *p = candidates[pw];
        // Walk both strings while they agree; equal terminators == full match.
        while (*s == *p) {
            if (*s == '\0') {
                printf("Password: %s\n", candidates[pw]);
                return 1;
            }
            s++;
            p++;
        }
    }
    return 0;
}
__global__ void kernel() {
    // Brute-force search over 4-character passwords "LLDD": the two leading
    // letters come from blockIdx.x / threadIdx.x ('A' + index), and the two
    // trailing digits are enumerated by the nested loops below.
    char password[5];
    password[0] = (char)(blockIdx.x + 65);
    password[1] = (char)(threadIdx.x + 65);
    password[4] = '\0';
    for (char d1 = '0'; d1 <= '9'; d1++) {
        for (char d2 = '0'; d2 <= '9'; d2++) {
            password[2] = d1;
            password[3] = d2;
            // Prints on a match; the return value is deliberately unused here.
            is_a_match(password);
        }
    }
}
// Stores the elapsed nanoseconds between two timespecs in *difference.
// Returns 0 when the interval is positive, 1 otherwise (bad measurement).
int time_difference(struct timespec *start,
                    struct timespec *finish,
                    long long int *difference) {
    long long int secs  = finish->tv_sec  - start->tv_sec;
    long long int nanos = finish->tv_nsec - start->tv_nsec;
    if (nanos < 0) {        // borrow one second from the seconds field
        secs -= 1;
        nanos += 1000000000;
    }
    *difference = secs * 1000000000 + nanos;
    return !(*difference > 0);
}
int main() {
    // Launch a 26x26 grid (one block/thread pair per leading letter pair)
    // and time the exhaustive search with a monotonic clock.
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    kernel <<<26,26>>>();
    // Fixed: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the modern equivalent (needed so the timer covers kernel execution).
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed, (time_elapsed/1.0e9));
    return 0;
}
|
12,743 | /*
Soma duas matrizes quadradas
Ilustra o uso da mem global e mem local
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define TAM 16
#define THREADS 4
// Element-wise sum of two TAM x TAM matrices (row-major): C = A + B.
// One thread per element; threads mapped past the matrix edge return early.
__global__ void soma(int *A_dev_glb, int *B_dev_glb,int *C_dev_glb)
{
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    const int col = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= TAM || col >= TAM)
        return;
    const int idx = row * TAM + col;
    C_dev_glb[idx] = A_dev_glb[idx] + B_dev_glb[idx];
}
// Fills A with row indices and B with their negatives, sums them on the GPU,
// and prints C (which should therefore be all zeros).
int main(int argc,char **argv)
{
int *mA_hst,*mB_hst,*mC_hst;
int *mA_dev,*mB_dev,*mC_dev;
int i,j;
// Allocate matrices on the host
mA_hst=(int *)malloc(TAM*TAM*sizeof(int));
mB_hst=(int *)malloc(TAM*TAM*sizeof(int));
mC_hst=(int *)malloc(TAM*TAM*sizeof(int));
// Allocate matrices on the device
cudaMalloc((void**)&mA_dev,TAM*TAM*(sizeof(int)));
cudaMalloc((void**)&mB_dev,TAM*TAM*(sizeof(int)));
cudaMalloc((void**)&mC_dev,TAM*TAM*(sizeof(int)));
// Fill the host matrices: A[i][j] = i, B[i][j] = -i
for(i=0;i<TAM;i++)
{
for(j=0;j<TAM;j++)
{
mA_hst[i*TAM+j]=i;
mB_hst[i*TAM+j]=-i;
}
}
// Copy the matrix contents to the device
cudaMemcpy(mA_dev,mA_hst,TAM*TAM*(sizeof(int)), cudaMemcpyHostToDevice);
cudaMemcpy(mB_dev,mB_hst,TAM*TAM*(sizeof(int)), cudaMemcpyHostToDevice);
// Threads per block (THREADS x THREADS)
dim3 threadsPerBlock(THREADS,THREADS);
// Blocks per grid: ceil-divide TAM by the block dimensions
//
dim3 blocksPerGrid((TAM+(threadsPerBlock.x-1)) / threadsPerBlock.x, (TAM+(threadsPerBlock.y-1)) / threadsPerBlock.y);
// Launch the kernel with blocksPerGrid blocks of threadsPerBlock threads
soma <<<blocksPerGrid,threadsPerBlock>>> (mA_dev,mB_dev,mC_dev);
// Copy the sum back to the host (this memcpy also waits for the kernel)
cudaMemcpy(mC_hst,mC_dev,TAM*TAM*(sizeof(int)), cudaMemcpyDeviceToHost);
// Print the result on the host. Matrix C should contain only zeros.
for(i=0;i<TAM;i++)
{
for(j=0;j<TAM;j++)
printf("%d ",mC_hst[i*TAM+j]);
printf("\n");
}
// Free host matrices
free(mA_hst);
free(mB_hst);
free(mC_hst);
// Free device matrices
cudaFree(mA_dev);
cudaFree(mB_dev);
cudaFree(mC_dev);
}
|
12,744 | #include "includes.h"
// Folds each in-range element of dataArray into *maxVal with an atomic max.
// *maxVal must be pre-initialized by the caller (e.g. to INT_MIN).
__global__ static void kernelFindMax1(const int* dataArray, int arraySize, int* maxVal)
{
    const int idx = (int)(blockDim.x * blockIdx.x + threadIdx.x);
    if (idx >= arraySize)
        return;
    atomicMax(maxVal, dataArray[idx]);
}
12,745 | using namespace std;
#include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <fstream>
//#include "cuPrintf.cu"
#define B0 -2.647866f
#define B1 -0.374927f
#define B2 0.061601f
#define B3 -0.001511f
const long MAX_THREADS = 512;
const long MAX_BLOCK= 65535;
extern "C" void botrix_index (float *tempday_h, float* precday_h, int n, float* output);
extern "C" void init ();
void checkCUDAError(const char* msg);
// Computes a per-cell botrytis risk index from temperature (tempday) and
// precipitation (precday), writing the result back over tempday.
// -9999 is the no-data sentinel and is propagated unchanged.
__global__ void calculate_index(float* tempday, float* precday, int n)
{
    int id = (blockIdx.y*gridDim.x+blockIdx.x)*blockDim.x+threadIdx.x;
    // Fixed: the original loaded tempday[id]/precday[id] BEFORE this bounds
    // check, so the final partial block read out of bounds.
    if (id >= n)
        return;
    float x = tempday[id];
    float y = precday[id];
    if (x == -9999 || y == -9999)
    {
        tempday[id] = -9999;          // propagate no-data
        return;
    }
    if (y >= 4 && x < 40)
    {
        // Clamp temperature to the model's fitted range [12, 32].
        if (x < 12)
            x = 12;
        if (x > 32)
            x = 32;
        // Logistic model: index = e^L / (1 + e^L),
        // with L = B0 + B1*y + B2*y*x + B3*y*x^2 (computed once, not four times).
        float lp = B0 + (B1*y) + (B2*y*x) + (B3*y*(x*x));
        float e = powf(M_E, lp);
        tempday[id] = e / (1 + e);
    }
    else
        tempday[id] = 0;
}
// One-time setup: select device 0 so later CUDA calls target it.
void init()
{
cudaSetDevice(0);
}
// Runs the botrytis-index kernel for n cells: uploads the two input arrays,
// sizes a (possibly 2-D) grid to cover n threads, launches calculate_index
// (which overwrites its tempday copy with the result), and downloads the
// result into `output`.
void botrix_index(float *tempday_h, float* precday_h, int n, float* output)
{
float *tempday_d, *precday_d;
long n_threads;
int n_blocks;
int dim_blocks;
dim3 dim_grid;
// device selection (done once in init())
// cudaSetDevice(0);
// launch-parameter setup: one thread per cell
n_threads=n;
if (n_threads>MAX_BLOCK*MAX_BLOCK*MAX_THREADS)
{
printf("Troppi threads!\n");
exit(0);
}
dim_blocks=MAX_THREADS;
n_blocks=n_threads/MAX_THREADS+(n_threads%MAX_THREADS==0?0:1);
// Use a 1-D grid when it fits, otherwise a square 2-D grid.
if (n_blocks<=MAX_BLOCK)
{
dim_grid.x=n_blocks;
dim_grid.y=1;
dim_grid.z=1;
}
else
{
dim_grid.x=(unsigned int) ceil(sqrt(n_blocks));
dim_grid.y=(unsigned int) ceil(sqrt(n_blocks));
dim_grid.z=1;
}
printf("Numero threads per blocco: %d\n",dim_blocks);
printf("Dimensioni grid: x %d, y %d\n",dim_grid.x,dim_grid.y);
// debug: dump the inputs
// cout << "tempday\n";
// for (int i=0; i<n; i++)
// {
// cout<<tempday_h[i]<<" ";
// }
// cout << endl;
// cout << "precday\n";
// for (int i=0; i<n; i++)
// {
// cout<<precday_h[i]<<" ";
// }
// cout << endl;
// device allocation
cudaMalloc((void**) &precday_d, n*sizeof(float));
cudaMalloc((void**) &tempday_d, n*sizeof(float));
checkCUDAError("Allocazione");
// host-to-device transfer
cudaMemcpy(tempday_d,tempday_h, sizeof(float)*n, cudaMemcpyHostToDevice);
cudaMemcpy(precday_d,precday_h, sizeof(float)*n, cudaMemcpyHostToDevice);
checkCUDAError("Trasferimento su device");
// kernel launch
//cudaPrintfInit();
calculate_index <<< dim_grid, dim_blocks >>>(tempday_d, precday_d, n);
//cudaPrintfDisplay(stdout, true);
checkCUDAError("Kernel");
// device-to-host transfer (also synchronizes with the kernel)
cudaMemcpy(output,tempday_d, sizeof(float)*n, cudaMemcpyDeviceToHost);
checkCUDAError("Trasferimento da device");
// debug: dump the output
// cout << "output\n";
// for (int i=0; i<n; i++)
// {
// cout<<output[i]<<" ";
// }
// cout << endl;
// device deallocation
cudaFree(precday_d);
cudaFree(tempday_d);
}
// Reads two n-element ASCII input files (temperature, precipitation) named on
// the command line, computes the botrytis index on the GPU, and prints it.
int main(int argc, char **argv)
{
    fstream tempday, precday;
    int n;
    float *tempday_h, *precday_h, *output;
    // Fixed: argv[3] is read below, so three arguments are required
    // (the original only checked argc < 3).
    if (argc < 4)
    {
        printf("./a.out tempday.txt precday.txt lunghezza\n");
        exit(0);
    }
    tempday.open(argv[1],ios::in);
    precday.open(argv[2],ios::in);
    sscanf(argv[3],"%d",&n);
    // host allocation
    precday_h = (float*) malloc (n*sizeof(float));
    tempday_h = (float*) malloc (n*sizeof(float));
    output = (float*) malloc (n*sizeof(float));
    // Fixed: the original called checkCUDAError here, which cannot detect
    // host malloc failures; test the pointers directly instead.
    if (precday_h == NULL || tempday_h == NULL || output == NULL)
    {
        fprintf(stderr, "Host allocation failed\n");
        exit(EXIT_FAILURE);
    }
    // read the two input series
    for (int i=0; i<n; i++)
    {
        tempday >> tempday_h[i];
        precday >> precday_h[i];
    }
    tempday.close();
    precday.close();
    botrix_index(tempday_h,precday_h,n,output);
    cout << "Risultato botrite:\n";
    for (int i=0; i<n; i++)
    {
        cout<<output[i]<<" ";
    }
    free(precday_h);
    free(tempday_h);
    free(output);   // Fixed: output was leaked
    return 0;
}
// Aborts the process with a labelled message when the most recent CUDA call
// left an error; no-op otherwise.
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
|
12,746 | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <time.h>
// Reads the grid dimensions (M, N) and the first M*N temperatures from
// "initial80x160.txt", allocating room for T time slices (only slice 0 is
// filled; the rest is computed later).  *L receives the buffer.
void ReadFile(float **L, int *M, int *N, int T){
    FILE *fp = fopen("initial80x160.txt", "r");
    // Fixed: fopen/fscanf/malloc results were unchecked, so a missing or
    // malformed input file crashed or silently produced garbage.
    if (fp == NULL){
        fprintf(stderr, "ReadFile: cannot open initial80x160.txt\n");
        exit(EXIT_FAILURE);
    }
    if (fscanf(fp, "%d %d", M, N) != 2){
        fprintf(stderr, "ReadFile: malformed header\n");
        exit(EXIT_FAILURE);
    }
    int size = (*M)*(*N);
    float *L1 = (float*)malloc((size_t)size*T*sizeof(float));
    if (L1 == NULL){
        fprintf(stderr, "ReadFile: out of memory\n");
        exit(EXIT_FAILURE);
    }
    for(int i = 0; i < size; i++){
        if (fscanf(fp, "%f", &L1[i]) != 1){
            fprintf(stderr, "ReadFile: truncated data at element %d\n", i);
            exit(EXIT_FAILURE);
        }
    }
    fclose(fp);
    *L = L1;
}
// Dumps a header "M N T" followed by all T*M*N values, space-separated,
// into the file `name`.
void WriteFile(int M, int N, int T, float *L, char* name){
    FILE *out = fopen(name, "w");
    fprintf(out, "%d %d %d\n", M, N, T);
    const int total = T * M * N;
    for (int k = 0; k < total; k++)
        fprintf(out, "%f ", L[k]);
    fprintf(out, "\n");
    fclose(out);
}
// One explicit finite-difference heat-equation step on the CPU: fills time
// slice t of the M x N grid in U from slice t-1 (cells outside the grid are
// treated as 0, i.e. a zero boundary).
// Fixed: the neighbour/centre temperatures were declared `int`, silently
// truncating the float field at every step.
void CPU_Calor(int M, int N, float *U, int t){
    const float Dx = 1.0f, Dy = 1.0f, Dt = 1.0f;
    const float alpha = 0.1f;                 // diffusion coefficient
    float up, down, left, right, center;
    int x, y;
    int size = M*N;
    for (int i = 0; i < size; i++){
        y = i/N;
        x = i%N;
        center = U[(x+y*N)+((t-1)*size)];
        if(x == 0){                           // left edge
            left = 0;
            right = U[((x+1)+y*N)+((t-1)*size)];
        }
        else if(x == N-1){                    // right edge
            left = U[((x-1)+y*N)+((t-1)*size)];
            right = 0;
        }
        else{
            left = U[((x-1)+y*N)+((t-1)*size)];
            right = U[((x+1)+y*N)+((t-1)*size)];
        }
        if(y == 0){                           // top edge
            up = 0;
            down = U[(x+(y+1)*N)+((t-1)*size)];
        }
        else if(y == M-1){                    // bottom edge
            up = U[(x+(y-1)*N)+((t-1)*size)];
            down = 0;
        }
        else{
            down = U[(x+(y+1)*N)+((t-1)*size)];
            up = U[(x+(y-1)*N)+((t-1)*size)];
        }
        U[(x+y*N)+t*size] = center + Dt * alpha *( ( ( left - 2 * center + right ) / Dx ) + ( (down - 2 * center + up ) / Dy ) );
    }
}
// GPU counterpart of CPU_Calor: one thread per cell updates time slice t of
// the M x N grid in U from slice t-1 (zero boundary outside the grid).
// Fixed: the neighbour/centre temperatures were declared `int`, silently
// truncating the float field at every step.
__global__ void GPU_Calor(int M, int N, float *U, int t){
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    const float Dx = 1.0f, Dy = 1.0f, Dt = 1.0f;
    const float alpha = 0.1f;                 // diffusion coefficient
    float up, down, left, right, center;
    int x, y;
    int size = M*N;
    if(tId < size){
        y = tId/N;
        x = tId%N;
        center = U[(x+y*N)+((t-1)*size)];
        if(x == 0){                           // left edge
            left = 0;
            right = U[((x+1)+y*N)+((t-1)*size)];
        }
        else if(x == N-1){                    // right edge
            left = U[((x-1)+y*N)+((t-1)*size)];
            right = 0;
        }
        else{
            left = U[((x-1)+y*N)+((t-1)*size)];
            right = U[((x+1)+y*N)+((t-1)*size)];
        }
        if(y == 0){                           // top edge
            up = 0;
            down = U[(x+(y+1)*N)+((t-1)*size)];
        }
        else if(y == M-1){                    // bottom edge
            up = U[(x+(y-1)*N)+((t-1)*size)];
            down = 0;
        }
        else{
            down = U[(x+(y+1)*N)+((t-1)*size)];
            up = U[(x+(y-1)*N)+((t-1)*size)];
        }
        U[(x+y*N)+t*size] = center + Dt * alpha *( ( ( left - 2 * center + right ) / Dx ) + ( (down - 2 * center + up ) / Dy ) );
    }
}
// Same stencil as GPU_Calor, but each block stages its 16x16 tile of slice
// t-1 in dynamic shared memory (LS); neighbours that fall outside the tile
// are fetched from global memory.  Requires blockDim.x == 256 and N, M
// multiples of 16.
// Fixed: the temperatures were declared `int`, truncating the float field.
// NOTE(review): __syncthreads() sits inside `if(tId < size)`; safe only if
// every launched block is fully in range -- confirm at the launch site.
__global__ void Shared_Calor(int M, int N, float *U, int t){
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    const float Dx = 1.0f, Dy = 1.0f, Dt = 1.0f;
    const float alpha = 0.1f;
    float up, down, left, right, center;
    int x, y, X, Y;
    int size = M*N;
    if(tId < size){
        x = threadIdx.x%16;        // position inside the 16x16 tile
        y = threadIdx.x/16;
        X = blockIdx.x%(N/16);     // tile coordinates within the grid
        Y = blockIdx.x/(N/16);
        extern __shared__ float LS[];
        center = U[(X*16)+(Y*16*N)+(x)+(y*N)+(t-1)*size];
        LS[threadIdx.x] = center;
        __syncthreads();
        // Horizontal neighbours: shared memory when inside the tile,
        // global memory at tile borders, 0 at the grid boundary.
        if(x == 0 && X == 0){
            left = 0;
            right = LS[(x+1)+y*16];
        }
        else if(x == 15 && X == (N/16)-1){
            left = LS[(x-1)+y*16];
            right = 0;
        }
        else if(x == 0){
            left = U[((X*16)+(Y*16*N)+(x-1)+(y*N))+(t-1)*size];
            right = LS[(x+1)+y*16];
        }
        else if(x == 15){
            left = LS[(x-1)+y*16];
            right = U[((X*16)+(Y*16*N)+(x+1)+(y*N))+(t-1)*size];
        }
        else{
            left = LS[(x-1)+y*16];
            right = LS[(x+1)+y*16];
        }
        // Vertical neighbours, same scheme.
        if(y == 0 && Y == 0){
            up = 0;
            down = LS[x+(y+1)*16];
        }
        else if(y == 15 && Y == (M/16)-1){
            up = LS[x+(y-1)*16];
            down = 0;
        }
        else if(y == 0){
            up = U[((X*16)+(Y*16*N)+(x)+((y-1)*N))+(t-1)*size];
            down = LS[x+(y+1)*16];
        }
        else if(y == 15){
            up = LS[x+(y-1)*16];
            down = U[((X*16)+(Y*16*N)+(x)+((y+1)*N))+(t-1)*size];
        }
        else{
            up = LS[x+(y-1)*16];
            down = LS[x+(y+1)*16];
        }
        U[(X*16)+(Y*16*N)+(x)+(y*N)+t*size] = center + Dt * alpha *( ( ( left - 2 * center + right ) / Dx ) + ( (down - 2 * center + up ) / Dy ) );
    }
}
// Runs the same T-step heat simulation three ways (CPU, plain GPU kernel,
// shared-memory GPU kernel), timing each and writing each result file.
int main(int argc, char const *argv[]){
int M, N, T, t;
float *U;
clock_t t1,t2;
// ---------------------- CPU ------------------------------
//WriteFile(M,N, U, "Salida.txt");
printf("Ingrese cantidad de iteraciones: ");
scanf("%d",&T);
ReadFile(&U, &M, &N, T);
//float *Utemp = (float *)malloc(size*sizeof(float));
t1 = clock();
for (t = 1; t < T; t++){
CPU_Calor(M, N, U, t);
}
t2 = clock();
double ms = 1000.0 * (double)(t2-t1) / CLOCKS_PER_SEC;
printf("Tiempo CPU: %f \n", ms);
WriteFile(M, N, T, U, "SalidaCPU.txt");
free(U);
// ---------------------- GPU ------------------------------
ReadFile(&U, &M, &N, T);
float *Udev, *UdevShared;
int size = N*M;
int bs = 256;                      // threads per block
int gs = (int)ceil(float(size)/bs);// ceil-div blocks to cover one slice
cudaMalloc(&Udev, T*size*sizeof(float));
cudaMemcpy(Udev, U, T*size*sizeof(float), cudaMemcpyHostToDevice);
cudaEvent_t ct1, ct2;
float dt, dtt;
cudaEventCreate(&ct1);
cudaEventCreate(&ct2);
cudaEventRecord(ct1);
// Time steps are inherently sequential: one launch per step.
for(t = 1; t < T; t++){
GPU_Calor<<<gs,bs>>>(M,N,Udev,t);
}
cudaEventRecord(ct2);
cudaEventSynchronize(ct2);
cudaEventElapsedTime(&dt, ct1, ct2);
printf("Tiempo GPU: %f \n", dt);
cudaMemcpy(U, Udev, T*size*sizeof(float), cudaMemcpyDeviceToHost);
WriteFile(M, N, T, U, "SalidaGPU.txt");
free(U);
// ------------------ GPU Shared --------------------------
ReadFile(&U, &M, &N, T);
cudaMalloc(&UdevShared, T*size*sizeof(float));
cudaMemcpy(UdevShared, U, T*size*sizeof(float), cudaMemcpyHostToDevice);
cudaEvent_t ct11, ct22;
cudaEventCreate(&ct11);
cudaEventCreate(&ct22);
cudaEventRecord(ct11);
for(t = 1; t < T; t++){
// third launch argument: bs floats of dynamic shared memory per block
Shared_Calor<<<gs,bs, sizeof(float)*bs>>>(M,N,UdevShared,t);
}
cudaEventRecord(ct22);
cudaEventSynchronize(ct22);
cudaEventElapsedTime(&dtt, ct11, ct22);
printf("Tiempo GPU Shared: %f \n", dtt);
cudaMemcpy(U, UdevShared, T*size*sizeof(float), cudaMemcpyDeviceToHost);
WriteFile(M, N, T, U, "SalidaGPU_Shared.txt");
free(U);
// NOTE(review): Udev and UdevShared are never cudaFree'd, and the events
// are never destroyed -- leaks at program exit.
return 0;
}
|
12,747 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <assert.h>
#include <vector>
using namespace std;
const int INF = 10000000;
const int V = 10010;
const int MAX_THREAD_DIM2 = 32;
void input(char *inFileName, int B);
void output(char *outFileName);
void block_FW(int B);
int ceil(int a, int b);
void calAsync(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
int realn;
int n, m; // Number of vertices, edges
int* Dist; // n * n, on host
int* dDist; // n * n, on device
int streamSize;
vector<cudaStream_t> streams;
// Hand out a stream for the next asynchronous launch, creating one on demand.
// Streams are recycled: syncAllStreams() resets the in-use counter, so the
// same cudaStream_t objects are reused on the next round of launches.
inline cudaStream_t getIdleStream ()
{
    if (streamSize < (int)streams.size())
        return streams[streamSize++];    // reuse an already-created stream
    cudaStream_t stm;
    cudaStreamCreate(&stm);
    streams.push_back(stm);
    ++streamSize;
    return stm;
}
// Block until every outstanding launch on every stream has finished, then
// mark all cached streams idle so getIdleStream() can recycle them.
inline void syncAllStreams ()
{
    // fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent (waits for the whole device to go idle).
    cudaDeviceSynchronize();
    streamSize = 0;
}
// Usage: <prog> <input file> <output file> <block size B>
int main(int argc, char* argv[])
{
    // fix: the original indexed argv[3] unconditionally, crashing on
    // missing arguments.
    if (argc < 4) {
        fprintf(stderr, "usage: %s <input> <output> <B>\n", argv[0]);
        return 1;
    }
    int B = atoi(argv[3]);
    input(argv[1], B);
    block_FW(B);
    output(argv[2]);
    return 0;
}
// Read "realn m" followed by m directed, 1-based edges "a b w" from
// inFileName.  n is realn rounded up to a multiple of B so every B x B
// block is full; Dist becomes an n x n matrix with 0 on the diagonal and
// INF elsewhere before the edges are applied.
void input(char *inFileName, int B)
{
    FILE *infile = fopen(inFileName, "r");
    if (infile == NULL) {                          // fix: no fopen check
        fprintf(stderr, "cannot open %s\n", inFileName);
        exit(1);
    }
    if (fscanf(infile, "%d %d", &realn, &m) != 2) {
        fprintf(stderr, "bad header in %s\n", inFileName);
        exit(1);
    }
    n = ceil(realn, B) * B;                        // pad to a multiple of B
    Dist = new int[n * n];
    for (int i = 0, k = 0; i < n; ++i) {
        for (int j = 0; j < n; ++j, ++k) {
            if (i == j) Dist[k] = 0;
            else Dist[k] = INF;
        }
    }
    while (--m >= 0) {
        int a, b, v;
        if (fscanf(infile, "%d %d %d", &a, &b, &v) != 3) break;
        --a, --b;                                  // input edges are 1-based
        Dist[a * n + b] = v;
    }
    fclose(infile);                                // fix: handle was leaked
}
// Write the realn x realn distance matrix to outFileName; unreachable
// pairs print as "INF".  Also frees Dist (ownership ends here, matching
// the original's behavior).
void output(char *outFileName)
{
    FILE *outfile = fopen(outFileName, "w");
    if (outfile == NULL) {                 // fix: no fopen check
        fprintf(stderr, "cannot open %s\n", outFileName);
        exit(1);
    }
    for (int i = 0; i < realn; ++i) {
        for (int j = 0; j < realn; ++j) {
            int d = Dist[i * n + j];
            if (d >= INF) fprintf(outfile, "INF ");
            else fprintf(outfile, "%d ", d);
        }
        fprintf(outfile, "\n");
    }
    // fix: the original never closed the file, risking lost buffered output
    fclose(outfile);
    delete[] Dist;
}
// Debug helper: dump the realn x realn matrix to stderr, printing "INF "
// for unreachable pairs, then a trailing blank line.
void print ()
{
    for (int r = 0; r < realn; ++r) {
        for (int c = 0; c < realn; ++c) {
            int v = Dist[r * n + c];
            if (v >= INF)
                fprintf(stderr, "INF ");
            else
                fprintf(stderr, "%d ", v);
        }
        fprintf(stderr, "\n");
    }
    fprintf(stderr, "\n");
}
// Integer ceiling division: the number of size-b chunks needed to cover a.
inline int ceil(int a, int b)
{
    int padded = a + b - 1;   // push a up to the next multiple of b
    return padded / b;
}
// Device helper for the min-plus relaxation: x = min(x, a).
// Stores only on improvement, leaving x untouched otherwise.
inline __device__
void updateMin (int &x, int a)
{
    if (x > a)
        x = a;
}
// Phase 1 of blocked Floyd-Warshall: relax the 32x32 pivot block (r, r)
// entirely in shared memory.  Must be launched as ONE block of dim3(32,32);
// thread (tx, ty) owns element (i, j) of the pivot block.
__global__
void UpdateIKJ32 (int r, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int tx = threadIdx.x;
int ty = threadIdx.y;
int i = r * 32 + tx;
int j = r * 32 + ty;
// Stage the pivot block once; all 32 k-relaxations run on this tile.
__shared__ int S[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
S[tx][ty] = D(i, j);
__syncthreads();
for(int k=0; k<32; ++k)
{
// In-place relax: S[tx][ty] = min(S[tx][ty], S[tx][k] + S[k][ty]).
// NOTE(review): S[tx][ty] may be written while another thread reads it
// as its S[tx][k] / S[k][ty] in the same iteration -- confirm this
// intra-iteration race is acceptable for the min-plus update.
updateMin(S[tx][ty], S[tx][k] + S[k][ty]);
__syncthreads();
}
D(i, j) = S[tx][ty];
#undef D
}
// Phase 2 of blocked Floyd-Warshall: relax the blocks of pivot ROW r
// against the pivot block.  Launched as (round-1) blocks of dim3(32,32);
// the pivot block itself is skipped by bumping the block index past r.
__global__
void UpdateIK32 (int r, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int tx = threadIdx.x;
int ty = threadIdx.y;
int by = blockIdx.x;
// Remap the block index so column-block r is never processed here.
if(by >= r) by++;
int i = r * 32 + tx;
int j = by * 32 + ty;
// S0 caches the pivot block (stored transposed: S0[a][b] = D(r*32+b, r*32+a));
// S1 caches this thread-block's own tile D(i, j).
__shared__ int S0[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
__shared__ int S1[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
S0[ty][tx] = D(i, r*32 + ty);
S1[tx][ty] = D(i, j);
__syncthreads();
for(int k=0; k<32; ++k)
{
// D(i,j) = min(D(i,j), D(i,k') + D(k',j)) with k' inside the pivot block.
updateMin(S1[tx][ty], S0[k][tx] + S1[k][ty]);
__syncthreads();
}
D(i, j) = S1[tx][ty];
#undef D
}
// Phase 2 of blocked Floyd-Warshall: relax the blocks of pivot COLUMN r
// against the pivot block.  Launched as (round-1) blocks of dim3(32,32);
// the pivot block itself is skipped by bumping the block index past r.
__global__
void UpdateKJ32 (int r, int* dDist, int n)
// 0 --update--> 1
{
#define D(i,j) (dDist[(i) * n + (j)])
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
// Remap the block index so row-block r is never processed here.
if(bx >= r) bx++;
int i = bx * 32 + tx;
int j = r * 32 + ty;
// S0 holds this block's tile (transposed: S0[a][b] = D(bx*32+b, r*32+a));
// S1 holds the pivot block (S1[a][b] = D(r*32+a, r*32+b)).
__shared__ int S0[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
__shared__ int S1[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
S0[ty][tx] = D(i, j);
S1[tx][ty] = D(r*32 + tx, j);
__syncthreads();
for(int k=0; k<32; ++k)
{
// D(i,j) = min(D(i,j), D(i,k') + D(k',j)) with k' inside the pivot block.
updateMin(S0[ty][tx], S0[k][tx] + S1[k][ty]);
__syncthreads();
}
D(i, j) = S0[ty][tx];
#undef D
}
// Phase 3 of blocked Floyd-Warshall: relax every block outside pivot row
// and column r, using the already-updated pivot-column tile (S0) and
// pivot-row tile (S1).  Grid is dim3(round-1, round-1), block dim3(32,32).
__global__
void Update32 (int r, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
// Remap block indices so the pivot row/column blocks are skipped.
if(bx >= r) bx++;
if(by >= r) by++;
int i = bx * 32 + tx;
int j = by * 32 + ty;
__shared__ int S0[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
__shared__ int S1[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
S0[ty][tx] = D(i, r * 32 + ty);
S1[tx][ty] = D(r * 32 + tx, j);
__syncthreads();
// Each thread accumulates its own element in a register; the shared
// tiles are read-only after the barrier above, so the in-loop
// __syncthreads() below is not strictly required for correctness.
int Dij = D(i, j);
for(int k=0; k<32; ++k)
{
updateMin(Dij, S0[k][tx] + S1[k][ty]);
__syncthreads();
}
D(i, j) = Dij;
#undef D
}
// Blocked Floyd-Warshall driver: copy the distance matrix to the device,
// run `round` pivot rounds (specialized 32x32 shared-memory kernels when
// B == 32, generic tiled kernels via calAsync otherwise), then copy the
// result back to Dist.
void block_FW(int B)
{
    // fix: the original allocated (and freed) a B*B `dPivot` buffer that
    // was never used anywhere; removed.
    cudaMalloc(&dDist, sizeof(int) * n * n);
    cudaMemcpy(dDist, Dist, sizeof(int) * n * n, cudaMemcpyHostToDevice);
    int round = ceil(n, B);
    if(B == 32)
    {
        for (int r = 0; r < round; ++r)
        {
            /* Phase 1: relax the pivot block (r, r) */
            UpdateIKJ32 <<< 1, dim3(32,32) >>> (r, dDist, n);
            // fix: with a single block (round == 1) the phase-2/3 grids
            // would be zero-sized, an invalid launch configuration.
            if (round > 1)
            {
                /* Phase 2: pivot row and pivot column, on separate streams */
                UpdateIK32 <<< round-1, dim3(32,32), 0, getIdleStream() >>> (r, dDist, n);
                UpdateKJ32 <<< round-1, dim3(32,32), 0, getIdleStream() >>> (r, dDist, n);
                syncAllStreams();
                /* Phase 3: every remaining block */
                Update32 <<< dim3(round-1, round-1), dim3(32,32) >>> (r, dDist, n);
            }
        }
    }
    else
        for (int r = 0; r < round; ++r) {
            /* Phase 1: pivot block */
            calAsync(B, r, r, r, 1, 1);
            syncAllStreams();
            /* Phase 2: pivot row (left/right of r) and column (above/below r) */
            calAsync(B, r, r, 0, r, 1);
            calAsync(B, r, r, r +1, round - r -1, 1);
            calAsync(B, r, 0, r, 1, r);
            calAsync(B, r, r +1, r, 1, round - r -1);
            syncAllStreams();
            /* Phase 3: the four remaining quadrants */
            calAsync(B, r, 0, 0, r, r);
            calAsync(B, r, 0, r +1, round -r -1, r);
            calAsync(B, r, r +1, 0, r, round - r -1);
            calAsync(B, r, r +1, r +1, round -r -1, round - r -1);
            syncAllStreams();
        }
    cudaMemcpy(Dist, dDist, sizeof(int) * n * n, cudaMemcpyDeviceToHost);
    cudaFree(dDist);
}
// Relax D(i, j) against one intermediate vertex k for the rectangle
// [i0, i1) x [j0, j1).  Used by calAsync for blocks that share a row or
// column with the pivot and therefore must be relaxed one k at a time.
__global__
void Update (int k, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
    int i = blockDim.x * blockIdx.x + threadIdx.x + i0;
    int j = blockDim.y * blockIdx.y + threadIdx.y + j0;
    // Guard: the grid may extend past the rectangle's edge.
    if(i >= i1 || j >= j1)
        return;
    updateMin(D(i, j), D(i, k) + D(k, j));
// fix: the sibling kernels all #undef this macro after use; do the same
// here for consistency (and to avoid repeated redefinitions below).
#undef D
}
// Relax the rectangle [i0, i1) x [j0, j1) against all intermediate
// vertices k in [k0, k1), for blocks that do NOT intersect the pivot row
// or column (so D(i,k) and D(k,j) are stable for the whole k-range).
// The k-dimension is consumed in batches of 32: every 32 iterations the
// block cooperatively refills the shared caches Si (a 32-wide slice of
// this tile's rows vs. k) and Sj (k vs. this tile's columns), then each
// thread accumulates its own D(i, j) in a register.
__global__
void UpdateIndependent (int k0, int k1, int i0, int j0, int i1, int j1, int* dDist, int n)
{
#define D(i,j) (dDist[(i) * n + (j)])
int tx = threadIdx.x;
int ty = threadIdx.y;
int di = blockDim.x * blockIdx.x + tx;
int dj = blockDim.y * blockIdx.y + ty;
int i = i0 + di;
int j = j0 + dj;
// Threads past the rectangle edge still participate in barriers/loads,
// but never touch D(i, j).
bool valid = i < i1 && j < j1;
__shared__ int Si[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
__shared__ int Sj[MAX_THREAD_DIM2][MAX_THREAD_DIM2];
const int cacheSize = MAX_THREAD_DIM2;
int Dij = valid? D(i, j): 0;
// dkmod = k's offset inside the current 32-wide cached slice.
int dkmod = 0;
for(int k = k0; k < k1; ++k)
{
if(dkmod == 0)
{
// Barrier before refill: everyone must be done reading the old slice.
__syncthreads();
// Cooperative refill: thread (tx, ty) loads one element of each cache;
// at this point k is the slice base, so k+ty / k+tx index into it.
if(i < i1 && k+ty < k1)
Si[ty][tx] = D(i, k+ty);
if(j < j1 && k+tx < k1)
Sj[tx][ty] = D(k+tx, j);
__syncthreads();
}
if(valid)
{
// assert(Si[tx][dkmod] == D(i,k));
// assert(Sj[dkmod][ty] == D(k,j));
// int Dik = D(i, k);
// int Dkj = D(k, j);
int Dik = Si[dkmod][tx];
int Dkj = Sj[dkmod][ty];
updateMin(Dij, Dik + Dkj);
}
dkmod = (dkmod + 1) % cacheSize;
}
// Write back the register accumulator once, after all k are applied.
if(valid)
D(i, j) = Dij;
}
// Launch the relaxation of a rectangular region of B x B blocks
// (block_height x block_width of them, starting at block
// (block_start_x, block_start_y)) for pivot round `Round`, one kernel
// launch per block on its own stream.  Blocks that share a row or column
// with the pivot depend on values updated within this round, so they are
// relaxed one k at a time (Update); independent blocks can batch the whole
// k-range (UpdateIndependent).  Caller must syncAllStreams() afterwards.
void calAsync(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height)
{
int block_end_x = block_start_x + block_height;
int block_end_y = block_start_y + block_width;
// NOTE(review): block_total is computed but never used below.
int block_total = block_width * block_height;
for (int b_i = block_start_x; b_i < block_end_x; ++b_i) {
for (int b_j = block_start_y; b_j < block_end_y; ++b_j) {
// To calculate B*B elements in the block (b_i, b_j)
// For each block, it need to compute B times
// for (int k = Round * B; k < (Round +1) * B && k < n; ++k) {
// To calculate original index of elements in the block (b_i, b_j)
// For instance, original index of (0,0) in block (1,2) is (2,5) for V=6,B=2
int i0 = b_i * B;
int i1 = min((b_i +1) * B, n);
int j0 = b_j * B;
int j1 = min((b_j +1) * B, n);
int k0 = Round * B;
int k1 = min((Round +1) * B, n);
// A block depends on this round's updates iff it lies in the pivot
// row (i0 == k0) or pivot column (j0 == k0).
bool iDepends = i0 == k0;
bool jDepends = j0 == k0;
int threadDim = MAX_THREAD_DIM2;//std::min(B, MAX_THREAD_DIM2);
int blockDim = (B + MAX_THREAD_DIM2 - 1) / MAX_THREAD_DIM2;
dim3 grid(blockDim, blockDim), block(threadDim, threadDim);
cudaStream_t stm = getIdleStream();
if(iDepends || jDepends)
{
// Dependent block: serialize over k so each relaxation sees the
// values produced by the previous one.
for(int k=k0; k<k1; ++k)
Update<<<grid, block, 0, stm>>>(k, i0, j0, i1, j1, dDist, n);
}
else
UpdateIndependent<<<grid, block, 0, stm>>>(k0, k1, i0, j0, i1, j1, dDist, n);
// for (int i = i0; i < i1; ++i) {
// for (int j = j0; j < j1; ++j) {
// if (Dist[i][k] + Dist[k][j] < Dist[i][j])
// Dist[i][j] = Dist[i][k] + Dist[k][j];
// }
// }
// }
}
}
}
|
12,748 |
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#define BLOCK_SIZE 8
#define GRID_SIZE 8
// One evolution step of an n x n Ising lattice.  Each thread sweeps a
// strided set of cells so any n is covered by the fixed
// GRID_SIZE x BLOCK_SIZE launch.  A cell's new spin is the sign of the
// weighted sum of its 5x5 periodic neighborhood; it keeps its old spin
// when that sum is numerically zero (|sum| < 1e-4).
__global__ void ising_kernel(int *G, int *newG, double *w, int n){
    // fix: removed the unused local `id` and the redundant duplicate
    // xIndex/yIndex initializations from the original.
    unsigned int xStart = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int yStart = blockDim.y * blockIdx.y + threadIdx.y;
    // Number of strided sweeps needed to cover n cells per dimension.
    int iterations;
    if (n % (BLOCK_SIZE * GRID_SIZE) == 0){
        iterations = n / (BLOCK_SIZE * GRID_SIZE);
    } else {
        iterations = n / (BLOCK_SIZE * GRID_SIZE) + 1;
    }
    for (int i = 0; i < iterations; i++){
        unsigned int xIndex = xStart + GRID_SIZE * BLOCK_SIZE * i;
        for (int j = 0; j < iterations; j++){
            unsigned int yIndex = yStart + GRID_SIZE * BLOCK_SIZE * j;
            if (xIndex < n && yIndex < n){
                double weight = 0;
                // 5x5 stencil with periodic (toroidal) boundary handling.
                for (int ibor = -2; ibor < 3; ibor++){
                    for (int jbor = -2; jbor < 3; jbor++){
                        weight += w[(ibor+2)*5+jbor+2]*G[((xIndex-ibor+n)%n)*n +(yIndex-jbor+n)%n ];
                    }
                }
                if (weight < 1e-4 && weight > -(1e-4)){
                    // Influence cancels out: spin is preserved.
                    newG[xIndex*n+yIndex] = G[xIndex*n+yIndex];
                } else if (weight > 0){
                    newG[xIndex*n+yIndex] = 1;
                } else {
                    newG[xIndex*n+yIndex] = -1;
                }
            }
        }
    }
}
// Run k Ising evolution steps of the n x n lattice G on the GPU, using the
// 5x5 weight stencil w (25 doubles).  G is updated in place; iteration
// stops early once a step produces no change.
void ising( int *G, double *w, int k, int n){
    int *newG, *G2;
    double *w2;
    cudaMallocManaged(&newG, n*n*sizeof(int));
    cudaMallocManaged(&G2, n*n*sizeof(int));
    cudaMallocManaged(&w2, 25*sizeof(double));
    cudaMemcpy( w2, w, 25*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy( G2, G, n*n*sizeof(int),cudaMemcpyHostToDevice);
    for(int iter=0; iter<k; iter++){
        bool repeat = true;   // stays true iff this step changed nothing
        dim3 grid(GRID_SIZE, GRID_SIZE);
        dim3 block(BLOCK_SIZE, BLOCK_SIZE);
        ising_kernel<<<grid,block>>>(G2, newG, w2, n);
        cudaDeviceSynchronize();   // managed memory is read on the host below
        // Swap old/new generations element-wise on the host and detect
        // whether anything changed so we can terminate early.
        for(int i=0; i<n; i++){
            for(int j=0; j<n; j++){
                if(repeat && newG[i*n+j] != G2[i*n+j]){
                    repeat = false;
                }
                int temp = newG[i*n+j];
                newG[i*n+j] = G2[i*n+j];
                G2[i*n+j] = temp;
            }
        }
        if(repeat){
            break;
        }
    }
    cudaMemcpy(G, G2, n*n*sizeof(int), cudaMemcpyDeviceToHost);
    // fix: the original leaked all three managed allocations.
    cudaFree(newG);
    cudaFree(G2);
    cudaFree(w2);
}
// Entry point: load a 517x517 spin configuration, run 11 Ising steps on
// the GPU, and compare the result against the reference state on disk.
int main()
{
    printf("=================START=========================\n");
    // 5x5 influence weights; the center (self) weight is 0.
    double weight[]={0.004,0.016,0.026,0.016,0.004,0.016,0.071,0.117,0.071,0.016,0.026,0.117,0,0.117,0.026,0.016,0.071,0.117,0.071,0.016,0.004,0.016,0.026,0.016,0.004};
    int n=517;
    // fix: the original used ~1 MB stack VLAs (int X[n*n]) which can
    // overflow the default stack; allocate the lattices on the heap.
    int *X = (int*)malloc((size_t)n * n * sizeof(int));
    if (X == NULL) exit(EXIT_FAILURE);
    size_t size;
    FILE *fp = fopen("conf-init.bin", "rb");
    if (fp == NULL) exit(EXIT_FAILURE);   // fix: no fopen check
    size = fread(X, sizeof(int), n * n, fp);
    if(size != (size_t)(n*n)) exit(EXIT_FAILURE);
    fclose(fp);
    int k=11;
    ising(X, weight, k, n);
    int *checkX = (int*)malloc((size_t)n * n * sizeof(int));
    if (checkX == NULL) exit(EXIT_FAILURE);
    FILE *fp2 = fopen("conf-11.bin", "rb");
    if (fp2 == NULL) exit(EXIT_FAILURE);  // fix: no fopen check
    size = fread(checkX, sizeof(int), n * n, fp2);
    if(size != (size_t)(n*n)) exit(EXIT_FAILURE);
    fclose(fp2);
    // Compare the evolved lattice with the reference, cell by cell.
    bool flag=true;
    for(int i=0; i<n; i++){
        for(int j=0; j<n; j++){
            if(checkX[i*n+j] != X[i*n+j]){
                printf("\nWRONG IMPLEMENTATION\n");
                flag=false;
                break;
            }
        }
        if(!flag){
            break;
        }
    }
    if(flag){
        printf("\nCORRECT IMPLEMENTATION\n");
    }
    free(X);
    free(checkX);
    printf("\n================END==============\n");
    return 0;
} |
12,749 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define BUFSIZE 64
#define BLOCK_SIZE 9
// Periodicity preservation: copies the edge rows/columns into the halo cells so the board wraps around (toroidal boundary)
// Runs on CPU
// Copy the outermost real rows/columns of the N x N interior into the halo
// cells of the (N+2) x (N+2) grid, making the board toroidal (periodic).
// Runs on the host.
void periodicityPreservationCPU(int N, char *cells)
{
    int pitch = N + 2;   // width of one padded grid row
    // Wrap rows: first real row -> bottom halo, last real row -> top halo.
    for (int c = 1; c <= N; ++c)
    {
        cells[pitch * (N + 1) + c] = cells[pitch + c];
        cells[c] = cells[pitch * N + c];
    }
    // Wrap columns (rows 0 and N+1 included, which fills the corners too):
    // first real column -> right halo, last real column -> left halo.
    for (int r = 0; r <= N + 1; ++r)
    {
        cells[r * pitch + N + 1] = cells[r * pitch + 1];
        cells[r * pitch] = cells[r * pitch + N];
    }
}
// Device-side version of the halo wrap.  The call site launches it as
// <<<1,1>>>, so both loops run serially on a single GPU thread (this
// avoids the device<->host round trip of the CPU variant).
__global__ void periodicityPreservationGPU(int N, char *cells)
{
    int pitch = N + 2;   // width of one padded grid row
    // Wrap rows: first real row -> bottom halo, last real row -> top halo.
    for (int c = 1; c <= N; ++c)
    {
        cells[pitch * (N + 1) + c] = cells[pitch + c];
        cells[c] = cells[pitch * N + c];
    }
    // Wrap columns across all rows (corners included).
    for (int r = 0; r <= N + 1; ++r)
    {
        cells[r * pitch + N + 1] = cells[r * pitch + 1];
        cells[r * pitch] = cells[r * pitch + N];
    }
}
/* The evolve kernels perform one evolution step: a grid of
SideGrid x SideGrid blocks of BLOCK_SIZE x BLOCK_SIZE threads is
launched, and each thread handles the evolution of one cell by
counting its eight neighbors and applying the Game-of-Life
rules. */
// Based on global memory
// One Game-of-Life generation over the interior N x N cells of the padded
// (N+2)^2 board; one thread per cell (global-memory version).
// `allzeros` is incremented once per LIVE cell of the new generation (so
// it is zero iff everything died -- the name is a historical misnomer);
// `change` counts cells that flipped state.
__global__ void evovle_kernel(int N, char *oldGen, char *newGen, int *allzeros, int *change)
{
    // 1-based interior coordinates; the halo occupies rows/cols 0 and N+1.
    int ix = blockDim.x * blockIdx.x + threadIdx.x + 1;
    int iy = blockDim.y * blockIdx.y + threadIdx.y + 1;
    // Flattened index of this thread's cell.
    int id = ix * (N+2) + iy;
    int neighbors;
    if (ix <= N && iy <= N) {
        neighbors = oldGen[id+(N+2)] + oldGen[id-(N+2)] //lower upper
            + oldGen[id+1] + oldGen[id-1] //right left
            + oldGen[id+(N+3)] + oldGen[id-(N+3)] //diagonals
            + oldGen[id-(N+1)] + oldGen[id+(N+1)];
        char cell = oldGen[id];
        newGen[id] = neighbors == 3 || (neighbors == 2 && cell); // Fill in the cells
        // fix: the original used (*allzeros)++ / (*change)++, an unsynchronized
        // read-modify-write racing across all threads that can lose updates;
        // use atomics so the early-termination counters are reliable.
        if (newGen[id] != 0) atomicAdd(allzeros, 1);
        if (newGen[id] != oldGen[id]) atomicAdd(change, 1);
    }
}
// Shared-memory variant: each block stages a BLOCK_SIZE x BLOCK_SIZE tile
// of the old generation (interior plus a one-cell border), and only the
// tile-interior threads compute new cells, reading neighbors from the tile.
__global__ void evovle_kernel_shared(int N, char *oldGen, char *newGen, int *allzeros, int *change)
{
    // Blocks advance by (blockDim - 2) so neighboring tiles overlap by the
    // two border rows/columns (see the SideGrid computation in main).
    int ix = (blockDim.x - 2) * blockIdx.x + threadIdx.x;
    int iy = (blockDim.y - 2) * blockIdx.y + threadIdx.y;
    int id = ix * (N+2) + iy;
    int i = threadIdx.x;
    int j = threadIdx.y;
    int neighbors;
    // Per-block tile of the old generation, halo cells included.
    __shared__ char oldGen_shared[BLOCK_SIZE][BLOCK_SIZE];
    if (ix <= N+1 && iy <= N+1)
        oldGen_shared[i][j] = oldGen[id];
    // Barrier between the cooperative load and any tile reads.
    __syncthreads();
    if (ix <= N && iy <= N) {
        // Tile-border threads only load data; interior threads compute.
        if(i != 0 && i != (blockDim.y-1) && j != 0 && j != (blockDim.x-1)) {
            neighbors = oldGen_shared[i+1][j] + oldGen_shared[i-1][j] //lower upper
                + oldGen_shared[i][j+1] + oldGen_shared[i][j-1] //right left
                + oldGen_shared[i+1][j+1] + oldGen_shared[i-1][j-1] //diagonals
                + oldGen_shared[i-1][j+1] + oldGen_shared[i+1][j-1];
            char cell = oldGen_shared[i][j];
            newGen[id] = neighbors == 3 || (neighbors == 2 && cell); // Fill in the cells
            // fix: replace racy (*counter)++ with atomics so the
            // early-termination counters are not corrupted by lost updates.
            if (newGen[id] != 0) atomicAdd(allzeros, 1);
            if (newGen[id] != oldGen[id]) atomicAdd(change, 1);
        }
    }
}
// Entry point: parse command-line flags, build the padded (N+2)x(N+2)
// board (random fill or coordinates from a file), then run `generations`
// Game-of-Life steps on the GPU -- optionally with shared memory, with
// halo wrapping on CPU or GPU, with board printing, and with
// early-termination ("doom") checks every 10 generations.
// NOTE(review): strcmp/strtok are used but <string.h> is not among this
// file's includes -- confirm it compiles on all toolchains.
// NOTE(review): N and generations remain uninitialized if -n / -g are
// not supplied on the command line.
int main(int argc, char* argv[])
{
int i, j;
int N; // Dimension of cells
int generations; // Generations of evolution
FILE *fp = NULL; // A file for input (optional)
int shared = 0; // Use share memory or not
int output = 0; // Print the array in every generation, at the end or not at all
int periodicity = 1; // Choose if we want the calculate the periodicity of side cells in cpu or gpu
int doom = 0 ; // With terminal checking or Not
/*Read the arguments*/
for (i = 0; i < argc; i++){
if (!strcmp(argv[i], "-n")) N = atoi(argv[++i]);
else if (!strcmp(argv[i], "-g")) generations = atoi(argv[++i]);
else if (!strcmp(argv[i], "-i")) fp = fopen(argv[++i], "r");
else if (!strcmp(argv[i], "-s")) shared = atoi(argv[++i]);
else if (!strcmp(argv[i], "-p")) periodicity = atoi(argv[++i]);
else if (!strcmp(argv[i], "-o1")) output = 1;
else if (!strcmp(argv[i], "-o2")) output = 2;
else if (!strcmp(argv[i], "-d")) doom = atoi(argv[++i]);
}
// Definitions of one dimension arrays on host and device
// Actually are a 2D size array but we declare them 1D as we want contiguous memory allocation
char* h_cells; // our results will be copied on CPU cells
char* d_old; // Device 2D cells for oldGen cells
char* d_new; // Device 2D cells for new generations cells
char* d_Swap; // Swap cells just like game_mpi
// Allocation of host cells, we allocate more byte space [(N+2)^2], to retain our periodicity of cells
int bytes = sizeof(char)*(N+2)*(N+2);
h_cells = (char*)malloc(bytes);
// If we don't have a file, fill it with mighty randomness
// NOTE(review): only the N x N interior is initialized here; the halo
// cells are first written by the periodicity-preservation step.
if (fp == NULL)
{
srand(time(NULL));
for(i = 1; i<=N; i++) {
for(j = 1; j<=N; j++) {
h_cells[i*(N+2)+j] = rand() % 2;
}
}
}
else // fill the cells from file
{
/*Read from input file the position of initial live cells (if there is input file)*/
// NOTE(review): in this branch h_cells is never zero-initialized before
// the listed live cells are set -- verify the input covers every cell
// or that malloc'ed garbage is acceptable here.
if (fp != NULL){
char line[BUFSIZE], *token, delim[2] = " ";
fgets(line, BUFSIZE, fp);
while (!feof(fp)){ //Till the end of the file read from it
token = strtok(line, delim);
i = atoi(token);
token = strtok(NULL, delim);
j = atoi(token);
h_cells[i*(N+2)+j] = 1; //Assign a live cell in the coordinates given
fgets(line, BUFSIZE, fp);
}
fclose(fp); //Close the file
}
}
// Start Timer After Initialising The Array
cudaEvent_t event1, event2;
cudaEventCreate(&event1);
cudaEventCreate(&event2);
// Start
cudaEventRecord(event1, 0); //where 0 is the default stream
// Allocate device arrays on GPU memory
cudaMalloc(&d_old, bytes);
cudaMalloc(&d_new, bytes);
// Early-termination counters: *allzeros counts LIVE cells in the new
// generation (name is a misnomer); *change counts cells that flipped.
int *h_allzeros, *h_change;
int *d_allzeros, *d_change;
h_allzeros = (int*)malloc(sizeof(int));
h_change = (int*)malloc(sizeof(int));
cudaMalloc(&d_allzeros, sizeof(int));
cudaMalloc(&d_change, sizeof(int));
// Let's fill our device cells
cudaMemcpy(d_old, h_cells, bytes, cudaMemcpyHostToDevice);
// Set the prefferes cache configuration for the device function if we want to use shared memory
if (shared == 1) cudaFuncSetCacheConfig(evovle_kernel_shared, cudaFuncCachePreferShared);
// Find the Blocks each side of Grid has (e.g For N = 128 , we will need 8 blocks on each side of the grid)
int SideGrid;
if (shared)
SideGrid = (int)ceil(N/(float)(BLOCK_SIZE-2)); //For easier copy in shared memory, we declare more blocks per side of Grid
else
SideGrid = (int)ceil(N/(float)BLOCK_SIZE);
// For the evolution kernel we specify a two dimensional block of
// BLOCK_SIZE x BLOCK_SIZE threads.
dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE, 1);
// Create a 2D Grid , to hold our blocks
dim3 gridSize(SideGrid, SideGrid, 1);
// Start Evolutioning for given generations
for (i = 0; i < generations; i++)
{
// Print the state of our cells (if told so in command line flags)
if (output == 2 || (output == 1 && i == (generations-1)))
{
cudaMemcpy(h_cells, d_old, bytes, cudaMemcpyDeviceToHost);
int r, c;
printf("\n///////////////////////////////////////////////////\n\n");
for (r = 1; r <= N; r++){
for (c = 1; c <= N; c++){
if (h_cells[r*(N+2)+c] == 0) printf("-");
else if (h_cells[r*(N+2)+c] == 1) printf("X");
else printf("?");
}
printf("\n");
}
}
// The last iteration only prints; no further evolution is computed.
if (i != (generations-1))
{
// Zero the termination counters on the device every 10th generation.
if ((doom == 1) && ((i%10) == 0) && (i != 0)){
*h_allzeros = 0;
*h_change = 0;
cudaMemcpy(d_allzeros, h_allzeros, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_change, h_change, sizeof(int), cudaMemcpyHostToDevice);
}
if (periodicity == 0){ // Calculate periodicity on cpu (faster processor but expensive memcopy)
cudaMemcpy(h_cells, d_old, bytes, cudaMemcpyDeviceToHost);
periodicityPreservationCPU(N, h_cells); // CPU is better if N is small (due to for loop) (CPU-GHz GPU-MHz)
cudaMemcpy(d_old, h_cells, bytes, cudaMemcpyHostToDevice); // GPU is better in big numbers as we avoid transfer data (memcopy)
}
else periodicityPreservationGPU<<<1,1>>>(N, d_old); // else on gpu (no memcopy on cpu but much slower gpu processor)
// Evolution of the cells, using shared memory in gpu or not
if (shared)
evovle_kernel_shared<<<gridSize, blockSize>>>(N, d_old, d_new, d_allzeros, d_change);
else
evovle_kernel<<<gridSize, blockSize>>>(N, d_old, d_new, d_allzeros, d_change);
// Stop when nothing changed or no live cell remains (the blocking
// cudaMemcpy below also synchronizes with the kernel above).
if ((doom == 1) && ((i%10) == 0) && (i != 0)){
cudaMemcpy(h_allzeros, d_allzeros, sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(h_change, d_change, sizeof(int), cudaMemcpyDeviceToHost);
//printf("Zeros:%d Change:%d\n", (*h_allzeros), (*h_change));
if ((*h_change) == 0 || (*h_allzeros) == 0){
printf("Program terminated (nothing changed or all extinguisted in this generation)\n");
break;
}
}
// Swap our grids and proceed to next generation
d_Swap = d_old;
d_old = d_new;
d_new = d_Swap;
}
}
// Copy back results and sum
cudaMemcpy(h_cells, d_old, bytes, cudaMemcpyDeviceToHost);
// Release memory
cudaFree(d_allzeros);
cudaFree(d_change);
cudaFree(d_old);
cudaFree(d_new);
free(h_cells);
free(h_change);
free(h_allzeros);
// Stop The Timer
cudaEventRecord(event2, 0);
// Calculate Elapsed Time
//synchronize
cudaEventSynchronize(event1); //optional
cudaEventSynchronize(event2); //wait for the event to be executed!
//calculate time
float dt_ms;
cudaEventElapsedTime(&dt_ms, event1, event2);
printf("--------------------------------------------------------------\n");
printf("Runtime %f \n", dt_ms/1000);
printf("--------------------------------------------------------------\n");
return 0;
} |
12,750 | // Matrix addition, GPU version
#include <stdio.h>
// Element-wise matrix addition c = a + b over an N x N matrix.
// NOTE: every thread executing this kernel walks the ENTIRE matrix, so it
// is only meaningful as a single-thread reference version.
__global__
void add_matrix(float *a, float *b, float *c, int N)
{
    for (int row = 0; row < N; ++row)
    {
        for (int col = 0; col < N; ++col)
        {
            int idx = row + col * N;   // column-major flattening
            c[idx] = a[idx] + b[idx];
        }
    }
}
// Element-wise matrix addition c = a + b with one thread per element,
// using a 2-D grid/block decomposition.
__global__
void add_matrix_tid(float *a, float *b, float *c, int N)
{
    // Global 2-D thread coordinates.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // fix: the original had no bounds guard; when the grid does not divide
    // N exactly, edge threads would read and write out of bounds.
    if (x < N && y < N)
    {
        int index = x + y * N;
        c[index] = a[index] + b[index];
    }
}
// Host driver: fill two N x N matrices, add them on the GPU (one thread
// per element), print the result matrix and the kernel time.
int main()
{
    const int N = 128;
    float *a = new float[N*N];
    float *b = new float[N*N];
    float *c = new float[N*N];
    float *c_d, *a_d, *b_d;
    cudaMalloc(&a_d, N * N * sizeof(float));
    cudaMalloc(&b_d, N * N * sizeof(float));
    cudaMalloc(&c_d, N * N * sizeof(float));
    // Report basic properties of every CUDA device.
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    for (int i = 0; i < nDevices; i++) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf("Device name: %s\n", prop.name);
        printf("Memory Clock Rate (KHz): %d\n",
               prop.memoryClockRate);
        printf("Memory Bus Width (bits): %d\n",
               prop.memoryBusWidth);
        printf("Peak Memory Bandwidth (GB/s): %f\n",
               2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf("MaxThreadsBlock: %d\n", prop.maxThreadsPerBlock);
        printf("multiProcessorCount: %d\n", prop.multiProcessorCount);
        // fix: totalGlobalMem/sizeof(float) is size_t -- %d was wrong
        printf("TotalGlobalMem / sizeof(float): %zu\n", prop.totalGlobalMem/sizeof(float));
        // fix: maxGridSize is int[3]; passing the array to %d was UB
        printf("MaxGridSize: (%d,%d,%d)\n", prop.maxGridSize[0],
               prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("MaxThreadDim:(%d,%d,%d) \n", (prop.maxThreadsDim[0]),
               (prop.maxThreadsDim[1]),(prop.maxThreadsDim[2]));
    }
    // initialize matrix a & b
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            a[i+j*N] = 10 + i;
            b[i+j*N] = (float)j / N;
        }
    }
    // create events and set starting point for timer
    cudaEvent_t myEvent,myEventB;
    cudaEventCreate(&myEvent);
    cudaEventCreate(&myEventB);
    // copy matrixes to GPU
    cudaMemcpy(a_d, a, N*N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, N*N*sizeof(float), cudaMemcpyHostToDevice);
    // fix: the original used dim3 dimBlock(128,128) = 16384 threads per
    // block, far over the 1024-thread limit, so every launch failed and c
    // was never computed.  Use 16x16 = 256 threads and scale the grid.
    const int block_size = 16;
    const int grid_size = (N + block_size - 1) / block_size;
    dim3 dimBlock(block_size, block_size);
    dim3 dimGrid(grid_size, grid_size);
    // do GPU calculations
    cudaEventRecord(myEvent, 0);
    cudaEventSynchronize(myEvent);
    add_matrix_tid <<< dimGrid, dimBlock>>> (a_d, b_d, c_d, N);
    cudaDeviceSynchronize();   // fix: cudaThreadSynchronize is deprecated
    // set end point for timer and get the elapsed time
    cudaEventRecord(myEventB, 0);
    cudaEventSynchronize(myEventB);
    float theTime;
    cudaEventElapsedTime(&theTime, myEvent, myEventB);
    // Copy the result back and print it
    cudaMemcpy(c, c_d, N*N*sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            printf("%0.2f ", c[i+j*N]);
        }
        printf("\n##\n");
    }
    printf("time in ms: %f \n",theTime);
    // fix: memory from new[] must be released with delete[], not delete
    delete[] a;
    delete[] b;
    delete[] c;
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}
|
12,751 | #define t_max 1
#define t 1
/*
(w1_a[0][0]=((a[0][0][0][0][1]*(a[0][0][0][0][1]+1.0))*((a[0][0][0][0][1]+2.0)*0.16666666666666666)))
(w2_a[0][0]=(((a[0][0][0][0][1]-1.0)*(a[0][0][0][0][1]+1.0))*((a[0][0][0][0][1]+2.0)*-0.5)))
(w3_a[0][0]=(((a[0][0][0][0][1]-1.0)*a[0][0][0][0][1])*((a[0][0][0][0][1]+2.0)*0.5)))
(w4_a[0][0]=(((a[0][0][0][0][1]-1.0)*a[0][0][0][0][1])*((a[0][0][0][0][1]+1.0)*-0.16666666666666666)))
(w1_b[0][0]=((b[0][0][0][0][2]*(b[0][0][0][0][2]+1.0))*((b[0][0][0][0][2]+2.0)*0.16666666666666666)))
(w2_b[0][0]=(((b[0][0][0][0][2]-1.0)*(b[0][0][0][0][2]+1.0))*((b[0][0][0][0][2]+2.0)*-0.5)))
(w3_b[0][0]=(((b[0][0][0][0][2]-1.0)*b[0][0][0][0][2])*((b[0][0][0][0][2]+2.0)*0.5)))
(w4_b[0][0]=(((b[0][0][0][0][2]-1.0)*b[0][0][0][0][2])*((b[0][0][0][0][2]+1.0)*-0.16666666666666666)))
(w1_c[0][0]=((c[0][0][0][0][3]*(c[0][0][0][0][3]+1.0))*((c[0][0][0][0][3]+2.0)*0.16666666666666666)))
(w2_c[0][0]=(((c[0][0][0][0][3]-1.0)*(c[0][0][0][0][3]+1.0))*((c[0][0][0][0][3]+2.0)*-0.5)))
(w3_c[0][0]=(((c[0][0][0][0][3]-1.0)*c[0][0][0][0][3])*((c[0][0][0][0][3]+2.0)*0.5)))
(w4_c[0][0]=(((c[0][0][0][0][3]-1.0)*c[0][0][0][0][3])*((c[0][0][0][0][3]+1.0)*-0.16666666666666666)))
(u[0][0][0][1][0]=((((((((w1_a*w1_b)*(w1_c*u[-1][-1][-1][0][0]))+((w2_a*w1_b)*(w1_c*u[0][-1][-1][0][0])))+(((w3_a*w1_b)*(w1_c*u[1][-1][-1][0][0]))+((w4_a*w1_b)*(w1_c*u[2][-1][-1][0][0]))))+((((w1_a*w2_b)*(w1_c*u[-1][0][-1][0][0]))+((w2_a*w2_b)*(w1_c*u[0][0][-1][0][0])))+(((w3_a*w2_b)*(w1_c*u[1][0][-1][0][0]))+((w4_a*w2_b)*(w1_c*u[2][0][-1][0][0])))))+(((((w1_a*w3_b)*(w1_c*u[-1][1][-1][0][0]))+((w2_a*w3_b)*(w1_c*u[0][1][-1][0][0])))+(((w3_a*w3_b)*(w1_c*u[1][1][-1][0][0]))+((w4_a*w3_b)*(w1_c*u[2][1][-1][0][0]))))+((((w1_a*w4_b)*(w1_c*u[-1][2][-1][0][0]))+((w2_a*w4_b)*(w1_c*u[0][2][-1][0][0])))+(((w3_a*w4_b)*(w1_c*u[1][2][-1][0][0]))+((w4_a*w4_b)*(w1_c*u[2][2][-1][0][0]))))))+((((((w1_a*w1_b)*(w2_c*u[-1][-1][0][0][0]))+((w2_a*w1_b)*(w2_c*u[0][-1][0][0][0])))+(((w3_a*w1_b)*(w2_c*u[1][-1][0][0][0]))+((w4_a*w1_b)*(w2_c*u[2][-1][0][0][0]))))+((((w1_a*w2_b)*(w2_c*u[-1][0][0][0][0]))+((w2_a*w2_b)*(w2_c*u[0][0][0][0][0])))+(((w3_a*w2_b)*(w2_c*u[1][0][0][0][0]))+((w4_a*w2_b)*(w2_c*u[2][0][0][0][0])))))+(((((w1_a*w3_b)*(w2_c*u[-1][1][0][0][0]))+((w2_a*w3_b)*(w2_c*u[0][1][0][0][0])))+(((w3_a*w3_b)*(w2_c*u[1][1][0][0][0]))+((w4_a*w3_b)*(w2_c*u[2][1][0][0][0]))))+((((w1_a*w4_b)*(w2_c*u[-1][2][0][0][0]))+((w2_a*w4_b)*(w2_c*u[0][2][0][0][0])))+(((w3_a*w4_b)*(w2_c*u[1][2][0][0][0]))+((w4_a*w4_b)*(w2_c*u[2][2][0][0][0])))))))+(((((((w1_a*w1_b)*(w3_c*u[-1][-1][1][0][0]))+((w2_a*w1_b)*(w3_c*u[0][-1][1][0][0])))+(((w3_a*w1_b)*(w3_c*u[1][-1][1][0][0]))+((w4_a*w1_b)*(w3_c*u[2][-1][1][0][0]))))+((((w1_a*w2_b)*(w3_c*u[-1][0][1][0][0]))+((w2_a*w2_b)*(w3_c*u[0][0][1][0][0])))+(((w3_a*w2_b)*(w3_c*u[1][0][1][0][0]))+((w4_a*w2_b)*(w3_c*u[2][0][1][0][0])))))+(((((w1_a*w3_b)*(w3_c*u[-1][1][1][0][0]))+((w2_a*w3_b)*(w3_c*u[0][1][1][0][0])))+(((w3_a*w3_b)*(w3_c*u[1][1][1][0][0]))+((w4_a*w3_b)*(w3_c*u[2][1][1][0][0]))))+((((w1_a*w4_b)*(w3_c*u[-1][2][1][0][0]))+((w2_a*w4_b)*(w3_c*u[0][2][1][0][0])))+(((w3_a*w4_b)*(w3_c*u[1][2][1][0][0]))+((w4_a*w4_b)*(w3_c*u[2][2][1][0][0]))))))+((((((w1_a*w1_b)*(w4_c*u
[-1][-1][2][0][0]))+((w2_a*w1_b)*(w4_c*u[0][-1][2][0][0])))+(((w3_a*w1_b)*(w4_c*u[1][-1][2][0][0]))+((w4_a*w1_b)*(w4_c*u[2][-1][2][0][0]))))+((((w1_a*w2_b)*(w4_c*u[-1][0][2][0][0]))+((w2_a*w2_b)*(w4_c*u[0][0][2][0][0])))+(((w3_a*w2_b)*(w4_c*u[1][0][2][0][0]))+((w4_a*w2_b)*(w4_c*u[2][0][2][0][0])))))+(((((w1_a*w3_b)*(w4_c*u[-1][1][2][0][0]))+((w2_a*w3_b)*(w4_c*u[0][1][2][0][0])))+(((w3_a*w3_b)*(w4_c*u[1][1][2][0][0]))+((w4_a*w3_b)*(w4_c*u[2][1][2][0][0]))))+((((w1_a*w4_b)*(w4_c*u[-1][2][2][0][0]))+((w2_a*w4_b)*(w4_c*u[0][2][2][0][0])))+(((w3_a*w4_b)*(w4_c*u[1][2][2][0][0]))+((w4_a*w4_b)*(w4_c*u[2][2][2][0][0])))))))))
*/
__global__ void tricubic_interpolation(double * * u_0_1_out, double * u_0_0, double * u_0_1, double * a_1_0, double * b_2_0, double * c_3_0, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c)
{
/*
const double * const u__a_1[16] = { a_1_0 } ;
const double * const u__b_2[16] = { b_2_0 } ;
const double * const u__c_3[16] = { c_3_0 } ;
double * const u__u_0[16] = { u_0_0, u_0_1 } ;
*/
double w1_a;
double w1_b;
double w1_c;
double w2_a;
double w2_b;
double w2_c;
double w3_a;
double w3_b;
double w3_c;
double w4_a;
double w4_b;
double w4_c;
int _idx0;
int _idx1;
int _idx10;
int _idx11;
int _idx12;
int _idx13;
int _idx14;
int _idx15;
int _idx16;
int _idx17;
int _idx18;
int _idx19;
int _idx2;
int _idx20;
int _idx21;
int _idx22;
int _idx23;
int _idx24;
int _idx25;
int _idx26;
int _idx27;
int _idx28;
int _idx29;
int _idx3;
int _idx30;
int _idx31;
int _idx32;
int _idx33;
int _idx34;
int _idx35;
int _idx36;
int _idx37;
int _idx38;
int _idx39;
int _idx4;
int _idx40;
int _idx41;
int _idx42;
int _idx43;
int _idx44;
int _idx45;
int _idx46;
int _idx47;
int _idx48;
int _idx49;
int _idx5;
int _idx50;
int _idx51;
int _idx52;
int _idx53;
int _idx54;
int _idx55;
int _idx56;
int _idx57;
int _idx58;
int _idx59;
int _idx6;
int _idx60;
int _idx61;
int _idx62;
int _idx63;
int _idx64;
int _idx7;
int _idx8;
int _idx9;
int chunk_idx_x;
int chunk_idx_x_max;
int chunk_idx_y;
int chunk_idx_y_max;
int chunk_idx_z;
int chunk_idx_z_max;
int idx_1_2;
int size_1_1;
int size_1_2;
//int t;
int thd_idx_x;
int thd_idx_y;
int thd_idx_z;
int thdblks_idx_x;
int thdblks_idx_x_max;
int thdblks_idx_y;
int thdblks_idx_y_max;
int thdblks_idx_z;
int thdblks_idx_z_max;
int tmp;
/*
Initializations
*/
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x)));
chunk_idx_x_max=(chunk_idx_x+c);
chunk_idx_y=(threadIdx.y+(tmp*blockDim.y));
chunk_idx_y_max=(chunk_idx_y+1);
chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
chunk_idx_z_max=(chunk_idx_z+1);
thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
thdblks_idx_x_max=(thdblks_idx_x+tbx);
thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y)));
thdblks_idx_y_max=(thdblks_idx_y+tby);
thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z)));
thdblks_idx_z_max=(thdblks_idx_z+tbz);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */
/* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */
/*
for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
thd_idx_z=chunk_idx_z;
thd_idx_y=chunk_idx_y;
for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1)
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0])
*/
/* _idx0 = ((((thd_idx_z*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x) */
_idx0=((((thd_idx_z*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x);
w1_a=((a_1_0[_idx0]*(a_1_0[_idx0]+1.0))*((a_1_0[_idx0]+2.0)*0.16666666666666666));
w2_a=(((a_1_0[_idx0]-1.0)*(a_1_0[_idx0]+1.0))*((a_1_0[_idx0]+2.0)*-0.5));
w3_a=(((a_1_0[_idx0]-1.0)*a_1_0[_idx0])*((a_1_0[_idx0]+2.0)*0.5));
w4_a=(((a_1_0[_idx0]-1.0)*a_1_0[_idx0])*((a_1_0[_idx0]+1.0)*-0.16666666666666666));
w1_b=((b_2_0[_idx0]*(b_2_0[_idx0]+1.0))*((b_2_0[_idx0]+2.0)*0.16666666666666666));
w2_b=(((b_2_0[_idx0]-1.0)*(b_2_0[_idx0]+1.0))*((b_2_0[_idx0]+2.0)*-0.5));
w3_b=(((b_2_0[_idx0]-1.0)*b_2_0[_idx0])*((b_2_0[_idx0]+2.0)*0.5));
w4_b=(((b_2_0[_idx0]-1.0)*b_2_0[_idx0])*((b_2_0[_idx0]+1.0)*-0.16666666666666666));
w1_c=((c_3_0[_idx0]*(c_3_0[_idx0]+1.0))*((c_3_0[_idx0]+2.0)*0.16666666666666666));
w2_c=(((c_3_0[_idx0]-1.0)*(c_3_0[_idx0]+1.0))*((c_3_0[_idx0]+2.0)*-0.5));
w3_c=(((c_3_0[_idx0]-1.0)*c_3_0[_idx0])*((c_3_0[_idx0]+2.0)*0.5));
w4_c=(((c_3_0[_idx0]-1.0)*c_3_0[_idx0])*((c_3_0[_idx0]+1.0)*-0.16666666666666666));
/* _idx1 = (((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+((((3*t)*thd_idx_z)+thd_idx_y)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x) */
_idx1=((((_idx0+(((3*t)*thd_idx_z)*y_max))+(((3*t)*thd_idx_z)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y));
/* _idx2 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+((((3*t)*thd_idx_z)+thd_idx_y)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+1) */
_idx2=(_idx1+1);
/* _idx3 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+((((3*t)*thd_idx_z)+thd_idx_y)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+2) */
_idx3=(_idx1+2);
/* _idx4 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+((((3*t)*thd_idx_z)+thd_idx_y)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+3) */
_idx4=(_idx1+3);
/* _idx5 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(3*t)) */
_idx5=((_idx1+x_max)+(3*t));
/* _idx6 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(3*t))+1) */
_idx6=(_idx5+1);
/* _idx7 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(3*t))+2) */
_idx7=(_idx5+2);
/* _idx8 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(3*t))+3) */
_idx8=(_idx5+3);
/* _idx9 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(6*t)) */
_idx9=((_idx5+x_max)+(3*t));
/* _idx10 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(6*t))+1) */
_idx10=(_idx9+1);
/* _idx11 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(6*t))+2) */
_idx11=(_idx10+1);
/* _idx12 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(6*t))+3) */
_idx12=(_idx9+3);
/* _idx13 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*t)) */
_idx13=((_idx9+x_max)+(3*t));
/* _idx14 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*t))+1) */
_idx14=(_idx13+1);
/* _idx15 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*t))+2) */
_idx15=(_idx13+2);
/* _idx16 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*t))+3) */
_idx16=(_idx13+3);
/* _idx17 = ((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t))) */
_idx17=(((_idx1+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
/* _idx18 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+1) */
_idx18=(_idx17+1);
/* _idx19 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+2) */
_idx19=(_idx17+2);
/* _idx20 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+3) */
_idx20=(_idx17+3);
/* _idx21 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(3*t)) */
_idx21=((_idx17+x_max)+(3*t));
/* _idx22 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(3*t))+1) */
_idx22=(_idx21+1);
/* _idx23 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(3*t))+2) */
_idx23=(_idx21+2);
/* _idx24 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(3*t))+3) */
_idx24=(_idx21+3);
/* _idx25 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(6*t)) */
_idx25=((_idx21+x_max)+(3*t));
/* _idx26 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(6*t))+1) */
_idx26=(_idx25+1);
/* _idx27 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(6*t))+2) */
_idx27=(_idx25+2);
/* _idx28 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(6*t))+3) */
_idx28=(_idx27+1);
/* _idx29 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(9*t)) */
_idx29=((_idx25+x_max)+(3*t));
/* _idx30 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(9*t))+1) */
_idx30=(_idx29+1);
/* _idx31 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(9*t))+2) */
_idx31=(_idx30+1);
/* _idx32 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(9*t))+3) */
_idx32=(_idx30+2);
/* _idx33 = ((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t))) */
_idx33=(((_idx17+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
/* _idx34 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+1) */
_idx34=(_idx33+1);
/* _idx35 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+2) */
_idx35=(_idx33+2);
/* _idx36 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+3) */
_idx36=(_idx35+1);
/* _idx37 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(3*t)) */
_idx37=((_idx33+x_max)+(3*t));
/* _idx38 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(3*t))+1) */
_idx38=(_idx37+1);
/* _idx39 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(3*t))+2) */
_idx39=(_idx37+2);
/* _idx40 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(3*t))+3) */
_idx40=(_idx39+1);
/* _idx41 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(6*t)) */
_idx41=((_idx37+x_max)+(3*t));
/* _idx42 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(6*t))+1) */
_idx42=(_idx41+1);
/* _idx43 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(6*t))+2) */
_idx43=(_idx41+2);
/* _idx44 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(6*t))+3) */
_idx44=(_idx41+3);
/* _idx45 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(9*t)) */
_idx45=((_idx41+x_max)+(3*t));
/* _idx46 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(9*t))+1) */
_idx46=(_idx45+1);
/* _idx47 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(9*t))+2) */
_idx47=(_idx46+1);
/* _idx48 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(9*t))+3) */
_idx48=(_idx47+1);
/* _idx49 = ((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t))) */
_idx49=(((_idx33+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
/* _idx50 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+1) */
_idx50=(_idx49+1);
/* _idx51 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+2) */
_idx51=(_idx49+2);
/* _idx52 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+3) */
_idx52=(_idx49+3);
/* _idx53 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(3*t)) */
_idx53=((_idx49+x_max)+(3*t));
/* _idx54 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(3*t))+1) */
_idx54=(_idx53+1);
/* _idx55 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(3*t))+2) */
_idx55=(_idx54+1);
/* _idx56 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(3*t))+3) */
_idx56=(_idx54+2);
/* _idx57 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(6*t)) */
_idx57=((_idx53+x_max)+(3*t));
/* _idx58 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(6*t))+1) */
_idx58=(_idx57+1);
/* _idx59 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(6*t))+2) */
_idx59=(_idx57+2);
/* _idx60 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(6*t))+3) */
_idx60=(_idx57+3);
/* _idx61 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(9*t)) */
_idx61=((_idx57+x_max)+(3*t));
/* _idx62 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(9*t))+1) */
_idx62=(_idx61+1);
/* _idx63 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(9*t))+2) */
_idx63=(_idx61+2);
/* _idx64 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(9*t))+3) */
_idx64=(_idx63+1);
u_0_1[_idx22]=((((((((w1_a*w1_b)*(w1_c*u_0_0[_idx1]))+((w2_a*w1_b)*(w1_c*u_0_0[_idx2])))+(((w3_a*w1_b)*(w1_c*u_0_0[_idx3]))+((w4_a*w1_b)*(w1_c*u_0_0[_idx4]))))+((((w1_a*w2_b)*(w1_c*u_0_0[_idx5]))+((w2_a*w2_b)*(w1_c*u_0_0[_idx6])))+(((w3_a*w2_b)*(w1_c*u_0_0[_idx7]))+((w4_a*w2_b)*(w1_c*u_0_0[_idx8])))))+(((((w1_a*w3_b)*(w1_c*u_0_0[_idx9]))+((w2_a*w3_b)*(w1_c*u_0_0[_idx10])))+(((w3_a*w3_b)*(w1_c*u_0_0[_idx11]))+((w4_a*w3_b)*(w1_c*u_0_0[_idx12]))))+((((w1_a*w4_b)*(w1_c*u_0_0[_idx13]))+((w2_a*w4_b)*(w1_c*u_0_0[_idx14])))+(((w3_a*w4_b)*(w1_c*u_0_0[_idx15]))+((w4_a*w4_b)*(w1_c*u_0_0[_idx16]))))))+((((((w1_a*w1_b)*(w2_c*u_0_0[_idx17]))+((w2_a*w1_b)*(w2_c*u_0_0[_idx18])))+(((w3_a*w1_b)*(w2_c*u_0_0[_idx19]))+((w4_a*w1_b)*(w2_c*u_0_0[_idx20]))))+((((w1_a*w2_b)*(w2_c*u_0_0[_idx21]))+((w2_a*w2_b)*(w2_c*u_0_0[_idx22])))+(((w3_a*w2_b)*(w2_c*u_0_0[_idx23]))+((w4_a*w2_b)*(w2_c*u_0_0[_idx24])))))+(((((w1_a*w3_b)*(w2_c*u_0_0[_idx25]))+((w2_a*w3_b)*(w2_c*u_0_0[_idx26])))+(((w3_a*w3_b)*(w2_c*u_0_0[_idx27]))+((w4_a*w3_b)*(w2_c*u_0_0[_idx28]))))+((((w1_a*w4_b)*(w2_c*u_0_0[_idx29]))+((w2_a*w4_b)*(w2_c*u_0_0[_idx30])))+(((w3_a*w4_b)*(w2_c*u_0_0[_idx31]))+((w4_a*w4_b)*(w2_c*u_0_0[_idx32])))))))+(((((((w1_a*w1_b)*(w3_c*u_0_0[_idx33]))+((w2_a*w1_b)*(w3_c*u_0_0[_idx34])))+(((w3_a*w1_b)*(w3_c*u_0_0[_idx35]))+((w4_a*w1_b)*(w3_c*u_0_0[_idx36]))))+((((w1_a*w2_b)*(w3_c*u_0_0[_idx37]))+((w2_a*w2_b)*(w3_c*u_0_0[_idx38])))+(((w3_a*w2_b)*(w3_c*u_0_0[_idx39]))+((w4_a*w2_b)*(w3_c*u_0_0[_idx40])))))+(((((w1_a*w3_b)*(w3_c*u_0_0[_idx41]))+((w2_a*w3_b)*(w3_c*u_0_0[_idx42])))+(((w3_a*w3_b)*(w3_c*u_0_0[_idx43]))+((w4_a*w3_b)*(w3_c*u_0_0[_idx44]))))+((((w1_a*w4_b)*(w3_c*u_0_0[_idx45]))+((w2_a*w4_b)*(w3_c*u_0_0[_idx46])))+(((w3_a*w4_b)*(w3_c*u_0_0[_idx47]))+((w4_a*w4_b)*(w3_c*u_0_0[_idx48]))))))+((((((w1_a*w1_b)*(w4_c*u_0_0[_idx49]))+((w2_a*w1_b)*(w4_c*u_0_0[_idx50])))+(((w3_a*w1_b)*(w4_c*u_0_0[_idx51]))+((w4_a*w1_b)*(w4_c*u_0_0[_idx52]))))+((((w1_a*w2_b)*(w4_c*u_0_0[_idx53]))+((w2_a*w2_b)*(w4_c*u_0_0[_idx54])))+
(((w3_a*w2_b)*(w4_c*u_0_0[_idx55]))+((w4_a*w2_b)*(w4_c*u_0_0[_idx56])))))+(((((w1_a*w3_b)*(w4_c*u_0_0[_idx57]))+((w2_a*w3_b)*(w4_c*u_0_0[_idx58])))+(((w3_a*w3_b)*(w4_c*u_0_0[_idx59]))+((w4_a*w3_b)*(w4_c*u_0_0[_idx60]))))+((((w1_a*w4_b)*(w4_c*u_0_0[_idx61]))+((w2_a*w4_b)*(w4_c*u_0_0[_idx62])))+(((w3_a*w4_b)*(w4_c*u_0_0[_idx63]))+((w4_a*w4_b)*(w4_c*u_0_0[_idx64]))))))));
}
}
}
}
__global__ void initialize(double * u_0_0, double * u_0_1, double * a_1_0, double * b_2_0, double * c_3_0, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c)
{
const double * const u__a_1[16] = { a_1_0 } ;
const double * const u__b_2[16] = { b_2_0 } ;
const double * const u__c_3[16] = { c_3_0 } ;
double * const u__u_0[16] = { u_0_0, u_0_1 } ;
double w1_a;
double w1_b;
double w1_c;
double w2_a;
double w2_b;
double w2_c;
double w3_a;
double w3_b;
double w3_c;
double w4_a;
double w4_b;
double w4_c;
int _idx0;
int _idx1;
int _idx10;
int _idx11;
int _idx12;
int _idx13;
int _idx14;
int _idx15;
int _idx16;
int _idx17;
int _idx18;
int _idx19;
int _idx2;
int _idx20;
int _idx21;
int _idx22;
int _idx23;
int _idx24;
int _idx25;
int _idx26;
int _idx27;
int _idx28;
int _idx29;
int _idx3;
int _idx30;
int _idx31;
int _idx32;
int _idx33;
int _idx34;
int _idx35;
int _idx36;
int _idx37;
int _idx38;
int _idx39;
int _idx4;
int _idx40;
int _idx41;
int _idx42;
int _idx43;
int _idx44;
int _idx45;
int _idx46;
int _idx47;
int _idx48;
int _idx49;
int _idx5;
int _idx50;
int _idx51;
int _idx52;
int _idx53;
int _idx54;
int _idx55;
int _idx56;
int _idx57;
int _idx58;
int _idx59;
int _idx6;
int _idx60;
int _idx61;
int _idx62;
int _idx63;
int _idx64;
int _idx7;
int _idx8;
int _idx9;
int chunk_idx_x;
int chunk_idx_x_max;
int chunk_idx_y;
int chunk_idx_y_max;
int chunk_idx_z;
int chunk_idx_z_max;
int idx_1_2;
int size_1_1;
int size_1_2;
//int t;
int thd_idx_x;
int thd_idx_y;
int thd_idx_z;
int thdblks_idx_x;
int thdblks_idx_x_max;
int thdblks_idx_y;
int thdblks_idx_y_max;
int thdblks_idx_z;
int thdblks_idx_z_max;
int tmp;
/*
Initializations
*/
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x)));
chunk_idx_x_max=(chunk_idx_x+c);
chunk_idx_y=(threadIdx.y+(tmp*blockDim.y));
chunk_idx_y_max=(chunk_idx_y+1);
chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
chunk_idx_z_max=(chunk_idx_z+1);
thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
thdblks_idx_x_max=(thdblks_idx_x+tbx);
thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y)));
thdblks_idx_y_max=(thdblks_idx_y+tby);
thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z)));
thdblks_idx_z_max=(thdblks_idx_z+tbz);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */
/* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */
/*
for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
thd_idx_z=chunk_idx_z;
thd_idx_y=chunk_idx_y;
for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1)
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0])
*/
/* _idx0 = ((((thd_idx_z*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x) */
_idx0=((((thd_idx_z*x_max)*y_max)+(thd_idx_y*x_max))+thd_idx_x);
a_1_0[_idx0]=0.2;
w1_a=0.1;
a_1_0[_idx0]=0.2;
w2_a=0.1;
a_1_0[_idx0]=0.2;
w3_a=0.1;
a_1_0[_idx0]=0.2;
w4_a=0.1;
b_2_0[_idx0]=0.30000000000000004;
w1_b=0.1;
b_2_0[_idx0]=0.30000000000000004;
w2_b=0.1;
b_2_0[_idx0]=0.30000000000000004;
w3_b=0.1;
b_2_0[_idx0]=0.30000000000000004;
w4_b=0.1;
c_3_0[_idx0]=0.4;
w1_c=0.1;
c_3_0[_idx0]=0.4;
w2_c=0.1;
c_3_0[_idx0]=0.4;
w3_c=0.1;
c_3_0[_idx0]=0.4;
w4_c=0.1;
/* _idx1 = (((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+((((3*t)*thd_idx_z)+thd_idx_y)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x) */
_idx1=((((_idx0+(((3*t)*thd_idx_z)*y_max))+(((3*t)*thd_idx_z)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y));
u_0_0[_idx1]=0.1;
/* _idx2 = ((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t))) */
_idx2=(((_idx1+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
u_0_0[_idx2]=0.1;
/* _idx3 = ((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t))) */
_idx3=(((_idx2+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
u_0_0[_idx3]=0.1;
/* _idx4 = ((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t))) */
_idx4=(((_idx3+((x_max+(3*t))*y_max))+((3*t)*x_max))+(9*(t*t)));
u_0_0[_idx4]=0.1;
/* _idx5 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(3*t)) */
_idx5=((_idx1+x_max)+(3*t));
u_0_0[_idx5]=0.1;
/* _idx6 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(3*t)) */
_idx6=((_idx2+x_max)+(3*t));
u_0_0[_idx6]=0.1;
/* _idx7 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(3*t)) */
_idx7=((_idx3+x_max)+(3*t));
u_0_0[_idx7]=0.1;
/* _idx8 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(3*t)) */
_idx8=((_idx4+x_max)+(3*t));
u_0_0[_idx8]=0.1;
/* _idx9 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(6*t)) */
_idx9=((_idx5+x_max)+(3*t));
u_0_0[_idx9]=0.1;
/* _idx10 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(6*t)) */
_idx10=((_idx6+x_max)+(3*t));
u_0_0[_idx10]=0.1;
/* _idx11 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(6*t)) */
_idx11=((_idx7+x_max)+(3*t));
u_0_0[_idx11]=0.1;
/* _idx12 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(6*t)) */
_idx12=((_idx8+x_max)+(3*t));
u_0_0[_idx12]=0.1;
/* _idx13 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*t)) */
_idx13=((_idx9+x_max)+(3*t));
u_0_0[_idx13]=0.1;
/* _idx14 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(9*t)) */
_idx14=((_idx10+x_max)+(3*t));
u_0_0[_idx14]=0.1;
/* _idx15 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(9*t)) */
_idx15=((_idx11+x_max)+(3*t));
u_0_0[_idx15]=0.1;
/* _idx16 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(9*t)) */
_idx16=((_idx12+x_max)+(3*t));
u_0_0[_idx16]=0.1;
/* _idx17 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+((((3*t)*thd_idx_z)+thd_idx_y)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+1) */
_idx17=(_idx1+1);
u_0_0[_idx17]=0.1;
/* _idx18 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+1) */
_idx18=(_idx2+1);
u_0_0[_idx18]=0.1;
/* _idx19 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+1) */
_idx19=(_idx3+1);
u_0_0[_idx19]=0.1;
/* _idx20 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+1) */
_idx20=(_idx4+1);
u_0_0[_idx20]=0.1;
/* _idx21 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(3*t))+1) */
_idx21=(_idx5+1);
u_0_0[_idx21]=0.1;
/* _idx22 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(3*t))+1) */
_idx22=(_idx6+1);
u_0_0[_idx22]=0.1;
/* _idx23 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(3*t))+1) */
_idx23=(_idx7+1);
u_0_0[_idx23]=0.1;
/* _idx24 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(3*t))+1) */
_idx24=(_idx8+1);
u_0_0[_idx24]=0.1;
/* _idx25 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(6*t))+1) */
_idx25=(_idx9+1);
u_0_0[_idx25]=0.1;
/* _idx26 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(6*t))+1) */
_idx26=(_idx10+1);
u_0_0[_idx26]=0.1;
/* _idx27 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(6*t))+1) */
_idx27=(_idx11+1);
u_0_0[_idx27]=0.1;
/* _idx28 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(6*t))+1) */
_idx28=(_idx12+1);
u_0_0[_idx28]=0.1;
/* _idx29 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*t))+1) */
_idx29=(_idx13+1);
u_0_0[_idx29]=0.1;
/* _idx30 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(9*t))+1) */
_idx30=(_idx14+1);
u_0_0[_idx30]=0.1;
/* _idx31 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(9*t))+1) */
_idx31=(_idx15+1);
u_0_0[_idx31]=0.1;
/* _idx32 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(9*t))+1) */
_idx32=(_idx16+1);
u_0_0[_idx32]=0.1;
/* _idx33 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+((((3*t)*thd_idx_z)+thd_idx_y)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+2) */
_idx33=(_idx1+2);
u_0_0[_idx33]=0.1;
/* _idx34 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+2) */
_idx34=(_idx2+2);
u_0_0[_idx34]=0.1;
/* _idx35 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+2) */
_idx35=(_idx3+2);
u_0_0[_idx35]=0.1;
/* _idx36 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+2) */
_idx36=(_idx4+2);
u_0_0[_idx36]=0.1;
/* _idx37 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(3*t))+2) */
_idx37=(_idx5+2);
u_0_0[_idx37]=0.1;
/* _idx38 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(3*t))+2) */
_idx38=(_idx6+2);
u_0_0[_idx38]=0.1;
/* _idx39 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(3*t))+2) */
_idx39=(_idx7+2);
u_0_0[_idx39]=0.1;
/* _idx40 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(3*t))+2) */
_idx40=(_idx24+1);
u_0_0[_idx40]=0.1;
/* _idx41 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(6*t))+2) */
_idx41=(_idx9+2);
u_0_0[_idx41]=0.1;
/* _idx42 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(6*t))+2) */
_idx42=(_idx10+2);
u_0_0[_idx42]=0.1;
/* _idx43 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(6*t))+2) */
_idx43=(_idx11+2);
u_0_0[_idx43]=0.1;
/* _idx44 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(6*t))+2) */
_idx44=(_idx12+2);
u_0_0[_idx44]=0.1;
/* _idx45 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*t))+2) */
_idx45=(_idx13+2);
u_0_0[_idx45]=0.1;
/* _idx46 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(9*t))+2) */
_idx46=(_idx30+1);
u_0_0[_idx46]=0.1;
/* _idx47 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(9*t))+2) */
_idx47=(_idx31+1);
u_0_0[_idx47]=0.1;
/* _idx48 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(9*t))+2) */
_idx48=(_idx16+2);
u_0_0[_idx48]=0.1;
/* _idx49 = ((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+((((3*t)*thd_idx_z)+thd_idx_y)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+3) */
_idx49=(_idx1+3);
u_0_0[_idx49]=0.1;
/* _idx50 = (((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+3) */
_idx50=(_idx2+3);
u_0_0[_idx50]=0.1;
/* _idx51 = (((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+3) */
_idx51=(_idx35+1);
u_0_0[_idx51]=0.1;
/* _idx52 = (((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+3) */
_idx52=(_idx4+3);
u_0_0[_idx52]=0.1;
/* _idx53 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(3*t))+3) */
_idx53=(_idx5+3);
u_0_0[_idx53]=0.1;
/* _idx54 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(3*t))+3) */
_idx54=(_idx6+3);
u_0_0[_idx54]=0.1;
/* _idx55 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(3*t))+3) */
_idx55=(_idx39+1);
u_0_0[_idx55]=0.1;
/* _idx56 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+1)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(3*t))+3) */
_idx56=(_idx24+2);
u_0_0[_idx56]=0.1;
/* _idx57 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(6*t))+3) */
_idx57=(_idx9+3);
u_0_0[_idx57]=0.1;
/* _idx58 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(6*t))+3) */
_idx58=(_idx10+3);
u_0_0[_idx58]=0.1;
/* _idx59 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(6*t))+3) */
_idx59=(_idx11+3);
u_0_0[_idx59]=0.1;
/* _idx60 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+2)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(6*t))+3) */
_idx60=(_idx12+3);
u_0_0[_idx60]=0.1;
/* _idx61 = (((((((((thd_idx_z*x_max)+((3*t)*thd_idx_z))*y_max)+(((((3*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*t))+3) */
_idx61=(_idx13+3);
u_0_0[_idx61]=0.1;
/* _idx62 = ((((((((((((thd_idx_z+1)*x_max)+((3*t)*thd_idx_z))+(3*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(3*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(9*(t*t)))+(9*t))+3) */
_idx62=(_idx30+2);
u_0_0[_idx62]=0.1;
/* _idx63 = ((((((((((((thd_idx_z+2)*x_max)+((3*t)*thd_idx_z))+(6*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(6*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(18*(t*t)))+(9*t))+3) */
_idx63=(_idx47+1);
u_0_0[_idx63]=0.1;
/* _idx64 = ((((((((((((thd_idx_z+3)*x_max)+((3*t)*thd_idx_z))+(9*t))*y_max)+((((((3*t)*thd_idx_z)+thd_idx_y)+(9*t))+3)*x_max))+((9*(t*t))*thd_idx_z))+((3*t)*thd_idx_y))+thd_idx_x)+(27*(t*t)))+(9*t))+3) */
_idx64=(_idx48+1);
u_0_0[_idx64]=0.1;
u_0_1[_idx22]=1.1;
}
}
}
}
|
12,752 | #include "includes.h"
/*
export PATH=/Developer/NVIDIA/CUDA-10.2/bin${PATH:+:${PATH}}
export DYLD_LIBRARY_PATH=/Developer/NVIDIA/CUDA-10.2/lib\${DYLD_LIBRARY_PATH:+:${DYLD_LIBRARY_PATH}}
*/
// No-op kernel: exists only to verify that the CUDA toolchain can compile
// and launch device code (a minimal smoke test).
__global__ void kernel(){
}
12,753 | #include "includes.h"
/*
 * Adds the per-row bias vector b to the row-major Z_y_dim x Z_x_dim matrix Z:
 * Z[row][col] += b[row]. Expects a 2D launch with one thread per element;
 * threads outside the matrix bounds do nothing.
 */
__global__ void addBias(float* Z, float* b, int Z_x_dim, int Z_y_dim){
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    const bool inBounds = (r < Z_y_dim) && (c < Z_x_dim);
    if (inBounds)
        Z[r * Z_x_dim + c] += b[r];
}
12,754 | // Author @ Eric Reinsmidt
// Date @ 2014.11.23
// Version 0.1
/*
Driver cuda program for gameOfLife.cu
*/
#include <stdio.h>
#include <iostream>
#include "gameOfLife.cu"
using namespace std;
// Host driver for the Game of Life pentadecathlon demo: seeds a 256x256
// automaton, advances it `numGenerations` steps on the GPU, then prints the
// final generation.
// FIX: the address-of operator in the first cudaMalloc had been mangled by a
// bad HTML-entity decode ("¤t..." in place of "&current..."), which
// broke compilation; restored to &currentGenerationOnDevice. Device buffers
// are now also freed before cudaDeviceReset().
int main() {
    // Flag to output all generations to stdout
    bool showAll = false;
    // Number of generations to run
    int numGenerations = 100;
    // Host arrays for current and next generation (256 x 256 cells)
    char currentGeneration[65536];
    char theNextGeneration[65536];
    // Device pointers for current and next generation
    char *currentGenerationOnDevice;
    char *theNextGenerationOnDevice;
    // Fill automaton with empty cells
    for (int i = 0; i < 65536; i++) {
        currentGeneration[i] = 0;
    }
    // Place a strip of 10 vertical cells in the middle of the automaton;
    // this seed evolves into the pentadecathlon oscillator.
    for (int i = 123; i < 133; i++) {
        currentGeneration[i * 256 + 128] = 1;
    }
    // Output starting generation to stdout if flag set
    if (showAll) {
        cout << "Starting Generation:" << endl;
        outputAutomaton(currentGeneration);
    }
    // Allocate memory for current generation on device
    if (cudaMalloc((void **)&currentGenerationOnDevice, 65536 * sizeof(char)) != cudaSuccess) {
        cout << "cudaMalloc() failed!" << endl;
        exit(0);
    }
    // Allocate memory for the next generation on device
    if (cudaMalloc((void **)&theNextGenerationOnDevice, 65536 * sizeof(char)) != cudaSuccess) {
        cout << "cudaMalloc() failed!" << endl;
        exit(0);
    }
    // Copy initial generation from host to device
    if (cudaMemcpy(currentGenerationOnDevice, currentGeneration, 65536 * sizeof(char), cudaMemcpyHostToDevice) != cudaSuccess) {
        cout << "cudaMemcpy() failed!" << endl;
        exit(0);
    }
    // Advance one generation per iteration until numGenerations is reached
    for (int i = 0; i < numGenerations; i++) {
        changeCellState <<<64, 1024>>>(currentGenerationOnDevice, theNextGenerationOnDevice, 256, 256);
        // Block until the device has completed all preceding requested tasks
        if (cudaDeviceSynchronize() != cudaSuccess) {
            cout << "cudaDeviceSynchronize() failed!" << endl;
            exit(0);
        }
        // Output current generation to stdout if flag set
        if (showAll) {
            cudaMemcpy(currentGeneration, theNextGenerationOnDevice, 65536 * sizeof(char), cudaMemcpyDeviceToHost);
            outputAutomaton(currentGeneration);
        }
        // The freshly computed generation becomes the current one (device-to-device copy)
        cudaMemcpy(currentGenerationOnDevice, theNextGenerationOnDevice, 65536 * sizeof(char), cudaMemcpyDeviceToDevice);
    }
    // Copy final generation on device to host
    cudaMemcpy(theNextGeneration, theNextGenerationOnDevice, 65536 * sizeof(char), cudaMemcpyDeviceToHost);
    // Output final generation to stdout
    cout << "Final Generation:" << endl;
    outputAutomaton(theNextGeneration);
    // Release device buffers before tearing the context down
    cudaFree(currentGenerationOnDevice);
    cudaFree(theNextGenerationOnDevice);
    // Explicitly destroy and clean up all resources associated
    // with the current device in the current process.
    if (cudaDeviceReset() != cudaSuccess) {
        cout << "cudaDeviceReset() failed!" << endl;
        return 1;
    }
    return 0;
}
12,755 | #define HISTOGRAM_SIZE 256 /* Histogram has 256 bins */
/* Write GPU code to perform the step(s) involved in counting sort.
Add additional kernels and device functions as needed. */
/* Accumulates (via atomicAdd across blocks) the inclusive prefix sum of the
   histogram of input_data into prefix_array. Values must lie in [0, range],
   with range < HISTOGRAM_SIZE, and blockDim.x must be >= range + 1.
   FIX 1: the Hillis-Steele scan loop was bounded by num_elements instead of
   the number of scanned entries (blockDim.x), running log2(num_elements)
   passes instead of log2(blockDim.x).
   FIX 2: the scan ping-pongs between s and s_temp; after an odd number of
   passes the result lives in s_temp, but the old code always published s.
   The result is now copied back into s when needed. */
__global__ void find_prefix_kernel(int *input_data, int *prefix_array, int num_elements, int range)
{
    __shared__ unsigned int s[HISTOGRAM_SIZE];      /* histogram, then scan "ping" buffer */
    __shared__ unsigned int s_temp[HISTOGRAM_SIZE]; /* scan "pong" buffer */

    /* Initialize shared memory */
    if (threadIdx.x <= range) {
        s[threadIdx.x] = 0;
        s_temp[threadIdx.x] = 0;
    }
    __syncthreads();

    /* Step 1: per-block histogram over a grid-stride range of the input. */
    int offset = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    while (offset < num_elements) {
        atomicAdd(&s[input_data[offset]], 1);
        offset += stride;
    }
    __syncthreads();

    /* Step 2: inclusive scan of the bin counts (Hillis-Steele, double-buffered).
       The loop trip count depends only on blockDim.x, so every thread reaches
       each __syncthreads() the same number of times. */
    int off = 1;
    int pingpong_flag = 1;
    int tid = threadIdx.x;
    while (off < blockDim.x) {
        if (pingpong_flag) {
            if (tid >= off)
                s_temp[tid] = s[tid] + s[tid - off];
            else
                s_temp[tid] = s[tid];
        }
        else {
            if (tid >= off)
                s[tid] = s_temp[tid] + s_temp[tid - off];
            else
                s[tid] = s_temp[tid];
        }
        __syncthreads();
        pingpong_flag = !pingpong_flag;
        off = 2 * off;
    }

    /* After an odd number of passes the newest values sit in s_temp. */
    if (!pingpong_flag && threadIdx.x <= range)
        s[threadIdx.x] = s_temp[threadIdx.x];
    __syncthreads();

    /* Publish: fold this block's scan into the global prefix array. */
    if (threadIdx.x <= range)
        atomicAdd(&prefix_array[threadIdx.x], s[threadIdx.x]);
    return;
}
/* Scatter phase of counting sort: thread `tid` fills sorted_array slots
   [prefix[tid-1], prefix[tid]) with the value tid, using the inclusive
   prefix sums produced by find_prefix_kernel. Assumes a single block with
   blockDim.x >= range + 1 and range < HISTOGRAM_SIZE.
   FIX: threads with tid > range previously read uninitialized shared
   entries (out-of-bounds shared memory when tid >= HISTOGRAM_SIZE) and
   scattered garbage into sorted_array; they now exit after the barrier.
   num_elements is kept in the signature for interface compatibility. */
__global__ void counting_sort_kernel(int *prefix_array, int *sorted_array, int num_elements, int range)
{
    __shared__ unsigned int prefix_shared[HISTOGRAM_SIZE];

    int tid = threadIdx.x;
    if (tid <= range)
        prefix_shared[tid] = prefix_array[tid];
    __syncthreads();

    /* Only bin values 0..range are meaningful; everyone has passed the
       barrier, so extra threads can safely retire here. */
    if (tid > range)
        return;

    int start_idx = (tid == 0) ? 0 : (int)prefix_shared[tid - 1];
    int end_idx = (int)prefix_shared[tid];
    for (int j = start_idx; j < end_idx; j++)
        sorted_array[j] = tid;
    return;
}
12,756 | #include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <cuda.h>
using namespace std;
//Kernel to get exponent, only positives
// Device helper: integer power by repeated multiplication. Writes
// base^exponent into `result`; result is 1 when exponent <= 0 (the loop
// simply does not run). No overflow protection — `result` is a plain long.
__device__ void calculate_exponent(int base,int exponent,long &result){
    result = 1;
    for (int step = 0; step < exponent; step++) {
        result *= base;
    }
}
// Kernel to fill the array with somethin', in this case its just the position but works
// Copies each thread's slice of the pre-generated random genes into the
// population array. Thread y owns `individualsPerThread` consecutive
// individuals of `number_genes` genes each; any cell index at or past
// `totalSizeOfArray` ends the thread's work.
__global__ void fill_array(int *input, int totalSizeOfArray, int individualsPerThread, int number_genes, int *randomNumbers)
{
    const int sliceLength = individualsPerThread * number_genes;
    const int sliceBegin = threadIdx.y * sliceLength;
    for (int offset = 0; offset < sliceLength; offset++) {
        const int cell = sliceBegin + offset;
        if (cell >= totalSizeOfArray)
            return;
        input[cell] = randomNumbers[cell];
    }
}
// Kernel to evaluate an individual
// Scores the individuals owned by thread y. For each individual, the score
// is the sum over its genes of gene_j ^ (number_genes - 1 - j), computed via
// calculate_exponent and accumulated in a long. Scores land at
// scores[threadIdx.y * individualsPerThread + individual].
// NOTE(review): startingPosition_scores is computed but never used — the
// store below recomputes the same expression inline.
__global__ void evaluate(int *input, int totalSizeOfArray, int number_genes, int individualsPerThread, long *scores){
int startingPosition = threadIdx.y * (number_genes*individualsPerThread);
int startingPosition_scores = threadIdx.y * individualsPerThread;  // unused, see NOTE above
long acumulated = 0;
long temp = 0;
for(int counter_individuals=0;counter_individuals<individualsPerThread;counter_individuals++){
// Stop if this individual's first gene lies past the end of the array.
if(startingPosition + (counter_individuals*number_genes) >= totalSizeOfArray){
return;
}
for(int counter_gene=0;counter_gene<number_genes;counter_gene++){
// base = flat index of gene `counter_gene` of the current individual
int base = startingPosition + (counter_individuals*number_genes) + counter_gene;
// temp = gene ^ (number_genes - 1 - counter_gene)
calculate_exponent(input[base],(number_genes-1)-counter_gene,temp);
acumulated += temp;
}
scores[(threadIdx.y*individualsPerThread)+counter_individuals] = acumulated;
acumulated=0;
}
}
// main routine that executes on the host
// Host driver: builds a random population of 1,000,000 individuals of 10
// genes each (digits 0..9), copies the random genes and zeroed scores to the
// device, launches fill_array and evaluate with a single block of
// number_of_threads threads along y, then prints every chromosome and score.
// NOTE(review): number_of_threads evaluates to 500 here, which fits the
// 1024 block-dimension limit — revisit if individuals_per_thread shrinks.
// NOTE(review): time(NULL) is used without an explicit #include <ctime>;
// presumably pulled in transitively — confirm on the target toolchain.
// No CUDA error checks, cudaFree or delete[] — tolerable for a demo that
// exits immediately, but worth adding.
int main(void)
{
const int number_genes = 10;
const int number_individuals = 1000000;
int *population_array_host = new int[number_genes*number_individuals];
int *population_array_device;
long *score_array_host = new long[number_individuals];
long *score_array_device;
int *random_numbers_host = new int[number_genes*number_individuals];
int *random_numbers_device;
// The population must be initialized randomly; compute how many threads are
// needed so each fills individuals_per_thread individuals (ceiling division).
int individuals_per_thread = 2000;
int number_of_threads = number_individuals/individuals_per_thread + (number_individuals%individuals_per_thread == 0 ? 0:1);
// Fill the host-side random-number pool with digits 0..9.
srand ( time(NULL));
for(int contador=0;contador<number_genes*number_individuals;contador++){
random_numbers_host[contador] = ( rand() % 10 );
}
// Move the random numbers to the device.
size_t memory_for_random_numbers = number_genes*number_individuals*sizeof(int);
cudaMalloc((void **) &random_numbers_device, memory_for_random_numbers);
cudaMemcpy(random_numbers_device, random_numbers_host, memory_for_random_numbers, cudaMemcpyHostToDevice);
// Zero the scores on the host...
for(int contador=0;contador<number_individuals;contador++){
score_array_host[contador] = 0;
}
// ...and copy them to the device.
size_t memory_for_scores = number_individuals*sizeof(long);
cudaMalloc((void **) &score_array_device, memory_for_scores);
cudaMemcpy(score_array_device, score_array_host, memory_for_scores, cudaMemcpyHostToDevice);
// Launch configuration: one block, threads laid out along y.
dim3 grid_fill(1,1);
dim3 block_fill(1,number_of_threads);
// Allocate the device-side population.
size_t memory_for_population = number_genes*number_individuals*sizeof(int);
cudaMalloc((void **) &population_array_device, memory_for_population);
// Populate, then evaluate. The blocking cudaMemcpy calls below provide the
// implicit synchronization after the kernel launches.
fill_array <<< grid_fill, block_fill >>> (population_array_device, number_genes * number_individuals, individuals_per_thread, number_genes,random_numbers_device);
evaluate <<< grid_fill, block_fill >>> (population_array_device, number_genes * number_individuals, number_genes, individuals_per_thread,score_array_device);
cudaMemcpy(population_array_host, population_array_device, memory_for_population, cudaMemcpyDeviceToHost);
cudaMemcpy(score_array_host, score_array_device, memory_for_scores, cudaMemcpyDeviceToHost);
// Print each chromosome, one per line, genes separated by '-'.
for(int contador=0;contador<number_genes*number_individuals;contador++){
if(contador%number_genes==0 && contador > 0){
cout << endl;
}
cout << population_array_host[contador] << "-";
}
cout << endl;
cout << "----";
cout << endl;
// Print one score per line, in the same order as the chromosomes above.
for(int contador=0;contador<number_individuals;contador++){
cout << score_array_host[contador] << endl;
}
}
|
12,757 | //pass
//--blockDim=64 --gridDim=64 --no-inline
#include <cuda.h>
#include <stdio.h>
#define N 2
// Intentionally empty device helper: a call target for the verification
// fixture below (see the "pass" directives at the top of the file).
__device__ void bar(int* q) {
}
// Verification-tool fixture kernel: passes both a global pointer and a
// shared array to bar() to exercise the tool's pointer/address-space
// handling. Deliberately does no real work.
__global__ void foo(int* p) {
__shared__ int A[10];
bar(p);
bar(A);
}
// Harness for the verifier fixture. NOTE(review): `a` is launched
// uninitialized on purpose — this file is a GPUVerify/ESBMC test expected
// to "pass" (see header), so do not "fix" it. cudaThreadSynchronize() is
// the deprecated spelling of cudaDeviceSynchronize().
int main(){
int* a;
foo<<<N,N>>>(a);
//ESBMC_verify_kernel(foo,1,N,a);
cudaThreadSynchronize();
}
|
12,758 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<string.h>
// Interactive Floyd-Warshall all-pairs shortest paths. Reads a vertex count
// and directed weighted edges (1-based endpoints), runs the classic triple
// loop ON THE HOST, and prints the distance matrix plus the elapsed time
// measured with CUDA events (only the events touch the GPU runtime).
// NOTE(review): `long adj[n][n]` is a variable-length array on the stack —
// non-standard in C++ and prone to stack overflow for large n.
// NOTE(review): "infinity" is __INT_MAX__ stored in a long, so the sum of
// two entries fits without overflow only where long is wider than int
// (e.g. LP64) — confirm on the target platform.
// The CUDA events are never destroyed (cudaEventDestroy).
int main(){
int i, j, k, n;
printf("Enter the number of vertices : \n");
scanf("%d", &n);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
long adj[n][n];
// No edge yet: every pair starts at "infinity".
for(i = 0;i < n; i++){
for(j = 0;j < n; j++)
adj[i][j] = __INT_MAX__;
}
// Edge-entry loop; note only adj[s-1][d-1] is set, so edges are directed.
while(1){
printf("Click 1 to enter edge and 0 to finish.\n");
scanf("%d", &k);
if(!k)
break;
int s, d, w;
printf("Enter start and end of edge in 1-ordering : \n");
scanf("%d %d", &s, &d);
if(s == d){
printf("Invalid edge.\n");
continue;
}
if(s > n || s < 1 || d > n || d < 1){
printf("Invalid edge.\n");
continue;
}
printf("Enter edge weight : \n");
scanf("%d", &w);
if(w < 0){
printf("Invalid edge weight.\n");
continue;
}
adj[s - 1][d - 1] = w;
}
// Distance from a vertex to itself is zero.
for(i = 0; i < n; i++)
adj[i][i] = 0;
cudaDeviceSynchronize();
cudaEventRecord(start);
// Floyd-Warshall relaxation: try every intermediate vertex k.
for(k = 0; k < n; k++){
for(i = 0; i < n; i++){
for(j = 0;j < n; j++){
long s = (long)adj[i][k] + (long)adj[k][j];
if(s < adj[i][j])
adj[i][j] = s;
}
}
}
cudaEventRecord(stop);
cudaEventSynchronize(stop);
// Print the full distance matrix, one row per line.
for(i = 0;i < n; i++){
for(j = 0;j < n; j++)
printf("%ld ", adj[i][j]);
printf("\n");
}
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("%f ms\n",milliseconds) ;
return 0;
}
12,759 | /******************************************************************************
* © Mathias Bourgoin, Université Pierre et Marie Curie (2011)
*
* Mathias.Bourgoin@gmail.com
*
* This software is a computer program whose purpose is allow GPU programming
* with the OCaml language.
*
* This software is governed by the CeCILL-B license under French law and
* abiding by the rules of distribution of free software. You can use,
* modify and/ or redistribute the software under the terms of the CeCILL-B
* license as circulated by CEA, CNRS and INRIA at the following URL
* "http://www.cecill.info".
*
* As a counterpart to the access to the source code and rights to copy,
* modify and redistribute granted by the license, users are provided only
* with a limited warranty and the software's author, the holder of the
* economic rights, and the successive licensors have only limited
* liability.
*
* In this respect, the user's attention is drawn to the risks associated
* with loading, using, modifying and/or developing or reproducing the
* software by the user in light of its specific status of free software,
* that may mean that it is complicated to manipulate, and that also
* therefore means that it is reserved for developers and experienced
* professionals having in-depth computer knowledge. Users are therefore
* encouraged to load and test the software's suitability as regards their
* requirements in conditions enabling the security of their systems and/or
* data to be ensured and, more generally, to use and operate it in the
* same conditions as regards security.
*
* The fact that you are presently reading this means that you have had
* knowledge of the CeCILL-B license and that you accept its terms.
*
* NOTE: This file contains source code provided by NVIDIA Corporation.
*******************************************************************************/
#ifdef __cplusplus
extern "C" {
#endif
/****** Single precision *****/
/* Element-wise single-precision vector addition: C[i] = A[i] + B[i].
   FIX: a stray ';' after the bounds check turned the guard into a no-op,
   so threads with i >= N wrote out of bounds; the store is now guarded. */
__global__ void vec_add(const float* A, const float* B, float* C, int N) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < N)
    C[i] = A[i] + B[i];
}
/* Element-wise single-precision vector multiplication: C[i] = A[i] * B[i].
   FIX: stray ';' after the bounds check voided the guard (out-of-bounds
   writes for i >= N); the store is now guarded. */
__global__ void vec_mult(const float* A, const float* B, float* C, int N) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < N)
    C[i] = A[i] * B[i];
}
/* Element-wise single-precision vector division: C[i] = A[i] / B[i].
   FIX: stray ';' after the bounds check voided the guard (out-of-bounds
   writes for i >= N); the store is now guarded. */
__global__ void vec_div(const float* A, const float* B, float* C, int N) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < N)
    C[i] = A[i] / B[i];
}
/* Element-wise single-precision vector subtraction: C[i] = A[i] - B[i].
   FIX: stray ';' after the bounds check voided the guard (out-of-bounds
   writes for i >= N); the store is now guarded. */
__global__ void vec_sub(const float* A, const float* B, float* C, int N) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < N)
    C[i] = A[i] - B[i];
}
/* Element-wise fused multiply-add: D[i] = A[i] + B[i] * C[i].
   FIX: stray ';' after the bounds check voided the guard (out-of-bounds
   writes for i >= N); the store is now guarded. */
__global__ void vec_fma(const float* A, const float* B, float* C, float* D,
                        int N) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < N)
    D[i] = A[i] + B[i] * C[i];
}
/****** Double precision *****/
/* Element-wise double-precision vector addition: C[i] = A[i] + B[i].
   FIX: stray ';' after the bounds check voided the guard (out-of-bounds
   writes for i >= N); the store is now guarded. */
__global__ void vec_add_64(const double* A, const double* B, double* C, int N) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < N)
    C[i] = A[i] + B[i];
}
/* Tree reduction of vec1 (length `count`) into *result, using the global
   scratch buffer tmp1.
   NOTE(review): __syncthreads() only synchronizes within one block, yet
   tid spans the whole grid — this is only correct when launched with a
   single block; confirm the launch configuration at the call sites.
   NOTE(review): n = count/2 truncates, so for odd `count` the last element
   vec1[count-1] never enters the sum; and the strided loop halves n each
   step, so non-power-of-two sizes drop elements — verify callers only pass
   power-of-two counts. */
__global__ void sum(
int * vec1,
int * result,
int* tmp1,
const int count)
{
//parallel reduction on global memory:
int n = count/2;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// First pass: pairwise sum vec1[tid] + vec1[tid+n] into the scratch buffer.
if (tid < n)
tmp1[tid] = vec1[tid] + vec1[tid+n];
__syncthreads();
// Halve the active range each step until one partial sum remains.
for (unsigned int stride = n/2; stride > 0; stride /= 2)
{
if (tid < stride)
tmp1[tid] += tmp1[tid+stride];
__syncthreads();
}
// Thread 0 publishes the final value.
if (tid == 0)
*result = tmp1[0];
}
/* Sequential max-of-absolute-values: only the thread with global id 0 does
   any work — it scans `input` serially and writes the largest |input[j]|
   into output[0]. All other threads exit immediately. */
__global__ void spoc_max(const double* input, double* output, const int size)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid != 0)
        return;

    double best = fabs(input[0]);
    for (int j = 1; j < size; j++)
    {
        const double candidate = fabs(input[j]);
        if (candidate > best)
            best = candidate;
    }
    output[0] = best;
}
/* Odd-even (bubble) compaction: copies vec1 into output, then repeatedly
   swaps adjacent pairs — alternating even/odd pair alignment each pass via
   k — so that elements whose `input` flag is set bubble ahead of unset
   slots, whose output entries are zeroed. `input` flags are updated in
   place as elements move. (Ported from OpenCL; see the commented barrier.)
   NOTE(review): the __syncthreads() sits inside `if (tid <= count/2)` — a
   barrier in divergent control flow is undefined if any thread of the
   block fails the test; confirm the launch always satisfies it.
   NOTE(review): threads cooperate through global memory but only block-level
   barriers are used — presumably a single-block launch is assumed; verify. */
__global__ void int_bubble_filter(
int* input,
const int* vec1,
int* output,
const int count)
{
int i;
int k = 1;  // toggles pair alignment: even-indexed vs odd-indexed pairs
int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid <= count/2)
{
// Each thread seeds its two output slots from vec1.
output[tid*2] = vec1[tid*2];
output[tid*2+1] = vec1[tid*2+1];
//barrier(CLK_GLOBAL_MEM_FENCE);
// count*2 passes guarantee full propagation of the compaction.
for (int n = 0; n < count*2; n++)
{
k = (k)?0:1;
i = (tid*2) + k;
if( i+1 < count)
{
// Flag pattern (0,1): pull the kept element forward one slot.
if ((!input[i]) && (input[i+1]))
{
input[i] = 1;
input[i+1] = 0;
output[i] = output[i+1];
output[i+1] = 0;
}
else
{
// Otherwise zero any slot whose flag is unset.
if (!input[i])
output[i] = 0;
if (!input[i+1])
output[i+1] = 0;
}
}
__syncthreads();
}
}
}
#ifdef __cplusplus
}
#endif
|
12,760 | extern "C" __constant__ int my_constant = 314;
/* Writes out[i] = x[i] * y[i] for every i < count.
   NOTE(review): despite the name "sum" this computes an element-wise
   product; kept as-is because callers may depend on it — confirm intent.
   FIX: the loop previously advanced with (i)++, so every thread redundantly
   rewrote the entire tail of the array starting at its own index. A
   grid-stride step produces the identical final memory state (each element
   written once, same value) without the quadratic redundant work. */
extern "C" __global__ void sum(const float *x, const float *y, float *out,
                               int count) {
  int stride = blockDim.x * gridDim.x;
  for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < count; i += stride) {
    out[i] = x[i] * y[i];
  }
}
12,761 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: XIAO Tong (email: xiaotong@mail.neu.edu.cn) 2018-04-24
*/
#include "../../XDevice.h"
#include "../../XTensor.h"
#include "../../XUtility.h"
#include "ReduceMax.h"
#include "ReduceMax.cuh"
namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
use PTX code to reduce float data
*/
/* Warp-wide shuffle-down reduction of a float via inline PTX: five
   shfl.sync.down steps (offsets 16, 8, 4, 2, 1) each compare the lane's
   value against its neighbour's with setp.<reducePTXOp> and keep the
   winner, so lane 0 ends up holding the warp's reduced value.
   reducePTXOp = lt builds a max-reduce, gt builds a min-reduce.
   NOTE(review): shfl.sync needs a PTX 6.0+ toolchain (Volta-era CUDA). */
#define SHLFUNCFLOAT(funcName, reducePTXOp) \
__device__ __forceinline__ \
float funcName(float input) \
{ \
float output; \
asm volatile( \
"{" \
".reg .f32 r0;" \
".reg .pred p;" \
"shfl.sync.down.b32 r0, %1, 0x10, 0x1f,0xffffffff;" \
"setp."#reducePTXOp".f32 p,%1,r0;" \
"@p mov.f32 %1,r0;" \
"shfl.sync.down.b32 r0, %1, 0x8, 0xf,0xffffffff;" \
"setp."#reducePTXOp".f32 p,%1,r0;" \
"@p mov.f32 %1,r0;" \
"shfl.sync.down.b32 r0, %1, 0x4, 0x7,0xffffffff;" \
"setp."#reducePTXOp".f32 p,%1,r0;" \
"@p mov.f32 %1,r0;" \
"shfl.sync.down.b32 r0, %1, 0x2, 0x3,0xffffffff;" \
"setp."#reducePTXOp".f32 p,%1,r0;" \
"@p mov.f32 %1,r0;" \
"shfl.sync.down.b32 r0, %1, 0x1, 0x1,0xffffffff;" \
"setp."#reducePTXOp".f32 p, %1, r0; " \
"@p mov.f32 %1,r0;" \
"mov.f32 %0,%1;" \
"}" \
: "=f"(output) : "f"(input)); \
return output; \
}
SHLFUNCFLOAT(shflDownReduceMax, lt)
SHLFUNCFLOAT(shflDownReduceMin, gt)
/*
use PTX code to reduce int data
*/
/* Integer twin of SHLFUNCFLOAT: warp-wide shuffle-down reduction of an int
   via inline PTX (.s32 registers, "r" constraints). reducePTXOp = lt
   builds a max-reduce, gt builds a min-reduce; lane 0 holds the result. */
#define SHLFUNCINT(funcName, reducePTXOp) \
__device__ __forceinline__ \
int funcName(int input) \
{ \
int output; \
asm volatile( \
"{" \
".reg .s32 r0;" \
".reg .pred p;" \
"shfl.sync.down.b32 r0, %1, 0x10, 0x1f,0xffffffff;" \
"setp."#reducePTXOp".s32 p,%1,r0;" \
"@p mov.s32 %1,r0;" \
"shfl.sync.down.b32 r0, %1, 0x8, 0xf,0xffffffff;" \
"setp."#reducePTXOp".s32 p,%1,r0;" \
"@p mov.s32 %1,r0;" \
"shfl.sync.down.b32 r0, %1, 0x4, 0x7,0xffffffff;" \
"setp."#reducePTXOp".s32 p,%1,r0;" \
"@p mov.s32 %1,r0;" \
"shfl.sync.down.b32 r0, %1, 0x2, 0x3,0xffffffff;" \
"setp."#reducePTXOp".s32 p,%1,r0;" \
"@p mov.s32 %1,r0;" \
"shfl.sync.down.b32 r0, %1, 0x1, 0x1,0xffffffff;" \
"setp."#reducePTXOp".s32 p, %1, r0; " \
"@p mov.s32 %1,r0;" \
"mov.s32 %0,%1;" \
"}" \
: "=r"(output) : "r"(input)); \
return output; \
}
SHLFUNCINT(shflDownReduceMax, lt)
SHLFUNCINT(shflDownReduceMin, gt)
/*
reduce a tensor to another that keeps the max value along a dimension - slow version
Given a block of data, we go over each dimension i in the stride and we have
sum_i = max_{0<=j<strideNum} input_{i,j}
where we can view the block as a matrix and input_{i,j} represent the item at the
crossing of the i-th columne and the j-th row.
>> input - the input array (representing a tensor)
>> output - the sum over each block. NOTE: output is also an array
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to finish the reduce
>> reducedStrideNum - the number of strides after reducation
>> blockSize - size of the block (i.e., stride * strideNum)
>> blockNum - how many blocks
*/
/* Generates the "slow" shared-memory reduce kernels (see the comment block
   above for the parameter contract). opName is a binary reduce macro
   (MAX/MIN), initData its identity element.
   NOTE(review): the early `return` for i >= stride * blockNum happens
   BEFORE the __syncthreads() calls below — if a block straddles that
   boundary, some threads skip the barrier, which is undefined behavior.
   Presumably launches always cover stride * blockNum exactly — confirm. */
#define KERNELREDUCEFUN3(funName, opName, initData) \
__global__ \
void funName(DTYPE * input, DTYPE * output, \
int stride, int strideNum, int reducedStrideNum, \
int blockSize, int blockNum) \
{ \
__shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE/2]; \
\
int idx = threadIdx.x * blockDim.y + threadIdx.y; \
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; \
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y; \
\
if(i >= stride * blockNum) \
return; \
\
__syncthreads(); \
\
int k = i / stride; \
int iOffset = i % stride; \
\
DTYPE value = (i < stride * blockNum && j < strideNum) ? \
input[blockSize * k + stride * j + iOffset] : initData; \
\
/* load data into the shared mem */ \
iData[threadIdx.x * blockDim.y + threadIdx.y] = value; \
\
__syncthreads(); \
\
/* do reduction in shared mem */ \
for (unsigned int s = blockDim.y/2; s > 0; s >>= 1){ \
if(threadIdx.y < s){ \
iData[idx] = opName(iData[idx + s], iData[idx]); \
} \
\
__syncthreads(); \
} \
\
/* write result for this block to the output array */ \
if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum) \
output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = iData[threadIdx.x * blockDim.y]; \
\
}
KERNELREDUCEFUN3(KernelReduceMax, MAX, FLOAT_MIN)
KERNELREDUCEFUN3(KernelReduceMin, MIN, MAX_FLOAT)
/*
reduce a tensor to another that keeps the max value along a dimension - slow version
Given a block of data, we go over each dimension i in the stride and we have
sum_i = max_{0<=j<strideNum} input_{i,j}
where we can view the block as a matrix and input_{i,j} represent the item at the
crossing of the i-th columne and the j-th row.
>> input - the input array (representing a tensor)
>> output - the sum over each block. NOTE: output is also an array
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to finish the reduce
>> reducedStrideNum - the number of strides after reducation
>> blockSize - size of the block (i.e., stride * strideNum)
>> blockNum - how many blocks
*/
/* __half specialization of the slow reduce-max kernel. On SM53+ it reduces
   directly in half precision; on older architectures it promotes each
   element to DTYPE with __half2float and converts back on the store.
   NOTE(review): as in the macro version, the early return below precedes
   the __syncthreads() calls — undefined if a block straddles
   stride * blockNum; presumably launches avoid that — confirm. */
__global__
void KernelReduceMax(__half * input, __half * output,
int stride, int strideNum, int reducedStrideNum,
int blockSize, int blockNum)
{
int idx = threadIdx.x * blockDim.y + threadIdx.y;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
if (i >= stride * blockNum)
return;
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
__shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE / 2];
#else
__shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK * MIN_CUDA_SHARED_MEM_COL_SIZE / 2];
#endif
__syncthreads();
int k = i / stride;
int iOffset = i % stride;
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
__half value = (i < stride * blockNum && j < strideNum) ?
input[blockSize * k + stride * j + iOffset] : __half(FLOAT16_MIN);
#else
DTYPE value = (i < stride * blockNum && j < strideNum) ?
__half2float(input[blockSize * k + stride * j + iOffset]) : FLOAT_MIN;
#endif
/* load data into the shared mem */
iData[threadIdx.x * blockDim.y + threadIdx.y] = value;
__syncthreads();
/* do reduction in shared mem: halve the active rows until one max remains */
for (unsigned int s = blockDim.y / 2; s > 0; s >>= 1) {
if (threadIdx.y < s && iData[idx] < iData[idx + s]) {
iData[idx] = iData[idx + s];
}
__syncthreads();
}
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
/* write result for this block to the output array */
if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum)
output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = iData[threadIdx.x * blockDim.y];
#else
/* write result for this block to the output array */
if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum)
output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __half(iData[threadIdx.x * blockDim.y]);
#endif
}
/*
reduce a tensor to another that keeps the max value along a dimension - fast version
>> input - the input array (representing a tensor)
>> output - the sum over each block. NOTE: output is also an array
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to finish the reduce
>> reducedStrideNum - the number of strides after reducation
>> blockSize - size of the block (i.e., stride * strideNum)
>> blockNum - how many blocks
*/
/* Generates the "fast" reduce kernels: each thread combines two strided
   elements with opName, reduces its warp with the PTX shuffle helper
   opFuncName, warp leaders stash partials in shared memory, and the first
   warp reduces those partials and writes the block result.
   NOTE(review): early return before __syncthreads(), as in the slow
   variant — undefined for blocks straddling stride * blockNum; and the
   warp-partial logic presumes blockDim.y is a multiple of 32 — confirm. */
#define KERNELREDUCEFUN4(funName, opName, opFuncName, initData) \
template <unsigned int goodSize> __global__ \
void funName(DTYPE * input, DTYPE * output, \
int stride, int strideNum, int reducedStrideNum, \
int blockSize, int blockNum) \
{ \
__shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK]; \
\
unsigned int tid = threadIdx.y; \
unsigned int j = blockIdx.y * (blockDim.y * 2) + threadIdx.y; \
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; \
\
if(i >= stride * blockNum) \
return; \
\
__syncthreads(); \
\
/* first level reduction */ \
int k = i / stride; \
int iOffset = i % stride; \
\
DTYPE * data = iData + threadIdx.x * blockDim.y; \
DTYPE * inputData = input + k * blockSize; \
DTYPE value = j < strideNum ? inputData[j * stride + iOffset] : initData; \
DTYPE value2 = j + blockDim.y < strideNum ? inputData[(j + blockDim.y) * stride + iOffset]: initData; \
\
value = opName(value, value2); \
value = opFuncName(value); \
if ((tid & 0x1f) == 0) \
data[tid / 32] = value; \
__syncthreads(); \
\
if (tid < 32) { \
if (tid < blockDim.y / 32) \
value = data[tid]; \
else \
value = initData; \
value = opFuncName(value); \
if (tid == 0 && blockIdx.y < reducedStrideNum) \
output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = value; \
} \
}
KERNELREDUCEFUN4(KernelReduceMaxFast, MAX, shflDownReduceMax, FLOAT_MIN)
KERNELREDUCEFUN4(KernelReduceMinFast, MIN, shflDownReduceMin, MAX_FLOAT)
/*
reduce a tensor to another that keeps the max value along a dimension - fast version
>> input - the input array (representing a tensor)
>> output - the sum over each block. NOTE: output is also an array
>> stride - stride that we need to move to the next item
>> strideNum - how many strides we need to finish the reduce
>> reducedStrideNum - the number of strides after reducation
>> blockSize - size of the block (i.e., stride * strideNum)
>> blockNum - how many blocks
*/
/* __half specialization of the fast reduce-max kernel: pairwise first-level
   combine, then a fully unrolled shared-memory tree keyed on the template
   parameter goodSize (the padded blockDim.y). SM53+ stays in half
   precision; older architectures promote to DTYPE and convert on store.
   NOTE(review): early return before the __syncthreads() calls, as in the
   other variants — undefined for blocks straddling stride * blockNum. */
template <unsigned int goodSize> __global__
void KernelReduceMaxFast(__half * input, __half * output,
int stride, int strideNum, int reducedStrideNum,
int blockSize, int blockNum)
{
unsigned int tid = threadIdx.y;
unsigned int j = blockIdx.y * (blockDim.y * 2) + threadIdx.y;
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= stride * blockNum)
return;
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
__shared__ __half iData[MAX_CUDA_THREAD_NUM_PER_BLOCK];
#else
__shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK];
#endif
__syncthreads();
/* first level reduction */
int k = i / stride;
int iOffset = i % stride;
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
__half * data = iData + threadIdx.x * blockDim.y;
__half * inputData = input + k * blockSize;
__half value = j < strideNum ? inputData[j * stride + iOffset] : __half(FLOAT16_MIN);
__half value2 = j + blockDim.y < strideNum ? inputData[(j + blockDim.y) * stride + iOffset] : __half(FLOAT16_MIN);
#else
DTYPE * data = iData + threadIdx.x * blockDim.y;
__half * inputData = input + k * blockSize;
DTYPE value = j < strideNum ? __half2float(inputData[j * stride + iOffset]) : FLOAT_MIN;
DTYPE value2 = j + blockDim.y < strideNum ? __half2float(inputData[(j + blockDim.y) * stride + iOffset]) : FLOAT_MIN;
#endif
/* load data into the shared mem */
data[tid] = MAX(value, value2);
__syncthreads();
/* unrolled shared-memory tree; each step halves the active range and
   re-synchronizes the block */
if (goodSize >= 512) { if (tid < 256) { if (data[tid] < data[tid + 256]) data[tid] = data[tid + 256]; } __syncthreads(); }
if (goodSize >= 256) { if (tid < 128) { if (data[tid] < data[tid + 128]) data[tid] = data[tid + 128]; } __syncthreads(); }
if (goodSize >= 128) { if (tid < 64) { if (data[tid] < data[tid + 64]) data[tid] = data[tid + 64]; } __syncthreads(); }
if (goodSize >= 64) { if (tid < 32) { if (data[tid] < data[tid + 32]) data[tid] = data[tid + 32]; } __syncthreads(); }
if (goodSize >= 32) { if (tid < 16) { if (data[tid] < data[tid + 16]) data[tid] = data[tid + 16]; } __syncthreads(); }
if (goodSize >= 16) { if (tid < 8) { if (data[tid] < data[tid + 8]) data[tid] = data[tid + 8]; } __syncthreads(); }
if (goodSize >= 8) { if (tid < 4) { if (data[tid] < data[tid + 4]) data[tid] = data[tid + 4]; } __syncthreads(); }
if (goodSize >= 4) { if (tid < 2) { if (data[tid] < data[tid + 2]) data[tid] = data[tid + 2]; } __syncthreads(); }
if (goodSize >= 2) { if (tid < 1) { if (data[tid] < data[tid + 1]) data[tid] = data[tid + 1]; } __syncthreads(); }
#if __CUDA_ARCH__ >= 530 || !defined(__CUDA_ARCH__)
/* write result for this block to the output array */
if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum)
output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = data[0];
#else
/* write result for this block to the output array */
if (threadIdx.y == 0 && blockIdx.y < reducedStrideNum)
output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = __float2half(data[0]);
#endif
}
/*
reduce a tensor to another that keeps the max value along a dimension - simple and fast version
Each thread walks one stride column of its block sequentially (unrolled by 4
when strideNum is a multiple of 4) and writes a single max.
*/
__global__
void KernelReduceMaxSimpleFast(DTYPE * input, DTYPE * output,
int stride, int strideNum, int blockSize, int blockNum)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= stride)
return;
/* NOTE(review): with the guard above i < stride <= blockSize, so blockIndex is
   always 0 and offset == i; blockNum is never used. Confirm whether this
   kernel is actually launched anywhere. */
int blockIndex = i / blockSize;
int offset = i % blockSize;
DTYPE * ip = input + blockIndex * blockSize + offset;
DTYPE * op = output + blockIndex * stride + offset;
DTYPE max = DTYPE_MIN;
if(strideNum % 4 == 0){
/* 4-way unrolled pass over the column */
int stride2 = stride + stride;
int stride3 = stride2 + stride;
int stride4 = stride3 + stride;
for(int k = 0; k < blockSize; k += stride4){
DTYPE m = MAX(MAX(ip[k], ip[k + stride]), MAX(ip[k + stride2], ip[k + stride3]));
max = MAX(max, m);
}
}
else{
for (int k = 0; k < blockSize; k += stride)
max = MAX(max, ip[k]);
}
/* NOTE(review): no shared memory is used, so this barrier is unnecessary */
__syncthreads();
/* NOTE(review): op already includes `offset`, so op[offset] stores at
   blockIndex*stride + 2*offset — looks like it should be *op; confirm
   against the intended launch configuration before changing. */
op[offset] = max;
}
/*
Choose a launch configuration based on the GPU's SM count heuristics:
one block per vector (grid.x = vectorNum) and `warpNum` warps along y.
Fewer vectors -> more warps per block, capped by the number of warps
actually needed to cover `vectorSize` elements.
>> grid/block - launch configuration written out
>> vectorNum  - number of independent vectors (rows) to reduce
>> vectorSize - number of elements in each vector
*/
inline void continuousStorageThreadAllocation(dim3& grid, dim3& block, long long vectorNum, int vectorSize)
{
    int warpNum = 4;
    if (vectorNum < 20 * 8){
        warpNum = 8;
        if (vectorNum < 20 * 4){
            warpNum = 16;
            /* BUG FIX: the original tested `warpNum < 20 * 2`, which is always
               true here (warpNum == 16), so every vectorNum < 80 was bumped to
               32 warps; the intended threshold is on vectorNum. */
            if (vectorNum < 20 * 2)
                warpNum = 32;
        }
    }
    /* never allocate more warps than needed to cover one vector */
    int minWarpNum = vectorSize / 32;
    if (vectorSize % 32 != 0) minWarpNum++;
    warpNum = min(warpNum, minWarpNum);
    grid.x = (unsigned int)vectorNum;
    grid.y = 1;
    grid.z = 1;
    block.x = 1;
    block.y = warpNum * 32;
    block.z = 1;
}
/*
Reshape a 2D launch configuration so warp-level primitives can be used
along y: fold threads.x into the grid (one x-slice per block) and pad
threads.y up to at least one full warp (32 lanes).
*/
inline void adjustThreadForUseWarpOptimization(dim3& blocks, dim3& threads)
{
    const unsigned int warpWidth = 32;
    if (threads.x > 1) {
        blocks.x = blocks.x * threads.x;
        threads.x = 1;
    }
    threads.y = (threads.y < warpWidth) ? warpWidth : threads.y;
}
/*
In some cases we use fewer blocks to improve efficiency:
one warp (32 lanes) cooperatively reduces one row of `strideNum` elements
with a strided pre-pass followed by a shuffle reduction, then lane 0 writes
the result. Instantiated below as KernelReduce{Max,Min}OpLessBlocks.
*/
#define KERNELREDUCEFUN2(funName, opName, opFuncName, initData) \
__global__ \
void funName(DTYPE * input, DTYPE * output, int strideNum, int blockNum) \
{ \
/* lane id within the warp, and global warp (= row) id */ \
int idx = threadIdx.x % 32; \
int idy = (blockIdx.x * blockDim.x + threadIdx.x) / 32; \
/* NOTE(review): blockNum is unused in this kernel */ \
 \
int startIndex = idy * strideNum; \
DTYPE threadMax = initData; \
/* strided pre-reduction: each lane accumulates every 32nd element */ \
for (int i = idx; i < strideNum; i += 32) { \
threadMax = opName(input[startIndex + i], threadMax); \
} \
/* warp-level shuffle reduction; the result lands in lane 0 */ \
threadMax = opFuncName(threadMax); \
if (idx == 0) \
output[idy] = threadMax; \
}
KERNELREDUCEFUN2(KernelReduceMaxOpLessBlocks, MAX, shflDownReduceMax, FLOAT_MIN)
KERNELREDUCEFUN2(KernelReduceMinOpLessBlocks, MIN, shflDownReduceMin, MAX_FLOAT)
/*
Reduction kernel using warp shuffles (PTX-level warp reduce):
each warp reduces its partial values with opFuncName, per-warp results are
staged in shared memory, then a single warp reduces those. Instantiated
below as KernelReduce{Max,Min}Op.
*/
#define KERNELREDUCEFUN1(funName, opName, opFuncName, initData) \
__global__ \
void funName(DTYPE * input, DTYPE * output,int stride, int strideNum, \
int reducedStrideNum,int blockSize, int blockNum) \
{ \
/* one slot per warp for the per-warp partial results */ \
__shared__ DTYPE iData[MAX_CUDA_THREAD_NUM_PER_BLOCK / 32]; \
 \
unsigned int tid = threadIdx.y; \
unsigned int j = blockIdx.y * blockDim.y + threadIdx.y; \
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x; \
if (i >= stride * blockNum) \
return; \
 \
/* first level reduction */ \
int k = i / stride; \
int iOffset = i % stride; \
 \
DTYPE threadMax = initData; \
 \
DTYPE * data = iData + threadIdx.x * blockDim.y; \
DTYPE * inputData = input + k * blockSize; \
/* grid-stride style pass along the reduced dimension */ \
for (int it = j; it < strideNum; it += blockDim.y){ \
threadMax = opName(inputData[it * stride + iOffset], threadMax); \
} \
 \
__syncthreads(); \
/* warp shuffle reduce; lane 0 of each warp stages its partial result */ \
threadMax = opFuncName(threadMax); \
if ((tid & 0x1f) == 0) \
data[tid / 32] = threadMax; \
 \
__syncthreads(); \
/* use one warp to reduce remaining data */ \
if (tid < 32){ \
if (tid < blockDim.y / 32) \
threadMax = data[tid]; \
else threadMax = initData; \
threadMax = opFuncName(threadMax); \
if (tid == 0 && blockIdx.y < reducedStrideNum) \
output[(k * reducedStrideNum + blockIdx.y) * stride + iOffset] = threadMax; \
} \
}
KERNELREDUCEFUN1(KernelReduceMaxOp, MAX, shflDownReduceMax, FLOAT_MIN)
KERNELREDUCEFUN1(KernelReduceMinOp, MIN, shflDownReduceMin, MAX_FLOAT)
/*
get the max-valued items along a dimension of the tensor (cuda version).
For a 1-dimensional data array a,
sum_i = max_{0<=j<strideNum} input_{i,j}
The macro generates one host-side entry point per reduction op:
_reduceFunc1 - warp-shuffle kernel for contiguous data (stride == 1, many blocks)
_reduceFunc2 - "less blocks" kernel (one warp per row)
_reduceFunc3 - plain shared-memory kernel (small strideNum)
_reduceFun4  - templated fast kernel (larger strideNum)
The reduction may take several passes; intermediate results ping-pong
between two halves of a temporary buffer until one value per row remains.
>> input - the input tensor
>> output - the output tensor
>> dim - which dimension to reduce
*/
#define _CUDAREDUCE(_funcName, _reduceFunc1, _reduceFunc2, _reduceFunc3, _reduceFun4) \
void _funcName(const XTensor * input, XTensor * output, int dim) \
{ \
CheckNTErrors(input && output, "Empty input or output tensors!"); \
CheckNTErrors(input->order == output->order + 1, "Incorrect tensor sizes!"); \
CheckNTErrors(input->order > dim && dim >=0, "Illegal dimension to reduce!"); \
CheckNTErrors(input->dataType == output->dataType, "Unmatched data types!"); \
 \
/* every non-reduced dimension must match between input and output */ \
for(int i = 0; i < input->order; i++){ \
if(i < dim){ \
CheckNTErrors(input->dimSize[i] == output->dimSize[i], "Unmatched tensors!"); \
} \
else if(i > dim){ \
CheckNTErrors(input->dimSize[i] == output->dimSize[i - 1], "Unmatched tensors!"); \
} \
} \
 \
int cudaGridSize[3]; \
int cudaBlockSize[3]; \
int iter = 0; \
int stride = 1; \
int strideNum = input->dimSize[dim]; \
int blockSize = 1; \
int blockNum = 1; \
 \
/* dims before `dim` form independent blocks; dims after it form the stride */ \
for (int i = 0; i < input->order; i++) { \
if (i < dim) \
blockNum *= input->dimSize[i]; \
else if (i > dim) \
stride *= input->dimSize[i]; \
} \
blockSize = stride * strideNum; \
 \
int devID = input->devID; \
int devIDBackup; \
ProtectCudaDev(input->devID, devIDBackup); \
 \
/* fast path: contiguous rows, single-pass warp kernels */ \
if (stride == 1 && blockNum >= 10) { \
dim3 grids; \
dim3 blocks; \
continuousStorageThreadAllocation(grids, blocks, (long long)blockNum, strideNum); \
if (blocks.y >= 128) { \
_reduceFunc1 <<<grids, blocks >>> ((DTYPE *)input->data, (DTYPE*)output->data, stride, strideNum, grids.y, blockSize, blockNum); \
} \
else { \
/* one warp per row: 128 threads/block = 4 rows per block */ \
if (blockNum % 4 != 0) blockNum = (int)(blockNum / 4) + 1; \
else blockNum = blockNum / 4; \
_reduceFunc2 <<<blockNum, 128 >>> ((DTYPE *)input->data, (DTYPE*)output->data, strideNum, blockNum); \
} \
} \
else { \
/* general path: multi-pass reduction through a scratch buffer */ \
XMem * mem = input->mem; \
GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
int bufSize = input->unitSize * cudaGridSize[0] * stride * blockNum * 2; \
DTYPE * buf; \
if (mem != NULL) { \
mem->LockBuf(); \
buf = (DTYPE*)mem->AllocBuf(mem->devID, bufSize); \
} \
else { \
buf = (DTYPE*)XMemAlloc(devID, bufSize); \
} \
/* the two halves of the scratch buffer alternate as source/destination */ \
DTYPE * buf1 = buf; \
DTYPE * buf2 = buf + cudaGridSize[0] * stride * blockNum; \
do { \
if (input->dataType == DEFAULT_DTYPE) { \
DTYPE * iData = NULL; \
DTYPE * oData = NULL; \
if (iter == 0) { \
iData = (DTYPE*)input->data; \
oData = buf1; \
} \
else if (iter % 2 == 1) { \
iData = buf1; \
oData = buf2; \
} \
else { \
iData = buf2; \
oData = buf1; \
} \
 \
/* unroll the reduction procedure. The code is messy but it is faster. */ \
if (strideNum < 32) { \
GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
/* last pass: write straight into the output tensor */ \
if (cudaGridSize[0] == 1) \
oData = (DTYPE*)output->data; \
_reduceFunc3 <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
else if (strideNum < 128) { \
GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
if (cudaGridSize[0] == 1) \
oData = (DTYPE*)output->data; \
CheckNTErrors(cudaBlockSize[0] >= 64, "Incorrect thread number when calling the cuda kernel!"); \
adjustThreadForUseWarpOptimization(blocks, threads); \
_reduceFun4<64> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
else if (strideNum < 256) { \
GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
if (cudaGridSize[0] == 1) \
oData = (DTYPE*)output->data; \
CheckNTErrors(cudaBlockSize[0] >= 128, "Incorrect thread number when calling the cuda kernel!"); \
adjustThreadForUseWarpOptimization(blocks, threads); \
_reduceFun4<128> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
else if (strideNum < 512) { \
GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
if (cudaGridSize[0] == 1) \
oData = (DTYPE*)output->data; \
CheckNTErrors(cudaBlockSize[0] >= 256, "Incorrect thread number when calling the cuda kernel!"); \
adjustThreadForUseWarpOptimization(blocks, threads); \
_reduceFun4<256> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
else { \
GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
if (cudaGridSize[0] == 1) \
oData = (DTYPE*)output->data; \
CheckNTErrors(cudaBlockSize[0] >= 512, "Incorrect thread number when calling the cuda kernel!"); \
adjustThreadForUseWarpOptimization(blocks, threads); \
_reduceFun4<512> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
} \
else if (input->dataType == X_FLOAT16) { \
/* NOTE(review): this branch hardcodes KernelReduceMax/KernelReduceMaxFast
   instead of using _reduceFunc3/_reduceFun4, so the generated _CudaReduceMin
   computes a MAX on X_FLOAT16 data — presumably because no __half min
   kernels exist; confirm before relying on fp16 min reduction. */ \
__half * buf1ft16 = (__half *)buf1; \
__half * buf2ft16 = (__half *)buf2; \
__half * iData = NULL; \
__half * oData = NULL; \
if (iter == 0) { \
iData = (__half*)input->data; \
oData = buf1ft16; \
} \
else if (iter % 2 == 1) { \
iData = buf1ft16; \
oData = buf2ft16; \
} \
else { \
iData = buf2ft16; \
oData = buf1ft16; \
} \
 \
/* unroll the reduction procedure. The code is messy but it is faster. */ \
if (strideNum < 32) { \
GDevs.GetCudaThread2D(devID, strideNum, stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
if (cudaGridSize[0] == 1) \
oData = (__half*)output->data; \
KernelReduceMax <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
else if (strideNum < 128) { \
GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 64), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
if (cudaGridSize[0] == 1) \
oData = (__half*)output->data; \
CheckNTErrors(cudaBlockSize[0] >= 64, "Incorrect thread number when calling the cuda kernel!"); \
KernelReduceMaxFast<64> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
else if (strideNum < 256) { \
GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 128), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
if (cudaGridSize[0] == 1) \
oData = (__half*)output->data; \
CheckNTErrors(cudaBlockSize[0] >= 128, "Incorrect thread number when calling the cuda kernel!"); \
KernelReduceMaxFast<128> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
else if (strideNum < 512) { \
GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 256), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
if (cudaGridSize[0] == 1) \
oData = (__half*)output->data; \
CheckNTErrors(cudaBlockSize[0] >= 256, "Incorrect thread number when calling the cuda kernel!"); \
KernelReduceMaxFast<256> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
else { \
GDevs.GetCudaThread2D(devID, MAX(strideNum / 2 + 1, 512), stride * blockNum, MAX_INT, cudaGridSize, cudaBlockSize); \
dim3 blocks(cudaGridSize[1], cudaGridSize[0]), threads(cudaBlockSize[1], cudaBlockSize[0]); \
if (cudaGridSize[0] == 1) \
oData = (__half*)output->data; \
CheckNTErrors(cudaBlockSize[0] >= 512, "Incorrect thread number when calling the cuda kernel!"); \
KernelReduceMaxFast<512> <<<blocks, threads>>> (iData, oData, stride, strideNum, blocks.y, blockSize, blockNum); \
} \
} \
 \
/* after one pass, each row has been shrunk to cudaGridSize[0] partials */ \
strideNum = cudaGridSize[0]; \
blockSize = cudaGridSize[0]; \
 \
iter++; \
 \
} while (strideNum > 1); \
 \
if (mem != NULL) { \
mem->ReleaseBuf(mem->devID, bufSize); \
mem->UnlockBuf(); \
} \
else \
XMemFree(input->devID, buf); \
} \
BacktoCudaDev(input->devID, devIDBackup); \
}
_CUDAREDUCE(_CudaReduceMax, KernelReduceMaxOp, KernelReduceMaxOpLessBlocks, KernelReduceMax, KernelReduceMaxFast)
_CUDAREDUCE(_CudaReduceMin, KernelReduceMinOp, KernelReduceMinOpLessBlocks, KernelReduceMin, KernelReduceMinFast)
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
12,762 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
/* Auto-generated floating-point stress kernel (see header comment: "Do not
   modify"): repeatedly overwrites `comp` with arbitrary arithmetic and prints
   the final value. `comp` is passed by value, so the incoming argument only
   seeds the very first expression before being clobbered. */
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float* var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24) {
for (int i=0; i < var_1; ++i) {
comp = var_3 * floorf(+1.5144E35f);
float tmp_1 = -0.0f + (var_4 + fmodf((var_5 * (+1.1032E-36f - (var_6 * -0.0f + +0.0f))), -1.8364E35f));
comp += tmp_1 * (-1.7594E-25f + (var_7 - var_8 - var_9));
comp += tanhf(floorf(+1.6852E-43f + var_10 * -1.1582E-36f));
/* NOTE(review): the inner `i` shadows the outer loop variable, and var_11/
   var_12 are assumed to hold at least var_2 elements (host allocates 10 —
   out of bounds if var_2 > 10); intentional for a fuzzer input. */
for (int i=0; i < var_2; ++i) {
var_11[i] = -1.6470E-22f;
var_12[i] = +1.0940E36f;
comp += var_12[i] / var_11[i] + (var_13 / (var_14 + var_15));
}
if (comp >= -1.4099E35f + (-1.8257E34f + -1.0592E27f)) {
comp = var_16 - var_17 / var_18 - -1.7750E-37f / -1.9396E-19f + var_19;
comp += var_20 - -0.0f;
comp += (+1.9299E-37f + (+1.9056E12f / var_21));
comp += var_22 - -1.5014E-36f * log10f(+1.0657E-43f);
}
if (comp > -1.2880E1f / +1.7427E-44f - +1.5180E7f) {
comp = var_23 - (var_24 / -1.6423E11f);
comp = (+1.4207E-36f - +1.3393E34f + -1.1701E-43f);
}
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element float buffer filled with v.
 * BUG FIX: the original used host malloc(), but main() passes two of these
 * buffers straight to the compute<<<>>> kernel — a host pointer dereferenced
 * on the device is an illegal address. cudaMallocManaged keeps the buffer
 * readable/writable from both host and device. Exits on allocation failure. */
float* initPointer(float v) {
  float *ret = NULL;
  if (cudaMallocManaged((void**)&ret, sizeof(float) * 10) != cudaSuccess) {
    fprintf(stderr, "cudaMallocManaged failed\n");
    exit(-1);
  }
  for (int i = 0; i < 10; ++i)
    ret[i] = v;
  return ret;
}
/* Entry point of the auto-generated test: reads 25 numeric arguments,
 * launches the single-thread compute kernel, and waits for it to print. */
int main(int argc, char** argv) {
  /* the kernel consumes argv[1]..argv[25]; guard against missing arguments
     instead of dereferencing NULL inside atof/atoi */
  if (argc < 26) {
    fprintf(stderr, "usage: %s <25 numeric arguments>\n", argv[0]);
    return 1;
  }
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float* tmp_12 = initPointer( atof(argv[12]) );
  float* tmp_13 = initPointer( atof(argv[13]) );
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25);
  /* a bad launch configuration is only reported here, not by <<<>>> itself */
  cudaError_t err = cudaGetLastError();
  if (err != cudaSuccess) {
    fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    return 1;
  }
  cudaDeviceSynchronize();
  return 0;
}
|
12,763 | #include <ctime>
#include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#define RED 2
#define GREEN 1
#define BLUE 0
using namespace std;
extern "C"
/* Enumerate all CUDA devices and print their capabilities (memory sizes,
 * SM count, warp size, and launch limits) to wcout. Returns the last
 * CUDA error recorded by the runtime. */
cudaError_t cuda_main()
{
    const int kiB = 1024;
    const int miB = 1024 * 1024;
    wcout << "NBody.GPU" << endl << "=========" << endl << endl;
    int deviceTotal = 0;
    cudaGetDeviceCount(&deviceTotal);
    wcout << "Dispositivo CUDA : " << endl << endl;
    for (int dev = 0; dev < deviceTotal; ++dev)
    {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        wcout << dev << ": " << prop.name << ": " << prop.major << "." << prop.minor << endl;
        wcout << "Memoria global: " << prop.totalGlobalMem / miB << "mb" << endl;
        wcout << "Memoria compartida: " << prop.sharedMemPerBlock / kiB << "kb" << endl;
        wcout << "Memoria constante: " << prop.totalConstMem / kiB << "kb" << endl;
        wcout << "Registros por bloque: " << prop.regsPerBlock << endl << endl;
        wcout << "Multi Processor Count: " << prop.multiProcessorCount << endl << endl;
        wcout << "Tamano Warp : " << prop.warpSize << endl;
        wcout << "Threads por block: " << prop.maxThreadsPerBlock << endl;
        wcout << "Dimension Max block : [ " << prop.maxThreadsDim[0] << ", " << prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << " ]" << endl;
        wcout << "Dimension Max grid : [ " << prop.maxGridSize[0] << ", " << prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << " ]" << endl;
        wcout << endl;
    }
    return cudaGetLastError();
}
//device helper: clamp an int into the byte range [0, 255] for pixel output
__device__ unsigned char clamp(int value){
    int v = value;
    if (v > 255) v = 255;
    if (v < 0)   v = 0;
    return (unsigned char)v;
}
//funcion global de filtro
//Convolve a single-channel image with a maskWidth x maskWidth kernel M,
//clamping the accumulated value to [0,255]. Out-of-image mask taps are skipped.
//BUG FIX: the grid is rounded up to whole blocks, so threads past the image
//edge used to read garbage and write out of bounds; they are now masked off
//(same guard as img2gray).
__global__ void Convolucion(char *M, unsigned char *imageInput, int width, int height, \
unsigned int maskWidth,unsigned char *imageOutput){
    unsigned int row = blockIdx.y*blockDim.y+threadIdx.y;
    unsigned int col = blockIdx.x*blockDim.x+threadIdx.x;
    if((row < height) && (col < width)){
        int Pvalue = 0;
        //top-left corner of the mask footprint (may be negative at the borders)
        int start_row = row - (maskWidth/2);
        int start_col = col - (maskWidth/2);
        for(int i = 0; i < maskWidth; i++){
            for(int j = 0; j < maskWidth; j++ ){
                if((start_col + j >=0 && start_col + j < width) \
                   &&(start_row + i >=0 && start_row + i < height)){
                    Pvalue += imageInput[(start_row + i)*width+(start_col + j)] * M[i*maskWidth+j];
                }
            }
        }
        imageOutput[row*width+col] = clamp(Pvalue);
    }
}
//global kernel: convert an interleaved 3-channel image to grayscale using
//the 0.299/0.587/0.114 luma weights; threads outside the image do nothing
__global__ void img2gray(unsigned char *imageInput, int width, int height, unsigned char *imageOutput){
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    if((row < height) && (col < width)){
        int pixel = row*width + col;
        imageOutput[pixel] = imageInput[pixel*3+RED]*0.299 + imageInput[pixel*3+GREEN]*0.587 \
                           + imageInput[pixel*3+BLUE]*0.114;
    }
}
//funcion global de filtro
//Convolve one color plane with mask M, scaling every tap by `coef`, then
//clamp to [0,255].
//BUG FIXES: (1) threads past the image edge (grid rounded up to whole
//blocks) used to write out of bounds — now masked off; (2) the accumulator
//was an int, so every `tap * coef` product was truncated individually —
//it is now a float, truncated once at the end.
__global__ void FilterColor(char *M, unsigned char *imageInput, int width, int height, \
unsigned int maskWidth,unsigned char *imageOutput, float coef){
    unsigned int row = blockIdx.y*blockDim.y+threadIdx.y;
    unsigned int col = blockIdx.x*blockDim.x+threadIdx.x;
    if((row < height) && (col < width)){
        float Pvalue = 0.0f;
        //top-left corner of the mask footprint (may be negative at the borders)
        int start_row = row - (maskWidth/2);
        int start_col = col - (maskWidth/2);
        for(int i = 0; i < maskWidth; i++){
            for(int j = 0; j < maskWidth; j++ ){
                if((start_col + j >=0 && start_col + j < width) \
                   &&(start_row + i >=0 && start_row + i < height)){
                    Pvalue += imageInput[(start_row + i)*width+(start_col + j)] * M[i*maskWidth+j]*coef;
                }
            }
        }
        imageOutput[row*width+col] = clamp((int)Pvalue);
    }
}
/* Run grayscale conversion followed by a maskWidth x maskWidth convolution
 * on the GPU. Uploads the raw image and mask, launches img2gray then
 * Convolucion, copies the result back into h_imageOutput, and prints the
 * elapsed wall-clock time. Exits the process on any allocation/copy error.
 * >> width/height - image dimensions
 * >> size - byte size of the interleaved input image
 * >> sizeGray - byte size of one grayscale plane
 * >> dataRawImage - host input pixels
 * >> h_imageOutput - host buffer (by reference) receiving the filtered image
 * >> MASK_WIDTH/h_M - convolution mask dimensions and host data */
cudaError_t convolucionCuda(int width, int height, int size, int sizeGray, unsigned char *dataRawImage, unsigned char *&h_imageOutput, \
int MASK_WIDTH, char *h_M){
cudaError_t error = cudaSuccess;
clock_t startGPU, endGPU;
double gpu_time;
int sizeM = sizeof(char)*MASK_WIDTH*MASK_WIDTH;
char *d_M;
unsigned char *d_dataRawImage, *d_imageOutput, *d_sobelOutput;
/* allocate device memory for the mask M */
error = cudaMalloc((void**)&d_M,sizeM);
if(error != cudaSuccess){
printf("Error reservando memoria para d_M\n");
exit(-1);
}
/* allocate device memory for d_dataRawImage */
error = cudaMalloc((void**)&d_dataRawImage,size);
if(error != cudaSuccess){
printf("Error reservando memoria para d_dataRawImage\n");
exit(-1);
}
/* allocate device memory for d_imageOutput (grayscale plane) */
error = cudaMalloc((void**)&d_imageOutput,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_imageOutput\n");
exit(-1);
}
/* allocate device memory for d_sobelOutput (filtered plane) */
error = cudaMalloc((void**)&d_sobelOutput,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_sobelOutput\n");
exit(-1);
}
/* parallel CUDA pipeline (timed, including transfers) */
startGPU = clock();
error = cudaMemcpy(d_dataRawImage,dataRawImage,size, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando los datos de dataRawImage a d_dataRawImage \n");
exit(-1);
}
error = cudaMemcpy(d_M,h_M,sizeM, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando los datos de h_M a d_M \n");
exit(-1);
}
/* 32x32 thread blocks, grid rounded up to cover the whole image */
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1);
/* grayscale conversion kernel */
img2gray<<<dimGrid,dimBlock>>>(d_dataRawImage,width,height,d_imageOutput);
/* wait for the gray plane before filtering it */
cudaDeviceSynchronize();
/* convolution (filter) kernel */
Convolucion<<<dimGrid,dimBlock>>>(d_M, d_imageOutput,width,height,MASK_WIDTH,d_sobelOutput);
/* blocking copy back to the host (also synchronizes with the kernel) */
cudaMemcpy(h_imageOutput,d_sobelOutput,sizeGray,cudaMemcpyDeviceToHost);
endGPU = clock();
/* end of the parallel CUDA pipeline */
/* report elapsed time */
gpu_time = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo Algoritmo Paralelo CUDA: %.10f\n",gpu_time);
/* release device memory */
cudaFree(d_dataRawImage);
cudaFree(d_imageOutput);
cudaFree(d_M);
cudaFree(d_sobelOutput);
return cudaGetLastError();
}
/* Filter the three color planes (B, G, R) on the GPU: each plane is uploaded,
 * convolved with mask M scaled by `coef` (FilterColor), and copied back into
 * the corresponding h_imageOutput* buffer. Prints the elapsed wall-clock time
 * and exits the process on any allocation/copy error.
 * BUG FIX: the original issued three cudaMallocs in a row, overwriting
 * `error` each time, so only the R-plane allocation was actually checked;
 * each allocation is now checked individually. */
cudaError_t convolucionCudaColor(int width, int height, int sizeGray, unsigned char * splB, unsigned char * splG, unsigned char * splR,\
unsigned char *&h_imageOutputB,unsigned char *&h_imageOutputG,unsigned char *&h_imageOutputR,\
int MASK_WIDTH, char *h_M, float coef){
cudaError_t error = cudaSuccess;
clock_t startGPU, endGPU;
double gpu_time;
int sizeM = sizeof(char)*MASK_WIDTH*MASK_WIDTH;
char *d_M;
unsigned char *d_dataRawImageB, *d_dataRawImageG, *d_dataRawImageR, *d_sobelOutput;
/* allocate device memory for the mask M */
error = cudaMalloc((void**)&d_M,sizeM);
if(error != cudaSuccess){
printf("Error reservando memoria para d_M\n");
exit(-1);
}
/* allocate one device buffer per color plane, checking each allocation */
error = cudaMalloc((void**)&d_dataRawImageB,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_dataRawImage\n");
exit(-1);
}
error = cudaMalloc((void**)&d_dataRawImageG,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_dataRawImage\n");
exit(-1);
}
error = cudaMalloc((void**)&d_dataRawImageR,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_dataRawImage\n");
exit(-1);
}
/* allocate device memory for the shared output plane */
error = cudaMalloc((void**)&d_sobelOutput,sizeGray);
if(error != cudaSuccess){
printf("Error reservando memoria para d_sobelOutput\n");
exit(-1);
}
/* parallel CUDA pipeline (timed, including transfers) */
startGPU = clock();
error = cudaMemcpy(d_M,h_M,sizeM, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando los datos de h_M a d_M \n");
exit(-1);
}
/* 32x32 thread blocks, grid rounded up to cover the whole image */
int blockSize = 32;
dim3 dimBlock(blockSize,blockSize,1);
dim3 dimGrid(ceil(width/float(blockSize)),ceil(height/float(blockSize)),1);
cudaDeviceSynchronize();
/* blue plane */
error = cudaMemcpy(d_dataRawImageB,splB,sizeGray, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando los datos de splB a d_dataRawImage \n");
exit(-1);
}
FilterColor<<<dimGrid,dimBlock>>>(d_M, d_dataRawImageB,width,height,MASK_WIDTH,d_sobelOutput,coef);
cudaMemcpy(h_imageOutputB,d_sobelOutput,sizeGray,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
/* green plane */
error = cudaMemcpy(d_dataRawImageG,splG,sizeGray, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando los datos de splG a d_dataRawImage \n");
exit(-1);
}
FilterColor<<<dimGrid,dimBlock>>>(d_M,d_dataRawImageG,width,height,MASK_WIDTH,d_sobelOutput,coef);
cudaMemcpy(h_imageOutputG,d_sobelOutput,sizeGray,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
/* red plane */
error = cudaMemcpy(d_dataRawImageR,splR,sizeGray, cudaMemcpyHostToDevice);
if(error != cudaSuccess){
printf("Error copiando los datos de splR a d_dataRawImage \n");
exit(-1);
}
FilterColor<<<dimGrid,dimBlock>>>(d_M,d_dataRawImageR,width,height,MASK_WIDTH,d_sobelOutput,coef);
cudaMemcpy(h_imageOutputR,d_sobelOutput,sizeGray,cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
endGPU = clock();
/* report elapsed time */
gpu_time = ((double) (endGPU - startGPU)) / CLOCKS_PER_SEC;
printf("Tiempo Algoritmo Paralelo CUDA: %.10f\n",gpu_time);
/* release device memory */
cudaFree(d_dataRawImageB);
cudaFree(d_dataRawImageG);
cudaFree(d_dataRawImageR);
cudaFree(d_M);
cudaFree(d_sobelOutput);
return cudaGetLastError();
}
|
12,764 | /**
* Programmation GPU
* Universit Pierre et Marie Curie
* Calcul de convolution sur une image.
*/
/**
* V0
*
*/
#include <cuda.h>
#include <stdio.h>
extern "C" double my_gettimeofday();
/**
 * CUDA error checking / debugging helpers.
 * CUDA_ERROR(call) checks the return code of a runtime API call and aborts
 * with file/line context on failure. When compiled with -DCUDA_DEBUG it also
 * synchronizes the device after every checked call (CUDA_SYNC_ERROR) so that
 * asynchronous kernel faults are caught at the call site; without CUDA_DEBUG
 * the synchronization is compiled out for speed.
 */
#ifdef CUDA_DEBUG
#define CUDA_SYNC_ERROR() { \
cudaError_t sync_error; \
cudaDeviceSynchronize(); \
sync_error = cudaGetLastError(); \
if(sync_error != cudaSuccess) { \
fprintf(stderr, "[CUDA SYNC ERROR at %s:%d -> %s]\n", \
__FILE__ , __LINE__, cudaGetErrorString(sync_error)); \
exit(EXIT_FAILURE); \
} \
}
#else /* #ifdef CUDA_DEBUG */
#define CUDA_SYNC_ERROR()
#endif /* #ifdef CUDA_DEBUG */
#define CUDA_ERROR(cuda_call) { \
cudaError_t error = cuda_call; \
if(error != cudaSuccess){ \
fprintf(stderr, "[CUDA ERROR at %s:%d -> %s]\n", \
__FILE__ , __LINE__, cudaGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
CUDA_SYNC_ERROR(); \
}
/**
 * Ceiling integer division: smallest integer >= a/b.
 * After: CUDA SDK 4.1.
 */
static int iDivUp(int a, int b){
    int quotient = a / b;
    return (a % b) ? quotient + 1 : quotient;
}
/* One smoothing step of the CONVOL_MOYENNE2 filter: each pixel becomes a
 * weighted average of its (up to 8) in-image neighbors plus itself, where the
 * central weight equals 50% of the number of neighbors actually present.
 * Border pixels simply average over the neighbors that exist.
 * Reads d_buf, writes d_buf_aux; one thread per pixel, extra threads masked. */
__global__ void convolKernel(float* d_buf, float* d_buf_aux, int nbl, int nbc){
int j = blockDim.x*blockIdx.x + threadIdx.x;
int i = blockDim.y*blockIdx.y + threadIdx.y;
if (i<nbl && j<nbc)
{
/* ported from convol.c */
/*** averaging filter CONVOL_MOYENNE2 (averaging filter with
 * a stronger central weight):
 * Note: at the borders, average only over the cells that are present */
float denominateur = 0.0f;
float numerateur = 0.0f;
float poids_central;
/* accumulate the neighbors that fall inside the image, counting them */
if (i<nbl-1){
numerateur += d_buf[(i+1)*nbc+j]; ++denominateur;
if (j>0){ numerateur += d_buf[(i+1)*nbc+j-1]; ++denominateur; }
if (j<nbc-1){ numerateur += d_buf[(i+1)*nbc+j+1]; ++denominateur; }
}
if (j>0){ numerateur += d_buf[(i)*nbc+j-1]; ++denominateur; }
if (j<nbc-1){ numerateur += d_buf[(i)*nbc+j+1]; ++denominateur; }
if (i>0){
numerateur += d_buf[(i-1)*nbc+j]; ++denominateur;
if (j>0){ numerateur += d_buf[(i-1)*nbc+j-1]; ++denominateur; }
if (j<nbc-1){ numerateur += d_buf[(i-1)*nbc+j+1]; ++denominateur; }
}
poids_central = denominateur*0.5f; /* central weight = 50% of the other weights */
numerateur += poids_central*d_buf[(i)*nbc+j];
denominateur += poids_central;
d_buf_aux[i*nbc+j] = numerateur/denominateur;
}
}
/**
 * Performs 'nbiter' convolutions on the GPU and returns the pointer to the
 * buffer holding the final image (the caller's `buf`, refreshed from device
 * memory). `buf_aux` is accepted for interface compatibility but is not
 * read: the kernel fully overwrites the scratch buffer each iteration.
 * BUG FIX: the original leaked both device buffers (no cudaFree).
 * Also replaces the per-iteration device-to-device cudaMemcpy (a full-image
 * copy each pass) with a simple pointer swap.
 */
extern "C"
float *gpu_multiples_convolutions(float buf[],
                                  float buf_aux[],
                                  int nbl,
                                  int nbc,
                                  int nbiter,
                                  int nbThreadsParBloc){
    float *d_buf, *d_buf_aux;
    int taille_alloc = nbc * nbl * sizeof(float);
    CUDA_ERROR(cudaMalloc((void **) &d_buf, taille_alloc));
    CUDA_ERROR(cudaMalloc((void **) &d_buf_aux, taille_alloc));
    CUDA_ERROR(cudaMemcpy(d_buf, buf, taille_alloc, cudaMemcpyHostToDevice));
    /* grid rounded up so every pixel gets a thread */
    int grilleX = ceil((float)nbc/(float)nbThreadsParBloc);
    int grilleY = ceil((float)nbl/(float)nbThreadsParBloc);
    dim3 threads_par_bloc(nbThreadsParBloc, nbThreadsParBloc);
    dim3 taille_grille(grilleX, grilleY);
    /* ping-pong: the kernel reads d_buf and writes every pixel of d_buf_aux,
       so swapping the pointers is equivalent to the old D2D memcpy */
    for(int i=0; i<nbiter; i++){
        convolKernel<<<taille_grille, threads_par_bloc>>>(d_buf, d_buf_aux, nbl, nbc);
        CUDA_SYNC_ERROR();
        float *tmp = d_buf; d_buf = d_buf_aux; d_buf_aux = tmp;
    }
    CUDA_ERROR(cudaMemcpy(buf, d_buf, taille_alloc, cudaMemcpyDeviceToHost));
    CUDA_ERROR(cudaFree(d_buf));
    CUDA_ERROR(cudaFree(d_buf_aux));
    return buf;
}
|
12,765 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
/* Query the CUDA runtime for the latest error; on failure, report it with
 * the caller-supplied message and terminate the process. */
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
__global__ void j2d81pt (double * __restrict__ l_in, double * __restrict__ l_out, int N) {
//Determing the block's indices
int i0 = (int)(blockIdx.x)*(int)(blockDim.x);
int i = max(i0,0) + (int)(threadIdx.x);
int j0 = 4*(int)(blockIdx.y)*(int)(blockDim.y);
int j = max(j0,0) + 4*(int)(threadIdx.y);
double (*in)[8200] = (double (*)[8200]) l_in;
double (*out)[8200] = (double (*)[8200]) l_out;
if (i>=0 & j>=0 & i<=N-9 & j<=N-9) {
double _t_0_ = in[j][i];
_t_0_ += in[j][i+8];
_t_0_ += in[j+8][i];
double _t_15_ = in[j+8][i];
double _t_30_ = in[j+8][i];
double _t_45_ = in[j+8][i];
_t_0_ += in[j+8][i+8];
_t_15_ += in[j+8][i+8];
_t_30_ += in[j+8][i+8];
_t_45_ += in[j+8][i+8];
double outjc0ic0 = _t_0_ * 3.18622;
double _t_4_ = in[j][i+4];
_t_4_ += in[j+4][i+8];
double _t_17_ = in[j+4][i+8];
_t_30_ += in[j+4][i+8];
double _t_43_ = in[j+4][i+8];
_t_4_ += in[j+4][i];
_t_17_ += in[j+4][i];
_t_30_ += in[j+4][i];
_t_43_ += in[j+4][i];
_t_4_ += in[j+8][i+4];
double _t_22_ = in[j+8][i+4];
double _t_39_ = in[j+8][i+4];
double _t_55_ = in[j+8][i+4];
outjc0ic0 += _t_4_ * -0.00508225;
double _t_5_ = in[j+1][i+1];
_t_15_ += in[j+1][i+1];
_t_5_ += in[j+1][i+7];
_t_15_ += in[j+1][i+7];
_t_5_ += in[j+7][i+1];
double _t_20_ = in[j+7][i+1];
double _t_35_ = in[j+7][i+1];
double _t_50_ = in[j+7][i+1];
_t_5_ += in[j+7][i+7];
_t_20_ += in[j+7][i+7];
_t_35_ += in[j+7][i+7];
_t_50_ += in[j+7][i+7];
outjc0ic0 += _t_5_ * 0.00064516;
double _t_8_ = in[j+1][i+4];
double _t_18_ = in[j+1][i+4];
_t_8_ += in[j+4][i+1];
double _t_21_ = in[j+4][i+1];
double _t_34_ = in[j+4][i+1];
double _t_47_ = in[j+4][i+1];
_t_8_ += in[j+4][i+7];
_t_21_ += in[j+4][i+7];
_t_34_ += in[j+4][i+7];
_t_47_ += in[j+4][i+7];
_t_8_ += in[j+7][i+4];
double _t_25_ = in[j+7][i+4];
double _t_41_ = in[j+7][i+4];
double outjp3ic0 = in[j+7][i+4] * 8.10655;
outjc0ic0 += _t_8_ * -0.0723189;
double _t_9_ = in[j+2][i+2];
_t_20_ += in[j+2][i+2];
_t_30_ += in[j+2][i+2];
_t_9_ += in[j+2][i+6];
_t_20_ += in[j+2][i+6];
_t_30_ += in[j+2][i+6];
_t_9_ += in[j+6][i+2];
double _t_24_ = in[j+6][i+2];
_t_39_ += in[j+6][i+2];
double _t_52_ = in[j+6][i+2];
_t_9_ += in[j+6][i+6];
_t_24_ += in[j+6][i+6];
_t_39_ += in[j+6][i+6];
_t_52_ += in[j+6][i+6];
outjc0ic0 += _t_9_ * 0.04;
double _t_11_ = in[j+2][i+4];
_t_22_ += in[j+2][i+4];
double _t_32_ = in[j+2][i+4];
_t_11_ += in[j+4][i+2];
_t_24_ += in[j+4][i+2];
double _t_37_ = in[j+4][i+2];
double _t_48_ = in[j+4][i+2];
_t_11_ += in[j+4][i+6];
_t_24_ += in[j+4][i+6];
_t_37_ += in[j+4][i+6];
_t_48_ += in[j+4][i+6];
_t_11_ += in[j+6][i+4];
double _t_27_ = in[j+6][i+4];
double outjp2ic0 = in[j+6][i+4] * 8.10655;
_t_55_ += in[j+6][i+4];
outjc0ic0 += _t_11_ * 0.56944;
double _t_12_ = in[j+3][i+3];
_t_24_ += in[j+3][i+3];
_t_35_ += in[j+3][i+3];
_t_45_ += in[j+3][i+3];
_t_12_ += in[j+3][i+5];
_t_24_ += in[j+3][i+5];
_t_35_ += in[j+3][i+5];
_t_45_ += in[j+3][i+5];
_t_12_ += in[j+5][i+3];
_t_27_ += in[j+5][i+3];
double _t_40_ = in[j+5][i+3];
_t_52_ += in[j+5][i+3];
_t_12_ += in[j+5][i+5];
_t_27_ += in[j+5][i+5];
_t_40_ += in[j+5][i+5];
_t_52_ += in[j+5][i+5];
outjc0ic0 += _t_12_ * 2.56;
double _t_13_ = in[j+3][i+4];
_t_25_ += in[j+3][i+4];
double _t_36_ = in[j+3][i+4];
double _t_46_ = in[j+3][i+4];
_t_13_ += in[j+4][i+3];
double _t_26_ = in[j+4][i+3];
double _t_38_ = in[j+4][i+3];
double _t_49_ = in[j+4][i+3];
_t_13_ += in[j+4][i+5];
_t_26_ += in[j+4][i+5];
_t_38_ += in[j+4][i+5];
_t_49_ += in[j+4][i+5];
_t_13_ += in[j+5][i+4];
double outjp1ic0 = in[j+5][i+4] * 8.10655;
_t_41_ += in[j+5][i+4];
double _t_53_ = in[j+5][i+4];
outjc0ic0 += _t_13_ * -4.55552;
double _t_1_ = in[j][i+1];
_t_1_ += in[j][i+7];
_t_1_ += in[j+1][i];
double _t_14_ = in[j+1][i];
_t_1_ += in[j+1][i+8];
_t_14_ += in[j+1][i+8];
_t_1_ += in[j+7][i];
double _t_16_ = in[j+7][i];
double _t_31_ = in[j+7][i];
_t_46_ += in[j+7][i];
_t_1_ += in[j+7][i+8];
_t_16_ += in[j+7][i+8];
_t_31_ += in[j+7][i+8];
_t_46_ += in[j+7][i+8];
_t_1_ += in[j+8][i+1];
double _t_19_ = in[j+8][i+1];
_t_34_ += in[j+8][i+1];
_t_49_ += in[j+8][i+1];
_t_1_ += in[j+8][i+7];
_t_19_ += in[j+8][i+7];
_t_34_ += in[j+8][i+7];
_t_49_ += in[j+8][i+7];
outjc0ic0 += _t_1_ * 4.5339;
double _t_2_ = in[j][i+2];
_t_2_ += in[j][i+6];
_t_2_ += in[j+2][i];
_t_15_ += in[j+2][i];
double _t_28_ = in[j+2][i];
_t_2_ += in[j+2][i+8];
_t_15_ += in[j+2][i+8];
_t_28_ += in[j+2][i+8];
_t_2_ += in[j+6][i];
_t_17_ += in[j+6][i];
_t_32_ += in[j+6][i];
_t_45_ += in[j+6][i];
_t_2_ += in[j+6][i+8];
_t_17_ += in[j+6][i+8];
_t_32_ += in[j+6][i+8];
_t_45_ += in[j+6][i+8];
_t_2_ += in[j+8][i+2];
_t_20_ += in[j+8][i+2];
_t_37_ += in[j+8][i+2];
_t_52_ += in[j+8][i+2];
_t_2_ += in[j+8][i+6];
_t_20_ += in[j+8][i+6];
_t_37_ += in[j+8][i+6];
_t_52_ += in[j+8][i+6];
outjc0ic0 += _t_2_ * -0.000357;
double _t_3_ = in[j][i+3];
_t_3_ += in[j][i+5];
_t_3_ += in[j+3][i];
_t_16_ += in[j+3][i];
double _t_29_ = in[j+3][i];
double _t_42_ = in[j+3][i];
_t_3_ += in[j+3][i+8];
_t_16_ += in[j+3][i+8];
_t_29_ += in[j+3][i+8];
_t_42_ += in[j+3][i+8];
_t_3_ += in[j+5][i];
_t_18_ += in[j+5][i];
_t_31_ += in[j+5][i];
double _t_44_ = in[j+5][i];
_t_3_ += in[j+5][i+8];
_t_18_ += in[j+5][i+8];
_t_31_ += in[j+5][i+8];
_t_44_ += in[j+5][i+8];
_t_3_ += in[j+8][i+3];
_t_21_ += in[j+8][i+3];
_t_38_ += in[j+8][i+3];
double _t_54_ = in[j+8][i+3];
_t_3_ += in[j+8][i+5];
_t_21_ += in[j+8][i+5];
_t_38_ += in[j+8][i+5];
_t_54_ += in[j+8][i+5];
outjc0ic0 += _t_3_ * 0.002856;
double _t_6_ = in[j+1][i+2];
_t_16_ += in[j+1][i+2];
_t_6_ += in[j+1][i+6];
_t_16_ += in[j+1][i+6];
_t_6_ += in[j+2][i+1];
_t_19_ += in[j+2][i+1];
_t_29_ += in[j+2][i+1];
_t_6_ += in[j+2][i+7];
_t_19_ += in[j+2][i+7];
_t_29_ += in[j+2][i+7];
_t_6_ += in[j+6][i+1];
_t_21_ += in[j+6][i+1];
_t_36_ += in[j+6][i+1];
_t_49_ += in[j+6][i+1];
_t_6_ += in[j+6][i+7];
_t_21_ += in[j+6][i+7];
_t_36_ += in[j+6][i+7];
_t_49_ += in[j+6][i+7];
_t_6_ += in[j+7][i+2];
double _t_23_ = in[j+7][i+2];
_t_38_ += in[j+7][i+2];
_t_53_ += in[j+7][i+2];
_t_6_ += in[j+7][i+6];
_t_23_ += in[j+7][i+6];
_t_38_ += in[j+7][i+6];
_t_53_ += in[j+7][i+6];
outjc0ic0 += _t_6_ * -0.00508;
double _t_7_ = in[j+1][i+3];
_t_17_ += in[j+1][i+3];
_t_7_ += in[j+1][i+5];
_t_17_ += in[j+1][i+5];
_t_7_ += in[j+3][i+1];
_t_20_ += in[j+3][i+1];
double _t_33_ = in[j+3][i+1];
_t_43_ += in[j+3][i+1];
_t_7_ += in[j+3][i+7];
_t_20_ += in[j+3][i+7];
_t_33_ += in[j+3][i+7];
_t_43_ += in[j+3][i+7];
_t_7_ += in[j+5][i+1];
_t_22_ += in[j+5][i+1];
_t_35_ += in[j+5][i+1];
_t_48_ += in[j+5][i+1];
_t_7_ += in[j+5][i+7];
_t_22_ += in[j+5][i+7];
_t_35_ += in[j+5][i+7];
_t_48_ += in[j+5][i+7];
_t_7_ += in[j+7][i+3];
_t_24_ += in[j+7][i+3];
_t_40_ += in[j+7][i+3];
_t_55_ += in[j+7][i+3];
_t_7_ += in[j+7][i+5];
_t_24_ += in[j+7][i+5];
_t_40_ += in[j+7][i+5];
_t_55_ += in[j+7][i+5];
outjc0ic0 += _t_7_ * 0.04064;
double _t_10_ = in[j+2][i+3];
_t_21_ += in[j+2][i+3];
_t_31_ += in[j+2][i+3];
_t_10_ += in[j+2][i+5];
_t_21_ += in[j+2][i+5];
_t_31_ += in[j+2][i+5];
_t_10_ += in[j+3][i+2];
_t_23_ += in[j+3][i+2];
_t_34_ += in[j+3][i+2];
_t_44_ += in[j+3][i+2];
_t_10_ += in[j+3][i+6];
_t_23_ += in[j+3][i+6];
_t_34_ += in[j+3][i+6];
_t_44_ += in[j+3][i+6];
_t_10_ += in[j+5][i+2];
_t_25_ += in[j+5][i+2];
_t_38_ += in[j+5][i+2];
double _t_51_ = in[j+5][i+2];
_t_10_ += in[j+5][i+6];
_t_25_ += in[j+5][i+6];
_t_38_ += in[j+5][i+6];
_t_51_ += in[j+5][i+6];
_t_10_ += in[j+6][i+3];
_t_26_ += in[j+6][i+3];
_t_41_ += in[j+6][i+3];
_t_54_ += in[j+6][i+3];
_t_10_ += in[j+6][i+5];
_t_26_ += in[j+6][i+5];
_t_41_ += in[j+6][i+5];
_t_54_ += in[j+6][i+5];
outjc0ic0 += _t_10_ * -0.32;
outjc0ic0 += in[j+4][i+4] * 8.10655;
_t_27_ += in[j+4][i+4];
_t_39_ += in[j+4][i+4];
_t_50_ += in[j+4][i+4];
_t_14_ += in[j+9][i];
_t_29_ += in[j+9][i];
_t_45_ += in[j+11][i+3];
_t_45_ += in[j+11][i+5];
outjp3ic0 += _t_45_ * 0.002856;
_t_46_ += in[j+11][i+4];
outjp3ic0 += _t_46_ * -0.00508225;
_t_42_ += in[j+11][i];
_t_42_ += in[j+11][i+8];
outjp3ic0 += _t_42_ * 3.18622;
_t_14_ += in[j+9][i+8];
_t_29_ += in[j+9][i+8];
outjp1ic0 += _t_14_ * 3.18622;
_t_44_ += in[j+9][i];
_t_44_ += in[j+9][i+8];
_t_44_ += in[j+11][i+2];
_t_44_ += in[j+11][i+6];
outjp3ic0 += _t_44_ * -0.000357;
_t_15_ += in[j+9][i+1];
_t_48_ += in[j+9][i+1];
_t_15_ += in[j+9][i+7];
_t_48_ += in[j+9][i+7];
outjp1ic0 += _t_15_ * 4.5339;
_t_33_ += in[j+9][i+1];
_t_33_ += in[j+9][i+7];
outjp2ic0 += _t_33_ * 0.00064516;
_t_16_ += in[j+9][i+2];
_t_16_ += in[j+9][i+6];
outjp1ic0 += _t_16_ * -0.000357;
_t_34_ += in[j+9][i+2];
_t_34_ += in[j+9][i+6];
outjp2ic0 += _t_34_ * -0.00508;
_t_51_ += in[j+9][i+2];
_t_51_ += in[j+9][i+6];
outjp3ic0 += _t_51_ * 0.04;
_t_17_ += in[j+9][i+3];
_t_17_ += in[j+9][i+5];
outjp1ic0 += _t_17_ * 0.002856;
_t_35_ += in[j+9][i+3];
_t_35_ += in[j+9][i+5];
outjp2ic0 += _t_35_ * 0.04064;
_t_52_ += in[j+9][i+3];
_t_52_ += in[j+9][i+5];
outjp3ic0 += _t_52_ * -0.32;
_t_18_ += in[j+9][i+4];
_t_36_ += in[j+9][i+4];
outjp2ic0 += _t_36_ * -0.0723189;
_t_53_ += in[j+9][i+4];
outjp3ic0 += _t_53_ * 0.56944;
outjp1ic0 += _t_18_ * -0.00508225;
outjp1ic0 += _t_19_ * 0.00064516;
outjp1ic0 += _t_20_ * -0.00508;
outjp1ic0 += _t_21_ * 0.04064;
outjp1ic0 += _t_22_ * -0.0723189;
outjp1ic0 += _t_23_ * 0.04;
outjp1ic0 += _t_24_ * -0.32;
outjp1ic0 += _t_25_ * 0.56944;
outjp1ic0 += _t_26_ * 2.56;
outjp1ic0 += _t_27_ * -4.55552;
_t_28_ += in[j+10][i];
_t_28_ += in[j+10][i+8];
outjp2ic0 += _t_28_ * 3.18622;
_t_43_ += in[j+10][i];
_t_43_ += in[j+10][i+8];
_t_43_ += in[j+11][i+1];
_t_43_ += in[j+11][i+7];
outjp3ic0 += _t_43_ * 4.5339;
_t_29_ += in[j+10][i+1];
_t_29_ += in[j+10][i+7];
outjp2ic0 += _t_29_ * 4.5339;
_t_47_ += in[j+10][i+1];
_t_47_ += in[j+10][i+7];
outjp3ic0 += _t_47_ * 0.00064516;
_t_30_ += in[j+10][i+2];
_t_30_ += in[j+10][i+6];
outjp2ic0 += _t_30_ * -0.000357;
_t_48_ += in[j+10][i+2];
_t_48_ += in[j+10][i+6];
outjp3ic0 += _t_48_ * -0.00508;
_t_31_ += in[j+10][i+3];
_t_31_ += in[j+10][i+5];
outjp2ic0 += _t_31_ * 0.002856;
_t_49_ += in[j+10][i+3];
_t_49_ += in[j+10][i+5];
outjp3ic0 += _t_49_ * 0.04064;
_t_32_ += in[j+10][i+4];
_t_50_ += in[j+10][i+4];
outjp3ic0 += _t_50_ * -0.0723189;
outjp2ic0 += _t_32_ * -0.00508225;
outjp2ic0 += _t_37_ * 0.04;
outjp2ic0 += _t_38_ * -0.32;
outjp2ic0 += _t_39_ * 0.56944;
outjp2ic0 += _t_40_ * 2.56;
outjp2ic0 += _t_41_ * -4.55552;
outjp3ic0 += _t_54_ * 2.56;
outjp3ic0 += _t_55_ * -4.55552;
out[j][i] = outjc0ic0;
out[j+1][i] = outjp1ic0;
out[j+2][i] = outjp2ic0;
out[j+3][i] = outjp3ic0;
}
}
extern "C" void host_code (double *h_in, double *h_out, int N) {
double *in;
cudaMalloc (&in, sizeof(double)*N*N);
check_error ("Failed to allocate device memory for in\n");
cudaMemcpy (in, h_in, sizeof(double)*N*N, cudaMemcpyHostToDevice);
double *out;
cudaMalloc (&out, sizeof(double)*N*N);
check_error ("Failed to allocate device memory for out\n");
dim3 blockconfig (16, 8);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, 4*blockconfig.y));
j2d81pt<<<gridconfig, blockconfig>>> (in, out, N);
cudaMemcpy (h_out, out, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
cudaFree (in);
cudaFree (out);
}
|
12,766 | #include <string>
#include <iostream>
#include <fstream>
#include <sstream>
#include <cstdlib>
#include <cstring>
#include <ostream>
#include <sys/time.h>
using namespace std;
// Current wall-clock time in seconds (microsecond resolution).
// Returns 0 if gettimeofday() reports a failure.
double get_walltime() {
    struct timeval tv;
    if (gettimeofday(&tv, NULL) != 0) {
        return 0;
    }
    return (double)tv.tv_sec + (double)tv.tv_usec * 0.000001;
}
void readPassFile(char **, char ** ,char *);
void readHashFile( char **, char ** ,char *);
// Fix: the definition below takes char** arrays, not string*; the old
// prototype declared a mismatched overload that was never defined.
void writeFile(char **, char **, char *, int);
// Loads a "password,hash" CSV file into two parallel heap-allocated string
// arrays. The caller guarantees record_pass/record_hash have one slot per
// line of the file; a file that fails to open is silently treated as empty
// (unchanged behavior).
// Fix: the old version round-tripped every field through non-standard stack
// VLAs (strcpy into tKey/tValue, then strncpy out again); we now copy the
// std::string buffers straight into the malloc'd slots.
void readPassFile(char * record_pass[], char * record_hash[], char * fileName) {
    int i = 0;
    std::ifstream file;
    std::string line;
    file.open(fileName); //passlist/wordsforsimpletest.txt
    while (std::getline(file, line)) {
        std::string key, value;
        std::istringstream liness(line);
        std::getline(liness, key, ',');
        std::getline(liness, value, ',');
        record_pass[i] = (char*) malloc(key.length() + 1);
        record_hash[i] = (char*) malloc(value.length() + 1);
        strcpy(record_pass[i], key.c_str());
        strcpy(record_hash[i], value.c_str());
        i++;
    }
    file.close();
}
// Reads one hash per line of `fileName`. For each entry the hash slot gets a
// heap copy of the line and the pass slot gets an empty string (the plaintext
// is unknown at this stage). The caller guarantees the arrays are big enough.
void readHashFile(char ** hToCheck_pass, char ** hToCheck_hash, char * fileName ) {
    std::ifstream file;
    std::string line;
    int idx = 0;
    file.open(fileName); //passlist/hashFileToTest.txt
    while (std::getline(file, line)) {
        // 1 byte for the empty password, length+1 for the hash text.
        hToCheck_pass[idx] = (char*) malloc(1);
        hToCheck_hash[idx] = (char*) malloc(line.length() + 1);
        hToCheck_pass[idx][0] = '\0';
        strcpy(hToCheck_hash[idx], line.c_str());
        idx++;
    }
    file.close();
}
// Dumps `size` cracked (password, hash) pairs to `fileName`, one
// "pass, hash" line per entry.
void writeFile(char ** result_pass, char ** result_hash, char * fileName, int size) {
    std::ofstream outFile;
    outFile.open(fileName); //passlist/convertedHash.txt
    for (int idx = 0; idx < size; idx++) {
        outFile << result_pass[idx] << ", " << result_hash[idx] << std::endl;
    }
    outFile.close();
}
// Device-side strcmp with a clamped result: returns -1, 0 or 1 depending on
// the first differing byte (compared as unsigned char), 0 if equal.
__device__ int d_strcmp (char * s1, char * s2)
{
    unsigned char c1, c2;
    do {
        c1 = (unsigned char) *s1++;
        c2 = (unsigned char) *s2++;
    } while (c1 == c2 && c2 != '\0');
    if (c1 < c2) return -1;
    if (c1 > c2) return 1;
    return 0;
}
// Debug-phase kernel: compares dictionary hash [blockIdx.x] against target
// hash [threadIdx.x]. The writes that would record a cracked password are
// commented out, so this currently only emits diagnostic printf output.
// NOTE(review): assumes gridDim.x and blockDim.x exactly match the table
// sizes chosen by the caller; there are no bounds guards — confirm the
// launch configuration before enabling the result writes.
__global__ void gpuComputation(char ** d_record_pass, char ** d_record_hash, char ** d_hToCheck_pass, char ** d_hToCheck_hash, char ** d_result_pass, char ** d_result_hash, int row) {
int blockindex = blockIdx.x;
int threadindex = threadIdx.x;
/* for(i = 0; i < row; i++) {
char tempKey[32];
for(j = 0; j < 32; j++) {
if(d_record_hash[i][j] == d_hToCheck_hash[threadIdx.x][j]) {
tempKey[j] = d_record_pass[i][j];
printf("h");
}
}
*/
// NOTE(review): `res` duplicates the comparison below and is never read.
int res = d_strcmp(d_record_hash[blockindex], d_hToCheck_hash[threadindex]);
printf("%c\n", d_record_hash[blockindex][0]);
printf("here\n");
// printf("%d %d %d\n", blockindex, threadindex, row);
if(d_strcmp(d_record_hash[blockindex], d_hToCheck_hash[threadindex]) == 0) {
// On a match, echo the 32-character hash (the result-array writes are disabled).
for(int j = 0; j < 32; j++) {
//d_result_pass[threadIdx.x][j] = d_record_pass[blockindex][j];
//d_result_hash[threadIdx.x][j] = d_record_hash[blockindex][j];
printf("%c", d_record_hash[threadIdx.x][j]);
}
printf("\n");
}
// }
}
// Deep-copy helper: uploads n host C strings to the device and returns a
// device array of device pointers. *hostCopies receives a host-side copy of
// the per-string device pointers so they can be cudaFree'd later.
static char ** uploadStringTable(char ** host, int n, char *** hostCopies) {
    char ** ptrs = (char **) malloc(n * sizeof(char *));
    for (int i = 0; i < n; i++) {
        size_t len = strlen(host[i]) + 1;
        cudaMalloc((void **)&ptrs[i], len);
        cudaMemcpy(ptrs[i], host[i], len, cudaMemcpyHostToDevice);
    }
    char ** dev = NULL;
    cudaMalloc((void **)&dev, n * sizeof(char *));
    cudaMemcpy(dev, ptrs, n * sizeof(char *), cudaMemcpyHostToDevice);
    *hostCopies = ptrs;
    return dev;
}

// Allocates a device table of n zeroed buffers of `width` bytes each
// (result slots the kernel may fill once its writes are re-enabled).
static char ** allocDeviceTable(int n, size_t width, char *** hostCopies) {
    char ** ptrs = (char **) malloc(n * sizeof(char *));
    for (int i = 0; i < n; i++) {
        cudaMalloc((void **)&ptrs[i], width);
        cudaMemset(ptrs[i], 0, width);
    }
    char ** dev = NULL;
    cudaMalloc((void **)&dev, n * sizeof(char *));
    cudaMemcpy(dev, ptrs, n * sizeof(char *), cudaMemcpyHostToDevice);
    *hostCopies = ptrs;
    return dev;
}

// Releases a device string table created by the helpers above.
static void freeDeviceTable(char ** dev, char ** hostCopies, int n) {
    for (int i = 0; i < n; i++) cudaFree(hostCopies[i]);
    free(hostCopies);
    cudaFree(dev);
}

// Uploads both string tables to the GPU, runs one hash comparison per
// (dictionary entry, target hash) pair, and releases all device memory.
// Returns the (currently hard-coded) cracked-password count so the caller's
// benchmark path keeps working.
//
// Fixes over the previous version: device memory is now actually allocated
// (the old code cudaMemcpy'd into host stack arrays of pointers, using sizes
// computed from sizeof(int), so the kernel dereferenced host pointers), and
// the launch configuration is no longer transposed.
int performMainComputation(char ** record_pass, char ** record_hash, char ** hToCheck_pass, char ** hToCheck_hash, char ** result_pass, char ** result_hash, int nLinesPFile, int nLinesHFile) {
    int row = nLinesPFile;
    int col = nLinesHFile;
    char **rp, **rh, **cp, **ch, **sp, **sh;
    char ** d_record_pass   = uploadStringTable(record_pass,   row, &rp);
    char ** d_record_hash   = uploadStringTable(record_hash,   row, &rh);
    char ** d_hToCheck_pass = uploadStringTable(hToCheck_pass, col, &cp);
    char ** d_hToCheck_hash = uploadStringTable(hToCheck_hash, col, &ch);
    // 33 = 32-character MD5 hex digest + NUL, matching the kernel's copy loop.
    char ** d_result_pass = allocDeviceTable(col, 33, &sp);
    char ** d_result_hash = allocDeviceTable(col, 33, &sh);
    // The kernel indexes the dictionary by blockIdx.x and the target hashes by
    // threadIdx.x, so launch `row` blocks of `col` threads (the old config was
    // transposed and indexed out of bounds whenever row != col). NOTE: col
    // must not exceed the device's max threads per block.
    dim3 grid(row, 1);
    dim3 block(col, 1);
    gpuComputation<<<grid, block>>>(d_record_pass, d_record_hash, d_hToCheck_pass, d_hToCheck_hash, d_result_pass, d_result_hash, row);
    cudaDeviceSynchronize();
    // The kernel's result writes are still commented out, so there is nothing
    // meaningful to copy back yet (the old code copied uninitialized pointer
    // arrays into result_pass/result_hash).
    (void)result_pass;
    (void)result_hash;
    freeDeviceTable(d_record_pass, rp, row);
    freeDeviceTable(d_record_hash, rh, row);
    freeDeviceTable(d_hToCheck_pass, cp, col);
    freeDeviceTable(d_hToCheck_hash, ch, col);
    freeDeviceTable(d_result_pass, sp, col);
    freeDeviceTable(d_result_hash, sh, col);
    return 16; // placeholder count, kept for the caller's benchmark output
}
// Prints timing for the three phases of a run: reading the input files,
// the main GPU computation, and writing the output file.
void printBenchmark(int nLinesHFile, double readTime, int nPassCracked, double execTime, double writeTime) {
    std::cout << std::endl
              << "Read time of the file with " << nLinesHFile << " pass hashes = " << readTime << " seconds" << std::endl << std::endl
              << "Total number of passwords cracked = " << nPassCracked << std::endl
              << "Total execution time for the main computation = " << execTime << " seconds" << std::endl
              << std::endl
              << "Write time of the output file = " << writeTime << " seconds" << std::endl << std::endl;
}
// Entry point. Usage:
//   cracker <passFile> <nLinesPass> <hashFile> <nLinesHash> <outputFile>
// Fix: argc and the line counts are now validated before argv[1..5] is
// dereferenced and before the counts size the VLAs below (the old code
// crashed on missing arguments and had UB for non-positive counts).
int main(int argc, char ** argv) {
    if (argc < 6) {
        cout << "usage: " << argv[0] << " passFile nLinesPass hashFile nLinesHash outputFile" << endl;
        return 1;
    }
    char * PFile = argv[1];
    int nLinesPFile = atoi(argv[2]);
    char * HFile = argv[3];
    int nLinesHFile = atoi(argv[4]);
    char * outputFile = argv[5]; // used by the (currently disabled) writeFile call
    (void)outputFile;
    if (nLinesPFile <= 0 || nLinesHFile <= 0) {
        cout << "line counts must be positive" << endl;
        return 1;
    }
    char * record_pass[nLinesPFile];
    char * record_hash[nLinesPFile];
    char * hToCheck_pass[nLinesHFile];
    char * hToCheck_hash[nLinesHFile];
    char * result_pass[nLinesHFile];
    char * result_hash[nLinesHFile];
    // Phase 1: read both input files, timed.
    double startReadTime = get_walltime();
    readPassFile(record_pass, record_hash, PFile);
    readHashFile(hToCheck_pass, hToCheck_hash, HFile);
    double readTime = get_walltime() - startReadTime;
    // Phase 2: GPU comparison, timed.
    double startExecTime = get_walltime();
    int nPassCracked = performMainComputation(record_pass, record_hash, hToCheck_pass, hToCheck_hash, result_pass, result_hash, nLinesPFile, nLinesHFile);
    double execTime = get_walltime() - startExecTime;
    // Phase 3: output, timed (currently disabled like the benchmark print).
    double startWriteTime = get_walltime();
    // writeFile(result_pass, result_hash, outputFile, nPassCracked);
    double writeTime = get_walltime() - startWriteTime;
    //printBenchmark(nLinesHFile, readTime, nPassCracked, execTime, writeTime);
    (void)readTime; (void)execTime; (void)writeTime;
    return 0;
}
|
12,767 | /*
Solution of the Laplace equation for heat conduction in a square plate
*/
#include <iostream>
// global variables
const int NX = 1024; // mesh size (number of node points along X)
const int NY = 1024; // mesh size (number of node points along Y)
const int MAX_ITER=1000; // number of Jacobi iterations
// device function to update the array T_new based on the values in array T_old
// note that all locations are updated simultaneously on the GPU
// One Jacobi relaxation step on the NX x NY mesh: each thread updates one
// interior node of T_new with the 4-point average of its neighbours in
// T_old. Boundary nodes (and any threads launched past the mesh edge by the
// ceiling-divided grid) are left untouched.
__global__ void Laplace(double *T_old, double *T_new)
{
    // (ix, iy) = mesh coordinates of the node handled by this thread
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // skip boundary nodes and out-of-mesh threads
    if (ix < 1 || ix > NX - 2 || iy < 1 || iy > NY - 2) return;
    //        N
    //        |
    // W ---- P ---- E
    //        |
    //        S
    int center = ix + iy * NX;
    double east  = T_old[center + 1];
    double west  = T_old[center - 1];
    double north = T_old[center + NX];
    double south = T_old[center - NX];
    T_new[center] = 0.25 * (east + west + north + south);
}
// initialization
// Initial condition: zero everywhere except the left wall (i == 0),
// which is held at temperature 1.
void Initialize(double *TEMPERATURE)
{
    for (int j = 0; j < NY; j++) {
        for (int i = 0; i < NX; i++) {
            TEMPERATURE[i + j * NX] = (i == 0) ? 1.0 : 0.0;
        }
    }
}
// Driver: initializes the plate on the host, ping-pongs MAX_ITER Jacobi
// iterations between two device buffers, and copies the result back.
int main(int argc,char **argv)
{
    double *_T1, *_T2; // pointers to device (GPU) memory
    // allocate a "pre-computation" T array on the host
    double *T = new double [NX*NY];
    // initialize array on the host (zero interior, left wall held at T = 1)
    Initialize(T);
    // allocate storage space on the GPU
    cudaMalloc((void **)&_T1,NX*NY*sizeof(double));
    cudaMalloc((void **)&_T2,NX*NY*sizeof(double));
    // copy (initialized) host arrays to the GPU memory from CPU memory
    cudaMemcpy(_T1,T,NX*NY*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(_T2,T,NX*NY*sizeof(double),cudaMemcpyHostToDevice);
    // assign a 2D distribution of CUDA "threads" within each CUDA "block"
    int ThreadsPerBlock=16;
    dim3 dimBlock( ThreadsPerBlock, ThreadsPerBlock );
    // calculate number of blocks along X and Y in a 2D CUDA "grid"
    dim3 dimGrid( ceil(double(NX)/double(dimBlock.x)), ceil(double(NY)/double(dimBlock.y)), 1 );
    // begin Jacobi iteration, ping-ponging between the two device buffers
    // (fix: the old per-launch comments had source/destination swapped —
    // Laplace(T_old, T_new) reads its first argument and writes its second)
    int k = 0;
    while(k<MAX_ITER) {
        Laplace<<<dimGrid, dimBlock>>>(_T1,_T2); // update T2 from T1
        Laplace<<<dimGrid, dimBlock>>>(_T2,_T1); // update T1 from T2
        k+=2;
    }
    // wait for the kernels, then copy the final array back to the CPU.
    // Fix: synchronize before reading the result; the old code called the
    // deprecated cudaThreadSynchronize() *after* the copy. (The blocking
    // cudaMemcpy happened to hide the bug, but the order was wrong.)
    // NOTE(review): _T2 holds the state after MAX_ITER-1 half-swaps (_T1 is
    // one iteration fresher); kept as-is to preserve the original output.
    cudaDeviceSynchronize();
    cudaMemcpy(T,_T2,NX*NY*sizeof(double),cudaMemcpyDeviceToHost);
    /*
    // print the results to screen
    for (int j=NY-1;j>=0;j--) {
        for (int i=0;i<NX;i++) {
            int index = i + j*NX;
            std::cout << T[index] << " ";
        }
        std::cout << std::endl;
    }
    */
    // release memory on the host (fix: array delete for array new — the
    // old `delete T` on a new[] allocation was undefined behavior)
    delete [] T;
    // release memory on the device
    cudaFree(_T1);
    cudaFree(_T2);
    return 0;
}
|
12,768 | #ifdef __cplusplus
extern "C" {
#endif
#define L_SIZE 16
//__constant__ float gaus[3][3] = { {0, 1, 0},
// {1, -3, 1},
// {0, 1, 0} };
__constant__ float gaus[3][3] = { {0, 0, 0},
{0, 1, 0},
{0, 0, 0} };
// 3x3 convolution of an int image with the __constant__ `gaus` stencil.
// Each L_SIZE x L_SIZE thread block stages its tile plus a one-pixel halo in
// shared memory, then convolves; results are clamped at 0. Expects
// blockDim = (L_SIZE, L_SIZE); note x indexes rows in pos = g_col*cols + g_row.
// NOTE(review): halo loads at the image border read out of bounds (no edge
// guard); presumably callers pass a padded image — confirm before reuse.
// NOTE(review): the `rows` parameter is unused.
__global__ void gauss_kernel( int* data,
                              int* out,
                              int cols, int rows)
{
    int g_row = blockIdx.x * blockDim.x + threadIdx.x;
    int g_col = blockIdx.y * blockDim.y + threadIdx.y;
    int pos = g_col * cols + g_row;
    // local (shared-tile) coordinates, offset by 1 to leave room for the halo
    int l_row = threadIdx.x + 1;
    int l_col = threadIdx.y + 1;
    int sum = 0;
    __shared__ int l_data[L_SIZE+2][L_SIZE+2];
    // stage this thread's own pixel
    l_data[l_row][l_col] = data[pos];
    // edge threads also stage the halo ring (and its corners)
    if (l_row == 1)
    {
        l_data[0][l_col] = data[pos-cols];
        if (l_col == 1)
            l_data[0][0] = data[pos-cols-1];
        else if (l_col == L_SIZE)
            l_data[0][L_SIZE+1] = data[pos-cols+1];
    }
    else if (l_row == L_SIZE)
    {
        l_data[L_SIZE+1][l_col] = data[pos+cols];
        if (l_col == 1)
            l_data[L_SIZE+1][0] = data[pos+cols-1];
        else if (l_col == L_SIZE)
            l_data[L_SIZE+1][L_SIZE+1] = data[pos+cols+1];
    }
    if (l_col == 1)
        l_data[l_row][0] = data[pos-1];
    else if (l_col == L_SIZE)
        l_data[l_row][L_SIZE+1] = data[pos+1];
    // Fix: barrier added — every thread reads neighbours staged by OTHER
    // threads, so the tile must be fully populated before convolving
    // (previously a shared-memory data race).
    __syncthreads();
    for (int i = 0; i < 3; i++)
        for (int j = 0; j < 3; j++)
            sum += gaus[i][j] * l_data[i+l_row-1][j+l_col-1];
    out[pos] = max(0, sum);
    return;
}
#ifdef __cplusplus
}
#endif
|
12,769 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include <cuda.h>
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
ui origSize, TgtSize;
ui xshrink, yshrink;
ui origVpixels, origHpixels, origHbytes;
uch *TheImage, *NewImage; // Where images are stored in CPU
uch *GPUSrcImage, *GPUTgtImage, *GPUResult; // Where images are stored in GPU
struct ImgProp{
int Hpixels;
int Vpixels;
uch HeaderInfo[54];
ul Hbytes;
} ip;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer.
// Read a 24-bit/pixel BMP file into a newly malloc'd 1D linear array and
// fill the global ImgProp `ip` from the 54-byte header. Exits on a missing
// or truncated file; returns NULL only if the pixel buffer cannot be
// allocated. Fixes: the needless `static` on the local pointer was removed,
// the header fread is now checked, and the FILE handle is no longer leaked
// when malloc fails.
uch *ReadBMPlin(char* fn)
{
	uch *Img;
	FILE* f = fopen(fn, "rb");
	if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
	uch HeaderInfo[54];
	if (fread(HeaderInfo, sizeof(uch), 54, f) != 54){	// read the 54-byte header
		printf("\n\n%s NOT FOUND\n\n", fn); fclose(f); exit(EXIT_FAILURE);
	}
	// extract image height and width from header
	int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
	int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
	int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes; // rows padded to 4 bytes
	//save header for re-use
	memcpy(ip.HeaderInfo, HeaderInfo,54);
	printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
		ip.Hpixels, ip.Vpixels, IMAGESIZE);
	// allocate memory to store the main image (1 Dimensional array)
	Img = (uch *)malloc(IMAGESIZE);
	if (Img == NULL){ fclose(f); return Img; } // cannot allocate memory; don't leak f
	// read the image from disk
	fread(Img, sizeof(uch), IMAGESIZE, f);
	fclose(f);
	return Img;
}
// Write the (possibly resized) image back to disk, refreshing the size and
// dimension fields of the cached 54-byte BMP header first.
void WriteBMPlin(uch *Img, char* fn)
{
	FILE* fp = fopen(fn, "wb");
	if (fp == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
	// patch the cached header with the current geometry before writing it
	*(int*)&ip.HeaderInfo[2] = ip.Hbytes*ip.Vpixels+54;  // total file size
	*(int*)&ip.HeaderInfo[18] = ip.Hpixels;              // width
	*(int*)&ip.HeaderInfo[22] = ip.Vpixels;              // height
	*(int*)&ip.HeaderInfo[34] = ip.Hbytes*ip.Vpixels;    // raw bitmap size
	fwrite(ip.HeaderInfo, sizeof(uch), 54, fp);
	// then the pixel data
	fwrite(Img, sizeof(uch), IMAGESIZE, fp);
	printf("\nOutput File name: %17s (%u x %u) File Size=%u\n\n", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
	fclose(fp);
}
// Nearest-neighbour image shrink: each thread copies one target RGB pixel
// from the source pixel at (row*yshrink, col*xshrink). Launch layout (see
// caller): a 1-D grid of BlkPerRow blocks per target row, NumBlocks =
// targetRows * BlkPerRow in total.
__global__
void ImShrunk(uch *TgtImg, uch *SrcImg, ui TgtHpixels, ui xshrink, ui yshrink, ui origHpixels, ui origHbytes)
{
ui ThrPerBlk = blockDim.x;
ui TgtBid = blockIdx.x;
ui TgtTid = threadIdx.x;
ui TgtGtid = ThrPerBlk * TgtBid + TgtTid;
ui BlkPerRow = (TgtHpixels + ThrPerBlk - 1) / ThrPerBlk; // ceil
// BMP rows are padded to a multiple of 4 bytes
ui TgtRowBytes = (TgtHpixels * 3 + 3) & (~3);
// ui SrcRowBytes = (origHpixels * 3 + 3) & (~3);
ui SrcRowBytes = origHbytes;
// recover (row, col) of the target pixel from the flat block/thread ids
ui Tgtrow = TgtBid / BlkPerRow;
ui TgtCol = TgtGtid - Tgtrow*BlkPerRow*ThrPerBlk;
// tail threads of each row's last block fall outside the image
if(TgtCol >= TgtHpixels) return;
// source pixel sampled for this target pixel
ui SrcRow = Tgtrow * yshrink;
ui SrcCol = TgtCol * xshrink;
// if(SrcCol >= origHpixels) return;
///////////////
ui TgtOffset = Tgtrow * TgtRowBytes;
ui SrcOffset = SrcRow * SrcRowBytes;
ui TgtIndex = TgtOffset + 3*TgtCol;
ui SrcIndex = SrcOffset + 3*SrcCol;
// copy the three colour bytes (B, G, R)
TgtImg[TgtIndex] = SrcImg[SrcIndex];
TgtImg[TgtIndex+1] = SrcImg[SrcIndex+1];
TgtImg[TgtIndex+2] = SrcImg[SrcIndex+2];
}
// Driver: reads a BMP, shrinks it by (xshrink, yshrink) on the GPU, writes
// the result, and prints transfer/kernel timing gathered with CUDA events.
int main(int argc, char** argv){
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime; // GPU code run times
cudaError_t cudaStatus, cudaStatus2;
cudaEvent_t time1, time2, time3, time4;
char InputFileName[255], OutputFileName[255], ProgName[255];
ui BlkPerRow, ThrPerBlk=256, NumBlocks, GPUDataTransfer;
cudaDeviceProp GPUprop;
ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
strcpy(ProgName, "imShrunk");
if(argc!=5){
printf("\n\nUsage: imshrunk input output xshrink yshrink");
return 0;
}
xshrink = atoi(argv[3]);
yshrink = atoi(argv[4]);
strcpy(InputFileName, argv[1]);
strcpy(OutputFileName, argv[2]);
// ReadBMPlin also fills the global `ip` with the source geometry
TheImage = ReadBMPlin(argv[1]);
if (TheImage == NULL){
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
// snapshot the source geometry before `ip` is switched to the target image
origHpixels = ip.Hpixels;
origVpixels = ip.Vpixels;
origHbytes = ip.Hbytes;
origSize = origHbytes * origVpixels;
// from here on `ip` (and therefore the IMAGESIZE/IPH/IPV macros)
// describes the shrunk target image
ip.Hpixels = ip.Hpixels/xshrink;
ip.Hbytes = (ip.Hpixels*3 + 3) & (~3);
ip.Vpixels = ip.Vpixels/yshrink;
// TgtSize = ip.Hbytes * ip.Vpixels;
// printf("\n new Hpixels %u", ip.Hpixels);
// printf("\n new Vpixels %u", ip.Vpixels);
NewImage = (uch *)malloc(IMAGESIZE);
if (NewImage == NULL){
free(NewImage); // free(NULL) is a no-op
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
// Choose which GPU to run on, change this on a multi-GPU system.
int NumGPUs = 0;
cudaGetDeviceCount(&NumGPUs);
if (NumGPUs == 0){
printf("\nNo CUDA Device is available\n");
exit(EXIT_FAILURE);
}
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
exit(EXIT_FAILURE);
}
// device capabilities, reported in the summary banner below
cudaGetDeviceProperties(&GPUprop, 0);
SupportedKBlocks = (ui)GPUprop.maxGridSize[0] * (ui)GPUprop.maxGridSize[1] * (ui)GPUprop.maxGridSize[2] / 1024;
SupportedMBlocks = SupportedKBlocks / 1024;
sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks >= 5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks >= 5) ? 'M' : 'K');
MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
// four events bracket the three timed phases: H2D copy, kernel, D2H copy
cudaEventCreate(&time1);
cudaEventCreate(&time2);
cudaEventCreate(&time3);
cudaEventCreate(&time4);
cudaEventRecord(time1, 0);
// allocate GPU buffer
cudaStatus = cudaMalloc((void**)&GPUSrcImage, origSize);
cudaStatus2 = cudaMalloc((void**)&GPUTgtImage, IMAGESIZE);
if ((cudaStatus != cudaSuccess) || (cudaStatus2 != cudaSuccess)){
fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory");
exit(EXIT_FAILURE);
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(GPUSrcImage, TheImage, origSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy CPU to GPU failed!");
exit(EXIT_FAILURE);
}
cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done
// one block row per target image row (see ImShrunk's index recovery)
BlkPerRow = (IPH + ThrPerBlk -1 ) / ThrPerBlk;
NumBlocks = IPV*BlkPerRow;
ImShrunk <<< NumBlocks, ThrPerBlk>>> (GPUTgtImage, GPUSrcImage, IPH, xshrink, yshrink, origHpixels, origHbytes);
GPUResult = GPUTgtImage;
GPUDataTransfer = origSize + IMAGESIZE;
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus);
exit(EXIT_FAILURE);
}
cudaEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(NewImage, GPUResult, IMAGESIZE, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy GPU to CPU failed!");
exit(EXIT_FAILURE);
}
cudaEventRecord(time4, 0);
cudaEventSynchronize(time1);
cudaEventSynchronize(time2);
cudaEventSynchronize(time3);
cudaEventSynchronize(time4);
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
cudaStatus = cudaDeviceSynchronize();
//checkError(cudaGetLastError()); // screen for errors in kernel launches
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
free(TheImage);
free(NewImage);
exit(EXIT_FAILURE);
}
WriteBMPlin(NewImage, argv[2]); // Write the shrunk image back to disk
////////////////// change from here
printf("\n\n--------------------------------------------------------------------------\n");
printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n",
GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk);
printf("--------------------------------------------------------------------------\n");
printf("%s %s %s %u [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName,
ThrPerBlk, NumBlocks, BlkPerRow);
printf("--------------------------------------------------------------------------\n");
printf("CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(origSize), DATABW(origSize, tfrCPUtoGPU));
printf("Kernel Execution =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecutionTime, DATAMB(GPUDataTransfer), DATABW(GPUDataTransfer, kernelExecutionTime));
printf("GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU));
printf("--------------------------------------------------------------------------\n");
printf("Total time elapsed =%7.2f ms %4d MB ... %6.2f GB/s\n", totalTime, DATAMB((origSize + IMAGESIZE + GPUDataTransfer)), DATABW((origSize + IMAGESIZE+ GPUDataTransfer), totalTime));
printf("--------------------------------------------------------------------------\n\n");
// Deallocate CPU, GPU memory and destroy events.
cudaFree(GPUSrcImage);
cudaFree(GPUTgtImage);
cudaEventDestroy(time1);
cudaEventDestroy(time2);
cudaEventDestroy(time3);
cudaEventDestroy(time4);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
free(TheImage);
free(NewImage);
exit(EXIT_FAILURE);
}
free(TheImage);
free(NewImage);
return(EXIT_SUCCESS);
}
|
12,770 | extern "C" __global__ void
copy(float2* x, float* y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i].x = y[i];
x[i].y = 0;
} |
12,771 | #include<stdio.h>
#include<cstdlib>
// Pointer-chase microbenchmark: each thread follows a chain of `n` hops
// through its private `size`-element segment of `arr`, then publishes the
// final position so the compiler cannot eliminate the loop.
// Fix: the start-position guard used `>`, so a thread with gid == size began
// its chase at arr[size*gid + size] — one element past its segment. The
// comparison is also done in size_t to avoid a signed/unsigned mismatch.
__global__ void race(volatile int *arr, size_t size, int n, int *out) {
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    int ptr = gid;
    if ((size_t)ptr >= size) ptr = 0;
    for (int i = 0; i < n; i++) {
        ptr = arr[size * gid + ptr];
    }
    out[gid] = ptr;
}
// Driver for the pointer-chase benchmark.
// Args (all four or none): grid_size block_size n step.
// Fixes: host and device allocations are now released before exit (all were
// leaked), the thread count is computed once in size_t, and the printf
// specifier for size_t is %zu (was %zd).
int main(int argc, char *argv[]) {
    int grid_size = 10, block_size = 128, n = 100, step = 100;
    if (argc > 4) {
        sscanf(argv[1], "%d", &grid_size);
        sscanf(argv[2], "%d", &block_size);
        sscanf(argv[3], "%d", &n);
        sscanf(argv[4], "%d", &step);
    }
    size_t size = n;
    size_t threads = (size_t)grid_size * block_size;
    size_t total_size = size * threads * sizeof(int);
    printf("size = %zu KB\n", total_size / 1024);
    int *arr = new int[size * threads];
    int *ra = new int[size];
    int *out = new int[threads];
    for (size_t tid = 0; tid < threads; tid++) {
        int *arr2 = arr + tid * size;
        // build (and Fisher-Yates shuffle) a permutation; the shuffled order
        // is currently unused — see the commented-out random-chain variant —
        // but the rand() draws are kept so behavior is unchanged
        for (int i = 0; i < n; i++) {
            ra[i] = i;
        }
        for (int i = 1; i < n; i++) {
            int r = rand()%(i+1);
            int tmp = ra[i];
            ra[i] = ra[r];
            ra[r] = tmp;
        }
        /*for (int i = 1; i < n; i++) {
            arr2[ra[i-1]] = ra[i];
        }
        arr2[ra[n-1]] = ra[0];*/
        // sequential chain: element i points to i+1, wrapping to 0
        for (int i = 1; i < n; i++) {
            arr2[i-1] = i;
        }
        arr2[n-1] = 0;
    }
    int *garr, *gout;
    cudaMalloc(&garr, total_size);
    cudaMalloc(&gout, sizeof(int) * threads);
    cudaMemcpy(garr, arr, total_size, cudaMemcpyHostToDevice);
    race<<<grid_size, block_size>>>(garr, size, step, gout);
    cudaMemcpy(out, gout, sizeof(int) * threads, cudaMemcpyDeviceToHost);
    // Fix: release device and host memory (previously all leaked).
    cudaFree(garr);
    cudaFree(gout);
    delete[] arr;
    delete[] ra;
    delete[] out;
    return 0;
}
12,772 | //
// main.cu
// TermProject
//
// Created by Kutay Demireren on 25/12/15.
// Copyright © 2015 Kutay Demireren. All rights reserved.
//
/*****************************************
This program compute n many particles forces on each other and their
velocities & positions according to forces in time.
It is called N-Body Solver.
This code is written by Kutay Demireren and Elif Ecem Ates. January, 2016.
*****************************************/
#include <iostream>
#include <string.h>
#include <math.h>
#include <sys/time.h>
#include <fstream>
using namespace std;
/* Dimension of vect arrays */
#define DIM 2
/* Global definitions for X and Y */
#define X 0
#define Y 1
/* Universal Gravity Constant */
#define G 6.673*pow(10,-11)
typedef double vect_t[DIM];
// One body in the N-body simulation.
struct particle_t{
double mass;        // particle mass
double position[2]; // x/y position (indexed by the X/Y macros)
double velocity[2]; // x/y velocity
};
/* Function prototypes */
void print_particles(particle_t *particles);
double fRand(double fMin, double fMax);
void init_force(vect_t *vect, int N);
void init_particle(particle_t *vect, int N);
double getTime();
void read_input(particle_t *particles, int N, char *file);
void usage(int argc, char **argv);
int N = 0;
int num_threads = 1;
/* Kernel functions */
__global__ void compute_force_on_device(vect_t *forces, particle_t *particles, int N, double g, double *force_qk);
__global__ void compute_pos_and_vel_on_device(vect_t *forces, particle_t *particles, int delta_t, int N, double g);
int main(int argc, char * argv[]) {
    srand((unsigned)time(0));
    /*
     Inputs to the program(respectively):
     N            number of the particles
     delta_t      time difference between two time
     T            final time number
     debug        1 to see each step, 0 to see only final step.
     num_threads  # of threads working on the program.
     Inputs provided via file:
     (for each particle)
     mass         mass of particle
     initial_pos  initial position of particle
     initial_vel  initial velocities of particle
    */
    int step = 0, T = 0, delta_t, debug, n_steps;
    vect_t *forces;
    particle_t *particles;
    double start, end, total_time;
    char *pfile;
    // Error code to check CUDA return values
    cudaError_t err = cudaSuccess;
    /* check validity of inputs */
    if (argc != 7)
        usage(argc, argv);
    if ((N = atoi(argv[1])) <= 0 ||
        (delta_t = atoi(argv[2])) <= 0 ||
        (T = atoi(argv[3])) <= 0 ||
        (debug = atoi(argv[4])) < 0 ||
        (debug = atoi(argv[4])) > 1 ||
        (num_threads = atoi(argv[5])) <= 0)
        usage(argc, argv);
    /* Allocating on host memory */
    forces = (vect_t *) malloc(N*sizeof(vect_t));
    particles = (particle_t *) malloc(N*sizeof(particle_t));
    double *force_qk = (double *) malloc(2*sizeof(double));
    if(!forces || !particles || !force_qk){
        fprintf(stderr, "error: unable to allocate memory\n");
        exit(1);
    }
    //Initalizing host vectors
    init_force(forces, N);
    pfile = argv[6];
    read_input(particles, N, pfile);
    cout << endl << "Start computing n-body solver" << endl;
    cout << "------------------" << endl;
    //Size of each vector
    size_t size_force = N*sizeof(vect_t);
    size_t size_particle = N*sizeof(particle_t);
    //Initialize device input vectors
    vect_t *d_forces;
    particle_t *d_particles;
    double *d_force_qk;
    /* Allocating memory on device */
    // Allocate the device input vector d_forces
    err = cudaMalloc((void **)&d_forces, size_force);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector d_forces (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device input vector d_particles
    err = cudaMalloc((void **)&d_particles, size_particle);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector d_particles (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **)&d_force_qk, 2*sizeof(double));
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector d_force_qk (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* Copy host vectors to device vectors */
    // Copy forces to d_forces
    err = cudaMemcpy(d_forces, forces, size_force, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector d_forces from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy particles to d_particles
    err = cudaMemcpy(d_particles, particles, size_particle, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector d_particles from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy particles to d_force_qk
    err = cudaMemcpy(d_force_qk, force_qk, 2*sizeof(double), cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector d_force_qk from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    //Final setups for CUDA
    int numThreadsInBlock = num_threads;
    int numBlocks = N / numThreadsInBlock + (N%numThreadsInBlock == 0 ? 0 : 1);
    cout << "Computing starts with..." << endl << " Block Size: " << numThreadsInBlock << endl << " # of Blocks : " << numBlocks << endl;
    start = getTime();
    n_steps = T / delta_t;
    double g = G;
    //Computing
    for (step = 0; step < n_steps; step++) {
        /* Synchronize, before it compute new velocities and positions.
         To make sure all computations have finished in the last step.
         (cudaDeviceSynchronize replaces the deprecated cudaThreadSynchronize.) */
        cudaDeviceSynchronize();
        if(step % 100 == 0 && debug == 1){
            if(step == 0)
                cout << "Initial particles" << endl;
            else
                cout << "Iteration " << step << endl;
            /* Take current values to print */
            err = cudaMemcpy(particles, d_particles, size_particle, cudaMemcpyDeviceToHost);
            if (err != cudaSuccess)
            {
                fprintf(stderr, "Failed to copy vector particles from device to host (error code %s)!\n", cudaGetErrorString(err));
                exit(EXIT_FAILURE);
            }
            print_particles(particles);
        }
        //Set all forces on particles to 0 before computing
        err = cudaMemset(d_forces, 0, size_force);
        if(err != cudaSuccess){
            fprintf(stderr, "Failed to set the memory to value 0 (error code %s)!\n", cudaGetErrorString(err));
            exit(1);
        }
        //Kernel launch
        compute_force_on_device<<<numBlocks, numThreadsInBlock>>>(d_forces, d_particles, N, g, d_force_qk);
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to launch compute_force_on_device kernel (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
        //Update positions and velocities according to force
        //Kernel launch
        compute_pos_and_vel_on_device<<<numBlocks, numThreadsInBlock>>>(d_forces, d_particles, delta_t, N, g);
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            fprintf(stderr, "Failed to launch compute_pos_and_vel_on_device kernel (error code %s)!\n", cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }
    /* End of computing */
    // BUG FIX: kernel launches are asynchronous, so the device must be
    // synchronized BEFORE stopping the timer; the original read the clock
    // first and therefore under-measured the compute time.
    cudaDeviceSynchronize();
    /* Time passed */
    end = getTime();
    total_time = end - start;
    /* Copy the final states to Host */
    err = cudaMemcpy(particles, d_particles, size_particle, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector particles from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /* print final states of particles */
    cout << "Final positions and velocities " << endl;
    print_particles(particles);
    int num_flops = 20 ;
    double flop_rate = n_steps * (1E-9 * N * N * num_flops) / total_time;
    double BW = n_steps * (N * N * sizeof(double) * 3.0)/total_time /N /N /N;
    /* output results */
    fprintf(stdout, " Time in seconds: %f \n", total_time);
    fprintf(stdout, " Gflops Rate: %f GFlop/s\n", flop_rate);
    fprintf(stdout, " Bandwidth Rate : %f GB/s\n", BW);
    /* cleanup */
    err = cudaFree(d_particles);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector d_particles (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_forces);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector d_forces (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_force_qk);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector d_force_qk (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    free(particles);
    free(forces);
    free(force_qk);
    return 0;
}
__global__ void compute_pos_and_vel_on_device(vect_t *forces, particle_t *particles, int delta_t, int N, double g){
    // One thread per particle: explicit Euler step — advance the position
    // using the current velocity, then the velocity using the accumulated
    // force. (g is unused here; kept for signature compatibility.)
    const int q = blockIdx.x * blockDim.x + threadIdx.x;
    if (q < 0 || q >= N)
        return;
    particle_t *p = &particles[q];
    p->position[X] += delta_t * p->velocity[X];
    p->position[Y] += delta_t * p->velocity[Y];
    p->velocity[X] += delta_t / p->mass * forces[q][X];
    p->velocity[Y] += delta_t / p->mass * forces[q][Y];
}
__global__ void compute_force_on_device(vect_t *forces, particle_t *particles, int N, double g, double *force_qk){
    // Direct O(N^2) gravitational force accumulation: thread q sums the pull
    // of every other particle k onto particle q.
    const int q = blockIdx.x * blockDim.x + threadIdx.x;
    if (q < 0 || q >= N)
        return;
    for (int k = 0; k < N; k++) {
        if (k == q)
            continue;
        const double x_diff = particles[q].position[X] - particles[k].position[X];
        const double y_diff = particles[q].position[Y] - particles[k].position[Y];
        const double dist = sqrt(x_diff * x_diff + y_diff * y_diff);
        const double dist_cubed = dist * dist * dist;
        // Attractive: force points from q toward k, hence the subtraction.
        forces[q][X] -= g * particles[q].mass * particles[k].mass / dist_cubed * x_diff;
        forces[q][Y] -= g * particles[q].mass * particles[k].mass / dist_cubed * y_diff;
    }
}
void print_particles(particle_t *particles)
{
    // Dump position and velocity of all N (file-global) particles to stdout.
    for (int part = 0; part < N; part++) {
        const particle_t p = particles[part];
        cout << "position of particle " << part << " is (" << p.position[X] << "," << p.position[Y] << ")" << endl;
        cout << "velocity of particle " << part << " is (" << p.velocity[X] << "," << p.velocity[Y] << ")" << endl;
    }
    cout << "------------------" << endl;
}
double fRand(double fMin, double fMax)
{
    // Uniform random double in [fMin, fMax], derived from one rand() draw.
    const double t = rand() / (double)RAND_MAX;
    return fMin + t * (fMax - fMin);
}
void init_force(vect_t *vect, int N)
{
    // Zero both force components for every particle before accumulation.
    for (int q = 0; q < N; ++q) {
        vect[q][X] = 0.0;
        vect[q][Y] = 0.0;
    }
}
void init_particle(particle_t *particles, int N)
{
    // Populate N particles with random solar-system-scale states.
    // Particle 5 gets the Sun's mass and a fixed state at the origin; the
    // fRand(0,0) calls still consume one rand() each, so the RNG sequence is
    // identical to the original element-by-element initialisation.
    for (int i = 0; i < N; i++) {
        struct particle_t p;
        if (i == 5) { // the Sun
            p.mass = fRand(1.9890e+30, 1.9890e+30);
            p.position[X] = fRand(0, 0);
            p.position[Y] = fRand(0, 0);
            p.velocity[X] = fRand(0, 0);
            p.velocity[Y] = fRand(0, 0);
        } else {
            // Planet-like body: mass between one and ten Earth masses.
            double min = 5.9740e+24;
            double max = 10 * min;
            p.mass = fRand(min, max);
            p.position[X] = fRand(-2.50e+11, 2.50e+11);
            p.position[Y] = fRand(-2.50e+11, 2.50e+11);
            p.velocity[X] = fRand(0, 0);
            p.velocity[Y] = fRand(2.9800e+03, 2.9800e+05);
        }
        particles[i] = p;
    }
}
double getTime()
{
    // Wall-clock time in seconds, with microsecond resolution from
    // gettimeofday. Returns -1 if the syscall fails.
    struct timeval tv;
    if (gettimeofday(&tv, NULL) == -1) {
        printf("ERROR: Bad call to gettimeofday\n");
        return (-1);
    }
    const double kMicro = 1.0e-6;
    return (double)tv.tv_sec + kMicro * (double)tv.tv_usec;
}
void read_input(particle_t *particles, int N, char *file)
{
    // Load N particle records from a whitespace-separated text file.
    // Each record: posX posY velX velY mass. Exits the process if the file
    // cannot be opened. No check that the file actually holds N records —
    // short files leave trailing particles with repeated/garbage values
    // (unchanged from the original behavior).
    ifstream myfile;
    double mass, positionX, positionY, velocityX, velocityY;
    int i = 0;
    myfile.open(file, ios::in);
    if(myfile.is_open()){
        while(i < N){
            myfile >> positionX >> positionY >> velocityX >> velocityY >> mass;
            struct particle_t particle;
            particle.mass = mass;
            particle.position[X] = positionX;
            particle.position[Y] = positionY;
            particle.velocity[X] = velocityX;
            particle.velocity[Y] = velocityY;
            particles[i] = particle;
            i++;
        }
        myfile.close();
    }else{
        // BUG FIX: the original wrote `cout << fprintf(stderr, ...)`, which
        // streamed fprintf's int return value to stdout. Report once, with a
        // newline, on stderr only.
        fprintf(stderr, "error: file could not be opened for reading\n");
        exit(1);
    }
}
void usage(int argc, char **argv)
{
    // Print command-line help and terminate with a non-zero status.
    // BUG FIX: added the missing space between the program name and "<N>".
    cout << "Usage " << argv[0] << " <N> <delta_t> <T> <debug> <num_threads> <part_file>" << endl;
    cout << "\tN\t - number of particles (positive integer)" << endl;
    cout << "\tdelta_t\t - time difference between two time (positive)" << endl;
    cout << "\tT\t - final time number (positive)" << endl;
    cout << "\tdebug\t - 1 to see the states of particles time to time, 0 to see only final step" << endl;
    cout << "\tnum_threads\t - number of threads" << endl;
    cout << "\tpart_file\t - name of the file containing information the inital states of particles" << endl;
    exit(1);
}
|
12,773 |
__global__
// Toy "hello world" kernel: each thread offsets one character of a by the
// corresponding integer in b. No bounds check — the caller must launch
// exactly one thread per element.
void hello(char *a, int *b)
{
    a[threadIdx.x] += b[threadIdx.x];
}
|
12,774 | #include "includes.h"
__global__ void SetAllButOneKernel(float *buffer, float value, int index, int count)
{
    // Write `value` into every element of buffer[0..count) except the one at
    // `index`. Flat thread id for a 2D grid of 1D blocks.
    int threadId = threadIdx.x
                 + blockDim.x * blockIdx.x                 // blocks before us in this row
                 + blockDim.x * gridDim.x * blockIdx.y;    // full grid rows before us
    if (threadId < count && threadId != index)
        buffer[threadId] = value;
}
12,775 | #include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>
#define MAX_NONCE 100000000000 // 100000000000
#define MAX 10
//char* tohexadecimal
void mine1(long blockNum, char *trans, char *preHash, int prefixZero){
    //char prefix[] = "0000" ;
    // Sequential toy "miner": for each candidate nonce i, reseed the RNG from
    // the nonce plus block data, then draw prefixZero digits; the nonce
    // "wins" when every draw is 0 (modelling a hash with prefixZero leading
    // zeros, success probability 10^-prefixZero per nonce).
    for(int i = 0; i < MAX_NONCE; i++){
        printf("mining...\n") ;
        // Seed depends on the nonce and the first characters of the inputs.
        srand(i*blockNum*(trans[0])*(preHash[0]));
        int count = 0 ;
        for(int j = 0; j < prefixZero; j++){
            if(rand() % 10 == 0){
                count++ ;
            }
        }
        if (count == prefixZero){
            printf("found, nonce = %d\n", i) ;
            break;
        }
        //printf("%d\n", rand() % 10);
    }
}
__global__ void mine(long int* blockNum, char *trans, char *preHash, int *prefixZero){
    // GPU version of the toy miner: the nonce space is partitioned across
    // 10*1024 threads, each scanning MAX_NONCE/1024/10 candidates with a
    // curand stream seeded per nonce.
    // NOTE(review): the "found" branch below is entirely commented out, so a
    // winning nonce is never reported back to the host — confirm intent.
    int index = threadIdx.x ;
    for(int i = 0; i < (MAX_NONCE/1024/10); i++){
        //printf("mining...\n") ;
        // Global candidate nonce for this thread at iteration i.
        int n = ((MAX_NONCE/1024/10)*(blockIdx.x*blockDim.x)+index) + i ;
        curandState_t state;
        curand_init(n*(*blockNum)*(*trans)*(*preHash), 0, 0, &state);
        //printf("rand = %d\n", curand(&state) % MAX) ;
        //int random = curand(&state) % MAX ;
        //printf("random = %d\n", random) ;
        //srand(n*(*blockNum)*(*trans)*(*preHash));
        int count = 0 ;
        // NOTE(review): a single draw is compared prefixZero times, unlike the
        // CPU version which redraws per digit — verify this is intended.
        int random = curand(&state) % MAX ;
        for(int j = 0; j < (*prefixZero); j++){
            if(random == 0){
                count++ ;
            }
        }
        if (count == (*prefixZero)){
            //printf("found, nonce = %d\n", n) ;
            //exit(1) ;
        }
    }
}
int main(){
    // Demo driver: uploads a tiny block description, launches the GPU miner
    // on 10 blocks x 1024 threads, and reports the kernel time measured with
    // CUDA events.
    char trans[] = "A-20->B,b-10->C" ;
    char preHash[] = "0000000xa036944e29568d0cff17edbe038f81208fecf9a66be9a2b8321c6ec7" ;
    int difficulty = 5 ;
    //mine(1, trans, preHash, difficulty) ;
    long int blockNum = 1 ;
    // Only the first character of each string feeds the kernel's seed.
    char tran = trans[0] ;
    char preH = preHash[0] ;
    long int *d_blockNum ;
    char *d_trans ;
    char *d_preHash ;
    int *d_diff ;
    cudaMalloc((void**) &d_blockNum, sizeof(long int));
    cudaMalloc((void**) &d_trans, sizeof(char));
    cudaMalloc((void**) &d_preHash, sizeof(char));
    cudaMalloc((void**) &d_diff, sizeof(int));
    cudaMemcpy(d_blockNum, &blockNum, sizeof(long int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_trans, &tran, sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(d_preHash, &preH, sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(d_diff, &difficulty, sizeof(int), cudaMemcpyHostToDevice);
    cudaEvent_t start, stop ;
    cudaEventCreate(&start) ;
    cudaEventCreate(&stop) ;
    cudaEventRecord(start) ;
    mine<<<10, 1024>>>(d_blockNum, d_trans, d_preHash, d_diff) ;
    cudaEventRecord(stop) ;
    cudaEventSynchronize(stop) ;
    float millisec = 0 ;
    cudaEventElapsedTime(&millisec, start, stop) ;
    printf("Time used: %f\n", millisec) ;
    // BUG FIX: the original leaked both timing events.
    cudaEventDestroy(start) ;
    cudaEventDestroy(stop) ;
    cudaFree(d_blockNum);
    cudaFree(d_trans);
    cudaFree(d_preHash);
    cudaFree(d_diff);
    printf("end\n") ;
    return 0 ;
}
12,776 | #include<iostream>
#include<algorithm>
#include<iomanip>
#include<time.h>
#include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/sort.h>
#define N (8<<27)
#define C1 1
template<class T>
class plusOne{
public:
    // Unary functor usable on host or device: returns its argument plus one.
    __device__ __host__ T operator() (T value){
        return value + T(1);
    }
};
int main(){
    // Micro-benchmark: applies plusOne element-wise to N random ints with
    // thrust::transform, once on the device and once on the host, and prints
    // the two times (both measured with CUDA events) plus their ratio.
    printf("size %d \n",N);
    srand(time(NULL));
    thrust::host_vector<int> hv(N);
    std::generate(hv.begin(),hv.end(),rand);
    // Host-to-device copy of the input data.
    thrust::device_vector<int> dv=hv;
    cudaEvent_t start,stop;
    float elapsed;
    float elapsed_bak;
    float ratio;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    for(int c=0;c<C1;c++){
        thrust::transform(dv.begin(),dv.end(),dv.begin(),plusOne<int>());
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed,start,stop);
    std::cout<<"gpu :"<<elapsed<<"ms ["<<std::setprecision(8)<<C1/elapsed<<"/ms]"<<std::endl;
    elapsed_bak = elapsed;
    // Fresh random data for the CPU pass (dv is refreshed too, though it is
    // not used again below).
    std::generate(hv.begin(),hv.end(),rand);
    dv=hv;
    cudaEventRecord(start,0);
    for(int c=0;c<C1;c++){
        thrust::transform(hv.begin(),hv.end(),hv.begin(),plusOne<int>());
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsed,start,stop);
    std::cout<<"cpu :"<<elapsed<<"ms ["<<std::setprecision(8)<<C1/elapsed<<"/ms]"<<std::endl;
    // ratio > 1 means the GPU pass was faster than the CPU pass.
    ratio = elapsed/elapsed_bak;
    std::cout<<"ratio:"<<ratio<<std::endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
12,777 | /******************************************************************************
*cr
*cr (C) Copyright 2010-2013 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_WIDTH_A 256
#define TILE_WIDTH_B 16
#define TILE_K (TILE_WIDTH_A/TILE_WIDTH_B)
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
    /********************************************************************
     *
     * Compute C = A x B
     * where A is a (m x k) matrix
     * where B is a (k x n) matrix
     * where C is a (m x n) matrix
     *
     * Use register and shared memory tiling and thread coarsening
     *
     * NOTE: A and C are column major, B is row major
     *
     ********************************************************************/
    // Macros for accessing flattened matrices
    #define A(row,col) A[(row) + (col)*m]
    #define B(row,col) B[(row)*n + (col)]
    #define C(row,col) C[(row) + (col)*m]
    //tiling for B and output C
    // Each thread computes one row of a TILE_WIDTH_A x TILE_WIDTH_B output
    // tile, accumulating TILE_WIDTH_B partial dot products in registers.
    __shared__ float shared_B[TILE_K][TILE_WIDTH_B];
    float C_RT[TILE_WIDTH_B];
    for(int i = 0; i<TILE_WIDTH_B;i++) C_RT[i] = 0.0;
    //Get block and thread idxs to load in tiles
    int by = blockIdx.y, bx = blockIdx.x, tx = threadIdx.x, ty = threadIdx.y;
    int b_col = tx + bx * TILE_WIDTH_B;
    // For every block y, ty ranges from 0-TILE_K. TILE_WIDTH_B*TILE_K = TILE_WIDTH_A
    // So every by should add TILE_WIDTH_A
    int a_row = tx + ty * TILE_WIDTH_B + by * TILE_WIDTH_A;
    int p_col_offset = bx * TILE_WIDTH_B;
    for (int i = 0; i < ceil(double(k)/double(TILE_K)); i++){//loop through all k tiles
        //each thread load in an element of B Tile
        int b_row = i * TILE_K + ty;
        // Out-of-range loads are padded with zeros so they contribute nothing.
        if (b_row < k && b_col < n){
            shared_B[ty][tx] = B(b_row,b_col);
        }else{
            shared_B[ty][tx] = 0;
        }
        __syncthreads();//wait for threads to load into shared mem
        for (int j = 0; j < TILE_K; j++){
            float a = 0;
            int a_col = i * TILE_K + j;
            if (a_col < k && a_row < m){
                a = A(a_row,a_col);
            }
            for (int l = 0; l < TILE_WIDTH_B; l++){//compute partial multiplication
                C_RT[l] += a*shared_B[j][l];
            }
        }
        __syncthreads();//wait for all threads to perform computations from b
    }
    // Flush the register tile to global memory, guarding the matrix edges.
    for (int i = 0; i < TILE_WIDTH_B; i++) {
        if (a_row < m && i+p_col_offset < n){
            C(a_row,i+p_col_offset) = C_RT[i];
        }
    }
}
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    // SGEMM-style wrapper around the mysgemm kernel. Only the restricted case
    // C = A * B^T-layout (transa='N', transb='T', alpha=1, beta=0) is
    // supported; anything else prints a message and returns.
    if ((transa != 'N') && (transa != 'n')) {
        printf("unsupported value of 'transa'\n");
        return;
    }
    if ((transb != 'T') && (transb != 't')) {
        printf("unsupported value of 'transb'\n");
        return;
    }
    if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
        printf("unsupported value of alpha\n");
        return;
    }
    if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
        printf("unsupported value of beta\n");
        return;
    }
    // Initialize thread block and kernel grid dimensions ---------------------
    dim3 dimGrid(ceil(double(n)/double(TILE_WIDTH_B)),ceil(double(m)/double(TILE_K)),1);
    dim3 dimBlock(TILE_WIDTH_B,TILE_K,1);
    // Invoke CUDA kernel -----------------------------------------------------
    mysgemm<<<dimGrid, dimBlock>>>(m,n,k,A,B,C);
    // BUG FIX: the original never checked the launch; surface configuration
    // errors (bad grid/block dims, etc.) immediately.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("mysgemm launch failed: %s\n", cudaGetErrorString(err));
    }
}
|
12,778 | #include "aabb.cuh"
#include <math.h>
#include <stdio.h>
// Default box: both corners at the origin (Vec3's default value).
__host__ __device__ AABB3::AABB3() {
    min = Vec3();
    max = Vec3();
}
// Box spanning the given minimum and maximum corners. The caller is
// responsible for ensuring min <= max component-wise.
__host__ __device__ AABB3::AABB3(Vec3 min, Vec3 max) {
    this->min = min;
    this->max = max;
}
// Write the 8 corner points of the box into `vertices` (must hold >= 8
// entries). Order: min corner, the three single-axis max mixes, the three
// two-axis max mixes, then the max corner.
__host__ __device__ void AABB3::get_vertices(Vec3* vertices)
{
    vertices[0] = this->min;
    vertices[1] = Vec3(max.x, min.y, min.z);
    vertices[2] = Vec3(min.x, max.y, min.z);
    vertices[3] = Vec3(min.x, min.y, max.z);
    vertices[4] = Vec3(max.x, max.y, min.z);
    vertices[5] = Vec3(min.x, max.y, max.z);
    vertices[6] = Vec3(max.x, min.y, max.z);
    vertices[7] = this->max;
}
// Smallest box enclosing both this box and `other` (component-wise min of
// the min corners, component-wise max of the max corners).
__host__ __device__ AABB3 AABB3::get_union(AABB3* other) {
    return AABB3(min.minimum(other->min), max.maximum(other->max));
}
// Slab-method ray/AABB intersection. Normalizes ray_direction, then clips the
// ray against the three axis-aligned slab pairs. On a hit, *distance receives
// the entry parameter tmin (may be negative if the origin is inside the box).
// Relies on IEEE infinities from 1/0 to handle axis-parallel rays.
__host__ __device__ bool AABB3::intersects(Vec3 ray_origin, Vec3 ray_direction, float* distance) {
// BUG FIX: the original conditionally defined file-scope `min`/`max`
// function-like macros here and never undefined them, leaking them into the
// rest of the translation unit (and silently depending on whether some other
// header had defined them first). Use uniquely named, locally scoped macros.
#define AABB3_MIN( a, b ) ( ((a) < (b)) ? (a) : (b) )
#define AABB3_MAX( a, b ) ( ((a) > (b)) ? (a) : (b) )
    ray_direction = ray_direction.normalize();
    Vec3 dirfrac = Vec3(1.0 / ray_direction.x, 1.0 / ray_direction.y, 1.0 / ray_direction.z);
    float t1 = (min.x - ray_origin.x) * dirfrac.x;
    float t2 = (max.x - ray_origin.x) * dirfrac.x;
    float t3 = (min.y - ray_origin.y) * dirfrac.y;
    float t4 = (max.y - ray_origin.y) * dirfrac.y;
    float t5 = (min.z - ray_origin.z) * dirfrac.z;
    float t6 = (max.z - ray_origin.z) * dirfrac.z;
    // Latest entry across all slabs, earliest exit across all slabs.
    float tmin = AABB3_MAX(AABB3_MAX(AABB3_MIN(t1, t2), AABB3_MIN(t3, t4)), AABB3_MIN(t5, t6));
    float tmax = AABB3_MIN(AABB3_MIN(AABB3_MAX(t1, t2), AABB3_MAX(t3, t4)), AABB3_MAX(t5, t6));
    if (tmax < 0) // box behind us
        return false;
    if (tmin > tmax) // no intersection
        return false;
    *distance = tmin;
    return true;
#undef AABB3_MIN
#undef AABB3_MAX
}
__host__ __device__ float AABB3::surface() {
    // Total surface area of the box: 2 * (xy + yz + zx) of the extents.
    const float dx = max.x - min.x;
    const float dy = max.y - min.y;
    const float dz = max.z - min.z;
    return (dx * dy + dy * dz + dz * dx) * 2.0;
}
// Debug helper: print both corners of the box to stdout.
__host__ __device__ void AABB3::print() {
    printf("AABB3(%f, %f, %f)(%f, %f, %f)\n", min.x, min.y, min.z, max.x, max.y, max.z);
}
__host__ __device__ Vec3 AABB3::center() {
    // Midpoint between the two corners, per component.
    return Vec3((min.x + max.x) / 2.0,
                (min.y + max.y) / 2.0,
                (min.z + max.z) / 2.0);
}
// Default 2D box: both corners at the origin (Vec2's default value).
__host__ __device__ AABB2::AABB2() {
    min = Vec2();
    max = Vec2();
}
// 2D box spanning the given minimum and maximum corners.
__host__ __device__ AABB2::AABB2(Vec2 min, Vec2 max) {
    this->min = min;
    this->max = max;
}
|
12,779 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void kernel(double *a, double *b, double *c, int N)
{
    // Elementwise vector addition c = a + b, one thread per element,
    // guarded against the partial final block.
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    c[idx] = a[idx] + b[idx];
}
int main(int argc, char **argv)
{
    // End-to-end vector-add demo: initialize two host arrays, copy to the
    // device, add them with one kernel launch, and copy the result back.
    // NOTE(review): no CUDA call here is error-checked and the result is
    // never validated — fine for a demo, not for production.
    int N = 1000;
    int sz_in_bytes = N*sizeof(double);
    double *h_a, *h_b, *h_c;
    double *d_a, *d_b, *d_c;
    h_a = (double*)malloc(sz_in_bytes);
    h_b = (double*)malloc(sz_in_bytes);
    h_c = (double*)malloc(sz_in_bytes);
    // Initiate values on h_a and h_b
    for(int i = 0 ; i < N ; i++)
    {
        h_a[i] = 1./(1.+i);
        h_b[i] = (i-1.)/(i+1.);
    }
    // 3-arrays allocation on device
    cudaMalloc((void**)&d_a, sz_in_bytes);
    cudaMalloc((void**)&d_b, sz_in_bytes);
    cudaMalloc((void**)&d_c, sz_in_bytes);
    // copy on device values pointed on host by h_a and h_b
    // (the new values are pointed by d_a et d_b on device)
    cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice);
    // Ceil-divide so the final partial block is covered (kernel guards i < N).
    dim3 dimBlock(64, 1, 1);
    dim3 dimGrid((N + dimBlock.x - 1)/dimBlock.x, 1, 1);
    kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
    // Result is pointed by d_c on device
    // Copy this result on host (result pointed by h_c on host);
    // the blocking memcpy also synchronizes with the kernel.
    cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost);
    // freeing on device
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
12,780 | #include "includes.h"
namespace ann {
// CUDA2
}
__global__ void kernel_weight_update_2( int layer_id, int *l, int *s_ext, int *sw_ext, float *z_ext_arr, float *a_ext_arr, float *t_arr, float *gjl_ext, float *w_ext_arr, float *dw_ext_arr, float eta, float alpha ){
    // Momentum-SGD weight update for the connections from layer `layer_id`
    // to layer `layer_id+1` of a flattened multi-layer network.
    // Thread layout: the y dimension selects the source neuron (idx); the
    // h = blockDim.x threads of the x dimension stride over target neurons.
    // l[] holds per-layer neuron counts; s_ext[]/sw_ext[] are offsets into the
    // flattened activation/weight arrays — presumably including a bias slot,
    // hence the `neuron_count_next - 1` bound (TODO confirm).
    int idx = threadIdx.y + blockDim.y*blockIdx.y;
    int h = blockDim.x;
    int pidx=threadIdx.x;
    int neuron_count = l[layer_id];
    int neuron_count_next = l[layer_id+1];
    if(idx >= neuron_count) return;
    // Activation of source neuron idx in this layer.
    float a = a_ext_arr[s_ext[layer_id] + idx];
    int index0 = s_ext[layer_id + 1] + pidx;  // gradient term of target neuron
    int index1 = sw_ext[layer_id] + idx*(neuron_count_next - 1) + pidx;  // weight slot
    for(int k = pidx; k < neuron_count_next-1; k+=h){
        // grad = a_i * g_j; momentum step: dw = -eta*grad + alpha*dw_prev.
        float grad = a*gjl_ext[index0];
        index0 += h;
        float dw = dw_ext_arr[index1] = -eta*grad + alpha*dw_ext_arr[index1];
        w_ext_arr[index1] += dw;
        index1 += h;
    }
}
12,781 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
#include<cuda_runtime_api.h>
__global__ void vecSum(double* devIn, int pow_step, int n)
{
    // One reduction level of a bottom-up, in-place segment-tree sum.
    // Thread i owns node devIn[pow_step*i] and folds in its right child
    // devIn[pow_step*i + pow_step/2].
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int node = pow_step * i;
    int sibling = node + pow_step / 2;
    // BUG FIX: the original only checked node < n, so for sizes that are not
    // a power of two the sibling index could read past the end of devIn
    // (e.g. n == 15, pow_step == 2, i == 7 reads devIn[15]). Nodes with no
    // right child already hold their complete partial sum, so skip them.
    if (node < n && sibling < n)
        devIn[node] = devIn[sibling] + devIn[node];
}
int main()
{
    // Parallel array sum: builds a bottom-up segment tree in place by calling
    // vecSum once per level; the total ends up in devIn[0].
    //Size of the array
    int n = 15;
    //hostIn: The array accessible by the host.
    //devIn: The input array accessible by the device.
    double *hostIn, *devIn;
    //hostOut: The output value accessible by the host.
    double hostOut;
    //The total size of the array (in bytes)
    size_t b = n*sizeof(double);
    //Allocating memory to host and device copies of array
    hostIn = (double*)malloc(b);
    cudaMalloc(&devIn, b);
    //Initialising the array. Here, we are randomly initialising the values.
    int i;
    printf("\nArray: ");
    for(i=0; i<n; i++)
    {
        hostIn[i] = rand()%10 + (float)rand()/RAND_MAX;
        printf("%f ", hostIn[i]);
    }
    //Copying the values in the host array to the device memory.
    cudaMemcpy(devIn, hostIn, b, cudaMemcpyHostToDevice);
    //Defining the block size and the grid size.
    int blk_size = 8, grd_size = (int)ceil((float)n/blk_size);
    //We are constructing a segment tree of the given array, where the internal
    //nodes store the sum of the subarray corresponding to the leaves in its
    //subtree. Each level in the tree can then be used to exhibit data-level parallelism.
    //The step variable indicates the total levels of the tree.
    int step = (int)ceil((float)(log(n)/log(2)));
    // Launch one kernel per tree level; level i merges pairs 2^(i+1) apart.
    // Kernels on the default stream execute in order, so no explicit sync is
    // needed between levels.
    for(i=0; i<step; i++)
        //We will be calling the device function corresponding to each level of the
        //tree to achieve parallelism
        vecSum<<<grd_size, blk_size>>>(devIn, pow(2, i+1), n);
    //Copying the value of the output (which is present as the first element in the devIn array)
    //to the host memory. The blocking memcpy also synchronizes with the kernels.
    cudaMemcpy(&hostOut, &devIn[0], sizeof(double), cudaMemcpyDeviceToHost);
    printf("\n\nFinal sum: %f\n", hostOut);
    //Freeing the host and the device memory.
    cudaFree(devIn);
    free(hostIn);
    return 0;
}
|
12,782 | #include<cuda_runtime.h>
#include<stdio.h>
#include<time.h>
#include<stdlib.h>
// Matrix multiply kernel: thread (i, j) computes one cell of the result as
// the dot product of row i of ctable1 and column j of ctable2 (j_N is the
// shared inner dimension). Launched with exactly one thread per output cell.
__global__ void Comp(int**ctable1,int **ctable2,int **cresult,int j_N)
{
    int i=threadIdx.x;
    int j=threadIdx.y;
    // BUG FIX: the original accumulated with += directly into cresult, but
    // the host only cleared sizeof(pointer) bytes of the result buffer, so
    // most cells started from garbage. Accumulate in a register and assign.
    int sum = 0;
    for(int z=0;z<j_N;z++)
        sum += ctable1[i][z]*ctable2[z][j];
    cresult[i][j] = sum;
}
int** MallocDeviceDoubleArray(int** head,int Array_i,int Array_j);
int** MallocHostDoubleArray(int Array_i,int Array_j);
int main()
{
    // Interactive matrix-multiply demo: reads two integer matrices from
    // stdin, multiplies them on the device with the Comp kernel, and prints
    // the product. Matrices live in flat device buffers addressed through
    // device-side row-pointer tables built by MallocDeviceDoubleArray.
    int i1_max,j1_max,i2_max,j2_max;
    printf("这是一个矩阵相乘的程序\n");
    printf("请输入矩阵A大小(行列大小)\n");
    scanf("%d %d",&j1_max,&i1_max); // matrix A dimensions (cols rows)
    int **table1=MallocHostDoubleArray(i1_max,j1_max);
    printf("请输入矩阵A的数据\n");
    for(int i=0;i<i1_max;i++)
        for(int j=0;j<j1_max;j++)
            scanf("%d",&table1[i][j]);
    printf("请输入矩阵B大小(行列大小)\n");
    scanf("%d %d",&j2_max,&i2_max); // matrix B dimensions (cols rows)
    int **table2=MallocHostDoubleArray(i2_max,j2_max); // host storage for B
    printf("请输入矩阵B的数据\n");
    for(int i=0;i<i2_max;i++)
        for(int j=0;j<j2_max;j++)
            scanf("%d",&table2[i][j]);
    int **result=MallocHostDoubleArray(i1_max,j2_max); // host storage for A*B
    int *ctable1_head,*ctable2_head,*cresult_head;
    // Device matrices; *_head are the flat data pointers used for memcpy.
    int **ctable1=MallocDeviceDoubleArray(&ctable1_head,i1_max,j1_max),**ctable2=MallocDeviceDoubleArray(&ctable2_head,i2_max,j2_max),**cresult=MallocDeviceDoubleArray(&cresult_head,i1_max,j2_max);
    // BUG FIX: the original wrote cudaMemset(cresult_head,0,sizeof(cresult_head)),
    // which zeroes only sizeof(int*) bytes — clear the whole result matrix.
    cudaMemset(cresult_head,0,sizeof(int)*i1_max*j2_max);
    cudaMemcpy(ctable1_head,*table1,sizeof(int)*i1_max*j1_max,cudaMemcpyHostToDevice);
    cudaMemcpy(ctable2_head,*table2,sizeof(int)*i2_max*j2_max,cudaMemcpyHostToDevice); // upload A and B
    // One thread per output cell: (rows of A) x (cols of B).
    dim3 threadmax;
    threadmax.x=i1_max;
    threadmax.y=j2_max;
    Comp<<<1,threadmax>>>(ctable1,ctable2,cresult,j1_max);
    cudaMemcpy(*result,cresult_head,sizeof(int)*i1_max*j2_max,cudaMemcpyDeviceToHost);
    for(int i=0;i<i1_max;i++)
    {
        for(int j=0;j<j2_max;j++)
            printf("%d ",result[i][j]);
        printf("\n");
    }
    return 0;
}
/*
*@用二维数组的形式访问一维数组
*@(二维数组访问,一维数组存储)
*@Device端生成动态二维数组,head返回一维数组的地址(以便memcpy使用)
*@返回值为二维数组指针
*/
/*
 * Allocate an Array_i x Array_j int matrix on the device, addressable as a
 * 2D array of row pointers. The flat data pointer is returned through *head
 * (needed to cudaMemcpy the whole matrix at once); the return value is the
 * device-side array of row pointers for use inside kernels.
 */
int** MallocDeviceDoubleArray(int** head,int Array_i,int Array_j)
{
    int** rows_dev,*data_dev,**rows_host;
    cudaMalloc((void**)&rows_dev,sizeof(int*)*Array_i);
    cudaMalloc((void**)&data_dev,sizeof(int)*Array_i*Array_j);
    // Build the row-pointer table on the host, then upload it.
    rows_host=(int**)malloc(sizeof(int*)*Array_i);
    for(int i=0;i<Array_i;i++)
        rows_host[i]=i*Array_j+data_dev;
    *head=data_dev;
    cudaMemcpy(rows_dev,rows_host,sizeof(int*)*Array_i,cudaMemcpyHostToDevice);
    // BUG FIX: the original leaked this temporary host array.
    free(rows_host);
    return rows_dev;
}
int **MallocHostDoubleArray(int Array_i,int Array_j)
{
    // Allocate an Array_i x Array_j int matrix as one contiguous data block
    // plus a row-pointer table, so it can be used both as table[i][j] and as
    // a flat array starting at table[0].
    int *data = (int*)malloc(sizeof(int) * Array_i * Array_j);
    int **rows = (int**)malloc(sizeof(int*) * Array_i);
    for (int i = 0; i < Array_i; ++i)
        rows[i] = data + i * Array_j;
    return rows;
}
|
12,783 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#define gpuErrchk(ans) { gpuAssert((ans),__FILE__,__LINE__);}
//implement one grid with 4 blocks and 256 threads in total, 8x8 threads for each block
// Diagnostic kernel: every thread prints its block coordinates plus the
// block and grid dimensions (output order is unspecified).
__global__ void print_threadIds()
{
    printf("blockIdx,x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, blockDim.z : %d gridDim.x : %d, gridDim.y : %d, gridDim.z : %d \n",blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
// Demo of computing a unique index from threadIdx plus a per-block offset.
// NOTE(review): the offset is hard-coded to 4 for any block past the first,
// so this only works for a launch of 2 blocks x 4 threads.
__global__ void unique_idx_calc_threadIdx(int * input)
{
    int tid = threadIdx.x;
    int offset = (blockIdx.x>0)? 4:0;
    printf("blockIdx : %d, threadIdx : %d, value : %d\n", blockIdx.x, tid, input[tid+offset]);
}
// Demo of a globally unique index for a 2D grid of 2D blocks: local thread
// id within the block, plus the count of threads in all preceding blocks
// (full grid rows first, then blocks earlier in this row).
__global__ void unique_gid_calculation(int * input){
    int tid = threadIdx.y * blockDim.x + threadIdx.x;
    int offset = blockIdx.y * gridDim.x * (blockDim.x * blockDim.y) + blockIdx.x * (blockDim.x * blockDim.y);
    //number of threads in one row = gridDim.x * blockDim.x
    //row offset: gridDim.x * blockDim.x * blockIdx.y
    //int offset = blockIdx.x * (blockDim.x * blockDim.y) + blockIdx.y * (blockDim.x * blockDim.y);
    int gid = tid + offset;
    printf("gid: %d, input[gid]: %d \n",gid, input[gid]);
    printf("threadIdx.x : %d, blockIdx.x : %d, blockIdx.y : %d, blockDim.x : %d, blockDim.y : %d, gridDim.x : %d gid : %d value : %d\n",
           threadIdx.x, blockIdx.x, blockIdx.y, blockDim.x, blockDim.y, gridDim.x, gid, input[gid]);
}
// Memory-transfer demo: prints each thread's global id (2D grid of 1D
// blocks) and the corresponding input element. No bounds check — the launch
// must not exceed the array length.
__global__ void mem_trs_test(int * input)
{
    int gid = blockIdx.y * (blockDim.x*blockDim.y)*gridDim.x + blockIdx.x * (blockDim.x*blockDim.y) + threadIdx.x;
    printf("tid : %d, gid : %d, value : %d \n", threadIdx.x, gid, input[gid]);
}
// Variant of mem_trs_test that takes the array size.
// NOTE(review): the gid < size guard is commented out, so threads past the
// end still read out of bounds — confirm whether that is intentional.
__global__ void mem_trs_test1(int * input,int size)
{
    int gid = blockIdx.y * (blockDim.x*blockDim.y)*gridDim.x + blockIdx.x * (blockDim.x*blockDim.y) + threadIdx.x;
    //if(gid<size){
    printf("tid : %d, gid : %d, value : %d \n", threadIdx.x, gid, input[gid]);
    //}
}
__global__ void sum_array_gpu(int *a,int *b,int *c,int size)
{
    // Elementwise integer vector addition: one thread per element, guarded
    // against the partial final block.
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid >= size)
        return;
    c[gid] = a[gid] + b[gid];
}
void sum_array_cpu(int *a, int *b, int *c, int size)
{
    // Host reference implementation of elementwise vector addition.
    int i = 0;
    while (i < size) {
        c[i] = a[i] + b[i];
        ++i;
    }
}
bool checkResult(int *a, int *b, int size)
{
    // Compare two int arrays elementwise; on the first mismatch print the
    // offending pair and return false, otherwise return true.
    for (int i = 0; i < size; ++i) {
        if (a[i] == b[i])
            continue;
        printf("the current value of a[i] and b[i] is: %d, %d",a[i],b[i]);
        return false;
    }
    return true;
}
int main()
{
    // Vector-add demo with CPU verification: computes a + b on both host and
    // device for 1000 ints and compares the two results.
    int size = 1000;
    //int block_size = 128;
    int byte_size = size * sizeof(int);
    cudaError error;
    int *a_input,*b_input;
    a_input = (int*)malloc(byte_size);
    b_input = (int*)malloc(byte_size);
    int *c_output,*gpu_output;
    c_output = (int*)malloc(byte_size);
    gpu_output = (int*)malloc(byte_size);
    for(int i=0;i<size;i++)
    {
        a_input[i] = i;
        b_input[i] = i*2;
    }
    //cpu matrix sum calculation
    sum_array_cpu(a_input,b_input,c_output,size);
    int * a_gpu_input, * b_gpu_input, *c_gpu_output;
    // Only the first allocation is error-checked here.
    error = cudaMalloc((void**)&a_gpu_input, byte_size);
    if(error != cudaSuccess)
    {
        fprintf(stderr,"%s \n", cudaGetErrorString(error));
    }
    cudaMalloc((void**)&b_gpu_input, byte_size);
    cudaMalloc((void**)&c_gpu_output, byte_size);
    cudaMemcpy(a_gpu_input,a_input,byte_size,cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu_input,b_input,byte_size,cudaMemcpyHostToDevice);
    // 8 blocks x 128 threads = 1024 threads for 1000 elements; the kernel's
    // gid < size guard handles the 24 extra threads.
    dim3 block(128);
    dim3 grid(8);
    sum_array_gpu<<<grid,block>>>(a_gpu_input,b_gpu_input,c_gpu_output,size);
    cudaDeviceSynchronize();
    //memory transfer back to host
    cudaMemcpy(gpu_output,c_gpu_output,byte_size,cudaMemcpyDeviceToHost);
    //for(int i=0;i<size;i++){
    //	printf("the gpu_output[i] value is: %d",gpu_output[i]);
    //}
    // Compare the device result against the CPU reference.
    bool test = checkResult(c_output,gpu_output,size);
    if(test==true){
        printf("the result is true");
    }else{
        printf("the result is false");
    }
    // if(checkResult(c_gpu_output,c_output,size)==true){
    //     printf("the result is correct");
    // }else{
    //     printf("the result is not correct");
    // }
    cudaDeviceSynchronize();
    cudaFree(a_gpu_input);
    cudaFree(b_gpu_input);
    cudaFree(c_gpu_output);
    free(a_input);
    free(b_input);
    free(c_output);
    cudaDeviceReset();
    return 0;
}
|
12,784 | //pass
//--blockDim=32 --gridDim=32 --warp-sync=32 --only-warp
// GPUVerify test kernel (see the pass/flags header above this function):
// with --warp-sync=32 --only-warp the overlapping write to A[threadIdx.x+1]
// is expected to verify. The intra-warp overlap is intentional test input —
// do not "fix" it.
__global__ void onlywarp_pass (int* A) {
    A[threadIdx.x] = threadIdx.x;
    A[threadIdx.x+1] = threadIdx.x;
}
|
12,785 | #include <cmath>
#include <cstdio>
#include <ctime>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
// Inclusive prefix sum, naive O(n^2) form: thread idx writes the sum of
// array[0..idx] back to array[idx], staging the data in shared memory first.
// Assumes a single block of exactly 1024 threads (the launch in main below)
// — there is no bounds check on the shared buffer.
__global__ void sum_shared_mem(float *array)
{
    int idx = threadIdx.x;
    float sum=0.0f;
    // Share among threads within the same block
    __shared__ float sh_array[1024];
    sh_array[idx] = array[idx];
    // Syncronize threads within the same block (all loads into shared memory
    // must finish before any thread starts summing).
    __syncthreads();
    for (int i=0; i<=idx; i++){
        sum+= sh_array[i];
    }
    __syncthreads();
    array[idx] = sum;
}
// Same inclusive prefix sum, but reading directly from global memory.
// The barrier ensures every thread finishes reading array[0..idx] before any
// thread overwrites its element (valid for a single-block launch only, since
// __syncthreads does not synchronize across blocks).
__global__ void sum_global_mem(float *array)
{
    int idx = threadIdx.x;
    float sum=0.0f;
    for (int i=0; i<=idx; i++){
        sum+= array[i];
    }
    __syncthreads();
    array[idx] = sum;
}
int main(void)
{
    // Prefix-sum demo and crude timing: upload 1024 floats, run the
    // shared-memory kernel, copy back, and print wall-clock checkpoints.
    // (std::clock measures host CPU time only; the kernel launch returns
    // immediately, so "after Kernel call" excludes most GPU work.)
    std::clock_t start_time;
    double duration01;
    double duration02;
    double duration03;
    const int ARR_BYTES = 1024*sizeof(float);
    // Clock start
    start_time = std::clock();
    // Declare and alloc array on host
    float h_array[1024];
    // initialize input array
    for (int i=0; i<1024; i++){
        h_array[i] = float(i);
    }
    // Declare and alloc array on device
    float *d_array;
    cudaMalloc(&d_array, ARR_BYTES);
    // Transfer to device
    cudaMemcpy(d_array, h_array, ARR_BYTES, cudaMemcpyHostToDevice);
    // Clock stop 01
    duration01 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC;
    std::cout<<"Computing time before Kernel call: "<< duration01 << "s" << std::endl;
    // Call kernel function with shared memory
    sum_shared_mem<<<1, 1024>>>(d_array);
    // Call kernel function with shared memory
    // sum_global_mem<<<1, 1024>>>(d_array);
    // Clock stop 02
    duration02 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC;
    std::cout<<"Computing time after Kernel call: "<< duration02 << "s" << std::endl;
    // Transfer results to host (blocking copy also waits for the kernel)
    cudaMemcpy(h_array, d_array, ARR_BYTES, cudaMemcpyDeviceToHost);
    // Clock stop 03
    duration03 = ( std::clock() - start_time ) / (double) CLOCKS_PER_SEC;
    std::cout<<"Computing time after memory copy: "<< duration03 << "s" << std::endl;
    // Output results
    for(int ii=0; ii<10; ii++){
        std::cout<< h_array[ii]<< ", ";
    }
    std::cout<< std::endl;
    // BUG FIX: the original never released the device allocation.
    cudaFree(d_array);
    return 0;
}
|
12,786 | //#define REARRANGED_DOMAIN
// First-order extrapolation: copy each triangle's centroid value to its
// 3 edge and 3 vertex slots. Layout is selected at compile time:
// AoS (default, slots of triangle k contiguous at 3k) or SoA
// (REARRANGED_DOMAIN, component e of triangle k at k + e*N).
__global__ void extrapolate_first_order(
        int N,
        double * centroid_values,
        double * edge_values,
        double * vertex_values)
{
    // Flatten the 2D grid / 2D block into a single element index.
    const int k =
        threadIdx.x+threadIdx.y*blockDim.x+
        (blockIdx.x+blockIdx.y*gridDim.x)*blockDim.x*blockDim.y;
    if (k >= N)
        return;
    const double c = centroid_values[k];
#ifndef REARRANGED_DOMAIN
    const int base = 3 * k;
    for (int e = 0; e < 3; ++e) {
        edge_values[base + e] = c;
        vertex_values[base + e] = c;
    }
#else
    for (int e = 0; e < 3; ++e) {
        edge_values[k + e * N] = c;
        vertex_values[k + e * N] = c;
    }
#endif
}
|
12,787 | #include "includes.h"
/* ==========================================================================
textureCube.cu
==========================================================================
Main wrapper + kernel that changes the colors of the four faces
*/
#define PI 3.1415926536f
// --------------------------------------------------------------------------
// Kernel
// --------------------------------------------------------------------------
// Paint a 2D surface with a moving bulls-eye pattern. The "face" parameter selects
// between 6 different colors to use. We will use a different color on each face of a
// cube map.
// --------------------------------------------------------------------------
// Wrapper
// --------------------------------------------------------------------------
// Sets up grid / blocks, launches kernel
// Paints an animated bulls-eye ("strobe") pattern into a 4-byte-per-pixel
// surface. `face` selects which color channel carries the animated value
// (pixel index face/2, with the pattern inverted for odd faces); `t` is the
// animation time. One thread per pixel; extra threads exit early.
extern "C"
__global__ void CudaKernelTextureCubeStrobelight(char *surface, int width, int height, size_t pitch, int face, float t)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
unsigned char *pixel;
// in the case where, due to quantization into grids, we have
// more threads than pixels, skip the threads which don't
// correspond to valid pixels
if (x >= width || y >= height) return;
// get a pointer to this pixel (rows are `pitch` bytes apart, 4 bytes/pixel)
pixel = (unsigned char *)(surface + y*pitch) + 4 * x;
// populate it: radial distance from the surface centre drives a cosine
// ring pattern that drifts with time t.
float theta_x = (2.0f*x) / width - 1.0f;
float theta_y = (2.0f*y) / height - 1.0f;
float theta = 2.0f*PI*sqrt(theta_x*theta_x + theta_y*theta_y);
unsigned char value = 255 * (0.6f + 0.4f*cos(theta + t));
pixel[3] = 255; // alpha
// NOTE(review): assigning 0.5 to an unsigned char stores 0 after the
// implicit conversion — if a mid-grey background was intended this should
// be 128; confirm against the original sample before changing.
if (face % 2)
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = 0.5; // red
pixel[face / 2] = value;
}
else
{
pixel[0] = // blue
pixel[1] = // green
pixel[2] = value; // red
pixel[face / 2] = 0.5;
}
}
12,788 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
//#include <wb.h>
#include <stdio.h>
#include <stdlib.h>
#define wbCheck(stmt) do { cudaError_t err = stmt; if (err != cudaSuccess) {printf( "Failed to run stmt ", stmt); return -1;}} while(0)
#define wbLog(level, msg) printf(msg)
// MP 1
// Element-wise vector addition: out[i] = in1[i] + in2[i] for i in [0, len).
// One thread per element; threads past the end exit via the guard.
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= len)
        return;
    out[idx] = in1[idx] + in2[idx];
}
// Streamed vector-add driver: splits a 10^6-element input into SegSize
// chunks and pushes H2D copy / kernel / D2H copy through four CUDA streams.
// Fixes vs the original:
//  - the kernel now receives the segment length in ELEMENTS (it was handed
//    the byte count, so the `i < len` guard was meaningless),
//  - the mismatch printf uses %f for its float arguments (was %d),
//  - deprecated cudaThreadSynchronize() replaced by cudaDeviceSynchronize(),
//  - streams and hostCheckOutput are released on exit.
int main(int argc, char ** argv) {
    // wbArg_t args;
    int inputLength;
    float * hostInput1;
    float * hostInput2;
    float * hostOutput;
    float * deviceInput1;
    float * deviceInput2;
    float * deviceOutput;
    float * hostCheckOutput;
    // args = wbArg_read(argc, argv);
    inputLength = 1000000;
    hostInput1 = (float *)malloc(inputLength * sizeof(float));
    hostInput2 = (float *)malloc(inputLength * sizeof(float));
    // Pinned host memory: required for truly asynchronous cudaMemcpyAsync.
    wbCheck( cudaHostAlloc((void **) &hostOutput, inputLength * sizeof(float),cudaHostAllocDefault));
    hostCheckOutput= (float *)malloc(inputLength * sizeof(float));
    printf("1 %i \n", inputLength);
    // Build the inputs and the CPU reference result.
    for (int i = 0; i < inputLength; i++)
    {
        hostInput1[i] = 0;
        hostInput2[i] = i;
        hostCheckOutput[i] = hostInput1[i] + hostInput2[i];
    }
    printf("2 \n");
    // Report whether the device can overlap copies with kernel execution.
    int dev_count;
    cudaDeviceProp prop;
    cudaGetDeviceCount( &dev_count);
    for (int i=0; i < dev_count; i++)
    {
        cudaGetDeviceProperties(&prop,i);
        if(prop.deviceOverlap) printf(" Async loading and Streams supported \n");
    }
    int numStreams = 4;
    cudaStream_t stream0, stream[4];
    wbCheck(cudaStreamCreate(&stream0));
    for (int i = 0; i < numStreams; ++i)
    {
        wbCheck(cudaStreamCreate(&stream[i]));
    }
    int size = inputLength * sizeof(float);
    wbCheck(cudaMalloc((void **) &deviceInput1, size)); // Input
    wbCheck(cudaMalloc((void **) &deviceInput2, size)); // Input
    wbCheck(cudaMalloc((void **) &deviceOutput, size)); // Output
    printf("3 \n");
    int SegSize = 40960;
    size = SegSize* sizeof(float);  // current segment size in BYTES
    int iStream;
    int iteration = inputLength/SegSize +1;
    for (int i=0; i < iteration ; i++)
    {
        iStream = i % numStreams;
        if (inputLength == SegSize*i) // inputLength multiple of SegSize
        {
            break;
        }
        else if(inputLength < SegSize*(i+1)) // process the small amount not yet processed
        {
            size = (inputLength - i*SegSize)* sizeof(float);
        }
        wbCheck(cudaMemcpyAsync(deviceInput1, hostInput1+i*SegSize, size, cudaMemcpyHostToDevice,stream[iStream]));
        wbCheck(cudaMemcpyAsync(deviceInput2, hostInput2+i*SegSize, size, cudaMemcpyHostToDevice,stream[iStream]));
        printf(" %d %d %d \n", i, size, iStream);
        // Segment length in elements — the kernel guard expects a count.
        int segElems = size / (int)sizeof(float);
        int blockSize = 1024;
        int gridSize = (segElems-1)/blockSize + 1;
        if ( gridSize > 65535) {
            printf("inputLength to large %i > 65535 ", gridSize);
            return -1;
        }
        dim3 DimGrid(gridSize, 1, 1);
        dim3 DimBlock(blockSize, 1, 1);
        // BUG FIX: previously `size` (bytes) was passed as the element count.
        vecAdd<<<DimGrid,DimBlock,0,stream[iStream]>>>( deviceInput1, deviceInput2, deviceOutput, segElems);
        // Full-device sync: the three device buffers are shared by all
        // streams, so the next iteration must not start copying until this
        // segment's kernel is done (this also serializes the pipeline).
        cudaDeviceSynchronize();
        wbCheck(cudaMemcpyAsync(hostOutput+i*SegSize, deviceOutput, size, cudaMemcpyDeviceToHost,stream[iStream]) );
    }
    cudaFree(deviceInput2); cudaFree(deviceInput1); cudaFree(deviceOutput);
    // Compare against the CPU reference and sum the matching outputs.
    int ii;
    float sum = 0.;
    for ( ii = 0; ii < inputLength; ii++)
    {
        if ( hostCheckOutput[ii] != hostOutput[ii]) {
            // BUG FIX: %d was previously used for these float arguments.
            printf("%f %f \n",hostOutput[ii],hostCheckOutput[ii]);
        }
        else
        {
            sum = sum + hostOutput[ii];
            // printf(" Good %i \n",ii);
        }
    }
    printf(" Done %d %f \n",ii, sum);
    // Release streams and host memory (previously leaked).
    cudaStreamDestroy(stream0);
    for (int i = 0; i < numStreams; ++i) cudaStreamDestroy(stream[i]);
    free(hostInput1);
    free(hostInput2);
    free(hostCheckOutput);
    cudaFreeHost(hostOutput);
    return 0;
}
|
12,789 | #include <stdio.h>
// In-place left pseudo-inverse of n 2x2 matrices, one matrix per thread.
// A stores each matrix row-major: M = [a b; c d] at A[4*i .. 4*i+3].
// Computes (M^T M)^{-1} M^T: e,f,g,h are the entries of M^T M (note f == g
// by symmetry), e_i..h_i its inverse, and the final stores are the product
// with M^T. For an invertible M this equals M^{-1}.
// NOTE(review): no guard against a singular M — e*h - f*g == 0 makes
// `scalar` infinite; confirm callers guarantee full-rank inputs.
__global__ void pinv(float *A, int n)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if(i >= n) {
return;
}
float a = A[4*i + 0];
float b = A[4*i + 1];
float c = A[4*i + 2];
float d = A[4*i + 3];
// M^T M = [[e, f], [g, h]] (f and g are the same value by construction).
float e = a*a + c*c;
float f = a*b + c*d;
float g = a*b + c*d;
float h = b*b + d*d;
// 2x2 inverse of M^T M via the adjugate divided by the determinant.
float scalar = 1/(e*h - f*g);
float e_i = scalar * h;
float f_i = scalar * (-f);
float g_i = scalar * (-g);
float h_i = scalar * e;
// (M^T M)^{-1} * M^T, written back over the input matrix.
A[4*i + 0] = e_i*a + f_i*b;
A[4*i + 1] = e_i*c + f_i*d;
A[4*i + 2] = g_i*a + h_i*b;
A[4*i + 3] = g_i*c + h_i*d;
}
|
12,790 | /* ==================================================================
Programmer: Yicheng Tu (ytu@cse.usf.edu)
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH in the C4 lab machines
==================================================================
*/
/* USF Fall 2019 CIS4930 Programming on Massively Parallel Systems
Project Description: Write a CUDA program to implement the same
functionality as the CPU only code
Student: Alexander Cook
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
double x_pos;
double y_pos;
double z_pos;
} atom;
typedef struct hist_entry{
//float min;
//float max;
unsigned long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * histogram; /* list of all buckets in the histogram */
bucket * z_histogram; /* histogram initialized to all 0s */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
Distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
    // Euclidean distance between two points of the global atom_list.
    const double dx = atom_list[ind1].x_pos - atom_list[ind2].x_pos;
    const double dy = atom_list[ind1].y_pos - atom_list[ind2].y_pos;
    const double dz = atom_list[ind1].z_pos - atom_list[ind2].z_pos;
    return sqrt(dx*dx + dy*dy + dz*dz);
}
//Device helper function which now takes a pointer as an argument instead of using a global pointer
// Device version of p2p_distance: takes the atom array explicitly instead of
// relying on a host-side global pointer.
__device__ double d_p2p_distance(atom *atom_list, int ind1, int ind2) {
    const double dx = atom_list[ind1].x_pos - atom_list[ind2].x_pos;
    const double dy = atom_list[ind1].y_pos - atom_list[ind2].y_pos;
    const double dz = atom_list[ind1].z_pos - atom_list[ind2].z_pos;
    return sqrt(dx*dx + dy*dy + dz*dz);
}
/* GPU histogram kernel: one thread per (i, j) index pair of a 2D grid; the
   i < j guard counts each unordered pair exactly once. Concurrent bucket
   increments use a 64-bit atomicAdd.
   BUG FIX: PDH_res is now double — it was declared int, which silently
   truncated the bucket width passed from the host (a double) and made GPU
   bucket boundaries disagree with the CPU reference for non-integral widths. */
__global__ void PDH_kernel(atom *d_atom_list, bucket *d_histogram, int PDH_acnt, double PDH_res){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    int h_pos;
    double dist;
    if(i < j && i < PDH_acnt && j < PDH_acnt){ // i < j so distances are not counted twice
        dist = d_p2p_distance(d_atom_list, i,j);
        h_pos = (int) (dist / PDH_res);
        atomicAdd(&(d_histogram[h_pos].d_cnt), 1);
    }
}
//Single threaded kernel for testing
/* Single-threaded GPU kernel for correctness testing (launch <<<1,1>>>);
   mirrors the CPU baseline loop. Increments are NOT atomic, so this is only
   valid with one thread.
   BUG FIX: PDH_res is now double (was int — see PDH_kernel). */
__global__ void PDH_kernelST(atom *d_atom_list, bucket *d_histogram, int PDH_acnt, double PDH_res){
    int i = threadIdx.x;
    int j, h_pos;
    double dist;
    for(; i < PDH_acnt; i++) {
        for(j = i+1; j < PDH_acnt; j++) {
            dist = d_p2p_distance(d_atom_list,i,j);
            h_pos = (int) (dist / PDH_res);
            d_histogram[h_pos].d_cnt++;
        }
    }
}
/*
Brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
    /* Reference O(n^2) CPU histogram: every unordered pair of atoms is
       binned once, bucket index = floor(distance / bucket width). */
    for (int i = 0; i < PDH_acnt; i++) {
        for (int j = i + 1; j < PDH_acnt; j++) {
            const double dist = p2p_distance(i, j);
            const int h_pos = (int)(dist / PDH_res);
            histogram[h_pos].d_cnt++;
        }
    }
    return 0;
}
/*
Set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time(const char* version) {
    /* Stop the global timer, print and return the elapsed wall time in
       seconds (startTime must have been set by the caller). */
    gettimeofday(&endTime, &Idunno);
    long sec_diff = endTime.tv_sec - startTime.tv_sec;
    long usec_diff = endTime.tv_usec - startTime.tv_usec;
    if (usec_diff < 0) {
        /* Borrow a second so the microsecond part is non-negative. */
        --sec_diff;
        usec_diff += 1000000;
    }
    printf("Running time for %s version: %ld.%06ld\n",version , sec_diff, usec_diff);
    return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
/*
Print the counts in all buckets of the histogram
*/
void output_histogram(bucket *histogram){
    /* Print bucket counts five per row; after the last bucket also print
       the grand total so the overall pair count can be sanity-checked. */
    long long running_total = 0;
    int i = 0;
    while (i < num_buckets) {
        if (i % 5 == 0) /* we print 5 buckets in a row */
            printf("\n%02d: ", i);
        printf("%15lld ", histogram[i].d_cnt);
        running_total += histogram[i].d_cnt;
        if (i == num_buckets - 1)
            printf("\n T:%lld \n", running_total);
        else
            printf("| ");
        ++i;
    }
}
//Prints difference between two histograms
//Prints the per-bucket difference histo1 - histo2 (all zeros => GPU matches
//CPU), plus the difference of the two grand totals.
void output_histogram_diff(bucket *histo1, bucket *histo2){
    long long sum1 = 0;
    long long sum2 = 0;
    printf("Difference between CPU and GPU histogram\n");
    for (int i = 0; i < num_buckets; ++i) {
        if (i % 5 == 0) /* we print 5 buckets in a row */
            printf("\n%02d: ", i);
        printf("%15lld ", histo1[i].d_cnt - histo2[i].d_cnt);
        sum1 += histo1[i].d_cnt;
        sum2 += histo2[i].d_cnt;
        if (i == num_buckets - 1)
            printf("\n T:%lld \n", sum1 - sum2);
        else
            printf("| ");
    }
}
/* Driver: generates PDH_acnt random atoms, builds the distance histogram on
   the CPU and on the GPU, and prints both plus their difference.
   Fixes vs the original: argv is validated before use, the kernel launch is
   error-checked, and z_histogram is freed (was leaked). */
int main(int argc, char **argv)
{
    int i;
    if (argc < 3) {
        printf("usage: %s {#of_atoms} {bucket_width}\n", argv[0]);
        return 1;
    }
    PDH_acnt = atoi(argv[1]);
    PDH_res = atof(argv[2]);
    //Allocate host memory
    num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
    histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
    z_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
    atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
    //initialize histogram to zero
    memset(z_histogram, 0, sizeof(bucket)*num_buckets);
    //Allocate device memory
    bucket *d_histogram; //pointer to array of buckets
    atom *d_atom_list;   //pointer to array of atoms
    cudaMalloc((void**)&d_histogram, sizeof(bucket)*num_buckets);
    cudaMalloc((void**)&d_atom_list, sizeof(atom)*PDH_acnt);
    srand(1);
    /* Generate data following a uniform distribution */
    for(i = 0; i < PDH_acnt; i++) {
        atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
        atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
        atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
    }
    //Copy host data to device memory (zeroed histogram + atom positions)
    cudaMemcpy(d_histogram, z_histogram, sizeof(bucket)*num_buckets, cudaMemcpyHostToDevice);
    cudaMemcpy(d_atom_list, atom_list, sizeof(atom)*PDH_acnt, cudaMemcpyHostToDevice);
    /* Start counting time */
    gettimeofday(&startTime, &Idunno);
    /* Call CPU single thread version to compute the histogram */
    PDH_baseline();
    /* Report running time for CPU version */
    report_running_time("CPU");
    /* Print out the histogram */
    output_histogram(histogram);
    //Define 2D block and grid size: num_threads^2 threads per block, the
    //grid square enough to cover all (i, j) pairs.
    int num_threads = 16;
    dim3 blockDim(num_threads,num_threads);
    int num_blocks = (PDH_acnt + num_threads - 1)/num_threads;
    dim3 gridDim(num_blocks, num_blocks);
    //Start counting time
    gettimeofday(&startTime, &Idunno);
    //Launch kernel
    PDH_kernel<<<gridDim,blockDim>>>(d_atom_list, d_histogram, PDH_acnt, PDH_res);
    //PDH_kernelST<<<1,1>>>(d_atom_list, d_histogram, PDH_acnt, PDH_res);
    /* Surface launch/runtime errors before trusting the results. */
    cudaError_t kerr = cudaGetLastError();
    if (kerr != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(kerr));
    //Copy data from gpu memory to host memory (this also synchronizes)
    bucket * GPU_histogram;
    GPU_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
    cudaMemcpy(GPU_histogram, d_histogram, sizeof(bucket)*num_buckets, cudaMemcpyDeviceToHost);
    //Report GPU running time
    report_running_time("GPU");
    /* Print out the histogram again for gpu version */
    output_histogram(GPU_histogram);
    //print difference between the two histograms
    output_histogram_diff(histogram, GPU_histogram);
    free(histogram);
    free(z_histogram);   /* was leaked before */
    free(atom_list);
    free(GPU_histogram);
    cudaFree(d_histogram);
    cudaFree(d_atom_list);
    return 0;
}
|
12,791 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#define NSTEPS 500
#define TX 16
#define TY 32
#define NPTSX 200
#define NPTSY 200
// One Jacobi relaxation sweep: for every updatable (mask != 0) cell, the new
// potential is the average of its four neighbours from the previous field.
// Boundary safety: setup_grid() zeroes the mask on the outer border, so the
// unguarded x-1 / x+1 / x±nptsx neighbour reads never run off the grid for
// any cell that is actually updated.
__global__
void performUpdatesKernel(float *d_phi, float *d_oldphi, int *d_mask, int nptsx, int nptsy)
{
int Row = blockIdx.y*blockDim.y+threadIdx.y;
int Col = blockIdx.x*blockDim.x+threadIdx.x;
int x = Row*nptsx+Col;   // flat index of this cell
int xm = x-nptsx;        // neighbour one row up
int xp = x+nptsx;        // neighbour one row down
if(Col<nptsx && Row<nptsy)
if (d_mask[x]) d_phi[x] = 0.25f*(d_oldphi[x+1]+d_oldphi[x-1]+d_oldphi[xp]+d_oldphi[xm]);
}
// Copy the freshly computed field back into the "old" buffer for the next
// Jacobi sweep; only updatable (mask != 0) cells are copied, so fixed
// boundary values in d_oldphi are preserved.
__global__
void doCopyKernel(float *d_phi, float *d_oldphi, int *d_mask, int nptsx, int nptsy)
{
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    if (col >= nptsx || row >= nptsy)
        return;
    const int idx = row*nptsx + col;
    if (d_mask[idx])
        d_oldphi[idx] = d_phi[idx];
}
// Run `nsteps` Jacobi iterations of the Laplace solver on the GPU and return
// the final field in h_phi. h_oldphi holds the initial field (with boundary
// values), h_mask marks which cells are updatable.
void performUpdates(float *h_phi, float * h_oldphi, int *h_mask, int nptsx, int nptsy, int nsteps)
{
    float *d_phi, *d_oldphi;
    int *d_mask;
    const int sizef = sizeof(float)*nptsx*nptsy;
    const int sizei = sizeof(int)*nptsx*nptsy;
    cudaMalloc((void **)&d_phi,sizef);
    cudaMalloc((void **)&d_oldphi,sizef);
    cudaMalloc((void **)&d_mask,sizei);
    cudaMemcpy(d_oldphi,h_oldphi,sizef,cudaMemcpyHostToDevice);
    cudaMemcpy(d_mask,h_mask,sizei,cudaMemcpyHostToDevice);
    dim3 grid(ceil(nptsx/(float)TX),ceil(nptsy/(float)TY),1);
    dim3 block(TX,TY,1);
    for (int step = 0; step < nsteps; ++step) {
        // One relaxation sweep, then copy phi -> oldphi for the next sweep.
        performUpdatesKernel<<<grid,block>>>(d_phi,d_oldphi,d_mask,nptsx,nptsy);
        doCopyKernel<<<grid,block>>>(d_phi,d_oldphi,d_mask,nptsx,nptsy);
    }
    // d_oldphi holds the latest field (including fixed boundary cells).
    cudaMemcpy(h_phi,d_oldphi,sizef,cudaMemcpyDeviceToHost);
    cudaFree(d_phi); cudaFree(d_oldphi); cudaFree(d_mask);
}
// Map x in [0,1] to a packed 24-bit color (B + 256*(G + 256*R)):
// pure blue at x=0, green at x=0.5, red at x=1, blended in between.
int RGBval(float x){
    int R, G, B;
    if (x <= 0.5) {
        // Blue fades out, green fades in over the first half.
        B = (int)((1.0-2.0*x)*255.0);
        G = (int)(2.0*x*255.0);
        R = 0;
    } else {
        // Green fades out, red fades in over the second half.
        B = 0;
        G = (int)((2.0-2.0*x)*255.0);
        R = (int)((2.0*x-1.0)*255.0);
    }
    return B + (G + R*256)*256;
}
// Initialise the Laplace grid: phi = 0 everywhere, mask = 1 (updatable) in
// the interior, mask = 0 (fixed) on the outer border and on a 2x2 hot spot
// in the centre whose phi is pinned to 1.0. Always returns 0.
int setup_grid (float *h_phi, int nptsx, int nptsy, int *h_mask)
{
    for (int j = 0; j < nptsy; ++j) {
        for (int i = 0; i < nptsx; ++i) {
            h_phi[j*nptsx + i] = 0.0;
            h_mask[j*nptsx + i] = 1;
        }
    }
    // Fixed (Dirichlet) borders: top and bottom rows, left and right columns.
    for (int i = 0; i < nptsx; ++i) {
        h_mask[i] = 0;
        h_mask[(nptsy-1)*nptsx + i] = 0;
    }
    for (int j = 0; j < nptsy; ++j) {
        h_mask[j*nptsx] = 0;
        h_mask[j*nptsx + nptsx-1] = 0;
    }
    // 2x2 source in the centre, held at phi = 1.0.
    const int cx = nptsx/2;
    const int cy = nptsy/2;
    for (int dj = -1; dj <= 0; ++dj) {
        for (int di = -1; di <= 0; ++di) {
            h_mask[(cy+dj)*nptsx + (cx+di)] = 0;
            h_phi[(cy+dj)*nptsx + (cx+di)] = 1.0;
        }
    }
    return 0;
}
// Dump the field as a PostScript color image ("outCUDA.ps"): a small header
// followed by one 24-bit hex pixel per grid cell, 10 pixels per text line.
int output_array (float *h_phi, int nptsx, int nptsy)
{
    FILE *fp = fopen("outCUDA.ps","w");
    fprintf(fp,"/picstr %d string def\n",nptsx);
    fprintf(fp,"50 50 translate\n");
    fprintf(fp,"%d %d scale\n",nptsx, nptsy);
    fprintf(fp,"%d %d 8 [%d 0 0 %d 0 %d] \n",nptsx, nptsy, nptsx, nptsy, -nptsx);
    fprintf(fp,"{currentfile 3 200 mul string readhexstring pop} bind false 3 colorimage\n");
    int count = 0;
    for (int j = 0; j < nptsy; ++j) {
        for (int i = 0; i < nptsx; ++i, ++count) {
            fprintf(fp,"%06x",RGBval(h_phi[j*nptsx+i]));
            if ((count+1)%10 == 0) fprintf(fp,"\n");
        }
    }
    fclose(fp);
    return 0;
}
// Driver: allocate host buffers, set up the Laplace problem, solve on the
// GPU, and write the result image.
// Fixes vs the original: malloc results are checked and the three host
// buffers are freed (they were leaked).
int main (int argc, char *argv[])
{
    float *h_phi;
    float *h_oldphi;
    int *h_mask;
    int nsize1=sizeof(float)*NPTSX*NPTSY;
    int nsize2=sizeof(int)*NPTSX*NPTSY;
    h_phi = (float *)malloc(nsize1);
    h_oldphi = (float *)malloc(nsize1);
    h_mask = (int *)malloc(nsize2);
    if (h_phi == NULL || h_oldphi == NULL || h_mask == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    setup_grid (h_oldphi, NPTSX, NPTSY, h_mask);
    performUpdates(h_phi,h_oldphi,h_mask,NPTSX,NPTSY,NSTEPS);
    output_array (h_phi, NPTSX, NPTSY);
    free(h_phi);
    free(h_oldphi);
    free(h_mask);
    return 0;
}
|
12,792 | /*
============================================================================
Name : CudaProject.cu
Author : Adrianna Urbańska, Gabriel Chęć
Version :
Copyright :
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 32
// Managed-memory integer matrix: size_x rows, size_y columns, row-major
// data with row stride `stride` (in elements). Storage lives in CUDA
// unified memory so both host helpers (*Cpu) and kernels can touch it.
class CudaObject {
public:
    int size_x;   // number of rows
    int size_y;   // number of columns
    int *data;    // row-major element storage (unified memory)
    int stride;   // row stride in elements (>= size_y)
    int bytes;    // size of `data` in bytes
    __host__ __device__
    // BUG FIX: data/bytes were previously left uninitialized by this ctor.
    CudaObject(int x, int y, int stride ): size_x(x), size_y(y), data(NULL), stride(stride), bytes(x * y * (int)sizeof(int)) {}
    __host__ __device__
    // Shallow copy: shares the same storage. (bytes is now copied too.)
    CudaObject(const CudaObject &a): size_x(a.size_x), size_y(a.size_y),data(a.data),stride(a.stride),bytes(a.bytes){}
    __device__
    int getElement(int row, int col){
        return data[row * stride + col];
    }
    __device__
    void setElement(int row, int col, int val){
        data[row * stride + col] = val;
    }
    __device__
    // View of the BLOCK_SIZE x BLOCK_SIZE tile at tile coordinates
    // (row, col); shares storage with this matrix — no copy is made.
    CudaObject cutMatrix(int row, int col){
        CudaObject tmp(BLOCK_SIZE, BLOCK_SIZE, stride);
        tmp.data = &data[stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
        return tmp;
    }
    __host__
    // Print the matrix row by row (row-major, row length size_y).
    void writeOut(){
        for(int i = 0; i < size_x; i++){
            std::cout<<"| ";
            for(int j = 0; j < size_y; j++){
                std::cout<<data[i * size_y + j]<<" ";
            }
            std::cout<<"|"<<std::endl;
        }
        std::cout<<"\n";
    }
    // Resize and (re)allocate managed storage.
    // NOTE(review): does not free a previous allocation — resizing an
    // already-allocated matrix leaks the old buffer.
    void setSize(int x, int y){
        this->size_x = x;
        this->size_y = y;
        this->bytes = x * y * sizeof(int);
        cudaMallocManaged(&this->data, this->bytes);
    }
    // Element-wise sum on the CPU; requires matching shapes.
    // BUG FIXES: row-major indexing now uses the row length (size_y), not
    // size_x (wrong for non-square matrices); the prefetch of this->data
    // now happens AFTER setSize reallocates it, not before.
    void addCpu(CudaObject &fData, CudaObject &sData){
        if(fData.size_x == sData.size_x && fData.size_y == sData.size_y){
            cudaMemPrefetchAsync(fData.data, fData.bytes, cudaCpuDeviceId);
            cudaMemPrefetchAsync(sData.data, sData.bytes, cudaCpuDeviceId);
            this->setSize(sData.size_x, sData.size_y);
            cudaMemPrefetchAsync(this->data, this->bytes, cudaCpuDeviceId);
            for(int i = 0; i < sData.size_x; i++){
                for(int j = 0; j < sData.size_y; j++){
                    this->data[i * this->size_y + j] = fData.data[i * this->size_y + j] + sData.data[i * this->size_y + j];
                }
            }
        }
    }
    // Element-wise difference on the CPU; same fixes as addCpu.
    void subCpu(CudaObject &fData, CudaObject &sData){
        if(fData.size_x == sData.size_x && fData.size_y == sData.size_y){
            cudaMemPrefetchAsync(fData.data, fData.bytes, cudaCpuDeviceId);
            cudaMemPrefetchAsync(sData.data, sData.bytes, cudaCpuDeviceId);
            this->setSize(sData.size_x, sData.size_y);
            cudaMemPrefetchAsync(this->data, this->bytes, cudaCpuDeviceId);
            for(int i = 0; i < sData.size_x; i++){
                for(int j = 0; j < sData.size_y; j++){
                    this->data[i * this->size_y + j] = fData.data[i * this->size_y + j] - sData.data[i * this->size_y + j];
                }
            }
        }
    }
    // Naive CPU matrix product into this matrix; assumes this->data is
    // already allocated with size_x x size_y == fData.size_x x sData.size_y.
    void mulCpu(CudaObject &fData, CudaObject &sData){
        int y_s = sData.size_y;
        int y_f = fData.size_y;
        for(int i = 0; i < size_x; i++ ){
            for(int j = 0; j < size_y; j++ ){
                int s = 0;
                for(int k = 0; k < y_f; k++ )
                    s += fData.data[i * y_f + k] * sData.data[k * y_s + j];
                this->data[i * y_s + j] = s;
            }
        }
    }
    // CPU transpose: resizes this matrix to the transposed shape and fills
    // element n (row n/x, col n%x of the transpose) from iData[col][row].
    void tranCpu(CudaObject &iData){
        int x = iData.size_x;
        int y = iData.size_y;
        this->setSize(y,x);
        for(int n = 0; n < x * y; n++){
            int i = n/x;
            int j = n%x;
            this->data[n] = iData.data[y * j + i];
        }
    }
};
// Grid-stride element-wise addition over the x*y elements of two matrices.
__global__ void add(int *fData, int *sData, int *oData, int x, int y){
    const int total = x * y;
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < total; i += step)
        oData[i] = fData[i] + sData[i];
}
// Grid-stride element-wise subtraction over the x*y elements of two matrices.
__global__ void sub(int *fData, int *sData, int *oData, int x, int y){
    const int total = x * y;
    const int step = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < total; i += step)
        oData[i] = fData[i] - sData[i];
}
// Tiled matrix multiply C = A * B using BLOCK_SIZE x BLOCK_SIZE shared-memory
// tiles: each block computes one tile of C, each thread one element, walking
// tiles along the shared dimension and zero-padding out-of-range loads.
__global__ void mul(CudaObject a,CudaObject b, CudaObject c) {
int cutRow = blockIdx.y ;  // tile row in C
int cutCol = blockIdx.x;   // tile column in C
int fRow = blockIdx.y * blockDim.y + threadIdx.y;  // global row in C
int fCol = blockIdx.x * blockDim.x + threadIdx.x;  // global column in C
int row = threadIdx.y;     // row within the tile
int col = threadIdx.x;     // column within the tile
int temp = 0;              // dot-product accumulator for C[fRow][fCol]
CudaObject cutMatC = c.cutMatrix(cutRow, cutCol);
// Walk tiles along the shared (inner) dimension of A and B.
for( int v = 0; v < ((a.size_y + BLOCK_SIZE - 1)/BLOCK_SIZE); ++v){
CudaObject cutMatA = a.cutMatrix(cutRow, v); //cut input matrix vector which can fit inside block
CudaObject cutMatB = b.cutMatrix(v, cutCol);
__shared__ int A[BLOCK_SIZE][BLOCK_SIZE]; //Matrix which can share memory between threads
__shared__ int B[BLOCK_SIZE][BLOCK_SIZE];
// Zero-pad loads that fall outside the matrices.
// NOTE(review): these guards compare the intra-tile `row`/`col` against
// whole-matrix dimensions; for grids with more than one tile row/column
// they look like they should involve the tile's global offsets — verify.
if((row < a.size_x) && ((col + v * BLOCK_SIZE) < a.size_y)){
A[row][col] = cutMatA.getElement(row, col);
}
else{
A[row][col] = 0;
}
if((col < b.size_y) && ((row + v * BLOCK_SIZE) < b.size_x)){
B[row][col] = cutMatB.getElement(row, col);
}
else{
B[row][col] = 0;
}
__syncthreads(); //make sure that every matrix tile is filled
for (int i = 0; i < BLOCK_SIZE; ++i){
temp += A[row][i] * B[i][col];
}
// Barrier before the next iteration overwrites the shared tiles.
__syncthreads();
}
if(fRow < c.size_x && fCol < c.size_y)
c.setElement(fRow, fCol, temp);
}
// Grid-stride transpose: output element n (row n/x, col n%x of the
// transposed matrix) is read from iData at [col][row].
__global__ void tran(CudaObject iData, CudaObject oData){
    const int x = iData.size_x;
    const int y = iData.size_y;
    const int total = x * y;
    const int step = blockDim.x * gridDim.x;
    for (int n = threadIdx.x + blockIdx.x * blockDim.x; n < total; n += step) {
        const int i = n / x;
        const int j = n % x;
        oData.data[n] = iData.data[y * j + i];
    }
}
// Print the interactive menu of supported matrix operations.
void OperationsInfo()
{
    const char* items[] = {
        "Choose an operation:",
        "1. Matrix addition on CPU",
        "2. Matrix addition on GPU",
        "3. Matrix substraction on CPU",
        "4. Matrix substraction on GPU",
        "5. Matrix multiplication on CPU",
        "6. Matrix multiplication on GPU",
        "7. Matrix transposition on CPU",
        "8. Matrix transposition on GPU"
    };
    for (int i = 0; i < 9; ++i)
        std::cout<<items[i]<<std::endl;
}
// Fill every element of the matrix with the constant `val`.
void Init(CudaObject &oData, int val)
{
    const int cols = oData.size_x;
    const int rows = oData.size_y;
    for (int r = 0; r < rows; ++r)
        for (int c = 0; c < cols; ++c)
            oData.data[r*cols + c] = val;
}
// Interactive driver: read two matrices' shapes and fill values, then run
// the chosen CPU/GPU operation and print the result.
// Fix vs the original: the final error check initialised `err` to
// cudaSuccess and never updated it, so it could never fire — it now queries
// the runtime with cudaGetLastError().
int main(){
    int operation;
    int N_1, N_2, M_1, M_2;
    int val_1, val_2;
    std::cout<<"Enter the values of size_x, size_y of the first matrix and value to filled matrix:"<<std::endl;
    std::cin>>N_1;
    std::cin>>M_1;
    std::cin>>val_1;
    std::cout<<"Enter the values of size_x, size_y of the second matrix and value to filled matrix:"<<std::endl;
    std::cin>>N_2;
    std::cin>>M_2;
    std::cin>>val_2;
    CudaObject fData(N_1, M_1, M_1), sData(N_2, M_2, M_2), oData(N_1, M_2, M_2);
    cudaMallocManaged(&fData.data,N_1 * M_1 * sizeof(int));
    cudaMallocManaged(&sData.data, N_2 * M_2 * sizeof(int));
    cudaMallocManaged(&oData.data, N_1 * M_2 * sizeof(int));
    Init(fData,val_1);
    Init(sData,val_2);
    fData.writeOut();
    sData.writeOut();
    OperationsInfo();
    std::cin>>operation;
    dim3 threadsPerBlock(BLOCK_SIZE,BLOCK_SIZE);
    dim3 blocksPerGrid((fData.size_y + threadsPerBlock.x - 1)/threadsPerBlock.x, (sData.size_x + threadsPerBlock.y - 1)/threadsPerBlock.y);
    switch(operation)
    {
    case 1:
        if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){
            std::cout<<"Matrices sizes have to be equal!"<<std::endl;
        }
        else{
            oData.addCpu(fData,sData);
            oData.writeOut();
        }
        break;
    case 2:
        if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){
            std::cout<<"Matrices sizes have to be equal!"<<std::endl;
        }
        else{
            add<<<blocksPerGrid,threadsPerBlock>>>(fData.data, sData.data, oData.data, oData.size_x, oData.size_y);
            cudaDeviceSynchronize();
            oData.writeOut();
        }
        break;
    case 3:
        if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){
            std::cout<<"Matrices sizes have to be equal!"<<std::endl;
        }
        else{
            oData.subCpu(fData,sData);
            oData.writeOut();
        }
        break;
    case 4:
        if(fData.size_x != sData.size_x || fData.size_y != sData.size_y){
            std::cout<<"Matrices sizes have to be equal!"<<std::endl;
        }
        else{
            sub<<<blocksPerGrid,threadsPerBlock>>>(fData.data, sData.data, oData.data, oData.size_x, oData.size_y);
            cudaDeviceSynchronize();
            oData.writeOut();
        }
        break;
    case 5:
        if(fData.size_y != sData.size_x){
            std::cout<<"Size_x of the first matrix and size_y of the second matrix have to be equal!"<<std::endl;
        }
        else{
            oData.mulCpu(fData,sData);
            oData.writeOut();
        }
        break;
    case 6:
        if(fData.size_y != sData.size_x){
            std::cout<<"Size_x of the first matrix and size_y of the second matrix have to be equal!"<<std::endl;
        }
        else{
            mul<<<blocksPerGrid, threadsPerBlock>>>(fData, sData, oData);
            cudaDeviceSynchronize();
            oData.writeOut();
        }
        break;
    case 7:
        std::cout<<"Transposition of the first matrix: "<<std::endl;
        oData.tranCpu(fData);
        oData.writeOut();
        std::cout<<"Transposition of the second matrix: "<<std::endl;
        oData.tranCpu(sData);
        oData.writeOut();
        break;
    case 8:
        std::cout<<"Transposition of the first matrix: "<<std::endl;
        oData.setSize(M_1,N_1);
        tran<<<blocksPerGrid, threadsPerBlock>>>(fData, oData);
        cudaDeviceSynchronize();
        oData.writeOut();
        std::cout<<"Transposition of the second matrix: "<<std::endl;
        oData.setSize(M_2,N_2);
        tran<<<blocksPerGrid, threadsPerBlock>>>(sData, oData);
        cudaDeviceSynchronize();
        oData.writeOut();
        break;
    default:
        std::cout<<"Wrong number entered!"<<std::endl;
        break;
    }
    // BUG FIX: query the runtime for the last error instead of testing a
    // constant that was always cudaSuccess.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess){
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    cudaFree(fData.data);
    cudaFree(sData.data);
    cudaFree(oData.data);
}
|
12,793 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 1048576
#define THREAD_NUM 1024
typedef int ARRAY_TYPE;
// Stage 1 of the three-kernel scan: Hillis-Steele inclusive prefix sum of
// each THREAD_NUM-element block in shared memory; the block total (last
// element of the scanned slice) is published to t[blockIdx.x].
// Launched <<<N/THREAD_NUM, THREAD_NUM>>>.
__global__ void prefixSum_gpu_1(ARRAY_TYPE *a, ARRAY_TYPE *t)
{
int i;
ARRAY_TYPE temp;
__shared__ ARRAY_TYPE a_shared[THREAD_NUM];
// Stage this block's slice of `a` into shared memory.
a_shared[threadIdx.x] = a[threadIdx.x + blockIdx.x * THREAD_NUM];
__syncthreads();
for(i=1;i<1024;i*=2){
// Read phase: grab the partner value at distance i...
if(threadIdx.x >= i){
temp = a_shared[threadIdx.x - i];
}
__syncthreads();
// ...write phase: accumulate only after all reads of this round finished,
// so a partner's slot is never overwritten while still being read.
if(threadIdx.x >= i){
a_shared[threadIdx.x] += temp;
}
__syncthreads();
}
// Last thread holds the block total; publish it for the stage-2 scan.
if(threadIdx.x == THREAD_NUM-1){
t[blockIdx.x] = a_shared[threadIdx.x];
// printf("%d\n", t[blockIdx.x]);
}
}
// Stage 2: inclusive scan of the per-block totals t -> p with a single
// block. Requires N/THREAD_NUM <= THREAD_NUM (here 1048576/1024 == 1024).
// Launched <<<1, THREAD_NUM>>>.
__global__ void prefixSum_gpu_2(ARRAY_TYPE *t, ARRAY_TYPE *p)
{
int i;
ARRAY_TYPE temp;
__shared__ ARRAY_TYPE t_shared[THREAD_NUM];
t_shared[threadIdx.x] = t[threadIdx.x];
__syncthreads();
// Same two-phase (read, barrier, write, barrier) Hillis-Steele rounds as
// in stage 1.
for(i=1;i<1024;i*=2){
if(threadIdx.x >= i){
temp = t_shared[threadIdx.x - i];
}
__syncthreads();
if(threadIdx.x >= i){
t_shared[threadIdx.x] += temp;
}
__syncthreads();
}
p[threadIdx.x] = t_shared[threadIdx.x];
}
// Stage 3: re-scan each block of `a` (as in stage 1) and add the exclusive
// prefix of the block totals — p[blockIdx.x - 1], or 0 for the first block —
// to produce the final global inclusive prefix sum in b.
// Launched <<<N/THREAD_NUM, THREAD_NUM>>>.
__global__ void prefixSum_gpu_3(ARRAY_TYPE *p, ARRAY_TYPE *a, ARRAY_TYPE *b)
{
int i;
ARRAY_TYPE psum, temp;
__shared__ ARRAY_TYPE a_shared[THREAD_NUM];
// Sum of all elements in the blocks preceding this one.
if(blockIdx.x > 0){
psum = p[blockIdx.x-1];
}else{
psum = 0;
}
a_shared[threadIdx.x] = a[threadIdx.x + blockIdx.x * THREAD_NUM];
__syncthreads();
// Two-phase Hillis-Steele rounds, identical to stage 1.
for(i=1;i<1024;i*=2){
if(threadIdx.x >= i){
temp = a_shared[threadIdx.x - i];
}
__syncthreads();
if(threadIdx.x >= i){
a_shared[threadIdx.x] += temp;
}
__syncthreads();
}
b[threadIdx.x + blockIdx.x * THREAD_NUM] = a_shared[threadIdx.x] + psum;
}
// Sequential inclusive prefix sum over the fixed-size input (N elements):
// b[i] = a[0] + ... + a[i]. Serves as the CPU reference for the GPU scan.
__host__ void prefixSum_cpu(ARRAY_TYPE *a, ARRAY_TYPE *b)
{
int i;
b[0] = a[0];
for(i=1;i<N;i++){
b[i] = b[i-1] + a[i];
}
}
// Driver: computes a 2^20-element inclusive prefix sum on the CPU
// (reference) and on the GPU (three-kernel scan), timing each GPU phase
// with CUDA events and verifying the result element by element.
// Fixes vs the original: the four device buffers and the two events are
// now released (they were leaked).
int main(int argc, char *argv[])
{
    int i;
    struct timeval s, e;
    cudaEvent_t dev_start, dev_stop;
    float kernel1, kernel2, kernel3, h2dTime, d2hTime;
    cudaEventCreate(&dev_start);
    cudaEventCreate(&dev_stop);
    ARRAY_TYPE *a_host, *b_host;
    ARRAY_TYPE *a_dev, *b_dev, *t_dev, *p_dev;
    ARRAY_TYPE check;
    a_host = (ARRAY_TYPE*)malloc(sizeof(ARRAY_TYPE)*N);
    b_host = (ARRAY_TYPE*)malloc(sizeof(ARRAY_TYPE)*N);
    for(i=0;i<N;i++){
        a_host[i] = rand()%10;
    }
    // CPU reference (wall-clock timed).
    gettimeofday(&s, NULL);
    prefixSum_cpu(a_host, b_host);
    gettimeofday(&e, NULL);
    printf("time = %lf [ms]\n", (e.tv_sec - s.tv_sec) + (e.tv_usec - s.tv_usec)*1.0E-6 * 1000);
    printf("%d\n", b_host[N-1]);
    cudaMalloc(&a_dev, sizeof(ARRAY_TYPE)*N);
    cudaMalloc(&b_dev, sizeof(ARRAY_TYPE)*N);
    cudaMalloc(&t_dev, sizeof(ARRAY_TYPE)*(N/THREAD_NUM));
    cudaMalloc(&p_dev, sizeof(ARRAY_TYPE)*THREAD_NUM);
    // Host-to-device transfer, timed with events.
    cudaEventRecord(dev_start, 0);
    cudaMemcpy(a_dev, a_host, sizeof(ARRAY_TYPE)*N, cudaMemcpyHostToDevice);
    cudaEventRecord(dev_stop, 0);
    cudaEventSynchronize(dev_stop);
    cudaEventElapsedTime(&h2dTime, dev_start, dev_stop);
    // Stage 1: per-block scans + block totals.
    cudaEventRecord(dev_start, 0);
    prefixSum_gpu_1<<<N/THREAD_NUM, THREAD_NUM>>>(a_dev, t_dev);
    cudaEventRecord(dev_stop, 0);
    cudaEventSynchronize(dev_stop);
    cudaEventElapsedTime(&kernel1, dev_start, dev_stop);
    // Stage 2: scan of the block totals.
    cudaEventRecord(dev_start, 0);
    prefixSum_gpu_2<<<1, THREAD_NUM>>>(t_dev, p_dev);
    cudaEventRecord(dev_stop, 0);
    cudaEventSynchronize(dev_stop);
    cudaEventElapsedTime(&kernel2, dev_start, dev_stop);
    // Stage 3: combine per-block scans with scanned block totals.
    cudaEventRecord(dev_start, 0);
    prefixSum_gpu_3<<<N/THREAD_NUM, THREAD_NUM>>>(p_dev, a_dev, b_dev);
    cudaEventRecord(dev_stop, 0);
    cudaEventSynchronize(dev_stop);
    cudaEventElapsedTime(&kernel3, dev_start, dev_stop);
    // Device-to-host transfer, timed.
    cudaEventRecord(dev_start, 0);
    cudaMemcpy(b_host, b_dev, sizeof(ARRAY_TYPE)*N, cudaMemcpyDeviceToHost);
    cudaEventRecord(dev_stop, 0);
    cudaEventSynchronize(dev_stop);
    cudaEventElapsedTime(&d2hTime, dev_start, dev_stop);
    printf("%d\n", b_host[N-1]);
    // Element-by-element verification against a running CPU sum.
    check = 0;
    for(i=0;i<N;i++){
        check += a_host[i];
        if(check != b_host[i]){
            printf("error at %d\n", i);
            break;
        }
    }
    printf("HostToDevice : %f [ms]\n", h2dTime);
    printf("kernel 1 : %f [ms]\n", kernel1);
    printf("kernel 2 : %f [ms]\n", kernel2);
    printf("kernel 3 : %f [ms]\n", kernel3);
    printf("deviceToHost : %f [ms]\n", d2hTime);
    printf("gpuTotal : %f [ms]\n", h2dTime + kernel1 + kernel2 + kernel3 + d2hTime);
    // Release device memory and events (previously leaked).
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(t_dev);
    cudaFree(p_dev);
    cudaEventDestroy(dev_start);
    cudaEventDestroy(dev_stop);
    free(a_host);
    free(b_host);
    return 0;
}
|
12,794 | #include "optimizer.hh"
#include <cassert>
#include "graph.hh"
#include "node.hh"
namespace rt
{
namespace
{
// Splitting cannot produce pieces smaller than this many elements.
constexpr std::size_t MIN_NODES_SIZE = 1024;
// Upper bound on how many parallel pieces one op is divided into.
constexpr std::size_t MAX_NODES = 8;
// Piece sizes are rounded up to a multiple of this value.
constexpr std::size_t SIZE_DIVISOR = 8;

// Decide how to split `total` elements into `divs` pieces of `size`
// elements each (the last piece may be smaller).  Guarantees
// divs * size >= total, 1 <= divs <= MAX_NODES, and that size is a
// multiple of SIZE_DIVISOR whenever divs > 1.
void elemwhise_size(std::size_t total, std::size_t& divs, std::size_t& size)
{
    std::size_t pieces =
        std::max<std::size_t>(1, std::min<std::size_t>(total / MIN_NODES_SIZE, MAX_NODES));
    std::size_t chunk = total / pieces;
    // Round up so the pieces cover every element.
    while (pieces * chunk < total)
        ++chunk;
    // Align the piece size when the op is actually divided.
    if (pieces > 1)
        while (chunk % SIZE_DIVISOR != 0)
            ++chunk;
    divs = pieces;
    size = chunk;
}
// Signature shared by every per-op optimizer below; dispatched via optis_list.
using opti_f = Node* (*)(Graph&, Node*, const std::vector<Node*>&);

// Divide a (len1 x len2) * (len2 x len3) matrix product into parallel
// sub-products, cutting either the rows of in1 or the columns of in2.
Node* opti_mat_mat_mul(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    const std::size_t nm = node->len1;  // rows of in1 / rows of out
    const std::size_t nn = node->len2;  // cols of in1 == rows of in2
    const std::size_t np = node->len3;  // cols of in2 / cols of out
    std::size_t n;
    std::size_t m;
    // Choose piece count/size from the larger output dimension.
    if (nm > np) {
        elemwhise_size(nm, n, m);
    } else {
        elemwhise_size(np, n, m);
    }
    if (n < 2)
    {
        // Too small to be worth dividing: emit a single op node.
        auto res = Node::op_mat_mat_mul(node->in1, node->in2, node->out1,
            node->len1, node->len2, node->len3, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    // NOTE(review): the piece size above is computed by comparing nm with np,
    // but the branch below compares len1 with len2; when len2 lies between the
    // two, a split can use a size computed for the other axis — confirm intent.
    if (node->len1 > node->len2) {
        // Row split: piece i covers rows [i*m, (i+1)*m) of in1/out.
        for (std::size_t i = 0; i < n - 1; ++i)
            div_nodes.push_back(Node::op_mat_mat_mul(node->in1 + i * m * nn, node->in2,
                node->out1 + i * m * np, m, nn, np, preds));
        // Last piece takes whatever rows remain.
        div_nodes.push_back(Node::op_mat_mat_mul(node->in1 + (n - 1) * m * nn, node->in2,
            node->out1 + (n - 1) * m * np, nm - (n - 1) * m, nn, np, preds));
    } else {
        // Column split of in2.  NOTE(review): the output offset i * m * np
        // assumes each piece's result is stored contiguously — verify against
        // op_mat_mat_mul's output layout.
        for (std::size_t i = 0; i < n - 1; ++i)
            div_nodes.push_back(Node::op_mat_mat_mul(node->in1, node->in2 + i * m,
                node->out1 + i * m * np, nm, nn, m, preds));
        div_nodes.push_back(Node::op_mat_mat_mul(node->in1, node->in2 + (n - 1) * m,
            node->out1 + (n - 1) * m * np, nm, nn, np - (n - 1) * m, preds));
    }
    for (auto n : div_nodes)
        graph.add(n);
    // Join the pieces behind a single no-op barrier node.
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}
// Row-vector broadcast add over a (len1 x len2) matrix, divided by rows.
Node* opti_mat_rvect_add(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);  // sized on the row count len1
    if (n < 2)
    {
        auto res = Node::op_mat_rvect_add(node->in1, node->in2, node->out1,
            node->len1, node->len2, preds);
        graph.add(res);
        return res;
    }
    const std::size_t nv = node->len2;  // row width; in2 is shared by all pieces
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_mat_rvect_add(node->in1 + (i * m * nv), node->in2,
            node->out1 + (i * m * nv), m, nv, preds));
    // Remainder piece: the rows not covered by the n-1 equal pieces.
    div_nodes.push_back(Node::op_mat_rvect_add(node->in1 + (n - 1) * m * nv, node->in2,
        node->out1 + (n - 1) * m * nv, node->len1 - ((n - 1) * m), nv, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Element-wise sigmoid over len1 values, divided into equal chunks.
Node* opti_sigmoid(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_sigmoid(node->in1, node->out1,
            node->len1, preds);
        //res->use_simd = node->len1 % SIZE_DIVISOR == 0;
        graph.add(res);
        return res;
    }
    const dbl_t* in = node->in1;
    dbl_t* out = node->out1;
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_sigmoid(in + i * m, out + i * m,
            m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_sigmoid(in + (n - 1) * m, out + (n - 1) * m,
        node->len1 - ((n - 1) * m), preds));
    for (auto n : div_nodes)
    {
        //n->use_simd = true;
        graph.add(n);
    }
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}
// Mean squared error: kept as a single op (scalar reduction, not divided).
Node* opti_mse(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_mse(node->in1, node->in2, node->out1,
        node->len1, node->len2, preds);
    graph.add(res);
    return res;
}

// Row-wise softmax over a (len1 x len2) matrix, divided by rows.
Node* opti_softmax(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_softmax(node->in1, node->out1, node->len1, node->len2, preds);
        graph.add(res);
        return res;
    }
    std::size_t nv = node->len2;  // row width
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_softmax(node->in1 + (i * m * nv), node->out1 + (i * m * nv),
            m, nv, preds));
    // Remainder rows.
    div_nodes.push_back(Node::op_softmax(node->in1 + (n - 1) * m * nv,
        node->out1 + (n - 1) * m * nv, node->len1 - ((n - 1) * m), nv, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Row-wise log-softmax, divided by rows (same scheme as opti_softmax).
Node* opti_log_softmax(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_log_softmax(node->in1, node->out1, node->len1, node->len2, preds);
        graph.add(res);
        return res;
    }
    std::size_t nv = node->len2;
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_log_softmax(node->in1 + (i * m * nv),
            node->out1 + (i * m * nv), m, nv, preds));
    div_nodes.push_back(Node::op_log_softmax(node->in1 + (n - 1) * m * nv,
        node->out1 + (n - 1) * m * nv, node->len1 - ((n - 1) * m), nv, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Softmax + cross-entropy loss: single op (scalar reduction, not divided).
Node* opti_softmax_cross_entropy(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_softmax_cross_entropy(node->in1, node->in2, node->out1,
        node->len1, node->len2, preds);
    graph.add(res);
    return res;
}

// 2D convolution: emitted as-is (no divide strategy implemented for it).
Node* opti_conv2d(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_conv2d(node->in1, node->in2, node->intconst,
        node->int_cons1, node->int_cons2, node->out1,
        node->sizes1, node->sizes2, preds);
    graph.add(res);
    return res;
}
// Element-wise ReLU over len1 values, divided into equal chunks.
Node* opti_relu(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_relu(node->in1, node->out1,
            node->len1, preds);
        graph.add(res);
        return res;
    }
    const dbl_t* in = node->in1;
    dbl_t* out = node->out1;
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_relu(in + i * m, out + i * m, m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_relu(in + (n - 1) * m, out + (n - 1) * m,
        node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Element-wise leaky ReLU; alpha_leaky is shared by every chunk.
Node* opti_relu_leaky(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_relu_leaky(node->in1, node->out1,
            node->len1, node->alpha_leaky, preds);
        graph.add(res);
        return res;
    }
    const dbl_t* in = node->in1;
    dbl_t* out = node->out1;
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_relu_leaky(in + i * m, out + i * m,
            m, node->alpha_leaky, preds));
    div_nodes.push_back(Node::op_relu_leaky(in + (n - 1) * m, out + (n - 1) * m,
        node->len1 - (n - 1) * m, node->alpha_leaky, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Element-wise tanh, divided into equal chunks.
Node* opti_tanh(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_tanh(node->in1, node->out1,
            node->len1, preds);
        graph.add(res);
        return res;
    }
    const dbl_t* in = node->in1;
    dbl_t* out = node->out1;
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_tanh(in + i * m, out + i * m,
            m, preds));
    div_nodes.push_back(Node::op_tanh(in + (n - 1) * m, out + (n - 1) * m,
        node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}
// Element-wise MSE gradient (two inputs, one output), divided into chunks.
Node* opti_mse_grad(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_mse_grad(node->in1, node->in2, node->out1,
            node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_mse_grad(node->in1 + i * m, node->in2 + i * m,
            node->out1 + i * m, m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_mse_grad(node->in1 + (n - 1) * m, node->in2 + (n - 1) * m,
        node->out1 + (n - 1) * m, node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Element-wise sigmoid gradient, divided into chunks.
Node* opti_sigmoid_grad(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_sigmoid_grad(node->in1, node->in2, node->out1,
            node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_sigmoid_grad(node->in1 + i * m, node->in2 + i * m,
            node->out1 + i * m, m, preds));
    div_nodes.push_back(Node::op_sigmoid_grad(node->in1 + (n - 1) * m, node->in2 + (n - 1) * m,
        node->out1 + (n - 1) * m, node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}
// Fused matmul + add: emitted as-is (no divide strategy implemented).
Node* opti_mat_mul_add(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_mat_mul_add(node->in1, node->in2, node->in3,
        node->out1, node->len1, node->len2, node->len3,
        preds);
    graph.add(res);
    return res;
}

// Transposed-left matmul: emitted as-is.
Node* opti_tmat_mat_mul(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_tmat_mat_mul(node->in1, node->in2, node->out1,
        node->len1, node->len2, node->len3, preds);
    graph.add(res);
    return res;
}

// Transposed-right matmul: emitted as-is.
Node* opti_mat_tmat_mul(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_mat_tmat_mul(node->in1, node->in2, node->out1,
        node->len1, node->len2, node->len3, preds);
    graph.add(res);
    return res;
}

// Row-sum reduction: emitted as-is.
Node* opti_mat_sum_rows(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_mat_sum_rows(node->in1, node->out1,
        node->len1, node->len2, preds);
    graph.add(res);
    return res;
}

// Column-sum reduction: emitted as-is.
Node* opti_mat_sum_cols(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_mat_sum_cols(node->in1, node->out1,
        node->len1, node->len2, preds);
    graph.add(res);
    return res;
}

// Softmax cross-entropy gradient: emitted as-is.
Node* opti_softmax_cross_entropy_grad(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_softmax_cross_entropy_grad(node->in1, node->in2, node->out1,
        node->len1, node->len2, preds);
    graph.add(res);
    return res;
}
// Element-wise ReLU gradient, divided into chunks.
Node* opti_relu_grad(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_relu_grad(node->in1, node->in2, node->out1,
            node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_relu_grad(node->in1 + i * m, node->in2 + i * m,
            node->out1 + i * m, m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_relu_grad(node->in1 + (n - 1) * m, node->in2 + (n - 1) * m,
        node->out1 + (n - 1) * m, node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Conv2d bias add: emitted as-is.
Node* opti_conv2d_bias_add(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_conv2d_bias_add(node->in1, node->in2, node->out1,
        node->sizes1, preds);
    graph.add(res);
    return res;
}

// SGD-style parameter update, divided into chunks.
// in2 is shared by every piece (presumably the learning rate) — not offset.
Node* opti_update(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_update(node->out1, node->in1, node->in2,
            node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_update(node->out1 + i * m, node->in1 + i * m, node->in2, m, preds));
    div_nodes.push_back(Node::op_update(node->out1 + (n - 1) * m, node->in1 + (n - 1) * m,
        node->in2, node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Sigmoid cross-entropy loss: emitted as-is (scalar reduction).
Node* opti_sigmoid_cross_entropy(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_sigmoid_cross_entropy(node->in1, node->in2, node->out1,
        node->len1, preds);
    graph.add(res);
    return res;
}
// Element-wise sigmoid cross-entropy gradient, divided into chunks.
Node* opti_sigmoid_cross_entropy_grad(Graph& graph, Node* node,
    const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_sigmoid_cross_entropy_grad(node->in1, node->in2, node->out1,
            node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_sigmoid_cross_entropy_grad(node->in1 + i * m, node->in2 + i * m,
            node->out1 + i * m, m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_sigmoid_cross_entropy_grad(node->in1 + (n - 1) * m, node->in2 + (n - 1) * m,
        node->out1 + (n - 1) * m, node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Conv2d input gradient: emitted as-is.
// The first entry of input_size is left 0 — presumably filled in by the op
// or unused; confirm against op_conv2d_input_grad.
Node* opti_conv2d_input_grad(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    int input_size[] = {0, node->intconst2[0], node->intconst2[1]};
    auto res = Node::op_conv2d_input_grad(node->in1, node->in2,
        node->intconst, node->out1,
        node->sizes1, node->sizes2,
        input_size, preds);
    graph.add(res);
    return res;
}

// Conv2d kernel gradient: emitted as-is.
Node* opti_conv2d_kernel_grad(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_conv2d_kernel_grad(node->in1, node->in2,
        node->intconst, node->out1,
        node->sizes1, node->sizes2,
        node->intconst2, preds);
    graph.add(res);
    return res;
}

// Argmax accuracy metric: emitted as-is.
Node* opti_argmax_acc(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_argmax_acc(node->in1, node->in2, node->out1,
        node->len1, node->len2, preds);
    graph.add(res);
    return res;
}
// Momentum buffer update, divided into chunks; cons1/cons2 are scalar
// hyper-parameters shared by every piece.
Node* opti_moment_update(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_moment_update(node->out1, node->in1, node->cons1,
            node->cons2, node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_moment_update(node->out1 + i * m, node->in1 + i * m,
            node->cons1, node->cons2, m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_moment_update(node->out1 + (n - 1) * m, node->in1 + (n - 1) * m,
        node->cons1, node->cons2, node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Second-moment buffer update (same scheme as opti_moment_update).
Node* opti_moment_update2(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_moment_update2(node->out1, node->in1, node->cons1,
            node->cons2, node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_moment_update2(node->out1 + i * m, node->in1 + i * m,
            node->cons1, node->cons2, m, preds));
    div_nodes.push_back(Node::op_moment_update2(node->out1 + (n - 1) * m, node->in1 + (n - 1) * m,
        node->cons1, node->cons2, node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}

// Adam update over two parameter/moment buffers, divided into chunks;
// cons1..cons4 are scalar hyper-parameters shared by every piece.
Node* opti_adam_update(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_adam_update(node->out1, node->out2,
            node->in1, node->in2,
            node->cons1, node->cons2,
            node->cons3, node->cons4,
            node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_adam_update(node->out1 + i * m, node->out2 + i * m,
            node->in1 + i * m, node->in2 + i * m,
            node->cons1, node->cons2,
            node->cons3, node->cons4,
            m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_adam_update(node->out1 + (n - 1) * m, node->out2 + (n - 1) * m,
        node->in1 + (n - 1) * m, node->in2 + (n - 1) * m,
        node->cons1, node->cons2,
        node->cons3, node->cons4,
        node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}
// Element-wise leaky-ReLU gradient, divided into chunks; cons1 is the
// (scalar) leak factor shared by every piece.
Node* opti_leaky_relu_grad(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_leaky_relu_grad(node->in1, node->in2, node->out1,
            node->cons1, node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    // BUGFIX: the chunk offsets used i * n (the piece COUNT) instead of
    // i * m (the piece SIZE), so every divided piece read/wrote the wrong
    // sub-range.  The remainder piece below and every sibling optimizer
    // (e.g. opti_relu_grad) use i * m.
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_leaky_relu_grad(node->in1 + i * m, node->in2 + i * m,
            node->out1 + i * m, node->cons1, m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_leaky_relu_grad(node->in1 + (n - 1) * m, node->in2 + (n - 1) * m,
        node->out1 + (n - 1) * m, node->cons1, node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}
// Conv2d bias-add gradient: emitted as-is.
Node* opti_conv2d_bias_add_grad(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_conv2d_bias_add_grad(node->in1, node->sizes1,
        node->out1, preds);
    graph.add(res);
    return res;
}
// Element-wise tanh gradient, divided into chunks.
Node* opti_tanh_grad(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_tanh_grad(node->in1, node->in2, node->out1, node->len1, preds);
        graph.add(res);
        return res;
    }
    std::vector<Node*> div_nodes;
    // BUGFIX: the chunk offsets used i * n (the piece COUNT) instead of
    // i * m (the piece SIZE), so every divided piece operated on the wrong
    // sub-range.  The remainder piece below and the sibling element-wise
    // optimizers (e.g. opti_sigmoid_grad) use i * m.
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_tanh_grad(node->in1 + i * m, node->in2 + i * m,
            node->out1 + i * m, m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_tanh_grad(node->in1 + (n - 1) * m, node->in2 + (n - 1) * m,
        node->out1 + (n - 1) * m, node->len1 - (n - 1) * m, preds));
    for (auto n : div_nodes)
        graph.add(n);
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}
// Transposed convolution: emitted as-is.
Node* opti_conv2d_transpose(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    auto res = Node::op_conv2d_transpose(node->in1, node->in2,
        node->sizes1, node->intconst,
        node->out1, node->sizes2,
        node->sizes3, preds);
    graph.add(res);
    return res;
}

// Transposed convolution input gradient: emitted as-is.
Node* opti_conv2d_transpose_input_grad(Graph& graph, Node* node,
    const std::vector<Node*>& preds)
{
    auto res = Node::op_conv2d_transpose_input_grad(node->in1, node->in2,
        node->intconst, node->out1,
        node->sizes1, node->sizes2,
        node->intconst2, preds);
    graph.add(res);
    return res;
}

// Transposed convolution kernel gradient: emitted as-is.
Node* opti_conv2d_transpose_kernel_grad(Graph& graph, Node* node,
    const std::vector<Node*>& preds)
{
    auto res = Node::op_conv2d_transpose_kernel_grad(node->in1, node->in2,
        node->intconst, node->out1,
        node->sizes1, node->sizes2,
        node->sizes3,
        preds);
    graph.add(res);
    return res;
}

// Element-wise addition over len1 values, divided into chunks.
Node* opti_add(Graph& graph, Node* node, const std::vector<Node*>& preds)
{
    std::size_t n;
    std::size_t m;
    elemwhise_size(node->len1, n, m);
    if (n < 2)
    {
        auto res = Node::op_add(node->in1, node->in2, node->out1,
            node->len1, preds);
        //res->use_simd = node->len1 % SIZE_DIVISOR == 0;
        graph.add(res);
        return res;
    }
    const dbl_t* in1 = node->in1;
    const dbl_t* in2 = node->in2;
    dbl_t* out = node->out1;
    std::vector<Node*> div_nodes;
    for (std::size_t i = 0; i < n - 1; ++i)
        div_nodes.push_back(Node::op_add(in1 + i * m, in2 + i * m,out + i * m,
            m, preds));
    // Remainder chunk.
    div_nodes.push_back(Node::op_add(in1 + (n - 1) * m, in2 + (n - 1) * m, out + (n - 1) * m,
        node->len1 - ((n - 1) * m), preds));
    for (auto n : div_nodes)
    {
        //n->use_simd = true;
        graph.add(n);
    }
    auto res = Node::nop(div_nodes);
    graph.add(res);
    return res;
}
// Dispatch table indexed by node->type (see opti_node below).
// NOTE(review): the order must match the op-type enum in node.hh exactly.
// The array is sized 64 with 37 initializers, so trailing slots are null
// pointers — dispatching an op type past opti_add would call through
// nullptr; verify the enum and this list stay in sync when adding ops.
opti_f optis_list[64] = {
    opti_mat_mat_mul,
    opti_mat_rvect_add,
    opti_sigmoid,
    opti_mse,
    opti_softmax,
    opti_log_softmax,
    opti_softmax_cross_entropy,
    opti_conv2d,
    opti_relu,
    opti_relu_leaky,
    opti_tanh,
    opti_mse_grad,
    opti_sigmoid_grad,
    opti_mat_mul_add,
    opti_tmat_mat_mul,
    opti_mat_tmat_mul,
    opti_mat_sum_rows,
    opti_mat_sum_cols,
    opti_softmax_cross_entropy_grad,
    opti_relu_grad,
    opti_conv2d_bias_add,
    opti_update,
    opti_sigmoid_cross_entropy,
    opti_sigmoid_cross_entropy_grad,
    opti_conv2d_input_grad,
    opti_conv2d_kernel_grad,
    opti_argmax_acc,
    opti_moment_update,
    opti_moment_update2,
    opti_adam_update,
    opti_leaky_relu_grad,
    opti_conv2d_bias_add_grad,
    opti_tanh_grad,
    opti_conv2d_transpose,
    opti_conv2d_transpose_input_grad,
    opti_conv2d_transpose_kernel_grad,
    opti_add
};
}
namespace
{
    // Recursively rewrite `node` (and its predecessors) into `graph`,
    // memoizing results in `optis` so shared subgraphs are rewritten once.
    Node* opti_node(Node* node, Graph& graph, std::map<Node*, Node*>& optis)
    {
        // Already rewritten: reuse the memoized result.
        auto it = optis.find(node);
        if (it != optis.end())
            return it->second;
        // Rewrite predecessors first so the new node can depend on them.
        std::vector<Node*> preds;
        for (auto n : node->preds)
            preds.push_back(opti_node(n, graph, optis));
        Node* res;
        if (node->type == Node::OP_NOP)
        {
            // Barrier nodes have no per-op optimizer; recreate them directly.
            res = Node::nop(preds);
            graph.add(res);
        }
        else
            res = optis_list[node->type](graph, node, preds);
        optis[node] = res;
        return res;
    }
}
// Build and return a new graph with every node rewritten by its optimizer.
// On return `optis` maps each original node to its optimized counterpart.
// The caller owns the returned Graph.
Graph* optimize(const Graph& graph, std::map<Node*, Node*>& optis)
{
    optis.clear();
    auto graph_opti = new Graph;
    for (auto node : graph.nodes())
        opti_node(node, *graph_opti, optis);
    return graph_opti;
}
// Map every node of `nodes` to its optimized counterpart recorded in
// `optis`; asserts when a node has no entry.
std::vector<Node*> convert_nodes(const std::vector<Node*> nodes,
    const std::map<Node*, Node*>& optis)
{
    std::vector<Node*> converted;
    converted.reserve(nodes.size());
    for (auto node : nodes)
    {
        auto found = optis.find(node);
        assert(found != optis.end());
        converted.push_back(found->second);
    }
    return converted;
}
}
|
12,795 | #include <stdio.h>
// Matrix multiplication kernel: one thread per output element of the
// Width x Width product d_P = d_M * d_N.
// NOTE: only threadIdx is used (blockIdx is ignored), so this kernel is
// only correct when launched with a single (Width x Width) block, as is
// done by main() in this file.
__global__ void MatrixMulKernel(float *d_M , float *d_N , float *d_P , int Width) {
    //2D Thread ID
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    //Pvalue stores the Pd element that is computed by the thread
    float Pvalue = 0.0;
    // Dot product of row ty of d_M with column tx of d_N.
    for(int k = 0; k < Width ; ++k) {
        float Mdelement = d_M[ty*Width + k];
        float Ndelement = d_N[k*Width + tx];
        Pvalue += (Mdelement*Ndelement);
    }
    d_P[ty*Width + tx] = Pvalue;
}
// Host driver: multiplies an all-1s matrix by an all-2s matrix on the GPU
// and prints the result (every entry should be Width * 1 * 2 = 16).
int main(void) {
    const int Width = 8;
    float h_Ma[Width][Width], h_Mb[Width][Width], h_Mc[Width][Width];
    float *Md, *Nd, *Pd;
    int i, j;
    // Fill the host inputs.
    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            h_Ma[i][j] = 1 ;
            h_Mb[i][j] = 2 ;
        }
    }
    int size = (Width*Width)*sizeof(float);
    // Transfer M and N to device memory.
    cudaMalloc((void**)&Md, size);
    cudaMemcpy(Md, h_Ma, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Nd, size);
    cudaMemcpy(Nd, h_Mb, size, cudaMemcpyHostToDevice);
    // Allocate P on the device.
    cudaMalloc((void**)&Pd, size);
    // One Width x Width block: the kernel indexes with threadIdx only and
    // relies on exactly this configuration.
    dim3 dimBlock(Width, Width);
    dim3 dimGrid(1, 1);
    MatrixMulKernel<<<dimGrid,dimBlock>>>(Md,Nd,Pd,Width);
    // BUGFIX: kernel launches return no status; surface launch errors
    // explicitly instead of printing garbage on failure.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(h_Mc,Pd,size,cudaMemcpyDeviceToHost);
    // Free device matrices.
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
    // Print the result array.
    for (i = 0; i < Width; i++) {
        for (j = 0; j < Width; j++) {
            printf("%f ", h_Mc[i][j]);
        }
        printf("\n");
    }
    return 0;
}
12,796 | /*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include<conio.h>
__global__ void convolution(int *N, int *M, int *P, int mask_width, int width)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int pvalue=0;
int N_start_point=i-(mask_width/2);
for(int j=0; j<mask_width; j++)
{
if(N_start_point+j>=0 && N_start_point+j<width)
pvalue +=N[N_start_point+ j]*M[j];
}
P[i]=pvalue;
}
int main()
{
int i, n, m, *A, *B, *C, *N, *M, *P;
n=8;
m=5;
A=(int *)malloc(sizeof(int)*n);
B=(int *)malloc(sizeof(int)*m);
C=(int *)malloc(sizeof(int)*n);
printf("\n\nElements in A:\n");
for(i=0; i<n; i++)
printf("%d \t",A[i]=i+1);
printf("\n\nElements in B:\n");
for(i=0; i<m; i++)
printf("%d \t",B[i]=i+1);
cudaMalloc((void**)&N,sizeof(int)*n);
cudaMalloc((void**)&M,sizeof(int)*m);
cudaMalloc((void**)&P,sizeof(int)*n);
cudaMemcpy(N,A,sizeof(int)*n,cudaMemcpyHostToDevice);
cudaMemcpy(M,B,sizeof(int)*m,cudaMemcpyHostToDevice);
dim3 DimGrid(1,1,1);
dim3 DimBlock(n,1,1);
convolution<<<DimGrid,DimBlock>>>(N,M,P,m,n);
cudaMemcpy(C,P,sizeof(int)*n,cudaMemcpyDeviceToHost);
printf("\n\nOutput:\n");
for(i=0; i<n; i++)
printf("%d \t",C[i]);
printf("\n");
cudaFree(N);
cudaFree(M);
cudaFree(P);
getch();
return 0;
}
*/
/* OUTPUT -
Elements in A:
1 2 3 4 5 6 7 8
Elements in B:
1 2 3 4 5
Output:
26 40 55 70 85 100 70 44
*/ |
12,797 | #include <stdio.h>
#include<stdlib.h>
dim3 grid(10, 10);
dim3 block(10,10,1);
#define BLOCK 16
#define N 200
__global__
// Element-wise sum arr3 = arr1 + arr2 over the N-element arrays.
void sum_of_array(float *arr1, float *arr2, float *arr3){
    printf("blockIdx.x = %d, blockIdx.y = %d, blockIdx.z = %d\n", blockIdx.x, blockIdx.y, blockIdx.z);
    printf("threadIdx.x = %d\n", threadIdx.x);
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // BUGFIX: the launch in main() rounds N=200 up to 256 threads, so the
    // tail threads (i >= N) were reading and writing past the arrays.
    if (i < N)
        arr3[i] = arr1[i] + arr2[i];
}
// Fill arr[0..size) with the ascending sequence 0.0, 1.0, 2.0, ...
void initialize_array(float *arr, int size){
    int idx = 0;
    while (idx < size) {
        arr[idx] = (float)idx;
        ++idx;
    }
}
// Host driver: adds two N-element arrays on the GPU and prints the sums.
int main(void){
    float *arr1, *arr2, *arr3, *d_arr1, *d_arr2, *d_arr3;
    size_t n_byte = N * sizeof(float);
    arr1 = (float *)malloc(n_byte);
    arr2 = (float *)malloc(n_byte);
    arr3 = (float *)malloc(n_byte);
    initialize_array(arr1, N);
    initialize_array(arr2, N);
    initialize_array(arr3, N);
    printf("start cudaMalloc\n");
    cudaMalloc((void**)&d_arr1, n_byte);
    cudaMalloc((void**)&d_arr2, n_byte);
    cudaMalloc((void**)&d_arr3, n_byte);
    printf("finish cudaMalloc\n");
    printf("start cudaMemcpy\n");
    cudaMemcpy(d_arr1, arr1, n_byte, cudaMemcpyHostToDevice);
    cudaMemcpy(d_arr2, arr2, n_byte, cudaMemcpyHostToDevice);
    cudaMemcpy(d_arr3, arr3, n_byte, cudaMemcpyHostToDevice);
    printf("finish cudaMemcpy\n");
    printf("start kernel function\n");
    // Round N up to whole 256-thread blocks; the kernel guards the tail.
    sum_of_array<<<(N+255)/256, 256>>>(d_arr1, d_arr2, d_arr3);
    // BUGFIX: kernel launches return no status; fetch launch errors.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    printf("finish kernel function\n");
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(arr3, d_arr3, n_byte, cudaMemcpyDeviceToHost);
    cudaFree(d_arr3);
    cudaFree(d_arr2);
    cudaFree(d_arr1);
    for(int i = 0; i <N; i++){
        printf("%f+%f = %f, ", arr1[i], arr2[i], arr3[i]);
    }
    printf("\n");
    // BUGFIX: the host buffers were leaked.
    free(arr1);
    free(arr2);
    free(arr3);
    return 0;
}
12,798 | /*
* mmult_gpu.cu -- Device code for matrix multiplication benchmark
*
* Michael McThrow
*/
#define get_element_index(i, j, cols) ((i) * (cols) + (j))
// Matrix multiplication c = a * b, one thread per output element.
// a is rows x cols, b is cols x cols, c is rows x cols.
__global__ void mmult_kernel(unsigned int *a, unsigned int *b, unsigned int *c,
        unsigned int rows, unsigned int cols)
{
    unsigned int i;
    unsigned int product = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the tail: grids are usually rounded up past the matrix edges.
    if (row >= (int)rows || col >= (int)cols)
        return;
    int index = get_element_index(row, col, cols);
    for (i = 0; i < cols; i++)
        // BUGFIX: the accumulation used '+' between the two operands,
        // computing a sum of element sums instead of the dot product a
        // matrix multiplication requires.
        product += a[row * cols + i] * b[i * cols + col];
    c[index] = product;
}
|
12,799 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <string.h>
// Kernel function to add the elements of two arrays
#define ALPHA 5.1
#define BETA 0.15
#define RHO 0.6
#define Q 5.0
__global__
// Per-row probability denominator of the ACO transition rule:
// denominator[r] = sum_s tau[r,s]^ALPHA * (1/dist[r,s])^BETA * connections[r,s].
// Grid-stride loop over the N rows.
void calcDenom(int N, float *p, float *tau, float *dist,float *denominator, int *connections)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for(int r = index; r < N; r += stride)
    {
        // Accumulate in a register and store once, instead of read-modify-
        // write of global memory on every term.
        float acc = 0.0f;
        for(int s = 0; s < N; s++)
        {
            // powf/1.0f keep the arithmetic in single precision; the
            // double-precision pow()/1.0/1 forms silently promoted every
            // term to double in a float kernel.
            acc += powf(tau[r*N + s], ALPHA) * powf(1.0f/dist[r*N + s], BETA) * connections[r*N + s];
        }
        denominator[r] = acc;
    }
}
__global__
// Transition probabilities p[r] over all N*N edges; rows with a zero
// denominator (no outgoing connections) get the sentinel -1.
void calcProb(int N, float *p, float *tau, float *dist,float *denominator, int *connections)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for(int r = index; r < N*N; r += stride)
    {
        int row = r / N;  // row of this matrix entry (r is already int)
        if(denominator[row] != 0)
        {
            // powf/1.0f keep the arithmetic in single precision instead of
            // promoting to double via pow()/(1/...).
            p[r] = (powf(tau[r], ALPHA)*powf((1.0f/dist[r]), BETA)*connections[r])/denominator[row];
        }
        else
        {
            p[r] = -1;
        }
    }
}
__global__
// Grid-stride initialisation of every N*N working buffer: connections and
// probabilities cleared, pheromone/dtau reset, distances defaulted to 1.
// (`denominator` is part of the shared kernel signature but untouched here.)
void setZeros(int N, float *p, float *tau,float *dtau, float *dist,float *denominator, int *connections)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    const int total = N * N;
    for (int idx = first; idx < total; idx += step)
    {
        p[idx] = 0;
        connections[idx] = 0;
        dist[idx] = 1.0;
        tau[idx] = 1.0;
        dtau[idx] = 0;
    }
}
__global__
// Pheromone evaporation + deposit: tau <- dtau + (1-RHO)*tau + 1, then
// reset the per-iteration deposit buffer dtau. Grid-stride over N*N cells.
void clearTau(int N, float *tau,float *dtau)
{
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const int step = blockDim.x * gridDim.x;
    for (int idx = first; idx < N * N; idx += step)
    {
        const float updated = dtau[idx] + (1 - RHO) * tau[idx] + 1;
        tau[idx] = updated;
        dtau[idx] = 0;
    }
}
/* Parse up to `row` space-separated lines of `filename` into data[i][j],
 * reading at most `col` values per line.  Returns silently (leaving data
 * untouched) when the file cannot be opened. */
void read_csv(int row, int col, char *filename, double **data){
    FILE *file;
    file = fopen(filename, "r");
    if (file == NULL)        /* BUGFIX: fgets on a NULL stream crashed */
        return;
    int i = 0;
    char line[100];
    /* BUGFIX: the read size must match the buffer — it was 4098 into
     * line[100], a stack buffer overflow for any long line. */
    while (fgets(line, sizeof(line), file) && (i < row))
    {
        int j = 0;
        char *token;
        /* get the first token */
        token = strtok(line, " ");
        /* walk through other tokens; BUGFIX: bound j so lines with extra
         * tokens cannot write past the col-element row buffer.  (The old
         * strdup'd copy of the line was never used and has been dropped.) */
        while( token != NULL && j < col ) {
            data[i][j] = atof(token);
            token = strtok(NULL, " ");
            j++;
        }
        i++;
    }
    fclose(file);            /* BUGFIX: the stream was leaked */
}
/* Load `row` edges from the csv into the N x N adjacency (`connections`)
 * and distance (`dist`) matrices.  Columns 1 and 2 of each line are the
 * endpoint indices, column 3 the edge length.
 * NOTE(review): if the file has fewer than `row` lines the trailing rows of
 * dataEdges are read uninitialized — confirm the input always has exactly
 * NR_OF_EDGES lines. */
void readEdges(int N, int row, int col, char *filename, int *connections, float *dist){
    double **dataEdges;
    dataEdges = (double **)malloc(row * sizeof(double *));
    for (int i = 0; i < row; ++i){
        dataEdges[i] = (double *)malloc(col * sizeof(double));
    }
    read_csv(row, col, filename, dataEdges);
    for(int i = 0; i < row; i++)
    {
        connections[((int) dataEdges[i][1])*N + (int) dataEdges[i][2]] = 1;
        dist[(int) dataEdges[i][1] *N + (int) dataEdges[i][2]] = dataEdges[i][3];
    }
    /* BUGFIX: only the pointer array was freed, leaking every row buffer. */
    for (int i = 0; i < row; ++i)
        free(dataEdges[i]);
    free(dataEdges);
}
// Ant-colony route search over the graph loaded from dist.csv: repeatedly
// releases ants from `start`, moving by roulette selection over the GPU-
// computed transition probabilities, and deposits pheromone (dtau) along
// the walked edges.
int main(void)
{
    //CONSTANTS
    const int NR_OF_DATAPOINTS = 1363;
    const int NR_OF_EDGES = 3977;
    const int NR_OF_ITERATIONS = 5;
    const int NR_OF_ANTS = 20;
    const int NR_OF_BLOCKS = 4096;
    const int NR_OF_THREADS = 512;
    const int start = 1;
    const int destination = 250;
    //MEMORY ALLOCATION
    // NOTE(review): none of the cudaMallocManaged calls are checked; a
    // failed allocation would surface later as a fault in setZeros.
    float *tau, *dtau, *dist, *p;
    int *connections, *numOfConnections;
    float *denominator;
    cudaMallocManaged(&tau, NR_OF_DATAPOINTS*NR_OF_DATAPOINTS*sizeof(float));
    cudaMallocManaged(&dtau, NR_OF_DATAPOINTS*NR_OF_DATAPOINTS*sizeof(float));
    cudaMallocManaged(&dist, NR_OF_DATAPOINTS*NR_OF_DATAPOINTS*sizeof(float));
    cudaMallocManaged(&p, NR_OF_DATAPOINTS*NR_OF_DATAPOINTS*sizeof(float));
    cudaMallocManaged(&connections, NR_OF_DATAPOINTS*NR_OF_DATAPOINTS*sizeof(int));
    cudaMallocManaged(&denominator, NR_OF_DATAPOINTS*sizeof(float));
    cudaMallocManaged(&numOfConnections, NR_OF_DATAPOINTS*sizeof(int));
    setZeros<<<NR_OF_BLOCKS, NR_OF_THREADS>>>(NR_OF_DATAPOINTS, p, tau, dtau, dist,denominator, connections);
    cudaDeviceSynchronize();
    // Per-ant walk (max 200 steps) and bookkeeping.
    // NOTE(review): lengthMin is reset to FLT_MAX below but never read or
    // updated afterwards — apparently dead state.
    int route [NR_OF_ANTS][200];
    float length [NR_OF_ANTS];
    float lengthMin [NR_OF_ANTS];
    //READ EDGES
    int col = 6;
    char fname[256] = "dist.csv";
    readEdges(NR_OF_DATAPOINTS, NR_OF_EDGES, col, fname, connections, dist);
    // Count the outgoing connections of every node (degree).
    for(int r = 0; r < NR_OF_DATAPOINTS; r++)
    {
        numOfConnections[r] = 0;
        for(int s = 0; s < NR_OF_DATAPOINTS; s++)
        {
            numOfConnections[r] += connections[NR_OF_DATAPOINTS*r + s];
        }
    }
    //iteration
    float roulette = 0;
    float sumP = 0;
    int selection = 0;
    int current = 0;
    int step = 0;
    for(int i = 0; i < NR_OF_ITERATIONS; i++)
    {
        // Evaporate pheromone and apply last iteration's deposits.
        clearTau<<<NR_OF_BLOCKS, NR_OF_THREADS>>>(NR_OF_DATAPOINTS, tau, dtau);
        cudaDeviceSynchronize();
        //"create ants"
        for(int ant = 0; ant < NR_OF_ANTS; ant++)
        {
            route[ant][0] = start;
            length[ant] = 0;
            lengthMin[ant] = FLT_MAX;
            // Refresh denominators and probabilities on the GPU; the
            // managed buffers are then read back on the host below.
            calcDenom<<<NR_OF_BLOCKS, NR_OF_THREADS>>>(NR_OF_DATAPOINTS, p, tau, dist,denominator, connections);
            cudaDeviceSynchronize();
            //calculate probabilities
            calcProb<<<NR_OF_BLOCKS, NR_OF_THREADS>>>(NR_OF_DATAPOINTS, p, tau, dist,denominator, connections);
            cudaDeviceSynchronize();
            //start an ant
            step = 0;
            while(step < 200)
            {
                // Roulette-wheel selection of the next node from the
                // current node's probability row; reject an immediate
                // backtrack unless the node is a dead end.
                // NOTE(review): at step == 0 the loop condition reads
                // route[ant][step - 1], i.e. index -1 — an out-of-bounds
                // read.  Also, if roulette lands past sumP, selection
                // stays -1 and indexes the arrays below negatively.
                current = route[ant][step];
                do
                {
                    roulette = (double) rand() / (double) RAND_MAX;
                    sumP = 0;
                    selection = -1;
                    for(int s = 0; s < NR_OF_DATAPOINTS; s++)
                    {
                        sumP += p[route[ant][step]*NR_OF_DATAPOINTS + s];
                        if(roulette < sumP)
                        {
                            selection = s;
                            break;
                        }
                    }
                }while (selection == route[ant][step - 1] && numOfConnections[current] != 1);
                if(selection == destination)
                {
                    // Destination reached: print the route and stop this ant.
                    printf("\ntalalat\n");
                    for(int ii = 0; ii <=step ; ii++)
                    {
                        printf("-> %d ", route[ant][ii]);
                    }
                    printf("\n\n");
                    step++;
                    route[ant][step] = selection;
                    length[ant] += dist[route[ant][step - 1]*NR_OF_DATAPOINTS + route[ant][step]];
                    break;
                }
                if(numOfConnections[selection] <= 1)
                {
                    // Dead end: prune the edge in both directions so no ant
                    // walks into it again.
                    connections[current*NR_OF_DATAPOINTS + selection] = 0;
                    connections[selection*NR_OF_DATAPOINTS + current] = 0;
                    numOfConnections[current] = numOfConnections[current] - numOfConnections[selection];
                    numOfConnections[selection] = 0;
                }
                else
                {
                    //step forward
                    step++;
                    route[ant][step] = selection;
                    length[ant] += dist[route[ant][step - 1]*NR_OF_DATAPOINTS + route[ant][step]];
                    // Deposit pheromone inversely proportional to the
                    // current tour length.
                    dtau[current*NR_OF_DATAPOINTS + selection] += Q/length[ant];
                }
            }
        }
    }
    cudaFree(denominator);
    cudaFree(tau);
    cudaFree(dtau);
    cudaFree(dist);
    cudaFree(p);
    cudaFree(connections);
    cudaFree(numOfConnections);
}
|
12,800 | #include <stdio.h>
// Prompt for an integer named `str`; returns the value entered, or `arg`
// unchanged when the user enters -1 or the input is not an integer.
__host__ int getval_int (const char *str, const int arg)
{
    int ans;
    int ret = arg;
    printf ("Enter %s or -1 to accept default (%d)\n", str, arg);
    // BUGFIX: scanf's result was ignored; on non-numeric input `ans`
    // stayed uninitialized and could replace the default with garbage.
    if (scanf ("%d", &ans) == 1 && ans != -1)
        ret = ans;
    printf ("returning %d\n", ret);
    return ret;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.