serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
6,801 | // Name: H.G. Manesha Washani
// Student Id: 1432289
#include <stdio.h>
/* this one of the header file. in this code need
dynamically allocated array function. library code can use
malloc, free option */
#include <stdlib.h>
#define N 4
/* Element-wise matrix addition kernel, C = A + B.
   Entry point running on the device, called from host code; expects a single
   block launched with an N x N thread layout (one thread per element). */
__global__ void Matrixadd(int A[][N], int B[][N], int C[][N]){
    int row = threadIdx.x;
    int col = threadIdx.y;
    C[row][col] = A[row][col] + B[row][col];
}
/* Abort with a readable message if a CUDA runtime call failed.
   (The original checked no return codes at all, so failures were silent.) */
static void checkCuda(cudaError_t err, const char *what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
/* Host driver: adds two N x N matrices on the GPU and prints the result. */
int main()
{
    int A[N][N] =
    {
        {1, 5, 6, 7},
        {4, 4, 8, 0},
        {2, 3, 4, 5},
        {2, 3, 4, 5}
    };
    int B[N][N] =
    {
        {1, 5, 6, 7},
        {4, 4, 8, 0},
        {2, 3, 4, 5},
        {2, 3, 4, 5}
    };
    int C[N][N] = { {0} };  // zero-initialized result
    // Device copies of A, B and C; pointer-to-row form so C[g][h] indexing
    // works inside the kernel.
    int (*d_A)[N], (*d_B)[N], (*d_C)[N];
    checkCuda(cudaMalloc((void**)&d_A, (N*N)*sizeof(int)), "cudaMalloc d_A");
    checkCuda(cudaMalloc((void**)&d_B, (N*N)*sizeof(int)), "cudaMalloc d_B");
    checkCuda(cudaMalloc((void**)&d_C, (N*N)*sizeof(int)), "cudaMalloc d_C");
    // Copy the two inputs to the device. C is fully overwritten by the kernel,
    // so the original host-to-device copy of C was unnecessary and is dropped.
    checkCuda(cudaMemcpy(d_A, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice), "copy A");
    checkCuda(cudaMemcpy(d_B, B, (N*N)*sizeof(int), cudaMemcpyHostToDevice), "copy B");
    // Launch one block of N x N threads; each thread adds one element.
    dim3 threadsPerBlock(N, N);
    Matrixadd<<<1, threadsPerBlock>>>(d_A, d_B, d_C);
    checkCuda(cudaGetLastError(), "kernel launch");
    // The blocking memcpy below also synchronizes with the kernel.
    checkCuda(cudaMemcpy(C, d_C, (N*N)*sizeof(int), cudaMemcpyDeviceToHost), "copy C");
    printf("C = \n");
    for (int g = 0; g < N; g++) {
        for (int h = 0; h < N; h++) {
            printf("%d ", C[g][h]);
        }
        printf("\n");
    }
    // Cleanup
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("\n");
    return 0;
}
|
6,802 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#define ANCHOMATRIZ 64
// Kernel: one thread per output element of the ANCHOMATRIZ-square product c = a*b.
// Expects a 2D launch; threads that land outside the matrix do nothing.
__global__ void matrixMul(float *a, float *b, float *c)
{
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    if (fila >= ANCHOMATRIZ || columna >= ANCHOMATRIZ)
        return;
    float suma = 0.0f;
    for (int k = 0; k < ANCHOMATRIZ; k++)
        suma += a[fila * ANCHOMATRIZ + k] * b[k * ANCHOMATRIZ + columna];
    c[fila * ANCHOMATRIZ + columna] = suma;
}
// CPU reference implementation of c = a*b for ANCHOMATRIZ-square,
// row-major matrices; used to validate and time against the GPU kernel.
void matrixMul_CPU(float *a, float *b, float *c)
{
    for (int fila = 0; fila < ANCHOMATRIZ; fila++) {
        for (int col = 0; col < ANCHOMATRIZ; col++) {
            float acum = 0;
            for (int k = 0; k < ANCHOMATRIZ; k++)
                acum = acum + a[fila*ANCHOMATRIZ + k] * b[k*ANCHOMATRIZ + col];
            c[fila*ANCHOMATRIZ + col] = acum;
        }
    }
}
// Returns wall-clock time in milliseconds since the Unix epoch (01/01/1970).
long long milisegundos()
{
    struct timeval ahora;
    gettimeofday(&ahora, NULL);
    return (long long)ahora.tv_sec * 1000 + ahora.tv_usec / 1000;
}
int main(void)
{
    int numElementos = ANCHOMATRIZ*ANCHOMATRIZ;
    // BUG FIX: the buffers hold float, so size them with sizeof(float)
    // (sizeof(int) happened to match on common platforms, but is wrong).
    size_t tamano = numElementos * sizeof(float);
    // Reservamos memoria host (memoria principal)
    float *h_a = (float *)malloc(tamano);
    float *h_b = (float *)malloc(tamano);
    float *h_c = (float *)malloc(tamano);
    if (h_a == NULL || h_b == NULL || h_c == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    long long ti, tf;
    // Inicializar con números arbitrarios en [0, 1]
    for (int i = 0; i < ANCHOMATRIZ; ++i) {
        for (int j = 0; j < ANCHOMATRIZ; j++) {
            h_a[i*ANCHOMATRIZ+j] = rand()/(float)RAND_MAX;
            h_b[i*ANCHOMATRIZ+j] = rand()/(float)RAND_MAX;
        }
    }
    // Time the CPU reference multiply.
    ti = milisegundos();
    matrixMul_CPU(h_a, h_b, h_c);
    tf = milisegundos();
    // BUG FIX: tf-ti is a long long; printing it with %f was undefined behavior.
    printf("Tiempo invertido en multiplicar CPU: %lld\n", tf-ti);
    printf("------- CPU --------\n");
    for (int i = 0; i < 10; ++i)
        printf("Componente [%d] = %f\n", i, h_c[i]);
    // Device buffers
    float *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, tamano);
    cudaMalloc(&d_b, tamano);
    cudaMalloc(&d_c, tamano);
    // Host -> device copies. d_c is fully overwritten by the kernel, so the
    // original copy-in of h_c was unnecessary and is dropped.
    cudaMemcpy(d_a, h_a, tamano, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, tamano, cudaMemcpyHostToDevice);
    // 16x16 = 256 threads per block; round the grid up to cover the matrix.
    // BUG FIX: the grid was hard-coded to 64x64 blocks (a 1024x1024-thread
    // launch for a 64x64 matrix); derive it from ANCHOMATRIZ instead.
    dim3 dimBlock(16, 16);
    dim3 dimGrid((ANCHOMATRIZ + dimBlock.x - 1) / dimBlock.x,
                 (ANCHOMATRIZ + dimBlock.y - 1) / dimBlock.y);
    ti = milisegundos();
    matrixMul<<<dimGrid, dimBlock>>>(d_a, d_b, d_c);
    // The blocking memcpy below also synchronizes with the kernel, so the
    // GPU timing includes the result transfer (as in the original).
    cudaMemcpy(h_c, d_c, tamano, cudaMemcpyDeviceToHost);
    tf = milisegundos();
    printf("Tiempo invertido en multiplicar GPU: %lld\n", tf-ti);
    // Verificamos los primeros valores
    printf("------- GPU --------\n");
    for (int i = 0; i < 10; ++i)
        printf("Componente [%d] = %f\n", i, h_c[i]);
    // Liberamos memoria device
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Liberamos memoria host
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
6,803 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
// Element-wise (Hadamard) product C = A .* B for an a-rows x b-cols,
// row-major matrix; one thread per element, 2D launch.
__global__ void matrixElementWiseMultiplicationKernelNaive(const float* A, const float* B, float* C, int a, int b) {
    int ROW = blockIdx.y*blockDim.y+threadIdx.y;
    int COL = blockIdx.x*blockDim.x+threadIdx.x;
    if (ROW < a && COL < b) {
        // BUG FIX: the row stride is the column count b (as used for A and B);
        // indexing C with ROW*a corrupted the output for non-square matrices.
        C[ROW * b + COL] = A[ROW * b + COL]*B[ROW * b + COL];
    }
}
6,804 | #include "includes.h"
// Pairwise cosine similarity between every query histogram (qSet, qSize rows)
// and every database histogram (dbSet, dbSize rows), each of hSize bins.
// One thread per (query, db) pair; 2D launch. Result goes to out[idy*qSize+idx].
__global__ void matchHistCuda(float*qSet, float*dbSet, size_t qSize, size_t dbSize, size_t hSize, float*out){
    size_t idx = blockIdx.x*blockDim.x + threadIdx.x;
    size_t idy = blockIdx.y*blockDim.y + threadIdx.y;
    if(idx < qSize && idy < dbSize){
        size_t qi = idx*hSize;
        size_t dbi = idy*hSize;
        // Cosine similarity: dot(a,b) / (|a| * |b|)
        float sumab = 0;
        float suma2 = 0;
        float sumb2 = 0;
        for(int k = 0; k < hSize; k++){
            sumab += qSet[qi+k] * dbSet[dbi+k];
            suma2 += qSet[qi+k] * qSet[qi+k];
            sumb2 += dbSet[dbi+k] * dbSet[dbi+k];
        }
        // BUG FIX: the denominator is the PRODUCT of the two norms;
        // the original divided them (sqrtf(suma2)/sqrtf(sumb2)).
        float cossim = sumab/(sqrtf(suma2)*sqrtf(sumb2));
        out[idy*qSize + idx] = cossim;
    }
}
6,805 | /************************************************************
Known issues:
This program only works on matrices smaller than or equal to
256x256. 1024x1024 will cause segmentation faults and 512x512
simply causes the program to almost crash and return times of
0 for each kernel call.
The matrices must be square and all matrices must be the same
size.
*/
#include <stdio.h>
#include <stdlib.h>
#define MATSIZE 128
#define THREADS_PER_BLOCK 32
// Serial (single-thread) matrix multiplication kernel: meant to be launched
// <<<1,1>>> as a baseline. NOTE(review): it accumulates into g_c with +=,
// so g_c must be zeroed before launch.
__global__ void smultiply(int* g_a, int* g_b, int* g_c)
{
    for (int row = 0; row < MATSIZE; ++row) {
        for (int col = 0; col < MATSIZE; ++col) {
            for (int k = 0; k < MATSIZE; ++k) {
                g_c[(row * MATSIZE) + col] += g_a[(row * MATSIZE) + k] * g_b[(k * MATSIZE) + col];
            }
        }
    }
}
// Parallel matrix multiplication kernel: one thread per output element of the
// dim x dim product g_d = g_a * g_b (x indexes rows, y indexes columns).
__global__ void pmultiply(double* g_a, double* g_b, double* g_d , int dim)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // BUG FIX: guard out-of-range threads. Without this, any launch whose grid
    // over-covers the matrix reads and writes out of bounds — consistent with
    // the crashes described in the "Known issues" header.
    if (x >= dim || y >= dim)
        return;
    double sum = 0.0;
    for (int z = 0; z < dim; ++ z)
    {
        sum += g_a[x * dim + z] * g_b[y + z * dim];
    }
    g_d[(x * dim) + y] = sum;
}
extern "C" void Cudamultiply(double* a, double* b, double* d, int Dim)
{
    /*
     * Multiply the Dim x Dim matrices a and b on the GPU, leaving the result
     * in d (all host pointers, row-major). Timing is printed to stdout.
     * NOTE(review): the grid below uses integer division, so Dim should be a
     * multiple of THREADS_PER_BLOCK; ragged sizes would drop rows/columns
     * (the added bounds guard in pmultiply only prevents OOB access).
     */
    double *g_a, *g_b, *g_d;
    int g_size = Dim * Dim * sizeof(double);
    cudaEvent_t start, stop;
    // BUG FIX: `time` was printed uninitialized because the serial timing path
    // that set it is commented out; initialize it and drop that dead printf.
    float time = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Copy the operands onto the Cuda device.
    cudaMalloc(&g_a, g_size);
    cudaMemcpy(g_a, a, g_size, cudaMemcpyHostToDevice);
    cudaMalloc(&g_b, g_size);
    cudaMemcpy(g_b, b, g_size, cudaMemcpyHostToDevice);
    // Grid of (Dim/THREADS_PER_BLOCK)^2 blocks of THREADS_PER_BLOCK^2 threads.
    dim3 dimGrid((Dim / THREADS_PER_BLOCK), (Dim / THREADS_PER_BLOCK));
    dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK);
    // Result matrix; allocated late so device memory is not held longer
    // than needed (as in the original).
    cudaMalloc(&g_d, g_size);
    cudaMemcpy(g_d, d, g_size, cudaMemcpyHostToDevice);
    // Timed parallel kernel call.
    cudaEventRecord(start, 0);
    pmultiply<<<dimGrid,threads>>>(g_a, g_b, g_d,Dim);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time = %f milliseconds\n", time);
    // Copy the result back and release all device resources.
    cudaMemcpy(d, g_d, g_size, cudaMemcpyDeviceToHost);
    cudaFree(g_a);
    cudaFree(g_b);
    cudaFree(g_d);
    // BUG FIX: the events were never destroyed (leaked per call).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
// Element-wise vector addition c = a + b: one block per element (launch with
// *n blocks of one thread). The element count n is passed as a device pointer.
__global__
void add(int *a, int *b, int *c, int *n)
{
    int idx = blockIdx.x;
    if (idx >= *n)
        return;
    c[idx] = a[idx] + b[idx];
}
|
6,807 | #include <stdio.h>
#include <iostream>
#define N 5
using namespace std;
// C = A * B for a row-major n x n matrix A and an n-vector B.
// One thread computes one row of the result; 1D launch covering n threads.
__global__
void matrix_vector_mult(float *A, float *B, float *C, int n){
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= n)
        return;
    C[row] = 0;
    for (int col = 0; col < n; col++)
        C[row] += A[row*n + col] * B[col];
}
int main(){
    // Host buffers: A is an N x N matrix of ones, B an N-vector of ones,
    // C the N-vector result (so each C[i] should come back as N).
    float *A = (float*) malloc(N*N*sizeof(float));
    float *B = (float*) malloc(N*sizeof(float));
    float *C = (float*) malloc(N*sizeof(float));
    for(int i = 0; i < N*N; i++){
        A[i]=1;
    }
    for(int i = 0; i < N; i++){
        B[i] = 1;
        C[i] = 0;
    }
    int size = N*sizeof(float);
    // Device buffers; C need not be copied in, the kernel initializes it.
    float *d_A, *d_B, *d_C;
    cudaMalloc((void **) &d_A, size*N);
    cudaMemcpy(d_A,A,size*N,cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_B, size);
    cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_C, size);
    // 256 threads per block, enough blocks to cover N rows.
    matrix_vector_mult<<<ceil(N/256.0), 256>>>(d_A,d_B,d_C,N);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++){
        cout<<C[i]<<" ";
    }
    cout<<endl;
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // BUG FIX: the host buffers were leaked.
    free(A);
    free(B);
    free(C);
    return 0;
}
|
6,808 | #include <stdio.h>
// Prints each thread's block index, thread index and flattened global index.
__global__ void kernel(){
    int gId = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the format string ended with a literal '¥n' (a mis-encoded
    // backslash from a Japanese-locale editor), so no newline was printed;
    // use the real escape sequence.
    printf("bId=%d,tId=%d,gId=%d\n", blockIdx.x, threadIdx.x, gId);
}
// Launch 3 blocks of 4 threads and wait so the device printf output flushes
// before the process exits.
int main(void){
    const int blocks = 3;
    const int threadsPerBlock = 4;
    kernel<<<blocks, threadsPerBlock>>>();
    cudaDeviceSynchronize();
    return 0;
}
6,809 | # include <stdio.h>
# include <stdlib.h> // To use the exit function and malloc
# include <string.h>
/*
* ============================================
* Find a word in a given string (CUDA version)
* ============================================
*
* Usage: find_word <word> <input_file>
*
* Given a word, load the first line of the input file and
* search the word in it. This version uses a CUDA-enabled
* graphics card.
*/
// Global constant
# define NOT_FOUND (-1)
# define THREADS_PER_BLOCK (128)
// Function declaration
int find_word_in_gpu(char *word, char *search_here);
// ----------------------------------------------------------------------------
// Kernel definition
void __global__ find_word_kernel(char *word, char *search_here, int ref_length, int *result) {
    /*
     * Search for the given word in the search_here string.
     *
     * Each thread tests one candidate start position; per-block candidates are
     * reduced in shared memory and thread 0 writes the block's answer into
     * result[blockIdx.x] (0 meaning "no match in this block").
     * NOTE(review): a match at position 0 is indistinguishable from "not
     * found" because 0 doubles as the sentinel — confirm with the caller.
     */
    // Shared memory for the per-thread candidates of THIS block.
    __shared__ int found_here[THREADS_PER_BLOCK];
    // The starting position of each thread
    int start = (blockDim.x * blockIdx.x) + threadIdx.x;
    // The shared memory index for this thread
    int shared_idx = threadIdx.x;
    // ---> Search the word starting at this thread's window
    if (start < ref_length-1) { // Check for a valid position
        int found = 1; // Pretend you found it
        int letters_coincide;
        // NOTE(review): this loop can read past the end of search_here for
        // windows near the tail of the string — bounds rely on the caller.
        for (int j=0; word[j] != '\0'; j++) {
            letters_coincide = (search_here[start+j] == word[j]);
            found = (found && letters_coincide);
        }
        // Record the match position, or 0 for "not here".
        if (found) {
            found_here[shared_idx] = start;
        } else {
            found_here[shared_idx] = 0;
        }
    } else { // Non-working thread: initialize its shared slot
        // BUG FIX: index with shared_idx, not the GLOBAL index `start`, which
        // runs past the THREADS_PER_BLOCK-sized shared array in every block
        // but the first (out-of-bounds shared-memory write).
        found_here[shared_idx] = 0;
    }
    // Wait until everyone finishes
    __syncthreads();
    // ---> Tree-reduce to one result per block, keeping the first nonzero hit
    int threads_per_block = blockDim.x;
    int i = (threads_per_block+1)/2;
    while( i != 0 ) {
        if (threadIdx.x < i) {
            // Check if the partner entry is within reach
            if ( shared_idx + i < threads_per_block ) {
                found_here[shared_idx] = (found_here[shared_idx] ? found_here[shared_idx] : found_here[shared_idx+i]);
            }
        }
        // Prepare the next reduction
        i/=2;
        __syncthreads();
    }
    // ---> Save the block's reduction and return
    if (threadIdx.x == 0) {
        result[blockIdx.x] = found_here[shared_idx];
    }
    return;
} // --- find_word_kernel
// ----------------------------------------------------------------------------
/* --- << Main function >> --- */
int main(int argc, char *argv[]) {
    // 1. ---> Validate and read the arguments.
    // BUG FIX: missing arguments previously dereferenced NULL argv entries.
    // NOTE(review): the usage banner at the top says
    // "find_word <word> <input_file>", but the code has always taken the
    // reference string first and the word second; the actual order is kept.
    if (argc < 3) {
        printf("Usage: %s <reference_string> <word>\n", argv[0]);
        return 1;
    }
    char *search_here = argv[1];
    char *word = argv[2];
    // 2. ---> Search the word in the reference string
    int found_here = find_word_in_gpu(word, search_here);
    // 3. ---> Display the results
    if( found_here == NOT_FOUND ) {
        // The word was not found
        printf("Sorry, the word was not found in the reference string\n");
        printf("Word: %s\nReference string: %s\n\n", word, search_here);
    } else {
        // The word was found: print it and point an arrow at the position
        printf("The word was found at position: %d\n", found_here);
        printf("Word: %s\nReference string: %s\n", word, search_here);
        printf("  ");
        for (int i=0; i < found_here-1; i++)
            printf(" ");
        printf("^\n\n");
    }
    // 4. ---> Finish!
    return 0;
} // --- main
// ----------------------------------------------------------------------------
/* --- << Functions >> --- */
// --- --- - --- --- - --- --- - --- --- - --- --- - --- --- - --- --
int find_word_in_gpu(char *word, char *search_here) {
    /*
     * Search for the given word in the search_here string.
     *
     * At first occurrence, returns the starting position. If the word was not
     * found, return NOT_FOUND. Uses a CUDA-enabled graphics card.
     */
    // 1. ---> Prepare the data in the CPU
    int word_length = strlen(word);
    int str_length = strlen(search_here);
    int found_here = NOT_FOUND;
    // Copy the word to managed (GPU-visible) memory.
    // BUG FIX: allocate length+1 bytes — strcpy also writes the trailing
    // '\0', which previously overflowed both managed buffers by one byte.
    char *word_tmp;
    cudaMallocManaged(&word_tmp, (word_length + 1) * sizeof(char));
    strcpy(word_tmp, word);
    // Copy the search string to managed memory.
    char *str_tmp;
    cudaMallocManaged(&str_tmp, (str_length + 1) * sizeof(char));
    strcpy(str_tmp, search_here);
    // 2. ---> Prepare and launch the kernel
    // One thread per candidate window.
    int total_threads = (str_length - word_length) + 1;
    int blocks = (total_threads + THREADS_PER_BLOCK-1) / THREADS_PER_BLOCK;
    printf("Launching %d threads in %d blocks\n", THREADS_PER_BLOCK, blocks);
    // One partial result per block, zero-initialized (0 = "no match").
    int *partial_results;
    cudaMallocManaged(&partial_results, blocks * sizeof(int));
    for (int i=0; i < blocks; i++) {
        partial_results[i] = 0;
    }
    // Launch the kernel and wait for it (managed memory is read below).
    find_word_kernel<<<blocks, THREADS_PER_BLOCK>>>(word_tmp, str_tmp, str_length, partial_results);
    cudaDeviceSynchronize();
    // 3. ---> Keep the first block that reported a hit (lowest position).
    for (int i=0; i<blocks; i++) {
        if ( partial_results[i] ) {
            found_here = partial_results[i];
            break;
        }
    }
    // 4. ---> Cleanup and return
    cudaFree(partial_results);
    cudaFree(word_tmp);
    cudaFree(str_tmp);
    return found_here;
} // --- find_word_in_gpu
|
6,810 | /*
* transpose an array - using a cuda device, but no parallelism
*/
#include <stdio.h>
#include <stdlib.h>
__global__ void cudaTransposeSerial(float* out, float *in, int size);
void startClock(char*);
void stopClock(char*);
void printClock(char*);
#define DIM 1024
int main(int argc, char** argv) {
    // Host buffers for the input matrix and its transpose.
    float *h_in;
    float *h_out;
    h_in = (float*) malloc(DIM*DIM*sizeof(float));
    h_out =(float*) malloc(DIM*DIM*sizeof(float));
    // Device buffers.
    void *d_in;
    void *d_out;
    cudaMalloc(&d_in,DIM*DIM*sizeof(float));
    cudaMalloc(&d_out,DIM*DIM*sizeof(float));
    // Fill the input with 1..DIM*DIM (column-major walk).
    int value = 1;
    for (int i = 0; i < DIM; i++) {
        for (int j = 0; j < DIM; j++) {
            h_in[i + j*DIM] = value++;
        }
    }
    startClock("copy in");
    cudaMemcpy(d_in,h_in,DIM*DIM*sizeof(float),cudaMemcpyHostToDevice);
    stopClock("copy in");
    startClock("compute");
    cudaTransposeSerial<<<1,1>>>((float*)d_out,(float*)d_in,DIM);
    // BUG FIX: cudaThreadSynchronize() is deprecated (and removed in recent
    // CUDA toolkits); use cudaDeviceSynchronize() instead.
    cudaDeviceSynchronize();
    stopClock("compute");
    startClock("copy out");
    cudaMemcpy(h_out,d_out,DIM*DIM*sizeof(float),cudaMemcpyDeviceToHost);
    stopClock("copy out");
    // Sanity check: out must be the transpose of in.
    for (int i = 0; i < DIM; i++) {
        for (int j = 0; j < DIM; j++) {
            if (h_in[i + j*DIM] != h_out[i*DIM + j]) {
                printf("ERROR");
                exit(1);
            }
        }
    }
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);
    printClock("copy in");
    printClock("compute");
    printClock("copy out");
    return 0;
}
// Transpose `in` (size x size) into `out` using a single device thread.
// Intended to be launched <<<1,1>>> — no parallelism, used as a baseline.
__global__ void cudaTransposeSerial(float* out, float* in, int size) {
    for (int row = 0; row < size; row++)
        for (int col = 0; col < size; col++)
            out[col + row*size] = in[row + col*size];
}
|
6,811 | #include <stdio.h>
#define NX 256
#define NY 256
#define DX (5./(float)NX)
#define DY (5./(float)NY)
#define N_ITERATIONS 100000
#define N_THREADS_X 32
#define N_THREADS_Y 32
#define N_BLOCKS_X (NX + N_THREADS_X - 1)/N_THREADS_X
#define N_BLOCKS_Y (NY + N_THREADS_Y - 1)/N_THREADS_Y
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with its source location; optionally terminate
// the process with the error code. Used via the gpuErrchk macro above.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Solves the Poisson equation via the Jacobi Method
// $$\nabla^2 \phi = f$$
// Gaussian source term for the Poisson solve: f(x, y) = exp(-(x^2 + y^2)).
// BUG FIX: use single-precision expf instead of double-precision exp to
// avoid a silent float -> double -> float round trip.
float f(float x, float y)
{
    return expf(-x*x-y*y);
}
// Dirichlet boundary values applied to the edges of the grid in main():
// all edges held at 0 except the right column, held at 2.
float top_bc = 0;
float bottom_bc = 0;
float left_bc = 0;
float right_bc = 2;
// One Jacobi relaxation step for the Poisson equation on the NX x NY grid:
// reads the previous estimate from d_V2 and writes the new estimate into
// d_V1 (the caller alternates the two buffers between launches).
// Launch with a 2D grid covering at least NX x NY threads.
__global__ void iteratePoisson(float* d_source, float* d_V1, float* d_V2)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int n = j * NX + i;
// printf("%d %d %d %d\n", i, j, n, i > 0);
// Interior points only: boundary rows/columns keep their initial values,
// which is how the Dirichlet boundary conditions are enforced.
if ((i>0) && (i < NX-1) && (j>0) && (j < NY-1)) { //TODO: see what can be done about boundaries
int n_top = (j-1) * NX + i;
int n_bot = (j+1) * NX + i;
int n_left = j * NX + (i-1);
int n_right = j * NX + (i+1);
//TODO: rewrite above in terms of n?
// d_V1[n] = n;
// Average of the four neighbours plus a source contribution.
// NOTE(review): textbook Jacobi for grad^2(phi) = f uses -(1/4)*f*dx*dy
// here; the sign and missing 1/4 factor below look suspect — confirm
// (the author's own TODO raises the same question).
d_V1[n] = 0.25f * (d_V2[n_top] + d_V2[n_bot] + d_V2[n_left] + d_V2[n_right]) +\
d_source[n] * DX * DY;
//TODO: check above for consistency. Does this need factor of 4?
}
}
int main()
{
    // Host-side source term and potential grid.
    float *h_source = (float *)malloc(NX*NY*sizeof(float));
    float *h_V = (float *)malloc(NX*NY*sizeof(float));
    float x;
    float y;
    for(int j =0; j<NY; j++){
        for (int i = 0; i < NX; i++){
            int n = NX*j + i;
            // Grid coordinates centered on the origin.
            x = i*DX - NX/2 * DX;
            y = j*DY - NY/2 * DY;
            h_source[n] = f(x,y);
            if (j == 0){               // top row
                h_V[n] = top_bc;
            }
            else if (j==NY-1){         // bottom row
                h_V[n] = bottom_bc;
            }
            else if (i==0){            // left column
                h_V[n] = left_bc;
            }
            else if (i==NX-1){         // right column
                h_V[n] = right_bc;
            }
            else {
                // BUG FIX: interior points were left uninitialized (malloc
                // memory), so the first iterations relaxed garbage values.
                h_V[n] = 0.0f;
            }
        }
    }
    float *d_source;
    float *d_V1;
    float *d_V2;
    // Allocate GPU memory.
    gpuErrchk(cudaMalloc((void **)&d_source, NX*NY*sizeof(float)));
    gpuErrchk(cudaMalloc((void **)&d_V1, NX*NY*sizeof(float)));
    gpuErrchk(cudaMalloc((void **)&d_V2, NX*NY*sizeof(float)));
    // Copy the source and both working buffers to the device.
    gpuErrchk(cudaMemcpy(d_source, h_source, NX*NY*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_V1, h_V, NX*NY*sizeof(float), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_V2, h_V, NX*NY*sizeof(float), cudaMemcpyHostToDevice));
    printf("Blocks: %d\nThreads: %d\n", N_BLOCKS_X*N_BLOCKS_Y, N_THREADS_X*N_THREADS_Y);
    printf("Iteration %5d", 0);
    dim3 blocks(N_BLOCKS_X, N_BLOCKS_Y);
    dim3 threads(N_THREADS_X, N_THREADS_Y);
    gpuErrchk(cudaPeekAtLastError());
    gpuErrchk(cudaDeviceSynchronize());
    cudaEvent_t start;
    gpuErrchk(cudaEventCreate(&start));
    cudaEvent_t stop;
    gpuErrchk(cudaEventCreate(&stop));
    gpuErrchk(cudaEventRecord(start, NULL));
    // Ping-pong the two buffers: after each pair of launches, d_V2 holds
    // the most recent estimate.
    for (int i = 0; i < N_ITERATIONS; i += 2)
    {
        printf("\rIteration %5d", i);
        iteratePoisson<<<blocks, threads>>>(d_source, d_V1, d_V2);
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
        iteratePoisson<<<blocks, threads>>>(d_source, d_V2, d_V1);
        gpuErrchk(cudaPeekAtLastError());
        gpuErrchk(cudaDeviceSynchronize());
    }
    gpuErrchk(cudaEventRecord(stop, NULL));
    // BUG FIX: make sure the stop event has completed before reading it.
    gpuErrchk(cudaEventSynchronize(stop));
    // Copy the final estimate (d_V2) back to the host. The redundant
    // copy-back of d_source (never written by the kernel) was dropped.
    gpuErrchk(cudaMemcpy(h_V, d_V2, NX*NY*sizeof(float), cudaMemcpyDeviceToHost));
    // Free GPU resources.
    gpuErrchk(cudaFree(d_source));
    gpuErrchk(cudaFree(d_V1));
    gpuErrchk(cudaFree(d_V2));
    float msecTotal = 0.0f;
    gpuErrchk(cudaEventElapsedTime(&msecTotal, start, stop));
    // BUG FIX: the events were never destroyed.
    gpuErrchk(cudaEventDestroy(start));
    gpuErrchk(cudaEventDestroy(stop));
    printf("Elapsed time: %f ms", msecTotal);
    // Write the solution and source term out.
    // BUG FIX: fopen was unchecked and the files were never fclosed.
    FILE* file_V1 = fopen("V1.dat", "w");
    FILE* file_source = fopen("source.dat", "w");
    if (file_V1 == NULL || file_source == NULL) {
        fprintf(stderr, "\ncould not open output files\n");
        return 1;
    }
    for(int j =0; j<NY; j++){
        for (int i = 0; i < NX; i++){
            int n = NX*j + i;
            x = i*DX - NX/2 * DX;
            y = j*DY - NY/2 * DY;
            fprintf(file_V1, "%d %d %.3f %.3f %.3f\n", i, j, x, y, h_V[n]);
            fprintf(file_source, "%d %d %.3f %.3f %.3f\n", i, j, x, y, h_source[n]);
        }
    }
    fclose(file_V1);
    fclose(file_source);
    // Free host arrays.
    free(h_V);
    free(h_source);
    printf("\nFinished!\n");
}
|
6,812 | #include "includes.h"
#define NUMTHREADS 16
#define THREADWORK 32
// Kendall-style concordance kernel: block (bx, by) compares sample row bx of
// `a` against sample row by of `b` (each row is `sampleSize` values long) and
// writes tau = (concordant - discordant) / (n*(n-1)/2) to results[by*na+bx].
// Launch with blockDim = (NUMTHREADS, NUMTHREADS); the block's threads tile
// the pairwise comparisons and reduce their partial counts in shared memory.
// NOTE(review): `nb` is unused, and ties (equal in both a and b) are counted
// together with concordant pairs — confirm that matches the intended variant.
__global__ void gpuKendall(const float * a, size_t na, const float * b, size_t nb, size_t sampleSize, double * results)
{
size_t
i, j, tests,
tx = threadIdx.x, ty = threadIdx.y,
bx = blockIdx.x, by = blockIdx.y,
rowa = bx * sampleSize, rowb = by * sampleSize;
float
discordant, concordant = 0.f,
numer, denom;
// One partial sum per thread of the block.
__shared__ float threadSums[NUMTHREADS*NUMTHREADS];
// Each thread accumulates its strided share of the (i, j>i) pair grid.
for(i = tx; i < sampleSize; i += NUMTHREADS) {
for(j = i+1+ty; j < sampleSize; j += NUMTHREADS) {
tests = ((a[rowa+j] > a[rowa+i]) && (b[rowb+j] > b[rowb+i]))
+ ((a[rowa+j] < a[rowa+i]) && (b[rowb+j] < b[rowb+i]))
+ ((a[rowa+j] == a[rowa+i]) && (b[rowb+j] == b[rowb+i]));
concordant = concordant + (float)tests;
}
}
threadSums[tx*NUMTHREADS+ty] = concordant;
__syncthreads();
// Reduce along ty (columns of the thread tile) ...
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if(ty < i)
threadSums[tx*NUMTHREADS+ty] += threadSums[tx*NUMTHREADS+ty+i];
__syncthreads();
}
// ... then along tx, leaving the block total in threadSums[0].
for(i = NUMTHREADS >> 1; i > 0; i >>= 1) {
if((tx < i) && (ty == 0))
threadSums[tx*NUMTHREADS] += threadSums[(tx+i)*NUMTHREADS];
__syncthreads();
}
// Thread (0,0) turns the concordant count into the tau statistic.
if((tx == 0) && (ty == 0)) {
concordant = threadSums[0];
denom = (float)sampleSize;
denom = (denom * (denom - 1.f)) / 2.f; discordant = denom - concordant;
numer = concordant - discordant;
results[by*na+bx] = ((double)numer)/((double)denom);
}
}
// Device-side evaluation stub: always reports a fixed score of 40.
__device__ int evalChecker() {
    const int kFixedScore = 40;
    return kFixedScore;
}
|
6,814 | //These includes are for running on a personal computer
/*#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <device_functions.h>
#include <cuda.h>
#include <crt/host_defines.h>
*/
#include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <string>
#include <chrono>
// Global state for the boids simulation, shared across host helpers and
// kernel launches (the *_dev pointers hold device memory, *_host host memory).
float2* pos_dev;        // per-boid position (device)
float2* vel_dev;        // per-boid velocity (device)
float2* acc_dev;        // per-boid acceleration (device; allocated/zeroed but not read by the visible code)
float2* sep_dev;        // per-boid separation force (device; allocated/zeroed but not read by the visible code)
float2* align_dev;      // per-boid alignment force (device; allocated/zeroed but not read by the visible code)
float2* cohesion_dev;   // per-boid cohesion force (device; allocated/zeroed but not read by the visible code)
float2* pos_host;       // host mirror of pos_dev
float2* vel_host;       // host mirror of vel_dev
float2 averagePos;      // flock centre, recomputed on the host each tick
float2 averageForward;  // flock average heading, recomputed on the host each tick
// All of our hard-coded tunables for the simulation.
#define BlockSize 256
#define FLOCKING_RAD 50.0f
#define COHESION_STRENGTH 3.0f
#define ALIGNMENT_STRENGTH 5.0f
#define SEPARATION_STRENGTH 2.0f
#define SAFE_RADIUS 3.0f
#define MAX_SPEED 5.0f
//vector math functions for the 2d vectors -- naive
// Component-wise exact equality of two float2 vectors.
__device__ bool vector2dEquals(float2 a, float2 b) {
    return (a.x == b.x) && (a.y == b.y);
}
// Euclidean length of a 2D vector.
// Use the explicit single-precision sqrtf so no double-precision path is
// selected on the device (the bare `sqrt` spelling invited a promotion).
__device__ float calcLength(float2 vec) {
    return sqrtf(vec.x * vec.x + vec.y * vec.y);
}
// Euclidean distance between two 2D points.
// Use the explicit single-precision sqrtf (see calcLength).
__device__ float distance(float2 vec1, float2 vec2) {
    float finalDistance = sqrtf(((vec1.x - vec2.x)*(vec1.x - vec2.x)) + ((vec1.y - vec2.y)*(vec1.y - vec2.y)));
    return finalDistance;
}
// Component-wise vector subtraction: vec1 - vec2.
__device__ float2 subVecs(float2 vec1, float2 vec2) {
    return make_float2(vec1.x - vec2.x, vec1.y - vec2.y);
}
// Component-wise vector addition: vec1 + vec2.
__device__ float2 addVecs(float2 vec1, float2 vec2) {
    return make_float2(vec1.x + vec2.x, vec1.y + vec2.y);
}
// Divide both components of `vector` by `scalar` (no zero check — callers
// are responsible for passing a nonzero scalar).
__device__ float2 divideVec(float scalar, float2 vector) {
    return make_float2(vector.x / scalar, vector.y / scalar);
}
// Scale both components of `vector` by `scalar`.
__device__ float2 multiplyVec(float scalar, float2 vector) {
    return make_float2(vector.x * scalar, vector.y * scalar);
}
// Scale `vector` to unit length; vectors whose length is not positive
// (including the zero vector) are returned unchanged.
__device__ float2 normalize(float2 vector) {
    float len = calcLength(vector);
    if (len > 0) {
        return make_float2(vector.x / len, vector.y / len);
    }
    return vector;
}
//-----------------end vec funcs------------------------
// Average the host-side velocity (forward) vectors of all NBOIDS boids
// into the global averageForward.
__host__ void calc_average_forward(int NBOIDS) {
    int counted = 0;
    float2 total = make_float2(0.0, 0.0);
    for (int b = 0; b < NBOIDS; b++) {
        total.x += vel_host[b].x;
        total.y += vel_host[b].y;
        counted++;
    }
    averageForward.x = total.x / counted;
    averageForward.y = total.y / counted;
}
// Average the host-side positions of all NBOIDS boids into the
// global averagePos (the flock centre).
__host__ void calc_average_pos(int NBOIDS) {
    int counted = 0;
    float2 total = make_float2(0.0, 0.0);
    for (int b = 0; b < NBOIDS; b++) {
        total.x += pos_host[b].x;
        total.y += pos_host[b].y;
        counted++;
    }
    averagePos.x = total.x / counted;
    averagePos.y = total.y / counted;
}
// Advance each boid by its velocity (one thread per boid). Boids that have
// wandered past 10000 in x or y are reset to the origin first.
__global__ void updatePos(int numBoids, float2* vel_dev, float2* pos_dev) {
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= numBoids) {
        return;
    }
    if (pos_dev[idx].x > 10000.0f || pos_dev[idx].y > 10000.0f) {
        pos_dev[idx].x = 0;
        pos_dev[idx].y = 0;
    }
    pos_dev[idx] = addVecs(pos_dev[idx], vel_dev[idx]);
}
// Per-boid separation steering: push away from neighbours closer than twice
// the safe radius, weighted by how deep the neighbour is inside that radius.
// The calling thread's index selects the boid; others get a zero vector.
__device__ float2 calc_separation_accel(int numBoids, float2* pos_dev, float2* vel_dev) {
    int self = (blockIdx.x * blockDim.x) + threadIdx.x;
    float safeDist = SAFE_RADIUS + SAFE_RADIUS;  // twice the safe radius
    if (self >= numBoids) {
        return make_float2(0.0f, 0.0f);
    }
    float2 boidPos = make_float2(pos_dev[self].x, pos_dev[self].y);
    float2 boidVel = make_float2(vel_dev[self].x, vel_dev[self].y);
    float2 totalVel = make_float2(0.0f, 0.0f);
    for (int j = 0; j < numBoids; j++) {
        float2 siblingPos = pos_dev[j];
        float2 siblingVel = vel_dev[j];
        // Skip the boid itself (identified by matching position AND velocity).
        if (vector2dEquals(boidPos, siblingPos) && vector2dEquals(boidVel, siblingVel)) {
            continue;
        }
        float2 away = subVecs(boidPos, siblingPos);
        float dist = calcLength(away);
        if (dist < safeDist) {
            away = normalize(away);
            away = divideVec(safeDist, multiplyVec((safeDist - dist), away));
            totalVel = addVecs(totalVel, away);
        }
    }
    if (calcLength(totalVel) > 1) {
        totalVel = normalize(totalVel);
    }
    return multiplyVec(SEPARATION_STRENGTH, totalVel);
}
// Per-boid alignment steering: steer toward the flock's average heading,
// expressed as a fraction of the maximum speed and capped at unit length.
__device__ float2 calc_alignment_accel(int numBoids, float2 averageForward) {
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= numBoids) {
        return make_float2(0.0f, 0.0f);
    }
    float2 accel = divideVec(MAX_SPEED, averageForward);
    if (calcLength(accel) > 1) {
        accel = normalize(accel);
    }
    return multiplyVec(ALIGNMENT_STRENGTH, accel);
}
// Per-boid cohesion steering: pull toward the flock's average position,
// scaled down smoothly when the boid is already inside the flocking radius.
__device__ float2 calc_cohesion_accel(int numBoids, float2 averagePos, float2* pos_dev) {
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= numBoids) {
        return make_float2(0.0f, 0.0f);
    }
    float2 accel = subVecs(averagePos, pos_dev[idx]);
    // BUG FIX: the falloff must depend on the distance to the flock centre
    // (the length of `accel`), not on the boid's distance from the world
    // origin as the original calcLength(pos_dev[i]) computed.
    float dist = calcLength(accel);
    accel = normalize(accel);
    if (dist < FLOCKING_RAD) {
        accel = multiplyVec(dist, accel);
        accel = divideVec(FLOCKING_RAD, accel);
    }
    return multiplyVec(COHESION_STRENGTH, accel);
}
// Zero-initialize every per-boid state vector on the device
// (one thread per boid).
__global__ void generateInitialPosition(int numBoids, float2* pos_dev, float2* vel_dev, float2* acc_dev, float2* sep_dev, float2* align_dev, float2* cohesion_dev) {
    int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= numBoids) {
        return;
    }
    float2 zero = make_float2(0.0f, 0.0f);
    pos_dev[idx] = zero;
    vel_dev[idx] = zero;
    acc_dev[idx] = zero;
    sep_dev[idx] = zero;
    align_dev[idx] = zero;
    cohesion_dev[idx] = zero;
}
// One-time setup: allocate the device and host boid arrays, zero the device
// state, seed random host velocities, and sync the host/device mirrors.
__host__ void startCuda(int numBoids) {
//printf("\nDefining cuda variables\n");
// Enough blocks of BlockSize threads to cover every boid.
dim3 fullBlocksPerGrid((int)ceil(float(numBoids) / float(BlockSize)));
// Malloc for device
cudaMalloc((void**)&pos_dev, numBoids * sizeof(float2));
cudaMalloc((void**)&vel_dev, numBoids * sizeof(float2));
cudaMalloc((void**)&acc_dev, numBoids * sizeof(float2));
cudaMalloc((void**)&sep_dev, numBoids * sizeof(float2));
cudaMalloc((void**)&align_dev, numBoids * sizeof(float2));
cudaMalloc((void**)&cohesion_dev, numBoids * sizeof(float2));
// Malloc for host
pos_host = (float2*)malloc(numBoids * sizeof(float2));
vel_host = (float2*)malloc(numBoids * sizeof(float2));
// Random initial velocity per boid, components in [0, 1]
for (int i = 0; i < numBoids; i++) {
vel_host[i].x = ((float) rand() / (RAND_MAX));
vel_host[i].y = ((float) rand() / (RAND_MAX));
}
// Setup kernel: zero all device arrays, THEN overwrite the device
// velocities with the random host values and refresh the host mirrors.
//printf("\nGenerating initial position\n");
generateInitialPosition<<<fullBlocksPerGrid, BlockSize>>>(numBoids, pos_dev, vel_dev, acc_dev, sep_dev, align_dev, cohesion_dev);
cudaMemcpy(vel_dev, vel_host, numBoids * sizeof(float2), cudaMemcpyHostToDevice);
cudaMemcpy(pos_host, pos_dev, numBoids * sizeof(float2), cudaMemcpyDeviceToHost);
cudaMemcpy(vel_host, vel_dev, numBoids * sizeof(float2), cudaMemcpyDeviceToHost);
//for debugging
/*printf("after\n");
for (int i = 0; i < numBoids; i++) {
printf("x = %f, y = %f\n", vel_host[i].x, vel_host[i].y);
}*/
}
// Per-boid steering update: accumulate the cohesion, separation and alignment
// forces into the boid's velocity, then clamp its speed to 50 units/tick.
// One thread per boid; averagePos/averageForward are computed on the host.
__global__ void update(int numBoids, float2 averagePos, float2 averageForward, float2* pos_dev, float2* vel_dev, float2* acc_dev, float2* sep_dev, float2* align_dev, float2* cohesion_dev) {
    // NOTE(review): removed the unused per-thread dim3 grid computation and
    // the dead `calcLength(...) < 0.0f` branch — a length is never negative.
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (i < numBoids) {
        //cohesion
        float2 cohesion = calc_cohesion_accel(numBoids, averagePos, pos_dev);
        //separation
        float2 separation = calc_separation_accel(numBoids, pos_dev, vel_dev);
        //alignment
        float2 alignment = calc_alignment_accel(numBoids, averageForward);
        vel_dev[i] = addVecs(vel_dev[i], cohesion);
        vel_dev[i] = addVecs(vel_dev[i], separation);
        vel_dev[i] = addVecs(vel_dev[i], alignment);
        // Clamp the speed to 50 units per tick.
        if (calcLength(vel_dev[i]) > 50.0f) {
            vel_dev[i] = normalize(vel_dev[i]);
            vel_dev[i] = multiplyVec(50.0f, vel_dev[i]);
        }
    }
}
// Main driver: run the boids simulation for the requested number of ticks
// and print the elapsed wall-clock time in milliseconds.
__host__ int main(int argc, char* argv[])
{
    using std::chrono::high_resolution_clock;
    using std::chrono::duration;
    // Takes 2 arguments: number of boids and number of iterations.
    // BUG FIX: missing arguments previously crashed inside std::stoi(NULL).
    if (argc < 3) {
        fprintf(stderr, "usage: %s <numBoids> <iterations>\n", argv[0]);
        return 1;
    }
    auto t1 = high_resolution_clock::now();
    int numB = std::stoi(argv[1]);
    int iterations = std::stoi(argv[2]);
    dim3 fullBlocksPerGrid((int)ceil(float(numB) / float(BlockSize)));
    startCuda(numB);
    for (int i = 0; i < iterations; i++) {
        // Pull the device state back so the host can compute the flock
        // averages for this tick.
        cudaMemcpy(vel_host, vel_dev, numB * sizeof(float2), cudaMemcpyDeviceToHost);
        cudaMemcpy(pos_host, pos_dev, numB * sizeof(float2), cudaMemcpyDeviceToHost);
        calc_average_pos(numB);
        calc_average_forward(numB);
        update<<<fullBlocksPerGrid, BlockSize>>>(numB, averagePos, averageForward, pos_dev, vel_dev, acc_dev, sep_dev, align_dev, cohesion_dev);
        updatePos<<<fullBlocksPerGrid, BlockSize>>>(numB, vel_dev, pos_dev);
    }
    cudaFree(pos_dev);
    cudaFree(vel_dev);
    cudaFree(acc_dev);
    cudaFree(sep_dev);
    cudaFree(align_dev);
    cudaFree(cohesion_dev);
    free(pos_host);
    // BUG FIX: vel_host was leaked.
    free(vel_host);
    auto t2 = high_resolution_clock::now();
    duration<double, std::milli> ms_double = t2 - t1;
    // BUG FIX: printf cannot format a std::chrono::duration object (undefined
    // behavior); pass the underlying double via count().
    printf("%f", ms_double.count());
    return 0;
}
|
6,815 | #include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
#define RANDOM_MN_RANGE 64
// Dense matrix with its elements stored contiguously on either host or
// device (the struct itself is passed by value; `elements` carries the
// address space).
struct Matrix {
int width;
int height;
// contiguously stored Matrix, in row first order
float *elements;
};
// Naive matrix multiply C = A * B, one thread per output element.
// Threads beyond the matrix edge (from the rounded-up grid) exit early.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard: with a ceil-divided grid some threads fall outside C.
    if (row >= C.height || col >= C.width)
        return;
    // Dot product of A's row with B's column.
    float tmpVal = 0;
    for (int i = 0; i < A.width; ++i)
        tmpVal += A.elements[row * A.width + i] *
                  B.elements[i * B.width + col];
    C.elements[row * C.width + col] = tmpVal;
}
extern "C" {
// Host wrapper: copies A and B to the device, launches MatMulKernel,
// and copies the product back into C (C->elements must be preallocated).
void mMul( Matrix *A, Matrix *B, Matrix *C ){
    Matrix d_A, d_B, d_C;
    // Matrix d_A
    d_A.width = A->width;
    d_A.height = A->height;
    size_t sizeA = A->width * A->height * sizeof(float);
    cudaMalloc(&d_A.elements, sizeA);
    cudaMemcpy(d_A.elements, A->elements, sizeA, cudaMemcpyHostToDevice);
    // Matrix d_B
    d_B.width = B->width;
    d_B.height = B->height;
    size_t sizeB = B->width * B->height * sizeof(float);
    cudaMalloc(&d_B.elements, sizeB);
    cudaMemcpy(d_B.elements, B->elements, sizeB, cudaMemcpyHostToDevice);
    // Matrix d_C (result, device side only until the copy-back)
    d_C.width = C->width;
    d_C.height = C->height;
    size_t sizeC = C->width * C->height * sizeof(float);
    cudaMalloc(&d_C.elements, sizeC);
    // 16 * 16 = 256 threads per block
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // BUG FIX: the grid was computed with truncating division, which
    // silently skipped partial tiles when the dimensions are not
    // multiples of BLOCK_SIZE. Round up instead (the kernel bounds-checks).
    dim3 dimGrid((B->width + dimBlock.x - 1) / dimBlock.x,
                 (A->height + dimBlock.y - 1) / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // copy results from result matrix C to the host again
    cudaMemcpy(C->elements, d_C.elements, sizeC, cudaMemcpyDeviceToHost);
    printf("A is %f\n", A->elements[0]);
    printf("B is %f\n", B->elements[0]);
    printf("C is %f\n", C->elements[0]);
    // free the cuda memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
}
/*
void fillMatrix(Matrix *mX){
// we have width * height elements in our matrix
int mXsz = mX->height * mX->width;
// we are allocating the values for float array
mX->elements = (float*)malloc(sizeof(float) * mXsz);
// we loop through the range of all elements
for (int i = 0; i < mXsz; i++){
// filling it with a random value between 0 and 1
// mX->elements[i] = (float) rand()/RAND_MAX;
mX->elements[i] = (float) 1.0;
}
}
int main(){
// start the random number generator
srand((unsigned int)time(NULL));
// allocating memory space to all three Matrices
Matrix *pmA = (Matrix*) malloc(sizeof(Matrix));
Matrix *pmB = (Matrix*) malloc(sizeof(Matrix));
Matrix *pmC = (Matrix*) malloc(sizeof(Matrix));
int mSize = 1<<4, nSize = 1<<6;
// assign values to members height, width
pmA->width = nSize, pmA->height = mSize;
pmB->width = nSize, pmB->height = mSize;
pmC->width = nSize, pmC->height = mSize;
fillMatrix(pmA);
fillMatrix(pmB);
int nmSize = mSize * nSize;
pmC->elements = (float*)calloc(nmSize, sizeof(float));
mMul(pmA, pmB, pmC);
for (int i = 0; i < pmC->width * pmC->height; i++){
printf("%f\n", pmC->elements[i]);
}
free(pmA);
free(pmB);
free(pmC);
return 0;
}
*/
|
6,816 | #include <iostream>
#include <stdio.h>
#include <math.h>
#define ni 4096
#define nn 1024
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error (message, file, line) to stderr; used through the
// gpuErrchk() macro above, which supplies __FILE__/__LINE__.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
// abort defaults to true: terminate the process with the error code.
if (abort) exit(code);
}
}
// Fill a[0..N-1] with pseudo-random values from rand().
void random_ints(int* a, int N)
{
    for (int idx = 0; idx < N; ++idx)
        a[idx] = rand();
}
// Zero-initialize a[0..N-1].
void zeros(int* a, int N)
{
    for (int idx = N; idx-- > 0; )
        a[idx] = 0;
}
// CURRENT MEMORY PERFORMANCE = 14.23 GB/s
// perform 1 column of the matrix-vector multiply (1 input, 1 weights vector)
// this means that the batch size is 1(?)
// the dimensions of the weights matrix are (ni, nn) => 2D array
// the full input is a vector of dimension ni (represented as an array)
// the full output is a vector of dimension nn (represented as an array)
// this is what is done in a fully-connected classifier layer
// this method utilizes a scratchpad memory for better thread block performance
// Matrix-vector product for a fully-connected layer, one block per input
// element (ni blocks) and one thread per output element (nn threads).
// Each block stages its weight column and input value in shared memory,
// then accumulates its contribution into the global output vector.
__global__
void matrix_vector_mult(int *inp, int *outp, int *kern)
{
    // scratchpad memory used for shared variables
    __shared__ int temp_kern[nn]; // this block's column of the weights
    __shared__ int temp_inp;      // this block's single input value
    // Thread 0 stages the shared data for the whole block.
    if (threadIdx.x == 0) {
        for (int i = 0; i < nn; i++) {
            int k_index = blockIdx.x + i*ni; // weights matrix is row-major
            temp_kern[i] = kern[k_index];
        }
        temp_inp = inp[blockIdx.x];
    }
    __syncthreads(); // shared data must be visible before any thread reads it
    // BUG FIX: every block updates outp[threadIdx.x], so a plain `+=` is a
    // read-modify-write race across blocks that loses updates. Accumulate
    // with an atomic add instead.
    atomicAdd(&outp[threadIdx.x], temp_inp * temp_kern[threadIdx.x]);
}
// Host driver: builds a random input vector and weight matrix, runs the
// matrix-vector product on the GPU, and copies the result back.
int main(void)
{
    // host + device pointers
    int *inp, *outp, *kern;
    int *d_inp, *d_outp, *d_kern;
    // element counts
    int i_size = ni;
    int o_size = nn;
    int k_size = nn*ni;
    // allocate space for each array on the device
    gpuErrchk( cudaMalloc(&d_inp, i_size*sizeof(int)) );
    gpuErrchk( cudaMalloc(&d_outp, o_size*sizeof(int)) );
    gpuErrchk( cudaMalloc(&d_kern, k_size*sizeof(int)) );
    // allocate space and populate each array on the host
    inp = (int*)malloc(i_size*sizeof(int));
    outp = (int*)malloc(o_size*sizeof(int));
    kern = (int*)malloc(k_size*sizeof(int));
    random_ints(inp, i_size);
    zeros(outp, o_size);
    random_ints(kern, k_size);
    // copy populated host arrays to corresponding device arrays
    gpuErrchk( cudaMemcpy(d_inp, inp, i_size*sizeof(int), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(d_outp, outp, o_size*sizeof(int), cudaMemcpyHostToDevice) );
    gpuErrchk( cudaMemcpy(d_kern, kern, k_size*sizeof(int), cudaMemcpyHostToDevice) );
    // launch: one block per column (ni), one thread per row (nn)
    matrix_vector_mult<<<ni, nn>>>(d_inp, d_outp, d_kern);
    // determine if run succeeded
    gpuErrchk( cudaPeekAtLastError() );
    gpuErrchk( cudaDeviceSynchronize() );
    // BUG FIX: the copy size was `o_size` (an element count, not a byte
    // count), so only a quarter of the result reached the host. Scale by
    // sizeof(int).
    gpuErrchk( cudaMemcpy(outp, d_outp, o_size*sizeof(int), cudaMemcpyDeviceToHost) );
    // free all memory
    free(inp); free(outp); free(kern);
    gpuErrchk( cudaFree(d_inp) ); gpuErrchk( cudaFree(d_outp) ); gpuErrchk( cudaFree(d_kern) );
    return 0;
}
6,817 | //Just your regular Hello World file
// to be compiled with nvcc rather than gcc
#include <stdio.h>
// Device-side hello: each launched thread prints its own threadIdx.x.
__global__ void helloFromGPU(void) {
printf("Hello World from GPU, thread %d\n",threadIdx.x);
}
int main(void) {
// Host-side print, then one block of 10 device threads.
printf("Hello World from CPU!\n");
helloFromGPU<<<1, 10>>>();
// Flushes buffered device printf output and tears down the context.
cudaDeviceReset();
return 0;
}
|
6,818 | #include "includes.h"
// Zero the forward output of this layer: one thread per neuron, with the
// (possibly 2-D) grid flattened into a single linear neuron index.
__global__ void NegativeCorrelationForwardResetKernel( float* outputPtr, int thisLayerSize )
{
    int neuronId = threadIdx.x
        + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    // Guard the grid tail, then reset this neuron's output.
    if (neuronId < thisLayerSize)
        outputPtr[neuronId] = 0;
}
6,819 | // One of the possible optimizations to the implementation in fft.cu
//
// The algorithm is as follows:
//
// for (s = 0 to log2(N)) ........ loop1
// m = 2^i;
// statements
// for (j=0;j<N;j+=m) ....... loop2
// for (k=0 to m/2) ..... loop3
// statements
//
// The second and third loop are complementary to each other in the sense that
// while loop2 runs for n/m values of j, loop3 runs for m/2 values of k. With
// larger values of m, loop3 does more work, while for smaller values of m, loop2
// does more work, so in this optimization, we plan to separate out the two cases
// and achieve parallelization of both outer and inner loops
//
// The point of separation needs to be experimentally arrived at, i.e, we need to
// test out different cases and choose the best of the lot. As a first commit in
// the optimization, we have adopted a naive approach and check the value of m/2.
// If the value of m/2 is less than the maximum number of threads that can be
// spawned by a block, we parallelize loop2, and loop3 otherwise.
//
//
#include <stdio.h>
#include <cmath>
#include <cuda.h>
typedef float2 Complex;
#define THREADS 32
#define MAX_NO_OF_THREADS_PER_BLOCK 1024
const long long ARRAY_SIZE = 65536;
const long long ARRAY_BYTES = ARRAY_SIZE * sizeof(Complex);
// Bit reversal re-ordering, first step of FFT
// Each thread copies element `id` to position bit-reverse(id). Only the
// low s bits of the index are significant, so the 32-bit __brev result
// is shifted down by (32-s).
__global__ void bit_reverse_reorder(Complex *d_rev, Complex *d_a, int s) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int rev = __brev(id) >> (32-s);
if(id < ARRAY_SIZE)
d_rev[rev] = d_a[id];
}
// Work of the innermost loop, common to both parallelization
// One radix-2 butterfly on the pair (a[j+k], a[j+k+m/2]) with twiddle
// factor w = e^(-2*pi*i*k/m). Uses the fast-math __cosf/__sinf
// intrinsics, trading a few ulps of precision for speed.
__device__ void inplace_fft(Complex *a, int j, int k, int m){
if (j+k+m/2 < ARRAY_SIZE){
Complex w, t, u;
// w^k (w is root of unity)
w.x = __cosf((2*M_PI*k)/m);
w.y = -__sinf((2*M_PI*k)/m);
// u = a[j+k]
u.x = a[j+k].x;
u.y = a[j+k].y;
// t = w*a[j+k+m/2];  (complex multiply, expanded into real/imag parts)
t.x = w.x*a[j+k+m/2].x - w.y*a[j+k+m/2].y;
t.y = w.x*a[j+k+m/2].y + w.y*a[j+k+m/2].x;
// a[j+k] = u+t;
a[j+k].x = u.x + t.x;
a[j+k].y = u.y + t.y;
// a[j+k+m/2] = u-t;
a[j+k+m/2].x = u.x - t.x;
a[j+k+m/2].y = u.y - t.y;
}
}
// Parallelization of loop2: one thread per butterfly group of width m;
// each thread walks its half-group serially.
__global__ void fft_outer(Complex *a, int m){
    int group = blockIdx.x * blockDim.x + threadIdx.x;
    int j = group * m;            // start index of this thread's group
    if (j >= ARRAY_SIZE)
        return;
    for (int k = 0; k < m / 2; ++k)
        inplace_fft(a, j, k, m);
}
// Parallelization of loop3: one thread per butterfly inside the group
// starting at index j.
__global__ void fft_inner(Complex *a, int j, int m){
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k >= m / 2)
        return;
    inplace_fft(a, j, k, m);
}
// FFT driver: builds a cosine test signal, bit-reverse reorders it on the
// GPU, then runs log2(N) butterfly stages, choosing outer- or inner-loop
// parallelization per stage based on m/2 vs the per-block thread limit.
int main() {
    // Host arrays for input and output.
    // FIX: each array is ARRAY_SIZE * sizeof(Complex) = 512 KiB; as plain
    // locals they risk overflowing a small thread stack, so keep them in
    // static storage instead.
    static Complex h_a[ARRAY_SIZE];
    static Complex h_rev[ARRAY_SIZE];
    // Input signal, complex part remains zero.
    // Signal is of the form sin(2*M_PI*f*x/N) or cos(2*M_PI*f*x/N)
    // N is sample size and is always a power of 2
    for(int i = 0; i < ARRAY_SIZE; i++) {
        h_a[i].x = cos((6.0*M_PI*i)/ARRAY_SIZE);
        h_a[i].y = 0.0;
    }
    // No of bits required to represent N
    int s = (int)ceil(log2(ARRAY_SIZE));
    // Device arrays
    Complex *d_a, *d_rev;
    cudaMalloc((void**) &d_a, ARRAY_BYTES);
    cudaMalloc((void**) &d_rev, ARRAY_BYTES);
    cudaMemcpy(d_a, h_a, ARRAY_BYTES, cudaMemcpyHostToDevice);
    // First step in FFT: swap element 'i' with the element at the
    // bit-string reversal of 'i'.
    bit_reverse_reorder<<<(ARRAY_SIZE+THREADS-1)/THREADS, THREADS>>>(d_rev, d_a, s);
    cudaDeviceSynchronize();
    // FFT driver loop: stage i works on butterfly groups of width m = 2^i.
    for (int i=1;i<=s;i++){
        int m = 1 << i;
        if (m/2 < MAX_NO_OF_THREADS_PER_BLOCK){
            // Few butterflies per group: parallelize over groups (loop2).
            fft_outer<<<((ARRAY_SIZE/m)+THREADS-1)/THREADS,THREADS>>>(d_rev,m);
        } else {
            // Many butterflies per group: parallelize inside each group (loop3).
            for (int j=0;j<ARRAY_SIZE;j+=m){
                fft_inner<<<((m/2)+THREADS-1)/THREADS,THREADS>>>(d_rev,j,m);
            }
        }
    }
    // Copy result array from device to host
    cudaMemcpy(h_rev,d_rev,ARRAY_BYTES,cudaMemcpyDeviceToHost);
    // Free allocated device memory
    cudaFree(d_a);
    cudaFree(d_rev);
    return 0;
}
6,820 | #include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
// TODO
// check if file parameter is present
// create array to reference heater locations instead of recalculating values
// make use of cuda shared memory
// restructure project
// https://docs.nvidia.com/cuda/parallel-thread-execution/index.html#independent-thread-scheduling
// One rectangular (2-D) or box-shaped (3-D) region held at a fixed
// temperature throughout the simulation.
struct block{
uint32_t x; // origin, in grid coordinates
uint32_t y;
uint32_t z; // 0 for 2-D configurations
uint32_t width;
uint32_t height;
uint32_t depth; // 1 for 2-D configurations
uint32_t size; // width * height * depth, precomputed in line_to_block()
float temp; // temperature re-applied to the region every timestep
};
// Simulation parameters parsed from the config file by set_config_values().
struct config_values {
bool is_3d; // true for a 3-D grid, false for 2-D
bool padding[3]; // explicit struct padding
float k; // diffusion coefficient applied per timestep
uint32_t num_timesteps;
uint32_t grid_width;
uint32_t grid_height;
uint32_t grid_depth; // 1 when is_3d is false
float starting_temp; // initial temperature of every non-fixed cell
std::vector<block> blocks; // fixed-temperature regions
};
// Parse one comma-separated config line into a block record.
//   2-D layout: x,y,width,height,temp
//   3-D layout: x,y,z,width,height,depth,temp
// z defaults to 0 and depth to 1 for 2-D lines; size is precomputed.
block line_to_block(bool is_3d, std::string & line) {
    block parsed;
    std::string rest = line;
    // Pop the field before the next comma and advance past it; when no
    // comma remains, find() yields npos and npos+1 == 0 leaves the tail
    // intact, matching the substr idiom used elsewhere in this file.
    auto next_field = [&rest]() {
        std::string field = rest.substr(0, rest.find(','));
        rest = rest.substr(rest.find(',') + 1);
        return field;
    };
    parsed.x = atoi(next_field().c_str());
    parsed.y = atoi(next_field().c_str());
    parsed.z = is_3d ? atoi(next_field().c_str()) : 0;
    parsed.width = atoi(next_field().c_str());
    parsed.height = atoi(next_field().c_str());
    parsed.depth = is_3d ? atoi(next_field().c_str()) : 1;
    parsed.temp = atof(next_field().c_str());
    parsed.size = parsed.width * parsed.height * parsed.depth;
    return parsed;
}
// Parse the config file into `conf`. Expected order of data lines:
// dimensionality (2/3), k, timestep count, grid size (w,h[,d]), starting
// temperature, then one fixed-temperature block per remaining line.
// Lines not beginning with a digit or '.' are skipped as comments.
void set_config_values(config_values & conf, std::string & file_name) {
    std::string buf;
    std::string token;
    uint8_t count = 0;
    std::ifstream conf_file(file_name);
    if (!conf_file) {
        std::cerr << "Error opening config file.\n";
    } else {
        // BUG FIX: looping on !eof() processes the final buffer again after
        // a failed read; test the stream state returned by getline instead.
        while (std::getline(conf_file, buf)) {
            buf.erase(std::remove_if( buf.begin(), buf.end(), ::isspace ), buf.end());
            // Filter out blank lines and lines that don't start with a number
            if (!buf.empty() &&
                (buf[0] == '.' || (buf[0] != '#' && (buf[0] >= '0' && buf[0] <= '9')))) {
                switch (count) {
                case 0:
                    conf.is_3d = (buf[0] == '3');
                    break;
                case 1:
                    conf.k = atof(buf.c_str());
                    break;
                case 2:
                    conf.num_timesteps = atoi(buf.c_str());
                    break;
                case 3:
                    // GRID SIZE: width,height[,depth]
                    conf.grid_width = atoi(buf.substr(0, buf.find(',')).c_str());
                    token = buf.substr(buf.find(',') + 1);
                    conf.grid_height = atoi(token.substr(0, token.find(',')).c_str());
                    if (conf.is_3d) {
                        conf.grid_depth = atoi(token.substr(token.find(',') + 1).c_str());
                    } else {
                        conf.grid_depth = 1;
                    }
                    break;
                case 4:
                    conf.starting_temp = atof(buf.c_str());
                    break;
                default:
                    // Remaining data lines describe fixed-temperature blocks
                    conf.blocks.push_back(line_to_block(conf.is_3d, buf));
                    break;
                }
                ++count;
            }
        }
    }
    conf_file.close();
}
// Set every element of a[0..size-1] to `value`; one thread per element.
// The flat index is computed with an explicit PTX mad.lo.u32
// (idx = blockIdx.x * blockDim.x + threadIdx.x), matching the other
// kernels in this file.
__global__ void init_grid_values(float * a, int size, float value) {
//int idx = threadIdx.x + blockIdx.x * blockDim.x;
uint idx;
uint blkID = blockIdx.x;
uint blkDim = blockDim.x;
uint thrID = threadIdx.x;
asm("mad.lo.u32 %0, %1, %2, %3;" : "=r"(idx) : "r"(blkID), "r"(blkDim), "r"(thrID));
if (idx < size) {
a[idx] = value;
}
}
// Element-wise device copy lhs[i] = rhs[i] for i in [0, size); one thread
// per element, flat index via PTX mad.lo.u32 as in the other kernels.
__global__ void copy_array_elements(float * lhs, float * rhs, int size) {
//int idx = threadIdx.x + blockIdx.x * blockDim.x;
uint idx;
uint blkID = blockIdx.x;
uint blkDim = blockDim.x;
uint thrID = threadIdx.x;
asm("mad.lo.u32 %0, %1, %2, %3;" : "=r"(idx) : "r"(blkID), "r"(blkDim), "r"(thrID));
if (idx < size) {
lhs[idx] = rhs[idx];
}
}
// Stamp `value` into the w x h (x implicit depth) region of the grid whose
// origin is (x, y, z); one thread per region cell (`size` = region cell
// count). The comment block below derives the general 4-D indexing; the
// code uses its 3-D specialization.
__global__ void place_fixed_temp_block(float * array, int array_width, int array_height, int x, int y, int z, int w, int h, float value, int size) {
//int idx = threadIdx.x + blockIdx.x * blockDim.x;
uint idx;
uint blkID = blockIdx.x;
uint blkDim = blockDim.x;
uint thrID = threadIdx.x;
asm("mad.lo.u32 %0, %1, %2, %3;" : "=r"(idx) : "r"(blkID), "r"(blkDim), "r"(thrID));
// 4d start = x
// + (y * array_width)
// + (z * array_width * array_height)
// + (a * array_width * array_height * array_depth)
// 4d offset = (idx % w)
// + array_width * (idx % (w * h)) / w)
// + (array_width * array_height) * ((idx % (w * h * d)) / (w * h))
// + (array_width * array_height * array_depth) * (idx / (w * h * d))
if (idx < size) {
// Map the flat region index onto grid coordinates.
int start = x + (y * array_width) + (z * array_width * array_height);
int index = start + (idx % w) + array_width * ((idx % (w * h)) / w) + (array_width * array_height) * (idx / (w * h));
array[index] = value;
}
}
// One explicit-Euler heat-diffusion timestep in 3-D. Each thread updates
// new_grid[idx] from up to six axis neighbours in old_grid; cells on a
// boundary simply skip the missing neighbour terms. `width` is the
// x-extent and `area` (= width * height) is one z-slice.
__global__ void mono_3d (float * old_grid, float * new_grid, int size, int width, float k, int area) {
    // Flat thread index via PTX mad.lo.u32
    // (idx = blockIdx.x * blockDim.x + threadIdx.x), as in the other
    // kernels in this file.
    uint idx;
    uint blkID = blockIdx.x;
    uint blkDim = blockDim.x;
    uint thrID = threadIdx.x;
    asm("mad.lo.u32 %0, %1, %2, %3;" : "=r"(idx) : "r"(blkID), "r"(blkDim), "r"(thrID));
    if (idx < size) {
        // BUG FIX: these loads were previously issued before the bounds
        // check, giving the grid's tail threads an out-of-range global read.
        float oldValue = old_grid[idx];
        float * newValueLoc = &new_grid[idx];
        if (idx % width != 0) {
            // not a left edge
            *newValueLoc += k * (old_grid[idx - 1] - oldValue);
        }
        if (idx % width != width - 1) {
            // not a right edge
            *newValueLoc += k * (old_grid[idx + 1] - oldValue);
        }
        if (idx % area >= width) {
            // not a top edge
            *newValueLoc += k * (old_grid[idx - width] - oldValue);
        }
        if (idx % area < area - width) {
            // not a bottom edge
            *newValueLoc += k * (old_grid[idx + width] - oldValue);
        }
        if (idx >= area) {
            // not a front edge
            *newValueLoc += k * (old_grid[idx - area] - oldValue);
        }
        if (idx < size - area) {
            // not a back edge
            *newValueLoc += k * (old_grid[idx + area] - oldValue);
        }
    }
}
// One explicit-Euler heat-diffusion timestep in 2-D (a single z-slice;
// `area` = width * height). Each thread updates new_grid[idx] from up to
// four in-plane neighbours in old_grid, skipping missing edge neighbours.
__global__ void mono_2d (float * old_grid, float * new_grid, int size, int width, float k, int area) {
    uint idx;
    uint blkID = blockIdx.x;
    uint blkDim = blockDim.x;
    uint thrID = threadIdx.x;
    asm("mad.lo.u32 %0, %1, %2, %3;" : "=r"(idx) : "r"(blkID), "r"(blkDim), "r"(thrID));
    if (idx < size) {
        // BUG FIX: these loads were previously issued before the bounds
        // check, giving the grid's tail threads an out-of-range global read.
        float oldValue = old_grid[idx];
        float * newValueLoc = &new_grid[idx];
        if (idx % width != 0) {
            // not a left edge
            *newValueLoc += k * (old_grid[idx - 1] - oldValue);
        }
        if (idx % width != width - 1) {
            // not a right edge
            *newValueLoc += k * (old_grid[idx + 1] - oldValue);
        }
        if (idx % area >= width) {
            // not a top edge
            *newValueLoc += k * (old_grid[idx - width] - oldValue);
        }
        if (idx % area < area - width) {
            // not a bottom edge
            *newValueLoc += k * (old_grid[idx + width] - oldValue);
        }
    }
}
// Re-stamp every fixed-temperature block onto new_grid (called after each
// diffusion step so the fixed regions stay at their configured value).
void copy_fixed_blocks (config_values & conf, int TPB, float *new_grid) {
    for (size_t block_idx = 0; block_idx < conf.blocks.size(); ++block_idx) {
        int blocks = (conf.blocks[block_idx].size + TPB - 1) / TPB;
        place_fixed_temp_block<<<blocks, TPB>>>(new_grid, conf.grid_width, conf.grid_height,
            conf.blocks[block_idx].x, conf.blocks[block_idx].y, conf.blocks[block_idx].z,
            conf.blocks[block_idx].width, conf.blocks[block_idx].height, conf.blocks[block_idx].temp, conf.blocks[block_idx].size);
        // FIX: cudaThreadSynchronize() is deprecated;
        // cudaDeviceSynchronize() is its documented replacement.
        cudaDeviceSynchronize();
    }
}
// Write host_grid to "heatOutput.csv": one CSV row per grid row, with a
// blank line between z-slices. The last slice, its last row, and that
// row's last element are emitted by the trailing loops so no row ends
// with a trailing comma.
void output_final_values (config_values & conf, float * host_grid) {
std::ofstream out_file("heatOutput.csv");
int index = 0;
// All slices except the last (zero iterations when grid_depth == 1).
for (int layer = 0; layer < conf.grid_depth - 1; ++layer) {
for (int row = 0; row < conf.grid_height; ++row) {
for(int col = 0; col < conf.grid_width - 1; ++col) {
out_file << host_grid[index++] << ", ";
}
out_file << host_grid[index++] << '\n';
}
out_file << '\n';
}
// Last slice: every row but the final one...
for (int row = 0; row < conf.grid_height - 1; ++row) {
for(int col = 0; col < conf.grid_width - 1; ++col) {
out_file << host_grid[index++] << ", ";
}
out_file << host_grid[index++] << '\n';
}
// ...then the final row, closed by its last element.
for(int col = 0; col < conf.grid_width - 1; ++col) {
out_file << host_grid[index++] << ", ";
}
out_file << host_grid[index++] << '\n';
}
// Drive the heat-diffusion simulation: parse the config named by argv[1],
// run num_timesteps diffusion steps on the GPU, then dump the grid to CSV.
// Prints setup time and simulation time (microseconds) to stdout.
int main(int argc, char * argv[]) {
    auto start = std::chrono::high_resolution_clock::now();
    // FIX: argv[1] was read unconditionally; require the config path.
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <config file>\n";
        return 1;
    }
    config_values conf;
    std::string file_name(argv[1]);
    set_config_values(conf, file_name);
    int TPB = 512; // could change to a define (need to edit copy_fixed_blocks())
    int area = conf.grid_width * conf.grid_height;
    int size = area * conf.grid_depth;
    int num_blocks = (size + TPB - 1) / TPB;
    float * new_grid;
    float * old_grid;
    float * host_grid = new float[size];
    auto stop = std::chrono::high_resolution_clock::now();
    std::chrono::duration<double> duration = (stop - start);
    std::cout << duration.count() * 1000 * 1000 << "us" << '\n';
    start = std::chrono::high_resolution_clock::now();
    cudaMalloc((void**) & new_grid, size * sizeof(float));
    cudaMalloc((void**) & old_grid, size * sizeof(float));
    init_grid_values<<<num_blocks, TPB>>>(new_grid, size, conf.starting_temp);
    // FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    copy_fixed_blocks(conf , TPB, new_grid);
    cudaDeviceSynchronize();
    // Double-buffered stepping: copy new->old, diffuse old->new, then
    // re-stamp the fixed-temperature blocks.
    if (conf.is_3d) {
        for (int i = 0; i < conf.num_timesteps; ++i) {
            copy_array_elements<<<num_blocks, TPB>>>(old_grid, new_grid, size); // old = new
            cudaDeviceSynchronize();
            mono_3d<<<num_blocks, TPB>>>(old_grid, new_grid, size, conf.grid_width, conf.k, area);
            cudaDeviceSynchronize();
            copy_fixed_blocks(conf, TPB, new_grid);
            cudaDeviceSynchronize();
        }
    } else {
        for (int i = 0; i < conf.num_timesteps; ++i) {
            copy_array_elements<<<num_blocks, TPB>>>(old_grid, new_grid, size); // old = new
            cudaDeviceSynchronize();
            mono_2d<<<num_blocks, TPB>>>(old_grid, new_grid, size, conf.grid_width, conf.k, area);
            cudaDeviceSynchronize();
            copy_fixed_blocks(conf, TPB, new_grid);
            cudaDeviceSynchronize();
        }
    }
    cudaMemcpy(host_grid, new_grid, size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(old_grid);
    cudaFree(new_grid);
    stop = std::chrono::high_resolution_clock::now();
    duration = (stop - start);
    std::cout << duration.count() * 1000 * 1000 << "us" << '\n';
    // Output host_grid values to file
    output_final_values(conf, host_grid);
    delete[] host_grid;
    return 0;
}
6,821 | #include "includes.h"
// One phase of an odd-even transposition sort pass on x[0..n-1].
// One block per candidate pair; I == 0 compares the even-aligned pairs
// (0,1), (2,3), ... and I == 1 the odd-aligned pairs (1,2), (3,4), ...
__global__ void oddeven(int* x,int I,int n)
{
    int pair = blockIdx.x;
    if (I != 0 && I != 1)
        return;                 // only the two phases are meaningful
    int lo = 2 * pair + I;      // left element of this block's pair
    int hi = lo + 1;            // right element
    // Swap into ascending order when both elements are in range.
    if (hi < n && x[lo] > x[hi]) {
        int tmp = x[lo];
        x[lo] = x[hi];
        x[hi] = tmp;
    }
}
6,822 | #include<stdio.h>
#define Width 32 // size of Width x Width matrix
#define TILE_WIDTH 16
// Naive matrix multiply Pd = Md * Nd for square ncols x ncols matrices;
// one thread computes one output element.
__global__ void MatrixMulKernel (float* Md, float* Nd, float* Pd, int ncols) {
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // FIX: guard against threads outside the matrix — a grid that does not
    // divide ncols exactly would otherwise read and write out of bounds.
    if (row >= ncols || col >= ncols)
        return;
    // Pvalue accumulates the dot product of Md's row with Nd's column.
    float Pvalue = 0;
    for (int k = 0; k < ncols; ++k) {
        float Melement = Md[row*ncols+k];
        float Nelement = Nd[k*ncols+col];
        Pvalue += Melement * Nelement;
    }
    Pd[row*ncols+col] = Pvalue;
}
// Host driver: fills M with 1s and N with 2s, multiplies them on the GPU
// with a TILE_WIDTH x TILE_WIDTH block layout, and prints the product.
int main(int argc, char *argv[]) {
int i,j;
int size = Width * Width * sizeof(float);
float M[Width][Width], N[Width][Width],P[Width][Width];
float *Md, *Nd, *Pd;
for(i=0;i<Width;i++) {
for(j=0; j<Width;j++) {
M[i][j] = 1; N[i][j]= 2;
}
}
cudaMalloc( (void**)&Md, size);
cudaMalloc( (void**)&Nd, size);
cudaMalloc( (void**)&Pd, size);
cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice);
cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice);
// Setup the execution configuration: Width is a multiple of TILE_WIDTH,
// so the grid covers the matrix exactly.
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
dim3 dimGrid(Width/TILE_WIDTH,Width/TILE_WIDTH);
// Launch the device computation threads!
MatrixMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, Width);
// Read P from the device (cudaMemcpy blocks until the kernel finishes)
cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
// Free device matrices
cudaFree(Md); cudaFree(Nd); cudaFree(Pd);
for(i=0;i<Width;i++) {
for (j=0;j<Width;j++) {
printf("%.2f ",P[i][j]);
}
printf("\n");
}
}
|
6,823 | #include <stdio.h>
#include <stdlib.h>
#define N 1024
// N x N integer matrix product g_C = g_A * g_B, one thread per element.
__global__ void matrix_product(int *g_A, int *g_B, int *g_C) {
    // FIX: the global coordinates hard-coded the block width (32); derive
    // them from blockDim so any launch shape works, and guard the edges.
    int gx = threadIdx.x + blockIdx.x * blockDim.x;
    int gy = threadIdx.y + blockIdx.y * blockDim.y;
    if (gx >= N || gy >= N)
        return;
    // Dot product of row gy of A with column gx of B.
    int c = 0;
    int k;
    for (k = 0; k < N; k++) {
        c += g_A[k + gy*N] * g_B[gx + k*N];
    }
    g_C[gx + gy*N] = c;
}
// Host driver: fill two N x N matrices with ones, multiply on the GPU,
// and verify against a CPU reference, printing "OK" or "NG".
int main() {
    size_t bytes = sizeof(int) * N * N;
    int *h_A = (int*)malloc(bytes);
    int *h_B = (int*)malloc(bytes);
    int *h_C = (int*)malloc(bytes);
    int *d_A, *d_B, *d_C;
    cudaMalloc(&d_A, bytes);
    cudaMalloc(&d_B, bytes);
    cudaMalloc(&d_C, bytes);
    for (int i = 0; i < N*N; i++) {
        h_A[i] = 1;
        h_B[i] = 1;
    }
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);
    cudaMemset(d_C, 0, bytes);
    // 32x32 blocks of 32x32 threads cover the N = 1024 matrix exactly.
    dim3 grid(32, 32);
    dim3 block(32, 32);
    matrix_product<<< grid, block >>> (d_A, d_B, d_C);
    cudaMemcpy(h_C, d_C, bytes, cudaMemcpyDeviceToHost);
    // CPU reference comparison.
    int mismatch = 0;
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            int expect = 0;
            for (int k = 0; k < N; k++)
                expect += h_A[row*N + k] * h_B[k*N + col];
            if (h_C[row*N + col] != expect)
                mismatch = 1;
        }
    }
    if (mismatch == 0)
        printf("OK\n");
    else
        printf("NG\n");
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
6,824 | #include <stdio.h>
// Query device 0 and print its capabilities to stdout.
// NOTE(review): the return status of cudaGetDeviceProperties is not
// checked — if no CUDA device is present the printed fields are garbage.
int main(){
printf("\n\n");
// CUDA device properties struct
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
// Device name
printf("Device Name: %s\n",prop.name);
// Compute capability
printf("Compute capability: major: %d\tminor: %d\n\n", prop.major, prop.minor);
// Maximum block dimension
printf("Maximum block dimension in x: %d\n", prop.maxThreadsDim[0]);
printf("Maximum block dimension in y: %d\n", prop.maxThreadsDim[1]);
printf("Maximum block dimension in z: %d\n\n", prop.maxThreadsDim[2]);
// Maximum grid dimension
printf("Maximum grid dimension in x: %d\n", prop.maxGridSize[0]);
printf("Maximum grid dimension in y: %d\n", prop.maxGridSize[1]);
printf("Maximum grid dimension in z: %d\n\n", prop.maxGridSize[2]);
// Shared Memory Per Block (bytes)
printf("Shared memory per block: %zu\n", prop.sharedMemPerBlock);
// Total Global Memory (bytes)
printf("Total global memory: %zu\n", prop.totalGlobalMem);
// Total Constant Memory (bytes)
printf("Total constant memory: %zu\n\n", prop.totalConstMem);
// Warp size
printf("Warp size: %d\n", prop.warpSize);
printf("\n\n");
}
|
6,825 | /* Name : Krishna Pal Deora Admission No. : 18JE0425
* File: matrix_addition.cu
* Program : Implement matrix addition on a GPU using CUDA
*
*
* Input: The matrices A and B
* Output: Result of matrix addition.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Element-wise matrix add C = A + B on the device.
 * Launched as <<<m, n>>> (see main): block x is the row and thread x the
 * column, so blockDim.x == n and my_ij is the row-major flat index.
 * NOTE(review): the guard is coupled to that exact launch shape; with any
 * other configuration the index/guard pairing breaks — confirm callers. */
__global__ void Mat_add(float A[], float B[], float C[], int m, int n) {
int my_ij = blockDim.x * blockIdx.x + threadIdx.x;
if (blockIdx.x < m && threadIdx.x < n)
C[my_ij] = A[my_ij] + B[my_ij];
} /* Mat_add */
/*---------------------------------------------------------------------
* Function: Read_matrix
* Purpose: Read an m x n matrix from stdin
* In args: m, n
* Out arg: A
*/
/*---------------------------------------------------------------------
 * Function:  Read_matrix
 * Purpose:   Read an m x n matrix from stdin, row by row
 * In args:   m, n
 * Out arg:   A (row-major: element (i,j) at A[i*n + j])
 */
void Read_matrix(float A[], int m, int n) {
    // Row-major storage means one linear scan reads the whole matrix
    // in the same order as the nested row/column loops would.
    for (int idx = 0; idx < m * n; idx++)
        scanf("%f", &A[idx]);
}
/*---------------------------------------------------------------------
* Function: Print_matrix
* Purpose: Print an m x n matrix to stdout
* In args: title, A, m, n
*/
/*---------------------------------------------------------------------
 * Function:  Print_matrix
 * Purpose:   Print a title line, then an m x n matrix, to stdout
 * In args:   title, A (row-major), m, n
 */
void Print_matrix(char title[], float A[], int m, int n) {
    printf("%s\n", title);
    for (int r = 0; r < m; r++) {
        for (int c = 0; c < n; c++)
            printf("%.1f ", A[r*n + c]);
        printf("\n");
    }
}
/* Host code */
int main(int argc, char* argv[]) {
int m, n;
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
size_t size;
/* Get size of matrices */
if (argc != 3) {
fprintf(stderr, "usage: %s <row count> <col count>\n", argv[0]);
exit(0);
}
m = strtol(argv[1], NULL, 10);
n = strtol(argv[2], NULL, 10);
printf("m = %d, n = %d\n", m, n);
size = m*n*sizeof(float);
h_A = (float*) malloc(size);
h_B = (float*) malloc(size);
h_C = (float*) malloc(size);
printf("Enter the matrices A and B\n");
Read_matrix(h_A, m, n);
Read_matrix(h_B, m, n);
Print_matrix("A =", h_A, m, n);
Print_matrix("B =", h_B, m, n);
/* Allocate matrices in device memory */
cudaMalloc(&d_A, size);
cudaMalloc(&d_B, size);
cudaMalloc(&d_C, size);
/* Copy matrices from host memory to device memory */
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
/* Invoke kernel using m thread blocks, each of */
/* which contains n threads */
Mat_add<<<m, n>>>(d_A, d_B, d_C, m, n);
/* Copy result from device memory to host memory */
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
Print_matrix("The sum is: ", h_C, m, n);
/* Free device memory */
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
/* Free host memory */
free(h_A);
free(h_B);
free(h_C);
return 0;
} /* main */ |
6,826 | /*--
--*/
#include "../include/constraint.cuh"
// Barrier-function penalty for keeping state `st` inside [min, max].
// The penalty form is selected by the compile-time CONSTRAINT_TYPE macro
// (presumably from constraint.cuh):
//   1 -> inverse-quadratic barrier, 2 -> log barrier,
//   otherwise -> hard wall (FLT_MAX outside the interval).
// NOTE(review): the TYPE parameter is never used — the switch reads the
// CONSTRAINT_TYPE macro instead. Confirm whether TYPE was meant to select
// the barrier at runtime.
__host__ __device__ float state_constraint_barrier(float st, float min, float max, int TYPE){
float add = 0;
float med = 0;
switch(CONSTRAINT_TYPE){
case 1:
// Inverse-quadratic barrier on whichever bound st is closer to;
// FLT_MAX is added once the bound is actually violated.
med = (max + min)/2;
if(med < st){
add += 1/(powf(10*(max - st),2));
if(max < st){
add += FLT_MAX;
}
}else{
add += 1/(powf(10*(st - min),2));
if(st < min){
add += FLT_MAX;
}
}
break;
case 2:
// Logarithmic barrier on the nearer bound.
med = (max + min) /2;
if(med < st){
add += -logf(max-st);
if(max < st){
add += FLT_MAX;
}
}else{
add += -logf(st - min);
if(st < min){
add += FLT_MAX;
}
}
break;
default:
// Hard wall: no penalty inside the interval, FLT_MAX outside.
if( st < min ){
add += FLT_MAX;
}
if( max < st){
add += FLT_MAX;
}
break;
}
return add;
}
|
6,827 | /*
* Application:- Imogen's ported kernel from "gpuImogen/gpuclass/cudaArrayAtomic.cu"
* Purpose:-
* To perform operation on single array. Useful when parameters like density
* are required to be kept to have certain minimum value or certain maximum value
* or require NaNs to be replaced by zeros.
*/
/*
* Shader frequency of GTX 480
 * It would be better to derive this at runtime in case we are simulating
 * on a different GPU, but querying the frequency that many times would
 * still be expensive.
 * Tip:- Get this from the caller as an input parameter.
*/
#define SHADER_CLOCK 1401000
/*
* This will set all array elements with less than minimum
* threshold to value specified in input parameter
*/
/*
 * Clamp every array element from below: values smaller than the minimum
 * threshold are raised to it.
 *
 * Superkernel argument block layout (an array of doubles):
 *   params[0]  : sub-kernel selector (already consumed by ArrayAtomic)
 *   params[1]  : minimum threshold
 *   params[2]  : element count
 *   params[3..]: the data itself, stored inline
 */
__device__ void ArraySetMin(void *params)
{
    const int warp_size = 32;
    double *args = (double*)params;
    double floor_val = args[1];
    int n = (int)args[2];
    double *data = args + 3;
    /*
     * A single warp strides the array: lane l handles elements
     * l, l+32, l+64, ... No synchronization is needed within one warp.
     */
    for (int i = threadIdx.x % warp_size; i < n; i += warp_size) {
        if (data[i] < floor_val) {
            data[i] = floor_val;
        }
    }
}
/*
* This will set all array elements with greater than maximum
* threshold to value specified in input parameter
*/
/*
 * Clamp every array element from above: values larger than the maximum
 * threshold are lowered to it.
 *
 * Superkernel argument block layout (an array of doubles):
 *   params[0]  : sub-kernel selector (already consumed by ArrayAtomic)
 *   params[1]  : maximum threshold
 *   params[2]  : element count
 *   params[3..]: the data itself, stored inline
 */
__device__ void ArraySetMax(void *params)
{
    const int warp_size = 32;
    double *args = (double*)params;
    double ceil_val = args[1];
    int n = (int)args[2];
    double *data = args + 3;
    /* One warp strides the array; lane l handles l, l+32, l+64, ... */
    for (int i = threadIdx.x % warp_size; i < n; i += warp_size) {
        if (data[i] > ceil_val) {
            data[i] = ceil_val;
        }
    }
}
/*
* This will set all array elements which are not a number
* to value specified in input parameter
*/
/*
 * Replace every NaN element of the array with a fixed value.
 * Parameter packing matches the other ArraySet* helpers:
 *   params[0]  - operation selector (ignored here)
 *   params[1]  - replacement value for NaNs
 *   params[2]  - element count (double, truncated to int)
 *   params[3+] - the data array
 * Single-warp execution model: lanes stride by 32, no sync required.
 */
__device__ void ArraySetNaN(void *params)
{
    const int warp_size = 32;
    double *packed = (double *)params;

    /* Unpack the arguments following the selector word. */
    double replacement = packed[1];
    int count = (int)packed[2];
    double *data = packed + 3;

    /* Each lane scrubs every 32nd element. */
    for (int idx = threadIdx.x % warp_size; idx < count; idx += warp_size) {
        if (isnan(data[idx])) {
            data[idx] = replacement;
        }
    }
}
/*
* Sub-kernel selection function, Superkernel will call this function only
*/
/*
 * Dispatcher called by the superkernel: routes to the requested array
 * operation based on the selector stored as a double in params[0].
 *   1 -> ArraySetMin, 2 -> ArraySetMax, 3 -> ArraySetNaN
 * Any other selector is silently ignored.
 */
__device__ void ArrayAtomic(void *params)
{
    int op = (int)*(double *)params;
    if (op == 1) {
        ArraySetMin(params);
    } else if (op == 2) {
        ArraySetMax(params);
    } else if (op == 3) {
        ArraySetNaN(params);
    }
}
|
6,828 | #include <stdio.h>
#include <cuda.h>
#define SIZE 1024
#define START_SCALE 1.5f
#define MAXIT 256
#define C_RE -0.8f
#define C_IM 0.156f
#define ZOOM 200
//======================================================================
/*This function checks whether a point
belongs to the filled julia set. It
returns 0 if the value 'escaped', and
one if the maximal numer of iterations
has been reached. It is a function on the
device, but it can't be called like a
kernel from the host. It is used by the
kernel 'construct_julia_set' below. Note that
the kernel has the 'global' attribute rather
than the 'device' attribute.*/
/* Device helper (not launchable from the host): decides membership of
   grid point (i,j) in the filled Julia set for c = C_RE + C_IM*i.
   Returns 1 when MAXIT iterations complete without escaping, 0 as soon
   as the orbit escapes. Note the escape test compares |z| against SIZE
   (the grid width), not the conventional radius 2 -- preserved as-is so
   the rendered output does not change. */
__device__ int julia(int i, int j, float scale)
{
    /* Map pixel coordinates onto the complex plane at the current zoom. */
    float re = scale * (float)(SIZE/2 - i) / (SIZE/2);
    float im = scale * (float)(SIZE/2 - j) / (SIZE/2);

    /* Iterate z(n+1) = z(n)^2 + c. */
    for (int it = 0; it < MAXIT; it++) {
        float prev_re = re;                 /* keep Re(z) before overwrite */
        re = (re * re - im * im) + (C_RE);  /* Re(z^2 + c) */
        im = 2.0f * prev_re * im + (C_IM);  /* Im(z^2 + c) */
        if (sqrt(re * re + im * im) > SIZE) {
            return 0;                       /* escaped */
        }
    }
    return 1;                               /* stayed bounded: in the set */
}
//======================================================================
/* This function uses the one defined above
to construct an array called 'set' with ones and zeroes,
defining the elements of the julia set. This is the
kernel function to be called from the host and exectued
on the device. It thus carries the 'global' attribute.*/
/* Kernel: fills 'set' (SIZE*SIZE ints, indexed i + j*SIZE) with the
   membership flag of each grid point. Expects a SIZE x SIZE grid of
   single-thread blocks: one block per pixel. */
__global__ void construct_julia_set(int *set, float scale)
{
    int col = blockIdx.x;
    int row = blockIdx.y;
    set[col + row * SIZE] = julia(col, row, scale);
}
//======================================================================
/* Host driver: renders the Julia set at ZOOM successively smaller
   scales and writes each frame to 'julia_###.dat' as x/y pairs suitable
   for gnuplot (blank line after each non-empty row).

   Fix: the result buffer (SIZE*SIZE ints = 4 MiB) was a stack array,
   which overflows the default stack on many platforms; it is now
   heap-allocated, checked, and freed. fopen failures are also checked. */
int main( void )
{
    int *set = (int*)malloc(SIZE * SIZE * sizeof(int)); //host result array (heap: 4 MiB)
    int *set_d;                      //the result array on the device
    int written;                     //aux variable for writing file
    int num;                         //numbering of output file
    float x;                         //Re part in complex Z plane
    float y;                         //Im part in complex Z plane
    float scale;                     //initial scale for zoom
    char buffer[32];                 //buffer for filenames
    FILE *out;                       //output file
    dim3 grid(SIZE,SIZE);            //one single-thread block per pixel

    if (set == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for (int k = 0; k < SIZE*SIZE; k++)
    {
        set[k] = 0;
    }
    cudaMalloc(&set_d, SIZE*SIZE*sizeof(int)); //allocate memory for set on device

    /* 'written' lets us emit a newline after each row that produced
       output, which gives more freedom when plotting with gnuplot. */
    written = 0;
    for (int k = 0; k < ZOOM; k++)   //k...number of zoom slices to produce
    {                                //vary scale for zoom
        scale = START_SCALE *(400.0f-(float)k)/400.0f + 0.01;
        cudaMemcpy(set_d, set, SIZE*SIZE*sizeof(int), cudaMemcpyHostToDevice); //init set on device
        construct_julia_set<<<grid,1>>>(set_d, scale); //construct julia set on GPU
        cudaDeviceSynchronize();
        cudaMemcpy(set, set_d, SIZE*SIZE*sizeof(int), cudaMemcpyDeviceToHost); //copy result to host
        num = k;                     //out: 'julia_000.dat', 'julia_001.dat',...
        snprintf(buffer, sizeof(buffer), "julia_%03i.dat", num);
        out = fopen(buffer, "wt");   //write in text mode (wt)
        if (out == NULL) {
            fprintf(stderr, "cannot open %s\n", buffer);
            break;
        }
        for (int i = 0; i < SIZE; i++)   //actual grid values x and y
        {
            x = scale * (float)(SIZE/2 - i)/(SIZE/2);
            for (int j = 0; j < SIZE; j++)
            {
                y = scale * (float)(SIZE/2 - j)/(SIZE/2);
                int pos = i + j * SIZE;  //position in array
                if (set[pos] == 1)       //write only if part of set
                {
                    fprintf(out,"%f %f \n",x,y);
                    written = 1;
                }
            }//end inner grid loop (j)
            if (written == 1)
            {
                fprintf(out,"\n");       //add newline if row content not empty
            }
            written = 0;
        }//end outer grid loop (i)
        fclose(out);
    }//end zoom loop (k)
    cudaFree(set_d);                     //deallocate device memory
    free(set);                           //deallocate host memory
    return 0;
}
|
6,829 | #include "includes.h"
// Trivial device helper: always yields the constant 8.
__device__ int d(void) { return 8; }
// Empty kernel; given the name, presumably used to inspect the
// compiler-reported VGPR/SGPR register counts for a no-op entry
// point -- TODO confirm against the build/test harness.
__global__ void test_num_vgpr_num_sgpr() { }
6,830 | #include <bits/stdc++.h>
#include <thrust/device_vector.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#define to_ptr(x) thrust::raw_pointer_cast(&x[0])
#define gpu_copy(x, y) thrust::copy((x).begin(), (x).end(), (y).begin())
#define gpu_copy_to(x, y, pos) thrust::copy((x).begin(), (x).end(), (y).begin() + (pos))
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
const int BLOCK_SIZE = 512;
// Empty kernel, launched once before the timed region in main --
// presumably to absorb one-time CUDA context-creation overhead so it
// is not charged to the benchmark. TODO confirm.
__global__ void init(){}
// For every input element i, add input[i] into all N output slots,
// starting at a rotated offset: output[(i+j) % N] for j = 0..N-1.
// Deliberately O(N^2) with heavy atomic contention -- this is the
// "input oriented parallelism" benchmark variant (see main's output).
__global__ void scatterSum(int N, float *input, float *output){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        float v = input[idx];
        for (int off = 0; off < N; ++off) {
            atomicAdd(&output[(off + idx) % N], v);
        }
    }
}
// Benchmark driver: times the scatterSum kernel with CUDA events and
// prints every (N/10)-th result element. argv[1] optionally overrides N.
int main(int argc, char* argv[]){
    int N = 512*512;
    if(argc > 1) N = stoi(argv[1]);
    int num_blocks = (N+BLOCK_SIZE-1)/BLOCK_SIZE;   // ceil-div launch size
    cudaEvent_t start, stop;
    float cuda_time;
    cudaEventCreate(&start); // creating the event 1
    cudaEventCreate(&stop); // creating the event 2
    // Warm-up launch so context setup is not included in the timing.
    init<<<num_blocks, BLOCK_SIZE>>>();
    cudaEventRecord(start, 0);
    // NOTE(review): the timed region includes allocating and filling the
    // device vectors, not just the scatterSum kernel itself.
    def_dvec(float) input(N, 1.), output(N, 0.);
    scatterSum<<<num_blocks, BLOCK_SIZE>>>(N, to_ptr(input), to_ptr(output));
    cudaEventRecord(stop, 0); // Stop time measuring
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cuda_time, start, stop); // Saving the time measured
    cout<<"Time Usage for input oriented parallelism is: "<<cuda_time/1000<<"s"<<endl;
    // With input all 1.0, every output slot receives N contributions,
    // so each printed value should be N.
    for(int i=0;i<N;i+=N/10) cout<<output[i]<<' ';
    cout<<endl;
    return 0;
}
|
6,831 | #include<cuda.h>
#include <bits/stdc++.h>
using namespace std;
//matrix initialization
// Fill the n x n matrix A (flattened row-major) with the constant d.
void init(int *A, int n, int d){
    int total = n * n;
    for(int idx = 0; idx < total; ++idx)
        A[idx] = d;
}
//matrix comparation
// Element-wise equality of two size x size matrices; true iff every
// entry matches.
bool compare(int *A, int *B, int size){
    int total = size * size;
    for(int idx = 0; idx < total; ++idx){
        if(A[idx] != B[idx])
            return false;
    }
    return true;
}
//print matrix
// Print a size x size matrix (flattened row-major) to stdout, one row
// per line, followed by a trailing blank line.
// Bug fix: the inner loop previously printed A[i] for every column,
// repeating one element per row instead of the actual cell A[i*size+j].
void printmat(int *A, int size){
    for(int i = 0; i < size; i++){
        for(int j = 0; j < size; j++){
            std::cout << A[i * size + j] << " ";
        }
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
//matrix multiplication
// Sequential reference matrix product: h_C = h_A * h_B for n x n
// row-major matrices.
void matMult(int *h_A, int *h_B, int *h_C, int n){
    for(int row = 0; row < n; row++){
        for(int col = 0; col < n; col++){
            int acc = 0;
            for(int k = 0; k < n; k++)
                acc += h_A[row * n + k] * h_B[k * n + col];
            h_C[row * n + col] = acc;
        }
    }
}
//Parallel kernel
// CUDA kernel: C = A * B for n x n row-major matrices, one thread per
// output cell. Threads that fall outside the matrix exit immediately.
__global__ void matMultPP (int *A, int *B, int *C, int n){
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= n || col >= n)
        return;
    int acc = 0;
    for (int k = 0; k < n; ++k)
        acc += A[row * n + k] * B[k * n + col];
    C[row * n + col] = acc;
}
// Driver: reads the matrix dimension n, multiplies two constant n x n
// matrices sequentially on the CPU and in parallel on the GPU, then
// prints n, the CPU time, the GPU time, and their ratio (speedup).
int main(){
    int n; cin>>n;
    cout<<n<<endl;
    int size = n*n*sizeof(int);
    int *A = (int *)malloc(size);
    int *B = (int *)malloc(size);
    int *C = (int *)malloc(size);   // CPU result
    int *D = (int *)malloc(size);   // GPU result
    int *d_A, *d_B, *d_C;           // device copies
    init(A,n,1);
    init(B,n,2);
    init(C,n,0);
    init(D,n,0);
    double a, b;
    clock_t t = clock();
    // Sequential reference
    matMult(A,B,C,n);
    t = clock() - t;
    a = ((float)t)/CLOCKS_PER_SEC;
    cout<<a<<endl;
    int block_size = 32;
    // Parallel path. NOTE(review): the timed region below includes the
    // device allocations and transfers, not only the kernel.
    t = clock();
    //Allocate memory for device
    cudaMalloc(&d_A, size);
    cudaMalloc(&d_B, size);
    cudaMalloc(&d_C, size);
    //Copy Data from host to device
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    //Blocks and Grids (ceil-div so the grid covers all n x n cells)
    dim3 dimBlock(block_size,block_size);
    dim3 dimGrid(ceil(n/(float)block_size),ceil(n/(float)block_size));
    //Launch Kernel
    matMultPP<<<dimGrid, dimBlock>>> (d_A, d_B, d_C, n);
    cudaDeviceSynchronize();
    //Copy from device, free device memory
    cudaMemcpy (D, d_C, size, cudaMemcpyDeviceToHost);
    //matMultP(A,B,D,size);
    t = clock() - t;
    b = ((float)t)/CLOCKS_PER_SEC;
    cout<<b<<endl;
    cout<<(a/b)<<endl;   // sequential / parallel time = speedup
    //printmat(C,n);
    //printmat(D,n);
    //if(compare(C,D,n)) cout<<"Work :)"<<endl;
    //else cout<<"No work :("<<endl;
    free(A);
    free(B);
    free(C);
    free(D);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
6,832 | //
// mulMat.cu
//
//
// Created by Amilcar Meneses Viveros on 15/02/18.
//
//
#include <stdio.h>
#define M 1000
#define N 1000
// double a[M][N], b[M][N], c[M][N];
double *a, *b, *c;
float elapsedtime_total;
float elapsedtime_kernel;
// Kernel: a = b * c, with matrices stored row-major using row stride m.
// NOTE(review): the indexing (i*m+k, k*m+j) assumes square matrices
// (m == n), which is how the host calls it (M = N = 1000) -- confirm
// before reusing for rectangular shapes.
// Fix: added a bounds guard so a launch whose grid over-covers the
// matrix no longer writes out of bounds. With the current 50x50 grid of
// 20x20 threads (exactly 1000x1000) the guard never triggers, so
// behavior is unchanged for the existing launch.
__global__ void kernelmultiplicaMatrices(double *a, double *b, double *c, int m, int n) {
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    int j = threadIdx.y + blockDim.y*blockIdx.y;
    if (i >= m || j >= n)
        return;
    double tmp = 0;
    for (int k = 0; k < n; k++) {
        tmp += b[i*m+k] * c[k*m+j];
    }
    a[i*m+j] = tmp;
}
// Host wrapper: uploads b and c to the GPU, runs the multiplication
// kernel on a fixed 50x50 grid of 20x20-thread blocks (exactly covering
// 1000x1000 elements -- tied to M and N in main), downloads the product
// into a, and records two timings in the file globals:
//   elapsedtime_total  - alloc + copies + kernel + copy-back
//   elapsedtime_kernel - kernel launch window only (startk..stopk)
void multiplicaMatricesEnDevice(double *a, double *b, double *c, int m, int n) {
    int size=m*n*sizeof(double);
    double *aD, *bD, *cD;
    dim3 nb(50,50);   // blocks per grid
    dim3 nt(20,20);   // threads per block
    cudaEvent_t start, stop;
    cudaEvent_t startk, stopk;
    cudaSetDevice(0);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&startk);
    cudaEventCreate(&stopk);
    cudaEventRecord(start, 0);
    // 1. Allocate device memory
    cudaMalloc(&aD, size); cudaMalloc(&bD, size); cudaMalloc(&cD, size);
    // 2. Upload the inputs from host to device
    cudaMemcpy(bD, b, size, cudaMemcpyHostToDevice);
    cudaMemcpy(cD, c, size, cudaMemcpyHostToDevice);
    // 3. Run the kernel (timed separately with startk/stopk)
    cudaEventRecord(startk, 0);
    kernelmultiplicaMatrices<<<nb, nt>>>(aD, bD, cD, m, n);
    cudaEventRecord(stopk, 0);
    cudaEventSynchronize(stopk);
    // 4. Download the result from device to host
    cudaMemcpy(a, aD, size, cudaMemcpyDeviceToHost);
    // 5. Release device memory
    cudaFree(aD); cudaFree(bD); cudaFree(cD);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedtime_total, start, stop);
    cudaEventElapsedTime(&elapsedtime_kernel, startk, stopk);
    cudaEventDestroy(start); cudaEventDestroy(stop);
    cudaEventDestroy(startk); cudaEventDestroy(stopk);
}
// Driver: b[i][j] = i+j and c = 2*Identity, so the product a = b*c
// should equal 2*(i+j) in every cell. Prints the last row of a and the
// two timings recorded by multiplicaMatricesEnDevice.
int main() {
    int i, j;
    a = (double*)malloc(M*N*sizeof(double));
    b = (double*)malloc(M*N*sizeof(double));
    c = (double*)calloc(M*N, sizeof(double));  // zero-initialized
    for (i=0; i<M; i++) {
        for (j=0; j<N; j++) {
            b[i*N+j] = i+j;
        }
        c[i*N+i] = 2.0;   // diagonal entries -> c = 2*I
    }
    multiplicaMatricesEnDevice(a, b, c, M, N);
    // Print only the last row (i runs from M-1 to M-1).
    for (i=M-1; i<M; i++) {
        for (j=0; j<N; j++) {
            printf("%3.2lf ", a[i*N+j]);
        }
        printf("\n");
    }
    free(a); free(b); free(c);
    printf("Tiempo total cuda %4.6fms\n", elapsedtime_total);
    printf("Tiempo kernel cuda %4.6fms\n", elapsedtime_kernel);
    return 0;
}
|
6,833 | #include "includes.h"
// XOR the input buffer against a repeating 8-byte key, one 64-bit word
// per thread, writing the result to output.
// NOTE(review): assumes *size is a multiple of 8 and input/output are
// 8-byte aligned -- a trailing partial word would be read and written
// past *size, since the guard only checks the word's starting byte.
// Also assumes device 'long' is 64-bit. TODO confirm callers guarantee
// these preconditions.
__global__ void blockXOR(int *size, const char *input, char *output, long *key) {
    const long ix = threadIdx.x + blockIdx.x * (long)blockDim.x;
    if (ix * 8 < *size) {
        ((long *)output)[ix] = ((const long *)input)[ix] ^ *key;
    }
}
6,834 | #include <stdio.h>
// Kernel: store *a + *b into *c and log the value from the device.
// Every launched thread performs the identical write, so redundant
// launches are harmless -- each one stores the same sum.
__global__ void add (int* a, int* b, int * c){
    int result = *a + *b;
    *c = result;
    printf("\n value = %d \n", result);
}
// Host driver: computes 1 + 1 on the GPU and prints the result.
int main(){
    int a, b,c;
    int *d_a, *d_b, *d_c;   // device copies of a, b, c
    int size = sizeof(int);
    a = 1;
    b = 1;
    // Allocate Space on Device (GPU)
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    // Copy Data to Device
    cudaMemcpy(d_a, &a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, &b, size, cudaMemcpyHostToDevice);
    // Launch the kernel on GPU
    add<<<4,4>>>(d_a,d_b,d_c); // this kernel will be executed 16 times but the output value will remain the same (2)
    // Copy Results back to Host (blocking cudaMemcpy also waits for the kernel)
    cudaMemcpy(&c, d_c, size, cudaMemcpyDeviceToHost);
    printf("Result = %d \n", c);
    // cleanup
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
6,835 | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda_fp16.h>
// Software emulation of an atomic float add built on integer atomicCAS:
// snapshot the current bit pattern, compute (old + value) as bits, and
// retry the compare-and-swap until no other thread modified *address in
// between. The readback from atomicCAS becomes the next 'old' value, so
// the update is never lost under contention.
// NOTE(review): native atomicAdd(float*) exists on all SM20+ GPUs; this
// is presumably a teaching/illustration reimplementation -- TODO confirm.
__device__ inline void MyAtomicAdd(float* address, float value) {
    int oldval, newval, readback;
    oldval = __float_as_int(*address);
    newval = __float_as_int(__int_as_float(oldval) + value);
    // Loop until the CAS observes the value we based our sum on.
    while ((readback = atomicCAS((int*)address, oldval, newval)) != oldval) {
        oldval = readback;
        newval = __float_as_int(__int_as_float(oldval) + value);
    }
}
6,836 |
// Copy kernel: layer_copy[gid] = layer[gid], one element per thread.
// The global id is derived from a 2D block / 2D grid decomposition.
// NOTE(review): no bounds guard -- the launch must cover exactly the
// array length; confirm at the call site.
__global__ void gpuLayer_copy(float *layer, float *layer_copy)
{
    int threads_per_block = blockDim.x * blockDim.y;
    int thread_in_block = threadIdx.x + blockDim.x * threadIdx.y;
    int block_in_grid = blockIdx.x + gridDim.x * blockIdx.y;
    int gid = block_in_grid * threads_per_block + thread_in_block;
    layer_copy[gid] = layer[gid];
}
// Apply an energy impact at 'posicion' to the cells of 'layer': each
// cell (except index 0 and anything outside layer_size) receives
// 'energia' attenuated by the square root of its distance to the impact
// point; contributions below the 0.001 threshold are skipped.
__global__ void gpu_Actualiza(float *layer, int posicion, float energia,int layer_size)
{
    int threads_per_block = blockDim.x * blockDim.y;
    int thread_in_block = threadIdx.x + blockDim.x * threadIdx.y;
    int block_in_grid = blockIdx.x + gridDim.x * blockIdx.y;
    int gid = block_in_grid * threads_per_block + thread_in_block;

    /* Update function: skip index 0 and out-of-range threads. */
    if (gid <= 0 || gid >= layer_size)
        return;

    /* 1-2. Absolute distance to the impact point; the impact cell
       itself counts as distance 1. */
    int distancia = posicion - gid;
    if (distancia < 0) distancia = -distancia;
    distancia = distancia + 1;

    /* 3-4. Attenuate the energy by sqrt(distance). */
    float atenuacion = sqrtf( (float)distancia );
    float energia_k = energia / atenuacion;

    /* 5. Only accumulate when the magnitude reaches the threshold. */
    if (energia_k >= 0.001f || energia_k <= -0.001f)
        layer[gid] = layer[gid] + energia_k;
}
// Relaxation kernel: each interior cell becomes the average of itself
// and its two neighbours, read from the snapshot in layer_copy so the
// update does not depend on thread ordering. Endpoints 0 and
// layer_size-1 are left untouched.
__global__ void gpu_Extremos(float *layer, float *layer_copy, int layer_size)
{
    int threads_per_block = blockDim.x * blockDim.y;
    int thread_in_block = threadIdx.x + blockDim.x * threadIdx.y;
    int block_in_grid = blockIdx.x + gridDim.x * blockIdx.y;
    int gid = block_in_grid * threads_per_block + thread_in_block;
    if (gid > 0 && gid < layer_size - 1) {
        layer[gid] = ( layer_copy[gid-1] + layer_copy[gid] + layer_copy[gid+1] ) / 3;
    }
}
6,837 | #include "includes.h"
#define INTERVALS 1000000
// Max number of threads per block
#define THREADS 512
#define BLOCKS 64
double calculatePiCPU();
// Synchronous error checking call. Enable with nvcc -DDEBUG
// Midpoint-rule evaluation of 4/(1+x^2) over INTERVALS sample points
// (the integral over [0,1] is pi). Each thread accumulates every
// (threads*blocks)-th interval into its private slot sum[idx].
// NOTE(review): 'step' is only used to place the sample x, and the
// partial sums are never scaled by it here -- the host presumably
// zero-initializes 'sum', reduces it, and multiplies by step. TODO
// confirm against the (unseen) host code.
__global__ void integrateSimple(float *sum, float step, int threads, int blocks)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Grid-stride style loop over interval indices.
    for (int i = idx; i < INTERVALS; i+=threads*blocks)
    {
        float x = (i+0.5f) * step;      // midpoint of interval i
        sum[idx] += 4.0f / (1.0f+ x*x);
    }
}
6,838 | #include <cstdlib>
#include <ctime>
#include <algorithm>
#include <iostream>
#include <chrono>
#include <vector>
#include <iterator>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
// Benchmark: sort 32M random ints with thrust on the GPU (transfers
// included in the timing), then with std::sort on the CPU, print both
// durations and the first 10 elements of each sorted copy.
int main() {
    std::ios::sync_with_stdio(false);
    srand(time(NULL));
    //32M random values
    thrust::host_vector<int> h_vec(32 << 20);
    thrust::generate(begin(h_vec), end(h_vec), rand);
    auto t1 = std::chrono::high_resolution_clock::now();
    thrust::device_vector<int> d_vec = h_vec;                   // H2D copy
    thrust::sort(d_vec.begin(), d_vec.end());                   // GPU sort
    thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());    // D2H copy
    auto t2 = std::chrono::high_resolution_clock::now();
    //Duration (cuda): 676ms (GTX 1080 TI, driver 470, cuda 11.4, first time)
    //Duration (cuda): 110ms (GTX 1080 TI, driver 470, cuda 11.4, second time and futher)
    std::cout << "Duration (cuda): " << std::chrono::duration_cast<std::chrono::milliseconds>(t2 - t1).count() << "ms" << std::endl;
    // NOTE(review): v is built from h_vec AFTER it was overwritten with
    // the sorted GPU result, so std::sort below runs on already-sorted
    // input -- the CPU timing is not an apples-to-apples comparison.
    std::vector<int> v(h_vec.begin(), h_vec.end());
    auto t3 = std::chrono::high_resolution_clock::now();
    std::sort(begin(v), end(v));
    auto t4 = std::chrono::high_resolution_clock::now();
    //Duration (host): 6919ms (i3 8350K @ 4.00 GHz, cache 8MB, DDR4 8Gb x2 3000 MHz)
    std::cout << "Duration (host): " << std::chrono::duration_cast<std::chrono::milliseconds>(t4 - t3).count() << "ms" << std::endl;
    // Sanity check: show the 10 smallest values from each sorted copy.
    std::copy(h_vec.begin(), std::next(h_vec.begin(), 10), std::ostream_iterator<int>(std::cout, ", "));
    std::cout << std::endl;
    std::copy(begin(v), std::next(begin(v), 10), std::ostream_iterator<int>(std::cout, ", "));
    std::cout << std::endl;
    return 0;
}
|
6,839 | #define FLOAT_TO_BITS(x) (*reinterpret_cast<unsigned int*>(x))
#define BITS_TO_FLOAT(x) (*reinterpret_cast<float*>(x))
// Return the unbiased exponent of *a plus one (the +1 accounts for the
// implicit leading "virtual" mantissa bit). The biased exponent field
// is isolated by shifting the sign bit out (<<1) and dropping the 23
// mantissa bits (>>24). Subnormals and zero are not special-cased.
__device__ __forceinline__ unsigned int extract_exponent(float *a) {
    unsigned int temp = *(reinterpret_cast<unsigned int*>(a));
    temp = (temp << 1 >> 24); // single precision: 1 sign bit, 23 mantissa bits
    return temp-127+1; // remove the exponent bias, add the virtual bit
}
// Stochastic rounding of the raw float bit pattern 'target' to
// 'man_bits' mantissa bits: add random low-order bits taken from
// rand_prob, then truncate the bits that will be dropped.
__device__ __forceinline__ unsigned int round_bitwise_stochastic(unsigned int target,
                                                                 unsigned int rand_prob,
                                                                 int man_bits) {
  unsigned int drop_mask = (1u << (23 - man_bits)) - 1u;
  return (target + (rand_prob & drop_mask)) & ~drop_mask;
}
// Round-to-nearest of the raw float bit pattern 'target' to 'man_bits'
// mantissa bits: add half an ulp of the target precision, then truncate
// the dropped bits. Ties round upward in the mantissa (not to even).
__device__ __forceinline__ unsigned int round_bitwise_nearest(unsigned int target,
                                                              int man_bits) {
  unsigned int drop_mask = (1u << (23 - man_bits)) - 1u;
  unsigned int half_ulp = 1u << (23 - man_bits - 1);
  return (target + half_ulp) & ~drop_mask;
}
// Clamp a quantized float bit pattern into the representable range of a
// (sign, exp_bits, man_bits) format. Only overflow is handled: when the
// stored exponent exceeds the format's maximum, the result becomes the
// format's largest-magnitude value with the sign of old_num. Zero
// passes through untouched; underflow is not treated here.
__device__ __forceinline__ unsigned int clip_exponent(int exp_bits, int man_bits,
                                                      unsigned int old_num,
                                                      unsigned int quantized_num) {
    if (quantized_num == 0)
        return quantized_num;
    // Stored (biased) exponent: drop the sign bit, then the mantissa.
    int quantized_exponent_store = quantized_num << 1 >> 1 >> 23; // 1 sign bit, 23 mantissa bits
    // Largest stored exponent representable in exp_bits.
    int max_exponent_store = (1 << (exp_bits - 1)) + 127; // we are not reserving an exponent bit for infinity, nan, etc
    // Clipping value up: saturate to max magnitude, keep the old sign.
    if (quantized_exponent_store > max_exponent_store)
    {
        // Largest mantissa representable in man_bits, left-aligned in the 23-bit field.
        unsigned int max_man = (unsigned int)-1 << 9 >> 9 >> (23 - man_bits) << (23 - man_bits); // 1 sign bit, 8 exponent bits, 1 virtual bit
        unsigned int max_num = ((unsigned int)max_exponent_store << 23) | max_man;
        unsigned int old_sign = old_num >> 31 << 31;
        quantized_num = old_sign | max_num;
    }
    return quantized_num;
}
// Saturate a quantized float bit pattern against an explicit maximum
// exponent field ('max_exponent' is already positioned at bits 23..30):
// if the value's exponent field exceeds it, replace the magnitude with
// max_exponent plus the largest man_bits mantissa, keeping the sign.
__device__ __forceinline__ unsigned int clip_max_exponent(int man_bits,
                                                          unsigned int max_exponent,
                                                          unsigned int quantized_num) {
    // Exponent field isolated in place (sign stripped, mantissa cleared).
    unsigned int quantized_exponent = quantized_num << 1 >> 24 << 23; // 1 sign bit, 23 mantissa bits
    if (quantized_exponent > max_exponent) {
        // Largest mantissa representable in man_bits, left-aligned.
        unsigned int max_man = (unsigned int ) -1 << 9 >> 9 >> (23-man_bits) << (23-man_bits); // 1 sign bit, 8 exponent bits
        unsigned int max_num = max_exponent | max_man;
        unsigned int old_sign = quantized_num >> 31 << 31;
        quantized_num = old_sign | max_num;
    }
    return quantized_num;
}
|
6,840 | #include "includes.h"
// Implicit finite-difference boundary conditions: write lambda_U into
// the first grid point and 0 into the last; every other entry of
// d_Price is left untouched. (lambda_L is accepted but unused -- kept
// for signature compatibility with callers.)
__global__ void IFD_boundary( int size, double *d_Price, double lambda_U, double lambda_L )
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= size)
        return;
    if (idx == 0) {
        d_Price[idx] = lambda_U;        // top boundary condition
    } else if (idx == size - 1) {
        d_Price[idx] = 0.0;             // bottom boundary condition
    }
}
6,841 | #include "includes.h"
// Convert a BGR uchar3 image into an RGB float3 image of the same
// dimensions by swapping the channel order; one thread per pixel,
// guarded against threads that fall outside the image.
__global__ void convertBGR2RGBfloatKernel(uchar3 *src, float3 *dst, int width, int height)
{
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col < width && row < height) {
        int idx = row * width + col;
        uchar3 bgr = src[idx];
        dst[idx] = make_float3(bgr.z, bgr.y, bgr.x);
    }
}
6,842 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
// Empty entry point -- the program currently does nothing (presumably
// a project skeleton).
int main()
{
}
|
6,843 |
#include <stdio.h>
#include <cuda.h>
// Element-wise kernel: c_c[i] = c_a[i] - c_b[i] for i < N.
// (Despite the name, this computes a difference, not a sum.)
__global__ void addVector(int N, float* c_a, float* c_b, float* c_c ){
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < N) {
        c_c[idx] = c_a[idx] - c_b[idx];
    }
}
/* Host driver: fills two N-element vectors (7.7 and 5.2), computes
   their element-wise difference on the GPU via addVector, and prints
   the last 100 results.
   Fix: the original leaked every allocation -- the malloc'd host
   buffers and the cudaMalloc'd device buffers are now released. */
int main () {
    /* number of entries */
    int N = 10000;
    /* host buffers; malloc does not initialize, the loop below does */
    float* pt_a = (float*) malloc(sizeof(float)*N);
    float* pt_b = (float*) malloc(sizeof(float)*N);
    float* pt_c = (float*) malloc(sizeof(float)*N);
    int i;
    for (i = 0; i < N; i=i+1){
        pt_a[i] = 7.7;
        pt_b[i] = 5.2;
    }
    /* device buffers */
    float* c_a;
    float* c_b;
    float* c_c;
    cudaMalloc(&c_a, N*sizeof(float));
    cudaMalloc(&c_b, N*sizeof(float));
    cudaMalloc(&c_c, N*sizeof(float));
    /* copy the inputs from host (CPU) to device (GPU) */
    cudaMemcpy(c_a, pt_a, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(c_b, pt_b, N*sizeof(float), cudaMemcpyHostToDevice);
    /* 32 threads per block, enough blocks to cover N (ceil-div) */
    int T=32;
    int B=(N + T-1)/T;
    dim3 NthreadsPerBlock(T);
    dim3 NBlocks(B);
    addVector<<<NBlocks,NthreadsPerBlock>>>(N, c_a, c_b, c_c);
    /* copy the result back (blocking cudaMemcpy waits for the kernel) */
    cudaMemcpy(pt_c, c_c, N*sizeof(float), cudaMemcpyDeviceToHost);
    for(i = N - 100; i < N; i++){
        printf("c[%d]=%f\n", i, pt_c[i]);
    }
    /* release device and host memory (was leaked before) */
    cudaFree(c_a);
    cudaFree(c_b);
    cudaFree(c_c);
    free(pt_a);
    free(pt_b);
    free(pt_c);
    return 0;
}
|
6,844 | #include <cuda_runtime.h>
// No-op kernel: accepts a device buffer but performs no work.
__global__ void myKer(float* a) {
}
// Minimal smoke test: allocate a tiny device buffer, launch the no-op
// kernel on it with a single thread, then release the buffer.
int main(){
    const size_t size_bytes = 10;   // raw byte count, not a float count
    float* devbuf = nullptr;
    cudaMalloc((void**)&devbuf, size_bytes);
    myKer<<<1,1>>>(devbuf);
    cudaFree(devbuf);
}
6,845 | #include <iostream>
#include <fstream>
#include <cstring>
using namespace std;
unsigned char s_box[256] = {
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16};
unsigned char mul2[256] = {
0x00, 0x02, 0x04, 0x06, 0x08, 0x0a, 0x0c, 0x0e, 0x10, 0x12, 0x14, 0x16, 0x18, 0x1a, 0x1c, 0x1e,
0x20, 0x22, 0x24, 0x26, 0x28, 0x2a, 0x2c, 0x2e, 0x30, 0x32, 0x34, 0x36, 0x38, 0x3a, 0x3c, 0x3e,
0x40, 0x42, 0x44, 0x46, 0x48, 0x4a, 0x4c, 0x4e, 0x50, 0x52, 0x54, 0x56, 0x58, 0x5a, 0x5c, 0x5e,
0x60, 0x62, 0x64, 0x66, 0x68, 0x6a, 0x6c, 0x6e, 0x70, 0x72, 0x74, 0x76, 0x78, 0x7a, 0x7c, 0x7e,
0x80, 0x82, 0x84, 0x86, 0x88, 0x8a, 0x8c, 0x8e, 0x90, 0x92, 0x94, 0x96, 0x98, 0x9a, 0x9c, 0x9e,
0xa0, 0xa2, 0xa4, 0xa6, 0xa8, 0xaa, 0xac, 0xae, 0xb0, 0xb2, 0xb4, 0xb6, 0xb8, 0xba, 0xbc, 0xbe,
0xc0, 0xc2, 0xc4, 0xc6, 0xc8, 0xca, 0xcc, 0xce, 0xd0, 0xd2, 0xd4, 0xd6, 0xd8, 0xda, 0xdc, 0xde,
0xe0, 0xe2, 0xe4, 0xe6, 0xe8, 0xea, 0xec, 0xee, 0xf0, 0xf2, 0xf4, 0xf6, 0xf8, 0xfa, 0xfc, 0xfe,
0x1b, 0x19, 0x1f, 0x1d, 0x13, 0x11, 0x17, 0x15, 0x0b, 0x09, 0x0f, 0x0d, 0x03, 0x01, 0x07, 0x05,
0x3b, 0x39, 0x3f, 0x3d, 0x33, 0x31, 0x37, 0x35, 0x2b, 0x29, 0x2f, 0x2d, 0x23, 0x21, 0x27, 0x25,
0x5b, 0x59, 0x5f, 0x5d, 0x53, 0x51, 0x57, 0x55, 0x4b, 0x49, 0x4f, 0x4d, 0x43, 0x41, 0x47, 0x45,
0x7b, 0x79, 0x7f, 0x7d, 0x73, 0x71, 0x77, 0x75, 0x6b, 0x69, 0x6f, 0x6d, 0x63, 0x61, 0x67, 0x65,
0x9b, 0x99, 0x9f, 0x9d, 0x93, 0x91, 0x97, 0x95, 0x8b, 0x89, 0x8f, 0x8d, 0x83, 0x81, 0x87, 0x85,
0xbb, 0xb9, 0xbf, 0xbd, 0xb3, 0xb1, 0xb7, 0xb5, 0xab, 0xa9, 0xaf, 0xad, 0xa3, 0xa1, 0xa7, 0xa5,
0xdb, 0xd9, 0xdf, 0xdd, 0xd3, 0xd1, 0xd7, 0xd5, 0xcb, 0xc9, 0xcf, 0xcd, 0xc3, 0xc1, 0xc7, 0xc5,
0xfb, 0xf9, 0xff, 0xfd, 0xf3, 0xf1, 0xf7, 0xf5, 0xeb, 0xe9, 0xef, 0xed, 0xe3, 0xe1, 0xe7, 0xe5};
unsigned char mul3[256] = {
0x00, 0x03, 0x06, 0x05, 0x0c, 0x0f, 0x0a, 0x09, 0x18, 0x1b, 0x1e, 0x1d, 0x14, 0x17, 0x12, 0x11,
0x30, 0x33, 0x36, 0x35, 0x3c, 0x3f, 0x3a, 0x39, 0x28, 0x2b, 0x2e, 0x2d, 0x24, 0x27, 0x22, 0x21,
0x60, 0x63, 0x66, 0x65, 0x6c, 0x6f, 0x6a, 0x69, 0x78, 0x7b, 0x7e, 0x7d, 0x74, 0x77, 0x72, 0x71,
0x50, 0x53, 0x56, 0x55, 0x5c, 0x5f, 0x5a, 0x59, 0x48, 0x4b, 0x4e, 0x4d, 0x44, 0x47, 0x42, 0x41,
0xc0, 0xc3, 0xc6, 0xc5, 0xcc, 0xcf, 0xca, 0xc9, 0xd8, 0xdb, 0xde, 0xdd, 0xd4, 0xd7, 0xd2, 0xd1,
0xf0, 0xf3, 0xf6, 0xf5, 0xfc, 0xff, 0xfa, 0xf9, 0xe8, 0xeb, 0xee, 0xed, 0xe4, 0xe7, 0xe2, 0xe1,
0xa0, 0xa3, 0xa6, 0xa5, 0xac, 0xaf, 0xaa, 0xa9, 0xb8, 0xbb, 0xbe, 0xbd, 0xb4, 0xb7, 0xb2, 0xb1,
0x90, 0x93, 0x96, 0x95, 0x9c, 0x9f, 0x9a, 0x99, 0x88, 0x8b, 0x8e, 0x8d, 0x84, 0x87, 0x82, 0x81,
0x9b, 0x98, 0x9d, 0x9e, 0x97, 0x94, 0x91, 0x92, 0x83, 0x80, 0x85, 0x86, 0x8f, 0x8c, 0x89, 0x8a,
0xab, 0xa8, 0xad, 0xae, 0xa7, 0xa4, 0xa1, 0xa2, 0xb3, 0xb0, 0xb5, 0xb6, 0xbf, 0xbc, 0xb9, 0xba,
0xfb, 0xf8, 0xfd, 0xfe, 0xf7, 0xf4, 0xf1, 0xf2, 0xe3, 0xe0, 0xe5, 0xe6, 0xef, 0xec, 0xe9, 0xea,
0xcb, 0xc8, 0xcd, 0xce, 0xc7, 0xc4, 0xc1, 0xc2, 0xd3, 0xd0, 0xd5, 0xd6, 0xdf, 0xdc, 0xd9, 0xda,
0x5b, 0x58, 0x5d, 0x5e, 0x57, 0x54, 0x51, 0x52, 0x43, 0x40, 0x45, 0x46, 0x4f, 0x4c, 0x49, 0x4a,
0x6b, 0x68, 0x6d, 0x6e, 0x67, 0x64, 0x61, 0x62, 0x73, 0x70, 0x75, 0x76, 0x7f, 0x7c, 0x79, 0x7a,
0x3b, 0x38, 0x3d, 0x3e, 0x37, 0x34, 0x31, 0x32, 0x23, 0x20, 0x25, 0x26, 0x2f, 0x2c, 0x29, 0x2a,
0x0b, 0x08, 0x0d, 0x0e, 0x07, 0x04, 0x01, 0x02, 0x13, 0x10, 0x15, 0x16, 0x1f, 0x1c, 0x19, 0x1a};
unsigned char rcon[256] = {
0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a,
0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39,
0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a,
0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8,
0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef,
0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc,
0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b,
0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3,
0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94,
0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20,
0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63, 0xc6, 0x97, 0x35,
0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd, 0x61, 0xc2, 0x9f,
0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d, 0x01, 0x02, 0x04,
0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36, 0x6c, 0xd8, 0xab, 0x4d, 0x9a, 0x2f, 0x5e, 0xbc, 0x63,
0xc6, 0x97, 0x35, 0x6a, 0xd4, 0xb3, 0x7d, 0xfa, 0xef, 0xc5, 0x91, 0x39, 0x72, 0xe4, 0xd3, 0xbd,
0x61, 0xc2, 0x9f, 0x25, 0x4a, 0x94, 0x33, 0x66, 0xcc, 0x83, 0x1d, 0x3a, 0x74, 0xe8, 0xcb, 0x8d};
ofstream g("textCriptat.txt");
// One round-constant transformation of the AES key schedule: rotate the
// 4-byte word left by one byte, push each byte through the S-box, then
// XOR the round constant rcon[i] into byte 0.
void KeyExpansionCore(unsigned char *in, unsigned char i)
{
    // Byte-wise rotation done on the word as a 32-bit integer
    // (moves byte 0 to the end on a little-endian host).
    unsigned int *word = (unsigned int *)in;
    *word = (*word >> 8) | ((*word & 0xff) << 24);

    // S-box substitution of all four bytes.
    for (int b = 0; b < 4; ++b)
        in[b] = s_box[in[b]];

    // Fold in the round constant.
    in[0] ^= rcon[i];
}
// AES key expansion: derives the full round-key schedule from the
// original key. 'nivel' is the key size in bits (128/192/256); the
// expanded schedule lengths are 176/208/240 bytes respectively.
// NOTE(original author): the 256-bit path was flagged as not working.
// Fix: the final XOR step previously combined 'bytesGenerati++' with a
// read of 'bytesGenerati' in the same expression -- unsequenced
// modification + read, undefined behavior before C++17. The index is
// now advanced in a separate statement, matching C++17's (RHS-first)
// evaluation order.
void expandareCheie(unsigned char *cheieOriginala, unsigned char *expKeys, unsigned nivel)
{
    unsigned lungimeCheie = nivel / 8;   // key length in bytes
    unsigned lungimeCheieExpandata = (nivel == 128) ? 176 : (nivel == 192) ? 208 : (nivel == 256) ? 240 : 0;

    // The first round key is the original key itself.
    memcpy(expKeys, cheieOriginala, lungimeCheie);

    unsigned bytesGenerati = lungimeCheie;
    unsigned iteratii_rcon = 1;
    unsigned char temporar[4];

    while (bytesGenerati < lungimeCheieExpandata)
    {
        // Seed the transform with the previous 4 schedule bytes.
        temporar[0] = expKeys[bytesGenerati - 4];
        temporar[1] = expKeys[bytesGenerati - 3];
        temporar[2] = expKeys[bytesGenerati - 2];
        temporar[3] = expKeys[bytesGenerati - 1];

        // At each key-length boundary apply the core transformation.
        if (bytesGenerati % lungimeCheie == 0)
        {
            KeyExpansionCore(temporar, iteratii_rcon++);
        }

        // AES-256 only: extra S-box pass halfway through each key block.
        if (bytesGenerati % 32 == 16 && nivel == 256)
        {
            temporar[0] = s_box[temporar[0]];
            temporar[1] = s_box[temporar[1]];
            temporar[2] = s_box[temporar[2]];
            temporar[3] = s_box[temporar[3]];
        }

        // Next word = word one key-length back XOR the transformed word.
        for (int b = 0; b < 4; ++b)
        {
            expKeys[bytesGenerati] = expKeys[bytesGenerati - lungimeCheie] ^ temporar[b];
            ++bytesGenerati;
        }
    }
}
// AES SubBytes: replace each of the 16 state bytes with its S-box entry.
void SubstitutieBytes(unsigned char *state)
{
    for (int b = 0; b < 16; ++b)
        state[b] = s_box[state[b]];
}
// AES ShiftRows on a column-major 4x4 state (index = 4*col + row):
// row 0 stays put, row r is rotated left by r columns. The rotation is
// realized by the interleaved pairwise-swap sequence below:
//   row 1: (1,5)(5,9)(9,13)    -> left-rotate by 1
//   row 2: (2,10)(6,14)        -> swap opposite columns (rotate by 2)
//   row 3: (3,15)(7,15)(11,15) -> left-rotate by 3 (= right by 1)
// The swap order is significant -- do not reorder.
void ShiftRows(unsigned char *state)
{
    swap(state[1], state[5]);
    swap(state[2], state[10]);
    swap(state[3], state[15]);
    swap(state[5], state[9]);
    swap(state[6], state[14]);
    swap(state[7], state[15]);
    swap(state[9], state[13]);
    swap(state[11], state[15]);
}
// AES MixColumns: multiply each 4-byte state column by the fixed
// GF(2^8) matrix [2 3 1 1; 1 2 3 1; 1 1 2 3; 3 1 1 2], using the
// mul2/mul3 lookup tables for the field multiplications. The result is
// built in a scratch buffer and copied back so the reads all see the
// pre-transform state.
void MixColumns(unsigned char *state)
{
    unsigned char out[16];
    for (int c = 0; c < 16; c += 4)
    {
        unsigned char a0 = state[c];
        unsigned char a1 = state[c + 1];
        unsigned char a2 = state[c + 2];
        unsigned char a3 = state[c + 3];
        out[c]     = (unsigned char)(mul2[a0] ^ mul3[a1] ^ a2 ^ a3);
        out[c + 1] = (unsigned char)(a0 ^ mul2[a1] ^ mul3[a2] ^ a3);
        out[c + 2] = (unsigned char)(a0 ^ a1 ^ mul2[a2] ^ mul3[a3]);
        out[c + 3] = (unsigned char)(mul3[a0] ^ a1 ^ a2 ^ mul2[a3]);
    }
    memcpy(state, out, sizeof(out));
}
// AES AddRoundKey step: XOR the 16-byte round key into the state in place.
void AddRoundKey(unsigned char *state, unsigned char *roundKey)
{
    for (int i = 0; i < 16; ++i)
        state[i] ^= roundKey[i];
}
// Encrypts one 16-byte block in place with AES.
//   mesaj   - the 16-byte block to encrypt
//   cheie   - the raw key (also round key 0)
//   expKeys - the expanded key schedule (16 bytes per round key)
//   nivel   - key size in bits: 128, 192 or 256 (anything else falls back
//             to the 128-bit round count)
void criptareMesaj(unsigned char *mesaj, unsigned char *cheie, unsigned char *expKeys, unsigned nivel)
{
    // Number of "full" rounds before the final (MixColumns-free) round.
    int numarRunde;
    switch (nivel)
    {
    case 192: numarRunde = 11; break;
    case 256: numarRunde = 13; break;
    default:  numarRunde = 9;  break;   // 128-bit keys (and unknown levels)
    }
    AddRoundKey(mesaj, cheie);
    for (int runda = 1; runda <= numarRunde; ++runda)
    {
        SubstitutieBytes(mesaj);
        ShiftRows(mesaj);
        MixColumns(mesaj);
        AddRoundKey(mesaj, expKeys + 16 * runda);
    }
    // Final round: no MixColumns.
    SubstitutieBytes(mesaj);
    ShiftRows(mesaj);
    AddRoundKey(mesaj, expKeys + 16 * (numarRunde + 1));
}
// Writes the two-digit uppercase hexadecimal representation of x to the
// global output stream `g` (high nibble first).
void PrintHex(unsigned char x)
{
    static const char digits[] = "0123456789ABCDEF";
    g << digits[x >> 4];
    g << digits[x & 0x0F];
}
// AES file-encryption driver.
// usage: program <nivel>   (nivel = key size in bits: 128 / 192 / 256)
// Reads plaintext from "mesaj.txt", pads it with '0' characters to a
// multiple of 16 bytes, encrypts block by block, and writes the ciphertext
// as hex byte pairs to the global output stream `g`.
int main(int argc, char **argv)
{
    // Opened read/write "at end" so tellg() immediately yields the file size.
    fstream f("mesaj.txt", ios::in | ios::out | ios::ate);
    unsigned char* mesaj;
    // One 16-byte block plus a NUL.  Allocated as CUDA managed memory,
    // although no kernel launch is visible in this code -- TODO confirm a
    // device path actually uses it (otherwise plain malloc would do).
    cudaMallocManaged(&mesaj, 17*sizeof(unsigned char));
    // NOTE(review): argv[1] is used without checking argc -- crashes when
    // run with no arguments.
    unsigned nivel = atoi(argv[1]);
    unsigned char *cheie = nullptr;
    unsigned char *expKeys = nullptr;
    unsigned size = 0;
    size = f.tellg();
    // Pad the plaintext with '0' so its length is a multiple of the AES
    // block size (16 bytes).
    if (size % 16 != 0)
    {
        for (int i = 0; i < 16 - (size % 16); ++i)
        {
            f << '0';
        }
        size = f.tellg();
    }
    // Hard-coded demo keys; expanded-key sizes are 16*(rounds+1) bytes:
    // 176 / 208 / 240 for AES-128 / -192 / -256.
    if (nivel == 128)
    {
        cheie = new unsigned char[16];
        memcpy(cheie, "u43x2l6gjng24edf", 16);
        expKeys = new unsigned char[176];
    }
    if (nivel == 192)
    {
        cheie = new unsigned char[24];
        memcpy(cheie, "pyehxfiikibqunkkbwyydlqq", 24);
        expKeys = new unsigned char[208];
    }
    if (nivel == 256)
    {
        cheie = new unsigned char[32];
        memcpy(cheie, "bstipsymvkpascpmdqahvtdwusnhzexv", 32);
        expKeys = new unsigned char[240];
    }
    // NOTE(review): an unsupported nivel leaves cheie/expKeys null and the
    // calls below dereference them -- no validation is performed.
    expandareCheie(cheie, expKeys, nivel);
    // Rewind before reading the (now padded) plaintext.
    // NOTE(review): this seeks the put pointer with a seekdir constant;
    // reads use the get pointer -- presumably works because the two are
    // tied for file streams, but verify (seekg(0, ios::beg) would be clearer).
    f.seekp(ios::beg);
    for (int i = 0; i < size / 16; ++i)
    {
        f.read((char *)mesaj, 16);
        mesaj[16] = '\0';
        criptareMesaj(mesaj, cheie, expKeys, nivel);
        // Emit the encrypted block as space-separated hex byte pairs.
        for (int j = 0; j < 16; ++j)
        {
            PrintHex(mesaj[j]);
            g << " ";
        }
    }
    cudaDeviceSynchronize();
    f.close();
    g.close();
    cout << '\n';
    cudaFree(mesaj);
    delete[] cheie;
    delete[] expKeys;
    return 0;
} |
6,846 | #include <iostream>
#include <fstream>
#include <string>
#include <cstdio>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
#if defined(NDEBUG)
// Release builds: execute the wrapped call with no error checking.
#define CUDA_CHECK(x) (x)
#else
// Debug builds: execute the call, then abort with file/line info if the
// CUDA runtime reports an error.
// NOTE(review): this checks cudaGetLastError() rather than the call's own
// return value, so it also surfaces (and clears) earlier sticky errors.
#define CUDA_CHECK(x) do {\
	(x); \
	cudaError_t e = cudaGetLastError(); \
	if (cudaSuccess != e) { \
	printf("cuda failure \"%s\" at %s:%d\n", \
	cudaGetErrorString(e), \
	__FILE__, __LINE__); \
	exit(1); \
	} \
	} while (0)
#endif
// Device-side string copy.  Copies characters of src into dest but does
// NOT copy the terminating NUL (callers append it themselves).  Matches the
// original do/while behaviour, including always copying at least one char.
__device__ void my_strcpy(char *dest, const char *src) {
	int pos = 0;
	do {
		dest[pos] = src[pos];
		++pos;
	} while (src[pos] != '\0');
}
// Device-side strlen: number of characters before the terminating NUL.
__device__ int my_strlen(char *string) {
	int len;
	for (len = 0; string[len] != '\0'; ++len) {
	}
	return len;
}
// Device-side fixed-length compare: returns 0 when the first N characters
// of str1 and str2 match, 1 at the first mismatch.
__device__ int my_comp(char* str1, char* str2, int N) {
	for (int i = 0; i < N; i++) {
		if (str1[i] != str2[i]) {
			return 1;
		}
	}
	return 0;
}
// Brute-force search kernel ("just find" mode).
// Each thread enumerates candidate strings over the N-character alphabet
// and copies the first candidate matching `pass` into dest.
// Index layout: blockIdx.x encodes digits 3..6 in base N, threadIdx.x fixes
// digit 2, and the loops below sweep digits 1 and 0.  `next` is unused.
// Launch requirement: dynamic shared memory of at least N bytes.
// NOTE(review): only the first passLen digit positions ever reach `test`,
// so positions >= passLen are effectively ignored.
__global__ void bruteforce(char* pass, char* alphabet, char* dest, int N, long long int next) { // N = alphabet length
	extern __shared__ char s_alphabet[];   // alphabet staged in shared memory
	char test[100]; // char test = (char*)malloc(sizeof(char)*N);
	int digit[7] = { 0, };                 // per-position alphabet indices
	int passLen = my_strlen(pass);
	// Every thread copies the whole alphabet; all writes store identical
	// values, so the missing __syncthreads() is benign here.
	for (int i = 0; i<N; i++)
		s_alphabet[i] = alphabet[i];
	// Decode this thread's fixed digits from the block/thread indices.
	digit[6] = blockIdx.x >= N*N*N ? (int)((blockIdx.x / (N*N*N)) % N) : 0;
	digit[5] = blockIdx.x >= N*N ? (int)((blockIdx.x / (N*N)) % N) : 0;
	digit[4] = blockIdx.x >= N ? (int)((blockIdx.x / N) % N) : 0;
	digit[3] = (int)(blockIdx.x % N);
	digit[2] = threadIdx.x;
	digit[1] = 0;
	digit[0] = 0;
	// Sweep digit[1] (outer) and digit[0] (inner) over the alphabet.
	while (digit[1] < N) {
		for (int i = 0; digit[0] < N; digit[0]++, ++i) {
			test[0] = s_alphabet[digit[0]];
			for (int j = 1; j < passLen; j++) {
				test[j] = s_alphabet[digit[j]];
			}
			test[passLen] = '\0';
			// Found it: publish the match and stop this thread.
			if (!my_comp(pass, test, passLen)) {
				my_strcpy(dest, test);
				dest[passLen] = '\0';
				return;
			}
		}
		++digit[1];
		digit[0] = 0;
	}
}
// Brute-force enumeration kernel ("write" mode): instead of comparing,
// every thread writes the candidate strings it generates into `dest`
// (concatenated, passLen bytes per candidate, no separators).
//   ExecutionPerThread - bytes each thread may emit (alphabetLen^2 * passLen)
//   total_len          - capacity of dest in bytes (writes beyond it are skipped)
// Same digit layout as bruteforce(); `pass` only supplies the length.
__global__ void bruteforce_write(char* pass, char* alphabet, char* dest, int N, long long unsigned int ExecutionPerThread, long long unsigned total_len) { // N = alphabet length
	// we don't use shared memory in this function.
	char test[100]; // char test = (char*)malloc(sizeof(char)*N);
	int digit[7] = { 0, };
	int passLen = my_strlen(pass);
	long long unsigned int idx = 0;     // output byte offset for this candidate
	long long unsigned int dummy = 0;   // candidates emitted in earlier digit[1] sweeps
	digit[6] = blockIdx.x >= N*N*N ? (int)((blockIdx.x / (N*N*N)) % N) : 0;
	digit[5] = blockIdx.x >= N*N ? (int)((blockIdx.x / (N*N)) % N) : 0;
	digit[4] = blockIdx.x >= N ? (int)((blockIdx.x / N) % N) : 0;
	digit[3] = (int)(blockIdx.x % N);
	digit[2] = threadIdx.x;
	digit[1] = 0;
	digit[0] = 0;
	// ExecutionPerThread = alphabetLen * alphabeltLen * passLen
	while (digit[1] < N) {
		for (int i = 0; digit[0] < N; digit[0]++, ++i) {
			// Compute this candidate's output offset.
			// NOTE(review): multiplying the whole per-thread offset by
			// blockDim.x * blockIdx.x looks suspect (offsets of different
			// blocks can overlap/skip) -- verify against the intended layout.
			if (blockIdx.x)
				idx = (threadIdx.x * ExecutionPerThread + (i + dummy) * passLen) * blockDim.x * blockIdx.x;
			else
				idx = threadIdx.x * ExecutionPerThread + (i + dummy) * passLen;
			if (idx > total_len) return;   // out of room: stop this thread
			dest[idx++] = alphabet[digit[0]];
			for (int j = 1; j < passLen; j++) {
				dest[idx++] = alphabet[digit[j]];
			}
		}
		dummy += N ;
		++digit[1];
		digit[0] = 0;
	}
}
void crackPassword(string, int, int);
// Interactive driver: asks whether to search for a password (mode 1) or to
// dump all candidate strings to a file (mode 2), then calls crackPassword().
int main() {
	// Windows-specific console sizing; harmless elsewhere.
	system("mode con cols=200 lines=250");
	string password;
	string cracked;
	int operation = 0;
	int numOfChars = -1;
	while (operation != 1 && operation != 2) {
		cout << "******* BRUTE FORCE PROGRAM *******" << endl;
		cout << "******* [1]. JUST FIND " << endl;
		cout << "******* [2]. WRITE " << endl;
		cout << "INPUT NUMBER HERE : ";
		cin >> operation;
	}
	if (operation == 2) {
		// BUG FIX: the old loop accepted any value > 0, but the kernels'
		// digit[7] array only supports lengths 1..7 (larger values index
		// out of bounds); it also appended to `password` inside the retry
		// loop, which could double-append after an invalid entry.
		while (numOfChars < 1 || numOfChars > 7) {
			cout << "INPUT NUMBER OF CHARACTERS(FROM 1 TO 7) : ";
			cin >> numOfChars;
		}
		// Build a dummy password of the requested length (its characters
		// are irrelevant in write mode; only the length is used).
		for (int i = 0; i < numOfChars; i++)
			password += 'a';
	}
	else {
		cout << "Enter the password to crack : ";
		cin >> password;
	}
	crackPassword(password, operation, numOfChars);
	cout << endl;
	return 0;
}
// Runs the GPU brute-force search (operation 1) or candidate dump
// (operation 2) over progressively larger alphabets.
//   pass       - target password (mode 1) / dummy of the wanted length (mode 2)
//   operation  - 1 = find, 2 = write candidates to "password.txt"
//   numOfChars - requested length in mode 2 (unused in mode 1)
// Timing is measured with CUDA events around the whole search.
void crackPassword(string pass, int operation, int numOfChars) {
	cudaEvent_t start, stop;
	string alphabet;
	string str("");
	char* result;          // host copy of the kernel's output
	char* d_pass;
	char* d_alphabet;
	char* d_dest;
	int alphabetSet = 1;   // 1..7: which alphabet to try next
	int len;
	int cnt = 0;
	int new_cnt = 0;
	int passLen = pass.length();
	float ms = 0;
	char* temp = (char*)malloc(sizeof(char)*pass.length() + 1);
	ofstream ofs("password.txt");
	long long unsigned int total_len;
	bool isFind = false;
	string line("");
	memset(temp, 0, sizeof(char)*pass.length() + 1);
	result = (char*)malloc(sizeof(char)*pass.length() + 1);
	CUDA_CHECK(cudaEventCreate(&start));
	CUDA_CHECK(cudaEventCreate(&stop));
	CUDA_CHECK(cudaMalloc((void**)&d_pass, sizeof(char)*pass.length() + 1));
	CUDA_CHECK(cudaMalloc((void**)&d_dest, sizeof(char)*pass.length() + 1));
	CUDA_CHECK(cudaMemcpy(d_pass, pass.c_str(), sizeof(char)*pass.length() + 1, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaMemcpy(d_dest, temp, sizeof(char)*pass.length() + 1, cudaMemcpyHostToDevice));
	CUDA_CHECK(cudaEventRecord(start)); // start gpu computing with cuda
	// One iteration per alphabet level, until found or all levels exhausted.
	while (1) {
		memset(result, 0, pass.length() + 1);
		switch (alphabetSet) {
		case 1: alphabet = "0123456789";
			if (operation == 1)
				cout << endl << endl << "Testing only digits(0123456789) - 10 Characters, please wait";
			else
				cout << endl << endl << "...writing";
			break;
		case 2: alphabet = "abcdefghijklmnopqrstuvwxyz";
			if (operation == 1)
				cout << endl << endl << "Couldn't find the password, increasing the searching level." << endl << endl << "Testing only lowercase characters(abcdefghijklmnopqrstuvwxyz) - 26 Characters, please wait";
			else
				cout << endl << endl << "...writing";
			break;
		case 3: alphabet = "ABCDEFGHIJKLMNOPQRSTUVWXYZ";
			if (operation == 1)
				cout << endl << endl << "Couldn't find the password, increasing the searching level." << endl << endl << "Testing only uppercase characters(ABCDEFGHIJKLMNOPQRSTUVWXYZ) - 26 Characters, please wait";
			else
				cout << endl << endl << "...writing";
			break;
		case 4: alphabet = "0123456789abcdefghijklmnopqrstuvwxyz";
			if (operation == 1)
				cout << endl << endl << "Couldn't find the password, increasing the searching level." << endl << endl << "Testing lowercase characters and numbers(0123456789abcdefghijklmnopqrstuvwxyz) - 36 Characters, please wait";
			else
				cout << endl << endl << "...writing";
			break;
		case 5: alphabet = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
			if (operation == 1)
				cout << endl << endl << "Couldn't find the password, increasing the searching level." << endl << endl << "Testing uppercase characters and numbers(0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ) - 36 Characters, please wait";
			else
				cout << endl << endl << "...writing";
			break;
		case 6: alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
			if (operation == 1)
				cout << endl << endl << "Couldn't find the password, increasing the searching level." << endl << endl << "Testing lowercase, uppercase characters(abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ) - 52 Characters, please wait";
			else
				cout << endl << endl << "...writing";
			break;
		case 7: alphabet = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ";
			if (operation == 1)
				cout << endl << endl << "Couldn't find the password, increasing the searching level." << endl << endl << "Testing lowercase, uppercase characters and numbers(0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ) - 62 Characters, please wait";
			else
				cout << endl << endl << "...writing";
			break;
		}
		len = alphabet.length();
		// NOTE(review): d_alphabet is malloc'd each round and freed at the
		// bottom of the loop -- fine, but see the d_dest/result leaks below.
		CUDA_CHECK(cudaMalloc((void**)&d_alphabet, sizeof(char)*len + 1));
		CUDA_CHECK(cudaMemcpy(d_alphabet, alphabet.c_str(), sizeof(char)*len + 1, cudaMemcpyHostToDevice));
		dim3 threadsPerBlock(len, 1);
		// len^(passLen-3) blocks: digits 3..6 are encoded in blockIdx.x.
		dim3 blocksPerGrid((int)std::pow((float)len, pass.length() < 3 ? 1 : (float)(pass.length() - 3)), 1);
		switch (operation) {
		case 1: // JUST FIND
			bruteforce<<<blocksPerGrid, threadsPerBlock, sizeof(char) * len >>>(d_pass, d_alphabet, d_dest, len, 0);
			// Implicitly synchronizes with the kernel (blocking memcpy).
			CUDA_CHECK(cudaMemcpy(result, d_dest, sizeof(char)*pass.length() + 1, cudaMemcpyDeviceToHost));
			str = result;
			if (str.compare(pass) == 0) {
				CUDA_CHECK(cudaEventRecord(stop));
				cudaEventSynchronize(stop);
				cout << endl << "the password : " << result << endl;
				CUDA_CHECK(cudaEventElapsedTime(&ms, start, stop));
				cout << "The time duration passed : " << ms << "ms" << endl << endl;
				isFind = true;
				free(result);
			}
			break;
		case 2: // WRITE
			total_len = (long long unsigned int)(sizeof(char) * len*len*pass.length() * threadsPerBlock.x * blocksPerGrid.x);
			// NOTE(review): this re-mallocs d_dest without freeing the
			// allocation made before the loop (and the previous round's) --
			// device memory leak.
			CUDA_CHECK( cudaMalloc((void**)&d_dest, total_len) );
			bruteforce_write<<<blocksPerGrid, threadsPerBlock>>>(d_pass, d_alphabet, d_dest, len, (long long unsigned int)len*len*pass.length(), total_len );
			// NOTE(review): the previous `result` buffer is leaked here
			// (free(result) below is commented out).
			result = (char*)malloc(total_len);
			CUDA_CHECK(cudaMemcpy(result, d_dest, total_len , cudaMemcpyDeviceToHost));
			// file write
			// NOTE(review): `cnt <= total_len` lets the inner loop read
			// result[total_len], one byte past the buffer -- should be `<`.
			while(cnt <= total_len) {
				line = "";
				new_cnt = 0;
				while (new_cnt < passLen) {
					if ( (strchr(alphabet.c_str(), result[cnt]) != NULL) && result[cnt] != '\0' ) {
						line += result[cnt];
						new_cnt++;
					}
					cnt++;
					if (cnt >= total_len)
						break;
				}
				ofs << line << endl;
			}
			cnt = 0;
			new_cnt = 0;
			CUDA_CHECK(cudaFree(d_dest));
			// free(result);
			break;
		}
		alphabetSet++;
		CUDA_CHECK(cudaFree(d_alphabet));
		if (alphabetSet > 7)
			break;
		if (isFind == true)
			break;
	}
	CUDA_CHECK(cudaFree(d_pass));
	// In mode 2 d_dest was already freed (re-pointed) inside the loop.
	if(operation == 1)
		CUDA_CHECK(cudaFree(d_dest));
}
|
6,847 | #include "includes.h"
// Per-block exclusive prefix sum (work-efficient up-sweep/down-sweep scan).
// Each block scans its own n-element segment of `input` into `output` and
// stores the segment's total in sums[blockIdx.x] (for a later cross-block pass).
// Launch requirements (implied by the indexing, not checked here):
//   - blockDim.x == n / 2 (each thread handles two elements)
//   - n is a power of two
//   - dynamic shared memory of n * sizeof(int)
__global__ void prescan_large_unoptimized(int *output, int *input, int n, int *sums) {
	int blockID = blockIdx.x;
	int threadID = threadIdx.x;
	int blockOffset = blockID * n;
	extern __shared__ int temp[];
	// Stage this block's segment into shared memory, two elements per thread.
	temp[2 * threadID] = input[blockOffset + (2 * threadID)];
	temp[2 * threadID + 1] = input[blockOffset + (2 * threadID) + 1];
	int offset = 1;
	for (int d = n >> 1; d > 0; d >>= 1) // build sum in place up the tree
	{
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			temp[bi] += temp[ai];
		}
		offset *= 2;
	}
	__syncthreads();
	// Save the block total for the caller, then clear the root to make the
	// scan exclusive.
	if (threadID == 0) {
		sums[blockID] = temp[n - 1];
		temp[n - 1] = 0;
	}
	for (int d = 1; d < n; d *= 2) // traverse down tree & build scan
	{
		offset >>= 1;
		__syncthreads();
		if (threadID < d)
		{
			int ai = offset * (2 * threadID + 1) - 1;
			int bi = offset * (2 * threadID + 2) - 1;
			int t = temp[ai];
			temp[ai] = temp[bi];
			temp[bi] += t;
		}
	}
	__syncthreads();
	// Write the scanned segment back to global memory.
	output[blockOffset + (2 * threadID)] = temp[2 * threadID];
	output[blockOffset + (2 * threadID) + 1] = temp[2 * threadID + 1];
} |
6,848 | #include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/uniform_real_distribution.h>
// nvcc -arch=sm_70 -std=c++14 exemplo1.cu -o exemplo1 && ./exemplo1
// Reads an RNG seed from stdin and prints ten uniformly distributed
// doubles in [25, 40) drawn from Thrust's minstd_rand engine.
int main()
{
    int seed;
    std::cin >> seed;
    // default_random_engine is currently an alias for minstd_rand, and may change in a future version.
    thrust::minstd_rand engine(seed);
    thrust::uniform_real_distribution<double> dist(25, 40);
    for (int sample = 0; sample < 10; ++sample)
    {
        std::cout << dist(engine) << " ";
    }
    std::cout << "\n";
}
} |
6,849 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
//execution time: 16.612064 ms
#define n 1024
#define block_size 32
// Tiled n x n integer matrix multiply: c = a * b (row major).
// Launch requirements: blockDim = (block_size, block_size) and
// gridDim = (n/block_size, n/block_size); n must be a multiple of block_size.
// BUG FIXES vs. the original:
//  - the accumulator was used uninitialized (int local_c; local_c += ...)
//  - both shared-memory tiles were loaded with indices derived from the
//    wrong thread axis (my_x varies with threadIdx.x but was used as the
//    tile's row), so the tiles held wrong data for any non-constant input.
__global__ void mul_matrix(int *a, int *b, int *c){
	int row = threadIdx.y;                         // row within the tile
	int col = threadIdx.x;                         // column within the tile
	int my_y = blockIdx.y*blockDim.y + threadIdx.y; // global row of c
	int my_x = blockIdx.x*blockDim.x + threadIdx.x; // global column of c
	int local_c = 0;                               // accumulator (must start at 0)
	__shared__ int A_s[32][32];
	__shared__ int B_s[32][32];
	// Walk the k dimension one block_size-wide tile at a time.
	for (int t = 0; t < n/block_size; t++)
	{
		// A tile: rows follow threadIdx.y, columns follow the tile sweep.
		A_s[row][col] = a[my_y*n + (t*blockDim.x + col)];
		// B tile: rows follow the tile sweep, columns follow threadIdx.x.
		B_s[row][col] = b[(t*blockDim.y + row)*n + my_x];
		__syncthreads();   // tiles fully loaded before use
		for (int j = 0; j < block_size; j++)
			local_c += A_s[row][j]*B_s[j][col];
		__syncthreads();   // done reading before the next load overwrites
	}
	c[my_y*n + my_x] = local_c;
}
// Host driver: builds two 1024x1024 matrices (all ones / all twos),
// multiplies them on the GPU, and prints the kernel time in milliseconds.
int main(){
	float elapsed;
	cudaEvent_t evStart, evStop;
	// row major order
	// a(i,j) = a[i*1024+j];
	int *h_a = (int*)malloc(sizeof(int)*n*n);
	int *h_b = (int*)malloc(sizeof(int)*n*n);
	int *h_c = (int*)malloc(sizeof(int)*n*n);
	dim3 dimGrid(32,32);
	dim3 dimBlock(32,32);
	for (int idx = 0; idx < n*n; idx++){
		h_a[idx] = 1;
		h_b[idx] = 2;
		h_c[idx] = 0;
	}
	int *gpu_a, *gpu_b, *gpu_c;
	cudaMalloc((void**)&gpu_a, sizeof(int)*n*n);
	cudaMalloc((void**)&gpu_b, sizeof(int)*n*n);
	cudaMalloc((void**)&gpu_c, sizeof(int)*n*n);
	cudaEventCreate(&evStart);
	cudaEventCreate(&evStop);
	cudaMemcpy(gpu_a, h_a, sizeof(int)*n*n, cudaMemcpyHostToDevice);
	cudaMemcpy(gpu_b, h_b, sizeof(int)*n*n, cudaMemcpyHostToDevice);
	// Time only the kernel, not the transfers.
	cudaEventRecord(evStart,0);
	mul_matrix<<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c);
	cudaEventRecord(evStop,0);
	cudaEventSynchronize(evStop);
	cudaEventElapsedTime(&elapsed,evStart,evStop);
	cudaEventDestroy(evStart);
	cudaEventDestroy(evStop);
	cudaMemcpy(h_c, gpu_c, sizeof(int)*n*n, cudaMemcpyDeviceToHost);
	printf("%f ", elapsed);
	free(h_a);
	free(h_b);
	free(h_c);
	cudaFree(gpu_a);
	cudaFree(gpu_b);
	cudaFree(gpu_c);
	return 0;
}
|
6,850 | /*
* Sample program to illustrate threads and blocks in CUDA.
*
* compile with:
* nvcc -o hello hello.cu
* run with:
* ./hello
*/
#include <stdio.h>
// Prints a greeting from every thread, identifying it by its global and
// block-local indices.
__global__ void hello() {
    int global_id = blockDim.x * blockIdx.x + threadIdx.x;
    printf("Hello from thread %d (%d of block %d)\n", global_id, threadIdx.x, blockIdx.x);
}
int main() {
    const int numBlocks = 3;
    const int threadsPerBlock = 4;
    hello<<<numBlocks, threadsPerBlock>>>(); //launch 3 blocks of 4 threads each
    cudaDeviceSynchronize();                 //make sure kernel completes
}
|
extern "C"{
	// Writes the constant 5 through the given device pointer (single-thread demo).
	__global__ void kernel(int * value){
		*value = 5;
	}
	// Doubles the first *value_count entries of `values` in place; each
	// thread handles the element at its threadIdx.x (one block assumed).
	__global__ void kernel_2(unsigned int * values, unsigned int * value_count){
		if (threadIdx.x < *value_count){
			values[threadIdx.x] *= 2;
		}
	}
	// Copies the by-value input into device memory.
	__global__ void kernel_3(const int in, int * out){
		*out = in;
	}
}
|
6,852 | /*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
*/
extern "C" {
	// Element-wise product scaled by 100: c[i] = a[i] * b[i] * 100.
	// One thread per element; single-block launch assumed (threadIdx.x only).
	__global__ void multKernel(int *c, const int *a, const int *b)
	{
		const int idx = threadIdx.x;
		c[idx] = 100 * a[idx] * b[idx];
	}
	// "main" function is not required
}
|
6,853 | #include "jpg.hh"
#include <stdexcept>
#include <setjmp.h>
#include <jpeglib.h>
namespace img
{
std::uint8_t* jpg_load(const std::string& path,
std::size_t* pwidth, std::size_t* pheight,
std::size_t* pchannels)
{
struct jpeg_decompress_struct cinfo;
jmp_buf setjmp_buffer;
struct jpeg_error_mgr pub;
FILE* infile = fopen(path.c_str(), "rb");
if (!infile)
throw std::runtime_error {"Can't open file"};
cinfo.err = jpeg_std_error(&pub);
if (setjmp(setjmp_buffer))
throw std::runtime_error {"Can't read file"};
jpeg_create_decompress(&cinfo);
jpeg_stdio_src(&cinfo, infile);
jpeg_read_header(&cinfo, TRUE);
jpeg_start_decompress(&cinfo);
std::size_t row_stride = cinfo.output_width * cinfo.output_components;
std::size_t width = cinfo.output_width;
std::size_t height = cinfo.output_height;
std::size_t channels = cinfo.output_components;
std::uint8_t* data = new std::uint8_t[height * row_stride];
std::size_t offset = 0;
while (cinfo.output_scanline < cinfo.output_height)
{
auto row = reinterpret_cast<JSAMPROW> (data + offset);
jpeg_read_scanlines(&cinfo, &row, 1);
offset += row_stride;
}
jpeg_finish_decompress(&cinfo);
jpeg_destroy_decompress(&cinfo);
fclose(infile);
if (pwidth)
*pwidth = width;
if (pheight)
*pheight = height;
if (pchannels)
*pchannels = channels;
return data;
}
void jpg_save(const std::string& path, std::uint8_t* data,
std::size_t width, std::size_t height,
int quality)
{
FILE* outfile = fopen(path.c_str(), "wb");
if (!outfile)
throw std::runtime_error {"Can't open image file"};
struct jpeg_compress_struct cinfo;
struct jpeg_error_mgr jerr;
cinfo.err = jpeg_std_error(&jerr);
jpeg_create_compress(&cinfo);
jpeg_stdio_dest(&cinfo, outfile);
std::size_t channels = 3;
J_COLOR_SPACE color_type = JCS_RGB;
cinfo.image_width = width;
cinfo.image_height = height;
cinfo.input_components = channels;
cinfo.in_color_space = color_type;
jpeg_set_defaults(&cinfo);
jpeg_set_quality (&cinfo, quality, true);
jpeg_start_compress(&cinfo, true);
std::size_t row_stride = width * channels;
std::size_t offset = 0;
while (cinfo.next_scanline < cinfo.image_height)
{
auto row = reinterpret_cast<JSAMPROW> (data + offset);
jpeg_write_scanlines(&cinfo, &row, 1);
offset += row_stride;
}
jpeg_finish_compress(&cinfo);
}
}
|
6,854 | #include <cstddef>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#define BLOCK_SIZE 32
#define BIG_BLOCK BLOCK_SIZE * 2
const int INF = ((1 << 30) - 1);
// Blocked Floyd-Warshall, phase 1: relax the pivot block (the BLOCK_SIZE x
// BLOCK_SIZE tile on the diagonal at position `round`) against itself.
// Launch: a single block of BLOCK_SIZE x BLOCK_SIZE threads.
__global__ void cal_phase1(int* Dist, int numOfVertex, int round){
	int newDist;
	// Global coordinates of this thread's cell within the pivot tile.
	int i = BLOCK_SIZE * round + threadIdx.y;
	int j = BLOCK_SIZE * round + threadIdx.x;
	__shared__ int smem_pivot_dist[BLOCK_SIZE][BLOCK_SIZE];
	smem_pivot_dist[threadIdx.y][threadIdx.x] = Dist[i * numOfVertex + j];
	__syncthreads();
	#pragma unroll
	for(int k = 0; k < BLOCK_SIZE; k++){
		newDist = smem_pivot_dist[threadIdx.y][k] + smem_pivot_dist[k][threadIdx.x];
		// Barrier between reading row/column k and updating the tile keeps
		// each k-step consistent across the block (all threads execute the
		// full loop, so the barriers are uniform).
		__syncthreads();
		if(newDist < smem_pivot_dist[threadIdx.y][threadIdx.x]){
			smem_pivot_dist[threadIdx.y][threadIdx.x] = newDist;
		}
		__syncthreads();
	}
	Dist[i * numOfVertex + j] = smem_pivot_dist[threadIdx.y][threadIdx.x];
}
// Blocked Floyd-Warshall, phase 2: relax the blocks that share a row
// (blockIdx.y == 0) or column (blockIdx.y == 1) with the pivot block.
// Launch: grid (numTiles, 2) of BLOCK_SIZE x BLOCK_SIZE threads; the tile
// at blockIdx.x == round is the pivot itself and is skipped.
__global__ void cal_phase2(int* Dist, int numOfVertex, int round){
	if(blockIdx.x == round){
		return;
	}
	int shortestDist;
	int i = BLOCK_SIZE * round + threadIdx.y;
	int j = BLOCK_SIZE * round + threadIdx.x;
	int newDist;
	__shared__ int smem_pivot_dist[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ int smem_current_dist[BLOCK_SIZE][BLOCK_SIZE];
	const int pivotBlockIndex = i * numOfVertex + j;
	int currentBlockIndex;
	// Stage the pivot tile (phase 1 already relaxed it).
	smem_pivot_dist[threadIdx.y][threadIdx.x] = Dist[pivotBlockIndex];
	__syncthreads();
	// Row
	if(blockIdx.y == 0){
		i = BLOCK_SIZE * round + threadIdx.y;
		j = BLOCK_SIZE * blockIdx.x + threadIdx.x;
	}
	// Column
	else{
		i = BLOCK_SIZE * blockIdx.x + threadIdx.y;
		j = BLOCK_SIZE * round + threadIdx.x;
	}
	currentBlockIndex = i * numOfVertex + j;
	smem_current_dist[threadIdx.y][threadIdx.x] = Dist[currentBlockIndex];
	shortestDist = smem_current_dist[threadIdx.y][threadIdx.x];
	__syncthreads();
	// Row tiles: paths go pivot-row -> current column.
	if(blockIdx.y == 0){
		#pragma unroll
		for(int k = 0; k < BLOCK_SIZE; k++){
			newDist = smem_pivot_dist[threadIdx.y][k] + smem_current_dist[k][threadIdx.x];
			__syncthreads();
			if(newDist < shortestDist){
				shortestDist = newDist;
			}
			__syncthreads();
		}
	}
	// Column tiles: paths go current row -> pivot-column.
	else{
		#pragma unroll
		for(int k = 0; k < BLOCK_SIZE; k++){
			newDist = smem_current_dist[threadIdx.y][k] + smem_pivot_dist[k][threadIdx.x];
			__syncthreads();
			if(newDist < shortestDist){
				shortestDist = newDist;
			}
			__syncthreads();
		}
	}
	Dist[currentBlockIndex] = shortestDist;
}
// Blocked Floyd-Warshall, phase 3: relax every remaining tile using the
// already-updated pivot-row and pivot-column tiles from phase 2.
// Launch: grid (numTiles, numTiles) of BLOCK_SIZE x BLOCK_SIZE threads;
// tiles in the pivot row/column are skipped (handled by earlier phases).
__global__ void cal_phase3(int* Dist, int numOfVertex, int round){
	if(blockIdx.x == round || blockIdx.y == round){
		return;
	}
	int i, j;
	int newDist;
	int shortestDist;
	__shared__ int smem_row_pivot_dist[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ int smem_column_pivot_dist[BLOCK_SIZE][BLOCK_SIZE];
	__shared__ int smem_current_dist[BLOCK_SIZE][BLOCK_SIZE];
	// Load row-pivot block
	i = BLOCK_SIZE * round + threadIdx.y;
	j = BLOCK_SIZE * blockIdx.x + threadIdx.x;
	smem_row_pivot_dist[threadIdx.y][threadIdx.x] = Dist[i * numOfVertex + j];
	// Load column-pivot block
	i = BLOCK_SIZE * blockIdx.y + threadIdx.y;
	j = BLOCK_SIZE * round + threadIdx.x;
	smem_column_pivot_dist[threadIdx.y][threadIdx.x] = Dist[i * numOfVertex + j];
	// Load current block to shared memory
	i = BLOCK_SIZE * blockIdx.y + threadIdx.y;
	j = BLOCK_SIZE * blockIdx.x + threadIdx.x;
	smem_current_dist[threadIdx.y][threadIdx.x] = Dist[i * numOfVertex + j];
	shortestDist = smem_current_dist[threadIdx.y][threadIdx.x];
	__syncthreads();
	// No barrier needed inside this loop: the pivot tiles are read-only here.
	#pragma unroll
	for(int k = 0; k < BLOCK_SIZE; k++){
		newDist = smem_column_pivot_dist[threadIdx.y][k] + smem_row_pivot_dist[k][threadIdx.x];
		if(newDist < shortestDist){
			shortestDist = newDist;
		}
	}
	__syncthreads();
	Dist[i * numOfVertex + j] = shortestDist;
}
// Runs the three-phase blocked Floyd-Warshall algorithm on the device.
// `Dist` is the numOfVertex x numOfVertex distance matrix (row major,
// already padded so numOfVertex is a multiple of BLOCK_SIZE); it is
// overwritten with all-pairs shortest distances.
// On any CUDA error the process exits with a distinct status code (2-5).
void block_FW(int* Dist, int numOfVertex) {
	cudaError_t status;
	int* devMem_Dist;
	//long long dataSize = (long long)numOfVertex * (long long)numOfVertex * sizeof(int);
	status = cudaMalloc((void**)&devMem_Dist, numOfVertex *numOfVertex * sizeof(int));
	if(status != cudaSuccess){
		exit(2);
	}
	status = cudaMemcpy(devMem_Dist, Dist, numOfVertex *numOfVertex * sizeof(int), cudaMemcpyHostToDevice);
	if(status != cudaSuccess){
		exit(3);
	}
	int round = numOfVertex / BLOCK_SIZE; //(numOfVertex + BLOCK_SIZE - 1) / BLOCK_SIZE;
	dim3 gridSize_phase1(1, 1);
	dim3 blockSize_phase1(BLOCK_SIZE, BLOCK_SIZE);
	dim3 gridSize_phase2(numOfVertex / BLOCK_SIZE, 2);
	dim3 blockSize_phase2(BLOCK_SIZE, BLOCK_SIZE);
	dim3 gridSize_phase3(numOfVertex / BLOCK_SIZE, numOfVertex / BLOCK_SIZE);
	dim3 blockSize_phase3(BLOCK_SIZE, BLOCK_SIZE);
	for (int r = 0; r < round; ++r) {
		// NOTE(review): this device-to-host copy and matrix dump every
		// round is leftover debug output; it serializes the pipeline and
		// dominates the runtime -- consider removing for production runs.
		status = cudaMemcpy(Dist, devMem_Dist, numOfVertex *numOfVertex * sizeof(int), cudaMemcpyDeviceToHost);
		printf("\n-------before round: %d--------------------\n", r);
		for(int i = 0; i < numOfVertex; i++){
			for(int j = 0; j < numOfVertex; j++){
				if(Dist[i * numOfVertex + j] == INF){
					printf("INF ");
				}
				else
					printf("%d ", Dist[i * numOfVertex + j]);
			}
			printf("\n");
		}
		/* Phase 1*/
		cal_phase1<<<gridSize_phase1, blockSize_phase1>>>(devMem_Dist, numOfVertex, r);
		/* Phase 2*/
		cal_phase2<<<gridSize_phase2, blockSize_phase2>>>(devMem_Dist, numOfVertex, r);
		/* Phase 3*/
		cal_phase3<<<gridSize_phase3, blockSize_phase3>>>(devMem_Dist, numOfVertex, r);
	}
	status = cudaDeviceSynchronize();
	if(status != cudaSuccess){
		exit(4);
	}
	status = cudaMemcpy(Dist, devMem_Dist, numOfVertex *numOfVertex * sizeof(int), cudaMemcpyDeviceToHost);
	if(status != cudaSuccess){
		exit(5);
	}
	cudaFree(devMem_Dist);
}
// Blocked Floyd-Warshall driver.
// usage: program <input file> <output file>
// Binary input format (ints): numOfVertex, numOfEdge, then numOfEdge
// (src, dst, weight) triples.  Writes the original_numOfVertex^2 shortest
// distances (row major ints) to the output file.
int main(int argc, char* argv[]) {
	int numOfVertex, original_numOfVertex, numOfEdge, numOfPadding;
	if (argc < 3) {
		fprintf(stderr, "usage: %s <input file> <output file>\n", argv[0]);
		return 1;
	}
	// ///////////////////////////////////////////////////////////////
	// Input
	// ///////////////////////////////////////////////////////////////
	FILE* inFile = fopen(argv[1], "rb");
	if (inFile == NULL) {
		fprintf(stderr, "cannot open input file\n");
		return 1;
	}
	fread(&numOfVertex, sizeof(int), 1, inFile);
	printf("The number of vertices: %d\n", numOfVertex);
	fread(&numOfEdge, sizeof(int), 1, inFile);
	// Pad the vertex count up to a multiple of BLOCK_SIZE so every kernel
	// works on full tiles.  BUG FIX: the old expression added a whole extra
	// block of padding when numOfVertex was already a multiple of BLOCK_SIZE.
	numOfPadding = (BLOCK_SIZE - (numOfVertex % BLOCK_SIZE)) % BLOCK_SIZE;
	original_numOfVertex = numOfVertex;
	numOfVertex += numOfPadding;
	int* Dist = (int*)malloc(numOfVertex * numOfVertex * sizeof(int));
	int* shortestDist = (int*)malloc(original_numOfVertex * original_numOfVertex * sizeof(int));
	// Initialise: 0 on the diagonal, INF everywhere else.  BUG FIX: the old
	// condition (i != numOfVertex - 1) left the last diagonal entry at INF.
	for (int i = 0; i < numOfVertex; ++i) {
		for (int j = 0; j < numOfVertex; ++j) {
			Dist[i * numOfVertex + j] = (i == j) ? 0 : INF;
		}
	}
	int pair[3];
	for (int i = 0; i < numOfEdge; ++i) {
		fread(pair, sizeof(int), 3, inFile);
		Dist[pair[0] * numOfVertex + pair[1]] = pair[2];
	}
	/////////////////////////////////////////////////////////////
	//Calculate
	/////////////////////////////////////////////////////////////
	block_FW(Dist, numOfVertex);
	FILE* outFile = fopen(argv[2], "wb");
	if (outFile == NULL) {
		fprintf(stderr, "cannot open output file\n");
		return 1;
	}
	// Clamp any overflowed sums back down to INF.
	for (int i = 0; i < numOfVertex; ++i) {
		for (int j = 0; j < numOfVertex; ++j) {
			if (Dist[i * numOfVertex + j] >= INF) Dist[i * numOfVertex + j] = INF;
		}
	}
	// Strip the padding rows/columns before writing the result.
	for(int i = 0; i < original_numOfVertex; i++){
		for(int j = 0; j < original_numOfVertex; j++){
			shortestDist[i * original_numOfVertex + j] = Dist[i * numOfVertex + j];
		}
	}
	////////////////////////////////////////////////////////////
	// Output
	////////////////////////////////////////////////////////////
	fwrite(shortestDist, sizeof(int), original_numOfVertex * original_numOfVertex, outFile);
	fclose(inFile);
	fclose(outFile);
	// BUG FIX: these buffers came from malloc(), so they must be released
	// with free(); delete[] on malloc'd memory is undefined behaviour.
	free(Dist);
	free(shortestDist);
	return 0;
}
|
6,855 | #include "includes.h"
// Batched QR-iteration eigensolver: each CUDA block diagonalises one square
// sub-matrix.  For block b, WidthOfMatrix[b] gives that matrix's order n,
// ind[b] its offset into the packed n*n matrix buffers (z, z1, Q, NewQ, R,
// PrevM, NewM, eigenvector), and vind[b] the offset into the packed length-n
// vector buffers (vector, vector1).
// The kernel repeatedly factorises the current matrix via Householder
// reflections (z = Q*R), forms R*Q^T, and accumulates the orthogonal factors
// into `eigenvector`, iterating until the diagonal stops changing (ratio
// within [0.999999, 1.000001]).  On exit, `vector` holds the diagonal values
// sorted ascending, with the columns of `eigenvector` swapped to match.
// NOTE(review): converged[] is per-block and every thread evaluates the
// do/while the same number of times, so the __syncthreads() calls are
// uniform across the block.
__global__ void block_QR(float* z, float* z1, float* vector, float* vector1, float* Q, float* NewQ, float* R, float* PrevM, float* NewM, int* converged, float* eigenvector, const int *WidthOfMatrix, const int *ind, const int *vind)
{
	//extern __shared__ float z1[];
	int n = WidthOfMatrix[blockIdx.x];
	int index = ind[blockIdx.x];
	int vectindex = vind[blockIdx.x];
	int numofelements = n*n;
	if(threadIdx.x==0){
		converged[blockIdx.x] = 0;
	}
	if(threadIdx.x<numofelements){
		int i;
		for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
			//set eigenvector to the identity matrix.
			if(i/n==i%n)eigenvector[i+index]=1;
			else eigenvector[i+index]=0;
		}
		// Working copies of the input matrix for this iteration.
		for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
			int iplusindex = i+index;
			z1[iplusindex]=z[iplusindex];
			Q[iplusindex]=z[iplusindex];
			PrevM[iplusindex]=z[iplusindex];
		}
		do{
			int k, j, PowOf2;
			// One Householder reflection per column k zeroes the entries
			// below the diagonal of column k.
			for(k=0;k<n-1;k++){
				//Householder Code
				//STEP 0: Get value of z[k*n+k] for use in step 4
				float NormCheck = z[k*n+k+index];
				//STEP 1: Find minor matrix of the input matrix z and sets it to z
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					if(i%n==i/n&&i/n<k)z[i+index]=1;
					else if(i/n>=k&&i%n>=k)z[i+index]=z[i+index];
					else z[i+index]=0;
				}
				__syncthreads();
				//STEP 2: Find kTH column of z and set to vector
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					if(i<n){
						vector[i+vectindex] = z[i*n+k+index];
					}
				}
				//STEP 3: Find the norm of the kTh column and set to NormOfKcol
				float NormOfKcol;
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					if(i<n){
						int iplusvectindex = i + vectindex;
						//Need a temporary for vector that we can change the values of since we need mcol later
						vector1[iplusvectindex] = vector[iplusvectindex];
						vector1[iplusvectindex] *= vector1[iplusvectindex];
					}
				}
				PowOf2 = 1;
				__syncthreads();
				//add all x's together, 2 at a time. O((log(n)) function
				// Tree reduction of the squared entries into vector1[0].
				for(i = 0;i < ((float)n)/2.0;i++){
					for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
						if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
							int jplusvectindex = j + vectindex;
							vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
						}
					}
					__syncthreads();
					//PowOf2 = pow(2,i)
					// NOTE(review): PowOf2 is a per-thread local; this loop
					// doubles it once per element the thread owns -- relies
					// on each thread owning exactly one j<n slot. Verify for
					// launch configs where blockDim.x < n.
					for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
						if(j<n){
							PowOf2 *= 2;
						}
					}
				}
				NormOfKcol = sqrt(vector1[0+vectindex]);
				//STEP 4: Make Norm Negative if NormCheck is > 0
				if(NormCheck > 0) NormOfKcol = -NormOfKcol;
				//STEPS 5+6 Combined: add NormOfKcol to tmp[k]
				if(k==threadIdx.x)vector[k+vectindex]=vector[k+vectindex]+NormOfKcol;
				__syncthreads();
				//STEP 7: Finds the addition of the new kcol and stores it in tmp[0]
				//used in ||tmp||
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					if(i<n){
						int iplusvectindex = i + vectindex;
						vector1[iplusvectindex] = vector[iplusvectindex] * vector[iplusvectindex];
						PowOf2 = 1;
					}
				}
				__syncthreads();
				//add all tmp's together, 2 at a time. O(n(log(n)) function
				for(i = 0;i < ((float)n)/2.0;i++){
					for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
						if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
							int jplusvectindex = j + vectindex;
							vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
						}
					}
					__syncthreads();
					//PowOf2 = pow(2,i)
					for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
						if(j<n){
							PowOf2 *= 2;
						}
					}
				}
				__syncthreads();
				//STEP 8: Divide vector Vmadd by the Norm[0] and set it to Vdiv
				// Vdiv = Vmadd / norm
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					if(i<n){
						int iplusvectindex = i + vectindex;
						vector[iplusvectindex] = vector[iplusvectindex]/(sqrt(vector1[vectindex]));
					}
				}
				__syncthreads();
				//STEP 9: Multiply the Vdiv vector by its transverse and subtract that from I, store the resulting matrix in Vmul
				// Vmul = I - 2 * Vdiv * Vdiv^T
				//threadIdx.x%n = column
				//threadIdx.x/n = row (integer division)
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					R[i+index] = -2 * vector[i/n+vectindex] * vector[i%n+vectindex];
				}
				//if on the diagonal(row==column)
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					if(i/n==i%n){
						R[i+index] += 1;
					}
				}
				__syncthreads();
				//STEP 10: Multiply Vmul by input matrix z1 and store in VmulZ
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					z[i+index]=0;
					for(j=0;j<n;j++){
						z[i+index]+= R[i/n*n+j+index] * z1[j*n+i%n+index];
					}
				}
				//STEP 11: if k!=0 Multiply Vmul by input matrix Q and set to NewQ
				if(k!=0){
					for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
						NewQ[i+index]=0;
						for(j=0;j<n;j++)
						{
							NewQ[i+index]+= R[i/n*n+j+index] * Q[j*n+i%n+index];
						}
					}
				}
				__syncthreads();
				//STEP 12.1: If first iteration of k, set Q to vmul for use in next iteration of k
				if(k==0){
					for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
						Q[i+index] = R[i+index];
					}
				}
				//STEP 12.2: If after first iteration of k, set Q to NewQ, which was found by multiplying the old Q by Vmul.
				else {
					for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
						Q[i+index] = NewQ[i+index];
					}
				}
				//STEP 12.3: Set z and z1 to VmulZ for use in the next iteration of k.
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					z1[i+index] = z[i+index];
				}
				__syncthreads();
			}
			//Once for loop is completed:
			//STEP 13: Multiply matrices Q and m to find the matrix R
			for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
				R[i+index]=0;
			}
			for(i=0;i<n;i++)
			{
				for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
					R[j+index]+= Q[j/n*n+i+index] * PrevM[i*n+j%n+index];
				}
			}
			__syncthreads();
			//STEP 14: Find the transpose of matrix Q and store int TransposeOfQ
			//threadIdx.x%n = column
			//threadIdx.x/n = row (integer division)
			// # -> #%n*n+#/n
			// for n=4 0->0 1->4 2->8 3->12
			// 4->1 5->5 6->9 7->13
			// 8->2 9->6 10->10 11->14
			// 12->3 13->7 14->11 15->15
			for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
				z[i%n*n+i/n+index] = Q[i+index];
			}
			__syncthreads();
			//STEP 14.5: Multiply matrices eigenvector and TransposeOfQ and store in eigenvector(use NewM as a temporary matrix)
			//NewM contains new eigenvectors
			for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
				NewM[i+index]=0;
				for(j=0;j<n;j++){
					NewM[i+index]+= eigenvector[i/n*n+j+index] * z[j*n+i%n+index];
				}
			}
			__syncthreads();
			for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
				eigenvector[i+index]=NewM[i+index];
			}
			__syncthreads();
			//STEP 15: Multiply matrices R and TransposeOfQ and store in NewM matrix
			for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
				NewM[i+index]=0;
				for(j=0;j<n;j++)
				{
					NewM[i+index]+= R[i/n*n+j+index] * z[j*n+i%n+index];
				}
			}
			//STEP 16: Check for Convergence of New Matrix (Newm)
			if(threadIdx.x==0){
				converged[blockIdx.x] = 1;
			}
			__syncthreads();
			//threadIdx.x%n = column
			//threadIdx.x/n = row (integer division)
			// NOTE(review): a zero diagonal entry in NewM makes this ratio
			// divide by zero -- verify inputs exclude that case.
			for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
				if(i/n==i%n&&(PrevM[i+index]/NewM[i+index]>1.000001||
				PrevM[i+index]/NewM[i+index]<0.999999)){
					converged[blockIdx.x] = 0;
				}
			}
			__syncthreads();
			//STEP 17: Set up for next iteration if converged is 0
			if(converged[blockIdx.x]==0){
				for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
					int iplusindex = i + index;
					z[iplusindex] = NewM[iplusindex];
					z1[iplusindex] = NewM[iplusindex];
					Q[iplusindex] = NewM[iplusindex];
					PrevM[iplusindex] = NewM[iplusindex];
				}
			}
			__syncthreads();
		}while(converged[blockIdx.x]==0);
		//put eigenvalues into vector
		if(threadIdx.x<n){
			vector[threadIdx.x+vectindex]=NewM[threadIdx.x+threadIdx.x*n+index];
		}
		__syncthreads();
		if(threadIdx.x==0){
			//Sort Eigenvalues low to high and swap eigenvectors to match eigenvalues
			//Simple Bubble Sort
			int i1,i2,i3;
			for(i1=vectindex;i1<n-1+vectindex;i1++){
				for(i2=i1+1;i2<n+vectindex;i2++){
					if(vector[i1]>vector[i2]){
						float tmp = vector[i1];
						vector[i1] = vector[i2];
						vector[i2] = tmp;
						// Swap the matching eigenvector columns as well.
						for(i3 = 0;i3<n;i3++){
							float tmp = eigenvector[i3*n+(i1-vectindex)%n+index];
							eigenvector[i3*n+(i1-vectindex)%n+index] = eigenvector[i3*n+(i2-vectindex)%n+index];
							eigenvector[i3*n+(i2-vectindex)%n+index] = tmp;
						}
					}
				}
			}
		}
	}
} |
6,856 | #include "includes.h"
// Weight-deformation kernel (Darknet-style data augmentation on conv filters).
// One thread owns one kernel_size x kernel_size filter plane (flat index
// `index`). The planes are partitioned into 8 equal "stages", each applying
// a different deformation from src_weight_gpu into weight_deform_gpu:
//   stage 0     : identity copy
//   stages 1..4 : bilinear rescale about the centre by 0.65 / 0.8 / 1.2 / 1.4
//   stages 5..6 : bilinear rotation by +angle / -angle degrees
//   stage 7     : horizontal flip
// `reverse` inverts the scale (1/scale) and negates the angle so the
// deformation can be undone on a second pass.
// NOTE(review): stage_step = (nweights/kernel_area)/8 is 0 when there are
// fewer than 8 filter planes, making `index / stage_step` a division by
// zero — callers presumably guarantee enough filters; confirm.
__global__ void stretch_sway_flip_weights_kernel(const float *src_weight_gpu, float *weight_deform_gpu, int nweights, int n, int kernel_size, float angle, int reverse)
{
const int index = blockIdx.x*blockDim.x + threadIdx.x;
const int kernel_area = kernel_size * kernel_size;
// First element of this thread's filter plane.
const int i = index * kernel_area;
const int stage_step = (nweights / kernel_area) / 8; // 8 stages
const int stage_id = index / stage_step;
// nweights = (c / groups) * n * size * size;
// kernel_area = size*size
if (i < nweights)
{
if (stage_id == 0) {
// simple copy
for (int x = 0; x < kernel_size; ++x) {
for (int y = 0; y < kernel_size; ++y) {
weight_deform_gpu[x + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
else if (stage_id == 1 || stage_id == 2 || stage_id == 3 || stage_id == 4)
{
// Bilinear rescale about the filter centre. For each destination texel
// (x, y), sample the source at the inverse-mapped location and blend
// the 4 surrounding source texels; samples falling outside the filter
// accumulate their blend weight into dropout_sum.
float scale = 0.5;
if (stage_id == 1) scale = 0.65;
else if (stage_id == 2) scale = 0.8;
else if (stage_id == 3) scale = 1.2;
else if (stage_id == 4) scale = 1.4;
if (reverse) scale = 1 / scale;
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x_c + (x_d - x_c) / scale
// Ysource = y_c + (y_d - y_c) / scale
float x_s = x_c + (x - x_c) / scale;
float y_s = y_c + (y - y_c) / scale;
int x_0 = floor(x_s); // round down
int x_1 = ceil(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floor(y_s);
int y_1 = ceil(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
// Bilinear blend weights (sum to 1 when all 4 taps are in bounds).
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
//const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
// For upscaling (scale > 1) the whole plane is divided by scale instead
// of using the dropout-based coefficient above.
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
if(scale > 1)
weight_deform_gpu[x + y*kernel_size + i] /= scale;// *= coef;
}
}
}
else if (stage_id == 5 || stage_id == 6)
{
// rotate left or right
if (stage_id == 6) angle = -angle;
if (reverse) angle = -angle;
// Rotation matrix terms; angle is given in degrees.
const float cos_a = cosf(angle * 3.14159265 / 180);
const float sin_a = sinf(angle * 3.14159265 / 180);
const int x_c = kernel_size / 2;
const int y_c = kernel_size / 2;
float dropout_sum = 0;
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
// Xsource = x*cos(alpha) + y*sin(alpha)
// Ysource = -x*sin(alpha) + y*cos(alpha)
float x_s = x_c + (x - x_c)*cos_a + (y - y_c)*sin_a;
float y_s = y_c - (x - x_c)*sin_a + (y - y_c)*cos_a;
int x_0 = floor(x_s); // round down
int x_1 = ceil(x_s); // round up
if (x_0 == x_1) x_1 = x_0 + 1;
int y_0 = floor(y_s);
int y_1 = ceil(y_s);
if (y_0 == y_1) y_1 = y_0 + 1;
float c_x_0 = x_1 - x_s;
float c_x_1 = x_s - x_0;
float c_y_0 = y_1 - y_s;
float c_y_1 = y_s - y_0;
float val = 0;
if (x_0 >= 0 && x_0 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_0 + y_0*kernel_size + i] * c_x_0 * c_y_0;
else dropout_sum += c_x_0 * c_y_0;
if (x_1 >= 0 && x_1 < kernel_size && y_0 >= 0 && y_0 < kernel_size) val += src_weight_gpu[x_1 + y_0*kernel_size + i] * c_x_1 * c_y_0;
else dropout_sum += c_x_1 * c_y_0;
if (x_0 >= 0 && x_0 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_0 + y_1*kernel_size + i] * c_x_0 * c_y_1;
else dropout_sum += c_x_0 * c_y_1;
if (x_1 >= 0 && x_1 < kernel_size && y_1 >= 0 && y_1 < kernel_size) val += src_weight_gpu[x_1 + y_1*kernel_size + i] * c_x_1 * c_y_1;
else dropout_sum += c_x_1 * c_y_1;
weight_deform_gpu[x + y*kernel_size + i] = val;
}
}
// compensate for dropped items
// Rescale so the total filter mass lost to out-of-bounds taps is restored.
const float coef = (kernel_size*kernel_size) / (kernel_size*kernel_size - dropout_sum);
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[x + y*kernel_size + i] *= coef;
}
}
}
else if (stage_id == 7)
{
// flip
for (int y = 0; y < kernel_size; ++y) {
for (int x = 0; x < kernel_size; ++x) {
weight_deform_gpu[(kernel_size - x - 1) + y*kernel_size + i] = src_weight_gpu[x + y*kernel_size + i];
}
}
}
}
} |
6,857 | #include <cstdlib>
#include <cstdio>
#include <algorithm>
#include <chrono>
#include <thread>
#include <iostream>
using namespace std;
const int W = 1000;
const int H = 1000;
// One Game-of-Life step: read `current`, write the next generation to `next`.
// Grid-stride loop over all W*H cells; the board wraps around toroidally.
__global__
void compute_transition(const bool *const current, bool *const next) {
    const int step = blockDim.x * gridDim.x;
    for (int cell = blockIdx.x * blockDim.x + threadIdx.x; cell < W * H; cell += step) {
        const int col = cell % W;
        const int row = cell / W;
        // Count the 8 wrapped neighbours.
        int alive = 0;
        for (int dy = -1; dy <= 1; ++dy) {
            const int r = (row + dy + H) % H;
            for (int dx = -1; dx <= 1; ++dx) {
                if (dy == 0 && dx == 0) continue;
                const int c = (col + dx + W) % W;
                alive += current[r * W + c];
            }
        }
        // Standard B3/S23 rules.
        next[cell] = current[cell] ? (alive == 2 || alive == 3) : (alive == 3);
    }
}
// Dump the W x H board to stdout, one row per line ('#' = alive, ' ' = dead).
void print_grid(const bool *const grid) {
    for (int row = 0; row < H; ++row) {
        for (int col = 0; col < W; ++col) {
            cout << (grid[row * W + col] ? "#" : " ");
        }
        cout << endl;
    }
}
/*
 * Runs 100 generations of Conway's Game of Life on a W x H toroidal board
 * using managed memory, then prints the elapsed GPU time in milliseconds.
 * Fix: the first cudaMallocManaged argument had been HTML-mangled into
 * "¤t" — restored to "&current" so the file compiles again.
 */
int main() {
    bool *current, *next;
    // Managed allocations are reachable from both host and device.
    cudaMallocManaged(&current, sizeof(bool) * W * H);
    cudaMallocManaged(&next, sizeof(bool) * W * H);
    // Random initial population (rand() is unseeded, so runs are repeatable).
    for (int i = 0; i < W*H; ++i)
        current[i] = rand() % 2;
    auto start_time = chrono::high_resolution_clock::now();
    int blockSize = 128;
    int nBlocks = (W*H + blockSize - 1) / blockSize;   // ceil-div launch config
    for (int i = 0; i < 100; ++i) {
        compute_transition<<<nBlocks, blockSize>>>(current, next);
        // Launches on the default stream run in order, so ping-ponging the
        // pointers on the host between launches is safe without a sync.
        swap(current, next);
    }
    cudaDeviceSynchronize();   // wait for all 100 generations before timing
    auto end_time = chrono::high_resolution_clock::now();
    auto ms = chrono::duration_cast<chrono::milliseconds>(end_time - start_time);
    cout << ms.count() << "ms" << endl;
    cudaFree(next);
    cudaFree(current);
} |
6,858 | #include <iostream>
#include <vector>
#define PB push_back
#define MP make_pair
#define vvd vector<vector<double> >
#define vd vector<double>
typedef long long int ll;
using namespace std;
#define rep(i, begin, end) for (__typeof(end) i = (begin) - ((begin) > (end)); i != (end) - ((begin) > (end)); i += 1 - 2 * ((begin) > (end)))
#define sz(a) (int)(a).size()
#define pii pair<int, int>
#define pll pair<ll, ll>
// Device version of pre-training
// Can use thrust library
// OR use a flattened array here
//__global__ void pretrain_fwd(vvd *W1, vvd *b1, vvd *W2, vvd *b2, vvd *X, vvd *Z, vd *rho, vd *err){
// Device-side pre-training forward pass — currently a stub: it only derives
// the flat global thread index and performs no work on W1 yet.
__global__ void pretrain_fwd(double *W1){
int bid = blockIdx.x, tid = threadIdx.x;
// Flat global thread index; unused until the kernel body is implemented.
int idx = tid + bid*blockDim.x;
}
// Scaffolding for the pre-training experiment: allocates a device buffer for
// W1, copies it row by row, and launches the stub kernel.
// NOTE(review): W1 is default-constructed and never filled, so the copy loop
// below iterates zero times; H and W are hard-coded placeholders. The device
// buffer is never freed and there is no synchronization or error checking.
int main(){
vvd W1, W2, x, y, h_x, y_x;
vd b1, b2;
//int H = sz(W1), W = sz(W1[0]);
int H = 2, W = 3;
// Only dW1 is used; the rest are placeholders for later stages.
double *dW1, *db1, *dW2, *db2, *dX, *dZ, *drho, *derr;
cudaMalloc(&dW1, H * W * sizeof(double));
double *dst = dW1;
// Flatten the row-of-rows host matrix into the contiguous device buffer,
// one cudaMemcpy per row (each vector's storage is contiguous).
for(vvd::iterator it = W1.begin(); it != W1.end(); it++){
double *src = &((*it)[0]);
int sz = it->size();
cudaMemcpy(dst, src, sizeof(double)*sz, cudaMemcpyHostToDevice);
dst += sz;
}
pretrain_fwd<<<1, 1>>>(dW1);
}
|
6,859 | //xfail:BOOGIE_ERROR
//--blockDim=1024 --gridDim=1
//null pointer access
// ALTOUGH, IT WORKS
#include <stdio.h>
#include <cuda.h>
#define N 2//4//8
// Verification test case (see the xfail header above): deliberately forms a
// possibly-invalid pointer to exercise a static analyzer's null/OOB checks.
// Arithmetic note: tmp = H + sizeof(int) bytes, then G -= 1 subtracts one
// int, so G ends up equal to H again — which is why it "works" at runtime.
__global__ void foo(int *H) {
size_t tmp = (size_t)H; //type cast
tmp += sizeof(int);
int *G = (int *)tmp;
G -= 1; //POSSIBLE NULL POINTER ACCESS
G[threadIdx.x] = threadIdx.x;
__syncthreads();
H[threadIdx.x] = G[threadIdx.x];
}
|
6,860 | //
// Created by root on 2020/11/11.
//
#include "cuda_runtime.h"
#include "iostream"
// Element-wise sum of two nx*ny row-major int matrices: c = a + b.
// The kernel is 1-D: each thread owns one column (global x index) and walks
// all ny rows of that column.
// Fix: added the `idx < nx` guard — launch configurations round the thread
// count up past nx (main launches 32-wide blocks for nx == 5), and the
// unguarded threads wrote past the end of all three allocations.
__global__ void addMatrix(int* a, int* b, int* c, int nx, int ny) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= nx) return;
    for (int idy = 0; idy < ny; idy++) {
        int index = idy * nx + idx;
        c[index] = a[index] + b[index];
    }
}
/*
 * Builds two small x*y int matrices, adds them on the GPU and prints the sum.
 * Fixes: the launch used a 32x32 2-D block although addMatrix only indexes
 * with threadIdx.x/blockIdx.x — every threadIdx.y > 0 lane duplicated work
 * and the oversized x-extent drove out-of-bounds writes. Replaced with a
 * 1-D ceil-div launch matching the kernel's indexing. The device pointers
 * were also misleadingly named h_* — renamed to d_*.
 */
int main() {
    int x = 5, y = 2;                 // columns, rows
    int size = x * y * sizeof(int);
    int *a = (int*) malloc(size);
    int *b = (int*) malloc(size);
    int *c = (int*) malloc(size);
    for (int i = 0; i < x * y; i++) {
        a[i] = i * 2;
        b[i] = i + 1;
    }
    // Device copies of the three matrices.
    int* d_a;
    int* d_b;
    int* d_c;
    cudaMalloc(&d_a, size);
    cudaMalloc(&d_b, size);
    cudaMalloc(&d_c, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // One thread per column; each thread iterates over the y rows itself.
    dim3 block(32);
    dim3 grid((x + block.x - 1) / block.x);
    addMatrix<<<grid, block>>>(d_a, d_b, d_c, x, y);
    // Blocking copy on the default stream also synchronizes with the kernel.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < x * y; i++) {
        std::cout << c[i] << std::endl;
    }
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
|
6,861 | #include <cmath>
#include <cstdlib>
#include <cstdio>
#include <sys/time.h>
#define M 8 //
#define K 8
/*
for (int mb = 0; mb < M; mb += Mtile)
for (int nb = 0; nb < N; nb += Ntile)
for (int kb = 0; kb < K; kb += Ktile)
{
// compute Mtile-by-Ntile-by-Ktile matrix product
for (int k = 0; k < Ktile; ++k)
for (int i = 0; i < Mtile; ++i)
for (int j = 0; j < Ntile; ++j)
{
int row = mb + i;
int col = nb + j;
C[row][col] +=
A[row][kb + k] * B[kb + k][col];
}
}
*/
// C = A * B for N x N row-major float matrices.
// Launch contract (see main): grid(N/M/K, N/M/K) blocks of K x K threads,
// i.e. N/M thread columns and N/M thread rows overall; each thread computes
// one disjoint M x M tile of C.
// Fix: the original iterated i = ii .. ii+M with ii being the RAW thread
// index, so adjacent threads recomputed overlapping rows and every row past
// (N/M - 1) + M - 1 was never written at all (e.g. rows 15..63 for N=64).
// Scaling the thread index by M gives each thread its own tile and covers
// all N rows/columns exactly once.
__global__ void matmul(float *A, float *B, float *C, int N) {
    int ii = (threadIdx.x + blockDim.x * blockIdx.x) * M;   // first row of this tile
    int jj = (threadIdx.y + blockDim.y * blockIdx.y) * M;   // first column of this tile
    for (int i = ii; i < ii + M; i++) {
        for (int j = jj; j < jj + M; j++) {
            float sum = 0.0f;
            for (int k = 0; k < N; k++) {
                sum += A[N*i+k] * B[N*k+j];
            }
            C[N*i+j] = sum;
        }
    }
}
// Benchmark driver: multiplies two random N x N matrices on the GPU, times
// it, then recomputes the product on the CPU (OpenMP) and reports the mean
// absolute difference.
// NOTE(review): argv[1] is used unchecked, and the launch geometry assumes
// N is divisible by M*K — confirm callers guarantee both.
int main(int argc, char **argv) {
int N = atoi(argv[1]);
// Allocate memory space for matrices to cpu (host)
float * h_A = new float [N*N]; // First matrix
float * h_B = new float [N*N]; // Second matrix
float * h_C = new float [N*N]; // Result matrix
// Allocate memory space for matrices to gpu (device)
float *d_A, *d_B, *d_C; // Gpu allocations
int size = N * N * sizeof(float); // Byte size for cuda malloc
cudaMalloc((void **) &d_A, size);
cudaMalloc((void **) &d_B, size);
cudaMalloc((void **) &d_C, size);
// Init cpu matrices with random values.
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
h_A[N*i+j] = drand48();
h_B[N*i+j] = drand48();
h_C[N*i+j] = 0;
}
}
// Copy matrices to gpu memory
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
// Make gpu multiplication
struct timeval tic, toc;
gettimeofday(&tic, NULL); //Start timer
// Invoke gpu multiplication
dim3 grid(N/M/K, N/M/K); // Amount of 2 dimensional blocks per axis
dim3 block(K,K); // Thread size per axis
matmul<<<grid,block>>>(d_A, d_B, d_C, N);
cudaDeviceSynchronize();
// Calculate and print flops
gettimeofday(&toc, NULL); //End timer
double time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9); // Print flops for gpu multiplication
// Copy matrices back to cpu memory
// (copying A and B back is redundant — they were not modified — but harmless)
cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Make controll by trivially recalculating matrix values and subtracting them from the current cpu output matrix (and time)
gettimeofday(&tic, NULL); //Start timer
#pragma omp parallel for
for (int i=0; i<N; i++) {
for (int k=0; k<N; k++) {
for (int j=0; j<N; j++) {
h_C[N*i+j] -= h_A[N*i+k] * h_B[N*k+j];
}
}
}
gettimeofday(&toc, NULL); //End timer
time = toc.tv_sec-tic.tv_sec+(toc.tv_usec-tic.tv_usec)*1e-6;
printf("N=%d: %lf s (%lf GFlops)\n",N,time,2.*N*N*N/time/1e9); // Print flops for cpu multiplication
// Total error: Sum difference on each value between cpu and gpu calculation
float err = 0;
for (int i=0; i<N; i++) {
for (int j=0; j<N; j++) {
err += fabs(h_C[N*i+j]);
}
}
printf("error: %f\n",err/N/N); // Print total error
// Clear memory
delete[] h_A;
delete[] h_B;
delete[] h_C;
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
|
6,862 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
// Toy struct used to exercise copying nested device pointers inside an
// array of structs.
struct test{
double gene;          // host-set scalar, copied to device in main
double * OS_gene;     // set on the device by pointer() to alias gene_array[i]
double test_OS_gene;  // filled on the device by testing() from *OS_gene
int * canRunTool;     // per-struct int array (device copy patched in from host)
};
// For each struct (one thread per struct), aim its OS_gene pointer at the
// matching slot of the device-resident gene_array.
__global__ void pointer(struct test * testStructs, double * gene_array, int size){
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= size) return;
    testStructs[tid].OS_gene = gene_array + tid;
}
// Smoke-test kernel: prints each thread's index plus a hello message.
// NOTE(review): parameter N is unused, and the second printf has no trailing
// newline, so its output may interleave oddly in the buffered device log.
__global__ void test2(int N){
int index = threadIdx.x;
printf("index = %d\n", index);
printf("CUDA said: hello world");
}
// Mutates each struct in place: bumps every canRunTool entry by 1 and copies
// the pointed-to gene value into test_OS_gene (OS_gene must have been set by
// pointer() first).
// NOTE(review): the loop bound 10 is hard-coded to match canRunTools in
// main — confirm they stay in sync if either changes.
__global__ void testing(struct test *testStructs, int size){
// int index = threadIdx.x + blockIdx.x * blockDim.x;
int index = threadIdx.x;
printf("index = %d\n", index);
// printf("gene = %d\n", testStructs[index].gene);
if(index < size){
for(int i = 0; i < 10; ++i){
testStructs[index].canRunTool[i] += 1;
}
testStructs[index].test_OS_gene = *testStructs[index].OS_gene;
}
}
// Demonstrates the standard "deep copy an array of structs with embedded
// pointers" pattern: copy the struct array to the device, then patch each
// struct's canRunTool member to point at a separately-allocated device
// buffer, launch the kernels, and copy the structs back.
// NOTE(review): printf uses %d for sizeof (a size_t) — format mismatch on
// 64-bit; host_canRunToolsTemp and the temps[] device buffers are allocated
// but never freed or read back; no allocations are released before exit.
int main(int argc, char const * argv[]){
int structSize = 10;
int numBytes = structSize * sizeof(struct test);
printf("sizeof = %d\n", sizeof(struct test));
int canRunTools = 10;
int canRunToolsBytes = canRunTools * sizeof(int);
struct test * stests = (struct test *)malloc(numBytes);
int i, j;
// Fill each struct's host-side canRunTool with a running counter.
for(i = j = 0; i < structSize; ++i){
stests[i].canRunTool = (int *)malloc(canRunToolsBytes);
for(int k = 0;k < canRunTools; ++j, ++k){
stests[i].canRunTool[k] = j;
stests[i].gene = i;
printf("%d ", j);
}
printf("\n");
}
struct test * dev_tests;
cudaMalloc((void **) &dev_tests, numBytes);
// Shallow copy: the canRunTool members still hold HOST pointers here.
cudaMemcpy(dev_tests, stests, numBytes, cudaMemcpyHostToDevice);
// Round-trip sanity check of the shallow copy.
struct test * temp_tests = (struct test*)malloc(numBytes);
cudaMemcpy(temp_tests, dev_tests, numBytes, cudaMemcpyDeviceToHost);
for(int i = 0; i < structSize; ++i){
printf("49 gene = %.0f\n", temp_tests[i].gene);
}
int * dev_canRunToolsTemp;
int ** temps = (int **)malloc(structSize*sizeof(int *));
// Deep-copy fixup: allocate a device buffer per struct, copy its data, then
// overwrite the struct's canRunTool field on the device with that pointer.
for(i = 0; i < structSize; ++i){
cudaMalloc((void **)&dev_canRunToolsTemp, canRunToolsBytes);
temps[i] = dev_canRunToolsTemp;
cudaMemcpy(dev_canRunToolsTemp, stests[i].canRunTool,canRunToolsBytes, cudaMemcpyHostToDevice);
cudaMemcpy(&(dev_tests[i].canRunTool), &dev_canRunToolsTemp, sizeof(dev_canRunToolsTemp), cudaMemcpyHostToDevice);
}
dim3 threadsPerBlock(2, 2);
dim3 numBlocks(structSize / threadsPerBlock.x, structSize / threadsPerBlock.y);
double gene_array[10];
for(int i = 0; i < 10; ++i){
gene_array[i] = (double)i / 10.0;
}
double *dev_gene_array;
cudaMalloc((void**)&dev_gene_array, sizeof(double)*10);
cudaMemcpy(dev_gene_array, gene_array,sizeof(double)*10, cudaMemcpyHostToDevice);
// Same default stream, so the three launches run in order.
test2<<<1, structSize>>>(5);
pointer<<<1, structSize>>>(dev_tests, dev_gene_array, structSize);
testing<<<1, structSize>>>(dev_tests, structSize);
// testing<<<numBlocks, threadsPerBlock>>>(dev_tests, 4);
int * host_canRunToolsTemp;
host_canRunToolsTemp = (int*)malloc(canRunToolsBytes);
// Blocking copy doubles as synchronization with the kernels above.
cudaMemcpy(stests, dev_tests, numBytes, cudaMemcpyDeviceToHost);
for(i = 0; i < structSize; ++i){
printf("%.3f\n", stests[i].test_OS_gene);
}
}
|
6,863 | #include "resources.cuh"
// A processor is idle exactly when no job is loaded.
__device__ __host__ bool Processor::is_idle()
{
    return jb == NULL;
}
// Accessor: the per-context-switch overhead, in processor cycles.
__device__ __host__ int Processor::overhead()
{
return _overhead_cycle;
}
// Attach a job to this (currently idle) processor and activate it.
// Each load costs one context switch worth of overhead cycles.
__device__ __host__ void Processor::load(job *_job)
{
assert(is_idle());
_overhead += _overhead_cycle;
jb = _job;
jb->activate();
}
// Replace the running job with _job; the displaced job is marked preempted
// and returned to the caller (e.g. so a scheduler can re-queue it).
__device__ __host__ job* Processor::preempt(job *_job)
{
assert(!is_idle());
job *preempted_job = jb;
jb->preempt();
unload();
load(_job);
return preempted_job;
}
// Detach the current job, leaving the processor idle.
// NOTE(review): load() accumulates overhead with `+=` while unload()
// overwrites it with `=` — confirm the reset (rather than accumulation)
// is intentional.
__device__ __host__ void Processor::unload()
{
assert(!is_idle());
_overhead = _overhead_cycle;
jb = NULL;
}
// Advance the processor by one host tick; every _ratio host ticks make one
// processor cycle. Pending overhead cycles (context-switch cost) are burned
// first; only then does the loaded job make progress, terminating and
// unloading when it completes.
// Fix: the original ran the job when is_idle() was TRUE, i.e. it
// dereferenced jb precisely when jb was NULL — the job must run only when
// one is actually loaded.
__device__ void Processor::run(int ticks)
{
    if ((ticks % _ratio) == 0)
    {
        _cycle += 1;
        if (_overhead >= 0)
        {
            _overhead -= 1;
        }
        else if (!is_idle())
        {
            jb->run();
            if (jb->completed())
            {
                jb->terminate(ticks);
                unload();
            }
        }
    }
}
// Intentionally a no-op: this processor model has no teardown work.
__device__ __host__ void Processor::stop()
{
// Nothing
} |
6,864 | #include <stdio.h>
#include <string.h>
const int N = 8;
const int BLOCKSIZE = 8;
const int GRIDSIZE = 1;
// ---------------------------------------------- KERNELS ---------------------------------------------------------------
// Hillis Steele Scan - Inclusive Scan
// Exclusive prefix sum of N ints for a single block (one thread per element).
// Despite the name, the algorithm is a Hillis–Steele scan (inclusive, then
// shifted right by one on output), not the work-efficient Blelloch scan.
// Fix: the original did `cache[myId] += cache[myId - d]` in place with only
// one barrier per step, so thread t could read cache[t-d] while thread t-d
// was overwriting it — a shared-memory data race. Each step now reads the
// partner value, barriers, then writes, with a second barrier before the
// next step.
__global__ void gpu_blelloch_scan (int *in, int *out)
{
    // Shared working buffer, sized by the third <<<>>> launch argument.
    extern __shared__ int cache[];
    int myId = threadIdx.x;

    cache[myId] = in[myId];
    __syncthreads();

    for (int d = 1; d < N; d <<= 1)
    {
        // Snapshot the partner BEFORE anyone overwrites it this step
        // (adding 0 when myId < d leaves the value unchanged).
        int partner = (myId >= d) ? cache[myId - d] : 0;
        __syncthreads();
        cache[myId] += partner;
        __syncthreads();
    }

    // Inclusive -> exclusive: shift right, identity (0) in slot 0.
    if (myId == 0) out[0] = 0;
    else out[myId] = cache[myId-1];
}
// ---------------------------------------------------------------------------------------------------------------------------
// -------------------------------------------------- CPU Functions ----------------------------------------------------------
// Print the N elements of v space-separated, then a blank line.
void print (int *v)
{
    int i = 0;
    while (i < N)
        printf("%d ", v[i++]);
    printf("\n\n");
}
// Fill v with the ramp 1..N (v[i] = i + 1).
void generate (int *v)
{
    for (int value = 1; value <= N; ++value)
        v[value - 1] = value;
}
// Print the usage banner for a program that takes no arguments.
void Usage (char pName[])
{
    const char *bar = "============================================\n";
    printf("%s", bar);
    printf("Usage:> %s \n", pName);
    printf("%s", bar);
}
// ---------------------------------------------------------------------------------------------------------------------------
// ------------------------------------------------ MAIN FUNCTION ------------------------------------------------------------
// Driver: builds an N-element input, runs the single-block scan kernel, and
// prints input and exclusive-scan output.
int main (int argc, char *argv[])
{
if (argc-1 != 0)
{
Usage(argv[0]);
exit(1);
}
// Declare and allocate memory for the host and device structures
int *h_in, *h_out;
int *d_in, *d_out;
size_t sizeIn = N*sizeof(int);
size_t sizeOut = N*sizeof(int);
h_in = (int*)malloc(sizeIn); generate(h_in); print(h_in);
cudaMalloc(&d_in,sizeIn);
cudaMemcpy(d_in,h_in,sizeIn,cudaMemcpyHostToDevice);
h_out = (int*)malloc(sizeOut);
cudaMalloc(&d_out,sizeOut);
// Single block of BLOCKSIZE threads (one thread per element).
dim3 gridSize(1,1);
dim3 blockSize(BLOCKSIZE,1);
// Dynamic shared memory: 2*BLOCKSIZE ints (the kernel only indexes the
// first BLOCKSIZE, so this is generous).
size_t sharedMem = sizeof(int)*BLOCKSIZE*2;
// Call reduce kernel
gpu_blelloch_scan<<<gridSize,blockSize,sharedMem>>>(d_in,d_out);
// Blocking copy also synchronizes with the kernel on the default stream.
cudaMemcpy(h_out,d_out,sizeOut,cudaMemcpyDeviceToHost);
// Print the result
print(h_out);
free(h_in); free(h_out);
cudaFree(d_in); cudaFree(d_out);
return 0;
} |
6,865 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>
/**
 * Allocate LENGTH floats on the host (CPU), aborting the process with a
 * diagnostic on stderr if malloc fails. Never returns NULL.
 */
float *create_host_float_array(const int LENGTH)
{
    const int MEM_SIZE = sizeof(float) * LENGTH;
    float *array = (float *)malloc(MEM_SIZE);
    if (array != NULL)
        return array;
    fprintf(stderr, "Failed to allocate array of floats on the host (CPU) with length %d and size %d; exiting...\n", LENGTH, MEM_SIZE);
    exit(EXIT_FAILURE);
}
/**
 * Allocate LENGTH floats on the CUDA device, aborting the process with a
 * diagnostic (including the CUDA error string) if cudaMalloc fails.
 */
float *create_device_float_array(const int LENGTH)
{
    const int MEM_SIZE = sizeof(float) * LENGTH;
    float *array = NULL;
    const cudaError_t err = cudaMalloc((void **)&array, MEM_SIZE);
    if (err == cudaSuccess)
        return array;
    fprintf(stderr, "Failed to allocate array of floats on the CUDA-device with length %d and size %d; CUDA error code: %s; exiting...\n", LENGTH, MEM_SIZE, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
/**
 * Copy MEM_SIZE bytes from host memory into device memory, aborting with a
 * diagnostic on failure.
 * Fix: the original format string had no %s conversion for the trailing
 * cudaGetErrorString(err) argument — a mismatched fprintf argument list
 * that silently dropped the CUDA error text.
 */
void copy_mem_host_to_device(void *host_pointer, void *device_pointer, const int MEM_SIZE)
{
    cudaError_t err = cudaMemcpy(device_pointer, host_pointer, MEM_SIZE, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy host pointer (%p) into device pointer (%p) with size %d; CUDA error code: %s; exiting...\n", host_pointer, device_pointer, MEM_SIZE, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
/**
 * Copy MEM_SIZE bytes from device memory back into host memory, aborting
 * with a diagnostic on failure.
 * Fix: as with copy_mem_host_to_device, the format string lacked the %s
 * for cudaGetErrorString(err), so the error text was never printed.
 */
void copy_mem_device_to_host(void *device_pointer, void *host_pointer, const int MEM_SIZE)
{
    cudaError_t err = cudaMemcpy(host_pointer, device_pointer, MEM_SIZE, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy device pointer (%p) into host pointer (%p) with size %d; CUDA error code: %s; exiting...\n", device_pointer, host_pointer, MEM_SIZE, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
// Free a device allocation, aborting with a diagnostic if cudaFree fails
// (e.g. invalid pointer or an earlier sticky CUDA error).
void delete_device_memory(void *memory)
{
cudaError_t err = cudaFree(memory);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free memory from device; CUDA error code: %s; exiting...\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/**
*
* Example "kernel" function (runs on the GPU).
*
* Adds two float arrays into a third array.
* All arrays must of the same size of variable
* LENGTH.
*
* Note: __global__ makes this a kernel function.
*
*/
// dest[i] = src_a[i] + src_b[i] for all i in [0, LENGTH).
// Grid-stride loop, so any launch geometry covers the whole array.
__global__ void add_floats_of_length(float *dest, float *src_a, float *src_b, const int LENGTH)
{
    const int stride = blockDim.x * gridDim.x;        // total threads in the grid
    int idx = blockIdx.x * blockDim.x + threadIdx.x;  // this thread's first element
    while (idx < LENGTH)
    {
        dest[idx] = src_a[idx] + src_b[idx];
        idx += stride;
    }
}
// End-to-end vector-add demo with error-checked allocation and transfer
// helpers; verifies every output element is 3.0f and prints the max error.
int main(void)
{
    /**
     *
     * Length and memory sizeof all input/output,
     * and host/device arrays.
     *
     */
    const int ARRAY_LENGTH = 1 << 26;
    const int ARRAY_MEM_SIZE = sizeof(float) * ARRAY_LENGTH;
    // Initialize host (CPU) arrays
    float *host_input_a = create_host_float_array(ARRAY_LENGTH);
    float *host_input_b = create_host_float_array(ARRAY_LENGTH);
    float *host_output = create_host_float_array(ARRAY_LENGTH);
    // Initialize CUDA device arrays
    float *device_input_a = create_device_float_array(ARRAY_LENGTH);
    float *device_input_b = create_device_float_array(ARRAY_LENGTH);
    float *device_output = create_device_float_array(ARRAY_LENGTH);
    /**
     *
     * Fill both arrays with some test data, so
     * that adding each array at the same index
     * gives the float, 3.0F.
     *
     */
    for (int idx = 0; idx < ARRAY_LENGTH; idx++)
    {
        host_input_a[idx] = 1.0f;
        host_input_b[idx] = 2.0f;
    }
    /**
     *
     * Copy host input arrays into device memory.
     *
     */
    copy_mem_host_to_device(host_input_a, device_input_a, ARRAY_MEM_SIZE);
    copy_mem_host_to_device(host_input_b, device_input_b, ARRAY_MEM_SIZE);
    /**
     *
     * Run a basic addition test on the device
     * with the copied (and now available) inputs.
     *
     */
    // Ceil-div launch; cudaGetLastError only catches launch-config errors —
    // in-kernel faults would surface at the next synchronizing call.
    const int block_size = 256;
    const int grid_size = (ARRAY_LENGTH + block_size - 1) / block_size;
    add_floats_of_length<<<grid_size, block_size>>>(device_output, device_input_a, device_input_b, ARRAY_LENGTH);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to perform device calculation; CUDA error code: %s; exiting...\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    /**
     *
     * Copy device output to host output.
     * (The blocking copy also synchronizes with the kernel above.)
     *
     */
    copy_mem_device_to_host(device_output, host_output, ARRAY_MEM_SIZE);
    /**
     *
     * Confirm each element of array_y to be 3.0F,
     * otherwise calculate out the margin-of-error.
     *
     */
    float maxError = 0.0F;
    for (int idx = 0; idx < ARRAY_LENGTH; idx++)
        maxError = fmax(maxError, fabs(host_output[idx] - 3.0F));
    printf("--- END ---\nMax float computation error: %f\n--- END ---\n", maxError);
    delete_device_memory(device_input_a);
    delete_device_memory(device_input_b);
    delete_device_memory(device_output);
    free(host_input_a);
    free(host_input_b);
    free(host_output);
    return 0;
}
|
6,866 | #include "includes.h"
// Stub kernel: the population-update parameters are declared but the body is
// not implemented yet. (Note the parameter named `free` shadows ::free
// inside the kernel.)
__global__ void update_population_free( unsigned int * fixed , unsigned int * lost , unsigned int * free , unsigned int cols ) {
} |
6,867 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
const char TRUE = 1;
const char FALSE = 0;
/*
 * Treat `brute` as a little-endian, wordLen-digit counter in base
 * alphabetLen and add incrementBy to it in place.
 * Returns 1 if the addition fit in wordLen digits, 0 if it carried past
 * the last digit (the counter overflowed and wrapped).
 */
int bruteIncrement(char* brute, int alphabetLen, int wordLen, int incrementBy) {
    int carry = incrementBy;
    for (int pos = 0; carry > 0 && pos < wordLen; pos++) {
        int digit = carry + brute[pos];
        brute[pos] = (char)(digit % alphabetLen);
        carry = digit / alphabetLen;
    }
    return carry == 0;
}
/*
 * Device-side fixed-length compare: on the first mismatch between a and b
 * within `len` bytes, set *res = 0 and return. On a full match *res is left
 * untouched — callers initialise it to 1 before the call.
 * Fix: removed the unused `workerId` local (dead code).
 */
__device__ void cudaStrCmp(char *a, char *b, int len, int* res) {
    for (int i = 0; i < len; i++) {
        if (a[i] != b[i]) {
            *res = 0;
            return;
        }
    }
}
/*
 * Device-side twin of bruteIncrement(): add incrementBy to the
 * little-endian base-alphabetLen counter `brute`, reporting success
 * (no overflow past wordLen digits) through *incRes as 1/0.
 * Fix: removed the unused `workerId` local (dead code).
 */
__device__ void k_bruteIncrement(char* brute, int alphabetLen, int wordLen, int incrementBy, int *incRes) {
    int i = 0;
    while(incrementBy > 0 && i < wordLen) {
        int add = incrementBy + brute[i];
        brute[i] = (char)(add % alphabetLen);
        incrementBy = add / alphabetLen;
        i++;
    }
    *incRes = incrementBy == 0;
}
// Decode the digit indices in `brute` through `alphabet` into the
// NUL-terminated string `out` (out must hold at least wordLen+1 chars).
__device__ void bruteToString(char *brute, int wordLen, char *alphabet, char *out){
    int pos = 0;
    while (pos < wordLen) {
        out[pos] = alphabet[brute[pos]];
        pos++;
    }
    out[wordLen] = '\0';
}
// Return 1 if any of the first listSize entries is non-zero, else 0.
int any(int *list, int listSize){
    int found = 0;
    for (int i = 0; i < listSize && !found; i++)
        found = (list[i] != 0);
    return found;
}
/*
 * One worker thread's slice of the brute-force keyspace.
 * Each thread copies the chunk-base counter `brutePart`, advances its copy
 * by workSize*workerId positions, then walks up to workSize candidates,
 * decoding each through `alphabet` and comparing against targetString.
 * results[workerId] is set to 1 on a hit, 0 otherwise.
 * Fix: the early return on keyspace exhaustion leaked the device-heap
 * t_brutePart buffer on every launch; it is now freed first.
 */
__global__ void searchPart(char *targetString, char *alphabet, char *brutePart, int workSize, int wordLen, int alphabetLen, int* results){
    int workerId = threadIdx.x;
    results[workerId] = 0;
    int incRes = FALSE;
    // Receive start of latest section (WORKER * WORKSIZE), create local copy
    char* t_brutePart = (char *) malloc((wordLen)* sizeof(char));
    for (int i = 0; i < wordLen; i++) t_brutePart[i] = brutePart[i];
    // Increment to start of this thread's chunk (WORKSIZE)
    k_bruteIncrement(t_brutePart, alphabetLen, wordLen, workSize*workerId, &incRes);
    if(!incRes){
        // Past the end of the keyspace: nothing for this thread to scan.
        free(t_brutePart);
        return;
    }
    int count = 0;
    char* out = (char *) malloc((wordLen + 1)* sizeof(char));
    // Increment by one and compare strs after every iteration
    while(1) {
        if(count>=workSize) {
            break;
        }
        bruteToString(t_brutePart, wordLen, alphabet, out);
        int cmpRes = 1;                                  // cleared on mismatch
        cudaStrCmp(out, targetString, wordLen, &cmpRes);
        if(cmpRes == 1) {
            results[workerId] = 1;
            break;
        }
        count +=1;
        incRes = 0;
        k_bruteIncrement(t_brutePart, alphabetLen, wordLen, 1, &incRes);
        if(!incRes) {
            break;                                       // keyspace exhausted
        }
    }
    free(out);
    free(t_brutePart);
}
/*
 * Drive the GPU brute-force search for targetString over `alphabet`.
 * The keyspace is consumed in sections of numWorkers*workSize candidates;
 * each section is scanned by one kernel launch of numWorkers threads.
 * Returns 1 if the string was found, 0 if the keyspace was exhausted.
 * Fixes: the original returned 1 straight out of the loop, skipping every
 * cudaFree on a successful search (it now breaks and falls through to the
 * common cleanup); an unused, leaked host-side `results` buffer was also
 * removed.
 */
int search(char *targetString, char *alphabet, int numWorkers, int workSize){
    int wordLen = strlen(targetString);
    int alphabetLen = strlen(alphabet);
    int size = wordLen*sizeof(char);
    int alphabetSize = alphabetLen*sizeof(char);
    char *k_alphabet;
    int *k_alphabetLen;
    int *k_wordLen;
    char *k_targetString;
    // Managed allocations are accessible from both host and device.
    cudaMallocManaged(&k_alphabet, alphabetSize);
    cudaMallocManaged(&k_alphabetLen, sizeof(int));
    cudaMallocManaged(&k_wordLen, sizeof(int));
    cudaMallocManaged(&k_targetString, size);
    cudaMemcpy(k_alphabet, alphabet, alphabetSize, cudaMemcpyDefault );
    cudaMemcpy(k_targetString, targetString, size, cudaMemcpyDefault );
    *k_alphabetLen = strlen(alphabet);
    *k_wordLen = strlen(targetString);
    // Host-side section counter, little-endian digits in base alphabetLen.
    char brute [wordLen];
    for(int i=0;i<wordLen;i++)brute[i]=0;
    char* k_brutePart;
    cudaMalloc(&k_brutePart, size);
    int* k_results;
    cudaMallocManaged(&k_results, numWorkers* sizeof(int));
    int found = 0;
    // Every iteration hands one section (numWorkers*workSize candidates) to
    // the GPU, then advances the host counter past it.
    while(1){
        cudaMemcpy(k_brutePart, brute, size, cudaMemcpyDefault );
        for(int i=0;i<numWorkers;i++) k_results[i] = 0;
        // Divide the section into chunks to be worked on in parallel
        searchPart<<<1, numWorkers>>>(k_targetString, k_alphabet, k_brutePart, workSize, *k_wordLen, *k_alphabetLen, k_results);
        // Wait for GPU to finish before accessing managed k_results on host
        cudaDeviceSynchronize();
        if(any(k_results, numWorkers)){
            found = 1;
            break;
        }
        // advance to the next major chunk of work
        if(!bruteIncrement(brute, alphabetLen, wordLen, workSize*numWorkers)){
            break;
        }
    }
    cudaFree(k_alphabet);
    cudaFree(k_alphabetLen);
    cudaFree(k_wordLen);
    cudaFree(k_targetString);
    cudaFree(k_brutePart);
    cudaFree(k_results);
    return found;
}
// CLI entry point: <target> <alphabet> <numWorkers> <workSize>.
// NOTE(review): argc is never checked, so missing arguments dereference a
// NULL argv slot — confirm whether a usage message should be added.
int main( int argc, char** argv) {
char *targetString = argv[1];
char *alphabet = argv[2];
int numWorkers = atoi(argv[3]);
int workSize = atoi(argv[4]);
printf("Looking for %s in [%s]...\n", targetString, alphabet);
if(search(targetString, alphabet, numWorkers, workSize)){
printf("Found\n");
} else {
printf("Notfound\n");
}
return 0;
} |
6,868 |
#include <stdio.h>
#include <stdlib.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
#include <sys/time.h>
#include <cooperative_groups.h>
__global__ void
ac(float *A, const int *B, const int *C, const int *op_sel, int n_inputs, const int n_arith, int thresh, int iter) {
int i= blockDim.x * blockIdx.x + threadIdx.x;
int val_4, val_5, val_6, val_11, val_12, val_13, val_18, val_19, val_20, val_25, val_26, val_27, val_32, val_33, val_34, val_39, val_40, val_41, val_46, val_47, val_48, val_53, val_54, val_55, val_60, val_61, val_62, val_67, val_68, val_69, val_74, val_75, val_76, val_81, val_82, val_83, val_88, val_89, val_90, val_95, val_96, val_97, val_102, val_103, val_104, val_109, val_110, val_111, val_116, val_117, val_118, val_123, val_124, val_125, val_130, val_131, val_132, val_137, val_138, val_139, val_144, val_145, val_146, val_151, val_152, val_153, val_158, val_159, val_160, val_165, val_166, val_167, val_173, val_174, val_175, val_180, val_181, val_182, val_188, val_189, val_190, val_196, val_197, val_198, val_205, val_206, val_207, val_212, val_213, val_214, val_219, val_220, val_221, val_226, val_227, val_228, val_233, val_234, val_235, val_240, val_241, val_242, val_248, val_249, val_250, val_255, val_256, val_257, val_262, val_263, val_264, val_273, val_274, val_275, val_281, val_282, val_283, val_289, val_290, val_291, val_297, val_298, val_299, val_304, val_305, val_306, val_311, val_312, val_313, val_318, val_319, val_320, val_325, val_326, val_327, val_338, val_339, val_340, val_346, val_347, val_348, val_354, val_355, val_356, val_361, val_362, val_364, val_365, val_366, val_368, val_369, val_371, val_372, val_373, val_375, val_376, val_378, val_379, val_381, val_382, val_384, val_385, val_387, val_388, val_390, val_391, val_393, val_394, val_395, val_397, val_398, val_400, val_401, val_402, val_404, val_405, val_407, val_408, val_409, val_411, val_412, val_414, val_415, val_416, val_418, val_419, val_421, val_422, val_424, val_425, val_427, val_428, val_429, val_431, val_432, val_434, val_435, val_437, val_438, val_439, val_441, val_442, val_444, val_445, val_447, val_448, val_449, val_451, val_452, val_454, val_455, val_456, val_458, val_459, val_461, val_462, val_464, val_465, val_466, val_468, val_469, val_471, val_472, val_473, val_475, val_476, 
val_478, val_479, val_481, val_482, val_483, val_485, val_486, val_488, val_489, val_490, val_492, val_493, val_495, val_496, val_497, val_499, val_500, val_502, val_503, val_505, val_506, val_508, val_509, val_510, val_512, val_513, val_515, val_516, val_518, val_519, val_521, val_522, val_524, val_525, val_526, val_528, val_529, val_531, val_532, val_534, val_535, val_537, val_538, val_539, val_541, val_542, val_544, val_545, val_547, val_548, val_550, val_551, val_553, val_554, val_555, val_557, val_558, val_560, val_561, val_562, val_564, val_565, val_567, val_568, val_569, val_571, val_572, val_574, val_575, val_577, val_578, val_580, val_581, val_582, val_584, val_585, val_587, val_588, val_589, val_591, val_592, val_594, val_595, val_597, val_598, val_600, val_601, val_602, val_604, val_605, val_607, val_608, val_610, val_611, val_612, val_614, val_615, val_617, val_618, val_619, val_621, val_622, val_624, val_625, val_627, val_628, val_630, val_631, val_633, val_634, val_636, val_637, val_639, val_640, val_641, val_643, val_644, val_646, val_647, val_648, val_650, val_651, val_653, val_654, val_655, val_657, val_658, val_660, val_661, val_663, val_664, val_666, val_667, val_669, val_670, val_671, val_673, val_674, val_676, val_677, val_679, val_680, val_682, val_683, val_685, val_686, val_687, val_689, val_690, val_692, val_693, val_694, val_696, val_697, val_699, val_700, val_701, val_703, val_704, val_706, val_707, val_708, val_710, val_711, val_713, val_714, val_716, val_717, val_719, val_720, val_721, val_723, val_724, val_726, val_727, val_728, val_730, val_731, val_733, val_734, val_736, val_737, val_738, val_740, val_741, val_743, val_744, val_745, val_747, val_748, val_750, val_751, val_752, val_754, val_755, val_757, val_758, val_760, val_761, val_763, val_764, val_765, val_767, val_768, val_770, val_771, val_773, val_774, val_775, val_777, val_778, val_780, val_781, val_782, val_784, val_785, val_787, val_788, val_790, val_791, val_792, val_794, 
val_795, val_797, val_798, val_800, val_801, val_802, val_804, val_805, val_807, val_808, val_809, val_811, val_812, val_814, val_815, val_816, val_818, val_819, val_821, val_822, val_824, val_825, val_827, val_828, val_829, val_831, val_832, val_834, val_835, val_836, val_838, val_839, val_841, val_842, val_844, val_845, val_847, val_848, val_849, val_851, val_852, val_854, val_855, val_857, val_858, val_859, val_861, val_862, val_864, val_865, val_866, val_868, val_869, val_871, val_872, val_873, val_875, val_876, val_878, val_879, val_881, val_882, val_884, val_885, val_887, val_888, val_889, val_891, val_892, val_894, val_895, val_897, val_898, val_900, val_901, val_902, val_904, val_905, val_907, val_908, val_910, val_911, val_913, val_914, val_916, val_917, val_919, val_920, val_921, val_923, val_924, val_926, val_927, val_929, val_930, val_932, val_933, val_935, val_936, val_938, val_939, val_941, val_942, val_943, val_945, val_946, val_948, val_949, val_951, val_952, val_954, val_955, val_956, val_957, val_959, val_960, val_962, val_963, val_964, val_966, val_967, val_969, val_970, val_972, val_973, val_975, val_976, val_978, val_979, val_980, val_981, val_983, val_984, val_986, val_987, val_989, val_990, val_992, val_993, val_995, val_996, val_998, val_999, val_1001, val_1002, val_1004, val_1005, val_1006, val_1007, val_1009, val_1010, val_1012, val_1013, val_1015, val_1016, val_1017, val_1019, val_1020, val_1022, val_1023, val_1025, val_1026, val_1027, val_1029, val_1030, val_1032, val_1033, val_1035, val_1036, val_1038, val_1039, val_1041, val_1042, val_1043, val_1044, val_1046, val_1047, val_1049, val_1050, val_1052, val_1053, val_1054, val_1055, val_1057, val_1058, val_1060, val_1061, val_1063, val_1064, val_1065, val_1066, val_1068, val_1069, val_1071, val_1072, val_1074, val_1075, val_1077, val_1078, val_1079, val_1080, val_1082, val_1083, val_1085, val_1086, val_1088, val_1089, val_1091, val_1092, val_1093, val_1094, val_1096, val_1097, val_1099, 
val_1100, val_1102, val_1103, val_1105, val_1106, val_1107, val_1108, val_1110, val_1111, val_1113, val_1114, val_1116, val_1117, val_1119, val_1120, val_1121, val_1122, val_1124, val_1125, val_1127, val_1128, val_1130, val_1131, val_1133, val_1134, val_1135, val_1136, val_1138, val_1139, val_1141, val_1142, val_1144, val_1145, val_1147, val_1148, val_1149, val_1150, val_1152, val_1153, val_1155, val_1156, val_1158, val_1159, val_1161, val_1162, val_1163, val_1164, val_1166, val_1167, val_1169, val_1170, val_1172, val_1173, val_1174, val_1176, val_1177, val_1179, val_1180, val_1182, val_1183, val_1184, val_1186, val_1187, val_1189, val_1190, val_1192, val_1193, val_1194, val_1195, val_1197, val_1198, val_1200, val_1201, val_1202, val_1204, val_1205, val_1207, val_1208, val_1210, val_1211, val_1213, val_1214, val_1216, val_1217, val_1218, val_1219, val_1221, val_1222, val_1224, val_1225, val_1227, val_1228, val_1230, val_1231, val_1232, val_1233, val_1235, val_1236, val_1238, val_1239, val_1241, val_1242, val_1244, val_1245, val_1247, val_1248, val_1250, val_1251, val_1252, val_1253, val_1255, val_1256, val_1258, val_1259, val_1261, val_1262, val_1264, val_1265, val_1266, val_1267, val_1269, val_1270, val_1272, val_1273, val_1275, val_1276, val_1278, val_1279, val_1281, val_1282, val_1284, val_1285, val_1286, val_1288, val_1289, val_1291, val_1292, val_1294, val_1295, val_1296, val_1297, val_1299, val_1300, val_1302, val_1303, val_1305, val_1306, val_1308, val_1309, val_1311, val_1312, val_1314, val_1315, val_1316, val_1317, val_1319, val_1320, val_1322, val_1323, val_1325, val_1326, val_1328, val_1329, val_1330, val_1331, val_1333, val_1334, val_1336, val_1337, val_1339, val_1340, val_1342, val_1343, val_1345, val_1346, val_1348, val_1349, val_1350, val_1351, val_1353, val_1354, val_1356, val_1357, val_1359, val_1360, val_1362, val_1363, val_1364, val_1365, val_1367, val_1368, val_1370, val_1371, val_1373, val_1374, val_1376, val_1377, val_1378, val_1379, val_1381, 
val_1382, val_1384, val_1385, val_1387, val_1388, val_1390, val_1391, val_1392, val_1393, val_1395, val_1396, val_1398, val_1399, val_1401, val_1402, val_1403, val_1405, val_1406, val_1408, val_1409, val_1411, val_1412, val_1413, val_1415, val_1416, val_1418, val_1419, val_1420, val_1422, val_1423, val_1425, val_1426, val_1427, val_1429, val_1430, val_1432, val_1433, val_1434, val_1436, val_1437, val_1439, val_1440, val_1441, val_1443, val_1444, val_1446, val_1447, val_1448, val_1450, val_1451, val_1453, val_1454, val_1455, val_1457, val_1458, val_1460, val_1461, val_1463, val_1464, val_1465, val_1467, val_1468, val_1470, val_1471, val_1473, val_1474, val_1475, val_1477, val_1478, val_1480, val_1481, val_1482, val_1484, val_1485, val_1487, val_1488, val_1489, val_1491, val_1492, val_1494, val_1495, val_1497, val_1498, val_1499, val_1501, val_1502, val_1504, val_1505, val_1507, val_1508, val_1509, val_1511, val_1512, val_1514, val_1515, val_1516, val_1518, val_1519, val_1521, val_1522, val_1523, val_1525, val_1526, val_1528, val_1529, val_1530, val_1532, val_1533, val_1535, val_1536, val_1537, val_1539, val_1540, val_1542, val_1543, val_1544, val_1546, val_1547, val_1549, val_1550, val_1551, val_1553, val_1554, val_1556, val_1557, val_1558, val_1560, val_1561, val_1563, val_1564, val_1565, val_1567, val_1568, val_1570, val_1571, val_1573, val_1574, val_1576, val_1577, val_1579, val_1580, val_1582, val_1583, val_1585, val_1586, val_1588, val_1589, val_1591, val_1592, val_1594, val_1595, val_1597, val_1598, val_1600, val_1601, val_1603, val_1604, val_1606, val_1607, val_1608, val_1609, val_1610, val_1611, val_1612, val_1613, val_1614, val_1615, val_1616, val_1617, val_1618, val_1619, val_1620, val_1625, val_1626, val_1627, val_1629, val_1630;
int idx_off;
for (int k=0; k<iter; k++) {
//idx_off = i*(n_inputs * iter) + n_inputs*k;
idx_off = i*(n_inputs);
val_4 = A[idx_off + 0] * A[idx_off + 1];
val_5 = A[idx_off + 2] * A[idx_off + 3];
val_6 = val_4 + val_5;
val_11 = A[idx_off + 7] * A[idx_off + 8];
val_12 = A[idx_off + 9] * A[idx_off + 10];
val_13 = val_11 + val_12;
val_18 = A[idx_off + 14] * A[idx_off + 15];
val_19 = A[idx_off + 16] * A[idx_off + 17];
val_20 = val_18 + val_19;
val_25 = A[idx_off + 21] * A[idx_off + 22];
val_26 = A[idx_off + 23] * A[idx_off + 24];
val_27 = val_25 + val_26;
val_32 = A[idx_off + 28] * A[idx_off + 29];
val_33 = A[idx_off + 30] * A[idx_off + 31];
val_34 = val_32 + val_33;
val_39 = A[idx_off + 35] * A[idx_off + 36];
val_40 = A[idx_off + 37] * A[idx_off + 38];
val_41 = val_39 + val_40;
val_46 = A[idx_off + 42] * A[idx_off + 43];
val_47 = A[idx_off + 44] * A[idx_off + 45];
val_48 = val_46 + val_47;
val_53 = A[idx_off + 49] * A[idx_off + 50];
val_54 = A[idx_off + 51] * A[idx_off + 52];
val_55 = val_53 + val_54;
val_60 = A[idx_off + 56] * A[idx_off + 57];
val_61 = A[idx_off + 58] * A[idx_off + 59];
val_62 = val_60 + val_61;
val_67 = A[idx_off + 63] * A[idx_off + 64];
val_68 = A[idx_off + 65] * A[idx_off + 66];
val_69 = val_67 + val_68;
val_74 = A[idx_off + 70] * A[idx_off + 71];
val_75 = A[idx_off + 72] * A[idx_off + 73];
val_76 = val_74 + val_75;
val_81 = A[idx_off + 77] * A[idx_off + 78];
val_82 = A[idx_off + 79] * A[idx_off + 80];
val_83 = val_81 + val_82;
val_88 = A[idx_off + 84] * A[idx_off + 85];
val_89 = A[idx_off + 86] * A[idx_off + 87];
val_90 = val_88 + val_89;
val_95 = A[idx_off + 91] * A[idx_off + 92];
val_96 = A[idx_off + 93] * A[idx_off + 94];
val_97 = val_95 + val_96;
val_102 = A[idx_off + 98] * A[idx_off + 99];
val_103 = A[idx_off + 100] * A[idx_off + 101];
val_104 = val_102 + val_103;
val_109 = A[idx_off + 105] * A[idx_off + 106];
val_110 = A[idx_off + 107] * A[idx_off + 108];
val_111 = val_109 + val_110;
val_116 = A[idx_off + 112] * A[idx_off + 113];
val_117 = A[idx_off + 114] * A[idx_off + 115];
val_118 = val_116 + val_117;
val_123 = A[idx_off + 119] * A[idx_off + 120];
val_124 = A[idx_off + 121] * A[idx_off + 122];
val_125 = val_123 + val_124;
val_130 = A[idx_off + 126] * A[idx_off + 127];
val_131 = A[idx_off + 128] * A[idx_off + 129];
val_132 = val_130 + val_131;
val_137 = A[idx_off + 133] * A[idx_off + 134];
val_138 = A[idx_off + 135] * A[idx_off + 136];
val_139 = val_137 + val_138;
val_144 = A[idx_off + 140] * A[idx_off + 141];
val_145 = A[idx_off + 142] * A[idx_off + 143];
val_146 = val_144 + val_145;
val_151 = A[idx_off + 147] * A[idx_off + 148];
val_152 = A[idx_off + 149] * A[idx_off + 150];
val_153 = val_151 + val_152;
val_158 = A[idx_off + 154] * A[idx_off + 155];
val_159 = A[idx_off + 156] * A[idx_off + 157];
val_160 = val_158 + val_159;
val_165 = A[idx_off + 161] * A[idx_off + 162];
val_166 = A[idx_off + 163] * A[idx_off + 164];
val_167 = val_165 + val_166;
val_173 = A[idx_off + 169] * A[idx_off + 170];
val_174 = A[idx_off + 171] * A[idx_off + 172];
val_175 = val_173 + val_174;
val_180 = A[idx_off + 176] * A[idx_off + 177];
val_181 = A[idx_off + 178] * A[idx_off + 179];
val_182 = val_180 + val_181;
val_188 = A[idx_off + 184] * A[idx_off + 185];
val_189 = A[idx_off + 186] * A[idx_off + 187];
val_190 = val_188 + val_189;
val_196 = A[idx_off + 192] * A[idx_off + 193];
val_197 = A[idx_off + 194] * A[idx_off + 195];
val_198 = val_196 + val_197;
val_205 = A[idx_off + 201] * A[idx_off + 202];
val_206 = A[idx_off + 203] * A[idx_off + 204];
val_207 = val_205 + val_206;
val_212 = A[idx_off + 208] * A[idx_off + 209];
val_213 = A[idx_off + 210] * A[idx_off + 211];
val_214 = val_212 + val_213;
val_219 = A[idx_off + 215] * A[idx_off + 216];
val_220 = A[idx_off + 217] * A[idx_off + 218];
val_221 = val_219 + val_220;
val_226 = A[idx_off + 222] * A[idx_off + 223];
val_227 = A[idx_off + 224] * A[idx_off + 225];
val_228 = val_226 + val_227;
val_233 = A[idx_off + 229] * A[idx_off + 230];
val_234 = A[idx_off + 231] * A[idx_off + 232];
val_235 = val_233 + val_234;
val_240 = A[idx_off + 236] * A[idx_off + 237];
val_241 = A[idx_off + 238] * A[idx_off + 239];
val_242 = val_240 + val_241;
val_248 = A[idx_off + 244] * A[idx_off + 245];
val_249 = A[idx_off + 246] * A[idx_off + 247];
val_250 = val_248 + val_249;
val_255 = A[idx_off + 251] * A[idx_off + 252];
val_256 = A[idx_off + 253] * A[idx_off + 254];
val_257 = val_255 + val_256;
val_262 = A[idx_off + 258] * A[idx_off + 259];
val_263 = A[idx_off + 260] * A[idx_off + 261];
val_264 = val_262 + val_263;
val_273 = A[idx_off + 269] * A[idx_off + 270];
val_274 = A[idx_off + 271] * A[idx_off + 272];
val_275 = val_273 + val_274;
val_281 = A[idx_off + 277] * A[idx_off + 278];
val_282 = A[idx_off + 279] * A[idx_off + 280];
val_283 = val_281 + val_282;
val_289 = A[idx_off + 285] * A[idx_off + 286];
val_290 = A[idx_off + 287] * A[idx_off + 288];
val_291 = val_289 + val_290;
val_297 = A[idx_off + 293] * A[idx_off + 294];
val_298 = A[idx_off + 295] * A[idx_off + 296];
val_299 = val_297 + val_298;
val_304 = A[idx_off + 300] * A[idx_off + 301];
val_305 = A[idx_off + 302] * A[idx_off + 303];
val_306 = val_304 + val_305;
val_311 = A[idx_off + 307] * A[idx_off + 308];
val_312 = A[idx_off + 309] * A[idx_off + 310];
val_313 = val_311 + val_312;
val_318 = A[idx_off + 314] * A[idx_off + 315];
val_319 = A[idx_off + 316] * A[idx_off + 317];
val_320 = val_318 + val_319;
val_325 = A[idx_off + 321] * A[idx_off + 322];
val_326 = A[idx_off + 323] * A[idx_off + 324];
val_327 = val_325 + val_326;
val_338 = A[idx_off + 334] * A[idx_off + 335];
val_339 = A[idx_off + 336] * A[idx_off + 337];
val_340 = val_338 + val_339;
val_346 = A[idx_off + 342] * A[idx_off + 343];
val_347 = A[idx_off + 344] * A[idx_off + 345];
val_348 = val_346 + val_347;
val_354 = A[idx_off + 350] * A[idx_off + 351];
val_355 = A[idx_off + 352] * A[idx_off + 353];
val_356 = val_354 + val_355;
val_361 = A[idx_off + 276] * val_214;
val_362 = val_361 * A[idx_off + 360];
val_364 = A[idx_off + 268] * val_76;
val_365 = val_364 * A[idx_off + 363];
val_366 = val_362 + val_365;
val_368 = A[idx_off + 276] * val_275;
val_369 = val_368 * A[idx_off + 367];
val_371 = A[idx_off + 268] * val_228;
val_372 = val_371 * A[idx_off + 370];
val_373 = val_369 + val_372;
val_375 = A[idx_off + 284] * val_235;
val_376 = val_375 * A[idx_off + 374];
val_378 = A[idx_off + 243] * val_48;
val_379 = val_378 * A[idx_off + 377];
val_381 = A[idx_off + 183] * val_6;
val_382 = val_381 * A[idx_off + 380];
val_384 = A[idx_off + 243] * val_62;
val_385 = val_384 * A[idx_off + 383];
val_387 = A[idx_off + 191] * val_69;
val_388 = val_387 * A[idx_off + 386];
val_390 = A[idx_off + 357] * val_327;
val_391 = val_390 * A[idx_off + 389];
val_393 = A[idx_off + 349] * val_320;
val_394 = val_393 * A[idx_off + 392];
val_395 = val_391 + val_394;
val_397 = A[idx_off + 276] * val_214;
val_398 = val_397 * A[idx_off + 396];
val_400 = A[idx_off + 268] * val_207;
val_401 = val_400 * A[idx_off + 399];
val_402 = val_398 + val_401;
val_404 = A[idx_off + 276] * val_275;
val_405 = val_404 * A[idx_off + 403];
val_407 = A[idx_off + 268] * val_221;
val_408 = val_407 * A[idx_off + 406];
val_409 = val_405 + val_408;
val_411 = A[idx_off + 276] * val_275;
val_412 = val_411 * A[idx_off + 410];
val_414 = A[idx_off + 268] * val_228;
val_415 = val_414 * A[idx_off + 413];
val_416 = val_412 + val_415;
val_418 = A[idx_off + 284] * val_235;
val_419 = val_418 * A[idx_off + 417];
val_421 = A[idx_off + 243] * val_175;
val_422 = val_421 * A[idx_off + 420];
val_424 = A[idx_off + 191] * val_139;
val_425 = val_424 * A[idx_off + 423];
val_427 = A[idx_off + 183] * val_13;
val_428 = val_427 * A[idx_off + 426];
val_429 = val_425 + val_428;
val_431 = val_306 * val_198;
val_432 = val_431 * A[idx_off + 430];
val_434 = A[idx_off + 333] * A[idx_off + 199];
val_435 = val_434 * A[idx_off + 433];
val_437 = A[idx_off + 200] * A[idx_off + 199];
val_438 = val_437 * A[idx_off + 436];
val_439 = val_435 + val_438;
val_441 = A[idx_off + 341] * val_313;
val_442 = val_441 * A[idx_off + 440];
val_444 = A[idx_off + 357] * val_327;
val_445 = val_444 * A[idx_off + 443];
val_447 = A[idx_off + 349] * val_320;
val_448 = val_447 * A[idx_off + 446];
val_449 = val_445 + val_448;
val_451 = A[idx_off + 276] * val_214;
val_452 = val_451 * A[idx_off + 450];
val_454 = A[idx_off + 268] * val_125;
val_455 = val_454 * A[idx_off + 453];
val_456 = val_452 + val_455;
val_458 = A[idx_off + 243] * val_175;
val_459 = val_458 * A[idx_off + 457];
val_461 = A[idx_off + 191] * val_139;
val_462 = val_461 * A[idx_off + 460];
val_464 = A[idx_off + 183] * val_132;
val_465 = val_464 * A[idx_off + 463];
val_466 = val_462 + val_465;
val_468 = A[idx_off + 357] * val_153;
val_469 = val_468 * A[idx_off + 467];
val_471 = A[idx_off + 349] * val_146;
val_472 = val_471 * A[idx_off + 470];
val_473 = val_469 + val_472;
val_475 = A[idx_off + 341] * A[idx_off + 329];
val_476 = val_475 * A[idx_off + 474];
val_478 = A[idx_off + 357] * val_167;
val_479 = val_478 * A[idx_off + 477];
val_481 = A[idx_off + 349] * val_160;
val_482 = val_481 * A[idx_off + 480];
val_483 = val_479 + val_482;
val_485 = A[idx_off + 333] * A[idx_off + 199];
val_486 = val_485 * A[idx_off + 484];
val_488 = A[idx_off + 200] * A[idx_off + 199];
val_489 = val_488 * A[idx_off + 487];
val_490 = val_486 + val_489;
val_492 = A[idx_off + 276] * val_214;
val_493 = val_492 * A[idx_off + 491];
val_495 = A[idx_off + 268] * val_20;
val_496 = val_495 * A[idx_off + 494];
val_497 = val_493 + val_496;
val_499 = A[idx_off + 243] * val_242;
val_500 = val_499 * A[idx_off + 498];
val_502 = val_257 * val_250;
val_503 = val_502 * A[idx_off + 501];
val_505 = A[idx_off + 276] * val_275;
val_506 = val_505 * A[idx_off + 504];
val_508 = A[idx_off + 268] * val_264;
val_509 = val_508 * A[idx_off + 507];
val_510 = val_506 + val_509;
val_512 = A[idx_off + 292] * val_291;
val_513 = val_512 * A[idx_off + 511];
val_515 = val_306 * val_299;
val_516 = val_515 * A[idx_off + 514];
val_518 = A[idx_off + 349] * val_348;
val_519 = val_518 * A[idx_off + 517];
val_521 = A[idx_off + 276] * val_214;
val_522 = val_521 * A[idx_off + 520];
val_524 = A[idx_off + 268] * val_27;
val_525 = val_524 * A[idx_off + 523];
val_526 = val_522 + val_525;
val_528 = A[idx_off + 243] * val_242;
val_529 = val_528 * A[idx_off + 527];
val_531 = val_257 * val_250;
val_532 = val_531 * A[idx_off + 530];
val_534 = A[idx_off + 276] * val_275;
val_535 = val_534 * A[idx_off + 533];
val_537 = A[idx_off + 268] * val_264;
val_538 = val_537 * A[idx_off + 536];
val_539 = val_535 + val_538;
val_541 = A[idx_off + 292] * val_291;
val_542 = val_541 * A[idx_off + 540];
val_544 = val_306 * val_299;
val_545 = val_544 * A[idx_off + 543];
val_547 = A[idx_off + 357] * val_356;
val_548 = val_547 * A[idx_off + 546];
val_550 = A[idx_off + 276] * val_214;
val_551 = val_550 * A[idx_off + 549];
val_553 = A[idx_off + 268] * val_207;
val_554 = val_553 * A[idx_off + 552];
val_555 = val_551 + val_554;
val_557 = A[idx_off + 276] * val_275;
val_558 = val_557 * A[idx_off + 556];
val_560 = A[idx_off + 268] * val_221;
val_561 = val_560 * A[idx_off + 559];
val_562 = val_558 + val_561;
val_564 = A[idx_off + 276] * val_275;
val_565 = val_564 * A[idx_off + 563];
val_567 = A[idx_off + 268] * val_228;
val_568 = val_567 * A[idx_off + 566];
val_569 = val_565 + val_568;
val_571 = A[idx_off + 284] * val_235;
val_572 = val_571 * A[idx_off + 570];
val_574 = A[idx_off + 243] * val_34;
val_575 = val_574 * A[idx_off + 573];
val_577 = A[idx_off + 191] * val_190;
val_578 = val_577 * A[idx_off + 576];
val_580 = A[idx_off + 183] * val_41;
val_581 = val_580 * A[idx_off + 579];
val_582 = val_578 + val_581;
val_584 = A[idx_off + 276] * val_275;
val_585 = val_584 * A[idx_off + 583];
val_587 = A[idx_off + 268] * val_264;
val_588 = val_587 * A[idx_off + 586];
val_589 = val_585 + val_588;
val_591 = A[idx_off + 292] * val_291;
val_592 = val_591 * A[idx_off + 590];
val_594 = val_306 * val_198;
val_595 = val_594 * A[idx_off + 593];
val_597 = A[idx_off + 357] * val_327;
val_598 = val_597 * A[idx_off + 596];
val_600 = A[idx_off + 349] * val_320;
val_601 = val_600 * A[idx_off + 599];
val_602 = val_598 + val_601;
val_604 = A[idx_off + 333] * A[idx_off + 199];
val_605 = val_604 * A[idx_off + 603];
val_607 = A[idx_off + 276] * val_214;
val_608 = val_607 * A[idx_off + 606];
val_610 = A[idx_off + 268] * val_76;
val_611 = val_610 * A[idx_off + 609];
val_612 = val_608 + val_611;
val_614 = A[idx_off + 276] * val_275;
val_615 = val_614 * A[idx_off + 613];
val_617 = A[idx_off + 268] * val_228;
val_618 = val_617 * A[idx_off + 616];
val_619 = val_615 + val_618;
val_621 = A[idx_off + 284] * val_235;
val_622 = val_621 * A[idx_off + 620];
val_624 = A[idx_off + 243] * val_48;
val_625 = val_624 * A[idx_off + 623];
val_627 = A[idx_off + 183] * val_55;
val_628 = val_627 * A[idx_off + 626];
val_630 = A[idx_off + 243] * val_62;
val_631 = val_630 * A[idx_off + 629];
val_633 = A[idx_off + 191] * val_69;
val_634 = val_633 * A[idx_off + 632];
val_636 = A[idx_off + 357] * val_327;
val_637 = val_636 * A[idx_off + 635];
val_639 = A[idx_off + 349] * val_320;
val_640 = val_639 * A[idx_off + 638];
val_641 = val_637 + val_640;
val_643 = A[idx_off + 276] * val_214;
val_644 = val_643 * A[idx_off + 642];
val_646 = A[idx_off + 268] * val_76;
val_647 = val_646 * A[idx_off + 645];
val_648 = val_644 + val_647;
val_650 = A[idx_off + 276] * val_275;
val_651 = val_650 * A[idx_off + 649];
val_653 = A[idx_off + 268] * val_228;
val_654 = val_653 * A[idx_off + 652];
val_655 = val_651 + val_654;
val_657 = A[idx_off + 284] * val_235;
val_658 = val_657 * A[idx_off + 656];
val_660 = A[idx_off + 243] * val_83;
val_661 = val_660 * A[idx_off + 659];
val_663 = val_90 * val_250;
val_664 = val_663 * A[idx_off + 662];
val_666 = A[idx_off + 276] * val_275;
val_667 = val_666 * A[idx_off + 665];
val_669 = A[idx_off + 268] * val_264;
val_670 = val_669 * A[idx_off + 668];
val_671 = val_667 + val_670;
val_673 = A[idx_off + 292] * val_291;
val_674 = val_673 * A[idx_off + 672];
val_676 = val_306 * val_299;
val_677 = val_676 * A[idx_off + 675];
val_679 = A[idx_off + 341] * val_313;
val_680 = val_679 * A[idx_off + 678];
val_682 = A[idx_off + 357] * val_327;
val_683 = val_682 * A[idx_off + 681];
val_685 = A[idx_off + 349] * val_97;
val_686 = val_685 * A[idx_off + 684];
val_687 = val_683 + val_686;
val_689 = A[idx_off + 276] * val_214;
val_690 = val_689 * A[idx_off + 688];
val_692 = A[idx_off + 268] * val_207;
val_693 = val_692 * A[idx_off + 691];
val_694 = val_690 + val_693;
val_696 = A[idx_off + 276] * val_275;
val_697 = val_696 * A[idx_off + 695];
val_699 = A[idx_off + 268] * val_221;
val_700 = val_699 * A[idx_off + 698];
val_701 = val_697 + val_700;
val_703 = A[idx_off + 276] * val_275;
val_704 = val_703 * A[idx_off + 702];
val_706 = A[idx_off + 268] * val_228;
val_707 = val_706 * A[idx_off + 705];
val_708 = val_704 + val_707;
val_710 = A[idx_off + 284] * val_235;
val_711 = val_710 * A[idx_off + 709];
val_713 = A[idx_off + 243] * val_175;
val_714 = val_713 * A[idx_off + 712];
val_716 = A[idx_off + 191] * val_190;
val_717 = val_716 * A[idx_off + 715];
val_719 = A[idx_off + 183] * val_104;
val_720 = val_719 * A[idx_off + 718];
val_721 = val_717 + val_720;
val_723 = A[idx_off + 276] * val_275;
val_724 = val_723 * A[idx_off + 722];
val_726 = A[idx_off + 268] * val_264;
val_727 = val_726 * A[idx_off + 725];
val_728 = val_724 + val_727;
val_730 = A[idx_off + 292] * val_291;
val_731 = val_730 * A[idx_off + 729];
val_733 = A[idx_off + 191] * val_118;
val_734 = val_733 * A[idx_off + 732];
val_736 = A[idx_off + 183] * val_111;
val_737 = val_736 * A[idx_off + 735];
val_738 = val_734 + val_737;
val_740 = A[idx_off + 357] * val_327;
val_741 = val_740 * A[idx_off + 739];
val_743 = A[idx_off + 349] * val_320;
val_744 = val_743 * A[idx_off + 742];
val_745 = val_741 + val_744;
val_747 = A[idx_off + 357] * val_356;
val_748 = val_747 * A[idx_off + 746];
val_750 = A[idx_off + 349] * val_348;
val_751 = val_750 * A[idx_off + 749];
val_752 = val_748 + val_751;
val_754 = A[idx_off + 200] * A[idx_off + 332];
val_755 = val_754 * A[idx_off + 753];
val_757 = A[idx_off + 341] * A[idx_off + 330];
val_758 = val_757 * A[idx_off + 756];
val_760 = A[idx_off + 276] * val_214;
val_761 = val_760 * A[idx_off + 759];
val_763 = A[idx_off + 268] * val_125;
val_764 = val_763 * A[idx_off + 762];
val_765 = val_761 + val_764;
val_767 = A[idx_off + 243] * val_175;
val_768 = val_767 * A[idx_off + 766];
val_770 = A[idx_off + 191] * val_139;
val_771 = val_770 * A[idx_off + 769];
val_773 = A[idx_off + 183] * val_132;
val_774 = val_773 * A[idx_off + 772];
val_775 = val_771 + val_774;
val_777 = A[idx_off + 357] * val_153;
val_778 = val_777 * A[idx_off + 776];
val_780 = A[idx_off + 349] * val_146;
val_781 = val_780 * A[idx_off + 779];
val_782 = val_778 + val_781;
val_784 = A[idx_off + 328] * A[idx_off + 168];
val_785 = val_784 * A[idx_off + 783];
val_787 = A[idx_off + 357] * val_167;
val_788 = val_787 * A[idx_off + 786];
val_790 = A[idx_off + 349] * val_160;
val_791 = val_790 * A[idx_off + 789];
val_792 = val_788 + val_791;
val_794 = A[idx_off + 359] * A[idx_off + 168];
val_795 = val_794 * A[idx_off + 793];
val_797 = A[idx_off + 276] * val_214;
val_798 = val_797 * A[idx_off + 796];
val_800 = A[idx_off + 268] * val_207;
val_801 = val_800 * A[idx_off + 799];
val_802 = val_798 + val_801;
val_804 = A[idx_off + 276] * val_275;
val_805 = val_804 * A[idx_off + 803];
val_807 = A[idx_off + 268] * val_221;
val_808 = val_807 * A[idx_off + 806];
val_809 = val_805 + val_808;
val_811 = A[idx_off + 276] * val_275;
val_812 = val_811 * A[idx_off + 810];
val_814 = A[idx_off + 268] * val_228;
val_815 = val_814 * A[idx_off + 813];
val_816 = val_812 + val_815;
val_818 = A[idx_off + 284] * val_235;
val_819 = val_818 * A[idx_off + 817];
val_821 = A[idx_off + 243] * val_175;
val_822 = val_821 * A[idx_off + 820];
val_824 = A[idx_off + 191] * val_190;
val_825 = val_824 * A[idx_off + 823];
val_827 = A[idx_off + 183] * val_182;
val_828 = val_827 * A[idx_off + 826];
val_829 = val_825 + val_828;
val_831 = A[idx_off + 276] * val_275;
val_832 = val_831 * A[idx_off + 830];
val_834 = A[idx_off + 268] * val_264;
val_835 = val_834 * A[idx_off + 833];
val_836 = val_832 + val_835;
val_838 = A[idx_off + 292] * val_291;
val_839 = val_838 * A[idx_off + 837];
val_841 = val_306 * val_198;
val_842 = val_841 * A[idx_off + 840];
val_844 = A[idx_off + 357] * val_327;
val_845 = val_844 * A[idx_off + 843];
val_847 = A[idx_off + 349] * val_320;
val_848 = val_847 * A[idx_off + 846];
val_849 = val_845 + val_848;
val_851 = A[idx_off + 200] * A[idx_off + 199];
val_852 = val_851 * A[idx_off + 850];
val_854 = A[idx_off + 276] * val_214;
val_855 = val_854 * A[idx_off + 853];
val_857 = A[idx_off + 268] * val_207;
val_858 = val_857 * A[idx_off + 856];
val_859 = val_855 + val_858;
val_861 = A[idx_off + 276] * val_275;
val_862 = val_861 * A[idx_off + 860];
val_864 = A[idx_off + 268] * val_221;
val_865 = val_864 * A[idx_off + 863];
val_866 = val_862 + val_865;
val_868 = A[idx_off + 276] * val_275;
val_869 = val_868 * A[idx_off + 867];
val_871 = A[idx_off + 268] * val_228;
val_872 = val_871 * A[idx_off + 870];
val_873 = val_869 + val_872;
val_875 = A[idx_off + 284] * val_235;
val_876 = val_875 * A[idx_off + 874];
val_878 = A[idx_off + 243] * val_242;
val_879 = val_878 * A[idx_off + 877];
val_881 = val_257 * val_250;
val_882 = val_881 * A[idx_off + 880];
val_884 = A[idx_off + 276] * val_275;
val_885 = val_884 * A[idx_off + 883];
val_887 = A[idx_off + 268] * val_264;
val_888 = val_887 * A[idx_off + 886];
val_889 = val_885 + val_888;
val_891 = A[idx_off + 267] * A[idx_off + 265];
val_892 = val_891 * A[idx_off + 890];
val_894 = A[idx_off + 267] * A[idx_off + 266];
val_895 = val_894 * A[idx_off + 893];
val_897 = A[idx_off + 276] * val_275;
val_898 = val_897 * A[idx_off + 896];
val_900 = A[idx_off + 268] * val_275;
val_901 = val_900 * A[idx_off + 899];
val_902 = val_898 + val_901;
val_904 = A[idx_off + 284] * val_283;
val_905 = val_904 * A[idx_off + 903];
val_907 = A[idx_off + 292] * val_291;
val_908 = val_907 * A[idx_off + 906];
val_910 = val_306 * val_299;
val_911 = val_910 * A[idx_off + 909];
val_913 = A[idx_off + 341] * val_313;
val_914 = val_913 * A[idx_off + 912];
val_916 = A[idx_off + 357] * val_327;
val_917 = val_916 * A[idx_off + 915];
val_919 = A[idx_off + 349] * val_320;
val_920 = val_919 * A[idx_off + 918];
val_921 = val_917 + val_920;
val_923 = A[idx_off + 328] * A[idx_off + 358];
val_924 = val_923 * A[idx_off + 922];
val_926 = A[idx_off + 331] * A[idx_off + 329];
val_927 = val_926 * A[idx_off + 925];
val_929 = A[idx_off + 331] * A[idx_off + 330];
val_930 = val_929 * A[idx_off + 928];
val_932 = A[idx_off + 333] * A[idx_off + 332];
val_933 = val_932 * A[idx_off + 931];
val_935 = A[idx_off + 341] * val_340;
val_936 = val_935 * A[idx_off + 934];
val_938 = A[idx_off + 357] * val_356;
val_939 = val_938 * A[idx_off + 937];
val_941 = A[idx_off + 349] * val_348;
val_942 = val_941 * A[idx_off + 940];
val_943 = val_939 + val_942;
val_945 = A[idx_off + 359] * A[idx_off + 358];
val_946 = val_945 * A[idx_off + 944];
val_948 = val_376 * val_373;
val_949 = val_948 * A[idx_off + 947];
val_951 = val_895 * val_866;
val_952 = val_951 * A[idx_off + 950];
val_954 = val_892 * val_366;
val_955 = val_954 * A[idx_off + 953];
val_956 = val_949 + val_952;
val_957 = val_956 + val_955;
val_959 = val_388 * val_385;
val_960 = val_959 * A[idx_off + 958];
val_962 = val_382 * val_379;
val_963 = val_962 * A[idx_off + 961];
val_964 = val_960 + val_963;
val_966 = val_927 * val_755;
val_967 = val_966 * A[idx_off + 965];
val_969 = val_924 * val_395;
val_970 = val_969 * A[idx_off + 968];
val_972 = val_419 * val_416;
val_973 = val_972 * A[idx_off + 971];
val_975 = val_895 * val_409;
val_976 = val_975 * A[idx_off + 974];
val_978 = val_892 * val_402;
val_979 = val_978 * A[idx_off + 977];
val_980 = val_973 + val_976;
val_981 = val_980 + val_979;
val_983 = val_429 * val_422;
val_984 = val_983 * A[idx_off + 982];
val_986 = val_432 * val_908;
val_987 = val_986 * A[idx_off + 985];
val_989 = val_442 * val_439;
val_990 = val_989 * A[idx_off + 988];
val_992 = val_924 * val_449;
val_993 = val_992 * A[idx_off + 991];
val_995 = val_936 * val_490;
val_996 = val_995 * A[idx_off + 994];
val_998 = val_876 * val_873;
val_999 = val_998 * A[idx_off + 997];
val_1001 = val_895 * val_866;
val_1002 = val_1001 * A[idx_off + 1000];
val_1004 = val_892 * val_456;
val_1005 = val_1004 * A[idx_off + 1003];
val_1006 = val_999 + val_1002;
val_1007 = val_1006 + val_1005;
val_1009 = val_466 * val_459;
val_1010 = val_1009 * A[idx_off + 1008];
val_1012 = val_476 * val_755;
val_1013 = val_1012 * A[idx_off + 1011];
val_1015 = val_927 * val_755;
val_1016 = val_1015 * A[idx_off + 1014];
val_1017 = val_1013 + val_1016;
val_1019 = val_785 * val_473;
val_1020 = val_1019 * A[idx_off + 1018];
val_1022 = val_476 * val_755;
val_1023 = val_1022 * A[idx_off + 1021];
val_1025 = val_927 * val_755;
val_1026 = val_1025 * A[idx_off + 1024];
val_1027 = val_1023 + val_1026;
val_1029 = val_795 * val_483;
val_1030 = val_1029 * A[idx_off + 1028];
val_1032 = val_882 * val_768;
val_1033 = val_1032 * A[idx_off + 1031];
val_1035 = val_914 * val_490;
val_1036 = val_1035 * A[idx_off + 1034];
val_1038 = val_930 * val_490;
val_1039 = val_1038 * A[idx_off + 1037];
val_1041 = val_927 * val_490;
val_1042 = val_1041 * A[idx_off + 1040];
val_1043 = val_1036 + val_1039;
val_1044 = val_1043 + val_1042;
val_1046 = val_936 * val_490;
val_1047 = val_1046 * A[idx_off + 1045];
val_1049 = val_930 * val_490;
val_1050 = val_1049 * A[idx_off + 1048];
val_1052 = val_927 * val_490;
val_1053 = val_1052 * A[idx_off + 1051];
val_1054 = val_1047 + val_1050;
val_1055 = val_1054 + val_1053;
val_1057 = val_876 * val_873;
val_1058 = val_1057 * A[idx_off + 1056];
val_1060 = val_895 * val_866;
val_1061 = val_1060 * A[idx_off + 1059];
val_1063 = val_892 * val_497;
val_1064 = val_1063 * A[idx_off + 1062];
val_1065 = val_1058 + val_1061;
val_1066 = val_1065 + val_1064;
val_1068 = val_503 * val_500;
val_1069 = val_1068 * A[idx_off + 1067];
val_1071 = val_905 * val_902;
val_1072 = val_1071 * A[idx_off + 1070];
val_1074 = val_895 * val_902;
val_1075 = val_1074 * A[idx_off + 1073];
val_1077 = val_892 * val_510;
val_1078 = val_1077 * A[idx_off + 1076];
val_1079 = val_1072 + val_1075;
val_1080 = val_1079 + val_1078;
val_1082 = val_516 * val_513;
val_1083 = val_1082 * A[idx_off + 1081];
val_1085 = val_936 * val_755;
val_1086 = val_1085 * A[idx_off + 1084];
val_1088 = val_930 * val_755;
val_1089 = val_1088 * A[idx_off + 1087];
val_1091 = val_927 * val_755;
val_1092 = val_1091 * A[idx_off + 1090];
val_1093 = val_1086 + val_1089;
val_1094 = val_1093 + val_1092;
val_1096 = val_946 * val_519;
val_1097 = val_1096 * A[idx_off + 1095];
val_1099 = val_876 * val_873;
val_1100 = val_1099 * A[idx_off + 1098];
val_1102 = val_895 * val_866;
val_1103 = val_1102 * A[idx_off + 1101];
val_1105 = val_892 * val_526;
val_1106 = val_1105 * A[idx_off + 1104];
val_1107 = val_1100 + val_1103;
val_1108 = val_1107 + val_1106;
val_1110 = val_532 * val_529;
val_1111 = val_1110 * A[idx_off + 1109];
val_1113 = val_905 * val_902;
val_1114 = val_1113 * A[idx_off + 1112];
val_1116 = val_895 * val_902;
val_1117 = val_1116 * A[idx_off + 1115];
val_1119 = val_892 * val_539;
val_1120 = val_1119 * A[idx_off + 1118];
val_1121 = val_1114 + val_1117;
val_1122 = val_1121 + val_1120;
val_1124 = val_545 * val_542;
val_1125 = val_1124 * A[idx_off + 1123];
val_1127 = val_936 * val_755;
val_1128 = val_1127 * A[idx_off + 1126];
val_1130 = val_930 * val_755;
val_1131 = val_1130 * A[idx_off + 1129];
val_1133 = val_927 * val_755;
val_1134 = val_1133 * A[idx_off + 1132];
val_1135 = val_1128 + val_1131;
val_1136 = val_1135 + val_1134;
val_1138 = val_946 * val_548;
val_1139 = val_1138 * A[idx_off + 1137];
val_1141 = val_572 * val_569;
val_1142 = val_1141 * A[idx_off + 1140];
val_1144 = val_895 * val_562;
val_1145 = val_1144 * A[idx_off + 1143];
val_1147 = val_892 * val_555;
val_1148 = val_1147 * A[idx_off + 1146];
val_1149 = val_1142 + val_1145;
val_1150 = val_1149 + val_1148;
val_1152 = val_582 * val_575;
val_1153 = val_1152 * A[idx_off + 1151];
val_1155 = val_905 * val_902;
val_1156 = val_1155 * A[idx_off + 1154];
val_1158 = val_895 * val_902;
val_1159 = val_1158 * A[idx_off + 1157];
val_1161 = val_892 * val_589;
val_1162 = val_1161 * A[idx_off + 1160];
val_1163 = val_1156 + val_1159;
val_1164 = val_1163 + val_1162;
val_1166 = val_595 * val_592;
val_1167 = val_1166 * A[idx_off + 1165];
val_1169 = val_930 * val_605;
val_1170 = val_1169 * A[idx_off + 1168];
val_1172 = val_927 * val_605;
val_1173 = val_1172 * A[idx_off + 1171];
val_1174 = val_1170 + val_1173;
val_1176 = val_924 * val_602;
val_1177 = val_1176 * A[idx_off + 1175];
val_1179 = val_930 * val_605;
val_1180 = val_1179 * A[idx_off + 1178];
val_1182 = val_927 * val_605;
val_1183 = val_1182 * A[idx_off + 1181];
val_1184 = val_1180 + val_1183;
val_1186 = val_622 * val_619;
val_1187 = val_1186 * A[idx_off + 1185];
val_1189 = val_895 * val_866;
val_1190 = val_1189 * A[idx_off + 1188];
val_1192 = val_892 * val_612;
val_1193 = val_1192 * A[idx_off + 1191];
val_1194 = val_1187 + val_1190;
val_1195 = val_1194 + val_1193;
val_1197 = val_634 * val_631;
val_1198 = val_1197 * A[idx_off + 1196];
val_1200 = val_628 * val_625;
val_1201 = val_1200 * A[idx_off + 1199];
val_1202 = val_1198 + val_1201;
val_1204 = val_930 * val_755;
val_1205 = val_1204 * A[idx_off + 1203];
val_1207 = val_924 * val_641;
val_1208 = val_1207 * A[idx_off + 1206];
val_1210 = val_658 * val_655;
val_1211 = val_1210 * A[idx_off + 1209];
val_1213 = val_895 * val_866;
val_1214 = val_1213 * A[idx_off + 1212];
val_1216 = val_892 * val_648;
val_1217 = val_1216 * A[idx_off + 1215];
val_1218 = val_1211 + val_1214;
val_1219 = val_1218 + val_1217;
val_1221 = val_664 * val_661;
val_1222 = val_1221 * A[idx_off + 1220];
val_1224 = val_905 * val_902;
val_1225 = val_1224 * A[idx_off + 1223];
val_1227 = val_895 * val_902;
val_1228 = val_1227 * A[idx_off + 1226];
val_1230 = val_892 * val_671;
val_1231 = val_1230 * A[idx_off + 1229];
val_1232 = val_1225 + val_1228;
val_1233 = val_1232 + val_1231;
val_1235 = val_677 * val_674;
val_1236 = val_1235 * A[idx_off + 1234];
val_1238 = val_680 * val_755;
val_1239 = val_1238 * A[idx_off + 1237];
val_1241 = val_924 * val_687;
val_1242 = val_1241 * A[idx_off + 1240];
val_1244 = val_711 * val_708;
val_1245 = val_1244 * A[idx_off + 1243];
val_1247 = val_895 * val_701;
val_1248 = val_1247 * A[idx_off + 1246];
val_1250 = val_892 * val_694;
val_1251 = val_1250 * A[idx_off + 1249];
val_1252 = val_1245 + val_1248;
val_1253 = val_1252 + val_1251;
val_1255 = val_721 * val_714;
val_1256 = val_1255 * A[idx_off + 1254];
val_1258 = val_905 * val_902;
val_1259 = val_1258 * A[idx_off + 1257];
val_1261 = val_895 * val_902;
val_1262 = val_1261 * A[idx_off + 1260];
val_1264 = val_892 * val_728;
val_1265 = val_1264 * A[idx_off + 1263];
val_1266 = val_1259 + val_1262;
val_1267 = val_1266 + val_1265;
val_1269 = val_738 * val_731;
val_1270 = val_1269 * A[idx_off + 1268];
val_1272 = val_924 * val_745;
val_1273 = val_1272 * A[idx_off + 1271];
val_1275 = val_927 * val_852;
val_1276 = val_1275 * A[idx_off + 1274];
val_1278 = val_946 * val_752;
val_1279 = val_1278 * A[idx_off + 1277];
val_1281 = val_758 * val_755;
val_1282 = val_1281 * A[idx_off + 1280];
val_1284 = val_930 * val_755;
val_1285 = val_1284 * A[idx_off + 1283];
val_1286 = val_1282 + val_1285;
val_1288 = val_876 * val_873;
val_1289 = val_1288 * A[idx_off + 1287];
val_1291 = val_895 * val_866;
val_1292 = val_1291 * A[idx_off + 1290];
val_1294 = val_892 * val_765;
val_1295 = val_1294 * A[idx_off + 1293];
val_1296 = val_1289 + val_1292;
val_1297 = val_1296 + val_1295;
val_1299 = val_775 * val_768;
val_1300 = val_1299 * A[idx_off + 1298];
val_1302 = val_785 * val_782;
val_1303 = val_1302 * A[idx_off + 1301];
val_1305 = val_795 * val_792;
val_1306 = val_1305 * A[idx_off + 1304];
val_1308 = val_819 * val_816;
val_1309 = val_1308 * A[idx_off + 1307];
val_1311 = val_895 * val_809;
val_1312 = val_1311 * A[idx_off + 1310];
val_1314 = val_892 * val_802;
val_1315 = val_1314 * A[idx_off + 1313];
val_1316 = val_1309 + val_1312;
val_1317 = val_1316 + val_1315;
val_1319 = val_829 * val_822;
val_1320 = val_1319 * A[idx_off + 1318];
val_1322 = val_905 * val_902;
val_1323 = val_1322 * A[idx_off + 1321];
val_1325 = val_895 * val_902;
val_1326 = val_1325 * A[idx_off + 1324];
val_1328 = val_892 * val_836;
val_1329 = val_1328 * A[idx_off + 1327];
val_1330 = val_1323 + val_1326;
val_1331 = val_1330 + val_1329;
val_1333 = val_842 * val_839;
val_1334 = val_1333 * A[idx_off + 1332];
val_1336 = val_924 * val_849;
val_1337 = val_1336 * A[idx_off + 1335];
val_1339 = val_930 * val_852;
val_1340 = val_1339 * A[idx_off + 1338];
val_1342 = val_876 * val_873;
val_1343 = val_1342 * A[idx_off + 1341];
val_1345 = val_895 * val_866;
val_1346 = val_1345 * A[idx_off + 1344];
val_1348 = val_892 * val_859;
val_1349 = val_1348 * A[idx_off + 1347];
val_1350 = val_1343 + val_1346;
val_1351 = val_1350 + val_1349;
val_1353 = val_882 * val_879;
val_1354 = val_1353 * A[idx_off + 1352];
val_1356 = val_905 * val_902;
val_1357 = val_1356 * A[idx_off + 1355];
val_1359 = val_895 * val_902;
val_1360 = val_1359 * A[idx_off + 1358];
val_1362 = val_892 * val_889;
val_1363 = val_1362 * A[idx_off + 1361];
val_1364 = val_1357 + val_1360;
val_1365 = val_1364 + val_1363;
val_1367 = val_911 * val_908;
val_1368 = val_1367 * A[idx_off + 1366];
val_1370 = val_914 * val_933;
val_1371 = val_1370 * A[idx_off + 1369];
val_1373 = val_930 * val_933;
val_1374 = val_1373 * A[idx_off + 1372];
val_1376 = val_927 * val_933;
val_1377 = val_1376 * A[idx_off + 1375];
val_1378 = val_1371 + val_1374;
val_1379 = val_1378 + val_1377;
val_1381 = val_924 * val_921;
val_1382 = val_1381 * A[idx_off + 1380];
val_1384 = val_936 * val_933;
val_1385 = val_1384 * A[idx_off + 1383];
val_1387 = val_930 * val_933;
val_1388 = val_1387 * A[idx_off + 1386];
val_1390 = val_927 * val_933;
val_1391 = val_1390 * A[idx_off + 1389];
val_1392 = val_1385 + val_1388;
val_1393 = val_1392 + val_1391;
val_1395 = val_946 * val_943;
val_1396 = val_1395 * A[idx_off + 1394];
val_1398 = val_1368 * val_1365;
val_1399 = val_1398 * A[idx_off + 1397];
val_1401 = val_964 * val_957;
val_1402 = val_1401 * A[idx_off + 1400];
val_1403 = val_1399 + val_1402;
val_1405 = val_970 * val_967;
val_1406 = val_1405 * A[idx_off + 1404];
val_1408 = val_987 * val_1365;
val_1409 = val_1408 * A[idx_off + 1407];
val_1411 = val_984 * val_981;
val_1412 = val_1411 * A[idx_off + 1410];
val_1413 = val_1409 + val_1412;
val_1415 = val_1396 * val_996;
val_1416 = val_1415 * A[idx_off + 1414];
val_1418 = val_993 * val_990;
val_1419 = val_1418 * A[idx_off + 1417];
val_1420 = val_1416 + val_1419;
val_1422 = val_1368 * val_1365;
val_1423 = val_1422 * A[idx_off + 1421];
val_1425 = val_1010 * val_1007;
val_1426 = val_1425 * A[idx_off + 1424];
val_1427 = val_1423 + val_1426;
val_1429 = val_1030 * val_1027;
val_1430 = val_1429 * A[idx_off + 1428];
val_1432 = val_1020 * val_1017;
val_1433 = val_1432 * A[idx_off + 1431];
val_1434 = val_1430 + val_1433;
val_1436 = val_1368 * val_1365;
val_1437 = val_1436 * A[idx_off + 1435];
val_1439 = val_1033 * val_1297;
val_1440 = val_1439 * A[idx_off + 1438];
val_1441 = val_1437 + val_1440;
val_1443 = val_1306 * val_1055;
val_1444 = val_1443 * A[idx_off + 1442];
val_1446 = val_1303 * val_1044;
val_1447 = val_1446 * A[idx_off + 1445];
val_1448 = val_1444 + val_1447;
val_1450 = val_1083 * val_1080;
val_1451 = val_1450 * A[idx_off + 1449];
val_1453 = val_1069 * val_1066;
val_1454 = val_1453 * A[idx_off + 1452];
val_1455 = val_1451 + val_1454;
val_1457 = val_1097 * val_1094;
val_1458 = val_1457 * A[idx_off + 1456];
val_1460 = val_1125 * val_1122;
val_1461 = val_1460 * A[idx_off + 1459];
val_1463 = val_1111 * val_1108;
val_1464 = val_1463 * A[idx_off + 1462];
val_1465 = val_1461 + val_1464;
val_1467 = val_1139 * val_1136;
val_1468 = val_1467 * A[idx_off + 1466];
val_1470 = val_1167 * val_1164;
val_1471 = val_1470 * A[idx_off + 1469];
val_1473 = val_1153 * val_1150;
val_1474 = val_1473 * A[idx_off + 1472];
val_1475 = val_1471 + val_1474;
val_1477 = val_1396 * val_1184;
val_1478 = val_1477 * A[idx_off + 1476];
val_1480 = val_1177 * val_1174;
val_1481 = val_1480 * A[idx_off + 1479];
val_1482 = val_1478 + val_1481;
val_1484 = val_1368 * val_1365;
val_1485 = val_1484 * A[idx_off + 1483];
val_1487 = val_1202 * val_1195;
val_1488 = val_1487 * A[idx_off + 1486];
val_1489 = val_1485 + val_1488;
val_1491 = val_1208 * val_1205;
val_1492 = val_1491 * A[idx_off + 1490];
val_1494 = val_1236 * val_1233;
val_1495 = val_1494 * A[idx_off + 1493];
val_1497 = val_1222 * val_1219;
val_1498 = val_1497 * A[idx_off + 1496];
val_1499 = val_1495 + val_1498;
val_1501 = val_1242 * val_1239;
val_1502 = val_1501 * A[idx_off + 1500];
val_1504 = val_1270 * val_1267;
val_1505 = val_1504 * A[idx_off + 1503];
val_1507 = val_1256 * val_1253;
val_1508 = val_1507 * A[idx_off + 1506];
val_1509 = val_1505 + val_1508;
val_1511 = val_1279 * val_1276;
val_1512 = val_1511 * A[idx_off + 1510];
val_1514 = val_1273 * val_1276;
val_1515 = val_1514 * A[idx_off + 1513];
val_1516 = val_1512 + val_1515;
val_1518 = val_1306 * val_1286;
val_1519 = val_1518 * A[idx_off + 1517];
val_1521 = val_1303 * val_1286;
val_1522 = val_1521 * A[idx_off + 1520];
val_1523 = val_1519 + val_1522;
val_1525 = val_1368 * val_1365;
val_1526 = val_1525 * A[idx_off + 1524];
val_1528 = val_1300 * val_1297;
val_1529 = val_1528 * A[idx_off + 1527];
val_1530 = val_1526 + val_1529;
val_1532 = val_1306 * val_1393;
val_1533 = val_1532 * A[idx_off + 1531];
val_1535 = val_1303 * val_1379;
val_1536 = val_1535 * A[idx_off + 1534];
val_1537 = val_1533 + val_1536;
val_1539 = val_1334 * val_1331;
val_1540 = val_1539 * A[idx_off + 1538];
val_1542 = val_1320 * val_1317;
val_1543 = val_1542 * A[idx_off + 1541];
val_1544 = val_1540 + val_1543;
val_1546 = val_1396 * val_1340;
val_1547 = val_1546 * A[idx_off + 1545];
val_1549 = val_1337 * val_1340;
val_1550 = val_1549 * A[idx_off + 1548];
val_1551 = val_1547 + val_1550;
val_1553 = val_1368 * val_1365;
val_1554 = val_1553 * A[idx_off + 1552];
val_1556 = val_1354 * val_1351;
val_1557 = val_1556 * A[idx_off + 1555];
val_1558 = val_1554 + val_1557;
val_1560 = val_1396 * val_1393;
val_1561 = val_1560 * A[idx_off + 1559];
val_1563 = val_1382 * val_1379;
val_1564 = val_1563 * A[idx_off + 1562];
val_1565 = val_1561 + val_1564;
val_1567 = val_1523 * val_1530;
val_1568 = val_1567 * A[idx_off + 1566];
val_1570 = val_1565 * val_1558;
val_1571 = val_1570 * A[idx_off + 1569];
val_1573 = val_1502 * val_1499;
val_1574 = val_1573 * A[idx_off + 1572];
val_1576 = val_1468 * val_1465;
val_1577 = val_1576 * A[idx_off + 1575];
val_1579 = val_1482 * val_1475;
val_1580 = val_1579 * A[idx_off + 1578];
val_1582 = val_1406 * val_1403;
val_1583 = val_1582 * A[idx_off + 1581];
val_1585 = val_1420 * val_1413;
val_1586 = val_1585 * A[idx_off + 1584];
val_1588 = val_1448 * val_1441;
val_1589 = val_1588 * A[idx_off + 1587];
val_1591 = val_1492 * val_1489;
val_1592 = val_1591 * A[idx_off + 1590];
val_1594 = val_1434 * val_1427;
val_1595 = val_1594 * A[idx_off + 1593];
val_1597 = val_1458 * val_1455;
val_1598 = val_1597 * A[idx_off + 1596];
val_1600 = val_1516 * val_1509;
val_1601 = val_1600 * A[idx_off + 1599];
val_1603 = val_1551 * val_1544;
val_1604 = val_1603 * A[idx_off + 1602];
val_1606 = val_1537 * val_1530;
val_1607 = val_1606 * A[idx_off + 1605];
val_1608 = val_1568 + val_1571;
val_1609 = val_1574 + val_1577;
val_1610 = val_1580 + val_1583;
val_1611 = val_1586 + val_1589;
val_1612 = val_1592 + val_1595;
val_1613 = val_1598 + val_1601;
val_1614 = val_1604 + val_1607;
val_1615 = val_1608 + val_1609;
val_1616 = val_1610 + val_1611;
val_1617 = val_1612 + val_1613;
val_1618 = val_1615 + val_1616;
val_1619 = val_1617 + val_1614;
val_1620 = val_1618 + val_1619;
val_1625 = A[idx_off + 1621] * A[idx_off + 1622];
val_1626 = A[idx_off + 1623] * A[idx_off + 1624];
val_1627 = val_1625 + val_1626;
val_1629 = val_1627 * val_1620;
val_1630 = val_1629 * A[idx_off + 1628];
A[idx_off] += val_1630;
}
A[i*n_inputs]= val_1630;
}
/*
 * Host driver: builds random operand-index and operator-selection tables
 * plus a batch of inputs, runs the generated `ac` kernel twice
 * (warm-up + timed), and reports kernel time and arithmetic throughput.
 */
#define CHECK_CUDA(call, what)                                                \
    do {                                                                      \
        cudaError_t err_ = (call);                                            \
        if (err_ != cudaSuccess)                                              \
        {                                                                     \
            fprintf(stderr, "Failed to %s (error code %s)!\n", (what),        \
                    cudaGetErrorString(err_));                                \
            exit(EXIT_FAILURE);                                               \
        }                                                                     \
    } while (0)

int
main(void)
{
    //#define N_INPUTS 592
    #define N_INPUTS 512
    #define N_ARITH 1039
    const int n_inputs = N_INPUTS;
    const int n_arith = N_ARITH;
    const int batch_size = 1024;     // one thread per batch element
    const int iter = 1;
    // Below thresh, operand indices refer to inputs; above, to earlier results.
    const int thresh = n_arith / 3;
    size_t size = batch_size * (n_inputs) * sizeof(float);
    size_t size_idx = n_arith * sizeof(int);

    float *h_A = (float *)malloc(size);
    int *h_B = (int *)malloc(size_idx);
    int *h_C = (int *)malloc(size_idx);
    int *h_op_sel = (int *)malloc(size_idx);
    if (h_A == NULL || h_B == NULL || h_C == NULL || h_op_sel == NULL)
    {
        fprintf(stderr, "Failed to allocate host buffers!\n");
        exit(EXIT_FAILURE);
    }

    // Initialize the host operand-index and operator-selection tables.
    for (int i = 0; i < n_arith; ++i)
    {
        if (i < thresh) {
            h_B[i] = rand() % (n_inputs);
            h_C[i] = rand() % (n_inputs);
        }
        else {
            h_B[i] = rand() % (i);
            h_C[i] = rand() % (i);
        }
        h_op_sel[i] = rand() % 2;
    }
    // All inputs set to 1 so the output is easy to sanity-check by eye.
    for (int i = 0; i < n_inputs; ++i) {
        for (int b = 0; b < batch_size; ++b) {
            //h_A[b* n_inputs + i]= float(rand());
            h_A[b * n_inputs + i] = 1;
        }
    }

    // Device buffers (error messages previously all said "vector A").
    float *d_A = NULL;
    int *d_B = NULL;
    int *d_C = NULL;
    int *d_op_sel = NULL;
    CHECK_CUDA(cudaMalloc((void **)&d_A, size), "allocate device vector A");
    CHECK_CUDA(cudaMalloc((void **)&d_B, size_idx), "allocate device vector B");
    CHECK_CUDA(cudaMalloc((void **)&d_C, size_idx), "allocate device vector C");
    CHECK_CUDA(cudaMalloc((void **)&d_op_sel, size_idx), "allocate device vector op_sel");

    // Copy the host input vectors to device memory.
    printf("Copy input data from the host memory to the CUDA device\n");
    CHECK_CUDA(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice),
               "copy vector A from host to device");
    CHECK_CUDA(cudaMemcpy(d_B, h_B, size_idx, cudaMemcpyHostToDevice),
               "copy vector B from host to device");
    CHECK_CUDA(cudaMemcpy(d_C, h_C, size_idx, cudaMemcpyHostToDevice),
               "copy vector C from host to device");
    CHECK_CUDA(cudaMemcpy(d_op_sel, h_op_sel, size_idx, cudaMemcpyHostToDevice),
               "copy vector op_sel from host to device");

    // Launch configuration: one thread per batch element, ceil-div grid.
    int threadsPerBlock = 32;
    int blocksPerGrid = (batch_size + threadsPerBlock - 1) / threadsPerBlock;
    struct timeval t1, t2;

    // Warm-up launch (not timed) so the timed run excludes one-time costs.
    ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
    CHECK_CUDA(cudaDeviceSynchronize(), "synchronize after warm-up run");

    gettimeofday(&t1, 0);
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    ac<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, d_op_sel, n_inputs, n_arith, thresh, iter);
    // Kernel launches are asynchronous; block until completion before timing.
    CHECK_CUDA(cudaDeviceSynchronize(), "synchronize after timed run");
    gettimeofday(&t2, 0);

    double time = (1000000.0*(t2.tv_sec-t1.tv_sec) + t2.tv_usec-t1.tv_usec)/1000.0;
    printf("Time of kernel: %3.4f ms \n", time);
    CHECK_CUDA(cudaGetLastError(), "launch ac kernel");
    printf("Throughput: %.3f Gops/sec, Batch: %d, nIter: %d, n_arith: %d\n", (((1.0*batch_size*iter*n_arith))/time)/1E6, batch_size, iter, n_arith);

    // Copy the device result vector back to host memory.
    printf("Copy output data from the CUDA device to the host memory\n");
    CHECK_CUDA(cudaMemcpy(h_A, d_A, size, cudaMemcpyDeviceToHost),
               "copy vector A from device to host");

    // Print the first result slot of the first 32 batch elements.
    for (int i = 0; i < 32; i++) {
        printf("%d : %f,", i, h_A[i*n_inputs]);
    }

    // Release every device and host buffer (d_op_sel and h_op_sel included).
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    cudaFree(d_op_sel);
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_op_sel);
    printf("Done!\n");
    return 0;
}
|
6,869 |
// Este codigo calcula (u[i]-u)/u_d_m en el dispositivo GPU
#include <stdio.h>
// Normalizes each sample on the device: lu[i] = (u[i] - u_m) / u_d.
// One thread per element; threads with index >= n do nothing.
__global__ void operacionKernelGPU(float* u, float* lu, float u_m, float u_d, int n)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    lu[i] = (u[i] - u_m) / u_d;
}
// Host driver: fills u with 0..n-1, computes (u[i]-u_m)/u_d on the GPU,
// and verifies the result against a CPU reference.
// Usage: no args -> n = 25; one arg -> n = atoi(argv[1]).
int main(int argc, char**argv)
{
    unsigned int n;
    if(argc == 1) {
        n = 25;
    } else if(argc == 2) {
        n = atoi(argv[1]);
    } else {
        printf("\n Parametros no validos!"
               "\n Uso: ./derivCPU # Vector of longitud 10,000"
               "\n Uso: ./derivCPU <m> # Vector of longitud m"
               "\n");
        exit(0);
    }
    // Normalization constants: lu = (u - u_m) / u_d.
    const int u_m = 0;
    const int u_d = 255;
    // size_t instead of int: avoids overflow for very large n.
    size_t size = n*sizeof(float);
    float* h_u = (float*) malloc( size );
    for (unsigned int i=0; i < n; i++) { h_u[i] = i; }
    float* h_lu = (float*) malloc( size );

    // Device buffers.
    float* d_u;
    cudaMalloc((void**)&d_u, size);
    float* d_lu;
    cudaMalloc((void**)&d_lu, size);

    cudaMemcpy( d_u, h_u, size, cudaMemcpyHostToDevice );
    // Ceil-div grid of 256-thread blocks; the kernel guards the tail.
    operacionKernelGPU<<<ceil(n/256.0),256>>>(d_u, d_lu, u_m, u_d, n);
    cudaMemcpy( h_lu, d_lu, size, cudaMemcpyDeviceToHost );

    const float toleranciaRelativa = 1e-4;
    // Verify against the CPU reference. When the reference value is zero
    // the relative error is undefined (0/0 previously produced NaN, which
    // silently passed the comparison); fall back to the absolute error.
    for(unsigned int i=0; i<n; i++)
    {
        float operBasic = (h_u[i]-u_m)/u_d;
        float diferencia = operBasic - h_lu[i];
        float errorRelativo = (operBasic != 0.0f) ? diferencia / operBasic
                                                  : diferencia;
        if (errorRelativo > toleranciaRelativa
            || errorRelativo < -toleranciaRelativa) {
            printf("PRUEBA FALLIDA\n\n");
            exit(0);
        }
    }
    printf("PRUEBA SUPERADA\n\n");
    if (n==25)
    {
        for(unsigned int i=0; i<n; i++)
        {
            printf("%10.8f\t",h_lu[i]);
        }
        printf("\n");
    }
    free(h_u);
    free(h_lu);
    cudaFree(d_u);
    cudaFree(d_lu);
    return 0;
}
6,870 | #include "includes.h"
// Total thread count of the 1-D grid: the stride used by grid-stride loops.
__device__ int DeviceDefaultStep() {
    return blockDim.x * gridDim.x;
}
// Flat global index of the calling thread within a 1-D launch.
__device__ int DeviceDefaultIndex() {
    return threadIdx.x + blockIdx.x * blockDim.x;
}
// Increments iters[i] by one for every position i whose flag is set.
// Grid-stride loop: correct for any launch configuration and any count.
__global__ void KernelSelfPlusIters(const bool *indexers, int *iters, int count) {
    const int start = DeviceDefaultIndex();
    const int stride = DeviceDefaultStep();
    for (int i = start; i < count; i += stride) {
        if (indexers[i]) {
            iters[i] += 1;
        }
    }
}
6,871 | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#include <cstdlib>
#include <curand_kernel.h>
#include <bits/stdc++.h>
using namespace std;
const int numberNodes = 439;
struct Agent{
int size;
float fitness;
int genome[numberNodes];
};
/* In-place Fisher-Yates-style shuffle of the first n ints of array.
   Uses rand(), so the permutation is only well distributed while n is
   much smaller than RAND_MAX; seed with srand() for reproducibility. */
void shuffle(int *array, size_t n)
{
    if (n <= 1)
        return;
    for (size_t i = 0; i + 1 < n; i++)
    {
        // Pick a partner index j in [i, n) and swap.
        size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
        int tmp = array[i];
        array[i] = array[j];
        array[j] = tmp;
    }
}
// Builds an Agent whose genome is a random permutation of 0..size-1.
// `size` must not exceed numberNodes (genome is a fixed-length array).
Agent RandomAgent(int size){
    struct Agent agent{};
    agent.size = size;
    agent.fitness = 0.0;
    int perm[numberNodes];
    for (int i = 0; i < size; i++)
        perm[i] = i;
    shuffle(perm, size);
    for (int i = 0; i < size; i++)
        agent.genome[i] = perm[i];
    return agent;
}
// Device-side Agent constructor: copies a full-length genome, zero fitness.
__device__ Agent NewAgent (const int genome[]){
    struct Agent agent{};
    agent.size = numberNodes;
    agent.fitness = 0.0;
    for (int i = 0; i < numberNodes; ++i)
        agent.genome[i] = genome[i];
    return agent;
}
// Swaps two distinct random genome positions (a swap mutation).
// NOTE(review): `state` is passed BY VALUE, so the caller's RNG state does
// not advance; two successive Mutate calls on the same caller state draw
// the same random positions. Passing curandState* would decorrelate them —
// confirm intent before changing the signature (it would affect callers).
__device__ void Mutate(struct Agent *agent, curandState state) {
    unsigned int p1 = curand(&state) % numberNodes;
    // Redraw until the second position differs from the first.
    while (true) {
        unsigned int p2 = curand(&state) % numberNodes;
        if (p1 != p2) {
            int temp = agent->genome[p1];
            agent->genome[p1] = agent->genome[p2];
            agent->genome[p2] = temp;
            break;
        }
    }
}
// Order-crossover-style recombination of two permutations a and b.
// A cross point is drawn in [1, numberNodes-2]; the prefix [0, crossPoint]
// of each parent is kept, and each tail is refilled with the values that
// previously occupied it, taken in the order they appear in the other parent.
// NOTE(review): the second refill loop reads `a` AFTER a's tail has already
// been rewritten by the first loop — verify this asymmetry is intended.
// NOTE(review): `state` is passed by value, so the caller's RNG state does
// not advance (see Mutate). Assumes a and b are valid permutations of
// 0..numberNodes-1; out-of-range genome values would index tempA/tempB OOB.
__device__ void CrossPermutation(int a[], int b[], curandState state){
    unsigned int crossPoint = curand(&state)%(numberNodes-2)+1;
    unsigned int tempSize = crossPoint+1;   // length of the preserved prefix
    int tempA[numberNodes];
    int tempB[numberNodes];
    for(int i = 0;i<numberNodes;i++){
        tempA[i]=0;
        tempB[i]=0;
    }
    // Mark the values currently sitting in each parent's tail.
    for(int i = tempSize;i<numberNodes;i++){
        tempA[a[i]]=1;
        tempB[b[i]]=1;
    }
    unsigned int idenA = tempSize;
    unsigned int idenB = tempSize;
    // Refill a's tail with b's elements (in b's order) that belonged to a's tail.
    for (int i = 0;i < numberNodes ; i++) {
        if ( tempA[b[i]] ==1 ) {
            a[idenA] = b[i];
            idenA++;
        }
    }
    // Refill b's tail from a (which has already been modified above).
    for (int i = 0;i < numberNodes ; i++) {
        if ( tempB[a[i]] ==1 ) {
            b[idenB] = a[i];
            idenB++;
        }
    }
};
// Writes into agent->fitness the total tour length of the agent's genome:
// sum over consecutive genome pairs (wrapping from the last node back to
// the first) of distance[from + to*numberNodes].
__device__ void FitnessFunction(struct Agent *agent, const float *distance){
    float total = 0;
    for (int i = 0; i < numberNodes; i++){
        const int from = agent->genome[i];
        const int to = agent->genome[(i + 1 == numberNodes) ? 0 : i + 1];
        total += distance[from + to * numberNodes];
    }
    agent->fitness = total;
}
// Returns whichever of the three agents has the lowest (best) fitness.
// Tie-breaking mirrors the comparison order: a1 wins only if strictly
// smaller than both; otherwise a2 wins if strictly smaller than a3.
__device__ Agent GetBest(struct Agent a1,struct Agent a2,struct Agent a3){
    struct Agent best = a3;
    if (a2.fitness < a3.fitness)
        best = a2;
    if (a1.fitness < a2.fitness && a1.fitness < a3.fitness)
        best = a1;
    return best;
}
void PrintAgent(struct Agent agent){
cout<<agent.fitness<< " ";
for (int i : agent.genome){
cout<< i << " ";
}
cout << endl;
}
// One GA generation step, one individual per thread:
//  - score the current individual,
//  - with probability `rate`: cross it with a random partner, mutate both
//    children, score them, and keep the best of {child1, child2, parent},
//  - otherwise carry the individual over unchanged.
// Reads DIPopulation, writes DFPopulation; DDistance is the flattened
// numberNodes x numberNodes distance matrix.
// NOTE(review): `state` is passed by value into CrossPermutation and both
// Mutate calls, so those calls replay the same random sequence — a1 and a2
// receive identical swap positions. Confirm whether that is intended.
__global__ void EvaluateGen(float *DDistance, struct Agent *DIPopulation, struct Agent *DFPopulation, int popSize, float rate) {
    unsigned int tId = threadIdx.x + (blockIdx.x * blockDim.x);
    curandState state;
    // Seeded with clock() so each launch differs; sequence number = thread id.
    curand_init((unsigned long long)clock() , tId, 0, &state);
    if(tId < popSize){
        FitnessFunction(&DIPopulation[tId], DDistance);
        if(curand_uniform(&state) < rate){
            unsigned int pair = curand(&state)%popSize;  // random crossover partner
            // Local copies of both parents' genomes (crossover mutates them).
            int n1[numberNodes];
            int n2[numberNodes];
            memcpy(n1,DIPopulation[pair].genome,sizeof(int)*numberNodes);
            memcpy(n2,DIPopulation[tId].genome,sizeof(int)*numberNodes);
            CrossPermutation(n1,n2,state);
            struct Agent a1 = NewAgent(n1);
            struct Agent a2 = NewAgent(n2);
            Mutate(&a1, state);
            Mutate(&a2, state);
            FitnessFunction(&a1,DDistance);
            FitnessFunction(&a2,DDistance);
            // Elitism: the parent survives unless a child beats it.
            DFPopulation[tId] = GetBest(a1,a2,DIPopulation[tId]);
        }else{
            DFPopulation[tId] = DIPopulation[tId];
        }
    }
}
// Computes summary statistics for one generation and stores them in
// results[generation*5 .. generation*5+4] as
// {best, worst, mean, median, population standard deviation}.
// Sorts `current` in place (ascending); lower fitness is better.
// `mean` is supplied by the caller and only echoed / used for the deviation.
void CondensedResult(float current[], float *results, float mean, int popSize, int generation){
    if (popSize <= 0) return;   // nothing to summarize
    float median = 0, best = 0, worst = 0, stDeviation = 0;
    std::sort(current, current + popSize);
    best = current[0];
    worst = current[popSize-1];
    // Median of a sorted 0-indexed array: the middle element for odd sizes,
    // the average of the two middle elements for even sizes.
    // (The previous indices were off by one in both branches, and read one
    // element past the end when popSize was 1.)
    if (popSize % 2 == 0){
        median = (current[popSize/2 - 1] + current[popSize/2]) / 2;
    } else {
        median = current[popSize/2];
    }
    for(int i = 0;i<popSize;i++){
        stDeviation += std::pow(current[i]-mean,2);
    }
    stDeviation /= popSize;            // population (not sample) variance
    stDeviation = std::sqrt(stDeviation);
    results[generation*5] = best;
    results[generation*5+1] = worst;
    results[generation*5+2] = mean;
    results[generation*5+3] = median;
    results[generation*5+4] = stDeviation;
}
extern "C" {
// Runs `generations` GA iterations on the GPU over a population of `popSize`
// tours and writes per-generation statistics (5 floats per generation:
// best/worst/mean/median/stddev) into `results`.
// distance: flattened numberNodes x numberNodes matrix; rate: crossover rate.
void evaluateGen(float *distance, float *results, int popSize, int generations, float rate) {
    srand(time(nullptr));
    // Heap-allocate the population: popSize * sizeof(Agent) (~1.7 KB each)
    // can easily blow the stack as a VLA for large populations.
    struct Agent *population = new struct Agent[popSize];
    for (int i = 0; i< popSize;i++){
        population[i] = RandomAgent(numberNodes);
    }
    unsigned long DDistanceSize = (numberNodes*numberNodes)*sizeof(float);
    float* DDistance;
    cudaMalloc((void**)&DDistance,DDistanceSize);
    cudaMemcpy(DDistance,distance,DDistanceSize,cudaMemcpyHostToDevice);
    // sizeof(struct Agent) accounts for any compiler padding, unlike a
    // hand-summed per-field size.
    unsigned long DPopulationSize = sizeof(struct Agent) * (unsigned long)popSize;
    struct Agent* DIPopulation;
    cudaMalloc((void**)&DIPopulation,DPopulationSize);
    struct Agent* DFPopulation;
    cudaMalloc((void**)&DFPopulation,DPopulationSize);
    cudaMemcpy(DIPopulation,population,DPopulationSize,cudaMemcpyHostToDevice);
    // Launch shape is <<<blocks, threadsPerBlock>>>: ceil-div grid of
    // 128-thread blocks, one thread per individual (kernel guards the tail).
    const int threadsPerBlock = 128;
    const int blocks = (popSize + threadsPerBlock - 1) / threadsPerBlock;
    float *popResults = new float[popSize];
    for (int i = 0; i < generations;i++){
        float mean = 0.0;
        // Ping-pong between the two device populations each generation.
        if(i%2==0){
            EvaluateGen<<<blocks, threadsPerBlock>>>(DDistance, DIPopulation, DFPopulation, popSize, rate);
            cudaMemcpy(population,DFPopulation,DPopulationSize,cudaMemcpyDeviceToHost);
        }else{
            EvaluateGen<<<blocks, threadsPerBlock>>>(DDistance, DFPopulation, DIPopulation, popSize, rate);
            cudaMemcpy(population,DIPopulation,DPopulationSize,cudaMemcpyDeviceToHost);
        }
        for (int j = 0; j< popSize;j++){
            mean += population[j].fitness;
            popResults[j] = population[j].fitness;
            // PrintAgent(population[j]);
        }
        mean /= popSize;
        CondensedResult(popResults,results,mean, popSize,i);
    }
    delete[] popResults;
    delete[] population;
    cudaFree(DDistance);
    cudaFree(DIPopulation);
    cudaFree(DFPopulation);
}
}
|
6,872 | #include "includes.h"
// Cyclically shifts each (n x d) matrix in a batch of b along its row axis:
// row r of batch item i moves to row (r + step[i]) mod n. Negative steps
// wrap correctly via the double-modulo idiom. One thread per element.
__global__ void _roll_array( const float* array, const long* step, float* new_array, const int b, const int n, const int d ) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= b * n * d)
        return;
    // Decompose the flat index into (batch, row, column).
    const int batch = tid / (n * d);
    const int row = (tid / d) % n;
    const int col = tid % d;
    const int shift = int(step[batch]);
    const int dst_row = ((row + shift) % n + n) % n;
    new_array[batch * n * d + dst_row * d + col] = array[tid];
}
6,873 | //
// Example from:
// https://devblogs.nvidia.com/even-easier-introduction-cuda/
//
// Compiles as follows:
// nvcc add_v01.cu -o add_v1_cuda
//
// nvcc is set as follows:
// export PATH=/usr/local/cuda-10.0/bin:$PATH
#include <iostream>
#include <math.h>
// NOTE (JVY): This kernel is only correct for a single thread,
// since every thread that runs it will perform the add
// on the whole array
// Moreover, there is a race condition since multiple
// parallel threads would both read and write the same
// locations.
// CUDA *kernel* function to add the elements of two arrays
//
// The __global__ specifier tells the CUDA C++ compiler that
// this is a function that runs on the GPU and can be called
// from CPU code
//
// These __global__ functions are known as *kernels*, and code
// that runs on the GPU is often called *device code*, while code
// that runs on the CPU is *host code*
//
// Element-wise add of x into y, performed serially by whichever thread
// runs it. Only correct when launched with a single thread (<<<1,1>>>):
// any additional thread would repeat the whole loop and race on y.
__global__
void add(int n, float* x, float* y)
{
    for (int k = 0; k < n; ++k)
    {
        y[k] += x[k];
    }
}
int main(void)
{
    // (more than) one million elements: 1 << 20.
    const int N = 1 << 20;

    // Unified memory: accessible from both host and device, so no explicit
    // cudaMemcpy is needed (contrast with plain cudaMalloc + cudaMemcpy).
    float *x = nullptr;
    float *y = nullptr;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));

    // Fill the inputs on the host.
    for (int i = 0; i < N; i++)
    {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // Launch the kernel with one block of one thread: add() is written as
    // a serial loop and is only correct for a single thread.
    add <<< 1, 1 >>>(N, x, y);

    // Kernel launches are asynchronous: make the host wait for completion
    // before reading the results.
    cudaDeviceSynchronize();

    // Every y[i] should now be exactly 3.0f; report the largest deviation.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
    {
        maxError = fmax(maxError, fabs(y[i]-3.0));
    }
    std::cout << "Max error: " << maxError << std::endl;

    // Release the unified-memory allocations.
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
6,874 | /*
* parallelise for dot product and dw calculation.
*/
#define length_of_features 12
#define examples 455
/*
 * Hinge-loss SGD weight update: one thread per feature, iterating over all
 * training examples. `x` is (total_examples x length_of_features) row-major,
 * `y` holds the labels, `weights` has length_of_features entries and is
 * updated in place.
 *
 * NOTE(review) — several suspicious spots to confirm before relying on this:
 *  - dot_XW (shared) is never initialized or reset between examples, so the
 *    atomicAdd accumulates on top of an undefined value across iterations.
 *  - dot_XW is read immediately after the atomicAdd with no intervening
 *    __syncthreads(), so a thread may observe a partial dot product.
 *  - __syncthreads() sits inside `if (tid < length_of_features)`: if
 *    blockDim.x > length_of_features some threads skip the barrier (UB).
 *  - dw is written at index `tid` but read at index `tx`; these only agree
 *    in a single-block launch.
 *  - rand_index is declared but never used.
 */
__global__ void sgd(float *x, float* y, float* weights,
                    float reg_strength,
                    float learning_rate,
                    int total_examples)
{
    int tid = blockIdx.x*blockDim.x + threadIdx.x;  // global feature index
    int tx = threadIdx.x;                           // feature index within the block
    float val=0;
    float distance;          // hinge margin: 1 - y * (X . W)
    int idx, itr;
    int data_point;
    __shared__ float dw[length_of_features];            // per-feature gradient
    __shared__ float weights_shared[length_of_features]; // block-local weights copy
    __shared__ float rand_index[examples];               // NOTE(review): unused
    float dot_XW_single = 0;
    __shared__ float dot_XW;  // NOTE(review): never initialized/reset (see header)
    if (tid < length_of_features) {
        /* loading weights to shared memory*/
        weights_shared[tx] = weights[tid];
        // if block_size = 16, feature_len = 32,
        __syncthreads();
        for(itr =0; itr < total_examples; itr++) {
            data_point = itr;
            /* x[data_point] is a vector
             * each tid is computing one feature
             * dot_XW_single = np.dot(X, W)
             */
            idx = data_point * length_of_features + tid;
            dot_XW_single = x[idx] * weights_shared[tx];
            atomicAdd(&dot_XW, dot_XW_single);
            distance = 1 - (y[data_point] * dot_XW);
            if (distance > 0) {
                // Margin violated: regularized hinge gradient for this feature.
                dw[tid] = weights_shared[tx]
                          - (reg_strength * y[data_point] * x[idx]);
            } else
                dw[tid] = weights_shared[tx];
            // Gradient-descent step on the shared copy of the weights.
            val = learning_rate * dw[tx];
            weights_shared[tx] = weights_shared[tx] - val;
            __syncthreads();
        }//End--of--Data-Point
        __syncthreads();
        // Write the final weights back to global memory.
        weights[tid] = weights_shared[tx];
    }//End--of--threadId-bound
}//End--of--global
|
6,875 | #include <cstdio>
static const int NUM_THREADS = 128;
static const int MAX_BLOCKS = 32768;
// Flags the first occurrence of every key in a sorted key array:
// uniqueFlags[i] = 1 iff i == 0 or inputKeys[i] != inputKeys[i-1].
// One thread per key; `blocksSoFar` offsets the grid when the launch is
// split into chunks (see gpmrIntIntSorterMarkUnique).
__global__ void gpmrIntIntSorterComputeA(const int blocksSoFar, const int * const inputKeys, int * const uniqueFlags, const int numKeys)
{
  const int index = (blocksSoFar + blockIdx.x) * blockDim.x + threadIdx.x;
  // Guard the grid tail: numKeys is rarely an exact multiple of the block
  // size, and the original wrote/read past the end of both arrays.
  if (index >= numKeys) return;
  if (index == 0) uniqueFlags[index] = 1;
  else if (inputKeys[index] != inputKeys[index - 1]) uniqueFlags[index] = 1;
  else uniqueFlags[index] = 0;
}
// Scatters unique-key positions: for every flagged index i (gpuA[i] != 0),
// writes gpuC[gpuB[0] - gpuB[i]] = i.  Grid-stride loop, so any grid size
// covers all numKeys entries correctly.
__global__ void gpmrIntIntSorterComputeC(const int * const gpuA, const int * const gpuB, int * const gpuC, const int numKeys)
{
  // Cache gpuB[0] once per block instead of reloading it in every iteration.
  __shared__ int firstB;
  if (threadIdx.x == 0) firstB = gpuB[0];
  __syncthreads();
  const int stride = gridDim.x * blockDim.x;
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  while (i < numKeys)
  {
    if (gpuA[i]) gpuC[firstB - gpuB[i]] = i;
    i += stride;
  }
}
// Computes per-unique-key run lengths: gpuD[i] = gpuC[i+1] - gpuC[i], with
// the final entry closed against numKeys.  Chunked launch via blocksSoFar.
__global__ void gpmrIntIntSorterComputeD(const int blocksSoFar, const int * const gpuC, int * const gpuD, const int numUniqueKeys, const int numKeys)
{
  const int index = (blocksSoFar + blockIdx.x) * blockDim.x + threadIdx.x;
  // Guard the grid tail; without it the padding threads of the last block
  // read gpuC[index + 1] past the end of the array and write past gpuD.
  if (index >= numUniqueKeys) return;
  if (index == numUniqueKeys - 1) gpuD[index] = numKeys - gpuC[index];
  else gpuD[index] = gpuC[index + 1] - gpuC[index];
}
// Gather pass: output[i] = keys[input[i]] for i < numKeys.
// Chunked launch via blocksSoFar, one thread per output element.
__global__ void gpmrIntIntSorterSetCompactedKeysKernel(const int blocksSoFar, const int * const keys, const int * const input, int * const output, const int numKeys)
{
  const int globalIndex = (blocksSoFar + blockIdx.x) * blockDim.x + threadIdx.x;
  if (globalIndex >= numKeys) return;
  output[globalIndex] = keys[input[globalIndex]];
}
// Host driver: flags the first occurrence of every key in gpuInputKeys.
// The grid is launched in chunks of at most MAX_BLOCKS blocks so very large
// inputs stay within the 1D grid-dimension limit.
void gpmrIntIntSorterMarkUnique(const void * const gpuInputKeys, void * const gpuUniqueFlags, const int numKeys)
{
  const int totalBlocks = (numKeys + NUM_THREADS - 1) / NUM_THREADS;
  int launched = 0;
  while (launched < totalBlocks)
  {
    const int chunk = (totalBlocks - launched > MAX_BLOCKS) ? MAX_BLOCKS
                                                            : totalBlocks - launched;
    gpmrIntIntSorterComputeA<<<chunk, NUM_THREADS>>>(launched,
                                                     reinterpret_cast<const int * >(gpuInputKeys),
                                                     reinterpret_cast<int * >(gpuUniqueFlags),
                                                     numKeys);
    launched += chunk;
  }
}
// Host driver: first scatters the unique-key positions into gpuC
// (ComputeC, fixed 60-block grid-stride launch), then derives per-key run
// lengths into gpuD (ComputeD), chunking that launch to at most MAX_BLOCKS
// blocks per call.
void gpmrIntIntSorterFindOffsets(const void * const gpuKeys, const void * const gpuA, const void * const gpuB, void * const gpuC, void * const gpuD, const int numKeys, const int numUniqueKeys)
{
  const int SCATTER_BLOCKS = 60;
  gpmrIntIntSorterComputeC<<<SCATTER_BLOCKS, NUM_THREADS>>>(reinterpret_cast<const int * >(gpuA),
                                                            reinterpret_cast<const int * >(gpuB),
                                                            reinterpret_cast<int * >(gpuC),
                                                            numKeys);
  const int totalBlocks = (numUniqueKeys + NUM_THREADS - 1) / NUM_THREADS;
  int launched = 0;
  while (launched < totalBlocks)
  {
    const int chunk = (totalBlocks - launched > MAX_BLOCKS) ? MAX_BLOCKS
                                                            : totalBlocks - launched;
    gpmrIntIntSorterComputeD<<<chunk, NUM_THREADS>>>(launched,
                                                     reinterpret_cast<const int * >(gpuC),
                                                     reinterpret_cast<int * >(gpuD),
                                                     numUniqueKeys,
                                                     numKeys);
    launched += chunk;
  }
}
// Host driver: compacts the key array via the gather kernel, launching in
// chunks of at most MAX_BLOCKS blocks.
void gpmrIntIntSorterSetCompactedKeys(const void * const gpuKeys, const void * const gpuInput, void * const gpuOutput, const int numUniqueKeys)
{
  const int totalBlocks = (numUniqueKeys + NUM_THREADS - 1) / NUM_THREADS;
  int launched = 0;
  while (launched < totalBlocks)
  {
    const int chunk = (totalBlocks - launched > MAX_BLOCKS) ? MAX_BLOCKS
                                                            : totalBlocks - launched;
    gpmrIntIntSorterSetCompactedKeysKernel<<<chunk, NUM_THREADS>>>(launched,
                                                                   reinterpret_cast<const int * >(gpuKeys),
                                                                   reinterpret_cast<const int * >(gpuInput),
                                                                   reinterpret_cast<      int * >(gpuOutput),
                                                                   numUniqueKeys);
    launched += chunk;
  }
}
|
6,876 | #include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#define imin(a,b) (a<b?a:b)
int n, m;
double result;
double * a;
double * dev_a;
// Parses "n m" from the command line and fills the global row-major n x m
// matrix `a` with a[i][j] = 2*i + j.  Aborts (assert) unless exactly two
// arguments are given.
void init (int argc, char* argv[]){
  assert(argc == 3);
  n = atoi(argv[1]);
  m = atoi(argv[2]);
  result = 0.0;
  a = (double *)malloc(n*m*sizeof(double));
  for (int row = 0; row < n; row++)
    for (int col = 0; col < m; col++)
      a[row*m + col] = row*2.0 + col*1.0;
}
// Squares every element of the n x m row-major matrix in place.
// 2D launch: threadIdx.x/blockIdx.x -> column (0..m), .y -> row (0..n).
__global__ void kernel(double *dev_a, int n, int m){
  int col = threadIdx.x + blockIdx.x * blockDim.x;
  int row = threadIdx.y + blockIdx.y * blockDim.y;
  // Guard the grid tail: the host rounds the grid up with ceil(), so
  // threads past the matrix edge must not touch memory (the original
  // wrote out of bounds for sizes not divisible by the block dims).
  if (row >= n || col >= m) return;
  dev_a[m*row+col] = dev_a[m*row+col] * dev_a[m*row+col];
}
// Driver: reads n and m from argv, squares the n x m matrix on the GPU,
// then computes sum over columns j of sqrt(sum over rows of a[i][j]) on the
// host and prints it.  Timing is collected with CUDA events.
int main (int argc, char* argv[]){
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  init(argc, argv);
  dim3 dimBlock(16,16);
  // The kernel maps x -> column (bound m) and y -> row (bound n); the
  // original derived the x-extent from n and the y-extent from m, which
  // left part of a non-square matrix uncovered.
  int dimx = (int) ceil((double)m/dimBlock.x);
  int dimy = (int) ceil((double)n/dimBlock.y);
  dim3 dimGrid(dimx,dimy);
  int size = n*m*sizeof(double);
  cudaMalloc((void**)&dev_a, size);
  cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
  kernel<<<dimGrid,dimBlock>>>(dev_a, n, m);
  cudaError_t err = cudaGetLastError();
  if(err != cudaSuccess)
    printf("Error: %s\n", cudaGetErrorString(err));
  cudaMemcpy( a, dev_a, size, cudaMemcpyDeviceToHost);
  // Column-wise host reduction: total = sum_j sqrt(sum_i a[i][j]).
  double total = 0.0;
  for(int j = 0; j<m; j++){
    double colsum = 0.0;
    for(int i = 0; i<n; i++){
      colsum += a[i*m+j];
    }
    total += sqrt(colsum);
  }
  cudaEventRecord(stop);
  // cudaEventElapsedTime requires the stop event to have completed; the
  // original queried it without this sync and could read garbage.
  cudaEventSynchronize(stop);
  float secs = 0;
  cudaEventElapsedTime(&secs, start, stop);
  secs = secs / 1000;   // ms -> s
  printf("%f\n", total);
#ifdef TIME
  printf("Time: %.2f\n", secs);
#endif
  free(a);
  cudaFree(dev_a);
}
|
6,877 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda.h>
void load(char* file);
__global__ void count(int *A, int *colind,int *block_sums, int nnz);
void gen_colind();
int *A;
int nnz;
int *colind;
struct timeval startwtime, endwtime;
double seq_time;
#define TPB 1024
#define NB 1024
/* Counts triangles (times 6) in a graph given in COO form.
 * A holds the nnz row indices followed by the nnz column indices (1-based);
 * colind[c-1] is the offset of the first entry of column c.
 * Each thread grid-strides over edges, intersecting the sorted adjacency
 * lists of the edge's endpoints; per-thread counts are tree-reduced in
 * dynamic shared memory (blockDim.x ints) into block_sums[blockIdx.x].
 */
__global__ void count(int *A, int *colind, int *block_sums, int nnz){
int idx = blockDim.x*blockIdx.x + threadIdx.x;
extern __shared__ int cache[];
int csum = 0;
int i,j,k,l;
int nextk,nextl;
while(idx < nnz){
i = A[idx];
j = A[nnz + idx];
k = colind[j-1];
l = colind[i-1];
// end of each adjacency list (nnz if it is the last column)
nextk = (j == A[2*nnz-1])?nnz:colind[j];
nextl = (i == A[2*nnz-1])?nnz:colind[i];
do{
if(A[k] > A[l]){
l++;
}
else if(A[k] < A[l]){
k++;
}
else{
csum++;   // common neighbour -> triangle
k++;
l++;
}
}while(k<nextk && l<nextl);
idx += blockDim.x*gridDim.x;
}
cache[threadIdx.x] = csum;
__syncthreads();
//per-block tree reduction
for(int s = blockDim.x/2;s>0;s>>=1){
if(threadIdx.x < s){
cache[threadIdx.x] += cache[threadIdx.x + s];
}
// The barrier must be reached by *every* thread of the block.  The
// original placed it inside the divergent `threadIdx.x < s` branch,
// which is undefined behaviour and can hang or corrupt the reduction.
__syncthreads();
}
if(threadIdx.x == 0) block_sums[blockIdx.x] = cache[0];
}
/* Driver: loads a binary COO edge list, builds per-column offsets, launches
 * the triangle-counting kernel, and reduces the per-block partial sums on
 * the host.  Each triangle is counted 6 times, hence the division below. */
int main(int argc, char **argv){
if(argc != 2){
printf("Usage: %s [filename]\n",argv[0]);
printf("Quiting...\n");
exit(1);
}
load(argv[1]);
gen_colind();
int nthreads_block = TPB;
int nblocks = NB;
int nt;
// host buffer for the per-block partial counts
int *block_sums = (int *)malloc(nblocks*sizeof(int));
int *d_A,*d_colind,*d_block_sums;
cudaMalloc((void **)&d_A,2*nnz*sizeof(int));
// A[2*nnz-1] is the largest (1-based) column index = length of colind
cudaMalloc((void **)&d_colind,A[2*nnz-1]*sizeof(int));
cudaMalloc((void **)&d_block_sums,nblocks*sizeof(int));
cudaMemcpy(d_A,A,2*nnz*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_colind,colind,A[2*nnz-1]*sizeof(int),cudaMemcpyHostToDevice);
//Call kernel and measure time passed
gettimeofday (&startwtime, NULL);
// third launch argument = dynamic shared memory for the block reduction
count<<<nblocks,nthreads_block,nthreads_block*sizeof(int)>>>(d_A,d_colind,d_block_sums,nnz);
cudaDeviceSynchronize();
gettimeofday (&endwtime, NULL);
//Copy partial sums to host
cudaMemcpy(block_sums, d_block_sums, nblocks*sizeof(int), cudaMemcpyDeviceToHost);
//Sum all block sums to solve problem
nt = 0;
for(int i=0;i<nblocks;i++){
nt += block_sums[i];
}
// each triangle is discovered once per ordered edge/vertex pairing -> /6
nt /= 6;
seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e3
+ (endwtime.tv_sec - startwtime.tv_sec)*1.0e3);
printf("Found %d triangles in %f ms\n",nt,seq_time);
free(A);
free(colind);
free(block_sums);
cudaFree(d_A);
cudaFree(d_colind);
cudaFree(d_block_sums);
}
// Reads an entire binary file of int32 values into the global array A and
// sets nnz = (number of ints)/2: the first half holds row indices, the
// second half column indices.  Exits with a message on any I/O failure.
void load(char* file){
FILE *fp = fopen(file,"rb");
if(fp == NULL){
printf("Failed to open file.\nExiting...\n");
exit(1);
}
// file length in bytes -> number of 4-byte integers
fseek(fp,0,SEEK_END);
int size = (int)(ftell(fp) / 4);
fseek(fp,0,SEEK_SET);
A = (int *)malloc(size*sizeof(int));
for(int i = 0; i < size; i++){
int nread = fread(&A[i],sizeof(int),1,fp);
if(nread != 1){
printf("Error reading file!\nExiting...\n");
printf("%d\n",i);
exit(1);
}
}
nnz = size/2;
fclose(fp);
}
// Builds the column-offset table: for every column value c appearing in
// A[nnz..2nnz), colind[c-1] becomes the index of its first occurrence.
// Column values are 1-based and assumed sorted ascending (as loaded).
void gen_colind(){
int lastcol = A[2*nnz-1];
colind = (int *)malloc(lastcol*sizeof(int));
int current = 0;   // column value seen most recently (0 = none yet)
for(int i = 0; i < nnz; i++){
int c = A[nnz+i];
if(c != current){
current = c;
colind[current-1] = i;
}
}
}
|
__global__ void actiune_thread(float* a_d, float* b_d, float *r_d, int N);
// Kernel: reverses an array block-wise through shared memory.  Each block
// stages its slice of a_d reversed in dynamic shared memory, and the slices
// are written out in reverse block order, so r_d receives a_d reversed.
// Requires blockDim.x * sizeof(float) bytes of dynamic shared memory.
__global__ void actiune_thread(float* a_d, float* b_d, float *r_d, int N)
{
    extern __shared__ float s_data[];
    const int srcBase = blockDim.x * blockIdx.x;
    // store this block's element mirrored within the shared slice
    s_data[blockDim.x - 1 - threadIdx.x] = a_d[srcBase + threadIdx.x];
    __syncthreads();
    // write the slice to the mirrored block position in the output
    const int dstBase = blockDim.x * (gridDim.x - 1 - blockIdx.x);
    r_d[dstBase + threadIdx.x] = s_data[threadIdx.x];
}
extern "C"
// Host wrapper: launches actiune_thread and reports the launch status.
// The dynamic shared-memory size is derived from the block size; the former
// hard-coded 1024 bytes silently overflowed for blocks of more than 256
// threads (the kernel needs blockDim.x floats).
cudaError_t launch_actiune_thread(float* a_d, float* b_d,float *r_d,int N,dim3 DIM_GRID, dim3 DIM_BLOCK)
{
    actiune_thread <<<DIM_GRID, DIM_BLOCK, DIM_BLOCK.x * sizeof(float)>>> (a_d, b_d,r_d,N);
    return cudaGetLastError();
}
6,879 | #include "includes.h"
// Multiplies one array element per thread by `val` in place.
// Only threadIdx.x is used, so with a multi-block launch every block would
// touch the same slots — intended for single-block launches.
__global__ void k2_mul(float *data, float val) {
  const float product = data[threadIdx.x] * val;
  data[threadIdx.x] = product;
}
6,880 | /*
* CUDA kernel for pixelwise maximum of all orientation-specific responses
* of a COSFIRE filter.
* Sofie Lovdal 28.6.2018
*/
/*
 * CUDA kernel for pixelwise maximum of all orientation-specific responses
 * of a COSFIRE filter.  input holds numResponses stacked numRows x numCols
 * planes; output[p] = max over planes of input[p + i*numRows*numCols].
 */
__global__ void pixelwiseMax(double * output, double * const input,
	unsigned int const numRows, unsigned int const numCols,
	int const numResponses)
{
	const int colIdx = blockIdx.x*blockDim.x + threadIdx.x;
	const int rowIdx = blockIdx.y*blockDim.y + threadIdx.y;
	/* make sure we are within the image */
	if(colIdx>=numCols || rowIdx >= numRows) return;
	/* pixel to consider in the output image */
	const int linearIdx = rowIdx*numCols + colIdx;
	const int planeSize = numRows*numCols;
	/* Seed the running maximum with the first response rather than 0.0:
	 * the original returned 0.0 whenever every response was negative.
	 * With numResponses <= 0 the output stays 0.0, as before. */
	double best = (numResponses > 0) ? input[linearIdx] : 0.0;
	for(int i=1; i<numResponses; i++) {
		const double value = input[linearIdx+i*planeSize];
		if (value > best) best = value;
	}
	output[linearIdx] = best;
}
|
6,881 | #include "cuda.h"
#include "stdio.h"
// extern __host__ __device__ int MAX(int a, int b) { return a > b ? a : b; }
// extern __host__ __device__ int MIN(int a, int b) { return a < b ? a : b; }
// extern __host__ __device__ int CEIL(int a, int b) { return ( (a) % (b) == 0 ? (a) / (b) : ( (a) / (b) + 1 ) ); }
// Aborts the process with a diagnostic if the most recent CUDA runtime call
// (or kernel launch) left an error behind; no-op otherwise.
void Check_CUDA_Error(const char* message){
  const cudaError_t status = cudaGetLastError();
  if (status == cudaSuccess) return;
  printf("CUDA-ERROR:%s, %s\n",message,cudaGetErrorString(status) );
  exit(-1);
}
|
6,882 | /*
* Copyright (C) 2002-2019 the Network-Based Computing Laboratory
* (NBCL), The Ohio State University.
*
* Contact: Dr. D. K. Panda (panda@cse.ohio-state.edu)
*
* For detailed copyright and licensing information, please refer to the
* copyright file COPYRIGHT in the top level OMB directory.
*/
// Busy-work SAXPY kernel used to generate GPU load for overlap benchmarks:
// each in-range thread repeats y[i] = a*x[i] + y[i] for N/8 iterations.
// The loop bound N/8 is hoisted out of the loop condition.
__global__
void compute_kernel(float a, float * x, float * y, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        const int reps = N / 8;   // loop-invariant; avoids redividing each pass
        for (int count = 0; count < reps; count++) {
            y[i] = a * x[i] + y[i];
        }
    }
}
// C-linkage host wrapper: launches compute_kernel on the caller's stream
// with 256-thread blocks, rounding the grid up to cover all N elements.
extern "C"
void
call_kernel(float a, float * d_x, float * d_y, int N, cudaStream_t * stream)
{
    const int threadsPerBlock = 256;
    const int blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    compute_kernel<<<blocks, threadsPerBlock, 0, *stream>>>(a, d_x, d_y, N);
}
|
6,883 | #include <iostream>
#include <vector>
// In-place elementwise matrix add: m0 += m1 for a row-major matrix whose
// rows have length w (element (i, j) lives at i * w + j, so i ranges over
// the h rows and j over the w columns).
__global__ void matadd( int * m0, int * m1, std::size_t w, std::size_t h )
{
	auto i = blockIdx.x * blockDim.x + threadIdx.x;
	auto j = blockIdx.y * blockDim.y + threadIdx.y;
	// The original guard tested i < w && j < h, i.e. each index against the
	// wrong bound — only correct for square matrices.  i is the row index
	// (bound h) and j the column index (bound w).
	if( i < h && j < w )
		m0[ i * w + j ] += m1[ i * w + j ];
}
// Host driver: builds two 10000-element (100x100) matrices, adds them on
// the GPU with matadd, and prints the kernel time measured with CUDA events.
int main() {
    std::vector< int > v0_h( 10000 );
    std::vector< int > v1_h( 10000 );
    // both inputs hold 0..9999, so the result is 2*i per element
    for( std::size_t i = 0 ; i < v0_h.size(); ++i ) {
        v0_h[ i ] = v1_h[ i ] = i;
    }
    int * v0_d = nullptr;
    int * v1_d = nullptr;
    cudaMalloc( &v0_d, v0_h.size() * sizeof( int ) );
    cudaMalloc( &v1_d, v0_h.size() * sizeof( int ) );
    cudaMemcpy( v0_d, v0_h.data(), v0_h.size() * sizeof( int ), cudaMemcpyHostToDevice );
    cudaMemcpy( v1_d, v1_h.data(), v0_h.size() * sizeof( int ), cudaMemcpyHostToDevice );
    // 4x4 blocks of 32x32 threads = 128x128 threads covering the 100x100 matrix
    dim3 t( 32, 32 );
    dim3 b( 4, 4 );
    cudaEvent_t start, stop;
    cudaEventCreate( &start );
    cudaEventCreate( &stop );
    cudaEventRecord( start );
    matadd<<< b, t >>>( v0_d, v1_d, 100, 100 );
    cudaEventRecord( stop );
    // blocks until the stop event completes, so ElapsedTime below is valid
    cudaEventSynchronize( stop );
    float elapsedTime;
    cudaEventElapsedTime( & elapsedTime, start, stop );
    std::cout << elapsedTime << std::endl;
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    // NOTE(review): err is fetched but never inspected or printed
    auto err = cudaGetLastError();
    cudaMemcpy( v0_h.data(), v0_d, v0_h.size() * sizeof( int ), cudaMemcpyDeviceToHost );
    //for( auto const i: v0_h ) { std::cout << i << std::endl; }
    cudaFree( v0_d );
    cudaFree( v1_d );
    return 0;
}
|
6,884 | //Author: Harindranath Ambalampitiya, PhD(Theoretical atomic and molecular physics)
//Parallel version
#include <iostream>
#include<math.h>
#include<stdio.h>
#include<ctime>
#include<cstdlib>
#include <chrono>
#include<curand_kernel.h>
using namespace std;
using namespace std::chrono;
// initialize random_number generator on the device
//each thread gets the same seed,but different sequence
// Seeds one curand state per thread: every thread shares the same seed but
// gets its own subsequence (its global thread id).
__global__ void rng_init(curandState *state,int seed,int n)
{
    const int tid = blockIdx.x*blockDim.x+threadIdx.x;
    if (tid >= n) return;   // guard the grid tail
    curand_init(seed, tid, 0, &state[tid]);
}
//Let's calculate pi on the device
__global__ void mcpiKernel(curandState *state,int *a, int NSW,float r)
{
//Monte carlo region
float xmin=-r,xmax=r,ymin=-r,ymax=r;
int idx=blockIdx.x*blockDim.x+threadIdx.x;
//copy state to local memory
curandState localState = state[idx];
int sum_in=0;
for(int i=1;i<=NSW;i++)
{
//generate random numbers in the uniform grid (0,1]
//for both x and y coordinates
float ran0 = curand_uniform(&localState);
float ran1 = curand_uniform(&localState);
float x=xmin+(xmax-xmin)*ran0;
float y=ymin+(ymax-ymin)*ran1;
float d=sqrt(x*x+y*y);
//printf("x,y: %f \t %f \n",x,y);
if(d<=r)
sum_in=sum_in+1;
}
//copy local memory to global
state[idx] = localState;
a[idx]=sum_in;
//printf("inside: %i \n",sum_in);
}
// Estimates pi on the GPU with n_procs threads, each performing ~N/n_procs
// Monte-Carlo sweeps; returns 4 * (points inside circle) / (total points).
float cudaPi(int N)
{
// launch configuration
int block_size=256;
int n_blocks=128;
int n_procs=n_blocks*block_size;
// host/device buffers for the per-thread hit counts
size_t size=n_procs *sizeof(int);
int* a_h=(int*)malloc(size);
int* a_d;
cudaMalloc((void **) &a_d, size);
// per-thread RNG states
curandState *devStates;
cudaMalloc((void **) &devStates, n_procs *sizeof(curandState));
// sweeps per thread, rounded up so at least N samples are drawn
int nsw=N/n_procs+(N%n_procs==0 ? 0:1);
// fixed seed -> reproducible estimate
int s=12345;
rng_init<<<n_blocks,block_size>>>(devStates, s, n_procs);
// each thread counts how many of its points lie inside the circle
float r=0.5;//circle radius
mcpiKernel<<<n_blocks,block_size>>>(devStates,a_d,nsw,r);
cudaMemcpy(a_h,a_d, sizeof(int)*n_procs,cudaMemcpyDeviceToHost);
// aggregate hits; sum_out is the total number of samples drawn
float sum_in=0.;
float sum_out=nsw*n_procs;
for(int i=0;i<n_procs;i++)sum_in +=(float)a_h[i];
float pii=4.0f*(sum_in/sum_out);
// release all buffers — the original leaked devStates
free(a_h);
cudaFree(a_d);
cudaFree(devStates);
return pii;
}
// Times the GPU pi estimate over 10M samples and prints the result plus the
// wall-clock duration in milliseconds.
int main()
{
    const auto t0 = high_resolution_clock::now();
    const int N = 10000000;
    const float piEstimate = cudaPi(N);
    const auto t1 = high_resolution_clock::now();
    const auto elapsed = duration_cast<milliseconds>(t1 - t0);
    printf("Pi value is: %f \n ", piEstimate);
    cout<<"Duration (ms)"<<"\t"<<elapsed.count()<<endl;
}
|
6,885 | #define t_max 1
#define t 1
/*
(u[0][0][0][1][0]=(a*((((u[-3][0][0][0][0]+(u[0][-3][0][0][0]+u[0][0][-3][0][0]))*-2.0)+(((u[-2][0][0][0][0]+(u[0][-2][0][0][0]+u[0][0][-2][0][0]))*15.0)+((u[-1][0][0][0][0]+(u[0][-1][0][0][0]+u[0][0][-1][0][0]))*-60.0)))+((u[0][0][0][0][0]*20.0)+(((u[1][0][0][0][0]+(u[0][1][0][0][0]+u[0][0][1][0][0]))*30.0)+((u[2][0][0][0][0]+(u[0][2][0][0][0]+u[0][0][2][0][0]))*-3.0))))))
*/
/* Auto-generated 5th-order 3D upstream stencil kernel (one time step).
 * Each thread computes one output point u_0_1[_idx9] as a weighted sum of
 * 16 neighbouring samples of u_0_0; the _idxN values are pre-simplified
 * linearisations of the 5-point offsets in x, y and z (with a +3 halo and
 * the time-skew terms folded in, t being the compile-time macro above).
 * Do not hand-edit the index arithmetic: it is machine-derived and each
 * assignment's closed form is given in the comment right above it.
 * NOTE(review): u_0_1_out is unused; presumably a leftover of the generator. */
__global__ void upstream_5_3d(double * * u_0_1_out, double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max)
{
//double * const u__u_0[16] = { u_0_0, u_0_1 } ;
int _idx0;
int _idx1;
int _idx10;
int _idx11;
int _idx12;
int _idx13;
int _idx14;
int _idx15;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int _idx7;
int _idx8;
int _idx9;
int idx_1_2;
int p_idx_x;
int p_idx_x_max;
int p_idx_y;
int p_idx_y_max;
int p_idx_z;
int p_idx_z_max;
int size_1_1;
int size_1_2;
//int t;
int tmp;
/*
Initializations
*/
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x));
p_idx_x_max=(p_idx_x+1);
p_idx_y=(threadIdx.y+(tmp*blockDim.y));
p_idx_y_max=(p_idx_y+1);
p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
p_idx_z_max=(p_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0])
*/
/* _idx0 = ((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x) */
_idx0=((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x);
/* _idx1 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+(((((5*p_idx_z)+15)*t)+p_idx_y)*x_max))+(((25*p_idx_z)+75)*(t*t)))+((5*p_idx_y)*t))+p_idx_x)+3) */
_idx1=(((_idx0-(3*x_max))-(15*t))+3);
/* _idx2 = ((((((((p_idx_z*x_max)+((5*p_idx_z)*t))*y_max)+(((((5*p_idx_z)*t)+p_idx_y)+3)*x_max))+((25*p_idx_z)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx2=((((_idx0+(((-3*x_max)-(15*t))*y_max))-((15*t)*x_max))-(75*(t*t)))+3);
/* _idx3 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+1) */
_idx3=(_idx0+1);
/* _idx4 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+1)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+5)*t))+p_idx_x)+3) */
_idx4=((_idx1+x_max)+(5*t));
/* _idx5 = (((((((((p_idx_z+1)*x_max)+(((5*p_idx_z)+5)*t))*y_max)+((((((5*p_idx_z)+5)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+25)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx5=(((_idx2+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
/* _idx6 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+2) */
_idx6=(_idx3+1);
/* _idx7 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+2)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+10)*t))+p_idx_x)+3) */
_idx7=((_idx4+x_max)+(5*t));
/* _idx8 = (((((((((p_idx_z+2)*x_max)+(((5*p_idx_z)+10)*t))*y_max)+((((((5*p_idx_z)+10)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+50)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx8=(((_idx5+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
/* _idx9 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx9=(_idx3+2);
/* _idx10 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+4) */
_idx10=(_idx3+3);
/* _idx11 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+4)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+20)*t))+p_idx_x)+3) */
_idx11=((_idx9+x_max)+(5*t));
/* _idx12 = (((((((((p_idx_z+4)*x_max)+(((5*p_idx_z)+20)*t))*y_max)+((((((5*p_idx_z)+20)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+100)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx12=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
/* _idx13 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+5) */
_idx13=(_idx3+4);
/* _idx14 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+5)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+25)*t))+p_idx_x)+3) */
_idx14=((_idx11+x_max)+(5*t));
/* _idx15 = (((((((((p_idx_z+5)*x_max)+(((5*p_idx_z)+25)*t))*y_max)+((((((5*p_idx_z)+25)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+125)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx15=(((_idx12+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
u_0_1[_idx9]=(a*((((u_0_0[_idx0]+(u_0_0[_idx1]+u_0_0[_idx2]))*-2.0)+(((u_0_0[_idx3]+(u_0_0[_idx4]+u_0_0[_idx5]))*15.0)+((u_0_0[_idx6]+(u_0_0[_idx7]+u_0_0[_idx8]))*-60.0)))+((u_0_0[_idx9]*20.0)+(((u_0_0[_idx10]+(u_0_0[_idx11]+u_0_0[_idx12]))*30.0)+((u_0_0[_idx13]+(u_0_0[_idx14]+u_0_0[_idx15]))*-3.0)))));
}
}
/* Auto-generated companion kernel: writes the constant 0.1 into u_0_0 at
 * all 16 stencil sample positions owned by this thread, then marks the
 * centre point through the u__u_0 indirection table with 1.1.  The _idxN
 * closed forms are given in the comments above each assignment; do not
 * hand-edit the machine-derived index arithmetic.
 * NOTE(review): u__u_0[t] with t == 1 (macro) selects u_0_1, so the final
 * write targets the *output* buffer at _idx9 — confirm this is intended. */
__global__ void initialize(double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max)
{
double * const u__u_0[16] = { u_0_0, u_0_1 } ;
int _idx0;
int _idx1;
int _idx10;
int _idx11;
int _idx12;
int _idx13;
int _idx14;
int _idx15;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int _idx7;
int _idx8;
int _idx9;
int idx_1_2;
int p_idx_x;
int p_idx_x_max;
int p_idx_y;
int p_idx_y_max;
int p_idx_z;
int p_idx_z_max;
int size_1_1;
int size_1_2;
//int t;
int tmp;
/*
Initializations
*/
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x));
p_idx_x_max=(p_idx_x+1);
p_idx_y=(threadIdx.y+(tmp*blockDim.y));
p_idx_y_max=(p_idx_y+1);
p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
p_idx_z_max=(p_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0])
*/
/* _idx0 = ((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x) */
_idx0=((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x);
u_0_0[_idx0]=0.1;
/* _idx1 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+1) */
_idx1=(_idx0+1);
u_0_0[_idx1]=0.1;
/* _idx2 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+2) */
_idx2=(_idx1+1);
u_0_0[_idx2]=0.1;
/* _idx3 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+(((((5*p_idx_z)+15)*t)+p_idx_y)*x_max))+(((25*p_idx_z)+75)*(t*t)))+((5*p_idx_y)*t))+p_idx_x)+3) */
_idx3=(((_idx1-(3*x_max))-(15*t))+2);
u_0_0[_idx3]=0.1;
/* _idx4 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+1)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+5)*t))+p_idx_x)+3) */
_idx4=((_idx3+x_max)+(5*t));
u_0_0[_idx4]=0.1;
/* _idx5 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+2)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+10)*t))+p_idx_x)+3) */
_idx5=((_idx4+x_max)+(5*t));
u_0_0[_idx5]=0.1;
/* _idx6 = ((((((((p_idx_z*x_max)+((5*p_idx_z)*t))*y_max)+(((((5*p_idx_z)*t)+p_idx_y)+3)*x_max))+((25*p_idx_z)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx6=((((_idx1+(((-3*x_max)-(15*t))*y_max))-((15*t)*x_max))-(75*(t*t)))+2);
u_0_0[_idx6]=0.1;
/* _idx7 = (((((((((p_idx_z+1)*x_max)+(((5*p_idx_z)+5)*t))*y_max)+((((((5*p_idx_z)+5)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+25)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx7=(((_idx6+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
u_0_0[_idx7]=0.1;
/* _idx8 = (((((((((p_idx_z+2)*x_max)+(((5*p_idx_z)+10)*t))*y_max)+((((((5*p_idx_z)+10)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+50)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx8=(((_idx7+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
u_0_0[_idx8]=0.1;
/* _idx9 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx9=(_idx1+2);
u_0_0[_idx9]=0.1;
/* _idx10 = (((((((((p_idx_z+4)*x_max)+(((5*p_idx_z)+20)*t))*y_max)+((((((5*p_idx_z)+20)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+100)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx10=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
u_0_0[_idx10]=0.1;
/* _idx11 = (((((((((p_idx_z+5)*x_max)+(((5*p_idx_z)+25)*t))*y_max)+((((((5*p_idx_z)+25)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+125)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+3) */
_idx11=(((_idx10+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
u_0_0[_idx11]=0.1;
/* _idx12 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+4)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+20)*t))+p_idx_x)+3) */
_idx12=((_idx9+x_max)+(5*t));
u_0_0[_idx12]=0.1;
/* _idx13 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+5)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+25)*t))+p_idx_x)+3) */
_idx13=((_idx12+x_max)+(5*t));
u_0_0[_idx13]=0.1;
/* _idx14 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+4) */
_idx14=(_idx1+3);
u_0_0[_idx14]=0.1;
/* _idx15 = (((((((((p_idx_z+3)*x_max)+(((5*p_idx_z)+15)*t))*y_max)+((((((5*p_idx_z)+15)*t)+p_idx_y)+3)*x_max))+(((25*p_idx_z)+75)*(t*t)))+(((5*p_idx_y)+15)*t))+p_idx_x)+5) */
_idx15=(_idx1+4);
u_0_0[_idx15]=0.1;
u__u_0[t][_idx9]=1.1;
}
}
|
6,886 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define MAX_THREADS_PER_BLOCK 1024
#define RANDMAX 100
// Arithmetic mean of y[s..e) — a half-open range containing e - s elements.
// The original summed e - s values but divided by (e - s + 1), biasing every
// mean low; it also declared an unused shadowed outer loop index.
__device__ float mean(float *y, int s, int e) {
  float total = 0;
  for (int i = s; i < e; i++) {
    total += y[i];
  }
  return total / (e - s);
}
/* For each starting position `me`, searches all windows of length >= k and
 * records the best windowed mean found by that thread into out[me].
 * Dynamic shared memory must hold n floats (the whole input is staged).
 *
 * NOTE(review): `perend` is read (by mean() and in the incremental update)
 * but NEVER assigned — its value is indeterminate, so the results are
 * garbage.  It presumably should track the window end, e.g.
 * perstart + perlen; confirm intended semantics before fixing.
 * NOTE(review): each block only writes its own threads' slots of the shared
 * array, yet mean() reads arbitrary ranges of it — for more than one block
 * those ranges include uninitialized shared memory.
 * NOTE(review): mystartmax/myendmax are computed but never output; the
 * trailing __syncthreads() has no effect at the end of the kernel.
 */
__global__ void fillMaxArray(float *x, int n, int k, float *out) {
extern __shared__ float s[];
int me = blockDim.x * blockIdx.x + threadIdx.x;
// copy global data to shared data
s[me] = x[me];
int perstart; // period start
int perlen; // period length
int perend; // perlen end
int pl1; // perlen - 1
// for best found by this thread so far
int mystartmax; // start location
int myendmax; // end location
float mymaxval; // max value
float xbar; // scratch variable
mymaxval = -1;
for (perstart = me; perstart <= n - k; perstart ++) {
for (perlen = k; perlen <= n - perstart; perlen ++) {
if (perlen == k) {
// full mean for the first (shortest) window at this start
xbar = mean(s, perstart, perend);
}
else {
// incremental update: extend the window by one element
pl1 = perlen - 1;
xbar = (pl1 * xbar + s[perend]) / perlen;
}
if (xbar > mymaxval) {
mymaxval = xbar;
mystartmax = perstart;
myendmax = perend;
}
}
}
out[me] = mymaxval;
__syncthreads();
}
/* Driver: fills an array with n random floats, runs fillMaxArray to obtain
 * each thread's best windowed mean (window length >= k), and prints the
 * per-thread maxima.  Usage: prog <n> <k>, where n should be a multiple of
 * MAX_THREADS_PER_BLOCK. */
int main(int argc, char** argv) {
  // basic argument validation (the original dereferenced argv blindly)
  if (argc < 3) {
    fprintf(stderr, "Usage: %s <n> <k>\n", argv[0]);
    return 1;
  }
  int n = atoi(argv[1]); // array input size
  int k = atoi(argv[2]); // window size
  // size of input array in bytes
  int input_size = n * sizeof(float);
  // host input and output arrays
  float *h_in = (float*)malloc(input_size);
  float *h_out = (float*)malloc(input_size);
  // fill host array with random numbers
  for (int i = 0; i < n; i++) {
    h_in[i] = rand() / (float)RANDMAX;
  }
  // device input and output arrays
  float *d_in;
  float *d_out;
  cudaMalloc((void**) &d_in, input_size);
  cudaMalloc((void**) &d_out, input_size);
  // copy host input to device input
  cudaMemcpy(d_in, h_in, input_size, cudaMemcpyHostToDevice);
  // kernel invocation parameters
  int NUM_BLOCKS = n / MAX_THREADS_PER_BLOCK;
  dim3 dimGrid(NUM_BLOCKS, 1);
  dim3 dimBlock(MAX_THREADS_PER_BLOCK, 1);
  // kernel invocation (dynamic shared memory stages the whole input)
  fillMaxArray <<< dimGrid, dimBlock, input_size>>> (d_in, n, k, d_out);
  // Copy result device -> host.  The original swapped source and
  // destination (cudaMemcpy(d_out, h_out, ..., DeviceToHost)), so h_out
  // was never filled; it also copied through the never-allocated
  // d_bigmax/h_bigmax pair and printed a pointer with %f — removed.
  cudaMemcpy(h_out, d_out, input_size, cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; i++) {
    printf("h_out%d: %f\n", i, h_out[i]);
  }
  // free host and device memory (the original leaked all four buffers)
  free(h_in);
  free(h_out);
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}
// __global__ void findMax(float *x, int n, int k, int *startend, float *bigmax) {
// // shared memory of size: input_size
// extern __shared__ float s[];
// // thread id
// int me = blockDim.x * blockIdx.x + threadIdx.x;
// printf("me: %d", me);
// // copy global data to shared data
// s[me] = x[me];
// int perstart; // period start
// int perlen; // period length
// int perend; // perlen end
// int pl1; // perlen - 1
// // for best found by this thread so far
// int mystartmax; // start location
// int myendmax; // end location
// float mymaxval; // max value
// float xbar; // scratch variable
// mymaxval = -1;
// for (perstart = me; perstart <= n - k; perstart ++) {
// for (perlen = k; perlen <= n - perstart; perlen ++) {
// if (perlen == k) {
// xbar = mean(s, perstart, perend);
// }
// else {
// // update hold mean
// pl1 = perlen - 1;
// xbar = (pl1 * xbar + s[perend]) / perlen;
// }
// if (xbar > mymaxval) {
// mymaxval = xbar;
// mystartmax = perstart;
// myendmax = perend;
// }
// }
// }
// __syncthreads();
// if (mymaxval > *bigmax) {
// *bigmax = mymaxval;
// startend[0] = mystartmax;
// startend[1] = myendmax;
// }
// }
|
6,887 | // Compile: nvcc -arch=sm_61 -std=c++11 assignment5-p3.cu -o assignment5-p3
#include <cmath>
#include <iostream>
#include <sys/time.h>
#define SIZE 4096
#define THRESHOLD (0.000001)
using namespace std;
// Wall-clock time in seconds since the epoch, with microsecond resolution
// (gettimeofday).  Prints a diagnostic if the syscall fails.
double rtclock() {
  struct timeval now;
  struct timezone tz;
  const int stat = gettimeofday(&now, &tz);
  if (stat != 0) {
    cout << "Error return from gettimeofday: " << stat << "\n";
  }
  return now.tv_sec + now.tv_usec * 1.0e-6;
}
// Accumulates P += M^T * M for SIZE x SIZE matrices:
// P[i][j] += sum over k of M[k][i] * M[k][j].
// Keeping k outermost preserves the original's row-sequential access to M
// (and the exact floating-point summation order).
__host__ void ATAonCPU(double** M, double** P) {
  for (int k = 0; k < SIZE; k++)
    for (int row = 0; row < SIZE; row++)
      for (int col = 0; col < SIZE; col++)
        P[row][col] += M[k][row] * M[k][col];
}
// Compares Test against Ref element-wise; reports how many entries differ
// by more than THRESHOLD and the largest absolute such difference.
__host__ void check_result(double** Test, double** Ref) {
  double maxdiff = 0, rel_diff = 0;
  int numdiffs = 0;
  for (int i = 0; i < SIZE; i++) {
    // The original inner loop tested `i < SIZE` instead of `j < SIZE`, so
    // it either skipped every row or never terminated.
    for (int j = 0; j < SIZE; j++) {
      rel_diff = (Test[i][j] - Ref[i][j]);
      if (fabs(rel_diff) > THRESHOLD) {
        numdiffs++;
        // track the magnitude so large negative deviations count too
        if (fabs(rel_diff) > maxdiff)
          maxdiff = fabs(rel_diff);
      }
    }
  }
  if (numdiffs > 0)
    cout << numdiffs << " Diffs found over THRESHOLD " << THRESHOLD << " Max Diff = " << maxdiff
         << "\n";
  else
    cout << "No differences found between base and test versions\n";
}
#ifndef SIZE
#define SIZE 4096   // fallback only; the file defines SIZE above
#endif
// Computes P += A^T * A for a SIZE x SIZE row-major matrix (flattened).
// Launch with a 2D grid covering SIZE x SIZE; each in-range thread owns
// one output element and accumulates the dot product of columns i and j
// of A (matching ATAonCPU's result).
__global__ void ATAkernel(double* A, double* B) {
  int i = blockIdx.y * blockDim.y + threadIdx.y;  // row of the result
  int j = blockIdx.x * blockDim.x + threadIdx.x;  // column of the result
  if (i >= SIZE || j >= SIZE) return;             // guard the grid tail
  double acc = 0.0;
  for (int k = 0; k < SIZE; k++)
    acc += A[k * SIZE + i] * A[k * SIZE + j];
  B[i * SIZE + j] += acc;
}
// Driver: builds a SIZE x SIZE input, computes A^T.A serially, times the
// (still unimplemented) GPU path, and compares the two results.
int main() {
  cout << "Matrix Size = " << SIZE << "\n";
  // Input matrix A, serial result O_s, parallel result O_p.
  double** A = new double*[SIZE];
  for (int i = 0; i < SIZE; i++) {
    A[i] = new double[SIZE];
  }
  double** O_s = new double*[SIZE];
  for (int i = 0; i < SIZE; i++) {
    O_s[i] = new double[SIZE];
  }
  double** O_p = new double*[SIZE];
  for (int i = 0; i < SIZE; i++) {
    O_p[i] = new double[SIZE];
  }
  for (int i = 0; i < SIZE; i++) {
    for (int j = 0; j < SIZE; j++) {
      A[i][j] = i * j * 0.25;
      O_s[i][j] = 0;
      O_p[i][j] = 0;
    }
  }
  double clkbegin, clkend;
  double t;
  clkbegin = rtclock();
  ATAonCPU(A, O_s);
  clkend = rtclock();
  t = clkend - clkbegin;
  cout << "A^T.A on CPU: " << (2.0 * SIZE * SIZE * SIZE / t / 1.0e9)
       << " GFLOPS; Time = " << t * 1000 << " msec\n";
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start, 0);
  // SB: Write your GPU kernel here
  cudaEventRecord(end, 0);
  // bug fix: the elapsed time is undefined until the 'end' event completes.
  cudaEventSynchronize(end);
  float kernel_time;
  cudaEventElapsedTime(&kernel_time, start, end);
  // bug fix: the GPU GFLOPS line previously divided by the CPU time 't';
  // kernel_time is in milliseconds, so convert to seconds here.
  cout << "A^T.A on GPU: " << (2.0 * SIZE * SIZE * SIZE / (kernel_time * 1.0e-3) / 1.0e9)
       << " GFLOPS; Time = " << kernel_time << " msec\n";
  check_result(O_p, O_s);
  // Release GPU events and host matrices (previously leaked).
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  for (int i = 0; i < SIZE; i++) {
    delete[] A[i];
    delete[] O_s[i];
    delete[] O_p[i];
  }
  delete[] A;
  delete[] O_s;
  delete[] O_p;
  return EXIT_SUCCESS;
}
|
6,888 | /* ECGR 6090 Heterogeneous Computing Homework0
Problem 2- 1D stencil using GPU and shared memory
Written by Bhavin Thakar - 801151488
*/
// To execute the program type: ./1dstencilsharedmemory
#include<stdio.h>
#include <sys/time.h>
struct timeval stop, start,start1,stop1;
#define R 4 // Defining radius as 4
#define B 128 // Defining Thread Block Size as 128
#define N 1000000 // Defining Number of Elements as 1M
// Kernel Function
// 1D radius-R sum stencil: out[i] = sum of in[i-R .. i+R], staged through
// shared memory one block (B elements + 2R halo) at a time.
// Assumes blockDim.x == B and that the grid covers exactly the output range:
// there is no bounds guard for a partial final block.
// NOTE(review): for block 0, gindex - R is negative, so this reads before
// in[0]; the usual convention is for the caller to pass a pointer offset by
// R into a padded buffer — confirm against the launch site.
__global__ void stencil1d(int *in, int *out){
__shared__ int temp[B + 2 * R]; // Declaring a shared integer array
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + R;
temp[lindex] = in[gindex]; //storing in shared memory
// The first R threads also load the left and right halo cells.
if (threadIdx.x < R)
{
temp[lindex - R] = in[gindex - R];
temp[lindex + B] = in[gindex + B];
}
// Barrier: all shared-memory writes must land before any thread reads.
__syncthreads();
int result = 0;
for (int offset = -R ; offset <= R ; offset++)
{
result += temp[lindex + offset];
}
// Store the result
out[gindex] = result;
}
// random function to generate random numbers
// Fill a[0..n-1] with pseudo-random values in [0, 100). Uses rand() without
// seeding, so the sequence is deterministic across runs.
void random(int *a, int n ){
int i;
// bug fix: the loop ran "i <= n+1", writing two elements past the
// requested count (a heap overflow for an exactly-sized buffer).
for (i = 0; i < n; ++i)
a[i] = rand()%100;
}
// Driver: allocate padded input (N + 2R halo), run the shared-memory stencil
// on the GPU, time it, and copy the N results back.
int main(void){
int n;
int *c_in, *c_out; // integer arrays for CPU
// bug fix: 'size' was N*sizeof(int), which inflated every allocation and
// copy below by a factor of N. It is now the size of one element.
int size = sizeof(int);
n = N + 2 * R; // input length including an R-element halo on each side
// Allocating memory for CPU integer array.
// +2 extra slots tolerate random()'s historical inclusive fill loop.
c_in = (int*)malloc((n + 2) * size);
c_out = (int*)malloc(N * size);
random(c_in, n); // Calling random function
int *d_in, *d_out; // integer arrays for GPU
// Allocating memory for GPU integer array
cudaMalloc(&d_in, n * size);
cudaMalloc(&d_out, N * size);
// Copying input from CPU to GPU
cudaMemcpy(d_in, c_in, n * size, cudaMemcpyHostToDevice);
gettimeofday(&start, NULL);
// bug fix: the grid was (N/B-1)/B blocks (~64x too few). N/B blocks cover
// B*(N/B) elements; the final N%B elements stay unprocessed because the
// kernel has no bounds guard for a partial block.
// NOTE(review): the kernel reads in[gindex - R]; with d_in passed unshifted
// block 0 reads before the buffer — consider passing d_in + R instead.
stencil1d<<<N / B, B>>>(d_in, d_out); // Calling Kernel Function
// bug fix: launches are asynchronous — synchronize before stopping the clock.
cudaDeviceSynchronize();
gettimeofday(&stop, NULL);
printf("Execution time of kernel: %lu us\n", (stop.tv_sec - start.tv_sec) * 1000000 + stop.tv_usec - start.tv_usec);
// Copying back the results from GPU to CPU
// bug fix: was n*size, which overflowed c_out (sized for N elements).
cudaMemcpy(c_out, d_out, N * size, cudaMemcpyDeviceToHost);
// Free resources
free(c_in);
free(c_out);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
6,889 | #include <iostream>
#include <chrono>
using namespace std;
//CPU
typedef unsigned int uint;
uint i = 0;
int gpuCount = 0;
void initalizeHost( float *ip, uint size );
//GPU
cudaDeviceProp gpuProperties;
const uint N = 1E7;
const uint nThreads = 512;
const uint nBlocks = ( N / nThreads ) + 1;
const uint UNROLLING = 16; //check [ 8 16 32 64 ]; I would guess sixteen times unrolling;
__global__ void nop(){};
// Single-thread busy loop over N iterations with no observable effect.
// NOTE(review): 'a' is dead, so the compiler is free to remove the whole
// loop — timings of this kernel may measure nothing. Confirm with SASS/nvprof.
__global__ void trivialLoop()
{
uint a = 0;
for ( uint32_t i = 0; i < N; i++ )
{
a = i;
a++;
}
};
// Same dead loop as trivialLoop, but with an unroll request, to compare
// unrolled vs. plain codegen timing.
// NOTE(review): macros are not expanded inside #pragma on all toolchains —
// verify nvcc actually unrolls by UNROLLING here rather than ignoring it.
__global__ void unrollTrivialLoop()
{
uint a = 0;
#pragma unroll UNROLLING //briliant feature
for ( uint32_t i = 0; i < N; i++ )
{
a = i;
a++;
}
};
// Scale each of the streamSize elements by 0.9f, one thread per element,
// guarded against the partial final block.
__global__ void initializeDevice( float *d_in, const uint streamSize )
{
	const uint idx = blockIdx.x * blockDim.x + threadIdx.x;
	if ( idx < streamSize )
	{
		d_in[ idx ] *= 0.9f;
	}
};
// Serial accumulation kernel: a SINGLE thread walks the whole buffer and adds
// the index to each element (launched <<<1,1>>> in main, so no races).
// streamNo is accepted but unused — kept for signature symmetry with callers.
__global__ void loop( float *d_in, const uint streamNo, const uint streamSize )
{
size_t ii = 0;
for ( ii = 0; ii < streamSize; ii++ )
{
d_in[ ii ] += float( ii );
}
};
// Same serial accumulation as loop(), with an unroll request, so the two
// kernels' timings can be compared. streamNo is unused (signature symmetry).
__global__ void unrollLoop( float *d_in, const uint streamNo, const uint streamSize )
{
size_t ii = 0;
#pragma unroll UNROLLING //adjustable mainly to register-only, high performance computations - described at unrollTrivialLoop device kernel
for ( ii = 0; ii < streamSize; ii++ )
{
d_in[ ii ] += float( ii );
}
};
// Per-GPU benchmark driver: allocates pinned host buffers plus a device
// buffer and stream per GPU, then times plain vs. unrolled kernels.
int main( void )
{
	cudaGetDeviceCount( &gpuCount );
	// bug fix: with no CUDA device the VLAs below would have size 0 (UB).
	if ( gpuCount < 1 )
	{
		printf( "no CUDA devices found\n" );
		return 1;
	}
	//HOST
	float *h_arr[ gpuCount ]; // float **h_arr = ( float** )malloc( sizeof( float * ) * gpuCount ); //alternatively
	uint perDevN = N / gpuCount;
	uint perDevNBytes = sizeof( float ) * perDevN ;
	//DEVICE
	cudaStream_t stream[ gpuCount ];
	float *d_arr[ gpuCount ]; // float **d_arr = ( float** )malloc( sizeof( float * ) * gpuCount ); //alternatively
	//allocate & initialize H,D memories
	for ( i = 0; i < gpuCount; i++ )
	{
		//HOST: pinned allocation enables async copies below
		cudaMallocHost( ( void** ) &h_arr[ i ], perDevNBytes );
		initalizeHost( h_arr[ i ], perDevN );
		//DEVICE
		cudaSetDevice( i );
		cudaMalloc( ( void** ) &d_arr[ i ], perDevNBytes );
		cudaStreamCreate( &stream[ i ] );
	}
	//DEVICE computations
	for ( i = 0; i < gpuCount; i++ )
	{
		cudaSetDevice( i );
		cudaGetDeviceProperties( &gpuProperties, i );
		cout << endl << gpuProperties.name << ": " << endl;
		auto t1 = chrono::high_resolution_clock::now();
		trivialLoop<<< 1, 1 >>>();
		// bug fix: kernel launches are asynchronous; without a device sync the
		// CPU timer measured only launch overhead (nop<<<1,1>>> does not wait).
		cudaDeviceSynchronize();
		auto t2 = chrono::high_resolution_clock::now();
		uint elapsed = uint( chrono::duration_cast< chrono::nanoseconds >( t2 - t1 ).count() );
		printf( "trivial loop elapsed: %d \n", elapsed );
		t1 = chrono::high_resolution_clock::now();
		unrollTrivialLoop<<< 1, 1 >>>();
		cudaDeviceSynchronize();
		t2 = chrono::high_resolution_clock::now();
		elapsed = chrono::duration_cast< chrono::nanoseconds >( t2 - t1 ).count();
		printf( "trivial unrolled loop elapsed: %d \n", elapsed );
		cudaMemcpyAsync( d_arr[ i ], h_arr[ i ], perDevNBytes, cudaMemcpyHostToDevice, stream[ i ] );
		initializeDevice<<< nBlocks, nThreads, 0, stream[ i ] >>>( d_arr[ i ], perDevN );
		cudaStreamSynchronize( stream[ i ] ); // drain setup work before timing
		t1 = chrono::high_resolution_clock::now();
		loop<<< 1, 1, 0, stream[ i ] >>>( d_arr[ i ], i, perDevN );
		cudaStreamSynchronize( stream[ i ] );
		t2 = chrono::high_resolution_clock::now();
		elapsed = chrono::duration_cast< chrono::nanoseconds >( t2 - t1 ).count();
		printf( "loop elapsed: %d \n", elapsed );
		t1 = chrono::high_resolution_clock::now();
		unrollLoop<<< 1, 1, 0, stream[ i ] >>>( d_arr[ i ], i, perDevN );
		cudaStreamSynchronize( stream[ i ] );
		t2 = chrono::high_resolution_clock::now();
		elapsed = chrono::duration_cast< chrono::nanoseconds >( t2 - t1 ).count();
		printf( "unrolled loop elapsed: %d \n", elapsed );
	}
	//free memories
	for ( i = 0; i < gpuCount; i++ )
	{
		//HOST
		cudaFreeHost( h_arr[ i ] );
		//DEVICE
		cudaSetDevice( i );
		cudaFree( d_arr[ i ] );
		cudaStreamDestroy( stream[ i ] );
	}
	cudaDeviceReset();
	return 0;
}
// Fill a host buffer of 'size' floats with the constant 1.2f.
void initalizeHost( float *ip, uint size )
{
	for ( size_t idx = 0; idx < size; ++idx )
		ip[ idx ] = 1.2f;
};
//Post Scriptum: In my professional opinion, coprocessors: GTX1080ti is brand-new and off-the-shell optimal; GTX770 is used optimal - I've heard about R9Nano and HD5770 ( GFLOPS/USD; GFLOPS/W; QualityWithBandwidthAndMemSize/Price; );
//Post Post Scriptum: I do strongly recommend profiling with NVidia's NVPROF profiling tool, instead of CPU high-resolution timer.
|
6,890 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Store the squared sum (a + b)^2 into *c.
__global__ void addKernel(int a, int b, int *c)
{
    const int s = a + b;
    *c = s * s;
}
// Fill kernel: each thread writes the constant g at its global index.
// No bounds guard — the launch must cover exactly the vector length.
__global__ void setVectorKernel(int *v, int g)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    v[idx] = g;
}
// Element-wise vector add, out = a + b, one thread per element with a guard
// for the partial final block.
__global__ void addVectorKernel(int* a, int* b, int* out, int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    out[idx] = a[idx] + b[idx];
}
// Daubechies-4 forward DWT followed by its inverse on a 32-sample vector,
// staged through shared memory. Apparently intended for a single block of
// VECTOR_HALF_SIZE (16) threads, each producing one smooth + one detail pair.
// NOTE(review): res[] and res2[] are never zero-initialized before the "+="
// accumulations below — shared memory starts undefined, so the output is
// garbage unless a zeroing step is added.
// NOTE(review): in the inverse pass, different threads write overlapping
// res2[k] slots with += and no atomics — a data race whenever more than one
// thread runs. Confirm intended launch geometry before relying on results.
__global__ void dwtKernel(float* input, float* output)
{
const int VECTOR_SIZE = 32;
const int VECTOR_HALF_SIZE = 16;
__shared__ float res[VECTOR_SIZE];
__shared__ float res2[VECTOR_SIZE];
// scaled and coefficients for Daubechies 4 wavelet
const int WaveletLength = 4;
float Coefficients[WaveletLength];
Coefficients[0] = 0.4829629131;
Coefficients[1] = 0.8365163037;
Coefficients[2] = 0.2241438680;
Coefficients[3] = -0.1294095226;
float Scales[WaveletLength];
Scales[0] = Coefficients[3];
Scales[1] = -Coefficients[2];
Scales[2] = Coefficients[1];
Scales[3] = -Coefficients[0];
int dx= threadIdx.x;
int k = 0;
// Forward pass: each thread accumulates one smooth (suppressed to 0 here)
// and one detail coefficient, with circular wrap at the vector boundary.
for (int i = 0; i < WaveletLength; i++)
{
k = (dx*2)+i;
if(k>=VECTOR_SIZE){k -= VECTOR_SIZE;}
// set to zero for smoothing purposes, otherwise use commented formula
res[dx] += 0; // input[k] * Scales[i];
res[dx + VECTOR_HALF_SIZE] += input[k]*Coefficients[i];
}
// wait for DWT
__syncthreads();
// Inverse pass: scatter-accumulate each thread's pair back into res2.
for (int i = 0; i < WaveletLength; i++)
{
k = (dx*2)+i;
if(k>=VECTOR_SIZE) {k-=VECTOR_SIZE;}
res2[k] += (res[dx] *Scales[i] + res[dx + VECTOR_HALF_SIZE] * Coefficients[i]);
}
// wait for inverse transform
__syncthreads();
output[dx] = res2[dx];
output[dx + VECTOR_HALF_SIZE] = res2[dx+VECTOR_HALF_SIZE];
}
// Print device properties
// Dump the interesting fields of a cudaDeviceProp to stdout, one per line.
void printDevProp(cudaDeviceProp devProp)
{
printf("Major revision number: %d\n", devProp.major);
printf("Minor revision number: %d\n", devProp.minor);
printf("Name: %s\n", devProp.name);
// bug fix: totalGlobalMem, sharedMemPerBlock, memPitch, totalConstMem and
// textureAlignment are size_t; printing them with "%u" is undefined and
// truncates on LP64 platforms — use "%zu".
printf("Total global memory: %zu\n", devProp.totalGlobalMem);
printf("Total shared memory per block: %zu\n", devProp.sharedMemPerBlock);
printf("Total registers per block: %d\n", devProp.regsPerBlock);
printf("Warp size: %d\n", devProp.warpSize);
printf("Maximum memory pitch: %zu\n", devProp.memPitch);
printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
for (int i = 1; i <= 3; ++i)
printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i-1]);
for (int i = 1; i <= 3; ++i)
printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i-1]);
printf("Clock rate: %d\n", devProp.clockRate);
printf("Total constant memory: %zu\n", devProp.totalConstMem);
printf("Texture alignment: %zu\n", devProp.textureAlignment);
printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
return;
}
// Enumerate all CUDA devices and print their properties, then wait for a key.
int main()
{
    int devCount = 0;
    cudaGetDeviceCount(&devCount);
    printf("CUDA Device Query...\n");
    printf("There are %d CUDA devices.\n", devCount);
    for (int dev = 0; dev < devCount; ++dev)
    {
        printf("\nCUDA Device #%d\n", dev);
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printDevProp(props);
    }
    printf("\nPress any key to exit...");
    char c;
    scanf("%c", &c);
    return 0;
}
|
6,891 | #include "includes.h"
// Tiled outer-product style GEMM fragment: each block (indexed by Tile/Part)
// computes a 2048-element slab of C from a slab of A and an 8-step sweep over
// B, staged through dynamically-sized shared memory.
// Shared-memory layout: input[0..2047] | kernel[2048..10239] | out[10240..].
// Assumes a 2-D thread block whose tY*256+tX covers [0,1024) (e.g. 256x4) and
// a launch with at least (2048+8192+2048)*sizeof(float) dynamic shared bytes.
// NOTE(review): dimensions are hard-coded via the shifts ((Tile<<12) etc.) —
// confirm against the host-side launch before reusing.
__global__ void kernel_256_OuterProduct_256(float *A, float *B, float *C) {
int Tile = blockIdx.x, Part = blockIdx.y, tX = threadIdx.x, tY = threadIdx.y;
int c_input = tY*256 + tX, c_kernel = c_input, T_offset = (Tile<<12) + (Part<<11) + c_input, B_offset = (Tile<<16) + c_kernel;
extern __shared__ float input[];
float *kernel = input + 2048, *out = kernel + 8192;
int B_stride[32] = {0, 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, 4096, 4352, 4608, 4864, 5120, 5376, 5632, 5888, 6144, 6400, 6656, 6912, 7168, 7424, 7680, 7936};
// Zero the accumulators and stage this block's slab of A.
out[c_input] = 0.0f;
out[c_input+1024] = 0;
input[c_input] = A[T_offset];
input[c_input+1024] = A[T_offset+1024];
// Sweep B in 8 chunks of 32 columns; each iteration loads 8192 floats of B.
for (int k = 0; k < 8; k++) {
int B_start = B_offset + (k<<13); // 32*64
kernel[c_kernel] = B[B_start], kernel[c_kernel+1024] = B[B_start+1024];
kernel[c_kernel+2048] = B[B_start+2048], kernel[c_kernel+3072] = B[B_start+3072];
kernel[c_kernel+4096] = B[B_start+4096], kernel[c_kernel+5120] = B[B_start+5120];
kernel[c_kernel+6144] = B[B_start+6144], kernel[c_kernel+7168] = B[B_start+7168];
__syncthreads();
// 32-wide inner product per thread, two output rows at a time.
float sum = 0, sum1 = 0;
int y_tmp = (tY<<8)+(k<<5), y_tmp1 = y_tmp+1024;
for (int j = 0; j < 32; j++) {
sum += input[y_tmp + j] * kernel[tX + B_stride[j]];
sum1 += input[y_tmp1 + j] * kernel[tX + B_stride[j]];
}
out[c_input] += sum;
out[c_input+1024] += sum1;
// Barrier before the next iteration overwrites kernel[].
__syncthreads();
}
C[T_offset] = out[c_input];
C[T_offset+1024] = out[c_input+1024];
}
6,892 | #include "includes.h"
// Max-pooling backward pass: for each pooled output cell, find the argmax of
// the (masked) input window and route the incoming gradient adj to that
// single input position in adjIn. One thread per pooled cell.
// NOTE(review): rowId/colId are derived by dividing tid by adjRows (not
// adjCols), and the final adj index is rowId + colId*adjCols — this implies a
// column-major adj layout; confirm against the caller before changing.
__global__ void gMaxPoolingBackward(float* adj, int adjRows, int adjCols, float* in, float* adjIn, int inRows, int inCols, float* mask, int numKernels, int maskCols, int width, int lastWidth) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid >= adjRows * adjCols)
return;
int rowId = tid / adjRows;
int colId = tid % adjRows;
// Start of this cell's pooling window in the input row.
float* b = in + (rowId * inCols) + (colId * width);
// The last window may be narrower than the others.
if(colId == adjRows - 1) {
width = lastWidth;
}
float* localMask = mask + (rowId / numKernels) * maskCols + colId * width;
// Masked argmax over the window.
size_t currentMaxIdx = 0;
for(int i = 1; i < width; ++i) {
if(b[i] * localMask[i] > b[currentMaxIdx] * localMask[currentMaxIdx]) {
currentMaxIdx = i;
}
}
// Accumulate the upstream gradient at the winning input position.
adjIn[(rowId * inCols) + (colId * width) + currentMaxIdx]
+= adj[rowId + (colId * adjCols)];
}
6,893 | #include <iostream>
#include <fstream>
#include <stdio.h>
#include <unistd.h>
#include <string>
using namespace std;
void printBoard(char array[], int size, int width);
__global__
void getNeighbours(char startBoard[], char finalBoard[], int width, int height);
// Conway's Game of Life driver.
// Usage: prog -i <iters> <file>        (print first and final board)
//        prog -i <iters> -v <file>    (print every iteration)
// The board file is a rectangular grid of 'X' (alive) / other (dead) chars.
int main(int argc, char** argv){
const char* filename;
// Parse arguments: -i is required; -v enables per-iteration printing.
if(argc == 4 || argc == 5){
string arg1 = argv[1];
string arg3 = argv[3];
if(argc == 4 && arg1 == "-i"){
filename = argv[3];
}
else if(argc == 5 && arg1 == "-i" && arg3 == "-v"){
filename = argv[4];
}
else{
cout << "Wrong arguments given" << endl;
return 0;
}
}
else{
cout << "Wrong number of arguments given" << endl;
return 0;
}
int numIter = atoi(argv[2]);
int width = 0;
int height = 0;
ifstream infile;
infile.open(filename);
//Getting width and Height from file
// Width = length of the first line; height counts only lines of that width.
string line;
while(getline(infile, line)){
if(width == 0){
width = line.length();
}
if(line.length() == width){
height ++;
}
}
int arraySize = height * width;
// Rewind the stream to re-read the cells one character at a time.
infile.clear();
infile.seekg(0, infile.beg);
//Creating arrays
char* startBoard;
char* finalBoard;
// NOTE(review): variable-length array — a g++/nvcc extension, not standard C++.
char world[arraySize];
for (int i = 0; i < arraySize; ++i)
{
infile >> world[i];
}
//Printing Start Board
printBoard(world, arraySize, width);
cout << endl;
//Device arrays: startBoard is the current generation, finalBoard the next.
cudaMalloc((void**)&startBoard, height * width * sizeof(char));
cudaMalloc((void**)&finalBoard, height * width * sizeof(char));
cudaMemcpy(startBoard, world, height * width * sizeof(char), cudaMemcpyHostToDevice);
//Number of iterations
for(int iter = 0; iter < numIter; iter++){
int blockSize = 1024;
int numBlocks = (arraySize + blockSize -1) / blockSize;  // ceil-div over cells
getNeighbours<<<numBlocks, blockSize>>>(startBoard, finalBoard, width, height);
// Ping-pong the device buffers instead of copying.
swap(startBoard,finalBoard);
//Printing each iteration board (-v mode); blocking memcpy also syncs the kernel.
if(argc == 5){
cudaMemcpy(world, startBoard, height * width * sizeof(char), cudaMemcpyDeviceToHost);
printBoard(world, arraySize, width);
cout << endl;
}
// Optional frame delay for watching the animation (currently disabled).
unsigned int microseconds;
microseconds = 100000;
//usleep(microseconds);
}
//Print only final iteration
if(argc == 4){
cudaMemcpy(world, startBoard, height * width * sizeof(char), cudaMemcpyDeviceToHost);
printBoard(world, arraySize, width);
}
cudaFree(startBoard);
cudaFree(finalBoard);
return 0;
}
// Print the flattened board, breaking the line after every 'width' cells.
void printBoard(char array[], int size, int width){
for (int i = 0; i < size; ++i)
{
cout << array[i];
if ((i + 1) % width == 0){
cout << endl;
}
}
}
// One Game-of-Life step: each thread evaluates one cell of the toroidal
// (wrap-around) grid and writes the next generation into finalBoard.
// Cells are 'X' (alive) or '-' (dead); boards are row-major width*height.
__global__
void getNeighbours(char startBoard[], char finalBoard[], int width, int height){
//Finding Neighbours
int index;
int x;
int y;
int currentCell = blockIdx.x * blockDim.x + threadIdx.x;
// Guard: the grid may overshoot the cell count (ceil-div launch).
if(currentCell < width * height){
y = currentCell / width;
x = currentCell - (width * y);
int neighbours = 0;
//Checking surrounding squares (the 3x3 neighborhood minus the cell itself)
for (int i = y - 1; i <= y + 1; i++)
{
for (int j = x - 1; j <= x + 1; j++){
if ( j == x && i == y ) {
continue;
}
//Check if on board
else if(j > -1 && j < width && i > -1 && i < height){
index = width * i + j;
if(startBoard[index] == 'X'){
neighbours ++;
}
}
//Handle wrap around and add neighbours
// (indices can only be off by one, so each edge wraps to the far side)
else{
int jTemp = j;
int iTemp = i;
if(j == -1){
jTemp = width - 1;
}
if(j == width){
jTemp = 0;
}
if(i == -1){
iTemp = height - 1;
}
if(i == height){
iTemp = 0;
}
index = width * iTemp + jTemp;
if(startBoard[index] == 'X'){
neighbours ++;
}
}
}
}
// Life rule: birth on 3 neighbours; survival on 2 or 3.
// (&& binds tighter than ||, so this reads: n==3, or alive && n==2.)
if(neighbours == 3 || startBoard[currentCell] == 'X' && neighbours == 2){
finalBoard[currentCell] = 'X';
}
else{
finalBoard[currentCell] = '-';
}
}
}
|
6,894 | /*
* Copyright 2013 William J. Brouwer, Pierre-Yves Taunay
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// A simple RNG based around the unpredictability of the scheduler
// Tested on a single M2070 by WJB/PYT
// FYI [0,1] distribution is heavily skewed toward bounds
//
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// Scheduler-entropy "RNG": deliberately races on shared memory (no barriers)
// so the values depend on warp scheduling order, then maps through cos() to
// [0,1]. The races are the point — do not "fix" them with __syncthreads().
// Assumes blockDim.x == 512 (size of the shared array).
__global__ void rng_cuda(float * out){
int t_idx = blockIdx.x * blockDim.x + threadIdx.x;
__shared__ float values[512];
values[threadIdx.x] = 0;
values[threadIdx.x] += clock();  // per-SM clock as the entropy seed
for (int i=0; i<16; ++i) values[threadIdx.x] += values[(threadIdx.x+32+i)%512];
for (int i=0; i<16; ++i) values[(threadIdx.x+32+i)%512] += values[threadIdx.x];
out[t_idx] = (cos(values[threadIdx.x])+1.0) / 2.0;
//out[t_idx] = ((float) ((int) values[threadIdx.x] % 100)) / 100.0;
}
// Driver: run the RNG kernel twice over 10x512 threads and print the floats.
int main(int argc, char * argv[]){
    dim3 threads = 512, blocks = 10;
    const int nBytes = threads.x * blocks.x * sizeof(float);
    float *d_out, *h_out;
    cudaMalloc((void**)&d_out, nBytes);
    h_out = (float*)malloc(nBytes);
    // Two launches, as in the experiment; only the second's output survives.
    rng_cuda<<<blocks, threads>>>(d_out);
    rng_cuda<<<blocks, threads>>>(d_out);
    // Blocking copy also synchronizes with the kernels.
    cudaMemcpy(h_out, d_out, nBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_out);
    for (int i = 0; i < nBytes / sizeof(float); i++)
        printf("%f \n", h_out[i]);
    return 0;
}
|
6,895 | #include "cuda.h"
#include <math.h>
#include <stdio.h>
__global__ void square_elements(float* in, float* out, int M, int N);
// Driver: build an M x N float matrix (value = row+col), square every element
// on the GPU, and copy the result back.
int main()
{
uint M = 5000;
uint N = 5000;
float* inputPtr = (float*)malloc(sizeof(float)*M*N);
float* outputPtr = (float*)malloc(sizeof(float)*M*N);
int m, n;
for (m = 0; m < M; m++){
for (n = 0; n < N; n++){
*(inputPtr + m*N + n) = m+n;
}
}
// for GPU
float* inputGPUPtr;
float* outputGPUPtr;
/* create input and output array on GPU. */
cudaMalloc((void**) &inputGPUPtr, sizeof(float)*M*N);
cudaMalloc((void**) &outputGPUPtr, sizeof(float)*M*N);
/* The input array is single precision, it can be sent directly to the
card */
cudaMemcpy(inputGPUPtr, inputPtr, sizeof(float)*M*N, cudaMemcpyHostToDevice);
/* run the kernel function. */
// Grid: ceil(M*N / blockSize) blocks of 256 threads, one thread per element.
int blockSize = 256;
int nBlocks = (M*N)/blockSize + ((M*N)%blockSize == 0?0:1);
printf("blockSize: %d, nBlocks = %d\n", blockSize, nBlocks);
dim3 dimBlock(blockSize);
dim3 dimGrid(nBlocks);
// NOTE(review): the kernel's guard is 'idx < N', so only the first N of the
// M*N elements get squared — confirm/fix the kernel if full coverage is meant.
square_elements<<<dimGrid, dimBlock>>>(inputGPUPtr, outputGPUPtr, M, N);
/* Send results back to cpu memeory (blocking copy also syncs the kernel) */
cudaMemcpy(outputPtr, outputGPUPtr, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
/* clean up. */
cudaFree(inputGPUPtr);
cudaFree(outputGPUPtr);
free(inputPtr);
free(outputPtr);
/* Scratch pad. */
}
/* Kernel to square elements of the array on the GPU */
/* One thread per element of the flattened M x N array; out[i] = in[i]^2. */
__global__ void square_elements(float* in, float* out, int M, int N)
{
int idx = blockIdx.x*blockDim.x+threadIdx.x;
/* bug fix: the guard was 'idx < N', which squared only the first N of the
   M*N elements even though the grid covers all of them. */
if ( idx < M*N ) out[idx]= in[idx] * in[idx];
}
|
6,896 | #include<stdio.h>
#include<assert.h>
//cudaMemcpy( dest, source, sizeinbytes, cudaMemcpyHostToDevice | cudaMemcpyDeviceToHost );
//cudaMalloc( (void **) &my_ptr, sizeinbytes );
// Exercise stub: kernel body intentionally empty; d_in/d_out are untouched.
__global__ void my_function( int array_size, int * d_in, int * d_out )
{
}
// Exercise stub: nothing is allocated or launched yet.
int main()
{
return 0;
}
|
6,897 | #include <iostream>
using namespace std;
#include "fThread.cuh"
// Device-side constructor: capture the in/out buffers and time parameters,
// derive this thread's flat global index, and seed _fNext with the current
// input value at that index.
__device__ fThread::fThread(float* fIn_d, float* fOut_d, float t, float dt){
_fIn_d = fIn_d; _fOut_d = fOut_d; _t = t; _dt = dt;
_idx = getIdx();
_fNext = _fIn_d[_idx];
//printf("%d ", _idx);
}
// Flatten the full 3-D thread and 3-D block coordinates into a single
// global linear index (thread-major within a block, then block-major).
__device__ int fThread::getIdx(){
return threadIdx.x + blockDim.x*(threadIdx.y + blockDim.y*(threadIdx.z + blockDim.z*(blockIdx.x + gridDim.x*(blockIdx.y + gridDim.y*blockIdx.z))));
}
// Commit the staged next value into the output buffer at this thread's slot.
__device__ void fThread::update(){
_fOut_d[_idx] = _fNext;
}
// Sum of fIn[i]^4 over the whole input, scaled by this thread's index.
// Note: every thread scans all _ntot elements — O(_ntot) work per thread.
// Requires setntot() to have been called first; _ntot is otherwise unset.
__device__ float fThread::getC(){
float c = 0.0;
for(int i=0;i<_ntot;i++)
c += _fIn_d[i]*_fIn_d[i]*_fIn_d[i]*_fIn_d[i];
return c*_idx;
}
// Explicit (forward-Euler style) step: stage fIn + dt * getC() into _fNext;
// the value is not written out until update() is called.
__device__ void fThread::nextTime(){
_fNext = _fIn_d[_idx] + _dt* getC();
}
// Debug helper: device-side printf of this thread's index and input value.
__device__
void fThread::print(){
printf("Hello World from [%d]th thread with fIn = %f\n", _idx, _fIn_d[_idx]);
}
// Record the total element count used by getC()'s reduction loop.
__device__ void fThread::setntot(int ntot){
_ntot = ntot;
}
|
// FEM assembly: one thread per tetrahedron builds its 12x12 consistent mass
// matrix (density d) and emits it in COO form (values/rows/cols), 144 entries
// per element at offset idx*144. Duplicated (row,col) pairs across elements
// are expected to be summed later by the caller.
// Thread index is decoded from a res x res x blockDim.x launch shape.
__global__ void compute_mass_matrix(double *values, int* rows, int* cols, int res, double d, double* vertices, int *tets, int tets_num)
{
const int idx = threadIdx.x + blockIdx.y*res + blockIdx.x*res*res;
if (idx >= tets_num)
return;
// Advance to this element's 4 vertex indices.
tets = tets + idx*4;
double x[4],y[4],z[4];
for (int i = 0;i < 4;i++){
x[i] = vertices[tets[i]*3];
y[i] = vertices[tets[i]*3 + 1];
z[i] = vertices[tets[i]*3 + 2];
}
// Global DOF ids: 3 (x,y,z) per vertex.
int vid[12] = {tets[0]*3,tets[0]*3+1,tets[0]*3+2,
tets[1]*3,tets[1]*3+1,tets[1]*3+2,
tets[2]*3,tets[2]*3+1,tets[2]*3+2,
tets[3]*3,tets[3]*3+1,tets[3]*3+2,
};
// Signed tet volume via the scalar triple product, then absolute value.
double V =((x[1] - x[0])*((y[2] - y[0])*(z[3] - z[0])-(y[3] - y[0])*(z[2] - z[0]))+(y[1] - y[0])*((x[3] - x[0])*(z[2] - z[0])-(x[2] - x[0])*(z[3] - z[0]))+(z[1] - z[0])*((x[2] - x[0])*(y[3] - y[0])-(x[3] - x[0])*(y[2] - y[0])))/6;
V = abs(V);
// Canonical consistent-mass pattern for a linear tet: diag 2, off-diag 1
// per matching coordinate, scaled by rho*V/20 below.
double m[] = {2, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0
,0, 2, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0
,0, 0, 2, 0, 0, 1, 0, 0, 1, 0, 0, 1
,1, 0, 0, 2, 0, 0, 1, 0, 0, 1, 0, 0
,0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 1, 0
,0, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0, 1
,1, 0, 0, 1, 0, 0, 2, 0, 0, 1, 0, 0
,0, 1, 0, 0, 1, 0, 0, 2, 0, 0, 1, 0
,0, 0, 1, 0, 0, 1, 0, 0, 2, 0, 0, 1
,1, 0, 0, 1, 0, 0, 1, 0, 0, 2, 0, 0
,0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 2, 0
,0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 2};
for (int i = 0;i < 12;i++)
for(int j = 0;j < 12;j++)
m[i*12+j] *= (d/20)*V;
// Scatter the dense 12x12 block into the element's COO slab.
int offset = idx*12*12;
for (int i = 0;i < 12; i++)
for(int j = 0;j < 12; j++){
values[offset + i*12 + j] = m[i*12 + j];
rows[offset + i*12 + j] = vid[i];
cols[offset + i*12 + j] = vid[j];
}
}
// FEM assembly: one thread per tetrahedron builds its 12x12 linear-elastic
// stiffness matrix K_e = V * B^T D B (Young's modulus E0, Poisson ratio v)
// and emits it in COO form, 144 entries per element at offset idx*144.
// Same launch decoding and output convention as compute_mass_matrix.
__global__ void compute_stiff_matrix(double *values, int* rows, int* cols, int res, double E0, double v, double* vertices, int *tets, int tets_num)
{
const int idx = threadIdx.x + blockIdx.y*res + blockIdx.x*res*res;
if (idx >= tets_num)
return;
tets = tets + idx*4;
double x[4],y[4],z[4];
for (int i = 0;i < 4;i++){
x[i] = vertices[tets[i]*3];
y[i] = vertices[tets[i]*3 + 1];
z[i] = vertices[tets[i]*3 + 2];
}
// Global DOF ids: 3 (x,y,z) per vertex.
int vid[12] = {tets[0]*3,tets[0]*3+1,tets[0]*3+2,
tets[1]*3,tets[1]*3+1,tets[1]*3+2,
tets[2]*3,tets[2]*3+1,tets[2]*3+2,
tets[3]*3,tets[3]*3+1,tets[3]*3+2,
};
// a,b,c: shape-function gradient cofactors; V: element volume.
double a[4],b[4],c[4],V;
a[0]=y[1]*(z[3] - z[2])-y[2]*(z[3] - z[1])+y[3]*(z[2] - z[1]);
a[1]=-y[0]*(z[3] - z[2])+y[2]*(z[3] - z[0])-y[3]*(z[2] - z[0]);
a[2]=y[0]*(z[3] - z[1])-y[1]*(z[3] - z[0])+y[3]*(z[1] - z[0]);
a[3]=-y[0]*(z[2] - z[1])+y[1]*(z[2] - z[0])-y[2]*(z[1] - z[0]);
b[0]=-x[1]*(z[3] - z[2])+x[2]*(z[3] - z[1])-x[3]*(z[2] - z[1]);
b[1]=x[0]*(z[3] - z[2])-x[2]*(z[3] - z[0])+x[3]*(z[2] - z[0]);
b[2]=-x[0]*(z[3] - z[1])+x[1]*(z[3] - z[0])-x[3]*(z[1] - z[0]);
b[3]=x[0]*(z[2] - z[1])-x[1]*(z[2] - z[0])+x[2]*(z[1] - z[0]);
c[0]=x[1]*(y[3] - y[2])-x[2]*(y[3] - y[1])+x[3]*(y[2] - y[1]);
c[1]=-x[0]*(y[3] - y[2])+x[2]*(y[3] - y[0])-x[3]*(y[2] - y[0]);
c[2]=x[0]*(y[3] - y[1])-x[1]*(y[3] - y[0])+x[3]*(y[1] - y[0]);
c[3]=-x[0]*(y[2] - y[1])+x[1]*(y[2] - y[0])-x[2]*(y[1] - y[0]);
V =((x[1] - x[0])*((y[2] - y[0])*(z[3] - z[0])-(y[3] - y[0])*(z[2] - z[0]))+(y[1] - y[0])*((x[3] - x[0])*(z[2] - z[0])-(x[2] - x[0])*(z[3] - z[0]))+(z[1] - z[0])*((x[2] - x[0])*(y[3] - y[0])-(x[3] - x[0])*(y[2] - y[0])))/6;
V = abs(V);
// D: 6x6 isotropic elasticity matrix, scaled by E0/((1+v)(1-2v)).
double e[] = {1-v ,v ,v ,0 ,0 ,0
,v ,1-v ,v ,0 ,0 ,0
,v ,v ,1-v ,0 ,0 ,0
,0 ,0 ,0 ,0.5-v,0 ,0
,0 ,0 ,0 ,0 ,0.5-v,0
,0 ,0 ,0 ,0 ,0 ,0.5-v};
for(int i = 0;i < 6*6;i++)
e[i] = e[i] *(E0/(1+v)/(1-2*v));
// B: 6x12 strain-displacement matrix, entries = gradients / (6V).
double be[] = {a[0],0 ,0 ,a[1],0 ,0 ,a[2],0 ,0 ,a[3],0 ,0
,0 ,b[0],0 ,0 ,b[1],0 ,0 ,b[2],0 ,0 ,b[3],0
,0 ,0 ,c[0],0 ,0 ,c[1],0 ,0 ,c[2],0 ,0 ,c[3]
,b[0],a[0],0 ,b[1],a[1],0 ,b[2],a[2],0 ,b[3],a[3],0
,0 ,c[0],b[0],0 ,c[1],b[1],0 ,c[2],b[2],0 ,c[3],b[3]
,c[0],0 ,a[0],c[1],0 ,a[1],c[2],0 ,a[2],c[3],0 ,a[3]};
for(int i = 0;i < 6*12;i++)
be[i] = be[i] /(6*V);
// ke1 = B^T * D (12x6).
double ke1[12*6];
for (int i = 0;i < 12;i++)
for(int j = 0;j < 6;j++){
ke1[i*6+j] = 0;
for(int k = 0;k < 6;k++)
ke1[i*6+j] += be[k*12+i]*e[k*6+j];
}
// ke2 = (B^T D) * B * V (12x12 element stiffness).
double ke2[12*12];
for (int i = 0;i < 12;i++)
for(int j = 0;j < 12;j++){
ke2[i*12+j] = 0;
for(int k = 0;k < 6;k++)
ke2[i*12+j] += ke1[i*6+k]*be[k*12+j];
ke2[i*12+j] *= V;
}
// for (int i = 0;i < 12;i++)
// for(int j = 0;j < 6;j++)
// ke2[i*12+j] = ke1[i*6+j];
// for (int i = 0;i < 6;i++)
// for(int j = 0;j < 12;j++)
// ke2[i*12+j] = be[i*12+j];
// Scatter the dense block into this element's COO slab.
int offset = idx*12*12;
for (int i = 0;i < 12; i++)
for(int j = 0;j < 12; j++){
values[offset + i*12 + j] = ke2[i*12 + j];
rows[offset + i*12 + j] = vid[i];
cols[offset + i*12 + j] = vid[j];
}
}
6,899 | #include <stdio.h>
// w = A * v for a row-major m x n matrix A; one thread per output row.
__global__ void mat_x_vec_kernel(double *A, double *v, double *w, int m, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j;
    double sum = 0.0;
    // bug fix: re-enable the bounds guard (it was commented out), otherwise
    // threads in a partial final block write past w[m-1].
    if (i >= m)
        return;
    for (j = 0; j < n; j++)
    {
        // bug fix: the row stride of a row-major m x n matrix is n, not m
        // (the original A[i*m + j] is only correct when m == n).
        sum += A[i * n + j] * v[j];
    }
    w[i] = sum;
}
6,900 | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
// C = A x B for row-major matrices: A is (rows_a x rows_b), B is
// (rows_b x cols_c), C is (rows_a x cols_c). One thread per C element.
__global__ void d_matrix_multi(float * d_matrix_a_ptr_in, float * d_matrix_b_ptr_in, float * d_matrix_c_ptr_in, int d_row_size_a_in, int d_row_size_b_in, int d_column_size_c_in)
{
    int d_i=blockIdx.x*blockDim.x+threadIdx.x;   // row of C
    int d_j=blockIdx.y*blockDim.y+threadIdx.y;   // column of C
    // bug fix: bounds guard added — d_row_size_a_in was previously unused and
    // out-of-range threads wrote past C when the grid overshot the matrix.
    if (d_i >= d_row_size_a_in || d_j >= d_column_size_c_in)
        return;
    int d_k;
    float d_sum=(float)0.0;
    for (d_k=0;d_k<d_row_size_b_in;d_k++)
    {
        d_sum+=d_matrix_a_ptr_in[d_i*d_row_size_b_in+d_k]*d_matrix_b_ptr_in[d_k*d_column_size_c_in+d_j];
    }
    d_matrix_c_ptr_in[d_i*d_column_size_c_in+d_j]=d_sum;
}
// Fill a row-major h_row x h_column matrix with uniform random floats in
// [0, 1]. Returns 0 on success, 1 on a NULL pointer.
int h_matrix_init(float * h_matrix_ptr_in, int h_row_size_in, int h_column_size_in)
{
	int h_i, h_j;
	if(h_matrix_ptr_in==NULL)
	{
		fprintf(stderr, "INVALID MATRIX POINTER.\n");
		return 1;
	}
	else
	{
		// bug fix: seed the PRNG only once per process. Re-seeding with
		// time(NULL) on every call made consecutive matrices (e.g. A and B
		// in main) identical when initialized within the same second.
		static int h_seeded = 0;
		if(!h_seeded)
		{
			srand((unsigned)time(NULL));
			h_seeded = 1;
		}
		for(h_i=0; h_i<h_row_size_in;h_i++)
		{
			for(h_j=0;h_j<h_column_size_in;h_j++)
			{
				h_matrix_ptr_in[h_i*h_column_size_in+h_j]=((float) rand()/(float) RAND_MAX);
			}
		}
		return 0;
	}
}
// Print every element of a row-major matrix as "C[i][j]=value".
// Returns 0 on success; 1 for a NULL pointer or a zero-sized dimension.
int h_display_result(float * h_matrix_ptr_in, int h_row_size_in, int h_column_size_in)
{
	if (h_matrix_ptr_in==NULL)
	{
		fprintf(stderr,"ERROR IN MATRIX POINTER INPUT.\n");
		return 1;
	}
	if (h_row_size_in==0 || h_column_size_in==0)
	{
		fprintf(stderr, "ERROR IN MATRIX SIZE INPUT.\n");
		return 1;
	}
	for (int h_row=0; h_row<h_row_size_in; h_row++)
	{
		for (int h_col=0; h_col<h_column_size_in; h_col++)
		{
			fprintf(stdout,"C[%d][%d]=%f.\n",h_row,h_col,h_matrix_ptr_in[h_row*h_column_size_in+h_col]);
		}
	}
	return 0;
}
// Driver: C = A x B with sizes from argv; validates sizes, fills A and B with
// random values on the host, multiplies on the GPU, prints C.
// Usage: ./matrix row_size_a column_size_a row_size_b column_size_b
int main(int argc, char **argv)
{
	int h_row_size_a, h_row_size_b, h_row_size_c;
	int h_column_size_a, h_column_size_b, h_column_size_c;
	float * h_matrix_a_ptr;
	float * h_matrix_b_ptr;
	float * h_matrix_c_ptr;
	int h_ret=0;
	float * d_matrix_a_ptr;
	float * d_matrix_b_ptr;
	float * d_matrix_c_ptr;
	if(argc!=5)
	{
		fprintf(stderr, "ERROR IN USAGE.\n");
		fprintf(stderr,"./matrix row_size_a column_size_a row_size_b column_size_b \n");
		return 1;
	}
	else
	{
		h_row_size_a=atoi(argv[1]);
		h_column_size_a=atoi(argv[2]);
		h_row_size_b=atoi(argv[3]);
		h_column_size_b=atoi(argv[4]);
		// Sizes must be positive and the inner dimensions must agree.
		if((h_row_size_a==0 || h_column_size_a==0 || h_row_size_b==0 || h_column_size_b==0) || (h_column_size_a!=h_row_size_b))
		{
			fprintf(stderr, "INVAILD MATRIX SIZE.\n");
			fprintf(stderr, "C=AxB.\n");
			fprintf(stderr, "Dim for Matrix A is %d x %d.\n", h_row_size_a,h_column_size_a);
			fprintf(stderr, "Dim for Matrix b is %d x %d.\n", h_row_size_b,h_column_size_b);
			return 1;
		}
		else
		{
			//MATRIX SIZE C
			h_row_size_c=h_row_size_a;
			h_column_size_c=h_column_size_b;
			//HOST MEMORY
			h_matrix_a_ptr=(float*)malloc(h_row_size_a*h_column_size_a*sizeof(float));
			if(h_matrix_a_ptr==NULL)
			{
				fprintf(stderr, "HOST MEMORY ALLOC ERROR FOR A.\n");
				return 1;
			}
			h_matrix_b_ptr=(float*)malloc(h_row_size_b*h_column_size_b*sizeof(float));
			if(h_matrix_b_ptr==NULL)
			{
				fprintf(stderr, "HOST MEMORY ALLOC ERROR FOR B.\n");
				return 1;
			}
			h_matrix_c_ptr=(float*)malloc(h_row_size_c*h_column_size_c*sizeof(float));
			if(h_matrix_c_ptr==NULL)
			{
				fprintf(stderr, "HOST MEMORY ALLOC ERROR FOR C.\n");
				return 1;
			}
			//HOST MATRIX INITIALIZATION
			h_ret=h_matrix_init(h_matrix_a_ptr,h_row_size_a, h_column_size_a);
			if(h_ret!=0)
			{
				fprintf(stderr, "MATRIX A INITIALIZATION ERROR.\n");
				return 1;
			}
			h_ret=h_matrix_init(h_matrix_b_ptr, h_row_size_b,h_column_size_b);
			if(h_ret!=0)
			{
				fprintf(stderr, "MATRIX B INITIALIZATION ERROR.\n");
				return 1;
			}
		}
		//DEVICE MEMORY
		cudaMalloc((void**) &d_matrix_a_ptr,h_row_size_a*h_column_size_a*sizeof(float));
		cudaMalloc((void**) &d_matrix_b_ptr,h_row_size_b*h_column_size_b*sizeof(float));
		cudaMalloc((void**) &d_matrix_c_ptr,h_row_size_c*h_column_size_c*sizeof(float));
		//HOST->DEVICE MEM COPY
		cudaMemcpy(d_matrix_a_ptr,h_matrix_a_ptr,h_row_size_a*h_column_size_a*sizeof(float),cudaMemcpyHostToDevice);
		cudaMemcpy(d_matrix_b_ptr,h_matrix_b_ptr,h_row_size_b*h_column_size_b*sizeof(float),cudaMemcpyHostToDevice);
		//CUDA KERNEL CALL
		// NOTE: floor division — the grid only fully covers C when both
		// dimensions are multiples of 16 (matches the kernel's lack of guard
		// in the original code; a guarded kernel tolerates ceil-div too).
		dim3 threadsPerBlock(16,16);
		dim3 numBlocks(h_row_size_c/16, h_column_size_c/16);
		// bug fix: the inner ("shared") dimension argument was h_column_size_b;
		// the kernel needs h_row_size_b (== h_column_size_a), which is only the
		// same thing for square B — non-square inputs produced garbage.
		d_matrix_multi<<<numBlocks, threadsPerBlock>>>(d_matrix_a_ptr,d_matrix_b_ptr, d_matrix_c_ptr, h_row_size_a, h_row_size_b, h_column_size_c);
		//GET CUDA RESULT (blocking copy also synchronizes with the kernel)
		cudaMemcpy(h_matrix_c_ptr,d_matrix_c_ptr,h_row_size_c*h_column_size_c*sizeof(float),cudaMemcpyDeviceToHost);
		//DISPLAY RESULT
		h_ret=h_display_result(h_matrix_c_ptr,h_row_size_c, h_column_size_c);
		//free
		cudaFree(d_matrix_a_ptr);
		cudaFree(d_matrix_b_ptr);
		cudaFree(d_matrix_c_ptr);
		free(h_matrix_a_ptr);
		free(h_matrix_b_ptr);
		free(h_matrix_c_ptr);
	}
	return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.