serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
9,101 | /**
* \file main.cu
* \brief Single-precision A*X plus Y (SAXPY) implementation in CUDA.
*/
#include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
/**
* \brief SAXPY Kernel.
*
* Calculates Y = a*X + Y.
*/
/**
 * \brief SAXPY Kernel.
 *
 * Calculates Y = a*X + Y, one element per thread.
 * Launch with at least ceil(N / blockDim.x) blocks of blockDim.x threads.
 */
__global__ void saxpy(const float a, const float *X, float *Y, size_t N)
{
    // BUG FIX: the index was an int, which (a) makes the bounds check a
    // mixed signed/unsigned comparison and (b) can overflow for N > INT_MAX.
    // Compute the global index in size_t instead.
    size_t i = (size_t)blockDim.x * blockIdx.x + threadIdx.x;
    // Guard the grid tail: the last block may extend past N
    if (i < N)
        Y[i] = a * X[i] + Y[i];
}
/**
* \brief Program entry-point.
*/
/**
 * \brief Program entry-point.
 *
 * Usage: saxpy N [threads_per_block]
 * Runs Y = 10*X + Y over N ones, verifies the result and reports timing,
 * bandwidth and throughput.
 */
int main(int argc, char **argv)
{
    if (argc < 2) {
        std::fprintf(stderr, "Error: missing command-line parameter\n");
        std::exit(EXIT_FAILURE);
    }
    size_t N = atol(argv[1]);
    size_t T = (argc > 2) ? atol(argv[2]) : 128; // threads per block
    size_t B = (N + T - 1) / T;                  // ceil-div: number of blocks
    float *d_X, *d_Y;
    float ms;
    cudaEvent_t start, stop;
    // Sanity checks
    assert(N > 0);
    assert(T >= 32);
    // Host vectors, N elements initialized to 1
    std::vector<float> h_X(N, 1);
    std::vector<float> h_Y(N, 1);
    // Instantiate events and device buffers
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc(&d_X, N * sizeof(float));
    cudaMalloc(&d_Y, N * sizeof(float));
    // Copy X and Y to the GPU
    cudaMemcpy(d_X, h_X.data(), N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y, h_Y.data(), N * sizeof(float), cudaMemcpyHostToDevice);
    // Launch kernel, bracketed by events for timing
    cudaEventRecord(start);
    saxpy<<<B, T>>>(10, d_X, d_Y, N);
    cudaEventRecord(stop);
    // Copy output data (synchronous, so the kernel has completed afterwards)
    cudaMemcpy(h_Y.data(), d_Y, N * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&ms, start, stop);
    // Count how many elements match the expected 10*1 + 1 = 11
    auto matches =
        std::count_if(h_Y.begin(), h_Y.end(), [](float x) { return x == 11; });
    // BUG FIX: count_if returns an iterator difference_type (ptrdiff_t), not
    // int; printing it with %d is undefined behavior on LP64 platforms.
    std::printf("Elements matching = %ld\n", static_cast<long>(matches));
    std::printf("Elapsed time (ms) = %g\n", ms);
    // 3 accesses of 4 bytes per element (read X, read Y, write Y)
    std::printf("Effective bandwidth (GB/s) = %g\n", N*4*3/ms/1e6);
    // 2 FLOPs per element (multiply + add)
    std::printf("Throughput (GFLOP/s) = %g\n", 2*N/ms/1e6);
    // Cleanup the mess
    cudaFree(d_X);
    cudaFree(d_Y);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
9,102 | #include <cuda.h>
#include <stdio.h>
#include <iostream>
#define SIZE 100
#define n 2
using namespace std;
// Element-wise vector addition: z[idx] = x[idx] + y[idx].
// There is no bounds guard, so the caller must launch exactly one thread
// per element (main launches <<<5,20>>> for SIZE == 100).
__global__ void vec_add(int *x,int *y,int *z)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    z[idx] = x[idx] + y[idx];
}
// Matrix-vector product o = mat * vec for an n x n row-major matrix.
// One thread per output element; launch as <<<1, n>>>.
__global__ void vec_mat_mul(int *mat,int *vec,int *o)
{
    int x = threadIdx.x;
    printf("\n%d",x);
    o[x]=0;
    // BUG FIX: the original read mat[k*n+k], i.e. only the matrix diagonal.
    // That happens to give the right answer for the all-ones matrix used in
    // main, but is wrong in general: row x of the product needs mat[x*n+k].
    for(int k=0;k<n;k++)
        o[x]=o[x]+vec[k]*mat[x*n+k];
}
// Naive n x n matrix multiply c = a * b, all row-major.
// Launch with a single (n, n) thread block; thread (x, y) owns c[y][x].
__global__ void mat_mul(int *a,int *b,int* c)
{
    const int col = threadIdx.x;
    const int row = threadIdx.y;
    int acc = 0;
    for (int k = 0; k < n; k++)
        acc += a[n*row + k] * b[n*k + col];
    c[n*row + col] = acc;
}
// Demo driver: runs vector addition, matrix multiplication and
// matrix-vector multiplication on the GPU and prints spot-check results.
int main()
{
    //vec_add: c = a + b over SIZE elements
    int a[SIZE],b[SIZE],c[SIZE];
    int *d,*e,*f;
    for(int i=0;i<SIZE;i++)
        a[i]=b[i]=i;
    cudaMalloc((void**)&d,SIZE*sizeof(int));
    cudaMalloc((void**)&e,SIZE*sizeof(int));
    cudaMalloc((void**)&f,SIZE*sizeof(int));
    cudaMemcpy(d,a,SIZE*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(e,b,SIZE*sizeof(int),cudaMemcpyHostToDevice);
    // 5 blocks x 20 threads = exactly SIZE threads (vec_add has no guard)
    vec_add<<<5,20>>>(d,e,f);
    cudaMemcpy(c,f,SIZE*sizeof(int),cudaMemcpyDeviceToHost);
    printf("%d",c[50]);
    //mat_mul: mat3 = mat1 * mat2 (n x n, all ones)
    int mat1[n][n],mat2[n][n],mat3[n][n];
    int *g,*h,*l;
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            mat1[i][j]=mat2[i][j]=1;
        }}
    cudaMalloc((void**)&g,n*n*sizeof(int));
    cudaMalloc((void**)&h,n*n*sizeof(int));
    cudaMalloc((void**)&l,n*n*sizeof(int));
    cudaMemcpy(g,mat1,n*n*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(h,mat2,n*n*sizeof(int),cudaMemcpyHostToDevice);
    dim3 threadBlock(n,n); // one thread per output element
    mat_mul<<<1,threadBlock>>>(g,h,l);
    cudaMemcpy(mat3,l,n*n*sizeof(int),cudaMemcpyDeviceToHost);
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            printf("%d",mat3[i][j]);
        }}
    //vec_mat_mul: o4 = mat4 * vec4
    int mat4[n][n];
    int *w;
    int vec4[n],o4[n];
    int *u,*out4;
    for(int i=0;i<n;i++){
        for(int j=0;j<n;j++){
            mat4[i][j]=1;
        }}
    for(int i=0;i<n;i++)
        vec4[i]=1;
    cudaMalloc((void**)&w,n*n*sizeof(int));
    cudaMalloc((void**)&u,n*sizeof(int));
    cudaMalloc((void**)&out4,n*sizeof(int));
    cudaMemcpy(w,mat4,n*n*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(u,vec4,n*sizeof(int),cudaMemcpyHostToDevice);
    vec_mat_mul<<<1,n>>>(w,u,out4);
    cudaMemcpy(o4,out4,n*sizeof(int),cudaMemcpyDeviceToHost);
    cout<<"\n\n";
    for(int i=0;i<n;i++)
        cout<<o4[i];
    // BUG FIX: every device allocation was leaked; release them all.
    cudaFree(d); cudaFree(e); cudaFree(f);
    cudaFree(g); cudaFree(h); cudaFree(l);
    cudaFree(w); cudaFree(u); cudaFree(out4);
    return 0;
}
|
9,103 | #include <iostream>
#include <cstdio>
#include <cmath>
#include <cstdlib>
#include <ctime>
using namespace std;
#define min(a, b) ((a <= b) ? a : b)
// Allocate a flag array representing the integers 2..n (index i maps to
// value i + 2, so the array has n - 1 slots). Every slot starts at 1,
// meaning "assumed prime" until a sieve clears it. Caller owns the memory.
int * make_array_2_to_n(int n) {
    const int count = n - 1;
    int *flags = (int *) malloc(count * sizeof(int));
    for (int i = 0; i < count; ++i)
        flags[i] = 1;
    return flags;
}
// Dump the flag array: one "value flag" pair per line for values 2..n.
void print_array(int * arr, int n) {
    for (int i = 0; i <= n - 2; ++i)
        cout << (i + 2) << " " << arr[i] << endl;
}
// Print each value v in 2..n whose flag arr[v-2] is still 1 (prime),
// one per line. (The -2 shift comes from make_array_2_to_n's layout.)
void print_prime(int * arr, int n) {
    for (int v = 2; v <= n; ++v) {
        if (arr[v - 2] == 1)
            cout << v << endl;
    }
}
// Compare two sieve flag arrays element by element. On the first mismatch,
// report the differing value and both flags and return; otherwise report
// that the arrays agree.
void diff_prime(int * arr1, int * arr2, int n) {
    // NOTE: the original carried a `flag` variable that was dead code — the
    // function returns on the first difference, so the flag could never be
    // cleared by the time it was tested after the loop. Behavior unchanged.
    for (int i = 0; i < (n-1); i++) {
        if (arr1[i] != arr2[i]) {
            cout << "Arrays are different\n";
            cout << (i+2) << " " << arr1[i] << " " << arr2[i] << endl;
            return;
        }
    }
    cout << "Arrays are the same\n";
}
// Sequential Sieve of Eratosthenes over the 2..n flag array (arr[v-2]
// corresponds to value v). Crossing-off starts at p*p because smaller
// multiples were already cleared by smaller primes.
void seq_sieve(int * arr, int n) {
    const int limit = int(ceil(sqrt(int(n))));
    for (int p = 2; p <= limit; ++p) {
        if (arr[p - 2] != 1)
            continue;
        for (int multiple = p * p; multiple <= n; multiple += p)
            arr[multiple - 2] = 0;
    }
}
// Parallel Sieve of Eratosthenes over the flag array d_arr (index v-2
// corresponds to the value v). Only the first sqrt_n threads participate;
// for each candidate prime i, they clear interleaved subsets of i's
// multiples.
//
// NOTE(review): `min(start, 2)` evaluates to 2 for every start >= 2, so the
// chunked launches in main (start = 1001, 3001) re-sieve from 2 instead of
// resuming at `start`. The output is still correct because clearing
// composites is idempotent, but the work is repeated; the intent was
// presumably max(start, 2) — confirm before changing.
__global__
void par_sieve(int * d_arr, int n, int sqrt_n, int start, int end) {
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
int min_i = min(start, 2);
int max_i = min(sqrt_n, end);
// Block-scope barrier only; it does not order work across the whole grid.
__syncthreads();
// Performs Sieve of Eratosthenese
// Go from i = 2, ... , sqrt_n
for (int i = min_i; i <= max_i; i++) {
// Only uses sqrt_n threads (to minimize using sqrt(n) processors
if (tid < sqrt_n) {
// Checks if marked as 1 (prime)
if (d_arr[i-2] == 1) {
// Perform interleaved work. With sqrt_n processors, each processor
// goes in increments of i, starting from 2i and not exceeding n.
// This traversal ensures that every thread will set to 0 a number
// which is a multiple of i.
// This implementation does not introduce more work than the original
// implementation. So, there is O(n*log(log(n))) work. With sqrt_n
// processors, I can use O(sqrt_n * log(log(n)))
// Index (j + tid + 1)*i + (i-2) corresponds to value (j + tid + 2)*i,
// i.e. the multiples 2i, 3i, ... distributed across the sqrt_n threads.
for (int j = 0; ((j + tid + 1) * i + (i-2)) < (n-1); j+=sqrt_n) {
d_arr[(j + tid + 1) * i + (i - 2)] = 0;
}
}
}
}
}
// Driver: runs the sequential sieve and the CUDA sieve for primes <= n,
// times both (with and without the device copies), and verifies the
// parallel result against the sequential one.
int main(int argc, char** argv) {
    if (argc != 2) {
        cout << "Takes one argument - n, positive integer - to calculate the number of primes at most n\n";
        // BUG FIX: the original fell through and dereferenced argv[1] anyway.
        return 1;
    }
    int n = atoi(argv[1]);
    // Making Array
    cout << "sequential implementation\n";
    int * seq_array = make_array_2_to_n(n);
    // Sequential Sieve
    clock_t start, seq_runtime;
    start = clock();
    seq_sieve(seq_array, n);
    seq_runtime = clock() - start;
    cout << "sequential run time (in milliseconds): " << (seq_runtime * 1000 / CLOCKS_PER_SEC) << endl;
    cout << "\n\n";
    cout << "parallel\n";
    // Initializing variables for parallel execution
    int sqrt_n = int(ceil(sqrt(int(n))));
    int * par_array = make_array_2_to_n(n);
    int * d_par_array;
    cout << "cudaMalloc\n";
    cudaError_t malloc_error = cudaMalloc((void**)&d_par_array, sizeof(int) * (n-1));
    if (malloc_error != cudaSuccess) {
        printf("cudaMalloc error: %s\n", cudaGetErrorString(malloc_error));
    }
    cout << "cudaMemcpyHostToDevice\n";
    clock_t with_memcpy_start, with_memcpy_runtime;
    with_memcpy_start = clock();
    cudaError_t memcpy_to_d_error = cudaMemcpy((void*)d_par_array, (void*)par_array, sizeof(int) * (n-1), cudaMemcpyHostToDevice);
    // BUG FIX: this check previously re-tested malloc_error, so a failed
    // host-to-device copy went unreported.
    if (memcpy_to_d_error != cudaSuccess) {
        printf("cudaMemcpyHostToDevice: %s\n", cudaGetErrorString(memcpy_to_d_error));
    }
    // Defining threads per block (tpb), and number of blocks to schedule
    int tpb = 1024;
    int nblocks = n / tpb + 1;
    clock_t without_memcpy_start, without_memcpy_runtime;
    without_memcpy_start = clock();
    // Calling Parallel Sieve (split into chunks for large n to stay under
    // the kernel launch timeout)
    if (n <= 20000000) {
        cout << "Kernel call 1" << endl;
        par_sieve<<<nblocks, tpb>>>(d_par_array, n, sqrt_n, 2, sqrt_n);
        cudaDeviceSynchronize();
    } else if (n <= 27500000) {
        cout << "Kernel call 1" << endl;
        par_sieve<<<nblocks, tpb>>>(d_par_array, n, sqrt_n, 2, 1000);
        cudaDeviceSynchronize();
        cout << "Kernel call 2" << endl;
        par_sieve<<<nblocks, tpb>>>(d_par_array, n, sqrt_n, 1001, 3000);
        cudaDeviceSynchronize();
        cout << "Kernel call 3" << endl;
        par_sieve<<<nblocks, tpb>>>(d_par_array, n, sqrt_n, 3001, sqrt_n);
        cudaDeviceSynchronize();
    } else {
        cout << "I have not been able to get n > 27 500 000 to run without without CUDA launch timeout" << endl;
        cout << "Exiting now\n";
        cudaFree(d_par_array);
        free(seq_array);
        free(par_array);
        return 0;
    }
    // Error checking
    cudaError_t kernel_error = cudaGetLastError();
    if (kernel_error != cudaSuccess) {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(kernel_error));
    }
    without_memcpy_runtime = clock() - without_memcpy_start;
    cout << "parallel run time (in milliseconds) WITHOUT cudaMemcpy: " << (without_memcpy_runtime * 1000 / CLOCKS_PER_SEC) << endl;
    cudaMemcpy((void*)par_array, (void*)d_par_array, sizeof(int) * (n-1), cudaMemcpyDeviceToHost);
    with_memcpy_runtime = clock() - with_memcpy_start;
    cout << "parallel run time (in milliseconds) WITH cudaMemcpy: " << (with_memcpy_runtime * 1000 / CLOCKS_PER_SEC) << endl;
    diff_prime(seq_array, par_array, n);
    // BUG FIX: free host and device memory (previously leaked).
    cudaFree(d_par_array);
    free(seq_array);
    free(par_array);
    return 0;
}
|
9,104 | /* GEMM is a General Matrix Multiply - a subroutine in the Basic Linear Algebra Subprograms library*/
/* Includes, system */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
/* ======================================================= */
/* Simple implementation of dgemm */
/* ======================================================= */
/* Reference column-major dgemm: C = alpha * A * B + beta * C, where
 * A, B and C are n x n matrices stored column-major (element (i,j) at
 * index j*n + i). */
static void simple_dgemm(int n, double alpha, const double *A, const double *B,
                         double beta, double *C) {
    int i, j, k;
    for (i = 0; i < n; ++i) {
        for (j = 0; j < n; ++j) {
            double acc = 0;
            for (k = 0; k < n; ++k)
                acc += A[k * n + i] * B[j * n + k];
            C[j * n + i] = alpha * acc + beta * C[j * n + i];
        }
    }
}
/* ==== */
/* Main */
/* ==== */
/* Entry point: times the plain-C dgemm on an N x N random matrix
 * (N from argv[1], default 275). */
int main(int argc, char **argv)
{
    double *h_A, *h_B, *h_C, *h_C_simple;
    double alpha = 1.0; /* was 1.0f: the float suffix on a double is misleading */
    double beta = 0.0;
    int n2, N;
    int i;
    struct timeval tv1, tv2;
    /* get the size of the matrix from the command line */
    if (argc <2 ) N= 275;
    else N = atoi(argv[1]);
    n2 = N * N;
    printf("\nRunning dgemm test for %d by %d matricies.\n", N, N);
    /* Allocate host memory for the matrices */
    h_A = (double *)malloc(n2 * sizeof(double) );
    h_B = (double *)malloc(n2 * sizeof(double) );
    h_C = (double *)malloc(n2 * sizeof(double) );
    h_C_simple = (double *)malloc(n2 * sizeof(double) );
    /* BUG FIX: the allocations were never checked; a failure would crash in
     * the initialization loop below. */
    if (h_A == NULL || h_B == NULL || h_C == NULL || h_C_simple == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return EXIT_FAILURE;
    }
    /* Fill the matrices with test data */
    for (i = 0; i < n2; i++){
        h_A[i] = rand() / (double)RAND_MAX;
        h_B[i] = rand() / (double)RAND_MAX;
        h_C[i] = rand() / (double)RAND_MAX;
        h_C_simple[i] = h_C[i];
    }
    printf("\tTesting simple C implementation of dgemm function.\n");
    gettimeofday(&tv1, NULL);
    /* Performs operation using plain C code */
    simple_dgemm(N, alpha, h_A, h_B, beta, h_C_simple);
    gettimeofday(&tv2, NULL);
    printf("\t\tdone...\n");
    printf("\t\tExecution time (in millisec): %.2f\n",
           (double)(tv2.tv_usec-tv1.tv_usec)/1000 +
           (double)(tv2.tv_sec -tv1.tv_sec )*1000);
    /* Memory clean up */
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_simple);
    return(0);
}
|
9,105 | #include <stdio.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
//Number of threads in each dimension of the block.
#define THREAD_NUM 16
// CUDA kernel
// CUDA kernel: naive dense matrix multiply C = A * B for n x n row-major
// int matrices. Thread (x, y) computes one output element; the grid may
// overshoot n in either dimension, hence the guard.
__global__ void matrixMul(int *A, int *B, int *C, int n)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= n || col >= n)
        return;
    long acc = 0; // widened accumulator, as in the original
    for (int k = 0; k < n; ++k)
        acc += A[row * n + k] * B[k * n + col];
    C[row * n + col] = acc;
}
// Main
// Entry point: multiplies two 512 x 512 all-ones matrices on the GPU and
// prints the inputs and the result.
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    int num = 512, i, j;
    size_t size = num * num * sizeof(int);
    printf("\n\tMatrix multiplication of two %d * %d matrices\n\n", num, num);
    // BUG FIX: the original declared int h_A[num][num] etc. — three ~1 MiB
    // variable-length arrays on the stack, which is non-standard C++ and can
    // overflow the stack. Allocate the host matrices on the heap instead,
    // indexed row-major as h[i * num + j].
    int *h_A = (int *)malloc(size);
    int *h_B = (int *)malloc(size);
    int *h_C = (int *)malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host matrices!\n");
        exit(EXIT_FAILURE);
    }
    printf("Initializing host input vectors...\n");
    for (int i = 0; i < num; i++)
    {
        for (int j = 0; j < num; j++)
        {
            h_A[i * num + j] = 1;
            h_B[i * num + j] = 1;
        }
    }
    // Allocate device memory (with error checking)
    printf("Allocating device memory...\n");
    int *d_A = NULL;
    err = cudaMalloc((void **)&d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    int *d_B = NULL;
    err = cudaMalloc((void **)&d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    int *d_C = NULL;
    err = cudaMalloc((void **)&d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy from host to device
    printf("Copying input from host to device...\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector B from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Displaying the input matrices...\n\nMatrix A: \n");
    for(i=0; i<num; i++){
        for(j=0; j<num; j++)
            printf("%d ", h_A[i * num + j]);
        printf("\n");
    }
    printf("\nMatrix B: ");
    for(i=0; i<num; i++){
        for(j=0; j<num; j++)
            printf("%d ", h_B[i * num + j]);
        printf("\n");
    }
    printf("\n");
    // Launch CUDA Kernel: one thread per output element
    printf("Launching vector multiplication kernel...\n");
    dim3 dimBlock(THREAD_NUM, THREAD_NUM, 1);
    dim3 dimGrid((int) ceil((float)num/dimBlock.x), (int) ceil((float)num/dimBlock.y), 1);
    matrixMul<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, num);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy result from device to host
    printf("Copying result from device to host...\n");
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    printf("Displaying the output matrix...\n\nMatrix C: \n");
    for(i=0; i<num; i++){
        for(j=0; j<num; j++)
            printf("%d ", h_C[i * num + j]);
        printf("\n");
    }
    printf("\n");
    // Free device global memory and host buffers
    printf("Freeing device memory...\n");
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    printf("Done.\n\n");
    return 0;
}
|
9,106 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
// Computes out = vec^T * mat, where mat is N x M (row-major) and out has
// M elements. One thread per output column, guarded against the grid tail.
__global__
void kernel(float *vec, float *mat, float *out, const int N, const int M)
{
    int tid=threadIdx.x+blockIdx.x*blockDim.x;
    if(tid<M)
    {
        // BUG FIX: the original declared a local `sum` but never used it,
        // accumulating with `out[tid] +=` into device memory that the caller
        // never initialized — so the result contained garbage. Accumulate
        // locally and store once.
        float sum=0;
        for(int i=0; i<N; i++)
            sum += vec[i]*mat[(i*M)+tid];
        out[tid] = sum;
    }
}
void init_array(float *a, const int N);
void init_mat(float *a, const int N, const int M);
void print_array(float *a, const int N, char *d);
void print_mat(float *a, const int N, const int M, char *d);
// Demo: out = vec^T * mat with random host data; prints inputs and result.
int main (void)
{
    srand( time(NULL) );
    float *a, *b, *c;
    float *dev_a, *dev_b, *dev_c;
    int N=5; // rows of mat / length of input vector
    int M=7; // columns of mat / length of output vector
    a=(float*)malloc(sizeof(float)*N);
    b=(float*)malloc(sizeof(float)*N*M);
    c=(float*)malloc(sizeof(float)*M);
    init_array(a, N);
    init_mat(b, N, M);
    init_array(c, M);
    printf("<<<<<<<<<< initial data:\n");
    print_array(a, N, "in-vector");
    print_mat(b, N, M, "matrix");
    print_array(c, M, "out-vector");
    cudaMalloc((void**)&dev_a, sizeof(float)*N);
    cudaMalloc((void**)&dev_b, sizeof(float)*N*M);
    cudaMalloc((void**)&dev_c, sizeof(float)*M);
    cudaMemcpy(dev_a, a, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(float)*N*M, cudaMemcpyHostToDevice);
    // BUG FIX: dev_c was never initialized, but the kernel accumulated into
    // it with +=, so the printed result contained device-memory garbage.
    // Zero the output buffer before launching.
    cudaMemset(dev_c, 0, sizeof(float)*M);
    kernel<<<M/256+1, 256>>>(dev_a, dev_b, dev_c, N, M);
    cudaMemcpy(c, dev_c, sizeof(float)*M, cudaMemcpyDeviceToHost);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    printf(">>>>>>>>>> RESULTADPS:\n");
    print_array(c, M, "out-vector");
    // Free host buffers as well (previously leaked).
    free(a);
    free(b);
    free(c);
    return 0;
};
// Fill a[0..N-1] with random values drawn uniformly from {1, 2, 3, 4}.
void init_array(float *a, const int N)
{
    for (int i = 0; i < N; i++)
        a[i] = (float)(rand() % 4 + 1);
}
// Fill the N x M row-major matrix a with random values from {1, 2, 3, 4}.
void init_mat(float *a, const int N, const int M)
{
    for (int row = 0; row < N; row++)
        for (int col = 0; col < M; col++)
            a[row * M + col] = (float)(rand() % 4 + 1);
}
// Print the N entries of vector a, one per line, labeled "d[i]: value".
void print_array(float *a, const int N, char *d) {
    for (int i = 0; i < N; i++) {
        printf("\n%s[%d]: %f",d, i, a[i]);
    }
    printf("\n");
}
// Print the N x M row-major matrix a, one labeled row per line with
// tab-separated entries.
void print_mat(float *a, const int N, const int M, char *d) {
    for (int row = 0; row < N; row++) {
        printf("\n%s[%d]:", d, row);
        for (int col = 0; col < M; col++) {
            printf("\t%6.4f", a[row * M + col]);
        }
    }
    printf("\n");
}
9,107 | #include "includes.h"
// Branch-free per-row selection over the d x n matrices a and b (stored
// with leading dimension d, element (i, l) at index i + l*d). For element
// (i, j) the kernel appears to compute, via predicated updates, the a-value
// (and its paired b-value) that should occupy slot j of row i in sorted
// order, writing them to sa/sb — TODO confirm against the algorithm's
// reference.
//
// NOTE(review): in each branch, `mewa` is overwritten on the second line
// *before* the third line re-evaluates the predicate `( a[il] > mewa )`
// (resp. `<`) for the `mewb` update — so whenever mewa was just replaced by
// a[il], the mewb predicate becomes false and mewb is NOT updated. If the
// three updates were intended to share one predicate, it should be computed
// once before any update; confirm before changing.
__global__ void sortIndexKde ( const int d, const int n, const float *a, const float *b, float *sa, float *sb ) {
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
// Flattened index of element (i, j).
int ij = i + j * d;
int mewj, il;
float mewa, mewb;
if ( i < d && j < n ) {
mewj = j;
mewa = a[ij];
mewb = b[ij];
for ( int l = 0; l < n; l++ ) {
il = i + l * d;
if ( l > j ) {
mewj += ( a[il] > mewa ) * ( l - mewj );
mewa += ( a[il] > mewa ) * ( a[il] - mewa );
mewb += ( a[il] > mewa ) * ( b[il] - mewb );
} else if ( l < j ) {
mewj += ( a[il] < mewa ) * ( l - mewj );
mewa += ( a[il] < mewa ) * ( a[il] - mewa );
mewb += ( a[il] < mewa ) * ( b[il] - mewb );
}
}
sa[ij] = mewa;
sb[ij] = mewb;
}
}
9,108 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#define MATRIX_DIM 10
// Allocate a MATRIX_DIM-element host array filled with random doubles
// in [0, 10). Caller owns the returned memory.
__host__ double* createArrayWithRandoms(){
    double *values = (double *) malloc(MATRIX_DIM * sizeof(double));
    for (int i = 0; i < MATRIX_DIM; i++)
        values[i] = (10.0*rand()/(RAND_MAX+1.0));
    return values;
}
// Element-wise sum c = a + b over `length` doubles; one thread per element
// with a tail guard.
__global__ void addArrays(double *a, double *b, double *c, int length){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < length)
        c[idx] = a[idx] + b[idx];
}
// Abort the program with a diagnostic if a CUDA call did not succeed.
// `point` names the call site for the error message.
__host__ void checkError(cudaError_t error, const char *point){
    if (error == cudaSuccess)
        return;
    printf("there was an error at %s, error code: %d", point, error);
    exit(EXIT_FAILURE);
}
// Demo: element-wise addition of two random arrays on the GPU, with error
// checking on every CUDA call.
int main(){
    cudaError_t error = cudaSuccess;
    size_t size = MATRIX_DIM * sizeof(double);
    double *h_a = createArrayWithRandoms();
    double *h_b = createArrayWithRandoms();
    double *h_c = (double *) malloc(MATRIX_DIM * sizeof(double));
    double *d_a , *d_b, *d_c;
    error = cudaMalloc(&d_a, size);
    checkError(error, "allocating device memory for A");
    error = cudaMalloc(&d_b, size);
    checkError(error, "allocating device memory for B");
    error = cudaMalloc(&d_c, size);
    checkError(error, "allocating device memory for C");
    // BUG FIX: the cudaMemcpy return values were discarded, so each
    // checkError below re-tested the stale status of the last cudaMalloc.
    error = cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    checkError(error, "copy A from host to device");
    error = cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
    checkError(error, "copy B from host to device");
    // Note: h_c is uninitialized and d_c is fully overwritten by the kernel;
    // this copy is redundant but kept for parity with the original flow.
    error = cudaMemcpy(d_c, h_c, size, cudaMemcpyHostToDevice);
    checkError(error, "copy C from host to device");
    addArrays<<< 1, MATRIX_DIM >>>(d_a, d_b, d_c, MATRIX_DIM);
    // Surface launch-configuration errors too.
    checkError(cudaGetLastError(), "launching addArrays kernel");
    error = cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
    checkError(error, "copy C from device to host");
    for (int i = 0; i < MATRIX_DIM; i++)
        printf("%.2f + %.2f = %.2f \n", *(h_a + i), *(h_b + i), *(h_c + i));
    free(h_a);
    free(h_b);
    free(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
9,109 | // RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
// Taken from: http://docs.nvidia.com/cuda/curand/device-api-overview.html#poisson-api-example
/*
* This program uses CURAND library for Poisson distribution
* to simulate queues in store for 16 hours. It shows the
* difference of using 3 different APIs:
* - HOST API -arrival of customers is described by Poisson(4)
* - SIMPLE DEVICE API -arrival of customers is described by
* Poisson(4*(sin(x/100)+1)), where x is number of minutes
* from store opening time.
* - ROBUST DEVICE API -arrival of customers is described by:
* - Poisson(2) for first 3 hours.
* - Poisson(1) for second 3 hours.
* - Poisson(3) after 6 hours.
*/
#include <stdio.h>
#include <stdlib.h>
// CHECK: #include <hip/hip_runtime.h>
#include <cuda.h>
// CHECK: #include <hiprand_kernel.h>
#include <curand_kernel.h>
// CHECK: #include <hiprand.h>
#include <curand.h>
// CHECK: #define CUDA_CALL(x) do { if((x) != hipSuccess) {
#define CUDA_CALL(x) do { if((x) != cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
// CHECK: #define CURAND_CALL(x) do { if((x)!=HIPRAND_STATUS_SUCCESS) {
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define HOURS 16
#define OPENING_HOUR 7
#define CLOSING_HOUR (OPENING_HOUR + HOURS)
#define access_2D(type, ptr, row, column, pitch)\
*((type*)((char*)ptr + (row) * pitch) + column)
enum API_TYPE {
HOST_API = 0,
SIMPLE_DEVICE_API = 1,
ROBUST_DEVICE_API = 2,
};
/* global variables */
API_TYPE api;
int report_break;
int cashiers_load_h[HOURS];
__constant__ int cashiers_load[HOURS];
// CHECK: __global__ void setup_kernel(hiprandState_t *state)
__global__ void setup_kernel(curandState *state)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
/* Each thread gets same seed, a different sequence
number, no offset */
// CHECK: hiprand_init(1234, id, 0, &state[id]);
curand_init(1234, id, 0, &state[id]);
}
// Advance one simulated minute for store `id`: add the newly arrived
// customers, subtract those served (2 per cashier on duty this hour, from
// the __constant__ cashiers_load table), clamp the queue length at zero,
// and record the new length in the pitched 2D results array
// (row = min-1, column = id).
__inline__ __device__
void update_queue(int id, int min, unsigned int new_customers,
unsigned int &queue_length,
unsigned int *queue_lengths, size_t pitch)
{
int balance;
// Net change this minute; signed so a surplus of service capacity is
// representable.
balance = new_customers - 2 * cashiers_load[(min-1)/60];
if (balance + (int)queue_length <= 0){
// More service capacity than waiting customers: the queue empties.
queue_length = 0;
}else{
queue_length += balance;
}
/* Store results */
access_2D(unsigned int, queue_lengths, min-1, id, pitch)
= queue_length;
}
// CHECK: __global__ void simple_device_API_kernel(hiprandState_t *state,
__global__ void simple_device_API_kernel(curandState *state,
unsigned int *queue_lengths, size_t pitch)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int new_customers;
unsigned int queue_length = 0;
/* Copy state to local memory for efficiency */
// CHECK: hiprandState_t localState = state[id];
curandState localState = state[id];
/* Simulate queue in time */
for(int min = 1; min <= 60 * HOURS; min++) {
/* Draw number of new customers depending on API */
// CHECK: new_customers = hiprand_poisson(&localState,
new_customers = curand_poisson(&localState,
4*(sin((float)min/100.0)+1));
/* Update queue */
update_queue(id, min, new_customers, queue_length,
queue_lengths, pitch);
}
/* Copy state back to global memory */
state[id] = localState;
}
__global__ void host_API_kernel(unsigned int *poisson_numbers,
unsigned int *queue_lengths, size_t pitch)
{
int id = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int new_customers;
unsigned int queue_length = 0;
/* Simulate queue in time */
for(int min = 1; min <= 60 * HOURS; min++) {
/* Get random number from global memory */
new_customers = poisson_numbers
[blockDim.x * gridDim.x * (min -1) + id];
/* Update queue */
update_queue(id, min, new_customers, queue_length,
queue_lengths, pitch);
}
}
// CHECK: __global__ void robust_device_API_kernel(hiprandState_t *state,
// CHECK: hiprandDiscreteDistribution_t poisson_1,
// CHECK: hiprandDiscreteDistribution_t poisson_2,
// CHECK: hiprandDiscreteDistribution_t poisson_3,
__global__ void robust_device_API_kernel(curandState *state,
curandDiscreteDistribution_t poisson_1,
curandDiscreteDistribution_t poisson_2,
curandDiscreteDistribution_t poisson_3,
unsigned int *queue_lengths, size_t pitch)
{
int id = threadIdx.x + blockIdx.x * 64;
unsigned int new_customers;
unsigned int queue_length = 0;
/* Copy state to local memory for efficiency */
// CHECK: hiprandState_t localState = state[id];
curandState localState = state[id];
/* Simulate queue in time */
/* first 3 hours */
for(int min = 1; min <= 60 * 3; min++) {
/* draw number of new customers depending on API */
new_customers =
// CHECK: hiprand_discrete(&localState, poisson_2);
curand_discrete(&localState, poisson_2);
/* Update queue */
update_queue(id, min, new_customers, queue_length,
queue_lengths, pitch);
}
/* second 3 hours */
for(int min = 60 * 3 + 1; min <= 60 * 6; min++) {
/* draw number of new customers depending on API */
new_customers =
// CHECK: hiprand_discrete(&localState, poisson_1);
curand_discrete(&localState, poisson_1);
/* Update queue */
update_queue(id, min, new_customers, queue_length,
queue_lengths, pitch);
}
/* after 6 hours */
for(int min = 60 * 6 + 1; min <= 60 * HOURS; min++) {
/* draw number of new customers depending on API */
new_customers =
// CHECK: hiprand_discrete(&localState, poisson_3);
curand_discrete(&localState, poisson_3);
/* Update queue */
update_queue(id, min, new_customers, queue_length,
queue_lengths, pitch);
}
/* Copy state back to global memory */
state[id] = localState;
}
/* Set time intervals between reports */
/* Prompt on stdin for the interval, in minutes, between queue reports;
   loops until a positive value has been read into the global report_break.
   A failed scanf leaves report_break unchanged and simply re-tests the
   loop condition. */
void report_settings()
{
do{
printf("Set time intervals between queue reports");
printf("(in minutes > 0)\n");
if (scanf("%d", &report_break) == 0) continue;
}while(report_break <= 0);
}
/* Set number of cashiers each hour */
/* Interactively build the per-hour cashier schedule: repeatedly read an
 * arrival and departure hour and increment the per-hour head count in the
 * host-side table cashiers_load_h, then print the resulting schedule.
 * The parameter shadows the __constant__ device symbol of the same name
 * and must not be dereferenced on the host. */
void add_cachiers(int *cashiers_load)
{
    int i, min, max, begin, end;
    printf("Cashier serves 2 customers per minute...\n");
    for (i = 0; i < HOURS; i++){
        cashiers_load_h[i] = 0;
    }
    while (true){
        printf("Adding cashier...\n");
        min = OPENING_HOUR;
        max = CLOSING_HOUR-1;
        do{
            printf("Set hour that cahier comes (%d-%d)",
                min, max);
            printf(" [type 0 to finish adding cashiers]\n");
            if (scanf("%d", &begin) == 0) continue;
        }while (begin > max || (begin < min && begin != 0));
        if (begin == 0) break;
        min = begin+1;
        max = CLOSING_HOUR;
        do{
            printf("Set hour that cahier leaves (%d-%d)",
                min, max);
            printf(" [type 0 to finish adding cashiers]\n");
            if (scanf("%d", &end) == 0) continue;
        }while (end > max || (end < min && end != 0));
        if (end == 0) break;
        for (i = begin - OPENING_HOUR;
                i < end - OPENING_HOUR; i++){
            cashiers_load_h[i]++;
        }
    }
    for (i = OPENING_HOUR; i < CLOSING_HOUR; i++){
        printf("\n%2d:00 - %2d:00 %d cashier",
            i, i+1, cashiers_load_h[i-OPENING_HOUR]);
        /* BUG FIX: the pluralization test read cashiers_load[...] — the
         * parameter, which aliases the device-side __constant__ symbol and
         * is not readable from host code. Use the host table, like the
         * printf above does. */
        if (cashiers_load_h[i-OPENING_HOUR] != 1) printf("s");
    }
    printf("\n");
}
/* Set API type */
/* Prompt on stdin until the user enters 1, 2 or 3, and map the choice to
   the corresponding API_TYPE. The trailing fallback mirrors the original's
   unreachable default branch. */
API_TYPE set_API_type()
{
    printf("Choose API type:\n");
    int choice;
    do{
        printf("type 1 for HOST API\n");
        printf("type 2 for SIMPLE DEVICE API\n");
        printf("type 3 for ROBUST DEVICE API\n");
        if (scanf("%d", &choice) == 0) continue;
    }while( choice < 1 || choice > 3);
    if (choice == 1) return HOST_API;
    if (choice == 2) return SIMPLE_DEVICE_API;
    if (choice == 3) return ROBUST_DEVICE_API;
    fprintf(stderr, "wrong API\n");
    return HOST_API;
}
void settings()
{
add_cachiers(cashiers_load);
// CHECK: hipMemcpyToSymbol("cashiers_load", cashiers_load_h,
// CHECK: HOURS * sizeof(int), 0, hipMemcpyHostToDevice);
cudaMemcpyToSymbol("cashiers_load", cashiers_load_h,
HOURS * sizeof(int), 0, cudaMemcpyHostToDevice);
report_settings();
api = set_API_type();
}
void print_statistics(unsigned int *hostResults, size_t pitch)
{
int min, i, hour, minute;
unsigned int sum;
for(min = report_break; min <= 60 * HOURS;
min += report_break) {
sum = 0;
for(i = 0; i < 64 * 64; i++) {
sum += access_2D(unsigned int, hostResults,
min-1, i, pitch);
}
hour = OPENING_HOUR + min/60;
minute = min%60;
printf("%2d:%02d # of waiting customers = %10.4g |",
hour, minute, (float)sum/(64.0 * 64.0));
printf(" # of cashiers = %d | ",
cashiers_load_h[(min-1)/60]);
printf("# of new customers/min ~= ");
switch (api){
case HOST_API:
printf("%2.2f\n", 4.0);
break;
case SIMPLE_DEVICE_API:
printf("%2.2f\n",
4*(sin((float)min/100.0)+1));
break;
case ROBUST_DEVICE_API:
if (min <= 3 * 60){
printf("%2.2f\n", 2.0);
}else{
if (min <= 6 * 60){
printf("%2.2f\n", 1.0);
}else{
printf("%2.2f\n", 3.0);
}
}
break;
default:
fprintf(stderr, "Wrong API\n");
}
}
}
int main(int argc, char *argv[])
{
int n;
size_t pitch;
// CHECK: hiprandState_t *devStates;
curandState *devStates;
unsigned int *devResults, *hostResults;
unsigned int *poisson_numbers_d;
// CHECK: hiprandDiscreteDistribution_t poisson_1, poisson_2;
// CHECK: hiprandDiscreteDistribution_t poisson_3;
// CHECK: hiprandGenerator_t gen;
curandDiscreteDistribution_t poisson_1, poisson_2;
curandDiscreteDistribution_t poisson_3;
curandGenerator_t gen;
/* Setting cashiers, report and API */
settings();
/* Allocate space for results on device */
// CHECK: CUDA_CALL(hipMallocPitch((void **)&devResults, &pitch,
CUDA_CALL(cudaMallocPitch((void **)&devResults, &pitch,
64 * 64 * sizeof(unsigned int), 60 * HOURS));
/* Allocate space for results on host */
hostResults = (unsigned int *)calloc(pitch * 60 * HOURS,
sizeof(unsigned int));
/* Allocate space for prng states on device */
// CHECK: CUDA_CALL(hipMalloc((void **)&devStates, 64 * 64 *
// CHECK: sizeof(hiprandState_t)));
CUDA_CALL(cudaMalloc((void **)&devStates, 64 * 64 *
sizeof(curandState)));
/* Setup prng states */
if (api != HOST_API){
// CHECK: hipLaunchKernelGGL(setup_kernel, dim3(64), dim3(64), 0, 0, devStates);
setup_kernel<<<64, 64>>>(devStates);
}
/* Simulate queue */
switch (api){
case HOST_API:
/* Create pseudo-random number generator */
// CHECK: CURAND_CALL(hiprandCreateGenerator(&gen,
// CHECK: HIPRAND_RNG_PSEUDO_DEFAULT));
CURAND_CALL(curandCreateGenerator(&gen,
CURAND_RNG_PSEUDO_DEFAULT));
/* Set seed */
// CHECK: CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(
CURAND_CALL(curandSetPseudoRandomGeneratorSeed(
gen, 1234ULL));
/* compute n */
n = 64 * 64 * HOURS * 60;
/* Allocate n unsigned ints on device */
// CHECK: CUDA_CALL(hipMalloc((void **)&poisson_numbers_d,
CUDA_CALL(cudaMalloc((void **)&poisson_numbers_d,
n * sizeof(unsigned int)));
/* Generate n unsigned ints on device */
// CHECK: CURAND_CALL(hiprandGeneratePoisson(gen,
CURAND_CALL(curandGeneratePoisson(gen,
poisson_numbers_d, n, 4.0));
// CHECK: hipLaunchKernelGGL(host_API_kernel, dim3(64), dim3(64), 0, 0, poisson_numbers_d,
host_API_kernel<<<64, 64>>>(poisson_numbers_d,
devResults, pitch);
/* Cleanup */
// CHECK: CURAND_CALL(hiprandDestroyGenerator(gen));
CURAND_CALL(curandDestroyGenerator(gen));
break;
case SIMPLE_DEVICE_API:
// CHECK: hipLaunchKernelGGL(simple_device_API_kernel, dim3(64), dim3(64), 0, 0, devStates,
simple_device_API_kernel<<<64, 64>>>(devStates,
devResults, pitch);
break;
case ROBUST_DEVICE_API:
/* Create histograms for Poisson(1) */
// CHECK: CURAND_CALL(hiprandCreatePoissonDistribution(1.0,
CURAND_CALL(curandCreatePoissonDistribution(1.0,
&poisson_1));
/* Create histograms for Poisson(2) */
// CHECK: CURAND_CALL(hiprandCreatePoissonDistribution(2.0,
CURAND_CALL(curandCreatePoissonDistribution(2.0,
&poisson_2));
/* Create histograms for Poisson(3) */
// CHECK: CURAND_CALL(hiprandCreatePoissonDistribution(3.0,
CURAND_CALL(curandCreatePoissonDistribution(3.0,
&poisson_3));
// CHECK: hipLaunchKernelGGL(robust_device_API_kernel, dim3(64), dim3(64), 0, 0, devStates,
robust_device_API_kernel<<<64, 64>>>(devStates,
poisson_1, poisson_2, poisson_3,
devResults, pitch);
/* Cleanup */
// CHECK: CURAND_CALL(hiprandDestroyDistribution(poisson_1));
// CHECK: CURAND_CALL(hiprandDestroyDistribution(poisson_2));
// CHECK: CURAND_CALL(hiprandDestroyDistribution(poisson_3));
CURAND_CALL(curandDestroyDistribution(poisson_1));
CURAND_CALL(curandDestroyDistribution(poisson_2));
CURAND_CALL(curandDestroyDistribution(poisson_3));
break;
default:
fprintf(stderr, "Wrong API\n");
}
/* Copy device memory to host */
// CHECK: CUDA_CALL(hipMemcpy2D(hostResults, pitch, devResults,
// CHECK: 60 * HOURS, hipMemcpyDeviceToHost));
CUDA_CALL(cudaMemcpy2D(hostResults, pitch, devResults,
pitch, 64 * 64 * sizeof(unsigned int),
60 * HOURS, cudaMemcpyDeviceToHost));
/* Show result */
print_statistics(hostResults, pitch);
/* Cleanup */
// CHECK: CUDA_CALL(hipFree(devStates));
// CHECK: CUDA_CALL(hipFree(devResults));
CUDA_CALL(cudaFree(devStates));
CUDA_CALL(cudaFree(devResults));
free(hostResults);
return EXIT_SUCCESS;
}
|
9,110 | #include <iostream>
#include <stdio.h>
#include <time.h>
#include <iomanip>
using namespace std;
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
#define threadsPerBlock 1000
/**
 * Snapshot the halo ("ghost") values each block of run() will need.
 *
 * Launched as <<<1, numBlocks>>>: thread t serves block t of the run()
 * kernel.  ghost[2*t] receives the sample just left of block t's slice
 * (rod_new[t*threadsPerBlock]) and ghost[2*t+1] the sample just right of
 * it; for the last block the right ghost is the fixed boundary sample
 * rod_new[numseg+1].  Capturing these before run() launches keeps the
 * update Jacobi-like across block boundaries.
 */
__global__ void setGhostPoints(double *rod_new, double *ghost, int numBlocks, int numseg, long int steps){
int sec;
// Right-ghost index: the surface boundary for the last block, otherwise
// the first interior sample owned by the next block.
threadIdx.x == numBlocks - 1 ? sec = numseg + 1 : sec = (threadIdx.x+1) * threadsPerBlock + 1;
ghost[2*threadIdx.x] = rod_new[threadIdx.x*threadsPerBlock];
ghost[2*threadIdx.x+1] = rod_new[sec];
}
/**
 * Initialize the interior rod samples with the analytic initial field
 * profile. One thread per interior sample; rod_new holds numseg + 2
 * doubles (index 0 and numseg+1 are boundary values set by initBound).
 *
 * Fix: the grid has numBlocks * threadsPerBlock threads, which exceeds
 * numseg whenever numseg is not a multiple of threadsPerBlock; without a
 * bound guard the tail threads wrote past rod_new[numseg], clobbering the
 * rod_new[numseg+1] boundary value and memory beyond the allocation.
 */
__global__ void init(double *rod_new, double imax, double ldr, double rlength, int numseg, int numBlocks){
	int i = blockIdx.x*threadsPerBlock + threadIdx.x + 1;
	// Guard the grid tail: only interior points 1..numseg are set here.
	if (i <= numseg)
		rod_new[i] = (1-(i*i*ldr*ldr/(3*rlength*rlength)))*3*mu0*imax*i*ldr/(4*PI*rlength*rlength);
}
// Set the two fixed boundary samples of the rod: zero on the axis
// (index 0) and the surface field mu0*I/(2*pi*R) at index numseg+1.
// Launched with a single thread.
__global__ void initBound(double *rod_new, double imax, double ldr, double rlength, int numseg){
	rod_new[0] = 0;
	rod_new[numseg+1] = mu0*imax/(2*PI*rlength);
}
/**
 * One explicit time step of the cylindrical diffusion update.
 *
 * Each block stages its slice of rod_new into dynamic shared memory
 * (threadsPerBlock doubles, passed as the launch's third argument), then
 * every active thread applies the finite-difference stencil using the
 * pre-captured ghost values at the block edges.
 *
 * Fixes:
 *  - 1/(2*i) and 1/(i*i) were integer divisions that truncate to 0 for
 *    every i >= 1, silently dropping the cylindrical correction terms;
 *    they are now evaluated in double precision.
 *  - inactive tail threads of the last block no longer read rod_new out
 *    of bounds when staging the shared copy (rod_new has numseg + 2
 *    elements).
 */
__global__ void run(double *ghost, double *rod_new, double aug, int numseg, int numBlocks, long int steps){
	int bi = blockIdx.x;
	int ti = threadIdx.x;
	int i = bi*threadsPerBlock + ti + 1;
	int threadsNeeded;
	// The last block may cover fewer than threadsPerBlock samples.
	bi == (numBlocks - 1) ? threadsNeeded = (numseg - (bi*threadsPerBlock)) : threadsNeeded = threadsPerBlock;
	extern __shared__ double rod_old_s[];
	double ghost_left = ghost[2*bi];
	double ghost_right = ghost[2*bi+1];
	// Stage this block's slice; tail threads store a dummy 0 instead of
	// reading past the end of rod_new.
	rod_old_s[ti] = (i <= numseg + 1) ? rod_new[i] : 0.0;
	__syncthreads();
	// Cylindrical finite-difference coefficients, in floating point
	// (previously integer division made the 1/(2i) and 1/i^2 terms 0).
	double cp = 1.0 + 1.0/(2.0*i);        // right-neighbour coefficient
	double cm = 1.0 - 1.0/(2.0*i);        // left-neighbour coefficient
	double cc = -2.0 - 1.0/((double)i*i); // centre coefficient
	if(threadsNeeded == 1 && ti == 0)
		rod_new[i] += aug*(cp*ghost_right + cc*rod_old_s[ti] + cm*ghost_left);
	else if(i==1)
		rod_new[1] += aug*(2*rod_old_s[ti+1] - 4*rod_old_s[ti]);
	else if(ti == 0)
		rod_new[i] += aug*(cp*rod_old_s[ti+1] + cc*rod_old_s[ti] + cm*ghost_left);
	else if(ti == threadsNeeded - 1)
		rod_new[i] += aug*(cp*ghost_right + cc*rod_old_s[ti] + cm*rod_old_s[ti-1]);
	else if(i<(numseg + 1))
		rod_new[i] += aug*(cp*rod_old_s[ti+1] + cc*rod_old_s[ti] + cm*rod_old_s[ti-1]);
}
/**
 * Explicit finite-difference simulation of magnetic field diffusion into a
 * rod, parameterized interactively from stdin; writes the r grid and the
 * initial and final field profiles to segResults.txt.
 */
int main(){
FILE *myfile;
myfile = fopen("segResults.txt", "w");
// NOTE(review): fopen result is not checked; an unwritable file would
// make the fprintf calls below dereference NULL.
double imax, rlength, eta, tstep, ldr, tottime;
int numseg;
// Read physical parameters and discretisation from the user.
printf("What is your I max? ");
scanf("%lf", &imax);
printf("What is the length of your rod? ");
scanf("%lf", &rlength);
printf("What is eta? ");
scanf("%lf", &eta);
printf("How many segments would you like? ");
scanf("%d", &numseg);
// Radial step: numseg interior points plus the surface point.
ldr = rlength/(numseg+1);
// Stability-limited explicit time step: 0.25 * dr^2 * mu0 / eta.
tstep = 0.25*ldr*ldr*mu0/eta;
printf("How long would you like to run? ");
scanf("%lf", &tottime);
double *h_rod, *d_rod, *d_ghost;
// numseg interior samples + 2 boundary samples (indices 0 and numseg+1).
size_t rod_size = (numseg + 2) * sizeof(double);
h_rod = (double*)malloc(rod_size);
cudaMalloc(&d_rod, rod_size);
int numBlocks = (numseg + threadsPerBlock -1)/threadsPerBlock;
// Two halo values per block: the samples just left and right of its slice.
size_t ghost_size = numBlocks * 2 * sizeof(double);
cudaMalloc(&d_ghost, ghost_size);
// Fixed boundary values first, then the analytic initial profile.
initBound<<<1,1>>>(d_rod, imax, ldr, rlength, numseg);
init<<<numBlocks,threadsPerBlock>>>(d_rod, imax, ldr, rlength, numseg, numBlocks);
int out;
//output r values
for(out = 0; out<numseg+1; out++){
fprintf( myfile, "%lf ", out*ldr );
}
fprintf( myfile, "%lf\n", out*ldr );
// Initial field profile; the blocking cudaMemcpy also synchronises with
// the init kernels above.
cudaMemcpy(h_rod, d_rod, rod_size, cudaMemcpyDeviceToHost);
for(out = 0; out<numseg+1; out++){
fprintf( myfile, "%lf ", *(h_rod+out) );
}
fprintf( myfile, "%lf\n", *(h_rod+out) );
// Dimensionless diffusion coefficient of the update stencil.
double aug = eta*tstep/(mu0*ldr*ldr);
long int total_steps = tottime / tstep;
printf("\nSteps: %ld\tTstep: %le\n", total_steps, tstep);
clock_t begin, end;
double time_spent;
begin = clock();
//run
long int steps = 0;
// Each iteration snapshots the block-boundary halos, then advances one
// explicit step; kernels on the default stream serialise correctly.
while(steps< total_steps){
setGhostPoints<<<1,numBlocks>>>(d_rod, d_ghost, numBlocks, numseg, steps);
run<<<numBlocks, threadsPerBlock, threadsPerBlock*sizeof(double)>>>(d_ghost, d_rod, aug, numseg, numBlocks, steps);
steps++;
}
end = clock();
// NOTE(review): clock() is sampled before any device synchronisation, so
// this measures kernel-launch time, not execution time; the cudaMemcpy
// below is what actually waits for the GPU.
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
// Final field profile.
cudaMemcpy(h_rod, d_rod, rod_size, cudaMemcpyDeviceToHost);
for(out = 0; out<numseg+1; out++){
fprintf( myfile, "%lf ", *(h_rod+out) );
}
fprintf( myfile, "%lf\n", *(h_rod+out) );
fprintf(myfile, "STOP\n");
fclose(myfile);
cudaFree(d_rod);
cudaFree(d_ghost);
free(h_rod);
cout << "\n------------------------------------\nExecution took: "<< time_spent << " sec\n";
return 0;
}
|
9,111 | #include <iostream>
// Fill every element of `vector` with 1, sequentially, from a single
// GPU thread (intended for a <<<1,1>>> launch).
__global__ void fillMonoThread(int* vector, std::size_t size){
    std::size_t idx = 0;
    while(idx < size){
        vector[idx] = 1;
        ++idx;
    }
}
// Exercise fillMonoThread: fill a small device vector with 1s from a
// single GPU thread, copy it back and print each element.
// Fix: the host buffer allocated with new[] was never released.
void testMonoThread(){
    std::size_t size = 10;
    int* vector_h = new int[size];
    int* vector_d;
    // allocate memory on GPU
    cudaMalloc(&vector_d, size * sizeof(int));
    // run cuda code on GPU
    fillMonoThread<<<1,1>>>(vector_d, size);
    // once it's done transfer GPU data to CPU
    // thus we could use it here, on CPU host
    cudaMemcpy(vector_h, vector_d, size*sizeof(int), cudaMemcpyDeviceToHost);
    for(std::size_t i=0; i<size; ++i){
        std::cout << vector_h[i] << std::endl;
    }
    cudaFree(vector_d);
    delete[] vector_h;  // was leaked
}
// Each thread of the (single) block writes its own thread index into the
// matching slot; `size` is unused because the launch supplies exactly one
// thread per element.
__global__ void fillMutliThreads(int* vector, std::size_t size){
    int const tid = threadIdx.x;
    vector[tid] = tid;
}
// Exercise fillMutliThreads: one block of `size` threads each writes its
// index; copy back and print. Fix: release the leaked host buffer.
void testMultiThreads(){
    std::size_t size = 1024;
    int* vector_h = new int[size];
    int* vector_d;
    // allocate memory on GPU
    cudaMalloc(&vector_d, size * sizeof(int));
    // run cuda code on GPU, on `size` threads
    fillMutliThreads<<<1,size>>>(vector_d, size);
    // once it's done transfer GPU data to CPU
    // thus we could use it here, on CPU host
    cudaMemcpy(vector_h, vector_d, size*sizeof(int), cudaMemcpyDeviceToHost);
    for(std::size_t i=0; i<size; ++i){
        std::cout << vector_h[i] << ' ' ;
    }
    cudaFree(vector_d);
    delete[] vector_h;  // was leaked
}
// Write each thread's global (block-strided) index into its slot; the
// launch is expected to provide exactly `size` threads in total.
__global__ void fillMutliBlocs(int* vector, std::size_t size){
    int const globalId = threadIdx.x + blockIdx.x * blockDim.x;
    vector[globalId] = globalId;
}
// Exercise fillMutliBlocs: two blocks of size/2 threads each write their
// global index; copy back and print. Fix: release the leaked host buffer.
void testMultiBlocs(){
    std::size_t size = 1024;
    int* vector_h = new int[size];
    int* vector_d;
    // allocate memory on GPU
    cudaMalloc(&vector_d, size * sizeof(int));
    // run cuda code on GPU, on `size` threads split over two blocks
    fillMutliBlocs<<<2,size/2>>>(vector_d, size);
    // once it's done transfer GPU data to CPU
    // thus we could use it here, on CPU host
    cudaMemcpy(vector_h, vector_d, size*sizeof(int), cudaMemcpyDeviceToHost);
    for(std::size_t i=0; i<size; ++i){
        std::cout << vector_h[i] << ' ' ;
    }
    cudaFree(vector_d);
    delete[] vector_h;  // was leaked
}
// Element-wise in-place addition v1[i] += v2[i] for i < size; one thread
// per element, out-of-range threads in the last block do nothing.
__global__ void addVector(int* v1, int* v2, std::size_t size){
    int const gid = threadIdx.x + blockDim.x * blockIdx.x;
    if(gid >= size){
        return;
    }
    v1[gid] += v2[gid];
}
// Exercise addVector: build two pinned host vectors filled with 0..size-1,
// add them on the GPU and print the element-wise sums.
void testAdd(){
    std::size_t const size = 100;
    std::size_t const bytes = size * sizeof(int);
    int* host0 = nullptr;
    int* host1 = nullptr;
    int* dev0 = nullptr;
    int* dev1 = nullptr;
    // http://horacio9573.no-ip.org/cuda/group__CUDART__MEMORY_g9f93d9600f4504e0d637ceb43c91ebad.html
    // Pinned host allocations.
    cudaMallocHost(&host0, bytes);
    cudaMallocHost(&host1, bytes);
    // Fill both vectors with their indices.
    for(int i = 0; i < size; ++i){
        host0[i] = i;
        host1[i] = i;
    }
    // Device allocations, with explicit error reporting.
    cudaError_t status = cudaMalloc(&dev0, bytes);
    if(status != cudaSuccess){
        std::cout << cudaGetErrorString(status) << std::endl;
        throw std::exception();
    }
    status = cudaMalloc(&dev1, bytes);
    if(status != cudaSuccess){
        std::cout << cudaGetErrorString(status) << std::endl;
        throw std::exception();
    }
    cudaMemcpy(dev0, host0, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev1, host1, bytes, cudaMemcpyHostToDevice);
    // Ceil-divide so a partial final block covers the tail of the vector.
    dim3 block(32);
    dim3 grid((size - 1) / block.x + 1);
    addVector<<<grid, block>>>(dev0, dev1, size);
    // Bring the GPU result back to the host and show it.
    cudaMemcpy(host0, dev0, bytes, cudaMemcpyDeviceToHost);
    for(int i = 0; i < size; ++i){
        std::cout << host0[i] << ' ';
    }
    // Release device, then pinned host memory.
    cudaFree(dev0);
    cudaFree(dev1);
    cudaFreeHost(host0);
    cudaFreeHost(host1);
}
// In-place 2D addition m1 += m2 over a width x height matrix stored
// row-major; one thread per element, guarded against partial edge blocks.
__global__ void addMatrix(int* m1, int* m2, std::size_t width, std::size_t height){
    int const x = blockDim.x * blockIdx.x + threadIdx.x; // column
    int const y = blockDim.y * blockIdx.y + threadIdx.y; // row
    if(x >= width || y >= height){
        return;
    }
    // Flatten (row, col) into the 1D buffer.
    int const idx = y * width + x;
    m1[idx] += m2[idx];
}
// Exercise addMatrix: two pinned 100x100 matrices filled with 0..size-1,
// added on the GPU with a 2D launch, result printed element by element.
void testAddMatrice(){
    std::size_t const width = 100;
    std::size_t const height = 100;
    std::size_t const size = width * height;
    std::size_t const bytes = size * sizeof(int);
    int* host0 = nullptr;
    int* host1 = nullptr;
    int* dev0 = nullptr;
    int* dev1 = nullptr;
    // http://horacio9573.no-ip.org/cuda/group__CUDART__MEMORY_g9f93d9600f4504e0d637ceb43c91ebad.html
    // Pinned host allocations.
    cudaMallocHost(&host0, bytes);
    cudaMallocHost(&host1, bytes);
    // Fill both matrices with their flat indices.
    for(int i = 0; i < size; ++i){
        host0[i] = i;
        host1[i] = i;
    }
    // Device allocations, with explicit error reporting.
    cudaError_t status = cudaMalloc(&dev0, bytes);
    if(status != cudaSuccess){
        std::cout << cudaGetErrorString(status) << std::endl;
        throw std::exception();
    }
    status = cudaMalloc(&dev1, bytes);
    if(status != cudaSuccess){
        std::cout << cudaGetErrorString(status) << std::endl;
        throw std::exception();
    }
    cudaMemcpy(dev0, host0, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev1, host1, bytes, cudaMemcpyHostToDevice);
    // 32x32 thread tiles; ceil-divide both dimensions for the edges.
    dim3 block(32, 32);
    dim3 grid((width - 1) / block.x + 1, (height - 1) / block.y + 1);
    addMatrix<<<grid, block>>>(dev0, dev1, width, height);
    // Bring the GPU result back and show it.
    cudaMemcpy(host0, dev0, bytes, cudaMemcpyDeviceToHost);
    for(int i = 0; i < size; ++i){
        std::cout << host0[i] << ' ';
    }
    // Release device, then pinned host memory.
    cudaFree(dev0);
    cudaFree(dev1);
    cudaFreeHost(host0);
    cudaFreeHost(host1);
}
/**
 * v1 += v2, staging each block's v2 slice through dynamic shared memory
 * (one int per thread; size passed as the launch's third argument).
 *
 * Fix: the shared-memory load of v2[threadId] was performed before the
 * bounds check, so tail threads of the last block read past the end of
 * v2. Each thread only ever reads its own shared slot, so no barrier is
 * needed and the load can simply move inside the guard.
 */
__global__ void addVectorShared(int* v1, int* v2, std::size_t size){
    extern __shared__ int shared[];
    int threadId = blockIdx.x * blockDim.x + threadIdx.x;
    if(threadId < size){
        shared[threadIdx.x] = v2[threadId];
        v1[threadId] += shared[threadIdx.x];
    }
}
// Exercise addVectorShared: pinned host vectors 0..size-1 are added on the
// GPU, with each block staging its v2 slice through dynamic shared memory.
void testAddShared(){
    std::size_t const size = 100;
    std::size_t const bytes = size * sizeof(int);
    int* host0 = nullptr;
    int* host1 = nullptr;
    int* dev0 = nullptr;
    int* dev1 = nullptr;
    // http://horacio9573.no-ip.org/cuda/group__CUDART__MEMORY_g9f93d9600f4504e0d637ceb43c91ebad.html
    // Pinned host allocations.
    cudaMallocHost(&host0, bytes);
    cudaMallocHost(&host1, bytes);
    // Fill both vectors with their indices.
    for(int i = 0; i < size; ++i){
        host0[i] = i;
        host1[i] = i;
    }
    // Device allocations, with explicit error reporting.
    cudaError_t status = cudaMalloc(&dev0, bytes);
    if(status != cudaSuccess){
        std::cout << cudaGetErrorString(status) << std::endl;
        throw std::exception();
    }
    status = cudaMalloc(&dev1, bytes);
    if(status != cudaSuccess){
        std::cout << cudaGetErrorString(status) << std::endl;
        throw std::exception();
    }
    cudaMemcpy(dev0, host0, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dev1, host1, bytes, cudaMemcpyHostToDevice);
    // Ceil-divided grid; the third launch argument sizes the dynamic
    // shared buffer at one int per thread in the block.
    dim3 block(32);
    dim3 grid((size - 1) / block.x + 1);
    addVectorShared<<<grid, block, block.x * sizeof(int)>>>(dev0, dev1, size);
    // Bring the GPU result back and show it.
    cudaMemcpy(host0, dev0, bytes, cudaMemcpyDeviceToHost);
    for(int i = 0; i < size; ++i){
        std::cout << host0[i] << ' ';
    }
    // Release device, then pinned host memory.
    cudaFree(dev0);
    cudaFree(dev1);
    cudaFreeHost(host0);
    cudaFreeHost(host1);
}
// Entry point: the earlier experiments are kept for reference but
// disabled; only the shared-memory vector addition demo runs.
int main(){
// testMonoThread();
// testMultiThreads();
// testMultiBlocs();
// testAdd();
testAddShared();
return 0;
}
|
9,112 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <math.h>
#include <unistd.h>
#include <curand.h>
#include <curand_kernel.h>
#define ISLAND 6
#define POPULATION 20
#define FACILITY 20
#define GENERATION 1
#define CROSSOVER 0.6
#define MUTATION 0.03
#define MIGRATION 15
#define INDIVIDUAL 5
#define H 15 // BAY height
#define W 10 // BAY width
/* Seed one curand state per individual.
 * Launched as <<<ISLAND, POPULATION>>>, so there is exactly one state for
 * every (island, individual) pair. */
__global__ void init(unsigned int seed, curandState_t* states) {
short b=blockIdx.x; // block index == island id
short t=threadIdx.x; // thread index == individual within the island
short n=blockDim.x; // threads per block (== POPULATION)
short x=b*n+t; // flat state index
/* we have to initialize the state */
curand_init(seed, /* the seed can be the same for each core, here we pass the time in from the CPU */
x, /* the sequence number should be different for each core (unless you want all
cores to get the same sequence of numbers for some reason - use thread id! */
0, /* the offset is how much extra we advance in the sequence for each call, can be 0 */
&states[x]);
}
/* Build a random permutation of the FACILITY ids for every individual:
 * each thread fills its own FACILITY-long slice of GA with 0..FACILITY-1
 * and then shuffles it in place with curand.
 * NOTE(review): swap-with-uniform-random-index is not a Fisher-Yates
 * shuffle (slightly biased) and `curand % FACILITY` has modulo bias —
 * presumably acceptable for GA seeding; confirm if uniformity matters. */
__global__ void randomData(curandState_t* states, short* GA){
short b=blockIdx.x; // island id
short t=threadIdx.x; // individual within the island
short n=blockDim.x; // threads per block (== POPULATION)
short x=b*n+t; // flat individual index
for(int j=0;j<FACILITY;j++){ // setup
GA[x*FACILITY + j] = j;
}
int i; // shuffle
for(i = 0; i < FACILITY; i++) {
short k = curand(&states[x]) % FACILITY;
int tmp = GA[x*FACILITY + i];
GA[x*FACILITY + i] = GA[x*FACILITY + k];
GA[x*FACILITY + k] = tmp;
}
}
/* Generate a random bay layout for every individual: each of the
 * FACILITY-1 flags is an independent coin flip marking whether a bay
 * break follows that facility position. */
__global__ void randomBay(curandState_t* states, bool* GB){
short b=blockIdx.x; // island id
short t=threadIdx.x; // individual within the island
short n=blockDim.x; // threads per block (== POPULATION)
short x=b*n+t; // flat individual index
int i; // random 0/1 per break slot
for(i = 0; i < FACILITY-1; i++) {
GB[x*(FACILITY-1) + i] = curand(&states[x]) % 2;
}
}
/**
 * Compute the (x, y) centre of every facility for each individual's
 * flexible-bay layout: facilities between bay breaks share a column of
 * width W, splitting the height H evenly.
 *
 * Fix: previously EVERY thread zeroed the ENTIRE position array while
 * threads of other blocks were already writing their results — a data
 * race (and O(total) redundant work).  Each thread now clears only its
 * own 2*FACILITY-float slice, which is also the only slice it writes.
 */
__global__ void calPosition(short *data, bool *bay, float *position){
    short b=blockIdx.x;  // island id
    short t=threadIdx.x; // individual within the island
    short n=blockDim.x;  // threads per block (== POPULATION)
    short x=b*n+t;       // flat individual index
    short posit = x * FACILITY;        // first facility slot of this individual
    short bayposit = x * (FACILITY-1); // first bay-break flag of this individual
    // Clear only this individual's coordinate slice (2 floats/facility).
    for(int i=0;i<FACILITY*2;i++){
        position[posit*2 + i] = 0;
    }
    short len = 1;  // facilities accumulated in the current bay
    short next = 0; // x offset of the current bay's left edge
    for(short f=0;f<FACILITY;f++){
        if(bay[bayposit+f] == 0){
            len = len + 1;
        }
        if(bay[bayposit+f] == 1 || f == FACILITY - 1 ){
            // The last slot has no break flag of its own; compensate.
            if(f == FACILITY - 1 && bay[bayposit+f] == 0){
                len = len - 1;
            }
            // All facilities of this bay share the column centre x.
            float x = W / 2.0 + next;
            for(short j=0;j<len;j++){
                position[posit*2+(f+j-len+1)*2] = x;
                // Stack the bay's facilities evenly over the height H.
                float y = H / (len * 2.0) * ( (j * 2) + 1) ;
                position[posit*2+(f+j-len+1)*2+1] = y;
            }
            len = 1;
            next = next + W;
        }
    }
}
/**
 * Driver for the GA facility-layout experiment: seeds per-individual RNG
 * states, generates random facility permutations and bay layouts, loads
 * the pairwise cost matrix from cost.txt, and per generation computes and
 * prints the facility positions.
 */
int main(){
float START, END;
START = clock();
curandState_t* states;
// One RNG state per individual across all islands.
cudaMalloc((void**) &states, ISLAND * POPULATION * sizeof(curandState_t));
// init seed
init<<<ISLAND, POPULATION>>>(time(NULL), states);
// generate random data
short *GA;
cudaMalloc((void**)&GA, ISLAND*POPULATION*FACILITY*sizeof(short));
bool *GB;
cudaMalloc((void**)&GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool));
randomData<<<ISLAND, POPULATION>>>(states, GA);
randomBay<<<ISLAND, POPULATION>>>(states, GB);
short data[ISLAND][POPULATION][FACILITY];
bool bay[ISLAND][POPULATION][FACILITY-1];
// Blocking copies double as synchronisation with the kernels above.
cudaMemcpy(data, GA, ISLAND*POPULATION*FACILITY*sizeof(short), cudaMemcpyDeviceToHost);
cudaMemcpy(bay, GB, ISLAND*POPULATION*(FACILITY-1)*sizeof(bool), cudaMemcpyDeviceToHost);
// print data
// NOTE(review): "%hu" prints signed shorts as unsigned; harmless for the
// 0..FACILITY-1 range but "%hd" would be the matching specifier.
for(int i=0;i<ISLAND;i++){
for(int j=0;j<POPULATION;j++){
for(int k=0;k<FACILITY;k++){
printf("%hu ", data[i][j][k]);
}
printf("\n");
}
}
// print bay
for(int i=0;i<ISLAND;i++){
for(int j=0;j<POPULATION;j++){
for(int k=0;k<FACILITY-1;k++){
printf("%d ", bay[i][j][k]);
}
printf("\n");
}
}
FILE *fPtr;
// Number of (from, to) cost entries: all ordered pairs of facilities.
int ttt = FACILITY * (FACILITY-1) ;
fPtr=fopen("cost.txt","r");
// NOTE(review): fopen result unchecked; a missing cost.txt crashes fscanf.
int cost[FACILITY][FACILITY] = {0};
int temp[ttt][3]; // cost
for(int i=0;i<ttt;i++){
fscanf(fPtr , "%d %d %d" , &temp[i][0], &temp[i][1], &temp[i][2]);
}
fclose(fPtr);
// Scatter the (from, to, cost) triples (1-based ids) into the 2D matrix.
for(int i=0;i<ttt;i++){ // 2 dimention cost
cost[ temp[i][0]-1 ][ temp[i][1]-1] = temp[i][2];
}
for(int i=0;i<FACILITY;i++){
for(int j=0;j<FACILITY;j++){
printf("%d ", cost[i][j]);
}
printf("\n");
}
int *Gcost;
cudaMalloc((void**)&Gcost, FACILITY*FACILITY*sizeof(int));
cudaMemcpy(Gcost, cost, FACILITY*FACILITY*sizeof(int), cudaMemcpyHostToDevice);
for(int gggggg=0;gggggg<GENERATION;gggggg++){ // generation start
float *Gposition;
// NOTE(review): allocated every generation and never freed — this leaks
// ISLAND*POPULATION*FACILITY*2 floats per iteration once GENERATION > 1.
cudaMalloc((void**)&Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float));
// calculate position
calPosition<<<ISLAND, POPULATION>>>(GA, GB, Gposition);
float position[ISLAND][POPULATION][FACILITY][2];
cudaMemcpy(position, Gposition, ISLAND*POPULATION*FACILITY*2*sizeof(float), cudaMemcpyDeviceToHost);
for(int i=0;i<ISLAND;i++){
for(int p=0;p<POPULATION;p++){
for(int f=0;f<FACILITY;f++){
for(int t=0;t<2;t++){
printf("%.2f ", position[i][p][f][t]);
}
printf("\n");
}
}
}
} // generation end
END = clock();
// Elapsed CPU time in seconds.
printf("%f\n", (END - START) / CLOCKS_PER_SEC);
return 0;
}
9,113 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/time.h>
#define MEM_DIM 64
#define RADIO 3
#define SIZE_BLOQUE 8
#define SIZE_GRID 8
#define RADIO 3
/*Programar una funcion que haga la media de numeros end GPU sin memoria compartida, en GPU con memoria compartida
y en CPU. Comparar los tiempos de ejecucion*/
/**
 * Moving average of radius RADIO over d_input, staged through shared
 * memory.  Each block loads its SIZE_BLOQUE elements plus RADIO halo
 * elements on each side; positions beyond either end of the data are
 * treated as 0.  d_output[g] = integer mean of the 2*RADIO+1 window
 * centred on global element g.
 *
 * Fixes:
 *  - the right-hand halo cells were zero-initialised at index
 *    threadIdx.x + RADIO (an interior slot) instead of
 *    threadIdx.x + 2*RADIO, so the last block averaged uninitialised
 *    shared memory at its right edge;
 *  - the per-thread debug printf traffic and commented-out dumps have
 *    been removed.
 */
__global__ void kernel_Shared(int *d_input, int *d_output)
{
    __shared__ int arrayValores[ MEM_DIM + RADIO + RADIO ];
    int const g = blockIdx.x * blockDim.x + threadIdx.x; // global element index
    // Zero the interior slot and both halo regions so out-of-range
    // neighbours contribute 0 to the average.
    arrayValores[threadIdx.x + RADIO] = 0;
    if (threadIdx.x < RADIO)
        arrayValores[threadIdx.x] = 0;
    if (threadIdx.x >= (SIZE_BLOQUE - RADIO))
        arrayValores[threadIdx.x + RADIO + RADIO] = 0; // was "+ RADIO": wrong slot
    __syncthreads();
    // Stage this block's own elements.
    arrayValores[threadIdx.x + RADIO] = d_input[g];
    // Left halo from the previous block's tail (first block keeps 0).
    if (threadIdx.x < RADIO && blockIdx.x > 0)
        arrayValores[threadIdx.x] = d_input[g - RADIO];
    // Right halo from the next block's head (last block keeps 0).
    if (threadIdx.x >= (SIZE_BLOQUE - RADIO) && blockIdx.x < SIZE_GRID - 1)
        arrayValores[threadIdx.x + RADIO + RADIO] = d_input[g + RADIO];
    // All staging done before any thread reads a neighbour's slot.
    __syncthreads();
    // Average the window centred on this element.
    int valorFinal = 0;
    for (int i = -RADIO; i <= RADIO; ++i)
        valorFinal += arrayValores[(threadIdx.x + RADIO) + i];
    valorFinal /= (RADIO + RADIO + 1);
    d_output[g] = valorFinal;
}
// Wall-clock time in seconds since the epoch, with microsecond resolution.
double tiempo( void )
{
    struct timeval ahora;
    gettimeofday(&ahora, NULL);
    return (double) ahora.tv_sec + (double) ahora.tv_usec / 1000000;
}
/**
 * Driver for the shared-memory moving-average kernel: fills the input
 * with 0..n-1, runs the kernel, times the kernel and the copy-back, and
 * prints the averaged output.
 *
 * Fixes:
 *  - cudaMemset cleared only n BYTES of d_output instead of the full
 *    n * sizeof(int) buffer;
 *  - deprecated cudaThreadSynchronize() replaced with
 *    cudaDeviceSynchronize();
 *  - the four buffers are now released before returning.
 */
int main(int argc, char** argv)
{
    double tiempoInicio;
    double tiempoFin;
    int n = SIZE_BLOQUE * SIZE_GRID; // one element per launched thread
    printf("\nElementos a reservar: %d\n\n\n", n);
    int numBytes = n * sizeof(int);
    int *d_input;
    int *d_output;
    int *h_input;
    int *h_output;
    cudaMalloc((void **) &d_input, numBytes );
    if(cudaSuccess != cudaGetLastError())
    {
        printf("Error de cuda\n");
    }
    cudaMalloc((void **) &d_output, numBytes );
    if(cudaSuccess != cudaGetLastError())
    {
        printf("Error de cuda\n");
    }
    // Clear the WHOLE output buffer (was: only n bytes, a quarter of it).
    cudaMemset(d_output, 0, numBytes);
    if(cudaSuccess != cudaGetLastError())
    {
        printf("Error de cuda\n");
    }
    h_input = (int *)malloc(numBytes);
    h_output = (int *)malloc(numBytes);
    // Input ramp 0..n-1.
    for(int i = 0; i < n; ++i)
        h_input[i] = i;
    cudaMemcpy (d_input, h_input, numBytes, cudaMemcpyHostToDevice);
    if(cudaSuccess != cudaGetLastError())
    {
        printf("Error de cuda\n");
    }
    dim3 blockSize(SIZE_BLOQUE);
    dim3 gridSize(SIZE_GRID);
    tiempoInicio = tiempo();
    kernel_Shared <<<gridSize, blockSize>>>(d_input, d_output);
    // Wait for the kernel so the timing below covers its execution
    // (cudaThreadSynchronize is deprecated).
    cudaDeviceSynchronize();
    if(cudaSuccess != cudaGetLastError())
    {
        printf("Error de cuda _1\n");
    }
    tiempoFin = tiempo();
    printf("Tiempo de inicio Kernel: %lf\n", tiempoInicio);
    printf("Tiempo de fin Kernel: %lf\n", tiempoFin);
    printf("Tiempo total: %lf\n\n\n", tiempoFin - tiempoInicio);
    tiempoInicio = tiempo();
    cudaMemcpy (h_output, d_output, numBytes, cudaMemcpyDeviceToHost);
    tiempoFin = tiempo();
    if ( cudaSuccess != cudaGetLastError() )
        printf( "Error! _2\n" );
    printf("Tiempo de inicio Transferencia: %lf\n", tiempoInicio);
    printf("Tiempo de fin Transferencia: %lf\n", tiempoFin);
    printf("Tiempo total: %lf\n\n\n", tiempoFin - tiempoInicio);
    for(int i = 0; i < n; ++i)
    {
        printf("%d - ", h_output[i]);
    }
    printf("\n\n\nDone.\n");
    // Release device and host buffers (the original leaked all four).
    cudaFree(d_input);
    cudaFree(d_output);
    free(h_input);
    free(h_output);
    return 0;
}
|
9,114 | //
// Created by Andrey Pahomov on 28.08.20.
//
#include <cuda_runtime.h>
#include "histogram.cuh"
// Clamp val into [mn, mx] (same comparison order as before, so a NaN val
// still yields mn).
__device__ inline float clamp(float val, float mn, float mx) {
    if (val >= mn) {
        return (val <= mx) ? val : mx;
    }
    return mn;
}
/**
 * Convert 10-bit samples (stored in 16-bit words) to 8 bits by dropping
 * the two least-significant bits.  One thread per pixel on a 2D grid;
 * threads beyond the image simply exit.
 *
 * Fix: the original called __syncthreads() AFTER the bounds-check early
 * return — a barrier in divergent control flow is undefined behaviour —
 * and the kernel uses no shared memory, so the barrier is removed.
 */
__global__ void gpuConvertY10to8uc1_kernel(unsigned short *src, unsigned char *dst,
                                           unsigned int width, unsigned int height) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= width || row >= height)
        return;
    dst[row * width + col] = src[row * width + col] >> 2;
}
/**
 * Histogram each pixel's masked intensity into intensity_num, then
 * rescale the pixel from [*min_index, *max_index] to [0, 255].
 * NOTE(review): *min_index / *max_index are read in the same launch that
 * builds the histogram — presumably they were produced by a previous
 * pass; verify the intended pipeline.
 *
 * Fix: the original returned before __syncthreads() for out-of-range
 * threads, which is undefined behaviour; the barrier is now reached by
 * every thread of the block.
 */
__global__ void gpuConvertY10to8uc1_kernel_gist_simple(unsigned short *src, unsigned char *dst,
                                                       unsigned int width, unsigned int height,
                                                       unsigned int *intensity_num,
                                                       double *intensity_pro,
                                                       unsigned int *min_index,
                                                       unsigned int *max_index) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int pos = row * width + col;
    const bool inside = (col < width && row < height);
    /**********************count_intensity********************/
    if (inside)
        atomicAdd(&intensity_num[(unsigned int) (src[pos] & INTENSITY_MASK)], 1);
    /******************************************/
    // Executed by ALL threads (no divergent early return before it).
    __syncthreads();
    if (!inside)
        return;
    if (*max_index > *min_index && *max_index > 0) {
        double k = (double) (255. / (*max_index - *min_index));
        dst[pos] = (unsigned char) ((double) (src[pos] - *min_index) * k);
    } else dst[pos] = 0;
}
/* Histogram the whole frame into intensity_num using shared-memory
 * partials (one global atomicAdd per bin per block), then have thread 0
 * scan the global histogram for the lowest and highest bins whose count
 * exceeds `offset`, storing them in *min_index / *max_index.
 *
 * Assumes blockDim.x == INTENSITY_RANGE so each thread owns one bin.
 * NOTE(review): shared_bin is indexed with the raw src[i] value; this is
 * only safe if every sample is already < INTENSITY_RANGE (other kernels
 * mask with INTENSITY_MASK) — confirm the input is pre-masked.
 * NOTE(review): the early `return` before the grid-stride loop can skip
 * the later __syncthreads() for some threads of a block when the grid
 * overshoots the image, which is undefined behaviour; left untouched
 * pending a review of the launch configuration. */
__global__ void gpuConvertY10to8uc1_kernel_gist_optimized(unsigned short *src, unsigned char *dst,
unsigned int width, unsigned int height,
unsigned int *intensity_num,
double *intensity_pro,
unsigned int *min_index,
unsigned int *max_index) {
unsigned int stride;
__shared__ unsigned int shared_bin[INTENSITY_RANGE];
unsigned long long i;
// Clear this block's private histogram before accumulating.
shared_bin[threadIdx.x] = 0;
__syncthreads();
i = blockIdx.x * blockDim.x + threadIdx.x;
stride = blockDim.x * gridDim.x;
if (i > width * height) return;
// Grid-stride accumulation into the block-private histogram.
while (i < width * height) {
atomicAdd(&shared_bin[src[i]], 1);
i += stride;
}
__syncthreads();
// Flush the block's partial counts into the global histogram.
if (threadIdx.x < INTENSITY_RANGE)
atomicAdd(&intensity_num[threadIdx.x], shared_bin[threadIdx.x]);
if (threadIdx.x == 0) {
// Threshold a bin must exceed to count as populated.
unsigned int offset = 1;
unsigned int offset_l = 1;
unsigned int offset_r = 1;
bool isMin = false;
bool isMax = false;
// Scan inward from both ends simultaneously.
for (int i = offset_l; i < INTENSITY_RANGE; i++) {
if (intensity_num[i] > offset && !isMin) {
*min_index = i;
isMin = true;
}
if (intensity_num[INTENSITY_RANGE - i - offset_r] > offset && !isMax) {
*max_index = INTENSITY_RANGE - i - offset_r;
isMax = true;
}
if (isMin && isMax) break;
}
}
}
/**
 * Dynamic range expansion: linearly rescale [*min_index, *max_index] to
 * [0, 255].  One thread per pixel; writes 0 when the range is degenerate.
 *
 * Fix: removed a __syncthreads() that guarded nothing — the kernel uses
 * no shared memory and no thread reads another thread's data.
 */
__global__ void gpuConvertY10to8uc1_kernel_dre(unsigned short *src, unsigned char *dst,
                                               unsigned int width, unsigned int height,
                                               unsigned int *intensity_num,
                                               double *intensity_pro,
                                               unsigned int *min_index,
                                               unsigned int *max_index) {
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int pos = row * width + col;
    if (col >= width || row >= height)
        return;
    if (*max_index > *min_index && *max_index > 0) {
        const unsigned int diff = *max_index - *min_index;
        // NOTE(review): a src[pos] below *min_index underflows the
        // unsigned subtraction — presumably min_index is a true lower
        // bound of the data; verify against the histogram pass.
        dst[pos] = (unsigned char) ((src[pos] - *min_index) * 255 / diff);
    } else dst[pos] = 0;
}
/**
 * Host wrapper around gpuConvertY10to8uc1_kernel: convert a width x height
 * 10-bit image (16-bit container) to 8 bits.  Each host buffer may be
 * CUDA-mapped (used in place) or pageable (mirrored via cudaMalloc +
 * cudaMemcpy).
 *
 * Fix: the device->host result copy was guarded by !srcIsMapped, so the
 * output never reached a pageable dst whenever src happened to be mapped
 * (and was copied needlessly when dst was mapped); it is now keyed off
 * dstIsMapped.  The copy length is the actual one-byte-per-pixel plane
 * instead of 3x, which overran the caller's buffer.
 */
void gpuConvertY10to8uc1(unsigned short *src, unsigned char *dst,
                         unsigned int width, unsigned int height) {
    unsigned short *d_src = NULL;
    unsigned char *d_dst = NULL;
    size_t planeSize = width * height * sizeof(unsigned char);
    unsigned int flags;
    // A buffer is "mapped" when allocated with cudaHostAllocMapped and can
    // be handed to kernels directly.
    bool srcIsMapped = (cudaHostGetFlags(&flags, src) == cudaSuccess) && (flags & cudaHostAllocMapped);
    bool dstIsMapped = (cudaHostGetFlags(&flags, dst) == cudaSuccess) && (flags & cudaHostAllocMapped);
    if (srcIsMapped) {
        d_src = src;
        cudaStreamAttachMemAsync(NULL, src, 0, cudaMemAttachGlobal);
    } else {
        // 16-bit input samples: two bytes per pixel.
        cudaMalloc(&d_src, planeSize * 2);
        cudaMemcpy(d_src, src, planeSize * 2, cudaMemcpyHostToDevice);
    }
    if (dstIsMapped) {
        d_dst = dst;
        cudaStreamAttachMemAsync(NULL, dst, 0, cudaMemAttachGlobal);
    } else {
        // NOTE(review): * 3 over-allocates (the kernel writes one byte per
        // pixel); kept to match the original allocation scheme.
        cudaMalloc(&d_dst, planeSize * 3);
    }
    int threadNum = 32;
    dim3 blockSize = dim3(threadNum, threadNum, 1);
    dim3 gridSize = dim3(width / threadNum + 1, height / threadNum + 1, 1);
    gpuConvertY10to8uc1_kernel << < gridSize, blockSize >> > (d_src, d_dst, width, height);
    // Hand the (possibly managed) dst back to the host and wait.
    cudaStreamAttachMemAsync(NULL, dst, 0, cudaMemAttachHost);
    cudaStreamSynchronize(NULL);
    if (!dstIsMapped) {
        // Mirror the result into the caller's pageable buffer.
        cudaMemcpy(dst, d_dst, planeSize, cudaMemcpyDeviceToHost);
        cudaFree(d_dst);
    }
    if (!srcIsMapped) {
        cudaFree(d_src);
    }
}
/**
 * Host wrapper: histogram-based dynamic-range expansion of a 10-bit image
 * into an 8-bit one.  Each host buffer may be CUDA-mapped (used in place)
 * or pageable (mirrored with cudaMalloc/cudaMemcpy).
 *
 * Fixes:
 *  - the result image was copied back only when *src* was not mapped; the
 *    copy now keys off dstIsMapped, with the actual plane size (was 3x);
 *  - d_max_index was freed under !min_indexIsMapped (copy/paste);
 *  - histogram and min/max results are now copied back to pageable host
 *    buffers, which the original never did.
 */
void gpuConvertY10to8uc1_gist(unsigned short *src, unsigned char *dst,
                              unsigned int width, unsigned int height,
                              unsigned int *intensity_num,
                              double *intensity_pro,
                              unsigned int *min_index,
                              unsigned int *max_index,
                              bool isMatGist) {
    unsigned short *d_src = NULL;
    unsigned char *d_dst = NULL;
    unsigned int *d_intensity_num = NULL;
    double *d_intensity_pro = NULL;
    unsigned int *d_min_index = NULL;
    unsigned int *d_max_index = NULL;
    size_t planeSize = width * height * sizeof(unsigned char);
    unsigned int flags;
    // A buffer is "mapped" when allocated with cudaHostAllocMapped and can
    // be handed to kernels directly.
    bool srcIsMapped = (cudaHostGetFlags(&flags, src) == cudaSuccess) && (flags & cudaHostAllocMapped);
    bool dstIsMapped = (cudaHostGetFlags(&flags, dst) == cudaSuccess) && (flags & cudaHostAllocMapped);
    bool intensity_numIsMapped =
            (cudaHostGetFlags(&flags, intensity_num) == cudaSuccess) && (flags & cudaHostAllocMapped);
    bool intensity_proIsMapped =
            (cudaHostGetFlags(&flags, intensity_pro) == cudaSuccess) && (flags & cudaHostAllocMapped);
    bool min_indexIsMapped = (cudaHostGetFlags(&flags, min_index) == cudaSuccess) && (flags & cudaHostAllocMapped);
    bool max_indexIsMapped = (cudaHostGetFlags(&flags, max_index) == cudaSuccess) && (flags & cudaHostAllocMapped);
    if (srcIsMapped) {
        d_src = src;
        cudaStreamAttachMemAsync(NULL, src, 0, cudaMemAttachGlobal);
    } else {
        // 16-bit input samples: two bytes per pixel.
        cudaMalloc(&d_src, planeSize * 2);
        cudaMemcpy(d_src, src, planeSize * 2, cudaMemcpyHostToDevice);
    }
    if (dstIsMapped) {
        d_dst = dst;
        cudaStreamAttachMemAsync(NULL, dst, 0, cudaMemAttachGlobal);
    } else {
        // NOTE(review): * 3 over-allocates (kernels write one byte per
        // pixel); kept to match the original allocation scheme.
        cudaMalloc(&d_dst, planeSize * 3);
    }
    if (intensity_numIsMapped) {
        d_intensity_num = intensity_num;
        cudaStreamAttachMemAsync(NULL, intensity_num, 0, cudaMemAttachGlobal);
    } else {
        cudaMalloc(&d_intensity_num, INTENSITY_RANGE * sizeof(unsigned int));
        cudaMemcpy(d_intensity_num, intensity_num, INTENSITY_RANGE * sizeof(unsigned int), cudaMemcpyHostToDevice);
    }
    if (intensity_proIsMapped) {
        d_intensity_pro = intensity_pro;
        cudaStreamAttachMemAsync(NULL, intensity_pro, 0, cudaMemAttachGlobal);
    } else {
        cudaMalloc(&d_intensity_pro, INTENSITY_RANGE * sizeof(double));
        cudaMemcpy(d_intensity_pro, intensity_pro, INTENSITY_RANGE * sizeof(double), cudaMemcpyHostToDevice);
    }
    if (min_indexIsMapped) {
        d_min_index = min_index;
        cudaStreamAttachMemAsync(NULL, min_index, 0, cudaMemAttachGlobal);
    } else {
        cudaMalloc(&d_min_index, 1 * sizeof(unsigned int));
        cudaMemcpy(d_min_index, min_index, 1 * sizeof(unsigned int), cudaMemcpyHostToDevice);
    }
    if (max_indexIsMapped) {
        d_max_index = max_index;
        cudaStreamAttachMemAsync(NULL, max_index, 0, cudaMemAttachGlobal);
    } else {
        cudaMalloc(&d_max_index, 1 * sizeof(unsigned int));
        cudaMemcpy(d_max_index, max_index, 1 * sizeof(unsigned int), cudaMemcpyHostToDevice);
    }
    // Histogram + min/max pass: one thread per intensity bin.
    int threadNum = INTENSITY_RANGE;
    dim3 blockSize(INTENSITY_RANGE);
    dim3 gridSize = dim3(width / threadNum + 1, height / threadNum + 1, 1);
    if (isMatGist)
        gpuConvertY10to8uc1_kernel_gist_optimized << < gridSize, blockSize >> >
        (d_src, d_dst, width, height,
                d_intensity_num,
                d_intensity_pro,
                d_min_index, d_max_index);
    // Range-expansion pass: one thread per pixel in 32x32 tiles.
    threadNum = 32;
    blockSize = dim3(threadNum, threadNum, 1);
    gridSize = dim3(width / threadNum + 1, height / threadNum + 1, 1);
    gpuConvertY10to8uc1_kernel_dre << < gridSize, blockSize >> >
    (d_src, d_dst, width, height,
            d_intensity_num,
            d_intensity_pro,
            d_min_index, d_max_index);
    // Hand mapped buffers back to the host and wait for the kernels.
    cudaStreamAttachMemAsync(NULL, dst, 0, cudaMemAttachHost);
    cudaStreamAttachMemAsync(NULL, intensity_num, 0, cudaMemAttachHost);
    cudaStreamAttachMemAsync(NULL, intensity_pro, 0, cudaMemAttachHost);
    cudaStreamAttachMemAsync(NULL, min_index, 0, cudaMemAttachHost);
    cudaStreamAttachMemAsync(NULL, max_index, 0, cudaMemAttachHost);
    cudaStreamSynchronize(NULL);
    // Mirror device results back into the caller's pageable buffers and
    // release the device mirrors.
    if (!dstIsMapped) {
        cudaMemcpy(dst, d_dst, planeSize, cudaMemcpyDeviceToHost);
        cudaFree(d_dst);
    }
    if (!srcIsMapped) {
        cudaFree(d_src);
    }
    if (!intensity_numIsMapped) {
        cudaMemcpy(intensity_num, d_intensity_num, INTENSITY_RANGE * sizeof(unsigned int), cudaMemcpyDeviceToHost);
        cudaFree(d_intensity_num);
    }
    if (!intensity_proIsMapped) {
        cudaFree(d_intensity_pro);
    }
    if (!min_indexIsMapped) {
        cudaMemcpy(min_index, d_min_index, sizeof(unsigned int), cudaMemcpyDeviceToHost);
        cudaFree(d_min_index);
    }
    if (!max_indexIsMapped) {
        cudaMemcpy(max_index, d_max_index, sizeof(unsigned int), cudaMemcpyDeviceToHost);
        cudaFree(d_max_index);  // was freed under !min_indexIsMapped
    }
}
|
9,115 | #include <stdio.h>
// Element-wise vector addition C[i] = A[i] + B[i], one thread per element.
// No bounds check: every launched thread indexes the arrays, so the total
// thread count must not exceed the number of allocated elements.
__global__ void vecAdd(float *A,float *B,float *C) {
int i = threadIdx.x + blockDim.x*blockIdx.x;
C[i] = A[i] + B[i];
}
/**
 * Read N from stdin, build two random float vectors, add them on the GPU
 * and print inputs and result.
 *
 * Fixes:
 *  - the grid was N/256 blocks, which drops the tail when N is not a
 *    multiple of 256 and launches ZERO blocks for N < 256; the grid is
 *    now ceil-divided, with the device buffers padded to a whole number
 *    of blocks so the unguarded kernel stays in bounds;
 *  - the print loops used %d for float values (undefined behaviour).
 */
int main() {
	int N;
	scanf("%d",&N);
	if (N <= 0) return 0;   // nothing to do (also avoids zero-size mallocs)
	const int threads = 256;
	const int blocks = (N + threads - 1) / threads; // ceil-divide
	const size_t padded = (size_t)blocks * threads; // device element count
	float *h_a = (float*)malloc(N*sizeof(float));
	float *h_b = (float*)malloc(N*sizeof(float));
	float *h_c = (float*)malloc(N*sizeof(float));
	float *d_a,*d_b,*d_c;
	// Padded device buffers: tail threads of the last block read/write
	// only the padding, never out of bounds.
	cudaMalloc((void**)&d_a,padded*sizeof(float));
	cudaMalloc((void**)&d_b,padded*sizeof(float));
	cudaMalloc((void**)&d_c,padded*sizeof(float));
	int i;
	// Random values in [0, 100).
	for(i=0;i<N;i++) {
		*(h_a + i) = (float)rand()/(float)(RAND_MAX/100);
	}
	for(i=0;i<N;i++) {
		*(h_b + i) = (float)rand()/(float)(RAND_MAX/100);
	}
	for(i=0;i<N;i++) {
		*(h_c + i) = (float)rand()/(float)(RAND_MAX/100);
	}
	// Zero the padded regions so tail threads add well-defined zeros.
	cudaMemset(d_a,0,padded*sizeof(float));
	cudaMemset(d_b,0,padded*sizeof(float));
	cudaMemset(d_c,0,padded*sizeof(float));
	cudaMemcpy(d_a,h_a,N*sizeof(float),cudaMemcpyHostToDevice);
	cudaMemcpy(d_b,h_b,N*sizeof(float),cudaMemcpyHostToDevice);
	vecAdd<<<blocks,threads>>>(d_a,d_b,d_c);
	cudaMemcpy(h_c,d_c,N*sizeof(float),cudaMemcpyDeviceToHost);
	for(i=0;i<N;i++) {
		printf("%f ",h_a[i]);
	}
	printf("\n");
	for(i=0;i<N;i++) {
		printf("%f ",h_b[i]);
	}
	printf("\n");
	for(i=0;i<N;i++) {
		printf("%f ",h_c[i]);
	}
	printf("\n");
	free(h_a);
	free(h_b);
	free(h_c);
	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	return 0;
}
9,116 | /*
* The Game of Life
*
* a cell is born, if it has exactly three neighbours
* a cell dies of loneliness, if it has less than two neighbours
* a cell dies of overcrowding, if it has more than three neighbours
* a cell survives to the next generation, if it does not die of loneliness
* or overcrowding
*
* In this version, a 2D array of ints is used. A 1 cell is on, a 0 cell is off.
* The game plays a number of steps (given by the input), printing to the screen each time. 'x' printed
* means on, space means off.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
typedef unsigned char bool_t;
typedef unsigned char cell_t;
#define TILE_SIZE 16
#define gpuErrchk(ans) \
{ \
gpuAssert((ans), __FILE__, __LINE__); \
}
/* Helper behind gpuErrchk: report a failed CUDA call with its source
 * location and, unless abort is false, terminate with the error code. */
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) {
if (code == cudaSuccess)
return;
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort)
exit(code);
}
/* Allocate a flattened (1D) life board of flat_size cells; caller frees. */
cell_t *allocate_board_flat(int flat_size) {
return (cell_t *) malloc(sizeof(cell_t) * flat_size);
}
/*
 * Count the live neighbours of cell (column i, row j) on a size x size
 * board stored row-major (d_board[row * size + col]).  Sweeps the 3x3
 * window clamped at the board edges, then subtracts the centre cell so
 * only the (up to 8) neighbours are counted; cells are 0/1 so a plain
 * sum is the live count.
 */
__device__ __inline__ int adjacent_to(const cell_t *d_board, int size, int i, int j) {
int k, l, count = 0;
/* clamped window bounds: sk..ek over columns (i), sl..el over rows (j) */
int sk = (i > 0) ? i - 1 : i;
int ek = (i + 1 < size) ? i + 1 : i;
int sl = (j > 0) ? j - 1 : j;
int el = (j + 1 < size) ? j + 1 : j;
for (k = sk; k <= ek; k++)
for (l = sl; l <= el; l++)
count += d_board[l * size + k];
/* the window includes the centre cell itself; remove it */
count -= d_board[j * size + i];
return count;
}
/*
 * One Game-of-Life generation: each thread computes the next state of a
 * single cell, reading d_board and writing d_newboard.  Launch with a
 * 2D grid of 2D blocks covering the size x size board.
 */
__global__ void playKernel(cell_t *d_board, cell_t *d_newboard, int size) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= size || col >= size)
return;
int neighbours = adjacent_to(d_board, size, col, row);
cell_t next;
if (neighbours == 3)
next = 1;                               /* birth (or survival) */
else if (neighbours == 2)
next = d_board[row * size + col];       /* state unchanged */
else
next = 0;                               /* loneliness or overcrowding */
d_newboard[row * size + col] = next;
}
/* print the life board */
/* Render the board to stdout: 'x' for a live cell, space for a dead one,
 * one text line per board row. */
void print_flat(cell_t *board, int size) {
int row, col;
for (row = 0; row < size; row++) {
for (col = 0; col < size; col++) {
putchar(board[row * size + col] ? 'x' : ' ');
}
putchar('\n');
}
}
/* read a file into the life board */
/*
 * Read `size` lines from f into the flattened board: a cell is live (1)
 * iff the corresponding character is 'x'; short or missing lines pad
 * with dead cells.
 *
 * Fixes over the original:
 *  - the line buffer `s` was never freed (leak),
 *  - fgets was unchecked, so a truncated file read stale buffer contents,
 *  - `len = strlen(s) - 1` underflowed on an empty line and wrongly
 *    dropped the last character of a final line with no trailing newline;
 *    the newline is now stripped only when present.
 */
void read_file_flat(FILE *f, cell_t *board, int size) {
int i, j;
size_t len;
char *s = (char *) malloc(size + 10);
for (j = 0; j < size; j++) {
/* get a string; a missing row becomes an all-dead row */
if (!fgets(s, size + 10, f)) {
for (i = 0; i < size; i++)
board[j * size + i] = 0;
continue;
}
len = strlen(s);
if (len > 0 && s[len - 1] == '\n')
len--;
/* copy the string to the life board */
for (i = 0; i < size; i++) {
board[j * size + i] = (size_t) i < len ? s[i] == 'x' : 0;
}
}
free(s);
}
/*
 * Reads "size steps" and the initial board from stdin, runs `steps`
 * generations on the GPU, and prints the final board.  Generations are
 * ping-ponged two at a time by swapping the kernel's argument order, so
 * no device-to-device copies are needed; after an even number of steps
 * the result is in d_prev, after an odd number in d_next.
 */
int main(int argc, char *argv[]) {
// Host variables
int size, flat_size, steps, i, grid_size;
FILE *f_in;
cell_t *h_prev;
bool_t writeOutput = 1, evenSteps;
// Device variables
cell_t *d_prev, *d_next;
f_in = stdin;
// Read the input file and write its content in the host array
// NOTE(review): fscanf result unchecked -- malformed input leaves
// size/steps uninitialised.
fscanf(f_in, "%d %d", &size, &steps);
flat_size = size * size;
evenSteps = steps % 2 == 0;
h_prev = allocate_board_flat(flat_size);
read_file_flat(f_in, h_prev, size);
fclose(f_in);
// 2D grid of TILE_SIZE x TILE_SIZE blocks covering the board
grid_size = int(ceil((float) size / TILE_SIZE));
dim3 dimGrid(grid_size, grid_size, 1);
dim3 dimBlock(TILE_SIZE, TILE_SIZE, 1);
// Allocate device arrays
gpuErrchk(cudaMalloc((void **) &d_prev, flat_size * sizeof(cell_t)));
gpuErrchk(cudaMalloc((void **) &d_next, flat_size * sizeof(cell_t)));
// Copy the data from the host array to the device array
gpuErrchk(cudaMemcpy(d_prev, h_prev, flat_size * sizeof(cell_t), cudaMemcpyHostToDevice));
// ceil(steps/2) iterations, two generations per iteration
for (i = 0; i < int(ceil((float) steps / 2)); i++) {
// printf("Step: %d\n", 2 * i);
// Instead of using cudaMemcpy and a buffer or swapping pointers,
// run the same kernel with the variables inverted
playKernel<<<dimGrid, dimBlock>>>(d_prev, d_next, size);
// skip the second half-step when an odd step count ends mid-pair
if (evenSteps || (2 * i + 1) < steps) {
// printf("Step: %d\n", 2 * i + 1);
playKernel<<<dimGrid, dimBlock>>>(d_next, d_prev, size);
}
}
// Copy data back from the device array to the host array
// (the buffer holding the final generation depends on step parity)
gpuErrchk(cudaMemcpy(h_prev, evenSteps ? d_prev : d_next, flat_size * sizeof(cell_t), cudaMemcpyDeviceToHost))
// Deallocate device arrays
gpuErrchk(cudaFree(d_next));
gpuErrchk(cudaFree(d_prev));
if (writeOutput) {
print_flat(h_prev, size);
}
free(h_prev);
return EXIT_SUCCESS;
}
|
9,117 | #include <stdio.h>
/*
 * Print every element of a pitched rows x cols int matrix.  The pitch is
 * in BYTES (as returned by cudaMallocPitch), so each row base is computed
 * via a char* cast before reindexing as int -- the standard pitched-memory
 * idiom.  Output order across threads is unspecified (device printf).
 */
__global__ void kernel(int *d_A, size_t pitch, int rows, int cols){
//compute the row
int r = blockIdx.y*blockDim.y+threadIdx.y;
//compute the column
int c = blockIdx.x*blockDim.x+threadIdx.x;
if((r < rows) && (c < cols)){
// // update the pointer to point to the beginning of the row
int *Row = (int*)((char*)d_A + r*pitch);
int elem = Row[c];
printf("%d ", elem);
}
}
/*
 * Fills a rows x cols matrix on the host, uploads it with
 * cudaMallocPitch / cudaMemcpy2D (device rows padded to the pitch), and
 * has the kernel print every element.
 *
 * Fix over the original: neither the device allocation (d_A) nor the
 * host allocation (A) was ever released; both are now freed.
 */
int main(){
int *d_A, *A;
size_t pitch;
int rows = 4;
int cols = 4;
A = (int *)malloc(rows*cols*sizeof(int));
for (int i = 0; i < rows*cols; i++) A[i] = i;
/* pitched allocation: each device row is padded to an aligned pitch */
cudaMallocPitch((void**)&d_A, &pitch, sizeof(int)*cols, rows);
/* host rows are tightly packed: source pitch is sizeof(int)*cols */
cudaMemcpy2D(d_A, pitch, A, sizeof(int)*cols, sizeof(int)*cols, rows, cudaMemcpyHostToDevice);
dim3 block(16,16);   /* one 16x16 block covers the 4x4 matrix */
dim3 grid(1,1);
kernel<<<grid,block>>>(d_A, pitch, rows, cols);
cudaDeviceSynchronize();
/* release device and host memory (the original leaked both) */
cudaFree(d_A);
free(A);
printf("\nDone!\n");
return 0;
}
|
9,118 | #include "includes.h"
/*
 * Block-level sum reduction with 8x unrolled global loads.
 *
 * Each block first folds 8 consecutive blockDim.x-sized chunks of
 * g_idata into the first chunk (in place, in global memory), then
 * reduces that chunk with a completely unrolled in-place tree, ending
 * with a warp-synchronous tail through a volatile pointer.  Thread 0
 * writes the block's partial sum to g_odata[blockIdx.x].
 *
 * Preconditions implied by the code:
 *  - blockDim.x is a power of two with at least 64 threads (the warp
 *    tail reads idata[tid + 32]) and at most 1024;
 *  - every launched block except possibly the last has its full 8
 *    chunks inside n (the 8x load is guarded, the tree stages are not).
 * NOTE(review): the volatile warp tail relies on implicit warp lockstep;
 * on Volta+ independent thread scheduling it needs __syncwarp() between
 * steps -- confirm the target architecture.
 */
__global__ void reduceCompUnrollW(int *g_idata, int *g_odata, unsigned int n)
{
// set the thread id.
unsigned int tid = threadIdx.x;
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x * 8;
// convert global data pointer to the local pointer of this block.
int *idata = g_idata + blockIdx.x * blockDim.x * 8;
// unrolling 8 data blocks: fold chunks 1..7 into chunk 0.
if (idx + blockDim.x * 7 < n)
{
int a1 = g_idata[idx];
int a2 = g_idata[idx + blockDim.x];
int a3 = g_idata[idx + blockDim.x * 2];
int a4 = g_idata[idx + blockDim.x * 3];
int b1 = g_idata[idx + blockDim.x * 4];
int b2 = g_idata[idx + blockDim.x * 5];
int b3 = g_idata[idx + blockDim.x * 6];
int b4 = g_idata[idx + blockDim.x * 7];
g_idata[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4;
}
__syncthreads();
// in-place reduction and complete unroll (conditions are uniform per
// block, so the barriers are reached by all threads).
if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
__syncthreads();
if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
__syncthreads();
if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
__syncthreads();
if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
__syncthreads();
// unrolling warp: last 64 values reduced by the first warp.
if (tid < 32)
{
volatile int *vmem = idata;
vmem[tid] += vmem[tid + 32];
vmem[tid] += vmem[tid + 16];
vmem[tid] += vmem[tid + 8];
vmem[tid] += vmem[tid + 4];
vmem[tid] += vmem[tid + 2];
vmem[tid] += vmem[tid + 1];
}
// write result for this block to global mem.
if (tid == 0)
{
g_odata[blockIdx.x] = idata[0];
}
}
9,119 | #include <iostream>
#include <math.h>
using std::cout;;
using std::cerr;
using std::endl;
// Shut down MPI cleanly if something goes wrong
// Failure hook invoked by CUDA_CHECK.
// NOTE(review): despite its name this neither terminates the process nor
// uses `err`, so execution continues after a CUDA error; the comment
// above suggests it once shut down MPI -- confirm this is intentional.
void my_abort(int err)
{
cout << "Test FAILED\n";
}
// Error handling macro
#define CUDA_CHECK(call) \
if((call) != cudaSuccess) { \
cudaError_t err = cudaGetLastError(); \
cerr << "CUDA error calling \""#call"\", code is " << err << endl; \
my_abort(err); }
// Kernel function to add the elements of two arrays
// Kernel function to add the elements of two arrays: y[i] = x[i] + y[i].
// Deliberately serial: a single thread walks all n elements, matching the
// <<<1, 1>>> launch in main; launching more threads would make every
// thread repeat (and race on) the whole loop.
__global__
void add(int n, float *x, float *y)
{
for (int i = 0; i < n; i++)
y[i] = x[i] + y[i];
}
/*
 * Allocates two N-element vectors, fills x with 1.0f and y with 2.0f,
 * adds them on the GPU (intentionally single-threaded kernel) and
 * reports the maximum deviation from the expected value 3.0f.
 *
 * Fixes over the original:
 *  - a failed host or device allocation only printed a message and then
 *    went on to dereference the null pointer; each failure now exits
 *    with a non-zero status,
 *  - launch-configuration errors are now caught via cudaGetLastError().
 */
int main(void)
{
int N = 1000;
float *h_x = NULL;
float *d_x = NULL;
float *h_y = NULL;
float *d_y = NULL;
// allocate memory for arrays on the host
h_x = (float *) malloc(N*sizeof(float));
if (!h_x) {
cerr << "can't allocate memory h_x" << endl;
return 1;
}
h_y = (float *) malloc(N*sizeof(float));
if (!h_y) {
cerr << "can't allocate memory h_y" << endl;
free(h_x);
return 1;
}
// allocate memory for arrays on the device
CUDA_CHECK(cudaMalloc((void **)&d_x, N*sizeof(float)));
if (!d_x) {
cerr << "can't allocate memory d_x" << endl;
return 1;
}
CUDA_CHECK(cudaMalloc((void **)&d_y, N*sizeof(float)));
if (!d_y) {
cerr << "can't allocate memory d_y" << endl;
cudaFree(d_x);
return 1;
}
// initialize x and y arrays on the host
for (int i = 0; i < N; i++) {
h_x[i] = 1.0f;
h_y[i] = 2.0f;
}
// copy arrays to device
CUDA_CHECK(cudaMemcpy(d_x, h_x, N*sizeof(float),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(d_y, h_y, N*sizeof(float),
cudaMemcpyHostToDevice));
// Run kernel on the GPU (serial by design -- see add())
add<<<1, 1>>>(N, d_x, d_y);
// catch bad-launch errors that the launch itself does not report
CUDA_CHECK(cudaGetLastError());
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// copy array to the host
CUDA_CHECK(cudaMemcpy(h_y, d_y, N*sizeof(float),
cudaMemcpyDeviceToHost));
// Check for errors (all values should be 3.0f)
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = fmax(maxError, fabs(h_y[i]-3.0f));
cout << "Max error: " << maxError << std::endl;
// Free memory
free(h_x);
free(h_y);
cudaFree(d_x);
cudaFree(d_y);
return 0;
}
|
9,120 | // nnIndex: B*M*K;
// nnCount: B*M;
// input: B*N*C;
// output: B*M*C (M<N)
// Max-pool over per-point neighbour sets.
//   nnIndex:  B*M*K neighbour indices into the N input points
//   nnCount:  B*M   number of valid neighbours per output point
//   input:    B*N*C features; output: B*M*C pooled maxima (M < N)
//   maxIndex: B*M*C argmax point index, recorded for the backward pass
// One block iteration per batch (blockIdx.x strides over B), threads
// stride over the M*C output slots; k==0 seeds the running max so
// output needs no pre-initialisation.
__global__ void max_pool3d_forward(int B, int N, int M, int C, int K, const int* nnIndex,
const int* nnCount, const float* input, float* output, int* maxIndex)
{
for(int i=blockIdx.x;i<B;i+=gridDim.x)
{
for(int j=threadIdx.x;j<M*C;j+=blockDim.x)
{
int m = j/C;
int c = j%C;
int nnSize = nnCount[i*M+m];
for(int k=0;k<nnSize;k++)
{
int n = nnIndex[i*M*K+m*K+k];
if (k==0)
{
output[i*M*C+j] = input[i*N*C+n*C+c];
maxIndex[i*M*C+j] = n;
continue;
}
if (input[i*N*C+n*C+c]>output[i*M*C+j])
{
output[i*M*C+j] = input[i*N*C+n*C+c];
maxIndex[i*M*C+j] = n;
}
}
}
}
}
// maxIndex: B*M*C, indices of the maximum feature point
// Backward pass of max pooling: scatter each output gradient to the
// input point that won the forward max,
//   gradInput[b, maxIndex[b,m,c], c] += gradOutput[b,m,c].
// atomicAdd is needed because several (m,c) slots may share the same
// input point n.  gradInput is accumulated into, so the caller must
// zero it first.
__global__ void max_pool3d_backward(int B, int N, int M, int C, const int* maxIndex,
const float* gradOutput, float* gradInput)
{
for(int i=blockIdx.x;i<B;i+=gridDim.x)
{
for(int j=threadIdx.x;j<M*C;j+=blockDim.x)
{
int c = j%C;
int n = maxIndex[i*M*C+j];
atomicAdd(&gradInput[i*N*C+n*C+c],gradOutput[i*M*C+j]);
}
}
}
// Average-pool over per-point neighbour sets:
//   output[b,m,c] += input[b,n,c] / nnSize for each of the nnSize
//   neighbours n of output point m.
// The kernel ACCUMULATES into output, so the caller must zero it first.
// (nnSize is int but the division is float: input value / nnSize.)
__global__ void avg_pool3d_forward(int B, int N, int M, int C, int K, const int* nnIndex,
const int* nnCount, const float* input, float* output)
{
for(int i=blockIdx.x;i<B;i+=gridDim.x)
{
for(int j=threadIdx.x;j<M*C;j+=blockDim.x)
{
int m = j/C;
int c = j%C;
int nnSize = nnCount[i*M+m];
for(int k=0;k<nnSize;k++)
{
int n = nnIndex[i*M*K+m*K+k];
output[i*M*C+j] += input[i*N*C+n*C+c]/nnSize;
}
}
}
}
// Backward pass of average pooling: each output gradient is split
// evenly among its nnSize contributing input points.  atomicAdd is
// needed because different outputs can share an input point; the
// caller must zero gradInput before launch.
__global__ void avg_pool3d_backward(int B, int N, int M, int C, int K, const int* nnIndex,
const int* nnCount, const float* gradOutput, float* gradInput)
{
for(int i=blockIdx.x;i<B;i+=gridDim.x)
{
for(int j=threadIdx.x;j<M*C;j+=blockDim.x)
{
int m = j/C;
int c = j%C;
int nnSize = nnCount[i*M+m];
for(int k=0;k<nnSize;k++)
{
int n = nnIndex[i*M*K+m*K+k]; // only neighbor, no bin indices, dimension=(B,M,K)
atomicAdd(&gradInput[i*N*C+n*C+c],gradOutput[i*M*C+j]/nnSize);
}
}
}
}
// Host-side launchers: one block per batch element, 1024 threads per
// block striding over the M*C output slots.  Launches are asynchronous;
// callers that need completion must synchronise on the stream.
void maxPool3dLauncher(int B, int N, int M, int C, int K, const int* nnIndex, const int* nnCount,
const float* input, float* output, int* maxIndex)
{
max_pool3d_forward<<<B,1024>>>(B, N, M, C, K, nnIndex, nnCount, input, output, maxIndex);
// cudaDeviceSynchronize();
}
// Scatter max-pool gradients (gradInput must be pre-zeroed).
void maxPool3dGradLauncher(int B, int N, int M, int C, const int* maxIndex,
const float* gradOutput, float* gradInput)
{
max_pool3d_backward<<<B,1024>>>(B, N, M, C, maxIndex, gradOutput, gradInput);
}
// Average pooling forward (output must be pre-zeroed -- the kernel accumulates).
void avgPool3dLauncher(int B, int N, int M, int C, int K, const int* nnIndex, const int* nnCount,
const float* input, float* output)
{
avg_pool3d_forward<<<B,1024>>>(B, N, M, C, K, nnIndex, nnCount, input, output);
}
// Average pooling backward (gradInput must be pre-zeroed).
void avgPool3dGradLauncher(int B, int N, int M, int C, int K, const int* nnIndex, const int* nnCount,
const float* gradOutput, float* gradInput)
{
avg_pool3d_backward<<<B,1024>>>(B, N, M, C, K, nnIndex, nnCount, gradOutput, gradInput);
}
9,121 | #include <stdio.h>
const int N = 20;
const int blocksize = 20;
/*
 * For each of the first `size` words (rows of the N x N char matrix a),
 * count how many occurrences of that word exist in total (itself plus
 * byte-identical rows) and write the count to c[threadIdx.x].  Threads
 * with threadIdx.x >= size write the sentinel -1.  Comparing all N bytes
 * of each row (not just up to the terminator) is valid because main
 * zero-pads the matrix via aggregate initialisation.
 */
__global__
void hello(char *a, int *c, int size)
{
int i = 0, k = 0;
int count = 1;
if(threadIdx.x < size){
for(i = 0; i < size; i++){
if(threadIdx.x != i){
for(k = 0; k < N; k++){
if(a[N * threadIdx.x + k] != a[N * i + k]){
break;
}
// all N bytes matched: rows are identical
if(k == N - 1){
count++;
}
}
}
}
c[threadIdx.x] = count;
}
else{
c[threadIdx.x] = -1;
}
}
/*
 * Counts word occurrences on the GPU (hello kernel), then prints each
 * distinct word once with its total count among the first `size` rows.
 * NOTE(review): strcpy/strcmp require <string.h> and EXIT_SUCCESS
 * requires <stdlib.h>, but only <stdio.h> is included here -- confirm
 * the build pulls these in transitively.
 */
int main()
{
// N x N matrix of words; aggregate init zero-pads unused bytes/rows
char words[N][N] =
{
"ABCDE",
"xyz",
"Hi",
"japan",
"xyz",
"Hi",
"cup",
"paper",
"Hi",
"Apple"
};
int size = 10;
int count[N];
char answer_words[N][N];
int answer_count[N];
char *ad;
int *cd;
const int csize = N*N*sizeof(char);
const int isize = N*sizeof(int);
cudaMalloc( (void**)&ad, csize );
cudaMalloc( (void**)&cd, isize );
cudaMemcpy( ad, words, csize, cudaMemcpyHostToDevice );
dim3 dimBlock( blocksize, 1 );
dim3 dimGrid( 1, 1 );
// one thread per row; threads past `size` report -1
hello<<<dimGrid, dimBlock>>>(ad, cd, size);
// blocking copy also synchronises with the kernel
cudaMemcpy( count, cd, isize, cudaMemcpyDeviceToHost );
cudaFree( ad );
cudaFree( cd );
int i = 0, k = 0;
int num = 0;
int dismatchflag = 0;
// Collapse duplicates: keep the first occurrence of each word.
for(i = 0; i < N; i++){
// -1 marks the first unused row: nothing more to report
if(count[i] == -1){
break;
}
if(count[i] == 1){
strcpy(answer_words[num], words[i]);
answer_count[num] = count[i];
num++;
}
else if(count[i] > 1){
// only record a duplicated word the first time it is seen
for(k = 0; k < num; k++){
if(strcmp(words[i], answer_words[k]) == 0){
dismatchflag = 1;
break;
}
}
if(dismatchflag == 0){
strcpy(answer_words[num], words[i]);
answer_count[num] = count[i];
num++;
}
else{
dismatchflag = 0;
}
}
}
for(i = 0; i < num; i++){
printf("%s, %d\n", answer_words[i], answer_count[i]);
}
return EXIT_SUCCESS;
}
|
9,122 | // Jordan Cazamias
// CUDA World Gen 2015
#include <iostream>
#include <ctime>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "cuda_runtime_api.h"
using namespace std;
// In-place element-wise addition: a[id] += b[id] for id < count.
// One thread per element; excess threads in the last block exit early.
__global__ void AddInts(int *a, int *b, int count)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= count)
return;
a[id] += b[id];
//printf("id: %d\n", id);
}
/*
int main()
{
srand(time(NULL));
int count = 100;
int *h_a = new int[count];
int *h_b = new int[count];
for (int i = 0; i < count; i++)
{
h_a[i] = rand() % 1000;
h_b[i] = rand() % 1000;
}
cout << "Prior to addition:" << endl;
for (int i = 0; i < 5; i++)
{
cout << i << ": " << h_a[i] << " " << h_b[i] << endl;
}
int *d_a, *d_b;
if (cudaMalloc(&d_a, sizeof(int) * count) != cudaSuccess)
{
cout << "CUDA Malloc failed!";
return 1;
}
if (cudaMalloc(&d_b, sizeof(int) * count) != cudaSuccess)
{
cout << "CUDA Malloc failed!";
cudaFree(d_a);
return 1;
}
if (cudaMemcpy(d_a, h_a, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess)
{
cout << "CUDA copy to device failed!";
cudaFree(d_a);
cudaFree(d_b);
return 1;
}
if (cudaMemcpy(d_b, h_b, sizeof(int)*count, cudaMemcpyHostToDevice) != cudaSuccess)
{
cout << "CUDA copy to device failed!";
cudaFree(d_a);
cudaFree(d_b);
return 1;
}
// Add integers together
int blocks = count / 256 + 1;
int threads = 256;
AddInts<<<blocks, threads>>>(d_a, d_b, count);
if (cudaMemcpy(h_a, d_a, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess)
{
cout << "CUDA copy to host failed!";
cudaFree(d_a);
cudaFree(d_b);
return 1;
}
if (cudaMemcpy(h_a, d_a, sizeof(int)*count, cudaMemcpyDeviceToHost) != cudaSuccess)
{
cout << "CUDA copy to host failed!";
cudaFree(d_a);
cudaFree(d_b);
return 1;
}
for (int i = 0; i < 5; i++)
{
cout << "Ans: " << h_a[i] << endl;
}
delete[] h_a;
delete[] h_b;
return 0;
}
*/ |
9,123 | #include "includes.h"
// Scatter-copy one element per thread from src to dest with a blocked
// index remap: for r = index % ld_src within each ld_src-sized row, the
// element moves to sub-row (r % 8) (sub-rows are ld_dest apart) at
// column r / 8, keeping the same row base (index / ld_src) * ld_src.
// NOTE(review): this looks like an 8-way interleave of each ld_src row
// into an 8 x ld_dest layout -- confirm the intended geometry and that
// ld_src is a multiple of 8 so the mapping stays in bounds.
__global__ void transformation(size_t num_values, float_t* src, float_t* dest, size_t ld_src, size_t ld_dest)
{
size_t index = blockIdx.x*blockDim.x + threadIdx.x;
if(index < num_values)
{
size_t dest_index = (index/ld_src)*ld_src + ((index%ld_src)%8)*ld_dest+ (index%ld_src)/8;
dest[dest_index] = src[index];
}
}
9,124 | #include <iostream>
#include <math.h>
using namespace std;
/*
 * Minimal 2D vector usable from both host and device code.
 *
 * Bug fix: operator/ previously returned Vector2D(a*x, a*y), i.e. it
 * MULTIPLIED by the scalar instead of dividing by it; it now divides.
 */
class Vector2D{
public:
double x;
double y;
// Constructor (components default to zero)
__host__ __device__ Vector2D( double x0=0.f, double y0=0.f ) : x(x0), y(y0) {}
// Destructor intentionally defaulted; the commented-out version below
// called delete[] on member addresses, which is undefined behaviour.
// __host__ __device__ ~Vector2D(){ delete[] &x; delete[] &y; }
// Euclidean length.
__host__ __device__ double norm( void ) { return sqrt( x*x + y*y ); };
// Squared length (avoids the sqrt when only comparing magnitudes).
__host__ __device__ double norm2( void ) { return x*x + y*y ; };
// Scale to unit length.
// NOTE(review): divides by zero for the zero vector -- confirm callers
// never normalize a zero-length vector.
__host__ __device__ void normalize(){
double mag = norm();
x /= mag;
y /= mag;
}
// Component-wise sum.
__host__ __device__ Vector2D operator+( Vector2D &v ){
return Vector2D( x+v.x, y+v.y );
}
// Component-wise difference.
__host__ __device__ Vector2D operator-( Vector2D &v ){
return Vector2D( x-v.x, y-v.y );
}
// Dot product.
__host__ __device__ double operator*( Vector2D &v ){
return x*v.x + y*v.y;
}
// Scalar division (BUG FIX: original multiplied instead of dividing).
__host__ __device__ Vector2D operator/( double a ){
return Vector2D( x/a, y/a );
}
// Overwrite both components.
__host__ __device__ void redefine( double x0, double y0 ){
x = x0;
y = y0;
}
};
|
9,125 | //xfail:ASSERTION_ERROR
//--gridDim=1 --blockDim=2 --no-inline
//This kernel has a null pointer access.
#define memcpy(dst, src, len) __builtin_memcpy(dst, src, len)
typedef struct {
short x;
short y;
} s_t; //< sizeof(s2_t) == 4
// Intentionally copies to a null destination pointer: this kernel is a
// GPUVerify regression case expected to FAIL verification (see the
// xfail:ASSERTION_ERROR tag at the top of the file).  Do not "fix" the
// null pointer -- the defect is the point of the test.
__global__ void k(s_t *in, s_t *out) {
memcpy(0, &in[threadIdx.x], sizeof(s_t));
}
|
9,126 | #include <stdio.h>
#include <cuda.h>
#define BLOCK_SIZE 2
/***********************/
/* Sparse matrix-vector product y = M*x with M in ELL format.
 * data/col_index hold num_rows x num_elem entries laid out column-major
 * (entry i of row `row` lives at [row + i*num_rows]), so consecutive
 * threads (= consecutive rows) read consecutive addresses -- coalesced.
 * One thread per row; threads past num_rows exit via the guard. */
__global__ void SpMV_ELL(int num_rows,float* data, int* col_index, int num_elem, float* x, float* y)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
/**************************************************************/
/* YOUR TODO STARTS HERE */
/* Perform the multiplication between matrix M and vector x */
/* The result is store in vector y */
/**************************************************************/
if(row < num_rows)
{
float dot = 0.0;
for(int i=0;i<num_elem;i++)
dot += data[row+i*num_rows] * x[col_index[row+i*num_rows]];
y[row] = dot;
}
/**************************************************************/
/* YOUR TODO ENDS HERE */
/**************************************************************/
}
/**/
void checkCUDAError(const char *msg);
/**/
/*
 * Reads an ELL-format sparse matrix (num_rows x num_elem value and
 * column-index arrays, column-major) plus a dense vector x from stdin,
 * computes y = M*x on the GPU, prints y and the measured GPU time.
 *
 * Fixes over the original:
 *  - the col_index host-to-device copy was sized with sizeof(float)
 *    instead of sizeof(int) (same byte count on common ABIs, but
 *    semantically wrong and fragile),
 *  - cudaEventElapsedTime reports milliseconds, yet the message claimed
 *    seconds; the units are now correct,
 *  - the dimension reads are checked before they size allocations.
 */
int main(int argc, char* argv[])
{
int i;
/* host-side problem size and ELL arrays */
int num_rows, num_elem;
float* h_x, *h_data, *h_y;
int *h_col_index;
/* events timing copy-in + kernel + copy-out */
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
/*******************/
/** READING INPUT **/
/*******************/
if (scanf("%d",&num_rows) != 1 || scanf("%d",&num_elem) != 1) {
fprintf(stderr, "Failed to read matrix dimensions\n");
return 1;
}
/* allocation for matrix M and vector x*/
/* data and col_index contains num_rows x num_elem elements */
h_data = (float*) malloc(sizeof(float)*num_rows*num_elem);
h_col_index = (int*) malloc(sizeof(int)*num_rows*num_elem);
/* vector x contains num_rows elements */
h_x = (float*) malloc(sizeof(float)*num_rows);
/* the result vector contains num_rows elements */
h_y = (float*) malloc(sizeof(float)*num_rows);
/* reading matrix and vector from stdin*/
for(i=0;i<num_rows*num_elem;++i){ scanf("%f", &h_data[i]);}
for(i=0;i<num_rows*num_elem;++i){ scanf("%d", &h_col_index[i]);}
for(i=0;i<num_rows;++i){ scanf("%f", &h_x[i]);}
/********************/
/** FINISHED INPUT **/
/********************/
/* device buffers */
float *d_data, *d_x, *d_y;
int *d_col_index;
cudaMalloc(&d_data,sizeof(float)*num_rows*num_elem);
cudaMalloc(&d_col_index,sizeof(int)*num_rows*num_elem);
cudaMalloc(&d_x,sizeof(float)*num_rows);
cudaMalloc(&d_y,sizeof(float)*num_rows);
cudaEventRecord(start,0);
/* copy input data to device */
cudaMemcpy(d_data,h_data,sizeof(float)*num_rows*num_elem,cudaMemcpyHostToDevice);
/* BUG FIX: was sized with sizeof(float); col_index is an int array */
cudaMemcpy(d_col_index,h_col_index,sizeof(int)*num_rows*num_elem,cudaMemcpyHostToDevice);
cudaMemcpy(d_x,h_x,sizeof(float)*num_rows,cudaMemcpyHostToDevice);
/* one thread per row, BLOCK_SIZE threads per block (ceil-division) */
int num_block = (num_rows + BLOCK_SIZE - 1)/BLOCK_SIZE;
SpMV_ELL<<<num_block,BLOCK_SIZE>>>(num_rows,d_data,d_col_index,num_elem,d_x,d_y);
checkCUDAError("Kernel Invoking");
/* copy result back (blocking: also synchronises with the kernel) */
cudaMemcpy(h_y,d_y,sizeof(float)*num_rows,cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
/* BUG FIX: cudaEventElapsedTime returns milliseconds, not seconds */
fprintf(stderr,"Elapsed time = %f (ms)\n",elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
/* Print the final result vector. */
printf("The result vector:\n");
for(int i=0;i<num_rows;++i)printf("%4.1f ",h_y[i]);
/* free device memory */
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_data);
cudaFree(d_col_index);
/* free host memory */
free(h_x);
free(h_y);
free(h_data);
free(h_col_index);
return 0;
}
/*function to test CUDA command*/
/* Abort with a labelled message if the most recent CUDA call (or kernel
 * launch) left an error; otherwise do nothing. */
void checkCUDAError(const char *msg)
{
cudaError_t err = cudaGetLastError();
if (err == cudaSuccess)
return;
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
|
9,127 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* This program takes an initial estimate of m and c and finds the associated
* rms error. It is then as a base to generate and evaluate 8 new estimates,
* which are steps in different directions in m-c space. The best estimate is
* then used as the base for another iteration of "generate and evaluate". This
* continues until none of the new estimates are better than the base. This is
* a gradient search for a minimum in mc-space.
*
* To compile:
* cc -o lr_coursework lr_coursework.c -lm
*
* To run:
* ./lr_coursework
*
* Dr Kevan Buckley, University of Wolverhampton, 2018
*****************************************************************************/
typedef struct point_t {
double x;
double y;
} point_t;
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{77.91,137.94},{73.55,140.19},{67.73,103.18},{72.21,107.69},
{65.87,111.92},{69.66,113.91},{76.72,110.53},{70.64,116.64},
{85.61,124.05},{76.77,121.42},{76.49,110.56},{69.74,122.83},
{82.22,117.16},{30.49,71.38},{26.76,78.09},{60.10,113.07},
{ 6.45,48.40},{28.23,62.23},{16.98,57.19},{42.86,72.84},
{45.90,101.80},{11.00,48.54},{88.36,140.39},{10.97,56.44},
{33.41,63.81},{40.15,88.93},{41.16,94.70},{ 8.27,44.49},
{40.10,88.81},{14.94,68.41},{94.69,130.29},{54.04,99.42},
{96.78,144.92},{34.64,69.07},{68.88,111.93},{69.30,119.68},
{64.35,128.70},{13.22,64.68},{94.53,152.09},{37.79,92.85},
{29.84,87.18},{19.43,57.34},{49.04,95.81},{38.11,111.29},
{61.85,120.56},{45.89,93.11},{21.28,66.51},{42.66,74.88},
{86.60,133.95},{32.28,81.48},{45.65,85.03},{70.93,104.05},
{27.47,61.30},{27.89,84.13},{45.54,79.23},{26.27,74.84},
{99.30,147.19},{71.25,105.51},{ 2.30,46.45},{26.51,61.82},
{41.32,71.59},{43.62,82.30},{73.94,121.04},{77.37,138.14},
{65.54,103.86},{51.71,90.47},{45.09,80.79},{56.73,94.98},
{35.46,67.69},{15.96,43.84},{69.51,97.47},{76.31,115.14},
{50.76,111.88},{ 8.94,43.42},{94.76,130.50},{11.18,52.89},
{34.86,80.62},{37.48,79.21},{ 7.59,54.55},{27.57,76.34},
{57.26,87.54},{ 9.36,53.07},{47.67,91.40},{48.61,78.84},
{42.20,95.36},{69.48,116.91},{56.63,109.48},{63.82,103.96},
{11.35,42.22},{28.48,68.38},{60.46,106.86},{56.93,103.53},
{74.62,121.94},{93.32,141.87},{77.71,132.25},{12.04,36.33},
{86.85,135.93},{99.24,137.68},{24.16,79.63},{14.75,54.94},
{21.01,54.39},{70.57,106.15},{33.02,61.07},{90.59,137.18},
{62.71,97.37},{38.43,87.14},{55.08,96.69},{99.10,162.52},
{77.24,129.84},{31.20,70.54},{75.41,116.41},{23.94,54.01},
{ 6.83,44.58},{44.52,92.93},{78.11,110.63},{92.41,134.57},
{61.06,110.49},{58.22,80.87},{81.40,118.57},{83.75,143.43},
{ 4.82,55.24},{57.03,102.68},{26.86,78.80},{37.38,77.85},
{58.54,119.47},{56.66,90.04},{54.93,98.51},{60.22,94.79},
{80.88,120.59},{21.00,56.00},{63.01,104.75},{ 1.61,33.15},
{94.90,139.36},{95.17,153.42},{38.37,68.95},{66.06,109.97},
{68.45,112.16},{74.99,125.06},{49.64,93.96},{15.95,29.82},
{ 5.04,42.00},{98.76,137.21},{74.07,126.20},{68.65,128.60},
{11.38,26.96},{49.95,82.69},{29.04,74.89},{16.38,63.83},
{59.04,109.53},{27.32,71.71},{39.51,101.93},{54.04,96.36},
{51.50,100.11},{25.88,63.72},{76.07,112.84},{85.46,129.42},
{ 3.80,40.40},{57.09,110.76},{59.19,96.37},{76.34,124.58},
{38.28,91.58},{72.14,111.75},{88.50,132.91},{94.21,141.83},
{ 2.43,32.33},{62.47,115.70},{24.78,59.55},{14.39,64.41},
{99.32,140.63},{ 6.44,49.49},{ 2.25,29.16},{19.09,44.98},
{ 6.33,48.74},{54.46,91.56},{68.23,117.61},{27.76,77.29},
{78.68,118.79},{39.96,84.11},{99.49,146.02},{46.24,99.64},
{ 9.18,38.93},{35.33,94.25},{95.52,149.63},{56.44,99.26},
{10.70,60.09},{23.20,52.34},{ 4.34,34.46},{58.07,108.44},
{33.12,87.11},{72.71,116.57},{ 8.74,47.56},{ 0.04,51.06},
{26.39,55.02},{41.34,97.48},{96.12,138.97},{81.76,128.23},
{93.98,150.40},{77.63,137.75},{59.95,117.56},{92.74,133.49},
{88.40,144.82},{72.31,110.11},{61.92,101.44},{27.51,74.96},
{61.45,95.72},{73.46,117.17},{62.02,102.17},{59.49,114.88},
{18.03,47.92},{36.98,80.51},{24.98,57.81},{22.88,49.89},
{89.51,136.78},{46.50,91.37},{58.98,95.67},{48.35,83.96},
{73.68,125.13},{44.09,106.47},{32.16,67.74},{93.39,146.45},
{13.34,35.70},{74.02,111.39},{84.35,134.19},{72.87,106.49},
{80.02,116.40},{79.03,134.19},{ 9.43,73.06},{57.48,122.57},
{90.90,127.78},{42.58,83.98},{57.70,96.29},{71.45,108.44},
{35.14,84.38},{94.49,130.20},{22.54,89.12},{25.76,79.00},
{54.87,93.03},{81.53,123.81},{34.15,77.98},{70.97,116.78},
{13.18,47.54},{63.55,124.59},{62.49,107.07},{84.30,138.60},
{15.66,63.61},{30.99,87.18},{33.96,68.64},{ 2.19,46.07},
{48.87,92.79},{79.79,131.08},{71.29,120.93},{72.16,132.56},
{17.13,51.90},{28.39,71.37},{94.06,133.31},{17.60,43.10},
{77.55,145.59},{93.45,140.12},{12.55,53.67},{62.44,96.08},
{40.29,84.88},{26.65,69.78},{94.37,136.47},{32.37,66.81},
{59.10,99.68},{74.29,128.55},{21.33,69.52},{51.34,88.05},
{99.82,146.42},{47.96,80.59},{81.11,144.49},{94.90,153.29},
{54.00,103.65},{53.53,87.53},{54.91,90.78},{ 5.14,36.78},
{29.93,69.98},{ 3.08,37.13},{94.13,150.87},{10.46,52.34},
{36.77,95.13},{57.38,95.64},{89.28,127.06},{ 7.91,45.51},
{72.55,125.14},{83.21,133.87},{70.89,113.46},{32.39,82.07},
{54.13,100.86},{68.83,116.81},{64.48,105.76},{33.59,83.13},
{46.38,84.07},{90.03,120.24},{ 1.77,30.89},{67.22,119.87},
{39.33,84.74},{42.47,101.74},{95.05,136.38},{48.02,104.48},
{49.45,101.45},{82.31,122.99},{34.06,65.00},{91.26,121.28},
{ 0.41,32.00},{67.71,94.28},{99.76,133.29},{77.93,125.82},
{ 1.68,46.34},{45.04,107.98},{81.64,110.16},{72.74,117.13},
{84.24,107.66},{81.42,125.84},{57.07,100.89},{85.54,126.36},
{41.28,77.43},{54.28,95.17},{76.96,142.41},{70.96,93.42},
{ 2.31,43.37},{84.15,131.81},{39.52,84.19},{33.53,61.80},
{61.74,92.17},{21.04,56.67},{ 8.18,58.27},{ 4.70,44.13},
{50.57,95.90},{27.39,69.58},{16.06,30.97},{45.69,91.88},
{86.56,132.60},{40.11,67.72},{27.03,67.79},{34.12,72.91},
{95.42,146.35},{47.82,98.04},{88.28,142.05},{39.46,72.98},
{33.18,70.94},{64.41,120.27},{83.11,136.72},{49.37,78.60},
{51.86,83.64},{75.19,118.96},{75.39,124.65},{45.93,77.95},
{ 5.86,46.50},{47.88,98.78},{28.13,64.80},{40.09,91.03},
{81.07,143.02},{79.79,102.30},{42.99,85.52},{36.20,72.76},
{99.67,156.20},{64.44,110.66},{94.63,138.33},{28.42,75.97},
{54.67,87.20},{96.62,154.09},{23.70,62.38},{38.67,78.86},
{22.09,56.57},{29.19,70.08},{ 9.39,63.72},{20.57,46.94},
{77.93,123.66},{54.94,94.95},{95.31,129.18},{10.14,49.72},
{48.01,76.86},{62.66,128.28},{ 3.51,48.10},{50.77,83.73},
{60.45,116.21},{ 8.07,57.61},{85.27,152.01},{63.39,109.60},
{86.87,129.76},{ 3.76,36.44},{93.11,149.12},{69.63,114.32},
{88.45,131.41},{90.76,123.43},{69.16,123.60},{10.23,37.67},
{68.41,122.94},{28.20,56.51},{39.87,79.05},{51.55,85.21},
{47.52,95.17},{25.61,75.33},{85.93,136.70},{30.53,57.66},
{ 3.47,49.10},{97.05,145.27},{67.53,102.44},{74.58,121.92},
{ 1.84,46.71},{20.51,53.47},{67.26,97.46},{49.67,90.19},
{36.84,83.86},{28.66,62.86},{40.13,90.36},{93.40,140.55},
{58.51,96.91},{79.61,93.98},{85.29,133.17},{91.11,142.37},
{97.26,154.56},{58.64,95.55},{78.03,125.40},{45.37,78.87},
{95.15,138.71},{64.43,123.91},{68.30,119.83},{84.59,124.52},
{36.37,80.59},{70.22,96.59},{30.18,75.66},{95.22,133.93},
{29.80,73.46},{36.03,68.69},{22.55,60.53},{92.75,139.88},
{67.76,113.62},{91.84,133.75},{66.37,119.44},{ 1.67,25.11},
{25.90,55.54},{54.07,91.65},{33.45,91.06},{10.93,58.02},
{80.08,129.17},{ 8.88,57.18},{40.95,80.77},{ 5.92,28.75},
{30.67,77.57},{40.89,79.48},{97.27,158.36},{81.72,123.87},
{23.01,52.68},{53.24,101.99},{97.87,137.07},{57.48,101.19},
{98.71,148.21},{71.11,112.95},{57.69,83.01},{92.05,131.64},
{44.24,97.84},{94.38,147.34},{18.31,47.47},{53.40,87.97},
{37.76,79.24},{25.34,66.33},{48.52,92.49},{74.42,126.63},
{ 9.16,35.22},{10.12,61.68},{82.08,127.94},{55.82,115.67},
{94.99,158.31},{52.50,98.22},{33.08,85.34},{44.86,71.11},
{63.03,109.30},{30.23,63.91},{42.90,99.14},{13.49,61.23},
{34.00,78.21},{20.83,64.89},{56.70,110.87},{29.28,62.25},
{39.06,70.14},{41.13,75.52},{15.31,48.77},{47.86,90.13},
{81.72,124.72},{26.99,75.25},{79.69,124.73},{19.90,55.67},
{31.05,71.45},{73.25,108.77},{30.93,71.27},{13.94,57.58},
{96.73,123.05},{ 0.36,27.96},{55.29,98.98},{35.61,76.60},
{36.07,97.21},{32.71,67.50},{55.60,108.66},{54.62,96.93},
{18.98,55.79},{11.90,52.95},{10.51,44.69},{64.28,107.92},
{83.08,122.82},{27.91,83.34},{84.34,145.33},{86.00,142.97},
{43.56,88.18},{78.20,111.30},{81.74,128.23},{65.69,113.52},
{74.03,128.98},{45.63,74.61},{98.51,156.36},{38.19,90.32},
{68.10,117.84},{37.99,62.93},{90.85,143.03},{22.43,63.57},
{13.21,38.92},{91.97,142.82},{62.72,115.55},{67.26,126.35},
{53.05,85.26},{93.97,142.15},{58.59,115.37},{91.96,134.64},
{27.86,75.95},{54.72,112.05},{24.52,80.58},{ 6.18,29.76},
{31.05,69.21},{63.08,112.53},{70.10,94.71},{76.97,129.39},
{15.09,50.83},{27.21,71.13},{ 6.49,46.66},{43.93,98.49},
{ 7.49,48.51},{16.83,47.93},{38.64,67.91},{50.04,74.44},
{40.82,90.82},{ 6.80,32.81},{63.64,93.63},{60.60,109.89},
{58.90,101.00},{86.48,145.07},{ 7.15,41.21},{28.15,67.43},
{64.20,101.33},{80.75,115.35},{40.40,79.91},{34.78,84.96},
{69.88,121.96},{16.66,73.49},{10.06,58.83},{27.96,64.46},
{53.84,91.50},{87.87,146.70},{49.03,82.12},{76.03,111.50},
{29.03,55.19},{22.44,53.09},{82.82,132.99},{95.90,136.32},
{37.21,71.98},{42.25,104.38},{77.76,134.68},{27.48,79.72},
{ 8.20,54.46},{22.64,70.60},{56.39,93.04},{41.02,79.64},
{85.82,147.33},{46.10,86.18},{73.35,120.35},{35.86,84.81},
{79.61,132.16},{33.31,61.78},{86.83,125.84},{15.61,38.11},
{60.07,89.20},{97.80,132.30},{ 6.66,39.04},{ 1.06,21.28},
{17.84,65.02},{52.00,95.55},{81.65,118.00},{76.78,132.88},
{97.72,151.72},{61.43,104.38},{64.39,107.58},{22.55,73.41},
{54.48,113.54},{64.33,113.33},{ 8.85,29.80},{63.27,114.98},
{26.79,75.91},{ 9.12,63.89},{ 2.82,40.76},{17.92,56.66},
{24.75,76.14},{31.34,73.34},{32.78,76.99},{10.92,36.93},
{26.73,64.14},{10.88,58.58},{96.82,140.90},{77.88,134.50},
{97.84,134.78},{42.59,80.77},{17.50,59.90},{93.79,135.44},
{77.77,115.47},{51.33,86.67},{12.70,32.70},{60.72,103.85},
{31.69,60.38},{83.72,111.31},{61.48,107.22},{88.83,123.38},
{12.92,56.40},{35.71,65.41},{24.00,48.01},{88.44,139.09},
{ 0.23,34.14},{38.85,77.55},{45.11,90.53},{29.25,65.54},
{61.30,99.63},{14.23,58.27},{30.31,75.98},{76.70,119.00},
{32.24,62.54},{24.71,62.05},{78.14,129.60},{23.29,68.88},
{72.49,106.79},{79.14,120.16},{16.74,58.14},{79.03,120.90},
{ 2.20,47.86},{21.38,71.37},{38.66,101.19},{91.29,134.26},
{79.56,143.14},{ 0.64,17.91},{38.24,73.91},{43.36,101.26},
{75.76,128.57},{61.91,97.17},{ 2.87,39.03},{76.97,129.62},
{56.48,95.38},{24.98,72.11},{ 0.31,28.92},{65.32,95.59},
{78.66,112.24},{ 9.61,55.49},{17.51,62.49},{44.86,84.27},
{56.82,108.95},{88.90,127.31},{77.91,102.26},{59.98,87.42},
{63.04,94.23},{36.46,88.09},{72.96,120.36},{94.22,156.65},
{25.16,74.23},{87.33,131.71},{85.61,129.34},{62.29,113.26},
{36.64,84.47},{86.47,129.95},{24.83,55.85},{36.88,91.52},
{ 9.60,44.53},{ 8.29,29.05},{77.87,117.78},{ 3.65,57.62},
{29.50,66.42},{82.11,135.13},{87.94,131.08},{19.22,51.06},
{77.14,137.18},{36.06,85.33},{11.79,65.84},{95.87,122.45},
{86.82,130.26},{66.64,102.41},{84.49,124.25},{58.31,85.27},
{ 6.65,50.38},{92.34,130.07},{30.25,69.84},{44.33,76.39},
{11.95,51.41},{41.72,105.88},{59.94,109.36},{13.56,49.44},
{60.66,117.25},{38.59,85.94},{48.00,100.76},{ 7.14,52.20},
{16.88,50.44},{ 3.07,46.82},{93.55,122.74},{88.41,126.77},
{70.37,122.32},{44.80,89.11},{29.92,61.25},{97.73,144.98},
{37.63,74.16},{51.59,109.22},{43.66,80.18},{95.37,151.05},
{79.07,135.38},{19.82,65.97},{90.53,115.60},{81.58,123.75},
{28.89,66.95},{24.30,77.77},{89.15,126.12},{27.07,74.44},
{ 7.44,33.59},{26.16,70.17},{90.96,128.55},{39.91,75.53},
{65.45,93.73},{ 7.68,32.59},{34.21,86.35},{36.14,70.00},
{48.50,82.20},{96.88,140.90},{61.67,97.25},{54.20,102.73},
{20.02,65.41},{10.62,55.73},{48.33,87.72},{17.04,50.61},
{31.04,61.63},{10.91,53.43},{50.99,86.70},{65.09,88.77},
{89.08,146.30},{80.78,121.86},{14.37,58.44},{ 9.39,40.79},
{20.67,57.29},{ 9.08,68.40},{47.52,95.72},{71.48,117.41},
{11.62,52.50},{ 6.70,54.06},{62.83,122.69},{74.72,142.22},
{ 1.67,38.64},{ 0.16,38.41},{97.31,150.19},{42.77,77.46},
{22.14,55.75},{83.46,136.50},{61.77,96.62},{ 0.06,30.09},
{97.36,143.75},{70.03,125.10},{79.57,127.39},{83.54,127.26},
{42.85,92.36},{17.24,58.84},{53.25,88.51},{ 2.56,44.53},
{71.72,121.73},{85.75,130.90},{47.62,101.11},{15.78,63.30},
{ 6.43,45.38},{16.56,39.99},{61.06,110.65},{36.67,93.80},
{14.19,44.88},{ 0.68,49.49},{ 7.30,34.40},{ 8.88,50.84},
{95.16,130.83},{71.87,122.62},{20.10,57.88},{94.33,140.90},
{32.76,61.94},{53.70,96.13},{70.60,129.76},{71.13,118.00},
{12.84,51.27},{13.24,56.18},{ 9.13,47.39},{80.29,139.56},
{21.04,65.87},{67.74,101.56},{36.60,68.50},{40.76,91.95},
{52.31,98.09},{18.87,47.54},{70.72,99.96},{92.31,125.51},
{66.83,110.26},{ 0.45,28.87},{53.29,92.35},{19.20,56.25},
{64.75,97.41},{98.02,156.22},{83.66,137.30},{50.42,95.68},
{67.75,114.35},{ 0.62,40.65},{79.83,120.17},{89.79,132.11},
{36.21,68.02},{40.99,83.14},{93.31,158.32},{14.33,52.24},
{25.40,84.95},{ 1.54,32.14},{52.78,102.58},{92.88,140.40},
{ 3.40,46.06},{28.56,55.92},{81.67,114.32},{41.98,78.43},
{ 2.41,40.92},{87.39,129.75},{24.11,59.23},{70.33,108.86},
{97.45,170.97},{51.47,73.41},{49.55,95.09},{62.37,113.87},
{ 9.01,40.54},{95.06,120.59},{75.97,133.00},{ 4.72,58.11},
{18.99,59.83},{47.94,77.34},{79.85,106.00},{28.92,77.12},
{45.71,84.34},{39.43,79.34},{52.63,108.60},{49.54,93.24},
{59.78,95.58},{18.71,62.50},{46.50,98.75},{52.82,85.80},
{72.43,131.61},{36.02,76.32},{46.58,101.85},{21.49,60.48},
{ 6.05,45.53},{90.92,138.53},{55.96,106.46},{84.69,135.08},
{28.24,68.22},{39.17,94.71},{ 6.92,56.07},{49.42,109.44},
{22.91,49.83},{36.70,70.34},{12.48,53.18},{38.64,78.95},
{83.58,113.92},{10.45,32.71},{65.88,102.70},{40.93,91.07},
{ 3.45,27.36},{24.43,46.10},{92.16,149.14},{21.86,60.48},
{67.09,109.56},{22.22,71.28},{32.01,67.43},{12.73,44.50},
{75.37,116.20},{85.03,129.18},{66.38,103.56},{39.10,95.26},
{11.80,54.21},{18.01,52.89},{21.36,68.01},{ 1.58,47.56},
{30.67,73.12},{35.21,71.88},{22.38,64.38},{22.65,59.59},
{41.35,67.34},{32.20,70.19},{81.08,133.90},{86.97,136.75},
{17.44,60.37},{80.92,133.81},{99.32,144.20},{27.09,75.37},
{48.93,82.31},{67.78,121.54},{32.13,83.10},{35.53,89.31},
{40.21,54.98},{68.96,126.59},{ 4.47,30.15},{25.80,76.93},
{26.78,66.78},{41.94,90.81},{44.21,75.12},{61.65,103.95},
{99.04,137.83},{82.92,125.62},{62.11,115.28},{63.62,113.02},
{26.20,73.38},{28.14,77.48},{28.19,74.24},{10.03,52.34},
{64.55,109.04},{70.74,105.96},{60.22,92.48},{10.32,72.87},
{33.34,57.89},{35.27,65.05},{45.76,116.58},{ 0.49,57.86},
{66.70,109.27},{55.73,103.89},{44.45,90.52},{38.56,77.80},
{82.45,120.05},{66.12,113.99},{12.53,66.87},{ 5.50,48.99},
{74.01,115.15},{30.31,72.87},{35.83,71.68},{37.14,95.23},
{51.21,99.36},{23.85,69.26},{26.89,75.49},{13.59,59.16},
{25.22,68.93},{52.73,109.21},{60.45,113.81},{51.60,103.04},
{79.96,123.55},{46.98,97.77},{ 1.66,21.38},{75.71,137.06},
{33.47,70.29},{ 1.51,35.75},{ 0.74,35.19},{62.56,88.66},
{87.96,135.91},{62.35,105.98},{12.09,62.14},{96.99,151.92},
{74.71,134.08},{87.17,134.74},{12.05,34.79},{32.97,78.39},
{ 2.80,51.64},{26.75,67.52},{40.96,69.15},{78.20,123.24},
{29.55,66.86},{92.50,135.15},{44.16,90.03},{68.10,115.91},
{ 7.05,36.94},{ 1.31,34.46},{42.44,100.45},{12.63,42.62},
{30.10,87.86},{47.35,91.17},{18.59,50.43},{64.59,98.09},
{54.62,77.52},{67.17,91.15},{37.10,71.55},{86.15,139.15},
{23.17,58.38},{58.31,97.30},{40.06,66.65},{89.85,145.61},
{54.43,85.60},{60.17,110.33},{16.25,57.61},{60.56,106.49},
{ 7.44,51.15},{59.46,114.06},{44.40,81.99},{14.29,45.65},
{ 8.30,44.93},{66.49,111.11},{78.69,118.62},{60.81,116.74}
};
// Squared residual of the point (x, y) against the line y = m*x + c.
double residual_error(double x, double y, double m, double c) {
  double diff = (m * x) + c - y;
  return diff * diff;
}
// Device-side counterpart of residual_error(): squared error of (x, y)
// relative to the line y = m*x + c.
__device__ double d_residual_error(double x, double y, double m, double c) {
  double diff = m * x + c - y;
  return diff * diff;
}
// Root-mean-square error of the line y = m*x + c over the global data set
// (the file-level `data` array of `n_data` points).
double rms_error(double m, double c) {
  double sum = 0;
  for (int i = 0; i < n_data; i++) {
    sum += residual_error(data[i].x, data[i].y, m, c);
  }
  return sqrt(sum / n_data);
}
// One thread per data point: store the squared residual of the point this
// thread owns into error_sum_arr[i].  The launch configuration must supply
// exactly as many threads as there are entries in d_data/error_sum_arr
// (the caller launches 100 blocks x 10 threads).
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
  // Global thread index: thread id within the block plus the block offset.
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
// Store finish - start, in nanoseconds, into *difference.
// Returns 0 when the interval is positive, nonzero otherwise.
int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
  long long int sec = finish->tv_sec - start->tv_sec;
  long long int nsec = finish->tv_nsec - start->tv_nsec;
  // Borrow one second when the nanosecond part went negative.
  if (nsec < 0) {
    sec--;
    nsec += 1000000000;
  }
  *difference = sec * 1000000000 + nsec;
  return !(*difference > 0);
}
// Gradient-descent-style search for the least-squares line y = m*x + c:
// from the current best (bm, bc), evaluate the RMS error of the 8
// neighbouring candidates (GPU-accelerated) and step to the best one until
// no neighbour improves.
int main() {
  int i;
  // Best-so-far line parameters and their RMS error.
  double bm = 1.3;
  double bc = 10;
  double be;
  // Candidate parameters and errors for the 8 search directions.
  double dm[8];
  double dc[8];
  double e[8];
  double step = 0.01;
  double best_error = 999999999;
  int best_error_i;
  int minimum_found = 0;
  // Direction offsets around the current (bm, bc).
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};
  struct timespec start, finish;
  long long int time_elapsed;
  // System time before the linear regression begins.
  clock_gettime(CLOCK_MONOTONIC, &start);
  cudaError_t error;
  // Device buffers.
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;
  be = rms_error(bm, bc);
  // Allocate memory for d_dm
  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  // Allocate memory for d_dc
  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  // Allocate memory for d_error_sum_arr (one slot per data point).
  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  // Allocate memory for d_data
  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  while(!minimum_found) {
    // Generate the 8 neighbouring candidates around (bm, bc).
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }
    // Copy candidate parameters to the device.
    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    // Copy the data points to the device.
    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
              cudaGetErrorString(error));
    }
    for(i=0;i<8;i++) {
      // Per-point squared errors copied back from the kernel.
      double h_error_sum_arr[1000];
      // Fix: must start at zero each candidate — the original declared this
      // uninitialized and only reset it at the END of the iteration, so the
      // first pass summed garbage.
      double error_sum_total = 0;
      // Mean of the summed squared errors.
      double error_sum_mean;
      // 100 blocks x 10 threads = one thread per data point.
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
      // is its documented replacement.
      cudaDeviceSynchronize();
      // Copy back the per-point squared errors.
      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
                cudaGetErrorString(error));
      }
      // Accumulate the squared errors, then take the RMS.
      for(int j=0; j<n_data; j++) {
        error_sum_total += h_error_sum_arr[j];
      }
      error_sum_mean = error_sum_total / n_data;
      e[i] = sqrt(error_sum_mean);
      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }
    // Step downhill while the best neighbour improves on the current point;
    // otherwise we have found the (local) minimum.
    if(best_error < be) {
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }
  // Release device memory.
  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
            cudaGetErrorString(error));
    exit(1);
  }
  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
  // System time after the linear regression finished.
  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
         (time_elapsed/1.0e9));
  return 0;
}
|
9,128 |
#include "cuda.h"
#include <time.h>
#include <stdio.h>
#include <math.h>
// Square every element of `data` into `result`, element-wise.
// N is the number of valid elements; surplus threads do nothing.
__global__
void map(float *data, float *result, int N) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  // Fix: guard against launches with more threads than elements (the N
  // parameter existed but was never used), and square with a plain float
  // multiply instead of a double-precision pow() round-trip.
  if (i < N)
    result[i] = data[i] * data[i];
}
// Host driver: fill an input array with 5s, square it on the GPU, and
// print the first N results (expected: 25.0 each).
int main() {
  const int N = 10;
  // Host buffers (sized N*N to match the device allocations below; only
  // the first N entries are initialized and printed).
  float *A_h = (float*)malloc(sizeof(float) * N * N);
  float *B_h = (float*)malloc(sizeof(float) * N * N);
  for (int i = 0; i < N; i++) A_h[i] = 5;
  // Device buffers.
  float *A_d, *B_d;
  cudaMalloc(&A_d, sizeof(float) * N * N);
  cudaMalloc(&B_d, sizeof(float) * N * N);
  cudaMemcpy(A_d, A_h, sizeof(float) * N * N, cudaMemcpyHostToDevice);
  // One 10x10 block; the kernel only consumes the x dimension of the index.
  dim3 dimBlock(10, 10);
  dim3 dimGrid(N / 10, N / 10);
  map<<<dimGrid, dimBlock>>>(A_d, B_d, N);
  cudaMemcpy(B_h, B_d, sizeof(float) * N * N, cudaMemcpyDeviceToHost);
  for (int i = 0; i < N; i++) fprintf(stdout, "%f\n", B_h[i]);
  cudaFree(A_d);
  cudaFree(B_d);
  free(A_h);
  free(B_h);
  return 0;
}
|
9,129 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
// Element-wise square: d_out[i] = d_in[i]^2.
// Assumes the launch supplies exactly one thread per array element.
__global__ void squareKernel(float* d_in, float *d_out) {
    const unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x; // global thread id
    float v = d_in[gid];
    d_out[gid] = v * v;
}
// Host driver: square the values 0..31 on the GPU and print the results.
int main(int argc, char** argv) {
    const unsigned int count = 32;
    const unsigned int bytes = count * sizeof(float);
    // Host buffers: input 0..31 and space for the squared output.
    float* h_in  = (float*) malloc(bytes);
    float* h_out = (float*) malloc(bytes);
    for (unsigned int i = 0; i < count; ++i) {
        h_in[i] = (float)i;
    }
    // Device buffers.
    float* d_in;
    float* d_out;
    cudaMalloc((void**)&d_in, bytes);
    cudaMalloc((void**)&d_out, bytes);
    // Host -> device, compute, device -> host.
    cudaMemcpy(d_in, h_in, bytes, cudaMemcpyHostToDevice);
    // One block, one thread per element.
    squareKernel<<<1, count>>>(d_in, d_out);
    cudaMemcpy(h_out, d_out, sizeof(float) * count, cudaMemcpyDeviceToHost);
    // Print the squared values.
    for (unsigned int i = 0; i < count; ++i) printf("%.6f\n", h_out[i]);
    // Clean up.
    free(h_in); free(h_out);
    cudaFree(d_in); cudaFree(d_out);
}
9,130 | /*
Faz a soma global dos elementos de um vetor
Exemplifica o uso de atomicADD()
Para compilar: nvcc 00-soma-elems-vet-atomic.cu -o 00-soma-elems-vet-atomic
Para executar: ./00-soma-elems-vet-atomic
OBS: os valores de tamanho do vetor e o conteudo do vetor
estao fixos no codigo
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Accumulate the global sum of vetorA's elements into *soma via atomicAdd.
// tam is the number of valid elements; surplus threads do nothing.
__global__ void soma_elementos(int *vetorA, int *soma, int tam)
{
    // Global thread index.
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Only threads that map to a valid element contribute.
    if (idx < tam)
        atomicAdd(soma, vetorA[idx]);
}
// Host driver: sum a vector of ones on the GPU with atomicAdd and print
// the result (expected: tam).
int main(int argc, char **argv)
{
    // Host-side state: the input vector and the accumulated sum.
    int i, *vetorA, threadsPerBlock, blocksPerGrid, soma = 0;
    // Device pointers.
    int *vetorA_d, *soma_d;
    // Vector length (an earlier configuration used 5000).
    int tam = 16;
    threadsPerBlock = 256;
    // Host allocation, later filled with ones so the expected sum is tam.
    vetorA = (int *)malloc(tam * sizeof(int));
    // Device allocations: the vector and the single-int accumulator.
    cudaMalloc((void **)&vetorA_d, tam * (sizeof(int)));
    cudaMalloc((void **)&soma_d, sizeof(int));
    for (i = 0; i < tam; i++)
    {
        vetorA[i] = 1;
    }
    // Ceil-divide so every element gets a thread.
    blocksPerGrid = (tam + threadsPerBlock - 1) / threadsPerBlock;
    // Host -> device copies: the vector and the zero-initialized accumulator.
    cudaMemcpy(vetorA_d, vetorA, tam * (sizeof(int)), cudaMemcpyHostToDevice);
    cudaMemcpy(soma_d, &soma, sizeof(int), cudaMemcpyHostToDevice);
    soma_elementos<<<blocksPerGrid, threadsPerBlock>>>(vetorA_d, soma_d, tam);
    // Copy the sum back and report it.
    cudaMemcpy(&soma, soma_d, sizeof(int), cudaMemcpyDeviceToHost);
    printf("%d\n", soma);
    // Clean up host and device allocations.
    free(vetorA);
    cudaFree(vetorA_d);
    cudaFree(soma_d);
}
|
9,131 | /**
* mvt.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <unistd.h>
#include <sys/time.h>
#include <cuda.h>
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define GPU_DEVICE 0
/* Problem size */
#define N 4096
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 256
#define DIM_THREAD_BLOCK_Y 1
/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;
// Fill the benchmark operands: the four length-N vectors and the N x N
// matrix A, with deterministic values derived from the indices.
void init_array(DATA_TYPE* A, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
    for (int i = 0; i < N; i++)
    {
        x1[i] = ((DATA_TYPE) i) / N;
        x2[i] = ((DATA_TYPE) i + 1) / N;
        y1[i] = ((DATA_TYPE) i + 3) / N;
        y2[i] = ((DATA_TYPE) i + 4) / N;
        for (int j = 0; j < N; j++)
        {
            A[i*N + j] = ((DATA_TYPE) i*j) / N;
        }
    }
}
// Reference CPU implementation of MVT: x1 += A * y1 and x2 += A^T * y2.
void runMvt(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y1, DATA_TYPE* y2)
{
    // x1 accumulates row-wise products of A with y1.
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            x1[i] += a[i*N + j] * y1[j];
        }
    }
    // x2 accumulates column-wise (transposed) products of A with y2.
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            x2[i] += a[j*N + i] * y2[j];
        }
    }
}
// Count elements where CPU and GPU results differ by more than the
// configured percentage threshold, and report the total.
void compareResults(DATA_TYPE* x1, DATA_TYPE* x1_outputFromGpu, DATA_TYPE* x2, DATA_TYPE* x2_outputFromGpu)
{
    int fail = 0;
    for (int i = 0; i < N; i++)
    {
        if (percentDiff(x1[i], x1_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
            fail++;
        if (percentDiff(x2[i], x2_outputFromGpu[i]) > PERCENT_DIFF_ERROR_THRESHOLD)
            fail++;
    }
    // Report how many entries exceeded the tolerance.
    printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
// Select the configured GPU and announce its name on stdout.
void GPU_argv_init()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, GPU_DEVICE);
    printf("setting device %d with name %s\n", GPU_DEVICE, prop.name);
    cudaSetDevice(GPU_DEVICE);
}
// One thread per row i: x1[i] += dot(A[i, :], y_1).
__global__ void mvt_kernel1(DATA_TYPE *a, DATA_TYPE *x1, DATA_TYPE *y_1)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N) return;
    // Accumulate locally, then store once; this thread is the sole writer
    // of x1[i], so the result is identical to in-place accumulation.
    DATA_TYPE acc = x1[i];
    for (int j = 0; j < N; j++)
    {
        acc += a[i * N + j] * y_1[j];
    }
    x1[i] = acc;
}
// One thread per column i: x2[i] += dot(A[:, i], y_2)  (transposed access).
__global__ void mvt_kernel2(DATA_TYPE *a, DATA_TYPE *x2, DATA_TYPE *y_2)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N) return;
    // Accumulate locally, then store once; this thread is the sole writer
    // of x2[i], so the result is identical to in-place accumulation.
    DATA_TYPE acc = x2[i];
    for (int j = 0; j < N; j++)
    {
        acc += a[j * N + i] * y_2[j];
    }
    x2[i] = acc;
}
// Run the MVT benchmark on the GPU: copy the operands over, launch the two
// kernels (x1 += A*y1 and x2 += A^T*y2), time the kernel section, and copy
// the updated vectors back into the *_outputFromGpu buffers.
void mvtCuda(DATA_TYPE* a, DATA_TYPE* x1, DATA_TYPE* x2, DATA_TYPE* y_1, DATA_TYPE* y_2,
             DATA_TYPE* x1_outputFromGpu, DATA_TYPE* x2_outputFromGpu)
{
    double t_start, t_end;
    // Device copies of the operands.
    DATA_TYPE* a_gpu;
    DATA_TYPE* x1_gpu;
    DATA_TYPE* x2_gpu;
    DATA_TYPE* y_1_gpu;
    DATA_TYPE* y_2_gpu;
    cudaMalloc((void **)&a_gpu, sizeof(DATA_TYPE) * N * N);
    cudaMalloc((void **)&x1_gpu, sizeof(DATA_TYPE) * N);
    cudaMalloc((void **)&x2_gpu, sizeof(DATA_TYPE) * N);
    cudaMalloc((void **)&y_1_gpu, sizeof(DATA_TYPE) * N);
    cudaMalloc((void **)&y_2_gpu, sizeof(DATA_TYPE) * N);
    cudaMemcpy(a_gpu, a, sizeof(DATA_TYPE) * N * N, cudaMemcpyHostToDevice);
    cudaMemcpy(x1_gpu, x1, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(x2_gpu, x2, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(y_1_gpu, y_1, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(y_2_gpu, y_2, sizeof(DATA_TYPE) * N, cudaMemcpyHostToDevice);
    // One thread per row/column.
    dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
    dim3 grid((size_t)ceil((float)N / ((float)DIM_THREAD_BLOCK_X)), 1);
    t_start = rtclock();
    // The two kernels write disjoint outputs, so no sync is needed between
    // them — only before the timing stops.
    mvt_kernel1<<<grid,block>>>(a_gpu, x1_gpu, y_1_gpu);
    mvt_kernel2<<<grid,block>>>(a_gpu, x2_gpu, y_2_gpu);
    // Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is its documented replacement.
    cudaDeviceSynchronize();
    t_end = rtclock();
    fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start);
    cudaMemcpy(x1_outputFromGpu, x1_gpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost);
    cudaMemcpy(x2_outputFromGpu, x2_gpu, sizeof(DATA_TYPE) * N, cudaMemcpyDeviceToHost);
    cudaFree(a_gpu);
    cudaFree(x1_gpu);
    cudaFree(x2_gpu);
    cudaFree(y_1_gpu);
    cudaFree(y_2_gpu);
}
// Benchmark driver: run MVT on the GPU, then the CPU reference, report
// both runtimes and the number of mismatching outputs.
int main()
{
    double t_start, t_end;
    // Host operands and the GPU result buffers.
    DATA_TYPE* a  = (DATA_TYPE*)malloc(N*N*sizeof(DATA_TYPE));
    DATA_TYPE* x1 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
    DATA_TYPE* x2 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
    DATA_TYPE* x1_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
    DATA_TYPE* x2_outputFromGpu = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
    DATA_TYPE* y_1 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
    DATA_TYPE* y_2 = (DATA_TYPE*)malloc(N*sizeof(DATA_TYPE));
    init_array(a, x1, x2, y_1, y_2);
    GPU_argv_init();
    // GPU pass first (reads the pristine x1/x2), then the CPU reference,
    // which updates x1/x2 in place.
    mvtCuda(a, x1, x2, y_1, y_2, x1_outputFromGpu, x2_outputFromGpu);
    t_start = rtclock();
    runMvt(a, x1, x2, y_1, y_2);
    t_end = rtclock();
    fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start);
    compareResults(x1, x1_outputFromGpu, x2, x2_outputFromGpu);
    // Release host buffers.
    free(a);
    free(x1);
    free(x2);
    free(x1_outputFromGpu);
    free(x2_outputFromGpu);
    free(y_1);
    free(y_2);
    return 0;
}
|
9,132 | #include "includes.h"
// includes, project
#define PI 3.1415926536f
int MaxThreadsPerBlock;
int MaxThreadsX;
int MaxThreadsY;
// Conversion d'un vecteur réel en vecteur complexe
// Conversion d'un vecteur complexe en vecteur réel
// Multiplie point par point un vecteur complex par un vecteur réel
// Applique y = at*x +bt à chaque point d'un vecteur réel
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de bytes
// Alpha n'est pas modifié
// Remplissage de la linearmem (tableau de pixels) associée à la texture avec le tableau de réel
// Alpha autorise l'affichage au dessus d'un certain seuil
// Processus auto-régressif X2 = a*X1 + b*X0 + N0;
// Expansion
// On applique une interpolation bi-linéaire à la source
// Transformation Cartesian To Polar
// On applique une interpolation bi-linéaire à la source
// Cartesian -> polar resampling of one image plane.
// Each thread produces one destination pixel (x, y): it maps the pixel to
// a (radius, angle) coordinate relative to the image centre, then
// bilinearly interpolates the source tb1 at that location (with wraparound
// at the image edges) and writes the result to tb2.
// NOTE(review): single-precision intrinsics (sqrtf, powf, atan2f, floorf)
// are applied to double data — presumably a deliberate speed/precision
// trade-off; confirm before changing.
__global__ void Kernel_CartToPol1(double *tb1, double *tb2, int width, int height )
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
// Threads that fall outside the image do nothing.
if (x >= width || y >= height) return;
// Radius (scaled by 2) and angle (mapped from [-PI, PI] to [0, height)).
double R = 2* sqrtf( powf(x-width/2,2) + powf(y-height/2,2) );
double theta = (atan2f( y-height/2 , x-width/2) +PI)*height/(2*PI);
// Integer source coordinates, wrapped into the image bounds.
int x1 = ((int) R) % width ;
int y1 = ((int) theta) % height;
int xp1 = (x1+1) % width;
int yp1 = (y1+1) % height;
// The four neighbouring source samples around (x1, y1).
double z1 = tb1[width*y1+x1];
double z2 = tb1[width*yp1+x1];
double z3 = tb1[width*yp1+xp1];
double z4 = tb1[width*y1+xp1];
// Fractional offsets used as the interpolation weights.
double dx = theta-floorf(theta);
double dy = R-floorf(R);
// Bilinear blend: first along y (weight dy), then along x (weight dx).
double zp = 1.0*z1+ dy*(1.0*z2-z1);
double zq = 1.0*z4+ dy*(1.0*z3-z4);
double ZR = zp+ dx*(zq-zp);
tb2[width*y+x] = ZR;
}
9,133 | // Courtesy of https://devblogs.nvidia.com/parallelforall/unified-memory-cuda-beginners/
// REMOVE ME: Uncommnet the code only upon full implementation or get seg-fault
#include <iostream>
#include <math.h>
// CUDA kernel to add elements of two arrays
// Grid-stride vector addition: y[i] = x[i] + y[i] for every i in [0, n).
// Correct for any launch configuration, including a single block.
__global__
void add(int n, float *x, float *y)
{
  int first = blockIdx.x * blockDim.x + threadIdx.x;
  int step = blockDim.x * gridDim.x;
  for (int i = first; i < n; i += step)
    y[i] += x[i];
}
// Unified-memory vector-add benchmark: allocate 2^argv[1] elements with
// cudaMallocManaged, run the add kernel once, and print the elapsed time
// in milliseconds.
int main(int argc, char** argv)
{
  // The size exponent is a required argument.
  if(argc < 2)
    return 0;
  int N = 1<<atoi(argv[1]);
  float *x, *y;
  // Allocate Unified Memory -- accessible from CPU or GPU.
  cudaMallocManaged(&x, N*sizeof(float));
  cudaMallocManaged(&y, N*sizeof(float));
  // Initialize x and y arrays on the host.
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  cudaEvent_t start, end;
  float time;
  int blockSize = 256;
  int numBlocks = (N + blockSize - 1) / blockSize;  // ceil-divide
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start);
  add<<<numBlocks, blockSize>>>(N, x, y);
  cudaDeviceSynchronize();
  cudaEventRecord(end);
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&time, start, end);
  fprintf(stdout, "%0.6lf\n", time);
  // Fix: destroy the timing events (the original leaked them).
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  // Free memory.
  cudaFree(x);
  cudaFree(y);
  return 0;
}
9,134 | #include <iostream>
#include <stdlib.h>
#include <cmath>
#include <stdio.h>
#include <sys/time.h>
void check_upper();
void print_a();
void print_b();
void print_x();
void print_m();
void check_result();
void initialize_data();
void partial_pivoting(int row);
float* a_get(int row, int col);
pthread_barrier_t barrier;
float* a;
float* a_d;
float* b_d;
float* b;
float* x;
int n;
int thread_amount;
int kek;
int BLOCK_SIZE;
// One elimination step of Gaussian elimination for pivot row `row`.
// Thread (idx, idy) updates element A[idx][idy]:
//   A[idx][idy] -= (A[idx][row] / A[row][row]) * A[row][idy]
// and the idy == n-1 thread of each row also updates the RHS B[idx].
// Shared memory caches the per-row multipliers m[] and the pivot-row
// entries tmp[] so each block computes/loads them once.
// NOTE(review): the shared arrays are sized 32, so this assumes
// blockDim.x, blockDim.y <= 32 (main() clamps BLOCK_SIZE to 32).
// NOTE(review): threads that take the early return skip __syncthreads(),
// a divergent barrier that is technically undefined behaviour — verify on
// the target architecture before relying on this.
__global__ void gauss_solve(int row, float* A, float* B, int n) {
int idx = blockIdx.x * blockDim.x + threadIdx.x ;
int idy = blockIdx.y * blockDim.y + threadIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Only rows strictly below the pivot and columns at/after it participate.
if (idx >= n || idx <= row || idy < row || idy >= n) return;
__shared__ float m[32];
__shared__ float tmp[32];
// Multiplier for this thread's row, and the pivot-row entry for this column.
m[tx] = A[idx*n+row]/A[row*n+row];
tmp[ty] = A[row*n + idy];
__syncthreads();
A[idx*n+idy] -= m[tx] * tmp[ty];
// Exactly one thread per row updates the right-hand side.
if (idy == n-1) B[idx]-= m[tx]*B[row];
}
// Partial pivoting for elimination step `row`: find the row at or below
// `row` whose entry in column `row` has the largest magnitude, and swap it
// into position `row`.  Intended to run single-threaded (<<<1,1>>>).
__global__ void part_pivot(int row, float* A, float* B, int n) {
    float best = -1.0f;
    int ki = 0;
    // Scan column `row` over rows row..n-1 for the largest |entry|.
    for (int col = row; col < n; col++) {
        if (abs(A[col*n + row]) > best) {
            best = abs(A[col*n + row]);
            ki = col;
        }
    }
    // Swap rows ki and row of A (columns before `row` are already
    // eliminated, so only columns >= row need swapping).
    for (int col = row; col < n; col++) {
        float tmp = A[ki*n + col];
        A[ki*n + col] = A[row*n + col];
        A[row*n + col] = tmp;
    }
    // Fix: the right-hand side must be permuted along with the rows of A,
    // otherwise row swaps silently change the system being solved.
    float tmpb = B[ki];
    B[ki] = B[row];
    B[row] = tmpb;
}
// Run Gaussian elimination with partial pivoting on the GPU, updating the
// global matrix `a` and right-hand side `b` in place.
void gauss_elim() {
    const size_t bytes_a = n * n * sizeof(float);
    const size_t bytes_b = n * sizeof(float);
    // Device copies of A and b.
    cudaMalloc(&a_d, bytes_a);
    cudaMalloc(&b_d, bytes_b);
    cudaMemcpy(a_d, a, bytes_a, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, bytes_b, cudaMemcpyHostToDevice);
    // Square blocks, and enough of them to cover the whole matrix.
    dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE);
    size_t blocks_needed = ceil(n / (float)BLOCK_SIZE);
    dim3 numBlocks(blocks_needed, blocks_needed);
    for (int row = 0; row < n; row++) {
        // Serial pivot selection, then the parallel elimination step.
        part_pivot<<<dim3(1,1), dim3(1,1)>>>(row, a_d, b_d, n);
        gauss_solve<<<numBlocks, threadsPerBlock>>>(row, a_d, b_d, n);
    }
    // Bring the triangularized system back to the host.
    cudaMemcpy(a, a_d, bytes_a, cudaMemcpyDeviceToHost);
    cudaMemcpy(b, b_d, bytes_b, cudaMemcpyDeviceToHost);
    cudaFree(a_d);
    cudaFree(b_d);
}
// Entry point.  Usage: <n> <block_size>.  Solves a random n x n system on
// the GPU (elimination) and host (back substitution), then verifies.
int main(int argc, char* argv[]) {
    if (argc != 3) {
        std::cout << "Invalid input. Number of arguments should be 2.\n";
        return 0;
    }
    n = atoi(argv[1]);
    // The kernels' shared arrays hold 32 floats, so cap the block size.
    BLOCK_SIZE = std::min(atoi(argv[2]), 32);
    initialize_data();
    // Wall-clock start, in milliseconds.
    struct timeval tp;
    gettimeofday(&tp, NULL);
    long int start = tp.tv_sec * 1000 + tp.tv_usec / 1000;
    gauss_elim();
    // Back substitution on the now upper-triangular system.
    for (int i = n-1; i >= 0; i--) {
        x[i] = b[i];
        for (int j = i+1; j < n; j++) {
            x[i] -= a[i*n + j] * x[j];
        }
        x[i] /= a[i*n + i];
    }
    gettimeofday(&tp, NULL);
    long int end = tp.tv_sec * 1000 + tp.tv_usec / 1000;
    std::cout << "Execution time = " << end-start << " ms.\n";
    check_result();
    check_upper();
    // a was malloc'd; b and x were new[]'d.
    free(a);
    delete[] b;
    delete[] x;
}
/**
* Allocates memory and randomizes values for the matrix and vectors
* */
/**
 * Allocate the global matrix/vectors and fill them with random values
 * (matrix entries in [1, 10), RHS entries in [0, 9)).
 */
void initialize_data() {
    a = (float*) malloc(n * n * sizeof(float));
    b = new float[n];
    x = new float[n];
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            a[row*n + col] = 1 + drand48() * 9;
        }
        b[row] = drand48() * 9;
    }
}
/**
* Checks and prints the final result by calculating the L2-norm of
* Ax-b
* */
/**
 * Check the solution by computing the L2-norm of the residual A*x - b and
 * printing it on stderr (small values indicate a good solve).
 */
void check_result() {
    float* r = new float[n];
    // Residual vector r = A*x - b.
    for (int i = 0; i < n; i++) {
        r[i] = 0;
        for (int j = 0; j < n; j++) {
            r[i] += a[i*n + j]*x[j];
        }
        r[i] -= b[i];
    }
    // L2 norm of the residual.
    float result = 0;
    for (int i = 0; i < n; i++) {
        result += r[i]*r[i];
    }
    result = sqrt(result);
    std::cerr << "Error factor: " << result << ".\n";
    // Fix: release the scratch buffer (the original leaked it).
    delete[] r;
}
/**
* Prints the matrix A
* */
/**
 * Print the matrix A, with each value truncated to an int.
 */
void print_a() {
    std::cout << "A: \n";
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            std::cout << (int)a[row*n + col] << " ";
        }
        std::cout << "\n";
    }
    std::cout << "\n\n";
}
// Print the right-hand-side vector b, one value per line.
void print_b() {
    std::cout << "B: \n";
    for (int row = 0; row < n; row++) {
        std::cout << b[row] << "\n";
    }
    std::cout << "\n\n";
}
// Print the solution vector x, one value per line.
void print_x() {
    std::cout << "X: \n";
    for (int row = 0; row < n; row++) {
        std::cout << x[row] << "\n";
    }
    std::cout << "\n\n";
}
// Sanity check that elimination produced an upper-triangular matrix:
// report any entry below the diagonal that is nonzero when truncated
// to an int.
void check_upper() {
    std::cout << "Check if upper: \n";
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < row; col++) {
            if ((int)a[row*n + col] != 0) {
                std::cout << "Value: " << (int)a[row*n + col] << " at " << row << "," << col << "\n";
            }
        }
    }
}
|
9,135 | #include "includes.h"
// Single-block SAXPY: thread t computes y[t] = a * x[t] + y[t].
// NOTE(review): the RUN line below is a hipify translation-test directive
// that greps the translated output for the exact statement text — keep the
// statement and the directive byte-for-byte intact.
__global__ void axpy(float a, float* x, float* y) {
// RUN: sh -c "test `grep -c -F 'y[hipThreadIdx_x] = a * x[hipThreadIdx_x];' %t` -eq 2"
y[threadIdx.x] = a * x[threadIdx.x];
}
9,136 | //headers
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define N (33 * 1024)
//global variables declaration
int hostA[N];
int hostB[N];
int hostC[N];
int *deviceA;
int *deviceB;
int *deviceC;
// *** CUDA KERNEL DEFINITION ***
// Grid-stride element-wise addition over the fixed-size arrays: c = a + b.
// Works for any launch configuration; each thread strides across N.
__global__ void add(int *a, int *b, int *c)
{
    // Start at this thread's global index and advance by the grid width.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid < N; tid += blockDim.x * gridDim.x)
    {
        c[tid] = a[tid] + b[tid];
    }
}
// Host driver: allocate device arrays, add hostA[i] = i and hostB[i] = i*i
// on the GPU, verify on the host, and release everything via cleanup().
int main(int argc, char *argv[])
{
    //function declaration
    void cleanup(void);
    //code
    cudaError_t err = cudaSuccess;
    //allocate memory on device
    err = cudaMalloc((void **)&deviceA, N * sizeof(int));
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **)&deviceB, N * sizeof(int));
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    err = cudaMalloc((void **)&deviceC, N * sizeof(int));
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    //fill the host input arrays
    for(int i = 0; i < N; i++)
    {
        hostA[i] = i;
        hostB[i] = i * i;
    }
    //copy the host input arrays to device memory
    err = cudaMemcpy(deviceA, hostA, (N * sizeof(int)), cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    err = cudaMemcpy(deviceB, hostB, (N * sizeof(int)), cudaMemcpyHostToDevice);
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    //cuda kernel configuration: grid-stride kernel covers all N elements
    dim3 DimGrid = 128;
    dim3 DimBlock = 128;
    add<<<DimGrid, DimBlock>>>(deviceA, deviceB, deviceC);
    //Fix: a kernel launch does not return a status directly — check for
    //launch-configuration errors explicitly.
    err = cudaGetLastError();
    if(err != cudaSuccess)
    {
        printf("GPU Kernel Launch Error = %s In File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    //copy output array back to host (blocking copy also synchronizes)
    err = cudaMemcpy(hostC, deviceC, (N * sizeof(int)), cudaMemcpyDeviceToHost);
    if(err != cudaSuccess)
    {
        printf("GPU Memory Fatal Error = %s In File Name %s At Line No %d.\nExiting Now ...\n", cudaGetErrorString(err), __FILE__, __LINE__);
        cleanup();
        exit(EXIT_FAILURE);
    }
    //verify the results against the host computation
    bool success = true;
    for(int i = 0; i < N; i++)
    {
        if(hostA[i] + hostB[i] != hostC[i])
        {
            printf("Error : %d + %d != %d\n", hostA[i], hostB[i], hostC[i]);
            success = false;
        }
    }
    if(success)
    {
        printf("Addition Is Successful On GPU !\n");
    }
    //total cleanup
    cleanup();
    return (0);
}
// Release whichever device buffers are still allocated and null the
// pointers; safe to call more than once.
void cleanup(void)
{
    if(deviceA)
    {
        cudaFree(deviceA);
        deviceA = NULL;
    }
    if(deviceB)
    {
        cudaFree(deviceB);
        deviceB = NULL;
    }
    if(deviceC)
    {
        cudaFree(deviceC);
        deviceC = NULL;
    }
}
|
9,137 | #include "includes.h"
// Empty kernel: compiles and launches but performs no work — a minimal
// demonstration of the __global__ / <<<grid, block>>> launch machinery.
__global__ void myFirstKernel()
{
}
9,138 | #include <algorithm>
#include <chrono>
#include <iostream>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
// Current steady-clock time in nanoseconds since the clock's epoch.
int64_t getTimeNs() {
  auto elapsed = std::chrono::steady_clock::now().time_since_epoch();
  return std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
}
// Thrust sort benchmark: generate 512M random ints on the host, copy to
// the device, sort ten times, copy back, and report per-phase timings.
int main(void) {
  auto t0 = getTimeNs();
  // 512M random numbers generated serially on the host
  // (an earlier configuration used 32 << 20).
  thrust::host_vector<int> h_vec(512 << 20);
  std::generate(h_vec.begin(), h_vec.end(), rand);
  // Transfer the data to the device.
  thrust::device_vector<int> d_vec = h_vec;
  auto t1 = getTimeNs();
  // Sort on the device, ten passes.
  for (int pass = 0; pass < 10; ++pass) {
    thrust::sort(d_vec.begin(), d_vec.end());
  }
  auto t2 = getTimeNs();
  // Transfer the sorted data back to the host.
  thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
  auto t3 = getTimeNs();
  std::cout << "vec len: " << h_vec.size() << '\n';
  std::cout << "time: " << (t3 - t0) / 1e6 << "\nt1-t0: " << (t1 - t0) / 1e6
            << "\nt2-t1: " << (t2 - t1) / 1e6 << "\nt3-t2: " << (t3 - t2) / 1e6
            << '\n';
  return 0;
}
|
9,139 | #include <stdio.h>
#include <cuda.h>
#define CHECK_CUDA_ERROR(file, line) { \
cudaError_t err_t; \
if ((err_t = cudaGetLastError() ) != cudaSuccess) { \
printf("Cuda error: %s \n", cudaGetErrorString(err_t)); \
printf("File: %s; line %d\n", file, line); \
exit(1); \
} \
}
const int N = 1000000; // 1M
const int blocksize = 16;
// Element-wise update y[i] += x[i] for i in [0, n); guarded so surplus
// threads in the last block do nothing.
__global__
void vupdate(const int n, const int *x, int *y)
{
  const int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < n)
    y[idx] += x[idx];
}
// Host driver: run 500 passes of y += x on the GPU over N ints and report
// the total elapsed time (allocation through device frees) via CUDA events.
int main()
{
  int *h_x, *h_y;
  int *d_x, *d_y;
  cudaEvent_t start, stop;
  float elapsedTime;
  // Timing spans everything from the host allocations to the device frees.
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  // Host allocations and initial data.
  h_x = (int *) malloc(N*sizeof(int));
  h_y = (int *) malloc(N*sizeof(int));
  for (int i=0; i<N; ++i) {
    h_x[i] = i;
    h_y[i] = i-10;
  }
  // Device allocations.
  cudaMalloc( (void**)&d_x, N*sizeof(int) );
  CHECK_CUDA_ERROR(__FILE__, __LINE__);
  cudaMalloc( (void**)&d_y, N*sizeof(int) );
  CHECK_CUDA_ERROR(__FILE__, __LINE__);
  // Host -> device copies.
  cudaMemcpy( d_x, h_x, N*sizeof(int), cudaMemcpyHostToDevice );
  CHECK_CUDA_ERROR(__FILE__, __LINE__);
  cudaMemcpy( d_y, h_y, N*sizeof(int), cudaMemcpyHostToDevice );
  CHECK_CUDA_ERROR(__FILE__, __LINE__);
  // 500 repeated updates: y += x each pass.
  dim3 dimBlock(blocksize);
  dim3 dimGrid(N / blocksize + 1);
  for (int test=0; test<500; ++test) {
    vupdate<<<dimGrid, dimBlock>>>(N, d_x, d_y);
    CHECK_CUDA_ERROR(__FILE__, __LINE__);
  }
  // Device -> host copies.
  cudaMemcpy( h_x, d_x , N*sizeof(int), cudaMemcpyDeviceToHost );
  CHECK_CUDA_ERROR(__FILE__, __LINE__);
  cudaMemcpy( h_y, d_y , N*sizeof(int), cudaMemcpyDeviceToHost );
  CHECK_CUDA_ERROR(__FILE__, __LINE__);
  // Release device memory.
  cudaFree( d_x );
  CHECK_CUDA_ERROR(__FILE__, __LINE__);
  cudaFree( d_y );
  CHECK_CUDA_ERROR(__FILE__, __LINE__);
  // Stop the clock and report.
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start, stop);
  printf ("Time for the kernel: %f ms\n", elapsedTime);
  // Release host memory.
  free(h_x);
  free(h_y);
  return 0;
}
|
9,140 | #include "includes.h"
// Write the transpose of `src` (WIDTH x HEIGHT) into `dest` (HEIGHT x WIDTH):
// element (row, col) of the source becomes element (col, row) of the
// destination.  One thread per source element.
__global__ void traspose(int *src, int *dest)
{
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Threads that fall outside the matrix do nothing.
    if (col >= WIDTH || row >= HEIGHT) return;
    dest[col * HEIGHT + row] = src[row * WIDTH + col];
}
9,141 | //general parts
#include <stdio.h>
#include <vector>
#include <memory>
#include <string.h>
#include <chrono>
#include <thread>
#include <iostream>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
//CUDA parts
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#define GROUP 1
// Benchmark 6: cuFFT forward + inverse R2C/C2R FFTs over a fixed set of 2D
// and 3D problem sizes, in single precision.  Each size is run num_runs
// times; per-size average time and a derived throughput score are printed
// (and written to `output` when file_output is set).  Sample 0 is a warmup
// excluded from the aggregate score.
void sample_6_benchmark_cuFFT_single_r2c(bool file_output, FILE* output, int device_id)
{
if (file_output)
fprintf(output, "6 - cuFFT FFT + iFFT R2C/C2R multidimensional benchmark in single precision\n");
printf("6 - cuFFT FFT + iFFT R2C/C2R multidimensional benchmark in single precision\n");
cudaSetDevice(device_id);
const int num_benchmark_samples = 24;
const int num_runs = 3;
//printf("First %" PRIu64 " runs are a warmup\n", num_runs);
// Each row is {nx, ny, nz, rank}; rank selects the cufftPlan1d/2d/3d call.
uint64_t benchmark_dimensions[num_benchmark_samples][4] = { {1024, 1024, 1, 2}, {64, 64, 1, 2}, {256, 256, 1, 2}, {1024, 256, 1, 2}, {512, 512, 1, 2}, {1024, 1024, 1, 2}, {4096, 256, 1, 2}, {2048, 1024, 1, 2},{4096, 2048, 1, 2}, {4096, 4096, 1, 2}, {720, 480, 1, 2},{1280, 720, 1, 2},{1920, 1080, 1, 2}, {2560, 1440, 1, 2},{3840, 2160, 1, 2},
{32, 32, 32, 3}, {64, 64, 64, 3}, {256, 256, 32, 3}, {1024, 256, 32, 3}, {256, 256, 256, 3}, {2048, 1024, 8, 3}, {512, 512, 128, 3}, {2048, 256, 256, 3}, {4096, 512, 8, 3} };
double benchmark_result[2] = { 0,0 };//averaged result = sum(system_size/iteration_time)/num_benchmark_samples
// One shared 2^27-element random input buffer, reused for every size.
cufftReal* inputC = (cufftReal*)malloc((uint64_t)sizeof(cufftReal)*pow(2, 27));
for (uint64_t i = 0; i < pow(2, 27); i++) {
inputC[i] = 2 * ((float)rand()) / RAND_MAX - 1.0;
}
for (int n = 0; n < num_benchmark_samples; n++) {
double run_time[num_runs][2];
for (int r = 0; r < num_runs; r++) {
cufftHandle planR2C;
cufftHandle planC2R;
cufftReal* dataR;
cufftComplex* dataC;
uint64_t dims[3] = { benchmark_dimensions[n][0] , benchmark_dimensions[n][1] ,benchmark_dimensions[n][2] };
// dataR is over-allocated to complex size so the in-place-style C2R
// output fits; only the real-sized prefix is initialized.
cudaMalloc((void**)&dataR, sizeof(cufftComplex) * (dims[0] / 2 + 1) * dims[1] * dims[2]);
cudaMalloc((void**)&dataC, sizeof(cufftComplex) * (dims[0] / 2 + 1) * dims[1] * dims[2]);
cudaMemcpy(dataR, inputC, sizeof(cufftReal) * dims[0] * dims[1] * dims[2], cudaMemcpyHostToDevice);
// NOTE(review): this early return leaks inputC and any device buffers
// allocated so far — consider freeing before bailing out.
if (cudaGetLastError() != cudaSuccess) {
fprintf(stderr, "Cuda error: Failed to allocate\n");
return;
}
// Build the forward (R2C) and inverse (C2R) plans for this rank.
switch (benchmark_dimensions[n][3]) {
case 1:
cufftPlan1d(&planR2C, dims[0], CUFFT_R2C, 1);
cufftPlan1d(&planC2R, dims[0], CUFFT_C2R, 1);
break;
case 2:
cufftPlan2d(&planR2C, dims[1], dims[0], CUFFT_R2C);
cufftPlan2d(&planC2R, dims[1], dims[0], CUFFT_C2R);
break;
case 3:
cufftPlan3d(&planR2C, dims[2], dims[1], dims[0], CUFFT_R2C);
cufftPlan3d(&planC2R, dims[2], dims[1], dims[0], CUFFT_C2R);
break;
}
float totTime = 0;
// Iteration count scaled so roughly 4 GB of buffer traffic is timed,
// capped at 1000 iterations.
uint64_t cuBufferSize = sizeof(float) * 2 * (dims[0]/2+1) * dims[1] * dims[2];
uint64_t num_iter = ((4096 * 1024.0 * 1024.0) / cuBufferSize > 1000) ? 1000 : (4096 * 1024.0 * 1024.0) / cuBufferSize;
if (num_iter == 0) num_iter = 1;
// Time num_iter forward+inverse transform pairs.
std::chrono::steady_clock::time_point timeSubmit = std::chrono::steady_clock::now();
for (int i = 0; i < num_iter; i++) {
cufftExecR2C(planR2C, dataR, dataC);
cufftExecC2R(planC2R, dataC, dataR);
}
cudaDeviceSynchronize();
std::chrono::steady_clock::time_point timeEnd = std::chrono::steady_clock::now();
totTime = (std::chrono::duration_cast<std::chrono::microseconds>(timeEnd - timeSubmit).count() * 0.001) / num_iter;
run_time[r][0] = totTime;
// After the last run of a non-warmup sample, report mean and std error.
if (n > 0) {
if (r == num_runs - 1) {
double std_error = 0;
double avg_time = 0;
for (uint64_t t = 0; t < num_runs; t++) {
avg_time += run_time[t][0];
}
avg_time /= num_runs;
for (uint64_t t = 0; t < num_runs; t++) {
std_error += (run_time[t][0] - avg_time) * (run_time[t][0] - avg_time);
}
std_error = sqrt(std_error / num_runs);
if (file_output)
fprintf(output, "cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", benchmark_dimensions[n][0], benchmark_dimensions[n][1], benchmark_dimensions[n][2], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time));
printf("cuFFT System: %" PRIu64 "x%" PRIu64 "x%" PRIu64 " Buffer: %" PRIu64 " MB avg_time_per_step: %0.3f ms std_error: %0.3f num_iter: %" PRIu64 " benchmark: %" PRIu64 "\n", benchmark_dimensions[n][0], benchmark_dimensions[n][1], benchmark_dimensions[n][2], cuBufferSize / 1024 / 1024, avg_time, std_error, num_iter, (uint64_t)(((double)cuBufferSize / 1024) / avg_time));
benchmark_result[0] += ((double)cuBufferSize / 1024) / avg_time;
}
}
// Destroy the plans and free device buffers before the next run.
cufftDestroy(planR2C);
cufftDestroy(planC2R);
cudaFree(dataC);
cudaFree(dataR);
cudaDeviceSynchronize();
//cufftComplex* output_cuFFT = (cufftComplex*)(malloc(sizeof(cufftComplex) * dims[0] * dims[1] * dims[2]));
//cudaMemcpy(output_cuFFT, dataC, sizeof(cufftComplex) * dims[0] * dims[1] * dims[2], cudaMemcpyDeviceToHost);
//cudaDeviceSynchronize();
}
}
free(inputC);
// Average over all non-warmup samples and report the final score.
benchmark_result[0] /= (num_benchmark_samples - 1);
if (file_output)
fprintf(output, "Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0]));
printf("Benchmark score cuFFT: %" PRIu64 "\n", (uint64_t)(benchmark_result[0]));
}
|
#include <stdio.h>
#define n 16
/**
 * Sums rows and columns of an n x n matrix.
 *
 * Launch contract: exactly 2*n threads in one block.
 *   - Threads 0..n-1   accumulate the sum of row  `index` into arrayCount[index].
 *   - Threads n..2n-1  accumulate the sum of column `index - n` into arrayCount[index].
 * arrayCount must be zero-initialized by the caller before launch, since
 * results are accumulated with atomicAdd.
 */
__global__ void countNumberInArray(int *originalData, int *arrayCount)
{
    int index = threadIdx.x, i;
    int sum = 0;
    if (threadIdx.x < n)
    {
        // Row thread: walk row `index` contiguously.
        for (i = 0; i < n; i++)
        {
            sum += originalData[(index * n) + i];
        }
    }
    else
    {
        // Column thread. Bug fix: the original indexed originalData[i*n + index]
        // with index in [n, 2n), reading past the end of the n*n matrix.
        // The column this thread owns is (index - n).
        int col = index - n;
        for (i = 0; i < n; i++)
        {
            sum += originalData[(i * n) + col];
        }
    }
    // Each thread writes a distinct slot, but atomicAdd is kept so repeated
    // launches accumulate safely.
    atomicAdd(&arrayCount[index], sum);
}
/**
 * Builds an n x n matrix whose row i is filled with the value i, then launches
 * 2*n threads: the first n sum the rows, the last n sum the columns.
 * Prints the matrix, the per-row and per-column sums, and their totals.
 */
int main(int argc, char *argv[])
{
    int totalCount = 2 * n;
    int originalData[n][n], count[totalCount];
    int i = 0;
    int j = 0;
    int *deviceOriginalData, *deviceArrayCount;
    int arrayByteSize = (n * n) * sizeof(int);
    int countArrayByteSize = totalCount * sizeof(int);
    printf("ORIGINAL: \n");
    for (i = 0; i < n; i++)
    {
        for (j = 0; j < n; j++)
        {
            originalData[i][j] = i;
            printf("%3d ", originalData[i][j]);
        }
        printf("\n");
    }
    printf("\n\n");
    cudaMalloc((void**) &deviceOriginalData, arrayByteSize);
    cudaMalloc((void**) &deviceArrayCount, countArrayByteSize);
    cudaMemcpy(deviceOriginalData, originalData, arrayByteSize, cudaMemcpyHostToDevice);
    // Bug fix: cudaMalloc does not zero device memory, and the kernel
    // accumulates with atomicAdd, so the result buffer must start at zero.
    cudaMemset(deviceArrayCount, 0, countArrayByteSize);
    dim3 blockDim(totalCount);
    countNumberInArray<<<1, blockDim>>>(deviceOriginalData, deviceArrayCount);
    // cudaMemcpy is a synchronizing call, so the kernel has finished here.
    cudaMemcpy(count, deviceArrayCount, countArrayByteSize, cudaMemcpyDeviceToHost);
    cudaFree(deviceOriginalData);
    cudaFree(deviceArrayCount);
    // Split the flat result buffer: slots [0,n) are row sums, [n,2n) column sums.
    int rowCounts[n], colCounts[n], rowArrayIterator = 0, colArrayIterator = 0;
    int rowsum = 0;
    int colsum = 0;
    int l = 0;
    for (l = 0; l < totalCount; l++)
    {
        if (l < n)
        {
            rowCounts[rowArrayIterator++] = count[l];
            rowsum += count[l];
        }
        else
        {
            colCounts[colArrayIterator++] = count[l];
            colsum += count[l];
        }
    }
    printf("TOTAL COUNT ROW\n");
    for (l = 0; l < n; l++)
    {
        printf("(%d,%3d)", l, rowCounts[l]);
    }
    printf("\nSum Row: %d\n", rowsum);
    printf("\n\nTOTAL COUNT COL\n");
    for (l = 0; l < n; l++)
    {
        printf("(%d,%3d)", l, colCounts[l]);
    }
    printf("\nSum Col: %d\n", colsum);
    printf("\n");
    return 0;
}
#include <stdio.h>
/**
 * Returns the total number of FP32 CUDA cores for a device, computed as
 * (streaming multiprocessors) * (cores per SM for that compute capability).
 * Cores-per-SM values follow NVIDIA's published architecture specifications.
 * Returns 0 (and prints a message) for unrecognized architectures.
 */
int getSPcores(cudaDeviceProp devProp)
{
    int cores = 0;
    int mp = devProp.multiProcessorCount;
    switch (devProp.major){
    case 2: // Fermi: 2.1 has 48 cores/SM, 2.0 has 32
        if (devProp.minor == 1) cores = mp * 48;
        else cores = mp * 32;
        break;
    case 3: // Kepler: 192 cores/SM
        cores = mp * 192;
        break;
    case 5: // Maxwell: 128 cores/SM
        cores = mp * 128;
        break;
    case 6: // Pascal: GP104/GP106 (6.1, 6.2) 128; GP100 (6.0) 64
        if (devProp.minor == 1 || devProp.minor == 2) cores = mp * 128;
        else if (devProp.minor == 0) cores = mp * 64;
        else printf("Unknown device type\n");
        break;
    case 7: // Volta (7.0, 7.2) and Turing (7.5): 64 FP32 cores/SM
        if (devProp.minor == 0 || devProp.minor == 2 || devProp.minor == 5)
            cores = mp * 64;
        else printf("Unknown device type\n");
        break;
    case 8: // Ampere: GA100 (8.0) 64; GA10x (8.6, 8.7) and Ada (8.9) 128
        if (devProp.minor == 0) cores = mp * 64;
        else cores = mp * 128;
        break;
    case 9: // Hopper: 128 FP32 cores/SM
        cores = mp * 128;
        break;
    default:
        printf("Unknown device type\n");
        break;
    }
    return cores;
}
/**
 * Enumerates all CUDA devices and prints their key capabilities:
 * core count, warp size, launch limits, and memory sizes.
 */
int main() {
    int deviceTotal = 0;
    cudaGetDeviceCount(&deviceTotal);
    for (int dev = 0; dev < deviceTotal; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("Device Number: %d\n", dev);
        printf("  Device name: %s\n", prop.name);
        printf("  Number of CUDA cores: %d\n", getSPcores(prop));
        printf("  Warp Size: %d\n", prop.warpSize);
        printf("  Max Threads Per Block: %d\n", prop.maxThreadsPerBlock);
        printf("  Max Grid Size: (%d, %d, %d)\n",
               prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("  Max Block Size: (%d, %d, %d)\n",
               prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("  Max Threads Per Multiprocessor: %d\n", prop.maxThreadsPerMultiProcessor);
        printf("  Total Global Memory: %zu GB\n", prop.totalGlobalMem >> 30);
        printf("  Shared Memory per Block: %zu kB\n", prop.sharedMemPerBlock >> 10);
        printf("  Register per Block: %d kB \n\n", prop.regsPerBlock / 1024);
    }
}
extern "C"
// Each thread owns one "array" of array_stride 0/1 cells and iterates a
// parity-flip rule over it n_iterations times with periodic boundaries.
// NOTE(review): shared_boolean_array is a per-thread LOCAL array despite its
// name (no __shared__ qualifier). Its size is fixed at 32, so array_stride > 32
// overruns it — assumes callers pass array_stride <= 32; TODO confirm.
__global__ void Control_Flow(int n_iterations, int num_arrays, int array_stride, int * boolean_array)
/*
Periodic BC, loop through each array, if 1 then flip the parity of idx to the right, else flip parity of idx to the left.
*/
{
int shared_boolean_array[32];
const unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int array_address = id * array_stride;
const unsigned int shared_address = 0;
if(id < num_arrays) {
int curr_val;
int side_val;
// Stage this thread's array into the local working buffer.
for(int j = 0; j < array_stride; j++ ) {
shared_boolean_array[shared_address + j] = boolean_array[array_address + j];
}
for (int i = 0; i < n_iterations; i++) {
for (int j = 0; j < array_stride; j++) {
curr_val = shared_boolean_array[shared_address + j];
if(curr_val == 0) {
// Value 0: flip the parity of the left neighbor (wrapping to the
// last cell when j == 0).
if(j > 0) {
side_val = shared_boolean_array[shared_address + j - 1];
shared_boolean_array[shared_address + j -1] = (side_val +1) % 2;
}
else{
side_val = shared_boolean_array[shared_address + array_stride - 1];
shared_boolean_array[shared_address + array_stride - 1] = (side_val +1) % 2;
}
}
else{
// Value 1: flip the parity of the right neighbor (wrapping to the
// first cell when j == array_stride - 1).
if(j < array_stride -1) {
side_val = shared_boolean_array[shared_address + j + 1];
shared_boolean_array[shared_address + j + 1] = (side_val + 1) % 2;
}
else{
side_val = shared_boolean_array[shared_address];
shared_boolean_array[shared_address] = (side_val + 1) % 2;
}
}
}
}
// Write the evolved array back to global memory.
for(int j = 0; j < array_stride; j++ ) {
boolean_array[array_address + j] = shared_boolean_array[shared_address + j];
}
}
}
|
#include "includes.h"
/**
 * Elementwise cosine: out0[id] = cos(A[id]) for id in [0, out0count).
 * Flattens a 2D grid of 1D blocks into a single linear index; threads past
 * out0count do nothing, so any launch configuration covering out0count works.
 */
__global__ void Matrix_cos_naive(const float * A , int Acount, int Acols, float * out0 , int out0count)
{
    int id = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
    if (id<out0count){
        // Bug fix: use the single-precision cosf instead of the double
        // overload, avoiding a silent float->double->float round trip.
        out0[id] = cosf(A[id]);
    }
}
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <math.h>
//#include <unistd.h>
#define EXPO 5 //so [0,1] will be break into 2^6 intervals 64*64
#define PI 3.14159265
// One step of block cyclic reduction: computes the updated B and C matrices
// (double-buffered between deviceB/deviceC and devicenewB/devicenewC, with the
// roles swapped on each step's parity) and updates slices of deviceD.
// NOTE(review): blocksize is hard-coded to 16 and must match the launch's
// blockDim (the host launches 16x16 blocks) — TODO confirm if reused elsewhere.
// NOTE(review): __syncthreads() only synchronizes within a block; cross-block
// ordering here relies on the host launching one kernel per step.
__global__ void CalculateTheD(int step,float* deviceB, float* deviceC, float* deviceD, float* deviceX, float* devicenewB, float* devicenewC)
{
int blocksize=16;
int gridsize=gridDim.x; //this will always be 1
// int gridsize2=gridDim.x;
// printf("gridsize: %d %d\n", gridsize,gridsize2);
// __device__ bool myGlobalFlag=true;
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int blocktotal=blocksize*blocksize;
int temp1=by*gridsize+bx;
if(tx==1&&ty==1)
{
printf("temp1: %d \n",temp1);
}
int temp2=ty*blocksize+tx;
// Flatten (block, thread) into a single linear index, then map it to a
// (row, column) pair in the m x m matrix.
int ind=temp1*blocktotal+temp2;
int m=(int)pow(2.0,EXPO*1.0)-1;
/*int column=threadIdx.x;
int row=threadIdx.y;*/
int row=ind/m;
int column=ind%m;
int iloopstep=(int)pow(2.0,(EXPO-step)*1.0)-1;
int h=(int)pow(2.0,(step-1)*1.0);
int multiplier=(int)pow(2.0,step*1.0);
int countHelper1=(int)pow(2.0,EXPO+1.0);
int countHelper2=(int)pow(2.0,EXPO-step+2);
float* oldB;
float* oldC;
float* newB;
float* newC;
//float* newD;
//this is to exchange which hold the previous value which hold the current value
if(step%2==0)
{
oldB=devicenewB;
oldC=devicenewC;
/*oldD=devicenewD;*/
newB=deviceB;
newC=deviceC;
/* newD=deviceD;*/
}
else
{
oldB=deviceB;
oldC=deviceC;
/* oldD=deviceD;*/
newB=devicenewB;
newC=devicenewC;
/* newD=devicenewD;*/
}
/*
if(row==0&&column==0)
{
if(step==1)
{
printf("bb-0 \n");
for(int i=0;i<m;i++)
{
for(int j=0;j<m;j++)
{
printf("%lf ",oldB[i*m+j]);
}
printf("\n");
}
}
if(step==2)
{
printf("bb-1 \n");
for(int i=0;i<m;i++)
{
for(int j=0;j<m;j++)
{
printf("%lf ",oldB[i*m+j]);
}
printf("\n");
}
}
}*/
//use the device value as old value and store the updated one in to the new value
if(row<m && column<m) //so only the first 63 threads do work and the other one is hanging there
{
// Matrix products B*B and C*C for entry (row, column).
float sumBB=0.0;
for(int k=0;k<m;k++)
{
sumBB=sumBB+oldB[row*m+k]*oldB[k*m+column];
}
float sumCC=0.0;
for(int k=0;k<m;k++)
{
sumCC=sumCC+oldC[row*m+k]*oldC[k*m+column];
}
//based on formula (5.4.2.15) on book
newB[row*m+column]=2*sumCC-sumBB;
newC[row*m+column]=sumCC;
//now calculate the new d and it needs to loop through i in each block
//look at the third formula on 5.4.2.15 on book
///D[ith BLOCK][j thSTep ]=D[(2^(k+1)-2^(k-j+1)+i-j-1)*m+...]
if(column==0)
{
//for calculate d we just need 63 tthreads but B and C we need 63*63 threads
//so in step :step, each thread will work on the row th value in each block in that step,sicne there are 63 threads.
for(int i=1;i<=iloopstep;i++)
{
float sumCD1=0.0;
for(int k=0;k<m;k++)
{
sumCD1=sumCD1+oldC[row*m+k]*deviceD[(countHelper1-countHelper2+i*2-1-step)*m+k];
}
float sumCD2=0.0;
for(int k=0;k<m;k++)
{
sumCD2=sumCD2+oldC[row*m+k]*deviceD[(countHelper1-countHelper2+i*2+1-step)*m+k];
}
float sumBD=0.0;
for(int k=0;k<m;k++)
{
sumBD=sumBD+oldB[row*m+k]*deviceD[(countHelper1-countHelper2+i*2-step)*m+k];
}
deviceD[(int)((countHelper1-countHelper2/2+i-step-1)*m)+row]=sumCD1+sumCD2-sumBD;
if(i==2&&step==1)
{
printf("row %d, index %d, %lf",row,(int)((countHelper1-countHelper2/2+i-step-1)*m)+row,deviceD[(int)((countHelper1-countHelper2/2+i-step-1)*m)+row]);
}
//printf("step %d,row: %d, index:%d dd1: %lf \n",step,row,(int)(countHelper1-countHelper2/2+i-step-1)*m+row,deviceD[(countHelper1-countHelper2/2+i-step-1)*m+row]);
//printf("in cuda index %d value %lf: \n",(countHelper1-countHelper2/2+i-step-1)*m+row,deviceD[(countHelper1-countHelper2/2+i-step-1)*m+row]);
//printf("gpu:%lf:",newD[(i*multiplier-1)*m+row]);
}
}
}
//sync the thread before go to the next step.
__syncthreads();
/* if(row==0&&column==0)s
{
for(int i=0;i<9;i++)
{
printf("%lf ",oldD[i]);
}
printf("\n");
}*/
}
//***************************begin of unblock version of cyclic reduction*********************************************************************************//
// One elimination step of the scalar (non-block) cyclic reduction: each active
// thread combines three neighboring equations (2h-1, 2h, 2h+1 of the previous
// level) into one equation of the next level, updating A, B, C and D in place.
// The A/B/C/D arrays store all reduction levels back to back; the helperN
// powers of two locate the current and next level inside that layout (step 1
// is the base level, hence the flag special case).
// NOTE(review): the index expressions rely on helper1-helper4 etc. being
// non-negative for every valid step — TODO confirm for EXPO values other
// than the ones exercised by main.
__global__ void CalculatePArrayKernel(int step,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD)
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int helper11=pow(2.0,(EXPO+1)*1.0);
int helper22=pow(2.0,(EXPO-step+1)*1.0);
int helper44=pow(2.0,(EXPO-step+2)*1.0);
int helper33=pow(2.0,EXPO*1.0)-1;
//printf("step is running: %d \n",step);
// if(helper3<pow(2.0,(EXPO-step)*1.0)-1)
//--step 1 is special case.
/* if((tx!=(blockColumn-1))&&(ty!=(blockRow-1)))-----this is very important branch divergence happen here, need
//to figure out how exactly cuda works!!
/*****calcualte A******************/
int helper1=helper11;
int helper2=helper22;
int helper4=helper44;
int flag=0;//special for step1.
if(step==1)
{
helper1=0;
helper2=0;
helper4=0;
flag=1;
}
// helper3 is this thread's 1-based equation index within the current level;
// only 2^(EXPO-step)-1 equations remain at this level.
int helper3=ty*blockColumn+tx+1;
if(helper3<=(pow(2.0,1.0*(EXPO-step))-1.0))
{
float ahelperfora1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperfora2=deviceA[-step+helper1-helper4+2*(helper3)-1];
float bhelperfora1=deviceB[-step+helper1-helper4+2*(helper3)-1];
deviceA[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*(ahelperfora1)*ahelperfora2/bhelperfora1;
//*****calculate C******************/
float chelperforc1=deviceC[-step+helper1-helper4+2*(helper3)];
float chelperforc2=deviceC[-step+helper1-helper4+2*(helper3)+1];
float bhelperforc1=deviceB[-step+helper1-helper4+2*(helper3)+1];
deviceC[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*chelperforc1*chelperforc2/bhelperforc1;
//calculate B***********************************************//
float bhelperforb1=deviceB[-step+helper1-helper4+2*(helper3)];
float bhelperforb2=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperforb3=deviceB[-step+helper1-helper4+2*(helper3)+1];
float ahelperforb1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperforb2=deviceA[-step+helper1-helper4+2*(helper3)+1];
float chelperforb1=deviceC[-step+helper1-helper4+2*(helper3)-1];
float chelperforb2=deviceC[-step+helper1-helper4+2*(helper3)];
deviceB[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=bhelperforb1-ahelperforb1/bhelperforb2*chelperforb1-chelperforb2/bhelperforb3*ahelperforb2;
//calculate D***************************************************//
float dhelperford1=deviceD[-step+helper1-helper4+2*(helper3)];
float dhelperford2=deviceD[-step+helper1-helper4+2*(helper3)-1];
float dhelperford3=deviceD[-step+helper1-helper4+2*(helper3)+1];
float ahelperford1=deviceA[-step+helper1-helper4+2*(helper3)];
float bhelperford1=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperford2=deviceB[-step+helper1-helper4+2*(helper3)+1];
float chelperford1=deviceC[-step+helper1-helper4+2*(helper3)];
deviceD[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=dhelperford1-ahelperford1/bhelperford1*dhelperford2-chelperford1/bhelperford2*dhelperford3;
/* for(int i=0;i<6;i++)
{
//printf("cudab %lf \n",deviceB[i]);
printf("cudab %lf \n",deviceB[i]);
}
for(int i=0;i<6;i++)
{
//printf("cudab %lf \n",deviceB[i]);
printf("cudad %lf \n",deviceD[i]);
}*/
}
__syncthreads();
}
// One back-substitution level of the cyclic reduction: thread i solves for the
// unknown at position (2i-1)*2^(k-1) of deviceFinalX using the stored level-k
// coefficients and the two already-known neighboring unknowns at distance
// h = 2^(k-1). The host calls this once per level, from k = EXPO-1 down to 1.
// NOTE(review): there is no bound check on backhelper1, so the launch geometry
// (blockRow*blockColumn) must exactly cover the unknowns of level k — TODO
// confirm against the host-side dimBlock computation.
__global__ void BackwardKernel(int k,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD,float* deviceFinalX,float initialValue)
{
int bx1=blockIdx.x;
int by1=blockIdx.y;
int tx1=threadIdx.x;
int ty1=threadIdx.y;
//printf("inside of kernle %f \n",deviceFinalX[4]);
int backhelper1=ty1*blockColumn+tx1+1;
int backhelper2=2*backhelper1-1;//(int((2*backhelper1-1)*pow(2.0,1.0*(k-1))))/(int)(pow(2.0,(k-1)*1.0));
int backhelper3=(int)pow(2.0,(EXPO+1)*1.0);
int backhelper4=(int)pow(2.0,(EXPO-k+2)*1.0);
int h=(int)(pow(2.0,1.0*(k-1)));
// Fetch the level-k coefficients for this thread's equation.
float backhelperd=deviceD[-k+backhelper3-backhelper4+backhelper2];
float backhelpera=deviceA[-k+backhelper3-backhelper4+backhelper2];
float backhelperb=deviceB[-k+backhelper3-backhelper4+backhelper2];
float backhelperc=deviceC[-k+backhelper3-backhelper4+backhelper2];
int xindex1=backhelper2*pow(2.0,1.0*(k-1))-h;
int xindex2=backhelper2*pow(2.0,1.0*(k-1))+h;
//so thread i will be in charge of (2i-1)*2^(k-1) calculation
//printf("%d ",int((2*backhelper1-1)*pow(2.0,1.0*(k-1))));
deviceFinalX[(int)(backhelper2*pow(2.0,1.0*(k-1)))]=(backhelperd-backhelpera*deviceFinalX[xindex1]-backhelperc*deviceFinalX[xindex2])*1.0/backhelperb;
__syncthreads();
}
//***************************end of not block version of cyclic reduction*********************************************************************************//
// Solves the 2D Poisson problem on (0,1)^2 with an m x m interior grid
// (m = 2^EXPO - 1) by block cyclic reduction: first CalculateTheD reduces the
// block-tridiagonal system, then the reduced scalar systems are solved with
// CalculatePArrayKernel (forward elimination) and BackwardKernel
// (back substitution), and finally block-wise back substitution recovers all
// interior rows of the solution X.
int main()
{
//matrix size will be 63*63 as our setup
int m=pow(2,EXPO)-1;
int loopH=pow(2,EXPO-1);
int conHelp=4*loopH;
//syntax will follow the routine in the book
float *B;
float *C;
float *D;
float *X; //X to store the solution
float *newB;
float *newC;
int b=1;
int a=0;
int maxBlockSize=16;
//B and C share the same chuck length
int chunkLength=m*m;
float delta=(b-a)*1.0/(m+1.0);
float deltaSquare=delta*delta;
int chunkSize=chunkLength*sizeof(float);
//need to store all the version of D, it will be 2^k-k-1 block and each block has m value
int dLength=(pow(2,EXPO+1)-EXPO-2)*m;
int dSize=dLength*sizeof(float);
B=(float*)malloc(chunkSize);
C=(float*)malloc(chunkSize);
D=(float*)malloc(dSize);
//this is to store the final answer
X=(float*)malloc(chunkSize);
newB=(float*)malloc(chunkSize);
newC=(float*)malloc(chunkSize);
//newD=(float*)malloc(dSize);
//initilize B
// B is the tridiagonal block: -4 on the diagonal, 1 on the off-diagonals.
for(int i=0;i<m;i++)
{
for(int j=0;j<m;j++)
{
B[i*m+j]=0.0;
C[i*m+j]=0.0;
}
}
for(int i=0;i<m;i++)
{
B[i*m+i]=-4.0;
if(i!=0)
{
B[i*m+i-1]=1.0;
}
if(i!=m-1)
{
B[i*m+i+1]=1.0;;
}
}
//initilize C
// C starts as the identity (the coupling block between grid rows).
for(int i=0;i<m;i++)
{
C[i*m+i]=1.0;
}
//the first 2^k-1 will be the step 0 initial value
for(int i=0;i<m;i++)
{
for(int j=0;j<m;j++)
{
/* float x=(j+1)*delta;
float y=(i+1)*delta;*/
D[i*m+j]=deltaSquare; //f(x,y) will be 1 here.
printf("dd block: %d %lf ",i,D[i*m+j]);
}
printf("\n");
}
//other value initilized to be 0 at the beginnig
for(int i=m*m;i<dLength;i++)
{
D[i]=0.0;
}
/* for(int i=0;i<dLength;i++)
{
printf("dd %lf",D[i]);
}*/
//initilize x
for(int i=0;i<m;i++)
{
for(int j=0;j<m;j++)
{
X[i*m+j]=0.0;
}
}
//printf("let test this2:\n");
//begin timing here:
clock_t begin,end;
begin=clock();
float *deviceB,*deviceC,*deviceD,*deviceX,*devicenewB,*devicenewC;
cudaMalloc((void**)&deviceB,chunkSize);
cudaMalloc((void**)&deviceC,chunkSize);
cudaMalloc((void**)&deviceD,dSize);
cudaMalloc((void**)&deviceX,chunkSize);
cudaMalloc((void**)&devicenewB,chunkSize);
cudaMalloc((void**)&devicenewC,chunkSize);
//cudaMalloc((void**)&devicenewD,chunkSize);
cudaMemcpy(deviceB,B,chunkSize,cudaMemcpyHostToDevice); //store previous value
cudaMemcpy(deviceC,C,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceD,D,dSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceX,X,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(devicenewB,newB,chunkSize,cudaMemcpyHostToDevice); //store current stored value
cudaMemcpy(devicenewC,newC,chunkSize,cudaMemcpyHostToDevice);
// cudaMemcpy(devicenewD,newD,chunkSize,cudaMemcpyHostToDevice);
//int gridSize=((m+1)/maxBlockSize)*((m+1)/maxBlockSize); //gridSize for this problem will be 16
//dim3 dimGrid(1,gridSize)
dim3 dimGrid(1,32); //since the maximum process we are going to use will be 63*63
int blockRow=maxBlockSize;//pow(2,EXPO/2); //here will be 8 and 8
int blockColumn=maxBlockSize;//pow(2,EXPO/2); //here will be 8 and 8
dim3 dimBlock(blockColumn,blockRow);
// Forward block reduction: one kernel launch per level. The kernels
// double-buffer B/C based on the parity of `step`.
// NOTE(review): devicenewB/devicenewC are uploaded from uninitialized host
// buffers above; they are assumed to be fully overwritten before being read —
// TODO confirm.
for(int step=1;step<EXPO;step++)
{
//so the logic here will be if step is odd, then it use B,C,D as the old value and new value into newB, newC,newD.
//if step is even, then use newB,newC,newD as the old value and put the update value into B,C,D.
//here is to calculate the d(2^(k-1))(K-1) in the book
CalculateTheD<<<dimGrid,dimBlock>>>(step,deviceB,deviceC,deviceD,deviceX,devicenewB,devicenewC);
}
cudaDeviceSynchronize();
//the last step here will be 5 so it will write its new value into newB, newC, newD.
cudaMemcpy(D,deviceD,dSize,cudaMemcpyDeviceToHost);
/* for (int i=0;i<m;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%lf ",newD[3+i]);
}*/
//release some of the memory
for(int i=0;i<dLength;i++)
{
if(i%m==0)
{
printf("\n");
}
printf("dha:%lf ",D[i]);
}
cudaFree(deviceB);
cudaFree(deviceC);
//cudaFree(deviceD);
cudaFree(devicenewB);
cudaFree(devicenewC);
//cudaFree(devicenewD);
free(B);
free(C);
//free(D);
free(newB);
free(newC);
//free(newD);
/*cudaMemcpy(deviceB,B,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceC,C,chunkSize,cudaMemcpyHostToDevice);*/
//z will D in the not block version of cyclic reduction, ZA, ZB, ZC will corresponding to A, B and C
float *Z,*ZA,*ZB,*ZC,*FinalX;
int finalLengthX=(int)pow(2,EXPO)+1;
int chunkLengthZ=(pow(2,EXPO)-1)*2+1;
int zSize=chunkLengthZ*sizeof(float);
Z=(float*)malloc(zSize);
ZA=(float*)malloc(zSize);
ZB=(float*)malloc(zSize);
ZC=(float*)malloc(zSize);
FinalX=(float*)malloc(finalLengthX*sizeof(float)); //the first and last one should be know by the boundary condition
float *deviceZ,*deviceZA,*deviceZB, *deviceZC,*deviceFinalX;
cudaMalloc((void**)&deviceZ,zSize);
cudaMalloc((void**)&deviceZA,zSize);
cudaMalloc((void**)&deviceZB,zSize);
cudaMalloc((void**)&deviceZC,zSize);
cudaMalloc((void**)&deviceFinalX,finalLengthX*sizeof(float));
//set up the matrix step
// Solve the fully-reduced middle block: loopH scalar tridiagonal systems,
// each handled by one complete forward/backward cyclic reduction pass.
// NOTE(review): the inner loops reuse the name `j`, shadowing this loop
// variable — intentional per the original code, but easy to misread.
for(int j=1;j<=loopH;j++)
{
//for each j, za,zb,zc all going to be different
ZA[0]=0;
for(int i=1;i<m;i++)
{
ZA[i]=1.0;
}
//else will be 0,since it has been seperate to half and half
for(int i=m;i<chunkLengthZ;i++)
{
ZA[i]=0;
}
for(int i=0;i<m;i++)
{
ZB[i]=-4.0+2*cos((2.0*j-1.0)/(m+1.0)*PI);
//printf("zb:%f \n",ZB[i]);
}
for(int i=m;i<chunkLengthZ;i++)
{
ZB[i]=0;
}
ZC[m-1]=0;
for(int i=0;i<m-1;i++)
{
ZC[i]=1.0;
}
for(int i=m;i<chunkLengthZ;i++)
{
ZC[i]=0;
}
//if it is the first step z will be from d, otherwise, z will be from the previous solution of x
if(j==1)
{
for(int i=0;i<m;i++)
{
/*Z[i]=newD[(loopH-1)*m+i]*(-1.0);
printf("this original one being called? %lf \n",Z[i]);*/
Z[i]=D[((int)pow(2.0,EXPO+1.0)-3-EXPO)*m+i]*(-1.0);
//printf("z value: %lf \n",Z[i]);
}
for(int i=m;i<chunkLengthZ;i++)
{
Z[i]=0;
}
}
else
{
for(int i=0;i<m;i++)
{
//to do this will be x
Z[i]=FinalX[i+1];
//printf("does this ever called? %lf \n",Z[i]);
}
for(int i=m;i<chunkLengthZ;i++)
{
Z[i]=0;
}
}
//now need to call the cyclic function to find the solution of x
cudaMemcpy(deviceZ,Z,zSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceZA,ZA,zSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceZB,ZB,zSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceZC,ZC,zSize,cudaMemcpyHostToDevice);
for(int j=1;j<EXPO;j++)
{
//the lock size should change, the first step it will need 2^(n-j)-1, so first step will be 3 if n=3
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-j)/2);
//printf("blockrow is :%d \n",blockRow);
int blockColumn=pow(2,EXPO-j-(EXPO-j)/2);
//printf("blockColumn is :%d \n",blockColumn);
dim3 dimBlock(blockColumn,blockRow);
//in each step the processor being used should decrease should be 2^(n-j)-1 in jth step
CalculatePArrayKernel<<<dimGrid,dimBlock>>>(j,blockRow,blockColumn,deviceZA,deviceZB,deviceZC,deviceZ);
cudaDeviceSynchronize(); //cpu will wait until cuda finish the job, this is such important function!
}
//backward
//copy the device vector to host
//cudaMemcpy(ZA,deviceZA,chunkSize,cudaMemcpyDeviceToHost);
// sleep(20);
cudaMemcpy(ZB,deviceZB,zSize,cudaMemcpyDeviceToHost);
/*for(int i=0;i<2*m;i++)
{
printf("zbresult:%lf \n",ZB[i]);
}*/
//cudaMemcpy(C,deviceC,chunkSize,cudaMemcpyDeviceToHost);
cudaMemcpy(Z,deviceZ,zSize,cudaMemcpyDeviceToHost);
int lastIndex=(int)pow(2,EXPO+1)-EXPO-3;
float initialValue=Z[lastIndex]/ZB[lastIndex];
//printf("initial value: %lf \n",initialValue);
FinalX[0]=0;
FinalX[(int)pow(2,EXPO-1)]=initialValue;
//printf("the value in the middle is: %f and this suppose to close to 0.5 when n goes big! \n",FinalX[(int)pow(2,EXPO-1)]);
cudaMemcpy(deviceFinalX,FinalX,finalLengthX*sizeof(float),cudaMemcpyHostToDevice);
for(int k=EXPO-1;k>=1;k--)
{
//so the most one will use 2^(n-k) variable will be covered!
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-k)/2);
int blockColumn=pow(2,EXPO-k-(EXPO-k)/2);
dim3 dimBlock(blockColumn,blockRow);
BackwardKernel<<<dimGrid,dimBlock>>>(k,blockRow,blockColumn,deviceZA,deviceZB,deviceZC,deviceZ,deviceFinalX,initialValue);
cudaDeviceSynchronize();
}
cudaMemcpy(FinalX,deviceFinalX,finalLengthX*sizeof(float),cudaMemcpyDeviceToHost);
}
printf("\n final result for x(2^(k-1) block which should have %d values in it:\n",m);
for (int i=1;i<finalLengthX-1;i++)
{
//this will we stored in X the 2^(k-1) the block.
X[(loopH-1)*m+i-1]=FinalX[i];
printf("index: %d, %lf ",(loopH-1)*m+i-1,FinalX[i]);
}
printf("\n \n");
//now need to do the block wise backsubstitution based on the formula of 5.4.2.17
// Block-wise back substitution: recover the remaining solution rows level by
// level, solving each required scalar system with the same cyclic-reduction
// machinery as above.
for(int step=EXPO-1;step>=1;step--)
{
//based on formula 5.4.2.30
//ok this is loop trhough the matrix in 5.4.2.17
int help1=pow(2,EXPO-step);
int localloopH=pow(2,step-1);
int thetaHelper=pow(2,step);
//inside of each step, you have this much of sybmatrix to solve
for(int backStep=1;backStep<=help1;backStep++)
{
//factorize B(step-1)
//first and last one need to be treat specially, C[j-1] will be just identity matrix here
//************************************************************//
//this is to loop through the factorization
for(int j=1;j<=localloopH;j++)
{
//for each j, za,zb,zc all going to be different
ZA[0]=0;
for(int i=1;i<m;i++)
{
ZA[i]=1.0;
}
//else will be 0,since it has been seperate to half and half
for(int i=m;i<chunkLengthZ;i++)
{
ZA[i]=0;
}
for(int i=0;i<m;i++)
{
ZB[i]=-4.0+2*cos((2.0*j-1.0)/(thetaHelper)*PI);
printf("zb:%f step: %d\n",ZB[i],step);
}
for(int i=m;i<chunkLengthZ;i++)
{
ZB[i]=0;
}
ZC[m-1]=0;
for(int i=0;i<m-1;i++)
{
ZC[i]=1.0;
}
for(int i=m;i<chunkLengthZ;i++)
{
ZC[i]=0;
}
//if it is the first step z will be from d, otherwise, z will be from the previous solution of x
if(j==1)
{
//the first backsetp and last backstep will be special
if(backStep==1)
{
//teh first d will be d(t-s)(j-1)-c(j-1)x(t)
for(int i=0;i<m;i++)
{
//Z[i]=D[(loopH-1)*m+i]*(-1.0);
//printf("this original one being called? %lf \n",Z[i]);
Z[i]=(D[(conHelp-4*help1-step+1)*m+i]-X[(thetaHelper-1)*m+i])*(-1.0);
}
for(int i=m;i<chunkLengthZ;i++)
{
Z[i]=0;
}
}
else if(backStep==help1)
{
for(int i=0;i<m;i++)
{
//Z[i]=D[(loopH-1)*m+i]*(-1.0);
//printf("this original one being called? %lf \n",Z[i]);
Z[i]=(D[(conHelp-2*help1-1-step)*m+i]-X[(conHelp/2-thetaHelper-1)*m+i])*(-1.0);
}
for(int i=m;i<chunkLengthZ;i++)
{
Z[i]=0;
}
}
else //this is at the middle bakcstep
{
for(int i=0;i<m;i++)
{
//Z[i]=D[(loopH-1)*m+i]*(-1.0);
//printf("this original one being called? %lf \n",Z[i]);
Z[i]=(D[(2*backStep-1-step+conHelp-4*help1)*m+i]-X[(backStep*thetaHelper-1)*m+i]-X[((backStep-1)*thetaHelper-1)*m+i])*(-1.0);
}
for(int i=m;i<chunkLengthZ;i++)
{
Z[i]=0;
}
}
}
else
{
for(int i=0;i<m;i++)
{
//to do this will be x
Z[i]=FinalX[i+1];
//printf("does this ever called? %lf \n",Z[i]);
}
for(int i=m;i<chunkLengthZ;i++)
{
Z[i]=0;
}
}
//now need to call the cyclic function to find the solution of x
cudaMemcpy(deviceZ,Z,zSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceZA,ZA,zSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceZB,ZB,zSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceZC,ZC,zSize,cudaMemcpyHostToDevice);
for(int j=1;j<EXPO;j++)
{
//the lock size should change, the first step it will need 2^(n-j)-1, so first step will be 3 if n=3
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-j)/2);
//printf("blockrow is :%d \n",blockRow);
int blockColumn=pow(2,EXPO-j-(EXPO-j)/2);
//printf("blockColumn is :%d \n",blockColumn);
dim3 dimBlock(blockColumn,blockRow);
//in each step the processor being used should decrease should be 2^(n-j)-1 in jth step
CalculatePArrayKernel<<<dimGrid,dimBlock>>>(j,blockRow,blockColumn,deviceZA,deviceZB,deviceZC,deviceZ);
cudaDeviceSynchronize(); //cpu will wait until cuda finish the job, this is such important function!
}
//backward
//copy the device vector to host
//cudaMemcpy(ZA,deviceZA,chunkSize,cudaMemcpyDeviceToHost);
// sleep(20);
cudaMemcpy(ZB,deviceZB,zSize,cudaMemcpyDeviceToHost);
/* for(int i=0;i<2*m;i++)
{
printf("zbresult:%lf \n",ZB[i]);
}*/
//cudaMemcpy(C,deviceC,chunkSize,cudaMemcpyDeviceToHost);
cudaMemcpy(Z,deviceZ,zSize,cudaMemcpyDeviceToHost);
int lastIndex=(int)pow(2,EXPO+1)-EXPO-3;
float initialValue=Z[lastIndex]/ZB[lastIndex];
//printf("initial value: %lf \n",initialValue);
FinalX[0]=0;
FinalX[(int)pow(2,EXPO-1)]=initialValue;
//printf("the value in the middle is: %f and this suppose to close to 0.5 when n goes big! \n",FinalX[(int)pow(2,EXPO-1)]);
cudaMemcpy(deviceFinalX,FinalX,finalLengthX*sizeof(float),cudaMemcpyHostToDevice);
for(int k=EXPO-1;k>=1;k--)
{
//so the most one will use 2^(n-k) variable will be covered!
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-k)/2);
int blockColumn=pow(2,EXPO-k-(EXPO-k)/2);
dim3 dimBlock(blockColumn,blockRow);
BackwardKernel<<<dimGrid,dimBlock>>>(k,blockRow,blockColumn,deviceZA,deviceZB,deviceZC,deviceZ,deviceFinalX,initialValue);
cudaDeviceSynchronize();
}
cudaMemcpy(FinalX,deviceFinalX,finalLengthX*sizeof(float),cudaMemcpyDeviceToHost);
}
printf("\n");
for(int i=1;i<finalLengthX-1;i++)
{
X[((2*backStep-1)*localloopH-1)*m+i-1]=FinalX[i];
//printf("%lf \n",FinalX[i]);
if(backStep==1)
{
printf("test [%d],%lf \n",((2*backStep-1)*localloopH-1)*m+i-1,FinalX[i]);
}
}
//************************************************************//
}
}
printf("\n");
for (int i=0;i<m*m;i++)
{
//this will we stored in X the 2^(k-1) the block.
if(m%31==0)
{
printf("\n");
}
printf("[%d]:%lf ",i,X[i]);
}
double time_spent;
end=clock();
time_spent=(double)(end-begin)/CLOCKS_PER_SEC;
printf("\n time spent to calcuate x with partition [0,1] into %d intervals:%lf seconds \n",m+1,time_spent);
}
|
9,147 |
#include <stdio.h>
#include <stdlib.h>
// Declaración de función para ver recursos del device
void devicenfo(void);
// Entry point: delegates to devicenfo() to report device capabilities.
int main(int argc, char *argv[]){
    devicenfo();
    return 0;
}
// Sacar por pantalla información del *device*
// Print the capabilities of CUDA device 0 to stdout.
void devicenfo(void){
    struct cudaDeviceProp capabilities;
    cudaGetDeviceProperties (&capabilities, 0);
    printf("->CUDA Platform & Capabilities\n");
    printf("Name: %s\n", capabilities.name);
    printf("totalGlobalMem: %.2f MB\n", capabilities.totalGlobalMem/1024.0f/1024.0f);
    printf("sharedMemPerBlock: %.2f KB\n", capabilities.sharedMemPerBlock/1024.0f);
    printf("regsPerBlock (32 bits): %d\n", capabilities.regsPerBlock);
    printf("warpSize: %d\n", capabilities.warpSize);
    printf("memPitch: %.2f KB\n", capabilities.memPitch/1024.0f);
    printf("maxThreadsPerBlock: %d\n", capabilities.maxThreadsPerBlock);
    printf("maxThreadsDim: %d x %d x %d\n", capabilities.maxThreadsDim[0],
           capabilities.maxThreadsDim[1], capabilities.maxThreadsDim[2]);
    printf("maxGridSize: %d x %d\n", capabilities.maxGridSize[0],
           capabilities.maxGridSize[1]);
    printf("totalConstMem: %.2f KB\n", capabilities.totalConstMem/1024.0f);
    printf("major.minor: %d.%d\n", capabilities.major, capabilities.minor);
    // Bug fix: clockRate is reported in kHz, so MHz = kHz / 1000 (not 1024).
    printf("clockRate: %.2f MHz\n", capabilities.clockRate/1000.0f);
    // Bug fix: textureAlignment is a size_t; printing it with %d is undefined
    // behavior on LP64 platforms.
    printf("textureAlignment: %zu\n", capabilities.textureAlignment);
    printf("deviceOverlap: %d\n", capabilities.deviceOverlap);
    printf("multiProcessorCount: %d\n", capabilities.multiProcessorCount);
}
|
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Trivial kernel: each launched thread prints one greeting line.
// NOTE(review): identifiers containing "__" are reserved in C++; the name is
// kept unchanged because print_gpu() calls it, but it should be renamed
// alongside its caller.
__global__ void __print_kernel__ ()
{
printf("GPU says, Hello world! \n");
}
// Reports the number of visible CUDA devices, then launches a single-thread
// hello-world kernel and waits for it to finish so its output is flushed.
extern "C" void print_gpu()
{
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    printf("Nr. GPUs %d \n", deviceCount);
    __print_kernel__<<<1, 1>>>();
    cudaDeviceSynchronize();
}
|
#include "conv2d-kernel-grad.hh"
#include "graph.hh"
#include "../runtime/graph.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Graph op computing the gradient of a 2D convolution with respect to its
// kernel. Shape of the output equals the kernel shape (kernel_size[0..3]).
// NOTE(review): m_strides stores the caller's pointer rather than copying the
// two values; the caller's array must outlive this op — TODO confirm.
Conv2DKernelGrad::Conv2DKernelGrad(Op* y, Op* input, const int strides[], const int kernel_size[], const int padded_size[])
: Op("conv2d_kernel_grad",
Shape({kernel_size[0], kernel_size[1],
kernel_size[2],kernel_size[3]}),
{y, input})
, m_strides(strides)
{
// Copy the kernel and padded sizes into member storage.
m_kernel_size[0] = kernel_size[0];
m_kernel_size[1] = kernel_size[1];
m_kernel_size[2] = kernel_size[2];
m_kernel_size[3] = kernel_size[3];
m_padded_size[0] = padded_size[0];
m_padded_size[1] = padded_size[1];
}
// Lowers this op into a runtime node: fetches the compiled predecessors
// (y = output gradient, input = forward input), allocates the output tensor,
// and registers the runtime conv2d-kernel-grad node with the graph.
void Conv2DKernelGrad::compile()
{
auto& g = Graph::instance();
auto& cy = g.compiled(preds()[0]);
auto& cinput = g.compiled(preds()[1]);
Shape out_shape({m_kernel_size[0], m_kernel_size[1], m_kernel_size[2], m_kernel_size[3]});
dbl_t* out_data = tensor_alloc(out_shape.total());
int y_size[4] = { cy.out_shape[0], cy.out_shape[1],
cy.out_shape[2], cy.out_shape[3]};
int input_size[4] = { cinput.out_shape[0], cinput.out_shape[1],
cinput.out_shape[2], cinput.out_shape[3]};
auto out_node = rt::Node::op_conv2d_kernel_grad(cy.out_data, cinput.out_data,
m_strides, out_data, y_size,
input_size, m_padded_size,
{cy.out_node, cinput.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
}
|
#include <cmath>
// Device helper: raises a value to the third power.
inline __device__ double cube(double value)
{
    const double squared = value * value;
    return squared * value;
}
// Applies cube() to value[threadIdx.x] in place (single-block launch: one
// element per thread, indexed by threadIdx.x only).
// NOTE(review): the name suggests a cube *root* (cbrt) but the body computes
// value^3 — confirm which was intended before relying on either.
__global__ void cu_cbrt(double* value)
{
value[threadIdx.x] = cube(value[threadIdx.x]);
}
|
#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
struct PPMImage {
size_t x;
size_t y;
uint8_t *data;
};
/**
 * Read a binary PPM ("P6") image from `path`.
 *
 * Returns a heap-allocated PPMImage (free with freePPM) or NULL on any
 * failure (unopenable file, wrong magic, max_val != 255, short read).
 *
 * Fixes over the original:
 *  - open in "rb": pixel payload is raw bytes, text mode corrupts it on
 *    some platforms;
 *  - P6 stores 3 bytes (R,G,B) per pixel — allocate and read 3*x*y, not x*y;
 *  - NULL-check fopen (the original called fclose(NULL) on failure);
 *  - NULL-check the pixel-buffer malloc before fread.
 */
PPMImage * readPPM(const char *path)
{
    // WARNING - still doesn't handle comment lines in the header
    PPMImage *img = NULL;
    FILE *myfile = fopen(path, "rb");
    char magic_number[16];
    int x, y, high;
    int c;
    size_t count, nbytes;
    if (myfile == NULL) {
        fprintf(stderr, "readPPM could not open file\n");
        return NULL;
    }
    fscanf(myfile, "%15s", magic_number);
    if (strncmp(magic_number, "P6", 16) != 0) {
        fprintf(stderr, "readPPM only expects P6\n");
        goto cleanup;
    }
    fscanf(myfile, "%d %d", &x, &y);
    fscanf(myfile, "%d", &high);
    if (high != 255) {
        fprintf(stderr, "readPPM only expects max_val of 255\n");
        goto cleanup;
    }
    // skip whitespace between the header and the binary pixel data
    c = fgetc(myfile);
    while (!feof(myfile) && isspace(c)) {
        c = fgetc(myfile);
    }
    ungetc(c, myfile);
    img = (PPMImage *)malloc(sizeof(PPMImage));
    if (img == NULL)
        goto cleanup;
    img->x = x;
    img->y = y;
    nbytes = (size_t)x * (size_t)y * 3;  // 3 channels per pixel in P6
    img->data = (uint8_t *)malloc(nbytes);
    if (img->data == NULL) {
        free(img);
        img = NULL;
        goto cleanup;
    }
    count = fread(img->data, sizeof(uint8_t), nbytes, myfile);
    if (count != nbytes) {
        fprintf(stderr, "readPPM read incorrect number of bytes for data\n");
        free(img->data);
        free(img);
        img = NULL;
    }
cleanup:
    fclose(myfile);
    return img;
}
/**
 * Release a PPMImage allocated by readPPM.
 *
 * Fix: tolerate NULL — readPPM returns NULL on failure, and main passes
 * its result here unconditionally, so the original crashed on bad input.
 */
void freePPM(PPMImage *img)
{
    if (img == NULL)
        return;
    free(img->data);
    free(img);
}
/**
 * Load the PPM image named on the command line, then free it.
 *
 * Fixes: the usage fprintf had a %s conversion with no argument
 * (undefined behavior), and a failed read was not reported.
 */
int main(int argc, char const *argv[])
{
    if (argc < 2) {
        fprintf(stderr, "Usage: %s [raw-image]\n", argv[0]);
        exit(1);
    }
    PPMImage * img = readPPM(argv[1]);
    if (img == NULL) {
        fprintf(stderr, "Failed to read image\n");
        return 1;
    }
    freePPM(img);
    return 0;
}
|
9,152 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/for_each.h>
#include <iterator>
#include <thrust/copy.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/transform.h>
#include <algorithm>
#include <vector>
#include <thrust/sort.h>
#include <sys/times.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cuda.h>
void start_clock(void);
void end_clock(char *msg);
static clock_t st_time;
static clock_t en_time;
static struct tms st_cpu;
static struct tms en_cpu;
// Snapshot the wall/CPU clocks into the file-static st_time / st_cpu;
// end_clock() reports the elapsed ticks relative to this snapshot.
void
start_clock()
{
    st_time = times(&st_cpu);
}
// Take the end snapshot and print the elapsed clock ticks (units of
// sysconf(_SC_CLK_TCK)) since the matching start_clock(), labeled by msg.
// NOTE(review): takes char* so string literals bind via a deprecated
// conversion in C++; const char* would be cleaner but the prototype above
// would need to change in lockstep.
void end_clock(char *msg)
{
    en_time = times(&en_cpu);
    std::cout<< "Sort type : " << msg << std::endl<< " Time elapsed:"<< (intmax_t)(en_time - st_time)<<std::endl;
}
// Fill `i` with the next value from the C PRNG (as a double).
// Intended for std::for_each over a vector of doubles.
void generateRandom(double & i)
{
    i = static_cast<double>(rand());
}
// Benchmark: sort `vec_size` random doubles six ways, timing different
// slices of the work (alloc / fill / transfer / sort). The position of
// start_clock()/end_clock() relative to each step IS the experiment, so
// statement order must not be changed.
int main(int argc, char ** argv)
{
    if(argc<2)
    {
        std::cout<<"Please provide size as argument"<<std::endl;
        return 1;
    }
    // NOTE(review): atoi truncates for sizes beyond INT_MAX even though
    // vec_size is long — confirm intended input range.
    long vec_size =atoi(argv[1]);
    {
        // CPU: time fill + sort together
        start_clock();
        std::vector<double> vec(vec_size);
        std::for_each(vec.begin(), vec.end(), generateRandom);
        std::sort(vec.begin(), vec.end());
        end_clock("CPU all");
    }
    {
        // CPU: time only the sort
        std::vector<double> vec(vec_size);
        std::for_each(vec.begin(), vec.end(), generateRandom);
        start_clock();
        std::sort(vec.begin(), vec.end());
        end_clock("CPU sort only");
    }
    {
        // GPU: time fill + H2D + sort + D2H
        cudaDeviceReset();
        start_clock();
        thrust::host_vector<double> hv(vec_size);
        std::for_each(hv.begin(), hv.end(), generateRandom);
        thrust::device_vector<double> d = hv;
        thrust::sort(d.begin(), d.end());
        hv = d;
        end_clock("thrust ALL");
    }
    {
        // GPU: time device alloc + copies + sort (fill excluded)
        cudaDeviceReset();
        thrust::host_vector<double> hv(vec_size);
        std::for_each(hv.begin(), hv.end(), generateRandom);
        start_clock();
        thrust::device_vector<double> d = hv;
        thrust::sort(d.begin(), d.end());
        thrust::copy(d.begin(), d.end(), hv.begin());
        end_clock("Thrust sort and copy and alloc");
    }
    {
        // GPU: time copies + sort (device buffer pre-allocated)
        cudaDeviceReset();
        thrust::host_vector<double> hv(vec_size);
        std::for_each(hv.begin(), hv.end(), generateRandom);
        thrust::device_vector<double> d(vec_size);
        start_clock();
        thrust::copy(hv.begin(), hv.end(), d.begin());
        thrust::sort(d.begin(), d.end());
        thrust::copy(d.begin(), d.end(), hv.begin());
        end_clock("thrust sort and copy");
    }
    {
        // GPU: time only the device-side sort
        cudaDeviceReset();
        thrust::host_vector<double> hv(vec_size);
        std::for_each(hv.begin(), hv.end(), generateRandom);
        thrust::device_vector<double> d = hv;
        start_clock();
        thrust::sort(d.begin(), d.end());
        end_clock("thrust sort only");
        hv = d;
    }
}
// Transpose a size x size matrix: dst = src^T.
// One thread per output element; 2-D grid/block layout expected.
// Writes to dst are coalesced; reads from src are strided.
__global__ void Transpose_ker(float * dst, float * src, int size)
{
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < size && col < size)
        dst[row * size + col] = src[col * size + row];
}
/**
 * Host wrapper: launch Transpose_ker with 16x16 blocks covering a
 * size x size matrix.
 *
 * Bug fix: the original rounded `size` itself up to a multiple of 16 and
 * passed the ROUNDED value to the kernel, so the in-kernel bounds check
 * compared against the padded size and threads read/wrote past the end of
 * src/dst whenever size was not a multiple of 16. Round only the grid
 * (ceil-div) and pass the true size for the bounds check.
 */
void cuTranspose(float * dst, float * src, int size)
{
    dim3 dimBlock(16, 16);
    dim3 dimGrid((size + 16 - 1) / 16, (size + 16 - 1) / 16);
    Transpose_ker<<<dimGrid, dimBlock>>>(dst, src, size);
}
9,154 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
// Defined min and max functions
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
// One level of a bottom-up merge sort. startingArr holds runs of length
// sortedArraySize/2 that are already sorted; each thread merges pairs of
// adjacent runs into sorted runs of length sortedArraySize in endingArr.
// Threads are assigned runs round-robin (run id = thread id, stepping by
// `threads`). `totalSortedArrays` = number of output runs at this level.
__global__ void mergeSort(int* startingArr, int* endingArr, unsigned int N, unsigned int sortedArraySize, unsigned int threads, unsigned int totalSortedArrays) {
    // Assigns thread ID
    unsigned int id = threadIdx.x + blockIdx.x * blockDim.x;
    // index of the first element of the first sorted array that this thread is responsible for
    unsigned startElementIndex = id * sortedArraySize;
    // loop through each of the sorted arrays that this thread is responsible for merging
    for (unsigned int currentArray = id; currentArray < totalSortedArrays; currentArray += threads) {
        // calculate the middle and end element indices for this merge,
        // clamped to N so the last (partial) run is handled correctly
        unsigned arrayMiddle = MIN(startElementIndex + (sortedArraySize / 2), N);
        unsigned arrayEnd = MIN(startElementIndex + sortedArraySize, N);
        // create variables to hold the left and right indices of the merge as it proceeds
        unsigned int leftArrayIndex = startElementIndex;
        unsigned int rightArrayIndex = arrayMiddle;
        // perform the merge of two sorted sub-arrays into one sorted array
        for (unsigned int i = startElementIndex; i < arrayEnd; i++) {
            // take from the left run while it has elements and its head is
            // smaller (or the right run is exhausted)
            if (leftArrayIndex < arrayMiddle && (rightArrayIndex >= arrayEnd || startingArr[leftArrayIndex] < startingArr[rightArrayIndex])) {
                endingArr[i] = startingArr[leftArrayIndex];
                leftArrayIndex++;
            }
            // otherwise take the head of the right run
            else {
                endingArr[i] = startingArr[rightArrayIndex];
                rightArrayIndex++;
            }
        }
        // increment to the start point of the next merge
        startElementIndex += threads * sortedArraySize;
    }
}
/**
 * Driver for the level-by-level GPU merge sort of N random ints.
 *
 * Fixes over the original:
 *  - device buffers are allocated once and the data copied to the device
 *    once, instead of cudaMalloc/cudaMemcpy/cudaFree on EVERY level
 *    (per-iteration allocation is a classic CUDA anti-pattern);
 *  - between levels the output becomes the next input via a fast
 *    device-to-device copy instead of a device->host->device roundtrip;
 *  - host arrays are delete[]d (they leaked);
 *  - %u is used for the unsigned sortedArraySize in printf.
 */
int main() {
    printf("Parallelized Merge Sort Has Begun");
    unsigned int N = 1000000;
    unsigned int threads = 32;
    if (threads > 1024) {
        threads = 1024;  // hardware cap on threads per block
    }
    // Host arrays
    int* startingArray = new int[N];
    int* endingArray = new int[N];
    int* cudaStartArray;
    int* cudaEndArray;
    // Initialize array to random values 0 to 9999
    for (unsigned int i = 0; i < N; i++) {
        startingArray[i] = rand() % 10000;
    }
    memcpy(endingArray, startingArray, N * sizeof(int));
    clock_t startTime = clock();
    cudaSetDevice(0);
    // Allocate device memory ONCE for the whole merge tree
    cudaMalloc((void**)&cudaStartArray, N * sizeof(int));
    cudaMalloc((void**)&cudaEndArray, N * sizeof(int));
    cudaMemcpy(cudaStartArray, startingArray, N * sizeof(int), cudaMemcpyHostToDevice);
    // iterate through the levels of the merge tree; sortedArraySize is the
    // output-run length at each level
    for (unsigned int sortedArraySize = 2; sortedArraySize < N * 2; sortedArraySize = sortedArraySize * 2) {
        clock_t loopTime = clock();
        // number of output runs at this level (ceil division)
        unsigned int totalSortedArrays = N / sortedArraySize + (N % sortedArraySize != 0);
        mergeSort <<<1, threads>>> (cudaStartArray, cudaEndArray, N, sortedArraySize, threads, totalSortedArrays);
        cudaDeviceSynchronize();
        // this level's output is next level's input: device-to-device copy
        cudaMemcpy(cudaStartArray, cudaEndArray, N * sizeof(int), cudaMemcpyDeviceToDevice);
        printf("\nMerge Tree Level: %d || Size of Sorted Arrays: %u || Level Merge Time: %f", (int)log2((double)sortedArraySize), sortedArraySize, (float)(clock() - loopTime) / CLOCKS_PER_SEC);
    }
    // copy the fully sorted data back to the host once, at the end
    cudaMemcpy(endingArray, cudaEndArray, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(cudaStartArray);
    cudaFree(cudaEndArray);
    printf(" \nFINAL ITERATION : ");
    // Verify the result is nondecreasing
    bool isTrue = true;
    for (unsigned int i = 1; i < N; i++) {
        if (endingArray[i] < endingArray[i - 1]) {
            isTrue = false;
        }
    }
    if (isTrue) {
        printf("The Input Array Is Sorted From Merge Sort \n");
    }
    else {
        printf("The Input Array Is Not Sorted From Merge Sort \n");
    }
    printf("Total Merge Sort Time : %f", ((float)(clock() - startTime)) / CLOCKS_PER_SEC);
    // release host memory (leaked in the original)
    delete[] startingArray;
    delete[] endingArray;
    return 0;
}
9,155 | #include <stdio.h>
#include <stdlib.h>
// Row-sum kernel for an n x n int matrix: one block per row (blockIdx.x),
// each block's single thread accumulates its row serially.
__global__ void findleft(int *deviceMatrix, int *rowSum, int n){
    const int row = blockIdx.x;
    int acc = 0;
    for (int col = 0; col < n; ++col)
        acc += deviceMatrix[row * n + col];
    rowSum[row] = acc;
}
/**
 * Fill a 20x20 matrix with 0..399, compute per-row sums on the GPU
 * (one block per row), and print them.
 *
 * Fixes: cudaThreadSynchronize() is deprecated (use cudaDeviceSynchronize),
 * and both host and device allocations leaked.
 */
int main(){
    int n = 20;
    int *hostMatrix, *deviceMatrix, *hostRowSum, *deviceRowSum;
    int msize = n*n*sizeof(int);
    hostMatrix = (int *) malloc(msize);
    hostRowSum = (int *) malloc(n*sizeof(int));
    cudaMalloc((void **) &deviceMatrix, msize);
    cudaMalloc((void **) &deviceRowSum, msize/n);
    // matrix[i][j] = i*n + j
    int t = 0, i, j;
    for(i = 0; i<n; i++){
        for(j=0; j<n; j++)
            hostMatrix[i*n+j] = t++;
    }
    cudaMemcpy(deviceMatrix, hostMatrix, msize, cudaMemcpyHostToDevice);
    dim3 dimGrid(n,1);     // one block per row
    dim3 dimBlock(1,1,1);  // single thread per block
    findleft<<<dimGrid, dimBlock>>>(deviceMatrix, deviceRowSum, n);
    cudaDeviceSynchronize();  // cudaThreadSynchronize() is deprecated
    cudaMemcpy(hostRowSum, deviceRowSum, msize/n, cudaMemcpyDeviceToHost);
    for(i = 0; i<n; i++) printf("%d\n", hostRowSum[i]);
    // release all memory (leaked in the original)
    cudaFree(deviceMatrix);
    cudaFree(deviceRowSum);
    free(hostMatrix);
    free(hostRowSum);
    return 0;
}
9,156 | #include "includes.h"
/**
 * Softened gravitational potential at each of n query points from n_field
 * sources. Positions are split head+tail (xh+xt etc.) for extended
 * precision; per-point softening eps[tid] is combined with the global eps2.
 * Grid-stride loop: any launch configuration covers all n points.
 *
 * Fix: sqrt() on float operands promoted the whole expression to double
 * inside a float kernel; sqrtf keeps the math single-precision.
 */
__global__ void dev_get_potential_at_point( float eps2, float *eps, float *xh, float *yh, float *zh, float *xt, float *yt, float *zt, float *phi, int n, float *field_m, float *fxh, float *fyh, float *fzh, float *fxt, float *fyt, float *fzt, int n_field) {
    float dx, dy, dz, r, dr2, eps2_total;
    for (int tid=threadIdx.x + blockIdx.x*blockDim.x; tid < n; tid += blockDim.x*gridDim.x){
        eps2_total = eps2 + eps[tid]*eps[tid];
        phi[tid] = 0;
        for (int i=0; i < n_field; i++){
            // head+tail separation: sum the two components of each delta
            dx = (fxh[i] - xh[tid]) + (fxt[i] - xt[tid]);
            dy = (fyh[i] - yh[tid]) + (fyt[i] - yt[tid]);
            dz = (fzh[i] - zh[tid]) + (fzt[i] - zt[tid]);
            dr2 = dx*dx + dy*dy + dz*dz;
            if (dr2 > 0) {  // skip self-interaction (zero separation)
                r = sqrtf(eps2_total + dr2);
                phi[tid] -= field_m[i] / r;
            }
        }
    }
}
9,157 | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
//#include <helper_cuda.h>
#define DEBUG 0
#define UNKNOWN_NUM 188
#define POLY_NUM 191
#define PARAM_NUM 36
#define ENUM_NUM 16 //16
#define CHECK_NUM 9
#define SOL_MAX_NUM 200
//#define SET_VAL (((value_t)14764175488)<< ENUM_NUM)
#define BLOCK_NUM 32 //2^5
#define THREAD_NUM 256 // 2^8
#define THREADS_SHIFT 13 // (5+8)
typedef long value_t; // to save values of variables.
//typedef unsigned long constpart_t; // the part with no parameters.
typedef unsigned long linearpart_t; // to save 32 unknowns and 1 contants.
typedef unsigned long squarepart_t;
typedef unsigned long oripoly_t;
// Print the low `len` bits of `val`, least-significant bit first,
// inserting a space after every 5 bits for readability.
static inline void binary_print(value_t val, int len) {
    for (int bit = 0; bit < len; bit++) {
        printf((val & ((value_t)1 << bit)) ? "1" : "0");
        if ((bit + 1) % 5 == 0) {
            printf(" ");
        }
    }
}
typedef unsigned char UINT8;
typedef unsigned long long UINT64;
typedef UINT64 tKeccakLane;
//#define KeccakReference
#define maxNrRounds 24
#define nrLanes 25
#define index(x, y) (((x)%5)+5*((y)%5))
#define KeccakP1600_stateSizeInBytes 200
static tKeccakLane KeccakRoundConstants[maxNrRounds];
static unsigned int KeccakRhoOffsets[nrLanes];
__constant__ tKeccakLane const_KeccakRoundConstants[maxNrRounds] =
{
0x0000000000000001,
0x0000000000008082,
0x800000000000808a,
0x8000000080008000,
0x000000000000808b,
0x0000000080000001,
0x8000000080008081,
0x8000000000008009,
0x000000000000008a,
0x0000000000000088,
0x0000000080008009,
0x000000008000000a,
0x000000008000808b,
0x800000000000008b,
0x8000000000008089,
0x8000000000008003,
0x8000000000008002,
0x8000000000000080,
0x000000000000800a,
0x800000008000000a,
0x8000000080008081,
0x8000000000008080,
0x0000000080000001,
0x8000000080008008,
};
__constant__ unsigned int const_KeccakRhoOffsets[nrLanes] =
{
0, 1, 62, 28, 27, 36, 44, 6, 55, 20, 3, 10, 43, 25, 39, 41, 45, 15, 21, 8, 18, 2, 61, 56, 14
};
void KeccakP1600_InitializeRoundConstants(void);
void KeccakP1600_InitializeRhoOffsets(void);
static int LFSR86540(UINT8 *LFSR);
int displayLevel = 10;
// One-time host-side setup: verify the lane type is 64-bit, then fill the
// host round-constant and rho-offset tables used by the host permutation.
void KeccakP1600_StaticInitialize(void)
{
    if (sizeof(tKeccakLane) != 8) {
        printf("tKeccakLane should be 64-bit wide\n");
        abort();
    }
    KeccakP1600_InitializeRoundConstants();
    KeccakP1600_InitializeRhoOffsets();
}
// Derive the 24 Keccak-f[1600] iota round constants from the LFSR defined
// in the Keccak specification: round i gets one LFSR output bit at each
// bit position 2^j - 1 (j = 0..6).
void KeccakP1600_InitializeRoundConstants(void)
{
    UINT8 LFSRstate = 0x01;
    unsigned int i, j, bitPosition;
    for(i=0; i<maxNrRounds; i++) {
        KeccakRoundConstants[i] = 0;
        for(j=0; j<7; j++) {
            bitPosition = (1<<j)-1; /* 2^j-1 */
            if (LFSR86540(&LFSRstate))
                KeccakRoundConstants[i] ^= (tKeccakLane)1<<bitPosition;
        }
    }
}
// Fill the per-lane rho rotation offsets: walk lanes in the spec-defined
// order (x,y) -> (y, 2x+3y) starting from (1,0), assigning triangular
// numbers (t+1)(t+2)/2 mod 64; lane (0,0) stays at offset 0.
void KeccakP1600_InitializeRhoOffsets(void)
{
    unsigned int x, y, t, newX, newY;
    KeccakRhoOffsets[index(0, 0)] = 0;
    x = 1;
    y = 0;
    for(t=0; t<24; t++) {
        KeccakRhoOffsets[index(x, y)] = ((t+1)*(t+2)/2) % 64;
        newX = (0*x+1*y) % 5;
        newY = (2*x+3*y) % 5;
        x = newX;
        y = newY;
    }
}
// Step the 8-bit LFSR used to generate Keccak round constants and return
// the output bit (bit 0 of the state before shifting).
static int LFSR86540(UINT8 *LFSR)
{
    const int out = ((*LFSR) & 0x01) != 0;
    if ((*LFSR) & 0x80) {
        /* Primitive polynomial over GF(2): x^8+x^6+x^5+x^4+1 */
        *LFSR = ((*LFSR) << 1) ^ 0x71;
    } else {
        *LFSR = (*LFSR) << 1;
    }
    return out;
}
// Clear the full 1600-bit Keccak state (200 bytes) to zero.
__host__ __device__ void KeccakP1600_Initialize(void *state)
{
    memset(state, 0, 200);  /* 1600 bits / 8 */
}
/* ---------------------------------------------------------------- */
// XOR one byte into the state at the given byte offset (< 200).
__host__ __device__ void KeccakP1600_AddByte(void *state, unsigned char byte, unsigned int offset)
{
    assert(offset < 200);
    unsigned char *bytes = (unsigned char *)state;
    bytes[offset] ^= byte;
}
/* ---------------------------------------------------------------- */
// XOR `length` bytes of `data` into the state starting at `offset`;
// the range [offset, offset+length) must lie within the 200-byte state.
__host__ __device__ void KeccakP1600_AddBytes(void *state, const unsigned char *data, unsigned int offset, unsigned int length)
{
    assert(offset < 200);
    assert(offset+length <= 200);
    unsigned char *dst = (unsigned char *)state + offset;
    for (unsigned int i = 0; i < length; ++i)
        dst[i] ^= data[i];
}
/* ---------------------------------------------------------------- */
// Copy `length` bytes of `data` over the state starting at `offset`
// (overwrite, not XOR); range must fit in the 200-byte state.
__host__ __device__ void KeccakP1600_OverwriteBytes(void *state, const unsigned char *data, unsigned int offset, unsigned int length)
{
    assert(offset < 200);
    assert(offset+length <= 200);
    unsigned char *dst = (unsigned char *)state + offset;
    memcpy(dst, data, length);
}
/* ---------------------------------------------------------------- */
// Zero the first `byteCount` bytes of the state (byteCount <= 200).
__host__ __device__ void KeccakP1600_OverwriteWithZeroes(void *state, unsigned int byteCount)
{
    assert(byteCount <= 200);
    memset(state, 0, byteCount);
}
#define ROL64(a, offset) ((offset != 0) ? ((((tKeccakLane)a) << offset) ^ (((tKeccakLane)a) >> (64-offset))) : a)
// θ step of Keccak-f: XOR into every lane the parity of its left-neighbor
// column and the rotated parity of its right-neighbor column.
static __host__ __device__ void theta(tKeccakLane *A)
{
    unsigned int x, y;
    tKeccakLane C[5]={0,0,0,0,0}, D[5]={0,0,0,0,0};
    // C[x] = XOR of the 5 lanes in column x
    for(x=0; x<5; x++) {
        C[x] = 0;
        for(y=0; y<5; y++)
            C[x] ^= A[index(x, y)];
    }
    // D[x] = rot(C[x+1], 1) ^ C[x-1]  (indices mod 5)
    for(x=0; x<5; x++)
        D[x] = ROL64(C[(x+1)%5], 1) ^ C[(x+4)%5];
    for(x=0; x<5; x++)
        for(y=0; y<5; y++)
            A[index(x, y)] ^= D[x];
}
// ρ step (host): rotate each lane left by its precomputed per-lane offset.
static void rho(tKeccakLane *A)
{
    for (unsigned int x = 0; x < 5; ++x) {
        for (unsigned int y = 0; y < 5; ++y) {
            A[index(x, y)] = ROL64(A[index(x, y)], KeccakRhoOffsets[index(x, y)]);
        }
    }
}
// ρ step (device): same as rho() but reads the offsets from constant memory.
static __device__ void rho_Device(tKeccakLane *A)
{
    for (unsigned int x = 0; x < 5; ++x) {
        for (unsigned int y = 0; y < 5; ++y) {
            A[index(x, y)] = ROL64(A[index(x, y)], const_KeccakRhoOffsets[index(x, y)]);
        }
    }
}
// π step: move lane (x,y) to position (y, 2x+3y) using a snapshot so the
// permutation reads only pre-step values.
static __host__ __device__ void pi(tKeccakLane *A)
{
    tKeccakLane snapshot[25];
    for (unsigned int x = 0; x < 5; ++x)
        for (unsigned int y = 0; y < 5; ++y)
            snapshot[index(x, y)] = A[index(x, y)];
    for (unsigned int x = 0; x < 5; ++x)
        for (unsigned int y = 0; y < 5; ++y)
            A[index(0*x+1*y, 2*x+3*y)] = snapshot[index(x, y)];
}
// χ step: the only nonlinear step — each lane is XORed with
// (~next_lane & lane_after_next) along its row, row by row.
static __host__ __device__ void chi(tKeccakLane *A)
{
    tKeccakLane newRow[5];
    for (unsigned int y = 0; y < 5; ++y) {
        for (unsigned int x = 0; x < 5; ++x)
            newRow[x] = A[index(x, y)] ^ ((~A[index(x+1, y)]) & A[index(x+2, y)]);
        for (unsigned int x = 0; x < 5; ++x)
            A[index(x, y)] = newRow[x];
    }
}
// ι step (host): XOR the round constant for this round into lane (0,0).
static void iota(tKeccakLane *A, unsigned int indexRound)
{
    A[index(0, 0)] ^= KeccakRoundConstants[indexRound];
}
// ι step (device): same as iota() but reads the constant-memory table.
static __device__ void iota_Device(tKeccakLane *A, unsigned int indexRound)
{
    A[index(0, 0)] ^= const_KeccakRoundConstants[indexRound];
}
// One full Keccak-f[1600] round on the host: θ, ρ, π, χ, ι in spec order.
void KeccakP1600Round(tKeccakLane *state, unsigned int indexRound)
{
    theta(state);
    rho(state);
    pi(state);
    chi(state);
    iota(state, indexRound);
}
// One full Keccak-f[1600] round on the device; ρ and ι use the
// constant-memory tables, the other steps are shared with the host.
__device__ void KeccakP1600Round_Device(tKeccakLane *state,unsigned int indexRound) {
    theta(state);
    rho_Device(state);
    pi(state);
    chi(state);
    iota_Device(state, indexRound);
}
/**
 * Initialize the Keccak tables and load the 25-lane "messaged" state from
 * the fixed data file: 5 lines, each with 5 lanes of 16 lowercase hex
 * digits followed by a one-character separator.
 *
 * Fixes: the original dereferenced a NULL FILE* when the file could not be
 * opened and ignored fgets failures; both now exit with a clear message.
 */
void stateInit(tKeccakLane state[25]) {
    KeccakP1600_StaticInitialize();
    FILE *f = fopen("../data/state_files/messaged_state.txt", "r+"); //todo path
    if (f == NULL) {
        fprintf(stderr, "stateInit: cannot open messaged state file\n");
        exit(EXIT_FAILURE);
    }
    char line[100];
    for (int k = 0; k < 5; k++) {
        if (fgets(line, 100, f) == NULL) {
            fprintf(stderr, "stateInit: unexpected end of state file\n");
            fclose(f);
            exit(EXIT_FAILURE);
        }
        for (int i = 0; i < 5; i++) {
            char hex_s = 0;
            UINT64 tmp_val = 0;
            // each lane occupies 17 chars: 16 hex digits + separator
            for (int j = 0; j < 17; j++) {
                if (line[i * 17 + j] <= '9' && line[i * 17 + j] >= '0') {
                    hex_s = line[i * 17 + j] - '0';
                } else {
                    hex_s = line[i * 17 + j] - 'a' + 10;
                }
                if (j < 16) {
                    tmp_val = (tmp_val << 4) ^ (hex_s);  // accumulate nibbles MSB-first
                }
            }
            state[k * 5 + i] = tmp_val;
        }
    }
    fclose(f);
}
// Expand a candidate (enumeration value `val` + linear-system solution)
// into 640 state bits: bit i of the state is the parity of the AND of the
// i-th row of var_all with the 256-bit concatenated assignment vector.
// Bits 0..319 land in lanes 0..4, bits 320..639 in lanes 10..14 (the i/64>4
// branch skips lanes 5..9).
void getStates(tKeccakLane state[25], oripoly_t var_all[640][4], value_t val,
               value_t solutions[3]) {
    value_t val_sol[4];
    // assemble the 4x64-bit assignment: solution words high, enumerated
    // parameters low, with the constant-1 bit at position PARAM_NUM
    val_sol[3] = solutions[2];
    val_sol[2] = solutions[1];
    val_sol[1] = solutions[0];
    val_sol[0] = val ^ ((value_t) 1 << PARAM_NUM);
    for (int i = 0; i < 640; i++) {
        value_t w[4] = { 0, 0, 0, 0 };
        for (int j = 0; j < 4; j++) {
            w[j] = var_all[i][j] & val_sol[j];
        }
        // parity of all 256 selected bits via XOR-fold
        w[0] = w[0] ^ w[1] ^ w[2] ^ w[3];
        w[0] = (w[0]) ^ (w[0] >> 32);
        w[0] = (w[0]) ^ (w[0] >> 16);
        w[0] = (w[0]) ^ (w[0] >> 8);
        w[0] = (w[0]) ^ (w[0] >> 4);
        w[0] = (w[0]) ^ (w[0] >> 2);
        w[0] = (w[0]) ^ (w[0] >> 1);
        if (w[0] & (value_t) 1) {
            int n = (i / 64 > 4) ? (i / 64 + 5) : i / 64;
            state[n] ^= ((UINT64) 1) << (i % 64);
        }
    }
}
// Run 3 Keccak rounds on `state` and compare against the target hash:
// lanes 0-2 fully, lane 3 only in its low 32 bits. Returns 1 on a match
// (and prints the pre-permutation state), 0 otherwise.
// NOTE: `state` is permuted in place, and the lane-3 comparison uses `^=`,
// mutating state[3] as a side effect of the condition — callers must not
// reuse `state` afterwards expecting the original value.
int checkHashValue(tKeccakLane state[25], tKeccakLane hashvalue[4]) {
    // keep a copy of the input state so it can be printed on success
    tKeccakLane state_copy[25];
    for(int i = 0; i < 25; i++){
        state_copy[i] = state[i];
    }
    for (int i = 0; i < 3; i++) {
        KeccakP1600Round(state, i);
    }
    int result = 0;
    if(state[0] == hashvalue[0] && state[1] == hashvalue[1] && state[2] == hashvalue[2] && ((state[3] ^= hashvalue[3]) & (0x00000000FFFFFFFF)) == 0){
        printf("Find Preimage!!!\nState after XORed with block2:");
        for (int i = 0; i < 25; i++) {
            binary_print(state_copy[i], 64);
            printf(" ");
            printf("%llu ",state_copy[i]);
        }
        printf("\n");
        result = 1;
    }
    return result;
}
__constant__ tKeccakLane const_state[25];
__constant__ tKeccakLane const_hashvalue[4];
__device__ linearpart_t d_linear_mat[ENUM_NUM * POLY_NUM * 3];
__device__ squarepart_t d_square_mat[ENUM_NUM * POLY_NUM];
__device__ value_t d_var_all[2560];
// Index of the highest set bit among the low `len` bits of val, or -1
// when none of those bits are set.
static inline __host__ __device__ int largestpos(value_t val, int len) {
    int i = len;
    while (--i >= 0) {
        if (val & ((value_t) 1 << i))
            return i;
    }
    return -1;
}
// Highest set-bit position across a 3-word (up to 192-bit) value
// val2:val1:val0, considering only the low `len` bits. Scans the highest
// word that `len` reaches, falling through to lower words; returns the
// global bit index, or -1 if all considered bits are clear.
static inline __host__ __device__ int largestpos_3(value_t val0, value_t val1,
                                                   value_t val2, int len) {
    int p = 0;
    if (len > 128) {
        p = largestpos(val2, len - 128);
        if (p > -1) {
            return p + 128;
        } else {
            p = largestpos(val1, 64);
            if (p > -1) {
                return p + 64;
            } else {
                p = largestpos(val0, 64);
            }
        }
    } else if (len > 64 && len <= 128) {
        p = largestpos(val1, len - 64);
        if (p > -1) {
            return p + 64;
        } else {
            p = largestpos(val0, 64);
        }
    } else {
        p = largestpos(val0, 64);
    }
    return p;
}
// Host-side Gaussian elimination over GF(2) on a POLY_NUM x (UNKNOWN_NUM+1)
// system packed 64 bits per word (working_mat[row][0..2]; bit 0 of word 0
// is the constant column). Destroys working_mat. Returns the number of
// solutions written to `solutions` (0 if inconsistent); free variables are
// enumerated, doubling the solution count per free unknown.
// NOTE(review): poly_num/unknown_num parameters are shadowed by the
// POLY_NUM/UNKNOWN_NUM macros in the loops — confirm that is intended.
static inline value_t gauss_host(linearpart_t working_mat[POLY_NUM][3],
    const int poly_num, const int unknown_num, value_t solutions[SOL_MAX_NUM][3]) {
    int pos_arr[POLY_NUM]; // bear revised
    int rank = 0;
    // forward elimination: pivot each nonzero row on its highest set bit
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 && working_mat[pi][2] == 0) {
            continue;
        }
        pos_arr[pi] = largestpos_3(working_mat[pi][0],working_mat[pi][1],working_mat[pi][2], unknown_num + 1);
        rank++;
        // pivot on the constant column means 0 == 1: inconsistent system
        if (pos_arr[pi] == 0) {
            return 0;
        }
        for (int j = pi + 1; j < POLY_NUM; j++) {
            if(working_mat[j][pos_arr[pi]/64] & ((linearpart_t)1 << (pos_arr[pi] % 64))){
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
                working_mat[j][2] ^= (working_mat[pi][2]);
            }
        }
    }
    // back-substitution: clear each pivot column above its row
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0
            && working_mat[pi][2] == 0) {
            continue;
        }
        for (int j = 0; j < pi; j++) {
            if (working_mat[j][pos_arr[pi] / 64]
                & ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
                working_mat[j][2] ^= (working_mat[pi][2]);
            }
        }
    }
    if (rank == unknown_num) {
        // full rank: exactly one solution, read off the constant bits
        solutions[0][0] = 0;
        solutions[0][1] = 0;
        solutions[0][2] = 0;
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 && working_mat[pi][2] == 0) {
                continue;
            }
            if (working_mat[pi][0] & (linearpart_t)1) {
                solutions[0][(pos_arr[pi]-1) /64 ] ^= ((value_t)1 << (pos_arr[pi]-1) % 64);
            }
        }
        return 1;
    } else {
        // rank-deficient: enumerate every assignment of the free unknowns
        solutions[0][0] = 0;
        solutions[0][1] = 0;
        solutions[0][2] = 0;
        value_t sol_num = 1;
        // appear[i] marks unknown i as a pivot (bound) variable
        bool appear[UNKNOWN_NUM + 1];
        for(int nn = 0; nn < UNKNOWN_NUM + 1; nn++){
            appear[nn] = 0;
        }
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 && working_mat[pi][2] == 0) {
                continue;
            }
            appear[pos_arr[pi]] = true;
            if (working_mat[pi][0] & (linearpart_t)1) {
                solutions[0][(pos_arr[pi]-1) /64 ] ^= ((value_t)1 << (pos_arr[pi]-1) % 64);
            }
        }
        // duplicate solutions: for each free unknown, copy all existing
        // solutions with that unknown flipped to 1 and propagate its effect
        // on every pivot variable whose row contains it
        for (int i = 1; i < UNKNOWN_NUM+1; i++) { // liting revised
            if (appear[i] == false) {
                for (int j = 0; j < sol_num; j++) {
                    // bear revised
                    solutions[j + sol_num][0] = solutions[j][0];
                    solutions[j + sol_num][1] = solutions[j][1];
                    solutions[j + sol_num][2] = solutions[j][2];
                    solutions[j + sol_num][(i-1)/64] ^= ((value_t)1 << ((i-1)%64));
                }
                // bear added
                for (int pi = 0; pi < POLY_NUM; pi++) {
                    if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0 && working_mat[pi][2] == 0) {
                        continue;
                    }
                    for (int j = 0; j < sol_num * ((working_mat[pi][i/64] & (((linearpart_t) 1) << (i%64))) != 0); j++) {
                        solutions[j + sol_num][(pos_arr[pi] - 1)/64] ^= ((value_t) 1 << ((pos_arr[pi] - 1)% 64));
                    }
                }
                sol_num *= 2;
            }
        }
        return sol_num;
    }
}
// Device-side twin of gauss_host(): Gaussian elimination over GF(2) on the
// packed POLY_NUM x (UNKNOWN_NUM+1) system; destroys working_mat and
// returns the number of solutions written (0 if inconsistent). Keep in
// sync with gauss_host — only the argument order differs.
static inline __device__ value_t gauss(value_t solutions[SOL_MAX_NUM][3], linearpart_t working_mat[POLY_NUM][3],
    const int poly_num, const int unknown_num) {
    // bear revised
    int pos_arr[POLY_NUM]; // bear revised
    int rank = 0;
    // forward elimination
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0
            && working_mat[pi][2] == 0) {
            continue;
        }
        pos_arr[pi] = largestpos_3(working_mat[pi][0], working_mat[pi][1],working_mat[pi][2],unknown_num + 1);
        rank++;
        // pivot on the constant column => inconsistent
        if (pos_arr[pi] == 0) {
            return 0;
        }
        for (int j = pi + 1; j < POLY_NUM; j++) {
            if (working_mat[j][pos_arr[pi] / 64]
                & ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
                working_mat[j][2] ^= (working_mat[pi][2]);
            }
        }
    }
    // back-substitution
    for (int pi = 0; pi < POLY_NUM; pi++) {
        if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0
            && working_mat[pi][2] == 0) {
            continue;
        }
        for (int j = 0; j < pi; j++) {
            if (working_mat[j][pos_arr[pi] / 64]
                & ((linearpart_t) 1 << (pos_arr[pi] % 64))) {
                working_mat[j][0] ^= (working_mat[pi][0]);
                working_mat[j][1] ^= (working_mat[pi][1]);
                working_mat[j][2] ^= (working_mat[pi][2]);
            }
        }
    }
    if (rank == unknown_num) {
        // full rank: single solution from the constant column
        solutions[0][0] = 0;
        solutions[0][1] = 0;
        solutions[0][2] = 0;
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0
                && working_mat[pi][2] == 0) {
                continue;
            }
            if (working_mat[pi][0] & (linearpart_t) 1) {
                solutions[0][(pos_arr[pi] - 1) / 64] ^= ((value_t) 1
                    << (pos_arr[pi] - 1) % 64);
            }
        }
        return 1;
    } else {
        // now troubles come
        // rank-deficient: enumerate the free unknowns
        solutions[0][0] = 0;
        solutions[0][1] = 0;
        solutions[0][2] = 0;
        value_t sol_num = 1;
        //liting revised
        bool appear[UNKNOWN_NUM + 1];
        for(int nn = 0; nn < UNKNOWN_NUM + 1; nn++){
            appear[nn] = 0;
        }
        for (int pi = 0; pi < POLY_NUM; pi++) {
            if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0
                && working_mat[pi][2] == 0) {
                continue;
            }
            appear[pos_arr[pi]] = true;
            if (working_mat[pi][0] & (linearpart_t) 1) {
                solutions[0][(pos_arr[pi] - 1) / 64] ^= ((value_t) 1
                    << (pos_arr[pi] - 1) % 64);
            }
        }
        // duplicate solutions for every assignment of each free unknown
        for (int i = 1; i < UNKNOWN_NUM + 1; i++) { // liting revised
            if (appear[i] == false) {
                for (int j = 0; j < sol_num; j++) {
                    // bear revised
                    solutions[j + sol_num][0] = solutions[j][0];
                    solutions[j + sol_num][1] = solutions[j][1];
                    solutions[j + sol_num][2] = solutions[j][2];
                    solutions[j + sol_num][(i - 1) / 64] ^= ((value_t) 1
                        << ((i - 1) % 64));
                }
                // bear added
                for (int pi = 0; pi < POLY_NUM; pi++) {
                    if (working_mat[pi][0] == 0 && working_mat[pi][1] == 0
                        && working_mat[pi][2] == 0) {
                        continue;
                    }
                    for (int j = 0;j< sol_num* ((working_mat[pi][i / 64]& (((linearpart_t) 1)<< (i % 64))) != 0);j++) {
                        solutions[j + sol_num][(pos_arr[pi] - 1) / 64] ^=((value_t) 1 << ((pos_arr[pi] - 1) % 64));
                    }
                }
                sol_num *= 2;
            }
        }
        return sol_num;
    }
}
// Preimage-search kernel: each thread enumerates 2^ENUM_NUM parameter
// assignments in Gray-code order (so each step XORs in only one
// parameter's contribution), solves the resulting linear system with
// gauss(), rebuilds each candidate state and verifies it against the
// target hash with 3 Keccak rounds. Per-thread solution counts accumulate
// in d_sol_total; a found preimage is written to `result`.
// NOTE: heavy per-thread local arrays (working_mat, solutions) likely
// spill to local memory — occupancy is expected to be low.
__global__ void solveLinear(const linearpart_t *d_working_mat_copy,
    const squarepart_t *d_const_mat, value_t *d_val, value_t *d_sol_total,value_t* result) {
    int thidx = blockDim.x * blockIdx.x + threadIdx.x;
    value_t val = d_val[thidx];
    linearpart_t working_mat[POLY_NUM][3]; // initialized as the const part of linear matrix. also used as the results of linear part.
    linearpart_t working_mat_copy[POLY_NUM][3];
    squarepart_t const_mat[POLY_NUM];
    d_sol_total[thidx] = 0;
    //copy data from device
    for (int i = 0; i < POLY_NUM; i++) {
        working_mat_copy[i][0] = d_working_mat_copy[thidx * POLY_NUM * 3 + i*3];
        working_mat_copy[i][1] = d_working_mat_copy[thidx * POLY_NUM * 3 + i*3 + 1];
        working_mat_copy[i][2] = d_working_mat_copy[thidx * POLY_NUM * 3 + i*3 + 2];
        const_mat[i] = d_const_mat[thidx * POLY_NUM + i];
    }
    // main loop.
    for (value_t count = 1; count < (1 << ENUM_NUM); count++) {
        // generate the next gray code: position of the single flipped bit
        int pos = 64-__ffsll(__brevll(count ^ (count - 1)));
        val = val ^ ((value_t) 1 << pos);
        // incrementally update the system for the one flipped parameter
        for (int pi = 0; pi < POLY_NUM; pi++) {
            working_mat_copy[pi][0] ^= d_linear_mat[pos * POLY_NUM * 3 + pi * 3];
            working_mat_copy[pi][1] ^= d_linear_mat[pos * POLY_NUM * 3 + pi * 3 + 1];
            working_mat_copy[pi][2] ^= d_linear_mat[pos * POLY_NUM * 3 + pi * 3 + 2];
            const_mat[pi] ^= d_square_mat[pos * POLY_NUM + pi];
            working_mat[pi][0] = working_mat_copy[pi][0];
            working_mat[pi][1] = working_mat_copy[pi][1];
            working_mat[pi][2] = working_mat_copy[pi][2];
            // fold the quadratic contribution into the constant bit (parity)
            value_t w = const_mat[pi] & val;
            working_mat[pi][0] ^= (bool)((__popcll((unsigned long long int)w)) & (value_t) 1);
        }
        value_t solutions[SOL_MAX_NUM][3];
        value_t sol_num = 0;
        // gauss
        sol_num = gauss(solutions, working_mat, POLY_NUM, UNKNOWN_NUM);
        d_sol_total[thidx] += sol_num;
        // verify each solution on 3 round keccak.
        tKeccakLane dState[25];
        for(int s = 0;s < sol_num;s++){
            // lanes 0-4 and 10-14 are rebuilt from the solution below;
            // the rest come straight from the constant-memory base state
            dState[0] = 0;
            dState[1] = 0;
            dState[2] = 0;
            dState[3] = 0;
            dState[4] = 0;
            dState[5] = const_state[5];
            dState[6] = const_state[6];
            dState[7] = const_state[7];
            dState[8] = const_state[8];
            dState[9] = const_state[9];
            dState[10] = 0;
            dState[11] = 0;
            dState[12] = 0;
            dState[13] = 0;
            dState[14] = 0;
            dState[15] = const_state[15];
            dState[16] = const_state[16];
            dState[17] = const_state[17];
            dState[18] = const_state[18];
            dState[19] = const_state[19];
            dState[20] = const_state[20];
            dState[21] = const_state[21];
            dState[22] = const_state[22];
            dState[23] = const_state[23];
            dState[24] = const_state[24];
            // assemble the 256-bit assignment (mirrors host getStates)
            value_t val_sol[4];
            val_sol[3] = solutions[s][2];
            val_sol[2] = solutions[s][1];
            val_sol[1] = solutions[s][0];
            val_sol[0] = val ^ ((value_t)1 << PARAM_NUM);
            for(int i = 0; i< 640; i ++){
                value_t w[4] ={0,0,0,0};
                for(int j = 0; j< 4; j++){
                    w[j] = d_var_all[i * 4 + j] & val_sol[j];
                }
                w[0] = w[0] ^w[1]^w[2]^w[3];
                // parity via popcount selects state bit i
                if ((bool)((__popcll((unsigned long long int)w[0])) & (value_t) 1)) {
                    int n = (i/64 > 4 )?( i/64 + 5 ): i/64 ;
                    dState[n] ^= ((UINT64)1) << (i % 64);
                }
            }
            tKeccakLane state_copy[25];
            for(int i = 0; i < 25; i++){
                state_copy[i] = dState[i];
            }
            KeccakP1600Round_Device(dState, 0);
            KeccakP1600Round_Device(dState, 1);
            KeccakP1600Round_Device(dState, 2);
            // compare lanes 0-2 fully, lane 3 in its low 32 bits only
            if(dState[0] == const_hashvalue[0] && dState[1] == const_hashvalue[1] && dState[2] == const_hashvalue[2] && ((dState[3] ^= const_hashvalue[3]) & (0x00000000FFFFFFFF)) == 0){
                printf("Find Preimage!!! val is %lu.\n",val);
                result[0] = val;
                result[1] = val_sol[1];
                result[2] = val_sol[2];
                result[3] = val_sol[3];
                printf("The messaged state :");
                for (int i = 0; i < 25; i++) {
                    printf("%llx ", state_copy[i]);
                    if(i % 5 == 0){
                        printf("\n");
                    }
                }
                printf("\n");
            }
        }
    }
}
int main(int argc, char** argv) {
printf("read middle state\n");
tKeccakLane state[25];
stateInit(state);
printf("read hash value\n");
FILE *hashvalue_file = fopen("../../hash_value.txt", "r+");
tKeccakLane hashvalue[4] = { 0, 0, 0, 0 };
char line[20];
for (int i = 0; i < 4; i++) {
fgets(line, 20, hashvalue_file);
char hex_s = 0;
UINT64 tmp_val = 0;
for (int j = 0; j < 17; j++) {
if (line[j] <= '9' && line[j] >= '0') {
hex_s = line[j] - '0';
}else if(line[j] <= 'z' && line[j] >= 'a'){
hex_s = line[j] - 'a' + 10;
}else if(line[j] <= 'Z' && line[j] >= 'A'){
hex_s = line[j] - 'A' + 10;
}
if (j < 16) {
//printf("%c", line[i*17 + j] );
tmp_val = (tmp_val << 4) ^ (hex_s);
}
}
hashvalue[i] = tmp_val;
}
fclose (hashvalue_file);
cudaError_t err0 = cudaSuccess;
err0 = cudaMemcpyToSymbol(const_state, state, 25 * sizeof(tKeccakLane), 0,
cudaMemcpyHostToDevice);
if (err0 != cudaSuccess) {
printf("Failed to copy host to device(error code %s)!\n",
cudaGetErrorString(err0));
exit(EXIT_FAILURE);
}
err0 = cudaSuccess;
err0 = cudaMemcpyToSymbol(const_hashvalue, hashvalue, 4 * sizeof(tKeccakLane), 0,
cudaMemcpyHostToDevice);
if (err0 != cudaSuccess) {
printf("Failed to copy host to device(error code %s)!\n",
cudaGetErrorString(err0));
exit(EXIT_FAILURE);
}
const int para_num = PARAM_NUM;
const int enum_num = ENUM_NUM;
//const int set_num = para_num - enum_num;
value_t set_val = atol(argv[1])<<THREADS_SHIFT;
// value_t set_val = 0;
const int unknown_num = UNKNOWN_NUM;
const int poly_num = POLY_NUM;
linearpart_t linear_mat[para_num][poly_num][3];
linearpart_t working_mat[poly_num][3]; // initialized as the const part of linear matrix. also used as the results of linear part.
linearpart_t working_mat_copy[poly_num][3];
linearpart_t working_mat_file[poly_num][3];
squarepart_t square_mat[para_num][poly_num];
squarepart_t const_mat[poly_num]; // used to compute the const part from square polys.
oripoly_t var_all[640][4];
// cudaSetDevice(0);
cudaSetDevice(atoi(argv[2]));
//todo
FILE *in1 = fopen("../data/mat_files/linear_mat.txt", "r+");
FILE *in2 = fopen("../data/mat_files/square_mat.txt", "r+");
FILE *in4 = fopen("../data/mat_files/working_mat.txt", "r+");
FILE *in5 = fopen("../data/mat_files/totalLinear_mat.txt", "r+");
char c1, c2, c4, c5;
for (int i = 0; i < para_num; i++) {
for (int j = 0; j < poly_num; j++) {
linear_mat[i][j][0] = 0;
linear_mat[i][j][1] = 0;
linear_mat[i][j][2] = 0;
square_mat[i][j] = 0;
for (int k = 0; k < 192; k++) {
fscanf(in1, "%c", &c1);
while (c1 != '0' && c1 != '1') {
fscanf(in1, "%c", &c1);
}
if (c1 == '1') {
linear_mat[i][j][k / 64] ^= ((linearpart_t) 1 << (k % 64));
}
}
for (int k = 0; k < 64; k++) {
fscanf(in2, "%c", &c2);
while (c2 != '0' && c2 != '1') {
fscanf(in2, "%c", &c2);
}
if (c2 == '1') {
square_mat[i][j] ^=
((squarepart_t) 1 << (para_num - 1 - k));
}
}
}
}
for (int i = 0; i < poly_num; i++) {
working_mat[i][0] = 0;
working_mat[i][1] = 0;
working_mat[i][2] = 0;
for (int j = 0; j < 192; j++) {
fscanf(in4, "%c", &c4);
while (c4 != '0' && c4 != '1') {
fscanf(in4, "%c", &c4);
}
if (c4 == '1') {
working_mat[i][j / 64] ^= ((linearpart_t) 1 << (j % 64));
}
}
working_mat_file[i][0] = working_mat[i][0];
working_mat_file[i][1] = working_mat[i][1];
working_mat_file[i][2] = working_mat[i][2];
}
for (int i = 0; i < 640; i++) {
var_all[i][0] = 0;
var_all[i][1] = 0;
var_all[i][2] = 0;
var_all[i][3] = 0;
for (int j = 0; j < 256; j++) {
fscanf(in5, "%c", &c5);
while (c5 != '0' && c5 != '1') {
fscanf(in5, "%c", &c5);
}
if (c5 == '1') {
var_all[i][j / 64] ^= ((value_t) 1 << (j % 64));
}
}
}
fclose(in1);
fclose(in2);
fclose(in4);
fclose(in5);
printf("finish reading file!\n");
linearpart_t linear_mat_enum[ENUM_NUM * POLY_NUM * 3];
squarepart_t square_mat_enum[ENUM_NUM * POLY_NUM];
value_t var_all_enum[640 * 4];
for (int i = 0; i < ENUM_NUM; i++) {
for (int j = 0; j < POLY_NUM; j++) {
for (int k = 0; k < 3; k++) {
linear_mat_enum[i * POLY_NUM * 3 + j * 3 + k] =
linear_mat[i][j][k];
}
}
}
for (int i = 0; i < ENUM_NUM; i++) {
for (int j = 0; j < POLY_NUM; j++) {
square_mat_enum[i * POLY_NUM + j] = square_mat[i][j];
}
}
cudaMemcpyToSymbol(d_linear_mat, linear_mat_enum,
3 * ENUM_NUM * POLY_NUM * sizeof(linearpart_t));
cudaMemcpyToSymbol(d_square_mat, square_mat_enum,
ENUM_NUM * POLY_NUM * sizeof(squarepart_t));
for (int i = 0; i < 640; i++) {
for (int j = 0; j < 4; j++) {
var_all_enum[i * 4 + j] = var_all[i][j];
}
}
cudaMemcpyToSymbol(d_var_all, var_all_enum, 640 * 4 * sizeof(value_t));
printf("finish copying device memory!\n");
cudaError_t err = cudaSuccess;
int thidx = BLOCK_NUM * THREAD_NUM;
value_t *d_val = NULL;
err = cudaMalloc((void **) &d_val, thidx * sizeof(value_t));
if (err != cudaSuccess) {
printf("Failed to allocate device value (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t *d_sol_total = NULL;
err = cudaMalloc((void **) &d_sol_total, thidx * 3 * sizeof(value_t));
if (err != cudaSuccess) {
printf("Failed to allocate device value (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
linearpart_t *d_working_mat_copy = NULL;
err = cudaMalloc((void **) &d_working_mat_copy,
thidx * poly_num * 3 * sizeof(linearpart_t));
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to allocate device working_mat_copy (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
squarepart_t *d_const_mat = NULL;
err = cudaMalloc((void **) &d_const_mat,
thidx * poly_num * sizeof(squarepart_t));
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to allocate devices const_mat (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy oripolys from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t h_result[4] = {0, 0, 0, 0};
value_t *d_result = NULL;
err = cudaMalloc((void **) &d_result, 4 *sizeof(value_t));
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to allocate devices result (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_result, h_result,
4 * sizeof(value_t),cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy result from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("finish allocate device memory!\n");
// value_t val_arr[thidx];
// linearpart_t working_mat_copy_arr[thidx * POLY_NUM * 3];
// squarepart_t const_mat_arr[thidx * POLY_NUM];
int s_total_p0 = 0;
value_t *val_arr = (value_t*)calloc(thidx, sizeof(value_t));
linearpart_t *working_mat_copy_arr = (linearpart_t*)calloc(thidx * POLY_NUM * 3, sizeof(linearpart_t));
squarepart_t *const_mat_arr = (squarepart_t*)calloc(thidx * POLY_NUM, sizeof(squarepart_t));
for (int thi = 0; thi < thidx; thi++) {
value_t sol_num = 0;
value_t solutions[SOL_MAX_NUM][3];
//int sol_total = 0;
value_t val = (set_val + (value_t) thi) << ENUM_NUM;
val_arr[thi] = val;
for (int pi = 0; pi < POLY_NUM; pi++) {
working_mat[pi][0] = working_mat_file[pi][0];
working_mat[pi][1] = working_mat_file[pi][1];
working_mat[pi][2] = working_mat_file[pi][2];
const_mat[pi] = 0;
}
for (int pos = enum_num; pos < para_num; pos++) {
if (val & ((value_t) 1 << pos)) {
for (int pi = 0; pi < poly_num; pi++) {
working_mat[pi][0] ^= linear_mat[pos][pi][0];
working_mat[pi][1] ^= linear_mat[pos][pi][1];
working_mat[pi][2] ^= linear_mat[pos][pi][2];
}
for (int pi = 0; pi < poly_num; pi++) {
const_mat[pi] ^= square_mat[pos][pi];
}
}
}
for (int i = 0; i < poly_num; i++) {
working_mat_copy[i][0] = working_mat[i][0];
working_mat_copy[i][1] = working_mat[i][1];
working_mat_copy[i][2] = working_mat[i][2];
working_mat_copy_arr[thi * POLY_NUM * 3 + 3 * i] = working_mat_copy[i][0];
working_mat_copy_arr[thi * POLY_NUM * 3 + 3 * i + 1] = working_mat_copy[i][1];
working_mat_copy_arr[thi * POLY_NUM * 3 + 3 * i + 2] = working_mat_copy[i][2];
const_mat_arr[thi * POLY_NUM + i] = const_mat[i];
}
for (int pi = 0; pi < poly_num; pi++) {
value_t w = const_mat[pi] & val;
w = (w) ^ (w >> 32);
w = (w) ^ (w >> 16);
w = (w) ^ (w >> 8);
w = (w) ^ (w >> 4);
w = (w) ^ (w >> 2);
w = (w) ^ (w >> 1);
if (w & (value_t) 1) {
working_mat[pi][0] ^= (linearpart_t) 1;
}
}
sol_num = gauss_host(working_mat, POLY_NUM, UNKNOWN_NUM, solutions);
s_total_p0 += sol_num;
for (int s = 0; s < sol_num; s++) {
tKeccakLane state[25];
stateInit(state);
getStates(state, var_all, val, solutions[s]);
tKeccakLane state_cp[25];
for(int sn = 0; sn < 25; sn++){
state_cp[sn] = state[sn];
}
if(checkHashValue(state, hashvalue)){
//todo
FILE *out = fopen("final_messaged_state_online.txt","a+");
printf("we have done on GPU!!! val:%lu, sol:%lu %lu %lu\n",val,solutions[s][0],solutions[s][1],solutions[s][2]);
//fprintf(out,"we have done on GPU!!! val:%lu, sol:%lu %lu %lu\n",val,solutions[s][0],solutions[s][1],solutions[s][2]);
fprintf(out, "The messaged state:");
for (int sn = 0; sn < 25; sn++) {
if (sn % 5 == 0) {
fprintf(out, "\n");
}
fprintf(out, "%016lX ", state_cp[sn]);
}
fprintf(out, "\n\nThe output state:");
for(int sn = 0; sn < 25 ;sn++){
if(sn % 5 == 0){
fprintf(out, "\n");
}
fprintf(out, "%016lX ", state[sn]);
}
fclose(out);
printf("finish.\n");
exit(0);
}
}
}
printf("finish cpu computing!\n");
//begin device part
err = cudaMemcpy(d_val, val_arr, thidx * sizeof(value_t),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("Failed to copy value from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_working_mat_copy, working_mat_copy_arr,
thidx * 3 * poly_num * sizeof(linearpart_t), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy working_mat_copy from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_const_mat, const_mat_arr,
thidx * poly_num * sizeof(squarepart_t), cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy const_mat from host to device (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("enum num : %d\nblock num : %d\nthread num : %d\n", ENUM_NUM,
BLOCK_NUM, THREAD_NUM);
cudaEvent_t start1;
cudaEventCreate(&start1);
cudaEvent_t stop1;
cudaEventCreate(&stop1);
cudaEventRecord(start1, NULL);
printf("begin solve linear system!\n");
solveLinear<<<BLOCK_NUM, THREAD_NUM>>>(d_working_mat_copy, d_const_mat,
d_val, d_sol_total,d_result);
cudaEventRecord(stop1, NULL);
cudaEventSynchronize(stop1);
float msecTotal1 = 0.0f;
cudaEventElapsedTime(&msecTotal1, start1, stop1);
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Failed to launch solveLinear kernel (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
value_t h_sol_total[thidx];
err = cudaMemcpy(h_sol_total, d_sol_total, thidx * sizeof(value_t),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy total solution numbers from device to host (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_result, d_result,4 * sizeof(value_t),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr,
"Failed to copy result from device to host (error code %s)!\n",
cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
if(h_result[0]!=0 || h_result[1]!=0 || h_result[2]!=0 || h_result[3]!=0){
FILE *out = fopen("../data/state_files/final_messaged_state_online.txt","a+");
printf("we have done on GPU!!! val:%lu, sol:%lu %lu %lu\n",h_result[0],h_result[1],h_result[2],h_result[3]);
//fprintf(out,"we have done on GPU!!! val:%lu, sol:%lu %lu %lu\n",h_result[0],h_result[1],h_result[2],h_result[3]);
tKeccakLane state[25];
stateInit(state);
getStates(state, var_all, h_result[0], h_result+1);
tKeccakLane state_cp[25];
for (int sn = 0; sn < 25; sn++) {
state_cp[sn] = state[sn];
}
for (int i = 0; i < 3; i++) {
KeccakP1600Round(state, i);
}
fprintf(out, "The messaged state:");
for (int sn = 0; sn < 25; sn++) {
if (sn % 5 == 0) {
fprintf(out, "\n");
}
fprintf(out, "%016lX ", state_cp[sn]);
}
fprintf(out, "\n\nThe output state:");
for (int sn = 0; sn < 25; sn++) {
if (sn % 5 == 0) {
fprintf(out, "\n");
}
fprintf(out, "%016lX ", state[sn]);
}
fclose(out);
printf("finish.\n");
exit(0);
fclose(out);
}else{
FILE *out = fopen("../data/mat_files/result.txt","a+");
long sol_all_threads = s_total_p0;
for(int i = 0;i < thidx;i++){
sol_all_threads += h_sol_total[i];
}
printf("val : %lu~%lu ,find %lu solutions, none is correct...\n",set_val << ENUM_NUM ,(set_val << ENUM_NUM)+(THREAD_NUM * BLOCK_NUM) * (1 << ENUM_NUM) -1, sol_all_threads);
fprintf(out, "Part %d finished -- val : %lu~%lu ,find %lu solutions, none is correct...\n",atol(argv[1]), set_val << ENUM_NUM ,(set_val << ENUM_NUM)+(THREAD_NUM * BLOCK_NUM) * (1 << ENUM_NUM) -1, sol_all_threads);
fclose(out);
}
printf("time:%.3lf ms\n---------------------------------------\n", msecTotal1);
cudaFree(val_arr);
cudaFree(d_working_mat_copy);
cudaFree(d_const_mat);
cudaFree(d_val);
cudaFree(d_sol_total);
cudaFree(d_result);
}
#include "tensoroperations.cuh"
// Launch-configuration constants shared by every kernel in this file.
// THREAD_SIZE_XY (1024) is the total thread count per block; for 3D launches
// the x/y dimensions each get sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z) threads
// and the z dimension gets THREAD_SIZE_Z (64), i.e. 4 x 4 x 64 = 1024.
const int THREAD_SIZE_XY = 1 << 10;
const int THREAD_SIZE_Z = 1 << 6;
// Element-wise addition kernel: ptr3[i] = ptr1[i] + ptr2[i] for every i < size.
// One thread per element; out-of-range threads exit early.
__global__
void addElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size) return;
    ptr3[i] = ptr1[i] + ptr2[i];
}
// Adds two host arrays of length ptr_size element-wise on the GPU and returns
// a freshly allocated host array holding the sums.
std::unique_ptr<float[]> CUDAaddElementwise(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<float[]>& in_ptr2, int ptr_size) {
    const int bytes = ptr_size * sizeof(float);

    // Device buffers: two inputs, one output.
    float* d_a;
    float* d_b;
    float* d_out;
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_out, bytes);

    cudaMemcpy(d_a, in_ptr1.get(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, in_ptr2.get(), bytes, cudaMemcpyHostToDevice);

    // Ceil-divide so the last partial block is still covered.
    const int blocks = (ptr_size + THREAD_SIZE_XY - 1) / THREAD_SIZE_XY;
    addElementwiseD <<< blocks, THREAD_SIZE_XY >>> (ptr_size, d_a, d_b, d_out);

    std::unique_ptr<float[]> out_ptr(new float[ptr_size]);
    // Blocking copy; also synchronizes with the kernel above.
    cudaMemcpy(out_ptr.get(), d_out, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    return out_ptr;
}
// Element-wise subtraction kernel: ptr3[i] = ptr1[i] - ptr2[i] for every i < size.
__global__
void subtractElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size) return;
    ptr3[i] = ptr1[i] - ptr2[i];
}
// Subtracts in_ptr2 from in_ptr1 element-wise on the GPU (both of length
// ptr_size) and returns the differences in a new host array.
std::unique_ptr<float[]> CUDAsubtractElementwise(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<float[]>& in_ptr2, int ptr_size) {
    const int bytes = ptr_size * sizeof(float);

    float* d_lhs;
    float* d_rhs;
    float* d_diff;
    cudaMalloc(&d_lhs, bytes);
    cudaMalloc(&d_rhs, bytes);
    cudaMalloc(&d_diff, bytes);

    cudaMemcpy(d_lhs, in_ptr1.get(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_rhs, in_ptr2.get(), bytes, cudaMemcpyHostToDevice);

    // One thread per element, ceil-divided into blocks.
    const int blocks = (ptr_size + THREAD_SIZE_XY - 1) / THREAD_SIZE_XY;
    subtractElementwiseD <<< blocks, THREAD_SIZE_XY >>> (ptr_size, d_lhs, d_rhs, d_diff);

    std::unique_ptr<float[]> out_ptr(new float[ptr_size]);
    cudaMemcpy(out_ptr.get(), d_diff, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_lhs);
    cudaFree(d_rhs);
    cudaFree(d_diff);
    return out_ptr;
}
// Element-wise (Hadamard) product kernel: ptr3[i] = ptr1[i] * ptr2[i].
__global__
void multiplyElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size) return;
    ptr3[i] = ptr1[i] * ptr2[i];
}
// Multiplies two host arrays of length ptr_size element-wise on the GPU and
// returns the products in a new host array.
std::unique_ptr<float[]> CUDAmultiplyElementwise(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<float[]>& in_ptr2, int ptr_size) {
    const int bytes = ptr_size * sizeof(float);

    float* d_lhs;
    float* d_rhs;
    float* d_prod;
    cudaMalloc(&d_lhs, bytes);
    cudaMalloc(&d_rhs, bytes);
    cudaMalloc(&d_prod, bytes);

    cudaMemcpy(d_lhs, in_ptr1.get(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_rhs, in_ptr2.get(), bytes, cudaMemcpyHostToDevice);

    const int blocks = (ptr_size + THREAD_SIZE_XY - 1) / THREAD_SIZE_XY;
    multiplyElementwiseD <<< blocks, THREAD_SIZE_XY >>> (ptr_size, d_lhs, d_rhs, d_prod);

    std::unique_ptr<float[]> out_ptr(new float[ptr_size]);
    cudaMemcpy(out_ptr.get(), d_prod, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_lhs);
    cudaFree(d_rhs);
    cudaFree(d_prod);
    return out_ptr;
}
// Element-wise division kernel: ptr3[i] = ptr1[i] / ptr2[i].
// NOTE(review): no guard against ptr2[i] == 0 — division by zero yields
// IEEE inf/nan, matching the original behavior.
__global__
void divideElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= size) return;
    ptr3[i] = ptr1[i] / ptr2[i];
}
// Divides in_ptr1 by in_ptr2 element-wise on the GPU (both of length
// ptr_size) and returns the quotients in a new host array.
std::unique_ptr<float[]> CUDAdivideElementwise(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<float[]>& in_ptr2, int ptr_size) {
    const int bytes = ptr_size * sizeof(float);

    float* d_num;
    float* d_den;
    float* d_quot;
    cudaMalloc(&d_num, bytes);
    cudaMalloc(&d_den, bytes);
    cudaMalloc(&d_quot, bytes);

    cudaMemcpy(d_num, in_ptr1.get(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_den, in_ptr2.get(), bytes, cudaMemcpyHostToDevice);

    const int blocks = (ptr_size + THREAD_SIZE_XY - 1) / THREAD_SIZE_XY;
    divideElementwiseD <<< blocks, THREAD_SIZE_XY >>> (ptr_size, d_num, d_den, d_quot);

    std::unique_ptr<float[]> out_ptr(new float[ptr_size]);
    cudaMemcpy(out_ptr.get(), d_quot, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_num);
    cudaFree(d_den);
    cudaFree(d_quot);
    return out_ptr;
}
// Element-wise power kernel: ptr3[i] = ptr1[i] raised to ptr2[i].
// BUG FIX: the original called std::pow, which is not a __device__ function
// (std:: math overloads are host-only) and whose double overload would
// silently promote every operand to double precision on the GPU. Use the
// single-precision CUDA math function powf instead.
__global__
void powerElementwiseD(int size, float* ptr1, float* ptr2, float* ptr3) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) ptr3[idx] = powf(ptr1[idx], ptr2[idx]);
}
// Raises in_ptr1 to the power in_ptr2 element-wise on the GPU (both of
// length ptr_size) and returns the results in a new host array.
std::unique_ptr<float[]> CUDApowerElementwise(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<float[]>& in_ptr2, int ptr_size) {
    const int bytes = ptr_size * sizeof(float);

    float* d_base;
    float* d_exp;
    float* d_pow;
    cudaMalloc(&d_base, bytes);
    cudaMalloc(&d_exp, bytes);
    cudaMalloc(&d_pow, bytes);

    cudaMemcpy(d_base, in_ptr1.get(), bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_exp, in_ptr2.get(), bytes, cudaMemcpyHostToDevice);

    const int blocks = (ptr_size + THREAD_SIZE_XY - 1) / THREAD_SIZE_XY;
    powerElementwiseD <<< blocks, THREAD_SIZE_XY >>> (ptr_size, d_base, d_exp, d_pow);

    std::unique_ptr<float[]> out_ptr(new float[ptr_size]);
    cudaMemcpy(out_ptr.get(), d_pow, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_base);
    cudaFree(d_exp);
    cudaFree(d_pow);
    return out_ptr;
}
// Per-slice transpose kernel. For each depth slice, writes
// out[row][col] = in[col][row]: the output element at (row, col) with row
// stride `cols` is read from the input with row stride `rows`, i.e. the
// input slice is assumed stored as (cols x rows) and the output as
// (rows x cols). Both tensors are flat (depths x rows x cols)-sized buffers.
// NOTE(review): reads of ptr1 with stride `rows` across adjacent `col`
// threads are uncoalesced; a shared-memory tiled transpose would be faster.
__global__
void transposeD(int cols, int rows, int depths, float* ptr1, float* ptr2) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int depth = blockIdx.z * blockDim.z + threadIdx.z;
    // Of course this is going to need a Z coordinate for the infinite dimensions it can take
    if ((col < cols) && (row < rows) && (depth < depths)) ptr2[depth * rows * cols + row * cols + col] = ptr1[depth * rows * cols + col * rows + row];
}
// Transposes every 2D slice of the tensor on the GPU. dims[0] = cols,
// dims[1] = rows; all higher dimensions are flattened into `depths`.
// Returns a new host array of the same total size.
std::unique_ptr<float[]> CUDAtranspose(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<int[]>& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size) {
    int cols = in_ptr1_dims[0];
    int rows = in_ptr1_dims[1];
    // Flatten every dimension past the first two into a single depth count.
    int depths = 1;
    for (int i = 2; i < in_ptr1_dims_size; i++) {
        depths *= in_ptr1_dims[i];
    }
    int gpu_ptr1_bytes = ptr1_size * sizeof(float);
    float* gpu_ptr1;
    float* gpu_ptr2;
    cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);
    cudaMalloc(&gpu_ptr2, gpu_ptr1_bytes);
    cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);
    // Block shape: sqrt(XY/Z) x sqrt(XY/Z) x Z threads (4 x 4 x 64 = 1024).
    int block_xy = std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z);
    int grid_cols = (cols + block_xy - 1) / block_xy;
    int grid_rows = (rows + block_xy - 1) / block_xy;
    int grid_depths = (depths + THREAD_SIZE_Z - 1) / THREAD_SIZE_Z;
    // BUG FIX: the y-dimension previously reused grid_cols, so slices with
    // rows > cols were only partially transposed. Use grid_rows for y.
    dim3 gridSize(grid_cols, grid_rows, grid_depths);
    dim3 threadSize(block_xy, block_xy, THREAD_SIZE_Z);
    transposeD <<< gridSize, threadSize >>> (cols, rows, depths, gpu_ptr1, gpu_ptr2);
    std::unique_ptr<float[]> out_ptr(new float[ptr1_size]);
    cudaMemcpy(out_ptr.get(), gpu_ptr2, gpu_ptr1_bytes, cudaMemcpyDeviceToHost);
    cudaFree(gpu_ptr1);
    cudaFree(gpu_ptr2);
    return out_ptr;
}
// Batched matrix-multiply kernel: for each depth slice,
// ptr3 (rows x cols) = ptr1 (rows x shared) * ptr2 (shared x cols).
// One thread computes one output element via a dot product over `shared`.
__global__
void multiplyD(int cols, int shared, int rows, int depths, float* ptr1, float* ptr2, float* ptr3) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int depth = blockIdx.z * blockDim.z + threadIdx.z;
    float sum;
    if ((col < cols) && (row < rows) && (depth < depths)) {
        sum = 0;
        for (int i = 0; i < shared; i++) {
            // BUG FIX: both operands previously used rows*cols as the
            // per-depth slice stride; ptr1 slices are rows*shared elements and
            // ptr2 slices are shared*cols, so every depth > 0 read the wrong
            // slice whenever shared != cols (ptr1) or shared != rows (ptr2).
            sum += ptr1[depth * rows * shared + row * shared + i] * ptr2[depth * shared * cols + i * cols + col];
        }
        ptr3[depth * rows * cols + row * cols + col] = sum;
    }
}
// Batched matrix multiply on the GPU: per depth slice, computes
// out (ptr1_rows x ptr2_cols) = ptr1 (ptr1_rows x shared) * ptr2 (shared x ptr2_cols).
// dims[0] is cols and dims[1] is rows for each operand; trailing dimensions
// of the two operands are assumed identical and are flattened into `depths`.
std::unique_ptr<float[]> CUDAmultiply(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<int[]>& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, std::unique_ptr<float[]>& in_ptr2, std::unique_ptr<int[]>& in_ptr2_dims, int in_ptr2_dims_size, int ptr2_size) {
    int ptr1_rows = in_ptr1_dims[1];
    int ptr2_cols = in_ptr2_dims[0];
    int shared_size = in_ptr1_dims[0];
    int depths = 1;
    for (int i = 2; i < in_ptr1_dims_size; i++) { // In theory the rest of the dims after should be the exact same if we assume they are correct
        depths *= in_ptr1_dims[i];
    }
    int gpu_ptr1_bytes = ptr1_size * sizeof(float);
    int gpu_ptr2_bytes = ptr2_size * sizeof(float);
    int gpu_ptr3_bytes = depths * ptr1_rows * ptr2_cols * sizeof(float);
    float* gpu_ptr1;
    float* gpu_ptr2;
    float* gpu_ptr3;
    cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);
    cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);
    cudaMalloc(&gpu_ptr3, gpu_ptr3_bytes);
    cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_ptr2, in_ptr2.get(), gpu_ptr2_bytes, cudaMemcpyHostToDevice);
    // Block shape: sqrt(XY/Z) x sqrt(XY/Z) x Z threads (4 x 4 x 64 = 1024).
    int block_xy = std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z);
    int grid_cols = (ptr2_cols + block_xy - 1) / block_xy;
    int grid_rows = (ptr1_rows + block_xy - 1) / block_xy;
    int grid_depths = (depths + THREAD_SIZE_Z - 1) / THREAD_SIZE_Z;
    // BUG FIX: the y-dimension previously reused grid_cols, so outputs with
    // more rows than columns were only partially computed. Use grid_rows.
    dim3 gridSize(grid_cols, grid_rows, grid_depths);
    dim3 threadSize(block_xy, block_xy, THREAD_SIZE_Z);
    multiplyD <<< gridSize, threadSize >>> (ptr2_cols, shared_size, ptr1_rows, depths, gpu_ptr1, gpu_ptr2, gpu_ptr3);
    std::unique_ptr<float[]> out_ptr(new float[depths * ptr1_rows * ptr2_cols]);
    cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr3_bytes, cudaMemcpyDeviceToHost);
    cudaFree(gpu_ptr1);
    cudaFree(gpu_ptr2);
    cudaFree(gpu_ptr3);
    return out_ptr;
}
// 180-degree rotation kernel: each element of a (rows x cols) slice is moved
// to the position mirrored in both axes, independently for every depth slice.
__global__
void rotateD(int cols, int rows, int depths, float* ptr1, float* ptr2) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int depth = blockIdx.z * blockDim.z + threadIdx.z;
    if (col >= cols || row >= rows || depth >= depths) return;
    int slice = depth * rows * cols;
    ptr2[slice + (rows - row - 1) * cols + (cols - col - 1)] = ptr1[slice + row * cols + col];
}
// Rotates every 2D slice of the tensor by 180 degrees on the GPU.
// dims[0] = cols, dims[1] = rows; higher dimensions are flattened into depths.
std::unique_ptr<float[]> CUDArotate(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<int[]>& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size) {
    int ptr1_cols = in_ptr1_dims[0];
    int ptr1_rows = in_ptr1_dims[1];
    int depths = 1;
    for (int i = 2; i < in_ptr1_dims_size; i++) {
        depths *= in_ptr1_dims[i];
    }
    int gpu_ptr_bytes = ptr1_size * sizeof(float);
    float* gpu_ptr1;
    float* gpu_ptr2;
    cudaMalloc(&gpu_ptr1, gpu_ptr_bytes);
    cudaMalloc(&gpu_ptr2, gpu_ptr_bytes);
    cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr_bytes, cudaMemcpyHostToDevice);
    // Block shape: sqrt(XY/Z) x sqrt(XY/Z) x Z threads (4 x 4 x 64 = 1024).
    int block_xy = std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z);
    int grid_cols = (ptr1_cols + block_xy - 1) / block_xy;
    int grid_rows = (ptr1_rows + block_xy - 1) / block_xy;
    int grid_depths = (depths + THREAD_SIZE_Z - 1) / THREAD_SIZE_Z;
    // BUG FIX: the y-dimension previously reused grid_cols, leaving rows
    // beyond grid_cols * block_xy unrotated. Use grid_rows for y.
    dim3 gridSize(grid_cols, grid_rows, grid_depths);
    dim3 threadSize(block_xy, block_xy, THREAD_SIZE_Z);
    rotateD <<< gridSize, threadSize >>> (ptr1_cols, ptr1_rows, depths, gpu_ptr1, gpu_ptr2);
    std::unique_ptr<float[]> out_ptr(new float[ptr1_size]);
    cudaMemcpy(out_ptr.get(), gpu_ptr2, gpu_ptr_bytes, cudaMemcpyDeviceToHost);
    cudaFree(gpu_ptr1);
    cudaFree(gpu_ptr2);
    return out_ptr;
}
// Max-pooling kernel. Each thread at a stride-aligned (col, row) position
// scans its kernel window and writes the maximum into the pooled output.
// Pooled dimensions: (dim - kernel + stride) / stride per axis.
__global__
void maxPoolingD(int cols, int rows, int depths, int kernel_cols, int kernel_rows, int stride_cols, int stride_rows, float* ptr1, float* ptr2) {
    int col = blockIdx.x * blockDim.x + threadIdx.x; // Col of the unpooled ptr
    int row = blockIdx.y * blockDim.y + threadIdx.y; // Row of the unpooled ptr
    int depth = blockIdx.z * blockDim.z + threadIdx.z; // Depth of the unpooled ptr
    if ((col < cols - kernel_cols + 1) && (row < rows - kernel_rows + 1) && (depth < depths)) {
        if ((col % stride_cols == 0) && (row % stride_rows == 0)) {
            // BUG FIX: the running maximum and the comparison value were
            // declared int, silently truncating the float data (e.g. 0.9 and
            // 0.1 both became 0). Keep them in float.
            float max = ptr1[depth * rows * cols + row * cols + col];
            float comparison;
            for (int i = 0; i < kernel_rows; i++) {
                for (int j = 0; j < kernel_cols; j++) {
                    comparison = ptr1[depth * rows * cols + (row + i) * cols + (col + j)];
                    if (max < comparison) max = comparison;
                }
            }
            int pooled_cols_size = (cols - kernel_cols + stride_cols) / stride_cols;
            int pooled_rows_size = (rows - kernel_rows + stride_rows) / stride_rows;
            int pooled_col = (col - kernel_cols + stride_cols) / stride_cols;
            if (pooled_col < 0) pooled_col = 0;
            int pooled_row = (row - kernel_rows + stride_rows) / stride_rows;
            if (pooled_row < 0) pooled_row = 0;
            ptr2[depth * pooled_rows_size * pooled_cols_size + pooled_row * pooled_cols_size + pooled_col] = max;
        }
    }
}
// Max-pools every 2D slice of the tensor on the GPU with the given kernel
// and stride sizes. dims[0] = cols, dims[1] = rows; higher dimensions are
// flattened into depths. Returns the pooled tensor.
std::unique_ptr<float[]> CUDAmaxPooling(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<int[]>& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, int kernel_cols, int kernel_rows, int stride_cols, int stride_rows) {
    // This is raw dimensions
    int ptr1_cols = in_ptr1_dims[0];
    int ptr1_rows = in_ptr1_dims[1];
    int depths = 1;
    for (int i = 2; i < in_ptr1_dims_size; i++) {
        depths *= in_ptr1_dims[i];
    }
    // This is pooled dims
    int ptr2_cols = (ptr1_cols - kernel_cols + stride_cols) / stride_cols;
    int ptr2_rows = (ptr1_rows - kernel_rows + stride_rows) / stride_rows;
    int ptr2_size = ptr2_cols * ptr2_rows * depths;
    int gpu_ptr1_bytes = ptr1_size * sizeof(float);
    int gpu_ptr2_bytes = ptr2_size * sizeof(float);
    float* gpu_ptr1;
    float* gpu_ptr2;
    cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);
    cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);
    cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);
    // Block shape: sqrt(XY/Z) x sqrt(XY/Z) x Z threads (4 x 4 x 64 = 1024).
    int block_xy = std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z);
    int grid_cols = (ptr1_cols + block_xy - 1) / block_xy;
    int grid_rows = (ptr1_rows + block_xy - 1) / block_xy;
    int grid_depths = (depths + THREAD_SIZE_Z - 1) / THREAD_SIZE_Z;
    // BUG FIX: the y-dimension previously reused grid_cols. Use grid_rows.
    dim3 gridSize(grid_cols, grid_rows, grid_depths);
    dim3 threadSize(block_xy, block_xy, THREAD_SIZE_Z);
    // BUG FIX: the launch previously passed kernel_rows/kernel_cols swapped
    // and stride_cols twice (stride_rows was never forwarded). Pass the
    // arguments in the kernel's declared order.
    maxPoolingD <<< gridSize, threadSize >>> (ptr1_cols, ptr1_rows, depths, kernel_cols, kernel_rows, stride_cols, stride_rows, gpu_ptr1, gpu_ptr2);
    std::unique_ptr<float[]> out_ptr(new float[ptr2_size]);
    cudaMemcpy(out_ptr.get(), gpu_ptr2, gpu_ptr2_bytes, cudaMemcpyDeviceToHost);
    cudaFree(gpu_ptr1);
    cudaFree(gpu_ptr2);
    return out_ptr;
}
// Max-pooling backward kernel: for each pooling window, finds the argmax of
// the forward input (ptr1) and accumulates the corresponding pooled gradient
// (ptr2) into that position of the unpooled gradient (ptr3).
// NOTE(review): ptr3 is accumulated with += and therefore must be
// zero-initialized by the caller before launch.
__global__
void poolingDerivD(int cols, int rows, int depths, int kernel_cols, int kernel_rows, int stride_cols, int stride_rows, float* ptr1, float* ptr2, float* ptr3) {
    int col = blockIdx.x * blockDim.x + threadIdx.x; // Col of the unpooled ptr
    int row = blockIdx.y * blockDim.y + threadIdx.y; // Row of the unpooled ptr
    int depth = blockIdx.z * blockDim.z + threadIdx.z; // Depth of the unpooled ptr
    if ((col < cols - kernel_cols + 1) && (row < rows - kernel_rows + 1) && (depth < depths)) {
        if ((col % stride_cols == 0) && (row % stride_rows == 0)) {
            // BUG FIX: max/comparison were declared int, truncating float data
            // and thereby selecting the wrong argmax. Keep them in float.
            float max = ptr1[depth * rows * cols + row * cols + col];
            int argmax_col = 0;
            int argmax_row = 0;
            float comparison;
            for (int i = 0; i < kernel_rows; i++) {
                for (int j = 0; j < kernel_cols; j++) {
                    comparison = ptr1[depth * rows * cols + (row + i) * cols + (col + j)];
                    if (max < comparison) {
                        max = comparison;
                        argmax_col = j;
                        argmax_row = i;
                    }
                }
            }
            int pooled_cols_size = (cols - kernel_cols + stride_cols) / stride_cols;
            int pooled_rows_size = (rows - kernel_rows + stride_rows) / stride_rows;
            int pooled_col = (col - kernel_cols + stride_cols) / stride_cols;
            if (pooled_col < 0) pooled_col = 0;
            int pooled_row = (row - kernel_rows + stride_rows) / stride_rows;
            if (pooled_row < 0) pooled_row = 0;
            ptr3[depth * rows * cols + (row + argmax_row) * cols + (col + argmax_col)] += ptr2[depth * pooled_rows_size * pooled_cols_size + pooled_row * pooled_cols_size + pooled_col];
        }
    }
}
// Backpropagates pooled gradients (in_ptr2) through a max-pooling of the
// forward input (in_ptr1), returning the unpooled gradient of size ptr1_size.
std::unique_ptr<float[]> CUDApoolingDeriv(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<int[]>& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, std::unique_ptr<float[]>& in_ptr2, std::unique_ptr<int[]>& in_ptr2_dims, int in_ptr2_dims_size, int ptr2_size, int kernel_cols, int kernel_rows, int stride_cols, int stride_rows) {
    int ptr1_cols = in_ptr1_dims[0]; // This is the full size unkerneled
    int ptr1_rows = in_ptr1_dims[1];
    int depths = 1;
    for (int i = 2; i < in_ptr1_dims_size; i++) {
        depths *= in_ptr1_dims[i];
    }
    int gpu_ptr1_bytes = ptr1_size * sizeof(float);
    int gpu_ptr2_bytes = ptr2_size * sizeof(float);
    float* gpu_ptr1;
    float* gpu_ptr2;
    float* gpu_ptr3;
    cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);
    cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);
    cudaMalloc(&gpu_ptr3, gpu_ptr1_bytes);
    // BUG FIX: the kernel accumulates into gpu_ptr3 with +=, but the buffer
    // was never zeroed, so the output contained uninitialized garbage.
    cudaMemset(gpu_ptr3, 0, gpu_ptr1_bytes);
    cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_ptr2, in_ptr2.get(), gpu_ptr2_bytes, cudaMemcpyHostToDevice);
    // Block shape: sqrt(XY/Z) x sqrt(XY/Z) x Z threads (4 x 4 x 64 = 1024).
    int block_xy = std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z);
    int grid_cols = (ptr1_cols + block_xy - 1) / block_xy;
    int grid_rows = (ptr1_rows + block_xy - 1) / block_xy;
    int grid_depths = (depths + THREAD_SIZE_Z - 1) / THREAD_SIZE_Z;
    // BUG FIX: the y-dimension previously reused grid_cols. Use grid_rows.
    dim3 gridSize(grid_cols, grid_rows, grid_depths);
    dim3 threadSize(block_xy, block_xy, THREAD_SIZE_Z);
    poolingDerivD <<< gridSize, threadSize >>> (ptr1_cols, ptr1_rows, depths, kernel_cols, kernel_rows, stride_cols, stride_rows, gpu_ptr1, gpu_ptr2, gpu_ptr3);
    std::unique_ptr<float[]> out_ptr(new float[ptr1_size]);
    cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr1_bytes, cudaMemcpyDeviceToHost);
    cudaFree(gpu_ptr1);
    cudaFree(gpu_ptr2);
    cudaFree(gpu_ptr3);
    return out_ptr;
}
// Duplication kernel: tiles the `depths` input slices cyclically across the
// `duped_depths` output slices (output depth d copies input depth d % depths).
__global__
void dupeD(int cols, int rows, int depths, int duped_depths, float* ptr1, float* ptr2) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int depth = blockIdx.z * blockDim.z + threadIdx.z; // depth indexes the duplicated output
    if (col >= cols || row >= rows || depth >= duped_depths) return;
    int src_depth = depth % depths;
    int offset = row * cols + col;
    ptr2[depth * rows * cols + offset] = ptr1[src_depth * rows * cols + offset];
}
// Duplicates the tensor's depth slices dupe_size times (cyclic tiling) and
// returns the enlarged tensor of ptr1_size * dupe_size elements.
std::unique_ptr<float[]> CUDAdupe(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<int[]>& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, int dupe_size) {
    int ptr1_cols = in_ptr1_dims[0];
    int ptr1_rows = in_ptr1_dims[1];
    int depths = 1;
    for (int i = 2; i < in_ptr1_dims_size; i++) {
        depths *= in_ptr1_dims[i];
    }
    int ptr2_depths = dupe_size * depths;
    int ptr2_size = ptr1_cols * ptr1_rows * ptr2_depths;
    int gpu_ptr1_bytes = ptr1_size * sizeof(float);
    int gpu_ptr2_bytes = ptr2_size * sizeof(float);
    float* gpu_ptr1;
    float* gpu_ptr2;
    cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);
    cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);
    cudaMemcpy(gpu_ptr1, in_ptr1.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);
    // Block shape: sqrt(XY/Z) x sqrt(XY/Z) x Z threads (4 x 4 x 64 = 1024).
    int block_xy = std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z);
    int grid_cols = (ptr1_cols + block_xy - 1) / block_xy;
    int grid_rows = (ptr1_rows + block_xy - 1) / block_xy;
    int grid_depths = (ptr2_depths + THREAD_SIZE_Z - 1) / THREAD_SIZE_Z; // sized by the duplicated depth
    // BUG FIX: the y-dimension previously reused grid_cols, so tensors with
    // more rows than columns were only partially duplicated. Use grid_rows.
    dim3 gridSize(grid_cols, grid_rows, grid_depths);
    dim3 threadSize(block_xy, block_xy, THREAD_SIZE_Z);
    dupeD <<< gridSize, threadSize >>> (ptr1_cols, ptr1_rows, depths, ptr2_depths, gpu_ptr1, gpu_ptr2);
    std::unique_ptr<float[]> out_ptr(new float[ptr2_size]);
    cudaMemcpy(out_ptr.get(), gpu_ptr2, gpu_ptr2_bytes, cudaMemcpyDeviceToHost);
    cudaFree(gpu_ptr1);
    cudaFree(gpu_ptr2);
    return out_ptr;
}
// Convolution kernel: at each stride-aligned (col, row) of a (rows x cols)
// input slice, computes the dot product of the (kernel_rows x kernel_cols)
// window of ptr1 with the matching depth slice of the weight tensor ptr2 and
// writes it into the strided output ptr3.
__global__
void convolutionD(int cols, int rows, int kernel_cols, int kernel_rows, int depths, int stride_cols, int stride_rows, float* ptr1, float* ptr2, float* ptr3) {
    // In here we take the correct stride and perform the convolution over that desired block for each element
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int depth = blockIdx.z * blockDim.z + threadIdx.z;
    if ((col < cols - kernel_cols + 1) && (row < rows - kernel_rows + 1) && (depth < depths)) {
        if ((col % stride_cols == 0) && (row % stride_rows == 0)) {
            float weighted = 0;
            for (int i = 0; i < kernel_rows; i++) {
                for (int j = 0; j < kernel_cols; j++) {
                    // BUG FIX: the weight tensor's per-depth stride was
                    // rows*cols (the input slice size); each weight slice is
                    // kernel_rows*kernel_cols elements, so every depth > 0
                    // read out of the intended weight slice.
                    weighted += ptr1[depth * rows * cols + (row + i) * cols + (col + j)] * ptr2[depth * kernel_rows * kernel_cols + i * kernel_cols + j];
                }
            }
            int weighted_cols_size = (cols - kernel_cols + stride_cols) / stride_cols;
            int weighted_rows_size = (rows - kernel_rows + stride_rows) / stride_rows;
            int weighted_col = (col - kernel_cols + stride_cols) / stride_cols;
            if (weighted_col < 0) weighted_col = 0;
            int weighted_row = (row - kernel_rows + stride_rows) / stride_rows;
            if (weighted_row < 0) weighted_row = 0;
            ptr3[depth * weighted_rows_size * weighted_cols_size + weighted_row * weighted_cols_size + weighted_col] = weighted;
        }
    }
}
// No bias is required for this
// std::unique_ptr<float[]> CUDAconvolution(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<int[]>& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, std::unique_ptr<float[]>& in_ptr2, std::unique_ptr<int[]>& in_ptr2_dims, int in_ptr2_dims_size, int ptr2_size, int stride_cols, int stride_rows) {
// // Convolve layer
// int ptr1_cols = in_ptr1_dims[0];
// int ptr1_rows = in_ptr1_dims[1];
// int ptr1_depths = 1;
// for (int i = 0; i < in_ptr1_dims_size; i++) {
// ptr1_depths *= in_ptr1_dims[i];
// }
// // Kernel
// int ptr2_cols = in_ptr2_dims[0];
// int ptr2_rows = in_ptr2_dims[1];
// int ptr2_depths = 1;
// for (int i = 0; i < in_ptr2_dims_size; i++) {
// ptr2_depths *= in_ptr2_dims[i];
// }
// // This will be the amount to scale the pointers for its depth size
// int dupe_ptr1 = 1;
// if (in_ptr2_dims_size > 3) dupe_ptr1 = in_ptr2_dims[3];
// int dupe_ptr2 = 1;
// if (in_ptr1_dims_size > 3) dupe_ptr2 = in_ptr1_dims[3];
// // We see that the dupe function duplicates every depth in each fourth dimension
// std::unique_ptr<float[]> ptr1_duped = CUDAdupe(in_ptr1, in_ptr1_dims, in_ptr1_dims_size, ptr1_size, dupe_ptr1); // This will be the ptr1 that has been scaled to match the filter sizes
// std::unique_ptr<float[]> ptr2_duped = CUDAdupe(in_ptr2, in_ptr2_dims, in_ptr2_dims_size, ptr2_size, dupe_ptr2); // This will scale the kernel to match the amount of input blocks there are
// int ptr1_duped_size = ptr1_size * dupe_ptr1;
// int ptr2_duped_size = ptr2_size * dupe_ptr2; // This part could be the problem?
// // This part is all safe
// int ptr3_cols = (ptr1_cols - ptr2_cols + stride_cols) / stride_cols;
// int ptr3_rows = (ptr1_rows - ptr2_rows + stride_rows) / stride_rows;
// int ptr3_depths = dupe_ptr1 * ptr1_depths;
// int ptr3_size = ptr3_depths * ptr3_rows * ptr3_cols;
// int gpu_ptr1_bytes = ptr1_duped_size * sizeof(float); // These must be the wrong allocation sizes
// int gpu_ptr2_bytes = ptr2_duped_size * sizeof(float);
// int gpu_ptr3_bytes = ptr3_size * sizeof(float);
// float* gpu_ptr1; // Convolved
// float* gpu_ptr2; // Kernel
// float* gpu_ptr3; // Output
// cudaMalloc(&gpu_ptr1, gpu_ptr1_bytes);
// cudaMalloc(&gpu_ptr2, gpu_ptr2_bytes);
// cudaMalloc(&gpu_ptr3, gpu_ptr3_bytes);
// cudaMemcpy(gpu_ptr1, ptr1_duped.get(), gpu_ptr1_bytes, cudaMemcpyHostToDevice);
// cudaMemcpy(gpu_ptr2, ptr2_duped.get(), gpu_ptr2_bytes, cudaMemcpyHostToDevice); // The memory allocation for this one is wrong
// int grid_cols = (ptr3_cols + std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z) - 1) / std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z);
// int grid_rows = (ptr3_rows + std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z) - 1) / std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z);
// int grid_depths = (ptr3_depths + THREAD_SIZE_Z - 1) / THREAD_SIZE_Z;
// dim3 gridSize(grid_cols, grid_cols, grid_depths);
// dim3 threadSize(std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z), std::sqrt(THREAD_SIZE_XY / THREAD_SIZE_Z), THREAD_SIZE_Z);
// std::unique_ptr<float[]> out_ptr(new float[ptr3_size]);
// cudaMemcpy(out_ptr.get(), gpu_ptr3, gpu_ptr3_bytes, cudaMemcpyDeviceToHost);
// cudaFree(gpu_ptr1);
// cudaFree(gpu_ptr2);
// cudaFree(gpu_ptr3);
// return out_ptr;
// }
// std::unique_ptr<float[]> CUDAconvolution(std::unique_ptr<float[]>& in_ptr1, std::unique_ptr<int[]>& in_ptr1_dims, int in_ptr1_dims_size, int ptr1_size, std::unique_ptr<float[]>& in_ptr2, std::unique_ptr<int[]>& in_ptr2_dims, int in_ptr2_dims_size, int ptr2_size, int stride_cols, int stride_rows) {
// int ptr1_cols = ;
// }
// New Pseudo:
// Inputs: A layered 4 dimensional input block
// A layered 4 dimensional weight block (with the same depth as those of the filters)
// The third dimension of each should line up but not the fourth dimension
// Scaling: For the input block scale them to be the same size as the fourth dimension of the weight block
// For the weight blocks, condense it all into a single 3d layer and then scale them by the fourth dimension of the input block
// Post Scaling: Turn the scaled input block into a single three dimensional layer (do this by multiplying the depth by the rest of the size)
// Turn the scaled weight block into a big single three dimensional block too
// Remaining steps: Perform the convolution across every different subsection
// Output it as a block with dimensions of the new rows and cols, the depth of the original depth of the input block and the fourth dimension of the kernels
// Post processing ----------- (NOT NEEDED)
// Do the sum across all of the fourth dimensions into a single third dimension (or something)
// Add the bias term to each respective element
// Thoughts?
// How would this deal with a block size larger than four dimensions?
// To do so it appears that the dupe function is broken - It does not perform the duplicates properly for just the fourth dimensions, lets check this out |
9,159 | #include <stdlib.h>
#include <stdio.h>
#define TEST_SIZE 35
#define BLOCK_WIDTH 4
#define CEILING_DIVIDE(X, Y) (1 + (((X) - 1) / (Y)))
// Computes a blockwise exclusive sum scan (Hillis-Steele, one tile per block).
// d_in:    input values (n elements)
// d_out:   exclusive scan of d_in within each block; every block's first
//          output element is 0 (mapScan later adds the scanned block totals)
// d_total: per-block inclusive sums (gridDim.x entries)
__global__ void partialScan(unsigned int *d_in,
                            unsigned int *d_out,
                            unsigned int *d_total,
                            size_t n)
{
    __shared__ unsigned int temp[BLOCK_WIDTH];
    int tx = threadIdx.x;
    int bx = blockIdx.x;
    int index = BLOCK_WIDTH * bx + tx;
    // Load this block's tile; pad the tail with 0 so the scan stays valid.
    temp[tx] = (index < n) ? d_in[index] : 0;
    __syncthreads();
    // Inclusive Hillis-Steele scan. Stage each read in a register and fence
    // it from the write: the original in-place `temp[tx+offset] += temp[tx]`
    // raced reads against concurrent writes of the same slots.
    for (int offset = 1; offset < BLOCK_WIDTH; offset <<= 1) {
        unsigned int addend = (tx >= offset) ? temp[tx - offset] : 0;
        __syncthreads();
        if (tx >= offset) {
            temp[tx] += addend;
        }
        __syncthreads();
    }
    // Shift right by one on output so the result is exclusive. The first
    // element of EVERY block is 0 (the original only zeroed d_out[0] and
    // left the other block starts uninitialized).
    if (tx == 0 && index < n) {
        d_out[index] = 0;
    }
    if (tx + 1 < BLOCK_WIDTH && index + 1 < n) {
        d_out[index + 1] = temp[tx];
    }
    // Store the inclusive total of this block (one write suffices; the
    // original had every thread store the same value).
    if (tx == 0) {
        d_total[bx] = temp[BLOCK_WIDTH - 1];
    }
}
// Adds each block's scanned total onto every element of that block, turning
// the per-block exclusive scans produced by partialScan into a global scan.
__global__ void mapScan(unsigned int *d_array, unsigned int *d_total, size_t n) {
    int globalIdx = BLOCK_WIDTH * blockIdx.x + threadIdx.x;
    if (globalIdx >= n) {
        return;
    }
    d_array[globalIdx] += d_total[blockIdx.x];
}
// Compute exclusive sum scan for arbitrary sized array (device pointers as input)
// Strategy: scan each BLOCK_WIDTH-sized tile independently (partialScan),
// recursively scan the per-tile totals, then add those scanned totals back
// onto every tile (mapScan). Recursion depth is O(log_BLOCK_WIDTH(n)).
void totalScan(unsigned int *d_in, unsigned int *d_out, size_t n) {
    size_t numBlocks = CEILING_DIVIDE(n, BLOCK_WIDTH);
    unsigned int *d_total;
    // Per-block inclusive sums, filled by partialScan.
    cudaMalloc(&d_total, sizeof(unsigned int) * numBlocks);
    cudaMemset(d_total, 0, sizeof(unsigned int) * numBlocks);
    partialScan<<<numBlocks, BLOCK_WIDTH>>>(d_in, d_out, d_total, n);
    // A single block needs no cross-block fix-up; otherwise scan the totals
    // (terminates because numBlocks < n whenever n > 1).
    if(numBlocks > 1) {
        unsigned int *d_total_scanned;
        cudaMalloc(&d_total_scanned, sizeof(unsigned int) * numBlocks);
        cudaMemset(d_total_scanned, 0, sizeof(unsigned int) * numBlocks);
        totalScan(d_total, d_total_scanned, numBlocks);
        mapScan<<<numBlocks, BLOCK_WIDTH>>>(d_out, d_total_scanned, n);
        cudaFree(d_total_scanned);
    }
    cudaFree(d_total);
}
////////////////////////////////////////////////////////////////////////////////
// Wrapper for totalScan taking host pointers: stages the input on the
// device, runs the scan there, and copies the result back to h_out.
void totalScanHost(unsigned int *h_in, unsigned int *h_out, size_t n) {
    const size_t bytes = sizeof(unsigned int) * n;
    unsigned int *d_in = NULL;
    unsigned int *d_out = NULL;
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, bytes);
    cudaMemcpy(d_in, h_in, bytes, cudaMemcpyHostToDevice);
    totalScan(d_in, d_out, n);
    cudaMemcpy(h_out, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
// Smoke test: exclusive scan of 1..TEST_SIZE, input and output printed in a
// form that pastes into Python/Matlab.
int main(int argc, char **argv) {
    unsigned int *h_in;
    unsigned int *h_out;
    size_t memsize = sizeof(unsigned int) * TEST_SIZE;
    h_in = (unsigned int*)malloc(memsize);
    h_out = (unsigned int*)malloc(memsize);
    // malloc can fail; the original dereferenced the results unchecked.
    if (h_in == NULL || h_out == NULL) {
        fprintf(stderr, "Error: host allocation failed\n");
        return 1;
    }
    // Test values 1 .. TEST_SIZE
    for(int i=0; i<TEST_SIZE; i++){ h_in[i] = i+1; }
    // Compute
    totalScanHost(h_in, h_out, TEST_SIZE);
    // Print input ("%u": the values are unsigned int; "%d" was a mismatch)
    printf("h_in = [ ");
    for(int i=0; i<TEST_SIZE; i++){ printf("%u ", h_in[i]); }
    printf("];\n");
    // Print output
    printf("h_out = [ ");
    for(int i=0; i<TEST_SIZE; i++){ printf("%u ", h_out[i]); }
    printf("];\n");
    free(h_in);
    free(h_out);
    return 0;
}
|
9,160 | #include "stdio.h"
// 2D correlation/convolution with implicit zero padding: taps that fall
// outside the image are skipped. One thread produces one output pixel.
// kernel_size is assumed odd (radius = kernel_size/2).
__global__ void convolution(float* input, float* kernel, float* output, int width, int height, int kernel_size) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height) {
        return;
    }
    int radius = kernel_size / 2;
    float acc = 0.0;
    for (int dx = -radius; dx <= radius; dx++) {
        for (int dy = -radius; dy <= radius; dy++) {
            int sx = col + dx;
            int sy = row + dy;
            bool inside = (sx >= 0) && (sx < width) && (sy >= 0) && (sy < height);
            if (inside) {
                float pixel = input[sy * width + sx];
                float weight = kernel[(dy + radius) * kernel_size + (dx + radius)];
                acc += pixel * weight;
            }
        }
    }
    output[row * width + col] = acc;
}
// Debug kernel: every launched thread prints its index and the scalar f.
__global__ void helloCUDA(float f){
    printf("Hello thread %d, f=%f\n", threadIdx.x, f);
}
9,161 | #include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#ifndef USE_MALLOC_HOST
#define USE_MALLOC_HOST
#endif
#undef USE_MALLOC_HOST
#define CUDA_SAFE_CALL(err) __cudaSafeCall(err,__FILE__,__LINE__)
// Checks a runtime-API return code; on failure reports the call site and
// aborts, since later API calls would only cascade from the sticky error.
inline void __cudaSafeCall(cudaError_t err,const char *file, const int line) {
    if(cudaSuccess == err) {
        return;
    }
    printf("%s(%i) : cudaSafeCall() Runtime API error : %s.\n",
           file, line, cudaGetErrorString(err) );
    exit(-1);
}
/* Measures H2D / D2H PCIe bandwidth (100 copies of N floats, event-timed)
   on every CUDA device in the system. Usage: prog N */
int main(int argc, char * argv[]) {
    int numDevs=0, i, j, N, nBytes;
    cudaDeviceProp prop;
    cudaEvent_t start, stop;
    float *x_cpu, *y_cpu, *x_gpu;
    float dt,totalDtTo=0.0,totalDtFrom=0.0;
    /* N is a required argument; the original indexed argv[1] unconditionally
       and crashed when it was missing */
    if (argc < 2) {
        printf("usage: %s N\n", argv[0]);
        exit(-1);
    }
    /* create events */
    CUDA_SAFE_CALL(cudaEventCreate(&start));
    CUDA_SAFE_CALL(cudaEventCreate(&stop));
    N = atoi(argv[1]);
    nBytes = N*sizeof(float);
    /* host buffers: pinned when USE_MALLOC_HOST is defined, pageable otherwise */
#ifdef USE_MALLOC_HOST
    CUDA_SAFE_CALL(cudaMallocHost((void**)&x_cpu,nBytes));
    CUDA_SAFE_CALL(cudaMallocHost((void**)&y_cpu,nBytes));
#else
    x_cpu = (float *) malloc(nBytes);
    y_cpu = (float *) malloc(nBytes);
#endif
    for (i=0; i<N; ++i) {
        x_cpu[i] = 1.0*i;
    }
    CUDA_SAFE_CALL(cudaGetDeviceCount(&numDevs));
    printf("Number of CUDA Devices = %d\n",numDevs);
    printf("===========================\n");
    for (i=0; i<numDevs; ++i) {
        CUDA_SAFE_CALL(cudaSetDevice(i));
        CUDA_SAFE_CALL(cudaMalloc((void**)&x_gpu,nBytes));
#ifndef USE_MALLOC_HOST
        /* page-lock the pageable buffers for the duration of the benchmark */
        CUDA_SAFE_CALL(cudaHostRegister(x_cpu, nBytes, cudaHostRegisterMapped));
        CUDA_SAFE_CALL(cudaHostRegister(y_cpu, nBytes, cudaHostRegisterMapped));
#endif
        CUDA_SAFE_CALL(cudaGetDeviceProperties(&prop,i));
        printf("Device %d has name %s with compute capability %d.%d canMapHostMemory=%d\n",i,prop.name,prop.major,prop.minor,prop.canMapHostMemory);
        printf(" global memory = %1.5g\n",1.0*prop.totalGlobalMem/(1024*1024*1024));
        /* 100 host-to-device copies, timed with events */
        dt=0.0;
        CUDA_SAFE_CALL(cudaEventRecord(start, 0));
        for (j=0; j<100; ++j) {
            CUDA_SAFE_CALL(cudaMemcpy(x_gpu, x_cpu, nBytes, cudaMemcpyHostToDevice));
        }
        CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
        CUDA_SAFE_CALL(cudaEventSynchronize(stop));
        CUDA_SAFE_CALL(cudaEventElapsedTime(&dt,start,stop));
        totalDtTo+=dt;
        /* 100 device-to-host copies, timed with events */
        dt=0.0;
        CUDA_SAFE_CALL(cudaEventRecord(start, 0));
        for (j=0; j<100; ++j) {
            CUDA_SAFE_CALL(cudaMemcpy(y_cpu, x_gpu, nBytes, cudaMemcpyDeviceToHost));
        }
        CUDA_SAFE_CALL(cudaEventRecord(stop, 0));
        CUDA_SAFE_CALL(cudaEventSynchronize(stop));
        CUDA_SAFE_CALL(cudaEventElapsedTime(&dt,start,stop));
        totalDtFrom+=dt;
        /* event times are milliseconds; convert to seconds before GB/s math */
        totalDtTo*=.001;
        totalDtFrom*=.001;
        printf("HostToDevice PCI Express BW=%g GB/s\n",100.0*nBytes/(1024*1024*1024)/totalDtTo);
        printf("DeviceToHost PCI Express BW=%g GB/s\n",100.0*nBytes/(1024*1024*1024)/totalDtFrom);
        totalDtTo=0.0;
        totalDtFrom=0.0;
#ifndef USE_MALLOC_HOST
        CUDA_SAFE_CALL(cudaHostUnregister(x_cpu));
        CUDA_SAFE_CALL(cudaHostUnregister(y_cpu));
#endif
        CUDA_SAFE_CALL(cudaFree(x_gpu));
    }
    /* release host buffers with the allocator that created them */
#ifndef USE_MALLOC_HOST
    free(x_cpu);
    free(y_cpu);
#else
    CUDA_SAFE_CALL(cudaFreeHost(x_cpu));
    CUDA_SAFE_CALL(cudaFreeHost(y_cpu));
#endif
    /* destroy events */
    CUDA_SAFE_CALL(cudaEventDestroy(start));
    CUDA_SAFE_CALL(cudaEventDestroy(stop));
    return 0;
}
|
9,162 |
// Opens a timing interval: creates both events and records the start event
// on the default stream. Pair with toc(), which destroys the events.
inline void tic(cudaEvent_t *p_start, cudaEvent_t *p_stop){
    cudaEventCreate(p_start);
    cudaEventCreate(p_stop);
    cudaEventRecord(*p_start, 0);
}
// Closes a timing interval opened by tic(): waits for outstanding work,
// records and syncs the stop event, writes the elapsed milliseconds to
// *p_time, and destroys both events.
inline void toc(cudaEvent_t start, cudaEvent_t stop, float *p_time){
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // documented drop-in replacement with identical semantics.
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(p_time, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
9,163 | #include "includes.h"
// Tensor-contraction kernel: accumulates t3 -= t2 * v2 over the flattened
// (h3,h2,p6,p5) index space, looping h1 and p4 per thread. The *ld_* ints
// are precomputed strides for each tensor dimension.
// NOTE(review): T1, T2, Tcomm are tile-size macros from includes.h — the
// shared buffer must hold at least h1d*p4d doubles; confirm T1*2*Tcomm is
// sized accordingly at every call site.
// NOTE(review): the parameter `p4` is overwritten by the inner loop below,
// so its incoming value is unused.
__global__ void sd_t_s1_4_kernel(int h1d,int h2d,int h3d,int p4d,int p5d,int p6d,int p4ld_t2,int h1ld_t2,int h3ld_v2,int h2ld_v2,int p6ld_v2,int p5ld_v2,int h3ld_t3,int h2ld_t3,int h1ld_t3,int p6ld_t3,int p5ld_t3,int p4ld_t3,double *t3d, double *t2_d, double *v2_d,int p4, int total_x) {
    int h1,h2,h3,p6,p5;
    // Stage the whole t2 slab (h1d*p4d doubles) in shared memory; every
    // thread re-reads it for each (h1,p4) pair below.
    __shared__ double t2_shm[T1*2*Tcomm];
    for(int i=threadIdx.x;i<h1d*p4d;i+=blockDim.x)
        if(i<h1d*p4d)
            t2_shm[i] = t2_d[i];
    int rest_x=blockIdx.x;
    int thread_x = T2*T1 * rest_x + threadIdx.x;
    rest_x = thread_x;
    // All threads must pass this barrier before t2_shm is read.
    __syncthreads();
    /* the following computation may need to happen inside the loop */
    for(int i=0;i<total_x;i+=gridDim.x*blockDim.x)
    {
        // Decode the flat work-item index into (h3,h2,p6,p5) coordinates.
        rest_x += i;
        h3=rest_x%h3d;
        rest_x=rest_x/h3d;
        h2=rest_x%h2d;
        rest_x=rest_x/h2d;
        p6=rest_x%p6d;
        rest_x=rest_x/p6d;
        p5=rest_x%p5d;
        if((thread_x+i)<total_x)
        for(h1=0;h1<h1d;h1++)
        for(p4=0;p4<p4d;p4++)
        {
            // v2 is indexed without h1/p4: the same v2 element scales every
            // t2_shm[h1*p4d+p4] contribution for this (h3,h2,p6,p5).
            t3d[h3*h3ld_t3+h2*h2ld_t3+h1*h1ld_t3+p6*p6ld_t3+p5*p5ld_t3+p4*p4ld_t3]-=t2_shm[h1*p4d+p4]*v2_d[h3*h3ld_v2+h2*h2ld_v2+p6*p6ld_v2+p5*p5ld_v2];
        }
    }
    __syncthreads();
}
9,164 | #include "includes.h"
// richu shaji abraham richursa
using namespace std;
// Returns 1 when the bit's state in `value` matches the requested state:
// bitset == 1 tests for the bit being set, any other bitset tests for clear.
__device__ int function(int value , int bit ,int bitset)
{
    int bitIsSet = ((value & bit) != 0) ? 1 : 0;
    if(bitset == 1)
    {
        return bitIsSet;
    }
    return 1 - bitIsSet;
}
// Fills d_predicateArrry[i] with the 1/0 result of function() applied to
// d_array[i] — one element per thread, tail guarded.
__global__ void predicateDevice(int *d_array , int *d_predicateArrry , int d_numberOfElements,int bit,int bitset)
{
    int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if(tid >= d_numberOfElements)
    {
        return;
    }
    d_predicateArrry[tid] = function(d_array[tid],bit,bitset);
}
9,165 | #include <stdio.h>
#include <stdlib.h>
// Sums the elements of A into *sum. The original wrote `sum += A[thread]`,
// which advances the local *pointer* instead of accumulating into the
// target, so *sum was never written. atomicAdd accumulates correctly across
// all threads. *sum must be zeroed by the caller before launch.
__global__ void arrayReduce(int *A, int *sum, int size){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < size) {
        atomicAdd(sum, A[idx]);
    }
}
// Reads a size, fills an array with random values, sums it on the device
// and prints the result.
int main(int argc, char **argv){
    int size;
    int* deviceArray = 0;
    int* sum = 0;
    int* localSum = (int*) malloc(sizeof(int)); // was a magic `4`
    printf("Array Size: ");
    scanf("%d", &size);
    int* local = (int*) malloc(size * sizeof(int));
    cudaMalloc((void**) &deviceArray, sizeof(int) * size);
    cudaMalloc((void**) &sum, sizeof(int));
    // cudaMalloc does not initialize memory: without this the kernel
    // accumulated into garbage.
    cudaMemset(sum, 0, sizeof(int));
    for(int i = 0; i < size; i++){
        local[i] = rand() % 100;
    }
    cudaMemcpy(deviceArray, local, size * sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimBlock(size,1);
    arrayReduce<<<1,dimBlock>>>(deviceArray, sum, size);
    // cudaThreadSynchronize is deprecated; cudaDeviceSynchronize replaces it.
    cudaDeviceSynchronize();
    cudaMemcpy(localSum, sum, sizeof(int), cudaMemcpyDeviceToHost);
    printf("\nSum: %d\n", *localSum);
    // Release device and host buffers (the original leaked all of them).
    cudaFree(deviceArray);
    cudaFree(sum);
    free(local);
    free(localSum);
    return 0;
}
|
9,166 | #include<stdlib.h>
#include<stdio.h>
// SAXPY-style integer update: b[id] = a[id] * (*c) + b[id].
// NOTE(review): no bounds guard — the launch below uses <<<1,n>>> so every
// thread maps to a valid element; confirm before reusing with other configs.
__global__ void add(int* a , int *b,int *c)
{
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    int alpha = *c;
    b[id] += a[id] * alpha;
}
// Reads two integer arrays and a scalar alpha, computes b = alpha*a + b on
// the device, and prints the result.
int main(void)
{
    int *a , *b , c;
    int *d_a , *d_b ,*d_c;
    printf("Enter the value of N \n");
    int n;
    int i;
    scanf("%d",&n);
    a = (int*)malloc(sizeof(int)*n);
    b = (int*)malloc(sizeof(int)*n);
    printf("Enter the value of alpha \n");
    scanf("%d",&c);
    printf("Enter the values for 1st Array \n");
    for( i = 0;i<n;i++)
        scanf("%d",&a[i]);
    printf("Enter the values for 2nd Array \n");
    for(i = 0;i<n;i++)
        scanf("%d",&b[i]);
    int size = sizeof(int)*n;
    cudaMalloc((void**)&d_a,size);
    cudaMalloc((void**)&d_b,size);
    cudaMalloc((void**)&d_c,sizeof(int));
    cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_c,&c,sizeof(int),cudaMemcpyHostToDevice);
    add<<<1,n>>>(d_a,d_b,d_c);
    cudaMemcpy(b,d_b,size,cudaMemcpyDeviceToHost);
    printf("Result \n");
    for( i = 0;i<n;i++)
        printf("%d \t",b[i]);
    printf("\n");
    // Release every allocation; the original leaked d_c and both host arrays.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    return 0;
}
|
9,167 | /***************/
/* FFTSHIFT 2D */
/***************/
// Applies the checkerboard sign flip (-1)^(i+j) in place on an N1 x N2
// single-precision complex array; after a forward FFT this is equivalent to
// centering (fftshift-ing) the spectrum.
__global__ void fftshift_2D(float2 * __restrict__ data, const int N1, const int N2)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    if (i >= N1 || j >= N2) return;
    // +1 for even i+j, -1 for odd.
    float sign = 1-2*((i+j)&1);
    data[i*N2+j].x *= sign;
    data[i*N2+j].y *= sign;
}
// Double-precision overload of the in-place (-1)^(i+j) checkerboard flip.
__global__ void fftshift_2D(double2 * __restrict__ data, const int N1, const int N2)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    if (i >= N1 || j >= N2) return;
    // +1 for even i+j, -1 for odd.
    double sign = 1-2*((i+j)&1);
    data[i*N2+j].x *= sign;
    data[i*N2+j].y *= sign;
}
|
9,168 |
/* One thread per sample t: fills column t of the distance matrix g with the
 * squared Euclidean distances from X(:,t) to every centroid Theta(:,k), and
 * writes a one-hot indicator into column t of Gamma at the closest centroid
 * (ties keep the lowest k, matching a strict '<' comparison). */
__global__ void mykernel( double *Gamma, double *g, double *Theta, double *X, int N, int K, int T) {
    int t = blockIdx.x*blockDim.x + threadIdx.x;
    if(t >= T) return;
    int best = 0; /* index k with the minimal g(k,t) seen so far */
    for(int k=0;k<K;k++){
        /* squared distance <X(:,t)-Theta(:,k), X(:,t)-Theta(:,k)> */
        double dist = 0;
        for(int n=0;n<N;n++){
            dist += (X[t*N+n] - Theta[k*N+n])*(X[t*N+n] - Theta[k*N+n]);
        }
        g[t*K+k] = dist;
        /* clear the indicator; the winner is set once after the loop */
        Gamma[t*K+k] = 0;
        if(k > 0 && dist < g[t*K+best]){
            best = k;
        }
    }
    /* this thread owns column t exclusively, so the final memory state is
     * the same one-hot vector the incremental updates produced */
    Gamma[t*K+best] = 1;
}
|
9,169 | __global__ void initmem( int Ntot, float *a ) {
    // Zero-initialize the first Ntot floats of a (one element per thread;
    // tail threads beyond Ntot do nothing).
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if ( idx < Ntot ) a[idx] = 0;
}
|
9,170 | #include "includes.h"
/*
* JCuda - Java bindings for NVIDIA CUDA driver and runtime API
* http://www.jcuda.org
*
*
* This code is based on the NVIDIA 'reduction' CUDA sample,
* Copyright 1993-2010 NVIDIA Corporation.
*/
// One linkage specifier is all the kernel needs; the original carried seven
// stacked `extern "C"` tokens (code-generation residue).
extern "C"
// Scans w in parallel for the entry equal to 1.0 and publishes its position
// in index[0]. If several entries equal 1.0, the last writer wins.
__global__ void getTargetIndex(int n, int *index, double *w)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i<n)
    {
        if(w[i] == 1.0) {index[0] = i;}
    }
}
9,171 | #include <chrono>
#include <iostream>
#include <string>
//Testing Structs, easiest way to define datatypes of size
typedef struct
{// 1-byte element type for the access-size experiments
char p1;
}testingStruct_1;
typedef struct
{// 2-byte element type (two chars, no padding)
char p1;
char p2;
}testingStruct_2;
typedef struct
{// 4-byte element type (four chars, no padding)
char p1;
char p2;
char p3;
char p4;
}testingStruct_4;
typedef struct
{// 8-byte element type (eight chars, no padding)
char p1;
char p2;
char p3;
char p4;
char p5;
char p6;
char p7;
char p8;
}testingStruct_8;
typedef struct
{// 16-byte element type (sixteen chars, no padding)
char p1;
char p2;
char p3;
char p4;
char p5;
char p6;
char p7;
char p8;
char p9;
char p10;
char p11;
char p12;
char p13;
char p14;
char p15;
char p16;
}testingStruct_16;
// Copies one element per thread, spaced `stride` apart, to probe memory
// behavior at different access strides. No bounds check: the caller sizes
// both buffers as (total threads * stride) elements.
template<typename T>
__global__
void copyKernel(T* out, T* in, int stride) {
    unsigned tid = threadIdx.x + blockIdx.x * blockDim.x;
    out[tid * stride] = in[tid * stride];
}
// Benchmarks copyKernel at strides 1..64 and prints the timings as a Python
// np.array literal.
int main () {
    using namespace std::chrono;
    //output strings so that they may be pasted directly into python
    std::cout<<"np.array((";
    for (int i = 0; i <= 6; i++) {//loop over strides 1, 2, 4, ..., 64
        int stride = 1 << i;
        int* out;
        int* in;
        size_t accessSize = sizeof (int);
        // 16384 blocks * 1024 threads, each touching element id*stride.
        // Computed in size_t: the original multiplied ints, which overflows
        // 2^31 for stride >= 32 and requests a bogus allocation size.
        size_t allocBytes = (size_t)16384 * 1024 * accessSize * stride;
        auto errOut = cudaMalloc(&out , allocBytes );
        auto err = cudaMalloc(&in , allocBytes ) ;
        if (err != cudaSuccess || errOut != cudaSuccess) { // both checked now
            std::cout << "ERROR: could not alloc!" << std::endl;
        }
        // warm-up launch, excluded from the timing
        copyKernel<<<16384, 1024>>>(out, in, stride);
        cudaDeviceSynchronize();
        //Time Measurement Point 1
        high_resolution_clock::time_point timeBefore = high_resolution_clock::now();
        for (int j = 1; j <= 10; j++){
            copyKernel<<<16384, 1024>>>(out, in, stride);
            cudaDeviceSynchronize();
        }
        //Time Measurement Point 2
        high_resolution_clock::time_point timeAfter = high_resolution_clock::now();
        //Output Time Measurement Result
        duration<double> time_span = duration_cast<duration<double>>(timeAfter - timeBefore);
        std::cout << time_span.count();
        cudaFree(out);
        cudaFree(in);
        if(i != 6)
        {
            std::cout<<",";
        }
    }
    std::cout << "))" << std::endl;
    return 0;
}
|
9,172 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <pthread.h>
#include <cuda_runtime.h>
// global constants
#define NUM_CPU_THREADS 32
#define MAX_MASS 100.0f
#define MAX_POS_X 100.0f
#define MAX_POS_Y 100.0f
#define MAX_VEL_X 0.0f
#define MAX_VEL_Y 0.0f
#define G 8
#define DT 0.0019531255f
#define DT2 0.000003814697265625f/2
#define DAMPING 1.0f
#define SOFTENING 0.0009765625f
typedef struct {
float *m;
float *r1, *r2;
float *v1, *v2;
float *a1, *a2;
unsigned long nElem, nIter;
} UNIVERSE;
UNIVERSE US;
#define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    // Nothing to report on success.
    if (code == cudaSuccess) return;
    // Print the failing call site; optionally terminate so later API calls
    // do not cascade from the sticky error.
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
// Wall-clock time stamp in seconds (microsecond resolution via gettimeofday).
double getTimeStamp()
{
    struct timeval tv;
    gettimeofday (&tv, NULL);
    return tv.tv_sec + (double) tv.tv_usec/1000000;
}
// Prints one tab-separated row of n values drawn from v at positions
// stride*idx + offset; the final element uses last_fmt (so the caller picks
// the row terminator).
static void print_row (const float *v, unsigned long n, int stride, int offset, const char *last_fmt)
{
    for (unsigned long idx=0; idx<n; idx++)
        printf(idx == n-1 ? last_fmt : "%.2f\t", v[stride*idx + offset]);
}
// Dumps per-body mass, position, velocity and acceleration as aligned rows.
// (The original repeated the same loop eight times; output is unchanged.)
void print_BodyStats (const float *m, const float *r, const float *v, const float *a)
{
    unsigned long nElem = US.nElem;
    printf("\n");
    // print body number
    for (unsigned long idx=0; idx<nElem; idx++) {
        if (idx == nElem-1)
            printf("Mass %ld\n", idx);
        else
            printf("Mass %ld\t", idx);
    }
    print_row(m, nElem, 1, 0, "%.2f\n");      // mass
    print_row(r, nElem, 2, 0, "%.2f\n");      // x-position
    print_row(r, nElem, 2, 1, "%.2f\n");      // y-position
    print_row(v, nElem, 2, 0, "%.2f\n");      // x-velocity
    print_row(v, nElem, 2, 1, "%.2f\n");      // y-velocity
    print_row(a, nElem, 2, 0, "%.2f\n");      // x-acceleration
    print_row(a, nElem, 2, 1, "%.2f\n\n");    // y-acceleration (blank line after)
}
// Fills the US mass, position and velocity arrays with the initial state:
// constant mass, positions uniform in [-MAX_POS, +MAX_POS] per axis, and
// velocities uniform in [-MAX_VEL, +MAX_VEL] (currently zero-width limits).
void init_MassPositionVelocity ()
{
    // Fixed seed so every run starts from the same configuration.
    srand ((unsigned int) 1000);
    unsigned long nElem = US.nElem;
    for (unsigned long idx=0; idx<nElem; idx++)
    {
        US.m[idx] = 100.0;//(float) ((double) rand() / (double) (RAND_MAX/MAX_MASS));
        US.r1[2*idx] = (float) ((double) rand() / (double) (RAND_MAX/(MAX_POS_X*2)) - MAX_POS_X);
        US.r1[2*idx+1] = (float) ((double) rand() / (double) (RAND_MAX/(MAX_POS_Y*2)) - MAX_POS_Y);
        US.v1[2*idx] = (float) ((double) rand() / (double) (RAND_MAX/(MAX_VEL_X*2)) - MAX_VEL_X);
        US.v1[2*idx+1] = (float) ((double) rand() / (double) (RAND_MAX/(MAX_VEL_Y*2)) - MAX_VEL_Y);
    }
}
// pthread worker: computes the initial acceleration of a contiguous slice of
// bodies. `arg` is the worker's slot number (0..NUM_CPU_THREADS-1); each
// worker handles nElem/NUM_CPU_THREADS bodies. Direct O(n^2) pairwise sum
// with Plummer softening, reading US.r1 and writing US.a1.
void *init_Acceleration_SMT (void *arg)
{
    unsigned long nElem = US.nElem;
    unsigned long offset = (unsigned long) arg;
    unsigned long len = (unsigned long) US.nElem / NUM_CPU_THREADS;
    unsigned long start = offset * len;
    unsigned long end = start + len;
    // Plain aliases for the arrays (the original went through float**).
    const float *r = US.r1;
    float *acc = US.a1;
    for (unsigned long i=start; i<end; i++)
    {
        float ax_ip1 = 0.0;
        float ay_ip1 = 0.0;
        for (unsigned long j=0; j<nElem; j++)
        {
            if (j == i) continue;
            float dx_ip1 = r[2*j] - r[2*i];
            float dy_ip1 = r[2*j+1] - r[2*i+1];
            float rDistSquared = dx_ip1*dx_ip1 + dy_ip1*dy_ip1 + SOFTENING;
            // G*m_j / r^3, with the softened r^2 cubed under one sqrt.
            float invDistCubed = G*US.m[j]/sqrtf(rDistSquared*rDistSquared*rDistSquared);
            ax_ip1 += dx_ip1 * invDistCubed;
            ay_ip1 += dy_ip1 * invDistCubed;
        }
        acc[2*i] = ax_ip1;
        acc[2*i+1] = ay_ip1;
    }
    pthread_exit (NULL);
}
// One-thread-per-body integration step: position update from (i_r,i_v,i_a),
// new acceleration from the updated positions, then velocity update
// averaging old and new acceleration (velocity-Verlet shape).
// NOTE(review): the force loop reads o_r[2*j] written by *other* threads of
// the same launch with no grid-wide synchronization, so a thread may see a
// mix of updated and not-yet-updated positions. Splitting the position
// update and the force pass into two kernels would remove the race; confirm
// whether the approximation is intentional.
__global__ void compute_Device (
    float *o_r, float *o_v, float *o_a,
    const float *i_r, const float *i_v, const float *i_a,
    const float *m, const unsigned long nElem)
{
    unsigned long tid = blockIdx.x * blockDim.x + threadIdx.x;
    // if (tid == 0)
    // printf("x: %.6f\ty:%.6f\n", i_r[0], i_r[1]);
    float ax_ip1 = 0.0, ay_ip1 = 0.0;
    float dx_ip1, dy_ip1, rDistSquared, invDistCubed;
    if (tid < nElem) {
        // calculating subsequent position of body (one body per thread)
        o_r[2*tid] = i_r[2*tid] + i_v[2*tid]*DT + i_a[2*tid]*DT2; // x-position
        o_r[2*tid+1] = i_r[2*tid+1] + i_v[2*tid+1]*DT + i_a[2*tid+1]*DT2; // y-position
        // calculating the NEXT iteration's acceleration and velocity
        for (unsigned long j=0; j<nElem; j++) {
            if (j != tid) {
                dx_ip1 = o_r[2*j] - o_r[2*tid];
                dy_ip1 = o_r[2*j+1] - o_r[2*tid+1];
                rDistSquared = dx_ip1*dx_ip1 + dy_ip1*dy_ip1 + SOFTENING;
                // rsqrtf of (r^2)^3 yields the softened 1/r^3 factor
                invDistCubed = G*m[j]*rsqrtf(rDistSquared*rDistSquared*rDistSquared);
                ax_ip1 += dx_ip1 * invDistCubed;
                ay_ip1 += dy_ip1 * invDistCubed;
            }
        }
        o_a[2*tid] = ax_ip1; // x-acceleration
        o_a[2*tid+1] = ay_ip1; // y-acceleration
        o_v[2*tid] = i_v[2*tid] + (i_a[2*tid] + ax_ip1)*DT/2; // x-velocity
        o_v[2*tid+1] = i_v[2*tid+1] + (i_a[2*tid+1] + ay_ip1)*DT/2; // y-velocity
    }
}
// Driver: usage is `nbody [nElem [nIter]]`. Prints device properties,
// initializes the system on the host (accelerations via pthreads), then
// runs nIter leapfrog steps on the device, ping-ponging between the
// (r1,v1,a1) and (r2,v2,a2) buffer sets.
int main (int argc, char *argv[])
{
    if (argc > 3) {
        printf("Error: Wrong number of arguments.\n");
        exit(EXIT_FAILURE);
    }
    unsigned long nElem = 16000;
    unsigned long nIter = 10;
    char *ptr1, *ptr2;
    if (argc > 1)
        nElem = strtoul(argv[1], &ptr1, 10);
    if (argc > 2)
        nIter = strtoul(argv[2], &ptr2, 10);
    ////////////////////////////////////////////////////////////////
    /// SETTING UP DEVICE
    ////////////////////////////////////////////////////////////////
    int dev = 0, driverVersion = 0, runtimeVersion = 0;
    cudaDeviceProp deviceProp;
    checkCudaErrors (cudaGetDeviceProperties (&deviceProp, dev));
    checkCudaErrors (cudaSetDevice (dev));
    checkCudaErrors (cudaDriverGetVersion (&driverVersion));
    checkCudaErrors (cudaRuntimeGetVersion (&runtimeVersion));
    printf("\n===== Device Properties ======\n\n");
    printf("  Device %d: %s\n", dev, deviceProp.name);
    printf("  CUDA Driver Version / Runtime Version: %d.%d / %d.%d\n",
        driverVersion/1000, (driverVersion%100)/10,
        runtimeVersion/1000, (runtimeVersion%100)/10);
    printf("  CUDA Capability Major/Minor version number: %d.%d\n",
        deviceProp.major, deviceProp.minor);
    printf("  Number of SMs: %d\n", deviceProp.multiProcessorCount);
    printf("  Total amount of global memory: %.2f GB (%llu B)\n",
        (float) deviceProp.totalGlobalMem/pow(1024.0,3),
        (unsigned long long) deviceProp.totalGlobalMem);
    printf("  Total amount of constant memory: %4.2f kB\n",
        deviceProp.totalConstMem/1024.0);
    printf("  Total amount of shared memory per block: %4.2f kB\n",
        deviceProp.sharedMemPerBlock/1024.0);
    printf("  Total number of registers available per block: %d\n",
        deviceProp.regsPerBlock);
    printf("  Warp size: %d\n", deviceProp.warpSize);
    printf("  Maximum number of threads per block: %d\n",
        deviceProp.maxThreadsPerBlock);
    printf("  Maximum number of threads per SM: %d\n",
        deviceProp.maxThreadsPerMultiProcessor);
    printf("  Maximum number of warps per SM: %d\n",
        deviceProp.maxThreadsPerMultiProcessor/32);
    printf("  Maximum size of each block dimension: %d x %d x %d\n",
        deviceProp.maxThreadsDim[0], deviceProp.maxThreadsDim[1],
        deviceProp.maxThreadsDim[2]);
    printf("  Maximum size of each grid dimension: %d x %d x %d\n",
        deviceProp.maxGridSize[0], deviceProp.maxGridSize[1],
        deviceProp.maxGridSize[2]);
    printf("  Maximum memory pitch: %lu B\n", deviceProp.memPitch);
    printf("  Memory Clock Rate (MHz): %.1f\n",
        deviceProp.memoryClockRate/1e3);
    printf("  Memory Bus Width (b): %d\n", deviceProp.memoryBusWidth);
    printf("  Peak Memory Bandwidth (GB/s): %.2f\n\n",
        2.0*deviceProp.memoryClockRate*(deviceProp.memoryBusWidth/8)/1e6);
    printf("\n===== Simulation Parameters =====\n\n");
    printf("  Number of Bodies = %ld\n", nElem);
    printf("  Number of Time Steps = %ld\n", nIter);
    printf("  Number of CPU Threads = %d\n\n", NUM_CPU_THREADS);
    printf("=================================\n\n\n");
    ////////////////////////////////////////////////////////////////
    /// INITIALIZING SIMULATION
    ////////////////////////////////////////////////////////////////
    float *h_m, *h_r1, *h_r2, *h_v1, *h_v2, *h_a1, *h_a2;    // host data
    float *d_m, *d_r1, *d_r2, *d_v1, *d_v2, *d_a1, *d_a2;    // device data
    float *gref_m, *gref_r, *gref_v, *gref_a;
    // nBytes is per scalar component; the 2D vector arrays (positions,
    // velocities, accelerations) are interleaved x/y and use nBytes*2.
    size_t nBytes = nElem * sizeof(float);
    h_m = (float *) malloc(nBytes);
    h_r1 = (float *) malloc(nBytes*2);
    h_r2 = (float *) malloc(nBytes*2);
    h_v1 = (float *) malloc(nBytes*2);
    h_v2 = (float *) malloc(nBytes*2);
    h_a1 = (float *) malloc(nBytes*2);
    h_a2 = (float *) malloc(nBytes*2);
    gref_m = (float *) malloc(nBytes);
    gref_r = (float *) malloc(nBytes*2);
    gref_v = (float *) malloc(nBytes*2);
    gref_a = (float *) malloc(nBytes*2);
    memset (h_m, 0, nBytes);
    memset (h_r1, 0, nBytes*2);
    memset (h_r2, 0, nBytes*2);
    memset (h_v1, 0, nBytes*2);
    memset (h_v2, 0, nBytes*2);
    memset (h_a1, 0, nBytes*2);
    memset (h_a2, 0, nBytes*2);
    memset (gref_m, 0, nBytes);
    memset (gref_r, 0, nBytes*2);
    memset (gref_v, 0, nBytes*2);
    memset (gref_a, 0, nBytes*2);
    // initialize data on host size and then transfer to device
    // (the worker threads read their inputs through the global US struct)
    US.m = h_m;
    US.r1 = h_r1;
    US.r2 = h_r2;
    US.v1 = h_v1;
    US.v2 = h_v2;
    US.a1 = h_a1;
    US.a2 = h_a2;
    US.nElem = nElem;
    US.nIter = nIter;
    printf("Initializing bodies on HOST. Time taken: ");
    double time0 = getTimeStamp();
    init_MassPositionVelocity();
    // for portability, explicity create threads in a joinable state
    pthread_t threads [NUM_CPU_THREADS];
    pthread_attr_t attr;
    pthread_attr_init (&attr);
    pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_JOINABLE);
    // creating the threads to calculate initial body accelerations on HOST
    unsigned long i;
    int rc;
    void *status;
    for (i=0; i<NUM_CPU_THREADS; i++) {
        rc = pthread_create (&threads[i], &attr, init_Acceleration_SMT, (void *) i);
        if (rc) {
            printf("Error; return code from pthread_create() is %d.\n", rc);
            exit(EXIT_FAILURE);
        }
    }
    // wait on the other threads after initial body accelerations on HOST
    for (i=0; i<NUM_CPU_THREADS; i++) {
        rc = pthread_join (threads[i], &status);
        if (rc) {
            printf("ERROR; return code from pthread_join() is %d.\n", rc);
            exit(EXIT_FAILURE);
        }
    }
    printf ("%lfs\n", getTimeStamp()-time0);
    //print_BodyStats(h_m, h_r1, h_v1, h_a1);
    // allocating space in device global memory for data
    checkCudaErrors (cudaMalloc ((void**) &d_m, nBytes));
    checkCudaErrors (cudaMalloc ((void**) &d_r1, nBytes*2));
    checkCudaErrors (cudaMalloc ((void**) &d_r2, nBytes*2));
    checkCudaErrors (cudaMalloc ((void**) &d_v1, nBytes*2));
    checkCudaErrors (cudaMalloc ((void**) &d_v2, nBytes*2));
    checkCudaErrors (cudaMalloc ((void**) &d_a1, nBytes*2));
    checkCudaErrors (cudaMalloc ((void**) &d_a2, nBytes*2));
    // copying initialized data from host to device
    checkCudaErrors (cudaMemcpy (d_m, h_m, nBytes, cudaMemcpyHostToDevice));
    checkCudaErrors (cudaMemcpy (d_r1, h_r1, nBytes*2, cudaMemcpyHostToDevice));
    checkCudaErrors (cudaMemcpy (d_r2, h_r2, nBytes*2, cudaMemcpyHostToDevice));
    checkCudaErrors (cudaMemcpy (d_v1, h_v1, nBytes*2, cudaMemcpyHostToDevice));
    checkCudaErrors (cudaMemcpy (d_v2, h_v2, nBytes*2, cudaMemcpyHostToDevice));
    checkCudaErrors (cudaMemcpy (d_a1, h_a1, nBytes*2, cudaMemcpyHostToDevice));
    checkCudaErrors (cudaMemcpy (d_a2, h_a2, nBytes*2, cudaMemcpyHostToDevice));
    ////////////////////////////////////////////////////////////////
    /// PERFORMING SIMULATION ON DEVICE
    ////////////////////////////////////////////////////////////////
    dim3 block (1024);
    dim3 grid ((nElem+block.x-1)/(block.x));
    double timestamp_GPU_start = getTimeStamp();
    // Ping-pong integration: even iterations read set 1 / write set 2,
    // odd iterations the reverse, so no extra copies are needed.
    for (unsigned long iter=0; iter<nIter; iter++) {
        if (iter % 2 == 0) {
            compute_Device <<<grid, block, 0, 0>>> (d_r2, d_v2, d_a2, d_r1, d_v1, d_a1, d_m, nElem);
            cudaDeviceSynchronize ();
            // cudaMemcpy(gref_m, d_m, nBytes, cudaMemcpyDeviceToHost);
            // cudaMemcpy(gref_r, d_r2, nBytes*2, cudaMemcpyDeviceToHost);
            // cudaMemcpy(gref_v, d_v2, nBytes*2, cudaMemcpyDeviceToHost);
            // cudaMemcpy(gref_a, d_a2, nBytes*2, cudaMemcpyDeviceToHost);
        } else {
            compute_Device <<<grid, block, 0, 0>>> (d_r1, d_v1, d_a1, d_r2, d_v2, d_a2, d_m, nElem);
            cudaDeviceSynchronize ();
            // cudaMemcpy(gref_m, d_m, nBytes, cudaMemcpyDeviceToHost);
            // cudaMemcpy(gref_r, d_r1, nBytes*2, cudaMemcpyDeviceToHost);
            // cudaMemcpy(gref_v, d_v1, nBytes*2, cudaMemcpyDeviceToHost);
            // cudaMemcpy(gref_a, d_a1, nBytes*2, cudaMemcpyDeviceToHost);
        }
        // if (iter%1000 == 0)
        // print_BodyStats (gref_m, gref_r, gref_v, gref_a);
    }
    double timestamp_GPU_end = getTimeStamp();
    double elapsedTime = timestamp_GPU_end - timestamp_GPU_start;
    ////////////////////////////////////////////////////////////////
    /// SIMULATION COMPLETE
    ////////////////////////////////////////////////////////////////
    cudaFree (d_m);
    cudaFree (d_r1); cudaFree (d_r2);
    cudaFree (d_v1); cudaFree (d_v2);
    cudaFree (d_a1); cudaFree (d_a2);
    checkCudaErrors (cudaDeviceReset());
    printf("Device successfully reset.\n");
    printf("\nElapsed Time: %lfs\n", elapsedTime);
    printf("Average timestep simulation duration: %lfs\n", elapsedTime/nIter);
    free (h_m);
    free (h_r1); free (h_r2);
    free (h_v1); free (h_v2);
    free (h_a1); free (h_a2);
    free (gref_m);
    free (gref_r);
    free (gref_v);
    free (gref_a);
    pthread_attr_destroy (&attr);
    //pthread_exit(NULL);
    return 0;
}
|
9,173 | #include "includes.h"
// Squares each element: d_salida[i] = d_entrada[i]^2, one element per
// thread in a single block (indexing uses threadIdx.x only).
__global__ void elevar_al_cuadrado(float * d_salida, float * d_entrada){
    int idx = threadIdx.x;
    float valor = d_entrada[idx];
    d_salida[idx] = valor * valor;
}
9,174 | #include <iostream>
#include <cuda.h>
#include <iterator>
#include <algorithm>
// Kernel that executes on the CUDA device: squares a[] element-wise,
// guarding the tail block against out-of-range indices.
__global__ void square_array(float *a, int array_size)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= array_size) return;
    a[idx] *= a[idx];
}
// Squares 0..99 on the device and prints the first and last five results.
int main(void)
{
    int array_size = 100;
    size_t size = array_size * sizeof(float);
    float *a_host = new float[array_size]; //array on CPU
    for (int i=0; i<array_size; i++) a_host[i] = i;
    float *a_device;
    cudaMalloc((void **) &a_device, size); // Allocate array on device
    cudaMemcpy(a_device, a_host, size, cudaMemcpyHostToDevice); //Copy data to device
    // Do calculation on device:
    int block_size = 64;
    int n_blocks = array_size/block_size + (array_size%block_size == 0 ? 0:1);
    square_array <<< n_blocks, block_size >>> (a_device, array_size);
    cudaMemcpy(a_host, a_device, sizeof(float)*array_size, cudaMemcpyDeviceToHost);//copy data back to CPU
    // Print results
    std::ostream_iterator<float> printIterator(std::cout, "\n");
    std::copy(a_host, a_host+5, printIterator);
    std::cout<<"."<<std::endl<<"."<<std::endl<<"."<<std::endl;
    std::copy(a_host+array_size-5, a_host+array_size, printIterator);
    // Cleanup: memory from new[] must be released with delete[]; the
    // original called free(a_host), which is undefined behavior.
    delete[] a_host; cudaFree(a_device);
    return 0;
}
9,175 | #include "includes.h"
// Dual-variable update of a primal-dual (Chambolle-Pock style) scheme:
// per pixel (x,y) and channel z, takes the forward-difference gradient of
// xbar, steps (y1,y2) by sigma along it, and projects the pair back onto
// the unit ball (||(y1,y2)|| <= 1). Images are stored as w*h planes per channel.
__global__ void primal(float *y1, float *y2, float *xbar, float sigma, int w, int h, int nc) {
int x = threadIdx.x + blockDim.x * blockIdx.x;
int y = threadIdx.y + blockDim.y * blockIdx.y;
if (x < w && y < h) {
int i;
float x1, x2, val, norm;
for (int z = 0; z < nc; z++) {
i = x + w * y + w * h * z;
val = xbar[i];
// Forward differences, clamped to zero at the right/bottom image border.
x1 = (x+1<w) ? (xbar[(x+1) + w * y + w * h * z] - val) : 0.f;
x2 = (y+1<h) ? (xbar[x + w * (y+1) + w * h * z] - val) : 0.f;
// Gradient-ascent step on the dual variable.
x1 = y1[i] + sigma * x1;
x2 = y2[i] + sigma * x2;
// Reprojection: divide by max(1, norm) so vectors inside the ball are kept.
norm = sqrtf(x1*x1+x2*x2);
y1[i] = x1 / fmax(1.f, norm);
y2[i] = x2 / fmax(1.f, norm);
}
}
} |
9,176 | // tests cuMemHostAlloc
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda.h>
// Adds `value` to data[idx]. Only thread 0 of block 0 performs the write, so
// the increment happens exactly once per launch regardless of grid size.
__global__ void incrValue(float *data, int idx, float value) {
    bool is_first_thread = (threadIdx.x == 0 && blockIdx.x == 0);
    if (is_first_thread) {
        data[idx] += value;
    }
}
// Exercises cuMemHostAlloc: pinned host buffer + async driver-API copies
// around a runtime-API kernel launch on an explicit stream.
// NOTE(review): no cuInit()/context setup appears before the driver-API calls
// (cuStreamCreate is first) — confirm an initialized context is established
// elsewhere, otherwise these calls return CUDA_ERROR_NOT_INITIALIZED.
int main(int argc, char *argv[]) {
int N = 1024;
CUstream stream;
cuStreamCreate(&stream, 0);
float *hostFloats;
// Page-locked (pinned) host memory so the async copies can be truly async.
cuMemHostAlloc((void **)&hostFloats, N * sizeof(float), CU_MEMHOSTALLOC_PORTABLE);
CUdeviceptr deviceFloats;
cuMemAlloc(&deviceFloats, N * sizeof(float));
hostFloats[2] = 4.0f;
cuMemcpyHtoDAsync(deviceFloats, hostFloats, N * sizeof(float), stream);
// 4 + 3 = 7 on the device copy.
incrValue<<<dim3(32, 1, 1), dim3(32, 1, 1), 0, stream>>>((float *)deviceFloats, 2, 3.0f);
cuMemcpyDtoHAsync(hostFloats, deviceFloats, N * sizeof(float), stream);
cuStreamSynchronize(stream);
cout << "hostFloats[2] " << hostFloats[2] << endl;
assert(hostFloats[2] == 7);
// Second increment accumulates on the device copy: 7 + 5 = 12.
incrValue<<<dim3(32, 1, 1), dim3(32, 1, 1), 0, stream>>>((float *)deviceFloats, 2, 5.0f);
cuMemcpyDtoHAsync(hostFloats, deviceFloats, N * sizeof(float), stream);
cuStreamSynchronize(stream);
cout << "hostFloats[2] " << hostFloats[2] << endl;
assert(hostFloats[2] == 12);
cuMemFreeHost(hostFloats);
cuMemFree(deviceFloats);
cuStreamDestroy(stream);
return 0;
}
|
9,177 | #include "includes.h"
// Per element: d[i] = (x[i] - xb) * (y[i] - yb) — the product of deviations
// from the means (xb, yb), a covariance-style term.
// Indexes by threadIdx.x only, so a single-block launch is assumed; the guard
// only protects against launching more threads than n.
__global__ void Saxy_device(float* x, float* y, float* d, float xb, float yb, int n)
{
int i = threadIdx.x;
if (i < n)
d[i] = (x[i] - xb) * (y[i] - yb);
} |
9,178 | #include <algorithm>
#include <cassert>
#include <iostream>
#include <vector>
// __global__ means that its called from CPU to run on the GPU
__global__ void vectorAdd(int *a, int *b, int *c, int N)
{
// calculate the global thread id
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if ( tid < N) // a quick boundary check
{
c[tid] = a[tid] + b[tid]; // do the actual addition
}
}
/**
 * Unified-memory vector addition demo with cudaMemAdvise/cudaMemPrefetchAsync
 * hints; verifies the result on the host.
 */
int main()
{
    constexpr int N = 1 << 16;
    constexpr size_t bytes = sizeof(int) * N;

    // Managed allocations: accessible from both host and device.
    int *a, *b, *c;
    cudaMallocManaged(&a, bytes);
    cudaMallocManaged(&b, bytes);
    cudaMallocManaged(&c, bytes);

    // BUG FIX: the original wrote `int id = cudaGetDevice(&id);`, which
    // overwrote the queried device ordinal with the cudaError_t return value.
    int id = 0;
    cudaGetDevice(&id);

    // Hint where each buffer should preferentially live.
    cudaMemAdvise(a, bytes, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId);
    cudaMemAdvise(b, bytes, cudaMemAdviseSetPreferredLocation, cudaCpuDeviceId);
    cudaMemPrefetchAsync(c, bytes, id);

    // Fill the input vectors on the host.
    for (int i = 0; i < N; i++)
    {
        a[i] = rand() % 100;
        b[i] = rand() % 100;
    }

    // Inputs are read-mostly from here on; prefetch them to the GPU.
    cudaMemAdvise(a, bytes, cudaMemAdviseSetReadMostly, id);
    cudaMemAdvise(b, bytes, cudaMemAdviseSetReadMostly, id);
    cudaMemPrefetchAsync(a, bytes, id);
    cudaMemPrefetchAsync(b, bytes, id);

    // 1024-thread blocks; grid padded so N not divisible by the block size
    // is still fully covered (the kernel bounds-checks).
    int BLOCK_SIZE = 1 << 10;
    int GRID_SIZE = (N + BLOCK_SIZE - 1) / BLOCK_SIZE;

    // Kernel launches are asynchronous with respect to the host.
    vectorAdd<<<GRID_SIZE, BLOCK_SIZE>>>(a, b, c, N);

    // Nothing here implicitly synchronizes (no cudaMemcpy), so wait before
    // touching managed memory on the host.
    cudaDeviceSynchronize();

    // Prefetch everything back to the CPU for the verification pass.
    cudaMemPrefetchAsync(a, bytes, cudaCpuDeviceId);
    cudaMemPrefetchAsync(b, bytes, cudaCpuDeviceId);
    cudaMemPrefetchAsync(c, bytes, cudaCpuDeviceId);

    for (int i = 0 ; i < N ; i++)
    {
        assert(c[i] == a[i] + b[i]);
    }

    cudaFree(a);
    cudaFree(b);
    cudaFree(c);
    std::cout << "COMPLETED SUCCESSFULLY\n";
    return 0;
}
|
9,179 | /**
* Most simple example. Launch a kernel which does nothing on the device.
* To compile: nvcc -o 01-mostbasic 01-mostbasic.cu
*/
/**
* The __global__ keyword indicates that the function (kernel)
* runs on the device and is callable from the host.
*/
// Minimal kernel: launches on the device and does no work.
__global__ void donothing()
{}
int main()
{
// Call the kernel on the CUDA device using 1 block of 1 thread.
donothing<<< 1, 1 >>>();
// NOTE(review): no cudaDeviceSynchronize() — the process may exit before the
// launch is serviced, which is harmless here since the kernel has no effect.
return 0;
}
|
9,180 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
typedef struct node {
int data;
struct node *parent;
struct node *left;
struct node *right;
int sema;
int height;
} node;
__device__ node* global_Root;
// Try-lock on a node's semaphore. Returns nonzero when this thread acquired
// the lock (previous value was 0), zero when it was already held.
__device__ int lock(node* n) {
    int previous = atomicExch(&n->sema, 1);
    return previous == 0;
}
// Release a node's semaphore with an atomic store of 0.
__device__ void unlock(node* n) {
atomicExch(&n->sema, 0);
}
// Allocate and fully initialize a leaf node on the device heap.
// Returns NULL if the device-side malloc fails.
__device__ node* new_node(int val, node* parent) {
    node *tmp = (node *) malloc(sizeof(node));
    if (tmp == NULL)       // device malloc can fail; avoid a null deref here
        return NULL;
    tmp->data = val;
    tmp->parent = parent;
    tmp->left = tmp->right = NULL;
    tmp->height = 1;
    // BUG FIX: sema was left uninitialized, so lock()/unlock() operated on
    // garbage for fresh nodes. A new node must start unlocked.
    tmp->sema = 0;
    return tmp;
}
// Cached height of the subtree rooted at `root`; an empty subtree is 0.
__device__ int height(node *root)
{
    return (root == NULL) ? 0 : root->height;
}
// AVL left rotation around `root`: root's right child becomes the subtree
// root and is returned; `parent` becomes the new subtree root's parent.
// Parent pointers and cached heights are maintained for the moved nodes.
// The caller is responsible for re-attaching the returned node to `parent`.
__device__ node* left_rotate(node* root,node* parent)
{
node* temp1 = root->right;
node* temp2 = temp1->left;
temp1->left = root;
root->parent = temp1;
root->right = temp2;
if(temp2)
temp2->parent = root;
// Recompute heights bottom-up: demoted node first, new root second.
root->height = max(height(root->left), height(root->right))+1;
temp1->height = max(height(temp1->left), height(temp1->right))+1;
temp1->parent = parent;
return temp1;
}
// AVL right rotation around `root`: mirror image of left_rotate. Root's left
// child becomes the subtree root and is returned; the caller re-attaches it
// to `parent`. Parent pointers and heights are maintained.
__device__ node* right_rotate(node* root, node* parent)
{
node* temp1 = root->left;
node* temp2 = temp1->right;
temp1->right = root;
root->parent = temp1;
root->left = temp2;
if(temp2)
temp2->parent = root;
// Recompute heights bottom-up: demoted node first, new root second.
root->height = max(height(root->left), height(root->right))+1;
temp1->height = max(height(temp1->left), height(temp1->right))+1;
temp1->parent = parent;
return temp1;
}
// AVL balance factor: left-subtree height minus right-subtree height
// (0 for an empty subtree).
__device__ int get_balance(node *root)
{
    if (root == NULL)
        return 0;
    int left_height = height(root->left);
    int right_height = height(root->right);
    return left_height - right_height;
}
__device__ int MASTER_LOCK = 0;
// Walk from the freshly inserted node's parent toward the root, refreshing
// cached heights and applying at most one AVL rotation (LL/RR/LR/RL) at the
// first unbalanced ancestor. `key` is the just-inserted value, used to decide
// which rotation case applies. The root case (p->parent == NULL) updates
// global_Root instead of a parent child pointer.
// NOTE(review): recursion stops after the first rotation; with a single
// insertion under the coarse master lock one rotation restores balance.
__device__ void coarse_rebalance(node* p, int key) {
//printf("rebalance : %d\n", p->data);
if (p->parent) {
p->height = max(height(p->left), height(p->right)) + 1;
int balance = get_balance(p);
bool rebalancing_occured = false;
// Left Left Case
if (balance > 1 && key < p->left->data) {
node* parent=p->parent;
// Attach the rotated subtree on whichever side of the parent p was on.
if (p->data < p->parent->data) {
parent->left = right_rotate(p, p->parent);
} else {
parent->right = right_rotate(p, p->parent);
}
rebalancing_occured = true;
}
// Right Right Case
else if (balance < -1 && key > p->right->data) {
node* parent=p->parent;
if (p->data < p->parent->data) {
parent->left = left_rotate(p, p->parent);
} else {
parent->right = left_rotate(p, p->parent);
}
rebalancing_occured = true;
}
// Left Right Case
else if (balance > 1 && key > p->left->data)
{
p->left = left_rotate(p->left, p);
node* parent=p->parent;
if (p->data < p->parent->data) {
parent->left = right_rotate(p, p->parent);
} else {
parent->right = right_rotate(p, p->parent);
}
rebalancing_occured = true;
}
// Right Left Case
else if (balance < -1 && key < p->right->data)
{
p->right = right_rotate(p->right, p);
node* parent=p->parent;
if (p->data < p->parent->data) {
parent->left = left_rotate(p, p->parent);
} else {
parent->right = left_rotate(p, p->parent);
}
rebalancing_occured = true;
}
// Keep climbing until a rotation happened or the root is reached.
if (!rebalancing_occured)
coarse_rebalance(p->parent, key);
} else {
// p is the tree root: same four cases, but the result replaces global_Root.
p->height = max(height(p->left), height(p->right)) + 1;
int balance = get_balance(p);
//printf("jag %d %d",balance,p->data);
if (balance > 1 && key < p->left->data) {
global_Root = right_rotate(p, NULL);
}
// Right Right Case
else if (balance < -1 && key > p->right->data) {
global_Root = left_rotate(p, NULL);
}
// Left Right Case
else if (balance > 1 && key > p->left->data)
{
p->left = left_rotate(p->left, p);
global_Root = right_rotate(p, NULL);
}
// Right Left Case
else if (balance < -1 && key < p->right->data)
{
p->right = right_rotate(p->right, p);
global_Root = left_rotate(p, NULL);
}
}
return;
}
// Coarse-grained AVL insert: each thread spins on the single global
// MASTER_LOCK, then performs an ordinary (serial) BST insert followed by
// rebalancing from the new node upward.
// NOTE(review): an empty tree (global_Root == NULL) is silently not handled —
// the walk loop never runs and nothing is inserted; confirm global_Root is
// seeded before launch.
// NOTE(review): spinning on MASTER_LOCK from many threads of one warp can
// livelock on pre-Volta SIMT scheduling — confirm target architecture.
__device__ void coarse_insert(int key) {
bool flag = true;
while (flag) {
if (!atomicExch(&MASTER_LOCK, 1)) {
// Lock acquired: walk down to the insertion point.
node* curr = global_Root;
node* parent = NULL;
while (curr != NULL) {
parent = curr;
if (key < curr->data)
curr = curr->left;
else
curr = curr->right;
if (curr == NULL) {
// Fell off the tree: attach the new node and rebalance upward.
if (key < parent->data) {
parent->left = new_node(key, parent);
coarse_rebalance(parent->left, key);
} else {
parent->right = new_node(key, parent);
coarse_rebalance(parent->right, key);
}
} else {
// NOTE(review): releases parent->sema without a matching lock()
// under the coarse scheme — looks like a leftover from a
// fine-grained variant; verify intent.
if (parent)
atomicExch(&(parent->sema), 0);
}
}
flag = false;
atomicExch(&MASTER_LOCK, 0);
}
}
return;
}
// Deletion is not implemented; stub kept so the interface stays complete.
__device__ void coarse_delete(node* root, int key) {
return;
}
// BST lookup (iterative form): returns the node holding `key`, or NULL when
// the key is absent.
__device__ node* find(node* root, int key) {
    node* cur = root;
    while (cur != NULL) {
        if (cur->data == key)
            return cur;
        cur = (cur->data > key) ? cur->left : cur->right;
    }
    return NULL;
}
// Pre-order traversal: print the root's key, then the left subtree, then the
// right subtree.
__device__ void pre_order(node* root)
{
    if (root == NULL)
        return;
    printf("%d ", root->data);
    pre_order(root->left);
    pre_order(root->right);
}
// In-order traversal (left, root, right): prints keys in ascending order.
__device__ void in_order(node* root)
{
if(root != NULL)
{
in_order(root->left);
printf("%d ", root->data);
in_order(root->right);
}
return;
}
|
9,181 | #include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
using namespace std;
#define eps 1e-5
//在原来的情况下,读可以合并,因为可以索引到连续的内存空间(例如行优先),但是写的时候只能跳跃访存写了。
//现在利用shared把一个tile的读进shared进行转置写,就可以高效了
// Tiled matrix transpose, b = a^T, for a row-major m x n matrix `a`
// (row length n) into an n x m matrix `b` (row length m).
// Launch layout (see run_matmul): 8x8 thread blocks tiling the matrix.
//
// Fixes over the previous version:
//  * The tile is now written back with swapped indices from the mirrored
//    block coordinates, so global READS and WRITES are both coalesced; before,
//    each thread stored its own element at a strided address, which defeated
//    the purpose of staging through shared memory.
//  * Bounds guards added, so sizes that are not multiples of the tile width
//    are safe (the guard was previously commented out).
//  * Tile padded by one column to avoid shared-memory bank conflicts.
//  * Output indexing uses m for b's row length, so rectangular matrices are
//    handled correctly (previously only square inputs were correct).
__global__ void mat_transpose(const float *a, float *b, int n, int m){
    const int TILE_WIDTH = 8;
    __shared__ float tile[TILE_WIDTH][TILE_WIDTH + 1];
    int bx = blockIdx.x, by = blockIdx.y;
    int tx = threadIdx.x, ty = threadIdx.y;

    // Element this thread loads from a.
    int col = TILE_WIDTH * bx + tx;
    int row = TILE_WIDTH * by + ty;
    if (col < n && row < m)
        tile[ty][tx] = a[row * n + col];
    __syncthreads();   // whole tile staged before any transposed read

    // Element this thread stores to b: mirrored tile, swapped in-tile indices.
    int out_col = TILE_WIDTH * by + tx;
    int out_row = TILE_WIDTH * bx + ty;
    if (out_col < m && out_row < n)
        b[out_row * m + out_col] = tile[tx][ty];
}
// Verify that b equals the transpose of a (both row-major with row length m,
// as produced for the square case used by run_matmul). Terminates the process
// with an error message on the first mismatch beyond eps.
void check_mat_transpose(const float *a, const float *b, int n, int m){
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < m; col++) {
            float expect = a[row * m + col];
            float actual = b[col * m + row];
            if (fabs(expect - actual) > eps) {
                printf("Not equal !!\n");
                exit(1);
            }
        }
    }
    printf("Check matmul success!!!\n");
}
// Drives the shared-memory transpose demo on a 128x128 random matrix and
// verifies the result on the host.
// NOTE(review): hc/dc are allocated and freed but never used — leftovers from
// the matmul/matadd variants kept in the commented-out code below.
void run_matmul(){
int n = 1<<7;
int m = 1<<7;
int total = n * m;
size_t size = (total) * sizeof(float);
float *ha = (float*)malloc(size);
float *hb = (float*)malloc(size);
float *hc = (float*)malloc(size);
float *da = NULL, *db = NULL, *dc = NULL;
cudaMalloc((void**)&da, size);
cudaMalloc((void**)&db, size);
cudaMalloc((void**)&dc, size);
// Random input in [0, 1].
for(int i = 0; i < total; i++){
ha[i] = rand() * 1.0 / (RAND_MAX);
//hb[i] = rand() * 1.0 / (RAND_MAX);
//cout<<ha[i]<<' '<<hb[i]<<endl;
}
// for(int i = 0; i < total; i++){
// cout<<ha[i]<< ' ';
// }
// cout<<endl;
// for(int i = 0; i < total; i++){
// cout<<hb[i]<< ' ';
// }
// cout<<endl;
cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
//cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice);
//int threadPerBlock = 512;
//int blockPerGrid = (total + threadPerBlock - 1) / threadPerBlock;
//clock_t st = clock();
// 8x8 tiles matching TIlE_WIDTH in the kernel; ceil-divided grid.
dim3 threadPerBlock(8,8);
dim3 blockPerGrid((n+threadPerBlock.x-1)/threadPerBlock.x, (m+threadPerBlock.y-1)/threadPerBlock.y);
mat_transpose<<<blockPerGrid, threadPerBlock>>>(da, db, n, m);
//matmul<<<blockPerGrid, threadPerBlock>>>(da, db, dc, n, m);
// dim3 threadPerBlock(512);
// dim3 blockPerGrid((threadPerBlock.x + total - 1) / threadPerBlock.x);
// matadd_1d<<<blockPerGrid, threadPerBlock>>>(da, db, dc, n, m);
// dim3 threadPerBlock(512, 1);
// dim3 blockPerGrid((n+threadPerBlock.x-1)/threadPerBlock.x, (m+threadPerBlock.y-1)/threadPerBlock.y);
// matadd_2d<<<blockPerGrid, threadPerBlock>>>(da, db, dc, n, m);
//clock_t ed = clock();
//cout<<"time used: "<<ed-st<<endl;
cudaDeviceSynchronize();
cudaMemcpy(hb, db, size, cudaMemcpyDeviceToHost);
//check_matadd(ha, hb, hc, n, m);
check_mat_transpose(ha, hb, n, m);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(ha);
free(hb);
free(hc);
}
// Entry point: runs the shared-memory transpose demo.
int main(){
//run_matmul_partition();
run_matmul();
return 0;
} |
9,182 | #include <iostream>
#define N (1024 * 1024)
#define FULL_DATA_SIZE (N * 10)
using namespace std;
// Measures H2D and D2H copy bandwidth using ordinary pageable host memory.
// NOTE: the name shadows ::memcpy; kept unchanged for the existing call in main().
void memcpy()
{
    int *dev;
    int *host = new int[FULL_DATA_SIZE];
    cudaMalloc((void**)&dev, FULL_DATA_SIZE * sizeof(int));
    for (int i = 0; i < FULL_DATA_SIZE; i++)
        host[i] = rand();

    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Time host -> device.
    cudaEventRecord(start, 0);
    cudaMemcpy(dev, host, FULL_DATA_SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << "host->device elapsed time: " << elapsedTime << " ms" << endl;

    // Time device -> host.
    cudaEventRecord(start, 0);
    cudaMemcpy(host, dev, FULL_DATA_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << "device->host elapsed time: " << elapsedTime << " ms" << endl;

    // FIX: the original leaked both buffers and both events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev);
    delete[] host;
}
// Same bandwidth measurement as memcpy(), but with page-locked (pinned) host
// memory, which typically roughly doubles transfer throughput.
void memcpy_PageLocked()
{
    int *dev;
    int *host;
    cudaMalloc((void**)&dev, FULL_DATA_SIZE * sizeof(int));
    cudaHostAlloc((void**)&host, FULL_DATA_SIZE * sizeof(int), cudaHostAllocDefault);
    for (int i = 0; i < FULL_DATA_SIZE; i++)
        host[i] = rand();

    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Time host -> device.
    cudaEventRecord(start, 0);
    cudaMemcpy(dev, host, FULL_DATA_SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << "page-locked host->device elapsed time: " << elapsedTime << " ms" << endl;

    // Time device -> host.
    cudaEventRecord(start, 0);
    cudaMemcpy(host, dev, FULL_DATA_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << "page-locked device->host elapsed time: " << elapsedTime << " ms" << endl;

    // FIX: the original leaked the pinned buffer, the device buffer and the
    // events. Pinned memory must be released with cudaFreeHost.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev);
    cudaFreeHost(host);
}
// Runs the pageable vs page-locked transfer comparison.
// NOTE(review): deviceOverlap gates copy/compute overlap, which these blocking
// copies don't use — presumably a leftover from the streams version of this demo.
int main()
{
cudaDeviceProp prop;
int whichDevice;
cudaGetDevice(&whichDevice);
cudaGetDeviceProperties(&prop, whichDevice);
if (!prop.deviceOverlap) {
cout << "Device does not support overlapping" << endl;
return 1;
}
memcpy();
cout << endl;
memcpy_PageLocked();
return 0;
}
|
9,183 | #include <stdio.h>
// Prints a greeting with a linearized global thread id computed from a 2D
// grid (x,y) and a 3D block (x,y,z).
__global__ void device_hello(){
//uncomment this line to print only one time (unless you have multiple blocks)
//if(threadIdx.x==0)
// Linear block index times threads-per-block...
int block = (blockIdx.x + blockIdx.y * gridDim.x) * (blockDim.x * blockDim.y * blockDim.z) ;
// ...plus the thread's linear offset within its block.
int thread = (threadIdx.z * (blockDim.x * blockDim.y) ) + (threadIdx.y * blockDim.x) + threadIdx.x;
int global_id = block + thread;
printf("Hello world! from the device! Global_Thread_Id:%d\n",global_id);
return;
}
int main(void){
    // Disable stdout buffering so host and device prints interleave promptly.
    setbuf(stdout, NULL);

    // Greeting from the host first.
    printf("Hello world! from the host!\n");

    // 2x2x1 grid of 2x2x2 blocks = 32 device threads, each printing its id.
    // (If you change the y or z dimensions, update the kernel's print.)
    device_hello<<<dim3(2,2,1), dim3(2,2,2)>>>();

    // Block until the device printf output has been flushed back to the host;
    // without this the process may exit before anything prints.
    cudaDeviceSynchronize();

    printf("Goodbye world! from the host!\n");
    return 0;
}
|
9,184 | /* File: vec_add.cu
* Purpose: Implement vector addition on a gpu using cuda
*
* Compile: nvcc [-g] [-G] -o vec_add vec_add.cu
* Run: ./vec_add
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
/* Elementwise z = x + y. Indexes by threadIdx.x only, so a single-block
 * launch is assumed; threads past n do nothing. */
__global__ void Vec_add(float x[], float y[], float z[], int n) {
    int i = threadIdx.x;
    if (i >= n)
        return;
    z[i] = x[i] + y[i];
}
/* Host driver: builds x = 1..n and y = n..1 (so every sum is n+1), adds them
 * on the GPU with a single 1000-thread block, and prints the first m entries
 * of each vector. */
int main(int argc, char* argv[]) {
    int n, m;
    float *h_x, *h_y, *h_z;
    float *d_x, *d_y, *d_z;
    size_t size;

    /* Vector length and how many entries to print */
    n = 1000;
    m = 20;
    size = n*sizeof(float);

    // Allocate and fill host vectors.
    h_x = (float*) malloc(size);
    h_y = (float*) malloc(size);
    h_z = (float*) malloc(size);
    for (int i = 0; i < n; i++) {
        h_x[i] = i+1;
        h_y[i] = n-i;
    }

    // Print original vectors.
    printf("h_x = ");
    for (int i = 0; i < m; i++){
        printf("%.1f ", h_x[i]);
    }
    printf("\n\n");
    printf("h_y = ");
    for (int i = 0; i < m; i++){
        printf("%.1f ", h_y[i]);
    }
    printf("\n\n");

    /* Allocate vectors in device memory */
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    cudaMalloc(&d_z, size);

    /* Copy vectors from host memory to device memory */
    cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);

    /* One block of n threads; the kernel guards against extras. */
    Vec_add<<<1,1000>>>(d_x, d_y, d_z, n);
    /* FIX: cudaThreadSynchronize() is deprecated (and removed from newer CUDA
     * toolkits); cudaDeviceSynchronize() is the supported equivalent. */
    cudaDeviceSynchronize();

    cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
    printf("The sum is: \n");
    for (int i = 0; i < m; i++){
        printf("%.1f ", h_z[i]);
    }
    printf("\n");

    /* Free device memory */
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    /* Free host memory */
    free(h_x);
    free(h_y);
    free(h_z);
    return 0;
} /* main */
|
9,185 | #include <iostream>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#define LENGTH 10000000
using namespace std;
// Elementwise c = a + b over LENGTH floats; the guard covers the partial tail
// block produced by the ceil-divided launch in main().
__global__ void vector_add(float *a, float *b, float *c){
int index = threadIdx.x + blockDim.x * blockIdx.x;
if (index<LENGTH){
c[index] = a[index] + b[index];
}
}
// Stand-in for CPU-side work: blocks the host for ~10 seconds with usleep so
// the demo can show the kernel launch returning asynchronously.
void myCpu(){
unsigned int microseconds = 10000000;
usleep(microseconds);
}
// Vector addition demo that overlaps a GPU kernel with 10 s of host "work"
// (myCpu) to illustrate asynchronous kernel launches; the event-based timing
// therefore includes the host delay, since stop is recorded after myCpu().
int main(){
    // Host buffers: a[i] = b[i] = i.
    float *a_vec, *b_vec, *c_vec;
    a_vec = (float*)malloc(LENGTH*sizeof(float));
    b_vec = (float*)malloc(LENGTH*sizeof(float));
    c_vec = (float*)malloc(LENGTH*sizeof(float));
    for(int i=0 ; i< LENGTH; i++){
        a_vec[i] = i;
        b_vec[i] = i;
    }

    // Device buffers + input upload.
    float *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, LENGTH*sizeof(float));
    cudaMalloc((void**)&d_b, LENGTH*sizeof(float));
    cudaMalloc((void**)&d_c, LENGTH*sizeof(float));
    cudaMemcpy(d_a, a_vec, LENGTH*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b_vec, LENGTH*sizeof(float), cudaMemcpyHostToDevice);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // Ceil-divided grid so the partial last block is covered; kernel guards the tail.
    vector_add<<<(int)ceil((float)LENGTH/1024),1024>>>(d_a, d_b, d_c);
    // Do host work while the kernel runs — launch returns immediately.
    myCpu();
    // cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaMemcpy(c_vec, d_c, LENGTH*sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "Time taken : " << milliseconds << std::endl;
    std::cout<<"First 3 elements are "<<c_vec[0]<<" "<<c_vec[1]<<" "<<c_vec[2]<<'\n';

    // FIX: the original leaked the three device buffers and both events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a_vec);
    free(b_vec);
    free(c_vec);
}
|
9,186 | #include <stdio.h>
#include <cuda.h>
// In-place elementwise square of a[0..N); guarded for the padded tail block.
__global__ void square_cuda(float *a, int N) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx<N) a[idx] = a[idx] * a[idx];
}
extern "C" {
// C-linkage wrapper: squares a[0..N) in place on the GPU, copying the data
// down and back around the kernel launch.
void square(float *a, int N) {
    float* dev;
    size_t bytes = N * sizeof(float);
    cudaMalloc((void **) &dev, bytes);
    cudaMemcpy(dev, a, bytes, cudaMemcpyHostToDevice);
    // Same launch geometry as before: 4-thread blocks, ceil-divided grid.
    const int threads = 4;
    int blocks = N/threads + (N%threads == 0 ? 0 : 1);
    square_cuda <<< blocks, threads >>> (dev, N);
    cudaMemcpy(a, dev, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dev);
}
}
|
9,187 | #include <cuda_runtime_api.h>
#include <stdint.h>
// ReLU forward: out_act[i] = in_act[i] when positive, else 0
// (x * (x > 0.0f) produces exactly that).
__global__ void activate_rect_fwd_kernel(
    const float *in_act,
    uint32_t dim,
    float *out_act)
{
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim)
    return;
  float v = in_act[i];
  out_act[i] = v * (v > 0.0f);
}

// C-linkage launcher: 1024-thread blocks, ceil-divided grid, async on `stream`.
extern "C" void neuralops_cuda_activate_rect_fwd(
    const float *in_act,
    size_t dim,
    float *out_act,
    cudaStream_t stream)
{
  size_t threads = 1024;
  size_t blocks = (dim + threads - 1) / threads;
  activate_rect_fwd_kernel<<<blocks, threads, 0, stream>>>(
      in_act, dim, out_act);
}
// ReLU backward: passes out_delta through where the forward input was
// positive, zero elsewhere.
__global__ void activate_rect_bwd_kernel(
    const float *in_act,
    uint32_t dim,
    const float *out_delta,
    float *in_delta)
{
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim)
    return;
  float v = in_act[i];
  in_delta[i] = out_delta[i] * (v > 0.0f);
}

// C-linkage launcher: 1024-thread blocks, ceil-divided grid, async on `stream`.
extern "C" void neuralops_cuda_activate_rect_bwd(
    const float *in_act,
    size_t dim,
    const float *out_delta,
    float *in_delta,
    cudaStream_t stream)
{
  size_t threads = 1024;
  size_t blocks = (dim + threads - 1) / threads;
  activate_rect_bwd_kernel<<<blocks, threads, 0, stream>>>(
      in_act, dim, out_delta, in_delta);
}
// ReLU second-order backward: same masking as the first-order backward pass,
// applied to out_delta2.
__global__ void activate_rect_bwd2_kernel(
    const float *in_act,
    uint32_t dim,
    const float *out_delta2,
    float *in_delta2)
{
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim)
    return;
  float v = in_act[i];
  in_delta2[i] = out_delta2[i] * (v > 0.0f);
}

// C-linkage launcher: 1024-thread blocks, ceil-divided grid, async on `stream`.
extern "C" void neuralops_cuda_activate_rect_bwd2(
    const float *in_act,
    size_t dim,
    const float *out_delta2,
    float *in_delta2,
    cudaStream_t stream)
{
  size_t threads = 1024;
  size_t blocks = (dim + threads - 1) / threads;
  activate_rect_bwd2_kernel<<<blocks, threads, 0, stream>>>(
      in_act, dim, out_delta2, in_delta2);
}
// ReLU directional (R-operator) forward: propagates in_r_val through the
// positive-input mask of the primal values.
__global__ void activate_rect_rfwd_kernel(
    const float *in_val,
    uint32_t dim,
    const float *in_r_val,
    float *out_r_val)
{
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim)
    return;
  float v = in_val[i];
  out_r_val[i] = in_r_val[i] * (v > 0.0f);
}

// C-linkage launcher: 1024-thread blocks, ceil-divided grid, async on `stream`.
extern "C" void neuralops_cuda_activate_rect_rfwd(
    const float *in_val,
    size_t dim,
    const float *in_r_val,
    float *out_r_val,
    cudaStream_t stream)
{
  size_t threads = 1024;
  size_t blocks = (dim + threads - 1) / threads;
  activate_rect_rfwd_kernel<<<blocks, threads, 0, stream>>>(
      in_val, dim, in_r_val, out_r_val);
}
// ReLU directional (R-operator) backward: propagates out_r_grad through the
// positive-input mask of the primal values.
__global__ void activate_rect_rbwd_kernel(
    const float *in_val,
    uint32_t dim,
    const float *out_r_grad,
    float *in_r_grad)
{
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim)
    return;
  float v = in_val[i];
  in_r_grad[i] = out_r_grad[i] * (v > 0.0f);
}

// C-linkage launcher: 1024-thread blocks, ceil-divided grid, async on `stream`.
extern "C" void neuralops_cuda_activate_rect_rbwd(
    const float *in_val,
    size_t dim,
    const float *out_r_grad,
    float *in_r_grad,
    cudaStream_t stream)
{
  size_t threads = 1024;
  size_t blocks = (dim + threads - 1) / threads;
  activate_rect_rbwd_kernel<<<blocks, threads, 0, stream>>>(
      in_val, dim, out_r_grad, in_r_grad);
}
// Leaky ReLU forward: y = x for x > 0, neg_slope * x otherwise.
__global__ void activate_leakrect_fwd_kernel(
    const float *in_act,
    uint32_t dim,
    float *out_act,
    float neg_slope)
{
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim)
    return;
  float v = in_act[i];
  float scale = (v > 0.0f) ? 1.0f : neg_slope;
  out_act[i] = v * scale;
}

// C-linkage launcher: 1024-thread blocks, ceil-divided grid, async on `stream`.
extern "C" void neuralops_cuda_activate_leakrect_fwd(
    const float *in_act,
    size_t dim,
    float *out_act,
    float neg_slope,
    cudaStream_t stream)
{
  size_t threads = 1024;
  size_t blocks = (dim + threads - 1) / threads;
  activate_leakrect_fwd_kernel<<<blocks, threads, 0, stream>>>(
      in_act, dim, out_act, neg_slope);
}
// Leaky ReLU backward: scales out_delta by 1 where the forward input was
// positive, by neg_slope elsewhere.
__global__ void activate_leakrect_bwd_kernel(
    const float *in_act,
    uint32_t dim,
    const float *out_delta,
    float *in_delta,
    float neg_slope)
{
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim)
    return;
  float v = in_act[i];
  float scale = (v > 0.0f) ? 1.0f : neg_slope;
  in_delta[i] = out_delta[i] * scale;
}

// C-linkage launcher: 1024-thread blocks, ceil-divided grid, async on `stream`.
extern "C" void neuralops_cuda_activate_leakrect_bwd(
    const float *in_act,
    size_t dim,
    const float *out_delta,
    float *in_delta,
    float neg_slope,
    cudaStream_t stream)
{
  size_t threads = 1024;
  size_t blocks = (dim + threads - 1) / threads;
  activate_leakrect_bwd_kernel<<<blocks, threads, 0, stream>>>(
      in_act, dim, out_delta, in_delta, neg_slope);
}
// Logistic (sigmoid) forward: out = 1 / (1 + exp(-x)).
__global__ void activate_logistic_fwd_kernel(
    const float *in_buf,
    uint32_t dim,
    float *out_buf)
{
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim)
    return;
  float v = in_buf[i];
  out_buf[i] = 1.0f / (1.0f + expf(-v));
}

// C-linkage launcher: 1024-thread blocks, ceil-divided grid, async on `stream`.
extern "C" void neuralops_cuda_activate_logistic_fwd(
    const float *in_buf,
    size_t dim,
    float *out_buf,
    cudaStream_t stream)
{
  size_t threads = 1024;
  size_t blocks = (dim + threads - 1) / threads;
  activate_logistic_fwd_kernel<<<blocks, threads, 0, stream>>>(
      in_buf, dim, out_buf);
}
// Logistic backward: recomputes sigma(x) from the forward INPUT and applies
// the derivative sigma * (1 - sigma) to out_delta.
__global__ void activate_logistic_bwd_kernel(
    const float *in_buf,
    uint32_t dim,
    const float *out_delta,
    float *in_delta)
{
  uint32_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= dim)
    return;
  float v = in_buf[i];
  float sig = 1.0f / (1.0f + expf(-v));
  in_delta[i] = sig * (1.0f - sig) * out_delta[i];
}

// C-linkage launcher: 1024-thread blocks, ceil-divided grid, async on `stream`.
extern "C" void neuralops_cuda_activate_logistic_bwd(
    const float *in_buf,
    size_t dim,
    const float *out_delta,
    float *in_delta,
    cudaStream_t stream)
{
  size_t threads = 1024;
  size_t blocks = (dim + threads - 1) / threads;
  activate_logistic_bwd_kernel<<<blocks, threads, 0, stream>>>(
      in_buf, dim, out_delta, in_delta);
}
|
9,188 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define MASK_WIDTH 5
#define COMMENT "Histogram_GPU"
#define RGB_COMPONENT_COLOR 255
typedef struct {
unsigned char red, green, blue;
} PPMPixel;
typedef struct {
int x, y;
PPMPixel *data;
} PPMImage;
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double rtclock()
{
    struct timeval now;
    struct timezone tz;
    int rc = gettimeofday(&now, &tz);
    if (rc != 0) printf("Error return from gettimeofday: %d", rc);
    return now.tv_sec + now.tv_usec * 1.0e-6;
}
/* Load a binary (P6) PPM image from `filename`.
 * Exits with a message on any malformed input; the caller owns the returned
 * image and its pixel buffer. */
static PPMImage *readPPM(const char *filename) {
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;

    fp = fopen(filename, "rb");
    if (!fp) {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }

    /* Magic number must be "P6" (binary RGB). */
    if (!fgets(buff, sizeof(buff), fp)) {
        perror(filename);
        exit(1);
    }
    if (buff[0] != 'P' || buff[1] != '6') {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }

    img = (PPMImage *) malloc(sizeof(PPMImage));
    if (!img) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }

    /* Skip '#' comment lines between the magic number and the size. */
    c = getc(fp);
    while (c == '#') {
        while (getc(fp) != '\n')
            ;
        c = getc(fp);
    }
    ungetc(c, fp);

    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    if (fscanf(fp, "%d", &rgb_comp_color) != 1) {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n",
                filename);
        exit(1);
    }
    if (rgb_comp_color != RGB_COMPONENT_COLOR) {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    /* Consume the single whitespace after the maxval header. */
    while (fgetc(fp) != '\n')
        ;

    img->data = (PPMPixel*) malloc(img->x * img->y * sizeof(PPMPixel));
    /* BUG FIX: the original re-checked `img` here instead of the pixel
     * buffer that was just allocated, so a failed malloc went unnoticed. */
    if (!img->data) {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }

    if (fread(img->data, 3 * img->x, img->y, fp) != img->y) {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }

    fclose(fp);
    return img;
}
// Write `img` to stdout as a binary P6 PPM.
// NOTE(review): this fclose()s stdout, so nothing can be printed afterwards —
// only safe when writePPM is the program's final output (here it is only
// called from a commented-out line in main).
void writePPM(PPMImage *img) {
fprintf(stdout, "P6\n");
fprintf(stdout, "# %s\n", COMMENT);
fprintf(stdout, "%d %d\n", img->x, img->y);
fprintf(stdout, "%d\n", RGB_COMPONENT_COLOR);
fwrite(img->data, 3 * img->x, img->y, stdout);
fclose(stdout);
}
// MASK_WIDTH x MASK_WIDTH box blur on the CPU: each pixel of `image` becomes
// the average of its neighborhood read from `image_copy`. Out-of-bounds
// neighbors contribute zero, but the divisor stays MASK_WIDTH*MASK_WIDTH
// (darkened borders — same as the original behavior).
void Smoothing_CPU_Serial(PPMImage *image, PPMImage *image_copy) {
    const int radius = (MASK_WIDTH - 1) / 2;
    for (int row = 0; row < image->y; row++) {
        for (int col = 0; col < image->x; col++) {
            int sum_red = 0, sum_blue = 0, sum_green = 0;
            // Accumulate the in-bounds part of the neighborhood.
            for (int ny = row - radius; ny <= row + radius; ny++) {
                for (int nx = col - radius; nx <= col + radius; nx++) {
                    if (nx >= 0 && ny >= 0 && ny < image->y && nx < image->x) {
                        sum_red += image_copy->data[(ny * image->x) + nx].red;
                        sum_blue += image_copy->data[(ny * image->x) + nx].blue;
                        sum_green += image_copy->data[(ny * image->x) + nx].green;
                    }
                }
            }
            image->data[(row * image->x) + col].red = sum_red / (MASK_WIDTH*MASK_WIDTH);
            image->data[(row * image->x) + col].blue = sum_blue / (MASK_WIDTH*MASK_WIDTH);
            image->data[(row * image->x) + col].green = sum_green / (MASK_WIDTH*MASK_WIDTH);
        }
    }
}
/* Reads a PPM twice (source + in-place output), runs the serial CPU blur, and
 * reports the elapsed time measured with CUDA events. */
int main(int argc, char *argv[]) {
    /* Exactly one argument — the input PPM path — is required. */
    if( argc != 2 ) {
        printf("Too many or no one arguments supplied.\n");
        return 1;   /* FIX: previously fell through and dereferenced argv[1] */
    }
    char *filename = argv[1];
    float milliseconds = 0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    /* Two copies: `image` is the read-only neighborhood source,
       `image_output` is smoothed in place. */
    PPMImage *image = readPPM(filename);
    PPMImage *image_output = readPPM(filename);

    cudaEventRecord(start);
    Smoothing_CPU_Serial(image_output, image);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);

    //writePPM(image_output);
    printf("Tempo: %0.3f\n",milliseconds);

    /* FIX: release the pixel buffers (not just the structs) and the events. */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(image->data);
    free(image);
    free(image_output->data);
    free(image_output);
    return 0;
}
|
9,189 | #include <stdio.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <stdlib.h>
#define N (2048*2048)
#define THREADS_PER_BLOCK 512
// Elementwise c = a + b. NOTE(review): no bounds guard — only correct when the
// launch covers exactly N threads (main launches N/THREADS_PER_BLOCK blocks
// and N is a multiple of THREADS_PER_BLOCK).
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
// Print the first five entries of `array`, space-separated, on one line.
void print_few(int* array){
    for (int k = 0; k < 5; ++k)
        printf("%d ", array[k]);
    printf("\n");
}
// Fill a[0..n) with pseudo-random digits in [0, 10) using rand().
void random_ints(int *a, int n){
    for (int k = 0; k < n; ++k)
        a[k] = rand() % 10;
}
// Adds two N-element random int vectors on the GPU and prints the first few
// entries of each. N is a multiple of THREADS_PER_BLOCK, so the exact-division
// grid covers every element (the kernel has no bounds guard).
int main(void) {
int *a, *b, *c; // host copies of a, b, c
int *d_a, *d_b, *d_c; // device copies of a, b, c
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, size);
// Alloc space for host copies of a, b, c and setup input values
a = (int *)malloc(size); random_ints(a, N);
b = (int *)malloc(size); random_ints(b, N);
c = (int *)malloc(size);
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch add() kernel on GPU
add<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
// Copy result back to host (blocking copy also synchronizes with the kernel)
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
print_few(a);
print_few(b);
print_few(c);
// Cleanup
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
} |
9,190 | #ifndef _GNU_SOURCE
#define _GNU_SOURCE 98
#endif
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
/**
* Sleep for a given number of milliseconds
* \param ms The number of milliseconds to sleep for
*/
/**
 * Sleep for a given number of milliseconds
 * \param ms The number of milliseconds to sleep for
 */
void sleep_ms(size_t ms) {
    // Split into whole seconds plus leftover nanoseconds for nanosleep.
    struct timespec req;
    req.tv_sec = ms / 1000;
    req.tv_nsec = (ms % 1000) * 1000000;
    // When interrupted by a signal, nanosleep stores the remaining time back
    // into `req`; loop until the full interval has elapsed.
    while (nanosleep(&req, &req) != 0) {}
}
/**
* Get the time in milliseconds since UNIX epoch
*/
/**
 * Get the time in milliseconds since UNIX epoch
 */
size_t time_ms() {
    struct timeval now;
    // gettimeofday should not fail here; treat failure as fatal anyway.
    if (gettimeofday(&now, NULL) == -1) {
        perror("gettimeofday");
        exit(2);
    }
    return now.tv_sec * 1000 + now.tv_usec / 1000;
}
|
9,191 | #include <stdio.h>
const int N = 4096; // Matrix size
const float A_val = 1.0f; // Values of all elements of A
const float B_val = 2.0f; // Values of all elements of B
// Macro for checking errors in CUDA API calls
// NOTE(review): on failure this exits with status 0, which reads as success
// to the shell; consider exit(1).
#define cudaErrorCheck(call) \
do{ \
cudaError_t cuErr = call; \
if(cudaSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr)); \
exit(0); \
} \
}while(0)
/* ----------------------------------------------------------------------------
Matrix multiply kernel
- Each CUDA thread computes 1 element of the matrix via dot product
i.e., C[row,col] = SUM(from i=0,N){A[row,i]*B[i,col]}
- 1D indexing (row-major) through matrix gives
index = row * n + col
- Based on 1D indexing, we have
single_row_index = row * n + i
single_col_index = i * n + col
---------------------------------------------------------------------------- */
/* Naive dense matrix multiply: C[row,col] = sum_k A[row,k] * B[k,col] for
 * n x n row-major matrices. One thread per output element; the 2D grid is
 * padded, so out-of-range threads exit early. */
__global__ void mat_mul(const float *A, const float *B, float *C, int n) {
    // Grid-wide coordinates of this thread's output element.
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= n || row >= n)
        return;
    float acc = 0;
    for (int k = 0; k < n; k++) {
        acc += A[row * n + k] * B[k * n + col];   // dot product of row and column
    }
    C[row * n + col] = acc;
}
// Fills A with 1s and B with 2s, multiplies on the GPU, and checks every
// element of C against the closed-form value A_val*B_val*N.
// NOTE(review): error paths exit(0)/return -1 inconsistently, and host/device
// buffers leak on those paths — acceptable for a demo, worth noting.
int main(int argc, char *argv[]){
float *h_A, *h_B, *h_C;
float *d_A, *d_B, *d_C;
// Allocate memory for arrays h_A, h_B, h_C on host
h_A = new float[N*N];
h_B = new float[N*N];
h_C = new float[N*N];
// Initialize host arrays
for (int i = 0; i < N*N; i++){
h_A[i] = A_val;
h_B[i] = B_val;
h_C[i] = 0;
}
// Allocate memory for arrays d_A, d_B, d_C on device
cudaErrorCheck( cudaMalloc(&d_A, N*N*sizeof(float)) );
cudaErrorCheck( cudaMalloc(&d_B, N*N*sizeof(float)) );
cudaErrorCheck( cudaMalloc(&d_C, N*N*sizeof(float)) );
// Copy values from host arrays into device arrays
cudaErrorCheck( cudaMemcpy(d_A, h_A, N*N*sizeof(float), cudaMemcpyHostToDevice) );
cudaErrorCheck( cudaMemcpy(d_B, h_B, N*N*sizeof(float), cudaMemcpyHostToDevice) );
/* -------------------------------------------------------------
Set execution configuration parameters
threads_per_block: number of CUDA threads per grid block
blocks_in_grid : number of blocks in grid
(These are structs with 3 member variables x, y, z)
------------------------------------------------------------ */
dim3 threads_per_block(16,16,1);
dim3 blocks_in_grid(ceil(float(N)/threads_per_block.x), ceil(float(N)/threads_per_block.y), 1);
// Launch kernel
mat_mul<<<blocks_in_grid, threads_per_block>>>(d_A, d_B, d_C, N);
// Check for errors in the kernel launch (e.g., invalid configuration parameters)
cudaError_t cuErrSync = cudaGetLastError();
// Check for errors on the device after control is returned to host
cudaError_t cuErrAsync = cudaDeviceSynchronize();
if(cuErrSync != cudaSuccess){
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErrSync)); exit(0);
}
if(cuErrAsync != cudaSuccess){
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErrAsync)); exit(0);
}
// Copy results back to host
cudaErrorCheck( cudaMemcpy(h_C, d_C, N*N*sizeof(float), cudaMemcpyDeviceToHost) );
// Verify results. Exact float compare is safe here: every element is the sum
// of N copies of 2.0f, which is exactly representable.
for(int i = 0; i < N*N; i++){
if(h_C[i] != A_val*B_val*N){
printf("mismatch at index %d, was: %f, should be: %f\n", i, h_C[i], A_val*B_val*N);
return -1;
}
}
cudaErrorCheck( cudaFree(d_A) );
cudaErrorCheck( cudaFree(d_B) );
cudaErrorCheck( cudaFree(d_C) );
delete[] h_A;
delete[] h_B;
delete[] h_C;
printf("Success!\n");
return 0;
}
|
9,192 | #include<stdio.h>
#include<time.h>
#define N 2
#define M 3
#define P 2
#define BLOCK_SIZE 16
/*
 * C (n x p) = A (n x m) * B (m x p), all row-major, one thread per
 * output element (2D grid/block, x = column, y = row).
 */
__global__ void mm_kernel(float *d_a, float *d_b, float *d_c, int n, int m, int p){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if( col < p && row < n)
    {
        // BUG FIX: the accumulator was declared `int`, truncating every
        // float product before summation.
        float sum = 0.0f;
        for(int i = 0; i < m; i++)
        {
            sum += d_a[row * m + i] * d_b[i * p + col];
        }
        d_c[row * p + col] = sum;
    }
}
/*
 * Driver: builds random N x M and M x P matrices, multiplies them on the
 * GPU, and prints inputs and result.
 */
int main(){
    float *a, *b, *c;
    float *d_a, *d_b, *d_c;
    int n = N*M; // element counts, not byte counts
    int t = M*P;
    int u = N*P;
    // BUG FIX: the original allocated `n` BYTES for `n` floats
    // (malloc(n) instead of malloc(n*sizeof(float))), overflowing every
    // host buffer on the very first writes below.
    a = (float *)malloc(sizeof(float)*n);
    b = (float *)malloc(sizeof(float)*t);
    c = (float *)malloc(sizeof(float)*u);
    if(a == NULL || b == NULL || c == NULL){
        printf("host allocation failed\n");
        return 1;
    }
    cudaMalloc((void **)&d_a,sizeof(float)*n);
    cudaMalloc((void **)&d_b,sizeof(float)*t);
    cudaMalloc((void **)&d_c,sizeof(float)*u);
    // Seed once. The original re-seeded with time(NULL) before filling b;
    // within the same second that replays the identical sequence.
    srand(time(NULL));
    for(int i =0;i<N;i++){
        for(int j =0;j<M;j++){
            a[i*M +j] = rand()%10;
            printf("%f ", a[i*M+j]);
        }
        printf("\n");
    }
    printf("\n\n");
    for(int i =0;i<M;i++){
        for(int j =0;j<P;j++){
            b[i*P +j] = rand()%9;
            printf("%f ", b[i*P+j]);
        }
        printf("\n");
    }
    printf("\n\n");
    cudaMemcpy(d_a, a, sizeof(float)*n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float)*t, cudaMemcpyHostToDevice);
    // BUG FIX: the grid and block arguments were swapped (a 16x16 grid of
    // N x P-thread blocks). Use BLOCK_SIZE x BLOCK_SIZE threads per block
    // and just enough blocks to cover the P x N output.
    dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
    dim3 grid((P + BLOCK_SIZE - 1)/BLOCK_SIZE, (N + BLOCK_SIZE - 1)/BLOCK_SIZE, 1);
    mm_kernel<<<grid, block>>>(d_a, d_b, d_c, N,M,P);
    // cudaMemcpy synchronizes with the kernel before copying back.
    cudaMemcpy(c, d_c, sizeof(float)*u, cudaMemcpyDeviceToHost);
    for(int i =0;i<N;i++){
        for(int j =0;j<P;j++){
            printf("%f ", c[i*P+j]);
        }
        printf("\n");
    }
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    // Release host buffers (previously leaked).
    free(a); free(b); free(c);
}
|
9,193 | #include "includes.h"
// Scale every element of `im` (length `size`) in place by `val`.
// Flat index assumes 1D blocks laid out over an x-major 2D grid.
__global__ void Multiply(float* im, float val, int size)
{
    const int id = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    if (id < size) {
        im[id] = im[id] * val;
    }
} |
9,194 | #include <cuda.h>
#include <math_constants.h>
#define ARRAY_SIZE(array) (sizeof(array) / sizeof((array)[0]))
#define NODES_PER_LAYER 4
// Per-individual weights of a two-hidden-layer RBF network with 19 input
// features and NODES_PER_LAYER nodes per hidden layer; one instance per
// population member, read by evaluate().
// NOTE(review): w2 is never read by the kernels visible in this file.
struct Parameters {
float ih[NODES_PER_LAYER][19]; // input->hidden edge weight
float c[NODES_PER_LAYER][19]; // RBF center
float w[NODES_PER_LAYER]; // RBF width
float w2[NODES_PER_LAYER]; // RBF width second layer
float ho[NODES_PER_LAYER]; // hidden->output edge weight
float hh[NODES_PER_LAYER][NODES_PER_LAYER];// hidden->hidden edge weight
float c2[NODES_PER_LAYER][NODES_PER_LAYER];// RBF center second layer
};
/*
 * Evaluate the RBF network for every (training sample, parameter set) pair.
 * Grid layout: x spans training samples, blockIdx.y selects the population
 * member. trainSet is feature-major: trainSet[feature * trainSize + sample];
 * outputs is member-major: outputs[paramsIndex * trainSize + trainIndex].
 */
__global__ void evaluate(
    const float *trainSet,
    unsigned int trainSize,
    const Parameters *params,
    unsigned int popSize,
    float *outputs
) {
    const int trainIndex = blockIdx.x * blockDim.x + threadIdx.x;
    const int paramsIndex = blockIdx.y;
    const int outputIndex = paramsIndex * trainSize + trainIndex;
    if (trainIndex < trainSize && paramsIndex < popSize) {
        // Read input features.
        float inputs[19];
        float l2inputs[NODES_PER_LAYER] = {0.f};
        for (int i = 0; i < ARRAY_SIZE(inputs); i++) {
            inputs[i] = trainSet[i * trainSize + trainIndex];
        }
        // TODO: Read parameters into shared memory.
        // BUG FIX: "&params" had been corrupted into the HTML entity
        // sequence "¶ms", which does not compile.
        const Parameters * const p = &params[paramsIndex];
        // Calculate network output.
        float output = 0.f;
        // TODO: Make sure this gets unrolled
        for (int j = 0; j < NODES_PER_LAYER; j++) {
            float d2 = 0.f; // squared distance feeding hidden node j
            for (int i = 0; i < 19; i++) {
                const float d = inputs[i] * p->ih[j][i] - p->c[j][i];
                d2 += d * d;
            }
            const float h = __expf(-p->w[j] * d2); // Gaussian RBF
            // Accumulate squared distances feeding the second hidden layer.
            for (int i = 0; i < NODES_PER_LAYER; i++) {
                const float d = h * p->hh[j][i] - p->c2[j][i];
                l2inputs[i] += d * d;
            }
        }
        for (int j = 0; j < NODES_PER_LAYER; j++) {
            // NOTE(review): reuses p->w rather than p->w2 for the second
            // layer — confirm this is intended (w2 is otherwise unused).
            const float h = __expf(-p->w[j] * l2inputs[j]); // Gaussian RBF
            output += h * p->ho[j];
        }
        outputs[outputIndex] = output;
    }
}
// 0-based binary-heap index helpers (heapparent is currently unused here).
#define heapparent(i) (((i) - 1) / 2)
#define heapleft(i) (2 * ((i) + 1) - 1)
#define heapright(i) (2 * ((i) + 1))
// Swap two float lvalues by value.
#define swap(a, b) {float temp = (a); (a) = (b); (b) = temp; }
/*
 * Conditionally replace the root of a MIN-heap (`heap`, `size` elements)
 * with `value` and sift down to restore the heap property. The heap holds
 * the `size` largest values seen so far, so its root is the smallest
 * retained value and is evicted whenever a candidate >= it arrives.
 */
__device__ void heapreplace(
float *heap,
unsigned int size,
float value
) {
// Candidate below the current minimum: the retained set is unchanged.
if (value < heap[0]) {
return;
}
// Replace min (root) with new value.
heap[0] = value;
// Down-heap.
int i = 0;
while (heapleft(i) < size) { // stop before leaf level
const int left = heapleft(i);
const int right = heapright(i);
int smallest;
// Select the smallest of node i and its in-range children.
if (left < size && heap[left] < heap[i]) {
smallest = left;
} else {
smallest = i;
}
if (right < size && heap[right] < heap[smallest]) {
smallest = right;
}
if (smallest != i) {
swap(heap[smallest], heap[i]);
i = smallest;
} else {
// Heap property restored.
break;
}
}
}
/*
 * Per population member (blockIdx.y), find the n-th largest kernel output
 * ("threshold") over the training set, with duplicate bookkeeping in
 * thresholdCounts. Designed to be run in successive passes: thresholds[]
 * and thresholdCounts[] carry the previous pass's state so occurrences of
 * the old threshold that were already accounted for are skipped.
 * Requires n floats of dynamic shared memory.
 * NOTE(review): the body is purely serial and never reads threadIdx — it
 * appears to assume ONE thread per block; with more, all threads would
 * race on the shared heap. Confirm the launch configuration.
 */
__global__ void nlargest(
const float *outputs,
unsigned int trainSize,
unsigned int popSize,
unsigned int n,
float *thresholds,
unsigned int *thresholdCounts
) {
const int paramsIndex = blockIdx.y;
if (paramsIndex < popSize) {
// Threshold (and its remaining multiplicity) from the previous pass.
float maxValue = thresholds[paramsIndex];
unsigned int maxCount = thresholdCounts[paramsIndex];
extern __shared__ float heap[/* n */];
// First n values sink to the bottom of the heap.
for (int i = 0; i < n; i++) {
heap[i] = -CUDART_INF_F;
}
for (int trainIndex = 0; trainIndex < trainSize; trainIndex++) {
const int outputIndex = paramsIndex * trainSize + trainIndex;
const float output = outputs[outputIndex];
// NaN outputs are ignored entirely.
if (isnan(output)) {
continue;
}
if (output < maxValue) {
heapreplace(heap, n, output);
} else if (output == maxValue) {
// Only admit occurrences of the old threshold beyond those
// already consumed by earlier passes.
if (maxCount == 0) {
heapreplace(heap, n, output);
} else {
maxCount--;
}
}
}
// If maxValue hasn't changed, carry over the number of occurrences from the
// previous pass.
if (maxValue == heap[0]) {
maxCount = thresholdCounts[paramsIndex];
} else {
maxValue = heap[0];
maxCount = 0;
}
// During the next pass, skip the occurrences of maxValue that were already
// accounted for in this pass.
for (int i = 0; i < n; i++) {
if (maxValue == heap[i]) {
maxCount++;
}
}
thresholds[paramsIndex] = maxValue;
thresholdCounts[paramsIndex] = maxCount;
}
}
/*
 * Per population member (blockIdx.y), count how many of the first
 * trainPositives outputs exceed that member's threshold. Each thread
 * accumulates a partial count over a blockDim.x-strided slice of sample
 * indices and writes it to counts[paramsIndex * blockDim.x + threadIdx.x];
 * presumably the per-member totals are reduced elsewhere — confirm with
 * the caller.
 */
__global__ void count(
const float *outputs,
unsigned int trainSize,
unsigned int trainPositives,
unsigned int popSize,
const float *thresholds,
unsigned int *counts
) {
const int paramsIndex = blockIdx.y;
if (paramsIndex < popSize) {
const float threshold = thresholds[paramsIndex];
unsigned int count = 0;
// Strided loop over sample indices in steps of blockDim.x.
const int gridDimX = (trainSize + blockDim.x - 1) / blockDim.x;
for (int blockX = 0; blockX < gridDimX; blockX++) {
const int trainIndex = blockX * blockDim.x + threadIdx.x;
// Only the leading trainPositives samples participate.
if (trainIndex < trainPositives) {
const float output = outputs[paramsIndex * trainSize + trainIndex];
if (output > threshold) {
count++;
}
}
}
counts[paramsIndex * blockDim.x + threadIdx.x] = count;
}
}
|
9,195 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define TILE_WIDTH 10
#define TILE_HEIGHT 10
/* Iterate z <- z^2 + c starting from z = c and return the number of
   iterations (capped at `count`) before |z|^2 exceeds 4. */
__host__
__device__ int calc_mandel(float c_re, float c_im, int count)
{
    float z_re = c_re;
    float z_im = c_im;
    int iter = 0;
    while (iter < count) {
        // Escape test on the squared magnitude (avoids a sqrt).
        if (z_re * z_re + z_im * z_im > 4.f)
            break;
        const float sq_re = z_re * z_re - z_im * z_im;
        const float sq_im = 2.f * z_re * z_im;
        z_re = c_re + sq_re;
        z_im = c_im + sq_im;
        ++iter;
    }
    return iter;
}
/* Map pixel (row, col) of a width x height image onto the complex
   rectangle [x0,x1] x [y0,y1] and return its Mandelbrot iteration count. */
__host__
__device__ int mandelbrot_calc(
    float x0, float y0, float x1, float y1,
    int width, int height,
    int row, int col,
    int maxIterations)
{
    const float stepX = (x1 - x0) / width;
    const float stepY = (y1 - y0) / height;
    const float re = x0 + col * stepX;
    const float im = y0 + row * stepY;
    return calc_mandel(re, im, maxIterations);
}
/* One thread per pixel; guards against grid overhang at the image edges. */
__global__ void mandelbrot_kernel(float x0, float y0, float x1, float y1, int width, int height, int maxIterations, int* output) {
    const int px = blockIdx.x*blockDim.x + threadIdx.x;
    const int py = blockIdx.y*blockDim.y + threadIdx.y;
    if (px >= width || py >= height)
        return;
    output[py*width + px] = mandelbrot_calc(x0, y0, x1, y1, width, height, py, px, maxIterations);
}
/*
 * Render a width x height Mandelbrot iteration map of the rectangle
 * [x0,x1] x [y0,y1] on the GPU into `output` (row-major ints).
 * Timing events bracket the whole alloc/launch/copy/free sequence; the
 * elapsed time is only used by the commented-out diagnostics below.
 */
void mandelbrotGpu(
    float x0, float y0, float x1, float y1,
    int width, int height,
    int maxIterations,
    int output[]) {
    int* d_output;
    float millisec = 0.0;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    // Ceil-division: enough tiles to cover a non-multiple-of-tile image.
    int nTilesX = (width / TILE_WIDTH) + ((width % TILE_WIDTH == 0) ? 0 : 1);
    int nTilesY = (height / TILE_HEIGHT) + ((height % TILE_HEIGHT == 0) ? 0 : 1);
    cudaMalloc(&d_output, width*height*sizeof(int));
    //printf("\ndim=%d,%d %d,%d", nTilesX, nTilesY, TILE_WIDTH, TILE_HEIGHT);
    dim3 threadsPerBlock(TILE_WIDTH, TILE_HEIGHT);
    dim3 blocksPerGrid(nTilesX, nTilesY);
    mandelbrot_kernel<<<blocksPerGrid, threadsPerBlock>>>(x0, y0, x1, y1, width, height, maxIterations, d_output);
    // The blocking cudaMemcpy also synchronizes with the kernel.
    cudaMemcpy(output, d_output, width*height*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_output);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&millisec, start, stop);
    //printf("\ncuda time = %f\n", millisec);
    //printf("\nVal=%d\n", mandelbrot_calc(x0, y0, x1, y1, width, height, 5, 517, maxIterations));
    // BUG FIX: the events were never destroyed, leaking two CUDA events
    // per call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
|
9,196 | #include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <cufft.h>
#include <vector>
#include <cmath>
#include <stdio.h>
#include <string>
#include <iostream>
/*The multicplication algorithm with cuFFT is from the following source:
Source: https://programmer.group/implementing-large-integer-multiplication-with-cufft.html
The multWithFFT-function is edited by Max & Johannes*/
const auto BATCH = 1;
// Pointwise complex product b[i] = a[i] * b[i] / size, written back into b.
// Grid-stride loop, so any launch configuration covers all `size` elements.
__global__ void ComplexPointwiseMulAndScale(cufftComplex *a, cufftComplex *b, int size)
{
    const int stride = blockDim.x * gridDim.x;
    const int first = blockIdx.x * blockDim.x + threadIdx.x;
    const float scale = 1.0f / (float)size;
    for (int i = first; i < size; i += stride)
    {
        const cufftComplex prod = cuCmulf(a[i], b[i]);
        b[i] = make_cuFloatComplex(scale * cuCrealf(prod), scale * cuCimagf(prod));
    }
}
/*
 * Round each float in `a` (length `size`) to the nearest integer and store
 * it IN PLACE, reinterpreting the same buffer as int. Relies on
 * sizeof(int) == sizeof(float) (noted by the caller). Grid-stride loop:
 * each element is read and rewritten by exactly one thread, so the
 * type-punned in-place store is race-free.
 */
__global__ void ConvertToInt(cufftReal *a, int size)
{
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
// View the float buffer as an int buffer for the in-place conversion.
auto b = (int*)a;
for (int i = threadID; i < size; i += numThreads)
b[i] = static_cast<int>(round(a[i]));
}
/*
 * Multiply two equal-length digit sequences a and b by FFT convolution:
 * forward R2C transforms of both, pointwise product scaled by 1/NX,
 * inverse C2R, then per-element rounding to int on the device.
 * Returns a.size()+1 ints: c[0] is 0 (room for the final carry) and
 * c[1..] holds the rounded convolution; carries are resolved by the caller.
 * Assumes a.size() == b.size() and sizeof(int) == sizeof(float).
 * NOTE(review): early error paths return without releasing the plans and
 * device buffers created so far.
 */
std::vector<int> multiply(const std::vector<float> &a, const std::vector<float> &b)
{
const auto NX = a.size();
cufftHandle plan_a, plan_b, plan_c;
cufftComplex *data_a, *data_b;
std::vector<int> c(a.size() + 1);
c[0] = 0;
//Allocate graphics card memory and initialize, assuming sizeof(int)==sizeof(float), sizeof(cufftComplex)==2*sizeof(float)
cudaMalloc((void**)&data_a, sizeof(cufftComplex) * (NX / 2 + 1) * BATCH);
cudaMalloc((void**)&data_b, sizeof(cufftComplex) * (NX / 2 + 1) * BATCH);
cudaMemcpy(data_a, a.data(), sizeof(float) * a.size(), cudaMemcpyHostToDevice);
cudaMemcpy(data_b, b.data(), sizeof(float) * b.size(), cudaMemcpyHostToDevice);
if (cudaGetLastError() != cudaSuccess) { fprintf(stderr, "Cuda error: Failed to allocate\n"); return c; }
if (cufftPlan1d(&plan_a, NX, CUFFT_R2C, BATCH) != CUFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return c; }
if (cufftPlan1d(&plan_b, NX, CUFFT_R2C, BATCH) != CUFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return c; }
if (cufftPlan1d(&plan_c, NX, CUFFT_C2R, BATCH) != CUFFT_SUCCESS) { fprintf(stderr, "CUFFT error: Plan creation failed"); return c; }
//Converting A(x) to Frequency Domain (in-place R2C transform)
if (cufftExecR2C(plan_a, (cufftReal*)data_a, data_a) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecR2C Forward failed");
return c;
}
//Converting B(x) to Frequency Domain
if (cufftExecR2C(plan_b, (cufftReal*)data_b, data_b) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecR2C Forward failed");
return c;
}
//Point multiplication (result and 1/NX scaling land in data_b)
ComplexPointwiseMulAndScale<<<NX / 256 + 1, 256>>>(data_a, data_b, NX);
//Converting C(x) back to time domain
if (cufftExecC2R(plan_c, data_b, (cufftReal*)data_b) != CUFFT_SUCCESS)
{
fprintf(stderr, "CUFFT error: ExecC2R Forward failed");
return c;
}
//Converting the results of floating-point numbers to integers
ConvertToInt<<<NX / 256 + 1, 256>>>((cufftReal*)data_b, NX);
if (cudaDeviceSynchronize() != cudaSuccess)
{
fprintf(stderr, "Cuda error: Failed to synchronize\n");
return c;
}
// data_b now holds ints; copy into c[1..] (same byte width as float).
cudaMemcpy(&c[1], data_b, sizeof(float) * b.size(), cudaMemcpyDeviceToHost);
cufftDestroy(plan_a);
cufftDestroy(plan_b);
cufftDestroy(plan_c);
cudaFree(data_a);
cudaFree(data_b);
return c;
}
// Print every element of `input` to stdout, space-separated (no newline).
void print(std::vector<float> const &input)
{
    for (std::size_t idx = 0; idx < input.size(); ++idx) {
        std::cout << input.at(idx) << ' ';
    }
}
/*
 * Multiply two decimal-digit strings a and b via cuFFT convolution and
 * write the NUL-terminated decimal result into the caller-provided buffer
 * *c (which must hold at least strlen(a)+strlen(b)+1 bytes).
 */
extern "C" void multWithFFT(char* a, char *b, char **c)
{
    //Set base
    const int base = 10;
    printf("a = %s\n\n",a);
    printf("b = %s\n\n",b);
    const size_t lengthA = strlen(a);
    const size_t lengthB = strlen(b);
    //length of multiplication result has the size of the sum of the two factors
    const size_t result_length = lengthA + lengthB;
    //factors are stored in these vectors
    std::vector<float> av;
    std::vector<float> bv;
    av.reserve(result_length);
    bv.reserve(result_length);
    //fill vectors step by step
    for(size_t i=0; i<lengthA; ++i){
        av.push_back((float)(a[i])-'0');
    }
    for(size_t i=0; i<lengthB; ++i){
        bv.push_back((float)(b[i])-'0');
    }
    //vectors need to be same size: zero-pad at the front in one step
    //(the original inserted one zero at a time, each insert O(n))
    av.insert(av.begin(), result_length - av.size(), 0.0f);
    bv.insert(bv.begin(), result_length - bv.size(), 0.0f);
    //call cuda-kernel-function
    std::vector<int> cv = multiply(av, bv);
    //Processing carry
    for (int i = (int)cv.size() - 1; i > 0; i--)
    {
        if (cv[i] >= base)
        {
            cv[i - 1] += cv[i] / base;
            cv[i] %= base;
        }
    }
    //Remove excess zeros
    cv.pop_back();
    //Skip a single leading zero (e.g. 999*1 -> "0999"), but keep it when
    //the whole result is one digit so "0" is returned instead of "".
    size_t first = 0;
    if (cv.size() > 1 && cv[0] == 0)
        first++;
    //Convert the digit vector to a string. BUG FIX: the original built a
    //non-standard VLA and memcpy'd its FULL size, copying uninitialized
    //bytes past the terminating NUL into *c.
    std::string result;
    result.reserve(cv.size() - first);
    for (size_t i = first; i < cv.size(); i++){
        result.push_back((char) cv.at(i) + '0');
    }
    //transfer result (digits + NUL terminator) to cuda_mult.c
    memcpy(*c, result.c_str(), result.size() + 1);
    return;
}
|
9,197 | typedef struct {
// Row-major int matrix view; `stride` is the row pitch in elements, so
// sub-matrix views can alias a parent matrix's storage.
int width;
int height;
int stride;
int* elements;
} matrix;
// Same layout as `matrix`, but with float elements.
typedef struct {
int width;
int height;
int stride;
float* elements;
} matrixf;
/* Return a block_size x block_size view of A located at block coordinates
   (row, col). The view aliases A's storage (no copy) and inherits stride. */
__device__ matrix GetSubmatrix(matrix A, int row, int col, int block_size)
{
    matrix view;
    view.width = block_size;
    view.height = block_size;
    view.stride = A.stride;
    view.elements = A.elements + A.stride * block_size * row + block_size * col;
    return view;
}
/* Float variant of GetSubmatrix: a block_size x block_size aliasing view
   of A at block coordinates (row, col). */
__device__ matrixf GetSubmatrixf(matrixf A, int row, int col, int block_size)
{
    matrixf view;
    view.width = block_size;
    view.height = block_size;
    view.stride = A.stride;
    view.elements = A.elements + A.stride * block_size * row + block_size * col;
    return view;
}
/*
 * Generalized-Hough voting kernel: for each gradient pixel (idx = row,
 * idy = col) and each r_table column idz, cast one vote into the
 * accumulator at the position offset by the r_table entry for the pixel's
 * quantized gradient angle.
 * NOTE(review): there is no bounds guard on idx/idy against the image
 * dimensions (one exists but is commented out) — the grid must exactly
 * cover the image; confirm against the launch code.
 */
__global__ void create_accum(matrix *accum, matrix *r_table, matrixf *gradient_image)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x; //height of image (row)
    int idy = threadIdx.y + blockDim.y * blockIdx.y; //width of image (col)
    int idz = threadIdx.z + blockDim.z * blockIdx.z; //width of r_table
    //if(idx<gradient_image->height && idy<gradient_image->width){
    float phi = gradient_image->elements[idx * gradient_image->width + idy];
    //}
    int slice = 0;
    float pi = 3.14159265359;
    // Skip (near-)zero gradient angles.
    if(phi > 0.001 || phi < -0.001){
        // Map the angle from (-pi, pi] onto one of 8 r_table slices.
        slice = __float2int_rd(8*(phi+pi)/(2*pi)); //rotate here?
        if(r_table->elements[(slice*r_table->width + idz)*2] != 0 && r_table->elements[(slice*r_table->width + idz)*2+1] != 0){
            int ix = idx + r_table->elements[(slice*r_table->width + idz)*2];
            int iy = idy + r_table->elements[(slice*r_table->width + idz)*2 + 1];
            // NOTE(review): ix (row-derived) is tested against width and iy
            // against height — possibly transposed; behavior left as-is.
            if (ix >= 0 && ix < accum->width && iy >= 0 && iy < accum->height){
                // BUG FIX: a __syncthreads() sat here inside deeply divergent
                // control flow, which is undefined behavior. It paired with
                // no other barrier and guarded nothing, so it is removed;
                // atomicAdd alone makes the vote safe.
                atomicAdd(&accum->elements[(ix*accum->width + iy)], 1);
            }
        }
    }
} |
9,198 | __global__ void kernelForAPSP(int *V, int *E, int *W, int *n, bool *visit, int *dist, int *predist){
/*
 * All-pairs shortest paths by one Bellman-Ford-style relaxation sweep per
 * source vertex. Each block handles sources st = blockId, blockId+gridSize,
 * ...; threads inside the block stride over the n vertices. The graph is
 * CSR: V = row offsets, E = targets, W = weights. visit/dist/predist are
 * n*n arrays with one row ("align") per source.
 */
const int blockId = blockIdx.z *(gridDim.x * gridDim.y) + blockIdx.y * gridDim.x + blockIdx.x;
const int threadId = threadIdx.z*(blockDim.x * blockDim.y)+ threadIdx.y* blockDim.x+ threadIdx.x;
const int blockSize = blockDim.x * blockDim.y * blockDim.z;
const int gridSize = gridDim.x * gridDim.y * gridDim.z;
__shared__ int QuickExit; // set when any distance improved this round
int u=0, st=blockId, align=0, done=0;
while(st < (*n))
{
align = (st * (*n));
for(int rnd=0;rnd<(*n);rnd++){
// BUG FIX: QuickExit was reset by EVERY thread with no barrier between
// the previous round's break-check read and this write, so a fast
// thread could clear it while slower threads were still reading —
// some threads break while others continue, deadlocking the barriers
// below. Single writer + barrier, plus a snapshot read before the
// break, makes the exit decision uniform across the block.
if(threadId == 0) QuickExit = 0;
__syncthreads();
// Phase 1: relax all outgoing edges of vertices marked visited.
u = threadId;
while(u < (*n)){
if(visit[u + align]){
visit[u + align]=0;
for(int adj = V[u];adj<V[u+1];adj++){
atomicMin( &predist[align + E[adj]] , dist[align + u] + W[adj]);
}
}
u+=blockSize;
}
__syncthreads();
// Phase 2: commit improvements and mark improved vertices for the
// next round.
u=threadId;
while(u < (*n)){
if(predist[align + u] < dist[align + u]){
dist[align + u] = predist[align + u];
visit[align + u] = 1;
QuickExit = 1;
}
u+=blockSize;
}
__syncthreads();
// Snapshot the flag before anyone can reset it next iteration.
done = (QuickExit == 0);
__syncthreads();
if(done){
break;
}
}
__syncthreads();
st += gridSize;
}
} |
9,199 | /*
Compile and profile
nvcc checkTransfer.cu -o checkTransfer
If you have problems with gcc version try
sudo ln -s /usr/bin/gcc-4.9 /usr/local/cuda-7.5/bin/gcc
Profile console
nvprof ./checkTransfer
*/
/*
 * Measures H2D and D2H transfer time for a buffer of X bytes under
 * nvprof (see the comment header above for the measured table).
 */
int main()
{
    //const unsigned int X=1; //1 Bytes (2us/1us)
    //const unsigned int X=10; //10 Bytes (2us/1us)
    //const unsigned int X=100; //100 Bytes (2us/1us)
    //const unsigned int X=1000; //1k Bytes (2us/1us)
    //const unsigned int X=10000; //10k Bytes (2.7us/2us)
    //const unsigned int X=100000; //100k Bytes (10us/10us)
    //const unsigned int X=1000000; //1 Megabyte (80us/79us)
    //const unsigned int X=10000000; //10 Megabyte (1000us/900us)
    //const unsigned int X=100000000; //100 Megabyte (10000us/10000us)
    const unsigned int X=1000000000; //1000 Megabyte (106000us/103000us)
    //const unsigned int X=256000000; //256 Megabyte (27000us/26000us)
    //const unsigned int X=120*120*3; // 120x120 RGB image (43200 bytes) (7us/6us)
    const unsigned int bytes = X*sizeof(char);
    // Allocate memory on CPU
    char *hostArray= (char*)malloc(bytes);
    char *deviceArray;
    // BUG FIX: a ~1 GB malloc can fail; bail out instead of memset(NULL,...).
    if (hostArray == NULL) {
        return 1;
    }
    // Allocate memory on GPU
    cudaMalloc((char**)&deviceArray,bytes);
    memset(hostArray,0,bytes);
    // Transfer hostArray from CPU to GPU
    cudaMemcpy(deviceArray,hostArray,bytes,cudaMemcpyHostToDevice);
    // Get hostArray from GPU to CPU
    cudaMemcpy(hostArray,deviceArray,bytes,cudaMemcpyDeviceToHost);
    // Release memory from GPU
    cudaFree(deviceArray);
    // BUG FIX: the host buffer was leaked.
    free(hostArray);
}
|
9,200 | #include "includes.h"
// Empty stub kernel: declares an interface (dense double arrays A/B, int
// index arrays Cir/Cjc — presumably sparse-format row indices/column
// pointers, confirm with caller — and output P) but performs no work.
// NOTE(review): identifiers starting with two underscores are reserved,
// but renaming would change the public interface, so it is left as-is.
__global__ void __dds0(int nrows, int ncols, double *A, double *B, int *Cir, int *Cjc, double *P) {} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.