serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
7,701 | struct ProgramTestClass
{
__device__ ProgramTestClass()
{
}
int A;
int B;
int C;
};
// CudaTest.Program
extern "C" __global__ void doTheThing( ProgramTestClass* tests, int testsLen0);
// CudaTest.Program
extern "C" __global__ void addArrays( int* a, int aLen0, int* b, int bLen0, int* c, int cLen0);
// CudaTest.Program
extern "C" __global__ void findPrimes( int* toCheck, int toCheckLen0, int* results, int resultsLen0);
// CudaTest.Program
__device__ int isPrime(int a);
// CudaTest.Program
// Kernel: C = A + B for each test object, one object per block.
// Fix: the original copied tests[x] into a local struct, updated the copy,
// and never wrote it back, so the computed C was discarded. Write through
// the array instead, and bound the index by testsLen0 as well.
extern "C" __global__ void doTheThing( ProgramTestClass* tests, int testsLen0)
{
    int x = blockIdx.x;
    if (x < 1000000 && x < testsLen0)
    {
        tests[x].C = tests[x].A + tests[x].B;
    }
}
// CudaTest.Program
// Element-wise vector addition: c[i] = a[i] + b[i], one element per block.
extern "C" __global__ void addArrays( int* a, int aLen0, int* b, int bLen0, int* c, int cLen0)
{
    const int idx = blockIdx.x;
    if (idx >= 1000000)
        return;                 // same hard-coded bound as the rest of the file
    c[idx] = a[idx] + b[idx];
}
// CudaTest.Program
// For each element, store 1/0 in results[i] according to isPrime(toCheck[i]).
extern "C" __global__ void findPrimes( int* toCheck, int toCheckLen0, int* results, int resultsLen0)
{
    const int idx = blockIdx.x;
    if (idx >= 1000000)
        return;
    results[idx] = isPrime(toCheck[idx]);
}
// CudaTest.Program
// Device helper: returns 1 if 'a' is prime, 0 otherwise.
// Fixes over the original:
//  - 1 is not prime (the original returned 1 for a == 1), and neither are
//    0 or negatives (the original returned 1 for negative odd inputs);
//  - trial division stops at sqrt(a) (num3 <= a/num3, overflow-safe) and
//    skips even divisors, instead of testing every value up to a/2.
__device__ int isPrime(int a)
{
    if (a < 2)
        return 0;       // 0, 1 and negatives are not prime
    if (a == 2)
        return 1;       // the only even prime
    if (a % 2 == 0)
        return 0;
    for (int num3 = 3; num3 <= a / num3; num3 += 2)
    {
        if (a % num3 == 0)
            return 0;
    }
    return 1;
}
|
7,702 | #include <iostream>
#include <cstdlib>
//matrix A[i][j]
// Row-major linear index into an N x N matrix: element (a, b) -> a*N + b.
template<int N>
__device__ int l(int a, int b){
    return b + N * a;
}
// C = A * B for N x N row-major matrices.
// One thread per output element: thread (row, col) accumulates the dot
// product of row 'row' of A with column 'col' of B.
template<int N>
__global__ void multiply(double *A, double *B, double *C){
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= N || col >= N)
        return;                           // grid overshoot
    double acc = 0;
    for (int j = 0; j < N; j++)
        acc += A[l<N>(row, j)] * B[l<N>(j, col)];
    C[l<N>(row, col)] = acc;
}
// Fill x[0..size) with pseudo-random hundredths in [0, 1).
void random(double* x, int size){
    for (int i = 0; i < size; i++){
        const double cents = rand() % 100;
        x[i] = cents / 100.0;
    }
}
// Print an N x N row-major matrix, comma-separated, one row per line.
template<int N>
void dispMat(const double *A){
    for (int row = 0; row < N; row++){
        for (int col = 0; col < N; col++)
            std::cout << A[row*N + col] << ",";
        std::cout << std::endl;
    }
}
// Print the textual description of the most recent CUDA runtime error.
// (cudaGetLastError also clears the sticky error state.)
void checkError(){
    const cudaError_t err = cudaGetLastError();
    std::cout << cudaGetErrorString(err) << std::endl;
}
// Host driver: build two random N x N matrices, multiply them on the GPU,
// and print operands and result.
// Fixes over the original:
//  - host mallocs are checked;
//  - kernel execution errors only surface at a synchronizing call, so we
//    cudaDeviceSynchronize() before checking and before reading C;
//  - grid size uses ceil-division (N/T + 1 allocated a redundant extra
//    block row/column whenever T divides N).
int main(void){
    const int N = 1000;
    const int THREADS_PER_BLOCK = 16;
    double *d_A;
    double *d_B;
    double *d_C;
    const size_t memsize = (size_t)N * N * sizeof(double);
    double *A = (double *)malloc(memsize);
    double *B = (double *)malloc(memsize);
    double *C = (double *)malloc(memsize);
    if (A == NULL || B == NULL || C == NULL) {
        std::cout << "host allocation failed" << std::endl;
        return 1;
    }
    random(A, N*N);
    random(B, N*N);
    std::cout << "A=" << std::endl;
    dispMat<N>(A);
    std::cout << "B=" << std::endl;
    dispMat<N>(B);
    cudaMalloc((void**)&d_A, memsize);
    checkError();
    cudaMalloc((void**)&d_B, memsize);
    checkError();
    cudaMalloc((void**)&d_C, memsize);
    checkError();
    cudaMemcpy(d_A, A, memsize, cudaMemcpyHostToDevice);
    checkError();
    cudaMemcpy(d_B, B, memsize, cudaMemcpyHostToDevice);
    checkError();
    // Ceil-division: smallest grid covering N in each dimension.
    const int blocksPerDim = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    dim3 blocks(blocksPerDim, blocksPerDim, 1);
    dim3 thPerBlock(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);
    multiply<N><<<blocks,thPerBlock>>>(d_A, d_B, d_C);
    checkError();                // launch-configuration errors
    cudaDeviceSynchronize();     // surface kernel execution errors here
    checkError();
    cudaMemcpy(C, d_C, memsize, cudaMemcpyDeviceToHost);
    checkError();
    std::cout << "C=" << C[10*N+10] << std::endl;
    dispMat<N>(C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
}
|
7,703 | #include <stdio.h>
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
const int N=10;
// One thread per element: c[i] = a[i] + b[i].
// Assumes the launch uses a single block whose thread count equals the
// vector length (no bounds guard).
__global__ void add(int *a, int *b, int *c) {
    const int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Host driver: add two small integer vectors on the GPU and print the sums.
int main(){
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    const size_t bytes = N * sizeof(int);
    cudaMalloc( (void**)&dev_a, bytes );
    cudaMalloc( (void**)&dev_b, bytes );
    cudaMalloc( (void**)&dev_c, bytes );
    cudaCheckErrors("cudamalloc fail");
    // Test data: a[i] = -i, b[i] = i^2
    for (int i = 0; i < N; ++i) {
        a[i] = -i;
        b[i] = i * i;
    }
    cudaMemcpy ( dev_a, a, bytes, cudaMemcpyHostToDevice );
    cudaMemcpy ( dev_b, b, bytes, cudaMemcpyHostToDevice );
    cudaCheckErrors("cuda memcpy fail");
    // One block of N threads, one element per thread; the blocking
    // device-to-host copy below also synchronizes with the kernel.
    add<<<1,N>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, bytes, cudaMemcpyDeviceToHost );
    cudaCheckErrors("cudamemcpy or cuda kernel fail");
    for (int i = 0; i < N; ++i) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }
    cudaFree (dev_a);
    cudaFree (dev_b);
    cudaFree (dev_c);
    return 0;
}
|
7,704 | #include "includes.h"
// Copy a (width-left-right) x (height-top-bottom) source image into the
// interior of a width x height destination, leaving a border of
// top/bottom/left/right pixels untouched.
// Fixes over the original:
//  - the y upper bound used '>' instead of '>=', letting one out-of-range
//    source row be read;
//  - the source row pitch used (width - top - right), mixing vertical and
//    horizontal padding; the horizontal padding is left/right;
//  - threads beyond the destination extent (grid overshoot) now exit.
__global__ void imagePaddingKernel(float3 *ptr, float3 *dst, int width, int height, int top, int bottom, int left, int right)
{
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= width || y >= height) {
        return;    // grid overshoot
    }
    if (x < left || x >= (width - right) || y < top || y >= (height - bottom)) {
        return;    // inside the padding border
    }
    int srcWidth = width - left - right;
    float3 color = ptr[(y - top) * srcWidth + (x - left)];
    dst[y * width + x] = color;
}
7,705 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <iostream>
// Demonstrates host-to-device transfer with thrust: fill a host_vector and
// move it in one bulk copy instead of one transfer per element.
int main(void)
{
    const int N = 100000;
    thrust::host_vector<int> h_data(N);
    thrust::device_vector<int> d_data(N);
    // Slow alternative: per-element writes to a device_vector issue one
    // cudaMemcpy each:
    //   for (int i = 0; i < N; i++) d_data[i] = i;
    // Fast: fill on the host, then a single bulk copy.
    for (int i = 0; i < N; i++) {
        h_data[i] = i;
    }
    thrust::copy(h_data.begin(), h_data.end(), d_data.begin());
    return 0;
}
|
7,706 | #include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <unistd.h>
#include <cstdlib>
//Data parameters
#define SHOW_ELAPSED_TIME 1
//DSP parameters
#define SAMP_FREQ 1e6
#define FREQ_SHIFT -350000
//Coefficients for FIR
#define DECIMATION_RATE 2
#define FIR_SIZE 64
const int fir_size_in_bytes = FIR_SIZE * sizeof(float);
__constant__ float fir_coef [FIR_SIZE];
float cpu_fir_coef[FIR_SIZE] = {0.0};
// Declare arrays dynamically
float *cpu_I_in_buffer;
float *cpu_I_result_buffer;
float *cpu_Q_in_buffer;
float *cpu_Q_result_buffer;
//Default values, can be overwritten by providing command line arguments
unsigned int array_size;
unsigned int array_size_in_bytes;
unsigned int num_threads = FIR_SIZE;
unsigned int num_blocks = 4096;
//Function to copy data into shared memory. Includes thread sync
// Stage one element per thread from global into shared memory, then barrier
// so the whole tile is visible to every thread in the block.
__device__
void copy_data_to_shared(float * src, float * dst, const unsigned int tid)
{
    dst[tid] = src[tid];
    __syncthreads();    // all writes land before anyone reads the tile
}
//Function to copy data out of shared memory. Includes thread sync
// Copy one element per thread out of shared memory, barriering FIRST so all
// producers have finished writing the tile.
__device__
void copy_data_from_shared(float * src, float * dst, const unsigned int tid)
{
    __syncthreads();    // wait for every producer before reading
    dst[tid] = src[tid];
}
// Custom complex multiplication kernel
// Complex multiply: (Ai + jAq) * (Bi + jBq) -> (*Ri + j*Rq).
__device__
void cMult(const float Ai, const float Aq, const float Bi, const float Bq, float* Ri, float* Rq)
{
    const float re = Ai * Bi - Aq * Bq;
    const float im = Ai * Bq + Aq * Bi;
    *Ri = re;
    *Rq = im;
}
//Method to quickly sum an array and put the result at the begining of the array
//http://developer.download.nvidia.com/compute/cuda/1.1-Beta/x86_website/projects/reduction/doc/reduction.pdf
__device__
// In-place shared-memory tree reduction: after the loop, sdata[0] holds the
// sum of sdata[0..blockSize). Halving stride assumes blockSize is a power of
// two (see the NVIDIA reduction whitepaper linked above).
void sum_array(float * sdata, const unsigned int blockSize, const unsigned int tid)
{
for (unsigned int s=blockSize/2; s>0; s>>=1) {
if(tid < s)
{
sdata[tid] += sdata[tid + s];
}
// Barrier is outside the if: every thread reaches it each iteration.
__syncthreads();
}
}
// Perform a frequency shift via complex multiply
// Parameters:
// I_in, Q_in, I_out, Q_out: 4 data buffers, all must be the same length
// n0: Used to calculate the phase of the first point of the mixing signal
// freq_shift: Frequency to shift in Hz
// Fs: sample frequency in Hz
__global__
// Mix the complex input (I_in, Q_in) with a complex exponential to shift it
// by freq_shift Hz at sample rate Fs, writing (I_out, Q_out). n0 is the
// absolute sample index (hence phase) of element 0.
// NOTE(review): no bounds guard -- assumes the launch geometry exactly
// covers the buffer length; confirm against the caller.
void freq_shift(float * I_in, float * Q_in, float * I_out, float * Q_out, const unsigned int n0, const float freq_shift, const float Fs)
{
//Who am I?
//const unsigned int thread_id = threadIdx.x;
//const unsigned int block_id = blockIdx.x;
const unsigned int global_index = (blockIdx.x * blockDim.x) + threadIdx.x;
float I_shift;
float Q_shift;
// sincospif computes sin/cos of (pi * x), so the pi factor is omitted here.
float theta_nopi = 2.0*freq_shift*(n0 + global_index)/Fs;
sincospif(theta_nopi, &Q_shift, &I_shift);
cMult(I_in[global_index], Q_in[global_index], I_shift, Q_shift, &I_out[global_index], &Q_out[global_index]);
}
// FIR based decimation
__global__
// FIR filter + decimation. Each block computes ONE filtered sample at index
// block_id: threads multiply the previous FIR_SIZE input samples by the
// constant-memory taps, the block reduces the products, and thread 0 writes
// the result only for every decimation_factor-th block.
// NOTE(review): conv[] is sized FIR_SIZE and indexed by threadIdx.x, so the
// launch must use blockDim.x == FIR_SIZE (and a power of two for sum_array);
// confirm against the caller's -t option handling.
void decimate(float * input_buffer, float * output_buffer, const unsigned int decimation_factor)
{
__shared__ float conv[FIR_SIZE];
//Who am I?
const unsigned int thread_id = threadIdx.x;
const unsigned int block_id = blockIdx.x;
// One product per thread; samples before the start of the buffer are 0.
float sample = 0.0;
int sample_index = block_id - thread_id;
if(sample_index >= 0)
sample = input_buffer[sample_index];
conv[thread_id] = sample*fir_coef[thread_id];
__syncthreads();
// Tree-reduce the products; the sum lands in conv[0].
sum_array(conv, blockDim.x, thread_id);
// Keep every decimation_factor-th output sample.
if(thread_id == 0)
{
if((block_id % decimation_factor) == 0)
output_buffer[block_id / decimation_factor] = conv[0];
}
}
// main_sub0 : Method to copy an input buffer into cuda and copy the results out
// Allocate device buffers, run the frequency-shift + decimate pipeline on
// the I/Q data in the global cpu_*_in_buffer arrays, and copy the decimated
// results back into cpu_*_result_buffer. Optionally times the GPU section.
// NOTE(review): no CUDA call here is error-checked, and the freq_shift grid
// num_blocks/32 truncates when num_blocks is not a multiple of 32 --
// presumably num_blocks is always a power of two; confirm with callers.
void main_sub0()
{
// Declare pointers for GPU based params
float *gpu_I_in_buffer;
float *gpu_I_mixed_buffer;
float *gpu_I_result_buffer;
float *gpu_Q_in_buffer;
float *gpu_Q_mixed_buffer;
float *gpu_Q_result_buffer;
// Allocate memory in the GPU
cudaMalloc((void **)&gpu_I_in_buffer, array_size_in_bytes);
cudaMalloc((void **)&gpu_I_mixed_buffer, array_size_in_bytes);
cudaMalloc((void **)&gpu_I_result_buffer, array_size_in_bytes);
cudaMalloc((void **)&gpu_Q_in_buffer, array_size_in_bytes);
cudaMalloc((void **)&gpu_Q_mixed_buffer, array_size_in_bytes);
cudaMalloc((void **)&gpu_Q_result_buffer, array_size_in_bytes);
// Upload the FIR taps into __constant__ memory.
cudaMemcpyToSymbol(fir_coef, &cpu_fir_coef, fir_size_in_bytes);
#if SHOW_ELAPSED_TIME
float ms;
// Setup Start and Stop event
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
// Start timer
cudaEventRecord(startEvent, 0);
#endif
// Copy data from CPU to GPU
cudaMemcpy(gpu_I_in_buffer, cpu_I_in_buffer, array_size_in_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_Q_in_buffer, cpu_Q_in_buffer, array_size_in_bytes, cudaMemcpyHostToDevice);
// Pipeline: shift both rails in one launch, then FIR-decimate each rail.
freq_shift<<<num_blocks/32, 32>>>(gpu_I_in_buffer, gpu_Q_in_buffer, gpu_I_mixed_buffer, gpu_Q_mixed_buffer, 0, FREQ_SHIFT, SAMP_FREQ);
decimate<<<num_blocks, num_threads>>>(gpu_I_mixed_buffer, gpu_I_result_buffer, DECIMATION_RATE);
decimate<<<num_blocks, num_threads>>>(gpu_Q_mixed_buffer, gpu_Q_result_buffer, DECIMATION_RATE);
// Copy results from GPU to CPU (blocking; also synchronizes the kernels).
cudaMemcpy(cpu_I_result_buffer, gpu_I_result_buffer, array_size_in_bytes/DECIMATION_RATE, cudaMemcpyDeviceToHost);
cudaMemcpy(cpu_Q_result_buffer, gpu_Q_result_buffer, array_size_in_bytes/DECIMATION_RATE, cudaMemcpyDeviceToHost);
#if SHOW_ELAPSED_TIME
// Stop timer
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&ms, startEvent, stopEvent);
printf("Elapsed Time: %f ms\n", ms);
// Destroy timer
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
#endif
//Destory streams
//cudaStreamDestroy(stream1);
//cudaStreamDestroy(stream2);
// Free the arrays on the GPU
cudaFree(gpu_I_in_buffer);
cudaFree(gpu_I_mixed_buffer);
cudaFree(gpu_I_result_buffer);
cudaFree(gpu_Q_in_buffer);
cudaFree(gpu_Q_mixed_buffer);
cudaFree(gpu_Q_result_buffer);
}
//main : parse command line arguments and run GPU code
// main: parse command line options, load the FIR taps and input IQ samples,
// run the GPU pipeline (main_sub0), and write the decimated output.
// Fixes over the original: every fopen() is checked before fscanf/fprintf
// dereferences the handle (a missing taps or input file crashed the
// program), and the new[] host buffers are released before exit.
int main(int argc, char *argv[])
{
    // -b <int> sets the number of GPU blocks
    // -t <int> sets the number of GPU threads per block
    // -v sets verbose flag - shows math results
    int c;
    bool showMathResults = false;
    while ((c = getopt (argc, argv, "b:t:v")) != -1)
        switch (c)
        {
            case 'b':
                num_blocks = atoi(optarg);
                break;
            case 't':
                num_threads = atoi(optarg);
                break;
            case 'v':
                showMathResults = true;
                break;
            default:
                printf("USAGE:\n-b <int> GPU blocks\n-t <int> GPU threads (each block)\n-v Verbose\n");
                return EXIT_SUCCESS;
        }
    printf("Blocks: %d\nThreads: %d\n", num_blocks, num_threads);
    // One sample per block.
    array_size = num_blocks;
    array_size_in_bytes = sizeof(float) * (array_size);
    cpu_I_in_buffer = new float[array_size];
    cpu_I_result_buffer = new float[array_size];
    cpu_Q_in_buffer = new float[array_size];
    cpu_Q_result_buffer = new float[array_size];
    // Load FIR taps; fail cleanly instead of passing NULL to fscanf.
    FILE * iFile;
    char fileName[100];
    sprintf(fileName, "fir_dec_%d_taps_%d.txt", DECIMATION_RATE, FIR_SIZE);
    iFile = fopen(fileName, "r");
    if (iFile == NULL)
    {
        fprintf(stderr, "Unable to open %s\n", fileName);
        return EXIT_FAILURE;
    }
    for(unsigned int i=0; i<FIR_SIZE; i++)
        fscanf(iFile, "%f\r\n", &cpu_fir_coef[i]);
    fclose(iFile);
    // Read the I/Q input samples.
    float I, Q;
    iFile = fopen("inputIQ.txt", "r+");
    if (iFile == NULL)
    {
        fprintf(stderr, "Unable to open inputIQ.txt\n");
        return EXIT_FAILURE;
    }
    for(unsigned int i=0; i<array_size; i++)
    {
        fscanf(iFile, "%f,%f\r\n", &I, &Q);
        cpu_I_in_buffer[i] = I;
        cpu_Q_in_buffer[i] = Q;
    }
    fclose(iFile);
    // Run the GPU pipeline.
    main_sub0();
    // Output results
    if(showMathResults)
    {
        for(unsigned int i = 0; i < array_size/DECIMATION_RATE; i++)
        {
            printf("%.5f, %.5f\n", cpu_I_result_buffer[i], cpu_Q_result_buffer[i]);
        }
    }
    printf("\n");
    FILE * oFile = fopen("outputIQ.txt", "w+");
    if (oFile == NULL)
    {
        fprintf(stderr, "Unable to open outputIQ.txt\n");
        return EXIT_FAILURE;
    }
    for(unsigned int i=0; i<array_size/DECIMATION_RATE; i++)
    {
        fprintf(oFile, "%f,%f\r\n", cpu_I_result_buffer[i], cpu_Q_result_buffer[i]);
    }
    fclose(oFile);
    // Release host buffers.
    delete[] cpu_I_in_buffer;
    delete[] cpu_I_result_buffer;
    delete[] cpu_Q_in_buffer;
    delete[] cpu_Q_result_buffer;
    return EXIT_SUCCESS;
}
|
7,707 | #include <stdlib.h>
#include <iostream>
#include "test_hashmap.cuh"
#include "test_polyaurn.cuh"
#include "test_spalias.cuh"
#include "test_topics.cuh"
namespace gpulda_test {
// Run every gpulda test suite in order, announcing each one on stdout.
void run_tests() {
  struct NamedTest { const char* banner; void (*fn)(); };
  const NamedTest suites[] = {
    {"testing hashmap", test_hashmap},
    {"testing polya_urn_init", test_polya_urn_init},
    {"testing polya_urn_sample", test_polya_urn_sample},
    {"testing polya_urn_transpose", test_polya_urn_transpose},
    {"testing polya_urn_reset", test_polya_urn_reset},
    {"testing polya_urn_colsums", test_polya_urn_colsums},
    {"testing build_alias", test_build_alias},
    {"testing compute_d_idx", test_compute_d_idx},
    {"testing sample_topics_functions", test_sample_topics_functions},
    {"testing sample_topics", test_sample_topics},
  };
  std::cout << "running tests" << std::endl;
  for (const NamedTest& t : suites) {
    std::cout << t.banner << std::endl;
    t.fn();
  }
  std::cout << "tests completed" << std::endl;
}
}
|
7,708 | #include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
//DEVICE
// Scale each of the first n elements of arr by the integer constant k.
__global__ void kernelVector_x_constant( float* arr, int n, int k )
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;        // threads past the end of the array do nothing
    arr[idx] *= k;
}
//HOST
// Host driver: fill a float vector with 0..size-1, scale it by 65 on the
// GPU, and print the first 100 results.
// Fix: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
int main()
{
    const int size = 1000000;
    // Host-side buffer, initialized to its own indices.
    float* arr = new float[size];
    float* arr_DEVICE = NULL;
    for( int index = 0; index < size; index++ )
    {
        arr[index] = index;
    }
    // Device buffer of the same byte size, then upload.
    cudaMalloc((void**)&arr_DEVICE, size * sizeof(float));
    cudaMemcpy( arr_DEVICE, arr, size * sizeof(float), cudaMemcpyHostToDevice);
    // 512 threads per block, ceil(size/512) blocks.
    kernelVector_x_constant<<< ceil(size/512.0), 512 >>>( arr_DEVICE, size, 65 );
    // Wait for the kernel (the blocking cudaMemcpy below would also sync).
    cudaDeviceSynchronize();
    // Download the processed data back to host RAM.
    cudaMemcpy( arr, arr_DEVICE, size * sizeof(float), cudaMemcpyDeviceToHost);
    // Spot-check by printing the first 100 elements.
    for( int index = 0; index < 100; index++ )
    {
        cout << arr[index] << endl;
    }
    cudaFree( arr_DEVICE );
    delete[] arr;
    cin.get();
}
|
7,709 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <unistd.h>
#include <time.h>
#include <cuda_runtime.h>
#define PI 3.14159265
// Radians-to-degrees factor. Declared float: the original 'const int'
// truncated 57.2957... down to 57, skewing every angle that used it.
const float val = 180.0 / PI;
const int thread = 256;
// Arcminutes -> radians. The original wrote 1 / 60 * PI / 180, where the
// leading 1 / 60 is INTEGER division and evaluates to 0, zeroing every
// coordinate it scaled.
const float fix = (1.0f / 60.0f) * (PI / 180.0f);
const int bins = 720;
using namespace std;
// Bin the angular separations between galaxies [xind + thread) x
// [yind, yind+max_y) into a per-block shared histogram, then export it.
// NOTE(review) -- several issues to confirm against the caller:
//  - shared[int(angle)]++ is not atomic; with blockDim.x > 1 the increments
//    race (the call site uses block.x == 1, which hides this -- see the
//    author's TODO below);
//  - the export loop stores histi[i] = shared[i], so concurrent blocks
//    overwrite one another's partial histograms instead of accumulating;
//  - cos(a0[idx] * -a1[i]) MULTIPLIES the two right ascensions; the
//    host-side formula in main() uses cos(alpha1 - a2), so subtraction was
//    presumably intended;
//  - angle is in radians (0..pi) so int(angle) only touches bins 0..3 of
//    the 720 available -- verify the intended binning.
__global__ void angles(volatile float *a0, volatile float *b0, volatile float *a1, volatile float *b1, int xind, int yind, int max_x, int max_y, volatile int *histi)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
float angle;
idx += xind;
__shared__ unsigned int shared[bins];
// Always clear the first iteration
if (threadIdx.x == 0) {
for (int i = 0; i < bins; i++) {
shared[i] = 0;
}
}
__syncthreads();
// Check
for (int i = yind; i < yind + max_y; i++) {
angle = acosf((sin(b0[idx]) * sin(b1[i]) + cos(b0[idx]) * cos(b1[i]) * cos(a0[idx] * -a1[i])) * 1 / val);
shared[int(angle)]++;
// TODO: Make atomicAdd work :D
// atomicAdd(&shared[int(angle)], 1);
}
__syncthreads();
if (threadIdx.x == 0) {
for (int i = 0; i < bins; i++) {
histi[i] = shared[i];
}
}
}
// Load the real and synthetic galaxy catalogues, convert their coordinates
// from arcminutes to radians, and accumulate a 720-bin histogram of pairwise
// angles on the GPU, tiling the pair space in 512*512 chunks.
// Fixes over the original:
//  - fopen() results are checked before use;
//  - the inner tile offset was computed from the OUTER loop index
//    (x = 512*512*i inside the j loop), so the x tile never advanced;
//  - the totals are unsigned long and are printed with %lu (was %d);
//  - the host histogram buffers are freed (and the unused 'histi' local
//    plus a duplicated cudaMemset were removed).
void read_the_files()
{
    FILE *real_g;
    FILE *synthetic_g;
    int galaxies_r, galaxies_s;
    float *a0, *a1, *b0, *b1;
    real_g = fopen("data_100k_arcmin.txt", "r");
    synthetic_g = fopen("flat_100k_arcmin.txt", "r");
    if (real_g == NULL || synthetic_g == NULL) {
        fprintf(stderr, "unable to open input catalogues\n");
        exit(EXIT_FAILURE);
    }
    fscanf(real_g, "%d", &galaxies_r);
    fscanf(synthetic_g, "%d", &galaxies_s);
    a0 = (float*) malloc(galaxies_r * sizeof(float));
    b0 = (float*) malloc(galaxies_r * sizeof(float));
    a1 = (float*) malloc(galaxies_s * sizeof(float));
    b1 = (float*) malloc(galaxies_s * sizeof(float));
    for (int i = 0; i < galaxies_r; i++) {
        fscanf(real_g, "%e %e", &a0[i], &b0[i]);
        fscanf(synthetic_g, "%e %e", &a1[i], &b1[i]);
        // arcminutes -> radians
        a0[i] *= fix;
        b0[i] *= fix;
        a1[i] *= fix;
        b1[i] *= fix;
    }
    fclose(real_g);
    fclose(synthetic_g);
    dim3 grid, block;
    grid.x = 1024;
    grid.y = 1024;
    block.x = 1;
    float *aa1, *bb1, *aa0, *bb0;
    cudaMalloc((void **) &aa0, galaxies_r * sizeof(float));
    cudaMalloc((void **) &bb0, galaxies_r * sizeof(float));
    cudaMalloc((void **) &aa1, galaxies_s * sizeof(float));
    cudaMalloc((void **) &bb1, galaxies_s * sizeof(float));
    // Zero the device buffers, then upload the catalogues.
    cudaMemset(aa0, 0, galaxies_r * sizeof(float));
    cudaMemset(bb0, 0, galaxies_r * sizeof(float));
    cudaMemset(aa1, 0, galaxies_s * sizeof(float));
    cudaMemset(bb1, 0, galaxies_s * sizeof(float));
    cudaMemcpy(aa0, a0, galaxies_r * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(bb0, b0, galaxies_r * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(aa1, a1, galaxies_s * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(bb1, b1, galaxies_s * sizeof(float), cudaMemcpyHostToDevice);
    int x, y;
    // Per-launch histogram (device tmp -> host hist) and the running total.
    int *hist, *tmp;
    int size_h_bytes = 720 * sizeof(int);
    hist = (int*)malloc(size_h_bytes);
    memset(hist, 0, size_h_bytes);
    cudaMalloc((void **) &tmp, size_h_bytes);
    cudaMemset(tmp, 0, size_h_bytes);
    unsigned long *hist_array;
    int hist_array_size = 720 * sizeof(unsigned long);
    hist_array = (unsigned long*)malloc(hist_array_size);
    memset(hist_array, 0, hist_array_size);
    for (int i = 0; i < 9; i++) {
        y = i * 512 * 512;
        for (int j = 0; j < 9; j++) {
            x = 512 * 512 * j;   // was ...* i: the inner offset never moved
            angles<<<grid,block>>>(aa0, bb0, aa1, bb1, x, y, 512, 512, tmp);
            cudaMemcpy(hist, tmp, size_h_bytes, cudaMemcpyDeviceToHost);
            for (int k = 0; k < 720; k++) {
                hist_array[k] += hist[k];
            }
        }
    }
    for (int i = 0; i < 720; i++) {
        printf("%lu ", hist_array[i]);   // was %d for an unsigned long
    }
    free(a1);
    free(b1);
    free(a0);
    free(b0);
    free(hist);
    free(hist_array);
    cudaFree(aa1);
    cudaFree(aa0);
    cudaFree(bb0);
    cudaFree(bb1);
    cudaFree(tmp);
}
// Preparation for the kernel
// Entry point: print one analytically computed separation angle as a sanity
// check, then time the full GPU histogram run.
int main()
{
    float alpha1 = 4646.98;
    float b1 = 3749.51;
    float a2 = 4644.35;
    float b2 = 3749.52;
    // Spherical law of cosines for the two sample points.
    float theta1 = acos(sin(b1) * sin(b2) + cos(b1) * cos(b2) * cos(alpha1 - a2));
    printf("%f\n", b1);
    printf("%f\n", theta1);
    clock_t started = clock();
    read_the_files();
    clock_t finished = clock();
    double cpu_time_used = ((double) (finished - started)) / CLOCKS_PER_SEC;
    printf("%f", cpu_time_used);
    return EXIT_SUCCESS;
}
|
7,710 | #include <iostream>
#include <algorithm>
using namespace std;
// Element-wise maximum of two device vectors using a grid-stride loop, so
// any launch geometry covers all 'size' elements.
__global__ void VectorsPairMaximums(size_t size, double *first, double *second, double *res) {
    const size_t stride = gridDim.x * blockDim.x;
    size_t idx = (size_t) (blockDim.x * blockIdx.x + threadIdx.x);
    while (idx < size) {
        res[idx] = max(first[idx], second[idx]);
        idx += stride;
    }
}
// Read two vectors of doubles from stdin, compute their element-wise maxima
// on the GPU, and print the result space-separated in scientific notation.
__host__ int main(void) {
    size_t size;
    cin >> size;
    double *first = new double[size];
    double *second = new double[size];
    double *res = new double[size];
    for (size_t i = 0; i < size; i++) {
        cin >> first[i];
    }
    for (size_t i = 0; i < size; i++) {
        cin >> second[i];
    }
    const size_t bytes = sizeof(double) * size;
    double *cudaFirst;
    double *cudaSecond;
    double *cudaRes;
    cudaMalloc((void**) &cudaFirst, bytes);
    cudaMalloc((void**) &cudaSecond, bytes);
    cudaMalloc((void**) &cudaRes, bytes);
    cudaMemcpy(cudaFirst, first, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(cudaSecond, second, bytes, cudaMemcpyHostToDevice);
    VectorsPairMaximums<<<256, 256>>>(size, cudaFirst, cudaSecond, cudaRes);
    // Wait for the kernel via an event (the blocking memcpy below would also
    // synchronize the default stream).
    cudaEvent_t syncEvent;
    cudaEventCreate(&syncEvent);
    cudaEventRecord(syncEvent, 0);
    cudaEventSynchronize(syncEvent);
    cudaMemcpy(res, cudaRes, bytes, cudaMemcpyDeviceToHost);
    cudaEventDestroy(syncEvent);
    cudaFree(cudaFirst);
    cudaFree(cudaSecond);
    cudaFree(cudaRes);
    for (size_t i = 0; i < size; i++) {
        if (i > 0) {
            cout << " ";
        }
        cout << scientific << res[i];
    }
    cout << endl;
    delete [] first;
    delete [] second;
    delete [] res;
    return 0;
}
7,711 | # include <math.h>
__constant__ const float MAX_FLOAT32 = 3.4028e+038;
__constant__ const float MIN_FLOAT32 = -3.4028e+038;
__constant__ const float EPS_FLOAT32 = 2.22045e-016;
__constant__ float MU_WATER = 0.037;
__constant__ float MU_AIR = 0.00046;
// For each pixel (i, j) of an h x w detector image, back-project through the
// inverse intrinsics kinv (3x3) and inverse pose minv (3x4) to a 3D point,
// writing xyz triples into dsts (3 floats per thread). z_sign and sid scale
// the camera-frame ray; 'down' transposes the pixel indexing.
__global__ void backprojectPixel(
const int h, const int w, float * dsts,
const float * minv, const float * kinv,
const int z_sign, const bool down, const float sid)
{
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int localId = (threadIdx.y*blockDim.x) + threadIdx.x;
int threadId = blockId * (blockDim.x*blockDim.y) + localId;
// Stage the two small matrices in shared memory: threads 0-8 load the 3x3
// kinv, threads 9-20 load the 3x4 minv.
__shared__ float sKinv[9];
__shared__ float sMinv[12];
if (localId < 9){
sKinv[localId] = kinv[localId];
}
else if (localId < 21){
sMinv[localId - 9] = minv[localId - 9];
}
__syncthreads();
int i = threadId / w;
int j = threadId % w;
if (down == true) {
// Transposed orientation: swap the pixel coordinates.
int temp = i;
i = j;
j = temp;
}
if (threadId < h*w)
{
// Camera-frame ray direction scaled by source-to-image distance.
float dotx = sid*z_sign*(sKinv[0]*i + sKinv[1]*j + sKinv[2]*1);
float doty = sid*z_sign*(sKinv[3]*i + sKinv[4]*j + sKinv[5]*1);
float dotz = sid*z_sign*(sKinv[6]*i + sKinv[7]*j + sKinv[8]*1);
// World-frame point: minv * [dot; 1].
dsts[3*threadId + 0] = sMinv[0]*dotx + sMinv[1]*doty + sMinv[2]*dotz + sMinv[3]*1;
dsts[3*threadId + 1] = sMinv[4]*dotx + sMinv[5]*doty + sMinv[6]*dotz + sMinv[7]*1;
dsts[3*threadId + 2] = sMinv[8]*dotx + sMinv[9]*doty + sMinv[10]*dotz + sMinv[11]*1;
}
}
// Voxel index along one axis for the midpoint of the parametric interval
// [amin, minAxyz] on the ray src -> dst, given grid origin b and spacing s.
__device__ int getIJK(
const float & src, const float & dst, const float & minAxyz,
const float & amin, const float & b, const float & s)
{
    const double alphaMid = 0.5 * (minAxyz + amin);
    return floor((src + alphaMid * (dst - src) - b) / s);
}
// Parametric entry/exit values (amin, amax) of the ray p1 -> p2 against the
// slab of n planes starting at b with spacing s. A ray (nearly) parallel to
// the slab axis gets the full (MIN_FLOAT32, MAX_FLOAT32) range.
__device__ void getAlphas(
const float & b, const float & s, const float & p1,
const float & p2, const int & n, float & amin, float & amax)
{
    const float denom = p2 - p1;
    if (fabsf(denom) < EPS_FLOAT32) {
        amin = MIN_FLOAT32;
        amax = MAX_FLOAT32;
        return;
    }
    const float aFirst = (b - p1) / denom;
    const float aLast = (b + (n - 1) * s - p1) / denom;
    amin = (aFirst < aLast) ? aFirst : aLast;
    amax = (aFirst < aLast) ? aLast : aFirst;
}
// Parametric position of the FIRST grid-plane crossing of the ray p1 -> p2
// along one axis, entering the volume at alpha = amin (Siddon-style ray
// traversal). Degenerate (axis-parallel) rays return MAX_FLOAT32 so they
// never win the min in the traversal loop.
__device__ float getAx(
const float & p1, const float & p2, const int & n,
const float & b, const float & s, const float & axmin,
const float & axmax, const float & amin, const float & amax)
{
float ax = 0;
int imin = 1;
int imax = n-2;
if(fabsf(p2 - p1) < EPS_FLOAT32){
ax = MAX_FLOAT32;
}
else if (p1 < p2){
// Ray travels toward increasing coordinates: find the first plane index
// past the entry point (unless this axis itself bounds the entry).
if(fabsf(amin - axmin) > EPS_FLOAT32){
imin = floor((p1 + amin*(p2-p1) - b)/s + 1);
}
ax = ((b + imin*s) - p1)/(p2 - p1);
}
else {
// Ray travels toward decreasing coordinates: mirror case.
if(fabsf(amin - axmin) > EPS_FLOAT32){
imax = ceil((p1 + amin*(p2 - p1) - b)/s - 1);
}
ax = ((b + imax*s) - p1)/(p2 - p1);
}
return ax;
}
// Siddon-style ray tracing: for each of the h*w destination points, march
// the ray src -> dst through the CT volume rho (grid origin b, spacing sp,
// dimensions n), accumulating attenuation, and store exp(-lineIntegral) in
// raysums. Voxels with HU below 'threshold' contribute zero attenuation.
__global__ void traceRay(
const float * src, const float * dsts, float * raysums,
const float * rho, const float * b, const float * sp,
const int * n, const int h, const int w, const float threshold)
{
int blockId = blockIdx.x + blockIdx.y*gridDim.x;
int localId = (threadIdx.y*blockDim.x) + threadIdx.x;
int threadId = blockId * (blockDim.x*blockDim.y) + localId;
// Stage the small per-volume parameters in shared memory, striped across
// the first 12 threads of the block.
__shared__ float sB[3];
__shared__ float sSp[3];
__shared__ float sSrc[3];
__shared__ int sN[3];
if(localId < 3){
sB[localId] = b[localId];
} else if(localId < 6){
sSp[localId - 3] = sp[localId - 3];
} else if(localId < 9){
sN[localId - 6] = n[localId - 6];
} else if(localId < 12){
sSrc[localId - 9] = src[localId - 9];
}
__syncthreads();
if (threadId < h*w){
float3 dst = make_float3(dsts[3*threadId], dsts[3*threadId + 1], dsts[3*threadId + 2]);
// Per-axis parametric entry/exit of the ray against the volume slabs.
float axmin, axmax, aymin, aymax, azmin, azmax;
getAlphas(sB[0], sSp[0], sSrc[0], dst.x, sN[0], axmin, axmax);
getAlphas(sB[1], sSp[1], sSrc[1], dst.y, sN[1], aymin, aymax);
getAlphas(sB[2], sSp[2], sSrc[2], dst.z, sN[2], azmin, azmax);
float amin = fmaxf(axmin, fmaxf(aymin, azmin));
float amax = fminf(axmax, fminf(aymax, azmax));
if(amin > amax || (amin < 0)){
// Ray misses the volume (or starts past it): unattenuated.
raysums[threadId] = 1;
return;
}
else {
// First plane crossing on each axis, entry voxel, and loop state.
float ax = getAx(sSrc[0], dst.x, sN[0], sB[0], sSp[0], axmin, axmax, amin, amax);
float ay = getAx(sSrc[1], dst.y, sN[1], sB[1], sSp[1], aymin, aymax, amin, amax);
float az = getAx(sSrc[2], dst.z, sN[2], sB[2], sSp[2], azmin, azmax, amin, amax);
// Euclidean src->dst length converts alpha spans to millimetres.
float dconv = sqrtf(
(dst.x-sSrc[0])*(dst.x-sSrc[0]) +
(dst.y-sSrc[1])*(dst.y-sSrc[1]) +
(dst.z-sSrc[2])*(dst.z-sSrc[2]));
float d12 = 0;
float ac = amin;
float minAxyz = fminf(ax, fminf(ay, az));
int i = getIJK(sSrc[0], dst.x, minAxyz, amin, sB[0], sSp[0]);
int j = getIJK(sSrc[1], dst.y, minAxyz, amin, sB[1], sSp[1]);
int k = getIJK(sSrc[2], dst.z, minAxyz, amin, sB[2], sSp[2]);
// March voxel to voxel: each step crosses whichever axis plane comes
// next (smallest alpha), adding segmentLength * mu for that voxel.
while((-1 < i && i < (sN[0]-1)) &&
(-1 < j && j < (sN[1]-1)) &&
(-1 < k && k < (sN[2]-1))){
float hu = rho[k + j*(sN[2]-1) + i*(sN[2]-1)*(sN[1]-1)];
// HU -> linear attenuation coefficient.
float mu = (hu*(MU_WATER-MU_AIR)/1000 + MU_WATER);
if (hu < threshold){
mu = 0;
}
if(ax == minAxyz){
d12 = d12 + (ax - ac)*dconv*mu;
i = (sSrc[0] < dst.x)?(i+1): (i-1);
ac = ax;
ax = ax + sSp[0]/fabsf(dst.x - sSrc[0]);
}
else if(ay == minAxyz){
d12 = d12 + (ay - ac)*dconv*mu;
j = (sSrc[1] < dst.y)?(j+1): (j-1);
ac = ay;
ay = ay + sSp[1]/fabsf(dst.y - sSrc[1]);
}
else {
d12 = d12 + (az - ac)*dconv*mu;
k = (sSrc[2] < dst.z)?(k+1): (k-1);
ac = az;
az = az + sSp[2]/fabsf(dst.z - sSrc[2]);
}
minAxyz = fminf(ax, fminf(ay, az));
}
// Beer-Lambert: transmitted fraction along the ray.
raysums[threadId] = expf(-d12);
}
}
}
|
7,712 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string>
#include <fstream>
#include <cmath>
#include <string>
#include <iostream>
using namespace std;
typedef struct {
double x,y,z;
} XYZ;
typedef struct {
XYZ p[8];
double val[8];
} GRIDCELL;
typedef struct {
XYZ p[3]; /* Vertices */
// XYZ c; /* Centroid */
// XYZ n[3]; /* Normal */
} TRIANGLE;
#define ABS(x) (x < 0 ? -(x) : (x))
// Prototypes
//__global__
//int PolygoniseCube(GRIDCELL,double,TRIANGLE *);
//XYZ VertexInterp(double,XYZ,XYZ,double,double);
/*
#define NX 200
#define NY 160
#define NZ 160
*/
#define NX 200//200
#define NY 160//160
#define NZ 160//160
//68 x 256 x 256
// Initialize n points to the fixed coordinate (3, 2, 5).
void fillMatrix(XYZ* a, int n)
{
    for (int i = 0; i < n; ++i)
    {
        a[i].x = 3;
        a[i].y = 2;
        a[i].z = 5;   // a commented-out rand()%5 variant existed here
    }
}
__global__
// Add the fixed offset (2, 3, 0) to each of the n input points a -> b.
void matrixAdition(XYZ * b, XYZ *a,int n)
{
    const int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n)
        return;
    b[idx].x = a[idx].x + 2;
    b[idx].y = a[idx].y + 3;
    b[idx].z = a[idx].z + 0;
}
// Print label s, then every point whose coordinates are all non-zero,
// one "x y z " triple per line.
void printMatrix(string s, XYZ *a , int tam){
    cout << s;
    for (int i = 0; i < tam; i++)
    {
        if (a[i].x == 0 || a[i].y == 0 || a[i].z == 0)
            continue;   // skip points with any zero coordinate
        cout << a[i].x << " " << a[i].y << " " << a[i].z << " ";
        cout << endl;
    }
}
// Allocate an NX x NY x NZ jagged int array and return it.
// Fixes over the original:
//  - the result is now RETURNED: assigning to the by-value 'data' parameter
//    never reached the caller, so every allocation was leaked;
//  - each malloc sizes the type actually stored (the original used
//    sizeof(short int) for the innermost int rows, under-allocating them).
// The parameter is kept (and overwritten) for call-site compatibility;
// callers should use the return value.
int*** assingMem(int *** data)
{
    int i, j;
    data = (int ***)malloc(NX * sizeof(int **));
    for (i = 0; i < NX; i++)
        data[i] = (int **)malloc(NY * sizeof(int *));
    for (i = 0; i < NX; i++)
        for (j = 0; j < NY; j++)
            data[i][j] = (int *)malloc(NZ * sizeof(int));
    return data;
}
// Read an NX x NY x NZ volume of byte samples from 'namefile' into the
// preallocated jagged array 'data' (z-major read order), printing the
// observed value range at the end.
// NOTE(review): themin/themax are passed by value, so the updated range is
// only visible in the fprintf below, never to the caller; same for fptr,
// which is immediately overwritten -- confirm whether callers rely on them.
void readFile(FILE *fptr, const char * namefile , int themin , int themax, int *** data)
{
int i,j,k,c;
fprintf(stderr,"Load data ...\n");
if ((fptr = fopen(namefile,"rb")) == NULL) {
fprintf(stderr,"Error al leer archivo\n");
exit(-1);
}
for (k=0;k<NZ;k++) {
for (j=0;j<NY;j++) {
for (i=0;i<NX;i++) {
// Premature EOF means the file is smaller than NX*NY*NZ bytes.
if ((c = fgetc(fptr)) == EOF) {
fprintf(stderr,"Error en tamaño\n");
exit(-1);
}
data[i][j][k] = c;
cout<<"leyendo :"<<c<<endl;
// Track the min/max sample seen (reported below).
if (c > themax)
themax = c;
if (c < themin)
themin = c;
}
}
}
fclose(fptr);
fprintf(stderr,"Rango del volumen: %d -> %d\n",themin,themax);
}
// Build one GRIDCELL per unit cube of the (NX-1) x (NY-1) x (NZ-1) lattice:
// the 8 corner positions plus their sampled values from 'data', appended to
// vectGrids in i-major order. Returns the number of cells written.
// Corner numbering follows the usual marching-cubes convention used by the
// edge table in PolygoniseCube below.
int constructCubes(GRIDCELL * vectGrids, int *** data)
{
int i,j,k;
// (progress logging removed; kept commented out by the original author)
int cont=0;
for (i=0;i<NX-1;i++) {
//cout<<i<<endl;
//if (i % (NX/10) == 0)
//fprintf(stderr," Slice %d de %d\n",i,NX);
for (j=0;j<NY-1;j++) {
for (k=0;k<NZ-1;k++) {
GRIDCELL grid;
// Bottom face (z = k), corners 0-3.
grid.p[0].x = i;
grid.p[0].y = j;
grid.p[0].z = k;
grid.val[0] = data[i][j][k];
grid.p[1].x = i+1;
grid.p[1].y = j;
grid.p[1].z = k;
grid.val[1] = data[i+1][j][k];
grid.p[2].x = i+1;
grid.p[2].y = j+1;
grid.p[2].z = k;
grid.val[2] = data[i+1][j+1][k];
grid.p[3].x = i;
grid.p[3].y = j+1;
grid.p[3].z = k;
grid.val[3] = data[i][j+1][k];
// Top face (z = k+1), corners 4-7.
grid.p[4].x = i;
grid.p[4].y = j;
grid.p[4].z = k+1;
grid.val[4] = data[i][j][k+1];
grid.p[5].x = i+1;
grid.p[5].y = j;
grid.p[5].z = k+1;
grid.val[5] = data[i+1][j][k+1];
grid.p[6].x = i+1;
grid.p[6].y = j+1;
grid.p[6].z = k+1;
grid.val[6] = data[i+1][j+1][k+1];
grid.p[7].x = i;
grid.p[7].y = j+1;
grid.p[7].z = k+1;
grid.val[7] = data[i][j+1][k+1];
vectGrids[cont]=grid;
cont++;
}
}
}
cout<<"numero de datos ingresados "<<cont<<endl;
return cont;
}
__device__
// Linearly interpolate the point on edge p1-p2 where the scalar field
// (valp1 at p1, valp2 at p2) crosses 'isolevel'. Degenerate cases --
// isolevel essentially at an endpoint, or a flat edge -- return an endpoint.
XYZ VertexInterp(double isolevel,XYZ p1,XYZ p2,double valp1,double valp2)
{
    if (ABS(isolevel - valp1) < 0.00001)
        return p1;
    if (ABS(isolevel - valp2) < 0.00001)
        return p2;
    if (ABS(valp1 - valp2) < 0.00001)
        return p1;
    const double mu = (isolevel - valp1) / (valp2 - valp1);
    XYZ p;
    p.x = p1.x + mu * (p2.x - p1.x);
    p.y = p1.y + mu * (p2.y - p1.y);
    p.z = p1.z + mu * (p2.z - p1.z);
    return p;
}
__device__
// Copy b's coordinates into a.
void copyXYZ(XYZ &a, XYZ &b)
{
    a.x = b.x;
    a.y = b.y;
    a.z = b.z;
}
__device__
// Return a point with all coordinates set to 300 (presumably a sentinel for
// unused output slots; usage is not visible in this chunk).
XYZ defect()
{
    XYZ a;
    a.x = 300;
    a.y = 300;
    a.z = 300;
    return a;
}
/*
__global__
void coyGRID(GRIDCELL * a, GRIDCELL * b, int x, int y, int z)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
/*if(i<x && j<y && k<z)
{
a[ij].p = b[ij].p;
a[ij].val = b[ij].val;
}
}
*/
/*
__global__
void copyGRID1(GRIDCELL * a, GRIDCELL * b, int x, int y, int z)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
int j = threadIdx.y + blockDim.y * blockIdx.y;
int k = threadIdx.z + blockDim.z * blockIdx.z;
if(i<x && j<y && k<z)
{
for(int w=0;w<8;w++)
{
a[i].p[w] = b[i].p[w];
a[i].val[w] = b[i].val[w];
}
}
}*/
/*
__global__
void PolygoniseCube(XYZ * vertlist ,GRIDCELL * g ,double iso, int x ,int y , int z)
*/
// Global count of triangles emitted across the whole grid. Every thread
// that produces a triangle increments it, so updates must be atomic.
__device__ int ntri=0;
// Marching-cubes polygonisation: one thread per cube (n cubes total).
// For each cube whose corner values straddle the hard-coded iso value 80,
// interpolate the edge/surface intersections and emit triangles using the
// classic edge/triangle lookup tables.
//
// Fix: ntri was incremented with a plain ++ (a data race across all
// threads); it now uses atomicAdd so the count is correct.
//
// FIXME(review): every triangle of a cube is written to the SAME slot
// d_vectTriangles[ind], so only the last triangle per cube survives.
// Storing them all needs a larger output buffer or an atomic output
// cursor — an interface change, not done here.
//
// NOTE(review): edgeTable/triTable are per-thread local arrays; moving
// them to file-scope __constant__ memory would save local memory and
// improve performance.
__global__
void PolygoniseCube(TRIANGLE * d_vectTriangles,GRIDCELL * g , int n)
{
const double iso = 80;    // iso-surface threshold (was int; identical in comparisons)
int ind = threadIdx.x + blockDim.x * blockIdx.x;
if(ind<n)
{
int cubeindex;
XYZ vertlist[12];
// Bit i of edgeTable[cubeindex] set => edge i is crossed by the surface.
const int edgeTable[256]={
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 };
// Classify the cube: one bit per corner that lies below the iso value.
cubeindex = 0;
if (g[ind].val[0] < iso) cubeindex |= 1;
if (g[ind].val[1] < iso) cubeindex |= 2;
if (g[ind].val[2] < iso) cubeindex |= 4;
if (g[ind].val[3] < iso) cubeindex |= 8;
if (g[ind].val[4] < iso) cubeindex |= 16;
if (g[ind].val[5] < iso) cubeindex |= 32;
if (g[ind].val[6] < iso) cubeindex |= 64;
if (g[ind].val[7] < iso) cubeindex |= 128;
// triTable[cubeindex] lists triangles as triples of edge indices into
// vertlist, terminated by -1.
const int triTable[256][16] =
{{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}};
// Cube entirely inside or outside the surface: nothing to emit.
if (edgeTable[cubeindex] == 0)
return;
/* Find the vertices where the surface intersects the cube */
if (edgeTable[cubeindex] & 1) {
vertlist[0] = VertexInterp(iso,g[ind].p[0],g[ind].p[1],g[ind].val[0],g[ind].val[1]);
}
if (edgeTable[cubeindex] & 2) {
vertlist[1] = VertexInterp(iso,g[ind].p[1],g[ind].p[2],g[ind].val[1],g[ind].val[2]);
}
if (edgeTable[cubeindex] & 4) {
vertlist[2] = VertexInterp(iso,g[ind].p[2],g[ind].p[3],g[ind].val[2],g[ind].val[3]);
}
if (edgeTable[cubeindex] & 8) {
vertlist[3] = VertexInterp(iso,g[ind].p[3],g[ind].p[0],g[ind].val[3],g[ind].val[0]);
}
if (edgeTable[cubeindex] & 16) {
vertlist[4] = VertexInterp(iso,g[ind].p[4],g[ind].p[5],g[ind].val[4],g[ind].val[5]);
}
if (edgeTable[cubeindex] & 32) {
vertlist[5] = VertexInterp(iso,g[ind].p[5],g[ind].p[6],g[ind].val[5],g[ind].val[6]);
}
if (edgeTable[cubeindex] & 64) {
vertlist[6] = VertexInterp(iso,g[ind].p[6],g[ind].p[7],g[ind].val[6],g[ind].val[7]);
}
if (edgeTable[cubeindex] & 128) {
vertlist[7] = VertexInterp(iso,g[ind].p[7],g[ind].p[4],g[ind].val[7],g[ind].val[4]);
}
if (edgeTable[cubeindex] & 256) {
vertlist[8] = VertexInterp(iso,g[ind].p[0],g[ind].p[4],g[ind].val[0],g[ind].val[4]);
}
if (edgeTable[cubeindex] & 512) {
vertlist[9] = VertexInterp(iso,g[ind].p[1],g[ind].p[5],g[ind].val[1],g[ind].val[5]);
}
if (edgeTable[cubeindex] & 1024) {
vertlist[10] = VertexInterp(iso,g[ind].p[2],g[ind].p[6],g[ind].val[2],g[ind].val[6]);
}
if (edgeTable[cubeindex] & 2048) {
vertlist[11] = VertexInterp(iso,g[ind].p[3],g[ind].p[7],g[ind].val[3],g[ind].val[7]);
}
// Emit the triangles for this cube (see FIXME above: each write reuses
// slot `ind`, so only the final triangle of the cube is kept).
int t=0;
while(triTable[cubeindex][t] != -1)
{
d_vectTriangles[ind].p[0] = vertlist[triTable[cubeindex][t]];
d_vectTriangles[ind].p[1] = vertlist[triTable[cubeindex][t+1]];
d_vectTriangles[ind].p[2] = vertlist[triTable[cubeindex][t+2]];
atomicAdd(&ntri, 1);   // was a racy ntri++ shared by all threads
t+=3;
}
}
}
//return ntri;
// Print the label `a` followed by the eight corner values of each of the
// `tam` grid cells, one value per line.
void printGrid(string a, GRIDCELL * g, int tam)
{
    cout << a;
    for (int cell = 0; cell < tam; cell++) {
        for (int corner = 0; corner < 8; corner++) {
            printf("%f \n", g[cell].val[corner]);
        }
    }
}
// Write the non-zero vertices of `tam` triangles to "Triangulos.txt" as an
// ASCII PCD v.5 point cloud, preceded on stdout by the label `a`.
// Fix: the PCD header previously hard-coded WIDTH/POINTS 742716 no matter
// how many points were actually written; it now advertises the real count.
void printTriangles(string a , TRIANGLE * t, int tam)
{
    cout << a;
    int cont = 0;
    // First pass: count the vertices that will actually be emitted so the
    // header matches the body. A vertex is skipped when all three
    // components are exactly zero (the "unset" marker in the output buffer).
    int points = 0;
    for (int i = 0; i < tam; i++)
        for (int k = 0; k < 3; k++)
            if (t[i].p[k].x != 0 && t[i].p[k].y != 0 && t[i].p[k].z != 0)
                points++;
    ofstream myfile;
    myfile.open ("Triangulos.txt");
    myfile<<"# .PCD v.5 - Point Cloud Data file format"<<endl;
    myfile<<"VERSION .5"<<endl;
    myfile<<"FIELDS x y z"<<endl;
    myfile<<"SIZE 4 4 4"<<endl;
    myfile<<"TYPE F F F"<<endl;
    myfile<<"COUNT 1 1 1"<<endl;
    myfile<<"WIDTH "<<points<<endl;
    myfile<<"HEIGHT 1"<<endl;
    myfile<<"POINTS "<<points<<endl;
    myfile<<"DATA ascii"<<endl;
    // Second pass: emit the points. `cont` keeps the original semantics —
    // it counts every vertex slot visited (3 * tam), not only those written.
    for (int i = 0; i < tam; i++)
    {
        for (int k = 0; k < 3; k++)
        {
            cont++;
            if (t[i].p[k].x != 0 && t[i].p[k].y != 0 && t[i].p[k].z != 0)
            {
                myfile<<t[i].p[k].x<<" "<<t[i].p[k].y<<" "<<t[i].p[k].z<<endl;
            }
        }
    }
    myfile.close();
    cout<<"numero de triangulos"<<cont<<endl;
}
/*
XYZ * CalcNormals(string a , XYZ * data, int tam)
{
XYZ normales[tam];
for (int i=0;i<NX-1;i++) {
for (int j=0;j<NY-1;j++) {
for (int k=0;k<NZ-1;k++)
{
Normales[n].x=(data[i+1][j][k]+data[i-1,j,k])/0.0001;
Normales[n].y=(data[i][j+1][k]-data[i][j-1][k])/0.0001;
Normales[n].z=(data[i][j][k+1]-data[i][j][k-1])/0.0001;
n++;
}
}
return normales;
}
*/
// Read a raw 8-bit volume (NX*NY*NZ bytes, z-major) from the file named by
// the last command-line argument, build the marching-cubes cell grid, run
// the PolygoniseCube kernel and dump the resulting triangles to disk.
// Fixes: argc is checked before touching argv, the kernel launch is checked
// with cudaGetLastError, the CUDA events and the 3D `data` array are freed
// (previously leaked), and unused locals (numtri, isolevel, n) are removed.
int main(int argc, char *argv[])
{
    int i, j, k, c;
    int ***data;
    FILE *fptr;
    int N = ((NX - 1) * (NY - 1) * (NZ - 1));   // number of cubes in the grid
    cout << N << endl;
    int THREADS_PER_BLOCK = 1024;
    int themin = 255;   // running min/max of the voxel values
    int themax = 0;
    // Malloc the volumetric data, hardwired size!
    data = (int ***)malloc(NX * sizeof(int **));
    for (i = 0; i < NX; i++)
        data[i] = (int **)malloc(NY * sizeof(int *));
    for (i = 0; i < NX; i++)
        for (j = 0; j < NY; j++)
            data[i][j] = (int *)malloc(NZ * sizeof(int));
    // Open and read the raw data (guard argc before indexing argv).
    fprintf(stderr,"Reading data ...\n");
    if (argc < 2 || (fptr = fopen(argv[argc - 1], "rb")) == NULL) {
        fprintf(stderr,"File open failed\n");
        exit(-1);
    }
    cout << "llega" << endl;
    for (k = 0; k < NZ; k++) {
        for (j = 0; j < NY; j++) {
            for (i = 0; i < NX; i++) {
                if ((c = fgetc(fptr)) == EOF) {
                    fprintf(stderr,"Unexpected end of file\n");
                    exit(-1);
                }
                data[i][j][k] = c;
                if (c > themax)
                    themax = c;
                if (c < themin)
                    themin = c;
            }
        }
    }
    fclose(fptr);
    fprintf(stderr,"Volumetric data range: %d -> %d\n",themin,themax);
    int sizeGRID = N * sizeof(GRIDCELL);
    int sizeTRI  = N * sizeof(TRIANGLE);
    cout << "sizeGRID " << sizeGRID << endl;
    cout << "sizeTRI " << sizeTRI << endl;
    GRIDCELL *vectGrids = (GRIDCELL *)malloc(sizeGRID);
    TRIANGLE *vectTriangles = (TRIANGLE *)malloc(sizeTRI);
    GRIDCELL *d_vectGrids;
    TRIANGLE *d_vectTriangles;
    cout << "mem of grid " << vectGrids << endl;
    cout << "mem of triangulos " << vectTriangles << endl;
    cout << "asigna memoria sin problemas" << endl;
    // numCubos should equal N; constructCubes also prints its own count.
    int numCubos = constructCubes(vectGrids, data);
    (void)numCubos;
    cout << "pasa" << endl;
    size_t available, total;
    cudaMemGetInfo(&available, &total);
    cout << "available: " << available << " total: " << total << endl;
    cudaMalloc((void **)&d_vectGrids, sizeGRID);
    cudaMemcpy(d_vectGrids, vectGrids, sizeGRID, cudaMemcpyHostToDevice);
    cout << "asignacion en RAM" << endl;
    cudaMalloc((void **)&d_vectTriangles, sizeTRI);
    cudaMemcpy(d_vectTriangles, vectTriangles, sizeTRI, cudaMemcpyHostToDevice);
    cout << "mem of grid " << d_vectGrids << endl;
    cout << "mem of triangulos " << d_vectTriangles << endl;
    cout << "separa memoria en cuda sin problemas" << endl;
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Ceil-division so every cube gets a thread.
    int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    cout << "blocks : \n" << blocks << "\n threds: \n " << THREADS_PER_BLOCK << endl;
    cudaEventRecord(start, 0);
    PolygoniseCube<<<blocks, THREADS_PER_BLOCK>>>(d_vectTriangles, d_vectGrids, N);
    // Kernel launches fail silently; surface configuration errors here.
    cudaError_t kerr = cudaGetLastError();
    if (kerr != cudaSuccess)
        fprintf(stderr, "Kernel launch failed: %s\n", cudaGetErrorString(kerr));
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Elapsed time : %f ms\n", elapsedTime);
    TRIANGLE *cpy_vectTriangles = (TRIANGLE *)malloc(sizeTRI);
    cout << "crea sin problemas en host" << endl;
    cudaMemcpy(cpy_vectTriangles, d_vectTriangles, sizeTRI, cudaMemcpyDeviceToHost);
    printTriangles("Printing Tringles \n", cpy_vectTriangles, N);
    // Release everything (the volume was previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(vectTriangles);
    free(vectGrids);
    free(cpy_vectTriangles);
    for (i = 0; i < NX; i++) {
        for (j = 0; j < NY; j++)
            free(data[i][j]);
        free(data[i]);
    }
    free(data);
    cudaFree(d_vectTriangles);
    cudaFree(d_vectGrids);
    return 0;
}
/*
for(int i=0;i<N;i++)
{
cudaMalloc((void**)&d_p,8*sizeof(XYZ));
//cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
cudaMalloc((void**)&d_val,8*sizeof(double));
//cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
cudaMemcpy(d_p,vectGrids[i].p,8*sizeof(XYZ),cudaMemcpyHostToDevice);
//for(int w=0;w<8;w++)
//{
cout<<vectGrids[i].p[w].y<<endl;
//}
//cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
cudaMemcpy(d_val,vectGrids[i].val,8*sizeof(double),cudaMemcpyHostToDevice);
//cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl
cudaMemGetInfo(&available, &total);
//cout<<"available: " << available<<" total: "<<total <<endl;
cudaMemcpy(d_vectGrid[i].val, d_val, 8*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_vectGrid[i].p, d_p, 8*sizeof(XYZ),cudaMemcpyHostToDevice);
}*/ |
7,713 | #define MAXBLOCK 512
// Sign of x: -1 for negatives, 0 for exact zero, +1 otherwise.
// (NaN falls through to +1, matching the original fall-through behaviour.)
__device__ int sgn(float x){
    if(x < 0.0){
        return -1;
    }
    return (x == 0.0) ? 0 : 1;
}
// Integer square. NOTE: overflows int for |x| > 46340; callers pass grid
// coordinate differences, which stay well below that here.
__device__ int square(int x){
return x*x;
}
// Initialise the distance field: each cell becomes +/- max(width,height)^2
// (an upper bound on any squared distance in the grid), with the sign taken
// from the implicit function, or 0 exactly on the zero level set.
__global__ void binarize(int* output, int2 size, float* implicit){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col >= size.x || row >= size.y) return;
    const int idx = col + row * size.x;
    const int bound = square(max(size.x, size.y));
    output[idx] = sgn(implicit[idx]) * bound;
}
// One sweep of a squared-Euclidean distance-transform reduction over the
// rows (dim == 0) or columns (dim != 0) of a width x height int grid of
// signed squared distances.
// Launch requirement (per the original comment below): blockDim must span
// the full length of the reduced dimension, because `pos` indexes the
// __shared__ arrays directly; MAXBLOCK (512) bounds that length.
__global__ void edt_pass(int* samples, const int width, const int height, const int dim){
// Per-block scratch: parabola offsets, vertex positions, and signed
// position markers for the zero level set.
__shared__ int coeffs[MAXBLOCK];
__shared__ int verts[MAXBLOCK];
__shared__ int signs[MAXBLOCK];
// this requires that the thread size is (length of data,1)
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
int* sample = &(samples[x + y * width]);
int frame;   // base index of this thread's current pair/frame
int pos;     // position along the reduced dimension
int size;    // remaining length of the reduced dimension
if(dim){
frame = y & ~1;
pos = y;
size = height;
}else{
frame = x & ~1;
pos = x;
size = width;
}
// Perform the first set of reductions
int out = *sample;
int sign = sgn(out);   // note: sgn takes float; the int converts implicitly
out = abs(out);
signs[pos] = sign * pos;
coeffs[pos] = out;
__syncthreads();
// Compare against the partner element of this 2-wide frame.
int otherindex = pos ^ 1;
int otherdata = coeffs[otherindex];
int othersign = signs[otherindex];
if(othersign * sign < 0){
// The pair straddles the zero level set: distance becomes 0 or 1.
if(sign == -1){
out = 0;
coeffs[pos] = 0;
verts[pos] = pos;
signs[pos] = 0;
}else{
out = 1;
coeffs[pos] = 1;
verts[pos] = otherindex;
}
}else if(out > otherdata){
coeffs[pos] = otherdata + 1;
verts[pos] = otherindex;
}else{
verts[pos] = pos;
}
__syncthreads();
// Hierarchical merge: frames are combined pairwise each round while the
// candidate minimum distance `out` is updated.
int mask = 3;
while(size > 0){
size >>= 1;
int base = frame & ~3;
int dest = base >> 1;
int offset = base ^ frame;
int half = offset >> 1;
offset = offset | half;
int par = pos & mask;
int lowvertex = verts[base + 1];
int highvertex = verts[base + 2];
int lowcoeff = coeffs[base + 1];
int highcoeff = coeffs[base + 2];
int lowsgn = signs[base+1];
int highsgn = signs[base+2];
if((0 > lowsgn * highsgn) && (size > 1)){
lowvertex = abs(min(lowsgn,highsgn));
lowcoeff = 0;
}
// Candidate squared distances from the two inner vertices and the
// frame-boundary vertex.
int low = square(pos - lowvertex) + lowcoeff;
int high = square(pos - highvertex) + highcoeff;
int extreme = square(pos - verts[base + offset]) + coeffs[base + offset];
out = min(out,min(high,min(low,extreme)));
// NOTE(review): the __syncthreads() calls below sit inside divergent
// branches. Both arms issue the same number of barriers, but barriers
// in divergent control flow are undefined behaviour per the CUDA
// programming guide — confirm this is safe on the target architecture.
if(par == 0 || par == mask){
int vertex;
int coefficient;
if(high < extreme || low < extreme){
if(high < low){
vertex = highvertex;
coefficient = highcoeff;
}else{
vertex = lowvertex;
coefficient = lowcoeff;
}
} else {
vertex = verts[base + offset];
coefficient = coeffs[base + offset];
}
int s = signs[base + 3 * half];
__syncthreads();
signs[dest + half] = s;
coeffs[dest + half] = coefficient;
verts[dest + half] = vertex;
__syncthreads();
}else{
__syncthreads();
__syncthreads();
}
frame = dest;
mask = (mask << 1) + 1;
}
// Re-apply the original sign to the minimised squared distance.
*sample = sign * out;
}
// Convert the signed squared-distance grid back to a signed distance in
// [-1, 1]-ish range: sign(v) * sqrt(|v|) / min(width, height).
__global__ void signed_sqrt(int* values, int2 size, float* output){ // int width, int height){
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col >= size.x || row >= size.y) return;
    const int idx = col + size.x * row;
    int v = values[idx];
    float s = 1.0;
    if(v < 0){
        s = -1.0;
        v = -v;
    }
    output[idx] = s * sqrtf((float) v) / ((float) min(size.x, size.y));
}
|
7,714 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Check a CUDA API return code; on failure print a readable message with
// the failing file/line and (by default) abort with the error code.
// Fix: `file` is now const char* — __FILE__ is a string literal, and
// binding a string literal to char* is ill-formed in standard C++.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
// CUDA kernel that busy-waits until at least requested_cycles device clock
// cycles have elapsed; progress is published through completed_cycles[0]
// so the host can observe how far the spin has advanced.
__global__ void sleep(int64_t *completed_cycles, int64_t requested_cycles)
{
    const int64_t begin = clock64();
    completed_cycles[0] = 0;
    while (completed_cycles[0] < requested_cycles) {
        completed_cycles[0] = clock64() - begin;
    }
}
// Allocate a single device-side int64_t and return its address through
// *device_value; aborts via gpuErrchk on allocation failure.
extern "C" void allocate_mem(int64_t **device_value)
{
    gpuErrchk( cudaMalloc(reinterpret_cast<void**>(device_value), sizeof(int64_t)) );
}
// Blocking device-to-host copy of one int64_t from device_value into
// host_value; aborts via gpuErrchk on failure.
extern "C" void copy_mem(int64_t *host_value, int64_t *device_value)
{
    const size_t bytes = sizeof(int64_t);
    gpuErrchk( cudaMemcpy(host_value, device_value, bytes, cudaMemcpyDeviceToHost) );
}
// Convert a duration in seconds into the equivalent number of device clock
// cycles, using device 0's clock rate (reported by CUDA in kHz).
extern "C" int64_t get_cycles(float seconds)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    // clockRate is in kHz; scale to Hz before multiplying by seconds.
    const int64_t hz = int64_t(prop.clockRate) * 1000;
    return (int64_t)(seconds * hz);
}
// Launch the sleep kernel on the default stream. A single thread in a
// single block suffices: the kernel only spins on the device clock.
extern "C" void sleep_kernel(int64_t *completed_cycles, int64_t requested_cycles)
{
    const int gridSize = 1;
    const int blockSize = 1;
    sleep<<< gridSize, blockSize >>>(completed_cycles, requested_cycles);
}
// Wait for all work to complete
// Blocks the calling host thread until every previously issued device
// operation (kernels, async copies) has finished.
extern "C" void wait_for_gpu()
{
cudaDeviceSynchronize();
}
|
7,715 | //pass
//--blockDim=[64,1] --gridDim=[64,1]
#include <cuda.h>
#define BIN_COUNT 64
////////////////////////////////////////////////////////////////////////////////
// GPU-specific definitions
////////////////////////////////////////////////////////////////////////////////
//Fast mul on G8x / G9x / G100
#define IMUL(a, b) a * b
////////////////////////////////////////////////////////////////////////////////
// Merge blockN histograms into gridDim.x histograms
// blockDim.x == BIN_COUNT
// gridDim.x == BLOCK_N2
////////////////////////////////////////////////////////////////////////////////
#define MERGE_THREADS 64
// Merge blockN partial 64-bin histograms into the final histogram: each
// block owns one bin (gridDim.x == BIN_COUNT per the header comment above),
// each thread sums a strided subset of the partials, then a shared-memory
// tree reduction combines the MERGE_THREADS partial sums.
__global__ void mergeHistogram64Kernel(
unsigned int *d_Histogram,
unsigned int *d_PartialHistograms,
unsigned int blockN
){
__shared__ unsigned int data[MERGE_THREADS];
unsigned int sum = 0;
// Strided accumulation over the partial histograms for this block's bin.
for(unsigned int i = threadIdx.x; i < blockN; i += MERGE_THREADS) {
sum += d_PartialHistograms[blockIdx.x + i * BIN_COUNT];
}
data[threadIdx.x] = sum;
// Tree reduction; the required barrier is correctly placed OUTSIDE the
// divergent `if` below.
for(unsigned int stride = MERGE_THREADS / 2;
stride > 0; stride >>= 1){
__syncthreads();
/* BUGINJECT: ADD_BARRIER, DOWN */
// The MUTATION build deliberately injects a barrier inside a divergent
// branch (a defect) for verification tooling — do not "fix" or remove it.
if(threadIdx.x < stride) {
#ifdef MUTATION
__syncthreads();
#endif
data[threadIdx.x] += data[threadIdx.x + stride];
}
}
// Thread 0 publishes this bin's merged count.
if(threadIdx.x == 0)
d_Histogram[blockIdx.x] = data[0];
}
|
7,716 | #include <stdio.h>
#include <stdlib.h>
// Element-wise vector add: dc[i] = da[i] + db[i], one element per thread.
// NOTE(review): there is no bounds guard — the launch must use exactly as
// many threads as there are valid elements, or accesses go out of bounds.
__global__ void add(int *da, int *db, int *dc) {
    const int i = threadIdx.x;
    dc[i] = da[i] + db[i];
}
// Add two constant-valued vectors of length N on the GPU using gpuThreads
// CUDA threads, and print the first and last results.
// Fixes: validates argc before reading argv[1..4], rejects launch configs
// that would make the unguarded `add` kernel access out of bounds
// (gpuThreads > N), and returns an explicit status code.
int main(int argc, char **argv) {
    if (argc < 5) {
        fprintf(stderr, "usage: %s <a> <b> <N> <gpuThreads>\n", argv[0]);
        return 1;
    }
    int a_in = atoi(argv[1]); // Read the addends from the command line
    int b_in = atoi(argv[2]);
    int N = atoi(argv[3]); // Read the length of the vectors
    int gpuThreads = atoi(argv[4]); // Read the number of CUDA threads to use
    // The add kernel has no bounds check, so more threads than elements
    // would read and write past the arrays.
    if (N <= 0 || gpuThreads <= 0 || gpuThreads > N) {
        fprintf(stderr, "require 0 < gpuThreads <= N\n");
        return 1;
    }
    int *a, *b, *c;
    int *da, *db, *dc;
    int bytes = N * sizeof(int);
    a = (int *)malloc(bytes);
    b = (int *)malloc(bytes);
    c = (int *)malloc(bytes);
    for (int i = 0; i < N; i++) {
        a[i] = a_in; b[i] = b_in; c[i] = 0;
    }
    cudaMalloc((void **)&da, bytes);
    cudaMalloc((void **)&db, bytes);
    cudaMalloc((void **)&dc, bytes);
    cudaMemcpy(da, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(db, b, bytes, cudaMemcpyHostToDevice);
    add<<<1,gpuThreads>>>(da, db, dc);
    // Blocking copy also synchronises with the kernel before we read c.
    cudaMemcpy(c, dc, bytes, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    printf("%d + %d -> %d\n", a[0], b[0], c[0]);
    printf(" ...\n");
    printf("%d + %d -> %d\n", a[N-1], b[N-1], c[N-1]);
    cudaFree(da); cudaFree(db); cudaFree(dc);
    free(a); free(b); free(c);
    return 0;
}
|
7,717 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <device_launch_parameters.h>
#define LIST_SIZE 100000
extern "C" __device__ unsigned long long shiftCount[LIST_SIZE];
extern "C" __device__ unsigned long long shiftVal[LIST_SIZE];
extern "C" __device__ unsigned long long record_flag;
// Disable device-side logging by clearing the global record flag.
void bambooLogRecordOff(){
    long long off = 0;
    cudaMemcpyToSymbol(record_flag, &off, sizeof(long long), 0, cudaMemcpyHostToDevice);
}
// Enable device-side logging. The incoming value of i is ignored: the
// flag is unconditionally set to 1 (matches the original behaviour).
void bambooLogKernelBegin(long long i) {
    i = 1;
    cudaMemcpyToSymbol(record_flag, &i, sizeof(long long), 0, cudaMemcpyHostToDevice);
}
// Pull the shift-value profile off the device and write the averaged
// counts to profile_shift_value_result.txt.
void bambooLogKernelEnd()
{
#ifdef KERNELTRACE
    cudaDeviceSynchronize();
#endif
    // BUGFIX: the original placed two LIST_SIZE arrays of unsigned long
    // long (~1.6 MB total) on the stack, risking stack overflow; allocate
    // them on the heap (zero-initialised) instead.
    unsigned long long *shift_count =
        (unsigned long long *)calloc(LIST_SIZE, sizeof(unsigned long long));
    unsigned long long *shift_val =
        (unsigned long long *)calloc(LIST_SIZE, sizeof(unsigned long long));
    if (shift_count == NULL || shift_val == NULL) {
        free(shift_count);
        free(shift_val);
        return;
    }
    cudaMemcpyFromSymbol(shift_count, shiftCount, LIST_SIZE * sizeof(unsigned long long), 0, cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(shift_val, shiftVal, LIST_SIZE * sizeof(unsigned long long), 0, cudaMemcpyDeviceToHost);
    // BUGFIX: check fopen before writing (the original dereferenced a
    // potentially NULL FILE*).
    FILE *profileFile = fopen("profile_shift_value_result.txt", "w");
    if (profileFile != NULL) {
        for (long long i = 0; i < LIST_SIZE; i++) {
            if (shift_count[i] != 0) {
                // Average shift value per slot, followed by its count.
                fprintf(profileFile, "%lld: %lld %lld\n", i, shift_val[i]/shift_count[i], shift_count[i]);
            }
        }
        fclose(profileFile);
    }
    free(shift_count);
    free(shift_val);
}
|
7,718 | #include <stdio.h>
// Return the index (0, 1 or 2) of the smallest of three values.
// Strict '<' comparisons mean ties resolve toward the later argument,
// exactly as in the original.
__device__ static inline unsigned int argmin(unsigned int a, unsigned int b, unsigned int c)
{
    if (a < b)
        return (a < c) ? 0 : 2;
    return (b < c) ? 1 : 2;
}
// Wavefront dynamic-programming pass over an m x n cost matrix D with
// move penalty pnt; phi records which predecessor was chosen (1..3) and
// doubles as a "cell ready" flag for the row pipeline below.
// NOTE(review): appears to be a DTW/edit-distance style DP — confirm the
// exact recurrence against the caller before reuse.
__global__ void matching(int *D, int *phi, int m, int n)
{
 // matrix m x n
 int pnt = 2; // penalty added to the vertical/horizontal predecessors
 const int tid = threadIdx.x;// + blockDim.x * blockIdx.x;
 if (tid == 0)
 {
  printf("%d", tid);
  int j = 0;
  for (j = 0; j < n; j++)
  {
   // Relax cell (tid+1, j+1) from its three predecessors.
   int tmp[3] = { D[(tid * n) + j], D[(tid+1)*n+j] + pnt, D[(tid * n) + j + 1] + pnt };
   int arg = argmin(tmp[0], tmp[1], tmp[2]);
   int dmin = tmp[arg];
   D[((tid+1) * n) + j + 1] = D[((tid+1) * n) + j + 1] + dmin;
   phi[(tid * n) + j] = arg + 1; // nonzero => (row tid, col j) is done
  }
 }
 else
  if (tid < m)
  {
   int j = 0;
   for (j = 0; j < n; j++)
   {
    // Spin until the previous row has produced column j.
    // NOTE(review): phi is polled in a busy loop with no volatile
    // qualifier, atomic access or memory fence — the compiler may cache
    // the load and the producer's store may never become visible.
    // Verify with compute-sanitizer racecheck before relying on this.
    while(1)
    {
     if (phi[(tid-1) * n + j])
     {
      int tmp[3] = {D[(tid * n) + j], D[(tid+1)*n+j] + pnt, D[(tid * n) + j + 1] + pnt};
      int arg = argmin(tmp[0], tmp[1], tmp[2]);
      int dmin = tmp[arg];
      D[((tid+1) * n) + j + 1] = D[((tid+1) * n) + j + 1] + dmin;
      phi[(tid * n) + j] = arg + 1;
      break;
     }
    }
   }
  }
} |
7,719 | #include <iostream>
#include <chrono>
#include <functional>
#include <cmath>
#include <stdio.h>
using namespace std::chrono;
typedef std::function<void*() > func;
class Matrix;
__global__
void cudaMatMulClass(const int* A, int an, int am, const int* B, int bn, int bm, int* out);
// Dense integer matrix in CUDA managed memory with a GPU matMul.
class Matrix{
private:
    int m_dimX;      // number of columns
    int m_dimY;      // number of rows
    int* m_content;  // row-major element buffer (cudaMallocManaged)
    // Free the managed buffer if one is attached.
    void deleteContent(){
        if(m_content != NULL){
            cudaFree(m_content);
            m_content = NULL;
        }
    }
    // Proxy returned by operator[] so M[i][j] addresses row i, column j.
    class helper{
    public:
        int& operator[](int j){
            // BUGFIX: a row's stride is its length, i.e. the number of
            // columns (dimX). The original used dimY (the row count),
            // which silently corrupts indexing for non-square matrices
            // and disagrees with the row-major layout cudaMatMulClass
            // writes.
            return m_M->m_content[m_i*(m_M->dimX()) + j];
        }
        helper(const Matrix* m, int i): m_M(m), m_i(i){}
    private:
        const Matrix* m_M;
        int m_i;
    };
public:
    // Empty matrix: no storage, dimensions marked invalid.
    Matrix():m_dimX(-1), m_dimY(-1), m_content(NULL){}
    // dy x dx matrix with every element initialised to val.
    Matrix(int dy, int dx, int val):
        m_dimX(dx), m_dimY(dy){
        cudaMallocManaged(&m_content, m_dimX*m_dimY*sizeof(int));
        for(int i=0; i<(m_dimY*m_dimX); i++){
            m_content[i] = val;
        }
    }
    // dy x dx matrix with uninitialised contents.
    Matrix(int dy, int dx):
        m_dimX(dx), m_dimY(dy){
        cudaMallocManaged(&m_content, m_dimX*m_dimY*sizeof(int));
    }
    int nElem(){return m_dimX*m_dimY;}
    int dimX() const{return m_dimX;}
    int dimY() const{return m_dimY;}
    // NOTE(review): shallow copy — after assignment both objects alias
    // the same managed buffer, and with no destructor or refcount the
    // ownership of that buffer is ambiguous. Preserved as-is.
    Matrix& operator=(const Matrix& other){
        if(this != &other){
            this->deleteContent();
            this->m_content = other.m_content;
            this->m_dimX = other.m_dimX;
            this->m_dimY = other.m_dimY;
        }
        return *this;
    }
    // Print all elements row by row, then the (rows, cols) dimensions.
    void print(){
        for(int i=0; i<m_dimY; i++){
            for(int j=0; j<m_dimX; j++){
                std::cout << (*this)[i][j] << ",";
            }
            std::cout << std::endl;
        }
        std::cout << "("<<m_dimY<<","<<m_dimX<<")"<<std::endl;
    }
    helper operator[] (const int i) const{
        return helper(this, i);
    }
    // GPU product (*this) * other: one thread per output element,
    // rounded up to whole 256-thread blocks.
    Matrix matMul(const Matrix& other) const{
        if( this->m_dimX != other.m_dimY ){
            std::cout << "dimenstion don't fit " << std::endl;
            throw 0;
        }
        int blockSize = 256;
        int numBlocks = ((m_dimY * other.m_dimX) + blockSize - 1) / blockSize;
        Matrix ret(m_dimY, other.m_dimX);
        cudaMatMulClass<<<numBlocks,blockSize>>>(m_content, m_dimY, m_dimX, other.m_content, other.dimY(), other.dimX(), ret.m_content);
        cudaDeviceSynchronize();
        return ret;
    }
};
// Computes out = A * B where A is an x am and B is bn x bm (bn == am),
// all flat row-major arrays. Grid-stride over output elements: each
// thread handles indices ij, ij+stride, ij+2*stride, ...
__global__
void cudaMatMulClass(const int* A, int an, int am, const int* B, int bn, int bm, int* out){
    int i,j,ij, k;
    int ijS = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x; // total threads in the grid
    // One-time debug dump of the launch geometry.
    if(blockIdx.x==0 && threadIdx.x==0){
        printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n", blockIdx.x ,blockDim.x , threadIdx.x ,gridDim.x);
        printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n", blockIdx.y ,blockDim.y , threadIdx.y ,gridDim.y);
        printf("ijS= %d ,stride= %d \n", ijS, stride);
    }
    for(ij=ijS; ij < an*bm; ij+=stride){
        i = ij/bm; // output row
        j = ij%bm; // output column
        out[ij]=0;
        for(k=0; k<am; k++){
            // BUGFIX: a row of A holds am entries and a row of B holds bm
            // entries, so the row strides are am and bm. The original used
            // an and bn (the row counts), which is only correct for
            // square matrices.
            out[ij] += A[i*am + k]*B[k*bm + j];
        }
    }
}
__global__
void cudaMatMul(int** A, int an, int am, int** B, int bn, int bm, int** out){
int i,j,ij, k;
int ijS = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x; //how many threads ber bock * how many blocks in the grid;
//int jS = blockIdx.y * blockDim.y + threadIdx.y;
//int jStride = blockDim.y * gridDim.y; //how many threads ber bock * how many blocks in the grid;
if(blockIdx.x==0 && threadIdx.x==0){
printf("blockInd.x= %d ,blockDim.x= %d, threadIdx.x= %d, GridDim.x= %d\n", blockIdx.x ,blockDim.x , threadIdx.x ,gridDim.x);
printf("blockInd.y= %d ,blockDim.y= %d, threadIdx.y= %d, GridDim.y= %d\n", blockIdx.y ,blockDim.y , threadIdx.y ,gridDim.y);
printf("ijS= %d ,stride= %d \n", ijS, stride);
}
//std::cout << "blockInd.x="<< blockIdx.x << " blockDim.x=" << blockDim.x << " threadIdx.x=" << threadIdx.x << " GridDim.x" << gridDim.x << std::endl;
//std::cout << "blockInd.y="<< blockIdx.y << " blockDim.y=" << blockDim.y << " threadIdx.y=" << threadIdx.y << " GridDim.y" << gridDim.y << std::endl;
//for(i=iS; i < an; i+=iStride){
// for(j=0; j<bm; j++){
// // go threw all fields in output
// // initialize output field as 0
// out[i][j]=0;
//
// for(k=0; k < am; k++){
// out[i][j] += A[i][k] * B[k][j];
// }
// }
//}
//version with single loop
for(ij=ijS; ij < an*bm; ij+=stride){
i = ij/bm;
j = ij%bm;
out[i][j]=0;
for(k=0; k<am; k++){
out[i][j] += A[i][k]*B[k][j];
}
//printf("(i:%d, j:%d)=%d \n", i,j, out[i][j]);
}
}
// Multiply the an x am matrix A by the bn x bm matrix B on the GPU.
// A, B and the returned matrix are arrays of managed row pointers; the
// caller owns (and must free) the result. Throws 0 on a dimension
// mismatch.
int** matMul(int** A, int an, int am, int** B, int bn, int bm){
    if( am != bn ){
        std::cout << "dimenstion don't fit" << std::endl;
        throw 0;
    }
    // One thread per output element, rounded up to whole 256-thread
    // blocks. (Removed the unused N = max(an, bm) local from the
    // original.)
    int blockSize = 256;
    int numBlocks = ((an * bm) + blockSize - 1) / blockSize;
    int **out;
    cudaMallocManaged(&out, an*sizeof(int*));
    for(int i = 0; i < an; i++){
        cudaMallocManaged(&out[i], bm*sizeof(int));
    }
    cudaMatMul<<<numBlocks,blockSize>>>(A, an, am, B, bn, bm, out);
    cudaDeviceSynchronize();
    return out;
}
/// initialize matrix
// Allocate an an x am managed matrix (array of row pointers) with every
// element set to value.
int** init(int an, int am, int value){
    int** out;
    cudaMallocManaged(&out, an*sizeof(int*));
    for(int row = 0; row < an; row++){
        cudaMallocManaged(&out[row], am*sizeof(int));
        for(int col = 0; col < am; col++){
            out[row][col] = value;
        }
    }
    return out;
}
void print(int** A, int an, int am){
std::cout << "A = " << std::endl;
for(int i=0; i<an; i++){
for(int j=0; j<am; j++){
std::cout << A[i][j] << ",";
}
std::cout << std::endl;
}
std::cout << "("<<an<<","<<am<<")"<<std::endl;
}
template <class retType>
retType measureTime(func& f){
std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
void* result = f();
std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<milliseconds>( t2 - t1 ).count();
std::cout << "execution took " << duration << " milliseconds" << std::endl;
return (retType)result;
}
int main(){
std::cout << "gpu version" << std::endl;
int A[2][2]={{1,2},{3,4}};
int B[2][3]={{1,1,1},{1,1,1}};
int **out;
int **C = init(300, 500, 1);
int **D = init(500, 900, 1);
func f = [C,D](){return (void*)matMul(C, 300, 500, D, 500, 900);};
out=measureTime<int**>(f);
std::cout << "the new one " << std:: endl;
//print(out, 300, 900);
Matrix MA(300,500,1);
Matrix MB(500,900,1);
//func f = [A,B](){return (void*)A.matMul(B);}
//Matrix C = measureTime<Matrix>(f);
Matrix MC = MA.matMul(MB);
//MC.print();
return 0;
}
|
7,720 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
// Escape-time iteration for the Mandelbrot set: returns how many
// iterations (capped at maxIteration) it takes |z| to exceed 2 for
// c = c_re + i*c_im, starting from z = c.
__device__ int mandel(float c_re, float c_im, int maxIteration)
{
    float z_re = c_re;
    float z_im = c_im;
    int iter = 0;
    while (iter < maxIteration)
    {
        if (z_re * z_re + z_im * z_im > 4.f)
            break;
        // z <- z^2 + c, expanded into real/imaginary parts.
        float t_re = z_re * z_re - z_im * z_im;
        float t_im = 2.f * z_re * z_im;
        z_re = c_re + t_re;
        z_im = c_im + t_im;
        ++iter;
    }
    return iter;
}
// Renders `pixels` consecutive x-positions of one image row per thread.
// (x0,y0)-(x1,y1) is the complex-plane window; `pitch` is the row stride
// of `output` in ints (pitch-in-bytes / sizeof(int) from cudaMallocPitch).
__global__ void mandelKernel(float x1, float y1, float x0, float y0, int* output, int width, int height, int maxIterations, int pitch, int pixels) {
    // To avoid error caused by the floating number, use the following pseudo code
    // float x = lowerX + thisX * stepX;
    // float y = lowerY + thisY * stepY;
    int i = (blockIdx.x * blockDim.x + threadIdx.x) * pixels;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // Guard against threads past the image edge.
    if (i >= width || j >= height) return;
    // Per-pixel step sizes in the complex plane.
    float dx = (x1 - x0) / width;
    float dy = (y1 - y0) / height;
    float y = y0 + j * dy;
    for(int pixel = 0; pixel < pixels; pixel++) {
        // NOTE(review): positions i+1..i+pixels-1 are not re-checked
        // against width — the caller must size the grid so the tail
        // pixels stay in range.
        float x = x0 + (i + pixel) * dx;
        int index = (j * pitch + i) + pixel;
        output[index] = mandel(x, y, maxIterations);
    }
}
// Host front-end function that allocates the memory and launches the GPU kernel
#define N 1600
#define BLOCK_SIZE 64
// Host front-end: allocates host/device buffers, launches the Mandelbrot
// kernel over a width x height image and copies the result into output.
void hostFE (float x1, float y1, float x0, float y0, int* output, int width, int height, int maxIterations)
{
    int *h_img = NULL, *d_img = NULL;
    size_t pitch;
    const int pixelsPerThread = 2; // each thread renders 2 x-positions
    // Pinned host buffer for a fast D2H copy; pitched device buffer so
    // each image row is aligned.
    cudaHostAlloc((void**)&h_img, width * height * sizeof(int), cudaHostAllocDefault);
    cudaMallocPitch((void**)&d_img, &pitch, (size_t)width * sizeof(int), (size_t)height);
    // (Removed the original H2D cudaMemcpy2D of the uninitialised host
    // buffer — the kernel overwrites every pixel anyway.)
    // BUGFIX: 16x16 = 256 threads per block. The original used a 64x64
    // block (4096 threads), which exceeds the 1024-threads-per-block
    // hardware limit, so the launch could never run.
    dim3 blockSize(16, 16);
    // Grid covers the image; the x extent shrinks by pixelsPerThread
    // because each thread renders that many pixels.
    dim3 numBlock((width / pixelsPerThread + blockSize.x - 1) / blockSize.x,
                  (height + blockSize.y - 1) / blockSize.y);
    // BUGFIX: the execution configuration is <<<grid, block>>>; the
    // original passed <<<blockSize, numBlock>>> (block first).
    mandelKernel<<<numBlock, blockSize>>>(x1, y1, x0, y0, d_img, width, height, maxIterations, pitch / sizeof(int), pixelsPerThread);
    // Wait for the kernel before reading back.
    cudaDeviceSynchronize();
    // Copy memory from GPU to CPU, un-pitching the rows.
    cudaMemcpy2D(h_img, width * sizeof(int), d_img, pitch, width * sizeof(int), height, cudaMemcpyDeviceToHost);
    memcpy(output, h_img, width * height * sizeof(int));
    // BUGFIX: h_img came from cudaHostAlloc and must be released with
    // cudaFreeHost (the original called cudaFree on it).
    cudaFreeHost(h_img);
    cudaFree(d_img);
}
|
7,721 |
#include <iostream>
#include <stdlib.h>
#include <stdio.h>
using namespace std;
int main( int argc, char** argv ){
int args_needed = 1;
if (argc < args_needed + 1 ){
printf(" Arg number error, needed: %d \n", args_needed);
return 0;
}
printf(" CUDA - Template \n");
return 0;
}
|
7,722 | #define BLOCK_SIZE 32
__global__ void opticalFlowKernel(int* o_gx, int* o_gy, int* arr1dI, int* arr1dJ, int rows, int cols, int wx, int wy, int K) {
__shared__ int I_tile[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int J_tile[BLOCK_SIZE][BLOCK_SIZE];
int tx = threadIdx.x;
int ty = threadIdx.y;
int xidx = blockIdx.x * blockDim.x + threadIdx.x;
int yidx = blockIdx.y * blockDim.y + threadIdx.y;
I_tile[ty][tx] = arr1dI[yidx * cols + xidx];
J_tile[ty][tx] = arr1dJ[yidx * cols + xidx];
__syncthreads();
// Initialization of pyramidal guess
float gx = 0, gy = 0;
// Spatial gradient matrix
float Gxx = 0, Gxy = 0, Gyy = 0;
for(int dy = -wy; dy <= wy; dy++) {
for(int dx = -wx; dx <= wx; dx++) {
if(ty + dy - 1 >= 0 && ty + dy + 1 < BLOCK_SIZE && tx + dx - 1 >= 0 && tx + dx + 1 < BLOCK_SIZE) {
int Ix = (I_tile[ty+dy][tx+dx+1] - I_tile[ty+dy][tx+dx-1]) / 2;
int Iy = (I_tile[ty+dy+1][tx+dx] - I_tile[ty+dy-1][tx+dx]) / 2;
Gxx += Ix * Ix;
Gxy += Ix * Iy;
Gyy += Iy * Iy;
}
}
}
float det = Gxx * Gyy - Gxy * Gxy;
if(det > 0.00001f) {
// Initialization of iterative L-K
float vx = 0, vy = 0;
for(int k = 0; k < K; k++) {
int Jx = tx + gx + vx;
int Jy = ty + gy + vy;
if(Jx - 1 >= 0 && Jx + 1 < BLOCK_SIZE && Jy - 1 >= 0 && Jy + 1 < BLOCK_SIZE) {
// Image mismatch vector
float bkx = 0, bky = 0;
for(int dy = -wy; dy <= wy; dy++) {
for(int dx = -wx; dx <= wx; dx++) {
if(ty + dy - 1 >= 0 && ty + dy + 1 < BLOCK_SIZE && tx + dx - 1 >= 0 && tx + dx + 1 < BLOCK_SIZE) {
int Ik = I_tile[ty+dy][tx+dx] - J_tile[Jy][Jx];
int Ix = (I_tile[ty+dy][tx+dx+1] - I_tile[ty+dy][tx+dx-1]) / 2;
int Iy = (I_tile[ty+dy+1][tx+dx] - I_tile[ty+dy-1][tx+dx]) / 2;
bkx += Ik * Ix;
bky += Ik * Iy;
}
}
}
// Optical Flow
float eta_x = (-Gyy * bkx + Gxy * bky) / det;
float eta_y = (Gxy * bkx - Gxx * bky) / det;
// Guess for next iteration
vx += eta_x;
vy += eta_y;
}
}
// Guess for next level
gx += vx;
gy += vy;
}
o_gx[yidx * cols + xidx] = gx;
o_gy[yidx * cols + xidx] = gy;
}
|
7,723 | #include "includes.h"
#define N 128*128
// Monte-Carlo pi estimation step: thread i tests whether the random
// point (x[i], y[i]) lies inside the unit circle and, if so, bumps the
// shared hit counter atomically.
// NOTE(review): `indice` is not bounds-checked — the launch must supply
// exactly one thread per point or the reads run out of bounds.
__global__ void kernelMontecarlo(float *x, float *y,int *contador) {
 //int i = threadIdx.x + blockIdx.x*blockDim.x;
 //int j = threadIdx.y + blockIdx.y*blockDim.y;
 int indice = threadIdx.x + blockIdx.x*blockDim.x;
 //int indice=i;
 //printf("Indice: %f\n",(x[indice]*x[indice] + y[indice]*y[indice]));
 if((x[indice]*x[indice] + y[indice]*y[indice]) <=1.0) {
  atomicAdd(contador,1);//contador++;
  //printf("Contador: %d\n",*contador);
 }
} |
7,724 | #include <stdio.h>
int DIM_LIM = 10;
int MAT_COUNT = 20;
int SEED = 15; //seed for rand
// Simple dense row-major matrix of doubles.
class matrix {
public:
 int row; //number of rows, y
 int col; //number of columns, x
 double* data; // row-major buffer of length col*row
 // NOTE(review): `new double[]` allocates host memory when constructed
 // on the host; objects bit-copied to the device keep that host pointer
 // until the caller patches `data` (see copyMatrixDev). There is no
 // destructor, so the buffer is never freed.
 __host__ __device__ matrix(int columns, int rows) :
  col(columns), row(rows),
  data(new double[col * row])
 {}
 // Element accessor: getdata(x, y) == data[y*col + x] (x = column index).
 __host__ __device__ double& getdata(int x, int y){
  return data[y * col + x]; //vertical position * row length + pos in row
 };
};
__global__ void d_printMat(matrix *mat)
{
int dimxn = mat->col;
int dimyn = mat->row;
printf("Dim x %d, Dim y %d\n", dimxn, dimyn);
for(int y = 0; y<dimyn; y++){
for(int x = 0; x<dimxn; x++){
printf("%lf ", mat->getdata(x,y));
}
printf("\n");
}
printf("\n");
}
__host__ void printMat(matrix *mat)
{
int dimxn = mat->col;
int dimyn = mat->row;
printf("Dim x %d, Dim y %d\n", dimxn, dimyn);
for(int y = 0; y<dimyn; y++){
for(int x = 0; x<dimxn; x++){
printf("%lf ", mat->getdata(x,y));
}
printf("\n");
}
printf("\n");
}
// Single-thread matrix product (launched as <<<1,1>>>): ans = a x b.
// Layout is row-major via matrix::getdata(x, y) == data[y*col + x].
// Requires a->row == b->col (the shared dimension) and ans pre-sized
// with ans->col == a->col, ans->row == b->row (as hostMultMat does).
__global__ void mat_mult(matrix *a, matrix *b, matrix *ans){
 if(a->row == b->col){
  int iter = a->row; // length of the shared dimension
  printf("a(%d, %d) b(%d,%d)\n",a->col,a->row,b->col,b->row);
  printf("result %d rows %d cols\n", ans->row, ans->col);
  for(int x = 0; x < ans->col; x++){
   for(int y = 0; y < ans->row; y++){
    ans->getdata(x,y) = 0; // initialize the accumulator
    for(int z = 0; z < iter; z++){
     // BUGFIX: the original accumulated a(x,y)*b(y,x) and never used
     // the summation index z, so every "dot product" was just one
     // product repeated iter times. The correct inner product walks
     // the shared dimension:
     ans->getdata(x,y) += (a->getdata(x,z) * b->getdata(z,y));
    }
    //printf("value at %d %d is %f\n", x,y, ans->getdata(x,y));
   }
  }
 }
 else{
  printf("matrix size mismatch");
 }
};
matrix** initialize(){
srand(SEED); //init random gen
int dim[MAT_COUNT + 1]; //stores matrix sizes
for(int z = 0; z <= MAT_COUNT; z++){
dim[z] = rand()%DIM_LIM + 1;//random between 1 and limit
}
//declare matrix array as pointer
matrix **mat = (matrix **)malloc(MAT_COUNT * sizeof(matrix*));
for(int z = 0; z < MAT_COUNT; z++){
//each matrix shares a dimension with the previous
int dimx = dim[z];
int dimy = dim[z+1];
mat[z] = new matrix(dimx,dimy); //dimx columns, dimy rows
for(int x = 0; x<dimx; x++){
for(int y = 0; y<dimy; y++){
//TODO change to random double
mat[z]->getdata(x,y) = 5; //initialize each element
}
}
}
return mat;
}
// Deep-copy a host matrix to the device: copy the object, allocate a
// device buffer for the elements, then patch the device object's `data`
// field to point at that buffer.
matrix* copyMatrixDev(matrix *host){
 matrix *d_mat;
 double *tmp_data;
 cudaMalloc(&d_mat, sizeof(matrix));
 cudaMemcpy(d_mat, host, sizeof(matrix),
 cudaMemcpyHostToDevice);
 cudaMalloc(&tmp_data, sizeof(double) * host->col * host->row);
 cudaMemcpy(tmp_data, host->data, sizeof(double) * host->col * host->row,
 cudaMemcpyHostToDevice);
 // &(d_mat->data) is only pointer arithmetic on the device address
 // (base + offsetof(matrix, data)); nothing is dereferenced on the
 // host, so overwriting the field this way is legal.
 cudaMemcpy(&(d_mat->data),&tmp_data, sizeof(double *),
 cudaMemcpyHostToDevice);
 return d_mat;
}
// Allocate the result matrix (a->col x b->row), mirror it on the device
// and run the single-thread mat_mult kernel. Returns the DEVICE-side
// result object.
// NOTE(review): the host-side `result` object and its element buffer are
// never freed (leak), and callers that cudaFree the returned struct
// still leak its device element buffer.
matrix* hostMultMat(matrix *a, matrix *d_a, matrix *b, matrix *d_b){
 matrix *result = new matrix(a->col,b->row);
 matrix *d_result = copyMatrixDev(result);
 cudaDeviceSynchronize();
 mat_mult<<<1,1>>>(d_a,d_b,d_result);
 cudaDeviceSynchronize();
 return d_result;
}
int main(){
matrix **mat_arr = initialize();
matrix *d_mat[MAT_COUNT];
double *mat_data[MAT_COUNT];
for(int i = 0; i < MAT_COUNT; i++){
cudaMalloc(&d_mat[i], sizeof(matrix));
cudaMemcpy(d_mat[i], mat_arr[i], sizeof(matrix),
cudaMemcpyHostToDevice);
cudaMalloc(&mat_data[i], sizeof(double) * mat_arr[i]->col * mat_arr[i]->row);
cudaMemcpy(mat_data[i], mat_arr[i]->data, sizeof(double) * mat_arr[i]->col * mat_arr[i]->row,
cudaMemcpyHostToDevice);
cudaMemcpy(&(d_mat[i]->data),&mat_data[i], sizeof(double *),
cudaMemcpyHostToDevice);
// printMat(mat_arr[i]);
// d_printMat<<<1,1>>>(d_mat[i]);
}
//matrix *d_result = hostMultMat(mat_arr[0], d_mat[0], mat_arr[1], d_mat[1]);
//d_printMat<<<1,1>>>(d_result);
//cudaDeviceSynchronize();
for(int i = 0; i < MAT_COUNT-1; i++){
matrix *d_result = hostMultMat(mat_arr[i], d_mat[i],
mat_arr[i+1], d_mat[i+1]);
d_printMat<<<1,1>>>(d_result);
cudaFree(d_result);
}
}
|
7,725 | #include "includes.h"
// Taken from the NVIDIA "2_Graphics\simpleGL" sample:
// A kernel that modifies the z-coordinates of a rectangular
// grid of vertices, based on a time value, so that they
// form an animated sine wave
// Animated sine-wave surface (from the NVIDIA simpleGL sample): each
// thread computes one vertex of a width x height grid and writes its
// position into the vertex buffer.
extern "C"
__global__ void simple_vbo_kernel( float4 *pos, unsigned int width, unsigned int height, float time)
{
    unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
    // Map grid coordinates into [-1, 1] uv space.
    float u = 2.0f * (x / (float) width) - 1.0f;
    float v = 2.0f * (y / (float) height) - 1.0f;
    // Height is the product of two travelling waves.
    const float freq = 4.0f;
    float w = sinf(u*freq + time) * cosf(v*freq + time) * 0.5f;
    // y-up convention: u and v become x and z, w is the height.
    pos[y*width+x] = make_float4(u, w, v, 1.0f);
} |
#ifndef __INC_CUDA_VECTOR_UTIL_H__
#define __INC_CUDA_VECTOR_UTIL_H__
// BUGFIX: this is an include guard, so it must be #ifndef + #define.
// The original used #ifdef with no #define, which means the guarded
// functions were never compiled at all (the macro was never defined).
#include "cuda_runtime.h"
// 4x4 matrix product C = A * B on flat 16-float arrays.
// NOTE(review): the indexing mixes row/column-major conventions (A is
// read with stride 4, C is written densely) — confirm the intended
// storage order against the call sites before reuse.
__device__ void Mat4x4_Mul(float *A, float *B, float *C)
{
 C[0] = A[0]*B[0]+A[4]*B[1]+A[8]*B[2]+A[12]*B[3];
 C[1] = A[0]*B[4]+A[4]*B[5]+A[8]*B[6]+A[12]*B[7];
 C[2] = A[0]*B[8]+A[4]*B[9]+A[8]*B[10]+A[12]*B[11];
 C[3] = A[0]*B[12]+A[4]*B[13]+A[8]*B[14]+A[12]*B[15];
 C[4] = A[1]*B[0]+A[5]*B[1]+A[9]*B[2]+A[13]*B[3];
 C[5] = A[1]*B[4]+A[5]*B[5]+A[9]*B[6]+A[13]*B[7];
 C[6] = A[1]*B[8]+A[5]*B[9]+A[9]*B[10]+A[13]*B[11];
 C[7] = A[1]*B[12]+A[5]*B[13]+A[9]*B[14]+A[13]*B[15];
 C[8] = A[2]*B[0]+A[6]*B[1]+A[10]*B[2]+A[14]*B[3];
 C[9] = A[2]*B[4]+A[6]*B[5]+A[10]*B[6]+A[14]*B[7];
 C[10] = A[2]*B[8]+A[6]*B[9]+A[10]*B[10]+A[14]*B[11];
 C[11] = A[2]*B[12]+A[6]*B[13]+A[10]*B[14]+A[14]*B[15];
 C[12] = A[3]*B[0]+A[7]*B[1]+A[11]*B[2]+A[15]*B[3];
 C[13] = A[3]*B[4]+A[7]*B[5]+A[11]*B[6]+A[15]*B[7];
 C[14] = A[3]*B[8]+A[7]*B[9]+A[11]*B[10]+A[15]*B[11];
 C[15] = A[3]*B[12]+A[7]*B[13]+A[11]*B[14]+A[15]*B[15];
}
// 4x4 matrix times 4-vector: C = A * B (same indexing convention as above).
__device__ void Mat4x4_Mul_Vec4(float *A, float *B, float *C)
{
 C[0] = A[0]*B[0]+A[4]*B[1]+A[8]*B[2]+A[12]*B[3];
 C[1] = A[1]*B[0]+A[5]*B[1]+A[9]*B[2]+A[13]*B[3];
 C[2] = A[2]*B[0]+A[6]*B[1]+A[10]*B[2]+A[14]*B[3];
 C[3] = A[3]*B[0]+A[7]*B[1]+A[11]*B[2]+A[15]*B[3];
}
#endif |
7,727 | /*
* @author Connie Shi
* Lab 3: Write a reduction program in CUDA that finds the maximum
* of an array of M integers.
* Part 3 (Improved):
* Write a CUDA version that makes use of shared memory,
* prefetching, and different granularities. Performs better
* than original cudashared.cu version, because it does not
* divide the data into subsets to sequential search.
*
* Should be run on cuda1 machine with 1024 max threads per block.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 1024
#define WARP 32
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* Function Declarations */
void generate_random(int random[], int num_elements);
__global__ void max_in_blocks(int random[], int num_elements);
__device__ void sequential(int random[], int num_elements);
/* Generates M random numbers from 1 to 100000*/
/* Fill random[] with num_elements pseudo-random ints in [0, 100000],
 * reseeding the generator from the wall clock. */
void generate_random(int random[], int num_elements) {
    time_t t;
    srand((unsigned)time(&t)); //randomizes seed
    for (int i = 0; i < num_elements; i++)
        random[i] = (int)(((double)rand() / RAND_MAX) * 100000);
}
/* global function called from host and executed on kernel
* Uses a tree-like structure to do parallel max reduction.
* Avoids branch diversion, uses prefetching and shared memory.
*/
/* Per-block max reduction: leaves each block's maximum in
 * random[blockIdx.x]. Uses shared memory and a tree reduction down to a
 * 32-element window that thread 0 finishes sequentially.
 */
__global__
void max_in_blocks(int random[], int num_elements) {
    __shared__ int sdata[THREADS_PER_BLOCK];
    unsigned int tid = threadIdx.x;
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride;
    // Stage this thread's element in shared memory.
    // BUGFIX: guard the global load — the last block can extend past
    // num_elements and the original read out of bounds there. 0 is a
    // safe identity because generate_random() produces values >= 0.
    sdata[tid] = (index < num_elements) ? random[index] : 0;
    __syncthreads();
    // Tree reduction down to the first 32 shared-memory slots.
    for (stride = blockDim.x/2; stride >= 32; stride >>= 1) {
        if (tid < stride) {
            int current = sdata[tid + stride];
            if (sdata[tid] < current) {
                sdata[tid] = current;
            }
        }
        __syncthreads();
    }
    // BUGFIX: the original let all 32 threads of the first warp call
    // sequential(), and every one of them wrote its own (mostly wrong)
    // window-max to sdata[0] — a data race with a nondeterministic
    // winner. Only the window starting at offset 0 holds the block
    // maximum, so only thread 0 may finish the reduction and publish it.
    if (tid == 0) {
        sequential(sdata, num_elements);
        random[blockIdx.x] = sdata[0];
    }
}
/* Sequential searches through the first 32 positions of the block
* to prevent further divvying up of the warp into different tasks.
*/
/* Sequential max over a WARP-sized window of sdata starting at this
 * thread's index; the result is written to sdata[0].
 * NOTE(review): every calling thread writes sdata[0], so if more than
 * one thread calls this the writes race and only the window starting at
 * tid == 0 computes the true block maximum. Assumes values >= 0 (max
 * starts at 0).
 */
__device__
void sequential(int sdata[], int num_elements) {
 int i;
 int max = 0;
 int tid = threadIdx.x;
 // Scan at most WARP elements, clipped to num_elements.
 for (i = tid; i < tid + WARP && i < num_elements; i++) {
  if (max < sdata[i]) {
   max = sdata[i];
  }
 }
 // Put in index position, first element of the block
 sdata[0] = max;
}
/**************************************************************/
int main(int argc, char*argv[]) {
int* h_random;
int* d_random;
int i;
int largest = 0;
clock_t start, end;
if (argc != 2) {
printf("Invalid number of commands: usage ./cudadivshared M\n");
exit(1);
}
// Generate array of random elements
int num_elements = atoi(argv[1]);
h_random = (int*)malloc(sizeof(int) * num_elements);
generate_random(h_random, num_elements);
start = clock();
// Calculation for grid dimensions
int leftover = num_elements % WARP;
int d_elements = num_elements - leftover;
int n_blocks = (int)ceil((double)d_elements/THREADS_PER_BLOCK);
int n_threads = (d_elements > THREADS_PER_BLOCK) ? THREADS_PER_BLOCK : d_elements;
// Allocate space on device and copy over elements
cudaError_t err = cudaMalloc((void**)&d_random, sizeof(int) * d_elements);
if (err != cudaSuccess) {
printf("cudaMalloc failure\n");
}
err = cudaMemcpy(d_random, h_random, sizeof(int) * d_elements, cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
printf("cudaMemcpy failure\n");
}
// Execute kernel
max_in_blocks<<<n_blocks, n_threads>>>(d_random, d_elements);
// While kernel is executing, find the max in leftover elements
for (i = d_elements; i < num_elements; i++) {
if (largest < h_random[i]) {
largest = h_random[i];
}
}
// Retrieve reduction results, only the first n_blocks element
cudaMemcpy(h_random, d_random, sizeof(int) * n_blocks, cudaMemcpyDeviceToHost);
// Check through n_blocks elements for the max
for (i = 0; i < n_blocks; i ++) {
if (largest < h_random[i]) {
largest = h_random[i];
}
}
end = clock();
printf("Time to find max %f\n", (double)(end-start)/CLOCKS_PER_SEC);
printf("Largest: %d\n", largest);
// Clean up resources
cudaFree(d_random);
free(h_random);
}
|
7,728 | #include "includes.h"
//#define NDEBUG
const static float eps = 1e-6;
const static size_t blocSize = 8;
const static size_t size = 1024;
// Naive dense multiply of square dimensions x dimensions matrices:
// Dest = matA * matB, one thread per output element. Threads outside
// the matrix bounds exit immediately.
__global__ void matMultiply2D(float* matA, float* matB, float* Dest, int dimensions)
{
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if (row >= dimensions || col >= dimensions)
        return;
    float acc = 0.0f;
    for (int k = 0; k < dimensions; ++k)
        acc += matA[row*dimensions + k] * matB[k*dimensions + col];
    Dest[row*dimensions + col] = acc;
} |
7,729 | #include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <time.h>
#define ALPHABET_LEN 4
#define NOT_FOUND patlen
#define max(a, b) ((a < b) ? b : a)
/* Build the Boyer-Moore bad-character table: delta1[c] holds the shift
 * for character c, defaulting to the full pattern length (NOT_FOUND)
 * for characters absent from the pattern. */
void make_delta1(int *delta1, int8_t *pat, int32_t patlen) {
    int i;
    for (i = 0; i < ALPHABET_LEN; i++)
        delta1[i] = NOT_FOUND;
    for (i = 0; i < patlen - 1; i++)
        delta1[pat[i]] = patlen - 1 - i;
}
/* Return 1 if the suffix of `word` starting at `pos` is also a prefix
 * of `word`, 0 otherwise. Helper for the good-suffix table. */
int is_prefix(int8_t *word, int wordlen, int pos) {
    int suffixlen = wordlen - pos;
    for (int i = 0; i < suffixlen; i++) {
        if (word[i] != word[pos + i])
            return 0;
    }
    return 1;
}
/* Length of the longest substring ending at `pos` that matches a suffix
 * of the whole word, capped at pos. Helper for the good-suffix table. */
int suffix_length(int8_t *word, int wordlen, int pos) {
    int i = 0;
    while (i < pos && word[pos - i] == word[wordlen - 1 - i])
        i++;
    return i;
}
void make_delta2(int *delta2, int8_t *pat, int32_t patlen) {
int p;
int last_prefix_index = 1;
for (p=patlen-1; p>=0; p--) {
if (is_prefix(pat, patlen, p+1)) {
last_prefix_index = p+1;
}
delta2[p] = (patlen-1 - p) + last_prefix_index;
}
for (p=0; p < patlen-1; p++) {
int slen = suffix_length(pat, patlen, p);
if (pat[p - slen] != pat[patlen-1 - slen]) {
delta2[patlen-1 - slen] = patlen-1 - p + slen;
}
}
}
__device__ int d_retval;
__global__ void boyer_moore (int8_t *string, int32_t stringlen, int8_t *pat, int32_t patlen, int *delta1, int *delta2, int n) {
int i;
d_retval = -1;
int tid = blockIdx.x*blockDim.x+threadIdx.x;
__syncthreads();
if (tid<n)
{
int beg = tid*patlen;
int end = min (beg+(2*patlen), stringlen);
i = beg+patlen-1;
while (i < end) {
int j = patlen-1;
while (j >= 0 && (string[i] == pat[j])) {
//printf("here in loop\n");
--i;
--j;
}
if (j < 0) {
d_retval = i+1;
break;
}
i += max(delta1[string[i]], delta2[j]);
}
}
}
int8_t h_string[1000000];
int8_t h_pat[100];
int main(int argc, char const *argv[]) {
int8_t *d_s, *d_p;
int *d_d1, *d_d2;
int32_t strlen = 1000000;
int32_t patlen = 100;
srand(time(NULL));
int i;
char con [] = "ACGT";
for(i=0;i<strlen;i++)
h_string[i] = rand ()%4;
int patid = rand ()%10000;
for(i=0;i<patlen;i++)
h_pat[i] = h_string[patid++];
printf("The String is: ");
for (i=0;i<strlen;i++)
printf("%c", con[h_string[i]]);
printf("\nThe search keyword is: ");
for (i=0;i<patlen;i++)
printf("%c", con[h_pat[i]]);
int delta1[ALPHABET_LEN];
int delta2[patlen];
make_delta1(delta1, h_pat, patlen);
make_delta2(delta2, h_pat, patlen);
cudaMalloc(&d_s, strlen*sizeof(int8_t));
cudaMemcpy(d_s, h_string,strlen*sizeof(int8_t),cudaMemcpyHostToDevice);
cudaMalloc(&d_p, patlen*sizeof(int8_t));
cudaMemcpy(d_p, h_pat,patlen*sizeof(int8_t),cudaMemcpyHostToDevice);
cudaMalloc(&d_d1, ALPHABET_LEN*sizeof(int));
cudaMemcpy(d_d1, delta1,ALPHABET_LEN*sizeof(int),cudaMemcpyHostToDevice);
cudaMalloc(&d_d2, patlen*sizeof(int));
cudaMemcpy(d_d2, delta2,patlen*sizeof(int),cudaMemcpyHostToDevice);
int n = strlen/patlen;
int block_size = 1024;
int n_blocks = n/block_size + (n%block_size==0?0:1);
boyer_moore<<<n_blocks,block_size>>>(d_s, strlen, d_p, patlen, d_d1, d_d2, n);
cudaDeviceSynchronize();
int answer;
cudaMemcpyFromSymbol(&answer, d_retval, sizeof(int), 0, cudaMemcpyDeviceToHost);
printf("\nString found at %d\n", answer);
return 0;
}
|
7,730 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define N 5
#define BLOCK_DIM 10
using namespace std;
// Element-wise addition of two N x N row-major matrices: c = a + b.
// One thread per element; threads past the matrix edge exit.
__global__
void adicion_Matrices (int *a, int *b, int *c) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int fila = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= N || fila >= N)
        return;
    const int idx = fila * N + col;
    c[idx] = a[idx] + b[idx];
}
// Matrix-vector product with the matrix read column-wise:
// c[col] = sum_i b[i] * a[i][col], one output element per thread.
__global__
void matrix_vector (int *a, int *b, int *c) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= N)
        return;
    int acc = 0;
    for (int fila = 0; fila < N; fila++)
        acc += b[fila] * a[fila * N + col];
    c[col] = acc;
}
__global__
void adicion_matrices_Filas (int *a, int *b, int *c) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int fila = blockIdx.y * blockDim.y + threadIdx.y;
for(int i=col; i<N; i++){
int index = i + fila * N;
c[index] = a[index] + b[index];
}
}
__global__
void adicion_matrices_Columnas (int *a, int *b, int *c) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int fila = blockIdx.y * blockDim.y + threadIdx.y;
for(int i=fila; i<N; i++){
int index = col + i * N;
c[index] = a[index] + b[index];
}
}
// Print an N x N matrix, tab-separated, one row per line.
void imprimir(int matrix[N][N]){
    for(int fila = 0; fila < N; fila++){
        for(int col = 0; col < N; col++){
            std::cout << matrix[fila][col] << '\t';
        }
        std::cout << std::endl;
    }
}
// Print the N elements of a vector on one line, tab-separated.
void print_vector(int vector[N]){
    for(int idx = 0; idx < N; idx++)
        std::cout << vector[idx] << '\t';
}
// Demo: fill two N x N matrices with small random values, add them on
// the GPU with adicion_Matrices and print operands and result.
void adicion_matrix_to_Kernel(){
    int a[N][N], b[N][N], c[N][N];
    int *dev_a, *dev_b, *dev_c;
    int size = N * N * sizeof(int);
    srand(time(NULL));
    for(int i=0; i<N; i++)
        for (int j=0; j<N; j++){
            a[i][j] = rand() % 3;
            b[i][j] = rand() % 3;
        }
    imprimir(a);
    std::cout << std::endl;
    imprimir(b);
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_c, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    // 2D launch covering the matrix, rounded up to whole blocks.
    dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);
    adicion_Matrices<<<dimGrid,dimBlock>>>(dev_a,dev_b,dev_c);
    cudaDeviceSynchronize();
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    std::cout << std::endl;
    imprimir (c);
    // BUGFIX: the original leaked all three device buffers.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
}
// Demo: random N x N matrix times random N-vector on the GPU.
int main() {
    int a[N][N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;
    int size = N * N * sizeof(int); // matrix bytes; vectors use N ints
    srand(time(NULL));
    for(int i=0; i<N; i++){
        for (int j=0; j<N; j++){
            a[i][j] = rand() % 9;
        }
        b[i] = rand() % 9;
    }
    imprimir(a);
    std::cout << std::endl;
    print_vector(b);
    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N*sizeof(int), cudaMemcpyHostToDevice);
    matrix_vector<<<N/256+1,256>>>(dev_a,dev_b,dev_c);
    // BUGFIX: the original copied `size` (N*N ints) into the N-element
    // host array c — a host buffer overflow that also read past the
    // N-int device allocation. Only N ints exist.
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << std::endl;
    print_vector(c);
    // Release device memory (the original leaked it).
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    return 0;
} |
7,731 | // Matrix multiplication using CUDA
// Based on code from: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory
#include <stdio.h>
#include <string.h>
#include <cuda.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include "time.h"
#include <iostream>
#include <fstream>
#include <iomanip>
#define BLOCK_SIZE 64
//CPU matrix multiplication
// Reference CPU implementation: h_result = h_a * h_b for square m x m
// matrices stored row-major in flat arrays.
__host__ void cpu_matrix_mult(float *h_a, float *h_b, float *h_result, int m) {
    for (int row = 0; row < m; ++row)
    {
        for (int col = 0; col < m; ++col)
        {
            float acc = 0.0f;
            for (int k = 0; k < m; ++k)
                acc += h_a[row * m + k] * h_b[k * m + col];
            h_result[row * m + col] = acc;
        }
    }
}
// GPU matrix multiplication
// Uses square matrices
// Based on code from: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory
// GPU matrix multiplication (naive, global memory only).
// res = left * right for square dim x dim row-major matrices; one thread
// per output element, 2D launch.
// Based on code from: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory
__global__ void gpu_multiply(float *left, float *right, float *res, int dim) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Bounds guard (was missing): any grid that overshoots dim would
    // otherwise read and write out of bounds.
    if (row >= dim || col >= dim) return;
    float temp = 0.0f;
    for (int e = 0; e < dim; ++e)
        temp += left[row * dim + e] * right[e * dim + col];
    res[row * dim + col] = temp;
}
// Function to fill a non-square matrix with zeros to make it a square matrix
// Pads both operand matrices to a common square size that is a multiple of
// BLOCK_SIZE. Entries outside the original dimensions stay zero; in-range
// entries are seeded deterministically with sinf/cosf of the linear index.
// Returns the padded square dimension.
__host__ int fill(float **Lmatrix, float **Rmatrix, int LdimX, int LdimY, int RdimX, int RdimY) {
    int maxX = (LdimX > RdimX) ? LdimX : RdimX;
    int maxY = (LdimY > RdimY) ? LdimY : RdimY;
    int size = (maxX > maxY) ? maxX : maxY;
    // Round up to the next multiple of BLOCK_SIZE.
    int tiles = size / BLOCK_SIZE + (size % BLOCK_SIZE == 0 ? 0 : 1);
    size = tiles * BLOCK_SIZE;
    size_t pt_size = size * size * sizeof(float);
    *Lmatrix = (float *)malloc(pt_size);
    *Rmatrix = (float *)malloc(pt_size);
    memset(*Lmatrix, 0, pt_size);
    memset(*Rmatrix, 0, pt_size);
    for (int i = 0; i < LdimX; i++) {
        for (int j = 0; j < LdimY; j++) {
            int idx = size * i + j;
            (*Lmatrix)[idx] = sinf(idx);
        }
    }
    for (int i = 0; i < RdimX; i++) {
        for (int j = 0; j < RdimY; j++) {
            int idx = size * i + j;
            (*Rmatrix)[idx] = cosf(idx);
        }
    }
    return size;
}
// GPU matrix multiplication - optimized
// uses shared memory
// based on code from: https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#shared-memory
// Tiled matrix multiplication using shared memory: res = left * right for
// square dim x dim row-major matrices.
// Preconditions: blockDim == (BLOCK_SIZE, BLOCK_SIZE) and dim an exact
// multiple of BLOCK_SIZE (fill() pads inputs to guarantee this) — there
// are no bounds checks on the loads/stores below.
// NOTE(review): with BLOCK_SIZE defined as 64, a block would hold
// 64*64 = 4096 threads, above the 1024 threads-per-block hardware limit,
// so the launch would fail — confirm BLOCK_SIZE <= 32.
__global__ void multiply(float *left, float *right, float *res, int dim) {
int i, j;
float temp = 0;
// One BLOCK_SIZE x BLOCK_SIZE staging tile per operand, per block.
__shared__ float Left_shared_t[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Right_shared_t[BLOCK_SIZE][BLOCK_SIZE];
// Row i of matrix left
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// gridDim.x == dim / BLOCK_SIZE when the grid exactly covers the matrix,
// so this walks the tile window across the shared dimension.
for (int tileNUM = 0; tileNUM < gridDim.x; tileNUM++) {
// Column j of matrix left
j = tileNUM * BLOCK_SIZE + threadIdx.x;
i = tileNUM * BLOCK_SIZE + threadIdx.y;
// Load left[i][j] to shared mem
Left_shared_t[threadIdx.y][threadIdx.x] = left[row * dim + j];// Coalesced access
// Load right[i][j] to shared mem
Right_shared_t[threadIdx.y][threadIdx.x] = right[i * dim + col]; // Coalesced access
// Synchronize: both tiles must be fully loaded before any thread reads them.
__syncthreads();
// Accumulate one tile of res from tiles of left and right in shared mem
for (int k = 0; k < BLOCK_SIZE; k++) {
temp += Left_shared_t[threadIdx.y][k] * Right_shared_t[k][threadIdx.x]; //no shared memory bank conflict
}
// Synchronize: no thread may overwrite the tiles until all reads finish.
__syncthreads();
}
// Store accumulated value to res
res[row * dim + col] = temp;
}
// Driver: reads matrix dimensions, pads the operands (fill), multiplies
// them with the tiled GPU kernel, the naive GPU kernel, and the CPU
// reference, and reports the three timings.
int main(void)
{
// Requested matrix sizes (m x n times n x k, padded to square by fill()).
int Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y;
// Host operands, device operands, GPU result, CPU reference result.
float *Left_Vector_h, *Right_Vector_h, *Left_Vector_d, *Right_Vector_d, *Res_h, *Res_d, *CPU;
printf("Enter m n n k :\n");
scanf("%d %d %d %d", &Left_matrix_x, &Left_matrix_y, &Right_matrix_x, &Right_matrix_y);
int dim = fill(&Left_Vector_h, &Right_Vector_h, Left_matrix_x, Left_matrix_y, Right_matrix_x, Right_matrix_y);
size_t vector_size = dim * dim * sizeof(float);
//Allocate memory on host
Res_h = (float *)malloc(vector_size);
CPU = (float *)malloc(vector_size);
//Allocate memory on device
cudaMalloc((void **)&Left_Vector_d, vector_size);
cudaMalloc((void **)&Right_Vector_d, vector_size);
cudaMalloc((void **)&Res_d, vector_size);
//Copy operands to device
cudaMemcpy(Left_Vector_d, Left_Vector_h, vector_size, cudaMemcpyHostToDevice);
cudaMemcpy(Right_Vector_d, Right_Vector_h, vector_size, cudaMemcpyHostToDevice);
// NOTE(review): BLOCK_SIZE is 64, so BLOCK_SIZE*BLOCK_SIZE = 4096 threads
// per block — above the 1024 hardware limit. The launch-error checks added
// below now surface that instead of silently timing failed launches.
dim3 Block_dim(BLOCK_SIZE, BLOCK_SIZE);
//Grid dimension is found by dividing matrix dimension by block size
dim3 Grid_dim(dim / BLOCK_SIZE, dim / BLOCK_SIZE);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
//GPU kernel call - optimized (shared-memory) matrix multiplication
multiply <<< Grid_dim, Block_dim >>> (Left_Vector_d, Right_Vector_d, Res_d, dim);
cudaError_t err = cudaGetLastError();  // was missing: launches fail silently
if (err != cudaSuccess)
    fprintf(stderr, "multiply launch failed: %s\n", cudaGetErrorString(err));
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float et;
cudaEventElapsedTime(&et, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
//Retrieve result from device and store it in host array
cudaMemcpy(Res_h, Res_d, vector_size, cudaMemcpyDeviceToHost);
clock_t begin = clock();
//CPU matrix multiplication (reference)
cpu_matrix_mult(Left_Vector_h, Right_Vector_h, CPU, dim);
clock_t end = clock();
double time_spent = (double)1000 * (end - begin) / CLOCKS_PER_SEC;
cudaEvent_t start1, stop1;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventRecord(start1, 0);
//GPU kernel call - non optimized matrix multiplication (same launch config)
gpu_multiply <<< Grid_dim, Block_dim >>> (Left_Vector_d, Right_Vector_d, Res_d, dim);
err = cudaGetLastError();
if (err != cudaSuccess)
    fprintf(stderr, "gpu_multiply launch failed: %s\n", cudaGetErrorString(err));
cudaEventRecord(stop1, 0);
cudaEventSynchronize(stop1);
float mm;
cudaEventElapsedTime(&mm, start1, stop1);
cudaEventDestroy(start1);
cudaEventDestroy(stop1);
printf("GPU time= %f ms\n", et);
printf("CPU time= %lf ms\n", time_spent);
printf("GPU (not optimized) time = %f ms\n", mm);
//Cleanup
free(Left_Vector_h);
free(Right_Vector_h);
free(Res_h);
free(CPU);
cudaFree(Left_Vector_d);
cudaFree(Right_Vector_d);
cudaFree(Res_d);
} |
7,732 | #include <stdio.h>
#include <cuda_runtime.h>
#include <unistd.h>
#define N 20*1024*1024
#define THREADS_PER_BLOCK 512
#ifdef __cplusplus
#define restrict __restrict__
#endif
// Element-wise vector addition c = a + b, one thread per element.
// The grid is expected to cover the arrays exactly (no bounds guard).
__global__ void gpu_vector_add(int* a, int* b, int* c)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    c[i] = a[i] + b[i];
}
// CPU reference: adds the two N-element vectors a and b into c.
// `restrict` promises the three buffers do not alias.
void cpu_vector_add(int* restrict a, int* restrict b, int* restrict c)
{
    for (int idx = 0; idx < N; ++idx)
        c[idx] = a[idx] + b[idx];
}
// Adds two large vectors on the GPU and prints the first result element.
int main()
{
    int *a, *b, *c;        /* host buffers */
    int *d_a, *d_b, *d_c;  /* device buffers */
    int i;
    size_t size = N*sizeof(int);
    cudaError_t err;       /* fix: was `int`; cudaMalloc returns cudaError_t */
    a = (int*) malloc(size);
    b = (int*) malloc(size);
    c = (int*) malloc(size);
    if (!a || !b || !c)    /* fix: malloc results were never checked */
    {
        fprintf(stderr, "host allocation failed\n");
        free(a); free(b); free(c);
        return 1;
    }
    /* Allocate device buffers; report the reason and release everything
     * already acquired on failure (the old code returned silently and
     * leaked the host buffers). */
    err = cudaMalloc((void **)&d_a, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc d_a: %s\n", cudaGetErrorString(err));
        free(a); free(b); free(c);
        return 1;
    }
    err = cudaMalloc((void **)&d_b, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc d_b: %s\n", cudaGetErrorString(err));
        cudaFree(d_a);
        free(a); free(b); free(c);
        return 1;
    }
    err = cudaMalloc((void **)&d_c, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc d_c: %s\n", cudaGetErrorString(err));
        cudaFree(d_a); cudaFree(d_b);
        free(a); free(b); free(c);
        return 1;
    }
    for (i = 0; i < N; ++i)
        a[i] = 42;
    for (i = 0; i < N; ++i)
        b[i] = 1337;
    /* Copy a and b to their device counterparts.
     * cudaMemcpyHostToDevice selects the RAM -> GPU direction. */
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    /* N is a multiple of THREADS_PER_BLOCK, so the grid covers the vectors
     * exactly and the kernel needs no bounds check. */
    gpu_vector_add<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    printf("Resultado: %d\n", c[0]);
    /* Release device memory; leaking it would hold GPU memory until exit. */
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a); free(b); free(c);
    return 0;
}
|
7,733 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#define size 4096
#define row 16
#define loop 256
//row is size/(32*sqrt(brock_num)) and loop % 2^2n == 0 loop = row^2
#define N 100
#define r 0.1
#define blocksize 1024
//size is a grid size and N is how many times to calculate
#define block_num 64
// One update sweep of an explicit 2-D heat-equation stencil on a
// (size+2) x (size+2) grid (1-cell halo on every side). Each thread
// updates `loop` interior cells; the parity of n selects which of u1/u2
// is read and which is written, so the buffers ping-pong across launches.
// NOTE(review): the __syncthreads() calls below sit inside an if/else,
// but the condition (n%2) is uniform across the whole block, so every
// thread takes the same branch and the barrier is safe.
__global__
void calc(float *u1, float *u2, int n){
int i2 = threadIdx.x;
int i3 = blockIdx.x;
int i4 = blockDim.x;
for(int j = 0; j < loop; j++){
//int i = i2 + blockrow*(j%row) + (j/row) * blocksize * row;
// Linear index over interior cells, then shifted past the halo:
// ((i/size)*2) skips the two halo cells at each row boundary, and
// +1 + (size+2) steps over the left and top borders.
int i = i2 + i4 * j + i4*loop*i3;
i = i + ((i/size)*2) + 1 + (size + 2);
if(n%2==0){
// Gather the 5-point stencil from u1, write the update into u2.
float x1 = u1[i]; float x2 = u1[i+1]; float x3 = u1[i-1]; float x4 = u1[i+(size+2)]; float x5 = u1[i-(size+2)];
//__syncthreads();
u2[i] = (1 - 4*r) * x1 + r * (x2 + x3 + x4 + x5);
__syncthreads();
}
else {
// Same stencil with the roles of u1 and u2 swapped.
float x1 = u2[i]; float x2 = u2[i+1]; float x3 = u2[i-1]; float x4 = u2[i+(size+2)]; float x5 = u2[i-(size+2)];
//__syncthreads();
u1[i] = (1 - 4*r) * x1 + r * (x2 + x3 + x4 + x5);
__syncthreads();
}
}
return ;
}
// Runs N time steps of the heat-equation kernel on a (size+2)^2 grid
// (interior initialized to 1, halo to 0) and prints the elapsed time.
int main(){
    // Each buffer is (size+2)^2 floats (~64 MB for size 4096). As plain
    // locals these overflowed the stack, so give them static storage.
    static float u_odd [(size+2)*(size+2)];
    static float u_even [(size+2)*(size+2)];
    static float u_result [(size+2)*(size+2)];
    float* u1;
    float* u2;
    for(int i = 0; i < (size+2)*(size+2); i++){
        u_even[i] = 0;
        u_odd[i] = 0;
        u_result[i] = 0;
    }
    // Initialize u_even: 1 on every interior cell, 0 on the halo.
    for(int i = 0; i < (size+2)*(size+2); i++){
        if(i % (size+2) != 0 && i % (size+2) != (size+1) && i/(size+2) != 0 && i/(size+2) != (size+1)){
            u_even[i] = 1;
        }
    }
    struct timeval t0,t1;
    const int csize = (size+2)*(size+2)*sizeof(float);
    cudaMalloc((void**)&u1,csize);
    cudaMalloc((void**)&u2,csize);
    // Arrange the ping-pong buffers so the final result lands in u1.
    if (N%2 == 0){
        cudaMemcpy(u1,u_even,csize,cudaMemcpyHostToDevice);
        cudaMemcpy(u2,u_odd,csize,cudaMemcpyHostToDevice);
    }else{
        cudaMemcpy(u2,u_even,csize,cudaMemcpyHostToDevice);
        cudaMemcpy(u1,u_odd,csize,cudaMemcpyHostToDevice);
    }
    dim3 dimBlock(blocksize,1);
    dim3 dimGrid(block_num,1);
    // Run N time steps, timing the whole loop.
    gettimeofday(&t0, NULL);
    for (int i = N; i > 0; i--){
        calc<<<dimGrid,dimBlock>>>(u1,u2,i);
        // fix: cudaThreadSynchronize() is deprecated; use the modern call.
        cudaDeviceSynchronize();
    }
    gettimeofday(&t1, NULL);
    cudaMemcpy(u_result,u1,csize,cudaMemcpyDeviceToHost);
    cudaFree(u1);
    cudaFree(u2);
    printf("Elapsed time = %lf\n",((double)(t1.tv_sec - t0.tv_sec) + (double)(t1.tv_usec - t0.tv_usec)*1.0e-6));
    return EXIT_SUCCESS;
}
|
7,734 | // 使用GPU输出Hello world
#include <stdio.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Kernel: each launched thread prints one greeting line from the device.
__global__ void helloFromGPU (void)
{
printf("Hello World from GPU!\n");
}
// Prints a CPU greeting, then launches the GPU greeting kernel 1000 times.
int main(void)
{
// hello from cpu
printf("Hello World from CPU!\n");
for(int i=0;i<1000;i++)
{
helloFromGPU <<<1, 100>>>();
// Synchronizing flushes the device-side printf buffer each iteration.
// fix: the old code called cudaDeviceReset() here, tearing down and
// rebuilding the entire CUDA context 1000 times.
cudaDeviceSynchronize();
}
// One reset at exit is enough to release the device cleanly.
cudaDeviceReset();
return 0;
} |
7,735 | //A1.cu
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Advance each position component by its velocity: r[tid] += v[tid] * dt.
// One thread per array element; the launch must supply exactly as many
// threads as elements (there is no bounds guard).
__global__ void A1_kernel(double* r, double* v, double dt)
{
    size_t tid = blockDim.x * blockIdx.x + threadIdx.x;
    r[tid] += v[tid] * dt;
}
extern "C" {
// Host wrapper: r_h += v_h * dt for numParticles particles of 3 components
// each. Copies both arrays to the device, runs one kernel, copies r back.
void A1(double* r_h, double* v_h, double dt, int numParticles)
{
size_t N = 3 * numParticles;
size_t N_bytes = N * sizeof(double);
double *r_d, *v_d;
cudaMalloc(&r_d, N_bytes);
cudaMalloc(&v_d, N_bytes);
cudaMemcpy(r_d, r_h, N_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(v_d, v_h, N_bytes, cudaMemcpyHostToDevice);
// BUG FIX: the old launch <<<N, 3>>> spawned N*3 = 9*numParticles threads
// for only N array elements, so two thirds of the threads read and wrote
// out of bounds (the kernel has no guard). numParticles blocks of 3
// threads give exactly N threads, matching the array length.
A1_kernel<<<numParticles, 3>>>(r_d, v_d, dt);
cudaMemcpy(r_h, r_d, N_bytes, cudaMemcpyDeviceToHost);
cudaFree(r_d);
cudaFree(v_d);
}
}
|
7,736 | #include "includes.h"
// Save wave state as binary file.
// One explicit finite-difference time step of the 2-D wave equation:
//   u(t+1) = 2*u(t) - u(t-1) + (c*dt/dd)^2 * laplacian(u(t))
// on an N-wide row-major grid; waveSpaceTMin1/TMin2 are the two previous
// time levels. T is accepted but unused here.
// NOTE(review): there is no bounds check and the stencil reads cells at
// i±1 and j±1, so the launch configuration must restrict threads to
// interior cells (1..N-2) — confirm at the call site.
// NOTE(review): the trailing __syncthreads() orders nothing (no thread
// reads another thread's write in this kernel); it looks removable.
__global__ void fillSpaceTSteps(int N, int T, float c, float dt, float dd, float *waveSpace, float *waveSpaceTMin1, float *waveSpaceTMin2)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
waveSpace[N * i + j] = 2 * waveSpaceTMin1[N * i + j] - waveSpaceTMin2[N * i + j] + (c * c) * (dt/dd * dt/dd) * (waveSpaceTMin1[N * (i + 1) + j] + waveSpaceTMin1[N * (i - 1) + j] + waveSpaceTMin1[N * i + (j - 1)] + waveSpaceTMin1[N * i + (j + 1)] - 4 * waveSpaceTMin1[N * i + j]);
__syncthreads();
} |
7,737 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// In-place reversal of an N-element array: thread idx (idx < N/2) swaps
// a[idx] with its mirror a[N-1-idx].
__global__ void revArray(int N, float *a) {
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= N/2) return;
    float tmp = a[idx];
    a[idx] = a[N-1-idx];
    a[N-1-idx] = tmp;
}
// Fills an array with 1..N, reverses it on the GPU, and prints the result.
int main(int argc, char **argv) {
    int N = 100;
    // Host memory allocation.
    // fix: a second buffer h_b was allocated, never used, and never freed
    // (a straight memory leak); it has been removed.
    float *h_a = (float*) malloc(N*sizeof(float));
    int n;
    for(n=0;n<N;n++) {
        h_a[n] = 1+n;
    }
    // Device memory allocation
    float *d_a;
    cudaMalloc(&d_a, N*sizeof(float));
    // Copy data from host to device
    cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice);
    // Only N/2 threads do work (one per swap), so size the grid for N/2.
    int NthreadsPerBlock = 10;
    int NthreadBlocks = ((N/2)+NthreadsPerBlock-1)/NthreadsPerBlock ;
    revArray<<<NthreadBlocks, NthreadsPerBlock>>>(N,d_a);
    // Copy result from device to host
    cudaMemcpy(h_a, d_a, N*sizeof(float), cudaMemcpyDeviceToHost);
    for(n=0;n<N;++n) {
        printf("h_a[%d] = %g\n",n,h_a[n]);
    }
    free(h_a);
    cudaFree(d_a);
    return 0;
}
|
7,738 | #include "includes.h"
// Element-wise product-sum: dst[i] = a1[i]*a2[i] + b1[i]*b2[i].
// Threads past `size` simply exit.
__global__ void sum_of_mults_kernel(float *a1, float *a2, float *b1, float *b2, size_t size, float *dst)
{
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= size) {
        return;
    }
    dst[i] = a1[i] * a2[i] + b1[i] * b2[i];
} |
7,739 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cufft.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define TPBx 32 // TPBx * TPBy = number of threads per block
#define TPBy 32
__global__ void real2complex(cufftComplex *c, float *a, int n);
__global__ void complex2real_scaled(float *a, cufftComplex *c, float scale, int n);
__global__ void solve_poisson(cufftComplex *c, float *kx, float *ky, int n);
void exportData(const char *file, const float *X, const float *Y, const float *Z, const int n);
void gaussian(float *bin, const float *X, const float *Y, const int n);
// Solves the 2-D Poisson equation for a user-defined sum of Gaussian
// charge densities via cuFFT, timing each phase, and exports both the
// density and the potential as (x, y, z) data files.
int main(){
///////////////////////////// INITIZALIZATION ////////////////////////////
int N, R;
printf("Phase 1: Set Up The Environment for Testing\n");
printf("Input the range of x and y: "); // the range of x and y will be from -R to R
scanf("%d", &R);
printf("Input the number of samples: "); // the number of samples will be N * N
scanf("%d", &N);
printf("Allocating memory...\n");
clock_t startTime11 = clock();
char *uFile = (char *)"u_data.dat";
char *rFile = (char *)"r_data.dat";
float *X = (float *)malloc(sizeof(float) * N);
float *Y = (float *)malloc(sizeof(float) * N);
float *kx = (float *)malloc(sizeof(float) * N);
float *ky = (float *)malloc(sizeof(float) * N);
float *r = (float *)malloc(sizeof(float) * N * N);
float *u = (float *)malloc(sizeof(float) * N * N);
const float EPSILON = 8.85418782 * pow(10, -12); // Permitivity of free space
const float PI = 4 * atan(1);
float *kx_d, *ky_d, *r_d;
cufftComplex *r_complex_d;
cudaMalloc((void **)&kx_d, sizeof(float) * N);
cudaMalloc((void **)&ky_d, sizeof(float) * N);
cudaMalloc((void **)&r_d, sizeof(float) * N * N);
cudaMalloc((void **)&r_complex_d, sizeof(cufftComplex) * N * N);
int m = 0;
for(int i = N/-2; i < N/2; i++){
if(m < N){
X[m] = i * (float)R / (N / 2);
Y[m] = -1 * i * (float)R / (N / 2);
kx[m] = i * PI * N / (float)R; // Centers kx values to be at the origin
ky[m] = - i * PI * N / (float)R; // Centers ky values to be at the origin
}
m += 1;
}
clock_t endTime11 = clock();
clock_t startTime12 = clock();
gaussian(r, X, Y, N); // Generate a Gaussian Distribution for r
clock_t endTime12 = clock();
for (int i = 0; i < N * N; i++){
u[i] = 0.f;
}
double totalTime11 = (double)(endTime11 - startTime11) / CLOCKS_PER_SEC;
double totalTime12 = (double)(endTime12 - startTime12) / CLOCKS_PER_SEC;
printf("Phase 1 ended\n");
printf("Time spent on allocating memory: %f sec\n", totalTime11);
printf("Time spent on generating function: %f sec\n\n", totalTime12);
//////////////////////////////////////////////////////////////////////////
printf("Phase 2: Evaluation\n");
printf("Copying data from the host to the device...\n");
clock_t startTime21 = clock();
cudaMemcpy(kx_d, kx, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(ky_d, ky, sizeof(float) * N, cudaMemcpyHostToDevice);
cudaMemcpy(r_d, r, sizeof(float) * N * N, cudaMemcpyHostToDevice);
cufftHandle plan;
cufftPlan2d(&plan, N, N, CUFFT_C2C);
// Compute the execution configuration
dim3 dimBlock(TPBx, TPBy);
dim3 dimGrid(N / dimBlock.x, N / dimBlock.y);
// Handle N not multiple of TPBx or TPBy
if(N % TPBx != 0){
dimGrid.x += 1;
}
if(N % TPBy != 0){
dimGrid.y += 1;
}
clock_t endTime21 = clock();
printf("Start to solve the Poisson equation...\n");
clock_t startTime22 = clock();
real2complex<<<dimGrid, dimBlock>>>(r_complex_d, r_d, N);
cufftExecC2C(plan, r_complex_d, r_complex_d, CUFFT_FORWARD);
solve_poisson<<<dimGrid, dimBlock>>>(r_complex_d, kx_d, ky_d, N);
cufftExecC2C(plan, r_complex_d, r_complex_d, CUFFT_INVERSE);
float scale = 1.f / (EPSILON * N * N);
complex2real_scaled<<<dimGrid, dimBlock>>>(r_d, r_complex_d, scale, N);
clock_t endTime22 = clock();
clock_t startTime23 = clock();
cudaMemcpy(u, r_d, sizeof(float) * N * N, cudaMemcpyDeviceToHost);
clock_t endTime23 = clock();
printf("Phase 2 ended\n");
double totalTime21 = (double)(endTime22 - startTime22) / CLOCKS_PER_SEC;
// BUG FIX: the transfer total previously subtracted endTime23 from itself
// instead of subtracting startTime23, so the D2H copy was never counted.
double totalTime22 = (double)((endTime21 - startTime21) + (endTime23 - startTime23)) / CLOCKS_PER_SEC;
printf("Time spent on calculation: %f sec\n", totalTime21);
printf("Time spent on data transfer: %f sec\n\n", totalTime22); // fix: message said "Data spent"
printf("Phase 3: Data Exportation\n"); // fix: missing newline
printf("Exporting data...\n");
clock_t startTime31 = clock();
exportData(rFile, X, Y, r, N);
exportData(uFile, X, Y, u, N);
clock_t endTime31 = clock();
printf("Finish!\n");
printf("Phase 3 ended\n");
double totalTime31 = (double)(endTime31 - startTime31) / CLOCKS_PER_SEC;
printf("Time spent on exporting files: %f sec\n", totalTime31);
// Destroy plan and clean up memory on device
free(kx);
free(ky);
free(X);
free(Y);
free(r);
free(u);
cufftDestroy(plan);
cudaFree(r_complex_d);
cudaFree(kx_d);
cudaFree(ky_d);
cudaFree(r_d); // fix: r_d was never freed
return 0;
}
/* Copies a real n x n field into the real parts of a cufftComplex array
   and zeroes the imaginary parts. One thread per element, 2D launch. */
__global__ void real2complex(cufftComplex *c, float *a, int n){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col < n && row < n){
        int index = col + row * n;
        c[index].x = a[index];
        c[index].y = 0.0f;
    }
}
/* Extracts the real part of each complex element, multiplied by `scale`
   (used to normalize after the inverse FFT). */
__global__ void complex2real_scaled(float *a, cufftComplex *c, float scale, int n){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col < n && row < n){
        int index = col + row * n;
        a[index] = scale * c[index].x;
    }
}
/* Solves Poisson's equation in frequency space: divides each Fourier mode
   by -(kx^2 + ky^2). The zero mode at the grid center is pinned (its
   divisor is forced to 1) to avoid division by zero. */
__global__ void solve_poisson(cufftComplex *c, float *kx, float *ky, int n){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col < n && row < n){
        int index = col + row * n;
        float denom = -(kx[col] * kx[col] + ky[row] * ky[row]);
        if(col == n/2 && row == n/2){
            denom = 1.0f;
        }
        float factor = 1.0f / denom;
        c[index].x *= factor;
        c[index].y *= factor;
    }
}
/* Writes Z as tab-separated "x y z" lines to `file`, iterating y-major
   over the n x n grid. Prints a confirmation on success or an error if
   the file cannot be opened. */
void exportData(const char *file, const float *X, const float *Y, const float *Z, const int n){
    FILE *dataFile = fopen(file, "w");
    if(dataFile == NULL){
        printf("File not found!");
        return;
    }
    for(int row = 0; row < n; row++){
        for(int col = 0; col < n; col++){
            fprintf(dataFile, "%f\t%f\t%f\n", X[col], Y[row], Z[col + row * n]);
        }
    }
    printf("All data have been stored in \"%s\".\n", file);
    fclose(dataFile);
}
// Interactively builds a density field `bin` (n x n, row-major) as a sum
// of user-entered 2-D Gaussians centered at sPos[s] with variance var[s],
// evaluated at the grid coordinates X (columns) and Y (rows).
// Reads the number of signals and their parameters from stdin via scanf.
void gaussian(float *bin, const float *X, const float *Y, const int n){
int sNum; // Number of signal
int dim = 2;
const float PI = 4 * atan(1);
float x, y;
// Ask for essential parameters
printf("Number of signal: ");
scanf("%d", &sNum);
float *sPos = (float *)malloc(sizeof(float) * dim * sNum); // Position of signal
float *scale = (float *)malloc(sizeof(float) * sNum); // Normalization factor
float *var = (float *)malloc(sizeof(float) * sNum); // Variances
for(int s = 0; s < sNum; s++){
printf("Position of signal %d(e.g. 1.2 -3): ", s+1);
scanf("%f %f", &sPos[0+s*dim], &sPos[1+s*dim]);
printf("Value of variance %d: ", s+1);
scanf("%f", &var[s]);
}
// Generate required function
printf("Generating density distribution...");
// 1-D Gaussian normalization 1/sqrt(2*pi*var) per signal.
for(int s = 0; s < sNum; s++){
scale[s] = 1.0f / sqrt(2 * PI * var[s]);
}
// Fill only rows/columns 0..n-2; the last row and column are set below.
for(int j = 0; j < n-1; j++){
for(int i = 0; i < n-1; i++){
bin[i+j*n] = 0;
for(int s = 0; s < sNum; s++){
x = X[i] - sPos[0+s*dim];
y = Y[j] - sPos[1+s*dim];
bin[i+j*n] += scale[s] * exp(-(x * x + y * y)/(2 * var[s]));
}
}
}
// Fix boundary: copy row 0 into the last row and column 0 into the last
// column (periodic wrap, matching the FFT-based solver's assumptions).
for(int i = 0; i < n; i++){
bin[i+(n-1)*n] = bin[i];
bin[(n-1)+i*n] = bin[i*n];
}
// Clean up
free(sPos);
free(scale);
free(var);
} |
7,740 | #include "includes.h"
// Initializes one 16-entry ALAC predictor-coefficient row per thread in
// each of the mCoefsU / mCoefsV tables: the first three taps receive the
// scaled AINIT/BINIT/CINIT seed values, the remaining taps up to
// kALACMaxCoefs are zeroed.
__global__ void call_kALACSearch(int16_t * mCoefsU, int16_t * mCoefsV, int32_t kALACMaxCoefs)
{
    const int row = blockIdx.x * 16 * 16 + threadIdx.x * 16;
    const int32_t den = 1 << DENSHIFT_DEFAULT;
    mCoefsU[row + 0] = (AINIT * den) >> 4;
    mCoefsU[row + 1] = (BINIT * den) >> 4;
    mCoefsU[row + 2] = (CINIT * den) >> 4;
    mCoefsV[row + 0] = (AINIT * den) >> 4;
    mCoefsV[row + 1] = (BINIT * den) >> 4;
    mCoefsV[row + 2] = (CINIT * den) >> 4;
    for (int32_t tap = 3; tap < kALACMaxCoefs; tap++)
    {
        mCoefsU[row + tap] = 0;
        mCoefsV[row + tap] = 0;
    }
} |
7,741 | #include <stdio.h>
#include <cuda.h>
// Each launched thread prints its globally unique thread id.
__global__ void kernel()
{
printf("Hello World! My threadId is %d\n", blockDim.x * blockIdx.x + threadIdx.x);
}
// Launches one block of 256 threads; the synchronize flushes the
// device-side printf buffer before the process exits.
int main()
{
kernel<<<1, 256>>>();
cudaDeviceSynchronize();
}
|
// Element-wise vector addition c = a + b over n elements.
// One thread per element; threads beyond the array length exit early.
__global__ void add_vectors(
    const int* a,
    const int* b,
    int *c,
    const int n)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
|
7,743 | /*
* Copyright 2015 Netherlands eScience Center, VU University Amsterdam, and Netherlands Forensic Institute
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This file contains CUDA kernels for applying a Wiener filter to a
* PRNU pattern, as proposed by:
* M. Chen et al. "Determining image origin and integrity using sensor
* noise", IEEE Trans. Inf. Forensics Secur. 3 (2008) 74-90.
*
* The Wiener filter is used to remove JPEG artifacts from a PRNU pattern.
*
* To apply the complete filter:
* apply Fourier transform to the input image
* call computeSquaredMagnitudes() on the frequencies
* call computeVarianceEstimates() on the squared magnitudes
* call computeVarianceZeroMean() on the squared magnitudes
* call scaleWithVariances() scaling the frequencies using the local and global variance
* apply inverse Fourier transform
* normalize result by calling normalizeToReal()
*
* @author Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
* @version 0.1
*/
#ifndef block_size_x
#define block_size_x 32
#endif
#ifndef block_size_y
#define block_size_y 16
#endif
#ifndef reuse_computation
#define reuse_computation 1
#endif
//set the number and size of filters, also adjust max_border
#define FILTERS 4
#define FILTER_SIZES {3, 5, 7, 9}
#define MAX_BORDER 4 //the largest (filter size/2)
#define FLT_MAX 3.40282347e+38f
//function interfaces to prevent C++ garbling the kernel keys
extern "C" {
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies);
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance);
__global__ void toComplex(int h, int w, float* complex, float* input);
__global__ void toReal(int h, int w, float* output, float* complex);
__global__ void computeVarianceZeroMean(int n, float* output, float *input);
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input);
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input);
__global__ void normalizeToReal(int h, int w, float* output, float* complex);
__global__ void normalize(int h, int w, float* output, float* complex);
__global__ void sumFloats(float *output, float *input, int n);
}
/**
* Computes the square of each frequency and stores the result as a real.
*/
/**
 * Computes the squared magnitude |z|^2 = re^2 + im^2 of each interleaved
 * complex frequency and stores it as a real value.
 */
__global__ void computeSquaredMagnitudes(int h, int w, float* output, float* frequencies) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        float re = frequencies[row*2*w + 2*col];
        float im = frequencies[row*2*w + 2*col + 1];
        output[row*w + col] = re*re + im*im;
    }
}
/**
* This kernel scales the frequencies in input with a combination of the global variance and an estimate
* for the local variance at that position. Effectively this cleans the input pattern from low frequency
* noise.
*/
/**
 * Scales each complex frequency by var / max(var, localVarianceEstimate),
 * combining the global variance with the local estimate to suppress
 * low-frequency noise in the input pattern.
 */
__global__ void scaleWithVariances(int h, int w, float* output, float* input, float* varianceEstimates, float* variance) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    float globalVar = variance[0];
    if (row < h && col < w) {
        float factor = globalVar / max(globalVar, varianceEstimates[row*w + col]);
        output[row*2*w + 2*col]     = input[row*2*w + 2*col] * factor;
        output[row*2*w + 2*col + 1] = input[row*2*w + 2*col + 1] * factor;
    }
}
/**
* Simple helper kernel to convert an array of real values to an array of complex values
*/
/**
 * Converts an array of real values to interleaved complex values with
 * zero imaginary parts.
 */
__global__ void toComplex(int h, int w, float* complex, float* input) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        complex[row*w*2 + 2*col]     = input[row*w + col];
        complex[row*w*2 + 2*col + 1] = 0.0f;
    }
}
/**
* Simple helper kernel to convert a complex array to an array of real values
*/
/**
 * Extracts the real component of each interleaved complex value.
 */
__global__ void toReal(int h, int w, float* output, float* complex) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        output[row*w + col] = complex[row*w*2 + 2*col];
    }
}
/**
* This kernel normalizes the input by dividing it by the number of pixels in the image.
* It takes an array of complex numbers as input, but only stores the real values.
*/
/**
 * Divides each element's real component by the pixel count (w*h) and
 * stores only the real values — the usual normalization after an
 * inverse Fourier transform.
 */
__global__ void normalizeToReal(int h, int w, float* output, float* complex) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        output[row*w + col] = (complex[row*w*2 + 2*col] / (float)(w * h));
    }
}
/**
* This kernel normalizes the complex input by dividing it by the number of pixels in the image.
*/
/**
 * Divides both components of each complex value by the pixel count (w*h).
 */
__global__ void normalize(int h, int w, float* complex_out, float* complex_in) {
    int row = threadIdx.y + blockIdx.y * block_size_y;
    int col = threadIdx.x + blockIdx.x * block_size_x;
    if (row < h && col < w) {
        int base = row*w*2 + 2*col;
        complex_out[base]     = (complex_in[base]     / (float)(w*h));
        complex_out[base + 1] = (complex_in[base + 1] / (float)(w*h));
    }
}
/**
* computeVarianceEstimates uses a number of simple filters to compute a minimum local variance
*
* Instead of using multiple arrays with zeroed borders around them, the loading phase of this
* kernel writes a zero to shared memory instead of loading a border value from global memory.
* The filters can then be performed as normal on the data in shared memory. Because of this
* MAX_BORDER needs to be set accordingly.
*
*/
// Computes, per pixel, the minimum local mean of squared values over the
// FILTER_SIZES box filters. Preconditions: blockDim == (block_size_x,
// block_size_y) and a grid covering the w x h image; out-of-image halo
// cells are treated as zero.
__global__ void computeVarianceEstimates(int h, int w, float* varest, float* input) {
int ty = threadIdx.y;
int tx = threadIdx.x;
int i = blockIdx.y * block_size_y;
int j = blockIdx.x * block_size_x;
// Block tile plus a MAX_BORDER halo on all sides, so every filter size
// can read its whole neighborhood from shared memory.
__shared__ float shinput[block_size_y+2*MAX_BORDER][block_size_x+2*MAX_BORDER];
//the loading phase of the kernel, which writes 0.0f to shared memory if the index
//is outside the input
int yEnd = block_size_y+2*MAX_BORDER;
int xEnd = block_size_x+2*MAX_BORDER;
for (int y=ty; y < yEnd; y+= block_size_y) {
for (int x=tx; x < xEnd; x+= block_size_x) {
float in = 0.0f;
int indexy = i+y-MAX_BORDER;
int indexx = j+x-MAX_BORDER;
if (indexy >= 0 && indexy < h) {
if (indexx >= 0 && indexx < w) {
in = input[indexy*w+indexx];
}
}
shinput[y][x] = in;
}
}
// The whole tile (including halo) must be populated before filtering.
__syncthreads();
const int filter[FILTERS] = FILTER_SIZES;
float res = FLT_MAX;
#if reuse_computation == 0
//perform filtering without reusing the sum from smaller filters
for (int f = 0; f < FILTERS; f++) {
int filterSize = filter[f];
int offset = MAX_BORDER-(filterSize/2);
//do a convolution
float sum = 0.0f;
for (int fi = 0; fi < filterSize; fi++) {
for (int fj = 0; fj < filterSize; fj++) {
sum += shinput[ty+fi+offset][tx+fj+offset];
}
}
sum /= (float)(filterSize * filterSize);
//store minimum
res = sum < res ? sum : res;
}
#elif reuse_computation == 1
//perform filtering while reusing the sum from smaller filters:
//each filter adds only its own perimeter ring to the running sum.
//start from center pixel
float sum = shinput[ty+MAX_BORDER][tx+MAX_BORDER];
//add sides of the square filter to sum and store minimum average
for (int f = 0; f < FILTERS; f++) {
int filterSize = filter[f];
int offset = MAX_BORDER-(filterSize/2);
//top and bottom row
for (int fj=0; fj<filterSize; fj++) {
sum += shinput[ty+0+offset][tx+fj+offset];
sum += shinput[ty+filterSize-1+offset][tx+fj+offset];
}
//two sides (between top and bottom rows)
for (int fi=1; fi<filterSize-1; fi++) {
sum += shinput[ty+fi+offset][tx+0+offset];
sum += shinput[ty+fi+offset][tx+filterSize-1+offset];
}
//store minimum
float avg = sum / (filterSize*filterSize);
res = avg < res ? avg : res;
}
#endif
//write output (edge blocks may extend past the image, hence the guards)
if (i + ty < h) {
if (j + tx < w) {
varest[(i+ty)*w + (j+tx)] = res;
}
}
}
/**
* This method is a naive implementation of computeVarianceEstimates used for correctness checks
*/
// Reference (unoptimized) version of computeVarianceEstimates: for each
// pixel, takes the minimum over the FILTER_SIZES box-filter means of the
// squared input, reading directly from global memory with zero padding
// outside the image.
__global__ void computeVarianceEstimates_naive(int h, int w, float* varest, float* input) {
int i = threadIdx.y + blockIdx.y * block_size_y;
int j = threadIdx.x + blockIdx.x * block_size_x;
float res = FLT_MAX;
if (i < h && j < w) {
const int filter[FILTERS] = FILTER_SIZES;
for (int f = 0; f < FILTERS; f++) {
int filterSize = filter[f];
int border = filterSize/2;
//do a convolution
float sum = 0.0f;
for (int fi = 0; fi < filterSize; fi++) {
for (int fj = 0; fj < filterSize; fj++) {
//original
//sum += input[(i + fi)*(w+border*2)+(j + fj)];
int row = i+fi-border;
int col = j+fj-border;
//the following ifs are a hack to save redundant copying:
//out-of-image neighbors simply contribute zero to the sum
if (row >= 0 && row < h) {
if (col >= 0 && col < w) {
sum += input[row*w + col];
}
}
}
}
sum /= (float)(filterSize * filterSize);
if (sum < res) {
res = sum;
}
}
//write output
varest[i*w+j] = res;
}
}
/*
* This method computes the variance of an input array, assuming the mean is equal to zero
*
* Thread block size should be power of two because of the reduction.
* The implementation currently assumes only one thread block is used for the entire input array
*
* In case of multiple thread blocks initialize output to zero and use atomic add or another kernel
*
* block_size_x power of 2
*/
#ifndef grid_size_x //hack to see if the Kernel Tuner is being used
#undef block_size_x
#define block_size_x 128
#endif
// Computes a zero-mean variance of `input` via a grid-stride sum of
// squares followed by a shared-memory tree reduction; each block writes
// its result to output[blockIdx.x] (intended for a single-block launch,
// per the comment above).
// NOTE(review): the reported value is sum(x^2) * n / (n - 1), not the
// usual sum(x^2) / (n - 1) — presumably compensated by a scale factor
// elsewhere in the pipeline; verify against the callers.
__global__ void computeVarianceZeroMean(int n, float *output, float *input) {
int x = blockIdx.x * block_size_x + threadIdx.x;
int ti = threadIdx.x;
int step_size = block_size_x * gridDim.x;
float sum = 0.0f;
if (x < n) {
//compute thread-local sums of squares
for (int i=x; i < n; i+=step_size) {
sum += input[i]*input[i];
}
}
//store local sums in shared memory (sum stays 0.0f for threads past n)
__shared__ float shmem[block_size_x];
shmem[ti] = sum;
__syncthreads();
//reduce local sums; requires block_size_x to be a power of two
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
shmem[ti] += shmem[ti + s];
}
__syncthreads();
}
//write result
if (ti == 0) {
output[blockIdx.x] = ( shmem[0] * n ) / ( n - 1 ); //in case of multiple threadblocks write back using atomicAdd
}
}
/*
* Simple CUDA Helper function to reduce the output of a
* reduction kernel with multiple thread blocks to a single value
*
* This function performs a sum of an array of floats
*
* This function is to be called with only a single thread block
*/
// Sums n floats into output[0] using one thread block: each thread
// accumulates a strided partial sum, then a shared-memory tree reduction
// combines the block_size_x partials (block_size_x must be a power of two).
__global__ void sumFloats(float *output, float *input, int n) {
int ti = threadIdx.x;
__shared__ float shmem[block_size_x];
//compute thread-local sums
float sum = 0.0f;
for (int i=ti; i < n; i+=block_size_x) {
sum += input[i];
}
//store local sums in shared memory
shmem[ti] = sum;
__syncthreads();
//reduce local sums; halving stride needs power-of-two block_size_x
for (unsigned int s=block_size_x/2; s>0; s>>=1) {
if (ti < s) {
shmem[ti] += shmem[ti + s];
}
__syncthreads();
}
//write result
if (ti == 0) {
output[0] = shmem[0];
}
}
|
7,744 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define STAR 10001
char *RULES_FILE = (char *)"rule_2M.csv";
const int RULES_COUNT = 2000000;
char *TR_FILE = (char *)"transactions_tiny.csv";
const int TR_COUNT = 20000;
const int RULE_SIZE = 11;
const int TR_SIZE = RULE_SIZE - 1;
int rules_f[RULES_COUNT*RULE_SIZE];
int data_f[TR_COUNT*TR_SIZE];
// Allocate a rows x cols matrix of ints as an array of row pointers.
// calloc zero-fills every element. No failure checks (matches file style).
int** alloc_two_d(int rows, int cols) {
    int **matrix = (int **)calloc(rows, sizeof(int*));
    for (int r = 0; r < rows; ++r) {
        matrix[r] = (int *)calloc(cols, sizeof(int));
    }
    return matrix;
}
// Parse a semicolon-separated CSV of ints into a freshly allocated rows x cols
// matrix. A field that does not parse as an int (e.g. a wildcard character) is
// consumed as one character and stored as the STAR sentinel.
int** load_csv(char *csv_file, int rows, int cols){
    int **data = alloc_two_d(rows, cols);
    FILE* file = fopen(csv_file, "r");
    // Bugfix: fail loudly on a missing/unreadable file instead of crashing
    // inside fscanf(NULL, ...).
    if (file == NULL) {
        fprintf(stderr, "load_csv: cannot open %s\n", csv_file);
        exit(1);
    }
    for (int row = 0; row < rows; row++) {
        for (int col = 0; col < cols; col++) {
            if(!fscanf(file, "%d;", &data[row][col])) {
                fscanf(file, "%*c;");
                data[row][col] = STAR;
            }
        }
    }
    fclose(file);
    return data;
}
// qsort comparator for rows of ints (passed as int** elements): lexicographic
// order on the first 10 columns; 0 when those columns are all equal.
int cmpfunc (const void * a, const void * b) {
    const int **r1 = (const int**)a;
    const int **r2 = (const int**)b;
    for (int col = 0; col < 10; ++col) {
        int diff = (*r1)[col] - (*r2)[col];
        if (diff != 0) {
            return diff;
        }
    }
    return 0;
}
// One thread per transaction: match it against every rule and count matching
// rules' consequents (last rule column) into result[tr*result_size + class].
__global__
void gpu_kernel(int* data, int tr_count, int tr_size, int* rules, int rules_count,int rule_size, int* result, int result_size){
int tr = blockIdx.x*blockDim.x + threadIdx.x;
if(tr >= tr_count)return;
int start_col = 0;
for (int row = 0; row < rules_count; row++) {
int ok = 1;
// Skip the leading-STAR prefix of this rule.
// NOTE(review): start_col is NOT reset between rows, so it only ever grows.
// This is correct only if rules are sorted so leading-STAR prefixes are
// monotonically non-decreasing (STAR sorts high, see cmpfunc/qsort) — confirm.
// A rule that is all STARs would also run this loop past the row — confirm
// inputs exclude that case.
while(rules[row*rule_size + start_col] == STAR){
start_col++;
}
// A rule matches when every non-STAR antecedent column equals the transaction.
for (int col = start_col; ok && col < tr_size; col++) {
if (data[tr*tr_size + col] != rules[row*rule_size+col] && rules[row*rule_size+col] != STAR) {
ok = 0;
}
}
if (ok) {
// Last rule column holds the consequent (assumed in [0, result_size)).
result[tr*result_size+ rules[row*rule_size+rule_size-1]]+=1;
}
}
}
// Run gpu_kernel over all transactions: uploads the flattened transaction and
// rule tables, launches one thread per transaction, and retrieves the
// per-transaction consequent counts (100 bins each).
void gpu_search(int* data_f, int tr_count,int tr_size, int* rules_f, int rules_count,int rule_size){
    int* data_g;
    int* rules_g;
    int* result_g;
    // Bugfix: tr_count*100 ints (~8 MB at the default sizes) was a local array
    // and overflowed the stack; allocate zero-initialised heap memory instead.
    int* result_f = (int*)calloc((size_t)tr_count * 100, sizeof(int));
    cudaError_t err;
    err = cudaMalloc((void **)&data_g, tr_count*tr_size*sizeof(int));
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err = cudaMalloc((void **)&rules_g, rules_count*rule_size*sizeof(int));
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err = cudaMalloc((void **)&result_g, tr_count*100*sizeof(int));
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err = cudaMemcpy(data_g, data_f, tr_count*tr_size*sizeof(int), cudaMemcpyHostToDevice);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err = cudaMemcpy(rules_g, rules_f, rules_count*rule_size*sizeof(int), cudaMemcpyHostToDevice);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err = cudaMemcpy(result_g, result_f, tr_count*100*sizeof(int), cudaMemcpyHostToDevice);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    int BLOCK_SIZE =1024;
    // Bugfix: the fixed 5-block grid only covered the first 5120 transactions;
    // round the grid up so every transaction gets a thread (the kernel guards
    // the tail with tr >= tr_count).
    int BLOCK_DIM = (tr_count + BLOCK_SIZE - 1) / BLOCK_SIZE;
    gpu_kernel<<<BLOCK_DIM,BLOCK_SIZE>>>(data_g, tr_count, tr_size, rules_g, rules_count, rule_size, result_g, 100);
    // Kernel launches return no status directly; surface launch errors explicitly.
    err = cudaGetLastError();
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err = cudaMemcpy(result_f,result_g , tr_count*100*sizeof(int), cudaMemcpyDeviceToHost);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err = cudaFree(data_g);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err = cudaFree(rules_g);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    err = cudaFree(result_g);
    if(err!=cudaSuccess){printf("%s in %s at line %d\n",cudaGetErrorString(err),__FILE__,__LINE__);}
    free(result_f);
}
// Driver: load rules and transactions from CSV, sort rules, flatten both into
// the global contiguous arrays (for a single H2D copy), then run and time the
// GPU rule matching.
int main(){
struct timeval start, end;
// This first timestamp is overwritten before use below (load time not reported).
gettimeofday(&start, NULL);
printf("Loading rules\n");
int **rules = load_csv(RULES_FILE, RULES_COUNT, RULE_SIZE);
printf("Loading transactions\n");
int **data = load_csv(TR_FILE, TR_COUNT, TR_SIZE);
printf("Sorting rules\n");
// Sort row pointers lexicographically so gpu_kernel's STAR-prefix skipping works.
qsort(rules, RULES_COUNT, sizeof(rules[0]), cmpfunc);
// Flatten the row-pointer matrices into the file-scope contiguous buffers.
for(int i=0;i<RULES_COUNT;i++){
for(int j=0;j<RULE_SIZE;j++){
rules_f[i*RULE_SIZE + j]=rules[i][j];
}
}
for(int i=0;i<TR_COUNT;i++){
for(int j=0;j<TR_SIZE;j++){
data_f[i*TR_SIZE + j]=data[i][j];
}
}
printf("GPU: start\n");
gettimeofday(&start, NULL);
gpu_search(data_f,TR_COUNT,TR_SIZE,rules_f,RULES_COUNT,RULE_SIZE);
gettimeofday(&end, NULL);
printf("GPU: %f\n",(end.tv_sec - start.tv_sec)+ (end.tv_usec - start.tv_usec) / 1.e6);
// Per-row allocations from load_csv are not freed; reclaimed at process exit.
return 0;
}
|
7,745 | #include "includes.h"
// Reset every element of the device-side label array to false.
// One thread per element; threads past the end of the array are no-ops.
__global__ void clearLabel(bool *label, unsigned int size)
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    label[idx] = false;
}
7,746 | #include "includes.h"
// Block-wise sum reduction with 8-way unrolling: each block reduces 8
// consecutive blockDim.x-sized chunks of g_idata (modified in place) and
// writes its partial sum to g_odata[blockIdx.x].
__global__ void reduceUnrollWarps8(int *g_idata, int *g_odata, unsigned int n){
unsigned int tid = threadIdx.x;
unsigned int idx = (8 * blockIdx.x) * blockDim.x + threadIdx.x;
// Base of this block's 8-chunk window in global memory.
int *idata = g_idata + (8 * blockIdx.x) * blockDim.x;
// 8-way unrolled pre-reduction in global memory.
// NOTE(review): when the guard fails the remaining chunks are skipped
// entirely, so n must be a multiple of 8 * blockDim.x for every element to
// be counted — confirm callers.
if(idx + 7 * blockDim.x < n){
g_idata[idx] += g_idata[idx + blockDim.x];
g_idata[idx] += g_idata[idx + 2 * blockDim.x];
g_idata[idx] += g_idata[idx + 3 * blockDim.x];
g_idata[idx] += g_idata[idx + 4 * blockDim.x];
g_idata[idx] += g_idata[idx + 5 * blockDim.x];
g_idata[idx] += g_idata[idx + 6 * blockDim.x];
g_idata[idx] += g_idata[idx + 7 * blockDim.x];
}
__syncthreads();
// In-place tree reduction down to the last warp.
for(int stride = blockDim.x / 2; stride > 32; stride >>= 1){
if(tid < stride)
idata[tid] += idata[tid + stride];
__syncthreads();
}
// Warp-synchronous unrolled tail: relies on implicit lockstep within a warp
// (pre-Volta assumption); on Volta+ this pattern needs __syncwarp() between steps.
if(tid < 32){
volatile int *vmem = idata;
vmem[tid] += vmem[tid + 32];
vmem[tid] += vmem[tid + 16];
vmem[tid] += vmem[tid + 8];
vmem[tid] += vmem[tid + 4];
vmem[tid] += vmem[tid + 2];
vmem[tid] += vmem[tid + 1];
}
// Thread 0 publishes this block's partial sum.
if(tid == 0) g_odata[blockIdx.x] = idata[0];
}
7,747 | #include <iostream>
#include <math.h>
#include <chrono>
// Kernel function to add the elements of two arrays
// Element-wise y[i] = x[i] + y[i] over n floats, using a grid-stride loop so
// any launch configuration covers the whole array.
__global__ void add(int n, float *x, float *y){
    const int step = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += step) {
        y[i] = x[i] + y[i];
    }
}
// Timed vector-add demo with explicit device memory: initialise two 1M-float
// arrays on the host, copy to the device, run add(), copy back, and verify
// every element equals 3.0f.
int main(int argc ,char *argv[]){
    std::chrono::high_resolution_clock::time_point start_beig,stop_end;
    int N = 1<<20;
    float *x, *y;
    float *x_device, *y_device;
    //cpu data
    x = (float *)malloc(N*sizeof(float));
    y = (float *)malloc(N*sizeof(float));
    start_beig = std::chrono::high_resolution_clock::now();
    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    stop_end = std::chrono::high_resolution_clock::now();
    auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop_end - start_beig).count();
    std::cout << "cudaMalloc set data time : " << duration / 1000.0f << " ms\n";
    //device data: allocate and copy host -> device
    start_beig = std::chrono::high_resolution_clock::now();
    cudaMalloc(&x_device,N*sizeof(float));
    cudaMalloc(&y_device,N*sizeof(float));
    cudaMemcpy(x_device,x,N*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(y_device,y,N*sizeof(float),cudaMemcpyHostToDevice);
    stop_end = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration_cast<std::chrono::microseconds>(stop_end - start_beig).count();
    std::cout << "cudaMalloc time : " << duration / 1000.0f << " ms\n";
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;   // ceil-div grid
    start_beig = std::chrono::high_resolution_clock::now();
    // Run kernel on 1M elements on the GPU
    add<<<numBlocks, blockSize>>>(N, x_device, y_device);
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    cudaMemcpy(y,y_device,N*sizeof(float),cudaMemcpyDeviceToHost);
    stop_end = std::chrono::high_resolution_clock::now();
    duration = std::chrono::duration_cast<std::chrono::microseconds>(stop_end - start_beig).count();
    std::cout << "cudaMalloc exe : " << duration / 1000.0f << " ms\n";
    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++){
        maxError = fmax(maxError, fabs(y[i]-3.0f));
        if(maxError > 0){
            std::cout << "i: " << i << " y[i]: " << y[i] << std::endl;
        }
    }
    std::cout << "Max error: " << maxError << std::endl;
    // Free memory.
    // Bugfix: the original called cudaFree on the malloc'd host pointers
    // (which fails) and leaked the device allocations; free each with the
    // matching deallocator.
    cudaFree(x_device);
    cudaFree(y_device);
    free(x);
    free(y);
    return 0;
}
7,748 | #include <iostream>
#include <math.h>
// One thread per element: y[i] = x[i] + y[i], guarded against the grid tail
// when n is not a multiple of the block size.
__global__ void add(int n, float *x, float *y) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        y[i] = x[i] + y[i];
    }
}
// Vector-add demo on unified (managed) memory: compute y[i] = x[i] + y[i] for
// 1M floats on the GPU, then report the maximum deviation from 3.0f.
int main(void) {
    const int count = 1 << 20;
    float *x, *y;
    cudaMallocManaged(&x, count * sizeof(float));
    cudaMallocManaged(&y, count * sizeof(float));
    // Initialise on the host; managed pages migrate on demand.
    for (int i = 0; i < count; ++i) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    const int threadsPerBlock = 256;
    const int blocks = (count + threadsPerBlock - 1) / threadsPerBlock;
    add<<<blocks, threadsPerBlock>>>(count, x, y);
    // Kernel launches are asynchronous: wait before reading y on the host.
    cudaDeviceSynchronize();
    float maxError = 0.0f;
    for (int i = 0; i < count; ++i) {
        maxError = fmax(maxError, fabs(y[i] - 3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;
    // Managed allocations are released with cudaFree.
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
7,749 | #include<thrust/host_vector.h>
#include<thrust/device_vector.h>
#include<thrust/reduce.h>
#include<thrust/scan.h>
#include<device_launch_parameters.h>
#include<stdio.h>
#include<stdlib.h>
#define DATA_SIZE 66292994
// Compare a CPU thrust::reduce against the GPU version on ~66M random ints,
// timing the GPU path (H2D copy + reduce + D2H copy) with CUDA events.
int main(void)
{
float t_reduce;
// NOTE(review): start2/stop2 are created but stop2 is never recorded and no
// elapsed time is taken from them (dead code); none of the events are destroyed.
cudaEvent_t start1,stop1, start2, stop2;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
//initialze random values on host
thrust::host_vector<int> data(DATA_SIZE);
thrust::generate(data.begin(), data.end(), rand);
//compute sum on host(CPU)
int h_sreduce = thrust::reduce(data.begin(), data.end());
//for inclusive time (copy + compute + copy back)
cudaEventRecord(start1, NULL);
//copy values on device
thrust::device_vector<int> gpudata = data;
cudaEventRecord(start2, NULL);
//compute sum on device(GPU)
int d_sreduce = thrust::reduce(gpudata.begin(), gpudata.end());
//copy back to host
thrust::copy(gpudata.begin(), gpudata.end(), data.begin());
cudaEventRecord(stop1, NULL);
cudaEventSynchronize(stop1);
cudaEventElapsedTime(&t_reduce, start1, stop1);
printf("\n Reduce time is %f ms", t_reduce);
// thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
// Sums may overflow int for this data size; both sides overflow identically.
printf("\n host sum = %d, gpu sum = %d",h_sreduce,d_sreduce);
}
|
7,750 | #include<stdio.h>
const int ARRAY_SIZE =500000; // size greater than 32M could not be achieved
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
const int MAX_NO_THREADS = 512;
// Element-wise addition: d_out[i] = d_in1[i] + d_in2[i], one thread per element.
// NOTE(review): there is no bounds guard; the host rounds the grid up, so the
// final block writes past ARRAY_SIZE unless the buffers are padded — confirm.
__global__ void vector_reduce(float *d_in1, float *d_in2, float *d_out){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    d_out[i] = d_in1[i] + d_in2[i];
}
// Verify h_out[i] == h_in1[i] + h_in2[i] across the whole array.
// Returns 1 on success, 0 at the first mismatch. Exact float comparison is
// intentional: the generated inputs sum to exactly representable values.
int check( float *h_in1, float *h_in2, float *h_out){
    for (int i = 0; i < ARRAY_SIZE; i++) {
        if (h_in1[i] + h_in2[i] != h_out[i]) {
            return 0;
        }
    }
    return 1;
}
// Vector-add demo: fill two 500k-float arrays, add them on the GPU, verify.
int main(){
    // Bugfix: three ~2 MB float arrays (~6 MB total) were plain locals and
    // overflowed the default stack; use static storage instead.
    static float h_in1[ARRAY_SIZE], h_in2[ARRAY_SIZE], h_out[ARRAY_SIZE];
    //generating the input arrays (each pair sums to exactly ARRAY_SIZE)
    int i;
    for(i=0;i<ARRAY_SIZE;i++){
        h_in1[i]=(float)i;
        h_in2[i]=(float)(ARRAY_SIZE-i);
    }
    //declaring device memory pointers
    float *d_in1, *d_in2, *d_out;
    //allocating device memory
    cudaMalloc(&d_in1, ARRAY_BYTES);
    cudaMalloc(&d_in2, ARRAY_BYTES);
    cudaMalloc(&d_out, ARRAY_BYTES);
    //transferring memory from host to device
    cudaMemcpy(d_in1, h_in1, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaMemcpy(d_in2, h_in2, ARRAY_BYTES, cudaMemcpyHostToDevice);
    //starting kernel (grid rounded up; note vector_reduce has no bounds guard)
    vector_reduce<<<(int)(ARRAY_SIZE/MAX_NO_THREADS)+1, MAX_NO_THREADS>>>(d_in1, d_in2, d_out);
    //transferring memory from device to host (cudaMemcpy synchronises with the kernel)
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    //checking correctness
    if(check(h_in1, h_in2, h_out))
        printf("the result is correct\n");
    else
        printf("the result is incorrect\n");
    //freeing memory
    cudaFree(d_in1);
    cudaFree(d_in2);
    cudaFree(d_out);
    return 0;
}
|
7,751 | //
// Created by Peter Rigole on 2019-04-26.
//
#ifndef AXONBITS_PHASE_H
#define AXONBITS_PHASE_H
enum Phase { ExpectationPhase, OutcomePhase };
#endif //AXONBITS_PHASE_H
|
7,752 | #include "includes.h"
// Seed both float3 guess buffers with the x/y/z channels of the uchar4 source
// image (uchar values implicitly widened to float). One thread per sample.
__global__ void initGuessBuffers( const uchar4* srcImg, float3* guess1, float3* guess2, const uint nRows, const uint nCols )
{
    const uint total = nRows * nCols;
    const uint idx = threadIdx.x + blockDim.x * blockIdx.x;
    if( idx >= total )
        return;
    const uchar4 px = srcImg[idx];   // one coalesced 4-byte load per thread
    guess1[idx].x = px.x;
    guess1[idx].y = px.y;
    guess1[idx].z = px.z;
    guess2[idx].x = px.x;
    guess2[idx].y = px.y;
    guess2[idx].z = px.z;
}
7,753 | #include "includes.h"
// Zero-fill a batch of `matrices` order x order double matrices stored
// contiguously in C. 2D launch: thread (i, j) clears element (i, j) of every
// batch entry.
__global__ void init(int order, const int matrices, double * C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    // The bounds test does not depend on b: hoist it out of the batch loop so
    // out-of-range threads exit once instead of re-testing every iteration.
    if ((i<order) && (j<order)) {
        for (int b=0; b<matrices; ++b) {
            C[b*order*order+i*order+j] = 0;
        }
    }
}
7,754 | #include "includes.h"
// Copy an rows1 x cols1 image into the centre of a cols2-wide destination,
// shifting each pixel by pad = (cols2 - cols1) / 2 in both dimensions.
// Grid-stride loop over the n source pixels.
__global__ void cu_padding(const float* src, float* dst, const int rows1, const int cols1, const int cols2, const int n){
    const int step = blockDim.x * gridDim.x;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < n; idx += step) {
        const int pad = (cols2 - cols1) / 2;
        const int srcRow = idx / cols1;
        const int srcCol = idx % cols1;
        dst[(srcRow + pad) * cols2 + (srcCol + pad)] = src[idx];
    }
}
7,755 | /***************************************************
* Module that multiply a matrix by the transpose of other
* Author: Alonso Vidales <alonso.vidales@tras2.es>
*
* To be compiled with nvcc -ptx matrix_mult_trans.cu
* Debug: nvcc -arch=sm_20 -ptx matrix_mult_trans.cu
*
**************************************************/
//#include <stdio.h>
#ifdef __cplusplus
extern "C" {
#endif
// CUDA Kernel
// Computes C = A * B^T: each thread produces one element of C as the dot
// product of row y of A with row x of B (both rows of length wA). resW/resH
// give the per-block tile extents used to derive the global (x, y) position.
__global__ void matrixMulTrans(double* C, double* A, double* B, int wA, int resW, int resH, int resultWidth, int resultSize)
{
    const int x = threadIdx.x + (blockIdx.x * resW);
    const int y = threadIdx.y + (blockIdx.y * resH);
    const int resultPos = y * resultWidth + x;
    // Guard both the flat position and the row width (grid may overhang).
    if (resultPos >= resultSize || x >= resultWidth)
        return;
    double acc = 0;
    for (int k = 0; k < wA; ++k)
    {
        acc += A[y * wA + k] * B[x * wA + k];
    }
    // One element of C written per thread.
    C[resultPos] = acc;
}
#ifdef __cplusplus
}
#endif
|
7,756 | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define ErrorCheck(ans) { CheckFun((ans), __FILE__, __LINE__); }
// Abort helper behind the ErrorCheck macro: on a failed CUDA call, report the
// error text with its call site and terminate the process.
inline void CheckFun(cudaError_t code, const char *file, int line){
    if (code == cudaSuccess) {
        return;
    }
    fprintf(stderr,"ERROR: %s %s %d\n", cudaGetErrorString(code), file, line);
    exit(0);
}
// In-place element-wise product: dev_A[i] *= dev_B[i] for all arrLen elements,
// using a grid-stride loop so any launch configuration covers the array.
__global__ void multiply(double* dev_A, double* dev_B, size_t arrLen) {
    const size_t step = blockDim.x * gridDim.x;
    for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < arrLen; i += step) {
        dev_A[i] = dev_A[i] * dev_B[i];
    }
}
// Reads a length and two double arrays from stdin, multiplies them element-wise
// on the GPU, and prints the result with 10 decimals.
int main() {
size_t arrLen;
// NOTE(review): scanf return values are unchecked, and "%zd" is the ssize_t
// conversion — "%zu" is the correct one for size_t (works on glibc regardless).
scanf("%zd", &arrLen);
size_t size = sizeof(double) * arrLen;
double *arrA = (double*)malloc(size);
double *arrB = (double*)malloc(size);
for (size_t i = 0; i < arrLen; ++i) {
scanf("%lf", &arrA[i]);
}
for (size_t i = 0; i < arrLen; ++i) {
scanf("%lf", &arrB[i]);
}
double *dev_A, *dev_B;
ErrorCheck(cudaMalloc((void**)&dev_A, size));
ErrorCheck(cudaMalloc((void**)&dev_B, size));
ErrorCheck(cudaMemcpy(dev_A, arrA, size, cudaMemcpyHostToDevice));
ErrorCheck(cudaMemcpy(dev_B, arrB, size, cudaMemcpyHostToDevice));
// Grid rounded up; the kernel's grid-stride loop guards the tail.
dim3 blockSize = dim3(512,1,1);
dim3 gridSize = dim3((unsigned int)arrLen / 512 + 1, 1, 1);
multiply <<<gridSize, blockSize >>> (dev_A, dev_B, arrLen);
ErrorCheck(cudaGetLastError());
// Blocking D2H copy also synchronises with the kernel.
ErrorCheck(cudaMemcpy(arrA, dev_A, size, cudaMemcpyDeviceToHost));
for (size_t i = 0; i < arrLen; ++i) {
printf("%.10lf ", arrA[i]);
}
printf("\n");
free(arrA);
free(arrB);
ErrorCheck(cudaFree(dev_A));
ErrorCheck(cudaFree(dev_B));
return 0;
}
|
7,757 | #include <iostream>
#include <algorithm>
#include <cstdlib>
#include <ctime>
#include <cuda.h>
#include <stdio.h>
#include <cassert>
//define the chunk sizes that each threadblock will work on
#define BLKXSIZE 32
#define BLKYSIZE 4
#define BLKZSIZE 4
#define Q 19
#define lx 10
#define ly 10
#define lz 5
// for cuda error checking
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
return 1; \
} \
} while (0)
// Device-side replacement for std::swap: exchange a and b via a temporary copy.
template <typename T> __device__ void swap ( T& a, T& b )
{
    T tmp = a;
    a = b;
    b = tmp;
}
// For every in-bounds (x, y, z) site, swap component q with component q + 9
// for q = 1..9 (component 0 is left untouched). 3D launch, one thread per site.
__global__ void gpu_array_swap(int ptr_gpu[][ly][lz][Q]) {
    const unsigned x = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned y = blockIdx.y * blockDim.y + threadIdx.y;
    const unsigned z = blockIdx.z * blockDim.z + threadIdx.z;
    if (x >= lx || y >= ly || z >= lz)
        return;
    for (size_t q = 1; q <= 9; ++q) {
        swap(ptr_gpu[x][y][z][q], ptr_gpu[x][y][z][q + 9]);
    }
}
// Fill the lx x ly x lz x Q array with the running sequence 1, 2, 3, ...
// (component-major order: all sites for component 0, then component 1, ...).
void set_array(int array[][ly][lz][Q]) {
    int m = 0;
    // Bugfix: the loop bounds were permuted relative to the index positions
    // (i ran to lz while k ran to lx, yet array[i][j][k][l] was indexed), so
    // k overran the lz-sized third dimension. Bounds now match the index
    // positions and agree with print_array.
    for (int l = 0; l < Q; ++l) {
        for (int i = 0; i < lx; ++i) {
            for (int j = 0; j < ly; ++j) {
                for (int k = 0; k < lz; ++k) {
                    array[i][j][k][l] = ++m;
                }
            }
        }
    }
}
// Dump the array to stdout: for each (i, j, k) site, print its Q components
// space-separated on one line.
void print_array(int array[][ly][lz][Q]) {
    for (int i = 0; i < lx; ++i) {
        for (int j = 0; j < ly; ++j) {
            for (int k = 0; k < lz; ++k) {
                for (int l = 0; l < Q; ++l) {
                    std::cout << array[i][j][k][l] << " ";
                }
                std::cout << std::endl;
            }
        }
    }
}
// Driver: build the 4D host array, print it, swap component pairs on the GPU,
// and copy the result back.
int main() {
// NOTE(review): array_3d is defined but never used.
typedef int array_3d[ly][lz];
typedef int array_4d[ly][lz][Q];
const dim3 blockSize(BLKXSIZE, BLKYSIZE, BLKZSIZE);
// Ceil-division so the grid covers lx x ly x lz sites.
const dim3 gridSize(((lx + BLKXSIZE - 1) / BLKXSIZE),
((ly + BLKYSIZE - 1) / BLKYSIZE),
((lz + BLKZSIZE - 1) / BLKZSIZE));
// pointers for data set storage via malloc
array_4d* c; // storage for result stored on host
array_4d* d_c; // storage for result computed on device
// allocate storage for data set
if ((c = (array_4d*)malloc((lx * ly * lz * Q) * sizeof(int))) == 0) {
fprintf(stderr, "malloc1 Fail \n");
return 1;
}
set_array(c);
// NOTE(review): the array is printed before the GPU swap only; the swapped
// result is copied back but never displayed or checked.
print_array(c);
// allocate GPU device buffers
cudaMalloc((void**)&d_c, (lx * ly * lz * Q) * sizeof(int));
cudaCheckErrors("Failed to allocate device buffer");
cudaMemcpy(d_c, c, ((lx * ly * lz * Q) * sizeof(int)),
cudaMemcpyHostToDevice);
// compute result
gpu_array_swap<<<gridSize, blockSize>>>(d_c);
cudaCheckErrors("Kernel launch failure");
// copy output data back to host (blocking copy synchronises with the kernel)
cudaMemcpy(c, d_c, ((lx * ly * lz * Q) * sizeof(int)),
cudaMemcpyDeviceToHost);
cudaCheckErrors("CUDA memcpy failure");
free(c);
cudaFree(d_c);
cudaCheckErrors("cudaFree fail");
return 0;
}
|
7,758 | #include "includes.h"
__global__ void __pairmult2(int nrows, int bncols, int brows1, int brows2, float *A, int lda, float *A2, int lda2, float *Bdata, int *Bir, int *Bjc, int broff, int bcoff, float *C, int ldc, int transpose) {} |
7,759 |
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include <iostream>
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start); \
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 16
#define BLOCK_SIZE_1D 256
#define NUM_BINS 256
#define CUDA_TIMING
#define DEBUG
#define WARP_SIZE 32
#define R 9
unsigned char *input_gpu;
unsigned char *output_gpu;
// Monotonic wall-clock timestamp in milliseconds (immune to system clock changes).
double CLOCK() {
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec * 1000.0 + ts.tv_nsec * 1e-6;
}
/*******************************************************/
/* Cuda Error Function */
/*******************************************************/
// Pass-through CUDA error check: in DEBUG builds, print the error string and
// abort on failure; in release builds the status is returned unchecked.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
exit(-1);
}
#endif
return result;
}
// Add GPU kernel and functions
// HERE!!!
// Histogram-equalisation remap, applied to the image in place: rescale each
// pixel's CDF value into [0, NUM_BINS - 1].
__global__ void kernel(unsigned char *input, unsigned int *output_cdf,
//unsigned char *output,
unsigned int im_size, unsigned int *cdf_min){
/*
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
int location = y*TILE_SIZE*gridDim.x+x;
input[location] = round(float(output_cdf[input[location]] - *cdf_min)/float(im_size/4 - *cdf_min) * (NUM_BINS - 1));
*/
// NOTE(review): no bounds guard — assumes the launch exactly covers the
// (tile-padded) buffer. The im_size/64 divisor presumably matches the
// 1-in-64 pixel sampling used when building the histogram in get_histogram — confirm.
int location = blockIdx.x * blockDim.x+threadIdx.x;
input[location] = float(output_cdf[input[location]] - *cdf_min)/float(im_size/64 - *cdf_min) * (NUM_BINS - 1);
//printf("the final: %d .", int(output[location]));
}
// Build a subsampled intensity histogram: only every 64th thread contributes
// its pixel (trades accuracy for far fewer atomics on the 256 bins).
__global__ void get_histogram(unsigned char *input,
unsigned int *output_histogram
//int offset
){
/*
int x = blockIdx.x*TILE_SIZE+threadIdx.x;
int y = blockIdx.y*TILE_SIZE+threadIdx.y;
//if(x % 4 == 0 && y % 4 == 0){
if(x & 1 && y & 1){
int location = offset + y*TILE_SIZE*gridDim.x+x;
atomicAdd(&(output_histogram[input[location]]), 1);
}*/
// (threadIdx.x & 63) == 0 selects one thread in 64; no bounds guard, so the
// launch must exactly cover the buffer.
if( !(threadIdx.x & 63)){
int location = blockIdx.x * blockDim.x+threadIdx.x;
atomicAdd(&(output_histogram[input[location]]), 1);
}
//__syncthreads();
}
// In-place inclusive prefix sum (Hillis-Steele scan) over the 256 histogram
// bins, turning the histogram into a CDF. Must be launched as a single block
// of exactly 256 threads; the 8 doubling steps are log2(256).
__global__ void get_cdf_prefixSum(unsigned int *histogram)
{
int tid = threadIdx.x;
//USE SHARED MEMORY - COMON WE ARE EXPERIENCED PROGRAMMERS
__shared__ int Cache[256];
Cache[tid] = histogram[tid];
__syncthreads();
int space = 1;
//BEGIN
for (int i = 0; i < 8; i++)
{
// Read phase: capture own value and the neighbour `space` slots back.
int temp = Cache[tid];
int neighbor = 0;
if ((tid - space) >= 0)
{
neighbor = Cache[tid - space];
}
__syncthreads(); //AFTER LOADING
// Write phase: threads below `space` keep their value; others accumulate.
if (tid < space)
{
//DO NOTHING
}
else
{
Cache[tid] = temp + neighbor;
}
space = space * 2;
__syncthreads();
}
//REWRITE RESULTS TO MAIN MEMORY
histogram[tid] = Cache[tid];
}
/*
__global__ void get_cdf_naive(unsigned int *output_histogram,
unsigned int *output_cdf,
int n)
{
unsigned int d_hist_idx = blockDim.x * blockIdx.x + threadIdx.x;
if (d_hist_idx >= n)
{
return;
}
unsigned int cdf_val = 0;
for (int i = 0; i <= d_hist_idx; ++i)
{
cdf_val = cdf_val + output_histogram[i];
}
output_cdf[d_hist_idx] = cdf_val;
}*/
// Block-wide minimum of the CDF array, ignoring zeros (a zero bin means "no
// pixels", so the smallest non-zero CDF value is wanted).
// NOTE(review): reduces *in place* in global memory (destroys sdata), assumes
// a single block whose size covers the array, and parameter n is unused.
__global__ void reductionMin(unsigned int *sdata, unsigned int *results, int n)
{
// extern __shared__ int sdata[];
unsigned int tx = threadIdx.x;
// block-wide reduction (barrier precedes each read of the partner element)
for(unsigned int offset = blockDim.x>>1; offset > 0; offset >>= 1)
{
__syncthreads();
if(tx < offset)
{
// Take the partner value when it is smaller, or when our slot is the
// zero sentinel.
if(sdata[tx + offset] < sdata[tx] || sdata[tx] == 0)
sdata[tx] = sdata[tx + offset];
}
}
// finally, thread 0 writes the result
if(threadIdx.x == 0)
{
// the result is per-block
*results = sdata[0];
}
}
// Warm-up kernel: writes an x-derived pattern so driver/context lazy
// initialisation happens before the timed kernels run. `input` is unused but
// kept for signature compatibility with the launcher.
__global__ void kernel_warmup(unsigned char *input,
unsigned char *output){
    const int col = blockIdx.x*TILE_SIZE + threadIdx.x;
    const int row = blockIdx.y*TILE_SIZE + threadIdx.y;
    output[row*TILE_SIZE*gridDim.x + col] = col % 255;
}
void histogram_gpu(unsigned char *data,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int gridSize_1D = 1 + (NUM_BINS - 1)/ BLOCK_SIZE_1D;
int gridSize1D_2D = 1 + (( width*height - 1) / BLOCK_SIZE_1D);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// CPU
unsigned int *cdf_gpu = new unsigned int [NUM_BINS];
// Pinned
//unsigned char *data_pinned;
// GPU
unsigned int *output_histogram;
//unsigned int *output_cdf;
unsigned int *cdf_min;
// Pageable to Pinned memory
//cudaMallocHost((void**)&data_pinned, size*sizeof(unsigned char));
//memcpy(data_pinned, data, size*sizeof(unsigned char));
// Allocate arrays in GPU memory
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_histogram , NUM_BINS*sizeof(unsigned int)));
//checkCuda(cudaMalloc((void**)&output_cdf , NUM_BINS*sizeof(unsigned int)));
checkCuda(cudaMalloc((void**)&cdf_min , sizeof(unsigned int)));
checkCuda(cudaMemset(output_histogram , 0 , NUM_BINS*sizeof(unsigned int)));
//checkCuda(cudaMemset(output_cdf , 0 , NUM_BINS*sizeof(unsigned int)));
checkCuda(cudaMemset(cdf_min, 0, sizeof(unsigned int)));
// Grid & Block Size
//dim3 dimGrid2D(gridXSize, gridYSize);
//dim3 dimBlock2D(TILE_SIZE, TILE_SIZE);
// create streams
/*
const int nStreams = 2;
const int streamSize = width*height/nStreams;
std:: cout << "stream size: " << streamSize<<std::endl;
const int streamBytes = streamSize * sizeof(unsigned char);
cudaStream_t stream[nStreams];
for (int i = 0; i < nStreams; ++i){
checkCuda(cudaStreamCreate(&stream[i]));
}
*/
// Copy data to GPU
checkCuda(cudaMemcpy(input_gpu,
data,
size*sizeof(unsigned char),
cudaMemcpyHostToDevice));
//checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid1D(gridSize_1D);
dim3 dimBlock1D(BLOCK_SIZE_1D);
dim3 dimGrid1D_2D(gridSize1D_2D);
dim3 dimBlock1D_2D(BLOCK_SIZE_1D);
// Kernel Call
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
get_histogram<<<dimGrid1D_2D, dimBlock1D_2D>>>(input_gpu, output_histogram);
//get_cdf_naive<<<dimGrid1D, dimBlock1D>>>(output_histogram, output_cdf, NUM_BINS);
get_cdf_prefixSum<<<1, 256>>>(output_histogram);
reductionMin<<<1, 256>>>(output_histogram, cdf_min, 256);
//checkCuda(cudaPeekAtLastError());
//checkCuda(cudaDeviceSynchronize());
// Retrieve results from the GPU
/*
checkCuda(cudaMemcpy(cdf_gpu,
output_cdf,
NUM_BINS*sizeof(unsigned int),
cudaMemcpyDeviceToHost));
// Free resources and end the program
*/
/*
unsigned int cdf_min = INT_MAX;
for (int i = 0; i < NUM_BINS; i++){
if(cdf_gpu[i] != 0 && cdf_gpu[i] < cdf_min){
cdf_min = cdf_gpu[i];
}
}*/
// std::cout << "cdf min : " << cdf_min << std::endl;
kernel<<<dimGrid1D_2D, dimBlock1D_2D>>>(input_gpu, output_histogram, width*height, cdf_min);
checkCuda(cudaPeekAtLastError());
checkCuda(cudaDeviceSynchronize());
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
checkCuda(cudaMemcpy(data,
input_gpu,
size*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
//memcpy(data, data_pinned, size*sizeof(unsigned char));
//checkCuda(cudaFreeHost(data_pinned));
checkCuda(cudaFree(output_histogram));
checkCuda(cudaFree(cdf_min));
//checkCuda(cudaFree(output_cdf));
//checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
/*
for(int i = 0; i < NUM_BINS; i++){
std::cout << "Value " << i << " : " << probability_gpu[i] << " " << cdf_gpu[i] << std::endl;
}*/
/*
for(int i = 0; i < NUM_BINS*NUM_PARTS; i++){
std::cout << "Value " << i << " : " << hist_local_gpu[i] << " " << std::endl;
} */
/*
for (long int i = 0; i < 4990464; i++){
std::cout << data[i] << " ";
}*/
}
// GPU warm-up path: allocates and round-trips an image-sized buffer and runs
// the dummy kernel so driver/context initialisation does not pollute the
// timed histogram_gpu run. Overwrites `data` with the warm-up pattern.
void histogram_gpu_warmup(unsigned char *data,
unsigned int height,
unsigned int width){
int gridXSize = 1 + (( width - 1) / TILE_SIZE);
int gridYSize = 1 + ((height - 1) / TILE_SIZE);
int XSize = gridXSize*TILE_SIZE;
int YSize = gridYSize*TILE_SIZE;
// Both are the same size (CPU/GPU).
int size = XSize*YSize;
// Allocate arrays in GPU memory (file-scope input_gpu/output_gpu pointers)
checkCuda(cudaMalloc((void**)&input_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMalloc((void**)&output_gpu , size*sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu , 0 , size*sizeof(unsigned char)));
// Copy data to GPU (sizeof(char) == sizeof(unsigned char), so the byte count matches)
checkCuda(cudaMemcpy(input_gpu,
data,
size*sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
// Execute algorithm
dim3 dimGrid(gridXSize, gridYSize);
dim3 dimBlock(TILE_SIZE, TILE_SIZE);
kernel_warmup <<<dimGrid, dimBlock>>>(input_gpu,
output_gpu);
checkCuda(cudaDeviceSynchronize());
// Retrieve results from the GPU
checkCuda(cudaMemcpy(data,
output_gpu,
size*sizeof(unsigned char),
cudaMemcpyDeviceToHost));
// Free resources and end the program
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
7,760 | #include <iostream>
#include <array>
#include <cuda.h>
#include <numeric>
#include <vector>
// Blocking device-to-host copy of `size` elements of T. Throws std::bad_alloc
// on failure (this file's convention for signalling CUDA errors) and returns
// the destination pointer for chaining.
template<typename T>
T*
copy_to_host(T* dest, T* src, size_t size){
    if (cudaMemcpy(dest, src, sizeof(T) * size, cudaMemcpyDeviceToHost) != cudaSuccess)
        throw std::bad_alloc();
    return dest;
}
// Allocate device memory for `size` objects of T; throws std::bad_alloc on
// failure (after printing a marker to stdout, matching the original behaviour).
template<class T>
T*
cuda_malloc(size_t size){
    T* ptr;
    if (cudaMalloc((void**)&ptr, sizeof(T) * size) != cudaSuccess){
        printf("device side\n");
        throw std::bad_alloc();
    }
    return ptr;
}
// Blocking host-to-device copy of `size` elements of T. Throws std::bad_alloc
// on failure; otherwise returns the (successful) status code.
template<typename T>
cudaError_t
copy_to_device(T* dest, T* src, size_t size){
    const cudaError_t status = cudaMemcpy(dest, src, sizeof(T) * size, cudaMemcpyHostToDevice);
    if(status != cudaSuccess)
        throw std::bad_alloc();
    return status;
}
// Convenience: device allocation followed by an H2D copy of `size` elements.
// (copy_to_device throws on failure, so the runtime_error branch is a
// belt-and-braces check; behaviour preserved.)
template<typename T>
T*
alloc_and_copy_to_device(T* src, size_t size){
    T* dev = cuda_malloc<T>(size);
    if (copy_to_device<T>(dev, src, size) != cudaSuccess)
        throw std::runtime_error("Bad copy to device");
    return dev;
}
// Atomic-contention benchmark: threads repeatedly atomicAdd into the 64-wide
// out[] array, indexed by threadIdx.x so all blocks contend on the same slots.
// requires SM60+ (atomicAdd on double).
__global__
void
atomic_addition(double* src, double* out, size_t size){
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
size_t total_num_threads = gridDim.x * blockDim.x;
for(size_t i = tid; i < size; i += total_num_threads){
// NOTE(review): the inner loop variable shadows the outer grid-stride `i`,
// so only src[0..7] are ever read — 8 atomic adds of the same values per
// outer iteration. Looks like a bug (probably meant src[i] or an unroll);
// confirm intent before relying on the summed values.
for(int i=0; i < 8;++i)
atomicAdd(&out[threadIdx.x], src[i]);
//out[threadIdx.x] += src[i];
}
}
// Driver for the atomic-addition benchmark: ~67M doubles (~0.5 GB) uploaded,
// kernel launched with one block per 64 elements, results printed.
int main(){
const size_t num_threads = 64;
std::vector<double> src;
src.resize(32768 * 1025 * 2);
// Fill with 1.0, 2.0, 3.0, ...
std::iota(src.begin(), src.end(), 1.);
std::array<double, num_threads> output = {0.};
std::cout<<"size:"<<src.size()<<std::endl;
std::cout<<"Memory:"<<src.size()*8/1e9<<"GB\n";
double* d_src = alloc_and_copy_to_device<double>(src.data(), src.size());
double* d_output = alloc_and_copy_to_device<double>(output.data(), output.size());
// ~1.05M blocks of 64 threads; one thread per source element.
size_t num_blocks = src.size()/num_threads;
atomic_addition<<<num_blocks, num_threads>>>(d_src, d_output, src.size());
// No launch-status check here; errors would surface via the copy below.
cudaDeviceSynchronize();
copy_to_host(output.data(), d_output, output.size());
// (signed int i vs size_t output.size(): benign for 64 elements)
for(int i = 0; i < output.size(); ++i)
printf("output %i, %e\n", i, output[i]);
cudaFree(d_src);
cudaFree(d_output);
return 0;
}
|
7,761 | /* Furthest point sampling GPU implementation
* Author Zhaoyu SU
* All Rights Reserved. Sep., 2019.
* Happy Mid-Autumn Festival! :)
*/
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <iostream>
#define USECPSEC 1000000ULL
// For every input point and every anchor, finds the ground-truth bbox (if any)
// that contains the anchor-elevated point, and writes the matched bbox
// parameters, a foreground/ignore/background label, and the bbox index.
// Outputs default to a dummy 0.1-sized bbox, conf 0 and label -1.
// label_bbox layout per box: [w, l, h, x, y, z, r, cls, diff_idx].
__global__ void get_bev_gt_bbox_gpu_kernel(int batch_size, int npoint, int nbbox, int bbox_attr,
int num_anchor, int anchor_attr,
int diff_thres, int cls_thres, float expand_ratio,
const float* input_coors,
const float* label_bbox,
const int* input_num_list,
const float* anchor_param_list,
int* input_accu_list,
float* gt_bbox,
int* gt_conf,
int* label_idx) {
// Degenerate-shape guard: nothing to do.
if (batch_size * nbbox * bbox_attr <=0 || npoint <=0) {
// printf("Get RoI Logits Op exited unexpectedly.\n");
return;
}
// Thread (0,0) of block 0 builds the per-batch prefix sums of point counts.
// NOTE(review): __syncthreads() below only synchronizes within a block, so
// blocks other than block 0 may read input_accu_list before it is written —
// this is a cross-block race; the prefix sum should be computed on the host
// or in a separate kernel. Confirm with the calling code.
if (threadIdx.x == 0 && blockIdx.x == 0) {
input_accu_list[0] = 0;
for (int b=1; b<batch_size; b++) {
input_accu_list[b] = input_accu_list[b-1] + input_num_list[b-1];
}
}
__syncthreads();
// printf("%d\n", input_accu_list[5]);
// Grid-stride over batches; block-stride over points within a batch.
for (int b=blockIdx.x; b<batch_size; b+=gridDim.x) {
for (int i=threadIdx.x; i<input_num_list[b]; i+=blockDim.x) {
for (int k=0; k<num_anchor; k++) {
// Point is 2-D (x, y); z comes from the anchor's height parameter.
float point_x = input_coors[input_accu_list[b]*2 + i*2 + 0];
float point_y = input_coors[input_accu_list[b]*2 + i*2 + 1];
float point_z = anchor_param_list[k*anchor_attr + 3];
// Defaults: tiny dummy bbox, background conf, no label.
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 0] = 0.1;
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 1] = 0.1;
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 2] = 0.1;
gt_conf[input_accu_list[b]*num_anchor + i*num_anchor + k] = 0;
label_idx[input_accu_list[b]*num_anchor + i*num_anchor + k] = -1;
for (int j=0; j<nbbox; j++) {
// [w, l, h, x, y, z, r, cls, diff_idx]
// 0 1 2 3 4 5 6 7 8
float bbox_w = label_bbox[b*nbbox*bbox_attr + j*bbox_attr + 0];
float bbox_l = label_bbox[b*nbbox*bbox_attr + j*bbox_attr + 1];
float bbox_h = label_bbox[b*nbbox*bbox_attr + j*bbox_attr + 2];
float bbox_x = label_bbox[b*nbbox*bbox_attr + j*bbox_attr + 3];
float bbox_y = label_bbox[b*nbbox*bbox_attr + j*bbox_attr + 4];
float bbox_z = label_bbox[b*nbbox*bbox_attr + j*bbox_attr + 5];
float bbox_r = label_bbox[b*nbbox*bbox_attr + j*bbox_attr + 6];
float bbox_cls = label_bbox[b*nbbox*bbox_attr + j*bbox_attr + 7];
float bbox_diff = label_bbox[b*nbbox*bbox_attr + j*bbox_attr + 8];
// printf("bbox:[%.2f,%.2f,%.2f], point:[%.2f,%.2f,%.2f]\n",bbox_w,bbox_l,bbox_h,point_x,point_y,point_z);
// Zero-volume rows are padding — skip them.
if (bbox_l*bbox_h*bbox_w > 0) {
// Rotate the point into the bbox frame and test containment
// against the (optionally expanded) half-extents.
float rel_point_x = point_x - bbox_x;
float rel_point_y = point_y - bbox_y;
float rel_point_z = point_z - bbox_z;
float rot_rel_point_x = rel_point_x*cosf(bbox_r) + rel_point_y*sinf(bbox_r);
float rot_rel_point_y = -rel_point_x*sinf(bbox_r) + rel_point_y*cosf(bbox_r);
if (abs(rot_rel_point_x)<=bbox_w * (1 + expand_ratio) / 2 &&
abs(rot_rel_point_y)<=bbox_l * (1 + expand_ratio) / 2 &&
abs(rel_point_z)<=bbox_h * (1 + expand_ratio) / 2) {
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 0] = bbox_w;
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 1] = bbox_l;
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 2] = bbox_h;
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 3] = bbox_x;
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 4] = bbox_y;
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 5] = bbox_z;
gt_bbox[input_accu_list[b]*num_anchor*7 + i*num_anchor*7 + k*7 + 6] = bbox_r;
if (bbox_diff <= diff_thres && bbox_cls <= cls_thres) {
// Here we only take cars into consideration, while vans are excluded and give the foreground labels as -1 (ignored).
// TODO: need to change the category class accordingly to the expected detection target.
gt_conf[input_accu_list[b]*num_anchor + i*num_anchor + k] = 1;
label_idx[input_accu_list[b]*num_anchor + i*num_anchor + k] = b * nbbox + j;
}else{
// Matched a box that fails the class/difficulty filter: ignore.
gt_conf[input_accu_list[b]*num_anchor + i*num_anchor + k] = -1;
}
}
}
}
}
}
}
}
// Microseconds elapsed since `start`; pass 0 to obtain the current
// epoch time in microseconds.
long long dtime_usec(unsigned long long start){
    struct timeval now;
    gettimeofday(&now, 0);
    unsigned long long now_usec =
        (unsigned long long)now.tv_sec * 1000000ULL + (unsigned long long)now.tv_usec;
    return (long long)(now_usec - start);
}
// Host wrapper: launches get_bev_gt_bbox_gpu_kernel on a fixed 32x512
// grid and (optionally) times the launch. Parameters are forwarded
// unchanged to the kernel.
// NOTE(review): the measured time only covers the asynchronous launch —
// there is no cudaDeviceSynchronize() before the second timestamp — and
// the result is currently unused (print is commented out).
void get_bev_gt_bbox_gpu_launcher(int batch_size, int npoint, int nbbox, int bbox_attr,
int num_anchor, int anchor_attr,
int diff_thres, int cls_thres, float expand_ratio,
const float* input_coors,
const float* label_bbox,
const int* input_num_list,
const float* anchor_param_list,
int* input_accu_list,
float* gt_bbox,
int* gt_conf,
int* label_idx) {
long long dt = dtime_usec(0);
get_bev_gt_bbox_gpu_kernel<<<32,512>>>(batch_size, npoint, nbbox, bbox_attr,
num_anchor, anchor_attr,
diff_thres, cls_thres, expand_ratio,
input_coors,
label_bbox,
input_num_list,
anchor_param_list,
input_accu_list,
gt_bbox,
gt_conf,
label_idx);
dt = dtime_usec(dt);
// std::cout << "Voxel Sample (forward) CUDA time: " << dt/(float)USECPSEC << "s" << std::endl;
}
|
7,762 | #include "cuda.h"
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
// Current wall-clock (epoch) time in milliseconds, with microsecond
// resolution from gettimeofday.
inline double gettime_ms() {
    struct timeval now;
    gettimeofday(&now, NULL);
    double seconds = (double)now.tv_sec + (double)now.tv_usec * 1e-6;
    return seconds * 1000;
}
// Memory-stress kernel: each thread grid-strides over the N-element array
// ten times, gathering arr at pseudo-random offsets (driven by rdom_arr)
// and accumulating into a running sum that is scattered into result.
// The scatter exists to keep the loads from being optimized away; writes
// to result may race between threads (benchmark-only output).
__global__ void mem_kernel(int *arr, int N, int *rdom_arr, int *result) {
int tem=0;
int random;
int tid=blockIdx.x*blockDim.x+threadIdx.x;
int totalthreads=blockDim.x*gridDim.x;
for(int i=0;i<10;++i){
// Grid-stride loop over the array.
for(int j=tid ;j<N; j+=totalthreads){
random = rdom_arr[j];
// Randomized gather address, wrapped to stay in bounds.
int idx=(j+random)%N;
tem += arr[idx];
result[random%N]=tem;
}
}
//write result
}
// Benchmark driver: parses the six launch parameters, builds an array of
// ones plus a random-offset table, runs mem_kernel once, and appends the
// kernel wall time to gpu_result.txt.
int main(int argc, char **argv){
//input: Size N, Utilization U, Affinity A, BlockNum B, ThreadsNum T, Randomizer X
int N, U, A, B, T, X;
if (argc!=7) {
printf("\nInput arguments wrong!\n input: Size N, Utilization U, Affinity A, BlockNum B, ThreadsNum T, Random X \n ");
return 0;
}
N=atoi(argv[1]);
// NOTE(review): U is declared int but parsed with atof — the double is
// silently truncated; atoi would state the intent. U/A are otherwise unused here.
U=atof(argv[2]);
A=atoi(argv[3]);
B=atoi(argv[4]);
T=atoi(argv[5]);
X=atoi(argv[6]);
printf("\ninput: Size N:%d, Utilization U:%d, Affinity A:%d, BlockNum B:%d, ThreadsNum T:%d, Random X:%d \n ",N,U,A,B,T,X);
if(N%32!=0) {
printf("\nArray size N has to be multiple of 32\n");
return 0;
}
cudaSetDevice(0);
srand(0);
// Input array: all ones, so the accumulated sums are predictable.
int *array_h=(int *)malloc(N*sizeof(int));
for (int i=0;i<N;++i) array_h[i]=1;
int *array_d;
cudaMalloc(&array_d,N*sizeof(int));
cudaMemcpy(array_d,array_h,N*sizeof(int),cudaMemcpyHostToDevice);
int *rdom_arr_h=(int *)malloc(N*sizeof(int));
for (int i=0;i<N;++i) rdom_arr_h[i]= rand() % X; //generate random number in range [0, X)
int *rdom_arr_d;
cudaMalloc(&rdom_arr_d,N*sizeof(int));
cudaMemcpy(rdom_arr_d,rdom_arr_h,N*sizeof(int),cudaMemcpyHostToDevice);
int *result_h=(int *)malloc(N*sizeof(int));
int *result_d;
cudaMalloc(&result_d,N*sizeof(int));
// Time the kernel; cudaDeviceSynchronize makes the async launch visible
// to the host-side clock.
double ktime=gettime_ms();
mem_kernel<<<B,T>>>(array_d,N,rdom_arr_d,result_d);
cudaDeviceSynchronize();
ktime=gettime_ms()-ktime;
FILE* fp=fopen("gpu_result.txt","a+");
fprintf(fp,"%f ",ktime);
printf("Kernel time:%f \n",ktime);
fclose(fp);
cudaMemcpy(result_h,result_d,N*sizeof(int),cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
printf("results:\n");
for(int i=0;i<10;++i){
printf("%d ",result_h[i]);
}
printf("\n");
free(array_h);
free(rdom_arr_h);
free(result_h);
cudaFree(array_d);
cudaFree(rdom_arr_d);
cudaFree(result_d);
return 0;
}
|
// Fill an int array with `constant`, `numberIterations` contiguous entries
// per thread; each thread owns [first, first + numberIterations) clipped
// to numberEntries.
__global__ void fillOneIntegerArrayKernel(
    int numberEntries,
    int numberIterations,
    int* array,
    int constant) {
    int first = (blockIdx.x * blockDim.x + threadIdx.x) * numberIterations;
    int last = min(first + numberIterations, numberEntries);
    for (int i = first; i < last; i++) {
        array[i] = constant;
    }
}
7,764 | //#ifndef _MATRIXMUL_KERNEL_H_
//#define _MATRIXMUL_KERNEL_H_
/*
(define (gpu-info)
(let* ([info (cuGPUinfo)])
(values (gridDim-x info) ......)))
(: cpyTestDrv_kernel ((Listof Float) Integer -> (Listof Float) (Listof Integer) Integer)
(define (cpyTestDrv_kernel d_array_in d_single_in)
(let*-values ([(d_array_out) (take d_array_in 0)]
[(memstruct) (gpu-info)]
[(d_single_out) d_single_in])
(values d_array_out memstruct d_single_out)))
*/
extern "C"
/* Signature:
float* d_array_in, int count, uint single_in
->
float* d_array_out, int* memstruct, uint* single_out
*/
// Diagnostic kernel: copies one scalar, dumps the 12 built-in grid/block/
// thread coordinates into memstruct, and fills d_array_out.
__global__ void
cpyTestDrv_kernel(float* d_array_out, float* d_array_in, int count,
unsigned int* memstruct,
unsigned int* d_single_out, unsigned int d_single_in)
{
// copy - single value
*d_single_out = d_single_in;
// copy of default variables
unsigned int gdm_x = gridDim.x;
unsigned int gdm_y = gridDim.y;
unsigned int gdm_z = gridDim.z;
unsigned int bdm_x = blockDim.x;
unsigned int bdm_y = blockDim.y;
unsigned int bdm_z = blockDim.z;
unsigned int bid_x = blockIdx.x;
unsigned int bid_y = blockIdx.y;
unsigned int bid_z = blockIdx.z;
unsigned int tid_x = threadIdx.x;
unsigned int tid_y = threadIdx.y;
unsigned int tid_z = threadIdx.z;
*memstruct = gdm_x;
*(memstruct+1) = gdm_y;
*(memstruct+2) = gdm_z;
*(memstruct+3) = bdm_x;
*(memstruct+4) = bdm_y;
*(memstruct+5) = bdm_z;
*(memstruct+6) = bid_x;
*(memstruct+7) = bid_y;
*(memstruct+8) = bid_z;
*(memstruct+9) = tid_x;
*(memstruct+10) = tid_y;
*(memstruct+11) = tid_z;
// copy of array variables
// NOTE(review): the first store is immediately overwritten by the second,
// so d_array_out receives the index sequence 0..count-1, not a copy of
// d_array_in — confirm which of the two behaviors is intended.
for(int j = 0 ; j < count ; j++)
{
*(d_array_out+j) = d_array_in[j];
*(d_array_out+j) = j;
}
}
|
7,765 | #include "includes.h"
// Azimuthal (theta) advection update: for each (radial i, azimuthal j)
// cell, accumulates into Qbase the net flux difference between azimuthal
// face j and its wrap-around neighbor (j+1)%nsec, scaled by the cell's
// radial extent, the timestep, and the inverse cell surface.
// One thread per cell; 2-D launch with j along x and i along y.
__global__ void VanLeerThetaKernel (double *Rsup, double *Rinf, double *Surf, double dt, int nrad, int nsec, int UniformTransport, int *NoSplitAdvection, double *QRStar, double *DensStar, double *Vazimutal_d, double *Qbase)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double dxrad, invsurf, varq;
if (i<nrad && j<nsec){
// Only update rings where split advection applies (NO presumably comes
// from includes.h — verify its value against the caller).
if ((UniformTransport == NO) || (NoSplitAdvection[i] == NO)){
dxrad = (Rsup[i]-Rinf[i])*dt;
invsurf = 1.0/Surf[i];
// Flux in at face j minus flux out at the (periodic) next face.
varq = dxrad*QRStar[i*nsec + j]*DensStar[i*nsec + j]*Vazimutal_d[i*nsec + j];
varq -= dxrad*QRStar[i*nsec + (j+1)%nsec]*DensStar[i*nsec + (j+1)%nsec]*Vazimutal_d[i*nsec + (j+1)%nsec];
Qbase[i*nsec + j] += varq*invsurf;
}
}
}
7,766 | #include "includes.h"
// No-op placeholder kernel: the cache-hint load exercises below are kept
// for reference but currently disabled.
__global__ void loadOP() {
    /*output[0] = Load<DF>(input);
    output[1] = Load<CA>(input + 1);
    output[2] = Load<CG>(input + 2);
    output[3] = Load<CS>(input + 3);
    output[4] = Load<CV>(input + 4);
    output[5] = Load<NC>(input + 5);
    output[6] = Load<NC_CA>(input + 6);
    output[7] = Load<NC_CG>(input + 7);
    output[8] = Load<NC_CS>(input + 8);*/
}
// Increment each element of n by one, one element per thread (1-D block).
// Bug fix: the original declared the index as `T i`, which fails to
// compile when T is a floating-point type (array subscripts must be
// integral); the index is now always an unsigned int.
template <typename T>
__global__ void add1(T * n){
    unsigned int i = threadIdx.x;
    n[i] += 1;
}
7,768 | #include "includes.h"
// One Jacobi-style propagation step over the graph: each node takes the
// maximum value held by any neighbor in the same aggregate. Sets
// incomplete[0] = 1 whenever any node changed, so the host can iterate
// until a fixed point. Reads valuesIn, writes valuesOut (double buffered).
__global__ void checkAggregationFillAggregates(int size, int *adjIndices, int *adjacency, int* aggregation, int* valuesIn, int* valuesOut, int* incomplete) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size)
{
// Find the currently marked distance
int currentVal = valuesIn[idx];
int currentAgg = aggregation[idx];
// Checking if any neighbors have a better value
// CSR-style adjacency: neighbors of idx live in adjacency[start..end).
int start = adjIndices[idx];
int end = adjIndices[idx + 1];
for (int i = start; i < end; i++)
{
int neighborAgg = aggregation[adjacency[i]];
int neighborVal = valuesIn[adjacency[i]];
if (neighborAgg == currentAgg && neighborVal > currentVal)
{
currentVal = neighborVal;
// Benign race: many threads may set this flag; all write 1.
incomplete[0] = 1;
}
}
// Write out the distance to the output vector:
valuesOut[idx] = currentVal;
}
}
extern "C" {
// Device-side helper: returns the sum of two floats.
__device__ float add(float a, float b)
{
    float sum = a + b;
    return sum;
}
}
|
7,770 | //********************************************************************************************************
#include <cstdio>// a simple matrix matrix multiplication in CUDA
#include <iostream>
#include <fstream>
#include <vector>
#include <cmath>
using namespace std;
//#ifdef DOLOG
//#define LOG(msg) std::cerr<<msg<<std::endl
#define LOG(msg) fprintf(stderr, msg "\n");
//#else
//#define LOG(msg)
//#endif
// host code for validating last cuda operation (not kernel launch)
//using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// CPU matrix multiplication: code(the normal way to mulitiply matrices)
// Intended 3x3 matrix multiply C_O = A * B.
// NOTE(review): the thread coordinates only gate entry — inside the guard
// every qualifying thread redundantly recomputes the entire 3x3 product
// and races on the same C_O cells (same values, so results coincide).
// NOTE(review): A/B/C_O are declared int** (pointer tables) but the host
// in this file allocates and copies flat int buffers into them, so
// A[i][k] dereferences matrix data as pointers — undefined behavior;
// confirm intended layout before use.
__global__
void multiply (int width,int height ,int firstCol ,int** A,int** B,int** C,int** A_O,int** B_O, int** C_O)
{
int x_idx = threadIdx.x + blockIdx.x * blockDim.x;
int y_idx = threadIdx.y + blockIdx.y * blockDim.y;
if (x_idx < width && y_idx <height ) {
//int temp = 0;
for (int i = 0; i <3; i++)
{
//printf ("i= %d", i);
//printf ("\n");
for (int j = 0; j <3; j++)
{
//printf ("j= %d", j);
//printf ("\n");
int temp = 0;
//printf ("temp= %d", temp);
//printf ("\n");
for (int k = 0; k < firstCol; k++)
{
//printf ("k= %d", k);
// printf ("\n");
temp += A[i][k] * B[k][j]; //C is fixed while A is rowWise and B is columnWise
// Partial sum stored every iteration; final k wins.
C_O[i][j] =temp ;
//printf ("Cij= %d", temp);
//printf ("\n");
}
}
}
}
}
//**************************************************************************************
// Debug kernel: the per-element printout is commented out, so each thread
// simply emits three blank lines.
__global__ void display (int width, int height,int** A,int** B,int** C ){
    for (int row = 0; row < 3; row++)
    {
        for (int col = 0; col < 3; col++)
        {
            //printf ("%d\t", C[row][col]);
        }
        printf ("\n");
    }
}
//***********************************************************************************
// Driver for the 3x3 GPU matrix multiply.
// NOTE(review): the device buffers are declared int** but allocated and
// copied as flat width*height int arrays; the multiply kernel then indexes
// them as pointer tables (A[i][k]) — this mismatch is undefined behavior.
// Either flatten the kernel's indexing or build real row-pointer tables.
int main ()
{
int width, height;
//vector<vector<int> > A { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
//vector<vector<int> > B { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
//vector<vector<int> > C { {0, 0, 0}, {0, 0, 0}, {0, 0, 0} };
int A[][3]= { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
int B[][3]= { {1, 2, 3}, {4, 5, 6}, {7, 8, 9} };
int C[][3]= { {0, 0, 0}, {0, 0, 0}, {0, 0, 0} };
//**********************************************************************************
//Memory allocaction
width=3;
height=3;
int firstCol= 3;
int **d_A, **d_B, **d_C; //allocate memory on device
//copy matrix to GPU
gpuErrchk(cudaMalloc((void**)&d_A, (width)*(height)*sizeof(int)));
gpuErrchk(cudaMalloc((void**)&d_B, (width)*(height)*sizeof(int)));
gpuErrchk(cudaMalloc((void**)&d_C, (width)*(height)*sizeof(int)));
//same
int **A_O,**B_O,**C_O;
gpuErrchk(cudaMalloc((void**)&A_O,(width)*(height)*sizeof(int)));
gpuErrchk(cudaMalloc((void**)&B_O,(width)*(height)*sizeof(int)));
gpuErrchk(cudaMalloc((void**)&C_O,(width)*(height)*sizeof(int)));
gpuErrchk(cudaMemcpy(d_A, &A[0][0],(width)*(height)*sizeof(int) , cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_B, &B[0][0],(width)*(height)*sizeof(int) , cudaMemcpyHostToDevice));
gpuErrchk(cudaMemcpy(d_C, &C[0][0],(width)*(height)*sizeof(int) , cudaMemcpyHostToDevice));
//printf ("matrix A= %d", A);
//printf ("matrix B= %d", B);
//call kernel
// 16x16 threads per block; ceil-div grid over the 3x3 problem.
dim3 tpb(16,16);
dim3 bpg((width+tpb.x-1)/tpb.x, (height+tpb.y-1)/tpb.y);
multiply<<<bpg,tpb>>>(width, height , firstCol ,d_A,d_B,d_C, A_O, B_O, C_O);
display<<<bpg,tpb>>>(width,height,d_A,d_B,d_C);
//copy matrix back to CPU
//gpuErrchk(cudaMemcpy(&A[0][0], d_A, (width)*(height)*sizeof(int), cudaMemcpyDeviceToHost));
//gpuErrchk(cudaMemcpy(&B[0][0], d_B, (width)*(height)*sizeof(int), cudaMemcpyDeviceToHost));
gpuErrchk(cudaMemcpy(&C[0][0], (void**)C_O, (width)*(height)*sizeof(int), cudaMemcpyDeviceToHost));
cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);cudaFree(A_O);cudaFree(B_O);cudaFree(C_O);
return 0;
}
7,771 | #include "assignmentHPC1.cuh"
#include <iostream>
#include <cstdlib>
#include <chrono>
#include <cmath>
using namespace std;
using namespace std::chrono;
// Population standard deviation of arr_host[0..N-1], computed serially.
// Returns 0.0 for an empty array (the original divided by zero when N == 0).
double find_sd_cpu(double *arr_host, unsigned int N) {
    if (N == 0) {
        return 0.0;
    }
    // First pass: arithmetic mean.
    double mean = 0.0;
    for(unsigned int i = 0; i < N; i++) {
        mean += arr_host[i];
    }
    mean /= N;
    // Second pass: sum of squared deviations; plain multiply instead of
    // the original pow(x, 2) call.
    double sq_sum = 0.0;
    for(unsigned int i = 0; i < N; i++) {
        double diff = arr_host[i] - mean;
        sq_sum += diff * diff;
    }
    return sqrt(sq_sum / N);
}
// Times the serial standard-deviation computation and prints the result
// plus elapsed milliseconds to stdout.
void find_sd(double *arr_host, unsigned int N) {
    // ----------------------------------------- CPU Code -------------------------------------------------
    auto t0 = high_resolution_clock::now();
    double sd = find_sd_cpu(arr_host, N);
    auto t1 = high_resolution_clock::now();
    auto elapsed_ms = duration_cast<microseconds>(t1 - t0).count()/1000;
    cout<<"\n\n--------------- CPU ---------------\n"<<endl;
    cout<<"Answer CPU : "<<sd<<endl;
    cout<<"\nTime on CPU : "<<elapsed_ms<<" milli seconds\n\n"<<endl;
}
7,772 | #include<iostream>
#include<fstream>
#include<math.h>
#include<string>
#include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
using namespace std;
//#define length 8
#define PI 3.14159265
#define length 8
#define block_len 16
cudaError_t dctWithCuda_1(const double *d, double *D);
cudaError_t dctWithCuda_2(const double *f, double *F);
/*__global__ void dct1(float *f, float *F){
int tidy = blockIdx.x*blockDim.x + threadIdx.x;
int tidx = blockIdx.y*blockDim.y + threadIdx.y;
int index = tidx*len + tidy;
float tmp;
float beta,alfa;
if(tidx == 0)
beta = sqrt(1.0/length);
else
beta = sqrt(2.0/length);
if(tidy == 0)
alfa = sqrt(1.0/length);
else
alfa = sqrt(2.0/length);
if(tidx<length && tidy<length){
for(i=0; i<length; i++){
int x = i/length;
int y = i%length;
tmp+=((int)data[i])*cos((2*x+1)*tidx*PI/(2.0*length))*
cos((2*y+1)*tidy*PI/(2.0*length));
}
F[index]=(float)alfa*beta*tmp;
}
}*/
// Row-wise 1-D DCT applied twice (intended as a separable 2-D DCT); one
// block per row, single thread per block (launched <<<length,1>>>).
// NOTE(review): several indexing issues look like bugs — confirm against a
// reference DCT before relying on the output:
//  * the outer `for(i...)` loop's variable is re-used by the inner
//    `for(i=1...)` loop in the else branch, so the outer loop effectively
//    runs once;
//  * the reload below uses F[bid*length+1] (constant column 1) where
//    F[bid*length+i] was presumably intended;
//  * the second DC pass accumulates data[i] inside the j loop instead of
//    data[j].
__global__ void dct_1(const double *f,double *F){
int bid = blockIdx.x;
//int tid = threadIdx.x;
int i,j;
//double data[length]={0.0};
double tmp;
if(bid<length){
__shared__ double data[length];
for (i=0; i<length; i++)
data[i] = f[bid*length+i];//load row data from f.
__syncthreads();
for(i=0; i<length; i++){
if(i==0){
// DC coefficient: 1/sqrt(length) scaling.
tmp = (double)(1.0/sqrt(1.0*length));
F[bid] = 0;//why use F[bid]? Do transpose at the same time.
for(j=0; j<length; j++)
F[bid] +=data[j] ;
F[bid] *= tmp;
}
else{
// AC coefficients: sqrt(2/length) scaling.
tmp = (double)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++){
F[i*length+bid] = 0;
for(j=0; j<length; j++)
F[i*length+bid] += (double)(data[j]*cos((2*j+1)*i*PI/(2*length)));
F[i*length+bid] *= tmp;
}
}
}
__syncthreads();
// Reload (transposed) intermediate row for the second pass.
for(i=0; i<length; i++)
data[i] = F[bid*length+1];
__syncthreads();
for(i=0; i<length; i++){
if(i==0){
tmp=(double)(1.0/sqrt(1.0*length));
F[bid]=0;
for(j=0; j<length; j++)
F[bid] += data[i];
F[bid] *= tmp;
}
else{
tmp = (double)(sqrt(2.0/(1.0*length)));
for(i=1; i<length; i++){
F[i*length+bid] = 0;
for(j=0; j<length; j++)
F[i*length+bid] += (double)(data[j]*cos((2*j+1)*i*PI/(2*length)));
F[i*length+bid] *= tmp;
}
}
}
__syncthreads();
}
}
// Direct 2-D DCT-II: each thread computes one output coefficient
// F[tidx][tidy] = alfa * beta * sum_{x,y} f[x][y] *
//                 cos((2x+1)*tidx*pi/2L) * cos((2y+1)*tidy*pi/2L)
// over the full length x length input tile.
__global__ void dct_2(const double *f, const double *F_unused, double *F){
}
__global__ void dct_2(const double *f, double *F){
    int tidy = blockIdx.x*blockDim.x + threadIdx.x;
    int tidx = blockIdx.y*blockDim.y + threadIdx.y;
    int index = tidx*length + tidy;
    int i;
    // Bug fix: the accumulator was declared uninitialized and then read
    // with `+=`, producing garbage; it must start at zero.
    double tmp = 0.0;
    double beta, alfa;
    // Normalization factors: 1/sqrt(L) for the DC row/column, sqrt(2/L)
    // otherwise.
    if(tidx == 0)
        beta = sqrt(1.0/length);
    else
        beta = sqrt(2.0/length);
    if(tidy == 0)
        alfa = sqrt(1.0/length);
    else
        alfa = sqrt(2.0/length);
    if(tidx<length && tidy<length){
        // Flat loop over all length*length input samples.
        for(i=0; i<length*length; i++){
            int x = i/length;
            int y = i%length;
            tmp += ((double)f[i])*cos((2*x+1)*tidx*PI/(2.0*length))*
                cos((2*y+1)*tidy*PI/(2.0*length));
        }
        F[index]=(double)alfa * beta * tmp;
    }
}
// Loads length*length samples from a text file (one value per line), runs
// the GPU DCT via dctWithCuda_1, and prints input and output matrices
// before and after.
int main(){
ifstream infile("/home/zhujian/cuda-workspace/dct_10.16/gradient.txt");
int i=0;
string line;
double f[length*length] = {0.0};
double F[length*length] = {0.0};
// One sample per line; missing lines leave zeros in place.
while(i<length*length){
if(getline(infile, line)){
f[i] = atof(line.c_str());
cout<<"f[i]: "<<f[i]<<endl;
}
i++;
}
cout<<"before"<<endl;
for(i=0; i<length*length; i++){
cout<<f[i]<<" ";
if ((i+1)%length==0)
cout<<endl;
}
cout<<endl;
for(i=0; i<length*length; i++){
cout<<F[i]<<" ";
if ((i+1)%length==0)
cout<<endl;
}
cudaError_t cudaStatus = dctWithCuda_1(f,F);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "dctWithCuda_1 failed!");
return 1;
}
cout<<"after"<<endl;
for(i=0; i<length*length; i++){
cout<<f[i]<<" ";
if ((i+1)%length==0)
cout<<endl;
}
cout<<endl;
for(i=0; i<length*length; i++){
cout<<F[i]<<" ";
if ((i+1)%length==0)
cout<<endl;
}
return 0;
}
// Host wrapper for dct_1: allocates length*length device buffers, copies
// the input, launches one single-thread block per row, synchronizes, and
// copies the result back. Returns the first CUDA error encountered
// (cudaSuccess on success); buffers are freed on all paths via the
// goto-Error cleanup pattern.
cudaError_t dctWithCuda_1(const double *d, double *D){
double *dev_d = 0;
double *dev_D = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_d,length *length* sizeof(double));
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_D,length *length* sizeof(double));
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
//copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_d, d,length *length*sizeof(double),cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMemcpy-- failed");
goto Error;
}
//launch a kernel on the GPU
// One block per row, one thread per block (dct_1 is serial per row).
dct_1<<<length,1>>>(dev_d, dev_D);
// NOTE: cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
// modern equivalent.
cudaStatus = cudaThreadSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(D, dev_D, length*length* sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_d);
cudaFree(dev_D);
return cudaStatus;
}
// Host wrapper for dct_2.
// NOTE(review): several size mismatches look like bugs — confirm before
// use: the device buffers are allocated with only `length` doubles but the
// kernel and the final D2H copy use length*length elements (out-of-bounds);
// and with length=8, block_len=16 the launch config evaluates to
// <<<1, (8/16)*(8/16)>>> == <<<1, 0>>>, i.e. zero threads.
cudaError_t dctWithCuda_2(const double *d, double *D){
double *dev_d = 0;
double *dev_D = 0;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0);
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_d,length * sizeof(double));
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_D,length * sizeof(double));
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
//copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_d, d,length *sizeof(double),cudaMemcpyHostToDevice);
if(cudaStatus != cudaSuccess){
fprintf(stderr, "cudaMalloc failed");
goto Error;
}
//launch a kernel on the GPU
dct_2<<<1, (length/block_len)*(length/block_len), block_len*block_len>>>(dev_d, dev_D);
cudaStatus = cudaThreadSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaThreadSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
cudaStatus = cudaMemcpy(D, dev_D, length*length * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_d);
cudaFree(dev_D);
return cudaStatus;
}
|
7,773 | #include <stdio.h>
/* -------- KERNEL -------- */
/* -------- KERNEL -------- */
// Tree reduction in GLOBAL memory: each block folds its span of d_in in
// half log2(blockDim.x) times (modifying d_in in place), then thread 0
// writes the block's partial sum to d_out[blockIdx.x].
__global__ void reduce_kernel(int * d_out, int * d_in, int size)
{
// position and threadId
int pos = blockIdx.x * blockDim.x + threadIdx.x;
int tid = threadIdx.x;
// do reduction in global memory
for (unsigned int s = blockDim.x / 2; s>0; s>>=1)
{
if (tid < s)
{
if (pos+s < size) // Handling out of bounds
{
d_in[pos] = d_in[pos] + d_in[pos+s];
}
}
// Barrier after each halving step; reached by all threads (the guard
// is outside the barrier), as required.
__syncthreads();
}
// only thread 0 writes result, as thread
if ((tid==0) && (pos < size))
{
d_out[blockIdx.x] = d_in[pos];
}
}
/* -------- KERNEL WRAPPER -------- */
// Iteratively reduces the `size` ints in d_in to a single sum in d_out.
// Each pass launches reduce_kernel to fold every block's span into one
// partial sum, copies the partials back into d_in, and shrinks the
// problem until one block suffices.
// Bug fix: the final d_intermediate allocation was leaked; it is now
// freed after the last kernel launch.
void reduce(int * d_out, int * d_in, int size, int num_threads)
{
    // Scratch buffer holding one partial sum per block (ceil-div grid).
    int num_blocks = (size + num_threads - 1) / num_threads;
    int * d_intermediate;
    cudaMalloc(&d_intermediate, sizeof(int)*num_blocks);
    cudaMemset(d_intermediate, 0, sizeof(int)*num_blocks);
    int prev_num_blocks;
    int i = 1;
    int size_rest = 0;
    // recursively solving, will run approximately log base num_threads times.
    do
    {
        printf("Round:%.d\n", i);
        printf("NumBlocks:%.d\n", num_blocks);
        printf("NumThreads:%.d\n", num_threads);
        printf("size of array:%.d\n", size);
        i++;
        reduce_kernel<<<num_blocks, num_threads>>>(d_intermediate, d_in, size);
        // NOTE(review): this size recurrence is the original's; it is kept
        // as-is to preserve behavior.
        size_rest = size % num_threads;
        size = size / num_threads + size_rest;
        // updating input to intermediate
        cudaMemcpy(d_in, d_intermediate, sizeof(int)*num_blocks, cudaMemcpyDeviceToDevice);
        // Recompute the grid for the shrunken problem.
        prev_num_blocks = num_blocks;
        num_blocks = (size + num_threads - 1) / num_threads;
        // Reallocate the scratch buffer to match the new grid.
        cudaFree(d_intermediate);
        cudaMalloc(&d_intermediate, sizeof(int)*num_blocks);
    }
    while(size > num_threads); // if it is too small, compute rest.
    // computing rest
    reduce_kernel<<<1, size>>>(d_out, d_in, prev_num_blocks);
    // Bug fix: release the last scratch allocation (previously leaked).
    cudaFree(d_intermediate);
}
/* -------- MAIN -------- */
// Builds an array of 2^19 ones on the GPU, reduces it, and prints the sum.
// Bug fix: the original leaked h_in, d_in and d_out and fell off the end
// of main without an explicit return; all three are now released.
int main(int argc, char **argv)
{
    printf("@@STARTING@@ \n");
    // Setting num_threads
    int num_threads = 512;
    // Making non-bogus data and setting it on the GPU
    const int size = 1<<19;
    const int size_out = 1;
    int * d_in;
    int * d_out;
    cudaMalloc(&d_in, sizeof(int)*size);
    cudaMalloc(&d_out, sizeof(int)*size_out);
    int * h_in = (int *)malloc(size*sizeof(int));
    for (int i = 0; i < size; i++) h_in[i] = 1;
    cudaMemcpy(d_in, h_in, sizeof(int)*size, cudaMemcpyHostToDevice);
    // Running kernel wrapper
    reduce(d_out, d_in, size, num_threads);
    int result;
    // Blocking D2H copy also synchronizes with the preceding kernels.
    cudaMemcpy(&result, d_out, sizeof(int), cudaMemcpyDeviceToHost);
    printf("\nFINAL SUM IS: %d\n", result);
    free(h_in);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
7,774 | // Author: Brian Nguyen
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#define N 5000 // number of array elements
#define B 64 // number of elements in a block
__global__ void scan(float *g_odata, float *g_idata, int n);
__global__ void prescan(float *g_odata, float *g_idata, int n, float *g_sums);
__global__ void uniform_add(float *o_array, float *sum_array);
void scanCPU(float *f_out, float *f_in, int i_n);
// Seconds elapsed between two gettimeofday samples, with microsecond
// resolution.
double myDiffTime(struct timeval &start, struct timeval &end) {
    /* Calculate the time difference. */
    double t0 = (double)start.tv_sec + (double)start.tv_usec / 1000000.0;
    double t1 = (double)end.tv_sec + (double)end.tv_usec / 1000000.0;
    return t1 - t0;
}
// Driver for the multi-level GPU prefix sum: pads N up to a multiple of
// B, runs a block-level prescan, scans the per-block sums (two more
// levels), and adds the scanned sums back into the block results; compares
// timing against the serial scanCPU.
int main() {
timeval start, end;
int new_N;
// temporary pointer arrays for computation
float *dev_a, *dev_g, *dev_sums;
double d_gpuTime, d_cpuTime;
// handle padding of the array for non-powers of 2
if ((N % B) != 0) {
new_N = ((N + B - 1) / B) * B;
}
else{
new_N = N;
}
printf("%d", new_N);
//float a[new_N];
float *a;
a = (float *) malloc(new_N * sizeof(float));
// initialize matrix a with random floats between 0 and 1000
// NOTE(review): the comment above is stale — the array is filled with
// 1..N, zero-padded to new_N.
for (int i = 1; i <= N; i++) {
a[i-1] = i;
}
for (int i = N; i < new_N; i++) {
a[i] = 0;
}
int size = new_N * sizeof(float);
int grid_size = ceil(new_N / B); // size of grids for first prefix-scan
int grid_size2 = ceil(grid_size / B); // size of grids for second prefix-scan
int thread_size = B / 2; // thread size for each block
int size_sums = grid_size * sizeof(float);
int size_sums2 = grid_size2 * sizeof(float);
float c[N], g[new_N], sums[grid_size];
// CPU version (serial) of prefix-sum
gettimeofday(&start, NULL);
scanCPU(c, a, N);
gettimeofday(&end, NULL);
d_cpuTime = myDiffTime(start, end);
// START OF FIRST PRE-SCAN RUN
// initialize a and b matrices here for CUDA
cudaMalloc((void **) &dev_a, size);
cudaMalloc((void **) &dev_g, size);
cudaMalloc((void **) &dev_sums, size_sums);
// GPU version (CUDA) of prefix-sum
gettimeofday(&start, NULL);
cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
// work-efficient scan for SUMS array
prescan<<<grid_size, thread_size, B*sizeof(float)>>>(dev_g, dev_a, new_N, dev_sums);
cudaDeviceSynchronize();
cudaMemcpy(g, dev_g, size, cudaMemcpyDeviceToHost);
cudaMemcpy(sums, dev_sums, size_sums, cudaMemcpyDeviceToHost);
cudaFree(dev_a); cudaFree(dev_g); cudaFree(dev_sums);
// START OF SECOND PRE-SCAN RUN
float inc[grid_size], sums_inc[grid_size2], inc_final[grid_size2];
float *dev_inc, *dev_sums_inc, *dev_inc_final, *dev_sums_input;
cudaMalloc((void **) &dev_sums_input, size_sums);
cudaMalloc((void **) &dev_inc, size_sums);
cudaMalloc((void **) &dev_sums_inc, size_sums2);
cudaMemcpy(dev_sums_input, sums, size_sums, cudaMemcpyHostToDevice);
prescan<<<grid_size2, thread_size, B*sizeof(float)>>>(dev_inc, dev_sums_input, grid_size2, dev_sums_inc);
cudaDeviceSynchronize();
cudaMemcpy(inc, dev_inc, size_sums, cudaMemcpyDeviceToHost);
cudaMemcpy(sums_inc, dev_sums_inc, size_sums2, cudaMemcpyDeviceToHost);
cudaFree(dev_inc); cudaFree(dev_sums_inc); cudaFree(dev_sums_input);
// Third level is small enough to scan on the CPU.
scanCPU(inc_final, sums_inc, size_sums2);
// START OF UPDATING SUMS
float g2[grid_size];
float *dev_g2;
cudaMalloc((void **) &dev_g2, size);
cudaMalloc((void **) &dev_inc_final, size_sums);
cudaMemcpy(dev_inc_final, inc_final, size_sums, cudaMemcpyHostToDevice);
cudaMemcpy(dev_g2, inc, size_sums, cudaMemcpyHostToDevice);
uniform_add<<<grid_size2, thread_size, B*sizeof(float)>>>(dev_g2, dev_inc_final);
cudaDeviceSynchronize();
cudaMemcpy(g2, dev_g2, size, cudaMemcpyDeviceToHost);
cudaFree(dev_g2); cudaFree(dev_inc_final);
// START OF FINAL UPDATE TO FIRST PREFIX SCAN
float g3[new_N];
float *dev_g3, *dev_first_add;
cudaMalloc((void **) &dev_g3, size);
cudaMalloc((void **) &dev_first_add, size_sums);
cudaMemcpy(dev_first_add, g2, size_sums, cudaMemcpyHostToDevice);
cudaMemcpy(dev_g3, g, size, cudaMemcpyHostToDevice);
uniform_add<<<grid_size, thread_size, B*sizeof(float)>>>(dev_g3, dev_first_add);
cudaDeviceSynchronize();
cudaMemcpy(g3, dev_g3, size, cudaMemcpyDeviceToHost);
gettimeofday(&end, NULL);
d_gpuTime = myDiffTime(start, end);
cudaFree(dev_g3); cudaFree(dev_first_add);
free(a);
// display results of the prefix-sum
for (int i = 0; i < N; i++) {
printf("c[%i] = %0.3f, g3[%i] = %0.3f\n", i, c[i], i, g3[i]);
//if (c[i] != g[i])
//{
// printf("Results do not match! c[%i]=%f, g[%i]=%f\n", i, c[i], i, g[i]);
// break;
//}
}
printf("GPU Time for scan size %i: %f\n", N, d_gpuTime);
printf("CPU Time for scan size %i: %f\n", N, d_cpuTime);
return 0;
}
__global__ void scan(float *g_odata, float *g_idata, int n) {
/* CUDA Naive Scan Algorithm (double buffered). */
// Single-block exclusive scan; one element per thread. Requires the
// dynamic shared allocation to hold 2*n floats (two buffers of n).
// NOTE(review): this kernel appears unused by main() in this file.
extern __shared__ float temp[]; // allocated on invocation
int thid = threadIdx.x;
int pout = 0, pin = 1;
// Load input into shared memory.
// This is exclusive scan, so shift right by one
// and set first element to 0
temp[thid] = (thid > 0) ? g_idata[thid-1] : 0;
__syncthreads();
for (int offset = 1; offset < n; offset *= 2) {
pout = 1 - pout; // swap double buffer indices
pin = 1 - pout;
if (thid >= offset)
temp[pout*n+thid] += temp[pin*n+thid - offset];
else
temp[pout*n+thid] = temp[pin*n+thid];
__syncthreads();
}
g_odata[thid] = temp[pout*n+thid]; // write output
}
__global__ void prescan(float *g_odata, float *g_idata, int n, float *g_sums) {
/* CUDA Work-Efficient Scan Algorithm. */
// Blelloch up-sweep/down-sweep exclusive scan over one B-element tile per
// block (two elements per thread, so launch with B/2 threads and
// B*sizeof(float) dynamic shared memory). Each block's total is written
// to g_sums[blockIdx.x] for the next scan level.
extern __shared__ float temp[]; // allocated on invocation
int thid = threadIdx.x; // thread id of a thread in a block
int gthid = (blockIdx.x * blockDim.x) + thid; // global thread id of grid
int offset = 1;
/*
// for each thread in a block, put data into shared memory
if (gthid > n) {
// handle non-power of two arrays by padding elements in last block
temp[2*thid] = 0;
temp[2*thid+1] = 0;
}
else {
// grab data from input array
temp[2*thid] = g_idata[2*gthid];
temp[2*thid+1] = g_idata[2*gthid+1];
}
*/
// Unconditional load: callers pad the input to a multiple of B.
temp[2*thid] = g_idata[2*gthid];
temp[2*thid+1] = g_idata[2*gthid+1];
// build sum in place up the tree
for (int d = B>>1; d > 0; d >>= 1) {
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
}
// Root now holds the block total; stash it and seed the down-sweep with 0.
if (thid == 0) {
g_sums[blockIdx.x] = temp[B - 1];
temp[B - 1] = 0;
}
// clear the last element
// traverse down tree & build scan
for (int d = 1; d < B; d *= 2) {
offset >>= 1;
__syncthreads();
if (thid < d) {
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = temp[ai];
temp[ai] = temp[bi];
temp[bi] += t;
}
}
__syncthreads();
// write results to device memory
g_odata[2*gthid] = temp[2*thid];
g_odata[2*gthid+1] = temp[2*thid+1];
}
// Adds the scanned per-block offset sum_array[blockIdx.x] to both output
// elements owned by each thread (two elements per thread, matching
// prescan's layout).
__global__ void uniform_add(float *o_array, float *sum_array) {
    int block = blockIdx.x;
    int gthid = (block * blockDim.x) + threadIdx.x; // global thread id of grid
    float offset = sum_array[block];
    o_array[2*gthid] += offset;
    o_array[2*gthid+1] += offset;
}
// Serial exclusive prefix sum: f_out[i] = f_in[0] + ... + f_in[i-1],
// with f_out[0] = 0. Reference implementation for the GPU scans.
void scanCPU(float *f_out, float *f_in, int i_n) {
    f_out[0] = 0;
    float acc = 0;
    for (int i = 1; i < i_n; i++) {
        acc += f_in[i-1];
        f_out[i] = acc;
    }
}
|
7,775 | #include <stdio.h>
#include <assert.h>
#include "cuda.h"
#define N 2
// Writes the constant 2 into each thread's slot of p, then synchronizes
// the block (the barrier has no observable effect here — nothing is read
// after it).
__global__ void foo(int* p) {
p[threadIdx.x] = 2;
__syncthreads();
}
|
7,776 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
// Wall-clock (epoch) time in seconds, with microsecond resolution.
double get_walltime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    double secs = (double) now.tv_sec;
    return secs + (double) now.tv_usec * 1e-6;
}
// Appends one timing record (for an input of 2^k elements) to
// "cuda_times.txt"; exits the process if the file cannot be opened.
void file_results(double time_elapsed, int k) {
    FILE *out = fopen("cuda_times.txt", "a");
    if (out == NULL) {
        printf("\nError creating or opening file\n");
        exit(1);
    }
    fprintf(out, "cuda\npower of 2: %d\ntime elapsed: %f\n\n", k, time_elapsed);
    fclose(out);
}
// Conditionally swaps arr[a] and arr[b] so the pair ends up in direction
// `dir` (dir == 1 -> ascending pair, dir == 0 -> descending pair).
void compare_and_swap(int *arr,int a, int b, int dir){
    int out_of_order = (arr[a] > arr[b]);
    if (dir == out_of_order) {
        int held = arr[a];
        arr[a] = arr[b];
        arr[b] = held;
    }
}
// Fills D[0..n-1] with pseudo-random values in [0, 100).
void generate_vector(int *D, int n){
    for (size_t idx = 0; idx < n; idx++) {
        D[idx] = rand() % 100;
    }
}
// Prints the first n elements of D, space-separated, to stdout.
void print_array(int *D, int n) {
    for (size_t idx = 0; idx < n; idx++)
        printf("%d ", D[idx]);
}
__global__
// One compare-exchange step of the bitonic network: thread i pairs with
// partner i^a; the lower index performs the swap. The (i & b) bit decides
// ascending vs descending within the current bitonic sequence.
// NOTE(review): there is no `i < n` guard — correctness relies on the
// launcher starting exactly n threads.
void bitonic_sort_step(int *D, int n, int a, int b){
int i, ixa, temp;
i = threadIdx.x + (blockDim.x * blockIdx.x);
ixa = i^a;
// Only the lower-indexed member of each pair acts, so each pair is
// exchanged exactly once.
if (ixa > i){
if ((i & b) == 0){
if (D[i] > D[ixa]){
temp = D[i];
D[i] = D[ixa];
D[ixa] = temp;
}
}
if ((i & b) != 0){
if (D[i] < D[ixa]){
temp = D[i];
D[i] = D[ixa];
D[ixa] = temp;
}
}
}
//print_array(D, n);
//printf("\t\ti: %d\tixa: %d\ta: %d\tb: %d\n", i, ixa, a, b);
}
// Sorts D (n == 2^k elements) in ascending order with a bitonic sorting
// network on the GPU; one thread per element, 512 threads per block.
void bitonic_sort(int *D, int n, int k){
    int a, b;
    int thread_num=512;
    int block_num=1;
    int *dev_D;
    if (k > 9){
        // One thread per element: 2^k / 512 = 2^(k-9) blocks.
        // Bug fix: the original used pow(2, k-8), launching twice as many
        // threads as elements; bitonic_sort_step has no bounds check, so
        // the extra threads read and write past the end of dev_D.
        block_num = pow(2, k-9);
    }
    // For small inputs, shrink the single block to n threads.
    while (thread_num > n){
        thread_num >>= 1;
    }
    cudaMalloc((void**) &dev_D, sizeof(int) * n);
    cudaMemcpy(dev_D, D, sizeof(int) * n, cudaMemcpyHostToDevice);
    dim3 blocks(block_num);
    dim3 threads(thread_num);
    // b: length of the bitonic sequences being merged; a: compare distance.
    for (b = 2; b <= n; b <<= 1){
        for (a = b>>1; a > 0; a >>= 1){
            bitonic_sort_step<<<blocks, threads>>>(dev_D, n, a, b);
        }
    }
    cudaMemcpy(D, dev_D, sizeof(int) * n, cudaMemcpyDeviceToHost);
    cudaFree(dev_D);
}
// Driver: reads k (array size 2^k) and an optional RNG seed from argv,
// fills the array with random values, times the GPU bitonic sort, prints
// small arrays before/after, and appends the timing to cuda_times.txt.
int main(int argc, char const *argv[]) {
/* code */
int *D; // Array of values
int n, k; // 2^k = n, number of elements in the array
double time0, time1;
if (argc > 1){
k = atoi(argv[1]);
} else {
printf("number of nodes = 2^k\nenter the value of k: \n");
scanf("%d", &k);
}
if (argc > 2){
srand(atoi(argv[2]));
} else {
// Fallback seed: current wall-clock time (truncated to int).
srand(get_walltime());
}
printf("\npopulating array\n");
n = pow(2, k);
D = (int*)malloc(sizeof(int)*n);
generate_vector(D, n);
// Only echo the contents for small arrays (n <= 32).
if (n < 33){
printf("printing array\n\n");
for (size_t i = 0; i < n; i++) {
printf("%d ", D[i]);
}
}
printf("\n\nbegin bitonic sort\n");
time0 = get_walltime();
bitonic_sort(D, n, k);
time1 = get_walltime();
if (n < 33){
printf("finalize bitonic sort\n\n");
for (size_t i = 0; i < n; i++) {
printf("%d ", D[i]);
}
}
printf("\n\ntime elapsed: %f s\n\n ", time1 - time0);
file_results(time1-time0, k);
free(D);
return 0;
}
|
7,777 | #include "includes.h"
// Copyright 2019 Adam Campbell, Seth Hall, Andrew Ensor
// Copyright 2019 High Performance Computing Research Laboratory, Auckland University of Technology (AUT)
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// 3. Neither the name of the copyright holder nor the names of its
// contributors may be used to endorse or promote products derived from this
// software without specific prior written permission.
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
// Product of two complex numbers stored as double2 (.x = real, .y = imag).
__device__ double2 complex_multiply(double2 z1, double2 z2)
{
    double2 product;
    product.x = z1.x * z2.x - z1.y * z2.y;
    product.y = z1.y * z2.x + z1.x * z2.y;
    return product;
}
// Inverse DFT of a batch of visibilities onto a render_size x render_size
// image tile, with w-term correction, accumulated into a pitched grid.
// One thread per output pixel; accumulates batch_count visibilities but
// normalises by the total vis_count (so multiple batches sum correctly).
// NOTE(review): image_correction = sqrt(1 - x^2 - y^2) is NaN when the
// tile extends past the unit image radius — confirm callers keep
// (idx+x_offset)*cell_size within [-1, 1].
__global__ void inverse_dft_with_w_correction(double2 *grid, size_t grid_pitch, const double3 *visibilities, const double2 *vis_intensity, int vis_count, int batch_count, int x_offset, int y_offset, int render_size, double cell_size)
{
// look up id of thread
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
if(idx >= render_size || idy >= render_size)
return;
double real_sum = 0;
double imag_sum = 0;
// convert to x and y image coordinates
double x = (idx+x_offset) * cell_size;
double y = (idy+y_offset) * cell_size;
double2 vis;
double2 theta_complex = make_double2(0.0, 0.0);
// precalculate image correction and wCorrection
double image_correction = sqrt(1.0 - (x * x) - (y * y));
double w_correction = image_correction - 1.0;
// NOTE: below is an approximation... Uncomment if needed
// double wCorrection = -((x*x)+(y*y))/2.0;
// loop through all visibilities and create sum using iDFT formula
for(int i = 0; i < batch_count; ++i)
{
double theta = 2.0 * M_PI * (x * visibilities[i].x + y * visibilities[i].y
+ (w_correction * visibilities[i].z));
// sincos writes cos(theta) into .x and sin(theta) into .y in one call
sincos(theta, &(theta_complex.y), &(theta_complex.x));
vis = complex_multiply(vis_intensity[i], theta_complex);
real_sum += vis.x;
imag_sum += vis.y;
}
// adjust sum by image correction
real_sum *= image_correction;
imag_sum *= image_correction;
// look up destination in image (grid) and divide by amount of visibilities (N)
double2 *row = (double2*)((char*)grid + idy * grid_pitch);
row[idx].x += (real_sum / vis_count);
row[idx].y += (imag_sum / vis_count);
} |
7,778 | #include "includes.h"
// Accumulate one eighth of a into b over an nx-by-ny grid:
// b[x, y] += a[x, y] * .125 for in-range (x, y).
__global__ void add(float *a, float *b, int nx, int ny)
{
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= nx || y >= ny)
        return;
    const int idx = x + y * nx;
    b[idx] += a[idx] * .125;
} |
7,779 | #include<iostream>
using namespace std;
#define THREADS_PER_BLOCK 256
// Block j scans its blockDim.x-sized slice of a_d (clamped to n) with a
// single sequential loop and writes the slice maximum to b_d[j].
// Only meaningful for non-negative inputs: the running max starts at 0.
__global__ void max_per_block(int *a_d, int *b_d, int n){
    int begin = blockDim.x * blockIdx.x;
    int limit = min(begin + blockDim.x, n);
    int best = 0;
    for (int i = begin; i < limit; ++i){
        if (a_d[i] > best)
            best = a_d[i];
    }
    b_d[blockIdx.x] = best;
}
// Reads n, fills 1..n, and reduces to the global maximum on the GPU by
// repeatedly taking per-block maxima until one value remains.
// Fixes: the host array and both device buffers used to leak, and the
// original launch reused the first-round block count on every iteration.
int main() {
    int n;
    cout<<"Enter the no of elements";
    cin>>n;
    int *arr = new int[n];
    for (int i = 0; i < n; i++){
        arr[i] = i + 1;
    }
    int size = n * sizeof(int);
    int max_blocks = (n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    int *arr_d, *b_d;
    cudaMalloc(&arr_d, size);
    cudaMalloc(&b_d, max_blocks * sizeof(int));
    cudaMemcpy(arr_d, arr, size, cudaMemcpyHostToDevice);
    while (n > 1){
        // launch exactly as many blocks as the current round needs
        int blocks = (n + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
        max_per_block<<<blocks, THREADS_PER_BLOCK>>>(arr_d, b_d, n);
        n = blocks;
        cudaMemcpy(arr_d, b_d, n * sizeof(int), cudaMemcpyDeviceToDevice);
    }
    int ans;
    cudaMemcpy(&ans, arr_d, sizeof(int), cudaMemcpyDeviceToHost);
    cout<<ans;
    // fix: release device and host memory (all three used to leak)
    cudaFree(arr_d);
    cudaFree(b_d);
    delete[] arr;
} |
7,780 | #include <iostream>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <cmath>
#define VECTOR_SIZE 4096
#define VECTOR_NUM 4096
#define blocksize 8
#define TILE_WIDTH 16
using namespace std;
// Pairwise Euclidean distance kernel using only global memory:
// c[i * vec_num + j] = ||a_i - b_j||_2, one thread per output element.
// Fix: the bounds check was commented out, so a grid rounded up past
// vec_num read and wrote out of range; out-of-range threads now return.
__global__ void rbf_cuda(const float *a, const float *b, float *c, int vec_size, int vec_num)
{
    // get the thread parameters
    int index_a = blockIdx.y * blockDim.y + threadIdx.y;
    int index_b = blockIdx.x * blockDim.x + threadIdx.x;
    if (index_a >= vec_num || index_b >= vec_num) return;
    float tmp = 0;
    for (int i = 0; i < vec_size; ++i)
    {
        float diff = a[index_a * vec_size + i] - b[index_b * vec_size + i];
        tmp += diff * diff;
    }
    c[index_a * vec_num + index_b] = sqrt(tmp);
}
// Pairwise Euclidean distance kernel with shared-memory tiling.
// Preconditions: vec_size is a multiple of TILE_WIDTH and the launch grid
// exactly tiles vec_num x vec_num with TILE_WIDTH x TILE_WIDTH blocks
// (no bounds guards — a guard here would break the barriers).
// Fix: the b tile used threadIdx.x for BOTH the vector index and the
// element offset, so each thread compared against a mix of the block's
// b-vectors (invisible with the all-equal test data in main).  The tile
// is now loaded transposed: b_s[k][tx] holds element k of vector index_b.
__global__ void rbf_cuda_shareMem(const float *a, const float *b, float *c, int vec_size, int vec_num)
{
    __shared__ float a_s[TILE_WIDTH][TILE_WIDTH];
    __shared__ float b_s[TILE_WIDTH][TILE_WIDTH];
    int index_a = blockIdx.y * blockDim.y + threadIdx.y;
    int index_b = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = threadIdx.y;
    int tx = threadIdx.x;
    float tmp = 0;
    for (int i = 0; i < vec_size / TILE_WIDTH; i++)
    {
        // a tile: row ty holds elements [i*T, i*T+T) of vector index_a
        a_s[ty][tx] = a[index_a * vec_size + i * TILE_WIDTH + tx];
        // b tile (transposed): column tx holds the same element range of
        // vector index_b; this thread contributes element offset ty
        b_s[ty][tx] = b[index_b * vec_size + i * TILE_WIDTH + ty];
        // synchronize threads to avoid data race
        __syncthreads();
        for (int j = 0; j < TILE_WIDTH; j++)
        {
            float diff = a_s[ty][j] - b_s[j][tx];
            tmp += diff * diff;
        }
        __syncthreads();
    }
    c[index_a * vec_num + index_b] = sqrt(tmp);
}
// CPU reference: c[i * VECTOR_NUM + j] = Euclidean distance between row i
// of a and row j of b (both VECTOR_NUM x VECTOR_SIZE, row-major).
void rbf_cpu(float *a, float *b, float *c)
{
    for (int i = 0; i < VECTOR_NUM; i++) {
        const float *row_a = a + i * VECTOR_SIZE;
        for (int j = 0; j < VECTOR_NUM; j++) {
            const float *row_b = b + j * VECTOR_SIZE;
            float acc = 0;
            for (int k = 0; k < VECTOR_SIZE; k++) {
                float d = row_a[k] - row_b[k];
                acc += d * d;
            }
            c[i * VECTOR_NUM + j] = sqrt(acc);
        }
    }
}
// Fills a (num x size) row-major matrix with a single constant value.
void genNum(float *m, int num, int size, float value)
{
    const int total = num * size;
    for (int idx = 0; idx < total; ++idx)
        m[idx] = value;
}
// Selects the CPU (-c), plain GPU (-n) or shared-memory GPU (-s) RBF
// implementation from argv, runs it on constant test matrices, then
// reports the elapsed time and the deviation of the result from the
// analytic answer sqrt(VECTOR_SIZE) for these inputs.
int main(int argc, char *argv[])
{
    int mode = 0;
    if (argc == 1)
    {
        cout << "Too few arguments, please add \'-c\', \'-n\' or \'-s\'.\n";
        return 0;
    }
    if (!strcmp(argv[1], "-c")) mode = 0;
    else if (!strcmp(argv[1], "-n")) mode = 1;
    else if (!strcmp(argv[1], "-s")) mode = 2;
    else
    {
        cout << "Invalid run mode!\n";
        return 0;
    }
    clock_t start, end;
    // host buffers
    const int inBytes = sizeof(float) * VECTOR_SIZE * VECTOR_NUM;
    const int outBytes = sizeof(float) * VECTOR_NUM * VECTOR_NUM;
    float *a = (float*)malloc(inBytes);
    float *b = (float*)malloc(inBytes);
    float *c = (float*)malloc(outBytes);
    genNum(a, VECTOR_NUM, VECTOR_SIZE, 1.0);
    genNum(b, VECTOR_NUM, VECTOR_SIZE, 2.0);
    genNum(c, VECTOR_NUM, VECTOR_NUM, 3.0); // debug fill, overwritten below
    // device buffers
    float *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, inBytes);
    cudaMalloc((void**)&d_b, inBytes);
    cudaMalloc((void**)&d_c, outBytes);
    if (mode == 0)
    {
        cout << "Using CPU to calculate...\n";
        start = clock();
        rbf_cpu(a, b, c);
        end = clock();
    }
    else if (mode == 1)
    {
        cout << "Using GPU with global memory to calculate...\n";
        dim3 blockSize(blocksize, blocksize);
        dim3 gridSize((VECTOR_NUM + blocksize - 1) / blocksize, (VECTOR_NUM + blocksize - 1) / blocksize);
        // timed region covers transfers plus the kernel
        start = clock();
        cudaMemcpy((void*)d_a, (void*)a, inBytes, cudaMemcpyHostToDevice);
        cudaMemcpy((void*)d_b, (void*)b, inBytes, cudaMemcpyHostToDevice);
        rbf_cuda<<<gridSize, blockSize>>>(d_a, d_b, d_c, VECTOR_SIZE, VECTOR_NUM);
        cudaMemcpy((void*)c, (void*)d_c, outBytes, cudaMemcpyDeviceToHost);
        end = clock();
    }
    else
    {
        cout << "Using GPU with global and shared memory to calculate...\n";
        dim3 blockSize(TILE_WIDTH, TILE_WIDTH);
        dim3 gridSize((VECTOR_NUM + TILE_WIDTH - 1) / TILE_WIDTH, (VECTOR_NUM + TILE_WIDTH - 1) / TILE_WIDTH);
        start = clock();
        cudaMemcpy((void*)d_a, (void*)a, inBytes, cudaMemcpyHostToDevice);
        cudaMemcpy((void*)d_b, (void*)b, inBytes, cudaMemcpyHostToDevice);
        rbf_cuda_shareMem<<<gridSize, blockSize>>>(d_a, d_b, d_c, VECTOR_SIZE, VECTOR_NUM);
        cudaMemcpy((void*)c, (void*)d_c, outBytes, cudaMemcpyDeviceToHost);
        end = clock();
    }
    cout << "Time consuming: " << double(end - start) / CLOCKS_PER_SEC << "s\n";
    // accuracy check against the analytic answer for the constant inputs
    float max_error = 0.0;
    float correct_ans = sqrt(VECTOR_SIZE);
    int error_cnt = 0;
    for (int i = 0; i < VECTOR_NUM; ++i)
    {
        for (int j = 0; j < VECTOR_NUM; ++j)
        {
            float error = c[i * VECTOR_NUM + j] - correct_ans;
            if (error < 0) error = -error;
            if (error != 0.0) error_cnt++;
            if (error > max_error) max_error = error;
        }
    }
    cout << "Max error: " << max_error << endl;
    cout << "Total error counts: " << error_cnt << endl;
    // free all memory
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    return 0;
}
|
7,781 | /**
* simple hello world program, demonstrated that nvcc is fully downward-compatible with c
*/
#include <stdio.h>
// Host-only hello world: demonstrates nvcc compiling plain C unchanged.
int main(void) {
    const char *greeting = "Hello World! 123\n";
    printf("%s", greeting);
    return 0;
}
|
7,782 | #include "includes.h"
/**
* Given a input tensor x with shape (N, C, D), compute x.mean(2).mean(0)
* This function is useful in batch normalization.
* Refer to https://people.maths.ox.ac.uk/gilesm/cuda/prac4/reduction.pdf.
* But the unrolling warps seems to be not working correctly for now.
*/
const int N = 256;
const int C = 1024;
const int D = 28*28;
// Per-channel mean of an (N, C, D) tensor: block c sums all N*D elements
// of channel c into shared memory, tree-reduces, and writes the mean to
// out[c].  Launch with C blocks.
// NOTE(review): CUDA_NUM_THREADS is defined outside this file and must
// equal blockDim.x; the tree reduction also assumes blockDim.x is a power
// of two (otherwise buffer[tid + s] reads past the valid region) — confirm
// the launch configuration.
__global__ void reduce0(const float* in, float* out) {
__shared__ float buffer[CUDA_NUM_THREADS];
const unsigned int tid = threadIdx.x;
const unsigned int c = blockIdx.x;
// load and accumulate data to buffer
buffer[tid] = 0;
for (int i = tid; i < N * D; i += blockDim.x) {
const unsigned int n = i / D;
const unsigned int d = i % D;
const unsigned int index = n * C * D + c * D + d;
buffer[tid] += in[index];
}
__syncthreads();
// do tree reduction in buffer
for (int s = 1; s < blockDim.x; s *= 2) {
if (tid % (2*s) == 0) { // <-- bad: divergent branching
buffer[tid] += buffer[tid + s];
}
__syncthreads();
}
if (tid == 0) out[c] = buffer[0] / (N * D);
} |
7,783 | #include "includes.h"
// Elementwise accumulate: skip_out_sum[i] += f1[i] for every i < sz.
__global__ void skip_add(size_t sz, float_t* f1, float* skip_out_sum)
{
    const size_t i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= sz)
        return;
    skip_out_sum[i] += f1[i];
} |
7,784 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <iostream>
#include <cstdlib>
// Fills a host vector of 2^size random ints, times the fill, copies it to
// the device and runs thrust::min_element over it.
// Fix: rand() / (float)RAND_MAX produces a value in [0, 1] that is
// truncated to 0 when stored in an int vector, so every element used to
// be zero; the fill now stores the raw rand() draw.
int main(int argc, char *argv[]) {
    if (argc != 2) {
        std::cout << "Usage: " << argv[0] << " size\n";
        return -1;
    }
    const size_t len = 1L << (atoi(argv[1]));
    thrust::host_vector<int> h_a(len);
    srand(0);
    clock_t begin = clock();
    for (size_t i = 0; i < len; ++i) {
        h_a[i] = rand();
    }
    std::cout << "Elapsed time: "
              << double(clock() - begin) / CLOCKS_PER_SEC * 1000 << " ms\n";
    // Copy host_vector to device_vector
    thrust::device_vector<int> d_a = h_a;
    thrust::min_element(d_a.begin(), d_a.end());
    return 0;
} |
7,785 | #define TILE_SIZE 4
#define KBLOCK 8
#define TILE_SIZEB 8
#define TILE_M 8
#define TILE_N 4
#define BLOCK_M 128
#define BLOCK_N 64
#define BLOCK_SIZEB 128
#define BLOCK_SIZE 64
#define KBLOCKB 8
#define STRIDE BLOCK_SIZEB/TILE_SIZEB
#define M 2560
#define N 2048
#define K 4096
#include <cuda_runtime.h>
#include <stdlib.h>
#include <stdio.h>
/**
Algorithm 0: The most naive CUDA matmul kernel.
The result serves as the baseline.
*/
// Fix: the bounds check now precedes the loads, so an over-provisioned
// grid no longer reads A and B out of range before discarding the result.
// (C = A*B, row-major, M x K times K x N; one thread per output element.)
__global__ void matmul_0(float *A, float *B, float *C)
{
    int m_i = blockIdx.y * blockDim.y + threadIdx.y;
    int n_i = blockIdx.x * blockDim.x + threadIdx.x;
    if (m_i >= M || n_i >= N)
        return;
    float c = 0;
    for (int i = 0; i < K; i++){
        c += A[m_i*K + i] * B[i*N + n_i];
    }
    C[m_i*N + n_i] = c;
}
// Shared-memory tiled matmul with fixed 16x16 tiles: each block stages a
// 16-wide panel of A and B per k-step, then every thread accumulates one
// element of C from the staged tiles.
// NOTE(review): the tile loads are not bounds-guarded, so the grid must
// tile M and N exactly and K must be a multiple of 16 (only the final
// store is guarded) — confirm at the launch site.
__global__ void matmul_s(float *A, float *B, float *C)
{
int m_b = blockIdx.y * blockDim.y;
int n_b = blockIdx.x * blockDim.x;
int tx = threadIdx.x;
int ty = threadIdx.y;
int m_i = m_b + ty;
int n_i = n_b + tx;
float c = 0;
__shared__ float sa[16][16];
__shared__ float sb[16][16];
for(int k_i=0; k_i<K; k_i += 16){
sa[ty][tx] = A[(m_i)*K + k_i + tx];
sb[ty][tx] = B[(k_i + ty)*N + n_i];
__syncthreads();
for(int k=0; k<16; k++){
c += sa[ty][k]*sb[k][tx];
}
__syncthreads();
}
if(m_i < M && n_i < N){
C[m_i*N + n_i] = c;
}
}
// Register-tiled matmul: each thread computes a TILE_SIZE x TILE_SIZE
// sub-block of C, streaming KBLOCK-wide panels of A and B straight from
// global memory into registers (no shared memory).
// NOTE(review): the panel loads are unguarded — requires M and N divisible
// by BLOCK_SIZE and K by KBLOCK; only the final store is bounds-checked.
__global__ void matmul_1(float *A, float *B, float *C)
{
int m_i = blockIdx.y * BLOCK_SIZE + threadIdx.y * TILE_SIZE;
int n_i = blockIdx.x * BLOCK_SIZE + threadIdx.x * TILE_SIZE;
float a[TILE_SIZE][KBLOCK];
float b[KBLOCK][TILE_SIZE];
float c[TILE_SIZE][TILE_SIZE];
// zero the accumulator tile
for(int i=0; i<TILE_SIZE; i++){
for(int j=0; j<TILE_SIZE; j++){
c[i][j]=0;
}
}
for(int k_i=0; k_i<K; k_i += KBLOCK){
for(int i=0; i<TILE_SIZE; i++){
for(int k=0; k<KBLOCK; k++){
a[i][k] = A[(m_i+i)*K + k_i + k];
b[k][i] = B[(k_i + k)*N + n_i + i];
}
}
for(int i=0; i<TILE_SIZE; i++){
for(int j=0; j<TILE_SIZE; j++){
#pragma unroll
for(int k=0; k<KBLOCK; k++)
c[i][j] += a[i][k] * b[k][j];
}
}
}
for(int i=0; i<TILE_SIZE; i++){
for(int j=0; j<TILE_SIZE; j++){
if(m_i+i < M && n_i+j < N){
C[(m_i+i)*N + n_i + j] = c[i][j];
}
}
}
}
// matmul_1 plus shared-memory staging: the whole block cooperatively
// loads a BLOCK_SIZE x KBLOCK panel of A and a KBLOCK x BLOCK_SIZE panel
// of B into shared memory each k-step, then each thread register-tiles
// from the staged panels.  Same divisibility preconditions as matmul_1.
__global__ void matmul_2(float *A, float *B, float *C)
{
int m_b = blockIdx.y * BLOCK_SIZE;
int n_b = blockIdx.x * BLOCK_SIZE;
int m_t = threadIdx.y * TILE_SIZE;
int n_t = threadIdx.x * TILE_SIZE;
int m_i = m_b + m_t;
int n_i = n_b + n_t;
float a[TILE_SIZE][KBLOCK];
float b[KBLOCK][TILE_SIZE];
float c[TILE_SIZE][TILE_SIZE];
__shared__ float sa[BLOCK_SIZE*KBLOCK];
__shared__ float sb[KBLOCK*BLOCK_SIZE];
for(int i=0; i<TILE_SIZE; i++){
for(int j=0; j<TILE_SIZE; j++){
c[i][j]=0;
}
}
for(int k_i=0; k_i<K; k_i += KBLOCK){
// cooperative panel load: each thread copies a strided set of elements
for (int t=threadIdx.y*blockDim.x + threadIdx.x; t<BLOCK_SIZE*KBLOCK; t += blockDim.x*blockDim.y){
sa[t] = A[(m_b + t/KBLOCK)*K + k_i + t%KBLOCK];
sb[t] = B[(k_i + t/BLOCK_SIZE)*N + n_b + t%BLOCK_SIZE];
}
__syncthreads();
for(int i=0; i<TILE_SIZE; i++){
for(int k=0; k<KBLOCK; k++){
a[i][k] = sa[(m_t + i)*KBLOCK + k];
b[k][i] = sb[k*BLOCK_SIZE + n_t + i];
}
}
for(int i=0; i<TILE_SIZE; i++){
for(int j=0; j<TILE_SIZE; j++){
#pragma unroll
for(int k=0; k<KBLOCK; k++)
c[i][j] += a[i][k] * b[k][j];
}
}
__syncthreads();
}
for(int i=0; i<TILE_SIZE; i++){
for(int j=0; j<TILE_SIZE; j++){
if(m_i+i < M && n_i+j < N){
C[(m_i+i)*N + n_i + j] = c[i][j];
}
}
}
}
// Like matmul_2 but with the larger BLOCK_SIZEB/TILE_SIZEB/KBLOCKB tiling
// and the k-loop hoisted outside the register tile: each k performs a
// rank-1 update c += a * b^T from one column of the A panel and one row
// of the B panel.  Same divisibility preconditions as matmul_2.
__global__ void matmul_3(float *A, float *B, float *C)
{
int m_b = blockIdx.y * BLOCK_SIZEB;
int n_b = blockIdx.x * BLOCK_SIZEB;
int m_t = threadIdx.y * TILE_SIZEB;
int n_t = threadIdx.x * TILE_SIZEB;
int m_i = m_b + m_t;
int n_i = n_b + n_t;
float a[TILE_SIZEB];
float b[TILE_SIZEB];
float c[TILE_SIZEB][TILE_SIZEB];
__shared__ float sa[BLOCK_SIZEB*KBLOCKB];
__shared__ float sb[KBLOCKB*BLOCK_SIZEB];
for(int i=0; i<TILE_SIZEB; i++){
for(int j=0; j<TILE_SIZEB; j++){
c[i][j]=0;
}
}
for(int k_i=0; k_i<K; k_i += KBLOCKB){
for (int t=threadIdx.y*blockDim.x + threadIdx.x; t<BLOCK_SIZEB*KBLOCKB; t += blockDim.x*blockDim.y){
sa[t] = A[(m_b + t/KBLOCKB)*K + k_i + t%KBLOCKB];
sb[t] = B[(k_i + t/BLOCK_SIZEB)*N + n_b + t%BLOCK_SIZEB];
}
__syncthreads();
for(int k=0; k<KBLOCKB; k++){
#pragma unroll
for(int i=0; i<TILE_SIZEB; i++){
a[i] = sa[(m_t + i)*KBLOCKB + k];
b[i] = sb[k*BLOCK_SIZEB + n_t + i];
}
for(int i=0; i<TILE_SIZEB; i++){
#pragma unroll
for(int j=0; j<TILE_SIZEB; j++){
c[i][j] += a[i] * b[j];
}
}
}
__syncthreads();
}
for(int i=0; i<TILE_SIZEB; i++){
for(int j=0; j<TILE_SIZEB; j++){
if(m_i+i < M && n_i+j < N){
C[(m_i+i)*N + n_i + j] = c[i][j];
}
}
}
}
// matmul_3 variant with strided register tiling: instead of a contiguous
// TILE_SIZEB x TILE_SIZEB sub-block, each thread owns every STRIDE-th row
// and column of the block tile, making the shared-memory reads of
// neighbouring threads contiguous.  Same divisibility preconditions.
__global__ void matmul_4(float *A, float *B, float *C)
{
int m_b = blockIdx.y * BLOCK_SIZEB;
int n_b = blockIdx.x * BLOCK_SIZEB;
int m_i = m_b + threadIdx.y;
int n_i = n_b + threadIdx.x;
float a[TILE_SIZEB];
float b[TILE_SIZEB];
float c[TILE_SIZEB][TILE_SIZEB];
__shared__ float sa[BLOCK_SIZEB*KBLOCKB];
__shared__ float sb[KBLOCKB*BLOCK_SIZEB];
for(int i=0; i<TILE_SIZEB; i++){
for(int j=0; j<TILE_SIZEB; j++){
c[i][j]=0;
}
}
for(int k_i=0; k_i<K; k_i += KBLOCKB){
for (int t=threadIdx.y*STRIDE + threadIdx.x; t<BLOCK_SIZEB*KBLOCKB; t += STRIDE*STRIDE){
sa[t] = A[(m_b + t/KBLOCKB)*K + k_i + t%KBLOCKB];
sb[t] = B[(k_i + t/BLOCK_SIZEB)*N + n_b + t%BLOCK_SIZEB];
}
__syncthreads();
for(int k=0; k<KBLOCKB; k++){
#pragma unroll
for(int i=0; i<TILE_SIZEB; i++){
a[i] = sa[(i*STRIDE + threadIdx.y)*KBLOCKB + k];
b[i] = sb[k*BLOCK_SIZEB + i*STRIDE + threadIdx.x];
}
for(int i=0; i<TILE_SIZEB; i++){
#pragma unroll
for(int j=0; j<TILE_SIZEB; j++){
c[i][j] += a[i] * b[j];
}
}
}
__syncthreads();
}
for(int i=0; i<TILE_SIZEB; i++){
for(int j=0; j<TILE_SIZEB; j++){
if(m_i+i*STRIDE < M && n_i+j*STRIDE < N){
C[(m_i+i*STRIDE)*N + n_i + j*STRIDE] = c[i][j];
}
}
}
}
// matmul_4 with software pipelining: the a/b registers are double-buffered
// (a/b vs a1/b1) so the shared-memory loads for k-step k+1 are issued
// while the FMAs for step k are still in flight.
// NOTE(review): the prologue/while/epilogue structure assumes KBLOCKB is
// even and >= 2 (the loop must exit with k == KBLOCKB-1) — holds for the
// KBLOCKB == 8 defined above; confirm if the constant changes.
__global__ void matmul_5(float *A, float *B, float *C)
{
int m_b = blockIdx.y * BLOCK_SIZEB;
int n_b = blockIdx.x * BLOCK_SIZEB;
int m_i = m_b + threadIdx.y;
int n_i = n_b + threadIdx.x;
float a[TILE_SIZEB];
float b[TILE_SIZEB];
float a1[TILE_SIZEB];
float b1[TILE_SIZEB];
float c[TILE_SIZEB][TILE_SIZEB];
__shared__ float sa[BLOCK_SIZEB*KBLOCKB];
__shared__ float sb[KBLOCKB*BLOCK_SIZEB];
for(int i=0; i<TILE_SIZEB; i++){
for(int j=0; j<TILE_SIZEB; j++){
c[i][j]=0;
}
}
for(int k_i=0; k_i<K; k_i += KBLOCKB){
for (int t=threadIdx.y*STRIDE + threadIdx.x; t<BLOCK_SIZEB*KBLOCKB; t += STRIDE*STRIDE){
sa[t] = A[(m_b + t/KBLOCKB)*K + k_i + t%KBLOCKB];
sb[t] = B[(k_i + t/BLOCK_SIZEB)*N + n_b + t%BLOCK_SIZEB];
}
__syncthreads();
// prologue: load registers for k = 0
for(int i=0; i<TILE_SIZEB; i++){
a[i] = sa[(i*STRIDE + threadIdx.y)*KBLOCKB];
b[i] = sb[i*STRIDE + threadIdx.x];
}
int k = 1;
// steady state: load one buffer while accumulating from the other
while(k<KBLOCKB-1){
for(int i=0; i<TILE_SIZEB; i++){
a1[i] = sa[(i*STRIDE + threadIdx.y)*KBLOCKB + k];
b1[i] = sb[k*BLOCK_SIZEB + i*STRIDE + threadIdx.x];
#pragma unroll
for(int j=0; j<TILE_SIZEB; j++){
c[i][j] += a[i] * b[j];
}
}
k++;
for(int i=0; i<TILE_SIZEB; i++){
a[i] = sa[(i*STRIDE + threadIdx.y)*KBLOCKB + k];
b[i] = sb[k*BLOCK_SIZEB + i*STRIDE + threadIdx.x];
#pragma unroll
for(int j=0; j<TILE_SIZEB; j++){
c[i][j] += a1[i] * b1[j];
}
}
k++;
}
// epilogue: drain the last two k-steps
for(int i=0; i<TILE_SIZEB; i++){
a1[i] = sa[(i*STRIDE + threadIdx.y)*KBLOCKB + k];
b1[i] = sb[k*BLOCK_SIZEB + i*STRIDE + threadIdx.x];
#pragma unroll
for(int j=0; j<TILE_SIZEB; j++){
c[i][j] += a[i] * b[j];
}
}
for(int i=0; i<TILE_SIZEB; i++){
#pragma unroll
for(int j=0; j<TILE_SIZEB; j++){
c[i][j] += a1[i] * b1[j];
}
}
__syncthreads();
}
for(int i=0; i<TILE_SIZEB; i++){
for(int j=0; j<TILE_SIZEB; j++){
if(m_i+i*STRIDE < M && n_i+j*STRIDE < N){
C[(m_i+i*STRIDE)*N + n_i + j*STRIDE] = c[i][j];
}
}
}
}
/*
 * Host driver: multiplies random M x K and K x N matrices with the kernel
 * variants above and prints each kernel's CUDA error string plus output
 * checksums (matmul_s vs matmul_1/4/5) for a rough cross-check.
 *
 * Fix: every grid was built as (M/bs, N/bs), but all kernels derive their
 * row (M) index from blockIdx.y and their column (N) index from
 * blockIdx.x.  With M=2560 != N=2048 the last 512 rows of C were never
 * written (the column guard silently discarded the surplus x-blocks), so
 * the printed checksums included stale memory.  Grids are now (N/bs, M/bs).
 */
int main(int argc, char *argv[])
{
    srand(0);
    float *cpu_a = (float*)malloc(sizeof(float)*M*K);
    float *cpu_b = (float*)malloc(sizeof(float)*K*N);
    float *cpu_at = (float*)malloc(sizeof(float)*K*M);
    float *cpu_c = (float*)malloc(sizeof(float)*M*N);
    float *cpu_c2 = (float*)malloc(sizeof(float)*M*N);
    // A and its transpose (the transpose is uploaded but unused below)
    for (int i=0; i<M; i++){
        for (int k=0; k<K; k++){
            cpu_a[i*K+k] = rand()/65536;
            cpu_at[k*M+i] = cpu_a[i*K+k];
        }
    }
    for (int i=0; i<N*K; i++)
        cpu_b[i] = rand()/65536;
    float *gpu_a, *gpu_at, *gpu_b, *gpu_c;
    cudaMalloc((void**)&gpu_a, sizeof(float)*M*K);
    cudaMalloc((void**)&gpu_b, sizeof(float)*N*K);
    cudaMalloc((void**)&gpu_at, sizeof(float)*M*K);
    cudaMalloc((void**)&gpu_c, sizeof(float)*M*N);
    cudaMemcpy(gpu_a, cpu_a, sizeof(float)*M*K, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_at, cpu_at, sizeof(float)*M*K, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, cpu_b, sizeof(float)*N*K, cudaMemcpyHostToDevice);
    // grid.x covers columns (N), grid.y covers rows (M)
    dim3 grid0(N/16, M/16);
    dim3 block0(16, 16);
    matmul_s<<<grid0, block0>>>(gpu_a, gpu_b, gpu_c);
    cudaDeviceSynchronize();
    printf("%s\n",cudaGetErrorString(cudaPeekAtLastError()));
    cudaMemcpy(cpu_c2, gpu_c, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    dim3 grid(N/BLOCK_SIZE, M/BLOCK_SIZE);
    dim3 block(BLOCK_SIZE/TILE_SIZE, BLOCK_SIZE/TILE_SIZE);
    matmul_2<<<grid, block>>>(gpu_a, gpu_b, gpu_c);
    cudaDeviceSynchronize();
    printf("%s\n",cudaGetErrorString(cudaPeekAtLastError()));
    cudaMemcpy(cpu_c2, gpu_c, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    matmul_1<<<grid, block>>>(gpu_a, gpu_b, gpu_c);
    cudaDeviceSynchronize();
    printf("%s\n",cudaGetErrorString(cudaPeekAtLastError()));
    cudaMemcpy(cpu_c, gpu_c, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    dim3 gridb(N/BLOCK_SIZEB, M/BLOCK_SIZEB);
    dim3 blockb(BLOCK_SIZEB/TILE_SIZEB, BLOCK_SIZEB/TILE_SIZEB);
    matmul_4<<<gridb, blockb>>>(gpu_a, gpu_b, gpu_c);
    cudaDeviceSynchronize();
    printf("%s\n",cudaGetErrorString(cudaPeekAtLastError()));
    cudaMemcpy(cpu_c, gpu_c, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    matmul_5<<<gridb, blockb>>>(gpu_a, gpu_b, gpu_c);
    cudaDeviceSynchronize();
    printf("%s\n",cudaGetErrorString(cudaPeekAtLastError()));
    cudaMemcpy(cpu_c, gpu_c, sizeof(float)*M*N, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // checksums: shared-tile result, last register-tile result, difference
    float sum = 0.0;
    for (int i=0; i<M*N; i++)
        sum += cpu_c2[i];
    printf("%f\n",sum);
    sum = 0.0;
    for (int i=0; i<M*N; i++)
        sum += cpu_c[i];
    printf("%f\n",sum);
    sum = 0.0;
    for (int i=0; i<M*N; i++)
        sum += cpu_c[i] - cpu_c2[i];
    printf("%f\n",sum);
    cudaFree(gpu_a);
    cudaFree(gpu_at);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
    free(cpu_a);
    free(cpu_at);
    free(cpu_b);
    free(cpu_c);
    free(cpu_c2);
}
|
7,786 | #define BLOCK_SIZE_KERNEL 16
#define BLOCK_SIZE 16
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
__device__ int kerrors;
// GPU-side validation: increments the device counter `kerrors` for each
// entry of ck whose relative error against the gold copy gk exceeds
// 1e-10, checked symmetrically so a zero on either side still flags.
// Fix: threads outside the n x n matrix now return instead of reading out
// of bounds when the launch grid overshoots n.
__global__ void GoldChkKernel(double *gk, double *ck, int n) //, int *kerrors)
{
    //================== HW Accelerated output validation
    int tx = blockIdx.x * BLOCK_SIZE + threadIdx.x;
    int ty = blockIdx.y * BLOCK_SIZE + threadIdx.y;
    if (tx >= n || ty >= n)
        return;
    double gold = gk[ty * n + tx];
    double comp = ck[ty * n + tx];
    if ((fabs((gold - comp) / gold) > 0.0000000001)
        || (fabs((gold - comp) / comp) > 0.0000000001))
        atomicAdd(&kerrors, 1);
}
/* ======================================================= */
/* CUDA implementation of dGEMM using shared memory
/* ======================================================= */
// Shared-memory tiled n x n DGEMM: C = alpha * (mat2 * mat1) + beta * C,
// with zero-padded tile loads so n need not divide the tile size.
// Requires blockDim.x == blockDim.y == BLOCK_SIZE_KERNEL.
// NOTE(review): the parameter order is (B, A) — the second pointer is the
// one streamed column-block-wise; confirm the operand order at call sites.
__global__ void cuda_dgemm_shmem(int n,
double alpha,
const double *B,
const double *A,
double beta,
double *C) {
// Block index
int block_col = blockIdx.x;
int block_row = blockIdx.y;
// Thread index
int thread_col = threadIdx.x;
int thread_row = threadIdx.y;
//printf("row = %d col = %d n= %d\n", block_col, block_row, n);
//int row = blockDim.y * blockIdx.y + threadIdx.y;
//int col = blockDim.x * blockIdx.x + threadIdx.x;
// aBegin..aEnd walks one block-row of A; b walks a block-column of B
int aBegin = n * blockDim.x * block_row;
int aEnd = aBegin + n-1;
int bBegin = blockDim.x * block_col;
int bStep = n * blockDim.x;
double Csub = 0;
for (int a=aBegin, b=bBegin, istep=0;
a <= aEnd; a+= blockDim.x, b+=bStep, ++istep){
__shared__ double As[BLOCK_SIZE_KERNEL][BLOCK_SIZE_KERNEL];
__shared__ double Bs[BLOCK_SIZE_KERNEL][BLOCK_SIZE_KERNEL];
// zero-fill out-of-range tile entries so partial tiles stay correct
if ((istep*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
As[thread_row][thread_col] = A[a + n * thread_row + thread_col];
else
As[thread_row][thread_col] = 0;
if ((block_col*blockDim.x+thread_col < n) && (istep*blockDim.x + thread_row < n))
Bs[thread_row][thread_col] = B[b + n * thread_row + thread_col];
else
Bs[thread_row][thread_col] = 0;
__syncthreads();
// calculate the cell
for (int k = 0; k < blockDim.x; ++k)
Csub += As[thread_row][k] * Bs[k][thread_col];
__syncthreads();
}
// Write the block sub-matrix to global memory;
// each thread writes one element
int c = n * blockDim.x * block_row + blockDim.x * block_col;
if ((block_col*blockDim.x+thread_col < n) && (block_row*blockDim.x+ thread_row < n))
C[c + n * thread_row + thread_col] = alpha * Csub + beta * C[c +n * thread_row + thread_col];
}
|
7,787 | //Sum of two vectors in a parallel fashion
#include <stdio.h>
#include <iostream>
#define N 512
// One-block-per-element vector sum (launched as <<<N, 1>>>):
// cc[i] = aa[i] + bb[i], indexed purely by blockIdx.x.
__global__
void add(int *aa, int *bb, int *cc)
{
    const int i = blockIdx.x;
    cc[i] = aa[i] + bb[i];
}
// Adds two length-N vectors on the GPU and prints one sample element of
// each.  Fix: the host buffers used to leak — the original free() calls
// were commented out because they ran before the values were printed;
// they are now released after the last use.
int main(void)
{
    int *a, *b, *c;// host copies of a, b, c
    int *d_a, *d_b, *d_c;// device copies of a, b, c
    int size = N * sizeof(int);
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    for (int i = 0; i < N; i++) {
        a[i] = 1;
        b[i] = 2;
        c[i] = -1;
    }
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    add<<<N, 1>>>(d_a, d_b, d_c);
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    std::cout << "a = " << a[1] << "\n";
    std::cout << "b = " << b[1] << "\n";
    std::cout << "c = " << c[1] << "\n";
    free(a);
    free(b);
    free(c);
    return 0;
}
|
7,788 | #include "rand.cuh"
// Seeds the four state words of fr deterministically from (uniq_id,
// dest_id) and the host-supplied mixing constants init.a..init.d.
// NOTE(review): nothing prevents an all-zero state for unlucky inputs,
// which would make the xorshift generator below return 0 forever —
// confirm callers never pass ids/constants that zero every word.
__device__ void init_fast_random(fast_random_t &fr, int uniq_id, int dest_id, rand_init_t init)
{
int init2 = init.a * 17;
int init3 = init.b * 59;
int init4 = init.c * 23;
fr.x = uniq_id * init.d + dest_id * init3;
fr.y = uniq_id * init2 + dest_id * init2;
fr.z = uniq_id * init3 + dest_id * init3;
fr.w = uniq_id * init4 + dest_id * init.d;
}
// One step of a Marsaglia xorshift128 generator (shift triple 11/19/8):
// rotates the four state words and returns the freshly computed w.
__device__ uint32_t rand_full(fast_random_t &fr)
{
uint32_t t = fr.x ^ (fr.x << 11);
fr.x = fr.y;
fr.y = fr.z;
fr.z = fr.w;
return fr.w = fr.w ^ (fr.w >> 19) ^ t ^ (t >> 8);
}
// Returns 100 / ((r & 0xFFFF) + 1) for a fresh xorshift draw r: a value
// in (100/65536, 100] heavily skewed toward small magnitudes.
// NOTE(review): this is NOT a uniform distribution — confirm the skew is
// intentional for the callers of this helper.
__device__ float rand_f(fast_random_t &fr)
{
uint32_t r = rand_full(fr);
return 100.0f / ((r & 0xFFFF) + 1);
}
// Pseudo-random byte in [0, 255]: one generator step masked to 8 bits.
// Fix (consistency): this duplicated rand_full's xorshift update verbatim;
// it now delegates so the two stay in sync if the algorithm changes.
__device__ uint32_t rand256(fast_random_t &fr)
{
    return rand_full(fr) & 255;
}
// Host-side helper: draws the four mixing constants consumed by
// init_fast_random() from the C library rand() (seed via srand() first
// for reproducibility).
void init_rand(rand_init_t &rand_init)
{
rand_init.a = rand();
rand_init.b = rand();
rand_init.c = rand();
rand_init.d = rand();
}
|
7,789 | #include "includes.h"
// Shared-memory tiled matmul with SIZE x SIZE blocks: C = A * B where
// A is (rows x P), B is (P x M) and C is (rows x M), row-major.
// Fix: the tile loads and the final store were unguarded, so any launch
// where N, M or P is not a multiple of SIZE touched memory out of bounds.
// Out-of-range loads now zero-fill and the store is guarded; the barriers
// stay outside all conditionals so every thread reaches them.
// NOTE(review): N was previously unused — it is assumed here to be the
// row count of A/C (the usual N x P times P x M convention); confirm.
__global__ void Kernel10(int N, int M, int P, float *A, float *B, float *C) {
    __shared__ float sA[SIZE][SIZE];
    __shared__ float sB[SIZE][SIZE];
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    int row = by * SIZE + ty;
    int col = bx * SIZE + tx;
    float tmp = 0.0;
    for (int m = 0; m < P; m = m + SIZE) {
        sA[ty][tx] = (row < N && m + tx < P) ? A[row*P + m + tx] : 0.0f;
        sB[ty][tx] = (m + ty < P && col < M) ? B[col + (m + ty)*M] : 0.0f;
        __syncthreads();
        for (int k = 0; k < SIZE; k++)
            tmp += sA[ty][k] * sB[k][tx];
        __syncthreads();
    }
    if (row < N && col < M)
        C[row*M + col] = tmp;
} |
// Moving-average style update over len elements:
// w[i] = (1 - lr) * w[i] + lr * dzdw[i] / ts.
__global__ void BNAvg(const float lr, float *w, const float *dzdw,
                      const float ts, const float len) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= len) return;
    w[i] = (1 - lr) * w[i] + lr * dzdw[i] / ts;
} |
7,791 | //This file contains code for performing convolution on a GPU
//Author:-Ajay Singh
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
// 1-D convolution with zero boundary handling:
// P[i] = sum_j M[j] * N[i - mask_width/2 + j], out-of-range N treated as 0.
// Fix: pvalue was accumulated without being initialised, so every output
// started from stack garbage; it now starts at 0.  The store is also
// guarded so an over-provisioned grid cannot write past P.
__global__ void convolution_1D_basic_kernel(float *N, float *M, float *P, int mask_width, int size){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int start_pos = i - (mask_width / 2);
    float pvalue = 0.0f;
    for (int j = start_pos; j < start_pos + mask_width; j++){
        if (j >= 0 && j < size)
            pvalue += M[j - start_pos] * N[j];
    }
    if (i < size)
        P[i] = pvalue;
}
#define size 10000
#define mask_width 100
// Builds a ramp mask and quadratic input signal, convolves them on the
// GPU and reports the kernel time in seconds.
// Fixes: both initialisers used integer division (i/size was always 0,
// so the mask — and therefore the entire output — was identically zero);
// the kernel launch is asynchronous, so the timer now waits for
// completion before stopping; the elapsed ticks are converted to seconds.
int main(){
    float mask[mask_width];
    float arr[size];
    float Output[size];
    float *Pd, *arrd, *Md;
    double elapsed;
    for(int i=0; i<size; i++){
        if(i<mask_width){
            mask[i] = (float)i / size;       // fix: was integer division (always 0)
        }
        arr[i] = (float)(i * i) / size;      // fix: same integer-division bug
    }
    if(cudaMalloc((void **)&Pd, sizeof(float)*size)!=cudaSuccess){
        printf("error while allocating Pd\n");
    }
    if(cudaMalloc((void **)&Md, sizeof(float)*mask_width)!=cudaSuccess){
        printf("error while allocating Md\n");
    }
    if(cudaMalloc((void **)&arrd, sizeof(float)*size)!=cudaSuccess){
        printf("error while allocating arrd\n");
    }
    if(cudaMemcpy(arrd, arr, sizeof(float)*size, cudaMemcpyHostToDevice)!=cudaSuccess){
        printf("error while copyting arr from host to device\n");
    }
    if(cudaMemcpy(Md, mask, sizeof(float)*mask_width, cudaMemcpyHostToDevice)!=cudaSuccess){
        printf("error while copyting mask from host to device\n");
    }
    elapsed = -clock();
    convolution_1D_basic_kernel<<<size/20, 20>>>(arrd, Md, Pd, mask_width, size);
    cudaDeviceSynchronize();   // fix: the launch is async; wait before stopping the clock
    elapsed += clock();
    if(cudaMemcpy(Output, Pd, sizeof(float)*size, cudaMemcpyDeviceToHost)!=cudaSuccess){
        printf("error while copyting arr from host to device\n");
    }
    printf("Time Taken = %lf\n", elapsed / CLOCKS_PER_SEC);
    cudaFree(arrd);
    cudaFree(Pd);
    cudaFree(Md);
    return 0;
}
|
7,792 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include <curand_kernel.h>
#include <iostream>
#include <string>
#include <fstream>
using namespace std;
#define CURAND_CALL(x) do { if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
// Initializes one curandState per thread: shared seed, sequence number =
// global thread id, zero offset — giving each thread its own stream.
__global__ void setup_kernel(curandState *state, unsigned long long seed)
{
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(seed, id, 0, &state[id]);
}
// Draws one pseudo-random unsigned int per thread from a pre-initialized
// state array, advancing the state through a fast register-local copy.
__global__ void generate_kernel(curandState *state, unsigned int *result)
{
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    curandState local = state[id];
    result[id] = curand(&local);
    state[id] = local;   // persist the advanced state for the next draw
}
// Single-shot variant: seeds a fresh per-thread state inside the kernel
// (no state array needed) and draws exactly one value from it.
__global__ void generate_kernel2(unsigned int *result, unsigned long long seed)
{
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    curandState s;
    curand_init(seed, id, 0, &s);
    result[id] = curand(&s);
}
// Fills an L x L grid with device-generated random values, dumps it to
// "data.txt" and optionally echoes it to stdout.
// Fix: `print` was read without ever being initialised when argc <= 2
// (undefined behaviour deciding whether to print); it now defaults to 0.
int main(int argc, char *argv[])
{
    int *h_data, *d_data, L, tam;
    int print = 0;   // fix: was uninitialized when no second argument was given
    size_t size;
    curandState *devStates;
    L = 10;
    if (argc > 1)
        L = atoi(argv[1]);
    if (argc > 2)
        print = atoi(argv[2]);
    tam = L * L;
    size = tam * sizeof(int);
    // Allocate memory for the vectors on host memory.
    h_data = (int*) malloc(size);
    for (int i = 0; i < tam; i++)
        h_data[i] = 0;
    cudaMalloc((void **)&d_data, size);
    // devStates is only needed by the two-kernel variant kept below
    cudaMalloc((void **)&devStates, tam * sizeof(curandState));
    //setup_kernel<<<L,L>>>(devStates,time(NULL));
    //generate_kernel<<<L,L>>>(devStates, (unsigned int *) d_data);
    generate_kernel2<<<L,L>>>((unsigned int *) d_data, time(NULL));
    cudaMemcpy(h_data, d_data, size, cudaMemcpyDeviceToHost);
    ofstream out("data.txt");
    if (print)
        printf("\n\n");
    for (int i = 0; i < tam; i++)
    {
        if (print && i % L == 0)
            printf("\n");
        out << h_data[i] << " ";
        if (print)
            printf(" %u", h_data[i]);
    }
    if (print)
        printf("\n\n");
    out.close();
    /* Free host memory */
    cudaFree(devStates);
    cudaFree(d_data);
    free(h_data);
    return 0;
} |
7,793 | #include <cmath>
// In-place exponentiation: value[i] <- e^{value[i]}.
// Launch shape: one thread per element within a single 1-D block
// (index comes from threadIdx.x only).
__global__ void myexp(double* value)
{
    const unsigned int i = threadIdx.x;
    const double x = value[i];
    value[i] = std::exp(x);
}
|
7,794 | #include "includes.h"
/*
 * Iterative in-place quicksort over values[0..n-1], replacing recursion
 * with explicit start[]/end[] range stacks ("naive sort used if the
 * quicksort uses too many levels").
 *
 * NOTE(review): the stacks are indexed starting at idx (the global
 * thread id) and seeded with start[idx] = idx, which only covers the
 * whole array when idx == 0; the kernel appears to assume a
 * single-thread launch — confirm at the call site. There is also no
 * check that idx stays below MAX_LEVELS.
 */
__global__ void kernel_quicksort(int* values, int n) {
#define MAX_LEVELS 1000
    int pivot, L, R;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Explicit stack of [start, end] subranges still to be sorted;
    // idx doubles as the stack-top index.
    int start[MAX_LEVELS];
    int end[MAX_LEVELS];
    start[idx] = idx;
    end[idx] = n - 1;
    while (idx >= 0) {
        L = start[idx];
        R = end[idx];
        if (L < R) {
            // Partition: pivot is the leftmost element of the range and
            // ends at its final sorted position values[L].
            pivot = values[L];
            while (L < R) {
                while (values[R] >= pivot && L < R)
                    R--;
                if(L < R)
                    values[L++] = values[R];
                while (values[L] < pivot && L < R)
                    L++;
                if (L < R)
                    values[R--] = values[L];
            }
            values[L] = pivot;
            // Push the right subrange above the (shrunk) left one.
            start[idx + 1] = L + 1;
            end[idx + 1] = end[idx];
            end[idx++] = L;
            // Keep the smaller subrange on top to bound stack growth.
            if (end[idx] - start[idx] > end[idx - 1] - start[idx - 1]) {
                int tmp = start[idx];
                start[idx] = start[idx - 1];
                start[idx - 1] = tmp;
                tmp = end[idx];
                end[idx] = end[idx - 1];
                end[idx - 1] = tmp;
            }
        }
        else
            idx--;  // empty subrange: pop the stack
    }
}
7,795 | #include "includes.h"
/*
 * Evaluates the cubic a3*x^3 + a2*x^2 + a1*x + a0 at every element of
 * `input`, storing the results in `output`.
 * Expects a (possibly 2-D) grid of 1-D blocks; one element per thread.
 * Coefficients arrive as float while the data is double, per the
 * caller-facing signature.
 */
__global__ void PolynomialFunctionKernel_Double(float a3, float a2, float a1, float a0, double* input, double* output, int size)
{
    // Flatten the 2-D grid into one linear element index.
    const int id = blockDim.x * blockIdx.y * gridDim.x
                 + blockDim.x * blockIdx.x
                 + threadIdx.x;
    if (id >= size)
        return;  // tail guard: the grid may overshoot the array

    const double x = input[id];
    // Monomial-sum form kept as-is (not Horner) so rounding matches the
    // reference implementation exactly.
    output[id] = a3 * x * x * x + a2 * x * x + a1 * x + a0;
}
7,796 | #include "includes.h"
// Copies the x_inner * y_inner interior cells from src to dest,
// one element per thread over a flat 1-D launch.
__global__ void jacobi_copy_u( const int x_inner, const int y_inner, const double* src, double* dest)
{
    const int total = x_inner * y_inner;
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= total)
        return;  // tail guard for a grid larger than the interior
    dest[idx] = src[idx];
}
7,797 | #include "includes.h"
/*
 * Recovers radial (Vrad) and azimuthal (Vtheta) velocities from the
 * transported momenta on a polar grid: i = radial ring index in
 * [0, nrad), j = sector index in [0, nsec). Expects a 2-D launch with
 * x covering sectors and y covering rings.
 */
__global__ void ComputeVelocitiesKernel (double *Vrad, double *Vtheta, double *Dens, double *Rmed, double *ThetaMomP, double *ThetaMomM, double *RadMomP, double *RadMomM, int nrad, int nsec, double OmegaFrame)
{
    int j = threadIdx.x + blockDim.x*blockIdx.x;  // sector (azimuthal) index
    int i = threadIdx.y + blockDim.y*blockIdx.y;  // ring (radial) index
    if (i<nrad && j<nsec){
        if (i == 0)
            // Innermost interface: radial velocity pinned to zero.
            Vrad[i*nsec + j] = 0.0;
        else {
            // Momenta averaged across the radial interface, divided by the
            // two adjacent densities; 1e-20 guards against division by zero.
            Vrad[i*nsec + j] = (RadMomP[(i-1)*nsec + j] + RadMomM[i*nsec + j])/(Dens[i*nsec + j] +
                Dens[(i-1)*nsec + j] + 1e-20);
        }
        // ((j-1)+nsec)%nsec wraps to the previous sector periodically.
        // Dividing by Rmed[i] converts angular momentum to velocity;
        // Rmed[i]*OmegaFrame removes the rotating frame's contribution.
        Vtheta[i*nsec + j] = (ThetaMomP[i*nsec + ((j-1)+nsec)%nsec] + ThetaMomM[i*nsec + j])/(Dens[i*nsec + j] +
            Dens[i*nsec + ((j-1)+nsec)%nsec] + 1e-15)/Rmed[i] - Rmed[i]*OmegaFrame;
        /* It was the angular momentum */
    }
}
7,798 | #include <stdio.h>
// Element-wise vector addition: result[i] = left[i] + right[i].
// One thread per element within a single block; there is no bounds
// guard, so the launch must supply exactly one thread per element.
__global__ void addVector(float* left, float* right, float* result)
{
    // Element index inside the (single) block.
    const int i = threadIdx.x;
    result[i] = left[i] + right[i];
}
#define SIZE 512
// Host driver: fills two SIZE-element vectors, adds them on the GPU
// with addVector, and prints every element of the result.
__host__ int main()
{
    // Host-side vectors.
    float* vec1 = new float[SIZE];
    float* vec2 = new float[SIZE];
    float* vec3 = new float[SIZE];

    // Initialize the inputs (result will be i + i for element i).
    for (int i = 0; i < SIZE; i++)
    {
        vec1[i] = i;
        vec2[i] = i;
    }

    // Device-side buffers, with allocation checks (the original ignored
    // all CUDA return codes).
    float* devVec1;
    float* devVec2;
    float* devVec3;
    if (cudaMalloc((void**)&devVec1, sizeof(float) * SIZE) != cudaSuccess ||
        cudaMalloc((void**)&devVec2, sizeof(float) * SIZE) != cudaSuccess ||
        cudaMalloc((void**)&devVec3, sizeof(float) * SIZE) != cudaSuccess)
    {
        printf("device allocation failed\n");
        return 1;
    }

    // Copy the inputs to the device.
    cudaMemcpy(devVec1, vec1, sizeof(float) * SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(devVec2, vec2, sizeof(float) * SIZE, cudaMemcpyHostToDevice);

    dim3 gridSize = dim3(1, 1, 1);     // a single block
    dim3 blockSize = dim3(SIZE, 1, 1); // one thread per element

    // Launch the kernel, then catch launch-configuration errors
    // (kernel launches themselves return nothing).
    addVector<<<gridSize, blockSize>>>(devVec1, devVec2, devVec3);
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(launchErr));

    // Wait for the kernel via an event before reading the result
    // (cudaMemcpy would also synchronize; the event makes it explicit).
    cudaEvent_t syncEvent;
    cudaEventCreate(&syncEvent);
    cudaEventRecord(syncEvent, 0);
    cudaEventSynchronize(syncEvent);

    // Fetch the computed result.
    cudaMemcpy(vec3, devVec3, sizeof(float) * SIZE, cudaMemcpyDeviceToHost);

    for (int i = 0; i < SIZE; i++)
    {
        printf("Element #%i: %.1f\n", i , vec3[i]);
    }

    // Release device and host resources.
    cudaEventDestroy(syncEvent);
    cudaFree(devVec1);
    cudaFree(devVec2);
    cudaFree(devVec3);
    delete[] vec1;
    delete[] vec2;
    delete[] vec3;
    return 0;  // was implicit; made explicit for clarity
}
|
7,799 | #include "includes.h"
// Deliberately empty kernel: launching it performs no work of its own.
// NOTE(review): presumably used as a lightweight ordering point between
// convolution groups (the name suggests so) — confirm at the call sites.
__global__ void sync_conv_groups() {}
7,800 | #include <iostream>
#define NUM_BANKS 32
#define LOG_NUM_BANKS 5
#define GET_OFFSET(idx) (idx >> LOG_NUM_BANKS)
// Must be divisible by 32 and a divisor of 1024
const int block_size = 256;
/*
 * Blelloch-style EXCLUSIVE prefix sum of one block of input.
 * Each block scans its blockDim.x elements through shared memory;
 * out_data[i] receives the sum of the elements strictly before i within
 * the block (out_data[block start] = 0). Cross-block totals are added
 * afterwards by AddBlocksPrefixSum.
 *
 * Launch contract: dynamic shared memory of
 *   (blockDim.x + GET_OFFSET(blockDim.x)) * sizeof(int) bytes;
 * the GET_OFFSET padding spreads addresses across shared-memory banks
 * to reduce bank conflicts.
 *
 * NOTE(review): in_data[index] is read without a bounds guard, so the
 * tail block touches memory past the logical array length whenever the
 * length is not a multiple of blockDim.x — relies on allocation slack.
 *
 * Based on Akhtyamov Pavel's realization of Scan:
 * https://github.com/akhtyamovpavel/ParallelComputationExamples/blob/master/CUDA/05-scan/05-scan_bank_conflicts.cu
 */
__global__ void Scan(int *in_data, int *out_data) {
    extern __shared__ int shared_data[];
    unsigned int tid = threadIdx.x;
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    // Stage this block's slice in bank-padded shared memory.
    shared_data[tid + GET_OFFSET(tid)] = in_data[index];
    __syncthreads();
    // Up-sweep (reduce): build partial sums up a binary tree;
    // shift doubles each round (shift = 2^(d-1)).
    for (unsigned int shift = 1; shift < blockDim.x; shift <<= 1) {
        int ai = shift * (2 * tid + 1) - 1;
        int bi = shift * (2 * tid + 2) - 1;
        if (bi < blockDim.x) {
            shared_data[bi + GET_OFFSET(bi)] += shared_data[ai + GET_OFFSET(ai)];
        }
        __syncthreads();
    }
    // Zero the root so the down-sweep yields an exclusive scan.
    if (tid == 0) {
        shared_data[blockDim.x - 1 + GET_OFFSET(blockDim.x - 1)] = 0;
    }
    __syncthreads();
    int temp;
    // Down-sweep: walk back down the tree, swap-and-add so each slot
    // ends with the sum of everything strictly before it.
    for (unsigned int shift = blockDim.x / 2; shift > 0; shift >>= 1) {
        int bi = shift * (2 * tid + 2) - 1;
        int ai = shift * (2 * tid + 1) - 1;
        int ai_offset = ai + GET_OFFSET(ai);
        int bi_offset = bi + GET_OFFSET(bi);
        if (bi < blockDim.x) {
            temp = shared_data[ai_offset];                   // left child
            shared_data[ai_offset] = shared_data[bi_offset]; // pass down
            shared_data[bi_offset] = temp + shared_data[bi_offset];
        }
        __syncthreads();
    }
    // Publish this block's exclusive prefix sums.
    out_data[index] = shared_data[tid + GET_OFFSET(tid)];
    __syncthreads();
}
/*
 * Number of blocks of `block_size` threads needed to cover `size`
 * elements (ceiling division), computed without floating point.
 */
int GridSize(int size) {
    const int full_blocks = size / block_size;
    const bool has_tail = (size % block_size) != 0;
    return has_tail ? full_blocks + 1 : full_blocks;
}
/*
 * For each block b, stores into d_blocks[b] that block's inclusive total:
 * the block's last element plus the exclusive prefix sum at that
 * position (i.e. the sum of the entire block plus everything before it
 * within the block).
 *
 * d_array - source array
 * d_prefix_sum - exclusive prefix sum previously computed per block
 * d_blocks - result, one entry per block
 *
 * NOTE(review): there is no `index < number_of_blocks` guard, so any
 * launch whose thread count exceeds the number of blocks reads past the
 * end of d_array — relies on the caller sizing data/launch; confirm.
 */
__global__ void FindBlocks(int *d_array, int *d_prefix_sum, int *d_blocks) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // Index of the last element belonging to block `index`.
    int last_elem_in_block_index = (index + 1) * block_size - 1;
    d_blocks[index] = d_array[last_elem_in_block_index] +
        d_prefix_sum[last_elem_in_block_index];
    // Trailing barrier kept from the original; no shared memory is used,
    // so it has no observable effect here.
    __syncthreads();
}
/*
 * Builds the partition masks for quicksort: for each of the first `size`
 * elements of d_array, exactly one of the two masks is set —
 * d_greater_mask[i] = 1 when d_array[i] > pivot, otherwise
 * d_less_or_equal_mask[i] = 1.
 */
__global__ void FillMasks(int *d_array, int size, int pivot, int *d_greater_mask, int *d_less_or_equal_mask) {
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size) {
        const bool greater = d_array[i] > pivot;
        d_greater_mask[i] = greater ? 1 : 0;
        d_less_or_equal_mask[i] = greater ? 0 : 1;
    }
    // Kept from the original; no shared memory in use, so this final
    // barrier has no observable effect.
    __syncthreads();
}
/*
 * Turns per-block prefix sums into global ones: every element of block b
 * gets the combined total of all preceding blocks
 * (d_block_prefix_sum[b]) added to it.
 *
 * d_prefix_sum - prefix sum calculated independently on each block
 * d_block_prefix_sum - exclusive prefix sum over the block totals
 *
 * NOTE(review): no bounds guard — threads past the logical array length
 * write out of range unless the allocation covers a whole number of
 * blocks; confirm at the call sites.
 */
__global__ void AddBlocksPrefixSum(int *d_prefix_sum, int *d_block_prefix_sum) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    d_prefix_sum[index] += d_block_prefix_sum[blockIdx.x];
    // Trailing barrier kept from the original; no effect (no shared mem).
    __syncthreads();
}
/*
 * Exclusive prefix sum (scan) of d_array[0..size-1] into d_prefix_sum,
 * computed entirely on the GPU.
 *
 * Strategy: scan each block independently with Scan, then recursively
 * scan the per-block totals and fold each block's preceding total back
 * in with AddBlocksPrefixSum.
 *
 * NOTE(review): the kernels launched here have no tail guards, so a
 * `size` that is not a multiple of block_size makes the last block
 * touch memory past the logical end; callers appear to rely on
 * allocation slack — confirm.
 */
void ParallelPrefixSum(int *d_array, int size, int *d_prefix_sum) {
    // Per-block exclusive scans; the third launch argument is the
    // bank-padded shared-memory footprint Scan requires.
    int grid_size = GridSize(size);
    Scan<<<grid_size, block_size, sizeof(int) * (block_size + GET_OFFSET(block_size))>>>(d_array, d_prefix_sum);
    // A single block already holds the final result; otherwise the
    // totals of all preceding blocks must be added to each block.
    if (grid_size > 1) {
        int block_grid_size = GridSize(grid_size);
        // Inclusive total of each block (last element + its prefix).
        int *d_block_array;
        cudaMalloc(&d_block_array, sizeof(int) * grid_size);
        // Exclusive scan of those block totals.
        int *d_block_prefix_sum;
        cudaMalloc(&d_block_prefix_sum, sizeof(int) * grid_size);
        // Collect the per-block totals.
        FindBlocks<<<block_grid_size, block_size>>>(d_array, d_prefix_sum, d_block_array);
        // Recurse: the block totals form a smaller scan problem.
        ParallelPrefixSum(d_block_array, grid_size, d_block_prefix_sum);
        // Fold the preceding-blocks totals into each block's scan.
        AddBlocksPrefixSum<<<grid_size, block_size>>>(d_prefix_sum, d_block_prefix_sum);
        cudaFree(d_block_array);
        cudaFree(d_block_prefix_sum);
    }
}
/*
 * Scatter step of the partition: routes each of the first `size`
 * elements of d_array into either d_less_or_equal_elems or
 * d_greater_elems, at the destination given by the corresponding mask's
 * exclusive prefix sum.
 */
__global__ void ParallelDivide(int *d_array,
                               int *d_less_or_equal_prefix_sum,
                               int *d_greater_prefix_sum,
                               int *d_less_or_equal_mask,
                               int *d_greater_mask,
                               int *d_less_or_equal_elems,
                               int *d_greater_elems,
                               int size) {
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size) {
        const int value = d_array[i];
        if (d_less_or_equal_mask[i]) {
            d_less_or_equal_elems[d_less_or_equal_prefix_sum[i]] = value;
        } else {
            d_greater_elems[d_greater_prefix_sum[i]] = value;
        }
    }
    // Kept from the original; no shared memory in use, no effect.
    __syncthreads();
}
/*
 * Device-to-device element copy: d_array[i] = d_source[i] for i < size.
 */
__global__ void ParallelCopy(int *d_array, int *d_source, int size) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < size) {
        d_array[i] = d_source[i];
    }
    // Kept from the original; final barrier has no observable effect.
    __syncthreads();
}
/*
 * Fetches the final element of a device array into host memory.
 * d_array - device pointer; size - element count (must be >= 1).
 * Blocking: cudaMemcpy synchronizes with prior work on the array.
 */
int GetLast(int *d_array, int size) {
    int last = 0;
    const int *device_src = d_array + (size - 1);
    cudaMemcpy(&last, device_src, sizeof(last), cudaMemcpyDeviceToHost);
    return last;
}
/*
 * Reassembles the partitioned array in place as
 *   [ elements <= pivot | pivot | elements > pivot ]
 * from the two side arrays produced by the partition step.
 */
void ArrangeElements(int *d_array,
                     int size,
                     int pivot,
                     int *d_less_or_equal_elems,
                     int *d_greater_elems,
                     int less_or_equal_size,
                     int greater_size) {
    const int grid_size = GridSize(size);
    // Left part: everything <= pivot.
    ParallelCopy<<<grid_size, block_size>>>(d_array, d_less_or_equal_elems, less_or_equal_size);
    // The pivot lands immediately after the left part.
    cudaMemcpy(d_array + less_or_equal_size, &pivot, sizeof(int), cudaMemcpyHostToDevice);
    // Right part: everything > pivot, after the pivot slot.
    ParallelCopy<<<grid_size, block_size>>>(d_array + less_or_equal_size + 1, d_greater_elems, greater_size);
}
/*
 * Partition step of QuickSort: classifies the elements of d_array
 * against `pivot` and scatters them into two freshly allocated device
 * arrays.
 *
 * d_array, size - device array and its length; the pivot itself sits at
 *     index size-1 and is excluded from the scatter (note the `size - 1`
 *     passed to ParallelDivide below).
 * d_less_or_equal_elems, d_greater_elems - output device arrays,
 *     allocated HERE; ownership passes to the caller (must cudaFree).
 * less_or_equal_size, greater_size - output element counts, taken from
 *     the last entry of each exclusive prefix sum.
 */
void ProcessMasks(int *d_array,
    int size,
    int pivot,
    int **d_less_or_equal_elems,
    int **d_greater_elems,
    int &less_or_equal_size,
    int &greater_size) {
    // 0/1 masks marking each element's side of the pivot.
    int *d_less_or_equal_mask = nullptr;
    int *d_greater_mask = nullptr;
    // Exclusive prefix sums of the masks = per-element scatter targets.
    int *d_less_or_equal_prefix_sum = nullptr;
    int *d_greater_prefix_sum = nullptr;
    cudaMalloc(&d_less_or_equal_mask, sizeof(int) * size);
    cudaMalloc(&d_greater_mask, sizeof(int) * size);
    cudaMalloc(&d_less_or_equal_prefix_sum, sizeof(int) * size);
    cudaMalloc(&d_greater_prefix_sum, sizeof(int) * size);
    int grid_size = GridSize(size);
    FillMasks<<<grid_size, block_size>>>(d_array, size, pivot,
        d_greater_mask,
        d_less_or_equal_mask);
    ParallelPrefixSum(d_less_or_equal_mask, size, d_less_or_equal_prefix_sum);
    ParallelPrefixSum(d_greater_mask, size, d_greater_prefix_sum);
    // With an exclusive scan, the last entry counts the marked elements
    // among the first size-1 positions (the pivot's own mask excluded).
    less_or_equal_size = GetLast(d_less_or_equal_prefix_sum, size);
    greater_size = GetLast(d_greater_prefix_sum, size);
    cudaMalloc(d_less_or_equal_elems, sizeof(int) * less_or_equal_size);
    // sizeof(pivot) == sizeof(int); inconsistent spelling kept as-is.
    cudaMalloc(d_greater_elems, sizeof(pivot) * greater_size);
    // Scatter the first size-1 elements (pivot excluded) to their sides.
    ParallelDivide<<<grid_size, block_size>>>(d_array,
        d_less_or_equal_prefix_sum,
        d_greater_prefix_sum,
        d_less_or_equal_mask,
        d_greater_mask,
        *d_less_or_equal_elems,
        *d_greater_elems,
        size - 1);
    cudaFree(d_less_or_equal_mask);
    cudaFree(d_greater_mask);
    cudaFree(d_less_or_equal_prefix_sum);
    cudaFree(d_greater_prefix_sum);
}
/*
 * Sorts the device array d_array[0..size-1] in ascending order.
 * Host-driven recursion: each level partitions on the GPU around the
 * range's last element, rebuilds [<=pivot | pivot | >pivot] in place,
 * then recurses into both sides.
 */
void QuickSort(int *d_array, int size) {
    // Zero or one element: already sorted.
    if (size <= 1) return;
    /// Partition
    // The last element of the range serves as the pivot.
    int pivot = GetLast(d_array, size);
    // Device arrays for the two sides; allocated by ProcessMasks and
    // freed here once ArrangeElements has copied them back.
    int *d_less_or_equal_elems = nullptr;
    int *d_greater_elems = nullptr;
    // Their element counts, filled in by ProcessMasks.
    int less_or_equal_size = 0;
    int greater_size = 0;
    ProcessMasks(d_array,
        size,
        pivot,
        &d_less_or_equal_elems,
        &d_greater_elems,
        less_or_equal_size,
        greater_size);
    // Lay the array out as [<=pivot | pivot | >pivot].
    ArrangeElements(d_array,
        size,
        pivot,
        d_less_or_equal_elems,
        d_greater_elems,
        less_or_equal_size,
        greater_size);
    cudaFree(d_less_or_equal_elems);
    cudaFree(d_greater_elems);
    // Recurse on both sides; the pivot at index less_or_equal_size is
    // already in its final position.
    QuickSort(d_array, less_or_equal_size);
    QuickSort(d_array + less_or_equal_size + 1, greater_size);
}
// Fisher–Yates shuffle of array[0..size-1] driven by std::rand
// (seed with std::srand beforehand for a reproducible permutation).
void Shuffle(int *array, int size) {
    for (int i = size - 1; i > 0; --i) {
        const int j = std::rand() % (i + 1);
        std::swap(array[i], array[j]);
    }
}
/*
 * Driver: builds a shuffled permutation of 0..size-1, sorts it on the
 * GPU with QuickSort, reports the elapsed kernel time (via CUDA events)
 * and prints the sorted result.
 */
int main() {
    size_t size = 0;
    std::cout << "Insert the size of array" << std::endl;
    std::cin >> size;

    int *h_array_to_be_sorted = new int[size];
    // size_t loop index fixes the signed/unsigned comparison of the
    // original (int i vs size_t size).
    for (size_t i = 0; i < size; ++i) {
        h_array_to_be_sorted[i] = static_cast<int>(i);
    }
    // Shuffled example to be sorted on the device.
    Shuffle(h_array_to_be_sorted, static_cast<int>(size));

    // Device copy of the array.
    int *d_array_to_be_sorted = nullptr;
    // BUG FIX: was sizeof(float) for an int array — harmless only
    // because sizeof(float) == sizeof(int) on common platforms.
    cudaMalloc(&d_array_to_be_sorted, size * sizeof(int));
    cudaMemcpy(d_array_to_be_sorted,
               h_array_to_be_sorted,
               sizeof(int) * size,
               cudaMemcpyHostToDevice);

    // Time the sort with CUDA events.
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0 /* Stream ID */);
    QuickSort(d_array_to_be_sorted, static_cast<int>(size));
    cudaEventRecord(stop, 0);

    cudaMemcpy(h_array_to_be_sorted,
               d_array_to_be_sorted,
               sizeof(int) * size,
               cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float elapsedTime = 0.0f;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    std::cout << "Elapsed time is :" << elapsedTime << std::endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    std::cout << "Sorted array:" << std::endl;
    for (size_t i = 0; i < size; ++i) {
        std::cout << h_array_to_be_sorted[i] << " ";
    }
    std::cout << "\n";

    cudaFree(d_array_to_be_sorted);
    delete[] h_array_to_be_sorted;
    return 0;
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.