serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
21,301 | #include <stdio.h>
#include <time.h>
// One thread per output element of c = a * b (N x N int matrices).
// Grid/block x covers rows, y covers columns; out-of-range threads exit early.
__global__ void matrixMulGPU( int * a, int * b, int * c, int N )
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int col = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= N || col >= N)
        return;
    int acc = 0;
    for (int k = 0; k < N; ++k)
        acc += a[row * N + k] * b[k * N + col];
    c[row * N + col] = acc;
}
// Reference CPU implementation: naive triple-loop product c = a * b
// for N x N int matrices stored row-major.
__host__ void matrixMulCPU( int * a, int * b, int * c, int N )
{
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            int acc = 0;
            for (int k = 0; k < N; ++k)
                acc += a[row * N + k] * b[k * N + col];
            c[row * N + col] = acc;
        }
    }
}
// Print an N x N int matrix: tab-separated columns, one row per line.
__host__ void printMatrix(int *mat, int N) {
    for (int r = 0; r < N; ++r) {
        for (int c = 0; c < N; ++c)
            printf("%d\t", mat[r * N + c]);
        printf("\n");
    }
}
int main()
{
    // Multiply two N x N int matrices on the GPU and the CPU, time both,
    // and verify that the two results agree element-by-element.
    int N, size, *a, *b, *c_cpu, *c_gpu;
    printf("N?\n");
    // BUG FIX: scanf result was ignored; bad input left N uninitialized.
    if (scanf("%d", &N) != 1 || N <= 0)
    {
        printf("invalid N\n");
        return 1;
    }
    size = N * N * sizeof (int); // Number of bytes of an N x N matrix
    // Allocate unified memory reachable from both host and device
    cudaMallocManaged (&a, size);
    cudaMallocManaged (&b, size);
    cudaMallocManaged (&c_cpu, size);
    cudaMallocManaged (&c_gpu, size);
    // Initialize inputs and zero both result buffers
    for( int row = 0; row < N; ++row ){
        for( int col = 0; col < N; ++col )
        {
            a[row*N + col] = row;
            b[row*N + col] = col+2;
            c_cpu[row*N + col] = 0;
            c_gpu[row*N + col] = 0;
        }
    }
    printf("\nMatrix A:\n");
    printMatrix(a, N);
    printf("\nMatrix B:\n");
    printMatrix(b, N);
    printf("\n\n");
    dim3 threads_per_block (16, 16, 1); // A 16 x 16 block of threads
    // BUG FIX (minor): ceil-divide instead of always adding one block, which
    // launched a full spare row/column of blocks whenever N % 16 == 0.
    dim3 number_of_blocks ((N + threads_per_block.x - 1) / threads_per_block.x,
                           (N + threads_per_block.y - 1) / threads_per_block.y, 1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    matrixMulGPU <<< number_of_blocks, threads_per_block >>> ( a, b, c_gpu, N );
    // BUG FIX: surface launch-configuration errors instead of silently
    // comparing against a never-written c_gpu.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop); // Wait for the GPU to finish before proceeding
    float et;
    cudaEventElapsedTime(&et, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Call the CPU version to check our work
    clock_t begin = clock();
    matrixMulCPU( a, b, c_cpu, N );
    clock_t end = clock();
    double time_spent = (double)1000 * (end - begin) / CLOCKS_PER_SEC;
    printf("GPU time: %f\n", et);
    printf("CPU time: %f", time_spent);
    // Compare the two answers to make sure they are equal
    bool error = false;
    for( int row = 0; row < N && !error; ++row )
        for( int col = 0; col < N && !error; ++col )
            if (c_cpu[row * N + col] != c_gpu[row * N + col])
            {
                printf("FOUND ERROR at c[%d][%d]\n", row, col);
                error = true;
                break;
            }
    if (!error){
        printf("\nSuccess!\n");
        printf("\nResult:\n");
        //printMatrix(c_gpu, N);
    }
    // Free all our allocated memory
    cudaFree(a); cudaFree(b);
    cudaFree( c_cpu ); cudaFree( c_gpu );
    return 0;
}
21,302 |
#include "vectorAdd.cuh"
// ---------------------------------------------------------------------------
// C = A + B
// ---------------------------------------------------------------------------
// Elementwise vector sum C = A + B; one thread per element, with a bounds
// guard so the grid may over-cover numElements.
__global__ void vectorAdd(
    const float* A,
    const float* B,
    float* const C,
    int numElements)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= numElements)
        return;
    C[idx] = A[idx] + B[idx];
}
|
21,303 | #include <iostream>
using namespace std;
// Abort the program with file/line context when a CUDA runtime call fails.
#define CUDA_CHECK_RETURN(value) {\
cudaError_t _m_cudaStat = value;\
if (_m_cudaStat != cudaSuccess) {\
fprintf(stderr, "Error %s at line %d in file %s\n", cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__);\
exit(1);\
}}
// Guarded elementwise addition: C[i] = A[i] + B[i] for i < size.
__global__ void VecSum(float *A, float *B, float *C, int size)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx < size)
        C[idx] = A[idx] + B[idx];
}
// Block-wise dot product: each block writes its partial sum of A·B into
// C[blockIdx.x]; the caller reduces the per-block partials.
// Requires blockDim.x == 256 (the cache size) and a power-of-two block
// for the tree reduction to cover every element.
__global__ void VecMul(float *A, float *B, float *C, int partSize)
{
    __shared__ float cache[256];
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int cacheIndex = threadIdx.x;
    // BUG FIX: the accumulator was declared `int`, silently truncating the
    // float products; accumulate in float instead.
    float temp = 0.0f;
    while (tid < partSize) {
        temp += A[tid] * B[tid];
        tid += blockDim.x * gridDim.x;
    }
    cache[cacheIndex] = temp;
    __syncthreads();
    // Shared-memory tree reduction; the barrier is outside the divergent
    // branch so every thread reaches it.
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (cacheIndex < s) {
            cache[cacheIndex] += cache[cacheIndex + s];
        }
        __syncthreads();
    }
    if (cacheIndex == 0) C[blockIdx.x] = cache[0];
}
// Fill both input vectors with ones (simple deterministic test data).
void InitVec(float *A, float *B, int size)
{
    for (int idx = 0; idx != size; ++idx)
        A[idx] = B[idx] = 1.0f;
}
// Print the vector to stdout, one element per line.
void printVec(float *vec, int size)
{
    for (float *p = vec; p != vec + size; ++p)
        cout << *p << endl;
}
int main(int argc, char* argv[])
{
    // Streamed, chunked vector addition: for each chunk, asynchronously copy
    // inputs to the device, add them, and copy the result back — all on one
    // stream so copies and compute are ordered but the host is not blocked.
    if (argc != 3) {
        cout << "launch parametrs: [vector size] [partSize]" << endl;
        return 1;
    }
    cudaDeviceProp prop;
    int whichDevice;
    cudaGetDevice(&whichDevice);
    cudaGetDeviceProperties(&prop, whichDevice);
    if (!prop.deviceOverlap) {
        cout << "Device does not support overlapping" << endl;
        return 1;
    }
    int full_data_size = atoi(argv[1]);
    int partSize = atoi(argv[2]);
    if (full_data_size % partSize != 0) {
        cout << "The size of the data chunk must be a multiple of the full data size" << endl;
        return 1;
    }
    float *dev_a, *dev_b, *dev_c;
    float *A, *B, *C;
    // Pinned host memory is required for truly asynchronous cudaMemcpyAsync.
    cudaHostAlloc((void**)&A, full_data_size * sizeof(float), cudaHostAllocDefault);
    cudaHostAlloc((void**)&B, full_data_size * sizeof(float), cudaHostAllocDefault);
    cudaHostAlloc((void**)&C, full_data_size * sizeof(float), cudaHostAllocDefault);
    InitVec(A, B, full_data_size);
    cudaMalloc((void**)&dev_a, full_data_size * sizeof(float));
    cudaMalloc((void**)&dev_b, full_data_size * sizeof(float));
    cudaMalloc((void**)&dev_c, full_data_size * sizeof(float));
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    for (int i = 0; i < full_data_size; i += partSize) {
        cudaMemcpyAsync(dev_a, A + i, partSize * sizeof(float), cudaMemcpyHostToDevice, stream);
        cudaMemcpyAsync(dev_b, B + i, partSize * sizeof(float), cudaMemcpyHostToDevice, stream);
        // BUG FIX: dev_c is a pure output, so the old upload of C was wasted
        // bandwidth and has been removed.
        // BUG FIX: the kernel must be bounded by the chunk size (partSize),
        // not the whole vector, and the grid must round UP so chunks that are
        // not a multiple of 256 are still fully covered.
        VecSum <<< (partSize + 255) / 256, 256, 0, stream >>> (dev_a, dev_b, dev_c, partSize);
        cudaMemcpyAsync(C + i, dev_c, partSize * sizeof(float), cudaMemcpyDeviceToHost, stream);
    }
    cudaStreamSynchronize(stream);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaGetLastError());
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cout << "time: " << elapsedTime << " ms" << endl;
    // BUG FIX: device buffers, stream, events, and pinned memory were leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaStreamDestroy(stream);
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    cudaFreeHost(A); cudaFreeHost(B); cudaFreeHost(C);
    return 0;
}
|
21,304 | #include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <iostream>
#include <fstream>
#include <ostream>
#include <istream>
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <string>
#include <vector>
// nvcc ptrs.cu -o ./bin/ptrs -gencode arch=compute_35,code=sm_35 -lm -O3 -std=c++11
using namespace std;
__device__ int *a, *b;
// NOTE(review): `ps` is passed by value, so this assignment only changes the
// kernel's local copy — the caller's pointer is never updated and this kernel
// has no observable effect. Publishing the address would require an int**
// parameter (or writing to a __device__ global).
__global__ void setptr(int *sj, int offs, int *ps)
{
ps = &sj[offs];
}
// Adds each element's right neighbour into it: sj[gid] += sj[gid + 1].
// NOTE(review): the highest-index launched thread reads sj[gid + 1] one past
// the range it covers — callers must guarantee at least one extra valid
// element after the span processed by the grid.
__global__ void someMth(int *sj)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x; //Global Thread ID
int om = sj[gid+1];
sj[gid] += om;
}
// Experiment with device-pointer arithmetic: run someMth over the first half
// of a 256-int buffer, then over a shifted window, and dump three host views.
// NOTE(review): several suspicious spots are flagged inline; treat the output
// as illustrative only.
int main()
{
int *fptr, *sptr;
int mysize = 256;
int realsz = mysize/2;
int tpb = 32;
int bks = realsz/tpb;
int rlsz = sizeof(int)*realsz;
int imem[mysize];
int rmem[mysize];
int gmem[mysize];
cudaMalloc((void **) &fptr, sizeof(int)*mysize);
for (int k=0; k<mysize; k++) imem[k] = k;
cudaMemcpy(fptr, &imem, sizeof(int) * mysize, cudaMemcpyHostToDevice);
someMth <<< bks, tpb >>> (fptr);
// Only the first rlsz bytes (128 ints) of imem are refreshed here; the tail
// printed below is the stale host-side initialization.
cudaMemcpy(&imem, fptr, rlsz, cudaMemcpyDeviceToHost);
// for (int k=0; k<realsz; k++) cout << imem[k] << " ";
// cout << endl;
// NOTE(review): sptr is never initialized before this launch, and setptr
// only assigns to its by-value parameter copy, so this call has no effect.
setptr <<< 1,1 >>> (fptr, tpb, sptr);
someMth <<< bks, tpb >>> (fptr+tpb);
// These copies move rlsz + tpb*sizeof(int) bytes (160 ints); both stay
// within the 256-int host buffers and within the 256-int device buffer.
cudaMemcpy(&rmem, fptr, rlsz+sizeof(int)*tpb, cudaMemcpyDeviceToHost);
cudaMemcpy(&gmem, fptr + (tpb*2), rlsz+sizeof(int)*tpb, cudaMemcpyDeviceToHost);
cout << "IDx || After 1 || Swap" << endl;
for (int k=0; k<realsz + tpb; k++) cout << k << " " << imem[k] << " " << rmem[k] << " " << gmem[k] << endl;
return 0;
}
|
21,305 | #include <stdio.h>
// Per-day revenue: block b handles day b, thread t handles item t (4 items).
// NOTE(review): GPU_price and GPU_out are int*, but the host fills/reads them
// as float (prices[]/out[] are float arrays), so the int arithmetic here
// operates on reinterpreted float bit patterns — almost certainly not the
// intent; the parameters should be float* end-to-end. Confirm with callers.
__global__ void sale(int *GPU_arr, int *GPU_price,int *GPU_out){
int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ int temp[4];
temp[threadIdx.x] = GPU_arr[i] * GPU_price[threadIdx.x];
__syncthreads();
if(threadIdx.x==0){
float sum = 0;
sum = temp[0]+temp[1]+temp[2]+temp[3];
GPU_out[blockIdx.x] = sum;
}
}
int main(void){
    // Weekly sales demo: 7 days x 4 items; one block per day, one thread per
    // item in the `sale` kernel.
    int days = 7;
    int items = 4;
    int arr[28] = {
        3,5,2,0,
        2,4,5,1,
        0,3,3,1,
        3,5,4,4,
        4,5,5,3,
        10,13,21,16,
        8,11,15,8
    };
    float prices[4] = {29.99,14.99,9.99,24.99};
    float out[7];
    int *GPU_arr;
    int *GPU_price;
    int *GPU_out;
    // NOTE(review): prices/out are float but the kernel takes int* — the
    // float bit patterns are reinterpreted as ints on the device. Kept as-is
    // to preserve the kernel's signature; should be float* end-to-end.
    cudaMalloc((void**)&GPU_arr,sizeof(int)*days*items);
    cudaMalloc((void**)&GPU_price,sizeof(float)*items);
    cudaMalloc((void**)&GPU_out,sizeof(float)*days);
    cudaMemcpy(GPU_arr,arr,sizeof(int)*days*items,cudaMemcpyHostToDevice);
    cudaMemcpy(GPU_price,prices,sizeof(float)*items,cudaMemcpyHostToDevice);
    sale<<<days,items>>>(GPU_arr,GPU_price,GPU_out);
    cudaMemcpy(out,GPU_out,sizeof(float)*days,cudaMemcpyDeviceToHost);
    // BUG FIX: device allocations were leaked.
    cudaFree(GPU_arr);
    cudaFree(GPU_price);
    cudaFree(GPU_out);
    return 0;
}
|
21,306 | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
// n-mode tensor-matrix product: C[i,j,r] += sum_s A[i,j,s] * B[s,r]
// for an I x J x S tensor A and an S x R matrix B.
// One thread per i index; each thread walks all (j, s, r) for its slice.
__global__
void NMode32Kernel(const float* A, const int I, const int J, const int S,
                   const float* B, const int R,
                   float* C){
    const int32_t iA = blockIdx.x * blockDim.x + threadIdx.x;
    if (iA < I) {
        for (int32_t j = 0; j < J; ++j) {
            const int32_t rowA = (iA * J + j) * S;
            const int32_t rowC = (iA * J + j) * R;
            for (int32_t s = 0; s < S; ++s) {
                const float ts = A[rowA + s];
                const float* bRow = B + s * R;
                for (int32_t r = 0; r < R; ++r) {
                    C[rowC + r] += ts * bRow[r];
                }
            }
        }
    }
}
// Host launcher: one thread per i index, 256 threads per block, grid rounded
// up; blocks until the kernel finishes.
void NMode32KernelLauncher(const float* A, const int I, const int J, const int S,
                           const float* B, const int R,
                           float* C) {
    const int threads = 256;
    const int blocks = (I + threads - 1) / threads;
    NMode32Kernel<<<blocks, threads>>>(A, I, J, S, B, R, C);
    cudaDeviceSynchronize();
}
#endif
|
21,307 | #include "includes.h"
// Bounds-guarded elementwise addition: out[i] = in1[i] + in2[i] for i < len.
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len)
        return;
    out[idx] = in1[idx] + in2[idx];
}
21,308 | #include <iostream>
#include <string>
#include <cmath>
#include <chrono>
#include <cuda.h>
#define PI 3.141592653589793
const size_t nThreadsPerBlock = 256;
// Report a failed CUDA call with its source location, then terminate.
static void HandleError(cudaError_t err, const char *file, int line )
{
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ),
            file, line );
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
// Direct use of global memory between threads may be very wrong?
__global__ void dotProd(int length, double *u, double *v, double *out) {
unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
unsigned tid_const = threadIdx.x + blockDim.x * blockIdx.x;
double temp = 0;
while (tid < length) {
temp += u[tid] * v[tid];
tid += blockDim.x * gridDim.x;
}
__syncthreads();
out[tid_const] = temp;
}
// Block-level dot product: grid-stride partial sums land in shared memory,
// then a tree reduction folds them; the block's result goes to
// out[blockIdx.x]. Assumes blockDim.x <= nThreadsPerBlock and (for the tree
// reduction to cover every slot) a power-of-two block size.
__global__ void dotProdWithSharedMem(int length, double *u, double *v, double *out) {
    __shared__ double cache[nThreadsPerBlock];
    const unsigned lane = threadIdx.x;
    double partial = 0;
    for (unsigned idx = threadIdx.x + blockDim.x * blockIdx.x;
         idx < length;
         idx += blockDim.x * gridDim.x) {
        partial += u[idx] * v[idx];
    }
    cache[lane] = partial;
    __syncthreads();
    // Halve the active range each pass; the barrier stays outside the branch
    // so all threads reach it.
    for (unsigned stride = blockDim.x / 2; stride != 0; stride /= 2) {
        if (lane < stride)
            cache[lane] += cache[lane + stride];
        __syncthreads();
    }
    if (lane == 0)
        out[blockIdx.x] = cache[0];
}
int main(int argc, char* argv[]) {
    // Usage: prog <log2 length> <threads per block> <blocks>.
    // Computes u·v three ways — global-memory kernel, shared-memory kernel,
    // and a CPU loop — and prints each result with its timing.
    // BUG FIX: argv was used unchecked; missing arguments crashed in stoi.
    if (argc != 4) {
        printf("usage: %s <log2 length> <threads per block> <blocks>\n", argv[0]);
        return 1;
    }
    size_t vec_len = 1 << std::stoi(argv[1]);
    size_t size = vec_len * sizeof(double);
    size_t nthreads = std::stoi(argv[2]);
    size_t nblocks = std::stoi(argv[3]);
    // BUG FIX: dotProdWithSharedMem indexes a fixed cache[nThreadsPerBlock]
    // with threadIdx.x, so a larger block would overrun shared memory.
    if (nthreads == 0 || nthreads > nThreadsPerBlock || nblocks == 0) {
        printf("need 1 <= threads per block <= %zu and blocks >= 1\n", nThreadsPerBlock);
        return 1;
    }
    size_t size_out = nthreads*nblocks*sizeof(double);   // dotProd partials
    size_t size_out_2 = nblocks*sizeof(double);          // per-block partials
    double *u = (double *)malloc(size);
    double *v = (double *)malloc(size);
    double *out = (double *)malloc(size_out);
    double *out_2 = (double *)malloc(size_out_2);
    double *dev_u, *dev_v, *dev_out, *dev_out_2; // Device arrays
    double res_gpu = 0;
    double res_gpu_2 = 0;
    double res_cpu = 0;
    dim3 dimGrid(nblocks, 1, 1);
    dim3 dimBlocks(nthreads, 1, 1);
    // Initiate values
    for(size_t i=0; i<vec_len; ++i) {
        u[i] = std::sin(i*PI*1E-2);
        v[i] = std::cos(i*PI*1E-2);
    }
    HANDLE_ERROR( cudaMalloc((void**)&dev_u, size) );
    HANDLE_ERROR( cudaMalloc((void**)&dev_v, size) );
    HANDLE_ERROR( cudaMalloc((void**)&dev_out, size_out) );
    HANDLE_ERROR( cudaMalloc((void**)&dev_out_2, size_out_2) );
    HANDLE_ERROR( cudaMemcpy(dev_u, u, size, cudaMemcpyHostToDevice) );
    HANDLE_ERROR( cudaMemcpy(dev_v, v, size, cudaMemcpyHostToDevice) );
    auto t1_gpu = std::chrono::system_clock::now();
    dotProd <<<dimGrid, dimBlocks>>> (vec_len, dev_u, dev_v, dev_out);
    cudaDeviceSynchronize();
    HANDLE_ERROR( cudaMemcpy(out, dev_out, size_out, cudaMemcpyDeviceToHost) );
    // Host-side reduction of the per-thread partial sums
    for(size_t i=0; i<nthreads*nblocks; ++i) {
        res_gpu += out[i];
    }
    auto t2_gpu = std::chrono::system_clock::now();
    // CPU version for result-check
    for(size_t i=0; i<vec_len; ++i) {
        res_cpu += u[i] * v[i];
    }
    auto t2_cpu = std::chrono::system_clock::now();
    // GPU version with shared memory (one partial per block)
    dotProdWithSharedMem <<<dimGrid, dimBlocks>>> (vec_len, dev_u, dev_v, dev_out_2);
    cudaDeviceSynchronize();
    HANDLE_ERROR( cudaMemcpy(out_2, dev_out_2, size_out_2, cudaMemcpyDeviceToHost) );
    for(size_t i=0; i<nblocks; ++i) {
        res_gpu_2 += out_2[i];
    }
    auto t2_gpu_2 = std::chrono::system_clock::now();
    double t_gpu = std::chrono::duration <double, std::milli> (t2_gpu - t1_gpu).count();
    double t_gpu_2 = std::chrono::duration <double, std::milli> (t2_gpu_2 - t2_cpu).count();
    double t_cpu = std::chrono::duration <double, std::milli> (t2_cpu - t2_gpu).count();
    printf("GPU result: %.18f, time consummed: %.5f ms\n", res_gpu, t_gpu);
    printf("GPU result: %.18f, time consummed: %.5f ms\n", res_gpu_2, t_gpu_2);
    printf("CPU result: %.18f, time consummed: %.5f ms\n", res_cpu, t_cpu);
    cudaFree(dev_u);
    cudaFree(dev_v);
    cudaFree(dev_out);
    cudaFree(dev_out_2);
    free(u);
    free(v);
    free(out);
    free(out_2);
    return 0;
}
|
21,309 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include "link.h"
using namespace std;
// Build candidate contact lists between segments using a linked-cell scheme:
// block mi handles segment mi; each thread scans one neighbour bin (bnei is
// strided by the total bin count nxbin*nybin*nzbin per neighbour slot).
// Outputs: potCon[mi*npcn ..] receives candidate partner indices and
// potConSize[mi] their count (grown with atomicAdd across the block).
// NOTE(review): `list` appears to be bin-major with capacity stride
// nxbin*nybin*nzbin, and the four `!=` tests below are meant to skip
// chain-adjacent partners (nseg segments per chain) — confirm against the
// host-side builder of bin/list/bnei/bnum.
__global__ void link(int *bin, int *list, int *bnei, int *bnum,
int *potCon, int *potConSize, int *npcnpt,
int *nsegpt, int *nxbinpt, int *nybinpt, int *nzbinpt){
// Scalar parameters arrive as device pointers; dereference once up front.
int npcn = *npcnpt;
int nseg = *nsegpt;
int nxbin = *nxbinpt;
int nybin = *nybinpt;
int nzbin = *nzbinpt;
int mi = blockIdx.x;
int tid = threadIdx.x;
int miBin = bnum[mi];
int nextBin = bnei[miBin + tid*nxbin*nybin*nzbin];
int nextTot = bin[nextBin];
int nj, i, pos;
// One thread zeroes this block's counter before anyone appends to it.
if (tid == 0){
potConSize[mi] = 0;
}
__syncthreads();
for (i = 0; i < nextTot; i++){
nj = list[nextBin + i*nxbin*nybin*nzbin];
// skip when segments are connected
if (mi >= nj){
continue;
}
if ((mi - (mi / nseg)*nseg) != 0 && nj == mi - 1){
continue;
}
if ((nj - (nj / nseg)*nseg) != 0 && mi == nj - 1){
continue;
}
if ((mi - (mi / nseg)*nseg) != nseg - 1 && nj == mi + 1){
continue;
}
if ((nj - (nj / nseg)*nseg) != nseg - 1 && mi == nj + 1){
continue;
}
//printf("link mi nj %4d %4d\n", mi, nj);
// Reserve an output slot atomically; warn when the list is nearly full
// (the write below still happens, so npcn must be sized generously).
pos = atomicAdd(potConSize + mi, 1);
if (pos >= npcn - 1) printf("allocate more space for potCon pos %4d\n", pos);
potCon[mi * npcn + pos] = nj;
}
}
|
21,310 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Increment each element of x in place and mirror the new value into y.
// Indexes by threadIdx.x only, so it expects a single-block launch of at
// least as many threads as elements.
__global__ void increment(float *x, float *y) {
    int me = threadIdx.x;
    // BUG FIX (minor): removed a redundant trailing __syncthreads() — no
    // shared memory and no cross-thread reads follow it.
    y[me] = x[me] += 1;
}
int main(int argc, char** argv) {
    // Increment 10 floats on the GPU and print the results.
    int n = 10;
    // arrays for host
    float *h_input;
    float *h_output;
    // allocate space on host
    h_input = (float*)malloc(n * sizeof(float));
    h_output = (float*)malloc(n * sizeof(float));
    // arrays for device
    float *d_input;
    float *d_output;
    // allocate space on device
    cudaMalloc((void**) &d_input, n * sizeof(float));
    cudaMalloc((void**) &d_output, n * sizeof(float));
    // populate host array with 0..n-1
    for (int i = 0; i < n; i++) {
        h_input[i] = i;
    }
    // copy host input to device input array
    cudaMemcpy(d_input, h_input, n * sizeof(float), cudaMemcpyHostToDevice);
    // one block of n threads: the kernel indexes by threadIdx.x only
    dim3 gridDim(1,1);
    dim3 gridBlock(n, 1);
    increment<<< gridDim, gridBlock >>>(d_input, d_output);
    // copy device output back to host output (blocking, so no explicit sync)
    cudaMemcpy(h_output, d_output, n * sizeof(float), cudaMemcpyDeviceToHost);
    // print result
    for (int i = 0; i < n; i++) {
        printf("%f\n", h_output[i]);
    }
    // free memory on GPU
    cudaFree(d_output);
    cudaFree(d_input);
    // BUG FIX: host buffers were leaked.
    free(h_input);
    free(h_output);
    return 0;
}
|
21,311 | #include <stdio.h>
#define B 1
#define TPB 256
// Flattened 1-D global thread index.
__device__ uint whoami() {
    return threadIdx.x + blockDim.x*blockIdx.x;
}
// Each thread prints a hello message tagged with its global thread id.
__global__ void greetings() {
    uint id = whoami();
    // BUG FIX: id is unsigned, so print it with %u rather than %d.
    printf("Hello world! My threadId is %u\n", id);
}
// Launch B block(s) of TPB threads, then block so device printf output is
// flushed before the process exits.
int main() {
    greetings<<<B, TPB>>>();
    cudaDeviceSynchronize();
    return 0;
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
// Compile-time switch enabling the two error-check helpers below.
#define CUDA_ERROR_CHECK
// Wrap a CUDA runtime call: abort with file/line context on failure.
#define CudaSafeCall( err ) __cudaSafeCall( err, __FILE__, __LINE__ )
// Check for a latched (or, after sync, asynchronous) CUDA error here.
#define CudaCheckError() __cudaCheckError( __FILE__, __LINE__ )
// Abort with source-location context when a CUDA runtime call reports
// failure; compiled out when CUDA_ERROR_CHECK is not defined.
inline void __cudaSafeCall( cudaError err, const char *file, const int line ) {
#ifdef CUDA_ERROR_CHECK
    if ( err == cudaSuccess )
        return;
    fprintf( stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err));
    exit(-1);
#endif
}
// Two-stage error check: first pick up any error already latched by a prior
// launch/API call, then force a device sync to surface asynchronous kernel
// faults. Compiled out when CUDA_ERROR_CHECK is not defined.
inline void __cudaCheckError( const char *file, const int line ) {
#ifdef CUDA_ERROR_CHECK
cudaError err = cudaGetLastError();
if ( cudaSuccess != err ) {
fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) );
exit( -1 );
}
// More careful checking. However, this will affect performance.
// Comment away if needed.
err = cudaDeviceSynchronize();
if( cudaSuccess != err ) {
fprintf( stderr, "cudaCheckError() with sync failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) );
exit( -1 );
}
#endif
}
// Fuse the per-thread diagonal covariances into one estimate:
// cov_est[i] = 1 / sum_j (1 / cov_j[i]) over all n_threads chains.
// Each thread inverts its own n-slice of cov_arr into cov_inv_arr; thread 0
// then reduces across the slices. Assumes a single-block launch so
// __syncthreads() spans every participating thread.
__device__ void calc_cov_est(float* cov_arr, float* cov_inv_arr, float* cov_est, int n, int n_threads) {
    int i, j;
    int threadID = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = threadID * n;
    for (i = offset; i < offset + n; i++) {
        cov_inv_arr[i] = 1.f / cov_arr[i];
    }
    // BUG FIX: barrier added BEFORE the aggregation — thread 0 reads every
    // thread's slice of cov_inv_arr, so all writes must be complete first.
    __syncthreads();
    if (threadID == 0) {
        for (i = 0; i < n; i++) {
            cov_est[i] = 0.f;
        }
        for (i = 0; i < n; i++) {
            for (j = 0; j < n_threads; j++) {
                cov_est[i] += cov_inv_arr[i + n * j];
            }
            cov_est[i] = 1.f / cov_est[i];
        }
    }
    // Barrier so no thread proceeds before the fused estimate is ready.
    __syncthreads();
}
// Fuse the per-thread means (inverse-variance weighting):
// mu_est[i] = cov_est[i] * sum_j (mu_j[i] / cov_j[i]).
// Must run after calc_cov_est (consumes cov_inv_arr and cov_est). Assumes a
// single-block launch so __syncthreads() spans every participating thread.
__device__ void calc_mu_est(float* mu_arr, float* cov_inv_arr, float* mu_inv_arr, float* cov_est, float* mu_est, int n, int n_threads) {
    int threadID = threadIdx.x + blockIdx.x * blockDim.x;
    int offset = threadID * n;
    int i, j;
    for (i = offset; i < offset + n; i++) {
        mu_inv_arr[i] = mu_arr[i] * cov_inv_arr[i];
    }
    // BUG FIX: barrier added BEFORE the aggregation — thread 0 reads every
    // thread's slice of mu_inv_arr, so all writes must be complete first.
    __syncthreads();
    if (threadID == 0) {
        for (i = 0; i < n; i++) {
            mu_est[i] = 0.f;
        }
        for (i = 0; i < n; i++) {
            for (j = 0; j < n_threads; j++) {
                mu_est[i] += mu_inv_arr[i + n * j];
            }
        }
        for (i = 0; i < n; i++) {
            mu_est[i] = cov_est[i] * mu_est[i];
        }
    }
    // Barrier so no thread proceeds before the fused estimate is ready.
    __syncthreads();
}
// Per-thread RNG streams: a fixed base seed offset by the global thread id,
// so runs are reproducible but each thread draws from a distinct stream.
__device__ void rand_init(curandState *state) {
    const int seed = 0;
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    curand_init(seed + tid, 0, 0, &state[tid]);
}
// Draw one uniform float (used as the Metropolis acceptance number) and one
// uniform integer in [0, max_int] for this thread's slots.
__device__ void get_rand_nums(curandState *state, float* rand_num, int* rand_ints, int max_int) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    rand_num[tid] = (float) (curand_uniform(&state[tid]));
    const float u = (float) (curand_uniform(&state[tid]));
    rand_ints[tid] = int(max_int * u);
}
// Two independent zero-mean normal draws for this thread, scaled by their
// respective standard deviations.
__device__ void get_rand_gaussian(curandState *state, float sigma1, float* gaussian_arr1, float sigma2, float* gaussian_arr2) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    gaussian_arr1[tid] = (float) (curand_normal(&state[tid]) * sigma1);
    gaussian_arr2[tid] = (float) (curand_normal(&state[tid]) * sigma2);
}
// Determinant of this thread's diagonal covariance slice: the product of its
// n diagonal entries.
__device__ float alt_calc_det(float* cov_arr, int n) {
    const int base = n * (threadIdx.x + blockIdx.x * blockDim.x);
    float det = 1.f;
    for (int k = 0; k < n; k++)
        det *= cov_arr[base + k];
    return det;
}
// Quadratic form (x - mu)^T C^{-1} (x - mu) for this thread's diagonal
// covariance slice; data_idx locates the start of the data vector, n is the
// dimension.
__device__ float calc_vec_mat_vec_prod(float* cov_arr, float* data, float* mu_arr, int data_idx, int n) {
    int i, j;
    int threadID = threadIdx.x + blockIdx.x * blockDim.x;
    int initial_idx = n * threadID;
    float diff;
    float cum_sum = 0.f;
    for (i = initial_idx; i < initial_idx + n; i++) {
        j = i - initial_idx + data_idx;
        diff = data[j] - mu_arr[i];
        // BUG FIX (perf): was `diff * diff * 1. / cov_arr[i]` — the double
        // literal promoted every iteration to double math for a float result.
        cum_sum += diff * diff / cov_arr[i];
    }
    return cum_sum;
}
// log |A| for this thread's diagonal slice, via the determinant helper.
__device__ float get_log_det(float* A, int n) {
    return log(alt_calc_det(A, n));
}
// Log likelihood of one data vector under this thread's diagonal Gaussian.
// NOTE(review): despite its name, cov_det is expected to already be the LOG
// determinant — get_total_log_likelihood passes get_log_det's result here.
__device__ float get_log_likelihood(float* data, float* mu_arr, float* cov_arr, float cov_det, int data_idx, int n) {
// Log likelihood, assuming Gaussian errors.
float t1, t2, t3;
float L;
// Large finite stand-in for +infinity (1e19; well inside float range).
float fl_inf = 10000000000000000000;
t1 = -0.5 * cov_det;
t2 = -0.5 * calc_vec_mat_vec_prod(cov_arr, data, mu_arr, data_idx, n);
t3 = -0.5 * n * log(2 * M_PI);
// printf("Terms: %f, %f, %f\n", t1, t2, t3);
L = t1 + t2 + t3;
// Map NaNs (e.g. from a non-positive variance) to -"infinity" so the
// Metropolis step always rejects such proposals.
if (isnan(L)) {
return -1 * fl_inf;
} else {
return L;
}
}
// Sum of per-vector log likelihoods over this thread's n_data_per_thread
// data vectors (each of dimension n), stored contiguously in `data` starting
// at threadID * n * n_data_per_thread.
__device__ float get_total_log_likelihood(float* cov, float* mu, float* data, int n_data_per_thread, int n_data, int n) {
// Gets the log likelihood for all of the data files, summing the result.
int data_idx;
// Hoisted out of the loop: log|cov| is the same for every data vector.
float cov_det = get_log_det(cov, n);
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
int data_offset = threadID * n * n_data_per_thread;
float cum_L = 0.f;
for (data_idx = data_offset; data_idx < data_offset + n * n_data_per_thread; data_idx+=n) {
cum_L += get_log_likelihood(data, mu, cov, cov_det, data_idx, n);
}
return cum_L;
}
// Refresh all per-thread random draws for one MCMC step: a uniform
// acceptance number, a parameter index in [0, 2n] (n means + n variances),
// and Gaussian perturbations for mu and cov.
__device__ void generate_random_nums(curandState *state, float* rand_mu, float* rand_cov, float mu_step, float cov_step, float* rand_num, int* rand_ints, int n) {
    const int n_params = 2 * n;
    get_rand_nums(state, rand_num, rand_ints, n_params);
    get_rand_gaussian(state, mu_step, rand_mu, cov_step, rand_cov);
}
// Propose a bump to one covariance entry of this thread's chain; leave
// new_cov untouched (implicit rejection) when the proposal would make the
// variance non-positive. param_idx is in [n, 2n) and maps to entry
// param_idx - n of the slice.
__device__ void perturb_cov(float* old_cov, float* new_cov, int param_idx, float rand_cov_num, int n) {
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int slot = tid * n + (param_idx - n);
    const float proposal = old_cov[slot] + rand_cov_num;
    if (proposal > 0)
        new_cov[slot] = proposal;
}
// Perturb one randomly chosen parameter of this thread's chain: indices
// [0, n) pick a mean component, [n, 2n) pick a covariance entry (delegated
// to perturb_cov, which enforces positivity).
__device__ void perturb_params(float* old_cov, float* old_mu, float* new_cov, float* new_mu, int* rand_ints, float* rand_mu, float* rand_cov, int n) {
    const int tid = threadIdx.x + blockDim.x * blockIdx.x;
    const int offset = tid * n;
    const int param_idx = rand_ints[tid];
    if (param_idx < n)
        new_mu[param_idx + offset] = old_mu[param_idx + offset] + rand_mu[tid];
    else
        perturb_cov(old_cov, new_cov, param_idx, rand_cov[tid], n);
}
// One Metropolis accept/reject decision for this thread's chain: accept the
// proposal (new_cov/new_mu) when its likelihood improves, otherwise accept
// with probability exp(new_L - old_L) using the pre-drawn uniform in
// rand_num. Writes take_step[threadID] (1 = accept) and updates curr_L.
__device__ void mcmc_step(float* curr_L, float* new_cov, float* new_mu, float* old_cov, float* old_mu, int* rand_ints, float* rand_mu, float* rand_cov, float* rand_num, int n, int n_data_per_thread, int n_data, int* take_step, float* data) {
int threadID = threadIdx.x + blockDim.x * blockIdx.x;
float old_L = curr_L[threadID];
float new_L = get_total_log_likelihood(new_cov, new_mu, data, n_data_per_thread, n_data, n);
float threshold;
// Note: might need to restrict elements of cov to keep matrix positive semi-definite
if (new_L > old_L) {
take_step[threadID] = 1;
curr_L[threadID] = new_L;
} else {
threshold = exp(new_L - old_L);
if (rand_num[threadID] < threshold) {
take_step[threadID] = 1;
curr_L[threadID] = new_L;
} else {
take_step[threadID] = 0;
}
}
}
// Euclidean distance between this thread's mean slice and the reference.
__device__ float calc_l2_norm(float* mu, float* true_mu, int n) {
    const int base = n * (threadIdx.x + blockIdx.x * blockDim.x);
    float acc = 0.;
    for (int k = 0; k < n; k++) {
        const float d = true_mu[base + k] - mu[base + k];
        acc += d * d;
    }
    return sqrt(acc);
}
// Set every entry of this thread's covariance slice to the starting
// variance (5.0).
__device__ void initialize_cov(float* cov, int n) {
    const float val = 5.0f;
    const int base = n * (threadIdx.x + blockDim.x * blockIdx.x);
    for (int k = 0; k < n; k++)
        cov[base + k] = val;
}
// Fill this thread's mean slice with the common starting value.
__device__ void initialize_means(float initial_mean, float* curr_mu, int n) {
    const int base = n * (threadIdx.x + blockDim.x * blockIdx.x);
    for (int k = 0; k < n; k++)
        curr_mu[base + k] = initial_mean;
}
// Count the number of n-dimensional data vectors in data_<n>.txt by counting
// whitespace-separated floats and dividing by n.
__host__ int count_n_data(int n) {
    FILE *input;
    unsigned int i = 0;
    float temp;
    char input_file [50];
    sprintf(input_file, "data_%d.txt", n);
    input = fopen(input_file, "r");
    // BUG FIX: fopen was unchecked; a missing data file crashed in fscanf.
    if (input == NULL) {
        fprintf(stderr, "could not open %s\n", input_file);
        exit(1);
    }
    while ((fscanf(input, "%f", &temp)) != EOF) {
        i++;
    }
    fclose(input);
    return i / n;
}
// Read n_data vectors of dimension n from data_<n>.txt into `data`, which
// the caller must have sized to hold at least n * n_data floats.
__host__ void read_data(int n_data, int n, float* data) {
    FILE *input;
    unsigned int i = 0;
    char input_file [50];
    sprintf(input_file, "data_%d.txt", n);
    input = fopen(input_file, "r");
    // BUG FIX: fopen was unchecked; a missing data file crashed in fscanf.
    if (input == NULL) {
        fprintf(stderr, "could not open %s\n", input_file);
        exit(1);
    }
    while ((fscanf(input, "%f", &data[i])) != EOF) {
        i++;
    }
    fclose(input);
}
// Copy this thread's n-element slice from parent into child.
__device__ void vec_cpy(float* parent_mat, float* child_mat, int n) {
    const int base = n * (threadIdx.x + blockDim.x * blockIdx.x);
    for (int k = 0; k < n; k++)
        child_mat[base + k] = parent_mat[base + k];
}
// Kernel wrapper so the host can drive one round of per-thread RNG draws.
__global__ void kernel_rand_nums(curandState* state, float* rand_mu, float* rand_cov, float mu_step, float cov_step, float* rand_num, int* rand_ints, int n) {
generate_random_nums(state, rand_mu, rand_cov, mu_step, cov_step, rand_num, rand_ints, n);
}
// Metropolis MCMC with the dataset staged in dynamic shared memory.
// Launch contract: one block of n_threads threads plus
// n * n_data * sizeof(float) bytes of dynamic shared memory for s_data.
// Each thread runs an independent chain over its own data slice; every
// `spacing` steps the chains are fused (calc_cov_est/calc_mu_est) and thread
// 0 appends the fused mean to all_mu_est.
// NOTE(review): the helpers' __syncthreads() only spans one block, so this
// kernel only fuses correctly with a single-block launch — confirm caller.
__global__ void mcmc_shared(int n, int n_data, int n_data_per_thread, int n_threads, int n_steps, int spacing, float mu_step, float cov_step, curandState* state, float* curr_cov, float* new_cov, float* curr_mu, float* new_mu, float* rand_num, int* rand_ints, float* rand_mu, float* rand_cov, float* curr_L, int* take_step, float* data, float* mu_inv_arr, float* cov_inv_arr, float* mu_est, float* cov_est, float* all_mu_est) {
int step_count = 0;
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
extern __shared__ float s_data[];
float initial_mean = 1.0f;
int local_take_step;
int estimation_offset;
int i;
// Stage this thread's slice of the dataset into shared memory. Each thread
// only ever reads back its own slice, so no barrier follows the copy.
int copy_offset = threadID * n * n_data_per_thread;
for (i = copy_offset; i < copy_offset + n_data_per_thread * n; i++) {
s_data[i] = data[i];
}
// begin code
initialize_cov(curr_cov, n);
initialize_means(initial_mean, curr_mu, n);
rand_init(state);
curr_L[threadID] = get_total_log_likelihood(curr_cov, curr_mu, s_data, n_data_per_thread, n_data, n);
vec_cpy(curr_cov, new_cov, n);
vec_cpy(curr_mu, new_mu, n);
while (step_count < n_steps) {
// Propose: fresh random draws, then perturb one parameter of the chain.
generate_random_nums(state, rand_mu, rand_cov, mu_step, cov_step, rand_num, rand_ints, n);
perturb_params(curr_cov, curr_mu, new_cov, new_mu, rand_ints, rand_mu, rand_cov, n);
// Accept/reject via the Metropolis criterion (writes take_step/curr_L).
mcmc_step(curr_L, new_cov, new_mu, curr_cov, curr_mu, rand_ints, rand_mu, rand_cov, rand_num, n, n_data_per_thread, n_data, take_step, s_data);
local_take_step = take_step[threadID];
if (local_take_step == 1) {
vec_cpy(new_cov, curr_cov, n);
vec_cpy(new_mu, curr_mu, n);
}
// Every thread shares the same step_count, so this branch — and the
// barriers inside the estimation helpers — is taken uniformly.
if (step_count % spacing == 0) {
calc_cov_est(curr_cov, cov_inv_arr, cov_est, n, n_threads);
calc_mu_est(curr_mu, cov_inv_arr, mu_inv_arr, cov_est, mu_est, n, n_threads);
if (threadID == 0) {
estimation_offset = step_count / spacing * n;
// ERROR
for (i = estimation_offset; i < estimation_offset + n; i++) {
all_mu_est[i] = mu_est[i - estimation_offset];
}
}
__syncthreads();
}
step_count += 1;
}
}
// Metropolis MCMC, global-memory variant of mcmc_shared: identical chain
// logic but the likelihood reads `data` directly from device global memory.
// Each thread runs an independent chain; every `spacing` steps the chains
// are fused and thread 0 appends the fused mean to all_mu_est.
// NOTE(review): the estimation helpers rely on __syncthreads() covering all
// n_threads chains, so this only fuses correctly with one block — confirm.
__global__ void mcmc(int n, int n_data, int n_data_per_thread, int n_threads, int n_steps, int spacing, float mu_step, float cov_step, curandState* state, float* curr_cov, float* new_cov, float* curr_mu, float* new_mu, float* rand_num, int* rand_ints, float* rand_mu, float* rand_cov, float* curr_L, int* take_step, float* data, float* mu_inv_arr, float* cov_inv_arr, float* mu_est, float* cov_est, float* all_mu_est) {
int step_count = 0;
int threadID = threadIdx.x + blockIdx.x * blockDim.x;
float initial_mean = 1.0f;
int local_take_step;
int estimation_offset;
int i;
// begin code
initialize_cov(curr_cov, n);
initialize_means(initial_mean, curr_mu, n);
rand_init(state);
curr_L[threadID] = get_total_log_likelihood(curr_cov, curr_mu, data, n_data_per_thread, n_data, n);
vec_cpy(curr_cov, new_cov, n);
vec_cpy(curr_mu, new_mu, n);
while (step_count < n_steps) {
// Propose: fresh random draws, then perturb one parameter of the chain.
generate_random_nums(state, rand_mu, rand_cov, mu_step, cov_step, rand_num, rand_ints, n);
perturb_params(curr_cov, curr_mu, new_cov, new_mu, rand_ints, rand_mu, rand_cov, n);
// Accept/reject via the Metropolis criterion (writes take_step/curr_L).
mcmc_step(curr_L, new_cov, new_mu, curr_cov, curr_mu, rand_ints, rand_mu, rand_cov, rand_num, n, n_data_per_thread, n_data, take_step, data);
local_take_step = take_step[threadID];
if (local_take_step == 1) {
vec_cpy(new_cov, curr_cov, n);
vec_cpy(new_mu, curr_mu, n);
}
// Uniform branch (same step_count everywhere), so the barriers inside the
// estimation helpers are reached by every thread.
if (step_count % spacing == 0) {
calc_cov_est(curr_cov, cov_inv_arr, cov_est, n, n_threads);
calc_mu_est(curr_mu, cov_inv_arr, mu_inv_arr, cov_est, mu_est, n, n_threads);
if (threadID == 0) {
estimation_offset = step_count / spacing * n;
// ERROR
for (i = estimation_offset; i < estimation_offset + n; i++) {
all_mu_est[i] = mu_est[i - estimation_offset];
}
}
__syncthreads();
}
step_count += 1;
}
}
// Print the supported command-line flags (parsed by parse_args below).
void print_usage() {
printf("Usage: MCMC options are...\n");
printf(" Number of steps: --n_steps=n_steps\n");
printf(" Number of dimensions: --n_dim=n_dim\n");
printf(" Evaluation frequency: --eval_freq=eval_freq\n");
printf(" Store data in shared memory (requires small datasets): --shared_memory=1\n");
}
// Return the index of the "--flag" in query_array that `arg` matches, where
// arg has the form "--flag=value"; -1 if none matches.
// BUG FIX: the old version compared only the single character at index 5,
// which reads out of bounds for short args and accepts any string that
// happens to share that one character. Match the full flag prefix instead
// and require the '=' separator expected by parse_args.
int query_flag_idx(char* arg, const char* query_array[]) {
    for (int i = 0; i < 4; i++) {
        size_t len = strlen(query_array[i]);
        if (strncmp(arg, query_array[i], len) == 0 && arg[len] == '=') {
            return i;
        }
    }
    return -1;
}
// Parse --flag=value arguments into [n_steps, n_dim, eval_freq,
// shared_memory], printing and substituting defaults for anything not
// supplied. The returned 4-int array is malloc'd; the caller owns it.
int* parse_args(int argc, char *argv[]) {
const char* query_array[4];
query_array[0] = "--n_steps";
query_array[1] = "--n_dim";
query_array[2] = "--eval_freq";
query_array[3] = "--shared_memory";
int* arg_vals = (int*)malloc(4 * sizeof(int));
int default_n_steps = 10000;
int default_n = 10;
int default_spacing = 1000;
int default_shared = 0;
int i;
int arg_idx;
int init_idx;
// -1 marks "not provided yet" for each of the four options.
for (i = 0; i < 4; i++) {
arg_vals[i] = -1;
}
for (i = 1; i < argc; i++) {
if (!strcmp(argv[i], "-h") || !strcmp(argv[i], "--help")) {
print_usage();
exit(0);
} else {
arg_idx = query_flag_idx(argv[i], query_array);
if (arg_idx != -1) {
// Advance past "--flag=" and parse the value that follows.
init_idx = strlen(query_array[arg_idx]) + 1;
argv[i] += init_idx;
sscanf(argv[i], "%d", &arg_vals[arg_idx]);
} else {
// Unknown flag: show usage and stop.
print_usage();
exit(0);
}
}
}
if (arg_vals[0] == -1) {
printf("Number of steps not provided. Defaulting to %d steps.\n", default_n_steps);
arg_vals[0] = default_n_steps;
}
if (arg_vals[1] == -1) {
printf("Number of dimensions not provided. Defaulting to %d dimensions.\n", default_n);
arg_vals[1] = default_n;
}
if (arg_vals[2] == -1) {
printf("Evaluation frequency not provided. Defaulting to evaluating every %d steps.\n", default_spacing);
arg_vals[2] = default_spacing;
}
if (arg_vals[3] == -1) {
printf("Shared memory not specified. Defaulting to using global memory.\n");
arg_vals[3] = default_shared;
}
return arg_vals;
}
int main(int argc, char *argv[]) {
int* arg_vals = parse_args(argc, argv);
int n_steps = arg_vals[0];
int n = arg_vals[1];
int spacing = arg_vals[2];
int use_shared = arg_vals[3];
int n_threads = 1024;
int n_data = count_n_data(n);
int n_data_per_thread = n_data / n_threads;
int n_blocks = 1;
int n_sample_points = n_steps / spacing;
int i;
float mu_step = 0.2;
float cov_step = 0.2;
FILE* f;
cudaEvent_t start, stop;
curandState* state;
float* rand_num;
float* rand_mu;
float* rand_cov;
int* rand_ints;
float* curr_mu;
float* new_mu;
float* curr_cov;
float* new_cov;
float* curr_L;
int* take_step;
float* data;
float* mu_inv;
float* cov_inv;
float* mu_est;
float* cov_est;
float* all_mu_est;
float* h_data = (float*)malloc(n * n_data * sizeof(float));
float* h_cov = (float*)malloc(n * n_threads * n_blocks * sizeof(float));
float* h_mu = (float*)malloc(n * n_threads * n_blocks * sizeof(float));
float* h_rand = (float*)malloc(n_threads * n_blocks * sizeof(float));
float* h_mu_est = (float*)malloc(n * n_sample_points * sizeof(float));
CudaSafeCall(cudaMalloc(&state, n_blocks * n_threads * sizeof(curandState)));
CudaSafeCall(cudaMalloc((void**)&rand_num, n_blocks * n_threads * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&rand_mu, n_blocks * n_threads * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&rand_cov, n_blocks * n_threads * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&rand_ints, n_blocks * n_threads * sizeof(int)));
CudaSafeCall(cudaMalloc((void**)&curr_L, n_threads * n_blocks * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&take_step, n_threads * n_blocks * sizeof(int)));
CudaSafeCall(cudaMalloc((void**)&curr_mu, n * n_threads * n_blocks * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&new_mu, n * n_threads * n_blocks * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&curr_cov, n * n_threads * n_blocks * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&new_cov, n * n_threads * n_blocks * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&data, n * n_data * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&mu_inv, n * n_blocks * n_threads * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&cov_inv, n * n_threads * n_blocks * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&mu_est, n * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&cov_est, n * sizeof(float)));
CudaSafeCall(cudaMalloc((void**)&all_mu_est, n * n_sample_points * sizeof(float)));
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Array for covariance elements held by every thread
read_data(n_data, n, h_data);
CudaSafeCall(cudaMemcpy(data, h_data, n * n_data * sizeof(float), cudaMemcpyHostToDevice));
// kernel_rand_nums<<<1, 1024>>>(state, rand_mu, rand_cov, mu_step, cov_step, rand_num, rand_ints, n);
//cudaEventRecord(start);
//mcmc<<<n_blocks, n_threads>>>(n, n_data, n_data_per_thread, n_threads, n_steps, spacing, mu_step, cov_step, state, curr_cov, new_cov, curr_mu, new_mu, rand_num, rand_ints, rand_mu, rand_cov, curr_L, take_step, data, mu_inv, cov_inv, mu_est, cov_est, all_mu_est);
//cudaEventRecord(stop);
//CudaCheckError();
if (use_shared == 1) {
cudaEventRecord(start);
mcmc_shared<<<n_blocks, n_threads, n * n_data * sizeof(float)>>>(n, n_data, n_data_per_thread, n_threads, n_steps, spacing, mu_step, cov_step, state, curr_cov, new_cov, curr_mu, new_mu, rand_num, rand_ints, rand_mu, rand_cov, curr_L, take_step, data, mu_inv, cov_inv, mu_est, cov_est, all_mu_est);
cudaEventRecord(stop);
CudaCheckError();
} else {
cudaEventRecord(start);
mcmc<<<n_blocks, n_threads>>>(n, n_data, n_data_per_thread, n_threads, n_steps, spacing, mu_step, cov_step, state, curr_cov, new_cov, curr_mu, new_mu, rand_num, rand_ints, rand_mu, rand_cov, curr_L, take_step, data, mu_inv, cov_inv, mu_est, cov_est, all_mu_est);
cudaEventRecord(stop);
CudaCheckError();
}
CudaSafeCall(cudaMemcpy(h_cov, curr_cov, n * n_threads * sizeof(float), cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy(h_mu, curr_mu, n * n_threads * sizeof(float), cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy(h_mu_est, all_mu_est, n * n_sample_points * sizeof(float), cudaMemcpyDeviceToHost));
CudaSafeCall(cudaMemcpy(h_rand, rand_num, n_threads * sizeof(float), cudaMemcpyDeviceToHost));
cudaEventSynchronize(stop);
// for (i = 0; i < n * n_threads; i++) {
// printf("%f ", h_mu[i]);
// }
float milliseconds = 0.f;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Code executed in %f ms.\n", milliseconds);
f = fopen("mu_data.txt", "w");
for (i = 0; i < n * n_threads; i++) {
fprintf(f, "%f ", h_mu[i]);
}
fclose(f);
f = fopen("cov_data.txt", "w");
for (i = 0; i < n * n_threads; i++) {
fprintf(f, "%f ", h_cov[i]);
}
fclose(f);
f = fopen("rand_data.txt", "w");
for (i = 0; i < n_threads; i++) {
fprintf(f, "%f ", h_rand[i]);
}
fclose(f);
f = fopen("mu_evolution.txt", "w");
for (i = 0; i < n_sample_points * n; i++) {
fprintf(f, "%f ", h_mu_est[i]);
// printf("%f ", h_mu_est[i]);
}
fclose(f);
free(h_data);
free(h_cov);
free(h_mu);
free(h_rand);
cudaFree(state);
free(h_mu_est);
cudaFree(mu_inv);
cudaFree(cov_inv);
cudaFree(mu_est);
cudaFree(cov_est);
cudaFree(all_mu_est);
cudaFree(rand_num);
cudaFree(rand_mu);
cudaFree(rand_cov);
cudaFree(rand_ints);
cudaFree(curr_L);
cudaFree(take_step);
cudaFree(curr_mu);
cudaFree(new_mu);
cudaFree(curr_cov);
cudaFree(new_cov);
cudaFree(data);
return 0;
}
|
21,313 | #include "simple_particle.cuh"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "math_functions.h"
#include <stdio.h>
__constant__ simpleParticleSystem d_sps[1];
__global__ void generateParticles();
__global__ void renderParticles(uchar4* devPtr, int img_width, int img_height);
__global__ void updateParticles(float passed_time);
__device__ float2 get_normal_vector(float rand_num);
__device__ float get_energy(float2 p1, float2 p2, float dist_bound_powerd);
__device__ uchar4 get_color_from_energy(float energy);
__device__ float2 get_acceleration(int index);
__device__ void update_particle_velocity(int index, float2 acc, float passed_time);
__device__ int update_particle_position(int index, float passed_time); //return whether the particle is dead
// Allocate all device-side buffers of the particle system.
// Per-particle arrays are sized for MAX_PARTICLE_SIZE; the random-number
// buffer holds three values (position / orientation / speed) per particle
// of one generation batch. Errors are reported once via cudaGetLastError.
void init_particles_cuda(simpleParticleSystem &sps) {
    const int n_max = sps.MAX_PARTICLE_SIZE;
    const int n_batch = sps.ONE_BATCH_PARTICLE_SIZE;
    cudaMalloc((void**)&sps.energy, sizeof(*sps.energy) * n_max);
    cudaMalloc((void**)&sps.position, sizeof(*sps.position) * n_max);
    cudaMalloc((void**)&sps.velocity, sizeof(*sps.velocity) * n_max);
    cudaMalloc((void**)&sps.remain_time, sizeof(*sps.remain_time) * n_max);
    cudaMalloc((void**)&sps.rand_data, sizeof(*sps.rand_data) * n_batch * 3);
    const cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Memory Allocation Error: %s\n", cudaGetErrorString(err));
}
// Free every device buffer allocated by init_particles_cuda.
// Fix: the original assigned each cudaFree result to a local `er` that was
// never read, and relied only on a trailing cudaGetLastError(). Here the
// per-call return codes are actually inspected; the first failure is kept
// and reported with the same message as before.
void destroy_particles_cuda(simpleParticleSystem &sps) {
    cudaError_t err = cudaSuccess;
    cudaError_t er;
    if ((er = cudaFree(sps.energy)) != cudaSuccess) err = er;
    if ((er = cudaFree(sps.position)) != cudaSuccess && err == cudaSuccess) err = er;
    if ((er = cudaFree(sps.velocity)) != cudaSuccess && err == cudaSuccess) err = er;
    if ((er = cudaFree(sps.remain_time)) != cudaSuccess && err == cudaSuccess) err = er;
    if ((er = cudaFree(sps.rand_data)) != cudaSuccess && err == cudaSuccess) err = er;
    if (err != cudaSuccess)
        printf("Memory Free Error: %s\n", cudaGetErrorString(err));
}
// Publish the host-side system descriptor to the device by copying it into
// the __constant__ slot d_sps; kernels read all configuration through it.
void copy_to_device_sps(simpleParticleSystem &sps) {
    const cudaError_t status =
        cudaMemcpyToSymbol(d_sps, &sps, sizeof(simpleParticleSystem));
    if (status != cudaSuccess)
        printf("Constant Memory Copy Error: %s\n", cudaGetErrorString(status));
}
// Spawn one batch of particles: a single block with one thread per new
// particle. The kernel reads everything else from the constant d_sps.
void generate_particles(int generate_size) {
    generateParticles<<<1, generate_size>>>();
}
// Advance all particles by passed_time seconds.
// NOTE(review): "updata" is a typo for "update"; kept because callers use
// this name. Launches one block of generate_size threads — the kernel
// itself strides over the whole particle pool.
void updata_particles(int generate_size, float passed_time) {
updateParticles << < 1, generate_size >> > (passed_time);
}
// Rasterize the particle field into the devPtr pixel buffer.
// One thread per pixel, 16x16 tiles; the grid is rounded up so the whole
// image is covered (the kernel bounds-checks the overhang itself).
void render_particles(uchar4* devPtr, int img_width, int img_height) {
    const int tile = 16;
    dim3 block(tile, tile);
    dim3 grid((img_width + tile - 1) / tile,
              (img_height + tile - 1) / tile);
    renderParticles<<<grid, block>>>(devPtr, img_width, img_height);
}
// Kernel: spawn one batch of particles (one thread per new particle).
// The particle pool is organized in batches of ONE_BATCH_PARTICLE_SIZE;
// thread 0 scans batch heads for a free slot (remain_time == 0) and shares
// the start index with the block via shared memory. Each thread then fills
// position, velocity and lifetime for its particle, consuming three random
// numbers from rand_data (position, orientation, speed — advanced by
// pointer bumps of generate_size between uses).
// Assumes a single-block launch with generate_size threads (see
// generate_particles); __syncthreads() below relies on that.
__global__ void generateParticles()
{
float2 *position = (*d_sps).position;
float2 *velocity = (*d_sps).velocity;
float *remain_time = (*d_sps).remain_time;
float *rand = (*d_sps).rand_data;
int generate_size = (*d_sps).ONE_BATCH_PARTICLE_SIZE;
int max_size = (*d_sps).MAX_PARTICLE_SIZE;
unsigned int index = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ unsigned int generate_start_index;
// Thread 0 finds the first free batch head and publishes it to the block.
// NOTE(review): max_size is a per-thread local — only thread 0 subtracts
// generate_size from it, so the `generate_start_index > max_size` check
// below uses a different bound on thread 0 than on all other threads when
// the pool is nearly full. Confirm whether that divergence is intended.
if (index == 0) {
max_size -= generate_size;
generate_start_index = 0;
while (generate_start_index <= max_size) {
if (remain_time[generate_start_index] == 0)
break;
generate_start_index += generate_size;
}
}
__syncthreads();
// No free batch: the whole block bails out.
if (generate_start_index > max_size)
return;
int pid = generate_start_index + index;
float x;
float y;
float2 velocity_orientation;
float n_velocity;
// Generate random position and velocity according to the emitter type.
switch ((*d_sps).TYPE)
{
case LineGenerator:
// Position: random point on the segment generator_line[1]..generator_line[0].
x = rand[index] * ((*d_sps).generator_line[0].x - (*d_sps).generator_line[1].x) + (*d_sps).generator_line[1].x;
y = rand[index] * ((*d_sps).generator_line[0].y - (*d_sps).generator_line[1].y) + (*d_sps).generator_line[1].y;
position[pid] = make_float2(x, y);
rand += generate_size;
pid = generate_start_index + index;
// Orientation: random unit vector; speed: uniform in [MIN, MAX]_VELOCITY.
velocity_orientation = get_normal_vector(rand[index]);
rand += generate_size;
n_velocity = rand[index] * ((*d_sps).MAX_VELOCITY - (*d_sps).MIN_VELOCITY) + (*d_sps).MIN_VELOCITY;
velocity[pid].x = n_velocity * velocity_orientation.x;
velocity[pid].y = n_velocity * velocity_orientation.y;
break;
case CircleGenerator:
// Position: random point on the ellipse around generator_center.
float rand_pos = rand[index];
float2 vec = get_normal_vector(rand_pos);
x = vec.x * (*d_sps).generator_radius.x + (*d_sps).generator_center.x;
y = vec.y * (*d_sps).generator_radius.y + (*d_sps).generator_center.y;
position[pid] = make_float2(x, y);
rand += generate_size;
pid = generate_start_index + index;
// Velocity points roughly outward: the radial angle jittered by up to
// a quarter turn (rand_orient/2 - rand_orient/4 of a full circle).
float rand_orient = rand[index];
rand_orient = rand_pos + (rand_orient / 2 - rand_orient / 4);
velocity_orientation = get_normal_vector(rand_orient);
rand += generate_size;
n_velocity = rand[index] * ((*d_sps).MAX_VELOCITY - (*d_sps).MIN_VELOCITY) + (*d_sps).MIN_VELOCITY;
velocity[pid].x = n_velocity * velocity_orientation.x;
velocity[pid].y = n_velocity * velocity_orientation.y;
break;
default:
break;
}
// Arm the particle's lifetime. NOTE(review): for an unknown TYPE this still
// runs, marking a particle live with uninitialized position/velocity.
remain_time[pid] = (*d_sps).LIFE_TIME;
}
// Kernel: compute the color of one output pixel from nearby particles.
// One thread per pixel on a 2D grid; pixels outside the image or outside
// the LIFE_BOUND rectangle are skipped. The pixel's energy accumulates
// contributions from every living particle (via get_energy), saturating
// at 1, and is then mapped to a color.
__global__ void renderParticles(uchar4* devPtr, int img_width, int img_height) {
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= img_width || y >= img_height)
return;
// Only pixels inside the life-bound rectangle can receive energy.
if (!(x >= (*d_sps).LIFE_BOUND[0] && x <= (*d_sps).LIFE_BOUND[2]
&& y <= (*d_sps).LIFE_BOUND[1] && y >= (*d_sps).LIFE_BOUND[3]))
return;
int generate_size = (*d_sps).ONE_BATCH_PARTICLE_SIZE;
int max_size = (*d_sps).MAX_PARTICLE_SIZE;
float energy = 0;
// Squared cutoff radius, precomputed once per pixel.
float dist_bound_powerd = (*d_sps).ENERGY_SCOPE * (*d_sps).ENERGY_SCOPE;
float2 pos = make_float2(x, y);
// Walk the pool batch by batch; a batch whose head particle has
// remain_time == 0 is entirely dead and is skipped.
for (int start_index = 0; start_index < max_size - generate_size; start_index += generate_size)
{
if ((*d_sps).remain_time[start_index] != 0) {
// The batch-head particle (start_index) is a liveness sentinel and is
// deliberately not rendered.
for (int index = start_index + 1; index < start_index + generate_size; ++index) {
if ((*d_sps).remain_time[index] != 0) {
energy += get_energy((*d_sps).position[index], pos, dist_bound_powerd);
// Saturate: once the pixel is fully lit, stop scanning.
if (energy >= 1) {
energy = 1;
break;
}
}
}
if (energy >= 1) {
break;
}
}
}
int offset = x + y * img_width;
devPtr[offset] = get_color_from_energy(energy);
}
// Kernel: advance every living particle by passed_time seconds.
// The pool is organized in blockDim-sized batches; the particle at each
// batch's start_index is a liveness sentinel whose remain_time says whether
// the batch is alive. Each thread handles one particle and strides by the
// total thread count.
// Fixes over the original:
//  - living_particle_num was reset by EVERY thread with no barrier before
//    other threads started incrementing it (lost updates), and incremented
//    with a plain += (non-atomic read-modify-write race). Now one thread
//    resets it, a barrier publishes the zero, and increments use atomicAdd.
// NOTE(review): the __syncthreads() calls assume MAX_PARTICLE_SIZE is a
// multiple of the launch's total thread count so every thread of a block
// executes the same number of loop iterations — confirm at the call site.
__global__ void updateParticles(float passed_time) {
    unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int strip = gridDim.x * blockDim.x;
    unsigned int start_index = blockIdx.x * blockDim.x;
    __shared__ int living_particle_num;
    while (index < (*d_sps).MAX_PARTICLE_SIZE) {
        // Reset the block-wide survivor counter exactly once per iteration,
        // and make the zero visible before anyone increments.
        if (threadIdx.x == 0)
            living_particle_num = 0;
        __syncthreads();
        // start_index is uniform across the block, so this branch does not
        // diverge and the barrier inside it is safe.
        if ((*d_sps).remain_time[start_index] != 0) {
            if (index != start_index) {
                float2 acc = get_acceleration(index);
                update_particle_velocity(index, acc, passed_time);
                int is_living = update_particle_position(index, passed_time);
                if (is_living) {
                    // Many threads update the shared counter concurrently.
                    atomicAdd(&living_particle_num, 1);
                }
            }
            __syncthreads();
            // The sentinel stays alive only while the batch has survivors.
            if (index == start_index) {
                if (living_particle_num == 0)
                    (*d_sps).remain_time[index] = 0;
                else
                    (*d_sps).remain_time[index] = 1.0f;
            }
        }
        index += strip;
        start_index += strip;
    }
}
// Map a uniform random number in [0, 1) to a unit vector on the circle
// (rand_num is interpreted as a fraction of a full turn).
__device__ float2 get_normal_vector(float rand_num) {
    float2 v;
    sincosf(rand_num * 2 * PI, &v.y, &v.x);
    return v;
}
// Energy contributed by the particle at p1 to the pixel at p2.
// Falls off with distance, reaching 0 at the squared cutoff radius
// dist_bound_powerd; a particle exactly on the pixel contributes the
// peak value 0.1f.
// Fix: the original used double literals (0.1), silently promoting the
// return expression to double-precision arithmetic in a float-only
// device function; all constants are now float.
__device__ float get_energy(float2 p1, float2 p2, float dist_bound_powerd) {
    float dx = p1.x - p2.x;
    float dy = p1.y - p2.y;
    float dist_powered = dx * dx + dy * dy;
    if (dist_powered > dist_bound_powerd)
        return 0.0f;
    if (dist_powered == 0.0f)
        return 0.1f;
    return 0.1f * sqrtf(dist_bound_powerd - dist_powered) / sqrtf(dist_bound_powerd);
}
// Map an energy in [0, 1] to an RGBA flame color; zero energy yields a
// fully transparent black pixel.
__device__ uchar4 get_color_from_energy(float energy) {
    if (energy == 0)
        return make_uchar4(0, 0, 0, 0);
    return make_uchar4((unsigned char)(90 * energy + 160),   // red, biased bright
                       (unsigned char)(180 * energy),        // green
                       (unsigned char)(60 * energy),         // blue
                       (unsigned char)(255 * energy));       // alpha
}
// Acceleration applied to a particle; currently a constant +y "gravity"
// identical for every particle (index is unused but kept for the callers).
__device__ float2 get_acceleration(int index) {
    (void)index;
    return make_float2(0.0f, 40.0f);
}
// Integrate velocity for one particle: v += a * dt.
__device__ void update_particle_velocity(int index, float2 acc, float passed_time) {
    float2 *v = &(*d_sps).velocity[index];
    v->x += acc.x * passed_time;
    v->y += acc.y * passed_time;
}
// Age one particle and integrate its position: p += v * dt.
// Returns 1 if the particle is still alive afterwards, 0 if it expired
// (lifetime ran out) or left the LIFE_BOUND rectangle; dead particles get
// remain_time forced to 0 so the pool can reuse their slot.
__device__ int update_particle_position(int index, float passed_time) {
(*d_sps).remain_time[index] -= passed_time;
if ((*d_sps).remain_time[index] <= 0) {
// Lifetime exhausted: clamp to exactly 0 (0 is the "free slot" marker).
(*d_sps).remain_time[index] = 0;
return 0;
}
float2 *pos = &(*d_sps).position[index];
(*pos).x += (*d_sps).velocity[index].x * passed_time;
(*pos).y += (*d_sps).velocity[index].y * passed_time;
float x = (*pos).x;
float y = (*pos).y;
// LIFE_BOUND layout per these comparisons: [0]=left, [1]=top(max y),
// [2]=right, [3]=bottom(min y) — strictly inside counts as alive.
if (x > (*d_sps).LIFE_BOUND[0] && x < (*d_sps).LIFE_BOUND[2]
&& y < (*d_sps).LIFE_BOUND[1] && y > (*d_sps).LIFE_BOUND[3]) {
// Disabled experiment: dynamically growing a tight bounding box around
// live particles (would race across threads as written).
//if (x < (*d_sps).BOUND_BOX[0] + (*d_sps).ENERGY_SCOPE)
// (*d_sps).BOUND_BOX[0] = x - (*d_sps).ENERGY_SCOPE;
//else if (x >(*d_sps).BOUND_BOX[2] - (*d_sps).ENERGY_SCOPE)
// (*d_sps).BOUND_BOX[2] = x + (*d_sps).ENERGY_SCOPE;
//if (y < (*d_sps).BOUND_BOX[3] + (*d_sps).ENERGY_SCOPE)
// (*d_sps).BOUND_BOX[3] = y - (*d_sps).ENERGY_SCOPE;
//else if (y >(*d_sps).BOUND_BOX[1] - (*d_sps).ENERGY_SCOPE)
// (*d_sps).BOUND_BOX[1] = y + (*d_sps).ENERGY_SCOPE;
return 1;
}
// Out of bounds: kill the particle.
(*d_sps).remain_time[index] = 0;
return 0;
}
21,314 | #include<stdio.h>
#include<math.h>
#include <fstream>
#include <iostream>
using namespace std;
#define THREADS_PER_BLOCK 1024
#define NUMBER_OF_BLOCKS 1024
#define DEBUG 0
//initially, 6 & 9
// number of threads_per_block*blocks should be atleast V-1.
// threads per block should be greater than or equal to number_of_blocks and both should be power of 2.
// only bcz of the restriction of cuda kernel two.
// if the vertices begin with 1 not 0, be careful, got to map it accordingly.
//destinationVertexArray decrease the value of indices by 1, and when result is out, increase the indices by 1.
//R2 array will also begin with 2 in that case, as the R1 initialization would be 1.
// Read `edges` directed edges (u v w) from stdin into a CSR-like compact
// adjacency list: indexArray[u] ends up holding the prefix offset past
// vertex u's edges, destinationVertexArray stores 0-based targets (v-1),
// weightArray the weights.
// Assumes the input is sorted/grouped by source vertex u, with 1-based
// vertex numbering starting at u = 1, and that every vertex has at least
// one outgoing edge — TODO confirm with the input generator.
// NOTE(review): the running `count` for the LAST source vertex is never
// flushed into indexArray after the loop; main() compensates by special-
// casing the last vertex's endIndex, so the final slot is presumably
// unused — verify before reusing this function elsewhere.
void initializeGraph(int *indexArray , int *destinationVertexArray , int *weightArray, int edges, int vertices)
{
// to be read from file or generators for now make static.
int u = 1,v,w;
int count = 0;
int prev = 1;
for(int i = 0 ; i < edges ; i++)
{
prev = u;
cin >>u>>v>>w;
if(prev == u)
{
// Still the same source vertex: extend its edge run.
count++;
}
else
{
// Source changed: close out prev's run as a prefix offset.
indexArray[prev] = count + indexArray[prev-1];
count = 1;
}
destinationVertexArray[i] = v-1;
weightArray[i] = w;
}
}
// R1 source, R2 destination,R3 weight
// Build the initial MST candidate-edge list after seeding the tree with
// vertex 0. For each outside vertex i+1 (i in [0, vertices-1)):
//   R1[i] = tree endpoint of its best edge (all 0 initially),
//   R2[i] = the outside vertex's number (0 later marks "absorbed"),
//   R3[i] = best known weight to the tree; only slots reachable from
//           vertex 0 are written — the rest keep the caller's "infinity".
void initMSTEdgeList(int *R1 , int *R2 , int *R3, int *indexArray , int *destinationVertexArray, int *weightArray, int vertices)
{
    // Record the weight of every edge leaving vertex 0. R2/R3 slots are
    // indexed by (destination value - 1), mirroring the original layout.
    for (int e = indexArray[0]; e < indexArray[1]; ++e)
        R3[destinationVertexArray[e] - 1] = weightArray[e];
    // All vertices 1..vertices-1 start outside the tree, tentatively
    // attached to vertex 0.
    for (int i = 0; i < vertices - 1; ++i) {
        R1[i] = 0;
        R2[i] = i + 1;
    }
}
// Print `size` ints to stdout, tab-separated, terminated by a newline.
void printArray(int *a , int size)
{
    for (int i = 0; i < size; ++i)
        printf("%d\t", a[i]);
    printf("\n");
}
// Relax all edges of the vertex just added to the MST (*d_nearestVertex):
// one thread per edge slot in [startIndex, endIndex] of the adjacency
// arrays. A destination still outside the tree (R2 != 0) gets its best
// weight/source updated when this edge is cheaper.
__global__ void updateMSTList(int startIndex,int endIndex, int *d_nearestVertex, int *destinationVertexArray,int *weightArray,int *d_R1,int *d_R2,int *d_R3)
{
    int tid = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
    int e = tid + startIndex;              // edge slot handled by this thread
    if (e < startIndex || e > endIndex)
        return;
    int destVertex = destinationVertexArray[e];
    if (destVertex == 0)
        return;                            // edge back to the root vertex
    if (d_R2[destVertex - 1] == 0)
        return;                            // destination already inside the MST
    if (d_R3[destVertex - 1] > weightArray[e]) {
        // Cheaper connection found: record the new weight and tree endpoint.
        d_R3[destVertex - 1] = weightArray[e];
        d_R1[destVertex - 1] = *d_nearestVertex;
    }
}
// Kernel: per-block shared-memory min-reduction over the candidate edge
// list (d_R2 = outside vertex or 0 if absorbed, d_R3 = its best weight).
// Single-block case (secondKernelRequired == 0): thread 0 publishes the
// winner to *d_nearestVertex and marks it absorbed in d_R2.
// Multi-block case: each block writes its local winner to T1/T2 for
// findNearestVertexKernelTwo to reduce.
// NOTE(review): the __syncthreads() calls sit inside
// `if(threadId < vertices-1)` — that guard diverges within a block whenever
// vertices-1 is not a multiple of THREADS_PER_BLOCK, which is undefined
// behavior for a barrier. Confirm launch sizes or hoist the barriers.
__global__ void findNearestVertexKernelOne( int *d_R2 , int *d_R3, int *T1, int *T2, int *d_nearestVertex,int secondKernelRequired,int vertices)
{
int blockNo = blockIdx.x;
int threadId = blockNo * THREADS_PER_BLOCK + threadIdx.x;
__shared__ int r2[THREADS_PER_BLOCK],r3[THREADS_PER_BLOCK];
// Neutral element for the reduction: vertex 0 (= "absorbed/none") with a
// sentinel weight of 10000.
r2[threadIdx.x] = 0;
r3[threadIdx.x] = 10000;
if(threadId < vertices-1)
{
r2[threadIdx.x] = d_R2[threadId];
r3[threadIdx.x] = d_R3[threadId];
__syncthreads();
int i = 0;
// Classic tree reduction: halve the active range each pass, keeping the
// smaller weight (and its vertex) in the lower half.
for(i = THREADS_PER_BLOCK/2; i > 0 ; i = i>>1)
{
if(threadIdx.x < i)
{
if(r2[threadIdx.x] != 0 && r2[i + threadIdx.x]!=0)
{
// we also want to store the indices of the minimum location, so that we can get the desired vertex.
if(r3[threadIdx.x] > r3[i+threadIdx.x])
{
//update index.
r2[threadIdx.x] = r2[i+threadIdx.x];
//update weight.
r3[threadIdx.x] = r3[i+threadIdx.x];
}
//else its the same.
}
else if(r2[threadIdx.x] == 0 && r2[i+threadIdx.x] == 0)
{
// do nothing
}
else if(r2[threadIdx.x] == 0 && r2[i+threadIdx.x] != 0)
{
// Only the partner slot holds a live candidate: take it.
r2[threadIdx.x] = r2[i+threadIdx.x];
r3[threadIdx.x] = r3[i+threadIdx.x];
}
// else same present value needs to be considered in the 4th case.
}
__syncthreads();
}
// only one value needed, first thread will write.
if(threadIdx.x == 0)
{
if(!secondKernelRequired)
{
// update the nearest_Vertex
*d_nearestVertex = r2[0];
// mark vertex as used..
d_R2[r2[0]-1] = 0;
}
// for multiple blocks, because they are in the shared memory, set T1 T2 here which is index and weight for second kernel..
else
{
T1[blockIdx.x] = r2[0];
T2[blockIdx.x] = r3[0];
}
}
}
}
// Kernel: second-stage min-reduction over the per-block winners written by
// findNearestVertexKernelOne (d_T1 = candidate vertex or 0, d_T2 = weight).
// Launched as a single block; requires blockDim.x >= NUMBER_OF_BLOCKS/2
// (the file header documents this constraint). Thread 0 publishes the
// global winner and marks it absorbed in d_R2.
// NOTE(review): d_R1 and d_R3 are accepted but never used here.
__global__ void findNearestVertexKernelTwo(int *d_R1 , int *d_R2 , int *d_R3 , int *d_T1, int *d_T2, int *d_nearestVertex)
{
// no need to use the shared memory, only one block.
for(int i = NUMBER_OF_BLOCKS/2; i > 0 ; i = i>>1)
{
if(threadIdx.x < i)
{
if(d_T1[threadIdx.x] != 0 && d_T1[i + threadIdx.x]!=0)
{
// we also want to store the indices of the minimum location, so that we can get the desired vertex.
if(d_T2[threadIdx.x] > d_T2[i+threadIdx.x])
{
//update index.
d_T1[threadIdx.x] = d_T1[i+threadIdx.x];
//update weight.
d_T2[threadIdx.x] = d_T2[i+threadIdx.x];
}
//else its the same.
}
else if(d_T1[threadIdx.x] == 0 && d_T1[i+threadIdx.x] == 0)
{
// do nothing
}
else if(d_T1[threadIdx.x] == 0 && d_T1[i+threadIdx.x] != 0)
{
// Only the partner slot holds a live candidate: take it.
d_T1[threadIdx.x] = d_T1[i+threadIdx.x];
d_T2[threadIdx.x] = d_T2[i+threadIdx.x];
}
// else same present value needs to be considered in the 4th case.
}
__syncthreads();
}
if(threadIdx.x == 0)
{
// Publish the global nearest vertex and mark it as inside the MST.
*d_nearestVertex = d_T1[0];
d_R2[d_T1[0]-1] = 0;
}
}
// Driver for a GPU Prim's MST: reads (vertices, edges) and an edge list
// from stdin, builds a compact adjacency list, then repeats vertices-2
// times: find the cheapest outside vertex (one- or two-stage reduction
// kernel depending on size), then relax its edges with updateMSTList.
// Prints the chosen edges, total cost, and elapsed GPU time.
int main()
{
// graph representation arrays (host)
int *h_indexArray;
int *h_weightArray;
int *h_destinationVertexArray;
int secondKernelRequired = 0;
// device Graph
int *d_indexArray;
int *d_weightArray;
int *d_destinationVertexArray;
// MST Edge list (host): R1 = tree endpoint, R2 = outside vertex, R3 = weight
int *h_R1;
int *h_R2;
int *h_R3;
int *h_nearestVertex;
int *h_T1;
int *h_T2;
// device MST list
int *d_R1;
int *d_R2;
int *d_R3;
int *d_nearestVertex;
int *d_T1;
int *d_T2;
int edges,vertices;
cin>>vertices>>edges;
cout << vertices << edges<< endl;
// Input lists each undirected edge twice; halve to the undirected count
// (arrays below are sized 2*edges to keep both directions).
edges = edges/2;
// iteration indexes for updation
int startIndex, endIndex;
// allocate memory to compact adjacency list
h_indexArray = (int *)malloc(vertices * sizeof(int));
h_weightArray = (int *)malloc(2 * edges * sizeof(int));
h_destinationVertexArray = (int *)malloc(2 * edges * sizeof(int));
h_nearestVertex = (int *)malloc(sizeof(int));
memset(h_indexArray,0,vertices * sizeof(int));
memset(h_weightArray,0,2 * edges * sizeof(int));
memset(h_destinationVertexArray,0,2 * edges * sizeof(int));
*h_nearestVertex = 0;
h_R1 = (int *)malloc((vertices-1) * sizeof(int));
h_R2 = (int *)malloc((vertices-1) * sizeof(int));
h_R3 = (int *)malloc((vertices-1) * sizeof(int));
h_T1 = (int *)malloc(NUMBER_OF_BLOCKS * sizeof(int));
h_T2 = (int *)malloc(NUMBER_OF_BLOCKS * sizeof(int));
// Byte-wise memset: each int becomes 0x01010101 = 16843009, serving as
// the "infinity" initial weight for R3.
memset(h_R3,1,(vertices-1)*sizeof(int));
memset(h_T1,0,NUMBER_OF_BLOCKS * sizeof(int));
memset(h_T2,0,NUMBER_OF_BLOCKS * sizeof(int));
initializeGraph(h_indexArray,h_destinationVertexArray,h_weightArray,2*edges,vertices);
initMSTEdgeList(h_R1,h_R2,h_R3,h_indexArray,h_destinationVertexArray,h_weightArray,vertices);
// cuda memory allocation of graph representation
cudaMalloc(&d_indexArray,(vertices) * sizeof(int));
cudaMalloc(&d_weightArray,2 * edges * sizeof(int));
cudaMalloc(&d_destinationVertexArray,2 * edges * sizeof(int));
cudaMalloc(&d_nearestVertex,sizeof(int));
// cuda memory allocation of MST list
cudaMalloc(&d_R1,(vertices-1) * sizeof(int));
cudaMalloc(&d_R2,(vertices-1) * sizeof(int));
cudaMalloc(&d_R3,(vertices-1) * sizeof(int));
cudaMalloc(&d_T1, NUMBER_OF_BLOCKS * sizeof(int));
cudaMalloc(&d_T2, NUMBER_OF_BLOCKS * sizeof(int));
// start time
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
// copy host to device graph and initial MST list
cudaMemcpy(d_indexArray,h_indexArray, vertices* sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_destinationVertexArray,h_destinationVertexArray, 2 * edges * sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_weightArray,h_weightArray,2 * edges * sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_nearestVertex,h_nearestVertex,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_R1,h_R1, (vertices - 1)* sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_R2,h_R2, (vertices - 1)* sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_R3,h_R3, (vertices - 1)* sizeof(int),cudaMemcpyHostToDevice);
// MEMSET T1 AND T2
cudaMemset(d_T1,0,NUMBER_OF_BLOCKS * sizeof(int));
cudaMemset(d_T2,0,NUMBER_OF_BLOCKS * sizeof(int));
if(DEBUG)
{
printArray(h_R1,vertices-1);
printArray(h_R2,vertices-1);
printArray(h_R3,vertices-1);
}
// Two-stage reduction only needed when the candidate list spans blocks.
if( vertices-1 > THREADS_PER_BLOCK)
{
secondKernelRequired = 1;
}
// kernel launch: one outside vertex absorbed per iteration.
for(int v = 0 ; v < vertices-2; v++)
{
findNearestVertexKernelOne<<<NUMBER_OF_BLOCKS,THREADS_PER_BLOCK>>>(d_R2, d_R3, d_T1, d_T2, d_nearestVertex,secondKernelRequired,vertices);
if(secondKernelRequired)
{
if(DEBUG)
{
printf("\n Yes Requird \n");
cudaMemcpy(h_T1,d_T1, NUMBER_OF_BLOCKS* sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_T2,d_T2, NUMBER_OF_BLOCKS * sizeof(int),cudaMemcpyDeviceToHost);
printArray(h_T1,NUMBER_OF_BLOCKS);
printArray(h_T2,NUMBER_OF_BLOCKS);
}
findNearestVertexKernelTwo<<<1,THREADS_PER_BLOCK>>>(d_R1, d_R2, d_R3, d_T1, d_T2, d_nearestVertex);
}
if(DEBUG)
{
cudaMemcpy(h_R1,d_R1, (vertices - 1)* sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_R2,d_R2, (vertices - 1)* sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_R3,d_R3, (vertices - 1)* sizeof(int),cudaMemcpyDeviceToHost);
printf("\nAfter Minimum\n");
printArray(h_R1,vertices-1);
printArray(h_R2,vertices-1);
printArray(h_R3,vertices-1);
printf("%d\n",*h_nearestVertex);
}
// Blocking copy of the winner; also serializes against the kernels above.
cudaMemcpy(h_nearestVertex,d_nearestVertex, sizeof(int),cudaMemcpyDeviceToHost);
// Edge range of the newly absorbed vertex (last vertex's run extends to
// the end of the edge arrays — see initializeGraph's unflushed last count).
startIndex = h_indexArray[*h_nearestVertex];
if(*(h_nearestVertex) == vertices-1)
endIndex = 2*edges-1;
else
endIndex = h_indexArray[*(h_nearestVertex)+1]-1;
if(DEBUG)
{
printf("\nstart : %d end : %d\n",startIndex,endIndex);
}
// update the list
updateMSTList<<<NUMBER_OF_BLOCKS,THREADS_PER_BLOCK>>>(startIndex, endIndex , d_nearestVertex, d_destinationVertexArray, d_weightArray, d_R1, d_R2, d_R3);
if(DEBUG)
{
cudaMemcpy(h_R1,d_R1, (vertices - 1)* sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_R2,d_R2, (vertices - 1)* sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_R3,d_R3, (vertices - 1)* sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_nearestVertex,d_nearestVertex, sizeof(int),cudaMemcpyDeviceToHost);
printf("\nAfter Update\n");
printArray(h_R1,vertices-1);
printArray(h_R2,vertices-1);
printArray(h_R3,vertices-1);
printf("%d\n",*h_nearestVertex);
}
}
// for generating the output...
cudaMemcpy(h_R1,d_R1, (vertices - 1)* sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_R2,d_R2, (vertices - 1)* sizeof(int),cudaMemcpyDeviceToHost);
cudaMemcpy(h_R3,d_R3, (vertices - 1)* sizeof(int),cudaMemcpyDeviceToHost);
// end time
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
// Emit the MST: row i is the edge (R1[i]+1) -- (i+2) with weight R3[i],
// mapping back to the input's 1-based vertex numbering.
printf("u\tv\tw\n");
long cost = 0;
for(int i = 0; i < vertices-1;i++)
{
printf("%d\t%d\t%d\t\n",h_R1[i]+1,i+2,h_R3[i]);
cost = cost + h_R3[i];
}
cout<<"\ncost:"<<cost;
printf("\nTime: %fms\n",time);
}
|
21,315 | #include "includes.h"
// Placeholder kernel: intentionally empty (no parameters, no work).
__global__ void square_array() {
}
21,316 | #include <stdio.h>
#include <time.h>
#include <cuda.h>
// Create a CUDA event and record it on the default stream as a timestamp.
// Ownership passes to the caller, who must cudaEventDestroy it.
__host__ cudaEvent_t get_time(void) {
    cudaEvent_t stamp;
    cudaEventCreate(&stamp);
    cudaEventRecord(stamp);
    return stamp;
}
// Pulled from module 6 assignment
// Fill host_data_ptr[0 .. num_elem) with pseudo-random ints in [0, 20),
// drawn from the process-wide rand() stream.
__host__ void generate_rand_data(int * host_data_ptr, const int num_elem)
{
    for (int i = 0; i < num_elem; ++i)
        host_data_ptr[i] = rand() % 20;
}
// Elementwise kernel: C[i] = (A[i]*B[i] + A[i] + B[i])^2.
// One thread per element; the guard handles grid overhang.
__global__ void gpuKernel(int *A, int *B, int *C, const int num_elem) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= num_elem)
        return;
    // Stage operands in registers, square the combined term, store once.
    const int a = A[idx];
    const int b = B[idx];
    const int t = a * b + a + b;
    C[idx] = t * t;
}
// Process arraySize elements through gpuKernel in N-element chunks using
// two CUDA streams, ping-ponging copy/compute so H2D copies, kernel runs
// and D2H copies overlap. Returns elapsed milliseconds (0 if the device
// cannot overlap copy and compute).
// NOTE(review): each loop iteration consumes 2*N elements (N per stream),
// but main() only validates arraySize % N == 0 — when arraySize / N is
// odd, the final iteration's stream1 copies read/write N elements past the
// host buffers. Requires arraySize % (2*N) == 0; confirm at the call site.
__host__ float execute_concurrent_streamed_kernel(int arraySize, int N, int tpb) {
const int h_byteSize = arraySize*sizeof(int);
const int d_byteSize = N*sizeof(int);
cudaDeviceProp prop;
int whichDevice;
// Following taken from the book "CUDA by Example"
cudaGetDevice(&whichDevice);
cudaGetDeviceProperties(&prop, whichDevice);
// "A GPU supporting device overlap possesses the capacity to simultaneously
// execute a CUDA C kernel while performing a copy between device and host memory."
if(!prop.deviceOverlap) {
printf("Device will not handle overlaps, so no speedup from streams\n");
return 0;
}
cudaStream_t stream0, stream1;
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
int *h_a, *h_b, *h_c;
// Create two sets of GPU buffers
int *d_a0, *d_b0, *d_c0; // Buffers used in stream0
int *d_a1, *d_b1, *d_c1; // Buffers used in stream1
// Allocate pinned memory, cudaMemcpyAsync requires host memory be page-locked
cudaHostAlloc((void **)&h_a, h_byteSize, cudaHostAllocDefault);
cudaHostAlloc((void **)&h_b, h_byteSize, cudaHostAllocDefault);
cudaHostAlloc((void **)&h_c, h_byteSize, cudaHostAllocDefault);
// Used in stream0
cudaMalloc((void**) &d_a0, d_byteSize);
cudaMalloc((void**) &d_b0, d_byteSize);
cudaMalloc((void**) &d_c0, d_byteSize);
// Used in stream1
cudaMalloc((void**) &d_a1, d_byteSize);
cudaMalloc((void**) &d_b1, d_byteSize);
cudaMalloc((void**) &d_c1, d_byteSize);
// Fill host arrays with random data
generate_rand_data(h_a, arraySize);
generate_rand_data(h_b, arraySize);
// Timers
cudaEvent_t startT, stopT;
float deltaT;
startT = get_time();
/* =================================================================================
* We are only copying part of full data each time. Queueing in a ping-pong fashion
* like shown below optimizes the execution timeline. Trying to queue all stream0
* operations and then queue all stream1 operations will cause the copy back to host
* memory in stream0 to block the copy to device for stream1. Now copies can start
* in stream1 while stream0's kernel is executing.
* =================================================================================
*/
for(int i=0; i<arraySize; i+=N*2) {
// Queue up copy of data for a array in both streams
cudaMemcpyAsync(d_a0, h_a+i, d_byteSize, cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(d_a1, h_a+i+N, d_byteSize, cudaMemcpyHostToDevice, stream1);
// Queue up copy of data for b array in both streams
cudaMemcpyAsync(d_b0, h_b+i, d_byteSize, cudaMemcpyHostToDevice, stream0);
cudaMemcpyAsync(d_b1, h_b+i+N, d_byteSize, cudaMemcpyHostToDevice, stream1);
// Queue up running of gpu kernel
gpuKernel<<<N/tpb, tpb, 0, stream0>>>(d_a0, d_b0, d_c0, N);
gpuKernel<<<N/tpb, tpb, 0, stream1>>>(d_a1, d_b1, d_c1, N);
// Queue up copy of data from device to pinned memory
cudaMemcpyAsync(h_c+i, d_c0, d_byteSize, cudaMemcpyDeviceToHost, stream0);
cudaMemcpyAsync(h_c+i+N, d_c1, d_byteSize, cudaMemcpyDeviceToHost, stream1);
}
// Drain both pipelines before reading the clock.
cudaStreamSynchronize(stream0);
cudaStreamSynchronize(stream1);
stopT = get_time();
cudaEventSynchronize(stopT);
cudaEventElapsedTime(&deltaT, startT, stopT);
// Cleanup memory
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
cudaFree(d_a0);
cudaFree(d_b0);
cudaFree(d_c0);
cudaFree(d_a1);
cudaFree(d_b1);
cudaFree(d_c1);
cudaEventDestroy(startT);
cudaEventDestroy(stopT);
cudaStreamDestroy(stream0);
cudaStreamDestroy(stream1);
return deltaT;
}
// Baseline (non-streamed) variant: one synchronous H2D copy of each input,
// one kernel launch over the whole array, one D2H copy of the result.
// Returns elapsed milliseconds for the copy+compute+copy sequence, timed
// with CUDA events. Assumes arraySize is a multiple of tpb (checked by
// main) so the launch covers every element exactly.
__host__ float execute_kernel(int arraySize, int tpb) {
const int h_byteSize = arraySize*sizeof(int);
int *h_a, *h_b, *h_c;
// Create two sets of GPU buffers
int *d_a, *d_b, *d_c; // Buffers used in stream1
// Allocate pinned memory, cudaMemcpyAsync requires host memory be page-locked
cudaHostAlloc((void **)&h_a, h_byteSize, cudaHostAllocDefault);
cudaHostAlloc((void **)&h_b, h_byteSize, cudaHostAllocDefault);
cudaHostAlloc((void **)&h_c, h_byteSize, cudaHostAllocDefault);
cudaMalloc((void**) &d_a, h_byteSize);
cudaMalloc((void**) &d_b, h_byteSize);
cudaMalloc((void**) &d_c, h_byteSize);
// Fill host arrays with random data
generate_rand_data(h_a, arraySize);
generate_rand_data(h_b, arraySize);
// Timers
cudaEvent_t startT, stopT;
float deltaT;
startT = get_time();
// Copy data to device (synchronous cudaMemcpy: implicitly serializes).
cudaMemcpy(d_a, h_a, h_byteSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, h_byteSize, cudaMemcpyHostToDevice);
gpuKernel<<<arraySize/tpb, tpb>>>(d_a, d_b, d_c, arraySize);
cudaMemcpy(h_c, d_c, h_byteSize, cudaMemcpyDeviceToHost);
stopT = get_time();
cudaEventSynchronize(stopT);
cudaEventElapsedTime(&deltaT, startT, stopT);
// Cleanup memory
cudaFreeHost(h_a);
cudaFreeHost(h_b);
cudaFreeHost(h_c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaEventDestroy(startT);
cudaEventDestroy(stopT);
return deltaT;
}
// Compare streamed vs. plain execution of gpuKernel.
// argv: [1] total element count, [2] chunk size per stream iteration,
// [3] threads per block.
// Fix: the streamed path consumes 2*N elements per loop iteration (N per
// stream), so arraySize must divide evenly by 2*N. The original checked
// only arraySize % N, which let an odd arraySize/N slip through and made
// execute_concurrent_streamed_kernel read/write N elements past the host
// buffers in its last iteration. Non-positive inputs are rejected too.
int main(int argc, char **argv) {
    int arraySize = 4096; // Total number of elements to process.
    int N = 256;          // Number of elements to pass to the GPU at one time.
    int tpb = 128;        // Threads per block.
    if (argc >= 2)
        arraySize = atoi(argv[1]);
    if (argc >= 3)
        N = atoi(argv[2]);
    if (argc >= 4)
        tpb = atoi(argv[3]);
    if (arraySize <= 0 || N <= 0 || tpb <= 0
            || arraySize % (2 * N) != 0 || N % tpb != 0) {
        printf("Number of total threads is not divisible by number of elements to process in each stream iteration.\n");
        return 0;
    }
    float delta_concurrent = execute_concurrent_streamed_kernel(arraySize, N, tpb);
    float delta_normal = execute_kernel(arraySize, tpb);
    printf("========================\n");
    printf("Summary\n");
    printf("Total Threads: %d\n", arraySize);
    printf("Number of concurrent kernel instances: %d\n", N);
    printf("Thread Size: %d\n", tpb);
    printf("========================\n");
    printf("Time to copy memory and execute kernel with two streams running concurrently.\n");
    printf("duration: %fms\n", delta_concurrent);
    printf("========================\n");
    printf("Time to copy memory and execute kernel running using a normal kernel execution.\n");
    printf("duration: %fms\n\n\n", delta_normal);
}
|
21,317 | #include "includes.h"
// Set matrix[id] = value for every element whose column index lies in
// [id_min, id_max]. The matrix is a flat row-major buffer of `size`
// elements with `cols` columns; launched as a 2D grid of 1D blocks,
// flattened into a single linear thread id.
__global__ void SetMatrixVauleMinMaxX( float* matrix, int cols, int size, int id_min, int id_max, float value)
{
    const int id = blockDim.x * blockIdx.y * gridDim.x
                 + blockDim.x * blockIdx.x
                 + threadIdx.x;
    if (id >= size)
        return;                    // grid overhang
    const int id_column = id % cols;
    if (id_column >= id_min && id_column <= id_max)
        matrix[id] = value;
}
21,318 | #include "includes.h"
// Row (horizontal) convolution: one thread per output pixel (ix, iy).
// h_Filter holds 2*filterR+1 taps; taps falling outside the row are
// treated as zero (clamp-to-zero border).
// Fixes over the original:
//  - the global store sat INSIDE the tap loop, issuing 2*filterR+1
//    redundant writes of the partial sum per pixel; it now runs once.
//  - added an (ix, iy) bounds guard so a grid that overhangs the image
//    no longer reads/writes out of bounds.
__global__ void convolutionRowGPU(double *h_Dst, double *h_Src, double *h_Filter, int imageW, int imageH, int filterR){
    int ix = blockIdx.x * blockDim.x + threadIdx.x;
    int iy = blockIdx.y * blockDim.y + threadIdx.y;
    if (ix >= imageW || iy >= imageH)
        return;
    double sum = 0;
    for (int k = -filterR; k <= filterR; k++) {
        int d = ix + k;  // source column touched by filter tap k
        if (d >= 0 && d < imageW) {
            sum += h_Src[iy * imageW + d] * h_Filter[filterR - k];
        }
    }
    h_Dst[iy * imageW + ix] = sum;
}
21,319 | #include "includes.h"
//Library Definition
//Constant Definition
#define PI 3.141592654
#define blocksize 32
#define Repetitions 8192
//Print matrix into standard output
void print(double * M,int cols,int rows);
void dot(double * a,double * b, double & c, int cols);
void Create_New_Matrix(double * M,double * New,int * vec, int p0, int pp,int nn);
/*
DEVICE FUNCTIONS
*/
//Matrix transposition (Rows and Cols of M)
// Elementwise weighted sum Msum = alpha*M1 + beta*M2 for a column-major
// rows x cols matrix; one thread per element (x -> row, y -> column).
__global__ void matrixSum(const double * M1,const double * M2,double * Msum,double alpha,double beta, int rows, int cols)
{
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= rows || col >= cols)
        return;                              // grid overhang
    const int idx = row + col * rows;        // column-major linear index
    Msum[idx] = alpha * M1[idx] + beta * M2[idx];
}
21,320 | /**
* An introduction to programming with CUDA Thrust
*
* Officially supported library distributed with CUDA since v4.0
* Abstracts the low-level memory and launch dimensions concerns in raw CUDA
* Provides containers and many algorithms for common problems to speed
* development on GPU
* Modeled after C++ STL and standard library
*
*
* Danny George 2012
*/
#include <stdio.h>
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/reduce.h>
// Walkthrough of Thrust basics: host/device vectors, implicit cudaMemcpy
// via construction and assignment, and a few parallel algorithms.
int main(int argc, char const *argv[])
{
    // Host-side vector, filled with 0, 1, ..., 49 by hand.
    thrust::host_vector<int> h_vec(50);
    for (size_t i = 0; i < h_vec.size(); ++i)
        h_vec[i] = (int)i;

    // Device vectors can be built straight from host data...
    thrust::device_vector<int> d_vec1(h_vec);
    // ...or from an iterator range (here: the first 50 elements; h_vec must
    // be at least that big).
    thrust::device_vector<int> d_vec2(h_vec.begin(), h_vec.begin() + 50);

    // Assignment abstracts cudaMemcpy in every direction.
    d_vec2 = h_vec;   // host   -> device
    d_vec1 = d_vec2;  // device -> device
    h_vec = d_vec2;   // device -> host

    for (size_t i = 0; i < d_vec1.size(); ++i) {
        // Per-element access on a device_vector works and is handy for
        // debugging, but each read/write is its own cudaMemcpy — copy whole
        // vectors (or large chunks) for real work.
        std::cout << "d_vec1[" << i << "] = " << d_vec1[i] << std::endl;
        d_vec1[i] *= 2;
    }

    // Thrust algorithms are templated: they accept host_vectors and
    // device_vectors alike, running in parallel on the device.
    thrust::sequence(h_vec.begin(), h_vec.end());   // 0, 1, 2, ...
    thrust::sequence(d_vec1.begin(), d_vec1.end());
    // Decrementing sequence: size, size-1, ..., 1.
    thrust::sequence(d_vec2.begin(), d_vec2.end(), (int)d_vec2.size(), -1);
    // Constant fill, then a reduction (operator is + by default).
    thrust::fill(d_vec1.begin(), d_vec1.end(), 10);
    int sum = thrust::reduce(d_vec1.begin(), d_vec1.end());
    std::cout << "sum: " << sum << std::endl;
    for (size_t i = 0; i < d_vec2.size(); ++i)
        std::cout << "d_vec2[" << i << "] = " << d_vec2[i] << std::endl;
    return 0;
}
|
21,321 |
#ifndef CUDACC
#define CUDACC
#endif
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
typedef unsigned int NUMBER;
const int RADIUS = 3;
const NUMBER N = 2048 * 2048;
const int THREADS_PER_BLOCK = 1024;
const int BLOCKS = N / THREADS_PER_BLOCK;
// 1D stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS], staged through
// shared memory. Each block loads its tile plus a halo of RADIUS cells
// on each side; out-of-range halo cells are treated as 0.
// Fixes vs. the original:
//  - shared array was sized BLOCKS + 2*RADIUS (a grid size, not a block size)
//  - left halo read in[lindex - RADIUS] instead of in[gindex - RADIUS],
//    and could underflow for block 0
//  - the loop offset was declared NUMBER (unsigned), so -RADIUS wrapped and
//    the accumulation loop never executed
//  - __syncthreads() sat inside a divergent branch
__global__ void stencil_1d(NUMBER* in, NUMBER* out) {
    __shared__ NUMBER temp[THREADS_PER_BLOCK + (2 * RADIUS)];
    NUMBER gindex = threadIdx.x + blockIdx.x * blockDim.x;
    NUMBER lindex = threadIdx.x + RADIUS;
    if (gindex < N)
    {
        temp[lindex] = in[gindex];
        if (threadIdx.x < RADIUS) {
            // left halo, clamped to 0 at the front edge of the array
            temp[lindex - RADIUS] =
                (gindex >= (NUMBER)RADIUS) ? in[gindex - RADIUS] : 0;
            // right halo, clamped to 0 at the back edge of the array
            temp[lindex + THREADS_PER_BLOCK] =
                (gindex + THREADS_PER_BLOCK < N) ? in[gindex + THREADS_PER_BLOCK] : 0;
        }
    }
    // barrier must be reached by every thread in the block
    __syncthreads();
    if (gindex < N)
    {
        NUMBER result = 0;
        // offset must be signed: NUMBER is unsigned
        for (int offset = -RADIUS; offset <= RADIUS; offset++) {
            result += temp[lindex + offset];
        }
        out[gindex] = result;
    }
}
// Print the N-element vector v to stdout as a bracketed, comma-separated
// list with no trailing comma and no trailing newline.
void print_stencil(int* v) {
    printf("[");
    for (int i = 0; i < N; i++) {
        // comma after every element except the last
        const char* sep = (i != N - 1) ? "," : "";
        printf("%d%s", v[i], sep);
    }
    printf("]");
}
// Driver: fill an all-ones input, run the stencil on the GPU, and report
// the time measured with CUDA events (allocation + copies + kernel).
int main()
{
    NUMBER* h_in, * h_out, * d_in, * d_out;
    h_in = (NUMBER*)malloc(N * sizeof(NUMBER));
    h_out = (NUMBER*)malloc(N * sizeof(NUMBER));
    for (NUMBER i = 0; i < N; i++) {
        h_in[i] = 1;
    }
    cudaEvent_t start = cudaEvent_t();
    cudaEvent_t stop = cudaEvent_t();
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&d_in, N * sizeof(NUMBER));
    cudaMalloc((void**)&d_out, N * sizeof(NUMBER));
    cudaMemcpy(d_in, h_in, N * sizeof(NUMBER), cudaMemcpyHostToDevice);
    // fix: the original also uploaded the *uninitialized* h_out buffer to
    // d_out; the kernel overwrites every element, so that copy is dropped
    stencil_1d<<<BLOCKS, THREADS_PER_BLOCK>>>(d_in, d_out);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop); // wait for the GPU to finish before reading the time
    float time;
    cudaEventElapsedTime(&time, start, stop);
    // fix: the events bracket GPU work, not CPU work
    printf("Time elapsed on GPU: %f ms.\n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // fix: release device and host memory (all four were leaked)
    cudaFree(d_in);
    cudaFree(d_out);
    free(h_in);
    free(h_out);
    return 0;
}
|
21,322 | // Multiple GPU version of cuFFT_check that uses multiple GPU's
// This program creates a real-valued 3D function sin(x)*cos(y)*cos(z) and then
// takes the forward and inverse Fourier Transform, with the necessary scaling included.
// The output of this process should match the input function
// includes, system
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <complex.h>
// includes, project
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define NX 512
#define NY 512
#define NZ 512
#define NZ2 (NZ/2+1)
#define NN (NX*NY*NZ)
#define L (2*M_PI)
#define TX 8
#define TY 8
#define TZ 8
// Integer ceiling division for positive operands: ceil(a / b).
int divUp(int a, int b)
{
    int biased = a + b - 1;   // push a up to the next multiple boundary
    return biased / b;
}
__device__
// Clamp idx into the valid range [0, idxMax - 1].
int idxClip(int idx, int idxMax){
    if (idx < 0) return 0;
    if (idx > idxMax - 1) return idxMax - 1;
    return idx;
}
__device__
// Column-major flattening of a clipped (col, row, stack) index:
// stack varies fastest, then row, then col.
int flatten(int col, int row, int stack, int width, int height, int depth){
    int s = idxClip(stack, depth);
    int r = idxClip(row, height);
    int c = idxClip(col, width);
    return s + depth * (r + height * c);
}
__global__
// One thread per grid point: fill f1 with 1.0 and f2 with 5.0.
void initialize(double *f1, double *f2)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    const int j = blockIdx.y * blockDim.y + threadIdx.y;
    const int k = blockIdx.z * blockDim.z + threadIdx.z;
    // fix: re-enable the bounds guard that was commented out; without it,
    // grid-overshoot threads redundantly re-write clamped edge cells
    if ((i >= NX) || (j >= NY) || (k >= NZ)) return;
    const int idx = flatten(i, j, k, NX, NY, NZ);
    // Initialize array
    f1[idx] = 0.5 + 0.5;   // == 1.0
    f2[idx] = 2.5 * 2.0;   // == 5.0
    return;
}
// Launch the initialize kernel over the full NX x NY x NZ grid on the
// currently selected device; divUp rounds the grid up to full coverage.
void initialize_singleGPU(double *f1, double *f2)
{
    const dim3 threads(TX, TY, TZ);
    const dim3 blocks(divUp(NX, TX), divUp(NY, TY), divUp(NZ, TZ));
    initialize<<<blocks, threads>>>(f1, f2);
}
// Split the X extent evenly across GPUnum devices and launch one
// initialize kernel per device on its slab. Assumes NX % GPUnum == 0;
// arbitrary grid sizes need more work (as the original noted).
void initialize_multiGPU(const int GPUnum, double *f1, double *f2)
{
    const int NX_per_GPU = NX / GPUnum;
    printf(" The number of divisions in the X-direction is %d\n", NX_per_GPU);
    const dim3 threads(TX, TY, TZ);
    const dim3 blocks(divUp(NX_per_GPU, TX), divUp(NY, TY), divUp(NZ, TZ));
    for (int dev = 0; dev < GPUnum; ++dev) {
        cudaSetDevice(dev);
        // offset of this device's X-slab within the full managed arrays
        const int offset = dev * NX_per_GPU * NY * NZ;
        initialize<<<blocks, threads>>>(&f1[offset], &f2[offset]);
    }
}
// Initialize two managed arrays once on a single GPU and once split across
// two GPUs, checksumming both results; each cell contributes 1.0 + 5.0,
// so both sums should equal 6*NX*NY*NZ.
int main (void)
{
int i, j, k, idx;
// Declare variables
double *u;
double *u_fft;
// Allocate memory for arrays
// (managed memory is addressable from the host and from every GPU)
cudaMallocManaged(&u, sizeof(double)*NN );
cudaMallocManaged(&u_fft, sizeof(double)*NN );
// Perform kernel calculation using only one GPU first:
cudaSetDevice(0);
initialize_singleGPU(u, u_fft);
cudaDeviceSynchronize();
double result1 = 0.0;
for (i = 0; i < NX; ++i ){
for (j = 0; j<NY; ++j){
for (k = 0; k<NZ; ++k){
idx = k + j*NZ + i*NY*NZ;
result1 += u[idx] + u_fft[idx];
}
}
}
// Set GPU's to use and list device properties
// NOTE(review): assumes at least 2 GPUs are installed -- no device-count check
int nGPUs = 2, deviceNum[nGPUs];
for(i = 0; i<nGPUs; ++i)
{
deviceNum[i] = i;
cudaSetDevice(deviceNum[i]);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, deviceNum[i]);
printf(" Device name: %s\n", prop.name);
printf(" Memory Clock Rate (KHz): %d\n",
prop.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n",
prop.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
// Re-initialize the same arrays, this time splitting the work across GPUs
initialize_multiGPU(nGPUs, u, u_fft);
// Synchronize both GPUs in order to print reports
for (i = 0; i<nGPUs; ++i){
cudaSetDevice(deviceNum[i]);
cudaDeviceSynchronize();
}
// Recompute the checksum; it should match the single-GPU result
double result2 = 0.0;
for (i = 0; i < NX; ++i ){
for (j = 0; j<NY; ++j){
for (k = 0; k<NZ; ++k){
idx = k + j*NZ + i*NY*NZ;
result2 += u[idx] + u_fft[idx];
}
}
}
printf("The value of f1 is %d, which should equal to 6*NX*NY*NZ, %d\n", (int)result1, NN + 5*NN);
printf("The value of f2 is %d, which should equal to 6*NX*NY*NZ, %d\n", (int)result2, NN + 5*NN);
cudaFree(u);
cudaFree(u_fft);
return 0;
} |
21,323 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define WIDTH 100
#define HEIGHT 100
#define GRID_SIZE WIDTH * HEIGHT
#define ACTUAL_GRID_SIZE sizeof(char) * GRID_SIZE
#define BLOCK_WIDTH 1
#define BLOCK_HEIGHT 1
#define NO_OF_GENERATIONS_TO_RUN 500
//#define DUMPFULL
//#define DUMPCOUNT
#define CUBE
__global__ void runGeneration(char currentModel[], char nextModel[]);
__device__ void getBlock(int x, int y, int* topLeftXOfBlock, int* topLeftYOfBlock);
__device__ void getCountOfNeighbours(char model[], int x, int y, int* neighbourCount);
__device__ void addCellValue(char model[], int x, int y, int* value);
__device__ void runRules(char model[], int x, int y, char* fate);
void printGrid(char grid[]);
// Conway's Game of Life driver: random initial grid, then
// NO_OF_GENERATIONS_TO_RUN generations computed on the GPU with two
// ping-ponged device buffers, printing the grid after each step.
int main(int argc, char** args) {
    printf("%d;%d\n", WIDTH, HEIGHT);
    srand( time(NULL) );
    // host-side model, randomly seeded with 0/1 cells
    char clientModel[GRID_SIZE];
    for (int i = 0; i < GRID_SIZE; ++i)
        clientModel[i] = rand() % 2;
    printGrid(clientModel);
    // two device buffers: current generation and the one being written
    char* deviceModel = 0;
    char* deviceModel2 = 0;
    cudaMalloc((void**)&deviceModel, ACTUAL_GRID_SIZE);
    cudaMalloc((void**)&deviceModel2, ACTUAL_GRID_SIZE);
    dim3 grid(WIDTH, HEIGHT);
    cudaMemcpy(deviceModel, clientModel, ACTUAL_GRID_SIZE, cudaMemcpyHostToDevice);
    for (int generation = 0; generation < NO_OF_GENERATIONS_TO_RUN; ++generation) {
        // one block per cell (blocks are 1x1)
        runGeneration<<<grid, 1>>>(deviceModel, deviceModel2);
        // the freshly written buffer becomes the current model
        char* tmp = deviceModel;
        deviceModel = deviceModel2;
        deviceModel2 = tmp;
        cudaMemcpy(clientModel, deviceModel, ACTUAL_GRID_SIZE, cudaMemcpyDeviceToHost);
        printGrid(clientModel);
    }
    cudaFree(deviceModel);
    cudaFree(deviceModel2);
}
// Render the grid according to the compile-time output mode:
//  CUBE      - clear the terminal and draw the full grid (animation frame)
//  DUMPFULL  - dump every cell on one long line
//  DUMPCOUNT - print only the number of live cells
void printGrid(char grid[]) {
#ifdef CUBE
system("clear");
for(int y = 0; y < HEIGHT ; y++) {
for(int x = 0; x < WIDTH; x++) {
printf("%d", grid[x + y * WIDTH]);
}
printf("\n");
}
printf("\n");
#endif
#ifdef DUMPFULL
for(int y = 0; y < HEIGHT ; y++) {
for(int x = 0; x < WIDTH; x++) {
printf("%d", grid[x + y * WIDTH]);
}
}
#endif
#ifdef DUMPCOUNT
int count = 0;
for(int x = 0; x < GRID_SIZE ; x++){
if(grid[x] == 1) count++;
}
printf("%d\n", count);
#endif
}
// Compute one generation: each block handles the BLOCK_WIDTH x BLOCK_HEIGHT
// patch of cells whose top-left corner getBlock derives from the block index,
// writing each cell's fate into nextModel.
__global__ void runGeneration(char currentModel[], char nextModel[]) {
    int startx, starty;
    getBlock(blockIdx.x, blockIdx.y, &startx, &starty);
    for (int y = starty; y < starty + BLOCK_HEIGHT; ++y) {
        for (int x = startx; x < startx + BLOCK_WIDTH; ++x) {
            runRules(currentModel, x, y, &nextModel[x + y * WIDTH]);
        }
    }
}
// With 1x1 blocks, block (x, y) maps directly to cell (x, y).
__device__ void getBlock(int x, int y, int* topLeftXOfBlock, int* topLeftYOfBlock) {
*topLeftXOfBlock = x;
*topLeftYOfBlock = y;
}
// Count the live cells among the 8 Moore neighbours of (x, y);
// out-of-bounds neighbours contribute nothing (handled by addCellValue).
__device__ void getCountOfNeighbours(char model[], int x, int y, int* neighbourCount) {
    *neighbourCount = 0;
    for (int dy = -1; dy <= 1; ++dy) {
        for (int dx = -1; dx <= 1; ++dx) {
            if (dx == 0 && dy == 0) continue;   // skip the cell itself
            addCellValue(model, x + dx, y + dy, neighbourCount);
        }
    }
}
// Accumulate the value of cell (x, y) into *value; coordinates outside
// the grid are treated as empty and add nothing.
__device__ void addCellValue(char model[], int x, int y, int* value) {
    bool inside = (x >= 0) && (x < WIDTH) && (y >= 0) && (y < HEIGHT);
    if (inside) {
        *value += model[x + y * WIDTH];
    }
}
// Conway's rules for cell (x, y): a live cell survives with exactly 2 or 3
// neighbours; a dead cell becomes alive with exactly 3; otherwise dead.
__device__ void runRules(char model[], int x, int y, char* fate) {
    int count;
    getCountOfNeighbours(model, x, y, &count);
    char alive = model[x + (y * WIDTH)];
    if (alive == 1) {
        *fate = (count == 2 || count == 3) ? 1 : 0;
    } else {
        *fate = (count == 3) ? 1 : 0;
    }
}
|
21,324 | #include "includes.h"
// Fill an areaWidth x areaHeight rectangle of a planar float image
// (three channel planes of targetWidth*targetHeight floats each) with the
// constant color (r, g, b). One thread per (channel, pixel) work item;
// the rectangle is placed at (inputX, inputY) and clipped to the target.
__global__ void DrawRgbaColorKernel(float *target, int targetWidth, int targetHeight, int inputX, int inputY, int areaWidth, int areaHeight, float r, float g, float b)
{
// flatten a 2D grid of 1D blocks into a single work-item id
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
int targetPixels = targetWidth * targetHeight;
int texturePixels = areaWidth * areaHeight;
// decompose id into channel index, then (x, y) within the area
int idTextureRgb = id / texturePixels;
int idTexturePixel = (id - idTextureRgb * texturePixels); // same as (id % texturePixels), but the kernel runs 10% faster
int idTextureY = idTexturePixel / areaWidth;
int idTextureX = (idTexturePixel - idTextureY * areaWidth); // same as (id % textureWidth), but the kernel runs another 10% faster
if (idTextureRgb < 3) // 3 channels that we will write to
{
// if the texture pixel offset by inputX, inputY, lies inside the target
if (idTextureX + inputX < targetWidth &&
idTextureX + inputX >= 0 &&
idTextureY + inputY < targetHeight &&
idTextureY + inputY >= 0)
{
float color = 0.0f;
switch (idTextureRgb)
{
case 0:
color = r;
break;
case 1:
color = g;
break;
case 2:
color = b;
break;
}
// planar layout: channel plane base + row offset + column
int tIndex = targetPixels * idTextureRgb + targetWidth * (idTextureY + inputY) + (idTextureX + inputX);
target[tIndex] = color;
}
}
} |
21,325 | #include <stdio.h>
// by lectures and "CUDA by Example" book
#define ind(i, j, cols) (i * cols + j)
struct dim2 {
int rows;
int cols;
};
// device code: matrices mult calculation
// Computes m3 = m1 * m2 (row-major) with 2D grid-stride loops, so the
// small fixed launch grid covers any m3 size. The printf calls are debug
// tracing and will heavily serialize execution.
__global__ void mult_matrices_kernel(int* m1, int* m2, int* m3, dim2 m3_dims, int inner_dim) {
int rows = m3_dims.rows;
int cols = m3_dims.cols;
printf("blockId, threadId, dims: [%d, %d], [%d, %d], [%d, %d]\n",
blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, rows, cols);
// row and col that correspond to current thread
// process all elements that correspond to current thread
for (int i = blockIdx.y * blockDim.y + threadIdx.y;
i < rows; i += blockDim.y * gridDim.y)
for (int j = blockIdx.x * blockDim.x + threadIdx.x;
j < cols; j += blockDim.x * gridDim.x) {
printf("blockId, threadId, pos: [%d, %d], [%d, %d], [%d, %d]\n",
blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x, i, j);
int index_3 = ind(i, j, cols);
m3[index_3] = 0;
// iterating over row and down column
for (int k = 0; k < inner_dim; k++) {
int index_1 = ind(i, k, inner_dim);
int index_2 = ind(k, j, cols);
m3[index_3] += m1[index_1] * m2[index_2];
}
}
}
// Allocate a device buffer sized for an m_dims matrix and upload the
// host data into it; returns the device pointer (caller frees).
int* cuda_copy_mat(int* host_m, dim2 m_dims) {
    const long bytes = (long)m_dims.rows * m_dims.cols * sizeof(int);
    int* dev_m = 0;
    cudaMalloc((void**)&dev_m, bytes);
    cudaMemcpy(dev_m, host_m, bytes, cudaMemcpyHostToDevice);
    return dev_m;
}
// host code: preparation
// Host orchestration for m3 = m1 * m2 on the GPU: upload operands,
// launch the grid-stride kernel, download the result, free device memory.
void mult_matrices_gpu(int* host_m1, dim2 m1_dims,
                       int* host_m2, dim2 m2_dims,
                       int* host_m3, dim2 m3_dims) {
    // Step 1: moving data on device
    int* dev_m1 = cuda_copy_mat(host_m1, m1_dims);
    // fix: the original passed m3_dims here, sizing the buffer from the
    // *result* matrix and reading past the end of host_m2
    int* dev_m2 = cuda_copy_mat(host_m2, m2_dims);
    int* dev_m3 = cuda_copy_mat(host_m3, m3_dims);
    // Step 2: launch a 3x2 grid of 2x2 blocks; the kernel grid-strides,
    // so this fixed configuration covers any result size
    dim3 grid_dim(3, 2, 1);
    dim3 block_dim(2, 2, 1);
    mult_matrices_kernel<<<grid_dim, block_dim>>>(dev_m1, dev_m2, dev_m3, m3_dims, m1_dims.cols);
    // Step 3: copy the result back and release device memory
    cudaMemcpy(host_m3, dev_m3, m3_dims.rows * m3_dims.cols * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_m1);
    cudaFree(dev_m2);
    cudaFree(dev_m3);
}
// CPU reference implementation of m3 = m1 * m2 (row-major, O(n^3)).
void mult_matrices_cpu(int* m1, dim2 m1_dims,
                       int* m2, dim2 m2_dims,
                       int* m3, dim2 m3_dims) {
    for (int i = 0; i < m1_dims.rows; i++) {
        for (int j = 0; j < m2_dims.cols; j++) {
            // dot product of row i of m1 with column j of m2
            int acc = 0;
            for (int k = 0; k < m1_dims.cols; k++) {
                acc += m1[ind(i, k, m1_dims.cols)] * m2[ind(k, j, m2_dims.cols)];
            }
            m3[ind(i, j, m3_dims.cols)] = acc;
        }
    }
}
// create matrix (array representation)
// Allocate a rows x cols matrix whose element at flat (row-major)
// position p holds k * (p + 1); k = 0 yields an all-zero matrix.
int* create_mat(dim2 dims, int k) {
    const int total = dims.rows * dims.cols;
    int* mat = (int*)malloc(total * sizeof(int));
    for (int p = 0; p < total; p++) {
        mat[p] = k * (p + 1);
    }
    return mat;
}
// print matrix
// Print a header with the matrix dimensions, then the matrix itself,
// one tab-separated row per line.
void print_mat(const char* header, int* mat, dim2 dims) {
    printf("%s (%d, %d):\n", header, dims.rows, dims.cols);
    for (int i = 0; i < dims.rows; i++) {
        for (int j = 0; j < dims.cols; j++) {
            printf("\t%d ", mat[ind(i, j, dims.cols)]);
            if (j == dims.cols - 1) {
                printf("\n");
            }
        }
    }
}
// Build two small matrices, multiply them on the GPU and then on the CPU,
// printing each result (the CPU run overwrites host_m3, so the second
// printout serves as the reference for the first).
int main() {
// first matrix
struct dim2 m1_dims = {6, 4};
int* host_m1 = create_mat(m1_dims, 1);
print_mat("First matrix", host_m1, m1_dims);
// second matrix
struct dim2 m2_dims = {4, 8};
int* host_m2 = create_mat(m2_dims, 2);
print_mat("Second matrix", host_m2, m2_dims);
// dimensionality validation
if (m1_dims.cols != m2_dims.rows) {
printf("Error: Inner matrix dimensions does not match:\n"
"(%d, %d) and (%d, %d)",
m1_dims.rows, m1_dims.cols, m2_dims.rows, m2_dims.cols);
return 0;
}
// result matrix (k = 0 creates it zero-filled)
struct dim2 m3_dims = {m1_dims.rows, m2_dims.cols};
int* host_m3 = create_mat(m3_dims, 0);
print_mat("Third matrix", host_m3, m3_dims);
// multiplication
mult_matrices_gpu(host_m1, m1_dims,
host_m2, m2_dims,
host_m3, m3_dims);
// showing result
print_mat("Result matrix", host_m3, m3_dims);
// multiplication
mult_matrices_cpu(host_m1, m1_dims,
host_m2, m2_dims,
host_m3, m3_dims);
// showing result
// NOTE(review): host_m1/m2/m3 are malloc'd and never freed
print_mat("Result matrix", host_m3, m3_dims);
return 0;
} |
21,326 |
/* Includes, system */
#include <stdio.h>
/* Main */
// Print a greeting and exit (main returns 0 implicitly).
int main(int argc, char** argv)
{
    const char* saludo = "Para ser original -- HOLA MUNDO\n";
    printf("%s", saludo);
}
|
21,327 | #include <stdio.h>
/*
* '__global__' alerts the compiler that a function should be compiled
* to run on a device instead of the host
*/
// Empty kernel: compiled for the device, performs no work; exists only to
// demonstrate a kernel launch.
__global__ void kernel( void ) {
}
// Launch the empty kernel once, then greet from the host. The values in
// the <<<blocks, threads>>> brackets control how the runtime launches the
// device code; here a single block with a single thread.
int main( void ) {
    kernel<<<1, 1>>>();
    printf("Hello World!\n");
    return 0;
}
|
21,328 | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#define TILE_WIDTH 32
// kernel
// Tiled matrix multiply: d_z = d_x (m x n) * d_y (n x p), staging
// TILE_WIDTH x TILE_WIDTH tiles of both operands in shared memory.
// Launch with TILE_WIDTH x TILE_WIDTH blocks on a ceil-divided grid.
// Fixes vs. the original: the tile loop used n / TILE_WIDTH (dropping the
// remainder when n is not a multiple), and neither the tile loads nor the
// final store were bounds-checked even though the grid is ceil-divided.
__global__ void tiledMultiplyMatricesKernel(float* d_x, float* d_y, float* d_z, int m, int n, int p)
{
    __shared__ float tile_x[TILE_WIDTH][TILE_WIDTH];
    __shared__ float tile_y[TILE_WIDTH][TILE_WIDTH];
    // output coordinates for this thread
    int rowNum = blockIdx.y * blockDim.y + threadIdx.y;
    int colNum = blockIdx.x * blockDim.x + threadIdx.x;
    float result = 0.0f;
    // ceil-divide so the trailing partial tile is processed too
    int numTiles = (n + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int i = 0; i < numTiles; ++i)
    {
        int xCol = i * TILE_WIDTH + threadIdx.x;   // column read from d_x
        int yRow = i * TILE_WIDTH + threadIdx.y;   // row read from d_y
        // zero-pad out-of-range elements so edge tiles contribute nothing
        tile_x[threadIdx.y][threadIdx.x] =
            (rowNum < m && xCol < n) ? d_x[rowNum * n + xCol] : 0.0f;
        tile_y[threadIdx.y][threadIdx.x] =
            (yRow < n && colNum < p) ? d_y[yRow * p + colNum] : 0.0f;
        __syncthreads();   // tiles fully loaded before use
        for (int j = 0; j < TILE_WIDTH; ++j)
        {
            result += tile_x[threadIdx.y][j] * tile_y[j][threadIdx.x];
        }
        __syncthreads();   // all reads done before the next load
    }
    // write output (guarded: edge blocks have threads outside the matrix)
    if (rowNum < m && colNum < p)
        d_z[rowNum * p + colNum] = result;
}
// CUDA error checking
// Abort with a diagnostic if a CUDA error is pending. `line` is the call
// site's __LINE__, so line - 1 points at the statement being checked.
void errorCheck(unsigned int line)
{
    cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
    {
        return;
    }
    printf("CUDA error in line %u in file %s: %s\n", line - 1, __FILE__, cudaGetErrorString(status));
    exit(EXIT_FAILURE);
}
// host function containing kernel call
// Host wrapper: z = x * y for row-major m x n and n x p float matrices.
// Allocates device buffers, uploads the operands, launches the tiled
// kernel on a ceil-divided grid, downloads z, and frees device memory.
// Every CUDA call is followed by errorCheck(__LINE__).
void multiplyMatrices(float* x, float* y, float* z, int m, int n, int p)
{
dim3 numOfBlocks(ceil(p / (float) TILE_WIDTH), ceil(m / (float) TILE_WIDTH), 1);
dim3 numOfThreads(TILE_WIDTH, TILE_WIDTH, 1);
size_t bytes_x = m * n * sizeof(float);
size_t bytes_y = n * p * sizeof(float);
size_t bytes_z = m * p * sizeof(float);
float* d_x;
float* d_y;
float* d_z;
cudaMalloc((void**) &d_x, bytes_x);
errorCheck(__LINE__);
cudaMalloc((void**) &d_y, bytes_y);
errorCheck(__LINE__);
cudaMalloc((void**) &d_z, bytes_z);
errorCheck(__LINE__);
cudaMemcpy(d_x, x, bytes_x, cudaMemcpyHostToDevice);
errorCheck(__LINE__);
cudaMemcpy(d_y, y, bytes_y, cudaMemcpyHostToDevice);
errorCheck(__LINE__);
tiledMultiplyMatricesKernel<<<numOfBlocks, numOfThreads>>>(d_x, d_y, d_z, m, n, p);
errorCheck(__LINE__);
cudaMemcpy(z, d_z, bytes_z, cudaMemcpyDeviceToHost);
errorCheck(__LINE__);
cudaFree(d_x);
errorCheck(__LINE__);
cudaFree(d_y);
errorCheck(__LINE__);
cudaFree(d_z);
errorCheck(__LINE__);
}
// Benchmark driver: multiply two 4096x4096 random matrices on the GPU
// and report wall-clock time in microseconds.
int main()
{
    struct timespec start, end;
    srand(time(NULL));
    size_t m = 4096;
    size_t n = 4096;
    size_t p = 4096;
    float* x = (float*) malloc(m * n * sizeof(float));
    float* y = (float*) malloc(n * p * sizeof(float));
    float* z = (float*) malloc(m * p * sizeof(float));
    // random integers in [-64, 64], exactly representable as floats
    for (size_t i = 0; i < m * n; ++i)
    {
        x[i] = rand() % 129 - 64;
    }
    for (size_t i = 0; i < n * p; ++i)
    {
        y[i] = rand() % 129 - 64;
    }
    clock_gettime(CLOCK_REALTIME, &start);
    // do matrix multiplication
    multiplyMatrices(x, y, z, m, n, p);
    clock_gettime(CLOCK_REALTIME, &end);
    time_t execTime = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_nsec - start.tv_nsec) / 1000;
    // fix: time_t is not an int, so printing it with %d was undefined behavior
    printf("Execution time: %ld microseconds.", (long) execTime);
    // fix: release the host buffers (originally leaked)
    free(x);
    free(y);
    free(z);
    return 0;
}
|
21,329 | #include <iostream>
using namespace std;
#define L 1e-4
#define N_grid 16
#define dx (L/float(N_grid))
// Wrap x into [0, L) by subtracting the floor multiple of L, then print
// the original and wrapped values on one line.
void test(float x)
{
    float wrapped = x - floor(x / L) * L;
    cout << x << " " << wrapped << endl;
}
// Exercise test() on sample points around zero and multiples/fractions
// of the period L, printing L first.
int main()
{
    cout << "L:" << L << endl;
    const float samples[] = { -3*L, -2*L, -L, -0.5*L, 0, L/2.0, L, L*1.5, 3*L };
    const int count = sizeof(samples) / sizeof(samples[0]);
    for (int i = 0; i < count; ++i)
    {
        test(samples[i]);
    }
}
|
21,330 | //RSQF.cu
/*
* Copyright 2021 Regents of the University of California
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include "RSQF.cuh"
#define LOW_BIT_MASK(n) ((1U << n) - 1U)
#define LOW_BIT_MASKLL(n) (n >= 64 ? 0xFFFFFFFFFFFFFFFF : (1ULL << n) - 1ULL)
// Number of filter blocks needed for 2^q slots: ceil(2^q / SLOTS_PER_BLOCK)
// plus one extra overflow block.
__host__ __device__ size_t calcNumBlocksGPU(unsigned int q)
{
    size_t slots = (size_t)(1 << q);
    return (slots + (SLOTS_PER_BLOCK - 1)) / SLOTS_PER_BLOCK + 1;
}
// Initialize a counting quotient filter with 2^q quotient slots: record q,
// size the block array, and allocate + zero it in device memory.
__host__ void initCQFGPU(struct countingQuotientFilterGPU* cqf, unsigned int q)
{
cqf->qbits = q;
cqf->numBlocks = calcNumBlocksGPU(q);
cqf_gpu_block* d_filterBlocks;
cudaMalloc((void**) &d_filterBlocks, cqf->numBlocks * sizeof(cqf_gpu_block));
// zeroed blocks == empty filter (no occupied bits, no run-ends)
cudaMemset(d_filterBlocks, 0, cqf->numBlocks * sizeof(cqf_gpu_block));
cqf->blocks = d_filterBlocks;
}
// True when this slot's bit is set in the occupieds bitmap.
__host__ __device__ bool isOccupiedGPU(long long unsigned int occupieds, unsigned int slotNumber)
{
    return ((occupieds >> slotNumber) & 1ULL) != 0;
}
// Return the occupieds bitmap with this slot's bit turned on.
__device__ long long unsigned int setOccupiedGPU(long long unsigned int occupieds, unsigned int slotNumber)
{
    long long unsigned int bit = 1ULL << slotNumber;
    return occupieds | bit;
}
// True when this slot's bit is set in the runends bitmap.
__host__ __device__ bool isRunEndGPU(long long unsigned int runends, unsigned int slotNumber)
{
    return ((runends >> slotNumber) & 1ULL) != 0;
}
// Return the runends bitmap with this slot's bit turned on.
__device__ long long unsigned int setRunEndGPU(long long unsigned int runends, unsigned int slotNumber)
{
    long long unsigned int bit = 1ULL << slotNumber;
    return runends | bit;
}
// Return the runends bitmap with this slot's bit cleared.
__device__ long long unsigned int clearRunEndGPU(long long unsigned int runends, unsigned int slotNumber)
{
    long long unsigned int mask = ~(1ULL << slotNumber);
    return runends & mask;
}
// Index of the filter block that contains this global slot.
__device__ unsigned int findBlockNumberGPU(unsigned int globalSlotNumber)
{
return globalSlotNumber / SLOTS_PER_BLOCK;
}
// Slot index within its block for this global slot.
__device__ unsigned int findPositionInBlockGPU(unsigned int globalSlotNumber)
{
return globalSlotNumber % SLOTS_PER_BLOCK;
}
// Which 64-bit word of the packed remainder array holds the start of
// this slot's RBITS-wide remainder.
__host__ __device__ unsigned int findRemainderIntSlotGPU(unsigned int blockPosition)
{
return blockPosition * RBITS / 64;
}
// Bit offset within that 64-bit word where this slot's remainder starts.
__host__ __device__ unsigned int findRemainderStartBitGPU(unsigned int blockPosition)
{
return (blockPosition * RBITS) % 64;
}
// Convert (block number, slot within block) back to a global slot index.
__device__ unsigned int globalSlotIndexGPU(unsigned int blockNum, unsigned int slotNumInBlock){
return (slotNumInBlock + (blockNum * SLOTS_PER_BLOCK));
}
// Extract the RBITS-wide remainder stored for this slot. Remainders are
// bit-packed into 64-bit words, so a value may straddle two words.
__host__ __device__ unsigned int getRemainderGPU(struct countingQuotientFilterGPU* cqf, unsigned int blockNum, unsigned int slotNum)
{
unsigned int integerSlot = findRemainderIntSlotGPU(slotNum);
unsigned int startBit = findRemainderStartBitGPU(slotNum);
// number of bits that spill past the end of the first 64-bit word
int spillover = startBit + RBITS - 64;
unsigned int remainder;
if(spillover <= 0){
// value fully contained in one word
remainder = (cqf->blocks[blockNum].remainders[integerSlot] >> startBit) & LOW_BIT_MASKLL(RBITS);
}
else{
// low bits come from this word, high bits from the next word
unsigned int mainBlockBits = RBITS - spillover;
remainder = (cqf->blocks[blockNum].remainders[integerSlot] >> startBit) & LOW_BIT_MASKLL(mainBlockBits);
unsigned int spilledOverBits = (cqf->blocks[blockNum].remainders[integerSlot + 1] & LOW_BIT_MASKLL(spillover)) << mainBlockBits;
remainder = remainder | spilledOverBits;
}
return remainder;
}
// Store an RBITS-wide remainder for this slot, handling values that
// straddle two 64-bit words of the packed remainder array.
__device__ void setRemainderGPU(struct countingQuotientFilterGPU* cqf, unsigned int blockNum, unsigned int slotNum, unsigned int value)
{
unsigned int integerSlot = findRemainderIntSlotGPU(slotNum);
unsigned int startBit = findRemainderStartBitGPU(slotNum);
// number of bits that spill past the end of the first 64-bit word
int spillover = startBit + RBITS - 64;
if(spillover <= 0){
// clear the field, then OR in the new value
cqf->blocks[blockNum].remainders[integerSlot] &= ~(LOW_BIT_MASKLL(RBITS) << startBit);
cqf->blocks[blockNum].remainders[integerSlot] |= ((long long unsigned int)value << startBit);
}
else{
unsigned int mainBlockBits = RBITS - spillover;
// low mainBlockBits bits go at the top of this word...
cqf->blocks[blockNum].remainders[integerSlot] &= ~(LOW_BIT_MASKLL(mainBlockBits) << startBit);
cqf->blocks[blockNum].remainders[integerSlot] |= (LOW_BIT_MASKLL(mainBlockBits) & (long long unsigned int)value) << startBit;
// ...and the remaining high bits at the bottom of the next word
cqf->blocks[blockNum].remainders[integerSlot + 1] &= ~(LOW_BIT_MASKLL(spillover));
cqf->blocks[blockNum].remainders[integerSlot + 1] |= (long long unsigned int)value >> mainBlockBits;
}
}
// Debug dump of the filter: copies the blocks to the host and prints the
// occupieds / runEnds / remainders ten slots per row. The device pointer
// is temporarily swapped for the host copy so getRemainderGPU reads host
// memory; it is restored (and the host copy freed) before returning.
__host__ void printGPUFilter(struct countingQuotientFilterGPU* cqf)
{
    cqf_gpu_block* h_filterBlocks = new cqf_gpu_block[cqf->numBlocks];
    cudaMemcpy(h_filterBlocks, cqf->blocks, cqf->numBlocks * sizeof(cqf_gpu_block), cudaMemcpyDeviceToHost);
    cqf_gpu_block* d_filterBlocks = cqf->blocks;
    cqf->blocks = h_filterBlocks;
    printf("Filter contents:\n");
    for(int i = 0; i < cqf->numBlocks; i++){
        printf("block: %i\t", i);
        printf("offset: %u\n", cqf->blocks[i].offset);
        int rowsPerBlock = SLOTS_PER_BLOCK / 10;
        // full rows of 10 slots each
        for(int j = 0; j < rowsPerBlock; j++){
            printf("\noccupieds:\t");
            for(int k = 0; k < 10; k++){
                printf("%u\t", isOccupiedGPU(cqf->blocks[i].occupieds, 10 * j + k));
            }
            printf("\nrunEnds:\t");
            for(int k = 0; k < 10; k++){
                printf("%u\t", isRunEndGPU(cqf->blocks[i].runEnds, 10 * j + k));
            }
            printf("\nremainders:\t");
            for(int k = 0; k < 10; k++){
                printf("%u\t", getRemainderGPU(cqf, i, 10 * j + k));
            }
            printf("\n");
        }
        // trailing partial row when SLOTS_PER_BLOCK is not a multiple of 10
        if(SLOTS_PER_BLOCK % 10 != 0){
            int numLeft = SLOTS_PER_BLOCK % 10;
            printf("\noccupieds:\t");
            for(int k = 0; k < numLeft; k++){
                printf("%u\t", isOccupiedGPU(cqf->blocks[i].occupieds, rowsPerBlock * 10 + k));
            }
            printf("\nrunEnds:\t");
            for(int k = 0; k < numLeft; k++){
                printf("%u\t", isRunEndGPU(cqf->blocks[i].runEnds, rowsPerBlock * 10 + k));
            }
            printf("\nremainders:\t");
            for(int k = 0; k < numLeft; k++){
                printf("%u\t", getRemainderGPU(cqf, i, rowsPerBlock * 10 + k));
            }
            printf("\n");
        }
        printf("\n --------------------------------------------------------------------- \n");
    }
    cqf->blocks = d_filterBlocks;
    delete[] h_filterBlocks;   // fix: the host copy was leaked in the original
}
// AP hash over the four big-endian bytes of value, reduced modulo
// maxHashValue (result in [0, maxHashValue)).
__device__ __host__ unsigned int Normal_APHashGPU(unsigned int value, unsigned int maxHashValue)
{
    unsigned int hash = 0xAAAAAAAA;
    for (int i = 0; i < 4; i++){
        // bytes are consumed most-significant first
        unsigned char byte = (value >> (24 - 8 * i)) & 0xFF;
        if ((i & 1) == 0)
            hash ^= (hash << 7) ^ byte ^ (hash >> 3);
        else
            hash ^= ~((hash << 11) ^ byte ^ (hash >> 5));
    }
    return hash % maxHashValue;
}
// Rank query: number of set bits in positions [0, index] (inclusive).
__device__ unsigned int rankBitGPU(long long unsigned int bitArray, unsigned int index)
{
unsigned int rank = __popcll(bitArray & LOW_BIT_MASKLL(index + 1));
return rank;
}
// Iterative select: 0-based index of the rank-th set bit (rank is
// 1-based). Returns 0 for rank == 0 and UINT_MAX when fewer than rank
// bits are set.
__device__ unsigned int selectBitGPU_old(long long unsigned int bitArray, unsigned int rank)
{
//using iterative method for first basic implementation
if(rank == 0){
return 0;
}
unsigned int nextOne = 0;
for(int i = 1; i <= rank; i++){
if(bitArray == 0) return UINT_MAX; //if runEnd is in next block
nextOne = __ffsll(bitArray);
// clear the found bit (and everything below it) before the next pass
bitArray &= ~LOW_BIT_MASKLL(nextOne);
}
return nextOne - 1;
}
__device__ const unsigned char kSelectInByte[2048] = {
8, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0,
1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0,
2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0,
1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0,
3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 7, 0,
1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0,
2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0,
1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 5, 0, 1, 0, 2, 0, 1, 0, 3, 0,
1, 0, 2, 0, 1, 0, 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 8, 8, 8, 1,
8, 2, 2, 1, 8, 3, 3, 1, 3, 2, 2, 1, 8, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2,
2, 1, 8, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1,
4, 3, 3, 1, 3, 2, 2, 1, 8, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4,
4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1,
3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 7, 7, 1, 7, 2,
2, 1, 7, 3, 3, 1, 3, 2, 2, 1, 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3,
3, 1, 3, 2, 2, 1, 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1, 6, 4, 4, 1,
4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2,
2, 1, 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1, 8, 8, 8, 8, 8, 8, 8, 2,
8, 8, 8, 3, 8, 3, 3, 2, 8, 8, 8, 4, 8, 4, 4, 2, 8, 4, 4, 3, 4, 3, 3, 2, 8, 8,
8, 5, 8, 5, 5, 2, 8, 5, 5, 3, 5, 3, 3, 2, 8, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3,
4, 3, 3, 2, 8, 8, 8, 6, 8, 6, 6, 2, 8, 6, 6, 3, 6, 3, 3, 2, 8, 6, 6, 4, 6, 4,
4, 2, 6, 4, 4, 3, 4, 3, 3, 2, 8, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2,
6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 7, 8, 7, 7, 2, 8, 7,
7, 3, 7, 3, 3, 2, 8, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 5,
7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2, 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3,
3, 2, 8, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2, 7, 6, 6, 4, 6, 4, 4, 2,
6, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2, 6, 5,
5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 3, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 4, 8, 4, 4, 3, 8, 8, 8, 8, 8, 8,
8, 5, 8, 8, 8, 5, 8, 5, 5, 3, 8, 8, 8, 5, 8, 5, 5, 4, 8, 5, 5, 4, 5, 4, 4, 3,
8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 3, 8, 8, 8, 6, 8, 6, 6, 4, 8, 6,
6, 4, 6, 4, 4, 3, 8, 8, 8, 6, 8, 6, 6, 5, 8, 6, 6, 5, 6, 5, 5, 3, 8, 6, 6, 5,
6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7,
7, 3, 8, 8, 8, 7, 8, 7, 7, 4, 8, 7, 7, 4, 7, 4, 4, 3, 8, 8, 8, 7, 8, 7, 7, 5,
8, 7, 7, 5, 7, 5, 5, 3, 8, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3, 8, 8,
8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 3, 8, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4,
6, 4, 4, 3, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3, 7, 6, 6, 5, 6, 5,
5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 5, 8, 5, 5, 4, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6,
6, 4, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6, 8, 6, 6, 5, 8, 8, 8, 6, 8, 6, 6, 5,
8, 6, 6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8,
8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 4, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7,
8, 7, 7, 5, 8, 8, 8, 7, 8, 7, 7, 5, 8, 7, 7, 5, 7, 5, 5, 4, 8, 8, 8, 8, 8, 8,
8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 4,
8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6,
6, 5, 6, 5, 5, 4, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 5, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 8, 8, 8, 8, 6, 8, 8, 8, 6,
8, 6, 6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7,
8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 8,
8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6,
6, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 6,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 8, 8, 8, 8, 7, 8, 8, 8, 7, 8, 7, 7, 6, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 7
};
// Branch-light select: 0-based index of the k-th set bit of x (k is
// 1-based; k == 0 returns 0, matching selectBitGPU_old; out-of-range k
// returns UINT_MAX). Uses SWAR per-byte popcounts to find the byte that
// contains the target bit, then the kSelectInByte lookup table within it.
__device__ unsigned int selectBitGPU(long long unsigned int x, unsigned int k)
{
if(k == 0)return 0;
k--;
if (k >= __popcll(x)) { return UINT_MAX; }
const long long unsigned int kOnesStep4 = 0x1111111111111111ULL;
const long long unsigned int kOnesStep8 = 0x0101010101010101ULL;
const long long unsigned int kMSBsStep8 = 0x80ULL * kOnesStep8;
long long unsigned int s = x;
// SWAR popcount: 2-bit sums, then 4-bit sums, then per-byte sums
s = s - ((s & 0xA * kOnesStep4) >> 1);
s = (s & 0x3 * kOnesStep4) + ((s >> 2) & 0x3 * kOnesStep4);
s = (s + (s >> 4)) & 0xF * kOnesStep8;
// byteSums: each byte holds the popcount of all bytes at or below it
long long unsigned int byteSums = s * kOnesStep8;
long long unsigned int kStep8 = k * kOnesStep8;
// locate the first byte whose running popcount exceeds k
long long unsigned int geqKStep8 = (((kStep8 | kMSBsStep8) - byteSums) & kMSBsStep8);
long long unsigned int place = __popcll(geqKStep8) * 8;
// rank of the target bit within that byte
long long unsigned int byteRank = k - (((byteSums << 8) >> place) & (long long unsigned int)(0xFF));
return place + kSelectInByte[((x >> place) & 0xFF) | (byteRank << 8)];
}
// One thread per query: looks up hashValues[idx] (a precomputed fingerprint)
// in the counting quotient filter and writes the global slot index holding a
// matching remainder to slotValues[idx], or -1 when not found.
// Grid may be 2D in (x, y); the index is flattened below.
__global__ void lookupGPU(int numItems, struct countingQuotientFilterGPU cqf, unsigned int* hashValues, int* slotValues)
{
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
    if(idx >= numItems) return;
    //compute hash value
    unsigned int q = cqf.qbits;
    unsigned int hashValue = hashValues[idx];
    //separate into quotient (canonical slot address) and remainder
    unsigned int fq = (hashValue >> RBITS) & LOW_BIT_MASK(q);
    unsigned int fr = hashValue & LOW_BIT_MASK(RBITS);
    unsigned int blockNum = findBlockNumberGPU(fq);
    unsigned int slotNum = findPositionInBlockGPU(fq);
    //check occupied bit: if clear, no run exists for this quotient
    if(!isOccupiedGPU(cqf.blocks[blockNum].occupieds, slotNum)){
        slotValues[idx] = -1;
        return;
    }
    //find rank of quotient slot among occupied slots in its block
    unsigned char blockOffset = cqf.blocks[blockNum].offset;
    unsigned int rank = rankBitGPU(cqf.blocks[blockNum].occupieds, slotNum);
    //select slot with runEnd rank = quotient rank
    //mask off the runEnds for any runs in blocks i-1 or earlier
    unsigned int endOfRun = selectBitGPU((cqf.blocks[blockNum].runEnds & ~LOW_BIT_MASKLL(blockOffset)), rank);
    //if end of run is in next block, walk forward with the remaining rank
    while(endOfRun == UINT_MAX){
        rank -= __popcll(cqf.blocks[blockNum].runEnds & ~LOW_BIT_MASKLL(blockOffset));
        if(blockOffset - SLOTS_PER_BLOCK > 0){
            blockOffset = blockOffset - SLOTS_PER_BLOCK;
        }
        else{
            blockOffset = 0;
        }
        blockNum++;
        // Bug fix: blocks[] holds cqf.numBlocks entries (0..numBlocks-1), so
        // blockNum == numBlocks is already out of range; the original `>` test
        // allowed one out-of-bounds read of cqf.blocks before giving up.
        if(blockNum >= cqf.numBlocks){
            slotValues[idx] = -1;
            return;
        }
        //select on remaining rank
        endOfRun = selectBitGPU((cqf.blocks[blockNum].runEnds & ~LOW_BIT_MASKLL(blockOffset)), rank);
    }
    //endOfRun now points to runEnd for correct quotient
    //search backwards through run
    //end search if: we reach another set runEnd bit; we find the remainder;
    //we pass where it would sort (remainders in a run are ordered); or we
    //reach the canonical slot
    unsigned int currentRemainder = getRemainderGPU(&cqf, blockNum, endOfRun);
    unsigned int currentSlot = endOfRun;
    do{
        if(currentRemainder == fr){
            //return index of slot where remainder is stored
            slotValues[idx] = globalSlotIndexGPU(blockNum, currentSlot);
            return;
        }
        if(currentRemainder < fr){
            //passed the sorted position: not present
            slotValues[idx] = -1;
            return;
        }
        if(currentSlot > 0){
            currentSlot--;
        }
        else{
            //step back into the previous block
            currentSlot = SLOTS_PER_BLOCK - 1;
            if(blockNum == 0){
                slotValues[idx] = -1;
                return;
            }
            blockNum--;
        }
        currentRemainder = getRemainderGPU(&cqf, blockNum, currentSlot);
    }while(!isRunEndGPU(cqf.blocks[blockNum].runEnds, currentSlot) && (globalSlotIndexGPU(blockNum, currentSlot) >= fq));
    slotValues[idx] = -1;
    return;
}
// One thread per input: hash each raw value into a (qbits + RBITS)-bit
// fingerprint. Grid may be 2D in (x, y); the index is flattened below.
__global__ void hashInputs(int numItems, struct countingQuotientFilterGPU cqf, unsigned int* insertValues, unsigned int* fingerprints)
{
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
    if (tid >= numItems) return;
    // Map the raw input into the filter's hash space (quotient + remainder bits).
    fingerprints[tid] = Normal_APHashGPU(insertValues[tid], (1 << (cqf.qbits + RBITS)));
}
// Hashes numValues keys, sorts the fingerprints for memory locality, runs the
// CQF lookup kernel, then unsorts the results back to input order.
// Returns the elapsed GPU time in milliseconds.
__host__ float launchLookups(countingQuotientFilterGPU cqf, int numValues, unsigned int* d_lookupValues, int* d_slotValuesArray)
{
    thrust::device_vector<unsigned int> d_hashValues(numValues);
    thrust::fill(d_hashValues.begin(), d_hashValues.end(), 0);
    unsigned int* d_hashValuesArray = thrust::raw_pointer_cast(&d_hashValues[0]);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    //Hash items
    int numBlocks = (numValues + 1023) / 1024;
    dim3 hashBlockDims((numBlocks + 31) / 32, 32);
    hashInputs<<<hashBlockDims, 1024>>>(numValues, cqf, d_lookupValues, d_hashValuesArray);
    //Create index array to track inputs -> outputs (fill 1 + exclusive scan = 0,1,2,...)
    thrust::device_vector<unsigned int> d_indices(numValues);
    thrust::fill(d_indices.begin(), d_indices.end(), 1);
    thrust::exclusive_scan(d_indices.begin(), d_indices.end(), d_indices.begin(), 0);
    //Sort by fingerprint so neighboring threads probe neighboring filter blocks
    thrust::sort_by_key(d_hashValues.begin(), d_hashValues.end(), d_indices.begin());
    //Launch lookup kernel
    numBlocks = (numValues + 511) / 512;
    dim3 blockDims((numBlocks + 31) / 32, 32);
    lookupGPU<<<blockDims, 512>>>(numValues, cqf, d_hashValuesArray, d_slotValuesArray);
    //Sort outputs back into the caller's original input order
    thrust::device_ptr<int> d_slotValues(d_slotValuesArray);
    thrust::sort_by_key(d_indices.begin(), d_indices.end(), d_slotValues);
    cudaEventRecord(stop);
    //Calculate timing results
    cudaEventSynchronize(stop);
    float lookupTime = 0;
    cudaEventElapsedTime(&lookupTime, start, stop);
    // Bug fix: the original explicitly invoked d_hashValues' destructor here;
    // the destructor also runs at scope exit, destroying the device buffer
    // twice (undefined behavior / double free). RAII now frees it exactly once.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return lookupTime;
}
// One thread per query: hashes lookupValues[idx] in-kernel (no host-side
// sort), then performs the same CQF probe as lookupGPU. Writes the global
// slot index of a matching remainder to slotValues[idx], or -1 when absent.
__global__ void hashAndLookupGPU(int numItems, struct countingQuotientFilterGPU cqf, unsigned int* lookupValues, int* slotValues)
{
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
    if(idx >= numItems) return;
    //compute hash value
    unsigned int q = cqf.qbits;
    unsigned int hashValue = Normal_APHashGPU(lookupValues[idx], (1 << (q + RBITS)));
    //separate into quotient (canonical slot address) and remainder
    unsigned int fq = (hashValue >> RBITS) & LOW_BIT_MASK(q);
    unsigned int fr = hashValue & LOW_BIT_MASK(RBITS);
    unsigned int blockNum = findBlockNumberGPU(fq);
    unsigned int slotNum = findPositionInBlockGPU(fq);
    //check occupied bit: if clear, no run exists for this quotient
    if(!isOccupiedGPU(cqf.blocks[blockNum].occupieds, slotNum)){
        slotValues[idx] = -1;
        return;
    }
    //find rank of quotient slot among occupied slots in its block
    unsigned char blockOffset = cqf.blocks[blockNum].offset;
    unsigned int rank = rankBitGPU(cqf.blocks[blockNum].occupieds, slotNum);
    //select slot with runEnd rank = quotient rank
    //mask off the runEnds for any runs in blocks i-1 or earlier
    unsigned int endOfRun = selectBitGPU((cqf.blocks[blockNum].runEnds & ~LOW_BIT_MASKLL(blockOffset)), rank);
    //if end of run is in next block, walk forward with the remaining rank
    while(endOfRun == UINT_MAX){
        rank -= __popcll(cqf.blocks[blockNum].runEnds & ~LOW_BIT_MASKLL(blockOffset));
        if(blockOffset - SLOTS_PER_BLOCK > 0){
            blockOffset = blockOffset - SLOTS_PER_BLOCK;
        }
        else{
            blockOffset = 0;
        }
        blockNum++;
        // Bug fix: blocks[] holds cqf.numBlocks entries (0..numBlocks-1), so
        // blockNum == numBlocks is already out of range; the original `>` test
        // allowed one out-of-bounds read of cqf.blocks before giving up.
        if(blockNum >= cqf.numBlocks){
            slotValues[idx] = -1;
            return;
        }
        //select on remaining rank
        endOfRun = selectBitGPU((cqf.blocks[blockNum].runEnds & ~LOW_BIT_MASKLL(blockOffset)), rank);
    }
    //endOfRun now points to runEnd for correct quotient
    //search backwards through run
    //end search if: we reach another set runEnd bit; we find the remainder;
    //we pass where it would sort; or we reach the canonical slot
    unsigned int currentRemainder = getRemainderGPU(&cqf, blockNum, endOfRun);
    unsigned int currentSlot = endOfRun;
    do{
        if(currentRemainder == fr){
            //return index of slot where remainder is stored
            slotValues[idx] = globalSlotIndexGPU(blockNum, currentSlot);
            return;
        }
        if(currentRemainder < fr){
            //passed the sorted position: not present
            slotValues[idx] = -1;
            return;
        }
        if(currentSlot > 0){
            currentSlot--;
        }
        else{
            //step back into the previous block
            currentSlot = SLOTS_PER_BLOCK - 1;
            if(blockNum == 0){
                slotValues[idx] = -1;
                return;
            }
            blockNum--;
        }
        currentRemainder = getRemainderGPU(&cqf, blockNum, currentSlot);
    }while(!isRunEndGPU(cqf.blocks[blockNum].runEnds, currentSlot) && (globalSlotIndexGPU(blockNum, currentSlot) >= fq));
    slotValues[idx] = -1;
    return;
}
// Looks up numValues keys without pre-sorting: hashing and probing happen in
// a single kernel launch. Returns the elapsed GPU time in milliseconds.
__host__ float launchUnsortedLookups(countingQuotientFilterGPU cqf, int numValues, unsigned int* d_lookupValues, int* d_slotValuesArray)
{
    cudaEvent_t begin, end;
    cudaEventCreate(&begin);
    cudaEventCreate(&end);
    cudaEventRecord(begin);
    // One thread per lookup; spread blocks over a 2D grid for large counts.
    int blockCount = (numValues + 511) / 512;
    dim3 gridDims((blockCount + 31) / 32, 32);
    hashAndLookupGPU<<<gridDims, 512>>>(numValues, cqf, d_lookupValues, d_slotValuesArray);
    cudaEventRecord(end);
    // Wait for completion, then read the elapsed time.
    cudaEventSynchronize(end);
    float elapsedMs = 0;
    cudaEventElapsedTime(&elapsedMs, begin, end);
    cudaEventDestroy(begin);
    cudaEventDestroy(end);
    return elapsedMs;
}
// Starting at currentSlot within *blockNum, returns the in-block index of the
// first slot not covered by any run, advancing *blockNum into later blocks as
// needed. Returns UINT_MAX if the search runs off the end of the filter.
__device__ unsigned int findFirstUnusedSlotGPU(struct countingQuotientFilterGPU* cqf, int* blockNum, unsigned int currentSlot)
{
    long long unsigned int occupieds = cqf->blocks[*blockNum].occupieds;
    long long unsigned int runEnds = cqf->blocks[*blockNum].runEnds;
    unsigned char offset = cqf->blocks[*blockNum].offset;
    unsigned int rank = rankBitGPU(occupieds, currentSlot);
    unsigned int select = selectBitGPU((runEnds & ~LOW_BIT_MASKLL(offset)), rank);
    if(rank == 0){
        // No occupied slots at or before currentSlot in this block; runs
        // spilling in from earlier blocks still cover the first `offset` slots.
        select = offset;
    }
    // While some run still ends at or after currentSlot, jump just past it.
    while(currentSlot <= select){
        if(select == UINT_MAX || select == SLOTS_PER_BLOCK - 1){
            (*blockNum)++;
            // Bug fix: blocks[] holds numBlocks entries, so an index equal to
            // numBlocks is already out of bounds (the original `>` test read
            // one block past the array before failing).
            if(*blockNum >= cqf->numBlocks) return UINT_MAX;
            occupieds = cqf->blocks[*blockNum].occupieds;
            runEnds = cqf->blocks[*blockNum].runEnds;
            offset = cqf->blocks[*blockNum].offset;
            select = offset - 1; //want currentSlot to be first slot after offset values
        }
        currentSlot = select + 1;
        rank = rankBitGPU(occupieds, currentSlot);
        select = selectBitGPU((runEnds & ~LOW_BIT_MASKLL(offset)), rank);
    }
    return currentSlot;
}
// Splits each fingerprint into its quotient and remainder.
// On entry, quotients[] holds raw fingerprints; on exit it holds the qbits-wide
// quotient and remainders[] holds the RBITS-wide remainder.
__global__ void quotienting(int numItems, unsigned int qbits, unsigned int* quotients, unsigned int* remainders)
{
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
    if (tid >= numItems) return;
    const unsigned int fingerprint = quotients[tid];  // array holds fingerprints on entry
    remainders[tid] = fingerprint & LOW_BIT_MASK(RBITS);
    quotients[tid] = (fingerprint >> RBITS) & LOW_BIT_MASK(qbits);
}
// For sorted quotients, records in blockStarts[b] the index of the first item
// whose home slot falls in filter block b. Entries for empty blocks are left
// untouched (the caller pre-fills them with UINT_MAX).
__global__ void findBlockStartIndices(int numItems, unsigned int* quotients, unsigned int* blockStarts)
{
    unsigned int tid = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * gridDim.x * blockDim.x;
    if (tid >= numItems) return;
    unsigned int myBlock = findBlockNumberGPU(quotients[tid]);
    // The first sorted item always begins its block's run of items.
    if (tid == 0) {
        blockStarts[myBlock] = 0;
        return;
    }
    // Only the item at a block boundary writes its index.
    if (findBlockNumberGPU(quotients[tid - 1]) != myBlock) {
        blockStarts[myBlock] = tid;
    }
}
// Advances block blockNum's insert queue after the item at index nextValue
// has been consumed. Items are sorted, so block b's pending items occupy the
// index range [blockStarts[b], start of the next non-empty block). If more
// items remain in this block's slice, the queue pointer is bumped; otherwise
// the queue is marked drained (UINT_MAX) and later blocks in the region are
// checked for pending work. itemsLeft[0] is set whenever another kernel
// iteration will be needed.
__device__ void incrementQueuePointer(unsigned int nextValue, unsigned int* blockInsertQueues, int blockNum, unsigned int lastRegionBlock, unsigned int* blockStarts, int numBlocks, bool* itemsLeft)
{
    // Find the start index of the next non-empty block's slice.
    // NOTE(review): when blockNum == numBlocks - 1 this first read is
    // blockStarts[numBlocks], one element past the array as allocated in
    // insertGPU — confirm the intended allocation size.
    int nextBlockNum = blockNum + 1;
    unsigned int nextBlockStart = blockStarts[nextBlockNum];
    while(nextBlockStart == UINT_MAX && nextBlockNum < (numBlocks - 1)){
        nextBlockNum++;
        nextBlockStart = blockStarts[nextBlockNum];
    }
    if(nextValue + 1 < nextBlockStart){
        // More items queued for this block: advance to the next one.
        blockInsertQueues[blockNum]++;
        itemsLeft[0] = true;
    }
    else{
        // This block's queue is exhausted.
        blockInsertQueues[blockNum] = UINT_MAX;
        // Another pass is still needed if any later block in the region
        // has a non-drained queue.
        while(blockNum < lastRegionBlock){
            blockNum++;
            if(blockInsertQueues[blockNum] != UINT_MAX){
                itemsLeft[0] = true;
                return;
            }
        }
    }
}
// One thread per region (a contiguous group of numBlocksPerRegion filter
// blocks). Each thread pops the next pending (quotient, remainder) pair
// queued for its region and performs one insert into the CQF, shifting
// existing remainders right (run-preserving) to make room. If the insert
// would touch blocks beyond the region boundary, the thread defers by setting
// itemsLeft[0]; the host relaunches with progressively larger regions until
// all queues drain. nextItems[b] is the index of block b's next pending item,
// or UINT_MAX when drained.
__global__ void insertIntoRegions(int numRegions, int numBlocksPerRegion, int numItems, struct countingQuotientFilterGPU cqf, unsigned int* blockStarts, unsigned int* nextItems, unsigned int* quotients, unsigned int* remainders, int* finalSlotValues, bool* itemsLeft)
{
    //TODO: reduce resources used
    unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x +blockIdx.y * gridDim.x * blockDim.x;
    if(idx >= numRegions) return;
    //find next item to insert for region
    //find start block for region
    unsigned int firstRegionBlock = idx * numBlocksPerRegion;
    unsigned int lastRegionBlock = (idx * numBlocksPerRegion) + numBlocksPerRegion - 1;
    if(lastRegionBlock >= cqf.numBlocks) lastRegionBlock = cqf.numBlocks - 1;
    // Scan the region's per-block queues for the first pending item.
    unsigned int nextValue = nextItems[firstRegionBlock];
    int blockNum = firstRegionBlock;
    while(nextValue == UINT_MAX && blockNum < lastRegionBlock){
        blockNum++;
        nextValue = nextItems[blockNum];
    }
    // Nothing pending in this region (UINT_MAX also fails this test).
    if(nextValue >= numItems) return;
    // printf("index of item to be inserted=%u\n", nextValue);
    unsigned int fq = quotients[nextValue];
    unsigned int fr = remainders[nextValue];
    // Canonical ("home") location derived from the quotient.
    int homeBlockNum = blockNum;
    unsigned int homeSlotNum = findPositionInBlockGPU(fq);
    // printf("quotient: %u\tslot:%u\tremainder: %u\n", fq, homeSlotNum, fr);
    long long unsigned int occupieds = cqf.blocks[blockNum].occupieds;
    // printf("blockNum = %u\t lastRegionBlock=%u\n", blockNum, lastRegionBlock);
    // printf("homeSlotNum = %u\n", homeSlotNum);
    //check occupied bit
    bool occupiedBit = isOccupiedGPU(occupieds, homeSlotNum);
    // printf("occupied? %u\n", (unsigned int)occupiedBit);
    //find rank of quotient slot
    unsigned char blockOffset = cqf.blocks[blockNum].offset;
    // printf("offset = %u\n", blockOffset);
    unsigned int rank = rankBitGPU(occupieds, homeSlotNum);
    // printf("rank = %u\n", rank);
    //select slot with runEnd rank = quotient rank
    //mask off the runEnds for any runs in blocks i-1 or earlier
    long long unsigned int runEnds = cqf.blocks[blockNum].runEnds;
    //printf("runEnds = %llu\n", runEnds);
    unsigned int endOfRun = selectBitGPU((runEnds & ~LOW_BIT_MASKLL(blockOffset)), rank);
    if(rank == 0){
        // No occupied slots at or before the home slot: the last in-use slot
        // is the tail of whatever spilled in from earlier blocks (offset).
        if(blockOffset == 0){
            endOfRun = 0;
        }
        else{
            endOfRun = blockOffset - 1;
        }
    }
    // printf("select(rank) = %u\n", endOfRun);
    //if end of run is in next block
    while(endOfRun == UINT_MAX){
        rank -= __popcll(cqf.blocks[blockNum].runEnds & ~LOW_BIT_MASKLL(blockOffset));
        if(blockOffset - SLOTS_PER_BLOCK > 0){
            blockOffset = blockOffset - SLOTS_PER_BLOCK;
        }
        else{
            blockOffset = 0;
        }
        blockNum++;
        //select on remaining rank
        endOfRun = selectBitGPU((cqf.blocks[blockNum].runEnds & ~LOW_BIT_MASKLL(blockOffset)), rank);
    }
    //TODO: block num check during or after loop?
    if(blockNum > lastRegionBlock){
        //the insert will affect the next region
        itemsLeft[0] = true;
        return;
    }
    //endOfRun now points to runEnd for correct quotient
    //if select returns location earlier than fq, slot is empty and we can insert the item there
    //(also if there are no occupied slots at all in the start block)
    // note: `|` parses as (slotIdx < homeIdx) | ((blockOffset + rank) == 0) —
    // a non-short-circuit OR of the two conditions, which is the intent here.
    if(globalSlotIndexGPU(blockNum, endOfRun) < globalSlotIndexGPU(homeBlockNum, homeSlotNum) | blockOffset + rank == 0){
        cqf.blocks[homeBlockNum].runEnds = setRunEndGPU(runEnds, homeSlotNum);
        setRemainderGPU(&cqf, homeBlockNum, homeSlotNum, fr);
        cqf.blocks[homeBlockNum].occupieds = setOccupiedGPU(occupieds, homeSlotNum);
        finalSlotValues[nextValue] = globalSlotIndexGPU(homeBlockNum, homeSlotNum);
        //move pointer to next item in queue
        incrementQueuePointer(nextValue, nextItems, homeBlockNum, lastRegionBlock, blockStarts, cqf.numBlocks, itemsLeft);
        return;
    }
    //if slot is not empty, search through the filter for the first empty slot
    else{
        endOfRun++;
        if(endOfRun == SLOTS_PER_BLOCK){
            // Run ends at the block boundary: start the search in the next block.
            endOfRun = 0;
            blockNum++;
            if(blockNum > lastRegionBlock){
                //the insert will affect the next region
                itemsLeft[0] = true;
                return;
            }
            // NOTE(review): likely should be >= — blockNum == cqf.numBlocks
            // indexes one past the blocks array below; confirm allocation size.
            if(blockNum > cqf.numBlocks){ //insert fails
                finalSlotValues[nextValue] = -1;
                incrementQueuePointer(nextValue, nextItems, homeBlockNum, lastRegionBlock, blockStarts, cqf.numBlocks, itemsLeft);
                return;
            }
        }
        unsigned int runEndBlock = blockNum;
        unsigned int unusedSlot = findFirstUnusedSlotGPU(&cqf, &blockNum, endOfRun);
        if(unusedSlot == UINT_MAX){ //insert fails
            finalSlotValues[nextValue] = -1;
            incrementQueuePointer(nextValue, nextItems, homeBlockNum, lastRegionBlock, blockStarts, cqf.numBlocks, itemsLeft);
            return;
        }
        if(blockNum > lastRegionBlock){
            //the insert will affect the next region
            itemsLeft[0] = true;
            return;
        }
        // The shift below pushes one slot of content into each block between
        // home and the empty slot, so their offsets each grow by one.
        if(blockNum > homeBlockNum){
            for(int i = 0; i < blockNum - homeBlockNum; i++){
                cqf.blocks[blockNum - i].offset++;
            }
        }
        // printf("unused slot idx = %u\n", unusedSlot);
        // printf("usused slot block = %u\n", blockNum);
        // printf("canonical end of run (block, slot) = (%u, %u)\n", runEndBlock, endOfRun);
        //move items over until we get back to the run the item belongs in
        while(globalSlotIndexGPU(blockNum, unusedSlot) > globalSlotIndexGPU(runEndBlock, endOfRun)){
            // printf("next slot: %u\n", unusedSlot);
            if(unusedSlot == 0){
                // Shift the last slot of the previous block into this block's slot 0.
                int nextBlock = blockNum - 1;
                unsigned int nextSlot = SLOTS_PER_BLOCK - 1;
                setRemainderGPU(&cqf, blockNum, unusedSlot, getRemainderGPU(&cqf, nextBlock, nextSlot));
                if(isRunEndGPU(cqf.blocks[nextBlock].runEnds, nextSlot)){
                    cqf.blocks[blockNum].runEnds = setRunEndGPU(cqf.blocks[blockNum].runEnds, unusedSlot);
                }
                else{
                    cqf.blocks[blockNum].runEnds = clearRunEndGPU(cqf.blocks[blockNum].runEnds, unusedSlot);
                }
                unusedSlot = SLOTS_PER_BLOCK - 1;
                blockNum--;
            }
            else{
                // Shift the neighboring slot (remainder + runEnd bit) right by one.
                setRemainderGPU(&cqf, blockNum, unusedSlot, getRemainderGPU(&cqf, blockNum, (unusedSlot - 1)));
                if(isRunEndGPU(cqf.blocks[blockNum].runEnds, (unusedSlot - 1))){
                    cqf.blocks[blockNum].runEnds = setRunEndGPU(cqf.blocks[blockNum].runEnds, unusedSlot);
                }
                else{
                    cqf.blocks[blockNum].runEnds = clearRunEndGPU(cqf.blocks[blockNum].runEnds, unusedSlot);
                }
                unusedSlot--;
            }
        }
        //if the home slot was not previously occupied, then new item is its run
        if(!isOccupiedGPU(cqf.blocks[homeBlockNum].occupieds, homeSlotNum)){
            setRemainderGPU(&cqf, blockNum, unusedSlot, fr);
            cqf.blocks[blockNum].runEnds = setRunEndGPU(cqf.blocks[blockNum].runEnds, unusedSlot);
            cqf.blocks[homeBlockNum].occupieds = setOccupiedGPU(cqf.blocks[homeBlockNum].occupieds, homeSlotNum);
            finalSlotValues[nextValue] = globalSlotIndexGPU(blockNum, unusedSlot);
            incrementQueuePointer(nextValue, nextItems, homeBlockNum, lastRegionBlock, blockStarts, cqf.numBlocks, itemsLeft);
            return;
        }
        //if home slot already has a run, put new item in correct sequential location
        else{
            //move run end over by one slot
            unsigned int nextSlot = unusedSlot - 1;
            int nextBlock = blockNum;
            if(unusedSlot == 0){
                nextSlot = SLOTS_PER_BLOCK - 1;
                nextBlock = blockNum - 1;
                if(nextBlock < 0){ //insert fails
                    finalSlotValues[nextValue] = -1;
                    incrementQueuePointer(nextValue, nextItems, homeBlockNum, lastRegionBlock, blockStarts, cqf.numBlocks, itemsLeft);
                    return;
                }
            }
            // printf("nextSlot: %u\tnextBlock:%u\n", nextSlot, nextBlock);
            // printf("unusedSlot: %u\tblockNum: %u\n", unusedSlot, blockNum);
            cqf.blocks[blockNum].runEnds = setRunEndGPU(cqf.blocks[blockNum].runEnds, unusedSlot);
            cqf.blocks[nextBlock].runEnds = clearRunEndGPU(cqf.blocks[nextBlock].runEnds, nextSlot);
            //search backwards through run
            //end search if: we reach another set runEnd bit; we find remainder <= new remainder; we reach canonical slot
            unsigned int nextRemainder = getRemainderGPU(&cqf, nextBlock, nextSlot);
            // printf("remainder in last run slot: %u\n", nextRemainder);
            do{
                if(nextRemainder <= fr){
                    // Found the sorted position: drop the new remainder here.
                    // printf("setting remainder in block %u, slot %u.\n", blockNum, unusedSlot);
                    setRemainderGPU(&cqf, blockNum, unusedSlot, fr);
                    //this stores duplicates
                    //return index of slot where remainder is stored
                    finalSlotValues[nextValue] = globalSlotIndexGPU(blockNum, unusedSlot);
                    incrementQueuePointer(nextValue, nextItems, homeBlockNum, lastRegionBlock, blockStarts, cqf.numBlocks, itemsLeft);
                    return;
                }
                // Larger remainder: shift it one slot right and keep walking back.
                setRemainderGPU(&cqf, blockNum, unusedSlot, nextRemainder);
                if(unusedSlot > 0){
                    unusedSlot--;
                    if(unusedSlot == 0){
                        if(blockNum == 0){
                            // Reached the very first slot of the filter.
                            setRemainderGPU(&cqf, blockNum, unusedSlot, fr);
                            finalSlotValues[nextValue] = globalSlotIndexGPU(blockNum, unusedSlot);
                            incrementQueuePointer(nextValue, nextItems, homeBlockNum, lastRegionBlock, blockStarts, cqf.numBlocks, itemsLeft);
                            return;
                        }
                        nextSlot = SLOTS_PER_BLOCK - 1;
                        nextBlock--;
                    }
                    else{
                        nextSlot = unusedSlot - 1;
                    }
                }
                else{
                    // Step back into the previous block.
                    unusedSlot = SLOTS_PER_BLOCK - 1;
                    blockNum--;
                    if(blockNum < 0){ //insert fails
                        finalSlotValues[nextValue] = -1;
                        incrementQueuePointer(nextValue, nextItems, homeBlockNum, lastRegionBlock, blockStarts, cqf.numBlocks, itemsLeft);
                        return;
                    }
                    nextSlot = unusedSlot - 1;
                }
                nextRemainder = getRemainderGPU(&cqf, nextBlock, nextSlot);
            }while(!isRunEndGPU(cqf.blocks[nextBlock].runEnds, nextSlot) && (globalSlotIndexGPU(nextBlock, nextSlot) >= fq));
            //unusedSlot is now head of run. Insert the remainder there.
            setRemainderGPU(&cqf, blockNum, unusedSlot, fr);
            finalSlotValues[nextValue] = globalSlotIndexGPU(blockNum, unusedSlot);
            incrementQueuePointer(nextValue, nextItems, homeBlockNum, lastRegionBlock, blockStarts, cqf.numBlocks, itemsLeft);
            return;
        }
    }
}
//Some possible versions:
// 1. Sort items and divide them up to be inserted into groups of blocks. Groups grow with more iterations.
// 2. Items bid to be inserted into groups of blocks. Flag items that succeed insert and compact them out.
//TODO: Groupings of blocks could also have the start of the group change indices, rather than just changing group sizes. This would keep the number of threads high, while still avoiding too many repetitions on insert failures for less full filters.
// Inserts numValues keys into the CQF. Items are hashed, sorted by
// fingerprint, split into (quotient, remainder) pairs, and partitioned into
// per-block queues; the insert kernel is then relaunched with progressively
// larger regions until every queue is drained. d_returnValues receives, per
// input index, the global slot where the item landed (or -1 on failure).
// Returns the elapsed GPU time in milliseconds.
__host__ float insertGPU(countingQuotientFilterGPU cqf, int numValues, unsigned int* d_insertValues, int* d_returnValues)
{
    //Allocate memory
    thrust::device_vector<unsigned int> d_quotients(numValues);
    thrust::fill(d_quotients.begin(), d_quotients.end(), 0);
    unsigned int* d_quotientsArray = thrust::raw_pointer_cast(&d_quotients[0]);
    unsigned int* d_remaindersArray;
    cudaMalloc((void**) &d_remaindersArray, numValues * sizeof(unsigned int));
    cudaMemset(d_remaindersArray, 0, numValues * sizeof(unsigned int));
    unsigned int* d_blockStarts;
    cudaMalloc((void**) &d_blockStarts, cqf.numBlocks * sizeof(unsigned int));
    cudaMemset(d_blockStarts, 0xFF, cqf.numBlocks * sizeof(unsigned int)); //all bytes 0xFF -> UINT_MAX = "empty"
    unsigned int* d_nextItems;
    cudaMalloc((void**) &d_nextItems, cqf.numBlocks * sizeof(unsigned int));
    cudaMemset(d_nextItems, 0xFF, cqf.numBlocks * sizeof(unsigned int));
    // Loop-control flag (simplified from the original heap-allocated bool[1]).
    bool h_itemsLeft = true;
    bool* d_itemsLeft;
    cudaMalloc((void**) &d_itemsLeft, sizeof(bool));
    cudaMemset(d_itemsLeft, 0, sizeof(bool));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    //Hash items
    hashInputs<<<(numValues + 1023)/1024, 1024>>>(numValues, cqf, d_insertValues, d_quotientsArray);
    //Sort by fingerprint
    thrust::sort(d_quotients.begin(), d_quotients.end());
    //Split fingerprints into quotients and remainders
    quotienting<<<(numValues + 1023)/1024, 1024>>>(numValues, cqf.qbits, d_quotientsArray, d_remaindersArray);
    //Compute block ID & write to region start array if first item in region
    findBlockStartIndices<<<(numValues + 1023)/1024, 1024>>>(numValues, d_quotientsArray, d_blockStarts);
    cudaMemcpy(d_nextItems, d_blockStarts, cqf.numBlocks * sizeof(unsigned int), cudaMemcpyDeviceToDevice);
    //Loop over insert kernel
    //If insert overflows, then next iteration has same region size, thread should return (all later items in the region will also overflow, since they are sorted)
    int numIterations = 0;
    int blocksPerRegion = 1;
    int numRegions = cqf.numBlocks;
    while(h_itemsLeft){
        numRegions = (cqf.numBlocks + blocksPerRegion - 1) / blocksPerRegion;
        //Launch insert kernel with one thread per insert region
        insertIntoRegions<<<(numRegions + 127)/128, 128>>>(numRegions, blocksPerRegion, numValues, cqf, d_blockStarts, d_nextItems, d_quotientsArray, d_remaindersArray, d_returnValues, d_itemsLeft);
        cudaMemcpy(&h_itemsLeft, d_itemsLeft, sizeof(bool), cudaMemcpyDeviceToHost);
        cudaMemset(d_itemsLeft, 0, sizeof(bool));
        numIterations++;
        //grow regions every 16 iterations so deferred inserts eventually fit
        blocksPerRegion = numIterations / 16 + 1;
    }
    cudaEventRecord(stop);
    //Calculate timing results
    cudaEventSynchronize(stop);
    float insertTime = 0;
    cudaEventElapsedTime(&insertTime, start, stop);
    //Free memory
    //Bug fix: the original explicitly invoked d_quotients' destructor here;
    //the destructor also runs at scope exit, destroying the device buffer
    //twice (undefined behavior / double free). RAII now frees it exactly once.
    cudaFree(d_remaindersArray);
    cudaFree(d_blockStarts);
    cudaFree(d_nextItems);
    cudaFree(d_itemsLeft);
    //Bug fix: the events were never destroyed, leaking them on every call.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return insertTime;
}
|
21,331 | //
// kernal_add.cu
// XCodeCudaTest
//
// Created by on 2011/11/9.
// Copyright (c) 2011 takmatsumoto. All rights reserved.
//
//__global__ void VecAdd(float* A, float* float* B, float* C)
//{
// int idx = threadIdx.x;
//}
|
21,332 | #include "includes.h"
// Grid-stride fill: sets every element of a[0..N) to 1 regardless of how
// many threads were launched.
__global__ void deviceKernel(int *a, int N)
{
    int first = threadIdx.x + blockIdx.x * blockDim.x;
    int step = gridDim.x * blockDim.x;
    for (int i = first; i < N; i += step)
        a[i] = 1;
}
21,333 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Computes C = A * B where A has col_a columns and B has col_b columns.
// One thread per output element: thread (i, j) writes C[i * col_b + j].
// NOTE(review): there is no bounds check and the row counts are not passed
// in, so the launch grid must match the matrix dimensions exactly or edge
// threads access out of range — confirm against the caller.
__global__ void matrix_mul_gpu(float *A, float * B, float * C, int col_a, int col_b)
{
    int i = threadIdx.x + blockDim.x * blockIdx.x; // row of C
    int j = threadIdx.y + blockDim.y * blockIdx.y; // column of C
    // Bug fix: the accumulator was declared int, truncating every float
    // partial product; accumulate in float instead.
    float sum = 0.0f;
    for(int k=0;k<col_a;k++)
    {
        sum += A[i*col_a+k]*B[k*col_b+j];
    }
    C[i*col_b+j] = sum;
}
// Usage: prog m n k blocksize
// Multiplies random A (m x n) by B (n x k) on the GPU, reports the transfer +
// compute time, and dumps A, B, C as MATLAB-style .m files.
int main(int argc, char *argv[])
{
    if (argc < 5)
    {
        printf("lack of initial arguments !\n");
        return 0;
    }
    int m,n,k,blocksize;
    // m , n , k
    m = atoi(argv[1]);
    n = atoi(argv[2]);
    k = atoi(argv[3]);
    blocksize = atoi(argv[4]);
    float *A = (float *)malloc(sizeof(float) * m * n);
    float *B = (float *)malloc(sizeof(float) * n * k);
    float *C = (float *)malloc(sizeof(float) * m * k);
    //malloc device memory
    float *dA, *dB, *dC;
    cudaMalloc((void**)&dA, sizeof(float) *m*n);
    cudaMalloc((void**)&dB, sizeof(float) *n*k);
    cudaMalloc((void**)&dC, sizeof(float) *m*k);
    // Deterministic "random" fill: reseeding with i makes each element a
    // pure function of its index.
    for (int i = 0; i < m*n; i++) {
        srand(i);
        A[i] = rand() % 10;
    }
    for (int i = 0; i < n*k; i++) {
        srand(i);
        B[i] = rand() % 10;
    }
    struct timeval start, end;
    gettimeofday( &start, NULL );
    cudaMemcpy(dA, A, sizeof(float) * m * n, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, sizeof(float) * n * k, cudaMemcpyHostToDevice);
    // Split blocksize into a 2D block shape (assumes blocksize is a power of two).
    int blockx = pow(2,(int)(log2(blocksize))/2);
    int blocky;
    if(blockx*blockx == blocksize) blocky = blockx;
    else blocky = 2*blockx;
    dim3 Block(blockx, blocky);
    dim3 Grid((m+Block.x-1)/ Block.x, (k+Block.y-1)/ Block.y );
    // NOTE(review): matrix_mul_gpu has no bounds check, so when m or k is not
    // a multiple of the block shape, edge threads access out of range.
    matrix_mul_gpu <<<Grid, Block >>> (dA, dB, dC, n,k);
    // Copy the result back (this cudaMemcpy also synchronizes with the kernel).
    cudaMemcpy(C, dC, sizeof(float) * m * k, cudaMemcpyDeviceToHost);
    gettimeofday( &end, NULL );
    int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    printf("Block(%d,%d) Grid(%d,%d).\n", Block.x, Block.y, Grid.x, Grid.y);
    printf("total time is %f ms\n", timeuse/(float)1000);
    FILE *a, *b, *c;
    a = fopen("matrixA.m", "wb");
    b = fopen("matrixB.m", "wb");
    c = fopen("matrixC.m", "wb");
    // Robustness fix: the original wrote through unchecked FILE pointers.
    if (!a || !b || !c)
    {
        fprintf(stderr, "failed to open output files\n");
        return 1;
    }
    fprintf(a, "A = [ \n");
    for (int i = 0; i < m; i++)
    {
        for (int j = 0; j < n; j++)
            fprintf(a, "%f ", A[i * n + j]);
        fprintf(a, "\n");
    }
    fprintf(a, "];");
    fprintf(b, "B = [ \n");
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < k; j++)
            fprintf(b, "%f ", B[i * k + j]);
        fprintf(b, "\n");
    }
    fprintf(b, "];");
    fprintf(c, "C = [ \n");
    for (int i = 0; i < m; i++)
    {
        for (int j = 0; j < k; j++)
            fprintf(c, "%f ", C[i * k + j]);
        fprintf(c, "\n");
    }
    fprintf(c, "];");
    // Bug fix: the files were never closed, so buffered output could be lost.
    fclose(a);
    fclose(b);
    fclose(c);
    // Free host and device memory.
    free(A);
    free(B);
    free(C);
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    return 0;
}
|
21,334 | #include <cstdlib>
#include <cstdio>
#include <cassert>
typedef float float_t;
#define el(M, I, J) (*((float_t*)((char*)((M).ptr) + (I) * (M).pitch) + (J)))
#define w xsize
#define h ysize
#define eps 1e-4
#ifndef BLOCK_H
#define BLOCK_H 32
#endif
#ifndef BLOCK_W
#define BLOCK_W 32
#endif
// Allocates a zero-initialized h x w host matrix; aborts on failure.
cudaPitchedPtr allocHostMatrix(size_t h, size_t w)
{
#ifdef HAVE_SHARED
    // Shared-memory kernels require dimensions padded to BLOCK_W multiples.
    h = BLOCK_W * ((h + BLOCK_W - 1) / BLOCK_W);
    w = BLOCK_W * ((w + BLOCK_W - 1) / BLOCK_W);
#endif
    void *mem = calloc(w * h, sizeof(float_t));
    if (mem == NULL)
    {
        fprintf(stderr, "Out of memory on host!\n");
        exit(1);
    }
    // Host rows are tightly packed, so pitch equals the row byte width.
    return make_cudaPitchedPtr(mem, sizeof(float_t) * w, w, h);
}
// Allocates an h x w device matrix (pitched when HAVE_PITCH is defined);
// aborts the program if allocation fails.
cudaPitchedPtr allocDevMatrix(size_t h, size_t w)
{
#ifdef HAVE_SHARED
    // Shared-memory kernels require dimensions padded to BLOCK_W multiples.
    h = BLOCK_W * ((h + BLOCK_W - 1) / BLOCK_W);
    w = BLOCK_W * ((w + BLOCK_W - 1) / BLOCK_W);
#endif
    void *mem;
    size_t pitch;
    int rc;
#ifndef HAVE_PITCH
    // Flat allocation: rows packed back-to-back.
    pitch = sizeof(float_t) * w;
    rc = cudaMalloc(&mem, sizeof(float_t) * w * h);
#else
    // Pitched allocation: the driver pads each row for aligned access.
    rc = cudaMallocPitch(&mem, &pitch, sizeof(float_t) * w, h);
#endif
    if (cudaSuccess != rc)
    {
        fprintf(stderr, "Out of memory on device!");
        exit(1);
    }
    return make_cudaPitchedPtr(mem, pitch, w, h);
}
// Copies matrix contents between host/device buffers with equal dimensions;
// aborts on any CUDA copy error.
void matrixCpy(cudaPitchedPtr dst, cudaPitchedPtr src, cudaMemcpyKind kind)
{
    assert(src.h == dst.h && src.w == dst.w);
    int status;
#ifndef HAVE_PITCH
    // Contiguous layout: a single flat copy covers the whole matrix.
    status = cudaMemcpy(dst.ptr, src.ptr, src.h * src.w * sizeof(float_t), kind);
#else
    // Pitched layout: rows may be padded, so copy row-by-row via Memcpy2D.
    status = cudaMemcpy2D(dst.ptr, dst.pitch, src.ptr, src.pitch, dst.w * sizeof(float_t), dst.h, kind);
#endif
    if (status != cudaSuccess)
    {
        fprintf(stderr, "matrixCpy error %d!", status);
        exit(1);
    }
}
// Builds an h x w host matrix filled with uniform random values in [0, 1].
// Only the requested h x w region is filled; any HAVE_SHARED padding added by
// allocHostMatrix stays zero.
cudaPitchedPtr generateMatrix(size_t h, size_t w)
{
    cudaPitchedPtr mat = allocHostMatrix(h, w);
    for (size_t i = 0; i < h; ++i)
        for (size_t j = 0; j < w; ++j)
            el(mat, i, j) = (float_t)rand() / RAND_MAX;
    return mat;
}
// Reference CPU matrix multiply: C = A * B.
// Bug fix: the original accumulated with `+=` directly into C, silently
// requiring the caller to pass a pre-zeroed output buffer (a reused buffer
// produced wrong results). Each element is now computed in a local
// accumulator and assigned, so C's prior contents are irrelevant.
void hostMpy(cudaPitchedPtr C, cudaPitchedPtr A, cudaPitchedPtr B)
{
    assert(C.h == A.h && A.w == B.h && B.w == C.w);
    for (int i = 0; i < C.h; ++i)
    {
        for (int j = 0; j < C.w; ++j)
        {
            float_t acc = 0;
            for (int k = 0; k < A.w; ++k)
            {
                acc += el(A, i, k) * el(B, k, j);
            }
            el(C, i, j) = acc;
        }
    }
}
// Naive matrix multiply: one thread per element of C, reading A and B
// directly from global memory. Bounds-checked, so any grid covering C works.
__global__ void doDevMpy1(cudaPitchedPtr C, cudaPitchedPtr A, cudaPitchedPtr B)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= C.h || col >= C.w) return;
    float_t acc = 0;
    for (int k = 0; k < A.w; ++k)
        acc += el(A, row, k) * el(B, k, col);
    el(C, row, col) = acc;
}
// Shared-memory tiled matrix multiply: C = A * B, one thread per C element.
// Preconditions (asserted by devMpy): BLOCK_W == BLOCK_H and C.h, C.w, A.w
// are all multiples of BLOCK_W — hence no bounds checks here.
// Launch layout: blockDim = (BLOCK_W, BLOCK_H).
__global__ void doDevMpy2(cudaPitchedPtr C, cudaPitchedPtr A, cudaPitchedPtr B)
{
    // Global row/column of the C element this thread produces.
    int i = blockDim.y * blockIdx.y + threadIdx.y;
    int j = blockDim.x * blockIdx.x + threadIdx.x;
    // Position inside the shared-memory tile.
    int thrI = threadIdx.y;
    int thrJ = threadIdx.x;
    __shared__ float_t Ac[BLOCK_W][BLOCK_W], Bc[BLOCK_W][BLOCK_W];
    float_t result = 0;
    // March the tile window across the shared dimension A.w.
    for (int step = 0; step < A.w / BLOCK_W; ++step)
    {
        // Each thread loads one element of the A tile and one of the B tile.
        Ac[thrI][thrJ] = el(A, i, thrJ + BLOCK_W * step);
        Bc[thrI][thrJ] = el(B, thrI + BLOCK_W * step, j);
        __syncthreads(); // tiles fully loaded before any thread reads them
        for (int k = 0; k < BLOCK_W; ++k)
        {
            result += Ac[thrI][k] * Bc[k][thrJ];
        }
        __syncthreads(); // everyone done reading before the next load overwrites
    }
    el(C, i, j) = result;
}
// Shared-memory multiply where each thread computes TWO elements of C (rows
// i and i + BLOCK_H of column j). Requires BLOCK_W == 2 * BLOCK_H and all
// dimensions multiples of BLOCK_W (asserted by devMpy); the caller halves
// grid.y to account for the doubled per-thread output.
__global__ void doDevMpy3(cudaPitchedPtr C, cudaPitchedPtr A, cudaPitchedPtr B)
{
    // Global row/column of this thread's first C element; the second is at
    // row i + BLOCK_H.
    int i = BLOCK_W * blockIdx.y + threadIdx.y;
    int j = BLOCK_W * blockIdx.x + threadIdx.x;
    // Position inside the shared-memory tile.
    int thrI = threadIdx.y;
    int thrJ = threadIdx.x;
    __shared__ float_t Ac[BLOCK_W][BLOCK_W], Bc[BLOCK_W][BLOCK_W];
    float_t result1 = 0, result2 = 0;
    // March the tile window across the shared dimension A.w.
    for (int step = 0; step < A.w / BLOCK_W; ++step)
    {
        // Each thread loads two elements per tile: its own slot plus the
        // corresponding slot BLOCK_H rows below.
        Ac[thrI][thrJ] = el(A, i, thrJ + BLOCK_W * step);
        Bc[thrI][thrJ] = el(B, thrI + BLOCK_W * step, j);
        Ac[thrI + BLOCK_H][thrJ] = el(A, i + BLOCK_H, thrJ + BLOCK_W * step);
        Bc[thrI + BLOCK_H][thrJ] = el(B, thrI + BLOCK_W * step + BLOCK_H, j);
        __syncthreads(); // tiles fully loaded before any thread reads them
        for (int k = 0; k < BLOCK_W; ++k)
        {
            result1 += Ac[thrI][k] * Bc[k][thrJ];
            result2 += Ac[thrI + BLOCK_H][k] * Bc[k][thrJ];
        }
        __syncthreads(); // everyone done reading before the next load overwrites
    }
    el(C, i, j) = result1;
    el(C, i + BLOCK_H, j) = result2;
}
// Dispatches the appropriate GPU multiply kernel for C = A * B, chosen at
// compile time by HAVE_SHARED and the BLOCK_H/BLOCK_W relationship.
void devMpy(cudaPitchedPtr C, cudaPitchedPtr A, cudaPitchedPtr B)
{
    assert(C.h == A.h && A.w == B.h && B.w == C.w);
    dim3 threads(BLOCK_W, BLOCK_H);
    // One thread per element of C (the doDevMpy3 path halves grid.y below).
    dim3 grid((C.w + BLOCK_W - 1) / BLOCK_W, (C.h + BLOCK_H - 1) / BLOCK_H);
#if defined(HAVE_SHARED) && BLOCK_H == BLOCK_W
    // Square tiles: shared-memory kernel, one output element per thread.
    assert(BLOCK_W == BLOCK_H && C.h % BLOCK_W == 0 && C.w % BLOCK_W == 0 && A.w % BLOCK_W == 0);
    doDevMpy2<<<grid, threads>>>(C, A, B);
#elif defined(HAVE_SHARED) && BLOCK_H * 2 == BLOCK_W
    // Wide tiles: each thread computes two output rows, so half the y blocks.
    grid.y /= 2;
    assert(BLOCK_W == 2 * BLOCK_H && C.h % BLOCK_W == 0 && C.w % BLOCK_W == 0 && A.w % BLOCK_W == 0);
    doDevMpy3<<<grid, threads>>>(C, A, B);
#else
    // Fallback: naive bounds-checked global-memory kernel.
    doDevMpy1<<<grid, threads>>>(C, A, B);
#endif
}
void matrixPrint(const char* name, cudaPitchedPtr A)
{
printf("Matrix %s:\n", name);
// Show at most a 30 x 15 window of the matrix; an ellipsis marks any
// truncated rows or columns.
int rowsShown = A.h;
int colsShown = A.w;
if (rowsShown > 30) rowsShown = 30;
if (colsShown > 15) colsShown = 15;
int r, c;
for (r = 0; r < rowsShown; ++r)
{
for (c = 0; c < colsShown; ++c)
printf("%f ", el(A, r, c));
if (c != A.w)
printf("...");
printf("\n");
}
if (r != A.h)
printf("...\n");
printf("-----\n");
}
bool hostEquals(cudaPitchedPtr A, cudaPitchedPtr B)
{
// Two matrices are "equal" when shapes match and every pair of elements
// differs by at most eps.
if (A.h != B.h || A.w != B.w) return false;
for (int row = 0; row < A.h; ++row)
for (int col = 0; col < A.w; ++col)
if (fabs(el(A, row, col) - el(B, row, col)) > eps)
return false;
return true;
}
int main(int argc, char** argv)
{
// Multiply an n x m matrix by an m x k matrix on the GPU, time the kernel
// with CUDA events, and optionally ("--check") verify against a host
// multiply. DUMP_RESULTS in the environment dumps all four matrices.
if (argc < 4)
{
fprintf(stderr, "Usage: %s n m k [--check]\n", argv[0]);
return 1;
}
int n = atoi(argv[1]), m = atoi(argv[2]), k = atoi(argv[3]);
bool check = argc >= 5 && !strcmp(argv[4], "--check");
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float time;
// Host copies (A, B, C) and device copies (Ad, Bd, Cd).
cudaPitchedPtr A = generateMatrix(n, m), B = generateMatrix(m, k), C = allocHostMatrix(n, k);
cudaPitchedPtr Ad = allocDevMatrix(n, m), Bd = allocDevMatrix(m, k), Cd = allocDevMatrix(n, k);
matrixCpy(Ad, A, cudaMemcpyHostToDevice);
matrixCpy(Bd, B, cudaMemcpyHostToDevice);
// Time only the device multiply, not the transfers.
cudaEventRecord(start, 0);
devMpy(Cd, Ad, Bd);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Elapsed time: %f\n", time);
matrixCpy(C, Cd, cudaMemcpyDeviceToHost);
cudaFree(Ad.ptr);
cudaFree(Bd.ptr);
cudaFree(Cd.ptr);
if (check)
{
// Recompute on the host and compare element-wise (see hostEquals).
cudaPitchedPtr C1 = allocHostMatrix(n, k);
hostMpy(C1, A, B);
if (getenv("DUMP_RESULTS"))
{
matrixPrint("A", A);
matrixPrint("B", B);
matrixPrint("C", C);
matrixPrint("C1", C1);
}
printf("Check %s!\n", hostEquals(C, C1) ? "OK" : "Not OK");
free(C1.ptr);
}
free(A.ptr);
free(B.ptr);
free(C.ptr);
}
|
21,335 | /* ENGR-E 517 High Performance Computing
* Original Author : Matt Anderson (Serial Implementation 2D)
* Name : Ninaad Joshi (Serial and Parallel Implementation 1D)
* Project : Demonstration of the 2D Heat Distribution
* Problem using CUDA programming model
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#include <sys/time.h>
/*****************************************************************/
/* set the DEBUG flag to 1 to display the values for every iteration,
* or set to 0 for measuring time for both CPU and GPU
*/
#ifndef DEBUG
#define DEBUG 0
#endif
/* set the DISPLAY flag to 1 to display the final matrix for CPU and GPU
*/
#ifndef DISPLAY
#define DISPLAY 0
#endif
/****************************************************************/
#define TEMP 50.0
#define EPS 1e-6
#define I_FIX 5
#define J_FIX 5
#ifndef COLS
#define COLS 100
#endif
#ifndef ROWS
#define ROWS 100
#endif
#ifndef BLOCK_SIZE_X
#define BLOCK_SIZE_X 32
#endif
#ifndef BLOCK_SIZE_Y
#define BLOCK_SIZE_Y 32
#endif
double* alloc_matrix(){
// Allocate an uninitialized ROWS x COLS matrix (row-major).
// Exit on allocation failure instead of returning NULL: no caller checks
// the result before dereferencing it, so the original would have crashed
// later with a less useful diagnostic.
double* matrix = (double*) malloc(ROWS * COLS * sizeof(double));
if (matrix == NULL) {
fprintf(stderr, "alloc_matrix: out of memory\n");
exit(EXIT_FAILURE);
}
return matrix;
}
void init_matrix(double* matrix){
// Zero the whole ROWS x COLS grid, then plant the fixed heat source.
for (int idx = 0; idx < ROWS * COLS; idx++)
matrix[idx] = 0.0;
matrix[I_FIX * COLS + J_FIX] = TEMP;
}
void print_matrix(double* matrix){
// One text line per matrix row; entries separated by a single space.
for (int r = 0; r < ROWS; r++) {
for (int c = 0; c < COLS; c++)
printf("%3.7lf ", matrix[r * COLS + c]);
printf("\n");
}
}
void copy_matrix(double* dest, double* source) {
// Element-by-element copy of a ROWS x COLS matrix (same traversal order
// as the original row/column loops).
for (int idx = 0; idx < ROWS * COLS; idx++)
dest[idx] = source[idx];
}
double max_abs(double* m1, double* m2){
// Largest absolute element-wise difference between two ROWS x COLS
// matrices; used as the convergence criterion of the iteration loops.
// Fixes: start from 0.0 (the original started from DBL_MIN, the smallest
// *positive* double, which is misleading and made an all-zero difference
// return DBL_MIN instead of 0), and evaluate fabs() once per element
// instead of twice.
double max_val = 0.0;
for (int i = 0; i < ROWS; i++)
for (int j = 0; j < COLS; j++){
double diff = fabs(m1[i * COLS + j] - m2[i * COLS + j]);
if (diff > max_val)
max_val = diff;
}
return max_val;
}
/***********CPU***********/
void compute_new_values(double* old_matrix, double* new_matrix){
// One Jacobi sweep: every interior cell becomes the mean of its four
// neighbours; boundary cells are left untouched. The heat source at
// (I_FIX, J_FIX) is re-pinned to TEMP afterwards.
for (int r = 1; r < ROWS - 1; r++)
for (int c = 1; c < COLS - 1; c++) {
double up    = old_matrix[(r - 1) * COLS + c];
double down  = old_matrix[(r + 1) * COLS + c];
double left  = old_matrix[r * COLS + (c - 1)];
double right = old_matrix[r * COLS + (c + 1)];
new_matrix[r * COLS + c] = 0.25 * (up + down + left + right);
}
new_matrix[I_FIX * COLS + J_FIX] = TEMP;
}
/***********CPU***********/
/***********GPU***********/
__global__ void compute_new_values_gpu(const double* __restrict__ d_old_matrix,
double* __restrict__ d_new_matrix){
// One Jacobi update step, one thread per cell. i indexes rows via the
// launch's x dimension and j indexes columns via y, matching the grid
// built in main(): divideUp(ROWS, BLOCK_SIZE_X) x divideUp(COLS, BLOCK_SIZE_Y).
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
// The fixed heat source keeps its temperature; interior cells take the
// average of their four neighbours; boundary and out-of-range threads
// write nothing.
if (i == I_FIX && j == J_FIX)
d_new_matrix[I_FIX * COLS + J_FIX] = TEMP;
else if (0 < i && i < ROWS - 1 && 0 < j && j < COLS - 1)
d_new_matrix[i * COLS + j] = 0.25 * (d_old_matrix[(i-1) * COLS + j]
+ d_old_matrix[(i+1) * COLS + j]
+ d_old_matrix[i * COLS + (j-1)]
+ d_old_matrix[i * COLS + (j+1)]);
}
/***********GPU***********/
/* Ceiling division for non-negative n1 and positive n2: the smallest
 * integer q such that q * n2 >= n1.
 */
int divideUp(int n1, int n2) {
int q = n1 / n2;
return (n1 % n2) ? q + 1 : q;
}
int main(int argc, char *argv[]) {
// Runs the 2D heat-distribution relaxation twice -- serially on the CPU
// until converged (max element change < EPS), then the same number of
// iterations on the GPU -- and reports both times plus the max deviation
// between the two results.
//CPU
double *a_old = alloc_matrix(); //allocate memory for the matrices
double *a_new = alloc_matrix();
struct timeval a_start, a_end;
double tos_serial;
// GPU
long int iterations = 0, i = 0;
double *h_in = alloc_matrix(); //allocate memory for the matrices
double *h_out = alloc_matrix();
int error;
double *d_in;
double *d_out;
struct timeval h_start, h_end;
double tos_cuda;
printf("DISPLAY = %d DEBUG = %d ROWS = %d COLS = %d\n", DISPLAY, DEBUG, ROWS, COLS);
printf("BLOCK_SIZE_X = %d BLOCK_SIZE_Y = %d\n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
/*************************CPU**************************/
init_matrix(a_old); //initialize the matrices
init_matrix(a_new);
printf("CPU: Starting the serial heat distribution\n");
if (DISPLAY || DEBUG){
printf("CPU:The initial heat distribution matrix is:\n");
print_matrix(a_old);
}
gettimeofday(&a_start, NULL);
// Iterate until the largest cell change in one sweep drops below EPS;
// "iterations" is reused below to drive the GPU loop the same number
// of times.
while (1) {
if (DEBUG)
printf("\nCPU:Performing a new iteration...%ld\n", iterations);
//compute new values and put them into a_new
compute_new_values(a_old, a_new);
if (DEBUG) {
printf("CPU:a_old is:\n"); //output matrix to screen
print_matrix(a_old);
printf("CPU:a_new is:\n");
print_matrix(a_new);
}
//calculate the maximum absolute differences among pairwise
// differences of old and new matrix elements
double max_diff = max_abs(a_old, a_new);
if (DEBUG)
printf("CPU:Max diff is: %f\n", max_diff);
if (max_diff < EPS)
break;
copy_matrix(a_old, a_new); //assign values of a_new to a_old
if (DEBUG)
printf("CPU:End of iteration...%ld\n", iterations);
++iterations;
}
gettimeofday(&a_end, NULL);
tos_serial = (a_end.tv_sec - a_start.tv_sec) + \
(a_end.tv_usec - a_start.tv_usec)/1000000.0;
printf("CPU:Time required is %.3e\n", tos_serial);
if (DISPLAY || DEBUG){
printf("CPU:The final heat distribution matrix is:\n");
print_matrix(a_new);
}
printf("The iterations performed by the serial code are %ld\n", iterations);
/*************************GPU**********************/
printf("GPU:Starting the parallel heat distribution on CUDA\n");
init_matrix(h_in); //initialize the matrices
init_matrix(h_out);
cudaMalloc((void **)&d_in, (size_t) ROWS * COLS * sizeof(double));
error = cudaGetLastError();
if (DEBUG)
printf("GPU:d_in cudaMalloc error = %d\n", error);
cudaMalloc((void **)&d_out, (size_t) ROWS * COLS * sizeof(double));
error = cudaGetLastError();
if (DEBUG)
printf("GPU:d_out cudaMalloc error = %d\n", error);
// copy data from host memory to device memory
cudaMemcpy(d_in, h_in, ROWS * COLS * sizeof(double), cudaMemcpyHostToDevice);
// copy data from device memory to device memory
cudaMemcpy(d_out, d_in, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToDevice);
// block and grid dimensions
dim3 blocks(BLOCK_SIZE_X, BLOCK_SIZE_Y);
dim3 grids(divideUp(ROWS, BLOCK_SIZE_X), divideUp(COLS, BLOCK_SIZE_Y));
gettimeofday(&h_start, NULL);
// Run exactly iterations + 1 sweeps (the count the CPU needed), ping-
// ponging d_in/d_out via a device-to-device copy each pass.
for(i = 0; i < iterations + 1; ++i) {
//compute new values and put them into d_out
compute_new_values_gpu<<<grids, blocks>>>(d_in, d_out);
if (DEBUG){
printf("GPU:Performing a new iteration...%ld\n", i);
// copy data from device memory to host memory
cudaMemcpy(h_in, d_in, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToHost);
cudaMemcpy(h_out, d_out, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToHost);
printf("GPU:d_in is:\n"); //output d_in to screen
print_matrix(h_in);
printf("GPU:d_out is:\n"); //output d_out to screen
print_matrix(h_out);
//calculate the maximum absolute differences among pairwise
// differences of old and new matrix elements
double max_diff = max_abs(h_in, h_out);
printf("GPU:Max diff is: %f\n", max_diff);
if (max_diff < EPS)
break;
printf("GPU:End of iteration...%ld\n", i);
}
// make the current d_out as d_in
cudaMemcpy(d_in, d_out, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToDevice);
}
// NOTE(review): no explicit cudaDeviceSynchronize() before this timestamp;
// the blocking cudaMemcpy below happens after h_end is taken, so the GPU
// timing may not cover all outstanding work -- confirm intended.
gettimeofday(&h_end, NULL);
// copy data from device memory to host memory
cudaMemcpy(h_out, d_out, ROWS * COLS * sizeof(double), cudaMemcpyDeviceToHost);
tos_cuda = (h_end.tv_sec - h_start.tv_sec) + \
(h_end.tv_usec - h_start.tv_usec)/1000000.0;
printf("GPU:Time required is %.3e seconds\n", tos_cuda);
if (DISPLAY || DEBUG){
printf("GPU:The final heat distribution matrix is:\n");
print_matrix(h_out);
}
//calculate the maximum absolute differences among pairwise
// differences of old and new matrix elements
double max_diff = max_abs(h_out, a_new);
printf("GPU:Max diff between serial and CUDA implementation is: %f\n",\
max_diff);
printf("Speed Up achieved is : %.3lf\n", tos_serial/tos_cuda);
free(h_in);
free(h_out);
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
21,336 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cuda.h>
__global__ void copy_array(float* A, float* B)
{
// Straight element copy B[i] = A[i], one thread per element.
// NOTE(review): there is no bounds check, so the host launch must cover
// exactly the allocated range; with N >= 256 the host launches only N
// threads while the buffers hold N+1 elements, leaving index N uncopied
// -- confirm intended.
// int i = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
B[i] = A[i];
}
__global__ void prefix_sum_extend(float* B, int t, int s)
{
// Up-sweep step: builds the next (coarser) level of the tree inside the
// flat buffer B. Entry t+i becomes the SMALLER of the adjacent pair
// B[s+2i-1], B[s+2i] from the previous level (s = start offset of the
// previous level, t = start offset of the new one). Despite the
// "prefix_sum" name, the combining operation is minimum.
// i == 0 is skipped because the buffer is used 1-based.
// int i = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i != 0)
{
if(B[s+(2*i)-1] > B[s+(2*i)])
B[t+i] = B[s+(2*i)];
else
B[t+i] = B[s+(2*i)-1];
}
}
__global__ void prefix_sum_drop(float* B, float* C, int t, int s)
{
// Down-sweep step: fills level t of the result buffer C from the coarser
// level s (presumably the level built from t during the up-sweep -- verify
// against main's offset bookkeeping). For each 1-based position i:
//   i == 1      -> copy straight from B (no preceding elements),
//   i even      -> take the coarser level's value C[s + i/2],
//   i odd (>1)  -> the smaller of the coarser value C[s + (i-1)/2]
//                  and the tree entry B[t+i].
// int i = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i!= 0)
{
if(i==1)
C[t+i]=B[t+i];
else if(i%2==0)
C[t+i]=C[s+i/2];
else
if(C[s+((i-1)/2)] > B[t+i])
C[t+i]=B[t+i];
else
C[t+i]=C[s+((i-1)/2)];
}
}
int main(int argc, char** argv)
{
// Reads k, builds N = 2^k random 1-based values, and runs a tree-based
// prefix-minimum: an up-sweep (prefix_sum_extend) builds k successively
// halved levels inside d_B, then a down-sweep (prefix_sum_drop) rebuilds
// per-level prefix results in d_C, whose finest level is copied to d_S.
// Results are printed only for N < 256.
int k;
scanf("%d",&k);
int i,m,t,s,h;
int N = (int)pow(2.0,k);
size_t size = (N+1) * sizeof(float);
size_t size1 = (2*N)* sizeof(float);
float* h_A = (float*)malloc(size);
float* h_S = (float*)malloc(size);
float* h_B = (float*)malloc(size1);
float* h_C = (float*)malloc(size1);
for(i=1;i<=N;i++)
h_A[i]=rand()%10+1;
/* for (int i=1; i<=N; i++)
printf("%f ",h_A[i]);
printf("\n");
*/
float* d_A;
cudaMalloc(&d_A, size);
float* d_S;
cudaMalloc(&d_S, size);
float* d_B;
cudaMalloc(&d_B, size1);
float* d_C;
cudaMalloc(&d_C, size1);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
// Launch configuration: 256-thread blocks for large inputs, otherwise
// one thread per block so that exactly N+1 (or m+1) threads run.
int block_size, n_blocks;
if(N >=256)
{
block_size = 256;
n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
}else{
block_size =1;
n_blocks = N+1;
}
// copy_array<<<1, N+1>>>(d_A, d_B);
copy_array<<<n_blocks, block_size>>>(d_A, d_B);
// Up-sweep: level h starts at offset t and holds m entries; each pass
// halves m (m ends at 1 since N is a power of two).
m = N;
t=0;
for(h=1;h<=k;h++)
{
s=t;
t=t+m;
m=m/2;
if(m >=256)
{
block_size = 256;
n_blocks = m/block_size + (m%block_size == 0 ? 0:1);
}else{
block_size =1;
n_blocks = m+1;
}
// prefix_sum_extend<<<1, m+1>>>(d_B, t, s);
prefix_sum_extend<<<n_blocks, block_size>>>(d_B, t, s);
}
// Down-sweep: walk the levels back from coarsest (m == 1 here) to the
// finest, doubling m and moving the level offsets in the other direction.
for(h=k;h>=0;h--)
{
if(m >=256)
{
block_size = 256;
n_blocks = m/block_size + (m%block_size == 0 ? 0:1);
}else{
block_size =1;
n_blocks = m+1;
}
// prefix_sum_drop<<<1, m+1>>>(d_B, d_C, t, s);
prefix_sum_drop<<<n_blocks, block_size>>>(d_B, d_C, t, s);
m=2*m;
s=t;
t=t-m;
}
if(N >=256)
{
block_size = 256;
n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
}else{
block_size =1;
n_blocks = N+1;
}
// copy_array<<<1, N+1>>>(d_C, d_S);
copy_array<<<n_blocks, block_size>>>(d_C, d_S);
cudaMemcpy(h_B, d_B, size1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_C, d_C, size1, cudaMemcpyDeviceToHost);
cudaMemcpy(h_S, d_S, size, cudaMemcpyDeviceToHost);
if(N < 256)
{
for (int i=1; i<=N; i++)
printf("%f ",h_S[i]);
printf("\n");
}
cudaFree(d_A);
cudaFree(d_S);
cudaFree(d_B);
cudaFree(d_C);
}
|
21,337 | #include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 32
#define N 10240
__global__ void doubleValues(int*difference, int* numbers, int length) {
// Forward difference: difference[i] = numbers[i+1] - numbers[i] for
// i in [0, length-1); the final slot is left untouched.
//
// Fix: the original staged `numbers` through a __shared__ array indexed
// with the *global* thread index. Every block after the first therefore
// wrote far outside its useful shared range, and the last thread of each
// block read a shared slot no thread in that block had written
// (uninitialized -> garbage at every block boundary). Reading straight
// from global memory is correct and simpler; both loads per thread are
// coalesced. A `< length - 1` guard also protects grids that overshoot.
int index = BLOCK_SIZE * blockIdx.x + threadIdx.x;
if(index < length - 1) {
difference[index] = numbers[index + 1] - numbers[index];
}
}
int main() {
// Fill an array with squares, compute adjacent differences on the GPU,
// and print the result.
int* cpu_arr = (int*)malloc(N * sizeof(int));
if(!cpu_arr) {
perror("malloc");
exit(1);
}
for(int i = 0; i < N; i++) {
cpu_arr[i] = i * i;
}
int* gpu_arr;
if(cudaMalloc(&gpu_arr, sizeof(int) * N) != cudaSuccess) {
fprintf(stderr, "Failed to allocate array on GPU\n");
exit(2);
}
if(cudaMemcpy(gpu_arr, cpu_arr, sizeof(int) * N, cudaMemcpyHostToDevice) != cudaSuccess) {
fprintf(stderr, "Failed to copy array to the GPU\n");
}
int* gpu_difference;
if(cudaMalloc(&gpu_difference, sizeof(int) * N) != cudaSuccess) {
fprintf(stderr, "Failed to allocate array on GPU\n");
exit(2);
}
// N is an exact multiple of BLOCK_SIZE, so N/BLOCK_SIZE blocks cover
// every index with no remainder.
doubleValues<<<N/BLOCK_SIZE, BLOCK_SIZE>>>(gpu_difference, gpu_arr, N);
cudaDeviceSynchronize();
if(cudaMemcpy(cpu_arr, gpu_difference, sizeof(int) * N, cudaMemcpyDeviceToHost) != cudaSuccess) {
fprintf(stderr, "Failed to copy array to the CPU\n");
}
// NOTE(review): the kernel never writes difference[N-1], so the last
// value printed below comes from uninitialized device memory -- confirm
// intended.
for(int i = 0; i < N; i++) {
printf("%d\n", cpu_arr[i]);
}
free(cpu_arr);
cudaFree(gpu_arr);
cudaFree(gpu_difference);
return 0;
}
|
21,338 |
#include "kernel.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
|
21,339 | #include<assert.h>
int main(void){
// Deliberately failing assertion -- the program aborts at runtime.
// Presumably a smoke test that assert() is active in this build (i.e.
// NDEBUG is not defined); verify against the build configuration.
assert(1==2);
return 0;
}
|
21,340 | #ifdef _GLIBCXX_USE_INT128
#undef _GLIBCXX_USE_INT128
#endif
#ifdef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_ATOMIC_BUILTINS
#endif
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/binary_search.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/adjacent_difference.h>
#include <iostream>
#include <iterator>
#include <cstdlib>
// This example compute the histogram [1] and cumulative
// histogram of an array of integer values.
//
// [1] http://en.wikipedia.org/wiki/Histogram
int main(void)
{
// Histogram via sort + binary search: sort the values, use upper_bound
// against 0..num_bins-1 to get a cumulative histogram, then take adjacent
// differences to recover per-bin counts.
const size_t N = 30;
// generate random data on the host
// (rand() is unseeded, so the data -- and all printed output -- is the
// same on every run)
thrust::host_vector<int> h_data(N);
for(size_t i = 0; i < N; i++)
h_data[i] = rand() % 10;
// transfer data to device
thrust::device_vector<int> d_data(N);
// print the initial data
std::cout << "initial data" << std::endl;
thrust::copy(d_data.begin(), d_data.end(), std::ostream_iterator<int>(std::cout, " "));
std::cout << std::endl;
// sort data to bring equal elements together
thrust::sort(d_data.begin(), d_data.end());
// print the sorted data
std::cout << "sorted data" << std::endl;
thrust::copy(d_data.begin(), d_data.end(), std::ostream_iterator<int>(std::cout, " "));
std::cout << std::endl;
// number of histogram bins is equal to the maximum value plus one
// (after the sort, back() is the maximum element; assumes N > 0 and
// non-negative values)
const int num_bins = d_data.back() + 1;
// allocate storage for the cumulative histogram and histogram
thrust::device_vector<int> d_cumulative_histogram(num_bins);
thrust::device_vector<int> d_histogram(num_bins);
// find the end of each bin of values
thrust::counting_iterator<int> search_begin(0);
thrust::upper_bound(d_data.begin(),
d_data.end(),
search_begin,
search_begin + num_bins,
d_cumulative_histogram.begin());
// print the cumulative histogram
std::cout << "cumulative histogram" << std::endl;
thrust::copy(d_cumulative_histogram.begin(), d_cumulative_histogram.end(), std::ostream_iterator<int>(std::cout, " "));
std::cout << std::endl;
// compute the histogram by taking differences of the cumulative histogram
thrust::adjacent_difference(d_cumulative_histogram.begin(),
d_cumulative_histogram.end(),
d_histogram.begin());
// print the histogram
std::cout << "histogram" << std::endl;
thrust::copy(d_histogram.begin(), d_histogram.end(), std::ostream_iterator<int>(std::cout, " "));
std::cout << std::endl;
std::cout << "TEST PASSED\n";
return 0;
}
|
21,341 | #include "includes.h"
__global__ void smooth(float * v_new, const float * v) {
// 1-D three-point smoothing with replicated boundary values:
//   v_new[i] = 0.25*v[i-1] + 0.5*v[i] + 0.25*v[i+1]
// Assumes a 1-D launch whose total thread count equals the array length.
//
// Index fix: the original computed threadIdx.x * gridDim.x + blockIdx.x,
// a transposed mapping that still covers every element exactly once, but
// makes adjacent threads of a warp touch elements gridDim.x apart
// (uncoalesced global access). The standard mapping below gives
// coalesced accesses and the identical result.
int myIdx = blockIdx.x * blockDim.x + threadIdx.x;
int numThreads = blockDim.x * gridDim.x;
int myLeftIdx = (myIdx == 0) ? 0 : myIdx - 1;
int myRightIdx = (myIdx == (numThreads - 1)) ? numThreads - 1 : myIdx + 1;
float myElt = v[myIdx];
float myLeftElt = v[myLeftIdx];
float myRightElt = v[myRightIdx];
v_new[myIdx] = 0.25f * myLeftElt + 0.5f * myElt + 0.25f * myRightElt;
}
21,342 | // is reduce in thrust foldl or foldr?
// => it is foldl in Haskell
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <iostream>
void print_array(int* data, int len){
// Write the elements back-to-back (no separator), then a newline.
for(int idx = 0; idx < len; ++idx)
std::cout << data[idx];
std::cout << std::endl;
}
// Binary functor for a left fold with division: accumulator / element.
// Improvements over the original:
//  * operator() is now const -- algorithms may copy the functor or hold it
//    by const reference, and a non-const call operator can fail to compile
//    in those contexts;
//  * no longer derives from thrust::binary_function, which is deprecated
//    (it mirrors std::binary_function, removed in C++17) and whose typedefs
//    are not used anywhere in this file.
struct div_func {
__host__ __device__
float operator()(const float &result, const float &arg) const {
return result / arg;
}
};
int main(void){
// Demonstrates that thrust::reduce folds from the left:
// ((24 / 1) / 2) / 3 = 4.
// NOTE(review): the initial value 24 is an int, and thrust::reduce's
// accumulator takes the init argument's type -- every intermediate
// quotient here happens to be an exact integer, but non-integral
// intermediates would be truncated; verify this is intended.
int len = 3;
int data[] = {1,2,3};
div_func op;
// 24 / 1 / 2 / 3 = 4
float result = thrust::reduce(data, data+len, 24, op);
print_array(data, len);
std::cout << result << std::endl;
return 0;
}
|
21,343 | // Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
#include <cuda.h>
#include <cuda_runtime.h>
#include <cstdint>
using namespace std;
__global__ void cuda_add_impl(int64_t N, float* O, const float* X, const float* Y) {
// Element-wise O = X + Y with a bounds guard.
// Fix: use the global thread index (block offset + thread offset). The
// original used threadIdx.x alone, so regardless of how many blocks were
// launched, only the first blockDim.x elements were ever written. With
// the existing single-block launch the behavior is unchanged.
auto offset = (int64_t)blockIdx.x * blockDim.x + threadIdx.x;
if (offset < N) {
O[offset] = Y[offset] + X[offset];
}
}
void cuda_add(int64_t N, float* O, const float* X, const float* Y) {
// Launch enough 256-thread blocks to cover all N elements. The original
// launched exactly one block, silently ignoring elements beyond index 255.
const int64_t threadsPerBlock = 256;
int64_t blocks = (N + threadsPerBlock - 1) / threadsPerBlock;
if (blocks < 1) blocks = 1;  // keep a valid launch configuration for N == 0
cuda_add_impl<<<blocks, threadsPerBlock>>>(N, O, X, Y);
}
|
21,344 | // #include "cublas_v2.h"
// #include "cusparse_v2.h"
// #include "curand.h"
// #include <iostream>
// #include <vector>
// #include <stdexcept>
// #include <cstdio>
// #include <chrono>
// #include "langevin.hpp"
// #include "gpu_utils.cuh"
// template <typename RealType>
// __global__ void update_positions(
// const RealType *noise,
// const RealType coeff_a,
// const RealType *coeff_bs, // N x 3, not P x N x 3, but we could just pass in the first index
// const RealType *coeff_cs,
// const RealType coeff_d,
// const RealType coeff_e,
// const RealType *dE_dx,
// const RealType dt,
// const int N,
// const int D,
// RealType *x_t,
// RealType *v_t) {
// int atom_idx = blockIdx.x*blockDim.x + threadIdx.x;
// if(atom_idx >= N) {
// return;
// }
// int d_idx = blockIdx.y;
// int local_idx = atom_idx*D + d_idx;
// // only integrate first three dimensions
// if(d_idx >= 3) {
// // allow for growth in the higher dimensions
// x_t[local_idx] *= coeff_e;
// return;
// }
// // truncated noise
// auto n = noise[local_idx];
// v_t[local_idx] = coeff_a*v_t[local_idx] - coeff_bs[atom_idx]*dE_dx[local_idx] + coeff_cs[atom_idx]*n;
// x_t[local_idx] = (1 - coeff_d*coeff_bs[atom_idx]*dt)*x_t[local_idx] + v_t[local_idx]*dt;
// }
// template<typename RealType>
// __global__ void update_derivatives(
// const RealType coeff_a,
// const RealType *coeff_bs, // shape N
// const RealType coeff_d,
// const RealType *d2E_dxdp,
// const RealType dt,
// const int N,
// const int D,
// RealType *dx_dp_t,
// RealType *dv_dp_t) {
// int atom_idx = blockIdx.x*blockDim.x + threadIdx.x;
// if(atom_idx >= N) {
// return;
// }
// // only integrate first three dimensions
// int d_idx = blockIdx.y;
// if(d_idx >= 3) {
// return;
// }
// int p_idx = blockIdx.z;
// int local_idx = p_idx*N*D + atom_idx*D + d_idx;
// // derivative of the above equation
// RealType tmp = coeff_a*dv_dp_t[local_idx] - coeff_bs[atom_idx]*d2E_dxdp[local_idx];
// dv_dp_t[local_idx] = tmp;
// dx_dp_t[local_idx] = (1 - coeff_d*coeff_bs[atom_idx]*dt)*dx_dp_t[local_idx] + dt*tmp;
// }
// namespace timemachine {
// template<typename RealType>
// LangevinOptimizer<RealType>::LangevinOptimizer(
// RealType dt,
// const int num_dims,
// const RealType coeff_a,
// const std::vector<RealType> &coeff_bs,
// const std::vector<RealType> &coeff_cs,
// const int no) :
// dt_(dt),
// coeff_a_(coeff_a),
// coeff_d_(0.0),
// coeff_e_(1.0),
// d_rng_buffer_(nullptr),
// N_offset_(no) {
// auto start = std::chrono::high_resolution_clock::now();
// gpuErrchk(cudaMalloc((void**)&d_coeff_bs_, coeff_bs.size()*sizeof(RealType)));
// gpuErrchk(cudaMalloc((void**)&d_coeff_cs_, coeff_cs.size()*sizeof(RealType)));
// gpuErrchk(cudaMemcpy(d_coeff_bs_, &coeff_bs[0], coeff_bs.size()*sizeof(RealType), cudaMemcpyHostToDevice));
// gpuErrchk(cudaMemcpy(d_coeff_cs_, &coeff_cs[0], coeff_cs.size()*sizeof(RealType), cudaMemcpyHostToDevice));
// cublasErrchk(cublasCreate(&cb_handle_));
// // curandErrchk(curandCreateGenerator(&cr_rng_, CURAND_RNG_PSEUDO_PHILOX4_32_10)); // DESRES
// curandErrchk(curandCreateGenerator(&cr_rng_, CURAND_RNG_PSEUDO_DEFAULT));
// gpuErrchk(cudaMalloc((void**)&d_rng_buffer_, coeff_bs.size()*num_dims*sizeof(RealType)));
// auto end = std::chrono::high_resolution_clock::now();
// auto seed = std::chrono::duration_cast<std::chrono::microseconds>(end-start).count();
// curandErrchk(curandSetPseudoRandomGeneratorSeed(cr_rng_, seed));
// }
// template<typename RealType>
// LangevinOptimizer<RealType>::~LangevinOptimizer() {
// gpuErrchk(cudaFree(d_coeff_bs_));
// gpuErrchk(cudaFree(d_coeff_cs_));
// gpuErrchk(cudaFree(d_rng_buffer_));
// cublasErrchk(cublasDestroy(cb_handle_));
// curandErrchk(curandDestroyGenerator(cr_rng_));
// }
// template<typename RealType>
// RealType LangevinOptimizer<RealType>::get_dt() const {
// return dt_;
// }
// template<typename RealType>
// void LangevinOptimizer<RealType>::step(
// const int N,
// const int D,
// const int DP,
// const RealType *dE_dx,
// const RealType *d2E_dx2, // may be null
// RealType *d2E_dxdp, // this is modified in place
// RealType *d_x_t,
// RealType *d_v_t,
// RealType *d_dx_dp_t,
// RealType *d_dv_dp_t,
// const RealType *d_input_noise_buffer) const {
// if(N_offset_ == 0) {
// throw std::runtime_error("bad N_offset");
// }
// size_t tpb = 32;
// size_t n_blocks = (N*D + tpb - 1) / tpb;
// if(d2E_dx2 != nullptr && d2E_dxdp != nullptr) {
// hessian_vector_product(N, D, DP, N_offset_, d2E_dx2, d_dx_dp_t, d2E_dxdp);
// dim3 dimGrid_dxdp(n_blocks, D, DP); // x, y, z dims
// update_derivatives<RealType><<<dimGrid_dxdp, tpb>>>(
// coeff_a_,
// d_coeff_bs_,
// coeff_d_,
// d2E_dxdp,
// dt_,
// N,
// D,
// d_dx_dp_t,
// d_dv_dp_t
// );
// gpuErrchk(cudaPeekAtLastError());
// }
// const RealType* d_noise_buf = nullptr;
// if(d_input_noise_buffer == nullptr) {
// curandErrchk(templateCurandNormal(cr_rng_, d_rng_buffer_, N*D, 0.0, 1.0));
// d_noise_buf = d_rng_buffer_;
// } else {
// d_noise_buf = d_input_noise_buffer;
// }
// dim3 dimGrid_dx(n_blocks, D);
// update_positions<RealType><<<dimGrid_dx, tpb>>>(
// d_noise_buf,
// coeff_a_,
// d_coeff_bs_,
// d_coeff_cs_,
// coeff_d_,
// coeff_e_,
// dE_dx,
// dt_,
// N,
// D,
// d_x_t,
// d_v_t
// );
// gpuErrchk(cudaPeekAtLastError());
// }
// template<typename RealType>
// void LangevinOptimizer<RealType>::hessian_vector_product(
// const int N,
// const int D,
// const int DP,
// const int N_offset,
// const RealType *d_A,
// // RealType *d_B,
// // RealType *d_C) const {
// RealType *d_C,
// RealType *d_B) const {
// // sparse routines
// // convert A/B/C into sparse equivalents
// int ND = N*D;
// if(false) {
// cusparseHandle_t handle = 0;
// int total_A;
// cusparseMatDescr_t descr_A = 0;
// int *d_A_nnz_per_row = 0;
// cusparseErrchk(cusparseCreate(&handle));
// gpuErrchk(cudaMalloc((void **)&d_A_nnz_per_row, sizeof(int) * N*D));
// cusparseErrchk(cusparseCreateMatDescr(&descr_A));
// cusparseErrchk(cusparseSetMatType(descr_A, CUSPARSE_MATRIX_TYPE_GENERAL));
// cusparseErrchk(cusparseSetMatIndexBase(descr_A, CUSPARSE_INDEX_BASE_ZERO));
// // Initialize Matrices A, B
// // optimize with lda later if needed
// cusparseErrchk(cusparseNnz(
// handle,
// CUSPARSE_DIRECTION_ROW,
// N*D,
// N*D,
// descr_A,
// d_A,
// N*D,
// d_A_nnz_per_row,
// &total_A
// ));
// int total_B;
// cusparseMatDescr_t descr_B = 0;
// int *d_B_nnz_per_row = 0;
// cusparseErrchk(cusparseCreate(&handle));
// gpuErrchk(cudaMalloc((void **)&d_B_nnz_per_row, sizeof(int) * N*D));
// cusparseErrchk(cusparseCreateMatDescr(&descr_B));
// cusparseErrchk(cusparseSetMatType(descr_B, CUSPARSE_MATRIX_TYPE_GENERAL));
// cusparseErrchk(cusparseSetMatIndexBase(descr_B, CUSPARSE_INDEX_BASE_ZERO));
// // optimize with lda later if needed
// cusparseErrchk(cusparseNnz(
// handle,
// CUSPARSE_DIRECTION_ROW,
// ND,
// DP,
// descr_B,
// d_B,
// ND,
// d_B_nnz_per_row,
// &total_B
// ));
// std::cout << "DP: " << DP << " total_B " << total_B << std::endl;
// RealType *d_sparse_A;
// int *d_sparse_A_rowptr;
// int *d_sparse_A_colptr;
// RealType *d_sparse_B;
// int *d_sparse_B_rowptr;
// int *d_sparse_B_colptr;
// gpuErrchk(cudaMalloc((void **)&d_sparse_A, sizeof(RealType) * total_A));
// gpuErrchk(cudaMalloc((void **)&d_sparse_A_rowptr, sizeof(int) * (ND + 1)));
// gpuErrchk(cudaMalloc((void **)&d_sparse_A_colptr, sizeof(int) * total_A));
// gpuErrchk(cudaMalloc((void **)&d_sparse_B, sizeof(RealType) * total_B));
// gpuErrchk(cudaMalloc((void **)&d_sparse_B_rowptr, sizeof(int) * (ND + 1)));
// gpuErrchk(cudaMalloc((void **)&d_sparse_B_colptr, sizeof(int) * total_B));
// // copy over A and B
// // cusparseErrchk(cusparseDense2csr(handle, ND, ND, descr_A, d_A, ND, d_A_nnz_per_row,
// // d_sparse_A, d_sparse_A_rowptr, d_sparse_A_colptr));
// auto start4 = std::chrono::high_resolution_clock::now();
// cusparseErrchk(cusparseDense2csr(handle, N_offset, N_offset, descr_A, d_A, ND, d_A_nnz_per_row,
// d_sparse_A, d_sparse_A_rowptr, d_sparse_A_colptr));
// cudaDeviceSynchronize();
// auto stop4 = std::chrono::high_resolution_clock::now();
// auto duration4 = std::chrono::duration_cast<std::chrono::microseconds>(stop4 - start4);
// // std::cout << "dense hessian to sparse duration for hessians: " << duration4.count() << std::endl;
// auto start3 = std::chrono::high_resolution_clock::now();
// cusparseErrchk(cusparseDense2csr(handle, N_offset, DP, descr_B, d_B, ND, d_B_nnz_per_row,
// d_sparse_B, d_sparse_B_rowptr, d_sparse_B_colptr));
// cudaDeviceSynchronize();
// auto stop3 = std::chrono::high_resolution_clock::now();
// auto duration3 = std::chrono::duration_cast<std::chrono::microseconds>(stop3 - start3);
// // std::cout << "dense hessian to sparse duration for PN3: " << duration3.count() << std::endl;
// // std::cout << "A" << std::endl;
// cusparseMatDescr_t descr_C = 0;
// RealType *d_sparse_C;
// int *d_sparse_C_rowptr;
// int *d_sparse_C_colptr;
// cusparseErrchk(cusparseCreateMatDescr(&descr_C));
// cusparseErrchk(cusparseSetMatType(descr_C, CUSPARSE_MATRIX_TYPE_GENERAL));
// cusparseErrchk(cusparseSetMatIndexBase(descr_C, CUSPARSE_INDEX_BASE_ZERO));
// int baseC, nnzC;
// // nnzTotalDevHostPtr points to host memory
// cusparseHandle_t handle2 = 0;
// cusparseErrchk(cusparseCreate(&handle2));
// int *nnzTotalDevHostPtr = &nnzC;
// cusparseSetPointerMode(handle2, CUSPARSE_POINTER_MODE_HOST);
// gpuErrchk(cudaMalloc((void**)&d_sparse_C_rowptr, sizeof(int)*(ND+1)));
// // std::cout << "AA " << total_A << " " << total_B << std::endl;
// cusparseErrchk(cusparseXcsrgemmNnz(handle2, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, ND, DP, ND,
// descr_A, total_A, d_sparse_A_rowptr, d_sparse_A_colptr,
// descr_B, total_B, d_sparse_B_rowptr, d_sparse_B_colptr,
// descr_C, d_sparse_C_rowptr, nnzTotalDevHostPtr));
// // std::cout << "B" << std::endl;
// if (NULL != nnzTotalDevHostPtr){
// nnzC = *nnzTotalDevHostPtr;
// } else {
// gpuErrchk(cudaMemcpy(&nnzC, d_sparse_C_rowptr+ND, sizeof(int), cudaMemcpyDeviceToHost));
// gpuErrchk(cudaMemcpy(&baseC, d_sparse_C_rowptr, sizeof(int), cudaMemcpyDeviceToHost));
// nnzC -= baseC;
// }
// gpuErrchk(cudaMalloc((void**)&d_sparse_C_colptr, sizeof(int)*nnzC));
// gpuErrchk(cudaMalloc((void**)&d_sparse_C, sizeof(RealType)*nnzC));
// auto start = std::chrono::high_resolution_clock::now();
// cusparseErrchk(cusparseCsrgemm(handle2, CUSPARSE_OPERATION_NON_TRANSPOSE, CUSPARSE_OPERATION_NON_TRANSPOSE, ND, DP, ND,
// descr_A, total_A, d_sparse_A, d_sparse_A_rowptr, d_sparse_A_colptr,
// descr_B, total_B, d_sparse_B, d_sparse_B_rowptr, d_sparse_B_colptr,
// descr_C, d_sparse_C, d_sparse_C_rowptr, d_sparse_C_colptr));
// cudaDeviceSynchronize();
// auto stop = std::chrono::high_resolution_clock::now();
// auto duration = std::chrono::duration_cast<std::chrono::microseconds>(stop - start);
// std::cout << "sparse duration: " << duration.count() << std::endl;
// throw std::runtime_error("Debug");
// }
// RealType alpha = 1.0;
// RealType beta = 1.0;
// // this is set to UPPER because of fortran ordering
// // furthermore, we assume the atoms are compacted with ligands at the front, such that we do a subset
// // where N_offset = (NUM_ATOMS*3)+(NUM_LIGAND_ATOMS)
// auto start2 = std::chrono::high_resolution_clock::now();
// cublasErrchk(templateSymm(cb_handle_,
// CUBLAS_SIDE_LEFT, CUBLAS_FILL_MODE_UPPER,
// N_offset, DP,
// &alpha,
// d_A, ND,
// d_B, ND,
// &beta,
// d_C, ND));
// cudaDeviceSynchronize();
// auto stop2 = std::chrono::high_resolution_clock::now();
// auto duration2 = std::chrono::duration_cast<std::chrono::microseconds>(stop2 - start2);
// // std::cout << "dense duration: " << duration2.count() << std::endl;
// }
// template<typename RealType>
// void LangevinOptimizer<RealType>::set_coeff_a(RealType a) {
// coeff_a_ = a;
// }
// template<typename RealType>
// void LangevinOptimizer<RealType>::set_coeff_d(RealType d) {
// coeff_d_ = d;
// }
// template<typename RealType>
// void LangevinOptimizer<RealType>::set_coeff_e(RealType e) {
// coeff_e_ = e;
// }
// template<typename RealType>
// void LangevinOptimizer<RealType>::set_coeff_b(int num_atoms, const RealType *cb) {
// gpuErrchk(cudaMemcpy(d_coeff_bs_, cb, num_atoms*sizeof(RealType), cudaMemcpyHostToDevice));
// }
// template<typename RealType>
// void LangevinOptimizer<RealType>::set_coeff_c(int num_atoms, const RealType *cc) {
// gpuErrchk(cudaMemcpy(d_coeff_cs_, cc, num_atoms*sizeof(RealType), cudaMemcpyHostToDevice));
// }
// template<typename RealType>
// void LangevinOptimizer<RealType>::set_dt(RealType ndt) {
// dt_ = ndt;
// }
// }
// template class timemachine::LangevinOptimizer<double>;
// template class timemachine::LangevinOptimizer<float>;
|
21,345 | /*Created by Alessandro Bigiotti*/
#include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
// A function to show some GPU Information
int main(){
// Enumerate all CUDA devices and print basic memory and compute limits.
// check the number of devices
int nDevices;
cudaGetDeviceCount(&nDevices);
// for each device print some informations
for (int i = 0; i < nDevices; i++) {
cudaDeviceProp properties;
cudaGetDeviceProperties(&properties, i);
printf("Device Number: %d\n", i);
printf(" Info: \n");
printf(" Device name: %s\n", properties.name);
printf(" Memory Clock Rate (KHz): %d\n", properties.memoryClockRate);
printf(" Memory Bus Width (bits): %d\n", properties.memoryBusWidth);
printf(" Peak Memory Bandwidth (GB/s): %f\n\n", 2.0*properties.memoryClockRate*(properties.memoryBusWidth/8)/1.0e6);
printf(" Computing Capabilities: \n");
printf(" Max Threads per Block: %d\n", properties.maxThreadsPerBlock);
// Fix: maxThreadsDim and maxGridSize are int[3]; the original read
// index [3], one element past the end (undefined behavior). Print all
// three dimensions instead.
printf(" Max Threads Dim: %d x %d x %d\n", properties.maxThreadsDim[0], properties.maxThreadsDim[1], properties.maxThreadsDim[2]);
printf(" Max GridSize: %d x %d x %d\n", properties.maxGridSize[0], properties.maxGridSize[1], properties.maxGridSize[2]);
printf(" WarpSize: %d\n", properties.warpSize);
printf(" Total Global Mem(GB): %f\n", ((properties.totalGlobalMem/1024.0)/1024.0)/1024.0);
printf(" Shared Mem per Block(MB): %f\n\n", (properties.sharedMemPerBlock/1024.0)/1024);
}
return 0;
}
|
21,346 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/time.h>
void initialData( float *ip, int size )
{
// Fill ip with pseudo-random floats in [0.0, 25.5].
// Fix: seed the generator only once per process. The original called
// srand(time(...)) on every invocation, so two calls within the same
// second -- exactly what main() does to initialize h_A and h_B -- produced
// IDENTICAL arrays.
static int seeded = 0;
if (!seeded) {
srand( (unsigned int) time(NULL) );
seeded = 1;
}
for (int i=0; i<size; i++) {
ip[i] = (float)( rand() & 0xFF ) / 10.0f;
}
}
// CPU reference: C[i] = A[i] + B[i] for every i in [0, N).
void sumArraysOnHost( float *A, float *B, float *C, const int N )
{
    int i = 0;
    while (i < N) {
        C[i] = A[i] + B[i];
        ++i;
    }
}
// One thread per element: C[i] = A[i] + B[i], guarded against the grid tail.
__global__ void sumArraysOnDevice( float *A, float *B, float *C, const int N)
{
    size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        C[i] = A[i] + B[i];
    }
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond()
{
    struct timeval now;
    gettimeofday( &now, NULL );
    double sec  = (double)now.tv_sec;
    double usec = (double)now.tv_usec;
    return sec + usec * 1e-6;
}
// Host driver: adds two 2^24-element vectors on CPU and GPU, compares the
// results element-wise, and reports timings for both paths.
int main( int argc, char **argv )
{
    double startTime, cpuElapsed, gpuElapsed;

    // Problem size: 2^24 floats per array.
    int nElem = 1<<24;
    size_t nBytes = nElem * sizeof(float);

    // Host buffers.
    float *h_A, *h_B, *h_C;
    h_A = (float *)malloc(nBytes);
    h_B = (float *)malloc(nBytes);
    h_C = (float *)malloc(nBytes);
    initialData( h_A, nElem );
    initialData( h_B, nElem );

    // Device buffers plus a host buffer for the GPU result.
    int dev = 0;
    cudaSetDevice(dev);
    float *d_A, *d_B, *d_C, *gpuRes;
    cudaMalloc( (float **)&d_A, nBytes );
    cudaMalloc( (float **)&d_B, nBytes );
    cudaMalloc( (float **)&d_C, nBytes );
    gpuRes = (float *)malloc(nBytes);
    cudaMemcpy( d_A, h_A, nBytes, cudaMemcpyHostToDevice );
    cudaMemcpy( d_B, h_B, nBytes, cudaMemcpyHostToDevice );

    // CPU reference.
    startTime = cpuSecond();
    sumArraysOnHost( h_A, h_B, h_C, nElem );
    cpuElapsed = cpuSecond() - startTime;

    dim3 block(256,1,1);                             // threads per block
    dim3 grid((nElem + block.x - 1) / block.x,1,1);  // ceil-div block count
    printf("block = %i\n", block.x);
    printf("grid = %i\n", grid.x);
    printf("total launched = %i\n", block.x * grid.x);
    printf("total needed = %i\n", nElem);

    // GPU run; cudaDeviceSynchronize so the timer covers the whole kernel.
    startTime = cpuSecond();
    sumArraysOnDevice<<< grid, block >>>( d_A, d_B, d_C, nElem );
    cudaDeviceSynchronize();
    gpuElapsed = cpuSecond() - startTime;

    cudaError_t error = cudaMemcpy( gpuRes, d_C, nBytes,
                                    cudaMemcpyDeviceToHost );
    if (error != cudaSuccess) {
        printf("Error: %s:%d, ", __FILE__, __LINE__);
        printf("code:%d, reason: %s\n", error, cudaGetErrorString(error));
        exit(1);
    }

    // Accumulate absolute error. The original used abs(), which resolves to
    // the integer overload here and truncated each difference toward zero.
    double err = 0.0;
    for (int i=0; i<nElem; i++) {
        double d = (double)h_C[i] - (double)gpuRes[i];
        err += (d < 0.0) ? -d : d;
    }
    printf("Total error is %f\n", err);
    printf("Time on CPU is %f\n", cpuElapsed);
    printf("Time on GPU is %f\n", gpuElapsed);
    printf("GPU speed-up over CPU is %.2f x\n", cpuElapsed / gpuElapsed);

    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(gpuRes);
    return(0);
}
|
21,347 | #include <iostream>
#include <assert.h>
// #include <glog/logging.h>
#include <cuda.h>
#include <cuda_runtime.h>
// #include <sys/mman.h>
using namespace std;
// Stress test: repeatedly pin 256 MB chunks of host memory with
// cudaMallocHost until an allocation fails or 21 chunks have been pinned.
// The chunks are intentionally never freed.
int main() {
  size_t count = 0;
  const size_t size = 64 * 1024 * 1024 * sizeof(float);  // 256 MB per chunk
  for (;;) {
    void *host_array;
    cudaError status = cudaMallocHost(&host_array, size);
    if (status != cudaSuccess) {
      std::cout << "Allocation failed at " << count * 256 << " MB, with errono " << status << std::endl;
      exit(1);
    }
    count++;
    std::cout << "Allocated " << count * 256 << " MB" << std::endl;
    if (count > 20) {
      exit(0);
    }
  }
}
|
21,348 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
//TODO : used sharedvar to have (xi - yi)^2 generated in ||lel
// One thread per mean (id = threadIdx.y): recompute mean `id` as the average
// of all entries currently assigned to it.
// Layouts: means is num_means x num_attribs, entries is
// num_entries x num_attribs, closestMean[i] holds the mean index of entry i.
__global__ void updateMeans(float *means, float *entries, int *closestMean, int num_entries, int num_means, int num_attribs)
{
    int id = threadIdx.y;
    // temp has a fixed capacity of 100 attributes; bail out rather than
    // overflow the local buffer (the original had no bound check). Also
    // guard against threads beyond num_means.
    if (num_attribs > 100 || id >= num_means) return;
    int thisMeanCount = 0;
    float temp[100];
    for(int j = 0; j < num_attribs; j++)
    {
        temp[j] = 0;
    }
    // Accumulate every entry assigned to this mean.
    for(int i = 0; i < num_entries; i++)
    {
        if(closestMean[i] == id)
        {
            for(int j = 0; j < num_attribs; j++)
            {
                temp[j]+=entries[i*num_attribs + j];
            }
            thisMeanCount++;
        }
    }
    // Write the new centroid; leave it untouched if no entry maps here.
    for(int i = 0 ; i < num_attribs; i++)
    {
        if(thisMeanCount != 0)
        {
            means[id*num_attribs + i] = temp[i]/thisMeanCount;
            printf("mean : %d , attrib : %d , newMean : %f, count : %d \n",id, i,means[id*num_attribs + i],thisMeanCount);
        }
    }
}
// One thread per entry (id = threadIdx.y): find the nearest mean by squared
// Euclidean distance and record it in closestMean[id]. flag[0] is set to 1
// when any entry changes assignment (the host uses it as a convergence test).
__global__ void getClosestMeans(float *means, float *entries, int *closestMean, int num_entries, int num_means, int num_attribs, int *flag)
{
    printf("ENTERED!!!!\n");
    int id = threadIdx.y;
    // Track the best distance as a float; the original used an int, which
    // truncated every distance on assignment and could select the wrong mean.
    float closestDist = 9999999.0f;
    int closest = -1;
    float currDist = 0;
    // NOTE(review): every thread writes flag[0] = 0 here and some may later
    // write 1; there is no ordering between threads, so a late thread's 0
    // could overwrite another thread's 1 — a data race. Consider resetting
    // the flag from the host instead. TODO confirm intended semantics.
    flag[0] = 0;
    for(int j = 0; j < num_means ; j++)
    {
        currDist = 0;
        // Squared Euclidean distance between entry `id` and mean `j`.
        for(int i = 0; i < num_attribs; i++)
        {
            currDist+= (means[j*num_attribs + i] - entries[id*num_attribs + i]) * (means[j*num_attribs + i] - entries[id*num_attribs + i]);
        }
        printf("Entry %d to mean %d distance : %f \n", id, j, currDist);
        if(currDist < closestDist)
        {
            closestDist = currDist;
            closest = j;
        }
    }
    if(closest != closestMean[id])
    {
        flag[0] = 1;
    }
    closestMean[id] = closest;
}
// Host driver for a simple k-means: reads sizes and data from stdin, then
// iterates (assign entries to nearest mean, recompute means) on the GPU
// until no assignment changes. Prints the final means and elapsed time.
int main()
{
    int num_entries;
    int num_means;
    int num_attribs;
    printf("Enter the number of entries : \n");
    scanf("%d", &num_entries);
    printf("Enter the number of means : \n");
    scanf("%d", &num_means);
    printf("Enter the number of attributes : \n");
    scanf("%d", &num_attribs);

    // Host-side flat arrays (VLAs). The original also declared an unused
    // `distances` array and an unused device buffer `dtemp`; both removed.
    float means[num_means*num_attribs];
    float entries[num_entries*num_attribs];
    int closestMean[num_entries];
    printf("Enter the entries : \n");
    for(int i = 0; i < num_entries*num_attribs; i++)
    {
        scanf("%f", &entries[i]);
    }
    printf("Enter the initial -- means : \n");
    for(int i = 0; i < num_means*num_attribs; i++)
    {
        scanf("%f", &means[i]);
    }

    // One block; one thread per entry (assignment) / per mean (update).
    dim3 gridCM (1,1);
    dim3 threadCM (1, num_entries);
    dim3 gridUM (1,1);
    dim3 threadUM (1, num_means);

    float *dmeans, *dentries;
    int *dclosestMean;
    int *dflag;

    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);

    cudaMalloc((void **)&dmeans, sizeof(float)*num_means*num_attribs);
    cudaMalloc((void **)&dentries, sizeof(float)*num_entries*num_attribs);
    cudaMalloc((void **)&dclosestMean, sizeof(int)*num_entries);
    cudaMalloc((void **)&dflag, sizeof(int));

    int flag[1] = {1};
    cudaMemcpy(dflag, flag, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dmeans, means, sizeof(float)*num_means*num_attribs, cudaMemcpyHostToDevice);
    cudaMemcpy(dentries, entries, sizeof(float)*num_entries*num_attribs, cudaMemcpyHostToDevice);

    // Iterate until the assignment kernel leaves dflag at 0.
    while(flag[0] == 1)
    {
        getClosestMeans<<<gridCM, threadCM>>>(dmeans, dentries, dclosestMean, num_entries, num_means, num_attribs, dflag);
        cudaMemcpy(closestMean, dclosestMean, sizeof(int)*num_entries, cudaMemcpyDeviceToHost);
        for(int i = 0; i < num_entries; i++)
            printf("%d -- ", closestMean[i]);
        printf("\n");
        updateMeans<<<gridUM, threadUM>>>(dmeans, dentries, dclosestMean, num_entries,num_means, num_attribs);
        cudaMemcpy(flag, dflag, sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(means, dmeans, sizeof(float)*num_means*num_attribs, cudaMemcpyDeviceToHost);
        cudaMemcpy(dmeans, means, sizeof(float)*num_means*num_attribs, cudaMemcpyHostToDevice);
        // Refresh the device assignments from the host copy. The original
        // passed cudaMemcpyDeviceToHost here even though the destination is
        // a device pointer — fixed to HostToDevice.
        cudaMemcpy(dclosestMean, closestMean, sizeof(int)*num_entries, cudaMemcpyHostToDevice);
    }

    cudaMemcpy(means, dmeans, sizeof(float)*num_means*num_attribs, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapsedtime;
    cudaEventElapsedTime(&elapsedtime,start,stop);

    // Print the final means (newline after the first row, as before).
    for(int i = 0; i < num_means*num_attribs; i++)
    {
        if(i == num_attribs)
            printf("\n");
        printf("%f ",means[i]);
    }
    printf("\nThe elapsed timer is %f\n", elapsedtime);

    // Release CUDA resources (the original leaked all of them).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dmeans);
    cudaFree(dentries);
    cudaFree(dclosestMean);
    cudaFree(dflag);
}
|
21,349 | //CS-4370 Parallel Programming for many core GPUs
//Name: Gesu Bal
/*
this is a simple cuda program calculating vector add for 2 dimensions on GPU device
I added two two-dimensional matrices A, B on the device GPU.
After the device matrix addition kernel function is invoked, and the addition result is transferred back to the CPU.
The program will also compute the sum matrix of matrices A and B using the CPU.
Then the program compares the device-computed result with the CPU-computed result.
If it matches, it prints out Test PASSED to the screen before exiting.
*/
#include<stdio.h>
#include<cuda.h>
int N,blocksize;
//gpu function for addition
// Element-wise matrix add on the device: one thread per (row, col) cell.
__global__ void add_gpu(int *d_a, int *d_b, int *d_c, int N)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y;
    int col = blockIdx.x*blockDim.x + threadIdx.x;
    // Guard the grid tail: the grid may cover more cells than N*N.
    if (row >= N || col >= N)
        return;
    int idx = row*N + col;
    d_c[idx] = d_a[idx] + d_b[idx];
}
//cpu function for addition
// CPU reference: cpu_c = a + b, element-wise over a row-major N x N matrix.
void add_matrix_cpu(int *a, int *b, int *cpu_c, int N)
{
    for (int idx = 0; idx < N*N; idx++) {
        cpu_c[idx] = a[idx] + b[idx];
    }
}
//match cpu and gpu results
// Compare two N x N int matrices and report via stdout whether they match.
// Always returns 1; the outcome is only communicated through the printout.
int verify(int * a, int * b, int N)
{
    int mismatches = 0;
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            if (a[i*N+j] != b[i*N+j])
                mismatches++;
    if (mismatches == 0)
        printf("CPU and GPU results matched: Test Passed \n");
    else
        printf("CPU and GPU results did not match");
    return 1;
}
//print matrix fucntion
// Print an N x N int matrix, tab-separated, one row per line. Returns 1.
int printMatrix(int *a,int N)
{
    for (int row = 0; row < N; row++)
    {
        for (int col = 0; col < N; col++)
            printf("%d\t", a[row*N + col]);
        printf("\n");
    }
    return 1;
}
// Driver: prompts for a matrix size, fills A and B with a deterministic
// pseudo-random pattern, adds them on both CPU and GPU, and verifies that
// the two results agree.
int main()
{
    int r, col;
    printf("Select one of the following options for vector addition: \n");
    printf("Press a for matrix size 8 * 8 \n");
    printf("Press b for matrix size 64 * 64 \n");
    printf("Press c for matrix size 128 * 128 \n");
    printf("Press d for matrix size 500 * 500 \n");
    printf("Press e for matrix size 1000 * 1000 \n");
    printf("Press any other key for exiting \n");
    char ch;
    scanf("%c",&ch);
    switch(ch)
    {
        case 'a':
            r=8; col=8; N=8; blocksize=4;
            printf("Array size is 8 * 8 \n");
            break;
        case 'b':
            r=64; col=64; N=64; blocksize=16;
            printf("Array size is 64 * 64 \n");
            break;
        case 'c':
            r=128; col=128; N=128; blocksize=16;
            printf("Array size is 128 * 128 \n");
            break;
        case 'd':
            r=500; col=500; N=500; blocksize=16;
            printf("Array size is 500 * 500 \n");
            break;
        case 'e':
            r=1000; col=1000; N=1000; blocksize=16;
            printf("Array size is 1000 * 1000 \n");
            break;
        default:
            exit(1);
            break;
    }

    // Host and device buffers.
    int *a, *b, *c, *cpu_c, *d_a, *d_b, *d_c;
    int a_size=r*col;
    int b_size=r*col;
    int c_size=r*col;
    int cpu_c_size=r*col;
    a=(int*)malloc(sizeof(int)*a_size);
    b=(int*)malloc(sizeof(int)*b_size);
    c=(int*)malloc(sizeof(int)*c_size);
    cpu_c=(int*)malloc(sizeof(int)*cpu_c_size);

    // Deterministic pseudo-random fill (multiplicative recurrence mod 65536).
    int i,j;
    int init=1325;
    for (i=0;i<N;i++)
    {
        for (j=0;j<N;j++)
        {
            init=3125*init%65536;
            a[i*col+j]=((init-32768)/16384);
            b[i*col+j]=(init%1000);
        }
    }

    cudaError_t cudaret=cudaMalloc((void **)(&d_a),(N*N)*sizeof(int));
    if(cudaret!=cudaSuccess)
    {printf("memory was not allocated on device \n");}
    cudaMalloc((void **)(&d_b),(N*N)*sizeof(int));
    cudaMalloc((void **)(&d_c),(N*N)*sizeof(int));
    // Copy inputs to the device.
    cudaMemcpy(d_a,a,(N*N)*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,(N*N)*sizeof(int),cudaMemcpyHostToDevice);

    // Launch geometry: ceil(N / blocksize) blocks per direction.
    dim3 dimBlock(blocksize,blocksize,1);
    int blockCount_x = (N - 1)/(double(blocksize))+1;
    int blockCount_y = (N - 1)/(double(blocksize))+1;
    printf("the number of the thread blocks in x direction will be %d\n", blockCount_x);
    printf("the number of the thread blocks in y direction will be %d\n", blockCount_y);
    dim3 dimGrid(blockCount_x,blockCount_y,1);

    printf("calculating results for CPU vector addition \n");
    printf("---------\n");
    add_matrix_cpu(a,b,cpu_c,N);

    printf("calling kernel for gpu computations for vector addition \n");
    printf("---------\n");
    add_gpu<<<dimGrid,dimBlock>>>(d_a,d_b,d_c,N);
    // Surface launch-configuration errors; the original never checked.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
    {printf("kernel launch failed: %s \n", cudaGetErrorString(launchErr));}
    printf("calculating results for gpu \n");
    printf("---------\n");
    // This blocking copy also synchronizes with the kernel.
    cudaMemcpy(c,d_c,(N*N)*sizeof(int),cudaMemcpyDeviceToHost);

    printf("comparing results for CPU and GPU computations \n");
    printf("---------\n");
    verify(c,cpu_c,N);

    // Release device and host memory (host buffers leaked in the original).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(a);
    free(b);
    free(c);
    free(cpu_c);
    return 0;
}
|
21,350 | #include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <chrono>
#define TILE_DIM 64
void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol);
void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol);
// __global__ void matrixMul(int* A_gpu, int* B_gpu, int* C_gpu, int N) {
// // Row i of matrix C
// int row = blockIdx.y * blockDim.y + threadIdx.y;
// // Column j of matrix C
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// int accu = 0;
// if(row<N && col<N) {
// for(int k=0; k<N; k++) {
// accu = accu + A_gpu[row*N+k] * B_gpu[k*N+col];
// }
// C_gpu[row*N+col] = accu;
// }
// }
// Tiled matrix multiply: C = A * B for row-major N x N int matrices.
// Works for any square block with blockDim.x == blockDim.y <= TILE_DIM
// (the shared tiles are TILE_DIM x TILE_DIM capacity). The original
// hard-coded TILE_DIM (64) as the tile stride, which skipped elements and
// produced wrong results when launched with the 16 x 16 blocks used by this
// file's host code.
__global__ void matrixMul(int* A_cpu, int* B_cpu, int* C_cpu, int N)
{
    int bs = blockDim.x;                 // actual tile edge (<= TILE_DIM)
    int row = blockIdx.y*bs + threadIdx.y;
    int col = blockIdx.x*bs + threadIdx.x;
    int accu = 0;
    __shared__ int shd_A[TILE_DIM][TILE_DIM];
    __shared__ int shd_B[TILE_DIM][TILE_DIM];
    // Walk the K dimension one tile at a time, zero-padding the tails.
    for (int k = 0; k < (bs + N - 1)/bs; k++) {
        if (k*bs + threadIdx.x < N && row < N)
            shd_A[threadIdx.y][threadIdx.x] = A_cpu[row*N + k*bs + threadIdx.x];
        else
            shd_A[threadIdx.y][threadIdx.x] = 0;
        if (k*bs + threadIdx.y < N && col < N)
            shd_B[threadIdx.y][threadIdx.x] = B_cpu[(k*bs + threadIdx.y)*N + col];
        else
            shd_B[threadIdx.y][threadIdx.x] = 0;
        __syncthreads();   // tile fully loaded before use
        for (int t = 0; t < bs; t++)
            accu += shd_A[threadIdx.y][t] * shd_B[t][threadIdx.x];
        __syncthreads();   // all reads done before the next load
    }
    if (row < N && col < N)
        C_cpu[row*N + col] = accu;
}
// __global__ void matrixMulCol(int* A_gpu, int* B_gpu, int* C_gpu, int N) {
// // Row i of matrix C
// int row = blockIdx.y * blockDim.y + threadIdx.y;
// // Column j of matrix C
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// int accu = 0;
// if(row<N && col<N) {
// for(int k=0; k<N; k++) {
// accu = accu + A_gpu[k*N+row] * B_gpu[k*N+col];
// }
// C_gpu[row*N+col] = accu;
// }
// }
// Tiled matrix multiply with A stored column-major: C = A^T-layout * B.
// Same tiling fix as matrixMul: the tile edge follows blockDim instead of
// the hard-coded TILE_DIM, so the 16 x 16 launches in this file are correct.
__global__ void matrixMulCol(int* A_cpu, int* B_cpu, int* C_cpu, int N)
{
    int bs = blockDim.x;                 // actual tile edge (<= TILE_DIM)
    int row = blockIdx.y*bs + threadIdx.y;
    int col = blockIdx.x*bs + threadIdx.x;
    int accu = 0;
    __shared__ int shd_A[TILE_DIM][TILE_DIM];
    __shared__ int shd_B[TILE_DIM][TILE_DIM];
    for (int k = 0; k < (bs + N - 1)/bs; k++) {
        // A is indexed column-major: element (row, kk) lives at A[kk*N + row].
        if (k*bs + threadIdx.x < N && row < N)
            shd_A[threadIdx.y][threadIdx.x] = A_cpu[(k*bs + threadIdx.x)*N + row];
        else
            shd_A[threadIdx.y][threadIdx.x] = 0;
        if (k*bs + threadIdx.y < N && col < N)
            shd_B[threadIdx.y][threadIdx.x] = B_cpu[(k*bs + threadIdx.y)*N + col];
        else
            shd_B[threadIdx.y][threadIdx.x] = 0;
        __syncthreads();   // tile fully loaded before use
        for (int t = 0; t < bs; t++)
            accu += shd_A[threadIdx.y][t] * shd_B[t][threadIdx.x];
        __syncthreads();   // all reads done before the next load
    }
    if (row < N && col < N)
        C_cpu[row*N + col] = accu;
}
// Fill x[0..size) with pseudo-random ints in [0, 9].
void random_ints(int* x, int size)
{
    srand(time(0));
    for (int i = 0; i < size; ++i)
        x[i] = rand() % 10;
}
// CPU reference: C = A * B for row-major N x N int matrices.
void matrixMulCPU(int* A_cpu, int* B_cpu, int* C_cpu, int N) {
    for (int row = 0; row < N; ++row) {
        for (int col = 0; col < N; ++col) {
            int acc = 0;
            for (int elm = 0; elm < N; ++elm)
                acc += A_cpu[row*N+elm] * B_cpu[elm*N+col];
            C_cpu[row*N+col] = acc;
        }
    }
}
// CLI driver. argv[1] = matrix edge N; argv[2] = "cpu" or "gpu";
// argv[3] = "MT" to include host<->device transfers in the GPU timing;
// argv[4] = "MC" to use the column-major-A kernel.
int main(int argc, char* argv[]){
    // Guard: the original indexed argv[1..4] unconditionally and crashed
    // when arguments were missing.
    if (argc < 5) {
        std::cerr << "usage: " << argv[0] << " N cpu|gpu MT|NT MC|NM" << std::endl;
        return 1;
    }
    int N = atoi(argv[1]);
    bool memCol = false;
    if (strcmp(argv[4],"MC")==0) {
        memCol=true;
    }
    int NN = N*N;
    // Host matrices, row-major N x N, random contents.
    int *A_cpu, *B_cpu, *C_cpu;
    int size = NN * sizeof(int);
    A_cpu = (int*)malloc(size); random_ints(A_cpu, NN);
    B_cpu = (int*)malloc(size); random_ints(B_cpu, NN);
    C_cpu = (int*)malloc(size);
    if (strcmp(argv[2],"gpu")==0) {
        if(strcmp(argv[3],"MT")==0) {
            gpuMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol);
        }
        else {
            gpuNoMemTransfer(A_cpu, B_cpu, C_cpu, N, size, memCol);
        }
    }
    else {
        // CPU path, timed with std::chrono.
        auto t1 = std::chrono::high_resolution_clock::now();
        matrixMulCPU(A_cpu, B_cpu, C_cpu, N);
        auto t2 = std::chrono::high_resolution_clock::now();
        auto duration = std::chrono::duration_cast<std::chrono::microseconds>( t2 - t1 ).count();
        std::cout << "N: " << N << "\tCPU time: " << duration << "us" << std::endl;
    }
    free(A_cpu); free(B_cpu); free(C_cpu);
    return 0;
}
// GPU path that times H2D copies + kernel + D2H copy together using CUDA
// events. memCol selects the column-major-A kernel (matrixMulCol).
void gpuMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) {
    int *A_gpu, *B_gpu, *C_gpu;
    cudaMalloc((void **)&A_gpu, size);
    cudaMalloc((void **)&B_gpu, size);
    cudaMalloc((void **)&C_gpu, size);

    dim3 dimBlock(16, 16);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Timed region includes the host<->device transfers.
    cudaEventRecord( start, 0 );
    cudaMemcpy(A_gpu, A_cpu, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_gpu, B_cpu, size, cudaMemcpyHostToDevice);
    if (memCol)
        matrixMulCol<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N);
    else
        matrixMul<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N);
    cudaMemcpy(C_cpu, C_gpu, size, cudaMemcpyDeviceToHost);
    cudaEventRecord( stop, 0 );

    cudaEventSynchronize( stop );
    float time = 0;
    cudaEventElapsedTime( &time, start, stop );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu);

    float microsec = (time)*1000;   // events report milliseconds
    std::cout << "N: " << N << "\tMT\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl;
}
// GPU path that times the kernel alone (transfers excluded) using CUDA
// events. memCol selects the column-major-A kernel (matrixMulCol).
void gpuNoMemTransfer(int* A_cpu, int* B_cpu, int* C_cpu, int N, int size, bool memCol) {
    int *A_gpu, *B_gpu, *C_gpu;
    cudaMalloc((void **)&A_gpu, size);
    cudaMalloc((void **)&B_gpu, size);
    cudaMalloc((void **)&C_gpu, size);

    // Inputs are staged before the timed region.
    cudaMemcpy(A_gpu, A_cpu, size, cudaMemcpyHostToDevice);
    cudaMemcpy(B_gpu, B_cpu, size, cudaMemcpyHostToDevice);

    dim3 dimBlock(16, 16);
    dim3 dimGrid((N+dimBlock.x-1)/dimBlock.x, (N+dimBlock.y-1)/dimBlock.y);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord( start, 0 );
    if (memCol)
        matrixMulCol<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N);
    else
        matrixMul<<<dimGrid, dimBlock>>>(A_gpu,B_gpu,C_gpu,N);
    cudaEventRecord( stop, 0 );

    cudaEventSynchronize( stop );
    float time = 0;
    cudaEventElapsedTime( &time, start, stop );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );

    // Result copied back outside the timed region.
    cudaMemcpy(C_cpu, C_gpu, size, cudaMemcpyDeviceToHost);
    cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu);

    float microsec = (time)*1000;   // events report milliseconds
    std::cout << "N: " << N << "\tnt\t" << memCol << "\tGPU time: " << microsec << "us" << std::endl;
}
|
21,351 | /*#include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
#include "device_launch_parameters.h"
#include <device_functions.h>
#define imin(a,b)((a<b)?a:b)
const int N =33 * 1024;
const int threadsPerBlock = 256;
const int blocksPerGrid = imin(32, (N+ threadsPerBlock-1) / threadsPerBlock);
__global__ void kernel(float*a,float*b,float*c)
{
__shared__ float cache[threadsPerBlock];//shared between threads of a block. we have 32 blocks in this example thus, we will have 32 varialbes created for cache each of size 256*float bytes.
float temp = 0;
int tid = threadIdx.x + blockIdx.x*blockDim.x;
int cacheIndex = threadIdx.x;
while (tid < N)
{
temp += a[tid]*b[tid];
tid += gridDim.x*blockDim.x;
}
cache[cacheIndex] = temp;
__syncthreads();
int i = blockDim.x / 2;
while (i!=0)
{
if(cacheIndex<i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
c[blockIdx.x] = cache[0];
//each cache element in all the 32 blocks returning their values at index 0
printf("blockIdx.x= %d ,threadIdx.x=%d , cache=%f \n", blockIdx.x, threadIdx.x, cache[0]);
}
}
int main()
{
float* h_a = new float[N];
float* h_b = new float[N];
float* h_c = new float[blocksPerGrid];
float* d_a;
float* d_b;
float* d_c;
cudaMalloc((void**)&d_a, sizeof(float)*N);
cudaMalloc((void**)&d_b, sizeof(float)*N);
cudaMalloc((void**)&d_c, sizeof(float)*blocksPerGrid);
for (int i = 0; i < N; i++)
{
h_a[i] = i;
h_b[i] = 2 * i;
}
cudaMemcpy(d_a, h_a, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, sizeof(float)*N, cudaMemcpyHostToDevice);
kernel << <blocksPerGrid, threadsPerBlock >> >(d_a,d_b,d_c);
cudaMemcpy(h_c, d_c, sizeof(float)*blocksPerGrid, cudaMemcpyDeviceToHost);
//Output h_c
//This partial sum calculated on CPU, better than wasting GPU resources
float c = 0;
for (int i = 0; i < blocksPerGrid; i++)
{
c += h_c[i];
}
//CPU sum for verification and compare
#define sumSquares(x) (x*(x+1)*(2*x+1)/6)
printf("Does GPU value %.6g == %.6g\n", c,2*sumSquares((float) (N-1)));
//Delete all memroAllocs
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
delete [] h_a;
delete [] h_b;
delete[] h_c;
return 911;
}*/ |
21,352 | // Author: Rajiur Rahman ( rajiurrahman.bd@gmail.com )
// Department of Computer Science, Wayne State University
// knn implemented for GPU.
// have to provide training data, training data labels, test data, and test data labels in separate text files. All files should be ' ' space separated.
/* Instruction for compiling and running
* the commands should be following
* compile - nvcc knn.cu -o knn.o
* run - ./knn.o numTrainRow numTestRow numCol k
*
* For example:
* ./knn.o 20 5 10 4
* ./knn.o 15997 4000 30000 5
* ./knn_new.o 69 20 1000 5
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h> /* strtok() */
#include <sys/types.h> /* open() */
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h> /* read(), close() */
#include <math.h>
#include <time.h>
/*__global__ void kernel(int *a)
{
int tidx = threadIdx.x + blockIdx.x * blockDim.x;
int tidy = threadIdx.y + blockIdx.y * blockDim.y;
}*/
// Debug helper: print row `tidx` of a row-major matrix as "index-value" pairs.
__device__ void showData(float* data, int numRow, int numCol, int tidx){
    int first = tidx * numCol;
    for (int i = first; i < first + numCol; i++) {
        printf("%d-%f\t", i, data[i]);
    }
}
// this function calculates the distance between two rows.
// Squared Euclidean distance between test row `currentTestIndex` and train
// row `currentTrainIndex` (both matrices are row-major with numCol columns).
__device__ float calculate_distance(float* d_trainData, float* d_testData, int numTrainRow, int numTestRow, int numCol, int currentTestIndex, int currentTrainIndex){
    // Start from zero; the original initialized the accumulator to 100.0,
    // which offset every distance by a constant (harmless for nearest-
    // neighbor ranking, but wrong as a distance value).
    float distance = 0.0f;
    for (int i = 0; i<numCol; i++){
        float diff = d_testData[currentTestIndex*numCol+i] - d_trainData[currentTrainIndex*numCol+i];
        distance += diff * diff;
    }
    //distance = distance/(float)numCol;
    //distance = (float)sqrt(distance);
    return distance;
}
// this function will return nearest neighbor information for a particular test row.
// called from main kernel // d_neighborhoodMatrix has size numTestRow*numTrainRow .. it is a flat array
// Fill row `tidx` of the numTestRow x numTrainRow distance matrix with the
// distances from test row tidx to every training row. Returns the matrix
// pointer unchanged.
__device__ float* calculate_distance_matrix(float* d_trainData, float* d_testData, float* d_neighborhoodMatrix, int numTrainRow, int numTestRow, int numCol, int tidx){
    float* outRow = d_neighborhoodMatrix + tidx*numTrainRow;
    for (int i = 0; i < numTrainRow; i++) {
        outRow[i] = calculate_distance(d_trainData, d_testData, numTrainRow, numTestRow, numCol, tidx, i);
    }
    return d_neighborhoodMatrix;
}
// kernel function that will perform k-nearest neighbor classification
// There will be one karnel launched for each row in the test data.
// This function will manage finding nearest neighbors from test data row to training data
// Kernel: one thread per test row; computes that row of the distance matrix.
__global__ void calculate_similarity_matrix(float* d_trainData, float* d_testData, float* d_neighborhoodMatrix, int numTrainRow, int numTestRow, int numCol){
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard the tail: the launch may spawn more threads than test rows
    // (the original had no bounds check).
    if (tidx >= numTestRow) return;
    d_neighborhoodMatrix = calculate_distance_matrix(d_trainData, d_testData, d_neighborhoodMatrix, numTrainRow, numTestRow, numCol, tidx);
}
// this function will read the data. It takes parameters: file name as a string, number of rows, and number of columns.
// It will read a 2-D matrix, save it as a flat 1-D array, and return the pointer
// Read numRow*numCol whitespace-separated floats from fileName into a newly
// malloc'd flat row-major array; caller frees. Exits with a message if the
// file cannot be opened (the original dereferenced the NULL FILE*).
float* read_data(char* fileName, int numRow, int numCol){
    float* data;
    data = (float*)malloc(numRow*numCol*sizeof(float));
    FILE *file;
    file=fopen(fileName, "r");
    if (file == NULL){
        fprintf(stderr, "read_data: cannot open %s\n", fileName);
        exit(1);
    }
    for (int i=0; i<numRow*numCol; i++){
        // Stop early if the file runs out of values.
        if (!fscanf(file, "%f", &data[i])){
            break;
        }
    }
    fclose(file);
    return data;
}
// Print a numRow x numCol float matrix, space-separated, one row per line.
void show_data(float* data, int numRow, int numCol){
    for (int r = 0; r < numRow; r++) {
        for (int c = 0; c < numCol; c++)
            printf("%f ", data[r*numCol + c]);
        printf("\n");
    }
}
//comment
// Return the column index (0..numTrainRow) of the smallest value in row
// `tidx` of the distance matrix. Ties resolve to the last minimum seen
// (<= comparison), matching the original behavior.
__device__ int calculate_nearest_neighbor_index(float* d_neighborhoodMatrix, int tidx, int numTrainRow){
    int base = tidx * numTrainRow;
    float minValue = 99999.0;
    int minIndex = 0;
    for (int j = 0; j < numTrainRow; j++){
        if (d_neighborhoodMatrix[base + j] <= minValue){
            minValue = d_neighborhoodMatrix[base + j];
            minIndex = j;
        }
    }
    return minIndex;
}
// this function will calculate the nearest neighbors from similarity matrix
// a kernel will be launched for each test instance
// Kernel: one thread per test row. Selects the k smallest entries of that
// row by repeated minimum extraction: record the current minimum's index,
// then mask it out with a large sentinel so the next pass finds the next one.
__global__ void calculate_nearest_neighbor(float* d_neighborhoodMatrix, int* d_nearestNeighborIndices, int numTrainRow, int numTestRow, int numCol, int k){
    int tidx = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard the tail: the launch may spawn more threads than test rows
    // (the original had no bounds check).
    if (tidx >= numTestRow) return;
    for(int i=0; i<k; i++){
        int minIndex = calculate_nearest_neighbor_index(d_neighborhoodMatrix, tidx, numTrainRow);
        d_nearestNeighborIndices[tidx*k+i] = minIndex;
        d_neighborhoodMatrix[tidx*numTrainRow+minIndex] = (float)99999.0;
    }
}
//calculate the label of a test instance
// Majority vote over the k nearest neighbors of test row `currentInstance`:
// returns 1.0 when the summed 0/1 training labels reach k/2 (integer
// division), else 0.0. Prints a newline per call (kept from the original).
float calculate_label(int* host_nearestNeighborIndices, float* trainLabel, int numTestRow, int numTrainRow, int k, int currentInstance){
    float sum1 = 0.0f;
    for (int i = 0; i < k; i++){
        int neighbor = host_nearestNeighborIndices[currentInstance*k + i];
        sum1 += trainLabel[neighbor];
    }
    printf("\n");
    return (sum1 >= (k/2)) ? (float)1.0 : (float)0.0;
}
//predict class labels of test data from calculated nearest neighbors
// Predict a label for every test row from the precomputed neighbor indices.
// Returns a malloc'd array of numTestRow labels; caller frees.
float* predict_label(int* host_nearestNeighborIndices, float* trainLabel, int numTestRow, int numTrainRow, int k){
    float* predictedLabel = (float*)malloc( numTestRow*sizeof(float) );
    for (int row = 0; row < numTestRow; row++)
        predictedLabel[row] = calculate_label(host_nearestNeighborIndices, trainLabel, numTestRow, numTrainRow, k, row);
    return predictedLabel;
}
//calculate accuracy of prediction from original class labels and predicted labels
// Percentage of test rows whose predicted label equals the true label.
float calculate_accuracy(float* predictedLabel, float* testLabel, int numTestRow){
    int correct = 0;
    for (int i = 0; i < numTestRow; i++)
        if (predictedLabel[i] == testLabel[i])
            correct++;
    return (float)100*((float)correct/(float)numTestRow);
}
// Print a numRow x numCol int matrix, space-separated, one row per line.
void show_data_int(int* data, int numRow, int numCol){
    for (int r = 0; r < numRow; r++) {
        for (int c = 0; c < numCol; c++)
            printf("%d ", data[r*numCol + c]);
        printf("\n");
    }
}
// Print the neighbor-index matrix, one row per line. Note: the trainLabel
// parameter is accepted but currently unused — only the indices are printed.
void show_data_nearest_neighbor_labels(int* data, float* trainLabel, int numRow, int numCol){
    for (int r = 0; r < numRow; r++) {
        for (int c = 0; c < numCol; c++)
            printf("%d ", data[r*numCol + c]);
        printf("\n");
    }
}
// knn driver. Usage: ./knn numTrainRow numTestRow numCol k
// Reads train/test data and labels from fixed file names, builds the full
// test-vs-train distance matrix on the GPU, selects the k nearest neighbors
// per test row, then predicts labels on the host and reports accuracy and
// wall time.
int main(int argc, char* argv[])
{
    //start the timer
    clock_t begin_time = clock();
    // Guard against missing CLI arguments (the original indexed argv
    // unconditionally).
    if (argc < 5) {
        fprintf(stderr, "usage: %s numTrainRow numTestRow numCol k\n", argv[0]);
        return 1;
    }
    int numTrainRow = atoi(argv[1]);
    int numTestRow = atoi(argv[2]);
    int numCol = atoi(argv[3]);
    int k = atoi(argv[4]);
    printf("\n**************** Hello World ! ******************\n");

    // read the data files
    float* trainData, *testData, *trainLabel, *testLabel, *predictedLabel;
    trainData = read_data("train.txt", numTrainRow, numCol);
    testData = read_data("test.txt", numTestRow, numCol);
    trainLabel = read_data("label_train.txt", numTrainRow, 1);
    testLabel = read_data("label_test.txt", numTestRow, 1);
    printf("Data Read Complete\n");

    // Device buffers; the neighborhood matrix is numTestRow x numTrainRow,
    // the index matrix is numTestRow x k.
    float *d_trainData, *d_testData, *d_neighborhoodMatrix, *host_neighborhoodMatrix;
    int *d_nearestNeighborIndices, *host_nearestNeighborIndices;
    const size_t trainSize = sizeof(float) * size_t(numTrainRow*numCol);
    const size_t testSize = sizeof(float) * size_t(numTestRow*numCol);
    const size_t neighborhoodMatrixSize = sizeof(float)*size_t(numTestRow*numTrainRow);
    cudaMalloc((void **)&d_trainData, trainSize);
    cudaMemcpy(d_trainData, trainData, trainSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_testData, testSize);
    cudaMemcpy(d_testData, testData, testSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&d_neighborhoodMatrix, neighborhoodMatrixSize);
    cudaMalloc((void **)&d_nearestNeighborIndices, (numTestRow*k*sizeof(int)) );

    // One thread per test row in a single block — assumes numTestRow does
    // not exceed the device's max threads per block.
    calculate_similarity_matrix<<<1,numTestRow>>>(d_trainData, d_testData, d_neighborhoodMatrix, numTrainRow, numTestRow, numCol);
    cudaFree(d_trainData);
    cudaFree(d_testData);
    printf("Similarity matrix building complete\n");

    // Copy the distance matrix back and print it (blocking copy also syncs).
    host_neighborhoodMatrix = (float*)malloc(neighborhoodMatrixSize);
    cudaMemcpy(host_neighborhoodMatrix, d_neighborhoodMatrix, neighborhoodMatrixSize, cudaMemcpyDeviceToHost);
    printf("\ncopying similarity matrix from device to host complete\n" );
    show_data(host_neighborhoodMatrix, numTestRow, numTrainRow);

    calculate_nearest_neighbor<<<1,numTestRow>>>(d_neighborhoodMatrix, d_nearestNeighborIndices, numTrainRow, numTestRow, numCol, k);
    printf("Nearest Neighbour calculation complete\n");

    // copy nearest neighbour indices from device (GPU) to host (CPU)
    host_nearestNeighborIndices = (int*)malloc(numTestRow*k*sizeof(int));
    cudaMemcpy(host_nearestNeighborIndices, d_nearestNeighborIndices, (numTestRow*k*sizeof(int)), cudaMemcpyDeviceToHost);

    predictedLabel = predict_label(host_nearestNeighborIndices, trainLabel, numTestRow, numTrainRow, k);
    printf("\nClass label prediction complete\n");
    float acc = calculate_accuracy(predictedLabel, testLabel, numTestRow);
    printf("\nPrediction Accuracy: %f", acc);

    // Cast before dividing: clock_t / CLOCKS_PER_SEC is integer division
    // and truncated sub-second runtimes to 0 in the original.
    clock_t end_time = clock();
    double diff_time = (double)(end_time - begin_time) / CLOCKS_PER_SEC;
    printf("\n\nTime taken for running the program: %lf\n\n", diff_time);

    // Release host and device memory (labels, predictions, and the index
    // buffer leaked in the original).
    free(testData);
    free(trainData);
    free(trainLabel);
    free(testLabel);
    free(predictedLabel);
    free(host_nearestNeighborIndices);
    free(host_neighborhoodMatrix);
    cudaFree(d_neighborhoodMatrix);
    cudaFree(d_nearestNeighborIndices);
    return 0;
}
|
21,353 | #include "includes.h"
__global__ void sum_dWU(const double *Params, const float *bigArray, float *WU) {
    // Accumulate per-feature partial sums from bigArray into WU.
    //   Params[1] = Nfeatures (NrankPC, number of PCs)
    //   Params[2] = Nfilters
    //   Params[4] = NfeatW (Nchan * nPC)
    // WU is NfeatW x Nfilters; bigArray is Nfilters sub-arrays, each
    // Nfeatures x NfeatW. Uses a grid-stride loop so any launch
    // configuration covers all nWU output elements.
    const int Nfeatures = (int) Params[1];
    const int NfeatW    = (int) Params[4];
    const int Nfilters  = (int) Params[2];
    const int nWU   = NfeatW * Nfilters;   // total elements of WU
    const int nElem = Nfeatures * NfeatW;  // elements in each filter's sub-array

    for (int tind = threadIdx.x + blockIdx.x * blockDim.x; tind < nWU;
         tind += gridDim.x * blockDim.x) {
        // Decompose the flat index into (filter, position within WU column).
        // Integer division already truncates toward zero for non-negative
        // operands, so the old floor((double)(tind/NfeatW)) round-trip
        // through double was redundant.
        int currFilt = tind / NfeatW;
        int currFW   = tind - currFilt * NfeatW;
        // Sum the Nfeatures entries of this filter's sub-array at column currFW.
        float sum = 0.0f;
        for (int ind = 0; ind < Nfeatures; ind++) {
            sum += bigArray[currFilt * nElem + ind * NfeatW + currFW];
        }
        WU[tind] += sum;
    }
}
21,354 | #include <stdio.h>
#define BLOCK_SIZE 1024
__global__ void spmv_csr_kernel(unsigned int dim, unsigned int *csrRowPtr,
unsigned int *csrColIdx, float *csrData, float *inVector,
float *outVector) {
    // CSR sparse matrix-vector product: one thread per matrix row,
    // accumulating the dot product of the row's stored nonzeros with
    // the matching entries of inVector.
    unsigned int r = blockDim.x * blockIdx.x + threadIdx.x;
    if (r >= dim) return;  // tail guard: grid may overshoot dim

    float acc = 0.0f;
    unsigned int first = csrRowPtr[r];
    unsigned int last  = csrRowPtr[r + 1];
    for (unsigned int e = first; e < last; ++e) {
        acc += csrData[e] * inVector[csrColIdx[e]];
    }
    outVector[r] = acc;
}
__global__ void spmv_jds_kernel(unsigned int dim, unsigned int *jdsRowPerm,
unsigned int *jdsRowNNZ, unsigned int *jdsColStartIdx,
unsigned int *jdsColIdx, float *jdsData, float* inVector,
float *outVector) {
    // JDS sparse matrix-vector product, one thread per (sorted) row.
    // Element j of this row lives at offset jdsColStartIdx[j] + row
    // (column-major jagged storage); the result is scattered back
    // through jdsRowPerm to undo the row sort.
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if(row < dim)
    {
        float dot = 0;
        // Hoist the nonzero count out of the loop condition so it is read
        // from global memory once instead of on every iteration.
        unsigned int nnz = jdsRowNNZ[row];
        for(unsigned int j = 0; j < nnz; j++){
            unsigned int idx = row + jdsColStartIdx[j];
            dot += jdsData[idx] * inVector[jdsColIdx[idx]];
        }
        outVector[jdsRowPerm[row]] = dot;
    }
}
void spmv_csr(unsigned int dim, unsigned int *csrRowPtr, unsigned int *csrColIdx,
float *csrData, float *inVector, float *outVector) {
    // Host launcher: one thread per row, grid rounded up (ceil-div) so
    // every row is covered; the kernel bounds-checks the overshoot.
    unsigned int gridSize = (dim + BLOCK_SIZE - 1) / BLOCK_SIZE;
    spmv_csr_kernel<<<gridSize, BLOCK_SIZE>>>(dim, csrRowPtr, csrColIdx,
                                              csrData, inVector, outVector);
    cudaDeviceSynchronize();
}
void spmv_jds(unsigned int dim, unsigned int *jdsRowPerm, unsigned int *jdsRowNNZ,
unsigned int *jdsColStartIdx, unsigned int *jdsColIdx, float *jdsData,
float* inVector, float *outVector) {
    // Host launcher: one thread per row, grid rounded up (ceil-div) so
    // every row is covered; the kernel bounds-checks the overshoot.
    unsigned int gridSize = (dim + BLOCK_SIZE - 1) / BLOCK_SIZE;
    spmv_jds_kernel<<<gridSize, BLOCK_SIZE>>>(dim, jdsRowPerm, jdsRowNNZ,
                                              jdsColStartIdx, jdsColIdx,
                                              jdsData, inVector, outVector);
    cudaDeviceSynchronize();
}
|
21,355 | #include "includes.h"
__device__ float activator_derivative( float x )
{
    // Derivative of the logistic sigmoid: s(x) * (1 - s(x)).
    // expf and float literals keep the computation in single precision;
    // the previous exp()/1 promoted each evaluation to double.
    float sig = 1.0f / (1.0f + expf( -x ));
    return sig * (1.0f - sig);
}
__global__ void calcSigmoidBackwardGPU( float *dz_next_layer, float *dz_in, float *dz, float *in, int elements )
{
    // Backward pass of a sigmoid layer, one thread per element:
    //   dz_in[id] += dz_next_layer[id]
    //   dz[id]    += sigmoid'(in[id]) * dz_in[id]
    // Fixes vs the previous version:
    //  * the derivative was evaluated at dz_in[id] while the `in` parameter
    //    was never read — the reference loop preserved in the comment below
    //    evaluates it at in.data[i], so use in[id];
    //  * exp() promoted the math to double; expf keeps it single precision.
    int id = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x;
    if( id < elements ){
        dz_in[id] += dz_next_layer[id];
        float sig = 1.0f / (1.0f + expf( -in[id] ));
        dz[id] += ( sig * (1.0f - sig) ) * dz_in[id]; // sigmoid_derivative * dz_in
    }
    /* original
    for( int i = 0; i < dz_in.size.b * dz_in.size.x * dz_in.size.y * dz_in.size.z; ++i ){
    dz_in.data[i] += dz_next_layer.data[i];
    }
    for ( int i = 0; i < in_total_size; ++i ){
    dz.data[i] += activator_derivative( in.data[i] ) * dz_in.data[i];
    }
    */
}
21,356 |
#include "kernel.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Host wrapper: launches addKernel with a single block of `size` threads.
// NOTE(review): `size` must not exceed the device's max threads per block
// (typically 1024), and no cudaGetLastError() follows the launch — a bad
// configuration would fail silently. Confirm callers keep `size` small.
__host__ void callKernel(unsigned int size, int *c, const int *a, const int *b) {
addKernel <<< 1, size >>> (c, a, b);
}
__global__ void addKernel(int *c, const int *a, const int *b)
{
    // Element-wise vector sum: thread idx handles exactly one element.
    // Assumes the launch supplies one thread per element (single block).
    int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
|
21,357 | /*
Center assignments
Written by Jiageng Mao
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
__device__ float limit_period(float val, float offset, float period){
    // Wrap `val` into a single period: val - floor(val/period + offset) * period.
    // floorf keeps the computation in single precision (the previous floor()
    // promoted the expression to double on every call).
    float rval = val - floorf(val / period + offset) * period;
    return rval;
}
__device__ float gaussian_radius(float height, float width, float min_overlap){
    // Smallest gaussian radius such that a box displaced by that radius still
    // overlaps the (height x width) ground-truth box with IoU >= min_overlap.
    // Three quadratic cases (both corners inside / one inside / both outside);
    // the answer is the minimum of their positive roots.
    // sqrtf / fminf / float literals keep the math in single precision; the
    // previous sqrt() and integer constants promoted each case to double.
    float a1 = 1.0f;
    float b1 = height + width;
    float c1 = width * height * (1.0f - min_overlap) / (1.0f + min_overlap);
    float sq1 = sqrtf(b1 * b1 - 4.0f * a1 * c1);
    float r1 = (b1 + sq1) / 2.0f;
    float a2 = 4.0f;
    float b2 = 2.0f * (height + width);
    float c2 = (1.0f - min_overlap) * width * height;
    float sq2 = sqrtf(b2 * b2 - 4.0f * a2 * c2);
    float r2 = (b2 + sq2) / 2.0f;
    float a3 = 4.0f * min_overlap;
    float b3 = -2.0f * min_overlap * (height + width);
    float c3 = (min_overlap - 1.0f) * width * height;
    float sq3 = sqrtf(b3 * b3 - 4.0f * a3 * c3);
    float r3 = (b3 + sq3) / 2.0f;
    return fminf(fminf(r1, r2), r3);
}
// Rasterize one gaussian heat blob per ground-truth box and record its
// regression targets. Launch layout: one block per batch sample
// (blockIdx.x), one thread per candidate box slot (threadIdx.x).
__global__ void draw_center_kernel(int batch_size, int max_boxes, int max_objs, int num_cls, int H, int W, int code_size, int min_radius,
float voxel_x, float voxel_y, float range_x, float range_y, float out_factor, float gaussian_overlap,
const float *gt_boxes, float *heatmap, int *gt_ind, int *gt_mask, int *gt_cat, float *gt_box_encoding, int *gt_cnt){
/*
Args:
gt_boxes: (B, max_boxes, 8 or 10) with class labels
heatmap: (B, num_cls, H, W)
gt_ind: (B, num_cls, max_objs)
gt_mask: (B, num_cls, max_objs)
gt_cat: (B, num_cls, max_objs)
gt_box_encoding: (B, num_cls, max_objs, code_size) sin/cos
gt_cnt: (B, num_cls)
*/
int bs_idx = blockIdx.x;
int box_idx = threadIdx.x;
if (bs_idx >= batch_size || box_idx >= max_boxes) return;
// move pointer
// Advance every pointer to this batch sample's slice so all later
// indexing is relative to the current sample.
gt_boxes += bs_idx * max_boxes * code_size;
heatmap += bs_idx * num_cls * H * W;
gt_ind += bs_idx * num_cls * max_objs;
gt_mask += bs_idx * num_cls * max_objs;
gt_cat += bs_idx * num_cls * max_objs;
gt_box_encoding += bs_idx * num_cls * max_objs * code_size;
gt_cnt += bs_idx * num_cls;
// gt box parameters
float x = gt_boxes[box_idx * code_size + 0];
float y = gt_boxes[box_idx * code_size + 1];
float z = gt_boxes[box_idx * code_size + 2];
float dx = gt_boxes[box_idx * code_size + 3];
float dy = gt_boxes[box_idx * code_size + 4];
float dz = gt_boxes[box_idx * code_size + 5];
// origin dx/dy/dz is for box_encodings
// Keep unscaled copies: dx/dy are rescaled to BEV-grid units below, but the
// regression target stores the original metric sizes.
float origin_dx = gt_boxes[box_idx * code_size + 3];
float origin_dy = gt_boxes[box_idx * code_size + 4];
float origin_dz = gt_boxes[box_idx * code_size + 5];
float rot = gt_boxes[box_idx * code_size + 6];
float vel_x = 0;
float vel_y = 0;
float cls = 0;
// code_size == 10 carries velocity; code_size == 8 does not; anything
// else is unsupported and the thread bails out.
if (code_size == 10) {
vel_x = gt_boxes[box_idx * code_size + 7];
vel_y = gt_boxes[box_idx * code_size + 8];
cls = gt_boxes[box_idx * code_size + 9];
} else if (code_size == 8) {
cls = gt_boxes[box_idx * code_size + 7];
} else {
return;
}
// box not defined
if (dx == 0 || dy == 0 || dz == 0) return;
// cls begin from 1
// Advance the outputs again, now to this box's class channel.
int cls_idx = (int) cls - 1;
heatmap += cls_idx * H * W;
gt_ind += cls_idx * max_objs;
gt_mask += cls_idx * max_objs;
gt_cat += cls_idx * max_objs;
gt_box_encoding += cls_idx * max_objs * code_size;
gt_cnt += cls_idx;
// transform to bev map coords
float offset = 0.5;
float period = 6.283185307179586;
rot = limit_period(rot, offset, period);
dx = dx / voxel_x / out_factor;
dy = dy / voxel_y / out_factor;
float radius = gaussian_radius(dy, dx, gaussian_overlap);
int radius_int = max(min_radius, (int) radius);
float coor_x = (x - range_x) / voxel_x / out_factor;
float coor_y = (y - range_y) / voxel_y / out_factor;
int coor_x_int = (int) coor_x;
int coor_y_int = (int) coor_y;
// Skip boxes whose center falls outside the BEV map.
if (coor_x_int >= W || coor_x_int < 0) return;
if (coor_y_int >= H || coor_y_int < 0) return;
// draw gaussian map
// Splat exp(-d^2 / (2*sigma^2)) over a (2*radius_int+1)^2 window, keeping
// the per-pixel maximum across overlapping boxes.
float div_factor = 6.0;
float sigma = (2 * radius_int + 1) / div_factor;
for (int scan_y = -radius_int; scan_y < radius_int + 1; scan_y++){
if (coor_y_int + scan_y < 0 || coor_y_int + scan_y >= H) continue;
for (int scan_x = -radius_int; scan_x < radius_int + 1; scan_x++){
if (coor_x_int + scan_x < 0 || coor_x_int + scan_x >= W) continue;
float weight = exp(-(scan_x * scan_x + scan_y * scan_y) / (2 * sigma * sigma)); // force convert float sigma
float eps = 0.0000001;
if (weight < eps) weight = 0;
float *w_addr = heatmap + (coor_y_int + scan_y) * W + (coor_x_int + scan_x);
// NOTE(review): this exchange-then-restore pair emulates a floating-point
// atomicMax, but the two atomicExch calls are not atomic together — a
// concurrent writer between them can be lost. Confirm this tolerance is
// acceptable, or replace with an atomicCAS-based max.
float old_weight = atomicExch(w_addr, weight);
if (old_weight > weight) weight = atomicExch(w_addr, old_weight);
}
}
// Claim an output slot for this box; slots past max_objs are dropped
// (the counter itself still advances past max_objs).
int obj_idx = atomicAdd(gt_cnt, 1);
if (obj_idx >= max_objs) return;
gt_ind[obj_idx] = coor_y_int * W + coor_x_int;
gt_mask[obj_idx] = 1;
gt_cat[obj_idx] = cls_idx + 1; // begin from 1
// Encoding layout: sub-pixel center offset, z, metric sizes, sin/cos of
// yaw, and (code_size == 10 only) the velocity components.
gt_box_encoding[obj_idx * code_size + 0] = coor_x - coor_x_int;
gt_box_encoding[obj_idx * code_size + 1] = coor_y - coor_y_int;
gt_box_encoding[obj_idx * code_size + 2] = z;
gt_box_encoding[obj_idx * code_size + 3] = origin_dx;
gt_box_encoding[obj_idx * code_size + 4] = origin_dy;
gt_box_encoding[obj_idx * code_size + 5] = origin_dz;
gt_box_encoding[obj_idx * code_size + 6] = sin(rot);
gt_box_encoding[obj_idx * code_size + 7] = cos(rot);
if (code_size == 10) {
gt_box_encoding[obj_idx * code_size + 8] = vel_x;
gt_box_encoding[obj_idx * code_size + 9] = vel_y;
}
return;
}
void draw_center_kernel_launcher(int batch_size, int max_boxes, int max_objs, int num_cls, int H, int W, int code_size, int min_radius,
float voxel_x, float voxel_y, float range_x, float range_y, float out_factor, float gaussian_overlap,
const float *gt_boxes, float *heatmap, int *gt_ind, int *gt_mask, int *gt_cat, float *gt_box_encoding, int *gt_cnt){
    // One block per batch sample, THREADS_PER_BLOCK threads per block
    // (one thread per candidate box; surplus threads exit immediately).
    draw_center_kernel<<<dim3(batch_size), dim3(THREADS_PER_BLOCK)>>>(
        batch_size, max_boxes, max_objs, num_cls, H, W, code_size, min_radius,
        voxel_x, voxel_y, range_x, range_y, out_factor, gaussian_overlap,
        gt_boxes, heatmap, gt_ind, gt_mask, gt_cat, gt_box_encoding, gt_cnt);
    // Surface launch-configuration failures immediately.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err));
        exit(-1);
    }
#ifdef DEBUG
    cudaDeviceSynchronize(); // for using printf in kernel function
#endif
}
21,358 | #include <stdio.h>
#include <math.h>
#include <time.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#include <errno.h>
#include <unistd.h>
/******************************************************************************
* The variable names and the function names of this program is same as provided by the university.
The added variable and function are the only changes made to this program.
* To compile:
* nvcc -o Q2.1_a Q2.1_a.cu -lm
*
* To run:
* .Q2.1_a
*
*****************************************************************************/
// One 2D sample for the least-squares line fit: x = predictor, y = response.
typedef struct point_t {
double x;
double y;
} point_t;
// Number of samples in the `data` table below. d_n_data is the device-side
// mirror, initialized at compile time (never copied at runtime).
// NOTE(review): d_n_data is not read by any kernel in this file — presumably
// intended for a bounds check in d_rms_error; confirm before removing.
int n_data = 1000;
__device__ int d_n_data = 1000;
point_t data[] = {
{83.74,106.34},{73.39,113.86},{77.96,116.13},{79.05,99.53},
{78.42,123.49},{66.05,92.78},{65.28,89.23},{82.08,112.68},
{76.00,89.05},{69.45,97.14},{82.39,118.99},{60.34,80.15},
{57.04,89.13},{80.20,127.97},{84.48,105.81},{78.12,133.41},
{70.52,93.88},{93.03,125.69},{70.05,95.63},{68.58,89.01},
{ 6.70,32.41},{ 6.10,15.84},{16.40,49.00},{32.23,43.99},
{56.73,70.58},{64.22,101.28},{56.10,77.67},{55.15,76.24},
{18.14,27.20},{35.20,69.89},{56.60,74.69},{21.74,44.62},
{18.17,37.32},{73.48,96.31},{85.17,126.58},{84.95,124.48},
{79.38,121.14},{95.12,130.68},{21.14,49.79},{ 2.36,32.37},
{40.79,64.39},{44.71,59.59},{63.84,91.03},{40.63,55.66},
{ 4.75,32.97},{34.81,54.46},{27.77,55.46},{87.10,120.46},
{31.41,49.15},{34.79,50.51},{21.17,49.44},{ 8.87,25.18},
{96.25,125.31},{32.01,51.61},{81.53,119.04},{ 9.78,35.30},
{70.64,105.43},{76.65,115.25},{48.74,71.25},{87.70,123.83},
{ 6.78,50.34},{83.93,124.53},{ 7.81,33.51},{32.90,58.00},
{38.81,76.13},{84.21,110.96},{26.93,56.53},{35.68,66.30},
{89.43,128.18},{40.25,59.29},{ 0.29,20.10},{27.46,49.81},
{59.70,87.64},{44.81,81.52},{ 4.25,49.98},{57.40,87.62},
{ 6.23,25.46},{37.29,57.19},{31.14,43.38},{82.59,101.25},
{23.18,49.57},{84.95,128.84},{29.80,41.03},{12.78,46.02},
{15.66,45.37},{99.80,130.02},{94.17,132.91},{22.60,60.75},
{61.72,81.84},{99.84,123.21},{ 0.26,23.42},{56.43,83.42},
{95.09,136.25},{93.48,136.33},{69.29,104.04},{97.09,127.39},
{56.57,81.25},{10.12,45.78},{78.95,115.47},{59.81,76.07},
{22.57,30.01},{88.86,123.23},{25.04,48.17},{ 2.15,16.57},
{ 0.60,22.46},{23.62,57.28},{71.99,109.50},{99.72,132.03},
{50.01,81.97},{ 9.47,27.03},{ 4.77,17.19},{18.97,55.61},
{ 7.00,35.20},{22.59,63.88},{44.67,69.17},{80.60,96.96},
{58.54,100.03},{83.99,119.83},{49.74,82.94},{80.87,116.76},
{ 1.43,28.16},{97.12,109.29},{ 8.72,45.48},{ 0.16,17.61},
{42.41,66.50},{60.60,106.80},{70.66,110.42},{45.21,58.05},
{22.51,35.63},{97.11,126.70},{88.39,134.12},{92.45,102.06},
{24.94,48.84},{10.11,43.11},{85.41,124.57},{ 1.06,18.21},
{12.83,42.16},{97.76,136.25},{65.19,106.28},{41.08,67.65},
{73.42,108.37},{ 1.90,19.83},{87.83,109.92},{31.08,75.47},
{26.01,51.85},{16.81,30.60},{73.31,110.27},{ 3.17,17.35},
{73.47,110.59},{59.71,83.99},{91.47,109.86},{55.68,69.73},
{39.19,75.61},{33.03,72.17},{65.06,88.92},{12.71,44.96},
{60.87,90.97},{98.35,140.22},{12.03,24.38},{17.66,24.44},
{10.55,31.26},{70.28,99.49},{29.17,65.26},{29.72,59.44},
{93.11,130.73},{10.31,38.89},{ 7.84,28.59},{95.04,114.02},
{17.64,37.28},{15.99,49.27},{99.33,118.30},{75.31,109.18},
{37.72,50.12},{24.63,34.10},{25.68,46.44},{62.28,71.69},
{73.85,104.53},{ 8.77,33.27},{41.19,77.99},{97.40,116.13},
{12.02,37.19},{ 9.29,34.87},{31.27,52.74},{17.94,43.58},
{16.40,25.59},{55.52,96.88},{94.83,127.70},{68.98,110.26},
{31.23,54.59},{49.17,63.18},{19.53,38.67},{83.68,101.98},
{68.82,96.06},{86.77,124.87},{63.85,106.88},{52.77,98.17},
{59.42,89.11},{27.27,52.85},{61.01,90.34},{80.10,116.89},
{73.81,112.05},{32.67,55.78},{ 5.19,14.02},{49.60,67.97},
{85.97,103.76},{12.63,25.81},{63.11,97.75},{ 8.28,32.23},
{39.62,61.00},{44.63,70.61},{80.02,109.81},{67.41,91.42},
{56.76,81.23},{65.41,97.47},{35.93,64.01},{61.15,93.82},
{15.85,38.57},{34.33,58.62},{86.53,111.39},{57.52,70.74},
{80.21,117.90},{92.02,129.92},{71.09,110.89},{56.23,70.38},
{49.93,73.71},{93.20,125.76},{11.74,37.94},{22.60,55.90},
{86.19,114.25},{22.65,66.55},{20.98,44.70},{18.38,42.85},
{66.80,71.14},{84.56,105.13},{83.69,106.03},{86.16,136.85},
{73.01,115.11},{21.03,42.77},{ 5.68,35.39},{26.00,47.86},
{49.04,70.02},{42.01,67.58},{63.84,85.45},{33.99,69.31},
{82.27,131.39},{23.91,50.83},{71.22,99.28},{50.90,74.42},
{63.43,127.34},{64.52,101.94},{26.16,46.94},{93.75,135.33},
{24.45,60.31},{27.07,59.56},{ 6.63,30.60},{96.60,103.76},
{84.70,100.29},{40.94,60.39},{20.97,72.10},{52.04,88.50},
{29.82,67.23},{49.00,81.70},{ 6.81,26.55},{74.02,127.44},
{11.94,22.42},{76.65,85.37},{20.12,51.68},{30.81,57.82},
{84.34,110.19},{24.62,50.82},{ 2.39,29.61},{71.27,119.05},
{ 7.65,27.22},{52.06,76.39},{18.30,40.25},{ 7.64,39.02},
{81.37,106.20},{11.52,27.99},{40.32,45.07},{78.58,96.79},
{95.95,121.82},{27.46,63.07},{55.87,94.45},{ 0.70,24.57},
{97.99,132.86},{33.66,48.20},{90.80,151.81},{26.50,52.14},
{43.63,58.92},{89.43,133.69},{86.56,118.66},{62.23,64.55},
{24.14,57.02},{57.77,94.64},{42.50,74.10},{51.19,77.93},
{12.87,44.92},{60.37,77.71},{33.65,73.18},{23.54,49.79},
{ 4.28,23.60},{60.19,102.95},{34.91,64.20},{88.52,103.35},
{58.87,90.38},{93.30,118.97},{56.23,97.15},{91.77,116.53},
{69.16,91.38},{42.31,66.76},{47.95,72.87},{25.19,49.40},
{86.48,119.90},{40.74,69.72},{43.28,74.73},{98.01,106.18},
{ 7.77,36.49},{94.93,125.37},{56.76,70.90},{ 2.03,26.08},
{ 8.43,32.71},{53.25,108.51},{42.03,79.29},{70.05,99.48},
{28.46,47.84},{52.90,65.89},{13.78,27.18},{34.73,79.88},
{58.46,88.84},{90.98,131.32},{ 6.97,23.44},{42.35,84.44},
{97.08,125.68},{48.45,70.66},{60.29,86.37},{49.84,57.71},
{19.48,52.06},{92.02,129.81},{58.61,96.17},{18.01,52.96},
{30.69,50.44},{75.70,98.74},{86.50,110.18},{94.28,139.99},
{27.65,44.94},{20.51,49.15},{95.70,113.23},{14.88,32.87},
{13.01,42.81},{61.35,84.81},{42.95,70.86},{33.19,37.43},
{54.06,69.36},{47.79,68.21},{37.46,66.41},{24.05,53.35},
{91.70,128.14},{43.79,49.26},{22.27,28.06},{81.85,107.34},
{ 2.84,26.03},{54.53,94.13},{83.91,101.50},{78.32,96.69},
{ 7.44,36.02},{94.55,131.05},{46.17,68.56},{47.07,65.63},
{62.94,88.22},{37.56,69.79},{99.54,127.49},{23.76,20.87},
{82.13,121.06},{39.69,52.19},{12.26,31.42},{84.10,129.77},
{53.53,78.33},{73.41,110.28},{36.68,63.60},{16.80,45.02},
{ 7.14,36.68},{42.08,71.80},{59.56,107.04},{41.23,73.06},
{86.11,115.39},{71.75,98.85},{94.78,127.67},{20.46,36.27},
{32.55,75.12},{73.01,108.35},{50.16,88.92},{76.51,104.09},
{55.23,78.03},{64.48,97.81},{75.78,109.67},{48.27,101.74},
{13.65,37.06},{96.59,123.13},{ 9.52,23.72},{58.56,77.86},
{14.02,58.67},{54.21,67.92},{25.33,63.98},{98.19,134.54},
{23.06,39.93},{46.83,59.60},{72.45,100.56},{ 8.78,12.39},
{67.85,110.33},{74.42,96.69},{70.35,89.57},{79.86,87.27},
{51.62,92.07},{23.21,43.83},{ 1.10,45.80},{11.62,50.67},
{67.71,101.40},{32.18,60.45},{65.00,79.10},{37.65,80.13},
{78.68,116.54},{ 2.97,35.03},{20.43,34.06},{98.46,126.72},
{28.90,63.72},{57.71,64.73},{28.10,48.16},{55.50,75.17},
{23.52,51.43},{11.55,34.28},{93.45,125.26},{ 7.74,35.97},
{99.28,125.32},{93.31,129.15},{67.44,92.65},{47.81,90.68},
{27.28,50.50},{74.30,96.40},{37.61,50.42},{70.69,106.23},
{65.21,105.68},{59.51,96.35},{85.72,124.26},{68.29,90.02},
{26.36,47.68},{79.02,107.46},{61.82,79.06},{12.44,40.21},
{67.46,93.41},{74.45,95.69},{62.39,101.43},{40.92,76.28},
{91.90,124.93},{88.86,116.67},{56.85,92.67},{81.09,111.36},
{56.09,77.71},{40.53,61.71},{67.86,101.91},{63.20,94.65},
{31.41,62.11},{79.61,101.83},{33.32,70.06},{91.68,114.80},
{89.03,123.14},{89.69,135.65},{37.56,57.58},{97.65,124.36},
{87.50,120.56},{23.61,43.56},{53.59,90.38},{13.68,27.00},
{92.77,142.42},{79.97,95.35},{49.32,69.95},{ 8.45,31.33},
{57.25,99.90},{86.61,91.37},{53.29,111.56},{86.31,124.69},
{11.88,36.51},{ 9.14,20.15},{99.57,159.36},{ 6.12,35.23},
{10.58,25.41},{70.37,101.96},{52.44,73.19},{ 4.59,36.48},
{91.91,119.19},{ 2.21,26.61},{82.30,110.67},{77.81,114.88},
{85.43,120.93},{31.16,46.24},{71.60,97.68},{50.43,97.09},
{32.08,53.51},{80.72,120.18},{15.79,43.38},{89.43,120.66},
{86.62,126.08},{71.40,106.75},{97.96,140.90},{72.58,115.59},
{87.40,122.87},{17.73,25.54},{91.01,120.51},{68.14,100.61},
{47.56,76.98},{ 0.17,30.13},{17.60,43.35},{44.43,68.59},
{23.39,58.25},{44.67,63.18},{ 1.49, 6.99},{47.53,59.73},
{92.72,139.16},{71.00,119.66},{ 3.23,25.45},{90.94,129.26},
{84.62,121.43},{24.91,45.74},{77.20,103.91},{34.40,50.34},
{65.07,84.03},{61.97,91.46},{17.00,48.85},{88.77,116.15},
{11.65,29.84},{29.62,61.89},{44.19,80.67},{12.99,44.45},
{59.13,90.89},{80.87,101.25},{ 0.62,49.37},{95.45,131.92},
{21.49,53.63},{73.19,104.63},{47.94,75.73},{61.52,96.93},
{77.57,110.03},{56.90,84.44},{71.63,106.43},{ 7.28,41.55},
{ 6.10,20.51},{55.83,88.97},{63.35,79.25},{21.40,33.97},
{42.88,79.00},{99.78,129.69},{28.85,51.85},{29.17,56.19},
{82.46,120.73},{39.16,75.27},{23.55,56.73},{91.77,151.17},
{86.84,100.56},{95.11,135.84},{ 3.81,29.30},{ 3.01, 9.88},
{91.61,131.55},{20.07,38.71},{78.58,106.18},{71.69,85.02},
{58.34,81.29},{11.17,48.86},{92.10,123.10},{33.79,59.43},
{41.90,85.35},{34.73,56.23},{15.52,29.19},{10.70,36.36},
{16.63,44.55},{64.90,94.69},{56.40,72.86},{96.02,134.14},
{94.06,135.66},{81.44,110.23},{61.28,82.70},{86.17,133.47},
{10.26,44.22},{48.77,75.52},{87.55,117.57},{98.60,132.42},
{83.15,124.33},{70.18,85.08},{37.69,52.66},{70.69,107.51},
{23.17,40.86},{11.11,33.19},{ 2.60,16.37},{96.02,122.01},
{63.31,90.15},{34.63,66.56},{55.82,90.79},{25.40,50.60},
{ 7.06,38.27},{12.53,21.48},{ 4.66,14.08},{14.07,38.83},
{89.02,115.45},{56.75,78.71},{75.35,88.46},{10.74,32.40},
{48.46,78.69},{50.88,90.85},{21.50,55.96},{64.18,106.00},
{29.80,50.60},{36.22,54.69},{31.54,46.65},{44.07,52.18},
{76.01,87.63},{79.40,94.40},{98.57,135.11},{73.75,86.73},
{32.81,60.28},{94.83,127.77},{59.86,100.44},{91.17,114.51},
{12.45,36.03},{96.12,120.33},{75.38,112.33},{96.21,128.60},
{27.28,43.65},{39.84,79.21},{42.42,61.93},{ 3.14,18.36},
{89.49,134.56},{ 8.81,23.46},{90.18,119.62},{20.01,54.51},
{14.96,42.60},{73.66,108.71},{79.39,105.20},{53.14,89.57},
{87.37,103.38},{75.70,124.08},{22.89,46.74},{32.28,75.99},
{52.45,73.07},{ 9.29,33.95},{75.06,94.59},{58.17,95.66},
{46.06,62.02},{76.49,105.19},{17.85,52.56},{69.27,107.40},
{27.00,32.43},{63.55,95.03},{54.19,112.48},{ 1.77,-0.91},
{91.14,131.90},{49.38,77.77},{57.64,87.83},{59.84,100.60},
{ 1.17,20.81},{70.60,112.33},{26.79,46.76},{75.93,103.25},
{53.28,98.53},{52.44,73.11},{82.39,111.91},{84.95,113.72},
{75.00,114.27},{57.56,84.28},{50.70,67.68},{26.36,69.76},
{41.88,72.04},{ 2.29, 8.78},{29.33,69.36},{ 0.39, 9.25},
{38.82,53.53},{35.39,63.67},{ 4.59,38.87},{ 7.21,34.25},
{18.96,39.42},{30.84,60.42},{70.07,99.18},{97.73,143.94},
{65.34,97.35},{93.44,125.63},{66.65,100.86},{39.62,69.03},
{26.26,45.54},{75.41,117.77},{66.81,85.59},{18.88,39.74},
{25.60,57.85},{89.71,128.71},{25.73,54.25},{ 6.42,35.83},
{54.34,90.64},{30.92,52.50},{79.92,109.17},{37.04,48.19},
{90.13,105.54},{ 1.86,15.53},{84.30,126.26},{47.55,88.77},
{97.19,128.94},{72.28,101.30},{15.77,47.08},{75.83,108.56},
{60.49,106.00},{50.72,91.64},{17.36,36.36},{22.38,65.07},
{45.71,73.00},{18.96,38.13},{59.62,95.16},{ 3.62, 9.04},
{14.39,46.96},{29.11,50.08},{ 6.08,40.14},{71.40,90.42},
{93.40,121.75},{80.18,122.34},{41.58,70.11},{30.19,62.87},
{15.58,39.65},{ 2.04,34.20},{13.30,27.91},{ 5.90,38.42},
{86.92,99.01},{39.11,75.84},{52.97,94.99},{29.49,54.34},
{92.40,141.60},{95.25,117.77},{89.83,118.20},{17.93,32.44},
{67.51,108.68},{71.79,92.84},{26.96,30.39},{95.58,137.89},
{37.41,75.87},{28.77,53.56},{31.51,60.71},{89.02,103.92},
{ 6.57,24.83},{46.65,60.05},{87.61,133.66},{55.43,91.77},
{99.61,114.04},{ 8.58,24.28},{72.64,93.69},{64.98,84.64},
{26.78,62.76},{48.30,89.07},{46.45,83.27},{65.05,86.62},
{87.70,104.60},{52.55,75.05},{94.38,126.14},{49.67,60.46},
{79.60,117.39},{84.91,110.25},{72.88,96.21},{24.20,50.59},
{82.32,102.81},{35.32,51.30},{63.80,83.86},{90.49,112.62},
{70.10,116.34},{40.18,66.98},{88.93,135.84},{49.86,96.62},
{ 6.19,35.19},{94.13,110.02},{92.73,127.53},{29.71,52.48},
{28.99,37.23},{85.58,116.09},{97.50,141.54},{ 1.13,26.22},
{95.21,117.79},{35.68,64.99},{90.78,123.59},{40.28,83.34},
{93.53,120.60},{99.16,130.41},{69.71,103.90},{62.89,75.23},
{95.35,157.36},{28.76,53.43},{18.23,37.25},{36.28,71.43},
{38.14,61.35},{34.55,61.35},{28.16,39.07},{74.42,90.63},
{39.54,69.77},{81.82,118.29},{36.13,58.44},{87.52,125.78},
{59.17,95.21},{ 1.18,20.54},{28.90,69.59},{70.44,106.16},
{16.27,34.53},{25.14,54.61},{26.25,46.75},{ 9.09,38.19},
{46.65,69.63},{25.83,42.75},{86.86,137.20},{37.34,80.87},
{76.91,109.10},{65.34,99.86},{54.88,74.35},{18.16,44.11},
{51.86,77.14},{39.26,80.22},{ 9.96,23.45},{29.17,53.30},
{47.53,69.06},{54.09,83.78},{99.01,106.97},{75.74,101.47},
{91.20,130.12},{13.45,39.52},{45.13,73.46},{34.59,66.91},
{85.52,108.74},{74.56,83.25},{ 2.84,21.61},{39.19,79.98},
{99.60,136.33},{64.64,93.93},{81.40,112.90},{80.99,111.74},
{27.12,60.72},{88.86,129.99},{72.75,107.24},{ 8.23,32.74},
{26.54,72.28},{40.38,71.89},{25.78,55.94},{29.60,44.89},
{96.50,126.32},{73.51,86.15},{55.44,71.67},{88.05,120.43},
{98.23,124.87},{83.14,120.90},{12.94,40.30},{ 3.11,23.56},
{ 5.75,26.47},{ 3.61,14.32},{43.88,57.83},{67.04,99.97},
{37.45,75.91},{ 9.11,29.62},{60.23,94.20},{71.27,121.40},
{13.52,28.40},{81.99,126.95},{77.24,106.70},{76.76,120.98},
{91.78,110.19},{57.18,76.57},{ 2.21,24.84},{44.33,74.56},
{72.59,94.18},{45.75,61.53},{56.32,81.85},{41.52,65.65},
{52.87,93.33},{38.56,54.99},{99.44,131.48},{15.62,50.77},
{ 6.17,47.76},{70.92,105.91},{55.79,80.31},{38.47,49.54},
{29.95,54.48},{70.60,92.43},{30.62,65.87},{22.35,54.38},
{76.54,100.85},{99.82,136.65},{62.91,102.27},{45.10,59.86},
{56.19,92.69},{88.97,134.24},{30.38,59.80},{42.77,65.73},
{59.61,81.77},{ 4.39,27.50},{10.08,14.46},{38.15,44.72},
{73.02,106.18},{15.05,29.23},{57.12,88.48},{60.38,89.57},
{47.78,83.80},{77.52,119.08},{74.17,107.35},{ 7.18,41.70},
{89.65,135.86},{65.54,89.68},{41.46,66.37},{ 6.49,37.83},
{47.11,77.42},{93.16,145.25},{29.22,59.79},{31.49,74.48},
{49.95,81.42},{80.68,116.42},{42.77,71.65},{79.81,98.09},
{11.49,36.53},{69.71,92.14},{98.61,137.83},{ 2.06,12.89},
{19.59,52.47},{39.88,73.17},{36.54,59.25},{97.33,117.23},
{33.73,71.77},{13.73,42.62},{30.33,80.15},{38.74,54.58},
{13.51,22.00},{35.18,71.99},{76.01,114.80},{23.70,38.91},
{ 1.05,36.68},{56.02,91.15},{87.23,107.00},{89.94,119.06},
{61.07,82.29},{57.11,77.86},{51.87,76.38},{26.32,42.12},
{24.85,48.18},{16.42,45.70},{ 3.38,34.14},{ 2.15,29.78},
{33.70,57.28},{ 5.49,15.96},{ 5.93,27.03},{35.60,61.48},
{67.87,92.28},{73.38,112.27},{ 5.19,22.07},{14.63,49.33},
{73.40,108.49},{96.35,136.61},{89.90,118.44},{75.21,105.21},
{71.51,125.63},{76.28,86.34},{73.08,94.95},{37.44,67.60},
{67.20,102.08},{40.97,69.34},{91.50,132.04},{ 1.45,23.90},
{ 3.77,35.09},{39.31,52.32},{46.32,90.70},{30.69,45.53},
{42.62,80.56},{72.20,108.15},{14.53,42.16},{84.98,106.08},
{70.98,117.10},{15.22,36.41},{56.72,94.78},{98.54,132.26},
{91.57,126.73},{63.97,99.65},{83.61,113.56},{38.61,53.06},
{42.91,80.55},{96.28,141.26},{53.92,77.85},{92.10,126.52},
{98.60,113.91},{90.29,117.81},{31.26,56.93},{70.75,101.33},
{51.87,79.32},{50.86,80.59},{56.39,75.72},{71.63,97.31},
{42.90,69.19},{40.23,67.54},{39.88,66.03},{35.67,57.92},
{86.43,124.36},{74.87,104.27},{44.42,93.21},{84.58,119.83},
{90.52,115.12},{43.61,71.83},{ 5.44,27.81},{46.09,68.62}};
/* Squared residual of the line y = m*x + c at the sample (x, y). */
double residual_error(double x, double y, double m, double c) {
    double diff = m * x + c - y;
    return diff * diff;
}
/* Device-side twin of residual_error: squared error of line (m, c) at (x, y). */
__device__ double d_residual_error(double x, double y, double m, double c) {
    double diff = (m * x) + c - y;
    return diff * diff;
}
/* Root-mean-square residual of the line (m, c) over the global `data` table. */
double rms_error(double m, double c) {
    double error_sum = 0;
    for (int i = 0; i < n_data; i++) {
        error_sum += residual_error(data[i].x, data[i].y, m, c);
    }
    return sqrt(error_sum / n_data);
}
// One thread per data point: store the point's squared residual for the
// candidate line (*m, *c) into error_sum_arr[i]; the host reduces the array.
// NOTE(review): there is no bounds guard, so the launch must supply exactly
// one thread per point (main launches <<<100,10>>> for 1000 points and sizes
// error_sum_arr to 1000 doubles) — confirm before reusing with any other
// configuration.
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr, point_t *d_data) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}
/*
 * Store (finish - start) in nanoseconds into *difference.
 * Returns 0 when the elapsed time is positive, 1 otherwise.
 */
int time_difference(struct timespec *start, struct timespec *finish,
long long int *difference) {
    long long int secs  = finish->tv_sec - start->tv_sec;
    long long int nsecs = finish->tv_nsec - start->tv_nsec;
    // Borrow one second when the nanosecond part underflows.
    if (nsecs < 0) {
        secs -= 1;
        nsecs += 1000000000;
    }
    *difference = secs * 1000000000 + nsecs;
    return !(*difference > 0);
}
int main() {
    // Gradient-free descent for the least-squares line y = m*x + c:
    // probe 8 neighbours of the current (bm, bc) each iteration, move to the
    // best one, stop when none improves. RMS errors are computed on the GPU.
    //
    // Fixes vs the previous version:
    //  * error_sum_total was read before being assigned on each candidate's
    //    first accumulation (undefined behaviour) — now initialized to 0;
    //  * cudaThreadSynchronize() is deprecated — use cudaDeviceSynchronize();
    //  * the immutable data table was re-copied to the device on every
    //    iteration of the search loop — copied once before the loop.
    int i;
    double bm = 1.3;               // current best gradient
    double bc = 10;                // current best intercept
    double be;                     // RMS error of (bm, bc)
    double dm[8];
    double dc[8];
    double e[8];
    double step = 0.01;
    double best_error = 999999999;
    int best_error_i;
    int minimum_found = 0;
    // Eight search directions around the current (m, c).
    double om[] = {0,1,1, 1, 0,-1,-1,-1};
    double oc[] = {1,1,0,-1,-1,-1, 0, 1};
    struct timespec start, finish;
    long long int time_elapsed;
    clock_gettime(CLOCK_MONOTONIC, &start);
    cudaError_t error;
    double *d_dm;
    double *d_dc;
    double *d_error_sum_arr;
    point_t *d_data;
    be = rms_error(bm, bc);
    error = cudaMalloc(&d_dm, (sizeof(double) * 8));
    if(error){
        fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_dc, (sizeof(double) * 8));
    if(error){
        fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
    if(error){
        fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaMalloc(&d_data, sizeof(data));
    if(error){
        fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    // The sample data never changes: copy it to the device once, here.
    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
        fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
                cudaGetErrorString(error));
    }
    while(!minimum_found) {
        // Build the 8 candidate (m, c) pairs one step away in each direction.
        for(i=0;i<8;i++) {
            dm[i] = bm + (om[i] * step);
            dc[i] = bc + (oc[i] * step);
        }
        error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
                    cudaGetErrorString(error));
        }
        error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
        if(error){
            fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
                    cudaGetErrorString(error));
        }
        for(i=0;i<8;i++) {
            double h_error_sum_arr[1000];
            double error_sum_total = 0;  // must start at 0 for every candidate
            double error_sum_mean;
            // 100 blocks x 10 threads = exactly one thread per data point,
            // as d_rms_error requires.
            d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
            cudaDeviceSynchronize();
            error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr, (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
            if(error){
                fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
                        cudaGetErrorString(error));
            }
            // Host-side reduction of the per-point squared residuals.
            for(int j=0; j<n_data; j++) {
                error_sum_total += h_error_sum_arr[j];
            }
            error_sum_mean = error_sum_total / n_data;
            e[i] = sqrt(error_sum_mean);
            if(e[i] < best_error) {
                best_error = e[i];
                best_error_i = i;
            }
        }
        // Move to the best neighbour if it improves; otherwise we are done.
        if(best_error < be) {
            be = best_error;
            bm = dm[best_error_i];
            bc = dc[best_error_i];
        } else {
            minimum_found = 1;
        }
    }
    error = cudaFree(d_dm);
    if(error){
        fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_dc);
    if(error){
        fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_data);
    if(error){
        fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    error = cudaFree(d_error_sum_arr);
    if(error){
        fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
                cudaGetErrorString(error));
        exit(1);
    }
    printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);
    clock_gettime(CLOCK_MONOTONIC, &finish);
    time_difference(&start, &finish, &time_elapsed);
    printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
           (time_elapsed/1.0e9));
    return 0;
}
|
21,359 |
#include "cuda_runtime.h"
#include <iostream>
// Print a diagnostic and terminate the process with a failure status.
// Used by main() whenever a CUDA runtime query returns a non-zero status.
void error() {
printf("Encountered an error...");
exit(1);
}
int main()
{
cudaDeviceProp prop;
int count = 0;
if (cudaGetDeviceCount(&count))
error();
for (int i = 0; i < count; ++i) {
if (cudaGetDeviceProperties(&prop, i))
error();
std::cout << "\tProperties of device #" << i << std::endl;
std::cout << "Name: " << prop.name << std::endl;
std::cout << "Compute: " << prop.major << ", " << prop.minor << std::endl;
std::cout << "Clock rate: " << prop.clockRate << std::endl;
std::cout << "Device Overlap: " << prop.deviceOverlap << std::endl;
std::cout << "Kernel Execution Timeout: " << prop.kernelExecTimeoutEnabled << std::endl;
std::cout << std::endl << "\tMemory Information" << std::endl;
std::cout << "Total global memory: " << prop.totalGlobalMem << std::endl;
std::cout << "Total constant memory: " << prop.totalConstMem << std::endl;
std::cout << "Max memory pitch: " << prop.memPitch << std::endl;
std::cout << "Texture Alignment: " << prop.textureAlignment << std::endl;
std::cout << std::endl << "\tMulti-Processor Information" << std::endl;
std::cout << "Multiprocessor count: " << prop.multiProcessorCount << std::endl;
std::cout << "Shared memory per block: " << prop.sharedMemPerBlock << std::endl;
std::cout << "Registers per multiprocessor: " << prop.regsPerBlock << std::endl;
std::cout << "Threads of warp: " << prop.warpSize << std::endl;
std::cout << "Max threads per block: " << prop.maxThreadsPerBlock << std::endl;
std::cout << "Max thread dimensions: " << prop.maxThreadsDim[0] << ", " <<
prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << std::endl;
std::cout << "Max grid dimensions: " << prop.maxGridSize[0] << ", " <<
prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << std::endl;
std::cout << std::endl << std::endl;
}
return 0;
} |
21,360 | #include "includes.h"
// Zero the first `length` samples of idata, one thread per sample.
// NOTE(review): despite the name, this writes 0 everywhere rather than
// Bartlett (triangular) window coefficients — confirm whether this is an
// unfinished stub or the window math is applied elsewhere.
__global__ void windowBartlett(float* idata, int length)
{
int tidx = threadIdx.x + blockIdx.x*blockDim.x;
if (tidx < length)
{
idata[tidx] = 0;
}
}
21,361 | //---------------------------------------------------------------
// Trabalho Práctico Nº4 - CUDA I - CHAD
// Óscar Ferraz
// 2018/2019
// --------------------------------------------------------------
// nvcc -o vecAdd vecAdd.cu -I /usr/local/cuda-9.1/samples/common/inc
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
//#include <helper_cuda.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements.
*
*/
// 256-bin histogram of an 8-bit image using a per-block privatized
// histogram in shared memory, merged into the global histogram at the end.
// `hist` must be zero-initialized by the caller before launch.
__global__ void histo(unsigned int * hist, unsigned char * Image, int width1, int height1)
{
    // One shared bin per possible byte value; only the first 256 threads
    // participate in clearing/merging (blocks may be larger than 256).
    __shared__ unsigned int block_bins[256];
    const int tid = threadIdx.x;
    if (tid < 256)
        block_bins[tid] = 0;
    __syncthreads();

    // Grid-stride walk over all pixels of the image.
    const int total = width1 * height1;
    const int step = blockDim.x * gridDim.x;
    for (int px = tid + blockIdx.x * blockDim.x; px < total; px += step)
        atomicAdd(&(block_bins[Image[px]]), 1);
    __syncthreads();

    // Fold this block's partial counts into the global result.
    if (tid < 256)
        atomicAdd(&(hist[tid]), block_bins[tid]);
}
/**
* Host main routine
*/
/**
 * Host main routine: read the raw bytes of "model_result.ppm" (skipping the
 * 3 header lines), compute a 256-bin histogram on the GPU, and report timing.
 */
int
main(void)
{
    // Fixed dimensions of the input image "model_result.ppm".
    int width=813;
    int height=707;
    int numElements=width*height;
    printf("[Vector addition of %d elements]\n", numElements);
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    size_t size=width*height*sizeof(unsigned char);
    size_t size_hist=256*sizeof(unsigned int);
    // Host buffers: image bytes and the 256-bin histogram.
    unsigned char *h_image=(unsigned char *)malloc(size);
    unsigned int *h_hist=(unsigned int *)malloc(size_hist);
    // Verify that allocations succeeded
    if (h_image==NULL||h_hist==NULL)
    {
        fprintf(stderr, "Failed to allocate horgbImagest vectors!\n");
        exit(EXIT_FAILURE);
    }
    // Reading file: after 3 newline-terminated header lines, every byte is
    // image data, until width*height bytes have been stored.
    FILE *fp;
    // BUG FIX: `ch` must be int, not unsigned char. fgetc() returns EOF
    // (-1) as an int; truncated into an unsigned char it becomes 255, the
    // `!= EOF` test can never fail, and short files loop forever.
    int ch;
    fp = fopen("model_result.ppm", "r");
    if(fp == NULL)
    {
        printf("Error in opening the image\n");
        // BUG FIX: fclose(NULL) was called here (undefined behavior);
        // nothing was opened, so there is nothing to close.
        return(0);
    }
    printf("Successfully opened the image file\n");
    int k=0;
    int i=0;
    while((ch = fgetc(fp)) != EOF)
    {
        if(k>=3){
            h_image[i]=(unsigned char)ch;
            i++;
        }
        if(ch == '\n')
            k++;
        if(i>=813*707)
            break;
    }
    printf("reading completed\n");
    fclose(fp);
    // Allocate the device input vector image
    unsigned char *d_image=NULL;
    err=cudaMalloc((void **)&d_image, size);
    if(err!=cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector image (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Allocate the device histogram.
    unsigned int *d_hist=NULL;
    err=cudaMalloc((void **)&d_hist, size_hist);
    if(err!=cudaSuccess)
    {
        fprintf(stderr, "Failed to allocate device vector grey (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // BUG FIX: the kernel accumulates into d_hist with atomicAdd, so it
    // must start at zero; it was previously left uninitialized.
    err=cudaMemset(d_hist, 0, size_hist);
    if(err!=cudaSuccess)
    {
        fprintf(stderr, "Failed to zero device histogram (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the host image to the device.
    printf("Copy input image from the host memory to the CUDA device\n");
    err=cudaMemcpy(d_image, h_image, size, cudaMemcpyHostToDevice);
    if(err!=cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector image from host to device (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // One block of 1024 threads; the kernel's grid-stride loop still
    // covers the whole image with this configuration.
    int threadsPerBlock =1024;
    int blocksPerGrid = 1;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    struct timespec start, end;
    clock_gettime(CLOCK_MONOTONIC, &start);
    histo<<<blocksPerGrid, threadsPerBlock>>>(d_hist, d_image, width, height);
    // BUG FIX: kernel launches are asynchronous; without this sync the
    // timer measured only the launch overhead, not the execution.
    cudaDeviceSynchronize();
    clock_gettime(CLOCK_MONOTONIC, &end);
    err=cudaGetLastError();
    if(err!=cudaSuccess)
    {
        fprintf(stderr, "Failed to launch colorToGreyScaleConvertion kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the device result vector in device memory to the host result vector in host memory
    printf("Copy output data from the CUDA device to the host memory\n");
    err=cudaMemcpy((unsigned int *)h_hist, (unsigned int *)d_hist, size_hist, cudaMemcpyDeviceToHost);
    if(err!=cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector grey from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    //=====================================================
    // write result (disabled)
    //=====================================================
    /*fp = fopen("result_cuda.ppm", "w");
    fprintf(fp, "P6\n813 707\n255\n");
    for(i=0;i<813*707;i++){
    fputc(h_grey[i], fp);
    fputc(h_grey[i], fp);
    fputc(h_grey[i], fp);
    }
    fclose(fp);*/
    // Convert the monotonic timestamps to milliseconds.
    double initialTime=(start.tv_sec*1e3)+(start.tv_nsec*1e-6);
    double finalTime=(end.tv_sec*1e3)+(end.tv_nsec*1e-6);
    printf("TIme:\t%f ms\n", (finalTime - initialTime));
    // Free device global memory
    err=cudaFree(d_image);
    if(err!=cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector image(error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err=cudaFree(d_hist);
    if(err!=cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector grey (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Free host memory
    free(h_image);
    free(h_hist);
    printf("Done\n");
    return 0;
}
|
21,362 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <iostream>
// nvcc -O3 -std=c++14 example1.cu -o t1 && ./t1 < stocks2.csv
// Read (AAPL, MSFT) price pairs from stdin, then compute on the GPU the
// mean and variance of the per-day difference MSFT - AAPL.
int main()
{
    thrust::host_vector<double> hostApple;
    thrust::host_vector<double> hostMicrosoft;
    /* BUG FIX: the old loop incremented N and pushed a pair BEFORE testing
     * the stream state, so the final failed extraction appended one bogus
     * (uninitialized) element and over-counted N by one. A pair is now
     * stored only after both reads succeed. */
    double aapl, msft;
    while (std::cin >> aapl >> msft)
    {
        hostApple.push_back(aapl);
        hostMicrosoft.push_back(msft);
    }
    const int N = (int)hostApple.size();
    if (N == 0)
        return 0; // no data: also avoids dividing by zero below
    /* The lines below copy the data to the GPU. */
    thrust::device_vector<double> AAPL(hostApple);
    thrust::device_vector<double> MSFT(hostMicrosoft);
    // diff[i] = MSFT[i] - AAPL[i]
    thrust::device_vector<double> diff(N);
    thrust::transform(MSFT.begin(), MSFT.end(), AAPL.begin(), diff.begin(), thrust::minus<double>());
    double mean = thrust::reduce(diff.begin(), diff.end(), 0.0, thrust::plus<double>()) / N;
    std::cout << "Média da diferença das ações: " << mean << "\n";
    // Variance = mean of squared deviations of diff from its mean.
    thrust::device_vector<double> meanDiff(N);
    thrust::fill(meanDiff.begin(), meanDiff.end(), mean);
    thrust::device_vector<double> diff_X_minus_mean(N);
    thrust::transform(diff.begin(), diff.end(), meanDiff.begin(), diff_X_minus_mean.begin(), thrust::minus<double>());
    thrust::device_vector<double> diff_X_minus_mean_squared(N);
    thrust::transform(diff_X_minus_mean.begin(), diff_X_minus_mean.end(), diff_X_minus_mean_squared.begin(), thrust::square<double>());
    double variance = thrust::reduce(diff_X_minus_mean_squared.begin(), diff_X_minus_mean_squared.end(), 0.0, thrust::plus<double>()) / N;
    std::cout << "Variância: " << variance << "\n";
}
|
21,363 | #include <cuda_runtime_api.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#define TILE_WIDTH 16
// Naive GEMM: c(m x k) = a(m x n) * b(n x k), row-major, one output
// element per thread. Launch a 2D grid where x covers the k columns and
// y covers the m rows; threads outside the output matrix return early.
__global__ void gpu_matrix_mult_one(int *a, int *b, int *c, int m, int n, int k)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y; // output row
    const int q = blockIdx.x * blockDim.x + threadIdx.x; // output column
    if (r >= m || q >= k)
        return;
    // Dot product of row r of a with column q of b.
    int acc = 0;
    for (int t = 0; t < n; t++)
        acc += a[r * n + t] * b[t * k + q];
    c[r * k + q] = acc;
}
/* Tiled GEMM: d_P(m x k) = d_M(m x n) * d_N(n x k), row-major.
 * Each block computes one TILE_WIDTH x TILE_WIDTH tile of d_P, streaming
 * matching tiles of d_M and d_N through shared memory.
 *
 * Fixes vs. the previous version (grounded in the naive kernel above and
 * the launch configuration in main, where grid y covers m and grid x
 * covers k):
 *  - the row/col bound checks were swapped (`row < k && col < m`), giving
 *    wrong results whenever m != k;
 *  - d_M was indexed with stride k, but a row of an (m x n) matrix has
 *    stride n;
 *  - the phase loop ran n / TILE_WIDTH times, silently dropping the
 *    remainder tile when n is not a multiple of TILE_WIDTH;
 *  - out-of-range lanes left stale values in the shared tiles; they now
 *    store 0 so the partial products are unaffected.
 */
__global__ void gpu_matrix_mult_two(int *d_M, int *d_N, int *d_P, int m, int n, int k)
{
    // shared memory for tiling
    __shared__ int Mds [TILE_WIDTH][TILE_WIDTH];
    __shared__ int Nds [TILE_WIDTH][TILE_WIDTH];
    int tx = threadIdx.x; int ty = threadIdx.y;
    // recall that TILE_WIDTH = blockDim
    int row = blockIdx.y * TILE_WIDTH + ty;   // output row, valid when < m
    int col = blockIdx.x * TILE_WIDTH + tx;   // output column, valid when < k
    int pval = 0;
    // Ceiling division: cover the partial tile at the end of the shared
    // inner dimension n.
    int numPhases = (n + TILE_WIDTH - 1) / TILE_WIDTH;
    for (int ph = 0; ph < numPhases; ph++){
        // Load one tile of M (zero-fill outside the matrix).
        if (row < m && ph * TILE_WIDTH + tx < n)
            Mds[ty][tx] = d_M[row * n + ph * TILE_WIDTH + tx];
        else
            Mds[ty][tx] = 0;
        // Load one tile of N (zero-fill outside the matrix).
        if (ph * TILE_WIDTH + ty < n && col < k)
            Nds[ty][tx] = d_N[(ph * TILE_WIDTH + ty) * k + col];
        else
            Nds[ty][tx] = 0;
        __syncthreads(); // tiles fully loaded before use
        for (int j = 0; j < TILE_WIDTH; j++){
            // partial inner product over this tile
            pval += Mds[ty][j] * Nds[j][tx];
        }
        __syncthreads(); // done reading tiles before the next load
    }
    if (row < m && col < k){
        d_P[row * k + col] = pval;
    }
}
/* Interactive driver: reads m, n, k, builds random matrices A (m x n) and
 * B (n x k), and times the naive and the tiled GPU multiply. */
int main(int argc, char const *argv[])
{
    int m, n, k; // init matrix dimensions
    printf("---------------------------------------------\n");
    printf("We will be multiplying two matrices\n");
    printf("The first will be of size m x n\n");
    printf("The second will be of size n x k\n");
    printf("I will have you choose these dimensions!\n");
    printf("---------------------------------------------\n\n");
    printf("Input m:\n");
    scanf("%d", &m);
    printf("\nInput n:\n");
    scanf("%d", &n);
    printf("\nInput k:\n");
    scanf("%d", &k);
    printf("\n");
    // Pinned host buffers for fast transfers. (The unused `cpu_result`
    // allocation of the previous version has been removed.)
    int *cpu_a, *cpu_b, *cpu_c;
    cudaMallocHost((void **) &cpu_a, sizeof(int)*m*n); // matrix a
    cudaMallocHost((void **) &cpu_b, sizeof(int)*n*k); // matrix b
    cudaMallocHost((void **) &cpu_c, sizeof(int)*m*k); // cpu memory for gpu result
    // Generate matrix a.
    for (int i = 0; i < m; i++)
    {
        for (int j = 0; j < n; j++)
        {
            cpu_a[i * n + j] = rand () % 1024;
        }
    }
    // Generate matrix b.
    // BUG FIX: the row index was hard-coded as `n` (cpu_b[n * k + j]), so
    // only one (out-of-range) row was written and the actual matrix
    // contents stayed uninitialized.
    for (int i = 0; i < n; i++)
    {
        for (int j = 0; j < k; j++)
        {
            cpu_b[i * k + j] = rand () % 1024;
        }
    }
    // variable to keep track of time
    float gpu_time;
    // create start and stop events
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // ******************************************************************************
    // ================================ GPU =========================================
    // ******************************************************************************
    // The result matrix is m x k: grid y covers the rows, grid x the columns.
    unsigned int grid_rows = (m + TILE_WIDTH - 1) / TILE_WIDTH;
    unsigned int grid_cols = (k + TILE_WIDTH - 1) / TILE_WIDTH;
    printf("---------------------------------------------\n");
    printf("The resulting matrix will be of size %d x %d\n", m, k);
    printf("\nI am launching a grid size of %d x %d blocks\n", grid_rows, grid_cols);
    printf("Each block will be %d x %d threads\n",TILE_WIDTH,TILE_WIDTH);
    printf("This will give you %d x %d available threads\n",grid_rows*TILE_WIDTH,grid_cols*TILE_WIDTH);
    printf("---------------------------------------------\n\n");
    printf("Press ENTER to begin computation on GPU (w/o tiling)...\n");
    getchar();
    getchar();
    // start to count execution time of GPU version
    cudaEventRecord(start, 0);
    // Allocate memory space on the device
    int *gpu_a, *gpu_b, *gpu_c;
    cudaMalloc((void **) &gpu_a, sizeof(int)*m*n);
    cudaMalloc((void **) &gpu_b, sizeof(int)*n*k);
    cudaMalloc((void **) &gpu_c, sizeof(int)*m*k);
    // BUG FIX: both copies used the device pointer as source AND
    // destination (gpu_a -> gpu_a), so host data never reached the GPU.
    cudaMemcpy(gpu_a, cpu_a, sizeof(int)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, cpu_b, sizeof(int)*n*k, cudaMemcpyHostToDevice);
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    // Launch the naive kernel.
    gpu_matrix_mult_one<<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c, m, n, k);
    // Transfer results from device to host (blocking copy also waits for
    // the kernel on the same stream).
    cudaMemcpy(cpu_c, gpu_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize(); // cudaThreadSynchronize() is deprecated
    // time counting terminate
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // compute time elapse on GPU computing
    cudaEventElapsedTime(&gpu_time, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d * %dx%d on GPU WITHOUT tiling: %f ms.\n\n", m, n, n, k, gpu_time);
    printf("Press ENTER to begin computation on GPU (w/tiling)...\n");
    getchar();
    // start to count execution time of GPU version
    cudaEventRecord(start, 0);
    // Refresh device copies (same fix as above).
    cudaMemcpy(gpu_a, cpu_a, sizeof(int)*m*n, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, cpu_b, sizeof(int)*n*k, cudaMemcpyHostToDevice);
    // Launch the tiled kernel.
    gpu_matrix_mult_two<<<dimGrid, dimBlock>>>(gpu_a, gpu_b, gpu_c, m, n, k);
    cudaMemcpy(cpu_c, gpu_c, sizeof(int)*m*k, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // time counting terminate
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // compute time elapse on GPU computing
    cudaEventElapsedTime(&gpu_time, start, stop);
    printf("Time elapsed on matrix multiplication of %dx%d * %dx%d on GPU WITH tiling: %f ms.\n\n", m, n, n, k, gpu_time);
    // Release resources (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(gpu_a); cudaFree(gpu_b); cudaFree(gpu_c);
    cudaFreeHost(cpu_a); cudaFreeHost(cpu_b); cudaFreeHost(cpu_c);
    // fin
    return 0;
}
21,364 | // n should be less than 10000 when k==3
#include <stdio.h>
#include <cuda.h>
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int n, int k);
// Driver: builds a descending test array of n floats, launches one block
// per chunk of 8 elements, and compares the kernel's per-block results
// against a host-side reference.
// NOTE(review): argv[1]/argv[2] are read without checking argc — the
// program crashes if n and k are not supplied.
int main(int argc, char **argv) {
int n = atoi(argv[1]);
int k = atoi(argv[2]);
//generate a 1d array (values n, n-1, ..., 1)
float *arr = (float*) malloc(n*sizeof(float));
int i;
for (i = n; i > 0; i--) {
arr[n-i] = (float)i;
}
const int numthreadsBlock = 8;
int numChunk;
// Ceiling division: number of 8-element chunks covering n.
numChunk = ( n + numthreadsBlock - 1)/numthreadsBlock;
float *maxarr = (float *)malloc(numChunk * sizeof(float));
int numBlock = numChunk;
// declare GPU memory pointers
float *darr, * dmaxarr;
cudaMalloc((void **)&darr, n*sizeof(float));
cudaMalloc((void **)&dmaxarr, numChunk*sizeof(float));
cudaMemcpy(darr, arr, n*sizeof(float), cudaMemcpyHostToDevice);
dim3 dimGrid(numBlock,1);
dim3 dimBlock(numthreadsBlock,1,1);
// Dynamic shared memory: n floats for the series copy plus 3 per-thread
// scratch arrays — this size must stay in sync with the kernel's layout.
parallel_max_each_chunk<<<dimGrid,dimBlock,(n+3*numthreadsBlock)*sizeof(float)>>>(dmaxarr, darr, n, k);
// NOTE(review): cudaThreadSynchronize() is deprecated; prefer
// cudaDeviceSynchronize().
cudaThreadSynchronize();
cudaMemcpy(maxarr, dmaxarr, numChunk*sizeof(float), cudaMemcpyDeviceToHost);
//truth: host-side expected value per chunk
float *smaxarr = (float *)malloc(numChunk*sizeof(float));
for (i = 0; i < numChunk; i ++) {
smaxarr[i] = i*numthreadsBlock + k <=n? arr[i*numthreadsBlock + k/2 ]:0; // k is an odd number
}
//check the results (exact float equality — both sides compute means)
bool judge = true;
for (i=0; i < numBlock; i++) {
printf("max of block %d, %f %f\n ", i, smaxarr[i], maxarr[i]);
judge = judge && (smaxarr[i] == maxarr[i]);
}
printf("\n--------correct or wrong---------\n");
printf(judge ? "right\n": "wrong\n");
// This is for developing: print out the 1d array
printf("\n--------1d array---------\n");
if ( n < 15) {
for (i=0; i < n; i++) {
printf("element %d, %f\n ", i, arr[i]);
}
}
// check the exit state of CUDA code
cudaError_t error = cudaGetLastError();
if (error !=cudaSuccess) {
printf("CUDA error: %s\n", cudaGetErrorString(error));
}
//free gpu memory
// NOTE(review): the host buffers arr, maxarr and smaxarr are never freed.
cudaFree(dmaxarr);
cudaFree(darr);
return 0;
}
/* For each block, find the maximum mean over all subsequences of length
 * >= k that start inside this block's chunk, and write it to
 * dmaxarr[blockIdx.x]. The whole series is first staged in shared memory.
 * Assumes blockDim.x is a power of two (true for the 8-thread launch). */
__global__ void parallel_max_each_chunk(float *dmaxarr, float * darr, int n,int k) {
    int i, tid = threadIdx.x;
    //always round up and if n is a multiple of blockDim.x no rounding
    int chunkSize = (n+blockDim.x-1)/blockDim.x;
    /* BUG FIX: the previous version declared four separate
     * `extern __shared__` arrays (sdata, mymaxvals, mystartmaxes,
     * myendmaxes). In CUDA every `extern __shared__` declaration aliases
     * the SAME base address, so all four arrays silently overwrote each
     * other. The single (n + 3*blockDim.x)-float allocation made by the
     * host is now partitioned explicitly:
     *   smem[0 .. n)                  copy of the input series
     *   smem[n .. n+bd)               per-thread best mean
     *   smem[n+bd .. n+2*bd)          per-thread best start index
     *   smem[n+2*bd .. n+3*bd)        per-thread best end index          */
    extern __shared__ float smem[];
    float *sdata = smem;
    float *mymaxvals = smem + n;
    float *mystartmaxes = mymaxvals + blockDim.x;
    float *myendmaxes = mystartmaxes + blockDim.x;
    // Cooperative copy of the whole series into shared memory.
    for (i = 0; i < chunkSize; i++) {
        if (tid * chunkSize + i < n)
            sdata[tid*chunkSize + i ] = darr[tid*chunkSize + i];
    }
    __syncthreads();
    int perstart = threadIdx.x + blockDim.x * blockIdx.x;
    int perlen, perend;
    double xbar; // a temporary variable used when computing mean of subsequence
    // Initialize every slot so the reduction below never reads
    // uninitialized shared memory (fixes a latent bug for threads whose
    // best window was never recorded).
    mymaxvals[tid] = 0;
    mystartmaxes[tid] = 0;
    myendmaxes[tid] = 0;
    if (perstart <= n-k) {
        for (perlen = k ; perlen <= n - perstart ; perlen++) {
            perend = perstart + perlen - 1;
            //compute the mean of subsequence incrementally
            if (perlen ==k) {
                xbar = 0;
                for ( i = perstart; i <= perend; i++) {
                    xbar += sdata[i];
                }
                xbar /= (perend - perstart + 1);
                mymaxvals[tid] = xbar;
                mystartmaxes[tid] = perstart;
                myendmaxes[tid] = perend;
            } else {
                // Extend by one element: derive the new mean from the old.
                xbar = ( (perlen-1) * xbar + sdata[perend] ) / perlen;
            }
            //update if the longer subsequence has a strictly higher mean
            if (xbar > mymaxvals[tid]) {
                mymaxvals[tid] = xbar;
                mystartmaxes[tid] = perstart;
                myendmaxes[tid] = perend;
            }
        }
    }
    __syncthreads(); //sync to make sure each thread in this block has done with the for loop
    // Tree reduction for the block-wide maximum (power-of-two blockDim).
    for (int s = blockDim.x/2; s > 0; s>>=1) {
        if (tid < s ) {
            if(mymaxvals[tid+s] > mymaxvals[tid]) {
                mymaxvals[tid] = mymaxvals[tid+s];
                mystartmaxes[tid] = mystartmaxes[tid + s];
                myendmaxes[tid] = myendmaxes[tid + s];
            }
        }
        __syncthreads();
    }
    // the maximum among the mymaxvals in this block
    if(tid == 0) {
        dmaxarr[blockIdx.x] = mymaxvals[0];
    }
}
|
21,365 | #include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <time.h>
#include "curisk.cuh"
__global__ void generate_vector_sample_kernel();
__global__ void setup_gamma_generator(long seed);
__device__ __forceinline__ float generate_gamma_1_1(curandState *state);
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("\nError \"%s\" at %s:%d\n", cudaGetErrorString(x), __FILE__, __LINE__);\
exit(EXIT_FAILURE);}} while(0)
#define check_error() do { if(cudaGetLastError()!=cudaSuccess) { \
printf("\nError \"%s\" at %s:%d\n", cudaGetErrorString(cudaGetLastError()), __FILE__, __LINE__);\
exit(EXIT_FAILURE);}} while(0)
#define ESTIMATED_MAX_DIMENSION 32
__constant__ int c_dimension;
__constant__ int c_vector_scheme[ESTIMATED_MAX_DIMENSION];
__constant__ bound_t c_vector_bounds[ESTIMATED_MAX_DIMENSION];
__constant__ ordinal_t c_vector_ordinal[ESTIMATED_MAX_DIMENSION];
__constant__ int c_sample_size;
__device__ float *d_vector_sample;
__device__ int d_round_sample_size;
__device__ float *d_round_vector_sample;
__device__ curandState *d_curand_states;
__device__ int d_vectors_ready;
// Host driver: repeatedly launches the sampling kernel ("rounds") until
// conf.sample_size accepted vectors exist or timeout_rounds is exceeded,
// then copies the accepted vectors back into result.vector_sample.
// NOTE(review): result.error is only assigned on the timeout path;
// callers presumably treat it as pre-initialized — confirm.
void generate_vector_sample(sampling_cofiguration_t& conf, sampling_result_t& result, int timeout_rounds)
{
int start_time_point = clock();
/* Copy some fields from conf into constant memory. */
conf.log() << "Preparing constant variables." << std::endl;
cudaMemcpyToSymbol(c_dimension, &conf.dimension, sizeof(int)); check_error();
cudaMemcpyToSymbol(c_sample_size, &conf.sample_size, sizeof(int)); check_error();
cudaMemcpyToSymbol(c_vector_scheme, conf.vector_scheme, conf.dimension*sizeof(int)); check_error();
cudaMemcpyToSymbol(c_vector_bounds, conf.vector_bounds, conf.dimension*sizeof(bound_t)); check_error();
cudaMemcpyToSymbol(c_vector_ordinal, conf.vector_ordinal, conf.dimension*sizeof(ordinal_t)); check_error();
/* Allocate device memory for the output sample; the device-side pointer
 * symbol is set via cudaMemcpyToSymbol so kernels can use it directly. */
conf.log() << "Allocate memory for vector sample." << std::endl;
float *dh_vector_sample;
cudaMalloc(&dh_vector_sample, conf.sample_size*conf.dimension*sizeof(float)); check_error();
cudaMemcpyToSymbol(d_vector_sample, &dh_vector_sample, sizeof(dh_vector_sample)); check_error();
/* Allocate device memory for the per-round candidate sample. */
conf.log() << "Allocate memory for round vector sample." << std::endl;
int blocks_per_round = conf.grid_dimension.x;
int vectors_per_block = conf.block_dimension.x;
int round_sample_size = blocks_per_round*vectors_per_block;
cudaMemcpyToSymbol(d_round_sample_size, &round_sample_size, sizeof(int)); check_error();
float *dh_round_vector_sample;
cudaMalloc(&dh_round_vector_sample, round_sample_size*conf.dimension*sizeof(float)); check_error();
cudaMemcpyToSymbol(d_round_vector_sample, &dh_round_vector_sample, sizeof(dh_round_vector_sample)); check_error();
/* Set up the per-thread cuRAND random number generators. */
conf.log() << "Setup CUDA random numbers generator." << std::endl;
curandState *dh_curand_states;
cudaMalloc(&dh_curand_states, round_sample_size*sizeof(curandState)); check_error();
cudaMemcpyToSymbol(d_curand_states, &dh_curand_states, sizeof(dh_curand_states)); check_error();
setup_gamma_generator<<<blocks_per_round, vectors_per_block>>>(clock()); check_error();
cudaDeviceSynchronize(); check_error();
/* Number of accepted sample vectors so far (device counter). */
int vectors_ready = 0;
cudaMemcpyToSymbol(d_vectors_ready, &vectors_ready, sizeof(vectors_ready)); check_error();
conf.log() << "Start round cycle." << std::endl;
int rounds = 0;
// One kernel launch per round; each round produces up to
// round_sample_size candidates and the kernel bumps d_vectors_ready.
while (vectors_ready < conf.sample_size)
{
generate_vector_sample_kernel<<<blocks_per_round, vectors_per_block>>>();
CUDA_CALL(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(&vectors_ready, d_vectors_ready, sizeof(vectors_ready)); check_error();
rounds++;
if (rounds > timeout_rounds)
{
conf.log() << "Round cycle is terminated (timeout)." << std::endl;
result.generated_vectors_number = vectors_ready;
result.error = SAMPLING_TIMEOUT;
break;
}
}
conf.log() << "Stop round cycle." << std::endl;
conf.log() << "Vectors generated: " << vectors_ready << "/" << conf.sample_size << "." << std::endl;
// Copy back only as many vectors as were actually produced (the counter
// may overshoot sample_size, so it is clamped on the else branch).
if (vectors_ready < conf.sample_size)
{
cudaMemcpy(result.vector_sample, dh_vector_sample, vectors_ready*conf.dimension*sizeof(float), cudaMemcpyDeviceToHost); check_error();
}
else
{
cudaMemcpy(result.vector_sample, dh_vector_sample, conf.sample_size*conf.dimension*sizeof(float), cudaMemcpyDeviceToHost); check_error();
}
cudaFree(dh_vector_sample);
cudaFree(dh_round_vector_sample);
cudaFree(dh_curand_states);
int end_time_point = clock();
float elapsed_time = ((float) (end_time_point - start_time_point))/CLOCKS_PER_SEC;
conf.log() << "Elapsed time: " << elapsed_time << " s." << std::endl;
result.elapsed_time = elapsed_time;
}
// One sampling round: each thread draws a candidate vector of Gamma(1,1)
// components (zero where the scheme says so), normalizes it to sum 1, and
// rejects it if any component violates its bounds or ordinal constraints.
// Accepted vectors are appended to d_vector_sample via an atomic counter.
__global__
void generate_vector_sample_kernel()
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int dimension = c_dimension;
int i;
float value;
float than_value;
float sum = 0;
int last_vectors_ready;
ordinal_t ordinal;
bound_t bound;
bool eliminate = false;
// Draw raw components; scheme value 0 forces the component to zero.
for (i = 0; i < dimension; i++)
{
if (c_vector_scheme[i] == 0)
value = 0;
else
value = generate_gamma_1_1(d_curand_states + idx);
sum += value;
d_round_vector_sample[dimension*idx + i] = value;
}
// NOTE(review): when sum == 0 (all components zeroed by the scheme) the
// vector skips normalization AND all constraint checks, so the all-zero
// vector is accepted — confirm this is intended.
if (sum != 0)
{
// Normalize to the unit simplex and check per-component bounds.
for (i = 0; i < dimension; i++)
{
value = d_round_vector_sample[dimension*idx + i];
value /= sum;
d_round_vector_sample[dimension*idx + i] = value;
bound = c_vector_bounds[i];
eliminate = eliminate || value < bound.left || value > bound.right;
}
// Check pairwise ordering (less/more than the referenced component).
for (i = 0; i < dimension; i++)
{
value = d_round_vector_sample[dimension*idx + i];
ordinal = c_vector_ordinal[i];
than_value = d_round_vector_sample[dimension*idx + ordinal.than_index];
eliminate = eliminate ||
(ordinal.ordinal == ORDINAL_LESS && value >= than_value) ||
(ordinal.ordinal == ORDINAL_MORE && value <= than_value);
}
}
if (!eliminate)
{
// Reserve an output slot; slots past c_sample_size are discarded.
last_vectors_ready = atomicAdd(&d_vectors_ready, 1);
if (last_vectors_ready < c_sample_size)
{
for (i = 0; i < dimension; i++)
d_vector_sample[dimension*last_vectors_ready + i] = d_round_vector_sample[dimension*idx + i];
}
}
}
/* One-time cuRAND initialization: every thread gets its own state with a
 * shared seed but a distinct subsequence id, so the streams differ. */
__global__
void setup_gamma_generator(long seed)
{
    const int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= d_round_sample_size)
        return;
    curand_init(seed, gid, 0, &d_curand_states[gid]);
}
/*
 * Generates a random variate from the Gamma(1,1) distribution.
 */
/*
 * Generates a Gamma(1,1)-distributed variate by rejection sampling from a
 * transformed normal proposal. The constants match d = 2/3 throughout
 * (NOTE(review): consistent with a Marsaglia–Tsang-style sampler for
 * shape alpha = 1, where d = alpha - 1/3 — confirm against the source
 * derivation). Works on a private copy of the cuRAND state and writes the
 * advanced state back so the caller's stream progresses.
 */
__device__ __forceinline__
float generate_gamma_1_1(curandState *state)
{
curandState localState = *state;
float c, z, u, v, result;
// c = 1 / sqrt(9*d) with d = 2/3
c = 1/sqrtf(9*2/3.);
do {
// z: standard normal proposal; u: uniform for the acceptance test.
z = curand_normal(&localState);
u = curand_uniform(&localState);
// v = (1 + c*z)^3; must be positive, hence the z <= -1/c rejection.
v = powf(1 + c*z, 3);
} while (z <= (-1/c) || logf(u) >= (0.5*z*z + 2/3. - (2/3.)*v + (2/3.)*logf(v)));
result = (2/3.)*v;
// Persist the advanced RNG state for the next call.
*state = localState;
return result;
}
21,366 | #include "includes.h"
// Elementwise scaling: c[i] = a[i] * b for the first `taille` elements,
// one element per thread; out-of-range threads do nothing.
__global__ void prod( int taille, float * a, float b, float *c ){
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < taille)
        c[i] = a[i] * b;
}
21,367 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#include <stdio.h>
#define TILE_SIZE 16
__global__ void mysgemm(int m, int n, int k, const float *A, const float *B, float* C) {
    /********************************************************************
     *
     * Compute C = A x B
     * where A is a (m x k) matrix
     * where B is a (k x n) matrix
     * where C is a (m x n) matrix
     *
     * Shared-memory tiling: each block computes one TILE_SIZE x TILE_SIZE
     * tile of C, streaming matching tiles of A and B through shared
     * memory; out-of-range lanes load 0 so partial tiles contribute
     * nothing to the inner product.
     *
     * Fixes vs. the previous version:
     *  - the B-tile guard tested `Col < k`, but the column index of B
     *    (and C) is bounded by n — wrong results whenever n != k;
     *  - the phase count now uses ceiling division instead of
     *    k/TILE_SIZE + 1, which ran one useless all-zero phase when k is
     *    an exact multiple of TILE_SIZE.
     *
     ********************************************************************/
    __shared__ float As[TILE_SIZE][TILE_SIZE];
    __shared__ float Bs[TILE_SIZE][TILE_SIZE];
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int Row = blockIdx.y * TILE_SIZE + ty; // row of C and A, valid when < m
    int Col = blockIdx.x * TILE_SIZE + tx; // column of C and B, valid when < n
    float Cvalue = 0.0;
    int phases = (k + TILE_SIZE - 1) / TILE_SIZE; // tiles along inner dim k
    for (int i = 0; i < phases; i++) {
        if ((Row < m) && (i*TILE_SIZE + tx < k)) {
            As[ty][tx] = A[Row*k + i*TILE_SIZE + tx];
        }
        else {
            As[ty][tx] = 0.0;
        }
        if ((i*TILE_SIZE + ty < k) && (Col < n)) {
            Bs[ty][tx] = B[(i*TILE_SIZE + ty) * n + Col];
        }
        else {
            Bs[ty][tx] = 0.0;
        }
        __syncthreads(); // tiles fully loaded before use
        for (int j = 0; j < TILE_SIZE; ++j) {
            Cvalue += As[ty][j] * Bs[j][tx];
        }
        __syncthreads(); // done reading tiles before the next load
    }
    if ((Row < m) && (Col < n)) {
        C[Row*n + Col] = Cvalue;
    }
}
/*
 * Host wrapper for mysgemm: C(m x n) = A(m x k) * B(k x n).
 * Only the plain case is supported — no transposition, alpha == 1 and
 * beta == 0; anything else is rejected with a message.
 *
 * BUG FIX: the grid dimensions were swapped. The kernel maps blockIdx.x
 * to columns of C (bounded by n) and blockIdx.y to rows (bounded by m),
 * so the grid must be (blocks over n, blocks over m); it was previously
 * built as (blocks over m, blocks over n), producing truncated/partial
 * results whenever m != n.
 */
void basicSgemm(char transa, char transb, int m, int n, int k, float alpha, const float *A, int lda, const float *B, int ldb, float beta, float *C, int ldc)
{
    if ((transa != 'N') && (transa != 'n')) {
        printf("unsupported value of 'transa'\n");
        return;
    }
    if ((transb != 'N') && (transb != 'n')) {
        printf("unsupported value of 'transb'\n");
        return;
    }
    if ((alpha - 1.0f > 1e-10) || (alpha - 1.0f < -1e-10)) {
        printf("unsupported value of alpha\n");
        return;
    }
    if ((beta - 0.0f > 1e-10) || (beta - 0.0f < -1e-10)) {
        printf("unsupported value of beta\n");
        return;
    }
    // Initialize thread block and kernel grid dimensions ---------------------
    const unsigned int BLOCK_SIZE = TILE_SIZE;
    // Integer ceiling division: one extra block covers a partial tile.
    const int num_blocks_m = (m + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const int num_blocks_n = (n + BLOCK_SIZE - 1) / BLOCK_SIZE;
    dim3 dimGrid(num_blocks_n, num_blocks_m); // x: columns (n), y: rows (m)
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    // Invoke CUDA kernel -----------------------------------------------------
    mysgemm<<<dimGrid, dimBlock>>>(m, n, k, A, B, C);
}
|
21,368 | #include "includes.h"
// Backward pass of a 2-D nearest-neighbour distance: for each point p of
// xyz1 and its matched point q = idx1[p] in xyz2, scatter the gradient of
// the squared distance (2 * grad * (p - q)) into grad_xyz1 and the
// negated gradient into grad_xyz2. atomicAdd is required because several
// points of xyz1 may share the same nearest neighbour in xyz2.
// Grid layout: blockIdx.x strides over batches, (blockIdx.y, threadIdx.x)
// stride over the n points of each batch.
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
    for (int batch = blockIdx.x; batch < b; batch += gridDim.x){
        for (int p = threadIdx.x + blockIdx.y*blockDim.x; p < n; p += blockDim.x*gridDim.y){
            const float px = xyz1[(batch*n+p)*2+0];
            const float py = xyz1[(batch*n+p)*2+1];
            const int q = idx1[batch*n+p];
            const float qx = xyz2[(batch*m+q)*2+0];
            const float qy = xyz2[(batch*m+q)*2+1];
            const float g = grad_dist1[batch*n+p]*2;
            atomicAdd(&(grad_xyz1[(batch*n+p)*2+0]), g*(px-qx));
            atomicAdd(&(grad_xyz1[(batch*n+p)*2+1]), g*(py-qy));
            atomicAdd(&(grad_xyz2[(batch*m+q)*2+0]), -(g*(px-qx)));
            atomicAdd(&(grad_xyz2[(batch*m+q)*2+1]), -(g*(py-qy)));
        }
    }
}
21,369 | #define VS
//#define MGPU
#ifdef VS
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#endif
#ifdef MGPU
#include<cuda.h>
#endif
#include <stdio.h>
#include<stdlib.h>
//#define DEBUG_MATRIX
#define DEBUG
//Dal profiler nvida per il kernel vengono usati 13 registri (si deve passare sulla multiGPU)
void initializeMatrix(int *array, int matrixSize);
void printMatrix(int *matrix, int rows, int cols);
__global__ void sumMatrixGPU(int *a, int*b, int *c, int rows, int cols);
void sumMatrixCPU(int *a, int*b, int *c, int rows, int cols);
void matrixEqualCheck(int *mHost, int *mDevice, int cols, int rows);
// Interactive driver: reads the square-matrix size N and the thread-block
// side Nt, sums two N x N matrices on both GPU and CPU, times both with
// CUDA events, and verifies the results match.
int main()
{
int *aHost, *bHost, *cHost, *rHost;
int *aDevice, *bDevice, *cDevice;
int size, N;
dim3 gridDim, blockDim;
int matrixSize;
cudaEvent_t start, stop;
float elapsed;
printf("Inserisci la size della matrice quadrata(N):");
fflush(stdout);
scanf("%d",&N);
printf("Inserisci la size dei blocchi di thread Nt (NtxNt):");
fflush(stdout);
// NOTE(review): blockDim.x is unsigned int; "%u" would be the matching
// scanf conversion rather than "%d" — confirm.
scanf("%d", &blockDim.x);
blockDim.y = blockDim.x;
matrixSize = N * N;
// Determine the exact number of blocks (round up on partial blocks).
gridDim.x = N / blockDim.x + ((N % blockDim.x) == 0 ? 0 : 1);
gridDim.y = N / blockDim.y + ((N % blockDim.y) == 0 ? 0 : 1);
size = matrixSize * sizeof(int);
#ifdef DEBUG
printf("Size della matrice: %d\n", matrixSize);
printf("Numero totale di blocchi: %d (%d,%d)\n",gridDim.x*gridDim.y, gridDim.x, gridDim.y);
printf("Numero totale dei Thread per blocco: %d (%d,%d)\n",blockDim.x*blockDim.y, blockDim.x, blockDim.y);
#endif
// Allocate host memory (result buffers zero-initialized).
aHost = (int*)malloc(size);
bHost = (int*)malloc(size);
rHost = (int*)malloc(size);
cHost = (int*)calloc(matrixSize, sizeof(int)); //Zero the arrays that collect the result
// Allocate device memory.
cudaMalloc((void **)&aDevice, size);
cudaMalloc((void **)&bDevice, size);
cudaMalloc((void **)&cDevice, size);
// Zero the device result buffer.
cudaMemset(cDevice, 0, size);
// Initialize the input matrices.
initializeMatrix(aHost, matrixSize);
initializeMatrix(bHost, matrixSize);
// Copy input data from host to device.
cudaMemcpy(aDevice, aHost, size, cudaMemcpyHostToDevice);
cudaMemcpy(bDevice, bHost, size, cudaMemcpyHostToDevice);
// Parallel (GPU) sum, timed with CUDA events.
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
sumMatrixGPU <<<gridDim,blockDim>>> (aDevice, bDevice, cDevice, N, N);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
// Event cleanup.
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Tempo per la somma di matrici GPU:%f ms\n", elapsed);
// Serial (CPU) sum, timed with the same event mechanism.
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
sumMatrixCPU(aHost, bHost, cHost, N, N);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
elapsed = 0;
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("Tempo per la somma di matrici CPU:%f ms\n", elapsed);
// Fetch the GPU result for comparison.
cudaMemcpy(rHost, cDevice, size, cudaMemcpyDeviceToHost);
#ifdef DEBUG_MATRIX
printf("Host\n");
printMatrix(cHost, N, N);
printf("Device\n");
printMatrix(rHost, N, N);
#endif
matrixEqualCheck(cHost,rHost,N,N);
// NOTE(review): these events were already destroyed above — this second
// destroy pair acts on stale handles.
cudaEventDestroy(start); cudaEventDestroy(stop);
// Free host memory.
free(aHost); free(bHost); free(cHost); free(rHost);
// Free device memory.
cudaFree(aDevice); cudaFree(bDevice); cudaFree(cDevice);
}
// Fill the first matrixSize entries of the array with their own index
// (0, 1, 2, ...), giving a deterministic test pattern.
void initializeMatrix(int *array, int matrixSize)
{
    for (int idx = 0; idx < matrixSize; idx++)
        array[idx] = idx;
}
// Element-wise matrix sum c = a + b over a rows x cols matrix, one thread
// per element; threads in the partial blocks at the grid edge do nothing.
__global__ void sumMatrixGPU(int *a, int*b, int *c, int rows, int cols)
{
    const int r = (blockDim.x*blockIdx.x) + threadIdx.x;
    const int q = (blockDim.y*blockIdx.y) + threadIdx.y;
    if (r >= rows || q >= cols)
        return;
    // rows == cols in this program, so the two axes are interchangeable.
    const int pos = (r*cols) + q;
    c[pos] = a[pos] + b[pos];
}
// Print the matrix row by row, but only when it is small (< 10x10);
// larger matrices just get the trailing blank line, as before.
void printMatrix(int *matrix, int rows, int cols)
{
    if (rows < 10 && cols < 10)
    {
        for (int r = 0; r < rows; r++)
        {
            for (int q = 0; q < cols; q++)
                printf("%d ", matrix[(r*cols) + q]);
            printf("\n");
        }
        printf("\n");
    }
    printf("\n");
}
// Reference CPU implementation of the element-wise matrix sum c = a + b;
// the matrices are treated as flat arrays of rows*cols ints.
void sumMatrixCPU(int *a, int*b, int *c, int rows, int cols)
{
    const int total = rows * cols;
    for (int idx = 0; idx < total; idx++)
        c[idx] = a[idx] + b[idx];
}
// Compare the host and device result matrices element by element and
// report on stdout whether they are identical.
void matrixEqualCheck(int *mHost, int *mDevice, int cols, int rows)
{
    const int total = cols * rows;
    int matches = 0;
    for (int idx = 0; idx < total; idx++)
    {
        if (mHost[idx] == mDevice[idx])
            matches++;
    }
    if (matches == total)
        printf("Le matrici sono uguali\n");
    else
        printf("Le matrici sono diverse\n");
}
|
21,370 | #include <stdio.h>
#include <fstream>
#include <iostream>
#include <stdlib.h>
using namespace std;
// Build an undirected weighted graph from "input.txt".
// Expected format: first line "n m" (vertices, edges), then m lines of
// "u v weight". Vertices are 0-based indices into an n x n adjacency matrix.
int main(){
    cout << "hello world";
    FILE* pFile = fopen("input.txt", "r");
    if(pFile == NULL){
        cout << "Fam u can't do that stop";
        return 0;
    }
    // read in first line with n and m
    // BUG FIX: fscanf results were unchecked; bad input left n and m
    // uninitialized and sized the VLA with garbage.
    int n, m;
    if(fscanf(pFile, "%d %d", &n, &m) != 2 || n <= 0 || m < 0){
        cout << "Bad header line in input.txt";
        fclose(pFile);
        return 0;
    }
    cout << "Settings: " << n << " " << m << endl;
    // Adjacency matrix (VLA, a GNU extension the file already relied on).
    // BUG FIX: it was never initialized, so cells without an edge held
    // garbage; zero them so "no edge" reads as weight 0.
    int adj_mat[n][n];
    for(int i = 0; i < n; i++)
        for(int j = 0; j < n; j++)
            adj_mat[i][j] = 0;
    // iterate thru all edges
    for(int i = 0; i < m; i++){
        int n1, n2, weight;
        if(fscanf(pFile, "%d %d %d", &n1, &n2, &weight) != 3)
            break;                       // malformed/short edge list: stop
        if(n1 < 0 || n1 >= n || n2 < 0 || n2 >= n)
            continue;                    // skip out-of-range vertex ids
        adj_mat[n1][n2] = weight;        // undirected: store both directions
        adj_mat[n2][n1] = weight;
    }
    fclose(pFile);                       // BUG FIX: was leaked on success
    // adjacency graph built
    return 0;
}
21,371 | #include "includes.h"
__global__ void dyadicAdd(int * counter, const int length, const int shift)
{
if (shift > 0) {
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
int adds = 2*shift;
int Index = adds*(xIndex+1)-1;
if (Index < length) {
counter[Index] = counter[Index] + counter[Index-shift];
}
}
} |
21,372 | /*
* This sample implements a separable convolution
* of a 2D image with an arbitrary filter.
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/time.h>
#define FILTER_RADIUS 16
#define FILTER_LENGTH (2 * FILTER_RADIUS + 1)
#define ABS(val) ((val)<0.0 ? (-(val)) : (val))
#define accuracy 5e-4
#define TILE_WIDTH 96
#define TILE_HEIGHT 96
__constant__ float Filter[FILTER_LENGTH]; // gia na apothikeusw tous suntelestes tou filtrou
////////////////////////////////////////////////////////////////////////////////
// Reference row convolution filter
////////////////////////////////////////////////////////////////////////////////
// Reference row (horizontal) convolution on the host.
// h_Dst: output image; h_Src: padded input image (imageW x imageH);
// h_Filter: 2*filterR+1 taps, applied reversed (h_Filter[filterR - k]).
// Only interior pixels x,y in [filterR, dim-filterR) are written; the
// padded border of h_Dst is left untouched.
void convolutionRowCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
    int x, y, k;
    for (y = filterR; y < imageH-filterR; y++) {
        for (x = filterR; x < imageW-filterR; x++) {
            float sum = 0;
            for (k = -filterR; k <= filterR; k++) {
                int d = x + k;
                sum += h_Src[y * imageW + d] * h_Filter[filterR - k];
            }
            // Fix: store once per pixel. The original wrote the partial
            // sum on every tap of the inner loop -- same final value,
            // but 2*filterR redundant stores per pixel.
            h_Dst[y * imageW + x] = sum;
        }
    }
}
////////////////////////////////////////////////////////////////////////////////
// Reference column convolution filter
////////////////////////////////////////////////////////////////////////////////
// Reference column (vertical) convolution on the host.
// Same contract as convolutionRowCPU, but taps run down the column:
// h_Src[(y+k) * imageW + x].  Only interior pixels are written.
void convolutionColumnCPU(float *h_Dst, float *h_Src, float *h_Filter,
int imageW, int imageH, int filterR) {
    int x, y, k;
    for (y = filterR; y < imageH-filterR; y++) {
        for (x = filterR; x < imageW-filterR; x++) {
            float sum = 0;
            for (k = -filterR; k <= filterR; k++) {
                int d = y + k;
                sum += h_Src[d * imageW + x] * h_Filter[filterR - k];
            }
            // Fix: store once per pixel instead of on every tap of the
            // inner loop (same final value, redundant stores removed).
            h_Dst[y * imageW + x] = sum;
        }
    }
}
///// Reference row convolution filter in GPU /////
__global__ void row_Kernel(float *Dst, float *Src, float *filter,int imageW, int imageH, int filterR) {
int k;
float sum=0; //value to store the element of the matrix that is computed by the thread
if(!((threadIdx.x+blockDim.x*blockIdx.x) < filterR || (threadIdx.x+blockDim.x*blockIdx.x) >= imageH-filterR)){
for(k = -filterR; k<=filterR; k++){
int d = threadIdx.x + k+blockDim.x * blockIdx.x;
sum += Src[(blockIdx.y * blockDim.y + threadIdx.y)*imageW + d]*filter[filterR-k];
}
}
Dst[(blockIdx.y * blockDim.y + threadIdx.y)*imageW + threadIdx.x + blockDim.x*blockIdx.x] = sum;
}
///// Reference column convolution filter in GPU /////
__global__ void column_Kernel(float *Dst, float *Src, float *filter,int imageW, int imageH, int filterR) {
int k;
float sum=0; //value to store the element of the matrix that is computed by the thread
if(!((threadIdx.y+blockDim.y*blockIdx.y) < filterR || (threadIdx.y+blockDim.y*blockIdx.y) >= imageH-filterR)){
for(k = -filterR; k<=filterR; k++){
int d = k+ (blockIdx.y * blockDim.y + threadIdx.y);
sum += Src[d*imageW + threadIdx.x+blockDim.x * blockIdx.x]*filter[filterR-k];
}
}
Dst[(blockIdx.y * blockDim.y + threadIdx.y)*imageW + threadIdx.x+blockDim.x*blockIdx.x] = sum;
}
///// Reference tiled row convolution filter in GPU /////
__global__ void tiled_row_Kernel(float *Dst, float *Src,int imageW, int imageH, int filterR) {
int k;
float sum=0; //value to store the element of the matrix that is computed by the thread
// allocate 1D tile in __shared__ memory
__shared__ float data[TILE_HEIGHT * (FILTER_LENGTH - 1 +TILE_WIDTH)];
//h global adress autou tou thread
const int adr = (blockIdx.y * blockDim.y + threadIdx.y)*imageW + (threadIdx.x +blockDim.x * blockIdx.x);
//xrhsimopoiw to shift gia na kinoumaste katallhla afou twra exoume 1D tile
//kai na grafoume sth swsth thesh
const int shift = threadIdx.y * (TILE_WIDTH + FILTER_LENGTH -1 );
// bash eikonas
const int x0 = threadIdx.x + (blockDim.x * blockIdx.x);
//periptwseis gia na feroume sth share mem ta swsta kommatia tile apo to Src.elegxoume an h eikona einai mesa sta oria pou theloume
//periptwsh 1
if(!(x0 < filterR)){
data[threadIdx.x + shift ] = Src[ adr - filterR];
}
else{
data[threadIdx.x + shift ] = 0;
}
//periptwsh 2
if(!( x0 >= imageH - filterR)){
data[ threadIdx.x + blockDim.x + shift ] = Src[ adr + filterR];
}
else{
data[ threadIdx.x + blockDim.x + shift ] = 0;
}
__syncthreads(); //barrier giati theloume na fortwsoume ola ta dedomena apo thn global protou proxwrhsoume
//convolution
for ( k = -filterR; k <= filterR; k++){
sum += data[ threadIdx.x + filterR + k + shift ] * Filter[ filterR - k];
}
Dst[ adr ] = sum;
}
///// Reference tiled_column convolution filter in GPU /////
__global__ void tiled_column_Kernel(float *Dst, float *Src,int imageW, int imageH, int filterR) {
int k;
float sum=0; //value to store the element of the matrix that is computed by the thread
// allocate 1D tile in __shared__ memory
__shared__ float data[TILE_WIDTH * (TILE_HEIGHT + FILTER_LENGTH - 1)];
//h global adress autou tou thread
const int adr = (blockIdx.y * blockDim.y + threadIdx.y)*imageW + (threadIdx.x +blockDim.x * blockIdx.x);
//xrhsimopoiw to shift gia na kinoumaste katallhla afou twra exoume 1D tile
//kai na grafoume sth swsth thesh
//shift_1 gia thn panw periptwsh(periptwsh 1)
//shift_2 gia thn katw periptwsh(periptwsh 2)
const int shift_1 = threadIdx.y * (TILE_WIDTH);
const int shift_2 = shift_1 + (blockDim.y * TILE_WIDTH);
//bash eikonas
const int y0 = threadIdx.y + (blockIdx.y * blockDim.y);
//periptwseis gia na feroume sth share mem ta swsta kommatia tile apo to Src.elegxoume an h eikona einai mesa sta oria pou theloume
//periptwsh 1
if(!(y0 < filterR)){
data[ threadIdx.x + shift_1 ] = Src[ adr - (imageW * filterR)];
}
else {
data[ threadIdx.x + shift_1 ] = 0;
}
//periptwsh 2
if(!(y0 >= imageH - filterR)){
data[ threadIdx.x + shift_2 ] = Src[ adr + (imageW * filterR)];
}
else{
data[ threadIdx.x + shift_2 ] = 0;
}
__syncthreads(); //barrier giati theloume na fortwsoume ola ta dedomena apo thn global protou proxwrhsoume
//convolution
for (k = -filterR; k <= filterR; k++){
sum += data[ (threadIdx.y + filterR) * TILE_WIDTH + threadIdx.x + (k * TILE_WIDTH)] * Filter[ filterR - k ];
}
Dst[ adr ] = sum;
}
////////////////////////////////////////////////////////////////////////////////
// Main program
////////////////////////////////////////////////////////////////////////////////
// Benchmark driver: runs the separable convolution on the CPU, then on the
// GPU with the plain kernels (10 iterations, averaged) and with the tiled
// shared-memory kernels (10 iterations, averaged), and finally checks the
// plain-GPU result against the CPU reference within 'accuracy'.
int main(int argc, char **argv) {
float
*h_Filter,
*h_Input,
*h_Buffer,
*h_OutputCPU,
*d_Input, // input image on the device
*d_OutputGPU1,*d_OutputGPU2, // device results (plain / tiled)
*d_Filter, // filter on the device
*h_OutputGPU1,*h_OutputGPU2, // results copied back from the GPU to the host
*d_Buffer; // device buffer for the intermediate row->column result
int imageW,newW;
int imageH,newH;
unsigned int i,j;
cudaEvent_t start_GPU1,start_GPU2; // events for timing the GPU
cudaEvent_t stop_GPU1,stop_GPU2; // events for timing the GPU
float elapsed_GPU1,elapsed_GPU2; // per-iteration GPU times
float average_1=0; // average GPU time without tiling
float average_2=0; // average GPU time with tiling
timeval t1; // CPU timing, start
timeval t2; // CPU timing, stop
double elapsed_CPU; // CPU time
cudaError_t err; // status of cudaMalloc / cudaMemcpy calls
// imageW and imageH come from the user and are assumed equal
// (imageW = imageH = N); only square images are supported for simplicity.
if(argc <= 1){
printf("I have to terminate because you didnt enter image size.Pls try again \n");
return 1;
}
imageW=atoi(argv[1]);
printf("You entered image size. Should be a power of two and greater than %d\n", FILTER_LENGTH);
if( imageW <= FILTER_LENGTH){
printf("I have to terminate because you enter image smaller than filter\n");
return 1;
}
imageH = imageW;
// padded dimensions: a FILTER_RADIUS border on every side
newH = imageH + FILTER_LENGTH -1;
newW = imageW + FILTER_LENGTH -1;
printf("Image Width x Height = %i x %i\n\n", imageW, imageH);
printf("Allocating and initializing host arrays...\n");
// It would be a good idea to also check the result of each malloc...
h_Filter = (float *)malloc(FILTER_LENGTH * sizeof(float));
h_Input = (float *)malloc(newW * newH * sizeof(float));
h_Buffer = (float *)malloc(newW * newH * sizeof(float));
h_OutputCPU = (float *)malloc(newW * newH * sizeof(float));
h_OutputGPU1 = (float *)malloc(newW * newH * sizeof(float));
h_OutputGPU2 = (float *)malloc(newW * newH * sizeof(float));
// if either memory allocation failed, report an error message
if(h_Filter == 0 || h_Input == 0 || h_Buffer == 0 || h_OutputCPU == 0 || h_OutputGPU1 == 0 || h_OutputGPU2 == 0 )
{
printf("couldn't allocate memory\n");
return 1;
}
cudaSetDevice(0/*cutGetMaxGflopsDevice()*/);
// Allocate memory on device
// NOTE(review): err is overwritten by every call below, so only the LAST
// cudaMalloc is actually checked by the test that follows.
err = cudaSuccess;
err = cudaMalloc((void**)&d_Input, newW * newH * sizeof(float));
err = cudaMalloc((void**)&d_OutputGPU1, newW * newH * sizeof(float));
err = cudaMalloc((void**)&d_OutputGPU2, newW * newH * sizeof(float));
err = cudaMalloc((void**)&d_Filter, ((2*FILTER_RADIUS)+1)*sizeof(float));
err = cudaMalloc((void**)&d_Buffer, newW * newH * sizeof(float));
// if either memory allocation failed, report an error message
if( err != cudaSuccess) {
printf("CUDA Error in allocation memory on device: %s\n", cudaGetErrorString(err));
return 1;
}
// 'h_Filter' holds the convolution coefficients and is initialized with
// random floats; 'h_Input' is the (zero-padded) image being convolved,
// its interior also filled with random floats.
srand(time(NULL));
for (i = 0; i < FILTER_LENGTH; i++)
{
h_Filter[i] = (float)(rand() / (float)RAND_MAX);
}
// NOTE(review): the initialization below mixes newW and newH as the row
// stride (j*newW+i vs j*newH+i); harmless only because newW == newH for
// square images -- confirm before supporting non-square inputs.
for (i = 0; i < newW; i++){
for(j=0; j < newH; j++){
if(i<FILTER_RADIUS || j<FILTER_RADIUS || i >= (imageW+FILTER_RADIUS) || j>=(imageH+FILTER_RADIUS)){
h_Input[j*newW+i] = 0;
}
else{
h_Input[j*newH+i] = (float)(rand()/(float)RAND_MAX);
}
h_Buffer[j*newW+i]=0;
h_OutputCPU[j*newH+i]=0;
}
}
// CPU reference computation; the GPU results are compared against this.
printf("CPU computation...\n");
///// cpu timing, start /////
gettimeofday(&t1, NULL);
convolutionRowCPU(h_Buffer, h_Input, h_Filter, newW, newH, FILTER_RADIUS); // row convolution
convolutionColumnCPU(h_OutputCPU, h_Buffer, h_Filter, newW, newH, FILTER_RADIUS); // column convolution
///// cpu timing, stop /////
gettimeofday(&t2, NULL);
elapsed_CPU = (t2.tv_sec - t1.tv_sec) + ((t2.tv_usec - t1.tv_usec)/1000000.0);
printf("CPU elapsed time:%f sec\n",elapsed_CPU);
// GPU computation; any element differing from the CPU result by more than
// 'accuracy' terminates the program with an error.
printf("GPU computation...\n");
//Load h_Input and h_Filter to device memory
err = cudaMemcpy(d_Input,h_Input,newW * newH * sizeof(float),cudaMemcpyHostToDevice);
err = cudaMemcpy(d_Filter,h_Filter,((2*FILTER_RADIUS)+1)*sizeof(float),cudaMemcpyHostToDevice);
err = cudaMemcpyToSymbol(Filter,h_Filter,((2*FILTER_RADIUS)+1)*sizeof(float),0,cudaMemcpyHostToDevice);
if( err != cudaSuccess) {
printf("CUDA Error in loading memory on device: %s\n", cudaGetErrorString(err));
return 1;
}
// Kernel Invocation
// Setup the execution configuration
dim3 dimBlock;
dimBlock.x=16;
dimBlock.y=16;
dim3 dimGrid(newW/dimBlock.x,newH/dimBlock.y);
dim3 dimBlock_tiled;
dimBlock_tiled.x=32;
dimBlock_tiled.y=32;
dim3 dimGrid_tiled(newW/dimBlock_tiled.x,newH/dimBlock_tiled.y);
///// cuda events for gpu time calculation /////
cudaEventCreate(&start_GPU1);
cudaEventCreate(&stop_GPU1);
//Launch the device
cudaFuncSetCacheConfig(row_Kernel,cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(column_Kernel,cudaFuncCachePreferL1);
// 10 timed iterations of the plain (non-tiled) kernels, averaged below
for(int i = 0; i < 10; ++ i){
cudaEventRecord(start_GPU1, 0);
cudaDeviceSynchronize();
row_Kernel<<<dimGrid,dimBlock>>>(d_Buffer,d_Input,d_Filter,newW,newH,FILTER_RADIUS);
cudaDeviceSynchronize();
column_Kernel<<<dimGrid,dimBlock>>>(d_OutputGPU1,d_Buffer,d_Filter,newW,newH,FILTER_RADIUS);
cudaDeviceSynchronize();
///// cuda events for gpu time calculation /////
cudaEventRecord(stop_GPU1, 0);
cudaEventSynchronize(stop_GPU1);
cudaEventElapsedTime(&elapsed_GPU1, start_GPU1, stop_GPU1);
average_1 += elapsed_GPU1;
}
average_1 /= 10;
cudaEventDestroy(start_GPU1);
cudaEventDestroy(stop_GPU1);
///// cuda events for tiled gpu time calculation /////
cudaEventCreate(&start_GPU2);
cudaEventCreate(&stop_GPU2);
//Launch the tiled device
cudaFuncSetCacheConfig(tiled_row_Kernel,cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(tiled_column_Kernel,cudaFuncCachePreferShared);
// 10 timed iterations of the tiled shared-memory kernels, averaged below
for(int i = 0; i < 10; ++i ){
cudaEventRecord(start_GPU2, 0);
cudaDeviceSynchronize();
tiled_row_Kernel<<<dimGrid_tiled,dimBlock_tiled>>>(d_Buffer,d_Input,newW,newH,FILTER_RADIUS);
cudaDeviceSynchronize();
tiled_column_Kernel<<<dimGrid_tiled,dimBlock_tiled>>>(d_OutputGPU2,d_Buffer,newW,newH,FILTER_RADIUS);
cudaDeviceSynchronize();
///// cuda events for tiled gpu time calculation /////
cudaEventRecord(stop_GPU2, 0);
cudaEventSynchronize(stop_GPU2);
cudaEventElapsedTime(&elapsed_GPU2, start_GPU2, stop_GPU2);
average_2 += elapsed_GPU2 ;
}
average_2 /= 10;
cudaEventDestroy(start_GPU2);
cudaEventDestroy(stop_GPU2);
// ask CUDA for the last error to occur (if one exists)
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess) {
printf("CUDA Error: %s\n", cudaGetErrorString(error));
return 1;
}
//Read d_OutputGPU1 and d_OutputGPU2 from the device
err = cudaMemcpy(h_OutputGPU1,d_OutputGPU1,newW * newH * sizeof(float),cudaMemcpyDeviceToHost);
err = cudaMemcpy(h_OutputGPU2,d_OutputGPU2,newW * newH * sizeof(float),cudaMemcpyDeviceToHost);
if( err != cudaSuccess) {
printf("CUDA Error in reading memory from device: %s\n", cudaGetErrorString(err));
return 1;
}
printf("GPU elapsed time:%f sec \n ",average_1/1000);
printf("--tiled--GPU elapsed time:%f sec \n ",average_2/1000);
printf("1.GPU:%d=%f\n",imageW * imageH-1,h_OutputGPU1[imageW * imageH-1]);
printf("2.--tiled--GPU:%d=%f\n",imageW * imageH-1,h_OutputGPU2[imageW * imageH-1]);
printf("3.CPU:%d=%f\n",imageW * imageH-1,h_OutputCPU[imageW * imageH-1]);
// CPU Vs GPU (comparison) //
// Only the plain-GPU result is checked; the tiled result is printed above
// but never validated against the CPU reference.
for(i = 0; i< newW * newH; i++){
if ( ABS(h_OutputCPU[i] - h_OutputGPU1[i]) >= accuracy) {
printf("ERROR at element i:%d , accuracy error so i have to terminate sorry \n",i);
return 1;
}
}
// free all the allocated memory
free(h_OutputCPU);
free(h_Buffer);
free(h_Input);
free(h_Filter);
free(h_OutputGPU1);
free(h_OutputGPU2);
cudaFree(d_Input);
cudaFree(d_Filter);
cudaFree(d_OutputGPU1);
cudaFree(d_OutputGPU2);
cudaFree(d_Buffer);
printf("success !!!! \n");
// Do a device reset just in case... (remove the comment once CUDA is implemented)
cudaDeviceReset();
return 0;
}
|
21,373 | #include "includes.h"
__global__ void cal_hist(float *da, int *hist_da, int N, int M){
int bx = blockIdx.x;
int tx = threadIdx.x;
int idx = bx * blockDim.x + tx;
if(idx < N){
// add a lock here to make sure this (read, write) operation atomic.
atomicAdd(&hist_da[(int)da[idx]], 1);
//hist_da[(int)da[idx]] += 1;
}
} |
21,374 | /*
Implementing Radix sort in CUDA.
*/
#include <stdio.h>
#include <stdlib.h>
#define NUM_ELEMENTS 16
__device__ void partition_by_bit(unsigned int* values, unsigned int bit);
// LSD radix sort over all 32 bits.  Each pass performs a stable partition
// on one bit, so after the final pass the elements are fully sorted.
// NOTE(review): partition_by_bit scans across blockDim.x threads only, so
// this kernel is correct only when launched with a single block whose
// thread count equals the element count (as main() does).
__global__ void radix_sort(unsigned int* d_array){
for(int bit = 0; bit < 32; bit++){
partition_by_bit(d_array, bit);
__syncthreads(); // finish this bit's pass before examining the next bit
}
}
// In-place Hillis-Steele inclusive prefix sum over one block's data.
// Returns this thread's inclusive prefix sum.  Contains __syncthreads(),
// so every thread of the block must call it.
__device__ unsigned int plus_scan(unsigned int* bit_array){
unsigned int idx = threadIdx.x;
unsigned int size = blockDim.x;
for(int offset = 1; offset < size; offset *= 2){
unsigned int array_offset;
// phase 1: read the partner value 'offset' positions back
if (idx >= offset){
array_offset = bit_array[idx - offset];
}
__syncthreads(); // all reads complete before any write (avoids a race)
// phase 2: add the saved partner value
if(idx >= offset){
bit_array[idx] = array_offset + bit_array[idx];
}
__syncthreads(); // all writes complete before the next round's reads
}
return bit_array[idx];
}
// Stable in-place partition of one block's values by the given bit:
// elements whose bit is 0 end up (in order) before all elements whose bit
// is 1 -- exactly the per-pass step LSD radix sort needs.
__device__ void partition_by_bit(unsigned int* values, unsigned int bit){
unsigned int idx = threadIdx.x;
unsigned int size = blockDim.x;
unsigned int x_i = values[idx]; // this thread's element, saved before the array is reused
unsigned int p_i = (x_i >> bit) & 1; // the element's bit value (0 or 1)
values[idx] = p_i; // reuse the array to hold the bit flags
__syncthreads();
// inclusive scan of the flags: number of 1-bits at positions <= idx
unsigned int scan_val = plus_scan(values);
// zeros in total = size - ones in total (ones total is the last scan entry)
unsigned int total = size - values[size - 1];
__syncthreads(); // everyone has read scan_val/total before the scatter overwrites 'values'
if (p_i){
// 1-elements go after all zeros, ordered by their scan rank (stable)
values[scan_val - 1 + total] = x_i;
}else{
// 0-elements keep their rank among zeros: idx minus ones at or before idx
values[idx - scan_val] = x_i;
}
}
/* Driver: fill a small array with random values, sort it on the device
 * with a single block of NUM_ELEMENTS threads, then print the array
 * before and after sorting. */
int main(){
    const unsigned int BYTES = NUM_ELEMENTS * sizeof(int);
    unsigned int h_in[NUM_ELEMENTS];
    unsigned int h_out[NUM_ELEMENTS];
    for (int i = 0; i < NUM_ELEMENTS; ++i)
        h_in[i] = rand() % 100; // Generating random numbers between 0 and 99
    // device buffer: copy in, sort, copy back
    unsigned int *d_array;
    cudaMalloc((void **) &d_array, BYTES);
    cudaMemcpy(d_array, h_in, BYTES, cudaMemcpyHostToDevice);
    radix_sort<<<1, NUM_ELEMENTS>>>(d_array);
    cudaMemcpy(h_out, d_array, BYTES, cudaMemcpyDeviceToHost);
    printf("Unsorted: \n");
    for (int i = 0; i < NUM_ELEMENTS; ++i)
        printf("%d ", h_in[i]);
    printf("\n");
    printf("Sorted: \n");
    for (int i = 0; i < NUM_ELEMENTS; ++i)
        printf("%d ", h_out[i]);
    cudaFree(d_array);
    return 0;
}
|
21,375 | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 8
#define THREADS 2
/* Seconds elapsed since the first call, with microsecond resolution.
 * The first invocation latches tv_sec as the epoch in a function-local
 * static, so later calls return small, precise deltas. */
double wtime() {
    static int sec = -1;
    struct timeval now;
    gettimeofday(&now, NULL);
    if (sec < 0)
        sec = now.tv_sec; // latch the epoch on the first call
    return (now.tv_sec - sec) + 1.0e-6 * now.tv_usec;
}
// One Jacobi relaxation step on the interior of a 1-D rod of N cells:
// b[i] = average of a[i-1], a[i], a[i+1].  Cells 0 and N-1 (the fixed
// boundary) are never written.
__global__ void jacobi(float *a, float *b) {
    // BUG FIX: the original computed blockIdx.x * blockDim.x * N + ..., which
    // strides blocks N elements apart, so with more than one block most
    // interior points were never updated (only block 0's indices fell inside
    // the i < N - 1 guard).  The standard flat global index is used instead.
    int i = blockIdx.x * blockDim.x + threadIdx.x + 1;
    if (i < N - 1) b[i] = 0.33333f * (a[i - 1] + a[i] + a[i + 1]);
}
int main() {
float *h_a, *h_b;
float *d_a, *d_b;
h_a = (float *)malloc(N * sizeof(float));
h_b = (float *)malloc(N * sizeof(float));
cudaMalloc(&d_a, N * sizeof(float));
cudaMalloc(&d_b, N * sizeof(float));
for (int i = 1; i < N; i++) {
h_a[i] = h_b[i] = 70.0f;
}
h_a[0] = h_a[N - 1] = h_b[0] = h_b[N - 1] = 150.0f;
cudaMemcpy(d_a, h_a, N * sizeof(float), cudaMemcpyHostToDevice);
dim3 block((N - 2) / THREADS);
dim3 thread(THREADS);
for (int i = 0; i < THREADS; i++) {
jacobi<<<block, thread>>>(d_a, d_b);
cudaThreadSynchronize();
float *aux = d_a;
d_a = d_b;
d_b = aux;
}
cudaMemcpy(h_a, d_a, N * sizeof(float), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; i++) printf("%2.0f, ", h_a[i]);
printf("\n");
// Liberamos memoria
free(h_a);
free(h_b);
cudaFree(d_a);
cudaFree(d_b);
return 0;
} |
21,376 | //#include <stdlib.h>
//#include <stdio.h>
//#include <curand_kernel.h>
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
////#include "../../common/book.h"
//#include "../../common/cpu_anim.h"
//#include "../../common/common.h"
//#include "../../common/TexUtils.h"
//#include "../../common/PlyBlock.h"
//
//#define DIM 1024
//#define PI 3.1415926535897932f
//#define MAX_TEMP 1.0f
//#define MIN_TEMP 0.0001f
//#define SPEED 0.00005f
//
//
////// this kernel takes in a 2-d array of floats
////// it updates the value-of-interest by a scaled value based
////// on itself and its nearest neighbors
//__global__ void blend_kernel(float *dst, bool dstOut, cudaTextureObject_t texIn, cudaTextureObject_t texOut) {
// // map from threadIdx/BlockIdx to pixel position
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// int offset = x + y * blockDim.x * gridDim.x;
//
// int left = offset - 1;
// int right = offset + 1;
// if (x == 0) left++;
// if (x == DIM - 1) right--;
//
// int top = offset - DIM;
// int bottom = offset + DIM;
// if (y == 0) top += DIM;
// if (y == DIM - 1) bottom -= DIM;
//
// float t, l, c, r, b;
// if (dstOut) {
// t = tex1Dfetch<float>(texIn, top);
// l = tex1Dfetch<float>(texIn, left);
// c = tex1Dfetch<float>(texIn, offset);
// r = tex1Dfetch<float>(texIn, right);
// b = tex1Dfetch<float>(texIn, bottom);
//
// }
// else {
// t = tex1Dfetch<float>(texOut, top);
// l = tex1Dfetch<float>(texOut, left);
// c = tex1Dfetch<float>(texOut, offset);
// r = tex1Dfetch<float>(texOut, right);
// b = tex1Dfetch<float>(texOut, bottom);
// }
// dst[offset] = c + SPEED * (t + b + r + l - 4 * c);
//}
//
//
//
//void anim_gpu(AppBlock *appBlock, int ticks) {
// //HANDLE_ERROR(cudaEventRecord(appBlock->start, 0));
// //dim3 blocks(DIM / 16, DIM / 16);
// //dim3 threads(16, 16);
// //CPUAnimBitmap *bitmap = appBlock->bitmap;
//
// //// since tex is global and bound, we have to use a flag to
// //// select which is in/out per iteration
// //volatile bool dstOut = true;
// //for (int i = 0; i<100; i++) {
// // float *in, *out;
// // if (dstOut) {
// // in = appBlock->d->dev_inSrc;
// // out = appBlock->d->dev_outSrc;
// // }
// // else {
// // out = appBlock->d->dev_inSrc;
// // in = appBlock->d->dev_outSrc;
// // }
// // blend_kernel << <blocks, threads >> >(out, dstOut, *appBlock->d->texIn, *appBlock->d->texOut);
// // dstOut = !dstOut;
// //}
//
// //float_to_color << <blocks, threads >> >(appBlock->d->output_bitmap, appBlock->d->dev_outSrc);
//
// //HANDLE_ERROR(cudaMemcpy(bitmap->get_ptr(),
// // d->output_bitmap,
// // bitmap->image_size(),
// // cudaMemcpyDeviceToHost));
//
// //HANDLE_ERROR(cudaEventRecord(appBlock->stop, 0));
// //HANDLE_ERROR(cudaEventSynchronize(d->stop));
// //float elapsedTime;
// //HANDLE_ERROR(cudaEventElapsedTime(&elapsedTime,
// // d->start, d->stop));
// //d->totalTime += elapsedTime;
// //++d->frames;
// //printf("Average Time per frame: %3.1f ms\n", d->totalTime / d->frames);
// //printf("tic: %d\n\n", ticks);
//}
//
//// clean up memory allocated on the GPU
//
//void anim_exit(AppBlock *d) {
//
// //cudaDestroyTextureObject(*(d->texIn));
// //cudaDestroyTextureObject(*d->texOut);
// //cudaDestroyTextureObject(*d->texConst);
//
// //HANDLE_ERROR(cudaFree(d->dev_inSrc));
// //HANDLE_ERROR(cudaFree(d->dev_outSrc));
// //HANDLE_ERROR(cudaFree(d->dev_constSrc));
//
// //HANDLE_ERROR(cudaEventDestroy(d->start));
// //HANDLE_ERROR(cudaEventDestroy(d->stop));
//}
//
//
////int main(void)
////{
//// PlyBlock data;
//// CPUAnimBitmap bitmap(DIM, DIM, &data);
//// data.bitmap = &bitmap;
//// data.totalTime = 0;
//// data.frames = 0;
//// HANDLE_ERROR(cudaEventCreate(&data.start));
//// HANDLE_ERROR(cudaEventCreate(&data.stop));
////
//// int imageSize = bitmap.image_size();
////
//// HANDLE_ERROR(cudaMalloc((void**)&data.output_bitmap,
//// imageSize));
////
//// // assume float == 4 chars in size (ie rgba)
//// HANDLE_ERROR(cudaMalloc((void**)&data.dev_inSrc, imageSize));
//// HANDLE_ERROR(cudaMalloc((void**)&data.dev_outSrc, imageSize));
//// HANDLE_ERROR(cudaMalloc((void**)&data.dev_constSrc, imageSize));
////
////
//// data.texIn = TexObjFloat1D(data.dev_inSrc, imageSize);
//// data.texOut = TexObjFloat1D(data.dev_outSrc, imageSize);
//// data.texConst = TexObjFloat1D(data.dev_constSrc, imageSize);
////
////
////
//// // intialize the constant data
//// float *temp = RndFloat0to1(DIM*DIM);
////
//// HANDLE_ERROR(cudaMemcpy(data.dev_constSrc, temp, imageSize,
//// cudaMemcpyHostToDevice));
////
////
//// HANDLE_ERROR(cudaMemcpy(data.dev_inSrc, temp, imageSize,
//// cudaMemcpyHostToDevice));
//// free(temp);
////
//// bitmap.anim_and_exit((void(*)(void*, int))anim_gpu,
//// (void(*)(void*))anim_exit);
////}
|
21,377 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#define THREADS_PER_BLOCK 32
// Block-level inner product: each thread multiplies one pair of elements
// into shared memory; thread 0 of every block then sums its block's
// products and atomically accumulates the partial sum into *cc.
__global__ void innerProd(float *aa, float *bb, float *cc)
{
__shared__ float temp[THREADS_PER_BLOCK];
int index = threadIdx.x + blockIdx.x* blockDim.x;
temp[threadIdx.x] = aa[index]*bb[index];
// NOTE(review): every thread of every block writes 8 here with no
// inter-block synchronization, so one block can reset *cc AFTER another
// block has already atomicAdd'ed its partial sum -- the final value is
// schedule-dependent.  Initializing *cc on the host (or with a separate
// tiny kernel) before the launch would remove the race.
*cc = 8; // Initialized to avoid memory problems. See comments
// below, next to the free and cudaFree commands.
// No thread goes beyond this point until all of them
// have reached it. Threads are only synchronized within
// a block.
__syncthreads();
// Thread 0 sums the pairwise products
if (threadIdx.x == 0) {
float sum = 0;
for (int i = 0; i < THREADS_PER_BLOCK; i++){
sum += temp[i];
}
// Use atomicAdd to avoid different blocks accessing cc at the
// same time (race condition). The atomic opperation enables
// read-modify-write to be performed by a block without interruption.
//*cc += sum;
atomicAdd(cc, sum);
}
}
/* Host driver: builds two length-NN vectors of ones, computes their inner
 * product on the GPU with innerProd, and prints diagnostics.
 * NOTE(review): the ainput/binput/cinput parameters are immediately
 * overwritten with locally malloc'ed buffers, so the caller's data is
 * ignored and the result never reaches the caller -- confirm intent
 * before changing; the signature is preserved here. */
void cuda_function(float *ainput, float *binput, float *cinput, int NN)
{
std::cout << ">>> inside cuda_function " << std::endl;
std::cout << "NN = " << NN << "\n";
#define NUMBER_OF_BLOCKS (NN/THREADS_PER_BLOCK)
float *d_ainput, *d_binput, *d_cinput;// device copies of a, b, c
// BUG FIX: a byte count must be integral; the original stored it in a float.
size_t size = NN * sizeof(float);
ainput = (float *)malloc(size);
binput = (float *)malloc(size);
cinput = (float *)malloc(sizeof(float));
*cinput = 7; // overwritten to 8 inside the kernel (see innerProd)
for (int i = 0; i < NN; i++) {
ainput[i] = 1;
binput[i] = 1;
}
std::cout << "a[0] = " << ainput[0] << "\n";
std::cout << "b[0] = " << binput[0] << "\n";
int retMem = 99;    // sentinel values so a skipped call is visible in the output
int retMalloc = 99;
// In the GPU ------------------------------------------
// BUG FIX: the original passed (void**)&*d_ainput -- the *value* of an
// uninitialized pointer -- so cudaMalloc wrote the device address through
// garbage, and it hard-coded 1024*4 bytes instead of the real buffer size.
retMalloc = cudaMalloc((void**)&d_ainput, size);
cudaMalloc((void**)&d_binput, size);
cudaMalloc((void**)&d_cinput, sizeof(float));
cudaMemcpy(d_ainput, ainput, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_binput, binput, size, cudaMemcpyHostToDevice);
retMem = cudaMemcpy(d_cinput, cinput, sizeof(float), cudaMemcpyHostToDevice);
// Call kernel: one thread per element, THREADS_PER_BLOCK threads per block
innerProd<<<NUMBER_OF_BLOCKS, THREADS_PER_BLOCK>>>(d_ainput, d_binput, d_cinput);
// blocking copy: also synchronizes with the kernel before reading the result
cudaMemcpy(cinput, d_cinput, sizeof(float), cudaMemcpyDeviceToHost);
// -----------------------------------------------------
std::cout << "retMalloc = " << retMalloc << "\n";
std::cout << "retMem = " << retMem << "\n";
std::cout << "NUMBER_OF_BLOCKS = " << NUMBER_OF_BLOCKS << "\n";
std::cout << "c = " << *cinput << "\n";
std::cout << ">>> Free memory " << "\n";
// Remember: free and cudaFree DO NOT ERASE MEMORY! They only
// return memory to a pool to be re-allocated. That is why the shared
// variable 'cc' is initialized inside the kernel. See this:
// http://stackoverflow.com/questions/13100615/cudafree-is-not-freeing-memory
// NOTE(review): the host buffers are deliberately never freed (see the
// note above), which leaks size*2 + sizeof(float) bytes per call.
//free(ainput);
//free(binput);
//free(cinput);
cudaFree(d_ainput);
cudaFree(d_binput);
cudaFree(d_cinput);
}
|
21,378 | //=============================================================================================
// Name : syncThreadsTest.cu
// Author : Jose Refojo
// Version : 08-02-2017
// Creation date : 28-01-2013
// Copyright : Copyright belongs to Trinity Centre for High Performance Computing
// Description : This program will run a number of block synchronization tests
//=============================================================================================
#include "stdio.h"
// Demonstration kernel for the block-synchronization primitives:
// __syncthreads(), __syncthreads_count(), __syncthreads_and() and
// __syncthreads_or().  It also adds in1+in2 into out so the host can
// verify the launch ran.  It DELIBERATELY places a __syncthreads() inside
// a divergent branch to illustrate the hazard (see the "potentially
// dangerous" note below) -- do not copy that pattern into real code.
__global__ void syncThreadsTest( float *in1, float *in2, float *out, int Ntot) {
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if ( idx <Ntot ) {
out[idx]=in1[idx]+in2[idx];
printf ("syncThreadsTest::threadIdx[%d] =%f %d\n",idx,out[idx],idx%2);
__syncthreads();
if (idx==0) {
printf ("\n\n");
}
if ((idx%2)==1) {
__syncthreads(); // This is potentially dangerous!
printf ("syncThreadsTest::threadIdx[%d] idx%%2=%d\n",idx,idx%2);
} else {
printf ("syncThreadsTest::threadIdx[%d] idx%%2=%d\n",idx,idx%2);
}
if (idx==0) {
printf ("\n\n");
}
// __syncthreads_count test
// returns, to every thread, the number of threads whose predicate is true
int syncthreads_count = __syncthreads_count(idx%2==0);
// Another option, that didn't use to work, is to evaluate a variable instead, such as:
//int even = idx%2;
//int syncthreads_count = __syncthreads_count(even);
if (idx==0) {
printf ("syncThreadsTest::threadIdx[%d] syncthreads_count=%d\n",idx,syncthreads_count);
}
// __syncthreads_and test: non-zero only if the predicate holds for ALL threads
int syncthreads_and = __syncthreads_and(idx%2==0);
if (idx==0) {
printf ("syncThreadsTest::threadIdx[%d] syncthreads_and=%d\n",idx,syncthreads_and);
}
// __syncthreads_or test: non-zero if the predicate holds for ANY thread
int syncthreads_or = __syncthreads_or(idx%2==0);
if (idx==0) {
printf ("syncThreadsTest::threadIdx[%d] syncthreads_or=%d\n",idx,syncthreads_or);
}
}
}
/* Host driver: builds two 18-element vectors, launches syncThreadsTest to
 * add them on the device (exercising the sync primitives along the way),
 * copies the sum back and prints every element. */
int main() {
    const int N = 18; // problem size
    // host-side arrays
    float *a = (float*) malloc(N*sizeof(float));
    float *b = (float*) malloc(N*sizeof(float));
    float *c = (float*) malloc(N*sizeof(float));
    // device-side arrays
    float *a_d, *b_d, *c_d;
    cudaMalloc((void **) &a_d, sizeof(float)*N);
    cudaMalloc((void **) &b_d, sizeof(float)*N);
    cudaMalloc((void **) &c_d, sizeof(float)*N);
    // a[i] = 2i and b[i] = -i, so c[i] should come back as i
    for (int i = 0; i < N; i++) {
        a[i] = (float) 2*i;
        b[i] = -(float) i;
    }
    // ship the inputs to the device
    cudaMemcpy(a_d, a, sizeof(float)*N, cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, sizeof(float)*N, cudaMemcpyHostToDevice);
    // execution configuration: blocks of 8, rounded up to cover all N
    const int block_size = 8;
    dim3 dimBlock(block_size);
    dim3 dimGrid((N/dimBlock.x) + (!(N%dimBlock.x)?0:1));
    // add a and b on the device, result in c_d
    syncThreadsTest<<<dimGrid,dimBlock>>>(a_d, b_d, c_d, N);
    // bring the result back (blocking copy also waits for the kernel)
    cudaMemcpy(c, c_d, sizeof(float)*N, cudaMemcpyDeviceToHost);
    printf("addVectorsfloat will generate two vectors, move them to the global memory, and add them together in the GPU\n");
    for (int i = 0; i < N; i++) {
        printf(" a[%2d](%10f) + b[%2d](%10f) = c[%2d](%10f)\n",i,a[i],i,b[i],i,c[i]);
    }
    // release host and device memory
    free(a); free(b); free(c);
    cudaFree(a_d); cudaFree(b_d); cudaFree(c_d);
}
|
21,379 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
/* For every node, verify the strong-neighbor "handshake": node i is
 * matched with strongNeighbor[i] only when that neighbor's own strong
 * neighbor is i; otherwise matches[i] is set to -1 (unmatched).
 * Grid-stride loop, so any launch configuration covers all numNodes. */
__global__ void check_handshaking_gpu(int * strongNeighbor, int * matches, int numNodes) {
    const int stride = blockDim.x * gridDim.x;             // total threads in the grid
    const int first = blockDim.x * blockIdx.x + threadIdx.x; // this thread's first node
    for (int node = first; node < numNodes; node += stride) {
        int candidate = strongNeighbor[node];
        // mutual choice => match; otherwise unmatched
        matches[node] = (strongNeighbor[candidate] == node) ? candidate : -1;
    }
}
|
21,380 | #include "includes.h"
using namespace std;
#define TILE 16
/* LU Decomposition using Shared Memory \
\ CUDA \
\ \
\ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~*/
//Initialize a 2D matrix
__global__ void elim(double *A, int n, int index, int bsize){
extern __shared__ double pivot[];
int idThread=threadIdx.x;
int idBlock=blockIdx.x;
int blockSize=bsize;
if(idThread==0){
for(int i=index;i<n;i++) pivot[i]=A[(index*n)+i];
}
__syncthreads();
//Varitables for pivot, row, start and end
int pivotRow=(index*n);
int currentRow=(((blockSize*idBlock) + idThread)*n);
int start=currentRow+index;
int end=currentRow+n;
//If greater than pivot row, loop from start index + 1(next row) to end of column
if(currentRow >pivotRow){
for(int i= start+1; i<end; ++i){
//Set the matrix value of next row and its column - pivot
A[i]=A[i]-(A[start]*pivot[i-currentRow]);
}
}
} |
21,381 | #include "includes.h"
// Fills b (d x n, sample j of dimension i at b[i + j*d]) with n evenly
// spaced values from l[i] to h[i] per dimension, like numpy.linspace.
// Expects a 2D launch covering (d, n).
__global__ void lineSpace ( const int d, const int n, const float *l, const float *h, float *b ) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;   // dimension index
    int j = threadIdx.y + blockDim.y * blockIdx.y;   // sample index
    if ( i < d && j < n ) {
        // Guard n == 1: the original divided by (n - 1) == 0, producing
        // inf/NaN; with a single sample the output is simply the lower bound.
        float delta = ( n > 1 ) ? ( h[i] - l[i] ) / ( n - 1 ) : 0.0f;
        b[i+j*d] = l[i] + j * delta;
    }
}
// Element-wise vector sum: c[i] = a[i] + b[i] for i in [0, n).
extern "C" __global__ void vector_add(float *c, float *a, float *b, int n) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;  // tail guard: the grid may overshoot n
    c[idx] = a[idx] + b[idx];
}
|
21,383 | #include "includes.h"
// Element-wise safe division over a width x height image:
// dst = numerator / denominator, with tiny/non-positive denominators
// (<= 1e-7) mapping to zero instead of producing inf/NaN.
__global__ void cuda_divide(float * dst, float *numerator, float *denominator, int width, int height)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if(col >= width || row >= height)
        return;  // outside the image
    const int idx = row * width + col;
    const float den = denominator[idx];
    // Note: negative denominators also fall into the zero branch.
    dst[idx] = (den > 0.0000001) ? numerator[idx] / den : 0.0f;
}
21,384 | #include <stdio.h>
#include <iostream>
#include <limits>
#include <curand.h>
#include<cmath>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <curand_kernel.h>
typedef std::numeric_limits< double > dbl;
// simulation parameters
const double dt = 0.1;
const int N = 100000;
const int T_max = 100000*(1/dt);
const double energy_error_check = 0.05;
// basic constants
const double kB = 1.38065e-23;
const double g = 9.805e-6;
const double pi = 3.14159;
// Fiber and Field Parameters
const double r_casimir_cutoff = 0.1;
//******************************************************//
//******************************************************//
// // T. Yoon et al. PRA, 2019
// // T. Yoon et al. J. Phys, 2020
// // const char atom_species = 'Cs';
// Big Fiber
// const double r_hcpcf = 15;
// const double alpha = 9.4657e-18;
// const double m = 2.2069e-25;
// const double P = 1e-7;
// const double w0 = 11.8;
// const double wvlngth = 0.935;
// const double zR = 467.8453059741634;
// const double Temp = 30e-6;
// const double cx = 0;
// const double cy = 0;
// const double cz = 5000;
// const double R = 1500;
// const double slope = 2000./5000.;
// const double r_check = 400;
// // Small Fiber
// const double r_hcpcf = 3.75 - r_casimir_cutoff;
// const double alpha = 9.4657e-18;
// const double m = 2.2069e-25;
// // scattering rate constants
// const double beta = 25449.809972971874;
// const double spont_v_factor = 0.003360217090858776;
// const double abs_v_factor = 0.0032150016610518837;
// // field realted constants
// const double P = 0.5e-7;
// const double w0 = 2.75;
// const double wvlngth = 0.935;
// const double zR = 25.40994058050568;
// const double Temp = 32e-6;
// const double cx = 0;
// const double cy = 0;
// const double cz = 5000;
// const double R = 1500;
// const double slope = 1000./5000.;
// const double r_check = 250;
//******************************************************//
//******************************************************//
// // M. Bajcsy et al. PRA, 2011
// // const char atom_species = 'Rb';
// const double r_hcpcf = 3.5 - r_casimir_cutoff;
// const double m = 1.44316060e-25;
// const double alpha = 3.038831243e-17;
// scattering rate constants
// const double beta = 382499.3875547423;
// const double spont_v_factor = 0.00578236135589983;
// const double abs_v_factor = 0.005731739384291537;
// const double P = 0.25e-7;
// const double w0 = 2;
// const double wvlngth = 0.802;
// const double zR = 15.668791289724654;
// const double Temp = 40e-6;
// const double cx = 0;
// const double cy = 0;
// const double cz = 6300;
// const double R = 340;
// const double slope = 1000./5000.;
// const double r_check = 250;
//******************************************************//
//******************************************************//
// // A. P Hilton et al. PRApplied, 2018
// // const char atom_species = 'Rb - 85';
const double r_hcpcf = 22.5 - r_casimir_cutoff;
const double m = 1.409993199e-25;
const double alpha = 7.190132913713667e-17;
// scattering rate constants
const double beta = 3015911.2228300544;
const double spont_v_factor = 0.005918379158044901;
const double abs_v_factor = 0.005901520518109168;
const double P = 10e-7;
const double w0 = 16.5;
const double wvlngth = 0.79725;
const double zR = 1072.8110378674457;
const double Temp = 5e-6;
const double cx = 0;
const double cy = 0;
const double cz = 25000;
const double R = 1000; // Gaussian MOT
const double slope = 2000./25000.;
const double r_check = 500;
//******************************************************//
//******************************************************//
// // Yang et al. Fibers, 2020
// // const char atom_species = 'Rb - 85';
// const double r_hcpcf = 32 - r_casimir_cutoff;
// const double m = 1.409993199e-25;
// const double alpha = 1.1732431139058278e-17;
// scattering rate constants
// const double beta = 48021.0283738874;
// const double spont_v_factor = 0.005918379158044901;
// const double abs_v_factor = 0.005730800527481771;
// const double P = 5e-7;
// const double w0 = 22;
// const double wvlngth = 0.821;
// const double zR = 1852.0473134439221;
// const double Temp = 10e-6;
// const double cx = 0;
// const double cy = 0;
// const double cz = 5000;
// const double R = 2000; // Gaussian MOT
// const double slope = 2000./5000.;
// const double r_check = 500;
//******************************************************//
//******************************************************//
// State of one simulated atom (AoS layout, copied between host and device).
struct Particles
{
double p[3];   // position (x, y, z)
double v[3];   // velocity
double a[3];   // acceleration
double t;      // elapsed simulation time for this atom
double energy_initial;   // total energy recorded at initialization
double energy_current;   // total energy at the latest integration step
double energy_kick_tab;  // accumulated kinetic-energy change from scattering kicks
int statusi; // 0 -- running
// 1 -- z < 0 -- loaded
// 2 -- z < 0 -- not loaded
// 3 -- out of bounds
double N_scattering;     // count of scattering events (stored as double)
};
// Dipole-force field: for atom position (x, y, z), writes acceleration into
// (ax, ay, az), may update the atom status (loaded / lost / out of bounds),
// and returns the optical potential energy through OPE.
__device__ void a_dipole(double x, double y, double z, double &ax, double &ay, double &az, int &statusi, double& OPE)
{
double w = 0;          // local beam radius at this z
double gaussian = 0;   // transverse Gaussian factor of the intensity
double intensity = 0;
double ax_dipole = 0;
double ay_dipole = 0;
double az_dipole = 0;
double r = sqrt(pow(x,2) + pow(y,2));   // transverse distance from beam axis
double I0 = (2*P)/(pi*pow(w0,2));       // on-axis peak intensity of the beam
// out of bounds check: outside the conical capture region -> status 3
if (r > (r_check + slope*z)){
statusi = 3;
}
// a_dipole acceleration update
if (z > 0) {
// Above the fiber tip: diverging Gaussian beam, waist grows with z/zR.
w = w0*sqrt(1 + pow((z/zR),2));
gaussian = exp(-(2*(pow(x,2) + pow(y,2)))/(pow(w,2)));
intensity = I0*pow((w0/w),2)*gaussian;
// Transverse dipole force ~ intensity gradient.
ax_dipole = -4*x*alpha*intensity/(m*pow(w,2));
ay_dipole = -4*y*alpha*intensity/(m*pow(w,2));
// Axial force from the z-dependence of the beam intensity.
az_dipole = alpha*intensity*(4*(z/(pow(zR,2)))*pow((w0/w),4)*((pow(x,2) + pow(y,2))/pow(w0,2)) - (2*z/(pow(zR,2)))*pow((w0/w),2))/m;
}
else {
// At/below the fiber tip: collimated beam of constant waist w0, no axial force.
w = w0;
gaussian = exp(-(2*(pow(x,2) + pow(y,2)))/(pow(w,2)));
intensity = I0*pow((w0/w),2)*gaussian;
ax_dipole = -4*x*alpha*intensity/(m*pow(w,2));
ay_dipole = -4*y*alpha*intensity/(m*pow(w,2));
az_dipole = 0;
// inside hcpcf
if (r < r_hcpcf){
// stop condition, loaded: the atom penetrated 100 length units into the fiber
if (z < -100){
statusi = 1;
}
}
// collided, not loaded: hit the fiber face outside the hollow core
else if (r > r_hcpcf){
statusi = 2;
}
}
ax = ax_dipole;
ay = ay_dipole;
az = az_dipole - g;      // add gravity
OPE = -alpha*intensity;  // optical (dipole) potential energy
}
// Velocity-Verlet integrator: advances every atom until it terminates
// (statusi != 0) or T_max steps elapse. Includes the ad-hoc photon
// scattering recoil model.
__global__ void verlet_integrator(struct Particles atoms[])
{
double KE = 0;            // kinetic energy
double GPE = 0;           // gravitational potential energy
double OPE = 0;           // optical potential energy (set by a_dipole)
double KE_kick = 0;       // kinetic-energy change from a scattering kick
double intensity = 0;
double p_scattering = 0;  // uniform deviate vs. scattering probability
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Fix: one RNG state per thread, initialized once. The original re-ran
// curand_init from clock() for every draw on every timestep, which is
// extremely slow and yields poorly decorrelated samples.
curandState rng;
curand_init((unsigned long long)clock() + index, index, 0, &rng);
for (int particle_id = index; particle_id < N; particle_id += stride){
for (int T = 1; atoms[particle_id].statusi == 0 && T < T_max; T += 1){
// First half-kick, then drift.
atoms[particle_id].v[0] = atoms[particle_id].v[0] + 0.5*atoms[particle_id].a[0]*dt;
atoms[particle_id].v[1] = atoms[particle_id].v[1] + 0.5*atoms[particle_id].a[1]*dt;
atoms[particle_id].v[2] = atoms[particle_id].v[2] + 0.5*atoms[particle_id].a[2]*dt;
atoms[particle_id].p[0] = atoms[particle_id].p[0] + atoms[particle_id].v[0]*dt;
atoms[particle_id].p[1] = atoms[particle_id].p[1] + atoms[particle_id].v[1]*dt;
atoms[particle_id].p[2] = atoms[particle_id].p[2] + atoms[particle_id].v[2]*dt;
// Acceleration at the new position (also updates statusi and OPE).
a_dipole(atoms[particle_id].p[0], atoms[particle_id].p[1], atoms[particle_id].p[2], atoms[particle_id].a[0], atoms[particle_id].a[1], atoms[particle_id].a[2], atoms[particle_id].statusi, OPE);
// Second half-kick.
atoms[particle_id].v[0] = atoms[particle_id].v[0] + 0.5*atoms[particle_id].a[0]*dt;
atoms[particle_id].v[1] = atoms[particle_id].v[1] + 0.5*atoms[particle_id].a[1]*dt;
atoms[particle_id].v[2] = atoms[particle_id].v[2] + 0.5*atoms[particle_id].a[2]*dt;
KE = 0.5*m*(pow(atoms[particle_id].v[0],2) + pow(atoms[particle_id].v[1],2) + pow(atoms[particle_id].v[2],2));
// scattering kick: ad-hoc implementation
p_scattering = curand_uniform_double(&rng);
intensity = OPE/(-1*alpha);   // recover intensity from the potential
if (p_scattering < beta*intensity*dt){
atoms[particle_id].N_scattering = atoms[particle_id].N_scattering + 1;
// Isotropic spontaneous-emission recoil plus absorption recoil along +z.
double phi = 2*pi*curand_uniform_double(&rng);
double theta = pi*curand_uniform_double(&rng);
atoms[particle_id].v[0] = atoms[particle_id].v[0] + spont_v_factor*cos(phi)*sin(theta);
atoms[particle_id].v[1] = atoms[particle_id].v[1] + spont_v_factor*sin(phi)*sin(theta);
atoms[particle_id].v[2] = atoms[particle_id].v[2] + abs_v_factor + spont_v_factor*cos(theta);
KE_kick = KE - 0.5*m*(pow(atoms[particle_id].v[0],2) + pow(atoms[particle_id].v[1],2) + pow(atoms[particle_id].v[2],2));
}
// energy updater
KE = 0.5*m*(pow(atoms[particle_id].v[0],2) + pow(atoms[particle_id].v[1],2) + pow(atoms[particle_id].v[2],2));
GPE = m*g*atoms[particle_id].p[2];
atoms[particle_id].energy_current = (KE + GPE + OPE);
atoms[particle_id].energy_kick_tab = atoms[particle_id].energy_kick_tab + (KE_kick);
// timer
atoms[particle_id].t = atoms[particle_id].t + dt;
KE_kick = 0;
}
}
}
// Builds the initial cloud: positions uniform inside a sphere of radius R
// centred on (cx, cy, cz); velocity components drawn from a Maxwell-Boltzmann
// distribution at temperature Temp. Random deviates are generated on the
// device with cuRAND and copied back to the host.
void initialize_MB_cloud(struct Particles atoms[]){
double *phi, *costheta, *u, *therm;
double theta, r;
phi = (double *) malloc(N*sizeof(double));
costheta = (double *) malloc(N*sizeof(double));
u = (double *) malloc(N*sizeof(double));
therm = (double *) malloc(3*N*sizeof(double));
// device-side buffers for the random numbers
double *d_phi, *d_costheta, *d_u, *d_therm;
cudaMalloc(&d_phi, N*sizeof(double));
cudaMalloc(&d_costheta, N*sizeof(double));
cudaMalloc(&d_u, N*sizeof(double));
cudaMalloc(&d_therm, 3*N*sizeof(double));
// azimuthal angles: N uniforms in [0,1), XORWOW generator
time_t seeder;
time(&seeder);
srand((unsigned int) seeder);
curandGenerator_t gen;
curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_XORWOW);
curandSetPseudoRandomGeneratorSeed(gen, seeder);
curandGenerateUniformDouble(gen, d_phi, N);
cudaMemcpy(phi, d_phi, N*sizeof(double), cudaMemcpyDeviceToHost);
curandDestroyGenerator(gen);
cudaFree(d_phi);
// cos(theta): N uniforms, separate generator family and seed
time_t seeder1;
time(&seeder1);
srand((unsigned int) seeder1);
curandGenerator_t gen1;
curandCreateGenerator(&gen1, CURAND_RNG_PSEUDO_MTGP32);
curandSetPseudoRandomGeneratorSeed(gen1, seeder1);
curandGenerateUniformDouble(gen1, d_costheta, N);
cudaMemcpy(costheta, d_costheta, N*sizeof(double), cudaMemcpyDeviceToHost);
curandDestroyGenerator(gen1);
cudaFree(d_costheta);
// radial deviates: N uniforms
time_t seeder2;
time(&seeder2);
srand((unsigned int) seeder2);
curandGenerator_t gen2;
curandCreateGenerator(&gen2, CURAND_RNG_PSEUDO_MRG32K3A);
curandSetPseudoRandomGeneratorSeed(gen2, seeder2);
curandGenerateUniformDouble(gen2, d_u, N);
cudaMemcpy(u, d_u, N*sizeof(double), cudaMemcpyDeviceToHost);
curandDestroyGenerator(gen2);
cudaFree(d_u);
time_t seeder3;
time(&seeder3);
srand((unsigned int) seeder3);
// normal distribution to sample thermal velocities (3N standard normals)
curandGenerator_t gen3;
curandCreateGenerator(&gen3, CURAND_RNG_PSEUDO_DEFAULT);
curandSetPseudoRandomGeneratorSeed(gen3, seeder3);
curandGenerateNormalDouble(gen3, d_therm, 3*N, 0, 1);
cudaMemcpy(therm, d_therm, 3*N*sizeof(double), cudaMemcpyDeviceToHost);
curandDestroyGenerator(gen3);
cudaFree(d_therm);
// initializing (p, v, a, t, status) array values for atoms
for (int i = 0; i < N; i++){
phi[i] = phi[i]*2*pi;
costheta[i] = costheta[i]*2 - 1;
theta = acos(costheta[i]);
r = R*cbrt(u[i]);   // cbrt gives uniform density inside the sphere
atoms[i].p[0] = cx + r*sin(theta)*cos(phi[i]);
atoms[i].p[1] = cy + r*sin(theta)*sin(phi[i]);
atoms[i].p[2] = cz + r*cos(theta);
// Fix: the original indexed therm[i + N - 1] and therm[i + 2*N - 1],
// re-using the last x-sample as atom 0's vy and shifting/correlating
// the velocity components; the three blocks are [0,N), [N,2N), [2N,3N).
atoms[i].v[0] = sqrt(kB*Temp/m)*therm[i];
atoms[i].v[1] = sqrt(kB*Temp/m)*therm[i + N];
atoms[i].v[2] = sqrt(kB*Temp/m)*therm[i + 2*N];
atoms[i].a[0] = 0;
atoms[i].a[1] = 0;
atoms[i].a[2] = -g;
atoms[i].N_scattering = 0;
// Fix: t, energy_kick_tab and statusi were never initialized (malloc
// garbage) even though the integrator accumulates/tests them.
atoms[i].t = 0;
atoms[i].energy_kick_tab = 0;
atoms[i].statusi = 0;
double KE = 0.5*m*(pow(atoms[i].v[0],2) + pow(atoms[i].v[1],2) + pow(atoms[i].v[2],2));
double GE = m*g*atoms[i].p[2];
double x = atoms[i].p[0];
double y = atoms[i].p[1];
double z = atoms[i].p[2];
// calculating dipole field and potential
double I0 = (2*P)/(pi*pow(w0,2));
double w = w0*sqrt(1 + pow((z/zR),2));
double gaussian = exp(-(2*(pow(x,2) + pow(y,2)))/(pow(w,2)));
double intensity = I0*pow((w0/w),2)*gaussian;
double OPE = -alpha*intensity;
atoms[i].energy_current = (KE + GE + OPE);
atoms[i].energy_initial = atoms[i].energy_current;
// transverse radius (reuse outer r; the original shadowed it)
r = sqrt(pow(atoms[i].p[0], 2) + pow(atoms[i].p[1], 2)); // has to be sqrt
if (r > (r_check + slope*atoms[i].p[2])){
atoms[i].statusi = 3;
}
}
// Fix: the original leaked all four host buffers.
free(phi);
free(costheta);
free(u);
free(therm);
}
// Host driver: build the Maxwell-Boltzmann atom cloud, integrate it on the
// GPU, then report loading statistics and energy-conservation checks.
int main(void){
// AoS allocating memory
struct Particles *Atoms = (struct Particles*)malloc(N*sizeof(struct Particles));
// initializing atoms
initialize_MB_cloud(Atoms);
//************************************************//
printf("----------------------------\n");
printf("Before Evolution:\n");
printf("px = %lf\n", Atoms[0].p[0]);
printf("py = %lf\n", Atoms[0].p[1]);
printf("pz = %lf\n", Atoms[0].p[2]);
printf("vx = %lf\n", Atoms[0].v[0]);
printf("vy = %lf\n", Atoms[0].v[1]);
printf("vz = %lf\n", Atoms[0].v[2]);
std::cout.precision(dbl::digits10);
std::cout << "az (precision) " << Atoms[0].a[2] << std::endl;
printf("----------------------------\n");
//************************************************//
printf("Memory Allocation Begin\n");
// device: mirror of the atom array
Particles *dev_Atoms;
cudaMalloc(&dev_Atoms, N*sizeof(Particles));
// device: copying initial values
cudaMemcpy(dev_Atoms, Atoms, N*sizeof(Particles), cudaMemcpyHostToDevice);
printf("Memory Allocation End \n");
// kernel initialization: launch configuration from the occupancy API
// code reference: https://stackoverflow.com/questions/9985912/how-do-i-choose-grid-and-block-dimensions-for-cuda-kernels
int blockSize; // The launch configurator returned block size
int minGridSize; // The minimum grid size needed to achieve the maximum occupancy for a full device launch
int gridSize; // The actual grid size needed, based on input size
cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, verlet_integrator, 0, N);
// Round up according to array size
gridSize = (N + blockSize - 1) / blockSize;
printf("Grid Size (GPU, AoS) = %d\n", gridSize);
printf("Block Size (GPU, AoS) = %d\n", blockSize);
cudaEvent_t cuda_start, cuda_stop;
cudaEventCreate(&cuda_start);
cudaEventCreate(&cuda_stop);
// time the full integration of N particles with CUDA events
cudaEventRecord(cuda_start);
verlet_integrator<<<gridSize, blockSize>>>(dev_Atoms);
cudaEventRecord(cuda_stop);
cudaEventSynchronize(cuda_stop);
float cuda_diffs = 0;
cudaEventElapsedTime(&cuda_diffs, cuda_start, cuda_stop);
cuda_diffs = cuda_diffs/1000.0;  // ms -> s
printf("GPU Time for Verlet integration (GPU, AoS) = %lf\n", cuda_diffs);
// Fix: events were never destroyed in the original.
cudaEventDestroy(cuda_start);
cudaEventDestroy(cuda_stop);
// device: copying results back
cudaMemcpy(Atoms, dev_Atoms, N*sizeof(Particles), cudaMemcpyDeviceToHost);
//************************************************//
printf("----------------------------\n");
printf("After Evolution:\n");
printf("px = %lf\n", Atoms[0].p[0]);
printf("py = %lf\n", Atoms[0].p[1]);
printf("pz = %lf\n", Atoms[0].p[2]);
printf("vx = %lf\n", Atoms[0].v[0]);
printf("vy = %lf\n", Atoms[0].v[1]);
printf("vz = %lf\n", Atoms[0].v[2]);
std::cout.precision(dbl::digits10);
std::cout << "az (precision) " << Atoms[0].a[2] << std::endl;
printf("----------------------------\n");
//************************************************//
// status accounting
int completed = 0;
int trapped = 0;
int escaped = 0;
int running = 0;
int unstable_integration_counter = 0;
double *fractional_energy_change;
fractional_energy_change = (double *) malloc(N*sizeof(double));
int fractional_energy_change_trapped = 0;
double N_scattering_average_total = 0;
double N_scattering_average_loaded = 0;
double loading_efficiency;
for (int i = 0; i < N; i++){
if (Atoms[i].statusi == 1 || Atoms[i].statusi == 2) {
completed ++;
}
if (Atoms[i].statusi == 1){
trapped++;
// Fix: fabs, not abs -- abs() on a double can truncate to int in C linkage.
fractional_energy_change[i] = fabs(Atoms[i].energy_current + Atoms[i].energy_kick_tab - Atoms[i].energy_initial)/(Atoms[i].energy_initial);
if (fractional_energy_change[i] > energy_error_check){
fractional_energy_change_trapped++;
}
N_scattering_average_loaded = N_scattering_average_loaded + Atoms[i].N_scattering;
}
if (Atoms[i].statusi == 3){
escaped++;
}
if (Atoms[i].statusi == 0){
running++;
}
fractional_energy_change[i] = fabs(Atoms[i].energy_current + Atoms[i].energy_kick_tab - Atoms[i].energy_initial)/(Atoms[i].energy_initial);
if (fractional_energy_change[i] > energy_error_check){
unstable_integration_counter++;
}
N_scattering_average_total = N_scattering_average_total + Atoms[i].N_scattering;
}
N_scattering_average_total = N_scattering_average_total/N;
// Fix: guard against division by zero when no atom was trapped.
N_scattering_average_loaded = (trapped > 0) ? N_scattering_average_loaded/trapped : 0.0;
loading_efficiency = (double)trapped/N;
printf("Total Number of Atoms = %d\n", N);
printf("Accounted Number of Atoms = %d\n", completed + escaped + running);
printf("Completed Atom Runs = %d\n", completed);
printf("Number of Escaped Atoms = %d\n", escaped);
printf("Number of Trapped Atoms = %d\n", trapped);
printf("Number of Running Atoms = %d\n", running);
printf("Loading Efficiency (1e-4) = %lf\n", loading_efficiency*1e4);
// smallest fractional energy change (exact zeros are printed for inspection)
double fractional_energy_change_min = fractional_energy_change[0];
for (int j = 0; j < N; j++){
if (fractional_energy_change[j] < fractional_energy_change_min){
fractional_energy_change_min = fractional_energy_change[j];
if (fractional_energy_change_min == 0){
std::cout << Atoms[j].p[0] << ", " << Atoms[j].p[1] << ", " << Atoms[j].p[2] << ", " << Atoms[j].statusi << std::endl;
}
}
}
double fractional_energy_change_max = fractional_energy_change[0];
for (int j = 0; j < N; j++){
if (fractional_energy_change[j] > fractional_energy_change_max){
fractional_energy_change_max = fractional_energy_change[j];
}
}
printf("Total number of fractional errors in energy : %d\n", unstable_integration_counter);
printf("Number of fractional errors that are loaded: %d\n", fractional_energy_change_trapped);
printf("----------------------------\n");
printf("Fractional energy change max: %lf\n", fractional_energy_change_max);
printf("Fractional energy change min: %lf\n", fractional_energy_change_min);
std::cout << "Fractional energy change min (precision) " << fractional_energy_change_min << std::endl;
printf("----------------------------\n");
printf("----------------------------\n");
printf("Fractional energy change max: %lf\n", fractional_energy_change_max);
printf("----------------------------\n");
printf("Average No. of Scattering Events for Loaded Atoms = %lf\n", N_scattering_average_loaded);
//************************************************/
// clearing memory (fix: fractional_energy_change was leaked)
cudaFree(dev_Atoms);
free(Atoms);
free(fractional_energy_change);
cudaError_t cudaResult;
cudaResult = cudaGetLastError();
if (cudaResult != cudaSuccess) {
printf("In what house, shall thy find solace? ");
// Fix: actually report what went wrong.
printf("CUDA error: %s\n", cudaGetErrorString(cudaResult));
}
}
|
21,385 | /*
* FileName: RayTracer_Kernel.cu
*
* Programmer: Jiayin Cao
*/
// Device buffers holding the running totals for the scan (prefix-sum) pass;
// presumably the two entries are ping-ponged between passes -- TODO confirm
// against the host-side setup code (not visible in this chunk).
int* g_ScanSum[2];
//some helper functions
// Normalize the xyz part of v in place (w is left untouched).
// Fix: use sqrtf instead of sqrt -- the original silently promoted the float
// expression to double and back, which is slower on GPU with no benefit here.
// NOTE(review): no guard for a zero-length vector, matching the original.
__device__ void d_normalize( float4* v )
{
	float s = v->x * v->x + v->y * v->y + v->z * v->z;
	s = sqrtf(s);
	v->x /= s;
	v->y /= s;
	v->z /= s;
}
//cross product of the xyz parts; w of the result is zeroed
__device__ float4 d_cross( const float4& v1 , const float4& v2 )
{
	return make_float4( v1.y * v2.z - v1.z * v2.y ,
	                    v1.z * v2.x - v1.x * v2.z ,
	                    v1.x * v2.y - v1.y * v2.x ,
	                    0.0f );
}
//clamp the value into [0, 1]
//(nested ternaries keep the original branch semantics, including NaN pass-through)
__device__ float d_clamp( const float v )
{
	return ( v > 1.0f ) ? 1.0f : ( ( v < 0.0f ) ? 0.0f : v );
}
//clamp every component of the float4 into [0, 1]
__device__ float4 d_saturate( const float4& v )
{
	float4 r;
	r.x = d_clamp( v.x );
	r.y = d_clamp( v.y );
	r.z = d_clamp( v.z );
	r.w = d_clamp( v.w );
	return r;
}
//dot product of the xyz parts (w is ignored)
__device__ float d_dot( const float4& v1 , const float4& v2 )
{
	// same left-to-right accumulation order as the original
	float sum = v1.x * v2.x;
	sum = sum + v1.y * v2.y;
	sum = sum + v1.z * v2.z;
	return sum;
}
//the Euclidean length of the xyz part of the vector (w is ignored)
//Fix: sqrtf avoids the silent float->double->float round-trip of sqrt().
__device__ float d_length( const float4& v )
{
	return sqrtf( v.x * v.x + v.y * v.y + v.z * v.z );
}
//component-wise arithmetic operators for float4 / float2
__device__ float4 operator+ ( const float4& v1 , const float4& v2 )
{
	float4 r;
	r.x = v1.x + v2.x;
	r.y = v1.y + v2.y;
	r.z = v1.z + v2.z;
	r.w = v1.w + v2.w;
	return r;
}
__device__ float4 operator- ( const float4& v1 , const float4& v2 )
{
	float4 r;
	r.x = v1.x - v2.x;
	r.y = v1.y - v2.y;
	r.z = v1.z - v2.z;
	r.w = v1.w - v2.w;
	return r;
}
__device__ float4 operator* ( const float4& v , const float d )
{
	// uniform scale, w included
	float4 r;
	r.x = v.x * d;
	r.y = v.y * d;
	r.z = v.z * d;
	r.w = v.w * d;
	return r;
}
__device__ float4 operator* ( const float d , const float4& v )
{
	// scalar on the left; same result as v * d
	float4 r;
	r.x = v.x * d;
	r.y = v.y * d;
	r.z = v.z * d;
	r.w = v.w * d;
	return r;
}
__device__ float4 operator* ( const float4& v1 , const float4& v2 )
{
	// component-wise (Hadamard) product
	float4 r;
	r.x = v1.x * v2.x;
	r.y = v1.y * v2.y;
	r.z = v1.z * v2.z;
	r.w = v1.w * v2.w;
	return r;
}
__device__ float4 operator+= ( float4& v1 , const float4& v2 )
{
	// in-place add; note it returns the updated value by copy, not by reference
	v1 = v1 + v2;
	return v1;
}
__device__ float2 operator * ( const float d , const float2& v )
{
	float2 r;
	r.x = d * v.x;
	r.y = d * v.y;
	return r;
}
__device__ float2 operator + ( const float2& v1 , const float2& v2 )
{
	float2 r;
	r.x = v1.x + v2.x;
	r.y = v1.y + v2.y;
	return r;
}
__device__ float2 operator - ( const float2& v1 , const float2& v2 )
{
	float2 r;
	r.x = v1.x - v2.x;
	r.y = v1.y - v2.y;
	return r;
}
__device__ float2 floor( const float2& v )
{
	// truncates toward zero via an int cast -- differs from floorf for
	// negative inputs; kept as-is to preserve the original behavior
	float2 r;
	r.x = (float)( (int) v.x );
	r.y = (float)( (int) v.y );
	return r;
}
//mirror reflection: dir - 2*dot(dir, normal)*normal, with w forced to 0
__device__ float4 d_reflect( const float4& dir , const float4& normal )
{
	float scale = ( -2.0f ) * d_dot( dir , normal );
	return make_float4( dir.x + scale * normal.x ,
	                    dir.y + scale * normal.y ,
	                    dir.z + scale * normal.z ,
	                    0.0f );
}
//refraction direction via Snell's law; falls back to pure reflection on
//total internal reflection. `rate` is presumably the refractive-index
//ratio for entering the surface -- TODO confirm against callers.
__device__ float4 d_refract( const float4& dir , float4 normal , float rate )
{
float4 r;
// Ray exits the medium: flip the normal and invert the index ratio.
if( d_dot( dir , normal ) > 0 )
{
normal = -1.0f * normal;
rate = 1.0f / rate;
}
// Cosine of the incidence angle (local name shadows ::cos).
float cos = -1.0f * d_dot( dir , normal );
// Snell discriminant; negative means total internal reflection.
float t = 1 - rate * rate * ( 1 - cos * cos );
if( t < 0 )
{
r = d_reflect( dir , normal );
}else
{
// Cosine of the refraction angle.
float cos2 = sqrt( t );
r = rate * dir + ( rate * cos - cos2 ) * normal ;
}
return r;
}
//ray / axis-aligned-box intersection (slab method).
//returns: x = entry t, y = exit t, z = 1.0 if intersected else 0.0, w unused;
//a positive `length` caps the exit t (e.g. for shadow rays)
__device__ float4 kernelIntersectBoundingBox( float4& ori , float4& dir , float4& min , float4& max , float length )
{
	//the result starts as "no hit": z stays 0 unless all three slabs pass
	float4 result = make_float4( 0.0f , 9999999.0f , 0.0f , 0.0f );
	//limit the maximum value
	if( length > 0 )
		result.y = length;
	//the variables
	float t1 , t2;
	// X slab
	if( fabs( dir.x ) < 0.0000001f )
	{
		// ray parallel to the slab: reject if the origin lies outside it
		if( ori.x > max.x || ori.x < min.x )
			return result;
	}else
	{
		t1 = ( max.x - ori.x ) / dir.x;
		t2 = ( min.x - ori.x ) / dir.x;
		if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; }
		//clamp the running [entry, exit] interval
		if( t1 > result.x ) result.x = t1;
		if( t2 < result.y ) result.y = t2;
		if( result.x > result.y )
			return result;
	}
	// Y slab
	if( fabs( dir.y ) < 0.0000001f )
	{
		if( ori.y > max.y || ori.y < min.y )
			return result;
	}else
	{
		t1 = ( max.y - ori.y ) / dir.y;
		t2 = ( min.y - ori.y ) / dir.y;
		if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; }
		//clamp
		if( t1 > result.x ) result.x = t1;
		if( t2 < result.y ) result.y = t2;
		if( result.x > result.y )
			return result;
	}
	// Z slab
	// Fix: the original tested fabs(dir.y) here (copy-paste bug), so rays
	// parallel to the Z axis divided by dir.z == 0 in the else branch.
	if( fabs( dir.z ) < 0.0000001f )
	{
		if( ori.z > max.z || ori.z < min.z )
			return result;
	}else
	{
		t1 = ( max.z - ori.z ) / dir.z;
		t2 = ( min.z - ori.z ) / dir.z;
		if( t1 > t2 ) { float t = t1; t1 = t2; t2 = t; }
		//clamp
		if( t1 > result.x ) result.x = t1;
		if( t2 < result.y ) result.y = t2;
		if( result.x > result.y )
			return result;
	}
	//all slabs passed: flag the hit
	result.z = 1.0f;
	return result;
}
//ray intersection with the supporting plane of triangle (v1, v2, v3).
//returns the hit point in xyz; w >= 0 holds the ray parameter (hit valid),
//w < 0 marks a miss (the plane lies behind the ray origin)
__device__ float4 kernelIntersectPlane( const float4& v1 , const float4& v2 , const float4& v3 , const float4& ori , const float4& dir )
{
//w : >= 0 ( intersected point enable ) , < 0 ( disable )
float4 result = make_float4( 0.0f , 0.0f , 0.0f , 0.0f );
//get the normal of the plane
float4 normal = d_cross( v2 - v1 , v3 - v1 );
//get the factor; sign convention: t is negative for hits in front of the
//origin, so the point computed below is effectively ori + |t|*dir
float t = d_dot( normal , ori - v1 ) / d_dot( normal , dir );
//set the result
result = ori - t * dir;
if( t <= 0.0f )
result.w = -t;
else
result.w = -1;
return result;
}
//ray / triangle intersection: plane hit first, then an inside-outside test
//against the three edges. On a miss, w of the result is set negative.
__device__ float4 kernelIntersectTriangle( const float4& v1 , const float4& v2 , const float4& v3 , const float4& ori , const float4& dir )
{
//the plane-intersection result (w < 0 means the plane itself was missed)
float4 result = kernelIntersectPlane( v1 , v2 , v3 , ori , dir );
if( result.w < 0 )
return result;
//edge cross products: each is parallel to the plane normal, so two sign
//checks suffice to prove the point lies on the same side of all edges
float4 d1 = d_cross( result - v2 , v1 - v2 );
float4 d2 = d_cross( result - v3 , v2 - v3 );
float4 d3 = d_cross( result - v1 , v3 - v1 );
float f1 = d_dot( d1 , d2 );
float f2 = d_dot( d2 , d3 );
//tiny negative tolerance absorbs float round-off right on an edge
if( !( f1 >= -0.000000000000001f && f2 >= -0.000000000000001f ) )
result.w = -1.0f;
return result;
}
//check whether the point lies inside the (slightly inflated) bounding box
__device__ int kernelPointInBoundingBox( const float4& p , const float4& min , const float4& max )
{
	const float threshold = 0.00001f;   // tolerance against float round-off
	// same strict comparisons as the original, just restructured
	bool outside =
		p.x < min.x - threshold || p.y < min.y - threshold || p.z < min.z - threshold ||
		p.x > max.x + threshold || p.y > max.y + threshold || p.z > max.z + threshold;
	return outside ? false : true;
}
//compute interpolation (barycentric-style) weights for the intersected point,
//one weight per vertex, proportional to the opposite sub-triangle's area
__device__ float4 kernelInterploted( const float4& v1 , const float4& v2 , const float4& v3 , const float4& intersected )
{
	//vectors from the intersection point to each vertex
	float4 e1 = intersected - v1;
	float4 e2 = intersected - v2;
	float4 e3 = intersected - v3;
	//compute the sub-triangle areas
	float4 area;
	area.x = d_length( d_cross( e2 , e3 ) );
	area.y = d_length( d_cross( e3 , e1 ) );
	area.z = d_length( d_cross( e1 , e2 ) );
	//Fix: the original left area.w uninitialized and then scaled it in the
	//`area * d` below, returning indeterminate data in w.
	area.w = 0.0f;
	//normalize so the three weights sum to one
	//NOTE(review): a degenerate hit (zero total area) still divides by zero
	float d = 1.0f / ( area.x + area.y + area.z );
	return area * d;
}
//clear the color buffer to zero and reset the pixel-index map to identity
__global__ void kernelInitBuffer( float4* buffer ,
                                  int* markedBuffer ,
                                  int pixelNum )
{
	const int tid = threadIdx.x + blockIdx.x * blockDim.x;
	if( tid >= pixelNum )
		return;   // tail guard
	buffer[tid] = make_float4( 0.0f , 0.0f , 0.0f , 0.0f );
	markedBuffer[tid] = tid;
}
//for each ray, intersect it with the supporting plane of its assigned
//triangle and stamp the triangle id into w of the result
__global__ void kernelGenerateIntersectedPoint( float4* rayOri ,
                                                float4* rayDir ,
                                                float4* vertexBuffer ,
                                                int rayNum ,
                                                int* index ,
                                                float4* result )
{
	const int tid = threadIdx.x + blockIdx.x * blockDim.x;
	if( tid >= rayNum )
		return;   // tail guard
	//triangle assigned to this ray; three consecutive vertices per triangle
	const int triId = index[tid];
	const int base = 3 * triId;
	const float4 v1 = vertexBuffer[base];
	const float4 v2 = vertexBuffer[base + 1];
	const float4 v3 = vertexBuffer[base + 2];
	//plane intersection, then overwrite w (the validity flag) with the id
	float4 hit = kernelIntersectPlane( v1 , v2 , v3 , rayOri[tid] , rayDir[tid] );
	hit.w = triId;
	result[tid] = hit;
}
//Generate one primary camera ray per pixel.
//viewInfo: x = image width, y = image height, z/w = projection scale factors.
//invViewMatrix: 4x4 row-major camera-to-world matrix (elements 12..14 hold
//the camera position) -- presumably; TODO confirm against the host setup.
__global__ void kernelGeneratePrimaryRays( float4 viewInfo ,
float* invViewMatrix ,
float4* rayOri ,
float4* rayDir )
{
//get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//limit the thread id to width * height pixels
if( tid >= (int)viewInfo.x * (int)viewInfo.y )
return;
// get the pixel coordinate first
uint2 coord;
coord.x = tid % (int) viewInfo.x;
coord.y = tid / (int)viewInfo.x;
// compute the vector of the ray in screen space (NDC-like, y flipped)
float2 v;
v.x = ( ( ( 2.0f * coord.x ) / viewInfo.x ) - 1.0f ) / viewInfo.z;
v.y = -1.0f * ( ( ( 2.0f * coord.y ) / viewInfo.y ) - 1.0f ) / viewInfo.w;
//ray origin = camera position; w carries the pixel index
rayOri[tid] = make_float4( invViewMatrix[12] , invViewMatrix[13] , invViewMatrix[14] , tid );
//transform the screen-space direction into world space
float4 dir;
dir.x = ( v.x * invViewMatrix[0] + v.y * invViewMatrix[4] + invViewMatrix[8] );
dir.y = ( v.x * invViewMatrix[1] + v.y * invViewMatrix[5] + invViewMatrix[9] );
dir.z = ( v.x * invViewMatrix[2] + v.y * invViewMatrix[6] + invViewMatrix[10] );
dir.w = 0.0f;
d_normalize( &dir );
rayDir[tid] = make_float4( dir.x , dir.y , dir.z , 1.0f );
}
//traverse the ray through the kd-tree and return the nearest hit.
//kd-tree layout (4 float4s per node, at kdTree[4*i .. 4*i+3]):
//  [0] header    : x = parent index, y/z = child indices, w = triangle count
//  [1] splitInfo : x = split axis 0/1/2 (< 0 for a leaf), y = split position
//  [2] = min bound , [3] = max bound
//return: xyz = hit point, w = triangle index, or w = -1 when nothing is hit.
//`length` > 0 caps the ray (shadow rays: any hit within the cap is accepted).
//Stackless traversal: the depth counter plus the `mask` bitfield emulate a
//stack, so effective tree depth is limited to 32 levels.
__device__ float4 kernelTraverseRay( float4* kdTree ,
int* indexMap ,
int* offsetBuffer ,
float4* vertexBuffer ,
float4& rayOri ,
float4& rayDir ,
float length )
{
//the intersected result
float4 result = make_float4( 0.0f , 0.0f , 0.0f , -1.0f );
//tree node information
float4 header;
float4 splitInfo;
//the bounding box of the root (node 0 occupies kdTree[0..3])
float4 minBB = kdTree[2];
float4 maxBB = kdTree[3];
//check if the ray intersects with the current bounding box of the root
result = kernelIntersectBoundingBox( rayOri , rayDir , minBB , maxBB , length );
//if the ray doesn't cross the kd-tree , just return
if( result.z < 0.5f )
{
result = make_float4( 0.0f , 0.0f , 0.0f , -1.0f );
return result;
}
//current traversing node
int currentNodeIndex = 0;
//the mask to mark the traversed node (one bit per depth level)
unsigned int mask = 0;
//current traverse depth
int currentTraverseDepth = 0;
//current inPoint (box entry point) when traversing the node
float4 inPoint = rayOri + result.x * rayDir ;
while( currentTraverseDepth >= 0 )
{
//descend to the leaf containing the entry point
do
{
//the current node offset
int currentNodeOffset = currentNodeIndex * 4;
//get the current node information
header = kdTree[ currentNodeOffset ];
splitInfo = kdTree[currentNodeOffset + 1 ];
//check if it's a leaf node
if( splitInfo.x < 0 )
break;
//get the split axis
int splitAxis = (int) splitInfo.x;
//pick the entry-point coordinate along the split axis
float sPos = 0.0f;
if( splitAxis == 0 )
sPos = inPoint.x;
else if( splitAxis == 1 )
sPos = inPoint.y;
else if( splitAxis == 2 )
sPos = inPoint.z;
//update the virtual stack and traverse the node
if( splitInfo.y > sPos )
currentNodeIndex = (int)header.y;
else
currentNodeIndex = (int)header.z;
//increase the current traverse depth
currentTraverseDepth++;
}while( true );
//get the offset and triangle number of the leaf
int triOffset = offsetBuffer[currentNodeIndex];
int triNumber = (int)header.w;
//min value of the accepted hit parameter
float minFactor = 9999999.0f;
if( length > 0 )
minFactor = length;
//triangle index of the closest accepted hit
int oriTriIndex = -1;
//the bounding box of the leaf
minBB = kdTree[currentNodeIndex*4+2];
maxBB = kdTree[currentNodeIndex*4+3];
//intersect with the current triangles
for( int i = 0 ; i < triNumber ; i++ )
{
//get the triangles
int triIndex = indexMap[triOffset+i];
//get the vertex
float4 v1 = vertexBuffer[3*triIndex];
float4 v2 = vertexBuffer[3*triIndex+1];
float4 v3 = vertexBuffer[3*triIndex+2];
//get the intersected point
result = kernelIntersectTriangle( v1 , v2 , v3 , rayOri , rayDir );
//limit the factor
if( result.w > 0.0f && result.w < minFactor )
{
//only accept hits inside the leaf's box (triangles may span leaves)
if( kernelPointInBoundingBox( result , minBB , maxBB ) )
{
minFactor = result.w;
oriTriIndex = triIndex;
//shadow rays: any hit within the cap is enough
if( length > 0 )
break;
}
}
}
if( oriTriIndex >= 0 )
{
result = rayOri + minFactor * rayDir;
result.w = (float)oriTriIndex;
return result;
}
//back track here
while( currentTraverseDepth >= 0 )
{
if( currentTraverseDepth == 0 )
return make_float4( 0 , 0 , 0 , -1.0f );
//bit set means the sibling at this depth was already visited
if( mask & ( 0x00000001 << currentTraverseDepth ) )
{
//update the mask
mask &= ~(0x00000001 << currentTraverseDepth );
//decrease the current depth;
currentTraverseDepth--;
//get to the father node
currentNodeIndex = (int)kdTree[ 4 * currentNodeIndex ].x;
//continue to next level
continue;
}
//the sibling: siblings occupy consecutive indices as an even/odd pair
int otherNode = currentNodeIndex + 1;
if( currentNodeIndex % 2 == 0 )
otherNode -= 2;
//get the bounding box of the other node
int otherNodeOffset = 4 * otherNode;
minBB = kdTree[ otherNodeOffset + 2 ];
maxBB = kdTree[ otherNodeOffset + 3 ];
//get the intersected result
float4 bi = kernelIntersectBoundingBox( rayOri , rayDir , minBB , maxBB , length );
if( bi.z > 0.5f )
{
//update the current traverse node
currentNodeIndex = otherNode;
//update the inPoint
inPoint = rayOri + bi.x * rayDir ;
//update the mask
mask |= 0x00000001 << currentTraverseDepth;
break;
}else
{
//update the mask
mask &= ~( 0x00000001 << currentTraverseDepth );
//decrease current depth
currentTraverseDepth--;
//get to the father node
currentNodeIndex = (int) kdTree[ 4 * currentNodeIndex ].x;
}
}
}
//unreachable fallback: the inner loops always return before reaching here
result.w = -1.0f;
return result;
}
// Compute the first ray/scene intersection for an entire batch of rays.
// One thread per ray; the kd-tree traversal helper does the actual work.
// result[i].w receives the hit triangle index (negative on a miss).
__global__ void kernelGetIntersectedPoint( float4* rayOri ,
			float4* rayDir ,
			float4* kdTree ,
			int* indexMap ,
			int* offsetBuffer ,
			float4* vertexBuffer ,
			int rayNumber ,
			float4* result )
{
	// grid-wide ray index, one ray per thread
	const int rayId = blockIdx.x * blockDim.x + threadIdx.x;
	// threads past the end of the batch have nothing to do
	if( rayId >= rayNumber )
		return;
	// -1.0f length: no distance cap, return the closest hit
	result[rayId] = kernelTraverseRay( kdTree , indexMap , offsetBuffer , vertexBuffer , rayOri[rayId] , rayDir[rayId] , -1.0f );
}
//do pixel shader here
//Shades one pixel per thread from its ray hit and accumulates the result
//into imageBuffer. Material layout: 4 consecutive float4 per material index
//(ambient, diffuse, specular, properties); properties.x is the texture
//index (< 0 means untextured). intersected[tid].w carries the hit triangle
//index (< 0 for a miss); rayDir[tid].w carries the ray's colour weight.
__global__ void kernelPixelShader( float4* intersected ,
float4* vertexBuffer ,
float4* normalBuffer ,
float2* texCoordinateBuffer ,
float4* kdTree ,
int* indexMap ,
int* offsetIndexBuffer,
float4* lightBuffer ,
int* attributeBuffer ,
float4* materialBuffer ,
int* textureOffset ,
float4* customTexture ,
int pixelNum ,
float4* rayDir ,
int* offsetBuffer ,
float4* destNormalBuffer ,
float4* imageBuffer )
{
//get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//limit the thread id
if( tid >= pixelNum )
return;
//hit triangle index is stored in .w of the intersection result
int triIndex = (int)intersected[tid].w;
int triOffset = 3 * triIndex;
float4 color = make_float4( 0.0f , 0.0f , 0.0f , 0.0f );
//a negative index means the ray hit nothing: leave the pixel untouched
if( triIndex < 0 )
return;
//get the material index
int matIndex = attributeBuffer[triIndex];
//the material buffer: 4 float4 per material
float4 ambient = materialBuffer[ 4 * matIndex ];
float4 diffuse = materialBuffer[ 4 * matIndex + 1 ];
float4 specular = materialBuffer[ 4 * matIndex + 2 ];
float4 matprop = materialBuffer[ 4 * matIndex + 3 ];
//load the vertex
float4 v1 = vertexBuffer[ triOffset ];
float4 v2 = vertexBuffer[ triOffset + 1 ];
float4 v3 = vertexBuffer[ triOffset + 2 ];
//interpolation weights of the hit point w.r.t. the triangle vertices
//(presumably barycentric -- see kernelInterploted; confirm)
float4 interploted = kernelInterploted( v1 , v2 , v3 , intersected[tid] );
//blend the per-vertex normals with those weights
float4 n1 = normalBuffer[ triOffset ];
float4 n2 = normalBuffer[ triOffset + 1 ];
float4 n3 = normalBuffer[ triOffset + 2 ];
float4 normal = n1 * interploted.x + n2 * interploted.y + n3 * interploted.z;
d_normalize( &normal );
//publish the shading normal; .w smuggles the material index to later passes
destNormalBuffer[tid] = normal;
destNormalBuffer[tid].w = matIndex;
//colour weight carried by this ray
float density = rayDir[tid].w;
if( matprop.x > -0.5f )
{
//textured material: interpolate and wrap the UV coordinates into [0,1)
float2 t1 = texCoordinateBuffer[ triOffset ];
float2 t2 = texCoordinateBuffer[ triOffset + 1 ];
float2 t3 = texCoordinateBuffer[ triOffset + 2 ];
float2 texCoord = interploted.x * t1 + interploted.y * t2 + interploted.z * t3;
texCoord = texCoord - floor( texCoord );
if( texCoord.x < 0.0f ) texCoord.x += 1.0f;
if( texCoord.y < 0.0f ) texCoord.y += 1.0f;
//texel fetch: entry 0 of the texture block holds its metadata.
//NOTE(review): imgData[0].y/.z look like width and height, and .y is
//also used as the row pitch below -- confirm against the texture packer.
float4* imgData = customTexture + textureOffset[(int)matprop.x];
int x = imgData[0].y * texCoord.x ;
int y = imgData[0].z * texCoord.y ;
int texOffset = y * imgData[0].y + x + 1;
diffuse = diffuse * (*(imgData + texOffset)) ;
}
//start from the ambient term
color = ambient;
//accumulate the contribution of up to 2 lights
for( int i = 0 ; i < 2 ; i++ )
{
//lights with near-zero intensity (.w) are disabled
if( lightBuffer[i].w < 0.01f )
continue;
//direction from the light towards the hit point
float4 lightDir = intersected[tid] - lightBuffer[i];
//shadow-ray length, slightly shortened so the surface itself is not hit
float shadowLen = 0.98f * d_length(lightDir);
d_normalize( &lightDir );
//facing test: positive dot means the surface points away from the light
float dotProduct = d_dot( lightDir , normal );
if( dotProduct > 0.0f )
continue;
{
//cast a shadow ray from the light; any hit before the surface blocks it
float4 shadowFactor = kernelTraverseRay( kdTree , indexMap , offsetIndexBuffer , vertexBuffer , lightBuffer[i] , lightDir , shadowLen );
if( shadowFactor.w >= 0.0f )
continue;
}
//Lambertian term scaled by the light intensity
float lightDensity = d_clamp( -1.0f * dotProduct ) * lightBuffer[i].w;
//diffuse contribution
color += diffuse * lightDensity ;
//Phong specular; .w of the specular colour is the shininess exponent
if( specular.w > 0 )
{
//reflect direction
float4 reflectDir = d_reflect( lightDir , normal );
d_normalize( &reflectDir );
//alignment of the reflected light with the viewing ray
float d = d_clamp(-d_dot( reflectDir , rayDir[tid] ));
if( d > 0 )
color += pow( d , specular.w ) * specular;
}
}
//accumulate into the pixel this ray belongs to, clamped to [0,1]
int offset = offsetBuffer[tid];
imageBuffer[offset] = d_saturate( imageBuffer[offset] + d_saturate( color * density ) );
}
//generate next level rays
//For each ray that hit a surface, spawn at most one secondary ray
//(reflection takes precedence over refraction) and flag it in markedBuffer
//so a later scan/compaction pass can gather the surviving rays.
//Material properties row (materialInfo[4*m+3]): .y = reflection strength,
//.z = refraction strength, .w = index of refraction.
__global__ void kernelGenerateNextLevelRays( float4* materialInfo ,
float4* intersected ,
float4* backNormalBuffer ,
float4* rayOri ,
float4* rayDir ,
int rayNumber ,
float4* destRayOri ,
float4* destRayDir ,
int* markedBuffer )
{
//get the thread id
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//limit the thread id
if( tid >= rayNumber )
return;
//default: this ray spawns no secondary ray
markedBuffer[tid] = 0;
//load the intersected point
float4 intersectedPoint = intersected[tid];
//.w carries the hit triangle index; misses spawn nothing
int triIndex = (int)intersectedPoint.w;
if( triIndex < 0 )
return;
//shading normal from the shading pass (.w smuggles the material index)
float4 normal = backNormalBuffer[tid];
//get the material index
int matIndex = (int)normal.w;
//material properties row
float4 matInfo = materialInfo[4*matIndex+3];
//load the ray origin and direction
float4 ori = rayOri[tid];
float4 dir = rayDir[tid];
//reflective material: spawn a reflection ray
if( matInfo.y > 0 )
{
float4 reflectDir = d_reflect( dir , normal );
d_normalize( &reflectDir );
//.w = colour weight, attenuated by the reflection strength
reflectDir.w = dir.w * matInfo.y;
destRayDir[tid] = reflectDir;
//offset the origin along the new direction, presumably to avoid
//re-hitting the originating surface (self-intersection bias)
destRayOri[tid] = intersectedPoint + reflectDir * 0.1f;
//keep the pixel offset carried in the origin's .w
destRayOri[tid].w = ori.w;
markedBuffer[tid] = 1;
}else if( matInfo.z > 0 )
{
//refractive material: bend the ray by the inverse refraction index
float4 refractDir = d_refract( dir , normal , 1.0f / matInfo.w );
d_normalize( &refractDir );
refractDir.w = dir.w * matInfo.z;
destRayDir[tid] = refractDir;
//smaller bias for refraction rays, which pass through the surface
destRayOri[tid] = intersectedPoint + refractDir * 0.02f;
destRayOri[tid].w = ori.w;
markedBuffer[tid] = 1;
}
}
//copy new rays
// Stream-compaction step: a ray whose exclusive-scan slot differs from its
// right neighbour's was marked, and is copied to its compacted position.
__global__ void kernelCopyNewRays( float4* srcRayOri ,
			float4* srcRayDir ,
			int* scanResult ,
			int rayNumber ,
			float4* destRayOri ,
			float4* destRayDir ,
			int* offsets )
{
	// one source ray per thread
	const int ray = threadIdx.x + blockIdx.x * blockDim.x;
	if( ray >= rayNumber )
		return;
	// exclusive-scan value = destination slot of this ray (if it survives).
	// NOTE(review): reads scanResult[ray+1] -- assumes the scan buffer holds
	// at least rayNumber+1 entries; confirm against the allocator.
	const int slot = scanResult[ray];
	if( scanResult[ray + 1] == slot )
		return;	// scan did not advance here: the ray was not marked
	destRayOri[slot] = srcRayOri[ray];
	destRayDir[slot] = srcRayDir[ray];
	// .w of the origin carries the pixel offset; stored as an int
	offsets[slot] = (int)srcRayOri[ray].w;
}
//Do scan on GPU
//Blelloch-style exclusive scan: each 256-thread block scans one 512-element
//segment of `data` in place. When oBlockRes > 0 the per-segment totals are
//written to blockRes for the recursive multi-block pass (see cudaScan).
//NOTE(review): the hard-coded 256/512/511 constants require blockDim.x==256,
//and `data` must be padded so writes up to the end of the last 512-element
//segment are legal (the tail is zeroed here) -- confirm the allocator.
__global__ void kernelScan( int* data , int number , int oBlockRes , int* blockRes )
{
	//shared working set: 512 elements, two per thread
	__shared__ int sharedMem[512];
	//local and global thread ids
	int ltid = threadIdx.x;
	int gtid = ltid + blockDim.x * blockIdx.x;
	//total of this 512-element segment (only thread 0's copy is used)
	int blocksum = 0;
	//zero the padding past the end of the input
	if( 2 * gtid >= number )
	{
		data[ 2 * gtid ] = 0;
		data[ 2 * gtid + 1 ] = 0;
	}else if( 2 * gtid == number - 1 )
		data[ 2 * gtid + 1 ] = 0;
	//Load the data into shared memory. Each thread loads the same pair it
	//may have zeroed above, so no barrier is needed in between.
	sharedMem[2*ltid] = data[2*gtid];
	sharedMem[2*ltid+1] = data[2*gtid+1];
	//up-sweep (reduce) phase; stops one level early, so the two half-tree
	//roots end up in sharedMem[255] and sharedMem[511]
	int offset = 1;
	for( int d = 256 ; d > 1 ; d >>= 1 )
	{
		//sync the threads in a group
		__syncthreads();
		if( ltid < d )
		{
			int ai = offset * ( 2 * ltid + 1 ) - 1;
			int bi = ai + offset;
			sharedMem[bi] += sharedMem[ai];
		}
		offset *= 2;
	}
	//FIX: barrier before reading the up-sweep result. Previously every
	//thread read sharedMem[511]/sharedMem[255] with no sync after the last
	//up-sweep step (those slots are written by threads 0 and 1), and thread
	//0 could overwrite them while other threads were still reading -- a
	//data race. Only thread 0 needs the total, so compute it inside the
	//same guarded section that seeds the down-sweep.
	__syncthreads();
	if( ltid == 0 )
	{
		//segment total = sum of the two half-tree roots
		blocksum = sharedMem[511] + sharedMem[255];
		//manual root step of the exclusive down-sweep
		sharedMem[511] = sharedMem[255];
		sharedMem[255] = 0;
	}
	//down-sweep phase: turn the partial sums into an exclusive prefix scan
	for( int d = 2 ; d < 512 ; d *= 2 )
	{
		__syncthreads();
		offset >>= 1;
		if( ltid < d )
		{
			int ai = offset * ( 2 * ltid + 1 ) - 1 ;
			int bi = ai + offset ;
			int t = sharedMem[ai];
			sharedMem[ai] = sharedMem[bi];
			sharedMem[bi] += t;
		}
	}
	__syncthreads();
	//write the scanned segment back
	data[ 2 * gtid ] = sharedMem[ 2 * ltid ];
	data[ 2 * gtid + 1 ] = sharedMem[ 2 * ltid + 1 ];
	//publish the per-segment total for the recursive pass
	if( oBlockRes > 0 )
	{
		if( ltid == 0 )
		{
			blockRes[blockIdx.x] = blocksum;
		}
	}
}
//Add the block result to the segmented scan result
// One thread per element; every 512-element segment shares one offset from
// the scanned block totals. Launched with 512 threads per block (cudaScan).
__global__ void kernelUniformAdd( int* data , int* blockResult )
{
	// global element index
	const int element = threadIdx.x + blockDim.x * blockIdx.x;
	// offset of the 512-element segment this element belongs to
	data[element] += blockResult[element / 512];
}
//clear the noise of the image
// Despeckle filter: when a pixel differs strongly (colour distance > 0.4)
// from two or more of its 4-neighbours, it is replaced by a quarter of the
// neighbour sum. Border pixels have fewer neighbours but keep the same
// 0.25 weight, matching the original behaviour.
__global__ void kernelClearNoise( float4* imgData ,
			int width ,
			int height ,
			float4* targetData )
{
	// one pixel per thread
	const int pixel = threadIdx.x + blockIdx.x * blockDim.x;
	if( pixel >= width * height )
		return;
	// colour-distance threshold that counts a neighbour as "different"
	const float threshold = 0.4f;
	// pixel coordinates
	const int x = pixel % width;
	const int y = pixel / width;
	float4 color = imgData[pixel];
	float4 neighbourSum = make_float4( 0 , 0 , 0 , 0 );
	int outliers = 0;
	// left neighbour
	if( x > 0 )
	{
		const float4 left = imgData[pixel - 1];
		if( d_length( color - left ) > threshold )
			outliers++;
		neighbourSum += left;
	}
	// right neighbour
	if( x < width - 1 )
	{
		const float4 right = imgData[pixel + 1];
		if( d_length( color - right ) > threshold )
			outliers++;
		neighbourSum += right;
	}
	// upper neighbour
	if( y > 0 )
	{
		const float4 up = imgData[pixel - width];
		if( d_length( color - up ) > threshold )
			outliers++;
		neighbourSum += up;
	}
	// lower neighbour
	if( y < height - 1 )
	{
		const float4 down = imgData[pixel + width];
		if( d_length( color - down ) > threshold )
			outliers++;
		neighbourSum += down;
	}
	// smooth only when the pixel disagrees with at least two neighbours
	if( outliers >= 2 )
		color = neighbourSum * 0.25f;
	targetData[pixel] = color;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
//initialize buffer
// Host wrapper: clears the image/marked buffers, one thread per pixel.
extern "C" void cudaInitBuffer( float4* buffer ,
			int* markedBuffer ,
			int pixelNum )
{
	// 256 threads per block, ceil-divide so every pixel is covered
	const int threadsPerBlock = 256;
	const int blocks = ( pixelNum + threadsPerBlock - 1 ) / threadsPerBlock;
	kernelInitBuffer<<<blocks,threadsPerBlock>>>( buffer , markedBuffer , pixelNum );
}
//generate primary ray intersected result
// Host wrapper: brute-force intersection of the ray batch, one thread/ray.
extern "C" void cudaGenerateIntersectedPoint( float4* rayOri ,
			float4* rayDir ,
			float4* vertexBuffer ,
			int rayNum ,
			int* index ,
			float4* result )
{
	// 256 threads per block, ceil-divide so every ray is covered
	const int threadsPerBlock = 256;
	const int blocks = ( rayNum + threadsPerBlock - 1 ) / threadsPerBlock;
	kernelGenerateIntersectedPoint<<<blocks , threadsPerBlock>>>( rayOri , rayDir , vertexBuffer , rayNum , index , result );
}
//Generate primary rays
// Host wrapper: spawns one camera ray per pixel.
// viewInfo.x / viewInfo.y hold the image width and height.
extern "C" void cudaGeneratePrimaryRays( float4 viewInfo ,
			float* invViewMatrix ,
			float4* rayOri ,
			float4* rayDir )
{
	// total number of rays = pixel count
	const int rayNum = (int)( viewInfo.x * viewInfo.y );
	// 256 threads per block, ceil-divide so every ray is covered
	const int threadsPerBlock = 256;
	const int blocks = ( rayNum + threadsPerBlock - 1 ) / threadsPerBlock;
	kernelGeneratePrimaryRays<<<blocks , threadsPerBlock>>>( viewInfo , invViewMatrix , rayOri , rayDir );
}
//get intersected point
// Host wrapper: kd-tree accelerated intersection of the ray batch.
extern "C" void cudaGetIntersectedPoint( float4* rayOri ,
			float4* rayDir ,
			float4* kdTree ,
			int* indexMap ,
			int* offsetBuffer ,
			float4* vertexBuffer ,
			int rayNumber ,
			float4* result )
{
	// 256 threads per block, ceil-divide so every ray is covered
	const int threadsPerBlock = 256;
	const int blocks = ( rayNumber + threadsPerBlock - 1 ) / threadsPerBlock;
	kernelGetIntersectedPoint<<<blocks , threadsPerBlock>>>( rayOri , rayDir , kdTree , indexMap , offsetBuffer , vertexBuffer , rayNumber , result );
}
//do pixel shader
// Host wrapper: shades every intersected pixel, one thread per pixel.
extern "C" void cudaPixelShader( float4* interseced ,
			float4* vertexBuffer ,
			float4* normalBuffer ,
			float2* texCoordinateBuffer ,
			float4* kdTree ,
			int* indexMap ,
			int* offsetIndexBuffer ,
			float4* lightBuffer ,
			int* attributeBuffer ,
			float4* materialBuffer ,
			int* textureOffset ,
			float4* customTexture ,
			int pixelNum ,
			float4* rayDir ,
			int* offsetBuffer ,
			float4* destNormalBuffer ,
			float4* imageBuffer )
{
	// 256 threads per block, ceil-divide so every pixel is covered
	const int threadsPerBlock = 256;
	const int blocks = ( pixelNum + threadsPerBlock - 1 ) / threadsPerBlock;
	kernelPixelShader<<<blocks , threadsPerBlock>>>( interseced , vertexBuffer , normalBuffer , texCoordinateBuffer ,
		kdTree , indexMap , offsetIndexBuffer , lightBuffer , attributeBuffer , materialBuffer ,
		textureOffset , customTexture , pixelNum , rayDir , offsetBuffer , destNormalBuffer , imageBuffer );
}
//generate next level rays
// Host wrapper: spawns reflection/refraction rays for the next bounce.
extern "C" void cudaGenerateNextLevelRays( float4* materialInfo ,
			float4* intersected ,
			float4* backNormalBuffer ,
			float4* rayOri ,
			float4* rayDir ,
			int rayNumber ,
			float4* destRayOri ,
			float4* destRayDir ,
			int* markedBuffer )
{
	// 256 threads per block, ceil-divide so every ray is covered
	const int threadsPerBlock = 256;
	const int blocks = ( rayNumber + threadsPerBlock - 1 ) / threadsPerBlock;
	kernelGenerateNextLevelRays<<<blocks , threadsPerBlock>>>( materialInfo , intersected , backNormalBuffer , rayOri , rayDir ,
		rayNumber , destRayOri , destRayDir , markedBuffer );
}
//do scan on gpu
//Recursive GPU exclusive scan of `data` (num elements). Each kernelScan
//block (256 threads) scans a 512-element segment and emits its total into
//g_ScanSum[level] (a per-level device buffer defined elsewhere). Those
//totals are then scanned -- in a single launch when one pass suffices
//(num <= 512*512 = 262144, i.e. at most 512 totals), recursively otherwise
//-- and kernelUniformAdd folds the segment offsets back in.
//NOTE(review): `data` must be padded up to a multiple of 512 because
//kernelScan writes the tail -- confirm against the allocator.
extern "C" void cudaScan( int* data , int num , int level )
{
/* //allocate the number of data
int* cpuData = new int[num];
//pass the data from gpu to cpu
cudaMemcpy( cpuData , data , sizeof( int ) * ( num - 1 ) , cudaMemcpyDeviceToHost );
int last = 0;
for( int i = 0 ; i < num ; i++ )
{
int oldLast = last;
last += cpuData[i];
cpuData[i] = oldLast;
}
//pass the data back from cpu to gpu
cudaMemcpy( data , cpuData , sizeof( int ) * num , cudaMemcpyHostToDevice );
//delete the data
delete[] cpuData;*/
//512 elements per block, 256 threads each
dim3 threads( 256 );
dim3 blocks( ( num + 511 ) / 512 );
//segment scan; oBlockRes = 1 also writes per-block totals
kernelScan<<<blocks , threads>>>( data , num , 1 , g_ScanSum[level] );
//scan the block totals (one 256-thread block handles up to 512 totals;
//the `data` argument is unused when oBlockRes < 0)
if( num <= 262144 )
kernelScan<<<1 , threads>>>( g_ScanSum[level] , blocks.x , -1 , data );
else
cudaScan( g_ScanSum[level] , blocks.x , level + 1 );
//one thread per element to add the scanned segment offsets back
threads.x = 512;
kernelUniformAdd<<< blocks , threads >>> ( data , g_ScanSum[level] );
}
//copy new rays
// Host wrapper: compacts the marked rays using the scan result.
extern "C" void cudaCopyNewRays( float4* srcRayOri ,
			float4* srcRayDir ,
			int* scanResult ,
			int rayNumber ,
			float4* destRayOri ,
			float4* destRayDir ,
			int* offsets )
{
	// 256 threads per block, ceil-divide so every ray is covered
	const int threadsPerBlock = 256;
	const int blocks = ( rayNumber + threadsPerBlock - 1 ) / threadsPerBlock;
	kernelCopyNewRays<<<blocks , threadsPerBlock>>>( srcRayOri , srcRayDir , scanResult , rayNumber , destRayOri , destRayDir , offsets );
}
//clear the noise of the image
// Host wrapper for kernelClearNoise: one thread per pixel.
extern "C" void cudaClearNoise( float4* imgData ,
			int width ,
			int height ,
			float4* targetData )
{
	//the block and thread number
	int threadNum = 256;
	// FIX(consistency): derive the block count from threadNum instead of the
	// duplicated literals 256/255 -- same value, but now it follows the same
	// ceil-division pattern as every other wrapper in this file.
	int blockNum = ( width * height + threadNum - 1 ) / threadNum;
	//call the kernel
	kernelClearNoise<<<blockNum , threadNum>>>( imgData , width , height , targetData );
}
21,386 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
//Auto-generated floating-point stress kernel: evaluates a fixed chain of
//float intrinsics driven by the 29 command-line arguments and prints the
//final value. The exact evaluation order determines the printed result, so
//the body is intentionally left untouched (file header: "Do not modify").
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,int var_6,int var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28) {
if (comp > +1.8298E-36f + (+0.0f * var_1)) {
if (comp < var_2 / var_3 + log10f((var_4 / (-1.6894E-4f / (-1.0921E36f + -1.3485E28f * -1.9564E-35f + var_5))))) {
comp = var_8 * fabsf(atanf((-1.8050E36f / (-1.5588E13f * var_9 / -1.3933E-8f))));
if (comp > (-1.8341E-43f + +1.5459E2f + cosf((+1.4057E-36f * +1.0774E-37f * (var_10 / -1.9005E-35f))))) {
float tmp_1 = -1.1918E24f;
comp += tmp_1 - var_11 + fmodf(ceilf((var_12 / (var_13 - (var_14 - -1.2298E-21f + var_15)))), var_16 / (+1.4845E-36f + (var_17 / (-1.4237E-17f * var_18))));
comp = (var_19 - (+1.6948E-37f * (var_20 - (var_21 * var_22))));
}
//var_6 controls the first loop's trip count
for (int i=0; i < var_6; ++i) {
comp = coshf(sqrtf(var_23 / -1.7772E-36f - var_24 * -1.7355E-37f));
comp += log10f(-1.1000E-36f);
float tmp_2 = -1.4713E-35f;
comp = tmp_2 * (var_25 + var_26);
}
//var_7 controls the second loop's trip count
for (int i=0; i < var_7; ++i) {
comp += (var_27 + var_28);
}
}
}
//print the final value (flushed at the host's cudaDeviceSynchronize)
printf("%.17g\n", comp);
}
/* Allocate a 10-element float array on the heap and fill every slot with v.
 * Caller owns the returned buffer and must free() it. */
float* initPointer(float v) {
  float *buf = (float*) malloc(sizeof(float) * 10);
  for (int i = 0; i < 10; ++i) {
    buf[i] = v;
  }
  return buf;
}
int main(int argc, char** argv) {
/* Program variables */
/* FIX: the kernel takes 29 numeric arguments; bail out instead of reading
 * past the end of argv (undefined behaviour) when too few are supplied. */
if (argc < 30) {
  fprintf(stderr, "usage: %s <29 numeric arguments>\n", argv[0]);
  return 1;
}
  float tmp_1 = atof(argv[1]);
  float tmp_2 = atof(argv[2]);
  float tmp_3 = atof(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  int tmp_7 = atoi(argv[7]);
  int tmp_8 = atoi(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);
  float tmp_29 = atof(argv[29]);
  /* single-thread launch; the kernel prints its result itself */
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29);
  /* wait for the kernel so its device-side printf is flushed */
  cudaDeviceSynchronize();
  return 0;
}
|
21,387 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
//Note: you can place as many kernel functions in this file as are necessary
/** YOUR CODE GOES BELOW (extra credit) **/
/** YOUR CODE GOES ABOVE (extra credit) **/
|
21,388 | #include "includes.h"
/*
* Module to test CUDA module loading and execution.
* To be compiled with:
* nvcc -ptx module_test.cu
*/
#ifdef __cplusplus
extern "C" {
#endif
/// Sets the first N elements of array to value.
/// FIX: the kernel was declared OUTSIDE the extern "C" braces, so its symbol
/// was C++-mangled -- which defeats looking it up by name in the PTX module
/// this file is compiled into (see the header comment). Moving it inside
/// keeps the unmangled name "testMemset".
__global__ void testMemset(float* array, float value, int N){
	// flatten a (possibly 2-D) grid of 1-D blocks into a linear element index
	int i = ( blockIdx.y*gridDim.x + blockIdx.x ) * blockDim.x + threadIdx.x;
	if(i < N){
		array[i] = value;
	}
}
#ifdef __cplusplus
}
#endif
21,389 | #include<stdlib.h>
#include<stdio.h>
#include<time.h>
// Tree reduction working directly in global memory (destroys d_in).
// Each block sums its blockDim.x-element slice of d_in and writes the
// partial sum to d_out[blockIdx.x].
__global__ void global_reduce(float *d_in,float *d_out){
	const int gid = threadIdx.x + blockIdx.x*blockDim.x;
	const int lane = threadIdx.x;
	// halve the active range each step, folding the upper half onto the lower
	for(int stride = blockDim.x/2; stride > 0; stride >>= 1){
		if(lane < stride){
			d_in[gid] += d_in[gid+stride];
		}
		// the whole block must finish this step before the next one reads
		__syncthreads();
	}
	// lane 0 now holds the block's total
	if(lane == 0){
		d_out[blockIdx.x] = d_in[gid];
	}
}
// Tree reduction staged through shared memory: each block copies its slice
// of d_in on-chip, reduces it there, and writes the block total to
// d_out[blockIdx.x]. Launch with blockDim.x floats of dynamic shared memory.
__global__ void shared_reduce(float *d_in,float *d_out){
	extern __shared__ float s_in[];
	const int gid = threadIdx.x + blockIdx.x*blockDim.x;
	const int lane = threadIdx.x;
	// stage this block's slice into shared memory
	s_in[lane] = d_in[gid];
	__syncthreads();
	// pairwise reduction: halve the active range each step
	for(int stride = blockDim.x/2; stride > 0; stride >>= 1){
		if(lane < stride){
			s_in[lane] += s_in[lane+stride];
		}
		__syncthreads();
	}
	// lane 0 writes the block total
	if(lane == 0){
		d_out[blockIdx.x] = s_in[0];
	}
}
// Fill h_in with `size` pseudo-random floats drawn from
// {0.00, 0.01, ..., 1.00}; reseeds the RNG from the wall clock each call.
void init(float *h_in,const int size){
	srand((unsigned int)time(NULL));
	for(int i = 0; i < size; ++i){
		h_in[i] = (float)(rand() % 101) / 100.0f;
	}
}
int main(){
	int size = 1024;
	float *h_in;
	float h_out = 0;
	h_in = (float *)malloc(size*size*sizeof(float));
	init(h_in,size*size);	// fill with random values in [0,1]
	// ---- CPU reference sum ----
	time_t t_start = clock();
	for(int i=0;i<size*size;i++){
		h_out += h_in[i];
	}
	time_t t_end = clock();
	printf("CPU sum:%f\n",h_out);
	// FIX: clock() returns processor ticks, not time_t seconds, so the old
	// difftime() call was meaningless; convert ticks to milliseconds.
	printf("CPU time:%fms\n",(double)(t_end - t_start)*1000.0/CLOCKS_PER_SEC);
	float *d_in;
	float *d_out;
	float *d_out_mid;
	dim3 block(size);	// 1024 blocks ...
	dim3 thread(size);	// ... of 1024 threads each
	cudaMalloc((float **)&d_in,size*size*sizeof(float));
	cudaMalloc((float **)&d_out_mid,size*sizeof(float));
	cudaMalloc((float **)&d_out,sizeof(float));
	cudaMemcpy(d_in,h_in,size*size*sizeof(float),cudaMemcpyHostToDevice);
	// ---- global-memory reduction (two passes: per-block, then final) ----
	t_start = clock();
	global_reduce<<<block,thread>>>(d_in,d_out_mid);
	global_reduce<<<1,thread>>>(d_out_mid,d_out);
	// FIX: kernel launches are asynchronous -- without this sync the timer
	// measured only launch overhead, not execution time.
	cudaDeviceSynchronize();
	t_end = clock();
	cudaMemcpy(&h_out,d_out,sizeof(float),cudaMemcpyDeviceToHost);
	printf("GPU(global) sum:%f\n",h_out);
	printf("GPU(global) time:%fms\n",(double)(t_end - t_start)*1000.0/CLOCKS_PER_SEC);
	// ---- shared-memory reduction (re-upload: global_reduce destroyed d_in) ----
	cudaMemcpy(d_in,h_in,size*size*sizeof(float),cudaMemcpyHostToDevice);
	t_start = clock();
	shared_reduce<<<block,thread,size*sizeof(float)>>>(d_in,d_out_mid);
	shared_reduce<<<1,thread,size*sizeof(float)>>>(d_out_mid,d_out);
	cudaDeviceSynchronize();	// same fix as above
	t_end = clock();
	cudaMemcpy(&h_out,d_out,sizeof(float),cudaMemcpyDeviceToHost);
	printf("GPU(shared) sum:%f\n",h_out);
	printf("GPU(shared) time:%fms\n",(double)(t_end - t_start)*1000.0/CLOCKS_PER_SEC);
	free(h_in);
	cudaFree(d_in);
	cudaFree(d_out_mid);
	cudaFree(d_out);
	cudaDeviceReset();	// release all device resources held by this process
	return 0;
}
|
21,390 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define CHECK_STATUS(status) \
if (status != cudaSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
cudaGetErrorString(status))
//////////////////////////////////////////////////////////////////////////////////////////////////
// 设备全局常量内存
__constant__ float constData[256];
// 设备全局内存
__device__ float devData;
// 设备全局内存指针
__device__ float* devPointer;
// Device code
// Tiny smoke-test kernel: prints the size in bytes of the __device__ global
// `devData`. NOTE(review): %lu expects unsigned long while sizeof yields
// size_t -- identical on LP64 platforms, but %zu would be portable.
__global__ void MyKernel()
{
printf("%lu\n", sizeof(devData));
}
int main(int argc, char **argv) {
    CHECK_STATUS(cudaSetDevice(0));
    float data[256];
    // Copy host data into the __constant__ array.
    // FIX(consistency): the file defines CHECK_STATUS but most calls were
    // unchecked; wrap them all so a failure is reported where it happens.
    CHECK_STATUS(cudaMemcpyToSymbol(constData, data, sizeof(data)));
    // Copy the constant array back into host memory.
    CHECK_STATUS(cudaMemcpyFromSymbol(data, constData, sizeof(data)));
    // Copy a scalar into the __device__ global.
    float value = 3.14f;
    CHECK_STATUS(cudaMemcpyToSymbol(devData, &value, sizeof(float)));
    float* ptr;
    CHECK_STATUS(cudaMalloc(&ptr, 256 * sizeof(float)));
    // Copy the POINTER VALUE itself into the __device__ pointer devPointer
    // (note: the pointer, not what it points to).
    CHECK_STATUS(cudaMemcpyToSymbol(devPointer, &ptr, sizeof(ptr)));
    // Launch the kernel.
    MyKernel<<<1, 1>>>();
    // Catch launch-configuration errors.
    CHECK_STATUS(cudaGetLastError());
    // FIX: device-side printf is only guaranteed to flush at a sync point;
    // wait for the kernel before tearing down.
    CHECK_STATUS(cudaDeviceSynchronize());
    // Free the device buffer: ptr, not devPointer.
    CHECK_STATUS(cudaFree(ptr));
    return 0;
}
|
21,391 | #include "includes.h"
// Element-wise square root: B[i] = sqrtf(A[i]) for every i in [0, n).
// Grid-stride loop, so any launch configuration covers the whole array.
__global__ void cu_sqrt(const float *A, float *B, const int n){
	const int step = blockDim.x * gridDim.x;
	for(int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += step){
		B[i] = sqrtf(A[i]);
	}
}
21,392 | #include <iostream>
#include <stdio.h>
#include <time.h>
#define LENGTH 10000
using namespace std;
// Array-of-structures element used by the vector-add demo.
struct aos{
int a; // first addend, initialised on the host
int b; // second addend, initialised on the host
int c; // result slot: written as a + b on the device
};
// Per-element add over an array of aos structs: arr[i].c = arr[i].a + arr[i].b.
// FIX: the index used only threadIdx.x, so with a multi-block launch every
// block redundantly processed the first blockDim.x elements and the rest of
// the array was never touched; use the global grid-wide index instead.
__global__ void vector_add(aos *arr){
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < LENGTH)
		arr[i].c = arr[i].a + arr[i].b;
}
// Host reference implementation: element-wise sum of two LENGTH-element
// arrays, written into c.
__host__ void vector_add_cpu(float a[], float b[], float *c){
	for(int idx = 0; idx < LENGTH; ++idx)
		c[idx] = a[idx] + b[idx];
}
int main(){
	aos *h_aos;
	aos *d_aos;
	h_aos = new aos [LENGTH];
	for(int i=0 ; i< LENGTH; i++){
		h_aos[i].a = i;
		h_aos[i].b = i;
	}
	cudaMalloc((void**)&d_aos, LENGTH*sizeof(aos));
	cudaMemcpy(d_aos, h_aos, LENGTH*sizeof(aos), cudaMemcpyHostToDevice);
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	float milliseconds = 0;
	float total_time = 0.0;
	// FIX: LENGTH/128 truncates (10000/128 = 78 blocks = 9984 threads),
	// leaving the array tail unprocessed; ceil-divide instead.
	const int threadsPerBlock = 128;
	const int blocks = (LENGTH + threadsPerBlock - 1) / threadsPerBlock;
	// FIX: the events were never recorded, so cudaEventElapsedTime failed
	// and the printed time was bogus; record around the kernel and sync.
	cudaEventRecord(start);
	vector_add<<<blocks, threadsPerBlock>>>(d_aos);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	cudaMemcpy(h_aos, d_aos, LENGTH*sizeof(aos), cudaMemcpyDeviceToHost);
	cudaEventElapsedTime(&milliseconds, start, stop);
	total_time += milliseconds;
	std::cout << "Time taken : " << milliseconds << " Avg time : " << total_time << std::endl;
	for(int i=0; i<10 ;i++){
		cout << h_aos[i].c << endl;
	}
	// release resources (previously leaked)
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFree(d_aos);
	delete[] h_aos;
}
|
21,393 | /*!
\brief loopExit.cu
\author Andrew Kerr
\brief simple test of control-flow behavior of kernels
*/
#include <stdio.h>
extern "C" __global__ void kernelLoopExit(int *A, int N) {
// global thread index: one element of A per thread
int i = threadIdx.x + blockIdx.x * blockDim.x;
// per-thread loop counters; sized for up to 64 threads/block (main uses 32)
__shared__ int S[64];
S[threadIdx.x] = 0;
A[i] = 0;
// count loop iterations: thread i runs this N - i times (0 when i >= N)
for (int j = i; j < N; j++) {
S[threadIdx.x] += 1;
}
__syncthreads();
// publish the trip count; the host checks A[i] == N - i
A[i] = S[threadIdx.x];
}
int main(int argc, char *arg[]) {
const int N = 32;
int *A_host, *A_gpu =0;
int errors = 0;
size_t bytes = sizeof(int)*N;
// device output buffer
if (cudaMalloc((void **)&A_gpu, bytes) != cudaSuccess) {
printf("cudaMalloc() - failed to allocate %d bytes on device\n", (int)bytes);
return -1;
}
A_host = (int *)malloc(bytes);
// poison the host array so a kernel that writes nothing is detected
for (int i = 0; i < N; i++) {
A_host[i] = -1;
}
cudaMemcpy(A_gpu, A_host, bytes, cudaMemcpyHostToDevice);
// one 32-thread block per 32 elements
dim3 grid((N+31)/32,1);
dim3 block(32, 1);
kernelLoopExit<<< grid, block >>>(A_gpu, N);
cudaMemcpy(A_host, A_gpu, bytes, cudaMemcpyDeviceToHost);
// thread i loops N - i times, so A[i] must equal N - i; report at most 5
for (int i = 0; (errors < 5) && i < N; ++i) {
int got = A_host[i];
int expected = N - i;
if (expected != got) {
printf("ERROR 1 [%d] - expected: %d, got: %d\n", i, expected, got);
++errors;
}
}
cudaFree(A_gpu);
free(A_host);
if (errors) {
printf("Pass/Fail : Fail\n");
}
else {
printf("Pass/Fail : Pass\n");
}
return 0;
}
|
21,394 | #include <iostream>
#include <vector>
// Multiply every element of input_image by 1.25 into result.
// Grid-stride loop: any grid size covers all `size` elements.
__global__
void scale_kernel(float *const input_image,
                  const int size,
                  float* result) {
  const int stride = gridDim.x * blockDim.x;
  int index = blockDim.x * blockIdx.x + threadIdx.x;
  while (index < size) {
    result[index] = 1.25f * input_image[index];
    index += stride;
  }
}
// Host wrapper: scales a width*height*channels image by 1.25 on the GPU
// and blocks until the kernel has finished.
void scale(float *const input_image,
           int width,
           int height,
           int channels,
           float* result) {
  const int size = width * height * channels;
  const int blockSize = 256;
  // one extra block so the tail is always covered
  const int numBlocks = size / blockSize + 1;
  std::cout << "Launching kernel with " << numBlocks << " blocks @ " << blockSize << std::endl;
  scale_kernel<<<numBlocks, blockSize>>>(input_image, size, result);
  cudaDeviceSynchronize();
}
|
21,395 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/gather.h>
#include <thrust/scan.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <chrono>
#include <iostream>
#include <iomanip>
// This example computes a summed area table using segmented scan
// http://en.wikipedia.org/wiki/Summed_area_table
// convert a linear index to a linear index in the transpose
struct transpose_index : public thrust::unary_function<size_t,size_t>
{
    size_t m, n;    // dimensions of the (row-major) source array

    __host__ __device__
    transpose_index(size_t _m, size_t _n) : m(_m), n(_n) {}

    // element (i,j) of an m-by-n array lives at offset j*m + i in the
    // transposed n-by-m array
    __host__ __device__
    size_t operator()(size_t linear_index)
    {
        const size_t i = linear_index / n;
        const size_t j = linear_index % n;
        return m * j + i;
    }
};
// convert a linear index to a row index
struct row_index : public thrust::unary_function<size_t,size_t>
{
    size_t n;   // row length of the row-major array

    __host__ __device__
    row_index(size_t _n) : n(_n) {}

    // integer division maps every element of a row to the same key
    __host__ __device__
    size_t operator()(size_t i)
    {
        return i / n;
    }
};
// transpose an M-by-N array
// Gathers src (row-major m-by-n) into dst (row-major n-by-m): element k of
// dst is read from src at the transposed linear offset produced by
// transpose_index(n, m).
template <typename T>
void transpose(size_t m, size_t n, thrust::device_vector<T>& src, thrust::device_vector<T>& dst)
{
thrust::counting_iterator<size_t> indices(0);
thrust::gather
(thrust::make_transform_iterator(indices, transpose_index(n, m)),
thrust::make_transform_iterator(indices, transpose_index(n, m)) + dst.size(),
src.begin(),
dst.begin());
}
// scan the rows of an M-by-N array
// In-place inclusive prefix sum along each row: elements are keyed by their
// row index (i / n), so the scan restarts at every row boundary.
template <typename T>
void scan_horizontally(size_t m, size_t n, thrust::device_vector<T>& d_data)
{
thrust::counting_iterator<size_t> indices(0);
thrust::inclusive_scan_by_key
(thrust::make_transform_iterator(indices, row_index(n)),
thrust::make_transform_iterator(indices, row_index(n)) + d_data.size(),
d_data.begin(),
d_data.begin());
}
// scan the rows of an M-by-N array
// Column scan of the row-major m-by-n array done in place: the data is
// viewed through a permutation iterator that applies the transposed index
// mapping, so no temporary array or explicit transposes are needed
// (compare with the temp-based 3-step pipeline in main()).
template <typename T>
void transpose_scan_horizontally(size_t m, size_t n, thrust::device_vector<T>& d_data)
{
// iterate d_data in transposed (column-major) order, read and write
auto pi_ = thrust::make_permutation_iterator(
d_data.begin(),
thrust::make_transform_iterator(thrust::counting_iterator<size_t>(0),
transpose_index(n, m))
);
// keys use row length m (rows of the transposed view = columns of d_data)
thrust::inclusive_scan_by_key(
thrust::make_transform_iterator(thrust::counting_iterator<size_t>(0), row_index(m)),
thrust::make_transform_iterator(thrust::counting_iterator<size_t>(0), row_index(m)) + d_data.size(),
pi_,
pi_
);
/*
1. Approach:
thrust::counting_iterator<size_t> indices(0);
thrust::inclusive_scan_by_key
(thrust::make_transform_iterator(indices, row_index(m)),
thrust::make_transform_iterator(indices, row_index(m)) + d_data.size(),
thrust::make_permutation_iterator(d_data.begin(),thrust::make_transform_iterator(indices,transpose_index(n,m))),
d_data.begin());
*/
}
// print an M-by-N array
template <typename T>
void print(size_t m, size_t n, thrust::device_vector<T>& d_data)
{
  // pull the data to the host once, then format row by row
  thrust::host_vector<T> h_data = d_data;
  for(size_t row = 0; row < m; row++)
  {
    for(size_t col = 0; col < n; col++)
      std::cout << std::setw(8) << h_data[row * n + col] << " ";
    std::cout << "\n";
  }
}
int main(void)
{
size_t m = 1e5; // number of rows
size_t n = 10; // number of columns
std::cout << "Nr. of Rows: " << m << std::endl;
std::cout << "Nr. of Columns: " << n << std::endl;
// 2d array stored in row-major order [(0,0), (0,1), (0,2) ... ]
//thrust::device_vector<int> data(m * n, 1);
//thrust::device_vector<int> data_modified(m * n, 1);
// Benchmark 1: summed-area table via row scan + transpose + row scan +
// transpose back (needs an m*n temporary).
std::cout << "THRUST EXAMPLE: " << std::endl;
std::cout << "======================== " << std::endl;
{
// 2d array stored in row-major order [(0,0), (0,1), (0,2) ... ]
thrust::device_vector<int> data(m * n, 1);
// Synchronise cuda device and start measurement.
cudaDeviceSynchronize();
auto startTime = std::chrono::high_resolution_clock::now();
//std::cout << "[step 1] scan horizontally" << std::endl;
scan_horizontally(m, n, data);
//std::cout << "[step 2] transpose and scan with temp array" << std::endl
thrust::device_vector<int> temp(m * n);
transpose(m, n, data, temp);
//std::cout << "[step 3] scan transpose horizontally" << std::endl;
scan_horizontally(n, m, temp);
//std::cout << "[step 4] transpose the transpose" << std::endl;
transpose(n, m, temp, data);
// print(m, n, data);
// sync again so the timer covers the full device work
cudaDeviceSynchronize();
auto endTime= std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = endTime-startTime;
std::cout << "Temp approach in ms: "<< diff.count()*1000 << std::endl;
}
std::cout << std::endl;
// Benchmark 2: same result via a permutation-iterator column scan,
// avoiding the temporary and both transposes.
std::cout << "THRUST EXAMPLE MODIFIED: " << std::endl;
std::cout << "======================== " << std::endl;
{
// 2d array stored in row-major order [(0,0), (0,1), (0,2) ... ]
thrust::device_vector<int> data(m * n, 1);
// Synchronise cuda device and start measurement.
cudaDeviceSynchronize();
auto startTime = std::chrono::high_resolution_clock::now();
//std::cout << "[step 1] scan horizontally" << std::endl;
scan_horizontally(m, n, data);
transpose_scan_horizontally(m, n, data);
// print(m, n, data);
cudaDeviceSynchronize();
auto endTime= std::chrono::high_resolution_clock::now();
std::chrono::duration<double> diff = endTime-startTime;
std::cout << "Perm_iter approach in ms: "<< diff.count()*1000 << std::endl;
}
return 0;
}
21,396 |
// Naive byte histogram: one thread per input character, one global atomic
// increment per hit (heavily contended for skewed inputs).
extern "C"
__global__
void histgramMakerKernel_naive(int *d_histgram,
    const unsigned char* d_text, int textLength) {
  const int gid = blockDim.x * blockIdx.x + threadIdx.x;
  if (gid >= textLength)
    return;
  // bump the bucket for this byte value; atomics serialise collisions
  const unsigned char ch = d_text[gid];
  atomicAdd(&d_histgram[(int)ch], 1);
}
21,397 | #include <iostream>
#include <math.h>
#include <unistd.h>
#include <memory>
const std::size_t N = 1 << 20;
__global__
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
// The tail guard lets the grid be oversized relative to the problem.
void vec_add(float* const c, const float* const a, const float* const b, const std::size_t n)
{
    // unique grid-wide index: block offset plus thread offset in the block
    const std::size_t idx = threadIdx.x + (blockIdx.x * blockDim.x);
    if (idx < n)
    {
        c[idx] = a[idx] + b[idx];
    }
}
int main(void)
{
    // FIX: the original launched <<< (1,1,1), (N,1,1) >>> with N = 1<<20,
    // which exceeds the 1024 threads-per-block hardware limit, so the
    // kernel never ran. Use 256-thread blocks and ceil-divide the grid.
    const unsigned int threads_per_block = 256;
    const dim3 grid_size((N + threads_per_block - 1) / threads_per_block, 1, 1);
    const dim3 block_size(threads_per_block, 1, 1);

    std::unique_ptr<float[]> h_a(new float[N]);
    std::unique_ptr<float[]> h_b(new float[N]);
    std::unique_ptr<float[]> h_c(new float[N]);

    std::size_t error_count = 0;

    float* d_a = NULL;
    float* d_b = NULL;
    float* d_c = NULL;
    cudaMalloc(&d_a, N * sizeof(h_a[0]));
    cudaMalloc(&d_b, N * sizeof(h_b[0]));
    cudaMalloc(&d_c, N * sizeof(h_c[0]));

    for (std::size_t i = 0; i < N; ++ i)
    {
        h_a[i] = 1;
        h_b[i] = 2;
    }

    cudaMemcpy(d_a, h_a.get(), N * sizeof(h_a[0]), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b.get(), N * sizeof(h_b[0]), cudaMemcpyHostToDevice);
    cudaMemset(d_c, 0, N * sizeof(h_c[0]));

    vec_add<<<grid_size, block_size>>>(d_c, d_a, d_b, N);
    cudaDeviceSynchronize(); // launches are async; wait before reading results

    // FIX: sizeof(h_c) is the size of the unique_ptr object (a few bytes),
    // not of the array -- copy back all N floats.
    cudaMemcpy(h_c.get(), d_c, N * sizeof(h_c[0]), cudaMemcpyDeviceToHost);

    // verify on the host: every element must be 1 + 2 = 3
    // (the error counter was previously never incremented)
    for (std::size_t i = 0; i < N; ++ i)
    {
        if (3 != h_c[i])
        {
            ++ error_count;
        }
    }
    std::cout << 100.0 * (static_cast<double>(error_count) / N) << "% mismatched" << std::endl << std::flush;

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
21,398 | #include <stdio.h>
// Prints each thread's unique grid-wide index (output order is whatever
// the scheduler produces, not deterministic).
__global__ void loop()
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    printf("%d\n", gid);
}
int main()
{
    /*
     * Any configuration totalling 10 threads satisfies the exercise,
     * e.g. <<<5, 2>>> or <<<10, 1>>>; 2 blocks of 5 threads is used here.
     */
    loop<<<2, 5>>>();
    // block until the kernel finishes so its printf output is flushed
    cudaDeviceSynchronize();
}
21,399 | #include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2//64
// Writes 0 to one element of p per thread, through an aliasing pointer.
__global__ void foo(int* p) {
  int* q = p;  // q aliases p
  q[threadIdx.x] = 0;
}
21,400 | #include "includes.h"
// PageRank-style update: prA[id] = base + 0.85 * prA[id], and prB is
// cleared for the next iteration. One thread per node.
__global__ void clearLabel(float *prA, float *prB, unsigned int num_nodes, float base)
{
	unsigned int id = blockDim.x * blockIdx.x + threadIdx.x;
	if(id < num_nodes)
	{
		// FIX: 0.85f keeps the multiply in single precision; the bare double
		// literal 0.85 forced a float->double->float round trip per element
		prA[id] = base + prA[id] * 0.85f;
		prB[id] = 0;
	}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.