serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
1,201
|
#include <cstdlib>
#include <cstdio>
#include <cmath>
#include <cassert>
#define SIZE 32
// Element-wise matrix addition d_C = d_A + d_B for a size x size matrix
// stored as an array of device row pointers (each d_X[i] is a device row).
// Expects a 2-D launch covering at least size x size threads; threads past
// the edge exit without touching memory.
__global__ void matrix_add(float** d_A, float** d_B, float** d_C, size_t size) {
    size_t i = threadIdx.x + blockDim.x * blockIdx.x;
    size_t j = threadIdx.y + blockDim.y * blockIdx.y;
    // BUG FIX: guard BEFORE any memory access -- the original printf read
    // d_A[i][j] unconditionally, so any over-provisioned grid read out of
    // bounds. Also "%d" with size_t arguments is undefined behavior; cast
    // to unsigned long long for device printf ("%llu").
    if (i < size && j < size) {
        printf("i: %llu, j: %llu, d_A[i][j]: %f\n",
               (unsigned long long)i, (unsigned long long)j, d_A[i][j]);
        // BUG FIX: the kernel's actual work was commented out, leaving it a
        // no-op; restore the addition the host code expects (C = A + B).
        d_C[i][j] = d_A[i][j] + d_B[i][j];
    }
}
/* Print a CUDA error message and terminate the program on any failure.
 * NOTE(review): __FILE__ and __LINE__ expand *here*, inside this helper,
 * so the reported line is always this printf's own line, never the call
 * site; a macro wrapper would be required to report the caller's location. */
void print_err_msg(cudaError_t err) {
if(err != cudaSuccess) {
printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
exit(EXIT_FAILURE);
}
}
// Demo: add two 32x32 matrices on the GPU using an array-of-row-pointers
// layout. h_* are host matrices, s_* are HOST-resident tables of DEVICE row
// pointers, d_* are device-resident copies of those tables passed to the
// kernel.
int main() {
    float** h_A;
    float** h_B;
    float** h_C;
    float** d_A;
    float** d_B;
    float** d_C;
    float** s_A;
    float** s_B;
    float** s_C;
    h_A = new float*[SIZE];
    h_B = new float*[SIZE];
    h_C = new float*[SIZE];
    s_A = (float **)malloc(SIZE*sizeof(float*));
    s_B = (float **)malloc(SIZE*sizeof(float*));
    s_C = (float **)malloc(SIZE*sizeof(float*));
    cudaError_t err;
    err = cudaMalloc((void**) &d_A, SIZE*sizeof(float*));
    print_err_msg(err);
    err = cudaMalloc((void**) &d_B, SIZE*sizeof(float*));
    print_err_msg(err);
    err = cudaMalloc((void**) &d_C, SIZE*sizeof(float*));
    print_err_msg(err);
    // Allocate host rows and the corresponding device rows.
    for(size_t i = 0; i < SIZE; ++i) {
        h_A[i] = new float[SIZE];
        h_B[i] = new float[SIZE];
        h_C[i] = new float[SIZE];
        err = cudaMalloc((void **) &s_A[i], SIZE*sizeof(float));
        print_err_msg(err);
        err = cudaMalloc((void **) &s_B[i], SIZE*sizeof(float));
        print_err_msg(err);
        err = cudaMalloc((void **) &s_C[i], SIZE*sizeof(float));
        print_err_msg(err);
    }
    // BUG FIX: the pointer tables must be copied host -> device AFTER the
    // row allocations fill them. The original copied device -> host before
    // the rows existed, so d_A/d_B/d_C held uninitialized pointers and the
    // kernel dereferenced garbage.
    err = cudaMemcpy(d_A, s_A, SIZE*sizeof(float*), cudaMemcpyHostToDevice);
    print_err_msg(err);
    err = cudaMemcpy(d_B, s_B, SIZE*sizeof(float*), cudaMemcpyHostToDevice);
    print_err_msg(err);
    err = cudaMemcpy(d_C, s_C, SIZE*sizeof(float*), cudaMemcpyHostToDevice);
    print_err_msg(err);
    // Fill inputs: A = 1, B = 2 everywhere (expected C = 3).
    for(size_t i = 0; i < SIZE; ++i) {
        for(size_t j = 0; j < SIZE; ++j) {
            h_A[i][j] = 1.0f;
            h_B[i][j] = 2.0f;
        }
    }
    for(size_t i = 0; i < SIZE; ++i) {
        err = cudaMemcpy(s_A[i], h_A[i], SIZE*sizeof(float), cudaMemcpyHostToDevice);
        print_err_msg(err);
        err = cudaMemcpy(s_B[i], h_B[i], SIZE*sizeof(float), cudaMemcpyHostToDevice);
        print_err_msg(err);
    }
    int threads = 32;
    dim3 nthreads(threads, threads);
    // BUG FIX: ceil(SIZE/threads) truncated to integer division BEFORE
    // ceil() ran; use integer ceil-division instead.
    int blocks = (SIZE + threads - 1) / threads;
    dim3 nblocks(blocks, blocks);
    matrix_add<<<nblocks, nthreads>>>(d_A, d_B, d_C, SIZE);
    print_err_msg(cudaGetLastError());  // catch launch-configuration errors
    for(size_t i = 0; i < SIZE; ++i) {
        err = cudaMemcpy(h_C[i], s_C[i], SIZE*sizeof(float), cudaMemcpyDeviceToHost);
        print_err_msg(err);
    }
    // Release per-row allocations (host and device).
    for(size_t i = 0; i < SIZE; ++i) {
        delete[] h_A[i];
        delete[] h_B[i];
        delete[] h_C[i];
        err = cudaFree(s_A[i]);
        print_err_msg(err);
        err = cudaFree(s_B[i]);
        print_err_msg(err);
        err = cudaFree(s_C[i]);
        print_err_msg(err);
    }
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    // BUG FIX: the malloc'd pointer tables were previously leaked.
    free(s_A);
    free(s_B);
    free(s_C);
    err = cudaFree(d_A);
    print_err_msg(err);
    err = cudaFree(d_B);
    print_err_msg(err);
    err = cudaFree(d_C);
    print_err_msg(err);
    return 0;
}
|
1,202
|
#ifndef MATRIXMULTIPLICATIONKERNEL_CU
#define MATRIXMULTIPLICATIONKERNEL_CU
#include <curand.h>
// Naive dense matrix multiply C = A * B, where A has width wa and B (and
// therefore C) has width wb. One thread computes one element of C.
// Requires the grid to cover C exactly (no bounds guard).
__global__ void matrixMul(float * g_C, float * g_A, float *g_B,int wa, int wb){
    // BUG FIX: the original mixed dimensions (blockDim.y in the x index and
    // blockDim.x in the y index), which breaks any non-square block shape.
    int x = blockIdx.x * blockDim.x + threadIdx.x;  // column of C / B
    int y = blockIdx.y * blockDim.y + threadIdx.y;  // row of C / A
    float result = 0;
    // Dot product of row y of A with column x of B.
    for(int i = 0; i < wa; ++i){
        float tempA = g_A[y*wa+i];
        float tempB = g_B[i*wb+x];
        result += tempA*tempB;
    }
    // BUG FIX: C has width wb (same as B), so its rows are strided by wb,
    // not wa -- the original index was only correct for wa == wb.
    g_C[y*wb+x] = result;
}
// Variant of matrixMul that hard-codes a 16x16 tile for the thread-index
// arithmetic; the launch MUST use blockDim == (16, 16) for the coordinates
// to be correct. One thread computes one element of C = A * B.
__global__ void matrixMul2(float * g_C, float * g_A, float *g_B,int wa, int wb){
    const int TILE_SIZE = 16;  // must match the launch's block dimensions
    int x = blockIdx.x*TILE_SIZE + threadIdx.x;  // column of C / B
    int y = blockIdx.y*TILE_SIZE + threadIdx.y;  // row of C / A
    float result = 0;
    for(int i = 0; i < wa; ++i){
        float tempA = g_A[y*wa+i];
        float tempB = g_B[i*wb+x];
        result += tempA*tempB;
    }
    // BUG FIX: C's rows are strided by wb (the width of B and C), not wa;
    // the original index was only correct when wa == wb.
    g_C[y*wb+x] = result;
}
// Shared-memory tiled matrix multiply.
// NOTE(review): the x/y conventions are transposed relative to the other
// kernels in this record -- `row` is built from blockIdx.x/threadIdx.x and
// `col` from blockIdx.y/threadIdx.y; confirm the host launch matches.
// Assumes blockDim == (TILE_WIDTH, TILE_WIDTH) and wa divisible by
// TILE_WIDTH (the loop bound wa/TILE_WIDTH silently drops a remainder tile).
// NOTE(review): the output index and the B-tile stride both use wa, not wb,
// so this is correct only for square operands (wa == wb) -- verify.
__global__ void matrixMul3(float * g_C, float * g_A, float *g_B,int wa, int wb){
const int TILE_WIDTH = 16;
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
// Per-block staging tiles for a TILE_WIDTH x TILE_WIDTH piece of A and B.
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
int row = bx*blockDim.y + tx;
int col = by*blockDim.x + ty;
float result = 0;
int i = 0;
// March the tile window across the shared dimension wa.
for(i = 0; i < wa/TILE_WIDTH; ++i){
// Each thread loads one element of the current A tile and one of B.
s_a[tx][ty] = g_A[i*TILE_WIDTH + row*wa +ty];
s_b[tx][ty] = g_B[(i*TILE_WIDTH*wa)+tx*wa+ col];
__syncthreads();  // tiles fully populated before anyone reads them
int k =0;
for(k=0;k<TILE_WIDTH;++k){
result += s_a[tx][k] * s_b[k][ty];
}
__syncthreads();  // all reads finished before the next iteration reloads
}
g_C[row*wa+col] = result;
}
#endif
|
1,203
|
#include <stdio.h>
// One-thread-per-element vector addition: C[i] = A[i] + B[i].
// NOTE(review): there is no bounds check, so the launch configuration must
// cover EXACTLY the vector length -- any over-provisioned thread writes out
// of bounds; confirm every caller's grid math.
__global__ void VecAdd(float * A, float * B, float * C)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
C[i] = A[i] + B[i];
}
// Print at most the first 10 entries of V, comma-separated, appending
// "..." when the vector is longer than what was shown, then a newline.
void VecPrint(float * V, int len)
{
    const int limit = (len < 10) ? len : 10;
    for (int k = 0; k < limit; k++)
    {
        printf("%4.2f", V[k]);
        if (k + 1 != limit)
            printf(", ");
    }
    if (limit < len)
        printf("...");
    printf("\n");
}
// Demo driver: add two 1024-element vectors on the GPU and print samples.
int main()
{
    int N = 1024;
    size_t size = N * sizeof(float);
    // Allocate input vectors h_A, h_B and result h_C in host memory
    float* h_A = (float*)malloc(size);
    float* h_B = (float*)malloc(size);
    float* h_C = (float*)malloc(size);
    // Initialize input vectors
    for (int i=0; i<N; i++)
    {
        h_A[i] = 2*i+1;
        h_B[i] = 4*i+1;
        h_C[i] = 0;
    }
    // Print initialised vectors
    VecPrint(h_A, N);
    VecPrint(h_B, N);
    VecPrint(h_C, N);
    // Allocate vectors in device memory
    float* d_A;
    cudaMalloc(&d_A, size);
    float* d_B;
    cudaMalloc(&d_B, size);
    float* d_C;
    cudaMalloc(&d_C, size);
    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    // Run the add kernel.
    // BUG FIX: the original used blocks=3, threads_per_block=N/3=341 which
    // launches only 1023 threads, so h_C[1023] was never computed (VecAdd
    // has no bounds guard, so the grid must cover exactly N). Use a fixed
    // block size with ceil-division: 4 * 256 == 1024 == N exactly.
    int threads_per_block = 256;
    int blocks = (N + threads_per_block - 1) / threads_per_block;
    VecAdd<<<blocks, threads_per_block>>>(d_A, d_B, d_C);
    // Print result (the blocking memcpy also synchronizes with the kernel)
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    VecPrint(h_C, N);
    // Free device memory
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // BUG FIX: memory from malloc() must be released with free(), not
    // delete[] (undefined behavior); h_C was also leaked.
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
|
1,204
|
// Guarded element-wise vector addition: out[i] = in1[i] + in2[i] for
// i < len; threads past the end of the vectors do nothing.
__global__
void vecAdd(float *in1, float *in2, float *out, int len) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len)
        return;
    out[idx] = in1[idx] + in2[idx];
}
|
1,205
|
#include <cuda_runtime.h>
#include <stdio.h>
// Demo kernel: threads 0..7 report "inside", all others "outside".
__global__ void kernel() {
    if (threadIdx.x >= 8) {
        printf("outside the kernel\n");
    } else {
        printf("inside the kernel\n");
    }
}
// Launch the demo kernel with a single block of 10 threads (8 print
// "inside", 2 print "outside") and block until it completes so the
// device-side printf output is flushed before returning.
// NOTE(review): parameters a and b are unused; the function always
// returns 0 regardless of launch success.
int cuda(int a, int b) {
kernel<<<1, 10>>>();
cudaDeviceSynchronize();
return 0;
}
|
1,206
|
__device__ volatile float BigData[1024*1024];
|
1,207
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel (the file header says
 * "automatically generated test. Do not modify"): folds the scalar
 * arguments into `comp` through a fixed sequence of float operations and
 * prints the final value with full precision. Intended for single-thread
 * launches (<<<1,1>>>); every thread would print its own result. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
comp += var_2 - (var_3 + +1.4889E-43f);
if (comp <= (+1.5026E25f + atanf(var_4 * expf(var_5 / var_6 / -1.8817E35f)))) {
float tmp_1 = +1.8395E9f;
float tmp_2 = -0.0f;
comp += tmp_2 * tmp_1 * var_7 + (var_8 / asinf(var_9 / fabsf(-1.3023E-42f)));
comp = (var_10 - sqrtf(-1.1907E35f));
}
if (comp < +1.3739E-41f - var_11) {
comp = (var_12 * (+1.6612E34f / -1.7853E8f / +1.4702E-43f));
comp += var_13 / var_14;
comp += var_15 * -0.0f * (var_16 * var_17 / asinf(floorf((var_18 - +1.4509E-36f - -1.6745E2f))));
}
for (int i=0; i < var_1; ++i) {
comp += (+1.2415E20f - asinf(+0.0f / (var_19 - var_20 * -1.2148E-35f)));
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element float array on the host with every slot set to v.
 * Caller owns the returned buffer and must free() it. */
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float)*10);
    int idx = 0;
    while (idx < 10) {
        buf[idx] = v;
        ++idx;
    }
    return buf;
}
// Parse 21 numeric command-line arguments and run the generated stress
// kernel once on a single thread.
int main(int argc, char** argv) {
/* Program variables */
// BUG FIX: the original indexed argv[1]..argv[21] unconditionally, which
// is undefined behavior when fewer arguments are supplied; validate first.
if (argc < 22) {
fprintf(stderr, "usage: %s v1 v2 ... v21 (21 numeric arguments)\n", argv[0]);
return EXIT_FAILURE;
}
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
// Block until the kernel finishes so its printf output is flushed.
cudaDeviceSynchronize();
return 0;
}
|
1,208
|
#include "includes.h"
// Guarded element-wise vector addition c = a + b for vectors of length N.
__global__ void vecAdd(float* a, float* b, float* c, const int N)
{
    const int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= N)
        return;  // tail threads beyond the vector do nothing
    c[idx] = a[idx] + b[idx];
}
|
1,209
|
#include "includes.h"
// Element-wise vector addition out = a + b, bounds-guarded so any grid
// that covers at least n threads is safe.
__global__ void vector_add(float *out, float *a, float *b, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Handling arbitrary vector size
    if (idx >= n)
        return;
    out[idx] = a[idx] + b[idx];
}
|
1,210
|
#include <stdlib.h>
#include <stdio.h>
// Demo: zero a 16-int device buffer with cudaMemset, copy it back, and
// print the (all-zero) contents.
int main(void){
    int num_elements = 16;
    int num_bytes = num_elements*sizeof(int);
    int *device_array = 0;
    int *host_array = 0;
    host_array = (int *)malloc(num_bytes);
    // BUG FIX: allocations were unchecked; a failed malloc/cudaMalloc would
    // make the later memcpy dereference a null pointer.
    if (host_array == 0) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    cudaError_t err = cudaMalloc((void**)&device_array, num_bytes);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        free(host_array);
        return 1;
    }
    // Byte-wise zero fill (valid for 0), then bring the data back.
    cudaMemset(device_array, 0, num_bytes);
    cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
    int i;
    for(i = 0; i<num_elements; i++)
        printf("%d\n", host_array[i]);
    free(host_array);
    cudaFree(device_array);
    return 0;
}
|
1,211
|
#include <stdio.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <stdlib.h>
#include "device_launch_parameters.h"
#include <thrust/scan.h>
#include <thrust/device_vector.h>
#include <thrust/count.h>
const int BASE1 = 10000 + 7;
const int BASE2 = 100000 + 3;
const int MOD1 = 1000000 + 3;
const int MOD2 = 1000000 + 37;
// One labelling round: for every still-unlabelled query vertex whose
// tree-edge neighbours are all labelled, fold the neighbour ids into two
// modular hashes and mark the occupied buckets of d_hash1/d_hash2 (the
// host later prefix-scans d_hash1 to assign dense ids). Clears *d_over to
// tell the host loop that at least one vertex made progress this round.
__global__ void findhash(int *d_qvert,int *d_qverc,int *d_qvid,int *d_qelist,bool *d_over,bool *d_qtree,int *d_hash1,int *d_hash2)
{
int i;
// Flatten the 2-D grid of 2-D blocks into one vertex index.
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=d_qverc[0])
return ;
if(d_qvid[ver]!=0)  // already labelled in an earlier round
return;
int l=d_qvert[ver+1];  // CSR end offset of this vertex's edge list
int hash1=1,hash2=1;
int flag=0;
for(i=d_qvert[ver];i<l;i++)
{
int m=d_qelist[i];
bool treeedge=d_qtree[i];
if(treeedge){
int tt=d_qvid[m];
if(tt==0)  // a tree neighbour is still unlabelled -> not ready yet
return;
flag=1;
// Multiply in 64-bit (the 1L promotion) then reduce mod MOD1/MOD2.
hash1=(hash1*1L*BASE1)*tt % MOD1;
hash2=(hash2*1L*BASE2)*tt % MOD2;
}
}
if(flag==0)  // vertex has no tree edges at all
return;
if(flag==1){
*d_over=false;  // progress made: host should run another round
d_hash1[hash1]=1;
d_hash2[hash2]=1;
}
}
// Seed pass: assign id 1 to every query vertex that has NO tree edges at
// all (these anchor the iterative labelling rounds that follow).
__global__ void setdeg1(int *d_qvert,int *d_qverc,int *d_qvid,bool *d_qtree)
{
int i;
// Flatten the 2-D grid of 2-D blocks into one vertex index.
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=d_qverc[0])
return ;
if(d_qvid[ver]!=0)  // already labelled
return;
int l=d_qvert[ver+1];  // CSR end offset of this vertex's edge list
bool treeedge;
for(i=d_qvert[ver];i<l;i++)
{
treeedge=d_qtree[i];
if(treeedge)  // any tree edge disqualifies this vertex as a seed
return;
}
//printf("%d %d\n",ver,i);
d_qvid[ver]=1;
}
/*__global__ void alignhash(bool *d_hash1,bool *d_hash2)
{
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=1000038)
return ;
if(d_hash1[ver] || d_hash2[ver]){
d_hash1=true;
}
}*/
// Second half of a labelling round: recompute the same hash as findhash
// for each ready vertex, then assign the dense id d_loc[hash1] (d_loc is
// the host-side exclusive scan of the occupied hash buckets) and record
// the id -> vertex reverse mapping in d_qidtov.
__global__ void puttoid(int *d_qvert,int *d_qverc,int *d_qvid,int *d_qelist,bool *d_qtree,int *d_loc,int * d_qidtov)
{
int i;
// Flatten the 2-D grid of 2-D blocks into one vertex index.
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=d_qverc[0])
return ;
if(d_qvid[ver]!=0)  // already labelled
return;
int l=d_qvert[ver+1];
int hash1=1,hash2=1;
int flag=0;
for(i=d_qvert[ver];i<l;i++)
{
int m=d_qelist[i];
bool treeedge=d_qtree[i];
if(treeedge){
int tt=d_qvid[m];
if(tt==0)  // neighbour unlabelled: wait for a later round
return;
flag=1;
// Must mirror findhash exactly so the bucket lookup matches.
hash1=(hash1*1L*BASE1)*tt % MOD1;
hash2=(hash2*1L*BASE2)*tt % MOD2;
}
}
//printf("%d %d %d \n",ver,flag,d_loc[hash1]);
if(flag==0)
return;
int id=d_loc[hash1];  // dense id from the scanned bucket table
d_qvid[ver]=id;
d_qidtov[id]=ver;
}
__device__ bool chechall(int ver,bool *check,int i,int dfrom,int dto,int *d_delist,int *d_qelist,int *d_qvid,int qfrom,int qto,int ** d_dcvslist){
//int ql=qfrom-qto;
int ql=qto-qfrom;
int j,k,l;
//d_dcvslist[2][ql]=true;
if(i==ql){
k=d_qelist[i+qfrom-1];
k=d_qvid[k];
if(k>=d_qvid[ver])
return true;
for(j=dfrom;j<dto;j++){
l=d_delist[j];
if(check[l])
continue;
if(!d_dcvslist[k][l])
continue;
return true;
}
}
else{
int res=false;
k=d_qelist[i+qfrom-1];
k=d_qvid[k];
if(k>=d_qvid[ver])
return chechall(ver,check,i+1,dfrom,dto,d_delist,d_qelist,d_qvid,qfrom,qto,d_dcvslist);
for(j=dfrom;j<dto;j++){
l=d_delist[j];
if(check[l])
continue;
if(!d_dcvslist[k][l])
continue;
check[l]=true;
res|=chechall(ver,check,i+1,dfrom,dto,d_delist,d_qelist,d_qvid,qfrom,qto,d_dcvslist);
if(res==true)
return true;
check[l]=false;
}
}
return false;
}
// Candidate-vertex search: each thread tests one data vertex `dver` as a
// candidate image for query vertex `ver`. A data vertex qualifies when its
// degree is sufficient and the recursive chechall() can injectively match
// ver's neighbour requirements into dver's adjacency; qualifying vertices
// are flagged in d_dcvslist[d_qvid[ver]].
// NOTE(review): uses per-thread device-heap malloc of d_dverc[0] bools --
// large inputs can exhaust the default device heap; confirm the limit is
// raised (cudaDeviceSetLimit) for big data graphs.
__global__ void findcvs(int ver,int *d_dvert,int *d_dverc,int *d_delist,int *d_qvert,int *d_qelist,int *d_qvid,int ** d_dcvslist )
{
//int i;
int dver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(dver>=d_dverc[0])
return ;
int ql=d_qvert[ver+1]-d_qvert[ver];  // query vertex degree
int dl=d_dvert[dver+1]-d_dvert[dver];  // data vertex degree
if(ql>dl)  // cannot host a higher-degree query vertex
return;
bool *checked=(bool*)malloc(sizeof(bool)*d_dverc[0]);
//bool *checked=new bool[d_dverc[0]];
memset(checked,false,sizeof(bool)*d_dverc[0]);
//chechall(bool *check,int i,int dfrom,int dto,int *d_delist,int *d_qelist,int *d_qvid,int qfrom,int qto,bool ** d_dcvslist)
if(chechall(ver,checked,1,d_dvert[dver],d_dvert[dver+1],d_delist,d_qelist,d_qvid,d_qvert[ver],d_qvert[ver+1],d_dcvslist))
d_dcvslist[d_qvid[ver]][dver]=true;
free(checked);
}
// Stream compaction step: d_loc holds the exclusive prefix scan of the
// candidate membership flags, so d_loc[dver] != d_loc[dver+1] means data
// vertex dver was flagged; write it into its compacted slot of d_dcvslist.
__global__ void puttolist(int *d_dverc,int *d_loc,int * d_dcvslist )
{
int dver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(dver>=d_dverc[0])
return ;
if(d_loc[dver]!=d_loc[dver+1])  // scan step => this vertex was selected
d_dcvslist[d_loc[dver]]=dver;
}
// Verify a candidate mapping qdmap (query vertex -> data vertex): for each
// query edge (ver, j) the mapped pair (qdmap[ver], qdmap[j]) must appear as
// an edge of the data graph, otherwise *found is cleared.
// NOTE(review): *found is only ever CLEARED here -- the caller must set it
// to true before the launch; concurrent clears are benign (same value).
__global__ void checkperm(bool *found,int * qdmap,int * d_qverc,int * d_qelist,int * d_qvert,int * d_dvert,int *d_delist){
int i;
//found[0]=false;
// Flatten the 2-D grid of 2-D blocks into one query-vertex index.
int ver=threadIdx.x*blockDim.y+threadIdx.y+blockDim.x*blockDim.y*(blockIdx.x*gridDim.y+blockIdx.y);
if(ver>=d_qverc[0])
return ;
int n,p,j,k,flag=0;
//for(ver=0;ver<d_qverc[0];ver++){
int l=d_qvert[ver+1];  // CSR end of this query vertex's edges
int dver=qdmap[ver];   // the data vertex this query vertex maps to
n=d_dvert[dver+1];     // end of dver's adjacency list
for(i=d_qvert[ver];i<l;i++)
{
flag=0;
j=d_qelist[i];  // query-side neighbour
p=d_dvert[dver];
k=qdmap[j];     // its mapped data vertex
// Linear scan of dver's adjacency for k.
for(;p<n;p++){
if(k==d_delist[p]){
flag=1;
break;
}
}
if(!flag){
*found=false;  // some query edge is not preserved by the mapping
return;
}
}
//}
}
__global__ void findall(int *d_mapans,int *d_cans,int *d_qvid,int * d_qverc,int * d_qelist,int * d_qvert,int * d_dvert,int *d_delist,int ** d_cvsverlist,int * d_size_cvs)
{
bool found[1]={true};
long long int blockId = blockIdx.x
+ blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
long long int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x)
+ threadIdx.x;
int i=0;
long long int indexperm=threadId;
for(i=0;i<d_qverc[0];i++){
int j=d_qvid[i];
indexperm/=d_size_cvs[j];
}
if(indexperm)
return;
indexperm=threadId;
int *d_qdmap=&d_mapans[d_qverc[0]*threadId];//new int[d_qverc[0]];
for(i=0;i<d_qverc[0];i++){
int j=d_qvid[i];
d_qdmap[i]=d_cvsverlist[j][indexperm%d_size_cvs[j]];
indexperm/=d_size_cvs[j];
}
//dim3 blocks((max/16 )+ 1,(max/16)+1);
//dim3 threads(16,16);
//found[0]=true;
//checkperm<<<blocks,threads>>> (found,d_qdmap,d_qverc,d_qelist,d_qvert,d_dvert,d_delist);
int n,p,j,k,flag=0,ver;
for(ver=0;ver<d_qverc[0];ver++){
int l=d_qvert[ver+1];
int dver=d_qdmap[ver];
n=d_dvert[dver+1];
for(i=d_qvert[ver];i<l;i++)
{
flag=0;
j=d_qelist[i];
p=d_dvert[dver];
k=d_qdmap[j];
for(;p<n;p++){
if(k==d_delist[p]){
flag=1;
break;
}
}
if(!flag){
*found=false;
return;
}
}
}
if(found[0]){
d_cans[threadId]=1; //printf("%d ", threadId);
}
//delete d_qdmap;
}
int * qdmap;
int *d_qverc,*d_dverc;
int *d_qvid,*d_qidtov,*h_qidtov,*h_qvid;
int *d_qvert,*d_qelist,*d_dvert,*d_delist;//,*d_dvelist,*d_qvelist;
bool *d_qtree,*d_over;
int *d_qdmap;
bool h_over;
/*void callforallperm(bool * check,int ** cvslist,int i,int max,int dmax){
int j,k,l;
l=h_qvid[i-1];
//printf("i%d %di",i,l);
if(i==max){
for(j=0;j<dmax;j++)
if(cvslist[l][j] && !check[j]){
qdmap[i-1]=j;
dim3 blocks((max/16 )+ 1,(max/16)+1);
dim3 threads(16,16);
h_over=true;
//for(k=0;k<max;k++)
// printf("%d ",qdmap[k]);
cudaMemcpy(d_over, &h_over, sizeof(bool), cudaMemcpyHostToDevice) ;
cudaMemcpy(d_qdmap, qdmap, sizeof(int)*(max+1), cudaMemcpyHostToDevice);
checkperm<<<blocks,threads>>> (d_over,d_qdmap,d_qverc,d_qelist,d_qvert,d_dvert,d_delist);
//checkperm(bool *found,int * qdmap,int * d_qverc,int * d_qelist,int * d_qvert,int * d_dvert,int *d_delist)
cudaError_t err = cudaGetLastError();
if(err!=cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
printf("Not Ok");
}
cudaMemcpy(&h_over, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
if(h_over){
for(k=0;k<max;k++)
printf("%d ",qdmap[k]);
//printf("\n");
printf("OK\n");
}
//printf("\n");
}
}
else{
for(j=0;j<dmax;j++){
//printf("%d %d %d\n",j,check[j],cvslist[l][j]);
if(cvslist[l][j] && !check[j]){
check[j]=true;
qdmap[i-1]=j;
callforallperm(check,cvslist,i+1,max,dmax);
check[j]=false;
}
}
}
}*/
int main(int argc, char **argv)
{
int deviceId = 4;
cudaSetDevice(deviceId);
int h_qverc,h_dverc;
int *h_qvert,*h_qelist,*h_dvert,*h_delist;//,*h_dvelist,*h_qvelist;
bool *h_qtree;
int *d_hash1,*d_hash2;
int i,j;
int **h_cvslist,**d_cvslist,**h_tem;
scanf("%d",&h_qverc);
h_qvert=(int *)malloc(sizeof(int)*(h_qverc+1));
h_qvid=(int *)malloc(sizeof(int)*(h_qverc+1));
h_qidtov=(int *)malloc(sizeof(int)*(h_qverc+1));
h_tem=(int **)malloc(sizeof(int*)*(h_qverc+1));
h_cvslist=(int **)malloc(sizeof(int*)*(h_qverc+1));
for(i=0;i<=h_qverc;i++){
scanf("%d",&h_qvert[i]);
}
h_qelist=(int *)malloc(sizeof(int)*h_qvert[h_qverc]);
for(i=0;i<h_qvert[h_qverc];i++)
scanf("%d",&h_qelist[i]);
h_qtree=(bool *)malloc(sizeof(bool)*h_qvert[h_qverc]);
for(i=0;i<h_qvert[h_qverc];i++){
scanf("%d",&j);
if(j==1)
h_qtree[i]=true;
else
h_qtree[i]=false;
}
scanf("%d",&h_dverc);
h_dvert=(int *)malloc(sizeof(int)*(h_dverc+1));
for(i=0;i<=h_dverc;i++){
scanf("%d",&h_dvert[i]);
}
for(i=0;i<=h_qverc;i++)
h_cvslist[i]=(int *)malloc(sizeof(int)*(h_dverc+1));
h_delist=(int *)malloc(sizeof(int)*h_dvert[h_dverc]);
for(i=0;i<h_dvert[h_dverc];i++)
scanf("%d",&h_delist[i]);
cudaMalloc(&d_qverc,sizeof(int));
cudaMalloc(&d_over,sizeof(bool));
cudaMalloc(&d_qvert,sizeof(int)*(h_qverc+1));
cudaMalloc(&d_qidtov,sizeof(int)*(h_qverc+1));
//cudaMalloc(&d_loc,sizeof(int)*(h_qverc+1));
cudaMalloc(&d_qelist,sizeof(int)*h_qvert[h_qverc]);
cudaMalloc(&d_qtree,sizeof(bool)*h_qvert[h_qverc]);
cudaMalloc(&d_hash1,sizeof(int)*1000038);
cudaMalloc(&d_hash2,sizeof(int)*1000038);
cudaMalloc(&d_qvid,sizeof(int)*(h_qverc+1));
cudaMemcpy(d_qverc,&h_qverc,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_qvert,h_qvert,sizeof(int)*(h_qverc+1),cudaMemcpyHostToDevice);
cudaMemcpy(d_qelist,h_qelist,sizeof(int)*h_qvert[h_qverc],cudaMemcpyHostToDevice);
cudaMemcpy(d_qtree,h_qtree,sizeof(bool)*h_qvert[h_qverc],cudaMemcpyHostToDevice);
cudaMemset(d_hash1,0,sizeof(int)*1000038);
cudaMemset(d_hash2,0,sizeof(int)*1000038);
//cudaMemset(d_loc,0,sizeof(int)*(h_qverc+1));
cudaMemset(d_qidtov,-1,sizeof(int)*(h_qverc+1));
cudaMemset(d_qvid,0,sizeof(int)*(h_qverc+1));
int *h_hash1=(int *)malloc(sizeof(int)*1000038);
int *h_hash2=(int *)malloc(sizeof(int)*1000038);
dim3 blocks((sqrt(h_qverc)/16 )+ 1,(sqrt(h_qverc)/16)+1);
dim3 threads(16,16);
//int *d_qvert,int *d_dverc,int *d_qvid,int *d_qelist,bool *d_over,bool *d_hash1,bool *d_hash2)
h_over=true;
//h_qvid[1]=1;
//h_qvid[3]=1;
//cudaMemcpy(d_qvid,h_qvid,sizeof(int)*(h_qverc+1),cudaMemcpyHostToDevice);
//printf("qt%d %dqt\n",h_qtree[0],h_qtree[1]);
setdeg1<<<blocks,threads>>>(d_qvert,d_qverc,d_qvid,d_qtree);
h_over=false;
int maxval=2;
while(!h_over)
{
h_over=true;
cudaMemcpy(d_over, &h_over, sizeof(bool), cudaMemcpyHostToDevice) ;
cudaMemset(d_hash1,0,sizeof(int)*1000038);
findhash <<<blocks,threads>>> (d_qvert,d_qverc,d_qvid,d_qelist,d_over,d_qtree,d_hash1,d_hash2);
//(int *d_qvert,int *d_dverc,int *d_qvid,int *d_qelist,bool *d_over,bool *d_hash1,bool *d_qtree,bool *d_hash2)
cudaError_t err = cudaGetLastError();
if(err!=cudaSuccess)
{
printf("Error: %s\n", cudaGetErrorString(err));
printf("Not Ok");
}
cudaMemcpy(h_hash1,d_hash1,sizeof(int)*1000038,cudaMemcpyDeviceToHost);
h_hash1[0]+=maxval;
thrust::exclusive_scan(h_hash1,h_hash1+1000038,h_hash1);
maxval=h_hash1[1000037];
cudaMemcpy(d_hash1,h_hash1,sizeof(int)*1000038,cudaMemcpyHostToDevice);
puttoid<<<blocks,threads>>>(d_qvert,d_qverc,d_qvid,d_qelist,d_qtree,d_hash1,d_qidtov);
/// cudaMemcpy(h_hash2,d_hash2,sizeof(bool)*1000038,cudaMemcpyDeviceToHost);
cudaMemcpy(&h_over, d_over, sizeof(bool), cudaMemcpyDeviceToHost) ;
//printf("over flag:%d ",h_over);
/*for(i=0;i<h_qverc;i++){
//if()
printf("%d ",h_qvid[i]);
// if(h_hash2[i])
// printf("h2 %d ",i);
// if(h_hash1[i] || h_hash2[i])
// printf("\n");
}
printf("\n");*/
}
cudaMemcpy(h_qvid,d_qvid,sizeof(int)*h_qverc,cudaMemcpyDeviceToHost);
cudaMemcpy(h_qidtov,d_qidtov,sizeof(int)*(h_qverc+1),cudaMemcpyDeviceToHost);
for(i=0;i<=h_qverc;i++){
printf("%d ",h_qidtov[i]);
}
printf("\n");
for(i=0;i<=h_qverc;i++){
printf("%d ",h_qvid[i]);
}
printf("\n");
cudaFree(d_qtree);
cudaFree(d_hash1);
cudaFree(d_hash2);
free(h_hash1);
free(h_hash2);
free(h_qtree);
cudaMalloc(&d_cvslist,sizeof(int*)*(h_qverc+1));
for(i=0;i<=h_qverc;i++){
cudaMalloc(&h_tem[i],sizeof(int)*(h_dverc+1));
cudaMemset(h_tem[i],0,sizeof(int)*(h_dverc+1));
}
cudaMemset(h_tem[1],1,sizeof(int)*(h_dverc+1));
cudaMemcpy(d_cvslist,h_tem,sizeof(int*)*(h_qverc+1),cudaMemcpyHostToDevice);
cudaMalloc(&d_dvert,sizeof(int)*(h_dverc+1));
cudaMalloc(&d_dverc,sizeof(int));
cudaMalloc(&d_delist,sizeof(int)*h_dvert[h_dverc]);
cudaMemcpy(d_dverc,&h_dverc,sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_dvert,h_dvert,sizeof(int)*(h_dverc+1),cudaMemcpyHostToDevice);
cudaMemcpy(d_delist,h_delist,sizeof(int)*h_dvert[h_dverc],cudaMemcpyHostToDevice);
dim3 dblocks((sqrt(h_dverc)/16 )+ 1,(sqrt(h_dverc)/16)+1);
dim3 dthreads(16,16);
int **d_cvsverlist,**d_temverlist;
int *d_size_cvs,*h_size_cvs;
memset(h_cvslist[1],1,sizeof(int)*(h_dverc+1));
h_size_cvs=(int *)malloc(sizeof(int)*(h_qverc+1));
memset(h_size_cvs,0,sizeof(int)*(h_qverc+1));
cudaMalloc(&d_size_cvs,sizeof(int)*(h_qverc+1));
cudaMemset(d_size_cvs,0,sizeof(int)*(h_qverc+1));
cudaMalloc(&d_cvsverlist,sizeof(int*)*(h_qverc+1));
d_temverlist=(int **)malloc(sizeof(int*)*(h_qverc+1));
for(i=0;i<=h_qverc;i++){
cudaMalloc(&d_temverlist[i],sizeof(int)*(h_dverc+1));
cudaMemset(d_temverlist[i],0,sizeof(int)*(h_dverc+1));
}
cudaMemcpy(d_cvsverlist,d_temverlist,sizeof(int*)*(h_qverc+1),cudaMemcpyHostToDevice);
long long int totalthreads=1;
for(i=0;i<h_dverc;i++)
h_cvslist[1][i]=i;
cudaMemcpy(d_temverlist[1],h_cvslist[1],sizeof(int)*(h_dverc+1),cudaMemcpyHostToDevice);
h_size_cvs[1]=h_dverc;
for(i=0;i<=h_qverc;i++)
{
if(h_qidtov[i]!=-1)
{
//findcvs(int ver,int *d_dvert,int *d_dverc,int *d_delist,int *d_qvert,int *d_qelist,int *d_qvid,bool ** d_dcvslist )
findcvs<<<dblocks,dthreads>>>(h_qidtov[i],d_dvert,d_dverc,d_delist,d_qvert,d_qelist,d_qvid,d_cvslist);
cudaError_t err = cudaGetLastError();
cudaMemcpy(h_cvslist[i],h_tem[i],sizeof(int)*(h_dverc+1),cudaMemcpyDeviceToHost);
for(j=0;j<=h_dverc;j++)
if(h_cvslist[i][j])
printf("%d ",j);
printf("\n");
//printf("%d ",h_qidtov[i]);
thrust::exclusive_scan(h_cvslist[i],h_cvslist[i]+h_dverc+1,h_cvslist[i]);
h_size_cvs[i]=h_cvslist[i][h_dverc];
cudaMemcpy(h_tem[i],h_cvslist[i],sizeof(int)*(h_dverc+1),cudaMemcpyHostToDevice);
puttolist<<<dblocks,dthreads>>>(d_dverc,h_tem[i],d_temverlist[i]);
// cudaMemcpy(h_cvslist[i],d_temverlist[i],sizeof(int)*(h_dverc+1),cudaMemcpyDeviceToHost);
// for(j=0;j<=h_dverc;j++)
// printf("%d ",h_cvslist[i][j]);
}
}
// cudaMemcpy(h_delist,d_delist,sizeof(int)*(h_dvert[h_dverc]),cudaMemcpyDeviceToHost);
// for(j=0;j<h_dvert[h_dverc];j++)
// printf("%d ",h_delist[j]);
for(i=0;i<h_qverc;i++)
if(h_size_cvs[h_qvid[i]])
totalthreads*=h_size_cvs[h_qvid[i]];
printf("Start %lld\n",totalthreads);
cudaMemcpy(d_size_cvs,h_size_cvs,sizeof(int)*(h_qverc+1),cudaMemcpyHostToDevice);
//totalthreads=1000;
dim3 dpblocks(((int)(sqrt(totalthreads)/16 )+ 1),((int)(sqrt(totalthreads)/16)+1));
dim3 dpthreads(16,16);
int *d_mapans,*d_countans,*h_countans;
cudaMalloc(&d_mapans,sizeof( int)*totalthreads*(h_qverc+1));
cudaMalloc(&d_countans,sizeof( int)*(totalthreads+1));
cudaMemset(d_countans,0,sizeof( int)*(totalthreads+1));
//h_countans=(int *)malloc(sizeof(int)*(totalthreads+1));
//h_countans=0;
//cudaMemcpy(d_countans, &h_countans, sizeof( int), cudaMemcpyHostToDevice) ;
//cudaMemcpy(&h_countans, d_qverc, sizeof(int), cudaMemcpyDeviceToHost) ;
//printf("%d\n",h_countans);
findall<<<dpblocks,dpthreads>>> (d_mapans,d_countans,d_qvid,d_qverc,d_qelist,d_qvert,d_dvert,d_delist,d_cvsverlist,d_size_cvs);
thrust::device_ptr<int> cptr=thrust::device_pointer_cast(d_countans);
//int sum=thrust::count(cptr,cptr+totalthreads,1);
//cudaMemcpy(h_countans, d_countans, sizeof(int)*totalthreads, cudaMemcpyDeviceToHost) ;
//thrust::exclusive_scan(h_countans,h_countans+totalthreads+1,h_countans);
//printf("%d\n",h_countans[totalthreads-1] );
//printf("%d\n",h_countans);
//printf("%d\n",sum);
/*j=0;
for(i=0;i<totalthreads;i++)
if(h_countans[i])
j++;
printf("%d\n",j);
*///bool * check=(bool *)malloc(sizeof(bool)*(h_dverc+1));
//memset(check,false,sizeof(bool)*(h_dverc+1));
//qdmap=(int *)malloc(sizeof(int)*(h_qverc+1));
//cudaMalloc(&d_qdmap,sizeof(int)*(h_qverc+1));
//callforallperm(check,h_cvslist,1,h_qverc,h_dverc);
cudaFree(d_over);
cudaFree(d_qverc);
cudaFree(d_qvert);
cudaFree(d_qelist);
cudaFree(d_qvid);
cudaFree(d_qidtov);
cudaFree(d_dvert);
cudaFree(d_delist);
cudaFree(d_dverc);
cudaFree(d_cvslist);
cudaFree(d_cvsverlist);
cudaFree(d_size_cvs);
/*free(h_qvid);
free(h_qvert);
//free(h_qelist);
free(h_qidtov);
free(h_cvslist);
free(h_dvert);
free(h_delist);*/
}
|
1,212
|
#include <stdio.h>
#include <cuda.h>
// Square matrix multiply C = A * B (size x size), one thread per output
// element; threads outside the matrix exit early.
__global__ void vecmul(float *A, float* B, float *C, int size)
{
    // Output coordinates for this thread.
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    // Skip threads that fall outside the matrix.
    if (r >= size || c >= size)
        return;
    // Dot product of row r of A with column c of B.
    float acc = 0;
    for (int k = 0; k < size; k++)
        acc += A[r*size + k] * B[k*size + c];
    C[r*size + c] = acc;
}
extern "C" {
// Host wrapper: multiply two size x size host matrices on the GPU,
// writing the product into host buffer C. All buffers hold size*size floats.
void maxmul(float *A, float* B, float *C, int size) {
    int total = size*size;
    // Allocate device memory:
    float* gpu_A;
    float* gpu_B;
    float* gpu_C;
    int msize = total * sizeof(float);
    cudaMalloc((void**)&gpu_A, msize);
    cudaMemcpy(gpu_A, A, msize, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&gpu_B, msize);
    cudaMemcpy(gpu_B, B, msize, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&gpu_C, msize);
    // Blocks & grids:
    // BUG FIX: the original launched ONE block of size x size threads,
    // which exceeds the 1024-threads-per-block hardware limit for any
    // size > 32 and makes the launch fail. Tile with fixed 16x16 blocks
    // and a ceil-division grid; vecmul bounds-checks, so the grid may
    // safely over-provision.
    dim3 blocks(16, 16);
    dim3 grid((size + 15) / 16, (size + 15) / 16);
    // Call the kernel:
    vecmul<<<grid,blocks>>>(gpu_A, gpu_B, gpu_C, size);
    // Get the result Matrix (blocking copy also synchronizes):
    cudaMemcpy(C, gpu_C, msize, cudaMemcpyDeviceToHost);
    //Free device matrices
    cudaFree(gpu_A);
    cudaFree(gpu_B);
    cudaFree(gpu_C);
}
}
|
1,213
|
/*
* Name: Nate Steawrt
* Date: 04-04-16
* Description: Serial implementation of Matrix morphism
*/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#define RANDOM_VALUE_MIN 1.0
#define RANDOM_VALUE_MAX 2.0
#define NUM_ROWS 4097
#define NUM_COLS 4097
/*
* Calculate and return a random value between min and max.
*/
double randDouble(double min, double max) {
    // Map rand()'s [0, RAND_MAX] onto [min, max] by scaling.
    double span = max - min;
    double scale = RAND_MAX / span;
    return min + rand() / scale;
}
/*
* Output the matrix to fout
*/
void outputMatrix(FILE *fout, double *matrix, int rows, int cols) {
    // One line per row, entries trailed by a single space.
    int r, c;
    for (r = 0; r < rows; r++) {
        const double *rowPtr = matrix + r * cols;
        for (c = 0; c < cols; c++) {
            fprintf(fout, "%lf ", rowPtr[c]);
        }
        fprintf(fout, "\n");
    }
}
/*
 * Each thread rebuilds one matrix row (rows 1..NUM_ROWS-1): for the first
 * NUM_COLS-1 columns, row[i] = prevRow[i+1] + row[i+1]. Row 0 is the
 * untouched seed row. Expects a 1-D grid of 1-D (or 2-D) blocks.
 * NOTE(review): neighbouring rows still run concurrently, so a thread may
 * read a previous row that another thread is updating -- confirm whether
 * Jacobi- or Gauss-Seidel-style propagation is intended.
 */
__global__ void computeMath(double *matrix) {
    int i;
    // BUG FIX: the original id (blockDim.x*threadIdx.y + threadIdx.x + 1)
    // ignored blockIdx entirely, so with the 64-block launch every block
    // redundantly (and racily) recomputed rows 1..64 while rows >= 65 were
    // never processed. Include the block offset and bound against NUM_ROWS.
    int threadId = blockIdx.x * (blockDim.x * blockDim.y)
                 + blockDim.x * threadIdx.y + threadIdx.x + 1;
    if (threadId >= NUM_ROWS)
        return;
    // Pointers: output cell, addend one column right in the previous row,
    // and addend one column right in this row.
    double *f_ptr, *first_ptr, *second_ptr;
    f_ptr = matrix + threadId * NUM_COLS;
    first_ptr = matrix + (threadId - 1) * NUM_COLS + 1;
    second_ptr = f_ptr + 1;
    // Compute a single row
    for (i = 0; i < NUM_COLS - 1; i++, f_ptr++, first_ptr++, second_ptr++) {
        *f_ptr = *first_ptr + *second_ptr;
    }
}
/*
* Check if an error occurred during the last CUDA command
*/
void checkError() {
    // Use the proper enum type and named success constant instead of a raw
    // int compared against the magic number 0, and include the
    // human-readable error string in the report.
    cudaError_t errorCode = cudaGetLastError();
    if (errorCode != cudaSuccess) {
        printf("Error %d (%s) occurred during last operation.\n",
               (int)errorCode, cudaGetErrorString(errorCode));
    }
}
/*
 * Fill a NUM_ROWS x NUM_COLS matrix with random doubles, copy it to the
 * device, run the computeMath kernel 100 times over it, and copy the final
 * matrix back. The result is not printed or verified here.
 * NOTE(review): rand() is never seeded, so the matrix is identical on
 * every run; malloc's ~NUM_ROWS*NUM_COLS*8-byte result is not checked.
 */
int main(void) {
// Declare the needed variables
int i, j;
// Define thread hierarchy
int nblocks = 64;
int dimX = 64;
int dimY = 1;
// Declare the memory pointers
double *h_matrix, *d_matrix;
// Allocate memory for host and device
size_t memSize = NUM_ROWS * NUM_COLS * sizeof(*h_matrix);
// Create space on the host and device for matrix
h_matrix = (double *)malloc(memSize);
cudaMalloc( (void**) &d_matrix, memSize);
checkError();
// Initialize the matrix and copy values into device
double *f_ptr = h_matrix; // Setup a traversal pointer
for (i = 0; i < NUM_ROWS; i++) {
for (j = 0; j < NUM_COLS; j++, f_ptr++) {
*f_ptr = randDouble(RANDOM_VALUE_MIN, RANDOM_VALUE_MAX);
}
}
cudaMemcpy(d_matrix, h_matrix, memSize, cudaMemcpyHostToDevice);
checkError();
// Set up grid and block structure
dim3 dimGrid(nblocks);
dim3 dimBlock(dimX, dimY);
// Launch the kernel 100 times; launches queue on the same stream, so each
// iteration sees the previous one's output.
for (i = 0; i < 100; i++) {
computeMath<<< dimGrid, dimBlock >>>(d_matrix);
checkError();
}
// Retrieve results and free memory (blocking copy synchronizes first)
cudaMemcpy(h_matrix, d_matrix, memSize, cudaMemcpyDeviceToHost);
checkError();
free(h_matrix);
cudaFree(d_matrix);
checkError();
}
|
1,214
|
#include "includes.h"
#define WEIGHTSUM 273
#define BLOCK_SIZE 16
int * heatmap;
size_t heatmap_pitch;
int * scaled_heatmap;
size_t scaled_heatmap_pitch;
int * blurred_heatmap;
size_t blurred_heatmap_pitch;
float* d_desiredPositionX;
float* d_desiredPositionY;
// Upscale one heatmap cell into a CELLSIZE x CELLSIZE region of the scaled
// heatmap. Both images are pitched allocations (pitches in BYTES). One
// thread handles one source cell at (x, y) of a 2-D launch.
// NOTE(review): no bounds guard -- assumes the launch exactly covers the
// source heatmap; confirm with the host launch code.
__global__ void computeScaledHeatmap(int* heatmap, size_t heatmap_pitch, int* scaled_heatmap, size_t scaled_heatmap_pitch) {
    // Source-cell coordinates for this thread.
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // Fetch the source value via pitched addressing.
    int value = *((int*)((char*)heatmap + y * heatmap_pitch) + x);
    // Replicate it over the CELLSIZE x CELLSIZE destination region.
    for (int dy = 0; dy < CELLSIZE; dy++) {
        int* dstRow = (int*)((char*)scaled_heatmap + (y * CELLSIZE + dy) * scaled_heatmap_pitch);
        for (int dx = 0; dx < CELLSIZE; dx++) {
            dstRow[x * CELLSIZE + dx] = value;
        }
    }
}
|
1,215
|
#include<stdio.h>
#include<math.h>
#include<cuda.h>
#define N 256
// Matrix-vector product A = B * C, where B is N x N row-major and C is a
// length-N vector; each thread computes one output element A[j].
// Expects the launch to cover exactly N threads (2 blocks x 128 == N == 256
// in this file); there is no bounds guard.
__global__ void matrix_vector_multi_gpu_2_128(float *A_d,float *B_d,float *C_d){
    int i,j;
    j=blockIdx.x*128+threadIdx.x;
    // PERF FIX: accumulate in a register instead of the original
    // read-modify-write of global A_d[j] on every iteration; the addition
    // order is unchanged, so the result is bit-identical.
    float sum = 0.0F;
    for(i=0;i<N;i++){
        sum += B_d[j*N+i]*C_d[i];
    }
    A_d[j] = sum;
}
// Demo: build a 256x256 matrix B (row j filled with j/256) and vector C of
// ones, compute A = B*C on the GPU with 2 blocks x 128 threads, and print A.
int main(){
    int i,j;
    float A[N],B[N*N],C[N];  // A: result, B: N x N matrix, C: input vector
    float *A_d,*B_d,*C_d;
    dim3 blocks(2,1,1);      // 2 * 128 threads == N rows, one thread per row
    dim3 threads(128,1,1);
    for(j=0;j<N;j++){
        for(i=0;i<N;i++){
            B[j*N+i]=((float)j)/256.0;
        }
    }
    for(j=0;j<N;j++){
        C[j]=1.0F;
    }
    cudaMalloc((void**)&A_d,N*sizeof(float));
    cudaMalloc((void**)&B_d,N*N*sizeof(float));
    cudaMalloc((void**)&C_d,N*sizeof(float));
    // BUG FIX: the original also copied the UNINITIALIZED host array A to
    // the device; the kernel fully overwrites A_d, so that copy transferred
    // indeterminate bytes for nothing and has been dropped.
    cudaMemcpy(B_d,B,N*N*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(C_d,C,N*sizeof(float),cudaMemcpyHostToDevice);
    matrix_vector_multi_gpu_2_128<<<blocks,threads>>>(A_d,B_d,C_d);
    // Blocking copy back also synchronizes with the kernel.
    cudaMemcpy(A,A_d,N*sizeof(float),cudaMemcpyDeviceToHost);
    for(j=0;j<N;j++){
        printf("A[ %d ]=%f \n",j,A[j]);
    }
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    return 0;
}
|
1,216
|
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
// #include <opencv2/opencv.hpp>
#include<fstream>
// #include <TooN/TooN.h>
// #include <TooN/se3.h>
// #include <TooN/GR_SVD.h>
// using namespace cv;
using namespace std;
// Flat global thread index for a 1-D launch configuration.
__device__ int get_pos(){
    int blockOffset = blockIdx.x * blockDim.x;
    return blockOffset + threadIdx.x;
}
// Device-resident image buffer: `data` is a DEVICE pointer allocated by the
// (w, h) constructor; width/height and the byte size are kept alongside.
// NOTE(review): no destructor and no copy control -- the cudaMalloc'd
// buffer is never freed, and copies of Img alias the same device pointer;
// confirm the intended ownership convention with callers.
struct Img{
int* data;          // device pointer (cudaMalloc), w*h ints
int image_width;
int image_height;
int img_arr_size;   // buffer size in bytes
Img(){
}
Img(int w, int h){
image_height = h;
image_width = w;
img_arr_size = sizeof(int) * image_width*image_height;
cudaMalloc((void**)&data, img_arr_size);
}
};
// 4x4 matrix stored as four float4 ROWS.
struct Matrix4 {
float4 data[4];
// Translation column: the .w components of the first three rows.
inline __host__ __device__ float3 get_translation() const {
return make_float3(data[0].w, data[1].w, data[2].w);
}
};
// Pinhole intrinsics matrix from k = (fx, fy, cx, cy):
//   [ fx  0  cx  0 ]
//   [  0 fy  cy  0 ]
//   [  0  0   1  0 ]
//   [  0  0   0  1 ]
inline Matrix4 getCameraMatrix( const float4 & k ){
    Matrix4 K;
    K.data[0] = make_float4(k.x, 0.f, k.z, 0.f);
    K.data[1] = make_float4(0.f, k.y, k.w, 0.f);
    K.data[2] = make_float4(0.f, 0.f, 1.f, 0.f);
    K.data[3] = make_float4(0.f, 0.f, 0.f, 1.f);
    return K;
}
// Closed-form inverse of the intrinsics matrix built by getCameraMatrix,
// for k = (fx, fy, cx, cy).
inline Matrix4 getInverseCameraMatrix( const float4 & k ){
    Matrix4 invK;
    invK.data[0] = make_float4(1.0f / k.x, 0.f, -k.z / k.x, 0.f);
    invK.data[1] = make_float4(0.f, 1.0f / k.y, -k.w / k.y, 0.f);
    invK.data[2] = make_float4(0.f, 0.f, 1.f, 0.f);
    invK.data[3] = make_float4(0.f, 0.f, 0.f, 1.f);
    return invK;
}
// Truncate a float4 to its xyz components.
inline __host__ __device__ float3 make_float3(float4 a)
{
    float3 v;
    v.x = a.x;
    v.y = a.y;
    v.z = a.z;
    return v;
}
// Euclidean dot product of two 3-vectors.
inline __host__ __device__ float dot(float3 a, float3 b)
{
    float s = a.x * b.x;
    s += a.y * b.y;
    s += a.z * b.z;
    return s;
}
// Apply only the upper-left 3x3 of M to v (rotation/scale, no translation).
inline __host__ __device__ float3 rotate( const Matrix4 & M, const float3 & v){
    float rx = dot(make_float3(M.data[0]), v);
    float ry = dot(make_float3(M.data[1]), v);
    float rz = dot(make_float3(M.data[2]), v);
    return make_float3(rx, ry, rz);
}
std::ostream & operator<<( std::ostream & out, const Matrix4 & m ){
for(unsigned i = 0; i < 4; ++i)
out << m.data[i].x << " " << m.data[i].y << " " << m.data[i].z << " " << m.data[i].w << "\n";
return out;
}
// __global__ void point2depthimage(Matrix4 r_inv, float* depth_image, float* point_sequence) {
// printf("aaa\n");
// }
// Projects one 3-D point per thread through r_inv and writes its integer
// depth into the image at the projected pixel.
// Fixes: the original only checked the UPPER bounds of image_pos, so points
// projecting to negative coordinates wrote out of bounds; it also divided by
// pixel_.z without guarding against zero.
__global__ void mykernel(float3* a, const Matrix4 r_inv, int max_num,Img* img){
    int index = get_pos();
    if(index >= max_num)
        return;
    float3 pixel_ = rotate(r_inv, a[index]);
    if(pixel_.z == 0.0f)           // cannot perspective-divide by zero depth
        return;
    int depth = int(pixel_.z);
    int2 image_pos = make_int2(pixel_.x / pixel_.z, pixel_.y / pixel_.z);
    // Reject pixels outside the image on EITHER side (lower bound was missing).
    if(image_pos.x < 0 || image_pos.y < 0 ||
       image_pos.x >= img->image_width || image_pos.y >= img->image_height)
        return;
    img->data[img->image_width * image_pos.y + image_pos.x] = depth;
    printf("index is %d,x is %f, y is %f, z is %f, image_pos.x is %d, image_pos.z is %d\n",index,pixel_.x, pixel_.y, pixel_.z, image_pos.x, image_pos.y);
}
// ---- module-level state for the point-projection demo ----
int num = 1000;                     // number of simulated 3-D points
float3* simulation_array;           // host-side point buffer
float3* test_gpu_array;             // device-side copy of the points
int* img;                           // unused in this excerpt
int array_size = sizeof(float3)*num;          // point buffer size in bytes
float4 four_element = make_float4(1,1,1,1);   // intrinsics (fx, fy, cx, cy)
int img_width = 100, img_height = 200;
int* image_show;                    // host buffer for the rendered depth image
// NOTE(review): img_show is a HOST pointer (operator new), yet main() passes
// it straight to the kernel, which dereferences it on the device — that is an
// illegal device memory access.  Only the .data member is device memory.
Img* img_show = new Img(img_width, img_height);
// Matrix4 camera_M = getCameraMatrix(rotate_matrix);
Matrix4 inv_camera_M = getInverseCameraMatrix(four_element);
// Generates random points, projects them into a depth image on the GPU, and
// dumps the image as text.
// Fixes: img_show is a HOST pointer — dereferencing it inside the kernel is
// an illegal device access.  The Img struct (whose .data member is already a
// device pointer) is now copied to device memory and that device pointer is
// passed instead.  Also adds a launch-error check and releases all memory.
int main() {
    simulation_array = (float3*)malloc(array_size);
    image_show = (int*)malloc(img_show->img_arr_size);
    for(int x = 0; x < num; x++){
        simulation_array[x] = make_float3(rand()%10, rand()%10, rand()%10);
    }
    cudaMalloc((void**)&test_gpu_array, array_size);
    cudaMemcpy(test_gpu_array, simulation_array, array_size, cudaMemcpyHostToDevice);
    // Device-resident copy of the Img descriptor (bug fix, see above).
    Img* d_img = NULL;
    cudaMalloc((void**)&d_img, sizeof(Img));
    cudaMemcpy(d_img, img_show, sizeof(Img), cudaMemcpyHostToDevice);
    mykernel<<<100,10>>>(test_gpu_array, inv_camera_M, num, d_img);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    cudaMemcpy(image_show, img_show->data, img_show->img_arr_size, cudaMemcpyDeviceToHost);
    ofstream ofile;
    ofile.open("image_show.txt");
    for(int i = 0; i < img_height; i++)
        for(int j = 0; j < img_width; j++){
            if(j == img_width-1)
                ofile<<image_show[i*img_width + j]<<'\n';
            else
                ofile<<image_show[i*img_width + j]<<' ';
        }
    ofile.close();
    // Release everything (missing in the original).
    cudaFree(d_img);
    cudaFree(test_gpu_array);
    free(simulation_array);
    free(image_show);
    cudaDeviceReset();
}
|
1,217
|
// Placeholder: computes a 128 x 128 grid size but performs no work yet.
void ocean() {
    int m = 1 << 7;   // 128 rows
    int n = 1 << 7;   // 128 columns
    (void)m;          // silence unused-variable warnings
    (void)n;
}
|
1,218
|
#include <stdio.h>
#include <cuda.h>
//size of array
#define N 4096
//vector addition kernel
// Element-wise vector addition c = a + b; one element per thread, guarded
// against threads beyond the N-element arrays.
__global__ void vectorAddKernel(int *a, int *b, int *c)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Times a GPU vector addition of two N-element arrays and prints the results.
// Fixes two real bugs in the original:
//  * cudaMemcpy was called with &a_d / &a_h — the ADDRESS of the pointer
//    variable — instead of the pointers themselves, so the device buffers
//    were never filled.
//  * cudaFree was called on the HOST stack arrays; the device buffers leaked.
// Also drops the pointless H2D copy of the uninitialized output array c_h.
int main()
{
    // 16 blocks x 256 threads = 4096 threads = N elements
    dim3 grid(16, 1, 1);
    dim3 block(256, 1, 1);
    int a_h[N];          // host inputs/outputs
    int b_h[N];
    int c_h[N];
    int *a_d, *b_d, *c_d; // device buffers
    for (int i = 0; i < N; i++) {
        a_h[i] = i;
        b_h[i] = i;
    }
    cudaMalloc((void**)&a_d, N*sizeof(int));
    cudaMalloc((void**)&b_d, N*sizeof(int));
    cudaMalloc((void**)&c_d, N*sizeof(int));
    // Correct pointer arguments (bug fix, see header comment).
    cudaMemcpy(a_d, a_h, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    vectorAddKernel<<<grid, block>>>(a_d, b_d, c_d);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaMemcpy(c_h, c_d, N*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%i+%i = %i\n", a_h[i], b_h[i], c_h[i]);
    }
    printf("Time to calculate results: %f ms.\n", elapsedTime);
    // Free DEVICE memory (bug fix: original freed the host arrays).
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
1,219
|
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <cuda.h>
#define n 2
// Fill an li-by-lj row-major matrix with the sequence 0, 1, 2, ...
void fillMatrix(double *w, int li, int lj){
    double next = 0;
    for (int r = 0; r < li; r++)
        for (int c = 0; c < lj; c++)
            w[r*lj + c] = next++;
}
// Print an li-by-lj row-major matrix, 4 decimal places, one row per line.
void print(double *w, int li, int lj){
    for (int r = 0; r < li; r++) {
        for (int c = 0; c < lj; c++)
            printf("%.4lf ", w[r*lj + c]);
        printf("\n");
    }
}
// d_z = d_x * d_y for n x n row-major matrices; one output element per thread.
__global__
void product(double *d_x, double *d_y, double *d_z){
    int r = blockIdx.y*blockDim.y + threadIdx.y;
    int c = blockIdx.x*blockDim.x + threadIdx.x;
    if (r >= n || c >= n)
        return;                          // grid may overhang the matrix
    double acc = 0;
    for (int k = 0; k < n; k++)
        acc += d_x[n*r + k] * d_y[k*n + c];
    d_z[r*n + c] = acc;
}
// Multiplies two n x n matrices on the GPU, prints operands/result/timing.
// Fixes: the original malloc'd host blocks into d_x/d_y/d_z and immediately
// overwrote those pointers with cudaMalloc, leaking all three; nothing was
// ever freed.  Both leaks are fixed below.
int main(int argc, char const *argv[])
{
    int bytes = n*n*sizeof(double);
    double *x = (double*)malloc(bytes);
    double *y = (double*)malloc(bytes);
    double *z = (double*)malloc(bytes);
    fillMatrix(x, n, n);
    fillMatrix(y, n, n);
    clock_t begin = clock();
    double *d_x, *d_y, *d_z;             // device buffers only (leak fix)
    cudaMalloc((void**)&d_x, bytes);
    cudaMalloc((void**)&d_y, bytes);
    cudaMalloc((void**)&d_z, bytes);
    cudaMemcpy(d_x, x, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, bytes, cudaMemcpyHostToDevice);
    int threads = 16;
    dim3 dimBlock(threads, threads);
    dim3 dimGrid((n+dimBlock.x-1)/dimBlock.x, (n+dimBlock.y-1)/dimBlock.y);
    product<<<dimGrid,dimBlock>>>(d_x, d_y, d_z);
    cudaMemcpy(z, d_z, bytes, cudaMemcpyDeviceToHost);  // implicit sync
    print(x, n, n);
    printf("\n");
    print(y, n, n);
    printf("\n");
    print(z, n, n);
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("%lf\n", time_spent);
    // Release all memory (missing in the original).
    cudaFree(d_x); cudaFree(d_y); cudaFree(d_z);
    free(x); free(y); free(z);
    return 0;
}
|
1,220
|
#include<stdio.h>
#include<cuda_runtime.h>
#include <stdlib.h>
#define length 10
#define length_thread 256
// Print the first `length` entries of array a (debug helper).
#define test(a){\
for(int i =0;i<length;i++){\
printf("a[%d] = %d \n",i,a[i] );\
}\
}
// Print a[start..end] inclusive (debug helper).
#define pr_array(a,start,end){\
for(int i=start;i<=end;i++){\
printf("a[%d] = %d\n",i,a[i]);\
}\
}
// Binary search: on exit b_end is the insertion index for value a within the
// sorted run b[b_start..b_end].  If a equals an existing value, a is placed
// BEFORE it (left-biased tie break).
#define insert0(a,b,b_start,b_end){\
while((b_end-b_start)>1){\
int point = (b_start+b_end)/2;\
if(a<=b[point])b_end = point;\
else b_start = point;\
}\
b_end += (a>b[b_end])-(a<=b[b_start]);\
}\
// Same as insert0, but if a equals an existing value it is placed AFTER it
// (right-biased tie break).
#define insert1(a,b,b_start,b_end){\
while((b_end-b_start)>1){\
int point = (b_start+b_end)/2;\
if(a<b[point])b_end = point;\
else b_start = point;\
}\
b_end += (a>=b[b_end])-(a<b[b_start]);\
}\
// qsort comparator for ints: ascending order via subtraction.
// NOTE(review): subtraction can overflow for extreme int values; fine for the
// small test data used here.
int cmp(const void *a,const void *b)
{
    const int *lhs = (const int *)a;
    const int *rhs = (const int *)b;
    return *lhs - *rhs;
}
// Doubles each of the first `length` elements of a, staging through shared
// memory (each thread touches only its own slot, so no barrier is needed).
__global__ void pr(int *a){
    __shared__ int staging[length];
    int t = threadIdx.x;
    staging[t] = a[t];
    a[t] = staging[t] * 2;
}
// Block-local merge sort: each block loads up to length_thread keys of a[]
// into shared memory, sorts them with a log-step pairwise merge (the
// insert0/insert1 macros binary-search each key's position in the partner
// run), and writes the sorted chunk back.  The last block handles only the
// remainder len % blockDim.x.
// NOTE(review): when len is an exact multiple of blockDim.x, the last block
// computes len == 0 and skips its write-back entirely — confirm intent.
// The __syncthreads() calls below are safe: `len`, `x` and the while-loop
// bound are uniform across the block.
__global__ void merger_thread(int *a,int len){
__shared__ int a_s[length_thread];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int r1,r2;
// stage this block's chunk into shared memory
if(tid < len){
a_s[threadIdx.x] = a[tid];
}
// flag: 1 for the upper half-warp of merge partners, 0 for the lower half
r1 = blockDim.x/2;
int flag = (threadIdx.x>=r1);
tid = threadIdx.x%r1;
// len becomes this block's LOCAL element count from here on
if((gridDim.x-1) == blockIdx.x)len %= blockDim.x;
else len = blockDim.x;
__syncthreads();
// pass 0: sort adjacent pairs with a compare-exchange
if(threadIdx.x<(len/2)){
r1 = threadIdx.x*2;
if(a_s[r1]>a_s[r1+1]){
r2 = a_s[r1];
a_s[r1] = a_s[r1+1];
a_s[r1+1] = r2;
}
}
int loop = 1;
int x , start , end;
x = len;
x = (x/2) + (x%2);
// merge runs of size 2^loop until one sorted run remains
while(x>1){
x = (x/2) + (x%2);
// r1: this thread's source index; start/end: bounds of the partner run
r1 = tid>>loop;
r1 *= 2;
start = r1+1-(int)flag;
r1 += flag;
end = 1<<loop;
r1 *= end;
start *= end;
r2 = tid % end;
r1 += r2;
end += (start-1);
__syncthreads();
if(end > len)end = len;
if(r1 < len){
r1 = a_s[r1];
// biased tie-breaks keep equal keys stable between the two runs
if(flag){
insert1(r1,a_s,start,end);
}
else{
insert0(r1,a_s,start,end);
}
end %= (1<<loop);
r2 +=end;
a_s[r2] = r1;
}
loop++;
__syncthreads();
}
// write the sorted chunk back to global memory
if(threadIdx.x < len){
a[threadIdx.x + blockIdx.x * blockDim.x] = a_s[threadIdx.x];
}
}
// Deterministic linear-congruential sequence reduced modulo `range`.
// The state is static (seeded at 444), so successive calls walk a fixed,
// reproducible sequence.
int random(int range){
    static int state = 444;
    const int increment = 233333;
    const int multiplier = 33352;
    state = (state * multiplier + increment) % range;
    return state;
}
// Allocate a pinned host buffer of `length` ints, fill it with pseudo-random
// values in [0, 52) and echo them.  Caller owns the buffer (cudaFreeHost).
int* gen(){
    int *buf;
    cudaHostAlloc( (void**)&buf, length* sizeof(int),cudaHostAllocDefault );
    for (int i = 0; i < length; ++i) {
        buf[i] = random(52);
        printf("a_h[%d] = %d \n",i,buf[i]);
    }
    return buf;
}
// Launch one merger_thread block per length_thread-sized chunk of a[0..len).
void sort_int(int *a,int len){
    int blocks = (len + length_thread - 1) / length_thread;  // ceil-div
    merger_thread<<<blocks, length_thread>>>(a, len);
}
// Unimplemented stub: intended as a qsort-style generic sort front-end (see
// the commented-out experiments in main); currently does nothing.
void msort(void *a,size_t num,size_t size,int ( * comparator ) ( const void *, const void * ) ){
}
// Builds a descending array (length .. 1), sorts it on the GPU with
// sort_int/merger_thread, and prints input and output.
// Fixes: adds a launch-error check and releases the pinned host buffer and
// the device buffer (both leaked); drops the large commented-out scratch
// experiments that headed the original function.
int main(){
    int *a_h;
    cudaHostAlloc( (void**)&a_h, length* sizeof(int),cudaHostAllocDefault );
    for (int i = 0; i < length; i++) {
        a_h[i] = length - i;        // descending input: length .. 1
        printf("a_h[%d] = %d \n",i,a_h[i]);
    }
    int *a_d;
    cudaMalloc( (void**)&a_d, length*sizeof(int) );
    cudaMemcpy(a_d, a_h, length*sizeof(int), cudaMemcpyHostToDevice);
    sort_int(a_d, length);
    cudaError_t err = cudaGetLastError();   // catch launch failures
    if (err != cudaSuccess) {
        printf("sort_int launch failed: %s\n", cudaGetErrorString(err));
    }
    cudaMemcpy(a_h, a_d, length*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < length; i++) {
        printf("a_h[%d] = %d \n",i,a_h[i]);
    }
    // Release device and pinned host memory (missing in the original).
    cudaFree(a_d);
    cudaFreeHost(a_h);
}
|
1,221
|
/*
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
__global__ void addMultipleBlocks(float *d_a,float *d_b,float *d_c,int m,int n)
{
int i=blockIdx.x*blockDim.x+ threadIdx.x;
if(i<(m*n))
d_c[i]=d_a[i]+d_b[i];
}
__global__ void addSingleBlock(float *d_a,float *d_b,float *d_c,int m,int n)
{
int i=threadIdx.x;
d_c[i]=d_a[i]+d_b[i];
}
int main()
{
float *a,*b,*c;
float *d_a,*d_b,*d_c;
int size,m,n,i,j;
time_t start;
printf("Enter the number of rows and columns : \n");
scanf("%d%d",&m,&n);
a=(float *)malloc((size=sizeof(float)*m*n));
b=(float *)malloc(size);
c=(float *)malloc(size);
for(i=0;i<m;i++)
for(j=0;j<n;j++)
{
a[i*n+j]=i*2+j+0.8;
b[i*n+j]=i*2+j+0.1;
}
cudaMalloc((void **)&d_a,size);
cudaMalloc((void**)&d_b,size);
cudaMalloc((void **)&d_c,size);
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
start=time(0);
addSingleBlock<<<1,(m*n)>>>(d_a,d_b,d_c,m,n);
cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
printf("Result using a single block : \n");
for(i=0;i<m;i++)
{
for(j=0;j<n;j++)
printf("%f ",c[i*n+j]);
printf("\n");
}
printf("\n\n\n TIME TAKEN TO COMPUTER THE ADDITION WITH SINGLE BLOCK - %f",(difftime(time(0),start)));
cudaFree(d_c);
free(c);
cudaMalloc((void **)&d_c,size);
c=(float *)malloc(size);
start=time(0);
addMultipleBlocks<<< (int)((m*n)/9.0+1),9>>>(d_a,d_b,d_c,m,n);
cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
printf("Result using a multiple blocks(3x3) : \n");
for(i=0;i<m;i++)
{
for(j=0;j<n;j++)
printf("%f ",c[i*n+j]);
printf("\n");
}
printf("\n\n\n TIME TAKEN TO COMPUTER THE ADDITION WITH MULTIPLE BLOCKS - %f",(difftime(time(0),start)));
free(a);
free(b);
free(c);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
return 0;
}
*/
|
1,222
|
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>
using namespace std;
// Sums a device vector of n ones with thrust::reduce and reports the sum and
// the elapsed GPU time in milliseconds.
// Fixes: the original silently reduced an EMPTY vector when the size
// argument was missing (atoi("") == 0); it also leaked both CUDA events.
int main(int argc, const char *argv[]) {
    if (argc < 2) {
        cout << "usage: " << argv[0] << " <n>" << endl;
        return 1;
    }
    unsigned int n = atoi(argv[1]);
    thrust::host_vector<int> H(n);
    for (unsigned int i = 0; i < n; i++) {
        H[i] = 1;
    }
    thrust::device_vector<int> D = H;     // single H2D transfer
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    int sum = thrust::reduce(D.begin(), D.end());   // synchronous reduction
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    // Get the elapsed time in milliseconds
    float ms;
    cudaEventElapsedTime(&ms, start, stop);
    cout << sum << endl;
    cout << ms << endl;
    // Destroy the timing events (leaked in the original).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
1,223
|
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
// Raises every element of row n to the power (n+1), where n is the thread
// index and *d_m is the row length: row 0 is copied unchanged, row 1 is
// squared, row 2 cubed, and so on.  Launched as <<<1, n>>> — one thread per
// row, single block.
__global__ void q1(int* d_a,int* d_r,int *d_m)
{
int n = threadIdx.x;
for(int i = 0;i<(*d_m);i++)
{
// start from the original value, then multiply n more times -> a^(n+1)
d_r[n*(*d_m)+i] = d_a[n*(*d_m)+i];
for(int j = 0;j<n;j++)
d_r[n*(*d_m)+i] *= d_a[n*(*d_m)+i];
}
}
// Reads an m x n matrix, raises each row's elements to (row index + 1) on the
// GPU, and prints the result.
// Fixes: the original copied the UNINITIALIZED host buffer r to the device
// (the kernel overwrites every element, so the copy is dropped) and never
// freed any host or device memory.
int main(void)
{
    int *a, *r, m, n, i;
    int *d_a, *d_r, *d_m;
    printf("Enter m,n : ");
    scanf("%d %d", &m, &n);
    a = (int*)malloc(m*n*sizeof(int));
    r = (int*)malloc(m*n*sizeof(int));
    printf("Enter matrix:\n");
    for (i = 0; i < m*n; i++)
    {
        scanf("%d", &a[i]);
    }
    cudaMalloc((void **)&d_a, (m*n)*sizeof(int));
    cudaMalloc((void **)&d_r, (m*n)*sizeof(int));
    cudaMalloc((void **)&d_m, sizeof(int));
    cudaMemcpy(d_a, a, (m*n)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_m, &m, sizeof(int), cudaMemcpyHostToDevice);
    q1<<<1,n>>>(d_a, d_r, d_m);
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("%s\n", cudaGetErrorString(error));
    }
    cudaMemcpy(r, d_r, (m*n)*sizeof(int), cudaMemcpyDeviceToHost);
    printf("Result matrix :\n");
    for (i = 0; i < m*n; i++)
    {
        printf("%d\t", r[i]);
        if ((i+1)%m == 0)   /* NOTE(review): row break every m entries — verify m vs n */
            printf("\n");
    }
    // Release device and host memory (missing in the original).
    cudaFree(d_a); cudaFree(d_r); cudaFree(d_m);
    free(a); free(r);
}
|
1,224
|
#include "includes.h"
// For every FFT window (ifft, spanning blockDim.x * gridDim.x windows) of
// every antenna (iant = blockIdx.y), evaluate the quadratic delay
// interpolator, split the mean delay into an integer sample shift plus a
// fractional part (radians per FFT channel), and compute the linear fringe
// rotation (start phase and per-sample phase increment) at LO frequency lo.
// gpuDelays holds 4 doubles per antenna: the quadratic coefficients followed
// by the file start offset.
// NOTE(review): the fftchannels parameter is unused in this kernel.
__global__ void calculateDelaysAndPhases(double * gpuDelays, double lo, double sampletime, int fftsamples, int fftchannels, int samplegranularity, float * rotationPhaseInfo, int *sampleShifts, float* fractionalSampleDelays)
{
size_t ifft = threadIdx.x + blockIdx.x * blockDim.x;
size_t iant = blockIdx.y;
int numffts = blockDim.x * gridDim.x;
double meandelay, deltadelay, netdelaysamples_f, startphase;
double d0, d1, d2, a, b;
double * interpolator = &(gpuDelays[iant*4]);
double filestartoffset = gpuDelays[iant*4+3];
float fractionaldelay;
int netdelaysamples;
// evaluate the delay for the given FFT of the given antenna
// calculate values at the beginning, middle, and end of this FFT
d0 = interpolator[0]*ifft*ifft + interpolator[1]*ifft + interpolator[2];
d1 = interpolator[0]*(ifft+0.5)*(ifft+0.5) + interpolator[1]*(ifft+0.5) + interpolator[2];
d2 = interpolator[0]*(ifft+1.0)*(ifft+1.0) + interpolator[1]*(ifft+1.0) + interpolator[2];
// use these to calculate a linear interpolator across the FFT, as well as a mean value
a = d2-d0; //this is the delay gradient across this FFT
b = d0 + (d1 - (a*0.5 + d0))/3.0; //this is the delay at the start of the FFT
meandelay = a*0.5 + b; //this is the delay in the middle of the FFT
deltadelay = a / fftsamples; // this is the change in delay per sample across this FFT window
netdelaysamples_f = (meandelay - filestartoffset) / sampletime;
// round to the nearest multiple of samplegranularity
netdelaysamples = __double2int_rn(netdelaysamples_f/samplegranularity) * samplegranularity;
// Save the integer number of sample shifts
sampleShifts[iant*numffts + ifft] = netdelaysamples;
// Save the fractional delay
fractionaldelay = (float)(-(netdelaysamples_f - netdelaysamples)*2*M_PI/fftsamples); // radians per FFT channel
fractionalSampleDelays[iant*numffts + ifft] = fractionaldelay;
// set the fringe rotation phase for the first sample of a given FFT of a given antenna
startphase = b*lo;
// store (start phase, per-sample increment) as an interleaved float pair
rotationPhaseInfo[iant*numffts*2 + ifft*2] = (float)(startphase - int(startphase))*2*M_PI;
rotationPhaseInfo[iant*numffts*2 + ifft*2 + 1] = (float)(deltadelay * lo)*2*M_PI;
}
|
1,225
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated stress kernel (do-not-modify harness): exercises a chain of
   single-precision math intrinsics and prints the accumulated value.
   NOTE(review): the two inner for-loops re-declare `i`, shadowing the outer
   loop variable — intentional in the generator, but easy to misread. */
__global__
void compute(float comp, int var_1,float var_2,int var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float* var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float* var_22,float* var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) {
for (int i=0; i < var_1; ++i) {
if (comp > (var_2 + -1.5747E-35f)) {
comp += coshf((+0.0f / +1.3473E-43f));
if (comp >= -1.3494E36f - (-0.0f * (+1.7416E-44f - var_5))) {
comp += asinf((var_6 + var_7));
float tmp_1 = -0.0f + (+0.0f * var_8);
float tmp_2 = -1.9823E34f / +0.0f - var_9;
comp = tmp_2 * tmp_1 + (var_10 * var_11);
}
for (int i=0; i < var_3; ++i) {
var_12[i] = var_13 + var_14 * fabsf((var_15 - var_16 * ldexpf(floorf(tanhf((-1.9774E-35f * var_17 + var_18))), 2)));
float tmp_3 = +1.5409E26f;
comp = tmp_3 / var_12[i] / -1.2900E-35f + var_19 / (var_20 / (-1.3379E-15f - -1.8224E-35f - var_21));
}
for (int i=0; i < var_4; ++i) {
var_22[i] = sinhf(acosf(var_24 + +1.4771E-35f));
var_23[i] = +1.8351E-35f * (-1.0405E10f * (var_25 / (var_26 * fmodf((-1.5535E35f + -1.3174E-24f * -1.2706E-41f - (-1.4619E-35f - var_27)), (var_28 + (var_29 - (var_30 / ceilf(+1.0588E-43f))))))));
comp += var_23[i] + var_22[i] + powf(var_31 - (var_32 / -1.1886E34f * -0.0f - (var_33 - +0.0f)), -1.0284E-35f);
}
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float array on the host and fill every slot with v.
// Caller owns the returned buffer (free()).
float* initPointer(float v) {
    const int count = 10;
    float *buf = (float*) malloc(sizeof(float) * count);
    for (int i = 0; i < count; ++i) {
        buf[i] = v;
    }
    return buf;
}
/* Generated driver for the stress kernel: parses exactly 34 numeric
   command-line values (argv[1..34]) and launches compute<<<1,1>>>.
   NOTE(review): no argc validation — fewer than 34 arguments dereferences a
   null argv entry in atof/atoi and crashes. */
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
int tmp_4 = atoi(argv[4]);
int tmp_5 = atoi(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float* tmp_13 = initPointer( atof(argv[13]) );
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float* tmp_23 = initPointer( atof(argv[23]) );
float* tmp_24 = initPointer( atof(argv[24]) );
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
/* NOTE(review): the three initPointer buffers are passed as host pointers to
   a __global__ kernel — the device writes to them are illegal accesses. */
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34);
cudaDeviceSynchronize();
return 0;
}
|
1,226
|
/**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
static const int WORK_SIZE = 256;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
// Usage: CUDA_CHECK_RETURN(cudaMalloc(...));  Evaluates its argument once,
// and on any CUDA error prints file/line context and exits the process.
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// Reverse the bit order within each byte of a 32-bit word by swapping
// nibbles, then bit pairs, then single bits.
__device__ unsigned int bitreverse(unsigned int number) {
    number = ((number & 0xf0f0f0f0u) >> 4) | ((number & 0x0f0f0f0fu) << 4);
    number = ((number & 0xccccccccu) >> 2) | ((number & 0x33333333u) << 2);
    number = ((number & 0xaaaaaaaau) >> 1) | ((number & 0x55555555u) << 1);
    return number;
}
/**
* CUDA kernel function that reverses the order of bits in each element of the array.
*/
/**
 * In-place per-byte bit reversal: one 32-bit word per thread.
 */
__global__ void bitreverse(void *data) {
    unsigned int *words = (unsigned int*) data;
    unsigned int slot = threadIdx.x;
    words[slot] = bitreverse(words[slot]);
}
// Element-wise c = a + b, one element per thread across the whole grid.
// NOTE(review): there is NO bounds guard and no length parameter, so the
// launch configuration must cover exactly the allocated elements (or the
// allocations must be padded to a whole number of blocks).
__global__ void add(int *a, int *b, int *c)
{
//c[blockIdx.x] = a[blockIdx.x] + b[blockIdx.x];
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
}
/**
* Host function that prepares data array and passes it to the CUDA kernel.
*/
#define THREADS_PER_BLOCK 1024
// Times GPU vs CPU vector addition of N elements (N from argv[1]).
// Fixes: the original launched N/THREADS_PER_BLOCK blocks with TRUNCATING
// division, which skipped the tail for non-multiples and launched ZERO
// blocks for N < 1024.  The grid is now rounded up, and the device buffers
// are padded to a whole number of blocks because the add() kernel has no
// bounds guard.  Also validates argv[1] and frees the host arrays.
int main(int argc, char *argv[])
{
    struct timeval t0;
    struct timeval t1;
    if (argc < 2) {
        fprintf(stderr, "usage: %s <N>\n", argv[0]);
        return 1;
    }
    int N = atoi(argv[1]);
    if (N <= 0) {
        fprintf(stderr, "N must be positive\n");
        return 1;
    }
    int *a, *b, *c;        // host copies of a, b, c
    int *d_a, *d_b, *d_c;  // device copies of a, b, c
    int size = N * sizeof(int);
    // Round the grid up and pad device allocations so the guard-less kernel
    // only ever touches memory we own.
    int blocks = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
    int padded = blocks * THREADS_PER_BLOCK * sizeof(int);
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_a, padded));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_b, padded));
    CUDA_CHECK_RETURN(cudaMalloc((void **)&d_c, padded));
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    int i = 0;
    for (i = 0; i < N; i++)
    {
        a[i] = i;
        b[i] = i;
    }
    gettimeofday(&t0, 0);
    CUDA_CHECK_RETURN(cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice));
    CUDA_CHECK_RETURN(cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice));
    add<<<blocks, THREADS_PER_BLOCK>>>(d_a, d_b, d_c);
    CUDA_CHECK_RETURN(cudaGetLastError());   // catch launch errors
    CUDA_CHECK_RETURN(cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost));
    gettimeofday(&t1, 0);
    double time_spent = (t1.tv_sec-t0.tv_sec) + (double)(t1.tv_usec-t0.tv_usec)/1000000;
    for (i = 0; i < N; i++)
    {
        printf("%d\n", c[i]);
    }
    printf("\nTime Spent GPU: %f\n\n", time_spent);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    // CPU reference timing
    gettimeofday(&t0, 0);
    for (i = 0; i < N; i++)
    {
        c[i] = a[i] + b[i];
    }
    gettimeofday(&t1, 0);
    time_spent = (t1.tv_sec-t0.tv_sec) + (double)(t1.tv_usec-t0.tv_usec)/1000000;
    printf("\nTime Spent CPU: %f\n\n", time_spent);
    free(a); free(b); free(c);   // missing in the original
    return 0;
}
|
1,227
|
extern "C"
// Time-derivative of surge velocity Vx.  The Coriolis-style cross-coupling
// term carries an empirical 1.09 correction factor when Vy is non-positive
// (tuning noted for the k11=580.91f build in the original comments).
__device__ float vxd(const float m,float Vy, float w, float ZpTei, float Xr, float Kvx)
{
    float coupling = m * Vy * w;
    if (!(Vy > 0)) {
        coupling *= 1.09f;   // empirical correction branch
    }
    return (coupling - Xr + ZpTei) * Kvx;
}
extern "C"
// Time-derivative of sway velocity Vy.  The cross-coupling term carries an
// empirical 1.0165 correction factor for forward motion (Vx > 0).
__device__ float vyd(const float m,float Vx, float w, float ZpYri, float Yr, float Kvy) {
    float coupling = m * Vx * w;
    if (Vx > 0) {
        coupling *= 1.0165f;   // empirical correction branch
    }
    return (Yr - ZpYri - coupling) * Kvy;
}
extern "C"
// Time-derivative of yaw rate w: rudder moment plus lateral rudder force
// times lever arm lr, scaled by Kw.  (Yr is unused but kept for interface
// compatibility with the RK4 call sites.)
__device__ float wd(const float lr,float Mr, float ZpYri, float Yr, float Kw) {
    float moment = Mr + lr * ZpYri;
    return moment * Kw;
}
extern "C"
// Ship-motion simulator: each thread j < nShips integrates one ship's surge/
// sway/yaw velocities (Vx, Vy, w) with a classical RK4 scheme over `size`
// steps (step h = 1), writing per-step velocities into VX/VY/ww, positions
// into X/Y/W, and yaw-rotated observations into Xobs/Yobs.  Hydrodynamic
// coefficients (k11..k66, c1..c3, m1, m2) follow an empirical hull model
// parameterized by length L, beam B, draft T, mass m and block coefficient
// delta.  NOTE(review): every thread starts from the same (Vx, Vy, w)
// arguments and writes to the same output indices t = 0..size-1, so all
// ships trace one identical trajectory — confirm whether per-ship state and
// per-ship output strides were intended.
__global__ void modelCalc(float* VX, float* VY, float* ww, float* X, float* Y,
float* W, float* Xobs, float* Yobs, float T, float L, float B,float m,float lr,float Vx, float Vy,
float v,float w, const int size,const int nShips)
{
int j = blockDim.x * blockIdx.x + threadIdx.x;
if (j<nShips){
float k11, k22, k66, k26;   // added-mass coefficients
const float delta = 0.7f;   // block coefficient
float c1, c2, c3, m1, m2;   // sway-force / yaw-moment coefficients
float w_ = 0.0f;            // dimensionless yaw rate w*L/v
float betta_d = 0.0f;       // drift angle (rad)
float Cxr, Cyr, Cmr;        // hull resistance / side-force / moment coeffs
// float L1=B,T1=L/2;//, ????????????????????
const float p = 1000.0f;    // water density
float Xr, Yr, Mr;           // hull force components and moment
float ZpTei = 0;            // propeller thrust term
float Jz;                   // yaw moment of inertia
float ZpYri;                // net rudder side force
const float a66 = 0.31f;
//float Ramp = 0.0f;//
const float lyamdaR = 1.4f; // rudder aspect ratio (see c 335)
const float deltaR = 0.349f;// rudder deflection, rad (~20 deg)
// float Va;//
float Yri, Ysi;             // rudder force and hull-interaction force
const float Ar = 6.0f;      // rudder area
const float D = 1.5f;       // propeller diameter
const float Cta = 10.0f, Ctt = 50.0f;// thrust loading coefficients (c 334)
//Ctt =30
float Kvx, Kvy, Kw;         // inverse virtual masses/inertia
const float No = 3.0f;      // propeller rate setting
const float Re = 5000000.0f;// Reynolds number bound (>5E8)
float K_betta;
float fit;
const float xk = 1.0f;
const float bettar = 0.9f;
const float fik = 0.95f;
float ld_;                  // (see c 228)
float betta_D;
float fiD;                  // (see c 338)
float CyD1;
float CyD;
float A0;                   // propeller disc area
float xD;                   // (see c 339)
float viv = 0.0f;           // current heading used for the rotation matrix
float Rmatr[3][3]={cosf(viv),-sinf(viv),0.0f,sinf(viv),cosf(viv),0.0f,0.0f,0.0f,1.0f};
//------------------------------------------------------------
// empirical added-mass fits (c 330; 5.91/580.91 variants noted above)
k11 = (580.91f * (float) pow(B / L, 2.0f) + 7.76f * (B / L) - 0.259f) / (48.4f - 6.89f * (B / T) + 1.47f * (float) pow(B / T, 2.0f) - 0.0475f * (float) pow(B / T, 3.0f));
k22 = ((0.722f + 0.224f * delta) * (1.022f - (float) pow(B / L, 2.0f))) / (0.264f + 0.368f * (B / T));
k66 = (2.0f * T / B) * (2.59f + 0.781f * delta) * (0.357f - 1.77f * (float) pow(B / L, 2.0f));
k26 = k22;
//k26=0;
// hull force coefficients (c 323)
c1 = 3.14f * (T / L) * (float) pow((0.63f / delta), (5.0f / 2.0f)) * (float) pow(L / (6.0f * B), (1.0f / 3.0f)) - 0.032f;
c2 = -2.0f * k11 * delta * (B / L);
c3 = 1.35f * (float) pow(T / B, (1.0f / 2.0f)) * (float) pow((0.63f / delta), (3.0f / 2.0f)) - 0.029f;
m1 = 1.67f * (T / L) - 0.56f * delta + 0.43f;
m2 = -0.44f * (T / L) - 0.0375f;
// System.out.printf("c1=%f\tc2=%f\tc3=%f\tm1=%f\tm2=%f\n", c1, c2, c3, m1, m2);
Jz = (m * (float) pow(L, 2.0f) / 12.4f) * (0.463f + 0.574f * (float) pow(delta, a66) + (float) pow(B / L, 2.0f));//c 330
Kvx = 1 / (m * (1 + k11));
Kvy = 1 / (m * (1 + k22));
Kw = 1 / (Jz * (1 + k66));
//----------------------------------------------------------------
// RK4 stage increments: k* for Vx, q* for Vy, z* for w, j* unused
float k1, k2, k3, k4;
float q1, q2, q3, q4;
float z1, z2, z3, z4;
float j1, j2, j3, j4;
//t = 0.0f; //
int t = 0;
float h = 1.0f;   // integration step
for (int i = 0; i < size; i++) { //16550
v = (float) sqrt((float) pow(Vx, 2.0f) + (float) pow(Vy, 2.0f));
//assert(Vx==0);
// NOTE(review): both branches evaluate identical expressions; if Vx (or v)
// is zero the atan / division produce NaN or Inf — confirm intended guard.
if (Vx != 0) {
w_ = w * L / v;
betta_d = -(float) atan(Vy / Vx);//c 350
} else {
w_ = w * L / v;
//betta_d = 0;
betta_d = -(float) atan(Vy / Vx);//c 350
}
// hull forces from the current drift angle and yaw rate
Cxr = 0.01f * (1.0f + 170.0f * (T / L));// c 119
Cyr = c1 * betta_d + c2 * w_ + c3 * betta_d * abs(betta_d);//c 323
Cmr = m1 * betta_d + m2 * w_;
Xr = Cxr * L * T * (float) pow(v, 2.0f) * p / 2.0f;//c 320
Yr = Cyr * L * T * (float) pow(v, 2.0f) * p / 2.0f;
Mr = Cmr * L * T * (float) pow(v, 2.0f) * p / 2.0f;
// rudder/propeller interaction terms
K_betta = 0.43f * (float) pow(Ctt, -0.6f);
fit = (float) pow(1.0f + Ctt, 0.508f);
//IMPORTANT!!! deltaR
Yri = 3.14f * (deltaR - K_betta * xk * (betta_d + lr * w_)) * p * Ar * (float) pow(v * fik * fit, 2.0f) / (1.0f + 2.2f / (float) pow(lyamdaR, 2.0f / 3.0f));
ld_ = 0.77f - 0.125f * (float) sqrt(Ctt) / (1.65f * (float) sqrt(Ctt) - 1.0f);
betta_D = 1.22f - 0.0563f * (float) sqrt(Ctt) / (1.65f * (float) sqrt(Ctt) - 1.0f);
fiD = 0.5f * ((float) sqrt(1.0f + 2.0f * Ctt / betta_D) + 1.0f);
CyD1 = 12.0f * ld_ / (1.0f + 1.56f * ld_);
CyD = CyD1 + 2.0f * betta_D * (float) pow(fiD, 2.0f);
xD = xk * (CyD1 + 2.0f * betta_D * fiD) / (CyD1 + 2.0f * betta_D * (float) pow(fiD, 2.0f));
A0 = 3.14f * (float) pow(D, 2.0f) / 4.0f;
Ysi = CyD * (xD - 0.02f * xk) * (betta_d + lr * w_) * (p / 2.0f) * A0 * (float) pow(v, 2.0f) * (float) pow(fik, 2.0f);
ZpTei = 1000000.0f * (9.740f * (float) pow(No, 2.0f) - 2.23f * v); ////9.740f
ZpYri = 2.0f * (Yri - Ysi);//2
// --- RK4 step for the velocities (Vx, Vy, w) ---
k1 = h * vxd(m,Vy, w, ZpTei, Xr, Kvx);
q1 = h * vyd(m,Vx, w, ZpYri, Yr, Kvy);
z1 = h * wd(lr,Mr, ZpTei, Yr, Kw);
k2 = h * vxd(m,Vy + q1 / 2.0f, w + z1 / 2.0f, ZpTei, Xr, Kvx);
q2 = h * vyd(m,Vx + k1 / 2.0f, w + z1 / 2.0f, ZpYri, Yr, Kvy);
z2 = h * wd(lr,Mr, ZpYri, Yr, Kw);
k3 = h * vxd(m,Vy + q2 / 2.0f, w + z2 / 2.0f, ZpTei, Xr, Kvx);
q3 = h * vyd(m,Vx + k2 / 2.0f, w + z2 / 2.0f, ZpYri, Yr, Kvy);
z3 = h * wd(lr,Mr, ZpYri, Yr, Kw);
k4 = h * vxd(m,Vy + q3, w + z3, ZpTei, Xr, Kvx);
q4 = h * vyd(m,Vx + k3, w + z3, ZpYri, Yr, Kvy);
z4 = h * wd(lr,Mr, ZpYri, Yr, Kw);
Vx = Vx + (1.0f / 6.0f) * (k1 + 2.0f * k2 + 2.0f * k3 + k4);
//VX[t] = Vx / 1.24f;
VX[t] = Vx;
Vy = Vy + (1.0f / 6.0f) * (q1 + 2.0f * q2 + 2.0f * q3 + q4);
VY[t] = Vy;
w = w + (1.0f / 6.0f) * (z1 + 2.0f * z2 + 2.0f * z3 + z4);
ww[t] = w;
// --- second RK4 pass, recorded as position-like outputs X/Y/W ---
k1 = h * vxd(m,Vy, w, ZpTei, Xr, Kvx);
q1 = h * vyd(m,Vx, w, ZpYri, Yr, Kvy);
z1 = h * wd(lr,Mr, ZpTei, Yr, Kw);
k2 = h * vxd(m,Vy + q1 / 2.0f, w + z1 / 2.0f, ZpTei, Xr, Kvx);
q2 = h * vyd(m,Vx + k1 / 2.0f, w + z1 / 2.0f, ZpYri, Yr, Kvy);
z2 = h * wd(lr,Mr, ZpYri, Yr, Kw);
k3 = h * vxd(m,Vy + q2 / 2.0f, w + z2 / 2.0f, ZpTei, Xr, Kvx);
q3 = h * vyd(m,Vx + k2 / 2.0f, w + z2 / 2.0f, ZpYri, Yr, Kvy);
z3 = h * wd(lr,Mr, ZpYri, Yr, Kw);
k4 = h * vxd(m,Vy + q3, w + z3, ZpTei, Xr, Kvx);
q4 = h * vyd(m,Vx + k3, w + z3, ZpYri, Yr, Kvy);
z4 = h * wd(lr,Mr, ZpYri, Yr, Kw);
X[t] = Vx + (1.0f / 6.0f) * (k1 + 2.0f * k2 + 2.0f * k3 + k4);
Y[t] = Vy + (1.0f / 6.0f) * (q1 + 2.0f * q2 + 2.0f * q3 + q4);
W[t] = w + (1.0f / 6.0f) * (z1 + 2.0f * z2 + 2.0f * z3 + z4);
// rotate (X, Y) by the heading W[t] into the observer frame
viv = W[t];
//-------
Rmatr[0][0] = (float)cos(viv);
Rmatr[0][1] = -(float)sin(viv);
Rmatr[1][0] = (float)sin(viv);
Rmatr[1][1] = (float)cos(viv);
Xobs[t] = Rmatr[0][0] * X[t] + Rmatr[0][1] * Y[t];
Yobs[t] = Rmatr[1][0] * X[t] + Rmatr[1][1] * Y[t];
//----------
t++;
}
}
}
|
1,228
|
// Voxel-carving (visual-hull) kernel: each thread handles one cell of a 3-D
// occupancy grid (x, y from the 2-D launch grid, z from threadIdx.z).  The
// cell is scaled to metric coordinates, transformed by T, projected into
// four camera images through the 3x4 matrices P1..P4 (stored column-major:
// indices 0..2 are the first column), and the voxel is marked occupied (1.0)
// when at least three cameras see a non-zero pixel at its projection.
__global__ void mapping(double *point_cloud,
const double *img1, const double *img2,
const double *img3, const double *img4,
const double *T,
const double *P1, const double *P2, const double *P3, const double *P4,
const double *ratiox, const double *ratioy, const double *ratioz,
const int *img_width, const int *img_height)
{
int idx = blockIdx.x*blockDim.x + threadIdx.x;
int idy = blockIdx.y*blockDim.y + threadIdx.y;
int idz = threadIdx.z;
int width = gridDim.x*blockDim.x;
int height = gridDim.y*blockDim.y;
// linear index into the (z, x, y)-ordered occupancy volume
int index = idz*width*height + idx*height + idy;
//grid to coordinates
double mx = idx*ratiox[0];
double my = idy*ratioy[0];
double mz = idz*ratioz[0];
//world coordinates
// NOTE(review): T is applied to x and y only; z keeps the raw mz value.
double x = T[0]*mx + T[4]*my + T[8]*mz + T[12];
double y = T[1]*mx + T[5]*my + T[9]*mz + T[13];
double z = mz;
//image coordinates (homogeneous projection, then perspective divide)
double u1 = P1[0]*x + P1[3]*y + P1[6]*z + P1[9];
double v1 = P1[1]*x + P1[4]*y + P1[7]*z + P1[10];
double norm1 = P1[2]*x + P1[5]*y + P1[8]*z + P1[11];
u1/=norm1;
v1/=norm1;
double u2 = P2[0]*x + P2[3]*y + P2[6]*z + P2[9];
double v2 = P2[1]*x + P2[4]*y + P2[7]*z + P2[10];
double norm2 = P2[2]*x + P2[5]*y + P2[8]*z + P2[11];
u2/=norm2;
v2/=norm2;
double u3 = P3[0]*x + P3[3]*y + P3[6]*z + P3[9];
double v3 = P3[1]*x + P3[4]*y + P3[7]*z + P3[10];
double norm3 = P3[2]*x + P3[5]*y + P3[8]*z + P3[11];
u3/=norm3;
v3/=norm3;
double u4 = P4[0]*x + P4[3]*y + P4[6]*z + P4[9];
double v4 = P4[1]*x + P4[4]*y + P4[7]*z + P4[10];
double norm4 = P4[2]*x + P4[5]*y + P4[8]*z + P4[11];
u4/=norm4;
v4/=norm4;
// truncate to integer pixel coordinates
int u11 = (u1);
int u22 = (u2);
int u33 = (u3);
int u44 = (u4);
int v11 = (v1);
int v22 = (v2);
int v33 = (v3);
int v44 = (v4);
int final_width = img_width[0];
int final_height = img_height[0];
int seen_record[4] = {0};
// visibility: projected pixel must lie strictly inside each image
// NOTE(review): the lower bound uses >0, which excludes row/column 0 —
// confirm whether >=0 was intended.
if((u11>0)&&(u11<final_width)&&(v11>0)&&(v11<final_height))
seen_record[0] = 1;
if((u22>0)&&(u22<final_width)&&(v22>0)&&(v22<final_height))
seen_record[1] = 1;
if((u33>0)&&(u33<final_width)&&(v33>0)&&(v33<final_height))
seen_record[2] = 1;
if((u44>0)&&(u44<final_width)&&(v44>0)&&(v44<final_height))
seen_record[3] = 1;
// count cameras whose silhouette is non-zero at the projected pixel
// (images are indexed column-major: u * height + v)
int sum = 0;
if((seen_record[0]==1)&&(img1[u11*final_height+v11]!=0))
++sum;
if((seen_record[1]==1)&&(img2[u22*final_height+v22]!=0))
++sum;
if((seen_record[2]==1)&&(img3[u33*final_height+v33]!=0))
++sum;
if((seen_record[3]==1)&&(img4[u44*final_height+v44]!=0))
++sum;
point_cloud[index] = 0;
// majority vote: occupied when seen by at least 3 of the 4 cameras
if(sum>=3)
{
point_cloud[index] = 1;
}
}
|
1,229
|
#include "includes.h"
using namespace std;
// parameter describing the size of matrix A
const int rows = 4096;
const int cols = 4096;
const int BLOCK_SIZE = 16;
// transpose shared kernel
// transpose kernel
// Naive out-of-place matrix transpose: b = a^T.
// `a` is row-major with `height` rows and `width` cols (a[y*width + x]);
// the transposed element lands at b[x*height + y].
// Expects a 2D launch whose grid covers at least width x height threads.
// NOTE: reads of `a` are coalesced but writes to `b` are strided; a
// shared-memory tiled version would be faster (hence "naive").
__global__ void transpose_naive(float* a, float*b) {
	int x = blockIdx.x * blockDim.x + threadIdx.x;  // column index into a
	int y = blockIdx.y * blockDim.y + threadIdx.y;  // row index into a
	int width = gridDim.x * blockDim.x;
	int height = gridDim.y * blockDim.y;
	// Bounds guard: x is the column index so it must be checked against
	// width, and y (row) against height. The original compared x against
	// height and y against width, which only happens to work for square
	// matrices such as the 4096x4096 case configured above.
	if (x < width && y < height) {
		b[x*height + y] = a[y*width + x];
	}
}
|
1,230
|
#include <stdio.h>
// Unary functor returning sin(x), callable on both host and device
// (e.g. as a thrust::transform operator).
template <typename T>
struct SinFunctor
{
	__host__ __device__
	T operator()(const T& x) const {
		// Use the overloaded sin() so that T = double keeps full
		// precision; the original called sinf(), which silently
		// truncated double arguments to float.
		return sin(x);
	}
};
// Unary functor returning cos(x), callable on both host and device
// (e.g. as a thrust::transform operator).
template <typename T>
struct CosFunctor
{
	__host__ __device__
	T operator()(const T& x) const {
		// Use the overloaded cos() so that T = double keeps full
		// precision; the original called cosf(), which silently
		// truncated double arguments to float.
		return cos(x);
	}
};
// Binary functor computing the magnitude |(x,y)| normalized by the
// oscillator count: sqrt(x^2 + y^2) / num_oscilators. With x and y being
// the summed cos/sin components of all phases, this is the Kuramoto
// order parameter (coherence) in [0, 1].
struct OrderFunctor
{
	const float num_oscilators;  // normalization constant N
	OrderFunctor(float _num_oscilators) : num_oscilators(_num_oscilators) {}
	__host__ __device__
	float operator()(const float& x, const float& y) const {
		// hypotf computes sqrt(x*x + y*y) without intermediate
		// overflow/underflow and avoids the two powf() calls of the
		// original formulation.
		return hypotf(x, y)/num_oscilators;
	}
};
// One Heun (improved-Euler) time step for num_couplings independent copies
// of a network of num_oscilators coupled phase oscillators, each copy using
// its own coupling strength couplings[i_section].
//
// Layout: phases_old/phases_new hold num_couplings blocks of num_oscilators
// phases back to back, so global slot `index` = i_section*num_oscilators +
// i_node. omegas (natural frequencies) has one entry per node and is shared
// by every coupling section. ptr/indices index neighbor lists CSR-style:
// node i_node's neighbors are indices[ptr[i_node] .. ptr[i_node+1]-1].
//
// Grid-stride loop: any launch configuration covers all
// num_oscilators*num_couplings slots.
__global__ void kernel_heuns(int num_oscilators, int num_couplings, float *phases_new,
float *phases_old, float *omegas,float *couplings, float dt, int *indices, int *ptr)
{
int i_thread = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int index =i_thread; index < num_oscilators*num_couplings; index += stride){
// Recover (section, node) from the flat slot index.
// NOTE(review): i_section is computed via float ceil(); for very large
// index values float rounding could misplace a slot — integer
// index / num_oscilators would be exact. Behavior left unchanged here.
float findex = (float)index;
float fnum_oscilators = (float)num_oscilators;
float fi_section = (findex+1)/fnum_oscilators-1;
fi_section=ceil(fi_section);
int i_section = (int)fi_section;
//int i_section = (int)ceil(((float)index+1)/(float)num_oscilators-1);
int i_node = index - i_section*num_oscilators;
float coupling = couplings[i_section];
float phase = phases_old[index];
float omega = omegas[i_node];
int i_nei = 0;
float f = 0;       // drift at the current phase (predictor slope)
float f_tild = 0;  // drift at the predicted phase (corrector slope)
float phase_tild = 0.;
// Predictor slope: f = omega + K * sum_j sin(theta_j - theta_i),
// summed over the CSR neighbor list of this node (within this section).
for( int i_ptr = ptr[i_node]; i_ptr < ptr[i_node+1]; i_ptr = i_ptr + 1 ) {
i_nei = indices[i_ptr]+ i_section*num_oscilators;
f += coupling*sinf( phases_old[i_nei] -phase);
};
f = f + omega;
// Euler predictor.
phase_tild = phase + dt*f;
// Corrector slope, evaluated at the predicted phase but with the
// neighbors still at their old phases (Jacobi-style update).
for( int i_ptr = ptr[i_node]; i_ptr < ptr[i_node+1]; i_ptr = i_ptr + 1 ) {
i_nei = indices[i_ptr]+ i_section*num_oscilators;
f_tild += coupling*sinf( phases_old[i_nei] - phase_tild);
};
f_tild = f_tild + omega;
// Heun update: average of predictor and corrector slopes.
phases_new[index] = phase + dt*(f+f_tild)/2.;
}
}
|
1,231
|
#include <float.h>
#include <math.h>
#include <stdio.h>
// Rectangle-rule integrand for a quarter unit circle: each thread computes
// the area w * sqrt(1 - x^2) of one rectangle of width w and stores it in
// areas[]. `offset` shifts the thread id so a single-block launch can be
// looped over chunks of `recs` rectangles; threads past `recs` do nothing.
__global__ void calculateAreas(const int recs, const double w, const int offset, double *areas) {
	const int rect = threadIdx.x + offset;  // global rectangle id
	if (rect < recs) {
		const double x = rect * w;          // left edge of this rectangle
		const double t = 1 - x * x;
		// Clamp values within roundoff of zero before taking the root.
		const double h = (t < DBL_EPSILON) ? 0 : sqrt(t);
		areas[rect] = w * h;
	}
}
// Approximate the area under sqrt(1 - x^2) on [0,1] with `recs` rectangles
// evaluated on the GPU, then scale by 4 (pi estimate) into *area.
// On any failure an error is printed, all allocations are released, and
// *area is left untouched.
void calculateArea(const int recs, double *area) {
	double *areas = (double*) malloc(recs * sizeof(double));
	if (areas == NULL) {
		fprintf(stderr, "malloc failed!\n");
		return;
	}
	double *w_areas = NULL;
	cudaError_t err = cudaMalloc((void**) &w_areas, (recs * sizeof(double)));
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
		free(areas);  // was leaked on this path before
		return;
	}
	// Integer ceiling division replaces the float ceil() of the original.
	const int threadCount = 512, loops = (recs + threadCount - 1) / threadCount;
	const double width = 1.0 / recs;
	for (int c = 0; c < loops; ++c) {
		// One block per chunk; the offset tells the kernel which chunk.
		calculateAreas<<<1, threadCount>>>(recs, width, c * threadCount, w_areas);
	}
	// Launch-configuration errors only surface through cudaGetLastError().
	err = cudaGetLastError();
	if (err != cudaSuccess) {
		fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
		cudaFree(w_areas);
		free(areas);
		return;
	}
	err = cudaMemcpy(areas, w_areas, recs * sizeof(double), cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
		cudaFree(w_areas);  // both buffers were leaked on this path before
		free(areas);
		return;
	}
	// Sum the per-rectangle areas on the host.
	*area = 0;
	for (int c = 0; c < recs; ++c) {
		*area += areas[c];
	}
	*area *= 4;
	cudaFree(w_areas);
	free(areas);
}
|
1,232
|
// Element-wise squared difference: dest[i] = (a[i] - b[i])^2 for i < n.
// One thread per element; threads past n are masked out by the guard.
__global__ void subtract_and_square(float *dest, float *a, float *b, int n)
{
	const int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= n)
		return;
	const float diff = a[i] - b[i];
	dest[i] = diff * diff;
}
|
1,233
|
#include <cuda.h>
// Read-only cached load helper: routes the load through __ldg (the
// read-only data cache path) on devices that support it — compute
// capability 3.5 and newer — and falls back to a plain dereference when
// compiling for older architectures. Caller must guarantee the pointed-to
// data is not written by any thread for the duration of the kernel.
template<typename T>
__device__ __forceinline__ T ldg(const T* ptr) {
#if __CUDA_ARCH__ >= 350
	return __ldg(ptr);
#else
	return *ptr;
#endif
}
// 3D axis transpose that keeps the Y axis in place:
//   out[kz*ny*nx + ky*nx + kx] = in[kx*ny*nz + ky*nz + kz]
// i.e. input is ordered {X,Y,Z}, output is ordered {Z,Y,X}.
// One thread per (kx, ky) column; each thread walks the full Z extent.
// Input reads go through ldg() (read-only cache).
extern "C"
__global__
void transpose_constY(
      int nx
    , int ny
    , int nz
    , float * in
    , float * out // XYZ -> ZYX
    )
{
	const int kx = blockIdx.x*blockDim.x + threadIdx.x;
	const int ky = blockIdx.y*blockDim.y + threadIdx.y;
	if (kx >= nx || ky >= ny)
		return;
	const int outStride = ny * nx;      // one full Z-plane of the output
	int outIdx = kx + ky * nx;          // output position at kz = 0
	int inIdx = kx * ny * nz + ky * nz; // input position at kz = 0
	for (int kz = 0; kz < nz; ++kz) {
		out[outIdx] = ldg(&in[inIdx]);
		outIdx += outStride;            // next Z plane in the output
		++inIdx;                        // Z is innermost in the input
	}
}
|
1,234
|
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <sys/time.h>
#define NPB_VERSION "3.3.1"
using namespace std;
#define min(x,y) (x) <= (y) ? (x) : (y)
#define max(x,y) (x) >= (y) ? (x) : (y)
// block sizes for CUDA kernels
#define NORM_BLOCK 32
#define SOLVE_BLOCK 32
#define ERHS_BLOCK 32
// timer constants
#define t_total 0
#define t_rhsx 1
#define t_rhsy 2
#define t_rhsz 3
#define t_rhs 4
#define t_xsolve 5
#define t_ysolve 6
#define t_zsolve 7
#define t_rdis1 8
#define t_rdis2 9
#define t_txinvr 10
#define t_pinvr 11
#define t_ninvr 12
#define t_tzetar 13
#define t_add 14
#define t_last 15
//---------------------------------------------------------------------
// diffusion coefficients
//---------------------------------------------------------------------
#define dx1 0.75
#define dx2 0.75
#define dx3 0.75
#define dx4 0.75
#define dx5 0.75
#define dy1 0.75
#define dy2 0.75
#define dy3 0.75
#define dy4 0.75
#define dy5 0.75
#define dz1 1.0
#define dz2 1.0
#define dz3 1.0
#define dz4 1.0
#define dz5 1.0
//#define dxmax max(dx3,dx4)
//#define dymax max(dy2,dy4)
//#define dzmax max(dz2,dz3)
#define dxmax dx3
#define dymax dy2
#define dzmax dz2
//---------------------------------------------------------------------
// fourth difference dissipation
//---------------------------------------------------------------------
#define dssp (max(max(dx1,dy1),dz1)*.25)
#define c4dssp (4.0*dssp)
#define c5dssp (5.0*dssp)
#define c1 1.4
#define c2 0.4
#define c3 0.1
#define c4 1.0
#define c5 1.4
#define c1c2 (c1*c2)
#define c1c5 (c1*c5)
#define c3c4 (c3*c4)
#define c1345 (c1c5*c3c4)
#define conz1 (1.0-c1c5)
#define c2iv 2.5
#define con43 (4.0/3.0)
#define con16 (1.0/6.0)
// macros to linearize multidimensional array accesses
#define fu(m,i,j,k) fu[(i)+nx*((j)+ny*((k)+nz*(m)))]
#define forcing(m,i,j,k) forcing[(i)+nx*((j)+ny*((k)+nz*(m)))]
#define rhs(m,i,j,k) rhs[m+(i)*5+(j)*5*nx+(k)*5*nx*ny]
#define rho_i(i,j,k) rho_i[i+(j)*nx+(k)*nx*ny]
#define us(i,j,k) us[i+(j)*nx+(k)*nx*ny]
#define vs(i,j,k) vs[i+(j)*nx+(k)*nx*ny]
#define ws(i,j,k) ws[i+(j)*nx+(k)*nx*ny]
#define square(i,j,k) square[i+(j)*nx+(k)*nx*ny]
#define qs(i,j,k) qs[i+(j)*nx+(k)*nx*ny]
#define speed(i,j,k) speed[i+(j)*nx+(k)*nx*ny]
// Print a CUDA error with file/line context and abort the program.
// Intended to be called through the HANDLE_ERROR macro below, which
// supplies __FILE__ / __LINE__ at the call site.
static void inline HandleError( cudaError_t err, const char *file, int line ) {
	if (err == cudaSuccess)
		return;
	printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
	exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
__constant__ double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3;
__constant__ double bt, dt, dtdssp;
__constant__ double dnxm1, dnym1, dnzm1;
__constant__ double dtx1, dttx2, dty1, dtty2, dtz1, dttz2, c2dttx1, c2dtty1, c2dttz1;
__constant__ double comz1, comz4, comz5, comz6, c3c4tx3, c3c4ty3, c3c4tz3;
__constant__ double xxcon1, xxcon2, xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1, dx4tx1, dx5tx1;
__constant__ double yycon1, yycon2, yycon3, yycon4, yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1;
__constant__ double zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1, dz2tz1, dz3tz1, dz4tz1, dz5tz1;
__constant__ double ce[13][5];
//---------------------------------------------------------------------
// exact_rhs computation
//---------------------------------------------------------------------
// Evaluate the analytic reference solution at grid coordinates
// (xi, eta, zta): for each of the 5 flow variables m, dtemp[m] is a sum
// of per-axis quartics built from the __constant__ coefficient table
// ce[][], each evaluated in Horner form.
__device__ static void exact_solution_kernel (const double xi, const double eta, const double zta, double *dtemp) {
	for (int m = 0; m < 5; m++) {
		const double px = xi*(ce[1][m] + xi*(ce[4][m] + xi*(ce[7][m] + xi*ce[10][m])));
		const double pe = eta*(ce[2][m] + eta*(ce[5][m] + eta*(ce[8][m] + eta*ce[11][m])));
		const double pz = zta*(ce[3][m] + zta*(ce[6][m] + zta*(ce[9][m] + zta*ce[12][m])));
		// Same left-to-right summation order as the original expression.
		dtemp[m] = ce[0][m] + px + pe + pz;
	}
}
// Zero the forcing array before the directional sweeps accumulate into
// it. One thread per (i,j,k) grid point; launched (see exact_rhs) with
// gridDim = (ny, nz) and blockDim = nx.
__global__ static void exact_rhs_kernel_init (double *forcing, const int nx, const int ny, const int nz) {
	const int k = blockIdx.y;
	const int j = blockIdx.x;
	const int i = threadIdx.x;
	for (int m = 0; m < 5; m++)
		forcing(m,i,j,k) = 0.0;
}
// xi-direction contribution to the exact forcing term.
// One thread per interior (j,k) line; each thread marches along i holding
// a sliding window of exact-solution data so the solution polynomial is
// evaluated only once per point:
//   ue[0..4][m]  — solution values at i-2 .. i+2 (shifted left each step)
//   buf[0..2][m] — per-point 1/rho-scaled values at i-1, i, i+1
//                  (buf[*][0] is repurposed as the velocity-square term)
//   cuf[0..2], q[0..2] — convective and kinetic terms at i-1, i, i+1
// This kernel writes forcing() directly (dtemp starts from 0.0); the y and
// z sweeps later accumulate on top of it.
__global__ static void exact_rhs_kernel_x (double *forcing, const int nx, const int ny, const int nz) {
int i, j, k, m;
double xi, eta, zta, dtemp[5], dtpp;
double ue[5][5], buf[3][5], cuf[3], q[3];
k = blockIdx.x*blockDim.x+threadIdx.x+1;
j = blockIdx.y*blockDim.y+threadIdx.y+1;
// Guard: only interior lines 1..n-2 in j and k are processed.
if (k >= nz-1 || j >= ny-1) return;
zta = (double)k * dnzm1;
eta = (double)j * dnym1;
//---------------------------------------------------------------------
// xi-direction flux differences
//---------------------------------------------------------------------
// Prime the window with the first three points i = 0, 1, 2
// (stored in ue[1..3] / buf[0..2]; ue[0] and ue[4] are filled later).
for (i = 0; i < 3; i++) {
xi = (double)i * dnxm1;
exact_solution_kernel(xi, eta, zta, dtemp);
for (m = 0; m < 5; m++) ue[i+1][m] = dtemp[m];
dtpp = 1.0/dtemp[0];
for (m = 1; m < 5; m++) buf[i][m] = dtpp*dtemp[m];
cuf[i] = buf[i][1] * buf[i][1];
buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
q[i] = 0.5 * (buf[i][1]*ue[i+1][1] + buf[i][2]*ue[i+1][2] + buf[i][3]*ue[i+1][3]);
}
for (i = 1; i < nx-1; i++) {
// Look-ahead: fetch the solution at i+2 into window slot ue[4].
if (i+2 < nx) {
xi = (double)(i+2) * dnxm1;
exact_solution_kernel(xi, eta, zta, dtemp);
for (m = 0; m < 5; m++) ue[4][m] = dtemp[m];
}
// Second-order central flux differences plus viscous-like terms at i
// (window slots: 1 = i-1, 2 = i, 3 = i+1).
dtemp[0] = 0.0 - tx2*(ue[3][1]-ue[1][1])+ dx1tx1*(ue[3][0]-2.0*ue[2][0]+ue[1][0]);
dtemp[1] = 0.0 - tx2*((ue[3][1]*buf[2][1]+c2*(ue[3][4]-q[2]))-(ue[1][1]*buf[0][1]+c2*(ue[1][4]-q[0])))+xxcon1*(buf[2][1]-2.0*buf[1][1]+buf[0][1])+dx2tx1*(ue[3][1]-2.0*ue[2][1]+ue[1][1]);
dtemp[2] = 0.0 - tx2*(ue[3][2]*buf[2][1]-ue[1][2]*buf[0][1])+xxcon2*(buf[2][2]-2.0*buf[1][2]+buf[0][2])+dx3tx1*(ue[3][2]-2.0*ue[2][2]+ue[1][2]);
dtemp[3] = 0.0 - tx2*(ue[3][3]*buf[2][1]-ue[1][3]*buf[0][1])+xxcon2*(buf[2][3]-2.0*buf[1][3]+buf[0][3])+dx4tx1*(ue[3][3]-2.0*ue[2][3]+ue[1][3]);
dtemp[4] = 0.0 - tx2*(buf[2][1]*(c1*ue[3][4]-c2*q[2])-buf[0][1]*(c1*ue[1][4]-c2*q[0]))+0.5*xxcon3*(buf[2][0]-2.0*buf[1][0]+buf[0][0])+xxcon4*(cuf[2]-2.0*cuf[1]+cuf[0])+
xxcon5*(buf[2][4]-2.0*buf[1][4]+buf[0][4])+dx5tx1*(ue[3][4]-2.0*ue[2][4]+ ue[1][4]);
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
// The 5-point stencil is truncated to one-sided forms near both
// x boundaries (i = 1, 2 and i = nx-3, nx-2).
if (i == 1) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp*(5.0*ue[2][m] - 4.0*ue[3][m] + ue[4][m]);
} else if (i == 2) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp*(-4.0*ue[1][m] + 6.0*ue[2][m] - 4.0*ue[3][m] + ue[4][m]);
} else if (i >= 3 && i < nx-3) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp*(ue[0][m] - 4.0*ue[1][m]+6.0*ue[2][m] - 4.0*ue[3][m] + ue[4][m]);
} else if (i == nx-3) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp*(ue[0][m] - 4.0*ue[1][m] +6.0*ue[2][m] - 4.0*ue[3][m]);
} else if (i == nx-2) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp*(ue[0][m] - 4.0*ue[1][m] + 5.0*ue[2][m]);
}
// Shift the sliding window one point to the left.
for (m = 0; m < 5; m++) {
ue[0][m] = ue[1][m];
ue[1][m] = ue[2][m];
ue[2][m] = ue[3][m];
ue[3][m] = ue[4][m];
buf[0][m] = buf[1][m];
buf[1][m] = buf[2][m];
}
cuf[0] = cuf[1]; cuf[1] = cuf[2];
q[0] = q[1]; q[1] = q[2];
// Refill the rightmost window slot from the just-shifted ue[3]
// (the solution at i+2 fetched at the top of this iteration).
if (i < nx-2) {
dtpp = 1.0/ue[3][0];
for (m = 1; m < 5; m++) buf[2][m] = dtpp*ue[3][m];
cuf[2] = buf[2][1] * buf[2][1];
buf[2][0] = cuf[2] + buf[2][2] * buf[2][2] + buf[2][3] * buf[2][3];
q[2] = 0.5 * (buf[2][1]*ue[3][1] + buf[2][2]*ue[3][2] + buf[2][3]*ue[3][3]);
}
}
}
// eta-direction contribution to the exact forcing term.
// Mirror of exact_rhs_kernel_x but marching along j: one thread per
// interior (i,k) line, with the same 5-point sliding window (ue/buf/cuf/q).
// Unlike the x sweep, dtemp starts from the forcing() values already
// written by the x sweep, so this kernel accumulates onto them.
// Must therefore run after exact_rhs_kernel_x.
__global__ static void exact_rhs_kernel_y (double *forcing, const int nx, const int ny, const int nz) {
int i, j, k, m;
double xi, eta, zta, dtemp[5], dtpp;
double ue[5][5], buf[3][5], cuf[3], q[3];
k = blockIdx.x*blockDim.x+threadIdx.x+1;
i = blockIdx.y*blockDim.y+threadIdx.y+1;
// Only interior lines 1..n-2 in i and k are processed.
if (k >= nz-1 || i >= nx-1) return;
zta = (double)k * dnzm1;
xi = (double)i * dnxm1;
//---------------------------------------------------------------------
// eta-direction flux differences
//---------------------------------------------------------------------
// Prime the window with j = 0, 1, 2 (note cuf is based on the
// eta-velocity component buf[*][2] in this sweep).
for (j = 0; j < 3; j++) {
eta = (double)j * dnym1;
exact_solution_kernel(xi, eta, zta, dtemp);
for (m = 0; m < 5; m++) ue[j+1][m] = dtemp[m];
dtpp = 1.0/dtemp[0];
for (m = 1; m < 5; m++) buf[j][m] = dtpp * dtemp[m];
cuf[j] = buf[j][2] * buf[j][2];
buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
q[j] = 0.5*(buf[j][1]*ue[j+1][1] + buf[j][2]*ue[j+1][2] + buf[j][3]*ue[j+1][3]);
}
for (j = 1; j < ny-1; j++) {
// Look-ahead: solution at j+2 into window slot ue[4].
if (j+2 < ny) {
eta = (double)(j+2) * dnym1;
exact_solution_kernel(xi, eta, zta, dtemp);
for (m = 0; m < 5; m++) ue[4][m] = dtemp[m];
}
// Central flux differences at j, accumulated onto the x-sweep result.
dtemp[0] = forcing(0,i,j,k) - ty2*(ue[3][2]-ue[1][2])+ dy1ty1*(ue[3][0]-2.0*ue[2][0]+ue[1][0]);
dtemp[1] = forcing(1,i,j,k) - ty2*(ue[3][1]*buf[2][2]-ue[1][1]*buf[0][2])+yycon2*(buf[2][1]-2.0*buf[1][1]+buf[0][1])+dy2ty1*(ue[3][1]-2.0*ue[2][1]+ ue[1][1]);
dtemp[2] = forcing(2,i,j,k) - ty2*((ue[3][2]*buf[2][2]+c2*(ue[3][4]-q[2]))-(ue[1][2]*buf[0][2]+c2*(ue[1][4]-q[0])))+yycon1*(buf[2][2]-2.0*buf[1][2]+buf[0][2])+dy3ty1*( ue[3][2]-2.0*ue[2][2] +ue[1][2]);
dtemp[3] = forcing(3,i,j,k) - ty2*(ue[3][3]*buf[2][2]-ue[1][3]*buf[0][2])+yycon2*(buf[2][3]-2.0*buf[1][3]+buf[0][3])+dy4ty1*( ue[3][3]-2.0*ue[2][3]+ ue[1][3]);
dtemp[4] = forcing(4,i,j,k) - ty2*(buf[2][2]*(c1*ue[3][4]-c2*q[2])-buf[0][2]*(c1*ue[1][4]-c2*q[0]))+0.5*yycon3*(buf[2][0]-2.0*buf[1][0]+buf[0][0])+yycon4*(cuf[2]-2.0*cuf[1]+cuf[0])+
yycon5*(buf[2][4]-2.0*buf[1][4]+buf[0][4])+dy5ty1*(ue[3][4]-2.0*ue[2][4]+ue[1][4]);
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
// One-sided stencils near the j boundaries, central stencil inside.
if (j == 1) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp * (5.0*ue[2][m] - 4.0*ue[3][m] +ue[4][m]);
} else if (j == 2) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp * (-4.0*ue[1][m] + 6.0*ue[2][m] - 4.0*ue[3][m] + ue[4][m]);
} else if (j >= 3 && j < ny-3) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp*(ue[0][m] - 4.0*ue[1][m] + 6.0*ue[2][m] - 4.0*ue[3][m] + ue[4][m]);
} else if (j == ny-3) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp * (ue[0][m] - 4.0*ue[1][m] + 6.0*ue[2][m] - 4.0*ue[3][m]);
} else if (j == ny-2) {
for (m = 0; m < 5; m++) forcing(m,i,j,k) = dtemp[m] - dssp * (ue[0][m] - 4.0*ue[1][m] + 5.0*ue[2][m]);
}
// Slide the window one point forward in j.
for (m = 0; m < 5; m++) {
ue[0][m] = ue[1][m];
ue[1][m] = ue[2][m];
ue[2][m] = ue[3][m];
ue[3][m] = ue[4][m];
buf[0][m] = buf[1][m];
buf[1][m] = buf[2][m];
}
cuf[0] = cuf[1]; cuf[1] = cuf[2];
q[0] = q[1]; q[1] = q[2];
// Refill the rightmost window slot from the shifted ue[3].
if (j < ny-2) {
dtpp = 1.0/ue[3][0];
for (m = 1; m < 5; m++) buf[2][m] = dtpp * ue[3][m];
cuf[2] = buf[2][2] * buf[2][2];
buf[2][0] = cuf[2] + buf[2][1] * buf[2][1] + buf[2][3] * buf[2][3];
q[2] = 0.5*(buf[2][1]*ue[3][1] + buf[2][2]*ue[3][2] + buf[2][3]*ue[3][3]);
}
}
}
// zeta-direction contribution to the exact forcing term, plus the final
// sign flip. Mirror of the x/y sweeps but marching along k: one thread
// per interior (i,j) column with the same 5-point sliding window.
// dtemp starts from the forcing() values accumulated by the x and y
// sweeps; unlike those kernels, the dissipation here is folded into
// dtemp first and the result is stored negated (forcing = -dtemp).
// Must therefore run after exact_rhs_kernel_y.
__global__ static void exact_rhs_kernel_z (double *forcing, const int nx, const int ny, const int nz) {
int i, j, k, m;
double xi, eta, zta, dtpp, dtemp[5];
double ue[5][5], buf[3][5], cuf[3], q[3];
j = blockIdx.x*blockDim.x+threadIdx.x+1;
i = blockIdx.y*blockDim.y+threadIdx.y+1;
// Only interior columns 1..n-2 in i and j are processed.
if (j >= ny-1 || i >= nx-1) return;
eta = (double)j * dnym1;
xi = (double)i * dnxm1;
//---------------------------------------------------------------------
// zeta-direction flux differences
//---------------------------------------------------------------------
// Prime the window with k = 0, 1, 2 (cuf is based on the zeta-velocity
// component buf[*][3] in this sweep).
for (k = 0; k < 3; k++) {
zta = (double)k * dnzm1;
exact_solution_kernel(xi, eta, zta, dtemp);
for (m = 0; m < 5; m++) ue[k+1][m] = dtemp[m];
dtpp = 1.0/dtemp[0];
for (m = 1; m < 5; m++) buf[k][m] = dtpp * dtemp[m];
cuf[k] = buf[k][3] * buf[k][3];
buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
q[k] = 0.5*(buf[k][1]*ue[k+1][1] + buf[k][2]*ue[k+1][2] + buf[k][3]*ue[k+1][3]);
}
for (k = 1; k < nz-1; k++) {
// Look-ahead: solution at k+2 into window slot ue[4].
if (k+2 < nz) {
zta = (double)(k+2) * dnzm1;
exact_solution_kernel(xi, eta, zta, dtemp);
for (m = 0; m < 5; m++) ue[4][m] = dtemp[m];
}
// Central flux differences at k, accumulated onto the x+y result.
dtemp[0] = forcing(0,i,j,k) - tz2*(ue[3][3]-ue[1][3])+dz1tz1*(ue[3][0]-2.0*ue[2][0]+ue[1][0]);
dtemp[1] = forcing(1,i,j,k) - tz2*(ue[3][1]*buf[2][3]-ue[1][1]*buf[0][3])+zzcon2*(buf[2][1]-2.0*buf[1][1]+buf[0][1])+dz2tz1*(ue[3][1]-2.0*ue[2][1]+ue[1][1]);
dtemp[2] = forcing(2,i,j,k) - tz2*(ue[3][2]*buf[2][3]-ue[1][2]*buf[0][3])+zzcon2*(buf[2][2]-2.0*buf[1][2]+buf[0][2])+dz3tz1*(ue[3][2]-2.0*ue[2][2]+ue[1][2]);
dtemp[3] = forcing(3,i,j,k) - tz2*((ue[3][3]*buf[2][3]+c2*(ue[3][4]-q[2]))-(ue[1][3]*buf[0][3]+c2*(ue[1][4]-q[0])))+zzcon1*(buf[2][3]-2.0*buf[1][3]+buf[0][3])+dz4tz1*(ue[3][3]-2.0*ue[2][3] +ue[1][3]);
dtemp[4] = forcing(4,i,j,k) - tz2*(buf[2][3]*(c1*ue[3][4]-c2*q[2])-buf[0][3]*(c1*ue[1][4]-c2*q[0]))+0.5*zzcon3*(buf[2][0]-2.0*buf[1][0]+buf[0][0])+
zzcon4*(cuf[2]-2.0*cuf[1]+cuf[0])+zzcon5*(buf[2][4]-2.0*buf[1][4]+buf[0][4])+dz5tz1*(ue[3][4]-2.0*ue[2][4]+ue[1][4]);
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
// Applied to dtemp (not directly to forcing) because the final store
// below negates the whole accumulated value.
if (k == 1) {
for (m = 0; m < 5; m++) dtemp[m] = dtemp[m] - dssp*(5.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);
} else if (k == 2) {
for (m = 0; m < 5; m++) dtemp[m] = dtemp[m] - dssp*(-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);
} else if (k >= 3 && k < nz-3) {
for (m = 0; m < 5; m++) dtemp[m] = dtemp[m] - dssp*(ue[0][m]-4.0*ue[1][m]+6.0*ue[2][m]-4.0*ue[3][m]+ue[4][m]);
} else if (k == nz-3) {
for (m = 0; m < 5; m++) dtemp[m] = dtemp[m] - dssp*(ue[0][m]-4.0*ue[1][m] + 6.0*ue[2][m] - 4.0*ue[3][m]);
} else if (k == nz-2) {
for (m = 0; m < 5; m++) dtemp[m] = dtemp[m] - dssp*(ue[0][m]-4.0*ue[1][m]+5.0*ue[2][m]);
}
//---------------------------------------------------------------------
// now change the sign of the forcing function,
//---------------------------------------------------------------------
for (m = 0; m < 5; m++) forcing(m,i,j,k) = -1.0 * dtemp[m];
// Slide the window one point forward in k.
for (m = 0; m < 5; m++) {
ue[0][m] = ue[1][m];
ue[1][m] = ue[2][m];
ue[2][m] = ue[3][m];
ue[3][m] = ue[4][m];
buf[0][m] = buf[1][m];
buf[1][m] = buf[2][m];
}
cuf[0] = cuf[1]; cuf[1] = cuf[2];
q[0] = q[1]; q[1] = q[2];
// Refill the rightmost window slot from the shifted ue[3].
if (k < nz-2) {
dtpp = 1.0/ue[3][0];
for (m = 1; m < 5; m++) buf[2][m] = dtpp * ue[3][m];
cuf[2] = buf[2][3] * buf[2][3];
buf[2][0] = cuf[2] + buf[2][1] * buf[2][1] + buf[2][2] * buf[2][2];
q[2] = 0.5*(buf[2][1]*ue[3][1] + buf[2][2]*ue[3][2] + buf[2][3]*ue[3][3]);
}
}
}
//---------------------------------------------------------------------
// Compute the exact-solution forcing term: zero the array, then run the
// xi-, eta- and zeta-direction sweep kernels in order (each sweep
// accumulates onto the previous one's output, so the order matters).
// Each sweep marches along its own axis, so its launch grid covers the
// two remaining axes. Launch errors are now checked via HANDLE_ERROR /
// cudaGetLastError(), which the original omitted — a bad configuration
// would previously fail silently.
//---------------------------------------------------------------------
void exact_rhs (double* forcing, int nx, int ny, int nz) {
	dim3 gridinit(ny,nz);
	exact_rhs_kernel_init<<<gridinit,nx>>>(forcing, nx, ny, nz);
	HANDLE_ERROR(cudaGetLastError());
	// x sweep: grid covers (z, y)
	int yblock = min(ERHS_BLOCK,ny);
	int ygrid = (ny+yblock-1)/yblock;
	int zblock_y = min(ERHS_BLOCK/yblock,nz);
	int zgrid_y = (nz+zblock_y-1)/zblock_y;
	dim3 grid_x(zgrid_y,ygrid), block_x(zblock_y,yblock);
	exact_rhs_kernel_x<<<grid_x,block_x>>>(forcing, nx, ny, nz);
	HANDLE_ERROR(cudaGetLastError());
	// y sweep: grid covers (z, x)
	int xblock = min(ERHS_BLOCK,nx);
	int xgrid = (nx+xblock-1)/xblock;
	int zblock_x = min(ERHS_BLOCK/xblock,nz);
	int zgrid_x = (nz+zblock_x-1)/zblock_x;
	dim3 grid_y(zgrid_x,xgrid), block_y(zblock_x,xblock);
	exact_rhs_kernel_y<<<grid_y,block_y>>>(forcing, nx, ny, nz);
	HANDLE_ERROR(cudaGetLastError());
	// z sweep: grid covers (y, x)
	int yblock_x = min(ERHS_BLOCK/xblock,ny);
	int ygrid_x = (ny+yblock_x-1)/yblock_x;
	dim3 grid_z(ygrid_x,xgrid), block_z(yblock_x,xblock);
	exact_rhs_kernel_z<<<grid_z,block_z>>>(forcing, nx, ny, nz);
	HANDLE_ERROR(cudaGetLastError());
}
//---------------------------------------------------------------------
// initialize_kernel
//---------------------------------------------------------------------
// Initialize the solution array fu: fill the interior with a trilinear
// blend of the exact solution on the six faces, then overwrite each face
// with the exact boundary values. One thread per (i,j,k) point; indexing
// assumes gridDim = (nz, ny) and blockDim = nx — confirm against the
// (unseen) launch site.
// The six face overwrites below are deliberately ordered; later faces
// win on edges/corners, so do not reorder them.
__global__ static void initialize_kernel (double *fu, const int nx, const int ny, const int nz) {
int i, j, k, m;
double xi, eta, zta, temp[5];
double Pface11[5], Pface12[5], Pface21[5], Pface22[5], Pface31[5], Pface32[5];
double zero, one;
k = blockIdx.x;
j = blockIdx.y;
i = threadIdx.x;
//---------------------------------------------------------------------
// to compute the whole thing with a simple loop. Make sure those
// values are nonzero by initializing the whole thing here.
//---------------------------------------------------------------------
fu(0,i,j,k) = (double)1.0;
fu(1,i,j,k) = (double)0.0;
fu(2,i,j,k) = (double)0.0;
fu(3,i,j,k) = (double)0.0;
fu(4,i,j,k) = (double)1.0;
zero = (double)0.0;
one = (double)1.0;
//---------------------------------------------------------------------
// first store the "interpolated" values everywhere on the zone
//---------------------------------------------------------------------
zta = (double)k * dnzm1;
eta = (double)j * dnym1;
xi = (double)i * dnxm1;
// Exact solution on the two faces of each coordinate direction
// (Pface<axis><side>): xi = 0/1, eta = 0/1, zta = 0/1.
exact_solution_kernel (zero, eta, zta, Pface11);
exact_solution_kernel (one, eta, zta, Pface12);
exact_solution_kernel (xi, zero, zta, Pface21);
exact_solution_kernel (xi, one, zta, Pface22);
exact_solution_kernel (xi, eta, zero, Pface31);
exact_solution_kernel (xi, eta, one, Pface32);
// Transfinite (inclusion-exclusion) blend of the three pairs of faces.
for (m = 0; m < 5; m++) {
double Pxi = xi * Pface12[m] + (1.0-xi)*Pface11[m];
double Peta = eta * Pface22[m] + (1.0-eta)*Pface21[m];
double Pzeta = zta * Pface32[m] + (1.0-zta)*Pface31[m];
fu(m,i,j,k) = Pxi + Peta + Pzeta - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta + Pxi*Peta*Pzeta;
}
//---------------------------------------------------------------------
// now store the exact values on the boundaries
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// west face
//---------------------------------------------------------------------
xi = (double)0.0;
if (i == 0) {
zta = (double)k * dnzm1;
eta = (double)j * dnym1;
exact_solution_kernel (xi, eta, zta, temp);
for (m = 0; m < 5; m++) fu(m,i,j,k) = temp[m];
}
//---------------------------------------------------------------------
// east face
//---------------------------------------------------------------------
xi = (double)1.0;
if (i == nx-1) {
zta = (double)k * dnzm1;
eta = (double)j * dnym1;
exact_solution_kernel (xi, eta, zta, temp);
for (m = 0; m < 5; m++) fu(m,i,j,k) = temp[m];
}
//---------------------------------------------------------------------
// south face
//---------------------------------------------------------------------
eta = (double)0.0;
if (j == 0) {
zta = (double)k * dnzm1;
xi = (double)i * dnxm1;
exact_solution_kernel (xi,eta,zta,temp);
for (m = 0; m < 5; m++) fu(m,i,j,k) = temp[m];
}
//---------------------------------------------------------------------
// north face
//---------------------------------------------------------------------
eta = (double)1.0;
if (j == ny-1) {
zta = (double)k * dnzm1;
xi = (double)i * dnxm1;
exact_solution_kernel (xi,eta,zta,temp);
for (m = 0; m < 5; m++) fu(m,i,j,k) = temp[m];
}
//---------------------------------------------------------------------
// bottom face
//---------------------------------------------------------------------
zta = (double)0.0;
if (k == 0) {
eta = (double)j * dnym1;
xi = (double)i * dnxm1;
exact_solution_kernel (xi, eta, zta, temp);
for (m = 0; m < 5; m++) fu(m,i,j,k) = temp[m];
}
//---------------------------------------------------------------------
// top face
//---------------------------------------------------------------------
zta = (double)1.0;
if (k == nz-1) {
eta = (double)j * dnym1;
xi = (double)i * dnxm1;
exact_solution_kernel (xi, eta, zta, temp);
for (m = 0; m < 5; m++) fu(m,i,j,k) = temp[m];
}
}
//---------------------------------------------------------------------
// adi: compute_rhs
//---------------------------------------------------------------------
// First RHS stage: from the conserved variables fu, precompute the
// pointwise auxiliary fields used by the later stencils — reciprocal
// density (rho_i), velocities (us/vs/ws), kinetic-energy term (square),
// qs = square/rho, and the acoustic speed. One thread per (i,j,k) point;
// indexing assumes gridDim = (ny, nz) and blockDim = nx — confirm against
// the (unseen) launch site.
__global__ static void compute_rhs_kernel_1 (double *rho_i, double *us, double *vs, double *ws, double *speed, double *qs, double *square, double *fu, const int nx, const int ny, const int nz) {
	int i, j, k;
	k = blockIdx.y;
	j = blockIdx.x;
	i = threadIdx.x;
	//---------------------------------------------------------------------
	// compute the reciprocal of density, and the kinetic energy,
	// and the speed of sound.
	//---------------------------------------------------------------------
	double rho_nv = 1.0/fu(0,i,j,k);
	double square_ijk;
	rho_i(i,j,k) = rho_nv;
	us(i,j,k) = fu(1,i,j,k) * rho_nv;
	vs(i,j,k) = fu(2,i,j,k) * rho_nv;
	ws(i,j,k) = fu(3,i,j,k) * rho_nv;
	// 0.5 * |rho*u|^2 / rho — computed once and reused; the original
	// evaluated this identical expression a second time for square().
	square_ijk = 0.5*(fu(1,i,j,k)*fu(1,i,j,k) + fu(2,i,j,k)*fu(2,i,j,k) + fu(3,i,j,k)*fu(3,i,j,k)) * rho_nv;
	square(i,j,k) = square_ijk;
	qs(i,j,k) = square_ijk * rho_nv;
	//---------------------------------------------------------------------
	// (don't need speed and ainx until the lhs computation)
	//---------------------------------------------------------------------
	speed(i,j,k) = sqrt(c1c2*rho_nv*(fu(4,i,j,k) - square_ijk));
}
// Second RHS stage: rhs = dt * (forcing + inviscid/viscous flux
// differences in all three directions + fourth-order dissipation) at
// interior points. Boundary threads skip the big interior branch, so
// there rhs is just the forcing copy (NOT scaled by dt — the dt scaling
// happens only inside the interior branch). One thread per (i,j,k);
// indexing assumes gridDim = (ny, nz) and blockDim = nx — confirm
// against the (unseen) launch site. rtmp accumulates in registers and
// is written back once at the end.
__global__ static void compute_rhs_kernel_2 (double *rho_i, double *us, double *vs, double *ws, double *qs, double *square, double *rhs, double *forcing, double *fu, int nx, const int ny, const int nz) {
int i, j, k, m;
k = blockIdx.y;
j = blockIdx.x;
i = threadIdx.x;
double rtmp[5];
//---------------------------------------------------------------------
// copy the exact forcing term to the right hand side; because
// this forcing term is known, we can store it on the whole zone
// including the boundary
//---------------------------------------------------------------------
for (m = 0; m < 5; m++) rtmp[m] = forcing(m,i,j,k);
//---------------------------------------------------------------------
// compute xi-direction fluxes
//---------------------------------------------------------------------
// Interior points only: all three directional stencils need i/j/k +-1
// (and +-2 for the dissipation, handled by the boundary-adjacent cases).
if (k >= 1 && k < nz-1 && j >= 1 && j < ny-1 && i >= 1 && i < nx-1) {
double uijk = us(i,j,k);
double up1 = us(i+1,j,k);
double um1 = us(i-1,j,k);
rtmp[0] = rtmp[0] + dx1tx1*(fu(0,i+1,j,k) - 2.0*fu(0,i,j,k) + fu(0,i-1,j,k)) - tx2*(fu(1,i+1,j,k)-fu(1,i-1,j,k));
rtmp[1] = rtmp[1] + dx2tx1*(fu(1,i+1,j,k) - 2.0*fu(1,i,j,k) + fu(1,i-1,j,k)) + xxcon2*con43*(up1-2.0*uijk+um1) - tx2*(fu(1,i+1,j,k)*up1 - fu(1,i-1,j,k)*um1 + (fu(4,i+1,j,k)-square(i+1,j,k)-fu(4,i-1,j,k)+square(i-1,j,k))*c2);
rtmp[2] = rtmp[2] + dx3tx1*(fu(2,i+1,j,k) - 2.0*fu(2,i,j,k) + fu(2,i-1,j,k)) + xxcon2*(vs(i+1,j,k)-2.0*vs(i,j,k)+vs(i-1,j,k)) - tx2*(fu(2,i+1,j,k)*up1 - fu(2,i-1,j,k)*um1);
rtmp[3] = rtmp[3] + dx4tx1*(fu(3,i+1,j,k) - 2.0*fu(3,i,j,k) + fu(3,i-1,j,k)) + xxcon2*(ws(i+1,j,k)-2.0*ws(i,j,k)+ws(i-1,j,k)) - tx2*(fu(3,i+1,j,k)*up1 - fu(3,i-1,j,k)*um1);
rtmp[4] = rtmp[4] + dx5tx1*(fu(4,i+1,j,k) - 2.0*fu(4,i,j,k) + fu(4,i-1,j,k)) + xxcon3*(qs(i+1,j,k)-2.0*qs(i,j,k)+qs(i-1,j,k))+ xxcon4*(up1*up1-2.0*uijk*uijk+um1*um1) +
xxcon5*(fu(4,i+1,j,k)*rho_i(i+1,j,k) - 2.0*fu(4,i,j,k)*rho_i(i,j,k) + fu(4,i-1,j,k)*rho_i(i-1,j,k)) - tx2*((c1*fu(4,i+1,j,k) - c2*square(i+1,j,k))*up1 - (c1*fu(4,i-1,j,k) - c2*square(i-1,j,k))*um1 );
//---------------------------------------------------------------------
// add fourth order xi-direction dissipation
//---------------------------------------------------------------------
// 5-point dissipation stencil, truncated to one-sided forms near the
// i boundaries (same pattern repeated for j and k below).
if (i == 1) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp * (5.0*fu(m,i,j,k)-4.0*fu(m,i+1,j,k)+fu(m,i+2,j,k));
} else if (i == 2) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp * (-4.0*fu(m,i-1,j,k)+6.0*fu(m,i,j,k)-4.0*fu(m,i+1,j,k)+fu(m,i+2,j,k));
} else if (i >= 3 && i < nx-3) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp * ( fu(m,i-2,j,k)-4.0*fu(m,i-1,j,k)+6.0*fu(m,i,j,k)-4.0*fu(m,i+1,j,k)+fu(m,i+2,j,k));
} else if (i == nx-3) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp * (fu(m,i-2,j,k)-4.0*fu(m,i-1,j,k)+6.0*fu(m,i,j,k)-4.0*fu(m,i+1,j,k) );
} else if (i == nx-2) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp * (fu(m,i-2,j,k)-4.0*fu(m,i-1,j,k) + 5.0*fu(m,i,j,k));
}
//---------------------------------------------------------------------
// compute eta-direction fluxes
//---------------------------------------------------------------------
double vijk = vs(i,j,k);
double vp1 = vs(i,j+1,k);
double vm1 = vs(i,j-1,k);
rtmp[0] = rtmp[0] + dy1ty1*(fu(0,i,j+1,k) - 2.0*fu(0,i,j,k) + fu(0,i,j-1,k)) - ty2*(fu(2,i,j+1,k)-fu(2,i,j-1,k));
rtmp[1] = rtmp[1] + dy2ty1*(fu(1,i,j+1,k) - 2.0*fu(1,i,j,k) + fu(1,i,j-1,k)) + yycon2*(us(i,j+1,k)-2.0*us(i,j,k)+us(i,j-1,k)) - ty2*(fu(1,i,j+1,k)*vp1-fu(1,i,j-1,k)*vm1);
rtmp[2] = rtmp[2] + dy3ty1*(fu(2,i,j+1,k) - 2.0*fu(2,i,j,k) + fu(2,i,j-1,k)) + yycon2*con43*(vp1-2.0*vijk+vm1) - ty2*(fu(2,i,j+1,k)*vp1-fu(2,i,j-1,k)*vm1+(fu(4,i,j+1,k)-square(i,j+1,k)-fu(4,i,j-1,k)+square(i,j-1,k))*c2);
rtmp[3] = rtmp[3] + dy4ty1*(fu(3,i,j+1,k) - 2.0*fu(3,i,j,k) + fu(3,i,j-1,k)) + yycon2*(ws(i,j+1,k)-2.0*ws(i,j,k)+ws(i,j-1,k))-ty2*(fu(3,i,j+1,k)*vp1-fu(3,i,j-1,k)*vm1);
rtmp[4] = rtmp[4] + dy5ty1*(fu(4,i,j+1,k) - 2.0*fu(4,i,j,k) + fu(4,i,j-1,k)) + yycon3*(qs(i,j+1,k)-2.0*qs(i,j,k)+qs(i,j-1,k)) + yycon4*(vp1*vp1-2.0*vijk*vijk+vm1*vm1) +
yycon5*(fu(4,i,j+1,k)*rho_i(i,j+1,k)-2.0*fu(4,i,j,k)*rho_i(i,j,k)+fu(4,i,j-1,k)*rho_i(i,j-1,k)) - ty2*((c1*fu(4,i,j+1,k)-c2*square(i,j+1,k))*vp1 - (c1*fu(4,i,j-1,k)-c2*square(i,j-1,k))*vm1);
//---------------------------------------------------------------------
// add fourth order eta-direction dissipation
//---------------------------------------------------------------------
if (j == 1) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(5.0*fu(m,i,j,k)-4.0*fu(m,i,j+1,k)+fu(m,i,j+2,k));
} else if (j == 2) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(-4.0*fu(m,i,j-1,k)+6.0*fu(m,i,j,k)-4.0*fu(m,i,j+1,k)+fu(m,i,j+2,k));
} else if (j >= 3 && j < ny-3) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(fu(m,i,j-2,k)-4.0*fu(m,i,j-1,k)+6.0*fu(m,i,j,k)-4.0*fu(m,i,j+1,k)+fu(m,i,j+2,k));
} else if (j == ny-3) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(fu(m,i,j-2,k)-4.0*fu(m,i,j-1,k)+6.0*fu(m,i,j,k)-4.0*fu(m,i,j+1,k));
} else if (j == ny-2) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(fu(m,i,j-2,k)-4.0*fu(m,i,j-1,k)+5.0*fu(m,i,j,k));
}
//---------------------------------------------------------------------
// compute zeta-direction fluxes
//---------------------------------------------------------------------
double wijk = ws(i,j,k);
double wp1 = ws(i,j,k+1);
double wm1 = ws(i,j,k-1);
rtmp[0] = rtmp[0] + dz1tz1*(fu(0,i,j,k+1)-2.0*fu(0,i,j,k)+fu(0,i,j,k-1)) - tz2*(fu(3,i,j,k+1)-fu(3,i,j,k-1));
rtmp[1] = rtmp[1] + dz2tz1*(fu(1,i,j,k+1)-2.0*fu(1,i,j,k)+fu(1,i,j,k-1)) + zzcon2*(us(i,j,k+1)-2.0*us(i,j,k)+us(i,j,k-1)) - tz2*(fu(1,i,j,k+1)*wp1-fu(1,i,j,k-1)*wm1);
rtmp[2] = rtmp[2] + dz3tz1*(fu(2,i,j,k+1)-2.0*fu(2,i,j,k)+fu(2,i,j,k-1)) + zzcon2*(vs(i,j,k+1)-2.0*vs(i,j,k)+vs(i,j,k-1)) - tz2*(fu(2,i,j,k+1)*wp1-fu(2,i,j,k-1)*wm1);
rtmp[3] = rtmp[3] + dz4tz1*(fu(3,i,j,k+1)-2.0*fu(3,i,j,k)+fu(3,i,j,k-1)) + zzcon2*con43*(wp1-2.0*wijk+wm1) - tz2*(fu(3,i,j,k+1)*wp1-fu(3,i,j,k-1)*wm1+(fu(4,i,j,k+1)-square(i,j,k+1)-fu(4,i,j,k-1)+square(i,j,k-1))*c2);
rtmp[4] = rtmp[4] + dz5tz1*(fu(4,i,j,k+1)-2.0*fu(4,i,j,k)+fu(4,i,j,k-1)) + zzcon3*(qs(i,j,k+1)-2.0*qs(i,j,k)+qs(i,j,k-1)) + zzcon4*(wp1*wp1-2.0*wijk*wijk+wm1*wm1) +
zzcon5*(fu(4,i,j,k+1)*rho_i(i,j,k+1)-2.0*fu(4,i,j,k)*rho_i(i,j,k)+fu(4,i,j,k-1)*rho_i(i,j,k-1)) - tz2*((c1*fu(4,i,j,k+1)-c2*square(i,j,k+1))*wp1-(c1*fu(4,i,j,k-1)-c2*square(i,j,k-1))*wm1);
//---------------------------------------------------------------------
// add fourth order zeta-direction dissipation
//---------------------------------------------------------------------
if (k == 1) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(5.0*fu(m,i,j,k)-4.0*fu(m,i,j,k+1)+fu(m,i,j,k+2));
} else if (k == 2) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(-4.0*fu(m,i,j,k-1)+6.0*fu(m,i,j,k)-4.0*fu(m,i,j,k+1)+fu(m,i,j,k+2));
} else if (k >= 3 && k < nz-3) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(fu(m,i,j,k-2)-4.0*fu(m,i,j,k-1)+6.0*fu(m,i,j,k)-4.0*fu(m,i,j,k+1)+fu(m,i,j,k+2));
} else if (k == nz-3) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(fu(m,i,j,k-2)-4.0*fu(m,i,j,k-1)+6.0*fu(m,i,j,k)-4.0*fu(m,i,j,k+1));
} else if (k == nz-2) {
for (m = 0; m < 5; m++) rtmp[m] = rtmp[m] - dssp*(fu(m,i,j,k-2)-4.0*fu(m,i,j,k-1)+5.0*fu(m,i,j,k));
}
// Time-step scaling applies only to interior points.
for (m = 0; m < 5; m++) rtmp[m] *= dt;
}
// Single write-back of the accumulated register values.
for (m = 0; m < 5; m++) rhs(m,i,j,k) = rtmp[m];
}
//---------------------------------------------------------------------
// adi: txinvr
//---------------------------------------------------------------------
__global__ static void txinvr_kernel ( double *rho_i, double *us, double *vs, double *ws, double *speed, double *qs, double *rhs, const int nx, const int ny, const int nz) {
//---------------------------------------------------------------------
// Block-diagonal premultiplication of rhs by the inverse T_x matrix
// (ADI preprocessing). One thread per interior grid point: the block
// indices select (j,k) and the thread index selects i, each offset by
// one so only interior points are touched. The launch grid is assumed
// to match the interior extent exactly, so no bounds guard is needed.
//---------------------------------------------------------------------
const int k = blockIdx.y + 1;
const int j = blockIdx.x + 1;
const int i = threadIdx.x + 1;
// local copies of the flow quantities at this point
const double inv_rho = rho_i(i,j,k);
const double uvel = us(i,j,k);
const double vvel = vs(i,j,k);
const double wvel = ws(i,j,k);
const double snd = speed(i,j,k);
const double inv_c2 = 1.0/( snd*snd );
// load the five rhs components once
const double r1 = rhs(0,i,j,k);
const double r2 = rhs(1,i,j,k);
const double r3 = rhs(2,i,j,k);
const double r4 = rhs(3,i,j,k);
const double r5 = rhs(4,i,j,k);
// common subexpressions of the inverse-transform algebra
const double t1 = c2*inv_c2*(qs(i,j,k)*r1 - uvel*r2 - vvel*r3 - wvel*r4 + r5);
const double t2 = bt * inv_rho * ( uvel * r1 - r2 );
const double t3 = ( bt * inv_rho * snd ) * t1;
// write back the transformed rhs
rhs(0,i,j,k) = r1 - t1;
rhs(1,i,j,k) = -inv_rho*(wvel*r1 - r4);
rhs(2,i,j,k) = inv_rho*(vvel*r1 - r3);
rhs(3,i,j,k) = -t2 + t3;
rhs(4,i,j,k) = t2 + t3;
}
//---------------------------------------------------------------------
// adi: x_solve
//---------------------------------------------------------------------
#define lhs(m,i,j,k) lhs[(j-1)+(ny-2)*((k-1)+(nz-2)*((i)+nx*(m-3)))]
#define lhsp(m,i,j,k) lhs[(j-1)+(ny-2)*((k-1)+(nz-2)*((i)+nx*(m+4)))]
#define lhsm(m,i,j,k) lhs[(j-1)+(ny-2)*((k-1)+(nz-2)*((i)+nx*(m-3+2)))]
#define rtmp(m,i,j,k) rstmp[(j)+ny*((k)+nz*((i)+nx*(m)))]
//---------------------------------------------------------------------
// One thread per (j,k) line of the grid: each thread builds and solves
// the scalar pentadiagonal systems along the x-direction using the
// Thomas algorithm (forward elimination, then backsubstitution).
// Only a 3-row sliding window of the factors is kept in registers:
// _ls / _lp hold lhs rows for the three factors (u, u+c, u-c) and
// _rs the matching rhs rows.  The global arrays lhs / rstmp are
// scratch storage; their index macros above are laid out so that
// writes are coalesced across the (j,k) thread grid.
//---------------------------------------------------------------------
__global__ static void x_solve_kernel (double *rho_i, double *us, double *speed, double *rhs, double *lhs, double *rstmp, const int nx, const int ny, const int nz) {
int i, j, k, m;
double rhon[3], cv[3], _ls[3][5], _lp[3][5], _rs[3][5], fac1;
double zero;
k = blockIdx.x*blockDim.x+threadIdx.x+1;
j = blockIdx.y*blockDim.y+threadIdx.y+1;
// guard: the launch grid may overshoot the interior point count
if (k >= nz-1 || j >= ny-1) return;
//---------------------------------------------------------------------
// Computes the left hand side for the three x-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// zap the whole left hand side for starters
//---------------------------------------------------------------------
// boundary row i = 0 is the identity (kept in registers and in lhsp)
_ls[0][0] = (double)0.0;
_ls[0][1] = (double)0.0;
_ls[0][2] = (double)1.0;
_ls[0][3] = (double)0.0;
_ls[0][4] = (double)0.0;
lhsp(0,0,j,k) = (double)0.0;
lhsp(1,0,j,k) = (double)0.0;
lhsp(2,0,j,k) = (double)1.0;
lhsp(3,0,j,k) = (double)0.0;
lhsp(4,0,j,k) = (double)0.0;
// 'zero' mirrors the commented-out max() expression below
zero = (double)0.0;
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//--------------------------------------------------------------------
// prime the 3-point stencil windows rhon[] (eigenvalue bound) and cv[]
for (i = 0; i < 3; i++) {
fac1 = c3c4*rho_i(i,j,k);
//rhon[i] = max(max(max(dx2+con43*fac1, dx5+c1c5*fac1), dxmax+fac1), zero+dx1);
// branchy emulation of the 4-way max above
if (dx2+con43*fac1>dx5+c1c5*fac1)
rhon[i] = dx2+con43*fac1;
else
rhon[i] = dx5+c1c5*fac1;
if (rhon[i]<dxmax+fac1)
rhon[i] = dxmax+fac1;
if (rhon[i]<zero+dx1)
rhon[i] = zero+dx1;
cv[i] = us(i,j,k);
}
// row i = 1: tridiagonal coefficients plus boundary dissipation terms
_ls[1][0] = (double)0.0;
_ls[1][1] = - dttx2 * cv[0] - dtx1 * rhon[0];
_ls[1][2] = 1.0 + c2dttx1 * rhon[1];
_ls[1][3] = dttx2 * cv[2] - dtx1 * rhon[2];
_ls[1][4] = (double)0.0;
_ls[1][2] += comz5;
_ls[1][3] -= comz4;
_ls[1][4] += comz1;
for (m = 0; m < 5; m++) lhsp(m,1,j,k) = _ls[1][m];
// shift stencil windows for the next row
rhon[0] = rhon[1]; rhon[1] = rhon[2];
cv[0] = cv[1]; cv[1] = cv[2];
// preload rhs rows 0 and 1 of the first three factors
for (m = 0; m < 3; m++) {
_rs[0][m] = rhs(m,0,j,k);
_rs[1][m] = rhs(m,1,j,k);
}
//---------------------------------------------------------------------
// perform the Thomas algorithm; first, FORWARD ELIMINATION
//---------------------------------------------------------------------
for (i = 0; i < nx-2; i++) {
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
// build row i+2 on the fly: identity at the far boundary, otherwise
// tridiagonal coefficients plus fourth-order dissipation
if (i+2 == nx-1) {
_ls[2][0] = (double)0.0;
_ls[2][1] = (double)0.0;
_ls[2][2] = (double)1.0;
_ls[2][3] = (double)0.0;
_ls[2][4] = (double)0.0;
lhsp(0,i+2,j,k) = (double)0.0;
lhsp(1,i+2,j,k) = (double)0.0;
lhsp(2,i+2,j,k) = (double)1.0;
lhsp(3,i+2,j,k) = (double)0.0;
lhsp(4,i+2,j,k) = (double)0.0;
} else {
fac1 = c3c4*rho_i(i+3,j,k);
//rhon[2] = max(max(max(dx2+con43*fac1, dx5+c1c5*fac1), dxmax+fac1), zero+dx1);
if (dx2+con43*fac1>dx5+c1c5*fac1)
rhon[2] = dx2+con43*fac1;
else
rhon[2] = dx5+c1c5*fac1;
if (rhon[2]<dxmax+fac1)
rhon[2] = dxmax+fac1;
if (rhon[2]<zero+dx1)
rhon[2] = zero+dx1;
cv[2] = us(i+3,j,k);
_ls[2][0] = (double)0.0;
_ls[2][1] = - dttx2 * cv[0] - dtx1 * rhon[0];
_ls[2][2] = 1.0 + c2dttx1 * rhon[1];
_ls[2][3] = dttx2 * cv[2] - dtx1 * rhon[2];
_ls[2][4] = (double)0.0;
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
// dissipation stencil narrows near the two boundaries
if (i+2 == 2) {
_ls[2][1] -= comz4;
_ls[2][2] += comz6;
_ls[2][3] -= comz4;
_ls[2][4] += comz1;
} else if (i+2 >= 3 && i+2 < nx-3) {
_ls[2][0] += comz1;
_ls[2][1] -= comz4;
_ls[2][2] += comz6;
_ls[2][3] -= comz4;
_ls[2][4] += comz1;
} else if (i+2 == nx-3) {
_ls[2][0] += comz1;
_ls[2][1] -= comz4;
_ls[2][2] += comz6;
_ls[2][3] -= comz4;
} else if (i+2 == nx-2) {
_ls[2][0] += comz1;
_ls[2][1] -= comz4;
_ls[2][2] += comz5;
}
//---------------------------------------------------------------------
// store computed lhs for later reuse
//---------------------------------------------------------------------
// lhsp doubles as storage for the unmodified rows, reread in the
// u+c / u-c pass below
for (m = 0; m < 5; m++) lhsp(m,i+2,j,k) = _ls[2][m];
rhon[0] = rhon[1]; rhon[1] = rhon[2];
cv[0] = cv[1]; cv[1] = cv[2];
}
//---------------------------------------------------------------------
// load rhs values for current iteration
//---------------------------------------------------------------------
for (m = 0; m < 3; m++) _rs[2][m] = rhs(m,i+2,j,k);
//---------------------------------------------------------------------
// perform current iteration
//---------------------------------------------------------------------
// eliminate the sub-diagonal entries of rows i+1 and i+2 using row i
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
for (m = 0; m < 3; m++) _rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
for (m = 0; m < 3; m++) _rs[1][m] -= _ls[1][1] * _rs[0][m];
_ls[2][1] -= _ls[2][0] * _ls[0][3];
_ls[2][2] -= _ls[2][0] * _ls[0][4];
for (m = 0; m < 3; m++) _rs[2][m] -= _ls[2][0] * _rs[0][m];
//---------------------------------------------------------------------
// store computed lhs and prepare data for next iteration
// rhs is stored in a temp array such that write accesses are coalesced
//---------------------------------------------------------------------
// only the two super-diagonal entries are needed for backsubstitution
lhs(3,i,j,k) = _ls[0][3];
lhs(4,i,j,k) = _ls[0][4];
for (m = 0; m < 5; m++) {
_ls[0][m] = _ls[1][m];
_ls[1][m] = _ls[2][m];
}
for (m = 0; m < 3; m++) {
rtmp(m,i,j,k) = _rs[0][m];
_rs[0][m] = _rs[1][m];
_rs[1][m] = _rs[2][m];
}
}
//---------------------------------------------------------------------
// The last two rows in this zone are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
i = nx-2;
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
for (m = 0; m < 3; m++) _rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
for (m = 0; m < 3; m++) _rs[1][m] -= _ls[1][1] * _rs[0][m];
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac1 = 1.0/_ls[1][2];
for (m = 0; m < 3; m++) _rs[1][m] *= fac1;
lhs(3,nx-2,j,k) = _ls[0][3];
lhs(4,nx-2,j,k) = _ls[0][4];
//---------------------------------------------------------------------
// subsequently, fill the other factors u+c, u-c
//---------------------------------------------------------------------
// second pass: reread the stored u-factor rows and perturb them by
// +/- dttx2*speed to obtain the u+c (_lp) and u-c (_ls) factors
for (i = 0; i < 3; i++) cv[i] = speed(i,j,k);
for (m = 0; m < 5; m++) {
_ls[0][m] = lhsp(m,0,j,k);
_lp[0][m] = lhsp(m,0,j,k);
_ls[1][m] = lhsp(m,1,j,k);
_lp[1][m] = lhsp(m,1,j,k);
}
_lp[1][1] -= dttx2 * cv[0];
_lp[1][3] += dttx2 * cv[2];
_ls[1][1] += dttx2 * cv[0];
_ls[1][3] -= dttx2 * cv[2];
cv[0] = cv[1];
cv[1] = cv[2];
// rhs components 3 and 4 belong to the u+c / u-c factors
_rs[0][3] = rhs(3,0,j,k);
_rs[0][4] = rhs(4,0,j,k);
_rs[1][3] = rhs(3,1,j,k);
_rs[1][4] = rhs(4,1,j,k);
//---------------------------------------------------------------------
// do the u+c and the u-c factors
//---------------------------------------------------------------------
for (i = 0; i < nx-2; i++) {
//---------------------------------------------------------------------
// first, fill the other factors u+c, u-c
//---------------------------------------------------------------------
for (m = 0; m < 5; m++) {
_ls[2][m] = lhsp(m,i+2,j,k);
_lp[2][m] = lhsp(m,i+2,j,k);
}
_rs[2][3] = rhs(3,i+2,j,k);
_rs[2][4] = rhs(4,i+2,j,k);
// the identity boundary row is left unperturbed
if (i+2 < nx-1) {
cv[2] = speed(i+3,j,k);
_lp[2][1] -= dttx2 * cv[0];
_lp[2][3] += dttx2 * cv[2];
_ls[2][1] += dttx2 * cv[0];
_ls[2][3] -= dttx2 * cv[2];
cv[0] = cv[1];
cv[1] = cv[2];
}
// forward elimination step for the u+c factor (rhs component 3)
m = 3;
fac1 = 1.0/_lp[0][2];
_lp[0][3] *= fac1;
_lp[0][4] *= fac1;
_rs[0][m] *= fac1;
_lp[1][2] -= _lp[1][1]*_lp[0][3];
_lp[1][3] -= _lp[1][1]*_lp[0][4];
_rs[1][m] -= _lp[1][1]*_rs[0][m];
_lp[2][1] -= _lp[2][0]*_lp[0][3];
_lp[2][2] -= _lp[2][0]*_lp[0][4];
_rs[2][m] -= _lp[2][0]*_rs[0][m];
// forward elimination step for the u-c factor (rhs component 4)
m = 4;
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
_rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1]*_ls[0][3];
_ls[1][3] -= _ls[1][1]*_ls[0][4];
_rs[1][m] -= _ls[1][1]*_rs[0][m];
_ls[2][1] -= _ls[2][0]*_ls[0][3];
_ls[2][2] -= _ls[2][0]*_ls[0][4];
_rs[2][m] -= _ls[2][0]*_rs[0][m];
//---------------------------------------------------------------------
// store computed lhs and prepare data for next iteration
// rhs is stored in a temp array such that write accesses are coalesced
//---------------------------------------------------------------------
for (m = 3; m < 5; m++) {
lhsp(m,i,j,k) = _lp[0][m];
lhsm(m,i,j,k) = _ls[0][m];
rtmp(m,i,j,k) = _rs[0][m];
_rs[0][m] = _rs[1][m];
_rs[1][m] = _rs[2][m];
}
for (m = 0; m < 5; m++) {
_lp[0][m] = _lp[1][m];
_lp[1][m] = _lp[2][m];
_ls[0][m] = _ls[1][m];
_ls[1][m] = _ls[2][m];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
i = nx-2;
m = 3;
fac1 = 1.0/_lp[0][2];
_lp[0][3] *= fac1;
_lp[0][4] *= fac1;
_rs[0][m] *= fac1;
_lp[1][2] -= _lp[1][1]*_lp[0][3];
_lp[1][3] -= _lp[1][1]*_lp[0][4];
_rs[1][m] -= _lp[1][1]*_rs[0][m];
m = 4;
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
_rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1]*_ls[0][3];
_ls[1][3] -= _ls[1][1]*_ls[0][4];
_rs[1][m] -= _ls[1][1]*_rs[0][m];
//---------------------------------------------------------------------
// Scale the last row immediately
//---------------------------------------------------------------------
_rs[1][3] /= _lp[1][2];
_rs[1][4] /= _ls[1][2];
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
// seed the backward sweep with rows nx-2 and nx-1
for (m = 0; m < 3; m++) _rs[0][m] -= lhs(3,nx-2,j,k)*_rs[1][m];
_rs[0][3] -= _lp[0][3]*_rs[1][3];
_rs[0][4] -= _ls[0][3]*_rs[1][4];
for (m = 0; m < 5; m++) {
_rs[2][m] = _rs[1][m];
_rs[1][m] = _rs[0][m];
}
// sweep backwards; _rs[2]/_rs[1] hold the two already-solved rows
for (i = nx-3; i >= 0; i--) {
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
for (m = 0; m < 3; m++) _rs[0][m] = rtmp(m,i,j,k) - lhs(3,i,j,k)*_rs[1][m] - lhs(4,i,j,k)*_rs[2][m];
//---------------------------------------------------------------------
// And the remaining two
//---------------------------------------------------------------------
_rs[0][3] = rtmp(3,i,j,k) - lhsp(3,i,j,k)*_rs[1][3] - lhsp(4,i,j,k)*_rs[2][3];
_rs[0][4] = rtmp(4,i,j,k) - lhsm(3,i,j,k)*_rs[1][4] - lhsm(4,i,j,k)*_rs[2][4];
// apply the inverse block-diagonal transform (ninvr in the reference
// code) to interior rows before writing them back
if (i+2 < nx-1) {
//---------------------------------------------------------------------
// Do the block-diagonal inversion
//---------------------------------------------------------------------
double r1 = _rs[2][0];
double r2 = _rs[2][1];
double r3 = _rs[2][2];
double r4 = _rs[2][3];
double r5 = _rs[2][4];
double t1 = bt * r3;
double t2 = 0.5 * (r4+r5);
_rs[2][0] = -r2;
_rs[2][1] = r1;
_rs[2][2] = bt * ( r4 - r5 );
_rs[2][3] = -t1 + t2;
_rs[2][4] = t1 + t2;
}
for (m = 0; m < 5; m++) {
rhs(m,i+2,j,k) = _rs[2][m];
_rs[2][m] = _rs[1][m];
_rs[1][m] = _rs[0][m];
}
}
//---------------------------------------------------------------------
// Do the block-diagonal inversion
//---------------------------------------------------------------------
// row i = 1 still needs the transform; row i = 0 is written untouched
double tf1 = bt * _rs[2][2];
double tf2 = 0.5 * (_rs[2][3]+_rs[2][4]);
rhs(0,1,j,k) = -_rs[2][1];
rhs(1,1,j,k) = _rs[2][0];
rhs(2,1,j,k) = bt * ( _rs[2][3] - _rs[2][4] );
rhs(3,1,j,k) = -tf1 + tf2;
rhs(4,1,j,k) = tf1 + tf2;
for (m = 0; m < 5; m++) rhs(m,0,j,k) = _rs[1][m];
}
#undef lhs
#undef lhsp
#undef lhsm
#undef rtmp
//---------------------------------------------------------------------
// adi: y_solve
//---------------------------------------------------------------------
#define lhs(m,i,j,k) lhs[(i-1)+(nx-2)*((k-1)+(nz-2)*((j)+ny*(m-3)))]
#define lhsp(m,i,j,k) lhs[(i-1)+(nx-2)*((k-1)+(nz-2)*((j)+ny*(m+4)))]
#define lhsm(m,i,j,k) lhs[(i-1)+(nx-2)*((k-1)+(nz-2)*((j)+ny*(m-3+2)))]
#define rtmp(m,i,j,k) rstmp[(i)+nx*((k)+nz*((j)+ny*(m)))]
//---------------------------------------------------------------------
// One thread per (i,k) line of the grid: each thread builds and solves
// the scalar pentadiagonal systems along the y-direction with the
// Thomas algorithm, mirroring x_solve but sweeping in j.  A 3-row
// sliding window of the factors is kept in registers: _ls / _lp hold
// lhs rows for the three factors (v, v+c, v-c) and _rs the matching
// rhs rows.  lhs / rstmp are global scratch, indexed (macros above)
// so writes are coalesced across the (i,k) thread grid.
//---------------------------------------------------------------------
__global__ static void y_solve_kernel (double *rho_i, double *vs, double *speed, double *rhs, double *lhs, double *rstmp, const int nx, const int ny, const int nz) {
int i, j, k, m;
double rhoq[3], cv[3], _ls[3][5], _lp[3][5], _rs[3][5], fac1;
double zero;
k = blockIdx.x*blockDim.x+threadIdx.x+1;
i = blockIdx.y*blockDim.y+threadIdx.y+1;
// guard: the launch grid may overshoot the interior point count
if (k >= nz-1 || i >= nx-1) return;
//---------------------------------------------------------------------
// Computes the left hand side for the three y-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// zap the whole left hand side for starters
//---------------------------------------------------------------------
// boundary row j = 0 is the identity (kept in registers and in lhsp)
_ls[0][0] = (double)0.0;
_ls[0][1] = (double)0.0;
_ls[0][2] = (double)1.0;
_ls[0][3] = (double)0.0;
_ls[0][4] = (double)0.0;
lhsp(0,i,0,k) = (double)0.0;
lhsp(1,i,0,k) = (double)0.0;
lhsp(2,i,0,k) = (double)1.0;
lhsp(3,i,0,k) = (double)0.0;
lhsp(4,i,0,k) = (double)0.0;
// 'zero' mirrors the commented-out max() expression below
zero = (double)0.0;
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
// prime the 3-point stencil windows rhoq[] (eigenvalue bound) and cv[]
for (j = 0; j < 3; j++) {
fac1 = c3c4*rho_i(i,j,k);
//rhoq[j] = max(max(max(dy3+con43*fac1, dy5+c1c5*fac1), dymax+fac1), zero+dy1);
// branchy emulation of the 4-way max above
if (dy3+con43*fac1>dy5+c1c5*fac1)
rhoq[j] = dy3+con43*fac1;
else
rhoq[j] = dy5+c1c5*fac1;
if (rhoq[j]<dymax+fac1)
rhoq[j] = dymax+fac1;
if (rhoq[j]<zero+dy1)
rhoq[j] = zero+dy1;
cv[j] = vs(i,j,k);
}
// row j = 1: tridiagonal coefficients plus boundary dissipation terms
_ls[1][0] = (double)0.0;
_ls[1][1] = -dtty2*cv[0]-dty1 * rhoq[0];
_ls[1][2] = 1.0 + c2dtty1 * rhoq[1];
_ls[1][3] = dtty2*cv[2]-dty1 * rhoq[2];
_ls[1][4] = (double)0.0;
_ls[1][2] += comz5;
_ls[1][3] -= comz4;
_ls[1][4] += comz1;
for (m = 0; m < 5; m++) lhsp(m,i,1,k) = _ls[1][m];
// shift stencil windows for the next row
rhoq[0] = rhoq[1];
rhoq[1] = rhoq[2];
cv[0] = cv[1]; cv[1] = cv[2];
// preload rhs rows 0 and 1 of the first three factors
for (m = 0; m < 3; m++) {
_rs[0][m] = rhs(m,i,0,k);
_rs[1][m] = rhs(m,i,1,k);
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
for (j = 0; j < ny-2; j++) {
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
// build row j+2 on the fly: identity at the far boundary, otherwise
// tridiagonal coefficients plus fourth-order dissipation
if (j+2 == ny-1) {
_ls[2][0] = (double)0.0;
_ls[2][1] = (double)0.0;
_ls[2][2] = (double)1.0;
_ls[2][3] = (double)0.0;
_ls[2][4] = (double)0.0;
lhsp(0,i,j+2,k) = (double)0.0;
lhsp(1,i,j+2,k) = (double)0.0;
lhsp(2,i,j+2,k) = (double)1.0;
lhsp(3,i,j+2,k) = (double)0.0;
lhsp(4,i,j+2,k) = (double)0.0;
} else {
fac1 = c3c4*rho_i(i,j+3,k);
//rhoq[2] = max(max(max(dy3+con43*fac1, dy5+c1c5*fac1), dymax+fac1), zero+dy1);
if (dy3+con43*fac1>dy5+c1c5*fac1)
rhoq[2] = dy3+con43*fac1;
else
rhoq[2] = dy5+c1c5*fac1;
if (rhoq[2]<dymax+fac1)
rhoq[2] = dymax+fac1;
if (rhoq[2]<zero+dy1)
rhoq[2] = zero+dy1;
cv[2] = vs(i,j+3,k);
_ls[2][0] = (double)0.0;
_ls[2][1] = -dtty2*cv[0]-dty1 * rhoq[0];
_ls[2][2] = 1.0 + c2dtty1 * rhoq[1];
_ls[2][3] = dtty2*cv[2]-dty1 * rhoq[2];
_ls[2][4] = (double)0.0;
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
// dissipation stencil narrows near the two boundaries
if (j+2 == 2) {
_ls[2][1] -= comz4;
_ls[2][2] += comz6;
_ls[2][3] -= comz4;
_ls[2][4] += comz1;
} else if (j+2 >= 3 && j+2 < ny-3) {
_ls[2][0] += comz1;
_ls[2][1] -= comz4;
_ls[2][2] += comz6;
_ls[2][3] -= comz4;
_ls[2][4] += comz1;
} else if (j+2 == ny-3) {
_ls[2][0] += comz1;
_ls[2][1] -= comz4;
_ls[2][2] += comz6;
_ls[2][3] -= comz4;
} else if (j+2 == ny-2) {
_ls[2][0] += comz1;
_ls[2][1] -= comz4;
_ls[2][2] += comz5;
}
//---------------------------------------------------------------------
// store computed lhs for later reuse
//---------------------------------------------------------------------
// lhsp doubles as storage for the unmodified rows, reread in the
// v+c / v-c pass below
for (m = 0; m < 5; m++) lhsp(m,i,j+2,k) = _ls[2][m];
rhoq[0] = rhoq[1]; rhoq[1] = rhoq[2];
cv[0] = cv[1]; cv[1] = cv[2];
}
//---------------------------------------------------------------------
// load rhs values for current iteration
//---------------------------------------------------------------------
for (m = 0; m < 3; m++) _rs[2][m] = rhs(m,i,j+2,k);
//---------------------------------------------------------------------
// perform current iteration
//---------------------------------------------------------------------
// eliminate the sub-diagonal entries of rows j+1 and j+2 using row j
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
for (m = 0; m < 3; m++) _rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
for (m = 0; m < 3; m++) _rs[1][m] -= _ls[1][1] * _rs[0][m];
_ls[2][1] -= _ls[2][0] * _ls[0][3];
_ls[2][2] -= _ls[2][0] * _ls[0][4];
for (m = 0; m < 3; m++) _rs[2][m] -= _ls[2][0] * _rs[0][m];
//---------------------------------------------------------------------
// store computed lhs and prepare data for next iteration
// rhs is stored in a temp array such that write accesses are coalesced
//---------------------------------------------------------------------
// only the two super-diagonal entries are needed for backsubstitution
lhs(3,i,j,k) = _ls[0][3];
lhs(4,i,j,k) = _ls[0][4];
for (m = 0; m < 5; m++) {
_ls[0][m] = _ls[1][m];
_ls[1][m] = _ls[2][m];
}
for (m = 0; m < 3; m++) {
rtmp(m,i,j,k) = _rs[0][m];
_rs[0][m] = _rs[1][m];
_rs[1][m] = _rs[2][m];
}
}
//---------------------------------------------------------------------
// The last two rows in this zone are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
j = ny-2;
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
for (m = 0; m < 3; m++) _rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
for (m = 0; m < 3; m++) _rs[1][m] -= _ls[1][1] * _rs[0][m];
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac1 = 1.0/_ls[1][2];
for (m = 0; m < 3; m++) _rs[1][m] *= fac1;
lhs(3,i,ny-2,k) = _ls[0][3];
lhs(4,i,ny-2,k) = _ls[0][4];
//---------------------------------------------------------------------
// do the u+c and the u-c factors
//---------------------------------------------------------------------
// second pass: reread the stored v-factor rows and perturb them by
// +/- dtty2*speed to obtain the v+c (_lp) and v-c (_ls) factors
for (j = 0; j < 3; j++) cv[j] = speed(i,j,k);
for (m = 0; m < 5; m++) {
_ls[0][m] = lhsp(m,i,0,k);
_lp[0][m] = lhsp(m,i,0,k);
_ls[1][m] = lhsp(m,i,1,k);
_lp[1][m] = lhsp(m,i,1,k);
}
_lp[1][1] -= dtty2*cv[0];
_lp[1][3] += dtty2*cv[2];
_ls[1][1] += dtty2*cv[0];
_ls[1][3] -= dtty2*cv[2];
cv[0] = cv[1];
cv[1] = cv[2];
// rhs components 3 and 4 belong to the v+c / v-c factors
_rs[0][3] = rhs(3,i,0,k);
_rs[0][4] = rhs(4,i,0,k);
_rs[1][3] = rhs(3,i,1,k);
_rs[1][4] = rhs(4,i,1,k);
for (j = 0; j < ny-2; j++) {
for (m = 0; m < 5; m++) {
_ls[2][m] = lhsp(m,i,j+2,k);
_lp[2][m] = lhsp(m,i,j+2,k);
}
_rs[2][3] = rhs(3,i,j+2,k);
_rs[2][4] = rhs(4,i,j+2,k);
// the identity boundary row is left unperturbed
if (j+2 < ny-1) {
cv[2] = speed(i,j+3,k);
_lp[2][1] -= dtty2*cv[0];
_lp[2][3] += dtty2*cv[2];
_ls[2][1] += dtty2*cv[0];
_ls[2][3] -= dtty2*cv[2];
cv[0] = cv[1];
cv[1] = cv[2];
}
// forward elimination step for the v+c factor (rhs component 3)
fac1 = 1.0/_lp[0][2];
m = 3;
_lp[0][3] *= fac1;
_lp[0][4] *= fac1;
_rs[0][m] *= fac1;
_lp[1][2] -= _lp[1][1] * _lp[0][3];
_lp[1][3] -= _lp[1][1] * _lp[0][4];
_rs[1][m] -= _lp[1][1] * _rs[0][m];
_lp[2][1] -= _lp[2][0] * _lp[0][3];
_lp[2][2] -= _lp[2][0] * _lp[0][4];
_rs[2][m] -= _lp[2][0] * _rs[0][m];
// forward elimination step for the v-c factor (rhs component 4)
m = 4;
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
_rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
_rs[1][m] -= _ls[1][1] * _rs[0][m];
_ls[2][1] -= _ls[2][0] * _ls[0][3];
_ls[2][2] -= _ls[2][0] * _ls[0][4];
_rs[2][m] -= _ls[2][0] * _rs[0][m];
//---------------------------------------------------------------------
// store computed lhs and prepare data for next iteration
// rhs is stored in a temp array such that write accesses are coalesced
//---------------------------------------------------------------------
for (m = 3; m < 5; m++) {
lhsp(m,i,j,k) = _lp[0][m];
lhsm(m,i,j,k) = _ls[0][m];
rtmp(m,i,j,k) = _rs[0][m];
_rs[0][m] = _rs[1][m];
_rs[1][m] = _rs[2][m];
}
for (m = 0; m < 5; m++) {
_lp[0][m] = _lp[1][m];
_lp[1][m] = _lp[2][m];
_ls[0][m] = _ls[1][m];
_ls[1][m] = _ls[2][m];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
j = ny-2;
m = 3;
fac1 = 1.0/_lp[0][2];
_lp[0][3] *= fac1;
_lp[0][4] *= fac1;
_rs[0][m] *= fac1;
_lp[1][2] -= _lp[1][1] * _lp[0][3];
_lp[1][3] -= _lp[1][1] * _lp[0][4];
_rs[1][m] -= _lp[1][1] * _rs[0][m];
m = 4;
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
_rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
_rs[1][m] -= _ls[1][1] * _rs[0][m];
//---------------------------------------------------------------------
// Scale the last row immediately
//---------------------------------------------------------------------
_rs[1][3] /= _lp[1][2];
_rs[1][4] /= _ls[1][2];
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
// seed the backward sweep with rows ny-2 and ny-1
for (m = 0; m < 3; m++) _rs[0][m] -= lhs(3,i,ny-2,k) * _rs[1][m];
_rs[0][3] -= _lp[0][3] * _rs[1][3];
_rs[0][4] -= _ls[0][3] * _rs[1][4];
for (m = 0; m < 5; m++) {
_rs[2][m] = _rs[1][m];
_rs[1][m] = _rs[0][m];
}
// sweep backwards; _rs[2]/_rs[1] hold the two already-solved rows
for (j = ny-3; j >= 0; j--) {
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
for (m = 0; m < 3; m++) _rs[0][m] = rtmp(m,i,j,k) - lhs(3,i,j,k)*_rs[1][m] - lhs(4,i,j,k)*_rs[2][m];
//---------------------------------------------------------------------
// And the remaining two
//---------------------------------------------------------------------
_rs[0][3] = rtmp(3,i,j,k) - lhsp(3,i,j,k)*_rs[1][3] - lhsp(4,i,j,k)*_rs[2][3];
_rs[0][4] = rtmp(4,i,j,k) - lhsm(3,i,j,k)*_rs[1][4] - lhsm(4,i,j,k)*_rs[2][4];
// apply the inverse block-diagonal transform (pinvr in the reference
// code) to interior rows before writing them back
if (j+2 < ny-1) {
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
double r1 = _rs[2][0];
double r2 = _rs[2][1];
double r3 = _rs[2][2];
double r4 = _rs[2][3];
double r5 = _rs[2][4];
double t1 = bt * r1;
double t2 = 0.5 * ( r4 + r5 );
_rs[2][0] = bt * ( r4 - r5 );
_rs[2][1] = -r3;
_rs[2][2] = r2;
_rs[2][3] = -t1 + t2;
_rs[2][4] = t1 + t2;
}
for (m = 0; m < 5; m++) {
rhs(m,i,j+2,k) = _rs[2][m];
_rs[2][m] = _rs[1][m];
_rs[1][m] = _rs[0][m];
}
}
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication
//---------------------------------------------------------------------
// row j = 1 still needs the transform; row j = 0 is written untouched
double tf1 = bt * _rs[2][0];
double tf2 = 0.5 * ( _rs[2][3] + _rs[2][4] );
rhs(0,i,1,k) = bt * ( _rs[2][3] - _rs[2][4] );
rhs(1,i,1,k) = -_rs[2][2];
rhs(2,i,1,k) = _rs[2][1];
rhs(3,i,1,k) = -tf1 + tf2;
rhs(4,i,1,k) = tf1 + tf2;
for (m = 0; m < 5; m++) rhs(m,i,0,k) = _rs[1][m];
}
#undef lhs
#undef lhsp
#undef lhsm
#undef rtmp
//---------------------------------------------------------------------
// adi: z_solve
//---------------------------------------------------------------------
#define lhs(m,i,j,k) lhs[(i-1)+(nx-2)*((j-1)+(ny-2)*((k)+nz*(m-3)))]
#define lhsp(m,i,j,k) lhs[(i-1)+(nx-2)*((j-1)+(ny-2)*((k)+nz*(m+4)))]
#define lhsm(m,i,j,k) lhs[(i-1)+(nx-2)*((j-1)+(ny-2)*((k)+nz*(m-3+2)))]
#define rtmp(m,i,j,k) rstmp[(i)+nx*((j)+ny*((k)+nz*(m)))]
__global__ static void z_solve_kernel (double *rho_i, double *us, double *vs, double *ws, double *speed, double *qs, double *fu, double *rhs, double *lhs, double *rstmp, const int nx, const int ny, const int nz) {
int i, j, k, m;
double rhos[3], cv[3], _ls[3][5], _lp[3][5], _rs[3][5], fac1;
double zero;
j = blockIdx.x*blockDim.x+threadIdx.x+1;
i = blockIdx.y*blockDim.y+threadIdx.y+1;
if (j >= ny-1 || i >= nx-1) return;
//---------------------------------------------------------------------
// Computes the left hand side for the three z-factors
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// zap the whole left hand side for starters
//---------------------------------------------------------------------
_ls[0][0] = (double)0.0;
_ls[0][1] = (double)0.0;
_ls[0][2] = (double)1.0;
_ls[0][3] = (double)0.0;
_ls[0][4] = (double)0.0;
lhsp(0,i,j,0) = (double)0.0;
lhsp(1,i,j,0) = (double)0.0;
lhsp(2,i,j,0) = (double)1.0;
lhsp(3,i,j,0) = (double)0.0;
lhsp(4,i,j,0) = (double)0.0;
zero = (double)0.0;
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
for (k = 0; k < 3; k++) {
fac1 = c3c4*rho_i(i,j,k);
//rhos[k] = max(max(max(dz4+con43*fac1, dz5+c1c5*fac1), dzmax+fac1), zero+dz1);
if (dz4+con43*fac1>dz5+c1c5*fac1)
rhos[k] = dz4+con43*fac1;
else
rhos[k] = dz5+c1c5*fac1;
if (rhos[k]<dzmax+fac1)
rhos[k] = dzmax+fac1;
if (rhos[k]<zero+dz1)
rhos[k] = zero+dz1;
cv[k] = ws(i,j,k);
}
_ls[1][0] = (double)0.0;
_ls[1][1] = -dttz2*cv[0] - dtz1*rhos[0];
_ls[1][2] = 1.0 + c2dttz1 * rhos[1];
_ls[1][3] = dttz2*cv[2] - dtz1*rhos[2];
_ls[1][4]= (double)0.0;
_ls[1][2] += comz5;
_ls[1][3] -= comz4;
_ls[1][4] += comz1;
for (m = 0; m < 5; m++) lhsp(m,i,j,1) = _ls[1][m];
rhos[0] = rhos[1]; rhos[1] = rhos[2];
cv[0] = cv[1]; cv[1] = cv[2];
for (m = 0; m < 3; m++) {
_rs[0][m] = rhs(m,i,j,0);
_rs[1][m] = rhs(m,i,j,1);
}
//---------------------------------------------------------------------
// FORWARD ELIMINATION
//---------------------------------------------------------------------
for (k = 0; k < nz-2; k++) {
//---------------------------------------------------------------------
// first fill the lhs for the u-eigenvalue
//---------------------------------------------------------------------
if (k+2 == nz-1) {
_ls[2][0] = (double)0.0;
_ls[2][1] = (double)0.0;
_ls[2][2] = (double)1.0;
_ls[2][3] = (double)0.0;
_ls[2][4] = (double)0.0;
lhsp(0,i,j,k+2) = (double)0.0;
lhsp(1,i,j,k+2) = (double)0.0;
lhsp(2,i,j,k+2) = (double)1.0;
lhsp(3,i,j,k+2) = (double)0.0;
lhsp(4,i,j,k+2) = (double)0.0;
} else {
fac1 = c3c4*rho_i(i,j,k+3);
//rhos[2] = max(max(max(dz4+con43*fac1, dz5+c1c5*fac1), dzmax+fac1), zero+dz1);
if (dz4+con43*fac1>dz5+c1c5*fac1)
rhos[2] = dz4+con43*fac1;
else
rhos[2] = dz5+c1c5*fac1;
if (rhos[2]<dzmax+fac1)
rhos[2] = dzmax+fac1;
if (rhos[2]<zero+dz1)
rhos[2] = zero+dz1;
cv[2] = ws(i,j,k+3);
_ls[2][0] = (double)0.0;
_ls[2][1] = -dttz2*cv[0] - dtz1*rhos[0];
_ls[2][2] = 1.0 + c2dttz1 * rhos[1];
_ls[2][3] = dttz2*cv[2] - dtz1*rhos[2];
_ls[2][4] = (double)0.0;
//---------------------------------------------------------------------
// add fourth order dissipation
//---------------------------------------------------------------------
if (k+2 == 2) {
_ls[2][1] -= comz4;
_ls[2][2] += comz6;
_ls[2][3] -= comz4;
_ls[2][4] += comz1;
} else if (k+2 >= 3 && k+2 < nz-3) {
_ls[2][0] += comz1;
_ls[2][1] -= comz4;
_ls[2][2] += comz6;
_ls[2][3] -= comz4;
_ls[2][4] += comz1;
} else if (k+2 == nz-3) {
_ls[2][0] += comz1;
_ls[2][1] -= comz4;
_ls[2][2] += comz6;
_ls[2][3] -= comz4;
} else if (k+2 == nz-2) {
_ls[2][0] += comz1;
_ls[2][1] -= comz4;
_ls[2][2] += comz5;
}
//---------------------------------------------------------------------
// store computed lhs for later reuse
//---------------------------------------------------------------------
for (m = 0; m < 5; m++) lhsp(m,i,j,k+2) = _ls[2][m];
rhos[0] = rhos[1]; rhos[1] = rhos[2];
cv[0] = cv[1];
cv[1] = cv[2];
}
//---------------------------------------------------------------------
// load rhs values for current iteration
//---------------------------------------------------------------------
for (m = 0; m < 3; m++) _rs[2][m] = rhs(m,i,j,k+2);
//---------------------------------------------------------------------
// perform current iteration
//---------------------------------------------------------------------
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
for (m = 0; m < 3; m++) _rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
for (m = 0; m < 3; m++) _rs[1][m] -= _ls[1][1] * _rs[0][m];
_ls[2][1] -= _ls[2][0] * _ls[0][3];
_ls[2][2] -= _ls[2][0] * _ls[0][4];
for (m = 0; m < 3; m++) _rs[2][m] -= _ls[2][0] * _rs[0][m];
//---------------------------------------------------------------------
// store computed lhs and prepare data for next iteration
// rhs is stored in a temp array such that write accesses are coalesced
//---------------------------------------------------------------------
lhs(3,i,j,k) = _ls[0][3];
lhs(4,i,j,k) = _ls[0][4];
for (m = 0; m < 5; m++) {
_ls[0][m] = _ls[1][m];
_ls[1][m] = _ls[2][m];
}
for (m = 0; m < 3; m++) {
rtmp(m,i,j,k) = _rs[0][m];
_rs[0][m] = _rs[1][m];
_rs[1][m] = _rs[2][m];
}
}
//---------------------------------------------------------------------
// The last two rows in this zone are a bit different,
// since they do not have two more rows available for the
// elimination of off-diagonal entries
//---------------------------------------------------------------------
k = nz-2;
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
for (m = 0; m < 3; m++) _rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
for (m = 0; m < 3; m++) _rs[1][m] -= _ls[1][1] * _rs[0][m];
//---------------------------------------------------------------------
// scale the last row immediately
//---------------------------------------------------------------------
fac1 = 1.0/_ls[1][2];
for (m = 0; m < 3; m++) _rs[1][m] *= fac1;
lhs(3,i,j,k) = _ls[0][3];
lhs(4,i,j,k) = _ls[0][4];
//---------------------------------------------------------------------
// subsequently, fill the other factors u+c, u-c
//---------------------------------------------------------------------
for (k = 0; k < 3; k++) cv[k] = speed(i,j,k);
for (m = 0; m < 5; m++) {
_ls[0][m] = lhsp(m,i,j,0);
_lp[0][m] = lhsp(m,i,j,0);
_ls[1][m] = lhsp(m,i,j,1);
_lp[1][m] = lhsp(m,i,j,1);
}
_lp[1][1] -= dttz2*cv[0];
_lp[1][3] += dttz2*cv[2];
_ls[1][1] += dttz2*cv[0];
_ls[1][3] -= dttz2*cv[2];
cv[0] = cv[1];
cv[1] = cv[2];
_rs[0][3] = rhs(3,i,j,0);
_rs[0][4] = rhs(4,i,j,0);
_rs[1][3] = rhs(3,i,j,1);
_rs[1][4] = rhs(4,i,j,1);
//---------------------------------------------------------------------
// do the u+c and the u-c factors
//---------------------------------------------------------------------
for (k = 0; k < nz-2; k++) {
//---------------------------------------------------------------------
// first, fill the other factors u+c, u-c
//---------------------------------------------------------------------
for (m = 0; m < 5; m++) {
_ls[2][m] = lhsp(m,i,j,k+2);
_lp[2][m] = lhsp(m,i,j,k+2);
}
_rs[2][3] = rhs(3,i,j,k+2);
_rs[2][4] = rhs(4,i,j,k+2);
if (k+2 < nz-1) {
cv[2] = speed(i,j,k+3);
_lp[2][1] -= dttz2*cv[0];
_lp[2][3] += dttz2*cv[2];
_ls[2][1] += dttz2*cv[0];
_ls[2][3] -= dttz2*cv[2];
cv[0] = cv[1];
cv[1] = cv[2];
}
m = 3;
fac1 = 1.0/_lp[0][2];
_lp[0][3] *= fac1;
_lp[0][4] *= fac1;
_rs[0][m] *= fac1;
_lp[1][2] -= _lp[1][1] * _lp[0][3];
_lp[1][3] -= _lp[1][1] * _lp[0][4];
_rs[1][m] -= _lp[1][1] * _rs[0][m];
_lp[2][1] -= _lp[2][0] * _lp[0][3];
_lp[2][2] -= _lp[2][0] * _lp[0][4];
_rs[2][m] -= _lp[2][0] * _rs[0][m];
m = 4;
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
_rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
_rs[1][m] -= _ls[1][1] * _rs[0][m];
_ls[2][1] -= _ls[2][0] * _ls[0][3];
_ls[2][2] -= _ls[2][0] * _ls[0][4];
_rs[2][m] -= _ls[2][0] * _rs[0][m];
//---------------------------------------------------------------------
// store computed lhs and prepare data for next iteration
// rhs is stored in a temp array such that write accesses are coalesced
//---------------------------------------------------------------------
for (m = 3; m < 5; m++) {
lhsp(m,i,j,k) = _lp[0][m];
lhsm(m,i,j,k) = _ls[0][m];
rtmp(m,i,j,k) = _rs[0][m];
_rs[0][m] = _rs[1][m];
_rs[1][m] = _rs[2][m];
}
for (m = 0; m < 5; m++) {
_lp[0][m] = _lp[1][m];
_lp[1][m] = _lp[2][m];
_ls[0][m] = _ls[1][m];
_ls[1][m] = _ls[2][m];
}
}
//---------------------------------------------------------------------
// And again the last two rows separately
//---------------------------------------------------------------------
k = nz-2;
m = 3;
fac1 = 1.0/_lp[0][2];
_lp[0][3] *= fac1;
_lp[0][4] *= fac1;
_rs[0][m] *= fac1;
_lp[1][2] -= _lp[1][1] * _lp[0][3];
_lp[1][3] -= _lp[1][1] * _lp[0][4];
_rs[1][m] -= _lp[1][1] * _rs[0][m];
m = 4;
fac1 = 1.0/_ls[0][2];
_ls[0][3] *= fac1;
_ls[0][4] *= fac1;
_rs[0][m] *= fac1;
_ls[1][2] -= _ls[1][1] * _ls[0][3];
_ls[1][3] -= _ls[1][1] * _ls[0][4];
_rs[1][m] -= _ls[1][1] * _rs[0][m];
//---------------------------------------------------------------------
// Scale the last row immediately some of this is overkill
// if this is the last cell
//---------------------------------------------------------------------
_rs[1][3] /= _lp[1][2];
_rs[1][4] /= _ls[1][2];
//---------------------------------------------------------------------
// BACKSUBSTITUTION
//---------------------------------------------------------------------
for (m = 0; m < 3; m++) _rs[0][m] -= lhs(3,i,j,nz-2) * _rs[1][m];
_rs[0][3] -= _lp[0][3] * _rs[1][3];
_rs[0][4] -= _ls[0][3] * _rs[1][4];
for (m = 0; m < 5; m++) {
_rs[2][m] = _rs[1][m];
_rs[1][m] = _rs[0][m];
}
for (k = nz-3; k >= 0; k--) {
//---------------------------------------------------------------------
// The first three factors
//---------------------------------------------------------------------
for (m = 0; m < 3; m++) _rs[0][m] = rtmp(m,i,j,k) - lhs(3,i,j,k)*_rs[1][m] - lhs(4,i,j,k)*_rs[2][m];
//---------------------------------------------------------------------
// And the remaining two
//---------------------------------------------------------------------
_rs[0][3] = rtmp(3,i,j,k) - lhsp(3,i,j,k)*_rs[1][3] - lhsp(4,i,j,k)*_rs[2][3];
_rs[0][4] = rtmp(4,i,j,k) - lhsm(3,i,j,k)*_rs[1][4] - lhsm(4,i,j,k)*_rs[2][4];
if (k+2 < nz-1) {
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication tzetar
//---------------------------------------------------------------------
double xvel = us(i,j,k+2);
double yvel = vs(i,j,k+2);
double zvel = ws(i,j,k+2);
double ac = speed(i,j,k+2);
double uzik1 = fu(0,i,j,k+2);
double t1 = (bt*uzik1)/ac * (_rs[2][3] + _rs[2][4]);
double t2 = _rs[2][2] + t1;
double t3 = bt*uzik1 * (_rs[2][3] - _rs[2][4]);
_rs[2][4] = uzik1*(-xvel*_rs[2][1] + yvel*_rs[2][0]) + qs(i,j,k+2)*t2 + c2iv*(ac*ac)*t1 + zvel*t3;
_rs[2][3] = zvel*t2 + t3;
_rs[2][2] = uzik1*_rs[2][0] + yvel*t2;
_rs[2][1] = -uzik1*_rs[2][1] + xvel*t2;
_rs[2][0] = t2;
}
for (m = 0; m < 5; m++) {
rhs(m,i,j,k+2) = _rs[2][m];
_rs[2][m] = _rs[1][m];
_rs[1][m] = _rs[0][m];
}
}
//---------------------------------------------------------------------
// block-diagonal matrix-vector multiplication tzetar
//---------------------------------------------------------------------
double xfvel = us(i,j,1);
double yfvel = vs(i,j,1);
double zfvel = ws(i,j,1);
double afc = speed(i,j,1);
double ufzik1 = fu(0,i,j,1);
double tf1 = (bt*ufzik1)/afc * (_rs[2][3] + _rs[2][4]);
double tf2 = _rs[2][2] + tf1;
double tf3 = bt*ufzik1 * (_rs[2][3] - _rs[2][4]);
rhs(4,i,j,1) = ufzik1*(-xfvel*_rs[2][1] + yfvel*_rs[2][0]) + qs(i,j,1)*tf2 + c2iv*(afc*afc)*tf1 + zfvel*tf3;
rhs(3,i,j,1) = zfvel*tf2 + tf3;
rhs(2,i,j,1) = ufzik1*_rs[2][0] + yfvel*tf2;
rhs(1,i,j,1) = -ufzik1*_rs[2][1] + xfvel*tf2;
rhs(0,i,j,1) = tf2;
for (m = 0; m < 5; m++) rhs(m,i,j,0) = _rs[1][m];
}
#undef lhs
#undef lhsp
#undef lhsm
#undef rtmp
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// one thread per (component m, interior cell i,j,k): u += rhs
// launch: grid = (ny-2, nz-2), block = (nx-2, 5)
//---------------------------------------------------------------------
__global__ static void add_kernel (double *fu, double *rhs, const int nx, const int ny, const int nz) {
const int ci = threadIdx.x+1; // interior cell index in x
const int cm = threadIdx.y;   // solution component 0..4
const int cj = blockIdx.x+1;  // interior cell index in y
const int ck = blockIdx.y+1;  // interior cell index in z
fu(cm,ci,cj,ck) += rhs(cm,ci,cj,ck);
}
//---------------------------------------------------------------------
// adi
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// adi: run one (singlestep) or niter ADI time steps; each step performs
// rhs computation, txinvr, the x/y/z line solves, and the final add.
// All pointers are device buffers sized by nx*ny*nz (see main()).
//---------------------------------------------------------------------
void adi(bool singlestep, int nx, int ny, int nz, int niter, double* rho_i, double* us, double* vs, double* ws,
double* speed, double* qs, double* square, double* rhs, double* lhs, double* forcing, double* fu, double* rstmp) {
HANDLE_ERROR(cudaDeviceSynchronize());
int itmax = singlestep ? 1 : niter;
int xblock, xgrid, yblock, ygrid, zblock, zgrid;
for (int step = 1; step <= itmax; step++) {
// progress report every 20 steps, and on step 1 of the real (timed) run;
// parentheses make the && / || precedence explicit
if (step % 20 == 0 || (step == 1 && !singlestep))
printf(" Time step %4d\n", step);
//compute_rhs();
dim3 grid1(ny,nz);
compute_rhs_kernel_1<<<grid1,nx>>>(rho_i, us, vs, ws, speed, qs, square, fu, nx, ny, nz);
HANDLE_ERROR(cudaGetLastError()); // catch bad launch configs early
compute_rhs_kernel_2<<<grid1,nx>>>(rho_i, us, vs, ws, qs, square, rhs, forcing, fu, nx, ny, nz);
HANDLE_ERROR(cudaGetLastError());
//txinvr();
dim3 grid2(ny-2,nz-2);
txinvr_kernel<<<grid2,nx-2>>> (rho_i, us, vs, ws, speed, qs, rhs, nx, ny, nz);
HANDLE_ERROR(cudaGetLastError());
//x_solve();
yblock = min(SOLVE_BLOCK,ny);
ygrid = (ny+yblock-1)/yblock;
zblock = min(SOLVE_BLOCK/yblock,nz);
zgrid = (nz+zblock-1)/zblock;
dim3 grid3(zgrid,ygrid), block3(zblock,yblock);
x_solve_kernel<<<grid3,block3>>>(rho_i, us, speed, rhs, lhs, rstmp, nx, ny, nz);
HANDLE_ERROR(cudaGetLastError());
//y_solve();
xblock = min(SOLVE_BLOCK,nx);
xgrid = (nx+xblock-1)/xblock;
zblock = min(SOLVE_BLOCK/xblock,nz);
zgrid = (nz+zblock-1)/zblock;
dim3 grid4(zgrid,xgrid), block4(zblock,xblock);
y_solve_kernel<<<grid4,block4>>>(rho_i, vs, speed, rhs, lhs, rstmp, nx, ny, nz);
HANDLE_ERROR(cudaGetLastError());
//z_solve();
xblock = min(SOLVE_BLOCK,nx);
xgrid = (nx+xblock-1)/xblock;
yblock = min(SOLVE_BLOCK/xblock,ny);
ygrid = (ny+yblock-1)/yblock;
dim3 grid5(ygrid,xgrid), block5(yblock,xblock);
z_solve_kernel<<<grid5,block5>>>(rho_i, us, vs, ws, speed, qs, fu, rhs, lhs, rstmp, nx, ny, nz);
HANDLE_ERROR(cudaGetLastError());
//add();
dim3 grid6(ny-2,nz-2);
dim3 block6(nx-2,5);
add_kernel<<<grid6,block6>>>(fu, rhs, nx, ny, nz);
HANDLE_ERROR(cudaGetLastError());
}
HANDLE_ERROR(cudaDeviceSynchronize());
}
//---------------------------------------------------------------------
// defaults from parameters
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// read_input: load benchmark parameters from inputsp.data if present,
// otherwise fall back to the compiled-in defaults for the class letter.
//   benchclass : NPB class letter (s/w/a/b/c/d/e, case-insensitive)
//   dd_td      : out - time step dt
//   nx, ny, nz : out - grid dimensions
//   niter      : out - number of time steps
// Exits the process on an invalid class or a malformed input file.
//---------------------------------------------------------------------
void read_input(char benchclass, double* dd_td, int* nx, int* ny, int* nz, int* niter) {
FILE *file;
if ((file = fopen("inputsp.data", "rt")) != NULL) {
char line[1024];
printf(" Reading from input file inputsp.data\n");
// three lines are expected: niter, dt, "nx ny nz"; the original ignored
// the fgets/sscanf results and would parse a stale buffer on a short file
if (fgets(line, sizeof(line)-1, file) == NULL || sscanf(line, "%i", niter) != 1 ||
fgets(line, sizeof(line)-1, file) == NULL || sscanf(line, "%lf", dd_td) != 1 ||
fgets(line, sizeof(line)-1, file) == NULL || sscanf(line, "%i %i %i", nx, ny, nz) != 3) {
printf(" Error: malformed inputsp.data\n");
fclose(file);
exit(EXIT_FAILURE);
}
fclose(file);
} else {
// printf(" No input file inputsp.data. Using compiled defaults\n");
int problem_size;
switch (benchclass) {
case 's':
case 'S': problem_size = 12; *dd_td = 0.015; *niter = 100; break;
case 'w':
case 'W': problem_size = 36; *dd_td = 0.0015; *niter = 400; break;
case 'a':
case 'A': problem_size = 64; *dd_td = 0.0015; *niter = 400; break;
case 'b':
case 'B': problem_size = 102; *dd_td = 0.001; *niter = 400; break;
case 'c':
case 'C': problem_size = 162; *dd_td = 0.00067; *niter = 400; break;
case 'd':
case 'D': problem_size = 408; *dd_td = 0.00030; *niter = 500; break;
case 'e':
case 'E': problem_size = 1020; *dd_td = 0.0001; *niter = 500; break;
default: printf("setparams: Internal error: invalid class %c\n", benchclass); exit(EXIT_FAILURE);
}
*nx = *ny = *nz = problem_size;
}
printf("\n\n NAS Parallel Benchmarks (NPB3.3-CUDA) - SP Benchmark\n\n");
printf(" Size: %4dx%4dx%4d\n", *nx, *ny, *nz);
printf(" Iterations: %4d dt_d: %10.6F\n", *niter, *dd_td);
printf("\n");
}
//---------------------------------------------------------------------
// program entry: read parameters, allocate device buffers, upload the
// solver constants to __constant__ memory, run one untimed warm-up ADI
// step, reinitialize, run the timed main loop, and report timings.
//---------------------------------------------------------------------
int main(int argc, char **argv) {
// problem class letter from argv[1] (default 'S')
char benchclass = argc > 1 ? argv[1][0] : 'S';
struct timeval start_t;
struct timeval end_t;
struct timeval skt_t;
struct timeval ske_t;
int niter;
int nx, ny, nz;
double hdd;
double dd_d;
double *fu, *forcing, *rhs, *rho_i, *us, *vs, *ws, *qs, *speed, *square, *lhs, *rstmp;
//double* rmsbuf;
//double xce[5], xcr[5];
// NOTE(review): the CUDA* device-info variables below are declared but
// never used in the visible code
char CUDAname[256];
int CUDAmp, CUDAclock, CUDAmemclock, CUDAl2cache;
size_t CUDAmem;
//---------------------------------------------------------------------
// read input data
//---------------------------------------------------------------------
read_input(benchclass, &hdd, &nx, &ny, &nz, &niter);
dd_d = hdd;
//---------------------------------------------------------------------
// allocate CUDA device memory
//---------------------------------------------------------------------
int gridsize = nx*ny*nz;
// facesize is only used by the commented-out rmsbuf allocation below
int facesize = max(max(nx*ny, nx*nz), ny*nz);
gettimeofday(&start_t, NULL);
HANDLE_ERROR(cudaMalloc((void **)&fu, 5*gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&forcing, 5*gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&rhs, 5*gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&rho_i, gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&us, gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&vs, gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&ws, gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&qs, gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&speed, gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&square, gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&lhs, 9*gridsize*sizeof(double)));
HANDLE_ERROR(cudaMalloc((void **)&rstmp, 5*gridsize*sizeof(double)));
//HANDLE_ERROR(cudaMalloc((void **)&rmsbuf, 5*facesize*sizeof(double)));
// exact-solution coefficient table ce[13][5]; copied into constant
// memory below and read by the exact_rhs/initialize kernels
double ce_d[13][5];
ce_d[0][0] = (double)2.0;
ce_d[1][0] = (double)0.0;
ce_d[2][0] = (double)0.0;
ce_d[3][0] = (double)4.0;
ce_d[4][0] = (double)5.0;
ce_d[5][0] = (double)3.0;
ce_d[6][0] = (double)0.5;
ce_d[7][0] = (double)0.02;
ce_d[8][0] = (double)0.01;
ce_d[9][0] = (double)0.03;
ce_d[10][0] = (double)0.5;
ce_d[11][0] = (double)0.4;
ce_d[12][0] = (double)0.3;
ce_d[0][1] = (double)1.0;
ce_d[1][1] = (double)0.0;
ce_d[2][1] = (double)0.0;
ce_d[3][1] = (double)0.0;
ce_d[4][1] = (double)1.0;
ce_d[5][1] = (double)2.0;
ce_d[6][1] = (double)3.0;
ce_d[7][1] = (double)0.01;
ce_d[8][1] = (double)0.03;
ce_d[9][1] = (double)0.02;
ce_d[10][1] = (double)0.4;
ce_d[11][1] = (double)0.3;
ce_d[12][1] = (double)0.5;
ce_d[0][2] = (double)2.0;
ce_d[1][2] = (double)2.0;
ce_d[2][2] = (double)0.0;
ce_d[3][2] = (double)0.0;
ce_d[4][2] = (double)0.0;
ce_d[5][2] = (double)2.0;
ce_d[6][2] = (double)3.0;
ce_d[7][2] = (double)0.04;
ce_d[8][2] = (double)0.03;
ce_d[9][2] = (double)0.05;
ce_d[10][2] = (double)0.3;
ce_d[11][2] = (double)0.5;
ce_d[12][2] = (double)0.4;
ce_d[0][3] = (double)2.0;
ce_d[1][3] = (double)2.0;
ce_d[2][3] = (double)0.0;
ce_d[3][3] = (double)0.0;
ce_d[4][3] = (double)0.0;
ce_d[5][3] = (double)2.0;
ce_d[6][3] = (double)3.0;
ce_d[7][3] = (double)0.03;
ce_d[8][3] = (double)0.05;
ce_d[9][3] = (double)0.04;
ce_d[10][3] = (double)0.2;
ce_d[11][3] = (double)0.1;
ce_d[12][3] = (double)0.3;
ce_d[0][4] = (double)5.0;
ce_d[1][4] = (double)4.0;
ce_d[2][4] = (double)3.0;
ce_d[3][4] = (double)2.0;
ce_d[4][4] = (double)0.1;
ce_d[5][4] = (double)0.4;
ce_d[6][4] = (double)0.3;
ce_d[7][4] = (double)0.05;
ce_d[8][4] = (double)0.04;
ce_d[9][4] = (double)0.03;
ce_d[10][4] = (double)0.1;
ce_d[11][4] = (double)0.3;
ce_d[12][4] = (double)0.2;
// derived solver constants: inverse grid spacings (dn*m1), their squares
// and products with the time step (t*, dt*, c2dtt*), and the artificial
// dissipation / viscosity coefficients (comz*, d*t*1, **con*)
double bt_d = sqrt(0.5);
double dnxm1_d = 1.0/((double)nx-1.0);
double dnym1_d = 1.0/((double)ny-1.0);
double dnzm1_d = 1.0/((double)nz-1.0);
double tx1_d = 1.0 / (dnxm1_d * dnxm1_d);
double tx2_d = 1.0 / (2.0 * dnxm1_d);
double tx3_d = 1.0 / dnxm1_d;
double ty1_d = 1.0 / (dnym1_d * dnym1_d);
double ty2_d = 1.0 / (2.0 * dnym1_d);
double ty3_d = 1.0 / dnym1_d;
double tz1_d = 1.0 / (dnzm1_d * dnzm1_d);
double tz2_d = 1.0 / (2.0 * dnzm1_d);
double tz3_d = 1.0 / dnzm1_d;
double dtx1_d = dd_d*tx1_d;
double dttx2_d = dd_d*tx2_d;
double dty1_d = dd_d*ty1_d;
double dtty2_d = dd_d*ty2_d;
double dtz1_d = dd_d*tz1_d;
double dttz2_d = dd_d*tz2_d;
double c2dttx1_d = 2.0*dtx1_d;
double c2dtty1_d = 2.0*dty1_d;
double c2dttz1_d = 2.0*dtz1_d;
double dtdssp_d = dd_d*dssp;
double comz1_d = dtdssp_d;
double comz4_d = 4.0*dtdssp_d;
double comz5_d = 5.0*dtdssp_d;
double comz6_d = 6.0*dtdssp_d;
double c3c4tx3_d = c3c4*tx3_d;
double c3c4ty3_d = c3c4*ty3_d;
double c3c4tz3_d = c3c4*tz3_d;
double dx1tx1_d = dx1*tx1_d;
double dx2tx1_d = dx2*tx1_d;
double dx3tx1_d = dx3*tx1_d;
double dx4tx1_d = dx4*tx1_d;
double dx5tx1_d = dx5*tx1_d;
double dy1ty1_d = dy1*ty1_d;
double dy2ty1_d = dy2*ty1_d;
double dy3ty1_d = dy3*ty1_d;
double dy4ty1_d = dy4*ty1_d;
double dy5ty1_d = dy5*ty1_d;
double dz1tz1_d = dz1*tz1_d;
double dz2tz1_d = dz2*tz1_d;
double dz3tz1_d = dz3*tz1_d;
double dz4tz1_d = dz4*tz1_d;
double dz5tz1_d = dz5*tz1_d;
double xxcon1_d = c3c4tx3_d*con43*tx3_d;
double xxcon2_d = c3c4tx3_d*tx3_d;
double xxcon3_d = c3c4tx3_d*conz1*tx3_d;
double xxcon4_d = c3c4tx3_d*con16*tx3_d;
double xxcon5_d = c3c4tx3_d*c1c5*tx3_d;
double yycon1_d = c3c4ty3_d*con43*ty3_d;
double yycon2_d = c3c4ty3_d*ty3_d;
double yycon3_d = c3c4ty3_d*conz1*ty3_d;
double yycon4_d = c3c4ty3_d*con16*ty3_d;
double yycon5_d = c3c4ty3_d*c1c5*ty3_d;
double zzcon1_d = c3c4tz3_d*con43*tz3_d;
double zzcon2_d = c3c4tz3_d*tz3_d;
double zzcon3_d = c3c4tz3_d*conz1*tz3_d;
double zzcon4_d = c3c4tz3_d*con16*tz3_d;
double zzcon5_d = c3c4tz3_d*c1c5*tz3_d;
// upload every constant into device __constant__ memory.
// NOTE(review): these pass &symbol (the address of the device shadow)
// rather than the bare symbol name; both forms compile, but confirm the
// symbol resolves correctly with the toolkit in use
HANDLE_ERROR (cudaMemcpyToSymbol (&ce, &ce_d, 13*5*sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&bt, &bt_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dnxm1, &dnxm1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dnym1, &dnym1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dnzm1, &dnzm1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&tx1, &tx1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&tx2, &tx2_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&tx3, &tx3_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&ty1, &ty1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&ty2, &ty2_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&ty3, &ty3_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&tz1, &tz1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&tz2, &tz2_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&tz3, &tz3_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dtx1, &dtx1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dttx2, &dttx2_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dty1, &dty1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dtty2, &dtty2_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dtz1, &dtz1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dttz2, &dttz2_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&c2dttx1, &c2dttx1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&c2dtty1, &c2dtty1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&c2dttz1, &c2dttz1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dt, &dd_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dtdssp, &dtdssp_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&comz1, &comz1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&comz4, &comz4_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&comz5, &comz5_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&comz6, &comz6_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&c3c4tx3, &c3c4tx3_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&c3c4ty3, &c3c4ty3_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&c3c4tz3, &c3c4tz3_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dx1tx1, &dx1tx1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dx2tx1, &dx2tx1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dx3tx1, &dx3tx1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dx4tx1, &dx4tx1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dx5tx1, &dx5tx1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dy1ty1, &dy1ty1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dy2ty1, &dy2ty1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dy3ty1, &dy3ty1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dy4ty1, &dy4ty1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dy5ty1, &dy5ty1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dz1tz1, &dz1tz1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dz2tz1, &dz2tz1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dz3tz1, &dz3tz1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dz4tz1, &dz4tz1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&dz5tz1, &dz5tz1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&xxcon1, &xxcon1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&xxcon2, &xxcon2_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&xxcon3, &xxcon3_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&xxcon4, &xxcon4_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&xxcon5, &xxcon5_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&yycon1, &yycon1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&yycon2, &yycon2_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&yycon3, &yycon3_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&yycon4, &yycon4_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&yycon5, &yycon5_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&zzcon1, &zzcon1_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&zzcon2, &zzcon2_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&zzcon3, &zzcon3_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&zzcon4, &zzcon4_d, sizeof(double)));
HANDLE_ERROR (cudaMemcpyToSymbol (&zzcon5, &zzcon5_d, sizeof(double)));
gettimeofday(&skt_t, NULL);
// precompute the forcing terms on the device
exact_rhs(forcing, nx, ny, nz);
//sp->initialize();
dim3 grid(nz,ny);
initialize_kernel<<<grid,nx>>> (fu, nx, ny, nz);
//---------------------------------------------------------------------
// do one time step to touch all code, and reinitialize
//---------------------------------------------------------------------
adi(true, nx, ny, nz, niter, rho_i, us, vs, ws,
speed, qs, square, rhs, lhs, forcing, fu, rstmp);
//sp->initialize();
initialize_kernel<<<grid,nx>>> (fu, nx, ny, nz);
//---------------------------------------------------------------------
// main time stepping loop
//---------------------------------------------------------------------
//sp->adi(false);
adi(false, nx, ny, nz, niter, rho_i, us, vs, ws,
speed, qs, square, rhs, lhs, forcing, fu, rstmp);
gettimeofday(&ske_t, NULL);
gettimeofday(&end_t, NULL);
// "time" includes the allocations and constant uploads; "kernel" covers
// only the computation between skt_t and ske_t
std::cout << "time: "<<((end_t.tv_sec-start_t.tv_sec)+(end_t.tv_usec-start_t.tv_usec)*1e-6) << std::endl;
std::cout << "kernel: "<<((ske_t.tv_sec-skt_t.tv_sec)+(ske_t.tv_usec-skt_t.tv_usec)*1e-6) << std::endl;
//std::cout << (sdkGetAverageTimerValue(&timer)/1000.0) / iterations << " seconds per iteration" << std::endl;
//---------------------------------------------------------------------
// verification test
//---------------------------------------------------------------------
//char verifyclass;
//bool verified;
//---------------------------------------------------------------------
// More timers
//---------------------------------------------------------------------
//sp->print_timers();
//delete sp;
// NOTE(review): the device allocations above are never cudaFree'd; the
// driver reclaims them at process exit
return EXIT_SUCCESS;
}
|
1,235
|
#include "includes.h"
// Per-filter gradient of the batch-norm scale: accumulates
// sum over batch and spatial positions of delta * x_norm.
// One block per filter; each thread builds a strided partial sum in
// shared memory, and thread 0 folds the partials into scale_updates[filter].
// Intended launch: n blocks of BCNN_CUDA_THREADS threads.
__global__ void bcnn_grad_scales_kernel(float *x_norm, float *delta, int batch, int n, int size, float *scale_updates) {
    __shared__ float part[BCNN_CUDA_THREADS];
    int i, b;
    int filter = blockIdx.x;
    int p = threadIdx.x;
    float sum = 0;
    for (b = 0; b < batch; ++b) {
        for (i = 0; i < size; i += BCNN_CUDA_THREADS) {
            int index = p + i + size * (filter + n * b);
            sum += (p + i < size) ? delta[index] * x_norm[index] : 0;
        }
    }
    part[p] = sum;
    __syncthreads();
    if (p == 0) {
        // Fold only the slots actually written (blockDim.x of them); the
        // original iterated over all BCNN_CUDA_THREADS slots, reading
        // uninitialized shared memory when launched with fewer threads,
        // and issued one global read-modify-write per iteration.
        float total = 0;
        for (i = 0; i < blockDim.x; ++i)
            total += part[i];
        scale_updates[filter] += total;
    }
}
|
1,236
|
#define ulong unsigned long long
#define uint unsigned int
#define MOD_P0 469762049LL
#define MOD_P1 1811939329LL
#define MOD_P2 2013265921LL
//The transform results are held modulo three distinct primes; Garner's
//algorithm reconstructs the original value from the residues. Since the
//primes are fixed, the modular inverses used below are precomputed constants.
//E0..E2 are the input residue arrays; the combined value is accumulated into
//E3 with carry propagation.
//arrayLength2 = number of 32-bit limbs in arrayE3
// Garner / CRT reconstruction: combine the residues arrayE0[idx] (mod P0),
// arrayE1[idx] (mod P1), arrayE2[idx] (mod P2) into the unique value
// x < P0*P1*P2, then atomically add the resulting ~96-bit quantity into the
// little-endian base-2^32 big integer arrayE3, rippling carries upward.
// arrayLength2 is the number of 32-bit limbs in arrayE3.
// Launch with 256 threads per block (idx is computed with that stride).
__global__ void GarnerGPU(uint *arrayE0,uint *arrayE1,uint *arrayE2,uint *arrayE3,uint arrayLength2 ) {
int idx = threadIdx.x+blockIdx.x*256;
ulong ar=arrayE0[idx];
ulong br=arrayE1[idx];
ulong cr=arrayE2[idx];
ulong x=ar;
// lift from mod P0 to mod P0*P1
ulong brx=br-x+MOD_P1;
if (brx>=MOD_P1)brx-=MOD_P1;
x=x+(brx*1540148431)%MOD_P1*MOD_P0;
//1540148431=modinv(MOD_P0,MOD_P1)
// at this point x is guaranteed to be below MOD_P0*MOD_P1
ulong crx=cr+MOD_P2-x%MOD_P2;
if (crx>=MOD_P2)crx-=MOD_P2;
ulong w1=(crx*1050399624)%MOD_P2;
//1050399624=modinv(MOD_P0,MOD_P2) *modinv(MOD_P1,MOD_P2)%MOD_P2
ulong w2=MOD_P0*MOD_P1;
// 128-bit product w1*w2, then add x with a manual carry into the high word
ulong out_lo=w1*w2;
ulong out_hi=__umul64hi(w1,w2);
if (out_lo>(out_lo+x)){
out_hi++;
}
out_lo+=x;
// split the result into 32-bit limbs and add into arrayE3
uint ui00_32=(uint)(out_lo%(1ULL<<32ULL));
uint ui32_64=(uint)(out_lo/(1ULL<<32ULL));
uint ui64_96=(uint)(out_hi%(1ULL<<32ULL));
uint lastE3_0 = atomicAdd( &arrayE3[idx+0], ui00_32 );
if ((lastE3_0+ui00_32)<lastE3_0){// carry out of limb 0
ui32_64++;
if (ui32_64==0)ui64_96++;
}
if (ui32_64!=0){
uint lastE3_1 = atomicAdd( &arrayE3[idx+1], ui32_64 );
if ((lastE3_1+ui32_64)<lastE3_1){// carry out of limb 1
ui64_96++;// presumably this increment cannot itself overflow -- TODO confirm
}
}
uint upflg=0;
if (ui64_96!=0){
uint lastE3_2 = atomicAdd( &arrayE3[idx+2], ui64_96 );
if ((lastE3_2+ui64_96)<lastE3_2){// carry out of limb 2
upflg++;
}
}
uint lastE3_i;
// ripple any remaining carry upward (handles long ...9999 + 1 chains)
for(int i=idx+3;i<arrayLength2;i++){
if (upflg==0)break;
lastE3_i = atomicAdd( &arrayE3[i], upflg );
if (lastE3_i==4294967295){
upflg=1;
}else{
upflg=0;
}
}
}
|
1,237
|
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define N 17 // size of arrays
// GPU transpose of an N x N row-major matrix: b = a^T.
// One thread per output element; out-of-range threads exit early.
__global__ void transpose (int *a, int *b) {
    const int x = blockDim.x * blockIdx.x + threadIdx.x; // column
    const int y = blockDim.y * blockIdx.y + threadIdx.y; // row
    if (x >= N || y >= N)
        return;
    // b[y][x] = a[x][y]
    b[y * N + x] = a[x * N + y];
}
// Host driver: initialize an N x N matrix, transpose it on the GPU with
// 16x16 thread blocks, and print the matrix before and after.
int main (int argc, char **argv ) {
    int i, j;
    int size = N * N * sizeof(int);
    int a[N][N], *devA, *devB;
    // integer ceiling division; the original detoured through
    // ceil((double)N/16), which computes the same value in floating point
    int gridsize = (N + 16 - 1) / 16;  // 16 == block edge below
    // put some numbers into the array
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++) {
            a[i][j] = j + N * i;
        }
    }
    printf("Initial values");
    for (i = 0; i < N; i++) {
        printf("\n");
        for (j = 0; j < N; j++) {
            printf("%3d ", a[i][j]);
        }
    }
    printf("\nN = %d, grid size = %d\n", N, gridsize);
    dim3 block(16, 16);
    dim3 grid(gridsize, gridsize);
    cudaMalloc((void**)&devA, size);
    cudaMalloc((void**)&devB, size);
    cudaMemcpy(devA, a, size, cudaMemcpyHostToDevice);
    transpose<<<grid, block>>>(devA, devB);
    // blocking copy; also synchronizes with the kernel launch above
    cudaMemcpy(a, devB, size, cudaMemcpyDeviceToHost);
    printf("Results");
    for (i = 0; i < N; i++) {
        printf("\n");
        for (j = 0; j < N; j++) {
            printf("%3d ", a[i][j]);
        }
    }
    printf("\n");
    cudaFree(devA);
    cudaFree(devB);
    return (0);
}
|
1,238
|
#include "includes.h"
// C += A * B for square N x N row-major matrices (N fixed at 1000).
// One thread computes one element of C.
__global__ void Product (float *a, float *b, float *c)
{
    // Each thread accumulates one value of C into cval
    float cval = 0.00;
    int R = blockIdx.y * blockDim.y + threadIdx.y; // row of the matrix
    int C = blockIdx.x * blockDim.x + threadIdx.x; // column of the matrix
    // matrix dimension
    int N = 1000;
    // bounds check: indices equal to N are already out of range.
    // (the original tested `R > N || C > N`, letting R==N / C==N through and
    // reading/writing one full row or column past the end of the arrays)
    if (R >= N || C >= N) {
        return;
    }
    for (int j = 0; j < N; j++)
    {
        cval += a[R * N + j] * b[j * N + C];
    }
    c[R * N + C] += cval;
}
|
1,239
|
#include <cuda_runtime.h>
#include <stdio.h>
// Prints the full thread/block coordinates and grid/block dimensions for
// every launched thread (device-side printf; output order is arbitrary).
__global__ void checkIndex(void) {
printf("threadIdx:(%d, %d, %d) blockIdx:(%d, %d, %d) blockDim:(%d, %d, %d) "
"gridDim:(%d, %d, %d)\n", threadIdx.x, threadIdx.y, threadIdx.z,
blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z,
gridDim.x,gridDim.y,gridDim.z);
}
// Host driver: describe a 2x2 grid of 8x8 blocks from the host, then let
// every device thread print its own coordinates via checkIndex.
int main(int argc, char **argv) {
    // total data elements per dimension
    const int nX = 16;
    const int nY = 16;
    // 8x8 threads per block; grid sized to cover nX x nY elements
    dim3 block(8, 8);
    dim3 grid(nX / block.x, nY / block.y);
    // check grid and block dimensions from the host side
    printf("grid.x %d grid.y %d grid.z %d\n", grid.x, grid.y, grid.z);
    printf("block.x %d block.y %d block.z %d\n", block.x, block.y, block.z);
    // check grid and block dimensions from the device side
    checkIndex<<<grid, block>>>();
    // reset the device before leaving (also flushes device printf output)
    cudaDeviceReset();
    return 0;
}
|
1,240
|
// filename: ax.cu
// a simple CUDA kernel to add two vectors
extern "C" // ensure function name to be exactly "ax"
{
// Elementwise scale: c[i] = a * b[i] for the first lengthC elements.
// One thread per element; threads past the end exit early.
__global__ void ax(const int lengthC, const double a, const double *b, double *c)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= lengthC)
        return; // grid tail: nothing to do (zero-based indexing throughout)
    c[idx] = a * b[idx];
}
}
|
1,241
|
#include "includes.h"
// Masked dot product between data1 and data2, averaged over the positions
// where the mask dm exceeds 0.5.  Each thread walks one column of a
// size x size slice; the index expression makes consecutive threads touch
// consecutive addresses (coalesced access, per the original comment).
// NOTE(review): if no mask entry exceeds 0.5 for a thread, nnn stays 0 and
// the stored result is 0/0 = NaN -- confirm callers expect that.
__global__ void dot_cmp_kernaldm(const float* data1, const float* data2, const float* dm, float* device_soln, const int size, const int num_threads, const int offset)
{
float dot = 0.0f;
float nnn = 0.0f;
// global work-item id: blocks strided by num_threads, shifted by offset
int idx = threadIdx.x + blockIdx.x*num_threads + offset;
for(int i = 0; i < size; i++){
int index = i*size + idx % size + ((idx/size)*size*size); //for coalescing
if(dm[index] > 0.5){
dot += data1[index]*data2[index];
nnn += 1.0f;
}
}
device_soln[idx] = dot/nnn;
}
|
1,242
|
/******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
#define BLOCK_SIZE 512
// Block-wise sum reduction: each block reduces a 2*BLOCK_SIZE segment of
// `in` and writes its partial sum to out[blockIdx.x].
// Launch with blockDim.x == BLOCK_SIZE.
__global__ void reduction(float *out, float *in, unsigned size)
{
    __shared__ float partialSum[2*BLOCK_SIZE];
    unsigned int t = threadIdx.x;
    unsigned int start = 2*blockIdx.x*blockDim.x;
    // zero-pad the tail segment instead of reading past the end of `in`
    // (the original loaded unconditionally -- an out-of-bounds read for the
    // last block whenever size is not a multiple of 2*BLOCK_SIZE)
    partialSum[t] = (start + t < size) ? in[start + t] : 0.0f;
    partialSum[blockDim.x + t] =
        (start + blockDim.x + t < size) ? in[start + blockDim.x + t] : 0.0f;
    // tree reduction; with zero padding no extra bounds test is needed
    for (unsigned int stride = BLOCK_SIZE; stride >= 1; stride /= 2)
    {
        __syncthreads();
        if (t < stride)
            partialSum[t] += partialSum[t + stride];
    }
    if (t == 0)
        out[blockIdx.x] = partialSum[0];
}
// INSERT KERNEL CODE HERE
|
1,243
|
#include <cuda.h>
#include <stdio.h>
// Writes 0 through the given device pointer and prints it.
// Every launched thread stores to the same address (benign here, since all
// threads write the same value).
__global__ void K(int *p) {
*p = 0;
printf("%d\n", *p);
}
// Deliberate experiment: launch K on freshly allocated device memory, free
// it, then launch K again on the now-dangling pointer to observe how the
// runtime reports the use-after-free (see the commented-out error query).
int main() {
int *x, *y;
cudaMalloc(&x, sizeof(int));
K<<<2, 10>>>(x);
cudaDeviceSynchronize();
// alias the allocation and free it through the alias
y = x;
cudaFree(y);
// second launch uses freed device memory -- expected to fault/error
K<<<2, 10>>>(x);
cudaDeviceSynchronize();
//cudaError_t err = cudaGetLastError();
//printf("error=%d, %s, %s\n", err, cudaGetErrorName(err), cudaGetErrorString(err));
return 0;
}
|
1,244
|
#include "includes.h"
char* concat(char *s1, char *s2);
__global__ void r_final_sum_and_alpha_calculation(float * r_squared ,float * p_sum ,int size)
{
int index = threadIdx.x ;
__shared__ float shared_r_squared[1024] ;
__shared__ float shared_p_sum[1024] ;
if (index < size)
{
shared_r_squared[index] = r_squared[index] ;
shared_p_sum[index] = p_sum[index] ;
} else
{
shared_r_squared[index] = 0 ;
shared_p_sum[index] = 0 ;
}
__syncthreads() ;
for (unsigned int s = blockDim.x/2 ; s> 0 ; s >>= 1)
{
if (index < s)
{
shared_r_squared[index] = shared_r_squared[index] + shared_r_squared[index +s] ;
shared_p_sum[index] = shared_p_sum[index] + shared_p_sum[index +s] ;
__syncthreads() ;
}
}
if(threadIdx.x == 0)
{
//alpha
r_squared[blockIdx.x] = shared_r_squared[0]/shared_p_sum[0] ;
}
}
|
1,245
|
#include "rgb_pixels_factory.cuh"
// Pseudo-random integer in [0, max) via rand().
// Precondition: max > 0.  Note: the modulo introduces a slight bias when
// max does not evenly divide RAND_MAX+1.
int RgbPixelsFactory::random(int max)
{
return rand() % max;
}
// Random byte value in [0, 255], used for one color channel.
unsigned char RgbPixelsFactory::randomChar()
{
return random(256);
}
// Allocate `count` pixels with random RGB colors and random positions in
// [0, maxX) x [0, maxY).  The caller owns the returned array (delete[]).
Pixel * RgbPixelsFactory::generate(int count, int maxX, int maxY)
{
    Pixel* pixels = new Pixel[count];
    for (Pixel* p = pixels; p != pixels + count; ++p)
    {
        p->color.red = randomChar();
        p->color.green = randomChar();
        p->color.blue = randomChar();
        p->point.x = random(maxX);
        p->point.y = random(maxY);
    }
    return pixels;
}
|
1,246
|
//
// CrossCorrelation.cu
// CrossCorrelation
//
// Created by Vivek Sridhar on 29/06/17.
// Copyright © 2017 Vivek Sridhar. All rights reserved.
//
#include <iostream>
#include <fstream>
#include <sstream>
#include <vector>
#include <algorithm>
#include <stdio.h>
#include <stdlib.h>
#include <dirent.h>
// Convert any streamable value to its textual representation.
template <typename T>
std::string to_string(const T& value) {
    std::ostringstream out;
    out << value;
    return out.str();
}
// Compute val! iteratively (returns 1 for val <= 1).
// Note: the result overflows a 64-bit long for val > 20.
// (A leftover debug `std::cout << val` that printed every argument to
// stdout has been removed.)
long factorial(long val)
{
    long result = 1;
    for (long i = 2; i <= val; ++i)
    {
        result *= i;
    }
    return result;
}
// n choose r, computed multiplicatively so it stays exact for n well
// beyond 20 (the original divided factorials, and factorial(n) overflows
// a 64-bit long for n > 20 even when the binomial itself is small).
// Returns 0 for r < 0 or r > n.
long combination(long n, long r)
{
    if (r < 0 || r > n) return 0;
    if (r > n - r) r = n - r; // use the smaller complement
    long result = 1;
    for (long i = 1; i <= r; ++i)
    {
        // exact at every step: a product of i consecutive integers is
        // divisible by i!
        result = result * (n - r + i) / i;
    }
    return result;
}
// Time-delayed cross-correlation term for one sample:
//   res[i] = x1[i]*x2[i+tau] + y1[i]*y2[i+tau]
// with zeros wherever the shifted index would leave the valid range
// (the first na_frames entries are NA padding for negative tau).
__global__ void kernel(float *x1, float *y1, float *x2, float *y2, float *res, int tau, int na_frames, long nElements)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // grid tail guard: the original wrote res[index] for every launched
    // thread, out of bounds whenever gridDim.x*blockDim.x > nElements
    if (index >= nElements)
        return;
    if (tau < 0)
    {
        if (index >= -tau + na_frames)
        {
            res[index] = x1[index] * x2[index + tau] + y1[index] * y2[index + tau];
        }
        else res[index] = 0.0;
    }
    else
    {
        if (index < nElements - tau)
        {
            res[index] = x1[index] * x2[index + tau] + y1[index] * y2[index + tau];
        }
        else res[index] = 0.0;
    }
}
// total measurement points in the time series is defined by nElements
#define M 1024 // number of threads per block
#define fps 10 // frames per second of input video (used to determine tau)
#define time 5 // time in seconds within which time delayed cross correlation is calculated (tau ranges from -time*fps to time*fps)
#define n_inds 10
int na_frames = 0; // number of frames in the start with nas
int scale = 1; // time window for analysis in seconds; varying this allows us to examine dynamics of leadership across varying timescales; setting scale larger than the entire time series or -1 gives aggregated statistics across the entire duration (otherwise, timescale of analysis is scale*fps)
//const int pairs = combination(n_inds, 2);
const bool aggregate = false; // this boolean decides whether you output a dynamic time variable leadership network or a static time aggregated network; scale is set to -1 if aggregate is true
std::ofstream outputFile1;
// Driver: for every ordered pair of individuals, loads their binary x/y
// heading tracks, runs the lagged cross-correlation kernel for each tau in
// [-time*fps, time*fps], and writes either time-aggregated or sliding-window
// leadership statistics to a CSV file.
int main ()
{
DIR *dir;
// One file handle per track: x and y headings for the two individuals.
FILE *pFile_x1; FILE *pFile_y1; FILE *pFile_x2; FILE *pFile_y2;
long lSize;
long nElements;
struct dirent *file;
// Device and host copies of the two tracks plus the per-frame products.
float *d_x1, *d_y1, *d_x2, *d_y2, *d_res;
float *x1, *y1, *x2, *y2, *res;
size_t result_x1, result_y1, result_x2, result_y2;
// scale == -1 selects the time-aggregated (whole-series) output path.
if (aggregate) scale = -1;
std::vector<std::string> files;
std::string directory = "/home/user/Documents/Vivek/cuda/DirectionalCorrelation/Data/Input/pigeons/10_birds/ffA3/cross_correlation/";
dir = opendir(directory.c_str());
int idx = 0;
// Collect every filename starting with 'd' (the dir_x*/dir_y* track files).
while ((file = readdir(dir)) != NULL)
{
if (file->d_name[0] == 'd')
{
files.push_back(file->d_name);
++idx;
}
}
// NOTE(review): sorts exactly 2*n_inds entries -- assumes the directory
// holds precisely one x and one y file per individual; confirm data layout.
std::sort(files.begin(), files.begin()+2*n_inds);
closedir(dir);
// Open output file
std::string filename_cc;
if (scale != -1) filename_cc = "cross_correlation_01.csv";
else filename_cc = "avgd_cross_correlation.csv";
outputFile1.open(filename_cc.c_str());
// Output file headers
if (aggregate || scale == -1) outputFile1 << "id1"<< ", " << "id2" << ", " << "tau" << ", " << "cc" << "\n";
else outputFile1 << "time" << ", " << "id1" << ", " << "id2" << ", " << "tau" << ", " << "cc" << "\n";
//files = {"dir_x00", "dir_x01", "dir_y00", "dir_y01"}
// All ordered pairs (a, b), a != b. files[a]/files[b] are x tracks; the
// matching y tracks sit n_inds entries later after the sort above.
for (int a = 0; a < n_inds; ++a)
{
for (int b = 0; b < n_inds; ++b)
{
if (b != a)
{
pFile_x1 = fopen ((directory + files[a]).c_str(), "rb");
pFile_y1 = fopen ((directory + files[a+n_inds]).c_str(), "rb");
pFile_x2 = fopen ((directory + files[b]).c_str(), "rb");
pFile_y2 = fopen ((directory + files[b+n_inds]).c_str(), "rb");
if (pFile_x1==NULL || pFile_y1==NULL || pFile_x2==NULL || pFile_y2==NULL) { fputs ("File error",stderr); exit (1); }
// obtain file size (all four tracks are assumed to be the same length)
fseek (pFile_x1 , 0 , SEEK_END);
lSize = ftell (pFile_x1);
rewind (pFile_x1);
nElements = lSize / sizeof(float);
// allocate memory to contain the whole file
// device memory
cudaMalloc((void **) &d_x1, lSize);
cudaMalloc((void **) &d_y1, lSize);
cudaMalloc((void **) &d_x2, lSize);
cudaMalloc((void **) &d_y2, lSize);
cudaMalloc((void **) &d_res, lSize);
// host memory
x1 = (float*) malloc(lSize);
y1 = (float*) malloc(lSize);
x2 = (float*) malloc(lSize);
y2 = (float*) malloc(lSize);
res = (float*) malloc(lSize);
if (x1 == NULL || y1==NULL || x2==NULL || y2==NULL || res==NULL) { fputs ("Memory error",stderr); exit (2); }
// copy the file into the respective float pointers
result_x1 = fread (x1, sizeof(float), nElements, pFile_x1);
result_y1 = fread (y1, sizeof(float), nElements, pFile_y1);
result_x2 = fread (x2, sizeof(float), nElements, pFile_x2);
result_y2 = fread (y2, sizeof(float), nElements, pFile_y2);
if (result_x1 != nElements || result_y1 != nElements || result_x2 != nElements || result_y2 != nElements) { fputs ("Reading error",stderr); exit (3); }
// the whole files are now loaded in the memory x1, y1, x2 and y2 respectively
cudaMemcpy(d_x1, x1, lSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_y1, y1, lSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_x2, x2, lSize, cudaMemcpyHostToDevice);
cudaMemcpy(d_y2, y2, lSize, cudaMemcpyHostToDevice);
// fall back to the aggregated path if the window exceeds the series
if (scale*fps > nElements) scale = -1;
// NOTE(review): these are C variable-length arrays (a GNU extension in
// C++); for large nElements they can overflow the stack. Heap allocation
// would be safer.
int tau_max[nElements - scale*fps];
float res_tmp[nElements - scale*fps];
float res_max[nElements - scale*fps];
std::fill_n(tau_max, nElements - scale*fps, 0);
std::fill_n(res_tmp, nElements - scale*fps, 0.0);
std::fill_n(res_max, nElements - scale*fps, -1.0);
// Sweep the lag; one kernel launch per tau computes all per-frame terms.
for (int tau = -time*fps; tau <= time*fps; ++tau)
{
kernel<<<(nElements + M - 1) / M, M>>>(d_x1, d_y1, d_x2, d_y2, d_res, tau, na_frames, nElements);
// Blocking copy also synchronizes with the kernel above.
cudaMemcpy(res, d_res, lSize, cudaMemcpyDeviceToHost);
if (scale == -1)
{
// Whole-series average for this tau.
// NOTE(review): the accumulator starts at -1.0f rather than 0 --
// looks like a deliberate offset or a bug; verify intent.
float res_now = -1.0f;
for (int i = na_frames; i < nElements; ++i)
{
if (res[i] != res[i]) std::cout << x1[i] << " " << y1[i] << " " << i << " " << tau << "\n"; // if nans
res_now += res[i];
}
// IDs are parsed from filename characters 5..7 (e.g. "dir_x123").
outputFile1 << (to_string(files[a][5])).c_str() << (to_string(files[a][6])).c_str() << (to_string(files[a][7])).c_str() << ", " << (to_string(files[b][5])).c_str() << (to_string(files[b][6])).c_str() << (to_string(files[b][7])).c_str() << ", " << tau << ", " << res_now / nElements << "\n";
}
else
{
// Sliding-window sums; track the best (tau, sum) per window start.
std::fill_n(res_tmp, nElements - scale*fps, 0.0);
for (int i = na_frames; i < nElements - scale*fps; ++i)
{
for (int j = i; j < i + scale*fps; ++j)
{
res_tmp[i] += res[j];
if (j == i + scale*fps - 1 && res_max[i] < res_tmp[i]) { res_max[i] = res_tmp[i]; tau_max[i] = tau; }
}
}
}
}
if (scale != -1)
{
// Emit the best lag per window, timestamped at the window centre.
for (int t = 0; t < nElements - scale*fps; ++t)
{
outputFile1 << t + scale*fps/2 << ", " << (to_string(files[a][5])).c_str() << (to_string(files[a][6])).c_str() << (to_string(files[a][7])).c_str() << ", " << (to_string(files[b][5])).c_str() << (to_string(files[b][6])).c_str() << (to_string(files[b][7])).c_str() << ", " << tau_max[t] << ", " << res_max[t] / (scale*fps) << "\n";
}
}
fclose(pFile_x1);
fclose(pFile_x2);
fclose(pFile_y1);
fclose(pFile_y2);
cudaFree(d_x1); cudaFree(d_y1); cudaFree(d_x2); cudaFree(d_y2); cudaFree(d_res);
// NOTE(review): `res` is malloc'd above but never freed -- leaks once
// per (a, b) pair.
free(x1); free(y1); free(x2); free(y2);
}
}
}
// terminate
// NOTE(review): these handles were already closed inside the loop above --
// closing them a second time is undefined behaviour.
fclose(pFile_x1); fclose(pFile_y1); fclose(pFile_x2); fclose(pFile_y2);
return 0;
}
|
1,247
|
#include "includes.h"
/**
* @brief cudaCreateBuffer Allocates a cuda buffer and stops the programm on error.
* @param size
* @return
*/
// Fill the first `size` elements of gpuBuffPtr with the constant v.
// One thread per element; threads past `size` in the last block do nothing.
__global__ void kernelSetDoubleBuffer(float* gpuBuffPtr, float v, size_t size)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    gpuBuffPtr[idx] = v;
}
|
1,248
|
#include "includes.h"
// Copy the first out0count elements of A into out0, one element per thread.
// The launch may use a 2D grid: (blockIdx.y, blockIdx.x, threadIdx.x) is
// folded into a single linear index. Acount/Acols are accepted for interface
// compatibility but unused here.
__global__ void MatrixCopy_naive (const float * A , int Acount, int Acols, float * out0 , int out0count)
{
    const int tid = threadIdx.x
                  + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x);
    if (tid >= out0count)
        return;
    out0[tid] = A[tid];
}
|
1,249
|
// 3D convolution by CUDA
__global__ void cu_conv(const float *A,const float *K,const float *B, int kw, int kh, int kn, int cw_rem, int ch_rem, float *C){
// A : input data, K : Kernel, B : bias
int cx = threadIdx.x + blockIdx.x*blockDim.x;
int cy = threadIdx.y + blockIdx.y*blockDim.y;
int cz = blockIdx.z/int(kn*kh*kw);
int n = (blockIdx.z%(kn*kh*kw)) / (kh*kw);
int j = ((blockIdx.z%(kn*kh*kw)) % (kh*kw)) / kw;
int i = ((blockIdx.z%(kn*kh*kw)) % (kh*kw)) % kw;
int cw = blockDim.x*gridDim.x + cw_rem;
int ch = blockDim.y*gridDim.y + ch_rem;
int aw = cw + (kw-1);
int ah = ch + (kh-1);
int cidx = cx + cy*cw + cz*(cw*ch);
int aidx = (cx+i) + (cy+j)*aw + (cz)*(aw*ah);
int kidx = i + j*kw + n*(kw*kh);
int bidx = n;
if (cx < cw && cy < ch){
C[cidx] = A[aidx]*K[kidx] + B[bidx]/(kw*kh);
}
}
|
1,250
|
#include <iostream>
using namespace std;
// Smoke test for the nvcc toolchain: print a greeting and exit successfully.
int main(void)
{
    const char* greeting = "Hello nvcc!";
    cout << greeting << endl;
    return 0;
}
|
1,251
|
// result[i] = ints[i] + 1 when dbls[i] is positive, else ints[i].
// Indexes by threadIdx.x only, so it covers one block's worth of elements.
__global__ void print_values(const int* ints, const double* dbls,
                             int* result)
{
    const int tid = threadIdx.x;
    const int bonus = (dbls[tid] > 0.0) ? 1 : 0;
    result[tid] = ints[tid] + bonus;
}
|
1,252
|
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <cstdlib>
#include <ctime>
#include <iostream>
// Dense matrix multiply matC = matA * matB for width x width row-major
// matrices. Each thread computes one output element via a dot product.
// NOTE: indexes by threadIdx only (no blockIdx), so results are correct
// only when launched as a single block of width x width threads -- which is
// how matriksMul() below launches it.
__global__ void matmul(float* matA, float* matB, float* matC, int width){
float pVal = 0;
for(int i=0; i<width; ++i){
// Row of A for this thread's y, column of B for this thread's x.
float elementMatA = matA[threadIdx.y*width+i];
float elementMatB = matB[i*width+threadIdx.x];
pVal += elementMatA * elementMatB;
}
matC[threadIdx.y*width+threadIdx.x] = pVal;
}
// Multiply two width x width row-major matrices on the GPU: mC = mA * mB.
// Launches a single block of width x width threads, so width is limited by
// the per-block thread cap (e.g. width <= 32 for 1024 threads per block).
// Fixes: the original reported a cudaMalloc failure with a "cudaMemcpy"
// message, logged unconditionally even on success, and left every other
// CUDA call unchecked.
void matriksMul(float* mA, float* mB, float* mC, int width){
    // Device pointers
    float* a_d, *b_d, *c_d;
    // Matrix size in bytes
    int size = width * width * sizeof(float);
    cudaError_t err;

    // Allocate and copy matrix A.
    err = cudaMalloc((void**)&a_d, size);
    if (err != cudaSuccess)
        fprintf(stderr, "Error invoking cudaMalloc for A (ERRCODE %d)\n", err);
    err = cudaMemcpy(a_d, mA, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        fprintf(stderr, "Error invoking cudaMemcpy for A (ERRCODE %d)\n", err);

    // Allocate and copy matrix B.
    err = cudaMalloc((void**)&b_d, size);
    if (err != cudaSuccess)
        fprintf(stderr, "Error invoking cudaMalloc for B (ERRCODE %d)\n", err);
    err = cudaMemcpy(b_d, mB, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        fprintf(stderr, "Error invoking cudaMemcpy for B (ERRCODE %d)\n", err);

    // Allocate device memory for the result.
    err = cudaMalloc((void**)&c_d, size);
    if (err != cudaSuccess)
        fprintf(stderr, "Error invoking cudaMalloc for C (ERRCODE %d)\n", err);

    // One block of width x width threads computes the whole product.
    dim3 dimGrid(1, 1);
    dim3 dimBlock(width, width);
    matmul<<<dimGrid,dimBlock>>>(a_d,b_d,c_d,width);
    err = cudaGetLastError();   // launch-configuration errors surface here
    if (err != cudaSuccess)
        fprintf(stderr, "Error launching matmul kernel (ERRCODE %d)\n", err);

    // Blocking copy back also synchronizes with the kernel.
    err = cudaMemcpy(mC, c_d, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
        fprintf(stderr, "Error invoking cudaMemcpy for result (ERRCODE %d)\n", err);

    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
}
// Build two 10x10 test matrices, multiply them on the GPU, and print the
// product one row per line.
// Fixes: removed the redundant function-local prototype of matriksMul;
// the row break fired at i%width==0 (after the FIRST column of each row)
// instead of at the end of a row, and values were printed with no separator.
int main(void){
    const int width = 10;
    float *M, *N, *P;
    size_t size = width * width * sizeof(float);

    // Allocate the host matrices.
    M = (float *) malloc(size);
    N = (float *) malloc(size);
    P = (float *) malloc(size);
    if (M == NULL || N == NULL || P == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }

    // M counts up, N counts down, P starts zeroed.
    for (int i = 0; i < (width*width); i++) {
        M[i] = i;
        N[i] = width*width - i;
        P[i] = 0.f;
    }

    matriksMul(M, N, P, width);

    // Print the product, one matrix row per output line.
    for (int i = 0; i < (width*width); i++) {
        printf("%f ", P[i]);
        if (i % width == width - 1) {
            printf("\n");
        }
    }

    free(M);
    free(N);
    free(P);
    return 0;
}
|
1,253
|
#include <stdio.h>
#include <stdlib.h>
#define NUM_BLOCKS 32
#define BLOCK_WIDTH 1
// Each thread announces its (thread, block) coordinates on stdout.
// Device-side printf output is flushed by the host's cudaDeviceSynchronize().
__global__ void hello()
{
    printf("Hello world! I'm thread %d in block %d\n", threadIdx.x, blockIdx.x);
}
// Launch the hello kernel; grid shape is optionally taken from the command
// line: argv[1] = number of blocks, argv[2] = threads per block.
// Fix: the original read argv[2] whenever argc > 1, which indexes out of
// bounds when exactly one argument is supplied.
int main(int argc,char **argv)
{
    int num_blocks = NUM_BLOCKS, block_width = BLOCK_WIDTH;
    if (argc > 1) {
        num_blocks = atoi(argv[1]);
    }
    if (argc > 2) {
        block_width = atoi(argv[2]);
    }
    // launch the kernel
    hello<<<num_blocks, block_width>>>();
    // force the printf()s to flush
    cudaDeviceSynchronize();
    return 0;
}
|
1,254
|
#include "includes.h"
// Elementwise c[i] = a[i] + b[i] over n doubles.
// Uses a grid-stride loop, so any launch configuration covers all n
// elements regardless of how many threads were launched.
__global__ void add( double *a, double *b, double *c, int n )
{
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride)
    {
        c[i] = a[i] + b[i];
    }
}
|
1,255
|
#include <stdio.h>
#define BLOCK_SIZE 256
#define NUM_ELEMENTS (4096*100)
// CUDA API error checking macro
#define cudaCheck(error) \
if (error != cudaSuccess) { \
printf("Fatal error: %s at %s:%d\n", \
cudaGetErrorString(error), \
__FILE__, __LINE__); \
exit(1); \
}
// Reverse an array via shared memory: each block loads its BLOCK_SIZE
// elements into shared memory reversed, then writes the whole tile to the
// mirrored block position. Assumes blockDim.x == BLOCK_SIZE and the input
// length is an exact multiple of BLOCK_SIZE (true for NUM_ELEMENTS here) --
// otherwise the tail block reads/writes out of range.
__global__ void reverse_1d(int *in, int *out)
{
__shared__ int temp[BLOCK_SIZE];
int gindex = threadIdx.x + (blockIdx.x * blockDim.x);
int lindex = threadIdx.x;
// Stage this block's tile in reversed order.
temp[BLOCK_SIZE - lindex - 1] = in[gindex];
// All writes to temp must land before any thread reads it back.
__syncthreads();
// Emit the reversed tile at the mirrored block offset.
out[BLOCK_SIZE * (gridDim.x - 1 - blockIdx.x) + threadIdx.x] = temp[lindex];
}
// Reverse the NUM_ELEMENTS-long array directly in global memory:
// element i is written to slot NUM_ELEMENTS - 1 - i.
__global__ void reverse_1Drection(int *in, int *out)
{
    const int src = blockDim.x * blockIdx.x + threadIdx.x;
    const int dst = NUM_ELEMENTS - 1 - src;
    out[dst] = in[src];
}
// Benchmark two array-reversal kernels (shared-memory tiled vs. direct
// global-memory) over NUM_ELEMENTS ints, verifying and timing each.
int main()
{
unsigned int i;
// NOTE(review): two 409600-int arrays (~1.6 MB each) on the stack --
// close to or beyond the default stack limit on some platforms; heap
// allocation would be safer.
int h_in[NUM_ELEMENTS], h_out[NUM_ELEMENTS];
int *d_in, *d_out;
// Initialize host data
for( i = 0; i < (NUM_ELEMENTS); ++i )
h_in[i] = i;
// Allocate space on the device
cudaCheck( cudaMalloc( &d_in, (NUM_ELEMENTS) * sizeof(int)) );
cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );
// Copy input data to device
cudaCheck( cudaMemcpy( d_in, h_in, (NUM_ELEMENTS) * sizeof(int), cudaMemcpyHostToDevice) );
/*cuda timing*/
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
/* end start timing */
reverse_1d<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
/*cuda timing stop*/
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start); cudaEventDestroy(stop);
cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) );
// Verify the output is the input reversed
for( i = 0; i < NUM_ELEMENTS; ++i )
if (h_out[i] != h_in[NUM_ELEMENTS-i-1])
{
printf("Element h_out[%d] == %d ERROR\n", i, h_out[i]);
break;
}
// The loop only completes (i == NUM_ELEMENTS) when every element matched.
if (i == NUM_ELEMENTS)
printf("[SHARED] SUCCESS! %f\n", time);
// Free out memory and reallocate a fresh output buffer for the second run
cudaFree(d_out);
cudaCheck( cudaMalloc( &d_out, NUM_ELEMENTS * sizeof(int)) );
/*cuda timing*/
cudaEvent_t start1, stop1;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventRecord(start1, 0);
/* end start timing */
reverse_1Drection<<< (NUM_ELEMENTS + BLOCK_SIZE - 1)/BLOCK_SIZE, BLOCK_SIZE >>> (d_in, d_out);
/*cuda timing stop*/
cudaEventRecord(stop1, 0);
cudaEventSynchronize(stop1);
cudaEventElapsedTime(&time, start1, stop1);
cudaEventDestroy(start1); cudaEventDestroy(stop1);
cudaCheck( cudaMemcpy( h_out, d_out, NUM_ELEMENTS * sizeof(int), cudaMemcpyDeviceToHost) );
// Verify the output is the input reversed
for( i = 0; i < NUM_ELEMENTS; ++i )
if (h_out[i] != h_in[NUM_ELEMENTS-i-1])
{
printf("Element h_out[%d] == %d ERROR\n", i, h_out[i]);
break;
}
if (i == NUM_ELEMENTS)
printf("[PRIVATE] SUCCESS! %f\n", time);
// Free out memory
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
|
1,256
|
/* File: vec_add.cu
* Purpose: Implement vector addition on a gpu using cuda
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <chrono>
using namespace std;
using namespace std::chrono;
/* Kernel for vector addition */
__global__ void Vec_add(float x[], float y[], float z[], int n) {
/* blockDim.x = threads_per_block */
/* First block gets first threads_per_block components. */
/* Second block gets next threads_per_block components, etc. */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block_count*threads_per_block may be >= n */
if (i < n) z[i] = x[i] + y[i];
} /* Vec_add */
/* Host code */
int main() {
//create some vectors, run the test for each one and report the output. This will be good C++ and CUDA practice.
int n, i;
float *h_x, *h_y, *h_z;
float *d_x, *d_y, *d_z;
int threads_per_block;
int block_count;
size_t size;
n = 10000000; // Number of elements in vector
size = n*sizeof(float); // Vector size
/* Allocate input vectors in host memory */
h_x = (float*) malloc(size);
h_y = (float*) malloc(size);
h_z = (float*) malloc(size);
/* Initialize input vectors */
for (i = 0; i < n; i++) {
h_x[i] = i+1;
h_y[i] = n-i;
}
/* Allocate vectors in device memory */
// We use host pointers as a pointer to the on-device memory.
cudaMalloc(&d_x, size);
cudaMalloc(&d_y, size);
cudaMalloc(&d_z, size);
/* Copy vectors from host memory to device memory */
cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
/* Define block size */
threads_per_block = 256;
/* Define grid size. If we just computed n/threads_per_block */
/* we might get fewer threads than vector components. Using */
/* ceil(n/threads_per_block) guarantees at least one thread */
/* per vector component. The following formula is a kludge */
/* since it appears that the CUDA ceil function doesn't work */
/* correctly. */
block_count = (n + threads_per_block - 1)/threads_per_block; // just enough blocks that there is a thread per element
/* Invoke kernel using block_count blocks, each of which */
/* contains threads_per_block threads */
Vec_add<<<block_count, threads_per_block>>>(d_x, d_y, d_z, n);
cudaThreadSynchronize();
int numTests = 10000;
high_resolution_clock::time_point t1 = high_resolution_clock::now();
for (int i =0; i<numTests; i++){
Vec_add<<<block_count, threads_per_block>>>(d_x, d_y, d_z, n);
/* Wait for the kernel to complete */
}
cudaThreadSynchronize();
high_resolution_clock::time_point t2 = high_resolution_clock::now();
auto duration = duration_cast<microseconds>( t2 - t1 ).count();
cout << (duration/1000) << " ms" << endl;
float numSeconds = (float) duration / 1e6;
long long numFlops = long(n) * long(numTests);
float flopsPerSecond = (float)numFlops / numSeconds;
float MflopsPerSecond = flopsPerSecond / 1e6;
cout << "MFLOP/s = " << MflopsPerSecond << endl;
/* Copy result from device memory to host memory */
/* h_z contains the result in host memory */
cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
float expectedSum = n + 1;
printf("Testing....");
for (i = 0; i < n; i++){
if(h_z[i] != expectedSum){
printf("Failure at %i ", i);
}
}
printf("\n");
/* Free device memory */
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
/* Free host memory */
free(h_x);
free(h_y);
free(h_z);
return 0;
} /* main */
|
1,257
|
#include <iostream>
#include <cstdio>
using namespace std;
#include <cuda_runtime.h>
#define TIMES 24
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////HELP FUNCTIONS/////////////////////////////////////////////////
// Fill data[0..n) with pseudo-random floats uniformly drawn from [0, 1].
void RandomInit(float* data, int n)
{
    for (int idx = 0; idx < n; ++idx)
    {
        data[idx] = rand() / (float)RAND_MAX;
    }
}
// Fill data[0..n) with pseudo-random unsigned values in [0, n).
void RandomInit(unsigned* data, int n)
{
    for (int idx = 0; idx < n; ++idx)
    {
        data[idx] = rand() % n;
    }
}
// Wrap CUDA runtime calls: checkCudaErrors(cudaMalloc(...)) aborts with the
// failing file/line and the decoded error string on any non-success status.
#define checkCudaErrors(err) __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors(cudaError err, const char *file, const int line )
{
if(cudaSuccess != err)
{
// Report where the macro was expanded, not where this helper lives.
fprintf(stderr, "%s(%i) : CUDA Runtime API error %d: %s.\n",file, line, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
// This will output the proper error string when calling cudaGetLastError
#define getLastCudaError(msg) __getLastCudaError (msg, __FILE__, __LINE__)
inline void __getLastCudaError(const char *errorMessage, const char *file, const int line )
{
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
{
fprintf(stderr, "%s(%i) : getLastCudaError() CUDA error : %s : (%d) %s.\n",
file, line, errorMessage, (int)err, cudaGetErrorString( err ) );
exit(-1);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/////////////////////////////////////////////_VECTOR_ADDITION_///////////////////////////////////////////////////////
// Device code
// Micro-benchmark: a single thread performs a chain of dependent global
// loads/stores (write, read-after-write, then a neighbouring read).
// Fix: __syncthreads() must be reached by EVERY thread of the block, but the
// original placed both barriers inside the `i == 0` branch that only one
// thread enters -- undefined behaviour for any block with >1 thread. The
// barriers now sit outside the conditional while the work stays thread-0-only.
__global__ void irreguler(const float* A, float* C, float* F)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i == 0)
    {
        C[i] = A[i];
    }
    __syncthreads();
    if (i == 0)
    {
        // high latency read-after-write dependency on C[i]
        C[i] = C[i] + A[i];
    }
    __syncthreads();
    if (i == 0)
    {
        F[i] = C[i+1];
    }
}
// Cache-behaviour micro-benchmark: thread 0 issues a miss/hit/miss/hit
// sequence on two adjacent elements of C.
// Fix: the original put every __syncthreads() inside the divergent
// `i == 0` branch, which is undefined behaviour when the block has more
// than one thread; the barriers are now executed block-wide.
__global__ void mb2(float* A, float* C)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i == 0) C[i] = A[i];          //write to C[i] is a miss (cache line is missing)
    __syncthreads();
    if (i == 0) C[i+1] = A[i];        //write to C[i+1] is a hit (cache line is found)
    __syncthreads();
    if (i == 0) C[i] = C[i] + A[i];   //read of C[i] is a miss (entire sector is missing, fetch it from memory)
    __syncthreads();
    if (i == 0) A[i] = C[i] + C[i+1]; //read C[i] and C[i+1] are hits (entire sector exists)
}
// L1-cache micro-benchmark: thread 0 loads two strided elements of A,
// writes C[0], then re-reads everything after a barrier.
// Fix: the original's __syncthreads() lived inside the divergent `i == 0`
// branch (undefined behaviour for blocks with >1 thread); it is now
// executed by all threads of the block.
__global__ void l1(float* A, float* C)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i == 0) C[i] = A[i] + A[i+32];
    __syncthreads();
    if (i == 0) C[i] = A[i] + A[i+32] + C[i];
}
// Host code
// Host code
// Allocates an N-float input, runs the l1 micro-benchmark kernel, reports
// (broken -- see NOTEs) throughput numbers, and verifies d_C == d_A ... for
// the elements the kernel actually touched.
void VectorAddition(int N, int threadsPerBlock)
{
cout<<"Vector Addition for input size "<<N<<" :\n";
// Variables
float* h_A;
float* h_C;
float* d_A;
float* d_C;
// NOTE(review): total_time is never accumulated anywhere below, so
// dSeconds is always 0 and the gflops computation divides by zero.
float total_time=0;
size_t size = N * sizeof(float);
// Allocate input vectors h_A and h_B in host memory
h_A = (float*)malloc(size);
h_C = (float*)malloc(size);
// Initialize input vectors
RandomInit(h_A, N);
// Allocate vectors in device memory
checkCudaErrors( cudaMalloc((void**)&d_A, size) );
checkCudaErrors( cudaMalloc((void**)&d_C, size) );
// Copy vectors from host memory to device memory
checkCudaErrors( cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice) );
// NOTE(review): cudaThreadSynchronize() is deprecated in favour of
// cudaDeviceSynchronize().
checkCudaErrors(cudaThreadSynchronize());
// Invoke kernel
cout<<"Invoke Kernel\n";
//int threads = 128;
int blocksPerGrid = ((N+ threadsPerBlock-1) / threadsPerBlock);
// NOTE(review): loop runs exactly once although TIMES is 24 -- presumably
// a leftover from a repeated-measurement harness; confirm intent.
for (int i = 0; i < 1; i++) {
l1<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_C);
getLastCudaError("kernel launch failure");
checkCudaErrors(cudaThreadSynchronize());
}
float dSeconds = total_time/((float)TIMES * 1000);
float dNumOps = N;
float gflops = 1.0e-9 * dNumOps/dSeconds;
cout<<"Time = "<<dSeconds*1.0e3<< "msec"<<endl<<"gflops = "<<gflops<<endl;
// Copy result from device memory to host memory
// h_C contains the result in host memory
checkCudaErrors( cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost) );
// Verify result
// NOTE(review): the l1 kernel only writes C[0]; comparing every element
// of h_C against h_A reads uninitialised device memory for i > 0.
int i;
for (i = 0; i < N; ++i) {
float sum = h_A[i];
if (fabs(h_C[i] - sum) > 1e-5)
break;
}
// Free device memory
if (d_A)
cudaFree(d_A);
if (d_C)
cudaFree(d_C);
// Free host memory
if (h_A)
free(h_A);
if (h_C)
free(h_C);
cudaDeviceReset();
// i reaches N only when every element passed the check above.
if(i == N)
cout<<"SUCCSESS"<<endl;
else
cout<<"FAILED"<<endl;
}
//////////////////////////////////////////////////////
// Entry point: expects <N> <threadsPerBlock> on the command line and runs
// the benchmark; prints a usage complaint otherwise.
int main(int argc,char *argv[])
{
    if (argc < 3)
    {
        printf("Unsuffcient number of arguments!\n");
    }
    else
    {
        VectorAddition(atoi(argv[1]), atoi(argv[2]));
    }
}
|
1,258
|
#include "includes.h"
// 1D stencil of width 2*RADIUS+1: out[g] = sum of in[g-RADIUS .. g+RADIUS].
// Each block stages BLOCK_SIZE elements plus a 2*RADIUS halo in shared
// memory. Assumes blockDim.x == BLOCK_SIZE and that `in` is padded with
// RADIUS valid elements on each side of the region being processed
// (gindex starts at +RADIUS, and the halo loads below reach RADIUS past
// the block on both ends) -- TODO confirm against the caller.
__global__ void stencil_1d(int *in, int *out) {
// within a block, threads share data via shared memory ("global memory")
// data is not visible to threads in other blocks
// use __shared__ to declare a var/array in shared memory
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
// each thread processs one output element (blockDim.x elements per block)
int gindex = threadIdx.x + (blockIdx.x * blockDim.x) + RADIUS;
int lindex = threadIdx.x + RADIUS;
// read input elements into shared memory
temp[lindex] = in[gindex];
// the first RADIUS threads also fetch the left and right halo elements
if (threadIdx.x < RADIUS) {
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
// synchronize all threads in the block : ensure all data is available
__syncthreads();
// apply the stencil
int result = 0;
for (int offset = -RADIUS ; offset <= RADIUS ; offset++) {
result += temp[lindex + offset];
}
// store the result
out[gindex-RADIUS] = result;
}
|
1,259
|
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <limits>
#include <sys/time.h>
#define ARRAY_SIZE 10000
#define BLOCK_SIZE 256
#define MICROSECONDS(start, end) ((end.tv_sec - start.tv_sec) * 1000000LL + end.tv_usec - start.tv_usec)
#define MILLISECONDS(start, end) MICROSECONDS(start, end) / 1000.0
#define SECONDS(start, end) MILLISECONDS(start, end) / 1000.0
// Host reference SAXPY: y[i] += a * x[i] for all ARRAY_SIZE elements.
void cpu_saxpy(const float *x, float *y, const float a)
{
    unsigned int idx = 0;
    while (idx < ARRAY_SIZE)
    {
        y[idx] += a * x[idx];
        ++idx;
    }
}
// GPU SAXPY: each thread updates one element of y, guarded against the
// over-provisioned tail of the last block.
__global__ void gpu_saxpy(const float *x, float *y, const float a)
{
    const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= ARRAY_SIZE)
        return;
    y[idx] += a * x[idx];
}
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
// Run SAXPY on the CPU and GPU over the same random input, time both, and
// compare the first 25 outputs.
// Fixes: (1) all four memcpy calls passed ARRAY_SIZE as the BYTE count,
// copying only the first quarter of each float array; (2) cudaFree was
// called on the HOST pointers x and y while the device buffers cuda_x and
// cuda_y were leaked.
int main(int argc, char **argv)
{
    // Initialize data.
    struct timeval start, end;
    const size_t bytes = ARRAY_SIZE * sizeof(float);
    float *x = (float *)malloc(bytes);
    float *y = (float *)malloc(bytes);
    const float a = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    for (unsigned int i = 0; i < ARRAY_SIZE; i++)
    {
        x[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
        y[i] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX);
    }

    // Copy data for CPU (memcpy takes a byte count, not an element count).
    float *cpu_x = (float *)malloc(bytes);
    float *cpu_y = (float *)malloc(bytes);
    memcpy(cpu_x, x, bytes);
    memcpy(cpu_y, y, bytes);

    // Run CPU SAXPY.
    printf("Computing SAXPY on the CPU... ");
    gettimeofday(&start, NULL);
    cpu_saxpy(cpu_x, cpu_y, a);
    gettimeofday(&end, NULL);
    printf("Done! Took %lfms.\n", MILLISECONDS(start, end));

    // Copy data for GPU.
    float *gpu_x = (float *)malloc(bytes);
    float *gpu_y = (float *)malloc(bytes);
    memcpy(gpu_x, x, bytes);
    memcpy(gpu_y, y, bytes);
    float *cuda_x;
    float *cuda_y;
    cudaMalloc(&cuda_x, bytes);
    cudaMalloc(&cuda_y, bytes);
    cudaMemcpy(cuda_x, gpu_x, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_y, gpu_y, bytes, cudaMemcpyHostToDevice);

    // Run GPU SAXPY. Grid is sized with a ceil-divide so every element is
    // covered; the kernel bounds-checks the tail.
    printf("Computing SAXPY on the GPU... ");
    gettimeofday(&start, NULL);
    gpu_saxpy<<<(ARRAY_SIZE + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(cuda_x, cuda_y, a);
    cudaDeviceSynchronize();  // kernels are asynchronous; wait before timing
    gettimeofday(&end, NULL);
    printf("Done! Took %lfms.\n", MILLISECONDS(start, end));
    cudaMemcpy(gpu_x, cuda_x, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(gpu_y, cuda_y, bytes, cudaMemcpyDeviceToHost);

    // Compare CPU/GPU SAXPY on the first 25 elements.
    bool success = true;
    float epsilon = std::numeric_limits<float>::epsilon();
    printf("Comparing the output for each implementation... ");
    for (unsigned int i = 0; i < 25; i++)
    {
        if (abs(cpu_y[i] - gpu_y[i]) > epsilon)
        {
            success = false;
            break;
        }
    }
    printf(success ? "Correct!\n" : "Incorrect!\n");

    // Free resources: host buffers with free(), device buffers with cudaFree().
    free(x);
    free(y);
    free(cpu_x);
    free(cpu_y);
    free(gpu_x);
    free(gpu_y);
    cudaFree(cuda_x);
    cudaFree(cuda_y);
    return 0;
}
|
1,260
|
// Minimal nvprof demo: round-trip a 4 MB zeroed buffer between host and
// device so the profiler has one H2D and one D2H transfer to report.
// Fix: the original leaked both the malloc'd host buffer and the device
// allocation; both are now released before exit.
int main()
{
    const unsigned int N = 1048576;
    const unsigned int bytes = N * sizeof(int);
    int *h_a = (int*)malloc(bytes);
    int *d_a;
    cudaMalloc((int**)&d_a, bytes);

    memset(h_a, 0, bytes);

    // One transfer in each direction for the profiler to record.
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(h_a, d_a, bytes, cudaMemcpyDeviceToHost);

    cudaFree(d_a);
    free(h_a);
    return 0;
}
/*
Run these next commands to run the program:
$ nvcc profile.cu -o profile_test
$ nvprof ./profile_test
*/
|
1,261
|
#include "Logger.cuh"
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <time.h>
#define PREFIX_SIZE 256
// Do not change order. It matches the values of LOGGER_LEVEL_XXXXX
static char* level_name_by_level[] = { "ERROR", "WARN", "INFO", "DEBUG" };
// Shared worker for all Logger levels: drops messages above the configured
// verbosity, prefixes fmt with "<timestamp> - [LEVEL] - <name> - " and
// forwards the composed format string plus the caller's varargs to vprintf.
// Fixes: sprintf replaced with snprintf so the composed prefix can never
// overrun the allocation, and the malloc result is checked.
static void logger_print(const char *log_name, unsigned int level, unsigned int level_message, char* fmt, va_list args) {
    // Message level must not exceed the logger's configured level.
    if (level < level_message) return;

    // Create a "prefixed" format string, by appending the log level and the log name
    size_t prefixed_size = PREFIX_SIZE + (strlen(log_name) + 1) + (strlen(fmt) + 1);
    char* prefixed_fmt = (char*) malloc(prefixed_size);
    if (prefixed_fmt == NULL) return;  // out of memory: silently drop the message

    char* log_level = level_name_by_level[level_message];
    time_t t = time(NULL);
    struct tm tm = *localtime(&t);
    snprintf(
        prefixed_fmt,
        prefixed_size,
        "%02d/%02d/%04d %02d:%02d:%02d - [%5s] - %s - %s \n",
        tm.tm_mday, // day
        tm.tm_mon + 1, // month
        tm.tm_year + 1900, // year
        tm.tm_hour, tm.tm_min, tm.tm_sec, // hour:minutes:seconds
        log_level,
        log_name,
        fmt
    );
    vprintf(prefixed_fmt, args);
    free(prefixed_fmt);
}
// Store a private heap copy of the logger name (released in the destructor)
// and the verbosity threshold used by the level-specific methods.
Logger::Logger(char* name, unsigned int level) {
    size_t bytes = strlen(name) + 1;
    this->name = (char *) malloc(bytes);
    strcpy(this->name, name);
    this->level = level;
}
// Log a printf-style message at INFO level.
void Logger::info(char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    logger_print(this->name, this->level, LOGGER_LEVEL_INFO, fmt, ap);
    va_end(ap);
}
// Log a printf-style message at WARN level.
void Logger::warn(char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    logger_print(this->name, this->level, LOGGER_LEVEL_WARN, fmt, ap);
    va_end(ap);
}
// Log a printf-style message at ERROR level.
void Logger::error(char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    logger_print(this->name, this->level, LOGGER_LEVEL_ERROR, fmt, ap);
    va_end(ap);
}
// Log a printf-style message at DEBUG level.
void Logger::debug(char* fmt, ...) {
    va_list ap;
    va_start(ap, fmt);
    logger_print(this->name, this->level, LOGGER_LEVEL_DEBUG, fmt, ap);
    va_end(ap);
}
// Release the heap copy of the logger name made in the constructor.
Logger::~Logger() {
    free(name);
}
|
1,262
|
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
//#include "cutil.h"
// Abort the program (after waiting for ENTER, so a console window stays
// open) if the most recent CUDA runtime call left a pending error.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (cudaSuccess == err)
        return;
    printf("Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
    printf("\nPress ENTER to exit...\n");
    getchar();
    exit(-1);
}
//name of the input file
#define INPUT_FILE_NAME "input.txt"
//name of the compressed file
#define COMPRESSED_FILE_NAME "compressed.txt"
#define COMPRESSED_FILE_NAME_GPU "compressed_gpu.txt"
//name of the uncompressed file
#define DECOMPRESSED_FILE_NAME "decompressed.txt"
//name of the config file
#define CONFIG_FILE_NAME "config.txt"
//max number of characters
#define MAX_CHAR 256
//#define MAX_CHAR 30
//max lenght of the number which can occur in char_frequency or char_huffman_table)
#define MAX_LENGTH_OF_NUMBER 10
//lenght of the array in shared memory on device
#define SHARED_MEMORY_SIZE 256
//lenght of the array in const memory on device
#define CONST_MEMORY_SIZE 15000 //(MAX_CHAR*(MAX_CHAR-1))
//To fill and pass the file as an array to GPU
#define MAX_FILE_CHARS 50000
#define BLOCK_SIZE 256
// Huffman-tree node: leaf value, frequency weight, and child links.
struct node {
int val;
int weight;
struct node * right, * left;
};
//keeps frequency of particular characters (index - symbol of the character, value - frequency of the character)
int char_frequency[MAX_CHAR];
//keeps huffman table: row = symbol, columns = its code bits (padded with -1)
int char_huffman_table[MAX_CHAR][MAX_CHAR-1];
//keeps number which tells how many bits were unused in last byte (variable is set after call compress_file())
int last_byte_padding=0;
//for writing gpu output
int last_byte_padding_gpu = 0;
//keeps number of characters in current input file - file has to have less than 2,147,483,647 characters (variable is set after call read_file())
int number_of_char=0 ;
//To fill and pass the file as an array to GPU (host and device copies)
unsigned char *h_input=0,*d_input=0;
// To read char_huffman_table at the GPU
int *d_char_huffman_table=0;
// host-side scratch copy of the huffman table for verification
int copiedarray2[MAX_CHAR][MAX_CHAR-1];
// device-resident copy of the huffman table
__device__ int char_huffman_table_gpu[MAX_CHAR][MAX_CHAR-1];
//To write the output from compression in GPU (one bit per bool)
//char *compressedfile_array=0;
bool *compressedfile_array=0;
bool *finalcompressed_array=0;
// To keep track of how many characters each block wrote
int *block_cntr_array=0;
int *block_cntr_array_check=0;
int *d_last_byte_padding=0;
// total compressed bit count and original character count (device side)
int *finalsize=0;
int *orig_number_of_char=0;
// host buffer used to verify the huffman table round-trip
int *huffman_check = (int *)malloc((MAX_CHAR)*(MAX_CHAR-1) *sizeof(int));
bool *d_bool = 0;
bool *h_bool = 0;
// Compact the per-character bit runs (each padded to a 255-slot stride in
// compressedfile_array) into one contiguous bit stream. Each thread handles
// one input character: it sums the lengths of all preceding characters to
// find its output offset, then copies its own block_cntr_array[i] bits.
// NOTE(review): every thread recomputes its prefix sum serially, so the
// total work is O(number_of_char^2); a parallel prefix scan (e.g. CUB's
// DeviceScan) would be the scalable replacement.
__global__ void final_compression(int *block_cntr_array,bool *compressedfile_array,bool *finalcompressed_array,int number_of_char)
//__device__ void final_compression(int *block_cntr_array,bool *compressedfile_array,bool *finalcompressed_array)
{
int index_blocks=blockIdx.x*blockDim.x+threadIdx.x;
// source offset: each character's bits start at a fixed 255-slot stride
int index_file=(blockIdx.x*blockDim.x+threadIdx.x)*255;
int final_index=0;
if(index_blocks < number_of_char)
{
// exclusive prefix sum of the code lengths of all earlier characters
for(int i=0;i<index_blocks;i++)
{
final_index = final_index+ block_cntr_array[i];
}
// copy this character's code bits to its packed position
for(int i=0;i<block_cntr_array[index_blocks];i++)
{
finalcompressed_array[final_index+i]=compressedfile_array[index_file+i];
}
}
}
//__global__ void computearray_size(int* block_cntr_array,int *finalsize,int *orig_number_of_char)
// Serially sum the per-character code lengths into *finalsize (total bits
// of the compressed stream).
// NOTE(review): this is a __device__ helper called from kernel code; if it
// is invoked by more than one thread, the unsynchronized writes to
// *finalsize race -- presumably it should be guarded to a single thread or
// replaced with a reduction; verify the call site.
__device__ void computearray_size(int* block_cntr_array,int *finalsize,int *orig_number_of_char)
{
*finalsize = 0;
for(int i=0;i<*orig_number_of_char;i++)
{
(*finalsize)=(*finalsize) + block_cntr_array[i];
}
}
/*__global__ void compress_file_gpu(unsigned char *d_input,char *compressedfile_array,int *char_huffman_table2,int *block_cntr_array,int* d_last_byte_padding)
{
int write_counter=0,block_counter=0; //how many bits have been written in specific byte
unsigned char input_char;
unsigned char output_char = 0x0;
unsigned char end_of_file = 255;
unsigned char mask = 0x01; //00000001;
int index_file=(blockIdx.x*blockDim.x+threadIdx.x)*255;
int index_blocks=blockIdx.x*blockDim.x+threadIdx.x;
//for(int i=0;i<MAX_CHAR;i++)
//{
//int *row = (int*)((char*)char_huffman_table2 + i * pitch);
//for (int c = 0; c < MAX_CHAR-1; ++c) {
// char_huffman_table_gpu[i][c] = row[c];
//}
//}
input_char = d_input[index_blocks];
for(int i = 0 ; i < (MAX_CHAR - 1) ; i++)
{
if(char_huffman_table2[input_char*255+i] == 0) //detect if current character on particular position has 0 or 1
{
output_char = output_char << 1; //if 0 then shift bits one position to left (last bit after shifting is 0)
write_counter++;
block_counter++;
}
else if(char_huffman_table2[input_char*255+i] == 1)
{
output_char = output_char << 1; //if 1 then shift bits one position to left...
output_char = output_char | mask; //...and last bit change to: 1
write_counter++;
block_counter++;
}
else //-1
{
//if(input_char == end_of_file) //if EOF is detected then write current result to file
//{
if(write_counter != 0)
{
output_char = output_char << (8-write_counter);
compressedfile_array[index_file]=output_char;
output_char = 0x0;
}
else //write_counter == 0
{
compressedfile_array[index_file]=output_char;
}
//}
break;
}
if(write_counter == 8) //if result achieved 8 (size of char) then write it to compressed_file
{
compressedfile_array[index_file]=output_char;
output_char = 0x0;
write_counter = 0;
}
}
block_cntr_array[index_blocks]=block_counter;
*d_last_byte_padding = write_counter; //to decompress file we have to know how many bits in last byte have been written
//update_config(write_counter); //TODO to zakomentowac przy ostatecznych pomiarach
}*/
//__global__ void compress_file_gpu(unsigned char *d_input,bool *compressedfile_array,int *char_huffman_table2,int *block_cntr_array,int* d_last_byte_padding)
// Kernel: one thread per input byte.  Thread t looks up the Huffman code of
// d_input[t] in the flattened 256x255 table char_huffman_table2 (each row is
// a code terminated by -1) and writes the code bits as bools into
// compressedfile_array[t*255 .. t*255+len-1]; the code length goes into
// block_cntr_array[t] so a later pass can pack the variable-length codes.
// NOTE(review): every in-range thread calls computearray_size() at the end —
// looks redundant (one call would do); confirm against that device helper.
__global__ void compress_file_gpu(unsigned char *d_input,bool *compressedfile_array,int *char_huffman_table2,int *block_cntr_array,int* d_last_byte_padding,int *finalsize,int *orig_number_of_char,int number_of_char)
{
//int write_counter=0,
int block_counter=0; //how many bits have been written in specific byte
unsigned char input_char;
//unsigned char output_char = 0x0;
//unsigned char end_of_file = 255;
//unsigned char mask = 0x01; //00000001;
// Each thread owns a 255-bool slice of the output and one input byte.
int index_file=(blockIdx.x*blockDim.x+threadIdx.x)*255;
int index_blocks=blockIdx.x*blockDim.x+threadIdx.x;
if(index_blocks < number_of_char)
{
//for(int i=0;i<MAX_CHAR;i++)
//{
//int *row = (int*)((char*)char_huffman_table2 + i * pitch);
//for (int c = 0; c < MAX_CHAR-1; ++c) {
// char_huffman_table_gpu[i][c] = row[c];
//}
//}
input_char = d_input[index_blocks];
for(int i = 0 ; i < (MAX_CHAR - 1) ; i++)
{
if(char_huffman_table2[input_char*255+i] == 0) //detect if current character on particular position has 0 or 1
{
//output_char = output_char << 1; //if 0 then shift bits one position to left (last bit after shifting is 0)
compressedfile_array[index_file+i] = false;
//write_counter++;
block_counter++;
}
else if(char_huffman_table2[input_char*255+i] == 1)
{
//output_char = output_char << 1; //if 1 then shift bits one position to left...
//output_char = output_char | mask; //...and last bit change to: 1
//write_counter++;
compressedfile_array[index_file+i] = true;
block_counter++;
}
else //-1
{
/*if(input_char == end_of_file) //if EOF is detected then write current result to file
{
if(write_counter != 0)
{
output_char = output_char << (8-write_counter);
compressedfile_array[index_file]=output_char;
output_char = 0x0;
}
else //write_counter == 0
{
compressedfile_array[index_file]=output_char;
}
}*/
break;
}
/*if(write_counter == 8) //if result achieved 8 (size of char) then write it to compressed_file
{
compressedfile_array[index_file]=output_char;
output_char = 0x0;
write_counter = 0;
}*/
}
block_cntr_array[index_blocks]=block_counter;
//*d_last_byte_padding = write_counter; //to decompress file we have to know how many bits in last byte have been written
//update_config(write_counter); //TODO: comment this out for the final measurements
computearray_size(block_cntr_array,finalsize,orig_number_of_char);
//final_compression(block_cntr_array,compressedfile_array,finalcompressed_array);
}
}
/* Pack the GPU-produced bit stream (one bool per Huffman bit) into bytes,
   MSB-first, and write them to COMPRESSED_FILE_NAME_GPU.
   final_compressed_cpu : *finalsize_cpu bools, one per code bit.
   Side effect: sets the global last_byte_padding_gpu to the number of valid
   bits in the final (padded) byte so the decompressor can stop correctly. */
void write_GPU_compressed(bool *final_compressed_cpu,int *finalsize_cpu)
{
    FILE *compressed_file;
    int write_counter = 0;            /* bits accumulated in the current byte */
    unsigned char output_char = 0x0;  /* bit accumulator */
    unsigned char mask = 0x01;        /* 00000001 */
    compressed_file = fopen(COMPRESSED_FILE_NAME_GPU, "wb");
    if (compressed_file == NULL)
    {
        perror ("Error reading file");
        return;  /* BUG FIX: original fell through and called fclose(NULL) */
    }
    for (int i = 0; i < (*finalsize_cpu); i++)
    {
        /* shift the next code bit in (bools are only ever 0 or 1) */
        output_char = output_char << 1;
        if (final_compressed_cpu[i])
            output_char = output_char | mask;
        write_counter++;
        if (write_counter == 8)  /* a full byte is ready */
        {
            printf("Compressed char in decimal is %d \n", output_char);
            putc(output_char, compressed_file);
            output_char = 0x0;
            write_counter = 0;
        }
    }
    if (write_counter != 0)  /* flush the final partial byte, left-aligned */
    {
        output_char = output_char << (8 - write_counter);
        printf("Compressed char in decimal is %d \n", output_char);
        putc(output_char, compressed_file);
        output_char = 0x0;
    }
    fclose(compressed_file);
    last_byte_padding_gpu = write_counter;  /* decompressor needs the bit count */
}
/* Dump the Huffman table that was copied back from the device (copiedarray2),
   one row per character that has a code; -1 entries are unused slots. */
void print_dchar_huffman_table()
{
    printf("\n dchar huffman table ");
    getchar();
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    printf("Huffman table:\n");
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    for (int sym = 0; sym < MAX_CHAR; sym++)
    {
        bool printed_any = false;
        for (int pos = 0; pos < (MAX_CHAR - 1); pos++)
        {
            if (copiedarray2[sym][pos] == -1)
                continue;  /* unused slot */
            if (!printed_any)
            {
                /* label the row once; newline is spelled out for readability */
                if (sym == 10)
                    printf("\\n:\t");
                else
                    printf("%c:\t", sym);
            }
            printed_any = true;
            printf("%d ", copiedarray2[sym][pos]);
        }
        if (printed_any) printf("\n");
    }
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    getchar();
}
//Huffman table construction+++++++++++++++++++++++++++++++++++++++++++++++++++
/* Stable straight-insertion sort of the first `length` forest pointers,
   ordered by node weight, descending (heaviest first). */
void insertion_sort(node **forest, int length)
{
    for (int i = 1; i < length; i++)
    {
        node *key = forest[i];
        int pos = i - 1;
        /* shift lighter entries one slot right until key's place is found */
        while (pos >= 0 && forest[pos]->weight < key->weight)
        {
            forest[pos + 1] = forest[pos];
            pos--;
        }
        forest[pos + 1] = key;
    }
}
/* List every symbol that occurred in the input alongside its count
   (reads the global char_frequency table). */
void print_char_frequency()
{
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    printf("character frequency:\n");
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    for (int sym = 0; sym < MAX_CHAR; sym++)
    {
        if (char_frequency[sym] == 0)
            continue;  /* symbol never seen */
        if (sym == 10)  /* newline: print escaped so the listing stays tidy */
            printf("%d)\tval: \\n\tfreq: %d\n", sym, char_frequency[sym]);
        else
            printf("%d)\tval: %c\tfreq: %d\n", sym, sym, char_frequency[sym]);
    }
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
}
/* Print the host-side Huffman table: one row per character that has a code;
   -1 entries mark unused code slots. */
void print_char_huffman_table()
{
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    printf("Huffman table:\n");
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    for (int sym = 0; sym < MAX_CHAR; sym++)
    {
        bool printed_any = false;
        for (int pos = 0; pos < (MAX_CHAR - 1); pos++)
        {
            if (char_huffman_table[sym][pos] == -1)
                continue;  /* unused slot */
            if (!printed_any)
            {
                /* row label, with newline spelled out */
                if (sym == 10)
                    printf("\\n:\t");
                else
                    printf("%c:\t", sym);
            }
            printed_any = true;
            printf("%d ", char_huffman_table[sym][pos]);
        }
        if (printed_any) printf("\n");
    }
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
}
/* In-order dump of the Huffman tree: leaves print their character,
   internal nodes (val == NULL, i.e. 0) print "NULL".
   NOTE(review): a leaf whose character code is 0 would also print as NULL —
   acceptable here because byte 0 never gets a leaf in text input; confirm. */
void printout_inorder(node * tree)
{
    if (tree == NULL) return;  /* BUG FIX: tolerate an empty/NULL tree */
    printout_inorder(tree->left);
    if (tree->val != NULL)
    {
        if (tree->val == '\n')  /* newline printed escaped */
        {
            printf("weight: %d\tvalue: \\n\n", tree->weight);
        }
        else
        {
            printf("weight: %d\tvalue: %c\n", tree->weight, tree->val);
        }
    }
    else
    {
        printf("weight: %d\tvalue: NULL\n", tree->weight);
    }
    printout_inorder(tree->right);
}
/* Load INPUT_FILE_NAME into the global h_input buffer and build the global
   char_frequency histogram; sets the global number_of_char.
   End-of-stream detection relies on getc() returning EOF (-1), which wraps
   to 255 when stored in an unsigned char — so byte value 255 doubles as the
   sentinel.  NOTE(review): a data byte of 255 would truncate the read, and a
   file longer than MAX_FILE_CHARS would overflow h_input — confirm inputs. */
void read_file()
{
FILE *file;
unsigned char end_of_file = 255;
unsigned char c;
file = fopen(INPUT_FILE_NAME, "r");
if (file==NULL)
{
perror ("Error reading file");
}
else
{
//storing the file contents into h_input
h_input = (unsigned char *)malloc(MAX_FILE_CHARS*sizeof(char));
do
{
c = getc (file);
//if(c == end_of_file) printf("\n Found EOF \n");
//printf("c before putting into array is %c\n",c);
h_input[number_of_char]=c;
number_of_char++;
char_frequency[c]++;
} while (c != end_of_file);
fclose (file);
}
// h_input[number_of_char] = end_of_file;
char_frequency[end_of_file] = 0; //to avoid problems with several EOF in one file
//EOF is not needed ; so going to decrement
number_of_char--;
}
/* Length of the code built so far: index of the first -1 slot in path. */
static int path_depth(const int *path)
{
    int depth = 0;
    for (int i = 0; i < MAX_CHAR - 1; i++)
    {
        if (path[i] == -1)
            break;
        depth++;
    }
    return depth;
}
/* Pre-order walk of the Huffman tree: records the root-to-leaf bit path
   (left edge = 1, right edge = 0) into the global char_huffman_table row
   of every leaf character.  `path` is a scratch buffer of MAX_CHAR-1 ints,
   -1-terminated, mutated and restored around each recursive descent. */
void traverse_preorder(node *root, int *path)
{
    if (root->val != NULL)  /* leaf: store its code */
    {
        for (int i = 0; i < MAX_CHAR - 1; i++)
        {
            char_huffman_table[root->val][i] = path[i];
        }
    }
    if (root->left)  /* left edge emits bit 1 */
    {
        int depth = path_depth(path);  /* was an inline duplicated scan */
        path[depth] = 1;
        traverse_preorder(root->left, path);
        path[depth] = -1;  /* undo before exploring the sibling */
    }
    if (root->right)  /* right edge emits bit 0 */
    {
        int depth = path_depth(path);
        path[depth] = 0;
        traverse_preorder(root->right, path);
        path[depth] = -1;
    }
}
/* Fill char_huffman_table by walking the finished Huffman tree.
   The scratch path starts all -1 (meaning "slot unused"). */
void construct_huffman_table(node *root)
{
    int path[MAX_CHAR - 1];
    for (int slot = 0; slot < MAX_CHAR - 1; slot++)
        path[slot] = -1;
    traverse_preorder(root, path);
}
/* Build the Huffman tree from the global char_frequency histogram, print it
   in-order, and fill char_huffman_table via construct_huffman_table().
   Tree nodes are heap-allocated and deliberately not freed here (see the
   commented-out delete at the bottom). */
void build_binary_tree()
{
    int forest_counter = 0;
    node *forest[MAX_CHAR];
    /* one leaf per symbol that actually occurs in the input */
    for (int i = 0; i < MAX_CHAR; i++)
    {
        if (char_frequency[i] != 0)
        {
            node *curr = (node *)malloc(sizeof(node));
            curr->left = curr->right = NULL;
            curr->val = i;
            curr->weight = char_frequency[i];
            forest[forest_counter] = curr;
            forest_counter++;
        }
    }
    /* BUG FIX: an empty input left forest[0] uninitialized and the
       traversals below dereferenced garbage. */
    if (forest_counter == 0) return;
    insertion_sort(forest, forest_counter); /* descending by weight */
    while (forest_counter > 1)  /* repeatedly merge the two lightest trees */
    {
        node *parent;
        parent = (node *)malloc(sizeof(node));
        parent->right = forest[forest_counter-1];
        parent->left = forest[forest_counter-2];
        parent->weight = forest[forest_counter-1]->weight + forest[forest_counter-2]->weight;
        parent->val = NULL;  /* internal node: no character */
        forest[forest_counter-1] = NULL;
        forest[forest_counter-2] = parent;
        forest_counter--;
        insertion_sort(forest, forest_counter);
    }
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    printf("Huffman tree (inorder traversal sequence):\n");
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    printout_inorder(forest[0]);
    printf("+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\n");
    construct_huffman_table(forest[0]);
    //delete_binary_tree_postorder(forest[0]); //after building Huffman table we do not need Huffman tree anymore
}
/* Zero the frequency histogram and mark every Huffman-code slot unused (-1).
   (Both tables are indexed by the same symbol, so one pass covers both.) */
void array_initializer()
{
    for (int sym = 0; sym < MAX_CHAR; sym++)
    {
        char_frequency[sym] = 0;
        for (int pos = 0; pos < MAX_CHAR - 1; pos++)
            char_huffman_table[sym][pos] = -1;
    }
}
//+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
//Calculation on CPU+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/* CPU reference compressor: reads INPUT_FILE_NAME byte by byte, emits each
   byte's Huffman code (from char_huffman_table) into COMPRESSED_FILE_NAME,
   packing bits MSB-first into bytes.  End-of-stream relies on getc()'s EOF
   (-1) wrapping to 255 in an unsigned char.
   Side effect: sets the global last_byte_padding to the number of valid bits
   in the final byte, which decompress_file() needs. */
void compress_file()
{
    FILE *input_file;
    FILE *compressed_file;
    unsigned char output_char;      /* bit accumulator */
    int write_counter;              /* bits currently held in output_char */
    unsigned char input_char;
    unsigned char end_of_file;
    unsigned char mask;             /* 00000001 */
    input_file = fopen(INPUT_FILE_NAME, "rb");
    compressed_file = fopen(COMPRESSED_FILE_NAME, "wb"); // apend file (add text to a file or create a file if it does not exist)
    output_char = 0x0;
    write_counter = 0;
    end_of_file = 255;
    mask = 0x01;
    if ((input_file==NULL)||(compressed_file==NULL))
    {
        perror ("Error reading file");
        /* BUG FIX: close whichever stream DID open so it is not leaked */
        if (input_file) fclose(input_file);
        if (compressed_file) fclose(compressed_file);
        return;
    }
    do
    {
        input_char = getc (input_file); //read one character from input file
        for(int i = 0 ; i < (MAX_CHAR - 1) ; i++)
        {
            if(char_huffman_table[input_char][i] == 0) //detect if current character on particular position has 0 or 1
            {
                output_char = output_char << 1; //if 0 then shift bits one position to left (last bit after shifting is 0)
                write_counter++;
            }
            else if(char_huffman_table[input_char][i] == 1)
            {
                output_char = output_char << 1; //if 1 then shift bits one position to left...
                output_char = output_char | mask; //...and last bit change to: 1
                write_counter++;
            }
            else //-1: end of this character's code
            {
                if(input_char == end_of_file) //if EOF is detected then write current result to file
                {
                    if(write_counter != 0)
                    {
                        output_char = output_char << (8-write_counter); /* left-align the tail bits */
                        printf("Compressed char in decimal is %d \n", output_char);
                        putc(output_char, compressed_file);
                        output_char = 0x0;
                    }
                    else //write_counter == 0
                    {
                        printf("Compressed char in decimal is %d \n", output_char);
                        putc(output_char, compressed_file);
                    }
                }
                break;
            }
            if(write_counter == 8) //if result achieved 8 (size of char) then write it to compressed_file
            {
                printf("Compressed char in decimal is %d \n", output_char);
                putc(output_char, compressed_file);
                output_char = 0x0;
                write_counter = 0;
            }
        }
    } while (input_char != end_of_file);
    fclose (input_file);
    fclose(compressed_file);
    last_byte_padding = write_counter; //to decompress file we have to know how many bits in last byte have been written
    //update_config(write_counter); //TODO: comment this out for the final measurements
}
/* Debug helper: dump finalsize_cpu raw chars to the GPU output file while
   echoing each to stdout.  (Currently unused; write_GPU_compressed is the
   real bit-packing writer.) */
void print_gpu_compressed_file(char *final_compressed_cpu,int finalsize_cpu)
{
    FILE *compressed_file;
    compressed_file = fopen(COMPRESSED_FILE_NAME_GPU, "wb");
    if (compressed_file == NULL)
    {
        /* BUG FIX: original wrote through a NULL FILE* when fopen failed */
        perror ("Error reading file");
        return;
    }
    for (int i = 0; i < finalsize_cpu; i++)
    {
        char c = final_compressed_cpu[i];
        printf("i is %d and c is %c", i, c);
        putc(c, compressed_file);
    }
    fclose(compressed_file);
}
/* CPU decompressor: reads COMPRESSED_FILE_NAME one bit at a time, grows a
   -1-terminated bit pattern, and whenever the pattern exactly matches a row
   of char_huffman_table writes that character to DECOMPRESSED_FILE_NAME.
   Uses the globals number_of_char and last_byte_padding (set by the
   compressor) to detect the true end of the bit stream. */
void decompress_file()
{
FILE *compressed_file;
FILE *decompressed_file;
unsigned char end_of_file = 255;
unsigned char mask = 0x7F; //01111111;
unsigned char curr;
unsigned char next;
int written_char_counter=0;
// pattern[] holds the bits read so far for the current code; -1 = unused slot
int pattern[MAX_CHAR - 1];
for(int i = 0 ; i < (MAX_CHAR - 1); i++)
{
pattern[i] = -1;
}
compressed_file = fopen(COMPRESSED_FILE_NAME, "rb");
decompressed_file = fopen(DECOMPRESSED_FILE_NAME, "wb");
if ((compressed_file==NULL)||(decompressed_file==NULL))
{
perror ("Error reading file");
}
else
{
int bit_counter=0;
unsigned char first_bit;
bool read_next = true;
curr = getc (compressed_file);
next = getc (compressed_file); //we have to read one byte in advance due to padding
// for(int i = 0 ; i < (MAX_CHAR - 1) ; i++) //builds a pattern and checks if it matches to char_huffman_table
int pattern_counter=-1;
while(pattern_counter < (MAX_CHAR - 1))
{
pattern_counter++;
// curr | 0x7F is 0xFF iff the MSB of curr is set — that MSB is the next bit
first_bit = curr | mask; //check if first bit is 0 or 1
curr = curr << 1;
if(bit_counter == 7)
{
// current byte exhausted: move to the pre-fetched one
bit_counter = 0;
curr = next;
if(read_next)
{
next = getc (compressed_file);
if(next == end_of_file)
{
// last physical byte reached: honor the compressor's padding
if((number_of_char - written_char_counter) < 8)
{
read_next = false;
bit_counter = 7 - last_byte_padding;
}
}
}
if((curr == end_of_file) && ((number_of_char - written_char_counter) < 8))
{
break;
}
}
else
{
bit_counter++;
}
if(first_bit == 255)
{
pattern[pattern_counter] = 1;
}
else
{
pattern[pattern_counter] = 0;
}
// try to match the pattern-so-far against every character's full code
bool flag = true;
for(int j = 0 ; j < MAX_CHAR ; j++)
{
flag = true;
for(int k = 0 ; k < (MAX_CHAR - 1) ; k++)
{
if(char_huffman_table[j][k] != pattern[k])
{
flag = false;
break;
}
}
if(flag == true)
{
// full code recognized: emit the character and restart the pattern
written_char_counter++;
putc(j, decompressed_file);
for(int i = 0 ; i < (MAX_CHAR - 1); i++)
{
pattern[i] = -1;
}
pattern_counter = -1;
break;
}
}
}
fclose (compressed_file);
fclose (decompressed_file);
}
}
/* Host-side setup, in strict order: clear the global tables, read the input
   file (fills h_input / char_frequency / number_of_char), print the
   frequency stats, build the Huffman tree and code table, then print it. */
void initialize()
{
array_initializer();
read_file();
print_char_frequency();
build_binary_tree();
print_char_huffman_table();
}
/*__global__ void compress(int *d_input,int number_of_char,int *d_char_huffman_table,int MAX_CHAR)
{
int i=0;
extern __shared__ int my2DArray[32][32]; //size need to be coded a development time though
my2DArray[threadIdx.x][threadIdx.y] = flatArray[blockDim.x * threadIdx.y + threadIdx.x];
}*/
/* Demo kernel: read a 2x2 int matrix out of pitched device memory into a
   thread-local array (the values are read but otherwise discarded). */
__global__ void read2darray(int *devPtr,int pitch)
{
    int elements[2][2];
    for (int row = 0; row < 2; row++) {
        /* pitch is in bytes, so step the row pointer through char* */
        int *rowPtr = (int *)((char *)devPtr + row * pitch);
        for (int col = 0; col < 2; col++)
            elements[row][col] = rowPtr[col];
    }
}
/* Demo kernel: clear both flags so the host can verify device writes land. */
__global__ void check_bool(bool *d_bool)
{
    for (int idx = 0; idx < 2; idx++)
        d_bool[idx] = false;
}
/* Print the Huffman codes (from huffman_check, the flattened 256x255 table
   copied back from the device) for the first four characters of h_input.
   De-duplicated: the original repeated the same loop four times. */
void print_huffman()
{
    printf(" \n Huffman after copying back \n " );
    for (int n = 0; n < 4; n++)
    {
        unsigned char ch = h_input[n];
        for (int i = 0; i < MAX_CHAR - 1; i++)
        {
            /* -1 terminates the code for this character */
            if (huffman_check[ch*255+i] != -1)
                printf ("\t%c code is %d \n", ch, huffman_check[ch*255+i]);
        }
    }
}
/* Driver: compress the input file on the GPU (per-byte Huffman codes packed
   in two kernel passes), then run the CPU compressor and decompressor for a
   timing comparison.  NOTE(review): several locals (end_of_file,
   elapsed_time_Cont, timer2, seconds, block) are declared but never used. */
int main(int argc, char* argv[])
{
int *finalsize_cpu=0;
unsigned char end_of_file = 255;
printf("start\n");
initialize();
cudaEvent_t start, stop; // cuda events to measure time
float elapsed_time,elapsed_time_Cont;
cudaEventCreate(&start); // timing objects
cudaEventCreate(&stop);
unsigned int timer2=0;
time_t seconds;
// In initialize in cpu,we put the file chars into array, fill huffman table and char_freq_arrays
//copy the input contents into an array
cudaMalloc((void **)&d_input,number_of_char*sizeof(char));
checkCUDAError("Error in allocating d_input");
cudaMemcpy(d_input,h_input,number_of_char*sizeof(char),cudaMemcpyHostToDevice);
checkCUDAError("Error in copying d_input");
// Allocate space for the compressed file to be used in GPU
cudaMalloc((void **)&compressedfile_array,number_of_char*(MAX_CHAR -1)*sizeof(bool));
checkCUDAError("Error in allocating compressedfile_array");
//
// Flattened 256x255 Huffman table goes to the device; copy it straight back
// into huffman_check as a round-trip sanity check.
cudaMalloc((void **)&d_char_huffman_table,(MAX_CHAR)*(MAX_CHAR-1) * sizeof(int));
checkCUDAError("Error in allocating d_char_huffman_table");
cudaMemcpy(d_char_huffman_table,char_huffman_table,(MAX_CHAR)*(MAX_CHAR-1) * sizeof(int),cudaMemcpyHostToDevice);
checkCUDAError("Error in copying d_char_huffman_table");
cudaMemcpy(huffman_check,d_char_huffman_table,(MAX_CHAR)*(MAX_CHAR-1) * sizeof(int),cudaMemcpyDeviceToHost);
checkCUDAError("Error in copying back");
cudaMalloc((void **)&block_cntr_array,number_of_char*sizeof(int));
checkCUDAError("Error in allocating block_cntr_array");
cudaMalloc((void **)&d_last_byte_padding,sizeof(int));
checkCUDAError("Error in allocating d_last_byte_padding");
cudaMalloc((void **)&finalsize,sizeof(int));
checkCUDAError("Error in allocating finalsize");
cudaMalloc((void **)&orig_number_of_char,sizeof(int));
checkCUDAError("Error in allocating orig_number_of_char");
cudaMemcpy(orig_number_of_char,&number_of_char,sizeof(int),cudaMemcpyHostToDevice);
checkCUDAError("Error in copying orig_number_of_char");
// check if i can make a boolean array
h_bool=(bool *) malloc(2*sizeof(bool));
h_bool[0]=true;
h_bool[1]=true;
printf("bool1 is %d and bool2 is %d \n",h_bool[0],h_bool[1]);
cudaMalloc((void **)&d_bool,2*sizeof(bool));
checkCUDAError("Error in d_bool");
cudaMemcpy(d_bool,h_bool,2*sizeof(bool),cudaMemcpyHostToDevice);
checkCUDAError("Error in copying d_bool");
//check_bool<<<1,1>>>(d_bool);
checkCUDAError("Error in kernel changing d_bool");
cudaThreadSynchronize();
checkCUDAError("Error in cudaThreadSynchronize");
cudaMemcpy(h_bool,d_bool,2*sizeof(bool),cudaMemcpyDeviceToHost);
checkCUDAError("Error in copying d_bool back");
printf("Now bool1 is %d and bool2 is %d \n",h_bool[0],h_bool[1]);
// Small host-side pattern-matching experiment on the round-tripped bools.
int checkhuff[2][3]= { {0, 0, 0},
{1, 1, 1} };
bool flag = true;
for(int j=0;j<2;j++)
{
flag = true;
for(int k=0;k<2;k++)
{
printf("h_bool is %d \t checkhuff is %d \n",int(h_bool[k]),checkhuff[j][k]);
if(checkhuff[j][k] != int(h_bool[k]))
{
flag = false;
break;
}
}
if(flag == true)
{
printf("pattern for %d is found\n",checkhuff[j][0]);
}
}
//copy and send the huffman table as a 2d array to GPU Device
//int *darray=0;
//size_t pitch;
//cudaMallocPitch( (void**)&darray, &pitch, 2 * sizeof(int), 2);
//cudaMemcpy2D(darray,pitch,harray,2*sizeof(int),2*sizeof(int),2,cudaMemcpyHostToDevice);
//cudaMalloc((void **)&darray,4*sizeof(int));
//cudaMemcpy(darray,harray,4*sizeof(int),cudaMemcpyHostToDevice);
//cudaMemcpy2D(copiedarray,2*sizeof(int),darray,pitch,pitch,2,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(copiedarray,2*sizeof(int),darray,pitch,2*sizeof(int),2,cudaMemcpyDeviceToHost);
//printf("After copying back %d, \t %d, \t %d, \t %d \n",copiedarray[0][0],copiedarray[0][1],copiedarray[1][0],copiedarray[1][1]);
//int *darray_2d=0;
//cudaMalloc((void **)&darray_2d,4*sizeof(int));
//cudaMemcpy(darray,harray,4*sizeof(int),cudaMemcpyHostToDevice);
// read2darray<<<1,1>>>(darray, pitch);
//size_t pitch2;
//cudaMallocPitch( (void**)&d_char_huffman_table, &pitch2, (MAX_CHAR-1) * sizeof(int), MAX_CHAR);
//cudaMemcpy2D(d_char_huffman_table,pitch2,char_huffman_table,(MAX_CHAR-1) * sizeof(int),(MAX_CHAR-1) * sizeof(int),MAX_CHAR,cudaMemcpyHostToDevice);
//cudaMemcpy2D(char_huffman_table_gpu,(MAX_CHAR-1) * sizeof(int),char_huffman_table,(MAX_CHAR-1) * sizeof(int),(MAX_CHAR-1) * sizeof(int),MAX_CHAR,cudaMemcpyHostToDevice);
//checkCUDAError("Error in char_huffman_table_gpu");
//cudaMemcpy2D(copiedarray2,(MAX_CHAR-1)*sizeof(int),d_char_huffman_table,pitch2,(MAX_CHAR-1)*sizeof(int),MAX_CHAR,cudaMemcpyDeviceToHost);
//cudaMemcpy2D(copiedarray2,(MAX_CHAR-1)*sizeof(int),char_huffman_table_gpu,(MAX_CHAR-1) * sizeof(int),(MAX_CHAR-1) * sizeof(int),MAX_CHAR,cudaMemcpyDeviceToHost);
//cudaMemcpy(orig_number_of_char,&number_of_char,sizeof(int),cudaMemcpyHostToDevice);
// checkCUDAError("Error in copiedarray2");
// print_dchar_huffman_table();
printf("\n the number of characters in the input file is %d \n",number_of_char);
getchar();
/*for(int i=0;i<number_of_char;i++)
{
if( h_input[i] == end_of_file ) printf(" EOF \n");
printf(" Copying into array: i is %d and c is %c \n",i,h_input[i]);
}*/
getchar();
print_huffman();
// One thread per input byte; ceil-divide into blocks.
int no_of_blocks = (number_of_char + BLOCK_SIZE -1)/BLOCK_SIZE;
printf("no_of_blocksis %d \n", no_of_blocks);
if(no_of_blocks == 0) no_of_blocks =1;
//compress_file_gpu<<<number_of_char,1>>>(d_input,compressedfile_array,d_char_huffman_table,block_cntr_array,d_last_byte_padding);
cudaEventRecord(start, 0); // start time
checkCUDAError("Error in cudaEventRecord start \n");
// Pass 1: emit each byte's code bits into its fixed 255-slot slice.
compress_file_gpu<<<no_of_blocks,BLOCK_SIZE>>>(d_input,compressedfile_array,d_char_huffman_table,block_cntr_array,d_last_byte_padding,finalsize,orig_number_of_char,number_of_char);
checkCUDAError("Error in compress_file_gpu \n");
cudaThreadSynchronize();
//cudaMalloc((void **)&block_cntr_array_check,number_of_char*sizeof(int));
//checkCUDAError("Error in allocating block_cntr_array_check");
block_cntr_array_check = (int *) malloc(number_of_char*sizeof(int));
cudaMemcpy(block_cntr_array_check,block_cntr_array,number_of_char*sizeof(int),cudaMemcpyDeviceToHost);
checkCUDAError("Error in copying back block_cntr_array_check");
for(int i=0; i < number_of_char; i++)
{
printf(" block size for i = %d is %d \n",i, block_cntr_array_check[i]);
}
// computearray_size<<<1,1>>>(block_cntr_array,finalsize,orig_number_of_char);
checkCUDAError("Error in Compute array \n");
finalsize_cpu = (int *)malloc(sizeof(int));
cudaMemcpy(finalsize_cpu,finalsize,sizeof(int),cudaMemcpyDeviceToHost);
printf("The final compressed array size is %d \n ", *finalsize_cpu);
checkCUDAError("Error in finalsize_cpu");
int block = *finalsize_cpu;
//allocate space for the final compressed array
cudaMalloc((void **)&finalcompressed_array,((*finalsize_cpu)*sizeof(bool)));
checkCUDAError("cudaMemcpyHostToDevice");
// Pass 2: compact the variable-length slices into one contiguous bit array.
final_compression<<<no_of_blocks,BLOCK_SIZE>>>(block_cntr_array,compressedfile_array,finalcompressed_array,number_of_char);
checkCUDAError("Error in final_compression call \n");
cudaThreadSynchronize();
checkCUDAError("Error in cudaThreadSynchronize \n");
cudaEventRecord(stop, 0);
checkCUDAError("Error in cudaEventRecord stop \n");
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("Time to calculate results: %f ms.\n", elapsed_time); // print out execution time
bool *final_compressed_cpu=0;
final_compressed_cpu = (bool *)malloc((*finalsize_cpu)*sizeof(bool));
cudaMemcpy(final_compressed_cpu,finalcompressed_array,((*finalsize_cpu)*sizeof(bool)),cudaMemcpyDeviceToHost);
checkCUDAError("Error in copying final_compressed_cpu\n");
//print_gpu_compressed_file(final_compressed_cpu,*finalsize_cpu);
printf("The compressed value in binary is ");
// Pack the bit array into bytes and write the GPU-compressed file.
write_GPU_compressed(final_compressed_cpu,finalsize_cpu);
for(int i=0;i<*finalsize_cpu;i++)
// sprintf(compress_file+i,final_compressed_cpu[i]);
//printf("i is %d and val is %d \n",i,final_compressed_cpu[i]);
printf("\n");
// cudaMalloc((void **)&d_char_huffman_table,(MAX_CHAR)*(MAX_CHAR-1)*sizeof(int));
// cudaMemcpy(d_char_huffman_table,char_huffman_table,(MAX_CHAR)*(MAX_CHAR-1)*sizeof(int),cudaMemcpyHostToDevice);
// printf("\n Going to compress on the GPU ");
// compress<<<number_of_char,1>>>(d_input,number_of_char,d_char_huffman_table,MAX_CHAR);
//test ends
printf("compressing on CPU...\n");
//timer2=0;
// CUT_SAFE_CALL(cutCreateTimer(&timer2));
//CUT_SAFE_CALL(cutStartTimer(timer2));
/*clock_t Linuxclock_start,Linuxclock_end; // clock return type
cudaEvent_t CUDAevent_start, CUDAevent_end;
float CUDAEvents_time;
cudaEventRecord(CUDAevent_start, 0 );
cudaEventSynchronize(CUDAevent_start);
Linuxclock_start = clock();*/
/*time_t before,after;
before = time (NULL);*/
clock_t start1, stop1;
start1 = clock();
compress_file();
stop1 = clock();
float elapsedTime = (float)(stop1 - start1) /
(float)CLOCKS_PER_SEC * 1000.0f;
printf( "Time in cpu : %3.1f ms\n", elapsedTime );
printf("Time to calculate results: %f ms.\n", elapsed_time); // print out execution time
printf("Speedup achieved is %lf \n", elapsedTime/elapsed_time );
/*after = time (NULL);
double dif;
dif = difftime (after,before);
printf ("It took you %.9lf seconds to type your name.\n", dif );*/
/*Linuxclock_end = clock();
cudaEventRecord(CUDAevent_end, 0 ); // instrument code to measure end time
cudaEventSynchronize(CUDAevent_end);
cudaEventElapsedTime(&CUDAEvents_time, CUDAevent_start, CUDAevent_end);
printf("CPU Time using CUDA events: %f ms\n", CUDAEvents_time); // time_CUDAEvents is in ms
printf("CPU Time using Linux clock: %f ms\n", ((double) (Linuxclock_end - Linuxclock_start) * 1000)/CLOCKS_PER_SEC); //Linuxclock in sec
cudaEventDestroy(CUDAevent_start);
cudaEventDestroy(CUDAevent_end);*/
//CUT_SAFE_CALL(cutStopTimer(timer2));
//float time2=cutGetAverageTimerValue(timer2);
//printf(" Time on Host %f\n", time2);
printf("decompressing on CPU...\n");
decompress_file();
getchar();
return 0;
}
|
1,263
|
#include <stdio.h>
#include <cuda_runtime.h>
/* Print an ny-by-nx row-major int matrix, 3 characters per cell. */
void printMatrix(int *C, const int nx, const int ny){
    printf("\n Matrix:(%d, %d)\n",nx,ny);
    for (int row = 0; row < ny; row++){
        for (int col = 0; col < nx; col++)
            printf("%3d", C[col + row * nx]);
        printf("\n");
    }
    printf("\n");
}
/* Debug kernel: each thread prints its thread/block ids, its (ix,iy) matrix
   coordinate, its flat global index, and the element it maps to.
   A is an ny-by-nx row-major int matrix on the device. */
__global__ void printThreadIndex(int *A, const int nx, const int ny){
    int ix = threadIdx.x + blockDim.x * blockIdx.x;
    int iy = threadIdx.y + blockDim.y * blockIdx.y;
    unsigned int idx = ix + iy*nx;
    // BUG FIX: guard the ragged grid edge — a ceil-divided grid can spawn
    // threads past the matrix boundary, which would read A out of bounds.
    if (ix < nx && iy < ny)
        printf("thread_id:(%d,%d),block_id:(%d,%d), coordinate(%d,%d) global index %2d ival %2d\n",
               threadIdx.x,threadIdx.y, blockIdx.x,blockIdx.y,ix,iy,idx,A[idx]);
}
/* Fill ip[0..size-1] with its own index: ip[i] = i. */
void initInt(int *ip, int size){
    int k = 0;
    while (k < size) {
        ip[k] = k;
        k++;
    }
}
/* Demo driver: fill an 8x6 int matrix on the host with 0..47, print it,
   copy it to the device, and have each GPU thread print the element it
   maps to.  Returns 0 on completion. */
int main(){
    printf("Starting ....\n");
    // set device (removed the unused cudaDeviceProp local from the original)
    int dev = 0;
    cudaSetDevice(dev);
    // set matrix dimension
    int nx = 8, ny = 6;
    int nxy = nx * ny, nBytes = nxy*sizeof(int);
    // malloc host memory
    int *h_A;
    h_A = (int *)malloc(nBytes);
    // init host matrix with integers 0..nxy-1
    initInt(h_A,nxy);
    printMatrix(h_A,nx,ny);
    // malloc device memory
    int *d_A;
    cudaMalloc((void **)&d_A, nBytes);
    // cp data from host to device
    cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
    // one thread per element; ceil-divide so partial blocks are covered
    dim3 block(4,2);
    dim3 grid((nx + block.x - 1)/block.x, (ny + block.y -1)/block.y);
    // invoke the kernel (device printf is flushed at the sync below)
    printThreadIndex<<<grid,block>>>(d_A,nx,ny);
    cudaDeviceSynchronize();
    // free host and device memory
    cudaFree(d_A);
    free(h_A);
    // reset device
    cudaDeviceReset();
    return 0;
}
|
1,264
|
#include <cuda.h>
/* Size of a block */
#define BLOCK_X 32
#define BLOCK_Y 16
/* Element-wise add on pitched storage: mout = min1 + min2.
   pitch is in bytes, so consecutive rows are pitch/sizeof(float) floats apart. */
__global__ void kernadd (float* mout, float* min1, float *min2, int nx, int ny, size_t pitch)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col >= nx || row >= ny)
        return;  /* ragged grid edge: nothing to do */
    int index = col + row * (pitch / sizeof(float));
    mout[index] = min1[index] + min2[index];
}
/* extern "C" below is because this file follows C++ linking conventions */
/* whereas the companion C file (addition.c) follows C linking conventions */
/* which are different */
/* Element-wise addition of two nx-by-ny float matrices on the GPU:
   mat_out = mat_in1 + mat_in2.  All three host buffers are tightly packed
   row-major (nx floats per row).  extern "C" because the caller
   (addition.c) uses C linkage. */
extern "C" void AddOnGpu(float* mat_out, float *mat_in1, float *mat_in2,\
int nx, int ny) {
    size_t pitch; /* same pitch for all matrices, since they all have same size */
    /* Matrix allocation on device: pitched so every row is aligned */
    float *mat_out_gpu, *mat_in1_gpu, *mat_in2_gpu;
    cudaMallocPitch(&mat_out_gpu, &pitch, sizeof(float)*nx, ny);
    cudaMallocPitch(&mat_in1_gpu, &pitch, sizeof(float)*nx, ny);
    cudaMallocPitch(&mat_in2_gpu, &pitch, sizeof(float)*nx, ny);
    /* host -> device copies of the two inputs */
    cudaMemcpy2D(mat_in1_gpu, pitch, mat_in1, nx*sizeof(float), nx*sizeof(float), ny, cudaMemcpyHostToDevice);
    cudaMemcpy2D(mat_in2_gpu, pitch, mat_in2, nx*sizeof(float), nx*sizeof(float), ny, cudaMemcpyHostToDevice);
    /* A block is BLOCK_X threads wide by BLOCK_Y threads high */
    dim3 block (BLOCK_X, BLOCK_Y);
    /* BUG FIX: the original computed (nx+BLOCK_X-1/BLOCK_X) — a precedence
       bug yielding nx+31 blocks — and used nx where ny belongs for the
       y-dimension.  Correct ceil-divides: */
    int n1 = (nx + BLOCK_X - 1) / BLOCK_X;
    int n2 = (ny + BLOCK_Y - 1) / BLOCK_Y;
    dim3 grid (n1,n2);
    kernadd <<< grid, block >>> (mat_out_gpu,mat_in1_gpu, mat_in2_gpu, nx, ny, pitch);
    cudaThreadSynchronize();
    /* device -> host copy of the result */
    cudaMemcpy2D(mat_out, nx * sizeof(float), mat_out_gpu, pitch, nx * sizeof(float),ny,cudaMemcpyDeviceToHost);
    /* free memory */
    cudaFree(mat_out_gpu);
    cudaFree(mat_in1_gpu);
    cudaFree(mat_in2_gpu);
}
|
1,265
|
//2 layered neural network with LIF neurons
//computing Vm in parallel, Computing Isyn
//all-all connectivity between 2 layers
//starting point of reading mnist set by 'start'
#include<stdio.h>
#include<math.h>
#include<time.h>
#include<stdlib.h>
#include "device_launch_parameters.h"
#include "cuda_runtime_api.h"
// --- LIF neuron physical constants (SI units) ---
#define C 300E-12
#define gL 30E-9
#define VT 20E-3
#define EL -70E-3
#define Rp 3E-3
#define dt 1E-4
// --- synaptic trace decay factors per dt step ---
#define decay 0.9802
#define decays 0.9231
#define decay1 0.9048
#define WT 5E-9
#define w_lat -1.0E-9 //inhibitory lateral strength
//num of neurons in layer 1 and layer 2
#define p 28
#define q 28
#define N_imgs 50000
#define N2 10 //no. of neurons in 2nd layer
//Convolution parameters
#define Nw 3
#define L (p-Nw+1) //square matrix
#define N1 (12*L*L) //no. of neurons in 1st layer
#define Nthrds 1024 //use max no. of threads available per SM
#define Nsyns N1*N2 //no. of connections
#define T 0.1
#define M 1000
//Training parameters:
#define r 20.0E-11 //Learning Rate for 100ms
#define max_epochs 20 //1 complete presentation of all images
//Variables for image reading
unsigned char *pix_spks_d;
int *d_imgs_lin, img_lin[N_imgs*(p*q+1)];
int test_set[N_imgs][p*q+1];
// Device-resident dataset and per-pixel synaptic waveforms
__device__ int d_imgs[N_imgs][p*q+1];
__device__ double img_spks[p*q][M];
__device__ double syn1[256], syn1s[256], syn[256][M];
__device__ unsigned char in_spk[N1];
// Per-synapse currents and weights between layer 1 and layer 2
__device__ double Isyn[N1][N2], weight[N1][N2];
__device__ double Isyn_tot[N2], Isyn_lat[N2];
__device__ double I_lat[N2];
//weight update variables:
__device__ unsigned char D_op[N2][M];
__device__ signed char err[N2];
__device__ unsigned char Y_op[N2];
__device__ double del_w[N1][N2];
__device__ double ci[N1], d_hat[N1];
__device__ double cis[N1], norm_dh;
__device__ double cil[N2], cils[N2];
__device__ double d_hat_sq[N1];
//Neuron variables:
__device__ int ref_time1[N1],ref_time2[N2];
__device__ double Vm1[N1],Vm2[N2];
////////////CUDA Kernels///////////
/* Copy the flat image buffer into the 2D device array d_imgs.
   Serial loops: intended to run with a single thread. */
__global__ void img_readKernel(int *img)
{
    const int row_len = p*q + 1;  /* label followed by p*q pixels */
    for (int img_idx = 0; img_idx < N_imgs; img_idx++)
        for (int col = 0; col < row_len; col++)
            d_imgs[img_idx][col] = img[img_idx*row_len + col];
}
__device__ unsigned char pix_spks[256][M];
unsigned char pix_spks_h[256*M];
//uniformly spaced spikes
/* Single-thread kernel: copy the 256 precomputed per-intensity spike trains
   from the host buffer into device pix_spks, then precompute the synaptic
   waveform syn[intensity][t] = fast trace - slow trace over M time steps.
   NOTE(review): the parameter shadows the file-scope host pointer of the
   same name — intentional here, but easy to confuse. */
__global__ void convert2Spks(unsigned char *pix_spks_d)
{
for(int i=0; i<256; i++) {
syn1[i]=0.0; syn1s[i]=0.0;
for(int l=0; l<M; l++) {
pix_spks[i][l]=pix_spks_d[i*M+l];
}
}
//Compute the synaptic kernels:
for(int i=0; i<256; i++) {
for(int l=0; l<M; l++) {
// decay both traces each step; a spike bumps both by 1
syn1[i]*=decay; syn1s[i]*=decays;
if(pix_spks[i][l]==1) {
syn1[i]+=1.0;
syn1s[i]+=1.0;
}
syn[i][l]=syn1[i]-syn1s[i];
}
}
}
unsigned char pixspks[256*M];
/* Copy time step i of the precomputed synaptic waveform for each pixel of
   image k into img_spks.  One thread per pixel (tx). */
__global__ void pix2spks(int k, int i)
{
    int tx=threadIdx.x+blockIdx.x*blockDim.x;
    if(tx<(p*q)) {
        /* d_imgs[k][0] is the label, pixels start at index 1 */
        img_spks[tx][i]=syn[d_imgs[k][tx+1]][i];
    }
    // BUG FIX: __syncthreads() sat inside the tx<(p*q) branch; a barrier in
    // divergent control flow is undefined behavior.  Hoisted out of the
    // guard so every thread reaches it.
    __syncthreads();
}
/* Build the desired output spike trains for image k: every output neuron's
   train is cleared; the neuron matching the image label spikes every 35
   steps starting at t=100.  One thread per output neuron. */
__global__ void createDes(int k) {
    int tx=threadIdx.x+blockIdx.x*blockDim.x;
    if (tx >= N2) return;
    for (int t = 0; t < M; t++)
        D_op[tx][t] = 0;
    if (tx == d_imgs[k][0]) {   /* d_imgs[k][0] holds the class label */
        for (int t = 100; t < M; t += 35)
            D_op[tx][t] = 1;
    }
}
/*__global__ void pix2spks(int k)
{
//Load the spikes trains for the corresponding pixels:
for(int i=1; i<(p*q+1); i++) {
for(int l=0; l<M; l++) {
img_spks[i-1][l]=syn[d_imgs[k][i]][l];
}
}
//Create the desired spike trains:
for(int j=0; j<N2; j++) {
for(int i=0; i<M; i++) {
D_op[j][i]=0;
}
if(j==d_imgs[k][0]) {
//for(int i=100; i<M; i+=50)
for(int i=100; i<M; i+=35)
D_op[j][i]=1;
}
}
}*/
__device__ double I_in[N1]; // input current to each input-layer neuron (filled by the conv kernels)
__global__ void clear_vars()
{
    // Resets all per-image state (membrane potentials, synaptic traces,
    // currents and weight-update accumulators) before simulating an image.
    int tx = threadIdx.x + blockIdx.x*blockDim.x;
    int ty = threadIdx.y + blockIdx.y*blockDim.y;
    if(tx < N1 && ty < N2) {
        // Per-output-neuron state: write once (tx==0) instead of letting
        // every tx redundantly store the same value, as the original did
        // (a benign but unnecessary write race).
        if(tx == 0) {
            Vm2[ty] = EL;
            Isyn_tot[ty] = 0.0;
            ref_time2[ty] = 0;   // FIX: int counter, was assigned 0.0
            cil[ty] = 0.0;
            cils[ty] = 0.0;
            Isyn_lat[ty] = 0.0;
        }
        // Per-input-neuron state: write once (ty==0).
        if(ty == 0) {
            Vm1[tx] = EL;
            ref_time1[tx] = 0;   // FIX: int counter, was assigned 0.0
            I_in[tx] = 0.0;
            ci[tx] = 0.0;
            cis[tx] = 0.0;
            d_hat[tx] = 0.0;
        }
        // Per-synapse state.
        del_w[tx][ty] = 0.0;
        Isyn[tx][ty] = 0.0;
    }
}
__global__ void LifKernel1(int i)
{
    // LIF membrane update (Heun / RK2) for the N1 input-layer neurons at
    // time step i; emits a binary spike into in_spk.
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    double k1, k2;
    if(j < N1) {
        if(ref_time1[j] < i) ref_time1[j] = 0;          // refractory period over
        k1 = (-gL*(Vm1[j]-EL)+I_in[j])/C;
        k2 = (-gL*((Vm1[j]+dt*k1)-EL)+I_in[j])/C;
        // Integrate only when not refractory (predicate-multiply avoids a branch).
        Vm1[j] += (dt*(k1+k2)/2)*(ref_time1[j]==0);
        if(Vm1[j] < EL) Vm1[j] = EL;                    // clamp at rest
        if(Vm1[j] >= VT) {                              // threshold: spike + reset
            Vm1[j] = EL;
            ref_time1[j] = i+round(Rp/dt);
            in_spk[j] = 1;
        } else {
            in_spk[j] = 0;
        }
        // FIX: removed the __syncthreads() that sat inside this divergent
        // `if (j < N1)` guard — undefined behavior for the partially-filled
        // tail block; the kernel uses no shared memory, so no barrier is needed.
    }
}
__global__ void LifKernel2(int i)
{
    // LIF membrane update (Heun / RK2) for the N2 output neurons at time
    // step i, driven by feed-forward plus lateral currents; also computes
    // the per-neuron spike error against the desired train D_op.
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    double k1, k2;
    if(j < N2) {
        if(ref_time2[j] < i) ref_time2[j] = 0;          // refractory period over
        k1 = (-gL*(Vm2[j]-EL)+Isyn_tot[j]+Isyn_lat[j])/C;
        k2 = (-gL*((Vm2[j]+dt*k1)-EL)+Isyn_tot[j]+Isyn_lat[j])/C;
        Vm2[j] += (dt*(k1+k2)/2)*(ref_time2[j]==0);     // skip update while refractory
        if(Vm2[j] < EL) Vm2[j] = EL;                    // clamp at rest
        if(Vm2[j] >= VT) {                              // threshold: spike + reset
            Vm2[j] = EL;
            ref_time2[j] = i+round(Rp/dt);
            Y_op[j] = 1;
        } else {
            Y_op[j] = 0;
        }
        //compute the error:
        err[j] = D_op[j][i]-Y_op[j];
        // FIX: removed the __syncthreads() inside this divergent guard (UB
        // for partially-filled blocks); no shared memory is used.
    }
}
//kernels for the total synapses in the network
__global__ void SynKernel(int i)
{
    // Updates the per-input synaptic traces (once per input, done by the
    // iy==0 thread) and then computes every synapse's current
    // Isyn[ix][iy] = (ci - cis) * weight.
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    int iy = blockIdx.y*blockDim.y + threadIdx.y;
    // Phase 1: one thread per input neuron advances the traces.
    if(ix < N1 && iy == 0) {
        ci[ix] = ci[ix]*decay;
        cis[ix] = cis[ix]*decays;
        if(in_spk[ix] == 1) {
            ci[ix] += 1.0;
            cis[ix] += 1.0;
        }
        d_hat[ix] = d_hat[ix]*decay1 + ((ci[ix]-cis[ix])*dt)/C;
        d_hat_sq[ix] = d_hat[ix]*d_hat[ix];
    }
    // FIX: the barrier must be reached by every thread of the block. The
    // original placed it inside `if (ix < N1 && iy < N2)`, which is undefined
    // behavior when the grid over-covers N1. All iy values of a given ix live
    // in the same block (blockDim.y spans N2), so this barrier makes phase-1
    // results visible to phase 2.
    __syncthreads();
    // Phase 2: every synapse reads its input's freshly-updated trace.
    if(ix < N1 && iy < N2)
        Isyn[ix][iy] = (ci[ix]-cis[ix])*weight[ix][iy];
}
__global__ void Lat_curr(int i)
{
    // Updates each output neuron's lateral-inhibition trace from its own
    // spike, then sums the lateral currents of all *other* neurons into
    // Isyn_lat. Valid only for the single-block <<<1,N2>>> launch used in main.
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    if(ix < N2) {
        cil[ix] = cil[ix]*decay;
        cils[ix] = cils[ix]*decays;
        if(Y_op[ix] == 1) {
            cil[ix] += 1.0;
            cils[ix] += 1.0;
        }
        I_lat[ix] = w_lat*(cil[ix]-cils[ix]);
    }
    // FIX: barrier (outside any divergent branch) between writing I_lat and
    // reading other threads' entries below. The original had no sync, which
    // is a data race under independent thread scheduling (Volta+).
    __syncthreads();
    if(ix < N2) {
        Isyn_lat[ix] = 0;
        for(int k = 0; k < N2; k++) {
            if(k != ix) {
                Isyn_lat[ix] += I_lat[k];
            }
        }
    }
}
__device__ double total_curr[8][N2]; // per-launch-block partial sums of Isyn (filled by IsynRedKernel)
__device__ double total_dhatsq[8]; // per-launch-block partial sums of d_hat^2
//optimized version
// Tree reduction within each x-block: folds Isyn[ix][iy] over the input
// dimension into one partial sum per (block, output neuron), and likewise
// d_hat_sq into one partial per block. reduce1 finishes the job.
// Assumes blockDim.x is a power of two; destroys Isyn/d_hat_sq in place.
__global__ void IsynRedKernel(int i)
{
int ix=threadIdx.x+blockIdx.x*blockDim.x;
int iy=threadIdx.y+blockIdx.y*blockDim.y;
int tid=threadIdx.x;
for(unsigned int s=blockDim.x/2;s>0;s>>=1) {
// pairwise-add the upper half onto the lower half (guarded so the
// tail block never reads past N1)
if(iy<N2 && tid<s && (ix+s)<N1) {
Isyn[ix][iy]+=Isyn[ix+s][iy];
}
if(iy==0 && tid<s && (ix+s)<N1) {
d_hat_sq[ix]+=d_hat_sq[ix+s];
}
// barrier is outside the guards, so every thread reaches it each pass
__syncthreads();
}
// thread 0 of each block publishes the block's partial results
if(tid==0 && iy<N2) {
total_curr[blockIdx.x][iy]=Isyn[ix][iy];
if(iy==0) {
total_dhatsq[blockIdx.x]=d_hat_sq[ix];
}
}
}
__global__ void reduce1(int i) {
    // Final reduction: sums the 8 per-block partial currents into Isyn_tot,
    // and (thread 0 only) the partial ||d_hat||^2 sums into norm_dh.
    int neuron = threadIdx.x + blockIdx.x*blockDim.x;
    if(neuron >= N2)
        return;
    double currSum = 0.0;
    for(int blk = 0; blk < 8; blk++)
        currSum += total_curr[blk][neuron];
    Isyn_tot[neuron] = currSum;
    if(neuron == 0) {
        double sqSum = 0.0;
        for(int blk = 0; blk < 8; blk++)
            sqSum += total_dhatsq[blk];
        norm_dh = sqrt(sqSum);
    }
}
__global__ void CalcUpdate(int i, double l_rate)
{
    // Accumulates this time step's NormAD weight-update contribution:
    // del_w += err * l_rate * d_hat / ||d_hat|| (only when there is an
    // error and the normalizer is nonzero).
    int ix = threadIdx.x + blockIdx.x*blockDim.x;
    int iy = threadIdx.y + blockIdx.y*blockDim.y;
    if(ix < N1 && iy < N2) {
        if(norm_dh != 0 && err[iy] != 0) {
            del_w[ix][iy] += (err[iy]*l_rate*d_hat[ix]/norm_dh);
        }
        // FIX: removed the __syncthreads() that was inside this divergent
        // guard (UB for partially-filled blocks); no shared memory is used,
        // so no barrier is required.
    }
}
__global__ void WtUpdt()
{
    // Applies the accumulated per-synapse updates to the weight matrix.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row >= N1 || col >= N2)
        return;
    weight[row][col] += del_w[row][col];
}
__global__ void cpyWts(double *wts)
{
    // Flattens the 2-D device weight matrix into the linear buffer `wts`
    // so the host can cudaMemcpy it out.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row >= N1 || col >= N2)
        return;
    wts[row*N2+col] = weight[row][col];
}
// 3x3 (Nw x Nw) convolution kernels for the 12 feature maps, loaded from
// kernels_3x3.csv by initialize2D and read by convKernel1..convKernel12.
__device__ double w_conv1[Nw][Nw], w_conv2[Nw][Nw];
__device__ double w_conv3[Nw][Nw], w_conv4[Nw][Nw];
__device__ double w_conv5[Nw][Nw], w_conv6[Nw][Nw];
__device__ double w_conv7[Nw][Nw], w_conv8[Nw][Nw];
__device__ double w_conv9[Nw][Nw], w_conv10[Nw][Nw];
__device__ double w_conv11[Nw][Nw], w_conv12[Nw][Nw];
__global__ void initialize2D(double *d_wts, double *c_wts)
{
    // Single-threaded initialization: unpacks the flat weight buffer into
    // the 2-D device weight matrix, then distributes the 12 stacked Nw x Nw
    // convolution kernels into their individual device arrays.
    for(int i = 0; i < N1; i++)
        for(int j = 0; j < N2; j++)
            weight[i][j] = d_wts[i*N2+j];
    for(int i = 0; i < (12*Nw); i++) {
        const int map = i / Nw;   // which of the 12 kernels this row belongs to
        const int row = i - map*Nw;
        for(int j = 0; j < Nw; j++) {
            const double v = c_wts[i*Nw+j];
            switch(map) {
                case 0:  w_conv1[row][j]  = v; break;
                case 1:  w_conv2[row][j]  = v; break;
                case 2:  w_conv3[row][j]  = v; break;
                case 3:  w_conv4[row][j]  = v; break;
                case 4:  w_conv5[row][j]  = v; break;
                case 5:  w_conv6[row][j]  = v; break;
                case 6:  w_conv7[row][j]  = v; break;
                case 7:  w_conv8[row][j]  = v; break;
                case 8:  w_conv9[row][j]  = v; break;
                case 9:  w_conv10[row][j] = v; break;
                case 10: w_conv11[row][j] = v; break;
                case 11: w_conv12[row][j] = v; break;
            }
        }
    }
}
__global__ void convKernel1(int i)
{
    // Nw x Nw convolution of the pixel spike kernels with w_conv1 at time
    // step i; one thread per output pixel of feature map 1.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv1[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[row*L+col] = acc;
    }
}
__global__ void convKernel2(int i)
{
    // Convolution for feature map 2; output lands in the 2nd L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv2[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[L*L+row*L+col] = acc;
    }
}
__global__ void convKernel3(int i)
{
    // Convolution for feature map 3; output lands in the 3rd L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv3[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[2*L*L+row*L+col] = acc;
    }
}
__global__ void convKernel4(int i)
{
    // Convolution for feature map 4; output lands in the 4th L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv4[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[3*L*L+row*L+col] = acc;
    }
}
__global__ void convKernel5(int i)
{
    // Convolution for feature map 5; output lands in the 5th L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv5[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[4*L*L+row*L+col] = acc;
    }
}
__global__ void convKernel6(int i)
{
    // Convolution for feature map 6; output lands in the 6th L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv6[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[5*L*L+row*L+col] = acc;
    }
}
__global__ void convKernel7(int i)
{
    // Convolution for feature map 7; output lands in the 7th L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv7[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[6*L*L+row*L+col] = acc;
    }
}
__global__ void convKernel8(int i)
{
    // Convolution for feature map 8; output lands in the 8th L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv8[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[7*L*L+row*L+col] = acc;
    }
}
__global__ void convKernel9(int i)
{
    // Convolution for feature map 9; output lands in the 9th L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv9[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[8*L*L+row*L+col] = acc;
    }
}
__global__ void convKernel10(int i)
{
    // Convolution for feature map 10; output lands in the 10th L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv10[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[9*L*L+row*L+col] = acc;
    }
}
__global__ void convKernel11(int i)
{
    // Convolution for feature map 11; output lands in the 11th L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv11[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[10*L*L+row*L+col] = acc;
    }
}
__global__ void convKernel12(int i)
{
    // Convolution for feature map 12; output lands in the 12th L*L slice of I_in.
    const int row = threadIdx.x + blockIdx.x*blockDim.x;
    const int col = threadIdx.y + blockIdx.y*blockDim.y;
    if(row < L && col < L) {
        double acc = 0.0;
        for(int kx = 0; kx < Nw; kx++)
            for(int ky = 0; ky < Nw; ky++)
                acc += WT*w_conv12[kx][ky]*img_spks[(row+kx)*q+col+ky][i];
        I_in[11*L*L+row*L+col] = acc;
    }
}
long timediff(clock_t t1, clock_t t2) {
    // Returns the elapsed time between two clock() samples in milliseconds
    // (truncated to a whole number of ms).
    const double ms = ((double)t2 - t1) / CLOCKS_PER_SEC * 1000;
    return (long)ms;
}
double h_wts[N1*N2], *d_wts; // initial weights: host buffer + device staging pointer
double *dcwts; // device staging buffer for the convolution kernels
double h_wts_saved[N1*N2], *d_wts_saved; // trained weights copied back after each epoch
double c_wts[12*Nw*Nw]; // host buffer for the 12 stacked convolution kernels
int main(int argc, char *argv[])
{
    // Trains the spiking convolutional network on MNIST with the NormAD
    // rule: reads images, per-intensity spike trains and initial weights
    // from CSV files, runs max_epochs training passes, and writes the
    // weights to wts_gpu<n>.txt after every epoch.
    // FIX: validate argc before touching argv[1] (original dereferenced blindly).
    if(argc < 2) {
        printf("Usage: %s <start_index>\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    int start = atoi(argv[1]); // NOTE(review): parsed but never used below — confirm intent
    (void)start;
    FILE *FW, *FWI;  // final / initial weight files
    FILE *conv_wt;   // fixed 3x3 convolution kernels
    clock_t t1, t2;
    long elapsed = 0;
    FILE *F_train, *F_lif_spks;
    // streams for concurrent execution of the independent conv kernels:
    cudaStream_t stream2, stream3, stream4, stream5, stream6;
    cudaStream_t stream7, stream8, stream9, stream10, stream11, stream12;
    cudaStream_t stream13, stream14, stream15;
    // set the gpu device (hard-coded as in the original setup):
    cudaSetDevice(4);
    cudaStreamCreate(&stream2);
    cudaStreamCreate(&stream3);
    cudaStreamCreate(&stream4);
    cudaStreamCreate(&stream5);
    cudaStreamCreate(&stream6);
    cudaStreamCreate(&stream7);
    cudaStreamCreate(&stream8);
    cudaStreamCreate(&stream9);
    cudaStreamCreate(&stream10);
    cudaStreamCreate(&stream11);
    cudaStreamCreate(&stream12);
    cudaStreamCreate(&stream13);
    cudaStreamCreate(&stream14);
    cudaStreamCreate(&stream15);
    F_lif_spks = fopen("pixels_spks.csv","r");
    if(F_lif_spks == NULL)
    {
        perror("Error while opening file pixels_spks.csv\n");
        exit(EXIT_FAILURE);
    }
    F_train = fopen("mnist_train.csv","r");
    if(F_train == NULL)
    {
        perror("Error while opening file mnist_train.csv\n");
        exit(EXIT_FAILURE);
    }
    FWI = fopen("wts_initial.csv","r");
    if(FWI == NULL)
    {
        // FIX: message previously named the wrong file (wts_trained.csv)
        perror("Error while opening file wts_initial.csv\n");
        exit(EXIT_FAILURE);
    }
    conv_wt = fopen("kernels_3x3.csv","r");
    if(conv_wt == NULL) {
        // FIX: message previously named the wrong file (kernel.csv)
        perror("Error while opening file kernels_3x3.csv\n");
        exit(EXIT_FAILURE);
    }
    printf("Total no. of neurons=%d, no. of synapses to be trained=%d\n",(N1+N2), N1*N2);
    cudaMalloc((void**)&d_imgs_lin,sizeof(int)*(N_imgs*(p*q+1)));
    cudaMalloc((void**)&d_wts,N1*N2*sizeof(double));
    cudaMalloc((void**)&dcwts,12*Nw*Nw*sizeof(double));
    cudaMalloc((void**)&d_wts_saved,N1*N2*sizeof(double));
    cudaMalloc((void**)&pix_spks_d,256*M*sizeof(unsigned char));
    // Read the initial weights:
    for(int i = 0; i < N1; i++) {
        for(int j = 0; j < N2; j++) {
            fscanf(FWI,"%lf,",&h_wts[i*N2+j]);
        }
    }
    fclose(FWI);
    // Read the 12 fixed convolution kernels:
    for(int i = 0; i < (12*Nw); i++) {
        for(int j = 0; j < Nw; j++) {
            fscanf(conv_wt,"%lf,",&c_wts[i*Nw+j]);
        }
    }
    fclose(conv_wt);
    // Read the per-intensity spike trains.
    // FIX: %d must write into an int; the original passed the address of an
    // unsigned char element, which corrupts the neighbouring bytes (UB).
    for(int i = 0; i < 256; i++) {
        for(int j = 0; j < M; j++) {
            int spk = 0;
            fscanf(F_lif_spks,"%d,",&spk);
            pix_spks_h[i*M+j] = (unsigned char)spk;
        }
    }
    fclose(F_lif_spks);
    cudaMemcpy(pix_spks_d,pix_spks_h,256*M*sizeof(unsigned char),cudaMemcpyHostToDevice);
    cudaMemcpy(d_wts,h_wts,N1*N2*sizeof(double),cudaMemcpyHostToDevice);
    cudaMemcpy(dcwts,c_wts,12*Nw*Nw*sizeof(double),cudaMemcpyHostToDevice);
    initialize2D<<<1,1>>>(d_wts,dcwts);
    cudaDeviceSynchronize();
    cudaFree(d_wts);
    cudaFree(dcwts);
    // Read the images (label followed by p*q pixels) from file:
    for(int n = 0; n < N_imgs; n++) {
        for(int j = 0; j < (p*q+1); j++) {
            fscanf(F_train,"%d,",&test_set[n][j]);
        }
    }
    fclose(F_train);
    // Flatten the 2-D matrix for transfer to the device:
    for(int n = 0; n < N_imgs; n++) {
        for(int j = 0; j < (p*q+1); j++) {
            img_lin[n*(p*q+1)+j] = test_set[n][j];
        }
    }
    cudaMemcpy(d_imgs_lin,img_lin,sizeof(int)*(N_imgs*(p*q+1)),cudaMemcpyHostToDevice);
    // copy the images into the 2-D device array:
    img_readKernel<<<1,1>>>(d_imgs_lin);
    cudaDeviceSynchronize();   // FIX: cudaThreadSynchronize() is deprecated
    cudaFree(d_imgs_lin);
    int NBlks = (N1/Nthrds)+1;
    dim3 dimGrid(82,1,1);
    dim3 dimBlock(100,10,1);
    dim3 grid_syn(NBlks,10,1);
    dim3 block_syn(Nthrds,1,1);
    // convert pixel values 0..255 into synaptic-kernel tables:
    convert2Spks<<<1,1>>>(pix_spks_d);
    cudaDeviceSynchronize();
    cudaFree(pix_spks_d);
    dim3 convGrid(1,1,1);
    dim3 convBlks(26,26,1);
    double learn = r;
    // CPU time required for computation
    t1 = clock();
    int l = 0; //image index
    for(int n = 0; n < max_epochs; n++) {
        printf("Epoch=%d\n",n);
        // learning-rate schedule: halve every 3 epochs
        if(n<3) learn=r;
        else if(n>=3 && n<6) learn=r/2;
        else if(n>=6 && n<9) learn=r/4;
        else if(n>=9 && n<12) learn=r/8;
        else if(n>=12 && n<15) learn=r/16;
        else if(n>=15 && n<18) learn=r/32;
        else learn=r/64;
        for(l = 0; l < N_imgs; l++) {
            printf("l=%d image %d\n",l,test_set[l][0]);
            createDes<<<1,10>>>(l);
            clear_vars<<<dimGrid,dimBlock>>>();
            cudaDeviceSynchronize();
            // simulate all M time steps for this image
            for(int i = 0; i < M; i++) {
                pix2spks<<<1,784>>>(l,i);
                cudaDeviceSynchronize();
                // the 12 feature-map convolutions are independent: one stream each
                convKernel1<<<convGrid,convBlks,0,stream2>>>(i);
                convKernel2<<<convGrid,convBlks,0,stream3>>>(i);
                convKernel3<<<convGrid,convBlks,0,stream4>>>(i);
                convKernel4<<<convGrid,convBlks,0,stream5>>>(i);
                convKernel5<<<convGrid,convBlks,0,stream6>>>(i);
                convKernel6<<<convGrid,convBlks,0,stream7>>>(i);
                convKernel7<<<convGrid,convBlks,0,stream8>>>(i);
                convKernel8<<<convGrid,convBlks,0,stream9>>>(i);
                convKernel9<<<convGrid,convBlks,0,stream10>>>(i);
                convKernel10<<<convGrid,convBlks,0,stream11>>>(i);
                convKernel11<<<convGrid,convBlks,0,stream12>>>(i);
                convKernel12<<<convGrid,convBlks,0,stream13>>>(i);
                cudaDeviceSynchronize();
                LifKernel1<<<NBlks,Nthrds>>>(i);
                cudaDeviceSynchronize();
                SynKernel<<<dimGrid,dimBlock,0,stream14>>>(i);
                Lat_curr<<<1,10,0,stream15>>>(i);
                cudaDeviceSynchronize();
                IsynRedKernel<<<grid_syn,block_syn>>>(i);
                reduce1<<<10,1>>>(i);
                cudaDeviceSynchronize();
                LifKernel2<<<1,10>>>(i);
                cudaDeviceSynchronize();
                CalcUpdate<<<dimGrid,dimBlock>>>(i,learn);
            }
            // apply the accumulated updates once per image
            WtUpdt<<<dimGrid,dimBlock>>>();
        } //end of loop over N_imgs
        t2 = clock();
        elapsed += timediff(t1,t2);
        t1 = t2;  // FIX: restart the interval so each epoch adds only its own time
        printf("Elapsed time: %ld ms\n", elapsed);
        printf("Copying the trained weights to the host for analysis\n");
        cpyWts<<<dimGrid,dimBlock>>>(d_wts_saved);
        cudaMemcpy(h_wts_saved,d_wts_saved,N1*N2*sizeof(double), cudaMemcpyDeviceToHost);
        char wts_file[32];  // FIX: 25 bytes could overflow; use snprintf for safety
        snprintf(wts_file,sizeof(wts_file),"wts_gpu%d.txt",n);
        if((FW = fopen(wts_file,"w")) == NULL) {
            // FIX: the original fell through and called fprintf(NULL, ...)
            printf("Failed to open file wts_gpu_sample%d.txt\n",n);
        } else {
            // save the weights in a file:
            for(int i = 0; i < N1; i++) {
                for(int j = 0; j < N2; j++) {
                    fprintf(FW,"%0.14f,",h_wts_saved[i*N2+j]);
                }
                fprintf(FW,"\n");
            }
            fclose(FW);
        }
    } //end of epoch loop
    cudaFree(d_wts_saved);
    cudaDeviceReset();
    return(0);
}
|
1,266
|
#include <string.h>
#include <stdio.h>
#include <iostream>
// A unified-memory record shared between host and device: a heap-allocated
// string plus an integer, both read and mutated on each side.
struct DataElement
{
char *name;
int value;
};
// Prints the element on the device, then mutates it (first character set to
// 'd', value incremented) so the host can observe device-side writes.
__global__
void Kernel(DataElement *elem) {
printf("On device: name=%s, value=%d\n", elem->name, elem->value);
elem->name[0] = 'd';
elem->value++;
}
// Launches Kernel on `elem` in the given stream. Deliberately asynchronous —
// no synchronization here; the caller must synchronize the stream before
// touching `elem` on the host again.
void launch(DataElement *elem, cudaStream_t &stream) {
Kernel<<< 1, 1, 0, stream >>>(elem);
//cudaDeviceSynchronize();
}
void iteration(cudaStream_t &stream)
{
    // Allocates a managed DataElement, launches a kernel that mutates it in
    // the given stream, then reads the result back on the host.
    DataElement *e;
    cudaMallocManaged((void**)&e, sizeof(DataElement));
    e->value = 10;
    cudaMallocManaged((void**)&(e->name), sizeof(char) * (strlen("hello") + 1) );
    strcpy(e->name, "hello");
    launch(e, stream);
    // FIX: the host must not touch managed memory while the kernel may still
    // be running — concurrent access is a race (and a fatal fault on
    // pre-Pascal GPUs). launch() is asynchronous, so synchronize the stream
    // before reading e back.
    cudaStreamSynchronize(stream);
    printf("On host: name=%s, value=%d\n", e->name, e->value);
    cudaFree(e->name);
    cudaFree(e);
}
int main(void)
{
    // Enumerates every CUDA device and runs ten managed-memory kernel
    // iterations on each, creating a fresh stream per iteration.
    int count = 0;
    cudaError_t err = cudaGetDeviceCount(&count);
    std::cout << count << " devices found." << std::endl;
    for (int dev = 0; dev < count; ++dev) {
        err = cudaSetDevice(dev);
        if (err != cudaSuccess) {
            std::cout << "error setting device, #=" << cudaGetErrorString(err) << std::endl;
        }
        cudaDeviceProp deviceProp;
        err = cudaGetDeviceProperties(&deviceProp, dev);
        if (err != cudaSuccess) {
            std::cout << "error getting device properties, #=" << cudaGetErrorString(err) << std::endl;
        }
        std::cout << "Using device " << dev << ", name: " << deviceProp.name << std::endl;
        for (int iter = 0; iter < 10; ++iter) {
            cudaStream_t stream;
            err = cudaStreamCreate(&stream);
            if (err != cudaSuccess) {
                std::cout << "error in stream creation, #=" << cudaGetErrorString(err) << std::endl;
            }
            iteration(stream);
            cudaStreamDestroy(stream);
        }
    }
}
|
1,267
|
#include "includes.h"
__global__ void ElementwiseNorm(float * A, float *B, int size) {
    // In-place elementwise division A[id] /= B[id], flattening a 2-D grid of
    // 1-D blocks into a linear index.
    const int id = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
    if (id >= size)
        return;
    A[id] /= B[id];
}
|
1,268
|
#include<stdio.h>
#include<stdlib.h>
#define TPB 8
#define W 4
#define H 4
#define TX 1
#define TY 1
int N=H*W;
__device__ float distance(float x1, float x2){
    // |x2 - x1|. FIX: the original computed sqrt((x2-x1)*(x2-x1)) with a
    // single-precision square that overflows to inf for |x2-x1| > ~1.8e19
    // and a needless double-precision sqrt; fabsf is exact and cheap.
    return fabsf(x2 - x1);
}
__global__ void distanceKernel(float *d_out, float *d_in, float ref, int w){
    // One thread per pixel of a w x w array: stores |d_in[i] - ref|.
    const int c = blockIdx.x*blockDim.x+threadIdx.x;  // column
    const int r = blockIdx.y*blockDim.y+threadIdx.y;  // row
    // FIX: guard against threads past the array edge — the grid may
    // overshoot when w is not a multiple of the block dims. Assumes a square
    // w x w layout, matching the launch in distanceArray (H == W here).
    if (c >= w || r >= w) return;
    const int i = r*w+c;
    const float x = d_in[i];
    d_out[i] = distance(x, ref);
    printf("c=%d, r=%d, i=%d: the distance between %f to %f is %f. \n", c, r, i, ref, x, d_out[i]); ////
}
void distanceArray(float *out, float *in, float ref, int len){
    // Computes |in[i] - ref| for every element on the GPU: copy in, launch
    // one thread per element of the W x W layout, copy the result back.
    const size_t bytes = len * sizeof(float);
    float *d_in = 0;
    float *d_out = 0;
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, bytes);
    cudaMemcpy(d_in, in, bytes, cudaMemcpyHostToDevice);
    const dim3 blockSize(TX, TY);
    const dim3 gridSize((W + TX - 1) / TX, (W + TY - 1) / TY);
    distanceKernel<<<gridSize, blockSize>>>(d_out, d_in, ref, W);
    cudaMemcpy(out, d_out, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
float scale(int i, int n){
    // Maps index i in [0, n-1] linearly onto [0, 1].
    // FIX: guard n <= 1, which otherwise divides by zero (the original
    // returned inf/nan for n == 1).
    if (n <= 1) return 0.0f;
    return ((float) i)/(n-1);
}
int main(){
    // Fills an input array with evenly spaced samples in [0,1], computes the
    // distance of each from ref on the GPU, and prints the results.
    const float ref = 0.5f;
    float *in = (float*) calloc(N, sizeof(float));
    float *out = (float*) calloc(N, sizeof(float));
    for(int i = 0; i < N; ++i)
        in[i] = scale(i, N);
    distanceArray(out, in, ref, N);
    printf("______________________________ \n");
    for(int j = 0; j < N; ++j)
        printf("The distance, printed from the host, between %f to %f is %f. \n", ref, in[j], out[j]);
    free(in);
    free(out);
    return 0;
}
|
1,269
|
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
float* readData(char* filename)
{
    // Reads "<count>\n<count floats>" from `filename` into a heap-allocated
    // array and returns it. Exits on open/parse failure.
    // FIX: the original returned a pointer to a stack VLA, which dangles the
    // moment the function returns; this version allocates with malloc — the
    // caller owns the buffer and should free() it.
    FILE* handle = fopen(filename, "r");
    if(handle == NULL)
    {
        printf("Error opening file: %s\n", filename);
        exit(0);
    }
    int num, i;
    if(fscanf(handle, "%d", &num) != 1 || num < 0)
    {
        printf("Error reading count from file: %s\n", filename);
        fclose(handle);
        exit(0);
    }
    float* data = (float*)malloc(sizeof(float) * num);
    for(i=0; i<num; i++)
        fscanf(handle, "%f", &data[i]);
    fclose(handle);   // FIX: the original leaked the FILE handle
    return data;
}
/* Reads the leading element count from a raw data file (helper so main can
   size its vectors; sizeof on a pointer cannot recover the length). */
static int readCount(const char* filename)
{
    FILE* handle = fopen(filename, "r");
    if(handle == NULL)
    {
        printf("Error opening file: %s\n", filename);
        exit(0);
    }
    int num = 0;
    fscanf(handle, "%d", &num);
    fclose(handle);
    return num;
}
int main(int argc, char *argv[]) {
    // Thrust vector addition: out = input0 + input1, verified against an
    // expected-output file.
    float *hostInput1 = NULL;
    float *hostInput2 = NULL;
    int i;
    /* parse the input arguments */
    if(argc != 11)
    {
        printf("\nUsage: ./ThrustVectorAdd_Template -e <expected.raw> -i <input0.raw> , <input1.raw> -o <output.raw> -t vector\n\n");
        return 0;
    }
    char* input0_filename = argv[4];
    char* input1_filename = argv[6];
    char* output_filename = argv[8];
    // FIX: the original derived the length via sizeof(hostInput1)/sizeof(float),
    // which is the size of a *pointer* (always 2 on 64-bit hosts), not the
    // array length. Read the count from the file header instead.
    int num = readCount(input0_filename);
    // Import host input data
    hostInput1 = readData(input0_filename);
    hostInput2 = readData(input1_filename);
    // Declare and allocate host output
    thrust::host_vector<float> hostOutput(num);
    // Declare and allocate thrust device input and output vectors
    thrust::device_vector<float> devInput1(num);
    thrust::device_vector<float> devInput2(num);
    thrust::device_vector<float> devOutput(num);
    // Copy to device
    thrust::copy(hostInput1, hostInput1 + num, devInput1.begin());
    thrust::copy(hostInput2, hostInput2 + num, devInput2.begin());
    // Execute vector addition
    thrust::transform(devInput1.begin(), devInput1.end(), devInput2.begin(), devOutput.begin(), thrust::plus<float>());
    // Copy data back to host
    thrust::copy(devOutput.begin(), devOutput.end(), hostOutput.begin());
    // Cross-verification against the expected output file.
    int expectedNum = readCount(output_filename);
    float* verifyData = readData(output_filename);
    if(num != expectedNum)
        printf("Size not matching: Output size: %d\tExpected size: %d\n", num, expectedNum);
    else
        for(i=0; i<num; i++)
        {
            if((float)verifyData[i] != (float)hostOutput[i])
                printf("Data not matching: Location: %d\tOutput: %f\tExpected: %f\n", i+1, hostOutput[i], verifyData[i]);
        }
    return 0;
}
|
1,270
|
// This example demonstrate how use the printf() function inside a kernel.
// In order to do that the code must be generate to architetures with compute capability greater than 2.0
// Compile:
// nvcc -gencode=arch=compute_30,code=sm_30 -g -o helloGPU helloGPU.cu
#include <stdio.h>
// Device-side hello world: each launched thread prints its index and the
// float argument. Device printf requires compute capability >= 2.0.
__global__ void helloCUDA(float f)
{
printf("Hello world from thread %d -- My value of f = %f\n", threadIdx.x, f);
}
int main()
{
    // Launch 5 threads; each prints a greeting from the device.
    helloCUDA<<<1, 5>>>(1.2345f);
    // FIX: wait for the kernel so the device printf buffer is flushed
    // deterministically, and surface any launch/execution error instead of
    // silently relying on cudaDeviceReset().
    cudaError_t err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        printf("CUDA error: %s\n", cudaGetErrorString(err));
    cudaDeviceReset();
    return 0;
}
|
1,271
|
/* ==================================================================
Programmers:
Kevin Wagner
Elijah Malaby
John Casey
Omptimizing SDH histograms for input larger then global memory
==================================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <ctype.h>
#include <math.h>
#include <sys/time.h>
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
float x_pos;
float y_pos;
float z_pos;
} atom;
unsigned long long * histogram; /* list of all buckets in the histogram */
unsigned long long PDH_acnt; /* total number of data points */
int block_size; /* Number of threads per block */
int num_buckets; /* total number of buckets in the histogram */
float PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
// Device-side buffers (allocated and released in GPU_baseline):
unsigned long long * histogram_GPU; // final histogram on the device
unsigned long long * temp_histogram_GPU; // one partial histogram per launch block
atom * atom_list_GPU; // device copy of the atom list
__global__ void kernelSumHistogram( unsigned long long int *InputHists, unsigned long long int *hist, int num_atoms, int num_buckets, int block_size) {
    // Folds the per-block partial histograms (laid out contiguously in
    // InputHists) into the final histogram: one bucket per thread, with a
    // grid-stride loop over buckets.
    const unsigned long long int numPartials =
        (num_atoms)/block_size + ((num_atoms%block_size) ? 1:0);
    for (int bucket = threadIdx.x + blockIdx.x * blockDim.x;
         bucket < num_buckets;
         bucket += blockDim.x * gridDim.x) {
        unsigned long long int sum = 0;
        for (int part = 0; part < numPartials; part++)
            sum += InputHists[part * num_buckets + bucket];
        hist[bucket] = sum;
    }
    __syncthreads();
}
/* Accumulates into `histogram` the distance bucket of every pair formed by
this thread's atom in block_a and each of the b_length atoms in block_b.
atomicAdd is required since many threads hit the same bucket. Called with
one thread per block_a atom; a non-positive b_length makes it a no-op.
NOTE(review): no bounds check on threadIdx.x against the real atom count —
callers must guarantee block_a[threadIdx.x] is valid. */
__device__ void block_to_block (atom * block_a, atom * block_b, int b_length, unsigned long long * histogram, float resolution) {
atom me = block_a[threadIdx.x];
for(int i = 0; i < b_length; i++)
atomicAdd(&(histogram[(int)(sqrt((me.x_pos - block_b[i].x_pos) * (me.x_pos - block_b[i].x_pos) +
(me.y_pos - block_b[i].y_pos) * (me.y_pos - block_b[i].y_pos) +
(me.z_pos - block_b[i].z_pos) * (me.z_pos - block_b[i].z_pos)) / resolution)]),
1);
}
/* SDH kernel. Each block loads its chunk of atoms; each thread computes
distances (a) within its own block, upper-triangle only, and (b) against
every later block, accumulating into a shared-memory histogram that is
finally added into this block's slice of histogram_GPU.
Launch: one thread per atom (grid over-provisioned by one block), dynamic
shared memory of num_buckets * sizeof(unsigned long long).
NOTE(review): threads past PDH_acnt in the tail block read
my_block[threadIdx.x] before any guard — an out-of-bounds read of
atom_list_GPU. The later loop guards keep those values from being counted,
but the load itself should be guarded. */
__global__ void GPUKernelFunction (unsigned long long PDH_acnt, float PDH_res, atom * atom_list_GPU, unsigned long long * histogram_GPU, int num_buckets) {
extern __shared__ unsigned long long SHist[];
/* assign register values */
int i, h_pos;
float dist;
atom * my_block = &atom_list_GPU[blockIdx.x * blockDim.x];
atom temp_atom_1 = my_block[threadIdx.x];
// zero the shared histogram cooperatively, then barrier before use
for(h_pos=threadIdx.x; h_pos < num_buckets; h_pos+=blockDim.x)
SHist[h_pos] = 0;
__syncthreads();
/* intra-block pairs: each thread pairs its atom with every later one */
for (i = threadIdx.x + 1; i < blockDim.x && i+blockIdx.x*blockDim.x < PDH_acnt; i++)
{
atom temp_atom_2 = my_block[i];
dist = sqrt((temp_atom_1.x_pos - temp_atom_2.x_pos) * (temp_atom_1.x_pos - temp_atom_2.x_pos) +
(temp_atom_1.y_pos - temp_atom_2.y_pos) * (temp_atom_1.y_pos - temp_atom_2.y_pos) +
(temp_atom_1.z_pos - temp_atom_2.z_pos) * (temp_atom_1.z_pos - temp_atom_2.z_pos));
h_pos = (int)(dist / PDH_res);
atomicAdd(&(SHist[h_pos]), 1);
}
__syncthreads();
/* inter-block pairs: all full later blocks first... */
for(i=blockIdx.x+1; i < gridDim.x-1; i++)
block_to_block(my_block,
&atom_list_GPU[i*blockDim.x],
blockDim.x,
SHist,
PDH_res);
/* ...then the (possibly short) final block. For the tail block itself the
length expression goes non-positive (unsigned subtraction narrowed to int,
presumably negative), making the call a no-op — TODO confirm. */
block_to_block(my_block,
&atom_list_GPU[i*blockDim.x],
PDH_acnt-i*blockDim.x, // Last block may be small
SHist,
PDH_res);
__syncthreads();
// publish this block's partial histogram to its global slice
for(h_pos = threadIdx.x; h_pos < num_buckets; h_pos += blockDim.x)
*(histogram_GPU+(num_buckets*blockIdx.x)+h_pos) += SHist[h_pos];
}
/* print the counts in all buckets of the histogram */
/* Prints the histogram five buckets per row, plus the total pair count on
   the last line (a sanity check: it should equal n*(n-1)/2). */
void output_histogram_GPU(){
    int i;
    unsigned long long total_cnt = 0;
    for(i=0; i< num_buckets; i++) {
        if(i%5 == 0) /* we print 5 buckets in a row */
            printf("\n%02d: ", i);
        /* FIX: %lld is the signed conversion — passing an unsigned long long
           is undefined behavior; use %llu. */
        printf("%15llu ", histogram[i]);
        total_cnt += histogram[i];
        /* we also want to make sure the total distance count is correct */
        if(i == num_buckets - 1)
            printf("\n T:%llu \n", total_cnt);
        else printf("| ");
    }
}
/* Allocates device buffers, runs the SDH kernel plus the partial-histogram
   reduction, times the kernels with CUDA events, copies the result back and
   prints it. Uses the file-scope globals set up in main(). */
void GPU_baseline() {
    /* over-provisioned by one block when PDH_acnt divides block_size — the
       kernels rely on this layout */
    int num_blocks = ((PDH_acnt + block_size)/block_size);
    /* final histogram + per-block partial histograms on the device */
    cudaMalloc((void**) &histogram_GPU, sizeof(unsigned long long)*num_buckets);
    cudaMemset(histogram_GPU, 0, sizeof(unsigned long long)*num_buckets);
    cudaMalloc((void**) &temp_histogram_GPU, sizeof(unsigned long long)*num_buckets*num_blocks);
    cudaMemset(temp_histogram_GPU, 0, sizeof(unsigned long long)*num_buckets*num_blocks);
    /* copy atom list to device memory */
    cudaMalloc((void**) &atom_list_GPU, sizeof(atom) * PDH_acnt);
    cudaMemcpy(atom_list_GPU, atom_list, sizeof(atom) * PDH_acnt, cudaMemcpyHostToDevice);
    /* start time keeping */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord( start, 0 );
    /* Run Kernel (dynamic shared memory holds one histogram per block) */
    GPUKernelFunction <<<num_blocks, block_size, sizeof(unsigned long long)*num_buckets>>> (PDH_acnt, PDH_res, atom_list_GPU, temp_histogram_GPU, num_buckets);
    cudaDeviceSynchronize();
    kernelSumHistogram<<<3, 512>>>(temp_histogram_GPU, histogram_GPU, PDH_acnt, num_buckets, block_size);
    /* stop time keeping */
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    float elapsedTime;
    cudaEventElapsedTime( &elapsedTime, start, stop );
    /* transfer histogram to host memory */
    cudaMemcpy(histogram, histogram_GPU, sizeof(unsigned long long)*num_buckets, cudaMemcpyDeviceToHost);
    /* print out the histogram */
    output_histogram_GPU();
    elapsedTime = elapsedTime/1000;
    printf( "******** Total Running Time of Kernel = %0.5f sec *******\n", elapsedTime );
    /* free cuda timekeeping */
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    cudaFree(temp_histogram_GPU);
    /* FIX: the original leaked these two device buffers */
    cudaFree(histogram_GPU);
    cudaFree(atom_list_GPU);
}
/* Input Validation Function */
/* Input validation: accepts non-empty digit strings, optionally with a
   single '.' when floatingpoint is true (e.g. "12", "3.5"). */
bool isNumber(char number[], bool floatingpoint)
{
    /* FIX: the original accepted the empty string (the loop never ran). */
    if (number[0] == 0)
        return false;
    for (int i = 0; number[i] != 0; i++)
    {
        /* FIX: cast to unsigned char — passing a negative plain char to
           isdigit is undefined behavior. */
        if (!isdigit((unsigned char)number[i]))
        {
            if((number[i] == '.' && floatingpoint))
            {
                floatingpoint = false;  /* allow only one decimal point */
            }
            else
            {
                return false;
            }
        }
    }
    return true;
}
/* Most of this input validation can probably be pulled whenever we hardcode our block size and if we hardcode our bucket width */
// Entry point: parses <#samples> <bucket_width> <block_size>, generates a
// uniform random atom cloud inside the box, and runs the GPU histogram.
int main(int argc, char **argv)
{
/* input validation */
if((argc > 3))
{
if(((isNumber(argv[1], false) && isNumber(argv[2], true)) && isNumber(argv[3], false)))
{
PDH_acnt = atoi(argv[1]); // NOTE(review): atoi returns int — counts above INT_MAX are truncated
PDH_res = atof(argv[2]);
block_size = atoi(argv[3]);
}
else
{
printf( "Invalid Input Error Invalid Arguments\n Valid input is ./program_name {#of_samples} {bucket_width} {block_size}\n");
return 0;
}
}
else
{
printf( "Invalid Input Error Insufficient Arguments\n Valid input is ./program_name {#of_samples} {bucket_width} {block_size}\n");
return 0;
}
/* allocate memory; 1.732 ~ sqrt(3): the box diagonal bounds all distances */
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
histogram = (unsigned long long *)malloc(sizeof(unsigned long long)*num_buckets);
atom_list = (atom *)malloc(sizeof(atom)*PDH_acnt);
srand(1); // fixed seed for reproducible runs
/* generate data following a uniform distribution */
for(int i = 0; i < PDH_acnt; i++) {
atom_list[i].x_pos = ((float)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].y_pos = ((float)(rand()) / RAND_MAX) * BOX_SIZE;
atom_list[i].z_pos = ((float)(rand()) / RAND_MAX) * BOX_SIZE;
}
/* call GPU histrogram compute */
GPU_baseline();
/* free memory */
free(histogram);
free(atom_list);
return 0;
}
|
1,272
|
#include "friction_update.cuh"
// Semi-implicit Manning friction update, one cell per thread: applies the
// friction source term to the cell discharge q over time step dt.
// NOTE(review): `real` and the C() literal macro come from the included
// header — presumably float/double selected at build time; confirm.
__global__ void friction_update
(
SimulationParameters sim_params,
SolverParameters solver_params,
real dt,
AssembledSolution d_assem_sol
)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
// covers all cells plus the two boundary (ghost) cells
if (x < sim_params.cells + 2)
{
// apply friction only where the cell is wet and actually flowing
if (d_assem_sol.h_BC[x] > solver_params.tol_dry && abs(d_assem_sol.q_BC[x]) > solver_params.tol_dry)
{
real u = d_assem_sol.q_BC[x] / d_assem_sol.h_BC[x]; // depth-averaged velocity
real Cf = solver_params.g * pow(sim_params.manning, C(2.0)) / pow(d_assem_sol.h_BC[x], C(1.0) / C(3.0)); // g*n^2 / h^(1/3)
real Sf = -Cf * abs(u) * u; // friction slope term, opposes the flow
real D = 1 + 2 * dt * Cf * abs(u) / d_assem_sol.h_BC[x]; // implicit denominator (stability)
// Update
d_assem_sol.q_BC[x] += dt * Sf / D;
}
}
}
|
1,273
|
#include "includes.h"
// Computes the angular momentum (r x p plus the planetary-rotation term) of
// every control volume: one (column id, vertical level lev) pair per thread,
// with the grid's y-dimension enumerating the nv levels. Results are the
// per-cell totals AngMom{x,y,z} = AM * cell volume.
// NOTE(review): GlobalAM{x,y,z}_d are parameters but never written here —
// presumably reduced by a separate kernel; confirm.
__global__ void CalcAngMom(double *AngMomx_d, double *AngMomy_d, double *AngMomz_d, double *GlobalAMx_d, double *GlobalAMy_d, double *GlobalAMz_d, double *Mh_d, double *Rho_d, double A, double Omega, double *Altitude_d, double *Altitudeh_d, double *lonlat_d, double *areasT, double *func_r_d, int num, bool DeepModel) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
int nv = gridDim.y;
int lev = blockIdx.y;
if (id < num) {
double AMx, AMy, AMz;
double rx, ry, rz, r;
//calculate control volume
double zup, zlow, Vol;
zup = Altitudeh_d[lev + 1] + A;
zlow = Altitudeh_d[lev] + A;
// deep model: true spherical-shell volume; shallow: area times thickness
if (DeepModel) {
Vol = areasT[id] / pow(A, 2) * (pow(zup, 3) - pow(zlow, 3)) / 3;
}
else {
Vol = areasT[id] * (zup - zlow);
}
//radius vector (func_r_d holds the unit radial direction per column)
r = (A + Altitude_d[lev]);
rx = r * func_r_d[id * 3 + 0];
ry = r * func_r_d[id * 3 + 1];
rz = r * func_r_d[id * 3 + 2];
//angular momentum r x p (total x and y over globe should ~ 0, z ~ const)
AMx = ry * Mh_d[id * 3 * nv + lev * 3 + 2] - rz * Mh_d[id * 3 * nv + lev * 3 + 1]
- Rho_d[id * nv + lev] * Omega * r * rz * cos(lonlat_d[id * 2 + 1])
* cos(lonlat_d[id * 2]);
AMy = -rx * Mh_d[id * 3 * nv + lev * 3 + 2] + rz * Mh_d[id * 3 * nv + lev * 3 + 0]
- Rho_d[id * nv + lev] * Omega * r * rz * cos(lonlat_d[id * 2 + 1])
* sin(lonlat_d[id * 2]);
AMz = rx * Mh_d[id * 3 * nv + lev * 3 + 1] - ry * Mh_d[id * 3 * nv + lev * 3 + 0]
+ Rho_d[id * nv + lev] * Omega * r * r * cos(lonlat_d[id * 2 + 1])
* cos(lonlat_d[id * 2 + 1]);
//AMx, AMy should go to zero when integrated over globe
// (but in practice, are just much smaller than AMz)
//total in control volume
AngMomx_d[id * nv + lev] = AMx * Vol;
AngMomy_d[id * nv + lev] = AMy * Vol;
AngMomz_d[id * nv + lev] = AMz * Vol;
}
}
|
1,274
|
#include "includes.h"
__global__ void box_encode_kernel(float * targets_dx, float * targets_dy, float * targets_dw, float * targets_dh, float4 * boxes, float4 * anchors, float wx, float wy, float ww, float wh, size_t gt, size_t idxJump) {
    // Encodes each ground-truth box against its matched anchor as the usual
    // (dx, dy, dw, dh) regression targets, weighted by wx/wy/ww/wh.
    // Grid-stride loop over the gt rows: stride is supplied as idxJump.
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    for (int i = tid; i < gt; i += idxJump) {
        const float4 a = anchors[i];   // (x1, y1, x2, y2)
        const float4 b = boxes[i];     // (x1, y1, x2, y2)
        // anchor width/height/center
        const float ex_w = a.z - a.x + 1;
        const float ex_h = a.w - a.y + 1;
        const float ex_ctr_x = a.x + 0.5 * ex_w;
        const float ex_ctr_y = a.y + 0.5 * ex_h;
        // ground-truth width/height/center
        const float gt_w = b.z - b.x + 1;
        const float gt_h = b.w - b.y + 1;
        const float gt_ctr_x = b.x + 0.5 * gt_w;
        const float gt_ctr_y = b.y + 0.5 * gt_h;
        targets_dx[i] = wx * (gt_ctr_x-ex_ctr_x)/ex_w;
        targets_dy[i] = wy * (gt_ctr_y-ex_ctr_y)/ex_h;
        targets_dw[i] = ww * log(gt_w/ex_w);
        targets_dh[i] = wh * log(gt_h/ex_h);
    }
}
|
1,275
|
#include <iostream>
#include <math.h>
#include <cuda.h>
#include <stdio.h>
// function to add the elements of two arrays
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
// Aborts with a file:line diagnostic when a CUDA API call returns an error.
// Invoked through the checkCudaErrors() macro above, which stringifies the
// call expression into `func`.
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
// Element-wise in-place vector add: y[i] = x[i] + y[i] for i in [0, n).
// Fix: the original body was empty, so no addition ever happened.
// The launch is flattened so any 1D/2D/3D configuration works, and a
// grid-stride loop covers all n elements regardless of grid size.
__global__ void add(int n, float *x, float *y)
{
    int tid = (threadIdx.z * blockDim.y + threadIdx.y) * blockDim.x + threadIdx.x;
    int bid = (blockIdx.z * gridDim.y + blockIdx.y) * gridDim.x + blockIdx.x;
    int threadsPerBlock = blockDim.x * blockDim.y * blockDim.z;
    int stride = threadsPerBlock * gridDim.x * gridDim.y * gridDim.z;
    for (int i = bid * threadsPerBlock + tid; i < n; i += stride)
        y[i] = x[i] + y[i];
}
int main(void)
{
    int N = 1<<20; // 1M elements

    // Unified (managed) memory reachable from both host and device.
    // Fix: the original first allocated x and y with new[] and then
    // overwrote the pointers via cudaMallocManaged, leaking the host arrays.
    float *x = NULL;
    float *y = NULL;
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));

    // Initialize the inputs (the original left them uninitialized).
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }

    // 1D launch sized to the data; the original used a fixed 1024x768-derived
    // grid unrelated to N. Ceil-division covers the tail block.
    const int threadsPerBlock = 256;
    const int numBlocks = (N + threadsPerBlock - 1) / threadsPerBlock;
    add<<<numBlocks, threadsPerBlock>>>(N, x, y);

    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());

    // Free memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
1,276
|
#include "cuda.h"
#define N 1000
__device__ float A[N][N];
__device__ float B[N][N];
__device__ float C[N][N];
// Element-wise matrix add C = A + B over N x N matrices.
// Fix: the original indexed with threadIdx only (so it required a single
// huge block, which exceeds the 1024-threads-per-block limit) and had no
// bounds guard. Compute a global 2D index and guard the tail tiles.
__global__ void vectorAdd(float A[N][N], float B[N][N], float C[N][N])
{
    int i = blockIdx.y * blockDim.y + threadIdx.y; // row
    int j = blockIdx.x * blockDim.x + threadIdx.x; // column
    if (i < N && j < N)
        C[i][j] = A[i][j] + B[i][j];
}
int main()
{
    // Fix: the original launched one block of N*N = 1,000,000 threads, far
    // beyond the 1024-threads-per-block hardware limit, so the kernel never
    // ran. Use 16x16 blocks and enough blocks to tile the N x N matrix.
    dim3 tpb(16, 16);
    dim3 bpg((N + tpb.x - 1) / tpb.x, (N + tpb.y - 1) / tpb.y);
    vectorAdd<<<bpg, tpb>>>(A, B, C);
    // Wait for the kernel; also surfaces any execution error.
    cudaDeviceSynchronize();
    // Fix: removed the cudaFree(A/B/C) calls -- A, B, C are statically
    // allocated __device__ symbols, not cudaMalloc'd pointers, and freeing
    // them is invalid.
    return 0;
}
|
1,277
|
#include <stdio.h>
#include <stdlib.h>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR: file:%s line:%d message:%s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while (0)
// Reverse devVec into revVec: revVec[i] = devVec[n-1-i].
// Grid-stride loop so any launch configuration covers all n elements.
__global__ void kernel(double *devVec, double *revVec, int n) {
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    // Fix: the stride is the TOTAL thread count blockDim.x * gridDim.x.
    // The original used blockDim.x + gridDim.x, making threads revisit and
    // skip elements, producing a wrong result for n > blockDim.x + gridDim.x.
    int offset = blockDim.x * gridDim.x;
    while (idx < n) {
        revVec[idx] = devVec[n - 1 - idx];
        idx += offset;
    }
}
// Print `size` doubles separated by spaces, terminated with a newline.
void printVector(double *vec, int size) {
    int pos = 0;
    while (pos < size) {
        printf("%f ", vec[pos]);
        ++pos;
    }
    printf("\n");
}
// Read a vector of n doubles from stdin, reverse it on the GPU, and print
// the reversed vector in scientific notation.
int main() {
    int n; // Vector size
    double *vec;
    // Fixed launch configuration; the kernel's grid-stride loop makes it
    // correct for any n.
    int blocks = 256, threads = 256;
    // Set up initial data
    scanf("%d", &n);
    vec = (double *)malloc(sizeof(double) * n);
    for (int i = 0; i < n; ++i) {
        scanf("%lf", &vec[i]);
    }
    // Create CUDA vectors and copy data to first
    double *devVec, *revVec;
    CSC(cudaMalloc(&devVec, sizeof(double) * n));
    CSC(cudaMalloc(&revVec, sizeof(double) * n));
    CSC(cudaMemcpy(devVec, vec, sizeof(double) * n, cudaMemcpyHostToDevice));
    // Call kernel; launch-configuration errors surface via cudaGetLastError.
    kernel<<<blocks, threads>>>(devVec, revVec, n);
    CSC(cudaGetLastError());
    // Copy results from device to host memory
    // (this blocking memcpy also synchronizes with the kernel).
    CSC(cudaMemcpy(vec, revVec, sizeof(double) * n, cudaMemcpyDeviceToHost));
    CSC(cudaFree(devVec));
    CSC(cudaFree(revVec));
    // Print results
    for (int i = 0; i < n; ++i) {
        printf("%.10e ", vec[i]);
    }
    free(vec);
    return 0;
}
|
1,278
|
#include "includes.h"
// Pack each COO entry's (row, column) pair, rebased to the start of the
// partition containing the row, into a single int: high 16 bits hold the
// local row offset, low 16 bits the local column offset. The packed value
// overwrites row_indices in place. One thread per entry.
__global__ void packcoo_kernel(int num_entries, int* row_indices, int* column_indices, int* aggridx, int* partidx, int* partlabel)
{
    const int entry = blockIdx.x * blockDim.x + threadIdx.x;
    if (entry >= num_entries)
        return;

    const int row = row_indices[entry];
    const int col = column_indices[entry];
    // First global row of the partition that owns this row.
    const int partstart = aggridx[partidx[partlabel[row]]];

    unsigned int packed = (unsigned int)(row - partstart) << 16;
    packed += col - partstart;
    row_indices[entry] = packed;
}
|
1,279
|
#include "includes.h"
// Build a gray-level co-occurrence matrix (horizontal offset (+1, 0)) from
// the nx x ny image A (values expected in [0, maxx]) into the
// (maxx+1) x (maxx+1) histogram glcm, using atomics for the shared bins.
__global__ void glcm_calculation_nol(int *A,int *glcm, const int nx, const int ny,int maxx)
{
    int ix = threadIdx.x + blockIdx.x * blockDim.x;
    int iy = threadIdx.y + blockIdx.y * blockDim.y;
    unsigned int idx = iy * nx + ix;
    // Fix: the pair (A[idx], A[idx+1]) needs the horizontal neighbour, so the
    // last column must be excluded. The original guard (idx < nx*ny) both
    // read one element past the end of A for the final pixel and paired
    // pixels across row boundaries.
    if (ix < nx - 1 && iy < ny) {
        int a = A[idx];
        int b = A[idx + 1];
        // Direct O(1) bin lookup replaces the original O(maxx^2) scan; the
        // range check preserves its behavior of ignoring out-of-range values.
        if (a >= 0 && a <= maxx && b >= 0 && b <= maxx) {
            int p = ((maxx + 1) * a) + b;
            atomicAdd(&glcm[p], 1);
        }
    }
}
|
1,280
|
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include "pinnedmem.cuh"
// Allocate host memory: pinned (page-locked, optionally write-combined on
// CUDART >= 2020) when memMode == PINNED, otherwise ordinary pageable malloc.
// Returns the CUDA status; the result pointer is stored through h_mem.
cudaError
mallocHost(void** h_mem ,uint memSize, memoryMode memMode, bool wc)
{
    if( PINNED == memMode ) {
#if CUDART_VERSION >= 2020
        return cudaHostAlloc( h_mem, memSize, (wc) ? cudaHostAllocWriteCombined : 0 );
#else
        if (wc) {printf("Write-Combined unavailable on CUDART_VERSION less than 2020, running is: %d", CUDART_VERSION);}
        return cudaMallocHost( h_mem, memSize );
#endif
    }
    else { // PAGEABLE memory mode
        *h_mem = malloc( memSize );
        // Fix: report allocation failure instead of unconditionally claiming
        // success when malloc returns NULL.
        if (*h_mem == NULL) {
            return cudaErrorMemoryAllocation;
        }
    }
    return cudaSuccess;
}
// Release memory obtained from mallocHost, using the deallocator that
// matches how it was allocated (cudaFreeHost for pinned, free for pageable).
cudaError
freeHost(void* h_mem, memoryMode memMode)
{
    if( memMode != PINNED ) {
        free(h_mem);
        return cudaSuccess;
    }
    // Pinned memory must go back through the CUDA runtime.
    return cudaFreeHost(h_mem);
}
/*
 * exitOnError: If the last CUDA call failed, print the error message
 * (tagged with the caller-supplied location string) and terminate.
 */
void exitOnError(const char *whereAt) {
    cudaError_t status = cudaGetLastError();
    if (cudaSuccess == status)
        return;
    // print the CUDA error message and exit
    printf("CUDA error at %s: %s\n", whereAt, cudaGetErrorString(status));
    exit(-1);
}
|
1,281
|
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <time.h>
// CUDA kernel: element-wise c = a + b, one thread per element.
__global__ void vecAdd(float *a, float *b, float *c, int n)
{
    // Global index of this thread across the whole grid.
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    // Threads past the end of the data (rounded-up grid) do nothing.
    if (tid >= n)
        return;
    c[tid] = a[tid] + b[tid];
}
#include <math.h>
// Host wrapper: launch vecAdd over data_num elements held in the DEVICE
// pointers a, b, c (result written to c).
void API_add_v2(float *a, float *b, float *c, int data_num)
{
    printf("In function: data nums = %d\n",data_num);
    // Number of threads in each thread block
    int blockSize = 1024;
    // Integer ceil-division: exact, unlike the original float-based ceil,
    // which can misround for large data_num.
    int gridSize = (data_num + blockSize - 1) / blockSize;
    vecAdd<<<gridSize, blockSize>>>(a, b, c, data_num);
    // Fix: a kernel launch returns no status itself; query it explicitly so
    // configuration errors are not silently dropped.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("vecAdd launch failed: %s\n", cudaGetErrorString(err));
}
|
1,282
|
// headers
#include <stdio.h>
#include <cuda.h> // for CUDA
// global variables
int inputLength=5;
float *hostInput1=NULL;
float *hostInput2=NULL;
float *hostOutput=NULL;
float *deviceInput1=NULL;
float *deviceInput2=NULL;
float *deviceOutput=NULL;
// global kernel function definition
__global__ void vecAdd(float *in1,float *in2,float *out,int len)
{
    // Element-wise out = in1 + in2; one thread per element.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard the tail: the grid is rounded up past len.
    if (idx >= len)
        return;
    out[idx] = in1[idx] + in2[idx];
}
// Allocate host/device vectors, add them on the GPU, print the results, and
// release everything via cleanup(). Exits with EXIT_FAILURE on any error.
int main(int argc,char *argv[])
{
    // function declarations
    void cleanup(void);

    // Consolidated fatal-error handler for CUDA calls: print, clean up, exit.
    // (cleanup() frees whatever has been allocated so far and NULLs the
    // pointers, so no per-site explicit cudaFree is needed -- the original's
    // extra cudaFree(deviceInput1) before cleanup() caused a double free.)
#define CUDA_FATAL(call)                                                                                      \
    do                                                                                                        \
    {                                                                                                         \
        cudaError_t err_=(call);                                                                              \
        if(err_!=cudaSuccess)                                                                                 \
        {                                                                                                     \
            printf("GPU Memory Fatal Error = %s In File Name %s At Line No. %d.\nExitting ...\n",             \
                   cudaGetErrorString(err_),__FILE__,__LINE__);                                               \
            cleanup();                                                                                        \
            exit(EXIT_FAILURE);                                                                               \
        }                                                                                                     \
    } while(0)

    // allocate host-memory
    hostInput1=(float *)malloc(inputLength * sizeof(float));
    if(hostInput1==NULL)
    {
        printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 1.\nExitting ...\n");
        cleanup();
        exit(EXIT_FAILURE);
    }
    hostInput2=(float *)malloc(inputLength * sizeof(float));
    if(hostInput2==NULL)
    {
        printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Input Array 2.\nExitting ...\n");
        cleanup();
        exit(EXIT_FAILURE);
    }
    hostOutput=(float *)malloc(inputLength * sizeof(float));
    if(hostOutput==NULL)
    {
        printf("CPU Memory Fatal Error = Can Not Allocate Enough Memory For Host Output Array.\nExitting ...\n");
        cleanup();
        exit(EXIT_FAILURE);
    }

    // fill input host vectors with arbitrary but hard-coded data
    // (same values as before: 101..105 and 201..205, now independent of
    // inputLength so the program generalizes to other lengths)
    for(int i=0;i<inputLength;i++)
    {
        hostInput1[i]=101.0f+(float)i;
        hostInput2[i]=201.0f+(float)i;
    }

    // allocate device-memory
    int size=inputLength * sizeof(float);
    CUDA_FATAL(cudaMalloc((void **)&deviceInput1,size));
    CUDA_FATAL(cudaMalloc((void **)&deviceInput2,size));
    CUDA_FATAL(cudaMalloc((void **)&deviceOutput,size));

    // copy host memory contents to device memory
    CUDA_FATAL(cudaMemcpy(deviceInput1,hostInput1,size,cudaMemcpyHostToDevice));
    CUDA_FATAL(cudaMemcpy(deviceInput2,hostInput2,size,cudaMemcpyHostToDevice));

    // cuda kernel configuration (integer ceil-division) and launch
    dim3 DimGrid=dim3((inputLength+255)/256,1,1);
    dim3 DimBlock=dim3(256,1,1);
    vecAdd<<<DimGrid,DimBlock>>>(deviceInput1,deviceInput2,deviceOutput,inputLength);

    // Fix: a kernel launch does not return a status; query it explicitly so
    // a bad configuration does not go unnoticed.
    CUDA_FATAL(cudaGetLastError());

    // copy device memory to host memory (blocking: also syncs with kernel)
    CUDA_FATAL(cudaMemcpy(hostOutput,deviceOutput,size,cudaMemcpyDeviceToHost));

    // results
    int i;
    for(i=0;i<inputLength;i++)
    {
        printf("%f + %f = %f\n",hostInput1[i],hostInput2[i],hostOutput[i]);
    }

    // total cleanup
    cleanup();
    return(0);
#undef CUDA_FATAL
}
// Release every global device buffer (cudaFree) and host buffer (free),
// NULLing each pointer so repeated calls are harmless no-ops.
void cleanup(void)
{
    if(deviceInput1 != NULL)
    {
        cudaFree(deviceInput1);
        deviceInput1=NULL;
    }
    if(deviceInput2 != NULL)
    {
        cudaFree(deviceInput2);
        deviceInput2=NULL;
    }
    if(deviceOutput != NULL)
    {
        cudaFree(deviceOutput);
        deviceOutput=NULL;
    }
    if(hostInput1 != NULL)
    {
        free(hostInput1);
        hostInput1=NULL;
    }
    if(hostInput2 != NULL)
    {
        free(hostInput2);
        hostInput2=NULL;
    }
    if(hostOutput != NULL)
    {
        free(hostOutput);
        hostOutput=NULL;
    }
}
|
1,283
|
#include <stdio.h>
#include <cuda.h>
__global__ void alloutputs(int *counter) {
    // Atomically bump the shared counter; oldc is this thread's unique
    // pre-increment ticket (0..33 for the 34-thread launch in main).
    int oldc = atomicAdd(counter, 1);
    // Deliberately racy non-atomic read of *counter: whichever thread(s)
    // happen to observe the final value 34 print their own ticket, so
    // different runs can print different numbers -- the kernel name suggests
    // it demonstrates the set of possible outputs of this race.
    // NOTE(review): if a single deterministic output were intended, the test
    // should be `oldc == 33` instead -- confirm intent.
    if (*counter == 34) printf("%d\n", oldc);
}
// Driver: zero a device counter, launch 34 threads of alloutputs, and wait
// for the (racy) printf output.
int main() {
    int *counter, hcounter = 0;
    cudaMalloc(&counter, sizeof(int));
    cudaMemcpy(counter, &hcounter, sizeof(int), cudaMemcpyHostToDevice);
    alloutputs<<<1, 34>>>(counter);
    // Wait for the kernel so its device-side printf is flushed.
    cudaDeviceSynchronize();
    // Fix: release the device allocation (it was leaked).
    cudaFree(counter);
    return 0;
}
|
1,284
|
#include <stdio.h>
__global__ void kicache_test4_2 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_4 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_6 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_8 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_10 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_12 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_14 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_16 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_18 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_20 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_22 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_24 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_26 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_28 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_30 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_32 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_34 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_36 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_38 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_40 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_42 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_44 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_46 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_48 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_50 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_52 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_54 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_56 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_58 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_60 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_62 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_64 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_72 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_80 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_88 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_96 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_104 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_112 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_120 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_128 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_136 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_144 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_152 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_160 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_168 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_176 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_184 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_192 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_200 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_208 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_216 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_224 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
__global__ void kicache_test4_232 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
//__global__ void kicache_test4_240 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
//__global__ void kicache_test4_248 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
//__global__ void kicache_test4_256 (unsigned int *ts, unsigned int* out, int p1, int p2, int its);
// Measure instruction-cache capacity by timing kernels of increasing code
// size (kicache_test4_N holds N code "steps") and looking for the jump in
// per-instruction latency once a kernel no longer fits in the i-cache.
// Output goes to stdout; progress note to stderr.
void measure_icache()
{
    // Dispatch tables replace the original three ~30-case switch statements;
    // entry [k] is the kernel with (k+1) steps of the given size stride.
    typedef void (*icache_kernel_t)(unsigned int *, unsigned int *, int, int, int);
    static const icache_kernel_t step_half_kb[32] = {
        kicache_test4_2,   kicache_test4_4,   kicache_test4_6,   kicache_test4_8,
        kicache_test4_10,  kicache_test4_12,  kicache_test4_14,  kicache_test4_16,
        kicache_test4_18,  kicache_test4_20,  kicache_test4_22,  kicache_test4_24,
        kicache_test4_26,  kicache_test4_28,  kicache_test4_30,  kicache_test4_32,
        kicache_test4_34,  kicache_test4_36,  kicache_test4_38,  kicache_test4_40,
        kicache_test4_42,  kicache_test4_44,  kicache_test4_46,  kicache_test4_48,
        kicache_test4_50,  kicache_test4_52,  kicache_test4_54,  kicache_test4_56,
        kicache_test4_58,  kicache_test4_60,  kicache_test4_62,  kicache_test4_64 };
    static const icache_kernel_t step_2_kb[29] = {
        kicache_test4_8,   kicache_test4_16,  kicache_test4_24,  kicache_test4_32,
        kicache_test4_40,  kicache_test4_48,  kicache_test4_56,  kicache_test4_64,
        kicache_test4_72,  kicache_test4_80,  kicache_test4_88,  kicache_test4_96,
        kicache_test4_104, kicache_test4_112, kicache_test4_120, kicache_test4_128,
        kicache_test4_136, kicache_test4_144, kicache_test4_152, kicache_test4_160,
        kicache_test4_168, kicache_test4_176, kicache_test4_184, kicache_test4_192,
        kicache_test4_200, kicache_test4_208, kicache_test4_216, kicache_test4_224,
        kicache_test4_232 };

    unsigned int ts[1024]; // ts, output from kernel. Two elements used per thread.
    unsigned int *d_ts;
    unsigned int *d_out;   // Unused memory for storing output
    dim3 Db = dim3(1);
    dim3 Dg = dim3(1,1,1);

    // Allocate device arrays.
    if (cudaSuccess != cudaMalloc((void**)&d_ts, sizeof(ts)))
    {
        printf ("cudaMalloc failed %s:%d\n", __FILE__, __LINE__);
        return;
    }
    if (cudaSuccess != cudaMalloc((void**)&d_out, 4))
    {
        printf ("cudaMalloc failed %s:%d\n", __FILE__, __LINE__);
        return;
    }
    fprintf (stderr, "Running icache test...\n");

    // Measure instruction cache size
    printf ("Instruction cache size:\n");
    unsigned int sum_times[32];

    // Pass 1: 0.5 KB code-size increments, single block, single thread.
    printf (" 0.5 KB steps: ");
    Db.x = 1;
    for (int p2 = 1; p2 <= 32; p2++)
    {
        unsigned int sum_time = 0;
        bool failed = false;
        for (int i=0;i<100 && !failed ;i++)
        {
            cudaGetLastError(); // Clear previous error code, if any
            step_half_kb[p2-1]<<<Dg, Db>>>(d_ts, d_out, 1, p2, 2);
            if (cudaGetLastError() != cudaSuccess)
            {
                failed = true;
                break;
            }
            // cudaDeviceSynchronize replaces deprecated cudaThreadSynchronize.
            cudaDeviceSynchronize();
            cudaMemcpy(ts, d_ts, sizeof(ts), cudaMemcpyDeviceToHost);
            sum_time += (ts[1] - ts[0]);
        }
        if (failed)
        {
            printf ("xxxx ");
        }
        else
        {
            // Average latency per step over 100 runs of p2*64 instructions.
            printf ("%.5f ", sum_time/100.0/(p2*64)); fflush(stdout);
        }
        sum_times[p2-1] = sum_time;
    }
    printf (" (icache = ");
    for (int last_i=1, i=1;i<32;i++)
    {
        // A rise in per-step time marks an apparent i-cache boundary.
        if (sum_times[i]/(i+1) > sum_times[last_i]/(last_i+1) /**1.33*/)
        {
            printf ("%.1fKB ", i*0.5);
            last_i = i;
        }
    }
    printf (")\n");

    // Pass 2: 2 KB code-size increments.
    printf (" 2 KB steps: ");
    Db.x = 1;
    for (int p2 = 1; p2 <= 29/*32*/; p2++)
    {
        unsigned int sum_time = 0;
        bool failed = false;
        for (int i=0;i<100 && !failed ;i++)
        {
            cudaGetLastError(); // Clear previous error code, if any
            step_2_kb[p2-1]<<<Dg, Db>>>(d_ts, d_out, 1, p2, 2);
            if (cudaGetLastError() != cudaSuccess)
            {
                failed = true;
                break;
            }
            cudaDeviceSynchronize();
            cudaMemcpy(ts, d_ts, sizeof(ts), cudaMemcpyDeviceToHost);
            sum_time += (ts[1] - ts[0]);
        }
        if (failed)
        {
            printf ("xxxx ");
        }
        else
        {
            printf ("%.1f ", sum_time/100.0/(p2*256)); fflush(stdout);
        }
        sum_times[p2-1] = sum_time;
    }
    printf (" (icache = ");
    for (int last_i=1, i=1;i<28/*32*/;i++)
    {
        if (sum_times[i]/(i+1) > sum_times[last_i]/(last_i+1) /**1.33*/)
        {
            printf ("%.1fKB ", i*2.0);
            last_i = i;
        }
    }
    printf (")\n");

    // Pass 3: two concurrent thread blocks, to test i-cache sharing.
    printf ("\n Test instruction cache sharing by running two thread blocks concurently.\n");
    Db.x = 1;
    dim3 Dg2 = dim3(31,1,1);
    for (int blk2 = 1; blk2 <31; blk2+= (blk2 == 1? 9 : 10))
    {
        printf (" TPC 0,%d (2 KB steps): ", blk2);
        int mask = (1<<blk2) | 1; // Enable two blocks for execution
        for (int p2 = 1; p2 <= 28/*32*/; p2++)
        {
            unsigned int sum_time = 0;
            bool failed = false;
            for (int i=0;i<50 && !failed ;i++)
            {
                cudaGetLastError(); // Clear previous error code, if any
                step_2_kb[p2-1]<<<Dg2, Db>>>(d_ts, d_out, mask, p2, 2);
                if (cudaGetLastError() != cudaSuccess)
                {
                    failed = true;
                    break;
                }
                cudaDeviceSynchronize();
                cudaMemcpy(ts, d_ts, sizeof(ts), cudaMemcpyDeviceToHost);
                sum_time += (ts[1] - ts[0]);
            }
            if (failed)
            {
                printf ("xxxx ");
            }
            else
            {
                printf ("%.1f ", sum_time/50.0/(p2*256)); fflush(stdout);
            }
            sum_times[p2-1] = sum_time;
        }
        printf (" (apparent icache = ");
        // Fix: this pass fills sum_times[0..27] only; the original scanned up
        // to index 31 and compared stale entries left over from pass 2.
        for (int last_i=1, i=1;i<28;i++)
        {
            if (sum_times[i]/(i+1) > sum_times[last_i]/(last_i+1) *1.25)
            {
                printf ("%.1fKB ", i*2.0);
                last_i = i;
            }
        }
        printf (")\n");
    }
    printf ("\n");
    cudaFree(d_ts);
    cudaFree(d_out);
}
// Entry point: run the instruction-cache measurement and exit successfully.
int main()
{
    measure_icache();
    return 0;
}
|
1,285
|
#include "includes.h"
// Row-major linear index into an H x W image (H is unused; kept for
// signature symmetry with the call sites).
__device__ size_t GIDX(size_t row, size_t col, int H, int W) {
    return col + row * W;
}
// Gaussian blur: convolve d_I (H x W, row-major) with the
// (2*KERN_RADIUS+1)^2 stencil in gaussian_kernel, accumulating into d_Ib.
// Assumes d_Ib is zero-initialized by the caller (the loop uses +=), and
// that KERN_RADIUS / gaussian_kernel are provided by includes.h.
__global__ void kernel_blur(float* d_I, float* d_Ib, int H, int W) {
    size_t row = threadIdx.y + blockDim.y * blockIdx.y;
    size_t col = threadIdx.x + blockDim.x * blockIdx.x;
    size_t idx = GIDX(row, col, H, W);
    // Skip border pixels so the stencil reads stay in bounds.
    // NOTE(review): the <= comparisons also skip row/col == KERN_RADIUS,
    // which the stencil could handle (row - KERN_RADIUS == 0 is valid) --
    // confirm whether excluding that extra border line is intentional.
    if (row >= H - KERN_RADIUS || row <= KERN_RADIUS || col >= W - KERN_RADIUS || col <= KERN_RADIUS) {
        return;
    }
    // Walk the stencil window; count indexes gaussian_kernel row-major.
    int count = 0;
    for (int i = -KERN_RADIUS; i <= KERN_RADIUS; i++) {
        for (int j = -KERN_RADIUS; j <= KERN_RADIUS; j++) {
            d_Ib[idx] += d_I[GIDX(row + i, col + j, H, W)] * gaussian_kernel[count];
            count++;
        }
    }
}
|
1,286
|
/**
* @file markFilterEdge.cu
* @date Spring 2020, revised Spring 2021
* @author Hugo De Moraes
*/
#include <stdio.h>
#include <stdlib.h>
// Mark which edges survive filtering: keepEdges[e] = 1 iff neither endpoint
// of edge e (src[e], dst[e]) is already matched (matches[v] == -1 means
// unmatched). Grid-stride loop over the numEdges edges.
__global__ void markFilterEdges_gpu(int * src, int * dst, int * matches, int * keepEdges, int numEdges) {
    // Get Thread ID
    const int NUM_THREADS = blockDim.x * gridDim.x;
    const int COL = blockIdx.x * blockDim.x + threadIdx.x;
    const int ROW = blockIdx.y * blockDim.y + threadIdx.y;
    const int FIRST_T_ID = COL + ROW * NUM_THREADS;
    // Fix: loop bound was `<= numEdges`, which read src/dst and wrote
    // keepEdges one element past the end of the arrays.
    for(int curTID = FIRST_T_ID; curTID < numEdges; curTID += NUM_THREADS) {
        if(matches[src[curTID]] != -1 || matches[dst[curTID]] != -1) {
            keepEdges[curTID] = 0;
        }
        else {
            keepEdges[curTID] = 1;
        }
    }
}
|
1,287
|
extern "C"
{
// Accumulate the gradient of a mean-square loss:
// gradn[i] += pref * gradc[0] * (x[i] - y[i]), one thread per element.
__global__ void DmeanSquareLoss(const int lengthx, const double pref, const double *gradc, const double *x,const double *y, double *gradn )
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard the tail of the last block.
    if (idx >= lengthx)
        return;
    gradn[idx] += pref * gradc[0] * (x[idx] - y[idx]);
}
}
|
1,288
|
#include "includes.h"
// Division pass of a Cholesky-style factorization over the row-major
// MATRIX_SIZE x MATRIX_SIZE matrix U (MATRIX_SIZE from includes.h): each
// handled element is divided by the diagonal element of its (mapped) row.
// Each thread owns elem_per_thr consecutive linear indices.
__global__ void chol_kernel_cudaUFMG_division(float * U, int elem_per_thr) {
    // Get a thread identifier
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    int ty = blockIdx.y * blockDim.y + threadIdx.y;
    // Linear thread number across the whole 2D grid.
    int tn = ty * blockDim.x * gridDim.x + tx;
    //#define DEBUGDIV
    #ifdef DEBUGDIV
    // Debug tracing limited to one hand-picked thread: block (4,5), thread (2,1).
    int dbg = 0;
    if(blockIdx.x == 4){
    if(blockIdx.y == 5){
    if(threadIdx.x == 2){
    if(threadIdx.y == 1){
    dbg = 1;
    printf("\n\n");
    printf("\ntx=%d \nty=%d \ntn=%d", tx, ty, tn);
    }
    }
    }
    }
    #endif
    for(unsigned i=0;i<elem_per_thr;i++){
    // Linear element index, then its (column, row) coordinates.
    int iel = tn * elem_per_thr + i;
    int xval = iel % MATRIX_SIZE;
    int yval = iel / MATRIX_SIZE;
    // Diagonal elements are left untouched.
    if(xval == yval){
    continue;
    }
    #ifdef DEBUGDIV
    if(dbg == 1){
    if(i==37){
    printf("\niel=%d \nxval=%d \nyval=%d", iel, xval, yval);
    }
    }
    #endif
    // if on the lower diagonal...
    // NOTE(review): lower-triangle coordinates are mirrored through the
    // matrix center before use -- presumably so each lower-triangle index
    // maps onto an upper-triangle slot; confirm against the companion
    // kernels of this factorization.
    if(yval > xval){
    xval = MATRIX_SIZE - xval - 1;
    yval = MATRIX_SIZE - yval - 1;
    }
    // Element to update and the diagonal entry of its row.
    int iU = xval + yval * MATRIX_SIZE;
    int iDiag = yval + yval * MATRIX_SIZE;
    #ifdef DEBUGDIV
    if(dbg == 1){
    if(i==37){
    printf("\nxtrans=%d \nytrans=%d \niU=%d \niDiag=%d", xval, yval, iU, iDiag);
    printf("\n\n");
    }
    }
    #endif
    U[iU] /= U[iDiag];
    }
}
|
1,289
|
//Based on the work of Andrew Krepps
#include <stdio.h>
#include <stdlib.h> //srand and rand
#include <math.h>
// add function d
// Element-wise c = a + b; one thread per element.
__global__ void add(int *a, int *b, int *c, int n)
{
    // Global thread index; threads past the end do nothing.
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    c[tid] = a[tid] + b[tid];
}
// Element-wise c = a - b; one thread per element.
__global__ void subtract(int *a, int *b, int *c, int n){
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    c[tid] = a[tid] - b[tid];
}
// Element-wise c = a * b; one thread per element.
__global__ void mult(int *a, int *b, int *c, int n){
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    c[tid] = a[tid] * b[tid];
}
// Element-wise c = a % b; one thread per element.
__global__ void mod(int *a, int *b, int *c, int n){
    // Get our global thread ID
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    // Make sure we do not go out of bounds
    if (id < n) {
        // Fix: guard the divisor. The driver fills b with rand() % 4, which
        // includes 0, and integer remainder by zero is undefined on the GPU.
        // Define the result as 0 in that case.
        c[id] = (b[id] != 0) ? (a[id] % b[id]) : 0;
    }
}
// Driver: allocate vectors, run add/subtract/mult/mod on the GPU, copy the
// results back, and release all memory. Usage: prog [totalThreads] [blockSize].
int main(int argc, char** argv)
{
    // read command line arguments
    int totalThreads = (1 << 20);
    int blockSize = 256;
    if (argc >= 2) {
        totalThreads = atoi(argv[1]);
    }
    if (argc >= 3) {
        blockSize = atoi(argv[2]);
    }
    int numBlocks = totalThreads/blockSize;
    printf("Using %d Threads and %d BlockSize\n",totalThreads, blockSize);

    // validate command line arguments: round thread count up to a whole
    // number of blocks
    if (totalThreads % blockSize != 0) {
        ++numBlocks;
        totalThreads = numBlocks*blockSize;
        printf("Warning: Total thread count is not evenly divisible by the block size\n");
        printf("The total number of threads will be rounded up to %d\n", totalThreads);
    }

    // Host input vectors and one output vector per kernel
    int *h_a, *h_b;
    int *h_c_add,*h_c_sub,*h_c_mult,*h_c_mod;
    // Device input and output vectors
    int *d_a, *d_b;
    int *d_c_add,*d_c_sub,*d_c_mult,*d_c_mod;

    // Size, in bytes, of each vector
    size_t bytes = totalThreads*sizeof(int);

    // Allocate memory for each vector on host
    h_a = (int*)malloc(bytes);
    h_b = (int*)malloc(bytes);
    h_c_add = (int*)malloc(bytes);
    h_c_sub = (int*)malloc(bytes);
    h_c_mult = (int*)malloc(bytes);
    h_c_mod = (int*)malloc(bytes);

    // Allocate memory for each vector on GPU
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_c_add, bytes);
    cudaMalloc(&d_c_sub, bytes);
    cudaMalloc(&d_c_mult, bytes);
    cudaMalloc(&d_c_mod, bytes);

    // initialize the input vectors
    for(int i = 0;i<totalThreads;i++){
        // first array is 0 through number of threads
        h_a[i] = i;
        // second array is a random number between 0 and 3
        h_b[i] = rand() % 4;
    }

    // copy both input arrays from host to device
    cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice);

    // Launch-configuration fix: the second <<<>>> parameter is threads PER
    // BLOCK. The original passed totalThreads (up to 1<<20), which exceeds
    // the 1024-threads-per-block hardware limit, so every launch failed.
    printf("Performing Add function\n");
    add<<<numBlocks, blockSize>>>(d_a, d_b, d_c_add, totalThreads);
    printf("Performing subtract function\n");
    subtract<<<numBlocks, blockSize>>>(d_a, d_b, d_c_sub, totalThreads);
    printf("Performing mult function\n");
    mult<<<numBlocks, blockSize>>>(d_a, d_b, d_c_mult, totalThreads);
    printf("Performing mod function\n");
    mod<<<numBlocks, blockSize>>>(d_a, d_b, d_c_mod, totalThreads);

    // copy the output arrays from device to host (blocking: syncs with GPU)
    cudaMemcpy( h_c_add, d_c_add, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy( h_c_sub, d_c_sub, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy( h_c_mult, d_c_mult, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy( h_c_mod, d_c_mod, bytes, cudaMemcpyDeviceToHost);

    // Free all GPU memory. Fix: the original freed d_c_add twice, never
    // freed d_a, d_b, or d_c_mod, and leaked every host buffer.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c_add);
    cudaFree(d_c_sub);
    cudaFree(d_c_mult);
    cudaFree(d_c_mod);
    free(h_a);
    free(h_b);
    free(h_c_add);
    free(h_c_sub);
    free(h_c_mult);
    free(h_c_mod);
    return 0;
}
|
1,290
|
// filename: gaxpy2.cu
// NOTE(review): despite the header comment below, this kernel does NOT add
// two vectors -- it writes the thread's global index into c[i]; a and b are
// unused. Presumably an indexing demo/debug aid -- confirm before reuse.
// a simple CUDA kernel to add two vectors
extern "C" // ensure an unmangled symbol name (NOTE(review): the exported name is "gaxpy4", not "gaxpy2" as the filename comment suggests)
{
// Writes c[i] = (double)i for every i in [0, n). Launch with enough total
// threads to cover n; out-of-range threads are guarded.
__global__ void gaxpy4(const int n, const double *a, const double *b, double *c)
{
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i < n) {
c[i] = (double) i; // REMEMBER ZERO INDEXING IN C LANGUAGE!!
}
}
}
|
1,291
|
#include<cstdio>
#define S 64
#define ZERO 0
#define PI 3.14159265
#define LOW 9
#define HIGH 18
#define QUEUE_SIZE 128
#define KERNEL_RADIUS 8
extern "C" {
__device__
bool btwn(int a, int x, int y){
    // Half-open interval membership: true when a lies in [x, y).
    return !(a < x) && (a < y);
}
// Returns src[ind_y*m + ind_x] when (ind_x, ind_y) is inside the m x n
// image, and ZERO otherwise, so out-of-image halo cells read as zero.
__device__
int fetch_or_zero(int* src, int ind_x, int ind_y, int n, int m){
    if(btwn(ind_x, 0, m) && btwn(ind_y, 0, n)){
        return src[ind_y*m + ind_x];
    }
    return ZERO;
}
// Fills the 1-pixel halo ring around a 32x32 tile in the shared cache.
// (th_x, th_y) is this thread's global pixel; the tile interior at
// cache[threadIdx.y+1][threadIdx.x+1] must be loaded by the caller.
// Expects 32x32 thread blocks (the 0/31 edge tests are hard-coded).
//
// Fixes vs. the original:
//  * all four corner branches left `val` uninitialized when the neighbor
//    was out of range (only the edge branches set val = ZERO);
//  * the bottom-right corner (31,31) fetched (th_x+1, th_y-1) instead of
//    (th_x+1, th_y+1), loading the wrong pixel.
__device__
void load_to_shared(int* src, int cache[][S], int th_x, int th_y, int n, int m){
    // Corner cells of the halo.
    if(threadIdx.x==0 && threadIdx.y==0)
        cache[threadIdx.y][threadIdx.x] = fetch_or_zero(src, th_x-1, th_y-1, n, m);
    if(threadIdx.x==0 && threadIdx.y==31)
        cache[threadIdx.y+2][threadIdx.x] = fetch_or_zero(src, th_x-1, th_y+1, n, m);
    if(threadIdx.x==31 && threadIdx.y==0)
        cache[threadIdx.y][threadIdx.x+2] = fetch_or_zero(src, th_x+1, th_y-1, n, m);
    if(threadIdx.x==31 && threadIdx.y==31)
        cache[threadIdx.y+2][threadIdx.x+2] = fetch_or_zero(src, th_x+1, th_y+1, n, m);
    // Edge cells of the halo.
    if(threadIdx.y==0)
        cache[threadIdx.y][threadIdx.x+1] = fetch_or_zero(src, th_x, th_y-1, n, m);
    if(threadIdx.y==31)
        cache[threadIdx.y+2][threadIdx.x+1] = fetch_or_zero(src, th_x, th_y+1, n, m);
    if(threadIdx.x==0)
        cache[threadIdx.y+1][threadIdx.x] = fetch_or_zero(src, th_x-1, th_y, n, m);
    if(threadIdx.x==31)
        cache[threadIdx.y+1][threadIdx.x+2] = fetch_or_zero(src, th_x+1, th_y, n, m);
}
// Gradient kernel for the edge-detection pipeline. Expects 32x32 thread
// blocks exactly tiling a (gridDim.x*32) x (gridDim.y*32) image.
// Per pixel it writes:
//   dstMagni       - |dx| + |dy| (L1 gradient magnitude)
//   arcTangensOut  - gradient direction in degrees
// NOTE(review): the gradient here is a 2-tap central difference
// (left-right, below-above), not a full 3x3 Sobel stencil, despite the
// kernel's name.
__global__
void sobel(int* src, int* dstMagni, float * arcTangensOut){
__shared__ int cache[34][S];
int m = gridDim.x*32, n = gridDim.y*32,
th_x = blockIdx.x * 32 + threadIdx.x,
th_y = blockIdx.y * 32 + threadIdx.y,
i_src = th_y*m + th_x, ind_x, ind_y,
magn_x, magn_y, magnAbs_x, magnAbs_y;
/*now we load to shared with a frame of thickness eq 1*/
cache[threadIdx.y+1][threadIdx.x+1] = src[i_src];
load_to_shared(src, cache, th_x, th_y, n, m);
ind_y = threadIdx.y+1;
ind_x = threadIdx.x+1;
__syncthreads();
// Central differences read the 1-pixel halo filled by load_to_shared.
magn_x = cache[ind_y][ind_x-1] - cache[ind_y][ind_x+1];
magnAbs_x = ((magn_x>0) ? magn_x : -magn_x);
magn_y = cache[ind_y+1][ind_x] - cache[ind_y-1][ind_x];
magnAbs_y = ((magn_y>0) ? magn_y : -magn_y);
dstMagni[i_src] = magnAbs_x + magnAbs_y;
// Direction in degrees. NOTE(review): PI is a double literal, so this
// multiply/divide runs in double precision -- presumably atan2f and a
// float constant were intended for a float output.
arcTangensOut[i_src] = atan2((float) magn_y,(float) magn_x)
* 180 / PI;
}
// Canny-style non-maximal suppression: a pixel keeps its gradient
// magnitude only if it is a local maximum along its gradient direction;
// otherwise it is zeroed. Expects 32x32 thread blocks tiling the image;
// magnitudes are staged in a shared tile with a 1-pixel halo.
//
// Fix: the horizontal sector's lower bound was 157.25 (a typo); the 45-degree
// sectors start at 22.5 + k*45, so it must be 157.5 -- otherwise angles in
// [157.25, 157.5) were compared against the wrong neighbors.
__global__
void nonMaximalSupression(int * magn, float * arcTangens, int * dest) {
    __shared__ int cacheMagn[34][S];
    int m = gridDim.x*32, n = gridDim.y*32,
    th_x = blockIdx.x * 32 + threadIdx.x,
    th_y = blockIdx.y * 32 + threadIdx.y,
    i_src = th_y*m + th_x, ind_x, ind_y;
    float angle;
    cacheMagn[threadIdx.y+1][threadIdx.x+1] = magn[i_src];
    load_to_shared(magn, cacheMagn, th_x, th_y, n, m);
    ind_y = threadIdx.y+1; ind_x = threadIdx.x+1;
    __syncthreads();
    angle = arcTangens[i_src];
    if (angle < 0) angle = 360 + angle;   // normalize to [0, 360)
    int centerCell = cacheMagn[ind_y][ind_x];
    dest[i_src] = centerCell;             // keep by default, suppress below
    // gradient ~ horizontal: compare east/west neighbors
    if ((337.5 <= angle || angle < 22.5) ||
    (157.5 <= angle && angle < 202.5)) {
    if (cacheMagn[ind_y][ind_x+1] > centerCell ||
    cacheMagn[ind_y][ind_x-1] > centerCell)
    dest[i_src] = 0;
    } // gradient ~ diagonal NE/SW
    else if ((22.5 <= angle && angle < 67.5) ||
    (202.5 <= angle && angle < 247.5)) {
    if (cacheMagn[ind_y-1][ind_x+1] > centerCell ||
    cacheMagn[ind_y+1][ind_x-1] > centerCell)
    dest[i_src] = 0;
    } // gradient ~ vertical: compare north/south neighbors
    else if ((67.5 <= angle && angle < 112.5) ||
    (247.5 <= angle && angle < 292.5)) {
    if (cacheMagn[ind_y+1][ind_x] > centerCell ||
    cacheMagn[ind_y-1][ind_x] > centerCell)
    dest[i_src] = 0;
    } // gradient ~ diagonal NW/SE
    else if ((112.5 <= angle && angle < 157.5) ||
    (292.5 <= angle || angle < 337.5)) {
    if (cacheMagn[ind_y-1][ind_x-1] > centerCell ||
    cacheMagn[ind_y+1][ind_x+1] > centerCell)
    dest[i_src] = 0;
    }
}
// Thresholds the magnitude image in place for the hysteresis BFS:
//   value <  LOW  -> 0  (suppressed)
//   value >= HIGH -> -2 (strong edge, BFS seed)
//   otherwise     -> -1 (weak edge, resolved later by the BFS passes)
__global__
void prepareBfs(int* src){
    int width = gridDim.x * 32;
    int gx = blockIdx.x * 32 + threadIdx.x;
    int gy = blockIdx.y * 32 + threadIdx.y;
    int idx = gy * width + gx;
    int v = src[idx];
    src[idx] = (v < LOW) ? 0 : ((v >= HIGH) ? -2 : -1);
}
// One relaxation pass of hysteresis thresholding within a 32x32 tile.
// Cell encoding (set by prepareBfs): 0 suppressed, -1 weak, -2 strong.
// A weak cell 8-adjacent to a strong cell is promoted to strong, then the
// promotion is flood-filled across the tile via a per-thread queue;
// *changed is set to 1 so the host can iterate passes to a fixed point.
// NOTE(review): each enqueue pushes 2 ints into the fixed 128-entry local
// queue; a long chain of connected weak pixels could overflow it --
// confirm the worst-case bound.
// NOTE(review): threads read and write `cache` inside the BFS loop with no
// synchronization; cells only ever move -1 -> -2, which presumably keeps
// the result correct, but this is a data race by the letter.
__global__
void oneBfs(int* src, int* dst, int* changed){
__shared__ int cache[34][S];
int m = gridDim.x*32, n = gridDim.y*32,
th_x = blockIdx.x * 32 + threadIdx.x,
th_y = blockIdx.y * 32 + threadIdx.y,
i_src = th_y*m + th_x,
ind_y = threadIdx.y+1, ind_x = threadIdx.x+1,
queue[QUEUE_SIZE],
beg=0, end=0, val,
procInd_x, procInd_y, x_new, y_new;
cache[threadIdx.y+1][threadIdx.x+1] = src[i_src];
val = cache[ind_y][ind_x];
load_to_shared(src, cache, th_x, th_y, n, m);
__syncthreads();
// Seed: if this thread's weak cell touches a strong cell, promote it and
// enqueue it (i=2/j=2 is an early break out of both loops).
if(val==-1){
for(int i=-1; i<2; ++i){
for(int j=-1; j<2; ++j){
procInd_x = ind_x+i;
procInd_y = ind_y+j;
if(cache[procInd_y][procInd_x]==-2){
queue[end++] = ind_y;
queue[end++] = ind_x;
cache[ind_y][ind_x]=-2;
*changed=1;
i=2;
j=2;
}
}
}
}
// Flood fill inside the tile interior (halo cells are read-only here:
// btwn(.,1,33) excludes them from promotion).
while(beg!=end){
procInd_y = queue[beg++];
procInd_x = queue[beg++];
for(int i=-1; i<2; ++i){
for(int j=-1; j<2; ++j){
x_new = procInd_x+i;
y_new = procInd_y+j;
if(cache[y_new][x_new]==-1 && btwn(y_new, 1, 33)
&& btwn(x_new, 1, 33)){
queue[end++] = y_new;
queue[end++] = x_new;
cache[y_new][x_new]=-2;
}
}
}
}
__syncthreads();
dst[i_src] = cache[ind_y][ind_x];
}
// Binarizes the hysteresis result in place: strong/connected edge cells
// (-2) become 255; everything else becomes 0.
__global__
void final_battle(int* src){
    int width = gridDim.x * 32;
    int gx = blockIdx.x * 32 + threadIdx.x;
    int gy = blockIdx.y * 32 + threadIdx.y;
    int idx = gy * width + gx;
    src[idx] = (src[idx] == -2) ? 255 : 0;
}
// Box-style blur of radius KERNEL_RADIUS, zero-padded at the image border.
// NOTE(review): despite the name, no Gaussian weights are applied (all taps
// weight 1); the commented d_kernel reference suggests weights were planned.
//
// Fix: the original staged only a 1-pixel halo in shared memory but then
// indexed the cache with offsets up to +/-KERNEL_RADIUS (8), reading rows up
// to index 48 of a 34-row array -- out of bounds and mostly uninitialized.
// This version reads directly from global memory with bounds checks.
__global__
void gaussianFilter(int * src, int * dest) {
    int n = gridDim.y*32;   // image height
    int m = gridDim.x*32;   // image width
    int th_x = blockIdx.x * 32 + threadIdx.x;
    int th_y = blockIdx.y * 32 + threadIdx.y;
    int i_src = th_y*m + th_x;
    int sum = 0;
    for (int i = -KERNEL_RADIUS; i <= KERNEL_RADIUS; i++) {
        for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++) {
            int y = th_y + i, x = th_x + j;
            if (btwn(x, 0, m) && btwn(y, 0, n))
                sum += src[y*m + x]; // * d_kernel[KERNEL_RADIUS + j] would weight here
        }
    }
    dest[i_src] = sum;
}
}
|
1,292
|
#include <cstdio>
#include <functional>
#include <iostream>
#include <random>
#define BLOCKSIZE 256
// Fills `matrix` (height*width floats, row-major) with uniform random
// values in [-1, 1). Seeded from the wall clock, so contents differ per run.
void FillMatrix(float* matrix, int height, int width) {
    std::mt19937 engine(time(0));
    std::uniform_real_distribution<float> dist(-1.0f, 1.0f);
    const int total = height * width;
    for (int idx = 0; idx < total; ++idx) {
        matrix[idx] = dist(engine);
    }
}
// Prints a height x width row-major matrix to stdout, one row per line,
// values separated by single spaces.
void PrintMatrix(float *matrix, int height, int width) {
    for (int row = 0; row < height; ++row) {
        for (int col = 0; col < width; ++col) {
            std::cout << matrix[row * width + col] << " ";
        }
        std::cout << std::endl;
    }
}
// Naive matrix multiply C = A * B.
// A is (height x mid_size), B is (mid_size x width), C is (height x width),
// where rows are indexed by the x launch dimension and columns by y
// (width = blockDim.y * gridDim.y). The grid must tile the output exactly;
// there is no bounds guard.
//
// Fixes: accumulate into a register instead of read-modify-writing global
// C[] on every iteration, and drop the unused `height` local.
__global__
void kernel_mul(float *A, float *B, float *C, int mid_size) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // output row
    int j = blockIdx.y * blockDim.y + threadIdx.y;   // output column
    int width = blockDim.y * gridDim.y;
    float acc = 0.0f;
    for (int k = 0; k < mid_size; ++k) {
        acc += A[i * mid_size + k] * B[k * width + j];
    }
    C[i * width + j] = acc;
}
// Shared-memory tiled multiply C = A * B with 16x16 tiles.
// Same layout as kernel_mul: rows via the x dimension, columns via y,
// width = blockDim.y * gridDim.y. Requires 16x16 thread blocks and
// mid_size divisible by 16 (both hold for the 16x16 / mid_size=384 launch
// in this file).
//
// Fix: the original loaded only blockDim-sized slivers of A and B into
// shared memory (indexed by a single threadIdx component) and then read
// `mid_size` elements from them, mixing unrelated rows/columns with
// uninitialized entries. This version stages matching 16x16 tiles of A and
// B per outer iteration, with barriers on both sides of the tile reuse.
__global__
void kernel_my_mul(float *A, float *B, float *C, int mid_size) {
    const int TILE = 16;   // must equal blockDim.x and blockDim.y
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // output row
    int j = blockIdx.y * blockDim.y + threadIdx.y;   // output column
    int width = blockDim.y * gridDim.y;
    __shared__ float tile_a[TILE][TILE];
    __shared__ float tile_b[TILE][TILE];
    float acc = 0.0f;
    for (int t = 0; t < mid_size; t += TILE) {
        tile_a[threadIdx.x][threadIdx.y] = A[i * mid_size + t + threadIdx.y];
        tile_b[threadIdx.x][threadIdx.y] = B[(t + threadIdx.x) * width + j];
        __syncthreads();   // tiles fully staged before anyone reads them
        for (int k = 0; k < TILE; ++k)
            acc += tile_a[threadIdx.x][k] * tile_b[k][threadIdx.y];
        __syncthreads();   // done reading before the next iteration overwrites
    }
    C[i * width + j] = acc;
}
// Multiplies h_A (128x384) by h_B (384x256) on the GPU twice -- once with
// the naive kernel, once with the shared-memory kernel -- timing each run
// with CUDA events, printing the elapsed milliseconds, and dumping the
// 128x256 result into h_C (and to stdout) after each run.
//
// Fixes: the original created four cudaEvent_t objects and destroyed none
// of them; one reusable pair is created and destroyed here.
void try_both_multiplications(float *h_A, float *h_B, float *h_C) {
    const int ROWS = 128, MID = 384, COLS = 256;
    float* d_A;
    float* d_B;
    float* d_C;
    cudaMalloc(&d_A, sizeof(float) * ROWS * MID);
    cudaMalloc(&d_B, sizeof(float) * MID * COLS);
    cudaMalloc(&d_C, sizeof(float) * ROWS * COLS);
    cudaMemcpy(d_A, h_A, sizeof(float) * ROWS * MID, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(float) * MID * COLS, cudaMemcpyHostToDevice);
    // 8x16 blocks of 16x16 threads: x covers the 128 rows, y the 256 columns.
    dim3 num_blocks(8, 16);
    dim3 block_size(16, 16);
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // --- naive kernel ---
    cudaEventRecord(start);
    kernel_mul<<<num_blocks, block_size>>>(d_A, d_B, d_C, MID);
    cudaEventRecord(stop);
    cudaMemcpy(h_C, d_C, sizeof(float) * ROWS * COLS, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    std::cout << "banal elapsed in " << milliseconds << std::endl;
    PrintMatrix(h_C, ROWS, COLS);
    // --- shared-memory kernel (events are safely re-recorded) ---
    cudaEventRecord(start);
    kernel_my_mul<<<num_blocks, block_size>>>(d_A, d_B, d_C, MID);
    cudaEventRecord(stop);
    cudaMemcpy(h_C, d_C, sizeof(float) * ROWS * COLS, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float milliseconds2 = 0;
    cudaEventElapsedTime(&milliseconds2, start, stop);
    std::cout << "shared memory elapsed in " << milliseconds2 << std::endl;
    PrintMatrix(h_C, ROWS, COLS);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Allocates the host matrices, fills A and B with random data, runs both
// GPU multiplication variants, and releases the host buffers.
int main() {
    // A: 128x384, B: 384x256, C = A*B: 128x256
    float *h_A = new float[128 * 384];
    float *h_B = new float[384 * 256];
    float *h_C = new float[128 * 256];
    FillMatrix(h_A, 128, 384);
    FillMatrix(h_B, 384, 256);
    try_both_multiplications(h_A, h_B, h_C);
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    return 0;
}
|
1,293
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
static const int N = 10;
#define CHECK_STATUS(status) \
if (status != cudaSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
cudaGetErrorString(status))
// Element-wise vector add: C[i] = A[i] + B[i].
// Launch with a single block of exactly N threads -- there is no bounds
// guard, so the thread count must match the array length.
__global__ void VecAdd(float *A, float *B, float *C) {
    int idx = threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
// Demo driver: adds two N-element vectors on the GPU and prints the result
// (expected output: c[i] == 2*i). Every CUDA call is checked via CHECK_STATUS.
int main(int argc, char **argv) {
CHECK_STATUS(cudaSetDevice(0));
float a[N];
float b[N];
// Initialize both inputs to 0..N-1.
for(int i = 0;i<N;i++){
a[i] = i;
b[i] = i;
}
float c[N];
float *d_a,*d_b,*d_c;
// Allocate device memory.
CHECK_STATUS(cudaMalloc(&d_a, N*sizeof(float)));
CHECK_STATUS(cudaMalloc(&d_b, N*sizeof(float)));
CHECK_STATUS(cudaMalloc(&d_c, N*sizeof(float)));
// Copy the inputs from host memory to device memory.
CHECK_STATUS(cudaMemcpy(d_a,a,N* sizeof(float),cudaMemcpyHostToDevice));
CHECK_STATUS(cudaMemcpy(d_b,b,N* sizeof(float),cudaMemcpyHostToDevice));
// Launch the kernel: one block of exactly N threads (N is small).
VecAdd<<<1,N>>>(d_a,d_b,d_c);
// Check for launch-configuration errors.
CHECK_STATUS(cudaGetLastError());
// Copy the result back to host memory (this cudaMemcpy also synchronizes).
CHECK_STATUS(cudaMemcpy(c,d_c,N* sizeof(float),cudaMemcpyDeviceToHost));
// Print the result.
for(int i=0;i<N;i++)
printf("%f ",c[i]);
printf("\n");
// Release device memory.
CHECK_STATUS(cudaFree(d_a));
CHECK_STATUS(cudaFree(d_b));
CHECK_STATUS(cudaFree(d_c));
return 0;
}
|
1,294
|
#include "includes.h"
// Computes the per-element error in place: out[i] <- label[i] - out[i].
// Only threadIdx.x is used for indexing, so launch with a single block
// whose x dimension covers the element count.
__global__ void Ecalc2(float* out, const float* label)
{
    int idx = threadIdx.x;
    out[idx] = label[idx] - out[idx];
}
|
1,295
|
#include "stdio.h"
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
// Defining number of elements in Array
#define N 50000
// Grid-stride element-wise add over fixed-size (N) vectors:
// d_c[i] = d_a[i] + d_b[i]. Correct for any launch configuration.
__global__ void gpuAdd(int *d_a, int *d_b, int *d_c) {
  for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N;
       idx += blockDim.x * gridDim.x) {
    d_c[idx] = d_a[idx] + d_b[idx];
  }
}
// Splits a 2N-element vector add across two CUDA streams: each stream
// copies its half in, runs gpuAdd, and copies its half out, so transfers
// and compute can overlap. Host buffers are pinned for async copies; the
// whole pipeline is timed with events and the result verified on the host.
//
// Fixes vs. the original:
//  * the D2H copy of d_c1 was issued on stream0, racing with stream1's
//    kernel that produces d_c1 -- it now runs on stream1;
//  * cudaFree was called twice on d_a0/d_b0/d_c0 while d_a1/d_b1/d_c1
//    leaked -- each buffer is now freed exactly once;
//  * streams and events are now destroyed.
int main(void) {
  int *h_a, *h_b, *h_c;
  int *d_a0, *d_b0, *d_c0;   // stream0's half
  int *d_a1, *d_b1, *d_c1;   // stream1's half
  cudaStream_t stream0, stream1;
  cudaStreamCreate(&stream0);
  cudaStreamCreate(&stream1);
  cudaEvent_t e_start, e_stop;
  cudaEventCreate(&e_start);
  cudaEventCreate(&e_stop);
  cudaEventRecord(e_start, 0);
  // Pinned host memory enables true async copies.
  cudaHostAlloc((void **)&h_a, 2 * N * sizeof(int), cudaHostAllocDefault);
  cudaHostAlloc((void **)&h_b, 2 * N * sizeof(int), cudaHostAllocDefault);
  cudaHostAlloc((void **)&h_c, 2 * N * sizeof(int), cudaHostAllocDefault);
  cudaMalloc((void **)&d_a0, N * sizeof(int));
  cudaMalloc((void **)&d_b0, N * sizeof(int));
  cudaMalloc((void **)&d_c0, N * sizeof(int));
  cudaMalloc((void **)&d_a1, N * sizeof(int));
  cudaMalloc((void **)&d_b1, N * sizeof(int));
  cudaMalloc((void **)&d_c1, N * sizeof(int));
  for (int i = 0; i < N * 2; i++) {
    h_a[i] = 2 * i * i;
    h_b[i] = i;
  }
  cudaMemcpyAsync(d_a0, h_a, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
  cudaMemcpyAsync(d_a1, h_a + N, N * sizeof(int), cudaMemcpyHostToDevice,
                  stream1);
  cudaMemcpyAsync(d_b0, h_b, N * sizeof(int), cudaMemcpyHostToDevice, stream0);
  cudaMemcpyAsync(d_b1, h_b + N, N * sizeof(int), cudaMemcpyHostToDevice,
                  stream1);
  gpuAdd<<<512, 512, 0, stream0>>>(d_a0, d_b0, d_c0);
  gpuAdd<<<512, 512, 0, stream1>>>(d_a1, d_b1, d_c1);
  cudaMemcpyAsync(h_c, d_c0, N * sizeof(int), cudaMemcpyDeviceToHost, stream0);
  // Must be stream1: d_c1 is produced by stream1's kernel.
  cudaMemcpyAsync(h_c + N, d_c1, N * sizeof(int), cudaMemcpyDeviceToHost,
                  stream1);
  cudaStreamSynchronize(stream0);
  cudaStreamSynchronize(stream1);
  cudaEventRecord(e_stop, 0);
  cudaEventSynchronize(e_stop);
  float elapsedTime;
  cudaEventElapsedTime(&elapsedTime, e_start, e_stop);
  printf("Time to add %d numbers: %3.1f ms\n", 2 * N, elapsedTime);
  int Correct = 1;
  printf("Vector addition on GPU \n");
  // Verify every element against the host-side sum.
  for (int i = 0; i < 2 * N; i++) {
    if ((h_a[i] + h_b[i] != h_c[i])) {
      Correct = 0;
    }
  }
  if (Correct == 1) {
    printf("GPU has computed Sum Correctly\n");
  } else {
    printf("There is an Error in GPU Computation\n");
  }
  // Free up memory
  cudaFree(d_a0);
  cudaFree(d_b0);
  cudaFree(d_c0);
  cudaFree(d_a1);
  cudaFree(d_b1);
  cudaFree(d_c1);
  cudaFreeHost(h_a);
  cudaFreeHost(h_b);
  cudaFreeHost(h_c);
  cudaStreamDestroy(stream0);
  cudaStreamDestroy(stream1);
  cudaEventDestroy(e_start);
  cudaEventDestroy(e_stop);
  return 0;
}
|
1,296
|
#include <stdio.h>
// Each thread adds `el_per_thread` consecutive elements:
// c[k] = a[k] + b[k] for k in [i*el_per_thread, (i+1)*el_per_thread),
// where i is the thread's flat global index. The caller must size the
// launch so every covered index is valid (no bounds guard).
__global__ void kadd(float *a, float *b, float *c, const unsigned int el_per_thread)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int base = tid * el_per_thread;
    for(unsigned int k = 0; k < el_per_thread; k++) {
        unsigned int pos = base + k;
        c[pos] = a[pos] + b[pos];
    }
}
|
1,297
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N 1000000
#define B 1024
__global__ void prescan( float *g_idata, float *INCR, int n);
void scanCPU(float *f_out, float *f_in, int i_n);
// Returns the elapsed time (end - start) in seconds, including the
// microsecond fraction.
double myDiffTime(struct timeval &start, struct timeval &end)
{
	const double t0 = (double)(start.tv_sec + start.tv_usec/1000000.0);
	const double t1 = (double)(end.tv_sec + end.tv_usec/1000000.0);
	return t1 - t0;
}
// Times a GPU prefix scan against the CPU reference and prints both timings.
//
// Fixes vs. the original:
//  * float a[N]/c[N]/g[N] with N = 1,000,000 put ~12 MB on the stack --
//    they are heap-allocated now;
//  * dev_a/dev_g were assigned three times (cudaHostAlloc, then malloc,
//    then cudaMalloc), leaking the first two allocations each;
//  * the kernel's INCR argument was a *host* stack array (INCRR) -- it is
//    now a proper device buffer of B floats;
//  * `size` was declared float; byte counts are size_t.
int main(int argc, char **argv)
{
	float *a = (float *)malloc(N * sizeof(float));
	float *c = (float *)malloc(N * sizeof(float));
	float *g = (float *)malloc(N * sizeof(float));
	timeval start, end;
	float *dev_a, *dev_g, *dev_incr;
	size_t size = N * sizeof(float);
	double d_gpuTime, d_cpuTime;
	cudaMalloc((void **)&dev_a, size);
	cudaMalloc((void **)&dev_g, size);
	cudaMalloc((void **)&dev_incr, B * sizeof(float));   // one entry per block
	for (int i = 0; i < N; i++)
	{
		// a[i] = (float)(rand() % 1000000)/1000.0;
		a[i] = i+1;
	}
	int BATCH = (N/B);
	gettimeofday(&start, NULL);
	cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
	prescan <<< BATCH, B >>> (dev_a, dev_incr, N);
	cudaDeviceSynchronize();
	// NOTE(review): dev_g is never written by the kernel, so g's contents
	// are undefined -- kept for parity with the original flow.
	cudaMemcpy(g, dev_g, size, cudaMemcpyDeviceToHost);
	gettimeofday(&end, NULL);
	d_gpuTime = myDiffTime(start, end);
	gettimeofday(&start, NULL);
	scanCPU(c, a, N);
	gettimeofday(&end, NULL);
	d_cpuTime = myDiffTime(start, end);
	cudaFree(dev_a); cudaFree(dev_g); cudaFree(dev_incr);
	printf("GPU Time for scan size %i: %f\n", N, d_gpuTime);
	printf("CPU Time for scan size %i: %f\n", N, d_cpuTime);
	free(a); free(c); free(g);
	return 0;
}
// Work-in-progress Blelloch-style scan kernel.
// NOTE(review): this kernel is not functional as written:
//  * multiple `extern __shared__` arrays (temp, g_odata, SUMS) all alias
//    the SAME dynamic shared-memory buffer in CUDA, and the launch passes
//    no dynamic shared-memory size at all (third <<<>>> argument omitted),
//    so the buffer is zero bytes;
//  * `thid` is a *global* thread index but is used to address the
//    block-local shared array, so every block past the first indexes far
//    out of its shared allocation;
//  * the inner `for(int d = B>>1; ...)` loop computes ai/bi and discards
//    them -- it has no effect beyond its barriers;
//  * nothing is ever written back to a global output array (the writes
//    are commented out), so results never leave the kernel.
// The structure is kept byte-for-byte; comments below annotate intent.
__global__ void prescan( float *g_idata, float *INCR, int n)
{
extern __shared__ float temp[], g_odata[], SUMS[];
// allocated on invocation
int thid = threadIdx.x + (blockIdx.x * blockDim.x);
int offset = 1;
temp[2*thid] = g_idata[2*thid];
// load input into shared memory
temp[2*thid+1] = g_idata[2*thid+1];
// Up-sweep (reduce) phase: build partial sums in place up the tree.
for (int d = B>>1; d > 0; d >>= 1)
// build sum in place up the tree
{
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
temp[bi] += temp[ai];
}
offset *= 2;
// NOTE(review): this nested loop shadows `d`, computes ai/bi, and never
// uses them -- presumably leftover scaffolding.
for(int d =B>>1; d>0; d>>=1){
//build up sums
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
}
}
// NOTE(review): `SUMS` is an extern shared symbol, so `if (SUMS)` is
// always true; SUMS[B] also writes one past a B-element layout.
if (SUMS && thid == 0)
{
SUMS[B] = temp[2*B-1];
temp[n - 1] = 0;
}
// clear the last element
// Down-sweep phase skeleton; NOTE(review): it swaps through SUMS/INCR
// rather than temp, which does not match the Blelloch down-sweep.
for (int d = 1; d < n; d *= 2)
// traverse down tree & build scan
{
offset >>= 1;
__syncthreads();
if (thid < d)
{
int ai = offset*(2*thid+1)-1;
int bi = offset*(2*thid+2)-1;
float t = SUMS[ai];
SUMS[ai] = SUMS[bi];
SUMS[bi] += INCR[bi];
}
}
__syncthreads();
temp[2*thid] = INCR[2*thid];
// write results to device memory
// g_odata[2*thid+1] = temp[2*thid+1]
// g_odata[2*thid+1] = INCR[thid];
}
}
// Exclusive prefix sum on the CPU: f_out[i] = f_in[0] + ... + f_in[i-1],
// with f_out[0] = 0. Serves as the reference result for the GPU scan.
void scanCPU(float *f_out, float *f_in, int i_n)
{
	f_out[0] = 0;
	for (int idx = 1; idx < i_n; ++idx)
	{
		f_out[idx] = f_out[idx - 1] + f_in[idx - 1];
	}
}
|
1,298
|
#include "includes.h"
using namespace std;
// this amazingly nice error checking function is stolen from:
//https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
// Tiled matrix multiply: OutMat (Arows x Bcols) = Mat1 (Arows x Acols) *
// Mat2 (Acols x Bcols), all row-major. Launch with BLOCKSIZE x BLOCKSIZE
// thread blocks (BLOCKSIZE comes from includes.h) and a grid covering the
// output; tiles hanging past a matrix edge are padded with zeros.
__global__ void MatrixMulKernel(double *OutMat, double *Mat1, double *Mat2, int Arows, int Acols, int Bcols) {
	// row and column within submatrix
	int blockrow = blockIdx.y;
	int row = threadIdx.y;
	int blockcol = blockIdx.x;
	int col = threadIdx.x;
	// shared tiles of Mat1 and Mat2, refilled each iteration of the B loop
	__shared__ double subAshared[BLOCKSIZE*BLOCKSIZE];
	__shared__ double subBshared[BLOCKSIZE*BLOCKSIZE];
	double Cvalue = 0;
	// NOTE: Acols / BLOCKSIZE is *integer* division, so the ceil() is a
	// no-op; the +1 is what covers a partial final tile (at the cost of one
	// all-zero iteration when Acols divides evenly).
	for (int B = 0; B < ceil((double)(Acols / BLOCKSIZE)) + 1; B++) {
		// fetch from global memory
		/* notice:
		1) how these indexes are actually offset a multiple of B, *not 1*.
		2) threads are offset by col which will be 1 apart for each thread
		3) which means that means all threads in the warp are hitting successive global memory cells
		*/
		int Mat1index = (row + blockrow*BLOCKSIZE)*Acols + col + B*BLOCKSIZE;
		int Mat2index = (B*BLOCKSIZE + row)*Bcols + BLOCKSIZE*blockcol + col;
		if (Mat1index < Arows*Acols)
			subAshared[row*BLOCKSIZE + col] = Mat1[Mat1index];
		else
			subAshared[row*BLOCKSIZE + col] = 0;
		if (Mat2index < Acols*Bcols)
			subBshared[row*BLOCKSIZE + col] = Mat2[Mat2index];
		else
			subBshared[row*BLOCKSIZE + col] = 0;
		__syncthreads();
		// multiply-accumulate entirely out of shared memory (fast); the
		// original's inner bounds test was always true and has been dropped.
		for (int j = 0; j < BLOCKSIZE; j++)
			Cvalue += subAshared[row*BLOCKSIZE + j]*subBshared[j*BLOCKSIZE + col];
		__syncthreads();
	}
	// Fix: the original guarded the store with (row < Arows && col < Bcols),
	// comparing *within-block* indices to matrix dimensions, so edge blocks
	// of an overshooting grid wrote out of bounds. Guard on the global
	// output coordinates instead.
	int finalmatrow = blockrow*BLOCKSIZE + row;
	int finalmatcol = blockcol*BLOCKSIZE + col;
	if ((finalmatrow < Arows) && (finalmatcol < Bcols)) {
		OutMat[finalmatrow*Bcols + finalmatcol] = Cvalue;
	}
}
|
1,299
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
//void Algorithm1();
void Algorithm4(int m, int n, int l);
//for gemm 4 algorithm
#define BLOCK_SIZE_x 16
#define BLOCK_SIZE_y 4
// Register-blocked GEMM tile kernel, templated on the block dimensions
// (launched as 16x4 in this file). Each block stages block_size_x x
// block_size_x chunks of B in shared memory (inner dimension padded +1,
// which avoids shared-memory bank conflicts) and each thread accumulates
// block_size_x partial results in the register array `c`.
// NOTE(review): judging by the stride-m advance of idx_A and the stride-n
// row indexing of idx_B, A and C appear to be treated as column-major
// while the host fills its buffers row-major -- confirm the intended
// layout against the caller before relying on numeric results.
template<int block_size_x, int block_size_y>
__global__ void device_Matrix_multi(const double* const device_matrix_A,const double* const device_matrix_B,double* device_matrix_C,const int m,const int n,const int l)
{
const int threadid_x = threadIdx.x;
const int threadid_y = threadIdx.y;
const int blockid_x = blockIdx.x;
const int blockid_y = blockIdx.y;
// +1 padding on the inner dimension staggers the banks.
__shared__ double matrix_B_shared[block_size_x][block_size_x+1];
// Per-thread accumulator: one partial result per output column of the tile.
double c[block_size_x];
for (int i = 0; i< block_size_x; i++)
{
c[i] = 0.0;
}
int idx_A = blockid_x*block_size_x*block_size_y + threadid_x + threadid_y*block_size_x;
int idx_B = threadid_x + (blockid_y*block_size_x + threadid_y)*n;
int idx_B_last = idx_B + n;
int col_A = 0;
do
{
// Cooperative load of a block_size_x x block_size_x chunk of B.
for(int i = 0; i < block_size_x; i += block_size_y)
matrix_B_shared[threadid_x][threadid_y + i] = device_matrix_B[idx_B + i*n];
idx_B += block_size_x;
__syncthreads();
// Guard the final partial chunk when n is not a multiple of block_size_x.
int i_bound = min(block_size_x, n - col_A);
for (int i = 0; i < i_bound; i++, idx_A+=m)
{
for (int j = 0; j < block_size_x; j++)
{
c[j] += device_matrix_A[idx_A]*matrix_B_shared[i][j];
}
}
col_A += block_size_x;
__syncthreads();
}while (idx_B < idx_B_last);
// Write back this thread's column of results, bounds-checked against m and l.
if (blockid_x*block_size_x*block_size_y + threadid_x + threadid_y*block_size_x < m)
{
int idx_D = blockid_x*block_size_x*block_size_y + (threadid_x + threadid_y*block_size_x) + blockid_y*block_size_x*m;
int i_bound = min(block_size_x, l - blockid_y*block_size_x);
for (int i = 0; i < i_bound; i++, idx_D += m)
{
device_matrix_C[idx_D] = c[i];
}
}
}
// Benchmarks the GEMM kernel on square problems from 32 up to 4096,
// doubling the size each run.
int main()
{
    for (int size = 32; size <= 4096; size *= 2)
    {
        Algorithm4(size, size, size);
    }
    return 0;
}
// Benchmarks the register-blocked GEMM kernel on random matrices:
// C (m x l) = A (m x n) * B (n x l), timing with CUDA events and printing
// the launch geometry and elapsed milliseconds.
//
// Fixes vs. the original:
//  * cudaEventElapsedTime was called without cudaEventSynchronize(stop),
//    so the reported time was unreliable;
//  * matrix_B was allocated m*l and initialized with m x n loops -- it is
//    n x l (identical for the square calls in main, wrong otherwise);
//  * the leftover empty m x n loop after the copy-back is removed;
//  * the events are now destroyed.
void Algorithm4(int m, int n, int l) {
	printf("inside function");
	double* matrix_A;
	double* matrix_B;
	double* matrix_C;
	double *device_matrix_A;
	double *device_matrix_B;
	double *device_matrix_C;
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	float milliseconds = 0;
	// Host and device buffers sized by the true shapes.
	matrix_A = (double*)malloc(m*n*sizeof(double));
	matrix_B = (double*)malloc(n*l*sizeof(double));
	matrix_C = (double*)malloc(m*l*sizeof(double));
	cudaMalloc(&device_matrix_A, m*n*sizeof(double));
	cudaMalloc(&device_matrix_B, n*l*sizeof(double));
	cudaMalloc(&device_matrix_C, m*l*sizeof(double));
	for(int i = 0; i < m; i++)
		for(int j = 0; j < n; j++)
			matrix_A[i*n + j] = rand()%10;
	for(int i = 0; i < n; i++)
		for(int j = 0; j < l; j++)
			matrix_B[i*l + j] = rand()%10;
	for(int i = 0; i < m; i++)
		for(int j = 0; j < l; j++)
			matrix_C[i*l + j] = 0;
	// Copy data from the host memory to the device memory
	cudaMemcpy(device_matrix_A, matrix_A, m*n*sizeof(double), cudaMemcpyHostToDevice);
	cudaMemcpy(device_matrix_B, matrix_B, n*l*sizeof(double), cudaMemcpyHostToDevice);
	// One block computes a (block_size_x*block_size_y) x block_size_x tile.
	dim3 nthreads(BLOCK_SIZE_x, BLOCK_SIZE_y);
	dim3 nblocks((m + nthreads.x*nthreads.y - 1)/(nthreads.x*nthreads.y),
	             (l + nthreads.x - 1)/nthreads.x);
	cudaEventRecord(start);
	printf("nuumber of blocks in x = %d\n", nblocks.x);
	printf("nuumber of blocks in y = %d\n", nblocks.y);
	printf("number of threads in x = %d\n", nthreads.x);
	printf("number of threads in y =%d\n", nthreads.y);
	printf("total threads = %d", nblocks.x*nblocks.y*nthreads.x*nthreads.y);
	device_Matrix_multi<BLOCK_SIZE_x, BLOCK_SIZE_y> <<<nblocks, nthreads>>> ( device_matrix_A,device_matrix_B, device_matrix_C,m,n,l
	);
	// Copy data from the device memory to the host memory
	cudaMemcpy(matrix_C, device_matrix_C, m*l*sizeof(double), cudaMemcpyDeviceToHost);
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);   // wait for `stop` before reading the timer
	cudaEventElapsedTime(&milliseconds, start, stop);
	printf("elaspsed = %f ms\n\n\n", milliseconds);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	// Free the device memory
	cudaFree(device_matrix_A);
	cudaFree(device_matrix_B);
	cudaFree(device_matrix_C);
	free(matrix_A);
	free(matrix_B);
	free(matrix_C);
}
|
1,300
|
#include <iostream>
#include <sys/time.h>
// SAXPY element kernel: y[i] <- a*x[i] + y[i] for the first n elements.
// Threads past the end of the array do nothing.
__global__ void saxpyDevice(int n, float a, float *x, float *y){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n) return;
    y[idx] = a*x[idx] + y[idx];
}
// Computes y <- a*x + y on the GPU for n-element host arrays x and y:
// allocates device buffers, copies both inputs up, launches one thread per
// element (256-thread blocks), and copies the result back into y.
//
// Fix: the kernel launch hard-coded 2.0f instead of forwarding the
// caller's `a` (callers in this file happen to pass 2.0f, so observable
// results are unchanged).
void saxpy(int n, float a, float *x, float *y){
    float *d_x, *d_y;
    // allocate GPU memory, and upload data
    cudaMalloc(&d_x, n*sizeof(float));
    cudaMalloc(&d_y, n*sizeof(float));
    cudaMemcpy(d_x, x, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, n*sizeof(float), cudaMemcpyHostToDevice);
    // send instructions to GPU
    saxpyDevice<<<(n+255)/256, 256>>>(n, a, d_x, d_y);
    // download data, and free GPU memory
    cudaMemcpy(y, d_y, n*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(d_x);
    cudaFree(d_y);
}
// Benchmarks 100 GPU SAXPY calls on a 2^20-element vector and prints the
// total wall-clock time in milliseconds.
//
// Fixes: the original benchmarked uninitialized malloc'd memory and never
// freed x or y; the inputs are now initialized deterministically and the
// buffers released at exit.
int main(void){
    int N = 1<<20;
    float *x, *y;
    x = (float*)malloc(N*sizeof(float));
    y = (float*)malloc(N*sizeof(float));
    for (int i = 0; i < N; ++i) { x[i] = 1.0f; y[i] = 2.0f; }
    struct timeval t0, t1;
    gettimeofday(&t0, NULL);
    for (int i=0; i<100; i++)
        saxpy(N, 2.0f, x, y);
    gettimeofday(&t1, NULL);
    std::cout<<"CUDA = "<<(t1.tv_sec - t0.tv_sec)*1000 + (t1.tv_usec-t0.tv_usec)/1000<<"ms"<<std::endl;
    free(x);
    free(y);
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.