serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
7,801 |
#include <stdlib.h>
#include <sys/time.h>
#include <time.h>
#include <stdio.h>
#define THREADS 512
__global__ void getMax_global(unsigned int *d_data, unsigned int *d_max, int n);
__global__ void getMax_local(unsigned int *d_data, unsigned int *d_max, int n);
__global__ void getMax_binary(unsigned int *d_data, unsigned int *d_max, int n);
/* Benchmark a serial CPU max-reduction against three GPU kernels:
 * global atomics, per-block shared-memory atomics, and a per-block
 * binary tree reduction. Usage: ./a.out N  (N = element count). */
int main(int argc, char* argv[]){
    if (argc < 2){
        puts("Usage: ./a.out [N]");
        return 0;
    }
    int N = atoi(argv[1]);
    if (N <= 0){
        puts("N must be a positive integer");
        return 1;
    }
    printf("N: %d\n", N);
    /* BUGFIX: size the buffer by the element type actually stored
     * (unsigned int), not int — same size on common ABIs, but fragile. */
    size_t sz = sizeof(unsigned int) * N;
    unsigned int *data = (unsigned int*)malloc(sz);
    if (data == NULL){
        puts("malloc failed");
        return 1;
    }
    srand(time(NULL));
    for(int i = 0; i < N; i++)
        data[i] = (unsigned int)(rand() % 100000000);

    struct timeval start, end, timer;
    unsigned int *d_data;
    cudaMalloc((void **) &d_data, sz);
    unsigned int *d_max;
    cudaMalloc((void **) &d_max, sizeof(unsigned int));
    unsigned int max;

    /* ---- CPU reference scan ---- */
    gettimeofday(&start, NULL);
    max = 0;
    for(int i = 0; i < N; i++){
        if (max < data[i]){
            max = data[i];
        }
    }
    gettimeofday(&end, NULL);
    timersub(&end, &start, &timer);
    printf("CPU, elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
    printf("CPU, max value: %u \n", max);  /* BUGFIX: %u for unsigned int */

    /* Keep the host launch config in sync with the kernels' compile-time
     * block size (getMax_binary requires blockDim.x == THREADS). */
    int threads = THREADS;
    int grid = (N + threads - 1) / threads;  /* ceil-div covers the tail block */

    /* ---- Kernel 1: one global atomicMax per element ---- */
    max = 0;
    cudaMemcpy(d_data, data, sz, cudaMemcpyHostToDevice);
    /* Seed the device-side maximum with the first element. */
    cudaMemcpy(d_max, data, sizeof(unsigned int), cudaMemcpyHostToDevice);
    gettimeofday(&start, NULL);
    getMax_global<<< grid, threads >>>(d_data, d_max, N);
    cudaDeviceSynchronize();
    cudaMemcpy(&max, d_max, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    gettimeofday(&end, NULL);
    timersub(&end, &start, &timer);
    printf("Global GPU, elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
    printf("Global GPU, max value : %u\n", max);

    /* ---- Kernel 2: per-block shared max, one global atomic per block ---- */
    max = 0;
    cudaMemcpy(d_data, data, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_max, data, sizeof(unsigned int), cudaMemcpyHostToDevice);
    gettimeofday(&start, NULL);
    getMax_local<<< grid, threads >>>(d_data, d_max, N);
    cudaDeviceSynchronize();
    cudaMemcpy(&max, d_max, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    gettimeofday(&end, NULL);
    timersub(&end, &start, &timer);
    printf("Local GPU, elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
    printf("Local GPU, max value : %u\n", max);

    /* ---- Kernel 3: per-block binary tree reduction ---- */
    max = 0;
    cudaMemcpy(d_data, data, sz, cudaMemcpyHostToDevice);
    cudaMemcpy(d_max, data, sizeof(unsigned int), cudaMemcpyHostToDevice);
    gettimeofday(&start, NULL);
    getMax_binary<<< grid, threads >>>(d_data, d_max, N);
    cudaDeviceSynchronize();
    cudaMemcpy(&max, d_max, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    gettimeofday(&end, NULL);
    timersub(&end, &start, &timer);
    printf("Binary GPU, elapsed time: %lf\n", (timer.tv_usec / 1000.0 + timer.tv_sec * 1000.0));
    printf("Binary GPU, max value : %u\n", max);

    /* BUGFIX: release device and host allocations (original leaked both). */
    cudaFree(d_data);
    cudaFree(d_max);
    free(data);
    return 0;
}
/* Fold every in-range element into *d_max with one global atomicMax per
 * thread. Heavy contention on a single address, but trivially correct.
 * d_max must be pre-seeded by the host (e.g. with data[0] or 0). */
__global__ void getMax_global(unsigned int * d_data, unsigned int *d_max, int n){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;  // tail block: threads past the end do nothing
    atomicMax(d_max, d_data[idx]);
}
/* Two-level max reduction: each block accumulates a shared-memory maximum
 * with block-local atomics, then thread 0 publishes it with one global
 * atomicMax. d_max must be pre-seeded by the host. */
__global__ void getMax_local(unsigned int *d_data, unsigned int *d_max, int n){
    __shared__ unsigned int s_max;
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0)
        s_max = 0;
    /* BUGFIX: barrier so every thread sees the initialized s_max before
     * issuing atomics — otherwise tid 0's plain store can race with (and
     * overwrite) another thread's atomicMax result. */
    __syncthreads();
    /* BUGFIX: guard the tail block — original read d_data out of bounds
     * whenever n is not a multiple of blockDim.x. */
    if (gid < n)
        atomicMax(&s_max, d_data[gid]);
    __syncthreads();
    if (tid == 0)
        atomicMax(d_max, s_max);
}
/* Per-block binary tree max reduction in shared memory, then one global
 * atomicMax per block. Requires blockDim.x == THREADS. d_max must be
 * pre-seeded by the host. */
__global__ void getMax_binary(unsigned int *d_data, unsigned int *d_max, int n){
    __shared__ unsigned int s_data[THREADS];
    int tid = threadIdx.x;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    /* BUGFIX: pad the tail block with 0 (identity for unsigned max) —
     * original read out of bounds and reduced uninitialized shared slots. */
    s_data[tid] = (gid < n) ? d_data[gid] : 0u;
    __syncthreads();
    /* BUGFIX: original broke out when stride reached 1, skipping the final
     * pairwise compare, so s_data[1] never folded into s_data[0]. */
    for (int stride = THREADS >> 1; stride > 0; stride >>= 1) {
        if (tid < stride && s_data[tid] < s_data[tid + stride])
            s_data[tid] = s_data[tid + stride];
        __syncthreads();  // barrier outside the divergent if: all threads reach it
    }
    if (tid == 0)
        atomicMax(d_max, s_data[0]);
}
|
7,802 | #include "includes.h"
/* One thread per pixel; interior pixels get a binarized Sobel edge value
 * (255/0), border pixels are left untouched. Expects a 2D launch covering
 * width x height. */
__global__ void sobelEdgeDetection(int *input, int *output, int width, int height, int thresh) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Skip the one-pixel border (and any threads past the image).
    if (col <= 0 || row <= 0 || col >= width - 1 || row >= height - 1)
        return;
    // Horizontal gradient Gx over the 3x3 neighborhood of (row, col).
    int gx = input[width * (row - 1) + (col + 1)] - input[width * (row - 1) + (col - 1)]
           + 2 * input[width * (row) + (col + 1)] - 2 * input[width * (row) + (col - 1)]
           + input[width * (row + 1) + (col + 1)] - input[width * (row + 1) + (col - 1)];
    // Vertical gradient Gy.
    int gy = input[width * (row - 1) + (col - 1)] + 2 * input[width * (row - 1) + (col)] + input[width * (row - 1) + (col + 1)]
           - input[width * (row + 1) + (col - 1)] - 2 * input[width * (row + 1) + (col)] - input[width * (row + 1) + (col + 1)];
    // NOTE(review): threshold is compared against the SQUARED magnitude
    // (Gx^2 + Gy^2) — callers must pass thresh in squared units; confirm.
    output[row * width + col] = (gx * gx + gy * gy > thresh) ? 255 : 0;
}
7,803 | #include <cuda.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/* Compute the elapsed time x - y in seconds and store it in *result.
 * Follows the classic glibc "Calculating Elapsed Time" recipe: *y is
 * normalized in place as a side effect of the borrow/carry handling.
 * Returns 1 if the difference is negative (x earlier than y), else 0. */
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds so the usec subtraction
     below cannot go negative. */
  if (x->tv_usec < y->tv_usec) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec  += carry;
  }
  /* Symmetric normalization when the usec gap exceeds one second. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry = (y->tv_usec - x->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec  -= carry;
  }
  /* After normalization the usec difference is non-negative. */
  long dsec  = x->tv_sec  - y->tv_sec;
  long dusec = x->tv_usec - y->tv_usec;
  *result = ((double)dusec) / 1e6 + (double)dsec;
  return x->tv_sec < y->tv_sec;
}
/* BGK (single-relaxation-time) collision for a 19-velocity (D3Q19)
 * lattice. Each distribution fi is relaxed in place toward its
 * equilibrium:
 *   feq_i = w_i * (rho + 3 e_i.u + 4.5 (e_i.u)^2 - 1.5 |u|^2)
 * with weights w_i = 1/3 (rest), 1/18 (axis), 1/36 (diagonal).
 * omega is the relaxation rate 1/tau. */
__device__ void bgk_collide(float& f0, float& f1, float& f2,
                            float& f3 , float& f4 , float& f5 ,
                            float& f6 , float& f7 , float& f8 , float& f9,
                            float& f10, float& f11, float& f12,
                            float& f13, float& f14, float& f15,
                            float& f16, float& f17, float& f18, float omega)
{
    /* Macroscopic density and momentum recovered from the distributions. */
    float rho,u,v,w;
    rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
          f10+f11+f12+f13+f14+f15+f16+f17+f18;
    u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
    v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
    w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
    float usqr = u*u+v*v+w*w;
    f0 = f0 -omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
    f1 = f1 -omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
    f2 = f2 -omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
    f3 = f3 -omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
    f4 = f4 -omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
    f5 = f5 -omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
    f6 = f6 -omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
    f7 = f7 -omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
    f8 = f8 -omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
    f9 = f9 -omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
    f10= f10-omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
    /* BUGFIX: was 4.5f*(v+w)*(u+w) — the equilibrium squares its own
     * projection (e_i.u), as every other direction here does. */
    f11= f11-omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
    f12= f12-omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
    /* BUGFIX: was 4.5f*(-v+w)*(u+w) — same squared-projection typo. */
    f13= f13-omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
    f14= f14-omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
    f15= f15-omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
    f16= f16-omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
    f17= f17-omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
    f18= f18-omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
/* Multiple-relaxation-time (MRT) collision for a D3Q19 lattice.
 * The 19 distributions are projected onto moment space, each non-conserved
 * moment is relaxed toward its equilibrium (some with rate `omega`, the
 * rest with the fixed rates baked into the coefficients below), and the
 * result is mapped back and subtracted from f0..f18 in place.
 * All fi are updated in place; omega is the shear relaxation rate. */
__device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
/* Macroscopic density and momentum recovered from the distributions. */
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
/* Forward transform: moments m1..m18 (m0/m3/m5/m7 — density and momentum —
 * are conserved and never relaxed, so they are not computed). */
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ 1.f*f8+ -4.f*f9+ f10+ 1.f*f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
/* Subtract the moment equilibria so m* now hold non-equilibrium parts. */
m1 -= -11.f*rho+19.f*(u*u+v*v+w*w);
//m2 -= -475.f/63.f*(u*u+v*v+w*w);
m2 -= -7.53968254f*(u*u+v*v+w*w);
m4 -= -0.66666667f*u;//qx_eq
m6 -= -0.66666667f*v;//qx_eq
m8 -= -0.66666667f*w;//qx_eq
m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// m10-= 0.f;//.f.f;//.f.5*meq[9];/.f.f;//.f.5*meq[9];/.f.f;//pixx
m11-= (v*v-w*w);//pww_eq
// m12-= 0.f;//.f.f;//.f.5*meq[11];/.f.f;//.f.5*meq[9];/.f.f;//piww
m13-= u*v;//pxy_eq
m14-= v*w;//pyz_eq
m15-= u*w;//pxz_eq
// m16-= 0.0;//mx_eq
// m17-= 0.0;//my_eq
// m18-= 0.0;//mz_eq
/* Inverse transform: subtract M^-1 * S * m from each distribution.
 * Terms multiplied by `omega` are the shear-relaxed moments (m9, m11,
 * m13..m15); the remaining coefficients fold in fixed relaxation rates.
 * NOTE(review): f11/f13/f16/f18 carry -0.055555556f*(m9) while f2/f4/f9/f14
 * carry -0.027777778f*(m9) — verify against the D3Q19 inverse moment
 * matrix before reuse. */
f0 -= - 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
f2 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f3 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
f4 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m6) + -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f9 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f10 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f11 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f12 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f13 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f14 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
f15 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f16 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
f17 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
f18 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
/* Second pass: stress-tensor moments m11..m15. */
f2 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
f4 -= 0.083333333f*(m11)*omega + -0.083333333f*(m12);
f5 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
f6 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
f7 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ ( 0.25f*(m13) )*omega;
f8 -= 0.083333333f*(m11)*omega + 0.041666667f*(m12)+ (-0.25f*(m13) )*omega;
f9 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
f10 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
f11 -= +( 0.25f*(m14) )*omega ;
f12 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
f13 -= +( -0.25f*(m14) )*omega ;
f14 -= -0.083333333f*(m11)*omega + 0.083333333f*(m12);
f15 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + -0.25f*(m15))*omega ;
f16 -= +( -0.25f*(m14) )*omega ;
f17 -= -0.083333333f*(m11)*omega + -0.041666667f*(m12) +( + 0.25f*(m15))*omega ;
f18 -= +( 0.25f*(m14) )*omega ;
/* Third pass: the ghost moments m16..m18 (diagonal directions only). */
f5 -= 0.125f*(m16)+ -0.125f*(m17);
f6 -= -0.125f*(m16)+ -0.125f*(m17);
f7 -= -0.125f*(m16)+ 0.125f*(m17);
f8 -= 0.125f*(m16)+ 0.125f*(m17);
f10 -= -0.125f*(m16) + 0.125f*(m18);
f11 -= + 0.125f*(m17)+ -0.125f*(m18);
f12 -= 0.125f*(m16) + 0.125f*(m18);
f13 -= + -0.125f*(m17)+ -0.125f*(m18);
f15 -= -0.125f*(m16) + -0.125f*(m18);
f16 -= + 0.125f*(m17)+ 0.125f*(m18);
f17 -= 0.125f*(m16) + -0.125f*(m18);
f18 -= + -0.125f*(m17)+ 0.125f*(m18);
}
//{
// float u,v,w; //rho,
//// rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
//// f10+f11+f12+f13+f14+f15+f16+f17+f18;
// u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
// v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
// w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
//
// float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//
// //m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
//// m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
//// +11.f*(f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18) - 19.f*(u*u+v*v+w*w);
//// m1 = -19.f*f0+ 19.f*f5+ 19.f*f6+ 19.f*f7+ 19.f*f8+19.f*f10+ 19.f*f11+ 19.f*f12+ 19.f*f13+ 19.f*f15+ 19.f*f16+ 19.f*f17+ 19.f*f18- 19.f*(u*u+v*v+w*w);
// m1 = 19.f*(-f0+f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18-(u*u+v*v+w*w));
// m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
//// m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
//// m4 =-4.f*f1+4.f*f3+f5-f6-f7+f8+f10-f12+f15-f17+0.66666667f*(f1-f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m4 =-3.33333333f*f1+3.33333333f*f3 +1.66666667f*(f5+f8+f10+f15) -0.33333333f*(f3+f6+f7+f12+f17);
//// m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
//// m6 =-4.f*f2+4.f*f4+f5+f6-f7-f8+f11-f13+f16-f18+0.66666667f*(f2-f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m6 =-3.33333333f*f2+3.33333333f*f4+1.66666667f*(f5+f6+f11+f16) -0.33333333f*(f7+f8+f13+f18);//-0.66666667f*(f2-f4+f5+f6-f7-f8+f11-f13+f16-f18);
//// m8 =-4.f*f9+f10+f11+f12+f13+4.f*f14-f15-f16-f17-f18;
//// m8 =-4.f*f9+f10+f11+f12+f13+4.f*f14-f15-f16-f17-f18 +0.66666667f*(f9+f10+f11+f12+f13-f14-f15-f16-f17-f18);
// m8 =-3.33333333f*f9+3.33333333f*f14+1.66666667f*(f10+f11+f12+f13)-0.33333333f*(f15+f16+f17+f18);//-0.33333333f*(f10+f11+f12+f13-f15-f16-f17-f18);
//// m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
//// m9 = 2.f*(f1+f3-f11-f13-f16-f18) - f2- f4+ f5+ f6+ f7+ f8+ - f9+ f10+ f12- f14+ f15+ f17;
// m9 = (f1+f3-f11-f13-f16-f18)+(f1+f3-f11-f13-f16-f18) - f2- f4+ f5+ f6+ f7+ f8+ - f9+ f10+ f12- f14+ f15+ f17;
//// m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
// m10 = (-f1-f3)+(-f1-f3)+(-f1-f3)+(-f1-f3)+(f2+f4+f9-f11-f13-f16-f18)+(f2+f4+f9-f11-f13-f16-f18)+ f5+ f6+ f7+ f8+ f10+ f12+ f15+ f17;
// m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
//// m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
//// m12 = 2.f*(-f2-f4+f9+f14) + f5+ f6+ f7+ f8- f10 + - f12- f15 - f17 ;
// m12 = (-f2-f4+f9+f14)+(-f2-f4+f9+f14) + f5+ f6+ f7+ f8- f10 + - f12- f15 - f17 ;
// m13 = f5+ - f6+ f7+ - f8 ;
// m14 = f11 + - f13 + - f16 + f18;
// m15 = f10 + - f12 + - f15 + f17 ;
// m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
// m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
// m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
//
//// m1 -= -11.f*rho+19.f*(u*u+v*v+w*w);
// //m2 -= -475.f/63.f*(u*u+v*v+w*w);
// m2 -= -7.53968254f*(u*u+v*v+w*w);
//// m4 -= -0.66666667f*u;//qx_eq
//// m6 -= -0.66666667f*v;//qx_eq
//// m8 -= -0.66666667f*w;//qx_eq
// m9 -= (2.f*u*u-(v*v+w*w));//(2.f*.f*.f-(u1*u1+u2*u2));///3.f;//pxx_eq
// m11-= (v*v-w*w);//pww_eq
// m13-= u*v;//pxy_eq
// m14-= v*w;//pyz_eq
// m15-= u*w;//pxz_eq
//
//
//f0 -= - 0.012531328f*(m1)+ 0.047619048f*(m2);
//f1 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
//f2 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m6)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f3 -= -0.0045948204f*(m1)+ -0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*(m9)*omega + -0.055555556f*(m10);
//f4 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m6)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ 0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4)+ -0.025f*(m6) + 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f9 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + -0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f10 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f11 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//f12 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + 0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f13 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ 0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//f14 -= -0.0045948204f*(m1)+ -0.015873016f*(m2) + 0.1f*(m8)+ -0.027777778f*(m9)*omega + 0.027777778f*(m10);
//f15 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f16 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//f17 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ -0.025f*(m4) + -0.025f*(m8)+ 0.027777778f*(m9)*omega + 0.013888889f*(m10);
//f18 -= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6)+ -0.025f*(m8)+ -0.055555556f*(m9)*omega + -0.027777778f*(m10);
//
//f2 -= 0.083333333f*((m11)*omega-m12);// + -0.083333333f*(m12);
//f4 -= 0.083333333f*((m11)*omega-m12);// + -0.083333333f*(m12);
//f5 -= 0.083333333f*((m11)*omega + 0.5f*(m12))+ ( 0.25f*(m13))*omega;
//f6 -= 0.083333333f*((m11)*omega + 0.5f*(m12))+ (-0.25f*(m13))*omega;
//f7 -= 0.083333333f*((m11)*omega + 0.5f*(m12))+ ( 0.25f*(m13))*omega;
//f8 -= 0.083333333f*((m11)*omega + 0.5f*(m12))+ (-0.25f*(m13))*omega;
//f9 -= -0.083333333f*((m11)*omega + (m12));
//f10 -= -0.083333333f*((m11)*omega + -0.5f*(m12)) +( 0.25f*(m15))*omega ;
//f11 -= ( 0.25f*(m14))*omega ;
//f12 -= -0.083333333f*((m11)*omega + -0.5f*(m12)) +(-0.25f*(m15))*omega ;
//f13 -= (-0.25f*(m14))*omega ;
//f14 -= -0.083333333f*((m11)*omega + (m12));
//f15 -= -0.083333333f*((m11)*omega + -0.5f*(m12)) +(-0.25f*(m15))*omega ;
//f16 -= (-0.25f*(m14))*omega ;
//f17 -= -0.083333333f*((m11)*omega + -0.5f*(m12)) +( 0.25f*(m15))*omega ;
//f18 -= ( 0.25f*(m14))*omega ;
//
//f5 -= 0.125f*(m16)+ -0.125f*(m17);
//f6 -= -0.125f*(m16)+ -0.125f*(m17);
//f7 -= -0.125f*(m16)+ 0.125f*(m17);
//f8 -= 0.125f*(m16)+ 0.125f*(m17);
////f10 -= -0.125f*(m16) + 0.125f*(m18);
//f10 -= -0.125f*(m16-m18);
//f11 -= + 0.125f*(m17-m18);//+ -0.125f*(m18);
//f12 -= 0.125f*(m16+m18);// + 0.125f*(m18);
//f13 -= + -0.125f*(m17+m18);//+ -0.125f*(m18);
//f15 -= -0.125f*(m16+m18);// + -0.125f*(m18);
//f16 -= + 0.125f*(m17+m18);//+ 0.125f*(m18);
//f17 -= 0.125f*(m16-m18);// + -0.125f*(m18);
//f18 -= + -0.125f*(m17-m18);//+ 0.125f*(m18);
//
//
//
//
//
//
//}
/* Linear element index of lattice node (x, y, z) inside distribution
 * f_num. Memory layout is distribution-major: each distribution occupies
 * one pitch*height*depth slab (pitch = padded row width in elements). */
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int height, int depth)
{
    // x + y*pitch + pitch*height*(z + f_num*depth), regrouped.
    return x + y*pitch + (z + f_num*depth)*height*pitch;
}
/* Clamp an index against an exclusive upper bound: returns a when a < b,
 * otherwise b-1. NOTE: not a plain min — out-of-range values map to the
 * last valid index (used to clamp neighbor lookups at the domain edge). */
__device__ int dmin(int a, int b)
{
    return (a < b) ? a : b - 1;
}
/* Clamp an index against a lower bound of 0: returns a when a >= 0,
 * otherwise 0 (clamps neighbor lookups at the low domain edge). */
__device__ int dmax(int a)
{
    return (a > -1) ? a : 0;
}
/* Debug/benchmark kernel: copies only distributions f1 and f2 from fA to
 * fB at each node of a 3D grid; all other distributions are untouched.
 * (image, omega, uMax and width are accepted but unused here —
 * presumably kept so the signature matches the real collision kernels.)
 * Expects a 3D launch covering the padded domain; pitch is in elements. */
__global__ void simple_copy(float* fA, float* fB,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
    int x = threadIdx.x + blockIdx.x*blockDim.x;
    int y = threadIdx.y + blockIdx.y*blockDim.y;
    int z = threadIdx.z + blockIdx.z*blockDim.z;
    // Node index within one distribution slab of the padded layout.
    int node = x + y*pitch + z*height*pitch;
    // Elements per distribution slab; f_i lives at node + i*slab.
    size_t slab = pitch*height*depth;
    float f1 = fA[node + slab];
    float f2 = fA[node + slab + slab];
    fB[node + slab] = f1;
    fB[node + slab + slab] = f2;
}
//int const blockx = 192;
//int const blocky = 1;
__global__ void mrt_d_single(float* fA, float* fB,
int *image, float omega, float uMax,
int width, int height, int depth, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int i = x+y*width+z*width*height;//index on linear mem
int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
// f1out[j] = tex2D(texRef_f2A,x,y+h*z);
// int i = x+y*blockDim.x*gridDim.x;
//float u,v,w,rho;//,usqr;
int im = image[i];
if(im == 1){//BB
//__shared__ float f0_s [blockDim.x][blockDim.y];
// __shared__ float f1_s [blockx][blocky];
// __shared__ float f2_s [blockx][blocky];
// __shared__ float f3_s [blockx][blocky];
// __shared__ float f4_s [blockx][blocky];
// __shared__ float f5_s [blockx][blocky];
// __shared__ float f7_s [blockx][blocky];
// __shared__ float f6_s [blockx][blocky];
// __shared__ float f8_s [blockx][blocky];
// __shared__ float f9_s [blockx][blocky];
// __shared__ float f10_s[blockx][blocky];
// __shared__ float f11_s[blockx][blocky];
// __shared__ float f12_s[blockx][blocky];
// __shared__ float f13_s[blockx][blocky];
// __shared__ float f14_s[blockx][blocky];
// __shared__ float f15_s[blockx][blocky];
// __shared__ float f16_s[blockx][blocky];
// __shared__ float f17_s[blockx][blocky];
// __shared__ float f18_s[blockx][blocky];
//
// f1_s [threadIdx.x][threadIdx.y] = fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f2_s [threadIdx.x][threadIdx.y] = fA[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(2 ,x,y,z,pitch,height,depth)];
// f3_s [threadIdx.x][threadIdx.y] = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];//fA[f_mem(3 ,x,y,z,pitch,height,depth)];
// f4_s [threadIdx.x][threadIdx.y] = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(4 ,x,y,z,pitch,height,depth)];
// f5_s [threadIdx.x][threadIdx.y] = fA[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(5 ,x,y,z,pitch,height,depth)];
// f7_s [threadIdx.x][threadIdx.y] = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(7 ,x,y,z,pitch,height,depth)];
// f6_s [threadIdx.x][threadIdx.y] = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(6 ,x,y,z,pitch,height,depth)];
// f8_s [threadIdx.x][threadIdx.y] = fA[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(8 ,x,y,z,pitch,height,depth)];
// f9_s [threadIdx.x][threadIdx.y] = fA[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(9 ,x,y,z,pitch,height,depth)];
// f10_s[threadIdx.x][threadIdx.y] = fA[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(10,x,y,z,pitch,height,depth)];
// f11_s[threadIdx.x][threadIdx.y] = fA[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(11,x,y,z,pitch,height,depth)];
// f12_s[threadIdx.x][threadIdx.y] = fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(12,x,y,z,pitch,height,depth)];
// f13_s[threadIdx.x][threadIdx.y] = fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(13,x,y,z,pitch,height,depth)];
// f14_s[threadIdx.x][threadIdx.y] = fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(14,x,y,z,pitch,height,depth)];
// f15_s[threadIdx.x][threadIdx.y] = fA[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(15,x,y,z,pitch,height,depth)];
// f16_s[threadIdx.x][threadIdx.y] = fA[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];//fA[f_mem(16,x,y,z,pitch,height,depth)];
// f17_s[threadIdx.x][threadIdx.y] = fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(17,x,y,z,pitch,height,depth)];
// f18_s[threadIdx.x][threadIdx.y] = fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(18,x,y,z,pitch,height,depth)];
// fB[j+pitch*height*depth*1 ] = f1_s [threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*2 ] = f2_s [threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*3 ] = f3_s [threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*4 ] = f4_s [threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*5 ] = f5_s [threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*6 ] = f7_s [threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*7 ] = f6_s [threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*8 ] = f8_s [threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*9 ] = f9_s [threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*10] = f10_s[threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*11] = f11_s[threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*12] = f12_s[threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*13] = f13_s[threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*14] = f14_s[threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*15] = f15_s[threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*16] = f16_s[threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*17] = f17_s[threadIdx.x][threadIdx.y];
// fB[j+pitch*height*depth*18] = f18_s[threadIdx.x][threadIdx.y];
float f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f1 = fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];//fA[f_mem(1 ,x,y,z,pitch,height,depth)];
f2 = fA[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(2 ,x,y,z,pitch,height,depth)];
f3 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];//fA[f_mem(3 ,x,y,z,pitch,height,depth)];
f4 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(4 ,x,y,z,pitch,height,depth)];
f5 = fA[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(5 ,x,y,z,pitch,height,depth)];
f7 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(7 ,x,y,z,pitch,height,depth)];
f6 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];//fA[f_mem(6 ,x,y,z,pitch,height,depth)];
f8 = fA[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];//fA[f_mem(8 ,x,y,z,pitch,height,depth)];
f9 = fA[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(9 ,x,y,z,pitch,height,depth)];
f10= fA[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(10,x,y,z,pitch,height,depth)];
f11= fA[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(11,x,y,z,pitch,height,depth)];
f12= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(12,x,y,z,pitch,height,depth)];
f13= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];//fA[f_mem(13,x,y,z,pitch,height,depth)];
f14= fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(14,x,y,z,pitch,height,depth)];
f15= fA[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(15,x,y,z,pitch,height,depth)];
f16= fA[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];//fA[f_mem(16,x,y,z,pitch,height,depth)];
f17= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(17,x,y,z,pitch,height,depth)];
f18= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];//fA[f_mem(18,x,y,z,pitch,height,depth)];
fB[j+pitch*height*depth*1 ] = f1 ;
fB[j+pitch*height*depth*2 ] = f2 ;
fB[j+pitch*height*depth*3 ] = f3 ;
fB[j+pitch*height*depth*4 ] = f4 ;
fB[j+pitch*height*depth*5 ] = f5 ;
fB[j+pitch*height*depth*6 ] = f6 ;
fB[j+pitch*height*depth*7 ] = f7 ;
fB[j+pitch*height*depth*8 ] = f8 ;
fB[j+pitch*height*depth*9 ] = f9 ;
fB[j+pitch*height*depth*10] = f10;
fB[j+pitch*height*depth*11] = f11;
fB[j+pitch*height*depth*12] = f12;
fB[j+pitch*height*depth*13] = f13;
fB[j+pitch*height*depth*14] = f14;
fB[j+pitch*height*depth*15] = f15;
fB[j+pitch*height*depth*16] = f16;
fB[j+pitch*height*depth*17] = f17;
fB[j+pitch*height*depth*18] = f18;
// fB[j+pitch*height*depth*1 ] = fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];
// fB[j+pitch*height*depth*2 ] = fA[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
// fB[j+pitch*height*depth*3 ] = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];
// fB[j+pitch*height*depth*4 ] = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
// fB[j+pitch*height*depth*5 ] = fA[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];
// fB[j+pitch*height*depth*6 ] = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];
// fB[j+pitch*height*depth*7 ] = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];
// fB[j+pitch*height*depth*8 ] = fA[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];
// fB[j+pitch*height*depth*9 ] = fA[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// fB[j+pitch*height*depth*10] = fA[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];
// fB[j+pitch*height*depth*11] = fA[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
// fB[j+pitch*height*depth*12] = fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// fB[j+pitch*height*depth*13] = fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
// fB[j+pitch*height*depth*14] = fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
// fB[j+pitch*height*depth*15] = fA[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];
// fB[j+pitch*height*depth*16] = fA[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
// fB[j+pitch*height*depth*17] = fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];
// fB[j+pitch*height*depth*18] = fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
// fB[f_mem(1 ,x,y,z,pitch,height,depth)] = fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// fB[f_mem(3 ,x,y,z,pitch,height,depth)] = fA[f_mem(3 ,x,y,z,pitch,height,depth)];
// fB[f_mem(2 ,x,y,z,pitch,height,depth)] = fA[f_mem(2 ,x,y,z,pitch,height,depth)];
// fB[f_mem(4 ,x,y,z,pitch,height,depth)] = fA[f_mem(4 ,x,y,z,pitch,height,depth)];
// fB[f_mem(5 ,x,y,z,pitch,height,depth)] = fA[f_mem(5 ,x,y,z,pitch,height,depth)];
// fB[f_mem(7 ,x,y,z,pitch,height,depth)] = fA[f_mem(7 ,x,y,z,pitch,height,depth)];
// fB[f_mem(6 ,x,y,z,pitch,height,depth)] = fA[f_mem(6 ,x,y,z,pitch,height,depth)];
// fB[f_mem(8 ,x,y,z,pitch,height,depth)] = fA[f_mem(8 ,x,y,z,pitch,height,depth)];
// fB[f_mem(9 ,x,y,z,pitch,height,depth)] = fA[f_mem(9 ,x,y,z,pitch,height,depth)];
// fB[f_mem(14,x,y,z,pitch,height,depth)] = fA[f_mem(14,x,y,z,pitch,height,depth)];
// fB[f_mem(10,x,y,z,pitch,height,depth)] = fA[f_mem(10,x,y,z,pitch,height,depth)];
// fB[f_mem(17,x,y,z,pitch,height,depth)] = fA[f_mem(17,x,y,z,pitch,height,depth)];
// fB[f_mem(11,x,y,z,pitch,height,depth)] = fA[f_mem(11,x,y,z,pitch,height,depth)];
// fB[f_mem(18,x,y,z,pitch,height,depth)] = fA[f_mem(18,x,y,z,pitch,height,depth)];
// fB[f_mem(12,x,y,z,pitch,height,depth)] = fA[f_mem(12,x,y,z,pitch,height,depth)];
// fB[f_mem(15,x,y,z,pitch,height,depth)] = fA[f_mem(15,x,y,z,pitch,height,depth)];
// fB[f_mem(13,x,y,z,pitch,height,depth)] = fA[f_mem(13,x,y,z,pitch,height,depth)];
// fB[f_mem(16,x,y,z,pitch,height,depth)] = fA[f_mem(16,x,y,z,pitch,height,depth)];
// fB[j] = fA[j];
// fB[f_mem(1 ,x,y,z,pitch,height,depth)] = fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];
// fB[f_mem(3 ,x,y,z,pitch,height,depth)] = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];
// fB[f_mem(2 ,x,y,z,pitch,height,depth)] = fA[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
// fB[f_mem(4 ,x,y,z,pitch,height,depth)] = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
// fB[f_mem(5 ,x,y,z,pitch,height,depth)] = fA[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];
// fB[f_mem(7 ,x,y,z,pitch,height,depth)] = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];
// fB[f_mem(6 ,x,y,z,pitch,height,depth)] = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];
// fB[f_mem(8 ,x,y,z,pitch,height,depth)] = fA[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];
// fB[f_mem(9 ,x,y,z,pitch,height,depth)] = fA[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// fB[f_mem(14,x,y,z,pitch,height,depth)] = fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
// fB[f_mem(10,x,y,z,pitch,height,depth)] = fA[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];
// fB[f_mem(17,x,y,z,pitch,height,depth)] = fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];
// fB[f_mem(11,x,y,z,pitch,height,depth)] = fA[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
// fB[f_mem(18,x,y,z,pitch,height,depth)] = fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
// fB[f_mem(12,x,y,z,pitch,height,depth)] = fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// fB[f_mem(15,x,y,z,pitch,height,depth)] = fA[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];
// fB[f_mem(13,x,y,z,pitch,height,depth)] = fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
// fB[f_mem(16,x,y,z,pitch,height,depth)] = fA[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
}
else{
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// float meq1,meq2,meq4,meq6,meq7,meq8;
// f0 = fA[j];
// f1 = fA[f_mem(1 ,x,y,z,pitch,height,depth)];
// f2 = fA[f_mem(2 ,x,y,z,pitch,height,depth)];
// f3 = fA[f_mem(3 ,x,y,z,pitch,height,depth)];
// f4 = fA[f_mem(4 ,x,y,z,pitch,height,depth)];
// f5 = fA[f_mem(5 ,x,y,z,pitch,height,depth)];
// f6 = fA[f_mem(6 ,x,y,z,pitch,height,depth)];
// f7 = fA[f_mem(7 ,x,y,z,pitch,height,depth)];
// f8 = fA[f_mem(8 ,x,y,z,pitch,height,depth)];
// f9 = fA[f_mem(9 ,x,y,z,pitch,height,depth)];
// f10= fA[f_mem(10,x,y,z,pitch,height,depth)];
// f11= fA[f_mem(11,x,y,z,pitch,height,depth)];
// f12= fA[f_mem(12,x,y,z,pitch,height,depth)];
// f13= fA[f_mem(13,x,y,z,pitch,height,depth)];
// f14= fA[f_mem(14,x,y,z,pitch,height,depth)];
// f15= fA[f_mem(15,x,y,z,pitch,height,depth)];
// f16= fA[f_mem(16,x,y,z,pitch,height,depth)];
// f17= fA[f_mem(17,x,y,z,pitch,height,depth)];
// f18= fA[f_mem(18,x,y,z,pitch,height,depth)];
f0 = fA[j];
f1 = fA[f_mem(1 ,x-1,y ,z ,pitch,height,depth)];
f2 = fA[f_mem(2 ,x ,y-1,z ,pitch,height,depth)];
f3 = fA[f_mem(3 ,x+1,y ,z ,pitch,height,depth)];
f4 = fA[f_mem(4 ,x ,y+1,z ,pitch,height,depth)];
f5 = fA[f_mem(5 ,x-1,y-1,z ,pitch,height,depth)];
f6 = fA[f_mem(6 ,x+1,y-1,z ,pitch,height,depth)];
f7 = fA[f_mem(7 ,x+1,y+1,z ,pitch,height,depth)];
f8 = fA[f_mem(8 ,x-1,y+1,z ,pitch,height,depth)];
f9 = fA[f_mem(9 ,x ,y ,z-1,pitch,height,depth)];
f10= fA[f_mem(10,x-1,y ,z-1,pitch,height,depth)];
f11= fA[f_mem(11,x ,y-1,z-1,pitch,height,depth)];
f12= fA[f_mem(12,x+1,y ,z-1,pitch,height,depth)];
f13= fA[f_mem(13,x ,y+1,z-1,pitch,height,depth)];
f14= fA[f_mem(14,x ,y ,z+1,pitch,height,depth)];
f15= fA[f_mem(15,x-1,y ,z+1,pitch,height,depth)];
f16= fA[f_mem(16,x ,y-1,z+1,pitch,height,depth)];
f17= fA[f_mem(17,x+1,y ,z+1,pitch,height,depth)];
f18= fA[f_mem(18,x ,y+1,z+1,pitch,height,depth)];
// f1 = fA[f_mem(1 ,dmax(x-1) ,y ,z ,pitch,height,depth)];
// f3 = fA[f_mem(3 ,dmin(x+1,width),y ,z ,pitch,height,depth)];
// f2 = fA[f_mem(2 ,x ,dmax(y-1) ,z ,pitch,height,depth)];
// f4 = fA[f_mem(4 ,x ,dmin(y+1,height),z ,pitch,height,depth)];
// f5 = fA[f_mem(5 ,dmax(x-1) ,dmax(y-1) ,z ,pitch,height,depth)];
// f7 = fA[f_mem(7 ,dmin(x+1,width),dmin(y+1,height),z ,pitch,height,depth)];
// f6 = fA[f_mem(6 ,dmin(x+1,width),dmax(y-1) ,z ,pitch,height,depth)];
// f8 = fA[f_mem(8 ,dmax(x-1) ,dmin(y+1,height),z ,pitch,height,depth)];
// f9 = fA[f_mem(9 ,x ,y ,dmax(z-1) ,pitch,height,depth)];
// f14= fA[f_mem(14,x ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// f10= fA[f_mem(10,dmax(x-1) ,y ,dmax(z-1) ,pitch,height,depth)];
// f17= fA[f_mem(17,dmin(x+1,width),y ,dmin(z+1,depth) ,pitch,height,depth)];
// f11= fA[f_mem(11,x ,dmax(y-1) ,dmax(z-1) ,pitch,height,depth)];
// f18= fA[f_mem(18,x ,dmin(y+1,height),dmin(z+1,depth) ,pitch,height,depth)];
// f12= fA[f_mem(12,dmin(x+1,width),y ,dmax(z-1) ,pitch,height,depth)];
// f15= fA[f_mem(15,dmax(x-1) ,y ,dmin(z+1,depth) ,pitch,height,depth)];
// f13= fA[f_mem(13,x ,dmin(y+1,height),dmax(z-1) ,pitch,height,depth)];
// f16= fA[f_mem(16,x ,dmax(y-1) ,dmin(z+1,depth) ,pitch,height,depth)];
if(im == 3)//DirichletWest
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == height-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
if(z == depth-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
// float fInt1,fInt2;//,fDiff;
float u,v,w,rho;
u = 0.0f;//*PoisProf(zcoord)*1.5;
v = uMax;//0.0;
w = 0.0f;
// fInt1 = f0+f2+f4+f9+f11+f13+f14+f16+f18;
// fInt2 = f3+f6+f7+f12+f17;
// rho = u+(fInt1+2.0f*fInt2); //D2Q9i
rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float usqr = u*u+v*v+w*w;
// f0 -= 1.0f/3.0f*(rho-1.5f*usqr);
// f2 -= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// f3 -= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// f4 -= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f6 -= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// f7 -= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f9 -= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f11-= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
// f12-= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// f13-= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
// f14-= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f16-= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// f17-= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// f18-= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr)+f3-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr)+f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr)+f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr)+f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr)+f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
}
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
//bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,height,depth)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,height,depth)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,height,depth)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,height,depth)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,height,depth)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,height,depth)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,height,depth)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,height,depth)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,height,depth)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,height,depth)] = f9 ;
fB[f_mem(10,x,y,z,pitch,height,depth)] = f10;
fB[f_mem(11,x,y,z,pitch,height,depth)] = f11;
fB[f_mem(12,x,y,z,pitch,height,depth)] = f12;
fB[f_mem(13,x,y,z,pitch,height,depth)] = f13;
fB[f_mem(14,x,y,z,pitch,height,depth)] = f14;
fB[f_mem(15,x,y,z,pitch,height,depth)] = f15;
fB[f_mem(16,x,y,z,pitch,height,depth)] = f16;
fB[f_mem(17,x,y,z,pitch,height,depth)] = f17;
fB[f_mem(18,x,y,z,pitch,height,depth)] = f18;
}
}
// Initialize a single interleaved D3Q19 distribution array to the equilibrium
// distribution of a quiescent fluid (rho = 1, u = v = w = 0).
//
// Layout: f holds 19 velocity slices, each of size pitch*height*depth floats,
// with slice q starting at offset q*pitch*height*depth (pitch in ELEMENTS,
// matching the cudaMallocPitch allocation divided by sizeof(float)).
// Expected launch: one thread per lattice site; the grid may over-cover the
// width x height x depth domain (out-of-domain threads exit early).
__global__ void initialize_single(float *f,
                              int width, int height, int depth, size_t pitch)//pitch in elements
{
	int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
	int y = threadIdx.y+blockIdx.y*blockDim.y;
	int z = threadIdx.z+blockIdx.z*blockDim.z;
	// Guard: skip threads outside the domain so an over-covering grid cannot
	// write past the allocation.
	if(x >= width || y >= height || z >= depth) return;
	int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
	float u,v,w,rho,usqr;
	rho = 1.0f;
	u = 0.0f;
	v = 0.0f;
	w = 0.0f;
	//if(x == 3 ) u = 0.1f;
	usqr = u*u+v*v+w*w;
	// D3Q19 equilibrium: f_i = w_i*(rho + 3*(e_i.u) + 4.5*(e_i.u)^2 - 1.5*|u|^2)
	f[j+0 *pitch*height*depth]= 1.0f/3.0f*(rho-1.5f*usqr);
	f[j+1 *pitch*height*depth]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
	f[j+2 *pitch*height*depth]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
	f[j+3 *pitch*height*depth]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
	f[j+4 *pitch*height*depth]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
	f[j+5 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
	f[j+6 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
	f[j+7 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
	f[j+8 *pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
	f[j+9 *pitch*height*depth]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
	f[j+10*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
	// BUGFIX: quadratic term was (v+w)*(u+w); must square the same projection.
	f[j+11*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
	f[j+12*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
	// BUGFIX: quadratic term was (-v+w)*(u+w); must square the same projection.
	f[j+13*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
	f[j+14*pitch*height*depth]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
	f[j+15*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
	f[j+16*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
	f[j+17*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
	f[j+18*pitch*height*depth]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
// Initialize nineteen separately-allocated D3Q19 distribution arrays (one per
// lattice velocity) to the equilibrium distribution of a quiescent fluid
// (rho = 1, u = v = w = 0).
//
// Each fN points to its own pitched 2D allocation of width x (height*zDim)
// floats; pitch is in ELEMENTS. Expected launch: one thread per lattice site.
// NOTE(review): no depth parameter is available, so only x/y can be guarded;
// the z grid dimension must exactly cover the domain depth.
__global__ void initialize(float* f0, float* f1, float* f2,
                           float* f3, float* f4, float* f5,
                           float* f6, float* f7, float* f8, float* f9,
                           float* f10, float* f11, float* f12,
                           float* f13, float* f14, float* f15,
                           float* f16, float* f17, float* f18,
                           int width, int height, size_t pitch)//pitch in elements
{
	int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
	int y = threadIdx.y+blockIdx.y*blockDim.y;
	int z = threadIdx.z+blockIdx.z*blockDim.z;
	// Guard the dimensions we know; prevents OOB writes for over-covering grids.
	if(x >= width || y >= height) return;
	int j = x+y*pitch+z*height*pitch;//index on padded mem (pitch in elements)
	float u,v,w,rho,feq,usqr;
	rho = 1.0f;
	u = 0.0f;
	v = 0.0f;
	w = 0.0f;
	//if(x == 3 ) u = 0.1f;
	usqr = u*u+v*v+w*w;
	// D3Q19 equilibrium: f_i = w_i*(rho + 3*(e_i.u) + 4.5*(e_i.u)^2 - 1.5*|u|^2)
	feq = 1.0f/3.0f*(rho-1.5f*usqr);
	f0[j] = feq;
	feq = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
	f1[j] = feq;
	feq = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
	f2[j] = feq;
	feq = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
	f3[j] = feq;
	feq = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
	f4[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
	f5[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
	f6[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
	f7[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
	f8[j] = feq;
	feq = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
	f9[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
	f10[j] = feq;
	// BUGFIX: quadratic term was (v+w)*(u+w); must square the same projection.
	feq = 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
	f11[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
	f12[j] = feq;
	// BUGFIX: quadratic term was (-v+w)*(u+w); must square the same projection.
	feq = 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
	f13[j] = feq;
	feq = 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
	f14[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
	f15[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
	f16[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
	f17[j] = feq;
	feq = 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
	f18[j] = feq;
}
// Sanity-check kernel for pitched 3D allocations: copies each element of the
// padded volume behind devPitchedPtr into the linear array test_d, then adds
// 100 to the padded element so a repeated run shows whether writes landed.
//
// Expected launch: one thread per (x,y,z) element of a w x h x d float volume.
// devPitchedPtr.pitch is the row pitch in BYTES (as returned by
// cudaMalloc3D/cudaMallocPitch); it is converted to elements once below.
__global__ void copytest(cudaPitchedPtr devPitchedPtr, float * test_d, int w, int h, int d)
{
	int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
	int y = threadIdx.y+blockIdx.y*blockDim.y;
	int z = threadIdx.z+blockIdx.z*blockDim.z;
	// Guard against an over-covering grid (also makes the d parameter useful).
	if(x >= w || y >= h || z >= d) return;
	float* test = (float *)devPitchedPtr.ptr;
	// Keep the pitch as size_t and convert to elements once; the original
	// narrowed it to int and divided by sizeof(float) twice per thread.
	size_t pitch_elements = devPitchedPtr.pitch/sizeof(float);
	int i = x+y*w+z*w*h;//index on linear mem
	size_t j = x+y*pitch_elements+z*(size_t)h*pitch_elements;//index on padded mem
	test_d[i] = test[j];
	test[j] += 100.0f;
}
int main(int argc, char *argv[])
{
// float *f0_h, *f1_h, *f2_h, *f3_h, *f4_h, *f5_h, *f6_h, *f7_h, *f8_h, *f9_h;
// float *f10_h, *f11_h, *f12_h, *f13_h, *f14_h, *f15_h, *f16_h, *f17_h, *f18_h;
// float *f0_dA, *f1_dA, *f2_dA, *f3_dA, *f4_dA, *f5_dA, *f6_dA, *f7_dA, *f8_dA, *f9_dA;
// float *f10_dA, *f11_dA, *f12_dA, *f13_dA, *f14_dA, *f15_dA, *f16_dA, *f17_dA, *f18_dA;
// float *f0_dB, *f1_dB, *f2_dB, *f3_dB, *f4_dB, *f5_dB, *f6_dB, *f7_dB, *f8_dB, *f9_dB;
// float *f10_dB, *f11_dB, *f12_dB, *f13_dB, *f14_dB, *f15_dB, *f16_dB, *f17_dB, *f18_dB;
int *image_d, *image_h;
//cudaPitchedPtr f0_d;
ofstream output;
output.open ("LBM1_out.dat");
size_t memsize, memsize_int;
size_t pitch;
int i, n, nBlocks, xDim, yDim, zDim,tMax;
float Re, omega, uMax, CharLength;
int BLOCKSIZEx = 256;
int BLOCKSIZEy = 1;
int BLOCKSIZEz = 1;
xDim = 256;
yDim = 128;
zDim = 32;
tMax = 100;
Re = 500.f;//100.f;
uMax = 0.08f;
CharLength = xDim-2.f;
omega = 1.0f/(3.0f*(uMax*CharLength/Re)+0.5f);
cout<<"omega: "<<omega<<endl;
nBlocks = (xDim/BLOCKSIZEx+xDim%BLOCKSIZEx)*(yDim/BLOCKSIZEy+yDim%BLOCKSIZEy)
*(zDim/BLOCKSIZEz+zDim%BLOCKSIZEz);
int B = BLOCKSIZEx*BLOCKSIZEy*BLOCKSIZEz;
n = nBlocks*B;//block*dimx*dimy
cout<<"nBlocks:"<<nBlocks<<endl;
dim3 threads(BLOCKSIZEx, BLOCKSIZEy, BLOCKSIZEz);
dim3 grid(xDim/BLOCKSIZEx,yDim/BLOCKSIZEy,zDim/BLOCKSIZEz);
memsize = n*sizeof(float);
memsize_int = n*sizeof(int);
cudaExtent extent = make_cudaExtent(xDim*sizeof(float),yDim,zDim);
// f0_h = (float *)malloc(memsize);
// f1_h = (float *)malloc(memsize);
// f2_h = (float *)malloc(memsize);
// f3_h = (float *)malloc(memsize);
// f4_h = (float *)malloc(memsize);
// f5_h = (float *)malloc(memsize);
// f6_h = (float *)malloc(memsize);
// f7_h = (float *)malloc(memsize);
// f8_h = (float *)malloc(memsize);
// f9_h = (float *)malloc(memsize);
// f10_h = (float *)malloc(memsize);
// f11_h = (float *)malloc(memsize);
// f12_h = (float *)malloc(memsize);
// f13_h = (float *)malloc(memsize);
// f14_h = (float *)malloc(memsize);
// f15_h = (float *)malloc(memsize);
// f16_h = (float *)malloc(memsize);
// f17_h = (float *)malloc(memsize);
// f18_h = (float *)malloc(memsize);
//
image_h = (int *)malloc(memsize_int);
float *fA_h,*fA_d,*fB_d;
fA_h = (float *)malloc(memsize*19);
cudaMallocPitch((void **) &fA_d, &pitch, xDim*sizeof(float), yDim*zDim*19);
cudaMallocPitch((void **) &fB_d, &pitch, xDim*sizeof(float), yDim*zDim*19);
cudaMalloc((void **) &image_d, memsize_int);
// cudaMallocPitch((void **) &f0_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f1_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f2_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f3_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f4_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f5_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f6_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f7_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f8_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f9_dA , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f10_dA, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f11_dA, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f12_dA, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f13_dA, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f14_dA, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f15_dA, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f16_dA, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f17_dA, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f18_dA, &pitch, xDim*sizeof(float), yDim*zDim);
// cout<<pitch<<endl;
// cudaMallocPitch((void **) &f0_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f1_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f2_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f3_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f4_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f5_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f6_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f7_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f8_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f9_dB , &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f10_dB, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f11_dB, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f12_dB, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f13_dB, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f14_dB, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f15_dB, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f16_dB, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f17_dB, &pitch, xDim*sizeof(float), yDim*zDim);
// cudaMallocPitch((void **) &f18_dB, &pitch, xDim*sizeof(float), yDim*zDim);
cout<<pitch<<endl;
size_t pitch_elements = pitch/sizeof(float);
cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
for (i = 0; i < n*19; i++)
{
fA_h[i] = i;
}
for (i = 0; i < n; i++)
{
int x = i%xDim;
int y = (i/xDim)%yDim;
int z = (i/xDim)/yDim;
// f0_h[i] = i;
// f1_h[i] = i;
// f2_h[i] = i;
// f3_h[i] = i;
// f4_h[i] = i;
// f5_h[i] = i;
// f6_h[i] = i;
// f7_h[i] = i;
// f8_h[i] = i;
// f9_h[i] = i;
// f10_h[i] = i;
// f11_h[i] = i;
// f12_h[i] = i;
// f13_h[i] = i;
// f14_h[i] = i;
// f15_h[i] = i;
// f16_h[i] = i;
// f17_h[i] = i;
// f18_h[i] = i;
image_h[i] = 0;
if(x < 1) image_h[i] = 3;//DirichletWest
if(x > xDim-2) image_h[i] = 1;//BB
if(y < 1) image_h[i] = 1;//BB
if(y > yDim-2) image_h[i] = 1;//BB
if(z < 1) image_h[i] = 1;//DirichletWest
if(z > zDim-2) image_h[i] = 1;//BB
}
cudaMemcpy(image_d, image_h, memsize_int, cudaMemcpyHostToDevice);
if(true)//texture settings
{
// texRef_f0B.normalized = false;
// texRef_f1B.normalized = false;
// texRef_f2B.normalized = false;
// texRef_f3B.normalized = false;
// texRef_f4B.normalized = false;
// texRef_f5B.normalized = false;
// texRef_f6B.normalized = false;
// texRef_f7B.normalized = false;
// texRef_f8B.normalized = false;
// texRef_f9B.normalized = false;
// texRef_f10B.normalized = false;
// texRef_f11B.normalized = false;
// texRef_f12B.normalized = false;
// texRef_f13B.normalized = false;
// texRef_f14B.normalized = false;
// texRef_f15B.normalized = false;
// texRef_f16B.normalized = false;
// texRef_f17B.normalized = false;
// texRef_f18B.normalized = false;
// texRef_f0B.filterMode = cudaFilterModePoint;
// texRef_f1B.filterMode = cudaFilterModePoint;
// texRef_f2B.filterMode = cudaFilterModePoint;
// texRef_f3B.filterMode = cudaFilterModePoint;
// texRef_f4B.filterMode = cudaFilterModePoint;
// texRef_f5B.filterMode = cudaFilterModePoint;
// texRef_f6B.filterMode = cudaFilterModePoint;
// texRef_f7B.filterMode = cudaFilterModePoint;
// texRef_f8B.filterMode = cudaFilterModePoint;
// texRef_f9B.filterMode = cudaFilterModePoint;
// texRef_f10B.filterMode = cudaFilterModePoint;
// texRef_f11B.filterMode = cudaFilterModePoint;
// texRef_f12B.filterMode = cudaFilterModePoint;
// texRef_f13B.filterMode = cudaFilterModePoint;
// texRef_f14B.filterMode = cudaFilterModePoint;
// texRef_f15B.filterMode = cudaFilterModePoint;
// texRef_f16B.filterMode = cudaFilterModePoint;
// texRef_f17B.filterMode = cudaFilterModePoint;
// texRef_f18B.filterMode = cudaFilterModePoint;
// texRef_f0A.normalized = false;
// texRef_f1A.normalized = false;
// texRef_f2A.normalized = false;
// texRef_f3A.normalized = false;
// texRef_f4A.normalized = false;
// texRef_f5A.normalized = false;
// texRef_f6A.normalized = false;
// texRef_f7A.normalized = false;
// texRef_f8A.normalized = false;
// texRef_f9A.normalized = false;
// texRef_f10A.normalized = false;
// texRef_f11A.normalized = false;
// texRef_f12A.normalized = false;
// texRef_f13A.normalized = false;
// texRef_f14A.normalized = false;
// texRef_f15A.normalized = false;
// texRef_f16A.normalized = false;
// texRef_f17A.normalized = false;
// texRef_f18A.normalized = false;
// texRef_f0A.filterMode = cudaFilterModePoint;
// texRef_f1A.filterMode = cudaFilterModePoint;
// texRef_f2A.filterMode = cudaFilterModePoint;
// texRef_f3A.filterMode = cudaFilterModePoint;
// texRef_f4A.filterMode = cudaFilterModePoint;
// texRef_f5A.filterMode = cudaFilterModePoint;
// texRef_f6A.filterMode = cudaFilterModePoint;
// texRef_f7A.filterMode = cudaFilterModePoint;
// texRef_f8A.filterMode = cudaFilterModePoint;
// texRef_f9A.filterMode = cudaFilterModePoint;
// texRef_f10A.filterMode = cudaFilterModePoint;
// texRef_f11A.filterMode = cudaFilterModePoint;
// texRef_f12A.filterMode = cudaFilterModePoint;
// texRef_f13A.filterMode = cudaFilterModePoint;
// texRef_f14A.filterMode = cudaFilterModePoint;
// texRef_f15A.filterMode = cudaFilterModePoint;
// texRef_f16A.filterMode = cudaFilterModePoint;
// texRef_f17A.filterMode = cudaFilterModePoint;
// texRef_f18A.filterMode = cudaFilterModePoint;
}
cudaMemcpy2D(fA_d ,pitch,fA_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(fB_d ,pitch,fA_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim*19,cudaMemcpyHostToDevice);
for (i = 0; i < n*19; i++)
{
fA_h[i] = 0;
}
// if(true)//mem copy host to dev
// {
// cudaMemcpy2D(f0_dA ,pitch,f0_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f1_dA ,pitch,f1_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f2_dA ,pitch,f2_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f3_dA ,pitch,f3_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f4_dA ,pitch,f4_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f5_dA ,pitch,f5_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f6_dA ,pitch,f6_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f7_dA ,pitch,f7_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f8_dA ,pitch,f8_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f9_dA ,pitch,f9_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f10_dA,pitch,f11_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f11_dA,pitch,f11_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f12_dA,pitch,f12_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f13_dA,pitch,f13_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f14_dA,pitch,f14_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f15_dA,pitch,f15_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f16_dA,pitch,f16_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f17_dA,pitch,f17_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f18_dA,pitch,f18_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f0_dB ,pitch,f0_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f1_dB ,pitch,f1_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f2_dB ,pitch,f2_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f3_dB ,pitch,f3_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f4_dB ,pitch,f4_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f5_dB ,pitch,f5_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f6_dB ,pitch,f6_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f7_dB ,pitch,f7_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f8_dB ,pitch,f8_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f9_dB ,pitch,f9_h ,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f10_dB,pitch,f11_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f11_dB,pitch,f11_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f12_dB,pitch,f12_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f13_dB,pitch,f13_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f14_dB,pitch,f14_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f15_dB,pitch,f15_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f16_dB,pitch,f16_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f17_dB,pitch,f17_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// cudaMemcpy2D(f18_dB,pitch,f18_h,xDim*sizeof(float),xDim*sizeof(float),yDim*zDim,cudaMemcpyHostToDevice);
// }
// if(true)//bind texture
// {
// cudaBindTexture2D(0,&texRef_f0A, f0_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f1A, f1_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f2A, f2_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f3A, f3_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f4A, f4_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f5A, f5_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f6A, f6_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f7A, f7_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f8A, f8_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f9A, f9_dA ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f10A,f10_dA,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f11A,f11_dA,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f12A,f12_dA,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f13A,f13_dA,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f14A,f14_dA,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f15A,f15_dA,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f16A,f16_dA,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f17A,f17_dA,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f18A,f18_dA,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f0B, f0_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f1B, f1_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f2B, f2_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f3B, f3_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f4B, f4_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f5B, f5_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f6B, f6_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f7B, f7_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f8B, f8_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f9B, f9_dB ,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f10B,f10_dB,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f11B,f11_dB,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f12B,f12_dB,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f13B,f13_dB,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f14B,f14_dB,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f15B,f15_dB,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f16B,f16_dB,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f17B,f17_dB,&desc,xDim,yDim*zDim,pitch);
// cudaBindTexture2D(0,&texRef_f18B,f18_dB,&desc,xDim,yDim*zDim,pitch);
// }
//
//
// initialize<<<grid, threads>>>(f0_dA.ptr, f1_dA.ptr, f2_dA.ptr, f3_dA.ptr, f4_dA.ptr, f5_dA.ptr, f6_dA.ptr, f7_dA.ptr, f8_dA.ptr, f9_dA.ptr,
// f10_dA.ptr, f11_dA.ptr, f12_dA.ptr, f13_dA.ptr, f14_dA.ptr, f15_dA.ptr, f16_dA.ptr, f17_dA.ptr, f18_dA.ptr,
// xDim,yDim,pitch);
// initialize<<<grid, threads>>>(f0_dA, f1_dA, f2_dA, f3_dA, f4_dA, f5_dA, f6_dA, f7_dA, f8_dA, f9_dA,
// f10_dA, f11_dA, f12_dA, f13_dA, f14_dA, f15_dA, f16_dA, f17_dA, f18_dA,
// xDim,yDim,pitch_elements);
initialize_single<<<grid, threads>>>(fA_d,xDim,yDim,zDim,pitch_elements);
// cudaFuncSetCacheConfig(mrt_d_single,cudaFuncCachePreferL1);
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
for(int t = 0; t<tMax; t=t+2){
//for(int t = 0; t<tMax; t=t+1){
//mrt_d<<<grid, threads>>>(f0_d,f1_d,f2_d,f3_d,f4_d,f5_d,f6_d,f7_d,f8_d,n,image_d,omega,uMax);
//test<<<grid, threads>>>(f0_d,f1_dA,f2_dA,f3_dA,f4_dA,f5_dA,f6_dA,f7_dA,f8_dA,
// mrt_d_textAB<<<grid, threads>>>(f0_dB,f1_dB,f2_dB,f3_dB,f4_dB,f5_dB,f6_dB,f7_dB,f8_dB,f9_dB,
// f10_dB, f11_dB, f12_dB, f13_dB, f14_dB, f15_dB, f16_dB, f17_dB, f18_dB,
// image_d,omega,uMax,xDim,yDim,pitch_elements);
//
// mrt_d_textBA<<<grid, threads>>>(f0_dA,f1_dA,f2_dA,f3_dA,f4_dA,f5_dA,f6_dA,f7_dA,f8_dA,f9_dA,
// f10_dA, f11_dA, f12_dA, f13_dA, f14_dA, f15_dA, f16_dA, f17_dA, f18_dA,
// image_d,omega,uMax,xDim,yDim,pitch_elements);
//
//
// mrt_d_hybAB<<<grid, threads>>>(f0_dB,f1_dB,f2_dB,f3_dB,f4_dB,f5_dB,f6_dB,f7_dB,f8_dB,f9_dB,
// f10_dB, f11_dB, f12_dB, f13_dB, f14_dB, f15_dB, f16_dB, f17_dB, f18_dB,
// f2_dA,f4_dA,f9_dA,
// f11_dA, f13_dA, f14_dA, f16_dA, f18_dA,
// image_d,omega,uMax,xDim,yDim,pitch_elements);
//
// mrt_d_hybBA<<<grid, threads>>>(f0_dA,f1_dA,f2_dA,f3_dA,f4_dA,f5_dA,f6_dA,f7_dA,f8_dA,f9_dA,
// f10_dA, f11_dA, f12_dA, f13_dA, f14_dA, f15_dA, f16_dA, f17_dA, f18_dA,
// f2_dB,f4_dB,f9_dB,
// f11_dB, f13_dB, f14_dB, f16_dB, f18_dB,
// image_d,omega,uMax,xDim,yDim,pitch_elements);
// mrt_d_single<<<grid, threads>>>(fA_d,fB_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
// mrt_d_single<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
simple_copy<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
simple_copy<<<grid, threads>>>(fB_d,fA_d,image_d,omega,uMax,xDim,yDim,zDim,pitch_elements);
if(t%1000 == 0 && t>0) cout<<"finished "<<t<<" timesteps\n";
}
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(xDim*yDim*zDim*double(tMax/1000000.f))/restime<<"MLUPS)"<<endl;
cout<<xDim<<","<<yDim<<","<<zDim<<","<<tMax<<","<<restime<<endl;
// copytest<<<grid, threads>>>(f10_dA,test_d,xDim,yDim,zDim);
//copytest<<<grid, threads>>>(test_d);
//copytest<<<grid, threads>>>(image_d);
// cudaUnbindTexture(texRef_f0A);
// cudaUnbindTexture(texRef_f1A);
// cudaUnbindTexture(texRef_f2A);
// cudaUnbindTexture(texRef_f3A);
// cudaUnbindTexture(texRef_f4A);
// cudaUnbindTexture(texRef_f5A);
// cudaUnbindTexture(texRef_f6A);
// cudaUnbindTexture(texRef_f7A);
// cudaUnbindTexture(texRef_f8A);
// cudaUnbindTexture(texRef_f9A);
// cudaUnbindTexture(texRef_f10A);
// cudaUnbindTexture(texRef_f11A);
// cudaUnbindTexture(texRef_f12A);
// cudaUnbindTexture(texRef_f13A);
// cudaUnbindTexture(texRef_f14A);
// cudaUnbindTexture(texRef_f15A);
// cudaUnbindTexture(texRef_f16A);
// cudaUnbindTexture(texRef_f17A);
// cudaUnbindTexture(texRef_f18A);
// cudaUnbindTexture(texRef_f0B);
// cudaUnbindTexture(texRef_f1B);
// cudaUnbindTexture(texRef_f2B);
// cudaUnbindTexture(texRef_f3B);
// cudaUnbindTexture(texRef_f4B);
// cudaUnbindTexture(texRef_f5B);
// cudaUnbindTexture(texRef_f6B);
// cudaUnbindTexture(texRef_f7B);
// cudaUnbindTexture(texRef_f8B);
// cudaUnbindTexture(texRef_f9B);
// cudaUnbindTexture(texRef_f10B);
// cudaUnbindTexture(texRef_f11B);
// cudaUnbindTexture(texRef_f12B);
// cudaUnbindTexture(texRef_f13B);
// cudaUnbindTexture(texRef_f14B);
// cudaUnbindTexture(texRef_f15B);
// cudaUnbindTexture(texRef_f16B);
// cudaUnbindTexture(texRef_f17B);
// cudaUnbindTexture(texRef_f18B);
// cudaMemcpy2D(f0_h,xDim*sizeof(float) , f0_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f1_h,xDim*sizeof(float) , f1_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f2_h,xDim*sizeof(float) , f2_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f3_h,xDim*sizeof(float) , f3_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f4_h,xDim*sizeof(float) , f4_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f5_h,xDim*sizeof(float) , f5_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f6_h,xDim*sizeof(float) , f6_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f7_h,xDim*sizeof(float) , f7_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f8_h,xDim*sizeof(float) , f8_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f9_h,xDim*sizeof(float) , f9_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f10_h,xDim*sizeof(float),f10_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f11_h,xDim*sizeof(float),f11_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f12_h,xDim*sizeof(float),f12_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f13_h,xDim*sizeof(float),f13_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f14_h,xDim*sizeof(float),f14_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f15_h,xDim*sizeof(float),f15_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f16_h,xDim*sizeof(float),f16_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f17_h,xDim*sizeof(float),f17_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
// cudaMemcpy2D(f18_h,xDim*sizeof(float),f18_dA,pitch,xDim*sizeof(float),yDim*zDim,cudaMemcpyDeviceToHost);
cudaMemcpy2D(fA_h,xDim*sizeof(float),fA_d,pitch,xDim*sizeof(float),yDim*zDim*19,cudaMemcpyDeviceToHost);
// cout<<"f1_h is "<<f1_h[0]<<endl;
//cudaMemcpy(f0_h, f0_d.ptr, memsize, cudaMemcpyDeviceToHost);
cudaMemcpy(image_h, image_d, memsize_int, cudaMemcpyDeviceToHost);
// cout<<image_h[0]<<endl;
// cout<<"test_d: "<<test_h[0]<<endl;
// for(i = 0; i<n; i++){
// cout<<f0_h[i]<<",";
// }
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\"\n";
output<<"ZONE F=POINT, I="<<xDim<<", J="<<yDim<<", K="<<zDim<<"\n";
int row = 0;
int col = 0;
int dep = 0;
i = 0;
float rho, u, v, w;
int j;
for(dep = 0; dep<zDim; dep++){
for(row = 0; row<yDim; row++){
for(col = 0; col<xDim; col++){
i = dep*xDim*yDim+row*xDim+col;
// rho = 0;
rho = fA_h[i];
for(j = 1; j<19; j++)
rho+=fA_h[i+xDim*yDim*zDim*j];
// rho = f0_h[i]+f1_h[i]+f2_h[i]+f3_h[i]+f4_h[i]+f5_h[i]+f6_h[i]+f7_h[i]+f8_h[i]+f9_h[i]+
// f10_h[i]+f11_h[i]+f12_h[i]+f13_h[i]+f14_h[i]+f15_h[i]+f16_h[i]+f17_h[i]+f18_h[i];
u = fA_h[i+xDim*yDim*zDim*1]-fA_h[i+xDim*yDim*zDim*3]+fA_h[i+xDim*yDim*zDim*5]-fA_h[i+xDim*yDim*zDim*6]-
fA_h[i+xDim*yDim*zDim*7]+fA_h[i+xDim*yDim*zDim*8]+fA_h[i+xDim*yDim*zDim*10]-fA_h[i+xDim*yDim*zDim*12]
+fA_h[i+xDim*yDim*zDim*15]-fA_h[i+xDim*yDim*zDim*17];
v = fA_h[i+xDim*yDim*zDim*2]-fA_h[i+xDim*yDim*zDim*4]+fA_h[i+xDim*yDim*zDim*5]+fA_h[i+xDim*yDim*zDim*6]-fA_h[i+xDim*yDim*zDim*7]-fA_h[i+xDim*yDim*zDim*8]+fA_h[i+xDim*yDim*zDim*11]-fA_h[i+xDim*yDim*zDim*13]+fA_h[i+xDim*yDim*zDim*16]-fA_h[i+xDim*yDim*zDim*18];
w = fA_h[i+xDim*yDim*zDim*9]+fA_h[i+xDim*yDim*zDim*10]+fA_h[i+xDim*yDim*zDim*11]+fA_h[i+xDim*yDim*zDim*12]+fA_h[i+xDim*yDim*zDim*13]-fA_h[i+xDim*yDim*zDim*14]-fA_h[i+xDim*yDim*zDim*15]-fA_h[i+xDim*yDim*zDim*16]-fA_h[i+xDim*yDim*zDim*17]-fA_h[i+xDim*yDim*zDim*18];
// output<<col<<", "<<row<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl;
output<<col<<", "<<row<<", "<<dep<<", "<<u<<","<<v<<","<<w<<","<<rho<<endl;
// output<<row<<", "<<col<<", "<<dep<<", "<<u<<","<<v<<","<<fA_h[i+xDim*yDim*zDim*4]<<","<<rho<<endl;
}
}
}
output.close();
cudaFree(image_d);
// cudaFree(f0_dA);
// cudaFree(f1_dA);
// cudaFree(f2_dA);
// cudaFree(f3_dA);
// cudaFree(f4_dA);
// cudaFree(f5_dA);
// cudaFree(f6_dA);
// cudaFree(f7_dA);
// cudaFree(f8_dA);
// cudaFree(f9_dA);
// cudaFree(f10_dA);
// cudaFree(f11_dA);
// cudaFree(f12_dA);
// cudaFree(f13_dA);
// cudaFree(f14_dA);
// cudaFree(f15_dA);
// cudaFree(f16_dA);
// cudaFree(f17_dA);
// cudaFree(f18_dA);
// cudaFree(f0_dB);
// cudaFree(f1_dB);
// cudaFree(f2_dB);
// cudaFree(f3_dB);
// cudaFree(f4_dB);
// cudaFree(f5_dB);
// cudaFree(f6_dB);
// cudaFree(f7_dB);
// cudaFree(f8_dB);
// cudaFree(f9_dB);
// cudaFree(f10_dB);
// cudaFree(f11_dB);
// cudaFree(f12_dB);
// cudaFree(f13_dB);
// cudaFree(f14_dB);
// cudaFree(f15_dB);
// cudaFree(f16_dB);
// cudaFree(f17_dB);
// cudaFree(f18_dB);
cudaFree(fA_d);
cudaFree(fB_d);
return(0);
}
|
7,804 | #include <string.h>
#include <sys/types.h>
#include <netinet/in.h>
#include <stdint.h>
#include "seq_sha1.cuh"
#define blk0(i) (block->l[i] = htonl(block->l[i]))
#define rol(value, bits) (((value) << (bits)) | ((value) >> (32 - (bits))))
#define blk0(i) (block->l[i] = htonl(block->l[i]))
#define blk(i) (block->l[i&15] = rol(block->l[(i+13)&15]^block->l[(i+8)&15] ^block->l[(i+2)&15]^block->l[i&15],1))
#define R0(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk0(i)+0x5A827999+rol(v,5);w=rol(w,30);
#define R1(v,w,x,y,z,i) z+=((w&(x^y))^y)+blk(i)+0x5A827999+rol(v,5);w=rol(w,30);
#define R2(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0x6ED9EBA1+rol(v,5);w=rol(w,30);
#define R3(v,w,x,y,z,i) z+=(((w|x)&y)|(w&x))+blk(i)+0x8F1BBCDC+rol(v,5);w=rol(w,30);
#define R4(v,w,x,y,z,i) z+=(w^x^y)+blk(i)+0xCA62C1D6+rol(v,5);w=rol(w,30);
/* Hash a single 512-bit block. This is the core of the algorithm. */
/* Hash a single 512-bit block. This is the core of the algorithm. */
/* Applies the 80 SHA-1 rounds to `buffer` and accumulates the result into
 * the five-word `state`. The buffer itself is never modified: the blk0/blk
 * macros rewrite the message schedule in place, so we work on a local copy. */
void SHA1Transform(uint32_t state[5], const uint8_t buffer[64])
{
    uint32_t a, b, c, d, e;
    /* Lets the 64-byte block be addressed both as raw bytes and as the
       sixteen 32-bit words the round macros operate on. */
    typedef union {
        uint8_t c[64];
        uint32_t l[16];
    } CHAR64LONG16;
    CHAR64LONG16 *block;
    uint8_t workspace[64];
    block = (CHAR64LONG16*)workspace;
    memcpy(block, buffer, 64);
    /* Copy context->state[] to working vars */
    a = state[0];
    b = state[1];
    c = state[2];
    d = state[3];
    e = state[4];
    /* 4 rounds of 20 operations each. Loop unrolled. */
    /* R0 (rounds 0-15) consumes the message words (byte-swapped via blk0);
       R1-R4 use the rolling schedule (blk) plus each round group's boolean
       function and additive constant. */
    R0(a,b,c,d,e, 0); R0(e,a,b,c,d, 1); R0(d,e,a,b,c, 2); R0(c,d,e,a,b, 3);
    R0(b,c,d,e,a, 4); R0(a,b,c,d,e, 5); R0(e,a,b,c,d, 6); R0(d,e,a,b,c, 7);
    R0(c,d,e,a,b, 8); R0(b,c,d,e,a, 9); R0(a,b,c,d,e,10); R0(e,a,b,c,d,11);
    R0(d,e,a,b,c,12); R0(c,d,e,a,b,13); R0(b,c,d,e,a,14); R0(a,b,c,d,e,15);
    R1(e,a,b,c,d,16); R1(d,e,a,b,c,17); R1(c,d,e,a,b,18); R1(b,c,d,e,a,19);
    R2(a,b,c,d,e,20); R2(e,a,b,c,d,21); R2(d,e,a,b,c,22); R2(c,d,e,a,b,23);
    R2(b,c,d,e,a,24); R2(a,b,c,d,e,25); R2(e,a,b,c,d,26); R2(d,e,a,b,c,27);
    R2(c,d,e,a,b,28); R2(b,c,d,e,a,29); R2(a,b,c,d,e,30); R2(e,a,b,c,d,31);
    R2(d,e,a,b,c,32); R2(c,d,e,a,b,33); R2(b,c,d,e,a,34); R2(a,b,c,d,e,35);
    R2(e,a,b,c,d,36); R2(d,e,a,b,c,37); R2(c,d,e,a,b,38); R2(b,c,d,e,a,39);
    R3(a,b,c,d,e,40); R3(e,a,b,c,d,41); R3(d,e,a,b,c,42); R3(c,d,e,a,b,43);
    R3(b,c,d,e,a,44); R3(a,b,c,d,e,45); R3(e,a,b,c,d,46); R3(d,e,a,b,c,47);
    R3(c,d,e,a,b,48); R3(b,c,d,e,a,49); R3(a,b,c,d,e,50); R3(e,a,b,c,d,51);
    R3(d,e,a,b,c,52); R3(c,d,e,a,b,53); R3(b,c,d,e,a,54); R3(a,b,c,d,e,55);
    R3(e,a,b,c,d,56); R3(d,e,a,b,c,57); R3(c,d,e,a,b,58); R3(b,c,d,e,a,59);
    R4(a,b,c,d,e,60); R4(e,a,b,c,d,61); R4(d,e,a,b,c,62); R4(c,d,e,a,b,63);
    R4(b,c,d,e,a,64); R4(a,b,c,d,e,65); R4(e,a,b,c,d,66); R4(d,e,a,b,c,67);
    R4(c,d,e,a,b,68); R4(b,c,d,e,a,69); R4(a,b,c,d,e,70); R4(e,a,b,c,d,71);
    R4(d,e,a,b,c,72); R4(c,d,e,a,b,73); R4(b,c,d,e,a,74); R4(a,b,c,d,e,75);
    R4(e,a,b,c,d,76); R4(d,e,a,b,c,77); R4(c,d,e,a,b,78); R4(b,c,d,e,a,79);
    /* Add the working vars back into context.state[] */
    state[0] += a;
    state[1] += b;
    state[2] += c;
    state[3] += d;
    state[4] += e;
    /* Wipe variables */
    a = b = c = d = e = 0;
}
/* SHA1Init - Initialize new context */
/* SHA1Init - Initialize new context */
/* Resets the bit counter and loads the standard SHA-1 initialization
 * vector into the hash state. */
void SHA1Init(SHA1_CTX* context)
{
    context->count[0] = 0;
    context->count[1] = 0;
    /* SHA1 initialization constants */
    context->state[0] = 0x67452301;
    context->state[1] = 0xEFCDAB89;
    context->state[2] = 0x98BADCFE;
    context->state[3] = 0x10325476;
    context->state[4] = 0xC3D2E1F0;
}
/* Run your data through this. */
/* Run your data through this. */
/* Absorbs `len` bytes from `data` into the context: tops up the 64-byte
 * internal buffer, hashes every complete block via SHA1Transform, and
 * stashes any trailing partial block for the next call. */
void SHA1Update(SHA1_CTX* context, const uint8_t* data, unsigned int len)
{
    unsigned int i, j;
    /* j = number of bytes already buffered (bit count / 8, mod 64). */
    j = (context->count[0] >> 3) & 63;
    /* Add len*8 bits to the 64-bit message counter; a 32-bit wraparound of
       count[0] signals a carry into count[1]. */
    if ((context->count[0] += len << 3) < (len << 3)) context->count[1]++;
    context->count[1] += (len >> 29);
    if ((j + len) > 63) {
        /* Fill the partial buffer to a full 64 bytes and hash it. */
        memcpy(&context->buffer[j], data, (i = 64-j));
        SHA1Transform(context->state, context->buffer);
        /* Hash the remaining whole blocks straight out of `data`. */
        for ( ; i + 63 < len; i += 64) {
            SHA1Transform(context->state, &data[i]);
        }
        j = 0;
    }
    else i = 0;
    /* Keep the leftover bytes buffered until the next Update/Final. */
    memcpy(&context->buffer[j], &data[i], len - i);
}
/* Add padding and return the message digest. */
/* Add padding and return the message digest. */
/* Appends the standard SHA-1 padding (0x80, zeros, 64-bit big-endian bit
 * count), emits the 20-byte digest big-endian, and wipes the context. */
void SHA1Final(uint8_t digest[20], SHA1_CTX* context)
{
    uint32_t i, j;
    uint8_t finalcount[8];
    /* Serialize the 64-bit bit count big-endian: count[1] (high word) for
       i < 4, count[0] (low word) for i >= 4. */
    for (i = 0; i < 8; i++) {
        finalcount[i] = (uint8_t)((context->count[(i >= 4 ? 0 : 1)]
            >> ((3-(i & 3)) * 8) ) & 255); /* Endian independent */
    }
    /* Mandatory 0x80 byte, then zero-pad until the bit count is 448 mod 512
       (i.e. 56 buffered bytes), leaving exactly room for the length. */
    SHA1Update(context, (const unsigned char *)"\200", 1);
    while ((context->count[0] & 504) != 448) {
        SHA1Update(context, (const unsigned char *)"\0", 1);
    }
    SHA1Update(context, finalcount, 8); /* Should cause a SHA1Transform() */
    /* Emit the 160-bit state as 20 big-endian bytes. */
    for (i = 0; i < 20; i++) {
        digest[i] = (uint8_t)
            ((context->state[i>>2] >> ((3-(i & 3)) * 8) ) & 255);
    }
    /* Wipe variables */
    i = j = 0;
    memset(context->buffer, 0, 64);
    memset(context->state, 0, 20);
    memset(context->count, 0, 8);
    memset(&finalcount, 0, 8);
#ifdef SHA1HANDSOFF /* make SHA1Transform overwrite its own static vars */
    SHA1Transform(context->state, context->buffer);
#endif
}
/* Extract the current 20-byte digest big-endian WITHOUT appending the usual
 * padding and length block, then wipe the context. */
void SHA1FinalNoLen(uint8_t digest[20], SHA1_CTX* context)
{
    uint32_t i;
    for (i = 0; i < 20; i++) {
        const uint32_t word = context->state[i >> 2];
        const uint32_t shift = (3 - (i & 3)) * 8;  /* big-endian byte order */
        digest[i] = (uint8_t)((word >> shift) & 255);
    }
    /* Wipe the context so no hash state lingers in memory. */
    memset(context->buffer, 0, 64);
    memset(context->state, 0, 20);
    memset(context->count, 0, 8);
#ifdef SHA1HANDSOFF /* make SHA1Transform overwrite its own static vars */
    SHA1Transform(context->state, context->buffer);
#endif
}
|
7,805 | // magictoken.ex_mul_f32_f32.begin
// Foreign function example: multiplication of a pair of floats
// Foreign-function example: multiply two floats.
// The product is written through `return_value` (caller-provided slot);
// the int return is the status code — 0 means no Python exception occurred.
extern "C" __device__ int
mul_f32_f32(
  float* return_value,
  float x,
  float y)
{
  const float product = x * y;
  *return_value = product;
  return 0;
}
// magictoken.ex_mul_f32_f32.end
// magictoken.ex_sum_reduce_proto.begin
extern "C"
__device__ int
sum_reduce(
float* return_value,
float* array,
int n
);
// magictoken.ex_sum_reduce_proto.end
// Performs a simple reduction on an array passed by pointer using the
// ffi.from_buffer() method. Implements the prototype above.
// Performs a simple reduction on an array passed by pointer using the
// ffi.from_buffer() method. Implements the prototype above.
// Sums array[0..n-1] into *return_value; returns 0 (no Python exception).
extern "C"
__device__ int
sum_reduce(
  float* return_value,
  float* array,
  int n
)
{
  // Accumulate in double to limit float rounding error, then narrow once.
  double sum = 0.0;
  // Use int to match n: the previous size_t counter made `i < n` a
  // signed/unsigned comparison, and a negative n would wrap to a huge
  // bound and read out of bounds.
  for (int i = 0; i < n; ++i) {
    sum += array[i];
  }
  *return_value = (float)sum;
  return 0;
}
|
7,806 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
typedef float float_type;
#define KERNEL_ITERATIONS 1000
// Final warp-level stage of the block reduction: threads 0..31 fold
// sdata[0..63] down to sdata[0].
// Preconditions: called only by threads 0..31 of a block with
// blockDim.x >= 64, with sdata[0..63] populated and a __syncthreads()
// already executed by the caller.
// FIX: `volatile` alone no longer orders the lane exchanges on
// architectures with independent thread scheduling (Volta and newer);
// __syncwarp() between steps makes each read see the previous write.
// Requires CUDA 9+.
__device__ void warp_reduce(volatile float_type *sdata, const unsigned int thread_id)
{
    sdata[thread_id] += sdata[thread_id + 32]; __syncwarp();
    sdata[thread_id] += sdata[thread_id + 16]; __syncwarp();
    sdata[thread_id] += sdata[thread_id + 8];  __syncwarp();
    sdata[thread_id] += sdata[thread_id + 4];  __syncwarp();
    sdata[thread_id] += sdata[thread_id + 2];  __syncwarp();
    sdata[thread_id] += sdata[thread_id + 1];  __syncwarp();
}
// Each thread accumulates KERNEL_ITERATIONS consecutive terms of the
// Leibniz series for pi/4: sum_k (-1)^k / (2k+1). The block then
// tree-reduces its threads' partial sums in dynamic shared memory
// (blockDim.x floats, passed at launch) and writes the block total to
// gdata[blockIdx.x].
// Assumes blockDim.x is a power of two >= 64: the stride loop halves down
// to 64 and warp_reduce reads sdata[tid+32] (the host launches 1024).
__global__ void reduce_pi(float_type *gdata)
{
    extern __shared__ float_type sdata[];
    const unsigned int thread_id = threadIdx.x;
    // Global index of this thread's first series term (64-bit: n can be huge).
    const unsigned long long int i = (((unsigned long long int)blockIdx.x) * blockDim.x + threadIdx.x) * KERNEL_ITERATIONS;
    float_type current_thread_factor = 0.0f;
    for (int it = 0; it < KERNEL_ITERATIONS; it++)
    {
        // Alternating sign: +1 for even term index, -1 for odd.
        const float factor = ((i + it) & 1) ? -1.0f : 1.0f;
        // Denominator 2*(i+it)+1.
        // NOTE(review): the unsigned long long denominator is converted to
        // float for the division — precision degrades for very large term
        // indices; confirm acceptable for the intended n range.
        current_thread_factor += factor / (((i + it) << 1) + 1);
    }
    sdata[thread_id] = current_thread_factor;
    __syncthreads();
    // reduction in shared memory
    for (unsigned int stride = blockDim.x >> 1; stride > 32; stride >>= 1)
    {
        if (thread_id < stride) {
            sdata[thread_id] += sdata[thread_id + stride];
        }
        __syncthreads();
    }
    // Last 64 elements are folded warp-synchronously by threads 0..31.
    if (thread_id < 32)
        warp_reduce(sdata, thread_id);
    // write result for this block to global memory
    if (thread_id == 0)
        gdata[blockIdx.x] = sdata[0];
}
void Usage(char *prog_name);
// Estimates pi with n Leibniz-series terms: the GPU computes per-block
// partial sums, the host adds them and scales by 4.
int main(int argc, char *argv[])
{
    long long n, i;
    double factor = 0.0;
    double sum = 0.0;
    float_type *dev_sum;  // per-block partial sums (device)
    float_type *cpu_sum;  // host copy of the per-block partial sums
    if (argc != 2)
        Usage(argv[0]);
    n = strtoll(argv[1], NULL, 10);
    if (n < 1)
        Usage(argv[0]);
    printf("Before for loop, factor = %f.\n", factor);
    long long block_size = 1024;
    // Each thread covers KERNEL_ITERATIONS terms, so the grid covers
    // ceil(n / (KERNEL_ITERATIONS * block_size)) blocks.
    long long grid_size = ceil(((double)n / KERNEL_ITERATIONS) / block_size);
    cpu_sum = (float_type*)calloc(grid_size, sizeof(float_type));
    cudaMalloc(&dev_sum, grid_size * sizeof(float_type));
    // Dynamic shared memory: one float_type per thread for the reduction.
    reduce_pi<<< grid_size, block_size, block_size * sizeof(float_type) >>>(dev_sum);
    // Blocking cudaMemcpy also synchronizes with the kernel.
    cudaMemcpy(cpu_sum, dev_sum, grid_size * sizeof(float_type), cudaMemcpyDeviceToHost);
    // NOTE(review): factor is only printed below — it is a leftover from a
    // serial version and does not feed into the estimate.
    factor = ((n - 1) % 2 == 0) ? 1.0 : -1.0;
    for (i = 0; i < grid_size; i++)
        sum += cpu_sum[i];
    cudaFree(dev_sum);
    free(cpu_sum);
    printf("After for loop, factor = %f.\n", factor);
    // The series sums to pi/4.
    sum = 4.0 * sum;
    printf("With n = %lld terms\n", n);
    printf(" Our estimate of pi = %.14f\n", sum);
    printf(" Ref estimate of pi = %.14f\n", 4.0 * atan(1.0));
    return 0;
}
// Print a usage message to stderr and terminate.
// FIX: the message advertised "<thread_count> <n>" but the program accepts
// exactly one argument (argc != 2 check in main); also exit non-zero so an
// invalid invocation is reported as a failure.
void Usage(char *prog_name)
{
    fprintf(stderr, "usage: %s <n>\n", prog_name);
    fprintf(stderr, "   n is the number of terms and should be >= 1\n");
    exit(1);
}
|
7,807 | #include "includes.h"
// Fully-connected layer: y[row*outSize+col] += dot(x[row, :], W[:, col]).
// x is (rows x weightHeight) row-major; weights is (weightHeight x outSize)
// row-major. One thread per output element (row, col).
// NOTE(review): rows are bounded by inSize — presumably inSize is the number
// of input rows / batch size here; confirm against the caller.
__global__ void fc_gpu_kernel(float *y, float *x, float *weights, const int weightHeight,const int outSize, const int inSize){
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    if(row < inSize && col < outSize){
        // Accumulate the dot product in a register and touch global memory
        // once, instead of read-modify-writing y on every loop iteration.
        float acc = 0.0f;
        for(int i = 0; i < weightHeight; ++i){
            acc += x[row*weightHeight + i] * weights[i*outSize+col];
        }
        y[row*outSize+col] += acc;  // += keeps the accumulate-into-y contract
    }
} |
7,808 | #include "includes.h"
// Drop the 4th channel: copy the x/y/z components of each float4 pixel into
// a float3 image. Strides are given in elements (not bytes); one thread per
// pixel, launched over a 2D grid covering width x height.
__global__ void cuConvertC4ToC3Kernel(const float4* src, size_t src_stride, float3* dst, size_t dst_stride, int width, int height)
{
    const int x = blockIdx.x*blockDim.x + threadIdx.x;
    const int y = blockIdx.y*blockDim.y + threadIdx.y;
    if (x >= width || y >= height)
        return;
    const float4 pixel = src[y*src_stride + x];
    dst[y*dst_stride + x] = make_float3(pixel.x, pixel.y, pixel.z);
}
7,809 | #include <iostream>
using namespace std;
#define TILE_WIDTH 64
#define iceil(num,den) (num+den-1)/den
//Prints the image on screen
// Prints a w x h row-major matrix to stdout, one row per line, followed by a
// "*****" separator line.
void printMatrix(float* img, int w, int h) {
    for (int row = 0; row < h; ++row) {
        for (int col = 0; col < w; ++col)
            cout << img[row*w + col] << " ";
        cout << endl;
    }
    cout <<"*****" << endl;
}
// Matrix-vector product without shared memory: d_P = d_M * d_N, where d_M is
// width x width (row-major) and d_N is a vector of length width.
// NOTE: the guard `col < 1` restricts execution to col == 0, so
// d_P[row + col] == d_P[row] and d_N[k + col] == d_N[k]; the `col` terms are
// vestigial from a matrix-matrix version of this kernel.
__global__ void matrixMulKernel_NoSM(float *d_M, float *d_N, float *d_P, int width){
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if(col < 1 && row < width){
        float pValue = 0;
        // Dot product of matrix row `row` with the vector.
        for(int k = 0; k < width; k++)
        {
            pValue += d_M[row*width + k] * d_N[k + col];
        }
        d_P[row + col] = pValue;
    }
}
// Matrix-vector product using shared memory: d_P = d_M * d_N.
// Launch: blockDim = (1, TILE_WIDTH); gridDim.y tiles the rows. Only full
// TILE_WIDTH chunks are processed (width/TILE_WIDTH floors), matching the
// original behavior.
// BUG FIX: with a (1, TILE_WIDTH) block, threadIdx.x is always 0, so the old
// code wrote only Ms[ty][0]/Ns[ty][0] yet read Ms[ty][k] for every k —
// summing uninitialized shared memory — and offset d_N/d_P by blockIdx.x.
// Here each block stages TILE_WIDTH vector elements in shared memory and
// each thread walks its own matrix row; blocks with blockIdx.x > 0 simply
// recompute (and rewrite) the same rows, keeping the original grid valid.
__global__ void matrixMulKernel_SM(float *d_M, float *d_N, float *d_P, int width){
    __shared__ float Ns[TILE_WIDTH];  // one tile of the input vector
    int ty = threadIdx.y;
    int row = blockIdx.y * TILE_WIDTH + ty;
    float pValue = 0;
    for(int m = 0; m < width/TILE_WIDTH; m++){
        // Cooperative load: thread ty fetches one vector element of the tile.
        Ns[ty] = (m*TILE_WIDTH + ty < width) ? d_N[m*TILE_WIDTH + ty] : 0.0f;
        __syncthreads();
        if(row < width){
            for(int k = 0; k < TILE_WIDTH; k++)
                pValue += d_M[row*width + m*TILE_WIDTH + k] * Ns[k];
        }
        __syncthreads();  // outside the divergent guard: all threads reach it
    }
    if(row < width)
        d_P[row] = pValue;
}
// Host driver: computes P = M * N on the device, where M is width x width
// and N, P are vectors of length width. Runs both kernel variants and
// prints each result.
void matrixMul(float* M, float* N, float* P, int width) {
    int size = width * width * sizeof(float);  // matrix bytes
    int rsize = width * sizeof(float);         // vector bytes
    float *d_M, *d_N, *d_P; //Device Pointers
    //Allocate memory from GPU for my input and output array
    cudaMalloc((void**)&d_M, size);
    cudaMalloc((void**)&d_N, rsize);
    cudaMalloc((void**)&d_P, rsize);
    //Transfer data to the GPU
    cudaMemcpy(d_M, M, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, N, rsize, cudaMemcpyHostToDevice);
    // One column of TILE_WIDTH threads per block; blocks tile the rows.
    dim3 myBlockDim(1, TILE_WIDTH, 1);
    dim3 myGridDim(iceil(width, TILE_WIDTH), iceil(width, TILE_WIDTH), 1);
    //===== Not using Shared Memory ===============
    matrixMulKernel_NoSM <<<myGridDim, myBlockDim >>> (d_M, d_N, d_P, width);
    cudaMemcpy(P, d_P, rsize, cudaMemcpyDeviceToHost);
    printMatrix(P, 1, width);
    //===== Using Shared Memory ===================
    matrixMulKernel_SM <<<myGridDim, myBlockDim >>> (d_M, d_N, d_P, width);
    // BUG FIX: copy rsize (vector) bytes, not size (matrix) bytes — d_P and
    // P hold only width floats, so the old copy read and wrote out of bounds.
    cudaMemcpy(P, d_P, rsize, cudaMemcpyDeviceToHost);
    printMatrix(P, 1, width);
    //----------------------------------------------------
    cudaFree(d_M);
    cudaFree(d_N);
    cudaFree(d_P);
}
// Fills a width x width matrix and a width-long vector with random digits,
// then runs the matrix-vector multiply benchmark.
int main(){
    srand(time(0));
    int width = 320;
    float *M = new float[width*width];
    float *N = new float[width];
    float *P = new float[width];
    //Load value to the image
    for (int i = 0; i < width; i++)
        for (int j = 0; j < width; j++)
            M[i*width + j] = rand()%10;
    for (int i = 0; i < width; i++)
        N[i] = rand()%10;
    printMatrix(M, width, width);
    printMatrix(N, 1, width);
    matrixMul(M, N, P, width);
    // FIX: release the host buffers (previously leaked).
    delete[] M;
    delete[] N;
    delete[] P;
    return 0;
}
|
7,810 | #include <stdio.h>
#include <stdlib.h>
// Host reference: c = A * b, where A is m x n (row-major), b has n entries,
// and c receives m entries.
void cpu_mxv(int m, int n, int* a, int* b, int* c){
    for (int row = 0; row < m; ++row) {
        int acc = 0;
        const int* a_row = a + row * n;  // start of this matrix row
        for (int col = 0; col < n; ++col) {
            acc += a_row[col] * b[col];
        }
        c[row] = acc;
    }
}
// One thread per output row: c[j] = dot(a[j, :], b), with a row-major and
// n columns. The launch must supply exactly m threads (the host uses
// <<<10,50>>> for m = 500); no row bound is available inside the kernel.
// BUG FIX: accumulate in a register and overwrite c[j]. The old code did
// `c[j] += ...` into device memory that was never initialized by the host,
// so results depended on whatever garbage was in the allocation.
__global__ void gpu_mxv(int n, int* a, int* b, int* c){
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int sum = 0;
    for(int i=0; i<n; i++){
        sum += a[j*n+i]*b[i];
    }
    c[j] = sum;
}
// Compares the GPU result h_c against the CPU reference h_cc element-wise;
// on a match, prints the rough speedup (CPU time / GPU time), otherwise
// reports failure.
void validate(int m, int *h_c, int *h_cc, float gtime, float ctime){
    bool ok = true;
    for (int i = 0; i < m; ++i) {
        if (h_cc[i] != h_c[i]) {
            ok = false;
            break;
        }
    }
    if (ok)
        printf("all results are correct!!!, speedup = %f\n", ctime/ gtime);
    else
        printf("incorrect results\n");
}
// main
// Benchmarks a 500x1000 integer matrix-vector multiply on the GPU against a
// CPU reference and validates the results.
int main(){
    int m = 500;
    int n = 1000;
    // allocate pinned host memory (faster H2D/D2H transfers)
    int *h_a, *h_b, *h_c, *h_cc;
    cudaMallocHost((void **) &h_a, m*n*sizeof(int));
    cudaMallocHost((void **) &h_b, n*sizeof(int));
    cudaMallocHost((void **) &h_c, m*sizeof(int));
    cudaMallocHost((void **) &h_cc, m*sizeof(int));
    // Fill the matrix and vector with random digits.
    for (int i = 0; i < m; ++i) {
        for (int j = 0; j < n; ++j) {
            h_a[i * n + j] = rand() % 10;
        }
    }
    for( int i = 0; i < n; i++)
        h_b[i] = rand()% 10;
    // Allocate memory space on the device
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **) &d_a, m*n*sizeof(int));
    cudaMalloc((void **) &d_b, n*sizeof(int));
    cudaMalloc((void **) &d_c, m*sizeof(int));
    // BUG FIX: zero the output vector before launch — the kernel accumulates
    // with += and cudaMalloc does not initialize memory.
    cudaMemset(d_c, 0, m*sizeof(int));
    // some events to count the execution time
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float gpu_elapsed_time_ms, cpu_elapsed_time_ms;
    // copy matrix A and vector b from host to device memory
    cudaMemcpy(d_a, h_a, m*n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, n*sizeof(int), cudaMemcpyHostToDevice);
    // start to count execution time of GPU version
    cudaEventRecord(start, 0);
    gpu_mxv<<<10,50>>>(n, d_a, d_b, d_c); // 10*50 = 500 = m threads, one per row
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication on GPU: %f ms.\n",gpu_elapsed_time_ms);
    // copy result vector c from device to host memory
    cudaMemcpy(h_c, d_c, m*sizeof(int), cudaMemcpyDeviceToHost);
    // start to count execution time of CPU version
    cudaEventRecord(start, 0);
    cpu_mxv(m, n, h_a, h_b, h_cc);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_elapsed_time_ms, start, stop);
    printf("Time elapsed on matrix multiplication on CPU: %f ms.\n",cpu_elapsed_time_ms);
    //compare the solution and calculate speed up
    validate(m, h_c, h_cc, gpu_elapsed_time_ms, cpu_elapsed_time_ms);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
} |
7,811 | #include <cstdio>
#include <thrust/scan.h>
#include <thrust/device_vector.h>
// Maps each input symbol to its code length in bits: one thread per symbol,
// lengths[t] = symbolSizes[data[t]].
__global__ void computeLengths(int* data, int* lengths, int* symbolSizes, int n) {
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx < n) {
        lengths[idx] = symbolSizes[data[idx]];
    }
}
// Exposes the raw device pointer backing a thrust device_vector, for passing
// to plain CUDA kernels.
int* toIntPtr(thrust::device_vector<int>& v) {
    return thrust::raw_pointer_cast(v.data());
}
// For each output byte, records the first symbol that starts in it.
// lengthSum is the inclusive prefix sum of code lengths (bits), so
// lengthSum[t-1]/8 is the byte where symbol t begins. Thread t writes
// firstByteSym[curr] = t exactly when its start byte differs from the
// previous symbol's start byte, i.e. when t is the first symbol of that
// byte. Thread 0 is skipped, leaving firstByteSym[0] == 0 (symbol 0 starts
// in byte 0); bytes containing no symbol start keep their initial 0.
__global__ void computeFirstByteSym(int* lengthSum, int* firstByteSym, int n, int outSize) {
    int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (thid >= n || thid == 0) return;
    // Start byte of symbol thid and of its predecessor.
    int curr = (lengthSum[thid - 1] / 8);
    int prev = (thid == 1) ? 0 : (lengthSum[thid - 2]/8);
    if (prev != curr) {
        //printf("sym: %d, curr: %d\n", thid, curr);
        firstByteSym[curr] = thid;
    }
}
// Assembles one output byte per thread by OR-ing in the codes of every
// symbol overlapping that byte (the byte is stored in one int of results).
// firstByteSym[thid] names a symbol at the start of byte thid; we back up
// one symbol so a code spilling over from the previous byte is included.
// `shift` is the symbol's start bit relative to this byte's first bit: a
// non-negative shift places the code at/after the byte start, a negative
// shift keeps only the tail of a code that began in an earlier byte.
// NOTE(review): the stop test `shift > 8` lets shift == 8 through, where
// `code << 8` lands entirely above the low 8 bits — confirm this boundary
// is intended.
__global__ void computeResult(int* symbols, int* data, int* firstByteSym, int* lengthSum, int* results, int n, int outSize) {
    int thid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (thid >= outSize) return;
    int symI = firstByteSym[thid];
    // Back up one symbol to catch a code crossing into this byte.
    if (symI != 0) symI --;
    // First bit of this output byte in the global bitstream.
    int offsetDelta = thid * 8;
    //printf("thid: %d, sym: %d\n", thid, symI);
    int result = 0;
    while (symI < n) {
        int shift = (symI == 0 ? 0 : lengthSum[symI-1]) - offsetDelta;
        //printf("thid: %d | symI: %d, shift: %d\n", thid, symI, shift);
        if (shift > 8) break;
        int code = symbols[data[symI]];
        if (shift >= 0)
            result |= code << shift;
        else
            result |= code >> (-shift);
        symI ++;
    }
    results[thid] = result;
}
// Demo driver: encodes 10000 symbols drawn cyclically from a 4-entry prefix
// code (code lengths 1,2,3,3 bits) into a packed bitstream, producing one
// output element per byte of the stream.
int main(){
    cudaSetDevice(1);
    // Code table: symbol i -> code bits / code length in bits.
    int symbolsData[] = {0b1, 0b01, 0b001, 0b000};
    int sizesData[] = {1, 2, 3, 3};
    const int symN = 4;
    const int dataN = 10000;
    thrust::device_vector<int> data;
    // NOTE(review): push_back on a device_vector performs one host-to-device
    // copy per element — fine for a demo, slow for real input sizes.
    for (int i=0; i < dataN; i ++) data.push_back(i % symN);
    thrust::device_vector<int> symbols (symbolsData, symbolsData + symN);
    thrust::device_vector<int> symbolSizes (sizesData, sizesData + symN);
    thrust::device_vector<int> lengthSum (dataN);
    // Per-symbol code lengths...
    computeLengths<<<(dataN + 1023) / 1024, 1024>>>
        (toIntPtr(data), toIntPtr(lengthSum),
         toIntPtr(symbolSizes), dataN);
    // ...turned into an inclusive prefix sum = end bit offset of each symbol.
    thrust::inclusive_scan(lengthSum.begin(), lengthSum.end(), lengthSum.begin());
    //for (int i : lengthSum) printf("l=%d\n", i);
    // Total bytes of output (.back() copies the last element to the host).
    // NOTE(review): the +1 rounds up but also adds a spare byte when the bit
    // count is an exact multiple of 8.
    int outSize = (lengthSum.back() / 8 + 1);
    thrust::device_vector<int> firstByteSym;
    firstByteSym.resize(outSize, 0);
    // For each output byte, find the first symbol starting in it.
    computeFirstByteSym<<<(outSize + 1023) / 1024, 1024>>>
        (toIntPtr(lengthSum),
         toIntPtr(firstByteSym),
         dataN, outSize);
    //for (int i : firstByteSym) printf("s=%d\n", i);
    thrust::device_vector<int> result;
    result.resize(outSize, 0);
    // Pack the codes into the output bytes.
    computeResult<<<(outSize + 1023) / 1024, 1024>>>
        (toIntPtr(symbols),
         toIntPtr(data),
         toIntPtr(firstByteSym),
         toIntPtr(lengthSum),
         toIntPtr(result),
         dataN, outSize);
    //for (int i : result) printf("result=%d\n", i);
    cudaDeviceSynchronize ();
    return 0;
}
|
7,812 | #include "cuda_runtime.h"
#include "cuda_runtime_api.h"
#include "device_launch_parameters.h"
#include <iostream>
#include <numeric>
#include <math.h>
using namespace std;
#define BLOCK_SIZE 4;
// In-place tree reduction: computes the mean of input[0..n-1] and leaves it
// in input[0]. Must be launched as a single block of ceil(n/2) threads
// (each thread initially folds one pair); non-power-of-two n is handled by
// rounding the live thread count up at every step.
__global__ void mean(float* input, int n)
{
    const int tid = threadIdx.x;
    int step_size = 1;
    int number_of_threads = blockDim.x;
    while (number_of_threads > 0)
    {
        if (tid < number_of_threads)
        {
            const int fst = tid * step_size * 2;
            const int snd = fst + step_size;
            if(snd < n)  // guard the partner index against the array end
            {
                input[fst] += input[snd];// a = a+b
            }
        }
        step_size <<= 1;
        if(number_of_threads == 1)
            break;  // uniform condition: all threads leave the loop together
        number_of_threads = (int)ceil((float)number_of_threads/2.0); // halve the live threads
        __syncthreads();
    }
    __syncthreads();
    // BUG FIX: only one thread performs the final division. Previously every
    // thread executed `input[0] /= n` — a data race that can divide the
    // result repeatedly depending on how the writes serialize.
    if (tid == 0)
        input[0] /= n;
}
// Reads n floats from stdin and prints their mean, computed by an in-place
// GPU reduction.
int main()
{
    int count=0;
    float result;
    float *d;  // device buffer, reduced in place by the kernel
    cout<<"\nEnter the number of elements : ";
    cin>>count;
    const int size = count * sizeof(float);
    float *h;
    h = new float[count];
    cout<<"\nEnter the elements : \n";
    for(int i=0;i<count;i++)
    cin>>h[i];
    //h[i] = rand()%1000;
    cudaMalloc(&d, size);
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
    //cout<<ceil((float)count/2.0);
    // One block of ceil(count/2) threads: each thread first sums a pair.
    // NOTE(review): a single block caps count at 2048 (1024 threads); larger
    // inputs would need a multi-block reduction.
    mean <<<1, ceil((float)count/2.0) >>>(d,count);
    // Blocking cudaMemcpy also synchronizes with the kernel before the read.
    cudaMemcpy(&result, d, sizeof(float), cudaMemcpyDeviceToHost);
    cout << "Mean is " << result << endl;
    getchar();
    cudaFree(d);
    delete[] h;
    return 0;
}
/*
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvcc ParRedMean.cu -o ParRedMean
ParRedMean.cu
Creating library ParRedMean.lib and object ParRedMean.exp
PS D:\MyFiles\Projects\LP1-LabAsg\2-HPC> nvprof ./ParRedMean
Enter the number of elements : 4
Enter the elements :
2
3
6
1
==26012== NVPROF is profiling process 26012, command: ./ParRedMean
Mean is 3
==26012== Profiling application: ./ParRedMean
==26012== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 63.04% 2.7840us 1 2.7840us 2.7840us 2.7840us mean(float*, int)
23.91% 1.0560us 1 1.0560us 1.0560us 1.0560us [CUDA memcpy HtoD]
13.04% 576ns 1 576ns 576ns 576ns [CUDA memcpy DtoH]
API calls: 78.89% 167.23ms 1 167.23ms 167.23ms 167.23ms cudaMalloc
20.69% 43.855ms 1 43.855ms 43.855ms 43.855ms cuDevicePrimaryCtxRelease
0.14% 293.70us 97 3.0270us 100ns 163.00us cuDeviceGetAttribute
0.11% 226.30us 1 226.30us 226.30us 226.30us cudaFree
0.08% 167.00us 1 167.00us 167.00us 167.00us cuModuleUnload
0.05% 116.30us 2 58.150us 22.700us 93.600us cudaMemcpy
0.03% 61.100us 1 61.100us 61.100us 61.100us cuDeviceTotalMem
0.01% 28.300us 1 28.300us 28.300us 28.300us cudaLaunchKernel
0.00% 10.000us 1 10.000us 10.000us 10.000us cuDeviceGetPCIBusId
0.00% 1.5000us 3 500ns 300ns 900ns cuDeviceGetCount
0.00% 1.2000us 2 600ns 100ns 1.1000us cuDeviceGet
0.00% 700ns 1 700ns 700ns 700ns cuDeviceGetName
0.00% 300ns 1 300ns 300ns 300ns cuDeviceGetUuid
0.00% 300ns 1 300ns 300ns 300ns cuDeviceGetLuid
*/ |
7,813 | #include "includes.h"
// Histogram kernel: each block accumulates a partial histogram in shared
// memory, then folds it into the global histogram with one atomic per bin.
// Assumes blockDim.x == number of bins and that every entry of `values`
// lies in [0, blockDim.x) — TODO confirm at the call site.
__global__ void hist_inShared (const int* values, int length, int* hist){
    extern __shared__ int shHist[];
    // One bin per thread: clear our slot before anyone starts counting.
    shHist[threadIdx.x] = 0;
    __syncthreads();
    // Grid-stride loop over the input array.
    const int step = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < length; i += step) {
        // Shared-memory atomic keeps per-block counting thread safe.
        atomicAdd(&shHist[values[i]], 1);
    }
    // Wait until every thread's counts are in, then merge into global memory.
    __syncthreads();
    atomicAdd(&hist[threadIdx.x], shHist[threadIdx.x]);
} |
7,814 | #include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
//use 16 for portability
#define BLOCK_SIZE 16
struct Matrix
{
int height;
int width;
int pWidth; //width of parent matrix
float* data;
};
void InitMatrix(Matrix M);
// Returns a Matrix with the same shape as M but no data buffer.
// BUG FIX: the original dropped `pWidth` and left `data` uninitialized;
// the copy now carries pWidth and starts with a NULL data pointer so an
// unallocated result cannot be dereferenced silently.
Matrix CopyShape(const Matrix M){
    struct Matrix M_copy;
    M_copy.height = M.height;
    M_copy.width = M.width;
    M_copy.pWidth = M.pWidth;
    M_copy.data = NULL;
    return M_copy;
}
//A and B are input, C is output
// Tiled shared-memory matrix multiply, C = A * B.
// Launch with BLOCK_SIZE x BLOCK_SIZE threads per block, one block per
// BLOCK_SIZE x BLOCK_SIZE tile of C. There are no edge guards, so all
// matrix dimensions must be multiples of BLOCK_SIZE.
__global__ void MatMul_k(const struct Matrix A, const struct Matrix B, Matrix C){
    //row of A determines C row, Col of B determines C col
    //Block determines which submatrix of C we work on
    //Create sub matrix of C to calculate with shared memory
    struct Matrix C_sub;
    C_sub.width = BLOCK_SIZE;
    C_sub.height = BLOCK_SIZE;
    //int C_stride = C.width;
    int C_y = C.width * BLOCK_SIZE * blockIdx.y;
    int C_x = BLOCK_SIZE * blockIdx.x;
    C_sub.data = &C.data[C_y + C_x];
    //Thread determines where in C block we are
    float C_val = 0.0;
    int x = threadIdx.y;
    int y = threadIdx.x;
    //loop over A and B submatrices to compute C submatrix
    for(int m = 0; m < (A.width / BLOCK_SIZE); m++){
        struct Matrix A_sub;
        A_sub.width = BLOCK_SIZE;
        A_sub.height = BLOCK_SIZE;
        int A_y = A.width * blockIdx.y * BLOCK_SIZE;
        int A_x = m * BLOCK_SIZE;
        A_sub.data = &A.data[A_y + A_x];
        struct Matrix B_sub;
        B_sub.width = BLOCK_SIZE;
        B_sub.height = BLOCK_SIZE;
        int B_y = B.width * m * BLOCK_SIZE;
        int B_x = blockIdx.x * BLOCK_SIZE;
        // BUG FIX: the original indexed A.data here, so B's tile was read
        // from the wrong matrix and the product was garbage.
        B_sub.data = &B.data[B_y + B_x];
        //this memory is shared between threads
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        //each thread loads an element
        //note we use parent widths
        As[y][x] = A_sub.data[A.width * y + x];
        Bs[y][x] = B_sub.data[B.width * y + x];
        //make sure all memory is loaded
        __syncthreads();
        //Compute Asub and Bsub product to accumulate Csub element
        for(int c = 0; c < BLOCK_SIZE; c++){
            C_val += As[y][c] * Bs[c][x];
        }
        //wait for computation to finish before loading new memory
        __syncthreads();
    }
    //write C sub element, again note parent width
    C_sub.data[C.width * y + x] = C_val;
}
// Computes C = A * B on the GPU (A.height x B.width result).
// Returns C with data == NULL on dimension mismatch; caller owns C.data.
Matrix MatMul(const Matrix A, const Matrix B){
    Matrix C;
    // BUG FIX: the result dimensions were transposed
    // (width = A.height, height = B.width). C is A.height rows x B.width cols.
    C.height = A.height;
    C.width = B.width;
    // Validate BEFORE allocating so a mismatch cannot leak the buffer
    // (the original malloc'd first and returned with the allocation lost).
    if(A.width != B.height){
        printf("Inner matrix dimensions must be equal!");
        C.data = NULL;
        return C;
    }
    C.data = (float*)malloc(C.width * C.height * sizeof(float));
    //Copy A and B over to GPU
    struct Matrix A_gpu;// = CopyShape(A);
    A_gpu.height = A.height;
    A_gpu.width = A.width;
    size_t A_size = A_gpu.height * A_gpu.width * sizeof(float);
    cudaError_t err = cudaMalloc(&A_gpu.data, A_size);
    printf("Cuda Error: malloc A: %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(A_gpu.data, A.data, A_size, cudaMemcpyHostToDevice);
    printf("Cuda Error: cpy A: %s\n", cudaGetErrorString(err));
    struct Matrix B_gpu = CopyShape(B);
    size_t B_size = B_gpu.height * B_gpu.width * sizeof(float);
    err = cudaMalloc(&B_gpu.data, B_size);
    printf("Cuda Error: malloc B: %s\n", cudaGetErrorString(err));
    err = cudaMemcpy(B_gpu.data, B.data, B_size, cudaMemcpyHostToDevice);
    printf("Cuda Error: cpy B: %s\n", cudaGetErrorString(err));
    //Make space for result matrix
    struct Matrix C_gpu = CopyShape(C);
    size_t C_size = C_gpu.width * C_gpu.height * sizeof(float);
    // BUG FIX: the status of this malloc was not captured, so the message
    // below reported the PREVIOUS call's status.
    err = cudaMalloc(&C_gpu.data, C_size);
    printf("Cuda Error: malloc C: %s\n", cudaGetErrorString(err));
    // Uploading the (uninitialized) host C buffer is kept for fidelity; the
    // kernel overwrites every element it touches.
    err = cudaMemcpy(C_gpu.data, C.data, C_size, cudaMemcpyHostToDevice);
    printf("Cuda Error: cpy C: %s\n", cudaGetErrorString(err));
    //Run Cuda Code
    dim3 block_dim(BLOCK_SIZE, BLOCK_SIZE); //z dim = 1
    // BUG FIX: ceil() on an already-truncated integer division never rounds
    // up; use integer ceil-division so partial tiles get a block.
    int grid_x = (C_gpu.width + block_dim.x - 1) / block_dim.x;
    int grid_y = (C_gpu.height + block_dim.y - 1) / block_dim.y;
    dim3 grid_dim(grid_x, grid_y);
    MatMul_k<<<grid_dim, block_dim>>>(A_gpu, B_gpu, C_gpu);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() also
    // surfaces asynchronous kernel errors.
    err = cudaDeviceSynchronize();
    printf("Run Cuda Code: %s\n", cudaGetErrorString(err));
    //Get Result
    err = cudaMemcpy(C.data, C_gpu.data, C_size, cudaMemcpyDeviceToHost);
    printf("Get Result: %s\n", cudaGetErrorString(err));
    cudaFree(A_gpu.data);
    cudaFree(B_gpu.data);
    cudaFree(C_gpu.data);
    return C;
}
// Stores `val` at column x, row y of row-major M; warns on out-of-range
// coordinates.
void SetVal(Matrix M, int x, int y, float val){
    // BUG FIX: the bound check used `>` so the one-past-the-end index
    // slipped through, and the out-of-range write was performed anyway.
    if(y*M.width + x >= M.width*M.height){
        printf("Reading past end of array\n");
        return;
    }
    M.data[y*M.width + x] = val;
}
// Returns the element at column x, row y of row-major matrix M.
float GetVal(Matrix M, int x, int y){
    const int idx = y * M.width + x;
    return M.data[idx];
}
// Fills every element of M with a uniform pseudo-random value in [0, 20].
void InitMatrix(Matrix M){
    const int rows = M.height;
    const int cols = M.width;
    for(int row = 0; row < rows; row++){
        for(int col = 0; col < cols; col++){
            SetVal(M, col, row, 20*(float)rand()/(float)RAND_MAX);
        }
    }
}
// Benchmarks GPU matrix multiplication on three growing problem sizes and
// prints the wall-clock time of each run.
// NOTE(review): the sizes (multiples of 3500/5000/7500) are not multiples of
// BLOCK_SIZE, which the tiled kernel appears to assume — confirm.
int main(){
    const int NUM_ARRAYS = 3;
    //struct Matrix As[NUM_ARRAYS];
    //struct Matrix Bs[NUM_ARRAYS];
    for(int run = 1; run < NUM_ARRAYS+1; run++){
        struct Matrix A, B;
        // Build random inputs with compatible inner dimensions.
        A.height = run*5000;
        A.width = run*3500;
        A.data = (float*)malloc(A.width * A.height * sizeof(float));
        InitMatrix(A);
        B.height = run*3500;
        B.width = run*7500;
        B.data = (float*)malloc(B.width * B.height * sizeof(float));
        InitMatrix(B);
        //Get Matrix Product of Array
        printf("********Entering Matrix Mul*****\n");
        clock_t t0 = clock();
        struct Matrix C = MatMul(A, B);
        float sec = (float)(clock() - t0)/(float)CLOCKS_PER_SEC;
        printf("Time %d: %f\n", run, sec);
        free(A.data);
        free(B.data);
        free(C.data);
    }
}
|
7,815 | #include <math.h>
/* Number of index slots needed for `n_resamplings` proportional resamples of
 * `n_samples` items, plus the leading full-data batch. */
unsigned int get_proportional_resampling_size(const int n_resamplings, const int n_samples) {
    const int total = (1 + n_resamplings) * n_samples;
    return (unsigned int)total;
}
/* Slots for leave-one-out resampling: one full-data batch of n_samples plus
 * n_samples batches of n_samples each. */
unsigned int get_leave_one_out_resampling_size(const int n_samples) {
    const int n = n_samples;
    return (unsigned int)(n * (n + 1));
}
/* Fills `resampled` with 0/1 inclusion flags for `n_resamplings` Bernoulli
 * resamples of `n_samples` items. Layout: the first n_samples entries are
 * the full data set (all 1), followed by n_resamplings blocks of n_samples
 * flags, each flag 1 with probability ~`prop`.
 * Uses the global rand() stream; the caller is responsible for seeding and
 * for sizing `resampled` via get_proportional_resampling_size(). */
void construct_proportional_resampling_indices(const int n_resamplings, const int n_samples, const float prop, short *resampled) {
    unsigned int N = get_proportional_resampling_size(n_resamplings, n_samples); // the first batch will be full data set
    /* rand() draws below this threshold mark a sample as excluded (0),
     * so a sample is kept with probability ~prop. */
    unsigned int rand_threshold = round(RAND_MAX * (1.0 - prop));
    for (unsigned int i = 0; i < n_samples; i++) {
        resampled[i] = 1; // full data set
    }
    for (unsigned int i = n_samples; i < N; i++) {
        resampled[i] = rand() < rand_threshold ? 0 : 1;
    }
}
/* Fills `resampled` with 0/1 inclusion flags for leave-one-out resampling:
 * a leading all-ones full-data batch of n_samples entries, then n_samples
 * batches of n_samples flags where batch i excludes exactly sample i.
 * `resampled` must hold n_samples + n_samples*n_samples entries.
 * Memory/compute grows as n^2, so this is impractical for very large n. */
void construct_leave_one_out_resampling_indices(const int n_samples, short *resampled) {
    const unsigned int n = (unsigned int)n_samples;
    const unsigned int total = n + n * n; /* full batch + n LOO batches */
    /* Start with every slot included... */
    for (unsigned int j = 0; j < total; j++) {
        resampled[j] = 1;
    }
    /* ...then punch out the left-out sample of each batch: batch i starts at
     * offset n + n*i and excludes element i. */
    for (unsigned int i = 0; i < n; i++) {
        resampled[n + n * i + i] = 0;
    }
} |
7,816 | #include <string.h> /* for memcpy() etc. */
#include "Sha2.cuh"
#if defined(__cplusplus)
extern "C"
{
#endif
#define rotl32(x,n) (((x) << n) | ((x) >> (32 - n)))
#define rotr32(x,n) (((x) >> n) | ((x) << (32 - n)))
#if !defined(bswap_32)
#define bswap_32(x) __byte_perm(x, x, 0x0123);
#endif
#define ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z))))
#define maj(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y))))
/* round transforms for SHA256 and SHA512 compression functions */
#define vf(n,i) v[(n - i) & 7]
#define hf(i) (p[i & 15] += \
g_1(p[(i + 14) & 15]) + p[(i + 9) & 15] + g_0(p[(i + 1) & 15]))
#if defined(SHA_224) || defined(SHA_256)
#define SHA256_MASK (SHA256_BLOCK_SIZE - 1)
#define bsw_32(p,n) \
{ int _i = (n); while(_i--) ((uint_32t*)p)[_i] = bswap_32(((uint_32t*)p)[_i]); }
//__device__ void bsw_32(uint_32t *p, uint_32t i) {
//
// while(i--) {
// p[i] = __byte_perm(p[i], p[i], 0x0123);
// }
//}
#define s_0(x) (rotr32((x), 2) ^ rotr32((x), 13) ^ rotr32((x), 22))
#define s_1(x) (rotr32((x), 6) ^ rotr32((x), 11) ^ rotr32((x), 25))
#define g_0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3))
#define g_1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10))
#define k_0 k256
/* rotated SHA256 round definition. Rather than swapping variables as in */
/* FIPS-180, different variables are 'rotated' on each round, returning */
/* to their starting positions every eight rounds */
/* SHA256 mixing data */
__constant__ const uint_32t k256[64] =
{ 0x428a2f98ul, 0x71374491ul, 0xb5c0fbcful, 0xe9b5dba5ul,
0x3956c25bul, 0x59f111f1ul, 0x923f82a4ul, 0xab1c5ed5ul,
0xd807aa98ul, 0x12835b01ul, 0x243185beul, 0x550c7dc3ul,
0x72be5d74ul, 0x80deb1feul, 0x9bdc06a7ul, 0xc19bf174ul,
0xe49b69c1ul, 0xefbe4786ul, 0x0fc19dc6ul, 0x240ca1ccul,
0x2de92c6ful, 0x4a7484aaul, 0x5cb0a9dcul, 0x76f988daul,
0x983e5152ul, 0xa831c66dul, 0xb00327c8ul, 0xbf597fc7ul,
0xc6e00bf3ul, 0xd5a79147ul, 0x06ca6351ul, 0x14292967ul,
0x27b70a85ul, 0x2e1b2138ul, 0x4d2c6dfcul, 0x53380d13ul,
0x650a7354ul, 0x766a0abbul, 0x81c2c92eul, 0x92722c85ul,
0xa2bfe8a1ul, 0xa81a664bul, 0xc24b8b70ul, 0xc76c51a3ul,
0xd192e819ul, 0xd6990624ul, 0xf40e3585ul, 0x106aa070ul,
0x19a4c116ul, 0x1e376c08ul, 0x2748774cul, 0x34b0bcb5ul,
0x391c0cb3ul, 0x4ed8aa4aul, 0x5b9cca4ful, 0x682e6ff3ul,
0x748f82eeul, 0x78a5636ful, 0x84c87814ul, 0x8cc70208ul,
0x90befffaul, 0xa4506cebul, 0xbef9a3f7ul, 0xc67178f2ul,
};
// One SHA-256 round in "rotated register" form: the vf(n,x) macro renames
// the eight working variables each round instead of shuffling them, so every
// round writes a different slot of v[]. x is the round index within the
// current 16-word window; y is the window base (0,16,32,48). For y > 0 the
// schedule word is expanded in place via hf(); for y == 0 it is read
// directly from the message block p[].
__device__ void m_cycle(uint_32t *p, uint_32t *v, int x, int y) {
    uint32_t v4 = vf(4,x);
    uint32_t v0 = vf(0,x);
    vf(7, x) += (y ? hf(x) : p[x]) + k_0[x+y] + s_1(v4) + ch(v4, vf(5,x), vf(6,x));
    vf(3, x) += vf(7,x);
    vf(7, x) += s_0(v0) + maj(v0, vf(1, x), vf(2, x));
}
/* Compile 64 bytes of hash data into SHA256 digest value */
/* NOTE: this routine assumes that the byte order in the */
/* ctx->wbuf[] at this point is such that low address bytes */
/* in the ORIGINAL byte stream will go into the high end of */
/* words on BOTH big and little endian systems */
// Runs the 64 SHA-256 rounds over the 16-word block in ctx->wbuf and folds
// the result into ctx->hash. Working state is copied to v[8]; rounds run 16
// at a time so the schedule window p[] can be expanded in place by hf().
__device__ VOID_RETURN sha256_compile(sha256_ctx ctx[1])
{
    int j, mp;
    uint_32t *p = ctx->wbuf, v[8];
    memcpy(v, ctx->hash, 8 * sizeof(uint_32t));
    // #pragma unroll
    for(j = 0; j < 64; j+=16)
    {
        for(mp = 0; mp < 16; mp++) {
            m_cycle(p, v, mp, j);
        }
        // printf("%02d: [ %x %x %x ] [ %x %x %x ] [ %x %x %x ] [ %x %x %x ] [ %x %x %x ] [ %x ]\n", j, &p);
        // printf("%02d: [ ", j);
        // for(int i = 0; i < 16; i++) {
        // printf("%x ", p[i]);
        // }
        // printf(" ]\n");
    }
    // Davies-Meyer feed-forward: add the round output back into the chaining
    // value.
    for(j = 0; j < 8; j++) {
        ctx->hash[j] += v[j];
    }
}
/* SHA256 hash data in an array of bytes into hash buffer */
/* and call the hash_compile function as required. */
// Streaming update: absorbs `len` bytes into the 64-byte block buffer,
// byte-swapping and compressing each time the buffer fills. Leftover bytes
// stay in wbuf for the next call or for finalization. The total message
// length in bytes is tracked in count[0], with carry into count[1].
__device__ VOID_RETURN sha256_hash(const unsigned char data[], unsigned long len, sha256_ctx ctx[1])
{   uint_32t pos = (uint_32t)(ctx->count[0] & SHA256_MASK),
    space = SHA256_BLOCK_SIZE - pos;
    const unsigned char *sp = data;
    //printf("L %d %d %d\n", ctx->count[0], len);
    // 64-bit length update: if count[0] wraps, propagate the carry.
    if((ctx->count[0] += len) < len)
        ++(ctx->count[1]);
    // Consume full blocks while enough input remains to fill the buffer.
    while(len >= space) {
        memcpy(((unsigned char*)ctx->wbuf) + pos, sp, space);
        sp += space; len -= space; space = SHA256_BLOCK_SIZE; pos = 0;
        bsw_32(ctx->wbuf, SHA256_BLOCK_SIZE >> 2);
        sha256_compile(ctx);
    }
    // Stash the tail (< one block) for later.
    memcpy(((unsigned char*)ctx->wbuf) + pos, sp, len);
    /* printf("SET count [");
    for(int i = 0; i < 2; i++) {
    printf("%x ", ctx->count[i]);
    }
    printf("]\nSET buf [");
    for(int i = 0; i < 16; i++) {
    printf("%x ", ctx->wbuf[i]);
    }
    printf("\nSET hash [");
    for(int i = 0; i < 8; i++) {
    printf("%x ", ctx->hash[i]);
    }
    printf("]\n");
    */
}
// Overwrites *ctx with the snapshot context `ictx` (plain struct copy).
__device__ void sha256_setstate_c(sha256_ctx ctx[1], sha256_ctx ictx) {
    ctx[0] = ictx;
}
/* SHA256 Final padding and digest calculation */
/* SHA256 Final padding and digest calculation */
// Appends the 0x80 pad byte and the 64-bit bit-length, runs the final
// compression(s), and writes the first `hlen` bytes of the digest to hval[].
__device__ static void sha_end1(unsigned char hval[], sha256_ctx ctx[1], const unsigned int hlen)
{   uint_32t i = (uint_32t)(ctx->count[0] & SHA256_MASK);
    /* put bytes in the buffer in an order in which references to */
    /* 32-bit words will put bytes with lower addresses into the */
    /* top of 32 bit words on BOTH big and little endian machines */
    bsw_32(ctx->wbuf, (i + 3) >> 2);
    /* we now need to mask valid bytes and add the padding which is */
    /* a single 1 bit and as many zero bits as necessary. Note that */
    /* we can always add the first padding byte here because the */
    /* buffer always has at least one empty slot */
    ctx->wbuf[i >> 2] &= 0xffffff80 << 8 * (~i & 3);
    ctx->wbuf[i >> 2] |= 0x00000080 << 8 * (~i & 3);
    /* we need 9 or more empty positions, one for the padding byte */
    /* (above) and eight for the length count. If there is not */
    /* enough space pad and empty the buffer */
    if(i > SHA256_BLOCK_SIZE - 9)
    {
        if(i < 60) ctx->wbuf[15] = 0;
        sha256_compile(ctx);
        i = 0;
    }
    else /* compute a word index for the empty buffer positions */
        i = (i >> 2) + 1;
    /* BUG FIX: this memset previously zeroed `sizeof(ctx->wbuf) - 2 - i`
     * BYTES, i.e. far more than the remaining words; for i == 14 it ran
     * past wbuf into the rest of the context, clobbering count[] before
     * the length words were stored. Zero exactly words i..13. */
    memset(ctx->wbuf + i, 0, (14 - i) * sizeof(uint_32t));
    /* the following 32-bit length fields are assembled in the */
    /* wrong byte order on little endian machines but this is */
    /* corrected later since they are only ever used as 32-bit */
    /* word values. */
    ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 29);
    ctx->wbuf[15] = ctx->count[0] << 3;
    sha256_compile(ctx);
    /* extract the hash value as bytes in case the hash buffer is */
    /* misaligned for 32-bit words */
    //#pragma unroll
    for(i = 0; i < hlen; ++i)
        hval[i] = (unsigned char)(ctx->hash[i >> 2] >> (8 * (~i & 3)));
}
#endif
// Resets the context to the SHA-256 initial chaining value (FIPS 180 H0..H7)
// and zeroes the 64-bit byte counter.
__device__ VOID_RETURN sha256_begin(sha256_ctx ctx[1])
{
    const uint_32t iv[8] = {
        0x6a09e667ul, 0xbb67ae85ul, 0x3c6ef372ul, 0xa54ff53aul,
        0x510e527ful, 0x9b05688cul, 0x1f83d9abul, 0x5be0cd19ul
    };
    ctx->count[0] = 0;
    ctx->count[1] = 0;
    for (int k = 0; k < 8; ++k)
        ctx->hash[k] = iv[k];
}
// Finalizes ctx and writes the full 32-byte SHA-256 digest to hval[].
__device__ VOID_RETURN sha256_end(unsigned char hval[], sha256_ctx ctx[1])
{
    sha_end1(hval, ctx, SHA256_DIGEST_SIZE);
}
// Identical alias of sha256_end (finalize and emit the 32-byte digest);
// kept for API compatibility with callers using the s_ prefix.
__device__ VOID_RETURN s_sha256_end(unsigned char hval[], sha256_ctx ctx[1])
{
    sha_end1(hval, ctx, SHA256_DIGEST_SIZE);
}
// One-shot convenience: hashes `len` bytes of `data` with a local context
// and writes the 32-byte digest to hval[].
__device__ VOID_RETURN sha256(unsigned char hval[], const unsigned char data[], unsigned long len)
{   sha256_ctx cx[1];
    sha256_begin(cx);
    sha256_hash(data, len, cx);
    sha_end1(hval, cx, SHA256_DIGEST_SIZE);
}
#if defined(__cplusplus)
}
#endif
|
7,817 | #ifndef MACROS
#define MACROS
#define CGNS_TYPE CG_FILE_HDF5
//#define CGNS_TYPE CG_FILE_ADF
#define STR_LENGTH 64
#define NUM_RESIDS 3
#ifdef D2Q9
#define DIM 2
#define Q 9
#define LOAD_E(e) { \
e[0][0]= 0;e[0][1]= 1;e[0][2]= 0;e[0][3]=-1;e[0][4]= 0;e[0][5]= 1;e[0][6]=-1;e[0][7]=-1;e[0][8]= 1; \
e[1][0]= 0;e[1][1]= 0;e[1][2]= 1;e[1][3]= 0;e[1][4]=-1;e[1][5]= 1;e[1][6]= 1;e[1][7]=-1;e[1][8]=-1; \
}
#define LOAD_M(m_tmp) {\
double m[Q][Q]=\
{ 1, 1, 1, 1, 1, 1, 1, 1, 1,\
-4,-1,-1,-1,-1, 2, 2, 2, 2,\
4,-2,-2,-2,-2, 1, 1, 1, 1,\
0, 1, 0,-1, 0, 1,-1,-1, 1,\
0,-2, 0, 2, 0, 1,-1,-1, 1,\
0, 0, 1, 0,-1, 1, 1,-1,-1,\
0, 0,-2, 0, 2, 1, 1,-1,-1,\
0, 1,-1, 1,-1, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 1,-1, 1,-1}; \
memcpy(m_tmp,m,sizeof(double)*Q*Q);\
}
#define LOAD_M_INV(m_inv_tmp) {\
double m_inv[Q][Q]=\
{0.1111111111111111,-0.11111111111111112, 0.1111111111111111,0,0,0,0,0,0,\
0.1111111111111111,-0.02777777777777779, -0.055555555555555566,0.16666666666666666,-0.16666666666666669,0,0,0.25,0,\
0.1111111111111111,-0.027777777777777762,-0.055555555555555539,0,-1.3877787807814457e-017,0.16666666666666666,-0.16666666666666669,-0.25,0,\
0.1111111111111111,-0.02777777777777779, -0.055555555555555566,-0.16666666666666666,0.16666666666666669,0,0,0.25,0,\
0.1111111111111111,-0.02777777777777779, -0.055555555555555566,0,1.3877787807814457e-017,-0.16666666666666666,0.16666666666666669,-0.25,0,\
0.1111111111111111, 0.055555555555555552, 0.027777777777777776,0.16666666666666666,0.083333333333333329,0.16666666666666666,0.083333333333333329,0,0.25,\
0.1111111111111111, 0.055555555555555552, 0.027777777777777776,-0.16666666666666666,-0.083333333333333329,0.16666666666666666,0.083333333333333329,0,-0.25,\
0.1111111111111111, 0.055555555555555552, 0.027777777777777776,-0.16666666666666666,-0.083333333333333329,-0.16666666666666666,-0.083333333333333329,0,0.25,\
0.1111111111111111, 0.055555555555555552, 0.027777777777777776,0.16666666666666666,0.083333333333333329,-0.16666666666666666,-0.083333333333333329,0,-0.25};\
memcpy(m_inv_tmp,m_inv,sizeof(double)*Q*Q);\
}
#define LOAD_OMEGA(omega) {omega[0]=4.0/9.0;omega[1]=1.0/9.0;omega[2]=1.0/9.0;omega[3]=1.0/9.0;omega[4]=1.0/9.0;omega[5]=1.0/36.0;omega[6]=1.0/36.0;omega[7]=1.0/36.0;omega[8]=1.0/36.0;}
#define LOAD_OPP(opp) {opp[0]=0;opp[1]=3;opp[2]=4;opp[3]=1;opp[4]=2;opp[5]=7;opp[6]=8;opp[7]=5;opp[8]=6;}
#define NUM_THREADS_DIM_X 32
#define NUM_THREADS_DIM_Y 16
#endif
#ifdef D3Q15
#define DIM 3
#define Q 15
/*#define LOAD_E(e) { \
e[0][0]= 0;e[0][1]= 1;e[0][2]=-1;e[0][3]= 0;e[0][4]= 0;e[0][5]= 0;e[0][6]= 0;e[0][7]= 1;e[0][8]=-1;e[0][9]= 1;e[0][10]=-1;e[0][11]= 1;e[0][12]=-1;e[0][13]= 1;e[0][14]=-1; \
e[1][0]= 0;e[1][1]= 0;e[1][2]= 0;e[1][3]= 1;e[1][4]=-1;e[1][5]= 0;e[1][6]= 0;e[1][7]= 1;e[1][8]=-1;e[1][9]= 1;e[1][10]=-1;e[1][11]=-1;e[1][12]= 1;e[1][13]=-1;e[1][14]= 1; \
e[2][0]= 0;e[2][1]= 0;e[2][2]= 0;e[2][3]= 0;e[2][4]= 0;e[2][5]= 1;e[2][6]=-1;e[2][7]= 1;e[2][8]=-1;e[2][9]=-1;e[2][10]= 1;e[2][11]= 1;e[2][12]=-1;e[2][13]=-1;e[2][14]= 1; \
}*/
#define LOAD_E(e) { \
e[0][0]= 0;e[0][1]= 1;e[0][2]=-1;e[0][3]= 0;e[0][4]= 0;e[0][5]= 0;e[0][6]= 0;e[0][7]= 1;e[0][8]=-1;e[0][9]= 1;e[0][10]=-1;e[0][11]= 1;e[0][12]=-1;e[0][13]= 1;e[0][14]=-1; \
e[1][0]= 0;e[1][1]= 0;e[1][2]= 0;e[1][3]= 1;e[1][4]=-1;e[1][5]= 0;e[1][6]= 0;e[1][7]= 1;e[1][8]= 1;e[1][9]=-1;e[1][10]=-1;e[1][11]= 1;e[1][12]= 1;e[1][13]=-1;e[1][14]=-1; \
e[2][0]= 0;e[2][1]= 0;e[2][2]= 0;e[2][3]= 0;e[2][4]= 0;e[2][5]= 1;e[2][6]=-1;e[2][7]= 1;e[2][8]= 1;e[2][9]= 1;e[2][10]= 1;e[2][11]=-1;e[2][12]=-1;e[2][13]=-1;e[2][14]=-1; \
}
#define LOAD_M(m_tmp) {\
double m[Q][Q]=\
{ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,\
-2,-1,-1,-1,-1,-1,-1, 1, 1, 1, 1, 1, 1, 1, 1,\
16,-4,-4,-4,-4,-4,-4, 1, 1, 1, 1, 1, 1, 1, 1,\
0, 1,-1, 0, 0, 0, 0, 1,-1, 1,-1, 1,-1, 1,-1,\
0,-4, 4, 0, 0, 0, 0, 1,-1, 1,-1, 1,-1, 1,-1,\
0, 0, 0, 1,-1, 0, 0, 1, 1,-1,-1, 1, 1,-1,-1,\
0, 0, 0,-4, 4, 0, 0, 1, 1,-1,-1, 1, 1,-1,-1,\
0, 0, 0, 0, 0, 1,-1, 1, 1, 1, 1,-1,-1,-1,-1,\
0, 0, 0, 0, 0,-4, 4, 1, 1, 1, 1,-1,-1,-1,-1,\
0, 2, 2,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 1, 1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 1,-1,-1, 1, 1,-1,-1, 1,\
0, 0, 0, 0, 0, 0, 0, 1, 1,-1,-1,-1,-1, 1, 1,\
0, 0, 0, 0, 0, 0, 0, 1,-1, 1,-1,-1, 1,-1, 1,\
0, 0, 0, 0, 0, 0, 0, 1,-1,-1, 1,-1, 1, 1,-1 };\
memcpy(m_tmp,m,sizeof(double)*Q*Q);\
}
#define LOAD_M_INV(m_inv_tmp) {\
double m_inv[Q][Q]=\
{1/15,-1/9 , 2/45 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ,\
1/15,-1/18,-1/90 , 1/10,-1/10, 0 , 0 , 0 , 0 , 1/6 , 0 , 0 , 0 , 0 , 0 ,\
1/15,-1/18,-1/90 ,-1/10, 1/10, 0 , 0 , 0 , 0 , 1/6 , 0 , 0 , 0 , 0 , 0 ,\
1/15,-1/18,-1/90 , 0 , 0 , 1/10,-1/10, 0 , 0 ,-1/12, 1/4, 0 , 0 , 0 , 0 ,\
1/15,-1/18,-1/90 , 0 , 0 ,-1/10, 1/10, 0 , 0 ,-1/12, 1/4, 0 , 0 , 0 , 0 ,\
1/15,-1/18,-1/90 , 0 , 0 , 0 , 0 , 1/10,-1/10,-1/12,-1/4, 0 , 0 , 0 , 0 ,\
1/15,-1/18,-1/90 , 0 , 0 , 0 , 0 ,-1/10, 1/10,-1/12,-1/4, 0 , 0 , 0 , 0 ,\
1/15, 1/18, 1/360, 1/10, 1/40, 1/10, 1/40, 1/10, 1/40, 0 , 0 , 1/8, 1/8, 1/8, 1/8,\
1/15, 1/18, 1/360,-1/10,-1/40, 1/10, 1/40, 1/10, 1/40, 0 , 0 ,-1/8, 1/8,-1/8,-1/8,\
1/15, 1/18, 1/360, 1/10, 1/40,-1/10,-1/40, 1/10, 1/40, 0 , 0 ,-1/8,-1/8, 1/8,-1/8,\
1/15, 1/18, 1/360,-1/10,-1/40,-1/10,-1/40, 1/10, 1/40, 0 , 0 , 1/8,-1/8,-1/8, 1/8,\
1/15, 1/18, 1/360, 1/10, 1/40, 1/10, 1/40,-1/10,-1/40, 0 , 0 , 1/8,-1/8,-1/8,-1/8,\
1/15, 1/18, 1/360,-1/10,-1/40, 1/10, 1/40,-1/10,-1/40, 0 , 0 ,-1/8,-1/8, 1/8, 1/8,\
1/15, 1/18, 1/360, 1/10, 1/40,-1/10,-1/40,-1/10,-1/40, 0 , 0 ,-1/8, 1/8,-1/8, 1/8,\
1/15, 1/18, 1/360,-1/10,-1/40,-1/10,-1/40,-1/10,-1/40, 0 , 0 , 1/8, 1/8, 1/8,-1/8}; \
memcpy(m_inv_tmp,m_inv,sizeof(double)*Q*Q);\
}
#define LOAD_OMEGA(omega) {omega[0]=2.0/9.0;omega[1]=1.0/9.0;omega[2]=1.0/9.0;omega[3]=1.0/9.0;omega[4]=1.0/9.0;omega[5]=1.0/9.0;omega[6]=1.0/9.0;omega[7]=1.0/72.0;omega[8]=1.0/72.0;omega[9]=1.0/72.0;omega[10]=1.0/72.0;omega[11]=1.0/72.0;omega[12]=1.0/72.0;omega[13]=1.0/72.0;omega[14]=1.0/72.0;}
//#define LOAD_OPP(opp) {opp[0]=0;opp[1]=2;opp[2]=1;opp[3]=4;opp[4]=3;opp[5]=6;opp[6]=5;opp[7]=8;opp[8]=7;opp[9]=10;opp[10]=9;opp[11]=12;opp[12]=11;opp[13]=14;opp[14]=13;}
#define LOAD_OPP(opp) {opp[0]=0;opp[1]=2;opp[2]=1;opp[3]=4;opp[4]=3;opp[5]=6;opp[6]=5;opp[7]=14;opp[8]=13;opp[9]=12;opp[10]=11;opp[11]=10;opp[12]=9;opp[13]=8;opp[14]=7;}
#define NUM_THREADS_DIM_X 32
#define NUM_THREADS_DIM_Y 4
#define NUM_THREADS_DIM_Z 4
#endif
#endif
|
7,818 | #include <stdio.h>
#include <cuda_runtime.h>
// CUDA Kernel
// Element-wise vector addition: C[i] = A[i] + B[i] for i < numElements.
// __restrict__ asserts the three arrays do not alias, letting the compiler
// use read-only-cache loads for A and B.
__global__ void
vectorAdd(const float *__restrict__ A, const float *__restrict__ B, float *__restrict__ C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Bounds guard: the grid is rounded up and may overshoot the array.
    if (i < numElements)
    {
        C[i] = A[i] + B[i];
    }
}
/**
 * Host main routine: fills two 15-element vectors, adds them on the GPU,
 * and prints the result. Adds the CUDA error checking the original lacked.
 */
int
main(void)
{
    int numElements = 15;
    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);
    float a[numElements],b[numElements],c[numElements];
    float *a_gpu,*b_gpu,*c_gpu;
    // Fail loudly instead of later writing through invalid device pointers.
    if (cudaMalloc((void **)&a_gpu, size) != cudaSuccess ||
        cudaMalloc((void **)&b_gpu, size) != cudaSuccess ||
        cudaMalloc((void **)&c_gpu, size) != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    for (int i=0;i<numElements;++i ){
        a[i] = i*i;
        b[i] = i;
    }
    // Copy the host input vectors A and B in host memory to the device input vectors in
    // device memory
    printf("Copy input data from the host memory to the CUDA device\n");
    cudaMemcpy(a_gpu, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, size, cudaMemcpyHostToDevice);
    // Launch the Vector Add CUDA Kernel
    int threadsPerBlock = 256;
    int blocksPerGrid =(numElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(a_gpu, b_gpu, c_gpu, numElements);
    // BUG FIX: launch errors were silently ignored.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "vectorAdd launch failed: %s\n", cudaGetErrorString(err));
    // Copy the device result vector in device memory to the host result vector
    // in host memory (the blocking memcpy also synchronizes with the kernel).
    printf("Copy output data from the CUDA device to the host memory\n");
    cudaMemcpy(c, c_gpu, size, cudaMemcpyDeviceToHost);
    // Free device global memory
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    cudaFree(c_gpu);
    for (int i=0;i<numElements;++i ){
        printf("%f \n",c[i]);
    }
    printf("Done\n");
    return 0;
}
|
7,819 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
using namespace std;
typedef unsigned long long uint64;
typedef unsigned long uint16;
typedef uint64 rule;
typedef uint16 cellData;
//(16| 16| 16|8 | 8 |)
//(A |-> B| C|<lblName|lblWeight>|)
// Field accessors for a grammar rule A -> B C packed into a 64-bit word:
// A in bits 63..48, B in 47..32, C in 31..16, label name in 15..8,
// label weight in 7..0.
__device__ int getRuleA(rule r){return r >> 48;}
__device__ int getRuleB(rule r){return r >> 32 & 0xFFFF;}
__device__ int getRuleC(rule r){return r >> 16 & 0xFFFF;}
__device__ int getRuleN(rule r){return r >> 8 & 0xFF;}
__device__ int getRuleW(rule r){return r & 0xFF;}
// Packs the rule fields into the 64-bit layout above (host-side builder).
__host__ rule buildRule(int A, int B, int C, int lblName, int lblWeght){
    return (uint64)A << 48 | (uint64)B << 32 | (uint64)C << 16 | lblName << 8 | lblWeght;
}
//(16 |16 |16 |8 |8 )
//(k |non-terminalIndex |lblState |lblName |lblWeght )
// Cell accessors: currently only the non-terminal/rule index is stored in a
// cell, so getDataI returns the raw value and the extra buildData parameters
// are ignored.
__device__ int getDataI(cellData d){return d;}
__host__ cellData buildData_Host(int ruleIndex){return ruleIndex;}
__device__ cellData buildData(int k, int ruleIndex, int lblState, int lblName, int lblWeght){
    return ruleIndex;
}
// One CYK step for substring length `subLen`: thread (x, y) applies rule
// rules[threadIdx.x] to the substring starting at position
// start = blockIdx.y*blockDim.y + threadIdx.y. Requires blockDim.x ==
// rulesCount (guaranteed by fillTable's launch configuration).
// Table layout: table[(len*strLen + start)*(nCount+1) + nonterminal].
__global__ void processRule(rule* rules, int rulesCount, int nCount, int strLen, int subLen, cellData* table){
    // subLen === l
    // start === i
    int start = blockIdx.y * blockDim.y + threadIdx.y;
    if(start >= strLen - subLen) return;
    rule currentRule = rules[threadIdx.x];
    // Try every split point k of the substring.
    for(int k = 0; k < subLen; k++){
        cellData *current = table + ( subLen * strLen + start ) * (nCount + 1);
        cellData *left = table + ( k * strLen + start ) * (nCount + 1);
        cellData *right = table + ( (subLen - k - 1) * strLen + (k + start + 1) ) * (nCount + 1);
        int c = getRuleC(currentRule);
        // This non-terminal is already derived for the cell; nothing to add.
        if(current[ getRuleA(currentRule) ]) return;
        for(int m = 1; m <= nCount; m++){
            if ( getDataI( left[m] ) == getRuleB(currentRule) ){
                for(int n = 1; n <= nCount; n++){
                    if ( getDataI( right[n] ) == c ){
                        current[ getRuleA(currentRule) ] = getRuleA(currentRule);
                    }
                }
            }
        }
    }
    // BUG FIX: the kernel previously ended with __syncthreads(), but threads
    // take the early `return`s above, so not every thread of the block could
    // reach the barrier (undefined behavior). As the final statement it also
    // synchronized nothing, so it has been removed.
}
// Runs the CYK fill on the GPU: uploads the rules and the (pre-seeded)
// table, launches one processRule pass per substring length, copies the
// completed table back, and prints the elapsed GPU time.
__host__ void fillTable(rule* rules, int rulesCount, int nCount, int strLen, cellData* table){
    cudaEvent_t start, stop;
    float gpuTime = 0.0f;
    cudaDeviceProp cdp;
    cudaGetDeviceProperties ( &cdp, 0 );
    cudaEventCreate ( &start );
    cudaEventCreate ( &stop );
    cudaEventRecord ( start, 0 );
    cellData *dev_table = 0;
    rule *dev_rules = 0;
    int table_size = strLen * strLen * (nCount+1);
    cudaError_t cudaStatus;
    cudaStatus = cudaSetDevice(0);
    cudaStatus = cudaMalloc((void**)&dev_table, table_size * sizeof(cellData));
    cudaStatus = cudaMalloc((void**)&dev_rules, rulesCount * sizeof(rule));
    cudaStatus = cudaMemcpy(dev_table, table, table_size * sizeof(cellData), cudaMemcpyHostToDevice);
    cudaStatus = cudaMemcpy(dev_rules, rules, rulesCount * sizeof(rule), cudaMemcpyHostToDevice);
    // blockDim.x carries one thread per rule; y covers start positions.
    int threadsPerBlockX = cdp.maxThreadsPerBlock / rulesCount;
    // Lengths must be processed in increasing order: length l depends on all
    // shorter substrings, hence one synchronous launch per l.
    for(int subLen = 1; subLen <= strLen; subLen++){
        processRule<<< dim3( 1,(strLen-subLen)/(threadsPerBlockX)+1 ), dim3(rulesCount, threadsPerBlockX ) >>>(dev_rules, rulesCount, nCount, strLen, subLen, dev_table);
    }
    cudaStatus = cudaDeviceSynchronize();
    // BUG FIX: every CUDA status was silently discarded; at least surface
    // failures of the kernel sequence.
    if (cudaStatus != cudaSuccess)
        cout << "CUDA error: " << cudaGetErrorString(cudaStatus) << endl;
    cudaStatus = cudaMemcpy(table, dev_table, table_size * sizeof(cellData), cudaMemcpyDeviceToHost);
    cudaEventRecord (stop, 0);
    cudaEventSynchronize ( stop );
    cudaEventElapsedTime ( &gpuTime, start, stop );
    cout<<"time "<< gpuTime<<endl;
    cudaFree(dev_table);
    cudaFree(dev_rules);
}
// Demo driver: builds a small 7-rule grammar, seeds the CYK table for a
// 1000-symbol word, runs the GPU fill, and prints the last row's first cell.
int main(){
    int rulesCount = 7;
    int wordLen = 1000;
    int nCount = 4;      // number of non-terminals (1..4); slot 0 is a flag
    rule *rules = new rule[rulesCount];
    rules[0] = buildRule(1,2,3,0,0);
    rules[1] = buildRule(2,3,2,0,0);
    rules[2] = buildRule(2,3,3,0,0);
    rules[3] = buildRule(3,1,2,0,0);
    rules[4] = buildRule(4,2,4,0,0);
    rules[5] = buildRule(1,4,2,0,0);
    rules[6] = buildRule(3,4,2,0,0);
    // Table: wordLen x wordLen cells, each with nCount+1 slots
    // (slot 0 used as an occupancy flag, slots 1..nCount per non-terminal).
    cellData* table = new cellData[wordLen * wordLen * (nCount + 1)];
    for(int i = 0; i < wordLen * wordLen * (nCount + 1); i++)
        table[i] = 0;
    // Seed length-0 row: every position derives non-terminal 3 by default...
    for(int i=0; i<wordLen; i++){
        table[i*(nCount+1) + 0] = 1;
        table[i*(nCount+1) + 3] = buildData_Host(3);
    }
    // ...except two positions that get hand-crafted entries.
    table[(wordLen-2)*(nCount+1) + 0] = 1;
    table[(wordLen-2)*(nCount+1) + 2] = buildData_Host(2);
    table[(wordLen-2)*(nCount+1) + 3] = buildData_Host(0);
    table[(wordLen-5)*(nCount+1) + 0] = 1;
    table[(wordLen-5)*(nCount+1) + 4] = buildData_Host(4);
    table[(wordLen-5)*(nCount+1) + 3] = buildData_Host(0);
    fillTable(rules, rulesCount, nCount, wordLen, table);
    //for(int i = 0; i < wordLen; i++){
    // for(int j = 0; j < wordLen*(nCount+1); j++){
    // if( !( j % (nCount+1) ) && j ) cout<<' ';
    // if( !( j % (nCount+1) ) )
    // cout<<"";//(table[i*wordLen*(nCount+1)+j]);
    // else
    // cout<<(table[i*wordLen*(nCount+1)+j]);
    // //if( !( j % (nCount+1) ) ) cout<<' ';
    // }
    // cout<<endl;
    //}
    // Print the first cell of the last (full-length) row.
    for(int i = wordLen-1; i < wordLen; i++){
        for(int j = 0; j < 1*(nCount+1); j++){
            if( !( j % (nCount+1) ) && j ) cout<<' ';
            if( !( j % (nCount+1) ) )
                cout<<(table[i*wordLen*(nCount+1)+j]);
            else
                cout<<(table[i*wordLen*(nCount+1)+j] >> 32 & 0xFFFF);
            if( !( j % (nCount+1) ) ) cout<<' ';
        }
        cout<<endl;
    }
    // NOTE(review): system("pause") is Windows-only — confirm target platform.
    system("pause");
    return 0;
} |
7,820 | #include <stdio.h>
// Enumerates CUDA devices and prints the main properties of device 0.
// BUG FIX: totalGlobalMem, sharedMemPerBlock and totalConstMem are size_t,
// which %d prints incorrectly on LP64 platforms; use %zu. Also adds the
// missing return statement.
int main(int argc, char *argv[])
{
    // Get the number of devices.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    printf("deviceCount: %d\n", deviceCount);
    // Get the properties of device 0.
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0);
    // Print some properties.
    // Refer to driver_types.h for a complete list of properties.
    printf("name: %s\n", deviceProp.name);
    printf("major: %d\n", deviceProp.major);
    printf("minor: %d\n", deviceProp.minor);
    printf("multiProcessorCount: %d\n", deviceProp.multiProcessorCount);
    printf("totalGlobalMem: %zu B = %zu MB\n", deviceProp.totalGlobalMem, deviceProp.totalGlobalMem / 1048576);
    printf("sharedMemPerBlock: %zu B = %zu KB\n", deviceProp.sharedMemPerBlock, deviceProp.sharedMemPerBlock / 1024);
    printf("totalConstMem: %zu B = %zu KB\n", deviceProp.totalConstMem, deviceProp.totalConstMem / 1024);
    printf("regsPerBlock: %d\n", deviceProp.regsPerBlock);
    printf("ECCEnabled: %d\n", deviceProp.ECCEnabled);
    printf("kernelExecTimeoutEnabled: %d\n", deviceProp.kernelExecTimeoutEnabled);
    printf("clockRate: %d KHz = %d MHz\n", deviceProp.clockRate, deviceProp.clockRate / 1000);
    printf("memoryClockRate: %d KHz = %d MHz\n", deviceProp.memoryClockRate, deviceProp.memoryClockRate / 1000);
    printf("memoryBusWidth: %d bits\n", deviceProp.memoryBusWidth);
    printf("l2CacheSize: %d B = %d KB\n", deviceProp.l2CacheSize, deviceProp.l2CacheSize / 1024);
    printf("warpSize: %d\n", deviceProp.warpSize);
    printf("maxThreadsPerMultiProcessor: %d\n", deviceProp.maxThreadsPerMultiProcessor);
    printf("maxThreadsPerBlock: %d\n", deviceProp.maxThreadsPerBlock);
    printf("maxThreadsDim[0]: %d\n", deviceProp.maxThreadsDim[0]);
    printf("maxThreadsDim[1]: %d\n", deviceProp.maxThreadsDim[1]);
    printf("maxThreadsDim[2]: %d\n", deviceProp.maxThreadsDim[2]);
    printf("maxGridSize[0]: %d\n", deviceProp.maxGridSize[0]);
    printf("maxGridSize[1]: %d\n", deviceProp.maxGridSize[1]);
    printf("maxGridSize[2]: %d\n", deviceProp.maxGridSize[2]);
    printf("deviceOverlap: %d\n", deviceProp.deviceOverlap);
    printf("asyncEngineCount: %d\n", deviceProp.asyncEngineCount);
    printf("integrated: %d\n", deviceProp.integrated);
    printf("canMapHostMemory: %d\n", deviceProp.canMapHostMemory);
    printf("concurrentKernels: %d\n", deviceProp.concurrentKernels);
    printf("tccDriver: %d\n", deviceProp.tccDriver);
    printf("unifiedAddressing: %d\n", deviceProp.unifiedAddressing);
    printf("pciBusID: %d\n", deviceProp.pciBusID);
    printf("pciDeviceID: %d\n", deviceProp.pciDeviceID);
    printf("computeMode: %d\n", deviceProp.computeMode);
    if (deviceProp.computeMode == cudaComputeModeDefault) printf("computeMode: %s\n", "Default (multiple host threads can use ::cudaSetDevice() with device simultaneously)");
    if (deviceProp.computeMode == cudaComputeModeExclusive) printf("computeMode: %s\n", "Exclusive (only one host thread in one process is able to use ::cudaSetDevice() with this device)");
    if (deviceProp.computeMode == cudaComputeModeProhibited) printf("computeMode: %s\n", "Prohibited (no host thread can use ::cudaSetDevice() with this device)");
    if (deviceProp.computeMode == cudaComputeModeExclusiveProcess) printf("computeMode: %s\n", "Exclusive Process (many threads in one process is able to use ::cudaSetDevice() with this device)");
    return 0;
}
|
7,821 | #include <stdio.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <time.h>
#include <iostream>
// Variables globales GPU y CPU
#define l_kernel 2
#define stride 2
/******************************
* Procesamiento Matriz CPU *
******************************/
/*
* Funcion Max
*/
/*
 * Returns the larger of two floats (used by the 2x2 max-pooling step).
 */
float MaxCPU(float A, float B){
    if (A > B) return A;
    return B;
}
/*
* Lectura Archivo
*/
/*
 * Reads a 3-channel image from a text file whose first line is "M N",
 * followed by the R, G and B planes, each holding M*N floats.
 *   tipo == 0: each plane is read in plain row-major order.
 *   tipo == 1: each plane is re-shuffled on the fly into an SoA layout where
 *              every position of a stride x stride block gets its own
 *              (M/stride)*(N/stride) sub-plane.
 * On return *R, *G, *B own newly allocated buffers and *M, *N hold the size.
 */
void Read(float** R, float** G, float** B, int *M, int *N, const char *filename, int tipo) {
    FILE *fp;
    fp = fopen(filename, "r");
    if (fp == NULL) { // fail loudly rather than crash inside fscanf
        fprintf(stderr, "Read: cannot open %s\n", filename);
        exit(EXIT_FAILURE);
    }
    fscanf(fp, "%d %d\n", M, N);
    int imsize = (*M) * (*N);
    int Mres, Nres, X, Y;
    float* R1, * G1, * B1;
    Mres = (*M) / stride;
    Nres = (*N) / stride;
    X = stride;
    Y = stride;
    R1 = new float[imsize];
    G1 = new float[imsize];
    B1 = new float[imsize];
    if (tipo == 0){ // plain row-major read, one plane after another
        for(int i = 0; i < imsize; i++)
            fscanf(fp, "%f ", &(R1[i]));
        for(int i = 0; i < imsize; i++)
            fscanf(fp, "%f ", &(G1[i]));
        for(int i = 0; i < imsize; i++)
            fscanf(fp, "%f ", &(B1[i]));
    }
    else if (tipo == 1){ // SoA read
        // BUG FIX: the original read all three planes into R1, leaving G1
        // and B1 uninitialized; the G and B planes now land in their own
        // buffers.
        for(int jj = 0; jj < Mres; jj++)
            for(int j = 0; j < Y; j++)
                for(int ii = 0; ii < Nres; ii++)
                    for(int i = 0; i < X; i++)
                        fscanf(fp, "%f ", &(R1[(i + j * X) * (Mres*Nres) + (ii + jj * Nres)]));
        for(int jj = 0; jj < Mres; jj++)
            for(int j = 0; j < Y; j++)
                for(int ii = 0; ii < Nres; ii++)
                    for(int i = 0; i < X; i++)
                        fscanf(fp, "%f ", &(G1[(i + j * X) * (Mres*Nres) + (ii + jj * Nres)]));
        for(int jj = 0; jj < Mres; jj++)
            for(int j = 0; j < Y; j++)
                for(int ii = 0; ii < Nres; ii++)
                    for(int i = 0; i < X; i++)
                        fscanf(fp, "%f ", &(B1[(i + j * X) * (Mres*Nres) + (ii + jj * Nres)]));
    }
    fclose(fp);
    *R = R1; *G = G1; *B = B1;
}
/*
* Escritura Archivo
*/
/*
 * Writes a single-channel matrix to a text file, duplicating it three times
 * so the output matches the 3-plane format expected by Read().
 * Format: one "M N" header line, then three lines of M*N space-separated
 * floats each.
 */
void Write(float* out, int M_out, int N_out, const char *filename) {
    FILE *fp = fopen(filename, "w");
    fprintf(fp, "%d %d\n", M_out, N_out);
    int last = M_out*N_out - 1;
    for(int plane = 0; plane < 3; plane++){
        for(int i = 0; i < last; i++)
            fprintf(fp, "%f ", out[i]);
        fprintf(fp, "%f\n", out[last]);
    }
    fclose(fp);
}
/*
* Imprimir Array como matriz
*/
/*
 * Prints a flat array to stdout as an N x M matrix, 4 decimal places per
 * value, followed by a blank line.
 */
void ShowMatrix(float *matrix, int N, int M) {
    for(int row = 0; row < N; row++){
        for(int col = 0; col < M; col++){
            printf("%.4f ", matrix[col + row*M]);
        }
        printf("\n");
    }
    printf("\n");
}
/*
* Suma de Matrices R,G,B y Funcion de activacion RELU
*/
/*
 * Averages the R, G and B planes element-wise into a newly allocated
 * single-channel image of M*N floats; the caller owns the result via *out.
 */
void SumaMatrizCPU(float **out, float *R, float *G, float *B, int M, int N){
    int total = M * N;
    float* sum = new float[total];
    for(int i = 0; i < total; i++)
        sum[i] = (R[i] + G[i] + B[i]) / 3.0;
    *out = sum;
}
/*
* Funcion de activacion RELU
*/
/*
 * Clamps every element of the M*N image to [0, 1] in place
 * (a ReLU capped at 1).
 */
void ReluCPU(float *out, int M, int N){
    int total = M * N;
    for(int i = 0; i < total; i++){
        float v = out[i];
        if (v < 0) out[i] = 0;
        else if (v > 1) out[i] = 1;
    }
}
/*
* Funcion Max pooling 2x2
*/
/*
 * 2x2 max pooling: halves both dimensions; each output pixel is the max of a
 * 2x2 block of the input. Replaces *out with a newly allocated buffer and
 * updates *M, *N.
 * NOTE(review): the previous *out buffer is not freed here; callers appear
 * to rely on that — confirm ownership before adding a delete[].
 */
void PoolingCPU(float **out, int *M, int *N){
    int new_N, new_M;
    float v1, v2, v3, v4;
    new_N = (*N)/2;
    new_M = (*M)/2;
    float* temp = new float[new_N*new_M];
    for(int i=0; i < new_M; i++){
        for(int j=0; j < new_N; j++){
            // 2x2 window with top-left corner at (row i*2, col j*2).
            v1 = (*out)[j*2     +  i*2   *(*N)];
            v2 = (*out)[j*2 + 1 +  i*2   *(*N)];
            // BUG FIX: the bottom row of the window is i*2+1, not i+1
            // (the two only coincide for i == 0).
            v3 = (*out)[j*2     + (i*2+1)*(*N)];
            v4 = (*out)[j*2 + 1 + (i*2+1)*(*N)];
            temp[j + i*new_N] = MaxCPU(MaxCPU(v1, v2), MaxCPU(v3, v4));
        }
    }
    *out = temp;
    *N = new_N; *M = new_M;
}
/*
* "Producto" Matricial sub_A * kernel = C
* id: id del primer elemento de la submatriz, N: ancho matriz R
*/
/*
 * Dot product between the l_kernel x l_kernel sub-matrix of A whose top-left
 * element sits at flat index `id` (row-major, matrix width N_original) and
 * the convolution kernel B (read in row-major order).
 */
float Product_Matrix(float *A, float *B, int N_original, int id){
    int base_col = id % N_original;
    int base_row = id / N_original;
    float acc = 0.0;
    int k = 0;   // running index into the kernel
    for(int r = base_row; r < base_row + l_kernel; r++){
        for(int c = base_col; c < base_col + l_kernel; c++){
            acc += A[c + r*N_original] * B[k];
            k++;
        }
    }
    return acc;
}
/*
* Convolucion de A y kernel (recorre la primera matriz y hace el producto matricial por cada elemento)
*/
/*
 * Stride-2 convolution of A (width N_original) with `kernel`, writing the
 * Mres x Nres result into a newly allocated buffer returned via *out.
 * The while-loop walks flat indices over A: on even input rows every
 * `stride`-th column is a window origin; odd rows are skipped by jumping a
 * whole row at once, which together implement the stride in both dimensions.
 * NOTE(review): the loop bound uses N_original for both dimensions, so this
 * assumes a square input (M == N) — confirm with callers before reusing.
 */
void ConvolucionCPU(float *A, float **out, float *kernel, int Mres, int Nres, int N_original){
float* temp = new float[Nres*Mres];
int count_output = 0;
int i = 0;   // flat index of the current window's top-left element
while(i < N_original*(N_original-1)){
if((i/N_original)%2 == 0){   // even input row: take a window every `stride` columns
temp[count_output] = Product_Matrix(A, kernel, N_original, i);
// printf("i: %d out:%d\n", i, count_output);
// printf("fila: %d\n", (i/N_original));
count_output++;
i = i+stride;
} else{   // odd input row: skip it entirely (vertical stride)
i = i+N_original;
}
}
*out = temp;
}
/*
 * CPU reference pipeline: reads img.txt, runs two stride-2 convolutions (the
 * first over the three colour planes, averaged together), clamps with a
 * capped ReLU, 2x2 max-pools, writes ResultadoCPU.txt and prints the
 * elapsed time.
 */
void cnn_CPU(){
    int M, N;
    clock_t t1, t2;
    double ms;
    float array[l_kernel*l_kernel] = {1, 0, 1, -2}; // 2x2 convolution kernel
    // FIX: point straight at the stack array; the original allocated a
    // buffer with new[] and immediately leaked it by reassigning the pointer.
    float *kernel = array;
    float *Rhost, *Ghost, *Bhost;
    float *Rhostout, *Ghostout, *Bhostout;
    // Read the image in plain (AoS) layout
    Read(&Rhost, &Ghost, &Bhost, &M, &N, "img.txt", 0);
    printf("Kernel:\n");
    ShowMatrix(kernel, l_kernel, l_kernel);
    float *output_image; // single-channel working image between stages
    t1 = clock();
    for(int c=0; c<2; c++){
        int N_original = N;
        N = N/stride;
        M = M/stride;
        if(c == 0){
            // First pass: convolve each colour plane, then average them.
            ConvolucionCPU(Rhost, &Rhostout, kernel, M, N, N_original);
            ConvolucionCPU(Ghost, &Ghostout, kernel, M, N, N_original);
            ConvolucionCPU(Bhost, &Bhostout, kernel, M, N, N_original);
            SumaMatrizCPU(&output_image, Rhostout, Ghostout, Bhostout, M, N);
        } else {
            // NOTE(review): the previous output_image buffer is overwritten
            // here without being freed (ConvolucionCPU allocates a fresh
            // result) — confirm before tightening ownership.
            ConvolucionCPU(output_image, &output_image, kernel, M, N, N_original);
        }
    }
    ReluCPU(output_image, M, N);
    PoolingCPU(&output_image, &M, &N);
    printf("Imagen Salida CPU: %d x %d\n", M, N);
    t2 = clock();
    ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
    std::cout << "Tiempo CPU: " << ms << "[ms]" << std::endl;
    Write(output_image, M, N, "ResultadoCPU.txt");
    delete[] Rhost; delete[] Ghost; delete[] Bhost;
    // FIX: the original chained the last two deletes with a comma operator.
    delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
    delete[] output_image;
}
/*
 * Element-wise average of three device arrays of Mres*Nres floats.
 * One thread per element; `out` may alias one of the inputs because each
 * thread only touches its own index.
 */
__global__ void kernel_sum(float *Rin, float *Gin, float *Bin, float *out, int Mres, int Nres){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= Mres*Nres) return;
    out[idx] = (Rin[idx] + Gin[idx] + Bin[idx])/3.0;
}
/*
 * Clamps each of the Mres*Nres elements to [0, 1] in place (capped ReLU);
 * one thread per element with a tail guard.
 */
__global__ void kernel_relu(float *out, int Mres, int Nres){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= Mres*Nres) return;
    float v = out[idx];
    if (v < 0) out[idx] = 0.0;
    else if (v > 1) out[idx] = 1.0;
}
/*
* Procesamiento SoA GPU
*/
/*
 * Max pooling over an SoA-layout image: plane p holds position p of every
 * stride x stride block, so the max over a block is the max of the same flat
 * index across the l_kernel*l_kernel planes. One thread per output pixel.
 * NOTE(review): the max starts at 0.0, so all-negative windows pool to 0 —
 * acceptable here because inputs pass through kernel_relu first.
 */
__global__ void kernel_poolingSoA(float *in, float *out, int Mres, int Nres, int N_original){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int plane = Mres * Nres;
    if (idx >= plane) return;
    float best = 0.0;
    for(int p = 0; p < l_kernel*l_kernel; p++){
        float candidate = in[idx + p * plane];
        if (candidate > best) best = candidate;
    }
    out[idx] = best;
}
/*
 * Maps a flat row-major pixel index `tid` (in an image of Mres*stride x
 * Nres*stride pixels) to its SoA position: the pixel's offset inside its
 * stride x stride block selects the plane, and the block's flat index inside
 * the Mres x Nres grid of blocks selects the position within that plane.
 * NOTE(review): x_pix is taken modulo Mres*stride while y_pix divides by
 * Nres*stride, and the plane stride multiplies by l_kernel — this only lines
 * up when the image is square and l_kernel == stride (both are 2 here);
 * confirm before changing either #define.
 */
__device__ int kernel_ordenSoA(int tid, int Mres, int Nres){
int x_pix, y_pix, x_block, y_block, x_dentro_del_bloque, y_dentro_del_bloque;
x_pix = tid%(Mres*stride); // pixel column
y_pix = tid/(Nres*stride); // pixel row
x_block = x_pix/stride; // coordinates of the enclosing block
y_block = y_pix/stride;
x_dentro_del_bloque = x_pix%stride; // position inside the block -> plane index
y_dentro_del_bloque = y_pix%stride;
return (x_dentro_del_bloque + y_dentro_del_bloque * l_kernel) * (Mres*Nres) + (x_block + y_block * Nres);
}
/*
 * SoA convolution: each output value is the dot product of the kernel with
 * the l_kernel*l_kernel planes at the same flat index; the result is itself
 * stored in SoA order (via kernel_ordenSoA) for the next half-size stage.
 * One thread per output value.
 */
__global__ void kernel_convolucionSoA(float *in, float *out, float *kernel_dev, int Mres, int Nres){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int plane = Mres * Nres;
    if (idx >= plane) return;
    float acc = 0.0;
    for(int p = 0; p < l_kernel*l_kernel; p++)
        acc += in[idx + p * plane] * kernel_dev[p];
    // Store in SoA order in the (half-size) output image.
    out[kernel_ordenSoA(idx, Mres/2, Nres/2)] = acc;
}
/*
 * SoA pipeline on the GPU with the convolution kernel in global memory:
 * reads img.txt in SoA layout, runs two stride-2 convolutions (the first
 * averaged over the three colour planes), ReLU and 2x2 pooling, writes
 * ResultadoSoA.txt and reports the kernel time measured with CUDA events.
 */
void SoA_GPU(){
    cudaEvent_t ct1, ct2;
    float dt;
    int M, N, Mres, Nres;
    int gs, bs = 256;
    float array[l_kernel*l_kernel] = {1, 0, 1, -2};
    // FIX: use the stack array directly; the original new[]'d a buffer and
    // leaked it by immediately reassigning the pointer.
    float *kernel = array;
    float *Rhost, *Ghost, *Bhost, *hostout;
    float *Rdev_in, *Gdev_in, *Bdev_in, *Rdev_out, *Gdev_out, *Bdev_out, *kernel_dev;
    Read(&Rhost, &Ghost, &Bhost, &M, &N, "img.txt", 1);
    Mres = M/2;
    Nres = N/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    // Convolution kernel on the GPU
    cudaMalloc((void**)&kernel_dev, l_kernel * l_kernel * sizeof(float));
    // Input planes
    cudaMalloc((void**)&Rdev_in, M * N * sizeof(float));
    cudaMalloc((void**)&Gdev_in, M * N * sizeof(float));
    cudaMalloc((void**)&Bdev_in, M * N * sizeof(float));
    // Output / scratch planes
    cudaMalloc((void**)&Rdev_out, Mres * Nres * sizeof(float));
    cudaMalloc((void**)&Gdev_out, Mres * Nres * sizeof(float));
    cudaMalloc((void**)&Bdev_out, Mres * Nres * sizeof(float));
    // Upload image and kernel
    cudaMemcpy(Rdev_in, Rhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Gdev_in, Ghost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Bdev_in, Bhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(kernel_dev, kernel, l_kernel * l_kernel * sizeof(float), cudaMemcpyHostToDevice);
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    // Stage 1: convolve each colour plane, then average them into Rdev_out
    // (safe aliasing: kernel_sum touches one index per thread).
    kernel_convolucionSoA<<<gs, bs>>>(Rdev_in, Rdev_out, kernel_dev, Mres, Nres);
    kernel_convolucionSoA<<<gs, bs>>>(Gdev_in, Gdev_out, kernel_dev, Mres, Nres);
    kernel_convolucionSoA<<<gs, bs>>>(Bdev_in, Bdev_out, kernel_dev, Mres, Nres);
    kernel_sum<<<gs, bs>>>(Rdev_out, Gdev_out, Bdev_out, Rdev_out, Mres, Nres);
    // Stage 2 at half size. BUG FIX: convolve into a separate buffer
    // (Gdev_out) — the original wrote in place while still reading the same
    // buffer, a device-side data race.
    Mres = Mres/2;
    Nres = Nres/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    kernel_convolucionSoA<<<gs, bs>>>(Rdev_out, Gdev_out, kernel_dev, Mres, Nres);
    kernel_relu<<<gs, bs>>>(Gdev_out, Mres, Nres);
    int N_original = Nres;
    Nres = Nres/2;
    Mres = Mres/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    // BUG FIX: the original aliased Rdev_in = Rdev_out here, leaking the
    // input buffer and then freeing the same pointer twice below; the
    // pooling result now ping-pongs back into Rdev_out instead.
    kernel_poolingSoA<<<gs, bs>>>(Gdev_out, Rdev_out, Mres, Nres, N_original);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    hostout = new float[Mres*Nres];
    cudaMemcpy(hostout, Rdev_out, Mres * Nres * sizeof(float), cudaMemcpyDeviceToHost);
    printf("Imagen salida SoA: %d x %d\n", Mres, Nres);
    Write(hostout, Mres, Nres, "ResultadoSoA.txt\0");
    std::cout << "Tiempo SoA" << ": " << dt << "[ms]" << std::endl;
    cudaFree(kernel_dev);   // FIX: was never freed
    cudaFree(Rdev_in); cudaFree(Gdev_in); cudaFree(Bdev_in);
    cudaFree(Rdev_out); cudaFree(Gdev_out); cudaFree(Bdev_out);
    delete[] Rhost; delete[] Ghost; delete[] Bhost;
    delete[] hostout;
}
/*
 * Same SoA convolution as kernel_convolucionSoA, but with the fixed 2x2
 * kernel {1, 0, 1, -2} staged in shared memory instead of read from global
 * memory.
 */
__global__ void kernel_convolucion_SoA_MCOMP(float *in, float *out, int Mres, int Nres){
    __shared__ int kernel_local[4];
    // FIX: let a single thread initialize the shared kernel and make the
    // whole block wait; previously every thread wrote these four values
    // concurrently (benign only because the values were identical).
    if (threadIdx.x == 0) {
        kernel_local[0] = 1;
        kernel_local[1] = 0;
        kernel_local[2] = 1;
        kernel_local[3] = -2;
    }
    __syncthreads();   // before the tid guard so every thread reaches it
    int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid < Mres*Nres){
        float suma = 0.0;
        for(int i=0; i<l_kernel*l_kernel; i++){
            suma += in[tid + i * Mres* Nres] * kernel_local[i];
        }
        // Store in SoA order in the (half-size) output image.
        int id = kernel_ordenSoA(tid, Mres/2, Nres/2);
        out[id] = suma;
    }
}
/*
* Memoria compartida SoA
*/
/*
 * Same SoA pipeline as SoA_GPU, but the convolution kernel lives in shared
 * memory inside kernel_convolucion_SoA_MCOMP, so no kernel buffer is
 * uploaded. Writes Resultado_SoA_MCOMP.txt and reports the kernel time.
 */
void SoA_MCOMP_GPU(){
    cudaEvent_t ct1, ct2;
    float dt;
    int M, N, Mres, Nres;
    int gs, bs = 256;
    float *Rhost, *Ghost, *Bhost, *hostout;
    float *Rdev_in, *Gdev_in, *Bdev_in, *Rdev_out, *Gdev_out, *Bdev_out;
    Read(&Rhost, &Ghost, &Bhost, &M, &N, "img.txt", 1);
    Mres = M/2;
    Nres = N/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    // Input planes
    cudaMalloc((void**)&Rdev_in, M * N * sizeof(float));
    cudaMalloc((void**)&Gdev_in, M * N * sizeof(float));
    cudaMalloc((void**)&Bdev_in, M * N * sizeof(float));
    // Output / scratch planes
    cudaMalloc((void**)&Rdev_out, Mres * Nres * sizeof(float));
    cudaMalloc((void**)&Gdev_out, Mres * Nres * sizeof(float));
    cudaMalloc((void**)&Bdev_out, Mres * Nres * sizeof(float));
    cudaMemcpy(Rdev_in, Rhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Gdev_in, Ghost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Bdev_in, Bhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    // Stage 1: convolve each colour plane, then average them into Rdev_out.
    kernel_convolucion_SoA_MCOMP<<<gs, bs>>>(Rdev_in, Rdev_out, Mres, Nres);
    kernel_convolucion_SoA_MCOMP<<<gs, bs>>>(Gdev_in, Gdev_out, Mres, Nres);
    kernel_convolucion_SoA_MCOMP<<<gs, bs>>>(Bdev_in, Bdev_out, Mres, Nres);
    kernel_sum<<<gs, bs>>>(Rdev_out, Gdev_out, Bdev_out, Rdev_out, Mres, Nres);
    // Stage 2 at half size. BUG FIX: convolve into a separate buffer
    // (Gdev_out) — the original wrote in place, racing with its own reads.
    Mres = Mres/2;
    Nres = Nres/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    kernel_convolucion_SoA_MCOMP<<<gs, bs>>>(Rdev_out, Gdev_out, Mres, Nres);
    kernel_relu<<<gs, bs>>>(Gdev_out, Mres, Nres);
    int N_original = Nres;
    Nres = Nres/2;
    Mres = Mres/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    // BUG FIX: the original aliased Rdev_in = Rdev_out (buffer leak + the
    // same pointer freed twice below); pooling now ping-pongs back into
    // Rdev_out.
    kernel_poolingSoA<<<gs, bs>>>(Gdev_out, Rdev_out, Mres, Nres, N_original);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    hostout = new float[Mres*Nres];
    cudaMemcpy(hostout, Rdev_out, Mres * Nres * sizeof(float), cudaMemcpyDeviceToHost);
    printf("Imagen salida SoA_MCOMP: %d x %d\n", Mres, Nres);
    Write(hostout, Mres, Nres, "Resultado_SoA_MCOMP.txt\0");
    std::cout << "Tiempo SoA con MCOMP" << ": " << dt << "[ms]" << std::endl;
    cudaFree(Rdev_in); cudaFree(Gdev_in); cudaFree(Bdev_in);
    cudaFree(Rdev_out); cudaFree(Gdev_out); cudaFree(Bdev_out);
    delete[] Rhost; delete[] Ghost; delete[] Bhost;
    delete[] hostout;
}
/*
* Memoria constante SoA
*/
__constant__ float kernel_const[4];
/*
 * SoA convolution reading the 2x2 kernel from __constant__ memory
 * (kernel_const), which broadcasts efficiently when all lanes read the same
 * element. Output is stored in SoA order for the next half-size stage.
 */
__global__ void kernel_convolucion_SoA_MCONST(float *in, float *out, int Mres, int Nres){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    int plane = Mres * Nres;
    if (idx >= plane) return;
    float acc = 0.0;
    for(int p = 0; p < l_kernel*l_kernel; p++)
        acc += in[idx + p * plane] * kernel_const[p];
    // Store in SoA order in the (half-size) output image.
    out[kernel_ordenSoA(idx, Mres/2, Nres/2)] = acc;
}
/*
 * SoA pipeline with the convolution kernel in __constant__ memory
 * (kernel_const); otherwise identical to SoA_GPU. Writes
 * Resultado_SoA_MCONST.txt and reports the kernel time.
 */
void SoA_MCONST_GPU(){
    cudaEvent_t ct1, ct2;
    float dt;
    int M, N, Mres, Nres;
    int gs, bs = 256;
    // BUG FIX: the original new[]'d kernel_host, reassigned it to this stack
    // array (leaking the allocation), and later delete[]'d the stack array —
    // undefined behaviour. The constant-memory upload now uses `array`
    // directly and no host buffer is allocated.
    float array[l_kernel*l_kernel] = {1, 0, 1, -2};
    float *Rhost, *Ghost, *Bhost, *hostout;
    float *Rdev_in, *Gdev_in, *Bdev_in, *Rdev_out, *Gdev_out, *Bdev_out;
    Read(&Rhost, &Ghost, &Bhost, &M, &N, "img.txt", 1 );
    Mres = M/2;
    Nres = N/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    // Input planes
    cudaMalloc((void**)&Rdev_in, M * N * sizeof(float));
    cudaMalloc((void**)&Gdev_in, M * N * sizeof(float));
    cudaMalloc((void**)&Bdev_in, M * N * sizeof(float));
    // Output / scratch planes
    cudaMalloc((void**)&Rdev_out, Mres * Nres * sizeof(float));
    cudaMalloc((void**)&Gdev_out, Mres * Nres * sizeof(float));
    cudaMalloc((void**)&Bdev_out, Mres * Nres * sizeof(float));
    cudaMemcpy(Rdev_in, Rhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Gdev_in, Ghost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Bdev_in, Bhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    // Upload the kernel to constant memory
    cudaMemcpyToSymbol(kernel_const, array, 4 * sizeof(float), 0, cudaMemcpyHostToDevice);
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    // Stage 1: convolve each colour plane, then average them into Rdev_out.
    kernel_convolucion_SoA_MCONST<<<gs, bs>>>(Rdev_in, Rdev_out, Mres, Nres);
    kernel_convolucion_SoA_MCONST<<<gs, bs>>>(Gdev_in, Gdev_out, Mres, Nres);
    kernel_convolucion_SoA_MCONST<<<gs, bs>>>(Bdev_in, Bdev_out, Mres, Nres);
    kernel_sum<<<gs, bs>>>(Rdev_out, Gdev_out, Bdev_out, Rdev_out, Mres, Nres);
    // Stage 2 at half size. BUG FIX: convolve into a separate buffer
    // (Gdev_out) — the original wrote in place, racing with its own reads.
    Mres = Mres/2;
    Nres = Nres/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    kernel_convolucion_SoA_MCONST<<<gs, bs>>>(Rdev_out, Gdev_out, Mres, Nres);
    kernel_relu<<<gs, bs>>>(Gdev_out, Mres, Nres);
    int N_original = Nres;
    Nres = Nres/2;
    Mres = Mres/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    // BUG FIX: the original aliased Rdev_in = Rdev_out (buffer leak + double
    // cudaFree below); pooling now ping-pongs back into Rdev_out.
    kernel_poolingSoA<<<gs, bs>>>(Gdev_out, Rdev_out, Mres, Nres, N_original);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    hostout = new float[Mres*Nres];
    cudaMemcpy(hostout, Rdev_out, Mres * Nres * sizeof(float), cudaMemcpyDeviceToHost);
    printf("Imagen salida SoA_MCONST: %d x %d\n", Mres, Nres);
    Write(hostout, Mres, Nres, "Resultado_SoA_MCONST.txt\0");
    std::cout << "Tiempo SoA con MCONST" << ": " << dt << "[ms]" << std::endl;
    cudaFree(Rdev_in); cudaFree(Gdev_in); cudaFree(Bdev_in);
    cudaFree(Rdev_out); cudaFree(Gdev_out); cudaFree(Bdev_out);
    delete[] Rhost; delete[] Ghost; delete[] Bhost;
    delete[] hostout;
}
/*
* Procesamiento AoS GPU
*/
/*
 * 2x2 max pooling over a row-major (AoS) image of width N_original; one
 * thread per pixel of the Mres x Nres output.
 * NOTE(review): the max starts at 0.0 — relies on a preceding ReLU pass.
 */
__global__ void kernel_poolingAoS(float *in, float *out, int Mres, int Nres, int N_original){
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if(tid < Nres*Mres){ // one thread per output pixel
float max = 0.0;
int x, y, col = 0;
x = (tid%Nres);
y = (tid/Nres);
// If N_original is odd, the border pooling is stored by this thread.
// NOTE(review): `!N_original%2` parses as `(!N_original) % 2`, which is 0
// for any non-zero N_original, so this edge adjustment never fires; the
// intent was probably `N_original % 2`. Left as-is to preserve behaviour —
// confirm the intended edge handling before fixing.
if(!N_original%2 && x+1 == N_original-1){
col = tid%Mres;
}
// The 2x2 input window for output pixel (y, x).
float valores[4] = {
in[x*2 + y*2*(N_original)],
in[x*2 + 1 + y*2*(N_original)],
in[x*2 + (y*2+1)*(N_original)],
in[x*2 + 1 + (y*2+1)*(N_original)]
};
for (int i = 0; i< 4; i++){
if (valores[i] > max){
max = valores[i];
}
}
out[tid - col] = max;
}
}
/*
 * AoS convolution with stride 2: each thread computes one output pixel from
 * a 3x3 neighbourhood of the input image (width Nres*2).
 * NOTE(review): this indexes kernel_dev[0..8] (a 3x3 kernel), but every host
 * caller allocates and fills only l_kernel*l_kernel = 4 floats — the last
 * five reads are out of bounds. Either the 3x3 loop or the kernel size is
 * wrong; confirm the intended kernel shape before fixing.
 */
__global__ void kernel_convolucionAoS(float *in, float *out, float *kernel_dev, int Mres, int Nres){
int tid = threadIdx.x + blockDim.x * blockIdx.x;
if (tid < Mres*Nres){
int x, y, N_original;
x = 1 + tid%Nres; // coordinates of the centre of each sub-matrix
y = 1 + tid/Nres;
N_original = Nres*2;
float suma = 0;
int indice_sub_matriz, indice_kernel;
for (int i = -1; i<=1 ; i++){
for (int j = -1; j <= 1; j++){
// input sample (scaled by the stride) and matching kernel entry
indice_sub_matriz = (x+i)*stride + (y+j)*stride*(N_original);
indice_kernel = (1+i) + (1+j)*3;
suma += in[indice_sub_matriz] * kernel_dev[indice_kernel];
}
}
// printf("%f\n", suma);
out[tid] = suma;
}
}
/*
 * AoS (plain row-major) pipeline: reads img.txt, runs two convolutions, ReLU
 * and 2x2 pooling on the GPU, writes ResultadoAoS.txt and reports the kernel
 * time. NOTE(review): kernel_convolucionAoS indexes a 3x3 kernel while only
 * l_kernel*l_kernel = 4 floats are uploaded here — see the note there.
 */
void AoS_GPU(){
    cudaEvent_t ct1, ct2;
    float dt;
    int M, N, Mres, Nres;
    int gs, bs = 256;
    float array[l_kernel*l_kernel] = {1, 0, 1, -2};
    // FIX: use the stack array directly; the original new[]'d a buffer and
    // leaked it by immediately reassigning the pointer.
    float *kernel = array;
    float *Rhost, *Ghost, *Bhost, *hostout;
    float *Rdev_in, *Gdev_in, *Bdev_in, *Rdev_out, *Gdev_out, *Bdev_out, *kernel_dev;
    Read(&Rhost, &Ghost, &Bhost, &M, &N, "img.txt", 0);
    Mres = M/2;
    Nres = N/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    // Convolution kernel on the GPU
    cudaMalloc((void**)&kernel_dev, l_kernel * l_kernel * sizeof(float));
    // Input planes
    cudaMalloc((void**)&Rdev_in, M * N * sizeof(float));
    cudaMalloc((void**)&Gdev_in, M * N * sizeof(float));
    cudaMalloc((void**)&Bdev_in, M * N * sizeof(float));
    // Output / scratch planes
    cudaMalloc((void**)&Rdev_out, Mres * Nres * sizeof(float));
    cudaMalloc((void**)&Gdev_out, Mres * Nres * sizeof(float));
    cudaMalloc((void**)&Bdev_out, Mres * Nres * sizeof(float));
    cudaMemcpy(Rdev_in, Rhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Gdev_in, Ghost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Bdev_in, Bhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(kernel_dev, kernel, l_kernel * l_kernel * sizeof(float), cudaMemcpyHostToDevice);
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);
    // Stage 1: convolve each colour plane, then average them into Rdev_out.
    kernel_convolucionAoS<<<gs, bs>>>(Rdev_in, Rdev_out, kernel_dev, Mres, Nres);
    kernel_convolucionAoS<<<gs, bs>>>(Gdev_in, Gdev_out, kernel_dev, Mres, Nres);
    kernel_convolucionAoS<<<gs, bs>>>(Bdev_in, Bdev_out, kernel_dev, Mres, Nres);
    kernel_sum<<<gs, bs>>>(Rdev_out, Gdev_out, Bdev_out, Rdev_out, Mres, Nres);
    // Stage 2: output size depends on the stride.
    if(stride == 1){
        Mres = M - l_kernel + 1;
        Nres = N - l_kernel + 1;
    } else{
        Mres = Mres/2;
        Nres = Nres/2;
    }
    gs = (int)ceil((float) Mres*Nres / bs);
    // BUG FIX: convolve into a separate buffer (Gdev_out); the original ran
    // in place, racing with its own reads.
    kernel_convolucionAoS<<<gs, bs>>>(Rdev_out, Gdev_out, kernel_dev, Mres, Nres);
    kernel_relu<<<gs, bs>>>(Gdev_out, Mres, Nres);
    int N_original = Nres;
    Nres = Nres/2;
    Mres = Mres/2;
    gs = (int)ceil((float) Mres*Nres / bs);
    // BUG FIX: the original aliased Rdev_in = Rdev_out (buffer leak + double
    // cudaFree below); pooling now ping-pongs back into Rdev_out.
    kernel_poolingAoS<<<gs, bs>>>(Gdev_out, Rdev_out, Mres, Nres, N_original);
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    hostout = new float[Mres*Nres];
    cudaMemcpy(hostout, Rdev_out, Mres * Nres * sizeof(float), cudaMemcpyDeviceToHost);
    printf("Imagen salida AoS: %d x %d\n", Mres, Nres);
    Write(hostout, Mres, Nres, "ResultadoAoS.txt\0");
    std::cout << "Tiempo AoS con MG" << ": " << dt << "[ms]" << std::endl;
    cudaFree(kernel_dev);   // FIX: was never freed
    cudaFree(Rdev_in); cudaFree(Gdev_in); cudaFree(Bdev_in);
    cudaFree(Rdev_out); cudaFree(Gdev_out); cudaFree(Bdev_out);
    delete[] Rhost; delete[] Ghost; delete[] Bhost;
    delete[] hostout;
}
/*
* Codigo Principal
*/
/*
 * Runs the CPU reference pipeline followed by the GPU variants (global
 * memory AoS/SoA, shared-memory SoA, constant-memory SoA); each one writes
 * its own result file and reports its elapsed time.
 */
int main(int argc, char **argv){
    cnn_CPU();         // CPU reference (~22 ms)
    AoS_GPU();         // global memory, AoS layout (~0.44 ms)
    SoA_GPU();         // global memory, SoA layout (~0.29 ms)
    SoA_MCOMP_GPU();   // shared-memory kernel (~0.29 ms)
    SoA_MCONST_GPU();  // constant-memory kernel (~0.29 ms)
    return 0;
}
7,822 | #include "includes.h"
/*
 * Uppercases the characters of c whose flag in b is set, using the ASCII
 * offset of 32 between cases. Launched with one block per character
 * (tid = blockIdx.x); N comes from includes.h.
 */
__global__ void Caps(char *c, int *b)
{
    int tid = blockIdx.x;
    if (tid >= N) return;
    if (b[tid] == 1)
        c[tid] = (char)((int)c[tid] - 32);
}
7,823 |
//#include <cutil_inline.h>
#define WIDTH 4096
#define HEIGHT 7808
/*
 * 11-tap Gaussian smoothing along the tidx axis with clamp-to-edge sampling;
 * the result is written transposed (result0[tidx * WIDTH + tidy]).
 * Launched by DoRows4 with 32x32 thread tiles.
 * NOTE(review): as the original author's comment below says, the
 * HEIGHT/WIDTH roles in the offset/result indexing look swapped relative to
 * kernel4 — audit against DoRows4's grid shape before relying on this.
 */
__global__ void kernel3(float *input0,float *result0)
{
int tidx = blockIdx.x * 32 + threadIdx.x;
int tidy = blockIdx.y * 32 + threadIdx.y;
int offset = tidy * HEIGHT;
// THIS IS WRONG TOO! WELL, ALMOST - THE "HEIGHT" AND "WIDTH" NAMES NEED
// SWAPPING (I THINK).
result0[tidx * WIDTH + tidy]
=
input0[((tidx - 5 >= 0) ? ((tidx - 5 < HEIGHT) ? (tidx - 5) : (HEIGHT - 1)) : 0) + offset] * 3.5482936e-2 +
input0[((tidx - 4 >= 0) ? ((tidx - 4 < HEIGHT) ? (tidx - 4) : (HEIGHT - 1)) : 0) + offset] * 5.850147e-2 +
input0[((tidx - 3 >= 0) ? ((tidx - 3 < HEIGHT) ? (tidx - 3) : (HEIGHT - 1)) : 0) + offset] * 8.63096e-2 +
input0[((tidx - 2 >= 0) ? ((tidx - 2 < HEIGHT) ? (tidx - 2) : (HEIGHT - 1)) : 0) + offset] * 0.113945305 +
input0[((tidx - 1 >= 0) ? ((tidx - 1 < HEIGHT) ? (tidx - 1) : (HEIGHT - 1)) : 0) + offset] * 0.13461047 +
input0[((tidx >= 0) ? ((tidx < HEIGHT) ? (tidx ) : (HEIGHT - 1)) : 0) + offset] * 0.14230047 +
input0[((tidx + 1 >= 0) ? ((tidx + 1 < HEIGHT) ? (tidx + 1) : (HEIGHT - 1)) : 0) + offset] * 0.13461047 +
input0[((tidx + 2 >= 0) ? ((tidx + 2 < HEIGHT) ? (tidx + 2) : (HEIGHT - 1)) : 0) + offset] * 0.113945305 +
input0[((tidx + 3 >= 0) ? ((tidx + 3 < HEIGHT) ? (tidx + 3) : (HEIGHT - 1)) : 0) + offset] * 8.63096e-2 +
input0[((tidx + 4 >= 0) ? ((tidx + 4 < HEIGHT) ? (tidx + 4) : (HEIGHT - 1)) : 0) + offset] * 5.850147e-2 +
input0[((tidx + 5 >= 0) ? ((tidx + 5 < HEIGHT) ? (tidx + 5) : (HEIGHT - 1)) : 0) + offset] * 3.5482936e-2;
}
/*
 * Launches kernel3 over the full image with 32x32 thread tiles.
 * Assumes HEIGHT and WIDTH are multiples of 32 (no tail blocks).
 */
extern "C" void
DoRows4(float * smoothX, float * res)
{
    dim3 threads(32, 32);
    dim3 blocks(HEIGHT / 32, WIDTH / 32);
    kernel3<<<blocks, threads>>>(smoothX, res);
}
/*
 * Companion to kernel3: 11-tap Gaussian smoothing along the tidx axis,
 * bounded by WIDTH, with clamp-to-edge sampling; the result is written
 * transposed (result0[tidx * HEIGHT + tidy]). Applying kernel3 then kernel4
 * smooths both axes. Launched by DoCols4 with 32x32 thread tiles.
 */
__global__ void kernel4(float *input0,float *result0)
{
int tidx = blockIdx.x * 32 + threadIdx.x;
int tidy = blockIdx.y * 32 + threadIdx.y;
int offset = tidy * WIDTH;
result0[tidx * HEIGHT + tidy]
=
input0[((tidx - 5 >= 0) ? ((tidx - 5 < WIDTH) ? (tidx - 5) : (WIDTH - 1)) : 0) + offset] * 3.5482936e-2 +
input0[((tidx - 4 >= 0) ? ((tidx - 4 < WIDTH) ? (tidx - 4) : (WIDTH - 1)) : 0) + offset] * 5.850147e-2 +
input0[((tidx - 3 >= 0) ? ((tidx - 3 < WIDTH) ? (tidx - 3) : (WIDTH - 1)) : 0) + offset] * 8.63096e-2 +
input0[((tidx - 2 >= 0) ? ((tidx - 2 < WIDTH) ? (tidx - 2) : (WIDTH - 1)) : 0) + offset] * 0.113945305 +
input0[((tidx - 1 >= 0) ? ((tidx - 1 < WIDTH) ? (tidx - 1) : (WIDTH - 1)) : 0) + offset] * 0.13461047 +
input0[((tidx >= 0) ? ((tidx < WIDTH) ? (tidx ) : (WIDTH - 1)) : 0) + offset] * 0.14230047 +
input0[((tidx + 1 >= 0) ? ((tidx + 1 < WIDTH) ? (tidx + 1) : (WIDTH - 1)) : 0) + offset] * 0.13461047 +
input0[((tidx + 2 >= 0) ? ((tidx + 2 < WIDTH) ? (tidx + 2) : (WIDTH - 1)) : 0) + offset] * 0.113945305 +
input0[((tidx + 3 >= 0) ? ((tidx + 3 < WIDTH) ? (tidx + 3) : (WIDTH - 1)) : 0) + offset] * 8.63096e-2 +
input0[((tidx + 4 >= 0) ? ((tidx + 4 < WIDTH) ? (tidx + 4) : (WIDTH - 1)) : 0) + offset] * 5.850147e-2 +
input0[((tidx + 5 >= 0) ? ((tidx + 5 < WIDTH) ? (tidx + 5) : (WIDTH - 1)) : 0) + offset] * 3.5482936e-2;
}
/*
 * Launches kernel4 over the full image with 32x32 thread tiles.
 * Assumes WIDTH and HEIGHT are multiples of 32 (no tail blocks).
 */
extern "C" void
DoCols4(float * smoothY, float * res)
{
    dim3 threads(32, 32);
    dim3 blocks(WIDTH / 32, HEIGHT / 32);
    kernel4<<<blocks, threads>>>(smoothY, res);
}
|
7,824 | #include "includes.h"
/*
 * Gather kernel: d_out[tid] = d_in[indeces[tid]] for the first n elements.
 * One thread per element with a tail guard; assumes every index in
 * `indeces` is within the bounds of d_in.
 */
__global__ void gGetValueByKey(float* d_in, float* d_out, int* indeces, int n) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= n) return;
    d_out[tid] = d_in[indeces[tid]];
}
7,825 | #include <stdio.h>
const int N = 10000 ;
/*
 * Element-wise vector addition with one element per block: only blockIdx.x
 * distinguishes threads because the host launches <<<N, 1>>>.
 */
__global__ void Vector_Addition ( int *dev_a , int *dev_b , int *dev_c)
{
    int tid = blockIdx.x;   // one element per block
    if ( tid < N )
        dev_c[tid] = dev_a[tid] + dev_b[tid];
}
/*
 * Host driver: builds two N-element vectors (a[i] = i, b[i] = i+1), adds
 * them on the GPU with one block per element, prints the sums and releases
 * all buffers.
 */
int main (void)
{
    // Host allocations for the three arrays
    int *Host_a, *Host_b, *Host_c;
    Host_a = (int *) malloc (N*sizeof(int));
    Host_b = (int *) malloc (N*sizeof(int));
    Host_c = (int *) malloc (N*sizeof(int));
    // Initialize the inputs on the CPU
    for ( int i = 0; i < N ; i++ )
    {
        Host_a[i] = i;
        Host_b[i] = i+1;
    }
    // Device allocations for the three arrays
    int *dev_a , *dev_b, *dev_c;
    cudaMalloc(&dev_a , N*sizeof(int));
    cudaMalloc(&dev_b , N*sizeof(int));
    cudaMalloc(&dev_c , N*sizeof(int));
    // Copy the inputs to the GPU
    cudaMemcpy (dev_a , Host_a , N*sizeof(int) , cudaMemcpyHostToDevice);
    cudaMemcpy (dev_b , Host_b , N*sizeof(int) , cudaMemcpyHostToDevice);
    // N blocks of 1 thread each
    Vector_Addition <<< N, 1 >>> (dev_a , dev_b , dev_c );
    // Copy the result back to the CPU
    cudaMemcpy(Host_c , dev_c , N*sizeof(int) , cudaMemcpyDeviceToHost);
    // Print the results
    for ( int i = 0; i < N; i++ )
        printf ("%d + %d = %d\n", Host_a[i] , Host_b[i] , Host_c[i] );
    // Release device memory
    cudaFree (dev_a);
    cudaFree (dev_b);
    cudaFree (dev_c);
    // BUG FIX: the host buffers were never freed.
    free(Host_a);
    free(Host_b);
    free(Host_c);
    system("pause");
    return 0;
}
7,826 | #include <iostream>
#include <stdio.h>
#include <algorithm>
using namespace std;
/*
 * d_c[i] = d_a[i] + d_b[i] for i < n; one thread per element with a bounds
 * guard for the tail block.
 */
__global__ void vecAddKernel(int* d_a, int* d_b, int n, int* d_c){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i >= n) return;
    d_c[i] = d_a[i] + d_b[i];
}
/*
 * Host wrapper: copies a and b (n ints each) to the device, launches
 * vecAddKernel, aborts with a message on any CUDA execution error, and
 * copies the sum back into c.
 */
void vecAdd(int* a, int* b, int n, int* c){
    const int blockSize = 64;
    // BUG FIX: the original `ceil(n/64)` performed integer division first,
    // so the grid was floor(n/64) blocks — the tail elements (and the whole
    // vector when n < 64) were never computed. Use integer ceil-division.
    dim3 dimGrid((n + blockSize - 1) / blockSize, 1, 1);
    dim3 dimBlock(blockSize, 1, 1);
    int size = n*sizeof(int);
    int* d_a;
    int* d_b;
    int* d_c;
    cudaMalloc((void**)&d_a, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_b, size);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    cudaMalloc((void**)&d_c, size);
    vecAddKernel<<<dimGrid, dimBlock>>>(d_a, d_b, n, d_c);
    cudaError_t err = cudaDeviceSynchronize();
    if(err != cudaSuccess){
        printf("%s in %s at line %d\n", cudaGetErrorString(err), __FILE__, __LINE__);
        exit(EXIT_FAILURE);
    }
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
/*
 * Reads n from stdin, builds a = [0..n-1] and b = [n..1], adds them on the
 * GPU via vecAdd, and prints the sums (each should equal n).
 */
int main(){
    int n;
    // BUG FIX: bail out on malformed input instead of using an
    // uninitialized n.
    if (scanf("%d", &n) != 1){
        fprintf(stderr, "expected an integer vector size\n");
        return 1;
    }
    int vecSize = n * sizeof(int);
    int* h_a = (int*)malloc(vecSize);
    int* h_b = (int*)malloc(vecSize);
    int* h_c = (int*)malloc(vecSize);
    for(int i = 0; i < n; ++i){
        h_a[i] = i;
        h_b[i] = n-i;
    }
    vecAdd(h_a, h_b, n, h_c);
    for(int i = 0; i < n; ++i){
        printf("%d ", h_c[i]);
    }
    printf("\n");
    // BUG FIX: the host buffers were never freed.
    free(h_a);
    free(h_b);
    free(h_c);
    return 0;
}
|
7,827 | #include <cuda_runtime.h>
#include <iostream>
#include <string>
using namespace std;
#define CUDA_CHECK cuda_check(__FILE__,__LINE__)
// Reports and aborts on any pending CUDA error; invoked via the CUDA_CHECK
// macro so the failing file and line appear in the message.
void cuda_check(std::string file, int line)
{
    cudaError_t e = cudaGetLastError();
    if (e == cudaSuccess)
        return;
    std::cout << std::endl << file << ", line " << line << ": "
              << cudaGetErrorString(e) << " (" << e << ")" << std::endl;
    exit(1);
}
// CUDA add float
// Device helper: adds two ints and returns the sum as a float.
__device__ float Add(int a, int b)
{
    return a + b;
}
// CUDA array adding
// Element-wise array addition: c[i] = a[i] + b[i] for i < size, one thread
// per element with a tail guard.
__global__ void AddArrays(int *a, int *b, int *c, int size)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    c[idx] = Add(a[idx], b[idx]);
}
// Allocates three device buffers of `size` ints each; aborts via CUDA_CHECK
// on failure.
extern "C" void gpu_memAlloc(int **a, int **b, int **c, int size)
{
    size_t bytes = sizeof(int) * size;
    cudaMalloc(a, bytes); CUDA_CHECK;
    cudaMalloc(b, bytes); CUDA_CHECK;
    cudaMalloc(c, bytes); CUDA_CHECK;
}
//copying data from host to device
// Copies `size` ints from host (src_h) to device (dst_d).
extern "C" void gpu_setData(int *dst_d, int *src_h, int size)
{
    cudaMemcpy(dst_d, src_h, size * sizeof(int), cudaMemcpyHostToDevice);
    CUDA_CHECK;
}
// Copies `size` ints from device (src_d) back to host (dst_h).
extern "C" void gpu_getData(int *dst_h, int *src_d, int size)
{
    cudaMemcpy(dst_h, src_d, size * sizeof(int), cudaMemcpyDeviceToHost);
    CUDA_CHECK;
}
//Addition function
//number of thread and block is set before call Kernel
// Launches AddArrays over `size` elements with 32-thread blocks and a
// ceil-divided grid, then checks for launch errors.
extern "C" void gpu_addVectors(int *a_d, int *b_d, int *c_d, int size)
{
    const int threads = 32;
    dim3 block(threads, 1, 1);
    dim3 grid((size + threads - 1) / threads, 1, 1);
    AddArrays<<<grid, block>>>(a_d, b_d, c_d, size);
    CUDA_CHECK;
}
//read data back to host
// Frees the three device buffers, checking each release.
extern "C" void gpu_memRelease(int *a_d, int *b_d, int *c_d)
{
    cudaFree(a_d); CUDA_CHECK;
    cudaFree(b_d); CUDA_CHECK;
    cudaFree(c_d); CUDA_CHECK;
}
|
7,828 | #include "includes.h"
/*
 * In-place fixup: adds the carry value in1[offset-1] to the two elements
 * this thread covers, in1[offset+i] and in1[offset+i+blockDim.x], skipping
 * any index at or past len. Only threadIdx.x is used, so each launched block
 * covers 2*blockDim.x elements starting at `offset`.
 * NOTE(review): unconditionally reads in1[offset-1]; callers must guarantee
 * offset >= 1 or this reads out of bounds.
 */
__global__ void vecAdd(float * in1, int offset, int len) {
int i = threadIdx.x;
if( (offset + i) <len ) in1[offset + i] = in1[offset + i]+in1[offset-1];
if( (offset + i + blockDim.x ) <len ) in1[offset + i+ blockDim.x] = in1[offset + i+ blockDim.x]+in1[offset-1];
}
7,829 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#define CEIL(a,b) ((a+b-1)/b)   // integer ceiling division
#define PI 3.1415926
#define EDGE 0
#define NOEDGE 255
#define DATAMB(bytes) (bytes/1024/1024)   // bytes -> megabytes
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))   // bytes + ms -> GB/s
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
uch *TheImg, *CopyImg; // Where images are stored in CPU
int ThreshLo=50, ThreshHi=100; // "Edge" vs. "No Edge" thresholds
// Where images and temporary results are stored in GPU
uch *GPUImg, *GPUResultImg;
double *GPUBWImg, *GPUGaussImg, *GPUGradient, *GPUTheta;
// Image properties filled in at load time; HeaderInfo is 54 bytes, the size
// of a BMP file header — presumably the raw header is kept for write-back
// (TODO confirm against the loader, which is outside this view).
struct ImgProp{
ui Hpixels;   // image width in pixels
ui Vpixels;   // image height in pixels
uch HeaderInfo[54];   // raw file header bytes
ul Hbytes;   // bytes per stored row
} ip;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)   // total stored bytes
#define IMAGEPIX (IPH*IPV)     // total pixel count
// Kernel that calculates a B&W image from an RGB image
// resulting image has a double type for each pixel position
/*
 * Converts a 3-byte-per-pixel image (channels read as B, G, R, rows padded
 * to a 4-byte boundary) to grayscale by averaging the channels into a
 * double per pixel. Launch: 1-D grid with CEIL(Hpixels, blockDim.x) blocks
 * per image row.
 */
__global__
void BWKernel(double *ImgBW, uch *ImgGPU, ui Hpixels)
{
    ui ThrPerBlk = blockDim.x;
    ui MYgtid = ThrPerBlk * blockIdx.x + threadIdx.x;
    ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
    ui RowBytes = (Hpixels * 3 + 3) & (~3);   // rows padded to 4 bytes
    ui MYrow = blockIdx.x / BlkPerRow;
    ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
    if (MYcol >= Hpixels) return;             // column out of range
    ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
    ui MYpixIndex = MYrow * Hpixels + MYcol;
    double B = (double)ImgGPU[MYsrcIndex];
    double G = (double)ImgGPU[MYsrcIndex + 1];
    double R = (double)ImgGPU[MYsrcIndex + 2];
    ImgBW[MYpixIndex] = (R + G + B) / 3.0;
}
__device__
double Gauss[5][5] = { { 2, 4, 5, 4, 2 },
{ 4, 9, 12, 9, 4 },
{ 5, 12, 15, 12, 5 },
{ 4, 9, 12, 9, 4 },
{ 2, 4, 5, 4, 2 } };
// Kernel that calculates a Gauss image from the B&W image
// resulting image has a double type for each pixel position
// 5x5 Gaussian smoothing (kernel weights in the __device__ Gauss table,
// normalized by 159) of the B&W image; result written to ImgGauss.
// Pixels within 2 of any border are set to 0.0 since the full window
// does not fit there. Same 1D launch layout as BWKernel.
__global__
void GaussKernel(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
	ui tib = blockDim.x;
	ui gtid = tib * blockIdx.x + threadIdx.x;
	ui blocksPerRow = CEIL(Hpixels, tib);
	int row = blockIdx.x / blocksPerRow;
	int col = gtid - row * blocksPerRow * tib;
	if (col >= Hpixels) return;                 // tail block past the row end
	ui pix = row * Hpixels + col;
	// 2-pixel border cannot host the full 5x5 window
	if ((row < 2) || (row > Vpixels - 3) || (col < 2) || (col > Hpixels - 3)){
		ImgGauss[pix] = 0.0;
		return;
	}
	double acc = 0.0;
	for (int i = -2; i <= 2; i++){
		for (int j = -2; j <= 2; j++){
			int indx = (row + i)*Hpixels + (col + j);
			acc += (ImgBW[indx] * Gauss[i + 2][j + 2]);
		}
	}
	ImgGauss[pix] = acc / 159.00;
}
__device__
double Gx[3][3] = { { -1, 0, 1 },
{ -2, 0, 2 },
{ -1, 0, 1 } };
__device__
double Gy[3][3] = { { -1, -2, -1 },
{ 0, 0, 0 },
{ 1, 2, 1 } };
// Kernel that calculates Gradient, Theta from the Gauss image
// resulting image has a double type for each pixel position
// Applies the 3x3 Sobel operators Gx/Gy to the Gauss-smoothed image,
// writing gradient magnitude to ImgGrad and edge angle (degrees) to
// ImgTheta. A 1-pixel border gets 0.0 in both outputs.
// NOTE(review): GX/GY is +-inf when GY==0 (atan gives +-90 deg) and NaN when
// both are 0 (Theta becomes NaN, which ThresholdKernel's range tests all
// reject). atan2(GY, GX) would be the conventional form — confirm before
// changing, since ThresholdKernel's angle windows depend on this convention.
__global__
void SobelKernel(double *ImgGrad, double *ImgTheta, double *ImgGauss, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
int row, col, indx, i, j;
double GX,GY;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
// 1-pixel border: the 3x3 window does not fit
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
ImgGrad[MYpixIndex] = 0.0;
ImgTheta[MYpixIndex] = 0.0;
return;
}else{
// convolve the 3x3 neighborhood with both Sobel masks
GX = 0.0; GY = 0.0;
for (i = -1; i <= 1; i++){
for (j = -1; j <= 1; j++){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
GX += (ImgGauss[indx] * Gx[i + 1][j + 1]);
GY += (ImgGauss[indx] * Gy[i + 1][j + 1]);
}
}
ImgGrad[MYpixIndex] = sqrt(GX*GX + GY*GY);
ImgTheta[MYpixIndex] = atan(GX / GY)*180.0 / PI;
}
}
// Kernel that calculates the threshold image from Gradient, Theta
// resulting image has an RGB for each pixel, same RGB for each pixel
// Maps Gradient/Theta to a black&white RGB edge image (same value copied
// into all three channels of each pixel):
//   G <= ThreshLo -> NOEDGE;  G >= ThreshHi -> EDGE;
//   otherwise the two neighbors perpendicular to the edge direction
//   (selected by Theta's 45-degree window) decide.
// The 1-pixel border is forced to NOEDGE. A NaN Theta (possible 0/0 in
// SobelKernel) fails every window test and also yields NOEDGE.
__global__
void ThresholdKernel(uch *ImgResult, double *ImgGrad, double *ImgTheta, ui Hpixels, ui Vpixels, ui ThreshLo, ui ThreshHi)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
unsigned char PIXVAL;
double L, H, G, T;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
// output rows are padded BMP rows (multiple of 4 bytes)
ui RowBytes = (Hpixels * 3 + 3) & (~3);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYresultIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
ImgResult[MYresultIndex] = NOEDGE;
ImgResult[MYresultIndex + 1] = NOEDGE;
ImgResult[MYresultIndex + 2] = NOEDGE;
return;
}else{
L = (double)ThreshLo; H = (double)ThreshHi;
G = ImgGrad[MYpixIndex];
PIXVAL = NOEDGE;
if (G <= L){ // no edge
PIXVAL = NOEDGE;
}else if (G >= H){ // edge
PIXVAL = EDGE;
}else{
// ambiguous gradient: check the two neighbors across the edge direction
T = ImgTheta[MYpixIndex];
if ((T<-67.5) || (T>67.5)){
// Look at left and right: [row][col-1] and [row][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - 1]>H) || (ImgGrad[MYpixIndex + 1]>H)) ? EDGE : NOEDGE;
}
else if ((T >= -22.5) && (T <= 22.5)){
// Look at top and bottom: [row-1][col] and [row+1][col]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels]>H) || (ImgGrad[MYpixIndex + Hpixels]>H)) ? EDGE : NOEDGE;
}
else if ((T>22.5) && (T <= 67.5)){
// Look at upper right, lower left: [row-1][col+1] and [row+1][col-1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels + 1]>H) || (ImgGrad[MYpixIndex + Hpixels - 1]>H)) ? EDGE : NOEDGE;
}
else if ((T >= -67.5) && (T<-22.5)){
// Look at upper left, lower right: [row-1][col-1] and [row+1][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels - 1]>H) || (ImgGrad[MYpixIndex + Hpixels + 1]>H)) ? EDGE : NOEDGE;
}
}
ImgResult[MYresultIndex] = PIXVAL;
ImgResult[MYresultIndex + 1] = PIXVAL;
ImgResult[MYresultIndex + 2] = PIXVAL;
}
}
/*
// helper function that wraps CUDA API calls, reports any error and exits
void chkCUDAErr(cudaError_t error_id)
{
if (error_id != CUDA_SUCCESS){
printf("CUDA ERROR :::%\n", cudaGetErrorString(error_id));
exit(EXIT_FAILURE);
}
}
*/
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer.
// Reads a 24-bit/pixel BMP file into a newly allocated 1D linear array and
// fills the global `ip` (dimensions, row bytes, saved 54-byte header).
// Returns NULL only when the pixel buffer cannot be allocated; exits the
// process on open/read errors. Fixes vs. original: fread return values are
// now checked, the FILE* is closed on the malloc-failure path, and the
// size (an `ul`) is printed with %lu instead of %u.
uch *ReadBMPlin(char* fn)
{
	static uch *Img;
	FILE* f = fopen(fn, "rb");
	if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
	uch HeaderInfo[54];
	// read and validate the 54-byte BMP header
	if (fread(HeaderInfo, sizeof(uch), 54, f) != 54){
		printf("\n\n%s: truncated BMP header\n\n", fn); fclose(f); exit(EXIT_FAILURE);
	}
	// extract image height and width from header (little-endian ints at fixed offsets)
	int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
	int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
	// BMP rows are padded up to a multiple of 4 bytes
	int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
	//save header for re-use when writing the result image
	memcpy(ip.HeaderInfo, HeaderInfo,54);
	printf("\n Input File name: %17s (%u x %u) File Size=%lu", fn,
	ip.Hpixels, ip.Vpixels, (ul)IMAGESIZE);
	// allocate memory to store the main image (1 Dimensional array)
	Img = (uch *)malloc(IMAGESIZE);
	if (Img == NULL){ fclose(f); return Img; } // Cannot allocate memory
	// read the pixel data from disk
	if (fread(Img, sizeof(uch), IMAGESIZE, f) != IMAGESIZE){
		printf("\n\n%s: truncated pixel data\n\n", fn); free(Img); fclose(f); exit(EXIT_FAILURE);
	}
	fclose(f);
	return Img;
}
// Write the 1D linear-memory stored image into file.
// Writes the image in Img back to disk as a BMP, reusing the 54-byte header
// captured by ReadBMPlin (global `ip`). Exits the process on open failure.
void WriteBMPlin(uch *Img, char* fn)
{
	FILE* fp = fopen(fn, "wb");
	if (fp == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
	// header first, then the raw (padded) pixel rows
	fwrite(ip.HeaderInfo, sizeof(uch), 54, fp);
	fwrite(Img, sizeof(uch), IMAGESIZE, fp);
	printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
	fclose(fp);
}
// Edge-detection driver: parse arguments, read a BMP, run the four-stage
// GPU pipeline (BW -> Gauss -> Sobel -> Threshold), copy the result back,
// report per-stage timings/bandwidths, and write the output BMP.
int main(int argc, char **argv)
{
// clock_t CPUStartTime, CPUEndTime, CPUElapsedTime;
// GPU code run times
float totalTime, totalKernelTime, tfrCPUtoGPU, tfrGPUtoCPU;
float kernelExecTimeBW, kernelExecTimeGauss, kernelExecTimeSobel, kernelExecTimeThreshold;
cudaError_t cudaStatus;
cudaEvent_t time1, time2, time2BW, time2Gauss, time2Sobel, time3, time4;
char InputFileName[255], OutputFileName[255], ProgName[255];
ui BlkPerRow, ThrPerBlk=256, NumBlocks;
// per-kernel "bytes touched" counters, used only for the bandwidth report
ui GPUDataTfrBW, GPUDataTfrGauss, GPUDataTfrSobel, GPUDataTfrThresh,GPUDataTfrKernel, GPUDataTfrTotal;
cudaDeviceProp GPUprop;
void *GPUptr; // Pointer to the bulk-allocated GPU memory
ul GPUtotalBufferSize;
ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
strcpy(ProgName, "imedgeG");
// Parse the command line. Cases intentionally fall through so each extra
// argument overrides one default (argc==6 sets all, argc==3 only filenames).
switch (argc){
case 6: ThreshHi = atoi(argv[5]);
case 5: ThreshLo = atoi(argv[4]);
case 4: ThrPerBlk = atoi(argv[3]);
case 3: strcpy(InputFileName, argv[1]);
strcpy(OutputFileName, argv[2]);
break;
default: printf("\n\nUsage: %s InputFilename OutputFilename [ThrPerBlk] [ThreshLo] [ThreshHi]", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256 50 100",ProgName);
exit(EXIT_FAILURE);
}
// Validate launch and threshold parameters before touching the GPU.
if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
exit(EXIT_FAILURE);
}
if ((ThreshLo<0) || (ThreshHi>255) || (ThreshLo>ThreshHi)){
printf("\nInvalid Thresholds: Threshold must be between [0...255] ...\n");
printf("\n\nNothing executed ... Exiting ...\n\n");
exit(EXIT_FAILURE);
}
// Create CPU memory to store the input and output images
TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated
if (TheImg == NULL){
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
CopyImg = (uch *)malloc(IMAGESIZE);
if (CopyImg == NULL){
printf("Cannot allocate memory for the input image...\n");
free(TheImg);
exit(EXIT_FAILURE);
}
// Choose which GPU to run on, change this on a multi-GPU system.
int NumGPUs = 0;
cudaGetDeviceCount(&NumGPUs);
if (NumGPUs == 0){
printf("\nNo CUDA Device is available\n");
goto EXITERROR;
}
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto EXITERROR;
}
// Summarize device limits for the report printed at the end.
cudaGetDeviceProperties(&GPUprop, 0);
SupportedKBlocks = (ui) GPUprop.maxGridSize[0] * (ui) GPUprop.maxGridSize[1] * (ui )GPUprop.maxGridSize[2]/1024;
SupportedMBlocks = SupportedKBlocks / 1024;
sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks>=5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks>=5) ? 'M':'K');
MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
// Events bracket each pipeline stage so per-kernel times can be measured.
cudaEventCreate(&time1); cudaEventCreate(&time2);
cudaEventCreate(&time2BW); cudaEventCreate(&time2Gauss); cudaEventCreate(&time2Sobel);
cudaEventCreate(&time3); cudaEventCreate(&time4);
cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer
// Allocate GPU buffer for the input and output images and the imtermediate results
// (single bulk allocation, carved into 6 regions below)
GPUtotalBufferSize = 4 * sizeof(double)*IMAGEPIX + 2 * sizeof(uch)*IMAGESIZE;
cudaStatus = cudaMalloc((void**)&GPUptr, GPUtotalBufferSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory");
goto EXITERROR;
}
// Carve the bulk buffer: input, result, then four double planes.
GPUImg = (uch *)GPUptr;
GPUResultImg = GPUImg + IMAGESIZE;
GPUBWImg = (double *)(GPUResultImg + IMAGESIZE);
GPUGaussImg = GPUBWImg + IMAGEPIX;
GPUGradient = GPUGaussImg + IMAGEPIX;
GPUTheta = GPUGradient + IMAGEPIX;
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy CPU to GPU failed!");
goto EXITCUDAERROR;
}
cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done
//dim3 dimBlock(ThrPerBlk);
//dim3 dimGrid(ip.Hpixels*BlkPerRow);
// One thread per pixel: BlkPerRow blocks per image row, IPV rows.
BlkPerRow = CEIL(ip.Hpixels, ThrPerBlk);
NumBlocks = IPV*BlkPerRow;
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
BWKernel <<< NumBlocks, ThrPerBlk >>> (GPUBWImg, GPUImg, ip.Hpixels);
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
cudaEventRecord(time2BW, 0); // Time stamp after BW image calculation
GPUDataTfrBW = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
GaussKernel <<< NumBlocks, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, ip.Hpixels, ip.Vpixels);
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
cudaEventRecord(time2Gauss, 0); // Time stamp after Gauss image calculation
GPUDataTfrGauss = 2*sizeof(double)*IMAGEPIX;
SobelKernel <<< NumBlocks, ThrPerBlk >>> (GPUGradient, GPUTheta, GPUGaussImg, ip.Hpixels, ip.Vpixels);
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
cudaEventRecord(time2Sobel, 0); // Time stamp after Gradient, Theta computation
GPUDataTfrSobel = 3 * sizeof(double)*IMAGEPIX;
ThresholdKernel <<< NumBlocks, ThrPerBlk >>> (GPUResultImg, GPUGradient, GPUTheta, ip.Hpixels, ip.Vpixels, ThreshLo, ThreshHi);
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
GPUDataTfrThresh = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
GPUDataTfrKernel = GPUDataTfrBW + GPUDataTfrGauss + GPUDataTfrSobel + GPUDataTfrThresh;
GPUDataTfrTotal = GPUDataTfrKernel + 2 * IMAGESIZE;
cudaEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(CopyImg, GPUResultImg, IMAGESIZE, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy GPU to CPU failed!");
goto EXITCUDAERROR;
}
cudaEventRecord(time4, 0);
// Wait for every recorded event, then compute the stage-to-stage deltas.
cudaEventSynchronize(time1); cudaEventSynchronize(time2);
cudaEventSynchronize(time2BW); cudaEventSynchronize(time2Gauss); cudaEventSynchronize(time2Sobel);
cudaEventSynchronize(time3); cudaEventSynchronize(time4);
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecTimeBW, time2, time2BW);
cudaEventElapsedTime(&kernelExecTimeGauss, time2BW, time2Gauss);
cudaEventElapsedTime(&kernelExecTimeSobel, time2Gauss, time2Sobel);
cudaEventElapsedTime(&kernelExecTimeThreshold, time2Sobel, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
totalKernelTime = kernelExecTimeBW + kernelExecTimeGauss + kernelExecTimeSobel + kernelExecTimeThreshold;
cudaStatus = cudaDeviceSynchronize();
//checkError(cudaGetLastError()); // screen for errors in kernel launches
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
WriteBMPlin(CopyImg, OutputFileName); // Write the flipped image back to disk
// Report device info, parameters, and per-stage timing/bandwidth.
// NOTE(review): MaxThrPerBlk is `ul` but printed with %d below — %lu would
// be the correct conversion.
printf("\n\n----------------------------------------------------------------------------\n");
printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n",
GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk);
printf("----------------------------------------------------------------------------\n");
printf("%s %s %s %u %d %d [%u BLOCKS, %u BLOCKS/ROW]\n", ProgName, InputFileName, OutputFileName, ThrPerBlk, ThreshLo, ThreshHi, NumBlocks, BlkPerRow);
printf("----------------------------------------------------------------------------\n");
printf(" CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE,tfrCPUtoGPU));
printf(" GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU));
printf("----------------------------------------------------------------------------\n");
printf(" BW Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeBW, DATAMB(GPUDataTfrBW), DATABW(GPUDataTfrBW, kernelExecTimeBW));
printf(" Gauss Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeGauss, DATAMB(GPUDataTfrGauss), DATABW(GPUDataTfrGauss, kernelExecTimeGauss));
printf(" Sobel Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeSobel, DATAMB(GPUDataTfrSobel), DATABW(GPUDataTfrSobel, kernelExecTimeSobel));
printf("Threshold Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeThreshold, DATAMB(GPUDataTfrThresh), DATABW(GPUDataTfrThresh, kernelExecTimeThreshold));
printf("----------------------------------------------------------------------------\n");
printf(" Total Kernel-only time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalKernelTime, DATAMB(GPUDataTfrKernel), DATABW(GPUDataTfrKernel, totalKernelTime));
printf(" Total time with I/O included =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalTime, DATAMB(GPUDataTfrTotal), DATABW(GPUDataTfrTotal, totalTime));
printf("----------------------------------------------------------------------------\n");
// Deallocate CPU, GPU memory and destroy events.
cudaFree(GPUptr);
cudaEventDestroy(time1); cudaEventDestroy(time2);
cudaEventDestroy(time2BW); cudaEventDestroy(time2Gauss); cudaEventDestroy(time2Sobel);
cudaEventDestroy(time3); cudaEventDestroy(time4);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
free(TheImg);
free(CopyImg);
return(EXIT_SUCCESS);
// Error exits: fall through so later labels release progressively less.
KERNELERROR:
fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus);
EXITCUDAERROR:
cudaFree(GPUptr);
EXITERROR:
free(TheImg);
free(CopyImg);
return(EXIT_FAILURE);
}
|
7,830 | #include "includes.h"
# define M 10000
# define N 10000
// Element-wise addition of two M x N matrices stored row-major:
// c[i][j] = a[i][j] + b[i][j]. Expects a 2D launch whose x dimension
// covers rows (i < M) and y dimension covers columns (j < N).
// NOTE: the original indexed with i*M+j, which only works because the file
// defines M == N; row-major M x N indexing is i*N+j, used here so the
// kernel stays correct if the two dimensions ever differ.
__global__ void add( int * a, int * b, int * c)
{
	unsigned int i = blockDim.x * blockIdx.x + threadIdx.x; // row
	unsigned int j = blockDim.y * blockIdx.y + threadIdx.y; // column
	if (i < M && j < N)
		c[i*N + j] = a[i*N + j] + b[i*N + j];
} |
7,831 | // First CUDA program
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#define DATA_SIZE 1048576
#define BLOCK_NUM 32
#define THREAD_NUM 256
extern "C" __global__ void sumOfSquares(int *num, int* result, clock_t* time);
// Per-block partial sum of squares over num[0..DATA_SIZE), using a
// shared-memory tree reduction. result[bid] receives block bid's partial
// sum; time[bid] and time[bid + BLOCK_NUM] record the block's start/end
// clock() stamps for timing.
// Launch requirements: BLOCK_NUM blocks of THREAD_NUM (=256) threads and
// THREAD_NUM*sizeof(int) bytes of dynamic shared memory.
__global__ void sumOfSquares(int *num, int* result, clock_t* time)
{
extern __shared__ int shared[];
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int i;
if(tid == 0) time[bid] = clock();
// strided accumulation into this thread's private shared slot
shared[tid] = 0;
for(i = bid * THREAD_NUM + tid; i < DATA_SIZE; i += BLOCK_NUM * THREAD_NUM) {
shared[tid] += num[i] * num[i];
}
__syncthreads();
// Unrolled binary reduction (256 -> 1). Each __syncthreads() sits outside
// the divergent if-body, so every thread in the block reaches the barrier.
if(tid < 128) { shared[tid] += shared[tid + 128]; } __syncthreads();
if(tid < 64) { shared[tid] += shared[tid + 64]; } __syncthreads();
if(tid < 32) { shared[tid] += shared[tid + 32]; } __syncthreads();
if(tid < 16) { shared[tid] += shared[tid + 16]; } __syncthreads();
if(tid < 8) { shared[tid] += shared[tid + 8]; } __syncthreads();
if(tid < 4) { shared[tid] += shared[tid + 4]; } __syncthreads();
if(tid < 2) { shared[tid] += shared[tid + 2]; } __syncthreads();
if(tid < 1) { shared[tid] += shared[tid + 1]; } __syncthreads();
if(tid == 0) {
result[bid] = shared[0];
time[bid + BLOCK_NUM] = clock();
}
}
|
7,832 | #include <cstdio>
#include "cuda.h"
// Minimal device kernel: each launched thread prints one greeting line.
// Device printf output is flushed at the next synchronizing call.
__global__
void GPUFunction()
{
printf("hello from the Gpu.\n");
}
// Launch a single GPU thread and wait for its printf to reach the host.
int main()
{
    GPUFunction<<<1, 1>>>();   // one block, one thread
    cudaDeviceSynchronize();   // block until the device printf is flushed
    return EXIT_SUCCESS;
}
|
7,833 | // Written by Barry Wilkinson, UNC-Charlotte. Pi.cu December 22, 2010.
//Derived somewhat from code developed by Patrick Rogers, UNC-C
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <time.h>
#include <curand_kernel.h>
#define BLOCKS 512
#define THREADS 512
#define PI 3.141592654 // known value of pi
// Computes one Leibniz-series term per thread:
//   estimate[tid] = (-1)^(tid+1) / (2*tid - 1)
// so the sum of all entries approaches pi/4 (the host scales by 4).
// NOTE: the original wrote `4.0*(tid%2 == 1)? x : -x`, which parses as
// `(4.0*(tid%2==1)) ? x : -x` — the 4.0 only acted as the condition and
// never scaled the result. The explicit ternary below preserves that
// actual behavior while removing the precedence trap.
// For tid == 0 the unsigned expression 2*tid-1 wraps to 0xFFFFFFFF, making
// the term ~2.3e-10, so it effectively drops out of the sum.
__global__ void gpu_monte_carlo(float *estimate) {
	unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
	float j = 2*tid-1;      // denominator term (unsigned wrap at tid==0 is benign)
	float x = 1.0/ j ;
	estimate[tid] = (tid % 2 == 1) ? x : -x;
}
// Host driver: launch one thread per series term, sum the terms on the CPU,
// and report the pi estimate plus timing.
// Fixes vs. original: pi_gpu was read uninitialized before accumulation,
// the printed error term omitted the x4 scaling applied to the estimate,
// and the device buffer was never freed.
int main (int argc, char *argv[]) {
	clock_t start, stop;
	float host[BLOCKS * THREADS];
	float *dev;
	printf("# of trials = %d, # of blocks = %d, # of threads/block = %d.\n", BLOCKS * THREADS,
	BLOCKS, THREADS);
	start = clock();
	cudaMalloc((void **) &dev, BLOCKS * THREADS * sizeof(float)); // one series term per thread
	gpu_monte_carlo<<<BLOCKS, THREADS>>>(dev);
	cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(float), cudaMemcpyDeviceToHost); // blocking copy also syncs with the kernel
	cudaFree(dev); // was leaked in the original
	float pi_gpu = 0.0f; // must start at zero before accumulating
	for(int i = 0; i < BLOCKS * THREADS; i++) {
		pi_gpu += host[i];
	}
	stop = clock();
	printf("GPU pi calculated in %f s.\n", (stop-start)/(float)CLOCKS_PER_SEC);
	// pi_gpu holds ~pi/4; scale by 4 for both the estimate and the error term
	printf("CUDA estimate of PI = %.10g [error of %.10g]\n", pi_gpu*4.0, pi_gpu*4.0 - PI);
	return 0;
}
|
7,834 | #include "includes.h"
#define Width 1920
#define Height 2520
#define iterations 100
// 3x3 blur (weights 1-2-1 / 2-4-2 / 1-2-1, normalized by 16) over a
// Width x Height 8-bit image, one 32x32 shared tile per block.
// The mappings x = i - 2*blockIdx.x - 1 and y = j - 2*blockIdx.y - 1 make
// consecutive blocks overlap by two pixels, so each tile carries a 1-pixel
// halo; only the 30x30 interior threads write output. Coordinates are
// clamped at the image edges (replicated border).
// NOTE(review): As is indexed [threadIdx.x][threadIdx.y] while x varies
// with threadIdx.x — this transposed layout causes shared-memory bank
// conflicts but is benign for correctness.
__global__ void convolution_kernel(unsigned char* A, unsigned char* B)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int x = i-2*blockIdx.x-1;
int y = j-2*blockIdx.y-1;
__shared__ unsigned char As[32][32];
//Copy from global memory to shared memory
// clamp the halo coordinates to the image (replicate border pixels)
if (x<0) {
x=0;
} else if (x==Width) {
x=Width-1;
}
if (y<0) {
y=0;
} else if (y == Height) {
y = Height-1;
}
As[threadIdx.x][threadIdx.y] = A[Width*y + x];
__syncthreads();
// Computations
// only interior tile threads (not the halo ring) produce an output pixel
if (threadIdx.x!=0 && threadIdx.x!=31 && threadIdx.y!=0 && threadIdx.y!=31) {
B[Width*y + x] = (As[threadIdx.x-1][threadIdx.y-1] +
As[threadIdx.x ][threadIdx.y-1] * 2 +
As[threadIdx.x+1][threadIdx.y-1] +
As[threadIdx.x-1][threadIdx.y ] *2 +
As[threadIdx.x ][threadIdx.y ] *4 +
As[threadIdx.x+1][threadIdx.y ] * 2 +
As[threadIdx.x-1][threadIdx.y+1] * 1 +
As[threadIdx.x ][threadIdx.y+1] * 2 +
As[threadIdx.x+1][threadIdx.y+1] * 1)/16;
}
} |
7,835 | #pragma once
#include <string>
#include <array>
#include <iostream>
#include "Vector3.cuh.cu"
namespace RayTracing
{
// Maximum length (bytes) of fixed-size strings read from the config file.
constexpr size_t CONFIG_STRING_MAX_COUNT = 1024;
// Motion-path parameters in cylindrical coordinates (r, z, phi).
// NOTE(review): by naming convention *A look like amplitudes, *Om angular
// frequencies, *P phases — confirm against the operator>> parser and the
// code that evaluates the trajectory.
struct Trajectory
{
float r, z, phi;
float rA, zA;
float rOm, zOm, phiOm;
float rP, zP;
}; // Trajectory
// Geometry/material description of one figure in the scene.
struct FigureData
{
RayTracing::Vector3 origin;
RayTracing::Color color;
float radius;
float reflectance, transparency;
int edgeLightsNum;
}; // FigureData
// Textured floor quad defined by its four corner points A..D.
struct FloorData
{
RayTracing::Vector3 A, B, C, D;
char texturePath[CONFIG_STRING_MAX_COUNT];
RayTracing::Color color;
float reflectance;
}; // FloorData
// Spherical light source (position, size, color).
struct LightSourceData
{
RayTracing::Vector3 origin;
float radius;
RayTracing::Color color;
}; // LightSource
// Full scene/render configuration as parsed from an input stream.
struct Config
{
int framesNum;
char outputTemplate[CONFIG_STRING_MAX_COUNT]; // filename template for output frames
int width;
int height;
float horizontalViewDegrees;
Trajectory lookFrom, lookAt;
FigureData A, B, C;
FloorData floorData;
int lightSourcesNum; // presumably the number of valid entries in lightSources (max 4) — confirm in parser
std::array<LightSourceData, 4> lightSources;
int recursionDepth;
float samplesPerPixel;
}; // Config
// Stream extraction operators, implemented in the matching .cu file.
std::istream& operator>>(std::istream &istream, Config& config);
std::istream& operator>>(std::istream &istream, Trajectory& trajectory);
std::istream& operator>>(std::istream &istream, FigureData& figureData);
std::istream& operator>>(std::istream &istream, FloorData& floorData);
std::istream& operator>>(std::istream &istream, LightSourceData& lightSourceData);
} // namespace RayTracing
|
7,836 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#define BIN_WIDTH 0.25
#define BLOCK_DIM 256
#define COVERAGE 180
#define LINE_LENGTH 30
#define BINS_TOTAL (COVERAGE * (int)(1 / BIN_WIDTH))
typedef struct Galaxy
{
float declination;
float declination_cos;
float declination_sin;
float right_ascension;
} Galaxy;
__global__ void adjust_galaxy_set(Galaxy *galaxy_set, int n);
__global__ void collect_histograms(Galaxy *D_galaxy_set, Galaxy *R_galaxy_set, int *DD_histogram_collected, int *DR_histogram_collected, int *RR_histogram_collected, int n);
__global__ void measure_galaxy_distribution(int *DD_histogram, int *DR_histogram, int *RR_histogram, float *distribution, int n);
void accumulate_histograms(int *DD_histogram_collected, int *DR_histogram_collected, int *RR_histogram_collected, int *DD_histogram, int *DR_histogram, int *RR_histogram, int n);
void read_file(FILE *file_pointer, const char *DELIMITER, Galaxy *galaxy_set, int n);
void write_file_int(FILE *file_pointer, int *content, int n);
void write_file_float(FILE *file_pointer, float *content, int n);
// Driver for the two-point angular correlation computation: read the real
// and random galaxy catalogs, convert coordinates on the GPU, build the
// DD/DR/RR angle histograms, derive the distribution, and write results.
int main()
{
/* READING REAL GALAXIES FILE */
// NOTE(review): fopen results are never checked; a missing input file
// crashes on the first fgets.
FILE *file_pointer = fopen("./input-data/real-galaxies.txt", "r");
const char *DELIMITER = "\t";
// Reads number of galaxies to process (defined on the first line of the file).
char line[LINE_LENGTH];
fgets(line, LINE_LENGTH, file_pointer);
const int GALAXIES_TOTAL = atoi(line);
// Managed memory: filled on the host by read_file, consumed by kernels.
Galaxy *real;
cudaMallocManaged(&real, GALAXIES_TOTAL * sizeof(Galaxy));
read_file(file_pointer, DELIMITER, real, GALAXIES_TOTAL);
/* READING RANDOM GALAXIES FILE */
// NOTE(review): the previous FILE* is overwritten without fclose — the
// real-galaxies handle leaks (only the final file is closed at the end).
file_pointer = fopen("./input-data/random-galaxies.txt", "r");
DELIMITER = " ";
// Checks that number of galaxies is equal in both files.
fgets(line, LINE_LENGTH, file_pointer);
if (GALAXIES_TOTAL != atoi(line))
{
printf("Both files should have equal number of galaxies!");
return 1;
}
Galaxy *random;
cudaMallocManaged(&random, GALAXIES_TOTAL * sizeof(Galaxy));
read_file(file_pointer, DELIMITER, random, GALAXIES_TOTAL);
/* ADJUSTING GALAXY SETS */
// convert arc minutes to radians and cache sin/cos on the device
int GRID_DIM = ceilf(GALAXIES_TOTAL / (float)BLOCK_DIM);
adjust_galaxy_set<<<GRID_DIM, BLOCK_DIM>>>(real, GALAXIES_TOTAL);
adjust_galaxy_set<<<GRID_DIM, BLOCK_DIM>>>(random, GALAXIES_TOTAL);
/* COLLECTING HISTOGRAMS */
// one BINS_TOTAL-sized section per block, summed on the host afterwards
const int COLLECTED_HISTOGRAM_SIZE = GRID_DIM * BINS_TOTAL;
int *DD_histogram_collected, *DR_histogram_collected, *RR_histogram_collected;
cudaMallocManaged(&DD_histogram_collected, COLLECTED_HISTOGRAM_SIZE * sizeof(int));
cudaMallocManaged(&DR_histogram_collected, COLLECTED_HISTOGRAM_SIZE * sizeof(int));
cudaMallocManaged(&RR_histogram_collected, COLLECTED_HISTOGRAM_SIZE * sizeof(int));
collect_histograms<<<GRID_DIM, BLOCK_DIM>>>(real, random, DD_histogram_collected, DR_histogram_collected, RR_histogram_collected, GALAXIES_TOTAL);
cudaDeviceSynchronize();
/* ACCUMULATING HISTOGRAMS */
// NOTE(review): accumulate_histograms does += into these buffers;
// cudaMallocManaged does not guarantee zero-filled memory — confirm the
// buffers are zeroed before accumulation.
int *DD_histogram, *DR_histogram, *RR_histogram;
cudaMallocManaged(&DD_histogram, BINS_TOTAL * sizeof(int));
cudaMallocManaged(&DR_histogram, BINS_TOTAL * sizeof(int));
cudaMallocManaged(&RR_histogram, BINS_TOTAL * sizeof(int));
accumulate_histograms(DD_histogram_collected, DR_histogram_collected, RR_histogram_collected, DD_histogram, DR_histogram, RR_histogram, COLLECTED_HISTOGRAM_SIZE);
/* DETERMINING DISTRIBUTION */
float *distribution;
cudaMallocManaged(&distribution, BINS_TOTAL * sizeof(float));
GRID_DIM = ceilf(BINS_TOTAL / (float)BLOCK_DIM);
measure_galaxy_distribution<<<GRID_DIM, BLOCK_DIM>>>(DD_histogram, DR_histogram, RR_histogram, distribution, BINS_TOTAL);
cudaDeviceSynchronize();
/* WRITING RESULTS TO FILE */
system("mkdir -p results");
file_pointer = fopen("results/DD_histogram.txt", "w");
write_file_int(file_pointer, DD_histogram, BINS_TOTAL);
file_pointer = fopen("results/RR_histogram.txt", "w");
write_file_int(file_pointer, RR_histogram, BINS_TOTAL);
file_pointer = fopen("results/Distribution.txt", "w");
write_file_float(file_pointer, distribution, BINS_TOTAL);
/* CLEAN UP */
// NOTE(review): the *_collected buffers and `distribution` are never
// cudaFree'd, and only the last opened FILE* is closed.
fclose(file_pointer);
cudaFree(real);
cudaFree(random);
cudaFree(DD_histogram);
cudaFree(DR_histogram);
cudaFree(RR_histogram);
printf("Galaxy distribution successfully calculated!\n");
return 0;
}
// Converts an angle expressed in arc minutes to radians.
__device__ float arcminutes_to_radians(float arcmin)
{
    return (M_PI * arcmin) / (60 * 180);
}
// Grid-stride conversion of every galaxy's coordinates from arc minutes to
// radians, caching sin/cos of the declination for the pair-angle formula.
__global__ void adjust_galaxy_set(Galaxy *galaxy_set, int n)
{
    int stride = blockDim.x * gridDim.x;
    for (int g = blockIdx.x * blockDim.x + threadIdx.x; g < n; g += stride)
    {
        Galaxy *current = &galaxy_set[g];
        float declination = arcminutes_to_radians(current->declination);
        current->declination = declination;
        current->declination_cos = cosf(declination);
        current->declination_sin = sinf(declination);
        current->right_ascension = arcminutes_to_radians(current->right_ascension);
    }
}
// Angular separation between two galaxies on the sky, via the spherical
// law of cosines using the precomputed sin/cos declinations.
__device__ float angle_between_galaxies(Galaxy first_galaxy, Galaxy second_galaxy)
{
    float cos_angle = first_galaxy.declination_sin * second_galaxy.declination_sin +
                      first_galaxy.declination_cos * second_galaxy.declination_cos *
                      cosf(first_galaxy.right_ascension - second_galaxy.right_ascension);
    // Guard against rounding pushing the cosine outside acosf's [-1, 1] domain.
    cos_angle = fminf(1.0f, fmaxf(-1.0f, cos_angle));
    return acosf(cos_angle);
}
// Converts an angle in radians to degrees.
__device__ float radians_to_degrees(float radians)
{
    return radians * (180 / M_PI);
}
// Atomically adds `incrementor` to the histogram bin covering `angle`
// (radians); bins are BIN_WIDTH degrees wide.
__device__ void update_bin(int *bin, float angle, int incrementor)
{
    int bin_index = (int)floorf(radians_to_degrees(angle) / BIN_WIDTH);
    atomicAdd(bin + bin_index, incrementor);
}
// One pass over all galaxy pairs, accumulating three angular histograms:
// DD (real-real), DR (real-random) and RR (random-random).
// Each block accumulates into private shared-memory histograms (cheap
// atomics) and then writes its own BINS_TOTAL-sized section of the
// *_collected arrays; the host sums the sections afterwards.
// Requires 3 * BINS_TOTAL * sizeof(int) of static shared memory per block;
// both __syncthreads() calls are outside divergent control flow.
__global__ void collect_histograms(Galaxy *D_galaxy_set, Galaxy *R_galaxy_set, int *DD_histogram_collected, int *DR_histogram_collected, int *RR_histogram_collected, int n)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
// Shared arrays are used to reduce duration of atomic adding when updating results.
// Each block uses its own set of arrays of histograms, equal to the size of the actual histogram arrays.
__shared__ int shared_DD_histogram[BINS_TOTAL];
__shared__ int shared_DR_histogram[BINS_TOTAL];
__shared__ int shared_RR_histogram[BINS_TOTAL];
// shared memory is uninitialized: zero it cooperatively before use
for (int i = threadIdx.x; i < BINS_TOTAL; i += blockDim.x)
{
shared_DD_histogram[i] = 0;
shared_DR_histogram[i] = 0;
shared_RR_histogram[i] = 0;
}
__syncthreads();
for (int i = 0; i < n; i += 1)
for (int j = index; j < n; j += stride)
{
// Every pair of D-R galaxy is compared.
float angle = angle_between_galaxies(D_galaxy_set[i], R_galaxy_set[j]);
update_bin(shared_DR_histogram, angle, 1);
// D-D and R-R galaxy pairs are only compared from the same starting index forward.
// If both indexes are the same, the relevant bin is incremented by one.
if (j == i)
{
angle = 0;
update_bin(shared_DD_histogram, angle, 1);
update_bin(shared_RR_histogram, angle, 1);
}
// When one of the indexes is greater, the relevant bin is incremented by two.
// This is the same as doing the comparison twice, thus saving execution time.
else if (j > i)
{
angle = angle_between_galaxies(D_galaxy_set[i], D_galaxy_set[j]);
update_bin(shared_DD_histogram, angle, 2);
angle = angle_between_galaxies(R_galaxy_set[i], R_galaxy_set[j]);
update_bin(shared_RR_histogram, angle, 2);
}
}
__syncthreads();
// The section corresponding to the current block is updated in the collected histogram arrays.
for (int i = threadIdx.x; i < BINS_TOTAL; i += blockDim.x)
{
DD_histogram_collected[blockIdx.x * BINS_TOTAL + i] = shared_DD_histogram[i];
DR_histogram_collected[blockIdx.x * BINS_TOTAL + i] = shared_DR_histogram[i];
RR_histogram_collected[blockIdx.x * BINS_TOTAL + i] = shared_RR_histogram[i];
}
}
// Computes the correlation estimator (DD - 2*DR + RR) / RR for each bin.
// Bins with RR == 0 are written as 0.0f instead of being skipped: the
// output comes from cudaMallocManaged, which does not guarantee
// zero-initialized memory, so the original `continue` could leave
// uninitialized garbage in those bins.
__global__ void measure_galaxy_distribution(int *DD_histogram, int *DR_histogram, int *RR_histogram, float *distribution, int n)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = index; i < n; i += stride)
    {
        if (RR_histogram[i] == 0)
        {
            distribution[i] = 0.0f; // estimator undefined: no random-random pairs
            continue;
        }
        distribution[i] = (DD_histogram[i] - 2.0f * DR_histogram[i] + RR_histogram[i]) / RR_histogram[i];
    }
}
// Sums the per-block partial histograms (laid out as consecutive
// BINS_TOTAL-sized sections, n entries total) into the three final
// histograms. The outputs are cleared first: they come from
// cudaMallocManaged, which does not guarantee zero-filled memory, so a
// bare `+=` could fold garbage into the totals.
void accumulate_histograms(int *DD_histogram_collected, int *DR_histogram_collected, int *RR_histogram_collected, int *DD_histogram, int *DR_histogram, int *RR_histogram, int n)
{
    for (int bin = 0; bin < BINS_TOTAL; bin += 1)
    {
        DD_histogram[bin] = 0;
        DR_histogram[bin] = 0;
        RR_histogram[bin] = 0;
    }
    for (int i = 0; i < n; i += 1)
    {
        DD_histogram[i % BINS_TOTAL] += DD_histogram_collected[i];
        DR_histogram[i % BINS_TOTAL] += DR_histogram_collected[i];
        RR_histogram[i % BINS_TOTAL] += RR_histogram_collected[i];
    }
}
// Parses `n` lines of DELIMITER-separated arc-minute values into
// galaxy_set: column 0 is right ascension, column 1 is declination;
// any further columns on a line are ignored.
void read_file(FILE *file_pointer, const char *DELIMITER, Galaxy *galaxy_set, int n)
{
    const int RIGHT_ASCENSION_INDEX = 0;
    const int DECLINATION_INDEX = 1;
    char line[LINE_LENGTH];
    for (int row = 0; row < n; row += 1)
    {
        fgets(line, LINE_LENGTH, file_pointer);
        int column = 0;
        for (char *token = strtok(line, DELIMITER); token != NULL; token = strtok(NULL, DELIMITER))
        {
            float arcminute_value = atof(token);
            if (column == DECLINATION_INDEX)
                galaxy_set[row].declination = arcminute_value;
            else if (column == RIGHT_ASCENSION_INDEX)
                galaxy_set[row].right_ascension = arcminute_value;
            column += 1;
        }
    }
}
// Writes `n` integers to the open file, one per line.
void write_file_int(FILE *file_pointer, int *content, int n)
{
    int i = 0;
    while (i < n)
    {
        fprintf(file_pointer, "%d\n", content[i]);
        i += 1;
    }
}
// Writes `n` floats to the open file, one per line (printf "%f" format).
void write_file_float(FILE *file_pointer, float *content, int n)
{
    int i = 0;
    while (i < n)
    {
        fprintf(file_pointer, "%f\n", content[i]);
        i += 1;
    }
}
|
7,837 | #include "includes.h"
// SGD-with-momentum parameter update (optionally Nesterov) over a flat
// array, using a grid-stride loop. The raw gradient is clipped to
// [-gradClip, gradClip]; d is zeroed after being consumed so the caller
// can re-accumulate gradients into it.
__global__ void momentum_update_1D_1D(float* x, float* d, float* m, float learning_rate, float momentum, float gradClip, bool nesterov, int size) {
	int stride = gridDim.x * blockDim.x;
	for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += stride) {
		// clip the raw gradient
		float grad = d[i];
		if (grad > gradClip) grad = gradClip;
		if (grad < -gradClip) grad = -gradClip;
		// velocity update: m <- momentum * m + grad
		m[i] *= momentum;
		m[i] += grad;
		// Nesterov looks ahead along the updated velocity
		float step;
		if (nesterov) {
			step = grad + momentum * m[i];
		} else {
			step = m[i];
		}
		x[i] -= learning_rate * step;
		d[i] = 0; // gradient consumed; reset for the next accumulation
	}
}
7,838 | //
// Created by igor on 23.05.2021.
//
#include "Material.cuh"
|
7,839 | #include "includes.h"
// Scales cells of a polar grid (rings x sectors, row-major by ring, nsec
// sectors per ring) by ScalingFactor. 2D launch: x indexes the sector j,
// y indexes the ring i.
__global__ void MultiplyPolarGridbyConstantKernel (double *Dens, int nrad, int nsec, double ScalingFactor)
{
int j = threadIdx.x + blockDim.x*blockIdx.x; // sector index
int i = threadIdx.y + blockDim.y*blockIdx.y; // ring index
// NOTE(review): `i<=nrad` touches ring nrad as well, i.e. (nrad+1)*nsec
// cells. This is only safe if Dens is allocated with nrad+1 rings —
// confirm at the call site; otherwise this should be `i<nrad`.
if (i<=nrad && j<nsec)
Dens[i*nsec + j] *= ScalingFactor;
} |
7,840 | /* compile with: nvcc -O3 hw1.cu -o hw1 */
#include <stdio.h>
#include <sys/time.h>
///////////////////////////////////////////////// DO NOT CHANGE ///////////////////////////////////////
#define IMG_HEIGHT 256
#define IMG_WIDTH 256
#define N_IMAGES 10000
typedef unsigned char uchar;
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
// Histogram-equalizes one IMG_WIDTH x IMG_HEIGHT grayscale image (CPU reference).
// Builds a 256-bin histogram, takes its inclusive prefix sum (CDF), finds the
// first nonzero CDF entry, derives the standard equalization remap table, and
// rewrites every pixel through it.
void process_image(uchar *img_in, uchar *img_out) {
    const int n_pixels = IMG_WIDTH * IMG_HEIGHT;
    int histogram[256] = { 0 };
    for (int p = 0; p < n_pixels; p++)
        histogram[img_in[p]]++;
    // Inclusive prefix sum of the histogram -> cumulative distribution.
    int cdf[256];
    int running = 0;
    for (int v = 0; v < 256; v++) {
        running += histogram[v];
        cdf[v] = running;
    }
    // First nonzero CDF entry = count of the darkest gray level present.
    int cdf_min = 0;
    for (int v = 0; v < 256; v++) {
        if (cdf[v]) {
            cdf_min = cdf[v];
            break;
        }
    }
    // Gray-level remap table: (cdf - cdf_min) / (pixels - cdf_min) * 255.
    uchar map[256];
    for (int v = 0; v < 256; v++) {
        int mapped = (float)(cdf[v] - cdf_min) / (n_pixels - cdf_min) * 255;
        map[v] = (uchar)mapped;
    }
    for (int p = 0; p < n_pixels; p++)
        img_out[p] = map[img_in[p]];
}
/* Wall-clock time in milliseconds, derived from gettimeofday. */
double static inline get_time_msec(void) {
    struct timeval now;
    gettimeofday(&now, NULL);
    return 1e+3 * now.tv_sec + 1e-3 * now.tv_usec;
}
// Sum of squared per-pixel differences across the two full image arrays
// (all N_IMAGES images); used to verify GPU output against the CPU baseline.
long long int distance_sqr_between_image_arrays(uchar *img_arr1, uchar *img_arr2) {
    const long long int total = (long long int)N_IMAGES * IMG_WIDTH * IMG_HEIGHT;
    long long int acc = 0;
    for (long long int p = 0; p < total; p++) {
        int diff = img_arr1[p] - img_arr2[p];
        acc += (long long int)(diff * diff);
    }
    return acc;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// Returns the smallest nonzero value of arr[] (the VALUE, not its index),
// assuming arr is nondecreasing (a CDF) and that arr_size threads of the block
// call this together with threadIdx.x in [0, arr_size).
// The single thread sitting at the first nonzero entry publishes it through
// shared memory; the barrier then makes it visible to every caller.
// NOTE(review): if every entry of arr is zero, SharedMin is never written and
// the return value is indeterminate -- confirm callers guarantee a nonzero CDF.
__device__ int arr_min(int arr[], int arr_size) {
// we assume arr_size threads call this function for arr[]
__shared__ int SharedMin;
int tid = threadIdx.x;
if((arr[tid] > 0) && ((tid == 0) || (arr[tid-1] == 0))) // cdf is a rising function, so only the first non zero will have zero before it.
SharedMin = arr[tid];
__syncthreads();
return SharedMin;
}
// In-place inclusive prefix sum (Hillis-Steele scan) of arr[0..arr_size-1].
// Each doubling stride adds the element `stride` slots back.
// Assumes it is called by the whole block with blockDim.x == arr_size
// (every thread indexes arr[tid]) -- the only call site uses 256/256.
// Both barriers are required: the first separates reading the old neighbor
// value from overwriting it; the second separates successive strides.
__device__ void prefix_sum(int arr[], int arr_size) {
int tid = threadIdx.x;
int increment;
// start doing reductions, like in the tutorial
for ( int stride = 1 ; stride <= arr_size-1 ; stride*=2 )
{
increment = 0;
if(tid>=stride)
increment = arr[tid - stride];
__syncthreads();
arr[tid] += increment;
__syncthreads();
}
return;
}
// One thread block equalizes one image: blockIdx.x selects the image and the
// block must be launched with exactly 256 threads (one per gray level /
// per image column). Uses shared hist[] for the histogram->CDF and a shared
// remap table.
// Fix vs. original: the final remap loop hard-coded 256 where IMG_WIDTH was
// meant (same value today, but inconsistent with the rest of the file), and
// the arr_min result was misleadingly named cdfMinId although it is a value.
__global__ void process_image_kernel(uchar *in, uchar *out) {
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    __shared__ int hist[256];
    // Each thread zeroes its own histogram bin.
    hist[tid] = 0;
    // Advance in/out to the start of this block's image.
    in += bid * IMG_HEIGHT * IMG_WIDTH;
    out += bid * IMG_HEIGHT * IMG_WIDTH;
    __syncthreads(); // hist fully zeroed before accumulation
    // Accumulate the histogram one row at a time; thread tid handles column tid.
    for (int row = 0; row < IMG_HEIGHT; ++row) {
        int val = in[IMG_WIDTH * row + tid];
        atomicAdd(&hist[val], 1);
    }
    __syncthreads();
    // In-place inclusive scan: hist now holds the CDF.
    prefix_sum(hist, 256);
    __syncthreads();
    // Smallest nonzero CDF value (a value, not an index).
    int cdfMin = arr_min(hist, 256);
    // Per-gray-level remap table.
    __shared__ uchar map[256];
    map[tid] = 255 * (((double)(hist[tid] - cdfMin)) / (IMG_HEIGHT * IMG_WIDTH - cdfMin));
    __syncthreads();
    // Remap every pixel of the output image through the table.
    for (int row = 0; row < IMG_HEIGHT; ++row)
        out[IMG_WIDTH * row + tid] = map[in[IMG_WIDTH * row + tid]];
    return;
}
// Benchmark driver: runs histogram equalization on N_IMAGES random images
// three ways -- CPU reference, GPU one-image-at-a-time ("task serial"), and
// GPU all-at-once ("bulk") -- timing each and checking the GPU outputs match
// the CPU baseline exactly (distance 0).
int main() {
///////////////////////////////////////////////// DO NOT CHANGE ///////////////////////////////////////
uchar *images_in;
uchar *images_out_cpu; //output of CPU computation. In CPU memory.
uchar *images_out_gpu_serial; //output of GPU task serial computation. In CPU memory.
uchar *images_out_gpu_bulk; //output of GPU bulk computation. In CPU memory.
CUDA_CHECK( cudaHostAlloc(&images_in, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
CUDA_CHECK( cudaHostAlloc(&images_out_cpu, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
CUDA_CHECK( cudaHostAlloc(&images_out_gpu_serial, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
CUDA_CHECK( cudaHostAlloc(&images_out_gpu_bulk, N_IMAGES * IMG_HEIGHT * IMG_WIDTH, 0) );
/* instead of loading real images, we'll load the arrays with random data */
srand(0);
for (long long int i = 0; i < N_IMAGES * IMG_WIDTH * IMG_HEIGHT; i++) {
images_in[i] = rand() % 256;
}
double t_start, t_finish;
// CPU computation. For reference. Do not change
printf("\n=== CPU ===\n");
t_start = get_time_msec();
for (int i = 0; i < N_IMAGES; i++) {
uchar *img_in = &images_in[i * IMG_WIDTH * IMG_HEIGHT];
uchar *img_out = &images_out_cpu[i * IMG_WIDTH * IMG_HEIGHT];
process_image(img_in, img_out);
}
t_finish = get_time_msec();
printf("total time %f [msec]\n", t_finish - t_start);
long long int distance_sqr;
///////////////////////////////////////////////////////////////////////////////////////////////////////////
// GPU task serial computation
printf("\n=== GPU Task Serial ===\n"); //Do not change
// allocate GPU memory for a single input image and a single output image
uchar *img_in, *img_out;
CUDA_CHECK(cudaMalloc((void**)&img_in,IMG_HEIGHT*IMG_WIDTH));
CUDA_CHECK(cudaMalloc((void**)&img_out,IMG_HEIGHT*IMG_WIDTH));
t_start = get_time_msec(); //Do not change
//in a for loop:
// 1. copy the relevant image from images_in to the GPU memory you allocated
// 2. invoke GPU kernel on this image
// 3. copy output from GPU memory to relevant location in images_out_gpu_serial
for(int i = 0; i < N_IMAGES; ++i)
{
CUDA_CHECK(cudaMemcpy(img_in,&images_in[i * IMG_WIDTH * IMG_HEIGHT], IMG_HEIGHT * IMG_WIDTH,cudaMemcpyHostToDevice));
// One block of 256 threads per image (kernel requires exactly 256 threads).
dim3 threads(256),blocks(1);
process_image_kernel<<<blocks,threads>>>(img_in,img_out);
CUDA_CHECK(cudaDeviceSynchronize());
// Check for errors
cudaError_t error = cudaGetLastError();
if(error != cudaSuccess){
fprintf(stderr,"Kernel execution failed:%s\n",cudaGetErrorString(error));
return 1;
}
// no error, copy image to cpu memory
CUDA_CHECK(cudaMemcpy(&images_out_gpu_serial[i * IMG_WIDTH * IMG_HEIGHT], img_out, IMG_HEIGHT * IMG_WIDTH,cudaMemcpyDeviceToHost));
}
t_finish = get_time_msec(); //Do not change
distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_serial); // Do not change
printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); //Do not change
// Free the allocated memory before rewriting the pointers
CUDA_CHECK(cudaFree(img_in));
CUDA_CHECK(cudaFree(img_out));
// GPU bulk
printf("\n=== GPU Bulk ===\n"); //Do not change
//allocate GPU memory for a all input images and all output images
CUDA_CHECK(cudaMalloc((void**)&img_in,IMG_HEIGHT*IMG_WIDTH*N_IMAGES));
CUDA_CHECK(cudaMalloc((void**)&img_out,IMG_HEIGHT*IMG_WIDTH*N_IMAGES));
t_start = get_time_msec(); //Do not change
//copy all input images from images_in to the GPU memory you allocated
//invoke a kernel with N_IMAGES threadblocks, each working on a different image
//copy output images from GPU memory to images_out_gpu_bulk
CUDA_CHECK(cudaMemcpy(img_in,images_in, N_IMAGES * IMG_HEIGHT * IMG_WIDTH,cudaMemcpyHostToDevice));
// One block per image; blockIdx.x selects the image inside the kernel.
dim3 threads(256),blocks(N_IMAGES);
process_image_kernel<<<blocks,threads>>>(img_in,img_out);
CUDA_CHECK(cudaDeviceSynchronize());
CUDA_CHECK(cudaMemcpy(images_out_gpu_bulk, img_out, N_IMAGES * IMG_HEIGHT * IMG_WIDTH,cudaMemcpyDeviceToHost));
t_finish = get_time_msec(); //Do not change
distance_sqr = distance_sqr_between_image_arrays(images_out_cpu, images_out_gpu_bulk); // Do not change
printf("total time %f [msec] distance from baseline %lld (should be zero)\n", t_finish - t_start, distance_sqr); //Do not chhange
// Free all of the remaining allocated memory before completion
CUDA_CHECK(cudaFree(img_in));
CUDA_CHECK(cudaFree(img_out));
CUDA_CHECK(cudaFreeHost(images_in));
CUDA_CHECK(cudaFreeHost(images_out_cpu));
CUDA_CHECK(cudaFreeHost(images_out_gpu_bulk));
CUDA_CHECK(cudaFreeHost(images_out_gpu_serial));
return 0;
}
7,841 | #include "aabb_tree_node.cuh"
#include <assert.h>
#include <stdio.h>
#include <new>
// Default constructor: an empty leaf node with a default-constructed box.
AABBTreeNode::AABBTreeNode() {
    leaf = true;
    bounding_box = AABB3();
}
// Internal-node constructor: stores both children and sets this node's
// bounding box to the union of theirs.
AABBTreeNode::AABBTreeNode(AABBTreeNode* c1, AABBTreeNode* c2) {
    leaf = false;
    this->c1 = c1;
    this->c2 = c2;
    bounding_box = c1->bounding_box.get_union(&c2->bounding_box);
}
// Leaf constructor: wraps a single Block; the Block pointer is stashed in the
// c1 slot and its bounding box is copied into this node.
AABBTreeNode::AABBTreeNode(Block* child) {
    leaf = true;
    bounding_box = *child->get_bounding_box();
    c1 = child;
}
bool AABBTreeNode::is_leaf() {
return this->leaf;
}
// First child of an internal node (asserts this is not a leaf).
AABBTreeNode* AABBTreeNode::get_c1() {
    assert(!leaf);
    return (AABBTreeNode*) c1;
}
// Second child of an internal node (asserts this is not a leaf).
AABBTreeNode* AABBTreeNode::get_c2() {
    assert(!leaf);
    return (AABBTreeNode*) c2;
}
// The wrapped Block of a leaf node (asserts this is a leaf); the Block
// pointer is stored in the c1 slot.
Block* AABBTreeNode::get_leaf() {
    assert(leaf);
    return (Block*) c1;
}
// Pointer to this node's bounding box (valid for the node's lifetime).
AABB3* AABBTreeNode::get_bounding_box() {
    return &bounding_box;
}
// Splits this leaf in two: the current Block and the new one each become a
// fresh leaf node, and *this is re-constructed in place (placement new) as
// their internal parent. Asserts if called on a non-leaf.
// NOTE(review): the two child nodes are heap-allocated here and never freed
// by this class in the visible code -- confirm ownership/cleanup elsewhere.
void AABBTreeNode::insert_block(Block* child) {
if (is_leaf()) {
AABBTreeNode* a = new AABBTreeNode(this->get_leaf());
AABBTreeNode* b = new AABBTreeNode(child);
new (this) AABBTreeNode(a, b);
} else {
assert(false);
}
}
// Projects this node's 3D bounding box onto the view plane of the given
// frustum and returns the 2D screen-space box bounding all eight projected
// corners. Screen coordinates are normalized to [-0.5, 0.5] on both axes,
// with off-screen points clamped to the edge.
// NOTE(review): assumes orig_a..orig_d are the corners of the view plane
// (up = b-a, side = d-a, center = midpoint of a and c) -- confirm against
// Frustrum's definition.
AABB2 AABBTreeNode::trace_box(Frustrum view) {
Vec3 vertices[8];
Vec2 projections[8];
get_bounding_box()->get_vertices(vertices);
Plane3 view_plane = Plane3(view.orig_a, view.orig_b, view.orig_c);
Vec3 view_plane_center = (view.orig_a + view.orig_c) * 0.5;
Vec3 up = (view.orig_b - view.orig_a);
Vec3 side = (view.orig_d - view.orig_a);
for (int i = 0; i < 8; i++) {
// Ray from the eye through the box corner, intersected with the view plane.
Vec3 intersection_point = view_plane.intersection_point(view.origin, vertices[i] - view.origin);
Vec3 relative_screen_point = intersection_point - view_plane_center;
// Project up vector onto intersection line.
float y = relative_screen_point.dot(up.normalize()) / up.length();
float x = relative_screen_point.dot(side.normalize()) / side.length();
projections[i] = Vec2(min(0.5, max(-0.5, x)), min(0.5, max(-0.5, y))); // Limit vertices outside screen to be on the edge, otherwise weird stuff happens.
}
// Componentwise min/max over the eight projected points.
Vec2 minimum = projections[0];
Vec2 maximum = projections[0];
for (int i = 1; i < 8; i++) {
minimum = minimum.minimum(projections[i]);
maximum = maximum.maximum(projections[i]);
}
return AABB2(minimum, maximum);
}
7,842 | #include <iostream>
#include <stdio.h>
#include <climits>
#define BLOCK_SIZE 512
/*
* Definitions:
* d[i] = shortest path so far from source to i
* U = unvisited verts
* F = frontier verts
* del = biggest d[i] (i from U) that we can add to frontier
* del[i] = min(d[u] + del[u] for all u in U) (ith iteration)
* del[u] = minimum weight of its outgoing edges
*
*/
// find min edge out
// One thread per vertex: scans row v of the gSize x gSize adjacency matrix
// and writes the smallest positive outgoing edge weight to outVec[v]
// (INT_MAX when the vertex has no positive-weight edges).
__global__ void findAllMins(int* adjMat, int* outVec, size_t gSize) {
    int v = blockIdx.x * blockDim.x + threadIdx.x;
    if (v >= gSize)
        return;
    const int* row = adjMat + (size_t)v * gSize;
    int best = INT_MAX;
    for (size_t c = 0; c < gSize; c++) {
        int w = row[c];
        if (w > 0 && w < best)
            best = w;
    }
    outVec[v] = best;
}
/*
* forall i in parallel do
* if (F[i])
* forall j predecessor of i do
* if (U[j])
* c[i]= min(c[i],c[j]+w[j,i]);
*
*/
// Relaxation step: every frontier vertex f tries to improve the tentative
// distance of each still-unvisited neighbor i through its outgoing edge,
// using atomicMin since several frontier vertices may target the same i.
__global__ void relax(int* U, int* F, int* d, size_t gSize, int* adjMat) {
    int f = blockIdx.x * blockDim.x + threadIdx.x;
    if (f >= gSize || !F[f])
        return;
    for (size_t i = 0; i < gSize; i++) {
        int w = adjMat[f * gSize + i];
        // Zero weight means "no edge"; skip self-loops and visited vertices.
        if (w && i != (size_t)f && U[i])
            atomicMin(&d[i], d[f] + w);
    }
}
// One pairwise-min reduction pass: thread t combines elements 2t and 2t+1 of
// minOutEdges into outDel[t], halving the array. The host calls this
// repeatedly (ping-ponging buffers) until one element remains.
// On the first pass only (useD != 0) each element is minOutEdges[p] + d[p]
// and entries for visited vertices (U[p] == 0) are masked to INT_MAX;
// later passes fold the already-combined values.
// Values <= 0 are mapped to INT_MAX -- this catches the signed overflow of
// INT_MAX + d[p] wrapping negative, so "infinite" entries stay infinite.
// NOTE(review): this kernel's name shadows ::min on the device -- consider
// renaming if device code ever needs the standard min here.
__global__ void min(int* U, int* d, int* outDel, int* minOutEdges, size_t gSize, int useD) {
int globalThreadId = blockIdx.x * blockDim.x + threadIdx.x;
int pos1 = 2*globalThreadId;
int pos2 = 2*globalThreadId + 1;
int val1, val2;
if(pos1 < gSize) {
val1 = minOutEdges[pos1] + (useD ? d[pos1] : 0);
if(pos2 < gSize) {
// Both elements in range: emit the smaller of the two masked values.
val2 = minOutEdges[pos2] + (useD ? d[pos2] : 0);
val1 = val1 <= 0 ? INT_MAX : val1;
val2 = val2 <= 0 ? INT_MAX : val2;
if(useD) {
val1 = U[pos1] ? val1 : INT_MAX;
val2 = U[pos2] ? val2 : INT_MAX;
}
if(val1 > val2) {
outDel[globalThreadId] = val2;
}
else{
outDel[globalThreadId] = val1;
}
}
else {
// Odd tail: only pos1 is in range; pass it through after masking.
val1 = val1 <= 0 ? INT_MAX : val1;
if(useD) {
val1 = U[pos1] ? val1 : INT_MAX;
}
outDel[globalThreadId] = val1;
}
}
}
/*
* F[tid] = false
* if(U[tid] and d[tid] < del)
* U[tid] = false
* F[tid] = true
*
*/
// Frontier maintenance: clear the frontier flag, then move every unvisited
// vertex whose tentative distance beats the threshold del[0] out of the
// unvisited set and into the new frontier.
__global__ void update(int* U, int* F, int* d, int* del, size_t gSize) {
    int v = blockIdx.x * blockDim.x + threadIdx.x;
    if (v >= gSize)
        return;
    int inFrontier = 0;
    if (U[v] && d[v] < del[0]) {
        U[v] = 0;
        inFrontier = 1;
    }
    F[v] = inFrontier;
}
/*
* U[tid] = true
* F[tid] = false
* d[tid] = -1
*/
// Initialization: mark every vertex unvisited (U=1, F=0) with an infinite
// tentative distance, then seed the search at startNode (d=0, visited,
// in the frontier).
// Fix vs. original: the source vertex was hard-coded as thread 0 and the
// startNode parameter was ignored, so every search started from vertex 0
// regardless of the caller's startingNode.
__global__ void init(int* U, int* F, int* d, int startNode, size_t gSize) {
    int v = blockIdx.x * blockDim.x + threadIdx.x;
    if (v < gSize) {
        U[v] = 1;
        F[v] = 0;
        d[v] = INT_MAX;
    }
    // Seed the requested source vertex (guarded so an out-of-range startNode
    // cannot write past the arrays).
    if (v == startNode && v < gSize) {
        d[v] = 0;
        U[v] = 0;
        F[v] = 1;
    }
}
/*
 * Host driver for one single-source shortest-path computation.
 * adjMat/shortestOut are host buffers; the _d_* pointers are preallocated
 * device buffers (gSize ints each, gSize*gSize for _d_adjMat). Tentative
 * distances are copied back into shortestOut when the search converges
 * (the reduced frontier threshold reaches INT_MAX).
 *
 * Fixes vs. original: removed the unused `tempDebug` host allocation (a
 * memory leak) and corrected the "shotest" typo in the result printf.
 */
void doShortest(int* adjMat, int* shortestOut, size_t gSize, int startingNode,
int* _d_adjMat,
int* _d_outVec,
int* _d_unvisited,
int* _d_frontier,
int* _d_estimates,
int* _d_delta,
int* _d_minOutEdge) {
    int del;
    int numBlocks = (gSize / BLOCK_SIZE) + 1;
    // Precompute each vertex's cheapest outgoing edge; O(n^2) but the main
    // loop dominates anyway.
    findAllMins<<<numBlocks, BLOCK_SIZE>>>(_d_adjMat, _d_minOutEdge, gSize);
    /*
     * Overall algorithm:
     *   init<<<>>>(U, F, d)
     *   while (del != INT_MAX)
     *       relax<<<>>>(U, F, d)
     *       del = min-reduction over unvisited vertices
     *       update<<<>>>(U, F, d, del)
     */
    int curSize = gSize;
    int dFlag;
    int* _d_minTemp1;
    int* _d_minTemp2;
    cudaMalloc((void**) &_d_minTemp1 , sizeof(int) * gSize);
    init<<<numBlocks, BLOCK_SIZE>>>(_d_unvisited, _d_frontier, _d_estimates, startingNode, gSize);
    do {
        dFlag = 1;
        curSize = gSize;
        // Reduce on a scratch copy: the pairwise-min passes destroy their input.
        cudaMemcpy(_d_minTemp1, _d_minOutEdge, sizeof(int) * gSize, cudaMemcpyDeviceToDevice);
        relax<<<numBlocks, BLOCK_SIZE>>>(_d_unvisited, _d_frontier, _d_estimates, gSize, _d_adjMat);
        // Ping-pong min-reduction; only the first pass (dFlag) adds d[] and
        // filters by the unvisited set.
        do {
            min<<<numBlocks, BLOCK_SIZE>>>(_d_unvisited, _d_estimates, _d_delta, _d_minTemp1, curSize, dFlag);
            _d_minTemp2 = _d_minTemp1;
            _d_minTemp1 = _d_delta;
            _d_delta = _d_minTemp2;
            curSize /= 2;
            dFlag = 0;
        } while (curSize > 0);
        // Undo the final swap so _d_delta[0] holds the reduced threshold.
        _d_minTemp2 = _d_minTemp1;
        _d_minTemp1 = _d_delta;
        _d_delta = _d_minTemp2;
        update<<<numBlocks, BLOCK_SIZE>>>(_d_unvisited, _d_frontier, _d_estimates, _d_delta, gSize);
        cudaMemcpy(&del, _d_delta, sizeof(int), cudaMemcpyDeviceToHost);
    } while(del != INT_MAX);
    cudaMemcpy(shortestOut, _d_estimates, sizeof(int) * gSize, cudaMemcpyDeviceToHost);
#ifndef NO_PRINT
    for(int i = 0; i < gSize; i++){
        printf("shortest path from %d to %d is %d long.\n", startingNode, i, shortestOut[i]);
    }
    printf("\n");
#endif
    cudaFree(_d_minTemp1);
}
|
7,843 | // #CSCS CUDA Training
//
// #Example 4.3 - dot product with two step reduction, all processing on GPU
//
// #Author: Ugo Varetto
//
// #Goal: compute the dot product of two vectors performing all the computation on the GPU
//
// #Rationale: shows how to perform the dot product of two vectors as a parallel reduction
// with all the computation on the GPU
//
// #Solution: use the same standard parallel reduction algorithm shown in examples 4.1 and 4.2 twice:
// 1) each block produces stores a single result into an array
// 2) the last block to compute a partial reduction perform a parallel
// reduction step on the array generated at (1)
// The first thread (0) of each block atomically increments a global counter then
// checks the counter value: the last block to increment the counter is the one
// that reads '(gridDim.x - 1)' from the counter.
// Global synchronization is achieved through __threadfence()
// to ensure that all the elements in the output array have been written
//
// #Code: 1) compute launch grid configuration
// 2) allocate data on host(cpu) and device(gpu)
// 3) initialize data directly on GPU
// 4) launch kernel
// 5) report errors
// 6) read data back
// 7) free memory
//
// #Compilation:
// nvcc -arch=sm_13 4_3_parallel-dot-product-atomics-portable-optimized.cu -o dot-product-atomics
//
// #Execution: ./dot-product-atomics
//
// #Note: kernel invocations ( foo<<<...>>>(...) ) are *always* asynchronous and a call to
// cudaThreadSynchronize() is required to wait for the end of kernel execution from
// a host thread; in case of synchronous copy operations like cudaMemcpy(...,cudaDeviceToHost)
// kernel execution is guaranteed to be terminated before data are copied
//
// #Note: also check cudaMemset, cudaErrorString, cudaGetLastError usage
//
// #Note: as of CUDA 3.2 it seems that kernels do not stall anymore when invoking
// __syncthreads from within an if block dependent on the thread id;
// #see http://forums.nvidia.com/index.php?showtopic=178284
//
//#include <cuda_runtime.h> // automatically added by nvcc
#include <vector>
#include <iostream>
// Element type for the dot product; switch to double here for higher precision.
typedef float real_t;
// Threads per block; the reduction code assumes blockDim.x == BLOCK_SIZE.
const size_t BLOCK_SIZE = 16;
//------------------------------------------------------------------------------
//Full on-gpu reduction
// each block atomically increments this variable when done
// performing the first reduction step
__device__ unsigned int count = 0;
// shared memory used by partial_dot and sum functions
// for temporary partial reductions; declare as global variable
// because used in more than one function
// NOTE(review): a file-scope __shared__ declaration is nonstandard CUDA and
// relies on compiler tolerance; shared arrays normally live inside the kernel.
__shared__ real_t cache[ BLOCK_SIZE ];
// partial dot product: each thread block produces a single value
// Block-level partial dot product: the block's BLOCK_SIZE threads accumulate
// v1.v2 over a grid-stride loop into the shared `cache`, then tree-reduce it.
// Every thread returns the block's partial sum (cache[0]). `out` is unused
// here and kept only for signature compatibility with the caller.
// Fix vs. original: the early `return 0` for i >= N made out-of-range threads
// skip both the cache zeroing and the __syncthreads() barriers -- a barrier
// divergence (undefined behavior) and an uninitialized-cache read whenever N
// is smaller than the total thread count. All threads now participate; the
// while-loop condition already handles i >= N naturally.
__device__ real_t partial_dot( const real_t* v1, const real_t* v2, int N, real_t* out ) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    cache[ threadIdx.x ] = 0.f;
    // Grid-stride accumulation (covers N larger than the total thread count).
    while( i < N ) {
        cache[ threadIdx.x ] += v1[ i ] * v2[ i ];
        i += gridDim.x * blockDim.x;
    }
    __syncthreads(); // cache fully written before cross-thread reads
    // In-block pairwise tree reduction.
    for( int s = BLOCK_SIZE / 2; s > 0; s /= 2 ) {
        if( threadIdx.x < s ) cache[ threadIdx.x ] += cache[ threadIdx.x + s ];
        __syncthreads();
    }
    return cache[ 0 ];
}
// sum all elements in array; array size assumed to be equal to number of blocks
// Reduces the first gridDim.x elements of v to their total using the block's
// shared cache. Must be called by all BLOCK_SIZE threads of one block; every
// thread returns the total (the final barrier precedes the read of cache[0]).
__device__ real_t sum( const real_t* v ) {
    cache[ threadIdx.x ] = 0.f;
    // Strided accumulation: gridDim.x entries folded into BLOCK_SIZE slots.
    for( int i = threadIdx.x; i < gridDim.x; i += blockDim.x )
        cache[ threadIdx.x ] += v[ i ];
    __syncthreads(); // cache fully written before cross-thread reads
    // Pairwise tree reduction within the block.
    for( int half = BLOCK_SIZE / 2; half > 0; half /= 2 ) {
        if( threadIdx.x < half ) cache[ threadIdx.x ] += cache[ threadIdx.x + half ];
        __syncthreads();
    }
    return cache[ 0 ];
}
// perform parallel dot product in two steps:
// 1) each block computes a single value and stores it into an array of size == number of blocks
// 2) the last block to finish step (1) performs a reduction on the array produced in the above step
// parameters:
// v1 first input vector
// v2 second input vector
// N size of input vector
// out output vector: size MUST be equal to the number of GPU blocks since it us used
// for partial reduction; result is at position 0
// Two-step on-GPU dot product. Step 1: every block writes its partial sum to
// out[blockIdx.x]. Step 2: the last block to finish (detected via the global
// atomic `count`) reduces the out[] array; the final result lands in out[0].
// The __threadfence() guarantees each block's store to out[] is visible
// device-wide before the atomic counter is bumped, so the last block reads
// complete data. `count` is reset to 0 for the next launch.
// out must have at least gridDim.x elements.
__global__ void full_dot( const real_t* v1, const real_t* v2, int N, real_t* out ) {
// true if last block to compute value
__shared__ bool lastBlock;
// each block computes a value
real_t r = partial_dot( v1, v2, N, out );
if( threadIdx.x == 0 ) {
// value is stored into output array by first thread of each block
out[ blockIdx.x ] = r;
// wait for value to be available to all the threads on the device
__threadfence();
// increment atomic counter and retrieve value
const unsigned int v = atomicInc( &count, gridDim.x );
// check if last block to perform computation
lastBlock = ( v == gridDim.x - 1 );
}
// the code below is executed by *all* threads in the block:
// make sure all the threads in the block access the correct value
// of the variable 'lastBlock'
__syncthreads();
// last block performs a the final reduction steps which produces one single value
if( lastBlock ) {
r = sum( out );
if( threadIdx.x == 0 ) {
out[ 0 ] = r;
count = 0;
}
}
}
//------------------------------------------------------------------------------
// cpu implementation of dot product
// Sequential CPU reference: dot product of v1 and v2 over N elements.
real_t dot( const real_t* v1, const real_t* v2, int N ) {
    real_t acc = 0;
    for( int k = 0; k != N; ++k )
        acc += v1[ k ] * v2[ k ];
    return acc;
}
// initialization function run on the GPU
// Grid-stride fill: sets every one of the N elements of v to 1.0f.
__global__ void init_vector( real_t* v, int N ) {
    const int stride = gridDim.x * blockDim.x;
    for( int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride )
        v[ i ] = 1.0f;
}
//------------------------------------------------------------------------------
// Driver: initializes two vectors on the GPU, runs the fully-on-GPU dot
// product (full_dot), and prints the GPU result next to a CPU reference.
// Note: BLOCKS * THREADS_PER_BLOCK (64 * 16 = 1024) covers ARRAY_SIZE exactly.
int main(int argc, char** argv ) {
const size_t ARRAY_SIZE = 1024;//1024 * 1024; //1Mi elements
const int BLOCKS = 64;//512;
const int THREADS_PER_BLOCK = BLOCK_SIZE;//256; // total threads = 512 x 256 = 128ki threads;
const size_t SIZE = ARRAY_SIZE * sizeof( real_t );
// device storage
real_t* dev_v1 = 0; // vector 1
real_t* dev_v2 = 0; // vector 2
real_t* dev_out = 0; // result array, final result is at position 0;
// also used for temporary GPU storage,
// must have size == number of thread blocks
cudaMalloc( &dev_v1, SIZE );
cudaMalloc( &dev_v2, SIZE );
cudaMalloc( &dev_out, sizeof( real_t ) * BLOCKS );
// host storage
std::vector< real_t > host_v1( ARRAY_SIZE );
std::vector< real_t > host_v2( ARRAY_SIZE );
real_t host_out = 0.f;
// initialize vector 1 with kernel; much faster than using for loops on the cpu
init_vector<<< 1024, 256 >>>( dev_v1, ARRAY_SIZE );
cudaMemcpy( &host_v1[ 0 ], dev_v1, SIZE, cudaMemcpyDeviceToHost );
// initialize vector 2 with kernel; much faster than using for loops on the cpu
init_vector<<< 1024, 256 >>>( dev_v2, ARRAY_SIZE );
cudaMemcpy( &host_v2[ 0 ], dev_v2, SIZE, cudaMemcpyDeviceToHost );
// execute kernel
full_dot<<<BLOCKS, THREADS_PER_BLOCK>>>( dev_v1, dev_v2, ARRAY_SIZE, dev_out );
std::cout << cudaGetErrorString( cudaGetLastError() ) << std::endl;
// copy output data from device(gpu) to host(cpu)
// (synchronous cudaMemcpy waits for the kernel to finish first)
cudaMemcpy( &host_out, dev_out, sizeof( real_t ), cudaMemcpyDeviceToHost );
// print dot product by summing up the partially reduced vectors
std::cout << "GPU: " << host_out << std::endl;
// print dot product on cpu
std::cout << "CPU: " << dot( &host_v1[ 0 ], &host_v2[ 0 ], ARRAY_SIZE ) << std::endl;
// free memory
cudaFree( dev_v1 );
cudaFree( dev_v2 );
cudaFree( dev_out );
return 0;
}
|
7,844 | #include <stdio.h>
// Empty kernel: launched in main's bandwidth branch purely to have something
// to time between the two CUDA events.
__global__ void MyKernel() {
}
// Busy-loop kernel for the FLOPS benchmark: performs roughly n float
// multiply-adds per thread.
// NOTE(review): `a` is a by-value parameter and the result is never written
// to memory, so the compiler may optimize the loop away entirely -- the
// timing could be measuring an empty kernel. Verify before trusting GFLOPS.
__global__ void MyKernelFlops(float n, float a, float b, float c) {
int i =0;
while (i<n) {
a+=b*c;
i++;
}
}
// Busy-loop kernel for the IOPS benchmark: performs roughly n integer
// multiply-adds per thread.
// NOTE(review): as with MyKernelFlops, the result is never stored, so dead
// code elimination may remove the loop -- verify before trusting GIOPS.
__global__ void MyKernelIops(int n, int a, int b, int c) {
int i =0;
while (i<n) {
a+=b*c;
i++;
}
}
// Times MyKernelIops with CUDA events and prints the elapsed time and an
// integer-throughput figure.
// NOTE(review): the GIOPS formula counts n operations total, not n per
// launched thread (2 blocks x 1024 threads) -- confirm the intended metric.
// Fix vs. original: the two cudaEvent_t objects were never destroyed (leak).
void measureInIops() {
    int n = 1000000;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    MyKernelIops<<<2, 1024>>>(n, 2, 3, 6);
    cudaEventRecord(stop);
    // Kernel launches are asynchronous; wait for the stop event to be reached.
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("\nTime in milliseconds : %f",milliseconds);
    float giops = (n*1)/milliseconds/1e6;
    printf("\nGIOPS : %f",giops);
    // Release the CUDA events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// Times MyKernelFlops with CUDA events and prints the elapsed time and a
// float-throughput figure.
// NOTE(review): the GFLOPS formula counts n operations total, not n per
// launched thread (2 blocks x 1024 threads) -- confirm the intended metric.
// Fix vs. original: the two cudaEvent_t objects were never destroyed (leak).
void measureInFlops() {
    int n = 1000000;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    MyKernelFlops<<<2, 1024>>>(n, 2.1f, 3.5f, 6.0f);
    cudaEventRecord(stop);
    // Kernel launches are asynchronous; wait for the stop event to be reached.
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("\nTime in milliseconds : %f",milliseconds);
    float gflops = (n*1)/milliseconds/1e6;
    printf("\nGFLOPS : %f",gflops);
    // Release the CUDA events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
// Interactive benchmark menu:
//   1 -> compute throughput (GIOPS or GFLOPS)
//   2 -> device-to-device copy "bandwidth" measurement
// Fixes vs. original: scanf results are now checked (inputs were used
// uninitialized on bad input), the bandwidth printf used a broken "%fn"
// format (now "%f\n"), and the device buffers and events are released.
int main(void)
{
    int n, type ;
    float *d_a, *d_b;
    printf("Please select from below options :\n1 -> Measure GPU Speed \n2 -> Measure memory bandwidth \n --> : ");
    if (scanf("%d",&type) != 1) return 1;
    if(type == 1) {
        int m;
        printf("Please select from below options :\n1 -> Measure in GIOPS \n2 -> Measure in GFLOPS \n --> : ");
        if (scanf("%d",&m) != 1) return 1;
        if(m==1) {
            measureInIops();
        } else if(m == 2) {
            measureInFlops();
        }
    }else if(type == 2) {
        printf("Enter Block size : ");
        if (scanf("%d",&n) != 1) return 1;
        cudaMalloc(&d_a, n);
        cudaMalloc(&d_b, n);
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        cudaMemcpy(d_a, d_b, n, cudaMemcpyDeviceToDevice);
        cudaEventRecord(start);
        MyKernel<<<2, 1024>>>();
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        float milliseconds = 0;
        cudaEventElapsedTime(&milliseconds, start, stop);
        // NOTE(review): the events bracket an empty kernel launch, not the
        // memcpy above -- confirm what this "bandwidth" is meant to measure.
        printf("Bandwidth (GB/s): %f\n", n*4/milliseconds/1e6);
        // Release device buffers and events.
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaFree(d_a);
        cudaFree(d_b);
    }
    return 0;
}
7,845 | #include "includes.h"
// Zeroes one pixel position in each of the four images (non-scatter, Compton,
// Rayleigh, multi-scatter) stacked contiguously in a single buffer of
// 4*pixels_per_image elements. Extra threads beyond pixels_per_image exit
// early so no out-of-bounds write can occur.
__global__ void init_image_array_GPU(unsigned long long int* image, int pixels_per_image)
{
    int pixel = threadIdx.x + blockIdx.x*blockDim.x;
    if (pixel >= pixels_per_image)
        return;
    // Same offset within each of the four consecutive image planes.
    for (int plane = 0; plane < 4; plane++)
        image[pixel + plane * pixels_per_image] = 0ULL;
}
7,846 | #include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
//Input Matrix: z
//(vector workspaces) size n: vector, vector1
//(matrix workspaces) size n*n: prevm, Newm, Q, NewQ, R, z, z1
//Output for eigenvectors: eigenvector
//Output for eigenvalues: vector
// Batched QR-iteration eigensolver: each thread block diagonalizes one of the
// input matrices. Per block: WidthOfMatrix[blockIdx.x] is the matrix width n,
// ind[] gives the matrix's offset into every n*n workspace array, and vind[]
// the offset into every length-n vector workspace. Repeats Householder-based
// QR factorization followed by the RQ recombination until the diagonal stops
// changing (ratio within [0.999999, 1.000001]), then sorts the eigenvalues
// (ascending) with their eigenvectors. Outputs: eigenvalues in `vector`
// (offset vind), eigenvectors in `eigenvector` (offset ind).
// NOTE(review): the convergence loop exits on converged[blockIdx.x], written
// by thread 0 and re-read by all threads after a barrier; assumes blockDim.x
// is uniform for the whole do/while so no barrier divergence occurs.
__global__ void block_QR(float* z, float* z1, float* vector, float* vector1, float* Q, float* NewQ, float* R, float* PrevM, float* NewM, int* converged, float* eigenvector, const int *WidthOfMatrix, const int *ind, const int *vind)
{
//extern __shared__ float z1[];
int n = WidthOfMatrix[blockIdx.x];
int index = ind[blockIdx.x];
int vectindex = vind[blockIdx.x];
int numofelements = n*n;
if(threadIdx.x==0){
converged[blockIdx.x] = 0;
}
if(threadIdx.x<numofelements){
int i;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
//set eigenvector to the identity matrix.
if(i/n==i%n)eigenvector[i+index]=1;
else eigenvector[i+index]=0;
}
// Seed the working copies: z1 (pristine input), Q, PrevM all start as z.
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i+index;
z1[iplusindex]=z[iplusindex];
Q[iplusindex]=z[iplusindex];
PrevM[iplusindex]=z[iplusindex];
}
do{
int k, j, PowOf2;
for(k=0;k<n-1;k++){
//Householder Code
//STEP 0: Get value of z[k*n+k] for use in step 4
float NormCheck = z[k*n+k+index];
//STEP 1: Find minor matrix of the input matrix z and sets it to z
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i%n==i/n&&i/n<k)z[i+index]=1;
else if(i/n>=k&&i%n>=k)z[i+index]=z[i+index];
else z[i+index]=0;
}
__syncthreads();
//STEP 2: Find kTH column of z and set to vector
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
vector[i+vectindex] = z[i*n+k+index];
}
}
//STEP 3: Find the norm of the kTh column and set to NormOfKcol
float NormOfKcol;
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
//Need a temporary for vector that we can change the values of since we need mcol later
vector1[iplusvectindex] = vector[iplusvectindex];
vector1[iplusvectindex] *= vector1[iplusvectindex];
}
}
PowOf2 = 1;
__syncthreads();
//add all x's together, 2 at a time. O((log(n)) function
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
NormOfKcol = sqrt(vector1[0+vectindex]);
//STEP 4: Make Norm Negative if NormCheck is > 0
if(NormCheck > 0) NormOfKcol = -NormOfKcol;
//STEPS 5+6 Combined: add NormOfKcol to tmp[k]
if(k==threadIdx.x)vector[k+vectindex]=vector[k+vectindex]+NormOfKcol;
__syncthreads();
//STEP 7: Finds the addition of the new kcol and stores it in tmp[0]
//used in ||tmp||
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector1[iplusvectindex] = vector[iplusvectindex] * vector[iplusvectindex];
PowOf2 = 1;
}
}
__syncthreads();
//add all tmp's together, 2 at a time. O(n(log(n)) function
for(i = 0;i < ((float)n)/2.0;i++){
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n&&j%(PowOf2*2)==0&&j+PowOf2<n){
int jplusvectindex = j + vectindex;
vector1[jplusvectindex] = vector1[jplusvectindex] + vector1[PowOf2+jplusvectindex];
}
}
__syncthreads();
//PowOf2 = pow(2,i)
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
if(j<n){
PowOf2 *= 2;
}
}
}
__syncthreads();
//STEP 8: Divide vector Vmadd by the Norm[0] and set it to Vdiv
// Vdiv = Vmadd / norm
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i<n){
int iplusvectindex = i + vectindex;
vector[iplusvectindex] = vector[iplusvectindex]/(sqrt(vector1[vectindex]));
}
}
__syncthreads();
//STEP 9: Multiply the Vdiv vector by its transverse and subtract that from I, store the resulting matrix in Vmul
// Vmul = I - 2 * Vdiv * Vdiv^T
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index] = -2 * vector[i/n+vectindex] * vector[i%n+vectindex];
}
//if on the diagonal(row==column)
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n){
R[i+index] += 1;
}
}
__syncthreads();
//STEP 10: Multiply Vmul by input matrix z1 and store in VmulZ
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i+index]=0;
for(j=0;j<n;j++){
z[i+index]+= R[i/n*n+j+index] * z1[j*n+i%n+index];
}
}
//STEP 11: if k!=0 Multiply Vmul by input matrix Q and set to NewQ
if(k!=0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewQ[i+index]=0;
for(j=0;j<n;j++)
{
NewQ[i+index]+= R[i/n*n+j+index] * Q[j*n+i%n+index];
}
}
}
__syncthreads();
//STEP 12.1: If first iteration of k, set Q to vmul for use in next iteration of k
if(k==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = R[i+index];
}
}
//STEP 12.2: If after first iteration of k, set Q to NewQ, which was found by multiplying the old Q by Vmul.
else {
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
Q[i+index] = NewQ[i+index];
}
}
//STEP 12.3: Set z and z1 to VmulZ for use in the next iteration of k.
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z1[i+index] = z[i+index];
}
__syncthreads();
}
//Once for loop is completed:
//STEP 13: Multiply matrices Q and m to find the matrix R
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
R[i+index]=0;
}
for(i=0;i<n;i++)
{
for(j=threadIdx.x;j<numofelements;j=j+blockDim.x){
R[j+index]+= Q[j/n*n+i+index] * PrevM[i*n+j%n+index];
}
}
__syncthreads();
//STEP 14: Find the transpose of matrix Q and store int TransposeOfQ
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
// # -> #%n*n+#/n
// for n=4 0->0 1->4 2->8 3->12
// 4->1 5->5 6->9 7->13
// 8->2 9->6 10->10 11->14
// 12->3 13->7 14->11 15->15
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
z[i%n*n+i/n+index] = Q[i+index];
}
__syncthreads();
//STEP 14.5: Multiply matrices eigenvector and TransposeOfQ and store in eigenvector(use NewM as a temporary matrix)
//NewM contains new eigenvectors
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++){
NewM[i+index]+= eigenvector[i/n*n+j+index] * z[j*n+i%n+index];
}
}
__syncthreads();
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
eigenvector[i+index]=NewM[i+index];
}
__syncthreads();
//STEP 15: Multiply matrices R and TransposeOfQ and store in NewM matrix
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
NewM[i+index]=0;
for(j=0;j<n;j++)
{
NewM[i+index]+= R[i/n*n+j+index] * z[j*n+i%n+index];
}
}
//STEP 16: Check for Convergence of New Matrix (Newm)
if(threadIdx.x==0){
converged[blockIdx.x] = 1;
}
__syncthreads();
//threadIdx.x%n = column
//threadIdx.x/n = row (integer division)
// Any thread seeing a diagonal entry drift by more than 1e-6 (relative)
// vetoes convergence for the whole block.
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
if(i/n==i%n&&(PrevM[i+index]/NewM[i+index]>1.000001||
PrevM[i+index]/NewM[i+index]<0.999999)){
converged[blockIdx.x] = 0;
}
}
__syncthreads();
//STEP 17: Set up for next iteration if converged is 0
if(converged[blockIdx.x]==0){
for(i=threadIdx.x;i<numofelements;i=i+blockDim.x){
int iplusindex = i + index;
z[iplusindex] = NewM[iplusindex];
z1[iplusindex] = NewM[iplusindex];
Q[iplusindex] = NewM[iplusindex];
PrevM[iplusindex] = NewM[iplusindex];
}
}
__syncthreads();
}while(converged[blockIdx.x]==0);
//put eigenvalues into vector
if(threadIdx.x<n){
vector[threadIdx.x+vectindex]=NewM[threadIdx.x+threadIdx.x*n+index];
}
__syncthreads();
if(threadIdx.x==0){
//Sort Eigenvalues low to high and swap eigenvectors to match eigenvalues
//Simple Bubble Sort
int i1,i2,i3;
for(i1=vectindex;i1<n-1+vectindex;i1++){
for(i2=i1+1;i2<n+vectindex;i2++){
if(vector[i1]>vector[i2]){
float tmp = vector[i1];
vector[i1] = vector[i2];
vector[i2] = tmp;
// Swap the matching eigenvector columns as well.
for(i3 = 0;i3<n;i3++){
float tmp = eigenvector[i3*n+(i1-vectindex)%n+index];
eigenvector[i3*n+(i1-vectindex)%n+index] = eigenvector[i3*n+(i2-vectindex)%n+index];
eigenvector[i3*n+(i2-vectindex)%n+index] = tmp;
}
}
}
}
}
}
}
//Number of matrices, matrix data, size of matrix array, matrix index data, eigenvalue index data , widths data, empty vector for eigenvalues
extern "C" void BlockQR_HOST(const int n_mat, float *matrix, const size_t matrix_size, const int *index, const int *eigenvaluesindex, const int *sizes, float *eigenvalues ) {
//Host wrapper: uploads all packed matrices, runs the block_QR kernel (one
//CUDA block per matrix), then copies eigenvalues into `eigenvalues` and
//eigenvectors back into `matrix` (the input matrices are overwritten).
//Set variables for CUDA use
int* d_converged = NULL;
int* d_sizes = NULL;
int* d_eigenvaluesindex = NULL;
int* d_index = NULL;
float* d_PrevM=NULL;
float* d_NewM=NULL;
float* d_Q=NULL;
float* d_NewQ=NULL;
float* d_R = NULL;
float* d_vector = NULL;
float* d_vector1 = NULL;
float* d_z1 = NULL;
float* d_z = NULL;
float* d_eigenvector = NULL;
//Calculate vector size needed for all matrices(add all the widths together)
int vector_size = 0;
int largestmatrix = 0;
int i;
for(i=0;i<n_mat;i++){
if(largestmatrix<sizes[i])largestmatrix = sizes[i];
vector_size+=sizes[i];
}
largestmatrix *= largestmatrix;
//Allocate per-matrix bookkeeping arrays plus several scratch matrices the
//size of the packed input.
cudaMalloc((void **)&d_converged, sizeof(int) * n_mat);
cudaMalloc((void **)&d_sizes, sizeof(int) * n_mat);
cudaMalloc((void **)&d_eigenvaluesindex, sizeof(int) * n_mat);
cudaMalloc((void **)&d_index, sizeof(int) * n_mat);
cudaMalloc((void **)&d_vector, sizeof(float) * vector_size);
cudaMalloc((void **)&d_vector1, sizeof(float) * vector_size);
cudaMalloc((void **)&d_PrevM, sizeof(float) * matrix_size);
cudaMalloc((void **)&d_NewM, sizeof(float) * matrix_size);
cudaMalloc((void **)&d_Q, sizeof(float) * matrix_size);
cudaMalloc((void **)&d_NewQ, sizeof(float) * matrix_size);
cudaMalloc((void **)&d_R, sizeof(float) * matrix_size);
cudaMalloc((void **)&d_z, sizeof(float) * matrix_size);
cudaMalloc((void **)&d_z1, sizeof(float) * matrix_size);
cudaMalloc((void **)&d_eigenvector, sizeof(float) * matrix_size);
//Report allocation failures instead of silently passing bad pointers on.
cudaError_t allocerr = cudaGetLastError();
if (allocerr != cudaSuccess)
printf("BlockQR_HOST: device allocation failed: %s\n", cudaGetErrorString(allocerr));
//Get CUDA device properties
cudaDeviceProp props;
cudaGetDeviceProperties(&props, 0);
//NOTE(review): this requests the device's absolute thread-per-block limit;
//if block_QR needs more registers/shared memory than that allows, the
//launch fails and is reported by the check after the launch below.
int threads = props.maxThreadsPerBlock;
//copy input matrix data into z
cudaMemcpy(d_z, matrix, sizeof(float) * matrix_size, cudaMemcpyHostToDevice);
cudaMemcpy(d_sizes, sizes, sizeof(int)*n_mat, cudaMemcpyHostToDevice);
cudaMemcpy(d_index, index, sizeof(int)*n_mat, cudaMemcpyHostToDevice);
cudaMemcpy(d_eigenvaluesindex, eigenvaluesindex, sizeof(int)*n_mat, cudaMemcpyHostToDevice);
//Time and run kernel
float elapsed=0;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
block_QR <<<n_mat, threads>>>(d_z,d_z1,d_vector,d_vector1,d_Q, d_NewQ, d_R, d_PrevM, d_NewM, d_converged, d_eigenvector,d_sizes, d_index, d_eigenvaluesindex);
//Kernel launches are asynchronous; configuration errors only surface
//through cudaGetLastError().
cudaError_t launcherr = cudaGetLastError();
if (launcherr != cudaSuccess)
printf("BlockQR_HOST: kernel launch failed: %s\n", cudaGetErrorString(launcherr));
cudaEventRecord(stop, 0);
cudaEventSynchronize (stop);
cudaEventElapsedTime(&elapsed, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
printf("The elapsed time in gpu was %.2f ms\n", elapsed);
//copy back data
cudaMemcpy(eigenvalues, d_vector, sizeof(float) * vector_size, cudaMemcpyDeviceToHost);
cudaMemcpy(matrix, d_eigenvector, sizeof(float) * matrix_size, cudaMemcpyDeviceToHost);
// Cleanup
cudaFree(d_eigenvector);
cudaFree(d_sizes);
cudaFree(d_eigenvaluesindex);
cudaFree(d_index);
cudaFree(d_converged);
cudaFree(d_PrevM);
cudaFree(d_NewM);
cudaFree(d_Q);
cudaFree(d_NewQ);
cudaFree(d_R);
cudaFree(d_vector);
cudaFree(d_vector1);
cudaFree(d_z);
cudaFree(d_z1);
//NOTE(review): cudaDeviceReset() destroys the whole CUDA context for the
//process, invalidating any device allocations the caller still holds --
//confirm this is intended for a library entry point.
cudaDeviceReset();
}
|
7,847 | #include "includes.h"
// Per-pixel float -> unsigned char conversion (plain C cast: truncation,
// no scaling or clamping). One thread per pixel; expects a 2D grid of 2D
// blocks covering a width x height image stored row-major.
__global__ void float1toUchar1(float1 *inputImage, uchar1 *outputImage, int width, int height)
{
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
// Guard against partial blocks at the right/bottom edges: the original
// indexing assumed the grid tiled the image exactly and could access out
// of bounds (the height parameter was never used).
if (x >= width || y >= height) return;
int offset = y * width + x;
uchar1 pixel;
pixel.x = (unsigned char) inputImage[offset].x;
outputImage[offset] = pixel;
}
7,848 | #include "buffer.cuh"
// Initialize a bump-allocator buffer in-place inside the caller-provided
// memory region `memptr` of `size` bytes; the usable pool is what remains
// after the header fields.
// NOTE(review): the header size is taken as sizeof(unsigned long) +
// sizeof(char*), which presumably matches buffer_t's `size` and
// `current_index` members -- confirm against buffer.cuh, since struct
// padding could make sizeof(buffer_t) larger than that sum.
__device__ buffer_t* buffer_constructor(size_t size, void* memptr)
{
buffer_t* buffer = (buffer_t*)memptr;
buffer->size = size - sizeof(unsigned long) - sizeof(char*);
buffer->current_index = buffer->pool;
return buffer;
}
// Bump-allocate `size` bytes from the buffer's pool. Returns NULL when the
// remaining space is insufficient. There is no per-allocation free; use
// reset_buffer() to reclaim everything at once.
__device__ void* buffer_malloc(buffer_t* buffer, size_t size)
{
size_t used = buffer->current_index - buffer->pool;
size_t available = buffer->size - used;
if (available < size)
{
return NULL;
}
void* allocation = buffer->current_index;
buffer->current_index += size;
return allocation;
}
// Intentionally a no-op: the buffer lives inside caller-provided memory
// (see buffer_constructor), so this object owns nothing to release.
__device__ void free_buffer(buffer_t* buffer)
{
//free(buffer);
}
// Rewind the bump pointer to the start of the pool, logically freeing all
// prior buffer_malloc() allocations in one step.
__device__ void reset_buffer(buffer_t* buffer)
{
buffer->current_index = buffer->pool;
}
|
7,849 | /*
* reference: http://home.ie.cuhk.edu.hk/~wkshum/papers/pagerank.pdf
*/
#include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <tuple>
#include <vector>
#include <chrono> // timing
#include <algorithm> // sort
/* global variables, this is where you would change the parameters */
const long long N = 10876; // number of nodes
const int num_iter = 10; // number of pagerank iterations
const std::string filename = "p2p-Gnutella04.txt";
const float d = 0.85f; // damping factor. 0.85 as defined by Google
const int blocksize = 512; // CUDA threads per block
// Row types for the N x N matrices so a pointer to an array of rows can be
// indexed as matrix[i][j].
typedef float trans_m_col[N];
typedef int vis_m_col[N];
// Read the edge list from `filename` and populate:
//   visited_matrix[a][b] = 1 for each directed edge a -> b
//   outgoing_table[a]    = out-degree of node a
// Exits the process if the file cannot be opened. Parsing stops at the
// first line that does not hold two integers (original behavior kept).
void read_inputfile( vis_m_col *visited_matrix, int outgoing_table[ N ] )
{
// Read file and build node.
std::ifstream infile;
infile.open( filename );
if (infile.fail()) {
std::cerr << "Error opening a file" << std::endl;
infile.close();
exit( 1 );
}
std::string line;
int a, b;
int count_edge = 0;
while ( getline( infile, line ) )
{
std::istringstream iss( line );
if ( ! ( iss >> a >> b ) ) { break; } // Format error.
// Skip out-of-range node ids instead of writing outside the N x N
// matrix (previously an unchecked out-of-bounds write).
if ( a < 0 || a >= N || b < 0 || b >= N ) { continue; }
// Process pair (a, b).
visited_matrix[ a ][ b ] = 1;
outgoing_table[ a ] += 1;
count_edge++;
}
infile.close();
// report shape
std::cout << "finish reading graph ... \n"
<< N << " nodes\n"
<< count_edge << " edges"
<< std::endl;
}
/**
* outgoing_table, transition_matrix, visited_matrix
*/
/**
 * Fill one entry of the transition matrix per thread (flat 1D launch over
 * N*N entries). Dangling columns (zero out-degree) get 1/N in every row;
 * a visited edge col -> row gets 1/outdegree(col); all other entries keep
 * their initial value.
 */
__global__
void update_entries( trans_m_col *transition_matrix, vis_m_col *visited_matrix, int *outgoing_table, int N )
{
int const tid = threadIdx.x + blockIdx.x * blockDim.x;
int const row = tid / N;
int const col = tid % N;
if ( row >= N || col >= N )
return;
int const out_degree = outgoing_table[ col ];
if ( out_degree == 0 )
{
// dangling node: 1 / N
transition_matrix[ row ][ col ] = 1.0f / N;
}
else if ( visited_matrix[ col ][ row ] == 1 )
{
// edge col -> row exists: entry is 1 / outdegree(col)
transition_matrix[ row ][ col ] = 1.0f / out_degree;
}
}
// One PageRank iteration: each thread updates the score of one node j from
// the previous iteration's scores.
__global__
void pagerank( float *score_table, float *old_score_table, trans_m_col *transition_matrix, float d, int N )
{
int const j = threadIdx.x + blockIdx.x * blockDim.x;
if (j < N)
{
/* update pagerank scores */
// NOTE(review): this computes d*old[j] + (1-d)*sum, whereas the classic
// Google formulation is (1-d)/N + d*sum. Presumably this follows the
// variant in the referenced paper (see file header) -- confirm before
// changing.
float sum = 0.0f;
for ( auto k = 0; k < N; ++k )
{
sum += old_score_table[ k ] * transition_matrix[ j ][ k ];
}
score_table[ j ] = d * old_score_table[ j ] + ( 1.0f - d ) * sum;
}
}
// Ordering predicate for (node id, score) tuples: orders by score, highest
// first. Returns non-zero when i should precede j.
int comp( std::tuple< int, float > const &i, std::tuple< int, float > const &j )
{
float const score_i = std::get< 1 >( i );
float const score_j = std::get< 1 >( j );
return score_i > score_j;
}
void print_top_5( float arr[ N ] )
{
std::vector< std::tuple< int, float > > sorted = {};
for ( auto i = 0; i < N; ++i )
{
sorted.push_back( std::tuple< int, float >{ i, arr[ i ] } );
}
std::sort( sorted.begin(), sorted.end(), comp );
std::cout << "Top five:" << std::endl;
for ( auto i = 0; i < std::min( ( long long ) 5, N); ++i )
{
std::cout << std::get< 0 >( sorted[ i ] ) << "(" << std::get< 1 >( sorted[ i ] ) << ") ";
}
std::cout << std::endl;
}
// Print the sum of all N scores (a quick normalization sanity check).
void print_total( float arr[] )
{
float total = 0.0f;
for ( long long idx = 0; idx < N; ++idx )
{
total += arr[ idx ];
}
std::cout << "sum=" << total << std::endl;
}
// Driver: build the transition matrix on the GPU, run num_iter PageRank
// iterations, and report the top nodes plus per-phase timings.
int main()
{
auto const total_start_time = std::chrono::steady_clock::now();
auto const score_t_size = N * sizeof(float);
auto const out_t_size = N * sizeof(int);
auto const vis_m_size = N * N * sizeof(int);
auto const trans_m_size = N * N * sizeof(float);
// N x N host matrices are heap-allocated (too large for the stack).
vis_m_col *visited_matrix;
visited_matrix = ( vis_m_col * )malloc( vis_m_size );
memset(visited_matrix, 0, vis_m_size);
trans_m_col *transition_matrix;
transition_matrix = ( trans_m_col * )malloc( trans_m_size );
memset(transition_matrix, 0, trans_m_size);
// All scores start at the uniform distribution 1/N.
float score_table[ N ] = { 0 };
std::fill_n(score_table, N, 1.0f / N );
int outgoing_table[ N ] = { 0 };
read_inputfile( visited_matrix, outgoing_table );
float *dev_score_table, *dev_old_score_table;
int *dev_outgoing_table;
vis_m_col *dev_visited_matrix;
trans_m_col *dev_transition_matrix;
cudaMalloc( &dev_score_table, score_t_size );
cudaMalloc( &dev_old_score_table, score_t_size );
cudaMalloc( &dev_outgoing_table, out_t_size );
cudaMalloc( &dev_visited_matrix, vis_m_size );
cudaMalloc( &dev_transition_matrix, trans_m_size );
cudaError_t err = cudaGetLastError(); // report allocation failures
if (err != cudaSuccess) std::cout << "CUDA error: " << cudaGetErrorString(err) << std::endl;
cudaMemcpy( dev_score_table, score_table, score_t_size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_outgoing_table, outgoing_table, out_t_size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_visited_matrix, visited_matrix, vis_m_size, cudaMemcpyHostToDevice );
cudaMemcpy( dev_transition_matrix, transition_matrix, trans_m_size, cudaMemcpyHostToDevice );
/* timing the pre processing */
auto const update_start_time = std::chrono::steady_clock::now();
auto num_blocks = ceil( N * N / static_cast< float >( blocksize ) );
update_entries<<< num_blocks, blocksize >>>( dev_transition_matrix, dev_visited_matrix, dev_outgoing_table, N );
// Kernel launches are asynchronous: wait for completion so the measured
// interval covers execution, not just the launch call.
cudaDeviceSynchronize();
auto const update_end_time = std::chrono::steady_clock::now();
auto const update_time = std::chrono::duration_cast< std::chrono::microseconds >\
( update_end_time - update_start_time ).count();
/* timing the pagerank algorithm */
auto const pagerank_start_time = std::chrono::steady_clock::now();
num_blocks = ceil( N / static_cast< float >( blocksize ) );
/* iterations must be serial */
for ( auto i = 0; i < num_iter - 1; ++i )
{
/* scores from previous iteration */
cudaMemcpy( dev_old_score_table, dev_score_table, score_t_size, cudaMemcpyDeviceToDevice );
pagerank<<< num_blocks, blocksize >>>( dev_score_table, dev_old_score_table, dev_transition_matrix, d, N );
}
// Wait for the queued iterations to finish before stopping the clock.
cudaDeviceSynchronize();
auto const pagerank_end_time = std::chrono::steady_clock::now();
auto const pagerank_time = std::chrono::duration_cast< std::chrono::microseconds >\
( pagerank_end_time - pagerank_start_time ).count();
/* retrieve final scores array from device and store back to host */
cudaMemcpy(score_table, dev_score_table, score_t_size, cudaMemcpyDeviceToHost);
cudaFree( dev_score_table );
cudaFree( dev_old_score_table );
cudaFree( dev_outgoing_table );
cudaFree( dev_visited_matrix );
cudaFree( dev_transition_matrix );
// Release the host matrices (previously leaked).
free( visited_matrix );
free( transition_matrix );
auto const total_end_time = std::chrono::steady_clock::now();
auto const total_time = std::chrono::duration_cast< std::chrono::microseconds >\
( total_end_time - total_start_time ).count();
print_top_5( score_table );
print_total( score_table );
std::cout << "in_kernel_update_time = "
<< update_time
<< " us"
<< "\nin_kernel_pagerank_time = "
<< pagerank_time
<< " us"
<< "\nprogram_total_time = "
<< total_time
<< " us"
<< std::endl;
return 0;
}
7,850 | #include "knn.cuh"
#include <iostream>
using namespace std;
namespace knn {

// Print the CUDA runtime and driver versions reported by the CUDA API.
void printCudaVersion() {
    int runtime_ver = 0;
    cudaRuntimeGetVersion(&runtime_ver);
    cout << "CUDA Runtime version: " << runtime_ver << '\n';
    int driver_ver = 0;
    cudaDriverGetVersion(&driver_ver);
    std::cout << "CUDA Driver version: " << driver_ver << '\n';
}

// List every visible CUDA device with its compute capability.
void printCudaDevices() {
    int devicesCount = 0;
    cudaGetDeviceCount(&devicesCount);
    for(int deviceIndex = 0; deviceIndex < devicesCount; ++deviceIndex) {
        cudaDeviceProp deviceProperties;
        cudaGetDeviceProperties(&deviceProperties, deviceIndex);
        cout << "gpu[" << deviceIndex << "]: " << deviceProperties.name
             << " (" << deviceProperties.major << "." << deviceProperties.minor << ")\n";
    }
}

// k-nearest-neighbours kernel -- body not yet implemented.
// V: vector dimension, N: data points, Q: queries, K: neighbours per query.
__global__ void knn_gpu(unsigned V, unsigned N, unsigned Q, unsigned K,
                        float* data, float* query, unsigned* idx, float* dist) {
}

// Device-selection and buffer-management scaffolding for the GPU k-NN:
// currently copies inputs to the device and allocates result buffers, but
// does not launch the (empty) kernel or copy results back.
void knn_gpu_test(unsigned gpu_index,
                  unsigned V, unsigned N, unsigned Q, unsigned K,
                  float* data, float* query, unsigned* idx, float* dist) {
    // get devices count; initialized so a failed query cannot leave it
    // holding garbage, and cast to avoid the previous implicit
    // signed/unsigned comparison
    int devicesCount = 0;
    cudaGetDeviceCount(&devicesCount);
    if (devicesCount <= 0 || gpu_index >= static_cast<unsigned>(devicesCount)) {
        std::cout << "GPU index [" << gpu_index << "] out of range!" << '\n';
        return;
    }
    cudaDeviceProp deviceProperties;
    cudaGetDeviceProperties(&deviceProperties, gpu_index);
    cudaSetDevice(gpu_index);
    cout << "Using gpu[" << gpu_index << "]: " << deviceProperties.name
         << " (" << deviceProperties.major << "." << deviceProperties.minor << ")\n";
    // copy data to device
    float* d_data;
    cudaMalloc((void**)&d_data, V * N * sizeof(float));
    cudaMemcpy(d_data, data, V * N * sizeof(float), cudaMemcpyHostToDevice);
    // copy query to device
    float* d_query;
    cudaMalloc((void**)&d_query, V * Q * sizeof(float));
    cudaMemcpy(d_query, query, V * Q * sizeof(float), cudaMemcpyHostToDevice);
    // allocate indexes
    unsigned* d_idx;
    cudaMalloc((void**)&d_idx, K * Q * sizeof(unsigned));
    // allocate distances
    float* d_dist;
    cudaMalloc((void**)&d_dist, K * Q * sizeof(float));
    // free
    cudaFree(d_dist);
    cudaFree(d_idx);
    cudaFree(d_query);
    cudaFree(d_data);
    /*
    for (uint32_t q = 0; q < Q; q++) {
        float* local_query = query + V * q;
        float* local_dist = dist + K * q;
        uint32_t* local_idx = idx + K * q;
        // fill up distances with max float
        for (uint32_t k = 0; k < K; k++) {
            local_dist[k] = std::numeric_limits<float>::max();
        }
        for (uint32_t n = 0; n < N; n++) {
            const auto d = distance<V>(local_query, data + V * n);
            insert_in_order<K>(d, n, local_dist, local_idx);
        }
    }*/
}
}
|
7,851 | #include <stdio.h>
#include <time.h>
using namespace std;
#define PI 3.1415926535897932384
#define mu0 4*PI*1e-7
#define threadsPerBlock 1024
//grid will be r driven meaning grid(r,z) = grid[r*zMax + z]
//grid will be r driven meaning grid(r,z) = grid[r*zMax + z]
// Initialize the field grid. Work is statically partitioned over a single
// block of threadsPerBlock threads (see the launch in main): the first
// `rem` threads take divi+1 contiguous cells each, the rest take divi.
__global__ void init(double *grid, double Il, double dI, double ldr, double rlength, int grid_size, int zseg){
int rem = grid_size%threadsPerBlock;
int divi = grid_size/threadsPerBlock;
int start, fin;
if(threadIdx.x<rem){
start = threadIdx.x*(divi+1);
fin = start + divi + 1;
}
else{
start = threadIdx.x*divi + rem;
fin = start + divi;
}
for(int i = start; i<fin; i++){
long int rrow = i/(zseg+2); // radial index of cell i
long int zcol = i%(zseg+2); // axial index of cell i
// Initial profile: current varies linearly with z (Il + zcol*dI);
// radial shape is r*(1 - r^2/(3 R^2)) scaled by 3*mu0/(4*pi*R^2).
grid[i] = (1-(rrow*rrow*ldr*ldr/(3*rlength*rlength)))*3*mu0*(Il + zcol*dI)*rrow*ldr/(4*PI*rlength*rlength);
}
}
// Time-step the field on the (r,z) grid for maxSteps iterations using an
// explicit finite-difference update. Work partition is identical to
// init(); launched as a single block with the whole grid held in dynamic
// shared memory.
//
// NOTE(review): grid_new_s and grid_old_s are both declared
// 'extern __shared__', and in CUDA every extern __shared__ array aliases
// the SAME dynamic shared-memory base address. So "old" and "new" are one
// array here: the snapshot loop below is effectively a no-op and the
// stencil can read neighbours already updated in the current step. A fix
// needs one extern array split manually into two halves AND a doubled
// dynamic shared-memory size at the launch site in main -- confirm the
// intended scheme before relying on these results.
//
// NOTE(review): r is an int, so 1/(2*r) and 1/(r*r) below are integer
// divisions that evaluate to 0 for r >= 2 -- the cylindrical correction
// terms vanish and the update degenerates to the Cartesian stencil.
// Likely intended 1.0/(2.0*r) and 1.0/((double)r*r).
__global__ void run(double *rod_new, double r_aug, double z_aug, long int maxSteps, int grid_size, int rseg, int zseg){
int rem = grid_size%threadsPerBlock;
int divi = grid_size/threadsPerBlock;
int start, fin;
if(threadIdx.x<rem){
start = threadIdx.x*(divi+1);
fin = start + divi + 1;
}
else{
start = threadIdx.x*divi + rem;
fin = start + divi;
}
long int steps = 0;
extern __shared__ double grid_new_s[];
extern __shared__ double grid_old_s[]; // aliases grid_new_s (see NOTE above)
// Stage the grid from global into shared memory.
for(int i = start; i<fin; i++){
grid_new_s[i] = rod_new[i];
}
__syncthreads();
while(steps<maxSteps){
// Snapshot the previous step (a no-op while the arrays alias).
for(int i = start; i<fin; i++){
grid_old_s[i] = grid_new_s[i];
}
__syncthreads();
for(int i = start; i<fin; i++){
int r = i/(zseg+2); // radial index
int z = i%(zseg+2); // axial index
// Interior cells only: boundary rows/columns stay fixed.
if(z != 0 && z != zseg+1){
if(r!= 0 && r!= rseg+1){
if(r==1){
// Near-axis form of the radial operator.
grid_new_s[i] += r_aug*(2*grid_old_s[i+(zseg+2)] - 4*grid_old_s[i]) +
z_aug * (grid_old_s[i+1] - 2*grid_old_s[i] + grid_old_s[i-1]);
}
else{
// General cylindrical finite-difference update.
grid_new_s[i] += r_aug*((1+(1/(2*r)))*grid_old_s[i+(zseg+2)] + (-2-(1/(r*r)))*grid_old_s[i] + (1-(1/(2*r)))*grid_old_s[i-(zseg+2)])
+z_aug*(grid_old_s[i+1] - 2*grid_old_s[i] + grid_old_s[i-1]);
}
}
}
}
steps++;
__syncthreads();
}
// Write the final state back to global memory.
for(int i = start; i<fin; i++){
rod_new[i] = grid_new_s[i];
}
}
// Interactive driver: reads rod geometry and solver parameters from stdin,
// builds the initial field on the GPU, dumps it to init.txt, runs the
// explicit time-stepping kernel, and dumps the final field to res.txt.
// NOTE(review): scanf return values are unchecked -- malformed input
// leaves the corresponding variables uninitialized.
int main(){
double Il, Ir, rlength, eta, tstep, ldr, ldz, tottime, zlength;
int rseg, zseg;
printf("What is your left I? ");
scanf("%lf", &Il);
printf("What is your right I? ");
scanf("%lf", &Ir);
printf("What is the radius of your rod? ");
scanf("%lf", &rlength);
printf("What is the length of your rod? ");
scanf("%lf", &zlength);
printf("What is eta? ");
scanf("%lf", &eta);
printf("How many segments would you like per radius? ");
scanf("%d", &rseg);
printf("How many segments would you like per length? ");
scanf("%d", &zseg);
// Cell sizes; the +2 ghost/boundary layers appear throughout as rseg+2
// and zseg+2.
ldr = rlength/(rseg+1);
ldz = zlength/(zseg+1);
double smallest = ldr;
if(ldz < ldr)
smallest = ldz;
// Explicit-scheme step size: dt = 0.125 * dx_min^2 * mu0 / eta.
tstep = 0.125*smallest*smallest*mu0/eta;
printf("How long would you like to run? ");
scanf("%lf", &tottime);
double *h_grid, *d_grid;
size_t grid_size = (rseg + 2)*(zseg+2) * sizeof(double);
h_grid = (double*)malloc(grid_size);
cudaMalloc(&d_grid, grid_size);
// Current varies linearly from Il to Ir along z.
double dI = (Ir - Il) / (zseg+2);
// Single block of threadsPerBlock threads; the kernel partitions the
// grid internally.
init<<<1,threadsPerBlock>>>(d_grid, Il, dI, ldr, rlength, (rseg + 2)*(zseg+2), zseg);
cudaMemcpy(h_grid, d_grid, grid_size, cudaMemcpyDeviceToHost);
// Write the initial field: first two lines are the z and r coordinate
// axes, then one grid row per line.
FILE *myfile;
myfile = fopen("init.txt", "w");
long int i;
for(i = 0; i< zseg+1; i++)
fprintf(myfile, "%lf ", i*ldz);
fprintf(myfile, "%lf\n", i*ldz);
for(i = 0; i< rseg+1; i++)
fprintf(myfile, "%lf ", i*ldr);
fprintf(myfile, "%lf\n", i*ldr);
for(i = 0; i< (rseg + 2)*(zseg+2); i++){
if(i%(zseg+2)==zseg+1)
fprintf(myfile, "%lf\n", h_grid[i]);
else
fprintf(myfile, "%lf ", h_grid[i]);
}
fclose(myfile);
// Dimensionless diffusion coefficients for the r and z stencils.
double r_aug = eta*tstep/(mu0*ldr*ldr);
double z_aug = eta*tstep/(mu0*ldz*ldz);
long int total_steps = tottime / tstep;
printf("\nSteps: %ld\n", total_steps);
clock_t begin, end;
double time_spent;
begin = clock();
//run
printf("Called run\n");
// NOTE(review): the third launch parameter supplies ONE grid's worth of
// dynamic shared memory, but run() declares two extern __shared__
// arrays (which alias each other); a correct two-buffer scheme would
// need twice this size. Large grids can also exceed the per-block
// shared-memory limit, and no launch-error checking is done here.
run<<<1,threadsPerBlock, (rseg + 2)*(zseg+2)*sizeof(double)>>>(d_grid, r_aug, z_aug, total_steps, (rseg + 2)*(zseg+2), rseg, zseg);
cudaDeviceSynchronize();
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
cudaMemcpy(h_grid, d_grid, grid_size, cudaMemcpyDeviceToHost);
// Write the final field in the same layout as init.txt.
myfile = fopen("res.txt", "w");
for(i = 0; i< zseg+1; i++)
fprintf(myfile, "%lf ", i*ldz);
fprintf(myfile, "%lf\n", i*ldz);
for(i = 0; i< rseg+1; i++)
fprintf(myfile, "%lf ", i*ldr);
fprintf(myfile, "%lf\n", i*ldr);
for(i = 0; i< (rseg + 2)*(zseg+2); i++){
if(i%(zseg+2)==zseg+1)
fprintf(myfile, "%lf\n", h_grid[i]);
else
fprintf(myfile, "%lf ", h_grid[i]);
}
fclose(myfile);
free(h_grid);
cudaFree(d_grid);
printf("\n------------------------------------\nExecution took: %lf sec\n", time_spent);
return 0;
}
|
7,852 | #include "includes.h"
// Tiled matrix multiply: c = a * b for square width x width row-major
// matrices. Launch with TILE_WIDTH x TILE_WIDTH thread blocks covering the
// output. Generalized to widths that are NOT a multiple of TILE_WIDTH by
// zero-padding the shared tiles and guarding the final store (the original
// silently read/wrote out of bounds in that case).
__global__ void multi(float *a, float *b, float *c, int width) {
__shared__ float s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ float s_b[TILE_WIDTH][TILE_WIDTH];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0;
// ceil(width / TILE_WIDTH) tiles cover each dimension.
int numTiles = (width + TILE_WIDTH - 1) / TILE_WIDTH;
for (int p = 0; p < numTiles; p++)
{
int aCol = p*TILE_WIDTH + threadIdx.x;
int bRow = p*TILE_WIDTH + threadIdx.y;
// Out-of-range elements load as 0 so they do not affect the sum.
s_a[threadIdx.y][threadIdx.x] = (row < width && aCol < width) ? a[row*width + aCol] : 0.0f;
s_b[threadIdx.y][threadIdx.x] = (bRow < width && col < width) ? b[bRow*width + col] : 0.0f;
__syncthreads(); // tile fully loaded before anyone reads it
for (int i = 0; i < TILE_WIDTH; i++)
{
result += s_a[threadIdx.y][i] * s_b[i][threadIdx.x];
}
__syncthreads(); // done with this tile before the next load overwrites it
}
if (row < width && col < width)
c[row * width + col] = result;
}
7,853 | #include "includes.h"
// Compile-time limits (presumably: threads per block, max spike count, max
// PC rank, max iterations, max channel count -- names follow Kilosort
// conventions; confirm against includes.h).
const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// THIS UPDATE DOES NOT UPDATE ELOSS?
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// Accumulate spike-triggered waveforms: for every spike assigned to this
// block's template (id[spike] == blockIdx.x) whose amplitude x exceeds Th,
// add the snippet dataraw + y * (W x U reconstruction) into WU and count
// the spike in nsp[blockIdx.x].
// One block per template; threadIdx.x indexes time samples (0..nt0-1),
// threadIdx.y strides over channels. Spikes are processed serially in
// index order -- see the determinism note below.
__global__ void average_snips(const double *Params, const int *st, const int *id, const float *x, const float *y, const int *counter, const float *dataraw, const float *W, const float *U, double *WU, int *nsp, const float *mu, const float *z){
int nt0, tidx, tidy, bid, NT, Nchan,k, Nrank, Nfilt;
int currInd;
float Th;
double X, xsum;
// Unpack problem dimensions from the Params vector.
NT = (int) Params[0];
Nfilt = (int) Params[1];
nt0 = (int) Params[4];
Nrank = (int) Params[6];
Nchan = (int) Params[9];
tidx = threadIdx.x;
bid = blockIdx.x;
//Th = 10.f;
Th = (float) Params[15];
// we need wPCA projections in here, and then to decide based on total
// idx is the time sort order of the spikes; the original order is a function
// of when threads complete in mexGetSpikes. Compilation of the sums for WU, sig, and dnextbest
// in a fixed order makes the calculation deterministic.
for(currInd=0; currInd<counter[0];currInd++) {
// only do this if the spike is "GOOD"
if (x[currInd]>Th){
if (id[currInd]==bid){
// Exactly one thread per block increments the spike count, so no
// atomic is needed here.
if (tidx==0 && threadIdx.y==0)
nsp[bid]++;
tidy = threadIdx.y;
while (tidy<Nchan){
// Low-rank template value at (time tidx, channel tidy):
// sum over ranks of W (temporal) * U (spatial).
X = 0.0f;
for (k=0;k<Nrank;k++)
X += W[tidx + bid* nt0 + nt0*Nfilt*k] *
U[tidy + bid * Nchan + Nchan*Nfilt*k];
// Raw data at the spike time plus the amplitude-scaled template.
xsum = dataraw[st[currInd]+tidx + NT * tidy] + y[currInd] * X;
//WU[tidx+tidy*nt0 + nt0*Nchan * bid] *= p[bid];
WU[tidx+tidy*nt0 + nt0*Nchan * bid] += (double) xsum;
tidy+=blockDim.y;
} //end of while loop over channels
} //end of if block for id == bid
}
} //end of for loop over spike indicies
} //end of function
7,854 | #include "includes.h"
// Element-wise clamp of v[0..n) to the range [-limit, limit].
// Comparison-based so NaN inputs pass through unchanged, exactly as the
// original ternary form behaved.
__global__ void clip_kerneld(double *v, int n, double limit) {
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx >= n) return;
    double value = v[idx];
    if (value > limit) {
        value = limit;
    } else if (value < -limit) {
        value = -limit;
    }
    v[idx] = value;
}
7,855 | // (c) Copyright 2013 Lev Barash, Landau Institute for Theoretical Physics, Russian Academy of Sciences
// This is supplement to the paper:
// L.Yu. Barash, L.N. Shchur, "PRAND: GPU accelerated parallel random number generation library: Using most reliable algorithms and applying parallelism of modern GPUs and CPUs".
// e-mail: barash @ itp.ac.ru (remove space)
#include<stdio.h>
#define gq58x3_CUDA_CALL(x) do { if((x) != cudaSuccess) { printf("Error: %s at %s:%d\n",cudaGetErrorString(cudaGetLastError()),__FILE__,__LINE__); exit(1);}} while(0)
#define gq58x3_BLOCKS 128
#define gq58x3_THREADS 192
#define gq58x3_ARRAY_SECTIONS (gq58x3_BLOCKS*gq58x3_THREADS/12)
#define gq58x3_k 8
#define gq58x3_q 48
#define gq58x3_g 288230374541099008ULL
#define gq58x3_gdiv8 36028796817637376ULL
// 64-bit unsigned integer used throughout the generator.
typedef unsigned long long lt;
// Generator state: 12 independent "orbits"; xN holds the current value x_n
// and xP the previous value x_{n-1} for each orbit, 16-byte aligned for
// SSE loads.
typedef struct{
lt xN[12] __attribute__ ((aligned(16))),
xP[12] __attribute__ ((aligned(16)));
} gq58x3_state;
typedef gq58x3_state gq58x3_sse_state;
// Packed pairs of 64-bit constants consumed as 128-bit operands by the
// inline-asm recurrence in gq58x3_sse_generate_.
lt gq58x3_sse_Consts[10] __attribute__ ((aligned(16))) =
{13835057977972752384ULL,13835057977972752384ULL,1610612736ULL,1610612736ULL,
288230371923853311ULL,288230371923853311ULL,288230374541099008ULL,288230374541099008ULL,
18157383382357244923ULL,18157383382357244923ULL};
// Advance all 12 orbits of the generator by one step with hand-written
// SSE2 integer asm and pack the results into a single 32-bit random value.
// Register protocol: %1 = state->xN, %2 = state->xP, %3 = gq58x3_sse_Consts;
// each 16-byte movaps handles two orbits at once, so the same
// recurrence/mod-reduction sequence runs six times over offsets
// 0,16,32,48,64,80, then the high bits of all lanes are packed into %0.
// Host-only: x86 SSE asm cannot execute in device code.
extern "C" __host__ unsigned int gq58x3_sse_generate_(gq58x3_sse_state* state){
unsigned output;
asm volatile("movaps (%3),%%xmm0\n" \
"movaps (%2),%%xmm1\n" \
"movaps (%1),%%xmm4\n" \
"movaps %%xmm4,(%2)\n" \
"psllq $3,%%xmm4\n" \
"paddq %%xmm0,%%xmm4\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm4\n" \
"paddq %%xmm3,%%xmm4\n" \
"movaps %%xmm4,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm4\n" \
"movaps %%xmm4,(%1)\n" \
"movaps %%xmm4,%%xmm1\n" \
"paddq %%xmm4,%%xmm1\n" \
"paddq %%xmm4,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm4\n" \
"movaps 16(%2),%%xmm1\n" \
"movaps 16(%1),%%xmm5\n" \
"movaps %%xmm5,16(%2)\n" \
"psllq $3,%%xmm5\n" \
"paddq %%xmm0,%%xmm5\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"paddq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,16(%1)\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm5\n" \
"movaps 32(%2),%%xmm1\n" \
"movaps 32(%1),%%xmm6\n" \
"movaps %%xmm6,32(%2)\n" \
"psllq $3,%%xmm6\n" \
"paddq %%xmm0,%%xmm6\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"movaps %%xmm6,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm6\n" \
"paddq %%xmm3,%%xmm6\n" \
"movaps %%xmm6,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm6\n" \
"movaps %%xmm6,32(%1)\n" \
"movaps %%xmm6,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"paddq %%xmm6,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm6\n" \
"movaps 48(%2),%%xmm1\n" \
"movaps 48(%1),%%xmm7\n" \
"movaps %%xmm7,48(%2)\n" \
"psllq $3,%%xmm7\n" \
"paddq %%xmm0,%%xmm7\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"paddq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,48(%1)\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm7\n" \
"psrlq $55,%%xmm4\n" \
"psrlq $55,%%xmm5\n" \
"psrlq $55,%%xmm6\n" \
"psrlq $55,%%xmm7\n" \
"packssdw %%xmm5,%%xmm4\n" \
"packssdw %%xmm7,%%xmm6\n" \
"movaps 64(%2),%%xmm1\n" \
"movaps 64(%1),%%xmm5\n" \
"movaps %%xmm5,64(%2)\n" \
"psllq $3,%%xmm5\n" \
"paddq %%xmm0,%%xmm5\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm5\n" \
"paddq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm5\n" \
"movaps %%xmm5,64(%1)\n" \
"movaps %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"paddq %%xmm5,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm5\n" \
"movaps 80(%2),%%xmm1\n" \
"movaps 80(%1),%%xmm7\n" \
"movaps %%xmm7,80(%2)\n" \
"psllq $3,%%xmm7\n" \
"paddq %%xmm0,%%xmm7\n" \
"psllq $4,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"psllq $1,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"psrlq $58,%%xmm1\n" \
"psllq $29,%%xmm1\n" \
"movaps %%xmm1,%%xmm3\n" \
"psllq $1,%%xmm3\n" \
"paddq %%xmm1,%%xmm3\n" \
"psllq $29,%%xmm1\n" \
"psubq %%xmm1,%%xmm7\n" \
"paddq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq 16(%3),%%xmm1\n" \
"pshufd $245,%%xmm1,%%xmm3\n" \
"pcmpgtd 32(%3),%%xmm3\n" \
"pand 48(%3),%%xmm3\n" \
"psubq %%xmm3,%%xmm7\n" \
"movaps %%xmm7,80(%1)\n" \
"movaps %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"paddq %%xmm7,%%xmm1\n" \
"psrlq $29,%%xmm1\n" \
"paddq %%xmm1,%%xmm7\n" \
"psrlq $55,%%xmm5\n" \
"psrlq $55,%%xmm7\n" \
"packssdw %%xmm7,%%xmm5\n" \
"packssdw %%xmm4,%%xmm4\n" \
"packssdw %%xmm6,%%xmm6\n" \
"packssdw %%xmm5,%%xmm5\n" \
"packsswb %%xmm4,%%xmm4\n" \
"packsswb %%xmm6,%%xmm6\n" \
"packsswb %%xmm5,%%xmm5\n" \
"pand 64(%3),%%xmm4\n" \
"pslld $6,%%xmm4\n" \
"pxor %%xmm4,%%xmm5\n" \
"pslld $3,%%xmm6\n" \
"pxor %%xmm6,%%xmm5\n" \
"movd %%xmm5,%0\n" \
"":"=&r"(output):"r"(state->xN),"r"(state->xP),"r"(gq58x3_sse_Consts));
return output;
}
// Copy a scalar generator state into the layout-compatible SSE state.
extern "C" __device__ __host__ void gq58x3_get_sse_state_(gq58x3_state* state,gq58x3_sse_state* sse_state){
int j;
for(j=0;j<12;j++){
sse_state->xN[j]=state->xN[j];
sse_state->xP[j]=state->xP[j];
}
}
// Reduce x modulo g = 2^58 - 2^30 - 2^29: fold the bits above position 58
// back in using 2^58 == 2^30 + 2^29 (mod g), then subtract g at most once.
extern "C" __device__ __host__ lt gq58x3_mod_g(lt x){
lt high = (x>>58);
lt folded = x - (high<<58) + (high<<29) + (high<<30);
if(folded >= gq58x3_g) folded -= gq58x3_g;
return folded;
}
// Modular product A*B (mod g) for A,B < g without 128-bit arithmetic.
// Each operand is re-expressed as Hi*(2^32-12) + Lo (note 2^32-12 =
// 4294967284), and the partial products are folded back modulo g with
// shift/add identities.
extern "C" __device__ __host__ lt gq58x3_MyMult(lt A,lt B){ // returns AB (mod gq58x3_g), where it is implied that A,B<gq58x3_g;
lt A1,A0,B1,B0,curr,x,m;
A1=A>>32; B1=B>>32; A0=A-(A1<<32)+(12*A1); B0=B-(B1<<32)+(12*B1);
// Renormalize so the low parts stay below 2^32.
if(A0>>32) {A0-=4294967284ULL; A1++;}
if(B0>>32) {B0-=4294967284ULL; B1++;}
// Cross terms, then reduction of all remaining pieces modulo g.
curr=A1*B0+B1*A0; m=curr>>26; x=curr-(m<<26);
curr=((3*m+(x<<4))<<28)+(gq58x3_g-12*x)+(144*A1*B1)+(gq58x3_mod_g(A0*B0));
return gq58x3_mod_g(curr);
}
// General recurrence step: (myk*N - myq*P) (mod g), with the subtraction
// kept non-negative by adding g when needed.
extern "C" __device__ __host__ lt gq58x3_CNext2(lt N,lt P,lt myk,lt myq){
lt term1 = gq58x3_MyMult(myk,N);
lt term2 = gq58x3_MyMult(myq,P);
return (term1 >= term2) ? (term1 - term2) : (gq58x3_g + term1 - term2);
}
// One native recurrence step with the fixed coefficients k=8, q=48:
// computed as 8*(N + 6*(g - P)) so all intermediates stay non-negative.
extern "C" __device__ __host__ lt gq58x3_CNext(lt N,lt P){ // returns (8N-48P) (mod gq58x3_g)
return gq58x3_mod_g((N+6*(gq58x3_g-P))<<3);
}
// Power-of-two jump: given x_0 and x_1, returns x_{2^n}. Each iteration
// doubles the stride by updating the recurrence coefficients (myk, myq)
// along with the point itself.
extern "C" __device__ __host__ lt gq58x3_GetNextN(lt x0,lt x1,unsigned int n){ //returns x_{2^n}
lt myk=gq58x3_k,myq=gq58x3_q,i,x=x1;
for(i=0;i<n;i++){
x=gq58x3_CNext2(x,x0,myk,myq);
myk=gq58x3_CNext2(myk,2,myk,myq);
myq=gq58x3_CNext2(myq,0,myq,0);
}
return x;
}
// Arbitrary jump: returns x_N for N = 2^64*N64 + N0 + 1 via binary
// decomposition of the offset. For each set bit the running pair (xp, xn)
// is advanced by 2^shift steps using gq58x3_GetNextN; the second loop
// continues the decomposition at bit 64 for the high word.
extern "C" __device__ __host__ lt gq58x3_GetNextAny(lt x0,lt x1,lt N64,lt N0){ //N=2^64*N64+N0+1
lt i,xp=x0,xn=x1,xpnew,xnnew,shift=0;
i=N0; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gq58x3_GetNextN(xp,xn,shift);
xnnew=gq58x3_GetNextN(xn,gq58x3_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
i=N64; shift=64; while(i>0){
if(i%2==1){ // xp,xn ----> 2^shift
xpnew=gq58x3_GetNextN(xp,xn,shift);
xnnew=gq58x3_GetNextN(xn,gq58x3_CNext(xn,xp),shift);
xp=xpnew; xn=xnnew;
}
i/=2; shift++;
}
return xp; // returns x_N, where N=2^64*N64+N0+1
}
// Advance every orbit of the state by offset64*2^64 + offset0 + 1 steps
// (both the previous and current point of each orbit are jumped).
extern "C" __device__ __host__ void gq58x3_skipahead_(gq58x3_state* state, lt offset64, lt offset0){ // offset=offset64*2^64+offset0+1
int j;
for(j=0;j<12;j++){
lt newP = gq58x3_GetNextAny(state->xP[j],state->xN[j],offset64,offset0);
lt newN = gq58x3_GetNextAny(state->xP[j],state->xN[j],offset64,offset0+1);
state->xP[j] = newP;
state->xN[j] = newN;
}
}
// Seed the 12 orbits from fixed constants: each successive orbit is the
// previous one advanced by ~2.4*10^16 steps so the orbits do not overlap.
extern "C" __device__ __host__ void gq58x3_init_(gq58x3_state* state){
lt x0=100142853817629549ULL,x1=133388305121829306ULL,xp,xn,j;
for(j=0;j<12;j++){
xp=gq58x3_GetNextAny(x0,x1,0,24014539279611495ULL);
xn=gq58x3_GetNextAny(x0,x1,0,24014539279611496ULL);
state->xP[j]=xp; state->xN[j]=xn; x0=xp; x1=xn;
}
}
// Partition the period into up to 2*10^8 non-overlapping streams of length
// <= 8*10^7 each and jump to the start of stream `SequenceNumber`.
extern "C" __device__ __host__ void gq58x3_init_short_sequence_(gq58x3_state* state,unsigned SequenceNumber){
gq58x3_init_(state); // 0 <= SequenceNumber < 2*10^8; length of each sequence <= 8*10^7
gq58x3_skipahead_(state,0,82927047ULL*(unsigned long long)SequenceNumber);
}
// Partition the period into up to 2*10^6 non-overlapping streams of length
// <= 8*10^9 each and jump to the start of stream `SequenceNumber`.
extern "C" __device__ __host__ void gq58x3_init_medium_sequence_(gq58x3_state* state,unsigned SequenceNumber){
gq58x3_init_(state); // 0 <= SequenceNumber < 2*10^6; length of each sequence <= 8*10^9
gq58x3_skipahead_(state,0,8799201913ULL*(unsigned long long)SequenceNumber);
}
// Partition the period into up to 2*10^4 non-overlapping streams of length
// <= 8*10^11 each and jump to the start of stream `SequenceNumber`.
extern "C" __device__ __host__ void gq58x3_init_long_sequence_(gq58x3_state* state,unsigned SequenceNumber){
gq58x3_init_(state); // 0 <= SequenceNumber < 2*10^4; length of each sequence <= 8*10^11
gq58x3_skipahead_(state,0,828317697521ULL*(unsigned long long)SequenceNumber);
}
// Advance all 12 orbits one step and pack high bits from each orbit into a
// single 32-bit random value.
extern "C" __device__ __host__ unsigned int gq58x3_generate_(gq58x3_state* state){
unsigned sum=0; int i; lt temp;
for(i=0;i<12;i++){
// One recurrence step: 8*xN - 48*xP (mod g), cf. gq58x3_CNext.
temp=gq58x3_mod_g((state->xN[i]+6*(gq58x3_g-state->xP[i]))<<3);
state->xP[i]=state->xN[i]; state->xN[i]=temp;
// temp/gq58x3_gdiv8 extracts the top 3 bits (0..7); position them in
// this orbit's slot of the output word.
sum+=((((temp/gq58x3_gdiv8)<<((i<4)?6:((i<8)?3:0)))%256)<<(8*(i%4)));
}
return sum;
}
// Same stepping/packing as gq58x3_generate_, but the packed 32-bit word is
// scaled by 2^-32 (2.3283064365386963e-10) to a float in [0,1).
extern "C" __device__ __host__ float gq58x3_generate_uniform_float_(gq58x3_state* state){
unsigned sum=0; int i; lt temp;
for(i=0;i<12;i++){
// One step of the recurrence: x_next = ((xN + 6*(g - xP)) * 8) mod g.
temp=gq58x3_mod_g((state->xN[i]+6*(gq58x3_g-state->xP[i]))<<3);
state->xP[i]=state->xN[i]; state->xN[i]=temp;
sum+=((((temp/gq58x3_gdiv8)<<((i<4)?6:((i<8)?3:0)))%256)<<(8*(i%4)));
}
return ((float) sum) * 2.3283064365386963e-10;
}
extern "C" __host__ void gq58x3_print_state_(gq58x3_state* state){
    // Dump the 12 xN lags followed by the 12 xP lags, each reduced mod g.
    int k;
    printf("Generator State:\nxN={");
    for(k = 0; k < 12; k++){
        printf("%llu", state->xN[k] % gq58x3_g);
        printf((k < 11) ? "," : "}\nxP={");
    }
    for(k = 0; k < 12; k++){
        printf("%llu", state->xP[k] % gq58x3_g);
        printf((k < 11) ? "," : "}\n\n");
    }
}
extern "C" __host__ void gq58x3_print_sse_state_(gq58x3_sse_state* state){
    // Same dump format as gq58x3_print_state_, but for the SSE state layout.
    int k;
    printf("Generator State:\nxN={");
    for(k = 0; k < 12; k++){
        printf("%llu", state->xN[k] % gq58x3_g);
        printf((k < 11) ? "," : "}\nxP={");
    }
    for(k = 0; k < 12; k++){
        printf("%llu", state->xP[k] % gq58x3_g);
        printf((k < 11) ? "," : "}\n\n");
    }
}
// Fill out[] with unsigned ints.  Each group of 12 consecutive threads forms
// one RNG "sequence": every thread advances one of the 12 component
// recurrences, and the threads' byte contributions are combined in shared
// memory.  Requires blockDim.x == gq58x3_THREADS (shared array sizes) and
// blockDim.x divisible by 12.
__global__ void gq58x3_kernel_generate_array(gq58x3_state* state, unsigned int* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift1,shift2; long offset; lt temp;
__shared__ lt xP[gq58x3_THREADS]; // one generator per s=12 threads, i.e. one orbit
__shared__ lt xN[gq58x3_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x3_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 12; // which of the 12 component recurrences this thread owns
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)/12; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
// Skip this thread's component ahead to the start of its output section.
xP[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
j=(orbit>>2); shift1 = 6-3*j; shift2 = (8*(orbit-(j<<2))); // per-component bit placement
for(i=0;i<(*length);i++){ // each s=12 threads result in "length" values in the output array
temp = gq58x3_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((((temp/gq58x3_gdiv8)<<shift1)&(255U))<<shift2);
__syncthreads();
// Lead thread of each group of 4 sums its group's byte contributions.
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
// NOTE(review): the orbit-0 thread reads a[tid+4] and a[tid+8] below while
// other threads may already be overwriting a[] at the top of the next
// iteration; there is no barrier between this read and that write —
// looks like a potential shared-memory race, confirm intent.
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]; out[offset+i]=sum; }
}
}
// Launch the array-generation kernel writing directly into the caller's
// device buffer dev_out, which must hold at least
// ceil(*length / gq58x3_ARRAY_SECTIONS) * gq58x3_ARRAY_SECTIONS values.
extern "C" __host__ void gq58x3_generate_gpu_array_(gq58x3_state* state, unsigned int* dev_out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
long* dev_length;
// Round the per-section length up so all sections together cover *length.
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x3_state),cudaMemcpyHostToDevice));
gq58x3_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x3_kernel_generate_array<<<gq58x3_BLOCKS,gq58x3_THREADS>>>(dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(cudaGetLastError());
// NOTE(review): no explicit synchronization after the launch before freeing
// the kernel's input buffers — relies on cudaFree ordering; confirm.
gq58x3_CUDA_CALL(cudaFree(dev_state)); gq58x3_CUDA_CALL(cudaFree(dev_length));
}
// Float variant of gq58x3_kernel_generate_array: identical stepping and
// shared-memory combination, but each packed 32-bit word is scaled by 2^-32
// to a float in [0,1) before being stored.
__global__ void gq58x3_kernel_generate_array_float(gq58x3_state* state, float* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift1,shift2; long offset; lt temp;
__shared__ lt xP[gq58x3_THREADS]; // one generator per s=12 threads, i.e. one orbit
__shared__ lt xN[gq58x3_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x3_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 12; // component recurrence owned by this thread
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)/12; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
j=(orbit>>2); shift1 = 6-3*j; shift2 = (8*(orbit-(j<<2))); // per-component bit placement
for(i=0;i<(*length);i++){ // each s=12 threads result in "length" values in the output array
temp = gq58x3_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((((temp/gq58x3_gdiv8)<<shift1)&(255U))<<shift2);
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
// NOTE(review): same read-after-barrier pattern as the uint kernel — the
// reads of a[tid+4]/a[tid+8] are not fenced against the next iteration's
// writes; potential race, confirm.
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]; out[offset+i]=((float)sum) * 2.3283064365386963e-10; }
}
}
// Float variant of gq58x3_generate_gpu_array_: launches the float kernel
// into the caller-supplied device buffer dev_out.
extern "C" __host__ void gq58x3_generate_gpu_array_float_(gq58x3_state* state, float* dev_out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
long* dev_length;
// Round the per-section length up so all sections together cover *length.
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x3_state),cudaMemcpyHostToDevice));
gq58x3_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x3_kernel_generate_array_float<<<gq58x3_BLOCKS,gq58x3_THREADS>>>(dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(cudaGetLastError());
// NOTE(review): no explicit sync before freeing the kernel's inputs.
gq58x3_CUDA_CALL(cudaFree(dev_state)); gq58x3_CUDA_CALL(cudaFree(dev_length));
}
// Double variant of gq58x3_kernel_generate_array: identical stepping and
// shared-memory combination, output scaled by 2^-32 into a double in [0,1).
__global__ void gq58x3_kernel_generate_array_double(gq58x3_state* state, double* out, long* length) {
unsigned sum,i,j,orbit,seqNum,shift1,shift2; long offset; lt temp;
__shared__ lt xP[gq58x3_THREADS]; // one generator per s=12 threads, i.e. one orbit
__shared__ lt xN[gq58x3_THREADS]; // per thread, i.e. blockDim.x orbits per block
__shared__ unsigned a[gq58x3_THREADS]; // array "a" contains corresponding parts of output
orbit = threadIdx.x % 12; // component recurrence owned by this thread
seqNum = (threadIdx.x + blockIdx.x * blockDim.x)/12; // RNG_sequence index
offset = seqNum*(*length); // start of the section in the output array
xP[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset);
xN[threadIdx.x]=gq58x3_GetNextAny(state->xP[orbit],state->xN[orbit],0,offset+1);
j=(orbit>>2); shift1 = 6-3*j; shift2 = (8*(orbit-(j<<2))); // per-component bit placement
for(i=0;i<(*length);i++){ // each s=12 threads result in "length" values in the output array
temp = gq58x3_CNext( xN[threadIdx.x], xP[threadIdx.x] );
xP[threadIdx.x] = xN[threadIdx.x]; xN[threadIdx.x] = temp;
a[threadIdx.x] = ((((temp/gq58x3_gdiv8)<<shift1)&(255U))<<shift2);
__syncthreads();
if((orbit&3)==0) a[threadIdx.x] = a[threadIdx.x]+a[threadIdx.x+1]+a[threadIdx.x+2]+a[threadIdx.x+3];
__syncthreads();
// NOTE(review): same unfenced read-after-combination pattern as the other
// two kernels — potential shared-memory race across loop iterations.
if(orbit==0){ sum=a[threadIdx.x]+a[threadIdx.x+4]+a[threadIdx.x+8]; out[offset+i]=((double)sum) * 2.3283064365386963e-10; }
}
}
// Double variant of gq58x3_generate_gpu_array_: launches the double kernel
// into the caller-supplied device buffer dev_out.
extern "C" __host__ void gq58x3_generate_gpu_array_double_(gq58x3_state* state, double* dev_out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
long* dev_length;
// Round the per-section length up so all sections together cover *length.
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x3_state),cudaMemcpyHostToDevice));
gq58x3_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x3_kernel_generate_array_double<<<gq58x3_BLOCKS,gq58x3_THREADS>>>(dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(cudaGetLastError());
// NOTE(review): no explicit sync before freeing the kernel's inputs.
gq58x3_CUDA_CALL(cudaFree(dev_state)); gq58x3_CUDA_CALL(cudaFree(dev_length));
}
// Host convenience wrapper: allocates a device buffer, runs the generation
// kernel, and copies *length unsigned ints back into the host array out.
extern "C" __host__ void gq58x3_generate_array_(gq58x3_state* state, unsigned int* out, unsigned int* length){
long mylength = (*length)/gq58x3_ARRAY_SECTIONS;
gq58x3_state* dev_state;
unsigned int* dev_out;
long* dev_length;
// Round the per-section length up so all sections together cover *length;
// the device buffer is sized for the rounded total, the copy-back for *length.
if((mylength*gq58x3_ARRAY_SECTIONS)<(*length)) mylength++;
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_state,sizeof(gq58x3_state)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_out,mylength*gq58x3_ARRAY_SECTIONS*sizeof(unsigned int)));
gq58x3_CUDA_CALL(cudaMalloc((void**)&dev_length,sizeof(long)));
gq58x3_CUDA_CALL(cudaMemcpy(dev_state,state,sizeof(gq58x3_state),cudaMemcpyHostToDevice));
gq58x3_CUDA_CALL(cudaMemcpy(dev_length,&mylength,sizeof(long),cudaMemcpyHostToDevice));
gq58x3_kernel_generate_array<<<gq58x3_BLOCKS,gq58x3_THREADS>>>(dev_state,dev_out,dev_length);
gq58x3_CUDA_CALL(cudaGetLastError());
// Blocking device-to-host copy also orders the cleanup after the kernel.
gq58x3_CUDA_CALL(cudaMemcpy(out,dev_out,(*length)*sizeof(unsigned int),cudaMemcpyDeviceToHost));
gq58x3_CUDA_CALL(cudaFree(dev_state)); gq58x3_CUDA_CALL(cudaFree(dev_out));
gq58x3_CUDA_CALL(cudaFree(dev_length));
}
|
7,856 | //nvcc -ptx test.cu -ccbin "F:Visual Studio\VC\Tools\MSVC\14.12.25827\bin\Hostx64\x64"
#include "curand_kernel.h"
// Fill x[n] with a uniform double and y[n] with a normally-distributed value,
// one element per thread over a 2-D grid of 2-D blocks; threads with a flat
// index >= parNum do nothing.
__device__ void EM1( double *x,
double *y,
const int parNum) {
int globalBlockIndex = blockIdx.x + blockIdx.y * gridDim.x;
int localThreadIdx = threadIdx.x + blockDim.x * threadIdx.y;
int threadsPerBlock = blockDim.x*blockDim.y;
int n = localThreadIdx + globalBlockIndex*threadsPerBlock;
if ( n >= parNum ){
return;
}
curandState state;
// FIX: use the thread index as the cuRAND *subsequence* (2nd argument),
// not the offset: curand_init(seed, sequence, offset, &state).  The
// original curand_init(seed, 0, n, &state) placed every thread at offset n
// of subsequence 0, so neighbouring threads drew overlapping, correlated
// values.  NOTE(review): clock() as seed makes runs non-reproducible;
// consider a host-supplied seed.
curand_init((unsigned long long)clock(),n,0, & state);
x[n]=curand_uniform_double(&state);
y[n]=curand_normal(&state);
}
// Kernel entry point: thin wrapper that forwards to the EM1 device helper
// (fills x with uniform and y with normal random values, one per thread).
__global__ void processMandelbrotElement(
double *x,
double *y,
const int parNum) {
EM1(x,y,parNum);
}
7,857 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
#include <cmath>
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
__constant__ double dev_avg[32 * 3];
__constant__ double dev_det[32];
__constant__ double dev_ncov[32 * 9];
// Maximum-likelihood classifier for one pixel: for each class i compute the
// discriminant d_i = -(p-avg_i)^T * ncov_i * (p-avg_i) - ln|det_i| from the
// per-class statistics in constant memory, and return the index of the
// largest d_i.  Assumes nc <= 32 (sizes of dev_avg/dev_det/dev_ncov and d_i).
__device__ int classification(uchar4 p, int nc) {
double d_i[32] = {};
double t_1[3] = {};
for (int i = 0; i < nc; ++i) {
double t_2[3] = {};
int offset = i * 9;
// t_1 = p - mean_i, componentwise over the RGB channels.
t_1[0] = p.x - dev_avg[i * 3];
t_1[1] = p.y - dev_avg[i * 3 + 1];
t_1[2] = p.z - dev_avg[i * 3 + 2];
// Accumulate the quadratic form t_1^T * ncov_i * t_1 (negated into d_i).
for (int j = 0; j < 3; ++j) {
for (int k = 0; k < 3; ++k) {
t_2[j] += t_1[k] * dev_ncov[offset + k * 3 + j];
}
d_i[i] -= t_2[j] * t_1[j];
}
// Subtract ln|det(cov_i)|; abs() guards against a negative determinant.
d_i[i] -= std::log(std::abs(dev_det[i]));
}
// Pick the class with the highest discriminant.
double d_max = d_i[0];
int cl = 0;
for (int i = 1; i < nc; ++i) {
if (d_i[i] > d_max) {
d_max = d_i[i];
cl = i;
}
}
return cl;
}
__global__ void MLE(uchar4* data, int w, int h, int nc) {
    // Grid-stride loop over every pixel: classify it and store the winning
    // class index in the pixel's alpha component.
    const int total  = w * h;
    const int stride = blockDim.x * gridDim.x;
    for (int pix = blockDim.x * blockIdx.x + threadIdx.x; pix < total; pix += stride) {
        data[pix].w = classification(data[pix], nc);
    }
}
// Pipeline: read class training samples from stdin, read an image file,
// compute per-class mean / covariance / inverse covariance, upload the
// statistics to constant memory, classify every pixel on the GPU, and write
// the result image.
int main() {
std::string in, out;
int nc, w, h;
// Input/output file names and the number of classes, then for each class a
// count followed by that many (x, y) sample coordinates.
std::cin >> in >> out >> nc;
std::vector<std::vector<uint2>> cl;
for (int i = 0; i < nc; ++i) {
int np;
std::cin >> np;
cl.push_back(std::vector<uint2>(np));
for (int j = 0; j < np; ++j) {
std::cin >> cl[i][j].x >> cl[i][j].y;
}
}
// NOTE(review): fopen/fread results are not checked; a missing file crashes.
FILE* fp = fopen(in.c_str(), "rb");
fread(&w, sizeof(int), 1, fp);
fread(&h, sizeof(int), 1, fp);
uchar4* data = new uchar4[w * h];
fread(data, sizeof(uchar4), w * h, fp);
fclose(fp);
// Per-class mean colour over the training samples.
double* avg = new double[nc * 3];
std::fill_n(avg, nc * 3, 0.0);
for (int i = 0; i < nc; ++i) {
for (int j = 0; j < cl[i].size(); ++j) {
uchar4 p = data[cl[i][j].y * w + cl[i][j].x];
avg[i * 3] += p.x;
avg[i * 3 + 1] += p.y;
avg[i * 3 + 2] += p.z;
}
for (int k = 0; k < 3; ++k) {
avg[i * 3 + k] /= cl[i].size();
}
}
// Per-class 3x3 sample covariance (divided by n-1).
double* cov = new double[nc * 9];
std::fill_n(cov, nc * 9, 0.0);
for (int i = 0; i < nc; ++i) {
int offset = i * 9;
for (int j = 0; j < cl[i].size(); ++j) {
uchar4 p = data[cl[i][j].y * w + cl[i][j].x];
double v[3];
v[0] = p.x - avg[i * 3];
v[1] = p.y - avg[i * 3 + 1];
v[2] = p.z - avg[i * 3 + 2];
for (int n = 0; n < 3; ++n) {
for (int m = 0; m < 3; ++m) {
cov[offset + n * 3 + m] += v[n] * v[m];
}
}
}
for (int n_ = 0; n_ < 3; ++n_) {
for (int m_ = 0; m_ < 3; ++m_) {
cov[offset + n_ * 3 + m_] /= cl[i].size() - 1;
}
}
}
// Determinant of each covariance matrix (cofactor expansion).
double* det = new double[nc];
for (int i = 0; i < nc; ++i) {
int offset = i * 9;
det[i] = cov[offset] * (cov[offset + 4] * cov[offset + 8] - cov[offset + 7] * cov[offset + 5]) -
cov[offset + 3] * (cov[offset + 1] * cov[offset + 8] - cov[offset + 7] * cov[offset + 2]) +
cov[offset + 6] * (cov[offset + 1] * cov[offset + 5] - cov[offset + 4] * cov[offset + 2]);
}
//for (int i = 0; i < nc; ++i) std::cout << det[i] << " ";
// Inverse covariance via the adjugate / determinant, then an in-place
// transpose (the swap loop below) to get the final orientation.
double* ncov = new double[nc * 9];
for (int i = 0; i < nc; ++i) {
int offset = i * 9;
ncov[offset] = (cov[offset + 4] * cov[offset + 8] - cov[offset + 7] * cov[offset + 5]) / det[i];
ncov[offset + 1] = -(cov[offset + 3] * cov[offset + 8] - cov[offset + 6] * cov[offset + 5]) / det[i];
ncov[offset + 2] = (cov[offset + 3] * cov[offset + 7] - cov[offset + 6] * cov[offset + 4]) / det[i];
ncov[offset + 3] = -(cov[offset + 1] * cov[offset + 8] - cov[offset + 7] * cov[offset + 2]) / det[i];
ncov[offset + 4] = (cov[offset] * cov[offset + 8] - cov[offset + 6] * cov[offset + 2]) / det[i];
ncov[offset + 5] = -(cov[offset] * cov[offset + 7] - cov[offset + 6] * cov[offset + 1]) / det[i];
ncov[offset + 6] = (cov[offset + 1] * cov[offset + 5] - cov[offset + 4] * cov[offset + 2]) / det[i];
ncov[offset + 7] = -(cov[offset] * cov[offset + 5] - cov[offset + 3] * cov[offset + 2]) / det[i];
ncov[offset + 8] = (cov[offset] * cov[offset + 4] - cov[offset + 3] * cov[offset + 1]) / det[i];
for (int m = 0; m < 3; ++m) {
for (int n = m; n < 3; ++n) {
std::swap(ncov[offset + m * 3 + n], ncov[offset + n * 3 + m]);
}
}
}
delete[] cov;
// Upload image and per-class statistics (constant memory), then classify.
uchar4* dev_data;
CSC(cudaMalloc(&dev_data, sizeof(uchar4) * w * h));
CSC(cudaMemcpy(dev_data, data, sizeof(uchar4) * w * h, cudaMemcpyHostToDevice));
CSC(cudaMemcpyToSymbol(dev_avg, avg, sizeof(double) * nc * 3));
CSC(cudaMemcpyToSymbol(dev_det, det, sizeof(double) * nc));
CSC(cudaMemcpyToSymbol(dev_ncov, ncov, sizeof(double) * nc * 9));
delete[] avg;
delete[] det;
delete[] ncov;
// Time the kernel with CUDA events.
cudaEvent_t start, end;
CSC(cudaEventCreate(&start));
CSC(cudaEventCreate(&end));
CSC(cudaEventRecord(start));
MLE <<<16, 256 >>> (dev_data, w, h, nc);
CSC(cudaGetLastError());
CSC(cudaEventRecord(end));
CSC(cudaEventSynchronize(end));
float t;
CSC(cudaEventElapsedTime(&t, start, end));
CSC(cudaEventDestroy(start));
CSC(cudaEventDestroy(end));
printf("time = %f\n", t);
// Copy classified pixels back and write the output image.
CSC(cudaMemcpy(data, dev_data, sizeof(uchar4) * w * h, cudaMemcpyDeviceToHost));
CSC(cudaFree(dev_data));
fp = fopen(out.c_str(), "wb");
fwrite(&w, sizeof(int), 1, fp);
fwrite(&h, sizeof(int), 1, fp);
fwrite(data, sizeof(uchar4), w * h, fp);
fclose(fp);
delete[] data;
return 0;
}
|
7,858 | /******************************************* ternarytest2.cu ***************************************/
/*mostra 0 no índice 0, 2.4 no índice 1 e nos índice ímpares, mostra valor lixo nos demais índices */
//fail: assert
#include <stdio.h>
#include "cuda.h"
#include <assert.h>
#define N 2 //64
// Thread 0 writes A[1]; every other thread t writes A[2*t], so even indices
// other than index 0 are never written (left as garbage).  Per the header
// comments this is a deliberate assert-failure test case — do not "fix".
__global__ void foo(float* A, float c) {
A[threadIdx.x ? 2*threadIdx.x : 1] = c;
}
|
7,859 | /*
* EyUpdater.cpp
*
* Created on: 01 февр. 2016 г.
* Author: aleksandr
*/
#include "EyUpdater.h"
#include "SmartIndex.h"
// indx - индекс вдоль правой или левой границы по y от firstY до lastY
// Correct the Ey field at the left (firstX) and right (lastX) grid edges for
// one y-row `indx`, using the 1-D auxiliary field Hz1D (total-field /
// scattered-field boundary update; see the commented-out reference loops).
__host__ __device__
void EyUpdater::operator() (const int indx) {
// correct Ey field along left edge
/*mm = firstX;
for (nn = firstY; nn < lastY; nn++)
Ey(mm, nn) += Ceyh(mm, nn) * Hz1G(g1, mm - 1);
// correct Ey field along right edge
mm = lastX;
for (nn = firstY; nn < lastY; nn++)
Ey(mm, nn) -= Ceyh(mm, nn) * Hz1G(g1, mm);*/
// Constant coefficient replacing the per-cell Ceyh(mm, nn) table above.
// NOTE(review): S*377.0 is a double product truncated to float — confirm
// the intended precision.
float Ceyh = S*377.0;
int m = firstX;
Ey(m, indx) = Ey(m, indx) + Ceyh * Hz1D[m-1];
m = lastX;
Ey(m, indx) = Ey(m, indx) - Ceyh * Hz1D[m];
}
|
7,860 | // ***********************************************************************
//
// Demo program for education in subject
// Computer Architectures and Paralel Systems
// Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava
// email:petr.olivka@vsb.cz
//
// Example of CUDA Technology Usage
// Multiplication of elements in float array
//
// ***********************************************************************
#include <cuda_runtime.h>
#include <stdio.h>
//#define swap(float *p1, float *p2){ float tmp = *p1; *p1=*p2; *p2=tmp;}
// Demo kernel for array elements multiplication.
// Every thread selects one element and multiply it.
__global__ void kernel_mult( float *pole, int L, int inc)
{
    // One compare/swap step of odd-even transposition (bubble) sort: each
    // even-indexed thread orders the adjacent pair (l+inc, l+inc+1).  Two
    // launches with inc = 0 and inc = 1 form one full sorting phase; odd
    // threads exit immediately so no two swaps touch the same element.
    const int l = blockDim.x * blockIdx.x + threadIdx.x;
    if ((l & 1) == 1)
        return;
    // Drop threads whose pair would fall outside the array.
    if (l >= L - 1 - inc)
        return;
    const int left = l + inc;
    const int right = left + 1;
    if (pole[left] > pole[right])
    {
        const float held = pole[left];
        pole[left] = pole[right];
        pole[right] = held;
    }
}
// Sort the float array P of length Length on the GPU with odd-even
// transposition sort: Length/2 iterations, each running the even-pair and
// odd-pair swap kernels once.
void bsort( float *P, int Length)
{
cudaError_t cerr;
int threads = 1024;
int blocks = ( Length + threads - 1 ) / threads;
printf("blocks: %d\n", blocks);
// Memory allocation in GPU device
float *cudaP;
cerr = cudaMalloc( &cudaP, Length * sizeof( float ) );
if ( cerr != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
// Copy data from PC to GPU device
cerr = cudaMemcpy( cudaP, P, Length * sizeof( float ), cudaMemcpyHostToDevice );
if ( cerr != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
// Grid creation
// Each iteration performs the even (inc=0) then odd (inc=1) swap phase;
// Length/2 iterations guarantee the array is fully sorted.
for(int i=0;i<Length/2; i++)
{
kernel_mult<<< blocks, threads >>>(cudaP, Length, 0);
kernel_mult<<< blocks, threads >>>(cudaP, Length, 1);
}
if ( ( cerr = cudaGetLastError() ) != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
// Copy data from GPU device to PC
// (blocking copy also synchronizes with the queued kernels)
cerr = cudaMemcpy( P, cudaP, Length * sizeof( float ), cudaMemcpyDeviceToHost );
if ( cerr != cudaSuccess )
printf( "CUDA Error [%d] - '%s'\n", __LINE__, cudaGetErrorString( cerr ) );
// Free memory
cudaFree(cudaP);
}
|
7,861 | #include <stdio.h>
#define ThreadsPerBlock 10
// Compute one output element of the "valid" 2-D convolution (no padding):
// the k_size x k_size kernel is applied to the input window whose top-left
// corner is (conv_col, conv_row), and the result is stored at
// d_conv[conv_col + conv_row*max_col].
__device__ void convolution(int conv_col, int conv_row, float *d_kernel, int k_size, float *d_matrix, int size_x, int size_y, float *d_conv, int max_row, int max_col){
    int conv_index = conv_col + conv_row*max_col;
    // FIX: accumulate in a register instead of read-modify-writing global
    // memory once per kernel tap (same result, one global store total).
    float acc = 0.0f;
    for(int k_row = 0; k_row < k_size; k_row ++){
        for(int k_col = 0; k_col < k_size ; k_col ++){
            acc += d_kernel[k_col + (k_row*k_size)] *
                   d_matrix[(conv_col+k_col) + (conv_row+k_row)*size_x];
        }
    }
    d_conv[conv_index] = acc;
}
__global__ void convolute(float *d_kernel, int k_size, float *d_matrix, int size_x, int size_y, float *d_conv, int max_row, int max_col){
    // One thread per output element of the valid-convolution result; threads
    // mapped outside the max_row x max_col output region exit immediately.
    const int out_col = threadIdx.x + blockIdx.x * blockDim.x;
    const int out_row = threadIdx.y + blockIdx.y * blockDim.y;
    if (out_row >= max_row || out_col >= max_col)
        return;
    convolution(out_col, out_row, d_kernel, k_size, d_matrix, size_x, size_y, d_conv, max_row, max_col);
}
void print_mat(float *mat, int n){
    // Print an n x n matrix, one tab-separated row per line, followed by a
    // blank line.
    for (int row = 0; row < n; ++row){
        for (int col = 0; col < n; ++col){
            printf("%.1f\t", mat[row*n + col]);
        }
        printf("\n");
    }
    printf("\n");
}
// Fill an n x n matrix with the constant 2 (simple test fixture).
// FIX: removed the unused counter `c` and the dead commented-out assignment.
void fill_mat(float *mat, int n){
    for (int i = 0; i < n; i++){
        for (int j = 0; j < n; j++){
            mat[i*n+j] = 2;
        }
    }
}
void fill_ker(float *mat){
    // Build the fixed 3x3 sharpen-style kernel: 8 at the centre, -1 elsewhere.
    const int size = 3;
    for (int r = 0; r < size; ++r){
        for (int c = 0; c < size; ++c){
            mat[r*size + c] = (r == 1 && c == 1) ? 8 : -1;
        }
    }
}
// Read a square matrix size from stdin, convolve a constant matrix of 2s
// with a 3x3 sharpen kernel on the GPU, and print input and result.
int main(){
    float *h_kernel, *h_matrix, *h_conv;
    float *d_kernel, *d_matrix, *d_conv;
    int k_size = 3;
    int size_x, size_y;
    printf("Please enter the size of the square matrix to convolute over: \n");
    scanf("%d", &size_x);
    size_y = size_x;
    // "valid" convolution output: input shrunk by twice the kernel radius.
    int max_row = size_x - (k_size/2)*2;
    int max_col = size_y - (k_size/2)*2;
    // FIX: the grid only needs ceil(size_x / ThreadsPerBlock) blocks per
    // dimension.  The original used (size_x*size_y + TPB-1)/TPB blocks per
    // dimension, launching a grossly oversized grid (and exceeding grid
    // limits for large inputs); the result is unchanged because the kernel
    // guards out-of-range threads.
    int numBlocks = (size_x + (ThreadsPerBlock - 1))/ThreadsPerBlock;
    h_kernel = (float *)malloc(sizeof(float)*k_size*k_size);
    h_matrix = (float *)malloc(sizeof(float)*size_x*size_y);
    h_conv = (float *)malloc(sizeof(float)*max_row*max_col);
    fill_ker(h_kernel);
    fill_mat(h_matrix, size_x);
    printf("\n\n----------- Kernel to apply: \n");
    print_mat(h_kernel, k_size);
    printf("\n\n----------- Original Matrix to convolute: \n");
    print_mat(h_matrix, size_x);
    cudaMalloc((void**)&d_kernel,sizeof(float)*k_size*k_size);
    cudaMalloc((void**)&d_matrix,sizeof(float)*size_x*size_y);
    cudaMalloc((void**)&d_conv,sizeof(float)*max_row*max_col);
    cudaMemcpy(d_kernel, h_kernel,sizeof(float)*k_size*k_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_matrix, h_matrix,sizeof(float)*size_x*size_y, cudaMemcpyHostToDevice);
    dim3 Blocks(numBlocks,numBlocks);
    dim3 Threads(ThreadsPerBlock,ThreadsPerBlock);
    //printf("Blocks %i \nThreads %i \n", numBlocks, ThreadsPerBlock);
    convolute<<<Blocks, Threads>>>(d_kernel, k_size, d_matrix, size_x, size_y, d_conv, max_row, max_col);
    // Blocking copy-back also synchronizes with the kernel.
    cudaMemcpy(h_conv, d_conv,sizeof(float)*max_row*max_col, cudaMemcpyDeviceToHost);
    printf("\n\n------------ Resulting Matrix: \n");
    // NOTE: print_mat assumes a square result, valid here since max_row == max_col.
    print_mat(h_conv, max_col);
    free(h_kernel);
    free(h_conv);
    free(h_matrix);
    cudaFree(d_kernel);
    cudaFree(d_conv);
    cudaFree(d_matrix);
}
|
7,862 | #include <stdio.h>
#include <math.h>
//#include <cutil.h>
#define max(x,y) ((x)>(y)?(x):(y))
// FIX: the original min macro read "((x)<(y)?)(x):(y))" — the stray ')'
// after '?' made any use of min() a compile-time syntax error.
#define min(x,y) ((x)<(y)?(x):(y))
extern int cudaMemcpy();
extern int cudaFree();
extern void _syncthreads();
extern int cudaMemcpyToSymbol();
extern void MV_GPU_wrapper(int*, float*, float*, float*, int*, int, int, int);
extern int cudaMalloc();
extern __global__ void mv_GPU(int, int, int, int*, float*, float*, float*, int*);
//extern __shared__ float*;
int block_size = 0;
int grid_num = 0;
int threads_per_block = 0;
int max_non_zero_per_row = 0;
// Return 1 if every pair of elements differs by at most `threshold`, else 0.
// FIX: use fabs() — plain abs() binds to the *integer* overload in C, which
// truncates sub-1.0 differences to zero and silently accepts wrong results.
int compare(float* a, float* b, int size, double threshold)
{
int i;
for(i = 0; i < size; i++)
{
if(fabs((double)a[i] - (double)b[i]) > threshold) return 0;
}
return 1;
}
void normalMV(int nr, int* ptr, float* data, float* t, float* b, int* indices){
    // CPU reference CSR sparse matrix-vector product: t += A * b.
    // ptr[row]..ptr[row+1] delimits row `row`'s nonzeros in data/indices.
    for (int row = 0; row < nr; ++row){
        float acc = t[row];
        for (int k = ptr[row]; k < ptr[row + 1]; ++k){
            acc += data[k] * b[indices[k]];
        }
        t[row] = acc;
    }
}
// Host wrapper for the GPU CSR SpMV: copies the CSR arrays and vectors to
// the device, launches mv_GPU with the globally-configured grid/block
// sizes, and copies the result vector t back.
// NOTE(review): the literal 4 below stands in for sizeof(float)/sizeof(int);
// it works only on platforms where both are 4 bytes.
extern void MV_GPU_wrapper(int* ptr, float* data, float* t, float* b, int* indices, int nr, int nc, int n){
float* devO1Ptr;
float* devI1Ptr;
float* devI2Ptr;
int* devIdPtr;
int* devptr_ptr;
cudaMalloc((void**)&devO1Ptr, 4 * nr);
cudaMalloc((void**)&devI1Ptr, 4 * n);
cudaMalloc((void**)&devI2Ptr, 4 * nc);
cudaMemcpy(devO1Ptr, t, 4 * nr, cudaMemcpyHostToDevice);
cudaMemcpy(devI1Ptr, data, 4 * n, cudaMemcpyHostToDevice);
cudaMemcpy(devI2Ptr, b, 4 * nc, cudaMemcpyHostToDevice);
cudaMalloc((void**)&devptr_ptr, 4 * (nr + 1));
cudaMalloc((void**)&devIdPtr, 4 * n);
cudaMemcpy(devptr_ptr, ptr, 4 * (nr + 1), cudaMemcpyHostToDevice);
cudaMemcpy(devIdPtr, indices, 4 * n, cudaMemcpyHostToDevice);
dim3 dimGrid(grid_num, 1);
dim3 dimBlock(block_size, 1);
dim3 threadsPerBlock(threads_per_block, 1);
// printf("we have %d grids, %d blocks and %d threads per block\n", grid_num, block_size, threads_per_block);
// NOTE(review): the third launch argument is dynamic shared-memory *bytes*;
// passing threads_per_block here looks unintended (the shared-memory code in
// mv_GPU is commented out, so it is currently harmless) — confirm.
mv_GPU<<<dimGrid, dimBlock, threads_per_block>>>(nr, max_non_zero_per_row, block_size, devptr_ptr, devI1Ptr, devO1Ptr, devI2Ptr, devIdPtr);
// Blocking copy-back also synchronizes with the kernel.
cudaMemcpy(t, devO1Ptr, nr * 4, cudaMemcpyDeviceToHost);
cudaFree(devO1Ptr);
cudaFree(devI1Ptr);
cudaFree(devI2Ptr);
cudaFree(devIdPtr);
cudaFree(devptr_ptr);
//data number of non zero
}
// GPU CSR SpMV kernel (compiler-generated style): each thread accumulates
// one matrix row into t.  The guard "tx <= -(blockSize*bx)+(nr-1)" is the
// usual bounds check rewritten: global row = tx + blockSize*bx < nr.
// mx is the maximum nonzeros in any row, so all threads run the same j-loop
// trip count; rows with fewer nonzeros skip the extra iterations via the
// ptr_next test.  The commented-out code is an abandoned shared-memory
// tiling variant.
extern __global__ void mv_GPU(int nr, int mx, int blockSize, int* ptr, float* data, float* t, float* b, int* indices)
{
int bx;
int tx;
float suif_tmp0;
// __shared__ float _P1[];
// __shared__ float* AS = _P1;
// __shared__ float* BS = AS + (sizeof(float) * blockSize);
// int blksz = blockSize;
int k, j;
bx = blockIdx.x;
tx = threadIdx.x;
int ptr_cur;
int ptr_next;
if(tx <= -(blockSize * bx) + (nr - 1)){
// suif_tmp0 = 0.0;
// Start from the existing value of t[row] (accumulates, like the CPU path).
suif_tmp0 = ((float* )(float(*)[])t)[tx + blockSize * bx];
ptr_cur = ((int*)(int(*)[])ptr)[tx + blockSize * bx];
ptr_next = ((int*)(int(*)[])ptr)[blockSize * bx + tx + 1];
}
// for(k = 0; k < grid_num - 1; k++){
// if(tx <= -(block_size * k) + (nr - 1)){
// ((float*)(float(*)[blksz])BS)[blksz * k + tx - blksz * k] = ((float*)(float(*)[])data)[blksz * k + tx];
// }
// __syncthreads();
for(j = 0; j < mx; j++){
if(tx <= -(blockSize * bx) + (nr - 1)){
if(ptr_next > (ptr_cur + j)){
//suif_tmp0 = suif_tmp0 + ((float*)(float(*)[blksz]BS)[(ptr_cur + j) - (blksz * k)] * b[indices[ptr_cur + j]];
suif_tmp0 = suif_tmp0 + data[ptr_cur + j] * b[indices[ptr_cur + j]];
}
}
// __syncthreads();
}
__syncthreads();
//}
if(tx <= -(blockSize * bx) + (nr - 1)){
((float*)(float(*)[])t)[tx + blockSize * bx] = suif_tmp0;
}
}
// Read a MatrixMarket-style sparse matrix, build CSR arrays, run the CPU and
// GPU SpMV, time both with CUDA events, and compare the results.
int main(int argc, char** argv){
FILE* fp;
char line[1024];
int* ptr, *indices;
float *data, *b, *t_h, *t_d;
int i, j;
int n, nc, nr;
if(argc < 2) abort();
if((fp = fopen(argv[1], "r")) == NULL) abort();
// Skip '%' comment lines, then read rows / cols / nonzero count.
fgets(line, 128, fp);
while(line[0] == '%'){
fgets(line, 128, fp);
}
sscanf(line, "%d %d %d\n", &nr, &nc, &n);
// NOTE(review): float arrays are sized with sizeof(int); this only works
// because sizeof(int) == sizeof(float) on common platforms.
ptr = (int*)malloc((nr + 1) * sizeof(int));
indices = (int*)malloc(n * sizeof(int));
data = (float*)malloc(n * sizeof(int));
b = (float*)malloc(nc * sizeof(int));
t_h = (float*)malloc(nr * sizeof(int));
t_d = (float*)malloc(nr * sizeof(int));
// Read 1-based (row, col, value) triples, assumed grouped by row, and
// record each row's starting offset in ptr[].
int lastr = 0;
for(i = 0; i < n; i++)
{
int r;
fscanf(fp, "%d %d %f\n", &r, &(indices[i]), &(data[i]));
indices[i]--;
if(r != lastr){
ptr[r - 1] = i;
lastr = r;
}
}
ptr[nr] = n;
// Longest row length bounds the GPU kernel's inner loop.
int temp = 0;
for(i = 0; i < nr; i++){
temp = ptr[i + 1] - ptr[i];
max_non_zero_per_row = max(temp, max_non_zero_per_row);
}
for(i = 0; i < nr; i++){
t_h[i] = 0.0;
t_d[i] = 0.0;
}
// Random dense vector b (unseeded rand(), so fixed across runs).
for(i = 0; i < nc; i++)
b[i] = (float) rand() / 1111111111;
fclose(fp);
// Heuristic launch configuration stored in the file-scope globals.
// block_size = (nr + 31) / 32;
block_size = sqrt(nr) + (sqrt(nr) / 2);
grid_num = block_size / 2;
threads_per_block = 32;
// Time the sequential reference with CUDA events.
cudaEvent_t start_event, end_event;
float elapsed_time_seq, elapsed_time_gpu;
cudaEventCreate(&start_event);
cudaEventCreate(&end_event);
cudaEventRecord(start_event, 0);
normalMV(nr, ptr, data, t_h, b, indices);
cudaEventRecord(end_event, 0);
cudaEventSynchronize(end_event);
cudaEventElapsedTime(&elapsed_time_seq, start_event, end_event);
// Time the GPU path (includes transfers inside the wrapper).
cudaEventCreate(&start_event);
cudaEventCreate(&end_event);
cudaEventRecord(start_event, 0);
MV_GPU_wrapper(ptr, data, t_d, b, indices, nr, nc, n);
// cudaThreadSynchronize();
cudaEventRecord(end_event, 0);
cudaEventSynchronize(end_event);
cudaEventElapsedTime(&elapsed_time_gpu, start_event, end_event);
int res = compare(t_h, t_d, nr, 0.01);
if(res == 1)
printf("VALID!\n Sequential Time: %.2f mesc\n Parallel Time: %.2f mesc\n Speedup = %.2f\n", elapsed_time_seq, elapsed_time_gpu, elapsed_time_seq / elapsed_time_gpu);
else
printf("INVALID...\n");
return 0;
}
|
7,863 | // ==========================================================================
// $Id$
// ==========================================================================
// (C)opyright: 2009-2010
//
// Ulm University
//
// Creator: Hendrik Lensch
// Email: {hendrik.lensch,johannes.hanika}@uni-ulm.de
// ==========================================================================
// $Log$
// ==========================================================================
#include <stdio.h>
#include <vector_types.h>
#include <stdlib.h>
using namespace std;
#define MAX_BLOCKS 256
#define MAX_THREADS 128
#define RTEST // use random initialization of array
/* compute the dot product between a1 and a2. a1 and a2 are of size
dim. The result of each thread should be stored in _dst[blockIdx.x *
blockDim.x + threadIdx.x]. Each thread should accumulate the dot
product of a subset of elements.
*/__global__ void dotProdKernel(float *_dst, const float* _a1, const float* _a2,
		int _dim)
{
	// Each of the MAX_BLOCKS*MAX_THREADS threads reduces one contiguous
	// chunk of the input; its partial dot product lands in _dst[tid].
	const int tid = blockIdx.x * blockDim.x + threadIdx.x;
	const int total_threads = MAX_BLOCKS * MAX_THREADS;
	const int chunk = (_dim + total_threads - 1) / total_threads;
	const int lo = tid * chunk;
	// Clamp the chunk end to _dim; trailing threads may get an empty range.
	int hi = lo + chunk;
	if (hi > _dim)
		hi = _dim;
	float acc = 0.f;
	for (int i = lo; i < hi; ++i)
	{
		acc += _a1[i] * _a2[i];
	}
	_dst[tid] = acc;
}
/* This program sets up two large arrays of size dim and computes the
dot product of both arrays.
The arrays are uploaed only once and the dot product is computed
multiple times. While this does not make too much sense it
demonstrated the possible speedup. */
// Set up two arrays of size dim, compute their dot product num_iters times
// on either the CPU or the GPU (per command-line flag), and print the result.
int main(int argc, char* argv[])
{
	// parse command line
	int acount = 1;
	if (argc < 3)
	{
		printf("usage: testDotProduct <dim> <GPU-flag [0,1]>\n");
		exit(1);
	}
	// number of elements in both vectors
	int dim = atoi(argv[acount++]);
	// flag indicating weather the CPU or the GPU version should be executed
	bool gpuVersion = atoi(argv[acount++]);
	printf("dim: %d\n", dim);
	float* cpuArray1 = new float[dim];
	float* cpuArray2 = new float[dim];
	// initialize the two arrays (either random or deterministic)
	for (int i = 0; i < dim; ++i)
	{
#ifdef RTEST
		cpuArray1[i] = drand48();
		cpuArray2[i] = drand48();
#else
		cpuArray1[i] = 2.0;
		cpuArray2[i] = i % 10;
#endif
	}
	// device buffers and the host-side array of per-thread partial results
	float* gpuArray1;
	float* gpuArray2;
	float* gpuResult;
	float* h;
	if (gpuVersion)
	{
		const size_t input_size = dim * sizeof(float);
		const size_t output_size = MAX_BLOCKS * MAX_THREADS * sizeof(float);
		cudaMalloc((void**) &gpuArray1, input_size);
		cudaMalloc((void**) &gpuArray2, input_size);
		cudaMalloc((void**) &gpuResult, output_size);
		cudaMemcpy(gpuArray1, cpuArray1, input_size,
				cudaMemcpyHostToDevice);
		cudaMemcpy(gpuArray2, cpuArray2, input_size,
				cudaMemcpyHostToDevice);
		// allocate an array to download the results of all threads
		h = new float[MAX_BLOCKS * MAX_THREADS];
	}
	const int num_iters = 100;
	double finalDotProduct;
	if (!gpuVersion)
	{
		printf("cpu: ");
		for (int iter = 0; iter < num_iters; ++iter)
		{
			finalDotProduct = 0.0;
			for (int i = 0; i < dim; ++i)
			{
				finalDotProduct += cpuArray1[i] * cpuArray2[i];
			}
		}
	}
	else
	{
		// Cuda version here
		printf("gpu: ");
		// a simplistic way of splitting the problem into threads
		dim3 blockGrid(MAX_BLOCKS);
		dim3 threadBlock(MAX_THREADS);
		for (int iter = 0; iter < num_iters; ++iter)
		{
			dotProdKernel<<<blockGrid, threadBlock>>>(gpuResult, gpuArray1,
					gpuArray2, dim);
			cudaThreadSynchronize();
		}
		// download and combine the results of multiple threads on the CPU
		cudaMemcpy(h, gpuResult, MAX_BLOCKS * MAX_THREADS * sizeof(float), cudaMemcpyDeviceToHost);
		finalDotProduct = 0.f;
		for(int i=0; i<MAX_BLOCKS * MAX_THREADS; ++i)
			finalDotProduct += h[i];
	}
	printf("Result: %f\n", finalDotProduct);
	if (gpuVersion)
	{
		// FIX: free the device buffers (this cleanup was marked "missing"
		// in the original, leaking GPU memory every run).
		cudaFree(gpuArray1);
		cudaFree(gpuArray2);
		cudaFree(gpuResult);
		delete[] h;
	}
	delete[] cpuArray2;
	delete[] cpuArray1;
	printf("done\n");
}
|
7,864 | #include "includes.h"
// Pointwise product of two length-n real-FFT spectra stored in the packed
// "half-complex" layout (pairs at j and n-j are conjugate-related): thread
// j2 > 0 unpacks the (j, n-j) pair of `a` with the twiddle factors from
// ct[], multiplies by the corresponding pair of `b`, and re-packs into
// g_out; thread 0 handles the two special purely-determined entries at
// indices 0/1 and m=n/2.
__global__ void mult2_kernel(double *g_out, double *a, double *b, double *ct, int n) {
const int j2 = blockIdx.x * blockDim.x + threadIdx.x;
double wkr, wki, xr, xi, yr, yi, ajr, aji, akr, aki;
double new_ajr, new_aji, new_akr, new_aki;
const int m = n >> 1;
const int nc = n >> 2;
const int j = j2 << 1;
if (j2) {
int nminusj = n - j;
// Twiddle factor for this pair, read from the precomputed cosine table.
wkr = 0.5 - ct[nc - j2];
wki = ct[j2];
// Load the conjugate-paired entries of `a` and unpack to two complexes.
ajr = a[j];
aji = a[1 + j];
akr = a[nminusj];
aki = a[1 + nminusj];
xr = ajr - akr;
xi = aji + aki;
yr = wkr * xr - wki * xi;
yi = wkr * xi + wki * xr;
ajr -= yr;
aji -= yi;
akr += yr;
aki -= yi;
// Complex multiply both unpacked values by the matching entries of `b`.
xr = b[j];
xi = b[1 + j];
yr = b[nminusj];
yi = b[1 + nminusj];
new_aji = ajr * xi + xr * aji;
new_ajr = ajr * xr - aji * xi;
new_aki = akr * yi + yr * aki;
new_akr = akr * yr - aki * yi;
// Re-pack the products into the half-complex layout of g_out.
xr = new_ajr - new_akr;
xi = new_aji + new_aki;
yr = wkr * xr + wki * xi;
yi = wkr * xi - wki * xr;
g_out[j] = new_ajr - yr;
g_out[1 + j] = yi - new_aji;
g_out[nminusj] = new_akr + yr;
g_out[1 + nminusj] = yi - new_aki;
} else {
// Thread 0: indices 0/1 and the midpoint m hold special packed values.
xr = a[0];
xi = a[1];
yr = b[0];
yi = b[1];
g_out[0] = xr * yr + xi * yi;
g_out[1] = -xr * yi - xi * yr;
xr = a[0 + m];
xi = a[1 + m];
yr = b[0 + m];
yi = b[1 + m];
g_out[1 + m] = -xr * yi - xi * yr;
g_out[0 + m] = xr * yr - xi * yi;
}
}
7,865 | #include <stdio.h>
#define N 4194304
#define THREADS 64
__global__ void vecAdd(int *a, int *b, int *c){
    // Element-wise c = a + b over N elements, one element per thread,
    // with a guard for threads past the end of the arrays.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;
    c[i] = a[i] + b[i];
}
// Benchmark vecAdd: add two N-element vectors on the GPU ten times, timing
// each launch with CUDA events, then report the average kernel time.
int main(int argc, char* argv[]){
    int *a,*b,*c;
    int *dev_a,*dev_b,*dev_c;
    int totalSize = N*sizeof(int);
    int idx;
    int size,blocks,threads;
    float total_time;
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    size = N;
    blocks = size/THREADS; // exact: N is a multiple of THREADS
    threads = THREADS;
    cudaMalloc((void**)&dev_a,totalSize);
    cudaMalloc((void**)&dev_b,totalSize);
    cudaMalloc((void**)&dev_c,totalSize);
    a = (int*) malloc(totalSize);
    b = (int*) malloc(totalSize);
    c = (int*) malloc(totalSize);
    for(idx=0;idx<N;idx++){
        a[idx] = idx;
        b[idx] = idx*2;
    }
    cudaMemcpy(dev_a,a,totalSize,cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,b,totalSize,cudaMemcpyHostToDevice);
    int iteration = 0;
    float avg_time = 0.0;
    for(iteration=0;iteration<10;iteration++){
        //call kernel and measure times
        cudaEventRecord(start,0);
        vecAdd<<<blocks,threads>>>(dev_a,dev_b,dev_c);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&total_time,start,stop);
        printf("\n time for %i blocks of %i threads : %f \n",blocks,threads,total_time);
        avg_time+=total_time;
    }
    avg_time/=10.0;
    printf("average time for %i size vector mult is %f ",size,avg_time);
    cudaMemcpy(c,dev_c,totalSize,cudaMemcpyDeviceToHost);
    /*
    for(idx=0;idx<N;idx++)
    printf("\n%i+%i=%i\n",a[idx],b[idx],c[idx]);
    */
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    // FIX: release the timing events and the host buffers, which the
    // original leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(a);
    free(b);
    free(c);
    return 0;
}
|
7,866 | #include <stdio.h>
#define ARRAY_SIZE 128
// For each index i, writes the running average of value_in[0..i] to
// value_out[i]. Launch contract: a single block of exactly ARRAY_SIZE
// threads (indexing uses threadIdx.x only). O(n^2) total work per block.
__global__ void avarage_list(float * value_in,float * value_out){
//Local memory
int index = threadIdx.x;
float sum = 0.0;
// Static shared var
__shared__ float sh_arr[ARRAY_SIZE];
//Shared mem | global memory: stage the whole input in shared memory
sh_arr[index] = value_in[index];
__syncthreads(); // Ensure all values were copied before the next step reads them
//shared memory operation: prefix sum up to this thread's own index
for(int i = 0; i <= index;i++){
sum += sh_arr[i];
}
// Global memory | local memory: mean of the first index+1 values
value_out[index] = sum / (index + 1);
}
int main(int argc,char** argv){
    // Demo driver: fill an input array with even numbers, compute the
    // running averages on the GPU, then print both arrays.
    const int BYTE_SIZE = ARRAY_SIZE * sizeof(float);
    float h_values_in[ARRAY_SIZE];
    float h_avarage_out[ARRAY_SIZE];

    printf("Array Values : \n");
    for (int idx = 0; idx < ARRAY_SIZE; ++idx) {
        h_values_in[idx] = float(idx * 2);
        printf("%.2f " , h_values_in[idx]);
    }
    printf("\n");

    // Device buffers: input values and per-index averages.
    float *d_values_in = NULL;
    float *d_avarage_out = NULL;
    cudaMalloc((void**) &d_values_in, BYTE_SIZE);
    cudaMalloc((void**) &d_avarage_out, BYTE_SIZE);
    cudaMemcpy(d_values_in, h_values_in, BYTE_SIZE, cudaMemcpyHostToDevice);

    // One block, one thread per element (kernel contract).
    avarage_list<<<1, ARRAY_SIZE>>>(d_values_in, d_avarage_out);
    cudaMemcpy(h_avarage_out, d_avarage_out, BYTE_SIZE, cudaMemcpyDeviceToHost);

    printf("Avarage Array: \n");
    for (int idx = 0; idx < ARRAY_SIZE; ++idx) {
        printf("%.2f ", h_avarage_out[idx]);
    }
    printf("\n");

    cudaFree(d_values_in);
    cudaFree(d_avarage_out);
    return 0;
}
|
extern "C"
__global__ void add(int n, float *a, float *b, float *c) {
    // Element-wise float vector add with a tail guard; C linkage so the
    // kernel name is not mangled (e.g. for driver-API / JCuda loading).
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        c[i] = a[i] + b[i];
}
|
7,868 | #include "includes.h"
// Applies the trigonometric/hyperbolic function selected by `type` to every
// element of `input`, writing results to `output`. Element ids are built
// from a 2-D grid of 1-D blocks. Unlisted type codes (8, 9, anything else)
// leave output[id] untouched.
// NOTE(review): case 10 (atan2 of interleaved pairs) reads input[2*id] and
// input[2*id+1] while the guard only checks id < size — confirm callers pass
// `size` as the pair count for that mode.
__global__ void GoniometricFunctionKernel(float* input, float* output, const int size, const int type)
{
int id = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
if(id < size)
{ // Sine = 0, Cosine = 1, Tan = 2, Tanh = 3, Sinh = 4, Cosh = 5 see MyGonioType in MyTransform.cs
switch (type)
{
case 0:
output[id] = sinf(input[id]);
break;
case 1:
output[id] = cosf(input[id]);
break;
case 2:
output[id] = tanf(input[id]);
break;
case 3:
output[id] = tanhf(input[id]);
break;
case 4:
output[id] = sinhf(input[id]);
break;
case 5:
output[id] = coshf(input[id]);
break;
case 6:
output[id] = asinf(input[id]);
break;
case 7:
output[id] = acosf(input[id]);
break;
case 10:
// atan2 over interleaved (y, x) pairs — see NOTE(review) above.
output[id] = atan2f(input[2*id], input[2*id+1]);
break;
}
}
}
7,869 | #include <stdio.h>
#include <sys/time.h>
#include <time.h>
const int N = 16;
const int blocksize = 16;
// Busy-work kernel used purely as a timing workload: each thread adds its
// element of b into its element of a ten million times.
// NOTE(review): the repeated adds ARE the workload; an optimising compiler
// may collapse the loop to a single multiply-add — confirm build flags if
// the measured time looks suspiciously small.
__global__
void hello(int *a, int *b)
{
for (int i=0; i<10000000; i++){
a[threadIdx.x] += b[threadIdx.x];
}
}
int main()
{
    // Host buffers, initialised to all ones.
    int a[N];
    int b[N];
    for (int i = 0; i < N; i++){
        a[i] = 1;
        b[i] = 1;
    }

    struct timeval start_tv;
    gettimeofday(&start_tv, NULL);

    int *ad;
    int *bd;
    const int csize = N * sizeof(int);
    const int isize = N * sizeof(int);
    cudaMalloc((void**)&ad, csize);
    cudaMalloc((void**)&bd, isize);
    cudaMemcpy(ad, a, csize, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, isize, cudaMemcpyHostToDevice);

    dim3 dimBlock(blocksize, 1);
    dim3 dimGrid(1, 1);
    hello<<<dimGrid, dimBlock>>>(ad, bd);
    // The blocking D2H copy below also synchronises with the kernel.
    cudaMemcpy(a, ad, csize, cudaMemcpyDeviceToHost);
    cudaFree(ad);
    cudaFree(bd);
    cudaDeviceSynchronize();

    struct timeval end_tv;
    gettimeofday(&end_tv, NULL);

    for (int i = 0; i < N; i++){
        printf("%d ", a[i]);
    }
    printf("\n");

    // Elapsed wall time. Fixes vs. the original:
    //  - when the microsecond field borrows, the seconds difference must be
    //    reduced by one (the original printed an inflated seconds count);
    //  - printing time_t/suseconds_t with %u is undefined; go through long.
    long sec = (long)(end_tv.tv_sec - start_tv.tv_sec);
    long usec = (long)end_tv.tv_usec - (long)start_tv.tv_usec;
    if (usec < 0){
        sec -= 1;
        usec += 1000000;
    }
    printf("time %ld:%ld\n", sec, usec);
    return 0;
}
|
7,870 | #include "BmpSave.cuh"
#include <fstream>
#include <string>
namespace BmpSave {
// Channel-wise constructor; member order (r, g, b) is declared in BmpSave.cuh.
__host__ __device__ Color::Color(unsigned char rr, unsigned char gg,
unsigned char bb)
: r(rr), g(gg), b(bb){};
// Unpacks a 0xRRGGBB-style packed integer into the three channels.
__host__ __device__ Color::Color(int col) {
r = (col & 0xFF0000) >> 16;
g = (col & 0x00FF00) >> 8;
b = col & 0x0000FF;
}
// Default colour: black.
__host__ __device__ Color::Color() {
r = 0;
g = 0;
b = 0;
}
// Writes the BMP header followed by the raw pixel bytes.
// NOTE(review): assumes Color is tightly packed and that rows need no
// padding — the BMP format requires each row to be padded to a multiple of
// 4 bytes; confirm BmpHeader/width combinations account for this.
void saveBmp(const std::string &filename, const int width, const int height,
Color *pixels) {
BmpHeader header(width, height);
std::ofstream fout(filename.c_str(), std::ios::binary);
fout.write(reinterpret_cast<char *>(&header), sizeof(BmpHeader));
fout.write(reinterpret_cast<char *>(pixels), width * height * sizeof(Color));
}
} // namespace BmpSave
|
7,871 | // Reads a cell at (x+dx, y+dy)
// Reads the cell at (x+dx, y+dy) with toroidal (wrap-around) indexing.
// Fix: the original wrapped via (unsigned)(x + dx) % domain_x, which maps a
// negative x+dx to (2^32 + x + dx) % domain_x — only correct when domain_x
// is a power of two. The arithmetic below wraps correctly for any domain
// size (offsets down to -domain_x).
__device__ int read_cell(int* source_domain, int x, int y, int dx, int dy, unsigned int domain_x, unsigned int domain_y)
{
    int wrapped_x = (x + dx) % (int)domain_x;
    if (wrapped_x < 0) wrapped_x += (int)domain_x;
    int wrapped_y = (y + dy) % (int)domain_y;
    if (wrapped_y < 0) wrapped_y += (int)domain_y;
    return source_domain[wrapped_y * (int)domain_x + wrapped_x];
}
// Compute kernel
// Two-colour Game of Life step: cell values are 0 = empty, 1 = red,
// 2 = blue. One thread per cell; birth on exactly 3 neighbours (new cell
// takes the majority colour), survival on 2 or 3 neighbours, death otherwise.
__global__ void life_kernel(int* source_domain, int* dest_domain, int domain_x, int domain_y)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= domain_x || ty >= domain_y)
{
return;
}
// Read cell
int myself = read_cell(source_domain, tx, ty, 0, 0, domain_x, domain_y);
// Read the 8 neighbors and count number of blue and red
int redcells = 0;
int bluecells = 0;
int cell;
for (int line = -1; line < 2; ++line)
{
for (int column = -1; column < 2; ++column)
{
//Do not read myself
if (!(line == 0 && column == 0))
{
cell = read_cell(source_domain, tx, ty, line, column, domain_x, domain_y);
if (cell == 1)
{
redcells++;
}
else if (cell == 2)
{
bluecells++;
}
}
}
}
// Compute new value from the neighbour counts
int sum = redcells + bluecells;
// By default, the cell dies (or stay empty)
int newvalue = 0;
if (myself == 0 && sum == 3)
{
// New cell: colour of the majority of the three parents (ties -> blue)
newvalue = redcells > bluecells ? 1 : 2;
}
else if (sum == 2 || sum == 3)
{
// Survives
newvalue = myself;
}
// Write the updated cell to the destination grid
dest_domain[ty * domain_x + tx] = newvalue;
}
// Compute kernel
// Shared-memory variant of life_kernel (exercise question 5): intends to
// stage three domain rows (above / current / below) in shared memory.
// NOTE(review): several defects as written — left untouched because the
// intended block/row mapping is ambiguous:
//  - memcpy sizes are element counts, not bytes (should be
//    domain_x * sizeof(int));
//  - every thread redundantly copies entire rows, and there is no
//    __syncthreads() between the copies and the reads;
//  - slot 0 receives blockIdx.y's own row while slot 1 receives the row
//    above, contradicting the comments;
//  - read_cell() then indexes sharedData with the GLOBAL ty * domain_x,
//    reaching far beyond the 3*domain_x staged rows.
__global__ void life_kernel_q5(int* source_domain, int* dest_domain, int domain_x, int domain_y)
{
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= domain_x || ty >= domain_y)
{
return;
}
extern __shared__ int sharedData[];
// Row indices of the neighbouring rows, wrapped at the grid edges.
int ligneDessous = ((int)blockIdx.y - 1 < 0) ? gridDim.y - 1 : blockIdx.y - 1;
int ligneDessus = (blockIdx.y + 1 >= gridDim.y) ? 0 : blockIdx.y + 1;
// Row above (see NOTE(review): actually copies the current row)
memcpy(&sharedData[0 * domain_x], &source_domain[blockIdx.y * domain_x], domain_x);
// Current row (see NOTE(review): actually copies the row above)
memcpy(&sharedData[1 * domain_x], &source_domain[ligneDessus * domain_x], domain_x);
// Row below
memcpy(&sharedData[2 * domain_x], &source_domain[ligneDessous * domain_x], domain_x);
// Read cell
int myself = read_cell(sharedData, tx, ty, 0, 0, domain_x, domain_y);
// Read the 8 neighbors and count number of blue and red
int redcells = 0;
int bluecells = 0;
int cell;
for (int line = -1; line < 2; ++line)
{
for (int column = -1; column < 2; ++column)
{
//Do not read myself
if (!(line == 0 && column == 0))
{
cell = read_cell(sharedData, tx, ty, line, column, domain_x, domain_y);
if (cell == 1)
{
redcells++;
}
else if (cell == 2)
{
bluecells++;
}
}
}
}
// Compute new value (same rules as life_kernel)
int sum = redcells + bluecells;
// By default, the cell dies (or stay empty)
int newvalue = 0;
if (myself == 0 && sum == 3)
{
// New cell
newvalue = redcells > bluecells ? 1 : 2;
}
else if (sum == 2 || sum == 3)
{
// Survives
newvalue = myself;
}
// Write the updated cell to the destination grid
dest_domain[ty * domain_x + tx] = newvalue;
}
|
7,872 | #include "includes.h"
/*
 * Averaging reduction: averages adjacent pairs of `a`, then keeps averaging
 * neighbouring partials until blockDim.x/level < sizeOut values remain per
 * block, which are written to `z`.
 * Launch contract: dynamic shared memory of blockDim.x doubles; N (defined
 * in the enclosing translation unit) is assumed to be the element count of
 * `a` — TODO confirm.
 *
 * Fixes vs. the original:
 *  - the guard was `tid > N/2`, so tid == N/2 read a[N] and a[N+1] out of
 *    bounds (off-by-one);
 *  - inactive threads returned before __syncthreads(), which is undefined
 *    behaviour when other threads of the same block still reach the
 *    barrier; they now stay in the kernel and simply skip the work.
 */
__global__ void reduce(double *a, double *z, int sizeOut){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    extern __shared__ double subTotals[];
    const bool active = (tid < N / 2);
    if (active) {
        // Seed: average of each adjacent input pair.
        subTotals[threadIdx.x] = (a[tid * 2] + a[tid * 2 + 1]) / 2;
    }
    __syncthreads();
    int level = 2;
    while ((blockDim.x / level) >= sizeOut) { // keep halving until sizeOut remain
        if (active && threadIdx.x % level == 0) { // half the survivors each round
            subTotals[threadIdx.x] = (subTotals[threadIdx.x] + subTotals[threadIdx.x + (level / 2)]) / 2;
        }
        __syncthreads(); // partials must be published before the next round
        level = level * 2;
    }
    level = level / 2;
    if (active && threadIdx.x % level == 0) {
        z[tid / level] = subTotals[threadIdx.x];
    }
}
7,873 | /* Block size X: 32 */
// For each element (one block per element, 32 threads striding the vertical
// levels), stores the per-level pair (max of the three node fct_ttf_max,
// min of the three node fct_ttf_min) interleaved into UV_rhs. Levels from
// nLevels-1 up to maxLevels-2 are filled with (-big_number, +big_number)
// sentinels instead.
// `elementNodes` holds 1-based (Fortran-style) node indices — hence the -1.
__global__ void fct_ale_a2b(const int maxLevels, const int * __restrict__ nLevels, const int * __restrict__ elementNodes, double * __restrict__ UV_rhs, const double * __restrict__ fct_ttf_max, const double * __restrict__ fct_ttf_min, const double big_number)
{
const unsigned int element_index = (blockIdx.x * maxLevels);
const unsigned int element_node0_index = (elementNodes[(blockIdx.x * 3)] - 1) * maxLevels;
const unsigned int element_node1_index = (elementNodes[(blockIdx.x * 3) + 1] - 1) * maxLevels;
const unsigned int element_node2_index = (elementNodes[(blockIdx.x * 3) + 2] - 1) * maxLevels;
// Warp-stride loop over levels (block size X is 32).
for ( unsigned int level = threadIdx.x; level < maxLevels + 1; level += 32 )
{
if ( level < nLevels[blockIdx.x] - 1 )
{
double temp1 = fmax(fct_ttf_max[element_node0_index + level], fct_ttf_max[element_node1_index + level]);
temp1 = fmax(temp1, fct_ttf_max[element_node2_index + level]);
double temp2 = fmin(fct_ttf_min[element_node0_index + level], fct_ttf_min[element_node1_index + level]);
temp2 = fmin(temp2, fct_ttf_min[element_node2_index + level]);
UV_rhs[2*(element_index + level)] = temp1;
UV_rhs[2*(element_index + level) + 1] = temp2;
}
else if ( level < maxLevels - 1 )
{
// Levels below the active column: sentinel bounds.
UV_rhs[2*(element_index + level)] = -big_number;
UV_rhs[2*(element_index + level) + 1] = big_number;
}
}
}
|
7,874 | #include <iostream>
#include <assert.h>
int main()
{
    // Exercise cudaStreamDestroy error reporting. Improvement: use the named
    // cudaError_t constants instead of magic integers (same values: 0, 709,
    // 400 per the CUDA runtime API).
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    // First destroy of a live stream succeeds.
    assert(cudaStreamDestroy(stream) == cudaSuccess);
    // Destroying the same handle again reports the destroyed context.
    assert(cudaStreamDestroy(stream) == cudaErrorContextIsDestroyed);
    // Default stream is non-owning thus should not be destroyed by users.
    assert(cudaStreamDestroy(0 /*default stream*/) == cudaErrorInvalidResourceHandle);
    return 0;
}
|
7,875 | #include <stdio.h>
#include <math.h>
// Computes the single-particle energy epsilon(p) on a 2-D momentum grid:
// one thread per (i, j) output point. For each point it sums an attractive
// potential over every (kxprime, kyprime) grid point, the three bands
// n in {-1, 0, 1}, and the three levels l whose eigenvalue is at or below
// the chemical potential mu, weighted by the squared eigenvector element;
// the sum is scaled by the momentum-space measure and the kinetic term
// p^2 is added.
// NOTE(review): `&` is used as logical AND on boolean operands — correct
// here since comparisons yield 0/1, but `&&` would be clearer.
// NOTE(review): assumes px_arr/py_arr have at least 2 entries (dpx/dpy) and
// uniform spacing — TODO confirm.
__global__ void epsilon_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
double *kxprime_arr, double *kyprime_arr,
double *E_k_n, double *U_k,
double mu, double q, double g, double theta,
int N_kx, int N_ky, int debug)
{
const int i = threadIdx.x + blockDim.x*blockIdx.x;
const int j = threadIdx.y + blockDim.y*blockIdx.y;
const int N_n = 3;
const int N_l = 3;
int iprime, jprime, n, l;
const double px = px_arr[i];
const double py = py_arr[j];
double kxprime, kyprime;
double px_pot, py_pot, potential;
double evec_element, eigenval;
double accumulator = 0;
// Angular factor of the potential (the sin^2 term is currently zeroed out).
double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
const double dpx = px_arr[1] - px_arr[0];
const double dpy = py_arr[1] - py_arr[0];
// Momentum-space integration measure dk/(2*pi)^2.
const double prefactor = dpx*dpy/(4*M_PI*M_PI);
if ((i < N_kx) & (j < N_ky)){
for (iprime=0; iprime<N_kx; iprime++){
for (jprime=0; jprime<N_ky; jprime++){
kxprime = kxprime_arr[iprime];
kyprime = kyprime_arr[jprime];
for (n=-1; n<2; n++){
// Momentum transfer shifted by n Bragg vectors q.
px_pot = px - kxprime - n*q;
py_pot = py - kyprime;
potential = - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
for (l=0; l<3; l++){
eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
// Only occupied states (eigenvalue at or below mu) contribute.
if (eigenval <= mu){
evec_element = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*(n+1) + l];
// if ((debug > 0) & (i==7) & (j==5) & (iprime==22) & (jprime==33) & (n==0) & (l==0)){
// printf(" epsilon_k[7,5] kprime[22,33] n=0, l=0 (CUDA):\n");
// printf(" px is: %f\n", px);
// printf(" py is: %f\n", py);
// printf(" kxprime is: %f\n", kxprime);
// printf(" kyprime is: %f\n", kyprime);
// printf(" potential terms is %f\n",potential);
// printf(" evec element is %f\n", evec_element);
// printf(" value of term is: %f\n", potential*evec_element*evec_element);
// }
accumulator += potential * evec_element*evec_element;
}
}
}
}
}
// Multiple the sum by the appropriate prefactor:
accumulator *= prefactor;
// Add the kinetic term:
accumulator += (px*px + py*py);
output_arr[N_ky*i + j] = accumulator;
// if ((debug > 0) & (i==7) & (j==5)){
// printf(" epsilon_k[7,5] (CUDA): %f\n\n", accumulator);
// }
}
}
// Computes the off-diagonal mean-field term h(p) on the same 2-D momentum
// grid as epsilon_of_p_GPU: one thread per (i, j) point. Sums, over all
// occupied states (eigenvalue <= mu), cross products of neighbouring-band
// eigenvector elements weighted by the difference between the potential at
// the Bragg vector q and at the momentum transfer.
// NOTE(review): same `&`-as-logical-AND idiom and uniform-grid assumptions
// as epsilon_of_p_GPU apply here.
__global__ void h_of_p_GPU(double *output_arr, double *px_arr, double *py_arr,
double *kxprime_arr, double *kyprime_arr,
double *E_k_n, double *U_k,
double mu, double q, double g, double theta,
int N_kx, int N_ky, int debug)
{
const int i = threadIdx.x + blockDim.x*blockIdx.x;
const int j = threadIdx.y + blockDim.y*blockIdx.y;
const int N_n = 3;
const int N_l = 3;
int iprime, jprime, l;
double px = px_arr[i];
double py = py_arr[j];
double kxprime, kyprime;
double px_pot, py_pot, potential_1, potential_2;
double evec_element_1, evec_element_2, evec_element_3, eigenval;
double accumulator = 0;
// Angular factor of the potential (the sin^2 term is currently zeroed out).
double pot_trig_terms = pow(cos(theta), 2) - pow(sin(theta), 2)*0;
double V_of_q = g*sqrt(q*q) * pot_trig_terms; // V(px=q, py=0)
const double dpx = px_arr[1] - px_arr[0];
const double dpy = py_arr[1] - py_arr[0];
// Momentum-space integration measure dk/(2*pi)^2.
const double prefactor = dpx*dpy/(4*M_PI*M_PI);
if ((i < N_kx) & (j < N_ky)){
for (iprime=0; iprime<N_kx; iprime++){
for (jprime=0; jprime<N_ky; jprime++){
kxprime = kxprime_arr[iprime];
kyprime = kyprime_arr[jprime];
for (l=0; l<3; l++){
eigenval = E_k_n[N_ky*N_l*iprime + N_l*jprime + l];
// Only occupied states contribute.
if (eigenval <= mu){
// Eigenvector elements for the three bands n = -1, 0, +1.
evec_element_1 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*0 + l];
evec_element_2 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*1 + l];
evec_element_3 = U_k[N_ky*N_n*N_l*iprime + N_n*N_l*jprime + N_l*2 + l];
px_pot = px - kxprime + q;
py_pot = py - kyprime;
potential_1 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
px_pot = px - kxprime;
py_pot = py - kyprime;
potential_2 = V_of_q - g*sqrt(px_pot*px_pot + py_pot*py_pot) * pot_trig_terms;
accumulator += potential_1 * evec_element_1*evec_element_2 + potential_2 * evec_element_2*evec_element_3;
// if ((debug > 0) & (i==7) & (j==5) & (iprime==22) & (jprime==33) & (l==0)){
// printf(" h_k[7,5] kprime[22,33], l=0 (CUDA):\n");
// printf(" px is: %f\n", px);
// printf(" py is: %f\n", py);
// printf(" kxprime is: %f\n", kxprime);
// printf(" kyprime is: %f\n", kyprime);
// printf(" potential term 1 is %f\n", potential_1);
// printf(" potential term 2 is %f\n", potential_2);
// printf(" value of term is: %f\n",
// potential_1 * evec_element_1*evec_element_2 + potential_2 * evec_element_2*evec_element_3);
// }
}
}
}
}
// Multiple the sum by the appropriate prefactor:
accumulator *= prefactor;
output_arr[N_ky*i + j] = accumulator;
// if ((debug > 0) & (i==7) & (j==5)){
// printf(" h_k[7,5] (CUDA): %f\n\n", accumulator);
// }
}
}
|
7,876 | #include <stdio.h>
// Two independent bases, each exposing get_size() over its own field.
struct der1 {
der1() {}
int size1;
__host__ __device__ int get_size() const { return size1; }
};
struct der2 {
der2() {}
int size2;
__host__ __device__ int get_size() const { return size2; }
};
// Multiple inheritance joining both bases; the templated get_size()
// disambiguates between the two inherited implementations, defaulting to
// der1's.
struct join : public der1, public der2 {
join() : der1(), der2() {}
template <typename view_t = der1>
__host__ __device__ int get_size() const {
return view_t::get_size();
}
};
// Kernel taking the joined container by value; selects der2's get_size()
// (i.e. size2) via an explicit template argument.
template <typename join_t>
__global__ void kernel(join_t container) {
int size = container.template get_size<der2>(); // dependent call: the `template` keyword is required here
printf("size = %i\n", size);
}
// Demonstrates disambiguating multiply-inherited get_size() via a template
// argument, on both host and device. (Fix: removed the unused index_t /
// value_t type aliases.)
void test_templated() {
    // Host side: select der1's size through the templated accessor.
    join host_container;
    host_container.size1 = 10;
    host_container.size2 = 20;
    int size = host_container.template get_size<der1>();
    printf("size = %i\n", size);
    // Device side: the kernel selects der2's size instead.
    join dev_container;
    dev_container.size1 = 10;
    dev_container.size2 = 20;
    kernel<<<1, 1>>>(dev_container);
    cudaDeviceSynchronize();
}
int main(int argc, char** argv) {
    // Entry point: run the multiple-inheritance template demo.
    test_templated();
    return 0;
}
7,877 | #include<stdio.h>
__managed__ int sum=0;
__global__ void Array_sum(int *a,int *n)
{
    // Accumulate a[tid] into the managed global `sum`; extra threads beyond
    // *n contribute nothing.
    const int tid = threadIdx.x;
    if (tid >= *n)
        return;
    atomicAdd(&sum, a[tid]);
}
int main()
{
    // Sum n random values on the GPU via the managed global `sum`.
    int n = 10, i;
    int a[n];
    int *cuda_a, *cuda_n;
    for (i = 0; i < n; i++)
    {
        a[i] = rand() % 100;
        printf("%d ", a[i]);
    }
    printf("\n");
    cudaMalloc((void**)&cuda_a, n * sizeof(int));
    cudaMalloc((void**)&cuda_n, sizeof(int));
    cudaMemcpy(cuda_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_n, &n, sizeof(int), cudaMemcpyHostToDevice);
    Array_sum <<<1, n>>>(cuda_a, cuda_n);
    // Fix: the launch is asynchronous and `sum` is __managed__ — without
    // this synchronisation the host could print a stale value (and on
    // pre-Pascal hardware touching managed memory while the kernel runs is
    // invalid).
    cudaDeviceSynchronize();
    printf("Sum:%d\n", sum);
    cudaFree(cuda_a);
    cudaFree(cuda_n);
    return 0;
}
7,878 | /**
* Prints Thread ID of each thread
* What to observe/ponder:
* - Any trends on how the thread IDs are printed?
* - Why are they printed like so?
*/
#include <stdio.h>
// Reports the most recent CUDA error (if any) without aborting. Note that
// cudaGetLastError() also clears the sticky error state.
void check_cuda_errors()
{
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    printf("Last CUDA error %s\n", cudaGetErrorString(status));
}
// Kernel: every thread prints its own threadIdx.x. The print order follows
// warp scheduling rather than numeric order.
__global__ void printer()
{
printf("%d\n", threadIdx.x);
}
int main(int argc, char **argv)
{
    // Launch one block of 1024 threads, each printing its thread id.
    printer<<<1, 1024>>>();
    // Block until the kernel finishes so the device printf output is flushed.
    cudaDeviceSynchronize();
    check_cuda_errors();
    return 0;
}
7,879 | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
using namespace std;
__global__ void vectorAddKer(int *d_A, int *d_B, int *d_C, int size)
{
    // One thread per element; threads past the end of the vectors do nothing.
    const int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < size) {
        d_C[i] = d_A[i] + d_B[i];
    }
}
int main(int argc, char const *argv[]) {
    // Vector-add benchmark: argv[1] optionally overrides the default size 1024.
    int size = (argc == 2) ? atoi(argv[1]) : 1024;
    int *h_A, *h_B, *h_C;
    int *d_A, *d_B, *d_C;
    h_A = new int[size];
    h_B = new int[size];
    h_C = new int[size];
    for (unsigned i = 0; i < size; ++i) {
        h_A[i] = i;
        h_B[i] = size - i;   // so every output element equals `size`
    }
    cudaEvent_t start, stop;
    float elapsedTime = 0.0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMalloc(&d_A, size * sizeof(int));
    cudaMalloc(&d_B, size * sizeof(int));
    cudaMalloc(&d_C, size * sizeof(int));
    cudaMemcpy(d_A, h_A, size * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size * sizeof(int), cudaMemcpyHostToDevice);
    dim3 gridsize, blocksize;
    blocksize.x = 256;
    gridsize.x = (size + blocksize.x - 1) / blocksize.x; // ceil-div covers the tail
    vectorAddKer<<<gridsize, blocksize>>>(d_A, d_B, d_C, size);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    // Note: the timed span includes allocation and H2D copies, not just the kernel.
    cout << "time=" << elapsedTime << endl;
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaMemcpy(h_C, d_C, size * sizeof(int), cudaMemcpyDeviceToHost);
    for (unsigned i = 0; i < size; ++i) {
        printf("%d + %d = %d\n", h_A[i], h_B[i], h_C[i]);
    }
    // Fix: release device and host memory (the original leaked all six buffers).
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    return 0;
}
7,880 | #include <stdio.h>
#define B0 0.675603595979828813
#define B1 -0.175603595979828813
#define D0 1.35120719195965763
#define D1 -1.70241438391931525
/* function prototypes */
void force(long, long, long, double *, double *, double *, double *,double *, double *, float *, float *, double *, double *);
__global__ void step_type1(long , double, double, double *, double *, double *);
__global__ void step_type2(long , double, double *, double *, double *);
// Advances the system one step of dt by alternating position/momentum
// sub-steps (step_type1/step_type2 kernels) with force re-evaluations.
// NOTE(review): the coefficients B0/B1/D0/D1 match a 4th-order symplectic
// composition (Forest-Ruth/Yoshida family) — TODO confirm against the
// step_type1/step_type2 implementations.
void timestep(long n, long nblock, long nthread, double dt, double *mx, double *my, double *magx, double *magy,
double *magx_gpu, double *magy_gpu, double *r_gpu, double *p_gpu, double *f_gpu, float *sinr_gpu, float *cosr_gpu)
{
double bb0,bb1,dd0,dd1;
// Pre-scale the stage coefficients by the step size.
bb0=B0*dt;
bb1=B1*dt;
dd0=D0*dt;
dd1=D1*dt;
// Three combined sub-steps, each followed by a force refresh, then a final
// momentum-only sub-step (step_type2 takes no D coefficient).
step_type1<<<nblock,nthread>>>(n,bb0,dd0,r_gpu,p_gpu,f_gpu);
force(n,nblock,nthread,mx,my,magx,magy,r_gpu,f_gpu,sinr_gpu,cosr_gpu,magx_gpu,magy_gpu);
step_type1<<<nblock,nthread>>>(n,bb1,dd1,r_gpu,p_gpu,f_gpu);
force(n,nblock,nthread,mx,my,magx,magy,r_gpu,f_gpu,sinr_gpu,cosr_gpu,magx_gpu,magy_gpu);
step_type1<<<nblock,nthread>>>(n,bb1,dd0,r_gpu,p_gpu,f_gpu);
force(n,nblock,nthread,mx,my,magx,magy,r_gpu,f_gpu,sinr_gpu,cosr_gpu,magx_gpu,magy_gpu);
step_type2<<<nblock,nthread>>>(n,bb0,r_gpu,p_gpu,f_gpu);
return;
}
|
// Minimal float4 helpers used by the n-body kernel below: broadcast
// construction plus element-wise +, -, +=, * and scalar scaling.
// Broadcast a scalar into all four lanes.
inline __device__ float4 make_float4(float s)
{
return make_float4(s, s, s, s);
}
// Element-wise sum.
inline __device__ float4 operator+(float4 a, float4 b)
{
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
// Element-wise difference.
inline __host__ __device__ float4 operator-(float4 a, float4 b)
{
return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
// In-place element-wise accumulation.
inline __device__ void operator+=(float4 &a, float4 b)
{
a.x += b.x;
a.y += b.y;
a.z += b.z;
a.w += b.w;
}
// Element-wise (Hadamard) product.
inline __device__ float4 operator*(float4 a, float4 b)
{
return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
// Scale by a scalar (both operand orders).
inline __device__ float4 operator*(float4 a, float b)
{
return make_float4(a.x * b, a.y * b, a.z * b, a.w * b);
}
inline __device__ float4 operator*(float b, float4 a)
{
return make_float4(b * a.x, b * a.y, b * a.z, b * a.w);
}
// Reference all-pairs n-body integration step using shared-memory tiling:
// each block caches blockDim.x particle positions at a time and every
// thread accumulates the gravitational acceleration of its own particle
// against the cached tile.
// Launch contract: total thread count equals the particle count and the
// particle count is an exact multiple of blockDim.x (nb = n/nt divides
// evenly); blockDim.x must not exceed 1024 (fixed pblock size — see FIXME).
// pos_* .w carries the particle mass (used as p2.w in the force term).
// NOTE(review): `damping` scales the dt^2 acceleration term of the position
// update rather than damping the velocity — confirm intended semantics.
__global__ void nbody_kernel_reference(float dt1,
float4* pos_old,
float4* pos_new,
float4* oldVel,
float4* newVel,
float damping,
float softeningSqr)
{
const float4 dt = make_float4(dt1, dt1, dt1, 0.0f);//(float4){.x=dt1,.y=dt1,.z=dt1,.w=0.0f};
int gti = blockIdx.x*blockDim.x + threadIdx.x;
int ti = threadIdx.x;
int n = blockDim.x*gridDim.x;
int nt = blockDim.x;
int nb = n/nt;
__shared__ float4 pblock[1024]; // FIXME
float4 p = pos_old[gti];
float4 v = oldVel[gti];
float4 a = make_float4(0.0f);//{.x=0.0f,.y=0.0f,.z=0.0f,.w=0.0f};
for(int jb=0; jb < nb; jb++) { /* Foreach block ... */
pblock[ti] = pos_old[jb*nt+ti]; /* Cache ONE particle position */
__syncthreads(); /* Wait for others in the work-group */
for(int j=0; j<nt; j++) { /* For ALL cached particle positions ... */
float4 p2 = pblock[j]; /* Read a cached particle position */
float4 d = p2 - p;
// Softened inverse distance; softeningSqr prevents the singularity at d=0.
float invr = rsqrtf(d.x*d.x + d.y*d.y + d.z*d.z + softeningSqr);
float f = p2.w*invr*invr*invr;
a += f*d; /* Accumulate acceleration */
}
__syncthreads(); /* Wait for others in work-group */
}
// Leapfrog-style update: position from velocity + damped acceleration term,
// then velocity from acceleration.
p += dt*v + damping*dt*dt*a;
v += dt*a;
pos_new[gti] = p;
newVel[gti] = v;
}
|
7,882 | /**
* @file pctdemo_processMandelbrotElement.cu
*
* CUDA code to calculate the Mandelbrot Set on a GPU.
*
* Based on previous work by The MathWorks, Inc in 2011.
*/
__constant__ double Rvalues [256] = { 0.2422, 0.2444, 0.2464, 0.2484, 0.2503, 0.2522, 0.2540, 0.2558, 0.2576, 0.2594, 0.2611, 0.2628, 0.2645, 0.2661, 0.2676, 0.2691, 0.2704, 0.2717, 0.2729, 0.2740, 0.2749, 0.2758, 0.2766, 0.2774, 0.2781, 0.2788, 0.2794, 0.2798, 0.2802, 0.2806, 0.2809, 0.2811, 0.2813, 0.2814, 0.2814, 0.2813, 0.2811, 0.2809, 0.2807, 0.2803, 0.2798, 0.2791, 0.2784, 0.2776, 0.2766, 0.2754, 0.2741, 0.2726, 0.2710, 0.2691, 0.2670, 0.2647, 0.2621, 0.2591, 0.2556, 0.2517, 0.2473, 0.2424, 0.2369, 0.2311, 0.2250, 0.2189, 0.2128, 0.2066, 0.2006, 0.1950, 0.1903, 0.1869, 0.1847, 0.1831, 0.1818, 0.1806, 0.1795, 0.1785, 0.1778, 0.1773, 0.1768, 0.1764, 0.1755, 0.1740, 0.1716, 0.1686, 0.1649, 0.1610, 0.1573, 0.1540, 0.1513, 0.1492, 0.1475, 0.1461, 0.1446, 0.1429, 0.1408, 0.1383, 0.1354, 0.1321, 0.1288, 0.1253, 0.1219, 0.1185, 0.1152, 0.1119, 0.1085, 0.1048, 0.1009, 0.0964, 0.0914, 0.0855, 0.0789, 0.0713, 0.0628, 0.0535, 0.0433, 0.0328, 0.0234, 0.0155, 0.0091, 0.0046, 0.0019, 0.0009, 0.0018, 0.0046, 0.0094, 0.0162, 0.0253, 0.0369, 0.0504, 0.0638, 0.0770, 0.0899, 0.1023, 0.1141, 0.1252, 0.1354, 0.1448, 0.1532, 0.1609, 0.1678, 0.1741, 0.1799, 0.1853, 0.1905, 0.1954, 0.2003, 0.2061, 0.2118, 0.2178, 0.2244, 0.2318, 0.2401, 0.2491, 0.2589, 0.2695, 0.2809, 0.2929, 0.3052, 0.3176, 0.3301, 0.3424, 0.3548, 0.3671, 0.3795, 0.3921, 0.4050, 0.4184, 0.4322, 0.4463, 0.4608, 0.4753, 0.4899, 0.5044, 0.5187, 0.5329, 0.5470, 0.5609, 0.5748, 0.5886, 0.6024, 0.6161, 0.6297, 0.6433, 0.6567, 0.6701, 0.6833, 0.6963, 0.7091, 0.7218, 0.7344, 0.7468, 0.7590, 0.7710, 0.7829, 0.7945, 0.8060, 0.8172, 0.8281, 0.8389, 0.8495, 0.8600, 0.8703, 0.8804, 0.8903, 0.9000, 0.9093, 0.9184, 0.9272, 0.9357, 0.9440, 0.9523, 0.9606, 0.9689, 0.9770, 0.9842, 0.9900, 0.9946, 0.9966, 0.9971, 0.9972, 0.9971, 0.9969, 0.9966, 0.9962, 0.9957, 0.9949, 0.9938, 0.9923, 0.9906, 0.9885, 0.9861, 0.9835, 0.9807, 0.9778, 0.9748, 0.9720, 0.9694, 0.9671, 0.9651, 0.9634, 0.9619, 0.9608, 0.9601, 0.9596, 0.9595, 0.9597, 0.9601, 
0.9608, 0.9618, 0.9629, 0.9642, 0.9657, 0.9674, 0.9692, 0.9711, 0.9730, 0.9749, 0.9769 };
__constant__ double Gvalues [256] = { 0.1504, 0.1534, 0.1569, 0.1607, 0.1648, 0.1689, 0.1732, 0.1773, 0.1814, 0.1854, 0.1893, 0.1932, 0.1972, 0.2011, 0.2052, 0.2094, 0.2138, 0.2184, 0.2231, 0.2280, 0.2330, 0.2382, 0.2435, 0.2489, 0.2543, 0.2598, 0.2653, 0.2708, 0.2764, 0.2819, 0.2875, 0.2930, 0.2985, 0.3040, 0.3095, 0.3150, 0.3204, 0.3259, 0.3313, 0.3367, 0.3421, 0.3475, 0.3529, 0.3583, 0.3638, 0.3693, 0.3748, 0.3804, 0.3860, 0.3916, 0.3973, 0.4030, 0.4088, 0.4145, 0.4203, 0.4261, 0.4319, 0.4378, 0.4437, 0.4497, 0.4559, 0.4620, 0.4682, 0.4743, 0.4803, 0.4861, 0.4919, 0.4975, 0.5030, 0.5084, 0.5138, 0.5191, 0.5244, 0.5296, 0.5349, 0.5401, 0.5452, 0.5504, 0.5554, 0.5605, 0.5655, 0.5705, 0.5755, 0.5805, 0.5854, 0.5902, 0.5950, 0.5997, 0.6043, 0.6089, 0.6135, 0.6180, 0.6226, 0.6272, 0.6317, 0.6363, 0.6408, 0.6453, 0.6497, 0.6541, 0.6584, 0.6627, 0.6669, 0.6710, 0.6750, 0.6789, 0.6828, 0.6865, 0.6902, 0.6938, 0.6972, 0.7006, 0.7039, 0.7071, 0.7103, 0.7133, 0.7163, 0.7192, 0.7220, 0.7248, 0.7275, 0.7301, 0.7327, 0.7352, 0.7376, 0.7400, 0.7423, 0.7446, 0.7468, 0.7489, 0.7510, 0.7531, 0.7552, 0.7572, 0.7593, 0.7614, 0.7635, 0.7656, 0.7678, 0.7699, 0.7721, 0.7743, 0.7765, 0.7787, 0.7808, 0.7828, 0.7849, 0.7869, 0.7887, 0.7905, 0.7922, 0.7937, 0.7951, 0.7964, 0.7975, 0.7985, 0.7994, 0.8002, 0.8009, 0.8016, 0.8021, 0.8026, 0.8029, 0.8031, 0.8030, 0.8028, 0.8024, 0.8018, 0.8011, 0.8002, 0.7993, 0.7982, 0.7970, 0.7957, 0.7943, 0.7929, 0.7913, 0.7896, 0.7878, 0.7859, 0.7839, 0.7818, 0.7796, 0.7773, 0.7750, 0.7727, 0.7703, 0.7679, 0.7654, 0.7629, 0.7604, 0.7579, 0.7554, 0.7529, 0.7505, 0.7481, 0.7457, 0.7435, 0.7413, 0.7392, 0.7372, 0.7353, 0.7336, 0.7321, 0.7308, 0.7298, 0.7290, 0.7285, 0.7284, 0.7285, 0.7292, 0.7304, 0.7330, 0.7365, 0.7407, 0.7458, 0.7513, 0.7569, 0.7626, 0.7683, 0.7740, 0.7798, 0.7856, 0.7915, 0.7974, 0.8034, 0.8095, 0.8156, 0.8218, 0.8280, 0.8342, 0.8404, 0.8467, 0.8529, 0.8591, 0.8654, 0.8716, 0.8778, 0.8840, 0.8902, 0.8963, 0.9023, 0.9084, 0.9143, 0.9203, 
0.9262, 0.9320, 0.9379, 0.9437, 0.9494, 0.9552, 0.9609, 0.9667, 0.9724, 0.9782, 0.9839 };
__constant__ double Bvalues [256] = { 0.6603, 0.6728, 0.6847, 0.6961, 0.7071, 0.7179, 0.7286, 0.7393, 0.7501, 0.7610, 0.7719, 0.7828, 0.7937, 0.8043, 0.8148, 0.8249, 0.8346, 0.8439, 0.8528, 0.8612, 0.8692, 0.8767, 0.8840, 0.8908, 0.8973, 0.9035, 0.9094, 0.9150, 0.9204, 0.9255, 0.9305, 0.9352, 0.9397, 0.9441, 0.9483, 0.9524, 0.9563, 0.9600, 0.9636, 0.9670, 0.9702, 0.9733, 0.9763, 0.9791, 0.9817, 0.9840, 0.9862, 0.9881, 0.9898, 0.9912, 0.9924, 0.9935, 0.9946, 0.9955, 0.9965, 0.9974, 0.9983, 0.9991, 0.9996, 0.9995, 0.9985, 0.9968, 0.9948, 0.9926, 0.9906, 0.9887, 0.9867, 0.9844, 0.9819, 0.9793, 0.9766, 0.9738, 0.9709, 0.9677, 0.9641, 0.9602, 0.9560, 0.9516, 0.9473, 0.9432, 0.9393, 0.9357, 0.9323, 0.9289, 0.9254, 0.9218, 0.9182, 0.9147, 0.9113, 0.9080, 0.9050, 0.9022, 0.8998, 0.8975, 0.8953, 0.8932, 0.8910, 0.8887, 0.8862, 0.8834, 0.8804, 0.8770, 0.8734, 0.8695, 0.8653, 0.8609, 0.8562, 0.8513, 0.8462, 0.8409, 0.8355, 0.8299, 0.8242, 0.8183, 0.8124, 0.8064, 0.8003, 0.7941, 0.7878, 0.7815, 0.7752, 0.7688, 0.7623, 0.7558, 0.7492, 0.7426, 0.7359, 0.7292, 0.7224, 0.7156, 0.7088, 0.7019, 0.6950, 0.6881, 0.6812, 0.6741, 0.6671, 0.6599, 0.6527, 0.6454, 0.6379, 0.6303, 0.6225, 0.6146, 0.6065, 0.5983, 0.5899, 0.5813, 0.5725, 0.5636, 0.5546, 0.5454, 0.5360, 0.5266, 0.5170, 0.5074, 0.4975, 0.4876, 0.4774, 0.4669, 0.4563, 0.4454, 0.4344, 0.4233, 0.4122, 0.4013, 0.3904, 0.3797, 0.3691, 0.3586, 0.3480, 0.3374, 0.3267, 0.3159, 0.3050, 0.2941, 0.2833, 0.2726, 0.2622, 0.2521, 0.2423, 0.2329, 0.2239, 0.2155, 0.2075, 0.1998, 0.1924, 0.1852, 0.1782, 0.1717, 0.1658, 0.1608, 0.1570, 0.1546, 0.1535, 0.1536, 0.1546, 0.1564, 0.1587, 0.1615, 0.1650, 0.1695, 0.1749, 0.1815, 0.1890, 0.1973, 0.2061, 0.2151, 0.2237, 0.2312, 0.2373, 0.2418, 0.2446, 0.2429, 0.2394, 0.2351, 0.2309, 0.2267, 0.2224, 0.2181, 0.2138, 0.2095, 0.2053, 0.2012, 0.1974, 0.1939, 0.1906, 0.1875, 0.1846, 0.1817, 0.1787, 0.1757, 0.1726, 0.1695, 0.1665, 0.1636, 0.1608, 0.1582, 0.1557, 0.1532, 0.1507, 0.1480, 0.1450, 0.1418, 0.1382, 
0.1344, 0.1304, 0.1261, 0.1216, 0.1168, 0.1116, 0.1061, 0.1001, 0.0938, 0.0872, 0.0805 };
/** Work out which piece of the global array this thread should operate on */
/** Flattens the 2-D grid of 2-D blocks into a single linear thread index. */
__device__ size_t calculateGlobalIndex() {
    // Linear id of this block within the grid.
    size_t const blockId = blockIdx.x + blockIdx.y * gridDim.x;
    // Linear id of this thread within its block.
    size_t const threadInBlock = threadIdx.x + blockDim.x * threadIdx.y;
    // Threads per block.
    size_t const blockSize = blockDim.x * blockDim.y;
    return blockId * blockSize + threadInBlock;
}
/** The actual Mandelbrot algorithm for a single location */
/** Mandelbrot escape-time iteration for a single point z0 = (re0, im0):
 *  iterates z = z*z + z0 until |z|^2 > 4 or the iteration budget is spent,
 *  returning the number of iterations performed. */
__device__ unsigned int doIterations( double const realPart0,
                                      double const imagPart0,
                                      unsigned int const maxIters ) {
    double re = realPart0;
    double im = imagPart0;
    unsigned int count = 0;
    while (count <= maxIters && re * re + im * im <= 4.0) {
        ++count;
        // z = z^2 + z0, computed with the pre-update real part.
        double const reOld = re;
        re = re * re - im * im + realPart0;
        im = 2 * reOld * im + imagPart0;
    }
    return count;
}
/** Main entry point.
* Works out where the current thread should read/write to global memory
* and calls doIterations to do the actual work.
*/
/** Main entry point.
 * Works out where the current thread should read/write to global memory,
 * runs the escape-time iteration, and converts the count into RGB via the
 * __constant__ colormap tables.
 * Fixes vs. the original:
 *  - the running minimum in minCountsNow[0] was updated with a racy
 *    read-modify-write across threads; it now uses atomicMin;
 *  - the colormap index is clamped to [0, 255] so the 256-entry tables can
 *    never be read out of bounds (the log mapping can exceed 255, and
 *    underflow of count + 1 - minCountsLast produces huge values).
 */
__global__ void processMandelbrotElement(
                      double * outR,
                      double * outG,
                      double * outB,
                      int * minCountsNow,
                      const double * x,
                      const double * y,
                      const unsigned int maxIters,
                      const unsigned int numel,
                      unsigned int minCountsLast) {
    // Work out which thread we are
    size_t const globalThreadIdx = calculateGlobalIndex();
    // If we're off the end, return now
    if (globalThreadIdx >= numel) {
        return;
    }
    // Get our X and Y coords
    double const realPart0 = x[globalThreadIdx];
    double const imagPart0 = y[globalThreadIdx];
    // Run the iterations on this location
    unsigned int const count = doIterations( realPart0, imagPart0, maxIters );
    // Logarithmic mapping of the (offset) count into a colormap index.
    unsigned int value = (unsigned int)( log( double( count + 1 - minCountsLast ))/log(double(maxIters))*255 );
    if (value > 255) {
        value = 255;
    }
    // Track the smallest count this frame (race-free).
    atomicMin(&minCountsNow[0], (int)count);
    outR[globalThreadIdx] = Rvalues[value];
    outG[globalThreadIdx] = Gvalues[value];
    outB[globalThreadIdx] = Bvalues[value];
}
|
7,883 | //
// kernel routine
//
#include "cuda_runtime.h"
__global__ void my_first_kernel(float *x)
{
    // Each thread stores its index-within-block into its global slot of x.
    // (Assumes the launch exactly covers the length of x — no bounds guard.)
    const int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    x[globalId] = (float)threadIdx.x;
}
|
7,884 | #include<iostream>
#include<cuda.h>
#include<cuda_runtime.h>
using namespace std;
// Device-side logger for one addition; called by every thread running the
// `add` kernel, so the output repeats once per launched thread.
__device__ void print(int a, int b, int sum){
printf("Printing sum inside DEVICE\n");
printf("\n%d\t%d + %d = %d", threadIdx.x, a, b, sum);
}
__global__ void add(int *a, int *b, int* sum){
    // Every launched thread computes (and logs) the same scalar sum; the
    // concurrent identical writes to *sum are benign.
    const int lhs = *a;
    const int rhs = *b;
    *sum = lhs + rhs;
    print(lhs, rhs, *sum);
}
int main(){
// Managed allocations: the same pointers are usable from host and device.
int *a, *b, *sum;
cudaMallocManaged(&a, sizeof(int));
cudaMallocManaged(&b, sizeof(int));
cudaMallocManaged(&sum , sizeof(int));
cout<<"Enter A: "; cin>>*a;
cout<<"Enter B: "; cin>>*b;
// NOTE(review): <<<1,10>>> launches 10 threads that all compute and print
// the same sum; a single thread would suffice.
add<<<1,10>>>(a,b,sum);
// Required before the host touches the managed result again.
cudaDeviceSynchronize();
cout<<"\nPrinting sum in HOST: Sum is "<<*sum<<endl;
// NOTE(review): the managed allocations are never cudaFree'd — leak.
return 0;
}
|
7,885 | #include "includes.h"
// Computes the Shannon entropy of each blk_size x blk_size image block.
// One CUDA block per image block, one thread per pixel; the result for CUDA
// block (bx, by) is written to entropy_out[by * gridDim.x + bx].
// NOTE(review): float_image_in is normalised IN PLACE (squared and divided
// by the block sum), so the input buffer is destroyed; a zero-valued pixel
// leads to log2(0) and a NaN contribution — confirm inputs are positive.
extern "C"
__global__ void calc_entropy_atomic(float *float_image_in, float *entropy_out, int blk_size) {
//calculate entropy of a block: all threads accumulate into shared scalars
__shared__ float sum;
if (threadIdx.x == 0 && threadIdx.y == 0) {
sum = 0.0;
}
__syncthreads();
int blocksize = blk_size*blk_size;
//vertical offset to get to beginning of own block
int v_offset_to_blkrow = gridDim.x*blockDim.x*blockDim.y*blockIdx.y;
int v_offset_to_pixrow = blockDim.x*gridDim.x*threadIdx.y;
int h_offset = blockDim.x*blockIdx.x + threadIdx.x;
int idx = v_offset_to_blkrow + v_offset_to_pixrow + h_offset; //idx of this thread's pixel
int out_idx = blockIdx.y*gridDim.x + blockIdx.x;
//normalize image (in place -- see NOTE above) and accumulate block energy
float_image_in[idx] = float_image_in[idx] * float_image_in[idx] / (blocksize);
atomicAdd(&sum, float_image_in[idx]);
__syncthreads();
__shared__ float entropy;
if (threadIdx.x == 0 && threadIdx.y == 0) {
entropy = 0.0;
}
__syncthreads();
// Turn each pixel into a probability, then accumulate -p*log2(p).
float_image_in[idx] = float_image_in[idx] / sum;
//shannon entropy
atomicAdd(&entropy, -float_image_in[idx] * log2(float_image_in[idx]));
__syncthreads();
//printf("%f\n", sum2);
// One thread per block publishes the result.
if (threadIdx.x == 0 && threadIdx.y == 0) {
entropy_out[out_idx] = entropy;
}
}
7,886 | #include "includes.h"
namespace {
} // namespace
__global__ void join_add(const int *d1, const int *d2, int *d3)
{
    // Scalar join: sum the leading elements of two device buffers into
    // the leading element of the output.
    d3[0] = d1[0] + d2[0];
}
7,887 | #include "includes.h"
__global__ void writeSimilarities(const float* nvccResults, int* activelayers, int writestep, int writenum, float* similarities, int active_patches, int patches)
{
    // One thread per active patch.
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid >= active_patches)
        return;
    // Broadcast this patch's similarity into `writenum` strided slots of
    // the output, addressed by the patch index looked up in activelayers.
    const float value = nvccResults[tid];
    const int patch = activelayers[tid];
    for (int slot = 0; slot < writenum; ++slot)
        similarities[patches * writestep * slot + patch] = value;
}
7,888 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
#define THREADS 30
#define BLOCKS 200
#define SIZE 6000
__device__ float d_A[SIZE][SIZE];
__device__ float d_B[SIZE][SIZE];
__device__ float d_C[SIZE][SIZE];
__device__ float d_D[SIZE][SIZE];
__device__ float d_V[SIZE];
__device__ float d_VET[SIZE];
__device__ float ESCALAR = 1.25;
__global__ void load()
{
    // Single-thread initialization of the file-scope device arrays:
    // A and B get i+j, C and D are zeroed, V gets its index, VET zero.
    for (int i = 0; i < SIZE; ++i)
    {
        for (int j = 0; j < SIZE; ++j)
        {
            const float v = i + j;
            d_A[i][j] = v;
            d_B[i][j] = v;
            d_C[i][j] = 0;
            d_D[i][j] = 0;
        }
        d_V[i] = i;
        d_VET[i] = 0;
    }
}
__global__ void sumA_B()
{
    // Element-wise C = A + B; one thread per matrix cell.
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= SIZE || col >= SIZE)
        return;
    d_C[row][col] = d_A[row][col] + d_B[row][col];
}
__global__ void mulA_B()
{
    // Dense product D += A * B; one thread computes one output cell.
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < SIZE && col < SIZE)
    {
        // Accumulate in a register, same addition order as a direct
        // global-memory accumulation, then add once to D.
        float acc = 0;
        for (int k = 0; k < SIZE; ++k)
            acc += d_A[row][k] * d_B[k][col];
        d_D[row][col] += acc;
    }
}
__global__ void mulA_ESCALAR()
{
    // Scale A in place by the device-global ESCALAR constant.
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < SIZE && col < SIZE)
        d_A[row][col] = d_A[row][col] * ESCALAR;
}
__global__ void mulB_V()
{
    // Matrix-vector product VET += B * V; one thread per row of B.
    const int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= SIZE)
        return;
    float acc = 0;
    for (int j = 0; j < SIZE; ++j)
        acc += d_B[row][j] * d_V[j];
    d_VET[row] += acc;
}
int main()
{
// Times four device computations over the file-scope SIZE x SIZE
// matrices; load() fills them from a single thread first.
clock_t begin, end;
cudaSetDevice(0);
load<<<1, 1>>>();
cudaDeviceSynchronize();
printf("Somar A e B e armazenar em C.\n");
begin = clock();
// 200x200 blocks of 30x30 threads cover the 6000x6000 matrices exactly.
sumA_B<<<dim3(BLOCKS, BLOCKS), dim3(THREADS, THREADS)>>>();
// Launches are asynchronous; synchronize before stopping the clock.
cudaDeviceSynchronize();
end = clock();
printf("Feito em %.3f segundos.\n", double(end - begin) / CLOCKS_PER_SEC);
printf("Multiplicar A e B e armazenar em D.\n");
begin = clock();
mulA_B<<<dim3(BLOCKS, BLOCKS), dim3(THREADS, THREADS)>>>();
cudaDeviceSynchronize();
end = clock();
printf("Feito em %.3f segundos.\n", double(end - begin) / CLOCKS_PER_SEC);
printf("Multiplicar A e ESCALAR e armazenar em A.\n");
begin = clock();
mulA_ESCALAR<<<dim3(BLOCKS, BLOCKS), dim3(THREADS, THREADS)>>>();
cudaDeviceSynchronize();
end = clock();
printf("Feito em %.3f segundos.\n", double(end - begin) / CLOCKS_PER_SEC);
printf("Multiplicar B e V e armazenar em VET.\n");
begin = clock();
// 1-D launch: one thread per row of B.
mulB_V<<<BLOCKS, THREADS>>>();
cudaDeviceSynchronize();
end = clock();
printf("Feito em %.3f segundos.\n", double(end - begin) / CLOCKS_PER_SEC);
} |
7,889 | #include "includes.h"
__global__ void makeProjection( float *eT, float *e, float *eigenvec, int *indices, int M, int N ) {
    // One thread per (m, n) entry of the M x N projection.
    const int elementNum = blockIdx.x * blockDim.x + threadIdx.x;
    if ( elementNum >= M * N ) {
        return;
    }
    const int m = elementNum / N;  // row of the projection
    const int n = elementNum % N;  // column of the projection
    // Gather the selected eigenvector entry, then mirror it into the
    // transposed layout as well.
    const float v = eigenvec[n * M + indices[m]];
    e[n * M + m] = v;
    eT[m * N + n] = v;
}
7,890 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform.h>
#include <stdlib.h>
const float alpha = 0.375;
const float beta = 0.875;
struct vectorScaleAdd
{
    // Functor for thrust::for_each over a zip iterator: stores
    // alpha * first + beta * second into the third tuple slot.
    template <typename Tuple>
    __host__ __device__
    void operator()(Tuple t)
    {
        thrust::get<2>(t) = alpha * thrust::get<0>(t)
                          + beta  * thrust::get<1>(t);
    }
};
int main(void)
{
    const int N = 4;
    // Host-side mirrors of the device vectors.
    thrust::host_vector<float> hA(N);
    thrust::host_vector<float> hB(N);
    thrust::host_vector<float> hC(N);
    thrust::device_vector<float> dA(N);
    thrust::device_vector<float> dB(N);
    thrust::device_vector<float> dC(N);
    // Fill both inputs with pseudo-random values in [0, 1).
    for (int i = 0; i < N; i++)
    {
        dA[i] = (rand() % 256)/256.0;
        dB[i] = (rand() % 256)/256.0;
    }
    // dC = alpha*dA + beta*dB, element-wise via a zip iterator.
    thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(dA.begin(),dB.begin(),dC.begin())),
                     thrust::make_zip_iterator(thrust::make_tuple(dA.end(),dB.end(),dC.end())),
                     vectorScaleAdd());
    // Pull everything back to the host for printing.
    hA = dA;
    hB = dB;
    hC = dC;
    printf(" alpha * H1 + beta * H2 = H3\n");
    for (int i = 0; i < N; i++)
    {
        printf("%.3f * %.3f + %.3f * %.3f = %.3f\n", alpha, hA[i], beta, hB[i],hC[i]);
    }
    return 0;
}
|
7,891 | /*
Example code showing use of bariers to synchronize all threads in a block.
Barier is set with __syncthreads();
Job of this program is:
1. Initialize array with threadIndex
2. At each index assign value of index + 1
Compile: nvcc shiftLeft.cu -o shiftLeft.out
Run: ./shiftLeft
*/
#include <stdio.h>
#define NUM_BLOCKS 1
#define BLOCK_WIDTH 128
__global__ void shiftLeft(int* array)
{
    // Each thread writes its own index, then the array is shifted one
    // position to the left (the last element keeps its value).
    int idx = threadIdx.x;
    array[idx] = idx;
    __syncthreads();
    // Read the neighbour BEFORE anyone overwrites it; the last thread
    // re-reads its own slot so its value is unchanged.
    int tmp = (idx < BLOCK_WIDTH - 1) ? array[idx + 1] : array[idx];
    // FIX: the original put __syncthreads() inside the divergent
    // `if (idx < BLOCK_WIDTH - 1)` branch, so the last thread never
    // reached the barrier — undefined behavior. All threads must hit it.
    __syncthreads();
    array[idx] = tmp;
}
int main(int argc,char **argv)
{
    const int ARRAY_SIZE = BLOCK_WIDTH;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
    // Host-side destination buffer.
    int h_out[ARRAY_SIZE];
    // Device buffer: filled and shifted entirely by the kernel.
    int* d_out;
    cudaMalloc((void**) &d_out, ARRAY_BYTES);
    // One block, one thread per array element.
    shiftLeft<<<NUM_BLOCKS, BLOCK_WIDTH>>>(d_out);
    // Blocking copy: also synchronizes with the kernel.
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    // Print four values per row.
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        printf("%d", h_out[i]);
        printf(i % 4 != 3 ? "\t" : "\n");
    }
    cudaFree(d_out);
    return 0;
}
7,892 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
//Create a histogram kernel
__global__ void histogram_kernel(const int* const d_container, int* d_histogram,
const int maxsize, const int num_bins) {
    // One thread per input element; elements are binned by value modulo
    // num_bins. d_histogram must be zeroed by the caller.
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    // FIX: the original guard was `index > maxsize`, which let the thread
    // with index == maxsize read one element past the end of the array.
    if (index >= maxsize) return;
    int bin = d_container[index] % num_bins;
    // Bins are shared by every thread, so the increment must be atomic.
    atomicAdd(&d_histogram[bin], 1);
}
int main(int argc, char **argv) {
    // Builds a histogram of 2^24 ints on both CPU and GPU and prints the
    // per-bin difference (0 everywhere when they agree).
    int maxsize = 1 << 24;
    int *container_array = new int[maxsize];
    for(int i=0;i<maxsize;i++) {
        container_array[i] = i;
    }
    // CPU reference histogram.
    const int num_bins = 10000;
    int histogram[num_bins];
    for(int i=0;i<num_bins;i++) {
        histogram[i] = 0;
    }
    for(int i=0;i<maxsize;i++) {
        histogram[container_array[i] % num_bins]++;
    }
    //Allocate mem to device
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp devProps;
    if (cudaGetDeviceProperties(&devProps, dev) == 0) {
        printf("Using device %d:\n", dev);
        printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
               devProps.name, (int) devProps.totalGlobalMem,
               (int) devProps.major, (int) devProps.minor,
               (int) devProps.clockRate);
    }
    int* d_container_array;
    int* d_histogram;
    int* h_histogram = new int[num_bins];
    cudaMalloc(&d_container_array, sizeof(int) * maxsize);
    cudaMalloc(&d_histogram, sizeof(int) * num_bins);
    cudaMemcpy(d_container_array, container_array, sizeof(int) * maxsize, cudaMemcpyHostToDevice);
    cudaMemset(d_histogram, 0, sizeof(int) * num_bins);
    // FIX: ceil(maxsize / 1024) did integer division BEFORE ceil, silently
    // truncating for sizes not divisible by 1024; round up in integers.
    int gridsize = (maxsize + 1023) / 1024;
    histogram_kernel<<<gridsize, 1024>>>(d_container_array, d_histogram, maxsize, num_bins);
    cudaMemcpy(h_histogram, d_histogram, sizeof(int) * num_bins, cudaMemcpyDeviceToHost);
    //Call histogram kernel and get histogram from device to host
    //COmpare the results
    for(int i=0;i<num_bins;i++) {
        printf("%d %d\n", histogram[i], histogram[i] - h_histogram[i]);
    }
    cudaFree(d_container_array);
    cudaFree(d_histogram);
    // FIX: these buffers came from new[]; releasing them with free() is
    // undefined behavior — they must be delete[]'d.
    delete[] h_histogram;
    delete[] container_array;
    return 0;
}
|
7,893 | #include "includes.h"
__global__ void convolutionRowGPU(float *d_Dst, float *d_Src, float *d_Filter, int imageW, int imageH, int filterR){
    // Row convolution over a padded image: rows are imageW + 2*filterR
    // wide, and the thread grid addresses the interior (unpadded) region
    // offset by filterR on each axis.
    const int row = blockDim.y * blockIdx.y + threadIdx.y + filterR;
    const int col = blockDim.x * blockIdx.x + threadIdx.x + filterR;
    const int paddedW = imageW + filterR * 2;
    // Correlate the horizontal neighbourhood with the reversed filter.
    float acc = 0;
    for (int k = -filterR; k <= filterR; ++k)
        acc += d_Src[row * paddedW + (col + k)] * d_Filter[filterR - k];
    d_Dst[row * paddedW + col] = acc;
}
7,894 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
/*
* kernel function to be run parallel on device
*/
__global__
void rgb2gray_kernel(const uchar3 *rgb,
                     uchar1 *gray,
                     const int n_rows,
                     const int n_cols
                     ){
    // Luma conversion, one thread per pixel:
    // gray = .299*red + .587*green + .114*blue
    int pixelIdx_x = blockIdx.x * blockDim.x + threadIdx.x;
    int pixelIdx_y = blockIdx.y * blockDim.y + threadIdx.y;
    // be careful not to exceed image boundary
    if (pixelIdx_x < n_cols && pixelIdx_y < n_rows){
        int pixelIdx = pixelIdx_y * n_cols + pixelIdx_x;
        // FIX: the weights were double literals, forcing every pixel
        // through fp64 arithmetic; float literals keep the math in fp32.
        // (Also dropped the unused img_size local.)
        gray[pixelIdx].x = (unsigned char)(
            0.299f * rgb[pixelIdx].x +
            0.587f * rgb[pixelIdx].y +
            0.114f * rgb[pixelIdx].z );
    }
}
void rgb2gray_caller(const uchar3 *rgb,
                     uchar1 *gray,
                     const int n_rows,
                     const int n_cols){
    // One 32x32 thread block per image tile; round the grid up so the
    // whole image is covered when dimensions are not multiples of 32.
    const int n_threads_per_dim = 32;
    const int n_blocks_x = (n_cols + n_threads_per_dim - 1) / n_threads_per_dim;
    const int n_blocks_y = (n_rows + n_threads_per_dim - 1) / n_threads_per_dim;
    std::cout << "n_threads_per_dim: " << n_threads_per_dim << std::endl;
    std::cout << "n_threads_per_block: " << n_threads_per_dim * n_threads_per_dim << std::endl;
    std::cout << "n_blocks_x: " << n_blocks_x << ", ";
    std::cout << "n_blocks_y: " << n_blocks_y << std::endl;
    const dim3 grid(n_blocks_x, n_blocks_y, 1);
    const dim3 block(n_threads_per_dim, n_threads_per_dim, 1);
    rgb2gray_kernel<<<grid, block>>>(rgb, gray, n_rows, n_cols);
}
|
7,895 | #include <cstdio>
#if defined(NDEBUG)
#define CUDA_CHECK(x) (x) //release mode
#else // debug mode
#define CUDA_CHECK(x) do{ \
(x); \
cudaError_t e = cudaGetLastError(); \
if (e != cudaSuccess) { \
printf("cuda failure %s at %s:%d\n", cudaGetErrorString(e), __FILE__, __LINE__); \
return 1; \
}\
}while(0)
#endif
/*
void CUDA_CHECK(cudaError_t e){
if(e != cudaSuccess){
printf("cuda failure");
}
}
*/
//main program for the CPU: compiled by MS-VC++
int main(void){
    // Round-trips a small array host -> device -> device -> host and
    // prints it before and after; every CUDA call is error-checked.
    //host-side data
    const int SIZE = 5;
    const int a[SIZE] = {1,2,3,4,5};
    int b[SIZE] = {0,0,0,0,0};
    //print source
    printf("a = {%d,%d,%d,%d,%d}\n",a[0],a[1],a[2],a[3],a[4]);
    //device-side data
    int *dev_a = 0;
    int *dev_b = 0;
    //allocate device memory
    CUDA_CHECK(cudaMalloc((void**)&dev_a,SIZE*sizeof(int)) );
    // FIX: the original allocated dev_a twice and never allocated dev_b,
    // so every copy involving dev_b went through a null pointer.
    CUDA_CHECK(cudaMalloc((void**)&dev_b,SIZE*sizeof(int)) );
    //copy from host to device
    // FIX: this copies a host buffer to the device; the original passed
    // cudaMemcpyDeviceToDevice for a host source pointer.
    CUDA_CHECK(cudaMemcpy(dev_a, a, SIZE*sizeof(int),cudaMemcpyHostToDevice) );
    //copy from device to device
    CUDA_CHECK(cudaMemcpy(dev_b,dev_a,SIZE*sizeof(int),cudaMemcpyDeviceToDevice) );
    //copy from device to host
    CUDA_CHECK(cudaMemcpy(b,dev_b,SIZE*sizeof(int),cudaMemcpyDeviceToHost) );
    //free device memory
    CUDA_CHECK(cudaFree(dev_a) );
    CUDA_CHECK(cudaFree(dev_b) );
    //print the result
    printf("b = { %d,%d,%d,%d,%d}\n",b[0],b[1],b[2],b[3],b[4]);
    return 0;
}
|
7,896 | #include<iostream>
#include<stdio.h>
#include<malloc.h>
#include<cuda.h>
using namespace std;
#define TILE_WIDTH 32
__global__
void MultiplicaMatricesCU(int* A,int filA,int colA,int* B,int filB,int colB,int* C){//filC=filA,colC=colB
// Tiled matrix multiply C = A * B using TILE_WIDTH x TILE_WIDTH shared
// tiles; expects blockDim == (TILE_WIDTH, TILE_WIDTH).
// Shared tiles for the current sub-blocks of A and B.
__shared__ float A_s[TILE_WIDTH][TILE_WIDTH];
__shared__ float B_s[TILE_WIDTH][TILE_WIDTH];
// Which block and which thread we are in.
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int gx = gridDim.x;
int gy = gridDim.y;
// Output coordinates in C.
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
int suma = 0;// running sum of the products
int n = 0, m = 0;
// NOTE(review): the tile walk is bounded by the grid dimensions (gx, gy)
// rather than ceil(colA / TILE_WIDTH); this only covers the full shared
// dimension when the grid is sized to match — confirm at the launch site.
while(m < gx && n < gy){
/* From A we want the columns, hence:
* col = ( ( m * TILE_WIDTH ) + tx )
* col = ( ( bx * TILE_WIDTH ) + tx )
* Comparing both: m walks across blocks along the x axis (columns).
*/
if(( ( m * TILE_WIDTH ) + tx ) < colA && row < filA) // stay in bounds
A_s[ty][tx] = A[ (row * colA) + ( ( m * TILE_WIDTH ) + tx )];//(Row*colA + k), where k-> 0..filB (filB = colA)
else A_s[ty][tx] = 0;
/* From B we want the rows, hence:
* row = ( ( m * TILE_WIDTH ) + tx )
* row = ( ( by * TILE_WIDTH ) + tx )
* Comparing both: n walks across blocks along the y axis (rows).
*/
if(( n * TILE_WIDTH + ty) < filB && col < colB)
B_s[ty][tx] = B[( ( n * TILE_WIDTH + ty) * colB ) + col ];//(k*colB)+Col, where k-> 0..filB
else B_s[ty][tx] = 0;
m++; n++;
__syncthreads();// wait until every thread has loaded its tile entries
for (int k=0; k < TILE_WIDTH ; ++k) {
suma += A_s[ty][k] * B_s[k][tx];
}
// Keep the tiles alive until everyone has finished multiplying them.
__syncthreads();
}
if(row < filA && col < colB)
C[ (row * colB) + col] = suma; //C[filA][colB]
}
__host__
void multiplicaMatrices(int* X,int filX,int colX,int* Y,int filY,int colY,int* Z){
    // Naive triple-loop host product Z = X * Y; Z is filX x colY and the
    // inner dimension is filY (== colX for compatible operands).
    for (int fila = 0; fila < filX; ++fila) {
        for (int col = 0; col < colY; ++col) {
            int acc = 0;
            for (int k = 0; k < filY; ++k)
                acc += X[fila * colX + k] * Y[k * colY + col];
            Z[fila * colY + col] = acc;
        }
    }
}
__host__
void imprime(int* A,int filas, int columnas){// dump a flat array as a matrix
    // Print `filas` rows of `columnas` space-separated values.
    for (int f = 0; f < filas; ++f) {
        for (int c = 0; c < columnas; ++c)
            cout << A[(f * columnas) + c] << " ";
        cout << endl;
    }
}
__host__
void inicializa(int *A,int filas, int columnas){// fill with ones
    // Every entry of the filas x columnas matrix is set to 1.
    const int total = filas * columnas;
    for (int k = 0; k < total; ++k)
        A[k] = 1;
}
__host__
bool compara(int *A, int *B, int filas, int columnas){
    // Element-wise equality of two filas x columnas matrices stored flat.
    const int total = filas * columnas;
    for (int k = 0; k < total; ++k) {
        if (A[k] != B[k])
            return false;
    }
    return true;
}
int main(void){
    // Multiplies two 1024x1024 matrices of ones on the CPU and the GPU,
    // times both paths, and compares the results element-wise.
    clock_t startCPU,endCPU,startGPU,endGPU;
    cudaError_t error = cudaSuccess;
    int *A,*B,*C; //A[filA][colA],B[filB][colB],C[filA][colB]
    int *d_A,*d_B,*d_C,*h_C;
    int filA=1024,colA=1024,filB=1024,colB=1024;
    //-------------------------------CPU--------------------------------------------------------------------
    A=(int*)malloc(filA*colA*sizeof(int));
    B=(int*)malloc(filB*colB*sizeof(int));
    C=(int*)malloc(filA*colB*sizeof(int));
    inicializa(A,filA,colA);
    inicializa(B,filB,colB);
    if(colA==filB){// dimensions must be compatible
        startCPU = clock();
        multiplicaMatrices(A,filA,colA,B,filB,colB,C);
        endCPU = clock();
    }else{
        cout<<"Error, no se pueden multiplicar"<<endl;
        return 0;
    }
    double time_CPU=((double)(endCPU-startCPU))/CLOCKS_PER_SEC;
    cout<<"El tiempo transcurrido en la CPU fue: "<<time_CPU<<endl;
    //-------------------------------GPU--------------------------------------------------------------------
    h_C=(int*)malloc(filA*colB*sizeof(int));
    startGPU = clock();
    error=cudaMalloc((void**)&d_A,filA*colA*sizeof(int));
    if(error != cudaSuccess){
        cout<<"Error reservando memoria para d_A"<<endl;
    }
    // FIX: the original discarded the return codes of the next two
    // cudaMalloc calls, so their checks always re-tested the d_A result.
    error=cudaMalloc((void**)&d_B,filB*colB*sizeof(int));
    if(error != cudaSuccess){
        cout<<"Error reservando memoria para d_B"<<endl;
    }
    error=cudaMalloc((void**)&d_C,filA*colB*sizeof(int));
    if(error != cudaSuccess){
        cout<<"Error reservando memoria para d_C"<<endl;
    }
    cudaMemcpy(d_A,A,filA*colA*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_B,B,filB*colB*sizeof(int),cudaMemcpyHostToDevice);
    // One 32x32 thread block per output tile.
    dim3 dimblock(32,32,1);
    // FIX: ceil((double)(colB/32)) divided integers BEFORE the cast, so
    // any size not divisible by 32 lost its partial tile; round up in
    // integer arithmetic instead.
    dim3 dimGrid((colB + 31) / 32,(filA + 31) / 32,1);
    MultiplicaMatricesCU<<<dimGrid,dimblock>>>(d_A,filA,colA,d_B,filB,colB,d_C);
    cudaDeviceSynchronize();
    cudaMemcpy(h_C,d_C,filA*colB*sizeof(int),cudaMemcpyDeviceToHost);
    endGPU = clock();
    double time_GPU=((double)(endGPU-startGPU))/CLOCKS_PER_SEC;
    cout<<"El tiempo transcurrido en la GPU fue: "<<time_GPU<<endl;
    //-----------------------------------------------------------------------------------
    cout<<"El tiempo de aceleramiento fue: "<<time_CPU/time_GPU<<endl;
    if(compara(h_C, C, filA, colB)) cout << "Buen cálculo" << endl;
    else cout << "Mal cálculo" << endl;
    free(A);free(B);free(C);free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
7,897 | #include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include <time.h>
#include <sys/time.h>
#define NN (1<<10) // number of seeds
#define MM (1<<20) // number of samples per seed
#define THREADBLOCKSIZE 1024
#define LENGTH (N*sizeof(float))
#define INDEX (blockIdx.x * blockDim.x + threadIdx.x)
#define MARK_TIME(t) gettimeofday(&t, NULL)
#define CALC_TIME(t1, t2) (1.0e6 * (t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec))/(1.0e6)
#define D2H cudaMemcpyDeviceToHost
#define H2D cudaMemcpyHostToDevice
extern __shared__ float sdat[];
/*
Calculates an estimate for pi for each thread. Uses m random numbers
generated using the globalState random number generator
*/
__device__ void d_gen(curandState *globalState, int m) {
    // Monte-Carlo pi: each thread throws `m` darts at the unit square and
    // stores the fraction landing inside the quarter circle in sdat[tid]
    // (the file-scope dynamic shared array).
    int i = INDEX;
    curandState localState = globalState[i];
    int spt = m; //number of samples per thread
    int tid = threadIdx.x;
    // FIX: shared memory is uninitialized; the original accumulated into
    // sdat[tid] without zeroing it first, producing garbage estimates.
    sdat[tid] = 0.0f;
    for (int s = 0; s < spt; s++) {
        float rx = curand_uniform(&localState);
        float ry = curand_uniform(&localState);
        if (rx*rx + ry*ry <= 1.0f) {
            sdat[tid] += 1.0f;
        }
    }
    // Persist the generator state so later launches continue the stream.
    globalState[i] = localState;
    // Turn the hit count into a hit ratio (estimates pi/4).
    sdat[tid] *= 1.0f/spt;
}
/*
Creates an estimate of pi for an entire block. Sums the
individual thread estimates and then divides by the number of threads
in a block.
*/
__device__ void d_count(float *sums) {
// Tree reduction over the block's shared per-thread estimates, then
// average and publish one value per block into sums[blockIdx.x].
// NOTE(review): the halving loop assumes blockDim.x is a power of two;
// other sizes would drop elements — confirm with the launch config.
int tid = threadIdx.x;
for (int i = blockDim.x/2; i > 0; i >>= 1) {
// Lower half folds in the upper half each round.
if (tid < i)
sdat[tid] += sdat[tid + i];
// Barrier sits outside the divergent if, as required.
__syncthreads();
}
__syncthreads();
if (tid == 0) {
// Convert the summed per-thread ratios into the block mean.
sdat[0] *= 1.0/blockDim.x;
sums[blockIdx.x] = sdat[0];
}
}
/*
Actuallyt generates the estimates for pi for each block once the random
number generators are correctly set up.
*/
__global__ void generate(curandState *globalState, float *sums, int m) {
    // Phase 1: every thread produces its own pi estimate in shared memory.
    d_gen(globalState, m);
    __syncthreads();
    // Phase 2: reduce the per-thread estimates into sums[blockIdx.x].
    d_count(sums);
    __syncthreads();
}
/*
Sets up the random number generators for the blocks. This step must be
called before the pi estimates are generated.
*/
__global__ void kernel_setup(curandState *states) {
    // Seed 0, sequence = global thread id, offset 0: every thread gets an
    // independent, reproducible generator state.
    const int gid = INDEX;
    curand_init(0, gid, 0, &states[gid]);
}
/*
Actually generates the estimate of pi.
*/
int main(int argc, char *argv[]) {
    // Monte-Carlo pi: N generator states, M samples each; per-block
    // estimates are copied back, averaged on the host, and logged.
    int N,M;
    if (argc == 3) {
        N = 1 << atoi(argv[1]);
        M = 1 << atoi(argv[2]);
        printf("N: %d, M: %d\n",N, M);
    } else {
        N = NN;
        M = MM;
    }
    // FIX: sizeof yields size_t; %d was the wrong format specifier.
    printf("sizeof curandState %zu\n", sizeof(curandState));
    struct timeval begin, t1, t2;
    MARK_TIME(begin);
    printf("starting pi calc...\n");
    dim3 block, grid;
    block.x = THREADBLOCKSIZE;
    grid.x = (N + THREADBLOCKSIZE - 1)/THREADBLOCKSIZE;  // ceil-div
    printf("grid.x %d\n", grid.x);
    printf("block.x %d\n", block.x);
    MARK_TIME(t1);
    printf("mallocing on host and device\n");
    float *p, *d_p;
    p = (float *)malloc(grid.x*sizeof(float));
    MARK_TIME(t2);
    printf("it took %f seconds to allocate p...\n", CALC_TIME(t1, t2));
    cudaMalloc(&d_p,grid.x*sizeof(float));
    MARK_TIME(t1);
    printf("it took %f seconds to allocate d_p...\n", CALC_TIME(t2, t1));
    curandState *states;
    cudaMalloc(&states, N*sizeof(curandState));
    MARK_TIME(t2);
    printf("it took %f seconds to allocate states...\n", CALC_TIME(t1, t2));
    printf("running kernel_setup...");
    MARK_TIME(t1);
    kernel_setup<<<grid, block>>>(states);
    // FIX: kernel launches are asynchronous; without a sync the timers
    // only measured launch overhead, not the kernel's execution.
    cudaDeviceSynchronize();
    MARK_TIME(t2);
    printf("done\n");
    printf("it took %f seconds to execute kernel_setup...\n", CALC_TIME(t1, t2));
    printf("running generate...");
    MARK_TIME(t1);
    generate<<<grid, block, block.x*sizeof(float)>>>(states, d_p, M);
    cudaDeviceSynchronize();  // same: time the work, not the launch
    MARK_TIME(t2);
    printf("done\n");
    printf("it took %f seconds to execute generate...\n", CALC_TIME(t1, t2));
    printf("starting cuda memcpy...\n");
    MARK_TIME(t1);
    cudaMemcpy(p, d_p, grid.x*sizeof(float), cudaMemcpyDeviceToHost);
    MARK_TIME(t2);
    printf("done\n");
    printf("it took %f seconds to memcpy to host...\n", CALC_TIME(t1, t2));
    MARK_TIME(t1);
    // Average the per-block estimates (each approximates pi/4).
    int num_print = grid.x;
    float total = 0.0;
    for (int i = 0; i < num_print; i++) {
        total += p[i];
    }
    float pi = 4.0 * total / grid.x;
    printf("pi estimate: %f\n", pi);
    printf("cleaning up\n");
    cudaFree(states);
    cudaFree(d_p);
    free(p);
    MARK_TIME(t2);
    printf("it took %f seconds to calc total and clean up\n", CALC_TIME(t1,t2));
    printf("\nThe total execution time of this program was %f seconds\n", CALC_TIME(begin,t2));
    FILE *fp = fopen("gpu_results.dat", "a");
    fprintf(fp, "%d %d %.10f\n",N,M,pi);
    fclose(fp);
    return 0;
}
|
7,898 | /*
Author Javier Rodríguez
A01152572
*/
#include "cuda_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define N (1000000)
#define THREADS_PER_BLOCK 1000
//pi on cpu
double getPiCpu(){
    // Midpoint-rule integration of 4/(1+x^2) over [0,1], which equals pi.
    const long num_rects = N;
    const double width = 1.0 / (double) num_rects;
    double sum = 0.0;
    for (long i = 0; i < num_rects; i++) {
        const double mid = (i + 0.5) * width;
        sum += 4.0 / (1.0 + mid * mid);
    }
    return width * sum;
}
//Pi gpu
__global__ void getPiGpu(double *a, long n) {
    // Each thread evaluates the height of one midpoint-rule rectangle of
    // 4/(1+x^2); the host sums the heights and multiplies by the width.
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const double width = 1.0 / (long) n;
    if (tid >= n)
        return;
    const double mid = (tid + 0.5) * width;
    a[tid] = 4.0 / (1.0 + mid * mid);
}
// double piSum(double *a){
// double sum, width, pi;
// long num_rects = N;
//
// width = 1.0 / (double) num_rects;
//
// for (long i = 0; i < N; i++) {
// sum += a[i];
// }
// pi = width * sum;
// return pi;
// }
int main() {
    // Estimates pi on the GPU (rectangle heights summed on the host) and
    // on the CPU, timing both.
    double piCpu, piGpu;
    // FIX: `sum` was read before ever being written (undefined behavior).
    double sum = 0.0, width;
    // N doubles = 8 MB; `static` keeps the buffer off the (often 8 MB) stack.
    static double a[N];
    double *d_a;
    // FIX: a byte count is a size_t, not a double; the original also
    // leaked a host malloc into d_a before cudaMalloc overwrote it.
    size_t size = N * sizeof(double);
    cudaMalloc((void**)&d_a, size);
    //time on gpu
    clock_t timeOnGpu = clock();
    //kernel call
    getPiGpu<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(d_a, N);
    //devicetohost: retrieve the array of heights (blocking copy = sync)
    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    // Sum the rectangle heights and scale by the rectangle width.
    width = 1.0 / (double) N;
    for (long i = 0; i < N; i++) {
        sum += a[i];
    }
    piGpu = width * sum;
    printf("%f\n", piGpu);
    printf("time on GPU %f \n", ((double)clock() - timeOnGpu)/CLOCKS_PER_SEC);
    //Get pi cpu and print
    clock_t timeOnCpu = clock();
    piCpu = getPiCpu();
    printf("%lf\n", piCpu);
    printf("time on CPU %f \n", ((double)clock() - timeOnCpu)/CLOCKS_PER_SEC);
    return 0;
}
|
7,899 | //Program to cube a bunch of numbers from an array
#include<iostream>
#include<cuda.h>
using namespace std;
__global__ void cube(float *d_out, float* d_in)
{
    // One thread per element: write input^3 into the matching output slot.
    // (d_ prefixes mark device-resident buffers.)
    const int idx = threadIdx.x;
    const float v = d_in[idx];
    d_out[idx] = v * v * v;
}//end of kernel 'cube'
int main(int argc, char** argv)
{
    const int ARRAY_SIZE = 96;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    // Host input: 0, 1, 2, ..., ARRAY_SIZE-1 (h_ marks host buffers).
    float h_in[ARRAY_SIZE];
    for (int i = 0; i < ARRAY_SIZE; i++)
        h_in[i] = float(i);
    float h_out[ARRAY_SIZE];
    // Device buffers for the input and the cubed output.
    float *d_in;
    float *d_out;
    cudaMalloc((void**)&d_in, ARRAY_BYTES);
    cudaMalloc((void**)&d_out, ARRAY_BYTES);
    // Ship the input to the GPU.
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    // One block, one thread per element.
    cube<<<1, ARRAY_SIZE>>>(d_out, d_in);
    // Bring the cubes back; the blocking copy doubles as a sync.
    cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    for (int i = 0; i < ARRAY_SIZE; i++)
        cout << i << ":" << h_out[i] << endl;
    cudaFree(d_in);
    cudaFree(d_out);
}//end of main
|
7,900 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Matrix multiplication: AxB=C
//CUDA kernel. Each thread takes care of one cell of C matrix
__global__ void matmul(double *a, double *b, double *c, int n)
{
    // One thread per cell of the n x n product C = A * B.
    const int col = blockIdx.x*blockDim.x + threadIdx.x;
    const int row = blockIdx.y*blockDim.y + threadIdx.y;
    // Skip threads that fall outside the matrix.
    if (col >= n || row >= n)
        return;
    // Accumulate the dot product in a register, then store once.
    double sum = 0.0;
    for (int k = 0; k < n; k++)
        sum += a[row*n + k] * b[k*n + col];
    c[row*n + col] = sum;
}
extern "C" void matmul_wrapper()
{
// Allocates n x n input/output matrices on host and device, launches the
// matmul kernel over a 2-D grid of 32x32 blocks, and copies the result
// back. Results are only inspected via the commented-out prints below.
// Size of matrix
int n = 1000;
// int n = 3;
// Host input matrices
double *h_a;
double *h_b;
// Host output matrices
double *h_c;
// Device input matrices
double *d_a;
double *d_b;
// Device output matrices
double *d_c;
//Size, in bytes, of each array
size_t bytes = n*n*sizeof(double);
// Allocate memory for each matrix on host
h_a = (double*)malloc(bytes);
h_b = (double*)malloc(bytes);
h_c = (double*)malloc(bytes);
// Allocate memory for each matrix on GPU
cudaMalloc(&d_a, bytes);
cudaMalloc(&d_b, bytes);
cudaMalloc(&d_c, bytes);
printf(" C Memory allocated \n");
// Initialize vectors on host
int i;
int j;
for(i=0;i<n;i++) {
for(j=0;j<n;j++) {
h_a[i*n+j] = (i+1)*(j+1);//sinf(i)*sinf(j);
h_b[i*n+j] = i-j;//cosf(i)*cosf(j);
}
}
printf(" C Arrays initialized \n");
// Copy host matrices to device
cudaMemcpy(d_a,h_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b, bytes, cudaMemcpyHostToDevice);
printf(" C Data sent to GPU \n");
int blockSize, gridSize;
// Number of threads in each thread block
// blockSize = 1024;
blockSize = 32;
// Number of thread blocks in grid
// Round up so a partial tile covers the matrix edge.
gridSize = (int)ceil((double)n/blockSize);
dim3 dimBlock(blockSize,blockSize);
dim3 dimGrid(gridSize,gridSize);
printf("%d\n", gridSize);
printf("%d\n", blockSize);
// Execute the kernel
matmul<<<dimGrid, dimBlock>>>(d_a,d_b,d_c, n);
// NOTE(review): no error check or explicit sync here; the blocking
// cudaMemcpy below is what actually waits for the kernel to finish.
printf(" C Kernel executed \n");
// Copy array back to host
cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
// h_c[0] = 0.5;
// h_c[1] = 1.5;
// h_c[3] = 3.5;
// CHECK RESULTS for 3x3 MATRIX
// printf("%f %f %f\n",h_a[0],h_a[1],h_a[2]);
// printf("%f %f %f\n",h_a[3],h_a[4],h_a[5]);
// printf("%f %f %f\n",h_a[6],h_a[7],h_a[8]);
// printf("\n");
// printf("%f %f %f\n",h_b[0],h_b[1],h_b[2]);
// printf("%f %f %f\n",h_b[3],h_b[4],h_b[5]);
// printf("%f %f %f\n",h_b[6],h_b[7],h_b[8]);
// printf("\n");
// printf("%f %f %f\n",h_c[0],h_c[1],h_c[2]);
// printf("%f %f %f\n",h_c[3],h_c[4],h_c[5]);
// printf("%f %f %f\n",h_c[6],h_c[7],h_c[8]);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
printf(" C =============== \n");
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.