serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
10,001 | /*
******************************************************
This file is the OpenACC multi-GPU version of 2D Heat Equation
using OpenMP+OpenACC hybrid model. This implementation is based
on the CPU version from
http://www.many-core.group.cam.ac.uk/archive/CUDAcourse09/
Permission to use, copy, distribute and modify this software for any
purpose with or without fee is hereby granted. This software is
provided "as is" without express or implied warranty.
Send comments or suggestions for this OpenACC version to
rxu6@uh.edu, schandra@udel.edu
Authors: Rengan Xu, Sunita Chandrasekaran
May 26th, 2016
******************************************************
*/
extern "C" __global__ void step_kernel(int ni,
                                       int nj,
                                       double tfac,
                                       double *temp_in,
                                       double *temp_out)
{
    // One explicit time step of the 2D heat equation on the interior of an
    // ni x nj grid (row-major, row length ni). Rows are distributed over
    // blockIdx.y with a stride of gridDim.y; columns use a grid-stride loop
    // over the x-dimension of the launch.
    for (int j = blockIdx.y + 1; j < nj - 1; j += gridDim.y)
    {
        for (int i = threadIdx.x + blockIdx.x * blockDim.x + 1;
             i < ni - 1;
             i += blockDim.x * gridDim.x)
        {
            // Flat index of the centre point and its four neighbours.
            int centre = i + ni * j;
            int west   = centre - 1;
            int east   = centre + 1;
            int south  = centre - ni;
            int north  = centre + ni;
            // Second-order central differences in x and y.
            double lap_x = temp_in[west]  - 2 * temp_in[centre] + temp_in[east];
            double lap_y = temp_in[south] - 2 * temp_in[centre] + temp_in[north];
            // Forward-Euler update with diffusion factor tfac.
            temp_out[centre] = temp_in[centre] + tfac * (lap_x + lap_y);
        }
    }
}
|
10,002 | /*
number of mathematical operations (only floating point)
operation flo/o total
+-* : 37 1 37
/ : 3 4 12
sin : 1 8 8
cos : 1 8 8
pow : 8 13 104
sum 169
*/
/*
 * GN_L2P_KERNEL_CORE — macro body of what appears to be a local-to-particle
 * (L2P) evaluation step: it converts the offset between particle tx
 * (veci[6*tx+0..2]) and the centre (xic,yic,zic) into spherical coordinates
 * (r,th,ph), accumulates spherical gradient components (gr,gth,gph) from the
 * coefficient tables vecl/vecj via an associated-Legendre-style recurrence
 * over orders m and degrees n (< mp), converts the gradient back to cartesian
 * (gx,gy,gz), and subtracts the scaled dot product with veci[6*tx+3..5] from
 * veck[tx]. All variables used here must be declared by the including kernel.
 *
 * NOTE(review): `abs()` is applied to float values; depending on the headers
 * in scope this may resolve to the integer overload and truncate — confirm
 * whether fabsf() was intended.
 * NOTE(review): M_PI is a double constant, so the phases and the final scale
 * promote to double inside otherwise single-precision (__sinf/__cosf/powf)
 * arithmetic — verify this mix is intentional.
 * NOTE(review): __sinf/__cosf are fast, reduced-accuracy intrinsics; the
 * yy=1/eps guard avoids division by sin(th)=0 at the poles.
 */
#define GN_L2P_KERNEL_CORE \
xiic=veci[6*tx+0]-xic;\
yiic=veci[6*tx+1]-yic;\
ziic=veci[6*tx+2]-zic;\
r=sqrtf(xiic*xiic+yiic*yiic+ziic*ziic)+eps;\
th=acosf(ziic/r);\
if(abs(xiic)+abs(yiic)<eps){\
ph=0;\
}\
else if(abs(xiic)<eps){\
ph=yiic/abs(yiic)*M_PI*0.5;\
}\
else if(xiic>0){\
ph=atanf(yiic/xiic);\
}\
else{\
ph=atanf(yiic/xiic)+M_PI;\
}\
gr=0;\
gth=0;\
gph=0;\
xx=__cosf(th);\
yy=__sinf(th);\
if(fabs(yy)<eps) yy=1/eps;\
s2=sqrtf((1-xx)*(1+xx));\
fact=1;\
pn=1;\
for(m=0;m<mp;m++){\
p=pn;\
nm=m*m+2*m;\
nms=m*(m+1)/2+m;\
ere=__cosf(m*ph);\
if(m==0) ere=0.5;\
eim=__sinf(m*ph);\
bnm=vecl[nm]*p;\
p1=p;\
p=xx*(2*m+1)*p;\
bth=vecl[nm]*(p-(m+1)*xx*p1)/yy;\
rrre=m*powf(r,m-1)*bnm*ere;\
rthre=powf(r,m)*bth*ere;\
rphre=-m*powf(r,m)*bnm*eim;\
rrim=m*powf(r,m-1)*bnm*eim;\
rthim=powf(r,m)*bth*eim;\
rphim=m*powf(r,m)*bnm*ere;\
gr+=2*(rrre*vecj[2*nms+0]-rrim*vecj[2*nms+1]);\
gth+=2*(rthre*vecj[2*nms+0]-rthim*vecj[2*nms+1]);\
gph+=2*(rphre*vecj[2*nms+0]-rphim*vecj[2*nms+1]);\
for(n=m+1;n<mp;n++){\
nm=n*n+n+m;\
nms=n*(n+1)/2+m;\
bnm=vecl[nm]*p;\
p2=p1;\
p1=p;\
p=(xx*(2*n+1)*p1-(n+m)*p2)/(n-m+1);\
bth=vecl[nm]*((n-m+1)*p-(n+1)*xx*p1)/yy;\
rrre=n*powf(r,n-1)*bnm*ere;\
rthre=powf(r,n)*bth*ere;\
rphre=-m*powf(r,n)*bnm*eim;\
rrim=n*powf(r,n-1)*bnm*eim;\
rthim=powf(r,n)*bth*eim;\
rphim=m*powf(r,n)*bnm*ere;\
gr+=2*(rrre*vecj[2*nms+0]-rrim*vecj[2*nms+1]);\
gth+=2*(rthre*vecj[2*nms+0]-rthim*vecj[2*nms+1]);\
gph+=2*(rphre*vecj[2*nms+0]-rphim*vecj[2*nms+1]);\
}\
pn=-pn*fact*s2;\
fact=fact+2;\
}\
gx=__sinf(th)*__cosf(ph)*gr+__cosf(th)*__cosf(ph)/r*gth-\
__sinf(ph)/r/yy*gph;\
gy=__sinf(th)*__sinf(ph)*gr+__cosf(th)*__sinf(ph)/r*gth+\
__cosf(ph)/r/yy*gph;\
gz=__cosf(th)*gr-__sinf(th)/r*gth;\
veck[tx]-=0.25/M_PI*(gx*veci[6*tx+3]+gy*veci[6*tx+4]+gz*veci[6*tx+5]);
|
10,003 | #include "includes.h"
// Accumulate the quadratic form u^T * A * u into *v. One thread handles one
// (row, col) cell of the upper triangle (col >= row); off-diagonal terms are
// doubled since A is visited only above the diagonal.
__global__ void quad(float *a, int n, float *u, float *v)
{
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= n || c >= n || c < r)
        return;
    float term = u[c] * a[r * n + c] * u[r];
    // Diagonal contributes once, off-diagonal twice (symmetry).
    atomicAdd(v, (c == r) ? term : 2 * term);
}
10,004 | #include <iostream>
#include <iomanip>
#include <vector>
#include <string>
#include <fstream>
using namespace std;
void Linspace(double*, double, double, int);
void Uniform(double*, double, int);
__global__ void RungeKuttaStepOriginal(double*, double*, int);
__device__ void RightHandSide(double&, double, double);
template <class DataType>
DataType* AllocateHostMemory(int);
template <class DataType>
DataType* AllocateDeviceMemory(int);
// Driver: integrates an ensemble of independent scalar ODEs with RK4 on the
// GPU and reports wall-clock-ish timing via clock().
// Fixes over the original: cudaSetDevice's return value is checked, and all
// host/device buffers are released before exit (they were leaked).
int main()
{
    // INITIAL SETUP ----------------------------------------------------------------------------------
    int NumberOfProblems = 61440; // 92160
    int BlockSize = 128;
    // Select device 1 explicitly and fail loudly if it is unavailable.
    if (cudaSetDevice(1) != cudaSuccess)
    {
        std::cerr << "Failed to select CUDA device 1!\n";
        exit(EXIT_FAILURE);
    }
    double* h_State = AllocateHostMemory<double>(NumberOfProblems);
    double* h_Parameters = AllocateHostMemory<double>(NumberOfProblems);
    double* d_State = AllocateDeviceMemory<double>(NumberOfProblems);
    double* d_Parameters = AllocateDeviceMemory<double>(NumberOfProblems);
    Linspace(h_Parameters, 0.1, 1.0, NumberOfProblems);
    Uniform(h_State, -0.5, NumberOfProblems);
    cudaMemcpy(d_State, h_State, sizeof(double)*NumberOfProblems, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Parameters, h_Parameters, sizeof(double)*NumberOfProblems, cudaMemcpyHostToDevice);
    // Ceil-div so every problem gets a thread.
    int GridSize = NumberOfProblems/BlockSize + (NumberOfProblems % BlockSize == 0 ? 0:1);
    clock_t SimulationStart = clock();
    RungeKuttaStepOriginal<<<GridSize, BlockSize>>> (d_State, d_Parameters, NumberOfProblems);
    // The launch is asynchronous; wait before stopping the timer.
    cudaDeviceSynchronize();
    clock_t SimulationEnd = clock();
    cout << "Simulation time: " << 1000.0*(SimulationEnd-SimulationStart) / CLOCKS_PER_SEC << "ms" << endl << endl;
    cout << "Simulation time / 1000 RK4 step: " << 1000.0*(SimulationEnd-SimulationStart) / CLOCKS_PER_SEC << "ms" << endl;
    cout << "Ensemble size: " << NumberOfProblems << endl << endl;
    cudaMemcpy(h_State, d_State, sizeof(double)*NumberOfProblems, cudaMemcpyDeviceToHost);
    //for (int i=0; i<NumberOfProblems; i++)
    //	cout << "P: " << h_Parameters[i] << " Sates: " << h_State[i] << endl;
    // Release everything (the original leaked all four buffers).
    delete[] h_State;
    delete[] h_Parameters;
    cudaFree(d_State);
    cudaFree(d_Parameters);
}
// AUXILIARY FUNCTION -----------------------------------------------------------------------------
// Fill x[0..N-1] with N evenly spaced values from B to E inclusive.
// For N == 1 only x[0] = B is written.
void Linspace(double* x, double B, double E, int N)
{
    x[0] = B;
    if (N <= 1)
        return;
    x[N-1] = E;
    const double step = (E - B) / (N - 1);
    for (int k = 1; k < N - 1; ++k)
        x[k] = B + k * step;
}
// Set every element of x[0..N-1] to the constant V.
void Uniform(double* x, double V, int N)
{
    for (int k = N; k-- > 0; )
        x[k] = V;
}
// Right-hand side of the scalar ODE dX/dt = X^2 - P, written through F.
__forceinline__ __device__ void RightHandSide(double& F, double X, double P)
{
    F = X*X - P; // 1 FMA
}
// One thread integrates one independent scalar ODE dX/dt = f(X, P) for 1000
// classic RK4 steps with fixed step dT = 0.01, updating d_State in place.
__global__ void RungeKuttaStepOriginal(double* d_State, double* d_Parameters, int N)
{
    const int tid = threadIdx.x + blockIdx.x*blockDim.x;
    if (tid >= N)
        return;
    const double dT    = 0.01;
    const double halfT = 0.5*dT;           // dT/2 for the midpoint stages
    const double sixth = dT * (1.0/6.0);   // dT/6 for the final combination
    double state = d_State[tid];
    const double param = d_Parameters[tid];
    for (int step = 0; step < 1000; ++step)
    {
        double k1, k2, k3, k4, trial;
        RightHandSide(k1, state, param);
        trial = state + halfT*k1;
        RightHandSide(k2, trial, param);
        trial = state + halfT*k2;
        RightHandSide(k3, trial, param);
        trial = state + dT*k3;
        RightHandSide(k4, trial, param);
        // Weighted RK4 combination.
        state = state + sixth*( k1 + 2*k2 + 2*k3 + k4 );
    }
    d_State[tid] = state;
}
// Allocate N elements of DataType on the host; aborts the process on failure.
template <class DataType>
DataType* AllocateHostMemory(int N)
{
    DataType* buffer = new (std::nothrow) DataType [N];
    if (buffer == nullptr)
    {
        std::cerr << "Failed to allocate Memory on the HOST!\n";
        exit(EXIT_FAILURE);
    }
    return buffer;
}
// Allocate N elements of DataType on the device; aborts the process on failure.
template <class DataType>
DataType* AllocateDeviceMemory(int N)
{
    DataType* devPtr = NULL;
    cudaError_t status = cudaMalloc((void**)&devPtr, N * sizeof(DataType));
    if (status != cudaSuccess)
    {
        std::cerr << "Failed to allocate Memory on the DEVICE!\n";
        exit(EXIT_FAILURE);
    }
    return devPtr;
}
10,005 | #include "includes.h"
// Copy rows b >= a of a (v x v x nQ)-shaped flat array, shifting them up by
// a rows: element (b, d, q) of `in` lands at (b-a, d, q) of `out`.
__global__ void GPUKernel_Iqdb(int a,int v,int nQ,double * in,double * out) {
    // Linearize the 2D grid of 1D blocks into one element index.
    int linearBlock = blockIdx.x * gridDim.y + blockIdx.y;
    int id = linearBlock * blockDim.x + threadIdx.x;
    if ( id >= v*v*nQ ) return;
    // Decompose id = b*(nQ*v) + d*nQ + q.
    int q = id % nQ;
    int d = (id / nQ) % v;
    int b = id / (nQ * v);
    // Rows before a are discarded.
    if ( b < a ) return;
    out[(b - a)*nQ*v + d*nQ + q] = in[id];
}
10,006 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#define BLOCK_SIZE 32
#define WA 64
#define HA 64
#define HC 3
#define WC 3
#define WB (WA - WC + 1)
#define HB (HA - HC + 1)
#define CHANNEL_SIZE 3
// Tiled per-channel 2D convolution: each block stages a BLOCK_SIZE x
// BLOCK_SIZE patch (including a WC-1 halo) of channel blockIdx.z of input A
// into shared memory, zero-padding outside the image, then interior threads
// accumulate a WC x WC window against filter C and write one element of B.
// Uses BLOCK_SIZE*BLOCK_SIZE*CHANNEL_SIZE floats of static shared memory.
// NOTE(review): A and B are indexed as (col * W + row) — row and col appear
// transposed relative to the usual row-major convention; confirm against the
// host-side data layout.
// NOTE(review): the output guards `row < (WB - WC + 1)` / `col < (WB - WC + 1)`
// limit the written region to 60x60 although B is WB x WB = 62x62; this looks
// like it should be `row < WB && col < WB` — verify the intended output size.
// NOTE(review): each block writes only the blockIdx.z plane of shm, so the
// other CHANNEL_SIZE-1 planes of the shared array are unused per block.
__global__ void conv_flat(float* A, float* B, float* C)
{
int col = blockIdx.x * (BLOCK_SIZE - WC + 1) + threadIdx.x;
int row = blockIdx.y * (BLOCK_SIZE - WC + 1) + threadIdx.y;
// Input coordinates shifted back by the halo.
int row_i = row - WC + 1;
int col_i = col - WC + 1;
float tmp = 0;
__shared__ float shm[BLOCK_SIZE][BLOCK_SIZE][CHANNEL_SIZE];
if (row_i < WA && row_i >= 0 && col_i < WA && col_i >= 0)
{
shm[threadIdx.y][threadIdx.x][blockIdx.z] = A[(col_i * WA + row_i)*CHANNEL_SIZE +blockIdx.z];
}
else
{
// Zero padding outside the image.
shm[threadIdx.y][threadIdx.x][blockIdx.z] = 0;
}
__syncthreads();
if (threadIdx.y < (BLOCK_SIZE - WC + 1) && threadIdx.x < (BLOCK_SIZE - WC + 1) && row < (WB - WC + 1) && col < (WB - WC + 1))
{
for (int i = 0; i< WC;i++)
{
for (int j = 0;j<WC;j++)
{
tmp += shm[threadIdx.y + i][threadIdx.x + j][blockIdx.z] * C[(j*WC + i)*CHANNEL_SIZE+blockIdx.z];
}
}
B[(col*WB + row)*CHANNEL_SIZE + blockIdx.z] = tmp;
}
}
// Accumulate the CHANNEL_SIZE per-channel values of output element
// (blockIdx.x, blockIdx.y) into out_val; thread t adds channel t.
// Bug fix: the original used a plain "+=", so every thread of the block did
// an unsynchronized read-modify-write of the same out_val element — a data
// race. atomicAdd makes the accumulation correct. The trailing
// __syncthreads() guarded nothing and was removed.
// Precondition: out_val must be zero-initialized before the launch.
__global__ void hadamad(float* channel_expanded,float* out_val)
{
    int pixel = blockIdx.x * WB + blockIdx.y;
    atomicAdd(&out_val[pixel], channel_expanded[pixel * CHANNEL_SIZE + threadIdx.x]);
}
// Host wrapper: conv_flat produces per-channel results in a scratch buffer,
// then hadamad sums the channels into B.
// Fixes over the original: B is zeroed before the accumulating hadamad launch
// (it previously added into uninitialized device memory), and the scratch
// buffer is freed instead of leaking on every call.
void Convolution(float* A, float* B, float* C)
{
    float *channel_expanded;
    cudaMalloc( &channel_expanded, WB*HB*CHANNEL_SIZE*sizeof(float) );
    // hadamad accumulates with atomic adds, so the output must start at 0.
    cudaMemset(B, 0, WB*HB*sizeof(float));
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid( (WB - 1) / (BLOCK_SIZE - WC + 1), (WB - 1) / (BLOCK_SIZE - WC + 1),CHANNEL_SIZE);
    conv_flat <<<grid,threads>>>(A, channel_expanded,C);
    hadamad <<<grid,CHANNEL_SIZE>>>(channel_expanded, B);
    // cudaFree synchronizes with prior work on the buffer before releasing it.
    cudaFree(channel_expanded);
}
// Fill data[0..size-1] with pseudo-random values in [0, 1].
void randomInit(float* data, int size)
{
    for (int idx = 0; idx < size; ++idx)
        data[idx] = rand() / (float)RAND_MAX;
}
// Host driver: builds a 64x64x3 input and a 3x3x3 filter, runs the device
// convolution, and prints the 62x62 result.
// Fixes over the original: cudaMalloc takes void** (the original cast the
// argument to void***), and the device buffers are freed before exit.
__host__ int main(void)
{
    // NOTE(review): this brace initializer sets only h_a[0][0][0..2] to 1.0;
    // every remaining element is zero-initialized. Kept as-is to preserve
    // the program's output — confirm an all-ones input was not intended.
    float h_a[64][64][3] ={1.0,1.0,1.0};
    float h_b[3] [3] [3] ={ 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,
                            1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,
                            1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0};
    float h_c[62][62] ={0.0};
    float *da;
    float *db;
    float *dc;
    // Bug fix: the correct cast for cudaMalloc's first argument is void**.
    cudaMalloc((void**)&da,sizeof(h_a));
    cudaMalloc((void**)&db,sizeof(h_b));
    cudaMalloc((void**)&dc,sizeof(h_c));
    cudaMemcpy(da,h_a,sizeof(h_a),cudaMemcpyHostToDevice);
    cudaMemcpy(db,h_b,sizeof(h_b),cudaMemcpyHostToDevice);
    Convolution(da,dc,db);
    cudaMemcpy(h_c,dc,sizeof(h_c),cudaMemcpyDeviceToHost);
    for(int j =0; j < 62;j ++)
    {
        for(int k =0; k < 62;k ++)
        {
            printf("%.0f ",h_c[k][j]);
        }
        printf("\n");
    }
    printf("\n");
    // Bug fix: release device memory (the original leaked all three buffers).
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
}
10,007 | #include <stdio.h>
#include <assert.h>
#include <cuda_runtime.h>
int XDIM;
int YDIM;
/*
* NXN Matrix Multiplication
*/
/*
 * NxN matrix multiply: thread `row` computes row `row` of C with the
 * schoolbook triple loop, accumulating into the pre-zeroed output buffer.
 */
__global__ void
matMult(const int *A, const int *B, int *C, int numElements)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= numElements)
        return;
    for (int col = 0; col < numElements; ++col)
    {
        int out = row * numElements + col;
        for (int k = 0; k < numElements; ++k)
            C[out] += A[row * numElements + k] * B[k * numElements + col];
    }
}
// Print the XDIM x XDIM row-major matrix M, one row per line.
void printMat(int * M, int XDIM){
    for (int r = 0; r < XDIM; ++r) {
        for (int c = 0; c < XDIM; ++c) {
            printf(" %d ", M[r * XDIM + c]);
        }
        printf("\n");
    }
}
// Driver: builds two XDIM x XDIM random integer matrices, multiplies them on
// the GPU (one thread per output row), and cleans up.
// Fixes over the original:
//  - buffer size used sizeof(int*) instead of sizeof(int) (2x over-allocation
//    on 64-bit hosts);
//  - the host buffers were written before the NULL check;
//  - the d_C H2D copy error message wrongly said (d_B,h_B);
//  - the duplicated launch-error check was collapsed and its message named
//    the wrong kernel ("vectorAdd" instead of matMult).
int main(int argc, char **argv){
    XDIM = 40;
    YDIM = 40;
    if(argc > 1){
        XDIM = atoi(argv[1]);
        YDIM = XDIM;
    }
    //STEP 1 : Allocate in host
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;
    // Element type is int, not int*.
    size_t size = sizeof(int)*YDIM*XDIM;
    // Allocate the host input matrices A, B and the output matrix C
    int * h_A = (int *)malloc(size);
    int * h_B = (int *)malloc(size);
    int * h_C = (int *)malloc(size);
    // Verify that allocations succeeded BEFORE touching the buffers
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize h_A and h_B with random numbers, h_C with 0's
    for(int i = 0; i < XDIM*XDIM; i++){
        h_A[i] = rand() & 0xF;
        h_B[i] = rand() & 0xF;
        h_C[i] = 0;
    }
    //STEP 2: ALLOCATE IN CUDA
    int *d_A = NULL;
    int *d_B = NULL;
    int *d_C = NULL;
    cudaError_t error;
    error = cudaMalloc((void **) &d_A, size);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_B, size);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMalloc((void **) &d_C, size);
    if (error != cudaSuccess)
    {
        printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // copy host memory to device
    error = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    error = cudaMemcpy(d_C, h_C, size, cudaMemcpyHostToDevice);
    if (error != cudaSuccess)
    {
        printf("cudaMemcpy (d_C,h_C) returned error code %d, line(%d)\n", error, __LINE__);
        exit(EXIT_FAILURE);
    }
    // Launch the Mat mult CUDA Kernel: one thread per output row
    int threadsPerBlock = 1024;
    int blocksPerGrid =(XDIM + threadsPerBlock - 1) / threadsPerBlock;
    matMult<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, XDIM);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to launch matMult kernel (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Copy the device result matrix back to the host (this call synchronizes
    // with the kernel).
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to copy vector C from device to host (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Free device global memory
    err = cudaFree(d_A);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_B);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector B (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    err = cudaFree(d_C);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to free device vector C (error code %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    // Free host memory
    free(h_A);
    free(h_B);
    free(h_C);
    // Reset the device and exit
    err = cudaDeviceReset();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    return 0;
}
|
10,008 | /*
David Ebert
Homework 6 - GPU Dot Product
HW 6 Assignment:
-Dot product on GPU
-Add as much up on the GPU as possible
-Final addition is alright on the CPU
Output for N = 5000:
Vector A is 0.5 repeated 5000 times.
Vector B is 2.0 repeated 5000 times.
The dot product of A and B should be equal to N = 5000.
Output for N = 5000 (Seems to work up to at least 50 000):
Time in milliseconds= 0.318000000000000
A[0] = 2.00000 B[0] = 0.50000 C[0] = 1024.00000
A[1024] = 2.00000 B[1024] = 0.50000 C[1024] = 1024.00000
A[2048] = 2.00000 B[2048] = 0.50000 C[2048] = 1024.00000
A[3072] = 2.00000 B[3072] = 0.50000 C[3072] = 1024.00000
A[4096] = 2.00000 B[4096] = 0.50000 C[4096] = 904.00000
A and B are vectors of length 5000
The number of threads per block is 1024 (a power of 2)
The number of blocks is 5
The total number of threads is 5120
Finally, the dot product of A and B is... 5000 <-- This is correct.
*/
// To compile and run: nvcc EbertHW6.cu -O3 -o temp -lcudart -run
// To run: ./temp
#include <sys/time.h>
#include <stdio.h>
//Length of vectors to be added.
#define N 5000 // Length of A and B vectors. Works for values up to 10000
#define numThreads 1024 // This is really the number of threads per block. Should be power of 2.
// For some reason this isn't working with values greater than 64 if N is big.
// I think it's a sync thread problem.
#define M ((N+numThreads-1)/numThreads)*numThreads // M is the total number of threds, just larger than N
float *A_CPU, *B_CPU, *C_CPU; //CPU pointers
float *A_GPU, *B_GPU, *C_GPU; //GPU pointers
dim3 dimBlock; //This variable will hold the Dimensions of your block
// Allocate the M-element work buffers on both device and host.
// Host buffers use calloc so the padding beyond N is zero-filled.
void AllocateMemory()
{
    // Device buffers
    cudaMalloc(&A_GPU,M*sizeof(float));
    cudaMalloc(&B_GPU,M*sizeof(float));
    cudaMalloc(&C_GPU,M*sizeof(float));
    // Host buffers (zero-initialized)
    A_CPU = (float*)calloc(M,sizeof(float));
    B_CPU = (float*)calloc(M,sizeof(float));
    C_CPU = (float*)calloc(M,sizeof(float));
}
//Loads values into vectors that we will add.
// Fill the first N entries of A and B with constants chosen so that their
// dot product equals N (2.0 * 0.5 per element).
void Innitialize()
{
    for(int idx = 0; idx < N; ++idx)
    {
        A_CPU[idx] = 2.0;
        B_CPU[idx] = 0.5; // dot product should be N
    }
}
//Cleaning up memory after we are finished.
// Release the three host buffers and the three device buffers.
void CleanUp(float *A_CPU,float *B_CPU,float *C_CPU,float *A_GPU,float *B_GPU,float *C_GPU) //free
{
    free(A_CPU);
    free(B_CPU);
    free(C_CPU);
    cudaFree(A_GPU);
    cudaFree(B_GPU);
    cudaFree(C_GPU);
}
//This is the kernel. It is the function that will run on the GPU.
//It multiplies A and B element-wise and reduces the products into C.
// Kernel: multiply A and B element-wise and reduce each block's products so
// that C[blockIdx.x * blockDim.x] holds that block's partial sum. The host
// then adds the per-block partials (it reads C at multiples of numThreads).
// Fixes over the original: the old in-place tree reduction read C[tid+i]
// across block boundaries and called __syncthreads() inside divergent loops
// — both undefined behaviour (the file's own comments note it breaks for
// large N). A standard shared-memory per-block reduction is race-free, and
// it writes the same partial sums (e.g. 1024, ..., 904 for N = 5000) into
// the slots the host reads.
__global__ void Dot(float *A, float *B, float *C)
{
    __shared__ float cache[numThreads];
    int global_id = threadIdx.x + blockIdx.x * blockDim.x;
    // Per-thread product (grid-stride, so any launch size works).
    float partial = 0.0f;
    for(int idx = global_id; idx < N; idx += blockDim.x * gridDim.x)
        partial += A[idx]*B[idx];
    cache[threadIdx.x] = partial;
    __syncthreads();
    // Power-of-two tree reduction in shared memory; the barrier is outside
    // the divergent branch so every thread reaches it.
    for(int half = blockDim.x/2; half > 0; half /= 2)
    {
        if(threadIdx.x < half)
            cache[threadIdx.x] += cache[threadIdx.x + half];
        __syncthreads();
    }
    // One write per block, into the slot the host sums; the guard also keeps
    // blocks whose first element is past N from writing out of bounds.
    if(threadIdx.x == 0 && global_id < N)
        C[blockIdx.x * blockDim.x] = cache[0];
}
// Driver: computes the dot product of two length-N vectors on the GPU and
// sums the per-block partials on the host.
// Fixes over the original: `sum` was read uninitialized (now starts at 0),
// and the host read C_CPU after an async device-to-host copy without any
// synchronization — a cudaDeviceSynchronize() now precedes the timer stop
// and the host-side read.
int main()
{
    timeval start, end;
    //Partitioning off the memory that you will be using.
    AllocateMemory();
    //Loading up values to be added.
    Innitialize();
    //Starting the timer
    gettimeofday(&start, NULL);
    //Copy Memory from CPU to GPU
    cudaMemcpyAsync(A_GPU, A_CPU, M*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(B_GPU, B_CPU, M*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(C_GPU, C_CPU, M*sizeof(float), cudaMemcpyHostToDevice);
    //Calling the Kernel (GPU) function.
    Dot<<<M,numThreads>>>(A_GPU, B_GPU, C_GPU);
    //Copy Memory from GPU to CPU
    cudaMemcpyAsync(C_CPU, C_GPU, M*sizeof(float), cudaMemcpyDeviceToHost);
    // Wait for the async copy so the timing and the host-side read below
    // observe the finished result.
    cudaDeviceSynchronize();
    //Stopping the timer
    gettimeofday(&end, NULL);
    //Calculating the total time used in the addition and converting it to milliseconds.
    float time = (end.tv_sec * 1000000 + end.tv_usec) - (start.tv_sec * 1000000 + start.tv_usec);
    //Displaying the time
    printf("Time in milliseconds= %.15f\n", (time/1000.0));
    // calculate and print final sum of the per-block partials
    int sum = 0;   // bug fix: was uninitialized
    for(int j=0;j<N;j=j+numThreads){
        sum+=C_CPU[j];
        printf("A[%d] = %.5f B[%d] = %.5f C[%d] = %.5f\n", j, A_CPU[j], j, B_CPU[j], j, C_CPU[j]);
    }
    printf("A and B are vectors of length %d\n", N);
    printf("The number of threads per block is %d (a power of 2) ", numThreads);
    printf("\nThe number of blocks is %d\n", M/numThreads);
    printf("The total number of threads is %d\n", M);
    printf("Finally, the dot product of A and B is... %d\n", sum);
    //You're done so cleanup your mess.
    CleanUp(A_CPU,B_CPU,C_CPU,A_GPU,B_GPU,C_GPU);
    return(0);
}
|
10,009 | #include "includes.h"
#define uint unsigned int
#ifdef __INTELLISENSE__
void __syncthreads();
#endif // __INTELLISENSE__
void InitCPUData(double** matrices, int size);
void FillHostMatrix(double** matrices, int size);
cudaError_t InitGPUData(double** matrices, int **dSize, int size, int **dStride, int stride);
cudaError_t CudaMemcpyMatrix(double** matrices, int size, cudaMemcpyKind flag);
void ForwardElimination(double* matrix, int size);
void BackwardSubstitute(double* matrix, int size);
// One forward-elimination step on an augmented (size x size+1) row-major
// matrix: subtract ratio * pivotRow from row `row`, each thread handling
// `stride` consecutive columns starting at its own offset.
// Fix over the original: it called __syncthreads() inside a divergent branch
// (the bounds check), which is undefined behaviour — and since each thread
// updates a disjoint column range with no shared memory, no barrier is
// needed at all, so it was removed.
// NOTE(review): every thread reads the pivot-column entry of the target row
// before the thread owning that column overwrites it; there is no ordering
// guarantee between blocks, so this read/write pattern should be confirmed
// against the host-side launch sequence (same issue existed originally).
__global__ void ForwardEliminationColumn(double* matrix, int* size, int* row, int* stride, int* pivotRow)
{
    int n = *size;
    int targetRow = *row;
    int span = *stride;
    int pivRow = *pivotRow;
    int width = n + 1;   // columns of the augmented matrix
    int firstCol = (blockIdx.x * blockDim.x + threadIdx.x) * span;
    double pivot = matrix[pivRow * width + pivRow];
    double belowPivot = matrix[targetRow * width + pivRow];
    double ratio = belowPivot / pivot;
    for (int c = 0; c < span; ++c)
    {
        int col = firstCol + c;
        if (col < width)
        {
            matrix[targetRow * width + col] -= ratio * matrix[pivRow * width + col];
        }
    }
}
10,010 | #include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
//#include <glm/glm.hpp>
#include <stdio.h>
#include <iostream>
using namespace std;
// Element-wise sum c[i] = a[i] + b[i] for the first 32 elements.
__global__ void kernel1(int *a,int *b,int *c) {
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    if (idx >= 32)
        return;
    c[idx] = a[idx] + b[idx];
}
// Demo entry point: adds two 32-element integer vectors on the GPU and
// prints each "a+b=c" line. Returns 0.
extern "C"
int cuda_main()
{
    const int count = 32;
    const size_t bytes = count * sizeof(int);
    // Host buffers
    int *h_a = new int[count];
    int *h_b = new int[count];
    int *h_c = new int[count];
    for (int i = 0; i < count; ++i) {
        h_a[i] = i;
        h_b[i] = i*2;
    }
    // Device buffers and uploads
    int *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_c, bytes);
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);
    // One block of 32 threads — one thread per element.
    dim3 blocks(1);
    dim3 threads(count);
    kernel1 <<< blocks,threads >>>(d_a, d_b, d_c);
    cudaMemcpy(h_c, d_c, bytes, cudaMemcpyDeviceToHost);
    for (int i = 0; i < count; ++i)
        cout << h_a[i] << '+' << h_b[i] << '=' << h_c[i] << endl;
    // Cleanup
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    delete[] h_a;
    delete[] h_b;
    delete[] h_c;
    return 0;
}
|
10,011 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort with `message` if the most recent CUDA API call or launch failed.
void check_error (const char* message) {
    cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
__global__ void __launch_bounds__(256,1) curvi_1 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
//Determing the block's indices
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
if (j>=2 & k>=2 & j<=N-3 & k<=N-3) {
for (int i=2; i<=N-3; i++) {
double _t_58_ = 2.0 * mu[i+2][j][k];
_t_58_ += la[i+2][j][k];
double _t_55_ = met1[i+2][j][k] * _t_58_ * met2[i+2][j][k];
double _v_26_ = c2 * u1[i+2][j][k+2];
double _v_0_ = c2 * u1[i][j+2][k+2];
_v_0_ -= c2 * u1[i][j-2][k+2];
double _v_3_ = c2 * u2[i][j+2][k+2];
_v_3_ -= c2 * u2[i][j-2][k+2];
double _v_6_ = c2 * u1[i][j+2][k-2];
_v_6_ -= c2 * u1[i][j-2][k-2];
double _v_9_ = c2 * u2[i][j+2][k-2];
_v_9_ -= c2 * u2[i][j-2][k-2];
double _v_13_ = c2 * u1[i][j+2][k+1];
_v_13_ -= c2 * u1[i][j-2][k+1];
double _v_16_ = c2 * u2[i][j+2][k+1];
_v_16_ -= c2 * u2[i][j-2][k+1];
double _v_19_ = c2 * u1[i][j+2][k-1];
_v_19_ -= c2 * u1[i][j-2][k-1];
double _v_22_ = c2 * u2[i][j+2][k-1];
_v_22_ -= c2 * u2[i][j-2][k-1];
double _v_64_ = c2 * u1[i+2][j][k+2];
_v_26_ -= c2 * u1[i+2][j][k-2];
double _v_73_ = c2 * u1[i+2][j][k-2];
double _t_56_ = _v_26_;
double _v_27_ = c1 * u1[i+2][j][k+1];
double _v_1_ = c1 * u1[i][j+1][k+2];
_v_1_ -= c1 * u1[i][j-1][k+2];
double _v_4_ = c1 * u2[i][j+1][k+2];
_v_4_ -= c1 * u2[i][j-1][k+2];
double _v_7_ = c1 * u1[i][j+1][k-2];
_v_7_ -= c1 * u1[i][j-1][k-2];
double _v_10_ = c1 * u2[i][j+1][k-2];
_v_10_ -= c1 * u2[i][j-1][k-2];
double _v_14_ = c1 * u1[i][j+1][k+1];
_v_14_ -= c1 * u1[i][j-1][k+1];
double _v_17_ = c1 * u2[i][j+1][k+1];
_v_17_ -= c1 * u2[i][j-1][k+1];
double _v_20_ = c1 * u1[i][j+1][k-1];
_v_20_ -= c1 * u1[i][j-1][k-1];
double _v_23_ = c1 * u2[i][j+1][k-1];
_v_23_ -= c1 * u2[i][j-1][k-1];
_v_27_ -= c1 * u1[i+2][j][k-1];
_t_56_ += _v_27_;
double _v_28_ = strx[i] * _t_55_ * _t_56_;
double _v_83_ = c2 * u1[i+2][j][k+1];
double _v_92_ = c2 * u1[i+2][j][k-1];
double _v_44_ = c2 * _v_28_;
double _v_29_ = c2 * u2[i+2][j][k+2];
double _v_67_ = c2 * u2[i+2][j][k+2];
_v_29_ -= c2 * u2[i+2][j][k-2];
double _v_76_ = c2 * u2[i+2][j][k-2];
double _t_63_ = _v_29_;
double _v_30_ = c1 * u2[i+2][j][k+1];
_v_30_ -= c1 * u2[i+2][j][k-1];
_t_63_ += _v_30_;
double _t_62_ = met1[i+2][j][k] * la[i+2][j][k] * met3[i+2][j][k];
double _v_31_ = stry[j] * _t_62_ * _t_63_;
double _v_86_ = c2 * u2[i+2][j][k+1];
double _v_95_ = c2 * u2[i+2][j][k-1];
_v_44_ += c2 * _v_31_;
double _t_67_ = met1[i+2][j][k] * la[i+2][j][k] * met4[i+2][j][k];
double _v_32_ = c2 * u3[i+2][j][k+2];
double _v_70_ = c2 * u3[i+2][j][k+2];
_v_32_ -= c2 * u3[i+2][j][k-2];
double _v_79_ = c2 * u3[i+2][j][k-2];
double _t_68_ = _v_32_;
double _v_33_ = c1 * u3[i+2][j][k+1];
_v_33_ -= c1 * u3[i+2][j][k-1];
_t_68_ += _v_33_;
double _v_34_ = _t_67_ * _t_68_;
double _v_89_ = c2 * u3[i+2][j][k+1];
double _v_98_ = c2 * u3[i+2][j][k-1];
_v_44_ += c2 * _v_34_;
double _t_76_ = 2.0 * mu[i-2][j][k];
_t_76_ += la[i-2][j][k];
double _t_73_ = met1[i-2][j][k] * _t_76_ * met2[i-2][j][k];
double _v_35_ = c2 * u1[i-2][j][k+2];
_v_64_ -= c2 * u1[i-2][j][k+2];
_v_35_ -= c2 * u1[i-2][j][k-2];
_v_73_ -= c2 * u1[i-2][j][k-2];
double _t_74_ = _v_35_;
double _v_36_ = c1 * u1[i-2][j][k+1];
_v_36_ -= c1 * u1[i-2][j][k-1];
_t_74_ += _v_36_;
double _v_37_ = strx[i] * _t_73_ * _t_74_;
_v_83_ -= c2 * u1[i-2][j][k+1];
_v_92_ -= c2 * u1[i-2][j][k-1];
_v_44_ += c2 * _v_37_;
double _v_38_ = c2 * u2[i-2][j][k+2];
_v_67_ -= c2 * u2[i-2][j][k+2];
_v_38_ -= c2 * u2[i-2][j][k-2];
_v_76_ -= c2 * u2[i-2][j][k-2];
double _t_81_ = _v_38_;
double _v_39_ = c1 * u2[i-2][j][k+1];
_v_39_ -= c1 * u2[i-2][j][k-1];
_t_81_ += _v_39_;
double _t_80_ = met1[i-2][j][k] * la[i-2][j][k] * met3[i-2][j][k];
double _v_40_ = stry[j] * _t_80_ * _t_81_;
_v_86_ -= c2 * u2[i-2][j][k+1];
_v_95_ -= c2 * u2[i-2][j][k-1];
_v_44_ += c2 * _v_40_;
double _t_85_ = met1[i-2][j][k] * la[i-2][j][k] * met4[i-2][j][k];
double _v_41_ = c2 * u3[i-2][j][k+2];
_v_70_ -= c2 * u3[i-2][j][k+2];
_v_41_ -= c2 * u3[i-2][j][k-2];
_v_79_ -= c2 * u3[i-2][j][k-2];
double _t_86_ = _v_41_;
double _v_42_ = c1 * u3[i-2][j][k+1];
_v_42_ -= c1 * u3[i-2][j][k-1];
_t_86_ += _v_42_;
double _v_43_ = _t_85_ * _t_86_;
_v_89_ -= c2 * u3[i-2][j][k+1];
_v_98_ -= c2 * u3[i-2][j][k-1];
_v_44_ += c2 * _v_43_;
double _t_51_ = stry[j] * _v_44_;
double _t_95_ = 2.0 * mu[i+1][j][k];
_t_95_ += la[i+1][j][k];
double _t_92_ = met1[i+1][j][k] * _t_95_ * met2[i+1][j][k];
double _v_45_ = c2 * u1[i+1][j][k+2];
_v_45_ -= c2 * u1[i+1][j][k-2];
double _t_93_ = _v_45_;
double _v_46_ = c1 * u1[i+1][j][k+1];
double _v_84_ = c1 * u1[i+1][j][k+1];
_v_46_ -= c1 * u1[i+1][j][k-1];
double _v_93_ = c1 * u1[i+1][j][k-1];
_t_93_ += _v_46_;
double _v_47_ = strx[i] * _t_92_ * _t_93_;
double _v_65_ = c1 * u1[i+1][j][k+2];
double _v_74_ = c1 * u1[i+1][j][k-2];
double _v_63_ = c1 * _v_47_;
double _v_48_ = c2 * u2[i+1][j][k+2];
_v_48_ -= c2 * u2[i+1][j][k-2];
double _t_100_ = _v_48_;
double _v_49_ = c1 * u2[i+1][j][k+1];
double _v_87_ = c1 * u2[i+1][j][k+1];
_v_49_ -= c1 * u2[i+1][j][k-1];
double _v_96_ = c1 * u2[i+1][j][k-1];
_t_100_ += _v_49_;
double _t_99_ = met1[i+1][j][k] * la[i+1][j][k] * met3[i+1][j][k];
double _v_50_ = stry[j] * _t_99_ * _t_100_;
double _v_68_ = c1 * u2[i+1][j][k+2];
double _v_77_ = c1 * u2[i+1][j][k-2];
_v_63_ += c1 * _v_50_;
double _t_104_ = met1[i+1][j][k] * la[i+1][j][k] * met4[i+1][j][k];
double _v_51_ = c2 * u3[i+1][j][k+2];
_v_51_ -= c2 * u3[i+1][j][k-2];
double _t_105_ = _v_51_;
double _v_52_ = c1 * u3[i+1][j][k+1];
double _v_90_ = c1 * u3[i+1][j][k+1];
_v_52_ -= c1 * u3[i+1][j][k-1];
double _v_99_ = c1 * u3[i+1][j][k-1];
_t_105_ += _v_52_;
double _v_53_ = _t_104_ * _t_105_;
double _v_71_ = c1 * u3[i+1][j][k+2];
double _v_80_ = c1 * u3[i+1][j][k-2];
_v_63_ += c1 * _v_53_;
double _t_113_ = 2.0 * mu[i-1][j][k];
_t_113_ += la[i-1][j][k];
double _t_110_ = met1[i-1][j][k] * _t_113_ * met2[i-1][j][k];
double _v_54_ = c2 * u1[i-1][j][k+2];
_v_54_ -= c2 * u1[i-1][j][k-2];
double _t_111_ = _v_54_;
double _v_55_ = c1 * u1[i-1][j][k+1];
_v_84_ -= c1 * u1[i-1][j][k+1];
_v_55_ -= c1 * u1[i-1][j][k-1];
_v_93_ -= c1 * u1[i-1][j][k-1];
_t_111_ += _v_55_;
double _v_56_ = strx[i] * _t_110_ * _t_111_;
_v_65_ -= c1 * u1[i-1][j][k+2];
_v_74_ -= c1 * u1[i-1][j][k-2];
_v_63_ += c1 * _v_56_;
double _v_57_ = c2 * u2[i-1][j][k+2];
_v_57_ -= c2 * u2[i-1][j][k-2];
double _t_118_ = _v_57_;
double _v_58_ = c1 * u2[i-1][j][k+1];
_v_87_ -= c1 * u2[i-1][j][k+1];
_v_58_ -= c1 * u2[i-1][j][k-1];
_v_96_ -= c1 * u2[i-1][j][k-1];
_t_118_ += _v_58_;
double _t_117_ = met1[i-1][j][k] * la[i-1][j][k] * met3[i-1][j][k];
double _v_59_ = stry[j] * _t_117_ * _t_118_;
_v_68_ -= c1 * u2[i-1][j][k+2];
_v_77_ -= c1 * u2[i-1][j][k-2];
_v_63_ += c1 * _v_59_;
double _t_122_ = met1[i-1][j][k] * la[i-1][j][k] * met4[i-1][j][k];
double _v_60_ = c2 * u3[i-1][j][k+2];
_v_60_ -= c2 * u3[i-1][j][k-2];
double _t_123_ = _v_60_;
double _v_61_ = c1 * u3[i-1][j][k+1];
_v_90_ -= c1 * u3[i-1][j][k+1];
_v_61_ -= c1 * u3[i-1][j][k-1];
_v_99_ -= c1 * u3[i-1][j][k-1];
_t_123_ += _v_61_;
double _v_62_ = _t_122_ * _t_123_;
_v_71_ -= c1 * u3[i-1][j][k+2];
_v_80_ -= c1 * u3[i-1][j][k-2];
_v_63_ += c1 * _v_62_;
_t_51_ += stry[j] * _v_63_;
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_51_;
double _t_144_ = _v_70_;
_t_144_ += _v_71_;
double _t_143_ = met1[i][j][k+2] * mu[i][j][k+2] * met4[i][j][k+2];
double _v_72_ = stry[j] * _t_143_ * _t_144_;
double _v_82_ = c2 * _v_72_;
double _t_132_ = _v_64_;
_t_132_ += _v_65_;
double _t_134_ = 2.0 * mu[i][j][k+2];
double _t_137_ = met1[i][j][k+2] * mu[i][j][k+2] * met3[i][j][k+2];
_t_134_ += la[i][j][k+2];
double _t_131_ = met1[i][j][k+2] * _t_134_ * met2[i][j][k+2];
double _t_9_ = met1[i][j][k+2] * la[i][j][k+2] * met2[i][j][k+2];
double _t_130_ = _t_131_ * _t_132_;
double _v_66_ = stry[j] * _t_130_ * strx[i];
_v_82_ += c2 * _v_66_;
double _t_138_ = _v_67_;
_t_138_ += _v_68_;
double _v_69_ = _t_137_ * _t_138_;
_v_82_ += c2 * _v_69_;
double _t_151_ = _v_73_;
_t_151_ += _v_74_;
double _t_153_ = 2.0 * mu[i][j][k-2];
_t_153_ += la[i][j][k-2];
double _t_150_ = met1[i][j][k-2] * _t_153_ * met2[i][j][k-2];
double _t_21_ = met1[i][j][k-2] * la[i][j][k-2] * met2[i][j][k-2];
double _t_149_ = _t_150_ * _t_151_;
double _v_75_ = stry[j] * _t_149_ * strx[i];
_v_82_ += c2 * _v_75_;
double _t_157_ = _v_76_;
_t_157_ += _v_77_;
double _t_156_ = met1[i][j][k-2] * mu[i][j][k-2] * met3[i][j][k-2];
double _t_162_ = met1[i][j][k-2] * mu[i][j][k-2] * met4[i][j][k-2];
double _v_78_ = _t_156_ * _t_157_;
_v_82_ += c2 * _v_78_;
double _t_163_ = _v_79_;
_t_163_ += _v_80_;
double _v_81_ = stry[j] * _t_162_ * _t_163_;
_v_82_ += c2 * _v_81_;
double _t_127_ = _v_82_;
double _t_183_ = _v_89_;
_t_183_ += _v_90_;
double _t_182_ = met1[i][j][k+1] * mu[i][j][k+1] * met4[i][j][k+1];
double _v_91_ = stry[j] * _t_182_ * _t_183_;
double _v_101_ = c1 * _v_91_;
double _t_171_ = _v_83_;
_t_171_ += _v_84_;
double _t_173_ = 2.0 * mu[i][j][k+1];
double _t_176_ = met1[i][j][k+1] * mu[i][j][k+1] * met3[i][j][k+1];
_t_173_ += la[i][j][k+1];
double _t_170_ = met1[i][j][k+1] * _t_173_ * met2[i][j][k+1];
double _t_34_ = met1[i][j][k+1] * la[i][j][k+1] * met2[i][j][k+1];
double _t_169_ = _t_170_ * _t_171_;
double _v_85_ = stry[j] * _t_169_ * strx[i+2];
_v_101_ += c1 * _v_85_;
double _t_177_ = _v_86_;
_t_177_ += _v_87_;
double _v_88_ = _t_176_ * _t_177_;
_v_101_ += c1 * _v_88_;
double _t_190_ = _v_92_;
_t_190_ += _v_93_;
double _t_192_ = 2.0 * mu[i][j][k-1];
_t_192_ += la[i][j][k-1];
double _t_189_ = met1[i][j][k-1] * _t_192_ * met2[i][j][k-1];
double _t_46_ = met1[i][j][k-1] * la[i][j][k-1] * met2[i][j][k-1];
double _t_188_ = _t_189_ * _t_190_;
double _v_94_ = stry[j] * _t_188_ * strx[i-2];
_v_101_ += c1 * _v_94_;
double _t_196_ = _v_95_;
_t_196_ += _v_96_;
double _t_195_ = met1[i][j][k-1] * mu[i][j][k-1] * met3[i][j][k-1];
double _t_201_ = met1[i][j][k-1] * mu[i][j][k-1] * met4[i][j][k-1];
double _v_97_ = _t_195_ * _t_196_;
_v_101_ += c1 * _v_97_;
double _t_202_ = _v_98_;
_t_202_ += _v_99_;
double _v_100_ = stry[j] * _t_201_ * _t_202_;
_v_101_ += c1 * _v_100_;
_t_127_ += _v_101_;
r1ic0jc0kc0 += _t_127_;
double _t_4_ = _t_137_;
double _t_5_ = _v_0_;
_t_5_ += _v_1_;
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = strx[i] * _t_3_ * stry[j+2];
double _v_12_ = c2 * _v_2_;
double _t_10_ = _v_3_;
_t_10_ += _v_4_;
double _v_5_ = _t_9_ * _t_10_;
_v_12_ += c2 * _v_5_;
double _t_16_ = _t_156_;
double _t_17_ = _v_6_;
_t_17_ += _v_7_;
double _t_15_ = _t_16_ * _t_17_;
double _v_8_ = strx[i] * _t_15_ * stry[j];
_v_12_ += c2 * _v_8_;
double _t_22_ = _v_9_;
_t_22_ += _v_10_;
double _v_11_ = _t_21_ * _t_22_;
_v_12_ += c2 * _v_11_;
double _t_0_ = _v_12_;
double _t_29_ = _t_176_;
double _t_30_ = _v_13_;
_t_30_ += _v_14_;
double _t_28_ = _t_29_ * _t_30_;
double _v_15_ = strx[i] * _t_28_ * stry[j-2];
double _v_25_ = c1 * _v_15_;
double _t_35_ = _v_16_;
_t_35_ += _v_17_;
double _v_18_ = _t_34_ * _t_35_;
_v_25_ += c1 * _v_18_;
double _t_41_ = _t_195_;
double _t_42_ = _v_19_;
_t_42_ += _v_20_;
double _t_40_ = _t_41_ * _t_42_;
double _v_21_ = strx[i] * _t_40_ * stry[j];
_v_25_ += c1 * _v_21_;
double _t_47_ = _v_22_;
_t_47_ += _v_23_;
double _v_24_ = _t_46_ * _t_47_;
_v_25_ += c1 * _v_24_;
_t_0_ += _v_25_;
r1ic0jc0kc0 += _t_0_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
}
/* curvi_2: second pass of a curvilinear-grid stencil update for the r1 field.
 * Accumulates the cross-derivative terms (j/k and i/j directions) into
 * r1[i][j][k] using 4th-order centered differences with coefficients c1/c2.
 * Arrays are flat device buffers reinterpreted as 304x304x304 cubes, so the
 * caller must allocate with N == 304 padding (assumption from the casts below
 * — confirm against the host driver).
 * Launch: 3-D grid, threadIdx.x -> k, threadIdx.y -> j, threadIdx.z -> i;
 * __launch_bounds__(128,4) caps register use for occupancy. */
__global__ void __launch_bounds__(128,4) curvi_2 (double * __restrict__ in_r1, double *__restrict__ in_u1, double * __restrict__ in_u2, double *__restrict__ in_u3, double * __restrict__ in_mu, double * __restrict__ in_la, double * __restrict__ in_met1, double * __restrict__ in_met2, double * __restrict__ in_met3, double * __restrict__ in_met4, double * strx, double * stry, double c1, double c2, int N) {
// Determine this thread's (i, j, k) grid coordinates from the 3-D launch.
int blockdim_k= (int)(blockDim.x);
int k0 = (int)(blockIdx.x)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_i= (int)(blockDim.z);
int i0 = (int)(blockIdx.z)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.z);
// Reinterpret the flat buffers as fixed-size 304^3 cubes for 3-D indexing.
double (*u1)[304][304] = (double (*)[304][304])in_u1;
double (*u2)[304][304] = (double (*)[304][304])in_u2;
double (*u3)[304][304] = (double (*)[304][304])in_u3;
double (*mu)[304][304] = (double (*)[304][304])in_mu;
double (*la)[304][304] = (double (*)[304][304])in_la;
double (*r1)[304][304] = (double (*)[304][304])in_r1;
double (*met1)[304][304] = (double (*)[304][304])in_met1;
double (*met2)[304][304] = (double (*)[304][304])in_met2;
double (*met3)[304][304] = (double (*)[304][304])in_met3;
double (*met4)[304][304] = (double (*)[304][304])in_met4;
// Interior-point guard (2-wide halo on every face). Bitwise & on the bool
// comparison results is equivalent to && here (no short-circuit needed).
if (j>=2 & k>=2 & j<=N-3 & k<=N-3 & i>=2 & i<=N-3) {
// ---- j/k cross terms: c2-weighted contributions from rows j+-2 ----
double _v_0_ = c2 * u1[i][j+2][k+2];
_v_0_ -= c2 * u1[i][j+2][k-2];
double _t_5_ = _v_0_;
double _v_1_ = c1 * u1[i][j+2][k+1];
_v_1_ -= c1 * u1[i][j+2][k-1];
_t_5_ += _v_1_;
double _t_4_ = met1[i][j+2][k] * mu[i][j+2][k] * met3[i][j+2][k];
double _t_3_ = _t_4_ * _t_5_;
double _v_2_ = strx[i] * _t_3_ * stry[j+1];
double _v_12_ = c2 * _v_2_;
double _v_3_ = c2 * u2[i][j+2][k+2];
_v_3_ -= c2 * u2[i][j+2][k-2];
double _t_10_ = _v_3_;
double _v_4_ = c1 * u2[i][j+2][k+1];
_v_4_ -= c1 * u2[i][j+2][k-1];
_t_10_ += _v_4_;
double _t_9_ = met1[i][j+2][k] * mu[i][j+2][k] * met2[i][j+2][k];
double _v_5_ = _t_9_ * _t_10_;
_v_12_ += c2 * _v_5_;
double _v_9_ = c2 * u2[i][j-2][k+2];
_v_9_ -= c2 * u2[i][j-2][k-2];
double _t_22_ = _v_9_;
double _v_10_ = c1 * u2[i][j-2][k+1];
_v_10_ -= c1 * u2[i][j-2][k-1];
_t_22_ += _v_10_;
double _t_21_ = met1[i][j-2][k] * mu[i][j-2][k] * met2[i][j-2][k];
double _v_11_ = _t_21_ * _t_22_;
_v_12_ += c2 * _v_11_;
double _v_6_ = c2 * u1[i][j-2][k+2];
_v_6_ -= c2 * u1[i][j-2][k-2];
double _t_17_ = _v_6_;
double _v_7_ = c1 * u1[i][j-2][k+1];
_v_7_ -= c1 * u1[i][j-2][k-1];
_t_17_ += _v_7_;
double _t_16_ = met1[i][j-2][k] * mu[i][j-2][k] * met3[i][j-2][k];
double _t_15_ = _t_16_ * _t_17_;
double _v_8_ = strx[i] * _t_15_ * stry[j];
_v_12_ += c2 * _v_8_;
double _t_0_ = _v_12_;
// ---- j/k cross terms: c1-weighted contributions from rows j+-1 ----
double _v_19_ = c2 * u1[i][j-1][k+2];
_v_19_ -= c2 * u1[i][j-1][k-2];
double _t_42_ = _v_19_;
double _v_20_ = c1 * u1[i][j-1][k+1];
_v_20_ -= c1 * u1[i][j-1][k-1];
_t_42_ += _v_20_;
double _t_41_ = met1[i][j-1][k] * mu[i][j-1][k] * met3[i][j-1][k];
double _t_40_ = _t_41_ * _t_42_;
double _v_21_ = strx[i] * _t_40_ * stry[j];
double _v_25_ = c1 * _v_21_;
double _v_13_ = c2 * u1[i][j+1][k+2];
_v_13_ -= c2 * u1[i][j+1][k-2];
double _t_30_ = _v_13_;
double _v_14_ = c1 * u1[i][j+1][k+1];
_v_14_ -= c1 * u1[i][j+1][k-1];
_t_30_ += _v_14_;
double _t_29_ = met1[i][j+1][k] * mu[i][j+1][k] * met3[i][j+1][k];
double _t_28_ = _t_29_ * _t_30_;
double _v_15_ = strx[i] * _t_28_ * stry[j-1];
_v_25_ += c1 * _v_15_;
double _v_16_ = c2 * u2[i][j+1][k+2];
_v_16_ -= c2 * u2[i][j+1][k-2];
double _t_35_ = _v_16_;
double _v_17_ = c1 * u2[i][j+1][k+1];
_v_17_ -= c1 * u2[i][j+1][k-1];
_t_35_ += _v_17_;
double _t_34_ = met1[i][j+1][k] * mu[i][j+1][k] * met2[i][j+1][k];
double _v_18_ = _t_34_ * _t_35_;
_v_25_ += c1 * _v_18_;
double _v_22_ = c2 * u2[i][j-1][k+2];
_v_22_ -= c2 * u2[i][j-1][k-2];
double _t_47_ = _v_22_;
double _v_23_ = c1 * u2[i][j-1][k+1];
_v_23_ -= c1 * u2[i][j-1][k-1];
_t_47_ += _v_23_;
double _t_46_ = met1[i][j-1][k] * mu[i][j-1][k] * met2[i][j-1][k];
double _v_24_ = _t_46_ * _t_47_;
_v_25_ += c1 * _v_24_;
_t_0_ += _v_25_;
// Accumulate the j/k contribution into r1 via a register copy.
double r1ic0jc0kc0 = r1[i][j][k];
r1ic0jc0kc0 += _t_0_;
// ---- i/j cross terms on u2: gather all difference pairs first ----
double _v_26_ = c2 * u2[i+2][j+2][k];
_v_26_ -= c2 * u2[i-2][j+2][k];
double _v_29_ = c2 * u2[i+2][j-2][k];
_v_29_ -= c2 * u2[i-2][j-2][k];
double _v_40_ = c2 * u2[i+2][j+2][k];
_v_40_ -= c2 * u2[i+2][j-2][k];
double _v_43_ = c2 * u2[i-2][j+2][k];
_v_43_ -= c2 * u2[i-2][j-2][k];
double _v_33_ = c2 * u2[i+2][j+1][k];
_v_33_ -= c2 * u2[i-2][j+1][k];
double _v_36_ = c2 * u2[i+2][j-1][k];
_v_36_ -= c2 * u2[i-2][j-1][k];
double _v_47_ = c2 * u2[i+1][j+2][k];
_v_47_ -= c2 * u2[i+1][j-2][k];
double _v_50_ = c2 * u2[i-1][j+2][k];
_v_50_ -= c2 * u2[i-1][j-2][k];
double _v_27_ = c1 * u2[i+1][j+2][k];
_v_27_ -= c1 * u2[i-1][j+2][k];
double _v_30_ = c1 * u2[i+1][j-2][k];
_v_30_ -= c1 * u2[i-1][j-2][k];
double _v_41_ = c1 * u2[i+2][j+1][k];
_v_41_ -= c1 * u2[i+2][j-1][k];
double _v_44_ = c1 * u2[i-2][j+1][k];
_v_44_ -= c1 * u2[i-2][j-1][k];
double _v_34_ = c1 * u2[i+1][j+1][k];
_v_34_ -= c1 * u2[i-1][j+1][k];
double _v_37_ = c1 * u2[i+1][j-1][k];
_v_37_ -= c1 * u2[i-1][j-1][k];
double _v_48_ = c1 * u2[i+1][j+1][k];
_v_48_ -= c1 * u2[i+1][j-1][k];
double _v_51_ = c1 * u2[i-1][j+1][k];
_v_51_ -= c1 * u2[i-1][j-1][k];
// ---- combine the pairs with their metric/material weights ----
double _t_54_ = _v_26_;
_t_54_ += _v_27_;
double _t_53_ = met1[i][j+2][k] * mu[i][j+2][k] * met1[i][j+2][k];
double _v_28_ = _t_53_ * _t_54_;
double _v_32_ = c2 * _v_28_;
double _t_59_ = _v_29_;
_t_59_ += _v_30_;
double _t_58_ = met1[i][j-2][k] * mu[i][j-2][k] * met1[i][j-2][k];
double _v_31_ = _t_58_ * _t_59_;
_v_32_ += c2 * _v_31_;
double _t_51_ = _v_32_;
double _t_76_ = _v_40_;
_t_76_ += _v_41_;
double _t_75_ = met1[i+2][j][k] * la[i+2][j][k] * met1[i+2][j][k];
double _v_42_ = _t_75_ * _t_76_;
double _v_46_ = c2 * _v_42_;
double _t_81_ = _v_43_;
_t_81_ += _v_44_;
double _t_80_ = met1[i-2][j][k] * la[i-2][j][k] * met1[i-2][j][k];
double _v_45_ = _t_80_ * _t_81_;
_v_46_ += c2 * _v_45_;
_t_51_ += _v_46_;
double _t_65_ = _v_33_;
_t_65_ += _v_34_;
double _t_64_ = met1[i][j+1][k] * mu[i][j+1][k] * met1[i][j+1][k];
double _v_35_ = _t_64_ * _t_65_;
double _v_39_ = c1 * _v_35_;
double _t_70_ = _v_36_;
_t_70_ += _v_37_;
double _t_69_ = met1[i][j-1][k] * mu[i][j-1][k] * met1[i][j-1][k];
double _v_38_ = _t_69_ * _t_70_;
_v_39_ += c1 * _v_38_;
_t_51_ += _v_39_;
double _t_87_ = _v_47_;
_t_87_ += _v_48_;
double _t_86_ = met1[i+1][j][k] * la[i+1][j][k] * met1[i+1][j][k];
double _v_49_ = _t_86_ * _t_87_;
double _v_53_ = c1 * _v_49_;
double _t_92_ = _v_50_;
_t_92_ += _v_51_;
double _t_91_ = met1[i-1][j][k] * la[i-1][j][k] * met1[i-1][j][k];
double _v_52_ = _t_91_ * _t_92_;
_v_53_ += c1 * _v_52_;
_t_51_ += _v_53_;
// Fold in the i/j contribution and write the result back once.
r1ic0jc0kc0 += _t_51_;
r1[i][j][k] = r1ic0jc0kc0;
}
}
/* Uploads one host field of `count` doubles into a fresh device buffer.
 * The allocation is verified through the project's check_error helper
 * (which, as used throughout this file, presumably inspects the last CUDA
 * error). Returns the device pointer. */
static double* alloc_and_upload (double *h_src, size_t count, const char *err_msg) {
    double *d_buf;
    cudaMalloc (&d_buf, sizeof(double)*count);
    check_error (err_msg);
    cudaMemcpy (d_buf, h_src, sizeof(double)*count, cudaMemcpyHostToDevice);
    return d_buf;
}
/* Host driver: uploads the ten N^3 fields and the two length-N stretching
 * vectors, launches the curvi_1 (2-D grid) and curvi_2 (3-D grid) stencil
 * kernels, copies the updated r1 field back to the host, and releases every
 * device allocation.
 * c1/c2 are stencil coefficients; N is the cube edge length.
 * NOTE(review): `ceil(a, b)` is assumed to be a project macro performing
 * integer ceiling division — confirm against the surrounding headers. */
extern "C" void host_code (double *h_r1, double *h_u1, double *h_u2, double *h_u3, double *h_mu, double *h_la, double *h_met1, double *h_met2, double *h_met3, double *h_met4, double *h_strx, double *h_stry, double c1, double c2, int N) {
    size_t n3 = (size_t)N * N * N;
    double *r1   = alloc_and_upload (h_r1,   n3, "Failed to allocate device memory for r1\n");
    double *u1   = alloc_and_upload (h_u1,   n3, "Failed to allocate device memory for u1\n");
    double *u2   = alloc_and_upload (h_u2,   n3, "Failed to allocate device memory for u2\n");
    double *u3   = alloc_and_upload (h_u3,   n3, "Failed to allocate device memory for u3\n");
    double *mu   = alloc_and_upload (h_mu,   n3, "Failed to allocate device memory for mu\n");
    double *la   = alloc_and_upload (h_la,   n3, "Failed to allocate device memory for la\n");
    double *met1 = alloc_and_upload (h_met1, n3, "Failed to allocate device memory for met1\n");
    double *met2 = alloc_and_upload (h_met2, n3, "Failed to allocate device memory for met2\n");
    double *met3 = alloc_and_upload (h_met3, n3, "Failed to allocate device memory for met3\n");
    double *met4 = alloc_and_upload (h_met4, n3, "Failed to allocate device memory for met4\n");
    double *strx = alloc_and_upload (h_strx, (size_t)N, "Failed to allocate device memory for strx\n");
    double *stry = alloc_and_upload (h_stry, (size_t)N, "Failed to allocate device memory for stry\n");
    /* curvi_1: 2-D launch over (k, j). */
    dim3 blockconfig (16, 8);
    dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1);
    curvi_1 <<<gridconfig, blockconfig>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
    check_error ("curvi_1 kernel launch failed\n");  /* surfaces bad launch configs */
    /* curvi_2: 3-D launch over (k, j, i). */
    dim3 blockconfig_1 (16, 2, 2);
    dim3 gridconfig_1 (ceil(N, blockconfig_1.x), ceil(N, blockconfig_1.y), ceil(N, blockconfig_1.z));
    curvi_2 <<<gridconfig_1, blockconfig_1>>> (r1, u1, u2, u3, mu, la, met1, met2, met3, met4, strx, stry, c1, c2, N);
    check_error ("curvi_2 kernel launch failed\n");
    /* Blocking copy: synchronizes with both kernels before reading r1. */
    cudaMemcpy (h_r1, r1, sizeof(double)*n3, cudaMemcpyDeviceToHost);
    /* Fix: the original leaked every device buffer; release them all. */
    cudaFree (r1);   cudaFree (u1);   cudaFree (u2);   cudaFree (u3);
    cudaFree (mu);   cudaFree (la);
    cudaFree (met1); cudaFree (met2); cudaFree (met3); cudaFree (met4);
    cudaFree (strx); cudaFree (stry);
}
|
10,012 | /*
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO LICENSEE:
*
* This source code and/or documentation ("Licensed Deliverables") are
* subject to NVIDIA intellectual property rights under U.S. and
* international Copyright laws.
*
* These Licensed Deliverables contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a form of NVIDIA software license agreement by and
* between NVIDIA and Licensee ("License Agreement") or electronically
* accepted by Licensee. Notwithstanding any terms or conditions to
* the contrary in the License Agreement, reproduction or disclosure
* of the Licensed Deliverables to any third party without the express
* written consent of NVIDIA is prohibited.
*
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, NVIDIA MAKES NO REPRESENTATION ABOUT THE
* SUITABILITY OF THESE LICENSED DELIVERABLES FOR ANY PURPOSE. IT IS
* PROVIDED "AS IS" WITHOUT EXPRESS OR IMPLIED WARRANTY OF ANY KIND.
* NVIDIA DISCLAIMS ALL WARRANTIES WITH REGARD TO THESE LICENSED
* DELIVERABLES, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY,
* NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* NOTWITHSTANDING ANY TERMS OR CONDITIONS TO THE CONTRARY IN THE
* LICENSE AGREEMENT, IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY
* SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, OR ANY
* DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
* ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THESE LICENSED DELIVERABLES.
*
* U.S. Government End Users. These Licensed Deliverables are a
* "commercial item" as that term is defined at 48 C.F.R. 2.101 (OCT
* 1995), consisting of "commercial computer software" and "commercial
* computer software documentation" as such terms are used in 48
* C.F.R. 12.212 (SEPT 1995) and is provided to the U.S. Government
* only as a commercial end item. Consistent with 48 C.F.R.12.212 and
* 48 C.F.R. 227.7202-1 through 227.7202-4 (JUNE 1995), all
* U.S. Government End Users acquire the Licensed Deliverables with
* only those rights set forth herein.
*
* Any use of the Licensed Deliverables in individual and commercial
* software must include, in the user documentation and internal
* comments to the code, the above Disclaimer and U.S. Government End
* Users Notice.
*/
#pragma once
#if !defined(CU_COMPLEX_H_)
#define CU_COMPLEX_H_
#if defined(__cplusplus)
extern "C" {
#endif /* __cplusplus */
#include <math.h> /* import fabsf, sqrt */
#include "vector_types.h"
typedef float2 cuFloatComplex;
/* Returns the real component of a single-precision complex value. */
__host__ __device__ static __inline__ float cuCrealf(cuFloatComplex x)
{
return x.x;
}
/* Returns the imaginary component of a single-precision complex value. */
__host__ __device__ static __inline__ float cuCimagf(cuFloatComplex x)
{
return x.y;
}
/* Constructs a cuFloatComplex from real part r and imaginary part i. */
__host__ __device__ static __inline__ cuFloatComplex make_cuFloatComplex
(float r, float i)
{
cuFloatComplex res;
res.x = r;
res.y = i;
return res;
}
/* Squared magnitude |x|^2 = re*re + im*im (no square root taken). */
__host__ __device__ static __inline__ float cuCnormf(cuFloatComplex x)
{
    float re = cuCrealf(x);
    float im = cuCimagf(x);
    return re * re + im * im;
}
/* Complex conjugate: same real part, negated imaginary part. */
__host__ __device__ static __inline__ cuFloatComplex cuConjf(cuFloatComplex x)
{
    cuFloatComplex r = x;
    r.y = -r.y;
    return r;
}
/* Component-wise complex addition. */
__host__ __device__ static __inline__ cuFloatComplex cuCaddf(cuFloatComplex x,
                                                             cuFloatComplex y)
{
    float re = cuCrealf(x) + cuCrealf(y);
    float im = cuCimagf(x) + cuCimagf(y);
    return make_cuFloatComplex(re, im);
}
/* Component-wise complex subtraction: x - y. */
__host__ __device__ static __inline__ cuFloatComplex cuCsubf(cuFloatComplex x,
                                                             cuFloatComplex y)
{
    float re = cuCrealf(x) - cuCrealf(y);
    float im = cuCimagf(x) - cuCimagf(y);
    return make_cuFloatComplex(re, im);
}
/* This implementation could suffer from intermediate overflow even though
* the final result would be in range. However, various implementations do
* not guard against this (presumably to avoid losing performance), so we
* don't do it either to stay competitive.
*/
/* Complex multiplication (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
 * Deliberately unguarded against intermediate overflow, matching the
 * reference implementation's speed/accuracy trade-off. */
__host__ __device__ static __inline__ cuFloatComplex cuCmulf(cuFloatComplex x,
                                                             cuFloatComplex y)
{
    float a = cuCrealf(x);
    float b = cuCimagf(x);
    float c = cuCrealf(y);
    float d = cuCimagf(y);
    return make_cuFloatComplex((a * c) - (b * d), (a * d) + (b * c));
}
/* This implementation guards against intermediate underflow and overflow
* by scaling. Such guarded implementations are usually the default for
* complex library implementations, with some also offering an unguarded,
* faster version.
*/
/* Complex division x / y with operand scaling to guard against
 * intermediate underflow/overflow (Smith-style algorithm). */
__host__ __device__ static __inline__ cuFloatComplex cuCdivf(cuFloatComplex x,
                                                             cuFloatComplex y)
{
    /* Scale both operands by 1 / (|Re(y)| + |Im(y)|). */
    float scale = fabsf(cuCrealf(y)) + fabsf(cuCimagf(y));
    float inv = 1.0f / scale;
    float xr = cuCrealf(x) * inv;
    float xi = cuCimagf(x) * inv;
    float yr = cuCrealf(y) * inv;
    float yi = cuCimagf(y) * inv;
    float denom = (yr * yr) + (yi * yi);
    float invDenom = 1.0f / denom;
    return make_cuFloatComplex(((xr * yr) + (xi * yi)) * invDenom,
                               ((xi * yr) - (xr * yi)) * invDenom);
}
/*
* We would like to call hypotf(), but it's not available on all platforms.
* This discrete implementation guards against intermediate underflow and
* overflow by scaling. Otherwise we would lose half the exponent range.
* There are various ways of doing guarded computation. For now chose the
* simplest and fastest solution, however this may suffer from inaccuracies
* if sqrt and division are not IEEE compliant.
*/
/* Magnitude |x| computed as hi * sqrt(1 + (lo/hi)^2) so the exponent range
 * is not halved by squaring; zero/overflow/non-finite inputs fall back to
 * the plain sum hi + lo. */
__host__ __device__ static __inline__ float cuCabsf(cuFloatComplex x)
{
    float re = cuCrealf(x);
    float im = cuCimagf(x);
    re = fabsf(re);
    im = fabsf(im);
    /* hi = larger magnitude component, lo = smaller. */
    float hi, lo;
    if (re > im) {
        hi = re;
        lo = im;
    }
    else {
        hi = im;
        lo = re;
    }
    float ratio = lo / hi;
    ratio = 1.0f + ratio * ratio;
    float res = hi * sqrtf(ratio);
    if ((hi == 0.0f) || (hi > 3.402823466e38f) || (lo > 3.402823466e38f)) {
        res = hi + lo;
    }
    return res;
}
/* Scalar-times-complex: scales both components of cm by fl. */
__host__ __device__ static __inline__ cuFloatComplex operator* (const float& fl, const cuFloatComplex& cm) {
return make_cuFloatComplex(fl * cuCrealf(cm), fl * cuCimagf(cm));
}
/* Double precision */
typedef double2 cuDoubleComplex;
/* Returns the real component of a double-precision complex value. */
__host__ __device__ static __inline__ double cuCreal(cuDoubleComplex x)
{
return x.x;
}
/* Returns the imaginary component of a double-precision complex value. */
__host__ __device__ static __inline__ double cuCimag(cuDoubleComplex x)
{
return x.y;
}
/* Constructs a cuDoubleComplex from real part r and imaginary part i. */
__host__ __device__ static __inline__ cuDoubleComplex make_cuDoubleComplex
(double r, double i)
{
cuDoubleComplex res;
res.x = r;
res.y = i;
return res;
}
/* Complex conjugate: same real part, negated imaginary part. */
__host__ __device__ static __inline__ cuDoubleComplex cuConj(cuDoubleComplex x)
{
    cuDoubleComplex r = x;
    r.y = -r.y;
    return r;
}
/* Component-wise complex addition. */
__host__ __device__ static __inline__ cuDoubleComplex cuCadd(cuDoubleComplex x,
                                                             cuDoubleComplex y)
{
    double re = cuCreal(x) + cuCreal(y);
    double im = cuCimag(x) + cuCimag(y);
    return make_cuDoubleComplex(re, im);
}
/* Component-wise complex subtraction: x - y. */
__host__ __device__ static __inline__ cuDoubleComplex cuCsub(cuDoubleComplex x,
                                                             cuDoubleComplex y)
{
    double re = cuCreal(x) - cuCreal(y);
    double im = cuCimag(x) - cuCimag(y);
    return make_cuDoubleComplex(re, im);
}
/* This implementation could suffer from intermediate overflow even though
* the final result would be in range. However, various implementations do
* not guard against this (presumably to avoid losing performance), so we
* don't do it either to stay competitive.
*/
/* Complex multiplication (a+bi)(c+di) = (ac - bd) + (ad + bc)i.
 * Deliberately unguarded against intermediate overflow, matching the
 * reference implementation's speed/accuracy trade-off. */
__host__ __device__ static __inline__ cuDoubleComplex cuCmul(cuDoubleComplex x,
                                                             cuDoubleComplex y)
{
    double a = cuCreal(x);
    double b = cuCimag(x);
    double c = cuCreal(y);
    double d = cuCimag(y);
    return make_cuDoubleComplex((a * c) - (b * d), (a * d) + (b * c));
}
/* This implementation guards against intermediate underflow and overflow
* by scaling. Such guarded implementations are usually the default for
* complex library implementations, with some also offering an unguarded,
* faster version.
*/
/* Complex division x / y with operand scaling to guard against
 * intermediate underflow/overflow (Smith-style algorithm). */
__host__ __device__ static __inline__ cuDoubleComplex cuCdiv(cuDoubleComplex x,
                                                             cuDoubleComplex y)
{
    /* Scale both operands by 1 / (|Re(y)| + |Im(y)|). */
    double scale = (fabs(cuCreal(y))) + (fabs(cuCimag(y)));
    double inv = 1.0 / scale;
    double xr = cuCreal(x) * inv;
    double xi = cuCimag(x) * inv;
    double yr = cuCreal(y) * inv;
    double yi = cuCimag(y) * inv;
    double denom = (yr * yr) + (yi * yi);
    double invDenom = 1.0 / denom;
    return make_cuDoubleComplex(((xr * yr) + (xi * yi)) * invDenom,
                                ((xi * yr) - (xr * yi)) * invDenom);
}
/* This implementation guards against intermediate underflow and overflow
* by scaling. Otherwise we would lose half the exponent range. There are
* various ways of doing guarded computation. For now chose the simplest
* and fastest solution, however this may suffer from inaccuracies if sqrt
* and division are not IEEE compliant.
*/
/* Magnitude |x| computed as hi * sqrt(1 + (lo/hi)^2) so the exponent range
 * is not halved by squaring; zero/overflow/non-finite inputs fall back to
 * the plain sum hi + lo. */
__host__ __device__ static __inline__ double cuCabs(cuDoubleComplex x)
{
    double re = cuCreal(x);
    double im = cuCimag(x);
    re = fabs(re);
    im = fabs(im);
    /* hi = larger magnitude component, lo = smaller. */
    double hi, lo;
    if (re > im) {
        hi = re;
        lo = im;
    }
    else {
        hi = im;
        lo = re;
    }
    double ratio = lo / hi;
    ratio = 1.0 + ratio * ratio;
    double res = hi * sqrt(ratio);
    if ((hi == 0.0) ||
        (hi > 1.79769313486231570e+308) || (lo > 1.79769313486231570e+308)) {
        res = hi + lo;
    }
    return res;
}
#if defined(__cplusplus)
}
#endif /* __cplusplus */
/* aliases */
typedef cuFloatComplex cuComplex;
/* Constructs a cuComplex (alias of cuFloatComplex) from x + yi. */
__host__ __device__ static __inline__ cuComplex make_cuComplex(float x,
float y)
{
return make_cuFloatComplex(x, y);
}
/* float-to-double promotion */
/* Widens a single-precision complex value to double precision (exact). */
__host__ __device__ static __inline__ cuDoubleComplex cuComplexFloatToDouble
(cuFloatComplex c)
{
return make_cuDoubleComplex((double)cuCrealf(c), (double)cuCimagf(c));
}
/* Narrows a double-precision complex value to single precision (rounds). */
__host__ __device__ static __inline__ cuFloatComplex cuComplexDoubleToFloat
(cuDoubleComplex c)
{
return make_cuFloatComplex((float)cuCreal(c), (float)cuCimag(c));
}
/* Complex fused multiply-add: returns x*y + d.
 * The accumulation order (each product folded in one at a time) is
 * deliberate — presumably so the compiler can contract every step into a
 * hardware FMA — so do not reassociate these expressions. */
__host__ __device__ static __inline__ cuComplex cuCfmaf(cuComplex x, cuComplex y, cuComplex d)
{
float real_res;
float imag_res;
real_res = (cuCrealf(x) * cuCrealf(y)) + cuCrealf(d);
imag_res = (cuCrealf(x) * cuCimagf(y)) + cuCimagf(d);
real_res = -(cuCimagf(x) * cuCimagf(y)) + real_res;
imag_res = (cuCimagf(x) * cuCrealf(y)) + imag_res;
return make_cuComplex(real_res, imag_res);
}
/* Complex fused multiply-add (double precision): returns x*y + d.
 * The accumulation order (each product folded in one at a time) is
 * deliberate — presumably so the compiler can contract every step into a
 * hardware FMA — so do not reassociate these expressions. */
__host__ __device__ static __inline__ cuDoubleComplex cuCfma(cuDoubleComplex x, cuDoubleComplex y, cuDoubleComplex d)
{
double real_res;
double imag_res;
real_res = (cuCreal(x) * cuCreal(y)) + cuCreal(d);
imag_res = (cuCreal(x) * cuCimag(y)) + cuCimag(d);
real_res = -(cuCimag(x) * cuCimag(y)) + real_res;
imag_res = (cuCimag(x) * cuCreal(y)) + imag_res;
return make_cuDoubleComplex(real_res, imag_res);
}
#endif /* !defined(CU_COMPLEX_H_) */
|
10,013 | #include "includes.h"
/* One update sweep of a 2-D Ising-style lattice with a 5x5 influence
 * kernel w. Grid layout: one block per lattice row (blockIdx.x = row index);
 * each thread computes `workperthread` consecutive sites of that row.
 * Shared-memory tile tempG caches the 5 rows (row-2 .. row+2, wrapped
 * periodically) with a 2-column halo on each side, so n must not exceed 517
 * (the hard-coded tile width below). */
__global__ void cudaising(int* G, double* w, int* newG, int n, int workperthread) {
    int startingId = threadIdx.x * workperthread;
    // Block-shared copies of the weight matrix and the 5-row lattice slab.
    __shared__ double tempW[5 * 5];
    __shared__ int tempG[(517 + 4) * 5];
    // Copy the needed G elements (with periodic wrap) into tempG.
    // Every thread redundantly writes the same values; that is a benign
    // same-value race, though wasteful.
    for (int i = -2; i <= 2; i++) {
        /* Fix: the column loop previously ran to j <= n + 2, writing column
         * index n + 4 of each (n+4)-wide row — one element past the row,
         * which for the last row (i == 2) with n == 517 lands one int past
         * the end of tempG itself. Columns -2 .. n+1 are exactly the n + 4
         * entries the 5x5 stencil needs. */
        for (int j = -2; j <= n + 1; j++) {
            tempG[(j + 2) + (i + 2) * (n + 4)] = G[((j + n) % n) + ((blockIdx.x + i + n) % n) * n];
        }
    }
    // Copy w into shared memory (first 25 threads, one element each).
    if (threadIdx.x < 25) {
        tempW[threadIdx.x] = w[threadIdx.x];
    }
    __syncthreads();
    // For every lattice site assigned to this thread:
    for (int element = 0; element < workperthread; element++) {
        double newSpin = 0.0;
        // Weighted sum of the 5x5 neighborhood around the site.
        for (int ii = 0; ii < 5; ii++) {
            for (int jj = 0; jj < 5; jj++) {
                newSpin += tempW[(jj) + (ii) * 5] * tempG[startingId + element + jj + ii * (n + 4)];
            }
        }
        // Global index of the site whose spin was just computed.
        int index = startingId + element + blockIdx.x * blockDim.x * workperthread;
        // Sign of the weighted sum decides the new spin; a near-zero sum
        // (|sum| <= 1e-6 tolerance) keeps the old spin.
        if (newSpin > 0.000001) {
            newG[index] = 1;
        }
        else if (newSpin < -0.000001) {
            newG[index] = -1;
        }
        else {
            newG[index] = G[index];
        }
    }
    __syncthreads();
}
10,014 | #include "des.cuh"
#include <stdio.h>
/* Prints the command-line usage summary for the DES tool to stdout. */
void printUsage() {
    static const char* kUsageLines[] = {
        "Usage : ./main [k|e|d] key (input output)\n",
        "\t./main k key\n",
        "\t./main e key plain-text encrypted-text\n",
        "\t./main d key encrypted-text decrypted-text\n"
    };
    for (unsigned i = 0; i < sizeof(kUsageLines) / sizeof(kUsageLines[0]); ++i)
        printf("%s", kUsageLines[i]);
}
/* Entry point: dispatches on argument count and mode letter.
 *   3 args + 'k'  -> key generation
 *   5 args + 'e'  -> encryption
 *   5 args + 'd'  -> decryption
 * Anything else prints the usage text. */
int main(int argc, char* argv[]) {
    bool invalid = false;
    if (argc == 3) {
        if (*argv[1] == 'k') {
            /* key geneartion */
            keyGen(argv[2]);
        } else {
            invalid = true;
        }
    } else if (argc == 5) {
        if (*argv[1] == 'e') {
            /* encryption */
            encryption_cu(argv[3], argv[4], argv[2]);
        } else if (*argv[1] == 'd') {
            /* decryption */
            decryption_cu(argv[3], argv[4], argv[2]);
        } else {
            invalid = true;
        }
    } else {
        invalid = true;
    }
    if (invalid)
        printUsage();
    return 0;
}
|
10,015 | #include <cuda_runtime.h>
#include <stdio.h>
/* Prints one greeting per thread along with its full grid/block coordinates
 * and a linearized global thread id. */
__global__ void helloKernel() {
    // Linear block id within the 3-D grid (x fastest, then y, then z).
    int bid = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    // Linear thread id: intra-block offset plus the block's base id.
    int tid = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z)
            + bid * blockDim.x * blockDim.y * blockDim.z;
    printf("GPU: gridDim:(%2d, %2d, %2d) blockDim:(%2d, %2d, %2d) blockIdx:(%2d, %2d, %2d) "
           "threadIdx:(%2d, %2d, %2d) -> Thread[%2d]: %s\n", gridDim.x, gridDim.y,
           gridDim.z, blockDim.x, blockDim.y, blockDim.z, blockIdx.x, blockIdx.y,
           blockIdx.z, threadIdx.x, threadIdx.y, threadIdx.z, tid, "Hello World!!!\n");
}
/* Host entry point: prints a greeting from the host, launches a single-thread
 * hello kernel, and resets the device. */
int main(int argc, char **argv) {
    // Define the GPU id to work.
    cudaSetDevice(0);
    // Hello from host.
    printf("Host: Hello World!!!\n");
    // Hello from GPU.
    helloKernel<<<1,1>>>();
    // Fix: surface launch-configuration errors (kernel launches return no
    // status directly).
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "helloKernel launch failed: %s\n", cudaGetErrorString(err));
    // Fix: wait for the kernel so its device-side printf output is flushed
    // before the context is torn down.
    cudaDeviceSynchronize();
    // Reset device.
    cudaDeviceReset();
    return (0);
}
|
10,016 | #include <iostream>
#include <time.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <cstdlib>
#include <sstream>
#include <unistd.h>
/*
* Lectura Archivo
*/
/*
 * Lectura Archivo: reads an image file of the form "M N\n" followed by
 * M*N floats for each of the R, G and B channels, allocating the three
 * channel arrays with new[] (ownership passes to the caller).
 * Exits with an error message if the file cannot be opened (the original
 * crashed inside fscanf on a NULL stream).
 */
void Read(float** R, float** G, float** B, int *M, int *N, const char *filename) {
    FILE *fp;
    fp = fopen(filename, "r");
    if (fp == NULL) {  /* fail fast instead of dereferencing NULL below */
        fprintf(stderr, "Read: cannot open '%s'\n", filename);
        exit(EXIT_FAILURE);
    }
    fscanf(fp, "%d %d\n", M, N);
    int imsize = (*M) * (*N);
    float* R1 = new float[imsize];
    float* G1 = new float[imsize];
    float* B1 = new float[imsize];
    for (int i = 0; i < imsize; i++)
        fscanf(fp, "%f ", &(R1[i]));
    for (int i = 0; i < imsize; i++)
        fscanf(fp, "%f ", &(G1[i]));
    for (int i = 0; i < imsize; i++)
        fscanf(fp, "%f ", &(B1[i]));
    fclose(fp);
    *R = R1; *G = G1; *B = B1;
}
/*
* Escritura Archivo
*/
/* Writes one channel row: the first n-1 values as "%f " and the last as
 * "%f\n", matching the file layout produced by the original code. */
static void writeChannelRow(FILE *fp, const float *data, int n) {
    for (int i = 0; i < n - 1; i++)
        fprintf(fp, "%f ", data[i]);
    fprintf(fp, "%f\n", data[n - 1]);
}
/*
 * Escritura Archivo: writes "M N\n" followed by the R, G and B channels,
 * one line of M*N floats each.
 */
void Write(float* R, float* G, float* B, int M, int N, const char *filename) {
    FILE *fp = fopen(filename, "w");
    fprintf(fp, "%d %d\n", M, N);
    writeChannelRow(fp, R, M * N);
    writeChannelRow(fp, G, M * N);
    writeChannelRow(fp, B, M * N);
    fclose(fp);
}
/*
* Procesamiento Imagen CPU
*/
/*Pregunta 1*/
/* CPU reference for the X-column swap: within each 2X-wide group of linear
 * indices, the first X positions read from X places ahead and the last X
 * positions read from X places behind, applied to all three channels. */
void funcionCPU(float *R,float *G, float* B,int M,int N, int X,float *Rout,float* Gout,float* Bout){
    const int total = M * N;
    for (int i = 0; i < total; ++i) {
        const int src = ((i % (2 * X)) < X) ? (i + X) : (i - X);
        Rout[i] = R[src];
        Gout[i] = G[src];
        Bout[i] = B[src];
    }
}
/*
* Procesamiento Imagen GPU
*/
/*Pregunta 2*/
/* GPU version of funcionCPU: one thread per pixel; within each 2X-wide
 * group of linear indices the first X positions read from X places ahead,
 * the rest from X places behind. */
__global__ void kernel1(float *R, float *G, float* B, float *Rout, float *Gout, float* Bout, int M, int N, int X){
    const int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId >= M * N)
        return;  // guard the grid tail
    const int src = ((tId % (2 * X)) < X) ? (tId + X) : (tId - X);
    Rout[tId] = R[src];
    Gout[tId] = G[src];
    Bout[tId] = B[src];
}
/*Pregunta 3*/
/* Column-shuffle kernel: each thread maps its element between the "even"
 * (par) and "odd" (impar) X-wide column groups of its row, with the copy
 * direction keyed on blockIdx.x % 4.
 * NOTE(review): correctness of the par/impar index arithmetic and of keying
 * the direction on blockIdx.x (rather than on the element position) depends
 * on N, X and blockDim.x satisfying the assignment's layout assumptions —
 * verify against the launch configuration in main. */
__global__ void kernel2(float *R, float *G, float* B, float *Rout, float *Gout, float* Bout, int M, int N, int X){
int tId= threadIdx.x+blockIdx.x*blockDim.x;
int par, impar;
if(tId<M*N){
/* par: this element's position inside an even group of X columns;
 * impar: the matching position X columns later, wrapped within the row. */
par=int(tId/N)*N+((2*int(tId/X))*X+tId%X)%N;
impar=int(tId/N)*N+(((2*int(tId/X)+1))*X+tId%X)%N;
if((blockIdx.x)%4 < 2){
Rout[impar]=R[par];
Gout[impar]=G[par];
Bout[impar]=B[par];
}
else{
Rout[par]=R[impar];
Gout[par]=G[impar];
Bout[par]=B[impar];
}
}
}
/*
* Lectura Archivo Modificada
*/
/*Pregunta 4*/
/* Reads one channel of M*N floats from fp, de-interleaving X-wide column
 * groups: values whose source position i satisfies i % (2X) < X are packed
 * into the first half of each row, the rest into the second half. */
static void readChannelShuffled(FILE *fp, float *dst, int M, int N, int X) {
    int prim = 0, sec = 0;
    int imsize = M * N;
    for (int i = 0; i < imsize; i++) {
        if (i % N == 0) {  /* reset the two write cursors at each row start */
            prim = 0;
            sec = N / 2;
        }
        if ((i % (2 * X)) < X) {
            fscanf(fp, "%f ", &(dst[prim + ((i / N) * N)]));
            prim++;
        }
        else {
            fscanf(fp, "%f ", &(dst[sec + ((i / N) * N)]));
            sec++;
        }
    }
}
/*
 * Lectura Archivo Modificada: like Read, but stores each channel with the
 * X-wide column groups de-interleaved (see readChannelShuffled). The three
 * channel arrays are allocated with new[] and owned by the caller.
 * Exits with an error message if the file cannot be opened.
 */
void Read2(float** R, float** G, float** B, int *M, int *N,int X, const char *filename){
    FILE *fp;
    fp = fopen(filename, "r");
    if (fp == NULL) {  /* fail fast instead of dereferencing NULL below */
        fprintf(stderr, "Read2: cannot open '%s'\n", filename);
        exit(EXIT_FAILURE);
    }
    fscanf(fp, "%d %d\n", M, N);
    int imsize = (*M) * (*N);
    float* R1 = new float[imsize];
    float* G1 = new float[imsize];
    float* B1 = new float[imsize];
    /* The original repeated the same loop three times; one helper per channel. */
    readChannelShuffled(fp, R1, *M, *N, X);
    readChannelShuffled(fp, G1, *M, *N, X);
    readChannelShuffled(fp, B1, *M, *N, X);
    fclose(fp);
    *R = R1; *G = G1; *B = B1;
}
/*
* Escritura Archivo Modificada
*/
/* Writes one channel of M*N floats to fp, re-interleaving X-wide column
 * groups: output position i reads from the first half of its row when
 * i % (2X) < X and from the second half otherwise. A newline is emitted
 * only when the final output position falls in the second-half branch,
 * exactly matching the original layout. */
static void writeChannelShuffled(FILE *fp, const float *src, int M, int N, int X) {
    int prim = 0, sec = 0;
    for (int i = 0; i < M * N; i++) {
        if (i % N == 0) {  /* reset the two read cursors at each row start */
            prim = 0;
            sec = N / 2;
        }
        if ((i % (2 * X)) < X) {
            fprintf(fp, "%f ", src[prim + ((i / N) * N)]);
            prim++;
        }
        else {
            if (i == M * N - 1)
                fprintf(fp, "%f\n", src[sec + ((i / N) * N)]);
            else
                fprintf(fp, "%f ", src[sec + ((i / N) * N)]);
            sec++;
        }
    }
}
/*
 * Escritura Archivo Modificada: inverse of Read2 — writes "M N\n" followed
 * by the three channels with the X-wide column groups re-interleaved.
 * Exits with an error message if the file cannot be opened.
 */
void Write2(float* R, float* G, float* B, int M, int N, int X, const char *filename){
    FILE *fp;
    fp = fopen(filename, "w");
    if (fp == NULL) {  /* fail fast instead of dereferencing NULL below */
        fprintf(stderr, "Write2: cannot open '%s'\n", filename);
        exit(EXIT_FAILURE);
    }
    fprintf(fp, "%d %d\n", M, N);
    /* The original repeated the same loop three times; one helper per channel. */
    writeChannelShuffled(fp, R, M, N, X);
    writeChannelShuffled(fp, G, M, N, X);
    writeChannelShuffled(fp, B, M, N, X);
    fclose(fp);
}
/*Pregunta 4*/
/* Swaps the left and right halves of each row: blocks with
 * blockIdx.x % 4 < 2 copy their elements N/2 positions forward, the others
 * N/2 positions backward.
 * NOTE(review): relies on the launch configuration aligning block
 * boundaries with half-row boundaries — verify against main. */
__global__ void kernel3(float *R, float *G, float* B, float *Rout, float *Gout, float* Bout, int M, int N, int X){
    const int tId = threadIdx.x + blockIdx.x * blockDim.x;
    const int shift = N / 2;
    if (tId < M * N) {
        const int dst = ((blockIdx.x % 4) < 2) ? (tId + shift) : (tId - shift);
        Rout[dst] = R[tId];
        Gout[dst] = G[tId];
        Bout[dst] = B[tId];
    }
}
/*
 * Main program: benchmarks the CPU version and three GPU kernels over the
 * image read from "imagen.txt" for X = 1, 2, 4, ..., 512, writing each
 * result image to disk.
 */
int main(int argc, char **argv){
    /*
     * Initialization
     */
    clock_t t1, t2;
    cudaEvent_t ct1, ct2;
    double ms;
    float dt;
    int M, N, X;
    float *Rhost, *Ghost, *Bhost;
    float *Rhostout, *Ghostout, *Bhostout;
    float *Rdev, *Gdev, *Bdev;
    float *Rdevout, *Gdevout, *Bdevout;
    Read(&Rhost, &Ghost, &Bhost, &M, &N, "imagen.txt"); // data read
    /*
     * CPU part
     */
    std::cout <<"Pregunta 1" << std::endl;
    Rhostout = new float[M*N];
    Ghostout = new float[M*N];
    Bhostout = new float[M*N];
    std::stringstream ss;
    std::string s;
    for(int X=1; X<1024; X*=2){
        ss.str("");
        t1 = clock();
        funcionCPU(Rhost, Ghost, Bhost, M, N, X,Rhostout, Ghostout, Bhostout);
        t2 = clock();
        ms = 1000.0 * (double)(t2 - t1) / CLOCKS_PER_SEC;
        std::cout <<"X:"<< X<< "-Tiempo CPU: " << ms << "[ms]" << std::endl;
        ss << "imgCPU-P1-X_" << X << ".txt";
        s = ss.str();
        Write(Rhostout, Ghostout, Bhostout, M, N, s.c_str());
    }
    delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
    /*
     * GPU part
     */
    int grid_size, block_size = 256;
    grid_size = (int)ceil((float) M * N / block_size);
    cudaMalloc((void**)&Rdev, M * N * sizeof(float));
    cudaMalloc((void**)&Gdev, M * N * sizeof(float));
    cudaMalloc((void**)&Bdev, M * N * sizeof(float));
    cudaMemcpy(Rdev, Rhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Gdev, Ghost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Bdev, Bhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Rdevout, M * N * sizeof(float));
    cudaMalloc((void**)&Gdevout, M * N * sizeof(float));
    cudaMalloc((void**)&Bdevout, M * N * sizeof(float));
    Rhostout = new float[M*N];
    Ghostout = new float[M*N];
    Bhostout = new float[M*N];
    // FIX: create the timing events once and destroy them at the end; the
    // original re-created a fresh pair on every loop iteration without ever
    // destroying them, leaking CUDA runtime resources.
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    std::cout <<"Pregunta 2" << std::endl;
    // First kernel
    for( X=1; X<1024; X*=2){
        ss.str("");
        cudaEventRecord(ct1);
        kernel1<<<grid_size, block_size>>>(Rdev, Gdev, Bdev, Rdevout,Gdevout,Bdevout, M, N, X);
        cudaDeviceSynchronize();
        cudaEventRecord(ct2);
        cudaEventSynchronize(ct2);
        cudaEventElapsedTime(&dt, ct1, ct2);
        std::cout <<"X:"<< X<< "-Tiempo GPU: " << dt << "[ms]" << std::endl;
        ss << "imgGPU-P2-X_" << X << ".txt";
        s = ss.str();
        cudaMemcpy(Rhostout, Rdevout, M * N * sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(Ghostout, Gdevout, M * N * sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(Bhostout, Bdevout, M * N * sizeof(float), cudaMemcpyDeviceToHost);
        Write(Rhostout, Ghostout, Bhostout, M, N, s.c_str());
    }
    delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
    cudaFree(Rdev); cudaFree(Gdev); cudaFree(Bdev);
    cudaFree(Rdevout); cudaFree(Gdevout); cudaFree(Bdevout);
    cudaMalloc((void**)&Rdev, M * N * sizeof(float));
    cudaMalloc((void**)&Gdev, M * N * sizeof(float));
    cudaMalloc((void**)&Bdev, M * N * sizeof(float));
    cudaMemcpy(Rdev, Rhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Gdev, Ghost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(Bdev, Bhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&Rdevout, M * N * sizeof(float));
    cudaMalloc((void**)&Gdevout, M * N * sizeof(float));
    cudaMalloc((void**)&Bdevout, M * N * sizeof(float));
    Rhostout = new float[M*N];
    Ghostout = new float[M*N];
    Bhostout = new float[M*N];
    std::cout <<"Pregunta 3" << std::endl;
    /* Second kernel */
    for( X=1; X<1024; X*=2){
        ss.str("");
        cudaEventRecord(ct1);
        kernel2<<<grid_size, block_size>>>(Rdev, Gdev, Bdev, Rdevout,Gdevout,Bdevout, M, N, X);
        cudaEventRecord(ct2);
        cudaEventSynchronize(ct2);  // blocks until ct2 has been recorded, so dt is valid
        cudaEventElapsedTime(&dt, ct1, ct2);
        cudaDeviceSynchronize();
        std::cout <<"X:"<< X<< "-Tiempo GPU: " << dt << "[ms]" << std::endl;
        ss << "imgGPU-P3-X_" << X << ".txt";
        s = ss.str();
        cudaMemcpy(Rhostout, Rdevout, M * N * sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(Ghostout, Gdevout, M * N * sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(Bhostout, Bdevout, M * N * sizeof(float), cudaMemcpyDeviceToHost);
        Write(Rhostout, Ghostout, Bhostout, M, N, s.c_str());
    }
    delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
    cudaFree(Rdev); cudaFree(Gdev); cudaFree(Bdev);
    cudaFree(Rdevout); cudaFree(Gdevout); cudaFree(Bdevout);
    std::cout <<"Pregunta 4" << std::endl;
    /* Third kernel: a fresh image is read per X, so buffers are reallocated each iteration */
    for( X=1; X<1024; X*=2){
        // reset pointers
        Rdevout=Gdevout=Bdevout=NULL;
        Rdev=Gdev=Bdev=NULL;
        Rhost=NULL;
        Ghost=NULL;
        Bhost=NULL;
        ss.str("");
        Read2(&Rhost, &Ghost, &Bhost, &M, &N, X, "imagen.txt");
        // FIX: M and N may change with what Read2 loads, so the launch
        // configuration must be recomputed here; the original reused the
        // grid_size computed for the first image.
        grid_size = (int)ceil((float) M * N / block_size);
        // allocate kernel arrays
        cudaMalloc((void**)&Rdev, M * N * sizeof(float));
        cudaMalloc((void**)&Gdev, M * N * sizeof(float));
        cudaMalloc((void**)&Bdev, M * N * sizeof(float));
        cudaMemcpy(Rdev, Rhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(Gdev, Ghost, M * N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMemcpy(Bdev, Bhost, M * N * sizeof(float), cudaMemcpyHostToDevice);
        cudaMalloc((void**)&Rdevout, M * N * sizeof(float));
        cudaMalloc((void**)&Gdevout, M * N * sizeof(float));
        cudaMalloc((void**)&Bdevout, M * N * sizeof(float));
        // kernel execution
        cudaEventRecord(ct1);
        kernel3<<<grid_size, block_size>>>(Rdev, Gdev, Bdev, Rdevout,Gdevout,Bdevout, M, N, X);
        cudaEventRecord(ct2);
        cudaEventSynchronize(ct2);
        cudaEventElapsedTime(&dt, ct1, ct2);
        cudaDeviceSynchronize();
        std::cout <<"X:"<< X<< "-Tiempo GPU: " << dt << "[ms]" << std::endl;
        ss << "imgGPU-P4-X_" << X << ".txt";
        s = ss.str();
        // write the result file
        Rhostout = new float[M*N];
        Ghostout = new float[M*N];
        Bhostout = new float[M*N];
        cudaMemcpy(Rhostout, Rdevout, M * N * sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(Ghostout, Gdevout, M * N * sizeof(float), cudaMemcpyDeviceToHost);
        cudaMemcpy(Bhostout, Bdevout, M * N * sizeof(float), cudaMemcpyDeviceToHost);
        Write2(Rhostout, Ghostout, Bhostout, M, N, X, s.c_str());
        // free memory
        cudaFree(Rdev); cudaFree(Gdev); cudaFree(Bdev);
        cudaFree(Rdevout); cudaFree(Gdevout); cudaFree(Bdevout);
        delete[] Rhost; delete[] Ghost; delete[] Bhost;
        delete[] Rhostout; delete[] Ghostout; delete[] Bhostout;
    }
    cudaEventDestroy(ct1);
    cudaEventDestroy(ct2);
    return 0;
}
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <math.h>
//#include <unistd.h>
#define EXPO 2 //so [0,1] will be break into 2^6 intervals 64*64
#define PI 3.14159265
// One reduction level of the block cyclic reduction setup phase.
// For each (row, column) of the m x m matrices it forms the next-level
// matrices via matrix products: newB = 2*C*C - B*B and newC = C*C
// (formula 5.4.2.15 in the referenced book), and the threads with column==0
// also update the selected right-hand-side columns of D.
// Buffers are ping-ponged on the parity of `step`: odd steps read B/C/D and
// write newB/newC/newD, even steps read newB/newC/newD and write B/C/D, so
// the caller must know which set holds the result after the last step.
// deviceX is accepted but not used in this kernel.
// NOTE(review): assumes a single block of 16x16 threads (blocksize is
// hard-coded to 16 and the comment below says gridDim.x is always 1) —
// confirm against the launch configuration in main().
__global__ void CalculateTheD(int step,float* deviceB, float* deviceC, float* deviceD, float* deviceX, float* devicenewB, float* devicenewC, float* devicenewD)
{
int blocksize=16;
int gridsize=gridDim.x; //this will always be 1
// int gridsize2=gridDim.x;
// printf("gridsize: %d %d\n", gridsize,gridsize2);
// __device__ bool myGlobalFlag=true;
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
int blocktotal=blocksize*blocksize;
// flatten (block, thread) coordinates into one global linear thread id
int temp1=by*gridsize+bx;
int temp2=ty*blocksize+tx;
int ind=temp1*blocktotal+temp2;
// m = 2^EXPO - 1: side length of the square matrices
int m=(int)pow(2.0,EXPO*1.0)-1;
/*int column=threadIdx.x;
int row=threadIdx.y;*/
// map the linear thread id onto a (row, column) matrix entry
int row=ind/m;
int column=ind%m;
// per-step strides: number of D columns to update, half-stride, and stride
int iloopstep=(int)pow(2.0,(EXPO-step)*1.0)-1;
int h=(int)pow(2.0,(step-1)*1.0);
int multiplier=(int)pow(2.0,step*1.0);
float* oldB;
float* oldC;
float* oldD;
float* newB;
float* newC;
float* newD;
//this is to exchange which hold the previous value which hold the current value
if(step%2==0)
{
oldB=devicenewB;
oldC=devicenewC;
oldD=devicenewD;
newB=deviceB;
newC=deviceC;
newD=deviceD;
}
else
{
oldB=deviceB;
oldC=deviceC;
oldD=deviceD;
newB=devicenewB;
newC=devicenewC;
newD=devicenewD;
}
//use the device value as old value and store the updated one in to the new value
if(ind<m*m) //so only the first 63 threads do work and the other one is hanging there
{
// sumBB = (B*B)[row][column]
float sumBB=0.0;
for(int k=0;k<m;k++)
{
sumBB=sumBB+oldB[row*m+k]*oldB[k*m+column];
}
// sumCC = (C*C)[row][column]
float sumCC=0.0;
for(int k=0;k<m;k++)
{
sumCC=sumCC+oldC[row*m+k]*oldC[k*m+column];
}
//based on formula (5.4.2.15) on book
newB[row*m+column]=2*sumCC-sumBB;
newC[row*m+column]=sumCC;
//now calculate the new d and it needs to loop through i in each block
//look at the third formula on 5.4.2.15 on book
if(column==0)
{
//for calculate d we just need 63 tthreads but B and C we need 63*63 threads
for(int i=1;i<=iloopstep;i++)
{
// sumCD1 = C * d(i*multiplier - h), sumCD2 = C * d(i*multiplier + h)
float sumCD1=0.0;
for(int k=0;k<m;k++)
{
sumCD1=sumCD1+oldC[row*m+k]*oldD[(i*multiplier-h-1)*m+k];
}
float sumCD2=0.0;
for(int k=0;k<m;k++)
{
sumCD2=sumCD2+oldC[row*m+k]*oldD[(i*multiplier+h-1)*m+k];
}
// sumBD = B * d(i*multiplier)
float sumBD=0.0;
for(int k=0;k<m;k++)
{
sumBD=sumBD+oldB[row*m+k]*oldD[(i*multiplier-1)*m+k];
}
newD[(i*multiplier-1)*m+row]=sumCD1+sumCD2-sumBD;
//printf("gpu:%lf:",newD[(i*multiplier-1)*m+row]);
}
}
}
//sync the thread before go to the next step.
// (reached by all threads: the barrier is outside the if(ind<m*m) branch)
__syncthreads();
/* if(row==0&&column==0)
{
for(int i=0;i<9;i++)
{
printf("%lf ",oldD[i]);
}
printf("\n");
}*/
}
//***************************begin of unblock version of cyclic reduction*********************************************************************************//
// One elimination step of the (non-block, scalar) cyclic reduction on the
// tridiagonal system stored in deviceA/deviceB/deviceC (sub/main/super
// diagonals) with right-hand side deviceD. Each participating thread combines
// equations 2*helper3-1, 2*helper3 and 2*helper3+1 of the previous level into
// one equation of the next level; the helperN offsets locate each level's
// segment inside the flat arrays. Step 1 is a special case (flag/zeroed
// offsets) because level 0 starts at the beginning of the arrays.
// blockRow is accepted but only blockColumn is used for thread indexing.
// NOTE(review): the printf loops at the bottom dump deviceB/deviceD for
// debugging on every participating thread — presumably leftover debug output.
__global__ void CalculatePArrayKernel(int step,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD)
{
int bx=blockIdx.x;
int by=blockIdx.y;
int tx=threadIdx.x;
int ty=threadIdx.y;
// offsets of the per-level segments inside the flat arrays
int helper11=pow(2.0,(EXPO+1)*1.0);
int helper22=pow(2.0,(EXPO-step+1)*1.0);
int helper44=pow(2.0,(EXPO-step+2)*1.0);
int helper33=pow(2.0,EXPO*1.0)-1;
//printf("step is running: %d \n",step);
// if(helper3<pow(2.0,(EXPO-step)*1.0)-1)
//--step 1 is special case.
/* if((tx!=(blockColumn-1))&&(ty!=(blockRow-1)))-----this is very important branch divergence happen here, need
//to figure out how exactly cuda works!!
/*****calcualte A******************/
int helper1=helper11;
int helper2=helper22;
int helper4=helper44;
int flag=0;//special for step1.
if(step==1)
{
helper1=0;
helper2=0;
helper4=0;
flag=1;
}
// helper3: 1-based index of the equation this thread eliminates at this level
int helper3=ty*blockColumn+tx+1;
if(helper3<=(pow(2.0,1.0*(EXPO-step))-1.0))
{
// new sub-diagonal entry: a' = -a(2i) * a(2i-1) / b(2i-1)
float ahelperfora1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperfora2=deviceA[-step+helper1-helper4+2*(helper3)-1];
float bhelperfora1=deviceB[-step+helper1-helper4+2*(helper3)-1];
deviceA[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*(ahelperfora1)*ahelperfora2/bhelperfora1;
//*****calculate C******************/
// new super-diagonal entry: c' = -c(2i) * c(2i+1) / b(2i+1)
float chelperforc1=deviceC[-step+helper1-helper4+2*(helper3)];
float chelperforc2=deviceC[-step+helper1-helper4+2*(helper3)+1];
float bhelperforc1=deviceB[-step+helper1-helper4+2*(helper3)+1];
deviceC[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=-1*chelperforc1*chelperforc2/bhelperforc1;
//calculate B***********************************************//
// new diagonal entry: b' = b(2i) - a(2i)/b(2i-1)*c(2i-1) - c(2i)/b(2i+1)*a(2i+1)
float bhelperforb1=deviceB[-step+helper1-helper4+2*(helper3)];
float bhelperforb2=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperforb3=deviceB[-step+helper1-helper4+2*(helper3)+1];
float ahelperforb1=deviceA[-step+helper1-helper4+2*(helper3)];
float ahelperforb2=deviceA[-step+helper1-helper4+2*(helper3)+1];
float chelperforb1=deviceC[-step+helper1-helper4+2*(helper3)-1];
float chelperforb2=deviceC[-step+helper1-helper4+2*(helper3)];
deviceB[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=bhelperforb1-ahelperforb1/bhelperforb2*chelperforb1-chelperforb2/bhelperforb3*ahelperforb2;
//calculate D***************************************************//
// new right-hand side: d' = d(2i) - a(2i)/b(2i-1)*d(2i-1) - c(2i)/b(2i+1)*d(2i+1)
float dhelperford1=deviceD[-step+helper1-helper4+2*(helper3)];
float dhelperford2=deviceD[-step+helper1-helper4+2*(helper3)-1];
float dhelperford3=deviceD[-step+helper1-helper4+2*(helper3)+1];
float ahelperford1=deviceA[-step+helper1-helper4+2*(helper3)];
float bhelperford1=deviceB[-step+helper1-helper4+2*(helper3)-1];
float bhelperford2=deviceB[-step+helper1-helper4+2*(helper3)+1];
float chelperford1=deviceC[-step+helper1-helper4+2*(helper3)];
deviceD[-1-step+helper1-helper2+helper3+flag*(1+helper33)]=dhelperford1-ahelperford1/bhelperford1*dhelperford2-chelperford1/bhelperford2*dhelperford3;
for(int i=0;i<6;i++)
{
//printf("cudab %lf \n",deviceB[i]);
printf("cudab %lf \n",deviceB[i]);
}
for(int i=0;i<6;i++)
{
//printf("cudab %lf \n",deviceB[i]);
printf("cudad %lf \n",deviceD[i]);
}
}
// NOTE(review): this barrier sits after a divergent if; it is only safe if
// all threads of the block reach it, which they do here because the if has
// no else and ends before this line.
__syncthreads();
}
// Back-substitution step k of the scalar cyclic reduction: thread i solves for
// the unknown at position (2i-1)*2^(k-1) of deviceFinalX from its already-known
// neighbors at distance h = 2^(k-1), using the level-k coefficients stored in
// deviceA/deviceB/deviceC/deviceD at offset -k + 2^(EXPO+1) - 2^(EXPO-k+2).
// blockRow, bx1/by1 and initialValue are unused here (initialValue is applied
// by the host before the first launch).
__global__ void BackwardKernel(int k,int blockRow, int blockColumn,float* deviceA, float* deviceB, float* deviceC, float* deviceD,float* deviceFinalX,float initialValue)
{
int bx1=blockIdx.x;
int by1=blockIdx.y;
int tx1=threadIdx.x;
int ty1=threadIdx.y;
//printf("inside of kernle %f \n",deviceFinalX[4]);
// backhelper1: 1-based thread index; backhelper2 = 2i-1 (odd equations of this level)
int backhelper1=ty1*blockColumn+tx1+1;
int backhelper2=2*backhelper1-1;//(int((2*backhelper1-1)*pow(2.0,1.0*(k-1))))/(int)(pow(2.0,(k-1)*1.0));
int backhelper3=(int)pow(2.0,(EXPO+1)*1.0);
int backhelper4=(int)pow(2.0,(EXPO-k+2)*1.0);
int h=(int)(pow(2.0,1.0*(k-1)));
float backhelperd=deviceD[-k+backhelper3-backhelper4+backhelper2];
float backhelpera=deviceA[-k+backhelper3-backhelper4+backhelper2];
float backhelperb=deviceB[-k+backhelper3-backhelper4+backhelper2];
float backhelperc=deviceC[-k+backhelper3-backhelper4+backhelper2];
// neighbors at +-h around the unknown being solved
int xindex1=backhelper2*pow(2.0,1.0*(k-1))-h;
int xindex2=backhelper2*pow(2.0,1.0*(k-1))+h;
//so thread i will be in charge of (2i-1)*2^(k-1) calculation
//printf("%d ",int((2*backhelper1-1)*pow(2.0,1.0*(k-1))));
// x = (d - a*x_left - c*x_right) / b
deviceFinalX[(int)(backhelper2*pow(2.0,1.0*(k-1)))]=(backhelperd-backhelpera*deviceFinalX[xindex1]-backhelperc*deviceFinalX[xindex2])*1.0/backhelperb;
__syncthreads();
}
//***************************end of not block version of cyclic reduction*********************************************************************************//
// Host driver: solves a 2D Poisson-style problem with block cyclic reduction.
// Phase 1: CalculateTheD builds the reduced block system on the GPU.
// Phase 2: for each half-index j, a scalar tridiagonal system is set up in
// Z/ZA/ZB/ZC, reduced by CalculatePArrayKernel, and back-substituted with
// BackwardKernel; the result is collected into FinalX and finally into X.
// NOTE(review): deviceX and the Z/ZA/ZB/ZC (host and device) buffers are
// never freed, and the inner `for(int j=1;j<EXPO;j++)` shadows the outer j —
// both look unintentional; confirm before relying on this code.
int main()
{
//matrix size will be 63*63 as our setup
int m=pow(2,EXPO)-1;
int loopH=pow(2,EXPO-1);
//syntax will follow the routine in the book
float *B;
float *C;
float *D;
float *X; //X to store the solution
float *newB;
float *newC;
float *newD;
// domain [a,b] = [0,1]
int b=1;
int a=0;
int maxBlockSize=16;
//B and C share the same chuck length
int chunkLength=m*m;
// grid spacing and its square (scales the right-hand side)
float delta=(b-a)*1.0/(m+1.0);
float deltaSquare=delta*delta;
int chunkSize=chunkLength*sizeof(float);
// printf("value of m %d and delta %lf!! \n",m,delta);
B=(float*)malloc(chunkSize);
C=(float*)malloc(chunkSize);
D=(float*)malloc(chunkSize);
//this is to store the final answer
X=(float*)malloc(chunkSize);
newB=(float*)malloc(chunkSize);
newC=(float*)malloc(chunkSize);
newD=(float*)malloc(chunkSize);
//initilize B: zero out B and C, then fill B as tridiagonal(1, -4, 1)
for(int i=0;i<m;i++)
{
for(int j=0;j<m;j++)
{
B[i*m+j]=0.0;
C[i*m+j]=0.0;
}
}
for(int i=0;i<m;i++)
{
B[i*m+i]=-4.0;
if(i!=0)
{
B[i*m+i-1]=1.0;
}
if(i!=m-1)
{
B[i*m+i+1]=1.0;;
}
}
//initilize C as the identity matrix
for(int i=0;i<m;i++)
{
C[i*m+i]=1.0;
}
// right-hand side D from f(x,y) = 2x^2 + 2y^2 - 2x - 2y, scaled by delta^2
for(int i=0;i<m;i++)
{
for(int j=0;j<m;j++)
{
float x=(j+1)*delta;
float y=(i+1)*delta;
D[i*m+j]=(2*x*x+2*y*y-2*x-2*y)*deltaSquare;
//printf("%lf",D[i*m+j]);
}
//printf("\n");
}
//initilize x
for(int i=0;i<m;i++)
{
for(int j=0;j<m;j++)
{
X[i*m+j]=0.0;
}
}
//printf("let test this2:\n");
float *deviceB,*deviceC,*deviceD,*deviceX,*devicenewB,*devicenewC,*devicenewD;
cudaMalloc((void**)&deviceB,chunkSize);
cudaMalloc((void**)&deviceC,chunkSize);
cudaMalloc((void**)&deviceD,chunkSize);
cudaMalloc((void**)&deviceX,chunkSize);
cudaMalloc((void**)&devicenewB,chunkSize);
cudaMalloc((void**)&devicenewC,chunkSize);
cudaMalloc((void**)&devicenewD,chunkSize);
cudaMemcpy(deviceB,B,chunkSize,cudaMemcpyHostToDevice); //store previous value
cudaMemcpy(deviceC,C,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceD,D,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceX,X,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(devicenewB,newB,chunkSize,cudaMemcpyHostToDevice); //store current stored value
cudaMemcpy(devicenewC,newC,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(devicenewD,newD,chunkSize,cudaMemcpyHostToDevice);
//int gridSize=((m+1)/maxBlockSize)*((m+1)/maxBlockSize); //gridSize for this problem will be 16
//dim3 dimGrid(1,gridSize)
dim3 dimGrid(1,1); //since the maximum process we are going to use will be 63*63
int blockRow=maxBlockSize;//pow(2,EXPO/2); //here will be 8 and 8
int blockColumn=maxBlockSize;//pow(2,EXPO/2); //here will be 8 and 8
dim3 dimBlock(blockColumn,blockRow);
// Phase 1: block cyclic reduction of the matrix system on the GPU
for(int step=1;step<EXPO;step++)
{
//so the logic here will be if step is odd, then it use B,C,D as the old value and new value into newB, newC,newD.
//if step is even, then use newB,newC,newD as the old value and put the update value into B,C,D.
//here is to calculate the d(2^(k-1))(K-1) in the book
CalculateTheD<<<dimGrid,dimBlock>>>(step,deviceB,deviceC,deviceD,deviceX,devicenewB,devicenewC,devicenewD);
}
//the last step here will be 5 so it will write its new value into newB, newC, newD.
// NOTE(review): this assumes the last reduction step is odd so the result is
// in devicenewD — confirm for the configured EXPO.
cudaMemcpy(newD,devicenewD,chunkSize,cudaMemcpyDeviceToHost);
/* for (int i=0;i<m;i++)
{
if(i%8==0)
{
printf("\n");
}
printf("%lf ",newD[3+i]);
}*/
//release some of the memory
cudaFree(deviceB);
cudaFree(deviceC);
cudaFree(deviceD);
cudaFree(devicenewB);
cudaFree(devicenewC);
cudaFree(devicenewD);
free(B);
free(C);
free(D);
free(newB);
free(newC);
free(newD);
/*cudaMemcpy(deviceB,B,chunkSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceC,C,chunkSize,cudaMemcpyHostToDevice);*/
//z will D in the not block version of cyclic reduction, ZA, ZB, ZC will corresponding to A, B and C
float *Z,*ZA,*ZB,*ZC,*FinalX;
int finalLengthX=(int)pow(2,EXPO)+1;
// length of the flattened per-level storage for the scalar reduction
int chunkLength1=(pow(2,EXPO)-1)*2+1;
int zSize=chunkLength1*sizeof(float);
Z=(float*)malloc(zSize);
ZA=(float*)malloc(zSize);
ZB=(float*)malloc(zSize);
ZC=(float*)malloc(zSize);
FinalX=(float*)malloc(finalLengthX*sizeof(float)); //the first and last one should be know by the boundary condition
float *deviceZ,*deviceZA,*deviceZB, *deviceZC,*deviceFinalX;
cudaMalloc((void**)&deviceZ,zSize);
cudaMalloc((void**)&deviceZA,zSize);
cudaMalloc((void**)&deviceZB,zSize);
cudaMalloc((void**)&deviceZC,zSize);
cudaMalloc((void**)&deviceFinalX,finalLengthX*sizeof(float));
//set up the matrix step
// Phase 2: solve one scalar tridiagonal system per half-index j
for(int j=1;j<=loopH;j++)
{
//for each j, za,zb,zc all going to be different
ZA[0]=0;
for(int i=1;i<m;i++)
{
ZA[i]=1.0;
}
//else will be 0,since it has been seperate to half and half
for(int i=m;i<chunkLength;i++)
{
ZA[i]=0;
}
// diagonal entries depend on j through the cosine eigenvalue term
for(int i=0;i<m;i++)
{
ZB[i]=-4.0+2*cos((2.0*j-1.0)/(m+1.0)*PI);
//printf("zb:%f \n",ZB[i]);
}
for(int i=m;i<chunkLength;i++)
{
ZB[i]=0;
}
ZC[m-1]=0;
for(int i=0;i<m-1;i++)
{
ZC[i]=1.0;
}
for(int i=m;i<chunkLength;i++)
{
ZC[i]=0;
}
//if it is the first step z will be from d, otherwise, z will be from the previous solution of x
if(j==1)
{
for(int i=0;i<m;i++)
{
Z[i]=newD[(loopH-1)*m+i]*(-1.0);
printf("this original one being called? %lf \n",Z[i]);
}
for(int i=m;i<chunkLength;i++)
{
Z[i]=0;
}
}
else
{
for(int i=0;i<m;i++)
{
//to do this will be x
Z[i]=FinalX[i+1];
printf("does this ever called? %lf \n",Z[i]);
}
for(int i=m;i<chunkLength;i++)
{
Z[i]=0;
}
}
//now need to call the cyclic function to find the solution of x
cudaMemcpy(deviceZ,Z,zSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceZA,ZA,zSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceZB,ZB,zSize,cudaMemcpyHostToDevice);
cudaMemcpy(deviceZC,ZC,zSize,cudaMemcpyHostToDevice);
// forward elimination levels (note: this j shadows the outer loop's j)
for(int j=1;j<EXPO;j++)
{
//the lock size should change, the first step it will need 2^(n-j)-1, so first step will be 3 if n=3
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-j)/2);
//printf("blockrow is :%d \n",blockRow);
int blockColumn=pow(2,EXPO-j-(EXPO-j)/2);
//printf("blockColumn is :%d \n",blockColumn);
dim3 dimBlock(blockColumn,blockRow);
//in each step the processor being used should decrease should be 2^(n-j)-1 in jth step
CalculatePArrayKernel<<<dimGrid,dimBlock>>>(j,blockRow,blockColumn,deviceZA,deviceZB,deviceZC,deviceZ);
}
//backward
//copy the device vector to host
//cudaMemcpy(ZA,deviceZA,chunkSize,cudaMemcpyDeviceToHost);
// sleep(20);
cudaDeviceSynchronize(); //cpu will wait until cuda finish the job, this is such important function!
cudaMemcpy(ZB,deviceZB,zSize,cudaMemcpyDeviceToHost);
for(int i=0;i<2*m;i++)
{
printf("zbresult:%lf \n",ZB[i]);
}
//cudaMemcpy(C,deviceC,chunkSize,cudaMemcpyDeviceToHost);
cudaMemcpy(Z,deviceZ,zSize,cudaMemcpyDeviceToHost);
// the fully reduced single equation gives the middle unknown directly
int lastIndex=(int)pow(2,EXPO+1)-EXPO-3;
float initialValue=Z[lastIndex]/ZB[lastIndex];
printf("initial value: %lf \n",initialValue);
// boundary condition on the left, middle unknown seeds the back-substitution
FinalX[0]=0;
FinalX[(int)pow(2,EXPO-1)]=initialValue;
//printf("the value in the middle is: %f and this suppose to close to 0.5 when n goes big! \n",FinalX[(int)pow(2,EXPO-1)]);
cudaMemcpy(deviceFinalX,FinalX,finalLengthX*sizeof(float),cudaMemcpyHostToDevice);
for(int k=EXPO-1;k>=1;k--)
{
//so the most one will use 2^(n-k) variable will be covered!
dim3 dimGrid(1,1);
int blockRow=pow(2,(EXPO-k)/2);
int blockColumn=pow(2,EXPO-k-(EXPO-k)/2);
dim3 dimBlock(blockColumn,blockRow);
BackwardKernel<<<dimGrid,dimBlock>>>(k,blockRow,blockColumn,deviceZA,deviceZB,deviceZC,deviceZ,deviceFinalX,initialValue);
}
cudaDeviceSynchronize();
cudaMemcpy(FinalX,deviceFinalX,finalLengthX*sizeof(float),cudaMemcpyDeviceToHost);
}
printf("\n final result for x(2^(k-1) block which should have %d values in it:\n",m);
for (int i=1;i<finalLengthX-1;i++)
{
//this will we stored in X the 2^(k-1) the block.
X[(loopH-1)*m+i-1]=FinalX[i];
printf("%lf ",FinalX[i]);
}
//now need to do the block wise backsubstitution based on the formula of 5.4.2.17
// NOTE(review): this back-substitution loop body was never finished — the
// innermost if is empty, so only the middle block of X is ever filled in.
for(int step=EXPO-1;step>=1;step--)
{
//based on formula 5.4.2.30
//ok this is loop trhough the matrix in 5.4.2.17
int help1=pow(2,EXPO-step);
for(int backStep=1;backStep<=help1;backStep++)
{
//factorize B(step-1)
//first and last one need to be treat specially, C[j-1] will be just identity matrix here
if(backStep==1)
{
}
}
}
}
//now we begin with the second step, based on formula 5.4.2.30
// in order to solve this sequentially we need the non-block version of cyclic reduction that in previous homework and sinceH here is 63*63 so that we need 63 threads at most to solve this
//in this way one block should be enough.
/* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <math.h>
// Element-wise kernel: O[i] = A[i] + threadIdx.x for each of the
// N * 1024 * 1024 floats (N is the problem size in MiB of floats; the host
// launches one thread per element).
// FIX: the original computed i = (global thread id) * N, which with the
// i < N*1024*1024 guard meant only every N-th element was written and the
// rest of O was left uninitialized. The index is now the plain global
// thread id so every element is processed exactly once.
extern "C" __global__ void ADD(float * A, float*O,int N) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < N * 1024 * 1024){
        O[i] = A[i]+threadIdx.x;
    }
}
// Benchmark driver: for data sizes of N = 1..40 MiB of floats, measures the
// host-to-device copy, the ADD kernel, and the device-to-host copy with CUDA
// events, printing one table row per size.
int main(void) {
    for (int N = 1; N <= 40; N++) {
        const int numElements = N * 1024 * 1024;
        const size_t bytes = (size_t)numElements * sizeof(float);

        // host buffers: input filled with uniform random values in [0,1]
        float *hostIn = (float*)malloc(bytes);
        float *hostOut = (float*)malloc(bytes);

        // device buffers
        float *devIn = NULL;
        float *devOut = NULL;
        cudaMalloc((void**)&devIn, bytes);
        cudaMalloc((void**)&devOut, bytes);

        // one start/stop event pair per measured phase
        cudaEvent_t h2dStart, h2dStop, kStart, kStop, d2hStart, d2hStop;
        cudaEventCreate(&h2dStart);
        cudaEventCreate(&h2dStop);
        cudaEventCreate(&kStart);
        cudaEventCreate(&kStop);
        cudaEventCreate(&d2hStart);
        cudaEventCreate(&d2hStop);

        for (int i = 0; i < numElements; i++) {
            hostIn[i] = rand()/(float)RAND_MAX;
        }

        // phase 1: host -> device copy
        cudaEventRecord(h2dStart);
        cudaMemcpy(devIn, hostIn, bytes, cudaMemcpyHostToDevice);
        cudaEventRecord(h2dStop);
        cudaEventSynchronize(h2dStop);
        float h2dMs = 0;
        cudaEventElapsedTime(&h2dMs, h2dStart, h2dStop);

        // phase 2: kernel launch (one thread per element)
        const int threadsPerBlock = 256;
        const int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
        cudaEventRecord(kStart);
        ADD<<<blocksPerGrid, threadsPerBlock>>>(devIn, devOut, N);
        cudaEventRecord(kStop);
        cudaDeviceSynchronize ();
        cudaEventSynchronize(kStop);
        float kernelMs = 0;
        cudaEventElapsedTime(&kernelMs, kStart, kStop);

        // phase 3: device -> host copy
        cudaEventRecord(d2hStart);
        cudaMemcpy(hostOut, devOut, bytes, cudaMemcpyDeviceToHost);
        cudaEventRecord(d2hStop);
        cudaEventSynchronize(d2hStop);
        float d2hMs = 0;
        cudaEventElapsedTime(&d2hMs, d2hStart, d2hStop);

        // release everything before the next size
        cudaFree(devIn);
        cudaFree(devOut);
        free(hostIn);
        free(hostOut);
        cudaEventDestroy(h2dStart);
        cudaEventDestroy(h2dStop);
        cudaEventDestroy(kStart);
        cudaEventDestroy(kStop);
        cudaEventDestroy(d2hStart);
        cudaEventDestroy(d2hStop);

        // table header is printed exactly once, before the first row
        if (N == 1) {
            printf("%s\n%s\n%s\n","FIRSTNAME: XINYUN","LASTNAME: LV","E-MAIL: xinyunlv0425@gmail.com");
            printf("%-28s%-15s%-15s%-15s\n","N"," CPUtoGPU(ms)"," Kernel(ms)"," GPUtoCPU(ms)");
        }
        printf("%-30d%-15f%-15f%-15f\n",N,h2dMs,kernelMs,d2hMs);
    }
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <time.h>
#define MAXN 1000000
//todo: read clusters from file
//todo: the choise for init clusters
//todo: the ending criteria
// --- Global state shared by the host and CUDA k-means routines below ---
const int ThreadsPerBlock = 1024; // max value since CC2.0
int BlocksPerGridN = 0; // grid size for kernels launched over the N points
int BlocksPerGridK = 0; // grid size for kernels launched over the K clusters
int N; // number of points
int K; // number of clusters
int T; // number of iterations
char INPUT_FILE[256]; // input file name
float *POINTS; // POINTS[i*2+0]:x POINTS[i*2+1]:y
int *CLASSES; // class for each point
int *NUM_CLASSES; // number of points in each class
float *CLUSTERS; // position for each cluster
float *OLD_CLUSTERS; // position for each cluster (previous iteration)
// size for each array (bytes, used for malloc/cudaMalloc/cudaMemcpy)
size_t S_POINTS;
size_t S_CLASSES;
size_t S_NUM_CLASSES;
size_t S_CLUSTERS;
// values on CUDA device (mirrors of the host arrays above)
int USEGPU; // use gpu or cpu
int SYNC; // synchronize data between cpu and gpu after each iter
float *D_POINTS; // POINTS[i*2+0]:x POINTS[i*2+1]:y
int *D_CLASSES; // class for each point
int *D_NUM_CLASSES; // number of points in each class
float *D_CLUSTERS; // position for each cluster
float *D_OLD_CLUSTERS; // position for each cluster
// Writes the per-point class labels to "Classes.txt" (one label per line)
// and the k cluster centers to "Clusters.txt" (as "x,y" per line).
// FIX: the original dereferenced the fopen() result without checking it;
// a write-protected directory would have crashed with a NULL deref.
void write_results(int n, int k){
    FILE *outputFile;
    int i;
    outputFile = fopen("Classes.txt", "w");
    if (outputFile == NULL) {
        fprintf(stderr, "Failed to open Classes.txt for writing\n");
        exit(EXIT_FAILURE);
    }
    for(i=0;i<n;i++){
        fprintf(outputFile, "%d\n", CLASSES[i]);
    }
    fclose(outputFile);
    outputFile = fopen("Clusters.txt", "w");
    if (outputFile == NULL) {
        fprintf(stderr, "Failed to open Clusters.txt for writing\n");
        exit(EXIT_FAILURE);
    }
    for(i=0;i<k;i++){
        fprintf(outputFile, "%f,%f\n", CLUSTERS[i*2], CLUSTERS[i*2+1]);
    }
    fclose(outputFile);
}
// CPU assignment step: label each of the n points with the index of its
// nearest cluster center (squared Euclidean distance, no sqrt needed for
// comparison). Reads POINTS and CLUSTERS, writes CLASSES.
void update_classes(int n, int k){ //based on CLUSTERS
    for(int p = 0; p < n; p++){
        // start with cluster 0 as the current best
        float dx = POINTS[p*2]   - CLUSTERS[0];
        float dy = POINTS[p*2+1] - CLUSTERS[1];
        float bestDis = dx*dx + dy*dy;
        int bestK = 0;
        for(int c = 1; c < k; c++){
            dx = POINTS[p*2]   - CLUSTERS[c*2];
            dy = POINTS[p*2+1] - CLUSTERS[c*2+1];
            float d = dx*dx + dy*dy;
            if(d < bestDis){
                bestK = c;
                bestDis = d;
            }
        }
        CLASSES[p] = bestK;
    }
}
// GPU assignment step: one thread per point; thread p labels point p with the
// index of its nearest cluster (squared Euclidean distance).
__global__ void cuda_update_classes_kernel(const float *d_points,
                                           const float *d_clusters,
                                           int *d_classes,
                                           int n, int k){
    int p = blockDim.x * blockIdx.x + threadIdx.x;
    if(p >= n) return; // grid tail guard

    // seed the search with cluster 0
    float dx = d_points[p*2]   - d_clusters[0];
    float dy = d_points[p*2+1] - d_clusters[1];
    float bestDis = dx*dx + dy*dy;
    int bestK = 0;
    for(int c = 1; c < k; c++){
        dx = d_points[p*2]   - d_clusters[c*2];
        dy = d_points[p*2+1] - d_clusters[c*2+1];
        float d = dx*dx + dy*dy;
        if(d < bestDis){
            bestK = c;
            bestDis = d;
        }
    }
    d_classes[p] = bestK;
}
// GPU counterpart of update_classes(): launches cuda_update_classes_kernel
// over the n points. With sync != 0 it first uploads CLUSTERS and afterwards
// downloads CLASSES; with sync == 0 the caller is responsible for keeping the
// device buffers current.
// NOTE(review): there is no cudaGetLastError() after the launch, so a failed
// launch would only surface through the (blocking) cudaMemcpy below.
void cuda_update_classes(int n, int k, int sync=1){ // based on CLUSTERS, sync: synchronize between host and device
    cudaError_t cuerr = cudaSuccess; // use with cudaGetErrorString(cuerr);
    int err;
    // copy data to device
    if(sync){
        err = 1;
        //err &= cudaMemcpy(D_POINTS, POINTS, S_POINTS, cudaMemcpyHostToDevice) == cudaSuccess;
        err &= cudaMemcpy(D_CLUSTERS, CLUSTERS, S_CLUSTERS, cudaMemcpyHostToDevice) == cudaSuccess;
        if (!err)
        {
            fprintf(stderr, "Failed to copy data from host to device\n");
            exit(EXIT_FAILURE);
        }
    }
    cuda_update_classes_kernel<<<BlocksPerGridN, ThreadsPerBlock>>>(D_POINTS, D_CLUSTERS, D_CLASSES, n, k);
    // copy result to host (the blocking memcpy also waits for the kernel)
    if(sync){
        err = 1;
        err &= (cuerr = cudaMemcpy(CLASSES, D_CLASSES, S_CLASSES, cudaMemcpyDeviceToHost)) == cudaSuccess;
        //printf("err code %s %d\n", cudaGetErrorString(cuerr), err);
        if (!err)
        {
            fprintf(stderr, "Failed to copy data from device to host\n");
            exit(EXIT_FAILURE);
        }
    }
}
// CPU histogram: recomputes NUM_CLASSES[c] = number of points labeled c.
void count_classes(int n, int k){
    for(int c = 0; c < k; c++){
        NUM_CLASSES[c] = 0;
    }
    for(int p = 0; p < n; p++){
        NUM_CLASSES[CLASSES[p]] += 1;
    }
}
// Zeroes the per-cluster counters; one thread per cluster.
__global__ void cuda_count_classes_kernel_clean(int *d_num_classes,
                                                int k){
    int c = blockDim.x * blockIdx.x + threadIdx.x;
    if(c < k) d_num_classes[c] = 0;
}
// Histogram accumulation: one thread per point; each thread atomically bumps
// the counter of its point's class (atomics are required because many points
// share a class).
__global__ void cuda_count_classes_kernel_sum(const int *d_classes,
                                              int *d_num_classes,
                                              int n){
    int p = blockDim.x * blockIdx.x + threadIdx.x;
    if(p >= n) return;
    atomicAdd(&d_num_classes[d_classes[p]], 1);
}
// GPU counterpart of count_classes(): zeroes D_NUM_CLASSES and accumulates
// the histogram of D_CLASSES on the device. With sync != 0 the resulting
// counts are copied back into NUM_CLASSES.
// NOTE(review): the host->device branch has all of its copies commented out,
// so its error check can never fire; it is kept only for symmetry with the
// sibling wrappers.
void cuda_count_classes(int n, int k, int sync=1){
    cudaError_t cuerr = cudaSuccess; // use with cudaGetErrorString(cuerr);
    int err;
    // copy data to device
    if(sync){
        err = 1;
        //err &= cudaMemcpy(D_POINTS, POINTS, S_POINTS, cudaMemcpyHostToDevice) == cudaSuccess;
        //err &= cudaMemcpy(D_CLASSES, CLASSES, S_CLASSES, cudaMemcpyHostToDevice) == cudaSuccess;
        if (!err)
        {
            fprintf(stderr, "Failed to copy data from host to device\n");
            exit(EXIT_FAILURE);
        }
    }
    cuda_count_classes_kernel_clean<<<BlocksPerGridK, ThreadsPerBlock>>>(D_NUM_CLASSES, k);
    cuda_count_classes_kernel_sum<<<BlocksPerGridN, ThreadsPerBlock>>>(D_CLASSES, D_NUM_CLASSES, n);
    // copy result to host (the blocking memcpy also waits for the kernels)
    if(sync){
        err = 1;
        err &= (cuerr = cudaMemcpy(NUM_CLASSES, D_NUM_CLASSES, S_NUM_CLASSES, cudaMemcpyDeviceToHost)) == cudaSuccess;
        //printf("err code %s %d\n", cudaGetErrorString(cuerr), err);
        if (!err)
        {
            fprintf(stderr, "Failed to copy data from device to host\n");
            exit(EXIT_FAILURE);
        }
    }
}
// CPU update step: recomputes each cluster center as the mean of its assigned
// points, and refreshes NUM_CLASSES as a side effect. An empty cluster yields
// 0/0 = NaN here; clean_clusters() is responsible for repairing those.
void update_clusters(int n, int k){ // based on CLASSES
    // reset accumulators
    for(int c = 0; c < k; c++){
        CLUSTERS[c*2] = 0;
        CLUSTERS[c*2+1] = 0;
        NUM_CLASSES[c] = 0;
    }
    // accumulate point coordinates per class
    for(int p = 0; p < n; p++){
        int c = CLASSES[p];
        NUM_CLASSES[c] += 1;
        CLUSTERS[c*2] += POINTS[p*2];
        CLUSTERS[c*2+1] += POINTS[p*2+1];
    }
    // turn the sums into means (NaN for empty clusters, as in the original)
    for(int c = 0; c < k; c++){
        CLUSTERS[c*2] /= NUM_CLASSES[c];
        CLUSTERS[c*2+1] /= NUM_CLASSES[c];
    }
}
// Zeroes the cluster-sum accumulators and counters; one thread per cluster.
__global__ void cuda_update_clusters_kernel_clean(float *d_clusters,
                                                  int *d_num_classes,
                                                  int k){
    int c = blockDim.x * blockIdx.x + threadIdx.x;
    if(c >= k) return;
    d_clusters[c*2] = 0;
    d_clusters[c*2+1] = 0;
    d_num_classes[c] = 0;
}
// Naive accumulation variant: one thread per point; each thread atomically
// adds its point's coordinates to its cluster's running sums and bumps the
// cluster counter. Heavy contention on popular clusters — the reduce variant
// below is the preferred alternative.
__global__ void cuda_update_clusters_kernel_sum(const float *d_points,
                                                const int *d_classes,
                                                float *d_clusters,
                                                int *d_num_classes,
                                                int n){
    int p = blockDim.x * blockIdx.x + threadIdx.x;
    if(p >= n) return;
    int c = d_classes[p];
    atomicAdd(&d_num_classes[c], 1);
    atomicAdd(&d_clusters[c*2], d_points[p*2]);
    atomicAdd(&d_clusters[c*2+1], d_points[p*2+1]);
}
// Shared-memory reduction variant of the accumulation step: for each cluster
// in turn, every thread deposits either its point (if the point belongs to
// that cluster) or zeros into shared memory, the block tree-reduces the
// partials, and thread 0 commits one atomicAdd per block per cluster.
// Dynamic shared memory layout (set by the launcher):
//   [blockDim.x ints][blockDim.x floats (x)][blockDim.x floats (y)].
// FIX: the original wrapped the whole loop — including every __syncthreads()
// — inside if(i < n). When n is not a multiple of blockDim.x the tail
// threads skipped the barriers (undefined behavior per the CUDA programming
// model) and left their shared-memory slots uninitialized, which the
// reduction then read. Now all threads participate in the barriers, and
// out-of-range threads contribute zeros (_class = -1 matches no cluster).
__global__ void cuda_update_clusters_kernel_sum_reduce(const float *d_points,
                                                       const int *d_classes,
                                                       float *d_clusters,
                                                       int *d_num_classes,
                                                       int n, int k){
    extern __shared__ int shared[];
    int *shared_num_classes = shared;
    float *shared_clusters_x = (float*)&shared_num_classes[blockDim.x];
    float *shared_clusters_y = (float*)&shared_clusters_x[blockDim.x];
    int tid = threadIdx.x;
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int _class = (i < n) ? d_classes[i] : -1; // -1: this lane never matches
    for(int cluster = 0; cluster < k; cluster++){
        if(cluster == _class){
            shared_num_classes[tid] = 1;
            shared_clusters_x[tid] = d_points[i*2];
            shared_clusters_y[tid] = d_points[i*2+1];
        }else{
            shared_num_classes[tid] = 0;
            shared_clusters_x[tid] = 0;
            shared_clusters_y[tid] = 0;
        }
        __syncthreads();
        // standard power-of-two tree reduction over the block
        for(int stride = blockDim.x/2; stride > 0; stride >>= 1){
            if(tid < stride){
                shared_num_classes[tid] += shared_num_classes[tid+stride];
                shared_clusters_x[tid] += shared_clusters_x[tid+stride];
                shared_clusters_y[tid] += shared_clusters_y[tid+stride];
            }
            __syncthreads();
        }
        /* we understand that a better way is to store the results of each blocks
           and apply another reduce. But there are just a few blocks running at
           the same time, so we believe this version is fast enough.*/
        if(tid == 0){
            atomicAdd(&d_num_classes[cluster], shared_num_classes[0]);
            atomicAdd(&d_clusters[cluster*2], shared_clusters_x[0]);
            atomicAdd(&d_clusters[cluster*2+1], shared_clusters_y[0]);
        }
        __syncthreads(); // keep shared[] stable until thread 0 has committed it
    }
}
// Turns the accumulated coordinate sums into means; one thread per cluster.
// As in the host version, an empty cluster divides by zero and yields NaN,
// which cuda_clean_clusters() later repairs.
__global__ void cuda_update_clusters_kernel_divide(float *d_clusters,
                                                   const int *d_num_classes,
                                                   int k){
    int c = blockDim.x * blockIdx.x + threadIdx.x;
    if(c >= k) return;
    d_clusters[c*2] /= d_num_classes[c];
    d_clusters[c*2+1] /= d_num_classes[c];
}
// GPU counterpart of update_clusters(): clean -> accumulate (reduction
// variant) -> divide, all on the device. The third launch argument sizes the
// dynamic shared memory to match the reduce kernel's layout of one int plus
// two floats per thread. With sync != 0, CLUSTERS and NUM_CLASSES are copied
// back to the host afterwards.
void cuda_update_clusters(int n, int k, int sync=1){ // based on CLUSTERS, sync: synchronize between host and device
    cudaError_t cuerr = cudaSuccess; // use with cudaGetErrorString(cuerr);
    int err;
    // copy data to device
    if(sync){
        err = 1;
        //err &= cudaMemcpy(D_POINTS, POINTS, S_POINTS, cudaMemcpyHostToDevice) == cudaSuccess;
        //err &= cudaMemcpy(D_CLASSES, CLASSES, S_CLASSES, cudaMemcpyHostToDevice) == cudaSuccess;
        if (!err)
        {
            fprintf(stderr, "Failed to copy data from host to device\n");
            exit(EXIT_FAILURE);
        }
    }
    cuda_update_clusters_kernel_clean<<<BlocksPerGridK, ThreadsPerBlock>>>(D_CLUSTERS, D_NUM_CLASSES, k);
    //cuda_update_clusters_kernel_sum<<<BlocksPerGridN, ThreadsPerBlock>>>(D_POINTS, D_CLASSES, D_CLUSTERS, D_NUM_CLASSES, n);
    cuda_update_clusters_kernel_sum_reduce<<<BlocksPerGridN, ThreadsPerBlock, sizeof(int)*ThreadsPerBlock+sizeof(float)*ThreadsPerBlock*2>>>(D_POINTS, D_CLASSES, D_CLUSTERS, D_NUM_CLASSES, n, k);
    cuda_update_clusters_kernel_divide<<<BlocksPerGridK, ThreadsPerBlock>>>(D_CLUSTERS, D_NUM_CLASSES, k);
    // copy result to host (the blocking memcpy also waits for the kernels)
    if(sync){
        err = 1;
        err &= (cuerr = cudaMemcpy(CLUSTERS, D_CLUSTERS, S_CLUSTERS, cudaMemcpyDeviceToHost)) == cudaSuccess;
        err &= (cuerr = cudaMemcpy(NUM_CLASSES, D_NUM_CLASSES, S_NUM_CLASSES, cudaMemcpyDeviceToHost)) == cudaSuccess;
        //printf("err code %s %d\n", cudaGetErrorString(cuerr), err);
        if (!err)
        {
            fprintf(stderr, "Failed to copy data from device to host\n");
            exit(EXIT_FAILURE);
        }
    }
}
// Removes empty clusters by swapping the last live cluster into the empty
// slot and shrinking *K. CLASSES indices are invalid afterwards. The slot is
// re-examined without advancing i, so a moved-in empty cluster is handled too.
// FIX: valid clusters occupy indices 0..*K-1, so the last one lives at *K-1;
// the original copied from index *K — one past the end of the live range
// (and potentially past the allocation when the arrays are sized to K).
void clean_clusters_0(int n, int *K){ // remove empty clusters, CLASSES are invalid after this process
    int i = 0;
    while(i < *K){
        if(NUM_CLASSES[i] == 0){
            int last = *K - 1;
            CLUSTERS[i*2] = CLUSTERS[last*2];
            CLUSTERS[i*2+1] = CLUSTERS[last*2+1];
            NUM_CLASSES[i] = NUM_CLASSES[last];
            (*K)--;
        }else{
            i++;
        }
    }
}
// Repairs empty clusters by restoring their previous-iteration position from
// OLD_CLUSTERS, then snapshots the (repaired) CLUSTERS into OLD_CLUSTERS for
// the next iteration. *K must point at the current cluster count.
void clean_clusters(int n, int *K=NULL){ // use old positions for empty clusters
    for(int c = 0; c < *K; c++){
        if(NUM_CLASSES[c] == 0){
            printf("cluster %d empty, use old value\n", c);
            CLUSTERS[c*2] = OLD_CLUSTERS[c * 2];
            CLUSTERS[c*2+1] = OLD_CLUSTERS[c * 2 + 1];
        }
    }
    memcpy(OLD_CLUSTERS, CLUSTERS, S_CLUSTERS);
}
// Device version of the empty-cluster repair: one thread per cluster. An
// empty cluster is restored from the old positions; a non-empty one has its
// new position saved as the old position for the next iteration.
__global__ void cuda_clean_clusters_kernel(float *d_clusters,
                                           float *d_old_clusters,
                                           int *d_num_classes,
                                           int k){
    int c = blockDim.x * blockIdx.x + threadIdx.x;
    if(c >= k) return;
    if(d_num_classes[c] == 0){
        d_clusters[c*2] = d_old_clusters[c*2];
        d_clusters[c*2+1] = d_old_clusters[c*2+1];
    }else{
        d_old_clusters[c*2] = d_clusters[c*2];
        d_old_clusters[c*2+1] = d_clusters[c*2+1];
    }
}
void cuda_clean_clusters(int n, int *K=NULL, int sync=1){ // use old positions for empty clusters
    cudaError_t cuerr = cudaSuccess; // use with cudaGetErrorString(cuerr);
    // debug/sync mode: upload the current host state before the kernel runs
    if(sync){
        int ok = 1;
        ok &= cudaMemcpy(D_CLUSTERS, CLUSTERS, S_CLUSTERS, cudaMemcpyHostToDevice) == cudaSuccess;
        ok &= cudaMemcpy(D_OLD_CLUSTERS, OLD_CLUSTERS, S_CLUSTERS, cudaMemcpyHostToDevice) == cudaSuccess;
        ok &= cudaMemcpy(D_NUM_CLASSES, NUM_CLASSES, S_NUM_CLASSES, cudaMemcpyHostToDevice) == cudaSuccess;
        if (!ok)
        {
            fprintf(stderr, "Failed to copy data from host to device\n");
            exit(EXIT_FAILURE);
        }
    }
    // one thread per cluster (see cuda_clean_clusters_kernel)
    cuda_clean_clusters_kernel<<<BlocksPerGridK, ThreadsPerBlock>>>(D_CLUSTERS, D_OLD_CLUSTERS, D_NUM_CLASSES, *K);
    // debug/sync mode: download the results back to the host
    if(sync){
        int ok = 1;
        ok &= (cuerr = cudaMemcpy(CLUSTERS, D_CLUSTERS, S_CLUSTERS, cudaMemcpyDeviceToHost)) == cudaSuccess;
        ok &= (cuerr = cudaMemcpy(OLD_CLUSTERS, D_OLD_CLUSTERS, S_CLUSTERS, cudaMemcpyDeviceToHost)) == cudaSuccess;
        if (!ok)
        {
            fprintf(stderr, "Failed to copy data from device to host\n");
            exit(EXIT_FAILURE);
        }
    }
}
void clean_clusters_2(int n, int *K=NULL){ // random choose from points
    // Re-seed every empty cluster with a uniformly chosen data point, then
    // refresh OLD_CLUSTERS to mirror the repaired centroids.
    for (int c = 0; c < *K; c++) {
        if (NUM_CLASSES[c] != 0)
            continue;
        int p = (rand()) % n;
        printf("cluster %d empty, replace with point %d\n", c, p);
        CLUSTERS[c*2]   = POINTS[p * 2];
        CLUSTERS[c*2+1] = POINTS[p * 2 + 1];
    }
    memcpy(OLD_CLUSTERS, CLUSTERS, S_CLUSTERS);
}
void init(int n, int k, char *input, int updateClasses){ // malloc and read points (and clusters)
    // Reads n "x,y" lines from `input` into POINTS, allocates the class and
    // cluster arrays, and seeds the first k clusters from the first k points.
    FILE *inputFile;
    int i;
    float x,y;
    // read points
    S_POINTS = n * 2 * sizeof(float);
    POINTS = (float*)malloc(S_POINTS);
    if (POINTS == NULL){
        fprintf(stderr, "Failed to allocate host memory\n");
        exit(EXIT_FAILURE);
    }
    inputFile = fopen(input, "r");
    if (inputFile == NULL){ // previously dereferenced a NULL stream on a missing file
        fprintf(stderr, "Failed to open input file %s\n", input);
        exit(EXIT_FAILURE);
    }
    for(i=0;i<n;i++){
        if(fscanf(inputFile, "%f,%f\n", &x, &y)==2){
            POINTS[i*2] = x;
            POINTS[i*2+1] = y;
        }
    }
    fclose(inputFile);
    // classes init
    S_CLASSES = n * sizeof(int);
    CLASSES = (int*)malloc(S_CLASSES);
    // clusters init
    S_NUM_CLASSES = k * sizeof(int);
    S_CLUSTERS = k * 2 * sizeof(float);
    NUM_CLASSES = (int*)malloc(S_NUM_CLASSES);
    CLUSTERS = (float*)malloc(S_CLUSTERS);
    OLD_CLUSTERS = (float*)malloc(S_CLUSTERS);
    if (CLASSES == NULL || NUM_CLASSES == NULL || CLUSTERS == NULL || OLD_CLUSTERS == NULL){
        fprintf(stderr, "Failed to allocate host memory\n");
        exit(EXIT_FAILURE);
    }
    // seed cluster centroids with the first k data points
    for(i=0;i<k;i++){
        CLUSTERS[i*2]=POINTS[i*2];
        CLUSTERS[i*2+1]=POINTS[i*2+1];
    }
    // update classes (CPU path only; the GPU path does this on-device)
    if(updateClasses){
        update_classes(n, k);
        count_classes(n, k);
    }
}
void cuda_init(int n, int k){ // malloc and copy data to device
    cudaError_t cuerr = cudaSuccess; // use with cudaGetErrorString(cuerr);
    // device allocations for all persistent buffers
    int ok = 1;
    ok &= (cuerr = cudaMalloc((void **)&D_POINTS, S_POINTS)) == cudaSuccess;
    ok &= (cuerr = cudaMalloc((void **)&D_CLASSES, S_CLASSES)) == cudaSuccess;
    ok &= (cuerr = cudaMalloc((void **)&D_NUM_CLASSES, S_NUM_CLASSES)) == cudaSuccess;
    ok &= (cuerr = cudaMalloc((void **)&D_CLUSTERS, S_CLUSTERS)) == cudaSuccess;
    ok &= (cuerr = cudaMalloc((void **)&D_OLD_CLUSTERS, S_CLUSTERS)) == cudaSuccess;
    if (!ok)
    {
        fprintf(stderr, "Failed to allocate device vector\n");
        exit(EXIT_FAILURE);
    }
    // initial upload: points and clusters; OLD mirrors CLUSTERS device-side
    ok = 1;
    ok &= cudaMemcpy(D_POINTS, POINTS, S_POINTS, cudaMemcpyHostToDevice) == cudaSuccess;
    ok &= cudaMemcpy(D_CLUSTERS, CLUSTERS, S_CLUSTERS, cudaMemcpyHostToDevice) == cudaSuccess;
    ok &= cudaMemcpy(D_OLD_CLUSTERS, D_CLUSTERS, S_CLUSTERS, cudaMemcpyDeviceToDevice) == cudaSuccess;
    if (!ok)
    {
        fprintf(stderr, "Failed to copy data from host to device\n");
        exit(EXIT_FAILURE);
    }
    // launch geometry: per-point grids (N) and per-cluster grids (K)
    BlocksPerGridN = (n + ThreadsPerBlock - 1) / ThreadsPerBlock;
    BlocksPerGridK = (k + ThreadsPerBlock - 1) / ThreadsPerBlock;
    printf("Using %d blocks of %d threads\n", BlocksPerGridN, ThreadsPerBlock);
    // initial point-to-cluster assignment on the device
    cuda_update_classes(n, k);
    cuda_count_classes(n, k);
}
void cuda_toHost(int n, int k){
    // Downloads the final cluster centroids and per-point assignments.
    cudaError_t cuerr = cudaSuccess; // use with cudaGetErrorString(cuerr);
    int noerr = 1;
    noerr &= (cuerr = cudaMemcpy(CLUSTERS, D_CLUSTERS, S_CLUSTERS, cudaMemcpyDeviceToHost)) == cudaSuccess;
    noerr &= (cuerr = cudaMemcpy(CLASSES, D_CLASSES, S_CLASSES, cudaMemcpyDeviceToHost)) == cudaSuccess;
    if (!noerr)
    {
        // BUGFIX: these are device-to-host copies; the old message claimed the opposite
        fprintf(stderr, "Failed to copy data from device to host\n");
        exit(EXIT_FAILURE);
    }
}
int data_count(char *fileName){
    // Counts lines of the form "x,y" in fileName. Returns 0 (with a message
    // on stderr) when the file cannot be opened, instead of crashing on a
    // NULL stream as the previous version did.
    FILE *inputFile;
    float x, y;
    int count=0;
    inputFile = fopen(fileName, "r");
    if (inputFile == NULL){
        fprintf(stderr, "Failed to open input file %s\n", fileName);
        return 0;
    }
    while(fscanf(inputFile, "%f,%f\n", &x, &y)==2){
        count++;
    }
    fclose(inputFile);
    return count;
}
int cmd_parser(int argc, char **argv, int *n, int *k, int *t, char *input){
    // Parses command-line options into *n/*k/*t/input and the file-scope
    // flags USEGPU and SYNC. Returns nonzero when the arguments are invalid
    // (and prints the usage text). When -n is omitted, N is derived from the
    // number of lines in the input file.
    int invalid;
    int valid;
    char ch;
    char usage[] = "Usage: %s -n N -k K -t T -i Input.txt [-g]\n"
            " N: Number_of_Points, default: the number of lines in Input_File\n"
            " K: default: 2\n"
            " T: max iterations for the kmeans algorithm\n"
            " Input: should be n lines, two floats in each line and split by ','\n"
            " -g: Use GPU, otherwise, use CPU only.\n"
            " -s: synchronize after each step (for debug).\n"
            " Results will be in Classes.txt and Clusters.txt\n";
    invalid = 0;
    valid = 0;
    if(argc==1){
        invalid = 1;
    }
    //default values
    *n = -1;      // sentinel: "derive from input file"
    *k = 2;
    *t = 1;
    USEGPU = 0;
    SYNC = 0;
    while((ch = getopt(argc, argv, "n:k:t:i:gsh")) != -1) {
        switch(ch) {
            case 'n':
                sscanf(optarg, "%d", n);
                break;
            case 'k':
                sscanf(optarg, "%d", k);
                break;
            case 't':
                sscanf(optarg, "%d", t);
                break;
            case 'i':
                // -i is the only mandatory option
                strncpy(input, optarg, 256);
                valid = 1;
                break;
            case 'g':
                USEGPU = 1;
                break;
            case 's':
                SYNC = 1;
                break;
            case 'h': //print help
                invalid = 1;
                break;
            case '?':
                invalid = 1;
                // intentional fall-through to default
            default:
                ;
        }
    }
    // no explicit -n: count the lines of the input file
    if(valid && *n==-1){
        *n = data_count(input);
    }
    invalid = invalid || !valid;
    if(invalid){
        printf(usage, argv[0]);
    }
    if(*n>MAXN){
        invalid = 1;
        printf("N is too large\n");
    }
    //printf("option N: %d\n", *n);
    //printf("option K: %d\n", *k);
    //printf("option T: %d\n", *t);
    //printf("option Input: %s\n", input);
    //printf("invalid %d\n", invalid);
    return invalid;
}
int main(int argc, char **argv) {
    // k-means driver: parse arguments, initialize data on the CPU (and
    // optionally the GPU), run T iterations of clean/assign/update, time the
    // loop with CUDA events, and write the results.
    int t;
    srand(time(0));
    if(cmd_parser(argc, argv, &N, &K, &T, INPUT_FILE)){ // not enough parameters
        return 1;
    }
    if(USEGPU){
        // GPU path: skip the host-side class update; cuda_init does it on-device
        init(N, K, INPUT_FILE, 0);
        cuda_init(N, K);
    }else{
        init(N, K, INPUT_FILE, 1);
    }
    // time the core loop with CUDA events (also works for the CPU path,
    // though it then just measures wall time between the records)
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    for(t=0;t<T;t++){
        if(USEGPU){
            cuda_clean_clusters(N, &K, SYNC);
            cuda_update_classes(N, K, SYNC);
            cuda_update_clusters(N, K, SYNC);
        }else{
            clean_clusters(N, &K);
            update_classes(N, K);
            update_clusters(N, K);
        }
        // debug mode: dump per-cluster population each iteration
        if(SYNC){
            printf("NUM CLASSES ");
            for(int i=0;i<K;i++){
                printf("%d, ",NUM_CLASSES[i]);
            }
            printf("\n");
        }
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &time, start, stop );
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    printf("Time for core computation: %f ms\n", time);
    if(USEGPU){
        cuda_toHost(N, K);
    }
    write_results(N, K);
    return 0;
}
|
10,020 | // 2D Ising model simulation via Metropolis-Hastings algorithm
// parallel setup ~ single checkboard: preventing race conditions
// include header(s)
#include <random>
#include <cmath>
#include <numeric>
#include <string>
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <stdio.h>
// time measurement
#include <chrono>
// cuRAND
#include <curand_kernel.h>
#include <curand.h>
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// constants
// spatial size of simulation table (use > 1 and even)
const int spatialSize = 1024;
// integration time: number of Metropolis sweeps
const int intTime = (int)1e4;
// coupling: appears as exp(-coupling * dE) in the Metropolis rate, i.e. J/kT
const float coupling = (float)0.45;
// file name to save data
const std::string fileName = "C:\\Users\\david\\Desktop\\MSc\\Ising model\\RENORM_HW1\\magnetisation.txt";
// number of threads per block
const int nThread = 64;
// number of blocks along one side of the lattice
const int sizeInBlocks = 16;
// total number of blocks (square tiling of the lattice)
const int nBlock = sizeInBlocks * sizeInBlocks;
// side length of a single block's tile
// NOTE(review): the kernels assume spatialSize is divisible by sizeInBlocks
// and blockSize by nThread — verify when changing any of these
const int blockSize = spatialSize / sizeInBlocks;
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// calculate the energy difference due to a single flip
// table: dim x dim row-major spin lattice of +/-1; returns dE (in units of J)
// for flipping the spin at (row, col) under periodic boundary conditions
__device__ int DeltaE(int *table, int row, int col, int dim)
{
    // spin in question
    int s = table[row * dim + col];
    // periodic boundary conditions
    int rowRight = (row + 1) % dim, rowLeft = (row + dim - 1) % dim, colDown = (col + 1) % dim, colUp = (col + dim - 1) % dim;
    // neighbours — BUGFIX: index with the `dim` parameter throughout; the old
    // code used the global spatialSize here, silently wrong for dim != spatialSize
    int right = table[rowRight * dim + col], left = table[rowLeft * dim + col], down = table[row * dim + colDown], up = table[row * dim + colUp];
    // return energy difference (divided by J)
    return 2 * s * (up + down + left + right);
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// calculate rate
// Metropolis acceptance probability for flipping the spin at (row, col).
__device__ float Rate(int *table, int row, int col, int dim, float coupling)
{
    // energy difference due to the proposed flip (in units of J)
    const int dE = DeltaE(table, row, col, dim);
    if (dE > 0)
        return expf(-coupling * dE);   // uphill move: Boltzmann factor
    return (dE < 0) ? 1.0f : 0.5f;     // downhill: always accept; flat: 1/2
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// square function for integers, callable from host and device code
__host__ __device__ int Square(int x)
{
    return x * x;
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// spin flip ~ site visit for given (row, col)
// Metropolis accept/reject: the spin is flipped when the acceptance rate
// exceeds a uniform random draw from the caller's cuRAND state.
__device__ void SpinFlip(int *table, float coupling, curandState &state, int row, int col)
{
    const float u = curand_uniform(&state);
    const float rate = Rate(table, row, col, spatialSize, coupling);
    if (rate > u)
        table[row * spatialSize + col] = -table[row * spatialSize + col];
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// kernel for Metropolis sweep ~ even sites
// Each block owns a blockSize x blockSize tile of the lattice; each thread
// handles blockSize / nThread consecutive rows of that tile and visits only
// the "even" checkerboard sites, so concurrently updated spins are never
// nearest neighbours.
__global__ void KernelMetropolisEven(int *table, curandState *states, float coupling, int sweep)
{
    // thread index inside the block
    int id = threadIdx.x;
    // block index
    int bid = blockIdx.x;
    // thread index
    int tid = bid * blockDim.x + id;
    // initialize cuRAND; seed 2*sweep keeps even and odd sweeps on distinct
    // random streams. NOTE(review): re-seeding on every launch is costly —
    // a one-time init kernel with persistent states would be faster.
    curand_init(2 * sweep, tid, 0, &states[tid]);
    // locate block and thread: tile origin (minCol is algebraically
    // (bid % sizeInBlocks) * blockSize)
    int minRow = (int)(bid / sizeInBlocks) * blockSize;
    int minCol = bid * blockSize - sizeInBlocks * minRow;
    // move to this thread's first row within the tile
    minRow += id * blockSize / nThread;
    for (int irow = minRow; irow < minRow + blockSize / nThread; irow++)
    {
        // columns for even sites only (starting column alternates per row)
        for (int icol = (((irow % 2) == 0) ? minCol : minCol + 1); icol < minCol + blockSize; icol += 2)
        {
            SpinFlip(table, coupling, states[tid], irow, icol);
        }
    }
}
// kernel for Metropolis sweep ~ odd sites
// Mirror of KernelMetropolisEven: same tile/thread decomposition but visits
// the complementary ("odd") checkerboard sites, and seeds cuRAND with
// 2*sweep + 1 so the two half-sweeps draw from distinct random streams.
__global__ void KernelMetropolisOdd(int *table, curandState *states, float coupling, int sweep)
{
    // thread index inside the block
    int id = threadIdx.x;
    // block index
    int bid = blockIdx.x;
    // thread index
    int tid = bid * blockDim.x + id;
    // initialize cuRAND (see the NOTE in KernelMetropolisEven about cost)
    curand_init(2 * sweep + 1, tid, 0, &states[tid]);
    // locate block and thread: tile origin (minCol == (bid % sizeInBlocks) * blockSize)
    int minRow = (int)(bid / sizeInBlocks) * blockSize;
    int minCol = bid * blockSize - sizeInBlocks * minRow;
    // move to this thread's first row within the tile
    minRow += id * blockSize / nThread;
    for (int irow = minRow; irow < minRow + blockSize / nThread; irow++)
    {
        // columns for odd sites only (starting column alternates per row)
        for (int icol = (((irow % 2) == 0) ? minCol + 1 : minCol); icol < minCol + blockSize; icol += 2)
        {
            SpinFlip(table, coupling, states[tid], irow, icol);
        }
    }
}
// ------------------------------------------------------------------------------------------------------------------------------------------------------
// main function
int main(int, char **)
{
    // initialize spins (cold start): every spin +1 on the host
    std::vector<int> table(Square(spatialSize), 1);
    // container for magnetisation values, one entry per sweep
    std::vector<double> m(intTime, 0.);
    // CUDA error handling
    cudaError_t err = cudaSuccess;
    // Device table and cuRAND states are allocated ONCE, before the sweep
    // loop. The previous version did cudaMalloc/cudaFree AND a redundant
    // host-to-device copy on every one of the 1e4 sweeps, which dominates
    // the runtime without changing any result.
    int *tableDev = nullptr;
    curandState *statesDev = nullptr;
    err = cudaMalloc((void **)&tableDev, Square(spatialSize) * sizeof(int));
    if (err != cudaSuccess)
    {
        std::cout << "Error allocating CUDA memory (TABLE): " << cudaGetErrorString(err) << std::endl;
        return -1;
    }
    err = cudaMalloc((void **)&statesDev, nBlock * nThread * sizeof(curandState));
    if (err != cudaSuccess)
    {
        std::cout << "Error allocating CUDA memory (cuRAND): " << cudaGetErrorString(err) << std::endl;
        return -1;
    }
    // upload the initial configuration once; the device copy stays current
    // because every sweep copies the table back to the host below
    err = cudaMemcpy(tableDev, table.data(), Square(spatialSize) * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        std::cout << "Error copying memory to device (TABLE): " << cudaGetErrorString(err) << std::endl;
        return -1;
    }
    // simulation: Metropolis sweeps over the checkerboard
    for (int iSweep = 0; iSweep < intTime; iSweep++)
    {
        // even sublattice, then odd sublattice (neighbours never race)
        KernelMetropolisEven<<<nBlock, nThread>>>(tableDev, statesDev, coupling, iSweep);
        KernelMetropolisOdd<<<nBlock, nThread>>>(tableDev, statesDev, coupling, iSweep);
        // get errors from the kernel launches
        err = cudaGetLastError();
        if (err != cudaSuccess)
        {
            std::cout << "CUDA error in kernel call: " << cudaGetErrorString(err) << std::endl;
            return -1;
        }
        // copy data from device (implicitly synchronizes with the kernels)
        err = cudaMemcpy(table.data(), tableDev, Square(spatialSize) * sizeof(int), cudaMemcpyDeviceToHost);
        if (err != cudaSuccess)
        {
            std::cout << "Error copying memory to host: " << cudaGetErrorString(err) << std::endl;
            return -1;
        }
        // magnetisation per site for this sweep
        m[iSweep] = std::accumulate(table.begin(), table.end(), 0.) / Square(spatialSize);
    }
    // free device memory once, after the loop
    err = cudaFree(tableDev);
    if (err != cudaSuccess)
    {
        std::cout << "Error freeing allocation (TABLE): " << cudaGetErrorString(err) << std::endl;
        return -1;
    }
    err = cudaFree(statesDev);
    if (err != cudaSuccess)
    {
        std::cout << "Error freeing allocation (cuRAND): " << cudaGetErrorString(err) << std::endl;
        return -1;
    }
    // write magnetisation results to file
    std::ofstream file;
    file.open(fileName);
    for (int im = 0; im < intTime; im++)
    {
        file << m[im] << std::endl;
    }
    file.close();
}
10,021 | #ifndef _CUDA_CONVOLUTION_H_CU_
#define _CUDA_CONVOLUTION_H_CU_
#endif
|
10,022 | /*! jacobi.cu
*/
#include "jacobi.cuh"
#include <iostream>
#include <fstream>
__global__
// One thread per grid point; interior points of `out` get the average of the
// four neighbours of `in` (one Jacobi relaxation step for a harmonic field).
// Grid is dimX x dimY, row-major with row length dimY.
void doJacobiIteration(int dimX, int dimY, float * in, float * out)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x,
              j = blockIdx.y * blockDim.y + threadIdx.y;
    const int offset = i * dimY + j;
    // Remember to do nothing for boundary (and out-of-grid) values.
    if( i < 1 || i > dimX - 2 )
        return;
    if( j < 1 || j > dimY - 2 )
        return;
    out += offset;
    in += offset;
    // Jacobi iteration for harmonic means the output is the average of the
    // neighbouring grid points. Use float literals: the previous 0.25
    // (double) promoted the whole expression to double-precision arithmetic.
    *out = *(in - 1) * 0.25f +
           *(in + 1) * 0.25f +
           *(in - dimY) * 0.25f +
           *(in + dimY) * 0.25f;
}
__host__
// Allocates the two device grids and seeds both with `values`.
// Throws a C-string message on any allocation or copy failure.
void copyToDevice(float * values, const int dimensions[2], float ** in, float ** out)
{
    const int memSize = dimensions[0] * dimensions[1] * sizeof(float);
    if (cudaMalloc( in, memSize ) != cudaSuccess)
        throw "Can't allocate in on device.";
    if (cudaMalloc( out, memSize ) != cudaSuccess)
        throw "Can't allocate out on device.";
    // both buffers start from the same initial field
    if (cudaMemcpy( *in, values, memSize, cudaMemcpyHostToDevice ) != cudaSuccess)
        throw "Can't copy values to in on device.";
    if (cudaMemcpy( *out, values, memSize, cudaMemcpyHostToDevice ) != cudaSuccess)
        throw "Can't copy values to out on device.";
}
__host__
// Writes f on the four edges of the dimensions[0] x dimensions[1] grid
// (row-major, row length dimensions[1]); interior points are left untouched.
void setBoundaryValues(float * values, const int dimensions[2], const float lowerLeft[2], const float upperRight[2], harmonic f)
{
    float stride[2], pos;
    int i, last[2] = {dimensions[0] - 1, dimensions[1] - 1};
    float * memPos1, * memPos2;
    for (i = 0; i < 2; i++)
        stride[i] = (upperRight[i] - lowerLeft[i]) / last[i];
    // Fill the first and last entry of every row (y = lowerLeft[1] / upperRight[1]).
    memPos1 = values;
    memPos2 = values + (dimensions[1]-1);
    for (i = 0, pos = lowerLeft[0]; i < dimensions[0]; i++, pos += stride[0], memPos1+=dimensions[1], memPos2+=dimensions[1])
    {
        *memPos1 = f(pos, lowerLeft[1]);
        *memPos2 = f(pos, upperRight[1]);
    }
    // Fill the interior of the first and last row. A row has dimensions[1] - 2
    // interior entries — the previous bound (dimensions[0] - 2) over- or
    // under-ran the rows whenever the grid was not square.
    memPos1 = values + 1;
    memPos2 = values + (dimensions[0] - 1) * dimensions[1] + 1;
    for (i = 0, pos = lowerLeft[1]+stride[1]; i < dimensions[1] - 2; i++, pos += stride[1], memPos1++ , memPos2++ )
    {
        *memPos1 = f(lowerLeft[0], pos);
        *memPos2 = f(upperRight[0], pos);
    }
}
__host__
// Builds the initial grid for the Jacobi iteration: interior points are
// zeroed, boundary points are set from f. Caller owns the result (delete[]).
float * makeInitialValues( const int dimensions[2], const float lowerLeft[2], const float upperRight[2], harmonic f )
{
    const int rows = dimensions[0], cols = dimensions[1];
    float * values = new float[rows * cols];
    // zero the interior; boundaries are handled below
    for (int i = 1; i < rows - 1; i++)
    {
        float * row = values + i * cols;
        for (int j = 1; j < cols - 1; j++)
            row[j] = 0;
    }
    setBoundaryValues( values, dimensions, lowerLeft, upperRight, f );
    return values;
}
__host__
// Evaluates f on every grid point: point (i, j) maps to
// (lowerLeft[0] + i * dx, lowerLeft[1] + j * dy). Caller owns the result.
float * makeTrueValues(const int dimensions[2], const float lowerLeft[2], const float upperRight[2], harmonic f)
{
    const int rows = dimensions[0], cols = dimensions[1];
    const float dx = (upperRight[0] - lowerLeft[0]) / static_cast<float>(rows - 1);
    const float dy = (upperRight[1] - lowerLeft[1]) / static_cast<float>(cols - 1);
    float * values = new float[rows * cols];
    float x = lowerLeft[0];
    for (int i = 0; i < rows; i++, x += dx)
    {
        float * row = values + i * cols;
        float y = lowerLeft[1];
        for (int j = 0; j < cols; j++, y += dy)
            row[j] = f(x, y);
    }
    return values;
}
__host__
// Element-wise difference values - trueValues over the whole grid.
// Caller owns the returned array (delete[]).
float * getErrors(const float * values, const float * trueValues, const int dimensions[2])
{
    const int total = dimensions[0] * dimensions[1];
    float * errors = new float[total];
    for (int idx = 0; idx < total; idx++)
        errors[idx] = values[idx] - trueValues[idx];
    return errors;
}
__host__
// log10 of |error| / |true| for every grid point, with cutOff guarding both
// the division by zero and the logarithm of zero. Caller owns the result.
float * getRelativeErrors(const float * errors, const float * trueValues, const int dimensions[2], float cutOff)
{
    const int total = dimensions[0] * dimensions[1];
    const float log10 = std::log(10);
    float * relErrors = new float[total];
    for (int idx = 0; idx < total; idx++)
    {
        float absError = abs(errors[idx]);
        float absTrue = abs(trueValues[idx]);
        // cutoff works around dividing by zero ...
        if (absTrue < cutOff)
            absTrue = cutOff;
        // ... and around taking the logarithm of zero
        const float ratio = absError / absTrue;
        relErrors[idx] = std::log(ratio < cutOff ? cutOff : ratio) / log10;
    }
    return relErrors;
}
__host__
// Mean absolute difference over all grid points; accumulated in double to
// limit rounding error, returned as float.
float getAverageError(const float * values, const float * trueValues, const int dimensions[2]) //dimX, const int dimY )
{
    const int total = dimensions[0] * dimensions[1];
    double sum = 0;
    for (int idx = 0; idx < total; idx++)
        sum += abs(values[idx] - trueValues[idx]);
    return static_cast<float>(sum / total);
}
__host__
// Dump the grid row by row, comma+tab separated, to stdout.
void printValues(const int dimensions[2], const float * values)
{
    for (int i = 0; i < dimensions[0]; i++)
    {
        for (int j = 0; j < dimensions[1]; j++)
            std::cout << values[i * dimensions[1] + j] << ",\t";
        std::cout << std::endl;
    }
    std::cout << std::endl;
}
__host__
// Writes (x, y, value) float triples for every grid point to `filename` as
// raw binary. Throws a C-string message when the file cannot be opened.
void saveToFile(const float * values, const int dimensions[2], const float lowerLeft[2], const float upperRight[2],
                const char * filename)
{
    std::ofstream myFile(filename, std::ios::binary);
    if(!myFile.is_open()) {
        throw "Unable to open file.";
    }
    unsigned int sizeValues = dimensions[0] * dimensions[1] * sizeof(float);
    float * tuples = new float[dimensions[0] * dimensions[1] * 3], * coord;
    float position[2], skip[2];
    for(int i = 0; i < 2; i++)
    {
        position[i] = lowerLeft[i];
        skip[i] = (upperRight[i] - lowerLeft[i]) / (dimensions[i] - 1);
    }
    coord = tuples;
    for( int i = 0; i < dimensions[0]; i++, position[0] += skip[0])
    {
        position[1] = lowerLeft[1];
        for( int j = 0; j < dimensions[1]; j++, position[1] += skip[1], values++)
        {
            *coord = position[0];
            coord++;
            *coord = position[1];
            coord++;
            *coord = *values;
            coord++;
        }
    }
    myFile.write((const char *) tuples, 3 * sizeValues);
    myFile.close();
    // BUGFIX: array form delete[] — plain `delete` on new[] is undefined behaviour
    delete[] tuples;
}
|
10,023 | #include <stdio.h>
#include <stdlib.h>
#define N 2048
#define BLOCK_SIZE 32
// Naive transpose of the N x N matrix: one thread per element.
__global__ void matrix_transpose_naive (int *input, int *output)
{
	int col = threadIdx.x + blockIdx.x * blockDim.x;
	int row = threadIdx.y + blockIdx.y * blockDim.y;
	// coalesced load, uncoalesced (strided) store
	output[col * N + row] = input[row * N + col];
	// the opposite trade-off (uncoalesced load) would be:
	// output[row * N + col] = input[col * N + row];
}
__global__ void matrix_transpose_shared (int *input, int *output)
{
	// tile in shared memory; the +1 padding on the inner dimension avoids
	// shared-memory bank conflicts on the transposed read below
	__shared__ int sharedMemory [BLOCK_SIZE] [BLOCK_SIZE + 1];
	// global-memory index of this element before the transpose
	int index_X = threadIdx.x + blockIdx.x * blockDim.x;
	int index_Y = threadIdx.y + blockIdx.y * blockDim.y;
	// global-memory index after the transpose (block coordinates swapped)
	int t_index_X = threadIdx.x + blockIdx.y * blockDim.x;
	int t_index_Y = threadIdx.y + blockIdx.x * blockDim.y;
	// local (in-tile) indices
	int local_index_X = threadIdx.x;
	int local_index_Y = threadIdx.y;
	int index = index_Y * N + index_X;
	int transposed_index = t_index_Y * N + t_index_X;
	// read from global memory into shared memory with coalesced accesses,
	// transposing within the tile
	sharedMemory[local_index_X][local_index_Y] = input[index];
	__syncthreads();
	// write the transposed tile back to global memory
	output[transposed_index] = sharedMemory[local_index_Y][local_index_X];
}
// fill the matrix so each element holds its own linear index
void fill_array(int *data) {
	for(int idx = 0; idx < (N * N); idx++)
		data[idx] = idx;
}
// print both matrices to stdout, N values per row
void print_output(int *a, int *b) {
	printf("\n Original Matrix::\n");
	for(int idx = 0; idx < N * N; idx++) {
		if(idx % N == 0)
			printf("\n");
		printf(" %d ", a[idx]);
	}
	printf("\n Transposed Matrix::\n");
	for(int idx = 0; idx < N * N; idx++) {
		if(idx % N == 0)
			printf("\n");
		printf(" %d ", b[idx]);
	}
}
int main(void)
{
	// transpose demo driver: fill an N x N matrix on the host, transpose it
	// on the device (shared-memory kernel), print both matrices
	int size = N * N * sizeof(int);
	// host buffers
	int *h_in = (int *)malloc(size);
	int *h_out = (int *)malloc(size);
	fill_array(h_in);
	// device buffers
	int *d_in, *d_out;
	cudaMalloc((void **)&d_in, size);
	cudaMalloc((void **)&d_out, size);
	// upload both buffers
	cudaMemcpy(d_in, h_in, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_out, h_out, size, cudaMemcpyHostToDevice);
	// launch geometry: one BLOCK_SIZE x BLOCK_SIZE tile per block
	dim3 blockSize(BLOCK_SIZE,BLOCK_SIZE,1);
	dim3 gridSize(N/BLOCK_SIZE,N/BLOCK_SIZE,1);
	// matrix_transpose_naive<<<gridSize, blockSize>>>(d_in, d_out);
	matrix_transpose_shared<<<gridSize, blockSize>>>(d_in, d_out);
	// download and show the result (the blocking copy also synchronizes)
	cudaMemcpy(h_out, d_out, size, cudaMemcpyDeviceToHost);
	print_output(h_in, h_out);
	// cleanup
	free(h_in);
	free(h_out);
	cudaFree(d_in);
	cudaFree(d_out);
	return 0;
}
10,024 | #include "str_opts.cuh"
/*
__device__ __host__ int strlen(char* s){
int c = 0;
while(*(s+c)){
c++;
}
return c;
}
__device__ __host__ int strcmp(char* str1, char* str2){
if (str1 == NULL || str2 == NULL){
return -1;
}
char* i = str1;
char* j = str2;
int i_len = strlen(str1);
int j_len = strlen(str2);
if (i_len != j_len){
return -1;
}
for(int x = 0; x < i_len && x < j_len; x++){
if ((int)i[x] > (int)j[x]){
return 1;
}
else if ((int)i[x] < (int)j[x]){
return -1;
}
//i++;
//j++;
}
return 0;
}
*/
|
10,025 | #include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <vector>
#include "cuda-kernel.cu"
using namespace std;
// uniformly distributed random float in [low, high], driven by rand()
float uniform(float low, float high) {
    float t = static_cast<float>(rand()) / RAND_MAX;
    return low + t * (high - low);
}
// LIF (leaky integrate-and-fire) network state plus host/device buffers.
// Typical lifecycle: construct -> allocate() -> copyToDevice() -> simulate()
// -> copyFromDevice() -> free().
class Simulation {
    public:
        int n_rec;                       // number of recurrent neurons
        int n_in;                        // number of input channels
        int n_ref;                       // refractory period (timesteps)
        float threshold;                 // spike threshold
        float tau_volt;                  // membrane time constant
        float volt_coeff;                // exp(-1/tau_volt), per-step decay
        bool device_memory_allocated;    // true after allocate()
        int buffer_length;               // timesteps kept in the state buffers
        int net_state_size;              // buffer_length * n_rec
        int weights_rec_size;            // n_rec * n_rec
        int weights_in_size;             // n_in * n_rec
        vector<float> weights_rec;       // recurrent weights (presynaptic-major)
        vector<float> weights_in;        // input weights (presynaptic-major)
        float* voltages;                 // membrane voltages, net_state_size floats
        float* spikes;                   // spike outputs, net_state_size floats
        // BUGFIX: the device mirror is int* and all copies use sizeof(int),
        // so the host buffer must also be int — the old vector<float> shipped
        // float bit patterns into an int buffer (benign only while all-zero).
        vector<int> refractory_buffer;
        // constructor that mallocs its own voltage/spike buffers
        Simulation(int nrec,
                int nin,
                int nref,
                float thr,
                float tau_v,
                int buffer_len);
        // constructor that adopts caller-provided voltage/spike buffers
        Simulation(int nrec,
                int nin,
                int nref,
                float thr,
                float tau_v,
                int buffer_len,
                float* voltage_array,
                float* spike_array);
        void allocate();        // cudaMalloc all device buffers
        void copyToDevice();    // upload weights and state
        void simulate(int timesteps);   // run the LIF kernel
        void copyFromDevice();  // download weights and state
        void free();            // cudaFree all device buffers
    protected:
        float* weights_in_device;
        float* weights_rec_device;
        float* voltages_device;
        float* spikes_device;
        int* refractory_buffer_device;
};
// Constructor that owns its voltage/spike buffers (malloc'd here).
Simulation::Simulation(int nrec,
                       int nin,
                       int nref,
                       float thr,
                       float tauv,
                       int buffer_len) {
    n_rec = nrec;
    n_in = nin;
    n_ref = nref;
    threshold = thr;
    tau_volt = tauv;
    volt_coeff = exp(-1.0/tauv);   // per-step voltage decay factor
    device_memory_allocated = false;
    buffer_length = buffer_len;
    net_state_size = buffer_len*nrec;
    weights_rec_size = nrec*nrec;
    weights_in_size = nin*nrec;
    // weights: first index is presynaptic, second index is postsynaptic
    weights_in.reserve(weights_in_size);
    for(int idx = 0; idx < weights_in_size; ++idx) {
        weights_in.push_back(uniform(0.0, 1.0));
    }
    weights_rec.reserve(weights_rec_size);
    for(int idx = 0; idx < weights_rec_size; ++idx) {
        weights_rec.push_back(uniform(-1.0, 1.0));
    }
    voltages = (float*) malloc(net_state_size*sizeof(float));
    spikes = (float*) malloc(net_state_size*sizeof(float));
    // refractory counters start at zero everywhere
    refractory_buffer.assign(net_state_size, 0);
    weights_in_device = NULL;
    weights_rec_device = NULL;
    voltages_device = NULL;
    spikes_device = NULL;
    refractory_buffer_device = NULL;
}
// Constructor that adopts caller-provided voltage/spike buffers
// (the caller retains ownership of volt_arr and spike_arr).
Simulation::Simulation(int nrec,
                       int nin,
                       int nref,
                       float thr,
                       float tauv,
                       int buffer_len,
                       float* volt_arr,
                       float* spike_arr) {
    n_rec = nrec;
    n_in = nin;
    n_ref = nref;
    threshold = thr;
    tau_volt = tauv;
    volt_coeff = exp(-1.0/tauv);   // per-step voltage decay factor
    device_memory_allocated = false;
    buffer_length = buffer_len;
    net_state_size = buffer_len*nrec;
    weights_rec_size = nrec*nrec;
    weights_in_size = nin*nrec;
    // weights: first index is presynaptic, second index is postsynaptic
    weights_in.reserve(weights_in_size);
    for(int idx = 0; idx < weights_in_size; ++idx) {
        weights_in.push_back(uniform(0.0, 1.0));
    }
    weights_rec.reserve(weights_rec_size);
    for(int idx = 0; idx < weights_rec_size; ++idx) {
        weights_rec.push_back(uniform(-1.0, 1.0));
    }
    voltages = volt_arr;
    spikes = spike_arr;
    // refractory counters start at zero everywhere
    refractory_buffer.assign(net_state_size, 0);
    weights_in_device = NULL;
    weights_rec_device = NULL;
    voltages_device = NULL;
    spikes_device = NULL;
    refractory_buffer_device = NULL;
}
// Allocate every device-side buffer (weights, state, refractory counters).
void Simulation::allocate() {
    cudaMalloc(&weights_in_device, weights_in_size * sizeof(float));
    cudaMalloc(&weights_rec_device, weights_rec_size * sizeof(float));
    cudaMalloc(&voltages_device, net_state_size * sizeof(float));
    cudaMalloc(&spikes_device, net_state_size * sizeof(float));
    // refractory counters live as ints on the device
    cudaMalloc(&refractory_buffer_device, net_state_size * sizeof(int));
    device_memory_allocated = true;
}
// Upload weights and network state to the device buffers created by allocate().
void Simulation::copyToDevice() {
    // weights first, then state
    cudaMemcpy(weights_in_device, weights_in.data(), weights_in_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(weights_rec_device, weights_rec.data(), weights_rec_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(voltages_device, voltages, net_state_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(spikes_device, spikes, net_state_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(refractory_buffer_device, refractory_buffer.data(), net_state_size * sizeof(int), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
}
void Simulation::simulate(int timesteps) {
    // Drives the LIF kernel for `timesteps` steps with freshly drawn random
    // input currents covering the whole buffer window.
    // std::vector replaces the original variable-length array — VLAs are a
    // non-standard C++ extension and can overflow the stack for large buffers.
    std::vector<float> in_currents(buffer_length * n_in);
    float* in_currents_device = NULL;
    cudaMalloc(&in_currents_device, buffer_length*n_in*sizeof(float));
    for(int i = 0; i < buffer_length; ++i) {
        for(int j = 0; j < n_in; ++j) {
            in_currents[n_in*i + j] = uniform(0.0, 0.3);
        }
    }
    cudaMemcpy(in_currents_device, in_currents.data(), buffer_length*n_in*sizeof(float), cudaMemcpyHostToDevice);
    // single block, one thread per recurrent neuron
    cudakernel::stepLIF<<<1, n_rec>>>(voltages_device,
                                      spikes_device,
                                      refractory_buffer_device,
                                      in_currents_device,
                                      weights_in_device,
                                      weights_rec_device,
                                      n_in,
                                      n_rec,
                                      weights_in_size,
                                      weights_rec_size,
                                      buffer_length,
                                      n_ref,
                                      volt_coeff,
                                      threshold,
                                      0,
                                      timesteps);
    cudaDeviceSynchronize();
    // free the temporary device-side input buffer
    cudaFree(in_currents_device);
}
// Download weights and network state back into the host-side buffers.
void Simulation::copyFromDevice() {
    cudaMemcpy(weights_in.data(), weights_in_device, weights_in_size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(weights_rec.data(), weights_rec_device, weights_rec_size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(voltages, voltages_device, net_state_size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(spikes, spikes_device, net_state_size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(refractory_buffer.data(), refractory_buffer_device, net_state_size * sizeof(int), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
}
// Release every device buffer. Safe to call before allocate():
// the pointers start as NULL and cudaFree(NULL) is documented as a no-op.
void Simulation::free() {
    cudaFree(refractory_buffer_device);
    cudaFree(spikes_device);
    cudaFree(voltages_device);
    cudaFree(weights_rec_device);
    cudaFree(weights_in_device);
    device_memory_allocated = false;
}
// C-ABI wrappers so the Simulation can be driven through a foreign-function
// interface (e.g. Python ctypes). The caller owns the Simulation* lifetime.
extern "C" {
// construct a simulation that adopts caller-provided voltage/spike arrays
Simulation* new_simulation(int nrec, int nin, int nref, float thr, float tauv, int buffer_len, float* volt_arr, float* spike_arr) {
    return new Simulation(nrec, nin, nref, thr, tauv, buffer_len, volt_arr, spike_arr);
}
// thin forwarding wrappers over the corresponding methods
void allocate_simulation(Simulation* sim) { sim->allocate(); }
void copy_to_device(Simulation* sim) { sim->copyToDevice(); }
void run_simulation(Simulation* sim, int steps) { sim->simulate(steps); }
void copy_from_device(Simulation* sim) { sim->copyFromDevice(); }
void free_simulation(Simulation* sim) { sim->free(); }
}
|
10,026 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel: folds the argv-supplied
// operands through nested float intrinsics and prints the accumulator from
// the device. The file header says "Do not modify" — code is left untouched.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37) {
for (int i=0; i < var_1; ++i) {
  if (comp <= (var_3 - asinf(+1.7521E10f))) {
    comp += (-1.9690E-41f - asinf(var_4 - +0.0f - -1.9133E-35f + (-1.5446E-35f - (var_5 - -1.6507E16f))));
    comp = (var_6 - +1.8865E17f);
    float tmp_1 = log10f(+1.9512E34f + (var_7 * (var_8 / -1.0342E-35f)));
    comp += tmp_1 / (var_9 * sinhf(var_10 + (+1.0115E36f - +1.8865E-35f - atan2f(var_11 / -1.0065E-37f / -1.3746E34f, asinf(fabsf(-0.0f + var_12 + (+1.0080E-37f * atan2f((-1.4713E34f - var_13 - var_14), (-1.5362E22f / -1.5605E-41f - (+1.0096E35f / (-1.9276E-42f + var_15 / var_16)))))))))));
    if (comp == -1.0641E-42f / (+0.0f * sinhf((var_17 - var_18 + var_19 / (-1.0401E4f + (-0.0f - var_20)))))) {
      float tmp_2 = +1.3573E-36f;
      float tmp_3 = var_21 * +1.6859E35f - (var_22 * -1.0250E-21f / +1.4568E-42f * +1.0954E34f);
      float tmp_4 = -1.0442E-42f;
      comp += tmp_4 - tmp_3 / tmp_2 - (var_23 * (var_24 / (var_25 * var_26 - +1.5615E14f * var_27)));
    }
    for (int i=0; i < var_2; ++i) {
      comp += +1.9032E36f - -1.1718E-43f / (var_28 - (+1.7680E-42f + +1.0898E-44f));
    }
    if (comp <= var_29 - (var_30 - var_31 + var_32 - -1.4134E-35f + +1.4831E-43f)) {
      comp += (var_33 + var_34 - -1.0453E-36f * -1.4205E-44f + (var_35 - var_36));
      comp = -0.0f + fabsf((var_37 + +1.2200E-1f));
    }
  }
}
// device-side printf of the accumulated value
printf("%.17g\n", comp);
}
// allocate a 10-element float array filled with v (caller frees with free())
float* initPointer(float v) {
    float *arr = (float*) malloc(sizeof(float) * 10);
    for (int k = 0; k < 10; ++k)
        arr[k] = v;
    return arr;
}
int main(int argc, char** argv) {
  /* Program variables */
  // argv[1] is the float accumulator seed, argv[2..3] are int loop bounds,
  // argv[4..38] are float operands — all parsed positionally.
  // NOTE(review): argc is never validated; running with fewer than 38
  // arguments dereferences missing argv entries (generated-harness assumption).
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);
  float tmp_29 = atof(argv[29]);
  float tmp_30 = atof(argv[30]);
  float tmp_31 = atof(argv[31]);
  float tmp_32 = atof(argv[32]);
  float tmp_33 = atof(argv[33]);
  float tmp_34 = atof(argv[34]);
  float tmp_35 = atof(argv[35]);
  float tmp_36 = atof(argv[36]);
  float tmp_37 = atof(argv[37]);
  float tmp_38 = atof(argv[38]);
  // single-thread launch; the kernel prints its own result
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38);
  // wait so the device printf flushes before the process exits
  cudaDeviceSynchronize();
  return 0;
}
|
10,027 | #include <cstdio>
#include <cstdlib>
#include <vector>
// Counting sort of `key` (values in [0, range)) using `bucket` as the
// histogram: one thread per key. Phase 1 histograms with atomics; phase 2
// rewrites key[i] with the smallest value whose running bucket total
// exceeds i.
__global__ void bucketsort(int* key,int* bucket,int N){
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  // FIX: was `i > N`, which let thread i == N read key[N] out of bounds.
  if(i>=N) return;
  atomicAdd(&bucket[key[i]],1);
  // NOTE(review): __syncthreads() only orders threads within one block;
  // with more than one block the reads of `bucket` below can race with
  // other blocks' atomicAdds. A grid-wide barrier (i.e. splitting the two
  // phases into separate kernel launches) is required for a fully correct
  // multi-block sort — confirm intended launch configuration.
  __syncthreads();
  for(int accumulate=0,val=0;accumulate<=i;val++){
    key[i]=val;
    accumulate+=bucket[val];
  }
}
// Driver: fills `key` with N random values in [0, range), bucket-sorts
// them on the device, and prints before/after. Uses managed memory so
// both host and device can touch the arrays.
int main() {
  int N = 100;
  const int M=64;
  int range = 5;
  int *key;
  int *bucket;
  cudaMallocManaged(&key,N*sizeof(int));
  for (int i=0; i<N; i++) {
    key[i] = rand() % range;
    printf("%d ",key[i]);
  }
  printf("\n");
  cudaMallocManaged(&bucket,range*sizeof(int));
  for(int i=0;i<range;i++)bucket[i]=0;
  bucketsort<<<(N+M-1)/M,M>>>(key,bucket,N);
  cudaDeviceSynchronize();   // wait before reading managed memory on host
  for (int i=0; i<N; i++) {
    printf("%d ",key[i]);
  }
  printf("\n");
  // FIX: release the managed allocations (previously leaked).
  cudaFree(key);
  cudaFree(bucket);
}
|
10,028 | /**
*
* Matrix Multiplication - CUDA for GPUs
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
// Thread block size
#define BLOCK_SIZE 32
#define STRIDE BLOCK_SIZE
int input_size;
typedef struct {
int width;
int height;
int stride;
float* elements;
} Matrix;
typedef struct
{
float ** element;
} matrix;
// Read one element of a row-major matrix whose row pitch is A.stride.
__device__ float GetElement(const Matrix A, int row, int col) {
	const float* rowStart = A.elements + row * A.stride;
	return rowStart[col];
}
// Write one element of a row-major matrix whose row pitch is A.stride.
__device__ void SetElement(Matrix A, int row, int col, float value) {
	float* rowStart = A.elements + row * A.stride;
	rowStart[col] = value;
}
// View of the BLOCK_SIZE x BLOCK_SIZE tile of A at tile coordinates
// (row, col). The view shares A's storage and stride; no copy is made.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col) {
	Matrix tile;
	tile.width    = BLOCK_SIZE;
	tile.height   = BLOCK_SIZE;
	tile.stride   = A.stride;
	tile.elements = A.elements + BLOCK_SIZE * (A.stride * row + col);
	return tile;
}
// Current wall-clock time in nanoseconds: CLOCK_REALTIME on Linux,
// gettimeofday (microsecond resolution, scaled to ns) elsewhere.
long long wall_clock_time()
{
#ifdef __linux__
	struct timespec tp;
	clock_gettime(CLOCK_REALTIME, &tp);
	return (long long)tp.tv_sec * 1000000000ll + (long long)tp.tv_nsec;
#else
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return (long long)tv.tv_sec * 1000000000ll + (long long)(tv.tv_usec * 1000);
#endif
}
// Allocate the flat input_size x input_size element buffer for m.
// FIX: exit with an error on allocation failure, matching the behavior
// of allocate_matrix_seq (the original left a NULL pointer for callers
// to dereference).
void allocate_matrix(Matrix* m)
{
	m->elements = (float*)malloc(input_size * input_size * sizeof(float));
	if (m->elements == NULL)
	{
		fprintf(stderr, "Out of memory\n");
		exit(1);
	}
}
// Allocate a ragged input_size x input_size matrix: a row-pointer table
// plus one buffer per row. Exits with an error if any allocation fails.
void allocate_matrix_seq(matrix* m)
{
	// row-pointer table
	m->element = (float**)malloc(sizeof(float*) * input_size);
	if (m->element == NULL)
	{
		fprintf(stderr, "Out of memory\n");
		exit(1);
	}
	// one contiguous buffer per row
	for (int row = 0; row < input_size; row++)
	{
		m->element[row] = (float*)malloc(sizeof(float) * input_size);
		if (m->element[row] == NULL)
		{
			fprintf(stderr, "Out of memory\n");
			exit(1);
		}
	}
}
/**
* Free the memory allocated for a matrix.
**/
// void free_matrix(Matrix* m) {
// int i;
// for (i = 0; i < size*size; i++)
// cudaFree(m->elements[i]);
// // int i;
// // for (i = 0; i < size; i++)
// // cudaFree(m->elements[i]);
// // cudaFree(m->elements);
// }
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
void init_matrix(Matrix m)
{
// NOTE(review): m is passed by value, so this stride assignment only
// mutates the local copy and is lost on return; the element writes
// below persist because the pointer itself is copied.
m.stride = STRIDE;
int i;
for (i = 0; i < input_size*input_size; i++) {
m.elements[i] = rand() % 10;   // random value in [0, 9]
}
}
// Fill the ragged matrix with random values in [0, 9], row by row.
void init_matrix_seq(matrix m)
{
	for (int i = 0; i < input_size; i++)
	{
		for (int j = 0; j < input_size; j++)
			m.element[i][j] = rand() % 10;
	}
}
// Sequential reference multiply: result = a * b (input_size square).
// FIX: each output cell is zeroed before accumulation — the buffers come
// from malloc, so the original `+=` accumulated into indeterminate values.
void mm(matrix a, matrix b, matrix result)
{
	int i, j, k;
	for (i = 0; i < input_size; i++)
		for (j = 0; j < input_size; j++)
		{
			result.element[i][j] = 0.0f;
			for (k = 0; k < input_size; k++)
				result.element[i][j] += a.element[i][k] * b.element[k][j];
		}
}
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Host-side driver for the tiled kernel: copies A and B to the device,
// launches MatMulKernel with one BLOCK_SIZE x BLOCK_SIZE block per output
// tile, copies C back, and frees the device buffers.
// Precondition: A/B/C width and height are set by the caller and are
// multiples of BLOCK_SIZE (the kernel has no tail handling).
// NOTE(review): no CUDA error checking here — failures surface only via
// the cudaGetLastError() call in work().
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.stride = A.width;
d_A.width = d_A.stride;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.stride = B.width;
d_B.width = d_B.stride;
d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.stride = C.width;
d_C.width = d_C.stride;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory (blocking copy — also synchronizes the kernel)
cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Tiled shared-memory matrix multiply: each block computes one
// BLOCK_SIZE x BLOCK_SIZE tile of C, each thread one element.
// Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE) and matrix dimensions
// that are exact multiples of BLOCK_SIZE (no bounds checks).
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C) {
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
// Runs the sequential and CUDA multiplications on identical inputs,
// times both, and compares the results element by element.
// FIXES over the original:
//  * The Matrix headers (width/height/stride) were never initialized,
//    yet MatMul reads width/height to size its device buffers and grid —
//    they are now set to input_size explicitly.
//  * a_seq/b_seq were filled from separate rand() draws, so the CPU and
//    GPU multiplied *different* matrices and the comparison was
//    meaningless — the sequential copies now mirror a and b exactly.
void work()
{
	Matrix a, b, result2;
	matrix a_seq, b_seq, result1;
	long long before, after;
	int correct, i, j;
	cudaError_t rc;
	// Allocate memory for matrices
	allocate_matrix(&a);
	allocate_matrix(&b);
	allocate_matrix(&result2);
	allocate_matrix_seq(&a_seq);
	allocate_matrix_seq(&b_seq);
	allocate_matrix_seq(&result1);
	// Set the matrix headers (previously left uninitialized)
	a.width = a.height = a.stride = input_size;
	b.width = b.height = b.stride = input_size;
	result2.width = result2.height = result2.stride = input_size;
	// Initialize matrix elements, then mirror them into the seq copies
	init_matrix(a);
	init_matrix(b);
	for (i = 0; i < input_size; i++)
		for (j = 0; j < input_size; j++) {
			a_seq.element[i][j] = a.elements[i*input_size + j];
			b_seq.element[i][j] = b.elements[i*input_size + j];
		}
	// Perform SEQ matrix multiplication
	before = wall_clock_time();
	mm(a_seq, b_seq, result1);
	after = wall_clock_time();
	fprintf(stderr, "Sequential matrix multiplication took %1.2f seconds\n", ((float)(after - before))/1000000000);
	// Perform CUDA matrix multiplication
	before = wall_clock_time();
	MatMul(a, b, result2);
	cudaDeviceSynchronize();
	after = wall_clock_time();
	fprintf(stderr, "CUDA matrix multiplication on GPU took %1.2f seconds\n", ((float)(after - before))/1000000000);
	// was there any error?
	rc = cudaGetLastError();
	if (rc != cudaSuccess)
		printf("Last CUDA error %s\n", cudaGetErrorString(rc));
	// Compare the results (exact compare is fine: inputs are small ints)
	int v = 0;
	correct = 1;
	for (i = 0; correct && i < input_size; i++)
		for (j = 0; j < input_size; j++) {
			if (result1.element[i][j] != result2.elements[v]) {
				correct = 0;
				break;
			}
			v++;
		}
	if (correct)
		printf("The result matrices are identical!\n");
	else
		printf("Difference in result matrices at element (%d, %d)!\n", i, j);
}
// Entry point: optional argv[1] overrides the default 1024 matrix size.
int main(int argc, char ** argv)
{
	srand(0);   // deterministic inputs for the CPU/GPU comparison
	printf("Usage: %s <size>\n", argv[0]);
	input_size = (argc >= 2) ? atoi(argv[1]) : 1024;
	fprintf(stderr,"Sequential matrix multiplication of size %d\n", input_size);
	// Multiply the matrices
	work();
	return 0;
}
|
10,029 | #include "CUDAHelper.cuh"
#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600
#else
// Software fallback for double-precision atomicAdd on devices below
// compute capability 6.0 (the #if above excludes SM60+, where the
// hardware instruction exists). Classic compare-and-swap loop over the
// 64-bit bit pattern; returns the previous value, like the built-in.
// The retry test compares integer bit patterns (not floats), so the
// loop terminates even for NaN payloads.
__device__ double atomicAdd(double* address, double val) {
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#endif
// Free the device allocation *d_ptr (if any) and null the pointer so a
// stale handle cannot be reused or double-freed.
template <class T>
__host__ void setPointerToNull(T **d_ptr) {
	if (*d_ptr == nullptr)
		return;
	cudaFree(*d_ptr);
	cudaCheckErrors("ERROR");
	*d_ptr = nullptr;
}
|
10,030 |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/********************************************************
CUDA Kernel
********************************************************/
/********************************************************
 Tiled square matrix multiply: C += A * B (TA x TA, row-major).
 Launch with blockDim = (16,16) and gridDim = (TA/16, TA/16);
 TA must be a multiple of TILE_SIZE (checked by the host code).
 FIX: the original loaded As/Bs from indices that ignored the thread
 coordinates (every thread of a block read the same element) and
 double-scaled the tile offset (`tileId * TILE_SIZE` although tileId
 already advances by TILE_SIZE), so the product was wrong.
 ********************************************************/
__global__ void matrixMul (float* C, float* A, float* B, int TA)
{
	const int TILE_SIZE = 16;
	__shared__ float As[TILE_SIZE][TILE_SIZE];
	__shared__ float Bs[TILE_SIZE][TILE_SIZE];
	/* coordinates of this thread's output element */
	const int ty  = threadIdx.y;
	const int tx  = threadIdx.x;
	const int row = blockIdx.y * TILE_SIZE + ty;
	const int col = blockIdx.x * TILE_SIZE + tx;
	float acc = 0.0f;
	/* walk the TA/TILE_SIZE tiles along the shared dimension */
	for (int tile = 0; tile < TA; tile += TILE_SIZE) {
		/* each thread stages one element of A and one of B */
		As[ty][tx] = A[row * TA + tile + tx];
		Bs[ty][tx] = B[(tile + ty) * TA + col];
		__syncthreads();   /* tiles fully loaded before use */
		for (int k = 0; k < TILE_SIZE; ++k)
			acc += As[ty][k] * Bs[k][tx];
		__syncthreads();   /* finish reading before the next overwrite */
	}
	/* the host zero-initializes C, so += keeps the original contract */
	C[row * TA + col] += acc;
}
/********************************************************
Programme main
********************************************************/
/////////////////////////////////////////////////////////
/* Host driver: builds TM x TM matrices whose product is analytically
   known (identity-like diagonals over a field of ones), times the H2D
   copy / kernel / D2H copy with CUDA events, and verifies the result
   element by element.
   FIX: the "grille" printf passed three arguments for two %d
   conversions — the format now prints all three grid dimensions. */
int main(int argc, char** argv)
{
	int i, j, GRID_SIZE_X, GRID_SIZE_Y, GRID_SIZE_Z, BLOCK_SIZE_X, BLOCK_SIZE_Y, TILE_SIZE;
	int TM;
	cudaError_t cerror; /* GPU status value */
	/* for measuring elapsed time */
	float tc;
	cudaEvent_t depart, arret;
	cudaEventCreate(&depart);
	cudaEventCreate(&arret);
	/* default values */
	TM = 2048;
	BLOCK_SIZE_X = 16;
	BLOCK_SIZE_Y = BLOCK_SIZE_X;
	TILE_SIZE = BLOCK_SIZE_Y;
	if ((TM % BLOCK_SIZE_X) != 0 || (TM % BLOCK_SIZE_Y) != 0) {
		printf("Taille matrice non multiple des dim bloc %d, %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
		exit(1);
	}
	GRID_SIZE_X = TM / BLOCK_SIZE_X;
	GRID_SIZE_Y = TM / BLOCK_SIZE_Y;
	GRID_SIZE_Z = TM / TILE_SIZE;
	/* allocate the matrices on the CPU */
	unsigned int size_A = TM * TM;
	unsigned int mem_size_A = sizeof(float) * size_A;
	float* h_A = (float*) malloc(mem_size_A);
	unsigned int size_B = TM * TM;
	unsigned int mem_size_B = sizeof(float) * size_B;
	float* h_B = (float*) malloc(mem_size_B);
	unsigned int size_C = TM * TM;
	unsigned int mem_size_C = sizeof(float) * size_C;
	float* h_C = (float*) malloc(mem_size_C);
	/* initialize the matrices with values that make the result checkable */
	for(i = 0; i < TM; i++){
		for(j = 0; j < TM; j++){
			h_A[i*TM+j] = 1.0;
			h_B[i*TM+j] = 1.0;
			h_C[i*TM+j] = 0.0;
			if (i==j) {
				h_A[i*TM+j]=(float) (i+1);
				h_B[i*TM+j]=(float) (i+1);
			}
		}
	}
	/* allocate the matrices on the GPU */
	float* d_A;
	float* d_B;
	cudaMalloc((void**) &d_A, mem_size_A);
	cudaMalloc((void**) &d_B, mem_size_B);
	float* d_C;
	cudaMalloc((void**) &d_C, mem_size_C);
	/* start timing the transfers */
	cudaEventRecord(depart,0);
	/* copy matrices A and B from the CPU to the GPU */
	cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice);
	cudaEventRecord(arret,0);
	cudaEventSynchronize(arret);
	cudaEventElapsedTime(&tc,depart, arret);
	printf("Temps de transfert host vers device : %f seconde\n", tc/1000.0);
	/* define the grid and the blocks */
	dim3 grid(GRID_SIZE_X, GRID_SIZE_Y);
	dim3 block(BLOCK_SIZE_X, BLOCK_SIZE_Y);
	printf("grille %d, %d, %d \n", GRID_SIZE_X, GRID_SIZE_Y, GRID_SIZE_Z);
	printf("bloc %d, %d \n", BLOCK_SIZE_X, BLOCK_SIZE_Y);
	cudaEventRecord(depart,0);
	/* run the kernel */
	matrixMul<<< grid, block >>>(d_C, d_A, d_B, TM);
	cudaEventRecord(arret,0);
	cudaEventSynchronize(arret);
	cudaEventElapsedTime(&tc,depart, arret);
	printf("Temps de calcul : %f seconde\n", tc/1000.0);
	/* GPU status: 0 = OK, anything else is an error */
	cerror=cudaGetLastError();
	printf(" retour GPU = %d \n", (int) cerror);
	cudaEventRecord(depart,0);
	/* copy matrix C back from the GPU */
	cudaMemcpy(h_C, d_C, mem_size_C,
	           cudaMemcpyDeviceToHost);
	cudaEventRecord(arret,0);
	cudaEventSynchronize(arret);
	cudaEventElapsedTime(&tc,depart, arret);
	printf("Temps transfert device vers host : %f seconde\n", tc/1000.0);
	/* verify the result */
	for (i = 0; i < TM; i++){
		for (j = 0; j < TM; j++){
			if ((i==j) && (h_C[i*TM+j] != (float)((i+1)*(i+1)+TM-1))) {
				printf("Erreur i: %d j: %d %f\n", i, j, h_C[i*TM+j] );
				exit(1);
			} else if ((i!=j) && (h_C[i*TM+j] != (float) (i + j + TM))) {
				printf("Erreur i: %d j: %d\n", i, j);
				exit(1);
			}
		}
	}
	cudaEventDestroy(depart);
	cudaEventDestroy(arret);
	/* free the memory */
	free(h_A);
	free(h_B);
	free(h_C);
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
}
10,031 | #include <iostream>
#include <curand.h>
// RAII owner of an n-element device array filled with uniformly
// distributed floats by cuRAND.
// FIXES: the destructor called cudaFree(&d_a), which passed the address
// of the member pointer (a host address) instead of the device pointer;
// and the curand generator was never destroyed (resource leak).
struct random_d_array
{
	float *d_a;   // device allocation, owned by this object
	int n;        // element count
	random_d_array(int n) :n{n}
	{
		cudaMalloc((void**)&d_a, n*sizeof(float));
		curandGenerator_t gen;
		curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
		curandGenerateUniform(gen, d_a, n);
		curandDestroyGenerator(gen);   // was leaked
	}
	~random_d_array()
	{
		cudaFree(d_a);   // was cudaFree(&d_a)
	}
};
using namespace std;
// Square each element of `array` in place; one thread per element,
// with a guard for the grid tail.
__global__ void MyKernel(float *array, int arrayCount)
{
	const int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= arrayCount)
		return;
	const float v = array[idx];
	array[idx] = v * v;
}
// Times 10 launches of MyKernel with the given configuration, printing
// each run, and returns the average of runs 1..9 (run 0 is warm-up).
static float averageKernelTimeMs(float *array, int arrayCount, int gridSize, int blockSize)
{
	cudaEvent_t start, stop;
	float milliseconds = 0;
	float average = 0.0;
	for(int i = 0; i < 10; ++i)
	{
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
		cudaEventRecord(start);
		MyKernel<<<gridSize, blockSize>>>(array, arrayCount);
		cudaEventRecord(stop);
		cudaDeviceSynchronize();   // ensure stop has completed before reading
		cudaEventElapsedTime(&milliseconds, start, stop);
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
		cout << "i = " << i << ": " << milliseconds << " ms" << endl;
		if(i > 0) average += milliseconds;
	}
	return average / (10 - 1);
}
// Benchmarks MyKernel twice: once with a fixed 32-thread block, once with
// the block size suggested by cudaOccupancyMaxPotentialBlockSize.
// (The duplicated timing loop of the original is now the helper above.)
int launchMyKernel(float *array, int arrayCount)
{
	int blockSize = 32;
	int minGridSize;
	int gridSize = (arrayCount + blockSize - 1)/blockSize;
	cout << "Trying non-optiomal blockSize = " << blockSize << ", gridSize = " << gridSize << endl;
	float average = averageKernelTimeMs(array, arrayCount, gridSize, blockSize);
	cout << "Average = " << average << endl;
	cout << "============" << endl;
	// Ask the runtime for the occupancy-maximizing block size
	cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize,
	                                   (void*)MyKernel, 0, arrayCount);
	gridSize = (arrayCount + blockSize - 1)/blockSize;
	cout << "Suggested blockSize = " << blockSize << ", gridSize = " << gridSize << ", minGridSize = " << minGridSize << endl;
	average = averageKernelTimeMs(array, arrayCount, gridSize, blockSize);
	cout << "Average = " << average << endl;
	return 0;
}
// Benchmark MyKernel on 100k cuRAND-filled floats with two launch
// configurations (fixed block size vs. occupancy-suggested).
int main()
{
	const int n = 100000;
	random_d_array A(n);
	launchMyKernel(A.d_a, n);
	return 0;
}
|
10,032 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define X 0
#define Y 1
#define SIZEA 65536
#define SIZEB 65336
#define N_BLOCKS 64
#define N_THREADS 2
// Merge-path "big" kernel: each block merges its partition of the two
// sorted arrays A and B into M, using a sliding shared-memory window of
// blockDim.x elements per input. A_idx/B_idx hold the per-block partition
// boundaries produced by pathBig_k. Expected launch: N_BLOCKS blocks.
// (Original comments were in French; translated below, code unchanged.)
__global__ void mergeBig_k(int *A, int *B, int *M, int *A_idx, int *B_idx){
// Shared memory window we operate on
__shared__ int A_shared[1024];
__shared__ int B_shared[1024];
__shared__ int biaisA;
__shared__ int biaisB;
// (endA-startA) : size of A's slice in this partition
// (endB-startB) : size of B's slice in this partition
int startA, endA;
int startB, endB;
// Fetch the start/end indices of A and B relative to the global arrays
if (blockIdx.x == 0){
startA = 0;
endA = A_idx[blockIdx.x];
startB = 0;
endB = B_idx[blockIdx.x];
}
else if (blockIdx.x == N_BLOCKS-1){
startA = A_idx[blockIdx.x-1];
endA = SIZEA;
startB = B_idx[blockIdx.x-1];
endB = SIZEB;
}
else{
startA = A_idx[blockIdx.x-1];
endA = A_idx[blockIdx.x];
startB = B_idx[blockIdx.x-1];
endB = B_idx[blockIdx.x];
}
// Notation from the merge-path paper:
// there are N elements to merge, N = SIZEA + SIZEB.
// Each partition holds N/p elements; each block handles one partition:
// N / p = (endB-startB) + (endA-startA) = (SIZEA+SIZEB) / N_BLOCKS.
// If Z is the thread count, we merge Z elements at a time, so the
// sliding window has to move (N / p) / Z times (rounded up below).
int iter_max = (blockDim.x - 1 + (endB-startB) + (endA-startA)) / blockDim.x;
int iter = 0;
biaisA = 0;
biaisB = 0;
do{
// Synchronize the window offsets (biaisA/biaisB) across the block
__syncthreads();
// Load the current window into shared memory
if (startA + biaisA + threadIdx.x < endA){
A_shared[threadIdx.x] = A[startA + biaisA + threadIdx.x];
}
if (startB + biaisB + threadIdx.x < endB){
B_shared[threadIdx.x] = B[startB + biaisB + threadIdx.x];
}
// Make the shared-memory window visible to all threads
__syncthreads();
// Compute the size of the sliding window.
// Usually it is the thread count (blockDim.x), i.e. a Z * Z square,
// but it can be smaller when fewer than blockDim.x elements remain.
int sizeAshared = endA-startA - biaisA;
int sizeBshared = endB-startB - biaisB;
if (sizeAshared < 0)
sizeAshared = 0;
if (sizeAshared > blockDim.x && sizeAshared != 0)
sizeAshared = blockDim.x;
if (sizeBshared < 0)
sizeBshared = 0;
if (sizeBshared > blockDim.x && sizeBshared != 0)
sizeBshared = blockDim.x;
// Binary search along this thread's cross-diagonal of the merge grid
int i = threadIdx.x;
if (i < sizeAshared + sizeBshared){
int K[2];
int P[2];
if (i > sizeAshared) {
K[X] = i - sizeAshared;
K[Y] = sizeAshared;
P[X] = sizeAshared;
P[Y] = i - sizeAshared;
}
else {
K[X] = 0;
K[Y] = i;
P[X] = i;
P[Y] = 0;
}
while (1) {
int offset = (abs(K[Y] - P[Y]))/2;
int Q[2] = {K[X] + offset, K[Y] - offset};
if (Q[Y] >= 0 && Q[X] <= sizeBshared && (Q[Y] == sizeAshared || Q[X] == 0 || A_shared[Q[Y]] > B_shared[Q[X]-1])) {
if (Q[X] == sizeBshared || Q[Y] == 0 || A_shared[Q[Y]-1] <= B_shared[Q[X]]) {
int idx = startA + startB + i + iter * blockDim.x;
if (Q[Y] < sizeAshared && (Q[X] == sizeBshared || A_shared[Q[Y]] <= B_shared[Q[X]]) ) {
M[idx] = A_shared[Q[Y]];
atomicAdd(&biaisA, 1); // window offset to advance
}
else {
M[idx] = B_shared[Q[X]];
atomicAdd(&biaisB, 1); // window offset to advance
}
//printf("blockIdx.x = %d threadIdx.x = %d idx = %d m = %d biaisA = %d\n", blockIdx.x, threadIdx.x, idx, M[idx], biaisA);
break ;
}
else {
K[X] = Q[X] + 1;
K[Y] = Q[Y] - 1;
}
}
else {
P[X] = Q[X] - 1;
P[Y] = Q[Y] + 1 ;
}
}
}
iter = iter + 1;
} while(iter < iter_max);
}
// Merge-path partition kernel: finds N_BLOCKS-1 cut points (diagonals)
// so that each block processes (SIZEA+SIZEB)/N_BLOCKS elements in the
// merge kernel. One block per diagonal; block N_BLOCKS-1 has no cut to
// compute and returns immediately.
// (Original comments were in French; translated, code unchanged.)
__global__ void pathBig_k(int *A, int *B, int *M, int *A_idx, int *B_idx){
// This kernel simply searches N_BLOCKS diagonals so that each block
// handles N / N_BLOCKS elements in the second kernel
int i = (SIZEA + SIZEB)/N_BLOCKS * (blockIdx.x + 1);
if (blockIdx.x == N_BLOCKS-1){
return;
}
// Binary search along the diagonal for the merge-path crossing
int K[2];
int P[2];
if (i > SIZEA) {
K[X] = i - SIZEA;
K[Y] = SIZEA;
P[X] = SIZEA;
P[Y] = i - SIZEA;
}
else {
K[X] = 0;
K[Y] = i;
P[X] = i;
P[Y] = 0;
}
while (1) {
int offset = (abs(K[Y] - P[Y]))/2;
int Q[2] = {K[X] + offset, K[Y] - offset};
if (Q[Y] >= 0 && Q[X] <= SIZEB && (Q[Y] == SIZEA || Q[X] == 0 || A[Q[Y]] > B[Q[X]-1])) {
if (Q[X] == SIZEB || Q[Y] == 0 || A[Q[Y]-1] <= B[Q[X]]) {
if (Q[Y] < SIZEA && (Q[X] == SIZEB || A[Q[Y]] <= B[Q[X]]) ) {
M[i] = A[Q[Y]];
}
else {
M[i] = B[Q[X]];
}
// Record the partition boundary for this block
A_idx[blockIdx.x] = Q[Y];
B_idx[blockIdx.x] = Q[X];
// printf("blockIdx.x = %d | Aidx[%d] = %d | Bidx[%d] = %d \n", blockIdx.x, blockIdx.x, Q[Y], blockIdx.x, Q[X]);
break ;
}
else {
K[X] = Q[X] + 1;
K[Y] = Q[Y] - 1;
}
}
else {
P[X] = Q[X] - 1;
P[Y] = Q[Y] + 1;
}
}
}
// Driver: merges two disjoint sorted arrays (evens and odds) with the
// two-kernel merge-path scheme and prints the merged result.
// (Original comments were in French; translated, code unchanged.)
// NOTE(review): SIZEB is 65336 while SIZEA is 65536 — presumably a typo
// for 65536; confirm the intended sizes.
// NOTE(review): mHost is ~(SIZEA+SIZEB)*4 bytes (~0.5 MB) on the stack —
// fine for default 8 MB stacks, but heap allocation would be safer.
int main(){
// Allocate memory and fill the input arrays
int *A = (int*) malloc(sizeof(int) * SIZEA);
for (int i = 0; i < SIZEA; i++){
A[i] = 2 * i;
}
int *B = (int*) malloc(sizeof(int) * SIZEB);
for (int i = 0; i < SIZEB; i++){
B[i] = 2 * i + 1;
}
int mHost[SIZEA + SIZEB]; // Merged array
int A_idx[N_BLOCKS]; // Merge path
int B_idx[N_BLOCKS]; // Merge path
int *aDevice, *bDevice, *mDevice, *A_idxDevice, *B_idxDevice;
// Allocate GPU global memory
cudaMalloc( (void**) &aDevice, SIZEA * sizeof(int) );
cudaMalloc( (void**) &bDevice, SIZEB * sizeof(int) );
cudaMalloc( (void**) &mDevice, (SIZEA+SIZEB) * sizeof(int) );
cudaMalloc( (void**) &A_idxDevice, N_BLOCKS * sizeof(int) );
cudaMalloc( (void**) &B_idxDevice, N_BLOCKS * sizeof(int) );
// Copy the input arrays to the GPU
cudaMemcpy( aDevice, A, SIZEA * sizeof(int), cudaMemcpyHostToDevice );
cudaMemcpy( bDevice, B, SIZEB * sizeof(int), cudaMemcpyHostToDevice );
// Launch the kernel that partitions the arrays:
// (SIZEA+SIZEB) / N_BLOCKS elements per block in the second kernel
pathBig_k<<<N_BLOCKS, 1>>>(aDevice, bDevice, mDevice, A_idxDevice, B_idxDevice);
// cudaMemcpy( mHost, mDevice, (SIZEA+SIZEB) * sizeof(int), cudaMemcpyDeviceToHost );
// cudaMemcpy( A_idx, A_idxDevice, N_BLOCKS * sizeof(int), cudaMemcpyDeviceToHost );
// cudaMemcpy( B_idx, B_idxDevice, N_BLOCKS * sizeof(int), cudaMemcpyDeviceToHost );
// A_idx[N_BLOCKS-1] = SIZEA;
// B_idx[N_BLOCKS-1] = SIZEB;
// cudaMemcpy( A_idxDevice, A_idx, N_BLOCKS * sizeof(int), cudaMemcpyHostToDevice );
// cudaMemcpy( B_idxDevice, B_idx, N_BLOCKS * sizeof(int), cudaMemcpyHostToDevice );
// (SIZEA+SIZEB) / N_BLOCKS elements per block in the second kernel;
// a sliding window loads the elements into shared memory
mergeBig_k<<<N_BLOCKS, N_THREADS>>>(aDevice, bDevice, mDevice, A_idxDevice, B_idxDevice);
// Copy the merged array back to the CPU, then print it
cudaMemcpy( mHost, mDevice, (SIZEA+SIZEB) * sizeof(int), cudaMemcpyDeviceToHost );
for (int i = 0; i < SIZEA+SIZEB; i ++){
printf("m[%d] = %d\n", i, mHost[i]);
}
// Release the memory
free(A);
free(B);
cudaFree(aDevice);
cudaFree(bDevice);
cudaFree(mDevice);
cudaFree(A_idxDevice);
cudaFree(B_idxDevice);
return 0;
}
|
10,033 | #include "includes.h"
// Element-wise sum c = a + b over n elements; one thread per element,
// with a guard for the grid tail.
__global__ void vectorAdd(int* a, int* b, int* c, int n){
	const int tid = blockDim.x * blockIdx.x + threadIdx.x;
	if (tid >= n)
		return;
	c[tid] = a[tid] + b[tid];
}
10,034 |
#include <math.h>
// Euclidean distance between (x1,y1) and (x2,y2); every thread computes
// the same value, and thread 0 stores it into result[0].
extern "C"
__global__ void relative(float x1, float y1, float x2, float y2,float* result )
{
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	float dx = x1 - x2;
	float dy = y1 - y2;
	float returned = sqrtf(dx * dx + dy * dy);
	if (index == 0) result[0] = returned;
}
// Haversine great-circle distance between two lat/lon points (degrees),
// returned in metres via result[0] (written by thread 0).
// FIX: the original mixed double-precision literals (3.14159265359/180)
// and sqrt() into otherwise-float arithmetic, forcing slow double math in
// a float kernel; all constants and calls are now single precision
// (results may differ from the old version by a few ulps).
extern "C"
__global__ void real(float lat1,float lon1,float lat2,float lon2,float* result )
{
	int index = blockIdx.x * blockDim.x + threadIdx.x;
	const float DEG2RAD = 3.14159265359f / 180.0f;   // deg -> rad
	const float R = 6371.0f;                          // Earth radius in km
	float dLat = (lat2-lat1) * DEG2RAD;
	float dLon = (lon2-lon1) * DEG2RAD;
	float a =
		sinf(dLat/2) * sinf(dLat/2) +
		cosf(lat1 * DEG2RAD) * cosf(lat2 * DEG2RAD) *
		sinf(dLon/2) * sinf(dLon/2);
	float c = 2.0f * atan2f(sqrtf(a), sqrtf(1.0f - a));
	float d = R * c;                                  // distance in km
	if (index == 0) result[0] = d * 1000.0f;          // km -> m
}
|
10,035 | // Rishabh Agarwal - 18JE0676
#include <bits/stdc++.h>
#include <cuda.h>
using namespace std;
// kernel function
// For each index: average a[idx], a[idx1], a[idx2] and likewise for b,
// then store the mean of the two averages in c[idx].
// NOTE(review): the neighbor indices wrap modulo 256, so every thread
// reads only from the first 256 elements of a and b — presumably a
// fixed smoothing window tied to the 256-thread block size used by the
// caller; confirm the wrap is intended rather than `% n`.
__global__ void kernelFunction(int *a, int *b, int *c, int n) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < n) {
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs)/2;
}
}
// Dual-stream pipeline demo: chunks of ha/hb are copied H2D, processed by
// kernelFunction, and copied back D2H on two streams so transfers and
// compute overlap. Requires pinned host memory (cudaHostAlloc).
// FIX: the start/stop timing events were never destroyed (resource leak);
// they are now released with the other resources before returning.
int main( void ) {
	cudaDeviceProp prop;
	int whichDevice;
	cudaGetDevice(&whichDevice);
	cudaGetDeviceProperties(&prop, whichDevice);
	if (!prop.deviceOverlap) {
		cout << "Device will not handle overlaps, so no speed up from streams\n";
		return 0;
	}
	if(prop.concurrentKernels == 0) {
		cout << "> GPU does not support concurrent kernel execution\n";
		cout << "  CUDA kernel runs will be serialized\n";
	}
	if(prop.asyncEngineCount == 0) {
		cout << "GPU does not support concurrent Data transer and overlaping of kernel execution & data transfer\n";
		cout << "Mem copy call will be blocking calls\n";
	}
	cudaEvent_t start, stop;
	float elapsedTime;
	int n = 1024*1024;
	int maxsize = n*20;
	int *ha, *hb, *hc;
	int *da0, *db0, *dc0, *da1, *db1, *dc1;
	cudaStream_t stream0, stream1;
	// start the timers
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	// initialize the streams
	cudaStreamCreate(&stream0);
	cudaStreamCreate(&stream1);
	// allocate the memory on the GPU (one chunk-sized buffer per stream)
	cudaMalloc(&da0, n * sizeof(int));
	cudaMalloc(&da1, n * sizeof(int));
	cudaMalloc(&db0, n * sizeof(int));
	cudaMalloc(&db1, n * sizeof(int));
	cudaMalloc(&dc0, n * sizeof(int));
	cudaMalloc(&dc1, n * sizeof(int));
	// allocate host locked (pinned) memory, required for async copies
	cudaHostAlloc((void**)&ha, maxsize * sizeof(int), cudaHostAllocDefault);
	cudaHostAlloc((void**)&hb, maxsize * sizeof(int), cudaHostAllocDefault);
	cudaHostAlloc((void**)&hc, maxsize * sizeof(int), cudaHostAllocDefault);
	for(int i=0; i < maxsize; i++) {
		ha[i] = i + 10;
		hb[i] = i + 200;
	}
	cudaEventRecord(start, 0);
	for(int i=0; i < maxsize; i += n*2) {
		// enqueue copies of a in stream0 and stream1
		cudaMemcpyAsync(da0, ha + i, n * sizeof(int), cudaMemcpyHostToDevice, stream0);
		cudaMemcpyAsync(da1, ha + i + n, n * sizeof(int), cudaMemcpyHostToDevice, stream1);
		// enqueue copies of b in stream0 and stream1
		cudaMemcpyAsync(db0, hb + i, n * sizeof(int), cudaMemcpyHostToDevice, stream0);
		cudaMemcpyAsync(db1, hb + i + n, n * sizeof(int), cudaMemcpyHostToDevice, stream1);
		// enqueue kernels in stream0 and stream1
		kernelFunction <<< n/256, 256, 0, stream0 >>> (da0, db0, dc0, n);
		kernelFunction <<< n/256, 256, 0, stream1 >>> (da1, db1, dc1, n);
		// enqueue copies of c from device to locked memory
		cudaMemcpyAsync(hc + i, dc0, n * sizeof(int), cudaMemcpyDeviceToHost, stream0);
		cudaMemcpyAsync(hc + i + n, dc1, n * sizeof(int), cudaMemcpyDeviceToHost, stream1);
	}
	cudaStreamSynchronize(stream0);
	cudaStreamSynchronize(stream1);
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&elapsedTime, start, stop);
	cout << "Time taken in ms: " << elapsedTime << "\n\n";
	// we are printing only upto 20 elements
	cout << "Vector A: \n";
	for(int i=0; i < 20; i++) {
		cout << ha[i] << " ";
	}
	cout << "\n\n";
	cout << "Vector B: \n";
	for(int i=0; i < 20; i++) {
		cout << hb[i] << " ";
	}
	cout << "\n\n";
	cout <<"After performing operation: C[i] = ((A[i] + A[i+1] + A[i+2]) / 3 + (B[i] + B[i+1] + B[i+2]) / 3) / 2\n";
	cout << "Vector C: \n";
	for(int i=0; i < 20; i++) {
		cout << hc[i] << " ";
	}
	cout << "\n\n";
	// release all resources (events were previously leaked)
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	cudaFreeHost(ha);
	cudaFreeHost(hb);
	cudaFreeHost(hc);
	cudaFree(da0);
	cudaFree(da1);
	cudaFree(db0);
	cudaFree(db1);
	cudaFree(dc0);
	cudaFree(dc1);
	cudaStreamDestroy(stream0);
	cudaStreamDestroy(stream1);
	return 0;
}
|
10,036 | #include "includes.h"
// Marks flow vectors invalid (NaN) wherever the modifiable flow deviates
// from the reference flow by more than cons_thres (per-pixel Euclidean
// distance). Launch over a 2D grid covering width x height; pixels inside
// the image are always written back (unchanged when within tolerance).
__global__ void invalidateFlow_kernel(float *modFlowX, float *modFlowY, const float *constFlowX, const float *constFlowY, int width, int height, float cons_thres) {
	const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
	const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
	if (x >= width || y >= height)
		return;
	const int ind = __mul24(y, width) + x;
	float fx = modFlowX[ind];
	float fy = modFlowY[ind];
	const float dx = fx - constFlowX[ind];
	const float dy = fy - constFlowY[ind];
	const float err = sqrtf(dx * dx + dy * dy);
	if (err > cons_thres) {
		fx = nanf("");
		fy = nanf("");
	}
	modFlowX[ind] = fx;
	modFlowY[ind] = fy;
}
10,037 | /**
*
* author@Haris Wang
* 2020.10.15
*
* */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define BLOCK_SIZE 16
// initial matrix with random value
// Fill arr[0..size) with random integer-valued floats in [1, 8].
void init_matrix(float *arr, int size)
{
	for (int i = 0; i < size; ++i)
		arr[i] = (float)(1 + rand() % 8);
}
// matrix multiply on CPU
// CPU reference GEMM: C = A(MxK) * B(KxN), all row-major, C overwritten.
void matrix_mul_on_host(float *A, float *B, float *C, int M, int K, int N)
{
	for (int i = 0; i < M; i++)
	{
		for (int j = 0; j < N; j++)
		{
			float acc = 0.0f;               // accumulate in a register
			for (int k = 0; k < K; k++)
				acc += A[i*K + k] * B[k*N + j];
			C[i*N + j] = acc;
		}
	}
}
// matrix multiply on GPU without shared memory
__global__ void matrix_mul_on_device(float *array_A, float *array_B, float *array_C, int M, int K, int N)
{
int ix = threadIdx.x + blockDim.x*blockIdx.x;
int iy = threadIdx.y + blockDim.y*blockIdx.y;
if (ix < N && iy < M)
{
array_C[iy*N + ix] = 0;
for (int k = 0; k < K; k++)
{
array_C[iy*N + ix] += array_A[iy*K + k] * array_B[k*N + ix];
}
}
}
// matrix multiply on GPU with shared memory
// Tiled GEMM with shared memory: C = A(MxK) * B(KxN), row-major.
// Each block computes a BLOCK_SIZE x BLOCK_SIZE tile of C; out-of-range
// tile elements are zero-padded so arbitrary M/K/N are handled.
// Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE).
__global__ void matrix_mul_sharedMem(float *A, float *B, float *C, int M, int K, int N)
{
__shared__ float sharedM[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float sharedN[BLOCK_SIZE][BLOCK_SIZE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * BLOCK_SIZE + ty;
int col = bx * BLOCK_SIZE + tx;
float Csub = 0.0;
// Walk the ceil(K / BLOCK_SIZE) tiles along the shared dimension
for (int i = 0; i < (K+BLOCK_SIZE-1) / BLOCK_SIZE; i++)
{
// Stage one element of A and B each, zero-padding past the edges
if (i*BLOCK_SIZE + tx < K && row < M)
sharedM[ty][tx] = A[row*K + i * BLOCK_SIZE + tx];
else
sharedM[ty][tx] = 0.0;
if (i*BLOCK_SIZE + ty < K && col < N)
sharedN[ty][tx] = B[(i*BLOCK_SIZE + ty)*N + col];
else
sharedN[ty][tx] = 0.0;
__syncthreads();   // tiles loaded before use
for (int j = 0; j < BLOCK_SIZE; j++)
Csub += sharedM[ty][j] * sharedN[j][tx];
__syncthreads();   // finish reading before the next overwrite
}
if (row < M && col < N)
C[row*N + col] = Csub;
}
int main(void)
{
    // Reads M, K, N from stdin, then times C = A(MxK)*B(KxN) three ways:
    // CPU reference, GPU shared-memory tiled kernel, and GPU naive kernel.
    int M,K,N;
    printf("Please enter M K N :\n");
    // FIX: validate scanf; the original used M/K/N uninitialized on bad input.
    if (scanf("%d %d %d", &M, &K, &N) != 3 || M <= 0 || K <= 0 || N <= 0)
    {
        printf("invalid matrix dimensions\n");
        return 1;
    }
    int Axy = M * K;
    int Bxy = K * N;
    int Cxy = M * N;
    float *h_A, *h_B, *hostRef, *deviceRef;
    h_A = (float*)malloc(Axy * sizeof(float));
    h_B = (float*)malloc(Bxy * sizeof(float));
    init_matrix(h_A, Axy);
    init_matrix(h_B, Bxy);
    hostRef = (float*)malloc(Cxy * sizeof(float));
    deviceRef = (float*)malloc(Cxy * sizeof(float));
    printf("\n");
    printf("------------------------------------------------------------------------------------\n");
    printf("Computing matrix product using matrix_mul_on_host \n");
    clock_t start = clock();
    matrix_mul_on_host(h_A, h_B, hostRef, M, K, N);
    clock_t finish = clock();
    float time = (float)(finish - start) / CLOCKS_PER_SEC * 1000;
    printf("Time cost on CPU is %.2f ms \n",time);
    printf("------------------------------------------------------------------------------------\n\n\n");
    printf("------------------------------------------------------------------------------------\n");
    printf("Computing matrix product using multiplicateMatrixshared \n");
    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, Axy * sizeof(float));
    cudaMalloc((void**)&d_B, Bxy * sizeof(float));
    cudaMalloc((void**)&d_C, Cxy * sizeof(float));
    cudaMemcpy(d_A, h_A, Axy * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, Bxy * sizeof(float), cudaMemcpyHostToDevice);
    // Ceil-division grid so M and N need not be multiples of BLOCK_SIZE.
    dim3 block(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid((N + block.x - 1) / block.x, (M + block.y - 1) / block.y);
    cudaEvent_t gpustart, gpustop;
    cudaEventCreate(&gpustart);
    cudaEventCreate(&gpustop);
    cudaEventRecord(gpustart, 0);
    matrix_mul_sharedMem <<< grid, block >>> (d_A, d_B, d_C, M, K, N);
    // FIX: kernel launch errors are silent unless queried explicitly.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("matrix_mul_sharedMem launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    cudaEventRecord(gpustop, 0);
    cudaEventSynchronize(gpustop);
    cudaEventElapsedTime(&time, gpustart, gpustop);
    cudaEventDestroy(gpustart);
    cudaEventDestroy(gpustop);
    cudaMemcpy(deviceRef, d_C, Cxy * sizeof(float), cudaMemcpyDeviceToHost);
    printf("Time cost on GPU using sharedMem is %.2f ms \n",time);
    printf("------------------------------------------------------------------------------------\n\n\n");
    printf("------------------------------------------------------------------------------------\n");
    printf("Computing matrix product using matrix_mul_on_device \n");
    cudaEventCreate(&gpustart);
    cudaEventCreate(&gpustop);
    cudaEventRecord(gpustart, 0);
    // NOTE(review): this fixed 256x256 grid of 32x32 blocks only covers
    // matrices up to 8192x8192; confirm matrix_mul_on_device (defined
    // elsewhere) guards its indices for other sizes.
    dim3 grid_2(256,256),
         block_2(32,32);
    matrix_mul_on_device <<<grid_2,block_2>>> (d_A, d_B, d_C, M, K, N);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("matrix_mul_on_device launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    cudaEventRecord(gpustop, 0);
    cudaEventSynchronize(gpustop);
    cudaEventElapsedTime(&time, gpustart, gpustop);
    cudaEventDestroy(gpustart);
    cudaEventDestroy(gpustop);
    cudaMemcpy(deviceRef, d_C, Cxy * sizeof(float), cudaMemcpyDeviceToHost);
    printf("Time cost on GPU without sharedMem is %.2f ms \n",time);
    printf("------------------------------------------------------------------------------------\n\n\n");
    // Check the results
    /*
    for(int i=0; i<Cxy; i++)
    {
        if(deviceRef[i]==hostRef[i])
        {
            printf("idx: %d passed !! \n", i);
        }
    }
    */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(hostRef);
    free(deviceRef);
    return 0;
}
10,038 | __device__ float activation(float input) {
    // Logistic sigmoid: 1 / (1 + e^(-input)).  The float-cast argument makes
    // `exp` resolve to the single-precision overload in device code.
    return 1 / (1 + (exp((float) -1 * (input))));
}
__global__ void cuda_neural_network(float* input, float* next, float* weights) {
    // Forward pass of one fully-connected layer with sigmoid activation.
    // Launch layout: one block per output neuron (blockIdx.x), one thread per
    // input neuron (threadIdx.x); dynamic shared memory must hold blockDim.x
    // floats.  Weight row layout: blockDim.x weights followed by one bias.
    extern __shared__ float buffer[];
    float inputWeight;
    float inputBias;
    float tmp;
    inputWeight = weights[(blockDim.x + 1) * blockIdx.x + threadIdx.x];
    if (threadIdx.x == 0) {
        inputBias = weights[(blockDim.x + 1) * blockIdx.x + blockDim.x];
    }
    tmp = input[threadIdx.x] * inputWeight;
    buffer[threadIdx.x] = tmp;
    __syncthreads();
    // Tree reduction of buffer[] into thread 0's tmp.
    // FIX: the barriers were previously inside the `if (j < blockDim.x)`
    // branch; __syncthreads() in divergent control flow is undefined
    // behavior.  All threads now reach both barriers each iteration.  The
    // unconditional store is safe: for non-participating threads
    // buffer[threadIdx.x] already equals tmp (they last wrote it together).
    #pragma unroll
    for (int i = 1; i < blockDim.x; i *= 2) {
        int j = threadIdx.x + i;
        if (j < blockDim.x) {
            tmp += buffer[j];
        }
        __syncthreads();               // all reads of buffer[] complete
        buffer[threadIdx.x] = tmp;
        __syncthreads();               // all writes visible for next round
    }
    if (threadIdx.x == 0) {
        next[blockIdx.x] = activation(tmp + inputBias);
    }
}
__global__ void cuda_neural_network_error(float* current, float* next,
    float* weights, float* learning, float* labels, bool override) {
    // Backpropagation step: reduces error*weight over the next layer's
    // neurons (threadIdx.x) for one current-layer neuron (blockIdx.x),
    // updates weights/biases, and optionally writes the propagated error.
    // Shared memory must hold blockDim.x floats (and at least 3).
    extern __shared__ float buffer[];
    float weight;
    float bias;
    float error;
    float tmp;
    float output;
    float l;
    int weightIndex = blockIdx.x + threadIdx.x * (gridDim.x + 1);
    weight = weights[weightIndex];
    if (blockIdx.x == 0) {
        // Only block 0 reads (and later writes) the bias column.
        bias = weights[(threadIdx.x + 1) * (gridDim.x + 1) - 1];
    }
    error = next[threadIdx.x];
    if (labels != NULL) {
        // Output layer: sigmoid-derivative-weighted difference from target.
        error = error * (1 - error) * (labels[threadIdx.x] - error);
    }
    tmp = error * weight;
    buffer[threadIdx.x] = tmp;
    __syncthreads();
    // Tree reduction of buffer[] into thread 0's tmp.
    // FIX: the barriers were previously inside the `if (j < blockDim.x)`
    // branch; __syncthreads() in divergent control flow is undefined
    // behavior.  All threads now reach both barriers each iteration; the
    // unconditional store is a no-op for non-participating threads.
    #pragma unroll
    for (int i = 1; i < blockDim.x; i *= 2) {
        int j = threadIdx.x + i;
        if (j < blockDim.x) {
            tmp += buffer[j];
        }
        __syncthreads();
        buffer[threadIdx.x] = tmp;
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        output = current[blockIdx.x];
        l = *learning;
        // Broadcast output*lr and lr to the whole block via shared memory
        // (the reduction values in these slots are no longer needed).
        buffer[1] = output * l;
        buffer[2] = l;
    }
    __syncthreads();
    weights[weightIndex] = weight + buffer[1] * error;
    if (blockIdx.x == 0) {
        weights[(threadIdx.x + 1) * (gridDim.x + 1) - 1] = bias + error * buffer[2];
    }
    if (threadIdx.x == 0 && override) {
        current[blockIdx.x] = output * (1 - output) * tmp;
    }
}
|
10,039 | #include "includes.h"
#define L2HYS_EPSILON 0.01f
#define L2HYS_EPSILONHYS 1.0f
#define L2HYS_CLIP 0.2f
#define data_h2y 30
//long h_windowx=Imagewidth/Windowx;
//long h_windowy=ImageHeight/Windowy;
//dim3 blocks(h_windowx,h_windowy);//h_windowx=ImageWidth/Windowx,h_windowy=ImageHeight/Windowy
//dim3 threads(Windowx,Windowy);// each thread block computes the feature vector of one cell
//dim3 block(18,7);// 18 angular orientation bins per cell, 7 cells per direction
__global__ void smooth(float *in,float *out)
{
    // Circular [0.1, 0.8, 0.1] smoothing over the 10 orientation bins of each
    // cell, for 18 directions x 7 cells (row stride 70 floats).  Note: every
    // launched thread performs the same full computation and writes the same
    // values, so the result is independent of the launch configuration.
    const int nBins = 10;
    const int lineWidth = 70;
    float smoothed[10];
    for (int dir = 0; dir < 18; ++dir)
    {
        for (int cell = 0; cell < 7; ++cell)
        {
            const int base = dir * lineWidth + cell * nBins;
            for (int bin = 0; bin < nBins; ++bin)
            {
                const int left = (bin - 1 + nBins) % nBins;   // wrap around
                const int right = (bin + 1) % nBins;
                smoothed[bin] = in[base + bin] * 0.8f
                              + in[base + left] * 0.1f
                              + in[base + right] * 0.1f;
            }
            for (int bin = 0; bin < nBins; ++bin)
            {
                out[base + bin] = smoothed[bin];
            }
        }
    }
}
10,040 | #include "includes.h"
__global__ void _kpoly64(int nx, int ns, double *xval, int *xrow, int *xcol, double *sval, int *srow, int *scol, double *k, double c, double d) {
  // Polynomial kernel matrix k = (X'S + c)^d between two sparse matrices
  // stored in compressed-column form with 1-based column pointers
  // (xcol/scol) — presumably exported from a 1-indexed host environment;
  // TODO confirm the indexing convention against the caller.
  int i, n, x1, x2, xc, xr, s1, s2, sc, sr;
  i = threadIdx.x + blockIdx.x * blockDim.x;
  n = nx*ns;
  // Grid-stride loop: one output entry k[i] per iteration.
  while (i < n) {
    double ki = 0;
    xc = i % nx;   // column of X
    sc = i / nx;   // column of S
    x1 = xcol[xc]-1; x2 = xcol[xc+1]-1;   // 0-based nonzero range for X column
    s1 = scol[sc]-1; s2 = scol[sc+1]-1;   // 0-based nonzero range for S column
    // Merge the two row-index lists, accumulating the dot product wherever
    // the row indices match (assumes rows sorted within each column).
    while ((x1 < x2) && (s1 < s2)) {
      xr = xrow[x1]; sr = srow[s1];
      if (sr < xr) s1++;
      else if (xr < sr) x1++;
      else ki += xval[x1++]*sval[s1++];
    }
    k[i] = pow(ki + c, d);
    i += blockDim.x * gridDim.x;
  }
}
10,041 | #include <iostream>
#include <math.h>
__global__
void add (int n, float *x, float *y){
  // Element-wise y[i] = x[i] + y[i].
  // FIX: converted to a grid-stride loop.  Under the original <<<1,1>>>
  // launch this is the identical sequential loop, but the original kernel
  // made EVERY thread add the whole array, so any multi-thread launch would
  // accumulate x into y multiple times.
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x){
    y[i] = x[i] + y[i];
  }
}
int main(void) {
  // Allocates two managed arrays, fills them on the host, adds them on the
  // device with a single-thread launch, then releases the memory.
  const int N = 1000000;
  float *x = nullptr;
  float *y = nullptr;
  cudaMallocManaged(&x, N * sizeof(float));
  cudaMallocManaged(&y, N * sizeof(float));
  for (int i = 0; i < N; ++i) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }
  add<<<1, 1>>>(N, x, y);
  cudaDeviceSynchronize();   // wait for the kernel before freeing
  cudaFree(x);
  cudaFree(y);
  return 0;
}
|
10,042 | #include <stdio.h>
#include <curand_kernel.h>
#define CURAND_CALL(x) do { \
if((x)!=CURAND_STATUS_SUCCESS) { \
printf("Error at %s:%d\n",__FILE__,__LINE__); \
return EXIT_FAILURE;}} while(0)
int main(int argc, char *argv[]) {
    // Generates n uniform floats with cuRAND on the device, copies them back
    // to the host, and prints them.
    int n = 100;
    int i;
    curandGenerator_t gen;
    float *devData, *hostData;
    /* Allocate n floats on host */
    hostData = (float *)calloc(n, sizeof(float));
    /* FIX: allocation and CUDA runtime calls were unchecked while the cuRAND
       calls were checked; check them all consistently. */
    if (hostData == NULL) {
        printf("Error at %s:%d\n",__FILE__,__LINE__);
        return EXIT_FAILURE;
    }
    /* Allocate n floats on device */
    if (cudaMalloc(&devData, n*sizeof(float)) != cudaSuccess) {
        printf("Error at %s:%d\n",__FILE__,__LINE__);
        return EXIT_FAILURE;
    }
    CURAND_CALL(curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT));
    /* Set seed */
    CURAND_CALL(curandSetPseudoRandomGeneratorSeed(gen, 1234567));
    /* Generate n floats on device */
    CURAND_CALL(curandGenerateUniform(gen, devData, n));
    /* Copy device memory to host */
    if (cudaMemcpy(hostData, devData, n * sizeof(float), cudaMemcpyDeviceToHost) != cudaSuccess) {
        printf("Error at %s:%d\n",__FILE__,__LINE__);
        return EXIT_FAILURE;
    }
    printf("hello world\n");
    /* Show result */
    for(i = 0; i < n; i++) {
        printf("%1.4f ", hostData[i]); }
    printf("\n");
    /* Cleanup */
    CURAND_CALL(curandDestroyGenerator(gen));
    cudaFree(devData);
    free(hostData);
    return 0;
}
|
10,043 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define WIDTH 64
float M[WIDTH][WIDTH] = {0};
float N[WIDTH][WIDTH] = {0};
float P[WIDTH][WIDTH] = {0};
float MxN[WIDTH][WIDTH] = {0};
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width);
void MatMul(float *M, float *N, float *P, int width);
int main(int argc, char *argv[])
{
    // Fills M and N with small random integers, multiplies them on the CPU
    // (into MxN) and on the GPU (into P via MatMul), and compares results.
    const int width = WIDTH;
    int pass = 1;
    for (int r = 0; r < width; ++r) {
        for (int c = 0; c < width; ++c) {
            M[r][c] = rand() % 30;
            N[r][c] = rand() % 30;
        }
    }
    struct timeval starttime, endtime;
    gettimeofday(&starttime, NULL);
    // Reference triple-loop product on the host.
    for (int r = 0; r < width; ++r) {
        for (int c = 0; c < width; ++c) {
            for (int k = 0; k < width; ++k) {
                MxN[r][c] += M[r][k] * N[k][c];
            }
        }
    }
    gettimeofday(&endtime, NULL);
    double executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0
                     + (endtime.tv_usec - starttime.tv_usec) / 1000.0;
    printf("CPU time: %13lf msec\n", executime);
    MatMul((float *)M, (float *)N, (float *)P, width);
    // Element-wise comparison; all values are small exactly-representable
    // integers, so float equality is well-defined here.
    for (int r = 0; r < width; ++r) {
        for (int c = 0; c < width; ++c) {
            if (MxN[r][c] != P[r][c]) {
                printf("MxN[%d][%d] = %2.0f P[%d][%d] = %2.0f\n", r, c, MxN[r][c], r, c, P[r][c]);
                pass = 0;
            }
        }
    }
    printf("Test %s\n", (pass)?"PASSED":"FAILED");
    return 0;
}
// Matrix multiplication kernel called by MatMul()
__global__ void MatMulKernel(float *Md, float *Nd, float *Pd, int width)
{
    // Each thread computes one element Pd[row][col] of P = M * N
    // (square width x width matrices, row-major).
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // FIX: guard threads outside the matrix.  When the grid overhangs the
    // matrix (any width not a multiple of the block size), unguarded threads
    // would read and write out of bounds.  For widths divisible by the block
    // size the guard is a no-op, so existing behavior is unchanged.
    if (row >= width || col >= width)
        return;
    // Accumulate the dot product of row of M and column of N.
    float Pvalue = 0;
    for (int k = 0; k < width; ++k) {
        float Melement = *(Md + row*width + k);
        float Nelement = *(Nd + k*width + col);
        Pvalue += Melement * Nelement;
    }
    // Each thread writes exactly one element of P.
    *(Pd + row*width + col) = Pvalue;
}
// Matrix multiplication - Host code
void MatMul(float *M, float *N, float *P, int width)
{
    // Copies M and N to the device, launches MatMulKernel timed with CUDA
    // events, and copies the product back into P.
    size_t size = width * width * sizeof(float);
    float *Md, *Nd, *Pd;
    // Allocate and Load M, N to device memory
    cudaMalloc((void **)&Md, size);
    cudaMemcpy(Md, M, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&Nd, size);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
    // Allocate P on the device
    cudaMalloc((void **)&Pd, size);
    // Setup the execution configuration.
    // FIX: ceil-division so widths that are not a multiple of 16 still get
    // full coverage (the original width/16 truncated and skipped the tail).
    // NOTE(review): overhanging threads must be bounds-guarded in the
    // kernel for non-multiple widths — confirm MatMulKernel does so.
    dim3 dimBlock(16, 16);
    dim3 dimGrid((width + 15) / 16, (width + 15) / 16);
    // Get start time event
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Invoke kernel
    MatMulKernel<<<dimGrid, dimBlock>>>(Md, Nd, Pd, width);
    cudaError_t cuda_err = cudaGetLastError();
    if ( cudaSuccess != cuda_err ){
        // FIX: the message claimed "before kernel call" although this check
        // runs after the launch and reports launch failures.
        printf("kernel launch failed: error = %s\n", cudaGetErrorString (cuda_err));
        exit(1) ;
    }
    // Get stop time event
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Compute execution time (cudaEventElapsedTime reports milliseconds)
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("GPU time: %13f msec\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Read P from device memory (cudaMemcpy synchronizes with the kernel)
    cudaMemcpy(P, Pd, size, cudaMemcpyDeviceToHost);
    // Free device memory
    cudaFree(Md);
    cudaFree(Nd);
    cudaFree(Pd);
}
|
10,044 | #include <stdio.h>
#define WARP_SIZE 32
extern "C"
{
__global__ void cuda_test(
    int anglebatch,
    int numgroups,
    int ncornr,
    int size_maxCorner,
    double* psicbatch)
{
    // Smoke-test kernel: fills psicbatch(g, c, angle) with 1 for every group,
    // corner and angle.  Blocks stride over angles, threads stride over
    // groups; `ncornr` is accepted but unused here.
    int Groups=192;
    // NOTE(review): the macro hard-codes Groups=192 as the leading stride,
    // independent of the `numgroups` argument — confirm they always agree.
#define psicbatch(ig,b,c) psicbatch[(ig) + Groups * ((b) + size_maxCorner *(c) )]
    printf("anglebatch = %d\n", anglebatch);
    //printf("numgroups = %d\n", numgroups);
    //printf("ncornr = %d\n", ncornr);
    //printf("size_maxCorner = %d\n",size_maxCorner);
    for (int angle=blockIdx.x; angle<anglebatch; angle+=gridDim.x) {
        for(int c=0; c<size_maxCorner; c++) {
            {
                for (int g=threadIdx.x; g<numgroups; g+= blockDim.x) {
                    psicbatch(g,c,angle) = 1;
                }
            }
        }
    }
}
__global__ void GPU_sweep(
    int size_maxCorner,
    int size_maxcf,
    int nAngle,
    int nzones,
    int ncornr,
    int Groups,
    int nbelem,
    int* AngleOrder,
    double* soa_omega,
    int* nextZ,
    int* next,
    int* soa_nCorner,
    int* soa_nCFaces,
    int* soa_c0,
    double* soa_STotal,
    double* soa_STime,
    double* soa_SigtInv,
    double* soa_Volume,
    double* soa_Sigt,
    double* soa_A_fp,
    double* soa_A_ez,
    int* soa_Connect,
    double* psic,
    double* psib )
{
    // Discrete-ordinates transport sweep (GPU port of CC_sweep below).
    // Launch layout: blockIdx.y selects the angle, threadIdx.x the energy
    // group.  Zones are visited in the precomputed order nextZ(:,Angle);
    // corners within a zone in the order next(:,Angle).
    // NOTE(review): the shared-memory carving below sizes the per-group
    // buffers by WARP_SIZE, so it assumes blockDim.x <= WARP_SIZE and that
    // everything fits in sm_agg[625] — confirm against the launch site.
    // double omega[3];
    int c,ig,i,icface,ifp,cez,k,ii;
    // double Q[Groups * size_maxCorner];
    // double src[Groups * size_maxCorner];
    // double SigtVol[Groups * size_maxCorner];
    // double afpm[size_maxcf];
    // double psifp[Groups * size_maxcf];
    // int ez_exit[size_maxcf];
    // double coefpsic[size_maxcf];
    // double tpsic[Groups * size_maxCorner];
    // double psi_opp[Groups];
    double area_opp,area_inv,sumArea;
    double r_psifp;
    double psi_opp,tpsic;
    double omega0, omega1, omega2;
    // Carve per-block scratch arrays out of one statically sized shared pool.
    __shared__ double *Q, *src, *volume, *coefpsic, *afpm, *psifp;
    __shared__ int *ez_exit;
    __shared__ double sm_agg[625];
    int offset = 0;
    Q = &(sm_agg[0]);
    offset += size_maxCorner * WARP_SIZE;
    src = &(sm_agg[offset]);
    offset += size_maxCorner * WARP_SIZE;
    volume = &(sm_agg[offset]);
    offset += size_maxCorner;
    coefpsic = &(sm_agg[offset]);
    offset += size_maxcf;
    afpm = &(sm_agg[offset]);
    offset += size_maxcf;
    psifp = &(sm_agg[offset]);
    offset += size_maxcf * WARP_SIZE;
    //note ez_exit has integer type
    ez_exit = (int*) &(sm_agg[offset]);
    const double fouralpha = 1.82;
    // const double fouralpha4 = 5.82;
#define soa_omega(a,b) soa_omega[a + 3 * b]
    // #define tpsic(ig,c) tpsic[ (ig) + Groups * (c)]
#define EB_ListExit(a,ia) EB_ListExit[ a + 2 * (ia) ]
#define soa_A_fp(a,icface,c,zone) soa_A_fp[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_A_ez(a,icface,c,zone) soa_A_ez[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_Connect(a,icface,c,zone) soa_Connect[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define psifp(ig,jf) psifp[(ig) + Groups * (jf)]
#define psib(ig,b,c) psib[(ig) + Groups * ((b) + nbelem * (c) )]
#define psic(ig,b,c) psic[(ig) + Groups * ((b) + size_maxCorner *(c) )]
#define Q(ig,c) Q[(ig) + Groups * (c)]
#define src(ig,c) src[(ig) + Groups * (c)]
    // #define SigtVol(ig,c) SigtVol[(ig) + Groups * (c)]
#define soa_Sigt(ig,zone) soa_Sigt[(ig) + Groups * (zone)]
    // NOTE(review): soa_Volume's macro indexes with `ig`, not its `c`
    // parameter — looks like a copy/paste from soa_Sigt; confirm intent.
#define soa_Volume(c,zone) soa_Volume[(ig) + Groups * (zone)]
#define soa_SigtInv(ig,zone) soa_SigtInv[(ig) + Groups * (zone)]
#define soa_STotal(ig,c,zone) soa_STotal[ig + Groups * ( c + size_maxCorner * (zone) )]
#define soa_STime(ig,c,Angle,zone) soa_STime[ig + Groups * ( c + size_maxCorner * ( Angle + nAngle * (zone) ) )]
#define nextZ(a,b) nextZ[ (a) + nzones * (b) ]
#define next(a,b) next[ (a) + (ncornr+1) * (b) ]
    // for(int Angle=0;Angle<nAngle;Angle++)
    int Angle = blockIdx.y;   // one y-block per angle
    ig = threadIdx.x;         // one thread per energy group
    // if(ig==0) printf("my offset=%d\n",offset);
    // if(ig==0)
    // {
    // printf("psic=%x\n",psic);
    // printf("nextZ=%x\n",psic);
    // printf("next=%x\n",psic);
    // printf("psib=%x\n",psic);
    // }
    {
        omega0 = soa_omega(0,Angle);
        omega1 = soa_omega(1,Angle);
        omega2 = soa_omega(2,Angle);
        // Corners completed so far in this angle's sweep ordering.
        int ndone = 0;
        for(ii=0;ii<nzones;ii++)
        {
            int zone = nextZ(ii,Angle) - 1;   // 1-based -> 0-based
            int nCorner = soa_nCorner[zone];
            int nCFaces = soa_nCFaces[zone];
            int c0 = soa_c0[zone] ;
            double Sigt = soa_Sigt(ig,zone);
            // Per-corner source terms for this zone.
            for(c=0;c<nCorner;c++)
            {
                double source = soa_STotal(ig,c,zone) + soa_STime(ig,c,Angle,zone);
                Q(ig,c) = soa_SigtInv(ig,zone)*source ;
                src(ig,c) = soa_Volume(c,zone) *source;
                //SigtVol(ig,c) = soa_Sigt(ig,zone)*soa_Volume(c,zone);
                volume[c] = soa_Volume(c,zone);
            }
            // Sweep corners in upstream-to-downstream order.
            for(i=0;i<nCorner;i++)
            {
                int ic = next(ndone+i,Angle);
                c = ic - c0 - 1;
                sumArea = 0.0;
                // Face-parallel (fp) contributions: incoming faces add to the
                // source, outgoing faces accumulate exit area.
                for(icface=0;icface<nCFaces;icface++)
                {
                    afpm[icface] = omega0*soa_A_fp(0,icface,c,zone) +
                        omega1*soa_A_fp(1,icface,c,zone) +
                        omega2*soa_A_fp(2,icface,c,zone);
                    int icfp = soa_Connect(1,icface,c,zone) - 1;
                    int ib = soa_Connect(2,icface,c,zone) - 1;
                    if ( afpm[icface] >= 0.0 )
                    {
                        sumArea = sumArea + afpm[icface];
                    }
                    else
                    {
                        // Incoming flux: from the boundary array when the
                        // neighbor index is 0, otherwise from the neighbor corner.
                        if (icfp == 0)
                        {
                            // psifp(ig,icface) = psib(ig,ib,Angle);
                            r_psifp = psib(ig,ib,Angle);
                            // r_psifp = 0.3;
                        }
                        else
                        {
                            // psifp(ig,icface) = psic(ig,icfp,Angle);
                            r_psifp = psic(ig,icfp,Angle);
                            // r_psifp = 0.7;
                        }
                        src(ig,c) -= afpm[icface]*r_psifp;
                        psifp(ig,icface) = r_psifp;
                    }
                }
                // Corner-to-corner (ez) coupling within the zone.
                int nxez = 0;
                for(icface=0;icface<nCFaces;icface++)
                {
                    double aez = omega0*soa_A_ez(0,icface,c,zone) + omega1*soa_A_ez(1,icface,c,zone) + omega2*soa_A_ez(2,icface,c,zone) ;
                    if (aez > 0.0 )
                    {
                        sumArea = sumArea + aez;
                        area_opp = .0;
                        cez = soa_Connect(2,icface,c,zone) - 1;
                        ez_exit[nxez] = cez;
                        coefpsic[nxez] = aez;
                        nxez = nxez + 1;
                        if (nCFaces == 3)
                        {
                            ifp = (icface+1)%nCFaces;
                            if ( afpm[ifp] < 0.0 )
                            {
                                area_opp = -afpm[ifp];
                                psi_opp = psifp(ig,ifp);
                            }
                        }
                        else
                        {
                            // General polygon: area-weighted average of the
                            // incoming opposite-face fluxes.
                            ifp = icface;
                            area_opp = 0.0;
                            psi_opp = 0.0;
                            for(k=0;k<nCFaces-2;k++)
                            {
                                ifp = ifp%nCFaces;
                                if ( afpm[ifp] < 0.0 )
                                {
                                    area_opp = area_opp - afpm[ifp];
                                    psi_opp = psi_opp - afpm[ifp]*psifp(ig,ifp);
                                }
                            }
                            area_inv = 1.0/area_opp;
                            psi_opp = psi_opp*area_inv;
                        }
                        if (area_opp > 0.0) {
                            double aez2 = aez*aez;
                            {
                                double sigv = Sigt*volume[c];
                                double sigv2 = sigv*sigv;
                                double gnum = aez2*( fouralpha*sigv2 + aez*(4.0*sigv + 3.0*aez) );
                                double gtau = gnum/( gnum + 4.0*sigv2*sigv2 + aez*sigv*(6.0*sigv2 + 2.0*aez*(2.0*sigv + aez)) ) ;
                                double sez = gtau*sigv*( psi_opp - Q(ig,c) ) + 0.5*aez*(1.0 - gtau)*( Q(ig,c) - Q(ig,cez) );
                                src(ig,c) = src(ig,c) + sez;
                                src(ig,cez) = src(ig,cez) - sez;
                            }
                        }
                        else
                        {
                            double sez = 0.5*aez*( Q(ig,c) - Q(ig,cez) );
                            src(ig,c) = src(ig,c) + sez;
                            src(ig,cez) = src(ig,cez) - sez;
                        }
                    }
                }
                // printf("ckim angle,zone,corner,aez_cnt %d,%d,%d,%d\n",Angle,zone,c,aez_cnt);
                // Corner flux and downstream redistribution.
                tpsic = src(ig,c)/(sumArea + Sigt*volume[c]);
                for(icface=0;icface<nxez;icface++)
                {
                    int cez = ez_exit[icface];
                    src(ig,cez) = src(ig,cez) + coefpsic[icface]*tpsic;
                }
                //hope that ther is no self referencing
                psic(ig,c0+c,Angle) = tpsic;
            }
            ndone = ndone + nCorner;
        }
    }
    // ExitBdy => getExitList(QuadSet, Angle)
    // for(i=0;i<EB_nExit;i++)
    // {
    // int ib = EB_ListExit(1,i);
    // int ic = EB_ListExit(2,i);
    // for(ig=0;ig<Groups;ig++)
    // psib(ig,ib) = psic(ig,ic);
    // }
}
void CC_sweep(
    int size_maxCorner,
    int size_maxcf,
    int nAngle,
    int nzones,
    int ncornr,
    int Groups,
    int nbelem,
    int* Angle,
    double* soa_omega,
    int* nextZ,
    int* next,
    int* soa_nCorner,
    int* soa_nCFaces,
    int* soa_c0,
    double* soa_STotal,
    double* soa_STime,
    double* soa_SigtInv,
    double* soa_Volume,
    double* soa_Sigt,
    double* soa_A_fp,
    double* soa_A_ez,
    int* soa_Connect,
    double* psic,
    double* psib )
{
    // Host (CPU) reference implementation of the transport sweep; same
    // algorithm as GPU_sweep above but looping serially over all angles and
    // energy groups.
    // NOTE(review): the scratch arrays below are variable-length arrays,
    // which are a compiler extension in C++ — confirm the build relies on it.
    double omega[3];
    int c,ig,i,icface,ifp,cez,k,ii;
    double Q[Groups * size_maxCorner];
    double src[Groups * size_maxCorner];
    double SigtVol[Groups * size_maxCorner];
    double afpm[size_maxcf];
    double psifp[Groups * size_maxcf];
    int ez_exit[size_maxcf];
    double coefpsic[size_maxcf];
    double tpsic[Groups * size_maxCorner];
    double psi_opp[Groups];
    double area_opp,area_inv,sumArea;
    const double fouralpha = 1.82;
    // const double fouralpha4 = 5.82;
#define soa_omega(a,b) soa_omega[a + 3 * b]
#define tpsic(ig,c) tpsic[ (ig) + Groups * (c)]
#define EB_ListExit(a,ia) EB_ListExit[ a + 2 * (ia) ]
#define soa_A_fp(a,icface,c,zone) soa_A_fp[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_A_ez(a,icface,c,zone) soa_A_ez[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define soa_Connect(a,icface,c,zone) soa_Connect[ a + 3 * ( icface + size_maxcf * ( c + size_maxCorner * (zone) ) )]
#define psifp(ig,jf) psifp[(ig) + Groups * (jf)]
#define psib(ig,b,c) psib[(ig) + Groups * ((b) + nbelem * (c) )]
#define psic(ig,b,c) psic[(ig) + Groups * ((b) + size_maxCorner *(c) )]
#define Q(ig,c) Q[(ig) + Groups * (c)]
#define src(ig,c) src[(ig) + Groups * (c)]
#define SigtVol(ig,c) SigtVol[(ig) + Groups * (c)]
#define soa_Sigt(ig,zone) soa_Sigt[(ig) + Groups * (zone)]
    // NOTE(review): soa_Volume's macro indexes with `ig`, not its `c`
    // parameter — looks like a copy/paste from soa_Sigt; confirm intent.
#define soa_Volume(c,zone) soa_Volume[(ig) + Groups * (zone)]
#define soa_SigtInv(ig,zone) soa_SigtInv[(ig) + Groups * (zone)]
#define soa_STotal(ig,c,zone) soa_STotal[ig + Groups * ( c + size_maxCorner * (zone) )]
#define soa_STime(ig,c,Angle,zone) soa_STime[ig + Groups * ( c + size_maxCorner * ( Angle + nAngle * (zone) ) )]
#define nextZ(a,b) nextZ[ (a) + nzones * (b) ]
#define next(a,b) next[ (a) + (ncornr+1) * (b) ]
    for(int Angle=0;Angle<nAngle;Angle++)
    {
        omega[0] = soa_omega(0,Angle);
        omega[1] = soa_omega(1,Angle);
        omega[2] = soa_omega(2,Angle);
        // Corners completed so far in this angle's sweep ordering.
        int ndone = 0;
        for(ii=0;ii<nzones;ii++)
        {
            int zone = nextZ(ii,Angle) - 1;   // 1-based -> 0-based
            int nCorner = soa_nCorner[zone];
            int nCFaces = soa_nCFaces[zone];
            int c0 = soa_c0[zone] ;
            // Per-corner, per-group source terms for this zone.
            for(c=0;c<nCorner;c++)
            {
                for(ig=0;ig<Groups;ig++)
                {
                    double source = soa_STotal(ig,c,zone) + soa_STime(ig,c,Angle,zone);
                    Q(ig,c) = soa_SigtInv(ig,zone)*source ;
                    src(ig,c) = soa_Volume(c,zone) *source;
                    SigtVol(ig,c) = soa_Sigt(ig,zone)*soa_Volume(c,zone);
                }
            }
            // Sweep corners in upstream-to-downstream order.
            for(i=0;i<nCorner;i++)
            {
                int ic = next(ndone+i,Angle);
                c = ic - c0 - 1;
                sumArea = 0.0;
                // Face-parallel contributions: incoming faces feed the source,
                // outgoing faces accumulate exit area.
                for(icface=0;icface<nCFaces;icface++)
                {
                    afpm[icface] = omega[0]*soa_A_fp(0,icface,c,zone) +
                        omega[1]*soa_A_fp(1,icface,c,zone) +
                        omega[2]*soa_A_fp(2,icface,c,zone);
                    int icfp = soa_Connect(1,icface,c,zone) - 1;
                    int ib = soa_Connect(2,icface,c,zone) - 1;
                    if ( afpm[icface] >= 0.0 )
                    {
                        sumArea = sumArea + afpm[icface];
                    }
                    else
                    {
                        if (icfp == 0)
                        {
                            for(ig=0;ig<Groups;ig++) psifp(ig,icface) = psib(ig,ib,Angle);
                        }
                        else
                        {
                            for(ig=0;ig<Groups;ig++) psifp(ig,icface) = psic(ig,icfp,Angle);
                        }
                        for(ig=0;ig<Groups;ig++) src(ig,c) = src(ig,c) - afpm[icface]*psifp(ig,icface);
                    }
                }
                // Corner-to-corner (ez) coupling within the zone.
                int nxez = 0;
                for(icface=0;icface<nCFaces;icface++)
                {
                    double aez = omega[0]*soa_A_ez(0,icface,c,zone) + omega[1]*soa_A_ez(1,icface,c,zone) + omega[2]*soa_A_ez(2,icface,c,zone) ;
                    if (aez > 0.0 )
                    {
                        sumArea = sumArea + aez;
                        area_opp = .0;
                        cez = soa_Connect(2,icface,c,zone) - 1;
                        ez_exit[nxez] = cez;
                        coefpsic[nxez] = aez;
                        nxez = nxez + 1;
                        if (nCFaces == 3)
                        {
                            // NOTE(review): GPU_sweep uses (icface+1)%nCFaces
                            // here; one of the two versions is presumably a
                            // typo — confirm which face is intended.
                            ifp = icface%nCFaces;
                            if ( afpm[ifp] < 0.0 )
                            {
                                area_opp = -afpm[ifp];
                                for(ig=0;ig<Groups;ig++) psi_opp[ig] = psifp(ig,ifp);
                            }
                        }
                        else
                        {
                            // General polygon: area-weighted average of the
                            // incoming opposite-face fluxes.
                            ifp = icface;
                            area_opp = 0.0;
                            for(ig=0;ig<Groups;ig++) psi_opp[ig] = 0.0;
                            for(k=0;k<nCFaces-2;k++)
                            {
                                ifp = ifp%nCFaces;
                                if ( afpm[ifp] < 0.0 )
                                {
                                    area_opp = area_opp - afpm[ifp];
                                    for(ig=0;ig<Groups;ig++) psi_opp[ig] = psi_opp[ig] - afpm[ifp]*psifp(ig,ifp);
                                }
                            }
                            area_inv = 1.0/area_opp;
                            for(ig=0;ig<Groups;ig++)
                                psi_opp[ig] = psi_opp[ig]*area_inv;
                        }
                        if (area_opp > 0.0) {
                            double aez2 = aez*aez;
                            for(ig=0;ig<Groups;ig++)
                            {
                                double sigv = SigtVol(ig,c);
                                double sigv2 = sigv*sigv;
                                double gnum = aez2*( fouralpha*sigv2 + aez*(4.0*sigv + 3.0*aez) );
                                double gtau = gnum/( gnum + 4.0*sigv2*sigv2 + aez*sigv*(6.0*sigv2 + 2.0*aez*(2.0*sigv + aez)) ) ;
                                double sez = gtau*sigv*( psi_opp[ig] - Q(ig,c) ) + 0.5*aez*(1.0 - gtau)*( Q(ig,c) - Q(ig,cez) );
                                src(ig,c) = src(ig,c) + sez;
                                src(ig,cez) = src(ig,cez) - sez;
                            }
                        }
                        else
                        {
                            for(ig=0;ig<Groups;ig++)
                            {
                                double sez = 0.5*aez*( Q(ig,c) - Q(ig,cez) );
                                src(ig,c) = src(ig,c) + sez;
                                src(ig,cez) = src(ig,cez) - sez;
                            }
                        }
                    }
                }
                // printf("ckim angle,zone,corner,aez_cnt %d,%d,%d,%d\n",Angle,zone,c,aez_cnt);
                // Corner flux and downstream redistribution.
                for(ig=0;ig<Groups;ig++)
                    tpsic(ig,c) = src(ig,c)/(sumArea + SigtVol(ig,c));
                for(icface=0;icface<nxez;icface++)
                {
                    int cez = ez_exit[icface];
                    for(ig=0;ig<Groups;ig++)
                        src(ig,cez) = src(ig,cez) + coefpsic[icface]*tpsic(ig,c);
                }
            }
            ndone = ndone + nCorner;
            for(c=0;c<nCorner;c++)
            {
                for(ig=0;ig<Groups;ig++)
                    psic(ig,c0+c,Angle) = tpsic(ig,c);
            }
        }
        // NOTE(review): this extra increment is dead — ndone is re-zeroed at
        // the top of the angle loop and not read in between; confirm it is
        // not a misplaced statement.
        ndone++;
    }
    // ExitBdy => getExitList(QuadSet, Angle)
    // for(i=0;i<EB_nExit;i++)
    // {
    // int ib = EB_ListExit(1,i);
    // int ic = EB_ListExit(2,i);
    // for(ig=0;ig<Groups;ig++)
    // psib(ig,ib) = psic(ig,ic);
    // }
}
}
|
10,045 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
int main(int argc, char **argv)
{
    // Initialise CUDA - select device 0.
    // FIX: report failure via the exit code instead of silently ignoring the
    // cudaSetDevice return status.
    if (cudaSetDevice(0) != cudaSuccess)
        return 1;
    return 0;
}
10,046 | #include<iostream>
#include <math.h>
#include <fstream>
using namespace std;
//print functions
void print_matrix(float *m, int x, int y){
for (int i=0; i<x; i++){
for (int j=0; j<y; j++){
cout << m[i*x+j] << ", ";
}
cout << "\n ";
}
}
void print_vector(float *m, int size){
    // Prints `size` elements separated by ", ", then a newline + space.
    for (int idx = 0; idx < size; ++idx)
        cout << m[idx] << ", ";
    cout << "\n ";
}
// Create a vector of evenly spaced numbers.
__global__ void range(float *r, float min, float max, int N) {
    // r[i] = min + i * (max-min)/(N-1): N points spanning [min, max]
    // inclusive, one element per thread.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    float step = (max - min) / float(N - 1);
    r[idx] = min + idx * step;
}
__device__ void copy_matrix(float* o, float* copy, int size){
    // Per-thread element copy: each calling thread copies only the single
    // element selected by its global thread index (threads at or beyond
    // `size` do nothing).  `size` is an element count, so callers wanting a
    // whole nx-by-ny field copied must pass nx*ny and launch enough threads.
    // NOTE(review): no synchronization here — a caller that reads `copy`
    // later in the same kernel races with other blocks' copies.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i>=size) return;
    copy[i]= o[i];
}
__global__ void pressure_poisson_conditions(float* p,float* pn, float* b, float dx, float dy, int nx, int ny){
    // Pressure boundary conditions on the nx*nx grid:
    // dp/dx = 0 on the left/right walls, dp/dy = 0 on the bottom row,
    // p = 0 on the top row.  pn and b are unused here (kept for interface
    // compatibility with the other pressure kernels).
    // FIX: the original returned early for i >= nx*nx BEFORE the
    // __syncthreads() below; a partially-exited block reaching a barrier is
    // undefined behavior, and the last block always straddles nx*nx here.
    // All threads now reach the barrier.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    bool inGrid = (i < nx*nx);
    if (inGrid) {
        if ((i+1) % nx == 0){
            p[i] = p[i-1];      // right wall copies left neighbor
        }
        if(i < nx){
            p[i] = p[i+nx];     // bottom row copies the row above
        }
        if ((i%nx) == 0){
            p[i] = p[i+1];      // left wall copies right neighbor
        }
    }
    __syncthreads();
    if (inGrid && i >= (nx*(nx-1))){
        p[i] = 0.0;             // top row held at zero
    }
}
__global__ void pressure_poisson(float* p,float* pn, float* b, float dx, float dy, int nx, int ny){
    // One Jacobi relaxation step of the pressure Poisson equation on the
    // interior points, with pn as the previous iterate and b as the source.
    // FIXES vs original:
    //  - the copy size was nx (first row only); the full field is nx*nx.
    //  - p[i] was written by every launched thread, including threads past
    //    nx*nx (out-of-bounds store) and edge cells whose stencil reads
    //    pn[i-1] / pn[i-nx] out of bounds; an interior-only guard is added
    //    (boundary values are re-imposed by pressure_poisson_conditions).
    // NOTE(review): the copy and the stencil still race ACROSS blocks
    // (__syncthreads is block-local); a fully correct fix would move the
    // copy into its own kernel launch.
    copy_matrix(p, pn, nx*nx);
    __syncthreads();
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= nx*nx || i < nx || i >= nx*(nx-1) || i % nx == 0 || i % nx == nx-1) return;
    p[i] = (((pn[i+1] + pn[i-1]) * pow(dy,2) + (pn[i+nx] + pn[i-nx]) * pow(dx,2) )/ (2.0 * (pow(dx,2) + pow(dy,2))) - pow(dx,2) * pow(dy,2) / (2.0 * (pow(dx,2) + pow(dy,2))) * b[i]);
}
__global__ void build_up_b(float*b, float* u, float*v, float* un, float* vn, float dx, float dy, int nx, int ny){
    // Builds the source term b of the pressure Poisson equation from the
    // current velocities u, v (central differences, interior points only),
    // and snapshots u, v into un, vn for the later momentum update kernels.
    int rho = 1;
    // nu is unused in this kernel; dt enters the first term as 1/dt.
    float nu = 0.1, dt = 0.001;
    copy_matrix(u, un, nx*nx); //problem can be here
    copy_matrix(v, vn, nx*nx);
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Skip out-of-range threads and all four boundary rows/columns, since the
    // stencil below reads i-1, i+1, i-nx, i+nx.
    if (i>=nx*nx || i<nx || i>nx*(nx-1)-1 || i%nx ==0 || i%nx ==nx-1) return; //loop bounds
    b[i] = (rho * ((1/dt) * ((u[i+1] - u[i-1]) / (2*dx) + (v[i+nx] - v[i-nx])/ (2 * dy)) - ((u[i+1] - u[i-1]) / (2*dx))*((u[i+1] - u[i-1]) / (2*dx)) - 2 * ((u[i+nx] - u[i-nx]) / (2*dy) * (v[i+1] - v[i-1])/(2 * dx)) - ((v[i+nx] - v[i-nx]) / (2*dy)) * ((v[i+nx] - v[i-nx]) / (2*dy))));
}
__global__ void initiate_matrix(float *a, float value, int x, int y){
    // Fills an x-by-y matrix with `value`, one element per thread.
    // FIX: the original ignored `value` and wrote the linear index instead
    // (a[i] = i), contradicting the parameter; no caller in this file relies
    // on the old index-fill behavior.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= x*y) return;
    a[i] = value;
}
__global__ void cavity_conditions(int nt, float*u, float* v,float*un, float* vn,float dt, float dx, float dy, float* p, float rho, float nu, int nx, int ny){
    // Velocity boundary conditions: no-slip (u = v = 0) on the left, right
    // and bottom walls; lid-driven top row with u = 1, v = 0.  Only u and v
    // are touched; the other parameters are kept for interface compatibility.
    // FIX: the original called __syncthreads() after a divergent early
    // return (i >= nx*nx) — undefined behavior.  Each thread writes only its
    // own u[i] / v[i], so no barrier is needed at all; they were removed.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= nx*nx) return;
    if (i % nx == 0 || (i+1) % nx == 0){   // left or right wall
        u[i] = 0.0;
        v[i] = 0.0;
    }
    if (i < nx){                           // bottom row
        u[i] = 0.0;
        v[i] = 0.0;
    }
    if (i >= (nx*(nx-1))){                 // moving lid (top row)
        u[i] = 1.0;
        v[i] = 0.0;
    }
}
__global__ void cavity_flow_ops(int nt, float*u, float* v,float*un, float* vn,float dt, float dx, float dy, float* p, float rho, float nu, int nx, int ny){
    // One explicit momentum step on the interior points: reads the
    // previous-step copies un/vn and the pressure p, writes u and v.
    // FIXES vs original:
    //  - __syncthreads() appeared after a divergent early return (undefined
    //    behavior); the updates read only un/vn/p and write only this
    //    thread's own u[i]/v[i], so the barriers were removed entirely.
    //  - the u-update's viscous term had the dy^2 contribution nested inside
    //    the dx^2 factor, inconsistent with the v-update and the standard
    //    discretization; the parenthesization now mirrors the v line.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i/nx == 0 || i % nx == nx-1 || i/nx == nx-1 || i % nx == 0) return; // boundary cells
    if (i < nx * ny && i >= 1) {
        u[i] = (un[i] - un[i] * dt/dx * (un[i] - un[i-1]) - vn[i] * dt/dy * (un[i] - un[i-nx]) - dt/ (2* rho * dx) * (p[i+1] - p[i-1]) + nu * (dt/ pow(dx,2) * (un[i+1] - 2*un[i] + un[i-1]) + dt/(pow(dy,2)) * (un[i+nx] - 2 *un[i] + un[i-nx])));
        v[i]= (vn[i] - un[i] * dt/dx * (vn[i] - vn[i-1]) - vn[i] * dt/dy * (vn[i] - vn[i-nx]) - dt/ (2 * rho * dy) * (p[i+nx] - p[i-nx]) + nu * (dt /(pow(dx,2)) * (vn[i+1] - 2* vn[i] + vn[i-1]) + dt/(pow(dy,2)) * (vn[i+nx] - 2 * vn[i] + vn[i-nx])));
    }
}
ofstream output;
void print_matrix_to_file(float* m,int size){
    // Writes a size-by-size row-major matrix to the global `output` stream,
    // one comma-terminated row per line (each row ends with "\n ").
    for (int row = 0; row < size; ++row){
        for (int col = 0; col < size; ++col)
            output << m[row*size+col] << ",";
        output << "\n ";
    }
}
void print_to_file(float* X, float* Y, float* p, float* u, float* v, int nx){
    // Dumps p, u and v (each nx-by-nx) to results_nt700.csv under section
    // headers.  X and Y are accepted for interface compatibility but unused.
    output.open("results_nt700.csv");
    output << "p\n";
    print_matrix_to_file(p, nx);
    output << "u\n";
    print_matrix_to_file(u,nx);
    output << "v\n";
    print_matrix_to_file(v, nx);
    // FIX: close (and flush) the stream explicitly instead of relying on the
    // global ofstream's destructor at program exit.
    output.close();
}
void cavity_flow(int nt,float dt, float dx, float dy, float rho, float nu, int nx, int ny, float *X, float* Y){
    // Time-steps the lid-driven cavity problem: for each of nt steps, builds
    // the Poisson source term b, relaxes the pressure 50 times, advances the
    // velocities, applies boundary conditions, and finally dumps p/u/v.
    const int N = nx*nx;
    const int M = 1024;   // threads per block
    float *u, *v, *p;
    cudaMallocManaged(&u, nx*ny*sizeof(float));
    cudaMallocManaged(&v, nx*ny*sizeof(float));
    cudaMallocManaged(&p, nx*ny*sizeof(float));
    cudaMemset(u, 0, nx*nx*sizeof(float));
    cudaMemset(v, 0, nx*nx*sizeof(float));
    cudaMemset(p, 0, nx*nx*sizeof(float));
    //copy vectors for cavity_flow function
    float *un, *vn, *b;
    cudaMallocManaged(&un, nx*ny*sizeof(float));
    cudaMallocManaged(&vn, nx*ny*sizeof(float));
    cudaMallocManaged(&b, nx*ny*sizeof(float));
    //initiate to 0
    cudaMemset(un, 0, nx*nx*sizeof(float));
    cudaMemset(vn, 0, nx*nx*sizeof(float));
    cudaMemset(b, 0, nx*nx*sizeof(float));
    float *pn;
    cudaMallocManaged(&pn, nx*ny*sizeof(float));
    // FIX: pn was never initialized; the first pressure_poisson iteration
    // read uninitialized memory from it.
    cudaMemset(pn, 0, nx*nx*sizeof(float));
    cudaDeviceSynchronize();
    for (int n=0; n<nt; n++){
        build_up_b<<<(N+M-1)/M,M>>>(b,u,v,un,vn,dx,dy,nx,ny);
        cudaDeviceSynchronize();
        for (int i=0; i<50; i++){
            pressure_poisson<<<(N+M-1)/M,M>>>(p, pn,b, dx, dy,nx,ny);
            cudaDeviceSynchronize();
            pressure_poisson_conditions<<<(N+M-1)/M,M>>>(p, pn,b, dx, dy,nx,ny);
        }
        cudaDeviceSynchronize();
        cavity_flow_ops<<<(N+M-1)/M,M>>>(nt, u, v, un, vn, dt, dx, dy, p, rho, nu, nx, ny);
        cudaDeviceSynchronize();
        cavity_conditions<<<(N+M-1)/M,M>>>(nt, u, v, un, vn, dt, dx, dy, p, rho, nu, nx, ny);
        cudaDeviceSynchronize();
    }
    print_to_file(X, Y, p, u, v,nx);
    cudaFree(u);
    cudaFree(un);
    cudaFree(v);
    cudaFree(vn);
    cudaFree(b);
    cudaFree(p);
    cudaFree(pn);
}
int main(){
    // Sets up the 41x41 lid-driven cavity problem, builds host-side
    // coordinate grids X/Y, and runs 700 time steps.
    const float rho = 1.0;
    const float nu = .1, dt = .001;
    const int nx = 41, ny = 41, nt = 700;
    const float dx = 2 / float((nx - 1)), dy = 2 / float((ny - 1));
    float *x, *y;
    cudaMallocManaged(&x, nx*sizeof(float));
    cudaMallocManaged(&y, ny*sizeof(float));
    range<<<1,nx>>>(x, 0, 2, nx);
    range<<<1,nx>>>(y, 0, 2, nx);
    cudaDeviceSynchronize();
    float *X, *Y;
    cudaMallocManaged(&X, nx*nx*sizeof(float));
    cudaMallocManaged(&Y, ny*ny*sizeof(float));
    // Meshgrid: X varies along columns, Y along rows.  (Both are sampled
    // from x, matching the original code.)
    for (int row = 0; row < nx; ++row){
        for (int col = 0; col < ny; ++col){
            X[row*nx+col] = x[col];
            Y[row*nx+col] = x[row];
        }
    }
    cudaDeviceSynchronize();
    cavity_flow(nt, dt, dx, dy, rho, nu, nx, ny, X, Y);
    cudaDeviceSynchronize();
    cudaFree(Y);
    cudaFree(X);
    cudaFree(y);
    cudaFree(x);
}
|
10,047 | // includes, system
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
__global__ void copy(float *data_in, float *data_out, int n)
{
    // Copies data_in into data_out, one element per thread.
    // FIXES: the original wrote data_in[xid] = data_out[xid] — reversed with
    // respect to the parameter names and to the caller's intent (main
    // verifies odata against idata) — and ignored `n`, so overhanging
    // threads would access out of bounds.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        data_out[tid] = data_in[tid];
}
/////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////
int main( int argc, char** argv)
{
    // Times a device-to-device copy kernel and reports the effective
    // bandwidth (one read + one write per element).
    float *idata_h, *odata_h;
    float *idata_d, *odata_d;
    const int N = 1000;
    cudaEvent_t start, stop;
    float time, seconds, effBandwidth;
    int i;
    // allocate host and device memory
    idata_h = ( float* ) malloc(N * sizeof(float));
    odata_h = ( float* ) malloc(N * sizeof(float));
    cudaMalloc(&idata_d, N * sizeof(float));
    cudaMalloc(&odata_d, N * sizeof(float));
    //Fill the input array
    for (i = 0; i < N; i++)
    {
        idata_h[i] = (float) i;
    }
    //Copy the input array to the device
    cudaMemcpy(idata_d, idata_h, N*sizeof(float), cudaMemcpyHostToDevice);
    //Set up the timing variables and begin timing
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    //Copy kernel
    // FIX: the original passed the HOST pointer idata_h as the second
    // argument; kernels must only receive device pointers.  The copy now
    // runs idata_d -> odata_d (assumes `copy` writes its second argument,
    // as its data_in/data_out parameter names indicate — confirm).
    copy<<<N, 1>>>(idata_d, odata_d, N);
    //Stop timing
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    //Copy the output array from the device
    cudaMemcpy(odata_h, odata_d, N*sizeof(float), cudaMemcpyDeviceToHost);
    // verify the data is correct
    for (i = 0; i < N; i++)
    {
        assert(odata_h[i] == idata_h[i] );
    }
    // If the program makes it this far, then the results are
    // correct and there are no run-time errors. Good work!
    printf("Correct!\n");
    //Compute the Effective Bandwidth
    // FIX: cudaEventElapsedTime reports MILLISECONDS; the original divided
    // GB by milliseconds (off by 1000) and printed the raw value as seconds.
    cudaEventElapsedTime(&time, start, stop);
    seconds = time / 1000.0f;
    effBandwidth = 2*N*sizeof(float)/1.0e9/seconds;
    printf("Kernel time = %es\n", seconds);
    printf("Effective Bandwidth = %e GB/s\n", effBandwidth);
    //Free the device and host memory
    free(idata_h); free(odata_h);
    cudaFree(idata_d); cudaFree(odata_d);
    return 0;
}
|
10,048 | #include <cstdio>
using namespace std;
__global__ void
foo_kernel(int step)
{
    // Device-side printf marking which stream's launch this is; the output
    // is flushed when the host synchronizes.
    printf("loop: %d\n", step);
}
int main()
{
    // Launches foo_kernel once on each of five CUDA streams.
    int n_stream = 5;
    cudaStream_t *ls_stream;
    ls_stream = (cudaStream_t*) new cudaStream_t[n_stream];
    // create multiple streams
    for (int i = 0; i < n_stream; i++)
        cudaStreamCreate(&ls_stream[i]);
    // execute kernels with the CUDA stream each
    // FIX: the original called cudaStreamSynchronize() inside this loop,
    // blocking the host after every launch and serializing the streams —
    // the device-wide sync below is sufficient.  (As a consequence the
    // printf output from different streams may now interleave.)
    for (int i = 0; i < n_stream; i++) {
        foo_kernel<<< 1, 1, 0, ls_stream[i] >>>(i);
    }
    // synchronize the host and GPU
    cudaDeviceSynchronize();
    // terminates all the created CUDA streams
    for (int i = 0; i < n_stream; i++)
        cudaStreamDestroy(ls_stream[i]);
    delete [] ls_stream;
    return 0;
}
10,049 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#define BLOCK_SIZE 32
#define WA 64
#define HA 64
#define HC 3
#define WC 3
#define PAD 1
#define WB (WA+2*PAD - WC + 1)
#define HB (HA+2*PAD - HC + 1)
#define CHANNEL_SIZE 3
__global__ void Convolution(float* Input, float* Kernel, float* Output)
{
    // One block per output pixel (grid WB x HB); one thread per window tap
    // and channel (threads WC x HC x CHANNEL_SIZE).  Each thread stages one
    // zero-padded input sample in shared memory and atomically accumulates
    // it into the block's output element.
    // NOTE(review): the `Kernel` argument is never read, so this computes a
    // plain box sum over the window rather than a weighted convolution —
    // presumably a missing multiply by the kernel tap; confirm intent (the
    // all-ones test weights in main make the two indistinguishable).
    __shared__ float kernel_part[HC][WC][CHANNEL_SIZE];
    int col_idx = blockIdx.x - PAD + threadIdx.x;
    int row_idx = blockIdx.y - PAD + threadIdx.y;
    if( WA>col_idx && col_idx >=0 && HA>row_idx && row_idx >=0)
    {
        kernel_part[threadIdx.y][threadIdx.x][threadIdx.z] = Input[(col_idx * WA +row_idx)*CHANNEL_SIZE + threadIdx.z];
    }
    else
    {
        // Zero-padding: taps outside the image contribute nothing.
        kernel_part[threadIdx.y][threadIdx.x][threadIdx.z] = 0;
    }
    // No barrier is needed: each thread only reads back the shared element
    // it wrote itself.  (The commented-out `__syncthreads` without
    // parentheses would have been a no-op expression anyway.)
    //__syncthreads;
    atomicAdd(&(Output[(blockIdx.x * WB +blockIdx.y)]), kernel_part[threadIdx.y][threadIdx.x][threadIdx.z]);
}
// Driver for the Convolution kernel: builds a 64x64x3 input (only the very
// first three floats are 1.0; the remainder value-initialize to 0.0), an
// all-ones 3x3x3 filter, runs the kernel, and prints the 62x62 output.
__host__ int main(void)
{
    float h_a[64][64][3] ={1.0,1.0,1.0};
    float h_b[3] [3] [3] ={ 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,
                            1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,
                            1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0};
    float h_c[64][64] ={0.0};
    float *da;
    float *db;
    float *dc;
    // BUG FIX: the casts were (void***); cudaMalloc expects a void**.
    cudaMalloc((void**)&da, sizeof(h_a));
    cudaMalloc((void**)&db, sizeof(h_b));
    cudaMalloc((void**)&dc, sizeof(h_c));
    cudaMemcpy(da, h_a, sizeof(h_a), cudaMemcpyHostToDevice);
    cudaMemcpy(db, h_b, sizeof(h_b), cudaMemcpyHostToDevice);
    // BUG FIX: the kernel accumulates into dc with atomicAdd, so the output
    // buffer must start at zero; it was previously left uninitialized.
    cudaMemset(dc, 0, sizeof(h_c));
    dim3 threads(WC, HC, CHANNEL_SIZE);
    dim3 grid(WB, HB);
    Convolution<<<grid, threads>>>(da, db, dc);
    cudaMemcpy(h_c, dc, sizeof(h_c), cudaMemcpyDeviceToHost);
    for (int j = 0; j < WB; j++)
    {
        for (int k = 0; k < WB; k++)
        {
            printf("%.0f ", h_c[k][j]);
        }
        printf("\n");
    }
    printf("\n");
    // BUG FIX: device allocations were never released.
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}
10,050 | //pass
//--blockDim=64 --gridDim=1 --equality-abstraction --no-inline
#include "cuda.h"
// GPUVerify regression test (see the expected-pass flags above): stores an
// element of an uninitialized __shared__ array through p.
__global__ void foo(int* p) {
__shared__ int A[10];
// NOTE(review): A is never written, so the stored value is indeterminate;
// this appears intentional for the verifier's --equality-abstraction mode.
p[0] = A[0];
}
|
10,051 | #include <stdio.h>
// One thread per element: out[i] = in[i]^3. Assumes a single block whose
// blockDim.x covers the whole array (index is threadIdx.x only).
__global__ void cube(float * d_out, float * d_in) {
    int i = threadIdx.x;
    float v = d_in[i];
    d_out[i] = v * v * v;
}
// One thread per element: out[i] = in[i]^2. Assumes a single block whose
// blockDim.x covers the whole array (index is threadIdx.x only).
__global__ void square(float * d_out, float * d_in) {
    int i = threadIdx.x;
    float v = d_in[i];
    d_out[i] = v * v;
}
// Runs cube() on device 0 and square() on device 1 over identical inputs,
// then prints both result arrays.
// NOTE(review): assumes at least two CUDA devices exist and checks no API
// return codes — confirm the target system before trusting the output.
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 25;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h1_in[ARRAY_SIZE], h2_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h1_in[i] = h2_in[i] = float(i);
}
float h1_out[ARRAY_SIZE], h2_out[ARRAY_SIZE];
// declare GPU memory pointers
float * d1_in, *d2_in;
float * d1_out, *d2_out;
cudaSetDevice(0);
// allocate GPU memory on each device (allocations are per-device)
cudaMalloc((void**) &d1_in, ARRAY_BYTES);
cudaMalloc((void**) &d1_out, ARRAY_BYTES);
cudaSetDevice(1);
cudaMalloc((void**) &d2_in, ARRAY_BYTES);
cudaMalloc((void**) &d2_out, ARRAY_BYTES);
cudaSetDevice(0);
// transfer the array to the GPU
cudaMemcpy(d1_in, h1_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaSetDevice(1);
cudaMemcpy(d2_in, h2_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaSetDevice(0);
// launch one kernel on each device
cube<<<1, ARRAY_SIZE>>>(d1_out, d1_in);
cudaSetDevice(1);
square<<<1, ARRAY_SIZE>>>(d2_out, d2_in);
cudaSetDevice(0);
// copy back the result array to the CPU (blocking memcpy implicitly waits
// for the preceding kernel on the current device)
cudaMemcpy(h1_out, d1_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaSetDevice(1);
cudaMemcpy(h2_out, d2_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print the cube results, four per line
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h1_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
printf("\n---------------------------------------------------\n\n");
// print the square results, four per line
for (int i =0; i < ARRAY_SIZE; i++) {
printf("%f", h2_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
cudaSetDevice(0);
cudaFree(d1_in);
cudaFree(d1_out);
cudaSetDevice(1);
cudaFree(d2_in);
cudaFree(d2_out);
return 0;
}
|
10,052 | #include "includes.h"
// Returns the largest of three ints: fold a and b first, then compare the
// intermediate against c.
__device__ __host__ int maximum( int a, int b, int c){
    int m = (a <= b) ? b : a;
    return (m <= c) ? c : m;
}
// Score-matrix fill for the second (lower-right) pass of the "needle"
// workload, no-shared-memory variant: each block processes one
// BLOCK_SIZE x BLOCK_SIZE tile directly in global memory, sweeping the
// tile's anti-diagonals (thread tx is active while tx <= m). Each cell is
// the max of diagonal+reference, left-penalty, and up-penalty — the
// classic Needleman-Wunsch recurrence (presumably; confirm against the
// original Rodinia kernel).
// NOTE(review): there is no __syncthreads() between wavefront iterations,
// so a cell written by one thread in step m may be read by another thread
// in step m+1 without a barrier — verify this ordering is actually safe.
__global__ void needle_cuda_noshr_2( int* reference, int* matrix_cuda, int cols, int penalty, int i, int block_width)
{
int bx = blockIdx.x;
int tx = threadIdx.x;
// Tile coordinates for this block on the current global anti-diagonal i.
int b_index_x = bx + block_width - i;
int b_index_y = block_width - bx -1;
// Linear index of the tile's top-left interior cell (+cols+1 skips the
// boundary row and column).
int index = cols * BLOCK_SIZE * b_index_y + BLOCK_SIZE * b_index_x + ( cols + 1 );
// Forward sweep: growing anti-diagonals m = 0 .. BLOCK_SIZE-1.
for( int m = 0 ; m < BLOCK_SIZE ; m++) {
if ( tx <= m ){
int t_index_x = tx;
int t_index_y = m - tx;
int idx = index + t_index_y * cols + t_index_x;
matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
matrix_cuda[idx - 1] - penalty,
matrix_cuda[idx - cols] - penalty);
}
}
// Backward sweep: shrinking anti-diagonals toward the bottom-right corner.
for( int m = BLOCK_SIZE - 2 ; m >=0 ; m--) {
if ( tx <= m){
int t_index_x = tx + BLOCK_SIZE - m -1;
int t_index_y = BLOCK_SIZE - tx - 1;
int idx = index + t_index_y * cols + t_index_x;
matrix_cuda[idx] = maximum( matrix_cuda[idx-cols-1] + reference[idx],
matrix_cuda[idx - 1] - penalty,
matrix_cuda[idx - cols] - penalty);
}
}
} |
10,053 | // From CUDA for Engineering
// norm/kernel.cu
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <math.h>
#include <stdio.h>
#define N (128*128)
// Computes the Euclidean norm of an N-element vector of ones on the GPU
// via a Thrust dot product; prints sqrt(N) = 128 for N = 128*128.
int main(int argc, char** argv)
{
    thrust::device_vector<float> ones(N, 1.0f);
    float dot = thrust::inner_product(ones.begin(), ones.end(), ones.begin(), 0.0f);
    float norm = sqrt(dot);
    printf("norm = %.0f\n", norm);
    return 0;
}
|
10,054 | #include <iostream>
#include <fstream>
// For each element of d_A owned by this thread, count how many entries of
// d_B match it; the per-thread total is written to d_C[tid]. Both input
// arrays are fixed at 2048 elements; A is walked with a grid-stride loop.
__global__ void common_elements(int* d_A, int* d_B, int* d_C) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    int count = 0;
    for (int i = tid; i < 2048; i += stride) {
        // Linear scan of B for every owned element of A.
        for (int j = 0; j < 2048; ++j) {
            if (d_A[i] == d_B[j]) {
                count++;
            }
        }
    }
    d_C[tid] = count;
}
// Reads two 2048-int arrays from a.txt / b.txt, counts matching pairs on
// the GPU (each of the 128 threads reports a partial count in d_C), sums
// the partials on the host, and prints the total.
// NOTE(review): the ifstreams are never checked for success, so missing
// files silently yield garbage counts — confirm inputs exist at runtime.
int main() {
int *A = (int*)malloc(2048 * sizeof(int));
int *B = (int*)malloc(2048 * sizeof(int));
// read files
std::ifstream inputa("a.txt", std::ifstream::in);
std::ifstream inputb("b.txt", std::ifstream::in);
for (int i = 0; i < 2048; i++) {
inputa >> A[i];
inputb >> B[i];
}
// Launch geometry: 4 blocks x 32 threads = 128 counting threads.
int num_blocks_per_grid = 4;
int num_threads_per_grid = 32;
// Allocate the memory in GPU to store the content of A,B,C
int *d_A, *d_B, *d_C;
cudaMalloc((void **)&d_A, 2048 * sizeof(int));
cudaMalloc((void **)&d_B, 2048 * sizeof(int));
// d_C stores the number of common elements found by each thread
cudaMalloc((void **)&d_C, num_blocks_per_grid * num_threads_per_grid * sizeof(int));
// Copy A, B to d_A,d_B
cudaMemcpy(d_A, A, 2048 * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, 2048 * sizeof(int), cudaMemcpyHostToDevice);
common_elements<<<num_blocks_per_grid,num_threads_per_grid>>>(d_A, d_B, d_C);
int *C = (int*)malloc(num_blocks_per_grid * num_threads_per_grid * sizeof(int));
// Blocking copy; it also waits for the kernel above to finish.
cudaMemcpy(C, d_C, num_blocks_per_grid * num_threads_per_grid* sizeof(int), cudaMemcpyDeviceToHost);
// Reduce the per-thread partial counts on the host.
int num_common_elements = 0;
for (int i = 0; i < num_blocks_per_grid * num_threads_per_grid; i++) {
num_common_elements += C[i];
}
// print the number of common elements
std::cout << num_common_elements << std::endl;
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(A);
free(B);
free(C);
return 0;
}
|
10,055 | /*
============================================================================
Name : GScuda.cu
Author : caleb
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdint.h>
#include <limits.h>
#include <curand_kernel.h>
#include <stdio.h>
#include <time.h>
#include <assert.h>
#include <cub/cub.cuh>
using namespace cub;
// I ASSUME THROUGHOUT THAT sizeof(int) = 4
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err);
// Deliberately empty kernel; empty_kernel() below launches it to measure
// bare launch-plus-synchronization overhead.
__global__ void emptyKernel() {
}
/**
* Host function that copies the data and launches GS on the CPU
*
*/
// Measures the overhead of launching and completing a single empty kernel:
// drains pending GPU work, times emptyKernel<<<1,1>>> with CLOCK_MONOTONIC,
// and prints the elapsed nanoseconds.
void empty_kernel()
{
    struct timespec t0, t1;
    cudaDeviceSynchronize();   // make sure nothing else is still running
    clock_gettime(CLOCK_MONOTONIC, &t0);
    emptyKernel<<<1,1>>> ();
    cudaDeviceSynchronize();   // wait for the launch to complete
    clock_gettime(CLOCK_MONOTONIC, &t1);
    long long unsigned int elapsed_ns =
        (1000000000L) * (t1.tv_sec - t0.tv_sec) + t1.tv_nsec - t0.tv_nsec;
    printf("kernel time %llu\n", elapsed_ns);
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
// Backend of the CUDA_CHECK_RETURN macro: on any error, print the failing
// statement text plus file:line and terminate; silently returns on success.
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
    if (err != cudaSuccess) {
        std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
        exit (1);
    }
}
|
10,056 | #include <stdio.h>
#include <time.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define MAX_W 9999999
#define TRUE 1
#define FALSE 0
typedef int boolean;
int E,V;
typedef struct
{
int u;
int v;
} Edge;
typedef struct
{
int title;
boolean visited;
} Vertex;
int *weights;
Vertex *vertices;
Edge *edges;
//Finds the weight of the path from vertex u to vertex v
__device__ __host__ int findEdge(Vertex u, Vertex v, Edge *edges, int *weights, int E)
{
for(int i = 0; i < E; i++)
{
if(edges[i].u == u.title && edges[i].v == v.title)
{
return weights[i];
}
}
return MAX_W;
}
// Relaxation step: each thread owns vertex u = threadIdx.x; if u is still
// unvisited it marks itself visited and tries to shorten the tentative
// distance of every vertex v reachable by an edge u->v.
// NOTE(review): updateLength[v] is read and written by multiple threads
// with no atomics, so concurrent relaxations can lose updates — consider
// atomicMin; verify against the accompanying outer host loop.
__global__ void Find_Vertex(Vertex *vertices, Edge *edges, int *weights, int *length, int *updateLength, int V, int E)
{
int u = threadIdx.x;
if(vertices[u].visited == FALSE)
{
vertices[u].visited = TRUE;
for(int v = 0; v < V; v++)
{
// MAX_W means "no edge u->v".
int weight = findEdge(vertices[u], vertices[v], edges, weights, E);
if(weight < MAX_W)
{
if(updateLength[v] > length[u] + weight)
{
updateLength[v] = length[u] + weight;
}
}
}
}
}
//Updates the shortest path array (length)
__global__ void Update_Paths(Vertex *vertices, int *length, int *updateLength)
{
int u = threadIdx.x;
if(length[u] > updateLength[u])
{
vertices[u].visited = FALSE;
length[u] = updateLength[u];
}
updateLength[u] = length[u];
}
// Populates the global vertices/edges/weights arrays with a random graph:
// vertex titles 0..V-1, E directed edges with random endpoints and weights
// in [0, 100). Seeds rand() from the wall clock.
void Graph_Randomizer(int V, int E){
    srand(time(NULL));
    for (int i = 0; i < V; i++)
    {
        vertices[i].title = i;
        vertices[i].visited = FALSE;
    }
    for (int i = 0; i < E; i++)
    {
        // Same rand() call order as before: u, then v, then the weight.
        edges[i].u = rand() % V;
        edges[i].v = rand() % V;
        weights[i] = rand() % 100;
    }
}
//Runs the program
int main(int argc, char **argv)
{
V = atoi(argv[1]);
E = atoi(argv[2]);
int *len, *updateLength;
Vertex *d_V;
Vertex *root;
Edge *d_E;
int *d_W;
int *d_L;
int *d_C;
vertices = (Vertex *)malloc(sizeof(Vertex) * V);
edges = (Edge *)malloc(sizeof(Edge) * E);
weights = (int *)malloc(E* sizeof(int));
Graph_Randomizer(V, E);
len = (int *)malloc(V * sizeof(int));
updateLength = (int *)malloc(V * sizeof(int));
root = (Vertex *)malloc(sizeof(Vertex) * V);
cudaMalloc((void**)&d_V, sizeof(Vertex) * V);
cudaMalloc((void**)&d_E, sizeof(Edge) * E);
cudaMalloc((void**)&d_W, E * sizeof(int));
cudaMalloc((void**)&d_L, V * sizeof(int));
cudaMalloc((void**)&d_C, V * sizeof(int));
cudaMemcpy(d_V, vertices, sizeof(Vertex) * V, cudaMemcpyHostToDevice);
cudaMemcpy(d_E, edges, sizeof(Edge) * E, cudaMemcpyHostToDevice);
cudaMemcpy(d_W, weights, E * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_L, len, V * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, updateLength, V * sizeof(int), cudaMemcpyHostToDevice);
for(int count = 0; count < V; count++){
root[count].title = count;
root[count].visited = FALSE;
}
clock_t start = clock();
for(int count = 0; count < V; count++){
root[count].visited = TRUE;
len[root[count].title] = 0;
updateLength[root[count].title] = 0;
for(int i = 0; i < V;i++)
{
if(vertices[i].title != root[count].title)
{
len[(int)vertices[i].title] = findEdge(root[count], vertices[i], edges, weights, E);
updateLength[vertices[i].title] = len[(int)vertices[i].title];
}
else{
vertices[i].visited = TRUE;
}
}
cudaMemcpy(d_L, len, V * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_C, updateLength, V * sizeof(int), cudaMemcpyHostToDevice);
for(int i = 0; i < V; i++){
Find_Vertex<<<1, V>>>(d_V, d_E, d_W, d_L, d_C, V, E);
for(int j = 0; j < V; j++)
{
Update_Paths<<<1,V>>>(d_V, d_L, d_C);
}
}
}
clock_t end = clock();
float seconds = (float)(end - start) / CLOCKS_PER_SEC;
printf("Elapsed time on GPU = %f sec\n", seconds);
}
|
10,057 | // This program implements a 1D convolution using CUDA,
// and stores the mask in constant memory. It loads the
// primary array into shared memory, but not halo elements.
// By: Nick from CoffeeBeforeArch
#include <cassert>
#include <stdio.h>
// Length of our convolution mask
#define MASK_LENGTH 7
// Allocate space for the mask in constant memory
__constant__ int mask[MASK_LENGTH];
// 1-D convolution kernel
// All threads load 1 element into shared memory
// All threads compute 1 element in final array
// Arguments:
// array = padded array
// result = result array
// n = number of elements in array
// 1D convolution over a padded input. Expects: array padded by
// MASK_LENGTH/2 zeros on each side, dynamic shared memory of
// blockDim.x ints, and grid*block >= n threads.
// NOTE(review): there is no `tid < n` guard, so result[tid] is written out
// of bounds unless n is an exact multiple of blockDim.x — confirm the
// launch configuration (the driver below uses n = 1<<20 with 256 threads,
// which divides evenly).
__global__ void convolution_1d(int *array, int *result, int n) {
// Global thread ID calculation
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Store all elements needed to compute output in shared memory
extern __shared__ int s_array[];
// Load elements from the main array into shared memory
// This is naturally offset by "r" due to padding
s_array[threadIdx.x] = array[tid];
__syncthreads();
// Temp value for calculation
int temp = 0;
// Go over each element of the mask
for (int j = 0; j < MASK_LENGTH; j++) {
// Halo elements past the block's cached window fall back to a
// global-memory read (the shared buffer holds no halo).
if ((threadIdx.x + j) >= blockDim.x) {
temp += array[tid + j] * mask[j];
// Get the value from shared memory
// Only the last warp will be diverged (given mask size)
} else {
temp += s_array[threadIdx.x + j] * mask[j];
}
}
// Write-back the results
result[tid] = temp;
}
// Verify the result on the CPU
void verify_result(int *array, int *mask, int *result, int n) {
int temp;
for (int i = 0; i < n; i++) {
temp = 0;
for (int j = 0; j < MASK_LENGTH; j++) {
temp += array[i + j] * mask[j];
}
// Commenting assert, macro expands to machine dependent string
//assert(temp == result[i]);
}
}
// Driver: builds a zero-padded random input and a random mask, uploads the
// mask to __constant__ memory, runs convolution_1d, and verifies on the CPU.
int main() {
// Number of elements in result array
int n = 1 << 20;
// Size of the array in bytes
int bytes_n = n * sizeof(int);
// Size of the mask in bytes
size_t bytes_m = MASK_LENGTH * sizeof(int);
// Radius for padding the array
int r = MASK_LENGTH / 2;
int n_p = n + r * 2;
// Size of the padded array in bytes
size_t bytes_p = n_p * sizeof(int);
// Allocate the array (include edge elements)...
int *h_array = new int[n_p];
// ... and initialize it (zeros in the r-wide halo, random elsewhere)
for (int i = 0; i < n_p; i++) {
if ((i < r) || (i >= (n + r))) {
h_array[i] = 0;
} else {
h_array[i] = rand() % 100;
}
}
// Allocate the mask and initialize it
int *h_mask = new int[MASK_LENGTH];
for (int i = 0; i < MASK_LENGTH; i++) {
h_mask[i] = rand() % 10;
}
// Allocate space for the result
int *h_result = new int[n];
// Allocate space on the device
int *d_array, *d_result;
cudaMalloc(&d_array, bytes_p);
cudaMalloc(&d_result, bytes_n);
// Copy the data to the device
cudaMemcpy(d_array, h_array, bytes_p, cudaMemcpyHostToDevice);
// Copy the mask directly to the symbol
// This would require 2 API calls with cudaMemcpy
cudaMemcpyToSymbol(mask, h_mask, bytes_m);
// Threads per TB
int THREADS = 256;
// Number of TBs
int GRID = (n + THREADS - 1) / THREADS;
// Amount of space per-block for shared memory: one int per thread.
// (No halo padding here — the kernel reads halo elements straight from
// global memory.)
size_t SHMEM = THREADS * sizeof(int);
// Call the kernel
convolution_1d<<<GRID, THREADS, SHMEM>>>(d_array, d_result, n);
// Copy back the result (blocking; waits for the kernel)
cudaMemcpy(h_result, d_result, bytes_n, cudaMemcpyDeviceToHost);
// Verify the result
verify_result(h_array, h_mask, h_result, n);
printf("COMPLETED SUCCESSFULLY\n");
// Free allocated memory on the device and host
delete[] h_array;
delete[] h_result;
delete[] h_mask;
cudaFree(d_array);
cudaFree(d_result);
return 0;
} |
10,058 | #include<stdio.h>
#include<string>
// Element-wise vector add on the device: c[i] = a[i] + b[i], one element
// per thread, guarded against the tail where the grid overshoots n.
__global__
void vecAddKernel(float * a, float * b, float * c, int n){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        c[idx] = a[idx] + b[idx];
}
// Host wrapper for the add kernel: copies a and b to the device, launches
// ceil(n/256) blocks of 256 threads, copies the sum back into c, and
// releases the device buffers.
void vecAdd(float * a, float * b, float * c, int n){
    int size = n * sizeof(float);       // bytes per vector
    float *d_a, *d_b, *d_c;
    cudaMalloc((void **) & d_a, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) & d_b, size);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) & d_c, size);
    // 256.0 forces float division so ceil() rounds the block count up.
    vecAddKernel<<<ceil(n/256.0), 256>>> (d_a, d_b, d_c, n);
    // Blocking copy; also waits for the kernel to finish.
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
// Driver: fills two 2^20-element vectors with 0..n-1, adds them on the GPU
// via vecAdd, and prints a completion message.
int main(){
int n = 1 << 20;// 2^20 elements
float * a, * b, * c;
a = (float *)malloc(n*sizeof(float));// malloc returns void*; cast to float*
b = (float *)malloc(n*sizeof(float));
c = (float *)malloc(n*sizeof(float));
for(int i = 0; i < n; i++){
*(a+i) = i;
*(b+i) = i;
}
vecAdd(a, b, c, n);
// Optional dump of the result, left disabled:
/*for(int i = 0; i < n; i++){
printf("%f ", *(c+i));
}*/
// NOTE(review): a, b and c are never freed; harmless at process exit but
// worth fixing if this grows beyond a demo.
printf("Finalizado\n");
return 0;
}
|
10,059 | #pragma once
#include <vector>
#include <iostream>
// Owning host-side image buffer (width x height x channels floats).
// The backing store is pinned via cudaMallocHost when possible (so
// host<->device copies can run asynchronously) and falls back to plain
// pageable malloc when pinning fails; the destructor frees with the
// matching deallocator.
class Image {
private:
    unsigned int _width;
    unsigned int _height;
    unsigned int _channels;
    float* _data;
    bool _usesPinnedMemory;
    Image() = delete;
    // Allocate `bytes` of backing store: pinned if cudaMallocHost succeeds,
    // otherwise pageable. Sets _data and _usesPinnedMemory.
    void allocate(unsigned int bytes) {
        _usesPinnedMemory = true;
        auto status = cudaMallocHost((void**) &_data, bytes);
        if(status != cudaSuccess){
            std::cerr << "cudaMallocHost failed, pageable memory used" << "\n";
            _usesPinnedMemory = false;
            _data = (float*) malloc(bytes);
        }
    }
public:
    // Uninitialized image of the given dimensions.
    Image(const unsigned int width, const unsigned int height, unsigned int channels) :
        _width(width), _height(height), _channels(channels) {
        allocate(raw_data_length());
    }
    // Image initialized by copying `image_data` (caller keeps ownership of
    // the source buffer).
    Image(const unsigned int width, const unsigned int height, unsigned int channels, float* image_data) :
        _width(width), _height(height), _channels(channels) {
        allocate(raw_data_length());
        memcpy(_data, image_data, raw_data_length());
    }
    unsigned int width() const { return _width; }
    unsigned int height() const { return _height; }
    unsigned int channels() const { return _channels; }
    // Size of the pixel buffer in BYTES (not element count).
    unsigned int raw_data_length() const { return _width * _height * _channels * sizeof(float); }
    float* raw_data() { return _data; }
    const float* raw_data() const { return _data; }
    // Deep copy.
    // BUG FIX: the previous version only allocated when the source used
    // pinned memory; copying from a pageable-backed Image left _data
    // uninitialized and then memcpy'd through the garbage pointer.
    Image(const Image &rhs){
        _width = rhs.width();
        _height = rhs.height();
        _channels = rhs.channels();
        allocate(rhs.raw_data_length());
        memcpy((void *) _data, (const void *) rhs.raw_data(), rhs.raw_data_length());
    };
    // Move-style constructor from a non-const lvalue: steals the buffer and
    // leaves rhs empty (its destructor then does nothing).
    Image(Image &rhs){
        _width = rhs.width();
        _height = rhs.height();
        _channels = rhs.channels();
        _usesPinnedMemory = rhs._usesPinnedMemory;
        _data = rhs._data;
        rhs._data = nullptr;
    };
    // Free with the allocator that produced the buffer.
    ~Image(){
        if(_data != nullptr){
            if(_usesPinnedMemory == true){
                cudaFreeHost(_data);
            }else{
                free(_data);
            }
        }
    }
};
10,060 | #include<stdio.h>
#define Width 4
#define TILE_WIDTH 2
// Tiled integer matrix multiply: ab = a * b, using TILE_WIDTH x TILE_WIDTH
// shared-memory tiles. Assumes blockDim == (TILE_WIDTH, TILE_WIDTH) and
// width divisible by TILE_WIDTH (the phase loop and indexing have no
// bounds guards).
__global__ void mat_mul(int *a, int *b,int *ab, int width)
{
// shorthand
int tx = threadIdx.x, ty = threadIdx.y;
int bx = blockIdx.x, by = blockIdx.y;
// allocate tiles in __shared__ memory
__shared__ int s_a[TILE_WIDTH][TILE_WIDTH];
__shared__ int s_b[TILE_WIDTH][TILE_WIDTH];
// calculate the row & col index of this thread's output element
int row = by*blockDim.y + ty;
int col = bx*blockDim.x + tx;
int result = 0;
// loop over the tiles of the input in phases
for(int p = 0; p < width/TILE_WIDTH; ++p)
{
// collaboratively load tiles into __shared__
s_a[ty][tx] = a[row*width + (p*TILE_WIDTH + tx)];
s_b[ty][tx] = b[(p*TILE_WIDTH + ty)*width + col];
// all loads must land before anyone reads the tiles
__syncthreads();
// dot product between row of s_a and col of s_b
for(int k = 0; k < TILE_WIDTH; ++k)
result += s_a[ty][k] * s_b[k][tx];
// keep fast threads from overwriting tiles still being read
__syncthreads();
}
ab[row*width+col] = result;
}
// Driver: fills two Width x Width int matrices with ascending/descending
// values, multiplies them with the tiled kernel, and prints A, B and A*B.
int main()
{
int mat_size=Width*Width*sizeof(int); //memory size of one int matrix in bytes
int tot_elements=Width*Width;
int *M,*N,*P,*ptr; // Host matrix pointers
int a=0,x=1;
int i=0;
int *Md,*Nd,*Pd; //Matrix pointers on device (GPU) memory
//int size=Width*Width*sizeof(int);
M=(int*)malloc(mat_size); //Allocate memory on host for matrix
N=(int*)malloc(mat_size);
P=(int*)malloc(mat_size);
//P_CPU=(int*)malloc(mat_size);
ptr=M;
printf("\nGenarating random elements for matrix");
// Fill M with 1, 2, 3, ... (random generation left commented out).
for(i=0;i<tot_elements;i++)
{ //a=(rand()%10); //Generates random no. in 0 to 10 range
//*ptr=a;
*ptr=x++;
ptr++;
}
// Fill N counting back down from where M stopped.
ptr=N;
for(i=0;i<tot_elements;i++)
{
//a=(rand()%10);
*ptr=x--;
ptr++;
}
printf("Matrix A=\n ");
for(int i=0;i<Width*Width;i++)
{ if(i%(Width)==0){
printf("\n");
}
printf("%d ",M[i]);
}
printf("Matrix B=\n ");
for(int i=0;i<Width*Width;i++)
{ if(i%(Width)==0){
printf("\n");
}
printf("%d ",N[i]);
}
cudaMalloc((void**)&Md,mat_size); //Allocate memory on device global memory
cudaMemcpy(Md,M,mat_size,cudaMemcpyHostToDevice); //Copy matrix data from host to device memory
cudaMalloc((void**)&Nd,mat_size);
cudaMemcpy(Nd,N,mat_size,cudaMemcpyHostToDevice);
cudaMalloc((void**)&Pd,mat_size);
// NOTE(review): the names/comments are swapped — dimGrid is passed in the
// GRID slot and dimBlock in the BLOCK slot below. It still works only
// because Width/TILE_WIDTH == TILE_WIDTH == 2 here; the grid should be
// Width/TILE_WIDTH blocks of TILE_WIDTH x TILE_WIDTH threads.
dim3 dimGrid(TILE_WIDTH,TILE_WIDTH); //used as the grid dimensions
dim3 dimBlock(Width/TILE_WIDTH,Width/TILE_WIDTH); //used as the block dimensions
mat_mul<<<dimGrid,dimBlock>>>(Md,Nd,Pd,Width); //Kernel invocation with grid and block specification in angle brackets
cudaMemcpy(P,Pd,mat_size,cudaMemcpyDeviceToHost); //Copy resultant matrix from device to host
//display the resultant matrix
printf("Product=\n ");
for(int i=0;i<Width*Width;i++)
{ if(i%(Width)==0){
printf("\n");
}
printf("%d ",P[i]);
}
//Free device memory
cudaFree(Md);
cudaFree(Nd);
cudaFree(Pd);
free(M);
free(N);
free(P);
}
|
10,061 | #include "includes.h"
// Bilinear sampling is done in BHWD (coalescing is not obvious in BDHW)
// we assume BHWD format in inputImages
// we assume BHW(YX) format on grids
// Inclusive range test: lowerBound <= value <= upperBound.
__device__ bool between(int value, int lowerBound, int upperBound)
{
    return !(value < lowerBound || value > upperBound);
}
// For interpolation: maps a normalized coordinate x in [-1, 1] to pixel
// space and returns, via the out-parameters,
//   point  - the x-coordinate of the pixel on the left (or the y-coordinate
//            of the upper pixel when used on the y axis), and
//   weight - the interpolation weight for that pixel (1 when x lands
//            exactly on it).
__device__ void getTopLeft(float x, int width, int& point, float& weight)
{
    float pixel = (x + 1) * (width - 1) / 2;
    point = floor(pixel);
    weight = 1 - (pixel - point);
}
// Bilinear sampling of inputImages at the (y, x) coordinates stored in
// grids, writing to output. Layouts are given by the explicit stride
// arguments (BHWD images, BHW(YX) grids per the file header comment).
// Block layout: threadIdx.x walks channels, threadIdx.y indexes the output
// pixels covered by this block; blockIdx.y is the output row, blockIdx.z
// the batch element.
__global__ void bilinearSamplingFromGrid(float* inputImages_data, int inputImages_strideBatch, int inputImages_strideChannels, int inputImages_strideHeight, int inputImages_strideWidth, float* grids_data, int grids_strideBatch, int grids_strideYX, int grids_strideHeight, int grids_strideWidth, float* output_data, int output_strideBatch, int output_strideChannels, int output_strideHeight, int output_strideWidth, int inputImages_channels, int inputImages_height, int inputImages_width, int output_width)
{
// each (32,16) block 16 output pixels (for coalescing the grid read)
// x,y = coordinates (xOut = blockIdx.x*16+blockDim.y+threadIdx.y)
// z = batch index
// threadIdx.x : used for features (coalescing is trivial)
const int xOut = blockIdx.x*blockDim.y+threadIdx.y;
const bool withinImageBounds = xOut < output_width;
const bool withinGridBounds = blockIdx.x*blockDim.y + threadIdx.x / 2 < output_width;
const int yOut = blockIdx.y;
const int width = inputImages_width;
const int height = inputImages_height;
const int b = blockIdx.z;
float yf,xf;
// Row 0 of the block stages the (y, x) pairs for all 16 pixels so the
// grid read is coalesced; everyone else picks them up after the barrier.
__shared__ float gridData[32];
if (threadIdx.y==0 && withinGridBounds)
{
gridData[threadIdx.x] = grids_data[b*grids_strideBatch + yOut*grids_strideHeight + xOut*grids_strideWidth + threadIdx.x];
}
__syncthreads();
if(!withinImageBounds) return;
yf = gridData[threadIdx.y*2];
xf = gridData[threadIdx.y*2+1];
// Top-left corner of the 2x2 neighborhood plus interpolation weights.
int yInTopLeft, xInTopLeft;
float yWeightTopLeft, xWeightTopLeft;
getTopLeft(xf, inputImages_width, xInTopLeft, xWeightTopLeft);
getTopLeft(yf, inputImages_height, yInTopLeft, yWeightTopLeft);
const int outAddress = output_strideBatch * b + output_strideHeight * yOut + output_strideWidth * xOut;
const int inTopLeftAddress = inputImages_strideBatch * b + inputImages_strideHeight * yInTopLeft + inputImages_strideWidth * xInTopLeft;
const int inTopRightAddress = inTopLeftAddress + inputImages_strideWidth;
const int inBottomLeftAddress = inTopLeftAddress + inputImages_strideHeight;
const int inBottomRightAddress = inBottomLeftAddress + inputImages_strideWidth;
float v=0;
// Corners default to 0 so out-of-image samples contribute nothing.
float inTopLeft=0;
float inTopRight=0;
float inBottomLeft=0;
float inBottomRight=0;
bool topLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft, 0, height-1);
bool topRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft, 0, height-1);
bool bottomLeftIsIn = between(xInTopLeft, 0, width-1) && between(yInTopLeft+1, 0, height-1);
bool bottomRightIsIn = between(xInTopLeft+1, 0, width-1) && between(yInTopLeft+1, 0, height-1);
// interpolation happens here; threadIdx.x strides over channels
for(int t=threadIdx.x; t<inputImages_channels; t+= blockDim.x)
{
if(topLeftIsIn) inTopLeft = inputImages_data[inTopLeftAddress + t];
if(topRightIsIn) inTopRight = inputImages_data[inTopRightAddress + t];
if(bottomLeftIsIn) inBottomLeft = inputImages_data[inBottomLeftAddress + t];
if(bottomRightIsIn) inBottomRight = inputImages_data[inBottomRightAddress + t];
v = xWeightTopLeft * yWeightTopLeft * inTopLeft
+ (1 - xWeightTopLeft) * yWeightTopLeft * inTopRight
+ xWeightTopLeft * (1 - yWeightTopLeft) * inBottomLeft
+ (1 - xWeightTopLeft) * (1 - yWeightTopLeft) * inBottomRight;
output_data[outAddress + t] = v;
}
} |
10,062 | #include <cuda.h>
#include <cuda_fp16.h>
#include <iostream>
#define TILE_WIDTH 16
__constant__ int xdims_k[4];
__constant__ int wdims_k[4];
__constant__ int ydims_k[4];
__constant__ int xdims_f[2];
__constant__ int wdims_f[2];
/*__global__ void conv_forward_kernel_basic(float *X, float *W, float *Y){
int n, m, h, w;
int W_grid = (ydims_k[2] + TILE_WIDTH - 1) / TILE_WIDTH;
n = blockIdx.x;
m = blockIdx.y;
h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
int C = wdims_k[2]; // in_channel
int P = wdims_k[0]; // filter_h
int Q = wdims_k[1]; // filter_w
if(h < ydims_k[1] && w < ydims_k[2]){
float acc = 0;
for(int c = 0; c < C; c++){
for(int p = 0; p < P; p++){
for(int q = 0; q < Q; q++)
acc += X[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + (h + p) * xdims_k[2] * xdims_k[3] + (w + q) * xdims_k[3] + c]
* W[p * wdims_k[1] * wdims_k[2] * wdims_k[3] + q * wdims_k[2] * wdims_k[3] + c * wdims_k[3] + m];
}
}
Y[((n * ydims_k[1] + h) * ydims_k[2] + w) * ydims_k[3] + m] = (acc < 0) ? 0 : acc;
}
}
void conv_forward_host_basic(const float *X, const int xdims[4], const float *W,
const int wdims[4], float *Y, const int ydims[4]){
float *X_device;
float *W_device;
float *Y_device;
int X_size = xdims[0] * xdims[1] * xdims[2] * xdims[3] * sizeof(float);
int W_size = wdims[0] * wdims[1] * wdims[2] * wdims[3] * sizeof(float);
int Y_size = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(float);
int d_size = 4 * sizeof(int);
cudaMalloc((void**) &X_device, X_size);
cudaMalloc((void**) &W_device, W_size);
cudaMalloc((void**) &Y_device, Y_size);
cudaMemcpy(X_device, X, X_size, cudaMemcpyHostToDevice);
cudaMemcpy(W_device, W, W_size, cudaMemcpyHostToDevice);
cudaMemcpy(Y_device, Y, Y_size, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(xdims_k, xdims, d_size);
cudaMemcpyToSymbol(wdims_k, wdims, d_size);
cudaMemcpyToSymbol(ydims_k, ydims, d_size);
// std::cout << X_size/sizeof(float) << ", " << W_size/sizeof(float) << ", " << Y_size/sizeof(float) << std::endl;
int W_grid = (ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH;
int H_grid = (ydims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
int Z = H_grid * W_grid;
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim(ydims[0], ydims[3], Z);
conv_forward_kernel_basic<<<gridDim, blockDim>>>(X_device, W_device, Y_device);
cudaDeviceSynchronize();
cudaMemcpy(Y, Y_device, Y_size, cudaMemcpyDeviceToHost);
cudaFree(X_device);
cudaFree(W_device);
cudaFree(Y_device);
}*/
/*__global__ void conv_forward_kernel_tiled(half *X, half *W, half *Y){
int C = xdims_k[3]; // in_channel
int P = wdims_k[0]; // filter_h
int Q = wdims_k[1]; // filter_w
int W_grid = (ydims_k[2] + TILE_WIDTH - 1) / TILE_WIDTH;
int n, m, h0, w0, h_base, w_base, h, w;
int X_tile_width = TILE_WIDTH + Q - 1;
int X_tile_height = TILE_WIDTH + P - 1;
extern __shared__ half shmem[];
half *X_shared = &shmem[0];
half *W_shared = &shmem[X_tile_width * X_tile_height];
n = blockIdx.x;
m = blockIdx.y;
h0 = threadIdx.y;
w0 = threadIdx.x;
h_base = (blockIdx.z / W_grid) * TILE_WIDTH;
w_base = (blockIdx.z % W_grid) * TILE_WIDTH;
h = h_base + h0;
w = w_base + w0;
float acc = 0;
for(int c = 0; c < C; c++){ // sum over input channels
if((h0 < P) && (w0 < Q)) // load weight
W_shared[h0 * Q + w0] = W[h0 * Q * wdims_k[2] * wdims_k[3] + w0 * wdims_k[2] * wdims_k[3] + c * wdims_k[3] + m];
__syncthreads();
for(int i = h; i < h_base + X_tile_height; i += TILE_WIDTH){ // load tiles
for(int j = w; j < w_base + X_tile_width; j += TILE_WIDTH)
X_shared[(i - h_base) * X_tile_width + (j - w_base)] =
X[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + i * xdims_k[2] * xdims_k[3] + j * xdims_k[3] + c];
}
__syncthreads();
if(h < ydims_k[1] && w < ydims_k[2]){
for(int p = 0; p < P; p++){ // sum
for(int q = 0; q < Q; q++)
acc += X_shared[(h0 + p) * X_tile_width + (w0 + q)] * W_shared[p * Q + q];
}
}
__syncthreads();
}
if((h < ydims_k[1]) && (w < ydims_k[2]))
Y[((n * ydims_k[1] + h) * ydims_k[2] + w) * ydims_k[3] + m] = (acc < 0) ? 0 : acc;
}
half* conv_forward_host_tiled(const half *X, const int xdims[4], const half *W, const int wdims[4],
half *Y, const int ydims[4], half *in = NULL){
half *X_device = in;
half *W_device;
half *Y_device;
int X_size = xdims[0] * xdims[1] * xdims[2] * xdims[3] * sizeof(half);
int W_size = wdims[0] * wdims[1] * wdims[2] * wdims[3] * sizeof(half);
int Y_size = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(half);
int d_size = 4 * sizeof(int);
if(in == NULL) cudaMalloc((void**) &X_device, X_size);
cudaMalloc((void**) &W_device, W_size);
cudaMalloc((void**) &Y_device, Y_size);
if(in == NULL) cudaMemcpy(X_device, X, X_size, cudaMemcpyHostToDevice);
cudaMemcpy(W_device, W, W_size, cudaMemcpyHostToDevice);
//cudaMemcpy(Y_device, Y, Y_size, cudaMemcpyHostToDevice);
cudaMemset(Y, 0, Y_size);
cudaMemcpyToSymbol(xdims_k, xdims, d_size);
cudaMemcpyToSymbol(wdims_k, wdims, d_size);
cudaMemcpyToSymbol(ydims_k, ydims, d_size);
int W_grid = (ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH;
int H_grid = (ydims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
int Z = H_grid * W_grid;
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim(ydims[0], ydims[3], Z);
size_t shmem_size = sizeof(float) * ((TILE_WIDTH + wdims[0] - 1) * (TILE_WIDTH + wdims[1] - 1) + wdims[0] * wdims[1]);
conv_forward_kernel_tiled<<<gridDim, blockDim, shmem_size>>>(X_device, W_device, Y_device);
cudaDeviceSynchronize();
//cudaMemcpy(Y, Y_device, Y_size, cudaMemcpyDeviceToHost);
cudaFree(X_device);
cudaFree(W_device);
//cudaFree(Y_device);
return Y_device;
}*/
// Average pooling with a pool_size x pool_size window and stride pool_size.
// Tensor layout is NHWC (index = ((n*H + h)*W + w)*C + c); dimensions come
// from the __constant__ arrays xdims_k (input) and ydims_k (output).
// Launch: grid = (batch, channels, H_grid*W_grid tiles), block =
// TILE_WIDTH x TILE_WIDTH.
__global__ void average_pool_kernel(float *X, float *Y, int pool_size){
int n, m, h, w;
// Tiles per output row, for unpacking the flattened blockIdx.z.
int W_grid = (ydims_k[2] + TILE_WIDTH - 1) / TILE_WIDTH;
n = blockIdx.x;
m = blockIdx.y;
h = blockIdx.z / W_grid * TILE_WIDTH + threadIdx.y;
w = blockIdx.z % W_grid * TILE_WIDTH + threadIdx.x;
// Guard the ragged tiles at the bottom/right edges.
if(h < ydims_k[1] && w < ydims_k[2]){
float acc = 0;
for(int p = 0; p < pool_size; p++){
for(int q = 0; q < pool_size; q++)
acc += X[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + (pool_size * h + p) * xdims_k[2] * xdims_k[3] +
(pool_size * w + q) * xdims_k[3] + m];
}
// Mean over the window.
Y[((n * ydims_k[1] + h) * ydims_k[2] + w) * ydims_k[3] + m] = acc / (1.0f * pool_size * pool_size);
}
}
// Host launcher for average_pool_kernel. The input is expected to already
// live on the device and is passed via `in` (the X/xdims host arguments
// only supply dimensions in this chained-stage configuration). Returns a
// freshly allocated device buffer holding the pooled output; the consumed
// input device buffer is freed here, matching the other stages in this file.
float* average_pool_host(const float *X, const int xdims[4], const int pool_size,
float *Y, const int ydims[4], float *in = NULL){
    float *X_device = in;
    float *Y_device;
    int Y_size = ydims[0] * ydims[1] * ydims[2] * ydims[3] * sizeof(float);
    int d_size = 4 * sizeof(int);
    cudaMalloc((void**) &Y_device, Y_size);
    // BUG FIX: the memset must clear the DEVICE output buffer; it was
    // previously applied to the host pointer Y, which is invalid for
    // cudaMemset and left Y_device untouched.
    cudaMemset(Y_device, 0, Y_size);
    // Publish the tensor dimensions through __constant__ memory.
    cudaMemcpyToSymbol(xdims_k, xdims, d_size);
    cudaMemcpyToSymbol(ydims_k, ydims, d_size);
    // One TILE_WIDTH^2 block per output tile; z packs the tile grid.
    int W_grid = (ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH;
    int H_grid = (ydims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
    int Z = H_grid * W_grid;
    dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
    dim3 gridDim(ydims[0], ydims[3], Z);
    average_pool_kernel<<<gridDim, blockDim>>>(X_device, Y_device, pool_size);
    cudaDeviceSynchronize();
    // Free the consumed input stage buffer (stage-chaining convention).
    cudaFree(X_device);
    return Y_device;
}
/*__global__ void fully_forward_kernel(float *X, float *W, float *Y, bool relu){
int i = blockIdx.x * TILE_WIDTH + threadIdx.x;
int j = blockIdx.y * TILE_WIDTH + threadIdx.y;
if(i < xdims_f[0] && j < wdims_f[1]){
float sum = 0;
for (int k = 0; k < xdims_f[1]; k++) {
sum += X[i * xdims_f[1] + k] * W[k * wdims_f[1] + j];
}
Y[i * wdims_f[1] + j] = (relu && (sum < 0)) ? 0: sum;
}
}
float* fully_forward_host(const float *X, const int xdims[2], float *W, const int wdims[2],
float *Y, const int ydims[2], float *in = NULL, bool copy = false){
float *X_device = in;
float *W_device;
float *Y_device;
//int X_size = xdims[0] * xdims[1] * sizeof(float);
int W_size = wdims[0] * wdims[1] * sizeof(float);
int Y_size = ydims[0] * ydims[1] * sizeof(float);
int d_size = 2 * sizeof(int);
//if(in == NULL) cudaMalloc((void**) &X_device, X_size);
cudaMalloc((void**) &W_device, W_size);
cudaMalloc((void**) &Y_device, Y_size);
//if(in == NULL) cudaMemcpy(X_device, X, X_size, cudaMemcpyHostToDevice);
cudaMemcpy(W_device, W, W_size, cudaMemcpyHostToDevice);
//cudaMemcpy(Y_device, Y, Y_size, cudaMemcpyHostToDevice);
cudaMemset(Y, 0, Y_size);
cudaMemcpyToSymbol(xdims_f, xdims, d_size);
cudaMemcpyToSymbol(wdims_f, wdims, d_size);
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim((xdims[0]+TILE_WIDTH-1)/TILE_WIDTH, (wdims[1]+TILE_WIDTH-1)/TILE_WIDTH, 1);
fully_forward_kernel<<<gridDim, blockDim>>>(X_device, W_device, Y_device, !copy);
cudaDeviceSynchronize();
if(copy) cudaMemcpy(Y, Y_device, Y_size, cudaMemcpyDeviceToHost);
cudaFree(X_device);
cudaFree(W_device);
//cudaFree(Y_device);
return Y_device;
}*/
// Tiled matrix multiply C = A * B with optional fused ReLU.
// A is xdims_f[0] x xdims_f[1]; B is wdims_f[0] x wdims_f[1] (row-major).
// Grid: one TILE_WIDTH x TILE_WIDTH block per output tile.
// Dynamic shared memory: 2 * TILE_WIDTH^2 floats (one tile of A, one of B),
// supplied as the third launch parameter by the host wrapper.
__global__ void fully_forward_kernel_tiled(float *A, float *B, float *C, bool relu) {
extern __shared__ float shmemmrelu[];
float *Ads = &shmemmrelu[0];
float *Bds = &shmemmrelu[TILE_WIDTH * TILE_WIDTH];
int bx=blockIdx.x;int by=blockIdx.y;
int tx=threadIdx.x;int ty=threadIdx.y;
int Row=by*TILE_WIDTH+ty;
int Col=bx*TILE_WIDTH+tx;
float Cvalue=0;
// March over the shared K dimension one tile ("phase") at a time.
for (int ph = 0; ph < (xdims_f[1] + TILE_WIDTH - 1) / TILE_WIDTH; ++ph){
// Stage this phase's A and B tiles into shared memory; out-of-range
// entries are zero-padded so the inner product below needs no guards.
if ((Row < xdims_f[0]) && (ph * TILE_WIDTH + tx < xdims_f[1]))
Ads[ty * TILE_WIDTH + tx] = A[Row * xdims_f[1] + ph * TILE_WIDTH + tx];
else
Ads[ty * TILE_WIDTH + tx] = 0.0;
if((ph * TILE_WIDTH + ty < wdims_f[0]) && (Col < wdims_f[1]))
Bds[ty * TILE_WIDTH + tx] = B[((ph * TILE_WIDTH) + ty) * wdims_f[1] + Col];
else
Bds[ty * TILE_WIDTH + tx] = 0.0;
__syncthreads();  // tiles fully staged before any thread reads them
for(int k = 0; k < TILE_WIDTH; ++k)
Cvalue += Ads[ty * TILE_WIDTH + k] * Bds[k * TILE_WIDTH + tx];
__syncthreads();  // all reads done before the next phase overwrites tiles
}
// Write the result, clamping negatives to zero when relu is requested.
if ((Row < xdims_f[0]) && (Col < wdims_f[1]))
C[Row * wdims_f[1] + Col] = (Cvalue < 0 && relu) ? 0 : Cvalue;
}
// Fully-connected layer: Y = X * W (with fused ReLU unless `copy` is set)
// computed on the device by fully_forward_kernel_tiled.
// `in` is the device-resident X from the previous layer (freed here); when
// `copy` is true the result is copied back into the host buffer Y and no
// ReLU is applied (final layer). Returns the device output buffer.
float* fully_forward_host_tiled(const float *X, const int xdims[2], float *W, const int wdims[2],
float *Y, const int ydims[2], float *in, bool copy = false){
float *X_device = in;
float *W_device = NULL;
float *Y_device = NULL;
const int W_size = wdims[0] * wdims[1] * sizeof(float);
const int Y_size = ydims[0] * ydims[1] * sizeof(float);
const int d_size = 2 * sizeof(int);
cudaMalloc((void**) &W_device, W_size);
cudaMalloc((void**) &Y_device, Y_size);
cudaMemcpy(W_device, W, W_size, cudaMemcpyHostToDevice);
cudaMemcpy(Y_device, Y, Y_size, cudaMemcpyHostToDevice);
// Matrix dimensions travel through constant memory.
cudaMemcpyToSymbol(xdims_f, xdims, d_size);
cudaMemcpyToSymbol(wdims_f, wdims, d_size);
// One block per TILE_WIDTH x TILE_WIDTH output tile.
const int gridX = (wdims[1] + TILE_WIDTH - 1) / TILE_WIDTH;
const int gridY = (xdims[0] + TILE_WIDTH - 1) / TILE_WIDTH;
dim3 blockDim(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim(gridX, gridY, 1);
// Two TILE_WIDTH^2 float tiles (A and B) of dynamic shared memory.
const size_t shmemBytes = sizeof(float) * (TILE_WIDTH * TILE_WIDTH * 2);
fully_forward_kernel_tiled<<<gridDim, blockDim, shmemBytes>>>(X_device, W_device, Y_device, !copy);
cudaDeviceSynchronize();
if (copy) cudaMemcpy(Y, Y_device, Y_size, cudaMemcpyDeviceToHost);
cudaFree(X_device);
cudaFree(W_device);
return Y_device;
}
// Convolution as an implicit GEMM with fused ReLU, one image per blockIdx.z.
// A is the filter tensor W[p, q, c, m] read as an (M x C*K*K) matrix built
// on the fly; B is the input X[n, h+p, w+q, c] unrolled to (C*K*K x H*W).
// Results are accumulated into NHWC output C via atomicAdd after clamping
// negatives (ReLU). Dims come from constant memory {x,w,y}dims_k.
// Dynamic shared memory: 2 * TILE_WIDTH^2 floats.
__global__ void gemmrelu_conv_kernel_merge(float *A, float *B, float *C) {
extern __shared__ float shmemmrelu[];
float *Ads = &shmemmrelu[0];
float *Bds = &shmemmrelu[TILE_WIDTH * TILE_WIDTH];
// Y[n, output height , output width, m] = 0 // W[p filter_h, q filter_w, c, m] // X[n, h + p,w + q,c]
int n = blockIdx.z;  // image within the batch
// Virtual GEMM shapes: A is (out-channels x unrolled-filter),
// B is (unrolled-filter x output-pixels).
int numARows = ydims_k[3];
int numAColumns = xdims_k[3] * wdims_k[0] * wdims_k[1];
int numBRows = xdims_k[3] * wdims_k[0] * wdims_k[1];
int numBColumns = ydims_k[1] * ydims_k[2];
int numCRows = ydims_k[3];
int numCColumns = ydims_k[1] * ydims_k[2];
int bx=blockIdx.x; int by=blockIdx.y;
int tx=threadIdx.x; int ty=threadIdx.y;
int Row=by*TILE_WIDTH+ty;
int Col=bx*TILE_WIDTH+tx;
float Cvalue=0;
for (int ph=0;ph<(numAColumns+TILE_WIDTH-1)/TILE_WIDTH;++ph){
// Stage one filter tile: decode the flat unrolled index back into
// (channel c, filter row p, filter col q) and gather from W.
if ((Row<numARows)&&(ph*TILE_WIDTH+tx<numAColumns)){
int m = by * TILE_WIDTH + ty;
int c = (ph * TILE_WIDTH + tx)/ (wdims_k[0] * wdims_k[1]);
int p = ((ph * TILE_WIDTH + tx) % (wdims_k[0] * wdims_k[1])) / wdims_k[1];
int q = ((ph * TILE_WIDTH + tx) % (wdims_k[0] * wdims_k[1])) % wdims_k[1];
Ads[ty * TILE_WIDTH + tx]=A[p * wdims_k[1] * wdims_k[2] * wdims_k[3] + q * wdims_k[2] * wdims_k[3] + c * wdims_k[3] + m];
}
else
Ads[ty * TILE_WIDTH + tx]=0.0;  // zero-pad out-of-range entries
// Stage one input tile: decode (c, p, q) plus the output pixel
// (h_out, w_out) and gather the corresponding input element.
if((ph * TILE_WIDTH + ty<numBRows)&&(Col<numBColumns)){
int cx = (ph * TILE_WIDTH + ty) / (wdims_k[0] * wdims_k[1]);
int px = ((ph * TILE_WIDTH + ty) % (wdims_k[0] * wdims_k[1])) / wdims_k[1];
int qx = ((ph * TILE_WIDTH + ty) % (wdims_k[0] * wdims_k[1])) % wdims_k[1];
int h_out = (bx * TILE_WIDTH + tx) / ydims_k[2];
int w_out = (bx * TILE_WIDTH + tx) % ydims_k[2];
Bds[ty * TILE_WIDTH + tx] =
B[n * xdims_k[1] * xdims_k[2] * xdims_k[3] + (h_out + px) * xdims_k[2] * xdims_k[3] +(w_out + qx) * xdims_k[3] + cx];}
else
Bds[ty * TILE_WIDTH + tx] = 0.0;
__syncthreads();  // both tiles staged before use
for(int k=0; k<TILE_WIDTH; ++k){
Cvalue += Ads[ty * TILE_WIDTH + k]*Bds[k * TILE_WIDTH + tx];}
__syncthreads();  // reads done before tiles are overwritten
}
// Fused ReLU, then accumulate into NHWC output. NOTE(review): atomicAdd
// suggests multiple contributions may target the same cell — the host
// zeroes Y_device before launch, consistent with accumulation.
if ((Row<numCRows)&&(Col<numCColumns)){
atomicAdd(&C[n * ydims_k[1] * ydims_k[2] * ydims_k[3] + (Col / ydims_k[2]) * ydims_k[2] * ydims_k[3]
+ (Col % ydims_k[2]) * ydims_k[3] + Row], (Cvalue < 0) ? 0 : Cvalue);
//C[n * ydims_k[1] * ydims_k[2] * ydims_k[3] + (Col / ydims_k[2]) * ydims_k[2] * ydims_k[3]
//+ (Col % ydims_k[2]) * ydims_k[3] + Row] = (Cvalue < 0) ? 0 : Cvalue;
}
}
// Convolution layer forward pass via the merged implicit-GEMM kernel:
// Y[n] = relu(W unrolled-matmul X[n]) for every image n in the batch.
// `in` optionally supplies a device-resident X (freed here); otherwise X is
// uploaded from the host. Returns the device output buffer (caller owns it).
float* convLayer_forward_merge(float *X, float *W, float *Y, const int xdims[4], const int ydims[4],
const int wdims[4], float *in = NULL){
const int d_size = sizeof(int) * 4;
const int W_size = sizeof(float) * wdims[0] * wdims[1] * wdims[2] * wdims[3];
const int Y_size = sizeof(float) * ydims[0] * ydims[1] * ydims[2] * ydims[3];
const int X_size = sizeof(float) * xdims[0] * xdims[1] * xdims[2] * xdims[3];
float *W_device = NULL;
float *Y_device = NULL;
float *X_device = in;
cudaMalloc((void**) &Y_device, Y_size);
cudaMalloc((void**) &W_device, W_size);
if (X_device == NULL) {
// First layer: the input still lives on the host.
cudaMalloc((void**) &X_device, X_size);
cudaMemcpy(X_device, X, X_size, cudaMemcpyHostToDevice);
}
cudaMemset(Y_device, 0, Y_size);  // kernel accumulates with atomicAdd
cudaMemcpy(W_device, W, W_size, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(wdims_k, wdims, d_size);
cudaMemcpyToSymbol(xdims_k, xdims, d_size);
cudaMemcpyToSymbol(ydims_k, ydims, d_size);
// Grid: x spans output pixels, y spans output channels, z spans the batch.
dim3 blockDim1(TILE_WIDTH, TILE_WIDTH, 1);
dim3 gridDim3((ydims[1] * ydims[2] + TILE_WIDTH - 1) / TILE_WIDTH,
(ydims[3] + TILE_WIDTH - 1) / TILE_WIDTH, ydims[0]);
const size_t shmemBytes = sizeof(float) * (TILE_WIDTH * TILE_WIDTH * 2);
gemmrelu_conv_kernel_merge<<< gridDim3, blockDim1, shmemBytes >>>(W_device, X_device, Y_device);
cudaDeviceSynchronize();
cudaFree(X_device);
cudaFree(W_device);
return Y_device;
}
// Elementwise float -> half conversion; one thread per element,
// TILE_WIDTH^2 threads per block.
__global__ void float_to_half_kernel(float *in, half *out, int size){
const int idx = blockIdx.x * TILE_WIDTH * TILE_WIDTH + threadIdx.x;
if (idx >= size) return;  // grid-tail guard
out[idx] = __float2half(in[idx]);
}
// Uploads `size` floats from the host buffer `in` and converts them to half
// precision on the device. Returns a device pointer to the half array;
// the caller owns (and must cudaFree) it.
half* float_to_half_host(float *in, int size){
float *in_device;
half *out_device;
cudaMalloc((void**) &in_device, sizeof(float) * size);
cudaMalloc((void**) &out_device, size * sizeof(half));
// BUG FIX: the original passed (void**)&in_device as the destination, which
// clobbers the host pointer variable itself instead of filling the device
// buffer — the kernel then converted garbage. Copy into the buffer.
cudaMemcpy(in_device, in, sizeof(float) * size, cudaMemcpyHostToDevice);
cudaMemset(out_device, 0, size * sizeof(half));
dim3 blockDim(TILE_WIDTH * TILE_WIDTH, 1, 1);
dim3 gridDim((size + TILE_WIDTH * TILE_WIDTH - 1) / (TILE_WIDTH * TILE_WIDTH), 1, 1);
float_to_half_kernel<<<gridDim, blockDim>>>(in_device, out_device, size);
cudaDeviceSynchronize();
cudaFree(in_device);  // staging buffer no longer needed
return out_device;
}
// Elementwise half -> float conversion; one thread per element,
// TILE_WIDTH^2 threads per block.
__global__ void half_to_float_kernel(half *in, float *out, int size){
const int idx = blockIdx.x * TILE_WIDTH * TILE_WIDTH + threadIdx.x;
if (idx >= size) return;  // grid-tail guard
out[idx] = __half2float(in[idx]);
}
// Uploads `size` half values from the host buffer `in` and converts them to
// float on the device. Returns a device pointer to the float array; the
// caller owns (and must cudaFree) it.
float* half_to_float_host(half *in, int size){
half *in_device;
float *out_device;
cudaMalloc((void**) &in_device, sizeof(half) * size);
cudaMalloc((void**) &out_device, sizeof(float) * size);
// BUG FIX: the original passed (void**)&in_device as the destination,
// overwriting the host pointer variable instead of the device buffer.
cudaMemcpy(in_device, in, sizeof(half) * size, cudaMemcpyHostToDevice);
// BUG FIX: out_device holds floats; the original cleared only
// size*sizeof(half) bytes, leaving half the buffer uninitialized.
cudaMemset(out_device, 0, size * sizeof(float));
dim3 blockDim(TILE_WIDTH * TILE_WIDTH, 1, 1);
dim3 gridDim((size + TILE_WIDTH * TILE_WIDTH - 1) / (TILE_WIDTH * TILE_WIDTH), 1, 1);
half_to_float_kernel<<<gridDim, blockDim>>>(in_device, out_device, size);
cudaDeviceSynchronize();
cudaFree(in_device);  // staging buffer no longer needed
return out_device;
}
|
10,063 | #if ACCELERATE_MODE == ACCELERATE_MODE_CUDA
#include <cuda.h>
#include <cuda_runtime_api.h>
// Picks an occupancy-friendly block size for kernel `t` via the CUDA
// occupancy API, then derives the grid size needed to cover n elements.
template <class T>
__host__ void getLaunchConfiguration(T t, int n, int *blocks, int *threads) {
cudaOccupancyMaxPotentialBlockSize(blocks, threads, t, 0, n);
const int perBlock = *threads;
*blocks = (n + perBlock - 1) / perBlock;  // ceil-divide over n
}
// c[i] = lgamma(a[i]) for each of the n elements; one thread per element.
__global__ void vec_lgamma(double *a, double *c, const unsigned int n) {
const long i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;  // grid-tail guard
c[i] = lgamma(a[i]);
}
// Applies lgamma elementwise to a rows*cols device matrix iData.
// Returns a freshly allocated device buffer with the result (caller frees).
__host__ double *cu_lgammed(const int rows, const int cols, double *iData) {
const int N = rows * cols;
double *result = nullptr;
cudaMalloc((void **) &result, N * sizeof(double));
int blocks = 0, threads = 0;
getLaunchConfiguration(vec_lgamma, N, &blocks, &threads);
vec_lgamma<<<blocks, threads>>>(iData, result, N);
cudaDeviceSynchronize();
return result;
}
// c[i] = a[i] + b[i] for each of the n elements; one thread per element.
__global__ void vec_add(double *a, double *b, double *c, const unsigned int n) {
const long i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;  // grid-tail guard
c[i] = a[i] + b[i];
}
// c[i] = a[i] - b[i] for each of the n elements; one thread per element.
__global__ void vec_sub(double *a, double *b, double *c, const unsigned int n) {
const long i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n) return;  // grid-tail guard
c[i] = a[i] - b[i];
}
// Elementwise sum of two rows*cols device matrices m1 + m2.
// Returns a freshly allocated device buffer with the result (caller frees).
__host__ double *cu_add(const int rows, const int cols, double *m1, double *m2) {
const int N = rows * cols;
double *result = nullptr;
cudaMalloc((void **) &result, N * sizeof(double));
int blocks = 0, threads = 0;
getLaunchConfiguration(vec_add, N, &blocks, &threads);
vec_add<<<blocks, threads>>>(m1, m2, result, N);
cudaDeviceSynchronize();
return result;
}
// Elementwise difference of two rows*cols device matrices m1 - m2.
// Returns a freshly allocated device buffer with the result (caller frees).
__host__ double *cu_sub(const int rows, const int cols, double *m1, double *m2) {
const int N = rows * cols;
double *result = nullptr;
cudaMalloc((void **) &result, N * sizeof(double));
int blocks = 0, threads = 0;
getLaunchConfiguration(vec_sub, N, &blocks, &threads);
vec_sub<<<blocks, threads>>>(m1, m2, result, N);
cudaDeviceSynchronize();
return result;
}
#endif
|
10,064 | /*-----------
*
* distanceShared.cu
*
* This is the source file of a kernel to calculate total distances of all points using shared memory.
*
* streamsOptBenchmark/distanceShared.cu
*
* By Hao Li
*
*------------
*/
// #include "distanceGlobal.cu"
// Euclidean distance between the points (x1, y1) and (x2, y2).
__device__ float get_distance(float x1, float y1, float x2, float y2)
{
const float dx = x1 - x2;
const float dy = y1 - y2;
return sqrt(dx * dx + dy * dy);
}
// __global__ void gpu_shared_memory(float *d_res, float *d_x, float *d_y, int samples)
// Sums distances from each point to every later point, staging coordinates
// through shared memory. Dynamic shared memory: 4 * thread_per_block floats
// (local x, local y, remote x, remote y); thread_per_block must equal
// blockDim.x. d_x holds x coords in [0, samples) and y coords in
// [samples, 2*samples). The outer 10000-iteration loop is benchmark repetition.
__global__ void gpu_shared_memory(float *d_res, float *d_x, int samples, int thread_per_block)
{
for(int l = 0; l < 10000; l++)
{
const int idx1 = blockDim.x * blockIdx.x + threadIdx.x;
float distance = 0.0;
extern __shared__ float l_x[];
// Shared layout: [0..tpb) local x, [tpb..2tpb) local y,
//                [2tpb..3tpb) remote x, [3tpb..4tpb) remote y.
const bool active = (idx1 < samples);
if (active) {
l_x[threadIdx.x] = d_x[idx1];
l_x[thread_per_block + threadIdx.x] = d_x[samples + idx1];
}
// BUG FIX: barriers were inside "if (idx1 < samples)"; __syncthreads()
// in a divergent branch is undefined for partially filled blocks.
// All barriers now execute unconditionally while loads/compute stay guarded.
__syncthreads();
if (active) {
// Pairs within this block's own tile (only later indices).
for(int idx2 = threadIdx.x + 1; idx2 < blockDim.x; idx2++)
{
// BUG FIX: arguments now match get_distance(x1, y1, x2, y2);
// the original passed (x_i, x_j, y_i, y_j), mixing coordinates.
distance += get_distance(l_x[threadIdx.x], l_x[thread_per_block + threadIdx.x],
l_x[idx2], l_x[thread_per_block + idx2]);
}
}
// Pairs against every later block's tile.
for(int i = blockIdx.x + 1; i < ceilf(samples/blockDim.x); i++)
{
const int idx2 = blockDim.x * i + threadIdx.x;
// BUG FIX: barrier added before refilling the remote tile so no thread
// is still reading the previous iteration's data.
__syncthreads();
if (idx2 < samples) {
// BUG FIX: the second store was unguarded (commented-out braces),
// reading d_x out of range for the tail block.
l_x[thread_per_block * 2 + threadIdx.x] = d_x[idx2];
l_x[thread_per_block * 3 + threadIdx.x] = d_x[samples + idx2];
}
__syncthreads();
if (active) {
// NOTE(review): like the original, the tail tile may contain stale
// entries past `samples`; kept for benchmark parity — confirm intent.
for (int j = 0; j < blockDim.x; j++)
distance += get_distance(l_x[threadIdx.x], l_x[thread_per_block + threadIdx.x],
l_x[thread_per_block * 2 + j], l_x[thread_per_block * 3 + j]);
}
}
if (active)
d_res[idx1] = distance / (samples - idx1);
}
}
// int main(int argc, char **argv)
// {
// float *host_x, *host_y;
// float *host_result;
// float host_sum = 0.0;
// int samples = SAMPLES;
// init_data(&host_x, &host_y, &host_result, samples);
// float *d_x, *d_y, *d_res, *d_res2;
// int num_of_block = ceil(samples/thread_per_block);
// int size = sizeof(float) * samples;
// cudaMalloc((void**)&d_x, size);
// cudaMalloc((void**)&d_y, size);
// cudaMalloc((void**)&d_res, size);
// cudaMalloc((void**)&d_res2, size);
// cudaMemcpy(d_x, host_x, size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_y, host_y, size, cudaMemcpyHostToDevice);
// dim3 GridDim(num_of_block, 1, 1), BlockDim(thread_per_block, 1, 1);
// // gpu_avg_distance<<<GridDim, BlockDim>>>(d_res, d_x, d_y, samples);
// cudaMemcpy(host_result, d_res, size, cudaMemcpyDeviceToHost);
// // host_sum = compute_sum(host_result, samples);
// // printf("GPU Global Memory -- Result = %f", host_sum);
// gpu_shared_memory<<<GridDim, BlockDim>>>(d_res2, d_x, d_y, samples);
// cudaMemcpy(host_result, d_res2, size, cudaMemcpyDeviceToHost);
// host_sum = compute_sum(host_result,samples);
// printf("GPU Shared Memory -- Result = %f", host_sum);
// cudaFree(d_x);
// cudaFree(d_y);
// cudaFree(d_res);
// cudaFree(d_res2);
// free( host_x );
// free( host_y );
// free( host_result );
// return 0;
// }
|
10,065 |
extern "C"
__constant__ unsigned int pattern[256];
extern "C"
__global__
// Searches for `pattern` (constant memory, packed 4 chars per uint) in the
// text, 4 byte-aligned candidate positions per thread. d_text4 is the text
// reinterpreted as uints; searchLength4 is its length in uints. Matches are
// appended to d_offsets (capped at nMaxMatched) with the count in d_nFound.
void searchPatternKernel_opt(int *d_nFound, int *d_offsets, int patternLength,
int nMaxMatched,
const unsigned int* __restrict__ d_text4, int searchLength4) {
int gid = blockDim.x * blockIdx.x + threadIdx.x;
if (gid < searchLength4) {
// matched[k] tracks whether the pattern still matches at byte offset
// gid*4 + k; the four candidates are tested together per loaded word.
bool matched[4] = { true, true, true, true };
const unsigned int *d_myPos4 = &d_text4[gid];
unsigned int chars4_1 = d_myPos4[0];
bool noMatch = false;
int patternLength4 = patternLength / 4;
int idx = 0;
// Compare whole 4-byte words; __byte_perm reassembles the shifted text
// windows (offsets 1..3) from two consecutive words.
// NOTE(review): d_myPos4[idx + 1] reads one word past gid — presumably
// the buffer is padded past searchLength4; confirm at the call site.
for (;idx < patternLength4; ++idx) {
unsigned int chars4_0 = chars4_1;
chars4_1 = d_myPos4[idx + 1];
unsigned int pattern4 = pattern[idx];
matched[0] &= (pattern4 == chars4_0);
matched[1] &= (pattern4 == __byte_perm(chars4_0, chars4_1, 0x4321));
matched[2] &= (pattern4 == __byte_perm(chars4_0, chars4_1, 0x5432));
matched[3] &= (pattern4 == __byte_perm(chars4_0, chars4_1, 0x6543));
noMatch = (!matched[0] && !matched[1]) && (!matched[2] && !matched[3]);
if (noMatch)
return;  // all four candidates ruled out — early exit
}
// Handle the 1-3 trailing pattern bytes with a byte mask.
int nToBeCompared = patternLength - patternLength4 * 4;
if (nToBeCompared != 0) {
unsigned int chars4_0 = chars4_1;
chars4_1 = d_myPos4[idx + 1];
unsigned int patternMask = (0xffffffff >> ((4 - nToBeCompared) * 8));
unsigned int pattern4 = pattern[patternLength4] & patternMask;
matched[0] &= (pattern4 == (chars4_0 & patternMask));
matched[1] &= (pattern4 == (__byte_perm(chars4_0, chars4_1, 0x4321) & patternMask));
matched[2] &= (pattern4 == (__byte_perm(chars4_0, chars4_1, 0x5432) & patternMask));
matched[3] &= (pattern4 == (__byte_perm(chars4_0, chars4_1, 0x6543) & patternMask));
noMatch = (!matched[0] && !matched[1]) && (!matched[2] && !matched[3]);
if (noMatch)
return;
}
// Record surviving candidates; atomicAdd reserves a slot per match.
#pragma unroll
for (int idx = 0; idx < 4; ++idx) {
if (matched[idx]) {
int offsetPos = atomicAdd(d_nFound, 1);
if (offsetPos < nMaxMatched)
d_offsets[offsetPos] = gid * 4 + idx;
}
}
}
}
|
10,066 | #define TPB1 128
#define TPB2D 16
#define TPBS 92
// Fused LBGK step for a 15-speed lattice: compute macroscopic fields, apply
// lid (lnl) and solid (snl) boundary handling, relax toward equilibrium, then
// perform the solid-node distribution swaps (half-way bounce-back pairing)
// and write the result to fOut. One thread per lattice node; fIns is used as
// per-thread scratch only (each thread touches row threadIdx.x exclusively,
// so no __syncthreads() is required). Assumes blockDim.x == TPBS and
// numSpd == 15 (the hard-coded loops and swap table rely on it).
__global__ void pre_streamLBGK_15s(float * fIn, float * fOut,
const int * snl, const int * lnl,
const float u_bc, const float omega,
const float * ex,const float * ey,
const float * ez, const float * w,
const int nnodes, const int numSpd){
int tid=threadIdx.x+blockIdx.x*blockDim.x;
if(tid<nnodes){
//load fIn data into shared memory
__shared__ float fIns[TPBS][15];
for(int spd=0;spd<numSpd;spd++){
fIns[threadIdx.x][spd]=fIn[spd*nnodes+tid];
}
//compute density and velocity
float rho = 0.; float ux=0.; float uy=0.; float uz=0.; float f_tmp;
for(int spd=0;spd<numSpd;spd++){
f_tmp=fIns[threadIdx.x][spd];
rho+=f_tmp;
ux+=f_tmp*ex[spd];
uy+=f_tmp*ey[spd];
uz+=f_tmp*ez[spd];
}
ux/=rho; uy/=rho; uz/=rho;
//check for boundary conditions and update fIns
if(snl[tid]==1){
// solid node: zero velocity before computing equilibrium
ux=0.; uy=0.; uz=0.;
}
if(lnl[tid]==1){
// lid node: nudge distributions toward the prescribed (0, u_bc, 0) velocity
for(int spd=1;spd<numSpd;spd++){
f_tmp = 3.0*(ex[spd]*(-ux)+ey[spd]*(u_bc-uy)+ez[spd]*(-uz));
fIns[threadIdx.x][spd]+=w[spd]*rho*f_tmp;
}
ux=0.;uy=u_bc;uz=0.;
}
//if it's not a bounce-back node, I can compute fEq and relax for each
//speed
float cu, fEq;
for(int spd=0;spd<15;spd++){
cu = 3.0*(ex[spd]*ux+ey[spd]*uy+ez[spd]*uz);
fEq=rho*w[spd]*(1.+cu+0.5*(cu*cu)-
(1.5)*(ux*ux+uy*uy+uz*uz));
fIns[threadIdx.x][spd]=
fIns[threadIdx.x][spd]-omega*(fIns[threadIdx.x][spd]-fEq);
}//for(int spd=0...
if(snl[tid]==1){
//if it's a solid node, I need to do a swap
// (exchange each speed with its opposite-direction partner)
// 1 -- 2
f_tmp=fIns[threadIdx.x][2];fIns[threadIdx.x][2]=fIns[threadIdx.x][1];
fIns[threadIdx.x][1]=f_tmp;
// 3 -- 4
f_tmp=fIns[threadIdx.x][4];fIns[threadIdx.x][4]=fIns[threadIdx.x][3];
fIns[threadIdx.x][3]=f_tmp;
// 5--6
f_tmp=fIns[threadIdx.x][6];fIns[threadIdx.x][6]=fIns[threadIdx.x][5];
fIns[threadIdx.x][5]=f_tmp;
// 7--14
f_tmp=fIns[threadIdx.x][14];fIns[threadIdx.x][14]=fIns[threadIdx.x][7];
fIns[threadIdx.x][7]=f_tmp;
// 8--13
f_tmp=fIns[threadIdx.x][13];fIns[threadIdx.x][13]=fIns[threadIdx.x][8];
fIns[threadIdx.x][8]=f_tmp;
// 9--12
f_tmp=fIns[threadIdx.x][12];fIns[threadIdx.x][12]=fIns[threadIdx.x][9];
fIns[threadIdx.x][9]=f_tmp;
// 10--11
f_tmp=fIns[threadIdx.x][11];fIns[threadIdx.x][11]=fIns[threadIdx.x][10];
fIns[threadIdx.x][10]=f_tmp;
}
//now write fIns out to fOut
for(int spd=0;spd<15;spd++){
fOut[spd*nnodes+tid]=fIns[threadIdx.x][spd];
}
}//if(tid<nnodes)...
}
// Pre-collision pass for a 15-speed lattice: compute macroscopic density and
// velocity per node, apply lid (lnl) and solid (snl) boundary handling, store
// the velocity into uxG/uyG/uzG and the equilibrium distribution into fEq.
// One thread per node; fIns is per-thread scratch (row threadIdx.x only, so
// no __syncthreads() needed). Assumes blockDim.x == TPBS and numSpd <= 15.
__global__ void pre_collide_15s(float * fIn, float * fEq,
float * uxG, float * uyG, float * uzG,
const int * snl,
const int * lnl,const float u_bc,
const float * ex,
const float * ey, const float * ez,
const float * w, const int nnodes,
const int numSpd){
int tid=threadIdx.x+blockIdx.x*blockDim.x;
if(tid<nnodes){
//load fIn data into shared memory
__shared__ float fIns[TPBS][15];
for(int spd=0;spd<numSpd;spd++){
fIns[threadIdx.x][spd]=fIn[spd*nnodes+tid];
}
//compute density and velocity
float rho = 0.; float ux=0.; float uy = 0.; float uz=0.; float f_tmp;
for(int spd=0;spd<numSpd;spd++){
f_tmp=fIns[threadIdx.x][spd];
rho+=f_tmp;
ux+=f_tmp*ex[spd];
uy+=f_tmp*ey[spd];
uz+=f_tmp*ez[spd];
}
ux/=rho; uy/=rho; uz/=rho;
//check for boundary conditions and update fIns
if(snl[tid]==1){
// solid node: zero macroscopic velocity
ux=0.; uy=0.; uz=0.;
}
if(lnl[tid]==1){
// lid node: nudge distributions toward the prescribed (0, u_bc, 0) velocity
for(int spd=1;spd<numSpd;spd++){
f_tmp = 3.0*(ex[spd]*(-ux)+ey[spd]*(u_bc-uy)+ez[spd]*(-uz));
fIns[threadIdx.x][spd]+=w[spd]*rho*f_tmp;
}
ux=0.;uy=u_bc;uz=0.;
}
uxG[tid]=ux; uyG[tid]=uy; uzG[tid]=uz;
//compute and store fEq
for(int spd=0;spd<numSpd;spd++){
f_tmp = 3.0*(ex[spd]*ux+ey[spd]*uy+ez[spd]*uz);
fEq[spd*nnodes+tid]=w[spd]*rho*(1. + f_tmp +
0.5*(f_tmp*f_tmp)-
1.5*(ux*ux+uy*uy+uz*uz));
}
}//if(tid<nnodes)...
}
// Host launcher for pre_streamLBGK_15s: one thread per lattice node,
// TPBS threads per block.
void pre_streamLBGK_15s_cuda(float * fIn, float * fOut,const int * snl,
const int * lnl, const float u_bc,
const float omega, const float * ex,
const float * ey,const float * ez,
const float * w, const int nnodes,
const int numSpd){
const int nBlocks = (nnodes + TPBS - 1) / TPBS;  // ceil-divide over nodes
pre_streamLBGK_15s<<<nBlocks, TPBS>>>(fIn, fOut, snl, lnl, u_bc, omega,
ex, ey, ez, w, nnodes, numSpd);
}
// Host launcher for pre_collide_15s: one thread per lattice node,
// TPBS threads per block.
void pre_collide_15s_cuda(float * fIn,float * fEq,
float * uxG,float * uyG, float * uzG,
const int * snl,const int * lnl,
const float u_bc,const float * ex, const float * ey,
const float * ez,const float * w,const int nnodes,
const int numSpd){
const int nBlocks = (nnodes + TPBS - 1) / TPBS;  // ceil-divide over nodes
pre_collide_15s<<<nBlocks, TPBS>>>(fIn, fEq, uxG, uyG, uzG, snl, lnl, u_bc,
ex, ey, ez, w, nnodes, numSpd);
}
// U[i] = |u(i)| = sqrt(ux^2 + uy^2 + uz^2); one thread per node.
__global__ void comp_speed(float * U, const float * ux, const float * uy,
const float * uz, const int nnodes){
const int node = threadIdx.x + blockIdx.x * blockDim.x;
if (node >= nnodes) return;  // grid-tail guard
const float x = ux[node];
const float y = uy[node];
const float z = uz[node];
U[node] = sqrt(x * x + y * y + z * z);
}
// Host launcher for comp_speed: TPB1 threads per block over nnodes nodes.
void comp_speed_cuda(float * U, const float * ux, const float * uy,
const float * uz, const int nnodes){
const int nBlocks = (nnodes + TPB1 - 1) / TPB1;
comp_speed<<<nBlocks, TPB1>>>(U, ux, uy, uz, nnodes);
}
// rho[node] = sum over speeds of fIn[spd*nnodes + node]; one thread per node.
__global__ void comp_density(float * rho, const float * fIn,const int nnodes,
const int numSpd){
const int node = threadIdx.x + blockIdx.x * blockDim.x;
if (node >= nnodes) return;  // grid-tail guard
float sum = 0.;
for (int s = 0; s < numSpd; ++s)
sum += fIn[s * nnodes + node];
rho[node] = sum;
}
// 3D-indexed variant of comp_density: one thread per (X, Y, Z) lattice node,
// flat node index X + Y*Nx + Z*Nx*Ny.
__global__ void comp_density2D(float * rho, const float * fIn,const int Nx,
const int Ny, const int Nz,
const int numSpd){
const int xi = threadIdx.x + blockIdx.x * blockDim.x;
const int yi = threadIdx.y + blockIdx.y * blockDim.y;
const int zi = threadIdx.z + blockIdx.z * blockDim.z;
if ((xi >= Nx) || (yi >= Ny) || (zi >= Nz)) return;  // domain guard
const int nnodes = Nx * Ny * Nz;
const int node = xi + yi * Nx + zi * Nx * Ny;
float sum = 0.;
for (int s = 0; s < numSpd; ++s)
sum += fIn[s * nnodes + node];
rho[node] = sum;
}
// Host launcher for comp_density2D: TPB2D x TPB2D tiles over the XY plane,
// one grid layer per Z slice.
void comp_density_cuda2D(float * rho, const float * fIn, const int Nx,
const int Ny, const int Nz, const int numSpd){
dim3 threads(TPB2D, TPB2D, 1);
dim3 blocks((Nx + TPB2D - 1) / TPB2D, (Ny + TPB2D - 1) / TPB2D, Nz);
comp_density2D<<<blocks, threads>>>(rho, fIn, Nx, Ny, Nz, numSpd);
}
// Host launcher for comp_density: TPB1 threads per block over nnodes nodes.
void comp_density_cuda(float * rho,const float * fIn, const int nnodes,
const int numSpd){
const int nBlocks = (nnodes + TPB1 - 1) / TPB1;
comp_density<<<nBlocks, TPB1>>>(rho, fIn, nnodes, numSpd);
}
// Macroscopic velocity per node: u = (sum_s e_s * f_s) / (sum_s f_s).
// One thread per node.
__global__ void comp_velocity(float * ux,float * uy, float * uz,
const float * fIn,
const float * ex, const float * ey,
const float * ez, const int nnodes,
const int numSpd){
const int node = threadIdx.x + blockIdx.x * blockDim.x;
if (node >= nnodes) return;  // grid-tail guard
float mx = 0., my = 0., mz = 0., dens = 0.;
for (int s = 0; s < numSpd; ++s) {
const float f = fIn[s * nnodes + node];
dens += f;
mx += ex[s] * f;
my += ey[s] * f;
mz += ez[s] * f;
}
ux[node] = mx / dens;
uy[node] = my / dens;
uz[node] = mz / dens;
}
// 3D-indexed variant of comp_velocity: one thread per (X, Y, Z) node,
// flat node index X + Y*Nx + Z*Nx*Ny.
__global__ void comp_velocity2D(float * ux, float * uy, float * uz,
const float * fIn, const float * ex,
const float * ey, const float * ez,
const int Nx, const int Ny, const int Nz,
const int numSpd){
const int xi = threadIdx.x + blockIdx.x * blockDim.x;
const int yi = threadIdx.y + blockIdx.y * blockDim.y;
const int zi = threadIdx.z + blockIdx.z * blockDim.z;
if ((xi >= Nx) || (yi >= Ny) || (zi >= Nz)) return;  // domain guard
const int nnodes = Nx * Ny * Nz;
const int node = xi + yi * Nx + zi * Nx * Ny;
float mx = 0., my = 0., mz = 0., dens = 0.;
for (int s = 0; s < numSpd; ++s) {
const float f = fIn[s * nnodes + node];
dens += f;
mx += ex[s] * f;
my += ey[s] * f;
mz += ez[s] * f;
}
ux[node] = mx / dens;
uy[node] = my / dens;
uz[node] = mz / dens;
}
// Host launcher for comp_velocity2D: TPB2D x TPB2D tiles over the XY plane,
// one grid layer per Z slice.
void comp_velocity_cuda2D(float * ux, float * uy, float * uz,
const float * fIn, const float * ex,
const float * ey, const float * ez,
const int Nx, const int Ny, const int Nz,
const int numSpd){
dim3 threads(TPB2D, TPB2D, 1);
dim3 blocks((Nx + TPB2D - 1) / TPB2D, (Ny + TPB2D - 1) / TPB2D, Nz);
comp_velocity2D<<<blocks, threads>>>(ux, uy, uz, fIn, ex, ey, ez, Nx, Ny, Nz, numSpd);
}
// Host launcher for comp_velocity: TPB1 threads per block over nnodes nodes.
void comp_velocity_cuda(float * ux, float * uy, float * uz,
const float * fIn, const float * ex,
const float * ey, const float * ez,
const int nnodes, const int numSpd){
const int nBlocks = (nnodes + TPB1 - 1) / TPB1;
comp_velocity<<<nBlocks, TPB1>>>(ux, uy, uz, fIn, ex, ey, ez, nnodes, numSpd);
}
// Velocity boundary conditions. Lid nodes (lnl==1): nudge each non-rest
// distribution toward the prescribed velocity (0, u_bc, 0) and pin the
// macroscopic velocity. Solid nodes (snl==1): zero the macroscopic velocity.
// One thread per node.
__global__ void velocity_BC(float * fIn, float * ux, float * uy, float * uz,
const int * lnl, const int * snl,
const float * ex, const float * ey,
const float * ez, const float * rho, const float u_bc,
const float * w,const int nnodes, const int numSpd){
const int node = threadIdx.x + blockIdx.x * blockDim.x;
if (node >= nnodes) return;  // grid-tail guard
if (lnl[node] == 1) {
const float rho_l = rho[node];
const float dx = -ux[node];
const float dy = u_bc - uy[node];
const float dz = -uz[node];
for (int s = 1; s < numSpd; ++s) {
const float cu = 3.0 * (ex[s] * dx + ey[s] * dy + ez[s] * dz);
fIn[s * nnodes + node] += w[s] * rho_l * cu;
}
ux[node] = 0.;
uy[node] = u_bc;
uz[node] = 0.;
}
if (snl[node] == 1) {
ux[node] = 0.;
uy[node] = 0.;
uz[node] = 0.;
}
}
// 3D-indexed variant of velocity_BC: one thread per (X, Y, Z) node,
// flat node index X + Y*Nx + Z*Nx*Ny.
__global__ void velocity_BC2D(float * fIn, float * ux, float * uy, float * uz,
const int * lnl, const int * snl,
const float * ex, const float * ey, const float * ez,
const float * rho, const float u_bc,
const float * w, const int Nx, const int Ny,
const int Nz,const int numSpd){
const int xi = threadIdx.x + blockIdx.x * blockDim.x;
const int yi = threadIdx.y + blockIdx.y * blockDim.y;
const int zi = threadIdx.z + blockIdx.z * blockDim.z;
if ((xi >= Nx) || (yi >= Ny) || (zi >= Nz)) return;  // domain guard
const int nnodes = Nx * Ny * Nz;
const int node = xi + yi * Nx + zi * Nx * Ny;
if (lnl[node] == 1) {
// Lid node: steer distributions toward (0, u_bc, 0), then pin u.
const float rho_l = rho[node];
const float dx = -ux[node];
const float dy = u_bc - uy[node];
const float dz = -uz[node];
for (int s = 1; s < numSpd; ++s) {
const float cu = 3.0 * (ex[s] * dx + ey[s] * dy + ez[s] * dz);
fIn[s * nnodes + node] += w[s] * rho_l * cu;
}
ux[node] = 0.;
uy[node] = u_bc;
uz[node] = 0.;
}
if (snl[node] == 1) {
// Solid node: zero macroscopic velocity.
ux[node] = 0.;
uy[node] = 0.;
uz[node] = 0.;
}
}
// Host launcher for velocity_BC2D: TPB2D x TPB2D tiles over the XY plane,
// one grid layer per Z slice.
void velocity_BC2D_cuda(float * fIn, float * ux, float * uy, float * uz,
const int *lnl, const int * snl, const float * ex,
const float * ey, const float * ez, const float * rho,
const float u_bc, const float * w, const int Nx,
const int Ny, const int Nz, const int numSpd){
dim3 threads(TPB2D, TPB2D, 1);
dim3 blocks((Nx + TPB2D - 1) / TPB2D, (Ny + TPB2D - 1) / TPB2D, Nz);
velocity_BC2D<<<blocks, threads>>>(fIn, ux, uy, uz, lnl, snl, ex, ey, ez,
rho, u_bc, w, Nx, Ny, Nz, numSpd);
}
// Host launcher for velocity_BC: TPB1 threads per block over nnodes nodes.
void velocity_BC_cuda(float * fIn,float * ux, float * uy, float * uz,
const int * lnl, const int * snl, const float * ex,
const float * ey,
const float * ez,const float * rho, const float u_bc,
const float * w, const int nnodes,const int numSpd){
const int nBlocks = (nnodes + TPB1 - 1) / TPB1;
velocity_BC<<<nBlocks, TPB1>>>(fIn, ux, uy, uz, lnl, snl, ex, ey, ez,
rho, u_bc, w, nnodes, numSpd);
}
// Equilibrium distribution per node:
// fEq_s = w_s * rho * (1 + 3(e_s.u) + 4.5(e_s.u)^2/... - 1.5|u|^2)
// written as 1 + cu + 0.5*cu^2 - 1.5|u|^2 with cu = 3(e_s.u).
// One thread per node.
__global__ void comp_fEq(float * fEq,const float * rho, const float * w,
const float * ux, const float * uy, const float * uz,
const float * ex, const float * ey, const float * ez,
const int nnodes,const int numSpd){
const int node = threadIdx.x + blockIdx.x * blockDim.x;
if (node >= nnodes) return;  // grid-tail guard
const float u = ux[node];
const float v = uy[node];
const float t = uz[node];
const float dens = rho[node];
for (int s = 0; s < numSpd; ++s) {
const float cu = 3.0 * (ex[s] * u + ey[s] * v + ez[s] * t);
fEq[s * nnodes + node] = w[s] * dens * (1.0 + cu + (0.5) * (cu * cu) -
1.5 * (u * u + v * v + t * t));
}
}
// 3D-indexed variant of comp_fEq: one thread per (X, Y, Z) node,
// flat node index X + Y*Nx + Z*Nx*Ny.
__global__ void comp_fEq2D(float * fEq, const float * rho, const float * w,
const float * ux, const float * uy, const float * uz,
const float * ex, const float * ey, const float * ez,
const int Nx, const int Ny, const int Nz,
const int numSpd){
const int xi = threadIdx.x + blockIdx.x * blockDim.x;
const int yi = threadIdx.y + blockIdx.y * blockDim.y;
const int zi = threadIdx.z + blockIdx.z * blockDim.z;
if ((xi >= Nx) || (yi >= Ny) || (zi >= Nz)) return;  // domain guard
const int nnodes = Nx * Ny * Nz;
const int node = xi + yi * Nx + zi * Nx * Ny;
const float u = ux[node];
const float v = uy[node];
const float t = uz[node];
const float dens = rho[node];
for (int s = 0; s < numSpd; ++s) {
const float cu = 3.0 * (ex[s] * u + ey[s] * v + ez[s] * t);
fEq[s * nnodes + node] = w[s] * dens * (1.0 + cu + (0.5) * (cu * cu) -
1.5 * (u * u + v * v + t * t));
}
}
// Host launcher for comp_fEq2D: TPB2D x TPB2D tiles over the XY plane,
// one grid layer per Z slice.
void comp_fEq2D_cuda(float * fEq, const float * rho, const float * w,
const float * ux, const float * uy, const float * uz,
const float * ex, const float * ey, const float * ez,
const int Nx, const int Ny, const int Nz,
const int numSpd){
dim3 threads(TPB2D, TPB2D, 1);
dim3 blocks((Nx + TPB2D - 1) / TPB2D, (Ny + TPB2D - 1) / TPB2D, Nz);
comp_fEq2D<<<blocks, threads>>>(fEq, rho, w, ux, uy, uz, ex, ey, ez, Nx, Ny, Nz, numSpd);
}
// Host launcher for comp_fEq: TPB1 threads per block over nnodes nodes.
void comp_fEq_cuda(float * fEq,const float * rho, const float * w,
const float * ux, const float *uy, const float * uz,
const float * ex,const float * ey, const float * ez,
const int nnodes,const int numSpd){
const int nBlocks = (nnodes + TPB1 - 1) / TPB1;
comp_fEq<<<nBlocks, TPB1>>>(fEq, rho, w, ux, uy, uz, ex, ey, ez, nnodes, numSpd);
}
// LBGK (single-relaxation-time) collision:
// fOut = fIn - omega * (fIn - fEq), per speed, per node.
__global__ void collideLBGK(float * fOut, const float * fIn, const float * fEq,
const float omega,const int nnodes,const int numSpd){
const int node = threadIdx.x + blockIdx.x * blockDim.x;
if (node >= nnodes) return;  // grid-tail guard
for (int s = 0; s < numSpd; ++s) {
const int k = s * nnodes + node;
fOut[k] = fIn[k] - omega * (fIn[k] - fEq[k]);
}
}
// 3D-indexed variant of collideLBGK: one thread per (X, Y, Z) node,
// flat node index X + Y*Nx + Z*Nx*Ny.
__global__ void collideLBGK2D(float * fOut, const float * fIn, const float * fEq,
const float omega, const int Nx, const int Ny,
const int Nz, const int numSpd){
const int xi = threadIdx.x + blockIdx.x * blockDim.x;
const int yi = threadIdx.y + blockIdx.y * blockDim.y;
const int zi = threadIdx.z + blockIdx.z * blockDim.z;
if ((xi >= Nx) || (yi >= Ny) || (zi >= Nz)) return;  // domain guard
const int nnodes = Nx * Ny * Nz;
const int node = xi + yi * Nx + zi * Nx * Ny;
for (int s = 0; s < numSpd; ++s) {
const int k = s * nnodes + node;
fOut[k] = fIn[k] - omega * (fIn[k] - fEq[k]);
}
}
// Host launcher for collideLBGK2D: TPB2D x TPB2D tiles over the XY plane,
// one grid layer per Z slice.
void collideLBGK2D_cuda(float * fOut, const float * fIn, const float * fEq,
const float omega, const int Nx, const int Ny,
const int Nz, const int numSpd){
dim3 threads(TPB2D, TPB2D, 1);
dim3 blocks((Nx + TPB2D - 1) / TPB2D, (Ny + TPB2D - 1) / TPB2D, Nz);
collideLBGK2D<<<blocks, threads>>>(fOut, fIn, fEq, omega, Nx, Ny, Nz, numSpd);
}
// Host launcher for collideLBGK: TPB1 threads per block over nnodes nodes.
void collideLBGK_cuda(float * fOut, const float * fIn, const float * fEq,
const float omega, const int nnodes,const int numSpd){
const int nBlocks = (nnodes + TPB1 - 1) / TPB1;
collideLBGK<<<nBlocks, TPB1>>>(fOut, fIn, fEq, omega, nnodes, numSpd);
}
// Bounce-back at solid nodes (snl==1): each incoming distribution is written
// back along its opposite direction, bb_spd[s]. One thread per node.
__global__ void bounceBack(float * fOut, const float * fIn, const int * snl,
const int * bb_spd, const int nnodes, const int numSpd){
const int node = threadIdx.x + blockIdx.x * blockDim.x;
if (node >= nnodes) return;  // grid-tail guard
if (snl[node] != 1) return;  // only solid nodes bounce back
for (int s = 1; s < numSpd; ++s)
fOut[bb_spd[s] * nnodes + node] = fIn[s * nnodes + node];
}
// 3D-indexed variant of bounceBack: one thread per (X, Y, Z) node,
// flat node index X + Y*Nx + Z*Nx*Ny.
__global__ void bounceBack2D(float * fOut, const float * fIn, const int * snl,
const int * bb_spd, const int Nx, const int Ny,
const int Nz, const int numSpd){
const int xi = threadIdx.x + blockIdx.x * blockDim.x;
const int yi = threadIdx.y + blockIdx.y * blockDim.y;
const int zi = threadIdx.z + blockIdx.z * blockDim.z;
if ((xi >= Nx) || (yi >= Ny) || (zi >= Nz)) return;  // domain guard
const int nnodes = Nx * Ny * Nz;
const int node = xi + yi * Nx + zi * Nx * Ny;
if (snl[node] != 1) return;  // only solid nodes bounce back
for (int s = 1; s < numSpd; ++s)
fOut[bb_spd[s] * nnodes + node] = fIn[s * nnodes + node];
}
// Host launcher for bounceBack2D: TPB2D x TPB2D tiles over the XY plane,
// one grid layer per Z slice.
void bounceBack2D_cuda(float * fOut, const float * fIn, const int * snl,
const int * bb_spd, const int Nx, const int Ny,
const int Nz, const int numSpd){
dim3 threads(TPB2D, TPB2D, 1);
dim3 blocks((Nx + TPB2D - 1) / TPB2D, (Ny + TPB2D - 1) / TPB2D, Nz);
bounceBack2D<<<blocks, threads>>>(fOut, fIn, snl, bb_spd, Nx, Ny, Nz, numSpd);
}
// Host wrapper: launch the 1D bounceBack kernel with TPB1-thread blocks
// covering all nnodes lattice nodes.
void bounceBack_cuda(float * fOut, const float * fIn, const int * snl,
                     const int * bb_spd,const int nnodes,const int numSpd){
  dim3 threads(TPB1, 1, 1);
  dim3 blocks((nnodes + TPB1 - 1)/TPB1, 1, 1);  // ceil-div
  bounceBack<<<blocks, threads>>>(fOut, fIn, snl, bb_spd, nnodes, numSpd);
}
// Streaming (propagation) step with periodic boundaries on all three axes:
// every node pushes its post-collision value fOut along each lattice
// direction (ex, ey, ez) into fIn at the destination node.
// NOTE(review): ex/ey/ez are float arrays added to int coordinates, so the
// sum truncates on assignment; the wrap logic below only handles a single
// step past the edge, so this assumes components are exactly -1, 0, or +1
// -- confirm against the lattice definition.
__global__ void stream(float * fIn, const float * fOut, const float * ex,
const float * ey, const float * ez,const int Nx,
const int Ny, const int Nz,const int numSpd){
int X = threadIdx.x+blockIdx.x*blockDim.x;
int Y = threadIdx.y+blockIdx.y*blockDim.y;
int Z = threadIdx.z+blockIdx.z*blockDim.z;
if((X<Nx)&&(Y<Ny)&&(Z<Nz)){
int nnodes=Nx*Ny*Nz;
int tid=X+Y*Nx+Z*Nx*Ny;
int tid_t;
int X_t,Y_t,Z_t;
for(int spd=0;spd<numSpd;spd++){
// destination coordinates for this lattice direction (float -> int truncation)
X_t=X+ex[spd];
Y_t=Y+ey[spd];
Z_t=Z+ez[spd];
// periodic wrap-around: one step past either edge re-enters the domain
if(X_t==Nx)
X_t=0;
if(Y_t==Ny)
Y_t=0;
if(Z_t==Nz)
Z_t=0;
if(X_t<0)
X_t=(Nx-1);
if(Y_t<0)
Y_t=(Ny-1);
if(Z_t<0)
Z_t=(Nz-1);
// scatter: each source node writes a distinct (spd, destination) slot
tid_t=X_t+Y_t*Nx+Z_t*Nx*Ny;
fIn[spd*nnodes+tid_t]=fOut[spd*nnodes+tid];
}
}
}
// Host wrapper: launch the stream kernel with TPB2D x TPB2D thread blocks
// tiled over the (Nx, Ny) plane and one grid layer per Z slice.
void stream_cuda(float * fIn, const float * fOut, const float * ex,
                 const float * ey, const float * ez, const int Nx,
                 const int Ny, const int Nz, const int numSpd){
  dim3 threads(TPB2D, TPB2D, 1);
  dim3 blocks((Nx + TPB2D - 1)/TPB2D, (Ny + TPB2D - 1)/TPB2D, Nz);  // ceil-div
  stream<<<blocks, threads>>>(fIn, fOut, ex, ey, ez, Nx, Ny, Nz, numSpd);
}
|
10,067 | /*Vector sum in CPU*/
#include <stdio.h>
#define N 10
/* Element-wise vector sum over the N global entries: c[i] = a[i] + b[i]. */
void add(int *a, int *b, int *c)
{
    for (int i = 0; i < N; i++) {
        c[i] = a[i] + b[i];
    }
}
int main(void)
{
    int a[N], b[N], c[N];
    /* Fill the inputs: a[i] = -i, b[i] = i*i. */
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }
    add(a, b, c);
    /* Print each element-wise sum. */
    for (int i = 0; i < N; i++)
        printf("%d+%d=%d\n", a[i], b[i], c[i]);
    return 0;
}
|
10,068 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#ifndef NDEBUG
#define CHECK_STATUS(status) \
if (status != cudaSuccess) \
fprintf(stderr, "File: %s\nLine:%d Function:%s>>>%s\n", __FILE__, __LINE__, __FUNCTION__,\
cudaGetErrorString(status))
#else
#define CHECK_STATUS(status) status
#endif
//////////////////////////////////////////////////////////////////////////////////////////////////
// Empty placeholder kernel used only to demonstrate launching work on
// streams bound to different devices.
__global__ void MyKernel(){
}
int main(int argc, char **argv) {
CHECK_STATUS(cudaSetDevice(0)); // select device 0
cudaStream_t s0;
cudaStreamCreate(&s0); // create stream s0, associated with device 0
MyKernel<<<100, 64, 0, s0>>>(); // run MyKernel on device 0 in stream s0
CHECK_STATUS(cudaGetLastError());
CHECK_STATUS(cudaSetDevice(1)); // select device 1
cudaStream_t s1;
cudaStreamCreate(&s1); // create stream s1, associated with device 1
MyKernel<<<100, 64, 0, s1>>>(); // run MyKernel on device 1 in stream s1
CHECK_STATUS(cudaGetLastError());
// This call fails (intentional demonstration):
MyKernel<<<100, 64, 0, s0>>>(); // device 1 is current, but stream s0 belongs to device 0
// 1. memory copies across devices succeed
// 2. cudaEventRecord() fails if the stream and the event belong to different devices
// 3. cudaEventElapsedTime() fails if the two events belong to different devices
// 4. cudaEventSynchronize() and cudaEventQuery() succeed
// 5. cudaStreamWaitEvent() succeeds
CHECK_STATUS(cudaStreamDestroy(s0));
CHECK_STATUS(cudaStreamDestroy(s1));
return 0;
}
|
10,069 | #include "includes.h"
// Sequential phase of a Ford-Fulkerson-style max-flow: walks the augmenting
// path recorded by a previous BFS phase, applies its bottleneck capacity,
// and resets the BFS state for the next iteration.
// NOTE(review): the body is entirely serial (no thread indexing), so this is
// presumably launched with a single thread (<<<1,1>>>) -- confirm at call site.
// Conventions visible here: node 0 is the source, node N-1 the sink;
// par_mat[v*N+j] marks j as v's BFS parent.
__global__ void k3(const int N, int* augPath, bool* visited, int* frontier, bool* new_frontier, bool* par_mat, int* cap_mat, bool* adj_mat, int* cap_max_mat, int* maxflow, bool* augFound) {
augFound[0] = false;
//Find the augmented path: follow parents back from the sink (N-1) to the
//source (0), recording the nodes in augPath in sink-to-source order.
augPath[0] = N - 1;
int i = 1, vertex = N - 1;
while(vertex != 0) {
for(int j = 0; j < N; j++) {
if(par_mat[vertex * N + j]) {
vertex = j;
augPath[i] = vertex;
i++;
break;
}
}
}
//Compute the bottleneck (minimum residual capacity) along the path.
//Each step (k <- j) is a forward edge if adj_mat[j*N+k] holds, otherwise
//it is traversed as the reverse of edge (k -> j).
int bottleneck = -1;
for(int i = 0; i < N; i++) {
if(augPath[i] == 0)
break;
else {
int k = augPath[i];
int j = augPath[i + 1];
int freeCap;
if(adj_mat[j * N + k]) {
freeCap = cap_max_mat[j * N + k] - cap_mat[j * N + k];
} else {
freeCap = cap_mat[k * N + j];
}
if(bottleneck == -1)
bottleneck = freeCap;
else if(freeCap < bottleneck)
bottleneck = freeCap;
}
}
maxflow[0] += bottleneck;
//Update capacities in d_cap_mat: push bottleneck flow forward, or cancel
//flow on reverse edges.
for(int i = 0; i < N; i++) {
if(augPath[i] == 0)
break;
else {
int k = augPath[i];
int j = augPath[i + 1];
if(adj_mat[j * N + k]) {
cap_mat[j * N + k] += bottleneck;
} else {
cap_mat[k * N + j] -= bottleneck;
}
}
}
//Initialize par_mat for the next BFS phase
for(int i=0;i<N*N;i++)
par_mat[i] = false;
//Initialize visited and frontier; only the source starts visited.
for(int i=0;i<N;i++) visited[i] = false;
for(int i=0;i<N;i++) new_frontier[i] = false;
visited[0] = true;
// NOTE(review): frontier[0] looks like a frontier size (1) and frontier[1]
// the source node (0) -- confirm against the consuming kernel.
frontier[0] = 1;
frontier[1] = 0;
}
10,070 | #include "includes.h"
/*----------------------------------------------------------------
*
* Multiprocesadores: Cuda
* Fecha: 11-Nov-2019
* Autor: A01206747 Mariana Perez
Autor: A01205559 Roberto Nuñez
* Image = 1080 x 1920
		Speedup = 33.93700 ms / 0.00250 ms = 13574.8
*--------------------------------------------------------------*/
// Convert an interleaved RGB(A) image to grayscale using the BT.709 luma
// weights (0.2126 R + 0.7152 G + 0.0722 B) and replicate the result into
// the first three channels of the destination pixel.
// One thread per pixel over a 2D launch; out-of-range threads exit.
__global__ void grayscale(unsigned char *src, unsigned char *dest, int width, int height, int nChannels) {
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    if(y < height && x < width) {
        int pos = (y * width + x) * nChannels;
        float r = src[pos];
        float g = src[pos + 1];
        float b = src[pos + 2];
        // BUG FIX: the original wrote '(unsigned char)0.2126 * r + ...',
        // where the cast bound only to the literal, truncating the red
        // weight to 0 and dropping the red channel entirely.  The cast must
        // wrap the whole weighted sum.
        unsigned char gray = (unsigned char)(0.2126f * r + 0.7152f * g + 0.0722f * b);
        dest[pos] = dest[pos + 1] = dest[pos + 2] = gray;
    }
}
10,071 | #include <stdio.h>
// Enumerate CUDA devices and print their key properties.
int main()
{
  int nDevices;
  cudaGetDeviceCount(&nDevices);
  for (int i = 0; i < nDevices; i++) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, i);
    printf("Device Number: %d\n", i);
    printf("  Device name: %s\n", prop.name);
    /* BUG FIX: totalGlobalMem, totalConstMem and sharedMemPerBlock are
       size_t; printing them with %d (or the malformed "%ui") is undefined
       behaviour on LP64 platforms.  Use %zu. */
    printf("  totalGlobalMem: %zu\n", prop.totalGlobalMem);
    printf("  Const Mem : %zu\n", prop.totalConstMem);
    printf("Max shared mem for blocks %zu\n", prop.sharedMemPerBlock);
    printf("max regs per block %d\n", prop.regsPerBlock);
    printf("Max thread per block %d\n", prop.maxThreadsPerBlock);
    printf("multiProcessorCount : %d\n", prop.multiProcessorCount);
    printf("maxThreadsDim %d %d %d\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
    printf("maxGridSize %d %d %d\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
  }
  return 0;
}
10,072 | #include <iostream>
#include <cuda.h>
using namespace std;
// device variable
__device__ float d_test[2][2];
// Write 1.0 into element [1][1] of the file-scope __device__ array d_test.
__global__ void kernel1() {
d_test[1][1] = 1.0;
}
// Demonstrates zeroing and reading back a __device__ symbol.
int main() {
    float h_test = 0.0;
    // BUG FIX: the original called cudaMemset(&d_test, ...), passing the
    // HOST address of the device symbol, which is not a valid device
    // pointer.  Resolve the symbol to its device address first.
    void *d_test_addr = 0;
    cudaGetSymbolAddress(&d_test_addr, d_test);
    cudaMemset(d_test_addr, 0, 4*sizeof(float));
    // invoke kernel (sets d_test[1][1] = 1.0)
    kernel1 <<<1,1>>> ();
    // Copy element d_test[1][1] back: byte offset 3*sizeof(float) into the
    // flattened 2x2 array.  cudaMemcpyFromSymbol is required for symbols.
    cudaMemcpyFromSymbol(&h_test, d_test, sizeof(float), 3*sizeof(float), cudaMemcpyDeviceToHost);
    cout << h_test << endl;
}
|
10,073 | #include "includes.h"
__global__ void mykernel(void)
{
} |
10,074 | //====================================================
// Device Recognization
// main.cu : Main Routine
//----------------------------------------------------
// Rev.01 2019.06.29 M.Munetomo
//----------------------------------------------------
// Copyright (C) 2019 Munetomo Maruyama
//====================================================
#include <cinttypes>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <unistd.h>
#define DATA_SIZE_X 64
#define DATA_SIZE_Y 32
#define BLOCK_SIZE (8, 4)
//-----------------
// Device Kernel
//-----------------
// Print each thread's global 2D coordinates together with the raw
// thread/block indices that produced them (device-side printf, one line per
// thread; output ordering between threads is unspecified).
// NOTE(review): ix/iy are uint32_t printed with %d -- harmless for these
// small grids, but %u would be the matching specifier.
__global__ void Device_Kernel(void)
{
uint32_t ix = threadIdx.x + blockIdx.x * blockDim.x;
uint32_t iy = threadIdx.y + blockIdx.y * blockDim.y;
printf("Device (ix, iy) = (%d, %d) : \
threadIdx.x = %d, blockIdx.x = %d, blockDim.x = %d, \
threadIdx.y = %d, blockIdx.y = %d, blockDim.y = %d\n",
ix, iy,
threadIdx.x, blockIdx.x, blockDim.x,
threadIdx.y, blockIdx.y, blockDim.y);
}
//----------------------------------
// Check Error during CUDA Runtime
//----------------------------------
#define CHECK(func) \
{ \
const cudaError_t error = func; \
if (error != cudaSuccess) \
{ \
printf("Error: %s:%d, ", __FILE__, __LINE__); \
printf("Code:%d, Reason: %s\n", error, \
cudaGetErrorString(error)); \
cudaDeviceReset(); \
exit(EXIT_FAILURE); \
} \
}
//----------------------------------
// Main Routine
//----------------------------------
int main(void)
{
// Grids and Blocks.
// NOTE: BLOCK_SIZE is defined as "(8, 4)", so the next line expands to
// "dim3 block (8, 4);" -- an 8x4 thread block.  The grid is a ceiling
// division so it fully covers the DATA_SIZE_X x DATA_SIZE_Y domain.
dim3 block BLOCK_SIZE;
dim3 grid(((DATA_SIZE_X) + block.x - 1) / block.x,
((DATA_SIZE_Y) + block.y - 1) / block.y);
//
// Call Kernel (warm up)
Device_Kernel <<<grid, block>>> ();
//
// Wait for termination of all threads (this also surfaces any asynchronous
// kernel execution error through CHECK)
CHECK(cudaDeviceSynchronize());
//
// Reset Device
CHECK(cudaDeviceReset());
//
// Return from this Program
return(EXIT_SUCCESS);
}
//====================================================
// End of Program
//====================================================
|
10,075 | #include "includes.h"
// One thread per cell: emits three mutually perpendicular axis-aligned
// textured quads (a billboard "cross") centred on the cell's 3D point.
// vertexData layout: 36 position floats per cell starting at quadOffset
// (3 quads x 4 vertices x 3 coords), followed -- after all maxCells*36
// position floats -- by 24 texture-coordinate floats per cell (3 quads x
// 4 vertices x 2 coords).  Each cell samples its own textureWidth-wide
// column of a texture atlas laid out horizontally.  Cells whose
// activityFlag is 0 collapse to zero-size quads (halfSide = 0).
// NOTE(review): threadId assumes a 1D block layout (only threadIdx.x is
// used) over a 2D grid -- confirm the launch configuration matches.
__global__ void ComputeQuadsKernel( float *pointsCoordinates, float *vertexData, int quadOffset, float textureSide, int *activityFlag, int textureWidth, int maxCells )
{
int threadId = blockDim.x*blockIdx.y*gridDim.x //rows preceeding current row in grid
+ blockDim.x*blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if(threadId < maxCells)
{
// Cell centre (x, y, z), stored as consecutive float triples.
float x = pointsCoordinates[threadId * 3];
float y = pointsCoordinates[threadId * 3 + 1];
float z = pointsCoordinates[threadId * 3 + 2];
float halfSide = 0.50f * textureSide;
if(activityFlag[threadId] == 0)
{
halfSide = 0.00f; // inactive cell: degenerate (invisible) quads
}
// Texture coordinates start after all position data (maxCells * 36 floats).
int textureOffset = quadOffset + maxCells * 4 * 3 * 3;
float textureAbsLength = (float)(maxCells * textureWidth);
// vertical x-alligned
vertexData[quadOffset + threadId * 36] = x - halfSide;
vertexData[quadOffset + threadId * 36 + 1] = y + halfSide;
vertexData[quadOffset + threadId * 36 + 2] = z;
vertexData[textureOffset + threadId * 24] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 1] = 0.00f;
vertexData[quadOffset + threadId * 36 + 3] = x - halfSide;
vertexData[quadOffset + threadId * 36 + 4] = y - halfSide;
vertexData[quadOffset + threadId * 36 + 5] = z;
vertexData[textureOffset + threadId * 24 + 2] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 3] = 1.00f;
vertexData[quadOffset + threadId * 36 + 6] = x + halfSide;
vertexData[quadOffset + threadId * 36 + 7] = y - halfSide;
vertexData[quadOffset + threadId * 36 + 8] = z;
vertexData[textureOffset + threadId * 24 + 4] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 5] = 1.00f;
vertexData[quadOffset + threadId * 36 + 9] = x + halfSide;
vertexData[quadOffset + threadId * 36 + 10] = y + halfSide;
vertexData[quadOffset + threadId * 36 + 11] = z;
vertexData[textureOffset + threadId * 24 + 6] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 7] = 0.00f;
// horizontal
vertexData[quadOffset + threadId * 36 + 12] = x - halfSide;
vertexData[quadOffset + threadId * 36 + 13] = y;
vertexData[quadOffset + threadId * 36 + 14] = z + halfSide;
vertexData[textureOffset + threadId * 24 + 8] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 9] = 1.00f;
vertexData[quadOffset + threadId * 36 + 15] = x - halfSide;
vertexData[quadOffset + threadId * 36 + 16] = y;
vertexData[quadOffset + threadId * 36 + 17] = z - halfSide;
vertexData[textureOffset + threadId * 24 + 10] = (float)(threadId * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 11] = 0.00f;
vertexData[quadOffset + threadId * 36 + 18] = x + halfSide;
vertexData[quadOffset + threadId * 36 + 19] = y;
vertexData[quadOffset + threadId * 36 + 20] = z - halfSide;
vertexData[textureOffset + threadId * 24 + 12] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 13] = 0.00f;
vertexData[quadOffset + threadId * 36 + 21] = x + halfSide;
vertexData[quadOffset + threadId * 36 + 22] = y;
vertexData[quadOffset + threadId * 36 + 23] = z + halfSide;
vertexData[textureOffset + threadId * 24 + 14] = (float)((threadId + 1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 15] = 1.00f;
// vertical z-alligned
vertexData[quadOffset + threadId * 36 + 24] = x;
vertexData[quadOffset + threadId * 36 + 25] = y - halfSide;
vertexData[quadOffset + threadId * 36 + 26] = z + halfSide;
vertexData[textureOffset + threadId * 24 + 16] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 17] = 1.00f;
vertexData[quadOffset + threadId * 36 + 27] = x;
vertexData[quadOffset + threadId * 36 + 28] = y - halfSide;
vertexData[quadOffset + threadId * 36 + 29] = z - halfSide;
vertexData[textureOffset + threadId * 24 + 18] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 19] = 1.00f;
vertexData[quadOffset + threadId * 36 + 30] = x;
vertexData[quadOffset + threadId * 36 + 31] = y + halfSide;
vertexData[quadOffset + threadId * 36 + 32] = z - halfSide;
vertexData[textureOffset + threadId * 24 + 20] = (float)((threadId) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 21] = 0.00f;
vertexData[quadOffset + threadId * 36 + 33] = x;
vertexData[quadOffset + threadId * 36 + 34] = y + halfSide;
vertexData[quadOffset + threadId * 36 + 35] = z + halfSide;
vertexData[textureOffset + threadId * 24 + 22] = (float)((threadId+1) * textureWidth) / textureAbsLength;
vertexData[textureOffset + threadId * 24 + 23] = 0.00f;
}
}
10,076 | #include "includes.h"
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
// One Conway-style Game of Life update per thread over a largeur x hauteur
// grid stored row-major in A (N is unused, kept for interface compatibility).
// BUG FIX: the row index was derived as 'idx / hauteur' while the column
// used 'largeur'; both must use the row width 'largeur'.  The original was
// only correct for square grids.
// NOTE(review): the update is in place, so neighbours may be read before or
// after other threads updated them; a race-free generation step needs a
// separate output buffer -- confirm whether this asynchronous update is
// intentional.
__global__ void game(int* A, const int N, const int largeur, const int hauteur){
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	int y = idx / largeur;            // row (FIXED: was idx / hauteur)
	int x = idx - (y * largeur);      // column within the row
	if (y >= hauteur || x >= largeur)
		return;
	int me = A[idx];
	// Sum the up-to-8 neighbours, guarding every grid edge.
	int voisins = 0;
	if (x > 0)                                  voisins += A[idx - 1];            // west
	if (x < largeur - 1)                        voisins += A[idx + 1];            // east
	if (y > 0)                                  voisins += A[idx - largeur];      // north
	if (y < hauteur - 1)                        voisins += A[idx + largeur];      // south
	if ((y < hauteur - 1) && (x < largeur - 1)) voisins += A[idx + largeur + 1];  // south-east
	if ((y < hauteur - 1) && (x > 0))           voisins += A[idx + largeur - 1];  // south-west
	if ((y > 0) && (x > 0))                     voisins += A[idx - largeur - 1];  // north-west
	if ((y > 0) && (x < largeur - 1))           voisins += A[idx - largeur + 1];  // north-east
	// Life rules: a live cell dies with <2 or >3 neighbours; a dead cell with
	// exactly 3 neighbours becomes alive.  (Writing 0 for any cell with >3
	// neighbours matches the original operator precedence: a dead cell with
	// >3 neighbours stays 0 either way.)
	if ((me == 1 && voisins < 2) || (voisins > 3))
		A[idx] = 0;
	else if (me == 0 && voisins == 3)
		A[idx] = 1;
}
10,077 | #include <iostream>
#include <stdio.h>
#include <cuda.h>
#define THREADS 4
#define ITEMS_PER_THREAD 30
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
// Report the CUDA error string together with the failing expression and its
// source location, then terminate the process.  Used via checkCudaErrors.
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
if (err != cudaSuccess) {
std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
exit(1);
}
}
/**
 * Per-block histogram kernel.
 *
 * Each thread bins ITEMS_PER_THREAD consecutive values of `values` into a
 * private histogram, the block then reduces the per-thread histograms in
 * shared memory, and thread 0 writes the block's partial histogram to
 * histogram[blockIdx.x * bins .. + bins - 1].  The host is expected to sum
 * the per-block partials.
 *
 * Dynamic shared memory required: THREADS * bins * sizeof(int) bytes.
 * FIX: the original never free()d the device-heap allocation `local_hist`,
 * leaking device heap on every launch.
 */
__global__ void histogram_cuda(int *histogram, float *values, size_t nb, float bin_size, float min, int bins, int nb_thread)
{
	// nb = total number of input values
	int id = (blockIdx.x * blockDim.x + threadIdx.x) * ITEMS_PER_THREAD;
	int thread_id = threadIdx.x;
	// Per-thread private histogram on the device heap.
	// NOTE(review): malloc can return NULL if the device heap is exhausted;
	// confirm the heap limit covers gridDim.x * THREADS * bins ints.
	int *local_hist = (int *)malloc(sizeof(int) * bins);
	if (id == 0)
		printf("Bin size : %f\n", bin_size);
	// Init local histogram
	for (int i = 0; i < bins; i++)
		local_hist[i] = 0;
	// Shared scratch: one slot per (bin, thread) pair.
	extern __shared__ int s_bins[];
	// Serially bin this thread's ITEMS_PER_THREAD values: the first bin whose
	// upper edge (min + (j+1)*bin_size) is >= the value receives the count.
	for (int i = 0; i < ITEMS_PER_THREAD; i++)
	{
		for (int j = 0; j < bins; j += 1)
		{
			if (id + i < nb && values[id + i] <= ((float)min + (float)(j + 1) * bin_size))
			{
				local_hist[j] += 1;
				break ;
			}
		}
	}
	__syncthreads();
	// Publish the private histograms to shared memory.
	for (int i = 0; i < bins; i++)
	{
		s_bins[THREADS * i + thread_id] = local_hist[i];
	}
	__syncthreads();
	// Tree-reduce each bin's THREADS partial counts (with a tail term for
	// odd remainder sizes).
	int size = THREADS;
	for (size_t s = THREADS / 2; s > 0; s >>= 1)
	{
		if (thread_id + s < THREADS && thread_id < s)
		{
			for (int j = 0; j < bins; j++)
			{
				s_bins[j * THREADS + thread_id] = s_bins[j * THREADS + thread_id] + s_bins[j * THREADS + thread_id + s];
				if (size % 2 == 1 && thread_id + s + s == size - 1)
					s_bins[j * THREADS + thread_id] = s_bins[j * THREADS + thread_id] + s_bins[j * THREADS + thread_id + s + s];
			}
		}
		__syncthreads();
		size = s;
	}
	// Thread 0 writes this block's partial histogram to global memory.
	if (thread_id == 0)
	{
		for (int i = 0; i < bins; i++) {
			histogram[i + blockIdx.x * bins] = s_bins[THREADS * i];
		}
	}
	// FIX: release the per-thread device-heap allocation (was leaked).
	free(local_hist);
}
/**
 * Host driver: histogram h_values[0..size) into `bins` equal-width bins over
 * [min, max].  The kernel produces one partial histogram per block, so
 * h_histogram must hold bins * grid_dim ints; the caller sums the partials.
 */
void histogram(float *h_values, size_t size, float min, float max, int *h_histogram, size_t bins)
{
	float *d_values;
	int *d_histogram;
	int nb_thread = size / ITEMS_PER_THREAD + 1;
	int grid_dim = nb_thread / THREADS + 1;
	// cudaMalloc
	checkCudaErrors(cudaMalloc(&d_values, sizeof(float) * size));
	checkCudaErrors(cudaMalloc(&d_histogram, sizeof(int) * bins * grid_dim));
	// cudaMemcpy HostToDevice
	checkCudaErrors(cudaMemcpy(d_values, h_values, sizeof(float) * size, cudaMemcpyHostToDevice));
	// FIX: clear the WHOLE output buffer; the original zeroed only the first
	// `bins` ints, leaving the other blocks' partials uninitialised.
	checkCudaErrors(cudaMemset(d_histogram, 0, sizeof(int) * bins * grid_dim));
	// kernel HISTOGRAM (dynamic shared memory: one int per (bin, thread))
	histogram_cuda<<<grid_dim, THREADS, THREADS * bins * sizeof(int) >>>(d_histogram, d_values, size, (float)(max - min) / (float)bins, min, bins, nb_thread);
	cudaDeviceSynchronize();
	checkCudaErrors(cudaGetLastError());
	// cudaMemcpy DeviceToHost
	checkCudaErrors(cudaMemcpy(h_histogram, d_histogram, sizeof(int) * bins * grid_dim, cudaMemcpyDeviceToHost));
	// FIX: free the device pointers themselves; the original passed the
	// ADDRESSES of the host pointer variables (&d_values) to cudaFree.
	cudaFree(d_values);
	cudaFree(d_histogram);
}
|
10,078 | /**
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
/** Thrust Libraries (not necessary..)
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
*/
#include "computation.cuh"
#define DEFAULT_SENSIBILITY 1000
#define THREADS_PER_BLOCK 512
/**
* Default value used to target the results of all of those threads
* whose delta results to be greater than 1. This trick helps to avoid
* them to be considered as suitable results during the following
* minimization step.
*/
#define INFINITY_VALUE 1000
/**
* It is assumed that delta, which represents the relative minimum
* distance of the random matrix M could only take values between
* 0 and 1.
*/
#define MAX_DELTA_VALUE 1
/**
* Help parameter used to enable/disable the detailed printing
* of the couples (delta, n)
*/
bool verboseMode = false;
bool debugMode = false;
/**
* General function used to find out the properties of the running CUDA device
*/
/**
 * Print the compute capability and key resource limits of every CUDA
 * device visible to the process.
 */
void DisplayHeaderDevice()
{
    const int kilobyte = 1024;
    const int megabyte = kilobyte * kilobyte;
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printf("Device %d has compute capability %d.%d.\n", dev, props.major, props.minor);
        printf("Global Memory: %f Mb\n", (float) props.totalGlobalMem / megabyte);
        printf("Shared Memory: %f Kb\n", (float) props.sharedMemPerBlock / kilobyte);
        printf("Constant Memory: %f Kb\n", (float) props.totalConstMem / kilobyte);
        printf("Block registers: %d\n", props.regsPerBlock);
        printf("Warp Size: %d\n", props.warpSize);
        printf("Thread per block: %d\n", props.maxThreadsPerBlock);
        printf("Max block dimensions: [ %d, %d, %d ]\n", props.maxThreadsDim[0], props.maxThreadsDim[1], props.maxThreadsDim[2]);
        printf("Max grid dimensions: [ %d, %d, %d ]\n", props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]);
    }
}
/**
 * Entry point.  Sweeps `sensibility` candidate values of the parameter delta
 * in (0, maxDeltaThreshold], evaluates the required amount of rows n(delta)
 * on the GPU (computeAmountRows kernel from computation.cuh), and reports
 * the delta giving the minimum n.
 *
 *   argv[1] (optional): number of delta samples        (default 1000)
 *   argv[2] (optional): maximum delta value tested     (default 1)
 *   argv[3] (optional): any extra argument enables verbose couple printing
 */
int main(int argc, char **argv)
{
	// Parameter used to set the number of intervals used for the computation
	// of the optimal parameter
	int sensibility;
	if (argc >= 2) sensibility = atoi(argv[1]);
	else sensibility = DEFAULT_SENSIBILITY;
	// Parameter used to set the maximum possible value for delta
	float maxDeltaThreshold;
	if (argc >= 3) maxDeltaThreshold = atof(argv[2]);
	else maxDeltaThreshold = MAX_DELTA_VALUE;
	if (argc == 4) verboseMode = true;
	// Show the properties of the CUDA devices..
	if(debugMode) DisplayHeaderDevice();
	size_t vectorSize = sensibility * sizeof(float);
	// Compute launching parameters for the kernel (1D ceil-div grid)
	dim3 dimGrid((unsigned int) ceilf((float) sensibility/THREADS_PER_BLOCK ), 1, 1);
	dim3 dimBlock(THREADS_PER_BLOCK,1,1);
	// Allocating a raw vector to be processed on the device.
	float *rawTestAmountRowsVector;
	cudaMalloc(&rawTestAmountRowsVector, vectorSize);
	// Allocate the arbitrary infinite value in constant memory
	int infinityValue = INFINITY_VALUE;
	cudaMemcpyToSymbol(infinity, &infinityValue, sizeof(int));
	// Invoke the kernel for the computation of the amounted rows
	computeAmountRows<<<dimGrid, dimBlock>>>(rawTestAmountRowsVector, sensibility, maxDeltaThreshold);
	// FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	////////////////////////////////////////////////////////////////////////////////////////////////////
	// The minimum value is now extracted and all the correlated couples (delta, n) are printed out.
	////////////////////////////////////////////////////////////////////////////////////////////////////
	printf("\nEvaluation of the parameter Delta in order to find out the optimal amount of rows, N\n\n");
	printf("Test Condition:\nDomain of the parameter delta = [0, %f]\n", maxDeltaThreshold);
	printf("Number of samples considered = %d\nInterval between two adjacent delta samples = %f\n\n", sensibility, (float) 1/sensibility);
	float *testAmountRowsVector;
	testAmountRowsVector = (float *) malloc(vectorSize);
	cudaMemcpy(testAmountRowsVector, rawTestAmountRowsVector, vectorSize, cudaMemcpyDeviceToHost);
	float minComparison = infinityValue;
	float delta;
	// FIX: minIndex was uninitialised; it was read even if no sample beat the
	// sentinel value.
	int minIndex = 0;
	for(int i=0; i< sensibility; i++) {
		if(testAmountRowsVector[i] < minComparison) {
			minIndex = i;
			minComparison = testAmountRowsVector[i];
		}
		delta = (float) i/sensibility + (float) 1/sensibility;
		if (delta <= maxDeltaThreshold && verboseMode) {
			printf("delta = %f -> n = %f \n", delta, testAmountRowsVector[i]);
		}
	}
	printf("\nThe minimum amount of rows is N = %f k obtained by taking delta = %f\n", minComparison, (float) minIndex/sensibility);
	if(argc < 2)
		printf("\nHELP\nYou can run the analysis with test values different from the default ones.\n"
			"In particular 3 parameters could be provided when launching the program:\n\t"
			"1. Numbers of intervals tested for Delta [positive integer value] (Default = 1000)\n\t"
			"2. Maximum value tested for Delta [float value between 0 and 1] (Default = 1)\n\t"
			"3. Enable/Disable Verbose mode [provide a non-null string to enable this mode.. e.g. v] (Default = Disable)\n");
	// FIX: the host copy of the results was leaked.
	free(testAmountRowsVector);
	cudaFree(rawTestAmountRowsVector);
	return 0;
}
|
10,079 | #include <cassert>
#include <algorithm>
#include <stdlib.h>
#include <stdio.h>
#include <cfloat>
#include <cuda.h>
#include <sys/time.h>
/*
Graph --- Representation of a directed graph.
Nodes are numbered starting with 0, to num_nodes - 1.
The edges for node i are stored starting at
edge_destinations[edge_offsets[i]]
and continuing until and *excluding*
edge_destinations[edge_offsets[i+1]]
Edges for a node will be in sorted order.
For example, the graph with the following edges:
0 -> 1
0 -> 2
1 -> 2
3 -> 4
would be represented as
num_nodes = 5
edge_offsets = {
0, (index 0)
2, (index 1)
3, (index 2)
3, (index 3)
4, (index 4)
4, (index 5)
}
edge_destinations = {
1, (represents 0 to 1)
2, (represents 0 to 2)
2, (represents 1 to 2)
4, (represents 3 to 4)
}
*/
struct Graph {
int num_nodes;          // nodes are numbered 0 .. num_nodes - 1
int *edge_offsets;      // CSR row offsets; edges of node i occupy [edge_offsets[i], edge_offsets[i+1])
int *edge_destinations; // CSR edge targets, sorted within each source node
};
/*
CPU_bfs() --- reference implementation of a breadth-first search
arguments:
Graph* theGraph --- the graph to search
int starting_node --- the index of the node to start the search at
int *output_bfs_tree --- output, giving the results of the search, described below
This performs a breadth-first search and outputs the result to output_bfs_tree.
If node i was not reached during the BFS, then output_bfs_tree[i] will be -1.
If node i was the starting_node, then output_bfs_tree[i] will be i.
If node i was reached during the BFS, then output_bfs_tree[i] will be the index of
the node from which it was first reached. If there are multiple possibilities,
then *any* of them is permissible.
For example, given the graph
0 -> 1
0 -> 2
1 -> 2
1 -> 6
2 -> 3
2 -> 6
3 -> 0
4 -> 5
then the after CPU_bfs(theGraph, 0, output_bfs_tree), the values in output_bfs_tree will be:
output_bfs_tree[0] = 0
output_bfs_tree[1] = 0
output_bfs_tree[2] = 0
output_bfs_tree[3] = 2
output_bfs_tree[4] = -1
output_bfs_tree[5] = -1
output_bfs_tree[6] = 1 OR output_bfs_tree[6] = 2 (either is permitted)
*/
void CPU_bfs(Graph* graph, int starting_node, int *output_bfs_tree);
void GPU_bfs(Graph* graph, int starting_node, int *output_bfs_tree, int kernel_code, float *kernel_time, float *transfer_time);
/*
Verify the result of a BFS is correct
Returns true if it is, false otherwise. Outputs a message to stderr
about the first discovered disagreement.
*/
bool verify_bfs(Graph* graph, int starting_node, int *output_bfs_tree);
/*
Load a graph from a file in starting_node<whitespace>ending_node pairs.
Nodes must be in numerical order and the edges for a node must be in
numerical order.
The file may also contain comments lines starting with '#' which
will be ignored.
*/
void load_graph(FILE *in, Graph *outGraph);
/* Timing utility functions */
float usToSec(long long time);
long long start_timer();
long long stop_timer(long long start_time, const char *name);
void die(const char *message);
// Main program
int main(int argc, char **argv) {
//default kernel
int kernel_code = 1;
int starting_node = -1;
// Parse vector length and kernel options
const char *graph_file;
if (argc >= 2) {
graph_file = argv[1];
for (int i = 2; i < argc; ++i) {
if (i + 1 < argc) {
if (0 == strcmp(argv[i], "-k")) {
kernel_code = atoi(argv[i + 1]);
++i;
} else if (0 == strcmp(argv[i], "-s")) {
starting_node = atoi(argv[i + 1]);
++i;
} else {
die("USAGE: ../breadth_first_search input_file -s starting_node -k kernel_code");
}
} else {
die("USAGE: ../breadth_first_search input_file -s starting_node -k kernel_code");
}
}
} else {
die("USAGE: ../breadth_first_search input_file -s starting_node -k kernel_code");
}
Graph graph;
FILE *in = fopen(graph_file, "r");
if (!in) {
die("Could not open input file");
}
load_graph(in, &graph);
printf("loaded graph with %d nodes\n", graph.num_nodes);
if (starting_node == -1) {
/* find first node with at least one out edge */
++starting_node;
while (graph.edge_offsets[starting_node] == graph.edge_offsets[starting_node + 1])
++starting_node;
}
printf("selected to start at node %d\n", starting_node);
int *cpu_output_bfs_tree = (int*) malloc(graph.num_nodes * sizeof(int));
int *gpu_output_bfs_tree = (int*) malloc(graph.num_nodes * sizeof(int));
long long start_cpu = start_timer();
CPU_bfs(&graph, starting_node, cpu_output_bfs_tree);
long long CPU_time = stop_timer(start_cpu, "CPU version");
if (!verify_bfs(&graph, starting_node, cpu_output_bfs_tree)) {
fprintf(stderr, "CPU BFS produces INCORRECT RESULT!\n");
}
#ifdef DEBUG
fprintf(stderr, "first few parents are %d/%d/%d/%d/%d/%d\n",
cpu_output_bfs_tree[0], cpu_output_bfs_tree[1],
cpu_output_bfs_tree[2], cpu_output_bfs_tree[3],
cpu_output_bfs_tree[4], cpu_output_bfs_tree[5]);
#endif
float GPU_kernel_time = INFINITY;
float transfer_time = INFINITY;
long long start_gpu = start_timer();
GPU_bfs(&graph, starting_node, gpu_output_bfs_tree, kernel_code, &GPU_kernel_time, &transfer_time);
long long GPU_time = stop_timer(start_gpu, "GPU version");
// Compute the speedup or slowdown
//// Not including data transfer
if (GPU_kernel_time > usToSec(CPU_time)) printf("\nCPU outperformed GPU kernel by %.2fx\n", (float) (GPU_kernel_time) / usToSec(CPU_time));
else printf("\nGPU kernel outperformed CPU by %.2fx\n", (float) usToSec(CPU_time) / (float) GPU_kernel_time);
//// Including data transfer
if (GPU_time > CPU_time) printf("\nCPU outperformed GPU total runtime (including data transfer) by %.2fx\n", (float) GPU_time / (float) CPU_time);
else printf("\nGPU total runtime (including data transfer) outperformed CPU by %.2fx\n", (float) CPU_time / (float) GPU_time);
if (!verify_bfs(&graph, starting_node, gpu_output_bfs_tree)) {
fprintf(stderr, "GPU BFS produces INCORRECT RESULT!\n");
}
cudaFree(cpu_output_bfs_tree);
cudaFree(gpu_output_bfs_tree);
cudaFree(graph.edge_offsets);
cudaFree(graph.edge_destinations);
}
// GPU BFS entry point -- currently a stub: it only initialises the output
// tree (-1 everywhere, starting node parented to itself) and performs no
// search.  kernel_code selects the kernel variant; kernel_runtime and
// transfer_runtime are meant to receive timings but are not written here.
void GPU_bfs(Graph* graph, int starting_node, int *output_bfs_tree, int kernel_code, float *kernel_runtime, float *transfer_runtime) {
for (int i = 0; i < graph->num_nodes; ++i) {
output_bfs_tree[i] = -1;
}
output_bfs_tree[starting_node] = starting_node;
// IMPLEMENT YOUR BFS HERE
}
// Reference host-side breadth-first search (contract documented at the
// prototype above): fills output_bfs_tree with each node's BFS parent, -1
// for unreached nodes, and the starting node parented to itself.
// Scratch buffers use pinned host memory (cudaMallocHost).
void CPU_bfs(Graph* graph, int starting_node, int *output_bfs_tree) {
    char *visited;
    cudaMallocHost((void**) &visited, graph->num_nodes);
    for (int i = 0; i < graph->num_nodes; ++i) {
        output_bfs_tree[i] = -1;
        visited[i] = 0;
    }
    output_bfs_tree[starting_node] = starting_node;
    // Frontier queue: [frontier_start, frontier_end) is the current BFS
    // level; newly discovered nodes are appended at next_frontier_end.
    int *frontier;
    cudaMallocHost((void**) &frontier, sizeof(int) * graph->num_nodes);
    int frontier_start = 0;
    int frontier_end = 1;
    int next_frontier_end = 1;
    frontier[0] = starting_node;
    visited[starting_node] = 1;
    while (frontier_end > frontier_start) {
        for (int parent_index = frontier_start;
             parent_index < frontier_end;
             ++parent_index) {
            int parent = frontier[parent_index];
            for (int edge_index = graph->edge_offsets[parent];
                 edge_index < graph->edge_offsets[parent + 1];
                 ++edge_index) {
                int child = graph->edge_destinations[edge_index];
#ifdef DEBUG
                fprintf(stderr, "BFS: processing child %d of %d\n",
                        child, parent);
#endif
                if (visited[child]) continue;
                visited[child] = 1;
                output_bfs_tree[child] = parent;
                frontier[next_frontier_end++] = child;
            }
        }
        frontier_start = frontier_end;
        frontier_end = next_frontier_end;
    }
    // FIX: memory from cudaMallocHost() must be released with
    // cudaFreeHost(); the original called cudaFree(), which is only valid
    // for device allocations.
    cudaFreeHost(frontier);
    cudaFreeHost(visited);
}
/*
 * Read the next "first second" integer pair from `in`, skipping lines that
 * start with '#'.  Returns false on end of file; exits with a diagnostic on
 * a malformed or over-long line.
 */
static bool next_pair(FILE *in, int *first, int *second) {
    for (;;) {
        char line[4096];
        char *result = fgets(line, sizeof line, in);
        if (!result) {
            return false;
        }
        size_t len = strlen(line);
        /* BUG FIX: the original tested line[strlen(line)] == '\n', which
           reads the terminating NUL and can never be true, so over-long
           lines were silently split into two reads.  A line that fills the
           buffer without a trailing newline is too long. */
        if (len == sizeof line - 1 && line[len - 1] != '\n') {
            fprintf(stderr, "load_graph: excessively long line starting with [%s]\n", line);
            exit(EXIT_FAILURE);
        }
        if (line[0] == '#') {
            continue; // comment
        }
        if (sscanf(line, "%d %d", first, second) == 2) {
            return true;
        } else {
            fprintf(stderr, "load_graph: malformed line: [%s]\n", line);
            exit(EXIT_FAILURE);
        }
    }
}
// Reads a whitespace-separated edge list from `in` and builds a CSR
// (compressed sparse row) graph: edge_offsets[v]..edge_offsets[v+1] index
// into edge_destinations for node v's outgoing edges.
// Two passes: the first sizes the arrays (edge count, largest node id),
// the second fills them. Input must be sorted by source node, and by
// destination within a source (enforced by the assert).
void load_graph(FILE *in, Graph* graph) {
    /* First read file to determine sizes */
    int first, second;
    int max_node = 0;
    int num_edges = 0;
    while (next_pair(in, &first, &second)) {
        if (first > max_node) {
            max_node = first;
        }
        if (second > max_node) {
            max_node = second;
        }
        ++num_edges;
    }
    graph->num_nodes = max_node + 1;
    // Pinned host allocations: num_nodes + 1 offsets, one slot per edge.
    cudaMallocHost((void**) &graph->edge_offsets, sizeof(int) * (max_node + 2));
    cudaMallocHost((void**) &graph->edge_destinations, sizeof(int) * (num_edges));
    rewind(in);
    int last_first = 0, last_second = -1;
    graph->edge_offsets[0] = 0;
    graph->edge_offsets[1] = 0;
    while (next_pair(in, &first, &second)) {
        // Starting a new source node: reset the per-source ordering check.
        if (last_first < first) {
            last_second = -1;
        }
        // Close out rows for any skipped node ids: their (empty) ranges
        // start where the previous row's edges end.
        while (last_first < first) {
            graph->edge_offsets[last_first + 2] = graph->edge_offsets[last_first + 1];
            ++last_first;
        }
        assert(second > last_second);
        // BUG FIX: track the previous destination so the assert actually
        // enforces strictly increasing destinations (it previously only
        // ever compared against -1).
        last_second = second;
        // edge_offsets[first + 1] doubles as the insertion cursor for the
        // row currently being filled.
        graph->edge_destinations[
            graph->edge_offsets[first + 1]++
        ] = second;
    }
    // BUG FIX: close out ALL remaining rows after the last source node.
    // The original unconditionally wrote
    //     edge_offsets[max_node+1] = edge_offsets[max_node];
    // which destroys the last row's end offset whenever max_node itself
    // has outgoing edges, and fails to close more than one trailing row.
    while (last_first < max_node) {
        graph->edge_offsets[last_first + 2] = graph->edge_offsets[last_first + 1];
        ++last_first;
    }
}
/* Validates output_bfs_tree as a correct BFS tree of `graph` rooted at
   starting_node:
     - the root must be its own parent,
     - every parent link must correspond to a real graph edge,
     - distances must be consistent (child = parent + 1) and minimal
       (no graph edge could reach a node at a shorter distance),
     - every neighbour of a tree node must itself be in the tree.
   Returns true when all checks pass. All failure paths now release the
   scratch `distances` buffer via out_failed (several previously returned
   directly and leaked it), and the pinned buffer is released with
   cudaFreeHost to match its cudaMallocHost allocation. */
bool verify_bfs(Graph* graph, int starting_node, int *output_bfs_tree) {
    int *distances;
    cudaMallocHost((void**) &distances, sizeof(int) * graph->num_nodes);
    for (int i = 0; i < graph->num_nodes; ++i) {
        distances[i] = -1;
    }
    distances[starting_node] = 0;
    /* first find the distances of all nodes to the starting node based
       on the BFS tree */
    for (int node = 0; node < graph->num_nodes; ++node) {
        /* special case for starting node */
        if (node == starting_node) {
            if (output_bfs_tree[node] != starting_node) {
                fprintf(stderr, "starting node %d linked to %d instead of self\n",
                        starting_node, output_bfs_tree[node]);
                goto out_failed; /* BUG FIX: was `return false` (leaked distances) */
            }
            continue;
        }
        /* if the node is contained in the BFS tree, go up to the root and find the distance */
        if (output_bfs_tree[node] != -1) {
            int max_iterations = graph->num_nodes + 1;
            int saw_distance = 1;
            int parent = output_bfs_tree[node];
            if (parent >= graph->num_nodes) {
                fprintf(stderr, "node %d linked to impossible node %d\n", node, parent);
                goto out_failed;
            }
            if (distances[parent] != -1) {
                /* parent's distance already known: child is one step further */
                distances[node] = distances[parent] + 1;
            } else {
                /* walk up to the root counting steps; bounded so that a
                   cycle in the "tree" cannot loop forever */
                while (saw_distance < max_iterations && parent != starting_node) {
                    parent = output_bfs_tree[parent];
                    if (parent == -1 || parent >= graph->num_nodes) {
                        fprintf(stderr, "node %d chains to impossible node %d in BFS tree (via %d)\n", node, parent, output_bfs_tree[node]);
                        goto out_failed; /* BUG FIX: was `return false` (leaked distances) */
                    }
                    ++saw_distance;
                }
                if (saw_distance == max_iterations) {
                    fprintf(stderr, "node %d is part of a cycle in the BFS tree\n", node);
                    goto out_failed; /* BUG FIX: was `return false` (leaked distances) */
                }
                distances[node] = saw_distance;
                distances[output_bfs_tree[node]] = saw_distance - 1;
            }
        }
    }
    /* now that distances are computed, for each node in the BFS tree check that
       (1) its parent actually has an edge to it
       (2) the distances of all its children are >= 1 + its distance
     */
    for (int node = 0; node < graph->num_nodes; ++node) {
        int parent = output_bfs_tree[node];
        if (parent == -1) {
            continue;
        }
        /* check for edge from parent to node */
        if (node != starting_node) {
            int low_edge_index = graph->edge_offsets[parent];
            int high_edge_index = graph->edge_offsets[parent + 1];
            int found_index = -1;
            /* binary search over the parent's sorted destination list;
               current valid range is [low_edge_index, high_edge_index)
             */
            while (low_edge_index < high_edge_index) {
                int midpoint = low_edge_index + (high_edge_index - low_edge_index) / 2;
                int midpoint_destination = graph->edge_destinations[midpoint];
                if (midpoint_destination == node) {
                    found_index = midpoint;
                    break;
                } else if (midpoint_destination > node) {
                    high_edge_index = midpoint;
                } else {
                    low_edge_index = midpoint + 1;
                }
            }
            if (found_index == -1) {
                fprintf(stderr, "node %d has parent %d in BFS tree, but no %d->%d edge in graph\n",
                        node, parent, parent, node);
                goto out_failed;
            }
            /* if this isn't true, the code in the previous loop is probably broken */
            if (distances[node] != distances[parent] + 1) {
                fprintf(stderr, "inconsistent distance for node %d and parent %d\n", node, parent);
                goto out_failed;
            }
        }
        /* check distances of children */
        for (int edge_index = graph->edge_offsets[node];
             edge_index < graph->edge_offsets[node + 1];
             ++edge_index) {
            int child = graph->edge_destinations[edge_index];
            if (distances[child] > distances[node] + 1) {
                fprintf(stderr, "child %d of %d is at distance %d via %d, but could be at distance %d via %d\n",
                        child, node, distances[child],
                        output_bfs_tree[child],
                        distances[node] + 1,
                        node);
                goto out_failed;
            } else if (distances[child] == -1) {
                fprintf(stderr, "child %d of %d is not in BFS tree, but %d is\n",
                        child, node, node);
                goto out_failed;
            }
        }
    }
    /* BUG FIX: pinned memory must be freed with cudaFreeHost, not cudaFree */
    cudaFreeHost(distances);
    return true;
out_failed:
    cudaFreeHost(distances);
    return false;
}
// Returns the current time in microseconds
// Returns the current wall-clock time in microseconds.
long long start_timer() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
// converts a long long ns value to float seconds
// Converts a microsecond count to seconds, as a float.
float usToSec(long long time) {
    const float microsecondsPerSecond = 1000000.0f;
    return (float) time / microsecondsPerSecond;
}
// Prints the time elapsed since the specified time
// Prints the time elapsed since start_time (microseconds, as returned by
// start_timer) under the given label, and returns the elapsed microseconds.
long long stop_timer(long long start_time, const char *name) {
    struct timeval now;
    gettimeofday(&now, NULL);
    long long end_time = now.tv_sec * 1000000 + now.tv_usec;
    long long elapsed_us = end_time - start_time;
    printf("%s: %.5f sec\n", name, usToSec(elapsed_us));
    return elapsed_us;
}
// Prints the specified message and quits
// Prints the message on its own line and terminates with exit status 1.
void die(const char *message) {
    puts(message);
    exit(1);
}
|
// Stage predictor for a 1-D element-based solver (looks like one stage of
// a Runge-Kutta-style update -- confirm against the host-side scheme).
// One thread per nodal unknown; nn = total unknowns, nne = nodes per
// element. The thread with local index 0 in each element accumulates
// el_sum[eidx] = vf * sum of that element's nne nodal values of ul; every
// in-range thread then forms the predictor ul_tmp = ul_prev + c_ul_tmp*kl.
// NOTE(review): the element-sum loop reads ul[tid .. tid+nne-1], which
// assumes nn is a multiple of nne -- confirm at the call site.
__global__ void update_pre(int nn, int nne, double vf, float c_ul_tmp, double *ul, double *ul_prev, double *ul_tmp, double *kl, double *el_sum) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int eidx = tid / nne; // index of elements
    int lidx = tid % nne; // index of nodes (local, within the element)
    int i;
    if (tid < nn) {
        // sum in element: computed once per element, by its first node's
        // thread (tid == eidx*nne when lidx == 0)
        if (lidx == 0) {
            el_sum[eidx] = 0;
            for (i=0; i<nne; i++)
                el_sum[eidx] += vf * ul[tid + i];
        }
        // ul_tmp: predictor state for this stage
        ul_tmp[tid] = ul_prev[tid] + c_ul_tmp * kl[tid];
    }
}
// Second half of the stage update: computes the stage derivative kk for
// each nodal unknown, advances ul by c_ul * kk, and stores kk into kl for
// use by the next stage. el_sum must already hold the per-element sums
// from update_pre.
// The left-neighbour element sum wraps periodically: element 0 uses
// element ne-1.
// NOTE(review): the exact discretization (flux sign convention, the
// alternating-mode sum bb, the (2*lidx+1)/de factor) is inferred from the
// arithmetic only -- verify against the host-side scheme before changing.
__global__ void update_ul(int nn, int ne, int nne, double dt, double de, double vf, float c_ul, double *ul, double *ul_tmp, double *kl, double *el_sum) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int eidx = tid / nne; // index of elements
    int lidx = tid % nne; // index of nodes (local mode index)
    int i;
    double el_sum_left, gg, bb, mm, kk;
    if (tid < nn) {
        // Periodic left neighbour: element 0 wraps to element ne-1.
        if (eidx > 0) el_sum_left = el_sum[eidx-1];
        else el_sum_left = el_sum[ne-1];
        // gg: boundary/flux term; the sign of the left contribution
        // alternates with the local mode index via (-1)^lidx.
        gg = el_sum[eidx] - pow(-1., lidx) * el_sum_left;
        // bb: sum over lower modes of matching parity within the element.
        bb = 0;
        if (lidx != 0) {
            for (i=(lidx-1)%2; i<lidx; i+=2)
                bb += 2 * vf * ul_tmp[eidx*nne + i];
        }
        // mm: (2*lidx+1)/de -- int numerator promoted to double by the
        // division with de, so there is no integer truncation here.
        mm = (2 * lidx + 1) / de;
        kk = dt * mm * (bb - gg);
        ul[tid] += c_ul * kk;
        kl[tid] = kk; // saved for the next stage
    }
}
|
10,081 | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h> //srand()
//#include <stdbool.h>
//#define block 514
extern "C" void smooth_global_outer(float* b, float* a, int n, int loop, int BLOCK);
extern "C" void smooth_shared_outer(float* b, float* a, int n, int loop, int BLOCK);
//(2) 裝置核心(global 版).
// 3-point weighted smoothing, global-memory version:
//   b[k] = 0.25 * (a[k-1] + 2*a[k] + a[k+1])
// with a value of 0 assumed outside the array (the k==0 and k==n-1
// branches below encode that boundary condition). One thread per element;
// threads with k >= n do nothing.
__global__ void smooth_global(float* b, float* a, int n){
    int k = blockIdx.x*blockDim.x + threadIdx.x;
    if(k >= n) return;
    if(k == 0){
        b[0] = (2*a[0] + a[1])*0.25;       // left edge: a[-1] treated as 0
    }else if(k == n-1){
        b[n-1] = (a[n-2] + 2*a[n-1])*0.25; // right edge: a[n] treated as 0
    }else{
        b[k] = (a[k-1] + 2*a[k] + a[k+1])*0.25;
    }
}
//(3) 裝置核心(shared 版).
// 3-point weighted smoothing, shared-memory version.
// Each block stages BLOCK elements plus a one-element halo on each side
// into dynamic shared memory s[0..BLOCK+1]:
//   s[0]       <- a[base-1]   (0 at the array's left edge)
//   s[1+t]     <- a[base+t]
//   s[BLOCK+1] <- a[base+BLOCK] (0 past the array's right edge)
// then writes b[base+t] = 0.25*(s[t] + 2*s[t+1] + s[t+2]).
// Launch with BLOCK threads per block and (BLOCK+2)*sizeof(float) bytes of
// dynamic shared memory.
__global__ void smooth_shared(float* b, float* a, int n, int BLOCK){
    int base = blockIdx.x*blockDim.x;
    int t = threadIdx.x;
    extern __shared__ float s[]; // BLOCK+2 floats: payload plus both halos
    // Stage this block's payload.
    if(base+t < n){
        s[t+1] = a[base+t];
    }
    // Left halo, loaded by thread 0 (zero at the array boundary).
    if(t == 0){
        if(base == 0){
            s[0] = 0;
        }
        else{
            s[0] = a[base-1];
        }
    }
    // Right halo, loaded by the last thread of the block.
    // BUG FIX: the original used `if(t==32)` (to put the load in a second
    // warp), but no such thread exists when blockDim.x <= 32, so the right
    // halo was never initialized for small blocks. The last thread exists
    // for every block size.
    if(t == blockDim.x-1){
        if(base+BLOCK >= n){   // this block covers the array's right edge
            s[n-base+1] = 0;   // zero just past the last valid element
        }
        else{
            s[BLOCK+1] = a[base+BLOCK];
        }
    }
    __syncthreads(); // payload and halos must be visible to all threads
    if(base+t < n){
        b[base+t] = (s[t] + 2*s[t+1] + s[t+2])*0.25f; // weighted 3-point average
    }
}
// Host wrapper: launches smooth_global `loop` times with BLOCK threads per
// block and enough blocks to cover all n elements (the kernel guards the
// overhang itself).
extern "C" void smooth_global_outer(float* b, float* a, int n, int loop, int BLOCK)
{
    dim3 threads(BLOCK, 1, 1);
    dim3 blocks(n/BLOCK+1, 1, 1);
    for(int iter = 0; iter < loop; ++iter)
        smooth_global<<< blocks, threads >>>(b, a, n);
}
// Host wrapper: launches smooth_shared `loop` times.
// BUG FIX: the third launch parameter (dynamic shared memory) is a size in
// BYTES. The original passed BLOCK+2 -- an element count -- so the kernel
// received only (BLOCK+2) bytes instead of (BLOCK+2) floats, and indexed
// far past the shared allocation. It must be (BLOCK+2)*sizeof(float).
extern "C" void smooth_shared_outer(float* b, float* a, int n, int loop, int BLOCK)
{
    dim3 block(BLOCK, 1, 1);
    dim3 grid(n/BLOCK+1, 1, 1);
    for(int k=0; k<loop; k++){
        smooth_shared<<< grid, block, (BLOCK+2)*sizeof(float) >>>(b, a, n, BLOCK);
    }
}
|
10,082 | #include <stdio.h>
//indicates a function that runs on the divice
// and also is colled from host code (du global)
//ad a single integer
// Device-side addition of two single integers: *c = *a + *b.
// Also prints *c + 1 from the kernel as a device-printf sanity check.
__global__ void add(int *a, int *b, int *c){
    *c = *a + *b;
    int plusOne = *c + 1;
    printf("kernel %d\n", plusOne);
}
// Host driver: computes 1 + 1 on the GPU with a single-thread kernel
// launch and prints the result.
int main(void){
    int host_a = 1, host_b = 1, host_c;   // host copies
    int *dev_a, *dev_b, *dev_c;           // device copies
    const int size = sizeof(int);

    // Device buffers for the two inputs and the output.
    cudaMalloc(&dev_a, size);
    cudaMalloc(&dev_b, size);
    cudaMalloc(&dev_c, size);

    // Ship the inputs to the device.
    cudaMemcpy(dev_a, &host_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &host_b, size, cudaMemcpyHostToDevice);

    // One block, one thread.
    add<<<1,1>>>(dev_a, dev_b, dev_c);

    // Bring the sum back (blocking copy also synchronizes the launch).
    cudaMemcpy(&host_c, dev_c, size, cudaMemcpyDeviceToHost);
    printf("fuera del kernel %i\n", host_c);

    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    return 0;
}
|
10,083 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
using namespace std;
// Enumerates every CUDA device and prints its name, compute capability
// and clock rate.
int main(){
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    for(int dev = 0; dev < deviceCount; ++dev){
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        cout << "---Some Information for the Device---" << endl;
        cout << "Name : " << prop.name << endl;
        cout << "Compute capability : " << prop.major << "." << prop.minor << endl;
        cout << "Clock Rate : " << prop.clockRate << endl;
    }
}
10,084 | #include "includes.h"
// Block-wide integer sum reduction over GLOBAL memory with 4x unrolling.
// Each block owns a segment of 4*blockDim.x elements of g_idata: every
// thread first folds its 4 strided elements into g_idata[idx], then the
// block reduces its segment in place and thread 0 writes the block total
// to g_odata[blockIdx.x]. NOTE: g_idata is modified (in-place reduction).
// Requires blockDim.x to be a power of two and at least 64.
__global__ void reduceGmemUnroll(int *g_idata, int *g_odata, unsigned int n)
{
    // set thread ID
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockIdx.x * blockDim.x * 4 + threadIdx.x;
    // convert global data pointer to the local pointer of this block
    int *idata = g_idata + blockIdx.x * blockDim.x * 4;
    // unrolling 4: fold elements idx + k*blockDim.x (k = 0..3) into idx
    if (idx < n)
    {
        int a1, a2, a3, a4;
        a1 = a2 = a3 = a4 = 0;
        a1 = g_idata[idx];
        if (idx + blockDim.x < n) a2 = g_idata[idx + blockDim.x];
        if (idx + 2 * blockDim.x < n) a3 = g_idata[idx + 2 * blockDim.x];
        if (idx + 3 * blockDim.x < n) a4 = g_idata[idx + 3 * blockDim.x];
        g_idata[idx] = a1 + a2 + a3 + a4;
    }
    __syncthreads();
    // in-place tree reduction in global memory
    if (blockDim.x >= 1024 && tid < 512) idata[tid] += idata[tid + 512];
    __syncthreads();
    if (blockDim.x >= 512 && tid < 256) idata[tid] += idata[tid + 256];
    __syncthreads();
    if (blockDim.x >= 256 && tid < 128) idata[tid] += idata[tid + 128];
    __syncthreads();
    if (blockDim.x >= 128 && tid < 64) idata[tid] += idata[tid + 64];
    __syncthreads();
    // Final warp. BUG FIX: `volatile` alone no longer guarantees ordering
    // between lanes on Volta+ (independent thread scheduling), so the warp
    // must be explicitly synchronized between dependent steps. tid < 32 is
    // exactly warp 0, so the full mask is correct.
    if (tid < 32)
    {
        volatile int *vmem = idata;
        vmem[tid] += vmem[tid + 32]; __syncwarp();
        vmem[tid] += vmem[tid + 16]; __syncwarp();
        vmem[tid] += vmem[tid + 8];  __syncwarp();
        vmem[tid] += vmem[tid + 4];  __syncwarp();
        vmem[tid] += vmem[tid + 2];  __syncwarp();
        vmem[tid] += vmem[tid + 1];
    }
    // write result for this block to global mem
    if (tid == 0) g_odata[blockIdx.x] = idata[0];
}
10,085 | #include <iostream>
#include <chrono>
// Evaluates p(x) = sum_{k=0}^{degree} poly[k] * x^k at x = array[i] by
// Horner-free power accumulation, writing the result back into array[i].
// One thread per sample; threads beyond n do nothing.
__global__ void polynomial_expansion (float* poly, int degree, int n, float* array) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n) return;
    float x = array[idx];
    float acc = 0.f;   // running polynomial value
    float xpow = 1.f;  // x^k, updated incrementally
    for (int k = 0; k <= degree; ++k) {
        acc += xpow * poly[k];
        xpow *= x;
    }
    array[idx] = acc;
}
// Driver: evaluates a degree-`degree` polynomial with all-ones
// coefficients at n all-ones sample points on the GPU and prints the
// average time per iteration. Since every input is 1.0 and every
// coefficient is 1.0, each result should equal degree + 1.
// Statement order matters for what gets timed: H2D copies happen before
// `begin`; the timed region covers the kernel launches, the sync/error
// check, and the final D2H copy.
int main (int argc, char* argv[]) {
    //TODO: add usage
    if (argc < 3) {
        std::cerr<<"usage: "<<argv[0]<<" n degree"<<std::endl;
        return -1;
    }
    int n = atoi(argv[1]);
    int degree = atoi(argv[2]);
    int nbiter = 1;  // number of timed kernel launches to average over
    float* array = new float[n];
    float* poly = new float[degree+1];
    float *xPointer, *coeffPointer;  // device buffers
    for (int i=0; i<n; ++i)
        array[i] = 1.0;
    for (int i=0; i<degree+1; ++i)
        poly[i] = 1.0;
    cudaMalloc(&xPointer, n*sizeof(float));
    cudaMalloc(&coeffPointer, (degree+1)*sizeof(float));
    // Upload inputs before starting the clock.
    cudaMemcpy(xPointer, array, n*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(coeffPointer, poly, (degree+1)*sizeof(float), cudaMemcpyHostToDevice);
    std::chrono::time_point<std::chrono::system_clock> begin, end;
    begin = std::chrono::system_clock::now();
    for (int iter = 0; iter<nbiter; ++iter){
        // ceil(n/256) blocks of 256 threads; kernel guards the overhang.
        polynomial_expansion<<<(n+255)/256, 256>>>(coeffPointer, degree, n, xPointer);
    }
    // Launches are asynchronous: synchronize before reading the error
    // state and stopping the clock.
    cudaDeviceSynchronize();
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(err));
    cudaMemcpy(array, xPointer, n*sizeof(float), cudaMemcpyDeviceToHost);
    end = std::chrono::system_clock::now();
    std::chrono::duration<double> totaltime = (end-begin)/nbiter;
    std::cerr<<array[0]<<std::endl;  // spot-check (expected: degree + 1)
    std::cout<<n<<" "<<degree<<" "<<totaltime.count()<<std::endl;
    delete[] array;
    delete[] poly;
    cudaFree(xPointer);
    cudaFree(coeffPointer);
    return 0;
}
|
10,086 | #include <cuda.h>
#include <stdio.h>
#define N 512
// Element-wise vector addition c[i] = a[i] + b[i], one thread per slot.
// Assumes a single-block launch supplying exactly one thread per element.
__global__ void add_number(int* a, int* b, int* c){
    int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
// Driver: fills two N-element vectors with 0..N-1, sums them on the GPU
// with one block of N threads, and prints elements 1..N-1 of the result
// (element 0 is skipped, matching the original program's output).
int main(int argc, char **argv){
    const int size = N * sizeof(int);
    int *a, *b, *c;        // host copies
    int *d_a, *d_b, *d_c;  // device copies

    // Device allocations.
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);

    // Host allocations.
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);

    // Inputs: both vectors hold 0..N-1.
    for(int i = 0; i < N; i++){
        a[i] = i;
        b[i] = i;
    }

    // Upload, run, download.
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    add_number<<<1,N>>>(d_a, d_b, d_c);
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

    for(int i = 1; i < N; i++){
        printf("%d\n", c[i]);
    }

    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
10,087 | /****************************************************************************\
* --- Practical Course: GPU Programming in Computer Vision ---
*
* time: winter term 2012/13 / March 11-18, 2013
*
* project: gradient
* file: gradient.cu
*
*
\******* PLEASE ENTER YOUR CORRECT STUDENT LOGIN, NAME AND ID BELOW *********/
const char* studentLogin = "p116";
const char* studentName = "Arash Bakhtiari";
const int studentID = 03625141;
/****************************************************************************\
*
* In this file the following methods have to be edited or completed:
*
* derivativeY_sm_d(const float *inputImage, ... )
* derivativeY_sm_d(const float3 *inputImage, ... )
* gradient_magnitude_d(const float *inputImage, ... )
* gradient_magnitude_d(const float3 *inputImage, ... )
*
\****************************************************************************/
#include "gradient.cuh"
#define BW 16
#define BH 16
// Accessors for the student identity constants defined at the top of the file.
const char* getStudentLogin() { return studentLogin; };
const char* getStudentName() { return studentName; };
// NOTE(review): studentID is initialized from the literal 03625141; the
// leading zero makes that an OCTAL constant (decimal 993889), not 3625141.
// Confirm this is intended before relying on the numeric value.
int getStudentID() { return studentID; };
// True when login, name and ID have all been changed from the template defaults.
bool checkStudentData() { return strcmp(studentLogin, "p010") != 0 && strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
bool checkStudentNameAndID() { return strcmp(studentName, "John Doe") != 0 && studentID != 1234567; };
// Central-difference x-derivative (grayscale):
//   out(x,y) = 0.5 * (in(x+1,y) - in(x-1,y)),
// with replicated values at the image borders. One thread per pixel; each
// BWxBH block stages its tile plus a one-column halo on each side into
// shared memory. iPitchBytes is the row pitch from cudaMallocPitch, so
// rows are addressed through char* arithmetic.
__global__ void derivativeX_sm_d(const float *inputImage, float *outputImage,
                                 int iWidth, int iHeight, size_t iPitchBytes)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  // Tile with one halo column on each side (indices 0 and BW+1).
  __shared__ float u[BW+2][BH];
  if (x < iWidth && y < iHeight) {
    // Center pixel.
    u[threadIdx.x+1][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
    // Left halo: replicate at the image border, otherwise fetch x-1.
    if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
    else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1);
    // Right halo: replicate at the image border, otherwise fetch x+1.
    if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
    else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
  }
  __syncthreads(); // tile and halo must be fully staged before differencing
  if (x < iWidth && y < iHeight)
    *((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = 0.5f*(u[threadIdx.x+2][threadIdx.y]-u[threadIdx.x][threadIdx.y]);
}
// Central-difference x-derivative, RGB (float3) variant. Same halo scheme
// as the grayscale overload above, applied independently per channel.
__global__ void derivativeX_sm_d(const float3 *inputImage, float3 *outputImage,
                                 int iWidth, int iHeight, size_t iPitchBytes)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  float3 imgValue;
  // Tile with one halo column on each side.
  __shared__ float3 u[BW+2][BH];
  if (x < iWidth && y < iHeight) {
    u[threadIdx.x+1][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
    // Left halo: replicate at the border, otherwise fetch x-1.
    if (x == 0) u[threadIdx.x][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
    else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1);
    // Right halo: replicate at the border, otherwise fetch x+1.
    if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y] = u[threadIdx.x+1][threadIdx.y];
    else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
  }
  __syncthreads();
  if (x < iWidth && y < iHeight) {
    // 0.5 * (right - left), per channel.
    imgValue.x = 0.5f*(u[threadIdx.x+2][threadIdx.y].x - u[threadIdx.x][threadIdx.y].x);
    imgValue.y = 0.5f*(u[threadIdx.x+2][threadIdx.y].y - u[threadIdx.x][threadIdx.y].y);
    imgValue.z = 0.5f*(u[threadIdx.x+2][threadIdx.y].z - u[threadIdx.x][threadIdx.y].z);
    *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
  }
}
// Central-difference y-derivative (grayscale):
//   out(x,y) = 0.5 * (in(x,y+1) - in(x,y-1)),
// with replicated values at the image borders. Same staging scheme as the
// x-derivative kernels, but with the halo rows above/below the tile.
__global__ void derivativeY_sm_d(const float *inputImage, float *outputImage,
                                 int iWidth, int iHeight, size_t iPitchBytes)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  // Tile with one halo row above and below (indices 0 and BH+1).
  __shared__ float u[BW][BH+2];
  if (x < iWidth && y < iHeight) {
    // Center pixel.
    u[threadIdx.x][threadIdx.y + 1] = *((float*) ((char*) inputImage + y
        * iPitchBytes) + x);
    // Top halo: replicate at the border, otherwise fetch row y-1.
    if (y == 0)
      u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y + 1];
    else if (threadIdx.y == 0)
      u[threadIdx.x][threadIdx.y] = *((float*) ((char*) inputImage + (y - 1)
          * iPitchBytes) + x);
    // Bottom halo: replicate at the border, otherwise fetch row y+1.
    if (y == (iHeight - 1))
      u[threadIdx.x][threadIdx.y + 2] = u[threadIdx.x][threadIdx.y + 1];
    else if (threadIdx.y == blockDim.y - 1)
      u[threadIdx.x][threadIdx.y + 2] = *((float*) ((char*) inputImage
          + (y + 1) * iPitchBytes) + x);
  }
  __syncthreads(); // halo rows must be staged before differencing
  if (x < iWidth && y < iHeight)
    *((float*) (((char*) outputImage) + y * iPitchBytes) + x) = 0.5f
        * (u[threadIdx.x][threadIdx.y + 2] - u[threadIdx.x][threadIdx.y]);
}
// Central-difference y-derivative, RGB (float3) variant. Same halo scheme
// as the grayscale overload above, applied independently per channel.
__global__ void derivativeY_sm_d(const float3 *inputImage, float3 *outputImage,
                                 int iWidth, int iHeight, size_t iPitchBytes)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  float3 imgValue;
  // Tile with one halo row above and below.
  __shared__ float3 u[BW][BH+2];
  if (x < iWidth && y < iHeight) {
    u[threadIdx.x][threadIdx.y + 1] = *((float3*) ((char*) inputImage + y
        * iPitchBytes) + x);
    // Top halo: replicate at the border, otherwise fetch row y-1.
    if (y == 0)
      u[threadIdx.x][threadIdx.y] = u[threadIdx.x][threadIdx.y + 1];
    else if (threadIdx.y == 0)
      u[threadIdx.x][threadIdx.y] = *((float3*) ((char*) inputImage + (y
          - 1) * iPitchBytes) + x);
    // Bottom halo: replicate at the border, otherwise fetch row y+1.
    if (y == (iHeight - 1))
      u[threadIdx.x][threadIdx.y + 2] = u[threadIdx.x][threadIdx.y + 1];
    else if (threadIdx.y == blockDim.y - 1)
      u[threadIdx.x][threadIdx.y + 2] = *((float3*) ((char*) inputImage
          + (y + 1) * iPitchBytes) + x);
  }
  __syncthreads();
  if (x < iWidth && y < iHeight) {
    // 0.5 * (below - above), per channel.
    imgValue.x = 0.5f*(u[threadIdx.x][threadIdx.y+2].x - u[threadIdx.x][threadIdx.y].x);
    imgValue.y = 0.5f*(u[threadIdx.x][threadIdx.y+2].y - u[threadIdx.x][threadIdx.y].y);
    imgValue.z = 0.5f*(u[threadIdx.x][threadIdx.y+2].z - u[threadIdx.x][threadIdx.y].z);
    *((float3*)(((char*)outputImage) + y*iPitchBytes)+ x) = imgValue;
  }
}
// Gradient magnitude (grayscale): sqrt(dx^2 + dy^2), where dx and dy are
// the same central differences (with replicated borders) computed by the
// derivative kernels above. Stages the tile plus a one-pixel halo in both
// directions. The four corner halo cells are never written -- but they are
// also never read (only the 4-neighbour cross is used below), so this is safe.
__global__ void gradient_magnitude_d(const float *inputImage, float *outputImage,
                                     int iWidth, int iHeight, size_t iPitchBytes)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  // Tile with a one-element halo on all four sides.
  __shared__ float u[BW+2][BH+2];
  if (x < iWidth && y < iHeight) {
    // Center pixel.
    u[threadIdx.x+1][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x);
    // Left/right halo columns (replicated at image borders).
    if (x == 0) u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
    else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x-1);
    if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
    else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y+1] = *((float*)((char*)inputImage + y*iPitchBytes)+x+1);
    // Top/bottom halo rows (replicated at image borders).
    if (y == 0)
      u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y + 1];
    else if (threadIdx.y == 0)
      u[threadIdx.x+1][threadIdx.y] = *((float*) ((char*) inputImage + (y
          - 1) * iPitchBytes) + x);
    if (y == (iHeight - 1))
      u[threadIdx.x+1][threadIdx.y + 2] = u[threadIdx.x+1][threadIdx.y + 1];
    else if (threadIdx.y == blockDim.y - 1)
      u[threadIdx.x+1][threadIdx.y + 2] = *((float*) ((char*) inputImage
          + (y + 1) * iPitchBytes) + x);
  }
  __syncthreads();
  float tempDerX;
  float tempDerY;
  if (x < iWidth && y < iHeight) {
    tempDerX = 0.5f*(u[threadIdx.x + 2][threadIdx.y+1]-u[threadIdx.x][threadIdx.y+1]);
    tempDerY = 0.5f*(u[threadIdx.x+1][threadIdx.y + 2] - u[threadIdx.x+1][threadIdx.y]);
    // float overload of sqrt; sqrtf would make the intent explicit.
    *((float*)(((char*)outputImage) + y*iPitchBytes)+ x) = sqrt( tempDerX*tempDerX + tempDerY*tempDerY );
  }
}
// Gradient magnitude, RGB (float3) variant: per-channel
// sqrt(dx^2 + dy^2) with the same cross-halo staging as the grayscale
// overload (corner halo cells again unused).
__global__ void gradient_magnitude_d(const float3 *inputImage, float3 *outputImage,
                                     int iWidth, int iHeight, size_t iPitchBytes)
{
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  // Tile with a one-element halo on all four sides.
  __shared__ float3 u[BW+2][BH+2];
  if (x < iWidth && y < iHeight) {
    u[threadIdx.x+1][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x);
    // Left/right halo columns (replicated at image borders).
    if (x == 0) u[threadIdx.x][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
    else if (threadIdx.x == 0) u[threadIdx.x][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x-1);
    if (x == (iWidth-1)) u[threadIdx.x+2][threadIdx.y+1] = u[threadIdx.x+1][threadIdx.y+1];
    else if (threadIdx.x == blockDim.x-1) u[threadIdx.x+2][threadIdx.y+1] = *((float3*)((char*)inputImage + y*iPitchBytes)+x+1);
    // Top/bottom halo rows (replicated at image borders).
    if (y == 0)
      u[threadIdx.x+1][threadIdx.y] = u[threadIdx.x+1][threadIdx.y + 1];
    else if (threadIdx.y == 0)
      u[threadIdx.x+1][threadIdx.y] = *((float3*) ((char*) inputImage + (y
          - 1) * iPitchBytes) + x);
    if (y == (iHeight - 1))
      u[threadIdx.x+1][threadIdx.y + 2] = u[threadIdx.x+1][threadIdx.y + 1];
    else if (threadIdx.y == blockDim.y - 1)
      u[threadIdx.x+1][threadIdx.y + 2] = *((float3*) ((char*) inputImage
          + (y + 1) * iPitchBytes) + x);
  }
  __syncthreads();
  float3 normValue;
  float3 xValue;
  float3 yValue;
  if (x < iWidth && y < iHeight) {
    // x derivatives, per channel
    xValue.x = 0.5f * (u[threadIdx.x + 2][threadIdx.y+1].x
        - u[threadIdx.x][threadIdx.y+1].x);
    xValue.y = 0.5f * (u[threadIdx.x + 2][threadIdx.y+1].y
        - u[threadIdx.x][threadIdx.y+1].y);
    xValue.z = 0.5f * (u[threadIdx.x + 2][threadIdx.y+1].z
        - u[threadIdx.x][threadIdx.y+1].z);
    // y derivatives, per channel
    yValue.x = 0.5f * (u[threadIdx.x+1][threadIdx.y + 2].x
        - u[threadIdx.x+1][threadIdx.y].x);
    yValue.y = 0.5f * (u[threadIdx.x+1][threadIdx.y + 2].y
        - u[threadIdx.x+1][threadIdx.y].y);
    yValue.z = 0.5f * (u[threadIdx.x+1][threadIdx.y + 2].z
        - u[threadIdx.x+1][threadIdx.y].z);
    // Per-channel Euclidean norm of (dx, dy).
    normValue.x = sqrt(xValue.x*xValue.x + yValue.x*yValue.x);
    normValue.y = sqrt(xValue.y*xValue.y + yValue.y*yValue.y);
    normValue.z = sqrt(xValue.z*xValue.z + yValue.z*yValue.z);
    *((float3*) (((char*) outputImage) + y * iPitchBytes) + x) = normValue ;
  }
}
// Host-side dispatcher: uploads the image into pitched device memory, runs
// the kernel selected by `mode` (0 = d/dx, 1 = d/dy, 2 = gradient
// magnitude), and downloads the result. iSpectrum chooses grayscale
// (1, float) vs. RGB (3, float3) processing; any other value does nothing
// but still calls cudaFree on the null pointers (a no-op).
// NOTE(review): iPitchBytes is overwritten by the second cudaMallocPitch;
// both allocations use identical widths so the pitches should match, but
// the input copy depends on that.
// NOTE(review): cudaThreadSynchronize() is deprecated; modern CUDA uses
// cudaDeviceSynchronize().
void gpu_derivative_sm_d(const float *inputImage, float *outputImage,
                         int iWidth, int iHeight, int iSpectrum, int mode)
{
  size_t iPitchBytes;
  float *inputImage_d = 0, *outputImage_d = 0;
  dim3 blockSize(BW, BH);
  // Enough blocks to cover the image; kernels guard the overhang.
  dim3 gridSize( (int)ceil(iWidth/(float)BW), (int)ceil(iHeight/(float)BH) );
  //dim3 smSize(BW+2,BH);
  if(iSpectrum == 1) {
    // Grayscale path: one float per pixel, pitched rows.
    cutilSafeCall( cudaMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
    cutilSafeCall( cudaMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float), iHeight ) );
    cutilSafeCall( cudaMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float), iWidth*sizeof(float), iHeight, cudaMemcpyHostToDevice) );
    if (mode == 0)
      derivativeX_sm_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
    else if (mode == 1)
      derivativeY_sm_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
    else if (mode == 2)
      gradient_magnitude_d<<<gridSize, blockSize>>>(inputImage_d, outputImage_d, iWidth, iHeight, iPitchBytes);
    cutilSafeCall( cudaThreadSynchronize() );
    cutilSafeCall( cudaMemcpy2D(outputImage, iWidth*sizeof(float), outputImage_d, iPitchBytes, iWidth*sizeof(float), iHeight, cudaMemcpyDeviceToHost) );
  }
  else if(iSpectrum == 3) {
    // RGB path: identical structure, float3 elements.
    cutilSafeCall( cudaMallocPitch( (void**)&(inputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
    cutilSafeCall( cudaMallocPitch( (void**)&(outputImage_d), &iPitchBytes, iWidth*sizeof(float3), iHeight ) );
    cutilSafeCall( cudaMemcpy2D(inputImage_d, iPitchBytes, inputImage, iWidth*sizeof(float3), iWidth*sizeof(float3), iHeight, cudaMemcpyHostToDevice) );
    if (mode == 0)
      derivativeX_sm_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
    else if (mode == 1)
      derivativeY_sm_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
    else if (mode == 2)
      gradient_magnitude_d<<<gridSize, blockSize>>>((float3*)inputImage_d, (float3*)outputImage_d, iWidth, iHeight, iPitchBytes);
    cutilSafeCall( cudaThreadSynchronize() );
    cutilSafeCall( cudaMemcpy2D(outputImage, iWidth*sizeof(float3), outputImage_d, iPitchBytes, iWidth*sizeof(float3), iHeight, cudaMemcpyDeviceToHost) );
  }
  cutilSafeCall( cudaFree(inputImage_d) );
  cutilSafeCall( cudaFree(outputImage_d) );
}
|
10,088 | #include <iostream>
#include <cuda_runtime_api.h>
#include <stdio.h>
using namespace std;
#define LEN 100000000
// Element-wise sum res[i] = v1[i] + v2[i] over l elements using a
// grid-stride loop, so any launch configuration covers the whole range.
__global__ void add_vec(int *v1, int *v2, int *res, size_t l) {
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < l; idx += stride) {
        res[idx] = v1[idx] + v2[idx];
    }
}
// Fills v1[i] = i and v2[i] = 40*i + 2 for i in [0, l), via a grid-stride
// loop so any launch configuration covers the whole range.
__global__ void gen_numbers(int *v1, int *v2, size_t l) {
    int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < l; idx += stride) {
        v1[idx] = idx;
        v2[idx] = idx * 40 + 2;
    }
}
// Driver: allocates three LEN-int device vectors, generates the inputs and
// sums them entirely on the GPU, then releases the memory. Every runtime
// call is status-checked; kernel launches are additionally checked via
// cudaGetLastError() plus a synchronize, since a launch itself returns no
// status and execution errors only surface at the next synchronizing call.
int main() {
    cudaError_t status;
    int *v1_gpu, *v2_gpu, *res_gpu;
    status = cudaMalloc((void**)&v1_gpu, sizeof(int) * LEN);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    status = cudaMalloc((void**)&v2_gpu, sizeof(int) * LEN);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    status = cudaMalloc((void**)&res_gpu, sizeof(int) * LEN);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    gen_numbers<<<2, 32, 0>>>(v1_gpu, v2_gpu, LEN);
    add_vec<<<2, 32, 0>>>(v1_gpu, v2_gpu, res_gpu, LEN);
    // Check for launch-configuration errors, then wait for the kernels and
    // surface any execution errors (the original checked neither).
    status = cudaGetLastError();
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    status = cudaDeviceSynchronize();
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    status = cudaFree(v1_gpu);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    status = cudaFree(v2_gpu);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    status = cudaFree(res_gpu);
    if (status != cudaSuccess) {
        cout << cudaGetErrorString(status) << endl;
    }
    return 0;
}
|
10,089 | // very simple vector add example discussed in class
// --everything is in one *.cpp program
// --no error checking; not a good idea
using namespace std;
#include <iostream>
#include <math.h>
#include <stdio.h>
#define TILE_WIDTH 512
// iceil macro
// returns an integer ceil value where integer numerator is first parameter
// and integer denominator is the second parameter. iceil is the rounded
// up value of numerator/denominator when there is a remainder
// equivalent to ((num%den!=0) ? num/den+1 : num/den)
// Integer ceiling division: iceil(num, den) == ceil(num/den) for positive
// integers. BUG FIX: arguments and the full expansion are parenthesized so
// the macro behaves correctly inside larger expressions and with compound
// arguments -- the original `(num+den-1)/den` expanded e.g. iceil(4, 1+1)
// to (4+1+1-1)/1+1 == 6 instead of 2.
#define iceil(num,den) (((num)+(den)-1)/(den))
// GPU kernel
// K-means iterated entirely inside one kernel launch. Each pass: assign
// every point (X,Y)[0..N) to its nearest centroid (CX,CY)[0..K), accumulate
// per-cluster sums and counts with atomics, move each centroid to its
// cluster mean (thread 0), and repeat until no centroid moved, at which
// point thread 0 sets BREAK[0] = 1. R[k] receives cluster k's radius
// (largest point-to-centroid distance).
// NOTE(review): __syncthreads() only synchronizes threads WITHIN a block,
// but the loop shares BREAK, CX/CY, TCX/TCY, COUNT and R across the whole
// grid. This is only well-defined when the launch uses a single block
// (N <= TILE_WIDTH given the iceil(N,TILE_WIDTH) grid) -- confirm callers
// guarantee that, or restructure into one launch per iteration.
// NOTE(review): the read-compare-write max update of R[...] below is not
// atomic, so concurrent threads can race and the radius may end up
// underestimated.
// NOTE(review): a cluster that receives no points has COUNT == 0, making
// the mean computation divide by zero.
__global__ void findNearestCentroidKernel(float *X, float *Y, float *CX, float *CY, float *TCX, float *TCY, int *COUNT, int N, int K, int *BREAK, float *R) {
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    while(BREAK[0] == 0){
        // Reset the per-cluster accumulators (threads 0..K-1).
        if(i < K){
            R[i] = 0;
            TCX[i] = 0;
            TCY[i] = 0;
            COUNT[i] = 0;
        }
        __syncthreads();
        if(i < N) {
            // Assignment step: nearest centroid for point i.
            float minDis = sqrt(((CX[0] - X[i])*(CX[0] - X[i])) + ((CY[0] - Y[i])*(CY[0] - Y[i])));
            int nearestIndex = 0;
            for(int k = 1; k < K; k++){
                // find nearest centroid index for this point
                float dis = sqrt(((CX[k] - X[i])*(CX[k] - X[i])) + ((CY[k] - Y[i])*(CY[k] - Y[i])));
                if(dis < minDis){
                    minDis = dis;
                    nearestIndex = k;
                }
            }
            // radius (racy max update -- see note above)
            float maxDis = sqrt(((CX[nearestIndex] - X[i])*(CX[nearestIndex] - X[i])) + ((CY[nearestIndex] - Y[i]) * (CY[nearestIndex] - Y[i])));
            if(R[nearestIndex] < maxDis){
                R[nearestIndex] = maxDis;
            }
            // sum to calculate mean
            atomicAdd(&TCX[nearestIndex], X[i]);
            atomicAdd(&TCY[nearestIndex], Y[i]);
            atomicAdd(&COUNT[nearestIndex], 1);
        }
        __syncthreads();
        // Update step: cluster mean (threads 0..K-1).
        if(i < K){
            TCX[i] /= COUNT[i];
            TCY[i] /= COUNT[i];
        }
        __syncthreads();
        // Thread 0 commits the new centroids and tests for convergence.
        if(i==0) {
            int isMoved = 0;
            for(int k = 0; k < K; k++) {
                if(CX[k] != TCX[k] || CY[k] != TCY[k]){
                    isMoved = 1;
                }
                CX[k] = TCX[k];
                CY[k] = TCY[k];
            }
            if(isMoved == 0){
                BREAK[0] = 1;
            }
        }
        __syncthreads();
    }
}
// Host wrapper for the k-means kernel: copies the points, centroids and
// scratch arrays to the device, runs findNearestCentroidKernel (which
// loops internally until convergence), and copies the results back.
// On return CX/CY hold the converged centroids, R the cluster radii,
// TCX/TCY the last means, and BREAK[0] == 1. X, Y and COUNT are not
// copied back (the kernel does not modify X/Y).
void findNearestCentroid(float *X, float *Y, float *CX, float *CY, float *TCX, float *TCY, int *COUNT, int N, int K, int *BREAK, float *R) {
    int size = N * sizeof(float);   // bytes for per-point arrays
    int kSize = K * sizeof(float);  // bytes for per-cluster arrays
    float *d_X, *d_Y, *d_CX, *d_CY, *d_TCX, *d_TCY, *d_R;
    int *d_COUNT, *d_BREAK;
    // allocate device memory and transfer points and centroids
    cudaMalloc((void **) &d_X, size);
    cudaMemcpy(d_X, X, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_Y, size);
    cudaMemcpy(d_Y, Y, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_CX, kSize);
    cudaMemcpy(d_CX, CX, kSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_CY, kSize);
    cudaMemcpy(d_CY, CY, kSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_TCX, kSize);
    cudaMemcpy(d_TCX, TCX, kSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_TCY, kSize);
    cudaMemcpy(d_TCY, TCY, kSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_COUNT, K * sizeof(int));
    cudaMemcpy(d_COUNT, COUNT, K * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_BREAK, sizeof(int));
    cudaMemcpy(d_BREAK, BREAK, sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_R, kSize);
    cudaMemcpy(d_R, R, kSize, cudaMemcpyHostToDevice);
    // One thread per point (see the kernel's single-block caveat).
    findNearestCentroidKernel <<<iceil(N,TILE_WIDTH),TILE_WIDTH>>>(d_X, d_Y, d_CX, d_CY, d_TCX, d_TCY, d_COUNT, N, K, d_BREAK, d_R);
    // Launch-configuration check (the error message's "MatriMulKernel"
    // looks copy-pasted from another program -- runtime string, left as-is).
    cudaError_t error_id=cudaGetLastError();
    if (error_id != cudaSuccess) {
        cout << "Attempted Launch of MatriMulKernel returned " <<
            (int)error_id << endl;
        cout << cudaGetErrorString(error_id) << endl ;
        exit(EXIT_FAILURE);
    }
    // Blocking copies below also synchronize with the kernel.
    cudaMemcpy(CX, d_CX, kSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(CY, d_CY, kSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(TCX, d_TCX, kSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(TCY, d_TCY, kSize, cudaMemcpyDeviceToHost);
    cudaMemcpy(BREAK, d_BREAK, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(R, d_R, kSize, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_X); cudaFree(d_Y); cudaFree(d_CX); cudaFree(d_CY);
    cudaFree(d_TCX); cudaFree(d_TCY); cudaFree(d_COUNT); cudaFree(d_BREAK);
    cudaFree(d_R);
}
int main (int argc, char **argv) {
 // K-means driver: load the 2-D sample set, pick K random initial centroids
 // from the data, run the GPU pass (findNearestCentroid), and print the
 // resulting centroids/radii and the convergence flag.
 // code test input
 // float xp[] = {2, 3, 4, 2, 4, 2, 3, 4, 7, 8, 7, 8};
 // float yp[] = {2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6};
 // int K = 2;
 // dataset
 float xp[] = {0.50045,0.62042,0.63662,0.78343,0.39833,0.76105,0.86649,0.5338,0.67921,0.58146,0.51889,0.82518,0.38533,0.53911,0.90309,0.94548,0.79293,0.7579,0.28621,0.44331,0.69774,0.4574,0.70613,0.72721,0.83308,0.47526,0.53495,0.99023,0.45525,0.85482,0.77327,0.86059,0.74007,0.74936,0.59231,0.55414,0.84518,0.74866,0.49398,0.67464,0.80713,0.58186,0.43138,0.79422,0.83607,0.70866,0.91123,0.91224,0.89366,0.69314,0.77839,0.79542,0.62412,0.42176,0.77219,0.45899,0.597,0.72932,0.89745,0.86918,0.86877,0.60436,0.44866,0.67697,0.48163,0.7069,0.64928,0.69971,0.37205,0.63079,0.61336,0.35087,0.50331,0.84022,0.30077,0.78189,0.87846,0.52592,0.97682,0.36551,0.58415,0.68087,0.49863,0.47702,0.80379,0.84552,0.82612,0.58831,0.71168,0.47222,0.98558,0.82497,0.35571,0.77466,0.53953,0.45861,0.68372,0.41362,0.27898,0.71739,-0.61555,-0.059872,0.15817,-0.80462,0.013774,-0.026154,0.57119,0.1383,0.14187,-0.88263,0.59423,-0.44384,0.2998,-0.24294,0.13261,0.46752,-0.14789,0.43184,0.17082,0.60789,-0.36113,-0.2566,-0.99087,-0.53892,0.33925,-0.49112,0.16715,-0.019133,0.0705,0.46192,-0.77776,-0.62921,0.32942,0.016021,-0.21196,-0.61637,0.53244,0.38571,-0.47271,0.080991,0.039742,-0.65584,0.57717,-0.57962,-0.015825,-0.91257,-0.64211,-0.72704,0.24372,-0.21568,-0.4985,-0.2709,0.16699,0.01051,-0.30193,0.18312,-0.87527,0.42738,-0.21141,0.41621,0.11842,-0.14174,-0.18126,-0.098237,-0.3365,0.14356,0.35141,0.13923,-0.16566,-0.70101,-0.0694,-0.68869,-0.38079,-0.43985,0.14795,-0.3374,-0.89033,0.15952,0.0018652,0.45042,0.080862,-0.054846,-0.50866,-0.66301,-0.099192,0.053784,-0.15054,-0.60872,-1.0649,-0.76489,-0.23357,-0.56653,0.22882,0.036619,-0.47542,0.53557,0.057822,-0.21597,0.084768,0.29547,-1.6625,-2.3066,-2.5451,-2.5055,-1.8161,-2.2007,-1.5987,-2.2245,-1.8461,-2.331,-1.6917,-1.5426,-2.0721,-1.5946,-2.5846,-1.9467,-2.2048,-1.7053,-2.2967,-2.1201,-2.0924,-1.8321,-2.0276,-1.7004,-2.1879,-2.1517,-1.7454,-2.3393,-2.3783,-2.028,-1.77,-1.8245,-1.9501,-1.8441,-1.4555,-2.7113,-2.1346,-1.8869,-1.596,-2.0089,-2.4819,-2.0069,-2.7124,-2.0677,-2.3332,-1.8419,-2.0059,-1.9892,-2.4431,-2.5464,-2.3451,-2.2541,-2.5845,-1.5723,-1.7045,-1.5421,-2.5846,-1.8773,-2.2585,-2.407,-1.9173,-2.5959,-1.8451,-2.0366,-2.3052,-1.7897,-2.4067,-1.4965,-2.1428,-2.0735,-2.3237,-2.4941,-1.7384,-1.7941,-1.7079,-2.4116,-2.4062,-2.3151,-2.6612,-1.7589,-2.5339,-1.531,-2.1624,-1.8302,-1.5692,-1.7906,-2.2307,-2.1941,-2.5677,-2.374,-1.9896,-2.006,-2.3813,-2.1396,-1.989,-2.4698,-1.519,-1.4752,-1.8924,-1.9298};
 float yp[] = {0.99137,1.1864,1.1091,1.1882,0.86828,1.2912,1.1251,1.2909,1.2921,0.91792,0.98102,0.87589,1.0807,0.879,1.1005,1.0128,1.1342,1.1215,1.0179,1.0879,0.65743,0.8492,1.2998,0.891,0.85405,0.75205,1.0531,0.83313,0.80712,0.65705,1.2371,0.82968,1.1268,1.082,1.252,1.0891,1.0913,1.248,1.2467,0.71219,0.8547,1.0433,1.141,1.2024,0.68393,1.2528,0.96512,0.89731,0.92602,0.77342,1.2817,1.1748,1.2687,0.76846,0.91627,1.0187,1.1778,0.97085,1.1594,0.71518,1.263,0.85016,0.71094,1.0995,1.1069,0.72923,0.90394,0.67114,0.94768,1.1872,0.94457,0.83987,1.1424,1.2836,0.91407,1.1718,0.80268,0.60764,0.81599,0.91851,1.324,1.2934,1.3125,1.1482,0.95867,1.2578,0.63838,1.3116,0.92079,0.99451,0.93409,1.0769,0.74702,0.99721,1.3215,0.98109,1.0413,0.7791,0.9537,0.62689,-0.44045,0.44408,-0.24966,0.5796,-0.50121,0.63359,0.13374,-0.24062,-0.53195,0.64043,0.57053,0.0097233,0.16545,0.2039,0.74279,-0.39876,0.31784,0.20217,-0.25401,0.058463,-0.57635,0.0088247,-0.054508,-0.041507,0.63631,-0.46832,-0.56195,-0.63778,0.40821,-0.20645,0.85026,0.99861,0.041061,0.34774,-0.21727,0.58605,0.66522,0.11627,-0.31435,-0.36548,0.13246,-0.27425,0.33963,0.20461,0.66094,-0.40905,0.59764,-0.36371,-0.47063,-0.52933,0.47001,0.07556,0.32864,-0.44967,-0.3139,0.28728,0.18796,-0.12,1.026,-0.14989,0.67563,0.8246,0.12724,-0.40327,-0.15703,-0.32224,0.80277,0.77583,0.37448,0.096927,0.047256,-0.092455,0.50284,-0.31497,0.3849,0.68048,-0.24552,0.92299,1.0894,-0.35057,-0.096622,0.58476,1.0457,0.80786,0.4435,0.77992,-0.16127,-0.57881,-0.018881,-0.32336,0.34652,1.0319,-0.39581,0.29568,0.61551,-0.091232,0.49379,0.049174,0.33141,0.019771,0.39901,-0.28535,-0.14886,-0.36566,-0.60674,-0.027574,0.31511,-0.40363,-0.40677,0.29229,-0.0034327,0.046595,0.67712,-0.16778,0.39392,0.11283,0.47853,0.55313,0.26466,-0.15206,-0.53149,0.22226,0.37138,-0.053052,-0.52837,0.055299,0.3594,0.078523,-0.25653,-0.13005,0.61349,-0.42266,0.60408,-0.12923,-0.17669,-0.010072,0.17846,0.39363,0.23257,0.033562,-0.50101,-0.46628,0.05617,0.32735,-0.29258,-0.30209,0.071615,-0.18159,-0.10489,-0.032858,-0.12095,0.47737,0.34116,-0.31788,-0.36801,-0.26249,-0.23782,0.36064,-0.46076,0.46653,0.23937,0.33773,-0.56519,0.65367,-0.46669,0.48537,0.1671,0.3639,0.33264,-0.22258,0.25899,0.18506,0.013162,0.44468,0.18279,-0.56412,0.15889,-0.35779,-0.074785,0.29244,0.23565,-0.024191,0.11848,-0.32517,0.14238,0.5062,-0.66232,0.5214,0.27723,-0.077432,-0.30826,0.33229,-0.35625,-0.53503,0.21018,0.025991,-0.45004,-0.005085,-0.59767,0.1629};
 int K = 3;
 // Element count of the sample array.  The original divided by sizeof(int),
 // which only worked by the coincidence sizeof(int) == sizeof(float);
 // dividing by sizeof(xp[0]) is always correct.
 int N = sizeof(xp)/sizeof(xp[0]);
 float *X = new float[N];
 float *Y = new float[N];
 float *CX = new float[K];
 float *CY = new float[K];
 float *TCX = new float[K];
 float *TCY = new float[K];
 float *R = new float[K];
 int *COUNT = new int[K];
 int *BREAK = new int[1];
 for (int i=0;i<N;i++) {
 X[i] = xp[i];
 Y[i] = yp[i];
 }
 for(int k = 0; k < K; k++) {
 TCX[k] = 0;
 TCY[k] = 0;
 COUNT[k] = 0;
 R[k] = 0;
 // random centers: seed each centroid from a randomly chosen data point
 int randomIndex = rand() % N;
 CX[k] = X[randomIndex];
 CY[k] = Y[randomIndex];
 }
 BREAK[0] = 0;
 findNearestCentroid(X, Y, CX, CY, TCX, TCY, COUNT, N, K, BREAK, R);
 // Output Results
 for (int i=0;i<K;i++) {
 cout << "CX[" << i << "]=" << CX[i] <<", " << "CY[" << i << "]=" << CY[i] <<", Radius[" << i << "]="<<R[i]<< endl;
 }
 cout << "BREAK=" << BREAK[0] << endl;
 // These buffers were allocated with new[], so they must be released with
 // delete[]; the original called free() on them, which is undefined behaviour.
 delete[] X; delete[] Y; delete[] CX; delete[] CY; delete[] TCX;
 delete[] TCY; delete[] BREAK; delete[] COUNT; delete[] R;
}
|
10,090 | #include "includes.h"
__global__ void rgb2binaryKernel(unsigned char *imgr,unsigned char *imgg,unsigned char *imgb,unsigned char *img_binary, int n, int umbral) {
// Threshold a planar RGB image into a binary mask: each pixel's grey level
// (Rec. 601 luma weights) is compared against `umbral`; matches become 255,
// everything else 0.
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < n) {
double gray = 0.299*imgr[idx] + 0.587*imgg[idx] + 0.114*imgb[idx];
// Cast before the comparison, exactly as the original did; the weighted
// sum of 8-bit channels cannot exceed 255, so the cast is lossless.
img_binary[idx] = ((unsigned char)gray > umbral) ? 255 : 0;
}
}
10,091 | #include "includes.h"
__global__ void Atualiza( double *u, double *u_prev, const int n ) {
// One explicit finite-difference time step of the 1-D heat equation.
// kappa, dt and dx are file-scope values (presumably from includes.h) --
// not visible here; confirm their declarations there.
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if( i == 0 ) {
// enforce the boundary condition at both ends of the rod
u[ 0 ] = u[ n ] = 0.;
}
else if( i < n ) {
const double lap = u_prev[ i - 1 ] - 2 * u_prev[ i ] + u_prev[ i + 1 ];
u[ i ] = u_prev[ i ] + kappa * dt / ( dx * dx ) * lap;
}
}
10,092 | //****************************************************************************
// Also note that we've supplied a helpful debugging function called checkCudaErrors.
// You should wrap your allocation and copying statements like we've done in the
// code we're supplying you. Here is an example of the unsafe way to allocate
// memory on the GPU:
//
// cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols);
//
// Here is an example of the safe way to do the same thing:
//
// checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * numRows * numCols));
//****************************************************************************
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <iostream>
#include <iomanip>
#include <algorithm> // std::max
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
typedef unsigned char uchar;
#define FILTER_WIDTH 3
#define NTHREADS 32
#define checkCudaErrors(val) check( (val), #val, __FILE__, __LINE__)
template<typename T>
void check(T err, const char* const func, const char* const file, const int line) {
    // Abort the program with a diagnostic (file:line, CUDA error string, and
    // the stringified call) whenever a CUDA API call did not succeed.
    if (err == cudaSuccess) return;
    std::cerr << "CUDA error at: " << file << ":" << line << std::endl;
    std::cerr << cudaGetErrorString(err) << " " << func << std::endl;
    exit(1);
}
__constant__ float filtro[FILTER_WIDTH * FILTER_WIDTH];
__global__ void box_filter(const unsigned char* const inputChannel, unsigned char* const outputChannel,
                           int numRows, int numCols, const float* __restrict__ filter, const int filterWidth)
{
    // Convolve one single-byte channel with a filterWidth x filterWidth kernel.
    // Neighbours falling outside the image are clamped to the nearest border
    // pixel, so every thread reads valid memory.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= numCols || row >= numRows) return;

    const int half = filterWidth / 2;
    float acc = 0.0f;
    for (int fx = 0; fx < filterWidth; ++fx) {
        for (int fy = 0; fy < filterWidth; ++fy) {
            int x = min(max(col + fx - half, 0), numCols - 1);  // clamp to image
            int y = min(max(row + fy - half, 0), numRows - 1);  // clamp to image
            acc += filter[fy * filterWidth + fx] * inputChannel[y * numCols + x];
        }
    }
    // Implicit float -> unsigned char conversion, as in the original.
    outputChannel[row * numCols + col] = acc;
}
// This kernel takes in an image represented as a uchar4 and splits
// it into three images consisting of only one color channel each
__global__ void separateChannels(const uchar4* const inputImageRGBA,
                                 int numRows,
                                 int numCols,
                                 unsigned char* const redChannel,
                                 unsigned char* const greenChannel,
                                 unsigned char* const blueChannel)
{
    // Split an interleaved RGBA image into three planar one-byte channels
    // (the alpha component is dropped).  One thread per pixel, 2-D launch.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= numCols || row >= numRows) return;
    const int p = row * numCols + col;
    const uchar4 px = inputImageRGBA[p];
    redChannel[p]   = px.x;
    greenChannel[p] = px.y;
    blueChannel[p]  = px.z;
}
// This kernel takes in three color channels and recombines them
// into one image. The alpha channel is set to 255 to represent
// that this image has no transparency.
__global__ void recombineChannels(const unsigned char* const redChannel,
                                  const unsigned char* const greenChannel,
                                  const unsigned char* const blueChannel,
                                  uchar4* const outputImageRGBA,
                                  int numRows,
                                  int numCols)
{
    // Merge three planar channels back into one interleaved RGBA image.
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    // Threads mapped outside the image do nothing.
    if (col >= numCols || row >= numRows) return;
    const int p = row * numCols + col;
    // Alpha is fixed at 255: the output image carries no transparency.
    outputImageRGBA[p] = make_uchar4(redChannel[p], greenChannel[p], blueChannel[p], 255);
}
unsigned char *d_red, *d_green, *d_blue;
float *d_filter;
void allocateMemoryAndCopyToGPU(const size_t numRowsImage, const size_t numColsImage, const float* const h_filter, const size_t filterWidth)
{
    // Allocate the three per-channel device buffers and the device filter,
    // upload the host filter, and zero the channel buffers.
    //
    // Fixes over the original:
    //  - the channel buffers hold unsigned char, not uchar4; sizing them with
    //    sizeof(uchar4) over-allocated each one by 4x
    //  - the filter holds float, not uchar4 (the byte count only matched by
    //    the coincidence sizeof(uchar4) == sizeof(float) == 4)
    //  - every CUDA call is now wrapped in checkCudaErrors, per the file's
    //    own convention
    const size_t sizeChannel = sizeof(unsigned char) * numRowsImage * numColsImage;
    const size_t sizeFilter  = sizeof(float) * filterWidth * filterWidth;
    checkCudaErrors(cudaMalloc(&d_red,    sizeChannel));
    checkCudaErrors(cudaMalloc(&d_green,  sizeChannel));
    checkCudaErrors(cudaMalloc(&d_blue,   sizeChannel));
    checkCudaErrors(cudaMalloc(&d_filter, sizeFilter));
    checkCudaErrors(cudaMemcpy(d_filter, h_filter, sizeFilter, cudaMemcpyHostToDevice));
    checkCudaErrors(cudaMemset(d_red,   0, sizeChannel));
    checkCudaErrors(cudaMemset(d_green, 0, sizeChannel));
    checkCudaErrors(cudaMemset(d_blue,  0, sizeChannel));
}
// Crear el filtro se que va a aplicar (en CPU) y almacenar su tamaño...
void create_filter(float **d_filter, const float *mask, const int size)
{
float *h_filter = (float *) malloc(sizeof(float) * size);
if (!h_filter) {
std::cerr << "Error creating filter.." << strerror(errno) << '\n';
exit(1);
}
for (int i = 0; i < size; ++i) {
h_filter[i] = mask[i];
}
cudaMalloc(d_filter, sizeof(float) * size);
cudaMemcpy(*d_filter, h_filter, sizeof(float) * size, cudaMemcpyHostToDevice);
}
void open_mpi_separate_channels(uchar4* const d_inputImageRGBA,
                                const size_t numRows,
                                const size_t numCols,
                                unsigned char *d_red,
                                unsigned char *d_green,
                                unsigned char *d_blue)
{
    // Launch wrapper for separateChannels: one thread per pixel, square
    // NTHREADS x NTHREADS blocks, ceil-div grid to cover partial edge tiles.
    const dim3 block(NTHREADS, NTHREADS, 1);
    const dim3 grid((numCols - 1) / block.x + 1, (numRows - 1) / block.y + 1, 1);
    separateChannels<<<grid, block>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}
void open_mpi_box_filter(const unsigned char *channel,
                         unsigned char *filter_channel,
                         const size_t numRows,
                         const size_t numCols,
                         float* d_filter,
                         const int filterWidth)
{
    // Launch wrapper for box_filter over a single channel; same 2-D geometry
    // as the other wrappers (ceil-div grid of NTHREADS x NTHREADS blocks).
    const dim3 block(NTHREADS, NTHREADS, 1);
    const dim3 grid((numCols - 1) / block.x + 1, (numRows - 1) / block.y + 1, 1);
    box_filter<<<grid, block>>>(channel, filter_channel, numRows, numCols, d_filter, filterWidth);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}
void open_mpi_recombine_channels(const unsigned char* d_redFiltered,
                                 const unsigned char* d_greenFiltered,
                                 const unsigned char* d_blueFiltered,
                                 uchar4* const d_outputImageRGBA,
                                 const size_t numRows,
                                 const size_t numCols)
{
    // Launch wrapper for recombineChannels; same geometry as the other stages.
    const dim3 block(NTHREADS, NTHREADS, 1);
    const dim3 grid((numCols - 1) / block.x + 1, (numRows - 1) / block.y + 1, 1);
    recombineChannels<<<grid, block>>>(d_redFiltered, d_greenFiltered, d_blueFiltered, d_outputImageRGBA, numRows, numCols);
    cudaDeviceSynchronize();
    checkCudaErrors(cudaGetLastError());
}
void convolution(uchar4* const d_inputImageRGBA,
                 uchar4* const d_outputImageRGBA,
                 const size_t numRows,
                 const size_t numCols,
                 unsigned char *d_redFiltered,
                 unsigned char *d_greenFiltered,
                 unsigned char *d_blueFiltered,
                 const int filterWidth)
{
    // Full pipeline on one shared 2-D launch geometry: split the RGBA image
    // into planes (into the file-scope d_red/d_green/d_blue buffers),
    // convolve each plane with the device filter, then re-interleave.
    const dim3 block(NTHREADS, NTHREADS, 1);
    const dim3 grid((numCols - 1) / block.x + 1, (numRows - 1) / block.y + 1, 1);

    separateChannels<<<grid, block>>>(d_inputImageRGBA, numRows, numCols, d_red, d_green, d_blue);
    // Synchronize and check after each stage so a bad launch is caught here.
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

    box_filter<<<grid, block>>>(d_red,   d_redFiltered,   numRows, numCols, d_filter, filterWidth);
    box_filter<<<grid, block>>>(d_blue,  d_blueFiltered,  numRows, numCols, d_filter, filterWidth);
    box_filter<<<grid, block>>>(d_green, d_greenFiltered, numRows, numCols, d_filter, filterWidth);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());

    recombineChannels<<<grid, block>>>(d_redFiltered, d_greenFiltered, d_blueFiltered, d_outputImageRGBA, numRows, numCols);
    cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
//Free all the memory that we allocated
// TODO: make sure you free any arrays that you allocated
// Releases the file-scope device buffers: the three channel planes from
// allocateMemoryAndCopyToGPU() and the filter from create_filter().
void cleanup() {
checkCudaErrors(cudaFree(d_red));
checkCudaErrors(cudaFree(d_green));
checkCudaErrors(cudaFree(d_blue));
checkCudaErrors(cudaFree(d_filter));
}
|
10,093 | #include <stdio.h>
#include <iostream>
#define N 512
// NOTE(review): this kernel is almost certainly wrong as written:
//  - `index` ignores blockIdx, so only a single block's threads get distinct
//    indices (harmless for the <<<1,N>>> launch in main, but fragile);
//  - `a[index] = &index;` stores the address of a per-thread local variable;
//    that pointer is dead the moment the thread finishes and must never be
//    dereferenced.  The intent was probably to store the *value* (e.g. a
//    plain `int*` output with `a[index] = index;`) -- confirm with the caller
//    before fixing, since the signature would have to change too.
__global__ void kernel(int *a[], int n)
{
int index = threadIdx.x + blockDim.x * threadIdx.y;
if(index < n)
a[index] = &index;
}
int main(int argc, char **argv)
{
// NOTE(review): the device-memory handling below is broken end to end and
// needs a coordinated fix with the kernel above:
//  - `dev_a` is a HOST array of N pointers; `cudaMalloc((void**)&dev_a, ...)`
//    writes a device pointer into dev_a[0] and leaves the rest untouched;
//  - the kernel receives the host array (decayed to a host address), so the
//    device dereferences host memory -- an illegal access;
//  - the cudaMemcpy runs HostToDevice *after* the kernel, and nothing is
//    ever copied back, so the loop prints the uninitialized host array `a`.
// The intended flow is presumably: `int *dev_a; cudaMalloc(&dev_a,
// n*sizeof(int)); kernel<<<...>>>(...); cudaMemcpy(a, dev_a, n*sizeof(int),
// cudaMemcpyDeviceToHost);` -- confirm against the kernel's real contract.
int n = N;
if(argc == 2)
n = atoi(argv[1]);
else if (argc == 1)
n = 512;
else
printf("Error # of Arguments");
int* a = new int[n];
int *dev_a[N];
//allocate the memory on the GPU
cudaMalloc( (void**)&dev_a, N * sizeof(int) );
//creates a kernel
kernel<<<1,N>>>(dev_a, n);
//copy the arrays 'a' to the GPU
cudaMemcpy( dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice );
for (int i=0; i< n; i++)
{
printf("%d\n",a[i]);
}
cudaFree( dev_a );
delete[] a;
return 0;
}
|
10,094 | #include "includes.h"
#define CUDA_CHECK_ERROR
#define CudaSafeCall(err) __CudaSafeCall(err, __FILE__, __LINE__)
#define CudaCheckError() __CudaCheckError(__FILE__, __LINE__)
__global__ void transform_fc(float *input, const float *raw_input, const int width, const int channels)
{
// Re-lay out raw_input from channel-interleaved order (pixel-major,
// raw_input[s*channels + c]) to planar order (channel-major,
// input[c*plane + s]).  One thread per channel gathers its whole plane.
// Thread 0 additionally appends a trailing 1 after the last plane --
// presumably a bias term for a fully-connected layer; confirm with callers.
const int c = threadIdx.x;
const int plane = width * width;
for (int s = 0; s < plane; s++)
input[c * plane + s] = raw_input[s * channels + c];
if (c == 0)
input[plane * channels] = 1;
}
10,095 | #include "includes.h"
__global__ void kernel_movinv32_write(char* _ptr, char* end_ptr, unsigned int pattern, unsigned int lb, unsigned int sval, unsigned int offset)
{
// Memtest "moving inversions, 32-bit shift" write pass: each block fills its
// BLOCKSIZE-byte slice with a walking pattern.  Starting from `pattern`, the
// value shifts left one bit per word (OR-ing in sval) and restarts from `lb`
// every 32 words; `offset` sets the starting phase within that cycle.
unsigned int* word = (unsigned int*) (_ptr + blockIdx.x*BLOCKSIZE);
if (word >= (unsigned int*) end_ptr) {
// this block's slice lies past the end of the region under test
return;
}
unsigned int phase = offset;
unsigned int pat = pattern;
for (unsigned int w = 0; w < BLOCKSIZE/sizeof(unsigned int); w++) {
word[w] = pat;
phase++;
if (phase >= 32) {
phase = 0;
pat = lb;
} else {
pat = (pat << 1) | sval;
}
}
}
10,096 | #include "includes.h"
// Reduce per-atom partial intensities (S_calcc) into one value per q-point
// (S_calc), derive the per-q force prefactor Aq, and scale the per-atom force
// components f_pt{x,y,z}c by it.  One block handles q index ii (grid-strided
// over num_q); threads within the block cooperate on the reduction.
// NOTE(review): the tree reduction assumes num_atom2 is a power of two and
// that entries in [num_atom, num_atom2) are zero-padded -- confirm with the
// caller that fills S_calcc.
__global__ void sum_S_calc ( float *S_calcc, float *f_ptxc, float *f_ptyc, float *f_ptzc, float *S_calc, float *Aq, float *q_S_ref_dS, int num_q, int num_atom, int num_atom2, float alpha, float k_chi, float *sigma2) {
for (int ii = blockIdx.x; ii < num_q; ii += gridDim.x) {
// Tree-like summation of S_calcc to get S_calc
for (int stride = num_atom2 / 2; stride > 0; stride >>= 1) {
__syncthreads();
for(int iAccum = threadIdx.x; iAccum < stride; iAccum += blockDim.x) {
S_calcc[ii * num_atom2 + iAccum] += S_calcc[ii * num_atom2 + stride + iAccum];
}
}
__syncthreads();
// All threads write the same reduced value; redundant but race-free.
S_calc[ii] = S_calcc[ii * num_atom2];
__syncthreads();
if (threadIdx.x == 0) {
// q_S_ref_dS is laid out as three stacked length-num_q sections:
// [0,num_q) unused here, [num_q,2*num_q) reference S, [2*num_q,...) dS.
Aq[ii] = S_calc[ii] - q_S_ref_dS[ii+num_q];
Aq[ii] *= -alpha;
Aq[ii] += q_S_ref_dS[ii + 2*num_q];
Aq[ii] *= k_chi / sigma2[ii];
// NOTE(review): this doubles Aq[ii] (equivalent to Aq[ii] *= 2).  If the
// intended factor was 2*k_chi/sigma2 this is fine, otherwise suspicious --
// confirm against the force expression it implements.
Aq[ii] += Aq[ii];
}
__syncthreads();
for (int jj = threadIdx.x; jj < num_atom; jj += blockDim.x) {
f_ptxc[ii * num_atom2 + jj] *= Aq[ii] * alpha;
f_ptyc[ii * num_atom2 + jj] *= Aq[ii] * alpha;
f_ptzc[ii * num_atom2 + jj] *= Aq[ii] * alpha;
}
}
}
10,097 | #include <cuda_runtime.h>
// Element-wise doubling: output[i] = 2 * data[i] for i in [0, n).
// The global index is computed in size_t so buffers with more than INT_MAX
// elements index correctly (the original compared a signed int against the
// size_t bound, overflowing for large n).
__global__ void double_kernel(const float* data, float* output, size_t n) {
    const size_t idx = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        output[idx] = data[idx] * 2;
    }
}
// Host-side wrapper: launch double_kernel over n elements with 256-thread
// blocks and a ceil-div grid.
void launch_double_ext_cuda_kernel(const float* data, float* output, size_t n){
    if (n == 0) return;  // a <<<0, ...>>> launch is an invalid configuration
    const int threads = 256;
    // Compute the block count in size_t: the original used int, which
    // overflows once n exceeds ~2^31.
    const size_t blocks = (n + threads - 1) / threads;
    double_kernel<<<(unsigned int)blocks, threads>>>(data, output, n);
}
|
10,098 | #include "includes.h"
// Element-wise vector add: deviceResult[i] = deviceA[i] + deviceB[i].
// NOTE(review): there is no length parameter and no bounds guard, so the
// launch configuration must cover exactly the array length
// (gridDim.x * blockDim.x == N); any surplus threads read and write out of
// bounds.  Confirm every launch site before relying on this.
__global__ void vectorAddKernel(float* deviceA, float* deviceB, float* deviceResult) {
unsigned i = blockIdx.x * blockDim.x + threadIdx.x;
// insert operation here
deviceResult[i] = deviceA[i]+deviceB[i];
}
10,099 | #include "includes.h"
/* objective
* C = A*B // A[m][k], B[k][n], C[m][n]
* compile: nvcc --gpu-architecture=compute_60 --gpu-code=sm_60 -O3 matmul_double.cu -o matmul_double
Using nvprof for this lab
nvprof -- query-metrics
nvprof dram_read_transactions ./test 1024 1024 128
nvprof ./test 1024 1024 128
second line of result shows time for GPU kernel
GFlop ( 2MNK * 10^-9 ) / time (second)
*/
#define TILE_WIDTH 16
// Tiled double-precision matrix multiply: C = A * B with A (M x K),
// B (K x N), C (M x N), all row-major.  Each TILE_WIDTH x TILE_WIDTH thread
// block computes one tile of C, staging tiles of A and B through shared
// memory; out-of-range tile elements are zero-filled so edge tiles work for
// arbitrary M, N, K.  Launch with blockDim = (TILE_WIDTH, TILE_WIDTH).
__global__ void matmul_double(double* A, double* B , double* C, int M, int N, int K)
{
/// complete code
int bx = blockIdx.x ;
int by = blockIdx.y ;
int tx = threadIdx.x ;
int ty = threadIdx.y ;
// Global row/column of C this thread is responsible for.
int row = by * TILE_WIDTH + ty ;
int col = bx * TILE_WIDTH + tx ;
__shared__ double SA[TILE_WIDTH][TILE_WIDTH] ;
__shared__ double SB[TILE_WIDTH][TILE_WIDTH] ;
double Csub = 0;
// Walk the K dimension one tile at a time (ceil(K / TILE_WIDTH) steps).
for (int i = 0; i < (K-1)/TILE_WIDTH +1 ; ++i)
{
/* code */
//SA[ty][tx] = A[row*n + i * TILE_WIDTH + tx] ;
//SB[ty][tx] = B[(i * TILE_WIDTH + ty )*n + col ] ;
// Stage this thread's element of the A tile; zero-pad outside the matrix.
if ( (row < M) && (i * TILE_WIDTH + tx < K ) ){
SA[ty][tx] = A[row*K + i * TILE_WIDTH + tx] ;
}
else{
SA[ty][tx] = 0;
}
// Stage this thread's element of the B tile; zero-pad outside the matrix.
if ( (col < N ) && ( i * TILE_WIDTH + ty < K) ){
SB[ty][tx] = B[(i*TILE_WIDTH + ty)*N + col] ;
}
else{
SB[ty][tx] = 0;
}
// All tile loads must finish before anyone reads the shared arrays.
__syncthreads() ;
for (int k = 0; k < TILE_WIDTH; ++k){
Csub += SA[ty][k] * SB[k][tx] ;
}
// All reads must finish before the next iteration overwrites the tiles.
__syncthreads() ;
}
//C[row*n + col] = Csub ;
// Only threads mapped inside C write a result.
if ( (row < M ) && ( col < N )){
C[ row * N + col] = Csub ;
}
}
10,100 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include<iostream>
using namespace std;
// GPU method
// Exact-substring search: one thread per candidate start position, laid out
// to match the host launch <<<noOfBlocks, 1024>>>.  Every complete match of
// keyword[0..keyLen) in data is reported via device printf.
__global__ void searchText(char* data, char* keyword, int dataLen,int keyLen)
{
    int start = (blockIdx.x * 1024) + threadIdx.x;
    // Bug fix: the original ignored dataLen entirely, so threads whose match
    // window extended past the end of the text read out of bounds.  A match
    // must fit entirely inside the buffer.
    if (start + keyLen > dataLen) return;
    for (int j = 0; j < keyLen; j++) {
        if (data[start + j] != keyword[j]) return;  // mismatch: not a hit
    }
    printf("Match found at %d\n", start);
}
int main(int argc, char* argv[])
{
    // Read a text file and a dictionary of tab-delimited keywords, upload the
    // text once, and run searchText on the GPU for each keyword.
    char* str = (char*)malloc(512 * sizeof(char));
    char* str1 = (char*)malloc(512 * sizeof(char));
    printf("Enter the input file which has to be searched\n");
    scanf ("%s",str);
    printf("Enter the dictionary with the keywords to be searched \n");
    scanf("%s",str1);
    printf("input = %s\ndict = %s\n",str,str1);
    // fgets() below reads up to 512 bytes, so the line buffer must be at
    // least that large (the original allocated only 500 -> heap overflow).
    char* buf = (char*)malloc(512 * sizeof(char));
    FILE *f = fopen(str, "r");
    if (!f) { perror(str); return 1; }
    fseek(f, 0, SEEK_END);
    long fsize = ftell(f);
    fseek(f, 0, SEEK_SET);
    char *text = (char *)malloc((fsize + 1) * sizeof(char));
    printf("reading..\n");
    fread(text, fsize, 1, f);
    // fread does not NUL-terminate; every strlen(text) below depended on it.
    text[fsize] = '\0';
    printf("done...\n");
    fclose(f);
    int noOfBlocks = strlen(text)/1024;
    noOfBlocks++;
    // %ld for the long fsize (the original passed it to %d).
    printf("text size = %d\nfsize = %ld\n",noOfBlocks,fsize);
    char* d_text;
    cudaMalloc((void**)&d_text, strlen(text) * sizeof(char));
    cudaMemcpy(d_text, text, strlen(text) * sizeof(char), cudaMemcpyHostToDevice);
    FILE *f1 = fopen(str1,"r");
    if (!f1) { perror(str1); cudaFree(d_text); free(text); return 1; }
    while(fgets(buf,512,f1))
    {
        char* keys = (char*)malloc(128 * sizeof(char));
        // strtok returns a pointer into buf; the original malloc'd a buffer
        // for tok and then leaked it the moment strtok reassigned the pointer.
        char* tok = strtok(buf,"\t");
        printf("searching for = %s\n",tok);
        strcpy(keys,tok);
        cudaSetDevice(0);
        char* d_keys;
        cudaMalloc((void**)&d_keys, strlen(keys) * sizeof(char));
        cudaMemcpy(d_keys, keys, strlen(keys) * sizeof(char), cudaMemcpyHostToDevice);
        searchText<<<noOfBlocks, 1024>>>(d_text, d_keys, strlen(text),strlen(keys));
        cudaDeviceSynchronize();
        cudaFree(d_keys);
        free(keys);
    }
    fclose(f1);          // the original never closed the dictionary file
    cudaFree(d_text);
    free(text);
    free(buf); free(str); free(str1);   // the original leaked all three
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.