serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
13,201 | extern "C" __global__ void add1_i(int * const vector, const int size)
{
    // Increment every element of an int vector by one, in place.
    // Flat global index across a (gridDim.x, gridDim.y) grid of 1-D blocks.
    const int gid = (blockIdx.y * gridDim.x * blockDim.x)
                  + blockIdx.x * blockDim.x
                  + threadIdx.x;
    if (gid < size) {
        vector[gid] += 1;
    }
}
extern "C" __global__ void add1_f(float * const vector, const int size)
{
    // Increment every element of a float vector by one, in place.
    // Flat global index across a (gridDim.x, gridDim.y) grid of 1-D blocks.
    const int gid = (blockIdx.y * gridDim.x * blockDim.x)
                  + blockIdx.x * blockDim.x
                  + threadIdx.x;
    if (gid < size) {
        vector[gid] += 1;
    }
}
extern "C" __global__ void sqrt_f(float * const vector, const int size)
{
    // Replace each element with its square root, in place.
    // Fix: use the single-precision sqrtf() instead of double-precision
    // sqrt(), avoiding an implicit float -> double -> float round trip
    // (significantly slower on hardware with weak double throughput).
    const int i = (blockIdx.y * gridDim.x * blockDim.x)
                + blockIdx.x * blockDim.x
                + threadIdx.x;
    if (i < size)
        vector[i] = sqrtf(vector[i]);
}
// Block-level tree reduction (sum): each block writes one partial sum to
// res[blockIdx.y * gridDim.x + blockIdx.x]; the caller must reduce those
// partials further. Requires blockDim.x * sizeof(int) bytes of dynamic
// shared memory at launch.
// NOTE(review): the halving loop assumes blockDim.x is a power of two;
// other block sizes would silently drop elements — confirm launch config.
extern "C" __global__ void reduction_add_i(const int * const d_in, int * const res, const int size)
{
extern __shared__ int s_data[];
int idxOffset = (blockIdx.y * gridDim.x * blockDim.x ) + blockDim.x * blockIdx.x;
int idx = idxOffset + threadIdx.x;
int tx = threadIdx.x;
// 0 is the additive identity, so out-of-range threads don't affect the sum
s_data[tx] = (idx < size)? d_in[idx]: 0;
__syncthreads();
int stride;
for (stride=blockDim.x/2; stride > 0; stride>>=1) {
if (tx < stride) {
s_data[tx] += s_data[tx + stride];
}
// barrier kept outside the divergent if: every thread must reach it
__syncthreads();
}
if (tx == 0)
res[blockIdx.y * gridDim.x + blockIdx.x] = s_data[0];
}
// Block-level tree reduction (sum) over unsigned ints; same scheme as
// reduction_add_i: one partial per block in res[blockIdx.y*gridDim.x+blockIdx.x].
// Requires blockDim.x * sizeof(int) bytes of dynamic shared memory.
// NOTE(review): the shared buffer is declared int while the data is
// unsigned — addition is bit-identical under two's complement, so results
// are correct, but the type mismatch is worth confirming deliberate.
// NOTE(review): assumes power-of-two blockDim.x, as in reduction_add_i.
extern "C" __global__ void reduction_add_j(const unsigned int * const d_in, unsigned int * const res, const int size)
{
extern __shared__ int s_data[];
int idxOffset = (blockIdx.y * gridDim.x * blockDim.x ) + blockDim.x * blockIdx.x;
int idx = idxOffset + threadIdx.x;
int tx = threadIdx.x;
// 0 is the additive identity for the padded tail
s_data[tx] = (idx < size)? d_in[idx]: 0;
__syncthreads();
int stride;
for (stride=blockDim.x/2; stride > 0; stride>>=1) {
if (tx < stride) {
s_data[tx] += s_data[tx + stride];
}
// barrier outside the divergent if: all threads must reach it
__syncthreads();
}
if (tx == 0)
res[blockIdx.y * gridDim.x + blockIdx.x] = s_data[0];
}
// Block-level tree reduction (product): each block writes one partial
// product to res[blockIdx.y * gridDim.x + blockIdx.x]. Requires
// blockDim.x * sizeof(int) bytes of dynamic shared memory and, like the
// sibling add reductions, a power-of-two blockDim.x.
// Fix: removed a stray UNGUARDED "res[blockIdx.x] = s_data[0];" that the
// original executed from every thread of every block — a data race that
// clobbered the per-block results written by the guarded store above it.
extern "C" __global__ void reduction_multiply_j(const unsigned int * const d_in, unsigned int * const res, const int size)
{
extern __shared__ int s_data[];
int idxOffset = (blockIdx.y * gridDim.x * blockDim.x ) + blockDim.x * blockIdx.x;
int idx = idxOffset + threadIdx.x;
int tx = threadIdx.x;
// 1 is the multiplicative identity, so the padded tail cannot zero the product
s_data[tx] = (idx < size)? d_in[idx]: 1;
__syncthreads();
for (int stride = blockDim.x/2; stride > 0; stride >>= 1) {
if (tx < stride) {
s_data[tx] *= s_data[tx + stride];
}
// barrier outside the divergent if: all threads must reach it
__syncthreads();
}
if (tx == 0)
res[blockIdx.y * gridDim.x + blockIdx.x] = s_data[0];
}
|
13,202 | // function: oneXn
// m1 of shape H*L, m2 of shape N*L*M, m3 of shape N*H*M
// N blocks, 2 dimensional block size
// Batched matmul: m3[n] = m1 (H x L) * m2[n] (L x M) for each batch
// element n = blockIdx.x. Launch with size_n blocks and a 2-D block;
// threads grid-stride over the (H, M) output tile.
// Fix: removed the dead locals (size_n was copied into an unused 'n')
// and made the accumulator literal single-precision (0.0f, not 0.0).
__global__ void oneXn(float* m1, float* m2, float* m3,
                      int size_n, int size_l, int size_h, int size_m){
    const int l = size_l, h = size_h, m = size_m;
    (void)size_n; // batch count is implied by gridDim.x; kept for interface compatibility
    for (int h_index = threadIdx.x; h_index < h; h_index += blockDim.x) {
        for (int m_index = threadIdx.y; m_index < m; m_index += blockDim.y) {
            float value = 0.0f;
            for (int l_index = 0; l_index < l; l_index++) {
                value += m1[h_index * l + l_index]
                       * m2[blockIdx.x * l * m + l_index * m + m_index];
            }
            m3[blockIdx.x * h * m + h_index * m + m_index] = value;
        }
    }
}
// function: nXone
// m1 of shape N*H*L, m2 of shape L*M, m3 of shape N*H*M
// N blocks, 2 dimensional block size
// Batched matmul: m3[n] = m1[n] (H x L) * m2 (L x M) for each batch
// element n = blockIdx.x. Launch with size_n blocks and a 2-D block;
// threads grid-stride over the (H, M) output tile.
// Fix: removed the dead locals (size_n was copied into an unused 'n')
// and made the accumulator literal single-precision (0.0f, not 0.0).
__global__ void nXone(float* m1, float* m2, float* m3,
                      int size_n, int size_l, int size_h, int size_m){
    const int l = size_l, h = size_h, m = size_m;
    (void)size_n; // batch count is implied by gridDim.x; kept for interface compatibility
    for (int h_index = threadIdx.x; h_index < h; h_index += blockDim.x) {
        for (int m_index = threadIdx.y; m_index < m; m_index += blockDim.y) {
            float value = 0.0f;
            for (int l_index = 0; l_index < l; l_index++) {
                value += m1[blockIdx.x * h * l + h_index * l + l_index]
                       * m2[l_index * m + m_index];
            }
            m3[blockIdx.x * h * m + h_index * m + m_index] = value;
        }
    }
} |
13,203 | #include <iostream>
#include <math.h>
#include <cstdio>
using namespace std;
// Thread block size
const int blockSize = 16;
// Matrices are stored in row-major order:
// M(row, clo) = *(M.elements + row*M.stride + col);
typedef struct {
int width;       // number of columns
int height;      // number of rows
int stride;      // row pitch in elements; element(r,c) = elements[r*stride + c]
float* elements; // row-major data buffer (host or device pointer)
} Matrix;
// Get a matrix element
// Read element (row, col) of a row-major matrix with pitch A.stride.
__device__ float GetElement(const Matrix A, int row, int col) {
    const int offset = row * A.stride + col;
    return A.elements[offset];
}
// Set a matrix element
// Write `value` into element (row, col) of a row-major matrix with pitch A.stride.
__device__ void SetElement(Matrix A, int row, int col, float value) {
    const int offset = row * A.stride + col;
    A.elements[offset] = value;
}
// Get the blockSizexblockSize sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// Return a view (no copy) of the blockSize x blockSize tile of A located
// at tile coordinates (row, col); the view shares A's stride and storage.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col) {
    Matrix sub;
    sub.width  = blockSize;
    sub.height = blockSize;
    sub.stride = A.stride;
    sub.elements = A.elements + (A.stride * row + col) * blockSize;
    return sub;
}
// CPU matrix multiplication for evaluating results
void cpu_matrix_multi(float *matA, float *matB, float *matC, int m, int k, int n) {
for (int i = 0; i < m; i++) {
for (int j = 0; j < n; j++) {
float tmp = 0.0;
for (int l = 0; l < k; l++) {
tmp += matA[i*k + l] * matB[l*n + j];
}
matC[i*n + j] = tmp;
}
}
}
// Matrix multiplication kernel called by MatMul()
// Tiled matrix-multiply kernel: C = A * B using shared-memory tiles.
// Preconditions: blockDim == (blockSize, blockSize); A.width, A.height,
// and B.width are multiples of blockSize (no edge guards below);
// grid = (B.width/blockSize, A.height/blockSize).
__global__
void MatMulKernel_SharMem(const Matrix A, const Matrix B, Matrix C) {
// Block row and col
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
float Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate results
for (int m = 0; m < A.width/blockSize; m++) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ float As[blockSize][blockSize];
__shared__ float Bs[blockSize][blockSize];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < blockSize; e++)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread write one element
SetElement(Csub, row, col, Cvalue);
}
// Matrix multiplication - host code
// Matrix dimensions are assumed to be multiples of blockSize
// Matrix multiplication - host code: C = A * B on the device.
// Matrix dimensions are assumed to be multiples of blockSize.
void MatMul(const Matrix A, const Matrix B, Matrix C) {
    // Copy A to device memory.
    Matrix d_A;
    d_A.width = d_A.stride = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    // Copy B to device memory.
    // Fix: d_B.height was mistakenly set from B.width; harmless only for
    // square inputs.
    Matrix d_B;
    d_B.width = d_B.stride = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // Allocate C in device memory.
    Matrix d_C;
    d_C.width = d_C.stride = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    // Launch: one thread per output element, blockSize x blockSize tiles.
    dim3 dimBlock(blockSize, blockSize, 1);
    dim3 dimGrid(B.width/dimBlock.x, A.height/dimBlock.y, 1);
    MatMulKernel_SharMem<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Wait for the kernel before reading back (cudaThreadSynchronize is
    // deprecated; cudaDeviceSynchronize is its replacement, and the sync
    // now precedes the D2H copy instead of following it).
    cudaDeviceSynchronize();
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // Free device memory.
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Multiply two 1024x1024 random matrices on the GPU and validate against
// the CPU reference implementation.
int main() {
    // Host matrix A with reproducible pseudo-random values in [0, 1].
    Matrix h_A;
    h_A.height = 1024; h_A.width = h_A.stride = 1024;
    float* h_matA = new float[h_A.height * h_A.width];
    std::srand(1103);
    for (int i = 0; i < h_A.height; i++)
        for (int j = 0; j < h_A.width; j++)
            h_matA[i*h_A.width+j] = float(std::rand())/float(RAND_MAX);
    h_A.elements = h_matA;
    // Host matrix B.
    Matrix h_B;
    h_B.height = 1024; h_B.width = h_B.stride = 1024;
    float* h_matB = new float[h_B.height * h_B.width];
    for (int i = 0; i < h_B.height; i++)
        for (int j = 0; j < h_B.width; j++)
            h_matB[i*h_B.width+j] = float(std::rand())/float(RAND_MAX);
    h_B.elements = h_matB;
    // Result matrix C = A * B (GPU).
    Matrix h_C;
    h_C.height = h_A.height; h_C.width = h_C.stride = h_B.width;
    float* h_matC = new float[h_A.height * h_B.width];
    h_C.elements = h_matC;
    MatMul(h_A, h_B, h_C);
    // CPU reference for validation.
    float* h_matC_cpu = new float[h_A.height * h_B.width];
    cpu_matrix_multi(h_A.elements, h_B.elements, h_matC_cpu, h_A.height, h_A.width, h_B.width);
    // Fix: the original compared h_C.elements against h_matC — the SAME
    // buffer — so the check could never fail. Compare against the CPU
    // reference instead, with a tolerance loose enough for float
    // summation-order differences over 1024-term dot products (an
    // absolute 1e-6 would false-fail even correct results).
    bool res_flag = false;
    float resol = 0.001f;
    for (int i = 0; i < h_C.height; i++) {
        for (int j = 0; j < h_C.width; j++) {
            if (fabs(h_C.elements[i*h_C.width+j] - h_matC_cpu[i*h_C.width+j]) > resol)
                res_flag = true;
        }
    }
    if (res_flag == false)
        cout << "Matrix multiplication by GPU is right! " << endl;
    else
        cout << "Results are not right! " << endl;
    // Free memory on host.
    delete [] h_matA;
    delete [] h_matB;
    delete [] h_matC;
    delete [] h_matC_cpu;
    return 0;
}
|
13,204 | #include <stdio.h>
#include "cuda.h"
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
#define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1)
// Abort with a diagnostic if the most recent CUDA runtime call failed.
void check_error (const char* message) {
    const cudaError_t status = cudaGetLastError ();
    if (status == cudaSuccess)
        return;
    printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (status));
    exit(-1);
}
// sw4: one step of a 4th-order finite-difference stencil update over an
// N^3 grid (SW4 seismic-wave style). One thread per interior grid point
// (2 <= i,j,k <= N-3); uacc_* are accumulated in place as
// uacc += a1*uacc + cof*r. mu/la are N^3 material fields; strx/stry/strz
// are per-axis 1-D arrays of length N.
// NOTE(review): the guard uses bitwise '&' between comparisons — correct
// here since each operand is 0/1, but '&&' would be clearer.
__global__ void sw4 (float * __restrict__ uacc_0, float * __restrict__ uacc_1, float * __restrict__ uacc_2, float * __restrict__ u_0, float * __restrict__ u_1, float * __restrict__ u_2, float * __restrict__ mu, float * __restrict__ la, float * __restrict__ strx, float * __restrict__ stry, float * __restrict__ strz, int N) {
//Determing the block's indices
int blockdim_i= (int)(blockDim.x);
int i0 = (int)(blockIdx.x)*(blockdim_i);
int i = max (i0, 0) + (int)(threadIdx.x);
int blockdim_j= (int)(blockDim.y);
int j0 = (int)(blockIdx.y)*(blockdim_j);
int j = max (j0, 0) + (int)(threadIdx.y);
int blockdim_k= (int)(blockDim.z);
int k0 = (int)(blockIdx.z)*(blockdim_k);
int k = max (k0, 0) + (int)(threadIdx.z);
// Assumptions
int a1 = 1;
float h = 3.7;
float cof = 1e0 / ( h * h);
if (i>=2 & j>=2 & k>=2 & i<=N-3 & j<=N-3 & k<=N-3) {
/* 28 * 3 = 84 flops */
// 4th-order one-sided/central coefficient combinations of mu*str along
// each axis (mux* for x, muy* for y, muz* for z).
float mux1 = mu[k*N*N+j*N+i-1] * strx[i-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i-2] * strx[i-2]);
float mux2 = mu[k*N*N+j*N+i-2] * strx[i-2] + mu[k*N*N+j*N+i+1] * strx[i+1] + 3 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i-1] * strx[i-1]);
float mux3 = mu[k*N*N+j*N+i-1] * strx[i-1] + mu[k*N*N+j*N+i+2] * strx[i+2] + 3 * (mu[k*N*N+j*N+i+1] * strx[i+1] + mu[k*N*N+j*N+i] * strx[i]);
float mux4 = mu[k*N*N+j*N+i+1] * strx[i+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strx[i] + mu[k*N*N+j*N+i+2] * strx[i+2]);
float muy1 = mu[k*N*N+(j-1)*N+i] * stry[j-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j-2)*N+i] * stry[j-2]);
float muy2 = mu[k*N*N+(j-2)*N+i] * stry[j-2] + mu[k*N*N+(j+1)*N+i] * stry[j+1] + 3 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j-1)*N+i] * stry[j-1]);
float muy3 = mu[k*N*N+(j-1)*N+i] * stry[j-1] + mu[k*N*N+(j+2)*N+i] * stry[j+2] + 3 * (mu[k*N*N+(j+1)*N+i] * stry[j+1] + mu[k*N*N+j*N+i] * stry[j]);
float muy4 = mu[k*N*N+(j+1)*N+i] * stry[j+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * stry[j] + mu[k*N*N+(j+2)*N+i] * stry[j+2]);
float muz1 = mu[(k-1)*N*N+j*N+i] * strz[k-1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k-2)*N*N+j*N+i] * strz[k-2]);
float muz2 = mu[(k-2)*N*N+j*N+i] * strz[k-2] + mu[(k+1)*N*N+j*N+i] * strz[k+1] + 3 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k-1)*N*N+j*N+i] * strz[k-1]);
float muz3 = mu[(k-1)*N*N+j*N+i] * strz[k-1] + mu[(k+2)*N*N+j*N+i] * strz[k+2] + 3 * (mu[(k+1)*N*N+j*N+i] * strz[k+1] + mu[k*N*N+j*N+i] * strz[k]);
float muz4 = mu[(k+1)*N*N+j*N+i] * strz[k+1] - 3e0 / 4 * (mu[k*N*N+j*N+i] * strz[k] + mu[(k+2)*N*N+j*N+i] * strz[k+2]);
/* 78 * 3 = 234 flops */
// Second-derivative-like terms along each axis for the three components.
float r1 = 1e0 / 6 * (strx[i] * ((2 * mux1 + la[k*N*N+j*N+i-1] * strx[i-1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strx[i] + la[k*N*N+j*N+i-2] * strx[i-2])) * (u_0[k*N*N+j*N+i-2] - u_0[k*N*N+j*N+i]) + (2 * mux2 + la[k*N*N+j*N+i-2] * strx[i-2] + la[k*N*N+j*N+i+1] * strx[i+1] + 3 * (la[k*N*N+j*N+i] * strx[i] + la[k*N*N+j*N+i-1] * strx[i-1])) * (u_0[k*N*N+j*N+i-1] - u_0[k*N*N+j*N+i]) + (2 * mux3 + la[k*N*N+j*N+i-1] * strx[i-1] + la[k*N*N+j*N+i+2] * strx[i+2] + 3 * (la[k*N*N+j*N+i+1] * strx[i+1] + la[k*N*N+j*N+i] * strx[i])) * (u_0[k*N*N+j*N+i+1] - u_0[k*N*N+j*N+i]) + (2 * mux4 + la[k*N*N+j*N+i+1] * strx[i+1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strx[i] + la[k*N*N+j*N+i+2] * strx[i+2])) * (u_0[k*N*N+j*N+i+2] - u_0[k*N*N+j*N+i])) + stry[j] * (muy1 * (u_0[k*N*N+(j-2)*N+i] - u_0[k*N*N+j*N+i]) + muy2 * (u_0[k*N*N+(j-1)*N+i] - u_0[k*N*N+j*N+i]) + muy3 * (u_0[k*N*N+(j+1)*N+i] - u_0[k*N*N+j*N+i]) + muy4 * (u_0[k*N*N+(j+2)*N+i] - u_0[k*N*N+j*N+i])) + strz[k] * (muz1 * (u_0[(k-2)*N*N+j*N+i] - u_0[k*N*N+j*N+i]) + muz2 * (u_0[(k-1)*N*N+j*N+i] - u_0[k*N*N+j*N+i]) + muz3 * (u_0[(k+1)*N*N+j*N+i] - u_0[k*N*N+j*N+i]) + muz4 * (u_0[(k+2)*N*N+j*N+i] - u_0[k*N*N+j*N+i])));
float r2 = 1e0 / 6 * (strx[i] * (mux1 * (u_1[k*N*N+j*N+i-2] - u_1[k*N*N+j*N+i]) + mux2 * (u_1[k*N*N+j*N+i-1] - u_1[k*N*N+j*N+i]) + mux3 * (u_1[k*N*N+j*N+i+1] - u_1[k*N*N+j*N+i]) + mux4 * (u_1[k*N*N+j*N+i+2] - u_1[k*N*N+j*N+i])) + stry[j] * ((2 * muy1 + la[k*N*N+(j-1)*N+i] * stry[j-1] - 3e0 / 4 * (la[k*N*N+j*N+i] * stry[j] + la[k*N*N+(j-2)*N+i] * stry[j-2])) * (u_1[k*N*N+(j-2)*N+i] - u_1[k*N*N+j*N+i]) + (2 * muy2 + la[k*N*N+(j-2)*N+i] * stry[j-2] + la[k*N*N+(j+1)*N+i] * stry[j+1] + 3 * (la[k*N*N+j*N+i] * stry[j] + la[k*N*N+(j-1)*N+i] * stry[j-1])) * (u_1[k*N*N+(j-1)*N+i] - u_1[k*N*N+j*N+i]) + (2 * muy3 + la[k*N*N+(j-1)*N+i] * stry[j-1] + la[k*N*N+(j+2)*N+i] * stry[j+2] + 3 * (la[k*N*N+(j+1)*N+i] * stry[j+1] + la[k*N*N+j*N+i] * stry[j])) * (u_1[k*N*N+(j+1)*N+i] - u_1[k*N*N+j*N+i]) + (2 * muy4 + la[k*N*N+(j+1)*N+i] * stry[j+1] - 3e0 / 4 * (la[k*N*N+j*N+i] * stry[j] + la[k*N*N+(j+2)*N+i] * stry[j+2])) * (u_1[k*N*N+(j+2)*N+i] - u_1[k*N*N+j*N+i])) + strz[k] * (muz1 * (u_1[(k-2)*N*N+j*N+i] - u_1[k*N*N+j*N+i]) + muz2 * (u_1[(k-1)*N*N+j*N+i] - u_1[k*N*N+j*N+i]) + muz3 * (u_1[(k+1)*N*N+j*N+i] - u_1[k*N*N+j*N+i]) + muz4 * (u_1[(k+2)*N*N+j*N+i] - u_1[k*N*N+j*N+i])));
float r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k*N*N+j*N+i-2] - u_2[k*N*N+j*N+i]) + mux2 * (u_2[k*N*N+j*N+i-1] - u_2[k*N*N+j*N+i]) + mux3 * (u_2[k*N*N+j*N+i+1] - u_2[k*N*N+j*N+i]) + mux4 * (u_2[k*N*N+j*N+i+2] - u_2[k*N*N+j*N+i])) + stry[j] * (muy1 * (u_2[k*N*N+(j-2)*N+i] - u_2[k*N*N+j*N+i]) + muy2 * (u_2[k*N*N+(j-1)*N+i] - u_2[k*N*N+j*N+i]) + muy3 * (u_2[k*N*N+(j+1)*N+i] - u_2[k*N*N+j*N+i]) + muy4 * (u_2[k*N*N+(j+2)*N+i] - u_2[k*N*N+j*N+i])) + strz[k] * ((2 * muz1 + la[(k-1)*N*N+j*N+i] * strz[k-1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strz[k] + la[(k-2)*N*N+j*N+i] * strz[k-2])) * (u_2[(k-2)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz2 + la[(k-2)*N*N+j*N+i] * strz[k-2] + la[(k+1)*N*N+j*N+i] * strz[k+1] + 3 * (la[k*N*N+j*N+i] * strz[k] + la[(k-1)*N*N+j*N+i] * strz[k-1])) * (u_2[(k-1)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz3 + la[(k-1)*N*N+j*N+i] * strz[k-1] + la[(k+2)*N*N+j*N+i] * strz[k+2] + 3 * (la[(k+1)*N*N+j*N+i] * strz[k+1] + la[k*N*N+j*N+i] * strz[k])) * (u_2[(k+1)*N*N+j*N+i] - u_2[k*N*N+j*N+i]) + (2 * muz4 + la[(k+1)*N*N+j*N+i] * strz[k+1] - 3e0 / 4 * (la[k*N*N+j*N+i] * strz[k] + la[(k+2)*N*N+j*N+i] * strz[k+2])) * (u_2[(k+2)*N*N+j*N+i] - u_2[k*N*N+j*N+i])));
/* 120 * 3 = 360 flops */
// Mixed-derivative cross terms (xy, xz, yz) added to each component.
r1 = r1 + strx[i] * stry[j] * (1e0 / 144) * (la[k*N*N+j*N+i-2] * (u_1[k*N*N+(j-2)*N+i-2] - u_1[k*N*N+(j+2)*N+i-2] + 8 * (-u_1[k*N*N+(j-1)*N+i-2] + u_1[k*N*N+(j+1)*N+i-2])) - 8 * (la[k*N*N+j*N+i-1] * (u_1[k*N*N+(j-2)*N+i-1] - u_1[k*N*N+(j+2)*N+i-1] + 8 * (-u_1[k*N*N+(j-1)*N+i-1] + u_1[k*N*N+(j+1)*N+i-1]))) + 8 * (la[k*N*N+j*N+i+1] * (u_1[k*N*N+(j-2)*N+i+1] - u_1[k*N*N+(j+2)*N+i+1] + 8 * (-u_1[k*N*N+(j-1)*N+i+1] + u_1[k*N*N+(j+1)*N+i+1]))) - (la[k*N*N+j*N+i+2] * (u_1[k*N*N+(j-2)*N+i+2] - u_1[k*N*N+(j+2)*N+i+2] + 8 * (-u_1[k*N*N+(j-1)*N+i+2] + u_1[k*N*N+(j+1)*N+i+2])))) + strx[i] * strz[k] * (1e0 / 144) * (la[k*N*N+j*N+i-2] * (u_2[(k-2)*N*N+j*N+i-2] - u_2[(k+2)*N*N+j*N+i-2] + 8 * (-u_2[(k-1)*N*N+j*N+i-2] + u_2[(k+1)*N*N+j*N+i-2])) - 8 * (la[k*N*N+j*N+i-1] * (u_2[(k-2)*N*N+j*N+i-1] - u_2[(k+2)*N*N+j*N+i-1] + 8 * (-u_2[(k-1)*N*N+j*N+i-1] + u_2[(k+1)*N*N+j*N+i-1]))) + 8 * (la[k*N*N+j*N+i+1] * (u_2[(k-2)*N*N+j*N+i+1] - u_2[(k+2)*N*N+j*N+i+1] + 8 * (-u_2[(k-1)*N*N+j*N+i+1] + u_2[(k+1)*N*N+j*N+i+1]))) - (la[k*N*N+j*N+i+2] * (u_2[(k-2)*N*N+j*N+i+2] - u_2[(k+2)*N*N+j*N+i+2] + 8 * (-u_2[(k-1)*N*N+j*N+i+2] + u_2[(k+1)*N*N+j*N+i+2])))) + strx[i] * stry[j] * (1e0 / 144) * (mu[k*N*N+(j-2)*N+i] * (u_1[k*N*N+(j-2)*N+i-2] - u_1[k*N*N+(j-2)*N+i+2] + 8 * (-u_1[k*N*N+(j-2)*N+i-1] + u_1[k*N*N+(j-2)*N+i+1])) - 8 * (mu[k*N*N+(j-1)*N+i] * (u_1[k*N*N+(j-1)*N+i-2] - u_1[k*N*N+(j-1)*N+i+2] + 8 * (-u_1[k*N*N+(j-1)*N+i-1] + u_1[k*N*N+(j-1)*N+i+1]))) + 8 * (mu[k*N*N+(j+1)*N+i] * (u_1[k*N*N+(j+1)*N+i-2] - u_1[k*N*N+(j+1)*N+i+2] + 8 * (-u_1[k*N*N+(j+1)*N+i-1] + u_1[k*N*N+(j+1)*N+i+1]))) - (mu[k*N*N+(j+2)*N+i] * (u_1[k*N*N+(j+2)*N+i-2] - u_1[k*N*N+(j+2)*N+i+2] + 8 * (-u_1[k*N*N+(j+2)*N+i-1] + u_1[k*N*N+(j+2)*N+i+1])))) + strx[i] * strz[k] * (1e0 / 144) * (mu[(k-2)*N*N+j*N+i] * (u_2[(k-2)*N*N+j*N+i-2] - u_2[(k-2)*N*N+j*N+i+2] + 8 * (-u_2[(k-2)*N*N+j*N+i-1] + u_2[(k-2)*N*N+j*N+i+1])) - 8 * (mu[(k-1)*N*N+j*N+i] * (u_2[(k-1)*N*N+j*N+i-2] - u_2[(k-1)*N*N+j*N+i+2] + 8 * (-u_2[(k-1)*N*N+j*N+i-1] + 
u_2[(k-1)*N*N+j*N+i+1]))) + 8 * (mu[(k+1)*N*N+j*N+i] * (u_2[(k+1)*N*N+j*N+i-2] - u_2[(k+1)*N*N+j*N+i+2] + 8 * (-u_2[(k+1)*N*N+j*N+i-1] + u_2[(k+1)*N*N+j*N+i+1]))) - (mu[(k+2)*N*N+j*N+i] * (u_2[(k+2)*N*N+j*N+i-2] - u_2[(k+2)*N*N+j*N+i+2] + 8 * (-u_2[(k+2)*N*N+j*N+i-1] + u_2[(k+2)*N*N+j*N+i+1]))));
r2 = r2 + strx[i] * stry[j] * (1e0 / 144) * (mu[k*N*N+j*N+i-2] * (u_0[k*N*N+(j-2)*N+i-2] - u_0[k*N*N+(j+2)*N+i-2] + 8 * (-u_0[k*N*N+(j-1)*N+i-2] + u_0[k*N*N+(j+1)*N+i-2])) - 8 * (mu[k*N*N+j*N+i-1] * (u_0[k*N*N+(j-2)*N+i-1] - u_0[k*N*N+(j+2)*N+i-1] + 8 * (-u_0[k*N*N+(j-1)*N+i-1] + u_0[k*N*N+(j+1)*N+i-1]))) + 8 * (mu[k*N*N+j*N+i+1] * (u_0[k*N*N+(j-2)*N+i+1] - u_0[k*N*N+(j+2)*N+i+1] + 8 * (-u_0[k*N*N+(j-1)*N+i+1] + u_0[k*N*N+(j+1)*N+i+1]))) - (mu[k*N*N+j*N+i+2] * (u_0[k*N*N+(j-2)*N+i+2] - u_0[k*N*N+(j+2)*N+i+2] + 8 * (-u_0[k*N*N+(j-1)*N+i+2] + u_0[k*N*N+(j+1)*N+i+2])))) + strx[i] * stry[j] * (1e0 / 144) * (la[k*N*N+(j-2)*N+i] * (u_0[k*N*N+(j-2)*N+i-2] - u_0[k*N*N+(j-2)*N+i+2] + 8 * (-u_0[k*N*N+(j-2)*N+i-1] + u_0[k*N*N+(j-2)*N+i+1])) - 8 * (la[k*N*N+(j-1)*N+i] * (u_0[k*N*N+(j-1)*N+i-2] - u_0[k*N*N+(j-1)*N+i+2] + 8 * (-u_0[k*N*N+(j-1)*N+i-1] + u_0[k*N*N+(j-1)*N+i+1]))) + 8 * (la[k*N*N+(j+1)*N+i] * (u_0[k*N*N+(j+1)*N+i-2] - u_0[k*N*N+(j+1)*N+i+2] + 8 * (-u_0[k*N*N+(j+1)*N+i-1] + u_0[k*N*N+(j+1)*N+i+1]))) - (la[k*N*N+(j+2)*N+i] * (u_0[k*N*N+(j+2)*N+i-2] - u_0[k*N*N+(j+2)*N+i+2] + 8 * (-u_0[k*N*N+(j+2)*N+i-1] + u_0[k*N*N+(j+2)*N+i+1])))) + stry[j] * strz[k] * (1e0 / 144) * (la[k*N*N+(j-2)*N+i] * (u_2[(k-2)*N*N+(j-2)*N+i] - u_2[(k+2)*N*N+(j-2)*N+i] + 8 * (-u_2[(k-1)*N*N+(j-2)*N+i] + u_2[(k+1)*N*N+(j-2)*N+i])) - 8 * (la[k*N*N+(j-1)*N+i] * (u_2[(k-2)*N*N+(j-1)*N+i] - u_2[(k+2)*N*N+(j-1)*N+i] + 8 * (-u_2[(k-1)*N*N+(j-1)*N+i] + u_2[(k+1)*N*N+(j-1)*N+i]))) + 8 * (la[k*N*N+(j+1)*N+i] * (u_2[(k-2)*N*N+(j+1)*N+i] - u_2[(k+2)*N*N+(j+1)*N+i] + 8 * (-u_2[(k-1)*N*N+(j+1)*N+i] + u_2[(k+1)*N*N+(j+1)*N+i]))) - (la[k*N*N+(j+2)*N+i] * (u_2[(k-2)*N*N+(j+2)*N+i] - u_2[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_2[(k-1)*N*N+(j+2)*N+i] + u_2[(k+1)*N*N+(j+2)*N+i])))) + stry[j] * strz[k] * (1e0 / 144) * (mu[(k-2)*N*N+j*N+i] * (u_2[(k-2)*N*N+(j-2)*N+i] - u_2[(k-2)*N*N+(j+2)*N+i] + 8 * (-u_2[(k-2)*N*N+(j-1)*N+i] + u_2[(k-2)*N*N+(j+1)*N+i])) - 8 * (mu[(k-1)*N*N+j*N+i] * (u_2[(k-1)*N*N+(j-2)*N+i] - 
u_2[(k-1)*N*N+(j+2)*N+i] + 8 * (-u_2[(k-1)*N*N+(j-1)*N+i] + u_2[(k-1)*N*N+(j+1)*N+i]))) + 8 * (mu[(k+1)*N*N+j*N+i] * (u_2[(k+1)*N*N+(j-2)*N+i] - u_2[(k+1)*N*N+(j+2)*N+i] + 8 * (-u_2[(k+1)*N*N+(j-1)*N+i] + u_2[(k+1)*N*N+(j+1)*N+i]))) - (mu[(k+2)*N*N+j*N+i] * (u_2[(k+2)*N*N+(j-2)*N+i] - u_2[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_2[(k+2)*N*N+(j-1)*N+i] + u_2[(k+2)*N*N+(j+1)*N+i]))));
r3 = r3 + strx[i] * strz[k] * (1e0 / 144) * (mu[k*N*N+j*N+i-2] * (u_0[(k-2)*N*N+j*N+i-2] - u_0[(k+2)*N*N+j*N+i-2] + 8 * (-u_0[(k-1)*N*N+j*N+i-2] + u_0[(k+1)*N*N+j*N+i-2])) - 8 * (mu[k*N*N+j*N+i-1] * (u_0[(k-2)*N*N+j*N+i-1] - u_0[(k+2)*N*N+j*N+i-1] + 8 * (-u_0[(k-1)*N*N+j*N+i-1] + u_0[(k+1)*N*N+j*N+i-1]))) + 8 * (mu[k*N*N+j*N+i+1] * (u_0[(k-2)*N*N+j*N+i+1] - u_0[(k+2)*N*N+j*N+i+1] + 8 * (-u_0[(k-1)*N*N+j*N+i+1] + u_0[(k+1)*N*N+j*N+i+1]))) - (mu[k*N*N+j*N+i+2] * (u_0[(k-2)*N*N+j*N+i+2] - u_0[(k+2)*N*N+j*N+i+2] + 8 * (-u_0[(k-1)*N*N+j*N+i+2] + u_0[(k+1)*N*N+j*N+i+2])))) + stry[j] * strz[k] * (1e0 / 144) * (mu[k*N*N+(j-2)*N+i] * (u_1[(k-2)*N*N+(j-2)*N+i] - u_1[(k+2)*N*N+(j-2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-2)*N+i] + u_1[(k+1)*N*N+(j-2)*N+i])) - 8 * (mu[k*N*N+(j-1)*N+i] * (u_1[(k-2)*N*N+(j-1)*N+i] - u_1[(k+2)*N*N+(j-1)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-1)*N+i] + u_1[(k+1)*N*N+(j-1)*N+i]))) + 8 * (mu[k*N*N+(j+1)*N+i] * (u_1[(k-2)*N*N+(j+1)*N+i] - u_1[(k+2)*N*N+(j+1)*N+i] + 8 * (-u_1[(k-1)*N*N+(j+1)*N+i] + u_1[(k+1)*N*N+(j+1)*N+i]))) - (mu[k*N*N+(j+2)*N+i] * (u_1[(k-2)*N*N+(j+2)*N+i] - u_1[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j+2)*N+i] + u_1[(k+1)*N*N+(j+2)*N+i])))) + strx[i] * strz[k] * (1e0 / 144) * (la[(k-2)*N*N+j*N+i] * (u_0[(k-2)*N*N+j*N+i-2] - u_0[(k-2)*N*N+j*N+i+2] + 8 * (-u_0[(k-2)*N*N+j*N+i-1] + u_0[(k-2)*N*N+j*N+i+1])) - 8 * (la[(k-1)*N*N+j*N+i] * (u_0[(k-1)*N*N+j*N+i-2] - u_0[(k-1)*N*N+j*N+i+2] + 8 * (-u_0[(k-1)*N*N+j*N+i-1] + u_0[(k-1)*N*N+j*N+i+1]))) + 8 * (la[(k+1)*N*N+j*N+i] * (u_0[(k+1)*N*N+j*N+i-2] - u_0[(k+1)*N*N+j*N+i+2] + 8 * (-u_0[(k+1)*N*N+j*N+i-1] + u_0[(k+1)*N*N+j*N+i+1]))) - (la[(k+2)*N*N+j*N+i] * (u_0[(k+2)*N*N+j*N+i-2] - u_0[(k+2)*N*N+j*N+i+2] + 8 * (-u_0[(k+2)*N*N+j*N+i-1] + u_0[(k+2)*N*N+j*N+i+1])))) + stry[j] * strz[k] * (1e0 / 144) * (la[(k-2)*N*N+j*N+i] * (u_1[(k-2)*N*N+(j-2)*N+i] - u_1[(k-2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-2)*N*N+(j-1)*N+i] + u_1[(k-2)*N*N+(j+1)*N+i])) - 8 * (la[(k-1)*N*N+j*N+i] * (u_1[(k-1)*N*N+(j-2)*N+i] - 
u_1[(k-1)*N*N+(j+2)*N+i] + 8 * (-u_1[(k-1)*N*N+(j-1)*N+i] + u_1[(k-1)*N*N+(j+1)*N+i]))) + 8 * (la[(k+1)*N*N+j*N+i] * (u_1[(k+1)*N*N+(j-2)*N+i] - u_1[(k+1)*N*N+(j+2)*N+i] + 8 * (-u_1[(k+1)*N*N+(j-1)*N+i] + u_1[(k+1)*N*N+(j+1)*N+i]))) - (la[(k+2)*N*N+j*N+i] * (u_1[(k+2)*N*N+(j-2)*N+i] - u_1[(k+2)*N*N+(j+2)*N+i] + 8 * (-u_1[(k+2)*N*N+(j-1)*N+i] + u_1[(k+2)*N*N+(j+1)*N+i]))));
/* 3 * 3 = 9 flops */
// Accumulate into the acceleration fields: uacc = a1*uacc + cof*r.
uacc_0[k*N*N+j*N+i] = a1 * uacc_0[k*N*N+j*N+i] + cof * r1;
uacc_1[k*N*N+j*N+i] = a1 * uacc_1[k*N*N+j*N+i] + cof * r2;
uacc_2[k*N*N+j*N+i] = a1 * uacc_2[k*N*N+j*N+i] + cof * r3;
}
}
// Allocates device buffers, uploads all fields, launches sw4 once over
// the N^3 grid, and copies the three acceleration fields back to host.
// Fix: the original never checked the kernel launch or execution status,
// so a failed launch silently returned stale host data; launch checks and
// a cudaDeviceSynchronize were added before the device-to-host copies.
extern "C" void host_code (float *h_uacc_0, float *h_uacc_1, float *h_uacc_2, float *h_u_0, float *h_u_1, float *h_u_2, float *h_mu, float *h_la, float *h_strx, float *h_stry, float *h_strz, int N) {
float *uacc_0;
cudaMalloc (&uacc_0, sizeof(float)*N*N*N);
check_error ("Failed to allocate device memory for uacc_0\n");
cudaMemcpy (uacc_0, h_uacc_0, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
float *uacc_1;
cudaMalloc (&uacc_1, sizeof(float)*N*N*N);
check_error ("Failed to allocate device memory for uacc_1\n");
cudaMemcpy (uacc_1, h_uacc_1, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
float *uacc_2;
cudaMalloc (&uacc_2, sizeof(float)*N*N*N);
check_error ("Failed to allocate device memory for uacc_2\n");
cudaMemcpy (uacc_2, h_uacc_2, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
float *u_0;
cudaMalloc (&u_0, sizeof(float)*N*N*N);
check_error ("Failed to allocate device memory for u_0\n");
cudaMemcpy (u_0, h_u_0, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
float *u_1;
cudaMalloc (&u_1, sizeof(float)*N*N*N);
check_error ("Failed to allocate device memory for u_1\n");
cudaMemcpy (u_1, h_u_1, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
float *u_2;
cudaMalloc (&u_2, sizeof(float)*N*N*N);
check_error ("Failed to allocate device memory for u_2\n");
cudaMemcpy (u_2, h_u_2, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
float *mu;
cudaMalloc (&mu, sizeof(float)*N*N*N);
check_error ("Failed to allocate device memory for mu\n");
cudaMemcpy (mu, h_mu, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
float *la;
cudaMalloc (&la, sizeof(float)*N*N*N);
check_error ("Failed to allocate device memory for la\n");
cudaMemcpy (la, h_la, sizeof(float)*N*N*N, cudaMemcpyHostToDevice);
float *strx;
cudaMalloc (&strx, sizeof(float)*N);
check_error ("Failed to allocate device memory for strx\n");
cudaMemcpy (strx, h_strx, sizeof(float)*N, cudaMemcpyHostToDevice);
float *stry;
cudaMalloc (&stry, sizeof(float)*N);
check_error ("Failed to allocate device memory for stry\n");
cudaMemcpy (stry, h_stry, sizeof(float)*N, cudaMemcpyHostToDevice);
float *strz;
cudaMalloc (&strz, sizeof(float)*N);
check_error ("Failed to allocate device memory for strz\n");
cudaMemcpy (strz, h_strz, sizeof(float)*N, cudaMemcpyHostToDevice);
// One thread per grid point; ceil() is the file's ceiling-divide macro.
dim3 blockconfig (32, 4, 2);
dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), ceil(N, blockconfig.z));
sw4 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N);
check_error ("sw4 kernel launch failed\n");
cudaDeviceSynchronize ();
check_error ("sw4 kernel execution failed\n");
cudaMemcpy (h_uacc_0, uacc_0, sizeof(float)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_1, uacc_1, sizeof(float)*N*N*N, cudaMemcpyDeviceToHost);
cudaMemcpy (h_uacc_2, uacc_2, sizeof(float)*N*N*N, cudaMemcpyDeviceToHost);
check_error ("Failed to copy results back to host\n");
cudaFree (uacc_0);
cudaFree (uacc_1);
cudaFree (uacc_2);
cudaFree (u_0);
cudaFree (u_1);
cudaFree (u_2);
cudaFree (mu);
cudaFree (la);
cudaFree (strx);
cudaFree (stry);
cudaFree (strz);
}
|
13,205 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
#include <stdio.h>
//static float N = 1000;
// Elementwise vector add: c[i] = a[i] + b[i], one element per thread.
// Fix: the element count is now an int. As a float it could not exactly
// represent counts above 2^24, which would break the "i < N" bounds
// guard for large vectors. Existing call sites passing an int are
// unaffected.
__global__ void addKernel(float*a, float*b, float*c, int N)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i<N) {
        c[i] = a[i] + b[i];
    }
}
// Smoke test: probe cudaMalloc for a double buffer and a float buffer,
// reporting success or failure of each allocation.
int main() {
    // Probe 1: small double allocation.
    double *d_a;
    cudaMalloc((void**)&d_a, 1024 * sizeof(double));
    cudaError err = cudaGetLastError();
    if (err == cudaSuccess) {
        printf("double its ok!\n");
    }
    else {
        printf("double its fatal error\n");
    }
    // Probe 2: float allocation.
    // Fix: the original passed &d_a here a second time, leaking the
    // first allocation and leaving d_b uninitialized.
    float *d_b;
    int n = 100000;
    cudaMalloc((void**)&d_b, n * sizeof(float));
    err = cudaGetLastError();
    if (err == cudaSuccess) {
        printf("float its ok!\n");
    }
    else {
        printf("float its fatal error\n");
    }
    /*int N = 256 * 256;
    float *h_a, *h_b, *h_res;
    float *d_a, *d_b, *d_res;
    h_a = (float*)malloc(N * sizeof(float));
    h_b = (float*)malloc(N * sizeof(float));
    h_res = (float*)malloc(N * sizeof(float));
    cudaMalloc((void**)&d_a, N * sizeof(float));
    cudaMalloc((void**)&d_b, N * sizeof(float));
    cudaMalloc((void**)&d_res, N * sizeof(float));
    for (int i = 0; i < N; i++) {
    h_a[0]= 100;
    h_b[0] = 33;
    h_res[i] = 0;
    }
    cudaMemcpy(d_a, h_a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N * sizeof(float), cudaMemcpyHostToDevice);
    int threads = 256;
    int blocks = 256;
    addKernel << <threads, blocks >> > (d_a, d_b, d_res, N);
    cudaMemcpy(h_res, d_res, N * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < 10; i++) {
    printf("%d \n", h_res[i]);
    }
    */
    // Fix: release both device allocations before exiting.
    cudaFree(d_a);
    cudaFree(d_b);
    system("pause");
    return 0;
} |
13,206 | /* Vector reduction example using shared memory. WOrks for small vectors.
Author: Naga Kandasamy
Date modified: 02/14/2017
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <time.h>
// includes, kernels
#include "vector_reduction_kernel.cu"
// For simplicity, just to get the idea in this MP, we're fixing the problem size to 512 elements.
#define NUM_ELEMENTS 512
void runTest( int argc, char** argv);
float computeOnDevice(float* h_data, int array_mem_size);
void checkCUDAError(const char *msg);
extern "C" void computeGold( float* reference, float* idata, const unsigned int len);
// Entry point: forwards the command line to the reduction test driver.
int main(int argc, char** argv)
{
    runTest(argc, argv);
    return EXIT_SUCCESS;
}
////////////////////////////////////////////////////////////////////////////////
//! Run naive scan test
////////////////////////////////////////////////////////////////////////////////
// Runs one device reduction over NUM_ELEMENTS integer-valued floats and
// compares against the host reference computed by computeGold().
void
runTest( int argc, char** argv)
{
    int num_elements = NUM_ELEMENTS;
    const unsigned int array_mem_size = sizeof( float) * num_elements;
    // allocate host memory to store the input data
    // Fix: the malloc result was previously used unchecked.
    float* h_data = (float*) malloc(array_mem_size);
    if (h_data == NULL) {
        printf("Host memory allocation failed\n");
        exit(EXIT_FAILURE);
    }
    // initialize the input data on the host to be integer values
    // between 0 and 1000 (sums stay exactly representable in float)
    srand(time(NULL));
    for(unsigned int i = 0; i < num_elements; ++i){
        h_data[i] = floorf(1000*(rand()/(float)RAND_MAX));
    }
    // compute reference solution
    float reference = 0.0f;
    computeGold(&reference , h_data, num_elements);
    float result = computeOnDevice(h_data, num_elements);
    // We can use an epsilon of 0 since values are integral and in a range
    // that can be exactly represented.
    // Fix: use fabsf -- plain abs() binds to the int overload in C,
    // silently truncating the float difference toward zero.
    float epsilon = 0.0f;
    unsigned int result_regtest = (fabsf(result - reference) <= epsilon);
    printf( "Test %s\n", (1 == result_regtest) ? "PASSED" : "FAILED");
    printf( "device: %f host: %f\n", result, reference);
    // cleanup memory
    free( h_data);
}
// Take h_data from host, copies it to device, setup grid and thread
// dimentions, excutes kernel function, and copy result of scan back
// to h_data.
// Note: float* h_data is both the input and the output of this function.
float
computeOnDevice(float* h_data, int num_elements)
{
// Runs the shared-memory reduction kernel over h_data on the device and
// returns the total sum. h_data is overwritten with the kernel's output
// buffer as a side effect.
// initialize variables for device data, cuda error and timer
float* d_data;
int data_size = sizeof(float) * num_elements;
// allocate memory on device
cudaMalloc((void**)&d_data, data_size);
checkCUDAError("Error allocating memory");
// copy host memory to device
cudaMemcpy(d_data, h_data, data_size, cudaMemcpyHostToDevice);
checkCUDAError("Error copying host to device memory");
// Invoke kernel
int threads = 256;
int blocks = 2;
// NOTE(review): assumes reduction_v1 (defined in the included
// vector_reduction_kernel.cu, not visible here) leaves exactly one
// partial sum per block in d_data[0] and d_data[1] — confirm against
// the kernel before changing `blocks`.
reduction_v1<<<blocks, threads>>>(d_data, num_elements);
// reduction_v2<<<blocks, threads>>>(d_data, num_elements);
checkCUDAError("Error in kernel");
// copy device memory to host
cudaMemcpy(h_data, d_data, data_size, cudaMemcpyDeviceToHost);
checkCUDAError("Error copying host to device memory");
// cleanup device memory
cudaFree(d_data);
checkCUDAError("Error freeing memory");
// calculate final result of two partially calculated blocks
float result = h_data[0] + h_data[1];
return result;
}
// Print a diagnostic and abort if the most recent CUDA call recorded an error.
void
checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        return;
    printf("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
|
13,207 | #define Inf 9999999
// For every pixel of every image in the batch, scan a wsize x wsize
// window across all 3 channels and record the minimum value (the "dark
// channel") plus the (channel, row, col) location where it was found.
// Layouts: image N x 3 x H x W, darkc N x 1 x H x W, index N x 3 x H x W.
// Fix: the original grid-stride loop decomposed the flat index by
// mutating the loop counter itself, so "ids += idt" advanced from the
// batch index instead of the pixel index — visiting wrong and repeated
// pixels whenever the grid is smaller than the data. Decompose into a
// scratch variable instead.
void __global__ dark_channel_ker(float * image, float * darkc, float * index, int N, int H, int W, int wsize)
{
    int C = 3;
    int D = 1;
    int hsize = (wsize - 1) / 2;
    int idt = gridDim.x * blockDim.x;
    for(int ids = blockDim.x * blockIdx.x + threadIdx.x; ids < W*H*N; ids += idt)
    {
        // ids = W*H*idn + W*idh + idw
        int rem = ids;
        int idw = rem % W;
        rem = (rem - idw) / W;
        int idh = rem % H;
        rem = (rem - idh) / H;
        int idn = rem;
        float tmpc = Inf;
        float tmp1 = 0;
        float tmp2 = 0;
        float tmp3 = 0;
        for(int idc=0; idc<C; idc++)
        {
            for(int p=idh-hsize; p<idh+hsize+1; p++)
            {
                for(int q=idw-hsize; q<idw+hsize+1; q++)
                {
                    // clamp the window to the image borders
                    if(p>-1 && p<H && q>-1 && q<W)
                    {
                        float pixel_value = image[W*H*C*idn + W*H*idc + W*p + q];
                        if(pixel_value<tmpc)
                        {
                            tmpc = pixel_value;
                            tmp1 = idc;
                            tmp2 = p;
                            tmp3 = q;
                        }
                    }
                }
            }
        }
        darkc[W*H*D*idn + W*H*0 + W*idh + idw] = tmpc;
        index[W*H*C*idn + W*H*0 + W*idh + idw] = tmp1;
        index[W*H*C*idn + W*H*1 + W*idh + idw] = tmp2;
        index[W*H*C*idn + W*H*2 + W*idh + idw] = tmp3;
    }
}
// Gather pass: darkc(n,h,w) = image(n, idz, idx, idy), where the
// (channel, row, col) triple was stored in `index` by dark_channel_ker.
// Fix: decompose the flat index into a scratch variable instead of
// mutating the loop counter — the original corrupted grid-stride
// iteration whenever a thread had to process more than one element.
void __global__ dark_extract_ker(float * image, float * index, float * darkc, int N, int H, int W)
{
    int C = 3;
    int idt = gridDim.x * blockDim.x;
    for(int ids = blockDim.x * blockIdx.x + threadIdx.x; ids < N*H*W; ids += idt)
    {
        int rem = ids;
        int idw = rem % W;
        rem = (rem - idw) / W;
        int idh = rem % H;
        rem = (rem - idh) / H;
        int idn = rem;
        int idz = index[W*H*C*idn + W*H*0 + W*idh + idw];
        int idx = index[W*H*C*idn + W*H*1 + W*idh + idw];
        int idy = index[W*H*C*idn + W*H*2 + W*idh + idw];
        darkc[W*H*idn + W*idh + idw] = image[W*H*C*idn + W*H*idz + W*idx + idy];
    }
}
// Scatter dark-channel values back to their source positions: for every
// output element (n,c,h,w), sum the darkc values of all window positions
// whose recorded argmin coordinates equal (c,h,w), and count them in accum.
void __global__ place_back_ker(float * darkc, float * index, float * image, float * accum, int N, int H, int W, int wsize)
{
    int C = 3;
    int hsize = (wsize - 1) / 2;        // half-window radius
    int ids = blockDim.x * blockIdx.x + threadIdx.x;
    int idt = gridDim.x * blockDim.x;   // grid stride
    for(; ids < N*C*H*W; ids += idt)
    {
        // BUGFIX: decompose a copy of ids = W*H*C*idn + W*H*idc + W*idh + idw
        // so the grid-stride counter stays valid (the original clobbered it).
        int rem = ids;
        int idw = rem % W;
        rem = (rem - idw) / W;
        int idh = rem % H;
        rem = (rem - idh) / H;
        int idc = rem % C;
        rem = (rem - idc) / C;
        int idn = rem;
        image[W*H*C*idn + W*H*idc + W*idh + idw] = 0;
        accum[W*H*C*idn + W*H*idc + W*idh + idw] = 0;
        float tmpi = 0;     // accumulated dark-channel value
        float tmpa = 0;     // number of contributing window positions
        for(int p = idh-hsize; p < idh+hsize+1; p++)
        {
            for(int q = idw-hsize; q < idw+hsize+1; q++)
            {
                if(p > -1 && p < H && q > -1 && q < W)
                {
                    int idz = (int) index[W*H*C*idn + W*H*0 + W*p + q];
                    int idx = (int) index[W*H*C*idn + W*H*1 + W*p + q];
                    int idy = (int) index[W*H*C*idn + W*H*2 + W*p + q];
                    if(idx==idh && idy==idw && idz==idc)
                    {
                        tmpi += darkc[W*H*idn + W*p + q];
                        tmpa += 1;
                    }
                }
            }
        }
        image[W*H*C*idn + W*H*idc + W*idh + idw] = tmpi;
        accum[W*H*C*idn + W*H*idc + W*idh + idw] = tmpa;
    }
}
#ifdef __cplusplus
extern "C"
{
#endif
// Host launcher: one thread per pixel (N*H*W total), 1024 threads per block.
void dark_channel_run(float * d_image, float * d_darkc, float * d_index, int N, int H, int W, int wsize)
{
    const int threads = 1024;
    const int total = N * H * W;
    const int blocks = (total + threads - 1) / threads;   // ceil-div
    dark_channel_ker<<<blocks, threads>>>(d_image, d_darkc, d_index, N, H, W, wsize);
}
// Host launcher: one thread per pixel (N*H*W total), 1024 threads per block.
void dark_extract_run(float * d_image, float * d_index, float * d_darkc, int N, int H, int W)
{
    const int threads = 1024;
    const int total = N * H * W;
    const int blocks = (total + threads - 1) / threads;   // ceil-div
    dark_extract_ker<<<blocks, threads>>>(d_image, d_index, d_darkc, N, H, W);
}
// Host launcher: one thread per output element (N*3*H*W), 1024 per block.
void place_back_run(float * d_darkc, float * d_index, float * d_image, float * d_accum, int N, int H, int W, int wsize)
{
    const int threads = 1024;
    const int total = N * 3 * H * W;
    const int blocks = (total + threads - 1) / threads;   // ceil-div
    place_back_ker<<<blocks, threads>>>(d_darkc, d_index, d_image, d_accum, N, H, W, wsize);
}
#ifdef __cplusplus
}
#endif
|
13,208 | #include "includes.h"
// Element-wise cast of a long long array into a float array, using a
// grid-stride loop over a (possibly 2-D) grid of 1-D blocks.
__global__ void __longToFloat(long long *A, float *B, int N) {
    int start = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    int step = blockDim.x * gridDim.x * gridDim.y;
    for (int i = start; i < N; i += step) {
        B[i] = (float)A[i];
    }
}
13,209 | #include <cuda_runtime.h>
#include <algorithm>
#include <iostream>
#include <vector>
#define CHECK_ERROR(FUNC) checkCudaErrorMsg(FUNC, " " #FUNC)
// Print the failing call (stringified by CHECK_ERROR) together with the
// numeric status and terminate; no-op on cudaSuccess.
void inline checkCudaErrorMsg(cudaError status, const char *msg) {
  if (status == cudaSuccess) {
    return;
  }
  std::cout << msg << " - " << status << std::endl;
  exit(EXIT_FAILURE);
}
/* Naive square matrix multiply c = a * b (all n x n, row-major), one output
   element per thread; out-of-range threads exit early. */
__global__ void matmul(const float *a, const float *b, float *c, int n) {
  int row = (blockIdx.y * blockDim.y) + threadIdx.y;
  int col = (blockIdx.x * blockDim.x) + threadIdx.x;
  if (row >= n || col >= n) {
    return;
  }
  float acc = 0.f;
  for (int k = 0; k < n; ++k) {
    acc += a[row * n + k] * b[k * n + col];
  }
  c[row * n + col] = acc;
}
int main(int argc, char *argv[]) {
  /* Multiply two constant N x N matrices on the GPU and verify the result
     against a CPU reference computed in the same float precision. */
  /* Size of matrix (deliberately not a multiple of the block size) */
  constexpr int N = 473;
  /* Create and fill matrices on host */
  std::vector<float> h_a(N * N);
  std::vector<float> h_b(N * N);
  std::vector<float> h_c(N * N);
  std::fill(h_a.begin(), h_a.end(), 3.f);
  std::fill(h_b.begin(), h_b.end(), 2.f);
  /* Create matrices on device and copy host data */
  float *d_a = nullptr;
  float *d_b = nullptr;
  float *d_c = nullptr;
  size_t matSizeInBytes = N * N * sizeof(float);
  CHECK_ERROR(cudaMalloc((void **)&d_a, matSizeInBytes));
  CHECK_ERROR(cudaMalloc((void **)&d_b, matSizeInBytes));
  CHECK_ERROR(cudaMalloc((void **)&d_c, matSizeInBytes));
  CHECK_ERROR(
      cudaMemcpy(d_a, h_a.data(), matSizeInBytes, cudaMemcpyHostToDevice));
  CHECK_ERROR(
      cudaMemcpy(d_b, h_b.data(), matSizeInBytes, cudaMemcpyHostToDevice));
  /* Invoke the matmul kernel; round the grid up so it covers all of N */
  dim3 blockSize(32, 32);
  dim3 gridSize;
  gridSize.x = ceil(static_cast<double>(N) / static_cast<double>(blockSize.x));
  gridSize.y = ceil(static_cast<double>(N) / static_cast<double>(blockSize.y));
  matmul<<<gridSize, blockSize>>>(d_a, d_b, d_c, N);
  /* BUGFIX: kernel launches return no status directly; fetch any
     launch-configuration error explicitly before synchronizing. */
  CHECK_ERROR(cudaGetLastError());
  /* Synchronize and copy data back to host */
  CHECK_ERROR(cudaDeviceSynchronize());
  CHECK_ERROR(
      cudaMemcpy(h_c.data(), d_c, matSizeInBytes, cudaMemcpyDeviceToHost));
  /* Free device memory */
  CHECK_ERROR(cudaFree(d_a));
  CHECK_ERROR(cudaFree(d_b));
  CHECK_ERROR(cudaFree(d_c));
  /* Verify results against a CPU reference (exact float compare is safe
     here: every element is 473 * 3 * 2 = 2838, exactly representable). */
  std::vector<float> expected(N * N, 0.f);
  for (int i = 0; i < N; ++i) {
    for (int j = 0; j < N; ++j) {
      for (int k = 0; k < N; ++k) {
        expected[i * N + j] += h_a[i * N + k] * h_b[k * N + j];
      }
    }
  }
  for (int i = 0; i < N * N; ++i) {
    if (h_c[i] != expected[i]) {
      std::cout << "Result incorrect!\n";
      std::cout << "Expected " << expected[i] << " Actual " << h_c[i] << "\n";
      return 1;
    }
  }
  std::cout << "Result correct!\n";
  return 0;
}
|
13,210 | //includes creating atoms with mass and temperature using probability function
//based on velocity intervals ranging 3 standard deviations with random angles
//converts spherical coordinates to cartesian coordinates for intial velocities
//Paige Diamond, Zeke Elkins, Shannon White, Kayla Huff, Tim Webber
// 10-30-2014
#include <cmath>
#include <math.h>
#include <iostream>
#include <stdlib.h>
#include <cstdlib> // needed for rand
#include <ctime>//neded for srand(time(0))
#include <fstream> //needed for files
#include <vector>
#include <cfloat>
#include <string>
#define N 1000000
using namespace std;
int main() {
double mass = 6.63352088E-27; //mass in kg for one atom (ARGON)
float k = 1.3806488E-23; //m^2 kg s^-2 K^-1
double temp = 298; //kelvin
double pi = 3.14159;
double avgVelocity = 0;
double velocity[N],velX[N],velY[N],velZ[N],x[N],y[N],z[N],theta[N],phi[N];
int atomIndexL = 0;
double velIndex = 0;
double width = 0;
double mostProbVel = 0;
int atomIndex=0;
//Set range for possible values (0-2*average or +-3 standard deviations)
avgVelocity = (sqrt(2*k*temp/mass))*(2/(sqrt(pi)));
cout<<"Average velocity" << avgVelocity<< endl;
mostProbVel = sqrt (2*k*temp/mass);
velIndex = mostProbVel;
//most probable velocity is different than average velocity because the distribution is skewed.
//We want to start at the "top of the hill" so we always overestimate the boxes and don't
//run into rounding errors
/*for(atomIndex = 0; atomIndex <N; atomIndex++){
if(atomIndex == 0){
width = pow(3, 1/3)/(pow((mass/(2*pi*k*temp)),1/2)*pow(N, 1/3)*pow(4*pi, 1/3));
}*/
//creates atoms left of the hill until the velocity is equal to zero
for (atomIndexL=0; atomIndexL < N; atomIndexL++ ){
if (velIndex >= 0.0000){
width = 1/((sqrt((mass/(2*pi*k*temp))*(mass/(2*pi*k*temp))*(mass/(2*pi*k*temp))))*(4*pi*(velIndex*velIndex))*(exp(-(mass*(velIndex*velIndex))/(2*k*temp)))*N);
x[atomIndex]= static_cast <double> (rand()) / static_cast <double> (RAND_MAX/500);
y[atomIndex] = static_cast <double> (rand()) / static_cast <double> (RAND_MAX/500);
z[atomIndex] = static_cast <double> (rand()) / static_cast <double> (RAND_MAX/500);
theta[atomIndex] = static_cast<double>(rand()) / static_cast<double>(RAND_MAX/(M_PI));
phi[atomIndex] = static_cast<double>(rand()) / static_cast<double>(RAND_MAX/(2*M_PI));
//Randomize Velocity within intvel, convert to cartesian coordinates
velocity[atomIndex]=velIndex - static_cast <double> (rand()) / static_cast <double> (RAND_MAX/width);
velX[atomIndex]=velocity[atomIndex]*sin(theta[atomIndex])*cos(phi[atomIndex]);
velY[atomIndex]=velocity[atomIndex]*sin(theta[atomIndex])*sin(phi[atomIndex]);
velZ[atomIndex]=velocity[atomIndex]*cos(theta[atomIndex]);
velIndex=velIndex-width;
//Update index
cout << "width" << width << endl;
cout <<"Atom Index " << atomIndexL << endl;
cout << "Atom Velocity " << velocity[atomIndex] << endl;
cout<< velX[atomIndex] << " " << velY[atomIndex] << " " << velZ[atomIndex] << endl;
atomIndex++;
}
}
velIndex = mostProbVel;
//creates remaining atoms to the right of the most probable velocity
for (; atomIndex<N; atomIndex++){
width = 1/((sqrt((mass/(2*pi*k*temp))*(mass/(2*pi*k*temp))*(mass/(2*pi*k*temp))))*(4*pi*(velIndex*velIndex))*(exp(-(mass*(velIndex*velIndex))/(2*k*temp)))*N);
x[atomIndex]= static_cast <double> (rand()) / static_cast <double> (RAND_MAX/500);
y[atomIndex] = static_cast <double> (rand()) / static_cast <double> (RAND_MAX/500);
z[atomIndex] = static_cast <double> (rand()) / static_cast <double> (RAND_MAX/500);
theta[atomIndex] = static_cast<double>(rand()) / static_cast<double>(RAND_MAX/(M_PI));
phi[atomIndex] = static_cast<double>(rand()) / static_cast<double>(RAND_MAX/(2*M_PI));
//Randomize Velocity within intvel, convert to cartesian coordinates
velocity[atomIndex]=velIndex + static_cast <double> (rand()) / static_cast <double> (RAND_MAX/width);
velX[atomIndex]=velocity[atomIndex]*sin(theta[atomIndex])*cos(phi[atomIndex]);
velY[atomIndex]=velocity[atomIndex]*sin(theta[atomIndex])*sin(phi[atomIndex]);
velZ[atomIndex]=velocity[atomIndex]*cos(theta[atomIndex]);
velIndex=velIndex+width;
//Update index
cout << "width" << width << endl;
cout <<"Atom Index " << atomIndex << endl;
cout << "Atom Velocity " << velocity[atomIndex] << endl;
cout<< velX[atomIndex] << " " << velY[atomIndex] << " " << velZ[atomIndex] << endl;
}
//for (; atomIndex <N; atomIndex++){
//x[atomIndex]= static_cast <float> (rand()) / static_cast <float> (RAND_MAX/500);
// y[atomIndex] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX/500);
// z[atomIndex] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX/500);
// theta[atomIndex] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX/(M_PI));
// phi[atomIndex] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX/(2*M_PI));
//Randomize Velocity within intvel, convert to cartesian coordinates
// velocity[atomIndex]=velIndex;
// velX[atomIndex]=velocity[atomIndex]*sin(theta[atomIndex])*cos(phi[atomIndex]);
// velY[atomIndex]=velocity[atomIndex]*sin(theta[atomIndex])*sin(phi[atomIndex]);
// velZ[atomIndex]=velocity[atomIndex]*cos(theta[atomIndex]);
// velIndex=velIndex+width;
//Update index
// cout << "width" << width << endl;
//cout <<"Atom Index " << atomIndex << endl;
// cout << "Atom Velocity " << velocity[atomIndex] << endl;
// cout<< velX[atomIndex] << " " << velY[atomIndex] << " " << velZ[atomIndex] << endl;
/* velocity[N-1]=velIndex;
x[N-1]= static_cast <float> (rand()) / static_cast <float> (RAND_MAX/500);
y[N-1] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX/500);
z[N-1] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX/500);
theta[N-1] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX/(M_PI));
phi[N-1] = static_cast<float>(rand()) / static_cast<float>(RAND_MAX/(2*M_PI));
velX[N-1]=velocity[N-1]*sin(theta[N-1])*cos(phi[N-1]);
velY[N-1]=velocity[N-1]*sin(theta[N-1])*sin(phi[N-1]);
velZ[N-1]=velocity[N-1]*cos(theta[N-1]);
cout <<"Atom Index " << atomIndex << endl;
cout << "Atom Velocity " << velocity[atomIndex] << endl;
cout<< velX[atomIndex] << " " << velY[atomIndex] << " " << velZ[atomIndex] << endl;
*/
double avgCheck = 0;
//check out average
for(int j=0; j<atomIndex; j++){
avgCheck = avgCheck + velocity [j];
}
avgCheck = avgCheck / atomIndex;
cout << "Average: " << avgCheck << endl;
return 0;
}
|
13,211 |
//DOT PRODUCT FOR AB=C;COMPILES AND GIVES CORRECT ANS
//1D
#include<cmath>
#include<cstdio>
//#define BLOCKSIZE 1
__global__
// Computes the element-wise products C[i] = A[i]*B[i] for i in [0, M) and
// prints them plus their sum (the dot product).  The sum is only printed,
// never stored; the caller launches this <<<1,1>>>, so a single thread does
// all the work.  N is accepted for signature compatibility but unused.
// BUGFIX: removed the hard-coded `M=2;` that clobbered the caller's M
// (harmless for the current caller, which passes 2, but a latent defect).
void dotproduct(int* A,int*B,int*C,int M,int N)
{
    int I = blockIdx.x*blockDim.x + threadIdx.x;
    int temp = 0;
    if( I < M ){
        for(int i = 0; i < M; i++){
            C[i] = A[i]*B[i];
            temp += C[i];
            printf("matrix products:%d\n", C[i]);
        }
        printf("dot product is; %d\n", temp);
    }
}
int main(){
    // Host driver: compute element-wise products (and print the dot product)
    // of two length-2 vectors on the device.
    int A[2]={1,2};
    int B[2]={1,1};
    int C[2]={0,0};
    int* d_A; int* d_B; int* d_C;
    int M=2;
    int N=2;
    // BUGFIX: d_C must hold M ints — the kernel writes C[0..M-1]; the old
    // sizeof(int) allocation caused an out-of-bounds device write.
    cudaMalloc(&d_A, M *sizeof(int));
    cudaMalloc(&d_B, M *sizeof(int));
    cudaMalloc(&d_C, M *sizeof(int));
    //copy A and B FROM HOST TO DEVICE
    cudaMemcpy(d_A, &A[0], M *sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, &B[0], M *sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_C, &C[0], M *sizeof(int), cudaMemcpyHostToDevice);
    dotproduct<<<1,1>>>(d_A, d_B, d_C, M, N);
    //COPY RESULT BACK TO HOST (all M products, not just the first)
    cudaMemcpy(&C[0], d_C, M *sizeof(int), cudaMemcpyDeviceToHost);
    // BUGFIX: free the *device* pointers — the old code passed the host
    // arrays A/B/C to cudaFree, which is invalid.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
|
13,212 | ///////////////////////////////////////////////////////////////////////////////
//
// The MIT License
//
// Copyright (c) 2006 Scientific Computing and Imaging Institute,
// University of Utah (USA)
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
///////////////////////////////////////////////////////////////////////////////
#ifndef ELVIS_CORE_INNER_INTEGRAL_ADAPTIVE_TRAPEZOIDAL_CU
#define ELVIS_CORE_INNER_INTEGRAL_ADAPTIVE_TRAPEZOIDAL_CU
namespace ElVis
{
// /// Adaptive trapezoidal integration using a runtime stack to handle
// recursion.
// /// Value are kept for subsequent sampling.
// template<typename T, unsigned int n>
// struct InnerIntegralAdaptiveTrapezoidal
// {
// public:
// struct StackPoint
// {
// template<typename FieldFunc>
// __device__
// void Evaluate(const TransferFunction* densityFunc,
// TransferFunctionChannel channel,
// const FieldFunc& fieldFunc)
// {
// T s = fieldFunc(TVal);
// F = densityFunc->Sample(channel, s);
// }
// __device__ void Reset()
// {
// TVal = MAKE_FLOAT(1e30);
// }
// __device__ bool IsUninitialized() const
// {
// return TVal == MAKE_FLOAT(1e30);
// }
// __device__ StackPoint& operator=(const StackPoint& rhs)
// {
// TVal = rhs.TVal;
// F = rhs.F;
// return *this;
// }
// T TVal;
// T F;
// };
// struct StackEntry
// {
// __device__ void CalculateMidpointT()
// {
// Mid().TVal = Left().TVal + (Right().TVal -
// Left().TVal)/2.0;
// }
// __device__ void SetT(const T& t0, const T& t1)
// {
// Left().TVal = t0;
// Right().TVal = t1;
// CalculateMidpointT();
// }
// __device__ T GetH() const
// {
// return Right().TVal - Left().TVal;
// }
// template<typename FieldFunc>
// __device__ void EvaluateAll(const TransferFunction*
// densityFunc,
// TransferFunctionChannel channel,
// const FieldFunc& fieldFunc)
// {
// for(unsigned int i = 0; i < 3; ++i)
// {
// points[i].Evaluate(densityFunc, channel, fieldFunc);
// }
// }
// __device__ StackPoint& Left() { return points[0]; }
// __device__ StackPoint& Mid() { return points[1]; }
// __device__ StackPoint& Right() { return points[2]; }
// __device__ const StackPoint& Left() const { return
// points[0]; }
// __device__ const StackPoint& Mid() const { return
// points[1]; }
// __device__ const StackPoint& Right() const { return
// points[2]; }
// StackPoint points[3];
// };
// template<typename FieldFunctionType>
// __device__ void Integrate(const T& t0, const T& t1, const
// TransferFunction* transferFunction,
// TransferFunctionChannel channel,
// const FieldFunctionType& fieldFunction, const T&
// globalEpsilon,
// const T& globalIntegralEstimate,
// const T& maxFunctionValue, bool&
// reachedMaxRecursion,
// bool traceEnabled)
// {
// if( traceEnabled )
// {
// printf("Global Epsilon %f, globalIntegralEstimate %f,
// maxValue %f\n", globalEpsilon, globalIntegralEstimate,
// maxFunctionValue);
// }
// const unsigned int maxRecursion = n;
// reachedMaxRecursion = false;
// StackEntry stack[maxRecursion];
// stack[0].SetT(t0, t1);
// stack[0].EvaluateAll(transferFunction, channel,
// fieldFunction);
// stack[1].Left() = stack[0].Left();
// stack[1].Mid().Reset();
// stack[1].Right() = stack[0].Mid();
// unsigned int minimumDepth = 2;
// int i = 1;
// t[0] = t0;
// f[0] = stack[0].Left().F;
// I[0] = 0.0;
// adaptiveIndex = 0;
// while( i > 0 )
// {
// reachedMaxRecursion |= (i == maxRecursion-1);
// if( stack[i].Mid().IsUninitialized() )
// {
// bool needToSubdivide = false;
// stack[i].CalculateMidpointT();
// stack[i].Mid().Evaluate(transferFunction, channel,
// fieldFunction);
// if( i < minimumDepth )
// {
// needToSubdivide = true;
// }
// else
// {
// T I0 = stack[i].GetH()/MAKE_FLOAT(2.0) *
// (stack[i].Left().F + stack[i].Right().F);
// T I1 = stack[i].GetH()/MAKE_FLOAT(4.0) *
// (stack[i].Left().F + 2.0*stack[i].Mid().F +
// stack[i].Right().F);
// T localEpsilon =
// globalEpsilon*globalIntegralEstimate *
// (stack[i].GetH()/stack[0].GetH());
// if( traceEnabled )
// {
// printf("Level %d, Interval (%f, %f, %f),
// values (%f, %f, %f) I0 = %f, I1 = %f,
// localEpsilon = %f\n", i,
// stack[i].Left().TVal, stack[i].Mid().TVal,
// stack[i].Right().TVal,
// stack[i].Left().F, stack[i].Mid().F,
// stack[i].Right().F, I0, I1,
// localEpsilon);
// }
// ElVisFloat h = stack[i].GetH()/MAKE_FLOAT(2.0);
// if( stack[i].Left().F == MAKE_FLOAT(0.0) &&
// stack[i].Mid().F == MAKE_FLOAT(0.0) &&
// stack[i].Right().F == MAKE_FLOAT(0.0) )
// {
// ElVis::Interval<ElVisFloat> range =
// fieldFunction.EstimateRange(stack[i].Left().TVal,
// stack[i].Right().TVal);
// ElVisFloat maxValue =
// transferFunction->GetMaxValue(channel,
// range);
// T maxSegmentError =
// (maxFunctionValue*h)/globalIntegralEstimate;
// T updatedSegmentError =
// (maxValue*h)/globalIntegralEstimate;
// if( traceEnabled )
// {
// printf("All 3 values are 0. Scalar
// range is (%f, %f), maxSegmentError %f,
// updatedSegmentError %f\n",
// range.GetLow(), range.GetHigh(),
// maxSegmentError,
// updatedSegmentError);
// }
// if( updatedSegmentError > localEpsilon && i
// < maxRecursion-1 )
// {
// needToSubdivide = true;
// }
// }
// else if( stack[i].Left().F == MAKE_FLOAT(0.0) ||
// stack[i].Mid().F == MAKE_FLOAT(0.0) ||
// stack[i].Right().F == MAKE_FLOAT(0.0) )
// {
// // If any of the samples are 0, then we know
// there is a breakpoint somewhere and we
// should subdivide.
// T maxSegmentError =
// (maxFunctionValue*h)/globalIntegralEstimate;
// ElVis::Interval<ElVisFloat> range =
// fieldFunction.EstimateRange(stack[i].Left().TVal,
// stack[i].Right().TVal);
// ElVisFloat maxValue =
// transferFunction->GetMaxValue(channel,
// range);
// T updatedSegmentError =
// (maxValue*h)/globalIntegralEstimate;
// if( traceEnabled )
// {
// printf("At least one value is 0. Scalar
// range is (%f, %f), maxSegmentError %f,
// updatedSegmentError %f\n",
// range.GetLow(), range.GetHigh(),
// maxSegmentError,
// updatedSegmentError);
// }
// if( traceEnabled )
// {
// printf("One of the samples is 0,
// maxSegmentError = %f, localEpsilon =
// %f\n", maxSegmentError, localEpsilon);
// }
// if(updatedSegmentError > localEpsilon && i <
// maxRecursion-1 )
// {
// needToSubdivide = true;
// }
// }
// else
// {
// T errorEstimate =
// fabs(I0-I1)/globalIntegralEstimate;
// if( traceEnabled )
// {
// printf("No samples 0, errorEstimate =
// %f, localEpsilon = %f\n", errorEstimate,
// localEpsilon);
// }
// if( errorEstimate > localEpsilon && i <
// maxRecursion-1 )
// {
// needToSubdivide = true;
// }
// }
// }
// if( traceEnabled )
// {
// printf("Subdividing = %d\n", needToSubdivide? 1
// : 0);
// }
// if( needToSubdivide )
// {
// stack[i+1].Left() = stack[i].Left();
// stack[i+1].Mid().Reset();
// stack[i+1].Right() = stack[i].Mid();
// i = i + 1;
// }
// else
// {
// T prevValue = I[adaptiveIndex];
// T h = stack[i].GetH()/MAKE_FLOAT(4.0);
// T mid_f = stack[i].Mid().F;
// T right_f = stack[i].Right().F;
// t[adaptiveIndex+1] = stack[i].Mid().TVal;
// t[adaptiveIndex+2] = stack[i].Right().TVal;
// f[adaptiveIndex+1] = mid_f;
// f[adaptiveIndex+2] = right_f;
// T leftContribution = h * (stack[i].Left().F +
// mid_f);
// T rightContribution = h * (mid_f + right_f);
// I[adaptiveIndex+1] = prevValue +
// leftContribution;
// I[adaptiveIndex+2] = prevValue +
// leftContribution+rightContribution;
// if( traceEnabled )
// {
// printf("Integral Value at %f = %f\n",
// t[adaptiveIndex+1], I[adaptiveIndex+1]);
// printf("Integral Value at %f = %f\n",
// t[adaptiveIndex+2], I[adaptiveIndex+2]);
// }
// adaptiveIndex += 2;
// }
// }
// else
// {
// if( stack[i].Right().TVal == stack[i-1].Mid().TVal )
// {
// // We just finished traversing the left side,
// now go to
// // the right.
// stack[i].Left() = stack[i-1].Mid();
// stack[i].Mid().Reset();
// stack[i].Right() = stack[i-1].Right();
// }
// else
// {
// // We finished this branch. Remove it and go up
// to
// // the next one.
// i = i-1;
// }
// }
// }
// }
// __device__ T SampleInnerIntegral(T t_i, T sample,
// TransferFunctionChannel channel, const TransferFunction*
// densityFunc) const
// {
// if( t_i < t[0] ||
// t_i > t[adaptiveIndex] )
// {
// return MAKE_FLOAT(0.0);
// }
// if( t_i == t[0] ) return MAKE_FLOAT(0.0);
// if( t_i == t[adaptiveIndex] ) return I[adaptiveIndex];
// const T* a = &(t[0]);
// const T* b = &(t[adaptiveIndex]);
// while(b-a > 1 )
// {
// const T* mid = (b-a)/2 + a;
// if( *mid == t_i )
// {
// return I[mid-a];
// }
// if( t_i < *mid )
// {
// b = mid;
// }
// else
// {
// a = mid;
// }
// }
// T baseline = I[a-t];
// T segment = (t_i-*a)/MAKE_FLOAT(2.0) * ( f[a-t] +
// densityFunc->Sample(channel, sample));
// return baseline+segment;
// }
// __device__ T OverallValue() const
// {
// return I[adaptiveIndex];
// }
// static const unsigned int arraySize = (0x01 << n) + 1;
// T t[arraySize];
// T f[arraySize];
// T I[arraySize];
// private:
// unsigned int adaptiveIndex;
// };
}
#endif
|
13,213 | #include "includes.h"
// Tiled matrix transpose b = a^T (n x n, row-major) with diagonal block
// reordering.  BX/BY are tile dimensions supplied by includes.h and must
// match blockDim; blocks are expected to be square tiles.
__global__ void transpose_v4(float* a,float* b, int n){
    int blockIdx_x = blockIdx.y;
    int blockIdx_y = (blockIdx.x+blockIdx.y)%gridDim.x;   // diagonal reorder
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int bx = blockIdx_x;
    int by = blockIdx_y;
    int i = bx*BX + tx;
    int j = by*BY + ty;
    __shared__ float tile[BY][BX+1]; //Very slight modification to avoid bank conflict in shared mem
    // BUGFIX: the original did `if(i >= n || j >= n) return;` *before*
    // __syncthreads().  A boundary block with a mix of in-range and
    // out-of-range threads then hits the barrier with divergent control
    // flow (undefined behaviour), and the returned threads never write the
    // tile elements that surviving threads read afterwards.  Guard the
    // memory accesses instead so every thread reaches the barrier.
    if (i < n && j < n)
        tile[ty][tx] = a[j*n+i];
    __syncthreads();
    // transposed coordinates for the write-back
    i = by*BY + tx;
    j = bx*BX + ty;
    if (i < n && j < n)
        b[j*n+i] = tile[tx][ty];
}
13,214 | #include <math.h>
// Per-thread Newton iteration for the critical point of
// f(x) = coef[i]*x^2 + 2.3*x (a maximum when coef[i] < 0), starting from x0
// and stopping once successive iterates differ by at most tol.
// NOTE(review): assumes coef[i] != 0; a zero coefficient divides by zero and
// a positive one converges to a minimum instead — confirm caller guarantees.
__global__ void maxPoly(const double x0, const double* coef,
const double tol, const int nParam, double* argMax){
  int i = blockIdx.x*blockDim.x + threadIdx.x;
  // only the first nParam threads have work to do
  if(i >= nParam){
    return;
  }
  double x = x0;
  double delta = tol + 1;          // force at least one iteration
  while(delta > tol){
    double g = 2*coef[i]*x + 2.3;  // first derivative
    double h = 2*coef[i];          // second derivative
    double xNext = x - g/h;        // Newton step
    delta = fabs(xNext - x);
    x = xNext;
  }
  argMax[i] = x;
}
|
13,215 | #include<stdio.h>
#include<cuda.h>
// Flatten the 3-D grid and 3-D block coordinates into a single global thread
// id and store it at arr[id].  arr must hold one int per launched thread.
__global__ void find_unique_id(int* arr) {
    int block = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int local = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    int tid = block * (blockDim.x * blockDim.y * blockDim.z) + local;
    arr[tid] = tid;
}
int main() {
    // Launch a 1x2x3 grid of 4x5x6 blocks; each thread writes its global id
    // into the output array, which is then printed on the host.
    dim3 grid(1,2,3);
    dim3 block(4,5,6);
    int threads = 1*2*3*4*5*6;   // total threads = grid volume * block volume
    int *arr, *darr;
    arr = (int*)malloc(threads*sizeof(int));
    cudaMalloc(&darr, threads*sizeof(int));
    find_unique_id<<<grid, block>>>(darr);
    cudaDeviceSynchronize();
    cudaMemcpy(arr, darr, threads*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i=0; i<threads; i++)
        printf("%d\n", arr[i]);
    printf("\n");
    // BUGFIX: release device and host buffers (previously leaked)
    cudaFree(darr);
    free(arr);
    return 0;
}
|
// Writes output[i] = input[i] * 16 for every element.
// NOTE(review): every launched thread walks the entire array and writes the
// same values (benign redundancy) — looks intended for a <<<1,1>>> launch;
// confirm before scaling up.
__global__ void k(int *input, int *output, int count)
{
    for (int idx = 0; idx < count; ++idx) {
        output[idx] = input[idx] * 16;
    }
}
|
13,217 | #include <stdio.h>
// Writes d_C[row] = d_A[col] + d_B[col] for threads passing the guard.
// NOTE(review): inputs are indexed by `col` but the output by `row`; with the
// 1-D launch used by main() below (blockDim.y == 1, gridDim.y == 1), row is
// always 0, so only d_C[0] is ever written — confirm this is intended.
__global__ void addition(float* d_A, float* d_B, float* d_C, int numElements){
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float tmp_1, tmp_2;
if(row < numElements && col < numElements) {
tmp_1 = d_A[col];
// NOTE(review): __syncthreads() inside this divergent branch is undefined
// behaviour whenever some threads of a block fail the guard above; no shared
// memory is used, so the barriers also serve no data-sharing purpose.
__syncthreads();
tmp_2 = d_B[col];
__syncthreads();
d_C[row] = tmp_1 + tmp_2;
__syncthreads();
}
}
__global__
// For each i in [0, numRows): reads d_A[row+i*col] and d_B[row+i*col] and
// writes their sum to d_C[row].
// NOTE(review): the output index does not depend on i, so d_C[row] ends up
// holding only the last iteration's sum; the __syncthreads() calls sit in a
// divergent branch (undefined behaviour if part of a block fails the guard).
// This kernel is never launched in this file — confirm intended semantics.
void additionByRows(float* d_A, float* d_B, float* d_C, int numRows) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float tmp_1, tmp_2;
if(row<numRows && col<numRows){
for(int i=0; i<numRows; i++){
tmp_1 = d_A[row+i*col];
__syncthreads();
tmp_2 = d_B[row+i*col];
__syncthreads();
d_C[row] = tmp_1 + tmp_2;
__syncthreads();
}
}
}
__global__
// For each i in [0, numColumns): reads d_A[row*i+col] and d_B[row*i+col] and
// writes their sum to d_C[row].
// NOTE(review): as in additionByRows, the output index ignores i (only the
// final iteration's sum survives) and the __syncthreads() calls are inside a
// divergent branch.  This kernel is never launched in this file — confirm
// intended semantics before use.
void additionByColumns(float* d_A, float* d_B, float* d_C, int numColumns) {
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float tmp_1, tmp_2;
if(row<numColumns && col<numColumns){
for(int i=0; i<numColumns; i++){
tmp_1 = d_A[row*i+col];
__syncthreads();
tmp_2 = d_B[row*i+col];
__syncthreads();
d_C[row] = tmp_1 + tmp_2;
__syncthreads();
}
}
}
int main() {
    // Host driver: allocates 512x512 matrices, fills the inputs with their
    // linear index, and runs the `addition` kernel with a 1-D launch.
    // size of matrixes
    int numArows = 512;
    int numAcolumns = 512;
    int numBcolumns = 512;
    const size_t bytesA = numArows * numAcolumns * sizeof(float);
    const size_t bytesB = numAcolumns * numBcolumns * sizeof(float);
    const size_t bytesC = numArows * numBcolumns * sizeof(float);
    // allocate data in host
    float *h_A = (float *) malloc(bytesA);
    float *h_B = (float *) malloc(bytesB);
    float *h_C = (float *) malloc(bytesC);
    // matrix initialization: each element holds its own linear index
    for (int idx = 0; idx < numArows * numAcolumns; idx++)
        h_A[idx] = idx;
    for (int idx = 0; idx < numAcolumns * numBcolumns; idx++)
        h_B[idx] = idx;
    // allocate data in device
    float *d_A, *d_B, *d_C;
    cudaMalloc((void**)&d_A, bytesA);
    cudaMalloc((void**)&d_B, bytesB);
    cudaMalloc((void**)&d_C, bytesC);
    // copy inputs to device
    cudaMemcpy(d_A, h_A, bytesA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytesB, cudaMemcpyHostToDevice);
    // launch kernel (1-D configuration: one thread per element id)
    int threadsPerBlock = 32;
    int n_blocks = ceil(numAcolumns * numAcolumns / 32.0);
    addition<<<n_blocks, threadsPerBlock>>>(d_A, d_B, d_C, numAcolumns * numAcolumns);
    // copy output to host
    cudaMemcpy(h_C, d_C, bytesC, cudaMemcpyDeviceToHost);
    // freeing space
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
|
13,218 | /*Heat Conduction Computation Exercise From nvidia.qwiklab.com Optimized Version!!!
https://medium.com/@lucidlearning314/general-heat-conduction-equation-cartesian-coordinates-9be71b546b76
*/
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <iostream>
#define I2D(num, c, r) ((r)*(num)+(c))
// Report any CUDA runtime failure on stderr and trip an assert; passes the
// status through unchanged so calls can be wrapped inline.
inline cudaError_t checkCuda(cudaError_t result)
{
  if (result == cudaSuccess) {
    return result;
  }
  fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
  assert(result == cudaSuccess);
  return result;
}
__global__
// One explicit finite-difference step of the 2-D heat equation over the
// interior points.  threadIdx.x spans the nj direction (idy), threadIdx.y
// spans the ni direction (idx); boundary and out-of-range threads exit.
void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
  int idy = threadIdx.x + blockIdx.x * blockDim.x;
  int idx = threadIdx.y + blockIdx.y * blockDim.y;
  // skip the domain boundary and anything outside the grid
  if (idy <= 0 || idx <= 0 || idy >= nj-1 || idx >= ni-1) {
    return;
  }
  // linear indices for the central point and its four neighbours
  int i00  = I2D(ni, idx,   idy);
  int im10 = I2D(ni, idx-1, idy);
  int ip10 = I2D(ni, idx+1, idy);
  int i0m1 = I2D(ni, idx,   idy-1);
  int i0p1 = I2D(ni, idx,   idy+1);
  // second derivatives in each direction
  float d2tdx2 = temp_in[im10] - 2*temp_in[i00] + temp_in[ip10];
  float d2tdy2 = temp_in[i0m1] - 2*temp_in[i00] + temp_in[i0p1];
  // explicit update
  temp_out[i00] = temp_in[i00] + fact*(d2tdx2 + d2tdy2);
}
// Seed all four ni*nj temperature buffers with one identical random field
// of values in [0, 100].
void initRandom(float *temp1_ref, float *temp2_ref, float *temp1, float *temp2, int ni, int nj){
  const int total = ni * nj;
  for (int idx = 0; idx < total; ++idx) {
    float v = (float)rand()/(float)(RAND_MAX/100.0f);
    temp1_ref[idx] = v;
    temp2_ref[idx] = v;
    temp1[idx] = v;
    temp2[idx] = v;
  }
}
// CPU reference implementation of one explicit heat-equation step; mirrors
// step_kernel_mod so the GPU result can be validated against it.
void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
  // loop over all points in domain (except boundary)
  for (int j = 1; j < nj-1; j++) {
    for (int i = 1; i < ni-1; i++) {
      // linear indices for the central point and its four neighbours
      int i00  = I2D(ni, i,   j);
      int im10 = I2D(ni, i-1, j);
      int ip10 = I2D(ni, i+1, j);
      int i0m1 = I2D(ni, i,   j-1);
      int i0p1 = I2D(ni, i,   j+1);
      // second derivatives in each direction
      float d2tdx2 = temp_in[im10] - 2*temp_in[i00] + temp_in[ip10];
      float d2tdy2 = temp_in[i0m1] - 2*temp_in[i00] + temp_in[i0p1];
      // explicit update
      temp_out[i00] = temp_in[i00] + fact*(d2tdx2 + d2tdy2);
    }
  }
}
int main()
{
// Runs nstep explicit heat-conduction steps on an ni x nj grid twice — a
// serial CPU reference pass and a CUDA pass over managed memory — and then
// compares the two temperature fields against an error bound.
int istep;
int nstep = 200; // number of time steps
int deviceId;
int numberOfSMs;
// Specify our 2D dimensions
const int ni = 200;
const int nj = 100;
float tfac = 8.418e-5; // thermal diffusivity of silver
float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp;
const int size = ni * nj * sizeof(float);
cudaGetDevice(&deviceId);
// NOTE(review): numberOfSMs is queried but never used below.
cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
//CPU-GPU Memory Allocations
checkCuda(cudaMallocManaged(&temp1_ref, size));
checkCuda(cudaMallocManaged(&temp2_ref, size));
checkCuda(cudaMallocManaged(&temp1, size));
checkCuda(cudaMallocManaged(&temp2, size));
//Avoid From CPU Page Faults
cudaMemPrefetchAsync(temp1_ref, size, cudaCpuDeviceId);
cudaMemPrefetchAsync(temp2_ref, size, cudaCpuDeviceId);
cudaMemPrefetchAsync(temp1, size, cudaCpuDeviceId);
cudaMemPrefetchAsync(temp2, size, cudaCpuDeviceId);
// all four buffers start from the identical random temperature field
initRandom(temp1_ref, temp2_ref, temp1, temp2, ni, nj);
//CPU Computation
for (istep=0; istep < nstep; istep++) {
step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref);
// swap the temperature pointers (ping-pong buffering)
temp_tmp = temp1_ref;
temp1_ref = temp2_ref;
temp2_ref= temp_tmp;
}
//Avoid From GPU Page Faults
cudaMemPrefetchAsync(temp1_ref, size, deviceId);
cudaMemPrefetchAsync(temp2_ref, size, deviceId);
cudaMemPrefetchAsync(temp1, size, deviceId);
cudaMemPrefetchAsync(temp2, size, deviceId);
int thread_size = 32;
int scale = ni / nj;
// Block is 32 x (32/scale) threads.  step_kernel_mod maps threadIdx.x to
// the nj direction and threadIdx.y to ni, so the grid is sized
// (nj/block.x)+1 by (ni/block.y)+1 blocks to cover the whole field.
dim3 threads_per_block(thread_size, (int)(thread_size / scale), 1);
dim3 number_of_blocks ((nj / threads_per_block.x) + 1, (ni / threads_per_block.y) + 1, 1);
//GPU Computation
for (int i=0; i < nstep; i++) {
step_kernel_mod<<<number_of_blocks, threads_per_block>>>(ni, nj, tfac, temp1, temp2);
//Check Errors and Synchronize
checkCuda(cudaGetLastError());
checkCuda(cudaDeviceSynchronize());
//Swap the temperature pointers
temp_tmp = temp1;
temp1 = temp2;
temp2= temp_tmp;
}
float maxError = 0;
// Output should always be stored in the temp1 and temp1_ref at this point
// (nstep is even, so the final swap leaves the newest field in temp1).
for( int i = 0; i < ni*nj; ++i ) {
if (abs(temp1[i]-temp1_ref[i]) > maxError) { maxError = abs(temp1[i]-temp1_ref[i]); }
}
// Check and see if our maxError is greater than an error bound
if (maxError > 0.0005f)
printf("Problem! The Max Error of %.5f is NOT within acceptable bounds.\n", maxError);
else
printf("The Max Error of %.5f is within acceptable bounds.\n", maxError);
checkCuda(cudaFree( temp1_ref ));
checkCuda(cudaFree( temp2_ref ));
checkCuda(cudaFree( temp1 ));
checkCuda(cudaFree( temp2 ));
return 0;
}
|
13,219 | // 20181010
// Yuqiong Li
// an example that uses CUDA to multiply all elements of an array by 2
#include <stdlib.h>
#include <stdio.h>
#define index(i, j, n) ((i) * (n) + (j))
__global__ void pictureKernel(float * a, unsigned int m, unsigned int n);
int main(){
    // Doubles every element of an m x n matrix on the GPU, printing the
    // matrix before and after.
    unsigned int m = 10, n = 3; // dimensions
    unsigned int size = m * n * sizeof(float);
    float * a = (float *) malloc(size); // host matrix, m by n
    // initializing a with (i + j) / 1.3
    int i = 0, j = 0;
    for (i = 0; i < m; i++)
        for (j = 0; j < n; j++)
            a[index(i, j, n)] = (i + j) / 1.3;
    printf("Now print out the original values: \n");
    for (i = 0; i < m; i++)
        for (j = 0; j < n; j++)
            printf("%.2f\t", a[index(i, j, n)]);
    // 1. allocate device memory for a and copy the host data over
    float * d_a;
    cudaMalloc(&d_a, size);
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    // 2. invoke kernel function with 16x16 blocks covering the matrix
    dim3 threadsPerBlock(16, 16, 1);
    dim3 blocksPerGrid(ceil(m/16.0), ceil(n/16.0), 1);
    pictureKernel<<<blocksPerGrid, threadsPerBlock>>>(d_a, m, n);
    // 3. copy the results back
    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
    // check results
    printf("\nNow print out the changed values: \n");
    for (i = 0; i < m; i++)
        for (j = 0; j < n; j++)
            printf("%.2f\t", a[index(i, j, n)]);
    printf("\n");
    free(a);
    cudaFree(d_a);
    return 0;
}
__global__ void pictureKernel(float * a, unsigned int m, unsigned int n){
    // Doubles every element of the m-by-n (row-major) matrix a.
    // x covers columns, y covers rows; out-of-range threads do nothing.
    const unsigned int c = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (r >= m || c >= n)
        return;
    a[r * n + c] *= 2.0f;
}
|
13,220 | // Launch config for huge data
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define BLOCKSIZE 1024
__global__ void dkernel(unsigned *vector, unsigned vectorSize)
{
    // Write each in-range element's own global index into the vector.
    unsigned gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid >= vectorSize)
        return;
    vector[gid] = gid;
}
int main(int nn, char *str[])
{
    // Usage: prog <N>. Fills an N-element device vector with its indices
    // and prints it from the host.
    if (nn < 2)
    {
        fprintf(stderr, "usage: %s <N>\n", str[0]);
        return 1;
    }
    unsigned N = atoi(str[1]);
    unsigned *vector, *hvector;
    unsigned i = 0;
    cudaMalloc(&vector, N * sizeof(unsigned));
    hvector = (unsigned *)malloc(N * sizeof(unsigned));
    // ceil-divide so a partial tail block is launched; the kernel bounds-checks.
    unsigned nBlocks = ceil((float)N / BLOCKSIZE);
    printf("nBlocks = %u\n", nBlocks);
    dkernel<<<nBlocks, BLOCKSIZE>>>(vector, N);
    // cudaMemcpy on the default stream synchronizes with the kernel.
    cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
    for(i = 0; i < N; i++)
    {
        printf("%u ", hvector[i]); // was %d: the values are unsigned
    }
    // Release both buffers (the original leaked them).
    free(hvector);
    cudaFree(vector);
    return 0;
} |
13,221 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#define A_Row 3
#define A_Col 2
#define B_Row 2
#define B_Col 4
__global__ void matrix_mul_gpu(int *A_gpu, int *B_gpu, int *C_gpu, int K, int COL)
{
    // C = A * B for row-major matrices: A is (rows x K), B is (K x COL).
    // One thread per output element; the launch configuration must cover
    // exactly the C matrix (there is no bounds guard, since the row count
    // is not passed in).
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int col = blockDim.y * blockIdx.y + threadIdx.y;
    int sum = 0;
    for(int k = 0;k<K;k++)
    {
        // Row stride of B is COL (its column count). The original used
        // B_gpu[k * K + col], which is wrong whenever K != COL.
        sum += A_gpu[row * K + k] * B_gpu[k * COL + col];
    }
    C_gpu[row * COL + col] = sum;
}
int main()
{
    // Multiply a constant A (A_Row x A_Col) by a constant B (B_Row x B_Col)
    // on the GPU, print the product and the total wall-clock time.
    if (A_Col != B_Row)
        exit(0); // inner dimensions must agree
    struct timeval start, end;
    gettimeofday( &start, NULL );
    // Host buffers.
    const size_t aBytes = sizeof(int) * A_Row * A_Col;
    const size_t bBytes = sizeof(int) * B_Row * B_Col;
    const size_t cBytes = sizeof(int) * A_Row * B_Col;
    int *A = (int*)malloc(aBytes);
    int *B = (int*)malloc(bBytes);
    int *C = (int*)malloc(cBytes);
    for (int idx = 0; idx < A_Row * A_Col; idx++)
        A[idx] = 90;
    for (int idx = 0; idx < B_Row * B_Col; idx++)
        B[idx] = 10;
    // Device buffers and host-to-device copies.
    int *A_gpu, *B_gpu, *C_gpu;
    cudaMalloc((void **)&A_gpu, aBytes);
    cudaMalloc((void **)&B_gpu, bBytes);
    cudaMalloc((void **)&C_gpu, cBytes);
    cudaMemcpy(A_gpu, A, aBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(B_gpu, B, bBytes, cudaMemcpyHostToDevice);
    // The threadPerBlock (x, y) should be a factor of (A_Row, B_Col).
    dim3 threadPerBlock(3, 4);
    dim3 blockNumber((A_Row + threadPerBlock.x - 1) / threadPerBlock.x,
                     (B_Col + threadPerBlock.y - 1) / threadPerBlock.y);
    printf("Block(%d,%d) Grid(%d,%d).\n", threadPerBlock.x, threadPerBlock.y, blockNumber.x, blockNumber.y);
    matrix_mul_gpu<<<blockNumber, threadPerBlock>>>(A_gpu, B_gpu, C_gpu, A_Col, B_Col);
    // Blocking copy back; also synchronizes with the kernel.
    cudaMemcpy(C, C_gpu, cBytes, cudaMemcpyDeviceToHost);
    for (int r = 0; r < A_Row; r++)
    {
        for (int c = 0; c < B_Col; c++)
        {
            printf("%4d ", C[r * B_Col + c]);
        }
        printf("\n");
    }
    free(A);
    free(B);
    free(C);
    cudaFree(A_gpu);
    cudaFree(B_gpu);
    cudaFree(C_gpu);
    gettimeofday( &end, NULL );
    int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    printf("total time is %d ms\n", timeuse/1000);
} |
13,222 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define BLOCKSIZE 256
__global__ void kern_set_val (float *gpu_ptr, float value, int n) {
    // Set gpu_ptr[i] = value for each of the n elements, one thread per element.
    int i;
    i = threadIdx.x + blockIdx.x * blockDim.x;
    // Guard against a grid rounded up past n; the original ignored n,
    // writing out of bounds whenever the grid overshoots.
    if (i < n)
        gpu_ptr[i] = value;
}
int main () {
    // Fill a device vector with 11.0f, copy it back and verify every element.
    int i, failed=0;
    int N = 1024; // size of vector
    float *ptr; // Host pointer
    float *gpu_ptr; // Device pointer
    /* Allocate vector in Host*/
    ptr = (float *)malloc(sizeof(float)*N);
    /* Allocate vector in Device*/
    cudaMalloc (&gpu_ptr, sizeof(float)*N);
    /* Ceil-divide the grid so any N (not only multiples of BLOCKSIZE) is
       fully covered; for N=1024 this launches the same 4 blocks as before. */
    kern_set_val<<<(N + BLOCKSIZE - 1)/BLOCKSIZE,BLOCKSIZE>>>(gpu_ptr, 11, N);
    cudaDeviceSynchronize ();
    /* Copy the results back to the host */
    cudaMemcpy(ptr, gpu_ptr, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree (gpu_ptr);
    /* Now check that it did what we want */
    for (i = 0; i < 10; i++)//first ten values are written
        printf ("%f\t", ptr[i]);
    printf ("\n");
    for (i = N-10; i < N; i++)//last ten values are written
        printf ("%f\t", ptr[i]);
    printf ("\n");
    for (i = 0; i < N; i++) {//All values are compared
        if (fabs(ptr[i]-11.0) > 1e-8) {
            failed=1;
        }
    }
    if (failed) {
        printf ("FAILED !!\n");
    } else {
        printf ("PASSED !!\n");
    }
    free (ptr);
    return 0;
}
|
13,223 | #include "includes.h"
#define N 15000
using namespace std;
//Here: threadIdx.x is the thread index within a block along the x coordinate,
//blockIdx.x is the block index within the grid along the x coordinate,
//blockDim.x is the number of threads in one block.
__global__ void MatrVectMul(int *d_c, int *d_a, int *d_b)
{
    // d_c = A * b for the N x N matrix d_a, stored so that a(i,k) = d_a[i + k*N],
    // times the N-vector d_b. One thread per output row.
    int i = blockIdx.x*blockDim.x+threadIdx.x;
    if(i<N)
    {
        // Accumulate in a register instead of read-modify-writing global
        // memory every iteration (the original did N global updates per row);
        // the final value stored is identical.
        int acc = 0;
        for (int k=0;k<N;k++)
            acc += d_a[i+k*N]*d_b[k];
        d_c[i] = acc; // single global store
    }
} |
13,224 | //
// Created by gautam on 17/04/20.
//
#include "Metadata.cuh"
// Load (or lazily create) the metadata for `tableName`.
// If the metadata file exists it is parsed as four whitespace-separated
// lines, in this fixed order: column names, column datatypes, key column
// names, then the row count. Otherwise the object starts empty; files are
// only written later (see commit()).
Metadata::Metadata(std::string tableName) {
this->tableName = tableName;
this->valid = true;
keyMap = std::map<std::string, int>();
metadataFileName = utils::getMetadataFileName(tableName);
dataFileName = utils::getDataFileName(tableName);
columns = std::vector<std::string>();
datatypes = std::vector<ColType>();
keyCols = std::vector<std::string>();
colMap = std::map<std::string, int>();
rowSize = 0;
rowCount = 0;
if (utils::fileExists(metadataFileName)) {
std::ifstream metadataIn(metadataFileName);
std::string line, val;
// Read column names (line 1); colMap maps name -> column index.
getline(metadataIn, line);
std::istringstream iss(line);
int index = 0;
while (iss >> val) {
columns.push_back(val);
colMap[val] = index++;
}
// Read column datatypes (line 2); rowSize accumulates the fixed row width.
getline(metadataIn, line);
iss = std::istringstream(line);
while (iss >> val) {
ColType temp = newColType(val);
rowSize += temp.size;
datatypes.push_back(temp);
}
// Read key columns (line 3); keyMap maps key name -> key index.
getline(metadataIn, line);
iss = std::istringstream(line);
index = 0;
while (iss >> val) {
keyCols.push_back(val);
keyMap[val] = index++;
}
// Read row count (line 4).
getline(metadataIn, line);
iss = std::istringstream(line);
iss >> rowCount;
metadataIn.close();
}
// No need to create now, wait till destructor is reached. Might be invalidated later.
// else {
// std::ofstream fout(metadataFileName);
// fout.close();
// fout = std::ofstream(dataFileName);
// fout.close();
// }
}
std::string Metadata::getColName(int col) {
    // Name of the column stored at index `col` (no bounds checking).
    const std::string &name = columns[col];
    return name;
}
std::string Metadata::operator[](int col) {
    // Convenience alias: same lookup as getColName().
    return getColName(col);
}
bool Metadata::append(std::string &colName, ColType &colType, bool isKey) {
    // Register a new column; refuses duplicates and returns false for them.
    if (colMap.find(colName) != colMap.end())
        return false;
    colMap[colName] = columns.size();
    columns.push_back(colName);
    datatypes.push_back(colType);
    rowSize += colType.size; // fixed-width rows grow by this column's size
    // Optionally promote the column to a key as well.
    if (isKey && keyMap.find(colName) == keyMap.end()) {
        keyMap[colName] = keyCols.size();
        keyCols.push_back(colName);
    }
    return true;
}
// Persist the in-memory schema to the metadata file (overwriting it) in the
// same four-line format the constructor reads: column names, datatypes,
// key columns, row count. Also touches the data file (append mode) so it
// exists. Does nothing if this metadata has been invalidated.
void Metadata::commit() {
if (valid) {
metadataFileName = utils::getMetadataFileName(tableName);
std::ofstream fout(metadataFileName);
for (const auto &colName : columns) {
fout << colName << " ";
}
fout << std::endl;
for (const ColType &colType : datatypes) {
fout << colType.str << " ";
}
fout << std::endl;
for (const std::string &keyCol : keyCols) {
fout << keyCol << " ";
}
fout << std::endl;
fout << rowCount << " ";
fout << std::endl;
fout.close();
// Re-open on the data file in append mode just to ensure it exists.
fout = std::ofstream(dataFileName, std::ios::app);
fout.close();
}
}
bool Metadata::appendKey(std::string &keyName) {
    // A key must refer to an existing column.
    if (columns.empty() || colMap.find(keyName) == colMap.end())
        return false;
    bool alreadyKey = keyMap.find(keyName) != keyMap.end();
    if (!alreadyKey) {
        // Promote the existing column to a key.
        keyMap[keyName] = keyCols.size();
        keyCols.push_back(keyName);
    }
    return true;
}
void Metadata::invalidate() {
this->valid = false;
columns.clear();
colMap.clear();
datatypes.clear();
keyCols.clear();
keyMap.clear();
}
ColType Metadata::getColType(int col) {
    // Datatype of the column at index `col` (no bounds checking).
    ColType type = datatypes[col];
    return type;
}
ColType Metadata::getColType(std::string &colName) {
    // Datatype looked up by column name. Note map::operator[] inserts a
    // default index for an unknown name, exactly as the original did.
    int idx = colMap[colName];
    return datatypes[idx];
}
// Default constructor: an empty schema explicitly marked invalid.
Metadata::Metadata() {
rowSize = 0;
rowCount = 0;
valid = false;
} |
13,225 | /*
* Module to test CUDA module loading and execution.
* To be compiled with:
* nvcc -ptx module_test.cu
*/
#ifdef __cplusplus
extern "C" {
#endif
/// Sets the first N elements of array to value.
__global__ void testMemset(float* array, float value, int N){
	// Sets the first N elements of array to value.
	// Flat index over a possibly 2-D grid of 1-D blocks.
	int idx = (blockIdx.y*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x;
	if(idx >= N)
		return;
	array[idx] = value;
}
#ifdef __cplusplus
}
#endif |
13,226 | /* All the source */
#include <iostream> // Cin/ Cout
#include <fstream> // For Read/Write File
#include <string>
#include <sstream> // using sstream
#include <vector> // For using Vector
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <ctime>
using namespace std;
#define TILE_DIM 32 // For 2D Thread Block Allocation
#define Block_Size 128 // For 1D Thread Block Allocation
enum run_mode {GPU_PARA=0, CPU_GOLDEN, READ_THRU};
//int parse_cmd_line(int argc, char* argv[], string & G_File_Name, string & K_File_Name, int & Individual_Num, int & MarkerBlock_Size);
// Parse the command line into the output-reference parameters.
// Flags: -z/-w genotype files, -k output kinship file, -i individual count,
// -l marker count, -b marker-pair block size, -m run mode, -h help.
// Returns 0 on success, 1 on error or after printing help.
// NOTE(review): flag values are read via argv[i+1] but i is not advanced,
// so each value string is also scanned as a flag on the next iteration --
// harmless for numeric/path values, but worth confirming.
int parse_cmd_line(int argc, char* argv[], string & Z_File_Name, string & W_File_Name, string & K_File_Name, int & Individual_Num, int & Marker_Num_Length, int & MarkerPair_Block_Size, char & Run_Mode)
{
if((argc!=2)&&(argc <15))
{
cerr <<"Please specific the correct parameters, or use parameter -u for usermanuals!" <<endl;
return 1;
}
else
{
for (int i=1; i<argc;i++)
{
string arg=argv[i];
if((arg=="-z")||(arg=="-Z"))
{
if(i+1<argc)
{
Z_File_Name =argv[i+1];
}
else
{
cerr<<"Parse cmd_line fail, Need to clearly specific the full file name of the first genotype matrix!" <<endl;
return 1; // Parse cmd_line fail
}
}
if((arg=="-w")||(arg=="-W"))
{
if(i+1<argc)
{
W_File_Name =argv[i+1];
}
else
{
cerr<<"Parse cmd_line fail, Need to clearly specific the full file name of the second genotype matrix!" <<endl;
return 1; // Parse cmd_line fail
}
}
if((arg=="-k")||(arg=="-K"))
{
if(i+1<argc)
{
K_File_Name =argv[i+1];
}
else
{
cerr<<"Parse cmd_line fail, Need to clearly specific the full file name of the output kinship matrix !" <<endl;
return 1; // Parse cmd_line fail
}
}
if((arg=="-i")||(arg=="-I"))
{
if(i+1<argc)
{
Individual_Num =atoi(argv[i+1]);
}
else
{
cerr<<"Parse cmd_line fail, Need to clearly specific the Individual Number!" <<endl;
return 1; // Parse cmd_line fail
}
}
if((arg=="-l")||(arg=="-L"))
{
if(i+1<argc)
{
Marker_Num_Length =atoi(argv[i+1]);
}
else
{
cerr<<"Parse cmd_line fail, Need to clearly specific the Direct Marker Number Length!" <<endl;
return 1; // Parse cmd_line fail
}
}
if((arg=="-b")||(arg=="-B"))
{
if(i+1<argc)
{
MarkerPair_Block_Size =atoi(argv[i+1]);
}
else
{
cerr<<"Parse cmd_line fail, Need to clearly specific the Marker Pair Block Size!" <<endl;
return 1; // Parse cmd_line fail
}
}
if((arg=="-m")||(arg=="-M"))
{
// Run mode is parsed numerically (atoi) into a char: 0/1/2 per run_mode.
if(i+1<argc)
{
Run_Mode =atoi(argv[i+1]);
}
else
{
cerr<<"Parse cmd_line fail, Need to clearly specific the Run Mode(0: GPU Paral; 1: Golden CPU; 2: Read thorugh) !" <<endl;
return 1; // Parse cmd_line fail
}
}
if((arg=="-h")||(arg=="-H"))
{
cout<<"Welcome to use this program to do Kinship Matrix Calculation" <<endl;
cout << "The usage of input parameter arguments are listed as followings:" <<endl;
cout << "-h or -H: Output this Help usage message" <<endl;
cout << "-z or -Z: The full name of the 1st Genotype file"<<endl;
cout << "-w or -W: The full name of the 2nd Genotype file"<<endl;
cout << "-k or -K: The full name of Kinship Matrix file"<<endl;
cout << "-i or -I: The Individual number" <<endl;
cout << "-l or -L: The direct marker Length" <<endl;
cout << "-b or -B: The Block size for dividing the genotype marker pairs " <<endl;
cout << "-m or -M: Run_Mode(0, 1, 2 for the GPU/CPU/Read Through) " <<endl;
return 1; // Parse cmd_line fail
}
}
}
return 0;
}
// Tiled matrix transpose: GMatrix_O = t(GMatrix_I), where GMatrix_I is
// Matrix_Height x Matrix_Width (row-major). Launch with TILE_DIM x TILE_DIM
// thread blocks and a grid of ceil(Width/TILE) x ceil(Height/TILE).
// The +1 padding on the shared tile avoids shared-memory bank conflicts.
__global__ void Genotype_Matrix_Transpose(float *GMatrix_O, float *GMatrix_I, int Matrix_Width, int Matrix_Height) //ok
{
__shared__ float block[TILE_DIM][TILE_DIM+1];
int xIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int yIndex = blockIdx.y * TILE_DIM + threadIdx.y;
int Index_In = xIndex + (yIndex)*Matrix_Width;
if( (xIndex < Matrix_Width) && (yIndex < Matrix_Height))
block[threadIdx.y][threadIdx.x] = GMatrix_I[Index_In];
else
block[threadIdx.y][threadIdx.x] = 0.0; //pad zero for the unarbitrary matrix
__syncthreads();
// Swap block coordinates for the output tile; the output matrix has
// leading dimension Matrix_Height.
xIndex = blockIdx.y * TILE_DIM + threadIdx.y;
yIndex = blockIdx.x * TILE_DIM + threadIdx.x;
int Index_Out = xIndex + (yIndex)*Matrix_Height;
if( (xIndex < Matrix_Height)&&(yIndex < Matrix_Width))
GMatrix_O[Index_Out] = block[threadIdx.y][threadIdx.x];
__syncthreads();
}
// Tiled matrix multiply computing K = t(GM) * GM, an Individual_Num x
// Individual_Num product accumulated over Marker_Num. Expects TILE_DIM x
// TILE_DIM thread blocks and a grid covering the output matrix.
__global__ void Kinship_Matrix_Cal(float *Genotype_Matrix_Transpose_In, float *Genotype_Matrix_In, float *Kinship_Matrix_Out,int Individual_Num, int Marker_Num)
{
/* Use shared memory to calculate t(GM)*GM,
* GM is original genotype matrix, which is a mxn matrix and m is marker number;
* t(GM) means the transpose of the original genotype matrix, which is a nXm matrix */
__shared__ float t_s[TILE_DIM][TILE_DIM]; // shared memory for transposed matrix
__shared__ float m_s[TILE_DIM][TILE_DIM]; // shared memory for matrix
int r_t = TILE_DIM* blockIdx.y+ threadIdx.y; // row index for transposed matrix
int c_t = threadIdx.x; // column index for transposed matrix
int r_m = threadIdx.y; // row index of matrix
int c_m = blockIdx.x* TILE_DIM+ threadIdx.x;// column index for matrix
float entry_sub =0.0;
// March both tiles along the shared Marker_Num dimension.
for(int i=0; i< (Marker_Num +TILE_DIM-1)/TILE_DIM; i++)
{
float value_t, value_m;
int index_t = r_t*Marker_Num +c_t; //linear index of transposed matrix
int index_m = r_m* Individual_Num+ c_m; //linear index of genotype matrix
if ((r_t < Individual_Num )&&(c_t < Marker_Num))
value_t = Genotype_Matrix_Transpose_In[index_t];
else
value_t =0.0; // pad with 0
if ((r_m< Marker_Num)&&(c_m < Individual_Num))
value_m = Genotype_Matrix_In[index_m];
else
value_m =0.0; // pad with 0
// Advance to the next tile for the following iteration.
c_t += TILE_DIM;
r_m += TILE_DIM;
t_s[threadIdx.y][threadIdx.x] = value_t;
m_s[threadIdx.y][threadIdx.x] = value_m;
__syncthreads();
// Partial dot product over this tile.
for (int k=0; k< TILE_DIM; k++)
entry_sub += t_s[threadIdx.y][k]* m_s[k][threadIdx.x];
__syncthreads();
}
int r_k = TILE_DIM* blockIdx.y+ threadIdx.y; // row index for kinship matrix;
int c_k = TILE_DIM* blockIdx.x+ threadIdx.x; // column index for kinship matrix;
int index_k = r_k* Individual_Num+ c_k; //linear index of kinship matrix
if((r_k<Individual_Num) &&(c_k<Individual_Num))
Kinship_Matrix_Out[index_k] =entry_sub;
__syncthreads();
}
__global__ void Kinship_Matrix_Add(float *Kinship_Matrix_Sub, float *Kinship_Matrix, long Matrix_Size)
{
    // Element-wise accumulate Kinship_Matrix += Kinship_Matrix_Sub using a
    // grid-stride loop, so any grid size covers any Matrix_Size.
    long step = blockDim.x * gridDim.x;
    for (long pos = blockIdx.x * blockDim.x + threadIdx.x; pos < Matrix_Size; pos += step)
        Kinship_Matrix[pos] += Kinship_Matrix_Sub[pos];
}
__global__ void Kinship_Matrix_Normalize(float *Kinship_Matrix, long Matrix_Size, float Normalize_Rate)
{
    // Divide every entry by Normalize_Rate using a grid-stride loop.
    long step = blockDim.x * gridDim.x;
    for (long pos = blockIdx.x * blockDim.x + threadIdx.x; pos < Matrix_Size; pos += step)
        Kinship_Matrix[pos] /= Normalize_Rate;
}
__global__ void Matrix_AllValue_Set(float *Matrix, long Matrix_Size, float Value_Set)
{
    // Fill every element of Matrix with Value_Set (grid-stride loop).
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    // long, matching the sibling kernels (was int, which could overflow for
    // very large grids and is inconsistent with Kinship_Matrix_Add/Normalize).
    long stride = blockDim.x * gridDim.x;
    for (long i = index; i < Matrix_Size; i += stride)
        Matrix[i] = Value_Set;
}
/*The golden code implementation of kinship matrix calculoation, which can be used for the base evaluation of GPU paralleling*/
void Golden_Kinship_Matrix_Cal(float *Kinship_Matrix_Out, float *GMatrix_I, int Individual_Num, int Marker_Num)
{
    // CPU reference: K = t(G) * G where G is Marker_Num x Individual_Num
    // (row-major). Serves as the baseline for validating the GPU kernels.
    for (int i = 0; i < Individual_Num; i++)
    {
        for (int j = 0; j < Individual_Num; j++)
        {
            float acc = 0.0f;
            // Dot product of columns i and j of G, summed over all markers.
            for (int m = 0; m < Marker_Num; m++)
                acc += GMatrix_I[m*Individual_Num + i] * GMatrix_I[m*Individual_Num + j];
            Kinship_Matrix_Out[i*Individual_Num + j] = acc;
        }
    }
}
void Golden_Kinship_Matrix_Add (float *Kinship_Matrix_Sub, float *Kinship_Matrix, long Matrix_Size)
{
    // CPU reference: element-wise Kinship_Matrix += Kinship_Matrix_Sub.
    for (long idx = 0; idx < Matrix_Size; ++idx)
        Kinship_Matrix[idx] += Kinship_Matrix_Sub[idx];
}
void Golden_Kinship_Matrix_Normalize(float *Kinship_Matrix, long Matrix_Size, float Normalize_Rate)
{
    // CPU reference: scale every entry by 1/Normalize_Rate.
    for (long idx = 0; idx < Matrix_Size; ++idx)
        Kinship_Matrix[idx] /= Normalize_Rate;
}
void Golden_Matrix_AllValue_Set(float *Matrix, long Matrix_Size, float Value_Set)
{
    // CPU reference: fill the whole buffer with Value_Set.
    for (long idx = 0; idx < Matrix_Size; ++idx)
        Matrix[idx] = Value_Set;
}
// Driver: reads two genotype matrices (Z and W), forms element-wise products
// of marker pairs in blocks of MarkerPair_Block_Size rows, accumulates the
// kinship matrix K += t(G)*G per block (on GPU or CPU depending on Run_Mode),
// normalizes by the mean diagonal, and writes K to the output file.
int main(int argc, char* argv[])
{
clock_t c_begin, c_end;
c_begin=clock();
// begin to calculate the time
string Z_File_Name, W_File_Name, K_File_Name;
char Run_Mode;
int Individual_Num, Marker_Num, MarkerPair_Block_Size;
string OS_Path_Sep="//";
long read_line_count=0;
// Parse the command line for the inputting
if(1==parse_cmd_line(argc, argv, Z_File_Name, W_File_Name, K_File_Name, Individual_Num, Marker_Num, MarkerPair_Block_Size, Run_Mode))
return 1;
//Open and Read the Geneotype Matrix .
ifstream Z_File(Z_File_Name.c_str(), ios::in);
ifstream W_File(W_File_Name.c_str(), ios::in);
if(!Z_File.is_open())
{
cerr<< Z_File_Name <<" The 1st Genotype File Can't be accessed!"<<endl;
return 1;
}
if(!W_File.is_open())
{
cerr<< W_File_Name <<" The 2nd Genotype File Can't be accessed!"<<endl;
return 1;
}
if(Z_File && W_File)
{
// Parse Z: one marker per line, Individual_Num values per line,
// comma-separated with a tab-separated fallback.
// NOTE(review): Z_Matrix and W_Matrix are never freed on the success
// path -- they leak until process exit.
float *Z_Matrix, *W_Matrix;
string sLine;
Z_Matrix = new float[Individual_Num*Marker_Num];
int Line_Count=0;
while(getline(Z_File, sLine))
{
if(sLine.empty()) ; // Ignore empty lines
else
{
stringstream ss(sLine);
vector <string> s_v;
string item;
char delim1 =',';
char delim2 ='\t';
while(getline(ss, item, delim1))
{
s_v.push_back(item);
}
int Col_Num=s_v.size();
if (Col_Num < Individual_Num)
{
s_v.clear();
while(getline(ss, item, delim2))// try delim2;
{
s_v.push_back(item);
}
Col_Num=s_v.size();
}
if(Col_Num != Individual_Num)
{
cerr<< Z_File_Name <<"File Format (Indivual Num) is not right, Error at Line=" << Line_Count<<endl;
if(Z_Matrix!=NULL)
{
delete []Z_Matrix;
Z_Matrix =NULL;
}
return 1;
}
for (int i=0; i<Col_Num;i++)
{
float value= atof(s_v.at(i).c_str());
Z_Matrix[Line_Count*Individual_Num+i] = value;
}
}
Line_Count++;
}
if(Marker_Num != Line_Count)
{
cerr<< Z_File_Name <<"File Format (Marker Num) is not Right, Total Read Line Num =" << Line_Count<<endl;
if(Z_Matrix!=NULL)
{
delete []Z_Matrix;
Z_Matrix =NULL;
}
return 1;
}
// Parse W with the same per-line format as Z.
Line_Count=0;
W_Matrix = new float[Individual_Num*Marker_Num];
while(getline(W_File, sLine))
{
if(sLine.empty()) ; // Ignore empty lines
else
{
stringstream ss(sLine);
vector <string> s_v;
string item;
char delim1 =',';
char delim2 ='\t';
while(getline(ss, item, delim1))
{
s_v.push_back(item);
}
int Col_Num=s_v.size();
if (Col_Num < Individual_Num)
{
s_v.clear();
while(getline(ss, item, delim2))// try delim2;
{
s_v.push_back(item);
}
Col_Num=s_v.size();
}
if(Col_Num != Individual_Num)
{
cerr<< W_File_Name <<"File Format (Indivudal Num) is not right, Error at Line=" << Line_Count<<endl;
if(W_Matrix!=NULL)
{
delete []W_Matrix;
W_Matrix =NULL;
}
return 1;
}
for (int i=0; i<Col_Num;i++)
{
float value= atof(s_v.at(i).c_str());
W_Matrix[Line_Count*Individual_Num+i] = value;
}
}
Line_Count++;
}
if(Marker_Num != Line_Count)
{
cerr<< W_File_Name <<"File Format (Marker Num) is not Right, Total Read Line Num =" << Line_Count<<endl;
if(W_Matrix!=NULL)
{
delete []W_Matrix;
W_Matrix =NULL;
}
return 1;
}
// Working buffers: unified memory for GPU mode, plain heap otherwise.
// NOTE(review): KinshipMatrix is accumulated into below but never
// zero-initialized here (neither cudaMallocManaged nor new float[]
// guarantees zeros); likewise G_Matrix's unfilled tail rows are
// uninitialized before the first full block is processed -- confirm.
// Also note G_Matrix_Tran is only allocated/used in GPU mode.
float *G_Matrix, *G_Matrix_Tran, *KinshipMatrix_Sub, *KinshipMatrix;
const long g_matrix_size = Individual_Num*MarkerPair_Block_Size;
const long k_matrix_size = Individual_Num*Individual_Num;
switch (Run_Mode)
{
case GPU_PARA :
cudaMallocManaged(&G_Matrix, sizeof(float)*g_matrix_size);
cudaMallocManaged(&G_Matrix_Tran, sizeof(float)*g_matrix_size);
cudaMallocManaged(&KinshipMatrix_Sub, sizeof(float)*k_matrix_size);
cudaMallocManaged(&KinshipMatrix, sizeof(float)*k_matrix_size);
break;
default :
G_Matrix = new float[g_matrix_size];
KinshipMatrix_Sub = new float[k_matrix_size];
KinshipMatrix = new float[k_matrix_size];
break;
}
// Launch geometry: 2-D tiles for transpose/matmul, 1-D for the
// element-wise kernels.
dim3 thread2ds(TILE_DIM,TILE_DIM);
dim3 grid2ds((Individual_Num+TILE_DIM-1)/TILE_DIM, (MarkerPair_Block_Size+TILE_DIM-1)/TILE_DIM);
dim3 grid2ds1((Individual_Num+TILE_DIM-1)/TILE_DIM, (Individual_Num+TILE_DIM-1)/TILE_DIM);
dim3 thread1ds(Block_Size);
dim3 grid1ds((Individual_Num*Individual_Num)/Block_Size);
// Enumerate all ordered marker pairs (Z row x later W row); each pair
// contributes one element-wise product row to G_Matrix. When a block of
// MarkerPair_Block_Size rows is full, fold it into KinshipMatrix.
int MairPair_Count =0;
for (int Marker_Z=0; Marker_Z < Marker_Num ; Marker_Z++)
for (int Marker_W=Marker_Z+1; Marker_W < Marker_Num ; Marker_W++)
{
for (int index =0; index < Individual_Num; index ++)
G_Matrix[MairPair_Count*Individual_Num+index] = Z_Matrix[Marker_Z*Individual_Num+index] * W_Matrix[Marker_W*Individual_Num+index];
MairPair_Count ++;
if (MairPair_Count < MarkerPair_Block_Size)
continue;
else // Process the full Marker_Pair Block
{
switch(Run_Mode)
{
case GPU_PARA :
// begin to call the GPU program to calculate the partial kinship matrix
Genotype_Matrix_Transpose<<<grid2ds, thread2ds >>>(G_Matrix_Tran, G_Matrix, Individual_Num, MarkerPair_Block_Size);
cudaDeviceSynchronize();// Wait for GPU to finish before accessing on host
Kinship_Matrix_Cal<<<grid2ds1, thread2ds >>>(G_Matrix_Tran, G_Matrix, KinshipMatrix_Sub, Individual_Num, MarkerPair_Block_Size);
cudaDeviceSynchronize(); // Wait for GPU to finish before accessing on host
// add the block kinship matrix into the all kinship matrix
Kinship_Matrix_Add <<<grid1ds, thread1ds>>>(KinshipMatrix_Sub, KinshipMatrix, k_matrix_size);
cudaDeviceSynchronize(); // Wait for GPU to finish before accessing on host
Matrix_AllValue_Set <<<grid1ds, thread1ds>>> (G_Matrix, g_matrix_size, 0.0);
cudaDeviceSynchronize(); // Wait for GPU to finish before accessing on host
break;
case CPU_GOLDEN:
Golden_Kinship_Matrix_Cal(KinshipMatrix_Sub, G_Matrix, Individual_Num, MarkerPair_Block_Size);
Golden_Kinship_Matrix_Add(KinshipMatrix_Sub, KinshipMatrix, k_matrix_size);
Golden_Matrix_AllValue_Set(G_Matrix, g_matrix_size, 0.0);
break;
default :
break;
}
MairPair_Count =0;
}
}
// Residual partial block: processed with the full block size; the rows
// past MairPair_Count were zeroed after the previous block, so they
// contribute nothing (but see the NOTE(review) about the very first pass).
if(MairPair_Count>0) // At the rear part of the residual marker pair lines
{
switch(Run_Mode)
{
case GPU_PARA :
Genotype_Matrix_Transpose<<<grid2ds, thread2ds >>>(G_Matrix_Tran, G_Matrix, Individual_Num, MarkerPair_Block_Size);
cudaDeviceSynchronize();// Wait for GPU to finish before accessing on host
Kinship_Matrix_Cal<<<grid2ds1, thread2ds >>>(G_Matrix_Tran, G_Matrix, KinshipMatrix_Sub, Individual_Num, MarkerPair_Block_Size);
cudaDeviceSynchronize(); // Wait for GPU to finish before accessing on host
// add the block kinship matrix into the all kinship matrix
Kinship_Matrix_Add <<<grid1ds, thread1ds>>>(KinshipMatrix_Sub, KinshipMatrix, k_matrix_size);
cudaDeviceSynchronize(); // Wait for GPU to finish before accessing on host
break;
case CPU_GOLDEN:
Golden_Kinship_Matrix_Cal(KinshipMatrix_Sub, G_Matrix, Individual_Num, MarkerPair_Block_Size);
Golden_Kinship_Matrix_Add(KinshipMatrix_Sub, KinshipMatrix, k_matrix_size);
Golden_Matrix_AllValue_Set(G_Matrix, g_matrix_size, 0.0);
break;
default:
break;
}
MairPair_Count =0; // Reset the MairPair_Count to 0
}
// Begin to calculate the Normalized ratio
// (the mean of the diagonal of the accumulated kinship matrix).
float Normalize_Rate =0.0;
for (int i_index =0; i_index < Individual_Num; i_index++)
Normalize_Rate += KinshipMatrix[i_index*Individual_Num+i_index];
Normalize_Rate =Normalize_Rate/Individual_Num;
switch(Run_Mode)
{
case GPU_PARA :
Kinship_Matrix_Normalize <<<grid1ds, thread1ds>>>(KinshipMatrix, k_matrix_size, Normalize_Rate) ;
cudaDeviceSynchronize(); // Wait for GPU to finish before accessing on host
break;
case CPU_GOLDEN:
Golden_Kinship_Matrix_Normalize(KinshipMatrix, k_matrix_size, Normalize_Rate);
break;
default :
break;
}
//Output the kinship matrix
ofstream Kinship_File; // used to output the calculated kinship matrix
Kinship_File.open(K_File_Name.c_str());
if(Kinship_File.is_open())
{
if(Run_Mode <READ_THRU)
{
char delim ='\t';
for (int i_individual=0; i_individual < Individual_Num; i_individual++)
{
Kinship_File<<KinshipMatrix[i_individual*Individual_Num];
for (int j_individual=1; j_individual < Individual_Num; j_individual++)
{
Kinship_File<< delim<< KinshipMatrix[i_individual*Individual_Num + j_individual];
}
Kinship_File<<endl; // end line
}
}
else
{
Kinship_File << "Read through, Read_Line_Count is:" << read_line_count << endl;
}
}
else
{
cerr <<"Error open the result file for the output Kinship Matrix!"<<endl;
}
Kinship_File.close();
// Free the allocated matrix
switch (Run_Mode)
{
case GPU_PARA :
cudaFree(G_Matrix);
cudaFree(G_Matrix_Tran);
cudaFree(KinshipMatrix_Sub);
cudaFree(KinshipMatrix);
break;
default:
delete []G_Matrix;
delete []KinshipMatrix_Sub;
delete []KinshipMatrix;
break;
}
}
c_end =clock();
double elapse_time = double (c_end-c_begin)/CLOCKS_PER_SEC;
cout << "Hello, the elapse time is " << elapse_time << " seconds" << endl;
}
// Self-test: transpose a 50000 x 10000 matrix on the GPU and verify every
// element against the input on the host. Allocates ~2 GB x2 of unified memory.
void test_matrix_transpose()
{
/* The following are GPU interface for testing the GPU version for Matrix Transpose at arbitrary dimension*/
int Matrix_Width =10000;
int Matrix_Height =50000;
dim3 threads(TILE_DIM,TILE_DIM);
dim3 grids((Matrix_Width+TILE_DIM-1)/TILE_DIM, (Matrix_Height+TILE_DIM-1)/TILE_DIM);
const long mem_size = sizeof(float)*Matrix_Width*Matrix_Height;
float *GMatrix_O, *GMatrix_I;
cudaMallocManaged(&GMatrix_O, mem_size);
cudaMallocManaged(&GMatrix_I, mem_size);
// initalize host data
for(long i = 0; i < (Matrix_Width*Matrix_Height); ++i)
GMatrix_I[i] = (float) i;
Genotype_Matrix_Transpose<<<grids, threads >>>(GMatrix_O, GMatrix_I, Matrix_Width, Matrix_Height);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Compare I(i,j) against O(j,i) element by element.
float maxError = 0.0f;
for (int i = 0; i < Matrix_Height; i++)
{
for (int j = 0; j < Matrix_Width; j++)
{
maxError = fmax(maxError, fabs(GMatrix_I[i*Matrix_Width+j]-GMatrix_O[j*Matrix_Height+i]));
if(maxError>0.1)
cout << "i=" <<i << "j=" <<j << "GMatrix_I(i,j)=" << GMatrix_I[i*Matrix_Width+j] << "GMatrix_O(j,i)=" << GMatrix_O[j*Matrix_Height+i]<< endl;
}
}
std::cout << "Max error: " << maxError << std::endl;
cudaFree(GMatrix_O);
cudaFree(GMatrix_I);
return ;
}
// Self-test: transpose a synthetic genotype matrix, verify the transpose on
// the host, then run the tiled kinship (t(G)*G) kernel and spot-check a few
// output entries by printing them.
void test_kinship()
{
/* The following are GPU interface for testing the GPU version for kinship matrix calculating by multiplying the transpose of genotype Matrix and genotype matrix at arbitrary dimension*/
int Marker_Num =1000;
int Individual_Num =2500;
dim3 threads(TILE_DIM,TILE_DIM);
dim3 grids((Individual_Num+TILE_DIM-1)/TILE_DIM, (Marker_Num+TILE_DIM-1)/TILE_DIM);
const long genotype_matrix_size = sizeof(float)*Marker_Num*Individual_Num;
const long kinship_matrix_size = sizeof(float)* Individual_Num*Individual_Num;
float *Genotype_Matrix_T, *Genotype_Matrix, *Kinship_Matrix;
cudaMallocManaged(&Genotype_Matrix_T, genotype_matrix_size);
cudaMallocManaged(&Genotype_Matrix, genotype_matrix_size);
cudaMallocManaged(&Kinship_Matrix, kinship_matrix_size);
// initalize host data
for(long i = 0; i < (Marker_Num*Individual_Num); ++i)
Genotype_Matrix[i] = (float) 1.0*i/(Marker_Num);
Genotype_Matrix_Transpose<<<grids, threads >>>(Genotype_Matrix_T, Genotype_Matrix, Individual_Num, Marker_Num);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
// Verify the transpose element by element before using it.
float maxError = 0.0f;
for (int i = 0; i < Marker_Num; i++)
{
for (int j = 0; j < Individual_Num; j++)
{
maxError = fmax(maxError, fabs(Genotype_Matrix[i*Individual_Num+j]-Genotype_Matrix_T[j*Marker_Num+i]));
if(maxError>0.1)
cout << "i=" <<i << "j=" <<j << "Genotype_Matrix(i,j)=" << Genotype_Matrix[i*Individual_Num+j] << "Genotype_Matrix_T(j,i)=" << Genotype_Matrix_T[j*Marker_Num+i]<< endl;
}
}
std::cout << "Matrix Transpose Max error: " << maxError << std::endl;
// Grid covering the Individual_Num x Individual_Num output.
dim3 grids1((Individual_Num+TILE_DIM-1)/TILE_DIM, (Individual_Num+TILE_DIM-1)/TILE_DIM);
Kinship_Matrix_Cal<<<grids1, threads >>>(Genotype_Matrix_T, Genotype_Matrix, Kinship_Matrix, Marker_Num, Individual_Num);
// Wait for GPU to finish before accessing on host
cudaDeviceSynchronize();
std::cout <<"Kinship_Matrix[0,0]="<< Kinship_Matrix[0] <<"Kinship_Matrix[0,1]=" <<Kinship_Matrix[1] << "Kinship_Matrix[0,1023]="<< Kinship_Matrix[1023] <<"Kinship_Matrix[1,1]=" <<Kinship_Matrix[1024]<< std::endl;
cudaFree(Genotype_Matrix_T);
cudaFree(Genotype_Matrix);
cudaFree(Kinship_Matrix);
return;
}
// Scratch function: everything inside is commented-out debug/launch-config
// snippets kept for reference; calling it is a no-op.
void test_mis()
{
/* std::cout << "g_matrix(0,0)=" << G_Matrix[0]<< std::endl;
cudaMemset(G_Matrix, 0, g_mem_size*sizeof(float));
cudaMemset(KinshipMatrix_Sub, 0, k_mem_size*sizeof(float));
std::cout << "Line_Count= " << Line_Count << std::endl;
std::cout << "g_matrix(0,0)=" << G_Matrix[0]<< std::endl;
std::cout << "kmatrix_sub(0,0)=" << KinshipMatrix_Sub[0]<< std::endl;
std::cout << "kmatrix(0,0)=" << KinshipMatrix[0]<< std::endl;
std::cout << "Line_Count= " << Line_Count << std::endl;
std::cout << "kmatrix_sub( 0,0)=" << KinshipMatrix_Sub[0]<< std::endl;
std::cout << "kmatrix(0,0)=" << KinshipMatrix[0]<< std::endl;
*/
/* dim3 thread2ds(TILE_DIM,TILE_DIM);
dim3 grid2ds((Individual_Num+TILE_DIM-1)/TILE_DIM, (MarkerBlock_Size+TILE_DIM-1)/TILE_DIM);*/
/*dim3 grid2ds1((Individual_Num+TILE_DIM-1)/TILE_DIM, (Individual_Num+TILE_DIM-1)/TILE_DIM);*/
/*dim3 thread1ds(Block_Size);
dim3 grid1ds((Individual_Num*Individual_Num)/Block_Size);*/
/* dim3 thread2ds(TILE_DIM,TILE_DIM);
dim3 grid2ds((Individual_Num+TILE_DIM-1)/TILE_DIM, (MarkerBlock_Size+TILE_DIM-1)/TILE_DIM);*/
/* dim3 grid2ds1((Individual_Num+TILE_DIM-1)/TILE_DIM, (Individual_Num+TILE_DIM-1)/TILE_DIM);*/
/* dim3 thread1ds(Block_Size);
dim3 grid1ds((Individual_Num*Individual_Num)/Block_Size);*/
/* dim3 thread1ds(Block_Size);
dim3 grid1ds((Individual_Num*Individual_Num)/Block_Size);*/
return;
}
|
13,227 | #include<stdio.h>
__global__ void multiThreads(){
    // Each thread reports its block index and 2-D thread coordinates.
    printf("I come from block %d and thread(%d, %d).\n",
           blockIdx.x, threadIdx.x, threadIdx.y);
}
int main(){
    // Launch one block of 2x4 threads; each prints its coordinates.
    dim3 block_size(2, 4);
    multiThreads<<<1, block_size>>>();
    // Kernel launches are asynchronous: without this sync the process can
    // exit before the kernel runs and the device printf output is lost.
    cudaDeviceSynchronize();
    return 0;
} |
13,228 | #pragma once
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
__global__ void LinearAddIntsKernel(int *d_out, const int *d_in, unsigned int length)
{
	// Sum-reduce d_in to one partial sum per block in d_out[blockIdx.x].
	// Assumes blockDim.x is a power of two and <= 512 (shared cache size).
	__shared__ int cache[512];
	int tid = threadIdx.x + blockIdx.x * blockDim.x;
	int cacheIndex = threadIdx.x;
	// Grid-stride accumulation so any length works with any grid size.
	int cumer = 0;
	while (tid < length)
	{
		cumer += d_in[tid];
		tid += blockDim.x * gridDim.x;
	}
	cache[cacheIndex] = cumer;
	__syncthreads();
	// Tree reduction in shared memory.
	int i = blockDim.x / 2;
	while (i != 0)
	{
		if (cacheIndex < i)
			cache[cacheIndex] += cache[cacheIndex + i];
		__syncthreads();
		i /= 2;
	}
	// Single store of the block's result (the original redundantly wrote
	// the same value blockDim.x times in a loop).
	if (cacheIndex == 0)
		d_out[blockIdx.x] = cache[0];
}
// Reduce one 32x32 tile of d_in to a single sum in d_out per block.
// Assumes a 32x32 thread block (1024 threads), matching cache[] and the
// fixed starting stride of 512 in the tree reduction.
__global__ void BlockAddInts_32_Kernel(int *d_out, const int *d_in)
{
__shared__ int cache[1024];
int cacheIndex = threadIdx.y + threadIdx.x * blockDim.x;
int span = gridDim.x * blockDim.x;
int rowIn = threadIdx.y + blockIdx.y * blockDim.y;
int colIn = threadIdx.x + blockIdx.x * blockDim.x;
// NOTE(review): indexIn = rowIn + colIn * span addresses the input as
// column-major with leading dimension span -- confirm against the caller.
int indexIn = rowIn + colIn * span;
int indexOut = blockIdx.y + blockIdx.x * gridDim.x;
cache[cacheIndex] = d_in[indexIn];
__syncthreads();
// Tree reduction over the 1024 cached values.
int i = 512;
while (i != 0)
{
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
// One thread publishes the block's sum.
if (cacheIndex == 0)
{
d_out[indexOut] = cache[0];
}
}
// 16x16 variant of BlockAddInts_32_Kernel: reduce one 16x16 tile of d_in to
// a single sum per block. Assumes a 16x16 thread block (256 threads),
// matching cache[] and the fixed starting stride of 128.
__global__ void BlockAddInts_16_Kernel(int *d_out, const int *d_in)
{
__shared__ int cache[256];
int cacheIndex = threadIdx.y + threadIdx.x * blockDim.x;
int span = gridDim.x * blockDim.x;
int rowIn = threadIdx.y + blockIdx.y * blockDim.y;
int colIn = threadIdx.x + blockIdx.x * blockDim.x;
// NOTE(review): column-major addressing with leading dimension span --
// confirm against the caller.
int indexIn = rowIn + colIn * span;
int indexOut = blockIdx.y + blockIdx.x * gridDim.x;
cache[cacheIndex] = d_in[indexIn];
__syncthreads();
// Tree reduction over the 256 cached values.
int i = 128;
while (i != 0)
{
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
if (cacheIndex == 0)
{
d_out[indexOut] = cache[0];
}
}
__global__ void LinearAddFloatsKernel(float *d_out, const float *d_in, unsigned int length)
{
// Sums d_in[0..length) into one partial sum per block (d_out[blockIdx.x]).
// Requires blockDim.x to be a power of two and <= 512 (shared cache size).
__shared__ float cache[512];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int cacheIndex = threadIdx.x;

// Per-thread partial sum over a grid-stride loop.
// FIX: the accumulator was declared `int`, silently truncating every float
// input to an integer before summing.
float cumer = 0.0f;
while (tid < length)
{
    cumer += d_in[tid];
    tid += blockDim.x * gridDim.x;
}
cache[cacheIndex] = cumer;
__syncthreads();

// Tree reduction in shared memory.
int i = blockDim.x / 2;
while (i != 0)
{
    if (cacheIndex < i)
        cache[cacheIndex] += cache[cacheIndex + i];
    __syncthreads();
    i /= 2;
}

// FIX: the original looped blockDim.x times writing the same cache[0] value;
// one write by thread 0 is sufficient.
if (cacheIndex == 0)
    d_out[blockIdx.x] = cache[0];
}
__global__ void BlockAddFloats_32_Kernel(float *d_out, const float *d_in)
{
// Float variant of BlockAddInts_32_Kernel: reduce one tile to one float.
// NOTE(review): cache[1024] and the fixed start of 512 assume a 32x32
// (1024-thread) block — confirm the launch configuration.
__shared__ float cache[1024];
// Flattened in-block index; y-stride uses blockDim.x (assumes square block).
int cacheIndex = threadIdx.y + threadIdx.x * blockDim.x;
int span = gridDim.x * blockDim.x;
int rowIn = threadIdx.y + blockIdx.y * blockDim.y;
int colIn = threadIdx.x + blockIdx.x * blockDim.x;
// Input addressed column-major (row + col*span).
int indexIn = rowIn + colIn * span;
int indexOut = blockIdx.y + blockIdx.x * gridDim.x;
cache[cacheIndex] = d_in[indexIn];
__syncthreads();
// Tree reduction over the staged values.
int i = 512;
while (i != 0)
{
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
// Thread 0 publishes the block's sum.
if (cacheIndex == 0)
{
d_out[indexOut] = cache[0];
}
}
__global__ void BlockAddFloats_16_Kernel(float *d_out, const float *d_in)
{
// Float variant of BlockAddInts_16_Kernel: reduce one 256-element tile
// (16x16 block assumed, given cache[256] and the fixed start of 128).
__shared__ float cache[256];
// Flattened in-block index; y-stride uses blockDim.x (assumes square block).
int cacheIndex = threadIdx.y + threadIdx.x * blockDim.x;
int span = gridDim.x * blockDim.x;
int rowIn = threadIdx.y + blockIdx.y * blockDim.y;
int colIn = threadIdx.x + blockIdx.x * blockDim.x;
// Input addressed column-major (row + col*span).
int indexIn = rowIn + colIn * span;
int indexOut = blockIdx.y + blockIdx.x * gridDim.x;
cache[cacheIndex] = d_in[indexIn];
__syncthreads();
// Tree reduction over the staged values.
int i = 128;
while (i != 0)
{
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
// Thread 0 publishes the block's sum.
if (cacheIndex == 0)
{
d_out[indexOut] = cache[0];
}
}
|
13,229 | #include<iostream>
#include<vector>
#include<cstdlib>
const int M = 7; // size of the mask
__constant__ double mask[M];
__global__ void convolution_kernel(double *arr, double *output, int N){
// 1-D convolution of arr (length N) with the constant-memory mask (length M),
// centered on each element, with zero padding at the borders.
auto i = blockDim.x*blockIdx.x+threadIdx.x;
// FIX: guard the grid tail — without this, threads past N write out of
// bounds whenever N is not an exact multiple of the block size.
if(i >= N) return;
auto start = i - (M/2);
auto temp = 0.0;
for(auto k = 0; k < M; k++){
    // Skip taps that fall outside the array (zero padding).
    if((start+k >=0) && (start+k <N)){
        temp += arr[start+k]*mask[k];
    }
}
output[i] = temp;
}
int main(){
// Driver: random 2^20-element array convolved with a 7-tap constant mask.
int N = 1048576; // size of the array = 2^20
size_t size_N = N*sizeof(double);
// int M = 7; // size of the mask
size_t size_M = M*sizeof(double);
std::vector<double> h_array(N);
std::vector<double> h_mask(M);
std::vector<double> h_output(N);
// Random integer-valued doubles: array in [0,100), mask in [0,10).
for(auto& i:h_array){i = rand()%100;}
for(auto& j:h_mask){j = rand()%10;}
double *d_array, *d_output;
cudaMalloc(&d_array, size_N);
cudaMalloc(&d_output, size_N);
// cudaMalloc(&d_mask, size_M);
cudaMemcpy(d_array, h_array.data(), size_N, cudaMemcpyHostToDevice);
// cudaMemcpy(d_mask, h_mask.data(), size_M, cudaMemcpyHostToDevice);
// Mask lives in __constant__ memory, hence cudaMemcpyToSymbol.
cudaMemcpyToSymbol(mask, h_mask.data(), size_M);
int threadsPerBlock = 256;
int blocksPerGrid = (N+threadsPerBlock-1)/threadsPerBlock;
convolution_kernel<<<blocksPerGrid, threadsPerBlock>>>(d_array, d_output, N);
// The blocking device-to-host copy also synchronizes with the kernel.
// NOTE(review): no CUDA error checking anywhere in this program.
cudaMemcpy(h_output.data(), d_output, size_N, cudaMemcpyDeviceToHost);
// Uncomment to print the output
// for(auto& i:h_output){std::cout << i << std::endl;}
cudaFree(d_array);
cudaFree(d_output);
// cudaFree(d_mask);
return 0;
}
|
__global__ void vis2hid(float* vis,float* w8s,float* hid){
// RBM forward pass: hid[x] accumulates vis[y] * W[y][x] over all y.
// FIX: every threadIdx.y lane accumulates into the same hid slot, so a
// plain += is a data race that loses partial products; use atomicAdd.
atomicAdd(&hid[threadIdx.x], vis[threadIdx.y]*w8s[threadIdx.y*blockDim.x+threadIdx.x]);
}
__global__ void hid2vis(float* hid,float* w8s,float* vis){
// RBM backward pass: vis[y] accumulates hid[x] * W[y][x] over all x.
// FIX: every threadIdx.x lane accumulates into the same vis slot, so a
// plain += is a data race that loses partial products; use atomicAdd.
atomicAdd(&vis[threadIdx.y], hid[threadIdx.x]*w8s[threadIdx.y*blockDim.x+threadIdx.x]);
}
__global__ void learning(
float* w8s,float* posv,float* posh,float* negv,float* negh
){
// Contrastive-divergence weight update; each thread owns exactly one
// weight W[v][h], so no synchronization is needed.
const int h = threadIdx.x;
const int v = threadIdx.y;
const float dw = posv[v]*posh[h] - negv[v]*negh[h];
w8s[v*blockDim.x + h] += dw;
}
void deconstruct(float* vis,float* w8s,float* hid,unsigned nv,unsigned nh){
// hid is a device pointer (it is handed to a kernel below), so it must be
// cleared with cudaMemset — FIX: host memset on a device pointer is invalid.
cudaMemset(hid,0,nh*sizeof(float));
// FIX: the kernel indexes threadIdx.x over hidden units and threadIdx.y
// over visible units; the original <<<nv,nh>>> launch made threadIdx.y
// always 0. Launch one 2-D block instead (requires nh*nv <= 1024).
vis2hid<<<1,dim3(nh,nv)>>>(vis,w8s,hid);
}
void reconstruct(float* vis,float* w8s,float* hid,unsigned nv,unsigned nh){
// vis is a device pointer (it is handed to a kernel below), so it must be
// cleared with cudaMemset — FIX: host memset on a device pointer is invalid.
cudaMemset(vis,0,nv*sizeof(float));
// FIX: the kernel indexes threadIdx.x over hidden units and threadIdx.y
// over visible units; the original <<<nv,nh>>> launch made threadIdx.y
// always 0. Launch one 2-D block instead (requires nh*nv <= 1024).
hid2vis<<<1,dim3(nh,nv)>>>(vis,w8s,hid);
}
|
13,231 | #include <iostream>
#include <fstream>
#include <vector>
#include <stdlib.h>
#include <string>
#include <unistd.h>
using namespace std;
const int THREADS_PER_BLOCK = 512;
__global__
void setStatus(char A[], int B[])
{
// Apply Conway's rules to cell i. A holds 'X' (alive) / '-' (dead) and
// B holds each cell's precomputed live-neighbour count. A live cell
// survives with exactly 2 or 3 neighbours; a dead cell is born with
// exactly 3 and is otherwise left untouched.
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int neighbours = B[i];
if (A[i] == 'X')
    A[i] = (neighbours == 2 || neighbours == 3) ? 'X' : '-';
else if (neighbours == 3)
    A[i] = 'X';
}
__global__
void callCheck(int rows, int cols,char A[], int B[])
{
// Counts live ('X') neighbours of cell i on a toroidal (wrap-around) grid
// and stores the count in B[i]. One thread per cell, flat row-major index.
int i, k, j, count, iIndex, jIndex;
i = blockIdx.x * blockDim.x + threadIdx.x;
int rowIndex, colIndex;//index of current thread
iIndex = i/cols; //row index
jIndex = i%cols; // col index
count = 0;
// Scan the 3x3 neighbourhood centered on (iIndex, jIndex).
for(k = iIndex-1; k <= iIndex+1; k++)
{
for (j = jIndex-1; j <= jIndex+1; j++) //Each line ends with newline character \n (Unix formatting)
{
// k < 0 j >0 can't have negative index
//k > rows j > cols can't have index larger than array Max
// Wrap row index around the torus edges.
if(k<0)
{
rowIndex = rows-1;
}
else if(k>=rows)
{
rowIndex = 0;
}
else
{
rowIndex = k;
}
// Wrap column index around the torus edges.
if(j<0)
{
colIndex = cols-1;
}
else if(j>=cols)
{
colIndex = 0;
}
else
{
colIndex = j;
}
// Count live neighbours, excluding the cell itself (flat-index compare).
if (A[rowIndex*cols+colIndex] == 'X' && (rowIndex*cols+colIndex!= i))
{
count++;
}
}
}
B[i] = count;
}
int main(int argc, char *argv[])
{
// Game of Life driver: reads an 'X'/'-' grid from the .txt file given as the
// last argument, runs `-i N` iterations on the GPU, and prints the board
// (every iteration with -v, otherwise only the final one).
int i,j, rows, cols;
char temp = '=';
rows = 1;
cols = 1;
vector<char> tempS;
ifstream fin;
ofstream fout;
bool printAll = false;
int opts = 0;
string input;
int iterations = 1;
// Parse command-line flags; the last argument is expected to be the input file.
while(opts < argc)
{
if(string(argv[opts]) == "-i")
{
iterations = strtol(argv[opts+1], NULL, 10);
}
if(string(argv[opts]) == "-v")
{
printAll = true;
}
if(opts == argc-1)
{
// Accept the last argument only if it ends in ".txt".
string ext;
string temp = argv[opts];
for(i = temp.length()-4; i < temp.length(); i++) ext += temp[i];
if(ext == ".txt") input = temp;
}
opts++;
}
fin.open(input.c_str());
if(!fin)
{
cout<< "Could not find the input file please try running again with valid file";
exit(1);
}
fout.open("output.txt");
i=0;
fin >> temp;
int totalcount = 0; //total number of elements
// Read cells one character at a time; infer rows/cols from newlines.
while(!fin.eof())
{
totalcount++;
if(temp == 'X' || temp == '-')
{
if(fin.peek() == '\n')
{
rows++;
}else if(rows == 1)
{
cols++;
}
tempS.push_back(temp); //read in status
}
else
{
cout << "Invalid input = " << temp << endl;
}
fin >> temp;
i++;
}
fin.close();
int Array_size = cols*rows;
if(Array_size <= 8)
{
cout <<"Matrix must be at least 9 elements";
exit(1);
}
if(totalcount != Array_size)
{
cout << "Matrix is not even";
exit(1);
}
char S[Array_size];
for(j=0; j<Array_size; j++)
{
S[j]= tempS[j];
}
tempS.clear();
char *A;
int *B;
int GD;
i=1;
// Pick a block size that divides Array_size exactly, so the kernels need
// no bounds guard.
while(i <= THREADS_PER_BLOCK)
{
if (Array_size%i == 0)
{
GD = i; //find greatest denominator of Array_size < THREADS_PER_BLOCK
}
i++;
}
cudaMalloc((void** ) &A, Array_size*(sizeof(char)));
cudaMalloc((void** ) &B, Array_size*(sizeof(int))); //allocates bytes from device heap and returns pointer to allocated memory or null
cudaMemcpy(A, S, Array_size*sizeof(char), cudaMemcpyHostToDevice);
int l = 0;
// Main simulation loop: count neighbours, then update cell states.
while(l < iterations)
{
callCheck<<<Array_size/GD,GD>>>(rows,cols,A, B);
setStatus<<<Array_size/GD,GD>>>(A, B);
if(printAll == true || l == iterations-1)
{
cudaDeviceSynchronize();
cudaMemcpy(S, A, Array_size*sizeof(char), cudaMemcpyDeviceToHost);
// ANSI escape: clear screen and home the cursor before printing the board.
printf("\033[2J\033[H");
for(i = 0; i < rows; i++)
{
for(j = 0; j<cols; j++)
{
cout << S[i*cols+j];
}
cout << endl;
}
}
l++;
}
cudaFree(A);
cudaFree(B);
return 0;
}
|
13,232 | #include "includes.h"
#define NUMAR_NODURI 500
#define NUMAR_MUCHII 500
#define COST_MAXIM 1000000
typedef struct
{
int nod1; // first endpoint node id
int nod2; // second endpoint node id
} Muchie; // "edge" (Romanian)
typedef struct
{
int nodId; // node identifier
bool vizitat; // "visited" flag
} Nod; // "node" (Romanian)
//Gaseste costul drumului de la nodul start la nodul stop
__global__ void UpdateCostDrumuri(Nod *noduri, int *costuriTemporale, int *costuriFinale)
{
// One thread per node: if the node's tentative cost exceeds its final cost,
// lower it and mark the node unvisited so it is relaxed again (Bellman-Ford
// style step). Assumes a single block (indexing uses threadIdx.x only).
int nod = threadIdx.x;
if (costuriTemporale[nod] > costuriFinale[nod])
{
costuriTemporale[nod] = costuriFinale[nod];
noduri[nod].vizitat = false;
}
// Commit the (possibly lowered) tentative cost as the new final cost.
costuriFinale[nod] = costuriTemporale[nod];
} |
13,233 | #define BLOCK_SIZE 512
#define SECTION_SIZE 1024 // define section size (size of subarray to be handled) to be twice the block size
__global__ void work_efficient_inclusive_scan(float *X, float *Y, unsigned in_size) {
// Stage 1 of a three-stage scan: each block scans one SECTION_SIZE chunk
// (2 elements per thread) with a Brent-Kung up-sweep/down-sweep in shared
// memory. NOTE(review): despite the name, the load is shifted left by one
// with a leading 0 (X[start+t-1]), i.e. the staged data produces an
// exclusive-style result per section — confirm against the later stages.
__shared__ float XY[SECTION_SIZE];
// Load elements from input into in-place array
unsigned int t = threadIdx.x;
unsigned int start = 2 * blockIdx.x * blockDim.x;
// Each thread loads 2 elements, since section size is double block size
// NOTE(review): positions past in_size leave XY uninitialized; the reduction
// below still reads them (garbage), though such sections' outputs are never
// stored. Run under compute-sanitizer initcheck to confirm it is benign.
if(t + start < in_size) {
if(t == 0 && blockIdx.x == 0) {
XY[t] = 0;
} else {
XY[t] = X[start + t - 1];
}
}
if(t + start + BLOCK_SIZE < in_size) {
XY[t + BLOCK_SIZE] = X[start + t + BLOCK_SIZE - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t + start < in_size) {
Y[start + t] = XY[t];
}
if(t + start < in_size && t + BLOCK_SIZE < in_size) {
Y[start + t + BLOCK_SIZE] = XY[t + BLOCK_SIZE];
}
}
/*
* Note: this kernel is based off the assumption that the GRID_DIM is 1024, or exactly
* twice the BLOCK_DIM. This way, one section/2 blocks/1 thread block in this prefix-scan stage will be able to
* exactly handle all the block outputs from the previous prefix-scan stage.
*/
__global__ void work_efficient_inclusive_scan_2(float *X, float *Y, unsigned in_size) {
// Stage 2: a single block scans the last element of every SECTION produced
// by stage 1 (the per-section totals), writing the scanned totals to Y.
__shared__ float XY[SECTION_SIZE];
unsigned int t = threadIdx.x;
// Each thread loads 2 elements, each element being the last element of every SECTION from last kernel
// NOTE(review): slots whose source index falls past in_size are left
// uninitialized in shared memory, as in stage 1.
if(SECTION_SIZE * (t+1) - 1 < in_size) {
XY[t] = X[SECTION_SIZE * (t+1) - 1];
}
if(SECTION_SIZE * (t+BLOCK_SIZE+1) - 1 < in_size) {
XY[t+BLOCK_SIZE] = X[SECTION_SIZE * (t+BLOCK_SIZE+1) - 1];
}
// Reduction tree step (increase stride size)
for(unsigned int stride = 1; stride <= BLOCK_SIZE; stride *=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i < SECTION_SIZE) {
XY[i] += XY[i-stride];
}
}
// Distribution tree step (decrease stride size)
for(unsigned int stride = SECTION_SIZE/4; stride>0; stride/=2) {
__syncthreads();
unsigned int i = (threadIdx.x+1) * 2 * stride - 1;
if(i + stride < SECTION_SIZE) {
XY[i + stride] += XY[i];
}
}
__syncthreads();
// Cp threads to output array
t = threadIdx.x;
if(t < in_size) {
Y[t] = XY[t];
}
if(t+BLOCK_SIZE < in_size) {
Y[t+BLOCK_SIZE] = XY[t+BLOCK_SIZE];
}
}
__global__ void work_efficient_inclusive_scan_3(float *X2, float *X, float *Y, unsigned in_size) {
// Stage 3: add each section's scanned predecessor total (X[blockIdx.x-1])
// to every element of that section's stage-1 scan result (X2); section 0
// has no predecessor and is copied through unchanged.
unsigned int t = threadIdx.x;
// Cp threads to output array (each thread copies 2 elements and add result from prev kernel
unsigned int start = 2 * blockIdx.x * blockDim.x;
if(start != 0) { // Do for blocks 1 onwards
    if(start + t < in_size) {
        Y[start + t] = X2[start + t] + X[blockIdx.x - 1];
    }
    if(start + t + BLOCK_SIZE < in_size) {
        Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE] + X[blockIdx.x - 1];
    }
} else {
    if(start + t < in_size) {
        Y[start + t] = X2[start + t];
    }
    // FIX: the original condition was `if(start + t + BLOCK_SIZE)` — a
    // truthiness test that is always true, causing out-of-bounds reads and
    // writes past in_size; compare against in_size like the other branch.
    if(start + t + BLOCK_SIZE < in_size) {
        Y[start + t + BLOCK_SIZE] = X2[start + t + BLOCK_SIZE];
    }
}
}
void preScan(float *out2, float *in, unsigned in_size) {
    // Stage 1 launcher: one block per 2*BLOCK_SIZE-element section.
    const unsigned numSections = (in_size - 1) / (2 * BLOCK_SIZE) + 1;
    dim3 grid(numSections, 1, 1);
    dim3 block(BLOCK_SIZE, 1, 1);
    work_efficient_inclusive_scan<<<grid, block>>>(in, out2, in_size);
}
void preScan2(float *out3, float *out2, unsigned in_size) {
    // Stage 2 launcher: a single block scans the per-section totals.
    dim3 grid(1, 1, 1);
    dim3 block(BLOCK_SIZE, 1, 1);
    work_efficient_inclusive_scan_2<<<grid, block>>>(out2, out3, in_size);
}
void preScan3(float *out, float *out3, float *out2, unsigned in_size) {
    // Stage 3 launcher: add the scanned totals back onto every section.
    const unsigned numSections = (in_size - 1) / (2 * BLOCK_SIZE) + 1;
    dim3 grid(numSections, 1, 1);
    dim3 block(BLOCK_SIZE, 1, 1);
    work_efficient_inclusive_scan_3<<<grid, block>>>(out2, out3, out, in_size);
}
|
13,234 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <cuda.h>
__global__ void kernel() {
    // Each thread reports its block, thread, and flattened global index.
    const int globalIdx = blockIdx.x * blockDim.x + threadIdx.x;
    printf("I am from %d block, %d thread (global index: %d)\n", blockIdx.x, threadIdx.x, globalIdx);
}
int main(int agrc, char* argv[]) {
// Launch 2 blocks of 2 threads, then synchronize so device printf output
// is flushed before the process exits.
printf("Now cuda say:\n");
kernel<<<2,2>>>();
cudaDeviceSynchronize();
return 0;
}
|
13,235 | #include "includes.h"
# define MAX(a, b) ((a) > (b) ? (a) : (b))
# define GAUSSIAN_KERNEL_SIZE 3
# define SOBEL_KERNEL_SIZE 5
# define TILE_WIDTH 32
# define SMEM_SIZE 128
__global__ void highHysterisis(int width, int height, float* d_nonMax, float highThreshold, float *d_highThreshHyst) {
// High-threshold pass of Canny hysteresis: mark pixels whose non-max-
// suppressed magnitude exceeds highThreshold as strong edges (1.0), all
// others 0.0. One thread per pixel on a 2-D grid, bounds-guarded.
int ix = threadIdx.x + blockIdx.x * blockDim.x;
int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix < width && iy < height) {
int tid = iy * width + ix;
d_highThreshHyst[tid] = 0.0f;
if(d_nonMax[tid] > highThreshold)
d_highThreshHyst[tid] = 1.0f;
}
} |
13,236 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/transform.h>
#include <math.h>
#include <stdio.h>
#define N 64
// DistanceFrom(ref,n) (x) ->sqrt((x/9n-1)-ref)*(x/(n-1)-ref))
struct DistanceFrom {
    // Functor mapping an index x to the distance between x/(n-1) (the index
    // scaled onto [0,1]) and a fixed reference point.
    DistanceFrom(float ref, int n) : mRef(ref), mN(n) {}
    __host__ __device__
    float operator()(const float &x) {
        const float pos = x / (mN - 1);
        const float diff = pos - mRef;
        return std::sqrt(diff * diff);
    }
    float mRef; // reference point in [0,1]
    int mN;     // number of samples (defines the scaling)
};
int main() {
// Compute |i/(N-1) - 0.5| for i in [0,N) on the device via thrust, then
// copy back and print each sample with its distance.
const float ref = 0.5;
thrust::device_vector<float> dvec_dist(N);
// Counting iterators generate 0..N-1 on the fly; no input buffer needed.
thrust::transform(thrust::counting_iterator<float>(0),
thrust::counting_iterator<float>(N), dvec_dist.begin(),
DistanceFrom(ref, N));
thrust::host_vector<float> hvec_dist = dvec_dist;
for (int i = 0; i < N; ++i) {
printf("x[%d]=%.3f, dist=%.3f\n", i, 1.f*i/(N-1), hvec_dist[i]);
}
return 0;
}
|
13,237 | /*------------hello_gpu.cu----------------------------------------------------//
*
* hello_gpu
*
* Purpose: Hello world with our first kernel on the side!
*
*-----------------------------------------------------------------------------*/
#include <iostream>
using namespace std;
// Empty placeholder kernel: demonstrates a launch, performs no work.
__global__ void kernel(void){
}
int main(void){
// Launch the (empty) kernel, then print from the host.
kernel<<<1,1>>>();
cout << "hey guys. I thought we were supposed to be printing off the gpu..."
<< "but I was wrong. =(" << endl;
return 0;
}
|
13,238 | #include "includes.h"
__global__ void backward_dropblock_kernel(float *pass, float *delta, int size)
{
// Backward pass of DropBlock: zero the gradient wherever the forward mask
// (pass) zeroed the activation. One thread per element, bounds-guarded.
const int index = blockIdx.x*blockDim.x + threadIdx.x;
if (index >= size) return;
if (pass[index] == 0) delta[index] = 0;
} |
13,239 | // Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
//
// This work is made available under the Nvidia Source Code License-NC.
// To view a copy of this license, visit
// https://nvlabs.github.io/stylegan2/license.html
#include <cstdio>
void checkCudaError(cudaError_t err)
{
    // Terminate with a readable name/description on any CUDA failure.
    if (err == cudaSuccess)
        return;
    printf("%s: %s\n", cudaGetErrorName(err), cudaGetErrorString(err));
    exit(1);
}
__global__ void cudaKernel(void)
{
// Prints from the device to show the launch actually executed.
printf("GPU says hello.\n");
}
int main(void)
{
printf("CPU says hello.\n");
// Launch via cudaLaunchKernel: 1 block, 1 thread, no args, no shared
// memory, default stream — every call's status is checked.
checkCudaError(cudaLaunchKernel((void*)cudaKernel, 1, 1, NULL, 0, NULL));
// Synchronize so the device printf is flushed (and launch errors surface).
checkCudaError(cudaDeviceSynchronize());
return 0;
}
|
13,240 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <sys/time.h>
#define NUM_RUNS 100
#define SIZE 10*1024*1024
float
cuda_malloc_test(int size, int up)
{
    // Times NUM_RUNS host<->device copies of `size` ints using pageable host
    // memory (plain malloc). up != 0: host->device; up == 0: device->host.
    // Returns the elapsed wall-clock time in seconds for all NUM_RUNS copies.
    int *a, *dev_a;
    struct timeval start, stop;
    float elapsed_time;
    // FIX: honor the `size` parameter — the original ignored it and always
    // used the SIZE macro. Callers pass SIZE, so behavior is unchanged, but
    // the function now measures what its signature promises.
    a = (int *)malloc(size * sizeof(int));
    cudaMalloc((void **)&dev_a, size * sizeof(int));
    gettimeofday(&start, NULL);
    for(int i = 0; i < NUM_RUNS; i++){
        if(up)
            cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
        else
            cudaMemcpy(a, dev_a, size * sizeof(int), cudaMemcpyDeviceToHost);
    }
    gettimeofday(&stop, NULL);
    elapsed_time = stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000;
    free(a);
    cudaFree(dev_a);
    return elapsed_time;
}
float
cuda_host_alloc_test(int size, int up)
{
    // Times NUM_RUNS host<->device copies of `size` ints using pinned
    // (page-locked) host memory. up != 0: host->device; up == 0: device->host.
    // Returns the elapsed wall-clock time in seconds for all NUM_RUNS copies.
    int *a, *dev_a;
    struct timeval start, stop;
    float elapsed_time;
    // FIX: honor the `size` parameter — the original ignored it and always
    // used the SIZE macro. Callers pass SIZE, so behavior is unchanged.
    cudaHostAlloc((void **)&a, size * sizeof(int), cudaHostAllocDefault);
    cudaMalloc((void **)&dev_a, size * sizeof(int));
    gettimeofday(&start, NULL);
    for(int i = 0; i < NUM_RUNS; i++){
        if(up)
            cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
        else
            cudaMemcpy(a, dev_a, size * sizeof(int), cudaMemcpyDeviceToHost);
    }
    gettimeofday(&stop, NULL);
    elapsed_time = stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000;
    cudaFreeHost(a);
    cudaFree(dev_a);
    return elapsed_time;
}
int
main(void)
{
// Benchmarks pageable vs pinned host memory transfer bandwidth in both
// directions, printing elapsed seconds and MB/s for each combination.
// Check the device properties if our device supports mapping host memory
cudaDeviceProp properties;
int my_device;
cudaGetDevice(&my_device);
cudaGetDeviceProperties(&properties, my_device);
if(properties.canMapHostMemory != 1){
printf("The device cannot map host memory. \n");
exit(0);
}
// Place the CUDA runtime in a state which supports mapping memory on the host
cudaSetDeviceFlags(cudaDeviceMapHost);
float elapsed_time;
float MB = (float)NUM_RUNS*SIZE*sizeof(int)/(1024*1024); // Total size of data transferred in MB
// Benchmark the transfer time when using cudaMalloc up to device, that is from host to the device
elapsed_time = cuda_malloc_test(SIZE, 1);
printf("Elapsed time using cudaMalloc: %3.1f s \n", elapsed_time);
printf("MB/s during copy up: %3.1f \n", MB/elapsed_time);
// Benchmark the transfer time when using cudaMalloc in the opposite direction, that is from device to the host
elapsed_time = cuda_malloc_test(SIZE, 0);
printf("Elapsed time using cudaMalloc: %3.1f s \n", elapsed_time);
printf("MB/s during copy down: %3.1f \n", MB/elapsed_time);
// Benchmark the transfer time when using cudaHostAlloc up to device, that is from host to the device
elapsed_time = cuda_host_alloc_test(SIZE, 1);
printf("Elapsed time using cudaHostAlloc: %3.1f s \n", elapsed_time);
printf("MB/s during copy up: %3.1f \n", MB/elapsed_time);
// Benchmark the transfer time when using cudaHostAlloc in the opposite direction, that is from device to the host
elapsed_time = cuda_host_alloc_test(SIZE, 0);
printf("Elapsed time using cudaHostAlloc: %3.1f s \n", elapsed_time);
printf("MB/s during copy down: %3.1f \n", MB/elapsed_time);
exit(0);
}
|
13,241 | #include "includes.h"
__global__ void calculation( char* dev_a, char* dev_b, char* dev_c, int num_matrices, int matrix_size ) {
// For each matrix k whose first element is non-zero: per row, add the row of
// dev_b when the row's first element is below `threshold`, else subtract it.
// NOTE(review): `threshold` is not declared in this function or its
// parameters — presumably a global from includes.h; confirm its definition.
// Each thread handles a matrix
int k = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id
if (k >= num_matrices) return;
// If first element is different than 0 do the computation
if (dev_a[k*matrix_size*matrix_size] != 0){
for (int j = 0; j < matrix_size; j++){
//If first value in the row of the matrix, do addition
if (dev_a[k*matrix_size*matrix_size+j*matrix_size] < threshold){
for (int i = 0; i < matrix_size; i++){
int index = k*matrix_size*matrix_size+j*matrix_size+i;
dev_c[index] = dev_a[index] + dev_b[index];
}
//Do subtraction
} else {
for (int i = 0; i < matrix_size; i++){
int index = k*matrix_size*matrix_size+j*matrix_size+i;
dev_c[index] = dev_a[index] - dev_b[index];
}
}
}
}
} |
13,242 |
#include <stdio.h>
__device__ void mul(double a, double b, double *res)
{
    // Multiply a*b, then deliberately replace the result with NaN (this
    // reads as fault-injection/test code, per the original "// NaN" comment).
    *res = a * b;
    // FIX: the original wrote `r - r/r - r`, which evaluates to -1 for any
    // r != 0 because `/` binds tighter than `-`. Parenthesized, the
    // expression is (r-r)/(r-r) = 0.0/0.0 = NaN, matching the stated intent.
    *res = ((*res)-(*res)) / ((*res)-(*res));
}
__global__ void dot_prod(double *x, double *y, int size)
{
    // Every thread serially computes the full dot product of x and y via
    // mul(); only global thread 0 prints the result.
    // FIX: `d` was uninitialized, so accumulation started from garbage.
    double d = 0.0;
    for (int i=0; i < size; ++i)
    {
        double tmp;
        mul(x[i], y[i], &tmp);
        d += tmp;
    }
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid == 0) {
        printf("dot: %f\n", d);
    }
    // NOTE(review): removed a dead function-local struct definition
    // (QuadraturePoint) and dead local variables/assignments (x1, kd) that
    // had no observable effect on the kernel's behavior.
}
|
13,243 | #include <algorithm>
#include <assert.h>
#include <iostream>
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <sys/time.h>
#include <cuda_runtime.h>
float* d_A = NULL; // Pointer to matrix A in device memory
float* d_x = NULL; // Pointer to vector x in device memory
float* d_y = NULL; // Pointer to result vector y in device memory
double time_memcpy = 0;
double time_compute = 0; // Timers
/////////////////////////////////////////////////////////////////////////
// Matrix-vector Multiplication on CPU: Policy 1
/////////////////////////////////////////////////////////////////////////
void mv_cpu(float* y, const float* A, const float* x, int n) {
    // y += A*x for a dense row-major n x n matrix A; y is accumulated into,
    // not overwritten, so callers must pre-initialize it.
    for (int row = 0; row < n; ++row) {
        const float* a_row = A + row * n;
        float acc = y[row];
        for (int col = 0; col < n; ++col) {
            acc += a_row[col] * x[col];
        }
        y[row] = acc;
    }
}
/////////////////////////////////////////////////////////////////////////
// Matrix-vector Multiplication on GPU: Policy 2
/////////////////////////////////////////////////////////////////////////
__global__ void mv_cuda_simple(float* y, float* A, float* x, int n)
{
// One thread per output row: y[row] = dot(A[row, :], x) for a row-major
// n x n matrix. Tail-guarded, so any grid covering >= n threads works.
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < n) {
float temp = 0;
for (int k = 0; k < n; k++) {
temp += A[row*n + k] * x[k];
}
y[row] = temp;
}
}
/////////////////////////////////////////////////////////////////////////
// Matrix-vector Multiplication on GPU using shared memory: Policy 3
/////////////////////////////////////////////////////////////////////////
__global__ void mv_cuda_shared(float* y, float* A, float* x, int n)
{
// Tiled mat-vec: each block stages a blockDim.x-wide slice of x in dynamic
// shared memory (launch with sizeof(float)*blockDim.x), and each thread
// accumulates one row of A against the staged slice.
extern __shared__ float s_x[];
int row = blockIdx.x * blockDim.x + threadIdx.x;
float temp = 0;
for (int j = 0; j < n; j += blockDim.x) {
    // Stage one tile of x. All threads run this loop — including those with
    // row >= n — so every thread reaches the barriers below.
    // FIX: the original wrapped the whole loop in `if (row < n)`, placing
    // __syncthreads() inside divergent control flow (undefined behavior
    // whenever n is not a multiple of blockDim.x).
    if (j + threadIdx.x < n) {
        s_x[threadIdx.x] = x[j + threadIdx.x];
    }
    __syncthreads();
    if (row < n) {
        for (int k = j; k < min(n, j + blockDim.x); k++) {
            temp += A[row*n + k] * s_x[k - j];
        }
    }
    // FIX: barrier before the next iteration overwrites s_x — the original
    // let fast threads clobber the tile while slow threads were still
    // reading it (shared-memory race).
    __syncthreads();
}
if (row < n) {
    y[row] = temp;
}
}
/////////////////////////////////////////////////////////////////////////
// Allocate device memory and copy from host to device memory
/////////////////////////////////////////////////////////////////////////
void copy_host_to_device(float* A, float* x, int n)
{
// Allocates d_A (n*n), d_x (n) and d_y (n) in device memory, copies A and x
// up, and adds the wall-clock cost to the global time_memcpy accumulator.
double time_start, time_end;
struct timeval tv;
struct timezone tz;
gettimeofday (&tv , &tz);
time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
cudaMalloc((void**) &d_A, sizeof(float) * n * n);
cudaMalloc((void**) &d_x, sizeof(float) * n);
cudaMalloc((void**) &d_y, sizeof(float) * n);
// NOTE(review): assert checks the pointer variables are non-NULL, not the
// cudaMalloc return codes — a failed allocation may still slip through.
assert(d_A);
assert(d_x);
assert(d_y);
cudaMemcpy(d_A, A, sizeof(float) * n * n, cudaMemcpyHostToDevice);
cudaMemcpy(d_x, x, sizeof(float) * n, cudaMemcpyHostToDevice);
gettimeofday (&tv , &tz);
time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
time_memcpy += time_end - time_start;
}
/////////////////////////////////////////////////////////////////////////
// Copy result from device to host memory and free device memory
/////////////////////////////////////////////////////////////////////////
void copy_device_to_host(float* y, int n)
{
// Copies the result vector d_y back to host y, frees all device buffers,
// and adds the wall-clock cost to the global time_memcpy accumulator.
double time_start, time_end;
struct timeval tv;
struct timezone tz;
gettimeofday (&tv , &tz);
time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
cudaMemcpy(y, d_y, sizeof(float) * n, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_x);
cudaFree(d_y);
gettimeofday (&tv , &tz);
time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
time_memcpy += time_end - time_start;
}
/////////////////////////////////////////////////////////////////////////
// Program main
/////////////////////////////////////////////////////////////////////////
int main(int argc, char** argv)
{
// Benchmarks mat-vec multiply (y = A*x, N x N) under three policies:
// 1 = CPU baseline, 2 = simple CUDA kernel, 3 = CUDA kernel with shared-
// memory tiling. Prints memcpy and compute times; "debug" prints samples.
double time_start, time_end;
struct timeval tv;
struct timezone tz;
if(argc != 5) {printf("Usage: mat_vec_gpu <array size> <block size> <policy (1|2|3)> <(debug|nodebug)>\n") ; exit(2) ;}
int N = atoi(argv[1]) ; // array size
int block_size = atoi(argv[2]) ; // size of thread block
int policy = atoi(argv[3]) ; // 1, 2 or 3
bool debug = (!strcmp(argv[4], "debug")) ? true : false;
if(debug) {
printf("\n Array size = %d , block size = %d and policy = %d\n ", N, block_size, policy);
}
// set seed for rand()
srand(2020);
// allocate host memory for matrix A and vector x
// NOTE(review): malloc results are not checked; a huge N silently crashes.
int matrix_size = N * N;
float* h_A = (float*)malloc(sizeof(float) * matrix_size);
float* h_x = (float*)malloc(sizeof(float) * N);
// randomly initialize host memory
for (int i = 0; i < matrix_size; ++i){
h_A[i] = rand() / (float)RAND_MAX;
}
for (int i = 0; i < N; i++){
h_x[i] = rand() / (float)RAND_MAX;
}
// allocate host memory for the result
float* h_y = (float*)malloc(sizeof(float) * N);
for (int i = 0; i < N; i++) {
h_y[i] = 0.0;
}
//call the relevant policy
if(policy==1){ //CPU baseline
gettimeofday (&tv , &tz);
time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
mv_cpu(h_y, h_A, h_x, N);
gettimeofday (&tv , &tz);
time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
time_compute = time_end - time_start;
}
else if(policy==2){ //CUDA MV multiplication
// call mv_cuda_simple <<< >>> ( )
copy_host_to_device(h_A, h_x, N);
gettimeofday (&tv , &tz);
time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
// Ceil-divide so the grid covers all N rows.
int nblocks = (N + block_size - 1) / block_size;
mv_cuda_simple <<<nblocks, block_size>>> (d_y, d_A, d_x, N);
cudaDeviceSynchronize();
gettimeofday (&tv , &tz);
time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
time_compute = time_end - time_start;
copy_device_to_host(h_y, N);
}
else if(policy==3){ //CUDA MV multiplication with shared memory
// call mv_cuda_shared<<<grid, block_size >>>(d_y, d_A, d_x, N);
copy_host_to_device(h_A, h_x, N);
gettimeofday (&tv , &tz);
time_start = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
int nblocks = (N + block_size - 1) / block_size;
// Third launch argument: dynamic shared memory for one tile of x.
mv_cuda_shared <<<nblocks, block_size, sizeof(float) * block_size>>> (d_y, d_A, d_x, N);
cudaDeviceSynchronize();
gettimeofday (&tv , &tz);
time_end = (double)tv.tv_sec + (double)tv.tv_usec / 1000000.0;
time_compute = time_end - time_start;
copy_device_to_host(h_y, N);
}
if (debug) {
// print selected results (for checking correctness) and print time elapsed
for (int i=0; i<N; i+=N/10) { printf(" %10.6f",h_y[i]); }
printf("\n");
}
printf("Memcpy Time: %f seconds\n", time_memcpy);
printf("Compute Time: %f seconds\n", time_compute);
// clean up memory allocated
free(h_A);
free(h_x);
free(h_y);
cudaDeviceReset();
}
|
13,244 | #include <stdio.h>
#include <iostream>
#include <cuda_runtime.h>
// Dumps the interesting fields of a cudaDeviceProp, one per line.
void printDeviceProp(const cudaDeviceProp &prop)
{
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %lu.\n", prop.totalGlobalMem);
printf("sharedMemPerBlock : %lu.\n", prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %lu.\n", prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %lu.\n", prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %lu.\n", prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
}
bool InitCUDA()
{
    // Enumerates CUDA devices, prints each one's properties, selects the
    // last device with compute capability >= 1.0, and returns true on
    // success (false if no usable device exists).
    int count;
    cudaGetDeviceCount(&count);
    if (count == 0) {
        std::cout << "no cuda device found." << std::endl;
        return false;
    }
    // FIX: `index` was uninitialized; if no enumerated device had
    // prop.major >= 1, cudaSetDevice() was called with garbage.
    int i, index = -1;
    for (i = 0; i < count; i++) {
        cudaDeviceProp prop;
        if (cudaGetDeviceProperties(&prop, i) == cudaSuccess) {
            printf("Device %d.\n", i);
            printDeviceProp(prop);
            printf("\n");
            if (prop.major >= 1) {
                index = i;
            }
        }
    }
    if (index == -1) {
        std::cout << "no cuda device found." << std::endl;
        return false;
    }
    cudaSetDevice(index);
    std::cout << "cuda initialized with set device " << index << std::endl;
    return true;
}
int cudaGetClockRate()
{
    // Returns device 0's clock rate in Hz (prop.clockRate is reported in kHz);
    // falls back to a 1 GHz default if the query fails.
    cudaDeviceProp prop;
    if (cudaGetDeviceProperties(&prop, 0) == cudaSuccess){
        return prop.clockRate * 1000;
    } else {
        std::cout << "cudaGetClockRate fails" << std::endl;
        // FIX: the original returned `10^9`, where ^ is XOR, i.e. the value 3.
        // The intent was clearly one gigahertz.
        return 1000000000;
    }
}
int main()
{
    // Probe and select a CUDA device; report success. (Both paths exit 0.)
    if (InitCUDA()) {
        std::cout << "cuda initialized." << std::endl;
    }
    return 0;
}
|
13,245 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
/* Define the matrix size */
#define NX 200
#define NY 100
/* Size of a block */
#define BLOCKSIZE 256
__global__ void kernadd (float* mout, float* min1, float *min2, int nx, int ny)
{
    // Element-wise matrix addition over an nx*ny matrix stored flat,
    // one thread per cell; threads past the last cell do nothing.
    const int index = blockDim.x*blockIdx.x + threadIdx.x;
    const int row = index / nx;      // j coordinate
    const int col = index - row*nx;  // i coordinate
    if ((col < nx) && (row < ny))
        mout[index] = min1[index] + min2[index];
}
/*******************************************************/
/* We initialize the vectors with random values */
/*******************************************************/
void Init(float* mat, int nx, int ny) {
    // Fill the nx*ny matrix with uniform pseudo-random values in [0,1);
    // cell (i,j) lives at index i + j*nx. Loop order (i outer, j inner)
    // matches the original so the drand48() sequence lands identically.
    for (int i = 0; i < nx; ++i) {
        for (int j = 0; j < ny; ++j) {
            mat[i + j*nx] = drand48();
        }
    }
}
/*******************************************************/
/* MAIN PROGRAM */
/*******************************************************/
int main () {
// Adds two random NX x NY matrices on the GPU and verifies the result
// element-wise against the host computation.
int i=0, error=0, nx=NX, ny=NY;
float diff;
int size = nx * ny * sizeof(float);
/* Matrix allocation */
float *mat_in1 = (float*) malloc(size);
float *mat_in2 = (float*) malloc(size);
float *mat_out = (float*) malloc(size);
/* Matrix allocation on device */
float *mat_out_gpu, *mat_in1_gpu, *mat_in2_gpu;
/* TO DO : do the allocation below, using cudaMalloc()*/
cudaMalloc (&mat_in1_gpu, size);
cudaMalloc (&mat_in2_gpu, size);
cudaMalloc (&mat_out_gpu, size);
/* Matrix initialization */
Init(mat_in1, nx, ny);
Init(mat_in2, nx, ny);
/* TO DO : write below the instructions to copy it to the device */
cudaMemcpy(mat_in1_gpu, mat_in1, size, cudaMemcpyHostToDevice);
cudaMemcpy(mat_in2_gpu, mat_in2, size, cudaMemcpyHostToDevice);
// NOTE(review): copying the uninitialized mat_out up is unnecessary — the
// kernel overwrites every element of mat_out_gpu.
cudaMemcpy(mat_out_gpu, mat_out, size, cudaMemcpyHostToDevice);
/* TO DO : complete the number of blocks below */
// Ceil-divide so the 1-D grid covers all nx*ny cells.
int numBlocks = (nx * ny + BLOCKSIZE-1) / BLOCKSIZE;
/* TO DO : kernel invocation */
kernadd<<<numBlocks, BLOCKSIZE>>>(mat_out_gpu, mat_in1_gpu, mat_in2_gpu, nx, ny);
cudaDeviceSynchronize();
/* We now transfer back the matrix from the device to the host */
/* TO DO : write cudaMemcpy() instruction below */
cudaMemcpy(mat_out, mat_out_gpu, size, cudaMemcpyDeviceToHost);
/* free memory */
cudaFree(mat_out_gpu);
cudaFree(mat_in1_gpu);
cudaFree(mat_in2_gpu);
/* We now check that the result is correct */
for (i=0; i< nx*ny; i++) { /* No need for a 2D loop, actually ! */
diff = mat_out[i] - (mat_in1[i]+mat_in2[i]);
if (fabs(diff) > 0.0000001f) {
error = 1;
}
}
if (error) {
printf("FAILED\n");
}
else {
printf("PASSED\n");
}
free (mat_in1);
free (mat_in2);
free (mat_out);
}
|
13,246 | #include "includes.h"
__global__ void bigstencil(int* in, int* out) {
// Adds 2 to every element; one thread per element.
// NOTE(review): no bounds guard — the caller must launch exactly as many
// threads as there are elements, or out-of-bounds accesses occur.
int i = blockIdx.x * blockDim.x + threadIdx.x;
out[i] = in[i] + 2;
} |
13,247 | #define O_CHANNELS 5
// Row pass of a separable filter bank: per image z, stages one zero-padded
// row of the source in shared memory, then writes a central difference
// (inp[x+2]-inp[x]) to channel 0 and a [1,2,1] smoothing to channel 1 of
// the O_CHANNELS-deep destination.
// NOTE(review): the early `return`s precede __syncthreads(); safe only if
// w/h/bsize are exact multiples of the block dims (the power-of-two
// assumption below suggests so) — confirm the launch configuration.
void __global__ sconv_packed_fwd_row_f32_ker(
float *v_dst, float *v_src, int w, int h, int bsize) {
/*
ALWAYS ASSUME:
w,h both less or equal than 512 and is power of 2
*/
const int x = threadIdx.x;
const int y = threadIdx.y + blockIdx.y*blockDim.y;
const int z = threadIdx.z + blockIdx.z*blockDim.z;
const int by = blockDim.y;
const int ty = threadIdx.y;
const int tz = threadIdx.z;
if(x>=w) return;
if(y>=h) return;
if(z>=bsize) return;
// One (w+2)-wide padded row per (ty,tz) slice of the block.
extern __shared__ float _inp[];
float *src_base = v_src+z*w*h;
float *inp = _inp + (ty+tz*by)*(w+2);
inp[x+1] = src_base[x+y*w];
// Zero padding at both ends (written redundantly by every x-thread).
inp[0] = 0.f;
inp[w+1] = 0.f;
__syncthreads();
float *dst_base = v_dst+z*O_CHANNELS*w*h;
dst_base[x+y*w] = inp[x+2] - inp[x];
dst_base[x+(y+h)*w] = inp[x+2]+2*inp[x+1]+inp[x];
}
/* Column pass of the packed forward step.  Reads the two row-pass planes
 * back from v_dst into padded shared columns, copies the raw source into
 * plane 0, and writes column difference / 1-2-1 smoothing combinations
 * into planes 1-4.
 * Dynamic shared memory required: blockDim.x*blockDim.z*2*(h+2)*sizeof(float).
 * NOTE(review): the loads index dst_base with stride h (x+y*h) while the
 * stores use stride w (x+y*w); those only agree when w == h -- TODO
 * confirm whether square images are assumed.
 * NOTE(review): early returns precede __syncthreads(); see the row kernel.
 */
void __global__ sconv_packed_fwd_col_f32_ker(
float *v_dst, float *v_src, int w, int h, int bsize) {
/*
ALWAYS ASSUME:
w,h both less or equal than 512 and is power of 2
*/
const int x = threadIdx.x + blockIdx.x*blockDim.x;
const int y = threadIdx.y;
const int z = threadIdx.z + blockIdx.z*blockDim.z;
const int bx = blockDim.x;
const int tx = threadIdx.x;
const int tz = threadIdx.z;
if(x>=w) return;
if(y>=h) return;
if(z>=bsize) return;
extern __shared__ float _inp[];
// Two padded columns (h+2 floats each) per (tx,tz) thread column.
float *inp = _inp + (tx+tz*bx)*2*(h+2);
float *inp2 = inp+h+2;
float *dst_base = v_dst+z*O_CHANNELS*w*h;
inp[y+1] = dst_base[x+y*h];
inp2[y+1]= dst_base[x+(y+w)*h];
inp[0] = 0.f;
inp2[0] = 0.f;
inp[h+1] = 0.f;
inp2[h+1] = 0.f;
__syncthreads();
dst_base[x+y*w] = v_src[x+(y+z*h)*w];
dst_base[x+(y+h)*w] = inp[y+2]-inp[y];
dst_base[x+(y+h*3)*w] = inp2[y+2]-inp2[y];
dst_base[x+(y+h*2)*w] = inp[y+2]+inp[y+1]*2+inp[y];
dst_base[x+(y+h*4)*w] = inp2[y+2]+inp2[y+1]*2+inp2[y]; }
/* Column pass of the backward (gradient) step.  Stages four of the
 * O_CHANNELS dedy planes in padded shared columns, copies plane 0
 * straight into dedx, and combines the remaining planes into two
 * temporaries consumed by the following row pass.
 * Dynamic shared memory required: blockDim.x*blockDim.z*4*(h+2)*sizeof(float).
 * NOTE(review): same stride-h loads vs stride-w stores and early-return-
 * before-barrier caveats as the forward column kernel -- TODO confirm.
 */
void __global__ sconv_packed_bwd_col_f32_ker(
float *v_dedx, float *v_tmp, float *v_dedy, int w, int h, int bsize) {
const int x = threadIdx.x + blockIdx.x*blockDim.x;
const int y = threadIdx.y;
const int z = threadIdx.z + blockIdx.z*blockDim.z;
const int bx = blockDim.x;
const int tx = threadIdx.x;
const int tz = threadIdx.z;
if(x>=w) return;
if(y>=h) return;
if(z>=bsize) return;
extern __shared__ float _inp[];
// Four padded columns (h+2 floats each) per (tx,tz) thread column.
float *inp0 = _inp + (tx+tz*bx)*4*(h+2);
float *inp1 = inp0+h+2;
float *inp2 = inp0+2*(h+2);
float *inp3 = inp0+3*(h+2);
float *dedy_base = v_dedy+z*O_CHANNELS*w*h;
v_dedx[x+(y+z*h)*w] = dedy_base[x+y*h];
inp0[y+1]= dedy_base[x+(y+w)*h];
inp1[y+1]= dedy_base[x+(y+w*2)*h];
inp2[y+1]= dedy_base[x+(y+w*3)*h];
inp3[y+1]= dedy_base[x+(y+w*4)*h];
inp0[0] = 0.f; inp0[h+1]=0.f;
inp1[0] = 0.f; inp1[h+1]=0.f;
inp2[0] = 0.f; inp2[h+1]=0.f;
inp3[0] = 0.f; inp3[h+1]=0.f;
__syncthreads();
// v_tmp holds two intermediate planes per image (base offset z*h*w*2).
float *tmp_base = v_tmp+z*h*w*2;
tmp_base[x+y*w] = inp0[y]-inp0[y+2]+inp1[y+2]+inp1[y+1]*2+inp1[y];
tmp_base[x+(y+h)*w] = inp2[y]-inp2[y+2]+inp3[y+2]+inp3[y+1]*2+inp3[y];
}
/* Row pass of the backward step: stages the two v_tmp planes in padded
 * shared rows and accumulates (+=) the combined row gradient into dedx,
 * which the column pass already seeded.
 * Dynamic shared memory required: blockDim.y*blockDim.z*2*(w+2)*sizeof(float).
 * NOTE(review): early returns precede __syncthreads(); see the other
 * kernels in this family.
 */
void __global__ sconv_packed_bwd_row_f32_ker(
float *v_dedx, float *v_tmp, int w, int h, int bsize) {
const int x = threadIdx.x;
const int y = threadIdx.y + blockIdx.y*blockDim.y;
const int z = threadIdx.z + blockIdx.z*blockDim.z;
const int by = blockDim.y;
const int ty = threadIdx.y;
const int tz = threadIdx.z;
if(x>=w) return;
if(y>=h) return;
if(z>=bsize) return;
extern __shared__ float _inp[];
float *tmp_base = v_tmp+z*w*h*2;
// Two padded rows (w+2 floats each) per (ty,tz) thread row.
float *inp0 = _inp + (ty+tz*by)*2*(w+2);
float *inp1 = inp0 + w+2;
inp0[x+1] = tmp_base[x+y*w];
inp1[x+1] = tmp_base[x+(y+h)*w];
inp0[0] = 0.f;
inp0[w+1] = 0.f;
inp1[0] = 0.f;
inp1[w+1] = 0.f;
__syncthreads();
v_dedx[x+(y+z*h)*w] +=
inp0[x]-inp0[x+2]+inp1[x+2]+inp1[x+1]*2+inp1[x];
}
|
13,248 | /*
* SVFourierCosine.cpp
*
* Created on: Aug 28, 2013
* Author: Sabbir
* Rewritten on: Jan 19, 2014
* Author: Matthew Dixon
*/
#define _USE_MATH_DEFINES
#include <iostream>
#include <string>
#include <vector>
#include <cuComplex.h>
#include <stdio.h>
#include <fstream>
#include <sstream>
#include<stdlib.h>
#define pi 3.1415926535897932384626433832795
#define rpart(x) (cuCreal(x))
#define ipart(x) (cuCimag(x))
#define cmplx(x,y) (make_cuDoubleComplex(x,y))
float totaltime = 0.0;
float counter = 1.0;
double s0 = 0.0;
double r0 = 0.0;
double q0 = 0.0;
size_t num_blocks = 0;
size_t block_size = 256;
int num_bytes = 0;
bool bLoad = false;
//host global arrays
double *h_K,*h_T,*h_W,*h_OP;
char *h_Types;
//device variables
double* d_sums = 0;
double *d_input_p0 = 0;
double *d_input_K = 0;
double *d_input_T = 0;
char *d_input_Types = 0;
char *model = 0;
/* Complex square root of x via 10 fixed Newton-Raphson iterations
 * (r <- r - (r*r - x)/(2r)), seeded with r = x.
 * NOTE(review): divides by 2r, so x == 0 produces NaNs, and the fixed
 * iteration count gives no accuracy guarantee for badly scaled inputs --
 * TODO confirm inputs stay in a well-conditioned range. */
__device__ cuDoubleComplex squareRoot(cuDoubleComplex x)
{
cuDoubleComplex inc;
cuDoubleComplex c = x;
cuDoubleComplex r = c;
for(int j=0; j < 10; j++)
{
// One Newton step: inc = (r*r - c) / (2r), then r -= inc.
inc = cuCadd(r,r);
inc = cuCdiv((cuCsub(cuCmul(r,r), c)),inc);
r = cuCsub(r,inc);
}
return r;
}
// Polar angle (argument) of z in radians, via atan2(Im, Re).
__device__ double carg(const cuDoubleComplex& z) {return (double)atan2(ipart(z), rpart(z));} // polar angle
// Modulus |z|; thin wrapper over cuCabs.
__device__ double cabs(const cuDoubleComplex& z) {return (double)cuCabs(z);}
// z**n (integer exponent) by De Moivre: |z|^n * (cos(n*arg z) + i*sin(n*arg z)).
__device__ cuDoubleComplex cPow(const cuDoubleComplex& z, const int &n)
{
    const double magnitude = pow(cabs(z), n);  // |z|^n
    const double angle = n * carg(z);          // n * arg(z)
    return cmplx(magnitude * cos(angle), magnitude * sin(angle));
}
// z**n (real exponent) by De Moivre: |z|^n * (cos(n*arg z) + i*sin(n*arg z)).
__device__ cuDoubleComplex cPow(const cuDoubleComplex& z, const double &n)
{
    const double magnitude = pow(cabs(z), n);  // |z|^n
    const double angle = n * carg(z);          // n * arg(z)
    return cmplx(magnitude * cos(angle), magnitude * sin(angle));
}
// Complex exponential: for arg = x + iy, returns e^x * (cos y + i sin y).
__device__ cuDoubleComplex my_complex_exp (cuDoubleComplex arg)
{
    double sine, cosine;
    sincos(arg.y, &sine, &cosine);     // sin and cos of the imaginary part in one call
    const double scale = exp(arg.x);   // magnitude from the real part
    cuDoubleComplex out;
    out.x = cosine * scale;
    out.y = sine * scale;
    return out;
}
/* Heston stochastic-volatility characteristic function evaluated at u.
 * Parameters: maturity T, rates r/q, vol-of-vol sigma, mean-reversion
 * speed lmbda, long-run variance meanV, initial variance v0, correlation
 * rho.  The g-term subtracts d in the numerator, which matches the
 * branch-cut-safe ("little Heston trap") form -- presumably intentional,
 * TODO confirm against the calibration reference.  The trailing comments
 * on each line give the scalar formula being assembled with cuComplex ops.
 */
__device__ cuDoubleComplex HestonCF(
double u,
double T,
double r,
double q,
double sigma,
double lmbda,
double meanV,
double v0,
double rho
)
{
cuDoubleComplex j1={0.0,1.0};
double a = lmbda*meanV;
double b = lmbda;
double sigma2 = sigma*sigma;
// d = sqrt((i*rho*sigma*u - b)^2 + (u^2 + i*u)*sigma^2)
cuDoubleComplex d = squareRoot(cuCadd(cPow((cuCsub(cuCmul(j1,cmplx(rho*sigma*u,0)),cmplx(b,0))),2),cuCmul((cuCadd(cmplx(u*u,0),cuCmul(j1,cmplx(u,0)))),cmplx(sigma2,0))));
// g = (b - i*rho*sigma*u - d) / (b - i*rho*sigma*u + d)
cuDoubleComplex g = cuCdiv(cuCsub(cmplx(b,0),cuCadd(cuCmul(j1,cmplx(rho*sigma*u,0)),d)), cuCadd(cuCsub(cmplx(b,0),cuCmul(j1,cmplx(rho*sigma*u,0))),d));
cuDoubleComplex ret = my_complex_exp(cuCmul(j1, make_cuDoubleComplex(u*(r-q)*T,0)));
cuDoubleComplex temp2 = cuCmul(cuCsub(cuCsub(cmplx(b,0),cuCmul(cmplx(rho*sigma*u,0),j1)),d),cmplx(T,0)); // (b - rho*j1*sigma*u - d)*T
// Complex log of (1 - g*exp(-dT))/(1 - g), split into modulus and argument.
double root_sqr = cabs(cuCdiv(cuCsub(cmplx(1.0,0), cuCmul(g,my_complex_exp(cuCmul(cuCsub(cmplx(0,0),d),cmplx(T,0))))),cuCsub(cmplx(1.0,0),g)));
double logarithm = log(root_sqr);
double arg = carg(cuCdiv(cuCsub(cmplx(1.0,0), cuCmul(g,my_complex_exp(cuCmul(cuCsub(cmplx(0,0),d),cmplx(T,0))))),cuCsub(cmplx(1.0,0),g)));
cuDoubleComplex temp3 = cuCmul(cmplx(2.0,0),cuCadd(cmplx(logarithm,0),cmplx(0,arg))); // 2.0*log((1.0-g*exp(-d*T))/(1.0-g));
cuDoubleComplex temp1 = cuCsub(temp2,temp3); // ((b - rho*j1*sigma*u - d)*T - 2.0*log((1.0-g*exp(-d*T))/(1.0-g)))
ret = cuCmul(ret,my_complex_exp(cuCmul(cmplx(a/sigma2,0),temp1)));
temp1 = cmplx(v0/sigma2,0);
temp2 = cuCsub(cuCsub(cmplx(b,0),cuCmul(cmplx(rho*sigma*u,0),j1)),d); // (b - rho*j1*sigma*u - d)
temp3 = cuCsub(cmplx(1.0,0),my_complex_exp(cuCmul(cuCsub(cmplx(0,0),d),cmplx(T,0)))); // (1.0-exp(-d*T))
cuDoubleComplex temp4 = cuCsub(cmplx(1.0,0),cuCmul(g,my_complex_exp(cuCmul(cuCsub(cmplx(0,0),d),cmplx(T,0))))); // (1.0-g*exp(-d*T))
temp1 = cuCmul(temp1, temp2);
temp1 = cuCmul (temp1,temp3);
temp1 = cuCdiv(temp1,temp4);
temp1 = my_complex_exp(temp1);
return cuCmul(ret,temp1);
}
/* Bates characteristic function: Heston CF multiplied by a lognormal jump
 * component
 *   exp( lmbda'*T*( -a*u*i + exp(i*u*log(1+a) + 0.5*b^2*i*u*(i*u-1)) - 1 ) ),
 * with jump parameters a (mean jump size), b (jump vol) and intensity
 * lmbda_prime.
 *
 * BUG FIX: the jump term was scaled by (lmbda_prime*T) TWICE, giving a
 * (lmbda_prime*T)^2 factor that contradicts the formula in the original
 * trailing comment; the duplicate multiplication has been removed.
 */
__device__ cuDoubleComplex BatesCF(
double u,
double T,
double r,
double q,
double sigma,
double lmbda,
double meanV,
double v0,
double rho,
double a,
double b,
double lmbda_prime
)
{
// Diffusive part: plain Heston characteristic function.
cuDoubleComplex HCF = HestonCF(u,T,r,q,sigma,lmbda,meanV,v0,rho);
cuDoubleComplex j1 = {0.0,1.0};
// temp1 = -i*a*u*lmbda_prime*T  (jump compensator drift term)
cuDoubleComplex temp1 = cuCmul(cmplx(lmbda_prime*T*-a*u,0),j1);
cuDoubleComplex uj = cuCmul(cmplx(u,0),j1);
cuDoubleComplex temp2 = cuCsub(uj,cmplx(1.0,0)); // (i*u - 1)
// temp3 = exp(i*u*log(1+a) + 0.5*b*b*i*u*(i*u-1)) - 1
cuDoubleComplex temp3 = cuCmul(cmplx(0.5*b*b,0),uj);
temp3 = cuCmul(temp3,temp2);
temp3 = cuCadd(cuCmul(uj,cmplx(log(1.0+a),0)),temp3);
temp3 = cuCsub(my_complex_exp(temp3),cmplx(1.0,0));
// Scale by lmbda_prime*T exactly once (the original applied it twice).
temp3 = cuCmul(cmplx(lmbda_prime*T,0),temp3);
temp3 = my_complex_exp(cuCadd(temp1,temp3));
// = exp(lmbda_prime*T*(-a*u*j + (exp(u*j*log(1.0+a)+0.5*b*b*u*j*(u*j-1.0))-1.0)))
return (cuCmul(HCF,temp3));
}
/* Variance-Gamma characteristic function at u, with volatility sigma,
 * drift theta and variance rate nu; omega is the martingale (drift)
 * correction.  The commented lines give the scalar formulas assembled
 * with the cuComplex helpers. */
__device__ cuDoubleComplex VGCF(
double u,
double T,
double r,
double q,
double sigma,
double theta,
double nu
)
{
cuDoubleComplex j1 = {0.0,1.0};
double sigma2 = sigma*sigma;
// omega = log(1 - theta*nu - sigma^2*nu/2) / nu  (risk-neutral drift fix)
double omega = (1.0/nu)*(log(1.0-theta*nu-sigma2*nu/2.0));
double tmp = 1.0 + 0.5*sigma2*u*u*nu;
//tmp <- 1.0 - j*theta*nu*u + 0.5*sigma*sigma*u*u*nu
cuDoubleComplex temp= cuCsub(cmplx(tmp,0),cuCmul(cmplx(theta*nu*u,0),j1));
temp = cPow(temp, T/nu);
temp = cuCdiv(my_complex_exp(cuCmul(cmplx((r+omega-q)*u*T,0),j1)),temp);
//ret <- exp(j*u*(r + omega - q)*T - T*log(tmp)/nu)
//= exp(j*u*(r+omega-q)*T)/tmp**(T/nu)
return temp;
}
/* CGMY characteristic function at u; m is the martingale correction so
 * the discounted asset has the correct drift.  tgamma(-Y) requires Y to
 * be non-integer to stay finite -- a standing assumption of the model. */
__device__ cuDoubleComplex CGMYCF(
double u,
double T,
double r,
double q,
double C,
double G,
double M,
double Y
)
{
cuDoubleComplex j1 = {0.0,1.0};
double tg = tgamma(-Y);
// m = -C*Gamma(-Y)*((M-1)^Y - M^Y + (G+1)^Y - G^Y)
double m = -C*tg*(pow(M-1.0,Y)-pow(M,Y)+pow((G+1.0),Y)-pow(G,Y));
cuDoubleComplex uj = cuCmul(cmplx(u,0),j1);
cuDoubleComplex tp= cuCsub(cPow(cuCsub(cmplx(M,0),uj),Y),cmplx(pow(M,Y),0));
cuDoubleComplex tn= cuCsub(cPow(cuCadd(cmplx(G,0),uj),Y),cmplx(pow(G,Y),0));
cuDoubleComplex temp = cuCmul(cmplx(C*T*tg,0),cuCadd(tp,tn));
//tmp = C*T*gamma(-Y)*((M-j*u)**Y-M**Y+(G+j*u)**Y-G**Y)
return(my_complex_exp(cuCadd(cuCmul(uj, cmplx((r-q+m)*T,0)), temp)));
// ret <- exp(j*u*(r-q+m)*T + tmp)
}
/* COS-method cosine coefficient chi_k of the payoff e^y on [c,d] for the
 * expansion interval [a,b] (closed-form integral of e^y*cos(k*pi*(y-a)/(b-a))).
 * Operand grouping matches the original expression exactly, so the result
 * is bit-identical. */
__device__ double xi(
double k,
double a,
double b,
double c,
double d)
{
    const double kp = k*pi;  // common factor k*pi, same grouping as before
    double ret = 1.0/(1.0+pow(kp/(b-a),2))
               * ( cos(kp*(d-a)/(b-a))*exp(d)
                 - cos(kp*(c-a)/(b-a))*exp(c)
                 + kp/(b-a)*sin(kp*(d-a)/(b-a))*exp(d)
                 - kp/(b-a)*sin(kp*(c-a)/(b-a))*exp(c) );
    return ret;
}
/* COS-method coefficient psi_k: the integral of cos(k*pi*(y-a)/(b-a))
 * over [c,d].  For k == 0 the integrand is constant 1 and the integral
 * is simply the interval length. */
__device__ double psi(
double k,
double a,
double b,
double c,
double d)
{
    if (k==0)
        return d-c;  // degenerate term: integral of 1 over [c,d]
    // Analytic antiderivative evaluated at the endpoints.
    return (sin(k*pi*(d-a)/(b-a))-sin(k*pi*(c-a)/(b-a)))*(b-a)/(k*pi);
}
/* COS-method Heston pricer.  One CUDA block per option (blockIdx.x), one
 * thread per cosine-expansion term (k = threadIdx.x).  Each thread builds
 * its term -- the k = 0 term carries the conventional 1/2 weight -- then
 * a shared-memory tree reduction sums them and thread 0 writes the
 * discounted price.
 * Launch requirements: blockDim.x must be a power of two (the halving
 * reduction assumes it) and dynamic shared memory must hold blockDim.x
 * cuDoubleComplex values.  [a,b] is the truncation interval from the
 * first two cumulants c1, c2. */
__global__ void HestonCOS(
double* p0, //parameters
double S, //option underlying
double r0, //risk free inst. short rate
double q0, //annual dividend rate
double* K, //option strike
double* T, //option maturity
char* Types, //option type {'C','P'}
bool bIncorporateNLContraint,
double* results)
{
extern __shared__ cuDoubleComplex sdata[];
double kappa, theta, sigma, rho, v0;
double U, unit = 0.5;
kappa = p0[0]; theta =p0[1]; sigma = p0[2]; rho = p0[3]; v0=p0[4];
if (bIncorporateNLContraint)
kappa = (kappa + sigma*sigma)/(2.0*theta);
double lmbda = kappa, meanV = theta;
cuDoubleComplex j1 = {0.0, 1.0};
double sigma2 = sigma*sigma;
double lmbda2 = lmbda*lmbda;
// c1, c2: first two cumulants of log(S_T/K) used to size [a,b].
double c1 = r0*T[blockIdx.x]+(1-exp(-lmbda*T[blockIdx.x]))*(meanV-v0)/(2.0*lmbda)-0.5*meanV*T[blockIdx.x];
double c2 = 1.0/(8.0*lmbda2*lmbda)*(sigma*T[blockIdx.x]*lmbda*exp(-lmbda*T[blockIdx.x])*(v0-meanV)*(8.0*lmbda*rho-4.0*sigma)+lmbda*rho*sigma*(1-exp(-lmbda*T[blockIdx.x]))*(16.0*meanV-8.0*v0)+2.0*meanV*lmbda*T[blockIdx.x]*(-4.0*lmbda*rho*sigma+sigma2+4.0*lmbda2)+sigma2*((meanV-2.0*v0)*exp(-2.0*lmbda*T[blockIdx.x])+meanV*(6.0*exp(-lmbda*T[blockIdx.x])-7.0)+2.0*v0)+8.0*lmbda2*(v0-meanV)*(1-exp(-lmbda*T[blockIdx.x])));
double a = c1-12.0*sqrt(fabs(c2));
double b = c1+12.0*sqrt(fabs(c2));
double x = log(S/K[blockIdx.x]);
int tx = threadIdx.x;
// Payoff coefficient U_k: call on [0,b], put on [a,0].
if (Types[blockIdx.x] == 'C')
U = 2.0/(b-a)*(xi(tx,a,b,0,b) - psi(tx,a,b,0,b));
else
U = 2.0/(b-a)*(-xi(tx,a,b,a,0) + psi(tx,a,b,a,0));
cuDoubleComplex HCF = HestonCF(tx*pi/(b-a),T[blockIdx.x],r0,q0,sigma,lmbda,meanV,v0,rho);
if(threadIdx.x == 0)
{
sdata[tx] = cuCmul(HCF, cuCmul(cmplx(unit*U, 0),my_complex_exp(cuCdiv(cuCmul(j1,cmplx(pi*(x-a)*double(tx), 0)),cmplx((b-a),0))))); // unit*HCF*exp(j1*double(k)*pi*(x-a)/(b-a))*U;
}
else
{
unit = 1.0;
sdata[tx] = cuCmul(cuCmul(cuCmul(cmplx(unit,0),HCF),my_complex_exp(cuCdiv (cuCmul(j1,cmplx(pi*(x-a)*double(tx), 0)), cuCsub(cmplx(b,0),cmplx(a,0))))), cmplx(U,0));
}
__syncthreads();
// Power-of-two tree reduction of the complex terms in shared memory.
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
{
if(tx < offset)
{
sdata[tx] = cuCadd(sdata[tx], sdata[tx+ offset]);
}
__syncthreads();
}
if(threadIdx.x == 0)
{
results[blockIdx.x] = K[blockIdx.x]*exp(-r0*T[blockIdx.x])*rpart(sdata[0]);
}
}
/* COS-method Bates (Heston + lognormal jumps) pricer.  Structure is
 * identical to HestonCOS: one block per option, one thread per expansion
 * term, shared-memory tree reduction, thread 0 writes the discounted
 * price.  Same launch requirements (power-of-two blockDim.x, blockDim.x
 * cuDoubleComplex of dynamic shared memory).  The cumulant-based [a,b]
 * interval reuses the Heston diffusion cumulants. */
__global__ void BatesCOS(
double* p0, //parameters
double S, //option underlying
double r0, //risk free inst. short rate
double q0, //annual dividend rate
double* K, //option strike
double* T, //option maturity
char* Types, //option type {'C','P'}
bool bIncorporateNLContraint,
double* results)
{
extern __shared__ cuDoubleComplex sdata[];
double kappa, theta, sigma, rho, v0, a_prime, b_prime, lmbda_prime;
double U, unit = 0.5;
kappa = p0[0]; theta =p0[1]; sigma = p0[2]; rho = p0[3]; v0=p0[4]; a_prime =p0[5]; b_prime = p0[6]; lmbda_prime = p0[7];
if (bIncorporateNLContraint)
kappa = (kappa + sigma*sigma)/(2.0*theta);
double lmbda = kappa, meanV = theta;
cuDoubleComplex j1 = {0.0, 1.0};
double sigma2 = sigma*sigma;
double lmbda2 = lmbda*lmbda;
// c1, c2: cumulants of the Heston diffusion used to size [a,b].
double c1 = r0*T[blockIdx.x]+(1-exp(-lmbda*T[blockIdx.x]))*(meanV-v0)/(2.0*lmbda)-0.5*meanV*T[blockIdx.x];
double c2 = 1.0/(8.0*lmbda2*lmbda)*(sigma*T[blockIdx.x]*lmbda*exp(-lmbda*T[blockIdx.x])*(v0-meanV)*(8.0*lmbda*rho-4.0*sigma)+lmbda*rho*sigma*(1-exp(-lmbda*T[blockIdx.x]))*(16.0*meanV-8.0*v0)+2.0*meanV*lmbda*T[blockIdx.x]*(-4.0*lmbda*rho*sigma+sigma2+4.0*lmbda2)+sigma2*((meanV-2.0*v0)*exp(-2.0*lmbda*T[blockIdx.x])+meanV*(6.0*exp(-lmbda*T[blockIdx.x])-7.0)+2.0*v0)+8.0*lmbda2*(v0-meanV)*(1-exp(-lmbda*T[blockIdx.x])));
double a = c1-12.0*sqrt(fabs(c2));
double b = c1+12.0*sqrt(fabs(c2));
double x = log(S/K[blockIdx.x]);
int tx = threadIdx.x;
if (Types[blockIdx.x] == 'C')
U = 2.0/(b-a)*(xi(tx,a,b,0,b) - psi(tx,a,b,0,b));
else
U = 2.0/(b-a)*(-xi(tx,a,b,a,0) + psi(tx,a,b,a,0));
cuDoubleComplex BCF = BatesCF(tx*pi/(b-a),T[blockIdx.x],r0,q0,sigma,lmbda,meanV,v0,rho,a_prime, b_prime, lmbda_prime);
if(threadIdx.x == 0)
{
sdata[tx] = cuCmul(BCF, cuCmul(cmplx(unit*U, 0),my_complex_exp(cuCdiv(cuCmul(j1,cmplx(pi*(x-a)*double(tx), 0)),cmplx((b-a),0))))); // unit*HCF*exp(j1*double(k)*pi*(x-a)/(b-a))*U;
}
else
{
unit = 1.0;
sdata[tx] = cuCmul(cuCmul(cuCmul(cmplx(unit,0),BCF),my_complex_exp(cuCdiv (cuCmul(j1,cmplx(pi*(x-a)*double(tx), 0)), cuCsub(cmplx(b,0),cmplx(a,0))))), cmplx(U,0));
}
__syncthreads();
// Power-of-two tree reduction of the complex terms in shared memory.
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
{
if(tx < offset)
{
sdata[tx] = cuCadd(sdata[tx], sdata[tx+ offset]);
}
__syncthreads();
}
if(threadIdx.x == 0)
{
results[blockIdx.x] = K[blockIdx.x]*exp(-r0*T[blockIdx.x])*rpart(sdata[0]);
}
}
/* COS-method Variance-Gamma pricer.  Same structure and launch
 * requirements as HestonCOS (one block per option, one thread per term,
 * power-of-two blockDim.x, blockDim.x cuDoubleComplex of shared memory).
 * [a,b] is sized from the VG cumulants c1, c2, c4. */
__global__ void VGCOS(
double* p0, //parameters
double S, //option underlying
double r0, //risk free inst. short rate
double q0, //annual dividend rate
double* K, //option strike
double* T, //option maturity
char* Types, //option type {'C','P'}
bool bIncorporateNLContraint,
double* results)
{
extern __shared__ cuDoubleComplex sdata[];
double sigma, theta, nu;
double U, unit = 0.5;
sigma = p0[0]; theta =p0[1]; nu = p0[2];
cuDoubleComplex j1 = {0.0, 1.0};
double sigma2 = sigma*sigma;
double theta2 = theta*theta;
double nu2 = nu*nu;
// VG cumulants: mean, variance and 4th cumulant of log-return.
double c1 = (r0+theta)*T[blockIdx.x];
double c2 = (sigma2 + nu*theta2)*T[blockIdx.x];
double c4 = 3.0*(sigma2*sigma2*nu +2.0*theta2*theta2*nu2*nu+4.0*sigma2*theta2*nu2)*T[blockIdx.x];
double a = c1-10.0*sqrt(c2 + sqrt(c4));
double b = c1+10.0*sqrt(c2 + sqrt(c4));
double x = log(S/K[blockIdx.x]);
int tx = threadIdx.x;
if (Types[blockIdx.x] == 'C')
U = 2.0/(b-a)*(xi(tx,a,b,0,b) - psi(tx,a,b,0,b));
else
U = 2.0/(b-a)*(-xi(tx,a,b,a,0) + psi(tx,a,b,a,0));
cuDoubleComplex VGCF_ = VGCF(tx*pi/(b-a),T[blockIdx.x],r0,q0,sigma,theta,nu);
if(threadIdx.x == 0)
{
sdata[tx] = cuCmul(VGCF_, cuCmul(cmplx(unit*U, 0),my_complex_exp(cuCdiv(cuCmul(j1,cmplx(pi*(x-a)*double(tx), 0)),cmplx((b-a),0))))); // unit*HCF*exp(j1*double(k)*pi*(x-a)/(b-a))*U;
}
else
{
unit = 1.0;
sdata[tx] = cuCmul(cuCmul(cuCmul(cmplx(unit,0),VGCF_),my_complex_exp(cuCdiv (cuCmul(j1,cmplx(pi*(x-a)*double(tx), 0)), cuCsub(cmplx(b,0),cmplx(a,0))))), cmplx(U,0));
}
__syncthreads();
// Power-of-two tree reduction of the complex terms in shared memory.
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
{
if(tx < offset)
{
sdata[tx] = cuCadd(sdata[tx], sdata[tx+ offset]);
}
__syncthreads();
}
if(threadIdx.x == 0)
{
results[blockIdx.x] = K[blockIdx.x]*exp(-r0*T[blockIdx.x])*rpart(sdata[0]);
}
}
/* COS-method CGMY pricer.  Same structure and launch requirements as the
 * other *COS kernels.  C, G, M are derived from the (sigma, theta, nu)
 * parameterization; [a,b] is sized from the CGMY cumulants.
 * NOTE(review): G and M are computed with IDENTICAL expressions below;
 * the usual VG/CGMY mapping flips the sign of the theta/sigma2 term
 * between G and M -- TODO confirm whether this is intentional. */
__global__ void CGMYCOS(
double* p0, //parameters
double S, //option underlying
double r0, //risk free inst. short rate
double q0, //annual dividend rate
double* K, //option strike
double* T, //option maturity
char* Types, //option type {'C','P'}
bool bIncorporateNLContraint,
double* results)
{
extern __shared__ cuDoubleComplex sdata[];
double sigma, theta, nu,Y;
double U, unit = 0.5;
sigma = p0[0]; theta =p0[1]; nu = p0[2]; Y = p0[3];
cuDoubleComplex j1 = {0.0, 1.0};
double C = 1.0/nu;
double sigma2 = sigma*sigma;
double theta2 = theta*theta;
double G = theta/sigma2 + sqrt(theta2/(sigma2*sigma2) + 2.0/(nu*sigma2));
double M = theta/sigma2 + sqrt(theta2/(sigma2*sigma2) + 2.0/(nu*sigma2));
// CGMY cumulants used to size the truncation interval [a,b].
double c1 = r0*T[blockIdx.x] + C*T[blockIdx.x]*tgamma(1.0-Y)*(pow(M,Y-1) - pow(G,Y-1));
double c2 = sigma2*T[blockIdx.x] + C*T[blockIdx.x]*tgamma(2.0-Y)*(pow(M,Y-2.0) + pow(G,Y-2.0));
double c4 = C*T[blockIdx.x]*tgamma(4.0-Y)*(pow(M,Y-4) + pow(G,Y-4));
double a = c1-10.0*sqrt(c2 + sqrt(c4));
double b = c1+10.0*sqrt(c2 + sqrt(c4));
double x = log(S/K[blockIdx.x]);
int tx = threadIdx.x;
if (Types[blockIdx.x] == 'C')
U = 2.0/(b-a)*(xi(tx,a,b,0,b) - psi(tx,a,b,0,b));
else
U = 2.0/(b-a)*(-xi(tx,a,b,a,0) + psi(tx,a,b,a,0));
cuDoubleComplex CGMYCF_ = CGMYCF(tx*pi/(b-a),T[blockIdx.x],r0,q0,C,G,M,Y);
if(threadIdx.x == 0)
{
sdata[tx] = cuCmul(CGMYCF_, cuCmul(cmplx(unit*U, 0),my_complex_exp(cuCdiv(cuCmul(j1,cmplx(pi*(x-a)*double(tx), 0)),cmplx((b-a),0))))); // unit*HCF*exp(j1*double(k)*pi*(x-a)/(b-a))*U;
}
else
{
unit = 1.0;
sdata[tx] = cuCmul(cuCmul(cuCmul(cmplx(unit,0),CGMYCF_),my_complex_exp(cuCdiv (cuCmul(j1,cmplx(pi*(x-a)*double(tx), 0)), cuCsub(cmplx(b,0),cmplx(a,0))))), cmplx(U,0));
}
__syncthreads();
// Power-of-two tree reduction of the complex terms in shared memory.
for(int offset = blockDim.x / 2; offset > 0; offset >>= 1)
{
if(tx < offset)
{
sdata[tx] = cuCadd(sdata[tx], sdata[tx+ offset]);
}
__syncthreads();
}
if(threadIdx.x == 0)
{
results[blockIdx.x] = K[blockIdx.x]*exp(-r0*T[blockIdx.x])*rpart(sdata[0]);
}
}
#ifdef __cplusplus
extern "C"
#endif
/*
 * Stash host option data, allocate the matching device buffers and copy
 * the strike/maturity/type arrays to the GPU.  Must be called before
 * error_func().
 *
 * FIX: d_input_Types previously received sizeof(double)*length bytes even
 * though it only ever holds `length` chars (the memcpy below copies
 * sizeof(char)*length); the allocation now matches the copy.
 * NOTE(review): cudaMalloc/cudaMemcpy return codes are not checked --
 * TODO add error handling if allocation failure is possible in practice.
 */
void copy_data (double* K, double* T, double* W, double* OP, char* Types,double s0_, double r0_, double q0_,int length){
h_K =K; h_T=T; h_W=W; h_OP=OP;h_Types=Types;s0=s0_;r0=r0_;q0=q0_;
num_blocks = length;                  // one CUDA block per option
num_bytes = sizeof(double)*num_blocks;
cudaMalloc((void**)&d_sums, num_bytes);
cudaMalloc((void**)&d_input_K, num_bytes);
cudaMalloc((void**)&d_input_T, num_bytes);
cudaMalloc((void**)&d_input_Types, sizeof(char) * num_blocks); // char buffer, not doubles
cudaMemcpy(d_input_K, h_K, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_input_T, h_T, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_input_Types, h_Types, sizeof(char) * num_blocks, cudaMemcpyHostToDevice);
bLoad = true;
}
#ifdef __cplusplus
extern "C"
#endif
/* Release every device buffer allocated by copy_data() and mark the
 * option data as unloaded so error_func() refuses to run. */
void dealloc_data (){
cudaFree(d_input_Types);
cudaFree(d_input_T);
cudaFree(d_input_K);
cudaFree(d_sums);
bLoad = false;
}
#ifdef __cplusplus
extern "C"
#endif
// Select the pricing model by name; error_func() compares against
// "Heston", "Bates", "VG" and "CGMY".
// NOTE(review): stores the caller's pointer, not a copy -- the string
// must outlive all subsequent error_func() calls.
void set_model(char* model_){
model = model_;
}
#ifdef __cplusplus
extern "C"
#endif
// Set the number of COS-expansion terms / threads per block used by the
// pricing kernels (default 256).  The reduction in the kernels assumes a
// power of two.
void set_block_size(int block_size_){
block_size = (size_t)block_size_;
}
#ifdef __cplusplus
extern "C"
#endif
/*
 * Weighted RMSE between model and market option prices for parameter
 * vector p0 (length entries).  Launches one CUDA block per option; each
 * block reduces block_size COS-expansion terms in shared memory
 * (block_size cuDoubleComplex = 2*block_size doubles).
 *
 * Returns 0.0 with a message on stderr if copy_data() or set_model()
 * has not been called first.
 */
double error_func (double *p0, int length)
{
std::cout.precision(16);
if (!bLoad){
std::cerr<<"Error: call copy_data first to load option data"<<std::endl;
return 0.0;
}
// Guard: strcmp(NULL, ...) below would crash if no model was selected.
if (!model){
std::cerr<<"Error: call set_model first to select a pricing model"<<std::endl;
return 0.0;
}
double rmse = 0.0;
bool bIncorporateNLConstraint = true;
double* h_result = new double[num_blocks];
cudaMalloc((void**)&d_input_p0, sizeof(double) * length);
cudaMemcpy(d_input_p0, &p0[0], sizeof(double) * length, cudaMemcpyHostToDevice);
// Dispatch on the model name; shared memory = block_size complex terms.
if (strcmp(model,"Heston")==0)
HestonCOS <<<num_blocks, block_size, (block_size*2) * sizeof(double)>>>(d_input_p0, s0, r0, q0, d_input_K, d_input_T, d_input_Types, bIncorporateNLConstraint, d_sums);
else if (strcmp(model,"Bates")==0)
BatesCOS <<<num_blocks, block_size, (block_size*2) * sizeof(double)>>>(d_input_p0, s0, r0, q0, d_input_K, d_input_T, d_input_Types, bIncorporateNLConstraint, d_sums);
else if (strcmp(model,"VG")==0)
VGCOS <<<num_blocks, block_size, (block_size*2) * sizeof(double)>>>(d_input_p0, s0, r0, q0, d_input_K, d_input_T, d_input_Types, bIncorporateNLConstraint, d_sums);
else if (strcmp(model,"CGMY")==0)
CGMYCOS <<<num_blocks, block_size, (block_size*2) * sizeof(double)>>>(d_input_p0, s0, r0, q0, d_input_K, d_input_T, d_input_Types, bIncorporateNLConstraint, d_sums);
else
std::cerr<<"Error: unknown model"<<std::endl;
// Surface launch-configuration errors instead of reading stale d_sums.
cudaError_t launchErr = cudaGetLastError();
if (launchErr != cudaSuccess)
std::cerr<<"CUDA launch error: "<<cudaGetErrorString(launchErr)<<std::endl;
// Blocking copy; also synchronizes with the kernel on the default stream.
cudaMemcpy(h_result, d_sums, num_bytes, cudaMemcpyDeviceToHost);
for(int index = 0 ; index < num_blocks ; index++)
{
rmse += pow(h_W[index]*(h_result[index] - h_OP[index]),2);
}
rmse = sqrt(rmse/num_blocks);
counter++;
delete [] h_result;
cudaFree(d_input_p0);
return rmse;
}
|
13,249 | /*
* Author : Benet Manzanares Salor
*
* Date : 20 / 1 / 2019
*
* Description:
* A program to compare the execution time between CUDA execution and CPU execution.
 * Specifically, after checking the parameters, initializing the data and choosing
 * between CUDA or CPU, the program performs a workload (loops, operations, ...) for
 * every element of an array and reports the execution time.
*
* At the CUDA call, the configuration priority is :
* CUDA_THREADS (multiple of 32 if is possible) > CUDA_BLOCKS (at least 1 ) > ChargePerThread
*
 * This code only tests one CPU thread; to use multiple threads you need to use the
 * CPUVersion (located in the CPUVersion folder).
*
* Parameters:
* Number of elements : Length of the array to do the work
* Option : Choose between CUDA or CPU
* 0 -> CPU || !=0 -> CUDA
*
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <stdbool.h>
#include <math.h>
/********************** CONSTANTS **********************/
#define NUM_PARAMS 2
#define WORK_ITERATIONS 1000
#define CPU_THREADS 8
#define MAX_CUDA_THREADS_PER_BLOCK 1024 //Divided in warps of 32 threads
#define MAX_CUDA_BLOCKS 1024*1024
#define MAX_CHARGE_PER_THREAD 1024
/********************** VARIABLES **********************/
int numElements;
int option;
int programResult;
float* a;
float* b;
float* resultArray;
float* c_a;
float* c_b;
int CUDA_THREADS = MAX_CUDA_THREADS_PER_BLOCK;
int CUDA_BLOCKS;
int chargePerThread;
/********************** METHODS **********************/
bool CorrectParameters(int na, char* arg[]);
void InicialitzeData();
void CpuExecution();
bool CudaExecution();
void ResultControl();
void FreeData();
/********** MAIN **********/
/* Entry point: validate the arguments, build the input arrays, run either
 * the CPU or the CUDA path, report the wall-clock time and verify the
 * results against the CPU reference. */
int main(int na, char* arg[])
{
    programResult = 0;
    if (!CorrectParameters(na, arg))
        return programResult;          /* bad arguments: exit with the error code */

    InicialitzeData();

    clock_t started = clock();
    if (option == 0)
        CpuExecution();
    else
        CudaExecution();
    printf("Execution time = %f seconds\n", ((double)(clock() - started)) / CLOCKS_PER_SEC);

    ResultControl();
    FreeData();
    return programResult;
}
/* CorrectParameters
* Description:
* Check if the enter parameters of the programs are correct
* Parameters:
* na : Number of arguments introduced by user
* arg : Reference of the table with arguments
* Return:
* True if all is correct
*/
bool CorrectParameters(int na, char* arg[])
{
if (na != NUM_PARAMS + 1)
{
printf("PARAM ERROR : The program need only %i parameters ( #ELEMENTS , Option ( 0 -> CPU , !=0 -> CUDA ) ) \n", NUM_PARAMS);
programResult = 1;
}
else
{
numElements = atoi(arg[1]);
option = atoi(arg[2]);
if (numElements <= 0)
{
printf("PARAM ERROR : The first parameter (#ELEMENTS) should be 1 or more\n");
programResult = 2;
}
}
return ( programResult == 0 );
}
/* InicialitzeData
* Description:
* Inicialize all the elements of the arrays used in the program with constant values.
* This arrays are directly use at the CPU execution and copyed
* for the CUDA execution at the MyCudaMemInicialization method. *
*/
void InicialitzeData()
{
a = (float *)malloc(numElements * sizeof(float));
b = (float *)malloc(numElements * sizeof(float));
resultArray = (float *)malloc(numElements * sizeof(float));
int i;
for (i = 0; i < numElements; i++)
{
a[i] = 1.5;
b[i] = 0.127;
}
}
/* FreeData
* Description:
* Free the arrays used
*/
/* Release the host-side arrays created by InicialitzeData(). */
void FreeData()
{
    free(resultArray);
    free(b);
    free(a);
}
/* Calc
* Description:
* Do the calculation for a element a and b.
* Parameters:
* a : first value
* b : second value
* Return:
* The final value of the operation to put it in the resultArray
*/
/* Reference workload for one element: start from a and accumulate b
 * WORK_ITERATIONS times.  Shared by the CPU path and the result check so
 * both produce bit-identical float results. */
float Calc(float a, float b)
{
    float acc = a;
    for (int step = 0; step < WORK_ITERATIONS; step++)
        acc += b;
    return acc;
}
/* Recompute every element with Calc() and count mismatches against
 * resultArray; prints a notice the first time a mismatch is found. */
int NumOfFails()
{
    int mismatches = 0;
    for (int i = 0; i < numElements; i++)
    {
        if (resultArray[i] != (Calc(a[i], b[i])))
        {
            if (mismatches == 0)
                printf("Not perfect results\n");
            mismatches++;
        }
    }
    return mismatches;
}
/* Report whether the computed results match the CPU reference. */
void ResultControl()
{
    int failures = NumOfFails();
    if (failures == 0)
        printf("Results : All correct!\n");
    else
        printf("Results : %i not corrects\n", failures);
}
/********************** CPU 1 THREAD **********************/
/* Single-threaded CPU baseline: apply Calc() to every element. */
void CpuExecution()
{
    for (int idx = 0; idx < numElements; idx++)
        resultArray[idx] = Calc(a[idx], b[idx]);
}
/********************** CUDA **********************/
/* Device-side twin of Calc(): accumulate b onto a WORK_ITERATIONS times,
 * so the host-side check can reproduce the result exactly. */
__device__ float CalcCUDA(float a, float b)
{
    float acc = a;
    for (int step = 0; step < WORK_ITERATIONS; step++)
        acc += b;
    return acc;
}
/* Each thread processes a contiguous run of chargePerThread elements,
 * updating c_a in place: c_a[i] = CalcCUDA(c_a[i], c_b[i]).
 * The start index linearizes a 2-D grid of 2-D blocks; `limit` is the
 * last index of this thread's run and the numElements comparison guards
 * the array tail. */
__global__ void DoCudaWork(int chargePerThread, float* c_a, float* c_b, int numElements) {
int i = (blockIdx.x + (blockIdx.y * gridDim.x)) * blockDim.x * blockDim.y * chargePerThread +
(threadIdx.x + (threadIdx.y * blockDim.x)) * chargePerThread ;
int limit = i + chargePerThread - 1;
//printf("Th (%i,%i)(%i,%i) : %i to %i\n", blockIdx.x, blockIdx.y, threadIdx.x,threadIdx.y, i, limit);
while (i <= limit && i < numElements)
{
c_a[i] = CalcCUDA(c_a[i], c_b[i]);
i++;
}
//printf("Thread %i finished\n", blockIdx.x * blockDim.x + threadIdx.x);
}
/* Allocate the device buffers c_a/c_b and upload the host arrays a/b.
 * NOTE(review): cudaMalloc/cudaMemcpy return codes are ignored here; a
 * failed allocation only surfaces later at cudaDeviceSynchronize() --
 * TODO add error checks. */
void MyCudaMemInicialization()
{
// Memory allocation and copy
cudaMalloc(&c_a, numElements * sizeof(float));
cudaMalloc(&c_b, numElements * sizeof(float));
cudaMemcpy(c_a, a, numElements*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(c_b, b, numElements*sizeof(float), cudaMemcpyHostToDevice);
}
/* Derive the launch configuration, in priority order: shrink
 * CUDA_THREADS until it does not exceed numElements (stepping by 32 to
 * stay a warp multiple, then by 1 below 32), grow CUDA_BLOCKS up to
 * MAX_CUDA_BLOCKS, then raise chargePerThread until every element is
 * covered. */
void CudaChargeControl()
{
/*********** SET BLOCKS AND THREADS ***********/
// Control too many threads
while (numElements < CUDA_THREADS)
{
if (CUDA_THREADS <= 32)
{
CUDA_THREADS--;
}
else
{
CUDA_THREADS -= 32;
}
}
//Adjust #BLOCKS
CUDA_BLOCKS = 1;
while (CUDA_THREADS * CUDA_BLOCKS < numElements && CUDA_BLOCKS < MAX_CUDA_BLOCKS)
{
CUDA_BLOCKS++;
}
// Adjust charge by thread
chargePerThread = numElements / (CUDA_THREADS * CUDA_BLOCKS);
if (chargePerThread == 0) chargePerThread = 1;
// Control manage all the elements incrementing charge per thread
while (numElements > (CUDA_THREADS * CUDA_BLOCKS * chargePerThread))
{
chargePerThread++;
}
}
/*
 * Report any CUDA error from the kernel run, copy the results back to the
 * host and release the DEVICE buffers.
 *
 * BUG FIX: the original called cudaFree(a)/cudaFree(b) on the HOST arrays
 * (allocated with malloc in InicialitzeData), which is invalid and leaked
 * the device buffers c_a/c_b; the device pointers are now freed instead.
 * The host arrays remain owned by FreeData().
 */
void CudaReturnAndFree(cudaError returnCode)
{
    /*********** RETURN INFO AND FREE ***********/
    if (returnCode != cudaSuccess)
    {
        printf("CUDA ERROR! Error type: %s\n", cudaGetErrorString(returnCode));
    }
    // Blocking copy: c_a holds the per-element results after DoCudaWork.
    cudaMemcpy(resultArray, c_a, numElements*sizeof(float), cudaMemcpyDeviceToHost);
    cudaFree(c_a);
    cudaFree(c_b);
}
/* CUDA path: copy the inputs to the device, size the launch, run
 * DoCudaWork and retrieve/free everything.  The reported time covers only
 * the kernel (allocation and copies excluded).
 * NOTE(review): always returns true regardless of the CUDA error code --
 * the error is only printed inside CudaReturnAndFree.  TODO propagate it
 * if callers should react to failures. */
bool CudaExecution()
{
bool result = true;
MyCudaMemInicialization();
CudaChargeControl();
/*********** WORKING ***********/
printf("CUDA launch with %i block/s and %i threads per block and a charge of %i per thread\n",
CUDA_BLOCKS, CUDA_THREADS , chargePerThread);
printf("%i elements not managed in CUDA execution\n",
numElements - ( CUDA_THREADS * CUDA_BLOCKS * chargePerThread ) );
clock_t tExe = clock();
DoCudaWork <<< CUDA_BLOCKS, CUDA_THREADS >>>(chargePerThread, c_a, c_b, numElements);
cudaError returnCode = cudaDeviceSynchronize();
printf("CUDA time (without memory alloc and copy)= %f seconds\n", ((double)(clock() - tExe)) / CLOCKS_PER_SEC);
CudaReturnAndFree(returnCode);
return result;
}
|
13,250 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <cuda_runtime.h>
// simple kernel function that adds two vectors
// Element-wise vector add: a[idx] += b[idx].
// Generalized to the global thread index so the kernel is also correct
// for multi-block launches; under the existing <<<1, N>>> launch the
// index reduces to threadIdx.x, so behavior is unchanged for current
// callers.  The idx < N guard drops excess threads in the last block.
__global__ void vect_add(float *a, float *b, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx<N) a[idx] = a[idx] + b[idx];
}
// function called from main fortran program
/* C wrapper callable from Fortran (trailing underscore, pointer args):
 * computes a := a + b on the GPU for N = *Np element vectors.
 * NOTE(review): launches a single block of N threads, so this fails for
 * N greater than the device's max threads per block (1024 on current
 * GPUs), and no CUDA return code or launch error is checked -- TODO use
 * a multi-block launch and add error handling for large N.
 * The copy-back of b is redundant (the kernel never writes it) but
 * harmless. */
extern "C" void kernel_wrapper_(float *a, float *b, int *Np)
{
float *a_d, *b_d; // declare GPU vector copies
int blocks = 1; // uses 1 block of
int N = *Np; // N threads on GPU
// Allocate memory on GPU
cudaMalloc( (void **)&a_d, sizeof(float) * N );
cudaMalloc( (void **)&b_d, sizeof(float) * N );
// copy vectors from CPU to GPU
cudaMemcpy( a_d, a, sizeof(float) * N, cudaMemcpyHostToDevice );
cudaMemcpy( b_d, b, sizeof(float) * N, cudaMemcpyHostToDevice );
// call function on GPU
vect_add<<< blocks, N >>>( a_d, b_d, N);
// copy vectors back from GPU to CPU
cudaMemcpy( a, a_d, sizeof(float) * N, cudaMemcpyDeviceToHost );
cudaMemcpy( b, b_d, sizeof(float) * N, cudaMemcpyDeviceToHost );
// free GPU memory
cudaFree(a_d);
cudaFree(b_d);
return;
}
|
13,251 | #include <iostream>
#include <cstdlib>
#include <vector>
#define N 4
#define n 6
#define blocksize 16
#define PI 3.141592654
#define seed 7
//Definition of Functions
void print(double * M,int cols,int rows);
__global__ void matrixMul(double * a,double * b, double * C, int rows,int cols, int cols2);
double matrixSum_comp(double * M,int rows,int cols);
double normal_rand(void);
/* Driver: builds an N x n managed matrix X and N-vector Y, launches
 * matrixMul to produce Z, and prints everything before and after.
 * NOTE(review): argv[1] is dereferenced without checking argc -- running
 * with no argument is undefined behavior.  TODO guard argc >= 2.
 * NOTE(review): print() takes (M, cols, rows); the calls below pass
 * (X, N, n) for an N-row, n-column matrix, which looks transposed --
 * TODO confirm the intended orientation. */
int main(int argc, char *argv[])
{
srand(atoi(argv[1]));// Seed initialized from the command line
//Definition of Variables
double *X,*Y,*Z; // Matrix, Transpose and product
int size = N * n * sizeof (double); // Number of bytes of an N x n matrix
int size2 = N * 1 * sizeof (double); // Number of bytes of an N x 1 vector
int size3 = n * 1 * sizeof (double); // Number of bytes of an n x 1 vector
cudaMallocManaged (&X, size);
cudaMallocManaged (&Y, size2);
cudaMallocManaged (&Z, size3);
//Initialization of the arrays
for( int row = 0; row < N; ++row )
{
for( int col = 0; col < n; ++col )
{
X[row*n + col] = normal_rand(); //Gaussian distributed (function taken from the sshpc github)
Z[col]=0.; // result vector zeroed: the kernel accumulates with +=
}
Y[row]=normal_rand();
}
// One thread per block; grid sized to cover the output with slack.
dim3 threads_per_block (1, 1, 1);
dim3 number_of_blocks ((n / threads_per_block.x) + 1, (n / threads_per_block.y) + 1, 1);
print(X,N,n);
print(Y,1,N);
print(Z,1,n);
matrixMul <<< number_of_blocks, threads_per_block >>> (X,Y,Z,1,n,N);
cudaDeviceSynchronize(); // managed memory: sync before reading Z on host
std::cout<<"------------"<<std::endl;
print(Z,1,n);
cudaFree(X);
cudaFree(Y);
cudaFree(Z);
return 0;
}
/* Print a rows x cols matrix stored row-major, tab-separated, one output
 * line per row.  Note the parameter order: (matrix, cols, rows). */
void print(double * M,int cols,int rows){
    for (int r = 0; r < rows; ++r)
    {
        for (int c = 0; c < cols; ++c)
            std::cout << M[c + r*cols] << '\t';
        std::cout << "\n";
    }
}
/* Sum of all rows*cols entries of M.  The traversal indexes column-major
 * (M[row + col*rows]) in the same order as the original, so the
 * floating-point accumulation is bit-identical. */
double matrixSum_comp(double * M,int rows,int cols){
    double total = 0;
    for (int r = 0; r < rows; r++)
        for (int c = 0; c < cols; c++)
            total += M[r + c*rows];
    return total;
}
/* One output element per thread: C[row*cols+col] += sum_k b[k*cols+col] *
 * a[row*cols2+k].  Accumulates with +=, so C must be zeroed beforehand
 * (main() does this).
 * NOTE(review): the definition's parameter order (cols, rows) disagrees
 * with the forward declaration (rows, cols) at the top of the file, and
 * the index strides mix cols and cols2 -- TODO confirm the intended
 * operand shapes before reusing this kernel. */
__global__ void matrixMul(double * a,double * b, double * C, int cols,int rows,int cols2)
{
int row = blockIdx.x * blockDim.x + threadIdx.x;
int col = blockIdx.y * blockDim.y + threadIdx.y;
if (row < rows && col < cols){
for (int k = 0; k < cols2; k++){
C[row*cols+col]+=b[k*cols+col]*a[row*cols2+k];
}
}
}
// Random number generator as per Abramowitz & Stegun
// Source taken from:
// http://c-faq.com/lib/gaussian.html
/* Standard-normal deviate via Box-Muller: each (U, V) uniform pair yields
 * a sin branch (phase 0) and a cos branch (phase 1) on alternate calls.
 * Uses static state, so it is not reentrant/thread-safe, and the stream
 * is controlled by srand(). */
double normal_rand(void){
static double U, V;   // uniforms kept across the paired calls
static int phase = 0; // 0: draw a new pair, 1: reuse the stored pair
double Z;
if(phase == 0) {
U = (rand() + 1.) / (RAND_MAX + 2.); // shifted into (0,1) to avoid log(0)
V = rand() / (RAND_MAX + 1.);
Z = sqrt(-2 * log(U)) * sin(2 * PI * V);
} else
Z = sqrt(-2 * log(U)) * cos(2 * PI * V);
phase = 1 - phase;
return Z;
}
|
13,252 | /*
Monte Roybal
CS_577 Parallel and Distributed Programming
5-10-2018
Dr. Gil Gallegos
N x M Matrix Solution Kernels Final
*/
/*Link Section*/
#include <stdio.h>
#include <stdlib.h>
/*Global Constant's Declarations and Definitions*/
#define N 5
#define M 4
/*Matrix Vector Multiply Kernel*/
/* c = A*b with one row per thread: thread threadIdx.x forms the dot
 * product of row idx of a (row-major, n columns) with vector b.
 * Launch with a single block of as many threads as rows. */
__global__ void DeviceMatrixVectorMult(double *c,double *a,double *b,int n)
{
    const int row = threadIdx.x;
    double dot = 0.0;
    for (int col = 0; col < n; col++)
        dot += a[row * n + col] * b[col];
    c[row] = dot;
}
/*Matrix Matrix Multiply Kernel*/
/* C = A*B with one row of C per thread: thread threadIdx.x walks the m
 * output columns, accumulating the dot product of row idx of a (n wide)
 * with column j of b (row-major with m columns). */
__global__ void DeviceMatrixMatrixMult(double *c,double *a,double *b,int n,int m)
{
    const int row = threadIdx.x;
    for (int j = 0; j < m; j++)
    {
        double dot = 0.0;  // fresh accumulator per output column
        for (int k = 0; k < n; k++)
            dot += a[row * n + k] * b[k * m + j];
        c[row * m + j] = dot;
    }
}
/*Open File Function*/
/*Open File Function*/
/* Opens `file` in `mode` and stores the stream in *ptr (NULL on failure).
 * Fix: the original tested `file == NULL` — the name argument, which is
 * never NULL here — instead of the fopen() result, so open failures were
 * silently ignored. */
void open_file(FILE **ptr, const char file[48],const char mode [8])
{
    *ptr = fopen(file, mode);
    if (*ptr == NULL)
    {
        printf("No file found");
    }
}
/*Two-Dimensional Flat Memory Allocation Function*/
/*Two-Dimensional Flat Memory Allocation Function*/
/* Allocates a flat n*m block of doubles (row-major 2-D matrix) and stores
 * the pointer in *arr.  Caller owns the memory and must free() it. */
void allocate_mem_2d(double **arr, int n,int m)
{
    *arr = (double *)malloc(n * m * sizeof(double));
}
/*One-Dimensional Memory Allocation Function*/
/*One-Dimensional Memory Allocation Function*/
/* Allocates an m-element double vector into *arr; caller must free() it. */
void allocate_mem_1d(double **arr,int m)
{
    *arr = (double *)malloc(m * sizeof(double));
}
/*Print NxM Matrix Function*/
/*Print NxM Matrix Function*/
/* Prints the rows x cols upper-left part of `matr`, whose row stride is X,
 * labelling each element as name[i][j]. */
void print_matrices(double *matr,const char matr_name[50],int rows,int cols,int X)
{
    for (int r = 0; r < rows; r++)
    {
        for (int c = 0; c < cols; c++)
        {
            printf("%s[%d][%d] = %f ",matr_name,r,c,matr[r *X + c]);
        }
        printf("\n");
    }
    printf("\n");
}
/*Print Vector Function*/
/*Print Vector Function*/
/* Prints the n entries of `vec`, labelled as name[i]. */
void print_vectors(double *vec,const char vec_name[50],int n)
{
    for (int k = 0; k < n; k++)
    {
        printf("%s[%d] = %f\n",vec_name,k,vec[k]);
    }
    printf("\n");
}
/*Matrix Transpose Function*/
/*Matrix Transpose Function*/
/* Writes the transpose of matr (n x m, row-major) into matr_trans (m x n). */
void matrix_transpose(double *matr_trans,double *matr,int n,int m)
{
    for (int col = 0; col < m; col++)
    {
        for (int row = 0; row < n; row++)
        {
            matr_trans[col * n + row] = matr[row * m + col];
        }
    }
}
/*Main Function*/
int main(void)
{
/*Local Variable's Declarations and Definitions*/
int i,j;
double temp_vec;
double *B_vec,*A_matrix,*A_trans,*A_star,*B_star;
double *dev_a_t,*dev_a_s,*dev_b_s,*dev_a_m,*dev_b_v;
FILE *A_ptr,*B_ptr,*A_star_ptr,*B_star_ptr;
/*Open Matrix and Vector Files for Reading*/
open_file(&A_ptr,"A_matrix_final.dat","r");
open_file(&B_ptr,"b_vector_final.dat","r");
/*Open Matrix and Vector Result Files for Writing*/
open_file(&A_star_ptr,"A_star_final.dat","w");
open_file(&B_star_ptr,"b_star_final.dat","w");
/*Host Memory Allocation*/
allocate_mem_2d(&A_matrix,M,N);
allocate_mem_2d(&A_trans,N,M);
allocate_mem_2d(&A_star,M,M);
allocate_mem_1d(&B_vec,N);
allocate_mem_1d(&B_star,M);
/*CUDA Memory Allocation*/
cudaMalloc((void **) &dev_a_t, M*N*sizeof(double));
cudaMalloc((void **) &dev_a_m, N*M*sizeof(double));
cudaMalloc((void **) &dev_a_s, M*M*sizeof(double));
cudaMalloc((void **) &dev_b_v, N*sizeof(double));
cudaMalloc((void **) &dev_b_s, M*sizeof(double));
/*Scan Data From File to B Vector*/
for (i=0;i<N;i++)
{
fscanf(B_ptr,"%lf",&temp_vec);
B_vec[i] = temp_vec;
}
print_vectors(B_vec,"B_vec",N);
/*Scan Data From File to A Matrix*/
for (i=0;i<N;i++)
{
for (j=0;j<M;j++)
{
fscanf(A_ptr,"%lf",&temp_vec);
A_matrix[i* M +j] = temp_vec;
}
}
print_matrices(A_matrix,"A_matrix",N,M,M);
/*Transpose A Matrix*/
matrix_transpose(A_trans,A_matrix,N,M);
print_matrices(A_trans,"A_trans",M,N,N);
/*CUDA Memory Copy from Host fo Device*/
cudaMemcpy(dev_a_t, A_trans, M*N*sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_a_m, A_matrix, N*M*sizeof(double), cudaMemcpyHostToDevice);
/*Matrix Matrix Multiply Kernel Call with 1 Block and N Threads*/
DeviceMatrixMatrixMult<<<1,N>>>(dev_a_s,dev_a_t,dev_a_m,N,M);
/*CUDA Memory Copy A Star from Device to Host*/
cudaMemcpy(A_star,dev_a_s,M*M*sizeof(double), cudaMemcpyDeviceToHost);
/*Print Kernel Calculated A Star to File*/
printf("\nFrom Device:\n");
print_matrices(A_star,"A_star",M,M,M);
for (i=0;i<M;i++)
{
for (j=0;j<M;j++)
{
fprintf(A_star_ptr,"%f ",A_star[i * M + j]);
}
fprintf(A_star_ptr,"\n");
}
/*CUDA Memory Copy from Host fo Device*/
cudaMemcpy(dev_b_v, B_vec, N*sizeof(double), cudaMemcpyHostToDevice);
/*Matrix Vector Multiply Kernel Call with 1 Block and N Threads*/
DeviceMatrixVectorMult<<<1,N>>>(dev_b_s,dev_a_t,dev_b_v,N);
/*CUDA Memory Copy B Star from Device to Host*/
cudaMemcpy(B_star,dev_b_s,M*sizeof(double), cudaMemcpyDeviceToHost);
/*Print Kernel Calculated B Star to File*/
printf("\nFrom Device:\n");
print_vectors(B_star,"B_star",M);
for (i=0;i<M;i++)
{
fprintf(B_star_ptr, "%f ",B_star[i]);
}
/*Free Host Allocated Memory*/
free(A_matrix);
free(A_trans);
free(B_vec);
free(A_star);
free(B_star);
/*Free CUDA Allocated Memory*/
cudaFree(dev_a_t);
cudaFree(dev_a_m);
cudaFree(dev_a_s);
cudaFree(dev_b_v);
cudaFree(dev_b_s);
/*Close File Pointers*/
fclose(A_ptr);
fclose(B_ptr);
fclose(A_star_ptr);
fclose(B_star_ptr);
/*Return 0 for Successful Execution*/
return 0;
} |
13,253 | #include "includes.h"
/* Tiles `input` (input_size floats) n times into `output`
 * (n * input_size floats); one element per thread. */
__global__ void times(float *input, unsigned int input_size, float *output, unsigned int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n * input_size)
        return;
    output[gid] = input[gid % input_size];
}
13,254 | #include "includes.h"
/* Copies the left-edge halo columns of each x-tile of `dst` (a pitched
 * n_rows x n_cols x n_slices volume, d_* pitches) into the packed
 * `shared_cols` exchange buffer (s_* pitches): per slice, each of the
 * gridDim.x tile groups stores 2 adjacent columns of s_ypitch entries each.
 * `shared_rows` / `shared_slices` are unused here — presumably kept so the
 * cp_rows/cp_cols/cp_slices kernel family shares one signature (confirm).
 * NOTE(review): the packing layout is inferred from the index math below —
 * verify against the kernel that consumes shared_cols. */
__global__ void gpu_stencil37_hack2_cp_cols(double * dst, double * shared_rows, double *shared_cols,double *shared_slices,int d_xpitch, int d_ypitch, int d_zpitch,int s_xpitch,int s_ypitch, int s_zpitch, int n_rows, int n_cols,int n_slices,int tile_x,int tile_y, int tile_z){
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0)&& threadIdx.x==0 && threadIdx.z==0){
printf("copy cols: begin\n");
printf("copy cols: n_cols=%d,n_rows=%d,n_slices=%d\n",n_cols,n_rows,n_slices);
printf("copy cols: gridDim.x=%d,gridDim.y=%d,gridDim.z=%d\n",gridDim.x,gridDim.y,gridDim.z);
printf("copy cols: blockDim.x=%d,blockDim.y=%d,blockDim.z=%d\n",blockDim.x,blockDim.y,blockDim.z);
printf("copy cols: tile_x=%d,tile_y=%d,tile_z=%d\n",tile_x,tile_y,tile_z);
}
#endif
/* Origin of this block's tile within the global volume. */
int base_global_slice = tile_z * blockIdx.z;
int base_global_row = blockDim.y * blockIdx.y;
int base_global_col = tile_x * blockIdx.x;
//int dst_area = n_rows*n_cols;
//int shared_area = gridDim.x*n_rows*2;
/* Pitched size of one z-slice of dst, and of one z-slice of the packed
 * column buffer (2 columns per x-tile group). */
int dst_area = d_ypitch*d_xpitch;
int shared_area = gridDim.x*s_ypitch*2;
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0&&threadIdx.x==0&&threadIdx.z==0)){
printf("copy cols: shared_area=%d\n",shared_area);
}
#endif
//int base_global_idx = base_global_slice*dst_area + base_global_row * n_cols + base_global_col;
int base_global_idx = base_global_slice*dst_area + base_global_row * d_xpitch + base_global_col;
/* Each tile contributes two columns: its first column and the next one. */
int nextCol= base_global_col+1;
bool legalNextCol = (nextCol<n_cols)?1:0;
int ty = threadIdx.y;
bool legalCurRow = (base_global_row + ty)<n_rows;
/* Walk the tile's slices; each thread copies one row element per slice. */
for(int tz=0;tz<tile_z;++tz){
bool legalCurSlice = (base_global_slice + tz)<n_slices;
//int idx = (base_global_slice+tz)*shared_area + blockIdx.x*2*n_rows+blockIdx.y*blockDim.y+ty;
//int idx_dst =base_global_idx + tz*dst_area + ty*n_cols ;
int idx = (base_global_slice+tz)*shared_area + blockIdx.x*2*s_ypitch+blockIdx.y*blockDim.y+ty;
int idx_dst =base_global_idx + tz*dst_area + ty*d_xpitch ;
if(legalCurRow && legalCurSlice){
shared_cols[idx] = dst[idx_dst];
}
if(legalCurRow && legalCurSlice && legalNextCol){
//shared_cols[idx + n_rows] = dst[idx_dst + 1];
shared_cols[idx + s_ypitch] = dst[idx_dst + 1];
}
__syncthreads();
}
__syncthreads();
#ifdef CUDA_CUDA_DEBUG
if(blockIdx.z ==0 && blockIdx.y==0 && blockIdx.x==0 && (threadIdx.x==0)){
// printf("shared_cols: addr:%d, val = %f\n", threadIdx.y,shared_cols[threadIdx.y]);
}
if(blockIdx.y==0 && blockIdx.x==0 &&blockIdx.z==0 ){
if((threadIdx.x==0 || threadIdx.x==1 || threadIdx.x==2 ) && threadIdx.y==0){
int d_addr0 = base_global_idx+0*dst_area+threadIdx.x;
int d_addr1 = base_global_idx+1*dst_area+threadIdx.x;
int addr = base_global_slice+blockIdx.x*blockDim.x + threadIdx.x;
int addr1 = shared_area*(base_global_slice+1)+blockIdx.x*blockDim.x+ threadIdx.x;
int addr2 = shared_area*(base_global_slice+2)+blockIdx.x*blockDim.x+ threadIdx.x;
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,d_addr0,dst[d_addr0]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,dst : z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,d_addr1,dst[d_addr1]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_cols: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,0,addr,shared_cols[addr]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_cols: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,1,addr1,shared_cols[addr1]);
printf("copy cols: blockIdx.x=%d, blockIdx.y=%d,blockIdx.z=%d,shared_cols: z:%d, addr:%d, val = %f\n",blockIdx.x, blockIdx.y,blockIdx.z,2,addr2,shared_cols[addr2]);
}
}
#endif
#ifdef CUDA_CUDA_DEBUG
if((blockIdx.x==0)&&(blockIdx.y==0)&&(blockIdx.z==0)&&(threadIdx.y==0 && threadIdx.x==0 && threadIdx.z==0)){
printf("copy cols end!\n");
}
#endif
}
13,255 | #include "includes.h"
/* Final reduction of partial normal-equation sums: d_C holds consecutive
 * 64-float groups; each block sums gridDim_x_normal_equations of them
 * element-wise into shared memory, then tree-reduces the 64 values and
 * writes one float per block to d_C_reduced.  Requires blockDim.x == 64.
 *
 * Fix: the original called __syncthreads() inside the divergent
 * `if (tid < 32)` branch — undefined behavior, since threads 32..63 never
 * reach those barriers.  The last six steps stay entirely within threads
 * 0..31, which form exactly warp 0, so __syncwarp() is the correct barrier
 * between the dependent shared-memory steps (requires SM70+ toolchains;
 * on older ones it compiles to a no-op with the same lockstep behavior). */
__global__ void reduce_normal_eqs_64_GPU(float *d_C_reduced, float *d_C, int gridDim_x_normal_equations) {
    int tid = threadIdx.x;
    int bx = blockIdx.x;
    // flat offset of this block's first 64-float group in d_C
    int ind = blockIdx.y * gridDim.x * gridDim_x_normal_equations * 64 +
              bx * gridDim_x_normal_equations * 64 + tid;
    __shared__ float DATA[64];
    // serially sum this lane's element across all groups assigned to the block
    float tmp = 0.0f;
    for (int i = 0; i < gridDim_x_normal_equations; i++)
        tmp += d_C[ind + i * 64];
    DATA[tid] = tmp;
    __syncthreads(); // ensure the loading stage has finished block-wide
    // tree reduction of DATA[0..63]; all steps after the first stay in warp 0
    if (tid < 32) {
        DATA[tid] += DATA[tid + 32]; __syncwarp();
        DATA[tid] += DATA[tid + 16]; __syncwarp();
        DATA[tid] += DATA[tid + 8];  __syncwarp();
        DATA[tid] += DATA[tid + 4];  __syncwarp();
        DATA[tid] += DATA[tid + 2];  __syncwarp();
        DATA[tid] += DATA[tid + 1];
    }
    // write one result per block
    if (tid == 0)
        d_C_reduced[blockIdx.y * gridDim.x + bx] = DATA[0];
}
13,256 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel: pushes the scalar inputs
 * through a fixed chain of transcendental ops (including deliberate
 * inf/NaN producers such as division by +0.0f) and prints the final value
 * of `comp`.  Generated code — do not modify the arithmetic. */
__global__
void compute(float comp, float var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34) {
if (comp >= (-1.7167E36f * var_1)) {
comp += (var_3 * atan2f((-1.8125E-19f - acosf(-1.5877E-44f)), var_4 + var_5));
float tmp_1 = +0.0f;
float tmp_2 = +1.9347E-37f;
comp += tmp_2 - tmp_1 + (var_6 / (var_7 / atan2f(expf((var_8 - var_9 + +1.9231E-42f - (var_10 - -1.4863E35f))), (+1.6161E-1f - var_11 * floorf(-1.4446E-11f / +0.0f * var_12 / var_13 / var_14)))));
/* var_2 controls the loop trip count */
for (int i=0; i < var_2; ++i) {
comp += var_15 + ldexpf(var_16 / (var_17 - (+0.0f * ldexpf(cosf(var_18 / var_19), 2))), 2);
}
if (comp >= var_20 * var_21 / (var_22 - -1.4621E-44f + var_23)) {
comp += var_24 - sqrtf(log10f(atanf(-1.0365E-44f)));
comp += fabsf(+0.0f + (var_25 - fabsf(var_26 * (var_27 * -1.0839E-41f))));
}
if (comp >= (+1.6967E-41f + var_28)) {
comp = var_29 + var_30 / +0.0f;
comp += +0.0f + var_31;
comp += var_32 - +1.3521E-9f;
comp = (var_33 - (+0.0f / -0.0f - var_34));
}
}
printf("%.17g\n", comp);
}
/* Allocates a 10-element host float array with every entry set to v.
 * Caller owns the memory (free()). */
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    for (int k = 0; k < 10; ++k)
        buf[k] = v;
    return buf;
}
/* Generated driver: parses exactly 35 numeric command-line arguments
 * (argv[1..35]) and launches compute<<<1,1>>> with them.
 * NOTE(review): argc is never validated — running with fewer than 35
 * arguments dereferences a null argv entry.  Left as-is because this
 * harness is machine-generated. */
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
float tmp_30 = atof(argv[30]);
float tmp_31 = atof(argv[31]);
float tmp_32 = atof(argv[32]);
float tmp_33 = atof(argv[33]);
float tmp_34 = atof(argv[34]);
float tmp_35 = atof(argv[35]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35);
/* wait so the kernel's printf is flushed before exit */
cudaDeviceSynchronize();
return 0;
}
|
13,257 | #include "node.cuh"
#include "helper.cuh"
/* Explicit instantiations of the node<T> class template used by this
 * translation unit. */
template class node<int>;
template class node<double>;
/* NOTE(review): purpose of this host-side dummy instance is unclear —
 * presumably it forces template emission; confirm before removing. */
node<double> NDinit;
/* Shared scratch slot used by get_temp()/delete_temp(); nothing here
 * allocates it — presumably device memory set up elsewhere (TODO confirm). */
void *temp_var;
// __device__ int temp_int;
// __device__ double temp_double_1;
// __device__ double temp_double_2;
// Host/device max: returns b on ties (same as `a > b ? a : b`).
template<typename T> __device__ __host__ T maxx(T a, T b){
    if (a > b)
        return a;
    return b;
}
// Host/device min: returns b on ties (same as `a < b ? a : b`).
template<typename T> __device__ __host__ T minn(T a, T b){
    if (a < b)
        return a;
    return b;
}
template __device__ __host__ int maxx(int a, int b);
template __device__ __host__ double maxx(double a, double b);
template __device__ __host__ int minn(int a, int b);
template __device__ __host__ double minn(double a, double b);
/* Strict "less" for node<T>: compares payloads first, positions break ties. */
template<typename T> bool __device__ __host__ node<T>::operator < (const node<T> &B){
    if (data != B.data)
        return data < B.data;
    return pos < B.pos;
};
// template __device__ __host__ bool node<int>::operator < (const node<int> &B);
// template __device__ __host__ bool node<double>::operator < (const node<double> &B);
/* "Greater" on payloads — but ties are still broken by pos < B.pos, the SAME
 * direction as operator<.  NOTE(review): presumably deliberate, so both min-
 * and max-style selections prefer the smaller position on equal data —
 * confirm this tie-break is intended. */
template<typename T> bool __device__ __host__ node<T>::operator > (const node<T> &B){
    if (data != B.data)
        return data > B.data;
    return pos < B.pos;
};
// template __device__ __host__ bool node<int>::operator > (const node<int> &B);
// template __device__ __host__ bool node<double>::operator > (const node<double> &B);
/* Addition: the smaller position wins; a payload of INT_MAX acts as
 * "infinity" and absorbs the sum. */
template<typename T> node<T> __device__ __host__ node<T>::operator + (const node<T> &B){
    node<T> out;
    out.pos = minn(pos, B.pos);
    if (data == INT_MAX || B.data == INT_MAX)
        out.data = INT_MAX;
    else
        out.data = data + B.data;
    return out;
};
// template __device__ __host__ node<int> node<int>::operator + (const node<int> &B);
// template __device__ __host__ node<double> node<double>::operator + (const node<double> &B);
/* Subtraction: smaller position wins; no infinity handling. */
template<typename T> node<T> __device__ __host__ node<T>::operator - (const node<T> &B){
    node<T> out;
    out.pos = minn(pos, B.pos);
    out.data = data - B.data;
    return out;
};
// template __device__ __host__ node<int> node<int>::operator - (const node<int> &B);
// template __device__ __host__ node<double> node<double>::operator - (const node<double> &B);
/*
template<typename T> node<T> & __device__ __host__ node<T>::operator = (const node<T> &B){
pos = B.pos;
data = B.data;
return *this;
};
// template __device__ __host__ node<int>& node<int>::operator = (const node<int> &B);
// template __device__ __host__ node<double>& node<double>::operator = (const node<double> &B);
*/
/* Overloaded element-copy helpers: the generic kernels (vectorCopy,
 * vectorAdd, vectorSub) call copy() so one template body handles both bare
 * scalars and node<T> records. */
void __device__ __host__ copy(node<int> *A, const node<int> B)
{
    A->pos = B.pos;
    A->data = B.data;
}
void __device__ __host__ copy(node<double> *A, const node<double> B)
{
    A->pos = B.pos;
    A->data = B.data;
}
void __device__ __host__ copy(int *A, const int B)
{
    *A = B;
}
void __device__ __host__ copy(double *A, const double B)
{
    *A = B;
}
/* get_data: extracts the payload from either a node<T> or a bare scalar. */
int __device__ __host__ get_data(node<int> A)
{
    return A.data;
}
double __device__ __host__ get_data(node<double> A)
{
    return A.data;
}
int __device__ __host__ get_data(int A)
{
    return A;
}
double __device__ __host__ get_data(double A)
{
    return A;
}
/* get_pos: position of a node<T>; bare scalars carry no position, so 0. */
int __device__ __host__ get_pos(node<int> A)
{
    return A.pos;
}
int __device__ __host__ get_pos(node<double> A)
{
    return A.pos;
}
int __device__ __host__ get_pos(int A)
{
    return 0;
}
int __device__ __host__ get_pos(double A)
{
    return 0;
}
/* Stages the scalar `a` into the shared device scratch slot `temp_var`
 * (via a one-thread vectorInit launch) and returns it as a typed pointer.
 * NOTE(review): nothing in this file allocates temp_var — presumably device
 * memory set up elsewhere; confirm.  No synchronization here — callers rely
 * on default-stream ordering.  Not reentrant: every call reuses the slot. */
template<typename T> __host__ T* get_temp(T a){
T* temp = (T*)temp_var;
vectorInit<<<1,1>>>(temp, 1, a);
return temp;
}
template __host__ int* get_temp(int a);
template __host__ double* get_temp(double a);
template __host__ node<int>* get_temp(node<int> a);
template __host__ node<double>* get_temp(node<double> a);
/* Reads the staged device value back to the host (the blocking cudaMemcpy
 * also orders after the preceding default-stream kernel).
 * NOTE(review): despite the name, nothing is freed — the scratch slot is
 * reused across calls. */
template<typename T> __host__ T delete_temp(T* a){
T ans;
checkCudaErrors(cudaMemcpy(&ans, a, sizeof(T), cudaMemcpyDeviceToHost));
return ans;
}
template __host__ int delete_temp(int* a);
template __host__ double delete_temp(double* a);
template __host__ node<int> delete_temp(node<int>* a);
template __host__ node<double> delete_temp(node<double>* a);
/* Free-function ordering for node<T> (same rule as the member operator<):
 * payload first, position as tie-break. */
template<typename T> bool __device__ __host__ operator < (const node<T> &A, const node<T> &B){
if(A.data < B.data) return true;
else if(A.data == B.data && A.pos < B.pos) return true;
else return false;
};
template __device__ __host__ bool operator < (const node<int> &A, const node<int> &B);
template __device__ __host__ bool operator < (const node<double> &A, const node<double> &B);
/* maxx/minn instantiated for node<T>; they compare via the node operators. */
template __device__ __host__ node<int> maxx(node<int> a, node<int> b);
template __device__ __host__ node<double> maxx(node<double> a, node<double> b);
template __device__ __host__ node<int> minn(node<int> a, node<int> b);
template __device__ __host__ node<double> minn(node<double> a, node<double> b);
/* Packs a scalar array into node records: V[i] = { data: S[i], pos: i / p }.
 * pos = i / p — presumably the row/group index for group width p; confirm
 * with callers. */
template<typename T> __global__ void vectorToNode(T* S, node<T>* V, int n, int p)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int index = blockDim.x * bid + tid;
if(index < n) {
V[index].data = S[index];
V[index].pos = index / p;
}
}
// Extract pos from a node array; entries whose data == 0 map to -1.
template<typename T> __global__ void nodeToPos(node<T>* S, int* V, int n)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int index = blockDim.x * bid + tid;
if(index < n) {
if(S[index].data == 0) V[index] = -1;
else V[index] = S[index].pos;
}
}
// Extract data from a node array.
template<typename T> __global__ void nodeToData(node<T>* S, T* V, int n)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int index = blockDim.x * bid + tid;
if(index < n)
V[index] = S[index].data;
}
template __global__ void vectorToNode<int>(int* S, node<int>* V, int n, int p);
template __global__ void vectorToNode<double>(double* S, node<double>* V, int n, int p);
template __global__ void nodeToPos<int>(node<int>* S,int* V, int n);
template __global__ void nodeToPos<double>(node<double>* S,int* V, int n);
template __global__ void nodeToData<int>(node<int>* S,int* V, int n);
template __global__ void nodeToData<double>(node<double>* S,double* V, int n);
/* Blocks needed to cover n elements at NUM_THREADS per block (ceil-div). */
__device__ __host__ int NumBlocks(int n){return (n+NUM_THREADS-1)/NUM_THREADS;};
/* Row-major flattening of 2-D index (i, j) for an n-column matrix. */
__device__ __host__ int ID2D(int i, int j, int n){return (i*n+j);};
// Fills V[0..n) with the constant s.
template<typename T> __global__ void vectorInit(T* V, int n, T s)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        V[idx] = s;
}
// Sets V[i] = (T)i for every i in [0, n).
template<typename T> __global__ void vectorInitIndex(T* V, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        V[idx] = (T)idx;
}
// Copies S[0..n) into V[0..n) via the copy() overloads (scalars or node<T>).
template<typename T> __global__ void vectorCopy(T* S, T* V, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        copy(V + idx, S[idx]);
}
// Element-wise V[i] = S1[i] + S2[i] (uses T's operator+ and the copy() overloads).
template<typename T> __global__ void vectorAdd(T* S1, T* S2, T* V, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        copy(&V[idx], S1[idx] + S2[idx]);
}
// Element-wise V[i] = S1[i] - S2[i] (uses T's operator- and the copy() overloads).
template<typename T> __global__ void vectorSub(T* S1, T* S2, T* V, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        copy(&V[idx], S1[idx] - S2[idx]);
}
// Array compare: if S[i] <= V[i] for every i then *res == 0, else non-zero.
// NOTE(review): __syncthreads_or aggregates per BLOCK only, and every thread
// of every block overwrites *res with its own block's verdict — with more
// than one block the final value is whichever block writes last, so
// cross-block results are unreliable.  Single-block launches are fine.
template<typename T> __global__ void vectorCmpLess(T* S, T* V, int n, int* res)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int index = blockDim.x * bid + tid;
int pd = 0;  /* 1 iff this thread sees a violation S[index] > V[index] */
if(index < n)
pd = (S[index] - V[index] > 0) ? 1 : 0;
*res = __syncthreads_or(pd);
}
/* Debug dump: the single thread with global index 0 prints V as an m x n
 * table of (data, pos) pairs, tagged with `id` so call sites can be told
 * apart.  Works for scalars too (get_pos then prints 0). */
template<typename T> __global__ void debug_vector(int id, T* V, int m, int n)
{
const int tid = threadIdx.x;
const int bid = blockIdx.x;
int index = blockDim.x * bid + tid;
if(index == 0){
printf("【DEBUG】 %d\n", id);
for(int i = 0; i < m; ++i){
printf("\t");
for(int j = 0; j < n; ++j)
printf("(%lf,%d)\t", (double)get_data(V[i*n+j]), get_pos(V[i*n+j]));
printf("\n");
}
}
}
template __global__ void debug_vector<int>(int id, int* V, int m, int n);
template __global__ void debug_vector<double>( int id, double* V, int m, int n);
template __global__ void debug_vector<node<int> >(int id, node<int>* V, int m, int n);
template __global__ void debug_vector<node<double> >( int id, node<double>* V, int m, int n);
/* Explicit instantiations of the vector kernels for the types used
 * elsewhere in the project. */
template __global__ void vectorInit<int>(int* V, int n, int s);
template __global__ void vectorInit<double>(double* V, int n, double s);
template __global__ void vectorInitIndex<int>(int* V, int n);
template __global__ void vectorInitIndex<double>(double* V, int n);
template __global__ void vectorCopy<int>(int* S,int* V, int n);
template __global__ void vectorCopy<double>(double* S,double* V, int n);
template __global__ void vectorCopy<node<int> >(node<int>* S,node<int>* V, int n);
template __global__ void vectorCopy<node<double> >(node<double>* S,node<double>* V, int n);
template __global__ void vectorAdd<int>(int* S1, int* S2, int* V, int n);
template __global__ void vectorAdd<double>(double* S1, double* S2, double* V, int n);
template __global__ void vectorAdd<node<int> >(node<int>* S1, node<int>* S2, node<int>* V, int n);
template __global__ void vectorAdd<node<double> >(node<double>* S1, node<double>* S2, node<double>* V, int n);
template __global__ void vectorSub<int>(int* S1, int* S2, int* V, int n);
template __global__ void vectorSub<double>(double* S1, double* S2, double* V, int n);
template __global__ void vectorSub<node<int> >(node<int>* S1, node<int>* S2, node<int>* V, int n);
template __global__ void vectorSub<node<double> >(node<double>* S1, node<double>* S2, node<double>* V, int n);
template __global__ void vectorCmpLess<int>(int* S, int* V, int n, int* res);
template __global__ void vectorCmpLess<double>(double* S, double* V, int n, int* res);
|
13,258 | #include "includes.h"
/* Solves the quadratic a*x^2 + b*x + c = 0: two real roots stored at
 * sol[index] / sol[index+1] when the discriminant is positive, the repeated
 * root at sol[index] when it is zero, nothing when it is negative.
 * Fix: the repeated-root branch computed (-b)/(2*a) entirely in INTEGER
 * arithmetic, truncating the root before storing it (e.g. a=2, b=-2 gave 0
 * instead of 0.5).
 * NOTE(review): with more than one thread, sol[index+1] overlaps the next
 * thread's slot — the layout presumably assumes one thread per output
 * buffer; confirm the launch configuration. */
__global__ void ecuaciones(int a, int b, int c, float *sol){
    int index = threadIdx.x;
    float d = b*b-4*a*c;          /* discriminant (computed in int, widened) */
    if (d > 0) {
        float x = (-b+sqrtf(d))/(2*a);
        float y = (-b-sqrtf(d))/(2*a);
        sol[index] = x;
        sol[index+1] = y;
    }
    else if (d == 0) {
        /* cast before dividing so the division happens in float */
        sol[index] = (float)(-b)/(2*a);
    }
}
13,259 | ///usr/local/cuda/bin/nvcc
/* Element-wise c = a + b over s elements.  Expects 32x32 blocks, so each
 * block covers 1024 consecutive indices. */
template <typename T>
__global__ void addKernel(T *gpu_a, T *gpu_b, T *gpu_c, size_t s) {
    size_t idx = blockIdx.x * 1024 + threadIdx.y * 32 + threadIdx.x;
    if (idx >= s)
        return;
    gpu_c[idx] = gpu_a[idx] + gpu_b[idx];
}
/* Matrix product: gpu_c (length x width) = gpu_a (length x same_for_both)
 * . gpu_b (same_for_both x width), all row-major; one thread per output
 * element, 32x32 blocks covering 1024 indices each.
 * Fix: the original accumulated with `gpu_c[index] +=` into memory fresh
 * from cudaMalloc — which is NOT zero-initialized — so results contained
 * garbage unless the caller happened to clear the buffer.  Accumulate in a
 * register and store once instead (also avoids repeated global writes). */
template <typename T>
__global__ void dotKernel(T *gpu_a, T *gpu_b, T *gpu_c, size_t same_for_both, size_t length, size_t width) {
    size_t index = blockIdx.x * 1024 + threadIdx.y * 32 + threadIdx.x;
    if (index >= length * width)
        return;
    int a = (int) (index / width);            /* output row */
    size_t index_a = same_for_both * a;       /* walk row `a` of gpu_a */
    size_t max = index_a + same_for_both;
    size_t index_b = (index % width);         /* walk column of gpu_b, stride width */
    T acc = (T)0;
    for (; index_a < max; ++index_a) {
        acc += gpu_a[index_a] * gpu_b[index_b];
        index_b += width;
    }
    gpu_c[index] = acc;
}
/* Transpose: res (width x length) = gpu_a^T where gpu_a is length x width,
 * row-major; one thread per element.
 * Fixes: (1) the output row was computed as index / width, which is only
 * correct for square matrices — it must be index / length to pair with
 * j = index % length and the res[i*length + j] layout; (2) there was no
 * bounds check, so tail threads of the last block read and wrote out of
 * bounds. */
template <typename T>
__global__ void transpoKernel(T *gpu_a, T *res, size_t length, size_t width) {
    size_t index = blockIdx.x * 1024 + threadIdx.y * 32 + threadIdx.x;
    if (index >= length * width)
        return;
    size_t i = index / length;   /* row of res, in [0, width)   */
    size_t j = index % length;   /* col of res, in [0, length)  */
    res[(i * length) + j] = gpu_a[(j * width) + i];
}
namespace GPU {
/* res = a + b element-wise over `size` entries: copies both inputs to the
 * device, launches addKernel with 32x32 blocks (ceil(size/1024) of them),
 * and copies the sum back. */
template <typename T>
void add(T *a, T *b, T *res, size_t size) {
    T *gpu_a;
    T *gpu_b;
    T *gpu_c;
    cudaMalloc(&gpu_a, size * sizeof(T));
    cudaMalloc(&gpu_b, size * sizeof(T));
    cudaMalloc(&gpu_c, size * sizeof(T));
    cudaMemcpy(gpu_a, a, size * sizeof(T), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b, size * sizeof(T), cudaMemcpyHostToDevice);
    size_t nbBlocks = size / 1024 + (size % 1024 ? 1 : 0);  /* ceil-div */
    dim3 grid(nbBlocks), block(32, 32);
    addKernel<<<grid, block>>>(gpu_a, gpu_b, gpu_c, size);
    cudaDeviceSynchronize();
    cudaMemcpy(res, gpu_c, size * sizeof(T), cudaMemcpyDeviceToHost);
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
}
/* res (length x width) = a (length x same_for_both) . b (same_for_both x
 * width), all row-major. */
template <typename T>
void dot(T *a, T *b, T *res, size_t same_for_both, size_t length, size_t width) {
    T *gpu_a;
    T *gpu_b;
    T *gpu_c;
    cudaMalloc(&gpu_a, same_for_both * length * sizeof(T));
    cudaMalloc(&gpu_b, same_for_both * width * sizeof(T));
    cudaMalloc(&gpu_c, length * width * sizeof(T));
    /* Fix: dotKernel accumulates into gpu_c with `+=`, but cudaMalloc does
     * not zero memory — clear the output so the sums start from 0. */
    cudaMemset(gpu_c, 0, length * width * sizeof(T));
    cudaMemcpy(gpu_a, a, same_for_both * length * sizeof(T), cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b, same_for_both * width * sizeof(T), cudaMemcpyHostToDevice);
    size_t nbBlocks = length * width / 1024 + (length * width % 1024 ? 1 : 0);
    dim3 grid(nbBlocks), block(32, 32);
    dotKernel<<<grid, block>>>(gpu_a, gpu_b, gpu_c, same_for_both, length, width);
    cudaDeviceSynchronize();
    cudaMemcpy(res, gpu_c, length * width * sizeof(T), cudaMemcpyDeviceToHost);
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);
}
/* In-place host-visible transpose of `a` (length x width -> width x length). */
template <typename T>
void transpose(T *a, size_t length, size_t width) {
    T *gpu_a;
    T *res;
    cudaMalloc(&gpu_a, length * width * sizeof(T));
    cudaMalloc(&res, length * width * sizeof(T));
    cudaMemcpy(gpu_a, a, length * width * sizeof(T), cudaMemcpyHostToDevice);
    size_t nbBlocks = length * width / 1024 + (length * width % 1024 ? 1 : 0);
    dim3 grid(nbBlocks), block(32, 32);
    transpoKernel<<<grid, block>>>(gpu_a, res, length, width);
    /* blocking default-stream cudaMemcpy orders after the kernel */
    cudaMemcpy(a, res, length * width * sizeof(T), cudaMemcpyDeviceToHost);
    cudaFree(gpu_a);
    cudaFree(res);
}
};
/* Explicit template instantiations exported by this translation unit. */
template void GPU::add(int *, int *, int *, size_t);
template void GPU::add(double *, double *, double *, size_t);
template void GPU::add(short *, short *, short *, size_t);
template void GPU::add(char *, char *, char *, size_t);
template void GPU::add(long *, long *, long *, size_t);
template void GPU::add(float *, float *, float *, size_t);
template void GPU::dot(int *, int *, int *, size_t, size_t, size_t);
template void GPU::dot(float *, float *, float *, size_t, size_t, size_t);
template void GPU::dot(char *, char *, char *, size_t, size_t, size_t);
template void GPU::dot(short *, short *, short *, size_t, size_t, size_t);
template void GPU::dot(double *, double *, double *, size_t, size_t, size_t);
template void GPU::dot(long *, long *, long *, size_t, size_t, size_t);
template void GPU::transpose(int *, size_t, size_t);
|
13,260 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Row-parallel matrix add: thread t computes row t of c = a + b, where each
 * matrix has n columns.  Launch with exactly one thread per row. */
__global__ void mat_add(int*a , int *b,int *c,int n)
{
    int row = threadIdx.x;
    for (int col = 0; col < n; col++)
    {
        int k = row * n + col;
        c[k] = a[k] + b[k];
    }
}
/* Reads two m x n matrices from stdin, adds them on the GPU (one thread per
 * row) and prints the result.
 * Fix: the host buffers a, b, c were never freed; release them before exit. */
int main(int argc, char const *argv[])
{
    int *a,*b,*c,m,n,i,j;
    int *d_a, *d_b,*d_c;
    printf("enter the value of m \n");
    scanf("%d",&m);
    printf("enter the value of n\n");
    scanf("%d",&n);
    int size= sizeof(int)*m*n;
    a=(int*)malloc(m*n*sizeof(int));
    b=(int*)malloc(m*n*sizeof(int));
    c=(int*)malloc(m*n*sizeof(int));
    printf("enter the input1 matrix\n");
    for(i=0;i<m*n;i++)
        scanf("%d",&a[i]);
    printf("enter the input2 matrix\n");
    for(i=0;i<m*n;i++)
        scanf("%d",&b[i]);
    cudaMalloc((void**)&d_a,size);
    cudaMalloc((void**)&d_b,size);
    cudaMalloc((void**)&d_c,size);
    cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
    /* one thread per row; each thread adds its n-element row */
    mat_add<<<1,m>>>(d_a,d_b,d_c,n);
    cudaMemcpy(c,d_c,size,cudaMemcpyDeviceToHost);
    printf("the result vector is :\n");
    for(i=0;i<m;i++)
    {
        for(j=0;j<n;j++)
            printf("%d\t",c[i*n+j] );
        printf("\n");
    }
    getchar();
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    /* fix: free the host buffers (previously leaked) */
    free(a);
    free(b);
    free(c);
    return 0;
}
|
13,261 | // "Copyright 2019 <Fabio M. Graetz>"
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <iostream>
template<class T>
__device__ T scanHillisSteele(T *keys) {
unsigned int i = threadIdx.x;
unsigned int n = blockDim.x;
for (int offset = 1; offset < n; offset <<=1) {
T temp;
if (i >= offset) {
temp = keys[i - offset];
}
__syncthreads();
if (i >= offset) {
keys[i] = temp + keys[i];
}
__syncthreads();
}
return keys[i];
}
/* One pass of LSD radix sort: stably partitions keys (and their paired
 * values) by the given bit — all elements whose bit is 0 first, in original
 * order, followed by all whose bit is 1.  Single-block algorithm:
 * blockDim.x must equal the array length, and the embedded scan requires
 * every thread to participate. */
__global__ void partition_by_bit(unsigned int *keys, double *values, unsigned int bit) {
unsigned int i = threadIdx.x;
unsigned int size = blockDim.x;
unsigned int x_i = keys[i]; // key of integer at position i
double val = values[i];
unsigned int p_i = (x_i >> bit) & 1; // least significant bit
// replace keys with least significant bits (keys is reused as scan scratch;
// x_i/val keep the originals)
keys[i] = p_i;
__syncthreads();
// inclusive sum scan to calc the number of 1's up to and including keys[i]
unsigned int True_before = scanHillisSteele(keys);
unsigned int True_total = keys[size-1]; // total number of True bits
unsigned int False_total = size - True_total; // total number of False bits
__syncthreads();
// now, x_i needs to be put in the right position (has to be stable sort)
if (p_i) { // bit is a 1: placed after all 0s, ordered by 1-count before it
keys[False_total + True_before - 1] = x_i;
values[False_total + True_before - 1] = val;
} else { // bit is a 0: shifted left past the 1s that preceded it
keys[i - True_before] = x_i;
values[i - True_before] = val;
}
__syncthreads();
}
/* LSD radix sort of device arrays `keys` (with `values` permuted
 * alongside): one stable partition pass per bit, least-significant first.
 * Single-block launches, so ARRAY_SIZE must not exceed the device's max
 * threads per block. */
void radix_sort(unsigned int *keys, double * values, const int ARRAY_SIZE) {
    for (unsigned int bit = 0u; bit < 32u; ++bit) {
        partition_by_bit<<<1, ARRAY_SIZE>>>(keys, values, bit);
    }
}
/* Demo: radix-sorts 15 key/value pairs on the GPU and prints both arrays.
 * ARRAY_SIZE must fit in a single block (radix_sort launches <<<1, n>>>). */
int main() {
const int ARRAY_SIZE = 15;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(unsigned int);
// generate the input array on the host
unsigned int h_keys[ARRAY_SIZE]{
48, 30, 10, 72, 47, 23, 98, 89, 29, 35, 97, 91, 33, 28, 41};
double h_values[ARRAY_SIZE]{
48.8, 30.0, 10.0, 72.2, 47.7, 23.3, 98.9, 89.9, 29.9, 35.5, 97.7, 91.1, 33.3, 28.8, 41.1};
// declare GPU memory pointers
unsigned int * d_keys;
double * d_values;
// allocate GPU memory
cudaMalloc((void **) &d_keys, ARRAY_BYTES);
cudaMalloc((void **) &d_values, ARRAY_SIZE * sizeof(double));
// transfer the array to the GPU
cudaMemcpy(d_keys, h_keys, ARRAY_BYTES, cudaMemcpyHostToDevice);
cudaMemcpy(d_values, h_values, ARRAY_SIZE * sizeof(double), cudaMemcpyHostToDevice);
// launch the kernel (32 sequential passes, one per bit)
radix_sort(d_keys, d_values, ARRAY_SIZE);
cudaDeviceSynchronize();
// transfer the resulting array to the cpu
cudaMemcpy(h_keys, d_keys, ARRAY_BYTES, cudaMemcpyDeviceToHost);
cudaMemcpy(h_values, d_values, ARRAY_SIZE * sizeof(double), cudaMemcpyDeviceToHost);
std::cout << "keys:" << std::endl;
for (int i = 0; i < ARRAY_SIZE; ++i) {
std::cout << h_keys[i] << " " << std::flush;
}
std::cout << std::endl << "values:" << std::endl;
for (int i = 0; i < ARRAY_SIZE; ++i) {
std::cout << h_values[i] << " " << std::flush;
}
std::cout << std::endl;
// free GPU memory allocation
cudaFree(d_keys);
cudaFree(d_values);
return 0;
}
// http://www.compsci.hunter.cuny.edu/~sweiss/course_materials/csci360/lecture_notes/radix_sort_cuda.cc
|
13,262 | //
// Created by postaron on 04/04/2019.
//
#include "compute_functions.cuh"
#ifdef HAVE_CUB
#include <cub/cub.cuh>
#endif
/* Re-asserts the fixed heat sources: if (x, y) matches one of the numHeat
 * source cells, that cell is forced back to 1.0f in both the current and
 * the next grid, so relaxation never cools a source down.  `n` is unused
 * (kept for a uniform signature). */
__device__ void keepHeat(float *__restrict d_val,
                         float *__restrict d_val_new,
                         const size_t m,
                         const size_t n,
                         const heatPoint *__restrict srcs,
                         const size_t numHeat,
                         const uint32_t x,
                         const uint32_t y) {
    for (size_t k = 0; k < numHeat; ++k) {
        if (srcs[k].x != x || srcs[k].y != y)
            continue;
        d_val[offset(srcs[k].x, srcs[k].y, m)] = 1.0f;
        d_val_new[offset(srcs[k].x, srcs[k].y, m)] = 1.0f;
    }
}
/* Heat-diffusion relaxation: each interior thread repeatedly replaces its
 * cell with the average of its four neighbours, copies the result back into
 * d_val, and re-pins the heat sources, for up to `nite` iterations.
 * NOTE(review): `error` starts at 1.0f and is only ever enlarged via
 * fmaxf(error, ...), so the `error > convergence` early-exit can never fire
 * for convergence < 1 — the loop always runs nite times.  Likely intent was
 * per-iteration error (error = fabsf(...)); confirm before changing.
 * NOTE(review): there is no grid-wide barrier between iterations, so
 * threads read neighbours that other blocks may be updating concurrently —
 * the sweep is not a deterministic Jacobi iteration.
 * NOTE(review): the guards `ix > 1` / `iy > 1` skip index 1 as well as 0,
 * and `ix < n` permits reading d_val at offset(ix+1, ...) when ix == n-1 —
 * verify the intended boundary convention against offset()'s layout. */
__global__ void simulationKernel(float *__restrict d_val_new,
float *__restrict d_val,
const size_t m,
const size_t n,
const float convergence,
const uint32_t nite,
const heatPoint *__restrict const d_srcsHeat,
const size_t numHeat) {
const std::uint32_t ix = blockIdx.x * blockDim.x + threadIdx.x,
iy = blockIdx.y * blockDim.y + threadIdx.y;
float error = 1.0f;
if (ix > 1 && ix < n && iy > 1 && iy < m) {
for (size_t i = 0; i < nite && error > convergence; ++i) {
/* 4-neighbour average */
d_val_new[offset(ix, iy, m)] = 0.25f *
(d_val[offset(ix, iy - 1, m)] +
d_val[offset(ix, iy + 1, m)] +
d_val[offset(ix - 1, iy, m)] +
d_val[offset(ix + 1, iy, m)]);
error = fmaxf(error, fabsf(d_val_new[offset(ix, iy, m)] - d_val[offset(ix, iy, m)]));
d_val[offset(ix, iy, m)] = d_val_new[offset(ix, iy, m)];
keepHeat(d_val, d_val_new, m, n, d_srcsHeat, numHeat, ix, iy);
}
}
}
|
13,263 | #include "includes.h"
// Sigma-clipped per-spectrum outlier cleaner. One thread owns one time
// sample: d_input[t*nchans .. t*nchans + nchans - 1]. Each thread first
// estimates a clipped mean/stddev of its own spectrum, then the block
// cross-compares those statistics and replaces outlier spectra (or
// individual outlier channels) with the constant MEAN.
// MEAN, CUT, ACC, ITER and R_CUT come from includes.h.
// NOTE(review): `nsamp` is unused and there is no `t < nsamp` guard — the
// launch configuration must exactly cover the valid samples; confirm at
// call site. The shared arrays also cap blockDim.x at 1024.
__global__ void zero_dm_outliers_kernel_one(unsigned short *d_input, int nchans, int nsamp)
{
int t = blockIdx.x * blockDim.x + threadIdx.x;
int count = 0;
int iters = 0;
// Start with a huge stddev so the first clipping pass accepts everything.
float stdev = 1000000.0f;
float mean = MEAN;
float mean_last = 0.0f;
float sum = 0.0f;
float sum_squares = 0.0f;
float cutoff = (CUT * stdev);
// Per-thread statistics, shared so the whole block can compare them later.
__shared__ float g_mean[1024];
__shared__ float g_stdev[1024];
// Iteratively re-estimate mean/stddev over channels, clipping values more
// than `cutoff` from the current mean, until the mean stabilises
// (|delta| <= ACC) or ITER iterations have run.
while(abs(mean - mean_last) > ACC) {
sum = 0.0f;
sum_squares = 0.0f;
count = 0;
for(int c = 0; c < nchans; c++) {
float data=(float)d_input[t*nchans + c];
if(data < (mean + cutoff) && data > (mean - cutoff) ) {
sum+=data;
sum_squares+=(data*data);
count++;
}
}
mean_last = mean;
// NOTE(review): if nothing survived clipping, count == 0 and this divides
// by zero; the degenerate-case branch below then catches mean != finite
// only via the count check.
mean = (sum/(float)count);
sum_squares = ((sum_squares / count) - (mean * mean));
stdev = sqrt(sum_squares);
cutoff = (CUT * stdev);
iters++;
if(iters > ITER) break;
}
// Degenerate spectrum (nothing survived clipping, failed to converge, or
// zero mean/stddev): overwrite the whole spectrum with MEAN.
if(count == 0 || iters > ITER || mean == 0.0f || stdev == 0.0f) {
for(int c = 0; c < nchans; c++) {
d_input[t*nchans + c] = MEAN;
}
g_mean[threadIdx.x] = mean = MEAN;
g_stdev[threadIdx.x] = stdev = 0.0f;
} else {
g_mean[threadIdx.x] = mean;
g_stdev[threadIdx.x] = stdev;
}
__syncthreads();
// Block-wide statistics of the per-sample means (every thread recomputes
// the same values by scanning shared memory).
float mean_of_mean = 0.0f;
float stdev_of_mean = 0.0f;
float m_cutoff = 0.0f;
sum_squares = 0.0f;
for(int i = 0; i<blockDim.x; i++) {
mean_of_mean += g_mean[i];
sum_squares += (g_mean[i]* g_mean[i]);
}
mean_of_mean /= blockDim.x;
sum_squares = ((sum_squares / blockDim.x) - (mean_of_mean * mean_of_mean));
stdev_of_mean = sqrt(sum_squares);
// 3-sigma cut on the means.
m_cutoff = (3.0*stdev_of_mean);
// Same block-wide statistics for the per-sample stddevs.
float mean_of_stdev = 0.0f;
float stdev_of_stdev = 0.0f;
float s_cutoff = 0.0f;
sum_squares = 0.0f;
for(int i = 0; i<blockDim.x; i++) {
mean_of_stdev += g_stdev[i];
sum_squares += (g_stdev[i]* g_stdev[i]);
}
mean_of_stdev /= blockDim.x;
sum_squares = ((sum_squares / blockDim.x) - (mean_of_stdev * mean_of_stdev));
stdev_of_stdev = sqrt(sum_squares);
// 3-sigma cut on the stddevs.
s_cutoff = (3.0*stdev_of_stdev);
// Replace whole spectra whose mean or stddev is a block-level outlier;
// otherwise re-centre surviving channels on MEAN and clip per-channel
// values beyond R_CUT sigma.
if((g_mean[threadIdx.x] - mean_of_mean) > m_cutoff || (g_mean[threadIdx.x] - mean_of_mean) < -m_cutoff) {
for(int c = 0; c < nchans; c++) {
d_input[t*nchans + c] = MEAN;
}
} else if((g_stdev[threadIdx.x] - mean_of_stdev) > s_cutoff || (g_stdev[threadIdx.x] - mean_of_stdev) < -s_cutoff) {
for(int c = 0; c < nchans; c++) {
d_input[t*nchans + c] = MEAN;
}
} else {
for(int c = 0; c < nchans; c++) {
if((d_input[t*nchans + c]-mean < R_CUT*stdev) && (d_input[t*nchans + c]-mean > - R_CUT*stdev)) {
d_input[t*nchans + c] = (unsigned short)((float)d_input[t*nchans + c]-(float)mean+MEAN);
} else {
d_input[t*nchans + c] = MEAN;
}
}
}
} |
13,264 | #include "includes.h"
#define BLOCK_SIZE 32
// Column-wise block reduction of `output` (laid out row-major with
// gridDim.y columns): each (blockIdx.x, blockIdx.y) block sums blockDim.x
// consecutive rows of column blockIdx.y into deriv_err.
// Expects blockDim.x == 1024 (the static shared-array size) and a
// power-of-two blockDim.x; `actual` is unused but kept for signature parity.
// Bug fixes vs. original: the final write stored sdata[blockIdx.x] instead
// of the reduction result sdata[0], and indexed the output with blockDim.y
// (the block height, 1) instead of gridDim.y (the number of columns).
__global__ void derivativeError(float *output, float *actual, float *deriv_err)
{
    __shared__ float sdata[1024];
    // Ideal launch: 1024x1 blocks, grid of (row-chunks) x (columns).
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y;
    sdata[threadIdx.x] = output[row * gridDim.y + col];
    __syncthreads();
    // Standard shared-memory tree reduction.
    for (int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (threadIdx.x < s)
            sdata[threadIdx.x] += sdata[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0) // only thread 0 writes the block's partial sum
    {
        deriv_err[gridDim.y * blockIdx.x + col] = sdata[0];
    }
} |
13,265 | #include "cuda.h"
/* Simple kernel:
* Given in[0 ... (nodeN-1)], copies the segment in[partX0 ... (partX0 + partNX -1)] to out[0 ... (partNX-1)]
* and helpfully wraps addresses circularly
* invoke with gridDim.x * blockDim.x >= partNX
*/
// Copies in[partX0 .. partX0 + partNX - 1] to out[0 .. partNX - 1],
// wrapping source addresses circularly over the input's nodeN elements.
// Invoke with gridDim.x * blockDim.x >= partNX.
__global__ void cukern_FetchPartitionSubset1D(double *in, int nodeN, double *out, int partX0, int partNX)
{
    // calculate output address
    int addrOut = threadIdx.x + blockDim.x * blockIdx.x;
    if (addrOut >= partNX) return;
    // Affine map back to input address
    int addrIn = addrOut + partX0;
    // Bug fix: the circular wrap must be modulo the *input* length nodeN
    // (the original added partNX, which only coincidentally works when the
    // partition spans the whole node); also wrap on overflow, not just
    // underflow, so partitions at the upper edge read valid data.
    if (addrIn < 0) addrIn += nodeN;
    if (addrIn >= nodeN) addrIn -= nodeN;
    out[addrOut] = in[addrIn];
}
// Needed with the gradient calculators in 2D because they leave the empty directions uninitialized
// Vomits the value f into array x, from x[0] to x[numel-1]
// Fills x[0 .. numel-1] with the scalar f using a grid-stride loop, so any
// launch configuration covers the whole vector. Used to initialise the
// directions the 2D gradient kernels leave untouched.
__global__ void writeScalarToVector(double *x, long numel, double f)
{
    const long stride = (long)(blockDim.x * gridDim.x);
    for (long idx = threadIdx.x + blockDim.x * blockIdx.x; idx < numel; idx += stride) {
        x[idx] = f;
    }
}
|
13,266 | /*
============================================================================
Name : add_vector_with_streams.cu
Author :
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <fstream>
#include <chrono>
#include <assert.h>
#define CUDA_API_PER_THREAD_DEFAULT_STREAM
#include <cuda.h>
#include <cuda_runtime.h>
#define SIZE 2<<23
#define NOVEC 32
// Reports any CUDA runtime failure on stderr and asserts; returns the
// status unchanged so it can wrap calls inline.
inline cudaError_t checkCUDA(cudaError_t result){
    if (result == cudaSuccess)
        return result;
    fprintf(stderr, "CUDA Runtime error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
    return result;
}
// Elementwise c = a + b over N floats, grid-stride so any grid size works.
__global__ void addVector(float* a, float* b, float* c, int N) {
    const int step = blockDim.x * gridDim.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < N) {
        c[idx] = a[idx] + b[idx];
        idx += step;
    }
}
// Initialises a[i] = (float)i for i in [0, n), grid-stride.
__global__
void init( float *a, int n)
{
    const int step = blockDim.x * gridDim.x;
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    while (idx < n)
    {
        a[idx] = static_cast<float>(idx);
        idx += step;
    }
}
// Running-mean accumulator: feed samples through operator(), read the mean
// with get() (0 when no samples were recorded).
struct Average{
    double sum = 0;
    int n = 0;
    void operator ()(double x){
        ++n;
        sum += x;
    }
    double get(){
        if (n <= 0)
            return 0;
        return sum / n;
    }
};
// Streamed benchmark: NOVEC independent (init, init, add) pipelines, one
// CUDA stream each, over N-element float segments packed into a single
// managed allocation of 3*NOVEC segments. Returns wall-clock seconds for
// the launch+sync phase.
// Fixes vs. original: the NOVEC streams are now destroyed and the managed
// buffer freed; the original leaked every stream and relied on
// cudaDeviceReset() — which tears down the whole context — for cleanup.
double addWithStreams(long N){
    int deviceId = 0;
    int numberOfSMs = 0;
    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
    const size_t big = N * 3 * NOVEC * sizeof(float);
    float *data = nullptr;
    cudaMallocManaged(&data, big);
    cudaMemPrefetchAsync(data, big, deviceId);
    cudaStream_t streams[NOVEC];
    const auto start = std::chrono::high_resolution_clock::now();
    for(int i = 0; i < NOVEC; i++){
        const size_t threadsPerBlock = 256;
        const size_t numberOfBlocks = 32 * 2;
        cudaStreamCreate(&streams[i]);
        // Segment layout: inputs at [i*N] and [i*N + N*NOVEC],
        // output at [i*N + 2*N*NOVEC].
        init<<<numberOfBlocks, threadsPerBlock, 0, streams[i]>>>(&data[i*N], N);
        init<<<numberOfBlocks, threadsPerBlock, 0, streams[i]>>>(&data[i*N + N*NOVEC], N);
        addVector<<<numberOfBlocks, threadsPerBlock, 0, streams[i]>>>(&data[i*N], &data[i*N + N*NOVEC], &data[i*N + N*2*NOVEC], N);
        checkCUDA(cudaGetLastError());
    }
    checkCUDA(cudaDeviceSynchronize());
    const auto stop = std::chrono::high_resolution_clock::now();
    const std::chrono::duration<double> elapsed_time = stop - start;
    for(int i = 0; i < NOVEC; i++){
        cudaStreamDestroy(streams[i]);
    }
    cudaFree(data);
    return elapsed_time.count();
}
// Serial baseline: the same NOVEC vector adds, but issued on the default
// stream with a synchronize between the inits and the add of each segment.
// Returns wall-clock seconds for the launch+sync phase.
double addWithoutStreams(long N){
    int deviceId = 0;
    int numberOfSMs = 0;
    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
    const size_t big = N * 3 * NOVEC * sizeof(float);
    float *data = nullptr;
    cudaMallocManaged(&data, big);
    cudaMemPrefetchAsync(data, big, deviceId);
    const auto start = std::chrono::high_resolution_clock::now();
    const size_t threadsPerBlock = 256;
    const size_t numberOfBlocks = numberOfSMs * 32;
    for(int i = 0; i < NOVEC; i++){
        float *a = &data[i*N];
        float *b = &data[i*N + N*NOVEC];
        float *c = &data[i*N + N*2*NOVEC];
        init<<<numberOfBlocks, threadsPerBlock>>>(a, N);
        init<<<numberOfBlocks, threadsPerBlock>>>(b, N);
        cudaDeviceSynchronize();
        addVector<<<numberOfBlocks, threadsPerBlock>>>(a, b, c, N);
    }
    cudaDeviceSynchronize();
    const auto stop = std::chrono::high_resolution_clock::now();
    const std::chrono::duration<double> elapsed_time = stop - start;
    cudaFree(data);
    return elapsed_time.count();
}
// Single-launch baseline: treats all NOVEC segments as one long vector of
// N*NOVEC elements and performs one init/init/add sequence.
// Returns wall-clock seconds for the launch+sync phase.
double addOneLongVector(long N){
    int deviceId = 0;
    int numberOfSMs = 0;
    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
    const size_t big = N * 3 * NOVEC * sizeof(float);
    float *data = nullptr;
    cudaMallocManaged(&data, big);
    cudaMemPrefetchAsync(data, big, deviceId);
    const auto start = std::chrono::high_resolution_clock::now();
    const size_t threadsPerBlock = 256;
    const size_t numberOfBlocks = numberOfSMs * 32;
    const long total = N * NOVEC;
    init<<<numberOfBlocks, threadsPerBlock>>>(data, total);
    init<<<numberOfBlocks, threadsPerBlock>>>(&data[total], total);
    cudaDeviceSynchronize();
    addVector<<<numberOfBlocks, threadsPerBlock>>>(data, &data[total], &data[total * 2], total);
    cudaDeviceSynchronize();
    const auto stop = std::chrono::high_resolution_clock::now();
    const std::chrono::duration<double> elapsed_time = stop - start;
    cudaFree(data);
    return elapsed_time.count();
}
// Benchmark driver: sweeps N from 1M to 28M, averages three runs of each
// strategy (streams / serial / one long vector) and logs to stdout plus
// one tab-separated file per strategy.
int main()
{
    std::cout << "Work"<< std::endl;
    std::ofstream WithFile("with.txt"), WithoutFile("without.txt"), OneFile("one.txt");
    for(long N = 1000000; N <= 28000000; N += 1000000){
        Average with, without, one;
        for(int run = 0; run < 3; ++run){
            with(addWithStreams(N));
            without(addWithoutStreams(N));
            one(addOneLongVector(N));
        }
        std::cout << "Average with for N = "<< N<<": "<<with.get() << std::endl;
        std::cout << "Average without for N = "<< N<<": "<<without.get() << std::endl;
        std::cout << "Average one for N = "<< N<<": "<<one.get() << std::endl;
        WithFile << N<<"\t"<<with.get() << std::endl;
        WithoutFile << N<<"\t"<<without.get() << std::endl;
        OneFile << N<<"\t"<<one.get() << std::endl;
    }
    WithFile.close();
    WithoutFile.close();
    OneFile.close();
    std::cout << "End"<< std::endl;
}
|
13,267 |
__global__
void sposv_batched_kernel(int n, int threads_per_block, float *dA);
////////////////////////////////////////////////////////////////////////////////
// Host wrapper: factors `batch` matrices of size n x n starting at dA.
// Each block uses 1024 threads; at most 50000 blocks per launch, so larger
// batches are split across several kernel launches on `stream`.
extern "C"
void sposv_batched(int n, int batch, float *dA, cudaStream_t stream)
{
    const int threads_per_block = 1024;
    const int max_blocks = 50000;
    const int launches = ((batch % max_blocks) == 0) ? (batch / max_blocks) : (batch / max_blocks + 1);
    int done = 0; // matrices already dispatched
    for (int l = 0; l < launches; l++)
    {
        const int blocks = (batch > max_blocks) ? max_blocks : batch;
        batch -= max_blocks;
        dim3 dimGrid(blocks, 1, 1);
        dim3 dimBlock(threads_per_block, 1, 1);
        sposv_batched_kernel<<<dimGrid, dimBlock, 0, stream>>>(n, threads_per_block, &dA[done * n * n]);
        done += blocks;
    }
}
////////////////////////////////////////////////////////////////////////////////
// Batched unblocked Cholesky factorization: one matrix per block, factored
// in place at dA[blockIdx.x * N * N] using up to threads_per_block threads.
// NOTE(review): no positive-definiteness check — sqrtf of a non-positive
// pivot silently produces NaN that propagates through the factorization.
__global__
void sposv_batched_kernel(int N, int threads_per_block, float *dA)
{
int k;
int i;
int n;
int s;
int blockid = blockIdx.x;
// get the address for this block's matrix
float *pA = &dA[blockid*N*N];
int threadid = threadIdx.x;
int matrix_size = N*N;
// Number of passes each thread needs when the work exceeds the block size.
int repeat = ((matrix_size % threads_per_block) == 0) ? (matrix_size / threads_per_block) : (matrix_size / threads_per_block + 1);
// Single Cholesky factorization, one pivot column per outer iteration.
for (k = 0; k < N; k++) {
// Panel factorization.
// Only thread 0 computes the sqrtf of the pivot.
if(threadid == 0) {
pA[k*N+k] = sqrtf(pA[k*N+k]);
}
__syncthreads();
// Scale the rest of the pivot row by the pivot. If N is larger than the
// thread count, each thread may handle several entries.
if(N > threads_per_block) {
for(i=0; i<repeat; i++) {
if((threadid > k) && (threadid < N)){
pA[k*N+threadid] /= pA[k*N+k];
}
threadid += threads_per_block;
}
//reset threadid
threadid = threadIdx.x;
}else {
if((threadid > k) && (threadid < N)){
pA[k*N+threadid] /= pA[k*N+k];
}
}
__syncthreads();
// Update of the trailing submatrix.
// If the matrix is larger than the thread count, the inner column loop
// runs serially per thread.
if (matrix_size > threads_per_block )
{
if(N > threads_per_block) {
for(i=0; i<repeat; i++) {
if((threadid > k) && (threadid < N)) {
for (s = threadid; s < N; s++)
pA[threadid*N+s] -= (pA[k*N+threadid]*pA[k*N+s]);
}
threadid += threads_per_block;
}
//reset threadid
threadid = threadIdx.x;
}else {
if((threadid > k) && (threadid < N)) {
for (s = threadid; s < N; s++)
pA[threadid*N+s] -= (pA[k*N+threadid]*pA[k*N+s]);
}
}
}else {
// Enough threads for one per matrix element: map the flat thread id to
// (row n, col s) and update the upper-trailing entries directly.
n = threadid / N;
s = threadid % N;
if((n > k) && (n < N)) {
if((s >= n) && (s < N))
pA[n*N+s] -= (pA[k*N+s]*pA[k*N+n]);
}
}
__syncthreads();
}
}
|
13,268 | #include <stdio.h>
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes c = a + b;
*/
// Device code: stores the sum of the two single-element inputs into *c.
__global__ void add(int* a, int* b, int* c)
{
    c[0] = a[0] + b[0];
}
/**
* Host main routine
*/
/**
 * Host main routine: adds two single ints on the device and prints the
 * result.
 * Fixes vs. original: the three device buffers and three host allocations
 * are now released (the original leaked all of them).
 */
int main(void)
{
    // Host input/output scalars.
    int *h_A = (int *)malloc(sizeof(int));
    int *h_B = (int *)malloc(sizeof(int));
    int *h_C = (int *)malloc(sizeof(int));
    *h_A = 1;
    *h_B = 1;
    // Device buffers.
    int *d_A = NULL;
    cudaMalloc((void **)&d_A, sizeof(int));
    int *d_B = NULL;
    cudaMalloc((void **)&d_B, sizeof(int));
    int *d_C = NULL;
    cudaMalloc((void **)&d_C, sizeof(int));
    printf("Copy input data from the host memory to the CUDA device\n");
    cudaMemcpy(d_A, h_A, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(int), cudaMemcpyHostToDevice);
    add<<<1, 1>>>(d_A, d_B, d_C );
    printf("Copy output data from the CUDA device to host memory\n");
    // The blocking memcpy also synchronizes with the kernel above.
    cudaMemcpy(h_C, d_C, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Done, result : %d\n", *h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
|
13,269 | /*Realizar un programa CUDA que dado un vector V de N números enteros multiplique a
cada número por una constante C, se deben realizar dos implementaciones:
a.Tanto C como N deben ser pasados como parámetros al kernel.
b.Tanto C como N deben estar almacenados en la memoria de constantes de la GPU*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//M and N number of threads (grid and block)
#define M 1
#define N 1
int h_array_size=10; //host
__constant__ int d_array_size; //device
int h_c=2; //host
__constant__ int d_c; //device
// Multiplies each array element by the device constant d_c; d_array_size
// and d_c live in __constant__ memory. When there are fewer threads than
// elements, each thread processes a contiguous chunk and the last thread
// takes the remainder.
// Bug fix: the flat thread index ignored blockIdx.y/blockIdx.z, so any
// grid with more than one block in y or z (e.g. the M x M grid main
// launches) made several blocks alias the same indices; the index now
// linearizes the full 3-D grid and 3-D block.
__global__ void multiply( int array[] , const int thread_number)
{
    int block = blockIdx.z * gridDim.y * gridDim.x + blockIdx.y * gridDim.x + blockIdx.x;
    int index = block * blockDim.x * blockDim.y * blockDim.z
              + threadIdx.z * blockDim.y * blockDim.x
              + threadIdx.y * blockDim.x + threadIdx.x;
    if(index<d_array_size){
        if(d_array_size<=thread_number){ // more threads than elements: one element per thread
            printf("Thread %i; Modifying value of index %i for %i * %i because < d_array_size %i\n", index, index, array[index], d_c, d_array_size);
            array[index]*=d_c;
        }
        else{ // fewer threads than elements: chunked processing
            int chunk = d_array_size / thread_number;
            if(index!=thread_number-1){ // non-last threads take `chunk` entries each
                for(int i=index*chunk; i< index*chunk+chunk; i++){
                    printf("Thread %i; Modifying value of index %i for %i * %i because < d_array_size %i\n", index, i, array[i], d_c, d_array_size);
                    array[i]*=d_c;
                }
            }
            else{ // last thread takes all remaining entries
                for(int i=index*chunk; i< d_array_size; i++){
                    printf("Thread %i; Modifying value of index %i for %i * %i because < d_array_size %i\n",index, i, array[i], d_c, d_array_size);
                    array[i]*=d_c;
                }
            }
        }
    }
}
// Driver: fills a 10-element array with random digits, uploads the size and
// the constant C to __constant__ memory, multiplies on the GPU and prints
// the array before/after plus the elapsed wall time.
// Fixes vs. original: cudaThreadSynchronize() (deprecated) replaced by
// cudaDeviceSynchronize(), and the local redeclaration of h_array_size that
// shadowed the global (same value, 10) removed.
int main(int argc, char *argv[]){
    // Measure time
    clock_t time_begin;
    time_begin=clock();
    // pointers to host & device arrays
    int *device_array = 0;
    int *host_array = 0;
    // copy size and constant from host globals into device constant memory
    cudaMemcpyToSymbol(d_array_size,&h_array_size,sizeof(h_array_size));
    cudaMemcpyToSymbol(d_c,&h_c,sizeof(h_c));
    // malloc a host array and fill it with random digits
    host_array = (int*)malloc( h_array_size * sizeof(int));
    for(int i=0; i<h_array_size; i++){
        host_array[i]=rand()%10;
        printf("%i\t", host_array[i]);
    }
    printf("\n");
    // cudaMalloc a device array and upload the input
    cudaMalloc(&device_array,h_array_size * sizeof(int));
    cudaMemcpy(device_array, host_array, sizeof(int)*h_array_size, cudaMemcpyHostToDevice);
    dim3 bloque(N,N); // two-dimensional block of N*N threads
    dim3 grid(M,M);   // two-dimensional grid of M*M blocks
    int thread_number= N*N*M*M;
    multiply<<<grid, bloque>>>(device_array, thread_number);
    cudaDeviceSynchronize();
    // download and inspect the result on the host:
    cudaMemcpy(host_array, device_array, sizeof(int)*h_array_size, cudaMemcpyDeviceToHost);
    for(int i=0; i<h_array_size; i++)
        printf("%i\t", host_array[i]);
    // deallocate memory
    free(host_array);
    cudaFree(device_array);
    printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 );
    return 0;
} |
13,270 | //include "gaussian.h"
#define BLOCK_SIZE 16
// One elimination step of Gaussian elimination for pivot row `row` of the
// wA x wA integer matrix A; temp (>= wA ints) receives a copy of the pivot
// row that every block can read.
//
// Fixes vs. original:
//  * `s` was a single __shared__ int written concurrently by every
//    eliminating thread — a data race; it is now a per-thread local.
//  * __syncthreads() was called inside the divergent branch
//    `if (index_row > row) { ... }`, which is undefined behaviour; every
//    barrier is now reached by all threads of the block.
// NOTE: the multiplier uses integer division, exactly as the original did.
__global__ void
Gaussian_CUDA(int* A, int wA, int row, int* temp)
{
    // Block and thread indexes.
    const int bx = blockIdx.x, by = blockIdx.y;
    const int tx = threadIdx.x, ty = threadIdx.y;
    // Flat element index plus the global row/column this thread owns.
    const int index = wA * BLOCK_SIZE * by + BLOCK_SIZE * bx + wA * ty + tx;
    const int index_col = BLOCK_SIZE * bx + tx;
    const int index_row = BLOCK_SIZE * by + ty;
    // Stage the pivot row into temp for all blocks to read.
    if (index_row == row)
        temp[index_col] = A[(index_row * wA) + index_col];
    __syncthreads();
    // Per-thread elimination multiplier (was a racy shared scalar).
    int s = 0;
    if (index_row > row)
        s = A[(index_row * wA) + row] / temp[row];
    __syncthreads();
    // Eliminate: subtract s times the pivot row from each row below it.
    if (index_row > row)
        A[index] -= A[(row * wA) + index_col] * s;
    __syncthreads();
}
|
13,271 | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <cuda_runtime_api.h>
#define HAS_KERNEL 1
#define SIZE 512
#if HAS_KERNEL
// Writes t[gid] = 10 * gid for each global thread id.
__global__ void mykernel(float* t){
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    t[gid] = gid * 10.0f;
}
// Doubles each element of t in place, one element per thread.
__global__ void mykernel2(float* t){
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    t[gid] = t[gid] * 2;
}
#endif
// Host driver: fills SIZE floats, runs mykernel then mykernel2, and checks
// that every element equals i*10*2.
// Fixes vs. original: the `break` after exit(-1) was unreachable dead code,
// and the host buffer (and device buffer on failure) was leaked.
int main(){
    float* test;
    float* test_h;
    cudaError_t res;
    test_h = (float*)malloc(sizeof(float)*SIZE);
    for(int i = 0 ; i < SIZE ; i ++){
        test_h[i] = i;
    }
    res = cudaMalloc((void**)&test, sizeof(float) * SIZE);
    printf("cudaMalloc => : %d\n", res);
    res = cudaMemcpy(test, test_h, sizeof(float) * SIZE , cudaMemcpyHostToDevice);
    printf("cudaMemcpy => : %d(H to D)\n", res);
#if HAS_KERNEL
    dim3 threads(512, 1, 1);
    dim3 blocks(1, 1, 1);
    mykernel<<<blocks, threads>>>(test);
    mykernel2<<<blocks, threads>>>(test);
#endif
    res = cudaMemcpy(test_h, test, sizeof(float) * SIZE , cudaMemcpyDeviceToHost);
    printf("cudaMemcpy => : %d(D to H)\n", res);
    for(int i = 0 ; i < SIZE ; i ++){
        if(test_h[i] != i*10.0f*2){
            printf("Result check : Failed...\n");
            cudaFree(test);
            free(test_h);
            exit(-1);
        }
    }
    printf("Result check : OK!!\n");
    res = cudaFree(test);
    printf("cudaFree => : %d\n", res);
    free(test_h);
    return 0;
}
|
13,272 | __global__ void branch(int *A){
// Demonstration of the divergent-barrier hazard: only odd-numbered threads
// reach __syncthreads(), which is undefined behaviour in CUDA (all threads
// of a block must reach the same barrier) and can hang or corrupt results.
// Left byte-identical — this kernel appears to be a deliberate
// divergence/sync test case, so the defect is flagged rather than "fixed".
int tid = threadIdx.x;
if ((tid % 2) == 1) {
// NOTE(review): barrier inside a divergent branch — UB.
__syncthreads();
// NOTE(review): reads A[tid + 2]; A must hold >= blockDim.x + 2 ints.
int a = A[tid + 2];
A[tid] += a;
}
}
|
13,273 | #include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
// Projects entries of the density matrix dm where the two basis bits of
// `bit` both equal `state` (pattern 00 or 11) into the smaller matrix dm0,
// squeezing those two address bits out of the index.
__global__ void dm_reduce(double *dm, unsigned int bit, double *dm0, unsigned int state, unsigned int no_qubits) {
    const int addr = blockIdx.x*blockDim.x + threadIdx.x;
    const int total = 1 << (2 * no_qubits);
    if (addr >= total) return;
    const int low_mask = (1 << (2 * bit)) - 1;  // address bits below the pair
    const int high_mask = (~low_mask) << 2;     // address bits above the pair
    const bool selected = ((addr >> (2 * bit)) & 0x3) == state * 0x3;
    if (selected) {
        dm0[(addr & low_mask) | ((addr & high_mask) >> 2)] = dm[addr];
    }
} |
13,274 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h"
#include <iostream>
using namespace std;
// In-place tree reduction over d[0 .. 2*blockDim.x - 1] in global memory;
// the total ends up in d[0]. Launch with a single block.
// Bug fix: the original had no __syncthreads() between reduction steps,
// which races whenever blockDim.x spans more than one warp.
__global__ void sumSingleBlock(int *d)
{
    int tid = threadIdx.x;
    for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>=1, stepSize <<= 1)
    {
        if (tid < tc)
        {
            int pa = tid * stepSize * 2;
            int pb = pa + stepSize;
            d[pa] += d[pb];
        }
        // Barrier outside the divergent `if` — reached by all threads.
        __syncthreads();
    }
}
// Shared-memory variant: stages 2*blockDim.x ints into dynamic shared
// memory, reduces there, and writes the total back to d[0].
// Launch with dynamic shared size >= 2 * blockDim.x * sizeof(int).
// Bug fix: the original had no __syncthreads() after the staging copies nor
// between reduction steps, racing beyond a single warp.
__global__ void sumSingleBlock_shm(int *d)
{
    extern __shared__ int dcopy[];
    int tid = threadIdx.x;
    dcopy[tid*2] = d[tid*2];
    dcopy[tid*2+1] = d[tid*2+1];
    __syncthreads();  // staging must finish before any thread reduces
    for (int tc = blockDim.x, stepSize = 1; tc > 0; tc >>=1, stepSize <<= 1)
    {
        if (tid < tc)
        {
            int pa = tid * stepSize * 2;
            int pb = pa + stepSize;
            dcopy[pa] += dcopy[pb];
        }
        __syncthreads();
    }
    if (tid == 0)
    {
        d[0] = dcopy[0];
    }
}
// Driver: sums 1..32 with both reduction kernels and prints the results.
// Bug fix: the dynamic shared-memory launch argument is in BYTES; the
// original passed `count` (32 bytes = 8 ints) where the kernel stages
// count * sizeof(int) bytes, overflowing the shared allocation.
int main()
{
    const int count = 32;
    const size_t size = count * sizeof(int);
    int h[count];
    for (int i=0; i<count; ++i)
    {
        h[i] = i+1;
    }
    int *d;
    cudaMalloc(&d, size);
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
    sumSingleBlock<<<1, count/2>>>(d);
    int result;
    cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "use global mem:" << result << std::endl;
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
    // `size` bytes of dynamic shared memory (was `count` — too small).
    sumSingleBlock_shm<<<1, count/2, size>>>(d);
    cudaMemcpy(&result, d, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "use shared mem:" << result << std::endl;
    cudaFree(d);
} |
13,275 | #include <cuda_runtime.h>
#include <stdio.h>
// Prints the launch coordinates once (from block 0 / thread 0 in x only).
__global__ void demo_kernel(){
    const bool firstThread = (blockIdx.x == 0) && (threadIdx.x == 0);
    if (firstThread) {
        printf("Run kernel. blockIdx = %d,%d,%d threadIdx = %d,%d,%d\n",
               blockIdx.x, blockIdx.y, blockIdx.z,
               threadIdx.x, threadIdx.y, threadIdx.z
        );
    }
}
// Host wrapper: converts raw int triples into dim3s and launches
// demo_kernel on the default stream with no dynamic shared memory.
void launch(int* grids, int* blocks){
    const dim3 grid_dims(grids[0], grids[1], grids[2]);
    const dim3 block_dims(blocks[0], blocks[1], blocks[2]);
    demo_kernel<<<grid_dims, block_dims, 0, nullptr>>>();
} |
13,276 | /* Autores:
* Walter Martínez Santana
* José Carlos Castro
*
*Cholesky en Paralelo en CUDA
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
// Matrix multiply dc = da * db (num x num, row-major), grid-stride in both
// dimensions so grids smaller than the matrix still cover every element.
// Bug fix: `sum` was declared once per thread and never reset, so every
// element after a thread's first accumulated the previous elements' totals
// (visible whenever gridDim*blockDim < num, e.g. 256 threads for N = 500).
__global__ void multMatriz(float *da, float *db, float *dc, int num){
    int j = threadIdx.x + blockIdx.x * blockDim.x;
    int i = threadIdx.y + blockIdx.y * blockDim.y;
    while(j<num){
        while(i<num){
            float sum = 0;  // fresh accumulator per output element
            for (unsigned int k = 0; k<num; k++)
                sum += da[i * num + k] * db[k * num + j];
            dc[i*num + j] = sum;
            i += gridDim.y * blockDim.y;
        }
        j+=gridDim.x * blockDim.x;
        i = threadIdx.y + blockIdx.y * blockDim.y;
    }
}
// Debug kernel: prints each thread's launch-geometry values and sqrt(id).
__global__ void indices(){
    const int id = threadIdx.x + blockIdx.x * blockDim.x;
    printf("blockdimy: %d threadx: %d Blockidx: %d blockdimx: %d id: %d raiz: %f\n",
           blockDim.y, threadIdx.x, blockIdx.x, blockDim.x, id, sqrt((double)id));
    __syncthreads();
}
// In-place Cholesky on a packed lower-triangular matrix: db holds the
// columns of the lower triangle concatenated (num*(num+1)/2 floats),
// `inicio` tracks each column's diagonal offset.
// NOTE(review): several __syncthreads() calls sit inside data-dependent
// while-loops and if/else branches — undefined behaviour when threads of a
// block diverge there; the `while(x<1000) x++;` busy-wait looks like an
// ad-hoc ordering workaround for exactly that. `ids` is computed but never
// used. Code left byte-identical; Spanish comments translated.
__global__ void choleskyParalelo(float *db, int num){
int id=threadIdx.x + blockIdx.x*blockDim.x;
int x=0;
int inicio=0;
int k=0, N=num;
int id1=id+inicio, ids=id,id2;
int N2 = N;
int NN=0, KK=0;
while(k < N){
id1=id+inicio;
// Check whether this is a diagonal element
if(id1 == inicio){
db[id1] = sqrt(db[id1]);
}else // not a diagonal element: divide it by its column's diagonal element
{
x=0;
while(id1 <N2){
while(x<1000)
x++;
__syncthreads();
db[id1] = db[id1]/db[inicio];
id1 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();// make all threads wait for the stragglers
}__syncthreads();
//id=ids;
inicio += (N-k); // prepare the jump to the next diagonal element
NN = N2; // start updating the columns to the right of the current one
KK = k+1;// each later column has one element fewer than the previous
while(NN < (int)N*(N+1)/2){
id2=id + NN; // jump to the next column
while(id2 < NN + (N-KK)){
db[id2] = db[id2] -db[id + KK]* db[KK];
id2 += gridDim.x * blockDim.x;
__syncthreads();
}
//__syncthreads();
NN += (N-KK);
KK++;
}
//__syncthreads();
k++; // move on to the next column
N2 += (N-k); // next diagonal element
__syncthreads();
}
}
#define n 5
#define SIZE n*n*sizeof(float)
// Driver: builds a random N x N matrix A (N = n = 5), forms C = A * A^T on
// the GPU (multMatriz with B = A^T), packs C's lower triangle column by
// column into a 1-D array, and runs the packed in-place Cholesky kernel.
// NOTE(review): multMatriz is launched with a 16x16 grid of 16x16 blocks
// for a 5x5 problem; the CUDA events time only that launch. B is freed and
// re-malloc'd as the packed storage midway through. Spanish comments
// translated; code left byte-identical.
int main(){
int N=n,i,j;
float *A, *B, *C;
float *da, *db, *dc;
int m, P=1,U=6;
srand(time(NULL));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
dim3 dimGrid(16, 16);
dim3 dimBlock(16, 16);
A=(float *)malloc(SIZE);
B=(float *)malloc(SIZE);
C=(float *)malloc(SIZE);
// Random integer entries in [P, U]; C zeroed.
for(m=0;m<N*N;m++){
A[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
//B[m]=(float)P+(int)(((U-P+1.0)*rand())/(RAND_MAX+1.0));
C[m]=(float)0;
}
// Transpose of A
for( i = 0;i<N;i++)
for(j=0;j<N;j++)
B[j + i*N] = A[i + j*N];
cudaMalloc((void**)&da, SIZE);
cudaMalloc((void**)&db, SIZE);
cudaMalloc((void**)&dc, SIZE);
cudaMemcpy(da,A, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(db,B, SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(dc,C, SIZE, cudaMemcpyHostToDevice);
cudaEventRecord(start, 0);
multMatriz<<<dimGrid , dimBlock >>>(da,db,dc,N);
//cudaThreadSynchronize();
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaMemcpy(C,dc, SIZE, cudaMemcpyDeviceToHost);
cudaFree(da);
cudaFree(db);
cudaFree(dc);
free(B);
// Memory optimisation:
// store only the diagonal and the part of the matrix below the diagonal
int nuevoSize = N*(N+1)/2;
j=0;
int k;
// B is reused here as the packed lower-triangular storage.
B=(float *)malloc(nuevoSize*sizeof(float));
for(m=0;m<N;m++){
for(k=m;k<N;k++){
B[j++]=C[m + N*k];
}
}
// Print the new one-dimensional packed storage
for(m=0;m<nuevoSize;m++)
printf("%5.0f ",B[m]);
printf("\n\n");
/*
for(m=0;m<N*N;m++){
printf("%08.0f",A[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
for(m=0;m<N*N;m++){
printf("%08.0f",B[m]);
printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
printf("\n\n");
*/
int NN;
NN=n;
//for(m=0;m<NN*NN;m++){
//int NN=16;
// Print the full product matrix C.
for(m=0;m<NN;m++){
for(k=0;k<NN;k++){
printf("%05.0f",C[k + m*N]);
printf("%c",( ((m*N+k)%NN)<(NN-1) ) ? ' ':'\n');
//printf("%c",( (m%N)<(N-1) ) ? ' ':'\n');
}
}
printf("\n\n");
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
printf("Tiempo %4.6f milseg\n\n",elapsedTime);
// Upload the packed triangle and factorise it in place.
cudaMalloc((void**)&db, nuevoSize*sizeof(float));
cudaMemcpy(db,B, nuevoSize*sizeof(float), cudaMemcpyHostToDevice);
choleskyParalelo<<<1,512>>>(db,n);
cudaMemcpy(B,db, nuevoSize*sizeof(float), cudaMemcpyDeviceToHost);
printf("\n\n");
for(m=0;m<nuevoSize;m++)
printf("%4.4f ",B[m]);
printf("\n\n");
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(db);
free(B);
free(C);
free(A);
return 0;
} |
13,277 |
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <string.h>
#define MAX 32
// Flips the case of each ASCII letter of x into y, one character per
// thread; non-letters are copied unchanged.
__global__ void toggle_case(char *x, char *y)
{
    const int i = threadIdx.x;
    const char c = x[i];
    if (c >= 'a' && c <= 'z')
        y[i] = c - 32;
    else if (c >= 'A' && c <= 'Z')
        y[i] = c + 32;
    else
        y[i] = c;
}
// Reads a word, toggles its case on the GPU and reports the kernel time.
// Fixes vs. original: scanf used an unbounded %s into a 32-byte buffer
// (stack overflow on long input) — the width is now bounded to MAX-1; the
// two CUDA events are now destroyed.
int main()
{
    int n;
    cudaEvent_t start, stop;
    float time;
    char A[MAX],B[MAX],*d,*e;
    printf("Enter String to be toggled: ");
    // Field width must stay MAX - 1 to leave room for the terminator.
    scanf("%31s",A);
    n = strlen(A)+1;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaMalloc((void **)&d,n*sizeof(char));
    cudaMalloc((void **)&e,n*sizeof(char));
    cudaMemcpy(d,A,n*sizeof(char),cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    toggle_case<<<1,n>>>(d,e);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaMemcpy(B,e,n*sizeof(char),cudaMemcpyDeviceToHost);
    printf("The toggled case String is %s", B);
    printf("\n");
    cudaFree(d);
    cudaFree(e);
    cudaEventElapsedTime(&time, start, stop);
    printf ("Time for the kernel: %f ms\n", time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
13,278 | __global__ void newdt0 (double * dt, const double * __restrict__ dtr,
double * dtp, double * drp) {
    // First-step bookkeeping: snapshot the current field and its rate into
    // the history buffers, then advance dt by one rate step.
    const unsigned int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    const double cur = dt[ip];
    const double rate = dtr[ip];
    dtp[ip] = cur;
    drp[ip] = rate;
    dt[ip] = cur + rate;
}
// Blended update: mixes two extrapolations of dt (u, from the field
// history; v, from the field+rate history) with weights s1, s2, then
// interpolates between them by m. The current field/rate are archived into
// the history slot at offset biv; niv is the stride between history slabs.
__global__ void newdt(double * dt, const double * __restrict__ dtr,
double * dtp, double * drp,
double s1, double s2, double m, int niv, int biv) {
    const unsigned int ip = threadIdx.x + blockDim.x * (blockIdx.x + gridDim.x * blockIdx.y);
    const double cur = dt[ip];
    const double rate = dtr[ip];
    const double u = cur + s1 * (dtp[ip] - cur) + s2 * (dtp[ip + niv] - cur);
    const double v = cur + rate
        + s1 * (dtp[ip] + drp[ip] - cur - rate)
        + s2 * (dtp[ip + niv] + drp[ip + niv] - cur - rate);
    dtp[ip + biv] = cur;
    drp[ip + biv] = rate;
    dt[ip] = u + m * (v - u);
}
// Stage-1 reduction: with t1 = dtr - drp[.] and t2 = dtr - drp[. + niv],
// each block accumulates five quantities (dtr*t1, dtr*t2, t1*t1, t1*t2,
// t2*t2) into five consecutive blockDim.x-sized segments of dynamic shared
// memory, then writes one partial sum per quantity into ds, laid out as
// five planes of gridDim.x * gridDim.y values.
// Launch with dynamic shared size >= 5 * blockDim.x * sizeof(double) and a
// power-of-two blockDim.x; every thread must map to a valid element of
// dtr/drp (no bounds guard).
__global__ void theta30(double * ds, const double * __restrict__ dtr,
const double * __restrict__ drp, int niv) {
extern __shared__ double sdata[];
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x
+ blockIdx.y * blockDim.x * gridDim.x;
double t1 = dtr[ip] - drp[ip];
double t2 = dtr[ip] - drp[ip + niv];
// Stage the five products into the five shared-memory segments.
sdata[threadIdx.x] = dtr[ip] * t1;
sdata[threadIdx.x + blockDim.x] = dtr[ip] * t2;
sdata[threadIdx.x + blockDim.x * 2] = t1 * t1;
sdata[threadIdx.x + blockDim.x * 3] = t1 * t2;
sdata[threadIdx.x + blockDim.x * 4] = t2 * t2;
__syncthreads();
// Tree-reduce all five segments in lockstep.
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
sdata[threadIdx.x] += sdata[threadIdx.x + s];
sdata[threadIdx.x + blockDim.x] += sdata[threadIdx.x + blockDim.x + s];
sdata[threadIdx.x + blockDim.x * 2]
+= sdata[threadIdx.x + blockDim.x * 2 + s];
sdata[threadIdx.x + blockDim.x * 3]
+= sdata[threadIdx.x + blockDim.x * 3 + s];
sdata[threadIdx.x + blockDim.x * 4]
+= sdata[threadIdx.x + blockDim.x * 4 + s];
}
__syncthreads();
}
// Thread 0 writes this block's five partial sums to their output planes.
if (threadIdx.x == 0) {
ds[blockIdx.x + blockIdx.y * gridDim.x] = sdata[0];
ds[blockIdx.x + blockIdx.y * gridDim.x + gridDim.x * gridDim.y] =
sdata[blockDim.x];
ds[blockIdx.x + blockIdx.y * gridDim.x + (gridDim.x * gridDim.y) * 2] =
sdata[blockDim.x * 2];
ds[blockIdx.x + blockIdx.y * gridDim.x + (gridDim.x * gridDim.y) * 3] =
sdata[blockDim.x * 3];
ds[blockIdx.x + blockIdx.y * gridDim.x + (gridDim.x * gridDim.y) * 4] =
sdata[blockDim.x * 4];
}
}
// Stage-2 reduction: folds the per-block partials from theta30. ds holds
// five planes of blockDim.x * gridDim.x values; each block reduces its
// slice of every plane and emits one value per plane into ds2 (five planes
// of gridDim.x values).
// Launch with dynamic shared size >= 5 * blockDim.x * sizeof(double) and a
// power-of-two blockDim.x.
__global__ void theta31(double * ds2, double * ds) {
extern __shared__ double sdata[];
unsigned int ip = threadIdx.x + blockIdx.x * blockDim.x;
// Stage all five input planes into five shared segments.
sdata[threadIdx.x] = ds[ip];
sdata[threadIdx.x + blockDim.x] = ds[ip + blockDim.x * gridDim.x];
sdata[threadIdx.x + blockDim.x * 2] = ds[ip + blockDim.x * gridDim.x * 2];
sdata[threadIdx.x + blockDim.x * 3] = ds[ip + blockDim.x * gridDim.x * 3];
sdata[threadIdx.x + blockDim.x * 4] = ds[ip + blockDim.x * gridDim.x * 4];
__syncthreads();
// Tree-reduce the five segments in lockstep.
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s) {
sdata[threadIdx.x] += sdata[threadIdx.x + s];
sdata[threadIdx.x + blockDim.x] += sdata[threadIdx.x + blockDim.x + s];
sdata[threadIdx.x + blockDim.x * 2]
+= sdata[threadIdx.x + blockDim.x * 2 + s];
sdata[threadIdx.x + blockDim.x * 3]
+= sdata[threadIdx.x + blockDim.x * 3 + s];
sdata[threadIdx.x + blockDim.x * 4]
+= sdata[threadIdx.x + blockDim.x * 4 + s];
}
__syncthreads();
}
// One result per plane per block.
if (threadIdx.x == 0) {
ds2[blockIdx.x] = sdata[0];
ds2[blockIdx.x + gridDim.x] = sdata[blockDim.x];
ds2[blockIdx.x + gridDim.x * 2] = sdata[blockDim.x * 2];
ds2[blockIdx.x + gridDim.x * 3] = sdata[blockDim.x * 3];
ds2[blockIdx.x + gridDim.x * 4] = sdata[blockDim.x * 4];
}
}
// Stage-3 (final) reduction: a single block folds ds2's five planes of
// blockDim.x values each into the five scalars ds[0..4].
// Launch with one block whose blockDim.x equals the stage-2 grid size
// (power of two) and dynamic shared size >= 5 * blockDim.x * sizeof(double).
__global__ void theta32(double * ds, double * ds2) {
extern __shared__ double sdata[];
unsigned int ip = threadIdx.x;
// Stage the five planes of ds2 into five shared segments.
sdata[ip] = ds2[ip];
sdata[ip + blockDim.x] = ds2[ip + blockDim.x];
sdata[ip + blockDim.x * 2] = ds2[ip + blockDim.x * 2];
sdata[ip + blockDim.x * 3] = ds2[ip + blockDim.x * 3];
sdata[ip + blockDim.x * 4] = ds2[ip + blockDim.x * 4];
__syncthreads();
// Tree-reduce the five segments in lockstep.
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (ip < s) {
sdata[ip] += sdata[ip + s];
sdata[ip + blockDim.x] += sdata[ip + blockDim.x + s];
sdata[ip + blockDim.x * 2] += sdata[ip + blockDim.x * 2 + s];
sdata[ip + blockDim.x * 3] += sdata[ip + blockDim.x * 3 + s];
sdata[ip + blockDim.x * 4] += sdata[ip + blockDim.x * 4 + s];
}
__syncthreads();
}
// Final five totals.
if (ip == 0) {
ds[0] = sdata[0];
ds[1] = sdata[blockDim.x];
ds[2] = sdata[blockDim.x * 2];
ds[3] = sdata[blockDim.x * 3];
ds[4] = sdata[blockDim.x * 4];
}
}
|
13,279 | #include <cstdlib>
#include <iostream>
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N 500
// Declared new variable type as array
typedef double arr[N];
/******** Code for elementwise matrix multiplication for dot product*********************/
/* Elementwise products for the dot step: temp[row][col] = A[row][k] *
 * q[row][col] for col < k; one block per (row, col). Temp is unused but
 * kept for signature parity with the other kernels. */
__global__ void dotmat(arr *A, arr *Temp, arr *q, int k,arr *temp) {
    const int row = blockIdx.x;
    const int col = blockIdx.y;
    if (col >= k)
        return;
    temp[row][col] = A[row][k] * q[row][col];
}
/******summing up above matrix column wise to get k dot products******************/
/* Column sums of temp into r[0][x], producing the k dot products; columns
 * x >= k are zeroed. Temp and q are unused (signature parity). Expects
 * blockDim.x >= k, one thread per column. */
__global__ void dot(arr *temp,arr *r,arr *Temp, arr *q, int k) {
    const int x = threadIdx.x;
    r[0][x] = 0.0;
    if (x >= k)
        return;
    for (int row = 0; row < N; row++) {
        r[0][x] += temp[row][x];
    }
}
/**********suming over all dot products and respective column of Q**************/
/* Projection terms: LR[row][col] = r[0][col] * q[row][col] for col < k;
 * one block per (row, col). Temp is unused (signature parity). */
__global__ void submat(arr *LR, arr *Temp, arr *q, int k,arr *r) {
    const int row = blockIdx.x;
    const int col = blockIdx.y;
    if (col >= k)
        return;
    LR[row][col] = r[0][col] * q[row][col];
}
/**********Add all the columns element wise into single vector and substract from A[k] and calculate q[k]********************/
/* Orthogonalisation step: subtracts the accumulated projections (row sums
 * of LR's first k columns) from column k of Temp, then normalises it into
 * column k of q. Expects one block of exactly N threads (x indexes rows). */
__global__ void Qcal(arr *LR, arr *Temp, arr *q, int k) {
__shared__ double p[N];
__shared__ double nr;
int j;
int x=threadIdx.x;
// p[x]: projection to remove from row x of column k.
p[x]=0.0;
for(j=0;j<k;j++){
p[x]+=LR[x][j];
}
__syncthreads();
Temp[x][k]=Temp[x][k]-p[x];
__syncthreads();
// Thread 0 computes the squared norm of the updated column into the shared
// scalar nr; the barrier below publishes it to the whole block.
if(x==0){
nr=0.0;
for(j=0;j<N;j++){
nr+=Temp[j][k]*Temp[j][k];
}
}
__syncthreads();
// Normalise: q[:,k] = Temp[:,k] / ||Temp[:,k]||.
q[x][k]=Temp[x][k]/sqrt(nr);
}
/**********Matrix multiplication AtB***************************************/
/* n = l^T * m: one block per output element (x, y); N threads each stage
 * one product into shared memory, then thread 0 serially accumulates.
 * Expects blockDim.x == N. */
__global__ void matmultt(arr *l,arr *m, arr *n)
{
    __shared__ double partial[N];
    const int x = blockIdx.x;
    const int y = blockIdx.y;
    const int t = threadIdx.x;
    n[x][y] = 0;
    partial[t] = l[t][x] * m[t][y];
    __syncthreads();
    if (t == 0) {
        for (int i = 0; i < N; i++) {
            n[x][y] = n[x][y] + partial[i];
        }
    }
}
/**************matrix multiplication AB************/
/* n = l * m.  One block per output element (x, y): each of the block's N
 * threads writes one product l[x][k]*m[k][y] into shared p, then thread 0
 * serially accumulates the sum after the barrier.
 * Expected launch: <<<dim3(N,N), N>>>. */
__global__ void matmult(arr *l,arr *m, arr *n)
{
int x=blockIdx.x;
int y=blockIdx.y;
__shared__ double p[N];
int i;
int k=threadIdx.x;
// Redundant zero store by all threads of the block, completed before the
// barrier below, so the thread-0 accumulation starts from zero.
n[x][y]=0;
p[k]=0;
// k-th term of the dot product of row x of l with column y of m
p[k]=l[x][k]*m[k][y];
__syncthreads();
if(k==0){
for(i=0;i<N;i++){
n[x][y]=n[x][y]+p[i];
}
}
}
/* Driver: reads stiffness (KG.txt) and mass (MG.txt) matrices, runs L=250
 * QR iterations (via the kernels above) to diagonalize, accumulates the
 * eigenvector product in Q, transforms K, and finally performs a small
 * mode-displacement response calculation on the host. */
int main(int argc, char** argv) {
int i,j,k,l,L=250;
double time_spent = 0.0;
clock_t begin = clock();
size_t bytes = N * N * sizeof(double);
// Allocate memory for our matrices (unified memory: both host loops and
// kernels touch these buffers without explicit copies)
arr *A, *q,*Temp,*temp,*MR,*KR,*FR,*LR,*r,*CR,*Q,*phiBt,*KK;
cudaMallocManaged(&A, bytes);
cudaMallocManaged(&q, bytes);
cudaMallocManaged(&Temp, bytes);
cudaMallocManaged(&temp, bytes);
cudaMallocManaged(&r, bytes);
cudaMallocManaged(&CR, bytes);
cudaMallocManaged(&Q, bytes);
cudaMallocManaged(&KK, bytes);
cudaMallocManaged(&phiBt, bytes);
cudaMallocManaged(&FR, bytes);
cudaMallocManaged(&LR, bytes);
cudaMallocManaged(&KR, bytes);
cudaMallocManaged(&MR, bytes);
// NOTE(review): none of these allocations are ever cudaFree'd and neither
// input file is fclose'd — acceptable for a one-shot run, worth fixing.
/****************Import matries**********************/
float K[N][N];
float M[N][N];
FILE *filek;
filek=fopen("KG.txt","r");
if(filek==NULL){
printf("file doesnt exist");
return 0;
}
// NOTE(review): classic while(!feof(...)) anti-pattern — the N x N scan can
// re-run, and fscanf return values are ignored.  Works only when the file
// holds exactly N*N numbers.
while(!feof(filek)){
printf("entered1");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
fscanf(filek, "%f", &K[i][j]);
}}}
FILE *file;
file=fopen("MG.txt","r");
if(file==NULL){
printf("file doesnt exist");
return 0;
}
while(!feof(file)){
printf("entered2");
for(i=0;i<N;i++){
for(j=0;j<N;j++){
fscanf(file, "%f", &M[i][j]);
}}}
// Spot-check the corners of the inputs
printf("K of A is:\n");
for(i = N-5; i < N; i++) {
for (j = N-5; j < N; j++) {
printf("%f ",K[i][j]);
} printf("\n");
}
printf("M is:\n");
for(i = 0; i <10; i++) {
for(j = 0; j < 10; j++) {
printf("%f ",M[i][j]);
} printf("\n");
}
//initialize q and Temp (float inputs promoted to the double work arrays)
for(i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
A[i][j] = M[i][j];
q[i][j] = 0.0;
Temp[i][j] = M[i][j];
KK[i][j] = K[i][j];
}
}
//******************setting grid parameters*****************
// One single-thread block per matrix element for dotmat/submat launches.
dim3 grid(N,N);
// Two passes: x==0 diagonalizes M (and builds phiBt / transformed K),
// x==1 diagonalizes the transformed K.
for(int x = 0;x< 2; x++) {
//*************QR decompose*********************
printf("A for %dis:\n",x);
for(i = 0; i < 10; i++) {
for(j = 0; j < 10; j++) {
printf("%f ",A[i][j]);
} printf("\n");
}
// L QR iterations: A -> QR, then A <- RQ
for(l = 0;l< L; l++) {
printf("l is %d for x \n",l);
for(i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
q[i][j] = 0.0;
MR[i][j] = 0.0;
Temp[i][j] = A[i][j];
CR[i][j] = A[i][j];
}
}
//Calculate q[0] — first Gram-Schmidt column, normalized on the host
double nr=0.0;
for(j=0;j<N;j++){
nr+=A[j][0]*A[j][0];
}
for(j=0;j<N;j++){
q[j][0]=A[j][0]/sqrt(nr);
}
// Remaining columns: project, subtract, normalize — one kernel each step
for(k = 1;k< N; k++) {
for(i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
temp[i][j] = 0.0;
r[0][i] = 0.0;
}
}
//**************kernel function for dot products**************
dotmat<<<grid,1>>>(A,Temp,q,k,temp);
cudaDeviceSynchronize();
dot<<<1,N>>>(temp,r,Temp,q,k);
cudaDeviceSynchronize();
submat<<<grid,1>>>(LR,Temp,q,k,r);
cudaDeviceSynchronize();
Qcal<<<1,N>>>(LR,Temp,q,k);
cudaDeviceSynchronize();
/*
for(j=0;j<N;j++){
for(i=0;i<N;i++){
r[0][j]+=temp[i][j];
}
}
for(j=0;j<k;j++){
for(i=0;i<N;i++){
Temp[i][k]=Temp[i][k]-r[0][j]*q[i][j];
}
}
double nr=0.0;
for(j=0;j<N;j++){
nr+=Temp[j][k]*Temp[j][k];
// nr+=r[1][j];
=0;j<N;j++){
q[j][k]=Temp[j][k]/sqrt(nr);
}
*/
}
//****************QRD ends****************
//*************New A cal****************** A <- (q^T A) q = R Q
matmultt<<<grid, N>>>(q, A, MR);
cudaDeviceSynchronize();
matmult<<<grid,N>>>(MR, q, A);
cudaDeviceSynchronize();
//***************New Q cal***************** accumulate Q <- Q * q
if(l<1){
for(i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
Q[i][j] =q[i][j];
}
}
}
if(l>0){
matmult<<<grid, N>>>(Q, q, KR);
cudaDeviceSynchronize();
for(i = 0; i < N; i++) {
for (j = 0; j < N; j++){
Q[i][j] =KR[i][j];
}
}
}
//*****************************************
} //l loop
// First pass only: mass-normalize (LR = diag(1/sqrt(lambda_M))) and
// transform K into the modal basis for the second eigen pass.
if(x<1){
//*******Cal phiBt**********
for(i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
LR[i][j] =0.00;
if(i==j){
LR[i][i] =1/sqrt(A[i][i]);
}
} }
matmult<<<grid,N>>>(Q, LR,phiBt);
cudaDeviceSynchronize();
//***********************transformed K******************
matmultt<<<grid, N>>>(phiBt,KK,FR);
cudaDeviceSynchronize();
matmult<<<grid, N>>>(FR, phiBt,A);
cudaDeviceSynchronize();
}
} //xloop
//**************Eigen vectors******** back-transform: CR = phiBt * Q
matmult<<<grid, N>>>(phiBt, Q,CR);
cudaDeviceSynchronize();
printf("Eigenvalues of A are\n");
for(i = N-5; i < N; i++) {
for (j =N-5; j < N; j++) {
printf("%f ",A[i][j]);
} printf("\n");
}
printf("Eigenvectors of A is:\n");
for(i = N-5; i < N; i++) {
for (j = N-5; j < N; j++) {
printf("%f ",CR[i][j]);
} printf("\n");
}
/*#####implementation of mdm#####*/
// Mode-displacement method: respond to a sinusoidal load at frequency w
// using the 10 lowest modes, 80 time steps of dt.
int h,modes=10;
double w=20000.0,dt=0.00001; //frequency
double phi[N][modes], f[N],fmat[modes],tmat[modes][80],wr[modes],u[N][80];
//*********setting EV and ev and f ************
for(i=0;i<modes;i++){
for(j=0;j<N;j++){
phi[j][i]=CR[j][N-i-1];
f[j]=0.0;
}
wr[i]=sqrt(A[N-1-i][N-1-i]);
}
// Point loads at DOFs 89 and 440 (hard-coded; requires N > 440)
f[89]=5*pow(10,10);
f[440]=5*pow(10,10);
//************cal fmat****** modal forces
for(h=0;h<modes;h++){
fmat[h]=0;
for(k=0;k<N;k++){
fmat[h]+=phi[k][h]*f[k];
}
}
//******q cal******** modal time histories
for(i=0;i<modes;i++){
for(j=0;j<80;j++){
tmat[i][j]=fmat[i]*(1/(pow(wr[i],2)-pow(w,2)))*(sin(w*j*dt)-((w/wr[i])*sin(wr[i]*j*dt)));
}
}
//**********u cal********* physical response = phi * tmat
for(h=0;h<N;h++){
for(j=0;j<80;j++){
u[h][j]=0.0;
for(k=0;k<modes;k++){
u[h][j]+=phi[h][k]*tmat[k][j];
}
}
}
printf("response\n");
for(h=0;h<80;h++){
printf("%8.8f ",u[89][h]);
}
clock_t end = clock();
time_spent += (double)(end - begin) / CLOCKS_PER_SEC;
// NOTE(review): "elpased" is a typo in the message below; also clock()
// measures CPU time, not wall time spent waiting on the GPU.
printf("Time elpased is %f min\n\n", time_spent/60);
return 0;
}
|
13,280 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated fuzz kernel: folds the var_* inputs into `comp` through a
 * fixed tree of float operations and prints the final value.  The expression
 * structure is intentionally meaningless (compiler/runtime stress test);
 * expected launch is <<<1,1>>>. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,int var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27) {
if (comp < -1.7421E20f / (var_2 + (-1.9119E-29f + (+1.6852E36f - -1.5755E36f * var_3)))) {
for (int i=0; i < var_1; ++i) {
comp += var_6 * var_7 * (var_8 - var_9);
if (comp <= (+1.4077E29f - +0.0f * coshf(var_10 + cosf((var_11 / (var_12 + (var_13 - var_14))))))) {
float tmp_1 = +1.3231E1f;
comp += tmp_1 * ceilf(-0.0f);
}
// Inner loops shadow the outer i on purpose (generated code).
for (int i=0; i < var_4; ++i) {
comp = floorf(fmodf(var_15 / (-1.0474E-36f * fmodf(-1.4453E-37f, (var_16 / var_17))), (-1.4296E-37f + +0.0f * var_18)));
comp += var_19 / -1.4939E-36f * (var_20 + (-1.2341E-15f / var_21 * +1.6801E-35f));
}
for (int i=0; i < var_5; ++i) {
float tmp_2 = (-0.0f / -1.2244E-37f / var_22 / +1.2306E-42f);
comp = tmp_2 * +1.6979E-43f + (-1.0369E34f - (var_23 * sinf(tanhf(var_24 + (var_25 - (-1.1242E-44f + (var_26 + (var_27 - +1.0765E-37f))))))));
}
}
}
printf("%.17g\n", comp);
}
/* Allocate a 10-element float buffer on the host and fill every slot with v.
 * The caller owns (and must free()) the returned memory. */
float* initPointer(float v) {
    float *buf = (float*) malloc(sizeof(float) * 10);
    int i = 0;
    while (i < 10) {
        buf[i] = v;
        ++i;
    }
    return buf;
}
/* Parse the 28 numeric arguments the generated kernel expects, launch it
 * once on a single thread, and wait for the device-side printf to flush. */
int main(int argc, char** argv) {
/* Program variables */
// BUG FIX: the original read argv[1]..argv[28] without checking argc,
// dereferencing past the argument array on short command lines.
if (argc < 29) {
fprintf(stderr, "usage: %s <28 numeric arguments>\n", argv[0]);
return 1;
}
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
int tmp_5 = atoi(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28);
// Block until the kernel (and its printf output) completes.
cudaDeviceSynchronize();
return 0;
}
|
13,281 | #include "includes.h"
using namespace std;
// https://stackoverflow.com/questions/26853363/dot-product-for-dummies-with-cuda-c
// Block-level dot product (see the StackOverflow reference above): each
// thread forms one product a[tid]*b[tid], the block tree-reduces the partial
// products in shared memory, and thread 0 writes the block sum to
// c[blockIdx.x].
// NOTE(review): the halving reduction assumes blockDim.x is a power of two
// and <= 1024, and there is no bounds check on tid — the launch must cover
// the arrays exactly.
__global__ void dotCuda3(float *a, float *b, float *c){
    __shared__ float partial[1024];
    const int lane = threadIdx.x;
    const int tid = blockIdx.x * blockDim.x + lane;
    partial[lane] = a[tid] * b[tid];
    __syncthreads();
    for (unsigned int stride = blockDim.x / 2; stride > 0; stride /= 2) {
        if (lane < stride)
            partial[lane] += partial[lane + stride];
        __syncthreads();
    }
    if (lane == 0)
        c[blockIdx.x] = partial[0];
}
13,282 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define PRECISION 0.00001
#define uN 5.0
#define uS 5.0
#define uW 0.0
#define uE 10.0
#define TAM_BLOCO 1024
double intervaloX, intervaloY;
double denominador1, denominador2;
double *h_m, *d_m;
int divX, divY, laps;
//Essas sao as variaveis globais da GPU
__device__ double d_intervaloX;
__device__ double d_intervaloY;
__device__ double d_denominador1;
__device__ double d_denominador2;
__device__ int d_divX;
__device__ int d_divY;
/* Convection coefficient a(x, y) = 500*x*(1-x)*(0.5-y) at grid point (i, j). */
__device__ double a(int i, int j){
double x = i*d_intervaloX;
// NOTE(review): y is derived from i, so parameter j is never used.  One of
// x/y almost certainly should come from j — confirm against the
// discretization before relying on these coefficients.
double y = i*d_intervaloY;
return 500 * x * (1 - x) * (0.5 - y);
}
/* Convection coefficient b(x, y) = 500*y*(y-1)*(x-0.5) at grid point (i, j). */
__device__ double b(int i, int j){
double x = i*d_intervaloX;
// NOTE(review): same issue as a() — y is computed from i and j is unused;
// verify which index should drive y.
double y = i*d_intervaloY;
return 500 * y * (y - 1) * (x - 0.5);
}
/* North stencil coefficient at grid point (i, j). */
__device__ double n(int i, int j){
    const double numer = 2.0 - d_intervaloX * b(i, j);
    return numer / d_denominador2;
}
/* South stencil coefficient at grid point (i, j). */
__device__ double s(int i, int j){
    const double numer = 2.0 + d_intervaloX * b(i, j);
    return numer / d_denominador2;
}
/* East stencil coefficient at grid point (i, j). */
__device__ double e(int i, int j){
    const double numer = 2.0 - d_intervaloX * a(i, j);
    return numer / d_denominador1;
}
/* West stencil coefficient at grid point (i, j). */
__device__ double w(int i, int j){
    const double numer = 2.0 + d_intervaloX * a(i, j);
    return numer / d_denominador1;
}
/* Device-side grid accessor with boundary handling: callers pass interior
 * indices; out-of-range indices return the fixed boundary values instead of
 * reading the array. */
__device__ double malhaGPU(double *matriz, int i, int j){
i++;
j++;
// // NOTE: corner cases do not matter — they are never used by the stencil,
// // so whatever value is returned there is irrelevant.
if(i <= 0){
return uN;
}
if(j <= 0){
// // uW ("West") stands in for the uO of the assignment text; all other
// // boundaries are named by their English cardinal direction.
return uW;
}
if(j >= d_divX + 2){
return uE;
}
if(i >= d_divY + 2){
return uS;
}
i--;
j--;
// NOTE(review): row stride here is d_divX, but the kernels write the grid
// with a (d_divX + 2) stride — these disagree; verify the intended layout
// against calculoAzul/calculoVermelho before trusting interior reads.
return matriz[i * d_divX + j];
}
/* Five-point stencil update for cell (i, j): weighted sum of the four
 * neighbors using the W/E/S/N coefficients above.  Boundary values are
 * supplied by malhaGPU. */
__device__ double u(double *matriz, int i, int j){
// if(i == 0){
// return uW;
// }
// if(i == divX + 2){
// return uE;
// }
// if(j == 0){
// return uN;
// }
// if(j == divX + 2){
// return uS;
// }
// printf("u(%d, %d) = %lf * %lf + %lf * %lf + %lf * %lf + %lf * %lf\n", i, j, w(i,j), malha(i, j-1), e(i,j), malha(i, j+1), s(i,j), malha(i-1, j), n(i,j), malha(i+1, j));
return w(i,j)*malhaGPU(matriz, i, j-1) + e(i,j)*malhaGPU(matriz, i, j+1) + s(i,j)*malhaGPU(matriz, i-1, j) + n(i,j)*malhaGPU(matriz, i+1, j);
}
/* Red-black relaxation: update the "blue" (even) cells; one thread per blue
 * cell, flat 1-D launch. */
__global__ void calculoAzul(double *matriz){
int tidX = blockIdx.x * blockDim.x + threadIdx.x;
int i, j;
// NOTE(review): the i/j recovery mixes d_divX and d_divY (i = 2*tid/d_divX,
// j = 2*tid%d_divY), and the write below uses a (d_divX + 2) row stride
// while malhaGPU reads with a d_divX stride.  At best this lines up for
// square grids — verify the indexing scheme.
i = 2*tidX/d_divX;
j = 2*tidX%d_divY;
matriz[i * (d_divX + 2) + j] = u(matriz, i, j);
}
/* Red-black relaxation: update the "red" (odd) cells; one thread per red
 * cell, flat 1-D launch. */
__global__ void calculoVermelho(double *matriz){
int tidX = blockIdx.x * blockDim.x + threadIdx.x;
int i, j;
// NOTE(review): same indexing concerns as calculoAzul (d_divX/d_divY mix
// and stride disagreement with malhaGPU); the only difference is the +1
// column offset selecting the odd cells.
i = 2*tidX/d_divX;
j = 2*tidX%d_divY + 1;
matriz[i * (d_divX + 2) + j] = u(matriz, i, j);
}
/* Host-side twin of malhaGPU: grid accessor with boundary handling.
 * NOTE(review): the `matriz` parameter is ignored — the function always
 * reads the global h_m; and the interior read uses a divX row stride while
 * the buffer is allocated and initialized with a (divX + 2) stride. */
double malhaCPU(double *matriz, int i, int j){
i++;
j++;
// // NOTE: corner cases do not matter — they are never used, so whatever
// // value is returned there is irrelevant.
if(j <= 0){
// // uW ("West") stands in for the uO of the assignment text.
return uW;
}
if(j >= divX + 2){
return uE;
}
if(i <= 0){
return uN;
}
if(i >= divY + 2){
return uS;
}
i--;
j--;
return h_m[i * divX + j];
}
/* Print the grid including boundary values, fetching the borders through
 * malhaCPU.  NOTE(review): the interior read uses a divX row stride (see
 * malhaCPU note) and the row/column loops mix divX and divY — only
 * consistent for square grids. */
void printM(){
int i = 0, j = 0;
// top (north) border row
for(i = 0; i < divX + 2; i++){
printf("%lf ", malhaCPU(h_m, 0 ,i));
}
printf("\n");
for(i = 0; i < divX; i++){
for(j = 0; j < divY; j++){
if(j == 0){
printf("%lf ", malhaCPU(h_m ,i ,j-1));
}
printf("%lf ", h_m[i* divX + j]);
if(j == divY - 1){
printf("%lf ", malhaCPU(h_m ,i ,j + 1));
}
}
printf("\n");
}
// bottom (south) border row
for(i = 0; i < divX + 2; i++){
printf("%lf ", malhaCPU(h_m, divX + 1,i));
}
printf("\n");
}
/* Dump the raw (divX+2) x (divY+2) buffer, space-separated, one row per
 * line.  NOTE(review): the row loop runs to divX + 2 but the row stride is
 * also (divX + 2) — correct only when the grid is square or divX is truly
 * the row count; cross-check with setupM. */
void printMat(){
int i, j;
for(i = 0; i < divX + 2; i++){
for(j = 0; j < divY + 2; j++){
printf("%lf", h_m[i*(divX + 2) + j]);
if(j != divY + 1) printf(" ");
}
if(i != divX + 1)
printf("\n");
}
printf("\n");
}
/* Write the fixed boundary values into the (divX+2) x (divY+2) host buffer:
 * west/east columns then north/south rows. */
void setupM(){
int i;
for(i = 0; i < divY + 2; i++){
h_m[i * (divX + 2)] = uW;
h_m[i*(divX + 2) + divY + 1] = uE;
}
for(i = 0; i < divX + 2; i++){
h_m[i] = uN;
// NOTE(review): the south row index uses (divX + 1) — presumably it should
// be (divY + 1) for non-square grids; correct only when divX == divY.
h_m[(divX + 1)*(divX + 2) + i] = uS;
}
}
/* Driver: parse grid divisions and iteration count, initialize the boundary
 * values, copy solver constants into device globals, run `laps` red/black
 * relaxation sweeps, and print the resulting grid. */
int main(int argc, char** argv){
// NOTE(review): this local shadows the file-scope `laps`.
int laps = 0;
int i;
int threadsAzuis;
int threadsVermelhas;
if(argc < 2){
printf("Número incorreto de parâmetros:\n");
printf("Número de divisões faltando:\n");
printf("\tPara valores iguais: %s <número de divisões> <Quantidade de iteracoes>\n", argv[0]);
printf("\tPara valores diferentes: %s <divisões em X> <divisões em Y> <Quantidade de iteracoes>\n", argv[0]);
exit(-1);
}
divX = atoi(argv[1]);
divY = (argc > 2)? atoi(argv[2]): divX;
laps = (argc > 3)? atoi(argv[3]): 1000;
intervaloX = 1.0/(divX + 1);
intervaloY = 1.0/(divY + 1);
denominador1 = 4*(1 + ((intervaloX*intervaloX)/(intervaloY*intervaloY)));
denominador2 = 4*(1 + ((intervaloY*intervaloY)/(intervaloX*intervaloX)));
cudaMalloc(&d_m, (divX + 2) * (divY + 2) * sizeof(double));
h_m = (double *) malloc((divX + 2) * (divY + 2) * sizeof(double));
setupM();
// Copy the solver constants into the __device__ globals used by the kernels
cudaMemcpyToSymbol(d_intervaloX, &intervaloX, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_intervaloY, &intervaloY, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador1, &denominador1, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_denominador2, &denominador2, sizeof(double), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_divX, &divX, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(d_divY, &divY, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpy(d_m, h_m, (divX + 2) * (divY + 2) * sizeof(double), cudaMemcpyHostToDevice);
// NOTE(review): num_threads/num_blocos are never used below (the launches
// are 1-D), and TAM_BLOCO x TAM_BLOCO = 1024*1024 would exceed the
// 1024-threads-per-block hardware limit anyway — dead code worth removing.
dim3 num_threads(TAM_BLOCO, TAM_BLOCO);
dim3 num_blocos(((divX + 2) + num_threads.x -1)/num_threads.x, ((divY + 2) + num_threads.y -1)/num_threads.y);
// Number of cells each color must update
threadsAzuis = (divX * divY)/2; //even cells
threadsVermelhas = threadsAzuis + (divX * divY)%2; //odd cells
int blocosAzuis, blocosVermelhos;
blocosAzuis = (threadsAzuis + TAM_BLOCO -1)/TAM_BLOCO;
blocosVermelhos = (threadsVermelhas + TAM_BLOCO -1)/TAM_BLOCO;
// Alternate blue/red sweeps; same-stream launches serialize on the device.
for(i = 0; i < laps; i++){
calculoAzul<<<blocosAzuis, TAM_BLOCO>>>(d_m);
calculoVermelho<<<blocosVermelhos, TAM_BLOCO>>>(d_m);
}
cudaMemcpy(h_m, d_m, (divX + 2) * (divY + 2) * sizeof(double), cudaMemcpyDeviceToHost);
printM();
// NOTE(review): h_m is never free'd and d_m never cudaFree'd; the
// cudaDeviceReset() releases device memory but not the host allocation.
cudaDeviceReset();
return 0;
}
13,283 | #include<cuda.h>
#include<stdio.h>
#include<math.h>
#define TILE_WIDTH 32
/**
* This is a kernel MatrixMul function of parallel Matmul
*
* @param A Matrix (m,dim)
* @param B Matrix (dim,n)
* @param C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
__global__
void MatrixMulKernel(float* A,
float* B,
float* C,
int m,
int n,
int dim){
// Tiled multiply: each phase stages one TILE_WIDTH x TILE_WIDTH tile of A
// and B into shared memory, then accumulates the partial dot product.
__shared__ float Ads[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bds[TILE_WIDTH][TILE_WIDTH];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
// Identify the row and column of the C element to work on
// Each thread works on an element of C
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
// Calculate the number of phases (ceil so a ragged last tile is covered)
int phase_num = ceil(dim / (float)TILE_WIDTH);
bool cond0 = Row < m;
bool cond1 = Col < n;
float Cvalue = 0;
// Each thread loads 'Row'th row of A and 'Col'th column of B
for (int ph = 0; ph < phase_num; ++ph) {
// Out-of-range tile elements are zero-filled so the inner product below
// can run the full TILE_WIDTH without extra branching.
if(ph * TILE_WIDTH + tx < dim){
Ads[ty][tx] = (cond0)?A[Row * dim + ph*TILE_WIDTH + tx]:0;
}
else{
Ads[ty][tx] = 0;
}
if(ph * TILE_WIDTH + ty < dim){
Bds[ty][tx] = (cond1)?B[(ph*TILE_WIDTH + ty)*n + Col]:0;
}
else{
Bds[ty][tx] = 0;
}
// Barrier: tiles fully staged before any thread consumes them.
__syncthreads();
for (int k = 0; k < TILE_WIDTH; ++k) {
Cvalue += Ads[ty][k] * Bds[k][tx];
}
// Barrier: all reads done before the next phase overwrites the tiles.
__syncthreads();
}
if(cond0 && cond1){
C[Row * n + Col] = Cvalue;
}
}
/**
* This is a parallel Stub function of parallel Matmul
*
* @param h_A Matrix (m,dim)
* @param h_B Matrix (dim,n)
* @param h_C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
/* Host stub for the tiled MatrixMulKernel: allocates device buffers, copies
 * the inputs, launches the kernel, copies the result back, and prints the
 * elapsed time (CUDA events report milliseconds).
 *
 * @param h_A Matrix (m,dim)
 * @param h_B Matrix (dim,n)
 * @param h_C Result Matrix (m,n)
 * @param m number of row in h_A
 * @param n number of column in h_B
 * @param dim number of row in h_B
 */
void parallelMatMul(float* h_A,
float* h_B,
float* h_C,
int m,
int n,
int dim){
// Using device parallel calculate the result and finally print the time
cudaEvent_t start, stop;
float elapsedTime = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float *d_A, *d_B, *d_C;
size_t size_of_float = sizeof(float);
// Cast to size_t before multiplying so large matrices can't overflow int.
size_t size_A = (size_t)m * dim * size_of_float;
size_t size_B = (size_t)n * dim * size_of_float;
size_t size_C = (size_t)m * n * size_of_float;
cudaMalloc((void**)&d_A, size_A);
cudaMalloc((void**)&d_B, size_B);
cudaMalloc((void**)&d_C, size_C);
cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice);
// Invoke kernel: one TILE_WIDTH x TILE_WIDTH block per output tile
dim3 dimGrid(ceil(n/(float)(TILE_WIDTH)),ceil(m/(float)(TILE_WIDTH)),1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
MatrixMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, m, n, dim);
// Surface launch-configuration errors instead of failing silently later.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("MatrixMulKernel launch failed: %s\n", cudaGetErrorString(err));
cudaMemcpy(h_C, d_C, size_C, cudaMemcpyDeviceToHost);
// Free device memory for A, B, C
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// BUG FIX: cudaEventElapsedTime returns milliseconds; the old message
// labelled the value as seconds.
printf("Parallel invoke Matmul function need %.1f ms.\n",elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
/**
* This is a baseline kernel function of parallel Matmul
*
* @param A Matrix (m,dim)
* @param B Matrix (dim,n)
* @param C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
/* Naive one-thread-per-output matmul: C[Row][Col] = sum_k A[Row][k]*B[k][Col].
 * Threads falling outside the m x n result (grid overshoot) do nothing. */
__global__
void MatrixMulKernel_Baseline(float* A,
float* B,
float* C,
int m,
int n,
int dim){
    // Each thread owns one element of C.
    const int Row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    const int Col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    if (Row >= m || Col >= n)
        return;
    float acc = 0;
    for (int k = 0; k < dim; ++k)
        acc += A[Row * dim + k] * B[k * n + Col];
    C[Row * n + Col] = acc;
}
/**
* This is a baseline Parallel Stub function of parallel Matmul
*
* @param h_A Matrix (m,dim)
* @param h_B Matrix (dim,n)
* @param h_C Result Matrix (m,n)
* @param m number of row in h_A
* @param n number of column in h_B
* @param dim number of row in h_B
*/
/* Host stub for the baseline (untiled) Matmul kernel: allocates device
 * buffers, copies the inputs, launches the kernel, copies the result back,
 * and prints the elapsed time (CUDA events report milliseconds).
 *
 * @param h_A Matrix (m,dim)
 * @param h_B Matrix (dim,n)
 * @param h_C Result Matrix (m,n)
 * @param m number of row in h_A
 * @param n number of column in h_B
 * @param dim number of row in h_B
 */
void parallelMatMul_baseline(float* h_A,
float* h_B,
float* h_C,
int m,
int n,
int dim){
// Using device parallel calculate the result and finally print the time
cudaEvent_t start, stop;
float elapsedTime = 0.0f;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float *d_A, *d_B, *d_C;
size_t size_of_float = sizeof(float);
// Cast to size_t before multiplying so large matrices can't overflow int.
size_t size_A = (size_t)m * dim * size_of_float;
size_t size_B = (size_t)n * dim * size_of_float;
size_t size_C = (size_t)m * n * size_of_float;
cudaMalloc((void**)&d_A, size_A);
cudaMalloc((void**)&d_B, size_B);
cudaMalloc((void**)&d_C, size_C);
cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice);
// Invoke kernel: one TILE_WIDTH x TILE_WIDTH block per output tile
dim3 dimGrid(ceil(n/(float)(TILE_WIDTH)),ceil(m/(float)(TILE_WIDTH)),1);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH,1);
MatrixMulKernel_Baseline<<<dimGrid, dimBlock>>>(d_A, d_B, d_C, m, n, dim);
// Surface launch-configuration errors instead of failing silently later.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess)
printf("MatrixMulKernel_Baseline launch failed: %s\n", cudaGetErrorString(err));
cudaMemcpy(h_C, d_C, size_C, cudaMemcpyDeviceToHost);
// Free device memory for A, B, C
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
// BUG FIX: cudaEventElapsedTime returns milliseconds; the old message
// labelled the value as seconds.
printf("The baseline Parallel invoke Matmul function need %.1f ms.\n",elapsedTime);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
13,284 | /*-----------
*
* increment_kernel.cu
*
* This is the source file of an increment kernel.
*
* This kernel is from CUDA samples. asyncAPI.cu
*
* streamsOptBenchmark/increment_kernel.cu
*
* By Hao Li
*
*------------
*/
// #include "functions.h"
// #include <cuda_runtime.h>
/* Benchmark kernel (from the CUDA asyncAPI sample): copies g_idata into
 * g_odata and then increments each element, repeated 100x as artificial
 * work.
 * NOTE(review): there is no bounds check on idx — the launch must cover the
 * buffers exactly.  Also `i <= inc_value` performs inc_value + 1
 * increments; confirm the off-by-one is intended. */
__global__ void increment_kernel(float *g_idata, float *g_odata, int inc_value)
{
for(int l = 0; l < 100; l++)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// g_data[idx] += inc_values;
g_odata[idx] = g_idata[idx];
for(int i = 0; i <= inc_value; ++i){
g_odata[idx] += 1;
}
}
}
// int main(int argc, char **argv){
// int *h_data;
// }
|
13,285 | #include "includes.h"
/* Busy-wait for at least waitCycles device clock cycles, then report the
 * cycles actually spent through *cycles. */
__global__ void sleepKernel(double* cycles, int64_t waitCycles) {
    extern __shared__ int s[];  // unused; kept so launch configs stay valid
    const long long int begin = clock64();
    while (true) {
        const long long int elapsed = clock64() - begin;
        if (elapsed >= waitCycles)
            break;
    }
    *cycles = (double(clock64() - begin));
}
13,286 | #include <iostream>
#include <algorithm>
#include <cstdlib>
// y[i] <- a*x[i] + y[i] for i in [0, n); one thread per element with a
// guarded tail.
__global__
void saxpy( int n, float a, float* x, float* y )
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    y[idx] = a * x[idx] + y[idx];
}
/* Host driver: fills x with 1s and y with 2s, runs y = 2*x + y on the GPU,
 * and reports the max deviation from the expected value 4.0. */
int main( int argc, char** argv )
{
if( argc < 2 )
{
std::cout << "Usage: " << argv[0] << " <buffer size in MB>" << std::endl;
exit( 1 );
}
// BUG FIX: validate the size argument — atoi returns 0 (or negative) for
// bad input, which previously produced a zero or overflowing N.
int mb = atoi( argv[1] );
if( mb <= 0 )
{
std::cout << "Buffer size must be a positive integer (MB)" << std::endl;
exit( 1 );
}
unsigned int N = (1u << 20) * (unsigned int)mb; // Buffer size (elements)
std::cout << "Buffer size: " << N << std::endl;
float* x = new float[N];
float* y = new float[N];
float* d_x = NULL;
float* d_y = NULL;
// Check the device allocations so a too-large request fails loudly rather
// than letting the kernel scribble through null pointers.
if( cudaMalloc( &d_x, N*sizeof(float) ) != cudaSuccess ||
    cudaMalloc( &d_y, N*sizeof(float) ) != cudaSuccess )
{
std::cout << "cudaMalloc failed" << std::endl;
exit( 1 );
}
std::fill( x, x + N, 1.0f );
std::fill( y, y + N, 2.0f );
cudaMemcpy( d_x, x, N*sizeof(float), cudaMemcpyHostToDevice );
cudaMemcpy( d_y, y, N*sizeof(float), cudaMemcpyHostToDevice );
// Run kernel
int block_size = 256;
int num_blocks = (N + block_size - 1) / block_size;
saxpy<<<num_blocks, block_size>>>( N, 2.0f, d_x, d_y );
// The blocking memcpy below also synchronizes with the kernel.
cudaMemcpy( y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost );
float maxErr = 0.0f;
for( unsigned int i = 0; i < N; ++i )
maxErr = std::max( maxErr, std::abs(y[i] - 4.0f) );
std::cout << "Max error: " << maxErr << std::endl;
cudaFree( d_x );
cudaFree( d_y );
delete[] x;
delete[] y;
return 0;
}
13,287 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <sys/time.h>
#include <string.h>
/*
Row wise multiplication algorithm implemented in parallel. Accepts arbitrary numbers of equivalent size.
Both times will be printed to see the benefits of paralle computation for the large integer and answeres
will be verified based upon comparison of both algorithms (pass/fail). Regular row wise multiplication is
used and the carries for each product multiplication is dealt with by a sequential carry adder. The
multiplication algorithm is beneficially in the use of RSA encryption or Diffie Hellman key exchange. The algorithm could be implemented in other applications were large amounts of multiplication can be used in parallel to reduce computation time.
*/
__global__ void get_products(unsigned char a[], unsigned char b[], unsigned int accumulator[], unsigned int n);
/* Driver: multiplies two random n-byte big integers, once on the GPU
 * (get_products + host-side carry propagation) and once with a sequential
 * row-wise CPU algorithm, times both, and verifies the results match. */
int main(int argc, char *argv[]) {
if (argc != 3) {
printf("usage: ./a.out N ThreadsPerBlock\n");
exit(1);
}
printf("Version1, n = %s, threads = %s\n", argv[1], argv[2]);
unsigned int n = atoi(argv[1]);
unsigned int threads = atoi(argv[2]);
// NOTE(review): n is assumed to be a positive multiple of threads (the
// grid below is n/threads blocks with no remainder handling).
unsigned char *p = (unsigned char *) malloc(n);
unsigned char *q = (unsigned char *) malloc(n);
//replace with the ability to read in file
int t = 0;
unsigned char hex;
// Random operands, one byte (1..255, wrapped) at a time; rand() is not
// seeded, so runs are repeatable.
while(t < n) {
hex = (unsigned char) (rand() % 255) + 1;
p[t] = hex;
t++;
}
t = 0;
while(t < n) {
hex = (unsigned char) (rand() % 255) + 1;
q[t] = hex;
t++;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Row wise GPU version
////////////////////////////////////////////////////////////////////////////////////////////////////////////////
unsigned char *d_A;
cudaMalloc(&d_A, n);
unsigned char *d_B;
cudaMalloc(&d_B, n);
unsigned int *d_C;
cudaMalloc(&d_C, 2*n*sizeof(unsigned int));
cudaMemcpy(d_A, p, n, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, q, n, cudaMemcpyHostToDevice);
cudaMemset(d_C, 0, 2*n*sizeof(unsigned int));
dim3 blocksPerGrid(n/threads);
dim3 threadsPerBlock(threads);
cudaError_t error;
cudaEvent_t start;
error = cudaEventCreate(&start);
if(error != cudaSuccess)
printf("error\n");
cudaEvent_t stop;
error = cudaEventCreate(&stop);
if(error != cudaSuccess)
printf("error\n");
error = cudaEventRecord(start, NULL);
//call kernel to multiply a * b = c where a and b are of size n
get_products<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, n);
//compute final answer with sequential adder
unsigned char *final = (unsigned char *) malloc(2*n);
memset(final, 0, 2*n);
unsigned int *transfer = (unsigned int *) malloc(2*n*sizeof(unsigned int));
// The blocking cudaMemcpy below also synchronizes with the kernel launch,
// so the host-side carry pass (which is timed as part of "GPU time") sees
// the finished products.
cudaMemcpy(transfer, d_C, 2*n*sizeof(unsigned int), cudaMemcpyDeviceToHost);
unsigned int index = 0;
// NOTE(review): heap overflow — on the last iteration (index == 2*n - 1)
// the line below writes transfer[2*n], one element past the allocation.
while(index < 2*n) {
//cast to character and add to index of final result
final[index] = (unsigned char) transfer[index];
//collect the other three bytes and add to the next sequential
//integer index
transfer[index + 1] += (unsigned int) (transfer[index]>>8);
index++;
}
error = cudaEventRecord(stop, NULL);
error = cudaEventSynchronize(stop);
if(error != cudaSuccess)
printf("error\n");
float msecTotal = 0.0f;
error = cudaEventElapsedTime(&msecTotal, start, stop);// return is miliseconds
printf("GPU time: %.6f\n", msecTotal / 1000);
///////////////////////////////////////////////////////////////////////////////////////////////////
// Row Wise CPU for time comparison
//////////////////////////////////////////////////////////////////////////////////////////////////
unsigned char *cpu_result = (unsigned char *) malloc(2*n);
memset(cpu_result, 0, 2*n);
unsigned int multiplicand_position;
struct timeval cpu_start, cpu_end;
struct timezone tzp;
gettimeofday(&cpu_start, &tzp);
//loop through n rows of products
for(multiplicand_position = 0; multiplicand_position < n; multiplicand_position++) {
unsigned int result_position = multiplicand_position;
unsigned char result_carry = 0;
unsigned short cpu_product = 0;
unsigned int multiplier_position = 0;
unsigned short cpu_sum;
unsigned int loop = 0;
//loop through n multipliers
while(loop < n) {
//calculate the product of ch * ch
// NOTE(review): this inner cpu_sum shadows the outer declaration above.
unsigned short cpu_sum;
cpu_product = p[multiplier_position] * q[multiplicand_position];
multiplier_position++;
//calculate the sum of previous carry, current result index, and current product
cpu_sum = (cpu_result[result_position] + (cpu_product<<8>>8) + result_carry);
//shift carry bits from upper half of short sum
result_carry = (cpu_sum >> 8);
//update current indexs result
cpu_result[result_position] = cpu_sum;
result_position++;
loop++;
}
//compute final carry of last index from each row
cpu_sum = (cpu_result[result_position] + result_carry);
cpu_result[result_position] = cpu_sum;
//update carry for those rows which are not equal to n
result_carry = (cpu_sum >> 8);
// NOTE(review): heap overflow — for the last row (multiplicand_position ==
// n - 1) result_position is 2*n - 1 here, so the write below touches
// cpu_result[2*n], one byte past the allocation.
cpu_result[result_position+ 1] += result_carry;
}
gettimeofday(&cpu_end, &tzp); // return is in microseconds
printf("CPU time: %.6f\n", (cpu_end.tv_sec - cpu_start.tv_sec) + (cpu_end.tv_usec - cpu_start.tv_usec) / 1000000.0);
unsigned int err = 0;
unsigned int g = 0;
//compare for finding error in the result of cpu vs. gpu
while(g<2*n){
if(final[g] != cpu_result[g]) {
err++;
}
g++;
}
if(err == 0)
printf("PASS\n");
else
printf("FAIL\n");
//free memory
free(p);
free(q);
free(cpu_result);
free(final);
// NOTE(review): `transfer` is never freed.
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
return 0;
}
//each thread will compute a complete row of products where the index of the kernel array is the multiplicand
//for the specific threads multiplicand. The thread will loop through the other multilpiers to calculate a
//row of products. Atomically add to assure that data is not missed or overwritten.
//each thread will compute a complete row of products where the index of the kernel array is the multiplicand
//for the specific threads multiplicand. The thread will loop through the other multilpiers to calculate a
//row of products. Atomically add to assure that data is not missed or overwritten.
// Layout: accumulator has 2*n uint slots; the low byte of each product goes
// into slot (multiplier + index) and the high byte into the next slot, with
// carries resolved later on the host.  The launch must provide exactly n
// threads (grid = n/threads blocks), since there is no bounds check on index.
__global__ void get_products(unsigned char a[], unsigned char b[], unsigned int accumulator[], unsigned int n) {
int multiplier = 0;
unsigned int index = (blockIdx.x * blockDim.x) + threadIdx.x;// kernel index
unsigned int multiplicand = index;
//atomic add only uses integers so product will only store a short becuase atomic add integer
//cannot be casted to a short
unsigned int product = 0;
//loop through multipliers and find products
while(multiplier < n) {
//compute ch * ch and produce a short
product = (unsigned int) a[multiplier] * b[multiplicand];
// product<<24>>24 isolates the low 8 bits of the 16-bit product
atomicAdd(&accumulator[multiplier + index], product<<24>>24);
// product>>8 contributes the high bits to the next digit position
atomicAdd(&accumulator[multiplier + index + 1], product>>8);
multiplier++;
}
return;
}
|
13,288 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
// Adds 10 to arr[tid] on each of 100000 iterations (net +1,000,000 per
// element) for every index inside [0, size).
__global__ void vecMultiply(int *arr, int size){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    for (int iter = 0; iter < 100000; ++iter) {
        arr[idx] += 10;
    }
}
/* Host driver: 64 ints initialized to their index, incremented by 10 a
 * hundred-thousand times each on the GPU, then printed. */
int main(int argc, char *argv[]){
// Initialize
int elementSize = 64;
int threadsPerBlock = 32;
int blockSize = (elementSize+threadsPerBlock-1)/threadsPerBlock;
int *host_input_arr = (int*)malloc(sizeof(int) * elementSize);
int *host_output_arr = (int*)malloc(sizeof(int) * elementSize);
int *device_arr;
for(int i = 0;i<elementSize;i++){
host_input_arr[i] = i;
}
cudaMalloc((void**)&device_arr, sizeof(int) * elementSize);
cudaMemcpy(device_arr, host_input_arr, sizeof(int) * elementSize, cudaMemcpyHostToDevice);
vecMultiply<<<blockSize, threadsPerBlock>>>(device_arr, elementSize);
// BUG FIX: check the launch and synchronize *before* reading the results;
// the original called cudaDeviceSynchronize() after the (already blocking)
// copy, where it protected nothing.
cudaError_t err = cudaGetLastError();
if (err != cudaSuccess) {
printf("kernel launch failed: %s\n", cudaGetErrorString(err));
}
cudaDeviceSynchronize();
cudaMemcpy(host_output_arr, device_arr, sizeof(int) * elementSize, cudaMemcpyDeviceToHost);
for(int i = 0;i<elementSize;i++){
printf("%d ", host_output_arr[i]);
}
printf("\n");
cudaFree(device_arr);
free(host_input_arr);
free(host_output_arr);
return 0;
}
13,289 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <sys/resource.h>
#include <unistd.h>
#include <sys/types.h>
double myclock();
/* Assemble the (rows+1) x (columns+1) display matrix on the device:
 * matrix[0][0] = 0, the first row holds the unique KSU ids, the first
 * column the unique sample ids, and the body is filled from the
 * column-major AB genotype buffer.
 * NOTE(review): the kernel never reads thread/block indices — it is fully
 * serial, so it only makes sense launched as <<<1,1>>>.  `matrix` is an
 * int**, which requires a device-resident array of device row pointers;
 * confirm both at the (unseen) call site. */
__global__
void createMatrix(int** matrix, int rows, int columns, int *ksuCompact, int *sampleCompact, int *AB) {
int i, j;
//printf("Creating matrix...\n");
matrix[0][0] = 0;
//printf("matrix[0][0] = %d\n", matrix[0][0]);
// header row: unique KSU ids
for(i = 1; i < columns + 1;i++) {
matrix[0][i] = ksuCompact[i - 1];
}
// header column: unique sample ids
for(i = 1; i < rows + 1; i++) {
matrix[i][0] = sampleCompact[i - 1];
//printf("sampleCompact[%d] = %d, matrix[%d][0] = %d\n", i-1, sampleCompact[i-1], i, matrix[i][0]);
}
// body: AB is column-major with `rows` entries per column
for(j = 1; j < columns + 1; j++){
for(i = 1;i < rows + 1;i++) {
matrix[i][j] = AB[(i - 1) + (j - 1)*(rows)];
//printf("matrix[%d][%d] = %d, AB[%d] = %d\n", i, j, matrix[i][j], ((i - 1) + (j - 1)*rows), AB[(i - 1) + (j - 1)*rows]);
}
}
printf("finished createMatrix.\n");
}
/*
 * Collect the unique KSU ids of ksu[0..lines-1] into ksuCompact (order of
 * first appearance) and return how many unique ids were found.
 *
 * @param ksu        raw KSU id list
 * @param lines      number of entries in ksu
 * @param ksuCompact out: unique ids, first-appearance order (must hold
 *                   at least `lines` ints)
 * @return           count of unique ids (0 when lines <= 0)
 */
int getColumns(int *ksu, int lines, int *ksuCompact) {
    int i, j;
    int exists;
    int compactIndex = 1;
    if (lines <= 0)
        return 0;   /* robustness: nothing to scan */
    /* create compact array (unique ksuID's) — place first element */
    ksuCompact[0] = ksu[0];
    for (i = 1; i < lines; i++) {
        exists = 0;
        /* BUG FIX: only compare against the compactIndex entries actually
         * stored — the original scanned j < i, reading uninitialized
         * ksuCompact slots once any duplicate had been skipped, which could
         * produce false "already exists" matches. */
        for (j = 0; j < compactIndex; j++) {
            if (ksu[i] == ksuCompact[j]) {
                exists = 1;
                break;
            }
        }
        if (!exists) {
            ksuCompact[compactIndex] = ksu[i];
            compactIndex++;
        }
    }
    return compactIndex;
}
/*
 * Collect the unique sample ids of sample[0..lines-1] into sampleCompact
 * (order of first appearance) and return how many unique ids were found.
 *
 * @param lines         number of entries in sample
 * @param columns       unused; kept for call compatibility
 * @param sample        raw sample id list
 * @param sampleCompact out: unique ids, first-appearance order (must hold
 *                      at least `lines` ints)
 * @return              count of unique ids (0 when lines <= 0)
 */
int getRows(int lines, int columns, int *sample, int *sampleCompact) {
    int i, j;
    int exists;
    int compactIndex = 1;
    (void)columns;  /* unused parameter, kept for interface compatibility */
    if (lines <= 0)
        return 0;   /* robustness: nothing to scan */
    /* create compact array (unique sampleID's) — place first element */
    sampleCompact[0] = sample[0];
    for (i = 1; i < lines; i++) {
        exists = 0;
        /* BUG FIX: only compare against the compactIndex stored entries —
         * the original scanned j < i, reading uninitialized sampleCompact
         * slots once any duplicate had been skipped. */
        for (j = 0; j < compactIndex; j++) {
            if (sample[i] == sampleCompact[j]) {
                exists = 1;
                break;
            }
        }
        if (!exists) {
            sampleCompact[compactIndex] = sample[i];
            compactIndex++;
        }
    }
    return compactIndex;
}
// Reads (ksuID, sampleID, genotype) triples from ./rawdata.txt, builds the SNP matrix on
// the GPU (unique ksu IDs as columns, unique sample IDs as rows) and writes it to
// ./SNPmatrix.txt. Usage: <prog> <maxlines>.
int main(int argc, char **argv) {
// double tstart;
// double ttotal;
// struct rusage r_usage;
int i, j, err;
FILE *fd;
// NOTE(review): argv[1] is dereferenced without checking argc -- crashes if run with no argument.
int maxlines = atoi(argv[1]);
int nlines;
//nColumns and nRows refers to the number of columns and rows of DATA not total in matrix.
int nColumns;
int nRows;
int** matrixAB;
int *ksuID;
int *sampleID;
int *genotypeAB;
int *ksuIDCompact;
int *sampleIDCompact;
// tstart = myclock();
// tstart = myclock();
printf("Allocating memory...\n");
fflush(stdout);
//allocate memory for each line
ksuID = (int*) malloc(maxlines * sizeof(int));
sampleID = (int*) malloc(maxlines * sizeof(int));
genotypeAB = (int*) malloc(maxlines * sizeof(int));
ksuIDCompact = (int*) malloc(maxlines * sizeof(int));
sampleIDCompact = (int*) malloc(maxlines * sizeof(int));
/*
cudaMallocManaged(&ksuID, maxlines * sizeof(int));
cudaMallocManaged(&sampleID, maxlines * sizeof(int));
cudaMallocManaged(&genotypeAB, maxlines * sizeof(int));
cudaMallocManaged(&ksuIDCompact, maxlines * sizeof(int));
cudaMallocManaged(&sampleIDCompact, maxlines * sizeof(int));
*/
//assume input file is already 3 columns needed for data matrix
//Only need the unique animals and SNPs
fd = fopen("./rawdata.txt", "r");
printf("Opened rawdata.txt.\n");
if (fd != NULL) {
nlines = 0;
do {
// NOTE(review): only the last fscanf's return is tested (vs. EOF), so the final
// incomplete line is still counted; malformed input is not detected.
err = fscanf(fd, "%d", &ksuID[nlines]);
err = fscanf(fd, "%d", &sampleID[nlines]);
err = fscanf(fd, "%d", &genotypeAB[nlines]);
printf("%d %d %d\n", ksuID[nlines], sampleID[nlines], genotypeAB[nlines]);
nlines++;
} while(err != EOF && nlines < maxlines);
fclose(fd);
}
printf("File read successfully.\nWriting matrix...\n");
fflush(stdout);
/* output matrix: ksu ids in columns, sample ids in rows, genotypeAB everywhere else
* matrix looks like this...
* 0 1 2 3 ...
* 737 2 1 2 ...
* 926 2 1 3 ...
* 948 3 1 1 ...
* ... ... ... ...
*/
//get number of columns
nColumns = getColumns(ksuID, nlines, ksuIDCompact);
printf("nColumns = %d\n", nColumns);
//get number of rows
nRows = getRows(nlines, nColumns, sampleID, sampleIDCompact);
printf("nRows = %d\n", nRows);
// Managed memory so both the host (file writing below) and the single-thread kernel
// can touch matrixAB. NOTE(review): the kernel reads ksuIDCompact/sampleIDCompact/
// genotypeAB, which are plain malloc'd host memory -- this relies on the commented-out
// cudaMallocManaged variants above; verify on the target system.
//matrix = (int**) malloc((rows+1) * sizeof(int*));
cudaMallocManaged(&matrixAB, (nRows+1) * sizeof(int*));
for(i = 0; i < nRows+1; i++)
//matrix[i] = (int*) malloc((columns+1) * sizeof(int));
cudaMallocManaged(&matrixAB[i], (nColumns+1) * sizeof(int));
createMatrix<<<1,1>>>(matrixAB, nRows, nColumns, ksuIDCompact, sampleIDCompact, genotypeAB);
cudaDeviceSynchronize();
printf("Matrix created.\n");
//Write matrix to file
fd = fopen("./SNPmatrix.txt", "w");
if (fd != NULL) {
printf("SNPmatrix.txt created/opened\n");
//create first row
for(i = 0; i < nRows + 1; i++) {
for(j = 0; j < nColumns; j++) {
printf("i = %d, j = %d\n, matrix[i][j] = %d\n", i, j, matrixAB[i][j]);
fprintf(fd, "%d\t", matrixAB[i][j]);
}
fprintf(fd, "%d\n", matrixAB[i][nColumns]);
}
fclose(fd);
}
printf("Closed SNPmatrix.txt.\n");
// ttotal = myclock() - tstart;
//getrusage(RUSAGE_SELF, &r_usage);
// printf("SNPmatrix.txt created.\nExec. Time = %f, RAM Usage = %ld", ttotal, r_usage.ru_maxrss);
free(ksuID);
free(sampleID);
free(genotypeAB);
free(sampleIDCompact);
free(ksuIDCompact);
// NOTE(review): only the row-pointer array is freed; the per-row cudaMallocManaged
// buffers leak. Also, returning 1 signals failure to the shell -- likely unintended.
cudaFree(matrixAB);
return 1;
}
/* Returns the seconds elapsed (as a double, nanosecond resolution) since the first
 * call in this process; the first call itself returns a sub-second value. */
double myclock() {
static time_t first_sec = 0;
struct timespec now;
clock_gettime(CLOCK_REALTIME, &now);
if (first_sec == 0) {
first_sec = now.tv_sec; /* latch the epoch on first use */
}
return (double)(now.tv_sec - first_sec) + 1.0e-9 * now.tv_nsec;
}
|
13,290 | /*
* ------------------------------------------------------------------------------
*
* MIT License
*
* Copyright (c) 2021 Parallel Applications Modelling Group - GMAP
* GMAP website: https://gmap.pucrs.br
*
* Pontifical Catholic University of Rio Grande do Sul (PUCRS)
* Av. Ipiranga, 6681, Porto Alegre - Brazil, 90619-900
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* ------------------------------------------------------------------------------
*
* The original NPB 3.4 version was written in Fortran and belongs to:
* http://www.nas.nasa.gov/Software/NPB/
*
* ------------------------------------------------------------------------------
*
* The serial C++ version is a translation of the original NPB 3.4
* Serial C++ version: https://github.com/GMAP/NPB-CPP/tree/master/NPB-SER
*
* Authors of the C++ code:
* Dalvan Griebler <dalvangriebler@gmail.com>
* Gabriell Araujo <hexenoften@gmail.com>
* Júnior Löff <loffjh@gmail.com>
*
* ------------------------------------------------------------------------------
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define DEVICE_NAME_INFO 0
#define WARP_SIZE_INFO 1
#define THREADS_PER_BLOCK_INFO 2
/* Prints one property of the selected GPU, chosen by a functionality code:
 * 0 = device name, 1 = warp size, 2 = max threads per block.
 * Usage: <prog> <gpu_device_id> <functionality> */
int main(int argc, char** argv){
/* BUG FIX: argv[1]/argv[2] were dereferenced unconditionally; running without both
 * arguments was undefined behavior. Fail with a usage message instead. */
if(argc < 3){
fprintf(stderr, "usage: %s <gpu_device_id> <functionality>\n", argv[0]);
return 1;
}
int gpu_device_id = atoi(argv[1]);
int functionality = atoi(argv[2]);
cudaDeviceProp gpu_device_properties;
cudaSetDevice(gpu_device_id);
cudaGetDeviceProperties(&gpu_device_properties, gpu_device_id);
if(functionality==DEVICE_NAME_INFO){
printf("%s\n", gpu_device_properties.name);
} else if(functionality==WARP_SIZE_INFO){
printf("%d\n", gpu_device_properties.warpSize);
}else if(functionality==THREADS_PER_BLOCK_INFO){
printf("%d\n", gpu_device_properties.maxThreadsPerBlock);
}
return 0;
} |
13,291 | #include <stdio.h>
#include <cuda.h>
// Demo kernel: prints the result of __ffs (1-based position of the least significant
// set bit), which is 29 for 0xF0000000. The commented-out lines sketch the warp-vote
// aggregation (__ballot + __popc) that this snippet was reduced from.
__global__ void K() {
// Original: if (condition) atomicInc(&counter, 1000000);
//unsigned val = __ballot(condition);
// leader.
//unsigned wcount = __popc(val);
//if (threadIdx.x % 32 == 0) printf("%d\n", __popc(val));
printf("%d\n", __ffs(0xF0000000));
}
// Launches the demo kernel with a single thread and waits for its printf output to
// be flushed before exiting (device printf is only surfaced at a synchronizing call).
int main() {
K<<<1, 1>>>();
cudaDeviceSynchronize();
return 0;
}
|
13,292 | #include <iostream>
#include <vector>
using namespace std;
// Monte-Carlo pi helper: each thread counts how many of its assigned (x, y) samples fall
// inside the unit circle, then thread 0 of each block serially sums the per-thread counts
// into d_countInBlocks[blockIdx.x].
// NOTE(review): the shared array is hard-coded to 500 entries and is indexed by
// threadIdx.x, so the launch must use blockDim.x <= 500 (the host launches exactly 500).
// NOTE(review): the grid stride is computed from the num_blocks parameter, which callers
// must keep equal to gridDim.x -- confirm if the launch configuration ever changes.
__global__
void count_samples_in_circles(float* d_randNumsX, float* d_randNumsY, int* d_countInBlocks, int num_blocks, int nsamples)
{
__shared__ int shared_blocks[500];
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * num_blocks;
int inCircle = 0;
// Grid-stride style loop: each thread handles samples index, index+stride, ...
for (int i = index; i < nsamples; i+= stride) {
float xValue = d_randNumsX[i];
float yValue = d_randNumsY[i];
if (xValue*xValue + yValue*yValue <= 1.0f) {
inCircle++;
}
}
shared_blocks[threadIdx.x] = inCircle;
__syncthreads(); // all per-thread counts must be staged before thread 0 reads them
if (threadIdx.x == 0)
{
// Serial reduction by a single thread (simple, not performance-optimal).
int totalInCircleForABlock = 0;
for (int j = 0; j < blockDim.x; j++)
{
totalInCircleForABlock += shared_blocks[j];
}
d_countInBlocks[blockIdx.x] = totalInCircleForABlock;
}
}
int nsamples;
// Estimates pi by Monte-Carlo: generates nsamples random points in the unit square on
// the host, counts on the GPU how many fall inside the unit circle, and prints
// 4 * inside / total. Usage: <prog> <nsamples>.
int main(int argc, char* argv[]) {
// NOTE(review): this local shadows the file-scope `nsamples` global, and argv[1] is
// read without an argc check.
int nsamples = atoi(argv[1]);
printf("nsamples: %d\n", nsamples);
vector<float> h_randNumsX(nsamples);
vector<float> h_randNumsY(nsamples);
srand(time(NULL));
for (int i = 0; i < h_randNumsX.size(); ++i)
{
h_randNumsX[i] = float(rand()) / RAND_MAX;
h_randNumsY[i] = float(rand()) / RAND_MAX;
}
size_t size = nsamples * sizeof(float);
float* d_randNumsX;
float* d_randNumsY;
cudaMalloc(&d_randNumsX, size);
cudaMalloc(&d_randNumsY, size);
cudaMemcpy(d_randNumsX, &h_randNumsX.front(), size, cudaMemcpyHostToDevice);
cudaMemcpy(d_randNumsY, &h_randNumsY.front(), size, cudaMemcpyHostToDevice);
int threadsPerBlock = 500;
// NOTE(review): integer division makes num_blocks 0 for nsamples < 500000, which
// launches an empty grid and produces a 0/0-style estimate -- needs a minimum of 1.
int num_blocks = nsamples / (1000 * threadsPerBlock);
size_t countBlocks = num_blocks * sizeof(int);
int* d_countInBlocks;
cudaMalloc(&d_countInBlocks, countBlocks);
count_samples_in_circles<<<num_blocks, threadsPerBlock>>>(d_randNumsX, d_randNumsY, d_countInBlocks, num_blocks, nsamples);
if ( cudaSuccess != cudaGetLastError() )
cout << "Error!\n";
int* h_countInBlocks = new int[num_blocks];
// Blocking memcpy doubles as the synchronization point after the kernel launch.
cudaMemcpy(h_countInBlocks, d_countInBlocks, countBlocks, cudaMemcpyDeviceToHost);
int nsamples_in_circle = 0;
for (int i = 0 ; i < num_blocks; i++) {
nsamples_in_circle = nsamples_in_circle + h_countInBlocks[i];
}
cudaFree(d_randNumsX);
cudaFree(d_randNumsY);
cudaFree(d_countInBlocks);
// NOTE(review): h_countInBlocks is never delete[]d (leaks until exit).
float estimatedValue = 4.0 * float(nsamples_in_circle) / nsamples;
cout << "Estimated Value: " << estimatedValue << endl;
}
|
13,293 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#define MAX_NUM_LISTS 32
#define NUM_ELEM 256
#define ARR_SIZE (NUM_ELEM * sizeof(int))
#define MAX_VAL 0xFFFFFFFF
/**
 * This radix sort compares each binary bit of an integer, so it can only be used on
 * unsigned integers (a negative signed value always has its top bit set and would be
 * ordered after the positive values).
 * 256 random numbers are generated and split into 8 groups of 32, which can be viewed
 * as an 8 x 32 matrix; each column is treated as a list, and 32 threads radix-sort the
 * 8 elements of their own column. The 32 sorted lists are then merged in parallel --
 * 32 threads is exactly one warp.
 */
// Per-thread LSB-first radix sort of one strided "list": thread tid owns elements
// tid, tid+num_lists, tid+2*num_lists, ... of sort_tmp. For each of the 32 bit
// positions, elements are partitioned into a 0-bucket (compacted in place in sort_tmp)
// and a 1-bucket (staged in sort_tmp_1), then the 1-bucket is appended -- a stable
// pass, so after all 32 bits each list is fully sorted.
// NOTE(review): there is no barrier inside the bit loop; this is safe only because
// each thread touches exclusively its own stride-separated slots of both buffers.
__device__ void radix_sort(unsigned int *const sort_tmp, const unsigned int num_lists, const unsigned int num_elements, const unsigned int tid, unsigned int *const sort_tmp_1) {
for(unsigned int bit = 0; bit < 32; bit++) {
const unsigned int bit_mask = (1 << bit);
unsigned int base_cnt_0 = 0;
unsigned int base_cnt_1 = 0;
for (unsigned int i = 0; i < num_elements; i += num_lists) {
const unsigned int elem = sort_tmp[tid + i];
if((elem & bit_mask) > 0) {
sort_tmp_1[base_cnt_1 + tid] = elem;
base_cnt_1 += num_lists;
} else {
sort_tmp[tid + base_cnt_0] = elem;
base_cnt_0 += num_lists;
}
}
// Append the 1-bucket after the compacted 0-bucket.
for (unsigned int i = 0; i < base_cnt_1; i += num_lists) {
sort_tmp[base_cnt_0 + i + tid] = sort_tmp_1[i + tid];
}
}
__syncthreads(); // all lists sorted before the merge phase reads them
}
/**
 * Copy the input into shared memory; the sort runs there, and the merge step
 * writes the result back to the original (global) array.
 */
/** Stage the input array into shared memory. Each of the num_lists threads copies every
 *  num_lists-th element starting at its own tid; the barrier guarantees staging is
 *  complete before any thread reads sort_tmp. */
__device__ void copy_data_to_shared(const unsigned int *const data, unsigned int *const sort_tmp, const unsigned int num_lists, const unsigned int num_elements, const int tid) {
for (unsigned int base = 0; base < num_elements; base += num_lists) {
sort_tmp[base + tid] = data[base + tid];
}
__syncthreads();
}
/**
 * Merge the per-thread sorted lists into one sorted output array.
 */
// Parallel num_lists-way merge: each thread proposes the head of its own sorted list;
// an atomicMin tournament in shared memory picks the global minimum each round, the
// winning thread (lowest tid on ties) emits it and advances its list cursor. Runs
// num_elements rounds, producing dest_array fully sorted.
// NOTE(review): the barrier placement assumes all num_lists threads reach every
// __syncthreads() together -- callers must launch with exactly num_lists threads.
__device__ void merge_array(const unsigned int *const src_array, unsigned int *const dest_array, const unsigned int num_lists, const unsigned int num_elements, const unsigned int tid) {
const unsigned int num_elements_per_list = num_elements / num_lists;
__shared__ unsigned int list_indexs[MAX_NUM_LISTS]; // per-list read cursor
list_indexs[tid] = 0;
__syncthreads();
for (unsigned int i = 0; i < num_elements; i++) {
__shared__ unsigned int min_val;
__shared__ unsigned int min_tid;
unsigned int data;
// Each thread loads its list head, or MAX_VAL when its list is exhausted.
if (list_indexs[tid] < num_elements_per_list) {
const unsigned int src_idx = tid + (list_indexs[tid] * num_lists);
data = src_array[src_idx];
} else {
data = MAX_VAL;
}
if (tid == 0) {
min_val = MAX_VAL;
min_tid = MAX_VAL;
}
__syncthreads();
atomicMin(&min_val, data); // round 1: find the minimum candidate value
__syncthreads();
if (min_val == data) {
atomicMin(&min_tid, tid); // round 2: break ties by lowest thread id
}
__syncthreads();
if (tid == min_tid) {
// Sole winner consumes its element and writes the next output slot.
list_indexs[tid]++;
dest_array[i] = data;
}
}
}
// Top-level sort kernel (single block): stage into shared memory, radix-sort the
// per-thread strided lists, then merge the sorted lists back into global `data`.
// Shared buffers are fixed at NUM_ELEM entries, so num_elements must be <= NUM_ELEM.
__global__ void gpu_sort_array(unsigned int *const data, const unsigned int num_lists, const unsigned int num_elements) {
const unsigned int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
__shared__ unsigned int sort_tmp[NUM_ELEM];
__shared__ unsigned int sort_tmp_1[NUM_ELEM]; // scratch for the radix 1-bucket
copy_data_to_shared(data, sort_tmp, num_lists, num_elements, tid);
radix_sort(sort_tmp, num_lists, num_elements, tid, sort_tmp_1);
merge_array(sort_tmp, data, num_lists, num_elements, tid);
}
// Driver: fills an array with 256 random values in [0, 100], sorts it on the GPU with
// one warp (32 threads, matching MAX_NUM_LISTS), and prints before/after.
int main() {
unsigned int cpu_arr[NUM_ELEM];
unsigned int *gpu_arr;
srand((unsigned int)time(NULL));
unsigned int i;
for (i = 0; i < NUM_ELEM; i++) {
cpu_arr[i] = rand() % 101;
}
printf("before sort:\n");
for (i = 0; i < NUM_ELEM; i++) {
printf("%u ", cpu_arr[i]);
}
printf("\n");
cudaMalloc((void**)&gpu_arr, ARR_SIZE);
cudaMemcpy(gpu_arr, cpu_arr, ARR_SIZE, cudaMemcpyHostToDevice);
gpu_sort_array<<<1, 32>>>(gpu_arr, 32, NUM_ELEM);
cudaDeviceSynchronize();
cudaMemcpy(cpu_arr, gpu_arr, ARR_SIZE, cudaMemcpyDeviceToHost);
cudaFree(gpu_arr);
printf("after sort:\n");
for (i = 0; i < NUM_ELEM; i++) {
printf("%u ", cpu_arr[i]);
}
printf("\n");
return 0;
} |
13,294 | #include "includes.h"
// Empty no-op kernel; exists only so the translation unit contains device code.
__global__ void kernel(void){
} |
13,295 | #include <stdio.h>
// Prints the number of CUDA-capable devices visible to the runtime.
// NOTE(review): the cudaGetDeviceCount return status is ignored; on failure `count`
// may be left unset and garbage would be printed.
int main(void)
{
int count;
cudaGetDeviceCount(&count);
printf("Info : cudaGetDeviceCount = %d \n",count);
return 0;
}
|
13,296 | #include<stdio.h>
#include<stdlib.h>
// Element-wise addition of two N x N matrices stored row-major: C = A + B.
// 2D launch: x indexes the column, y indexes the row; out-of-range threads do nothing.
__global__ void VecAdd(float *A, float *B, float *C, int N)
{
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < N && row < N) {
int cell = row * N + col;
C[cell] = A[cell] + B[cell];
}
}
// Fills an n x n matrix (n*n consecutive floats) with uniform pseudo-random
// values in [0, 1]; consumes exactly n*n values from rand().
void RandomInit(float *data, int n)
{
int total = n * n;
for (int k = 0; k < total; k++) {
data[k] = rand() / (float)RAND_MAX;
}
}
// Interactively prompts (via msg) until the user enters an integer within
// [lowerBound, upperBound]; the accepted value is stored through `value`.
// NOTE(review): the scanf return is unchecked -- non-numeric input leaves *value
// unchanged (possibly uninitialized) and can loop forever without consuming input.
void readValue(int *value, char * msg, int lowerBound, int upperBound)
{
while(true)
{
printf("%s(%d-%d): ", msg, lowerBound, upperBound);
scanf("%d", value);
if(*value <= upperBound && *value >= lowerBound)
return;
}
}
// Interactive benchmark: adds two random N x N matrices on the CPU, then repeatedly on
// the GPU with user-chosen block/grid sizes, reporting timings, Gflops, speedup and the
// L2 norm of the CPU/GPU result difference.
int main()
{
//Have some variables required for loop counters.
int i;
//have variables for threads per block, number of blocks.
int threadsPerBlock = 0, blocksInGrid = 0;
//create cuda event variables
cudaEvent_t hostStart, hostStop, deviceStart, deviceStop;
float timeDifferenceOnHost, timeDifferenceOnDevice;
//program variables
int N = 0;
size_t size; //variable to have the size of arrays on device
//int *matA, *matB, *matC, *matCFromGPU; //matrices for host
float *h_A;
float *h_B;
float *h_C;
float *h_D;
float *d_A;
float *d_B;
float *d_C; //matrices for Device
//initialize cuda timing variables
cudaEventCreate(&hostStart);
cudaEventCreate(&hostStop);
cudaEventCreate(&deviceStart);
cudaEventCreate(&deviceStop);
printf("Enter the size: ");
scanf("%d",&N);
//calculate the size required on GPU
size = N * N * sizeof(float);
h_A = (float*)malloc(size);
h_B = (float*)malloc(size);
h_C = (float*)malloc(size);
RandomInit(h_A,N);
RandomInit(h_B,N);
printf("Adding matrices on CPU...\n");
cudaEventRecord(hostStart, 0);
for(i = 0 ; i < N * N; i ++)
h_C[i] = h_A[i] + h_B[i];
cudaEventRecord(hostStop, 0);
// NOTE(review): no cudaEventSynchronize(hostStop) before reading the elapsed time --
// cudaEventElapsedTime can fail/return garbage if the stop event is not yet recorded.
cudaEventElapsedTime(&timeDifferenceOnHost, hostStart, hostStop);
/**printf("Matrix addition over. Time taken on CPU: %5.5f\n", timeDifferenceOnHost);**/
printf("Processing time for CPU: %5.5f (ms)\n",timeDifferenceOnHost);
// NOTE(review): the Gflops formula uses 3*N, not N*N operations -- verify the intent.
printf("CPU: %fGflops\n",3*N/(1000000*timeDifferenceOnHost));
cudaMalloc((void**)&d_A,size);
cudaMalloc((void**)&d_B,size);
cudaMalloc((void**)&d_C,size);
cudaMemcpy(d_A,h_A,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,h_B,size,cudaMemcpyHostToDevice);
bool done = false;
while(!done)
{
h_D = (float *)malloc(size);
//create a proper grid block using dim3
readValue(&threadsPerBlock, "Enter no. of threads per block(input of 'P' will construct PxP threads in block)", 4, 32);
readValue(&blocksInGrid, "Enter no. of blocks in grid(input of 'P' will construct PxP blocks)", (N + threadsPerBlock -1)/threadsPerBlock, 65535);
printf("Threads Per block: %d, Blocks in grid: %d\n", threadsPerBlock, blocksInGrid);
printf("Adding matrices on GPU..\n");
dim3 blocks(threadsPerBlock, threadsPerBlock);
dim3 grid(blocksInGrid, blocksInGrid); //(matrixSize + threadsPerBlock - 1/blocks.x), (matrixSize + blocks.y - 1/blocks.y));
//call the kernels to execute
cudaEventRecord(deviceStart, 0);
printf("Total linear threads: %d\n", blocksInGrid*threadsPerBlock);
VecAdd<<<grid, blocks>>>(d_A, d_B, d_C, N);
cudaEventRecord(deviceStop, 0);
cudaEventSynchronize(deviceStop);
cudaEventElapsedTime(&timeDifferenceOnDevice, deviceStart, deviceStop);
printf("Processing time for GPU: %5.5f (ms)\n",timeDifferenceOnDevice);
printf("GPU: %fGflops\n",3*N/(1000000*timeDifferenceOnDevice));
//copy the result back into host memory
cudaMemcpy(h_D, d_C, size, cudaMemcpyDeviceToHost);
printf("Speedup: %5.5f\n", (float)timeDifferenceOnHost/timeDifferenceOnDevice);
double sum = 0.;
double diff;
for(int i = 0; i < N * N ; i++){
diff = abs(h_D[i]-h_C[i]);
sum += diff*diff;
}
sum = sqrt(sum);
printf("norm(h_C - h_D)=%20.15e\n\n",sum);
char c = 'n';
printf("Again?(y/n): ");
while(true)
{
c = getchar();
if(c == 'y' || c == 'n')
break;
}
// NOTE(review): on 'n' we break before free(h_D), leaking the last iteration's
// buffer; d_A/d_B/d_C are also never cudaFree'd (reclaimed only at process exit).
if(c == 'n')
break;
free(h_D);
}
free(h_A);
free(h_B);
free(h_C);
cudaEventDestroy(deviceStart);
cudaEventDestroy(deviceStop);
cudaEventDestroy(hostStart);
cudaEventDestroy(hostStop);
return 0;
}
|
13,297 | #include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
//device
// Empty kernel: launched once below purely to exercise a kernel launch.
__global__ void primer_kernel(){}
//host
// Initializes CUDA via the driver API, counts CUDA-capable devices, launches a trivial
// kernel and reports the device count (messages are in Spanish).
// NOTE(review): this mixes the driver API (cuInit/cuDeviceGetCount) with a runtime-API
// kernel launch, and there is no synchronization or error check after the launch;
// main also falls off the end (implicit return 0 in C++).
int main(int argc, char *argv[]){
int DeviceCount = 0;
// Initialize the CUDA driver API (must precede any other driver call).
if(cuInit(0) != 0){
printf("ERROR en la inicializacion\n");
exit(0);
}
// Query how many CUDA-capable devices are present.
cuDeviceGetCount(&DeviceCount);
if(DeviceCount == 0){
printf("ERROR, ningun dispositivo soporta CUDA\n");
return EXIT_FAILURE;
}
// Launch the kernel (1 block, 1 thread, no shared memory, default stream).
primer_kernel<<<1,1,0,0>>>();
printf("Se dispone de %d unidad(es) GPU\n", DeviceCount);
} |
13,298 | #include "includes.h"
/* This code will generate a Sobel image and a Gray Scale image. Uses OpenCV, to compile:
nvcc FinalProject.cu `pkg-config --cflags --libs opencv`
Copyright (C) 2018 Jose Andres Cortez Villao
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.*/
typedef enum color {BLUE, GREEN, RED} Color; //Constants that contains the values for each color of the image
/*The gray function obtain an average of each pixel and assigned to the correct position in the array using
Channels and step constants*/
/*The sobel function uses a convolution algorithm to obtain the edges of the image */
/* Sobel edge filter over a channel-interleaved image.
 * Launch layout expected: one block per row (blockIdx.x = row), one thread per
 * column (threadIdx.x = col); `step` is the row pitch in bytes.
 * BUG FIXES vs. the original:
 *  - the 3x3 neighbourhood is now sampled at (row+i, col+j); the original read
 *    src[(ren*step)+(col*channels)+CHAN+i+1], which never moved to neighbouring
 *    rows or columns, so no real convolution happened.
 *  - samples are accumulated from int pixels (the original staged them in a signed
 *    char array, overflowing for intensities > 127).
 *  - border pixels are skipped instead of reading out of bounds.
 *  - the gradient magnitude is clamped to 255 before the unsigned char cast. */
__global__ void sobel(unsigned char *src, unsigned char *dest, int width, int heigth, int step, int channels){
int gx[3][3]={{-1,0,1},{-2,0,2},{-1,0,1}}; // horizontal Sobel kernel
int gy[3][3]={{1,2,1},{0,0,0},{-1,-2,-1}}; // vertical Sobel kernel
int ren = blockIdx.x;  // row
int col = threadIdx.x; // column
// Skip the one-pixel border: the 3x3 window would fall outside the image there.
if (ren < 1 || ren >= heigth - 1 || col < 1 || col >= width - 1)
return;
// Same convolution for every interleaved channel (BLUE=0, GREEN=1, RED=2).
for (int c = 0; c < channels; c++) {
int sum_x = 0;
int sum_y = 0;
for (int i = -1; i < 2; i++) {
for (int j = -1; j < 2; j++) {
int pixel = (int) src[((ren + i) * step) + ((col + j) * channels) + c];
sum_x += pixel * gx[i + 1][j + 1];
sum_y += pixel * gy[i + 1][j + 1];
}
}
// Gradient magnitude, clamped to the unsigned char range.
int mag = (int) sqrtf((float)(sum_x * sum_x + sum_y * sum_y));
dest[(ren * step) + (col * channels) + c] = (unsigned char) ((mag > 255) ? 255 : mag);
}
} |
13,299 | //
// Tomás Oliveira e Silva, November 2017
//
// simple CUDA kernel (each thread writes one char)
//
//
// simple CUDA kernel (each thread writes one char)
//
// Threads 0..13 write "Hello, World!" plus its terminating '\0'; any thread beyond
// the message (but still inside the buffer) writes the filler 'X'. Threads outside
// [0, buffer_size) leave the buffer untouched.
extern "C" __global__
//__launch_bounds__(128,1)
void hello_kernel(char *buffer,int buffer_size)
{
const char msg[14] = {'H','e','l','l','o',',',' ','W','o','r','l','d','!','\0'};
int idx = (int)threadIdx.x + (int)blockDim.x * (int)blockIdx.x; // thread number
if(idx < 0 || idx >= buffer_size)
return;
buffer[idx] = (idx < 14) ? msg[idx] : 'X';
}
|
13,300 | /* Molecular dynamics simulation program */
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <math.h>
#include <cuda.h>
#define Rcut 2.5f // cutoff distance
#define DIM 3
#define dt 0.0005f
#define eCut -0.01631689114f // 4 * ((1 / ( pow (Rcut, 12))) - (1 / pow(Rcut,6))) // cutoff energy
#define mass 1.0f
#define PI 3.1415926535f
#define NUM_THREAD 64 // Number of threads per block
#define NUM_BLOCK (int) ceil (Natoms/(float)NUM_THREAD) // Numb of thread blocks
typedef struct {
float x, y, z; // coordinates
float w; // free parameter
} M_double4;
/****************************************************************************************/
/* first half kick for Verlet integration */
// Second half-kick of the velocity Verlet step: advances each velocity by dt/2 using
// the freshly computed accelerations, then stores the particle's kinetic energy in v.w.
__global__ void HalfStep (M_double4 *v, M_double4 *a, int Natoms){
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < Natoms){
v[i].x += 0.5f * dt * a[i].x;
v[i].y += 0.5f * dt * a[i].y;
v[i].z += 0.5f * dt * a[i].z;
v[i].w = 0.5f * mass * ((v[i].x * v[i].x) + (v[i].y * v[i].y) + (v[i].z * v[i].z)); // kinetic energy
}
}
/****************************************************************************************/
/* positions updates */
// First half of the velocity Verlet step: half-kick the velocities with the previous
// step's accelerations, then drift the positions by a full dt.
__global__ void Position (M_double4 *r, M_double4 *v, M_double4 *a, int Natoms){
int i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < Natoms){
v[i].x += 0.5f * dt * a[i].x;
v[i].y += 0.5f * dt * a[i].y;
v[i].z += 0.5f * dt * a[i].z;
r[i].x += v[i].x * dt;
r[i].y += v[i].y * dt;
r[i].z += v[i].z * dt;
}
}
/****************************************************************************************/
/* the function generates normal distributed number with Box-Muller algorithm */
/* Draws one approximately standard-normal sample via the Box-Muller transform.
 * BUG FIX: the original could feed x == 0 into log(), yielding -inf; we redraw
 * until x > 0. Float math functions (sqrtf/logf/cosf) replace the double ones to
 * avoid silent double promotion in this all-float code.
 * lrand48() returns [0, 2^31), so /RAND_MAX gives roughly uniform [0, 1]. */
float Gaussian (void){
float x, y, s;
do {
x = ((float)lrand48()/RAND_MAX);
} while (x <= 0.0f); /* logf(0) would be -inf */
y = ((float)lrand48()/RAND_MAX);
s = sqrtf (-2.0f * logf(x)) * cosf (2.0f * 3.1415926535f * y);
return s;
}
/****************************************************************************************/
/* calculus of forces with Lennard Jones potential and cutoff energy (see Molecular Simulation by D. Frenkel) */
// O(N^2) force evaluation with the truncated-and-shifted Lennard-Jones potential under
// periodic boundaries (minimum-image convention). One thread per particle i accumulates
// its acceleration in a[i].xyz and its potential-energy contribution in a[i].w.
__global__ void Acceleration(M_double4 *r, M_double4 *a, int Natoms, float rho){
int i, j, axis;
float d2, d2inv, d6inv, f, dr[DIM];
float L = powf(Natoms / rho, 1.0f/3); // cubic box side from number density
i = blockIdx.x*blockDim.x+threadIdx.x;
if (i < Natoms){
a[i].x = 0.0f;
a[i].y = 0.0f;
a[i].z = 0.0f;
a[i].w = 0.0f;
for (j=0;j<Natoms;j++){
if (i == j) continue;
dr[0] = r[i].x - r[j].x;
dr[1] = r[i].y - r[j].y;
dr[2] = r[i].z - r[j].z;
// Minimum-image convention: fold each separation component into [-L/2, L/2).
dr[0] -= floorf((dr[0] / L) + 0.5f) * L;
dr[1] -= floorf((dr[1] / L) + 0.5f) * L;
dr[2] -= floorf((dr[2] / L) + 0.5f) * L;
for (d2=0.0f, axis=0; axis<DIM; axis++) d2 += dr[axis] * dr[axis];
if (d2 < Rcut * Rcut){
d2inv = 1.0f / d2;
d6inv = d2inv * d2inv * d2inv;
// LJ force magnitude over distance: 24 (2/r^14 - 1/r^8) with sigma = eps = 1.
f = 24.0f * d2inv * d6inv * (2.0f * d6inv -1.0f);
a[i].x += f * dr[0];
a[i].y += f * dr[1];
a[i].z += f * dr[2];
// Pair potential energy, shifted by eCut so it vanishes at the cutoff.
a[i].w += 4.0f * d6inv * (d6inv - 1.0f) - eCut;
} /* endif d2 */
} /* endfor j */
} /* endfor i */
}
/****************************************************************************************/
/* Rescales all velocities by a common factor so the kinetic temperature of the
 * system equals T (equipartition: sum of v^2 over all atoms -> 3 N T). */
void Rescale (M_double4 *v, double T, int Natoms){
float speed2_total = 0.0f;
for (int k = 0; k < Natoms; k++)
speed2_total += (v[k].x * v[k].x) + (v[k].y * v[k].y) + (v[k].z * v[k].z);
/* scaling factor that sets the requested temperature */
float scale = sqrt (3.0f * T * Natoms / speed2_total);
for (int k = 0; k < Natoms; k++){
v[k].x *= scale;
v[k].y *= scale;
v[k].z *= scale;
}
}
/****************************************************************************************/
/* The function initializes the r (fcc lattice), v, a arrays and sets c.o.m. speed to zero */
// Initializes positions on an FCC lattice, draws Gaussian velocities, removes the
// center-of-mass drift, zeroes the accelerations and rescales to temperature T.
// NOTE(review): the `seed` parameter is accepted but never used here.
void Initialization (M_double4 *r, M_double4 *v, M_double4 *a, float T, int *seed, int Natoms, float rho){
int i, axis, k, M=1, nX, nY, nZ;
float vSum[DIM]={0.0f};
float L = pow(Natoms / rho, 1.0f/3); // total box length
// The four basis atoms of one FCC unit cell, in fractional cell coordinates.
float firstCell[4][3] = {
{0.25f, 0.25f, 0.25f},
{0.75f, 0.75f, 0.25f},
{0.75f, 0.25f, 0.75f},
{0.25f, 0.75f, 0.75f}};
while (4 * M * M * M < Natoms) M++; // M^3 is the number of unit cells needed to hold all Natoms
float l = L / M; // side length of a single unit cell
int n = 0;
// Place atoms cell by cell until Natoms positions are filled.
for (nX=0; nX<M;nX++)
for (nY=0; nY<M;nY++)
for (nZ=0; nZ<M;nZ++)
for (k=0; k<4; k++)
if (n<Natoms){
r[n].x = (nX + firstCell[k][0]) * l;
r[n].y = (nY + firstCell[k][1]) * l;
r[n].z = (nZ + firstCell[k][2]) * l;
n++;
}
// Gaussian velocities; accumulate the total for center-of-mass removal.
for (i=0;i<Natoms;i++){
v[i].x = Gaussian();
v[i].y = Gaussian();
v[i].z = Gaussian();
vSum[0] += v[i].x;
vSum[1] += v[i].y;
vSum[2] += v[i].z;
}
for (axis=0;axis<DIM;axis++) vSum[axis] /= Natoms;
for (i=0;i<Natoms;i++){
v[i].x -= vSum[0]; /* zero total momentum */
v[i].y -= vSum[1];
v[i].z -= vSum[2];
a[i].x = 0.0f;
a[i].y = 0.0f;
a[i].z = 0.0f;
}
Rescale (v, T, Natoms);
}
/****************************************************************************************/
// One velocity Verlet time step: half-kick + drift (Position), new forces
// (Acceleration), second half-kick + kinetic energy (HalfStep). Launches run on the
// default stream, so the three kernels execute in order.
void Evolution (M_double4 *r, M_double4 *v, M_double4 *a, int Natoms, float rho){
dim3 dimGrid (NUM_BLOCK, 1, 1);
dim3 dimBlock (NUM_THREAD, 1, 1);
Position <<<dimGrid, dimBlock>>>(r, v, a, Natoms);
Acceleration <<<dimGrid, dimBlock>>>(r, a, Natoms, rho);
HalfStep <<<dimGrid, dimBlock>>>(v, a, Natoms);
}
/****************************************************************************************/
// Benchmark driver: for a grid of densities (rho) and particle counts (Natoms), runs
// stepLimit+1 MD steps on the GPU and writes the average time per step to cuda_time.dat.
int main () {
int stepCount, stepLimit, seed, Natoms;
float T, mtime;
float rho;
long seconds, useconds;
M_double4 *h_r, *h_v, *h_a;
M_double4 *d_r, *d_v, *d_a;
struct timeval start, end;
cudaEvent_t gpu_start, gpu_stop;
float gpu_runtime;
FILE *fp;
seed = time(0);
srand48(seed);
fp = fopen("cuda_time.dat","w+");
stepLimit = 100;
T = 0.5f;
rho = 0.2f;
while (rho<1.0f){
for (Natoms=10;Natoms<=1500;Natoms+=100){
// NUM_BLOCK is a macro that reads the Natoms variable in scope here; the buffers
// are padded up to a whole number of blocks.
size_t size = NUM_BLOCK * NUM_THREAD * sizeof ( M_double4 );
/* allocating memory on the host */
h_r = ( M_double4 * ) malloc ( size );
h_v = ( M_double4 * ) malloc ( size );
h_a = ( M_double4 * ) malloc ( size );
/* allocating memory on the device */
cudaMalloc (&d_r, size);
cudaMalloc (&d_v, size);
cudaMalloc (&d_a, size);
Initialization (h_r, h_v, h_a, T, &seed, Natoms, rho);
/* copying data from host to device */
cudaMemcpy (d_r, h_r, size, cudaMemcpyHostToDevice);
cudaMemcpy (d_v, h_v, size, cudaMemcpyHostToDevice);
cudaMemcpy (d_a, h_a, size, cudaMemcpyHostToDevice);
gettimeofday (&start, NULL);
// NOTE(review): events are created on every iteration but never destroyed
// (cudaEventDestroy is missing) -- resource leak over the sweep.
cudaEventCreate (&gpu_start);
cudaEventCreate (&gpu_stop);
cudaEventRecord (gpu_start, 0);
/* main cycle */
for (stepCount=0; stepCount<=stepLimit; stepCount++){
Evolution (d_r, d_v, d_a, Natoms, rho);
}
cudaEventRecord (gpu_stop, 0);
cudaEventSynchronize (gpu_stop);
cudaEventElapsedTime (&gpu_runtime, gpu_start, gpu_stop);
gettimeofday (&end, NULL);
seconds = end.tv_sec - start.tv_sec;
useconds = end.tv_usec - start.tv_usec;
mtime = (float)seconds + (float)useconds/1000000.0f;
fprintf (fp, "%d\t%.5lf\n", Natoms, gpu_runtime /*1000 * mtime*/ / (stepLimit + 1));
free (h_r); // freeing host memory
free (h_v);
free (h_a);
cudaFree (d_r); // freeing device memory
cudaFree (d_v);
cudaFree (d_a);
}
fprintf (fp,"\n\n"); // blank records separate the per-rho data sets (gnuplot style)
rho += 0.5f;
}
// NOTE(review): the format string has no conversion for the mtime argument passed here.
printf ("\nComputational times in file cuda_time.dat\n", mtime);
fclose(fp);
}
/****************************************************************************************/
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.