serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
19,501 | /* Cuda GPU Based Program that use GPU processor for finding cosine of numbers */
/* --------------------------- header secton ----------------------------*/
#include<stdio.h>
#include<cuda.h>
#define COS_THREAD_CNT 10
#define N 10
/* --------------------------- target code ------------------------------*/
struct cosParams {
float *arg;
float *res;
int n;
};
struct myParams {
int *number;
int n;
};
/* Dummy kernel: strides over parms.n with COS_THREAD_CNT threads; every
   iteration of every thread overwrites the SAME element parms.number[0].
   The final value is whichever thread's last write wins (nondeterministic).
   NOTE(review): looks like scratch/demo code — parms.number[1..] is never
   touched; confirm this racy single-slot write is intentional. */
__global__ void myFunction(struct myParams parms)
{
int i;
for (i = threadIdx.x; i < parms.n; i += COS_THREAD_CNT)
{
/* all threads write index 0, not index i — last writer wins */
parms.number[0] = i;
}
}
/* Kernel: res[i] = fast-math cosine of arg[i] for i in [0, n).
   Designed for a single block of COS_THREAD_CNT threads; each thread
   handles every COS_THREAD_CNT-th element. */
__global__ void cos_main(struct cosParams parms)
{
    for (int idx = threadIdx.x; idx < parms.n; idx += COS_THREAD_CNT)
        parms.res[idx] = __cosf(parms.arg[idx]);
}
/* --------------------------- host code ------------------------------*/
/* Host driver: builds arg[i] = i for i in [0, N), computes cos on the GPU
   via cos_main, copies results back and prints each pair.
   Fixes vs. original: checks host allocations, removes the unused
   myParams local, gives the H2D-failure printf a matching %d specifier,
   frees host and device memory, and returns a status code. */
int main (int argc, char *argv[])
{
    int i = 0;
    cudaError_t cudaStat;
    float* cosRes = 0;   /* device output buffer */
    float* cosArg = 0;   /* device input buffer */
    float* arg = (float *) malloc(N*sizeof(arg[0]));
    float* res = (float *) malloc(N*sizeof(res[0]));
    struct cosParams funcParams;
    if (arg == 0 || res == 0) {
        printf(" Host memory allocation failed\n");
        return 1;
    }
    /* fill arguments array "arg" */
    for (i = 0; i < N; i++) {
        arg[i] = (float)i;
    }
    cudaStat = cudaMalloc ((void **)&cosArg, N * sizeof(cosArg[0]));
    if( cudaStat )
        printf(" value = %d : Memory Allocation on GPU Device failed\n", cudaStat);
    cudaStat = cudaMalloc ((void **)&cosRes, N * sizeof(cosRes[0]));
    if( cudaStat )
        printf(" value = %d : Memory Allocation on GPU Device failed\n", cudaStat);
    cudaStat = cudaMemcpy (cosArg, arg, N * sizeof(arg[0]), cudaMemcpyHostToDevice);
    if( cudaStat )
        /* original passed cudaStat with no %d in the format string */
        printf(" value = %d : Memory Copy from Host to Device failed.\n", cudaStat);
    funcParams.res = cosRes;
    funcParams.arg = cosArg;
    funcParams.n = N;
    cos_main<<<1,COS_THREAD_CNT>>>(funcParams);
    cudaStat = cudaMemcpy (res, cosRes, N * sizeof(cosRes[0]), cudaMemcpyDeviceToHost);
    if( cudaStat )
        printf(" value = %d : Memory Copy from Device to Host failed.\n", cudaStat);
    for(i=0; i < N; i++ ){
        printf("cosf(%f) = %f \n", arg[i], res[i] );
    }
    /* release device and host memory (original leaked all four buffers) */
    cudaFree(cosArg);
    cudaFree(cosRes);
    free(arg);
    free(res);
    return 0;
}
/* nvcc cosine.cu -use_fast_math */
|
19,502 | #include "BitMapper.cuh"
// Get the index of a field in the bit array. The field contains the bit corresponding to value.
// Returns the index of the 32-bit field holding the bit for `value`.
__host__ __device__ unsigned int BitMapper::getIndexInBitArray(unsigned int value)
{
    // Each field stores 32 bits; unsigned division by 32 selects the field
    // (compiles to the same shift as the original `value >> 5`).
    return value / 32u;
}
// Get the position of a bit in a field in the bit array. The bit corresponds to value.
// Returns the bit position (0..31) of `value` within its 32-bit field.
__host__ __device__ unsigned int BitMapper::getPosInBitField(unsigned int value)
{
    // Remainder modulo the field width (same as masking with 31).
    return value % 32u;
}
// Converts a bit array to a vector
// Expands a bit array into the ascending list of values whose bits are set.
// Bit j of field i corresponds to the value i*32 + j.
vector<unsigned int> BitMapper::bitArrayToVector(unsigned int* bitArray, unsigned int arraySize)
{
    vector<unsigned int> values;
    for (unsigned int field = 0; field < arraySize; field++)
    {
        unsigned int bits = bitArray[field];
        // Walk the set bits from least to most significant; stop as soon
        // as no bits remain in this field.
        for (unsigned int pos = 0; bits != 0; pos++, bits >>= 1)
        {
            if (bits & 1u)
                values.push_back(field * 32 + pos);
        }
    }
    return values;
}
// Adds a value to a bit array
// Sets the bit corresponding to `value` in the bit array (host side).
__host__ void BitMapper::addValueToBitArray(unsigned int value, unsigned int* bitArray)
{
    unsigned int arrayIndex = getIndexInBitArray(value);
    unsigned int posInField = getPosInBitField(value);
    // Use an unsigned literal: the original `1 << pos` shifts a signed int,
    // which is undefined behavior when pos == 31.
    bitArray[arrayIndex] |= (1u << posInField);
}
// Merges two bit arrays so that all set bits are contained in one array
// ORs every field of sourceArray into targetArray so targetArray ends up
// containing the union of both bit sets (host side).
__host__ void BitMapper::mergeBitArrays(unsigned int* targetArray, unsigned int* sourceArray, unsigned int arraySize)
{
    // Unsigned index: the original compared a signed int against the
    // unsigned arraySize (implementation-defined wrap for huge sizes,
    // and a compiler warning).
    for (unsigned int i = 0; i < arraySize; i++)
    {
        targetArray[i] |= sourceArray[i];
    }
}
// Adds a value to a bit array on the device (requires atomic operation)
// Sets the bit corresponding to `value` on the device; atomicOr makes
// concurrent updates from many threads safe.
__device__ void BitMapper::dev_addValueToBitArray(unsigned int value, unsigned int* bitArray)
{
    unsigned int arrayIndex = getIndexInBitArray(value);
    unsigned int posInField = getPosInBitField(value);
    // Unsigned literal avoids the signed-shift UB of `1 << 31`.
    atomicOr(bitArray + arrayIndex, (1u << posInField));
}
// Merges two bit arrays on the device (requires atomic operation)
// ORs every field of sourceArray into targetArray on the device;
// atomicOr makes concurrent merges from many threads safe.
__device__ void BitMapper::dev_mergeBitArrays(unsigned int* targetArray, unsigned int* sourceArray, unsigned int arraySize)
{
    // Unsigned index to match arraySize (the original mixed signed and
    // unsigned in the comparison).
    for (unsigned int i = 0; i < arraySize; i++)
    {
        atomicOr(targetArray + i, sourceArray[i]);
    }
}
|
19,503 | #include <stdio.h>
#include <iostream>
#include <chrono>
/*
__global__ void VecAdd(float* A, float *B, float *C)
{
int idx = threadIdx.x;
C[idx] = A[idx] + B[idx];
}
// Matrix Addtion using 1 block (threadIdx has limitation about 1024)
__global__ void MatAdd(float A[N][N], float B[N][N], float C[N][N])
{
int idx1 = threadIdx.x;
int idx2 = threadIdx.y;
C[idx1][idx2] = A[idx1][idx2] + B[idx1][idx2];
}
// Matrix Addition using multiple blocks to solve the problem of limitation of thread
__global__ void MatAdd(float A[N][N], float B[N][N], float C[N][N])
{
int idx1 = blockIdx.x * blockDim.x + threadIdx.x;
int idx2 = blockIdx.y * blockDim.y + threadIdx.y;
C[idx1][idx2] = A[idx1][idx2] + B[idx1][idx2];
}
*/
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;
int height;
float *elements;
} Matrix;
// Thread block size
#define BLOCK_SIZE 32
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix multiplication - Host code.
// Copies A and B to the device, launches MatMulKernel, copies C back, and
// prints transfer/kernel timings. Matrix dimensions are assumed to be
// multiples of BLOCK_SIZE (the grid has no remainder blocks).
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Load A to device memory
    Matrix d_A;
    d_A.width = A.width; d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(float);
    auto time_memcpy1_start = std::chrono::high_resolution_clock::now();
    cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    auto time_memcpy1_end = std::chrono::high_resolution_clock::now();
    std::cout << "Memcpy Time : " << (double)std::chrono::duration_cast<std::chrono::microseconds>(time_memcpy1_end - time_memcpy1_start).count() / 1000000. << " seconds" << std::endl;
    // Load B to device memory
    Matrix d_B;
    d_B.width = B.width; d_B.height = B.height;
    size = B.width * B.height * sizeof(float);
    auto time_memcpy2_start = std::chrono::high_resolution_clock::now();
    cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    auto time_memcpy2_end = std::chrono::high_resolution_clock::now();
    std::cout << "Memcpy Time : " << (double)std::chrono::duration_cast<std::chrono::microseconds>(time_memcpy2_end - time_memcpy2_start).count() / 1000000. << " seconds" << std::endl;
    // Allocate C in device memory
    Matrix d_C;
    d_C.width = C.width; d_C.height = C.height;
    size = C.width * C.height * sizeof(float);
    cudaMalloc(&d_C.elements, size);
    // Kernel part
    auto time_kernel_start = std::chrono::high_resolution_clock::now();
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // Kernel launches are asynchronous: without this synchronize the end
    // timestamp only measured launch overhead, and the kernel cost was
    // silently folded into the following cudaMemcpy timing.
    cudaDeviceSynchronize();
    auto time_kernel_end = std::chrono::high_resolution_clock::now();
    std::cout << "Kernel Time : " << (double)std::chrono::duration_cast<std::chrono::microseconds>(time_kernel_end - time_kernel_start).count() / 1000000. << " seconds" << std::endl;
    // Read C from device memory
    auto time_memcpy3_start = std::chrono::high_resolution_clock::now();
    cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    auto time_memcpy3_end = std::chrono::high_resolution_clock::now();
    std::cout << "Memcpy Time : " << (double)std::chrono::duration_cast<std::chrono::microseconds>(time_memcpy3_end - time_memcpy3_start).count() / 1000000. << " seconds" << std::endl;
    // Free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// Computes one element of C = A * B per thread (row-major matrices).
// Expects a 2D launch where thread (x, y) maps to column/row of C.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    float Cval = 0;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: the original had no bounds check, so any launch whose grid
    // overshoots the matrix wrote out of bounds.
    if (row >= C.height || col >= C.width)
        return;
    for (int i = 0; i < A.width; i++)
    {
        Cval += A.elements[row * A.width + i] * B.elements[i * B.width + col];
    }
    // Single global store after accumulation; the original stored C inside
    // the loop, issuing A.width redundant global writes per element.
    C.elements[row * C.width + col] = Cval;
}
// Test driver: multiplies two 1024x1024 all-ones matrices twice (the second
// run shows warmed-up timings) and prints the first row's leading entries
// (expected value: 1024 for each).
int main()
{
    int test_dim = 1024;
    Matrix A, B, C;
    A.width = test_dim; A.height = test_dim;
    B.width = test_dim; B.height = test_dim;
    C.width = test_dim; C.height = test_dim;
    A.elements = new float[test_dim * test_dim];
    B.elements = new float[test_dim * test_dim];
    C.elements = new float[test_dim * test_dim];
    for(int i = 0; i < test_dim; i++)
    {
        for(int j = 0; j < test_dim; j++)
        {
            *(A.elements + i * test_dim + j) = 1;
            *(B.elements + i * test_dim + j) = 1;
        }
    }
    auto time_start = std::chrono::high_resolution_clock::now();
    MatMul(A, B, C);
    auto time_end = std::chrono::high_resolution_clock::now();
    std::cout << "MatMul 1 Time : " << (double)std::chrono::duration_cast<std::chrono::microseconds>(time_end - time_start).count() / 1000000. << " seconds" << std::endl;
    time_start = std::chrono::high_resolution_clock::now();
    MatMul(A, B, C);
    time_end = std::chrono::high_resolution_clock::now();
    std::cout << "MatMul 2 Time : " << (double)std::chrono::duration_cast<std::chrono::microseconds>(time_end - time_start).count() / 1000000. << " seconds" << std::endl;
    for(int i = 0; i < 10; i++)
    {
        std::cout << "C[0][" << i << "] = " << C.elements[i] << std::endl;
    }
    // Release host matrices (the original leaked all three new[] buffers).
    delete[] A.elements;
    delete[] B.elements;
    delete[] C.elements;
    return 0;
}
|
19,504 | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <math.h>
#include <cuda_runtime.h>
#define N (1 << 25)
#define blocksize 8
// Checks the most recent CUDA runtime error; on failure prints `msg` with
// the CUDA error string to stderr and terminates the process.
// Note: cudaGetLastError() also clears the sticky error state.
void checkCUDAError(const char *msg) {
cudaError_t err = cudaGetLastError();
if( cudaSuccess != err) {
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString( err) );
exit(EXIT_FAILURE);
}
}
// Fills all N elements of arr with the constant 1.
// (The commented-out alternative filled with random values in [0, 100).)
void fillArray(float *arr){
    int i = 0;
    while (i < N) {
        arr[i] = 1;
        //arr[i] = rand() % 100;
        ++i;
    }
}
// Sequential CPU reduction: adds all N elements of a into *out, in index
// order. *out is not zeroed here — the caller supplies the initial value.
void seqSum(float *a, float *out){
    float *end = a + N;
    for (float *p = a; p != end; ++p) {
        *out += *p;
    }
}
// Block-wise tree reduction in GLOBAL memory: each block reduces its
// blockDim.x-element segment of a in place (halving stride each round),
// then thread 0 of the block atomically adds the block's partial sum into
// *out. Destroys the contents of a.
// Requires blockDim.x to be a power of two and *out pre-zeroed by the
// caller — TODO confirm both at every launch site.
__global__ void gpuGlobalSum(float *a, float *out){
int i = threadIdx.x + blockDim.x*blockIdx.x;
int id = threadIdx.x;
for(int k = blockDim.x/2; k > 0; k= k/2){
if(id < k){
// pairwise add within this block's segment of a
a[i] += a[i + k];
}
// barrier is outside the divergent if — all threads reach it
__syncthreads();
}
if(id == 0){
// one atomic per block: a[i] now holds the block's partial sum
atomicAdd(out, a[i]);
}
}
// Block-wise tree reduction in SHARED memory: each thread stages one
// element of a into psum (0 for out-of-range indices), the block reduces
// psum with a halving stride, and thread 0 atomically adds the partial
// sum into *out. Unlike gpuGlobalSum this leaves a untouched.
// Requires blockDim.x == blocksize (shared array size), blockDim.x a power
// of two, and *out pre-zeroed by the caller — TODO confirm at launch site.
__global__ void gpuSharedSum(float *a, float *out){
__shared__ float psum[blocksize];
int i = threadIdx.x + blockDim.x*blockIdx.x;
int id = threadIdx.x;
// stage into shared memory; pad with 0 past the end of the input
psum[id] = (i < N) ? a[i]:0;
__syncthreads();
for(int k = blockDim.x/2; k > 0; k = k/2){
if(id < k){
psum[id] += psum[id + k];
}
// barrier after every round so all partial writes are visible
__syncthreads();
}
if(id == 0){
atomicAdd(out, psum[id]);
}
}
// Benchmark driver: sums N (= 2^25) ones three ways — sequential CPU loop,
// global-memory GPU reduction, shared-memory GPU reduction — timing each
// with CUDA events, verifying GPU results against the CPU result, and
// printing throughput plus a CSV summary line.
// NOTE(review): with N = 2^25 the sequential float accumulation saturates
// near 2^24 (adding 1.0f to 16777216.0f leaves it unchanged), while the
// GPU tree reductions compute the exact 2^25 — so the `h_result ==
// h_global[0]` equality checks below are expected to fail; verify intent.
// NOTE(review): d_out1/d_out2 are allocated N floats but only element 0 is
// ever used — presumably an oversight, N*sizeof(float) is ~128 MB each.
int main(void){
// Setup time variables
float timecpu = 0;
float timegpug = 0;
float timegpus = 0;
float tpcpu = 0;
float tpgpug = 0;
float tpgpus = 0;
cudaEvent_t launch_begin_seq, launch_end_seq;
// Host variables
float *h_vec = (float*)malloc(N*sizeof(float));
float h_result = 0.0;
float *h_global = (float*)malloc(N*sizeof(float));
float *h_shared = (float*)malloc(N*sizeof(float));
//Device variables
float *d_vec1, *d_vec2, *d_out1, *d_out2;
cudaMalloc((void**)&d_vec1, N*sizeof(float));
cudaMalloc((void**)&d_vec2, N*sizeof(float));
cudaMalloc((void**)&d_out1, N*sizeof(float));
cudaMalloc((void**)&d_out2, N*sizeof(float));
// Check Memory Allocation
if(h_vec == 0 || h_global == 0 || h_shared == 0 || d_vec1 == 0 || d_vec2 == 0 || d_out1 == 0 || d_out2 == 0){
printf("Memory Allocation Failed!\n");
return 1;
}
// Fill Array
fillArray(h_vec);
// Create time variables
cudaEventCreate(&launch_begin_seq);
cudaEventCreate(&launch_end_seq);
//Start CPU sum
cudaEventRecord(launch_begin_seq,0);
seqSum(h_vec, &h_result);
cudaEventRecord(launch_end_seq,0);
cudaEventSynchronize(launch_end_seq);
cudaEventElapsedTime(&timecpu, launch_begin_seq, launch_end_seq);
printf("CPU time: %f ms\n", timecpu);
printf("Sum = %f\n\n", h_result);
// throughput: N additions over the elapsed seconds, in Gflops/s
tpcpu = 1e-9*N/(timecpu*1e-3);
printf("Throughput = %f Gflops/s\n\n", tpcpu);
// Prep Grid and Block variables
dim3 dimGrid(N/blocksize);
dim3 dimBlock(blocksize);
// Prep device memory
cudaMemset(d_vec1, 0, N*sizeof(float));
cudaMemcpy(d_vec1, h_vec, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemset(d_vec2, 0, N*sizeof(float));
cudaMemcpy(d_vec2, h_vec, N*sizeof(float), cudaMemcpyHostToDevice);
// zero the accumulators the kernels atomicAdd into
cudaMemset(d_out1, 0, N*sizeof(float));
cudaMemset(d_out2, 0, N*sizeof(float));
// Create time variables (re-created; the earlier events are overwritten)
cudaEventCreate(&launch_begin_seq);
cudaEventCreate(&launch_end_seq);
// Start global GPU sum
cudaEventRecord(launch_begin_seq,0);
gpuGlobalSum<<<dimGrid, dimBlock>>>(d_vec1, d_out1);
cudaEventRecord(launch_end_seq,0);
cudaEventSynchronize(launch_end_seq);
// Copy Memory back to Host (only h_global[0] is meaningful)
cudaMemcpy(h_global, d_out1, N*sizeof(float), cudaMemcpyDeviceToHost);
// Check For Cuda Errors
checkCUDAError("gpuGlobalSum");
if(h_result == h_global[0]){
cudaEventElapsedTime(&timegpug, launch_begin_seq, launch_end_seq);
printf("Sum Successful!\n");
printf("Global Memory GPU time: %f ms\n\n", timegpug);
tpgpug = 1e-9*N/(timegpug*1e-3);
printf("Throughput = %f Gflops/s\n\n", tpgpug);
}else{
printf("Sum Failed!\n");
printf("Expected = %f\nReceived = %f\n", h_result, h_global[0]);
}
// Create time variables
cudaEventCreate(&launch_begin_seq);
cudaEventCreate(&launch_end_seq);
// Start shared GPU sum
cudaEventRecord(launch_begin_seq,0);
gpuSharedSum<<<dimGrid, dimBlock>>>(d_vec2, d_out2);
cudaEventRecord(launch_end_seq,0);
cudaEventSynchronize(launch_end_seq);
// Copy Memory back to Host (only h_shared[0] is meaningful)
cudaMemcpy(h_shared, d_out2, N*sizeof(float), cudaMemcpyDeviceToHost);
// Check For Cuda Errors
checkCUDAError("gpuSharedSum");
if(h_result == h_shared[0]){
cudaEventElapsedTime(&timegpus, launch_begin_seq, launch_end_seq);
printf("Sum Successful!\n");
printf("Shared Memory GPU time: %f ms\n\n", timegpus);
tpgpus = 1e-9*N/(timegpus*1e-3);
printf("Throughput = %f Gflops/s\n\n", tpgpus);
}else{
printf("Sum Failed!\n");
printf("Expected = %f\nReceived = %f\n", h_result, h_shared[0]);
}
printf("Global Speed up = %f \n", timecpu/timegpug);
printf("Global ratio = %f \n\n", tpgpug/tpcpu);
printf("Shared Speed up = %f \n", timecpu/timegpus);
printf("Shared ratio = %f \n\n", tpgpus/tpcpu);
printf("CSV output:\n");
printf("%i,%i,%i,%f,%f,%f,%f,%f,%f,%f,%f,%f,%f", N, blocksize, blocksize, timecpu, timegpug, timegpus, tpcpu, tpgpug, tpgpus, timecpu/timegpug, timecpu/timegpus, tpgpug/tpcpu, tpgpus/tpcpu);
free(h_vec);
free(h_global);
free(h_shared);
cudaFree(d_vec1);
cudaFree(d_vec2);
cudaFree(d_out1);
cudaFree(d_out2);
return 0;
}
|
19,505 | #include <cstdlib>
#include <iostream>
#include <time.h>
#define DIM1 3
#define DIM2 3
// Circular-neighborhood mean filter over a DIM1 x DIM2 grid, one thread
// per cell. For cell (x, y) it averages all in-bounds cells within
// Euclidean distance `radius` (i*i + j*j <= radius^2).
// count >= 1 always: the (i=0, j=0) center cell passes every test, so the
// division below cannot be by zero.
// NOTE(review): x is derived with DIM1 but y with DIM2, and indexing uses
// nx * DIM1 + ny — consistent only because DIM1 == DIM2 here; confirm
// before changing either dimension.
__global__ void avg(float* in, float* out, int radius)
{
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid < DIM1 * DIM2)
{
int x = tid / DIM1;
int y = tid % DIM2;
float count = 0;
float val = 0;
for(int i = -1 * radius; i <= radius; i++)
{
int nx = i + x;
if(nx >= 0 && nx < DIM1)
for(int j = -1 * radius; j <= radius; j++)
{
int ny = j + y;
// inside the disc AND inside the grid
if(i*i + j*j <= radius * radius && ny >= 0 && ny < DIM2)
val += in[nx * DIM1 + ny], count++;
}
}
out[tid] = val/count;
}
}
// Driver: fills a DIM1 x DIM2 grid with random values in [0, 1000), runs
// the `avg` disc-average kernel with radius 2, and prints the averaged
// values for every 10th row/column (with DIM1 = DIM2 = 3, only cell 0,0).
int main()
{
    const int cells = DIM1 * DIM2;
    float* in = (float*)malloc(cells * sizeof(float));
    srand(time(NULL));
    for (int i = 0; i < cells; i++)
        in[i] = rand() % 1000;
    /* std::cout << "Original:" << std::endl;
    for(int i = 0; i < DIM1; ++i) {
    for(int j = 0; j < DIM2; ++j)
    std::cout << in[i*DIM1 + j] << " ";
    std::cout << std::endl;
    }*/
    float* din;
    float* dout;
    cudaMalloc((void**)&din, cells * sizeof(float));
    cudaMalloc((void**)&dout, cells * sizeof(float));
    cudaMemcpy(din, in, cells * sizeof(float), cudaMemcpyHostToDevice);
    int TPB = 9;
    avg<<<(cells + TPB - 1) / TPB, TPB>>>(din, dout, 2);
    float* out = (float*)malloc(cells * sizeof(float));
    // blocking copy also synchronizes with the kernel above
    cudaMemcpy(out, dout, cells * sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "Averaged:" << std::endl;
    for (int x = 0; x < DIM1; x++)
    {
        if (x % 10 != 0)
            continue;
        for (int y = 0; y < DIM2; y++)
            if (y % 10 == 0)
                std::cout << out[x * DIM1 + y] << " ";
        std::cout << std::endl;
    }
    cudaFree(din);
    cudaFree(dout);
    free(in);
    free(out);
}
|
19,506 | #include <cstdlib>
#include <string>
#include <iostream>
// Writes the constant 5 into every element of arr; one thread per element,
// with a bounds guard for the over-provisioned tail of the grid.
__global__ void kernel(int* arr,int n){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    arr[idx] = 5;
}
// Reports the last CUDA error, if any: prints `message` to stderr and
// exits, unless `warning` is true, in which case it only reports.
// Note: cudaGetLastError() clears the sticky error state as a side effect.
__host__ void error(std::string message,bool warning=false){
    if (cudaGetLastError() != cudaSuccess) {
        std::cerr << message << std::endl;
        if (!warning)
            exit(0);
    }
}
// Fills a device array of n ints with 5 on the GPU (128 threads cover the
// 100 elements), copies it back, and prints it.
// Improvement: actually uses the `error()` helper defined above — the
// original declared it but never checked a single CUDA call.
__host__ int main(int argc,char* argv[]){
    int* arr=NULL;
    int* cuArr=NULL;
    const int n=100;
    size_t size=n*sizeof(int);
    arr=(int*)malloc(size);
    cudaMalloc((void**)&cuArr,size);
    error("cudaMalloc failed");
    kernel<<<2,64>>>(cuArr,n);
    error("kernel launch failed");
    cudaMemcpy(arr,cuArr,size,cudaMemcpyDeviceToHost);
    error("cudaMemcpy failed");
    cudaFree(cuArr);
    for(int i=0;i<n;i++){
        std::cout<<arr[i]<<" ";
    }
    std::cout<<std::endl;
    free(arr);
    return 0;
}
|
19,507 | #include <stdio.h>
const char STR_LENGTH = 52;
__device__ const char *STR = "HELLO WORLD! HELLO WORLD! HELLO WORLD! HELLO WORLD! ";
// Prints one character of the device string STR per block; blockIdx.x is
// the character index (launched with STR_LENGTH blocks of 1 thread).
__global__ void hello() {
printf("%c", STR[blockIdx.x]);
}
// Selects a CUDA device (first CLI argument, defaulting to 0) and launches
// the `hello` kernel, which prints STR one character per block.
int main(int argc, char** argv) {
    // Guard argv: the original read argv[1] unconditionally, which is
    // undefined behavior when the program is run with no arguments.
    int device = (argc > 1) ? atoi(argv[1]) : 0;
    cudaSetDevice(device);
    hello<<<STR_LENGTH, 1>>>();
    // Synchronize BEFORE the trailing newline: device printf output is
    // only flushed at synchronization, so the original printed its "\n"
    // ahead of the kernel's characters.
    cudaDeviceSynchronize();
    printf("\n");
    return 0;
}
|
19,508 | //numThreads should be multiple of 32
// Graph-coloring kernel: one warp (32 threads) per node listed in `medium`
// (CSR graph: offset/col_id). The warp scans the node's neighbors in
// 32-wide strides; if no neighbor with id >= node is uncolored or already
// holds currentColor, lane 0 assigns currentColor to the node.
// Shared memory: one bool flag per warp — launch with blockDim.x/32 bools
// of dynamic shared memory; blockDim.x must be a multiple of 32.
// NOTE(review): the __syncthreads() calls below sit inside branches whose
// conditions (sizeMedium bound, color[node]==0) can diverge BETWEEN warps
// of the same block — a block-wide barrier under divergent control flow is
// undefined behavior; confirm all warps of a block take the same path.
__global__ void mediumKernel(int *offset, int *col_id, int *medium, int sizeMedium, int *color, int currentColor)
{
extern __shared__ bool set[];
if( (blockIdx.x*blockDim.x+threadIdx.x)/32 < sizeMedium) {
// each group of 32 consecutive threads handles one node
int node = medium[(blockIdx.x*blockDim.x+threadIdx.x)/32];
if(color[node]==0) {
// lane id (0..31) within the warp
int node_offset = (blockIdx.x*blockDim.x+threadIdx.x)%32;
int neighLen = offset[node+1]-offset[node];
// all 32 lanes write 1 to the same flag (same-value race, benign)
set[threadIdx.x/32]=1;
__syncthreads();
for(int i=node_offset; i<neighLen; i=i+32)
{
int item = col_id[offset[node]+i];
// a conflicting neighbor clears the warp's flag
if(item >= node && (color[item]==0 || color[item]==currentColor))
set[threadIdx.x/32]=0;
}
__syncthreads();
if(node_offset == 0){
if(set[threadIdx.x/32]==1)
color[node]=currentColor;
}
}
}
}
|
19,509 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <time.h>
#include<sys/time.h>
//#define LINUX_IMP
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if(error != cudaSuccess) \
{\
printf("Error: %s:%d,", __FILE__,__LINE__);\
printf("code:%d,reason:%s\n",error,cudaGetErrorString(error));\
exit(1);\
}\
}
//don't forget the time
// Returns wall-clock time since the epoch in seconds (microsecond
// resolution), for simple host-side interval timing.
double cpuSecond() {
    struct timeval tv;
    gettimeofday(&tv, NULL);
    return (double)tv.tv_sec + (double)tv.tv_usec * 1.e-6;
}
// Compares host and GPU result arrays element-wise against a fixed
// tolerance; on the first mismatch prints both values and returns,
// otherwise reports that the arrays match.
void checkResult(float *hostRef, float *gpuRef,const int N){
    const double epsilon = 1.0E-8;
    for (int idx = 0; idx < N; idx++) {
        if (abs(hostRef[idx] - gpuRef[idx]) > epsilon) {
            printf("Arrays do not match!\n");
            printf("host %5.6f gpu %5.6f at current %d\n", hostRef[idx], gpuRef[idx], idx);
            return;
        }
    }
    printf("Arrays match .\n\n");
}
//generate different seed for random number
// Fills ip with `size` pseudo-random floats in [0.0, 25.5], seeding the
// generator from the current time so each run differs.
void initiaData(float *ip,int size){
    time_t t;
    srand((unsigned)time(&t));
    // The original wrapped this loop in try/catch, but nothing here can
    // throw — plain code is simpler and identical in behavior.
    for(int i = 0; i < size; i++){
        // low byte of rand() scaled down: values 0.0 .. 25.5
        ip[i] = (float)(rand() & 0xFF)/10.0f;
        //ip[i] = float(0.001*i);
        //ip[i] = float(0.01);
    }
}
// CPU reference: element-wise C = A + B over an nx-by-ny row-major matrix.
void sumMatrixOnHost(float *A,float *B, float *C, const int nx, const int ny){
    for (int row = 0; row < ny; row++){
        float *ia = A + row * nx;
        float *ib = B + row * nx;
        float *ic = C + row * nx;
        for (int col = 0; col < nx; col++){
            ic[col] = ia[col] + ib[col];
        }
    }
}
// One thread per matrix element: C = A + B over an NX-by-NY row-major
// matrix, with a 2D launch; out-of-range threads exit early.
__global__ void sumMatrixOnGPU2D(float *A, float *B, float *C, int NX, int NY) {
    unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= (unsigned int)NX || row >= (unsigned int)NY)
        return;
    unsigned int idx = row * NX + col;
    C[idx] = A[idx] + B[idx];
}
/*****
//kernel function
__global__ void sumArrayOnGPU(float *A,float *B, float *C){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int idy = threadIdx.y + blockIdx.x * blockDim.y;
//printf("%d+%d*%d*%d = %d\n",idx,idy,blockDim.x,gridDim.x,idx+idy*blockDim.x*gridDim.x);
C[idx + idy * blockDim.x * gridDim.x] = A[idx + idy * blockDim.x * gridDim.x] + B[idx + idy * blockDim.x * gridDim.x];
//printf("%f\n",C[idx+idy*blockDim.x*gridDim.x]);
}
*****/
// Dumps the main fields of a cudaDeviceProp (memory sizes, launch limits,
// compute capability, clock, SM count) to stdout for diagnostics.
void printDeviceProp(const cudaDeviceProp &prop)
{
printf("Device Name : %s.\n", prop.name);
printf("totalGlobalMem : %ld.\n", prop.totalGlobalMem);
printf("sharedMemPerBlock : %ld.\n", prop.sharedMemPerBlock);
printf("regsPerBlock : %d.\n", prop.regsPerBlock);
printf("warpSize : %d.\n", prop.warpSize);
printf("memPitch : %ld.\n", prop.memPitch);
printf("maxThreadsPerBlock : %d.\n", prop.maxThreadsPerBlock);
printf("maxThreadsDim[0 - 2] : %d %d %d.\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
printf("maxGridSize[0 - 2] : %d %d %d.\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
printf("totalConstMem : %ld.\n", prop.totalConstMem);
printf("major.minor : %d.%d.\n", prop.major, prop.minor);
printf("clockRate : %d.\n", prop.clockRate);
printf("textureAlignment : %ld.\n", prop.textureAlignment);
printf("deviceOverlap : %d.\n", prop.deviceOverlap);
printf("multiProcessorCount : %d.\n", prop.multiProcessorCount);
printf("\n\n\n");
}
// Benchmark: 4096x4096 element-wise matrix add on CPU and GPU, verifying
// the GPU result against the host reference and timing each stage.
// Fix: the device-selection loop originally declared a NEW `int idxDevice`
// in the for-statement, shadowing the outer variable — so the chosen index
// was discarded, cudaSetDevice always got 0, and `idxDevice == count`
// could never trigger. The shadowing declaration is removed.
int main(int argc, char **argv){
printf("%s Starting...\n",argv[0]);
double istart,iElaps;
//get the cuda device count
int count;
cudaGetDeviceCount(&count);
if(count == 0){
fprintf(stderr, "There is no device.\n");
exit(1);
}
//find the first device with compute capability >= 1.x
int idxDevice = 0;
for(idxDevice = 0; idxDevice < count; ++idxDevice){
cudaDeviceProp prop;
if(cudaGetDeviceProperties(&prop,idxDevice) == cudaSuccess)
if(prop.major >= 1){
printDeviceProp(prop);
break;
}
}
if(idxDevice == count){
fprintf(stderr,"there is no device supporting CUDA 1.x. \n");
}
CHECK(cudaSetDevice(idxDevice))
//set up data size of Matrix
int nx = 1<< 12;
int ny = 1<< 12;
int nxy = nx*ny;
size_t nBytes = nxy * sizeof(float);
printf("Matrix size: %d\n", nxy);
printf("Matrix volume: %zu\n",nBytes);
//malloc host memory
float *h_A,*h_B,*hostRef,*gpuRef;
istart = cpuSecond();
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
iElaps = cpuSecond() -istart;
printf("malloc host memory:%lfs\n",iElaps);
//initialize data at host side
istart = cpuSecond();
initiaData(h_A, nxy);
initiaData(h_B, nxy);
iElaps = cpuSecond() - istart;
memset(hostRef,0,nBytes);
memset(gpuRef,0,nBytes);
printf("initiaData spent time:%lfs\n",iElaps);
//add Matrix at host side for result checks
istart = cpuSecond();
sumMatrixOnHost(h_A,h_B, hostRef, nx, ny);
iElaps = cpuSecond() - istart;
printf("sumArrayOnHost spent time:%lfs\n",iElaps);
//malloc device global memory
float *d_A, *d_B, *d_C;
cudaMalloc((float **) &d_A,nBytes);
cudaMalloc((float **) &d_B,nBytes);
cudaMalloc((float **) &d_C,nBytes);
//transfer data from host to device
cudaMemcpy(d_A, h_A,nBytes,cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B,nBytes,cudaMemcpyHostToDevice);
//invoke kernel at host side
int dimx = 32;
int dimy = 1;
dim3 block(dimx,dimy);
dim3 grid((nx+block.x-1)/block.x, (ny+block.y-1)/block.y);
istart = cpuSecond();
sumMatrixOnGPU2D<<<grid, block>>>(d_A,d_B,d_C,nx,ny);
//kernel launch is asynchronous: synchronize before stopping the clock
cudaDeviceSynchronize();
iElaps = cpuSecond() - istart;
printf("sumMatrixOnGPU2D <<<(%d,%d), (%d,%d)>>> elapsed %lf sec\n", grid.x,grid.y, block.x, block.y, iElaps);
//copy kernel result back to host side
cudaMemcpy(gpuRef, d_C,nBytes,cudaMemcpyDeviceToHost);
//check device results
checkResult(hostRef, gpuRef,nxy);
//free device global memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
//free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
//reset device
cudaDeviceReset();
return 0;
}
|
19,510 | // Device code
// Device code
// Grid-stride memset: writes `val` into each of the `sz` bytes at `ptr`.
// Correct for any launch configuration (each thread covers every
// (gridDim.x * blockDim.x)-th byte starting at its global index).
extern "C" __global__ void m3shell_memset_kernel(char *ptr, int sz, char val)
{
    int stride = gridDim.x * blockDim.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < sz) {
        ptr[idx] = val;
        idx += stride;
    }
}
|
19,511 | #include <stdio.h>
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
// Baseline: the ENTIRE m-by-n = (m-by-k)*(k-by-n) matmult is computed
// serially by each thread — there is no use of threadIdx/blockIdx.
// NOTE(review): must be launched as <<<1,1>>>; with more threads every
// thread races to write the same C elements (redundantly, but the zeroing
// pass can interleave with another thread's accumulation and corrupt C).
__global__ void
matmult_kernel1(int m, int n, int k, double *A, double *B, double *C){
// set C to zeros
for (int i=0;i<m;i++){
for (int p=0;p<n;p++){
C[i*n+p]=0; //C[i][p]
}
}
// do matmult with mkn loop order
for (int i=0;i<m;i++) {
for (int j=0;j<k;j++){
for (int p=0;p<n;p++) {
C[i*n+p] += A[i*k+j] * B[j*n+p];
}
}
}
}
// One thread per element of C: C(i,j) = sum_p A(i,p) * B(p,j).
// A is m-by-k, B is k-by-n, C is m-by-n, all row-major.
__global__ void
matmult_kernel2(int m, int n, int k, double *A, double *B, double *C){
    int j = blockIdx.x*blockDim.x+threadIdx.x; //looping through n
    int i = blockIdx.y*blockDim.y+threadIdx.y; //looping through m
    // Fix: the original declared `double tmp;` WITHOUT initializing it,
    // so each C element accumulated on top of an indeterminate value.
    double tmp = 0.0;
    if((j<n)&&(i<m)){
        for(int p=0; p<k; p++){
            //read row of A and col of B
            tmp += A[i*k+p] * B[p*n+j];
        }
        C[i*n+j] = tmp;
    }
}
// 2x column-unrolled variant of kernel2: each thread computes C(i,j) and
// C(i,j+1), so the grid needs only half as many threads in x.
// j_add is 1 when the second column exists, 0 at the ragged right edge.
__global__ void
matmult_kernel3(int m, int n, int k, double *A, double *B, double *C){
//compute C(i,j) and C(i,j+1)
int j = 2*(blockIdx.x*blockDim.x+threadIdx.x); //looping through n (only half as many threads/blocks)
int i = blockIdx.y*blockDim.y+threadIdx.y; //looping through m
double tmp1 = 0.0;
double tmp2 = 0.0;
if((j<n)&&(i<m)){
//additional j to compute (here, either 1 or 0)
int j_add = MIN(1,n-1-j);
for(int p=0; p<k; p++){
//row of A and col of B
// A[i*k+p] is reused for both output columns
tmp1 += A[i*k+p] * B[p*n+j];
if(j_add == 1)
tmp2 += A[i*k+p] * B[p*n+j+1];
}
C[i*n+j] = tmp1;
if(j_add == 1)
C[i*n+j+1] = tmp2;
}
}
// s-way column-unrolled variant (s <= 16): each thread computes the up-to-s
// consecutive elements C(i,j) .. C(i,j+s-1), so the grid needs only 1/s as
// many threads in x. j_add (0..s-1) counts the extra columns this thread
// actually owns, clipping at the ragged right edge.
// Same arithmetic order as the original 16-scalar version; the fixed-trip
// unrolled loop replaces tmp1..tmp16.
__global__ void
matmult_kernel4(int m, int n, int k, double *A, double *B, double *C, int s){
    int j = s*(blockIdx.x*blockDim.x+threadIdx.x);
    int i = blockIdx.y*blockDim.y+threadIdx.y;
    double acc[16] = {0.0};
    if((j<n)&&(i<m)){
        int j_add = MIN(s-1,n-1-j);
        for(int p=0; p<k; p++){
            // column j is always computed; columns j+q only when owned
            acc[0] += A[i*k+p] * B[p*n+j];
            #pragma unroll
            for(int q=1; q<16; q++){
                if(j_add > q-1)
                    acc[q] += A[i*k+p] * B[p*n+j+q];
            }
        }
        C[i*n+j] = acc[0];
        #pragma unroll
        for(int q=1; q<16; q++){
            if(j_add > q-1)
                C[i*n+j+q] = acc[q];
        }
    }
}
/*
BEGIN GPU 5
##############################################################################
*/
#define BLOCK_SIZE 16
typedef struct {
int width;
int height;
int stride;
double* elements;
} Matrix;
// Get a matrix element
// Reads A(row, col) from a strided row-major matrix.
__device__ double GetElement(const Matrix A, int row, int col)
{
    const double* rowStart = A.elements + row * A.stride;
    return rowStart[col];
}
// Set a matrix element
// Writes `value` into A(row, col) of a strided row-major matrix.
__device__ void SetElement(Matrix A, int row, int col,
double value)
{
    double* rowStart = A.elements + row * A.stride;
    rowStart[col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A.
// Returns a view (shared elements pointer, same stride) — no data is
// copied, so writes through the view hit A's storage.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
Matrix Asub;
Asub.width = BLOCK_SIZE;
Asub.height = BLOCK_SIZE;
Asub.stride = A.stride;
Asub.elements = &A.elements[A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col];
return Asub;
}
// Matrix multiplication kernel called by MatMul()
// Shared-memory tiled matrix multiply (CUDA Programming Guide pattern):
// one BLOCK_SIZE x BLOCK_SIZE thread block per C tile, one thread per
// element. Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE) and all matrix
// dimensions to be multiples of BLOCK_SIZE — there are no edge guards.
__global__ void gpu5_kernel(const Matrix A, const Matrix B, Matrix C)
{
// Block row and column
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
// Each thread block computes one sub-matrix Csub of C
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Each thread computes one element of Csub
// by accumulating results into Cvalue
double Cvalue = 0;
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
// Loop over all the sub-matrices of A and B that are
// required to compute Csub
// Multiply each pair of sub-matrices together
// and accumulate the results
for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
// Get sub-matrix Asub of A
Matrix Asub = GetSubMatrix(A, blockRow, m);
// Get sub-matrix Bsub of B
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// Shared memory used to store Asub and Bsub respectively
__shared__ double As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double Bs[BLOCK_SIZE][BLOCK_SIZE];
// Load Asub and Bsub from device memory to shared memory
// Each thread loads one element of each sub-matrix
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize to make sure the sub-matrices are loaded
// before starting the computation
__syncthreads();
// Multiply Asub and Bsub together
for (int e = 0; e < BLOCK_SIZE; ++e)
Cvalue += As[row][e] * Bs[e][col];
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write Csub to device memory
// Each thread writes one element
SetElement(Csub, row, col, Cvalue);
}
19,512 | #include "includes.h"
extern "C"
// Row-sum: thread i accumulates row i of the n-by-n row-major matrix `a`
// into sum[i], one global read-modify-write per element.
// NOTE(review): sum is only added to, never initialized here — the caller
// must pre-zero it (or intends accumulation); confirm at the call site.
__global__ void add(int n, float *a, float *sum)
{
int i = threadIdx.x + blockDim.x * blockIdx.x;
if (i<n)
{
for (int j = 0; j < n; j++)
{
sum[i] = sum[i] + a[i*n + j];
}
}
}
19,513 | // Program corresponding to CythonBM.cu that can be run directly from the command lin. For testing purposes.
//Attempt to Parallelize function for crossing time. Slower than other methods.
//#include <cmath>
#include <curand_kernel.h>
#include <stdio.h>
#include <cuda.h>
// Error handling code used in Nvidia example found here: https://docs.nvidia.com/cuda/curand/host-api-overview.html#generator-options
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
//Function to generate brownian path, which is stored in results. Executes on the GPU, hence the __global__ identifier
// Generates one Brownian path per thread: path `crossTimeIndex` occupies
// results[start .. start+N-1], starts at 0.0, and takes N-1 Gaussian steps
// with variance T/N. Also zeroes the thread's crossTimes slot.
__global__ void randomWalk(double *results, int *crossTimes, int T, int N, int numSims) {
    int crossTimeIndex = threadIdx.x + blockIdx.x * blockDim.x;
    if (crossTimeIndex < numSims) {
        curandState_t state;
        // Fix: the original called curand_init(1234, 0, 0, ...) in every
        // thread, so ALL simulations drew the identical random sequence.
        // Using the thread index as the cuRAND subsequence gives each
        // path an independent stream while keeping runs reproducible.
        curand_init (1234, crossTimeIndex, 0, &state);
        double random;
        int start = crossTimeIndex * N;
        crossTimes[crossTimeIndex] = 0;
        results[start] = 0.0;
        for (int j = start + 1; j < start + N; j++) {
            random = curand_normal_double(&state);
            results[j] = results[j-1] + random * sqrt((double) T / N);
        }
    }
    /*
    Generate 2 doubles at once. Test later to see if this is more efficient:
    double curand_normal2_double (state);
    */
}
// Grid-stride scan over all N*numSims samples: for each path whose
// crossTimes entry is still 0, records the step index at which the path
// reaches either threshold. (Threads race per path, so the recorded
// crossing is not guaranteed to be the earliest — matches original.)
__global__ void getCrossingTimes(double *results, int *crossTimes, int N, int numSims, int lowerThreshold, int upperThreshold) {
    // size_t: N * numSims overflows 32-bit int for the sizes used here.
    size_t total = (size_t)N * (size_t)numSims;
    size_t stride = (size_t)blockDim.x * gridDim.x;
    // Fix: the original advanced tid by blockDim.x + gridDim.x (sum, not
    // product), which both re-visits and skips samples.
    for (size_t tid = threadIdx.x + (size_t)blockIdx.x * blockDim.x; tid < total; tid += stride) {
        if (crossTimes[tid/N] == 0) {
            if (results[tid] <= lowerThreshold || results[tid] >= upperThreshold) {
                crossTimes[tid/N] = tid % N;
            }
        }
    }
}
// Driver: simulates numSims Brownian paths of N steps on the GPU, finds
// each path's first threshold crossing, and reports the elapsed time.
// Fixes vs. original: launch-grid size divided by the THREAD count (the
// original divided by numSims, always yielding one block so only 128 of
// the 100000 paths were ever generated); 64-bit size arithmetic for the
// N*numSims buffers (32-bit int overflowed); "ms/n" typo; freed
// dev_crossTimes and the host arrays.
int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //Arrays to store the brownian path, one for the host and one for the device
    int N = 99000;
    int T = 1;
    int numSims = 100000;
    int numThreads = 128;
    // ceil(numSims / numThreads) blocks so every simulation gets a thread
    int numBlocks = (numSims + numThreads - 1) / numThreads;
    double lowerThreshold = -1;
    double upperThreshold = 1;
    size_t totalSamples = (size_t)N * (size_t)numSims;
    double *results = new double[totalSamples];
    double *dev_results;
    int *crossTimes = new int[numSims];
    int *dev_crossTimes;
    int numBlocks2 = (int)((totalSamples + 511) / 512);
    // Allocate space for results array on device
    CUDA_CALL(cudaMalloc(&dev_results, totalSamples * sizeof(double)));
    CUDA_CALL(cudaMalloc(&dev_crossTimes, numSims * sizeof(int)));
    //Generate all paths in parallel
    randomWalk<<<numBlocks, numThreads>>>(dev_results, dev_crossTimes, T, N, numSims);
    //copy results array from device to host
    CUDA_CALL(cudaMemcpy(results, dev_results , totalSamples * sizeof(double), cudaMemcpyDeviceToHost));
    getCrossingTimes<<<numBlocks2,512>>>(dev_results, dev_crossTimes, N, numSims, lowerThreshold, upperThreshold);
    CUDA_CALL(cudaMemcpy(crossTimes, dev_crossTimes, numSims * sizeof(int), cudaMemcpyDeviceToHost));
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time to generate: %3.1f ms\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("\n");
    //clean up
    CUDA_CALL(cudaFree(dev_results));
    CUDA_CALL(cudaFree(dev_crossTimes));
    delete[] results;
    delete[] crossTimes;
    return 0;
}
|
19,514 |
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// Naive matrix multiply: one thread per element of the n x n (row-major)
// product c = a * b; thread ii owns output row ii/n, column ii%n.
// Fix: accumulate in a register and ASSIGN the result. The original did
// `c[...] +=`, which read whatever uninitialised bytes cudaMalloc returned
// and produced garbage unless the caller had zeroed c beforehand.
__global__ void kernelMatrixMul( float* a, float*b, float*c, int n )
{
    int ii = blockIdx.x * blockDim.x + threadIdx.x;
    if( ii >= n*n )
        return;
    int i = ii / n;      // output row
    int j = ii % n;      // output column
    float acc = 0.0f;    // register accumulator
    for( int k = 0; k < n; k++ )
        acc += a[ i*n + k ] * b[ k*n + j ];
    c[ i*n + j ] = acc;
}
// Host wrapper: copies the n x n row-major inputs a and b to the GPU,
// launches the naive multiply kernel, and copies the product back into c.
// Fixes: cDev is now zero-initialised (the kernel accumulates with += and
// cudaMalloc does not zero memory), and all three device buffers are freed
// (the original leaked them on every call).
extern "C" void matrixMulGPU( float* a, float*b, float*c, int n )
{
    float *aDev, *bDev, *cDev;
    size_t bytes = (size_t)n * n * sizeof(float);
    cudaMalloc( (void**)&aDev, bytes );
    cudaMalloc( (void**)&bDev, bytes );
    cudaMalloc( (void**)&cDev, bytes );
    cudaMemcpy( aDev, a, bytes, cudaMemcpyHostToDevice );
    cudaMemcpy( bDev, b, bytes, cudaMemcpyHostToDevice );
    cudaMemset( cDev, 0, bytes );          // kernel accumulates into c
    int nBlock = 256;
    int nGrid = (n*n + nBlock-1)/nBlock;   // ceil-div over output elements
    kernelMatrixMul<<< nGrid, nBlock >>>( aDev, bDev, cDev, n );
    cudaError_t err = cudaGetLastError();  // launch-configuration errors only
    if( err != cudaSuccess )
        cout << "error" << endl;
    cudaMemcpy( c, cDev, bytes, cudaMemcpyDeviceToHost );
    cudaFree( aDev );
    cudaFree( bDev );
    cudaFree( cDev );
}
19,515 | #include "includes.h"
using namespace std;
// Kernel function (runs on the GPU): computes a + b and stores the sum in
// the device location *c; a single thread is sufficient.
__global__ void add(int a, int b, int *c)
{
*c = a + b;
}
19,516 | #include <cmath>
#include <iostream>
#include <vector>
// CPU baseline: fills two 50M-element arrays with sin^2(i) and cos^2(i),
// adds them element-wise, and prints the mean -- which must be ~1.0 by the
// Pythagorean identity. Useful as a reference timing for a GPU port.
int main()
{
    const size_t n = 50000000;
    std::vector<double> a(n), b(n), c(n);
    // Populate the inputs.
    for (size_t i = 0; i < n; i++) {
        a[i] = sin(i) * sin(i);
        b[i] = cos(i) * cos(i);
    }
    // Element-wise sum.
    for (size_t i = 0; i < n; i++)
        c[i] = a[i] + b[i];
    // Reduce to the mean and report.
    double sum = 0;
    for (size_t i = 0; i < n; i++)
        sum += c[i];
    std::cout << "final result " << (sum / n) << std::endl;
    return 0;
}
|
19,517 | // Jin Pyo Jeon
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
// N Stream Non-Stream
// 3 * 2^15 0.11 0.11
// 3 * 2^10*700 0.15 0.15
// 3 * 2^20 0.22 0.22
// 3 * 2^24 3.45 3.46
// 3 * 2^25 6.89 6.90
#define N (3 * 1024 * 700)
#define T (N / 3 / 1024)
#define gpuErrCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error (with file/line provenance) to stderr; invoked via
// the gpuErrCheck macro above. Deliberately non-fatal: execution continues.
inline void gpuAssert(cudaError_t code, const char *file, int line) {
    if (code == cudaSuccess)
        return;  // fast path: nothing to report
    fprintf(stderr, "GPUAssert: %s %s %d\n", cudaGetErrorString(code), file, line);
}
// Element-wise vector add: c[x] = a[x] + b[x] for x in [0, width).
// Fix: added the standard grid-tail bounds guard -- the original wrote
// unconditionally, so any launch whose thread count exceeded the buffer
// length would write out of bounds.
__global__ void VecAddKernel(int* a, int* b, int* c, long width){
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    if (x < width)
        c[x] = a[x] + b[x];
}
// Fills arr[0..size) with pseudo-random bits (0 or 1) drawn from rand().
void random_ints(int * arr, size_t size){
    for (size_t k = 0; k < size; ++k)
        arr[k] = rand() % 2;
}
// Debug helper: prints `size` ints separated by spaces, then a newline.
void printVec(int* arr, size_t size) {
    size_t k = 0;
    while (k < size) {
        printf("%d ", arr[k]);
        ++k;
    }
    printf("\n");
}
// Triple-buffered streamed vector add: splits the N-element add into three
// segments, each on its own stream, so H2D copies, kernels and D2H copies
// can overlap. (With SegSize = N/3 the outer loop executes exactly once.)
// Fixes: host buffers are now page-locked (cudaMemcpyAsync silently degrades
// to synchronous copies with pageable malloc memory), and all device
// buffers, streams and host buffers are released (the original leaked them).
int main(int argc, char**argv) {
    srand(time(NULL));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    int *h_A, *h_B, *h_C;
    long size = N * sizeof(int);
    cudaStream_t stream0, stream1, stream2;
    cudaStreamCreate(&stream0);
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);
    int *d_A0, *d_B0, *d_C0;
    int *d_A1, *d_B1, *d_C1;
    int *d_A2, *d_B2, *d_C2;
    int SegSize = N / 3;
    int SegSizeByte = sizeof(int) * SegSize;
    cudaMalloc((void**)&d_A0, SegSizeByte);
    cudaMalloc((void**)&d_B0, SegSizeByte);
    cudaMalloc((void**)&d_C0, SegSizeByte);
    cudaMalloc((void**)&d_A1, SegSizeByte);
    cudaMalloc((void**)&d_B1, SegSizeByte);
    cudaMalloc((void**)&d_C1, SegSizeByte);
    cudaMalloc((void**)&d_A2, SegSizeByte);
    cudaMalloc((void**)&d_B2, SegSizeByte);
    cudaMalloc((void**)&d_C2, SegSizeByte);
    assert(SegSize % T == 0);
    assert(N % 3 == 0);
    // Fix: pinned host memory is required for truly asynchronous copies.
    cudaMallocHost((void**)&h_A, size);
    cudaMallocHost((void**)&h_B, size);
    cudaMallocHost((void**)&h_C, size);
    random_ints(h_A, N);
    random_ints(h_B, N);
    // Pipeline: stage copies and kernels across the three streams so the
    // copy engine and compute engine stay busy simultaneously.
    for (int i = 0; i < N; i += SegSize * 3) {
        cudaMemcpyAsync(d_A0, h_A+i, SegSizeByte, cudaMemcpyHostToDevice, stream0);
        cudaMemcpyAsync(d_B0, h_B+i, SegSizeByte, cudaMemcpyHostToDevice, stream0);
        if (i > 0) {
            // Drain stream2's result from the previous round trip.
            cudaMemcpyAsync(h_C+i-SegSize, d_C2, SegSizeByte, cudaMemcpyDeviceToHost, stream2);
        }
        VecAddKernel<<<SegSize/T, T, 0, stream0>>>(d_A0, d_B0, d_C0, SegSize);
        cudaMemcpyAsync(d_A1, h_A+i+SegSize, SegSizeByte,cudaMemcpyHostToDevice,stream1);
        cudaMemcpyAsync(d_B1, h_B+i+SegSize, SegSizeByte,cudaMemcpyHostToDevice,stream1);
        cudaMemcpyAsync(h_C+i, d_C0, SegSizeByte, cudaMemcpyDeviceToHost, stream0);
        VecAddKernel<<<SegSize/T, T, 0, stream1>>>(d_A1, d_B1, d_C1, SegSize);
        cudaMemcpyAsync(d_A2, h_A+i+(2 * SegSize), SegSizeByte, cudaMemcpyHostToDevice,stream2);
        cudaMemcpyAsync(d_B2, h_B+i+(2 * SegSize), SegSizeByte, cudaMemcpyHostToDevice,stream2);
        cudaMemcpyAsync(h_C+i+SegSize, d_C1, SegSizeByte, cudaMemcpyDeviceToHost,stream1);
        VecAddKernel<<<SegSize/T, T, 0, stream2>>>(d_A2, d_B2, d_C2, SegSize);
    }
    // Final segment of stream2 is drained after the loop.
    cudaMemcpyAsync(h_C+(N-SegSize), d_C2, SegSizeByte, cudaMemcpyDeviceToHost, stream2);
    gpuErrCheck( cudaDeviceSynchronize() );
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("The elapsed time is %fs\n", elapsedTime / 1000.0);
    // Fix: release device buffers, streams and pinned host memory.
    cudaFree(d_A0); cudaFree(d_B0); cudaFree(d_C0);
    cudaFree(d_A1); cudaFree(d_B1); cudaFree(d_C1);
    cudaFree(d_A2); cudaFree(d_B2); cudaFree(d_C2);
    cudaStreamDestroy(stream0);
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    cudaFreeHost(h_A); cudaFreeHost(h_B); cudaFreeHost(h_C);
    return 0;
}
|
19,518 | #include "includes.h"
extern "C"
{
}
// Masked accumulate: c[i] += a[i] wherever b[i] > 0, over the first n
// elements, one element per thread.
// Fix: removed the dead `c[i] += 0.0` else-branch -- it forced a pointless
// global read-modify-write on every non-positive element. (Side note: that
// write also normalised a stored -0.0 to +0.0; the sign of zero is now
// preserved, which matches the mathematical intent of "leave c unchanged".)
__global__ void A_emult_Bg0(const int n, const double *a, const double *b, double *c)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= n)
        return;                 // grid-tail guard
    if (b[i] > 0.0)
        c[i] += a[i];
}
19,519 | #include <iostream>
#include <chrono>
#include <ctime>
#include <stdio.h>
#include <math.h>
#include <assert.h>
// Trivial kernel: each launched thread prints one greeting via device-side
// printf (output appears when the host next synchronizes with the device).
__global__ void helloFromGPU() {
  printf("Hello from GPU!\n");
}
// Launches the hello kernel on 10 threads and reports wall-clock time.
// Fixes: (1) kernel launches are asynchronous, so without a device sync the
// timer only measured launch overhead (and the device printf output might
// not flush inside the timed region); (2) passing a std::chrono::duration
// object through printf's varargs is undefined behaviour -- pass the
// underlying double via .count().
int main() {
    std::chrono::time_point<std::chrono::system_clock> start, end;
    start = std::chrono::system_clock::now();
    // cuda function here
    helloFromGPU<<<1,10>>>();
    cudaDeviceSynchronize();   // wait for the kernel before stopping the clock
    end = std::chrono::system_clock::now();
    std::chrono::duration<double> elapsed_sec = end - start;
    printf("%.2f seconds\n", elapsed_sec.count());
    // cleans up device resources
    cudaDeviceReset();
}
|
19,520 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Maximum threads-per-block supported by device 0.
int BlockDim()
{
    cudaDeviceProp properties;
    cudaGetDeviceProperties(&properties, 0);
    return properties.maxThreadsPerBlock;
}
// Maximum grid size along the x dimension for device 0.
int GridDim()
{
    cudaDeviceProp properties;
    cudaGetDeviceProperties(&properties, 0);
    return properties.maxGridSize[0];
}
// Compute-capability major version of device 0 (e.g. 7 for Volta).
int major()
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0);
return prop.major;
}
// Compute-capability minor version of device 0 (e.g. 5 in "7.5").
int minor()
{
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop,0);
return prop.minor;
}
19,521 | #include <math.h>
#include <stdio.h>
#define N 200
// Reverses array a into b, one element per thread.
// NOTE(review): uses gridDim.x as the logical array length, so this is only
// correct when launched as <<<N, 1>>> (N blocks of one thread each), as
// main() below does -- confirm before reusing with any other configuration.
__global__ void reverse(int *a, int *b) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  b[gridDim.x - idx - 1] = a[idx];
}
/* Populates p[0..n) with pseudo-random values in the range [0, 100). */
void random_ints(int *p, int n) {
  for (int k = 0; k < n; k++)
    p[k] = rand() % 100;
}
// Reverses N random ints on the GPU and verifies against the host input.
// Fix: the original printed "Correct" unconditionally -- even right after
// printing "Uncorrect" -- because `break` only exits the loop. A flag now
// gates the success message.
int main(void) {
  int *a, *b;          // host copies
  int *dev_a, *dev_b;  // device copies
  int size = N * sizeof(int);
  int i;
  int ok = 1;          // stays 1 only if every element matches
  // allocate device copies
  cudaMalloc((void **)&dev_a, size);
  cudaMalloc((void **)&dev_b, size);
  a = (int *)malloc(size);
  b = (int *)malloc(size);
  random_ints(a, N);
  // copy input to device
  cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
  // one block per element; the kernel relies on gridDim.x == N
  reverse<<<N, 1>>>(dev_a, dev_b);
  // copy device result back (synchronous, so the kernel has finished)
  cudaMemcpy(b, dev_b, size, cudaMemcpyDeviceToHost);
  for (i = 0; i < N; i++) {
    if (b[i] != a[N - 1 - i]) {
      printf("Uncorrect\n");
      ok = 0;
      break;
    }
  }
  if (ok) {
    printf("Correct\n");
  }
  free(a);
  free(b);
  cudaFree(dev_a);
  cudaFree(dev_b);
  return 0;
}
|
// Numerically stable logistic sigmoid: d_aout[i] = 1 / (1 + e^(-d_a[i])).
// Branches on the sign of x so the argument to expf is always <= 0 and the
// exponential cannot overflow.
// Fix: use float literals (1.0f). The original's double constants promoted
// every addition/division to double precision for no accuracy benefit.
__global__ void expit_kernel(float *d_a, float *d_aout, int size) {
  const int id = threadIdx.x + blockIdx.x * blockDim.x;
  if (id >= size) {
    return;  // grid-tail guard
  }
  const float x = d_a[id];
  if (x < 0) {
    const float e = expf(x);
    d_aout[id] = e / (1.0f + e);
  } else {
    d_aout[id] = 1.0f / (1.0f + expf(-x));
  }
}
// Fast sigmoid: saturates to exactly 0/1 outside [-6, 6] (the sigmoid is
// within ~2.5e-3 of saturation there) and otherwise uses the hardware
// __expf intrinsic, keeping the stable-branch structure of expit_kernel.
// Fix: float literals (1.0f) -- the original's double constants promoted
// the arithmetic to double, defeating the point of the fast intrinsic.
__global__ void expit_fast_kernel(float *d_a, float *d_aout, int size) {
  const int id = threadIdx.x + blockIdx.x * blockDim.x;
  if (id >= size) {
    return;  // grid-tail guard
  }
  const float x = d_a[id];
  if (x <= -6) {
    d_aout[id] = 0;
  } else if (x >= 6) {
    d_aout[id] = 1;
  } else if (x < 0) {
    const float e = __expf(x);
    d_aout[id] = e / (1.0f + e);
  } else {
    d_aout[id] = 1.0f / (1.0f + __expf(-x));
  }
}
// Backward pass of the sigmoid: d_out[i] = s * (1 - s) * d_err[i].
// NOTE(review): the formula assumes d_a already holds sigmoid OUTPUTS (the
// forward activations), not pre-activations -- confirm against callers.
__global__ void expit_back_kernel(float *d_a, float *d_err, float *d_out, int size) {
  const int id = threadIdx.x + blockIdx.x * blockDim.x;
  if (id >= size) {
    return;  // grid-tail guard
  }
  const float s = d_a[id];
  const float localGrad = s * (1 - s);   // derivative of the sigmoid
  d_out[id] = localGrad * d_err[id];
}
// Fast elementwise exponential with clamping: __expf is only reliable for
// arguments roughly inside [-87, 87] (the limits of single-precision
// exp before under/overflow), so out-of-range inputs map to fixed boundary
// constants near FLT_MIN / exp(87).
__global__ void exp_fast_kernel(float *d_a, float *d_aout, int size) {
  const int id = threadIdx.x + blockIdx.x * blockDim.x;
  if (id >= size) {
    return;  // grid-tail guard
  }
  const float x = d_a[id];
  if (x <= -87) {
    d_aout[id] = 1.6458115E-38;       // ~exp(-87), near the float underflow edge
  } else if (x >= 87) {
    d_aout[id] = 6.0760303E+37;       // ~exp(87), before float overflow
  } else {
    d_aout[id] = __expf(x);
  }
}
|
19,523 | #include "../Headers/Includes.cuh"
/////////////// Importing the Setup Paramaters ///////////////
/////////////// Importing the Setup Paramaters ///////////////
// Parses "Setup.txt" and fills every reconstruction parameter by reference.
// File format (inferred from this parser -- confirm against a real Setup.txt):
//   * each line is "<tag> ... = <value>"; only the text after the LAST '='
//     is used;
//   * lines starting with '1', '2', '3' carry the input-file name, output
//     modifier and resume-iteration respectively;
//   * any other line not starting with a space contributes comma-separated
//     numbers, consumed positionally into all_numbers[0..19] below -- so
//     the ORDER of those lines in the file is significant.
void InputSetup( string &NAME, string &OUTPUTMOD, unsigned &IT, float &x_start, float &x_end, float &y_start,
float &y_end, float &z_start, float &z_end, unsigned &XDIVI, unsigned &YDIVI,
unsigned &ZDIVI, unsigned &ITHREADSPB, unsigned &IBLOCKS, unsigned &CONES,
unsigned &CTHREADSPB, unsigned &CBLOCKS, unsigned &TOTALIT, unsigned &SAVEEVERY,
float &CUTOFF, float &UE, bool &CORE_OUT, bool &MATT_OUT ){
cout << "\nReading Setup File:\n";
fstream file ( "Setup.txt" );
string line;
vector<float> all_numbers;
while ( getline(file, line) ) {
char start = line[0];
string entry;
stringstream sep(line);
string cell;
// Keep only the text after the last '=' on the line.
while ( getline ( sep, cell, '=') ) {
entry = cell.c_str();
}
// The input file name
if ( start == '1' ) NAME = entry.substr(1);
// The output file modifier
else if ( start == '2' ){
OUTPUTMOD = entry.substr(1);
// ')' or an empty entry means "no modifier".
if ( OUTPUTMOD[0] == ')' || OUTPUTMOD.length() == 0 ) OUTPUTMOD = "";
else OUTPUTMOD = "_" + OUTPUTMOD;
}
// Iteration to resume from (0 = fresh start).
else if ( start == '3' ){
string check = entry.substr(1);
if ( check[0] == ')' || check.length() == 0 ) IT = 0;
else IT = atoi( check.c_str() );
}
// The setup numbers
else if ( start != ' ' ) {
stringstream sep(entry);
string piece;
while ( getline( sep, piece, ',' ) ) {
all_numbers.push_back( atof( piece.c_str() ) );
}
}
}
// Positional unpacking -- indices must match the layout of Setup.txt.
x_start = all_numbers[0];
x_end = all_numbers[1];
y_start = all_numbers[2];
y_end = all_numbers[3];
z_start = all_numbers[4];
z_end = all_numbers[5];
XDIVI = all_numbers[6];
YDIVI = all_numbers[7];
ZDIVI = all_numbers[8];
ITHREADSPB = all_numbers[9];
IBLOCKS = all_numbers[10];
CONES = all_numbers[11];
CTHREADSPB = all_numbers[12];
CBLOCKS = all_numbers[13];
TOTALIT = all_numbers[14];
SAVEEVERY = all_numbers[15];
CUTOFF = all_numbers[16];
UE = all_numbers[17];
CORE_OUT = all_numbers[18];
MATT_OUT = all_numbers[19];
cout << " -- Done\n";
}
/////////////// Debuggers ///////////////
/////////////// Debuggers ///////////////
// Validates the setup parameters before the reconstruction starts: thread
// counts must exactly tile the voxel grid and the cone list, at least one
// output must be enabled, and the input (and, when resuming, the previous
// output) files must exist. Returns the number of errors found (0 = OK).
// Fix: the CONES-vs-threads check was duplicated verbatim, double-counting
// that error; the duplicate block is removed.
int InputDebugger( unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI, unsigned ITHREADSPB, unsigned IBLOCKS,
unsigned CONES, unsigned CTHREADSPB, unsigned CBLOCKS, bool CORE_OUT, bool MATT_OUT, unsigned IT, string output, string input ){
int errors = 0;
// The voxel kernels assume one thread per voxel.
int thread_check = XDIVI*YDIVI*ZDIVI - ITHREADSPB*IBLOCKS;
if ( thread_check != 0 ){
cout << "\nWhoops!!! Number of VOXELS does not equal the number of called threads!!!\n";
errors++;
}
// The cone kernels assume one thread per cone.
thread_check = CONES - CTHREADSPB*CBLOCKS;
if ( thread_check != 0 ){
cout << "\nWhoops!!! Number of CONES does not equal the number of called threads!!!\n";
errors++;
}
// Running with no outputs enabled would waste the whole reconstruction.
if ( CORE_OUT == 0 && MATT_OUT == 0){
cout << "\nWhoops!!! You aren't saving any data!!! Change this an run again!!!\n";
errors++;
}
ifstream infile ( input );
if ( !infile.good() ){
cout << "\nWhoops!!! Input data-file not found in Input/Filtered. Please try again!!!\n";
errors++;
}
infile.close();
// When resuming (IT != 0), the checkpoint being resumed from must exist.
if ( IT != 0 ){
ifstream file ( output + to_string(IT) + string(".csv") );
if ( !file.good() ) {
cout << "\nCant locate: " << output + to_string(IT) + string(".csv\n");
cout << "\nWhoops!!! You are trying to load previously iterated file that doesnt exist in the Output folder!!!\n";
errors++;
}
file.close();
}
if ( errors != 0 ) {
cout << "\nProgram Aborted :(\n\n";
}
return errors;
}
// Checks that the voxel matrix will fit on the GPU and reports current
// memory pressure. Returns 1 (abort) when it will not fit, else 0.
// Fixes: the out-of-memory test compared used_mem < matrix_mem -- i.e. it
// declared an almost-EMPTY GPU out of memory -- instead of comparing the
// required bytes against what is still free; the voxel-count product is
// also widened to size_t before multiplying to avoid unsigned overflow.
int MemDebugger( unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI ){
size_t free_mem;
size_t total_mem;
cudaMemGetInfo ( &free_mem, &total_mem );
size_t used_mem = total_mem - free_mem;
size_t matrix_mem = (size_t)XDIVI * YDIVI * ZDIVI * sizeof(float);
if( matrix_mem > free_mem ){
cout << "\nWhoops!!! GPU ran out of memory. Reduce the number of VOXELS or CONES\n";
cout << "\nProgram Aborted :(\n";
return 1;
}
float percent_mem_used = 100*(float)used_mem / total_mem;
printf (" -- Percentage of GPU memory used = %2.2f%% \n", percent_mem_used);
return 0;
}
/////////////// Time-Printout ///////////////
/////////////// Time-Printout ///////////////
// Prints an in-place progress line (carriage return, no newline) estimating
// time remaining by scaling the last iteration's duration by the number of
// iterations left.
void Print_Time_Remaining( float clock_start , float clock_end , unsigned IT, unsigned TOTALIT ){
unsigned secs_left = (int)( (TOTALIT-IT)*(clock_end - clock_start)/(CLOCKS_PER_SEC) );
unsigned minutes = secs_left/60;
unsigned seconds = secs_left%60;
if ( minutes > 0 ) printf( " -- %4u -- Time Remaining = %4u minutes and %4u seconds \r" , IT , minutes , seconds );
else printf( " -- %4u -- Time Remaining = %4u seconds \r" , IT , seconds );
cout.flush();  // force the \r-terminated line onto the terminal
}
// Prints the elapsed time between two clock() readings; with fin = 1 it
// switches to the final "Reconstruction Complete" banner instead of the
// per-stage "Time Taken" line.
void Print_Time_Complete( float clock_start , float clock_end , bool fin = 0 ){
float total_time = (clock_end - clock_start)/(CLOCKS_PER_SEC);
unsigned minutes = total_time/60;
float seconds = total_time - minutes*60;
// Select the banner text according to whether this is the final report.
string pref = fin == 1 ? "\nReconstruction Complete \nTotal Runtime =" : " -- Time Taken =";
string post = fin == 1 ? " \n\n" : " \n -- Done\n";
if ( minutes > 0 ) printf( "%s %4u minutes and %4.2f seconds %s" , pref.c_str() , minutes , seconds , post.c_str() );
else printf( "%s %4.2f seconds %s" , pref.c_str() , seconds , post.c_str() );
}
/////////////// A Couple of Vector Functions ///////////////
/////////////// A Couple of Vector Functions ///////////////
// Returns a copy of x with every component multiplied by the scalar c.
std::vector<float> ScalVec(float c, std::vector<float> x){
    std::vector<float> scaled(x.size());
    for (unsigned idx = 0; idx < x.size(); ++idx)
        scaled[idx] = c * x[idx];
    return scaled;
}
// Returns the unit-length direction vector pointing from `start` to `stop`.
vector<float> unit_vector(vector<float> start, vector<float> stop){
unsigned dims = start.size();
float magsq = 0;
vector<float> diff(dims);
// Component-wise difference, accumulating the squared magnitude as we go.
for (unsigned d = 0; d < dims; d++) {
diff[d] = stop[d] - start[d];
magsq += diff[d] * diff[d];
}
// Scale by the reciprocal magnitude to normalise.
return ScalVec( 1.0/sqrt(magsq) , diff );
}
/////////////// Listing functions ///////////////
/////////////// Listing functions ///////////////
// Fills position_matrix[i][j][k] with the world-space centre of each voxel
// (cell centre = start + (index + 0.5) * spacing) and initialises the
// intensity array f. When resuming (IT != 0), previously saved non-zero
// voxel values are reloaded from "<output><IT>.csv" (the sparse format
// written by StoreF_MATT: value, x, y, z, i, j, k per line).
void DefinePositions( vector<vector<vector<vector<float> > > > &position_matrix, float *f, string output, unsigned IT, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI,
float x_start, float y_start, float z_start, float delx, float dely, float delz){ // A function which gives the position values to the position matrix
cout << "Defining lattice Positions and Values:\n";
for (unsigned i = 0; i < XDIVI; i++) {
for (unsigned j = 0; j < YDIVI; j++) {
for (unsigned k = 0; k < ZDIVI; k++) {
position_matrix[i][j][k] = { x_start + delx * ( (float)(i) + 0.5f) , y_start + dely * ( (float)(j) + 0.5f) , z_start + delz * ( (float)(k) + 0.5f) };
f[ i + j * XDIVI + k * XDIVI * YDIVI ] = 0.0; // f starts at zero everywhere (overwritten below when resuming)
}
}
}
if (IT != 0){
// Resume: load the sparse checkpoint written by StoreF_MATT.
ifstream file ( output + to_string(IT) + string(".csv") );
vector<float> linedata;
string line;
unsigned i, j, k;
float load_f;
while( getline ( file, line, '\n')){
stringstream sep(line);
string cell;
while (getline ( sep, cell, ',')) {
linedata.push_back( atof(cell.c_str()) );
}
// Columns: [0] value, [1..3] world position (unused here), [4..6] voxel indices.
load_f = linedata[0];
i = linedata[4];
j = linedata[5];
k = linedata[6];
f[ i + j * XDIVI + k * XDIVI * YDIVI ] = load_f;
linedata.clear();
}
}
cout << " -- Done\n";
}
// 1-sigma uncertainty on the double-scatter Compton angle, propagated from
// the per-deposit energy uncertainty UE through the analytic derivative of
// the scattering formula. E1/E2 are the first/second energy deposits (MeV).
float UncertaintyDoubleScatter( float E1, float E2, float UE ){
    const float MeCsq = 0.5109989461;  // electron rest energy, MeV
    double numerator = (pow(E1,4) + 4 * pow(E1,3) * E2 + 4 * pow(E1,2) * pow(E2,2) + pow(E2,2)) * MeCsq * pow(UE,2);
    double denominator = E1 * pow(E2,2) * pow(E1+E2,2) * ( 2 * E2 * (E1+E2) - E1 * MeCsq );
    // fabs guards against a negative ratio near the unphysical boundary.
    return sqrt( fabs( numerator / denominator ) );
}
// Compton scattering angle (radians) reconstructed from the two energy
// deposits E1 (first scatter) and E2 (remaining energy). Returns 0 when the
// energies imply |cos(theta)| >= 1, i.e. an unphysical event.
float ScatteringAngle( float E1, float E2 ){
    const float MeCsq = 0.5109989461;  // electron rest energy, MeV
    float cosTheta = 1.0 + MeCsq * ( 1.0/(E1+E2) - 1.0/(E2) );
    return ( fabs(cosTheta) < 1 ) ? acos( cosTheta ) : 0;
}
// Klein-Nishina angular weight for a photon of initial energy E0 = E1 + E2
// that retains E2 after the first scatter: E2/E0 + E0/E2.
float KleinNishina( float E1, float E2 ){
    float E0 = E1 + E2;
    float weight = E2/E0 + E0/E2;
    return weight;
}
// Imports CONES double-scatter events from the CSV at `input` and packs each
// into 9 floats of the flat conelist_1D array:
//   [0..2] apex (first scattering position), [3..5] cone axis (unit vector
//   from second scatter towards first), [6] opening angle theta, [7] its
//   uncertainty, [8] Klein-Nishina weight (set to 0 to disable unphysical
//   cones). Input columns: E1, x1, y1, z1, E2, x2, y2, z2.
void CreateCones( float* conelist_1D , string input, unsigned &CONES, float UE ){ // Creating the list of cones by importing from DATAFILE
cout << "Importing Input Data to Cone-Matrix:\n";
ifstream file ( input );
vector<float> linedata;
string line;
float E1; // The energy deposited at the first scattering location
float E2; // The energy deposited at the second scattering location
float theta; // The scattering angle
float utheta; // The uncertainty on the scattering angle
float KN; // The first part of the Klein Nishina coefficiant
unsigned errors = 0;
for (unsigned i = 0; i < CONES; i++) {
getline ( file, line, '\n');
stringstream sep(line);
string cell;
while (getline ( sep, cell, ',')) {
linedata.push_back( atof(cell.c_str()) );
}
E1 = linedata[0];
E2 = linedata[4];
theta = ScatteringAngle( E1, E2 );
utheta = UncertaintyDoubleScatter( E1, E2, UE );
KN = KleinNishina( E1, E2 );
// theta == 0 flags an unphysical |cos(theta)| >= 1 event.
if ( theta == 0 ){
errors++;
KN = 0; // This effectively kills all weighting of an unphysical cone
}
vector<float> axis = unit_vector ( { linedata[5] , linedata[6] , linedata[7] } , { linedata[1] , linedata[2] , linedata[3] } );
//The axis is the unit vector which points in the direction from the second scatter to the first
conelist_1D [ 0 + i * 9 ] = linedata[1]; // First scattering location
conelist_1D [ 1 + i * 9 ] = linedata[2];
conelist_1D [ 2 + i * 9 ] = linedata[3];
conelist_1D [ 3 + i * 9 ] = axis[0]; // Axis of the cone
conelist_1D [ 4 + i * 9 ] = axis[1];
conelist_1D [ 5 + i * 9 ] = axis[2];
conelist_1D [ 6 + i * 9 ] = theta;
conelist_1D [ 7 + i * 9 ] = utheta;
conelist_1D [ 8 + i * 9 ] = KN;
linedata.clear();
}
file.close();
if ( errors ) {
cout << " -- Warning: " << errors << " cones were found to be be unphysical\n";
}
cout << " -- Done\n";
}
/////////////// Printing the data ///////////////
void StoreF_MATT( float *f, unsigned IT, string output, vector<vector<vector<vector<float> > > > position_matrix, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI ){ // How we store the final f values, only need the non-zero voxels
ofstream outfile;
string name = string(output) + to_string(IT) + string(".csv");
outfile.open ( name );
outfile.precision(7);
for (unsigned i = 0; i < XDIVI; i++) {
for (unsigned j = 0; j < YDIVI; j++) {
for (unsigned k = 0; k < ZDIVI; k++) {
if (f[i + j*XDIVI + k *XDIVI*YDIVI]!=0){
outfile << f[i + j*XDIVI + k *XDIVI*YDIVI] << ","
<< position_matrix[i][j][k][0]<< "," << position_matrix[i][j][k][1] << "," << position_matrix[i][j][k][2] << ','
<< i << ',' << j << ',' << k << '\n';
}
}
}
}
outfile.close();
}
void StoreF_CORE( float *f, unsigned IT, string output, unsigned XDIVI, unsigned YDIVI, unsigned ZDIVI,
float x_start, float y_start, float z_start, float delx, float dely, float delz ){
ofstream outfile;
string name = string(output) + to_string(IT) + string(".dat");
outfile.open ( name );
outfile.precision(7);
outfile << XDIVI << ' ' << YDIVI << ' ' << ZDIVI << '\n';
for (unsigned i = 0; i < XDIVI + 1; i++) {
outfile << x_start + i*(delx) << ' ';
}
outfile << '\n';
for (unsigned j = 0; j < YDIVI + 1; j++) {
outfile << y_start + j*(dely) << ' ';
}
outfile << '\n';
for (unsigned k = 0; k < ZDIVI + 1; k++) {
outfile << z_start + k*(delz) << ' ';
}
outfile << '\n';
for (unsigned k = 0; k < ZDIVI; k++) {
for (unsigned j = 0; j < YDIVI; j++) {
for (unsigned i = 0; i < XDIVI; i++) {
outfile << f[i + j*XDIVI + k *XDIVI*YDIVI] << " ";
}
}
outfile << '\n';
}
outfile.close();
}
//
|
19,524 | extern "C"
// Reverse-mode step for elementwise exponentiation: since d/dx e^x = e^x,
// the input gradient is the saved forward output times the incoming
// chain-rule gradient, element by element.
__global__ void backwardExponentiationKernel (int length, float *forwardResults, float *chain, float *backwardResults)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= length)
        return;  // grid-tail guard
    backwardResults[idx] = chain[idx] * forwardResults[idx];
}
19,525 | #include <stdio.h>
#include <stdlib.h>
#include <ctime>
#include <chrono>
#include <curand.h>
#include <curand_kernel.h>
#include <iostream>
using namespace std;
// Block-wide sum reduction in dynamically-sized shared memory: every thread
// contributes `in`, and every thread returns the block total (safe because
// the loop's final __syncthreads() precedes the read of sdata[0]).
// Requires blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory
// supplied at kernel launch.
// NOTE(review): the halving loop assumes blockDim.x is a power of two --
// other sizes would silently drop some lanes. Callers here use 1024.
__device__ unsigned int reduce_sum(unsigned int in)
{
extern __shared__ unsigned int sdata[];
// Perform first level of reduction:
// - Write to shared memory
unsigned int ltid = threadIdx.x;
sdata[ltid] = in;
__syncthreads();
// Do reduction in shared mem: tree reduction, halving the active range
// each pass, with a barrier between passes.
for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1)
{
if (ltid < s)
{
sdata[ltid] += sdata[ltid + s];
}
__syncthreads();
}
return sdata[0];
}
// Monte-Carlo pi kernel: rands holds 2*vectorsize uniform doubles (x values
// then y values). Each thread counts how many of its (x, y) pairs fall
// inside the unit circle, the block reduces those counts in shared memory,
// and thread 0 writes the block total to count[blockIdx.x].
// Fix: the grid is rounded UP, so threads with id >= vectorsize read past
// the end of rands in the original (its loop counter started at 0 for every
// thread, so the tail threads always executed at least one iteration). The
// loop is now a grid-stride loop over valid indices only; out-of-range
// threads contribute 0 but still join the block reduction, which needs all
// threads to participate.
__global__ void mykernel(int vectorsize, int *count, double *rands)
{
    int id = blockIdx.x *blockDim.x + threadIdx.x;
    int step = gridDim.x * blockDim.x;
    const double *rand1 = rands + id;
    const double *rand2 = rand1 + vectorsize;
    int tempcount = 0;
    for (int i = id; i < vectorsize; i += step, rand1 += step, rand2 += step)
    {
        double x = *rand1;
        double y = *rand2;
        if (((x*x)+(y*y)) < 1 )
            tempcount++;
    }
    // Block reduction (all threads must reach this call).
    tempcount = reduce_sum(tempcount);
    if (threadIdx.x == 0)
    {
        count[blockIdx.x] = tempcount;
    }
}
// Allocates a device buffer of 2*vectorsize doubles and fills it with
// uniform variates from the MTGP32 generator (fixed seed 1337 for
// reproducibility). The caller owns the returned device pointer.
double * createrands(double vectorsize)
{
    double *rands = 0;
    cudaMalloc((void **)&rands, 2 * vectorsize * sizeof(double));
    curandGenerator_t prng;
    curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_MTGP32);
    curandSetPseudoRandomGeneratorSeed(prng, 1337);
    curandGenerateUniformDouble(prng, (double *)rands, 2 * vectorsize);
    curandDestroyGenerator(prng);
    return rands;
}
// Estimates pi by Monte Carlo across two GPUs: each device generates its own
// random points and counts hits in parallel (the kernels on device 0 and 1
// run concurrently; the blocking cudaMemcpy on each device fences its work).
// Fix: the original leaked the two random-number device buffers and both
// host count arrays; they are now released.
int main(void)
{
    auto t_start = std::chrono::high_resolution_clock::now();
    double vectorsize = 33553920;
    int blocksize = 1024;
    int gridsize = ceil(vectorsize/blocksize);
    size_t sharedmemsize = blocksize * sizeof(int);   // for reduce_sum
    int *count1, *count2, *cuda_count1, *cuda_count2;
    count1 = (int *)malloc (gridsize * sizeof(int));
    count2 = (int *)malloc (gridsize * sizeof(int));
    //1st gpu: generate, then launch (launch is asynchronous)
    cudaSetDevice(0);
    double *rands1 = createrands(vectorsize);
    cudaMalloc((void **)&cuda_count1, gridsize *sizeof(int));
    mykernel <<<gridsize, blocksize, sharedmemsize>>>(vectorsize, cuda_count1, rands1);
    //2nd gpu
    cudaSetDevice(1);
    double *rands2 = createrands(vectorsize);
    cudaMalloc((void **)&cuda_count2, gridsize *sizeof(int));
    mykernel <<<gridsize, blocksize, sharedmemsize>>>(vectorsize, cuda_count2, rands2);
    //1st gpu: blocking copy also waits for the kernel
    cudaSetDevice(0);
    if (cudaMemcpy (count1, cuda_count1, gridsize *sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
        printf("failed to cpy back 1\n");
    cudaFree(cuda_count1);
    cudaFree(rands1);
    //2nd gpu
    cudaSetDevice(1);
    if (cudaMemcpy (count2, cuda_count2, gridsize *sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess)
        printf("failed to cpy back 2\n");
    cudaFree(cuda_count2);
    cudaFree(rands2);
    // Sum the per-block hit counts from both devices.
    int totalcount = 0;
    for (int i = 0; i < gridsize; i ++)
    {
        totalcount += count1[i];
    }
    for (int i = 0; i < gridsize; i ++)
    {
        totalcount += count2[i];
    }
    printf("count = %d\n", totalcount);
    // hits / total points, with 2*vectorsize points drawn overall
    float ratio = totalcount / (2*vectorsize);
    printf("pi = %.15f \n", (ratio * 4));
    free(count1);
    free(count2);
    auto t_end = std::chrono::high_resolution_clock::now();
    printf("duration: %f\n", (std::chrono::duration<double, std::milli>(t_end-t_start).count()/1000));
    return 0;
}
|
19,526 | #include <stdint.h>
#define IPAD 0x36363636
#define OPAD 0x5c5c5c5c
#include "sha1.cuh"
// XORs n bytes of src into dest: 4-byte words for the bulk, then the
// remaining 0-3 tail bytes individually.
__device__ void memxor (void * dest, const void * src, size_t n) {
    size_t words = n / 4;
    int tail = n % 4;
    const int *wordSrc = (const int *)src;
    int *wordDst = (int *)dest;
    for (size_t w = 0; w < words; ++w)
        *wordDst++ ^= *wordSrc++;
    // Handle the sub-word remainder byte by byte.
    const char *byteSrc = (const char *)src + 4 * words;
    char *byteDst = (char *)dest + 4 * words;
    while (tail-- > 0)
        *byteDst++ ^= *byteSrc++;
}
// HMAC-SHA1 (RFC 2104) on the device:
//   resbuf <- SHA1( (K ^ opad) || SHA1( (K ^ ipad) || in ) )
// using the 64-byte block and 20-byte innerhash scratch buffers in *chars.
// NOTE(review): cudaMemsetDevice is NOT a CUDA runtime API -- presumably a
// project helper (sha1.cuh?) that fills chars->block with the repeated pad
// byte. Confirm it fills per-byte (0x36 / 0x5c) rather than per-int.
// NOTE(review): assumes keylen <= 64; RFC 2104 requires longer keys to be
// pre-hashed, which this routine does not do.
__device__ void hmac_sha1 (const void * key, uint32_t keylen, const void *in, uint32_t inlen, void *resbuf, struct globalChars *chars) {
struct sha1_ctx inner;
struct sha1_ctx outer;
// Inner hash: SHA1((key ^ ipad) || message).
sha1_init_ctx (&inner);
cudaMemsetDevice (chars->block, IPAD, sizeof (chars->block));
memxor(chars->block, key, keylen);
sha1_process_block (chars->block, 64, &inner);
sha1_process_bytes (in, inlen, &inner);
sha1_finish_ctx (&inner, chars->innerhash);
/* Compute result from KEY and INNERHASH. */
sha1_init_ctx (&outer);
cudaMemsetDevice (chars->block, OPAD, sizeof (chars->block));
memxor(chars->block, key, keylen);
sha1_process_block (chars->block, 64, &outer);
sha1_process_bytes (chars->innerhash, 20, &outer);
sha1_finish_ctx (&outer, resbuf);
}
|
19,527 | #include "includes.h"
// CUDA runtime
// Helper functions and utilities to work with CUDA
#define N 256
//#define M 256
//A function declared __global__ tells the compiler it is invoked by the CPU (host) and executed on the GPU (device).
// Naive dense matrix multiply for Width x Width row-major matrices:
// dev_c = dev_a * dev_b, one output element per thread of a 2D launch.
__global__ void matrix_mult(float *dev_a, float* dev_b, float* dev_c, int Width)
{
    int row = blockIdx.y*blockDim.y+threadIdx.y;
    int col = blockIdx.x*blockDim.x+threadIdx.x;
    if (row >= Width || col >= Width)
        return;  // grid-tail guard for both dimensions
    // Dot product of row `row` of A with column `col` of B.
    float acc = 0;
    for (int k = 0; k < Width; k++)
        acc += dev_a[row*Width + k] * dev_b[k*Width+col];
    dev_c[row*Width + col] = acc;
}
19,528 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Fall 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// One pass of a work-inefficient (Hillis-Steele style) EXCLUSIVE prefix sum
// over numElements ints, double-buffered between oldSum (read) and newSum
// (write). The host is expected to call this repeatedly: once with
// distance == 0 (the shift-right step that makes the scan exclusive), then
// with distance doubling each pass, swapping the buffers between calls.
__global__ void exclusive_prefix_sum_gpu(int * oldSum, int * newSum, int distance, int numElements) {
/** YOUR CODE GOES BELOW **/
int threadNum = blockDim.x * gridDim.x;
int threadId = blockIdx.x * blockDim.x + threadIdx.x;
// NOTE(review): this barrier is unnecessary -- no shared state has been
// touched yet -- but it is harmless since every thread reaches it.
__syncthreads();
// Grid-stride loop: any launch configuration covers all elements.
for (int i = threadId; i < numElements; i += threadNum) {
if (distance == 0) {
// First pass: seed the identity 0 at index 0 and shift everything
// right by one (oldSum[i] -> newSum[i+1]).
newSum[0] = 0;
if (i < numElements - 1) {
newSum[i + 1] = oldSum[i];
}
} else {
// Doubling pass: each element gains the partial sum `distance`
// positions to its left, if one exists.
if (i >= distance) {
newSum[i] = oldSum[i] + oldSum[i - distance];
}
else {
newSum[i] = oldSum[i];
}
}
}
/** YOUR CODE GOES ABOVE **/
}
|
19,529 | #include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <thrust/generate.h>
#include <thrust/random.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <cmath>
const double niter = 10000;
// Per-thread Monte-Carlo trial functor: seeds a thrust RNG from the thread
// id (so each thread gets a distinct, reproducible stream) and counts how
// many of `niter` uniform points land inside the unit quarter-circle.
// Returns the count as a double so transform_reduce can sum it.
struct montecarlo :
public thrust::unary_function<unsigned int, double>
{
__host__ __device__
double operator()(unsigned int thread_id)
{
thrust::default_random_engine rng(123u ^ thread_id);
thrust::uniform_real_distribution<double> u01(0, 1);
double hits = 0.0;
for (int i = 0; i < niter; ++i)
{
double x = u01(rng);   // draw order matters: x first,
double y = u01(rng);   // then y, to keep the RNG stream identical
if ((x * x) + (y * y) <= 1)
hits += 1;
}
return hits;
}
};
// Estimates pi with thrust: niter threads each run niter trials (see the
// montecarlo functor), so the hit fraction is count / niter^2 and
// pi ~= 4 * count / niter^2. transform_reduce sums the per-thread counts
// without an explicit kernel.
// NOTE(review): std::cout is used but <iostream> is not included directly;
// it presumably arrives transitively via the thrust headers -- consider
// including it explicitly.
int main(int argc, char* argv[])
{
double pi;
double count = 0.0;
// Fold the per-thread hit counts over thread ids [0, niter).
count = thrust::transform_reduce(thrust::counting_iterator<double>(0),
thrust::counting_iterator<double>(niter),
montecarlo(),
0.0,
thrust::plus<double>());
pi = (4.0 * count) / (niter * niter);
std::cout << "Pi: " << pi << std::endl;
}
|
19,530 | #include<iostream>
#include<cuda.h>
#include<math.h>
#include <time.h>
/* 1- nvcc acopladas_B3-2.cu -o acopladas_B3-2
2-./acopladas_B3-2
We are using Dormand-Prince Method based on http://depa.fquim.unam.mx/amyd/archivero/DormandPrince_19856.pdf
*/
using namespace std;
// Integer vector add for a single-block launch: c[t] = a[t] + b[t], one
// element per thread.
__global__ void suma(int *a,int *b,int *c)
{
    int tid = threadIdx.x;
    c[tid] = a[tid] + b[tid];
}
// Per-thread solver: integrates the coupled pump/Stokes power ODE system
// with an adaptive Dormand-Prince RK45 scheme (coefficients per the DP1985
// tableau referenced at the top of the file), repeatedly lowering the
// coupling dev_K[id] by dev_Delta[id] until the output ratio PsL/Ppo drops
// below ~1e-6. Results are written to dev_PpF[id] / dev_PsF[id].
// NOTE(review): dev_PsF[id] is assigned y2[1], which is only ever set to
// the INITIAL condition before the integration loop (the integrator updates
// y2[0]) -- confirm whether y2[0] was intended.
// NOTE(review): dev_w and dev_PsL_Ppo are accepted but never used here.
__global__ void Resuelve(double *dev_w, double *dev_Ppo, double *dev_PsL_Ppo,double *dev_K,double *dev_Delta,double *dev_PpF,double *dev_PsF)
{
int id=threadIdx.x;
double x0[2];double y1[3],y2[3];
double eps=0.000001;   // error tolerance for the adaptive step
double t0=0;
double h=0.01;         // initial step size
double hmin=0.00001;   // step-size clamp
double hmax=0.1;
double gB=4e-14;       // Brillouin gain
double Aeff=85e-18;    // effective area
double a=0.2/4.343;    // attenuation (dB/km -> 1/km)
double b=gB/(1.2*Aeff);
double K=0.53;
double PsL_Ppo=0.8;
int c=0;
// Outer loop: reduce the coupling until the power ratio meets tolerance.
while (PsL_Ppo>0.0000011)
{
dev_K[id]=dev_K[id]-dev_Delta[id];
// K=K-0.000001;
K=dev_K[id];
// Reset initial conditions for this value of K.
x0[0]=dev_Ppo[id]; //x0[0]=0.0005;
x0[1]=K*x0[0]; //x0[1]=dev_K[id]*dev_Ppo[id];
y1[0]=x0[0]; y1[1]=x0[0];
y2[0]=x0[1]; y2[1]=x0[1];
c=c+1;
t0=0;
h=0.01;
// Inner loop: adaptive RK45 integration over t in [0, 25].
while(t0<=25)
{
///////////////
// Seven Dormand-Prince stages; kN1 advances y1 (pump), kN2 advances y2 (Stokes).
double k11=-(a)*y1[0]-(b)*y1[0]*y2[0];
double k12=(a)*y2[0]-(b)*y1[0]*y2[0];
k11=k11*h;
k12=k12*h;
double ty1_k11=y1[0]+(k11)*(1.0/5);
double ty2_k12=y2[0]+(k12)*(1.0/5);
double k21=-(a)*ty1_k11-(b)*ty1_k11*ty2_k12;
double k22=(a)*ty2_k12-(b)*ty1_k11*ty2_k12;
k21=k21*h;
k22=k22*h;
double ty1_k21=y1[0]+(3.0/40)*(k11)+(9.0/40)*(k21);
double ty2_k22=y2[0]+(3.0/40)*(k12)+(9.0/40)*(k22);
double k31=-(a)*ty1_k21-(b)*ty1_k21*ty2_k22;
double k32=(a)*ty2_k22-(b)*ty1_k21*ty2_k22;
k31=k31*h;
k32=k32*h;
double ty1_k31=y1[0]+(44.0/45)*k11-(56.0/15)*k21+(32.0/9)*(k31);
double ty2_k32=y2[0]+(44.0/45)*k12-(56.0/15)*k22+(32.0/9)*(k32);
double k41=-(a)*ty1_k31-(b)*ty1_k31*ty2_k32;
double k42=(a)*ty2_k32-(b)*ty1_k31*ty2_k32;
k41=k41*h;
k42=k42*h;
double ty1_k41=y1[0]+(19372.0/6561)*k11-(25360.0/2187)*k21+(64448.0/6561)*(k31)-(212.0/729)*(k41);
double ty2_k42=y2[0]+(19372.0/6561)*k12-(25360.0/2187)*k22+(64448.0/6561)*(k32)-(212.0/729)*(k42);
double k51=-(a)*ty1_k41-(b)*ty1_k41*ty2_k42;
double k52=(a)*ty2_k42-(b)*ty1_k41*ty2_k42;
k51=k51*h;
k52=k52*h;
double ty1_k51=y1[0]+(9017.0/3168)*k11-(355.0/33)*k21-(46732.0/5247)*(k31)+(49.0/176)*(k41)-(5103.0/18656)*(k51);
double ty2_k52=y2[0]+(9017.0/3168)*k12-(355.0/33)*k22-(46732.0/5247)*(k32)+(49.0/176)*(k42)-(5103.0/18656)*(k52);
double k61=-(a)*ty1_k51-(b)*ty1_k51*ty2_k52;
double k62=(a)*ty2_k52-(b)*ty1_k51*ty2_k52;
k61=k61*h;
k62=k62*h;
double ty1_k61=y1[0]+(35.0/384)*k11+(500.0/1113)*(k31)+(125.0/192)*(k41)-(2187.0/6784)*(k51)+(11.0/84)*(k61);
double ty2_k62=y2[0]+(35.0/384)*k12+(500.0/1113)*(k32)+(125.0/192)*(k42)-(2187.0/6784)*(k52)+(11.0/84)*(k62);
double k71=-(a)*ty1_k61-(b)*ty1_k61*ty2_k62;
double k72=(a)*ty2_k62-(b)*ty1_k61*ty2_k62;
k71=k71*h;
k72=k72*h;
// 5th-order solution (tmpy*) and 4th-order embedded solution (tmpz2)
// for the error estimate.
double tmpy1=y1[0]+((35.0/384)*k11+(500.0/1113)*k31+(125.0/192)*k41-(2187.0/6784)*k51+(11.0/84)*k61);
double tmpy2=y2[0]+((35.0/384)*k12+(500.0/1113)*k32+(125.0/192)*k42-(2187.0/6784)*k52+(11.0/84)*k62);
// double tmpz1=y1[0]+(5179.0/57600)*k11+(7571.0/16695)*k31+(393.0/640)*k41-(92097.0/339200)*k51+(187.0/2100)*k61+(1.0/40)*k71;
double tmpz2=y2[0]+(5179.0/57600)*k12+(7571.0/16695)*k32+(393.0/640)*k42-(92097.0/339200)*k52+(187.0/2100)*k62+(1.0/40)*k72;
double err=abs(tmpy2-tmpz2);
// Step-size controller (order-5 exponent), clamped to [hmin, hmax].
double s=pow((eps*h)/(2.0*err),1.0/5);
double h1=s*h;
if (h1<hmin)
h1=hmin;
else if(h1>hmax) h1=hmax;
t0=t0+h;
y1[0]=tmpy1;
y2[0]=tmpy2;
h=h1;
///////////////
} // end of the integration loop
double PsL=y2[0];
//dev_PsL_Ppo[id]=double(PsL)/dev_Ppo[id];
PsL_Ppo=double(PsL)/dev_Ppo[id];
} // end of the tolerance loop
dev_PpF[id]=y1[0];
dev_PsF[id]=y2[1];
//dev_PpF[id]=10*log10(y1[0]/1e-3);
//dev_PsF[id]=10*log10(y2[1]/1e-3) ;
//dev_PpF[id]=8;
//dev_PsF[id]=9;
};
// Host driver: builds the parameter sweep over initial pump powers Ppo,
// copies everything to the device, runs one Resuelve thread per power, and
// prints the final pump/Stokes powers plus the kernel time.
// Fixes: (1) host_w was allocated with the PRE-increment Nw and then both
// written and copied with Nw+1 elements -- a heap buffer overflow; the
// count is now grown before allocation. (2) the CUDA timing events were
// never destroyed.
int main()
{
double DELTA;
double *host_w,*host_Ppo,*host_PpF,*host_PsF,*host_PsL_Ppo,*host_K,*host_Delta;
double *dev_w,*dev_Ppo,*dev_PpF,*dev_PsF,*dev_PsL_Ppo,*dev_K,*dev_Delta;
DELTA=0.000001;
// Frequency sweep: Nw+1 points from iniciow to finalw inclusive.
double incrementow=12.5;
double iniciow=0.0;
double finalw=25.0;
int Nw=ceil((finalw-iniciow)/incrementow);
Nw=Nw+1;               // fix: grow the count BEFORE allocating
host_w=new double[Nw];
for (int i=0;i<Nw;i++)
{
host_w[i]=iniciow;
iniciow=iniciow+incrementow;
}
// Pump-power sweep: N points from inicioPpo to finalPpo.
double incrementoPpo=0.0001;
double inicioPpo=0.0005;
double finalPpo=0.0045;
int N=ceil((finalPpo-inicioPpo)/incrementoPpo);
N=N+1;
host_Ppo=new double[N];
host_PsL_Ppo=new double[N];
host_K= new double[N];
host_Delta=new double[N];
host_PpF=new double[N];
host_PsF=new double[N];
for (int i=0;i<N;i++)
{
host_PpF[i]=0.0;
host_PsF[i]=0.0;
host_PsL_Ppo[i]=0.8;
host_K[i]=0.53;
host_Delta[i]=DELTA;
host_Ppo[i]=inicioPpo;
inicioPpo=inicioPpo+incrementoPpo;
}
int memw=sizeof(double)*Nw;
int mem=sizeof(double)*N;
// Device allocations and uploads.
cudaMalloc((void **)&dev_Ppo,mem);
cudaMalloc((void **)&dev_w,memw);
cudaMalloc((void **)&dev_PsL_Ppo,mem);
cudaMalloc((void **)&dev_K,mem);
cudaMalloc((void **)&dev_Delta,mem);
cudaMalloc((void **)&dev_PpF,mem);
cudaMalloc((void **)&dev_PsF,mem);
cudaMemcpy(dev_w,host_w,memw,cudaMemcpyHostToDevice);
cudaMemcpy(dev_Ppo,host_Ppo,mem,cudaMemcpyHostToDevice);
cudaMemcpy(dev_PsL_Ppo,host_PsL_Ppo,mem,cudaMemcpyHostToDevice);
cudaMemcpy(dev_K,host_K,mem,cudaMemcpyHostToDevice);
cudaMemcpy(dev_Delta,host_Delta,mem,cudaMemcpyHostToDevice);
cudaMemcpy(dev_PpF,host_PpF,mem,cudaMemcpyHostToDevice);
cudaMemcpy(dev_PsF,host_PsF,mem,cudaMemcpyHostToDevice);
// Time the kernel with CUDA events.
cudaEvent_t start,stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start,0);
Resuelve<<<1,N>>>(dev_w,dev_Ppo,dev_PsL_Ppo,dev_K,dev_Delta,dev_PpF,dev_PsF);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time,start,stop);
cudaEventDestroy(start);   // fix: release the timing events
cudaEventDestroy(stop);
cout<<"tiempo : "<<time/1000<<endl;
printf("\nelapsedTime in ms= %f:\n",time);
// Download and print the results.
cudaMemcpy(host_K,dev_K,mem,cudaMemcpyDeviceToHost);
cudaMemcpy(host_PsL_Ppo,dev_PsL_Ppo,mem,cudaMemcpyDeviceToHost);
cudaMemcpy(host_PpF,dev_PpF,mem,cudaMemcpyDeviceToHost);
cudaMemcpy(host_PsF,dev_PsF,mem,cudaMemcpyDeviceToHost);
cout<<endl;
cout<<N;
cout<<endl;
cout<<"PpF"<<endl;
for(int i=0;i<N;i++)
{
cout<<" "<<host_PpF[i]<<" ";
}
cout<<endl;
cout<<N;
cout<<endl;
cout<<"PsF"<<endl;
for(int i=0;i<N;i++)
{
cout<<" "<<host_PsF[i]<<" ";
}
cout<<endl;
// Release device and host resources.
cudaFree(dev_Ppo);
cudaFree(dev_w);
cudaFree(dev_K);
cudaFree(dev_PsL_Ppo);
cudaFree(dev_Delta);
cudaFree(dev_PpF);
cudaFree(dev_PsF);
delete [] host_Ppo;
delete [] host_w;
delete [] host_K;
delete [] host_PsL_Ppo;
delete [] host_Delta;
delete [] host_PpF;
delete [] host_PsF;
}
|
19,531 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <time.h>
using namespace std;
//
// CPU reference: c (rows1 x cols2) = a (rows1 x cols1) * b (cols1 x cols2);
// all matrices are stored row-major.
void simpleMatMul(int* c, int* a, int* b, int rows1, int cols1, int cols2) {
    for (int r = 0; r < rows1; r++)
    {
        for (int col = 0; col < cols2; col++)
        {
            int acc = 0;
            for (int k = 0; k < cols1; k++)
            {
                acc += a[r * cols1 + k] * b[k * cols2 + col];
            }
            c[r * cols2 + col] = acc;
        }
    }
}
// One thread per output element: thread (y, x) computes c[row][col] for
// row = global y, col = global x.  Out-of-range threads exit immediately.
__global__ void matMulKernel(int* c, int* a, int* b, int rows1, int cols1, int cols2)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < rows1 && col < cols2)
    {
        int acc = 0;
        for (int k = 0; k < cols1; ++k)
        {
            acc += a[row * cols1 + k] * b[k * cols2 + col];
        }
        c[row * cols2 + col] = acc;
    }
}
// Helper function for using CUDA to add vectors in parallel.
// Helper: multiplies a (rows1 x cols1) by b (cols1 x cols2) on the GPU,
// writing the rows1 x cols2 result into c.  Returns the last CUDA status.
cudaError_t matMulWithCuda(int* c, int* a, int* b, int rows1, int cols1, int cols2)
{
    int* dev_a = 0;
    int* dev_b = 0;
    int* dev_c = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
    }
    // Allocate GPU buffers for the two inputs and the output.
    cudaStatus = cudaMalloc((void**)&dev_c, rows1 * cols2 * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_a, rows1 * cols1 * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    cudaStatus = cudaMalloc((void**)&dev_b, cols1 * cols2 * sizeof(int));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
    }
    // Copy input matrices from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, rows1 * cols1 * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaStatus = cudaMemcpy(dev_b, b, cols1 * cols2 * sizeof(int), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    dim3 blockSize = dim3(32, 32, 1);
    // FIX: the kernel maps blockIdx.x/threadIdx.x to COLUMNS (cols2) and
    // blockIdx.y/threadIdx.y to ROWS (rows1).  The original grid was built
    // from rows1 and cols1, so part of the output was never computed
    // whenever cols2 > rows1 (and spurious blocks launched otherwise).
    dim3 gridSize = dim3((cols2 + 31) / 32, (rows1 + 31) / 32, 1);
    // Launch a kernel on the GPU with one thread for each output element.
    matMulKernel <<< gridSize, blockSize >>> (dev_c, dev_a, dev_b, rows1, cols1, cols2);
    // Check for any errors launching the kernel.
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "matMulKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching matMulKernel!\n", cudaStatus);
    }
    // Copy output matrix from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, rows1 * cols2 * sizeof(int), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
    }
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
// Reads matrix dimensions from stdin, multiplies on the GPU and on the CPU,
// and reports both timings.
int main()
{
    int rows1;
    int cols1;
    int cols2;
    cout << "Enter the number of rows and columns" << endl;
    cout << "Number of rows for 1 matrix:" << endl;
    cin >> rows1;
    cout << "Number of columns for 1 matrix:" << endl;
    cin >> cols1;
    cout << "Number of columns for 2 matrix:" << endl;
    cin >> cols2;
    int* a = new int[rows1 * cols1];
    int* b = new int[cols1 * cols2];
    int* c = new int[rows1 * cols2];
    // Deterministic test fill: element value equals its linear index.
    for (int i = 0; i < rows1; i++)
    {
        for (int j = 0; j < cols1; j++)
        {
            a[i * cols1 + j] = i * cols1 + j;
        }
    }
    for (int i = 0; i < cols1; i++)
    {
        for (int j = 0; j < cols2; j++)
        {
            b[i * cols2 + j] = i * cols2 + j;
        }
    }
    clock_t start = clock();
    cudaError_t cudaStatus = matMulWithCuda(c, a, b, rows1, cols1, cols2);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "matMulWithCuda failed!");
    }
    clock_t end = clock();
    cout << "Cuda time: " << (double)(end - start) / CLOCKS_PER_SEC << endl;
    // FIX: the original leaked the GPU result buffer by re-assigning
    // `c = new int[rows1 * cols2]` here.  simpleMatMul overwrites every
    // element, so the existing buffer is simply reused.
    start = clock();
    simpleMatMul(c, a, b, rows1, cols1, cols2);
    end = clock();
    cout << "CPU time: " << (double)(end - start) / CLOCKS_PER_SEC << endl;
    delete[] a;
    delete[] b;
    delete[] c;
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
|
19,532 | #include "moment-update.hh"
#include <cassert>
#include <stdexcept>
#include "graph.hh"
#include "mse-grad.hh"
#include "ops-builder.hh"
#include "variable.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Graph op that updates an optimizer moment buffer stored in `var` from the
// gradient-like tensor produced by `dt` (e.g. Adam's first/second moments).
// coeff1/coeff2 weight the old value and the incoming delta; sq_update
// selects the squared-delta variant (op_moment_update2).
MomentUpdate::MomentUpdate(Variable* var, Op* dt,
dbl_t coeff1, dbl_t coeff2, bool sq_update)
: Op("moment_update", var->shape_get(), {var, dt})
, var_(var)
, coeff1_(coeff1)
, coeff2_(coeff2)
, sq_update_(sq_update)
{}
// Lowers this op into a runtime node.  The update happens in place on the
// variable's storage (ptr), so the compiled node's output buffer is the
// variable data itself.
void MomentUpdate::compile()
{
auto& g = Graph::instance();
// preds()[1] is the delta op `dt`; it must already be compiled.
auto& cdt = g.compiled(preds()[1]);
Shape out_shape = cdt.out_shape;
std::size_t len = out_shape.total();
dbl_t* ptr = var_->data_begin();
// Choose the squared or plain moment-update runtime kernel; both depend
// on the delta node having produced cdt.out_data first.
auto out_node = sq_update_?
rt::Node::op_moment_update2(ptr, cdt.out_data,
coeff1_, coeff2_, len,
{cdt.out_node})
: rt::Node::op_moment_update(ptr, cdt.out_data,
coeff1_, coeff2_, len,
{cdt.out_node});
// Register: this op's result aliases the variable storage (ptr).
g.add_compiled(this, {out_node}, {}, out_node, out_shape, ptr);
}
}
|
19,533 | #include <cassert>
// Intentionally doing a cuda assert to generate xid error 43
// Deliberately fails a device-side assert on every launched thread.
// Used to provoke an XID 43 error for fault-handling tests; the failure
// surfaces on the host at the next synchronizing CUDA call.
// The parameters are unused — they exist only to mimic a real kernel signature.
extern "C" __global__ void make_assert(int* buf, size_t size, int iterations)
{
assert(false);
}
|
19,534 | #include <stdio.h>
const int INPUT_DIM = 100;
const int FILTER_DIM= 5; // should be factor of INPUT_DIM
const int CONV_OUT_DIM = INPUT_DIM / FILTER_DIM;
const int CONV_LAYER_SIZE = 10;
const int OUT_NEURON_DIM = CONV_OUT_DIM * CONV_OUT_DIM * CONV_LAYER_SIZE;
const int OUT_LAYER_SIZE = 10;
// "Convolution" layer: one block per filter (blockIdx.x selects the filter).
// NOTE(review): the filter slides with stride FILTER_DIM, i.e. over
// NON-overlapping tiles, so each of the CONV_OUT_DIM^2 outputs is the dot
// product of one input tile with the filter — confirm this strided behaviour
// (rather than a dense convolution) is intended.
extern "C" __global__ void convolution_layer(double* input, double* conv_filters, double* outputs) {
for(int i = 0; i < INPUT_DIM; i += FILTER_DIM) {
for(int j = 0; j < INPUT_DIM; j += FILTER_DIM) {
double prod = 0; // dot product of tile (i..i+F, j..j+F) with this block's filter
for(int x = 0; x < FILTER_DIM; x++) {
for(int y = 0; y < FILTER_DIM; y++) {
prod += input[(i + x) * INPUT_DIM + j + y] * conv_filters[blockIdx.x * FILTER_DIM * FILTER_DIM + x * FILTER_DIM + y];
}
}
// Output layout: [filter][tile_row][tile_col], CONV_OUT_DIM^2 per filter.
outputs[blockIdx.x * CONV_OUT_DIM * CONV_OUT_DIM + i / FILTER_DIM * CONV_OUT_DIM + j / FILTER_DIM] = prod;
}
}
}
// In-place ReLU over one feature map: block blockIdx.x clamps every
// negative activation of its CONV_OUT_DIM x CONV_OUT_DIM map to zero.
extern "C" __global__ void relu_layer(double* conv_out) {
    const int base = blockIdx.x * CONV_OUT_DIM * CONV_OUT_DIM;
    for (int r = 0; r < CONV_OUT_DIM; r++) {
        for (int c = 0; c < CONV_OUT_DIM; c++) {
            const int idx = base + r * CONV_OUT_DIM + c;
            if (conv_out[idx] < 0.0) {
                conv_out[idx] = 0.0;
            }
        }
    }
}
// Fully-connected output layer: one block per output neuron; each computes
// the dot product of the flattened activations with that neuron's weights.
extern "C" __global__ void output_layer(double* input, double* weights, double* output) {
    const double* w = weights + blockIdx.x * OUT_NEURON_DIM;
    double acc = 0;
    for (int k = 0; k < OUT_NEURON_DIM; k++) {
        acc += w[k] * input[k];
    }
    output[blockIdx.x] = acc;
}
// extern "C" __global__ void debug(const double* info) {
// printf("WTF %.2f \n", info[0]);
// }
|
19,535 | #include <stdio.h>
// Single-thread kernel: stores the sum of the two device scalars into *c.
__global__ void add(int *a, int *b, int *c) {
    c[0] = a[0] + b[0];
}
// Adds two host integers on the GPU via a single-thread kernel launch
// and prints the result.
int main(void) {
    int hostA, hostB, hostC;   // host copies of the operands and result
    int *devA, *devB, *devC;   // device copies
    const int bytes = sizeof(int);
    // Device buffers for the two inputs and the result.
    cudaMalloc((void **) &devA, bytes);
    cudaMalloc((void **) &devB, bytes);
    cudaMalloc((void **) &devC, bytes);
    // Input values.
    hostA = 2;
    hostB = 7;
    // Upload operands.
    cudaMemcpy(devA, &hostA, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(devB, &hostB, bytes, cudaMemcpyHostToDevice);
    // Launch add() on a single thread.
    add <<<1, 1>>> (devA, devB, devC);
    // Blocking copy — also synchronizes with the kernel.
    cudaMemcpy(&hostC, devC, bytes, cudaMemcpyDeviceToHost);
    // Cleanup.
    cudaFree(devA);
    cudaFree(devB);
    cudaFree(devC);
    printf("%i", hostC);
    return 0;
}
|
19,536 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<sys/time.h>
#define NUM 10000000
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
// One step of a pairwise XOR reduction over the int array stored in mem:
// the element at i*2*skip absorbs (XORs in) its partner `skip` slots away.
__global__ void calculate(char *mem, int num, int skip)
{
    int *data = (int *)mem;
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int left = tid * 2 * skip;
    if (left < num - 1)
    {
        data[left] = data[left] ^ data[left + skip];
    }
}
// XOR-reduces `num` random ints on the GPU and prints the result plus timings.
// Usage: prog [count seed]; defaults to NUM elements and seed 0.
int main(int argc, char **argv)
{
    struct timeval start, end, t_start, t_end;
    int i;
    int seed = 0;            /* FIX: was uninitialized when argc != 3 (UB in srand) */
    char *ptr;
    char *sptr;
    int *pa;
    char *gpu_mem;
    unsigned long num = NUM; /* Default value of num from MACRO */
    int blocks;
    if(argc == 3){
        long requested = atol(argv[1]);  /* FIX: atoi truncates large counts */
        if(requested > 0)
            num = (unsigned long)requested;
        seed = atoi(argv[2]);
    }
    srand(seed);
    /* Allocate host (CPU) memory and initialize with random ints */
    ptr = (char *)malloc(num * sizeof(int));
    if(ptr == NULL){                     /* FIX: malloc was unchecked */
        printf("host malloc failed\n");
        exit(-1);
    }
    sptr = ptr;
    for(i=0; i<num; ++i){
        pa = (int*) sptr;
        *pa=(int)rand();
        sptr +=sizeof(int);
    }
    gettimeofday(&t_start, NULL);
    /* Allocate GPU memory and copy from CPU --> GPU */
    cudaMalloc(&gpu_mem, num * sizeof(int));
    CUDA_ERROR_EXIT("cudaMalloc");
    cudaMemcpy(gpu_mem, ptr, num * sizeof(int) , cudaMemcpyHostToDevice);
    CUDA_ERROR_EXIT("cudaMemcpy");
    gettimeofday(&start, NULL);
    blocks = num /1024;
    if(num % 1024)
        ++blocks;
    /* Tree reduction: each pass XORs pairs `skip` apart, halving the
       number of live elements until everything folds into index 0. */
    int skip=1;
    for(i=num;i>1;){
        calculate<<<blocks, 1024>>>(gpu_mem, num,skip);
        CUDA_ERROR_EXIT("kernel invocation");
        skip=skip*2;
        if(i%2==0)
            i=i/2;
        else i=i/2+1;
    }
    gettimeofday(&end, NULL);
    /* Copy back result */
    cudaMemcpy(ptr, gpu_mem, num * sizeof(int) , cudaMemcpyDeviceToHost);
    CUDA_ERROR_EXIT("memcpy");
    gettimeofday(&t_end, NULL);
    printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
    cudaFree(gpu_mem);
    sptr = ptr;
    /* Print the answer: the full reduction ends up in element 0. */
    pa = (int*)sptr;
    printf("result=%d\n", *pa);
    free(ptr);
}
|
19,537 | #include <stdlib.h>
#include <stdio.h>
#define FILENAME "./dblp-co-authors.txt"
#define NumAuthor 317080
#define DataLen 1049866
#define BlockSize 1024
#define GridSize int(DataLen/BlockSize) + 1
#define MAX 343
#define newGridSize int(NumAuthor/BlockSize) + 1
int dataset[DataLen * 2];// array to store the raw dataset
void dataset_read(int * dataset);
__global__ void dataset_parse(int * dataset, int * output, int * full_output);
//int dataset_maxCoAuthor(int * output, int lenght);
//void dataset_plot(int * output, int lenght, int max);
__global__ void output_parse(int * full_output, int * output, int * num_author_array);
__device__ void check_pair(int * full_output, int start, int len, int possible_pair, int * pair_array, int * pair_len);
__device__ void count_diff_auth(int * pair_array, int * new_array,int pair_len, int * pure_len, int indx);
// Driver: parses the co-authorship dataset on the GPU, then counts
// "friend-of-friend" author pairs per author with a second kernel.
int main(int argc, char * argv[])
{
    /* FIX: output + full_output total well over 400 MB (MAX * NumAuthor
       ints); as stack locals they overflowed the stack before main even
       ran.  `static` places them in the data segment and zero-initializes
       them, matching the original "= {0}" intent. */
    static int output[NumAuthor];            /* co-author count per author */
    static int full_output[MAX * NumAuthor]; /* co-author id lists, MAX per author */
    int * cu_output;
    int * cu_full_output;
    dataset_read(dataset);
    /* Set device that we will use for our cuda code */
    cudaSetDevice(0);
    /* Time Variables */
    cudaEvent_t start, stop;
    cudaEventCreate (&start);
    cudaEventCreate (&stop);
    float time;
    int * cu_dataset;
    cudaEventRecord(start,0);
    cudaMalloc((void**)&cu_output, NumAuthor * sizeof(int));
    cudaMalloc((void**)&cu_full_output, MAX * NumAuthor * sizeof(int));
    cudaMalloc((void**)&cu_dataset, DataLen * 2 * sizeof(int));
    cudaMemcpy(cu_dataset, dataset, DataLen * 2 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(cu_full_output, full_output, MAX * NumAuthor * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(cu_output, output, NumAuthor * sizeof(int), cudaMemcpyHostToDevice);
    dataset_parse<<<GridSize, BlockSize>>>(cu_dataset, cu_output, cu_full_output);
    cudaDeviceSynchronize();
    cudaMemcpy(output, cu_output, NumAuthor * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(full_output, cu_full_output, MAX * NumAuthor * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    int * cu_num_author_array;
    int * num_author_array = (int*)malloc(NumAuthor * sizeof(int));
    cudaMalloc((void**)&cu_num_author_array, NumAuthor * sizeof(int));
    cudaMemset(cu_num_author_array, 0, NumAuthor * sizeof(int));
    cudaEventRecord(start,0);
    output_parse<<<newGridSize, BlockSize>>>(cu_full_output, cu_output, cu_num_author_array);
    cudaDeviceSynchronize();
    printf("Error in Kernel output_parse:%s\n",cudaGetErrorString(cudaGetLastError()));
    cudaDeviceSynchronize();
    cudaMemcpy(num_author_array, cu_num_author_array, NumAuthor * sizeof(int), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Elapsed time in Kernel output_parse: %f\n", time);
    int total_number = 0;
    for (int i = 0; i < NumAuthor; i++)
        total_number += num_author_array[i];
    /* Each pair is counted from both endpoints, hence the division by 2. */
    printf("Total number of authors is %d\n", total_number/2);
    /* FIX: release the buffers the original leaked. */
    free(num_author_array);
    cudaFree(cu_dataset);
    cudaFree(cu_output);
    cudaFree(cu_full_output);
    cudaFree(cu_num_author_array);
    return 0;
}
// Reads the edge list from FILENAME into `dataset` (2*DataLen ints).
// Tokens before the first "1" are skipped (header lines in the dump).
void dataset_read( int * dataset)
{
    FILE * datafile;
    datafile = fopen( FILENAME, "r");
    /* FIX: the original never checked fopen; a missing file crashed fscanf. */
    if (datafile == NULL)
    {
        printf("Cannot open dataset file %s\n", FILENAME);
        exit(-1);
    }
    char line[255];
    /* FIX: the original `while(true)` ignored fscanf's return value and
       spun forever if the sentinel "1" never appeared (e.g. truncated file). */
    while (fscanf(datafile, "%254s", line) == 1)
    {
        if (atoi(line) == 1)
        {
            dataset[0] = 1;
            break;
        }
    }
    for(int i = 1; i < 2 * DataLen; i++){
        if (fscanf(datafile, "%d", &dataset[i]) != 1)
        {
            printf("Dataset truncated at entry %d\n", i);
            break;
        }
    }
    fclose(datafile);
}
// One thread per edge: for the author pair (u, v) at dataset[2*indx],
// atomically bumps each author's co-author count in `output` and records
// the partner's id in that author's row of `full_output` (MAX slots each).
// Author ids in the file are 1-based, hence the "-1" indexing.
__global__ void dataset_parse(int * dataset, int * output, int * full_output)
{
int indx = threadIdx.x + blockIdx.x * blockDim.x;
int i, j;
if(indx < DataLen){
// atomicAdd returns the previous count, which doubles as the free slot index.
i = atomicAdd(&(output[dataset[2*indx]-1]), 1);
full_output[(dataset[2*indx]-1) * MAX + i] = dataset[2*indx+1];
j = atomicAdd(&(output[dataset[2*indx+1]-1]), 1);
full_output[(dataset[2*indx + 1]-1) * MAX + j] = dataset[2*indx];
}
}
/*
int dataset_maxCoAuthor(int * output, int lenght)
{
int max =0;
int max_num = 0;
int max_ind[1000] = { 0 };
//memset(max_ind, 0, 1000);
for(int i = 0; i < lenght; i++)
{
//printf("output:%d, %d", i, output[i]);
if(max < output[i])
{
// printf("Max right now:%d, %d\n", i, output[i]);
max = output[i];
max_num = 0;
memset(max_ind, 0, 1000);
max_ind[max_num] = i;
}
else if(max == output[i])
{
max_num++;
max_ind[max_num] = i;
}
//else{
//printf("max is:%d, %d\n", max, max_ind[0]);
//}
}
printf("The list of authors with most co-authors:\n");
for(int i = 0; i <= max_num; i++)
{
printf("Author: %6d has %6d co-authors.\n", max_ind[i] + 1, output[max_ind[i]]);
}
return output[max_ind[0]];
}
void dataset_plot(int * output, int lenght, int max)
{
//int* numCoAuthorList;
int* numCoAuthorList = (int*)malloc(max * sizeof(int));
memset(numCoAuthorList, 0, max);
for(int i = 0; i < lenght; i++)
{
if(output[i] <= max)
{
numCoAuthorList[output[i] - 1]++;
}
else{
printf("\nError in Finding MAX!!!\n");
}
}
FILE *fp;
fp = fopen("./output.txt", "wb");
fwrite(numCoAuthorList, sizeof(int), max, fp);
fclose(fp);
}
*/
// One thread per author: walks each co-author's own co-author list and
// collects every id that is ALSO a direct co-author of this author
// (a mutual/"triangle" partner), then counts the distinct ids.
// NOTE(review): the two 10000-int per-thread arrays almost certainly live
// in local (off-chip) memory and assume at most 10000 collected pairs —
// confirm MAX*MAX stays below that bound for this dataset.
__global__ void output_parse(int * full_output, int * output, int * num_author_array)
{
int indx = threadIdx.x + blockIdx.x * blockDim.x;
if(indx < NumAuthor){
int pair_array[10000] = { 0 };  // candidate ids, duplicates allowed
int pair_len = 0;
int coauthor, coauthor_co_len, possible_pair;
for(int i = 0; i < output[indx]; i++){
coauthor = full_output[indx * MAX + i];
coauthor_co_len = output[coauthor-1];   // ids are 1-based
for(int j = 0; j < coauthor_co_len; j++){
possible_pair = full_output[(coauthor - 1) * MAX + j];
// Keep possible_pair only if it is also a direct co-author of indx.
check_pair(full_output, indx * MAX, output[indx], possible_pair, pair_array, &pair_len);
}
}
int new_array[10000] = {0};     // deduplicated ids
int num_authors = 0;
count_diff_auth(pair_array, new_array, pair_len, &num_authors, indx);
num_author_array[indx] = num_authors;
}
}
// If possible_pair occurs anywhere in full_output[start .. start+len),
// append it to pair_array and bump *pair_len (duplicates across calls kept).
__device__ void check_pair(int * full_output, int start, int len, int possible_pair, int * pair_array, int * pair_len){
    for(int k = 0; k < len; k++){
        if(full_output[start + k] != possible_pair){
            continue;
        }
        pair_array[*pair_len] = possible_pair;
        (*pair_len)++;
        break;
    }
}
// Copies the distinct values of pair_array[0..pair_len) into new_array and
// stores the distinct count in *pure_len.  pair_array[0] is always taken
// (matching the original, even when pair_len == 0).  `indx` is unused.
__device__ void count_diff_auth(int * pair_array,int * new_array, int pair_len, int * pure_len, int indx){
    new_array[0] = pair_array[0];
    int unique = 1;
    for(int i = 1; i < pair_len; i++){
        bool seen = false;
        for(int j = 0; j < unique; j++){
            if(new_array[j] == pair_array[i]){
                seen = true;
                break;
            }
        }
        if(!seen){
            new_array[unique] = pair_array[i];
            unique++;
        }
    }
    *pure_len = unique;
}
|
19,538 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
// Element-wise product: one thread per row, looping across its n columns.
__global__ void mult1(int *A, int *B, int *C, int n){
    int r = threadIdx.x;
    int base = r * n;
    for(int k = 0; k < n; k++){
        C[base + k] = A[base + k] * B[base + k];
    }
}
// Element-wise product: one thread per column, looping across m entries.
__global__ void mult2(int *A, int *B, int *C, int m){
    int cIdx = threadIdx.x;
    int base = cIdx * m;
    for(int k = 0; k < m; k++){
        C[base + k] = A[base + k] * B[base + k];
    }
}
// Element-wise product: one thread per element (blockIdx = row, threadIdx = column).
__global__ void mult3(int *A, int *B, int *C){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    C[idx] = A[idx] * B[idx];
}
// Reads two m x n matrices and multiplies them element-wise on the GPU three
// ways (thread-per-row, thread-per-column, thread-per-element).
int main(){
    int *a, *b, *t, m, n;
    int *d_a, *d_b, *d_t;
    printf("Enter the value of m: "); scanf("%d",&m);
    printf("Enter the value of n: "); scanf("%d",&n);
    int size = sizeof(int)*m*n;
    a=(int*)malloc(size);
    b=(int*)malloc(size);
    t=(int*)malloc(size);
    printf("Enter input matrix A: \n");
    for(int i=0; i<m*n; i++)
        scanf("%d",&a[i]);
    printf("Enter input matrix B: \n");
    for(int i=0; i<m*n; i++)
        scanf("%d",&b[i]);
    cudaMalloc((void**)&d_a,size);
    cudaMalloc((void**)&d_b,size);
    cudaMalloc((void**)&d_t,size);
    cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
    // Variant 1: one thread per row.
    mult1<<<1,m>>>(d_a,d_b,d_t,n);
    cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
    printf("Resultant matrix ADD3:\n");
    for(int i=0; i<m; i++){
        for(int j=0; j<n; j++){
            printf("%d ",t[i*n+j]);
        }
        printf("\n");
    }
    // Variant 2: one thread per column.
    mult2<<<1,n>>>(d_a,d_b,d_t,m);
    cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
    printf("Resultant matrix ADD3:\n");
    for(int i=0; i<m; i++){
        for(int j=0; j<n; j++){
            printf("%d ",t[i*n+j]);
        }
        printf("\n");
    }
    // Variant 3: one thread per element.
    mult3<<<m,n>>>(d_a,d_b,d_t);
    cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
    printf("Resultant matrix ADD3:\n");
    for(int i=0; i<m; i++){
        for(int j=0; j<n; j++){
            printf("%d ",t[i*n+j]);
        }
        printf("\n");
    }
    cudaFree(d_a);
    cudaFree(d_b);   /* FIX: d_b was never freed */
    cudaFree(d_t);
    /* FIX: host buffers were leaked */
    free(a);
    free(b);
    free(t);
    return 0;
}
|
19,539 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda_runtime.h>
#define max(x,y) (x>y?x:y)
#define min(x,y) (x>y?y:x)
#define THREAD_NUM 256
int BLOCK_NUM=0;
// Fills the n x m row-major matrix `a` with pseudo-random doubles in [0, 2):
// a coarse rand()/RAND_MAX part plus a fine-grained fractional part.
// Consumes exactly 2*n*m rand() calls in row-major order, like the original.
void matgen(double* a, int n, int m)
{
    const int total = n * m;
    for (int k = 0; k < total; k++)
    {
        double coarse = (double)rand() / RAND_MAX;
        double fine = (double)rand() / ((long)RAND_MAX * RAND_MAX);
        a[k] = coarse + fine;
    }
}
// One thread per element of the n x k product: the flat global thread id is
// decomposed into (row, column); threads past n*k exit via the guard.
__global__ static void MatMultKernel(const double* a, const double* b, double* c, int n, int m, int k)
{
    const int gid = blockIdx.x * THREAD_NUM + threadIdx.x;
    const int r = gid / k;
    const int col = gid % k;
    if (r >= n || col >= k)
        return;
    double acc = 0;
    for (int j = 0; j < m; j++)
    {
        acc += a[r * m + j] * b[j * k + col];
    }
    c[gid] = acc;
}
// Host wrapper: uploads a (n x m) and b (m x k), launches the kernel and
// downloads the n x k product into c.
void MatMultWithCuda(const double *a, const double *b, double *c, int n, int m, int k){
    double *d_a, *d_b, *d_c;
    const size_t bytesA = sizeof(double) * n * m;
    const size_t bytesB = sizeof(double) * m * k;
    const size_t bytesC = sizeof(double) * n * k;
    cudaMalloc((void**)&d_a, bytesA);
    cudaMalloc((void**)&d_b, bytesB);
    cudaMalloc((void**)&d_c, bytesC);
    cudaMemcpy(d_a, a, bytesA, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, bytesB, cudaMemcpyHostToDevice);
    // Enough blocks to cover all n*k outputs (over-provisioned; the kernel
    // guards out-of-range threads).
    BLOCK_NUM = min(n,k) * (max(n,k) + THREAD_NUM - 1) / THREAD_NUM;
    MatMultKernel<<< BLOCK_NUM, THREAD_NUM, 0 >>>(d_a, d_b, d_c, n, m, k);
    cudaMemcpy(c, d_c, bytesC, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
}
// Reads n, m, k, generates two random matrices and prints their GPU product.
int main()
{
    double *a, *b, *c;
    int n, m, k;
    /* FIX: scanf was unchecked — garbage dimensions were used on failure. */
    if (scanf("%d%d%d",&n,&m,&k) != 3 || n <= 0 || m <= 0 || k <= 0){
        printf("expected three positive integers: n m k\n");
        return 1;
    }
    srand(time(NULL));  /* FIX: the original seeded twice back-to-back */
    a = (double*)malloc(sizeof(double)* n * m);
    b = (double*)malloc(sizeof(double)* m * k);
    c = (double*)malloc(sizeof(double)* n * k);
    matgen(a, n, m);
    matgen(b, m, k);
    MatMultWithCuda(a, b, c, n, m, k);
    for(int i=0;i<n;i++){
        for(int j=0;j<k;j++){
            printf("%lf\t",c[i*k+j]);
        }
        printf("\n");
    }
    /* FIX: host buffers were leaked. */
    free(a);
    free(b);
    free(c);
    return 0;
} |
19,540 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//M and N number of threads (grid and block)
// Square matrix multiply c = a * b where each matrix is sqrt_dim x sqrt_dim,
// stored column-major-ish: element (i, j) lives at array index i + j*sqrt_dim.
// Work distribution: if there are at least dim = sqrt_dim^2 threads, each
// thread computes one element; otherwise each thread computes a contiguous
// chunk of dim/thread_number elements and the LAST thread also takes the
// remainder.  The printfs are debug tracing.
__global__ void multiply( const int a[] ,const int b[], int c[] , const int sqrt_dim,const int thread_number)
{
// Flatten the (possibly 3-D) block/thread coordinates into one linear id.
int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x;
//for an element in matrix[i][j] , its coordinate k in array[] is i+j*sqrt(size_array)
int index_i = index < sqrt_dim ? index : (int)index%sqrt_dim;
int index_j = (index-index_i)/sqrt_dim;
int dim=sqrt_dim*sqrt_dim;
if(index<dim){
c[index]=0;
if(dim<=thread_number){ //if more threads than array size: one element per thread
printf("Thread %i; Modifying value of index %i\n ", index, index);
for(int i=0; i<sqrt_dim;i++){ //row of first matrix
c[index]+=a[i+index_j * sqrt_dim ]*b[index_i+ i* sqrt_dim];
}
}
else{ //if less threads than array size
if(index!=thread_number-1){//if not last thread deal with size_array/thread_nb array entries
for(int i=index*(int)(dim/thread_number); i< index*(int)(dim/thread_number)+(int)(dim/thread_number); i++){
printf("Thread %i; Modifying value of index %i \n", index, i);
// Recompute (row, column) for each element in this thread's chunk.
index_i = (int)i%sqrt_dim;
index_j = (i-index_i)/sqrt_dim;
for(int j=0; j<sqrt_dim;j++){ //row of first matrix
c[i]+=a[j+index_j * sqrt_dim ]*b[index_i+ j* sqrt_dim];
}
}
}
else{ //if last thread: deal with all remaining array entries (the remainder)
for(int i=index*(int)(dim/thread_number); i< dim; i++){
printf("Thread %i; Modifying value of index %i\n",index, i );
index_i = (int)i%sqrt_dim;
index_j = (i-index_i)/sqrt_dim;
for(int j=0;j<sqrt_dim;j++){ //row of first matrix
c[i]+=a[j+index_j * sqrt_dim ]*b[index_i+ j* sqrt_dim];
}
}
}
}
}
}
// Multiplies two all-ones square matrices on the GPU and prints the result
// and the elapsed wall-clock time.  Usage: prog [side_length thread_grid_N].
int main(int argc, char *argv[]){
    // Measure time
    clock_t time_begin;
    time_begin=clock();
    // pointers to host & device arrays
    int *d_array1 = 0,*d_array2 = 0,*d_array3 = 0;
    int *h_array1 = 0,*h_array2 = 0,*h_array3 = 0;
    int size_array=9; // must be a perfect square (matrices are L x L)
    int N=3;
    if(argc == 3){
        size_array=atoi(argv[1]) * atoi(argv[1]) ;
        N=atoi(argv[2]);
    }
    // malloc host arrays
    h_array1 = (int*)malloc( size_array * sizeof(int));
    h_array2 = (int*)malloc( size_array * sizeof(int));
    h_array3 = (int*)malloc( size_array * sizeof(int));
    for(int i=0; i<size_array; i++){
        h_array1[i]=1;//rand()%10;
        h_array2[i]=1;//rand()%10;
    }
    // cudaMalloc device arrays
    cudaMalloc(&d_array1,size_array * sizeof(int));
    cudaMalloc(&d_array2,size_array * sizeof(int));
    cudaMalloc(&d_array3,size_array * sizeof(int));
    // upload the inputs
    cudaMemcpy(d_array1, h_array1, sizeof(int)*size_array, cudaMemcpyHostToDevice);
    cudaMemcpy(d_array2, h_array2, sizeof(int)*size_array, cudaMemcpyHostToDevice);
    dim3 bloque(N,N); // two-dimensional block of N*N threads
    dim3 grid(1,1);   // a single block
    int thread_number= N*N;
    multiply<<<grid, bloque>>>(d_array1, d_array2 , d_array3,sqrt((float)size_array), thread_number);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is its direct replacement.
    cudaDeviceSynchronize();
    // download and inspect the result on the host:
    cudaMemcpy(h_array3, d_array3, sizeof(int)*size_array, cudaMemcpyDeviceToHost);
    for(int i=0; i<size_array; i++){
        printf("%i\t", h_array3[i]);
        if((i+1)%(int)(sqrt((float)size_array))==0)
            printf("\n");
    }
    printf("\n");
    // deallocate memory
    free(h_array3); free(h_array2); free(h_array1);
    cudaFree(d_array3);cudaFree(d_array2);cudaFree(d_array1);
    // FIX: the original hard-coded 1e6 clocks/sec and then multiplied by
    // 1000, printing milliseconds labeled "seconds".  Use CLOCKS_PER_SEC.
    printf("Time elapsed: %f seconds\n", (double)(clock() - time_begin) / CLOCKS_PER_SEC );
}
|
19,541 | #include <curand.h>
#include <curand_kernel.h>
extern "C" {
// One thread per chain: seeds a per-thread curand state from the shared
// seed, using the thread id as the subsequence so chains are independent.
__global__ void init( unsigned long long int* seed, curandState * state){
int id = threadIdx.x;
curand_init(*seed, id, 0, &state[id]);
}
// Target density (up to normalization): p = exp(-|x * pars[2]|^pars[1]).
__device__ void pi(const float &x, float *pars, float &p){
    float scaled = fabsf(x * pars[2]);
    p = expf(-powf(scaled, pars[1]));
}
// Integrand accumulator: adds x*sin(x*pars[0]) into the running sum s.
__device__ void f(const float &x, float *pars, float &s){
    float term = x * sinf(x * pars[0]);
    s += term;
}
// Metropolis sampler: each thread runs an independent random-walk chain with
// its own parameter slice Pars[npar*id .. ) and stores the Monte-Carlo
// average of f over its N samples in result[id].
__global__ void mcmc(curandState* states, unsigned int * num_samples, float * Pars, int * npar,
float * Sigma, float * result){
int id = threadIdx.x;
curandState state = states[id];
unsigned int N = *num_samples;
float sigma = *Sigma;                 // proposal std-dev, shared by all chains
float *pars = new float[*npar];
memcpy(pars, &Pars[*npar*id], *npar*sizeof(float));
float xi = curand_uniform(&state);    // initial chain position in (0, 1]
float xg = 0.0;
float s = 0.0;
float p_xi = 0.0;
float p_xg = 0.0;
pi(xi, pars, p_xi);
for(unsigned int i=0;i<N;i++){
xg = sigma*curand_normal(&state)+xi;  // Gaussian random-walk proposal
pi(xg, pars, p_xg);
if (curand_uniform(&state)<(p_xg/p_xi)){  // Metropolis accept test
xi = xg;
p_xi = p_xg;
}
f(xi, pars, s);   // accumulate the integrand at the current position
}
result[id] = s/float(N);
// FIX: pars was allocated with new[]; plain `delete` on it is undefined
// behaviour — it must be delete[].
// NOTE(review): the advanced RNG state is not written back to states[id],
// so repeated launches replay the same substream — confirm this is intended.
delete[] pars;
}
} |
19,542 | #include "includes.h"
static const int NTHREADS = 32;
// Gradient of ClassNLLCriterion for a single sample: only the target class t
// gets a gradient of -(weight_t) * norm; all other entries are left as-is.
// `weights` may be null (then weight 1 is used).
__global__ void cunn_ClassNLLCriterion_updateGradInput_kernel1( float* gradInput, float* weights, float* target, float* total_weight, int size_average, int n_classes)
{
if (*total_weight <= 0) {
return;   // nothing contributed to the loss; leave gradInput untouched
}
// When size_average is set, scale the gradient by 1/total_weight.
float norm = size_average ? (1.0f / *total_weight) : 1.0f;
int t = (int)*target - 1;   // targets are 1-based (Torch convention)
if (t >= 0 && t < n_classes) {
gradInput[t] = -(weights ? weights[t] : 1.0f) * norm;
}
} |
19,543 | // Amarjot Singh Parmar
#include <iostream>
#include <math.h>
#include <stdio.h>
#include <unistd.h>
__device__
// Maps the board coordinate (x, y) to its flat offset in the cell array;
// each cell record occupies 3 ints (x, y, state).
int getIndex(int x, int y, int rows){
    return (rows * 3) * y + x * 3;
}
__device__
// Counts the live neighbours of the cell whose record starts at `index`,
// on a toroidal (wrap-around) rows x columns board.  Each cell record is
// (x, y, state), so "+ 2" reads the state field.
int getCellNeighbours(int index, int *gen, int rows, int columns){
int neighbours = 0;
int indexNeighbour, xNeighbour, yNeighbour;
int x = gen[index];
int y = gen[index + 1];
// Left neighbour (wraps to the last column when x == 0)
if (x == 0){
indexNeighbour = getIndex((rows - 1), y, rows);
}
else{
indexNeighbour = getIndex((x - 1), y, rows);
}
neighbours = gen[indexNeighbour + 2];
// Right neighbour (wraps to column 0 when x == rows-1)
if (x == rows -1){
indexNeighbour = getIndex(0, y, rows);
}
else{
indexNeighbour = getIndex((x + 1), y, rows);
}
neighbours = neighbours + gen[indexNeighbour + 2];
// Top neighbour (wraps to the last row when y == 0)
if (y == 0){
indexNeighbour = getIndex(x , columns - 1, rows);
}
else{
indexNeighbour = getIndex(x , (y - 1), rows);
}
neighbours = neighbours + gen[indexNeighbour + 2];
// Bottom neighbour (wraps to row 0 when y == columns-1)
if (y == columns - 1){
indexNeighbour = getIndex(x , 0 , rows);
}
else{
indexNeighbour = getIndex(x, (y + 1), rows);
}
neighbours = neighbours + gen[indexNeighbour + 2];
// Top-left diagonal (corner and edge cases wrap both coordinates)
if (x == 0 && y == 0){
xNeighbour = rows - 1;
yNeighbour = columns - 1;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
else if (y == 0){
xNeighbour = x - 1;
yNeighbour = columns - 1;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
else if (x == 0){
indexNeighbour = getIndex((rows -1), y - 1, rows);
}
else{
xNeighbour = x - 1;
yNeighbour = y - 1;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
neighbours = neighbours + gen[indexNeighbour + 2];
// Top-right diagonal
if (x == (rows - 1) && y == 0){
xNeighbour = 0;
yNeighbour = columns - 1;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
else if (y == 0){
xNeighbour = x + 1;
yNeighbour = columns - 1;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
else if (x == (rows - 1)){
xNeighbour = 0;
yNeighbour = y - 1;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
else{
xNeighbour = x + 1;
yNeighbour = y -1;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
neighbours = neighbours + gen[indexNeighbour + 2];
// Bottom-left diagonal
if (x == 0 && y == (columns -1)){
xNeighbour = rows - 1;
yNeighbour = 0;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
else if (y == (columns - 1)){
xNeighbour = x - 1;
yNeighbour = 0;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
else if (x == 0){
xNeighbour = rows - 1;
yNeighbour = y + 1;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
else{
indexNeighbour = getIndex((x - 1), (y + 1), rows);
}
neighbours = neighbours + gen[indexNeighbour + 2];
// Bottom-right diagonal
if (x == (rows - 1) && y == (columns -1)){
xNeighbour = 0;
yNeighbour = 0;
// NOTE: hard-coded 0 happens to equal getIndex(0, 0, rows); correct but fragile.
indexNeighbour = 0;
}
else if (y == (columns - 1)){
xNeighbour = x + 1;
yNeighbour = 0;
indexNeighbour = getIndex(xNeighbour, yNeighbour, rows);
}
else if (x == (rows - 1)){
indexNeighbour = getIndex(0, (y + 1), rows);
}
else{
indexNeighbour = getIndex((x + 1), (y + 1), rows);
}
neighbours = neighbours + gen[indexNeighbour + 2];
return neighbours;
}
__device__
// Applies Conway's rules to the cell whose record starts at `index`:
// the next state is written into newGen; gen is only read.
void cellNextCycle(int *gen, int *newGen, int index, int rows, int columns){
    int n = getCellNeighbours(index, gen, rows, columns);
    int next;
    if (gen[index + 2] == 1){
        // Live cell survives only with exactly 2 or 3 live neighbours.
        next = (n == 2 || n == 3) ? 1 : 0;
    }
    else{
        // Dead cell becomes alive with exactly 3 live neighbours.
        next = (n == 3) ? 1 : 0;
    }
    newGen[index + 2] = next;
}
// One thread per cell: computes that cell's next state into newGen.
__global__
void calculateBoard(int *gen, int *newGen, int amountofCells, int rows, int columns){
    int cell = blockIdx.x * blockDim.x + threadIdx.x;
    if (cell < amountofCells){
        // Each cell record is 3 ints wide, so the record offset is cell*3.
        cellNextCycle(gen, newGen, cell * 3, rows, columns);
    }
}
// Host-side twin of getIndex: flat offset of the (x, y) cell record
// (3 ints per cell: x, y, state).
int getIndexCPU(int x, int y, int rows){
    return (rows * 3) * y + x * 3;
}
// Render the board to stdout: '.' for a dead cell, 'x' for a live one,
// with a newline after every `rows` cells. Cells are stored as
// (x, y, state) triples, so only every third int is inspected.
void printBoard(int *gen ,int amountofCells, int rows){
    int idx = 0;
    int cellsInRow = 0;
    printf("\n");
    while (idx < amountofCells) {
        idx += 2;   // skip the x and y fields, land on the state value
        printf(gen[idx] == 0 ? " . " : " x ");
        cellsInRow++;
        if (cellsInRow == rows) {
            printf("\n");
            cellsInRow = 0;
        }
        idx++;      // step past the state to the next triple
    }
}
// Seed the classic glider pattern into `gen`. `newGen` is accepted for
// signature compatibility with the other setup calls but is not modified.
void setupGlider(int *gen, int *newGen, int rows){
    static const int cells[5][2] = { {1, 0}, {2, 1}, {0, 2}, {1, 2}, {2, 2} };
    for (int i = 0; i < 5; i++) {
        // +2 targets the state field of the (x, y, state) triple.
        gen[getIndexCPU(cells[i][0], cells[i][1], rows) + 2] = 1;
    }
}
// Fill the board as consecutive (x, y, state) triples in row-major order,
// with every cell starting out dead (state == 0).
void populateArray(int *gen, int rows, int columns){
    int pos = 0;
    for (int y = 0; y < columns; y++) {
        for (int x = 0; x < rows; x++) {
            gen[pos++] = x;
            gen[pos++] = y;
            gen[pos++] = 0;
        }
    }
}
// Drives a 50x50 Game of Life simulation for 1000 generations on the GPU,
// printing the board after every step.
int main(void){
int rows = 50;
int columns = 50;
int runTime = 1000;
int amountofCells = rows * columns;
// Each cell is a 3-int (x, y, state) triple, so the array is 3x the cell count.
int lengthofArray = ((amountofCells * 2) + amountofCells);
int loopCount = 0;
// Ceiling-style block count for a fixed 1024-thread block size.
int amountOFBlocks = (amountofCells / 1024) + 1;
printf("User wants %d X %d , Total Cells needed : %d , Array Size : %d \n", rows , columns, amountofCells, lengthofArray);
// Allocate Unified Memory – accessible from CPU or GPU
int *gen, *newGen;
cudaMallocManaged(&gen, lengthofArray*sizeof(int));
cudaMallocManaged(&newGen, lengthofArray*sizeof(int));
// populate board
populateArray(gen, rows, columns);
populateArray(newGen, rows, columns);
setupGlider(gen, newGen, rows);
cudaDeviceSynchronize();
// Ping-pong between gen and newGen: even generations read gen and write
// newGen, odd generations do the reverse, so no per-step copy is needed.
// The sync after each launch makes the managed memory safe to read on host.
while(loopCount < runTime){
usleep(9000); // throttle so the animation is watchable
if((loopCount % 2) == 0){
calculateBoard<<<amountOFBlocks,1024>>>(gen, newGen, amountofCells, rows, columns);
cudaDeviceSynchronize();
printBoard(newGen, lengthofArray, rows);
} else{
calculateBoard<<<amountOFBlocks,1024>>>(newGen, gen, amountofCells, rows, columns);
cudaDeviceSynchronize();
printBoard(gen, lengthofArray, rows);
}
loopCount++;
}
cudaFree(gen);
cudaFree(newGen);
return 0;
}
19,544 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <chrono>
using namespace std;
// Abort with context (message, file, line, CUDA reason) when a CUDA runtime
// call fails; wrap call sites with the SAFE_CALL macro below.
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
    if (err == cudaSuccess)
        return;
    fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",msg,file_name,line_number,cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
#define SAFE_CALL(call,msg) _safe_cuda_call(call,msg,__FILE__,__LINE__)
// inline double seconds()
// {
// struct timeval tp;
// struct timezone tzp;
// int i = gettimeofday(&tp, &tzp);
// return ((double)tp.tv_sec + (double)tp.tv_usec * 1.e-6);
// }
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(float *a, float *b, float *c, int n)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = a[id] + b[id];
}
// Benchmarks a simple vector-add kernel on 2^23 elements, timing the launch
// both with CUDA events (GPU timeline) and std::chrono (host wall clock),
// then validates the result by averaging the output (expected: 1.0).
int main( int argc, char* argv[] )
{
cudaEvent_t start, stop;
SAFE_CALL(cudaEventCreate(&start), "Error creating start event");
SAFE_CALL(cudaEventCreate(&stop), "Error creating stop event");
// Size of vectors
int n = 1<<23;
// Host input vectors
float *h_a;
float *h_b;
//Host output vector
float *h_c;
// Device input vectors
float *d_a;
float *d_b;
//Device output vector
float *d_c;
// Size, in bytes, of each vector
size_t bytes = n*sizeof(float);
// Allocate memory for each vector on host
h_a = (float*)malloc(bytes);
h_b = (float*)malloc(bytes);
h_c = (float*)malloc(bytes);
// Allocate memory for each vector on GPU
SAFE_CALL(cudaMalloc(&d_a, bytes), "Error allocating da");
SAFE_CALL(cudaMalloc(&d_b, bytes), "Error allocating db");
SAFE_CALL(cudaMalloc(&d_c, bytes), "Error allocating dc");
// Initialize vectors on host (all ones, so each output element is 2)
for(int i = 0; i < n; i++ ) {
h_a[i] = 1 ;
h_b[i] = 1 ;
}
// Copy host vectors to device
SAFE_CALL(cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice), "Error copying ha -> da");
SAFE_CALL(cudaMemcpy( d_b, h_b, bytes, cudaMemcpyHostToDevice), "Error copying hb -> db");
int blockSize, gridSize;
// Number of threads in each thread block
blockSize = 1024;
// Number of thread blocks in grid (ceiling division)
gridSize = (int)ceil((float)n/blockSize);
printf("Gridsize: %d Blocksize: %d\n", gridSize, blockSize);
// Host-clock start is taken just before the event record so the two
// measurements cover (almost) the same span.
auto start_cpu = chrono::high_resolution_clock::now();
SAFE_CALL(cudaEventRecord(start, 0), "Error recording event");
// Execute the kernel
vecAdd<<<gridSize, blockSize>>>(d_a, d_b, d_c, n);
// Synchronize so end_cpu below includes the full kernel execution, and so
// any kernel launch/execution error surfaces here.
SAFE_CALL(cudaDeviceSynchronize(),"Kernel Launch Failed");
auto end_cpu = chrono::high_resolution_clock::now();
SAFE_CALL(cudaEventRecord(stop, 0), "Error recording event stop");
SAFE_CALL(cudaEventSynchronize(stop), "Error synchronizing events");
chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
float elapsedTime;
SAFE_CALL(cudaEventElapsedTime(&elapsedTime, start, stop), "Error calculating elapsed time");
printf("Time spent for %d elements: %.5f ms; %f\n",n, elapsedTime, duration_ms.count());
// Copy array back to host
SAFE_CALL(cudaMemcpy( h_c, d_c, bytes, cudaMemcpyDeviceToHost ), "Error copying dc -> hc");
// Sum up vector c and print result divided by n, this should equal 1 within error
// NOTE(review): with all inputs equal to 1, each c[i] is 2, so sum/n prints
// 2.0, not 1.0 — confirm which value the comment intended.
float sum = 0;
for(int i=0; i<n; i++)
sum += h_c[i];
printf("final result: %f \n", sum/n);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Release device memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
// Release host memory
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
19,545 | #include "includes.h"
// Rescales each column of `mat` (one column per block) so its norm does not
// exceed `norm`, writing the result to `target`.
// Launch contract implied by the code: gridDim.x == width (one block per
// column) and blockDim.x == 32 — the shared buffer has exactly 32 per-thread
// slots plus one broadcast slot.
__global__ void kNormLimitColumnwise(float* mat, float* target, float norm, unsigned int width, unsigned int height) {
__shared__ float sum_vals[33];
float cur_sum = 0;
// Strided per-thread partial sums of squares over this block's column.
for (unsigned int i = threadIdx.x; i < height; i += 32) {
cur_sum += mat[blockIdx.x * height + i] * mat[blockIdx.x * height + i];
}
sum_vals[threadIdx.x] = cur_sum;
__syncthreads();
if (threadIdx.x == 0) {
// Serial reduction of the 32 partials by thread 0.
cur_sum = 0;
for (unsigned int i = 0; i < 32; i++)
cur_sum += sum_vals[i];
// NOTE(review): cur_sum is the *squared* norm, so comparing it to `norm`
// (rather than norm*norm) looks inconsistent — kept as-is to preserve
// behavior; confirm the caller's convention.
// sqrtf instead of sqrt avoids a needless double-precision round trip.
sum_vals[32] = norm > cur_sum ? 1 : norm / sqrtf(cur_sum);
}
// Bug fix: all threads must wait for thread 0 to publish the scale factor
// before reading it; without this barrier the read races the write above.
__syncthreads();
float scale = sum_vals[32];
for (unsigned int i = threadIdx.x; i < height; i += 32) {
target[blockIdx.x * height + i] = mat[blockIdx.x * height + i] * scale;
}
__syncthreads();
}
19,546 | /**********************************************************************
* DESCRIPTION:
* Wave Equation - cu Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS+2], /* values at time t */
oldval[MAXPOINTS+2], /* values at time (t-dt) */
newval[MAXPOINTS+2]; /* values at time (t+dt) */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
/**********************************************************************
 * Checks input values from parameters.
 * Re-prompts (via stdin) until the globals tpoints and nsteps fall in
 * their legal ranges; exits on input failure (EOF) instead of spinning.
 *********************************************************************/
void check_param(void)
{
char tchar[20];
/* check number of points, number of iterations */
while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
printf("Enter number of points along vibrating string [%d-%d]: "
,MINPOINTS, MAXPOINTS);
/* Bug fix: "%19s" bounds the read to the 20-byte buffer (19 chars + NUL);
 * the old unbounded "%s" could overflow tchar. Exiting on scanf failure
 * prevents an infinite loop when stdin hits EOF. */
if (scanf("%19s", tchar) != 1)
exit(EXIT_FAILURE);
tpoints = atoi(tchar);
if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
printf("Invalid. Please enter value between %d and %d\n",
MINPOINTS, MAXPOINTS);
}
while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
if (scanf("%19s", tchar) != 1)
exit(EXIT_FAILURE);
nsteps = atoi(tchar);
if ((nsteps < 1) || (nsteps > MAXSTEPS))
printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
}
printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
/**********************************************************************
 * Initialize points on line: one thread per point, values and the
 * previous-step array are both seeded from one period of a sine curve
 * (string starts at rest).
 *********************************************************************/
__global__ void init_line(int tpoints, float *GPU_values, float *GPU_oldval)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
/* Bug fix: the grid is rounded up to a multiple of 1024 threads, so the
 * tail threads (id >= tpoints) must not write out of bounds. */
if (id >= tpoints)
return;
/* Calculate initial values based on sine curve */
GPU_values[id] = sin((2.0 * PI)*((float)id/(float)(tpoints-1)));
/* Initialize old values array with the same displacement */
GPU_oldval[id] = GPU_values[id];
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
/**********************************************************************
 * Update all values along line a specified number of times.
 * Each thread advances one point through all nsteps time steps; the
 * endpoints (id == 0 and id == tpoints-1) are pinned at zero.
 *********************************************************************/
__global__ void update(int tpoints, int nsteps, float *GPU_values, float *GPU_oldval, float *GPU_newval)
{
int i;
int id = blockIdx.x * blockDim.x + threadIdx.x;
/* Bug fix: guard against the rounded-up grid size writing past tpoints. */
if (id >= tpoints)
return;
/* Update values for each time step */
for (i = 1; i<= nsteps; i++) {
/* Update points along line for this time step */
if ((id == 0) || (id == tpoints-1))
GPU_newval[id] = 0.0;
else
/* 1.82 = 2 - 2*tau^2 with tau = 0.3 (pre-folded wave-equation
 * coefficient from the original assignment) */
GPU_newval[id] = 1.82 * GPU_values[id] - GPU_oldval[id];
/* Update old values with new values */
GPU_oldval[id] = GPU_values[id];
GPU_values[id] = GPU_newval[id];
}
}
/**********************************************************************
* Print final results
*********************************************************************/
/**********************************************************************
 * Print final results: the displacement of every point, ten per line.
 *********************************************************************/
void printfinal()
{
for (int i = 0; i < tpoints; i++) {
printf("%6.4f ", values[i]);
if ((i + 1) % 10 == 0)
printf("\n");
}
}
/**********************************************************************
* Main program
*********************************************************************/
/**********************************************************************
 * Main program: reads tpoints and nsteps from the command line, runs
 * the wave simulation on the GPU, and prints the final string shape.
 *********************************************************************/
int main(int argc, char *argv[])
{
/* Bug fix: validate argc before dereferencing argv[1]/argv[2]. */
if (argc < 3) {
printf("Usage: %s tpoints nsteps\n", argv[0]);
return 1;
}
sscanf(argv[1],"%d",&tpoints);
sscanf(argv[2],"%d",&nsteps);
check_param();
float *GPU_values, *GPU_oldval, *GPU_newval;
// GPU memory, sized to the full static arrays (MAXPOINTS+2 floats each)
cudaMalloc(&GPU_values, sizeof(values));
cudaMalloc(&GPU_oldval, sizeof(values));
cudaMalloc(&GPU_newval, sizeof(values));
printf("Initializing points on the line...\n");
init_line<<<((tpoints + 1023) >> 10), 1024>>>(tpoints, GPU_values, GPU_oldval);
printf("Updating all points for all time steps...\n");
// <<<numBlocks, threadsPerBlock>>>
update<<<((tpoints + 1023) >> 10), 1024>>>(tpoints, nsteps, GPU_values, GPU_oldval, GPU_newval);
printf("Printing final results...\n");
// cudaMemcpy on the default stream implicitly waits for the kernels above.
cudaMemcpy(values, GPU_values, sizeof(values), cudaMemcpyDeviceToHost);
printfinal();
printf("\nDone.\n\n");
/* Bug fix: release device memory that was previously leaked. */
cudaFree(GPU_values);
cudaFree(GPU_oldval);
cudaFree(GPU_newval);
return 0;
}
|
19,547 | /**
* @ Author: Minhua Chen
* @ Create Time: 2019-08-24 11:41:39
* @ Modified by: Minhua Chen
* @ Modified time: 2019-08-24 12:09:28
* @ Description:
*/
#include <stdio.h>
#include<cuda.h>
#include<cuda_runtime.h>
#define BLOCK_NUM 32 //块数量
#define THREAD_NUM 256 // 每个块中的线程数
#define R_SIZE BLOCK_NUM * THREAD_NUM
#define M_SIZE R_SIZE * R_SIZE
// Matrix multiply: each thread computes one full row of the result
// (row = blockIdx.x * THREAD_NUM + threadIdx.x; matrices are flat
// R_SIZE x R_SIZE row-major int arrays).
__global__ void mat_mul(int *mat1, int *mat2, int *result) {
const int bid = blockIdx.x;
const int tid = threadIdx.x;
const int row = bid * THREAD_NUM + tid;
for (int c = 0; c < R_SIZE; c++) {
// Bug fix: accumulate into a register instead of `+=` directly on device
// memory — the host never copied or zeroed `result` on the GPU, so the
// old code summed onto uninitialized garbage (and did R_SIZE redundant
// global read-modify-writes per element).
int sum = 0;
for (int n = 0; n < R_SIZE; n++) {
sum += mat1[row*R_SIZE+n] * mat2[n*R_SIZE+c];
}
result[row*R_SIZE+c] = sum;
}
}
// Multiplies two random R_SIZE x R_SIZE integer matrices on the GPU.
int main(int argc, char *argv[]) {
int *mat1, *mat2, *result;
int *g_mat1, *g_mat2, *g_mat_result;
// 2-D matrices stored as flat 1-D arrays of M_SIZE ints.
mat1 = (int*) malloc(M_SIZE * sizeof(int));
mat2 = (int*) malloc(M_SIZE * sizeof(int));
result = (int*) malloc(M_SIZE * sizeof(int));
// initialize
for (int i = 0; i < M_SIZE; i++) {
mat1[i] = rand()/1000000;
mat2[i] = rand()/1000000;
result[i] = 0;
}
cudaMalloc((void **)&g_mat1, sizeof(int) * M_SIZE);
cudaMalloc((void **)&g_mat2, sizeof(int) * M_SIZE);
cudaMalloc((void **)&g_mat_result, sizeof(int) * M_SIZE);
cudaMemcpy(g_mat1, mat1, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice);
cudaMemcpy(g_mat2, mat2, sizeof(int) * M_SIZE, cudaMemcpyHostToDevice);
// Bug fix: cudaMalloc does not initialize memory and the kernel reads the
// result buffer before writing it, so zero it on the device first.
cudaMemset(g_mat_result, 0, sizeof(int) * M_SIZE);
mat_mul<<<BLOCK_NUM, THREAD_NUM>>>(g_mat1, g_mat2, g_mat_result);
// This blocking copy also synchronizes with the kernel launch above.
cudaMemcpy(result, g_mat_result, sizeof(int) * M_SIZE, cudaMemcpyDeviceToHost);
// Bug fix: release device and host memory that was previously leaked.
cudaFree(g_mat1);
cudaFree(g_mat2);
cudaFree(g_mat_result);
free(mat1);
free(mat2);
free(result);
return 0;
}
19,548 | /* источник: https://www.packetizer.com/security/sha1/ */
/*
* Эта структура будет содержать контекстную информацию
* для операции хэширования
*/
typedef struct SHA1Context
{
unsigned Message_Digest[5]; /* message digest (the output) */
unsigned Length_Low; /* message length in bits (low word) */
unsigned Length_High; /* message length in bits (high word) */
unsigned char Message_Block[64]; /* 512-bit message block */
int Message_Block_Index; /* index into the message block */
int Computed; /* has the digest been computed? */
int Corrupted; /* is the message digest corrupted? */
} SHA1Context;
/*
* Определим макрос циклического сдвига
*/
#define SHA1CircularShift(bits,word) \
((((word) << (bits)) & 0xFFFFFFFF) | \
((word) >> (32-(bits))))
/* прототипы функций */
__device__ void SHA1ProcessMessageBlock(SHA1Context *);
__device__ void SHA1PadMessage(SHA1Context *);
/*
* SHA1Reset
*
* Эта функция инициализирует структуру SHA1Context, подготавливаясь
* к расчёту новой подписи для сообщения.
*
*/
/*
 * SHA1Reset
 *
 * Re-initializes the SHA1Context with the standard SHA-1 initial hash
 * values, preparing it to compute a new message digest.
 */
__device__ void SHA1Reset(SHA1Context *context)
{
context->Computed = 0;
context->Corrupted = 0;
context->Message_Block_Index = 0;
context->Length_Low = 0;
context->Length_High = 0;
/* Initial hash values H0..H4 from the SHA-1 specification. */
context->Message_Digest[0] = 0x67452301;
context->Message_Digest[1] = 0xEFCDAB89;
context->Message_Digest[2] = 0x98BADCFE;
context->Message_Digest[3] = 0x10325476;
context->Message_Digest[4] = 0xC3D2E1F0;
}
/*
* SHA1Result
*
* Данная функция возвращает 160-битную подпись в массиве
* Message_Digest, который находится в структуре SHA1Context.
*
*/
/*
 * SHA1Result
 *
 * Finalizes the digest (padding the message on the first call) and leaves
 * the 160-bit result in context->Message_Digest.
 * Returns 0 if the context is corrupted, 1 on success.
 */
__device__ int SHA1Result(SHA1Context *context)
{
if (context->Corrupted)
return 0;
if (!context->Computed)
{
SHA1PadMessage(context);
context->Computed = 1;
}
return 1;
}
/*
* SHA1Input
*
* Эта функция принимает массив октетов как следующую порцию сообщения.
*
*/
/*
 * SHA1Input
 *
 * Accepts an array of octets as the next portion of the message, updating
 * the running bit-length and processing each full 512-bit block as it fills.
 * Feeding data after the digest was computed marks the context corrupted.
 */
__device__ void SHA1Input( SHA1Context *context,
char *message_array,
unsigned length)
{
if (!length)
{
return;
}
if (context->Computed || context->Corrupted)
{
context->Corrupted = 1;
return;
}
while(length-- && !context->Corrupted)
{
context->Message_Block[context->Message_Block_Index++] =
(*message_array & 0xFF);
context->Length_Low += 8;
/* truncate to 32 bits */
context->Length_Low &= 0xFFFFFFFF;
if (context->Length_Low == 0)
{
/* low word wrapped: carry into the high word */
context->Length_High++;
/* truncate to 32 bits */
context->Length_High &= 0xFFFFFFFF;
if (context->Length_High == 0)
{
/* message is too long (> 2^64 bits) */
context->Corrupted = 1;
}
}
if (context->Message_Block_Index == 64)
{
SHA1ProcessMessageBlock(context);
}
message_array++;
}
}
/*
* SHA1ProcessMessageBlock
*
* Эта функция обрабатывает следующий 512-битный кусок сообщения,
* хранящийся в массиве Message_Block.
*
*/
/*
 * SHA1ProcessMessageBlock
 *
 * Processes the next 512-bit chunk of the message stored in the
 * Message_Block array: expands it to 80 words and runs the four
 * 20-round stages of the SHA-1 compression function, folding the
 * result into the running digest.
 */
__device__ void SHA1ProcessMessageBlock(SHA1Context *context)
{
const unsigned K[] = /* Round constants defined in the SHA-1 standard */
{
0x5A827999,
0x6ED9EBA1,
0x8F1BBCDC,
0xCA62C1D6
};
int t; /* loop counter */
unsigned temp; /* temporary word value */
unsigned W[80]; /* expanded word sequence */
unsigned A, B, C, D, E; /* working word buffers */
/*
 * Initialize the first 16 words of W from the big-endian message block
 */
for(t = 0; t < 16; t++)
{
W[t] = ((unsigned) context->Message_Block[t * 4]) << 24;
W[t] |= ((unsigned) context->Message_Block[t * 4 + 1]) << 16;
W[t] |= ((unsigned) context->Message_Block[t * 4 + 2]) << 8;
W[t] |= ((unsigned) context->Message_Block[t * 4 + 3]);
}
/* Expand words 16..79 by XOR-and-rotate of earlier words */
for(t = 16; t < 80; t++)
{
W[t] = SHA1CircularShift(1,W[t-3] ^ W[t-8] ^ W[t-14] ^ W[t-16]);
}
A = context->Message_Digest[0];
B = context->Message_Digest[1];
C = context->Message_Digest[2];
D = context->Message_Digest[3];
E = context->Message_Digest[4];
/* Rounds 0-19: f = (B AND C) OR ((NOT B) AND D) */
for(t = 0; t < 20; t++)
{
temp = SHA1CircularShift(5,A) +
((B & C) | ((~B) & D)) + E + W[t] + K[0];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = SHA1CircularShift(30,B);
B = A;
A = temp;
}
/* Rounds 20-39: f = B XOR C XOR D */
for(t = 20; t < 40; t++)
{
temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[1];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = SHA1CircularShift(30,B);
B = A;
A = temp;
}
/* Rounds 40-59: f = majority(B, C, D) */
for(t = 40; t < 60; t++)
{
temp = SHA1CircularShift(5,A) +
((B & C) | (B & D) | (C & D)) + E + W[t] + K[2];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = SHA1CircularShift(30,B);
B = A;
A = temp;
}
/* Rounds 60-79: f = B XOR C XOR D */
for(t = 60; t < 80; t++)
{
temp = SHA1CircularShift(5,A) + (B ^ C ^ D) + E + W[t] + K[3];
temp &= 0xFFFFFFFF;
E = D;
D = C;
C = SHA1CircularShift(30,B);
B = A;
A = temp;
}
/* Fold the working buffers back into the running digest (mod 2^32) */
context->Message_Digest[0] =
(context->Message_Digest[0] + A) & 0xFFFFFFFF;
context->Message_Digest[1] =
(context->Message_Digest[1] + B) & 0xFFFFFFFF;
context->Message_Digest[2] =
(context->Message_Digest[2] + C) & 0xFFFFFFFF;
context->Message_Digest[3] =
(context->Message_Digest[3] + D) & 0xFFFFFFFF;
context->Message_Digest[4] =
(context->Message_Digest[4] + E) & 0xFFFFFFFF;
context->Message_Block_Index = 0;
}
/*
* SHA1PadMessage
*
* Согласно стандарту, сообщение должно быть дополнено кратно
* 512 бит. Первый бит заполнения должен быть "1". Последние 64
* бита представляют длину исходного сообщения. Все биты в
* между ними должны быть равны 0. Эта функция будет дополнять сообщение
* в соответствии с этими правилами путем заполнения массива Message_Block
* соответственно. Он также вызовет SHA1ProcessMessageBlock()
* соответствующим образом. После выполнения данной функции по возвращаемому
* значению можно будет сказать, была ли испорчена подпись сообщения.
*
*/
/*
 * SHA1PadMessage
 *
 * Per the standard, the message must be padded to a multiple of 512 bits.
 * The first padding bit must be "1", the last 64 bits hold the length of
 * the original message, and all bits in between must be 0. This function
 * pads the message accordingly by filling the Message_Block array and
 * calls SHA1ProcessMessageBlock() as needed (twice, when the current
 * block is too full to also hold the 64-bit length).
 */
__device__ void SHA1PadMessage(SHA1Context *context)
{
/*
 * Check whether the current message block is too small to hold the
 * initial padding bit plus the 8-byte length. If so, pad out this
 * block, process it, and continue padding in the next block.
 */
if (context->Message_Block_Index > 55)
{
context->Message_Block[context->Message_Block_Index++] = 0x80;
while(context->Message_Block_Index < 64)
{
context->Message_Block[context->Message_Block_Index++] = 0;
}
SHA1ProcessMessageBlock(context);
while(context->Message_Block_Index < 56)
{
context->Message_Block[context->Message_Block_Index++] = 0;
}
}
else
{
context->Message_Block[context->Message_Block_Index++] = 0x80;
while(context->Message_Block_Index < 56)
{
context->Message_Block[context->Message_Block_Index++] = 0;
}
}
/*
 * Store the message length (in bits) as the last 8 octets, big-endian
 */
context->Message_Block[56] = (context->Length_High >> 24) & 0xFF;
context->Message_Block[57] = (context->Length_High >> 16) & 0xFF;
context->Message_Block[58] = (context->Length_High >> 8) & 0xFF;
context->Message_Block[59] = (context->Length_High) & 0xFF;
context->Message_Block[60] = (context->Length_Low >> 24) & 0xFF;
context->Message_Block[61] = (context->Length_Low >> 16) & 0xFF;
context->Message_Block[62] = (context->Length_Low >> 8) & 0xFF;
context->Message_Block[63] = (context->Length_Low) & 0xFF;
SHA1ProcessMessageBlock(context);
}
|
// Element-wise vector addition: c[i] = a[i] + b[i], one thread per element.
__global__ void vec_add_kernel(float *c, float *a, float *b, int n) {
// Bug fix: every thread previously used i = 0, so only element 0 was ever
// computed; derive the index from the thread's position in the grid.
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
c[i] = a[i] + b[i];
}
}
|
19,550 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Debug kernel: each thread prints its block coordinates and the block/grid
// dimensions. Device-side printf is serialized and slow — debugging only.
__global__ void print_details()
{
printf("blockIdx.x : %d, blockIdx.y : %d, blockIdx.z : %d, blockDim.x : %d, blockDim.y : %d, gridDim.x : %d, gridDim.y :%d \n",
blockIdx.x, blockIdx.y, blockIdx.z,blockDim.x, blockDim.y, gridDim.x, gridDim.y);
}
//int main()
//{
// int nx, ny;
// nx = 16;
// ny = 16;
//
// dim3 block(8, 8);
// dim3 grid(nx / block.x, ny / block.y);
//
// print_details << <grid, block >> > ();
// cudaDeviceSynchronize();
//
// cudaDeviceReset();
// return 0;
//}
|
// Power-burning busy loop: each thread walks its elements of buf with a
// grid-stride loop, applying a Collatz-style integer update plus extra
// float/double arithmetic purely to keep the GPU loaded.
extern "C" __global__ void make_gpu_busy(int* buf, size_t size, int iterations)
{
    size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
    size_t gridStride = blockDim.x * gridDim.x;
    for (size_t i = gid; i < size; i += gridStride)
    {
        float accF = buf[i];
        double accD = buf[i];
        for (int iter = 0; iter < iterations; iter++)
        {
            // Collatz-style step keeps the integer value churning.
            if (buf[i] % 2)
                buf[i] = buf[i] * 3 + 1;
            else
                buf[i] /= 2;
            // Add more calculations to burn more power
            accD = accD * 0.5 + buf[i];
            accF = accF * 0.5 + sqrtf(buf[i] + accF);
        }
        buf[i] += (int) accF + accD;
    }
}
|
19,552 |
/************************************\
| filename: escape.c
|
| description: sequential version
| of code that outputs a .PGM file of
| a Mandelbrot fractal.
|
| notes: the number of pixels, 2400x2400
| was chosen so that it would take a fair
| amount of time to compute the image so
| that speedup may be observed on in a parallel
| implementation. it might be advisable
| to change the #defines for the purposes
| of developing a parallel version of the
| code.
|
| hint: the file output is a .PGM file which
| is viewable with the linux utility gimp.
| The 'convert' utility can convert
| from .pgm to .gif, which will save lots of disk
| space.
|
| authors: Bryan Schlief, Daegon Kim, Wim Bohm
|
\***********************************/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#define RADIUS_SQ 4.0 /* 2^2 */
#define X_RANGE 2400 /* # of pixels wide */
#define Y_RANGE 2400 /* # of pixels high */
/* helper function written to create a .pgm file, */
/* which is viewable with the program /usr/local/bin/xv */
/* alternately you can use /usr/bin/convert to convert */
/* the file from .pgm to any other file format */
/* type: convert seq_output.pgm seq_output.gif */
/* Abort with file/line context when a CUDA API call reports an error;
 * wrap call sites with the HANDLE_ERROR macro below. */
static void HandleError( cudaError_t err, const char *file, int line) {
if (err == cudaSuccess)
return;
printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
exit (EXIT_FAILURE);
}
#define HANDLE_ERROR(err)(HandleError(err, __FILE__, __LINE__))
void writePGM (char * filename, int * data, int width, int height);
/* One thread per pixel: (i, j) from the 2-D grid select a point c inside a
 * fixed zoomed-in window of the complex plane; `counter` measures how many
 * iterations z -> z^2 + c takes to escape |z|^2 > 4, and is mapped to a
 * 0-255 grey level.
 * NOTE(review): the output is written as d_output[i + j*N] with
 * N = blockDim.x*gridDim.x, while writePGM reads data[row*width + col] —
 * verify the intended image orientation. `outfilename` is an unused host
 * pointer (harmless while never dereferenced on the device). */
__global__ void mandelbrotRUN(double MAX_ITER, char* outfilename, int* d_output){
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int N = blockDim.x*gridDim.x; /* index variables */
int counter; /* measures the "speed" at which */
/* a particular point diverges. */
double real_max, real_min,
imag_max, imag_min; /* varibles that define the 'c' plane; */
double real_range, imag_range; /* distance per pixel */
double c_real, c_imag, /* c and z variables */
z_real, z_imag, z_magnitude; /* for inner for loop */
double z_current_real; /* temporary variable that holds the value */
/* of z_real for the calculation of z_imag */
real_min = 0.3129928802767, real_max = 0.31299305009252; /* define the 'c' plane */
imag_min = 0.0345483210604, imag_max = 0.0345485012278; /* you can change these for fun */
real_range = (real_max - real_min) / (X_RANGE - 1);
imag_range = (imag_max - imag_min) / (Y_RANGE - 1);
// for(i=0; i<Y_RANGE; ++i) {
// for(j=0; j<X_RANGE; ++j) {
c_real = real_min + i * real_range;
c_imag = imag_min + j * imag_range;
z_real = 0.0;
z_imag = 0.0;
/* escape-time loop: iterate z = z^2 + c until |z|^2 exceeds RADIUS_SQ */
for(counter = 0; counter < MAX_ITER; ++counter) {
z_current_real = z_real;
z_real = (z_real * z_real) - (z_imag * z_imag) + c_real;
//y
z_imag = (2.0 * z_current_real * z_imag) + c_imag;
z_magnitude = (z_real * z_real) + (z_imag * z_imag);
if(z_magnitude > RADIUS_SQ) {
break;
}
} //end for
d_output[i+j*N] = (int)floor(((double)(255 * counter)) / (double) MAX_ITER);
// output[i*X_RANGE+j] = (int)floor(((double)(255 * counter)) / (double)MAX_ITER);
// } // end for
// } // end for
}
// Renders a 2400x2400 Mandelbrot escape-time image on the GPU, times the
// kernel+copy with CUDA events, and writes the result as a PGM file.
int main(int argc, char ** argv) {
// parse command line arguments
// Bug fix: argv[3] is read below, so three arguments (argc >= 4) are
// required; the old `argc < 3` check allowed a read past argv.
if ( argc < 4 ) {
printf("Usage : %s PERTHREAD BLOCKSIZE ITERATIONS\n", argv[0]);
exit(0);
}
// int size = atoi(argv[1]);
int perThread = atoi(argv[1]); /* parsed but currently unused */
int blockSize = atoi(argv[2]);
double MAX_ITER = atoi(argv[3]); /* escape-loop iteration cap */
(void)perThread;
/* array form avoids the deprecated string-literal-to-char* conversion
 * while still decaying to the char* that writePGM expects */
char outfilename[] = "Mandelbrot.pgm"; /* the sequential output filename */
/* allocate memory to store output values for pixels */
int * output = (int*) malloc(sizeof(int) * X_RANGE * Y_RANGE);
int *d_output;
HANDLE_ERROR(cudaMalloc((void**)&d_output,sizeof(int)*X_RANGE*Y_RANGE));
/* square thread blocks; assumes blockSize divides X_RANGE and Y_RANGE */
dim3 blockDim(blockSize, blockSize);
dim3 gridDim(X_RANGE/blockDim.x, Y_RANGE/blockDim.y);
float time; /*timer*/
cudaEvent_t start, stop;
HANDLE_ERROR(cudaEventCreate(&start));
HANDLE_ERROR(cudaEventCreate(&stop));
HANDLE_ERROR(cudaEventRecord(start, 0));
mandelbrotRUN<<<gridDim, blockDim, 0>>>(MAX_ITER, outfilename, d_output );
/* surface launch-configuration errors immediately */
HANDLE_ERROR(cudaGetLastError());
HANDLE_ERROR(cudaMemcpy(output, d_output, sizeof(int)*X_RANGE*Y_RANGE, cudaMemcpyDeviceToHost));
HANDLE_ERROR(cudaEventRecord(stop, 0));
HANDLE_ERROR(cudaEventSynchronize(stop));
HANDLE_ERROR(cudaEventElapsedTime(&time, start, stop));
printf("Elapsed time: %lf sec\n", time/1000.);
/* write the pgm file to the file specified */
writePGM(outfilename, output, X_RANGE, Y_RANGE);
/* Bug fix: release device memory and timing events (previously leaked). */
HANDLE_ERROR(cudaFree(d_output));
HANDLE_ERROR(cudaEventDestroy(start));
HANDLE_ERROR(cudaEventDestroy(stop));
free(output);
return 0;
}
/* Write `data` (width x height grey levels) to `filename` in ASCII PGM
 * ("P2") format. The header needs the maximum pixel value, so one pass
 * finds it before the pixel rows are emitted. Error checking is omitted. */
void writePGM (char * filename, int * data, int width, int height) {
int max = -1;
for (int k = 0; k < width * height; ++k) {
if (data[k] > max) {
max = data[k];
}
}
FILE * fout = fopen(filename, "w");
/* PGM file header */
fprintf(fout, "P2\n");
fprintf(fout, "%d\t%d\n", width, height);
fprintf(fout, "%d\n",max);
/* pixel rows, tab separated */
for (int row = 0; row < height; ++row) {
for (int col = 0; col < width; ++col) {
fprintf(fout, "%d\t", data[row*width+col]);
}
fprintf(fout,"\n");
}
fflush(fout);
fclose(fout);
}
|
19,553 | /*
Name: Matthew Matze
Date: 11/1/2016
Class: csc4310
Location: ~/csc4310/cuda_mult3
General Summary of Program
The program is designed to take two matrices via input files and output
the result into the resultant file.
To Compile:
nvcc cudamultv3.cu -o cudamultv3
To Execute:
cudamultv3 Device_Number Tile_Width File_One File_Two Output_File
To Script:
nohup cudaRun.sh Device_Number &
*/
#include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#include<sys/time.h>
void load(FILE *file1, float *matrix, int n);
/*
* The load function puts the matrix from the file into a 1-d array of ints.
*
* Precondition: The file has the row/column dimension on the first line
* which has already been read in. On the following lines it will have that
* number of rows and columns of integers to be read in. The next parameter
* is an empty array of integers large enough to hold the contents of the
* input file. Lastly we have the row/column value in the final in parameter
*
* Postcondition: After Execution the file has been completely read through
* and the integer array is now fully loaded from the provided input file
*/
//__global__ void kernelmult(int *matrix1, int *matrix2, int *output, int n);
__global__ void kernelmult(float *Md, float *Nd, float *Pd, int Width,
int TILE_WIDTH);
/*
* The Kernel Multiply function multiplies the the matrices together
*
* Precondition: The first two parameters are integer arrays to be multiplied,
* third is a array to put the output in, and the last if for the size
*
* Postcondition: The multiplier function multiplies the first two matrices and
* puts the output in the third
*
*/
void multiply(int tilewidth, float *matrix1,float *matrix2, float *output, int n,
FILE *kerneltime);
/*
* The multiply function sets up the kernel and executes the kernel multiply
* function.
*
* Precondition: The tilewidth is the user inputed size of the tile, matrix1
* and matrix2 are the inputed matrices, the output matrix is the processed
* matrix and the size of the dimensions
*
* Postcondition: The output matrix is completely filled with the multiplied
* first two matrices.
*/
void outfunc(FILE *outfile, float *output, int n);
/*
* The output function takes the output matrix in the form of a 1-d array
* and puts it into an output file.
* output function outputs the matrix in the form of a 1-d array to the
* output file.
*
* Precondition: The first parameter is the array of the integers we have
* already processed and the second is the row/column dimension
*
* Postcondition: After Execution the output file is loaded with the first
* quadrant of the output array.
*/
/*
 * Reads two n x n matrices from files, multiplies them on the GPU five
 * times (appending per-run wall-clock times to CSV files), writes the
 * product to the output file, and records the total runtime.
 * Usage: prog Device_Number Tile_Width File_One File_Two Output_File
 */
int main(int argc, char *argv[]){
struct timeval startTime1, stopTime1, startTime2, stopTime2;
struct timeval startTime3, stopTime3, startTime4, stopTime4;
struct timeval startTime5, stopTime5, startTime6, stopTime6;
double start, stop, diff1 = 0, diff2 = 0, diff3 =0, diff4 = 0, diff5 = 0;
double diff6 = 0;
/* Robustness fix: all five command-line arguments are required. */
if (argc < 6) {
fprintf(stderr, "Usage: %s Device_Number Tile_Width File_One File_Two Output_File\n", argv[0]);
return 1;
}
gettimeofday(&startTime1,NULL);
int device = atoi(argv[1]);
cudaSetDevice(device);
int tilewidth = atoi(argv[2]);
FILE *file1;
FILE *file2;
FILE *outfile;
FILE *timing;
FILE *cudatime1;
FILE *cudatime2;
FILE *cudatime3;
FILE *cudatime4;
FILE *cudatime5;
FILE *kerneltime;
float *matrix1;
float *matrix2;
float *output;
int n;
//Intialize Variables
file1=fopen(argv[3],"r");
file2=fopen(argv[4],"r");
outfile=fopen(argv[5],"w");
timing=fopen("tottime.csv","a");
cudatime1=fopen("cudatime1.csv","a");
cudatime2=fopen("cudatime2.csv","a");
cudatime3=fopen("cudatime3.csv","a");
cudatime4=fopen("cudatime4.csv","a");
cudatime5=fopen("cudatime5.csv","a");
kerneltime=fopen("kerneltime.csv","a");
//Open input and output files
fscanf(file2, "%d", &n);
fscanf(file1, "%d", &n);
//Scan in the size of each so both are properly incremented in read file
/* Bug fix: elements are float (4 bytes), not float* — the old
 * sizeof(float *) silently over-allocated every buffer. */
matrix1 = (float*) malloc(n*n*sizeof(float));
matrix2 = (float*) malloc(n*n*sizeof(float));
output = (float*) malloc(n*n*sizeof(float));
//Allocate memory for matrix1,matrix2, and the output
load(file1, matrix1, n);
load(file2, matrix2, n);
//Load the 1-d arrays from the input files
fclose(file1);
fclose(file2);
//Close the input files
/* timing run 1 */
gettimeofday(&startTime2,NULL);
multiply(tilewidth, matrix1, matrix2, output, n, kerneltime);
gettimeofday(&stopTime2,NULL);
start = startTime2.tv_sec + (startTime2.tv_usec/1000000.0);
stop = stopTime2.tv_sec + (stopTime2.tv_usec/1000000.0);
diff2 = stop - start;
fprintf(cudatime1, ",%lf", diff2);
/* timing run 2 — Bug fix: this run previously mixed in the microseconds
 * of startTime2/stopTime2 (a copy-paste error), corrupting diff3. */
gettimeofday(&startTime3,NULL);
multiply(tilewidth, matrix1, matrix2, output, n, kerneltime);
gettimeofday(&stopTime3,NULL);
start = startTime3.tv_sec + (startTime3.tv_usec/1000000.0);
stop = stopTime3.tv_sec + (stopTime3.tv_usec/1000000.0);
diff3 = stop - start;
fprintf(cudatime2, ",%lf", diff3);
/* timing run 3 */
gettimeofday(&startTime4,NULL);
multiply(tilewidth, matrix1, matrix2, output, n, kerneltime);
gettimeofday(&stopTime4,NULL);
start = startTime4.tv_sec + (startTime4.tv_usec/1000000.0);
stop = stopTime4.tv_sec + (stopTime4.tv_usec/1000000.0);
diff4 = stop - start;
fprintf(cudatime3, ",%lf", diff4);
/* timing run 4 */
gettimeofday(&startTime5,NULL);
multiply(tilewidth, matrix1, matrix2, output, n, kerneltime);
gettimeofday(&stopTime5,NULL);
start = startTime5.tv_sec + (startTime5.tv_usec/1000000.0);
stop = stopTime5.tv_sec + (stopTime5.tv_usec/1000000.0);
diff5 = stop - start;
fprintf(cudatime4, ",%lf", diff5);
/* timing run 5 */
gettimeofday(&startTime6,NULL);
multiply(tilewidth, matrix1, matrix2, output, n, kerneltime);
gettimeofday(&stopTime6,NULL);
start = startTime6.tv_sec + (startTime6.tv_usec/1000000.0);
stop = stopTime6.tv_sec + (stopTime6.tv_usec/1000000.0);
diff6 = stop - start;
fprintf(cudatime5, ",%lf", diff6);
//Multiply the Matrices
outfunc(outfile, output, n);
//Output the matrix to the file
fclose(outfile);
//Close the output file
gettimeofday(&stopTime1,NULL);
start = startTime1.tv_sec + (startTime1.tv_usec/1000000.0);
stop = stopTime1.tv_sec + (stopTime1.tv_usec/1000000.0);
diff1 = stop - start;
fprintf(timing, ",%lf", diff1);
/* Resource fix: close the timing CSV files and free the host matrices
 * (previously leaked). */
fclose(timing);
fclose(cudatime1);
fclose(cudatime2);
fclose(cudatime3);
fclose(cudatime4);
fclose(cudatime5);
fclose(kerneltime);
free(matrix1);
free(matrix2);
free(output);
return 0;
}
/* Read n*n whitespace-separated floats from file1 into matrix (row-major).
 * Precondition: the dimension header on the first line was already consumed. */
void load(FILE *file1,float *matrix,int n){
const int total = n * n;
for (int i = 0; i < total; i++) {
fscanf(file1, "%f", &matrix[i]);
}
}
/* Tiled matrix multiply where each thread produces four output elements of
 * Pd: (Row, Col) and the three positions offset by TILE_WIDTH/2 in the
 * column and/or row direction (g_offset1 / g_offset2).
 * Dynamic shared memory holds two TILE_WIDTH*TILE_WIDTH tiles (Mds then
 * Nds), sized by the launch's third <<<>>> argument.
 * NOTE(review): Nds is loaded as Nd[Col*Width + ...], i.e. N is indexed
 * with Col selecting the row stride — this only matches A*B if Nd is
 * stored transposed; confirm against the caller's data layout.
 * NOTE(review): the loads assume Width is a multiple of TILE_WIDTH and
 * that blockDim matches the half-tile scheme implied by the offsets;
 * the __syncthreads() inside the uniform k-loop is legal (no divergence)
 * but unusually frequent. */
__global__ void kernelmult(float *Md, float *Nd, float *Pd, int Width,
int TILE_WIDTH){
extern __shared__ float smem[];
float *Mds = smem;
float *Nds = smem + (TILE_WIDTH*TILE_WIDTH);
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
/* one running dot product per output element this thread owns */
float Pvalue1 = 0;
float Pvalue2 = 0;
float Pvalue3 = 0;
float Pvalue4 = 0;
/* s_* are shared-memory offsets (tile coordinates); g_* are the matching
 * global-memory offsets (half a tile in columns, rows, or both) */
int s_offset1 = TILE_WIDTH/2;
int g_offset1 = TILE_WIDTH/2;
int s_offset2 = s_offset1*TILE_WIDTH;
int g_offset2 = g_offset1*Width;
int s_offset3 = s_offset1+s_offset2;
int g_offset3 = g_offset1+g_offset2;
int s_bp = ty*TILE_WIDTH+tx;
/* march tile-by-tile along the shared dimension */
for(int m=0;m<Width/TILE_WIDTH;++m){
/* each thread stages four elements of each input tile */
Mds[s_bp] = Md[Row*Width+(m*TILE_WIDTH+tx)];
Nds[s_bp] = Nd[Col*Width+(m*TILE_WIDTH+ty)];
Mds[s_bp+s_offset1] = Md[Row*Width+(m*TILE_WIDTH+tx+g_offset1)];
Nds[s_bp+s_offset1] = Nd[Col*Width+(m*TILE_WIDTH+ty+g_offset1)];
Mds[s_bp+s_offset2] = Md[Row*Width+(m*TILE_WIDTH+tx+g_offset2)];
Nds[s_bp+s_offset2] = Nd[Col*Width+(m*TILE_WIDTH+ty+g_offset2)];
Mds[s_bp+s_offset3] = Md[Row*Width+(m*TILE_WIDTH+tx+g_offset3)];
Nds[s_bp+s_offset3] = Nd[Col*Width+(m*TILE_WIDTH+ty+g_offset3)];
__syncthreads();
for(int k=0; k<TILE_WIDTH;k++){
Pvalue1 += Mds[ty*TILE_WIDTH+k]*Nds[k*TILE_WIDTH+tx];
Pvalue2 += Mds[ty*TILE_WIDTH+k]*Nds[k*TILE_WIDTH+tx+s_offset1];
Pvalue3 += Mds[ty*TILE_WIDTH+k+s_offset2]*Nds[k*TILE_WIDTH+tx];
Pvalue4 += Mds[ty*TILE_WIDTH+k+s_offset2]*Nds[k*TILE_WIDTH+tx+s_offset1];
__syncthreads();
}
}
/* write the four accumulated results back to global memory */
Pd[(Width*Row)+Col] = Pvalue1;
Pd[(Width*Row)+Col+g_offset1] = Pvalue2;
Pd[(Width*Row)+Col+g_offset2] = Pvalue3;
Pd[(Width*Row)+Col+g_offset3] = Pvalue4;
}
/*
__global__ void kernelmult(float *Md, float *Nd, float *Pd, int Width,
int TILE_WIDTH){
extern __shared__ float smem[];
float *Mds = smem;
float *Nds = smem + (TILE_WIDTH*TILE_WIDTH);
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * TILE_WIDTH + ty;
int Col = bx * TILE_WIDTH + tx;
float Pvalue =0;
for(int m=0;m<Width/TILE_WIDTH;++m){
Mds[ty*TILE_WIDTH+tx]=Md[Row*Width+(m*TILE_WIDTH+tx)];
Nds[ty*TILE_WIDTH+tx]=Nd[(m*TILE_WIDTH+ty)*Width+Col];
__syncthreads();
for(int k=0; k<TILE_WIDTH;++k){
Pvalue += (Mds[(ty*TILE_WIDTH)+k] * Nds[(k*TILE_WIDTH)+tx]);
}
__syncthreads();
}
Pd[(Width*Row)+Col] = Pvalue;
}
*/
/*
__global__ void kernelmult(int *matrix1, int *matrix2, int *output, int n){
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
output[row*n+col]=0;
for(int i=0;i<n;i++)
output[row*n+col] += (matrix1[i+(row*n)] * matrix2[(i*n)+col]);
}
*/
/* Host wrapper: copies the two n x n input matrices to the device, launches
 * the tiled kernel, times the kernel, and copies the result back.
 * tilewidth  - shared-memory tile edge (block is tilewidth/2 x tilewidth/2,
 *              matching kernelmult's 2x2-per-thread layout)
 * kerneltime - CSV stream the kernel time (seconds) is appended to
 * Fixes vs. original: sizes were computed with sizeof(float *) (pointer
 * size) instead of sizeof(float), doubling every allocation and copy on
 * 64-bit hosts and inflating the dynamic shared-memory request; and the
 * launch is asynchronous, so the stop timestamp was taken before the
 * kernel had actually run -- a cudaDeviceSynchronize() now closes the
 * timed region. */
void multiply(int tilewidth, float *matrix1,float *matrix2, float *output, int n,
FILE *kerneltime){
int smem_size=2*tilewidth*tilewidth*sizeof(float);  /* two float tiles */
int size=n*n*sizeof(float);
float *m1,*m2,*o;
double start, stop, diff = 0;
struct timeval startTime, stopTime;
cudaMalloc((void **) &m1, size);
cudaMemcpy(m1, matrix1, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &m2, size);
cudaMemcpy(m2, matrix2, size, cudaMemcpyHostToDevice);
cudaMalloc((void **) &o, size);
/* one block per output tile */
dim3 DimGrid((int)ceil((double)n/(double)tilewidth),
(int)ceil((double)n/(double)tilewidth),1);
dim3 DimBlock(tilewidth/2,tilewidth/2,1);
gettimeofday(&startTime,NULL);
kernelmult<<<DimGrid,DimBlock,smem_size>>>(m1, m2, o, n, tilewidth);
cudaDeviceSynchronize();  /* launch is async: wait so the timing is real */
gettimeofday(&stopTime,NULL);
start = startTime.tv_sec + (startTime.tv_usec/1000000.0);
stop = stopTime.tv_sec + (stopTime.tv_usec/1000000.0);
diff = stop - start;
fprintf(kerneltime, ",%lf", diff);
cudaMemcpy(output, o, size, cudaMemcpyDeviceToHost);
cudaFree(m1);
cudaFree(m2);
cudaFree(o);
}
/* Write the n x n matrix to `outfile`: the dimension on the first line,
 * then one row per line with space-separated integers ("%.0f"). */
void outfunc(FILE *outfile, float *output, int n){
    fprintf(outfile,"%d\n", n);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            fprintf(outfile,"%.0f", output[row * n + col]);
            if (col == n - 1)
                fprintf(outfile,"\n");
            else
                fprintf(outfile," ");
        }
    }
}
|
19,554 | #include "includes.h"
// Empty placeholder kernel; presumably launched only so a profiler timeline
// shows a marker delimiting the "compute coarse A" subphase -- TODO confirm.
__global__ void profileSubphaseComputeCoarseA_kernel() {} |
19,555 | #include "includes.h"
// Relabel connected-component ids: every cell of `components` that equals
// previousLabel is overwritten with newLabel.
// Expected launch: a 2D grid covering the component matrix, one thread per
// (row, col) cell; colsComponents is the row stride in elements.
// NOTE(review): there is no bounds check on i or j, so the launch geometry
// must match the matrix dimensions exactly -- confirm at the call site.
__global__ void relabelKernel(int *components, int previousLabel, int newLabel, const int colsComponents) {
uint i = (blockIdx.x * blockDim.x) + threadIdx.x;
uint j = (blockIdx.y * blockDim.y) + threadIdx.y;
if (components[i * colsComponents + j] == previousLabel) {
components[i * colsComponents + j] = newLabel;
}
} |
19,556 |
/* C = A + B for m x n int matrices, one or more rows per thread.
 * Launch: 1D grid of 1D blocks; each thread grid-strides over rows.
 * Fix vs. original: the stride was blockDim.x*blockDim.y*blockDim.z, which
 * ignores the grid, so with more than one block most rows were processed
 * redundantly by several threads.  The stride now matches the id formula
 * (a true 1D grid-stride over rows); results are unchanged. */
__global__ void per_row_kernel(int m,int n,int *A,int *B,int *C)
{
long long int total_no_of_threads=(long long int)gridDim.x*blockDim.x;
long long int id=threadIdx.x + blockIdx.x * blockDim.x;
for(long long int i=id;i<m;i+=total_no_of_threads)
{
for(long long int j=0;j<n;j++)
C[i*n + j]=A[i*n + j] + B[i*n + j];
}
}
/* C = A + B for m x n int matrices, one or more columns per thread.
 * Launch: 1D grid of 2D blocks (id is derived from threadIdx.x/y).
 * Fix vs. original: the stride ignored gridDim, so with more than one block
 * most columns were processed redundantly; it now equals the real total
 * thread count implied by the id formula.  Results are unchanged. */
__global__ void per_column_kernel(int m,int n,int *A,int *B,int *C)
{
long long int total_no_of_threads=(long long int)gridDim.x*blockDim.x*blockDim.y;
long long int id=blockDim.x*blockDim.y*blockIdx.x + blockDim.x*threadIdx.y + threadIdx.x;
for(long long int i=id;i<n;i+=total_no_of_threads)
{
for(long long int j=0;j<m;j++)
C[i+j*n]=A[i + j*n] + B[i + j*n];
}
}
/* C = A + B element-wise; 2D grid of 2D blocks, grid-stride over all m*n
 * elements.
 * Fixes vs. original: the loop variable was a plain int while id and the
 * stride are long long, truncating indices once m*n exceeds INT_MAX; the
 * loop bound m*n (int * int) and the thread-count product (unsigned ints)
 * could likewise overflow before widening.  All three are now computed in
 * long long. */
__global__ void per_element_kernel(int m,int n,int *A,int *B,int *C)
{
long long int total_no_of_threads=(long long int)gridDim.x*gridDim.y*gridDim.z*blockDim.x*blockDim.y*blockDim.z;
long long int blockid=gridDim.x * blockIdx.y + blockIdx.x;
long long int id=blockDim.x*blockDim.y*blockid + blockDim.x*threadIdx.y + threadIdx.x;
for(long long int i=id;i<(long long int)m*n;i+=total_no_of_threads)
{
C[i]=A[i] + B[i];
}
}
|
19,557 | #include "fastgemm.cuh"
/* Print a preview of a row-major matrix: at most the first 10 columns of
 * each row (the rest elided with " ...") and at most the first 12 rows. */
void printMatrix(float* mat, int row, int col)
{
    for (int r = 0; r < row; r++) {
        for (int c = 0; c < col; c++) {
            if (c >= 10) {
                printf(" ...");
                break;
            }
            printf("%6.1lf ", mat[r*col + c]);
        }
        printf("\n");
        if (r > 10)
            break;
    }
}
/* Compare C against the reference element-by-element (exact float
 * equality) and report either the first mismatch or success. */
void verify(float * C_ref, float * C, int row, int col)
{
    for (int r = 0; r < row; r++) {
        for (int c = 0; c < col; c++) {
            if (C_ref[r*col + c] == C[r*col + c])
                continue;
            printf("ERROR at (%d,%d) \n", r, c);
            return;
        }
    }
    printf("SUCCESS! no errors comparing with reference.\n");
}
/* CPU reference GEMM: C += A * B with A (M x K), B (K x N), C (M x N),
 * all row-major.  The k-outermost loop order is kept so the float
 * accumulation order -- and therefore the rounding -- matches the
 * original exactly. */
void ref_mmul(float * C, float * A, float * B)
{
    for (int kk = 0; kk < K; kk++) {
        for (int mm = 0; mm < M; mm++) {
            const float a_mk = A[mm*K + kk];
            for (int nn = 0; nn < N; nn++) {
                C[mm*N + nn] += a_mk * B[kk*N + nn];
            }
        }
    }
}
// Have to look into if this is necessary.
// Maps a (thread, logical element) pair to an interleaved physical index:
// element e of thread t lives at e*NUM_THREADS + t, so consecutive threads
// touch consecutive words for the same logical element.  Presumably meant
// to avoid shared-memory bank conflicts -- whether it does depends on
// NUM_THREADS vs. the 32-bank layout; verify with the profiler.
__device__ __forceinline__ int conflict_free_index(int local_id, int real_idx)
{
return real_idx * NUM_THREADS + local_id;
}
/* Accumulate one rank-1 update into the interleaved accumulator C:
 * for every m, C[m][id-th column group] += A[m] * B[id] (four columns at a
 * time via float4, fused multiply-add).
 * C must use the conflict_free_index interleaving; `stride` is currently
 * unused -- presumably intended for a strided variant, TODO confirm. */
__device__ __forceinline__ void outer_prod(float* C, float* A, float4* B, int id, int stride)
{
float4 b = B[id];
#pragma unroll
for (int m = 0; m < M; m++) {
C[conflict_free_index(id, m*4 + 0)] = fmaf(A[m], b.x, C[conflict_free_index(id, m*4 + 0)]);
C[conflict_free_index(id, m*4 + 1)] = fmaf(A[m], b.y, C[conflict_free_index(id, m*4 + 1)]);
C[conflict_free_index(id, m*4 + 2)] = fmaf(A[m], b.z, C[conflict_free_index(id, m*4 + 2)]);
C[conflict_free_index(id, m*4 + 3)] = fmaf(A[m], b.w, C[conflict_free_index(id, m*4 + 3)]);
}
}
/* Work-in-progress GEMM kernel: accumulates rank-1 updates (column k of A
 * times row k of B) into a shared-memory C accumulator, double-buffering
 * the A slices in registers.
 * NOTE(review): the loop runs k = 1..K-1 and multiplies slice k-1, so the
 * slice preloaded on the final iteration is never multiplied, and with
 * K == 1 no outer product happens at all.  This matches the "Assuming
 * K = 1 for now" remark but means the output is incomplete -- confirm
 * before relying on results.
 * NOTE(review): the "Store C back" printf is leftover debug output. */
__global__ void fastgemm(float* C, float4* A, float4* B)
{ // Assuming K = 1 for now
// Memory Instantiation
extern __shared__ float sharedMem[];
float registers_0[M];
float registers_1[M];
float* this_registers;
float* next_registers;
float* tmp;
float4* A_vec;
float4* B_vec;
// Identification
int id = threadIdx.x;
int stride = blockDim.x;
// Load C (Incorperate into L1 later)
// Zero the shared accumulator cooperatively.
for (int i = id; i < MAX_SHARED_SIZE_FLOAT; i+=stride)
sharedMem[i] = 0.0;
// Preload Setup: unpack the k=0 slice of A into registers_0.
for (int m = 0; m < MBY4; m+= 1) {
float4 num = A[m];
registers_0[m*4 + 0] = num.x;
registers_0[m*4 + 1] = num.y;
registers_0[m*4 + 2] = num.z;
registers_0[m*4 + 3] = num.w;
}
next_registers = registers_0;
this_registers = registers_1;
for (int k = 1; k < K; k++) {
// Ping pong for preload: consume the previously loaded slice while
// prefetching the next one into the other register bank.
tmp = this_registers;
this_registers = next_registers;
next_registers = tmp;
A_vec = A + k*MBY4;
// Preload the next set of A_vec in
#pragma unroll
for (int m = 0; m < MBY4; m+= 1) {
float4 num = A_vec[m];
next_registers[m*4 + 0] = num.x;
next_registers[m*4 + 1] = num.y;
next_registers[m*4 + 2] = num.z;
next_registers[m*4 + 3] = num.w;
}
B_vec = B + (k-1)*NBY4;
outer_prod(sharedMem, this_registers, B_vec, id, stride);
}
// Need to turn C into float4 later
if (id == 0)
printf("Store C back\n");
// Copy the shared accumulator out to global C.
for (int i = id; i < MAX_SHARED_SIZE_FLOAT; i+=stride)
C[i] = sharedMem[i];
}
/* Launch fastgemm with the project-configured grid/block/shared-memory
 * sizes and block until it finishes.  No error check is performed here;
 * call cudaGetLastError() after this returns to detect launch failures. */
void launchFastGemm(float* C, float4* A, float4* B)
{
fastgemm<<<NUM_BLOCKS, NUM_THREADS, MAX_SHARED_SIZE_BYTES>>>(C,A,B);
cudaDeviceSynchronize();
}
|
19,558 | #include <stdio.h>
const int N = 256;
/* Kernel: each launched thread prints a greeting with its thread index.
 * The parameter `a` is only used by the commented-out variant below. */
__global__
void hello(char *a)
{
printf("Hello from thread %d\n", threadIdx.x);
// printf("Hello from thread %d with letter %c\n", threadIdx.x, a[threadIdx.x % 32]);
}
/* Copy a string to the device, launch one block of N threads that each
 * print a greeting, then copy the buffer back and release the device
 * memory.
 * Fix vs. original: the file only includes <stdio.h>, but the return used
 * EXIT_SUCCESS, which is declared in <stdlib.h>; plain 0 is equivalent
 * and always in scope. */
int main()
{
    char a[N] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ01234";
    char *a_d;
    const int csize = N*sizeof(char);
    //printf("%s", a);
    cudaMalloc( (void**)&a_d, csize );
    cudaMemcpy( a_d, a, csize, cudaMemcpyHostToDevice );
    hello<<<1, N>>>(a_d);   /* N == 256, same geometry as the original launch */
    cudaMemcpy( a, a_d, csize, cudaMemcpyDeviceToHost );
    cudaFree( a_d );
    //printf("%s\n", a);
    return 0;
}
|
19,559 | #include <iostream>
#include <iomanip>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_fp16.h"
// float16 half-precision distance computation over 1,000,000 vectors of dimension 2048 (~4 GB of GPU memory)
// Note: the reduced precision may cause incorrect results
using namespace std;
const int D = 2048; // vector dimensionality
const int N1 = 10000; // number of records in the data file
const int D1 = 100; // replication factor for the data, to simulate a large dataset
const unsigned long N = N1*D1; // total number of vectors after replication
/* Compute, for each of the N training vectors, the squared Euclidean
 * distance (in half precision) to the single D-dimensional test vector.
 * train_data - pitched 2D allocation, one vector per row
 * pitch      - row pitch in bytes (as returned by cudaMallocPitch)
 * dis        - output, one half per training vector
 * NOTE(review): summing D=2048 squared terms in half precision can lose
 * accuracy/overflow -- see the file-header warning. */
__global__ void cal_dis(half *train_data, half *test_data, half *dis, int pitch)
{
//long tid = blockIdx.x;
unsigned long tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid<N)
{
half temp = 0.0;
half sum = 0.0;
for(int i=0;i<D;i++)
{
/* row base = (char*)train_data + tid*pitch (pitched layout) */
temp = __hsub(*((half*)((char*)train_data + tid * pitch) + i), test_data[i]);
sum = __hadd(sum, __hmul(temp, temp));
}
dis[tid] = sum;
}
}
/* Dump all N training vectors to stdout (converted to float), one vector
 * per line with 8 decimal places per component. */
void print(half *data)
{
    cout<<"training data:"<<endl;
    for(unsigned long row=0;row<N;row++)
    {
        for(int col=0;col<D;col++)
        {
            cout<< fixed << setprecision(8)<<__half2float(data[row*D+col])<<" ";
        }
        cout<<endl;
    }
}
/* Print the first n half values (as floats) on a single line. */
void print(half *data, unsigned long n)
{
    for(unsigned long idx=0;idx<n;idx++)
    {
        cout<< fixed << setprecision(8)<<__half2float(data[idx])<<" ";
    }
    cout<<endl;
}
/* Load N1+1 comma-separated vectors from ../vector.data into data_set
 * (as halves, scaled by 10), replicate the first N1 vectors D1 times to
 * simulate a large dataset, and keep the last line as the test vector at
 * index N.  Returns 0 on success, -1 if the file cannot be opened.
 * data_set must have room for (N+1)*D halves. */
int read_data(half *data_set)
{
float f1;
const char s[2] = ",";
char *token, *line;
FILE *fp;
half test[D];
// Assume one number takes at most 20 characters (16 decimal places kept, 18 chars total)
line = (char *)malloc(20*D*sizeof(char));
fp = fopen("../vector.data" , "r");
if(fp == NULL) {
perror("打开文件时发生错误");
return(-1);
}
// Read N1+1 lines; the last line is the test vector
for(int i=0;i<N1+1;i++) {
if( fgets (line, 20*D*sizeof(char), fp)!=NULL ) {
token = strtok(line, s);
int j = 0;
while (token != NULL)
{
f1 = atof(token);
//printf("%.8f ", f1);
*(data_set+i*D+j)=__float2half(f1*10.0); // scale by 10x for extra precision in half
token = strtok(NULL, s);
j++;
}
//puts("");
} else {
break;
}
}
fclose(fp);
free(line);
for(int i=0;i<D;i++) test[i]=*(data_set+N1*D+i); // save the test vector
for(int d=1;d<D1;d++){ // replicate the data D1 times
for(int i=0;i<N1;i++){
for(int j=0;j<D;j++){
*(data_set+(N1*d+i)*D+j)= *(data_set+i*D+j);
}
}
}
for(int i=0;i<D;i++) *(data_set+N*D+i)=test[i]; // restore the test vector at index N
return 0;
}
/* Driver: print GPU properties, load the vectors, copy them to the device
 * (pitched), run cal_dis to get all N distances, and scan for the minimum
 * on the host.  Timing: time1 = test-vector copy + kernel + result copy;
 * time2 = host-side minimum scan.
 * NOTE(review): `half distance[N]` is a ~2 MB stack array (N = 1e6) --
 * may overflow the default stack; consider heap allocation. */
int main()
{
half *h_train_data, *h_test_data;
half distance[N];
half *d_train_data , *d_test_data , *d_dis;
float time1, time2;
//printf("%d %d %d\n", sizeof(float), sizeof(half2), sizeof(half));
// Show GPU resources
int dev = 0;
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, dev);
std::cout << "使用GPU device " << dev << ": " << devProp.name << std::endl;
std::cout << "SM的数量:" << devProp.multiProcessorCount << std::endl;
std::cout << "每个线程块的共享内存大小:" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
std::cout << "每个线程块的最大线程数:" << devProp.maxThreadsPerBlock << std::endl;
std::cout << "每个EM的最大线程数:" << devProp.maxThreadsPerMultiProcessor << std::endl;
std::cout << "每个EM的最大线程束数:" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
//-----------
cudaEvent_t start1, stop1, stop2;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
cudaEventCreate(&stop2);
cout<<"num= "<<N<<"\tdim= "<<D<<endl;
h_train_data = (half*)malloc((N+1)*D*sizeof(half));
if (h_train_data==NULL){
puts("alloc memory fail!");
exit(-1);
}
size_t pitch_d;
size_t pitch_h = D * sizeof(half) ;
//allocate memory on GPU
cudaMallocPitch( &d_train_data, &pitch_d, D*sizeof(half), N);
cudaMalloc((void**)&d_test_data, D*sizeof(half));
cudaMalloc((void**)&d_dis, N*sizeof(half)); // d_dis[N] holds one distance per vector
//initialize training data
read_data(h_train_data);
//print(h_train_data);
//initialize testing data
h_test_data = h_train_data+D*N;
//cout<<"testing data:"<<endl;
//print(h_test_data,D);
//copy training and testing data from host to device
cudaMemcpy2D(d_train_data, pitch_d, h_train_data, pitch_h, D*sizeof(half), N, cudaMemcpyHostToDevice);
cudaEventRecord(start1, 0); // the bulk copy above is not timed; in practice data would be preloaded
cudaMemcpy(d_test_data, h_test_data, D*sizeof(half), cudaMemcpyHostToDevice);
// define the kernel launch configuration
dim3 blockSize(256);
dim3 gridSize((N + blockSize.x - 1) / blockSize.x);
printf("grid size: %d\tblock size: %d\n", gridSize.x, blockSize.x);
// run the kernel
cal_dis<<<gridSize, blockSize>>>(d_train_data,d_test_data,d_dis,pitch_d);
//calculate the distance
//cal_dis<<<N,1>>>(d_train_data,d_test_data,d_dis,pitch_d);
//copy distance data from device to host (this blocking copy also syncs the kernel)
cudaMemcpy(distance, d_dis, N*sizeof(half), cudaMemcpyDeviceToHost);
cudaEventRecord(stop1, 0);
// find the minimum distance on the host
float minimum = __half2float(distance[0]);
unsigned long min_pos = 0;
for(unsigned long i=1;i<N;i++) {
float tmp_dis = __half2float(distance[i]);
if (tmp_dis<minimum) {
minimum=tmp_dis;
min_pos=i;
}
}
cudaEventRecord(stop2, 0);
//cout<<"distance:"<<endl;
//print(distance, N);
cudaFree(d_train_data);
cudaFree(d_test_data);
cudaFree(d_dis);
free(h_train_data);
printf("min= %.8f\tpos= %ld\n", minimum, min_pos);
cudaEventElapsedTime(&time1, start1, stop1);
cudaEventElapsedTime(&time2, stop1, stop2);
printf("[ time taken: %fms %fms ]\n",time1, time2);
return 0;
}
|
19,560 | #include <iostream>
#include <fstream>
#include <ctime>
#include <cuda.h>
#include <cuda_runtime.h>
//#define WRITE_TO_FILE
using namespace std;
//Error handler
// Check a CUDA status code: on failure, print the error string together
// with the source location and terminate the process.
static void HandleError(cudaError_t err,
const char *file,
int line)
{
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
#define HANDLE_ERROR( error ) (HandleError( error, __FILE__, __LINE__ ))
//The program's GPU kernel
/* One explicit Euler step of the 1D heat equation on a uniform grid of N
 * points: Tplusdev = step(Tdev).
 *   tid == 0:    fixed boundary, T = 0
 *   tid == N-1:  flux boundary with constant source term 5
 *   otherwise:   standard second-difference interior update
 * Fix vs. original: the constants were double literals (0.0, 5.0, 2.0),
 * silently promoting the whole update to double arithmetic in a float
 * kernel; the f-suffixed literals keep the math in single precision. */
__global__ void stepKernel(float *Tdev,float *Tplusdev,float h,float tau,int N)
{
int tid=blockIdx.x*blockDim.x+threadIdx.x;
if(tid==0)
{
Tplusdev[tid]=0.0f;
}
else if(tid==N-1)
{
Tplusdev[tid]=tau/h*((-Tdev[tid]+Tdev[tid-1])/h+5.0f)+Tdev[tid];
}
else if(tid<N-1)
{
Tplusdev[tid]=tau/h/h*(Tdev[tid+1]-2.0f*Tdev[tid]+Tdev[tid-1])+Tdev[tid];
}
}
/* Run the explicit 1D heat solver on the GPU, then the same number of
 * steps on the CPU, and report both wall times and their ratio.
 * NOTE(review): the CPU loop starts from T holding the GPU's final state
 * (copied back just above), not from zeros -- results differ from the GPU
 * run, but only timings are compared, so this appears intentional.
 * NOTE(review): `int cl` narrows clock_t -- may overflow on long runs. */
int main()
{
#ifdef WRITE_TO_FILE
ofstream ofile("../therm1dexpl/data.dat");
ofile.precision(16);
int counter=0, writeeach=1;
#endif
int N=101;
float L=10.0,tau=0.001,tmax=5.0,t=0.0;
float h=L/N;
float *T, *Tplus,*Tdev,*Tplusdev,*temp;
float cputime,gputime;
T=new float[N];
Tplus=new float[N];
HANDLE_ERROR( cudaMalloc(&Tdev,N*sizeof(float)) );
HANDLE_ERROR( cudaMalloc(&Tplusdev,N*sizeof(float)) );
HANDLE_ERROR( cudaMemset(Tdev,0,N*sizeof(float)) );
memset(T,0,N*sizeof(float));
dim3 threads(1024,1,1);
dim3 blocks((N%1024==0)?(N/1024):(N/1024+1),1,1);
cudaEvent_t start,end;
HANDLE_ERROR( cudaEventCreate(&start) );
HANDLE_ERROR( cudaEventCreate(&end) );
HANDLE_ERROR( cudaEventRecord(start) );
HANDLE_ERROR( cudaEventSynchronize(start) );
/* GPU time-stepping loop: ping-pong the device buffers each step */
while(t<tmax-tau/2.0)
{
stepKernel<<<blocks,threads>>>(Tdev,Tplusdev,h,tau,N);
HANDLE_ERROR( cudaGetLastError() );
HANDLE_ERROR( cudaDeviceSynchronize() );
temp=Tdev;
Tdev=Tplusdev;
Tplusdev=temp;
t+=tau;
#ifdef WRITE_TO_FILE
HANDLE_ERROR( cudaMemcpy(T,Tdev,N*sizeof(float),cudaMemcpyDeviceToHost) );
if(counter%writeeach==0)
{
for(int i=0;i<N;i++)
ofile<<T[i]<<endl;
ofile<<endl;
ofile<<endl;
}
counter++;
#endif
}
HANDLE_ERROR( cudaMemcpy(T,Tdev,N*sizeof(float),cudaMemcpyDeviceToHost) );
HANDLE_ERROR( cudaEventRecord(end) );
HANDLE_ERROR( cudaEventSynchronize(end) );
HANDLE_ERROR( cudaEventElapsedTime(&gputime,start,end) );
gputime/=1000.0;
/* CPU reference loop, timed with clock() */
int cl=0;
cl-=clock();
t=0;
while(t<tmax-tau/2.0)
{
Tplus[0]=0.0;
Tplus[N-1]=tau/h*((-T[N-1]+T[N-2])/h+5.0)+T[N-1];
for(int i=1;i<N-1;i++)
Tplus[i]=tau/h/h*(T[i+1]-2.0*T[i]+T[i-1])+T[i];
t+=tau;
temp=T;
T=Tplus;
Tplus=temp;
}
cl+=clock();
cputime=(float)cl/CLOCKS_PER_SEC;
cout<<"CPU time: "<<cputime<<endl;
cout<<"GPU time: "<<gputime<<endl;
cout<<"Ratio: "<<cputime/gputime<<endl;
#ifdef WRITE_TO_FILE
ofile.close();
#endif
HANDLE_ERROR( cudaFree(Tdev) );
HANDLE_ERROR( cudaFree(Tplusdev) );
HANDLE_ERROR( cudaEventDestroy(start) );
HANDLE_ERROR( cudaEventDestroy(end) );
delete[] T;
delete[] Tplus;
return 0;
}
|
19,561 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-06-14
*/
#include "SetAscendingOrder.cuh"
#include "../../XDevice.h"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
set the cell to the ascending order along a given dimension (kernel code)
>> data - the data array
>> stride - how many items we go ove when move to the next item along the dimension
>> strideNum - size of the given dimension
>> blockNum - block number
*/
/* See the block comment above for parameter semantics.  The x dimension
 * enumerates (block, offset) pairs along the stride dimension; y is the
 * position within the dimension being filled. */
__global__
void KernelSetAscendingOrder(int * data, int stride, int strideNum, int blockNum)
{
/* cache i/stride and i%stride once per x-thread (computed by the y==0 row)
 * so the other rows avoid redundant integer div/mod */
__shared__ int iBlock[MAX_CUDA_THREAD_NUM_PER_BLOCK];
__shared__ int iOffset[MAX_CUDA_THREAD_NUM_PER_BLOCK];
/* index along the "stride" dimension */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* index along the leading dimension */
int j = blockDim.y * blockIdx.y + threadIdx.y;
/* NOTE(review): threads can return here before the __syncthreads() below;
 * the barrier is then executed by only part of the block -- safe only if
 * out-of-range threads always drop out in whole warps/blocks. Confirm. */
if(i >= stride * blockNum || j >= strideNum)
return;
if(threadIdx.y == 0){
iBlock[threadIdx.x] = i / stride;
iOffset[threadIdx.x] = i % stride;
}
__syncthreads();
/* each cell gets its own index j along the target dimension */
int * d = (int*)data + (iBlock[threadIdx.x] * strideNum + j) * stride + iOffset[threadIdx.x];
*d = j;
}
/*
set the cell to the ascending order along a given dimension
>> a - the tensor
>> dim - the dimension
*/
/* Host wrapper: decompose the tensor's shape around `dim` into
 * (blockNum, strideNum, stride), pick a 2D launch geometry via GDevs, and
 * run the fill kernel on the tensor's device.  Only X_INT tensors are
 * supported (checked). */
void CudaSetAscendingOrder(XTensor * a, int dim)
{
CheckNTErrors((a->dataType == X_INT), "TODO!");
/* stride   = product of dims after `dim` (elements between neighbors)
 * blockNum = product of dims before `dim`
 * strideNum = size of `dim` itself */
int stride = 1;
int blockNum = 1;
int strideNum = a->dimSize[dim];
for(int i = 0; i < dim; i++)
blockNum *= a->dimSize[i];
for(int i = dim + 1; i < a->order; i++)
stride *= a->dimSize[i];
int gridSize[3];
int blockSize[3];
GDevs.GetCudaThread2D(a->devID, strideNum, stride * blockNum, MAX_INT, gridSize, blockSize);
/* switch to the tensor's device for the launch, then restore */
int devIDBackup;
ProtectCudaDev(a->devID, devIDBackup);
KernelSetAscendingOrder<<<dim3(gridSize[1], gridSize[0]), dim3(blockSize[1], blockSize[0])>>>
((int*)a->data, stride, strideNum, blockNum);
BacktoCudaDev(a->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
19,562 | #include<bits/stdc++.h>
#include<thrust/device_vector.h>
#include<thrust/transform.h>
#include<thrust/extrema.h>
#include<thrust/copy.h>
#include<thrust/functional.h>
using namespace std;
/* Binary functor: clamped difference max(x - y, 0).
 * Used by schedule() via thrust::transform to age each core's remaining
 * busy time by the elapsed arrival gap without going negative.
 * Fix vs. original: the operands were taken as const float&, so the int
 * elements of the device vectors went through a float round-trip (lossy
 * above 2^24); the computation now stays in integers. */
struct process {
__host__ __device__
int operator()(const int& x, const int& y) const {
return (x-y>=0?x-y:0);
}
};
/* Greedy scheduler: N jobs (sorted by arrival) are assigned to the M cores
 * with the least remaining work; returns the total turnaround time.
 * cores_schedules[i] is malloc'd here with the job ids given to core i;
 * cs_lengths[i] receives its length.  Caller frees the arrays.
 * Fix vs. original: the per-core schedule was allocated with
 * sizeof(int*) per entry instead of sizeof(int) (harmless over-allocation,
 * but wrong).  Also flagged: the long accumulator is returned as int. */
int schedule(int N, int M, int* arrival_times, int* burst_times, int** cores_schedules, int* cs_lengths){
// Change turnaround_time to GPU and finally only copy it back Should save 20ms
long int turnaround_time=0;
thrust::device_vector<int> core(M, 0);   // remaining busy time per core
vector<vector<int>>data(M);              // job ids assigned to each core
int last_val=arrival_times[0];
for (int i=0;i<N;i++){
/* advance virtual time to this arrival: subtract the gap, clamped at 0 */
if(last_val!=arrival_times[i]){
thrust::device_vector<int> tmp(M, arrival_times[i]-last_val);
thrust::transform(core.begin(), core.end(),tmp.begin(),core.begin(), process());
last_val = arrival_times[i];
}
int minElementIndex = thrust::min_element(core.begin(),core.end()) - core.begin();
core[minElementIndex]+=burst_times[i]; // Same here
turnaround_time+=core[minElementIndex]; // These Function take so much time to copy
data[minElementIndex].push_back(i);
}
for (int i=0;i<M;i++){
cs_lengths[i]=data[i].size();
cores_schedules[i] = (int*)malloc(cs_lengths[i] * sizeof(int));
for (int j=0;j<cs_lengths[i];j++){
cores_schedules[i][j] = data[i][j];
}
}
/* NOTE(review): narrowed to int to preserve the declared interface;
 * may overflow for very large workloads. */
return turnaround_time;
}
|
19,563 | //Based on the work of Andrew Krepps
// C
#include <stdio.h>
// C++
#include <chrono>
#include <functional>
#include <initializer_list>
#include <vector>
///////////////////////////////////////////////////////////////////////////////
// Constants
///////////////////////////////////////////////////////////////////////////////
// The range of the random values to generate for the second input array.
constexpr int RANDOM_MIN = 0;
constexpr int RANDOM_MAX = 3;
///////////////////////////////////////////////////////////////////////////////
// Utilities
///////////////////////////////////////////////////////////////////////////////
/**
 * Flat global index of the calling thread in a 1D launch.
 *
 * Device-only helper; meant to be called from kernels.
 */
__device__
unsigned threadIndex()
{
    return threadIdx.x + (blockIdx.x * blockDim.x);
}
/**
 * Generate a random value in the range [min, max).
 *
 * Fix vs. original: `(rand() % max) + min` only honors the documented
 * range when min == 0; for min > 0 it could return values >= max.
 * Scaling by the span (max - min) gives the correct half-open range.
 * Requires max > min.
 */
int randRange(int min, int max)
{
    return min + (rand() % (max - min));
}
/**
 * Measure the wall-clock time taken to run `func` once.
 */
std::chrono::nanoseconds timeIt(std::function<void()> func)
{
    const auto begin = std::chrono::high_resolution_clock::now();
    func();
    const auto finish = std::chrono::high_resolution_clock::now();
    return std::chrono::duration_cast<std::chrono::nanoseconds>(finish - begin);
}
///////////////////////////////////////////////////////////////////////////////
// Kernels
///////////////////////////////////////////////////////////////////////////////
__global__
void add(const int a[], const int b[], int c[])
{
    // One element per thread: c = a + b.
    const unsigned i = threadIndex();
    c[i] = a[i] + b[i];
}
__global__
void subtract(const int a[], const int b[], int c[])
{
    // One element per thread: c = a - b.
    const unsigned i = threadIndex();
    c[i] = a[i] - b[i];
}
__global__
void multiply(const int a[], const int b[], int c[])
{
    // One element per thread: c = a * b.
    const unsigned i = threadIndex();
    c[i] = a[i] * b[i];
}
__global__
void modulo(const int a[], const int b[], int c[])
{
    unsigned index = threadIndex();
    // Fix vs. original: main() fills b with randRange(0, 4), so b[index]
    // can be 0 and a % 0 is undefined behavior on the GPU.  Define the
    // result as 0 for a zero divisor.
    c[index] = (b[index] != 0) ? (a[index] % b[index]) : 0;
}
///////////////////////////////////////////////////////////////////////////////
// Main
///////////////////////////////////////////////////////////////////////////////
/**
 * Parse the command line args.
 *
 * argv[1] (optional): total thread count, default 1<<20.
 * argv[2] (optional): block size, default 256.
 * numBlocks is derived; when the thread count is not a multiple of the
 * block size it is rounded up to the next whole block (with a warning).
 *
 * The body of this function was supplied in the starter file. It was not written by me.
 */
void parseArgs(int argc, char** argv,
               int& totalThreads,
               int& blockSize,
               int& numBlocks)
{
    // Defaults, then positional overrides.
    totalThreads = (1 << 20);
    blockSize = 256;
    if (argc >= 2) {
        totalThreads = atoi(argv[1]);
    }
    if (argc >= 3) {
        blockSize = atoi(argv[2]);
    }
    numBlocks = totalThreads / blockSize;
    // Round up to a whole number of blocks when the division isn't exact.
    if (totalThreads % blockSize == 0) {
        return;
    }
    ++numBlocks;
    totalThreads = numBlocks * blockSize;
    printf("Warning: Total thread count is not evenly divisible by the block size\n");
    printf("The total number of threads will be rounded up to %d\n", totalThreads);
}
/* Benchmark driver: build two input arrays (A = 0..n-1, B = small randoms),
 * copy them to the device, and time each arithmetic kernel (launch +
 * device sync) with timeIt.
 * NOTE(review): cpuB is filled from randRange(0, 4) and so can contain 0;
 * the modulo kernel then divides by zero for those elements -- confirm
 * this is acceptable for a timing-only benchmark. */
int main(int argc, char** argv)
{
int totalThreads = 0;
int blockSize = 0;
int numBlocks = 0;
parseArgs(argc, argv, totalThreads, blockSize, numBlocks);
printf("With %d blocks of size %d (%d total threads)...\n", numBlocks, blockSize, totalThreads);
// Create GPU arrays.
int* gpuInputA;
int* gpuInputB;
int* gpuOutput;
int arraySize = totalThreads * sizeof(int);
cudaMalloc((void**)&gpuInputA, arraySize);
cudaMalloc((void**)&gpuInputB, arraySize);
cudaMalloc((void**)&gpuOutput, arraySize);
// Fill input arrays.
std::vector<int> cpuA(totalThreads);
std::vector<int> cpuB(totalThreads);
srand(time(NULL));
for (int i = 0; i < totalThreads; i++)
{
cpuA[i] = i;
cpuB[i] = randRange(RANDOM_MIN, RANDOM_MAX + 1);
}
cudaMemcpy(gpuInputA, cpuA.data(), arraySize, cudaMemcpyHostToDevice);
cudaMemcpy(gpuInputB, cpuB.data(), arraySize, cudaMemcpyHostToDevice);
// Time how long it takes to perform each of the arithmetic operations.
using OperationFunc = void (*)(const int*, const int*, int*);
std::initializer_list<std::pair<const char*, OperationFunc>> operations =
{
{ "Add ", add },
{ "Subtract", subtract },
{ "Multiply", multiply },
{ "Modulo ", modulo }
};
for (auto operation : operations)
{
auto name = operation.first;
auto func = operation.second;
// Clear the output array.
cudaMemset((void*)gpuOutput, 0, arraySize);
// Time the operation. The sync before timing drains prior work; the
// sync inside the lambda makes the measurement cover kernel execution.
cudaDeviceSynchronize();
auto time = timeIt([&]
{
func<<<numBlocks, blockSize>>>(gpuInputA, gpuInputB, gpuOutput);
cudaDeviceSynchronize();
});
printf("%s %7lldns\n", name, (long long)time.count());
}
printf("\n");
// Free GPU arrays.
cudaFree(gpuInputA);
cudaFree(gpuInputB);
cudaFree(gpuOutput);
}
|
19,564 | #include "includes.h"
/* LSTM backward pass, output-gate stage: one thread per memory block.
 * For each cell in the block: cellStateError = outputGateActivation * delta,
 * and the output-gate delta is the activation derivative times the sum of
 * cellState * delta over the block's cells.
 * Launch: 2D grid of 1D blocks covering cellCount/cellsPerBlock memory
 * blocks (bounds-checked below). */
__global__ void LSTMDeltaKernel( float *cellStateErrors, float *outputGateDeltas, float *cellStates, float *outputGateActivations, float *outputGateActivationDerivatives, float *deltas, int cellCount, int cellsPerBlock )
{
int memoryBlockId = blockDim.x * blockIdx.y * gridDim.x //rows preceeding current row in grid
+ blockDim.x * blockIdx.x //blocks preceeding current block
+ threadIdx.x;
if (memoryBlockId < cellCount / cellsPerBlock)
{
float outputGateDeltaSum = 0.0;
for (int cellId = memoryBlockId * cellsPerBlock; cellId < (memoryBlockId + 1) * cellsPerBlock; cellId++)
{
float delta = deltas[cellId];
cellStateErrors[cellId] = outputGateActivations[memoryBlockId] * delta;
outputGateDeltaSum += cellStates[cellId] * delta;
}
outputGateDeltas[memoryBlockId] = outputGateActivationDerivatives[memoryBlockId] * outputGateDeltaSum;
}
} |
19,565 | #include<stdio.h>
int main(){
    // Emit the greeting exactly as before (no trailing newline).
    fputs("Hello World!!!", stdout);
    return 0;
}
|
19,566 | #include "kernel.cuh"
namespace kernel
{
/* Final warp-level stage of a shared-memory sum reduction: unrolled adds
 * for offsets 32..1, no barriers.
 * NOTE(review): this relies on `volatile` plus the pre-Volta implicit
 * warp synchrony; on Volta+ independent thread scheduling it needs
 * __syncwarp() (or shuffle intrinsics) to be safe -- confirm target arch.
 * NOTE(review): the guards test tid_global + k < size (the global tail),
 * not the shared-memory extent -- verify they match the callers' loading
 * pattern. */
__device__ void WarpReduce(
volatile int* shared, const unsigned int tid, const unsigned int tid_global, const unsigned int size)
{
if (tid_global + 32 < size) {
shared[tid] += shared[tid + 32];
}
if (tid_global + 16 < size) {
shared[tid] += shared[tid + 16];
}
if (tid_global + 8 < size) {
shared[tid] += shared[tid + 8];
}
if (tid_global + 4 < size) {
shared[tid] += shared[tid + 4];
}
if (tid_global + 2 < size) {
shared[tid] += shared[tid + 2];
}
if (tid_global + 1 < size) {
shared[tid] += shared[tid + 1];
}
}
/* Block-wise sum reduction, naive interleaved addressing: each block sums
 * blockDim.x inputs into out[blockIdx.x].  Requires blockDim.x * sizeof(int)
 * dynamic shared memory.  The tid % (2*s) selection leaves most lanes of a
 * warp idle each round (divergent); see ReduceSum1..4 for refinements.
 * The `i + s < size` guard keeps out-of-range elements (uninitialized
 * shared slots in the last block) from ever being read. */
__global__ void ReduceSum0(const int* in, int* out, const unsigned int size)
{
extern __shared__ int shared[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
if (i < size) {
shared[tid] = in[i];
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
if (tid % (2 * s) == 0 && i + s < size) {
shared[tid] += shared[tid + s];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared[0];
}
}
/* Block-wise sum reduction, variant 1: replaces the modulo selection of
 * ReduceSum0 with strided indexing (index1 = 2*s*tid) so active threads
 * stay contiguous.  Same contract: one partial sum per block into
 * out[blockIdx.x], blockDim.x ints of dynamic shared memory.
 * The t0 + index2 < size guard keeps the last block from reading shared
 * slots that were never loaded. */
__global__ void ReduceSum1(const int* in, int* out, const unsigned int size)
{
extern __shared__ int shared[];
unsigned int tid = threadIdx.x;
unsigned int t0 = blockIdx.x * blockDim.x;
unsigned int i = t0 + tid;
if (i < size) {
shared[tid] = in[i];
}
__syncthreads();
for (unsigned int s = 1; s < blockDim.x; s *= 2) {
unsigned int index1 = 2 * s * tid;
unsigned int index2 = index1 + s;
if (index1 < blockDim.x && t0 + index2 < size) {
shared[index1] += shared[index2];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared[0];
}
}
/* Block-wise sum reduction, variant 2: sequential addressing -- the stride
 * s halves each round and thread tid adds shared[tid + s], keeping the
 * active threads in the low, contiguous range.  Same contract as the
 * other ReduceSum kernels (one partial per block, blockDim.x ints of
 * dynamic shared memory; i + s < size guards the ragged tail). */
__global__ void ReduceSum2(const int* in, int* out, const unsigned int size)
{
extern __shared__ int shared[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * blockDim.x + tid;
if (i < size) {
shared[tid] = in[i];
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s && i + s < size) {
shared[tid] += shared[tid + s];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared[0];
}
}
/* Block-wise sum reduction, variant 3: each block covers 2*blockDim.x
 * inputs and performs the first addition during the load (in[i] +
 * in[i + blockDim.x]), halving the number of blocks vs. ReduceSum2.
 * Otherwise identical sequential-addressing loop and contract. */
__global__ void ReduceSum3(const int* in, int* out, const unsigned int size)
{
extern __shared__ int shared[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (2 * blockDim.x) + tid;
if (i + blockDim.x < size) {
shared[tid] = in[i] + in[i + blockDim.x];
}
else if (i < size) {
shared[tid] = in[i];
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (tid < s && i + s < size) {
shared[tid] += shared[tid + s];
}
__syncthreads();
}
if (tid == 0) {
out[blockIdx.x] = shared[0];
}
}
/* Block-wise sum reduction, variant 4: like ReduceSum3 (first add on
 * load) but the loop stops at s == 64 and the final 32-wide stage is
 * handed to WarpReduce, dropping the last six __syncthreads() calls.
 * NOTE(review): inherits WarpReduce's reliance on implicit warp
 * synchrony -- see the caveat on that helper for Volta+ targets. */
__global__ void ReduceSum4(const int* in, int* out, const unsigned int size)
{
extern __shared__ int shared[];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x * (2 * blockDim.x) + tid;
if (i + blockDim.x < size) {
shared[tid] = in[i] + in[i + blockDim.x];
}
else if (i < size) {
shared[tid] = in[i];
}
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1) {
if (tid < s && i + s < size) {
shared[tid] += shared[tid + s];
}
__syncthreads();
}
if (tid < 32) {
WarpReduce(shared, tid, i, size);
}
if (tid == 0) {
out[blockIdx.x] = shared[0];
}
}
/* Block-wise MIN reduction, structured like ReduceSum3: each block covers
 * 2*blockDim.x inputs, takes the pairwise min during the load, then
 * sequential-addressing min rounds; out[blockIdx.x] gets the block's
 * minimum.  Requires blockDim.x ints of dynamic shared memory. */
__global__ void ReduceMin(const int* in, int* out, const unsigned int size)
{
extern __shared__ int shared[];
unsigned int tid_local = threadIdx.x;
unsigned int tid_global = blockIdx.x * (2 * blockDim.x) + tid_local;
if (tid_global + blockDim.x < size) {
shared[tid_local] = (in[tid_global] < in[tid_global + blockDim.x]) ? in[tid_global] : in[tid_global + blockDim.x];
}
else if (tid_global < size) {
shared[tid_local] = in[tid_global];
}
__syncthreads();
for (unsigned int i = blockDim.x / 2; i > 0; i >>= 1) {
if (tid_local < i && tid_global + i < size) {
shared[tid_local] = (shared[tid_local] < shared[tid_local + i]) ? shared[tid_local] : shared[tid_local + i];
}
__syncthreads();
}
if (tid_local == 0) {
out[blockIdx.x] = shared[0];
}
}
} // namespace kernel |
19,567 | /* Odd-even sort
* This will need to be called within a loop that runs from 0 to
* the ceiling of N/2 - 1, where N is the number of eigenvalues
* We assume a linear array of threads and it will be the caller's
* responsibility to ensure the thread indices are in bounds
* Note to self: There is a GPU Quicksort available, but I have to modify
* it to also move around eigenvectors... challenging, striving for accuracy
*/
__global__ void oddEvenEigSort( float *eigenvalues, float *eigenvectors, int N, int odd = 0 ) {
    // Each thread owns one adjacent pair; `odd` selects the odd-phase pairs.
    int idx = 2 * (blockIdx.x * blockDim.x + threadIdx.x) + (odd ? 1 : 0);
    if( idx >= N - 1 ) {
        return;
    }
    // Compare-and-swap the eigenvalue pair; if swapped, also swap the
    // corresponding eigenvector columns to keep them matched.
    if( eigenvalues[idx] > eigenvalues[idx + 1] ) {
        float tmp = eigenvalues[idx];
        eigenvalues[idx] = eigenvalues[idx + 1];
        eigenvalues[idx + 1] = tmp;
        for( int row = 0; row < N; row++ ) {
            float v = eigenvectors[row * N + idx];
            eigenvectors[row * N + idx] = eigenvectors[row * N + idx + 1];
            eigenvectors[row * N + idx + 1] = v;
        }
    }
}
|
19,568 | #include <stdio.h>
#include <stdexcept>
#include <cuda_runtime.h>
#include <math.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
// Throw if the most recent CUDA operation failed, then block until the
// device has drained all queued work (throwing on any async failure too).
void LayerSynchronize()
{
    const bool launchOk = (cudaGetLastError() == cudaError::cudaSuccess);
    if (!launchOk)
    {
        throw std::runtime_error("CUDA method returned an error");
    }
    const bool syncOk = (cudaDeviceSynchronize() == cudaError::cudaSuccess);
    if (!syncOk)
    {
        throw std::runtime_error("CUDA syncronize returned an error");
    }
}
19,569 | /**
Copyright (c) 2015 <wataro>
This software is released under the MIT License.
http://opensource.org/licenses/mit-license.php
*/
#include <cuda.h>
/* Allocate `size` bytes of device memory.  Returns nullptr when the
 * allocation fails (cudaMalloc leaves the pointer untouched on error). */
void * allocate_cuda_memory(size_t size)
{
    void * device_ptr = nullptr;
    cudaMalloc(&device_ptr, size);
    return device_ptr;
}
/* Release device memory previously obtained from allocate_cuda_memory. */
void delete_cuda_memory(void * p)
{
cudaFree(p);
}
|
19,570 | #include <cmath>
#include <cstdio>
#include <cstring>
#include <string>
#include <algorithm>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <cuda_runtime_api.h>
using namespace std;
typedef double ld;
typedef long long LL;
/* Buffered stdout writer: characters accumulate in a 1 MB buffer that is
 * flushed to stdout when nearly full or on explicit flush().  Callers must
 * call flush() before the program exits or output may be lost. */
namespace output {
const int OutputBufferSize = 1e6+5;
char buffer[OutputBufferSize];
char *s = buffer;          // write cursor into `buffer`
/* Write the buffered bytes to stdout and reset the cursor. */
inline void flush() {
fwrite(buffer, 1, s-buffer, stdout);
s = buffer;
fflush(stdout);
}
/* Append one character, flushing first if the buffer is nearly full. */
inline void print(const char ch) {
// putchar(ch); return;
if (s-buffer>OutputBufferSize-2) flush();
*s++ = ch;
}
/* Append a NUL-terminated string character by character. */
inline void print(char *str) {
while (*str!=0) print(char(*str++));
}
/* Append the decimal representation of an int (handles sign and zero).
 * NOTE(review): x = -x overflows for INT_MIN -- confirm inputs. */
inline void print(int x) {
// printf("%d", x); return;
char buf[25] = {0}, *p = buf;
if (x<0) print('-'), x=-x;
if (x == 0) print('0');
while (x) *(++p) = x%10, x/=10;
while (p != buf) print(char(*(p--)+'0'));
}
/* Append a double formatted with 2 decimal places (via sprintf). */
inline void print(ld x) {
// printf("%.2f", x);
static char buf[100];
sprintf(buf, "%.2f", x);
print(buf);
}
}
/* Buffered stdin reader with operator>> overloads for int, long long,
 * whitespace-delimited strings, and floating point.  Reads stdin in
 * 256 KB chunks; read() returns -1 (as char) at end of input. */
struct ios {
static const int IN_LEN=1<<18|1;
char buf[IN_LEN],*s,*t;          // buffer and [s, t) unread window
/* Return the next input character, refilling the buffer when empty;
 * -1 on EOF. */
inline char read(){
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t?-1:*s++;
}
/* True when the buffer is empty and stdin has no more data. */
inline bool isEOF() {
return (s==t)&&(t=(s=buf)+fread(buf,1,IN_LEN,stdin)),s==t;
}
/* Parse an optionally-signed int; skips non-digit prefix, noting any '-'
 * seen there (boo).  Leaves x untouched on EOF before a digit. */
inline ios & operator >> (int &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
/* Same as the int overload, for long long. */
inline ios & operator >> (LL &x){
static char c11,boo;
for(c11=read(),boo=0;!isdigit(c11);c11=read()){
if(c11==-1)return *this;
boo|=c11=='-';
}
for(x=0;isdigit(c11);c11=read())x=x*10+(c11^'0');
boo&&(x=-x);
return *this;
}
/* Read a token delimited by space/newline into s (NUL-terminated).
 * NOTE(review): no length limit -- caller's buffer must be large enough. */
inline ios &operator >> (char *s) {
int len = 0;
char ch;
for (ch=read(); ch=='\n' || ch == ' '; ch=read());
if (ch == -1) {
s[len] = 0;
return *this;
}
for (; ch!='\n' && ch != ' ' && ch != -1;ch=read())
s[len++] = ch;
s[len] = 0;
return *this;
}
/* Parse a double (ld): optional '-', integer part, optional '.fraction'. */
inline ios &operator>>(ld &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
/* Same parsing logic for long double (fraction still accumulated in
 * double precision via `now`). */
inline ios &operator>>(long double &x)
{
char ch;
bool neg = false, dec = false;
double now = 0.1;
for (ch=read(); !isdigit(ch) && (ch!='.' && ch!='-') && ch!=-1; ch=read());
if (ch == '-') neg = true;
else if (ch == '.') { x = 0; dec = true; }
else if (ch != -1) x = ch-'0';
else return *this;
if (!dec) {
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x = x * 10 + ch-'0';
}
}
if (ch == '.')
for (ch=read(); isdigit(ch) && ch!=-1; ch=read()) {
x += now * (ch - '0'); now *= 0.1;
}
if (neg) x = -x;
return *this;
}
} io;
// Aborts the program with a diagnostic when a CUDA API call failed.
// `name` labels the failing call site in the error output.
// Fixes: exit(0) reported success on a fatal error (now exit(1)), and the
// default label was an expletive rather than a useful marker.
inline void handleCudaError(cudaError_t err, string name = "unnamed CUDA call") {
    if (err != cudaSuccess) {
        cerr << name << endl;
        cerr << cudaGetErrorString(err) << endl;
        exit(1);  // non-zero status: the program did not complete successfully
    }
}
// Device (d_*) and host (h_*) matrix buffers, plus the dimensions of
// A (an x am), B (bn x bm) and the product C (n x m).
ld *d_a, *d_b, *d_c, *h_a, *h_b, *h_c;
int an, am, bn, bm;
int n, m;
// Allocate a device buffer for an n x m matrix and upload `src` into it.
// On success `dst` receives the device pointer; failures abort via
// handleCudaError.
void copyMatrix(ld *&src, ld *&dst, int n, int m) {
    const int bytes = n * m * (int)sizeof(ld);
    handleCudaError(cudaMalloc(&dst, bytes), "cudaMalloc in copyMatrix");
    handleCudaError(cudaMemcpy(dst, src, bytes, cudaMemcpyHostToDevice), "memcpy in copyMatrix");
}
// Computes C = A * B for row-major matrices: A is an x am, B is am x bm,
// C is an x bm. One thread computes one output element; launch with at
// least an*bm threads in total (1D grid).
// Fixes: the accumulator was hard-coded to `ld` (defeating the template
// parameter T), and the bounds were checked three times redundantly.
template<typename T>
__global__ void matrixMult(T *d_a, T *d_b, T *d_c, int an, int bm, int am) {
    int index = blockDim.x * blockIdx.x + threadIdx.x;
    int i = index / bm, j = index % bm;
    if (i >= an || j >= bm) return;  // guard threads past the matrix tail
    T sum = 0;  // accumulate in the element type so the template stays generic
    for (int k = 0; k < am; ++k)
        sum += d_a[i * am + k] * d_b[k * bm + j];
    d_c[i * bm + j] = sum;
}
// Print an n x m row-major matrix as comma-separated rows through the
// buffered output::print helpers.
void outputMatrix(ld *a, int n, int m) {
    // output::print(n); output::print(',');
    // output::print(m); output::print('\n');
    for (int r = 0; r < n; ++r) {
        ld *row = a + r * m;
        for (int c = 0; c < m; ++c) {
            if (c) output::print(',');
            output::print(row[c]);
        }
        output::print('\n');
    }
}
/**
 * Reads matrix A (an x am) and B (bn x bm) from input.txt, multiplies them
 * on the GPU with matrixMult, and writes the product to output.txt as CSV.
 * Fixes: the kernel launch status was never checked, and neither device nor
 * host buffers were ever freed.
 */
int main()
{
    // Redirect stdio to files (competitive-programming style I/O).
    freopen("input.txt", "r", stdin);
    freopen("output.txt", "w", stdout);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    cerr << prop.name << endl;
    // Read A.
    io >> an >> am; h_a = (ld*)malloc(sizeof(ld) * an * am);
    for (int i=0; i<an; ++i)
        for (int j=0; j<am; ++j)
            io >> h_a[i*am + j];
    // Read B.
    io >> bn >> bm; h_b = (ld*)malloc(sizeof(ld) * bn * bm);
    for (int i=0; i<bn; ++i)
        for (int j=0; j<bm; ++j)
            io >> h_b[i*bm + j];
    n = an;
    m = bm;
    // One thread per output element; grid size rounded up.
    int block_size = prop.maxThreadsPerBlock, grids = (n * m + block_size - 1) / block_size;
    copyMatrix(h_a, d_a, an, am);
    copyMatrix(h_b, d_b, bn, bm);
    handleCudaError(cudaMalloc(&d_c, sizeof(ld) * n * m), "allocate for h_c");
    matrixMult<<<grids, block_size>>>(d_a, d_b, d_c, an, bm, am);
    handleCudaError(cudaGetLastError(), "kernel launch");  // catch bad launch config
    h_c = (ld*)malloc(sizeof(ld) * n * m);
    int size = sizeof(ld) * n * m;
    // cudaMemcpy blocks until the kernel has finished.
    handleCudaError(cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost), "memcpy back");
    outputMatrix(h_c, n, m);
    output::flush();
    // Release device and host buffers.
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(h_a); free(h_b); free(h_c);
    return 0;
}
|
19,571 | #include<iostream>
#include<fstream>
// Writes `triangles` (flat x,y,z vertex triples; values <= 0.00001 are
// treated as padding and skipped) to `output_file` as an ASCII PLY mesh.
// The vertex/face counts are unknown until the data is scanned, so the
// header is written with blank count fields first and patched afterwards
// by seeking back into the file.
void write_ply(float *triangles, int data_length, char *output_file){
std::fstream plyfile;
plyfile.open(output_file, std::fstream::out);
printf("Writing\n");
plyfile << "ply\nformat ascii 1.0\n";
plyfile << "element vertex \n"; // need to come back and add amount of vertices
plyfile << "property float x\nproperty float y\nproperty float z\n";
plyfile << "element face \n";// need to come back here and add amount of faces
plyfile << "property list uchar int vertex_index\n";
plyfile << "end_header\n";
int i = 0;
int edge_num=0; // counts emitted vertices (despite the name)
// Emit every non-padding vertex triple; i advances by 3 per emitted
// vertex (i+=2 here plus the unconditional i++ below), by 1 when skipping.
while(i<data_length-3){
//printf("%d \n",i );
if(triangles[i] > 0.00001){
plyfile << triangles[i] << " " << triangles[i+1] << " " <<triangles[i+2] << "\n";
i+=2;
edge_num++;
}
i++;
}
printf("edge_num %d\n", edge_num);
// Faces: consecutive vertex triples become triangles (0 1 2, 3 4 5, ...).
int f = 0;
int x = 0;
while(f < edge_num / 3){
plyfile <<"3 " << x << " " << x+1 << " " << x+2 <<"\n";
x+=3;
f++;
}
// Patch the header counts in place.
// NOTE(review): 36 and 116 are hard-coded byte offsets of the blank count
// fields; they silently break if the header text above changes — verify
// against the exact header before modifying it.
plyfile.clear();
plyfile.seekg(36, std::ios::beg);
plyfile << edge_num;
plyfile.seekg(116, std::ios::beg);
plyfile << edge_num/3;
plyfile.seekg(0, std::ios::end);
plyfile.close();
printf("face num: %d\n", edge_num / 3);
}
// CPU marching-cubes pass over a data_width x data_height x data_depth
// volume stored in input_images (voxel index = z*w*h + y*w + x).
//   thresh:     voxels strictly above it become 255, others 0 (in place).
//   lookup_two: 256x16 triangle table; row = cube case index, entries are
//               cube-edge indices, -1 terminated. (lookup_one is unused.)
//   triangles:  output; each voxel owns 5*3*3 floats (max 5 triangles x
//               3 vertices x 3 coords). Vertices are edge midpoints; the z
//               coordinate is scaled by 4.
// Fix: the slot-clearing loop indexed triangles[master_vertex + tm] while
// the writes below use master_vertex * (5*3*3) + ..., so the wrong region
// was zeroed; the loop now clears the same region the writes target.
void compute_cpu_marching_cubes(unsigned int *input_images, unsigned int thresh, unsigned int data_width, unsigned int data_height, unsigned int data_depth, int *lookup_one, int *lookup_two, float *triangles){
    // Pass 1: binarize the volume against the threshold.
    for(unsigned int i=0; i<data_depth; i++){
        for(unsigned int j=0; j<data_width; j++){
            for(unsigned int k=0; k<data_height; k++){
                unsigned int thresh_vertex = i * data_width * data_height + k * data_width + j;
                if(input_images[thresh_vertex] > thresh){
                    input_images[thresh_vertex] = 255;
                }
                else{
                    input_images[thresh_vertex] = 0;
                }
            }
        }
    }
    // Pass 2: per voxel, build the 8-corner cube, classify it, and emit
    // triangles for the matching case.
    for(int global_z=0; global_z<data_depth; global_z++){
        for(int global_x=0; global_x<data_width; global_x++){
            for(int global_y=0; global_y<data_height; global_y++){
                const int master_vertex = global_z * data_width * data_height + global_y * data_width + global_x;
                // Cube corners: {x, y, z, bit contributed to the case index}.
                // double check that these refer to the right vertices
                int cube[8][4]{
                    {global_x, global_y, global_z,1},
                    {global_x, global_y+1, global_z,2},
                    {global_x+1, global_y+1, global_z,4},
                    {global_x+1, global_y, global_z,8},
                    {global_x, global_y, global_z+1,16},
                    {global_x, global_y+1, global_z+1,32},
                    {global_x+1, global_y+1, global_z+1,64},
                    {global_x+1, global_y, global_z+1,128}};
                // Only interior voxels whose +1 neighbours stay in bounds.
                if(global_x + 1 < data_width-1 && global_y + 1 < data_height-1 && global_z + 1 < data_depth-1){
                    // Case index: one bit per corner that is "inside" (255).
                    int case_lookup_idx = 0;
                    for(unsigned int ci=0; ci<8; ci++){
                        const int x = cube[ci][0];
                        const int y = cube[ci][1];
                        const int z = cube[ci][2];
                        const int vertex = z * data_width * data_height + y * data_width + x;
                        if (input_images[vertex] ==255){
                            case_lookup_idx |= cube[ci][3];
                        }
                    }
                    // The 12 cube edges as (x1,y1,z1,x2,y2,z2) corner pairs.
                    int edge_actual[12][6] = {
                        {cube[0][0],cube[0][1],cube[0][2],cube[1][0],cube[1][1],cube[1][2]},
                        {cube[1][0],cube[1][1],cube[1][2],cube[2][0],cube[2][1],cube[2][2]},
                        {cube[2][0],cube[2][1],cube[2][2],cube[3][0],cube[3][1],cube[3][2]},
                        {cube[3][0],cube[3][1],cube[3][2],cube[0][0],cube[0][1],cube[0][2]},
                        {cube[4][0],cube[4][1],cube[4][2],cube[5][0],cube[5][1],cube[5][2]},
                        {cube[5][0],cube[5][1],cube[5][2],cube[6][0],cube[6][1],cube[6][2]},
                        {cube[6][0],cube[6][1],cube[6][2],cube[7][0],cube[7][1],cube[7][2]},
                        {cube[7][0],cube[7][1],cube[7][2],cube[4][0],cube[4][1],cube[4][2]},
                        {cube[4][0],cube[4][1],cube[4][2],cube[0][0],cube[0][1],cube[0][2]},
                        {cube[5][0],cube[5][1],cube[5][2],cube[1][0],cube[1][1],cube[1][2]},
                        {cube[6][0],cube[6][1],cube[6][2],cube[2][0],cube[2][1],cube[2][2]},
                        {cube[7][0],cube[7][1],cube[7][2],cube[3][0],cube[3][1],cube[3][2]}
                    };
                    // Clear this voxel's triangle slots (same region the
                    // writes below target).
                    for(unsigned int tm=0; tm<(5*3*3); tm++){
                        triangles[master_vertex * (5*3*3) + tm] = 0.0f;
                    }
                    if(case_lookup_idx != 255 && case_lookup_idx != 0){
                        int current =0;
                        int edge_counter = 0;
                        for(int w=0; w<16; w++){
                            current = lookup_two[case_lookup_idx * 16 + w];
                            // current now gives an edge index so we need to add the point to the triangle list
                            if(current != -1){
                                int point1_x = edge_actual[current][0];
                                int point1_y = edge_actual[current][1];
                                int point1_z = edge_actual[current][2];
                                int point2_x = edge_actual[current][3];
                                int point2_y = edge_actual[current][4];
                                int point2_z = edge_actual[current][5];
                                // Vertex = midpoint of the crossed edge.
                                triangles[master_vertex * (5*3*3) +(edge_counter*3) + 0] = (((float)point1_x + (float)point2_x)/2.0);
                                triangles[master_vertex * (5*3*3) +(edge_counter*3) + 1] = ((float)point1_y + (float)point2_y)/2.0;
                                triangles[master_vertex * (5*3*3) +(edge_counter*3) + 2] = (((float)point1_z + (float)point2_z)/2.0)*4;// could do better interpolation here
                                edge_counter++;
                            }
                        }
                        // printf("\n");
                    }
                }
            }
        }
    }
}
// Returns a heap-allocated copy of the 256-entry marching-cubes edge table
// (each entry is a 12-bit mask of cube edges crossed for that case index).
// The caller owns the returned buffer and must free() it.
int* get_lookup_one(void){
static const int kEdgeTable[256]={
0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c,
0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00,
0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c,
0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90,
0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c,
0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30,
0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac,
0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0,
0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c,
0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60,
0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc,
0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0,
0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c,
0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950,
0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc ,
0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0,
0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc,
0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0,
0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c,
0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650,
0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc,
0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0,
0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c,
0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460,
0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac,
0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0,
0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c,
0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230,
0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c,
0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190,
0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c,
0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 };
int *copy = (int *)malloc(sizeof(int) * 256);
int idx = 0;
while (idx < 256) {
copy[idx] = kEdgeTable[idx];
++idx;
}
return copy;
}
// Returns a heap-allocated, row-major 256x16 copy of the marching-cubes
// triangle table: row = cube case index, values = cube-edge indices taken
// three at a time to form triangles, with -1 as the terminator/padding.
// The caller owns the returned buffer and must free() it.
int* get_lookup_two(void){
// lookup two tells you which edges of the cube to connect
int triTable[256][16] =
{{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1},
{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1},
{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1},
{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1},
{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1},
{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1},
{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1},
{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1},
{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1},
{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1},
{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1},
{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1},
{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1},
{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1},
{11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1},
{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1},
{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1},
{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1},
{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1},
{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1},
{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1},
{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1},
{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1},
{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1},
{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1},
{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1},
{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1},
{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1},
{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1},
{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1},
{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1},
{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1},
{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1},
{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1},
{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1},
{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1},
{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1},
{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1},
{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1},
{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1},
{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1},
{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1},
{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1},
{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1},
{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1},
{11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1},
{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1},
{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1},
{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1},
{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1},
{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1},
{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1},
{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1},
{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1},
{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1},
{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1},
{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1},
{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1},
{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1},
{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1},
{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1},
{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1},
{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1},
{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1},
{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1},
{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1},
{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1},
{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1},
{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1},
{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1},
{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1},
{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1},
{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1},
{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1},
{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1},
{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1},
{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1},
{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1},
{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1},
{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1},
{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1},
{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1},
{5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1},
{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1},
{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1},
{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1},
{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1},
{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1},
{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1},
{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1},
{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1},
{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1},
{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1},
{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1},
{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1},
{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1},
{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1},
{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1},
{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1},
{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1},
{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1},
{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1},
{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1},
{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1},
{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1},
{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1},
{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1},
{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1},
{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1},
{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1},
{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1},
{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1},
{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1},
{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1},
{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1},
{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1},
{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1},
{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1},
{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1},
{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1},
{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1},
{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1},
{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1},
{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1},
{3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1},
{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1},
{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1},
{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1},
{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}};
// Flatten the 2D table into the row-major heap copy returned to the caller.
int *output = (int *)malloc(sizeof(int) * 256 * 16);
for(int i=0; i<256; i++){
for(int j=0; j<16; j++){
output[i*16 + j] = triTable[i][j];
}
}
return output;
}
19,572 | #include <math.h>
#include <cstdio>
#include <cstdlib>
#include <time.h>
// Assertion to check for errors
#define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Prints a diagnostic and (optionally) exits when a CUDA API call fails.
// `file`/`line` identify the call site and are supplied by CUDA_SAFE_CALL.
// Fix: `file` must be const char* — __FILE__ is a string literal, and
// binding it to char* is ill-formed in ISO C++11.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
#define GIG 1000000000        // nanoseconds per second
#define NANO_TO_MILLI 1000000 // nanoseconds per millisecond
#define CPG 2.53 // Cycles per GHz -- Adjust to your computer
#define NUM_THREADS_PER_BLOCK 16
#define NUM_BLOCKS 16
#define PRINT_TIME 1
#define SM_ARR_LEN 50000
#define TOL 1e-6
#define NN 2048 //matrix size
#define THREADS_PER_BLOCK 16  // tile edge used by the shared-memory kernel
#define THREADS_PER_BLOCK_Y 16
#define NUM_BLOCKS_X 16
#define NUM_BLOCKS_Y 16
#define IMUL(a, b) __mul24(a, b)
// Forward declarations (definitions follow main below).
void initialize_array(float *A, int len, int seed);
void write_2d_array_to_file(float *A, char *filename);
struct timespec diff(struct timespec start, struct timespec end);
// Naive MMM kernel: C = A * B for N x N row-major matrices, one thread per
// output element. Launch with a 2D grid covering at least N x N threads.
// Fix: the guard used `>` so threads with i == N or j == N slipped through
// and read/wrote one row/column past the end of the matrices; it must
// reject indices >= N.
__global__ void kernel_MMM(float *A, float *B, float *C, int N){
    int j = (blockIdx.x * blockDim.x) + threadIdx.x;  // column
    int i = (blockIdx.y * blockDim.y) + threadIdx.y;  // row
    if(i >= N || j >= N)
        return;
    float sum = 0;
    for(int k = 0; k < N; k++){
        sum += (A[i*N+k] * B[k*N+j]);
    }
    C[i*N+j] = sum;
}
// Tiled MMM kernel: C = A * B for N x N row-major matrices, using
// THREADS_PER_BLOCK x THREADS_PER_BLOCK shared-memory tiles of A and B.
// NOTE(review): the m-loop runs exactly N/THREADS_PER_BLOCK times with no
// edge guards, so N must be an exact multiple of THREADS_PER_BLOCK (true
// for NN = 2048 here) — confirm before reusing with other sizes.
__global__ void kernel_shared_MMM(float *A, float *B, float *C, int N){
__shared__ float As[THREADS_PER_BLOCK][THREADS_PER_BLOCK];
__shared__ float Bs[THREADS_PER_BLOCK][THREADS_PER_BLOCK];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int Row = by * THREADS_PER_BLOCK + ty; // this thread's output row
int Col = bx * THREADS_PER_BLOCK + tx; // this thread's output column
float Pvalue = 0;
// Each iteration loads one tile of A and one tile of B into shared memory,
// then accumulates the partial dot product for this thread's element.
for(int m = 0; m < N/THREADS_PER_BLOCK; ++m){
As[ty][tx] = A[Row*N+(m*THREADS_PER_BLOCK+tx)];
Bs[ty][tx] = B[Col+(m*THREADS_PER_BLOCK+ty)*N];
__syncthreads(); // tiles fully loaded before any thread reads them
for(int k = 0; k < THREADS_PER_BLOCK; ++k)
Pvalue += As[ty][k] * Bs[k][tx];
__syncthreads(); // all reads done before the next iteration overwrites
}
C[Row*N+Col] = Pvalue;
}
// GPU-vs-CPU matrix-multiply comparison driver:
//  1. run the tiled kernel on the GPU, timed with CUDA events both with and
//     without the host<->device copies,
//  2. compute the reference product on the CPU,
//  3. report timings and the element-wise differences.
// Fixes: abs() on the float difference truncated through int (now fabsf),
// deprecated cudaThreadSynchronize() replaced with cudaDeviceSynchronize(),
// and the redundant in-function re-declaration of diff() removed.
int main(int argc, char **argv){
    int i, j, k;
    int not_a_number;
    cudaEvent_t start_i, start_o, stop_i, stop_o;
    float elapsed_gpu_internal, elapsed_gpu_with_copy;
    float max_difference, min_difference, average_difference, difference, average;
    struct timespec time1, time2, elapsed_cpu;
    // Arrays on GPU global memory
    float *g_A, *g_B, *g_C;
    // Arrays on host memory
    float *h_A, *h_B, *h_C, *h_C_control;

    printf("Allocating arrays\n");
    CUDA_SAFE_CALL(cudaMalloc((void **) &g_A, NN*NN*sizeof(float)));
    CUDA_SAFE_CALL(cudaMalloc((void **) &g_B, NN*NN*sizeof(float)));
    CUDA_SAFE_CALL(cudaMalloc((void **) &g_C, NN*NN*sizeof(float)));
    h_A = (float*) malloc(NN*NN*sizeof(float));
    h_B = (float*) malloc(NN*NN*sizeof(float));
    h_C = (float*) malloc(NN*NN*sizeof(float));
    h_C_control = (float*) malloc(NN*NN*sizeof(float));

    printf("Initializing host arrays\n");
    initialize_array(h_A, NN*NN, 1000);
    initialize_array(h_B, NN*NN, 2000);

    // Outer timing: includes the host<->device transfers.
    printf("Starting outer cuda timing\n");
    cudaEventCreate(&start_o);
    cudaEventCreate(&stop_o);
    cudaEventRecord(start_o, 0);

    printf("Transfering arrays to GPU\n");
    CUDA_SAFE_CALL(cudaMemcpy(g_A, h_A, NN*NN*sizeof(float), cudaMemcpyHostToDevice));
    CUDA_SAFE_CALL(cudaMemcpy(g_B, h_B, NN*NN*sizeof(float), cudaMemcpyHostToDevice));

    // One thread per output element in 2D tiles; grid rounded up.
    dim3 dimGrid((NN+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK,(NN+THREADS_PER_BLOCK-1)/THREADS_PER_BLOCK);
    dim3 dimBlock(THREADS_PER_BLOCK,THREADS_PER_BLOCK);

    // Inner timing: kernel execution only.
    printf("Starting outer cuda timing\n");
    cudaEventCreate(&start_i);
    cudaEventCreate(&stop_i);
    cudaEventRecord(start_i, 0);
    printf("Running kernel\n");
    kernel_shared_MMM<<<dimGrid, dimBlock>>>(g_A, g_B, g_C, NN);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent.
    cudaDeviceSynchronize();
    cudaEventRecord(stop_i,0);
    cudaEventSynchronize(stop_i);
    cudaEventElapsedTime(&elapsed_gpu_internal, start_i, stop_i);
    cudaEventDestroy(start_i);
    cudaEventDestroy(stop_i);
    // Check for errors during launch
    CUDA_SAFE_CALL(cudaPeekAtLastError());

    printf("Copying cuda results to host\n");
    CUDA_SAFE_CALL(cudaMemcpy(h_C, g_C, NN*NN*sizeof(float), cudaMemcpyDeviceToHost));
    cudaEventRecord(stop_o,0);
    cudaEventSynchronize(stop_o);
    cudaEventElapsedTime(&elapsed_gpu_with_copy, start_o, stop_o);
    cudaEventDestroy(start_o);
    cudaEventDestroy(stop_o);

    // CPU reference result.
    printf("Computing MMM on host\n");
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time1);
    for(i = 0; i < NN; i++){
        for(j = 0; j < NN; j++){
            h_C_control[i*NN+j] = 0;
            for(k = 0; k < NN; k++){
                h_C_control[i*NN+j] += (h_A[i*NN+k] * h_B[k*NN + j]);
            }
        }
    }
    clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &time2);
    elapsed_cpu = diff(time1, time2);

    // Compare the GPU and CPU results element-wise.
    printf("Comparing results\n");
    max_difference = 0;
    min_difference = NN*NN;
    average_difference = 0;
    average = 0;
    not_a_number = 0;
    for(i = 0; i < NN; i++){
        for(j = 0; j < NN; j++){
            if(h_C[i*NN+j] != h_C[i*NN+j]) // NaN is the only value != itself
                not_a_number++;
            else{
                average += h_C[i*NN+j];
                average += h_C_control[i*NN+j];
                // fabsf, not abs: the integer abs() would truncate the
                // floating-point difference toward zero.
                difference = fabsf(h_C[i*NN+j] - h_C_control[i*NN+j]);
                if(difference > max_difference)
                    max_difference = difference;
                if(difference < min_difference)
                    min_difference = difference;
                average_difference += difference;
            }
        }
    }
    average_difference /= (float)(NN*NN);
    average /= (float) (2*NN*NN);

    printf("Freeing memory\n");
    CUDA_SAFE_CALL(cudaFree(g_A));
    CUDA_SAFE_CALL(cudaFree(g_B));
    CUDA_SAFE_CALL(cudaFree(g_C));
    free(h_A);
    free(h_B);
    free(h_C);
    free(h_C_control);

    printf("Printing results\n");
    printf("\nGPU outer loop time: %f (msec)\n", elapsed_gpu_with_copy);
    printf("\nGPU inner loop time: %f (msec)\n", elapsed_gpu_internal);
    printf("\nCPU time: %f(msec)\n", (float)(((double)GIG*elapsed_cpu.tv_sec + elapsed_cpu.tv_nsec)/(double)NANO_TO_MILLI));
    printf("Max difference = %f, Min difference = %f, Average difference = %f, Average = %f, Not a number = %d\n", max_difference, min_difference, average_difference, average, not_a_number);
    printf("Max Tolerance = %f%%, Min Tolerance = %f%%, Average Tolerance = %f%% NaN Tolerance = %f%%\n", max_difference/average*100, min_difference/average*100, average_difference/average*100, (float)not_a_number/(float)(NN*NN)*(float)100);
    return 0;
}
/* Fill A[0..len-1] with pseudo-random floats: seeds rand() with `seed`
 * and stores one rand() draw per element (reproducible for a fixed seed). */
void initialize_array(float *A, int len, int seed){
    srand(seed);
    for (int idx = 0; idx < len; idx++)
        A[idx] = (float) rand();
}
/* Return end - start as a normalized timespec (tv_nsec in [0, 1e9)).
 * Assumes end >= start. */
struct timespec diff(struct timespec start, struct timespec end)
{
    struct timespec delta;
    delta.tv_sec  = end.tv_sec  - start.tv_sec;
    delta.tv_nsec = end.tv_nsec - start.tv_nsec;
    if (delta.tv_nsec < 0) {   /* borrow one second */
        delta.tv_sec  -= 1;
        delta.tv_nsec += 1000000000;
    }
    return delta;
}
/* Write the NN x NN matrix A to `filename` as comma-separated values,
 * one matrix row per output line. Silently returns if the file cannot
 * be opened. */
void write_2d_array_to_file(float *A, char *filename){
    FILE *f = fopen(filename, "w");
    /* Bug fix: the original dereferenced a NULL FILE* on open failure. */
    if (f == NULL)
        return;
    for (int i = 0; i < NN*NN; i++){
        fprintf(f, "%.0f, ", A[i]);
        /* Bug fix: break the line after the LAST element of each row.
         * The original test (i%NN == 0) broke after the first element,
         * producing a 1-element first line and shifted rows. */
        if (i%NN == NN-1)
            fprintf(f, "\n");
    }
    fclose(f);
}
|
19,573 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
// Build the lattice of interpolation query points for one (year, control)
// pair and record each point's per-dimension X coordinate into the
// flattened `regression` buffer. One thread = one query point; idx is
// decomposed into base-dimRes digits, one digit per dimension. The launch
// must supply at least noPoints threads (1-D grid).
// NOTE(review): dimIdx comes from in-kernel malloc whose result is not
// NULL-checked — confirm the device heap is sized for the largest launch.
__global__ void createQueryPoints(int noPoints, int noDims, int dimRes, int control, int noControls, int year, float* xmins, float* xmaxes, float* regression, float* queryPts) {
// Global thread index
int idx = blockIdx.x*blockDim.x + threadIdx.x;
if (idx < noPoints) {
// First, deconstruct the index into the index along each dimension
int *dimIdx;
dimIdx = (int*)malloc(noDims*sizeof(int));
int rem = idx;
for (int ii = 0; ii < noDims; ii++) {
// digit for dimension ii, most-significant digit first
int div = (int)(rem/pow(dimRes,noDims-ii-1));
dimIdx[ii] = div;
rem = rem - div*pow(dimRes,noDims-ii-1);
}
// We use the highest and lowest x values for each dimension
// among ALL the controls, not just for this control
// Get the query point coordinates
for (int ii = 0; ii < noDims; ii++) {
// queryPts[idx + ii*noPoints] = ((float)dimIdx[ii])*(xmaxes[
// control*noDims + ii] - xmins[control*noDims + ii])/(
// float)(dimRes-1) + xmins[control*noDims + ii];
queryPts[idx + ii*noPoints] = ((float)dimIdx[ii])*(xmaxes[
noControls*noDims + ii] - xmins[noControls*noDims +
ii])/(float)(dimRes-1) + xmins[noControls*noDims +
ii];
// Save the X value for the query point
regression[year*noControls*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + control*(dimRes*noDims + (int)pow(dimRes,
noDims)*2) + ii*dimRes + dimIdx[ii]] = queryPts[idx + ii*
noPoints];
}
free(dimIdx);
}
} |
19,574 | // Dan Wolf
#include <iostream>
#include <string>
#include <chrono>
// https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api/14038590#14038590
// Wrap any CUDA runtime call: forwards the returned cudaError_t together
// with the call site (__FILE__/__LINE__) to gpuAssert.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error (readable string, file, line) on stderr and, unless
// `abort` is false, terminate the process with the error code as status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Element-wise product: arrAnswer_d[i] = arrForce_d[i] * arrDistance_d[i]
// for the first n elements. 1-D launch; surplus threads exit at the guard.
__global__ void multMat(int n, int *arrForce_d, int *arrDistance_d, int *arrAnswer_d) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    arrAnswer_d[tid] = arrForce_d[tid] * arrDistance_d[tid];
}
// Compute work = force * distance element-wise for n elements (n from
// argv[1]). Any extra CLI argument selects the CPU path; otherwise the
// multMat kernel runs on the GPU. Prints the inputs, the summed result,
// and the elapsed time in nanoseconds.
int main(int argc, char **argv) {
    // Bug fix: the original read argv[1] unconditionally and crashed when
    // launched with no arguments.
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <n> [use_cpu]" << '\n';
        return 1;
    }
    auto n = atoi(argv[1]);
    size_t bytes = n * sizeof(int);
    // host pointers
    int* arrForce;
    int* arrDistance;
    int* arrAnswer;
    // device pointers
    int* arrForce_d;
    int* arrDistance_d;
    int* arrAnswer_d;
    // allocate on host
    arrForce = (int*)malloc(bytes);
    arrDistance = (int*)malloc(bytes);
    arrAnswer = (int*)malloc(bytes);
    // initialize on host, at n=16, this initializes to
    // (1, 2, 3, 4, 5, 6, 7, 8, 9, 8, 7, 6, 5, 4, 3, 2)
    // (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 1, 2, 3, 4, 5)
    // Answer: 1 + 4 + 9 + 16 + 25 + 36 + 49 + 64 + 81 + 80 + 77 + 6 + 10 + 12 + 12 + 10 = 492
    int forceValue = 1;
    int distanceValue = 0;
    for (int i = 0; i < n; i++){
        arrForce[i] = forceValue;
        if (i < n/2) {
            forceValue = forceValue + 1;
        } else {
            forceValue = forceValue - 1;
        }
        distanceValue = distanceValue > 10 ? distanceValue - 10 : distanceValue + 1;
        arrDistance[i] = distanceValue;
    }
    // print the inputs
    std::cout << "force: ";
    for (int i = 0; i < n; i++){
        std::cout << arrForce[i] << " ";
    }
    std::cout << '\n' << "dist: ";
    for (int i = 0; i < n; i++){
        std::cout << arrDistance[i] << " ";
    }
    std::chrono::high_resolution_clock::time_point t1 = std::chrono::high_resolution_clock::now();
    // if more than the number of elements is passed in, don't use the gpu
    if (argc > 2) {
        std::cout << '\n' << "using CPU" << '\n';
        for (int i = 0; i < n; i++) {
            arrAnswer[i] = arrForce[i] * arrDistance[i];
        }
    } else {
        std::cout << '\n' << "using GPU" << '\n';
        const int BLOCK_SIZE = 1024;
        dim3 dimBlock (BLOCK_SIZE);
        dim3 dimGrid = (int)ceil((float)n / BLOCK_SIZE);  // ceil-div grid
        gpuErrchk(cudaMalloc(&arrForce_d, bytes));
        gpuErrchk(cudaMalloc(&arrDistance_d, bytes));
        gpuErrchk(cudaMalloc(&arrAnswer_d, bytes));
        gpuErrchk(cudaMemcpy(arrForce_d, arrForce, bytes, cudaMemcpyHostToDevice));
        gpuErrchk(cudaMemcpy(arrDistance_d, arrDistance, bytes, cudaMemcpyHostToDevice));
        multMat<<<dimGrid, dimBlock>>>(n, arrForce_d, arrDistance_d, arrAnswer_d);
        gpuErrchk(cudaPeekAtLastError());
        // blocking D2H copy also synchronizes before the host reads results
        gpuErrchk(cudaMemcpy(arrAnswer, arrAnswer_d, bytes, cudaMemcpyDeviceToHost));
        gpuErrchk(cudaFree(arrForce_d));
        gpuErrchk(cudaFree(arrDistance_d));
        gpuErrchk(cudaFree(arrAnswer_d));
    }
    std::chrono::high_resolution_clock::time_point t2 = std::chrono::high_resolution_clock::now();
    int total = 0;
    for (int i = 0; i < n; i++) {
        total = total + arrAnswer[i];
    }
    std::cout << "answer: " << total << '\n' << '\n';
    std::cout << "time for calculation: " << (t2 - t1).count() << "ns";
    // Bug fix: the original leaked all three host allocations.
    free(arrForce);
    free(arrDistance);
    free(arrAnswer);
    return 0;
}
|
19,575 | #include <stdio.h>
#include <stdlib.h>
#include <string.h> /* memcpy */
#include <math.h>
#include <stdint.h>
void *cuda_upload_var(void *host_var, int size)
{
void *cuda_var;
cudaMalloc(&cuda_var, 4);
cudaMemcpy(cuda_var, host_var, size, cudaMemcpyHostToDevice);
return cuda_var;
}
void cuda_download_var(void *cuda_var, void *host_var, int size)
{
cudaMemcpy(host_var, cuda_var, size, cudaMemcpyDeviceToHost);
cudaFree(cuda_var);
}
/* One-dimensional int field: `m` points to host OR device memory,
 * size[0] is the element count, and is_device_field records which side
 * owns the allocation (0 = host, 1 = device). */
typedef struct intfield1
{
int *m;
int size[1];
int is_device_field;
} intfield1;
/* Copy the contents of `src` into `dst`, picking the cudaMemcpy direction
 * from each side's host/device flag. Both fields must hold dst.size[0]
 * elements; flag values other than 0/1 result in no copy at all. */
void memcpy_field_intfield1(intfield1 dst, intfield1 src)
{
    size_t nbytes = (sizeof(*dst.m))*dst.size[0];
    int d = dst.is_device_field;
    int s = src.is_device_field;
    if (d == 0 && s == 0)
        cudaMemcpy(dst.m, src.m, nbytes, cudaMemcpyHostToHost);
    else if (d == 1 && s == 0)
        cudaMemcpy(dst.m, src.m, nbytes, cudaMemcpyHostToDevice);
    else if (d == 0 && s == 1)
        cudaMemcpy(dst.m, src.m, nbytes, cudaMemcpyDeviceToHost);
    else if (d == 1 && s == 1)
        cudaMemcpy(dst.m, src.m, nbytes, cudaMemcpyDeviceToDevice);
}
/* Return the element count of `field` along dimension `index`
 * (only index 0 is meaningful for this 1-D field type). */
int size_intfield1(intfield1 field, int index)
{
return field.size[index];
}
/* Allocate a host-side intfield1 holding size_0 uninitialized ints.
 * NOTE(review): the malloc result is not checked. */
intfield1 alloc_host_field_intfield1(int size_0)
{
    intfield1 f;
    f.size[0] = size_0;
    f.is_device_field = 0;
    f.m = (int*)malloc((sizeof(*f.m))*size_0);
    return f;
}
/* Release the host allocation behind a field created with
 * alloc_host_field_intfield1. */
void free_host_field_intfield1(intfield1 field)
{
free(field.m);
}
/* Allocate a device-side intfield1 holding size_0 uninitialized ints.
 * NOTE(review): the cudaMalloc result is not checked. */
intfield1 alloc_device_field_intfield1(int size_0)
{
intfield1 field;
cudaMalloc((void**)&field.m, (sizeof(*field.m))*size_0);
field.size[0] = size_0;
field.is_device_field = 1;
return field;
}
/* Release the device allocation behind a field created with
 * alloc_device_field_intfield1. */
void free_device_field_intfield1(intfield1 field)
{
cudaFree(field.m);
}
/* 1x1 int matrix used by the generated kernel as an index vector. */
typedef struct intmat1
{
int m[1];
} intmat1;
int printf(const char *fmt, ...); /* TODO: Remove */
typedef intfield1 Field; /* One-dimensional integer field type */
/* Generated kernel: element-wise a += b over a.size[0] elements.
 * 1-D launch; surplus threads exit at the bounds guard. */
__global__ void kernel_0(intfield1 a, intfield1 b)
{
if (threadIdx.x + blockIdx.x*blockDim.x >= a.size[0]) {
return;
}
intmat1 id;
/* index vector: component 0 is this thread's element index */
id.m[1*0] = (threadIdx.x + blockIdx.x*blockDim.x) % a.size[0]/1;
a.m[1*id.m[1*0]] += b.m[1*id.m[1*0]];
}
/* Demo: element-wise a += b on the device for 5-element fields, then
 * print the result ("11 22 33 44 55 "). */
int main()
{
    const int N = 5;
    intfield1 a_data = alloc_host_field_intfield1(N);
    intfield1 b_data = alloc_host_field_intfield1(N);
    /* a = 1..5, b = 10..50 in steps of 10 (same values as before,
     * filled by a loop instead of one assignment per element) */
    for (int i = 0; i < N; ++i) {
        a_data.m[i] = i + 1;
        b_data.m[i] = 10 * (i + 1);
    }
    intfield1 a = alloc_device_field_intfield1(N);
    intfield1 b = alloc_device_field_intfield1(N);
    memcpy_field_intfield1(a, a_data);
    memcpy_field_intfield1(b, b_data);
    {
        /* 128 threads per block, enough blocks to cover every element */
        dim3 dim_grid(a.size[0]/128 + 1, 1, 1);
        dim3 dim_block(128, 1, 1);
        kernel_0<<<dim_grid, dim_block>>>(a, b);
    }
    memcpy_field_intfield1(a_data, a);
    for (int i = 0; i < N; ++i) {
        printf("%i ", a_data.m[i]);
    }
    free_host_field_intfield1(a_data);
    free_host_field_intfield1(b_data);
    free_device_field_intfield1(a);
    free_device_field_intfield1(b);
    return 0;
}
|
19,576 | #include <iostream>
#include <algorithm>
#include <ctime>
using namespace std;
#define N 100000
#define RADIUS 3
#define BLOCK_SIZE 16
// 1-D box stencil: out[i] = sum of in[i-RADIUS .. i+RADIUS].
// Each block stages BLOCK_SIZE elements plus a 2*RADIUS halo in shared
// memory. NOTE(review): the halo loads index in[gindex - RADIUS] and
// in[gindex + BLOCK_SIZE], so `in`/`out` must point RADIUS elements into
// an allocation padded by RADIUS on both sides (the host passes
// d_in + RADIUS) — otherwise the first/last blocks read out of bounds.
__global__ void stencil_1d(int *in, int *out){
__shared__ int temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
int lindex = threadIdx.x + RADIUS;
// each thread stages its own element; the first RADIUS threads also
// stage the left and right halos
temp[lindex] = in[gindex];
if(threadIdx.x < RADIUS) {
temp[lindex - RADIUS] = in[gindex - RADIUS];
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
// barrier: all staging writes must land before any thread reads the tile
__syncthreads();
int result = 0;
for(int offset = -RADIUS; offset <= RADIUS; offset++)
result += temp[lindex + offset];
out[gindex] = result;
}
// Set the first n entries of x to 1.
void fill_ints(int *x, int n){
    for (int i = 0; i < n; i++)
        x[i] = 1;
}
/* Run the 1-D stencil on N ones (expect every interior output to be
 * 2*RADIUS+1) and report the kernel time. */
int main(void) {
    int *in, *out;
    int *d_in, *d_out;
    clock_t start, end;
    /* buffers carry a RADIUS halo on each side of the N real elements */
    int size = (N + 2 * RADIUS) * sizeof(int);
    in = (int*)malloc(size);
    fill_ints(in, N + 2 * RADIUS);
    out = (int*)malloc(size);
    fill_ints(out, N + 2 * RADIUS);
    cudaMalloc((void**)&d_in, size);
    cudaMalloc((void**)&d_out, size);
    cudaMemcpy(d_in, in, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_out, out, size, cudaMemcpyHostToDevice);
    start = clock();
    /* offset by RADIUS so kernel index 0 is the first real element */
    stencil_1d<<<N/BLOCK_SIZE, BLOCK_SIZE>>>(d_in + RADIUS, d_out + RADIUS);
    /* Bug fix: kernel launches are asynchronous — without this sync the
     * original timed only the launch call, not the stencil itself. */
    cudaDeviceSynchronize();
    end = clock();
    cudaMemcpy(out, d_out, size, cudaMemcpyDeviceToHost);
    /* Bug fix: clock() returns ticks, not milliseconds; convert. */
    cout << "CUDA Duration: " << 1000.0 * (end - start) / CLOCKS_PER_SEC << "ms." << endl;
    free(in);
    free(out);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
19,577 | #include <assert.h>
#include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#define MAX_SAMPLE 10000
#define MAX_FEATURE 100
#define STEP_SIZE 0.005
#define NUM_ITER 10000
#define TILE_WIDTH 32
#define BLOCK_SIZE 1024
/**
 * Check error when calling CUDA API.
 *
 * Prints the numeric cudaError_t to stderr and exits the process with
 * status 1 on any failure; a cudaSuccess result is a no-op.
 */
void checkCudaError(cudaError_t errorCode)
{
if (errorCode != cudaSuccess)
{
fprintf(stderr, "Error %d\n", errorCode);
exit(1);
}
}
/**
 * Allocate memory on host for an mxn float matrix and return its pointer.
 *
 * The data lives in one contiguous m*n block anchored at mat[0]; the
 * returned array holds one row pointer per row. Exits on allocation
 * failure. Free with free_matrix_on_host.
 */
float** create_matrix_on_host(int m, int n)
{
    float **rows = (float **) malloc(m*sizeof(float *));
    if (rows == NULL)
    {
        fprintf(stderr, "Error allocating row memory");
        exit(1);
    }
    rows[0] = (float *) malloc(m*n*sizeof(float));
    if (rows[0] == NULL)
    {
        fprintf(stderr, "error allocating col memory");
        exit(1);
    }
    /* each row starts n floats after the previous one in the single block */
    for (int r = 1; r < m; r++)
        rows[r] = rows[r-1] + n;
    return rows;
}
/**
 * Free matrix memory on host.
 *
 * Counterpart of create_matrix_on_host: releases the contiguous data
 * block (anchored at mat[0]) and then the row-pointer array.
 */
void free_matrix_on_host(float **mat)
{
free(mat[0]);
free(mat);
}
/**
 * Read a CSV file of floats into `mat` and report its shape via m (rows)
 * and n (columns of the last row read).
 *
 * The caller must pre-allocate `mat` large enough for the file; each line
 * must fit in BUFFER_SIZE-1 characters and contain no spaces. Exits with
 * a message if the file cannot be opened.
 */
void read_csv(char *filename, float **mat, int &m, int &n)
{
    const int BUFFER_SIZE = 100000;
    const char *DELIM = ",";
    int i, j;
    char *buffer, *pch;
    FILE *f;
    buffer = (char *) malloc(BUFFER_SIZE*sizeof(char));
    f = fopen(filename, "r");
    /* Bug fix: the original dereferenced a NULL FILE* if the file was missing. */
    if (f == NULL)
    {
        fprintf(stderr, "Error opening %s\n", filename);
        exit(1);
    }
    i = 0;
    j = 0;  /* keep j defined even for an empty file */
    /* Bug fix: loop on fscanf's own result instead of feof(), which only
     * reports EOF after a read fails and can reprocess the final line. */
    while (fscanf(f, "%s", buffer) == 1)
    {
        pch = strtok(buffer, DELIM);
        j = 0;
        while (pch != NULL)
        {
            mat[i][j] = atof(pch);
            pch = strtok(NULL, DELIM);
            j++;
        }
        i++;
    }
    m = i;
    n = j;
    free(buffer);
    fclose(f);
}
/**
 * Print a matrix to stdout.
 *
 * Each of the m rows goes on its own line, entries formatted " %.2f".
 */
void print_matrix(float **mat, int m, int n)
{
    for (int r = 0; r < m; r++)
    {
        for (int c = 0; c < n; c++)
            printf(" %.2f", mat[r][c]);
        printf("\n");
    }
}
/**
 * Sigmoid function for a vector of length n.
 *
 * y[k] = 1 / (1 + e^(-x[k])); y may alias x (used in-place elsewhere).
 */
void vectorized_sigmoid_on_host(float *y, float *x, int n)
{
    for (int k = 0; k < n; k++)
        y[k] = 1.0f / (1.0f+exp(-x[k]));
}
/* Element-wise logistic sigmoid y = 1/(1+e^-x) on the device.
 * 1-D launch; threads past n are idle.
 * Fix: call the explicitly single-precision expf so the float path is
 * guaranteed regardless of which exp overload the toolchain resolves. */
__global__ void vectorized_sigmoid_on_device(float *y, float *x, int n)
{
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx < n)
        y[idx] = 1.0f / (1.0f+expf(-x[idx]));
}
/* Unit test: run the sigmoid on 10 random inputs on both host and device
 * and assert the results agree within 1e-5. */
void test_vectorized_sigmoid_on_device()
{
float *x_h, *y_h, *yt_h;
float *x_d, *y_d;
int i, grid_size, n = 10, nbytes = n*sizeof(float);
x_h = (float *) malloc(nbytes);
y_h = (float *) malloc(nbytes);
yt_h = (float *) malloc(nbytes);
checkCudaError(cudaMalloc((void **) &x_d, nbytes));
checkCudaError(cudaMalloc((void **) &y_d, nbytes));
for (i = 0; i < n; i++)
x_h[i] = rand() % 10;
checkCudaError(cudaMemcpy(x_d, x_h, nbytes, cudaMemcpyHostToDevice));
vectorized_sigmoid_on_host(y_h, x_h, n);
/* ceil(n / BLOCK_SIZE) blocks */
grid_size = (n/BLOCK_SIZE) + (n%BLOCK_SIZE==0?0:1);
dim3 grid(grid_size), block(BLOCK_SIZE);
vectorized_sigmoid_on_device<<<grid, block>>>(y_d, x_d, n);
/* blocking copy also synchronizes with the kernel */
checkCudaError(cudaMemcpy(yt_h, y_d, nbytes, cudaMemcpyDeviceToHost));
for (i = 0; i < n; i++)
assert(abs(y_h[i] - yt_h[i]) < 1e-5);
free(x_h); free(y_h); free(yt_h);
cudaFree(x_d); cudaFree(y_d);
}
/**
 * Subtract vector a and b (a - b) and store the result in c.
 *
 * All vectors should have length n; c may alias a or b (used in-place
 * elsewhere in this file).
 */
void subtract_vector_and_vector_on_host(float *c, float *a, float *b, int n)
{
    for (int k = 0; k < n; k++)
        c[k] = a[k] - b[k];
}
/* Device element-wise c = a - b over n elements (1-D launch, guarded). */
__global__ void subtract_vector_and_vector_on_device(float *c, float *a, float *b, int n)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    c[i] = a[i] - b[i];
}
/* Unit test: subtract 10 random element pairs on both host and device
 * and assert the results agree within 1e-5. */
void test_subtract_vector_and_vector_on_device()
{
float *c_h, *a_h, *b_h, *ct_h;
float *c_d, *a_d, *b_d;
int i, grid_size, n = 10, nbytes = n*sizeof(float);
c_h = (float *) malloc(nbytes);
a_h = (float *) malloc(nbytes);
b_h = (float *) malloc(nbytes);
ct_h = (float *) malloc(nbytes);
checkCudaError(cudaMalloc((void **) &c_d, nbytes));
checkCudaError(cudaMalloc((void **) &a_d, nbytes));
checkCudaError(cudaMalloc((void **) &b_d, nbytes));
for (i = 0; i < n; i++)
a_h[i] = rand() % 10;
for (i = 0; i < n; i++)
b_h[i] = rand() % 10;
checkCudaError(cudaMemcpy(a_d, a_h, nbytes, cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(b_d, b_h, nbytes, cudaMemcpyHostToDevice));
subtract_vector_and_vector_on_host(c_h, a_h, b_h, n);
/* ceil(n / BLOCK_SIZE) blocks */
grid_size = (n/BLOCK_SIZE) + (n%BLOCK_SIZE==0?0:1);
dim3 grid(grid_size), block(BLOCK_SIZE);
subtract_vector_and_vector_on_device<<<grid, block>>>(c_d, a_d, b_d, n);
checkCudaError(cudaMemcpy(ct_h, c_d, nbytes, cudaMemcpyDeviceToHost));
for (i = 0; i < n; i++)
assert(abs(c_h[i] - ct_h[i]) < 1e-5);
free(c_h); free(a_h); free(b_h); free(ct_h);
cudaFree(c_d); cudaFree(a_d); cudaFree(b_d);
}
/**
 * Multiply vector x and scalar c and store the result in y.
 *
 * All vectors should have length n; y may alias x.
 */
void multiply_vector_and_scalar_on_host(float *y, float *x, float c, int n)
{
    for (int k = 0; k < n; k++)
        y[k] = c * x[k];
}
/* Device element-wise y = c * x over n elements (1-D launch, guarded). */
__global__ void multiply_vector_and_scalar_on_device(float *y, float *x, float c, int n)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    y[i] = x[i] * c;
}
/* Unit test: scale 10 random elements by 1.5 on both host and device
 * and assert the results agree within 1e-5. */
void test_multiply_vector_and_scalar_on_device()
{
float *x_h, *y_h, *yt_h;
float *x_d, *y_d;
int i, grid_size, n = 10, nbytes = n*sizeof(float);
x_h = (float *) malloc(nbytes);
y_h = (float *) malloc(nbytes);
yt_h = (float *) malloc(nbytes);
checkCudaError(cudaMalloc((void **) &x_d, nbytes));
checkCudaError(cudaMalloc((void **) &y_d, nbytes));
for (i = 0; i < n; i++)
x_h[i] = rand() % 10;
checkCudaError(cudaMemcpy(x_d, x_h, nbytes, cudaMemcpyHostToDevice));
multiply_vector_and_scalar_on_host(y_h, x_h, 1.5f, n);
/* ceil(n / BLOCK_SIZE) blocks */
grid_size = (n/BLOCK_SIZE) + (n%BLOCK_SIZE==0?0:1);
dim3 grid(grid_size), block(BLOCK_SIZE);
multiply_vector_and_scalar_on_device<<<grid, block>>>(y_d, x_d, 1.5f, n);
checkCudaError(cudaMemcpy(yt_h, y_d, nbytes, cudaMemcpyDeviceToHost));
for (i = 0; i < n; i++)
assert(abs(y_h[i] - yt_h[i]) < 1e-5);
free(x_h); free(y_h); free(yt_h);
cudaFree(x_d); cudaFree(y_d);
}
/**
 * Compute the dot product of vector u and v.
 *
 * Both vectors must have length n; returns the sum of u[k]*v[k],
 * accumulated in order in single precision.
 */
float dot_product(float *u, float *v, int n)
{
    float acc = 0.0;
    int k = 0;
    while (k < n)
    {
        acc += u[k] * v[k];
        k++;
    }
    return acc;
}
/**
 * Multiply a matrix A and a vector x and store the result in vector y.
 *
 * Matrix A is of size mxn and hence vector x should have size n.
 * Vector y will have size m.
 */
void multiply_matrix_and_vector_on_host(float *y, float **A, float *x, int m, int n)
{
    for (int r = 0; r < m; r++)
    {
        /* row r of A dotted with x (dot_product inlined, same order) */
        float acc = 0.0;
        for (int k = 0; k < n; k++)
            acc += A[r][k] * x[k];
        y[r] = acc;
    }
}
/* Device y = A*x for an m x n row-major A; one thread per output row,
 * 1-D launch with at least m threads. */
__global__ void multiply_matrix_and_vector_on_device(float *y, float *A, float *x, int m, int n)
{
    int row = blockIdx.x*blockDim.x + threadIdx.x;
    if (row >= m)
        return;
    float acc = 0.0;
    for (int k = 0; k < n; k++)
        acc += A[row*n+k] * x[k];
    y[row] = acc;
}
/* Unit test: multiply a random 4x3 matrix by a random 3-vector on both
 * host and device and assert the results agree within 1e-5. */
void test_multiply_matrix_and_vector_on_device()
{
float **A_h, *x_h, *y_h, *yt_h;
float *A_d, *x_d, *y_d;
int i, j, grid_size, m = 4, n = 3;
A_h = create_matrix_on_host(m, n);
x_h = (float *) malloc(n*sizeof(float));
y_h = (float *) malloc(m*sizeof(float));
yt_h = (float *) malloc(m*sizeof(float));
checkCudaError(cudaMalloc((void **) &A_d, m*n*sizeof(float)));
checkCudaError(cudaMalloc((void **) &x_d, n*sizeof(float)));
checkCudaError(cudaMalloc((void **) &y_d, m*sizeof(float)));
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
A_h[i][j] = rand() % 10;
for (i = 0; i < n; i++)
x_h[i] = rand() % 10;
/* A_h[0] is the contiguous data block backing the whole matrix */
checkCudaError(cudaMemcpy(A_d, A_h[0], m*n*sizeof(float), cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(x_d, x_h, n*sizeof(float), cudaMemcpyHostToDevice));
multiply_matrix_and_vector_on_host(y_h, A_h, x_h, m, n);
/* ceil(m / BLOCK_SIZE) blocks: one thread per output row */
grid_size = (m/BLOCK_SIZE) + (m%BLOCK_SIZE==0?0:1);
dim3 grid(grid_size), block(BLOCK_SIZE);
multiply_matrix_and_vector_on_device<<<grid, block>>>(y_d, A_d, x_d, m, n);
checkCudaError(cudaMemcpy(yt_h, y_d, m*sizeof(float), cudaMemcpyDeviceToHost));
for (i = 0; i < m; i++)
assert(abs(y_h[i] - yt_h[i]) < 1e-5);
free_matrix_on_host(A_h); free(x_h); free(y_h); free(yt_h);
cudaFree(A_d); cudaFree(x_d); cudaFree(y_d);
}
/**
 * Transpose a matrix A of size mxn and store the result in T (size nxm).
 */
void transpose_matrix_on_host(float **T, float **A, int m, int n)
{
    for (int r = 0; r < m; r++)
        for (int c = 0; c < n; c++)
            T[c][r] = A[r][c];
}
/* Device transpose: T (n x m, row-major) = A^T for A (m x n, row-major).
 * 2-D launch: x covers the n columns of A, y covers the m rows. */
__global__ void transpose_matrix_on_device(float *T, float *A, int m, int n)
{
    int col = blockIdx.x*blockDim.x + threadIdx.x;  /* column of A */
    int row = blockIdx.y*blockDim.y + threadIdx.y;  /* row of A */
    if (col < n && row < m)
        T[col*m+row] = A[row*n+col];
}
/* Unit test: transpose a random 3x4 matrix on both host and device and
 * assert the results agree within 1e-5. */
void test_transpose_matrix_on_device()
{
float **X_h, **T_h, **Tt_h;
float *X_d, *T_d;
int i, j, gridx, gridy, m = 3, n = 4, nbytes = m*n*sizeof(float);
X_h = create_matrix_on_host(m, n);
T_h = create_matrix_on_host(n, m);
Tt_h = create_matrix_on_host(n, m);
checkCudaError(cudaMalloc((void **) &X_d, nbytes));
checkCudaError(cudaMalloc((void **) &T_d, nbytes));
for (i = 0; i < m; i++)
for (j = 0; j < n; j++)
X_h[i][j] = rand() % 10;
checkCudaError(cudaMemcpy(X_d, X_h[0], nbytes, cudaMemcpyHostToDevice));
transpose_matrix_on_host(T_h, X_h, m, n);
/* grid.x covers the n columns, grid.y the m rows (ceil-div by tile) */
gridx = (n/TILE_WIDTH) + (n%TILE_WIDTH==0?0:1);
gridy = (m/TILE_WIDTH) + (m%TILE_WIDTH==0?0:1);
dim3 grid(gridx, gridy), block(TILE_WIDTH, TILE_WIDTH);
transpose_matrix_on_device<<<grid, block>>>(T_d, X_d, m, n);
checkCudaError(cudaMemcpy(Tt_h[0], T_d, nbytes, cudaMemcpyDeviceToHost));
for (i = 0; i < n; i++)
for (j = 0; j < m; j++)
assert(abs(T_h[i][j] - Tt_h[i][j]) < 1e-5);
free_matrix_on_host(X_h); free_matrix_on_host(T_h); free_matrix_on_host(Tt_h);
cudaFree(X_d); cudaFree(T_d);
}
/**
 * Find the best parameter for logistic regression using gradient descent.
 *
 * The result will be stored in theta. Arguments init_theta, step_size, X, and y
 * are the initial value for the parameter, step size of gradient descent, design
 * matrix, and target vector respectively. Design matrix X should have size mxn.
 * The number of gradient descent iterations is controlled by niter.
 */
void logistic_regression_gradient_descent_on_host(float *theta, float *init_theta, float step_size, float **X, float *y, int m, int n, int niter)
{
    int t;
    float **T, *mtemp, *ntemp;
    T = create_matrix_on_host(n, m);
    mtemp = (float *) malloc(m*sizeof(float));
    ntemp = (float *) malloc(n*sizeof(float));
    memcpy(theta, init_theta, n*sizeof(float));
    /* Performance fix: X never changes inside the loop, so its transpose
     * is loop-invariant — compute it once instead of niter times. */
    transpose_matrix_on_host(T, X, m, n);
    for (t = 0; t < niter; t++)
    {
        // Compute X*theta
        multiply_matrix_and_vector_on_host(mtemp, X, theta, m, n);
        // Compute h(X)
        vectorized_sigmoid_on_host(mtemp, mtemp, m);
        // Compute error h(X) - y
        subtract_vector_and_vector_on_host(mtemp, mtemp, y, m);
        // Derivative of cost J: (step_size/m) * X^T * error
        multiply_matrix_and_vector_on_host(ntemp, T, mtemp, n, m);
        multiply_vector_and_scalar_on_host(ntemp, ntemp, (float) step_size / m, n);
        // Update theta
        subtract_vector_and_vector_on_host(theta, theta, ntemp, n);
    }
    free_matrix_on_host(T);
    free(mtemp);
    free(ntemp);
}
/**
 * Device-side scaffolding for logistic-regression gradient descent.
 *
 * NOTE(review): this function is unfinished — each iteration computes the
 * error vector and X^T on the device but never forms the gradient or
 * updates theta_d, so `theta` keeps its initial value. The changes below
 * are limited to memory correctness; completing the descent is TODO.
 */
void parallel_logistic_regression_gradient_descent(float *theta, float *init_theta, float step_size, float **X, float *y, int m, int n, int niter)
{
    float *mtemp_d, *X_d, *theta_d, *y_d, *T_d;
    int t, grid_size, gridx, gridy;
    memcpy(theta, init_theta, n*sizeof(float));
    checkCudaError(cudaMalloc((void **) &mtemp_d, m*sizeof(float)));
    checkCudaError(cudaMalloc((void **) &X_d, m*n*sizeof(float)));
    checkCudaError(cudaMalloc((void **) &theta_d, n*sizeof(float)));
    checkCudaError(cudaMalloc((void **) &y_d, m*sizeof(float)));
    /* Bug fix: T_d holds the n x m transpose of X, so it needs m*n floats;
     * the original allocated only m and the transpose kernel wrote OOB. */
    checkCudaError(cudaMalloc((void **) &T_d, m*n*sizeof(float)));
    checkCudaError(cudaMemcpy(X_d, X[0], m*n*sizeof(float), cudaMemcpyHostToDevice));
    checkCudaError(cudaMemcpy(theta_d, theta, n*sizeof(float), cudaMemcpyHostToDevice));
    checkCudaError(cudaMemcpy(y_d, y, m*sizeof(float), cudaMemcpyHostToDevice));
    for (t = 0; t < niter; t++)
    {
        // Compute X*theta
        grid_size = (m/BLOCK_SIZE) + (m%BLOCK_SIZE==0?0:1);
        dim3 grid(grid_size), block(BLOCK_SIZE);
        multiply_matrix_and_vector_on_device<<<grid, block>>>(mtemp_d, X_d, theta_d, m, n);
        // Compute h(X)
        vectorized_sigmoid_on_device<<<grid, block>>>(mtemp_d, mtemp_d, m);
        // Compute error
        subtract_vector_and_vector_on_device<<<grid, block>>>(mtemp_d, mtemp_d, y_d, m);
        // Compute transpose of X
        /* Bug fix: grid.x must cover the n columns and grid.y the m rows
         * (matching the kernel's bounds test and the layout used by
         * test_transpose_matrix_on_device); the dimensions were swapped. */
        gridx = (n/TILE_WIDTH) + (n%TILE_WIDTH==0?0:1);
        gridy = (m/TILE_WIDTH) + (m%TILE_WIDTH==0?0:1);
        dim3 grid2d(gridx, gridy), block2d(TILE_WIDTH, TILE_WIDTH);
        transpose_matrix_on_device<<<grid2d, block2d>>>(T_d, X_d, m, n);
    }
    /* Bug fix: the original leaked every device allocation. */
    cudaFree(mtemp_d); cudaFree(X_d); cudaFree(theta_d);
    cudaFree(y_d); cudaFree(T_d);
}
/**
 * Predict based on estimate.
 *
 * prediction[k] = 1.0 when estimate[k] >= 0.5, else 0.0; the two arrays
 * may alias (used in-place elsewhere in this file).
 */
void predict_on_host(float *prediction, float *estimate, int n)
{
    for (int k = 0; k < n; k++)
        prediction[k] = (estimate[k] >= 0.5f) ? 1.0f : 0.0f;
}
/* Device thresholding: prediction = (estimate >= 0.5) ? 1 : 0 over n
 * elements (1-D launch, guarded). */
__global__ void predict_on_device(float *prediction, float *estimate, int n)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    prediction[i] = (estimate[i] >= 0.5f) ? 1.0f : 0.0f;
}
/* Unit test: threshold 10 random estimates in [0,1] on both host and
 * device and assert the predictions agree within 1e-5. */
void test_predict_on_device()
{
float *est_h, *pred_h, *predt_h;
float *est_d, *pred_d;
int i, grid_size, n = 10, nbytes = n*sizeof(float);
est_h = (float *) malloc(nbytes);
pred_h = (float *) malloc(nbytes);
predt_h = (float *) malloc(nbytes);
checkCudaError(cudaMalloc((void **) &est_d, nbytes));
checkCudaError(cudaMalloc((void **) &pred_d, nbytes));
for (i = 0; i < n; i++)
est_h[i] = (float) rand() / RAND_MAX;
checkCudaError(cudaMemcpy(est_d, est_h, nbytes, cudaMemcpyHostToDevice));
predict_on_host(pred_h, est_h, n);
/* ceil(n / BLOCK_SIZE) blocks */
grid_size = (n/BLOCK_SIZE) + (n%BLOCK_SIZE==0?0:1);
dim3 grid(grid_size), block(BLOCK_SIZE);
predict_on_device<<<grid, block>>>(pred_d, est_d, n);
checkCudaError(cudaMemcpy(predt_h, pred_d, nbytes, cudaMemcpyDeviceToHost));
for (i = 0; i < n; i++)
assert(abs(pred_h[i] - predt_h[i]) < 1e-5);
free(est_h); free(pred_h); free(predt_h);
cudaFree(est_d); cudaFree(pred_d);
}
/**
 * Compute zero-one loss function, i.e. number of misclassification.
 *
 * Counts positions where prediction[k] != y[k] (exact float comparison —
 * both arrays are expected to hold exact 0.0/1.0 labels).
 */
int zero_one_loss_function(float *prediction, float *y, int n)
{
    int misses = 0;
    for (int k = 0; k < n; k++)
        misses += (prediction[k] != y[k]) ? 1 : 0;
    return misses;
}
/* Seed rand() with the wall clock and run every device-vs-host unit
 * test; each test asserts on any mismatch. */
void run_tests()
{
srand(time(NULL));
test_vectorized_sigmoid_on_device();
test_subtract_vector_and_vector_on_device();
test_multiply_vector_and_scalar_on_device();
test_multiply_matrix_and_vector_on_device();
test_transpose_matrix_on_device();
test_predict_on_device();
}
/* Train logistic regression on train_filename via CPU gradient descent,
 * then report zero-one loss on both the training and test CSVs.
 * Each CSV row is a sample; the last column is the 0/1 label and the
 * remaining columns are the features. */
void logistic_regression_on_host(char *train_filename, char *test_filename)
{
int i, j, m, mt, n, loss;
float **X, **Xt, **mat, *y, *yt, *theta, *init_theta, *ypred;
// Get design matrix and target vector
mat = create_matrix_on_host(MAX_SAMPLE, MAX_FEATURE);
read_csv(train_filename, mat, m, n);
X = create_matrix_on_host(m, n-1);
y = (float *) malloc(m*sizeof(float));
for (i = 0; i < m; i++)
{
for (j = 0; j < n-1; j++)
X[i][j] = mat[i][j];
y[i] = mat[i][n-1];
}
n--;  /* n now counts features only (label column removed) */
// Perform gradient descent to find best parameter value
theta = (float *) malloc(n*sizeof(float));
init_theta = (float *) malloc(n*sizeof(float));
memset(init_theta, 0, n*sizeof(float));
logistic_regression_gradient_descent_on_host(theta, init_theta, STEP_SIZE, X, y, m, n, NUM_ITER);
// Get testing data (read_csv resets n to the full column count)
read_csv(test_filename, mat, mt, n);
Xt = create_matrix_on_host(mt, n-1);
yt = (float *) malloc(mt*sizeof(float));
for (i = 0; i < mt; i++)
{
for (j = 0; j < n-1; j++)
Xt[i][j] = mat[i][j];
yt[i] = mat[i][n-1];
}
n--;  /* back to feature count */
free_matrix_on_host(mat);
// Compute zero-one loss for training data
ypred = (float *) malloc(m*sizeof(float));
multiply_matrix_and_vector_on_host(ypred, X, theta, m, n);
vectorized_sigmoid_on_host(ypred, ypred, m);
predict_on_host(ypred, ypred, m);
loss = zero_one_loss_function(ypred, y, m);
printf("Loss on train data: %d/%d\n", loss, m);
free(ypred);
// Compute zero-one loss for testing data
ypred = (float *) malloc(mt*sizeof(float));
multiply_matrix_and_vector_on_host(ypred, Xt, theta, mt, n);
vectorized_sigmoid_on_host(ypred, ypred, mt);
predict_on_host(ypred, ypred, mt);
loss = zero_one_loss_function(ypred, yt, mt);
printf("Loss on testing data: %d/%d\n", loss, mt);
free_matrix_on_host(X); free_matrix_on_host(Xt);
free(y); free(yt); free(theta); free(init_theta); free(ypred);
}
/* TODO: GPU counterpart of logistic_regression_on_host — intentionally
 * empty, not yet implemented. */
void logistic_regression_on_device(char *train_filename, char *test_filename)
{
}
/* Run the unit tests, then train/evaluate logistic regression on the CSV
 * files given as argv[1] (training set) and argv[2] (test set). */
int main(int argc, char **argv)
{
    run_tests();
    /* Bug fix: the original passed argv[1]/argv[2] without checking argc,
     * dereferencing NULL when fewer than two file names were supplied. */
    if (argc < 3)
    {
        fprintf(stderr, "usage: %s <train.csv> <test.csv>\n", argv[0]);
        return 1;
    }
    logistic_regression_on_host(argv[1], argv[2]);
    return 0;
}
|
19,578 | #include <cuda.h>
#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include "getopt.h"
/* Placeholder kernel for a register-bandwidth microbenchmark — the
 * measurement body has not been written yet. */
__global__ void register_bandwidth_test(){
}
/* Benchmark driver stub — currently launches nothing and returns 0. */
int main(){
return 0;
}
|
19,579 | #include <cuComplex.h>
// Escape-time fractal kernel (z -> z^2 + z0 per pixel, escape when
// |z|^2 > 4). Each thread renders a px_per_thread[0] x px_per_thread[1]
// tile; each block covers px_per_block pixels per axis. `action` selects
// the experiment (see table in the body).
// NOTE(review): only size[0] is used (as the row pitch); nothing clamps
// x/y against size[], so the launch geometry must exactly cover the
// image buffer — confirm with the host-side launch code.
__global__ void gen(int px_per_block[2],int px_per_thread[2],int size[2],float position[2],float *zoom,
int *iterations,int *result, int* progress,int action)
{
//blockDim = size of threads per block
//gridDim = size of blocks
//int size[2] argument is just to make sure we don't fall off the edge and crash the entire machine...
//actions: 0 = write
// 1 = read+write
// 2 = none
// 3 = atomicAddTest
// 4 = overlapMap
int startx = (blockIdx.x*px_per_block[0])+(threadIdx.x*px_per_thread[0]);
int starty = (blockIdx.y*px_per_block[1])+(threadIdx.y*px_per_thread[1]);
float t_x, t_y;
int i, x, y;
int pixelVal; // long int needed???
cuFloatComplex z = cuFloatComplex();
cuFloatComplex z_unchanging = cuFloatComplex();
float z_real, z_imag;
for(x = startx; x < startx+px_per_thread[0]; x++){
for(y = starty; y < starty+px_per_thread[1]; y++){
// flattened pixel index (row-major, size[0] columns)
pixelVal = x + (y*size[0]); // map2Dto1D(x,y,size[0]);
if(action==4) //generate overlap map
{
result[pixelVal] = result[pixelVal] + 1;
continue;
}
if(action==3)
{
atomicAdd(progress,1);
}
// map the pixel to the complex plane at the current pan/zoom
t_x = (x+position[0])/(*zoom);
t_y = (y+position[1])/(*zoom);
z.x = t_x;
z.y = t_y;
z_unchanging.x = t_x;
z_unchanging.y = t_y; //optomize this with pointer magic?
for(i = 0; i<(*iterations) + 1; i++) {
z = cuCmulf(z,z);
z = cuCaddf(z,z_unchanging); //z = z^2 + z_orig
z_real = cuCrealf(z);
z_imag = cuCimagf(z);
// escape test: |z|^2 > 4 means the orbit diverges
if((z_real*z_real + z_imag*z_imag)>4){
if(action==0)//act cool, do the default
{
result[pixelVal] = i;
} else if(action==1)// read+write test
{
result[pixelVal] = result[pixelVal] + 1;
}//else if action==2, do nothing
break;
}
}
}
}
}
/* Local Variables: */
/* mode: c */
/* comment-column: 0 */
/* End: */
|
19,580 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define BLOCK_SIZE 16
// Kernel
// Naive matrix multiply: each thread computes one element of C = A * B.
// Row-major layouts; A is hA x wA, B is hB x wB (wA == hB), C is hC x wC.
// 2-D launch with BLOCK_SIZE x BLOCK_SIZE blocks; out-of-range threads
// exit at the guard.
__global__ void cudaMultiplyArrays(int* dA, int* dB, int* dC, int hA, int wA,
                                   int hB, int wB, int hC, int wC) {
  int row = blockIdx.y * BLOCK_SIZE + threadIdx.y;
  int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
  if (row >= hA || col >= wB) return;
  int acc = 0;
  for (int k = 0; k < wA; k++)
    acc += dA[row * wA + k] * dB[k * wB + col];
  dC[row * wC + col] = acc;
}
// Kernel using shared memory
// Tiled matrix multiply: C = A * B staged through BLOCK_SIZE x BLOCK_SIZE
// shared-memory tiles. Requires blockDim == (BLOCK_SIZE, BLOCK_SIZE).
// NOTE(review): the tile loads below are not bounds-guarded, so for
// dimensions that are not multiples of BLOCK_SIZE threads read past the
// ends of dA/dB (the bad products are later discarded, but the reads
// themselves are out of bounds) — confirm the sizes main launches with.
// NOTE(review): the final store strides rows by wB rather than wC; this
// is only correct because wB == wC (both equal p) in this program.
__global__ void cudaMultiplyArraysShared(int* dA, int* dB, int* dC, int hA,
int wA, int hB, int wB, int hC,
int wC) {
// Thread and block indices
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
// Number of subarrays for each block (ceil(wA / BLOCK_SIZE))
int nsubs = (wA + BLOCK_SIZE - 1) / BLOCK_SIZE;
// Initialize subarrays in shared memory
__shared__ int sdA[BLOCK_SIZE][BLOCK_SIZE];
__shared__ int sdB[BLOCK_SIZE][BLOCK_SIZE];
// Loop over each subarray
int result = 0;
for (unsigned int r = 0; r < nsubs; r++) {
// Fill the subarrays in shared memory
sdA[ty][tx] = dA[(by * BLOCK_SIZE + ty) * wA + (r * BLOCK_SIZE + tx)];
sdB[ty][tx] = dB[(r * BLOCK_SIZE + ty) * wB + (bx * BLOCK_SIZE + tx)];
// barrier: tiles must be fully staged before any thread reads them
__syncthreads();
// Don't add out of bounds elements
int s_max;
if ((r + 1) * BLOCK_SIZE > wA)
s_max = wA - r * BLOCK_SIZE;
else
s_max = BLOCK_SIZE;
for (unsigned int s = 0; s < s_max; s++) result += sdA[ty][s] * sdB[s][tx];
// barrier: finish reading this tile before the next iteration overwrites it
__syncthreads();
}
// Don't fill out of bounds elements
if (bx * BLOCK_SIZE + tx >= wC) return;
if (by * BLOCK_SIZE + ty >= hC) return;
// Fill result array
dC[(by * BLOCK_SIZE + ty) * wB + (bx * BLOCK_SIZE + tx)] = result;
}
// Integer exponentiation x^n by repeated squaring; any n <= 0 yields 1.
// (No overflow protection — results wrap like ordinary int arithmetic.)
int int_power(int x, int n) {
  if (n <= 0) return 1;
  int acc = 1;  // accumulates factors peeled off at odd exponents
  while (n > 1) {
    if ((n & 1) == 0) {
      x *= x;
      n >>= 1;
    } else {
      acc *= x;
      x *= x;
      n = (n - 1) / 2;
    }
  }
  return x * acc;
}
// Fill the row-major hA x wA matrix A with A[i][j] = i + j.
void fill_array(int* A, int hA, int wA) {
  int idx = 0;
  for (int i = 0; i < hA; i++)
    for (int j = 0; j < wA; j++)
      A[idx++] = i + j;
}
// Benchmark driver: multiplies an m x n by an n x p int matrix `nruns`
// times (naive or shared-memory kernel), timing each full H2D + kernel +
// D2H round trip. With no CLI args a small fixed case runs; with 4 args
// they are log2(size), log2(nruns), use_shared, print.
int main(int argc, char* argv[]) {
  int m, n, p, nruns;
  bool shared, prt;
  if (argc == 1) {
    m = 16;
    n = 32;
    p = 1;
    nruns = 65536;
    shared = false;
    prt = true;
  } else if (argc == 5) {
    int siz = int_power(2, atoi(argv[1]));
    m = siz;
    n = siz;
    p = siz;
    nruns = int_power(2, atoi(argv[2]));
    if (atoi(argv[3]) > 0)
      shared = true;
    else
      shared = false;
    if (atoi(argv[4]) > 0)
      prt = true;
    else
      prt = false;
  } else {
    // Bug fix: any other argument count left every configuration variable
    // uninitialized (undefined behavior); fail fast with a usage message.
    fprintf(stderr, "usage: %s [log2_size log2_nruns use_shared print]\n",
            argv[0]);
    return 1;
  }
  // Array sizes
  int hA = m, wA = n;
  int hB = n, wB = p;
  int hC = m, wC = p;
  int sA = hA * wA;
  int sB = hB * wB;
  int sC = hC * wC;
  // Allocate host arrays
  int* A, *B, *C;
  A = (int*)malloc(sizeof(int) * sA);
  B = (int*)malloc(sizeof(int) * sB);
  C = (int*)malloc(sizeof(int) * sC);
  // Allocate device arrays
  int* dA, *dB, *dC;
  cudaMalloc(&dA, sizeof(int) * sA);
  cudaMalloc(&dB, sizeof(int) * sB);
  cudaMalloc(&dC, sizeof(int) * sC);
  // Fill A and B with some integers
  fill_array(A, hA, wA);
  fill_array(B, hB, wB);
  // Set up block grid (ceil-div so partial tiles are covered)
  dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
  dim3 dimGrid((wC + BLOCK_SIZE - 1) / BLOCK_SIZE,
               (hC + BLOCK_SIZE - 1) / BLOCK_SIZE);
  // Set up timing
  struct timespec start_in, end_in;
  long dur_in_ns;
  double dur_in = 0.0, dur_in_total = 0.0;
  double dur_in_min = 1e99, dur_in_max = 0.0;
  for (int i = 0; i < nruns; i++) {
    // Start inclusive timing (copies + kernel + copy back)
    clock_gettime(CLOCK_MONOTONIC, &start_in);
    // Copy host arrays to the device
    cudaMemcpy(dA, A, sizeof(int) * sA, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, B, sizeof(int) * sB, cudaMemcpyHostToDevice);
    if (shared)
      // Invoke the device kernel which multiplies the arrays with shared memory
      cudaMultiplyArraysShared <<<dimGrid, dimBlock>>>
          (dA, dB, dC, hA, wA, hB, wB, hC, wC);
    else
      // Invoke the device kernel which multiplies the arrays
      cudaMultiplyArrays <<<dimGrid, dimBlock>>>
          (dA, dB, dC, hA, wA, hB, wB, hC, wC);
    // Copy the result array back to the host (synchronizes with the kernel)
    cudaMemcpy(C, dC, sizeof(int) * sC, cudaMemcpyDeviceToHost);
    // End inclusive timing
    clock_gettime(CLOCK_MONOTONIC, &end_in);
    // Calculate duration in milliseconds
    dur_in_ns = (end_in.tv_sec - start_in.tv_sec) * 1000000000l +
                end_in.tv_nsec - start_in.tv_nsec;
    dur_in = (double)(dur_in_ns / 1000000.0);
    dur_in_total += dur_in;
    if (dur_in < dur_in_min) dur_in_min = dur_in;
    if (dur_in > dur_in_max) dur_in_max = dur_in;
  }
  // Write result to file
  if (prt) {
    FILE* fp;
    fp = fopen("problem2.out", "w");
    for (int i = 0; i < hC; i++) {
      for (int j = 0; j < wC; j++) fprintf(fp, "%12d ", C[i * wC + j]);
      fprintf(fp, "\n");
    }
    fprintf(fp, "\n");
    fclose(fp);
  }
  // Free memory
  free(A);
  free(B);
  free(C);
  cudaFree(dA);
  cudaFree(dB);
  cudaFree(dC);
  // Get device properties
  cudaDeviceProp gpu_props;
  cudaGetDeviceProperties(&gpu_props, 0);
  // Print some information
  printf("Device name: %s\n", gpu_props.name);
  printf("Dimension 1 (m): %12d\n", m);
  printf("Dimension 2 (n): %12d\n", n);
  printf("Dimension 3 (p): %12d\n", p);
  printf("Block size: %12d\n", BLOCK_SIZE);
  printf("Number of runs: %12d\n", nruns);
  printf("Using shared memory?: %12s\n", shared ? "True" : "False");
  printf("Inclusive time (min): %12.6f ms\n", dur_in_min);
  printf("Inclusive time (avg): %12.6f ms\n", dur_in_total / nruns);
  printf("Inclusive time (max): %12.6f ms\n", dur_in_max);
  printf("\n");
  return 0;
}
|
19,581 | #include <cmath>
#include <iostream>
int main(void)
{
    // Enumerate every CUDA device visible to the runtime and dump its
    // properties to stdout, one labelled line per field.
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    cudaDeviceProp p;
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaGetDeviceProperties(&p, dev);
        std::cout << "=== Device number " << dev << " ===" << std::endl;
        std::cout << "name = " << p.name << std::endl;
        std::cout << "totalGlobalMem = " << p.totalGlobalMem << std::endl;
        std::cout << "totalGlobalMem (MB) = " << p.totalGlobalMem / 1024.0 / 1024 << std::endl;
        std::cout << "sharedMemPerBlock = " << p.sharedMemPerBlock << std::endl;
        std::cout << "sharedMemPerBlock (kB) = " << p.sharedMemPerBlock / 1024.0 << std::endl;
        std::cout << "regsPerBlock = " << p.regsPerBlock << std::endl;
        std::cout << "warpSize = " << p.warpSize << std::endl;
        std::cout << "memPitch = " << p.memPitch << std::endl;
        std::cout << "maxThreadsPerBlock = " << p.maxThreadsPerBlock << std::endl;
        std::cout << "maxThreadsDim[3] = ["
                  << p.maxThreadsDim[0] << ", "
                  << p.maxThreadsDim[1] << ", "
                  << p.maxThreadsDim[2]
                  << "]" << std::endl;
        std::cout << "maxGridSize[3] = ["
                  << p.maxGridSize[0] << ", "
                  << p.maxGridSize[1] << ", "
                  << p.maxGridSize[2]
                  << "]" << std::endl;
        std::cout << "totalConstMem = " << p.totalConstMem << std::endl;
        std::cout << "major = " << p.major << std::endl;
        std::cout << "minor = " << p.minor << std::endl;
        std::cout << "clockRate = " << p.clockRate << std::endl;
        std::cout << "textureAlignment = " << p.textureAlignment << std::endl;
        std::cout << "deviceOverlap = " << p.deviceOverlap << std::endl;
        std::cout << "multiProcessorCount = " << p.multiProcessorCount << std::endl;
        std::cout << "kernelExecTimeoutEnabled = " << p.kernelExecTimeoutEnabled << std::endl;
        std::cout << "integrated = " << p.integrated << std::endl;
        std::cout << "canMapHostMemory = " << p.canMapHostMemory << std::endl;
        std::cout << "computeMode = " << p.computeMode << std::endl;
        std::cout << "concurrentKernels = " << p.concurrentKernels << std::endl;
        std::cout << "ECCEnabled = " << p.ECCEnabled << std::endl;
        std::cout << "pciBusID = " << p.pciBusID << std::endl;
        std::cout << "pciDeviceID = " << p.pciDeviceID << std::endl;
        std::cout << "tccDriver = " << p.tccDriver << std::endl;
        std::cout << std::endl;
    }
    return 0;
}
|
19,582 |
#include <cuda.h>
#include <stdio.h>
#include <float.h>
#define ARRAY_SIZE 2000000 // 2 MB
#define BLOCK_SIZE 256 // with 512 block size the shared memory requirement will overshoot the available limit
#define NTIMES 10
#define MIN(x,y) ((x)<(y)?(x):(y))
#define MAX(x,y) ((x)>(y)?(x):(y))
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// kernel for copying the input array of float3 datatype into another array of float3
// the access pattern is not coalesced
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void nonCoalescedFloat3Access(float3 *dest, float3* src,long size)
{
    // Plain float3 copy, one thread per element. Each thread reads/writes
    // 12 consecutive bytes, so a warp's accesses are strided rather than
    // unit-stride -- this is the un-coalesced baseline the benchmark
    // compares against.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if(idx < size)  // guard the grid tail
    {
        dest[idx].x = src[idx].x;
        dest[idx].y = src[idx].y;
        dest[idx].z = src[idx].z;
    }
}
/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// kernel for copying the input array of float3 datatype into another array of float3
// the access pattern is made coalesced using Shared memory
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void coalescedFloat3Access(float* dest, float* src,long size)
{
    // Copy "size" float3 elements (passed here as raw floats) staging
    // through shared memory so that global loads and stores are unit-stride
    // across the warp (coalesced). Each block owns BLOCK_SIZE float3's,
    // i.e. 3*BLOCK_SIZE consecutive floats.
    //
    // BUGFIX 1: the shared array is a float array, so its size is an
    // element count; the original "3 * BLOCK_SIZE * sizeof(float)"
    // over-allocated 4x (which is why a 512-thread block overshot the
    // shared-memory limit).
    // BUGFIX 2: the block's base offset into the float view is
    // 3 * blockIdx.x * blockDim.x; the original used blockIdx.x*blockDim.x,
    // making blocks overlap and leaving the tail of the array uncopied.
    __shared__ float sInArray[3 * BLOCK_SIZE];
    long base = 3L * blockIdx.x * blockDim.x;   // first float owned by this block
    long nFloats = 3L * size;                   // "size" counts float3 elements
    // coalesced load: consecutive threads read consecutive floats
    for (int k = 0; k < 3; ++k)
    {
        long g = base + k * BLOCK_SIZE + threadIdx.x;
        if (g < nFloats)
            sInArray[k * BLOCK_SIZE + threadIdx.x] = src[g];
    }
    __syncthreads(); // tile fully populated before writing out
    // coalesced store
    for (int k = 0; k < 3; ++k)
    {
        long g = base + k * BLOCK_SIZE + threadIdx.x;
        if (g < nFloats)
            dest[g] = sInArray[k * BLOCK_SIZE + threadIdx.x];
    }
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// kernel for setting the array of float3 with given element
//
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void setArray(float3 *array, float3 value, int size)
{
    // Fill every element of a float3 array with "value" (device-side memset
    // for non-byte patterns). One thread per element.
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i >= size)
        return; // grid tail
    array[i].x = value.x;
    array[i].y = value.y;
    array[i].z = value.z;
}
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//
// the main routine
// for timing the two access patterns -- non-coalesced float3 access and coalesced float3 access using the shared memory
// finding the band width for the two access patterns
// printing the results achieved
//
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc,char* argv[])
{
    // Times the non-coalesced vs. shared-memory-staged float3 copy kernels
    // over NTIMES runs (the first run is discarded as warm-up) and reports
    // effective bandwidth plus avg/min/max kernel times.
    double avgtime[2] = {0}, maxtime[2] = {0}, mintime[2]={FLT_MAX,FLT_MAX};
    float bandWidths[2] = {0};
    float3 *srcArray , *destArray;
    float elapsedTimes[2][NTIMES];
    cudaEvent_t start,stop;
    cudaError_t err = cudaSuccess;
    // bytes moved per run: one read plus one write of the whole float3 array
    double bytes = 2 * sizeof(float3) * ARRAY_SIZE;
    // allocating memory on the device
    err = cudaMalloc((void **)&srcArray,ARRAY_SIZE*sizeof(float3));
    if(err == cudaErrorMemoryAllocation)
    {
        printf("error in device memory allocation for - srcArray\n exiting out of the program.....\n");
        exit(-1);
    }
    err = cudaMalloc((void **)&destArray,ARRAY_SIZE*sizeof(float3));
    if(err == cudaErrorMemoryAllocation)
    {
        printf("error in device memory allocation for - destArray\n exiting out of the program.....\n");
        exit(-1);
    }
    // event creation, which will be used for timing the code
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    //finding the 1D grid size (ceiling division of elements by block size)
    int gridSize = ARRAY_SIZE/BLOCK_SIZE;
    if( ARRAY_SIZE % BLOCK_SIZE != 0 )
        gridSize += 1;
    // initializing the arrays on the device
    setArray <<< gridSize,BLOCK_SIZE >>> (srcArray,make_float3 (1,2,3),ARRAY_SIZE);
    setArray <<< gridSize,BLOCK_SIZE >>> (destArray,make_float3 (0,0,0),ARRAY_SIZE);
    // NOTE(review): cudaThreadSynchronize is deprecated in modern CUDA;
    // cudaDeviceSynchronize is the replacement.
    cudaThreadSynchronize();
    // timing the different kernels with CUDA events (times come back in ms)
    for(int i=0; i < NTIMES; i++)
    {
        //timing the kernel with non-coalesced float3 access
        cudaEventRecord(start,0);
        nonCoalescedFloat3Access <<< gridSize,BLOCK_SIZE >>> (destArray,srcArray,ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[0][i],start,stop);
        //timing the kernel with coalesced float3 access
        cudaEventRecord(start,0);
        coalescedFloat3Access <<< gridSize,BLOCK_SIZE >>> ((float *)destArray,(float *)srcArray,ARRAY_SIZE);
        cudaEventRecord(stop,0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsedTimes[1][i],start,stop);
    }
    // calculating max, min, average timings taken by the two kernels
    for (int i=1; i < NTIMES; i++) // skipping the first (warm-up) iteration
    {
        for (int k=0; k < 2; k++)
        {
            avgtime[k] = avgtime[k] + elapsedTimes[k][i];
            mintime[k] = MIN(mintime[k],elapsedTimes[k][i]);
            maxtime[k] = MAX(maxtime[k], elapsedTimes[k][i]);
        }
    }
    for(int i=0; i < 2; i++)
    {
        avgtime[i] = avgtime[i]/(double)(NTIMES-1); // finding the average time
        // bytes per millisecond; the print below divides by 1e6, which
        // converts bytes/ms to GB/s
        bandWidths[i] = bytes/mintime[i];
    }
    printf("\nDemonstrating the usage of shared memory for coalesced access of float3 array from the global memory of the devices with compute capability <= 1.1\n");
    printf("The array size (single precision): %d\n",ARRAY_SIZE);
    printf("\n-------------------------------------------------------------------------------------------------------------------------------\n");
    printf("Pattern \t\t\t\t Bandwidth (GB/sec) \t Avg time (ms) \t Min time (ms) \t Max time (ms)\n");
    printf("-------------------------------------------------------------------------------------------------------------------------------\n");
    //printing the results -- different bandwidths achieved by the two kernels
    for(int i=0; i < 2; i++)
    {
        switch(i)
        {
        case 0: printf("Non-coalesced access ");
            break;
        case 1: printf("coalesced access using shared memory");
            break;
        }
        printf("\t %.6f \t\t %f \t\t %f \t\t %f\n",bandWidths[i]/1000000,avgtime[i],mintime[i],maxtime[i]);
    }
    printf("\n ------------------------------------------------------------------------------------------------------------------------------\n");
    printf("\n\n**** successful termination of the program ****\n\n");
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(srcArray);
    cudaFree(destArray);
    return 0;
} // end of main routine
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
|
19,583 | /*******************************************************************************
 * several useful gpu functions will be defined in this file to facilitate
* the surface redistance scheme
******************************************************************************/
typedef struct
{
double sR;
double sL;
} double_eno_derivative;
__device__ inline
double max2(double x, double y)
{
    // Larger of the two arguments.
    if (x < y)
        return y;
    return x;
}
__device__ inline
double min2(double x, double y)
{
    // Smaller of the two arguments.
    if (x < y)
        return x;
    return y;
}
__device__ inline
double min_mod(double x, double y)
{
    // Minmod limiter: 0 when the arguments disagree in sign, otherwise the
    // argument of smaller magnitude.
    if (x * y < 0)
        return 0.0;
    return (fabs(x) < fabs(y)) ? x : y;
}
__device__ inline
double sign(double x)
{
    // Sign of x. Note sign(0) == -1.0, matching the original convention.
    if (x > 0)
        return 1.0;
    return -1.0;
}
__device__ inline
bool same_sign(double x, double y)
{
    // True when x and y are strictly of the same sign, or both exactly zero.
    if (x == 0 && y == 0)
        return true;
    return x * y > 0;
}
__device__ inline
void advection_velocity(double & H1, double & H2, double & H3, double sign, double Dx, double Dy, double Dz, double nx, double ny, double nz)
{
    // Unit advection velocity for the surface redistance equation: project
    // the upwind gradient (Dx,Dy,Dz) onto the tangent plane of the surface
    // with unit normal (nx,ny,nz), apply the sign of the original level-set
    // function, and normalize (the 1e-6 regularizes the zero-gradient case).
    //
    // BUGFIX: the normal component is the dot product n . D; the original
    // computed "nx * Dx + ny + Dy + nz * Dz" ('+' typo instead of '*').
    double normal_d = nx * Dx + ny * Dy + nz * Dz;
    H1 = sign * (Dx - nx * normal_d);
    H2 = sign * (Dy - ny * normal_d);
    H3 = sign * (Dz - nz * normal_d);
    double H_mag = sqrt(H1*H1 + H2*H2 + H3*H3 + 1e-6);
    H1 = H1/H_mag;
    H2 = H2/H_mag;
    H3 = H3/H_mag;
}
// convert subindex to linear index
// out-of-range indices are clamped to the nearest boundary (not periodic)
__device__ inline
int sub2ind(int row_idx, int col_idx, int pge_idx, int rows, int cols, int pges)
{
    // Linear index into a rows x cols x pges volume stored row-fastest
    // (row varies quickest, then column, then page). Indices outside the
    // grid are clamped to the nearest valid index. Note min2/max2 operate
    // on doubles; the results are truncated back to int here.
    int row_idxn = min2(rows-1, max2(0, row_idx));
    int col_idxn = min2(cols-1, max2(0, col_idx));
    int pge_idxn = min2(pges-1, max2(0, pge_idx));
    int ind = pge_idxn * rows * cols + col_idxn * rows + row_idxn;
    return ind;
}
/******************************************************************************
* calculate Eno derivatives at node v0: [v4,v1,v0,v2,v3]
******************************************************************************/
__device__ inline
double_eno_derivative eno_derivative( double v4, double v1, double v0, double v2, double v3, double pr, double pl, double ds)
{
    // Second-order ENO one-sided derivatives at node v0 on the 5-point
    // stencil [v4, v1, v0, v2, v3] with uniform spacing ds.
    // pr / pl: distance from v0 to the right / left sample point; when it
    // equals ds the regular neighbor value is used, otherwise the value 0
    // is substituted (presumably the interface value -- the level set is
    // zero at the interface; confirm against the caller).
    double p2m;
    double_eno_derivative eno_d;
    // second differences: centered and right-shifted
    double p2 = v1 - 2.0 * v0 + v2;
    double p2r = v0 - 2.0 * v2 + v3;
    // minmod-limited curvature correction
    p2m = 0.5 * min_mod(p2, p2r) / pow(ds, 2);
    double vr = (pr==ds) ? v2 : 0;
    eno_d.sR = (vr - v0) / pr - pr * p2m;
    // left-shifted second difference for the left-sided derivative
    double p2l = v0 - 2.0 * v1 + v4;
    p2m = 0.5 * min_mod(p2, p2l) / pow(ds, 2);
    double vl = (pl==ds) ? v1 : 0;
    eno_d.sL = (v0 - vl) / pl + pl * p2m;
    return eno_d;
}
// calculate surface redistance step
// now lsf represents the auxiliary level set function (not the level set function)
// inputs : the auxiliary level set function, sign of the initial level set function, distance to the interface, normal vectors
__global__
void surface_redistance_step(double * step, double const * lsf, double const * sign, double const * deltat, double const * nx, double const * ny, double const * nz, double const * xpr, double const * xpl, double const * ypf, double const * ypb, double const * zpu, double const * zpd, int rows, int cols, int pges, double dx, double dy, double dz, int num_ele)
{
    // One thread per grid node (3-D launch). Computes one explicit update
    // of the surface redistance equation for the auxiliary level-set
    // function lsf, with upwinding bounded by the maximal propagation
    // speeds maxH1/2/3 over all 26 one-sided derivative combinations
    // (local Lax-Friedrichs style).
    // NOTE: the array parameter "sign" shadows the device function sign();
    // num_ele is currently unused and kept for interface compatibility.
    int row_idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col_idx = blockIdx.y * blockDim.y + threadIdx.y;
    int pge_idx = blockIdx.z * blockDim.z + threadIdx.z;
    if(row_idx >= rows || col_idx >= cols || pge_idx >= pges){
        return;
    }
    int ind = sub2ind(row_idx, col_idx, pge_idx, rows, cols, pges);
    // one-sided ENO derivatives in x; layout is {right, unused, left} so
    // the choice arrays below can index with choice+1
    int right = sub2ind(row_idx, col_idx+1, pge_idx, rows, cols, pges);
    int right2 = sub2ind(row_idx, col_idx+2, pge_idx, rows, cols, pges);
    int left = sub2ind(row_idx, col_idx-1, pge_idx, rows, cols, pges);
    int left2 = sub2ind(row_idx, col_idx-2, pge_idx, rows, cols, pges);
    double_eno_derivative eno_dx = eno_derivative( lsf[left2], lsf[left], lsf[ind], lsf[right], lsf[right2], xpr[ind], xpl[ind], dx);
    double Dx[3] = {eno_dx.sR, 0, eno_dx.sL};
    // one-sided ENO derivatives in y
    int front = sub2ind(row_idx+1, col_idx, pge_idx, rows, cols, pges);
    int front2 = sub2ind(row_idx+2, col_idx, pge_idx, rows, cols, pges);
    int back = sub2ind(row_idx-1, col_idx, pge_idx, rows, cols, pges);
    int back2 = sub2ind(row_idx-2, col_idx, pge_idx, rows, cols, pges);
    double_eno_derivative eno_dy = eno_derivative( lsf[back2], lsf[back], lsf[ind], lsf[front], lsf[front2], ypf[ind], ypb[ind], dy);
    double Dy[3] = {eno_dy.sR, 0, eno_dy.sL};
    // one-sided ENO derivatives in z
    int up = sub2ind(row_idx, col_idx, pge_idx+1, rows, cols, pges);
    int up2 = sub2ind(row_idx, col_idx, pge_idx+2, rows, cols, pges);
    int down = sub2ind(row_idx, col_idx, pge_idx-1, rows, cols, pges);
    int down2 = sub2ind(row_idx, col_idx, pge_idx-2, rows, cols, pges);
    double_eno_derivative eno_dz = eno_derivative( lsf[down2], lsf[down], lsf[ind], lsf[up], lsf[up2], zpu[ind], zpd[ind], dz);
    double Dz[3] = {eno_dz.sR, 0, eno_dz.sL};
    //Forward=-1, None=0, BackWard=1 : the 26 upwinding combinations
    int const choice_x[26] = {-1,-1,-1,-1, 1, 1, 1, 1, 0, 0, 0, 0,-1,-1, 1, 1,-1,-1, 1, 1, -1, 1, 0, 0, 0, 0};
    int const choice_y[26] = {-1,-1, 1, 1,-1,-1, 1, 1, -1,-1, 1, 1, 0, 0, 0, 0,-1, 1,-1, 1, 0, 0,-1, 1, 0, 0};
    int const choice_z[26] = {-1, 1,-1, 1,-1, 1,-1, 1, -1, 1,-1, 1,-1, 1,-1, 1, 0, 0, 0, 0, 0, 0, 0, 0,-1, 1};
    double Nx = nx[ind];
    double Ny = ny[ind];
    double Nz = nz[ind];
    double Sign = sign[ind];
    // centered derivatives (average of the two one-sided ones)
    double dx_c = (Dx[0] + Dx[2]) / 2;
    double dy_c = (Dy[0] + Dy[2]) / 2;
    double dz_c = (Dz[0] + Dz[2]) / 2;
    double maxH1 = 0;
    double maxH2 = 0;
    double maxH3 = 0;
    for(int i=0;i<26;i++){
        double dr_x = Dx[choice_x[i]+1];
        double dr_y = Dy[choice_y[i]+1];
        double dr_z = Dz[choice_z[i]+1];
        double H1, H2, H3; // information propagation direction
        advection_velocity(H1,H2,H3,Sign,dr_x,dr_y,dr_z,Nx,Ny,Nz);
        maxH1 = (fabs(H1)>maxH1) ? fabs(H1) : maxH1;
        // BUGFIX: the fall-through branches below originally kept maxH1
        // (copy-paste error), corrupting the H2/H3 propagation bounds.
        maxH2 = (fabs(H2)>maxH2) ? fabs(H2) : maxH2;
        maxH3 = (fabs(H3)>maxH3) ? fabs(H3) : maxH3;
    }
    double dt = deltat[ind];
    step[ind] = dt*Sign*(sqrt( pow(dx_c*Nz-Nx*dz_c,2)+pow(dy_c*Nx-Ny*dx_c,2)+pow(dz_c*Ny-Nz*dy_c,2) )-1) - 0.5*dt*(maxH1*(Dx[0]-Dx[2]) - maxH2*(Dy[0]-Dy[2]) - maxH3*(Dz[0]-Dz[2]));
}
|
19,584 | #include <iostream>
#define checkCudaErrors(val) check_cuda((val), #val, __FILE__, __LINE__)
void check_cuda(cudaError_t result, char const* const func, char const* const file, int const line)
{
    // Abort with a diagnostic when a CUDA API call did not return success.
    if (!result)
        return;
    std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
        file << ":" << line << " '" << func << "' \n";
    //! Make sure we call CUDA device reset before exiting
    cudaDeviceReset();
    exit(99);
}
int main(int argc, char* argv[])
{
    // Allocate a 512x512 framebuffer with 3 floats per pixel in
    // CUDA unified (managed) memory.
    int nx = 512;
    int ny = 512;
    int numPixels = nx * ny;
    size_t fb_size = 3 * numPixels * sizeof(float);
    float* fb = 0;
    checkCudaErrors(cudaMallocManaged((void**)&fb, fb_size));
    return 0;
}
19,585 | #include <stdio.h>
#include <random>
#include <sys/time.h>
#include <stdlib.h>
#define SEED 123
#define MARGIN 1e-6
double cpuSecond() {
    // Wall-clock time in seconds with microsecond resolution.
    struct timeval now;
    gettimeofday(&now, NULL);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
float Uniform(){
    // Uniform random float in [-10, 10).
    // BUGFIX: the engine and distribution are now static. The original
    // constructed a fresh default-seeded engine on every call, so every
    // call returned exactly the same value. (Sequence is still
    // deterministic across program runs: the engine is default-seeded.)
    static std::default_random_engine generator;
    static std::uniform_real_distribution<float> uniform(-10,10);
    return uniform(generator);
}
__host__ __device__ float gen_random(int id, int iter, int NUM_PARTICLES) {
    // Deterministic pseudo-random value in [0, NUM_PARTICLES), computed
    // identically on host and device so CPU and GPU runs can be compared.
    int value = (SEED * id + iter) % NUM_PARTICLES;
    return (float)value;
}
class Particle {
public:
    // 3-D position and velocity; filled with uniform random values by the
    // (host-only) constructor via Uniform() -- velocity a quarter of the
    // position range.
    float3 position,velocity;
    Particle() {
        position.x=Uniform();
        position.y=Uniform();
        position.z=Uniform();
        velocity.x=Uniform()/4;
        velocity.y=Uniform()/4;
        velocity.z=Uniform()/4;
    }
    // Euler step: advance position by one unit of time at the current
    // velocity. Callable from both host and device so the CPU reference
    // and GPU kernel stay in lockstep.
    __device__ __host__ void position_update() {
        position.x+=velocity.x;
        position.y+=velocity.y;
        position.z+=velocity.z;
    }
};
__global__ void launch_mover (Particle* particles,int N,int NUM_ITERATIONS){
    // One thread per particle: run all NUM_ITERATIONS timesteps, updating
    // position from velocity and then applying the same deterministic
    // pseudo-random velocity kick as one_timestep_cpu, so CPU and GPU
    // results can be compared element-wise.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= N)
        return; // grid tail
    for (int step = 0; step < NUM_ITERATIONS; ++step)
    {
        particles[tid].position_update();
        particles[tid].velocity.x += gen_random(tid, step, N) / 5;
        particles[tid].velocity.y += gen_random(tid, step, N) / 4;
        particles[tid].velocity.z += gen_random(tid, step, N) / 3;
    }
}
void one_timestep_cpu(Particle* particles,int iter,int N) {
    // Sequential reference: advance every particle by one timestep and
    // apply the same pseudo-random velocity kick as the GPU kernel.
    for (int idx = 0; idx < N; ++idx)
    {
        Particle& p = particles[idx];
        p.position_update();
        p.velocity.x += gen_random(idx, iter, N) / 5;
        p.velocity.y += gen_random(idx, iter, N) / 4;
        p.velocity.z += gen_random(idx, iter, N) / 3;
    }
}
int main(int argc, char* argv[]) {
    // Usage: <prog> NUM_PARTICLES NUM_ITERATIONS BLOCK_SIZE
    // Runs the particle mover on both CPU and GPU, times each, and counts
    // particles whose final positions disagree beyond MARGIN.
    if (argc < 4) {
        // BUGFIX: the original dereferenced argv[1..3] unconditionally.
        printf("usage: %s NUM_PARTICLES NUM_ITERATIONS BLOCK_SIZE\n", argv[0]);
        return 1;
    }
    double start, gpu_time = 0, cpu_time = 0;
    int NUM_PARTICLES = atoi(argv[1]);
    int NUM_ITERATIONS = atoi(argv[2]);
    int BLOCK_SIZE = atoi(argv[3]);
    printf("NUM_PARTICLES:%d\nNUM_ITERATIONS:%d\nBLOCK_SIZE:%d\n",NUM_PARTICLES,NUM_ITERATIONS,BLOCK_SIZE);
    int nBytes = sizeof(Particle) * NUM_PARTICLES;
    int grid_size = (NUM_PARTICLES + BLOCK_SIZE - 1) / BLOCK_SIZE; // ceil-div
    Particle* particles = new Particle[NUM_PARTICLES];     // CPU copy
    Particle* res = (Particle*)malloc(nBytes);             // GPU results land here
    // --- GPU: upload initial state (counted as part of the GPU cost) ---
    start = cpuSecond();
    Particle* d_particles;
    cudaMalloc(&d_particles, nBytes);
    cudaMemcpy(d_particles, particles, nBytes, cudaMemcpyHostToDevice);
    gpu_time += cpuSecond() - start;
    // --- CPU reference simulation ---
    start = cpuSecond();
    for (int i = 0; i < NUM_ITERATIONS; i++)
        one_timestep_cpu(particles, i, NUM_PARTICLES);
    cpu_time += cpuSecond() - start;
    printf("CPU costs:%lf\n", cpu_time);
    // --- GPU simulation + download ---
    start = cpuSecond();
    launch_mover<<<grid_size, BLOCK_SIZE>>>(d_particles, NUM_PARTICLES, NUM_ITERATIONS);
    cudaDeviceSynchronize();
    cudaMemcpy(res, d_particles, nBytes, cudaMemcpyDeviceToHost);
    gpu_time += cpuSecond() - start;
    printf("GPU costs:%lf\n", gpu_time);
    // --- compare CPU vs GPU positions ---
    int c = 0;
    for (int i = 0; i < NUM_PARTICLES; i++) {
        float xCPU = particles[i].position.x;
        float yCPU = particles[i].position.y;
        float zCPU = particles[i].position.z;
        float xGPU = res[i].position.x;
        float yGPU = res[i].position.y;
        float zGPU = res[i].position.z;
        // BUGFIX: use logical || (the original's bitwise '|' produced the
        // same truth value on bools but always evaluated all three
        // comparisons; '||' states the intent and short-circuits).
        if (fabs(xCPU - xGPU) > MARGIN || fabs(yCPU - yGPU) > MARGIN ||
            fabs(zCPU - zGPU) > MARGIN)
            c++;
    }
    printf("mismatch:%d\n", c);
    // release resources (the original leaked both host buffers)
    cudaFree(d_particles);
    delete[] particles;
    free(res);
    return 0;
}
|
19,586 | #include <stdio.h>
int main() {
    // Enumerate CUDA devices and print their capabilities (the user-facing
    // messages are in Spanish and kept verbatim).
    int nDevices;
    cudaGetDeviceCount(&nDevices);
    printf("N dispositivos: %d\n",nDevices);
    cudaDeviceProp prop;
    for (int i = 0; i < nDevices; i++) {
        cudaGetDeviceProperties(&prop, i);
        printf("Device Number: %d\n", i);
        printf(" Device name: %s\n", prop.name);
        printf(" Size warp: %d\n", prop.warpSize);
        // BUGFIX: sharedMemPerBlock is size_t; printing it with %d is
        // undefined behavior on LP64 platforms -- use %zu.
        printf(" Memoria compartida por bloque: %zu KB \n", prop.sharedMemPerBlock/1024);
        printf(" registros por bloque: %d\n", prop.regsPerBlock);
        printf(" Max hilos por bloque: %d\n", prop.maxThreadsPerBlock);
        printf(" Max dimension de hilos en x: %d\n", prop.maxThreadsDim[0]);
        printf(" Max dimension de hilos en y: %d\n", prop.maxThreadsDim[1]);
        printf(" Max dimension de hilos en z: %d\n", prop.maxThreadsDim[2]);
        printf(" Max size grid en x: %d\n", prop.maxGridSize[0]);
        printf(" Max size grid en y: %d\n", prop.maxGridSize[1]);
        printf(" Max size grid en z: %d\n", prop.maxGridSize[2]);
        printf(" Frecuencia del reloj del device (KHz): %d\n", prop.clockRate);
        printf(" Memory Clock Rate (KHz): %d\n", prop.memoryClockRate);
        printf(" Memory Bus Width (bits): %d\n", prop.memoryBusWidth);
        printf(" Peak Memory Bandwidth (GB/s): %f\n", 2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf(" Numero de SMs %d\n", prop.multiProcessorCount);
        printf(" Max numero de threads por SM: %d\n", prop.maxThreadsPerMultiProcessor);
    }
    return 0;
}
|
// Save the positions, momenta and forces of the first nseq particles
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
void save_seq( double time, long nseq, double *r_gpu, double *p_gpu, double *f_gpu, FILE *fseq, FILE *fseq2, FILE *fseq3)
{
    // Copy the first nseq positions (r_gpu), momenta (p_gpu) and forces
    // (f_gpu) back from the device and append one time-stamped line to
    // each of the three already-open output streams. Positions are wrapped
    // into [0, 2*pi) before writing.
    // NOTE(review): pp/rr/ff are variable-length stack arrays -- a large
    // nseq will overflow the stack; consider heap allocation.
    long i;
    double pp[nseq],rr[nseq],ff[nseq],tpi,r2;
    // 2*pi, used for the periodic wrap below
    tpi=6.2831853071795864770;
    cudaMemcpy(pp, p_gpu, nseq*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(rr, r_gpu, nseq*sizeof(double), cudaMemcpyDeviceToHost);
    cudaMemcpy(ff, f_gpu, nseq*sizeof(double), cudaMemcpyDeviceToHost);
    for (i=0;i<nseq;i++)
    {
        // wrap the position into [0, 2*pi)
        r2=rr[i];
        while (r2>tpi) {r2-=tpi;};
        while (r2<0) {r2+=tpi;};
        fprintf(fseq,"%lf %lf %lf",time,r2,pp[i]);
        fprintf(fseq2,"%lf %lf ",time,r2);
        fprintf(fseq3,"%lf %lf ",time,ff[i]);
    };
    fprintf(fseq,"\n");
    fprintf(fseq2,"\n");
    fprintf(fseq3,"\n");
    return;
}
|
19,588 | #include "includes.h"
__global__ void _mat_sum_col(float *m, float *target,int nrow, int ncol){
    // Column sums of a row-major nrow x ncol matrix:
    // target[j] = sum over i of m[i][j]. One thread per column; adjacent
    // threads read adjacent elements of each row (coalesced).
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= ncol)
        return; // grid tail
    float acc = 0;
    for (int row = 0; row < nrow; ++row)
        acc += m[row * ncol + col];
    target[col] = acc;
}
19,589 | #include "includes.h"
// filename: gax.cu
// a simple CUDA kernel to add two vectors
extern "C" // ensure function name to be exactly "gax"
{
}
__global__ void vmultbangupdate(const int lengthA, const double alpha, const double *a, const double *b, double *c)
{
    // Element-wise fused multiply-accumulate: c[i] += alpha * a[i] * b[i].
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= lengthA)
        return; // grid tail
    c[idx] += alpha * a[idx] * b[idx];
}
19,590 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
// Auto-generated single-thread stress kernel (compiler/fuzzer test):
// exercises float special-function codegen and prints the accumulated
// result. Not meant to be meaningful math -- do not "simplify" it.
// NOTE(review): the inner loop's "int i" shadows the outer loop variable
// (intentional or not, it is part of the generated program).
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float* var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) {
    if (comp > (var_3 - sinf(var_4 + +1.1880E20f + -1.5717E22f))) {
        for (int i=0; i < var_1; ++i) {
            if (comp > atanf((var_5 * var_6))) {
                comp += var_7 + (var_8 - var_9);
                comp += (-1.4369E-7f / (+1.5416E-35f / (var_10 / (+1.5498E-43f / var_11))));
                comp = asinf(-1.3356E-41f / +1.1703E-36f);
                comp += (+0.0f + var_12 - var_13 + (var_14 + fabsf(acosf((var_15 + (-0.0f * log10f((-1.6169E3f + atanf(-1.5079E36f)))))))));
                if (comp <= var_16 + -1.4471E36f / (var_17 / var_18 + (var_19 / var_20))) {
                    comp = (var_21 + var_22 - log10f((+1.5675E35f - (-1.9158E34f + ldexpf(-0.0f, 2)))));
                    float tmp_1 = var_23 * +1.3799E36f * +1.7034E-35f;
                    comp = tmp_1 + -0.0f - var_24 / var_25 + +1.6619E10f;
                }
                for (int i=0; i < var_2; ++i) {
                    comp = (-1.2967E9f / -1.0508E18f * +1.5166E-36f * -1.2234E-26f / var_27);
                    var_26[i] = +1.6957E35f / var_28 + (var_29 + var_30);
                    comp = var_26[i] / (var_31 * -1.3135E-36f / (+0.0f + (-0.0f + (var_32 / var_33))));
                }
            }
        }
    }
    printf("%.17g\n", comp);
}
float* initPointer(float v) {
    // Heap-allocate a 10-element float array with every slot set to v.
    // Ownership passes to the caller (who is expected to free it).
    float *buf = (float*) malloc(sizeof(float)*10);
    for (int k = 0; k < 10; ++k)
        buf[k] = v;
    return buf;
}
int main(int argc, char** argv) {
    // Auto-generated driver: parses 34 command-line values and launches the
    // stress kernel with a single thread.
    // NOTE(review): assumes argc >= 35 -- no argument validation (part of
    // the generated harness, left as is).
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    int tmp_3 = atoi(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    // argument 27 becomes a 10-element device-visible... NOTE(review):
    // actually a HOST pointer passed to the kernel -- works only with
    // unified addressing/managed setups; presumably tolerated by the
    // generated-test harness.
    float* tmp_27 = initPointer( atof(argv[27]) );
    float tmp_28 = atof(argv[28]);
    float tmp_29 = atof(argv[29]);
    float tmp_30 = atof(argv[30]);
    float tmp_31 = atof(argv[31]);
    float tmp_32 = atof(argv[32]);
    float tmp_33 = atof(argv[33]);
    float tmp_34 = atof(argv[34]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34);
    cudaDeviceSynchronize();
    return 0;
}
|
19,591 | /*
* EDDL Library - European Distributed Deep Learning Library.
* Version: 1.1
* copyright (c) 2022, Universitat Politècnica de València (UPV), PRHLT Research Centre
* Date: March 2022
* Author: PRHLT Research Centre, UPV, (rparedes@prhlt.upv.es), (jon@prhlt.upv.es)
* All rights reserved
*/
#include <string.h>
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <cuda.h>
|
19,592 | #define get_global_size() (blockDim.x * gridDim.x)
#define get_global_id() (threadIdx.x + blockIdx.x * blockDim.x)
#define get_local_id() (threadIdx.x)
#define get_local_size() (blockDim.x)
__device__ double gpu_mmap_result = 0.0;
extern "C" __global__ __launch_bounds__(1024)
void gpu_mmap_init(char *buffer, size_t buffer_sz)
{
    // Fill the buffer (viewed as floats) with pseudo-random values in
    // [0, 100], using a per-thread square-and-shift generator seeded from
    // the global thread id. A grid-stride loop covers any buffer size.
    float *values = (float *)buffer;
    size_t i, N = buffer_sz / sizeof(float);
    unsigned int seed = 0xdeadbeaf + get_global_id();
    for (i=get_global_id(); i < N; i += get_global_size())
    {
        // square the state, keep 31 bits from the middle as the next state
        unsigned long next = (unsigned long)seed * (unsigned long)seed;
        seed = (next >> 16) & 0x7fffffffU;
        // scale to [0, 100]; NOTE(review): UINT_MAX comes from <limits.h>,
        // presumably included by the surrounding build -- confirm.
        values[i] = 100.0 * ((double)seed / (double)UINT_MAX);
    }
}
extern "C" __global__ __launch_bounds__(1024)
void gpu_mmap_kernel(char *buffer, size_t buffer_sz)
{
    // Sum the buffer (viewed as floats) into the global accumulator
    // gpu_mmap_result: per-thread partial sums in float, one shared-memory
    // atomic add per thread, then one global atomic add per block.
    // NOTE(review): atomicAdd on double requires compute capability 6.0+
    // -- confirm the build targets SM60 or newer.
    __shared__ double sum;
    float *values = (float *)buffer;
    size_t i, N = buffer_sz / sizeof(float);
    float __sum = 0.0;
    if (get_local_id() == 0)
        sum = 0.0;
    // grid-stride accumulation into the per-thread register partial
    for (i=get_global_id(); i < N; i += get_global_size())
        __sum += values[i];
    __syncthreads(); // make thread 0's "sum = 0.0" visible to all threads
    atomicAdd(&sum, __sum);
    __syncthreads(); // all partials folded into the shared total
    if (get_local_id() == 0)
        atomicAdd(&gpu_mmap_result, sum);
}
|
19,593 | #include <iostream>
int main()
{
    // Print a short capability summary for every CUDA device present.
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    cudaDeviceProp props;
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        cudaGetDeviceProperties(&props, dev);
        std::cout << "Information for device #" << dev << std::endl;
        std::cout << "Name " << props.name << std::endl;
        std::cout << "Compute capability " << props.major << "." << props.minor << std::endl;
        std::cout << "Total memory " << props.totalGlobalMem / (1024*1024*1024) << "GB" << std::endl;
        std::cout << "Multiprocessor count " << props.multiProcessorCount << std::endl;
        std::cout << "Threads in warp " << props.warpSize << std::endl;
        std::cout << "Max threads per block " << props.maxThreadsPerBlock << std::endl;
    }
    return 0;
}
|
19,594 | #include "includes.h"
__global__ void cu_divide(const float numerator, const float* denominator, float* dst, const int n){
    // Element-wise dst[i] = numerator / denominator[i], mapping
    // divide-by-zero to 0. Grid-stride loop, so any launch configuration
    // covers all n elements; uses the fast __fdividef intrinsic.
    const int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride) {
        float d = denominator[i];
        dst[i] = (0 == d) ? 0.0 : __fdividef(numerator, d);
    }
}
19,595 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
// Auto-generated single-thread stress kernel (compiler/fuzzer test):
// exercises float special-function codegen and prints the accumulated
// result. Not meant to be meaningful math -- do not "simplify" it.
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35) {
    if (comp == var_1 + -1.4990E34f + -1.9152E-37f - sinhf(+0.0f)) {
        float tmp_1 = +1.0509E-5f + -0.0f;
        comp = tmp_1 + cosf(ceilf(+1.2209E36f));
        comp += +0.0f - -1.1021E36f;
        if (comp >= (var_2 + expf(var_3 * -1.0067E36f + var_4))) {
            comp = fabsf((+1.2168E3f + +1.5074E35f + -1.0978E-35f - asinf((+1.9784E34f - var_5 - +1.2501E-30f * +1.1535E-35f))));
            comp += var_6 / var_7 * sinf((var_8 * -1.3983E-35f + (var_9 * +1.2576E-35f)));
        }
        if (comp <= (var_10 / (+1.2377E-35f * atan2f(+1.7563E34f, fmodf(acosf((var_11 + log10f(ceilf(floorf(coshf((-1.0989E-44f + (+1.7458E-43f - var_12 * var_13 - +1.1736E6f)))))))), (var_14 / log10f(-1.5772E-37f / (var_15 - var_16 / (+1.8813E-43f + var_17 / -0.0f))))))))) {
            comp += (var_18 * var_19 - -1.7282E34f + var_20);
            float tmp_2 = -1.5526E16f + +1.4690E-28f;
            comp = tmp_2 + var_21 + sqrtf(sqrtf(-1.6186E34f + (-1.5640E35f * var_22)));
        }
        if (comp <= (-1.4081E-36f - +1.7406E1f - (var_23 - -1.0287E-26f - -1.5336E-14f - var_24))) {
            comp = (+0.0f + var_25 - +0.0f * ldexpf(var_26 * fabsf(-1.0631E-36f), 2));
            comp = var_27 + (var_28 + var_29 + var_30);
            float tmp_3 = -1.5326E-30f * (var_31 + var_32);
            comp += tmp_3 + asinf((var_33 * var_34 - var_35 / +1.0389E-36f));
        }
    }
    printf("%.17g\n", comp);
}
float* initPointer(float v) {
    // Heap-allocate a 10-element float array with every slot set to v.
    // Ownership passes to the caller (who is expected to free it).
    float *buf = (float*) malloc(sizeof(float)*10);
    for (int k = 0; k < 10; ++k)
        buf[k] = v;
    return buf;
}
int main(int argc, char** argv) {
    // Auto-generated driver: parses 36 command-line values and launches the
    // stress kernel with a single thread.
    // NOTE(review): assumes argc >= 37 -- no argument validation (part of
    // the generated harness, left as is). Also note tmp_27 (an unused heap
    // array elsewhere in this family of tests) is a plain float here.
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    float tmp_28 = atof(argv[28]);
    float tmp_29 = atof(argv[29]);
    float tmp_30 = atof(argv[30]);
    float tmp_31 = atof(argv[31]);
    float tmp_32 = atof(argv[32]);
    float tmp_33 = atof(argv[33]);
    float tmp_34 = atof(argv[34]);
    float tmp_35 = atof(argv[35]);
    float tmp_36 = atof(argv[36]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36);
    cudaDeviceSynchronize();
    return 0;
}
|
19,596 | #include "includes.h"
__global__ void cube(float * d_out, float * d_in)
{
    // Cube each input element: d_out[i] = d_in[i]^3.
    // Single-block launch assumed (indexing uses threadIdx.x only).
    // BUGFIX: the original computed num * num (the square) despite the
    // kernel's name; the cube needs three factors.
    int id = threadIdx.x;
    float num = d_in[id];
    d_out[id] = num * num * num;
}
19,597 | /**
* Parametric equalizer back-end GPU code.
*/
#include <cuda_runtime.h>
#include <cufft.h>
#include "parametric_eq_cuda.cuh"
const float PI = 3.14159265358979;
/**
* This kernel takes an array of Filters, and creates the appropriate
* output transfer function in the frequency domain. This just involves a
* superposition of the transfer functions described by each filter. The
* resulting transfer function will be floor(bufSamples/2) + 1 in length,
* since we only care about the positive frequencies in our FFT (since
* we're FFTing real signals).
*
*/
__global__
void cudaFilterSetupKernel(const Filter *filters,
                           const unsigned int numFilters,
                           cufftComplex *transferFunc,
                           const unsigned int samplingRate,
                           const unsigned int bufSamples)
{
    // Builds the combined frequency-domain transfer function of all
    // filters. Output length is floor(bufSamples/2) + 1 (positive
    // frequencies of a real FFT only). "filters" must be a device
    // pointer, as must the per-filter property pointers it contains.

    // Index in "transferFunc" this thread writes first.
    unsigned int transFuncInd = blockIdx.x * blockDim.x + threadIdx.x;

    // Frequency resolution of the transfer function in Hz per bin,
    // as is standard in FFTs.
    const float transFuncRes = ((float) samplingRate) / bufSamples;

    // The length of the resulting transfer function.
    const unsigned int transFuncLength = (bufSamples / 2) + 1;

    // Grid-stride loop: each thread may fill several bins.
    while (transFuncInd < transFuncLength)
    {
        // The frequency of this bin is index * resolution; multiply by
        // "2 pi sqrt(-1)" to get the imaginary angular frequency used
        // by s-plane transfer functions.
        float thisFreq = transFuncInd * transFuncRes;
        float thisSReal = 2.0 * PI * thisFreq;

        cufftComplex s;
        s.x = 0.0;
        s.y = thisSReal;

        // Accumulated transfer-function value for this bin. Starts at
        // 1 + 0i, the multiplicative identity.
        cufftComplex output;
        output.x = 1.0;
        output.y = 0.0;

        // Cascade all of the filters. Cascading transfer functions
        // multiplies them (full complex multiplication).
        for (unsigned int i = 0; i < numFilters; i++)
        {
            Filter thisFilter = filters[i];
            FilterType thisFilterType = thisFilter.type;

            switch (thisFilterType)
            {
                case FT_BAND_BOOST:
                case FT_BAND_CUT:
                {
                    // For boosts, use the transfer function:
                    //
                    //   H(s) = (s^2 + K * omegaNought/Q * s + omegaNought^2)
                    //        / (s^2 + omegaNought/Q * s + omegaNought^2)
                    //
                    // And use the reciprocal of this for cuts.
                    cufftComplex sSq;
                    float omegaNought, Q, K, omegaNoughtOvQ, omegaNoughtSq;

                    omegaNought = thisFilter.bandBCProp->omegaNought;
                    Q = thisFilter.bandBCProp->Q;
                    K = thisFilter.bandBCProp->K;

                    // Precompute shared subexpressions.
                    sSq = cuCmulf(s, s);
                    omegaNoughtOvQ = omegaNought / Q;
                    omegaNoughtSq = omegaNought * omegaNought;

                    // Numerator and denominator of H(s) for boosts.
                    cufftComplex numerBoost;
                    cufftComplex denomBoost;

                    numerBoost.x = sSq.x + K * omegaNoughtOvQ * s.x +
                        omegaNoughtSq;
                    numerBoost.y = sSq.y + K * omegaNoughtOvQ * s.y;

                    denomBoost.x = sSq.x + omegaNoughtOvQ * s.x +
                        omegaNoughtSq;
                    denomBoost.y = sSq.y + omegaNoughtOvQ * s.y;

                    // Boost: numerBoost / denomBoost. Cut: its reciprocal.
                    cufftComplex quot;

                    if (thisFilterType == FT_BAND_BOOST)
                    {
                        quot = cuCdivf(numerBoost, denomBoost);
                    }
                    else
                    {
                        quot = cuCdivf(denomBoost, numerBoost);
                    }

                    // Bug fix: the original multiplied component-wise
                    // (output.x *= quot.x; output.y *= quot.y), which is
                    // not complex multiplication. Use cuCmulf so
                    // (a+bi)(c+di) = (ac-bd) + (ad+bc)i.
                    output = cuCmulf(output, quot);
                    break;
                }

                case FT_HIGH_SHELF:
                case FT_LOW_SHELF:
                {
                    // The real-valued transfer function for low-shelf
                    // filters is:
                    //
                    //   H(s) = 1 + (K - 1) *
                    //          {1 - tanh( (|s| - Omega_0) / Omega_BW ) } / 2
                    //
                    // For high-shelf filters, the tanh argument is negated.
                    float tanhArg, tanhVal;
                    float omegaNought, omegaBW, KMinus1;
                    float positiveExp, negativeExp;
                    float filterVal;

                    omegaNought = thisFilter.shelvingProp->omegaNought;
                    omegaBW = thisFilter.shelvingProp->omegaBW;
                    KMinus1 = thisFilter.shelvingProp->K - 1.0;

                    // Argument to the tanh; negate for high-shelf.
                    tanhArg = (thisSReal - omegaNought) / omegaBW;

                    if (thisFilterType == FT_HIGH_SHELF)
                    {
                        tanhArg *= -1.0;
                    }

                    // Clamp large arguments to avoid blow-up in the
                    // exponentials below.
                    if (tanhArg >= 9.0)
                    {
                        // tanh(9.0) is pretty much 1.
                        tanhVal = 1.0;
                    }
                    else if (tanhArg <= -9.0)
                    {
                        // tanh(-9.0) is pretty much -1.
                        tanhVal = -1.0;
                    }
                    else
                    {
                        // Compute tanh via the fast __expf intrinsic:
                        // tanh(x) = (e^x - e^{-x}) / (e^x + e^{-x}).
                        positiveExp = __expf(tanhArg);
                        negativeExp = __expf(-tanhArg);

                        tanhVal = (positiveExp - negativeExp) /
                            (positiveExp + negativeExp);
                    }

                    filterVal = 1.0 + KMinus1 * (1.0 - tanhVal) / 2.0;

                    // Bug fix: this transfer function is purely real, but
                    // multiplying a complex value by a real gain scales
                    // BOTH components ((a+bi)*g = ag + bg*i). The
                    // original scaled only output.x.
                    output.x *= filterVal;
                    output.y *= filterVal;
                    break;
                }

                default:
                    printf("Unknown filter type; exiting");
                    asm("trap;");
            }
        }

        // Write the accumulated value to global memory.
        transferFunc[transFuncInd] = output;

        // This thread might have to process multiple entries.
        transFuncInd += blockDim.x * gridDim.x;
    }
}
/**
* This kernel takes an input-output FFT'd audio buffer, which is
* floor(bufSamples/2) + 1 long (because we ignore the negative frequencies
* in the FFT). We multiply each element in this buffer by the
* corresponding element in the transfer function, which is assumed to be
* of the same length. This multiplication is carried out in place.
*
* After multiplying, we also divide by bufSamples (since IFFT(FFT(x)) will
* be bufSamples * x otherwise).
*
*/
__global__
void cudaProcessBufKernel(cufftComplex *inOutAudioFFTBuf,
                          const cufftComplex *transferFunc,
                          const unsigned int bufSamples)
{
    // Pointwise-multiplies an FFT'd audio buffer by the transfer
    // function, in place, and divides by bufSamples so that a following
    // inverse FFT yields the original scale (IFFT(FFT(x)) is otherwise
    // bufSamples * x). Both arrays are floor(bufSamples/2) + 1 long.

    // Half-spectrum length for a real FFT of bufSamples points.
    const unsigned int fftLength = (bufSamples / 2) + 1;
    const unsigned int stride = blockDim.x * gridDim.x;

    // Grid-stride loop: each thread may handle several FFT bins.
    for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < fftLength;
         idx += stride)
    {
        // Apply the transfer function and fold in the 1/bufSamples
        // normalization before writing back.
        cufftComplex v = cuCmulf(inOutAudioFFTBuf[idx], transferFunc[idx]);
        v.x /= bufSamples;
        v.y /= bufSamples;
        inOutAudioFFTBuf[idx] = v;
    }
}
/**
* This kernel takes an audio buffer of floats, and clips all of its values
* so that they can be stored in an array of signed 16-bit shorts.
*
* It is assumed that both the input and output audio buffers are
* "bufSamples" long.
*
*/
__global__
void cudaClippingKernel(const float *inAudioBuf,
                        const unsigned int bufSamples,
                        int16_t *outAudioBuf)
{
    // Clamps each float sample into the signed 16-bit range and stores
    // it as an int16_t. Input and output buffers are both bufSamples
    // long.
    const unsigned int stride = blockDim.x * gridDim.x;

    // Grid-stride loop: each thread may clip several samples.
    for (unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
         idx < bufSamples;
         idx += stride)
    {
        // fminf/fmaxf clamp without branching, which avoids divergence
        // and pipelines well. The small epsilon keeps the float-to-int
        // conversion strictly inside the int16_t range.
        float clamped = fmaxf(fminf(inAudioBuf[idx], 32767.0 - 0.001),
                              -32768.0 + 0.001);
        outAudioBuf[idx] = (int16_t) clamped;
    }
}
void cudaCallFilterSetupKernel(const unsigned int blocks,
                               const unsigned int threadsPerBlock,
                               const cudaStream_t stream,
                               const Filter *filters,
                               const unsigned int numFilters,
                               cufftComplex *transferFunc,
                               const unsigned int samplingRate,
                               const unsigned int bufSamples)
{
    // Host-side wrapper: launch the filter-setup kernel on the caller's
    // stream. The kernel uses no dynamic shared memory.
    cudaFilterSetupKernel<<<blocks, threadsPerBlock, 0, stream>>>(
        filters, numFilters, transferFunc, samplingRate, bufSamples);
}
void cudaCallProcessBufKernel(const unsigned int blocks,
                              const unsigned int threadsPerBlock,
                              const cudaStream_t stream,
                              cufftComplex *inOutAudioFFTBuf,
                              const cufftComplex *transferFunc,
                              const unsigned int bufSamples)
{
    // Host-side wrapper: launch the in-place FFT-buffer processing
    // kernel on the caller's stream. No dynamic shared memory is used.
    cudaProcessBufKernel<<<blocks, threadsPerBlock, 0, stream>>>(
        inOutAudioFFTBuf, transferFunc, bufSamples);
}
void cudaCallClippingKernel(const unsigned int blocks,
                            const unsigned int threadsPerBlock,
                            const cudaStream_t stream,
                            const float *inAudioBuf,
                            const unsigned int bufSamples,
                            int16_t *outAudioBuf)
{
    // Host-side wrapper: launch the clipping kernel on the caller's
    // stream. No dynamic shared memory is used.
    cudaClippingKernel<<<blocks, threadsPerBlock, 0, stream>>>(
        inAudioBuf, bufSamples, outAudioBuf);
}
|
19,598 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "cuda.h"
__global__ void kernelAddMatrices1D(int N, double *A, double *B, double *C) {
    // Element-wise C = A + B over N entries, one thread per entry,
    // 1-D launch.
    int threadId = threadIdx.x;
    int blockId = blockIdx.x;
    int blockSize = blockDim.x;
    int id = threadId + blockId*blockSize;
    // Bug fix: the launch in main() rounds N up to a multiple of the
    // block size ((N+Nthreads-1)/Nthreads blocks), so trailing threads
    // would read/write out of bounds without this guard.
    if (id < N) {
        C[id] = A[id] + B[id];
    }
}
__global__ void kernelAddMatrices2D(int N, double *A, double* B, double *C) {
    // Element-wise C = A + B using a 2-D grid of 2-D blocks.
    // N is the total element count (nx * ny).
    int tIdx = threadIdx.x;
    int tIdy = threadIdx.y;
    int bIdx = blockIdx.x;
    int bIdy = blockIdx.y;
    int bSizex = blockDim.x;
    int bSizey = blockDim.y;
    int i = tIdx + bIdx*bSizex; // unique x coordinate (column)
    int j = tIdy + bIdy*bSizey; // unique y coordinate (row)
    // Row pitch is hard-coded to match the 1024x1024 matrices allocated
    // in main(); generalizing would require passing nx as a parameter.
    int nx = 1024;
    int id = i + j*nx;
    // Bug fix: guard against threads launched past the matrix edge when
    // the grid does not divide the dimensions exactly.
    if (i < nx && id < N) {
        C[id] = A[id] + B[id];
    }
}
__global__ void kernelMatrixTranspose2D_v1(double *A, double *AT) {
    // Naive transpose AT = A^T: the write AT[i + j*nx] is coalesced, but
    // the read A[j + i*nx] strides by nx and is not (v2 fixes this with
    // shared memory).
    int tIdx = threadIdx.x;
    int tIdy = threadIdx.y;
    int bIdx = blockIdx.x;
    int bIdy = blockIdx.y;
    int bSizex = blockDim.x;
    int bSizey = blockDim.y;
    int i = tIdx + bIdx*bSizex; // unique x coordinate (output column)
    int j = tIdy + bIdy*bSizey; // unique y coordinate (output row)
    // Dimension hard-coded to the square 1024x1024 matrices from main().
    int nx = 1024;
    // Bug fix: bounds guard so threads launched past the matrix edge do
    // not access out-of-range memory.
    if (i < nx && j < nx) {
        AT[i + j*nx] = A[j + i*nx];
    }
}
//do the transpose using shared memory to get better device memory acceses
__global__ void kernelMatrixTranspose2D_v2(double *A, double *AT) {
    // Tiled transpose AT = A^T through shared memory so BOTH the global
    // read and the global write are coalesced.
    // Preconditions: 32x32 thread blocks and matrix dimensions that are
    // multiples of 32 (true for the 1024x1024 launch in main()); every
    // thread must reach the __syncthreads() below, so no early returns.
    int tIdx = threadIdx.x;
    int tIdy = threadIdx.y;
    int bSizex = blockDim.x;
    int bSizey = blockDim.y;

    // +1 padding on the fast dimension places column accesses in
    // different banks, avoiding shared-memory bank conflicts on the
    // transposed read-back.
    __shared__ double s_A[32][32 + 1];

    // Dimension hard-coded to the square 1024x1024 matrices from main().
    int nx = 1024;

    // Coordinates of the element this thread LOADS (coalesced: tIdx is
    // the fast-varying index of the global read).
    int iIn = tIdx + blockIdx.x*bSizex;
    int jIn = tIdy + blockIdx.y*bSizey;
    s_A[tIdy][tIdx] = A[iIn + jIn*nx];

    // Barrier: all tile loads must complete before any thread reads the
    // tile back transposed.
    __syncthreads();

    // Coordinates of the element this thread STORES. Bug fix: the block
    // indices must SWAP (the whole tile moves to its transposed
    // position) while the thread indices stay, so consecutive tIdx
    // values still write consecutive addresses (coalesced). The original
    // wrote the tile back to its own location, which only transposed the
    // diagonal blocks correctly.
    int iOut = tIdx + blockIdx.y*bSizex;
    int jOut = tIdy + blockIdx.x*bSizey;
    AT[iOut + jOut*nx] = s_A[tIdx][tIdy];
}
// Benchmark driver: adds two 1024x1024 matrices on the host and on the
// device (1-D and 2-D kernels), then times two transpose kernels, printing
// wall-clock time and effective bandwidth for each stage.
// NOTE(review): no CUDA call here checks its cudaError_t return value, and
// kernel launches are not followed by cudaGetLastError(); failures would go
// unnoticed. clock() measures CPU time, which is a coarse proxy for device
// work -- cudaEvent timing would be more accurate. Worth fixing, but left
// untouched here.
int main(int argc, char **argv) {
// dimensions of the matrices (square, 1024x1024; the kernels hard-code
// this pitch)
int nx = 1024;
int ny = 1024;
int N = nx*ny;
//seed RNG (seeded from the clock, so runs are not reproducible)
double seed = clock();
srand48(seed);
double *h_a, *h_b, *h_c; //host vectors
// allocate storage on the host (malloc results are not checked)
h_a = (double *) malloc(N*sizeof(double));
h_b = (double *) malloc(N*sizeof(double));
h_c = (double *) malloc(N*sizeof(double));
//populate a and b with uniform [0,1) doubles
for (int n=0;n<N;n++) {
h_a[n] = drand48();
h_b[n] = drand48();
}
// --- host baseline: c = a + b, timed with clock() ---
double hostStart = clock();
// c = a + b
for (int j=0;j<ny;j++) {
for (int i=0;i<nx;i++) {
int id = i+j*nx;
h_c[id] = h_a[id] + h_b[id];
}
}
double hostEnd = clock();
double hostTime = (hostEnd - hostStart)/(double) CLOCKS_PER_SEC;
// bytes moved by the add: two input arrays read, one output written
size_t inputMem = 2*N*sizeof(double); //number of bytes the operation inputs
size_t outMem = 1*N*sizeof(double); //number of bytes the operation outputs
size_t totalMem = (inputMem+outMem);
printf("The host took %f seconds to add a and b \n", hostTime);
printf("The efective bandwidth of the host was: %f GB/s\n", totalMem/(1E9*hostTime));
printf("\n");
//Device arrays
double *d_a, *d_b, *d_c;
//allocate memory on the Device with cudaMalloc (returns not checked)
cudaMalloc(&d_a,N*sizeof(double));
cudaMalloc(&d_b,N*sizeof(double));
cudaMalloc(&d_c,N*sizeof(double));
// --- host-to-device copy, timed ---
double copyStart = clock();
//copy data from the host to the device
cudaMemcpy(d_a,h_a,N*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,N*sizeof(double),cudaMemcpyHostToDevice);
double copyEnd = clock();
double copyTime = (copyEnd-copyStart)/(double)CLOCKS_PER_SEC;
printf("It took %f seconds to copy the data to device. \n",copyTime);
printf("The efective bandwidth of the copy was: %f GB/s\n", inputMem/(1E9*copyTime));
printf("\n");
//at this point the data is allocated and populated on the device
// NOTE(review): the comment below says "from command line" but Nthreads
// is actually a hard-coded constant; argv is never read here.
int Nthreads = 32; //get the number of threads per block from command line
// ceil-division so the grid covers all N elements
int Nblocks = (N+Nthreads-1)/Nthreads;
// --- 1-D kernel add, timed (synchronize so the timing is meaningful) ---
double deviceStart = clock();
kernelAddMatrices1D <<<Nblocks , Nthreads >>>(N, d_a, d_b, d_c);
cudaDeviceSynchronize();
double deviceEnd = clock();
double deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The 1D Kernel took %f seconds to add a and b \n", deviceTime);
printf("The efective bandwidth of the 1D kernel was: %f GB/s\n", totalMem/(1E9*deviceTime));
//use 2D thread blocks instead
int Nthreadsx = 32;
int Nthreadsy = 32;
int Nthreadsz = 1;
//declare the size of the block
// Nthreadsx*Nthreadsy*Nthreadsz <= 1024 (hardware limit per block)
dim3 Nthreads3(Nthreadsx,Nthreadsy,Nthreadsz);
//set number of blocks (ceil-division in each dimension)
int Nblocksx = (nx+Nthreadsx-1)/Nthreadsx;
int Nblocksy = (ny+Nthreadsy-1)/Nthreadsy;
int Nblocksz = 1;
dim3 Nblocks3(Nblocksx,Nblocksy,Nblocksz);
// --- 2-D kernel add, timed ---
deviceStart = clock();
kernelAddMatrices2D <<<Nblocks3 , Nthreads3 >>>(N, d_a, d_b, d_c);
cudaDeviceSynchronize();
deviceEnd = clock();
deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The 2D Kernel took %f seconds to add a and b \n", deviceTime);
printf("The efective bandwidth of the 2D kernel was: %f GB/s\n", totalMem/(1E9*deviceTime));
printf("The device was %f times faster\n", hostTime/deviceTime);
// --- device-to-host copy of the result, timed ---
// NOTE(review): h_c is copied back but never compared against the host
// baseline, so correctness of the kernels is not verified here.
copyStart = clock();
cudaMemcpy(h_c,d_c,N*sizeof(double),cudaMemcpyDeviceToHost);
copyEnd = clock();
copyTime = (copyEnd-copyStart)/(double) CLOCKS_PER_SEC;
printf("It took %f seconds to copy the data back to the host. \n",copyTime);
printf("The efective bandwidth of the copy was: %f GB/s\n", outMem/(1E9*copyTime));
// --- transpose v1 (naive, uncoalesced reads), timed ---
deviceStart = clock();
// C = A^T
kernelMatrixTranspose2D_v1 <<<Nblocks3 , Nthreads3 >>>(d_a, d_c);
cudaDeviceSynchronize();
deviceEnd = clock();
deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The v1 tarnspose kernel took %f seconds to add a and b \n", deviceTime);
printf("The efective bandwidth of the v1 transpose kernel was: %f GB/s\n", totalMem/(1E9*deviceTime));
// --- transpose v2 (shared-memory tiled), timed ---
deviceStart = clock();
// C = A^T
kernelMatrixTranspose2D_v2<<<Nblocks3 , Nthreads3 >>>(d_a, d_c);
cudaDeviceSynchronize();
deviceEnd = clock();
deviceTime = (deviceEnd-deviceStart)/(double) CLOCKS_PER_SEC;
printf("The v2 tarnspose kernel took %f seconds to add a and b \n", deviceTime);
printf("The efective bandwidth of the v2 transpose kernel was: %f GB/s\n", totalMem/(1E9*deviceTime));
// release device and host memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
free(h_a);
free(h_b);
free(h_c);
}
|
19,599 | #include <bits/stdc++.h>
using namespace std;
int main()
{
    // Number of rows and the length of each row in the ragged 2-D
    // structure.
    int row = 5;
    int colom[] = { 5, 3, 4, 2, 1 };

    // Build a vector of "row" rows, where row i holds 1..colom[i].
    vector<vector<int> > vec(row);
    for (int i = 0; i < row; i++) {
        int col = colom[i];
        vec[i].resize(col);
        for (int j = 0; j < col; j++)
            vec[i][j] = j + 1;
    }

    // Print each row on its own line, values separated by spaces.
    for (int i = 0; i < row; i++) {
        for (int j = 0; j < vec[i].size(); j++)
            cout << vec[i][j] << " ";
        cout << endl;
    }
}
|
19,600 | #include "includes.h"
__global__ void _bcnn_forward_softmax_layer_kernel(int n, int batch, float *input, float *output) {
    // Numerically-stable softmax (via log-sum-exp) over each row of n
    // values; one thread processes one batch element, indexed by a
    // flattened 2-D grid of 1-D blocks.
    float sum = 0.f;
    float maxf = -INFINITY;
    int b = (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x + threadIdx.x;
    if (b >= batch) {
        return;
    }
    // Find the row maximum for the stability shift.
    // Bug fix: the original stored the running maximum in an int,
    // truncating the float inputs and breaking the shift entirely.
    for (int i = 0; i < n; ++i) {
        float val = input[i + b * n];
        maxf = (val > maxf) ? val : maxf;
    }
    // Sum of exp(x - max); use the float expf rather than double exp.
    for (int i = 0; i < n; ++i) {
        sum += expf(input[i + b * n] - maxf);
    }
    // log-sum-exp of the row; if the sum underflowed to zero, fall back
    // to maxf - 100 so the exponentials below stay finite.
    sum = (sum != 0) ? maxf + logf(sum) : maxf - 100.f;
    for (int i = 0; i < n; ++i) {
        output[i + b * n] = expf(input[i + b * n] - sum);
    }
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.