serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
11,001 | #include "includes.h"
// Applies an open (outflow-only) boundary condition at the inner edge of a
// polar grid. One thread handles one azimuthal cell j; nsec is the number of
// azimuthal sectors per ring, and arrays are indexed as [ring*nsec + sector].
// Ring 0 is the ghost ring, ring 1 (i == 1) is the first active ring.
__global__ void OpenBoundaryKernel (double *Vrad, double *Dens, double *Energy, int nsec, double SigmaMed)
{
    int j = threadIdx.x + blockDim.x*blockIdx.x;
    int i = 1; // first active ring; i-1 is the ghost ring
    if(j < nsec){
        Dens[(i-1)*nsec + j] = Dens[i*nsec + j]; // copy first ring into ghost ring
        Energy[(i-1)*nsec + j] = Energy[i*nsec + j];
        // Block inflow: zero the radial velocity when material would move
        // outward across the boundary, or when the local density has dropped
        // below the reference surface density SigmaMed.
        if (Vrad[(i+1)*nsec + j] > 0.0 || (Dens[i*nsec + j] < SigmaMed))
            Vrad[i*nsec + j] = 0.0; // we just allow outflow [inwards]
        else
            Vrad[i*nsec +j] = Vrad[(i+1)*nsec + j];
    }
} |
11,002 | // Name: Jin Pyo Jeon
// CPSC375
// Times:
// B T Time
// 100 16 13.84
// 100 32 7.43
// 100 40 6.27
// 100 64 3.86
// 100 128 3.1
// 100 256 2.92
// 100 1024 2.83
// 1000 32 5.99
// 1000 256 2.76
// 1000 512 2.73
// 1000 1024 2.83
// 1024 1024 2.75
// 10000 32 5.78
// 10000 128 2.74
// 10000 200 2.94
// 10000 256 2.56
// 10000 512 2.73
// 10000 1024 2.88
// 32768 126 2.66
// 32768 256 2.65
// 32768 512 2.68
// 65535 32 5.59
// 65535 128 2.64
// 65535 256 2.63
// 65535 400 2.92
// 65535 512 2.69
// 65535 768 3.0
// 65535 1024 3.8
// Discussion: From these experimental value, it seems that the optimal value for block size and
// thread size is 10000 blocks and 256 threads per block. Beyond the most optimal, one thing that
// is evident is the fact that the optimal number of threads must be divisible by the warp size;
// in every instance where the thread number is not divisible by 32, the time suffered compared
// to the times adjacent to it. Furthermore, it seems that the size of the number range that
// each thread is assigned to does not correlate linearly for the most part.
// For example, the B/T pair (65535, 512) and (10000, 128) have similar times
// despite the threads of first pair checking only 3 numbers and the latter around 78.
// Furthermore, runs with small thread sizes suffered much more significant delay than others,
// probably due to the fact that with small thread sizes ( t < 128 ), 8 blocks (per SM)
// did not fill out the maximum number of threads possible (2048) and thus failed to fully
// use the GPU.
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define N 100000000
// Checks [0, n) for a Collatz "counterexample": a number whose sequence does
// not reach 1 within 1000 steps. A suspected counterexample is recorded in
// *counterEx (unsynchronized write; any one hit is enough for this check).
//
// Launch: <<<blockNum, T>>> for any T. Each block covers a contiguous span of
// numPerBlock values and each thread a sub-span of numPerThread values.
__global__ void testCollatz(long n, long blockNum, long* counterEx) {
    long numPerBlock = ceil(n * 1.0 / blockNum);
    long numPerThread = ceil(n * 1.0 / blockNum / blockDim.x);
    long lowRange = (numPerBlock * blockIdx.x) + (threadIdx.x * numPerThread);
    long highRange = (numPerBlock * blockIdx.x) + ((threadIdx.x + 1) * numPerThread);
    long i;
    // Bound the walk by the parameter n. (The original compared against the
    // macro N, which silently ignored the value actually passed in.)
    for (i = lowRange; i < highRange && i < n; i++) {
        long temp = i;
        int iteration = 0;
        if (temp == 0) continue; // 0 never reaches 1; skip it
        while (temp != 1) {
            iteration++;
            if (iteration >= 1000) {
                // Give up after 1000 steps and report i as a suspect.
                *counterEx = i;
                break;
            }
            if (temp % 2 == 0) temp = temp / 2;
            else temp = (3 * temp) + 1;
        }
    }
}
// Usage: prog B T — launches testCollatz over [0, N) with B blocks of T
// threads, reports the elapsed time or any counterexample found.
int main(int argc, char**argv){
    long B, T;
    long *h_counterEx, *d_counterEx;
    // Both a block count and a thread count are required. (The original
    // accepted argc == 2 and then read argv[2], dereferencing the NULL
    // terminator of argv when only one argument was supplied.)
    if (argc >= 3) {
        B = strtol(argv[1], NULL, 10);
        T = strtol(argv[2], NULL, 10);
    } else {
        return -1;
    }
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // *h_counterEx stays -1 unless the kernel records a suspect value.
    h_counterEx = (long*)malloc(sizeof(long));
    *h_counterEx = -1;
    cudaMalloc((void**) &d_counterEx, sizeof(long));
    cudaMemcpy(d_counterEx, h_counterEx, sizeof(long), cudaMemcpyHostToDevice);
    testCollatz<<<B,T>>>(N, B, d_counterEx);
    // Blocking copy doubles as synchronization for the timing below.
    cudaMemcpy(h_counterEx, d_counterEx, sizeof(long), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime = -1;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    if (*h_counterEx == -1) {
        printf("Verifying %ld took %f s\n", (long) N, elapsedTime / 1000.0);
    } else {
        printf("Found a counterexample: %ld\n", *h_counterEx);
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Release both buffers (the original leaked them).
    cudaFree(d_counterEx);
    free(h_counterEx);
    return 0;
}
|
11,003 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <sys/time.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// im2col: unrolls convolution windows so that convolution becomes a matrix
// product. One thread handles one output position (i, j) and produces one
// row vector of `col` of length kernelSize*kernelSize*depth; samples that
// fall into the zero-padding border are written as 0.
__global__
void im2col(float *A, int inputSize, int depth, int kernelSize, int stride, int pad, float *col, int outputSize) {
    int i = threadIdx.x + blockDim.x * blockIdx.x;
    int j = threadIdx.y + blockDim.y * blockIdx.y;
    if( !(i < outputSize) || !(j < outputSize) ) return; // outside the output grid
    // Top-left corner of this window in padded coordinates.
    int Ai = i * stride;
    int Aj = j * stride;
    for( int d = 0; d < depth; d++ ) {
        for(int k = 0; k < kernelSize; k++ ) {
            for( int l = 0; l < kernelSize; l++) {
                // (Ai+k-pad, Aj+l-pad) is the sample position in the unpadded input.
                if( Ai + k - pad < 0 || !(Ai + k - pad < inputSize) || Aj + l - pad < 0 || !( Aj + l - pad < inputSize)) {
                    // Sample lies in the padding border: contribute zero.
                    //col[ d*outputSize*outputSize*kernelSize*kernelSize + (i*outputSize + j)*kernelSize*kernelSize + k*kernelSize + l] = 0;
                    col[ (i*outputSize + j)*(kernelSize*kernelSize*depth)+ d*kernelSize*kernelSize + k*kernelSize + l] = 0;
                }
                else col[ (i*outputSize + j)*(kernelSize*kernelSize*depth)+ d*kernelSize*kernelSize + k*kernelSize + l] \
                    = A[d*inputSize*inputSize + (Ai + k - pad)*inputSize + Aj + l - pad ];
            }
        }
    }
}
// Computes the matrix-vector product C = A * B, where A is m x n and B is
// n x 1. One thread produces one entry of C; launch with at least m threads.
__global__
void gemm(float *A, float *B, float *C, int m, int n) {
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= m) return;
    float acc = 0;
    for (int k = 0; k < n; ++k)
        acc += A[row*n + k] * B[k];
    C[row] = acc;
}
// Driver: runs im2col + gemm convolution of one 7x7x3 input against three
// all-ones 3x3x3 kernels at strides 1, 2, 3, and writes each result matrix
// to a .m file.
int main(int argc, char * argv[] ) {
    // input: inputSize*inputSize*depth
    // kernel: kernelSize*kernelSize*depth
    // output: outputSize*outputSize
    int inputSize = 7;
    int depth = 3;
    int kernelSize = 3;
    int kernelNum = 3;
    int stride[3] = {1 , 2 , 3 };
    int pad[3] = {0,0,0};
    int outputSize[3];
    // For each stride, compute the padding `pad` required and the resulting
    // output size `outputSize`.
    for(int i = 0; i < kernelNum; i++) {
        if((inputSize - kernelSize)%stride[i] != 0) {
            pad[i] = (stride[i] - ((inputSize - kernelSize)%stride[i])) / 2;
        }
        outputSize[i] = (inputSize - kernelSize + 2*pad[i] ) / stride[i] + 1;
    }
    // ============================= resource allocation =========================
    // ==== host-side allocation and initialization
    // input:A kernel:kernel output:B
    float *A, *kernel[3], *B[3];
    A = (float *)malloc(sizeof(float)*inputSize*inputSize*depth);
    for(int i = 0; i < 3; i++) {
        kernel[i] = (float *)malloc(sizeof(float)*kernelSize*kernelSize*depth);
        B[i] = (float *)malloc(sizeof(float)*outputSize[i]*outputSize[i]*depth);
    }
    // Initialize input A: each channel holds 0..inputSize^2-1.
    for(int d = 0; d < depth; d++) {
        for(int i=0; i<inputSize*inputSize; i++) {
            A[d*inputSize*inputSize + i] = i;
        }
    }
    // Initialize the kernels (all ones).
    for(int i = 0; i < 3; i++){
        for(int j = 0; j < kernelSize*kernelSize*depth; j++) {
            kernel[i][j] = 1;
        }
    }
    // ==== device-side allocation and initialization
    float *d_A, *d_kernel[3], *d_B[3], *d_col[3];
    cudaMalloc((void**)&d_A,sizeof(float)*inputSize*inputSize*depth);
    for(int i = 0; i < 3; i++) {
        cudaMalloc((void**)&d_kernel[i], sizeof(float)*kernelSize*kernelSize*depth);
        cudaMalloc((void**)&d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth);
        cudaMalloc((void**)&d_col[i], sizeof(float)*outputSize[i]*outputSize[i]*kernelSize*kernelSize*depth);
    }
    cudaMemcpy(d_A,A,sizeof(float)*inputSize*inputSize*depth,cudaMemcpyHostToDevice);
    for(int i = 0; i < 3; i++) {
        cudaMemcpy(d_kernel[i],kernel[i],sizeof(float)*kernelSize*kernelSize*depth,cudaMemcpyHostToDevice);
    }
    // ============================= kernel launches =========================
    // ===== launch im2col
    for( int i = 0; i < 3; i++ ) {
        int blockx = (int) (log2(outputSize[i])+ 1);
        int blocky = blockx;
        dim3 Block(blockx,blocky);
        // NOTE(review): the grid is sized from inputSize rather than
        // outputSize; since outputSize <= inputSize this over-provisions
        // threads (the excess exit on the kernel's bounds check).
        dim3 Grid((inputSize+Block.x-1) / Block.x,(inputSize+Block.y-1) / Block.y );
        im2col <<< Grid, Block >>> (d_A,inputSize,depth,kernelSize,stride[i],pad[i],d_col[i],outputSize[i]);
    }
    cudaDeviceSynchronize();
    // ==== launch gemm (timed together with the result copy-back)
    struct timeval start, end;
    gettimeofday( &start, NULL );
    for( int i = 0; i < 3; i++ ) {
        int blockx = (int) (log2(outputSize[i]*outputSize[i])+ 1);
        dim3 Block(blockx);
        dim3 Grid((outputSize[i]*outputSize[i]+Block.x-1) / Block.x);
        gemm <<< Grid, Block >>> (d_col[i],d_kernel[i],d_B[i],outputSize[i]*outputSize[i],kernelSize*kernelSize*depth);
    }
    // Copy the results back to the host.
    for( int i = 0; i < 3; i++ ) {
        cudaMemcpy(B[i],d_B[i],sizeof(float)*outputSize[i]*outputSize[i]*depth,cudaMemcpyDeviceToHost);
    }
    gettimeofday( &end, NULL );
    int timeuse = 1000000 * ( end.tv_sec - start.tv_sec ) + end.tv_usec - start.tv_usec;
    printf("total time is %f ms\n", timeuse/(float)1000);
    // Write each result as a MATLAB/Octave matrix file.
    FILE *b[3];
    b[0] = fopen("matrixB21.m", "wb");
    b[1] = fopen("matrixB22.m", "wb");
    b[2] = fopen("matrixB23.m", "wb");
    for(int k = 0; k < 3; k++ ) {
        fprintf(b[k], "B = [ \n");
        for (int i = 0; i < outputSize[k]; i++)
        {
            for (int j = 0; j < outputSize[k]; j++)
                fprintf(b[k], "%f ", B[k][i * outputSize[k] + j]);
            fprintf(b[k], "\n");
        }
        fprintf(b[k], "];");
    }
    // ============================= cleanup =========================
    free(A);
    cudaFree(d_A);
    for(int i = 0; i < 3; i++) {
        free(kernel[i]);
        free(B[i]);
        cudaFree(d_B[i]);
        cudaFree(d_kernel[i]);
        cudaFree(d_col[i]);
        fclose(b[i]);
    }
    return 0;
} |
11,004 | #include <cstdio>
#include <cstdlib>
#include <vector>
// Enumerates every CUDA device visible to the runtime and returns their
// property records; an empty vector means no device was found.
std::vector< cudaDeviceProp > get_cuda_device() {
    int count = -1;
    cudaGetDeviceCount(&count);
    std::vector< cudaDeviceProp > devices;
    for (int dev = 0; dev < count; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        devices.push_back(prop);
    }
    return devices;
}
// Maps a cudaComputeMode enum value to a human-readable name.
const char * compute_mode( cudaComputeMode m) {
    if (m == cudaComputeModeDefault)          return "default";
    if (m == cudaComputeModeExclusive)        return "exclusive-thread";
    if (m == cudaComputeModeProhibited)       return "prohibited";
    if (m == cudaComputeModeExclusiveProcess) return "exclusive-process";
    return "unknown";
}
// Prints a detailed report for every CUDA device on the system.
// Fixes vs. original: several printf conversions did not match the field
// types in cudaDeviceProp (`%ld` for int fields memoryBusWidth /
// memoryClockRate / l2CacheSize, `%d` for the size_t field
// sharedMemPerMultiprocessor) — undefined behavior through varargs; size_t
// fields now use %zu; "ECC::" and "cudeHostRegister" typos corrected.
int main() {
    std::vector< cudaDeviceProp > devs = get_cuda_device();
    for ( cudaDeviceProp dev: devs) {
        std::printf("*** general device informations ***\n");
        std::printf("name: %s\n", dev.name);
        std::printf("compute capability: %d.%d\n", dev.major, dev.minor);
        std::printf("integrated: %s\n", dev.integrated ? "true" : "false");
        std::printf("multi-GPU board: %s\n", dev.isMultiGpuBoard ? "true" : "false");
        std::printf("compute mode: %s\n", compute_mode( static_cast< cudaComputeMode >( dev.computeMode) ) );
        std::printf("clock rate: %d MHz\n", dev.clockRate / 1000);
        std::printf("async engine count: %d\n", dev.asyncEngineCount);
        std::printf("kernel execution timeout: %s\n", dev.kernelExecTimeoutEnabled ? "enabled" : "disabled");
        std::printf("stream priorities: %s\n", dev.streamPrioritiesSupported ? "supported" : "not supported");
        std::printf("native atomic operations between device and host: %s\n", dev.hostNativeAtomicSupported ? "supported" : "not supported");
        std::printf("Tesla device with TCC driver: %s\n", dev.tccDriver ? "true" : "false");
        std::printf("ratio of single and double precision performance: %d\n", dev.singleToDoublePrecisionPerfRatio);
        std::printf("PCI bus ID: %d\n", dev.pciBusID);
        std::printf("PCI device ID: %d\n", dev.pciDeviceID);
        std::printf("PCI domain ID: %d\n", dev.pciDomainID);
        std::printf("\n*** memory informations ***\n");
        std::printf("total global memory: %zu MB\n", dev.totalGlobalMem / (1024*1024) );
        std::printf("total constant memory: %zu kB\n", dev.totalConstMem / (1024) );
        std::printf("global L1 cache: %s\n", dev.globalL1CacheSupported ? "supported" : "not supported");
        std::printf("local L1 cache: %s\n", dev.localL1CacheSupported ? "supported" : "not supported");
        std::printf("L2 cache size: %d MB\n", dev.l2CacheSize / (1024*1024) );
        std::printf("memory bus width: %d bit\n", dev.memoryBusWidth);
        std::printf("memory clock rate: %d MHz\n", dev.memoryClockRate / 1000);
        std::printf("managed memory: %s\n", dev.managedMemory ? "supported" : "not supported");
        std::printf("unified addressing: %s\n", dev.unifiedAddressing ? "supported" : "not supported");
        std::printf("ECC: %s\n", dev.ECCEnabled ? "enabled" : "disabled");
        std::printf("max memory pitch: %zu MB\n", dev.memPitch / (1024*1024) );
        std::printf("texture alignment: %zu\n", dev.textureAlignment);
        std::printf("texture pitch alignment: %zu\n", dev.texturePitchAlignment);
        std::printf("can map host memory with cudaHostAlloc/cudaHostGetDevicePointer: %s\n", dev.canMapHostMemory ? "true" : "false");
        std::printf("access pageable memory concurrently without cudaHostRegister: %s\n", dev.pageableMemoryAccess ? "supported" : "not supported");
        std::printf("\n*** multiprocessing informations ***\n");
        std::printf("multiprocessor count: %d\n", dev.multiProcessorCount);
        std::printf("registers per block: %d\n", dev.regsPerBlock);
        std::printf("threads in warp: %d\n", dev.warpSize);
        std::printf("max threads per multiprocessor: %d\n", dev.maxThreadsPerMultiProcessor);
        std::printf("max threads per block: %d\n", dev.maxThreadsPerBlock);
        std::printf("max thread dimensions: {%d, %d, %d}\n", dev.maxThreadsDim[0], dev.maxThreadsDim[1], dev.maxThreadsDim[2]);
        std::printf("max grid dimensions: {%d, %d, %d}\n", dev.maxGridSize[0], dev.maxGridSize[1], dev.maxGridSize[2]);
        std::printf("concurrent kernels: %s\n", dev.concurrentKernels ? "supported" : "not supported");
        std::printf("registers per multiprocessor: %d\n", dev.regsPerMultiprocessor);
        std::printf("shared memory per multiprocessor: %zu kB\n", dev.sharedMemPerMultiprocessor / 1024);
        std::printf("shared memory per block: %zu kB\n", dev.sharedMemPerBlock / 1024);
        std::printf("access managed memory concurrently with CPU: %s\n", dev.concurrentManagedAccess ? "supported" : "not supported");
        std::printf("\n\n");
    }
    return EXIT_SUCCESS;
}
|
11,005 | /* This program uses the method of reduction to add all elements of an array
*
*/
#include <stdio.h>
// =========== GLOBALS =========================
const int N = 200; // number of elements in array
// this needs to be a power of 2
const int threadsPerBlock = 256;
// Calculate number of blocks needed
const int blocksPerGrid = (N + threadsPerBlock - 1)/threadsPerBlock;
// GPU Kernel: block-level tree reduction (sum).
// Each thread loads one element into shared memory, the block folds the
// values in log2(blockDim.x) halving steps, and thread 0 writes the block's
// sum to *res.
// Requirements: blockDim.x must be a power of 2 (the halving loop assumes
// it). With more than one block each block overwrites *res — acceptable for
// the single-block N used in this program, but not a general multi-block
// reduction.
__global__ void reduce(int *a, int *res){
    // create shared memory for the threads in the block
    __shared__ int cache[threadsPerBlock];
    // get the thread id
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    // index into the cache for this block
    int cacheIndex = threadIdx.x;
    // Guard the global load: N (200) is not a multiple of the block size
    // (256), so the original read past the end of `a`. Out-of-range threads
    // contribute the additive identity instead.
    cache[cacheIndex] = (tid < N) ? a[tid] : 0;
    __syncthreads(); // all loads must land before anyone reads a neighbor
    int i = blockDim.x/2; // only the first half does work each step
    while( i != 0 ){
        if (cacheIndex < i) // make sure we are not doing a bogus add
            cache[cacheIndex] += cache[cacheIndex + i];
        __syncthreads(); // barrier outside the if: all threads reach it
        i /= 2;
    }
    if (cacheIndex == 0) // only one thread needs to publish the result
        *res = cache[0];
}
// Fills an N-element array with 2s, reduces it on the GPU, and prints the
// resulting sum.
int main(void){
    // Host and device buffers for the input array and the reduced sum.
    int *h_vals, *h_sum;
    int *dev_vals, *dev_sum;
    h_vals = (int*)malloc(N*sizeof(int));
    h_sum  = (int*)malloc(sizeof(int));
    cudaMalloc((void**)&dev_vals, N * sizeof(int));
    cudaMalloc((void**)&dev_sum, sizeof(int));
    // Every element is 2.
    for (int i = 0; i < N; ++i)
        h_vals[i] = 2;
    // Stage the inputs on the device.
    cudaMemcpy(dev_vals, h_vals, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_sum, h_sum, sizeof(int), cudaMemcpyHostToDevice);
    // Reduce, then fetch the result (blocking copy synchronizes).
    reduce<<<blocksPerGrid,threadsPerBlock>>>(dev_vals, dev_sum);
    cudaMemcpy(h_sum, dev_sum, sizeof(int), cudaMemcpyDeviceToHost);
    printf("Sum: %d\n", *h_sum);
    // Release everything.
    cudaFree(dev_vals);
    cudaFree(dev_sum);
    free(h_vals);
    free(h_sum);
}
|
11,006 | #include "cuda.h"
#include "limits.h"
#include "math.h"
#include "stdio.h"
#include "stdlib.h"
// Definitions
#define MAX_RAND 2
#define DEFAULT_NUM_ELEMENTS 16777216
#define BLOCK_SIZE 512
#define DOUBLE_BLOCK 1024
#define LOG_NUM_BANKS 4
#define CONFLICT_FREE_OFFSET(x) ((x) >> LOG_NUM_BANKS)
// Work-efficient (Blelloch) exclusive scan of one DOUBLE_BLOCK-element tile
// per block, with CONFLICT_FREE_OFFSET padding applied to shared-memory
// indices. Each thread handles two elements (ai and bi = ai + BLOCK_SIZE).
// The tile's total is written to d_sums[blockIdx.x] so a higher scan level
// can propagate it; tail elements past N are treated as 0.
__global__ void prescanKernel(float* d_in, float* d_out, float* d_sums,
                              unsigned int N) {
  extern __shared__ float s_data[];
  // Padded index of the last element of the tile in shared memory.
  unsigned int shared_end =
      DOUBLE_BLOCK + CONFLICT_FREE_OFFSET(DOUBLE_BLOCK) - 2;
  // Indexing
  unsigned int offset = 1;
  unsigned int ai = threadIdx.x;               // first element of this thread
  unsigned int bi = threadIdx.x + BLOCK_SIZE;  // second element of this thread
  unsigned int ag = ai + DOUBLE_BLOCK * blockIdx.x; // global index of ai
  unsigned int bg = bi + DOUBLE_BLOCK * blockIdx.x; // global index of bi
  unsigned int bankOffsetA = CONFLICT_FREE_OFFSET(ai);
  unsigned int bankOffsetB = CONFLICT_FREE_OFFSET(bi);
  // Load data into shared memory (zero-fill past the end of the input)
  s_data[ai + bankOffsetA] = (ag < N) ? d_in[ag] : 0.f;
  s_data[bi + bankOffsetB] = (bg < N) ? d_in[bg] : 0.f;
  // Up-sweep: build sum in place up the tree
  for (unsigned int d = BLOCK_SIZE; d > 0; d >>= 1) {
    __syncthreads();
    if (threadIdx.x < d) {
      // These ai/bi intentionally shadow the outer ones: they address the
      // pair being combined at this tree level.
      unsigned int ai = offset * (2 * threadIdx.x + 1) - 1;
      unsigned int bi = offset * (2 * threadIdx.x + 2) - 1;
      ai += CONFLICT_FREE_OFFSET(ai);
      bi += CONFLICT_FREE_OFFSET(bi);
      s_data[bi] += s_data[ai];
    }
    offset <<= 1;
  }
  // Write the last element of shared memory to the auxilary array and clear it
  if (threadIdx.x == 0) {
    d_sums[blockIdx.x] = s_data[shared_end];
    s_data[shared_end] = 0.f;
  }
  // Down-sweep: traverse down the tree and build the exclusive scan
  for (unsigned int d = 1; d <= BLOCK_SIZE; d <<= 1) {
    offset >>= 1;
    __syncthreads();
    if (threadIdx.x < d) {
      unsigned int ai = offset * (2 * threadIdx.x + 1) - 1;
      unsigned int bi = offset * (2 * threadIdx.x + 2) - 1;
      ai += CONFLICT_FREE_OFFSET(ai);
      bi += CONFLICT_FREE_OFFSET(bi);
      float temp = s_data[ai];
      s_data[ai] = s_data[bi];
      s_data[bi] += temp;
    }
  }
  // Write results to global memory
  __syncthreads();
  if (ag < N) d_out[ag] = s_data[ai + bankOffsetA];
  if (bg < N) d_out[bg] = s_data[bi + bankOffsetB];
}
// Adds the scanned per-tile sums back into each DOUBLE_BLOCK-element tile.
// Thread layout mirrors prescanKernel: one thread handles elements i and
// i + BLOCK_SIZE of its tile; d_sums[blockIdx.x] is the offset for the tile.
__global__ void additionKernel(float* d_in, float* d_out, float* d_sums,
                               unsigned int N) {
  unsigned int i = DOUBLE_BLOCK * blockIdx.x + threadIdx.x;
  if (i >= N) return;
  d_out[i] = d_in[i] + d_sums[blockIdx.x];
  // The partner element lives BLOCK_SIZE further on and can fall past the
  // end of a short final tile; it needs its own bounds check (the original
  // wrote out of bounds here whenever N was not a multiple of DOUBLE_BLOCK).
  if (i + BLOCK_SIZE < N)
    d_out[i + BLOCK_SIZE] = d_in[i + BLOCK_SIZE] + d_sums[blockIdx.x];
}
// Runs the full multi-level exclusive scan on the device and copies the
// result back to h_out. d_arr[0] is the data array; d_arr[i+1] receives the
// per-block sums produced at level i. dur_ex receives the kernel-only time,
// dur_in the time including the host<->device copies (both in ms).
void prescanOnDevice(float* h_in, float* h_out, float** d_arr, unsigned int N,
                     int tree_depth, unsigned int* lengths, dim3* dimBlock,
                     dim3* dimGrid, unsigned int shared_size, float& dur_ex,
                     float& dur_in) {
  // Setup timing
  cudaEvent_t start_ex, end_ex, start_in, end_in;
  cudaEventCreate(&start_ex);
  cudaEventCreate(&end_ex);
  cudaEventCreate(&start_in);
  cudaEventCreate(&end_in);
  // Copy host array to device (inclusive timing starts here)
  cudaEventRecord(start_in, 0);
  cudaMemcpy(d_arr[0], h_in, lengths[0] * sizeof(float),
             cudaMemcpyHostToDevice);
  // Perform prescan on device:
  // going up, scan each level in place and emit its block sums to the next;
  cudaEventRecord(start_ex, 0);
  for (int i = 0; i < tree_depth; i++)
    prescanKernel <<<dimGrid[i], dimBlock[i], shared_size>>>
        (d_arr[i], d_arr[i], d_arr[i + 1], lengths[i]);
  // coming back down, add each level's scanned sums into the level below.
  for (int i = tree_depth - 2; i >= 0; i--)
    additionKernel <<<dimGrid[i], dimBlock[i]>>>
        (d_arr[i], d_arr[i], d_arr[i + 1], lengths[i]);
  cudaEventRecord(end_ex, 0);
  cudaEventSynchronize(end_ex);
  // Copy device array back to host
  cudaMemcpy(h_out, d_arr[0], lengths[0] * sizeof(float),
             cudaMemcpyDeviceToHost);
  cudaEventRecord(end_in, 0);
  cudaEventSynchronize(end_in);
  // Calculate durations
  cudaEventElapsedTime(&dur_ex, start_ex, end_ex);
  cudaEventElapsedTime(&dur_in, start_in, end_in);
  // Cleanup timing
  cudaEventDestroy(start_ex);
  cudaEventDestroy(end_ex);
  cudaEventDestroy(start_in);
  cudaEventDestroy(end_in);
}
// Reference exclusive (prefix) scan on the CPU, timed with CUDA events so
// the duration is directly comparable with the GPU path. dur_cpu receives
// the elapsed time in milliseconds.
void prescanOnHost(float* h_in, float* h_ref, unsigned int N, float& dur_cpu) {
  cudaEvent_t begin, finish;
  cudaEventCreate(&begin);
  cudaEventCreate(&finish);
  cudaEventRecord(begin, 0);
  // Exclusive scan: element k holds the sum of h_in[0..k-1].
  h_ref[0] = 0;
  for (unsigned int k = 1; k < N; k++)
    h_ref[k] = h_ref[k - 1] + h_in[k - 1];
  cudaEventRecord(finish, 0);
  cudaEventSynchronize(finish);
  cudaEventElapsedTime(&dur_cpu, begin, finish);
  cudaEventDestroy(begin);
  cudaEventDestroy(finish);
}
// Counts the elements where h_out and h_ref differ by more than eps.
// Returns the number of mismatches (0 means the arrays agree within eps).
unsigned int checkResults(float* h_out, float* h_ref, unsigned int N,
                          float eps) {
  unsigned int nDiffs = 0;
  for (unsigned int i = 0; i < N; i++) {
    // fabsf keeps the comparison in float; the original used abs(), which
    // can resolve to the integer overload (with <math.h>/C linkage) and
    // silently truncate sub-1.0 differences to zero.
    float delta = fabsf(h_out[i] - h_ref[i]);
    if (delta > eps) nDiffs++;
  }
  return nDiffs;
}
// Allocates `size` bytes of page-locked (pinned) host memory; terminates
// the program when the allocation fails.
float* allocateHostArray(unsigned int size) {
  float* buffer = NULL;
  if (cudaMallocHost(&buffer, size) != cudaSuccess) {
    printf("Memory allocation on the host was unsuccessful.\n");
    exit(EXIT_FAILURE);
  }
  return buffer;
}
// Allocates `size` bytes of device (global) memory; terminates the program
// when the allocation fails.
float* allocateDeviceArray(unsigned int size) {
  float* buffer = NULL;
  if (cudaMalloc(&buffer, size) != cudaSuccess) {
    printf("Memory allocation on the device was unsuccessful.\n");
    exit(EXIT_FAILURE);
  }
  return buffer;
}
// Prints the command-line usage string and terminates the program.
void exitUsage() {
  fputs("Usage: ./p2 N [dur_max]\n", stdout);
  exit(EXIT_SUCCESS);
}
// Parses the command line: optional element count N (default
// DEFAULT_NUM_ELEMENTS) and optional maximum benchmark duration in seconds
// (default 1 s), stored in dur_max as milliseconds. Malformed input exits
// via exitUsage().
void parseInput(int argc, char** argv, unsigned int& N, float& dur_max) {
  dur_max = 1000.f; // default: 1 second, stored in ms
  if (argc == 1) {
    N = DEFAULT_NUM_ELEMENTS;
    return;
  }
  if (argc > 3) exitUsage();
  if (sscanf(argv[1], "%u", &N) != 1) exitUsage();
  if (argc == 3) {
    if (sscanf(argv[2], "%f", &dur_max) != 1) exitUsage();
    dur_max *= 1000; // seconds -> milliseconds
  }
}
// Benchmarks the GPU multi-level prescan against a CPU reference scan,
// repeating each until dur_max total milliseconds have elapsed, then
// verifies the results and prints timing statistics.
// Fixes vs. original: pinned host buffers are released with cudaFreeHost
// (they were passed to cudaFree, which is for device allocations); the
// min-duration sentinels use a representable float instead of 1e99 (which
// is not representable as float); unsigned values print with %u.
int main(int argc, char** argv) {
  unsigned int N;
  float dur_max;
  parseInput(argc, argv, N, dur_max);
  // Setup timing accumulators
  int nruns_gpu = 0;
  int nruns_cpu = 0;
  float dur_ex, dur_in, dur_cpu;
  float dur_ex_total = 0.f;
  float dur_in_total = 0.f;
  float dur_cpu_total = 0.f;
  float dur_ex_min = 1e30f; // large finite float sentinel
  float dur_in_min = 1e30f;
  float dur_cpu_min = 1e30f;
  // Calculate the tree depth: each scan level reduces the element count by
  // a factor of DOUBLE_BLOCK until one block suffices.
  int tree_depth = 0;
  {
    unsigned int length = N;
    while (length > 1) {
      length = (length + DOUBLE_BLOCK - 1) / DOUBLE_BLOCK;
      tree_depth++;
    }
  }
  // Calculate the lengths of the device arrays (one per level).
  unsigned int lengths[tree_depth + 1];
  lengths[0] = N;
  for (int i = 1; i < tree_depth + 1; i++)
    lengths[i] = (lengths[i - 1] + DOUBLE_BLOCK - 1) / DOUBLE_BLOCK;
  // Setup grid: one block per DOUBLE_BLOCK-element tile at each level.
  dim3 dimBlock[tree_depth];
  dim3 dimGrid[tree_depth];
  for (int i = 0; i < tree_depth; i++) {
    dimBlock[i].x = BLOCK_SIZE;
    dimGrid[i].x = lengths[i + 1];
  }
  // Shared memory per block, including bank-conflict padding.
  unsigned int shared_size =
      (DOUBLE_BLOCK + CONFLICT_FREE_OFFSET(DOUBLE_BLOCK)) * sizeof(float);
  // Allocate pinned host arrays.
  float* h_in = allocateHostArray(N * sizeof(float));
  float* h_out = allocateHostArray(N * sizeof(float));
  float* h_ref = allocateHostArray(N * sizeof(float));
  // Fill host array with small random integers (kept exact in float).
  srand(73);
  for (unsigned int i = 0; i < N; i++)
    h_in[i] = (int)(rand() % MAX_RAND);
  // Allocate one device array per scan level.
  float* d_arr[tree_depth + 1];
  for (int i = 0; i < tree_depth + 1; i++)
    d_arr[i] = allocateDeviceArray(sizeof(float) * lengths[i]);
  // Perform prescan on the device until the time budget is used up.
  while (dur_in_total < dur_max) {
    nruns_gpu++;
    prescanOnDevice(h_in, h_out, d_arr, N, tree_depth, lengths, dimBlock,
                    dimGrid, shared_size, dur_ex, dur_in);
    dur_ex_total += dur_ex;
    dur_in_total += dur_in;
    if (dur_ex < dur_ex_min) dur_ex_min = dur_ex;
    if (dur_in < dur_in_min) dur_in_min = dur_in;
    if (dur_in_total == 0.f) break; // guard against a zero-time loop
  }
  // Perform prescan on the host until the time budget is used up.
  while (dur_cpu_total < dur_max) {
    nruns_cpu++;
    prescanOnHost(h_in, h_ref, N, dur_cpu);
    dur_cpu_total += dur_cpu;
    if (dur_cpu < dur_cpu_min) dur_cpu_min = dur_cpu;
    if (dur_cpu_total == 0.f) break;
  }
  dur_ex = dur_ex_total / nruns_gpu;
  dur_in = dur_in_total / nruns_gpu;
  dur_cpu = dur_cpu_total / nruns_cpu;
  // Compare device and host results with a tolerance scaled to the data.
  float eps = (float)MAX_RAND * 0.001f;
  unsigned int nDiffs = checkResults(h_out, h_ref, N, eps);
  if (nDiffs == 0)
    printf("Test PASSED\n");
  else
    printf("Test FAILED; %u differences\n", nDiffs);
  // Print configuration and timing summary.
  printf("N: %u\n", N);
  printf("Tree depth: %d\n", tree_depth);
  printf("Block sizes: %u", dimBlock[0].x);
  for (int i = 1; i < tree_depth; i++) printf(", %u", dimBlock[i].x);
  printf("\n");
  printf("Grid sizes: %u", dimGrid[0].x);
  for (int i = 1; i < tree_depth; i++) printf(", %u", dimGrid[i].x);
  printf("\n");
  printf("GPU array lengths: %u", lengths[0]);
  for (int i = 1; i < tree_depth + 1; i++) printf(", %u", lengths[i]);
  printf("\n");
  printf("GPU last element: %24.14f\n", h_out[N - 1]);
  printf("CPU last element: %24.14f\n", h_ref[N - 1]);
  printf("Timing results %12s %12s %8s\n", "Average", "Minimum", "Num_runs");
  printf("GPU exclusive: %12.6f %12.6f %8d\n", dur_ex, dur_ex_min, nruns_gpu);
  printf("GPU inclusive: %12.6f %12.6f %8d\n", dur_in, dur_in_min, nruns_gpu);
  printf("CPU: %12.6f %12.6f %8d\n", dur_cpu, dur_cpu_min, nruns_cpu);
  printf("\n");
  // Free arrays: pinned host memory via cudaFreeHost, device via cudaFree.
  cudaFreeHost(h_in);
  cudaFreeHost(h_out);
  cudaFreeHost(h_ref);
  for (int i = 0; i < tree_depth + 1; i++) cudaFree(d_arr[i]);
  return 0;
}
|
11,007 | #include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>
#define C10_WARP_SIZE 32
#define N 32*1024*1024
constexpr int kCUDABlockReduceNumThreads = 512;
// Algorithmic limitation: BlockReduce does two WarpReduce calls, each
// of which reduces C10_WARP_SIZE elements. So, at most
// C10_WARP_SIZE**2 elements can be reduced at a time.
// NOTE: This is >= the max block size on current hardware anyway (1024).
constexpr int kCUDABlockReduceMaxThreads = C10_WARP_SIZE * C10_WARP_SIZE;
// Sums `val` across all threads in a warp; after the loop, lane 0 of the
// warp holds the warp-wide sum (other lanes hold partial garbage).
//
// Assumptions:
// - The size of each block should be a multiple of `C10_WARP_SIZE`
//
// Each pass of the loop pulls `val` from the lane `offset` positions higher
// via __shfl_down_sync and adds it, halving `offset` until all
// C10_WARP_SIZE lanes have been folded into lane 0 (offsets 16, 8, 4, 2, 1
// for a 32-lane warp). The full mask 0xffffffff requires every lane of the
// warp to participate. See the CUDA programming guide's "warp shuffle
// functions" section for the primitive's semantics.
template <typename T>
__inline__ __device__ T WarpReduceSum(T val) {
#pragma unroll
  for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {
    // The original called WARP_SHFL_DOWN, a PyTorch wrapper that is not
    // defined anywhere in this file (only quoted in a comment); this is
    // its non-ROCm expansion, so the file is self-contained.
    val += __shfl_down_sync(0xffffffff, val, offset);
  }
  return val;
}
// Index helpers for a 1-D thread block (x dimension only).
struct Block1D {
  // Linear thread index within the block.
  static __forceinline__ __device__ int Tid() { return threadIdx.x; }
  // Number of complete warps in the block.
  static __forceinline__ __device__ int Warps() {
    return blockDim.x / C10_WARP_SIZE;
  }
};
// Index helpers for a 2-D thread block (x-major linearization).
struct Block2D {
  // Linear thread index within the block.
  static __forceinline__ __device__ int Tid() {
    return threadIdx.x + threadIdx.y * blockDim.x;
  }
  // Number of complete warps in the block.
  static __forceinline__ __device__ int Warps() {
    return blockDim.x * blockDim.y / C10_WARP_SIZE;
  }
};
// Sums `val` across all threads in a block.
//
// Warning: the return value is only valid for thread 0.
// Assumptions:
// - The size of each block should be a multiple of `C10_WARP_SIZE`
// - `shared` should be a pointer to shared memory with size of, at least,
//   `sizeof(T) * number_of_warps`
// A "lane" is one thread's slot within its warp: lane index = tid %
// C10_WARP_SIZE, and a block holds at most 1024 / C10_WARP_SIZE = 32 warps.
template <typename T, typename B = Block1D>
__inline__ __device__ T BlockReduceSum(T val, T* shared) {
  const int tid = B::Tid();            // linear thread id within the block
  const int lid = tid % C10_WARP_SIZE; // lane index within this thread's warp
  const int wid = tid / C10_WARP_SIZE; // warp index within the block
  // Every thread holds one `val` in a register; first reduce within each
  // warp so lane 0 of every warp holds that warp's partial sum.
  val = WarpReduceSum(val);
  __syncthreads(); // prevent races when BlockReduces are called in a row.
  if (lid == 0) {
    shared[wid] = val; // publish each warp's partial sum to shared memory
  }
  __syncthreads();
  // Reload the partial sums into the first warp's lanes; lanes whose slot
  // corresponds to a warp this block does not have read the additive
  // identity instead of stale shared memory.
  val = (tid < B::Warps()) ? shared[lid] : T(0);
  // A final warp-level reduction over the partial sums leaves the
  // block-wide total in thread 0's `val` — callers must read the result
  // from the thread with tid == 0.
  if (wid == 0) {
    val = WarpReduceSum(val);
  }
  return val;
}
// Generic warp-wide reduction: folds `val` across the warp's lanes using
// op.combine / op.warp_shfl_down. After the loop, lane 0 holds the result.
template <typename T, class ReduceOp>
__inline__ __device__ T WarpReduce(T val, const ReduceOp& op) {
#pragma unroll
  for (int offset = (C10_WARP_SIZE >> 1); offset > 0; offset >>= 1) {
    val = op.combine(val, op.warp_shfl_down(val, offset));
  }
  return val;
}
// Generic block-wide reduction with a caller-supplied operator: the same
// two-stage scheme as BlockReduceSum (warp reduce, publish per-warp results
// to `shared`, reduce those in the first warp). `identity_element` fills the
// slots of warps the block does not have. The return value is only valid
// for thread 0.
template <typename T, class ReduceOp, typename B = Block1D>
__inline__ __device__ T
BlockReduce(T val, const ReduceOp& op, const T& identity_element, T* shared) {
  const int tid = B::Tid();
  const int lid = tid % C10_WARP_SIZE; // lane index within the warp
  const int wid = tid / C10_WARP_SIZE; // warp index within the block
  val = WarpReduce(val, op);
  __syncthreads(); // prevent races when BlockReduces are called in a row.
  if (lid == 0) {
    shared[wid] = val; // one partial result per warp
  }
  __syncthreads();
  val = (tid < B::Warps()) ? shared[lid] : identity_element;
  if (wid == 0) {
    val = WarpReduce(val, op);
  }
  return val;
}
// Placeholder entry point: the reduction utilities above are device-side
// helpers; nothing is launched here.
int main(){
    return 0;
} |
11,008 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
#include <math.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
using namespace std;
#ifdef LARGE
#define RENDER_WIDTH 1024
#define RENDER_HEIGHT 1024
#else
#define RENDER_WIDTH 128
#define RENDER_HEIGHT 128
#endif
#define TILE_SIZE 16
#define STACK_CAPACITY 128
#define SHARED_MEM_CAP STACK_CAPACITY * RENDER_WIDTH * RENDER_HEIGHT
#define SPP 1024
#define RR_RATE 0.9
#define PI 3.1415926
// BMP Operation
// Bitmap file header. The 2-byte bfType magic ("BM") is intentionally not a
// member (it is written separately in save_image), which keeps this struct
// free of padding.
typedef struct
{
    unsigned int bfSize;        // total file size in bytes (file bytes 2-5)
    unsigned short bfReserved1; // reserved, must be 0 (bytes 6-7)
    unsigned short bfReserved2; // reserved, must be 0 (bytes 8-9)
    unsigned int bfOffBits;     // offset from file start to pixel data (bytes 10-13)
} _BITMAPFILEHEADER;
// Bitmap info header (BITMAPINFOHEADER layout).
typedef struct
{
    unsigned int biSize;         // size of this structure (file bytes 14-17)
    int biWidth;                 // image width in pixels (bytes 18-21)
    int biHeight;                // image height in pixels (bytes 22-25)
    unsigned short biPlanes;     // number of planes; always 1 for BMP (bytes 26-27)
    unsigned short biBitCount;   // bits per pixel, typically 24 (bytes 28-29)
    unsigned int biCompression;  // compression type; 0 = uncompressed (bytes 30-33)
    unsigned int biSizeImage;    // pixel data size; should equal bfSize - bfOffBits (bytes 34-37)
    int biXPelsPerMeter;         // horizontal resolution, pixels/meter; usually 0 (bytes 38-41)
    int biYPelsPerMeter;         // vertical resolution, pixels/meter; usually 0 (bytes 42-45)
    unsigned int biClrUsed;      // number of palette colors used; 0 = all (bytes 46-49)
    unsigned int biClrImportant; // number of important colors; 0 = all (bytes 50-53)
} _BITMAPINFOHEADER;
// Writes target_img (tightly packed 24-bit pixels, width*height*3 bytes) to
// "RenderResult.bmp". The 54-byte header is emitted as three writes: the
// 2-byte "BM" magic, the file header (bfType excluded from the struct), and
// the 40-byte info header.
// NOTE(review): rows are written with no 4-byte padding; this is only valid
// when width*3 is a multiple of 4 — true for the 128/1024 render widths
// used in this program, but confirm before reusing with other widths.
__host__ void save_image(unsigned char* target_img, int width, int height)
{
    FILE* file_ptr = fopen("RenderResult.bmp", "wb+");
    if (file_ptr == NULL) {
        // The original wrote through a NULL FILE* when the file could not
        // be created.
        fprintf(stderr, "save_image: failed to open RenderResult.bmp\n");
        return;
    }
    unsigned short fileType = 0x4d42; // "BM", little-endian
    _BITMAPFILEHEADER fileHeader;
    _BITMAPINFOHEADER infoHeader;
    fileHeader.bfSize = (width) * (height) * 3 + 54; // pixels + full header
    fileHeader.bfReserved1 = 0;
    fileHeader.bfReserved2 = 0;
    fileHeader.bfOffBits = 54;
    infoHeader.biSize = 40;
    infoHeader.biWidth = width;
    infoHeader.biHeight = height;
    infoHeader.biPlanes = 1;
    infoHeader.biBitCount = 24;
    infoHeader.biCompression = 0;
    infoHeader.biSizeImage = (width) * (height) * 3;
    infoHeader.biXPelsPerMeter = 0;
    infoHeader.biYPelsPerMeter = 0;
    infoHeader.biClrUsed = 0;
    infoHeader.biClrImportant = 0;
    fwrite(&fileType, sizeof(unsigned short), 1, file_ptr);
    fwrite(&fileHeader, sizeof(_BITMAPFILEHEADER), 1, file_ptr);
    fwrite(&infoHeader, sizeof(_BITMAPINFOHEADER), 1, file_ptr);
    fwrite(target_img, sizeof(unsigned char), (height) * (width) * 3, file_ptr);
    fclose(file_ptr);
}
// 3D resources
// One scene triangle. (Name kept as-is — "Trianle" [sic] is referenced
// elsewhere in the file.)
struct Trianle {
    float3 tri_a;       // first vertex
    float3 tri_b;       // second vertex
    float3 tri_c;       // third vertex
    float3 normal_line; // surface normal
    bool is_light;      // true when this triangle is an emitter
    float brdf_rate;    // BRDF coefficient (set to BRDF_rate below) — exact semantics TODO confirm
};
// Number of emissive triangles; they occupy slots 0..LIGHT_TRI_COUNT-1 of the scene array.
#define LIGHT_TRI_COUNT 2
__constant__ float d_light_irradiance = 42; // emitted radiance of the area light
#define BRDF_rate 0.5
__constant__ int OBJ_TRI_COUNT; // total triangle count; set via cudaMemcpyToSymbol in load_obb_file
// Trianle{float3{}, float3{}, float3{}, float3{}, false, BRDF_rate},
// Trianle* h_scene_objects;
__device__ Trianle* d_scene_objects; // device triangle array; pointer published from load_obb_file
// camera position
__constant__ float3 d_camera_position = float3{150, -400, 150};
__constant__ float3 d_camera_direction = float3{0, 1, 0}; // view (forward) axis
__constant__ float3 d_camera_up_direction = float3{0, 0, 1};
__constant__ float3 d_camera_left_direction = float3{1, 0, 0};
// For floating-point precision, use a comparatively large focal length and image plane.
__constant__ float d_camera_focal_length = 200;
__constant__ float d_camera_width = 150;
__constant__ float d_camera_height = 150;
__constant__ float d_camera_pixel_width = 150.0 / RENDER_WIDTH; // image-plane extent of one pixel
__constant__ float d_camera_pixel_height= 150.0 / RENDER_HEIGHT;
// Loads an OFF mesh, rescales/translates it into the scene box, prepends two
// light triangles and two z=0 plane triangles, and uploads everything to the
// GPU (OBJ_TRI_COUNT __constant__ plus the d_scene_objects device array).
// `file_path` names the OFF file; an empty path falls back to the historical
// default "test.off" so existing callers keep their previous behavior
// (the original version always opened "test.off" and ignored the parameter).
__host__ void load_obb_file(string file_path)
{
    if (file_path.empty())
        file_path = "test.off"; // backward-compatible default
    ifstream fin(file_path.c_str());
    if (!fin.is_open()) { // bug fix: fail loudly instead of parsing garbage
        fprintf(stderr, "load_obb_file: cannot open %s\n", file_path.c_str());
        return;
    }
    string head;
    fin >> head; // "OFF" magic token
    int point_count, surface_count, temp;
    fin >> point_count >> surface_count >> temp;
    float3* points = new float3[point_count];
    // Track the axis-aligned bounding box of the raw mesh while reading.
    float min_x = 214748364;
    float min_y = 214748364;
    float min_z = 214748364;
    float max_x = -214748368;
    float max_y = -214748368;
    float max_z = -214748368;
    for (int i = 0; i < point_count; ++i) {
        float x, y, z;
        fin >> x >> y >> z;
        if (min_x > x) {
            min_x = x;
        }
        if (min_y > y) {
            min_y = y;
        }
        if (min_z > z) {
            min_z = z;
        }
        if (max_x < x) {
            max_x = x;
        }
        if (max_y < y) {
            max_y = y;
        }
        if (max_z < z) {
            max_z = z;
        }
        points[i] = make_float3(x, y, z);
    }
    printf("%f %f %f %f %f %f\n", min_x, min_y, min_z, max_x, max_y, max_z);
    // Uniform scale chosen so the mesh's longest axis spans 250 units.
    float x_scale = 250 / (max_x - min_x);
    float y_scale = 250 / (max_y - min_y);
    float z_scale = 250 / (max_z - min_z);
    float scale = 2147483647;
    if (x_scale < scale) {
        scale = x_scale;
    }
    if (y_scale < scale) {
        scale = y_scale;
    }
    if (z_scale < scale) {
        scale = z_scale;
    }
    // printf("%f\n", scale);
    // Shift to the origin, scale, then offset 25 units into the scene box.
    for (int i = 0; i < point_count; ++i) {
        points[i].x -= min_x;
        points[i].x *= scale;
        points[i].x += 25;
        points[i].y -= min_y;
        points[i].y *= scale;
        points[i].y += 25;
        points[i].z -= min_z;
        points[i].z *= scale;
        points[i].z += 25;
    }
    Trianle* h_scene_objects = new Trianle[surface_count + 4];
    // Slots 0-1: the two emissive triangles forming the square area light at z = 300.
    h_scene_objects[0] = Trianle{float3{110, 110, 300}, float3{110, 190, 300}, float3{190, 110, 300}, float3{0, 0, -1}, true, BRDF_rate};
    h_scene_objects[1] = Trianle{float3{190, 110, 300}, float3{110, 190, 300}, float3{190, 190, 300}, float3{0, 0, -1}, true, BRDF_rate}; // bug fix: was terminated with ',' (comma operator)
    // Slots 2-3: the z = 0 plane (labelled "Ceiling" originally; normals point +z).
    h_scene_objects[2] = Trianle{float3{0, 0, 0}, float3{0, 300, 0}, float3{300, 0, 0}, float3{0, 0, 1}, false, BRDF_rate};
    h_scene_objects[3] = Trianle{float3{0, 300, 0}, float3{300, 0, 0}, float3{300, 300, 0}, float3{0, 0, 1}, false, BRDF_rate};
    for (int i = 4; i < surface_count + 4; ++i) {
        int index_x, index_y, index_z;
        fin >> temp >> index_x >> index_y >> index_z; // temp = vertex count per face (assumed 3)
        // Face normal = cross product of two edges sharing vertex y, then normalized.
        float3 yx = make_float3(points[index_y].x - points[index_x].x, points[index_y].y - points[index_x].y, points[index_y].z - points[index_x].z);
        float3 yz = make_float3(points[index_y].x - points[index_z].x, points[index_y].y - points[index_z].y, points[index_y].z - points[index_z].z);
        float3 normal_line = make_float3(yx.y * yz.z - yx.z * yz.y, yx.z * yz.x - yx.x * yz.z, yx.x * yz.y - yx.y * yz.x);
        float normal_length = 1 / sqrt(normal_line.x * normal_line.x + normal_line.y * normal_line.y + normal_line.z * normal_line.z);
        h_scene_objects[i] = Trianle{points[index_x], points[index_y], points[index_z], float3{normal_length * normal_line.x, normal_length * normal_line.y, normal_length * normal_line.z}, false, BRDF_rate};
    }
    fin.close();
    surface_count += 4; // mesh faces + 2 lights + 2 plane triangles
    cudaMemcpyToSymbol(OBJ_TRI_COUNT, &surface_count, sizeof(int));
    cudaError_t cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "copy int launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    // Upload the triangle array and publish its device pointer.
    Trianle* temp_scene_objects;
    cudaMalloc(&temp_scene_objects, sizeof(Trianle) * surface_count);
    cudaMemcpy(temp_scene_objects, h_scene_objects, sizeof(Trianle) * surface_count, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(d_scene_objects, &temp_scene_objects, sizeof(Trianle*));
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "copy array launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    delete[] points;
    delete[] h_scene_objects;
    printf("Load Done.\n");
}
// Scalar triple product vec_a . (vec_b x vec_c): the signed volume of the
// parallelepiped spanned by the three vectors.
__device__ inline float mixed_product(float3 vec_a, float3 vec_b, float3 vec_c)
{
    float cross_x = vec_b.y * vec_c.z - vec_b.z * vec_c.y;
    float cross_y = vec_b.z * vec_c.x - vec_b.x * vec_c.z;
    float cross_z = vec_b.x * vec_c.y - vec_b.y * vec_c.x;
    return vec_a.x * cross_x + vec_a.y * cross_y + vec_a.z * cross_z;
}
// Component-wise vector difference: opr1 - opr2.
__device__ inline float3 sub_float3(float3 opr1, float3 opr2)
{
    float3 result;
    result.x = opr1.x - opr2.x;
    result.y = opr1.y - opr2.y;
    result.z = opr1.z - opr2.z;
    return result;
}
// Scales every component of `vec` by `scalar`.
__device__ inline float3 scalar_mult_float3(float3 vec, float scalar)
{
    float3 result;
    result.x = vec.x * scalar;
    result.y = vec.y * scalar;
    result.z = vec.z * scalar;
    return result;
}
// Dot product of two 3-vectors.
// Marked `inline` for consistency with the sibling float3 helpers above.
__device__ inline float dot(float3 opr1, float3 opr2)
{
    return opr1.x * opr2.x + opr1.y * opr2.y + opr1.z * opr2.z;
}
// Component-wise vector sum: opr1 + opr2.
__device__ inline float3 add_float3(float3 opr1, float3 opr2)
{
    float3 result;
    result.x = opr1.x + opr2.x;
    result.y = opr1.y + opr2.y;
    result.z = opr1.z + opr2.z;
    return result;
}
// Area of a triangle: half the magnitude of the cross product of two edges.
__device__ float size(Trianle triangle)
{
    float3 edge_ab = sub_float3(triangle.tri_b, triangle.tri_a);
    float3 edge_ac = sub_float3(triangle.tri_c, triangle.tri_a);
    float cross_x = edge_ab.y * edge_ac.z - edge_ab.z * edge_ac.y;
    float cross_y = edge_ab.z * edge_ac.x - edge_ab.x * edge_ac.z;
    float cross_z = edge_ab.x * edge_ac.y - edge_ab.y * edge_ac.x;
    return 0.5 * norm3df(cross_x, cross_y, cross_z);
}
// Casts a ray from `src_point` along `direction` (need not be normalized) and
// finds the nearest intersected scene triangle, skipping triangle
// `src_tri_idx` (pass -1 to test all triangles).
// Out-param `hit_obj_idx` receives the hit triangle's index, or -1 for a miss;
// the returned point is the intersection (undefined when hit_obj_idx == -1).
__device__ float3 check_obj_hit(int src_tri_idx, float3 src_point, float3 direction, int& hit_obj_idx)
{
// normalize direction
float div_length = 1 / norm3df(direction.x, direction.y, direction.z);
float3 normal_direction = make_float3(direction.x * div_length, direction.y * div_length, direction.z * div_length);
hit_obj_idx = -1;
float3 hit_point;
float min_distance = 2147483647;
for (int i = 0; i < OBJ_TRI_COUNT; ++i) {
if (i == src_tri_idx) {
continue;
}
// make shadow
// Project the triangle's vertices onto the plane through src_point that is
// perpendicular to the ray; the ray intersects the triangle iff src_point
// lies inside this projected ("shadow") triangle.
Trianle shadow_tri = Trianle{sub_float3(d_scene_objects[i].tri_a, scalar_mult_float3(normal_direction, dot(normal_direction, sub_float3(d_scene_objects[i].tri_a, src_point)))),
sub_float3(d_scene_objects[i].tri_b, scalar_mult_float3(normal_direction, dot(normal_direction, sub_float3(d_scene_objects[i].tri_b, src_point)))),
sub_float3(d_scene_objects[i].tri_c, scalar_mult_float3(normal_direction, dot(normal_direction, sub_float3(d_scene_objects[i].tri_c, src_point)))),
normal_direction};
// check in center
// Inside test: src_point is interior when the three triple products share a sign.
float3 vec_pa = sub_float3(shadow_tri.tri_a, src_point);
float3 vec_pb = sub_float3(shadow_tri.tri_b, src_point);
float3 vec_pc = sub_float3(shadow_tri.tri_c, src_point);
float papb = mixed_product(normal_direction, vec_pa, vec_pb);
float pbpc = mixed_product(normal_direction, vec_pb, vec_pc);
float pcpa = mixed_product(normal_direction, vec_pc, vec_pa);
if ((papb > 0 && pbpc > 0 && pcpa > 0) || (papb < 0 && pbpc < 0 && pcpa < 0)) {
// in center
// get hit point
// get coordinary, reuse vec_pb ,vec_pc
// Solve src_point's barycentric-style coordinates (rate_a, rate_b) in the
// shadow triangle, then map them back onto the original triangle.
vec_pb = sub_float3(shadow_tri.tri_b, shadow_tri.tri_a);
vec_pc = sub_float3(shadow_tri.tri_c, shadow_tri.tri_a);
vec_pa = sub_float3(src_point, shadow_tri.tri_a);
float divider = vec_pb.x * vec_pc.y - vec_pb.y * vec_pc.x;
float rate_a = (vec_pc.y * vec_pa.x - vec_pc.x * vec_pa.y) / divider;
float rate_b = (-vec_pb.y * vec_pa.x + vec_pb.x * vec_pa.y) / divider;
vec_pb = sub_float3(d_scene_objects[i].tri_b, d_scene_objects[i].tri_a);
vec_pc = sub_float3(d_scene_objects[i].tri_c, d_scene_objects[i].tri_a);
vec_pa.x = d_scene_objects[i].tri_a.x + rate_a * vec_pb.x + rate_b * vec_pc.x;
vec_pa.y = d_scene_objects[i].tri_a.y + rate_a * vec_pb.y + rate_b * vec_pc.y;
vec_pa.z = d_scene_objects[i].tri_a.z + rate_a * vec_pb.z + rate_b * vec_pc.z;
// Signed distance along the ray; > 0 means the hit is in front of src_point.
float distance = dot(sub_float3(vec_pa, src_point), normal_direction);
if (distance > 0) {
// ray will hit object
// Keep the closest hit seen so far.
if (distance < min_distance) {
min_distance = distance;
hit_point = vec_pa;
hit_obj_idx = i;
}
}
}
}
return hit_point;
}
// Same ray cast as check_obj_hit, but only reports a hit when the nearest
// intersected triangle is an emissive one; otherwise hit_obj_idx is -1.
__device__ float3 check_light_hit(int src_tri_idx, float3 src_point, float3 direction, int& hit_obj_idx)
{
    float3 hit_point = check_obj_hit(src_tri_idx, src_point, direction, hit_obj_idx);
    bool hit_is_light = (hit_obj_idx > -1) && d_scene_objects[hit_obj_idx].is_light;
    if (!hit_is_light) {
        hit_obj_idx = -1;
    }
    return hit_point;
}
// Per-thread bounce stacks kept in global memory (each thread owns a
// STACK_CAPACITY-sized slice): direct-light term and indirect attenuation
// recorded at each path vertex, folded back together at the end of shade().
__device__ float stack_dir[SHARED_MEM_CAP];
__device__ float stack_indir_rate[SHARED_MEM_CAP];
// Path-traced radiance estimate for a ray that hit triangle `object_idx` at
// `src_point`, arriving from `direction`. Recursion is unrolled into an
// iterative loop using the per-thread global stacks above: each bounce pushes
// its direct-light contribution and attenuation factor, and the final loop
// folds them back from the deepest bounce to the first.
__device__ float shade(int object_idx, float3 src_point, float3 direction, curandState* curand_state)
{
// __shared__ float stack_dir[SHARED_MEM_CAP];
// __shared__ float stack_indir_rate[SHARED_MEM_CAP];
// int stack_size = 0;
float l_dir = 0;
// Each thread's slice of the global stacks, addressed by its pixel position.
int stack_offset = ((blockIdx.y * TILE_SIZE + threadIdx.y) * RENDER_WIDTH + (blockIdx.x * TILE_SIZE + threadIdx.x)) * STACK_CAPACITY;
int stack_ori = stack_offset;
float3 out_direction = direction; // use in BRDF, here is ignored.
float3 ray_src = src_point;
int src_object_idx = object_idx;
while (true) {
// Contribution from the light source.
l_dir = 0;
for (int i = 0; i < LIGHT_TRI_COUNT; ++i) {
// random select a point on light triangle
float rand_x = curand_uniform(curand_state);
float rand_y = curand_uniform(curand_state);
if (rand_x + rand_y > 1) {
// Fold the sample back inside the triangle (uniform over its area).
rand_x = 1 - rand_x;
rand_y = 1 - rand_y;
}
float3 random_point = add_float3(d_scene_objects[i].tri_a, add_float3(scalar_mult_float3(sub_float3(d_scene_objects[i].tri_b, d_scene_objects[i].tri_a), rand_x), scalar_mult_float3(sub_float3(d_scene_objects[i].tri_c, d_scene_objects[i].tri_a), rand_y)));
// test block
// Shadow ray: the light sample contributes only if nothing occludes it.
float3 obj_light_direction = sub_float3(random_point, ray_src);
int test_block_idx;
check_obj_hit(-1, ray_src, obj_light_direction, test_block_idx);
if (test_block_idx == i) {
// Geometry term: |cos@surface * cos@light| / r^4 times light area.
// (r^4 because the two dot products above use the unnormalized direction.)
float direction_length_square = obj_light_direction.x * obj_light_direction.x + obj_light_direction.y * obj_light_direction.y + obj_light_direction.z * obj_light_direction.z;
l_dir += d_light_irradiance * d_scene_objects[src_object_idx].brdf_rate * fabs(dot(d_scene_objects[src_object_idx].normal_line, obj_light_direction) * dot(d_scene_objects[i].normal_line, obj_light_direction))
/ direction_length_square / direction_length_square * size(d_scene_objects[i]);
}
}
// Contribution from other reflectors.
// test Russian Roulette
float rr_result = curand_uniform(curand_state);
if (rr_result < RR_RATE) {
float indir_rate = 0;
// random select a ray from src_point
// Uniform direction on the sphere (cos(theta) uniform in [-1,1], phi in [0,2pi)).
float cosine_theta = 2 * (curand_uniform(curand_state) - 0.5);
float sine_theta = sqrtf(1 - cosine_theta * cosine_theta);
float fai_value = 2 * PI * curand_uniform(curand_state);
float3 ray_direction = make_float3(sine_theta * cosf(fai_value), sine_theta * sinf(fai_value), cosine_theta);
// Flip the sample into the same hemisphere as the outgoing direction.
if (dot(ray_direction, d_scene_objects[src_object_idx].normal_line) * dot(out_direction, d_scene_objects[src_object_idx].normal_line) < 0) {
ray_direction.x *= -1;
ray_direction.y *= -1;
ray_direction.z *= -1;
cosine_theta *= -1;
}
int hit_obj_idx;
float3 hit_point = check_obj_hit(src_object_idx, ray_src, ray_direction, hit_obj_idx);
if (hit_obj_idx > -1 && !d_scene_objects[hit_obj_idx].is_light) {
// Continue the path: push this bounce and walk to the hit surface.
ray_direction.x *= -1;
ray_direction.y *= -1;
ray_direction.z *= -1;
indir_rate = d_scene_objects[hit_obj_idx].brdf_rate * fabs(dot(ray_direction, d_scene_objects[hit_obj_idx].normal_line)) / RR_RATE;
src_object_idx = hit_obj_idx;
ray_src = hit_point;
out_direction = ray_direction;
stack_dir[stack_offset] = l_dir;
stack_indir_rate[stack_offset] = indir_rate;
++stack_offset;
}
else {
// Path escaped the scene or hit a light directly: stop bouncing.
break;
}
}
else {
// Russian roulette terminated the path.
break;
}
}
// calc final irradiance
// Fold stacked bounces back to front: L = dir_i + indir_rate_i * L_next.
for (int i = stack_offset - 1; i >= stack_ori; --i) {
l_dir *= stack_indir_rate[i];
l_dir += stack_dir[i];
}
return l_dir;
}
// Estimates the radiance for one pixel by averaging SPP jittered primary rays.
// NOTE(review): curand_states is indexed by threadIdx.x only, so threads that
// share an x-lane across different threadIdx.y rows use the same RNG state
// concurrently — confirm whether per-thread states were intended.
__device__ __forceinline__ float ray_generation(float3 pixel_center_position, curandState* curand_states)
{
float pixel_radiance = 0;
for (int i = 0; i < SPP; ++i) {
// Jitter the sample uniformly inside the pixel footprint (x/z image plane).
float width_bias = d_camera_pixel_width * (curand_uniform(&curand_states[threadIdx.x]) - 0.5);
float height_bias = d_camera_pixel_height * (curand_uniform(&curand_states[threadIdx.x]) - 0.5);
int hit_obj_idx;
float3 ray_direction = sub_float3(add_float3(pixel_center_position, make_float3(width_bias, 0, height_bias)), d_camera_position);
// Direct light hit: add the light's radiance without shading.
float3 hit_light_point = check_light_hit(-1, d_camera_position, ray_direction, hit_obj_idx);
if (hit_obj_idx > -1) {
pixel_radiance += 1.0 / SPP * d_light_irradiance;
}
else {
// Otherwise shade the nearest non-emissive surface (if any).
float3 hit_point = check_obj_hit(-1, d_camera_position, ray_direction, hit_obj_idx);
if (hit_obj_idx > -1) {
float3 reverse_ray_direction = make_float3(-ray_direction.x, -ray_direction.y, -ray_direction.z);
pixel_radiance += 1.0 / SPP * shade(hit_obj_idx, hit_point, reverse_ray_direction, &curand_states[threadIdx.x]);
}
}
}
return pixel_radiance;
}
// One thread per pixel: builds the pixel's center on the image plane, traces
// SPP rays through it, tone-maps and writes a grayscale RGB triple.
// Expects a grid of (RENDER_WIDTH/TILE_SIZE, RENDER_HEIGHT/TILE_SIZE) blocks
// of TILE_SIZE x TILE_SIZE threads.
__global__ void render_pixel(unsigned char* target_img, curandState* curand_states)
{
int target_pixel_width = blockIdx.x * TILE_SIZE + threadIdx.x;
int target_pixel_height = blockIdx.y * TILE_SIZE + threadIdx.y;
// Pixel center = camera + focal-length step forward + left/up offsets.
float3 delta_left = scalar_mult_float3(d_camera_left_direction, (target_pixel_width + 0.5 - RENDER_WIDTH / 2.0) * d_camera_pixel_width);
float3 delta_up = scalar_mult_float3(d_camera_up_direction, (target_pixel_height + 0.5 - RENDER_HEIGHT / 2.0) * d_camera_pixel_height);
float3 delta = add_float3(delta_left, add_float3(delta_up, scalar_mult_float3(d_camera_direction, d_camera_focal_length)));
float3 pixel_center = make_float3(d_camera_position.x + delta.x, d_camera_position.y + delta.y, d_camera_position.z + delta.z);
float pixel_radiance = ray_generation(pixel_center, curand_states);
// Gamma correction
// Normalize by the light irradiance, clamp to [0,1], then apply gamma 1/2.2.
pixel_radiance /= d_light_irradiance;
if (pixel_radiance > 1) {
pixel_radiance = 1;
}
pixel_radiance = powf(pixel_radiance, 0.454545454545);
unsigned char rgb_value = (unsigned char)(pixel_radiance * 255);
// Grayscale output: same value in all three channels.
int base_idx = 3 * (target_pixel_height * RENDER_WIDTH + target_pixel_width);
target_img[base_idx] = rgb_value;
target_img[base_idx + 1] = rgb_value;
target_img[base_idx + 2] = rgb_value;
}
// Seeds one curandState per launched thread (launched as <<<1, TILE_SIZE>>>,
// so TILE_SIZE states, each with sequence number threadIdx.x).
// NOTE(review): render_pixel threads index curand_states[threadIdx.x], so
// block rows with equal threadIdx.x share a state concurrently — confirm
// whether one state per thread was intended.
__global__ void init_curand(curandState* curand_states, int seed)
{
curand_init(seed, threadIdx.x, 0, &(curand_states[threadIdx.x]));
}
// Entry point: allocates GPU resources, seeds the RNG states, loads the
// scene, renders RENDER_WIDTH x RENDER_HEIGHT pixels and saves the result
// as "RenderResult.bmp". Returns non-zero on allocation failure.
int main()
{
    string file_path;
    // cin >> file_path;
    dim3 grid{RENDER_WIDTH / TILE_SIZE, RENDER_HEIGHT / TILE_SIZE, 1};
    dim3 block{TILE_SIZE, TILE_SIZE, 1};
    unsigned char* d_target_img;
    // Fix: allocation results were previously ignored.
    cudaError_t cudaStatus = cudaMalloc(&d_target_img, RENDER_WIDTH * RENDER_HEIGHT * 3);
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc d_target_img failed: %s\n", cudaGetErrorString(cudaStatus));
        return 1;
    }
    curandState* curand_states;
    cudaStatus = cudaMalloc(&curand_states, TILE_SIZE * sizeof(curandState));
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "cudaMalloc curand_states failed: %s\n", cudaGetErrorString(cudaStatus));
        cudaFree(d_target_img);
        return 1;
    }
    init_curand <<<1, TILE_SIZE>>> (curand_states, 0);
    load_obb_file(file_path);
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "curand init launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "before render launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    render_pixel <<<grid, block>>> (d_target_img, curand_states);
    unsigned char* h_target_img = (unsigned char*)malloc(RENDER_WIDTH * RENDER_HEIGHT * 3);
    if (h_target_img == NULL) // fix: host allocation was unchecked
    {
        fprintf(stderr, "host image allocation failed\n");
        cudaFree(d_target_img);
        cudaFree(curand_states);
        return 1;
    }
    cudaDeviceSynchronize();
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "render launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    cudaMemcpy(h_target_img, d_target_img, RENDER_WIDTH * RENDER_HEIGHT * 3, cudaMemcpyDeviceToHost);
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess)
    {
        fprintf(stderr, "copy launch failed: %s\n", cudaGetErrorString(cudaStatus));
    }
    save_image(h_target_img, RENDER_WIDTH, RENDER_HEIGHT);
    free(h_target_img);
    cudaFree(d_target_img);
    cudaFree(curand_states);
    cudaDeviceReset();
    return 0;
}
11,009 | #include <stdio.h>
#define N 64
// C = A * B for row-major N x N int matrices; one thread per output element.
// Bounds-guarded, so the grid may overshoot the matrix dimensions.
__global__ void matmul( int *A, int *B, int *C ) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= N || col >= N)
        return;
    int acc = 0;
    for (int k = 0; k < N; ++k)
        acc += A[row * N + k] * B[k * N + col];
    C[row * N + col] = acc;
}
// Allocates unified memory, runs the GPU matmul, then verifies the result
// against a CPU reference computation.
int main() {
    int *A, *B, *C;
    int size = N * N * sizeof (int);
    // Allocate unified (managed) memory, visible to both host and device
    cudaMallocManaged (&A, size);
    cudaMallocManaged (&B, size);
    cudaMallocManaged (&C, size);
    // Initialize: A[r][c] = r, B[r][c] = c + 2, C zeroed
    for( int row = 0; row < N; ++row ) {
        for( int col = 0; col < N; ++col ) {
            A[row*N + col] = row;
            B[row*N + col] = col+2;
            C[row*N + col] = 0;
        }
    }
    dim3 threads_per_block (16, 16, 1); // A 16 x 16 x 1 block threads
    // Fix: ceil-division instead of "+ 1", which launched a whole extra
    // (idle) block row/column whenever N was an exact multiple of 16.
    dim3 number_of_blocks ((N + threads_per_block.x - 1) / threads_per_block.x,
                           (N + threads_per_block.y - 1) / threads_per_block.y, 1);
    matmul <<< number_of_blocks, threads_per_block >>> ( A, B, C );
    // Fix: surface launch-configuration errors before synchronizing.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    }
    cudaDeviceSynchronize();
    // Check if we got it all correct
    bool error = false;
    for( int row = 0; row < N; ++row ) {
        for( int col = 0; col < N; ++col ) {
            int val = 0;
            for( int k = 0; k < N; ++k ) {
                val += A[row * N + k] * B[k * N + col];
            }
            if(C[row * N + col] != val) {
                error = true;
            }
        }
    }
    if(error) {
        printf("Incorrect result!");
    } else {
        printf("Success!");
    }
    // Free all our allocated memory
    cudaFree(A); cudaFree(B); cudaFree(C);
}
|
11,010 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/reduce.h>
#include <iostream>
#include <chrono>
// nvcc -O3 -std=c++14 tarefa1.cu -o t1 && ./t1 < stocks.txt
// Reads N daily stock prices from stdin and reports mean/max/min over the
// whole series and over the last 365 days, computed on the GPU via Thrust.
int main()
{
    const int N = 2518;        // total number of daily prices in the input
    const int LAST_YEAR = 365; // size of the "last year" window
    thrust::host_vector<double> host(N);
    for (int i = 0; i < N; i++)
    {
        std::cin >> host[i];
    }
    auto start_time = std::chrono::high_resolution_clock::now();
    /* the line below copies the data to the GPU */
    thrust::device_vector<double> dev(host);
    auto end_time = std::chrono::high_resolution_clock::now();
    auto runtime = std::chrono::duration_cast<std::chrono::milliseconds>(end_time - start_time);
    std::cerr << "Alocação e cópia para GPU: " << runtime.count() << "ms\n";
    double media = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::plus<double>()) / N;
    std::cout << "Preço médio: " << media << "\n";
    // Bug fix: the last-year mean previously divided the 365-day sum by N,
    // underestimating it by a factor of ~6.9.
    double mediaUltimoAno = thrust::reduce(dev.begin() + N - LAST_YEAR, dev.end(), 0.0, thrust::plus<double>()) / LAST_YEAR;
    std::cout << "Preço médio último ano: " << mediaUltimoAno << "\n";
    // NOTE(review): initializing max with 0.0 assumes prices are non-negative.
    double maxVal = thrust::reduce(dev.begin(), dev.end(), 0.0, thrust::maximum<double>());
    std::cout << "Preço máximo: " << maxVal << "\n";
    double maxValUltimoAno = thrust::reduce(dev.begin() + N - LAST_YEAR, dev.end(), 0.0, thrust::maximum<double>());
    std::cout << "Preço máximo último ano: " << maxValUltimoAno << "\n";
    // The min reductions seed with maxVal so any real price can win.
    double minVal = thrust::reduce(dev.begin(), dev.end(), maxVal, thrust::minimum<double>());
    std::cout << "Preço mínimo: " << minVal << "\n";
    double minValUltimoAno = thrust::reduce(dev.begin() + N - LAST_YEAR, dev.end(), maxVal, thrust::minimum<double>());
    std::cout << "Preço mínimo último ano: " << minValUltimoAno << "\n";
}
|
11,011 | /*
Implement your CUDA kernel in this file
*/
// Mirrors the E_prev field into the one-cell ghost border of an
// (m+2) x (n+2) grid: column 0/n+1 copy columns 2/n-1, row 0/m+1 copy
// rows 2/m-1. Indices are shifted by +1 so thread (0,0) maps to interior
// cell (1,1).
__global__ void mirror_boundaries(double *E_prev, const int n, const int m)
{
    int row = blockIdx.y*blockDim.y + threadIdx.y + 1;
    int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
    // Fix: guard against grid overshoot — threads beyond the interior
    // previously wrote out of bounds whenever the launch grid did not
    // divide (m, n) exactly.
    if (row > m || col > n)
        return;
    if (col == 1) {
        // Left/right ghost columns for this row.
        E_prev[row*(n+2)] = E_prev[row*(n+2) + 2];
        E_prev[row*(n+2) + n + 1] = E_prev[row*(n+2) + n - 1];
    }
    if (row == 1) {
        // Top/bottom ghost rows for this column.
        E_prev[col] = E_prev[2*(n+2) + col];
        E_prev[(m+1)*(n+2) + col] = E_prev[(m-1)*(n+2) + col];
    }
}
// One explicit time step of a two-variable reaction-diffusion system
// (appears to be the Aliev-Panfilov cardiac model — TODO confirm) on an
// (m+2) x (n+2) grid with one-cell ghost borders. Per interior cell:
//   E <- E_prev + alpha * (5-point Laplacian of E_prev)   (diffusion)
//   E <- E - dt*(kk*E*(E-a)*(E-1) + E*R)                  (reaction)
//   R <- R + dt*(epsilon + M1*R/(E+M2))*(-R - kk*E*(E-b-1))
// Threads beyond the m x n interior are filtered by the bounds test.
__global__ void simulate (double *E, double *E_prev, double *R, const double alpha,
const int n, const int m, const double kk,
const double dt, const double a, const double epsilon,
const double M1,const double M2, const double b)
{
int row = blockIdx.y*blockDim.y + threadIdx.y + 1; // +1 skips the ghost border
int col = blockIdx.x*blockDim.x + threadIdx.x + 1;
if ((row - 1 < m) && (col - 1 < n)) {
// Diffusion: 5-point stencil over the previous field.
E[row*(n+2)+col] = E_prev[row*(n+2)+col] + alpha*(E_prev[row*(n+2)+col+1] + E_prev[row*(n+2)+col-1] - 4*E_prev[row*(n+2)+col] + E_prev[(row+1)*(n+2)+col] + E_prev[(row-1)*(n+2)+col]);
double tmp_E = E[row*(n+2)+col];
double tmp_R = R[row*(n+2)+col];
// Reaction terms; note tmp_E is updated first and the new value feeds R.
E[row*(n+2)+col] = tmp_E = tmp_E - dt*(kk*tmp_E*(tmp_E - a)*(tmp_E - 1) + tmp_E*tmp_R);
R[row*(n+2)+col] = tmp_R + dt*(epsilon + M1*tmp_R/(tmp_E + M2))*(-tmp_R - kk*tmp_E*(tmp_E - b - 1));
}
}
|
// Rasterizes a polygon into `mask` with the winding-number test: one thread
// per mask pixel, setting mask[idx] = 1 for pixels inside the polygon.
// Pixels are laid out column-major: idx = x * height + y.
// NOTE(review): edge i is read as (xs[i],ys[i]) -> (xs[i+1],ys[i+1]) for
// i in [0, nPolygonEdges), so xs/ys must contain nPolygonEdges + 1 entries
// with the ring closed (last point repeating the first) — confirm caller.
__global__ void poly2mask_cuda(
int * mask,
int nMaskPoints,
int nPolygonEdges,
float * xs,
float * ys,
int height)
{
int idx = blockDim.x * (gridDim.x * blockIdx.y + blockIdx.x) + threadIdx.x;
if (idx >= nMaskPoints || nPolygonEdges < 3) // At least 3 polyon points.
{
return;
}
int x = idx / height;
int y = idx % height;
float x0, y0, x1, y1;
int wn = 0; // winding number accumulated over all edges
for (int i = 0; i < nPolygonEdges; i++)
{
x0 = xs[i];
y0 = ys[i];
x1 = xs[i+1];
y1 = ys[i+1];
if (y0 <= y && y < y1)
{
// Upward crossing: count it when the pixel is left of the edge.
if (((x1 - x0) * (y - y0) - (x - x0) * (y1 - y0)) > 0)
{
++wn;
}
}
else if (y1 <= y && y < y0)
{
// Downward crossing: count it when the pixel is right of the edge.
if (((x1 - x0) * (y - y0) - (x - x0) * (y1 - y0)) < 0)
{
--wn;
}
}
}
// Non-zero winding number means the pixel is inside the polygon.
if (wn != 0)
{
mask[idx] = 1;
}
}
|
11,013 | #include <cuda_runtime_api.h>
#include <stdint.h>
// Element-wise widening cast: y[i] = (float)x[i] for i in [0, dim).
__global__ void cast_u8_to_f32_kernel(
    const uint8_t *x,
    uint32_t dim,
    float *y)
{
  uint32_t i = threadIdx.x + blockIdx.x * blockDim.x;
  if (i >= dim) {
    return; // grid may overshoot the array length
  }
  y[i] = (float)x[i];
}
// Host launcher: converts `dim` bytes to floats on `stream` using 1024-thread
// blocks and a ceil-divided grid. Asynchronous with respect to the host.
// NOTE(review): `dim` is a size_t but the kernel parameter is uint32_t, so
// inputs beyond 2^32-1 elements would be silently truncated — confirm callers.
extern "C" void neuralops_cuda_cast_u8_to_f32(
const uint8_t *x,
size_t dim,
float *y,
cudaStream_t stream)
{
cast_u8_to_f32_kernel<<<(dim+1024-1)/1024, 1024, 0, stream>>>(
x, dim, y);
}
|
11,014 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/count.h>
#include <thrust/sort.h>
#include <stdio.h>
#include <stdlib.h>
#include <cstdio>
using namespace std;
// Predicate functor: true for values strictly greater than 5.
// (Used with thrust::count_if to count days whose total rainfall exceeds 5.)
struct count5{
__host__ __device__
bool operator()(int x) {
return (x > 5);
}
};
// Rainfall demo: per-day totals (counting days with > 5mm) and per-site
// totals, computed with Thrust reductions on the GPU.
int main (int argc, char *argv[]) {
    const int N = 15;
    // populate data: measurement i happened on day[i] at site[i] with mmt[i] mm
    thrust::device_vector<int> day(N);
    day[0] = 0; day[1] = 0; day[2] = 1;
    day[3] = 2; day[4] = 5; day[5] = 5;
    day[6] = 6; day[7] = 6; day[8] = 7;
    day[9] = 8; day[10] = 9; day[11] = 9;
    day[12] = 9; day[13] = 10; day[14] = 11;
    thrust::device_vector<int> site(N);
    site[0] = 2; site[1] = 3; site[2] = 0;
    site[3] = 1; site[4] = 1; site[5] = 2;
    site[6] = 0; site[7] = 1; site[8] = 2;
    site[9] = 1; site[10] = 3; site[11] = 4;
    site[12] = 0; site[13] = 1; site[14] = 2;
    thrust::device_vector<int> mmt(N);
    mmt[0] = 9; mmt[1] = 5; mmt[2] = 6;
    mmt[3] = 3; mmt[4] = 3; mmt[5] = 8;
    mmt[6] = 2; mmt[7] = 6; mmt[8] = 5;
    mmt[9] = 10; mmt[10] = 9; mmt[11] = 11;
    mmt[12] = 8; mmt[13] = 4; mmt[14] = 1;
    thrust::device_vector<int> x(N);
    thrust::device_vector<int> y(N);
    // Sum rainfall per day (day[] is already sorted, as reduce_by_key requires).
    // Fix: use the returned iterator pair instead of scanning the whole output
    // vector, which relied on the unused tail staying zero.
    auto day_end = thrust::reduce_by_key(day.begin(), day.end(), mmt.begin(), x.begin(), y.begin());
    int count = thrust::count_if(y.begin(), day_end.second, count5());
    cout << "No. of days with rainfall > 5 = " << count << endl;
    // Regroup by site: sort measurements by site key, then sum per site.
    thrust::sort_by_key(site.begin(), site.end(), mmt.begin());
    auto site_end = thrust::reduce_by_key(site.begin(), site.end(), mmt.begin(), x.begin(), y.begin());
    // Fix: derive the number of distinct sites from reduce_by_key instead of
    // hard-coding 5.
    int num_sites = site_end.first - x.begin();
    for (int i = 0; i < num_sites; i++) {
        cout << "Site: " << x[i] << " Rainfall: " << y[i] << endl;
    }
    return 0;
}
|
11,015 | #include <cstdlib>
#include <iostream>
using namespace std;
// Enumerates the CUDA devices on this machine and prints their launch limits,
// marking the integrated and currently active GPUs.
int main () {
    int active_gpu_id;
    // Fix: initialize so a failed runtime query reads as "no GPUs" instead of
    // branching on an uninitialized value.
    int gpus_count = 0;
    cudaDeviceProp gpu_props;
    if (cudaGetDeviceCount(&gpus_count) != cudaSuccess) {
        gpus_count = 0;
    }
    if (gpus_count) {
        cudaGetDevice(&active_gpu_id);
        cout << "There is " << gpus_count << " GPUs available on your machine which are :" << endl;
        for (int i = 0; i < gpus_count; i++) {
            cudaGetDeviceProperties(&gpu_props, i);
            cout << "- " << gpu_props.name << " (id=" << i << ")";
            if (gpu_props.integrated) cout << " [INTEGRATED]";
            if (i == active_gpu_id) cout << " [ACTIVE]";
            cout << endl;
            cout << "---> maxThreadsPerBlock = " << gpu_props.maxThreadsPerBlock << endl;
            cout << "---> maxThreadsDim = (" << gpu_props.maxThreadsDim[0] << ", " << gpu_props.maxThreadsDim[1] << ", " << gpu_props.maxThreadsDim[2] << ")" << endl;
            cout << "---> maxGridSize = (" << gpu_props.maxGridSize[0] << ", " << gpu_props.maxGridSize[1] << ", " << gpu_props.maxGridSize[2] << ")" << endl;
        }
    }
    else
        cout << "Sorry but no GPU available on your machine" << endl;
    return EXIT_SUCCESS;
}
|
11,016 | #include <stdio.h>
#include <cuda.h>
#define N 6
// Fills the first `num` entries of `vector` with their own indices: 0, 1, 2, ...
void initialize(float * vector, int num) {
    for (int idx = 0; idx < num; ++idx)
        vector[idx] = (float)idx;
}
// Prints `num` elements separated by spaces, followed by a newline.
void plotVector(float * vector, int num) {
    for (int idx = 0; idx < num; ++idx)
        printf("%f ", vector[idx]);
    printf("\n");
}
// Element-wise product c[i] = a[i] * b[i]; the host sums the partial
// products to finish the dot product. Launch with exactly one block of N
// threads — there is no bounds guard.
__global__ void dot_product(float *a, float *b, float *c)
{
    int tid = threadIdx.x;
    c[tid] = a[tid] * b[tid];
}
// Computes the dot product of two N-element vectors: element-wise products
// on the GPU, final sum on the host.
int main(void)
{
    float *h_a, *h_b, *h_c;
    float *d_a, *d_b, *d_c;
    int size = N * sizeof(float);
    cudaMalloc((void **)&d_a, size);
    cudaMalloc((void **)&d_b, size);
    cudaMalloc((void **)&d_c, size);
    h_a = (float *)malloc(size); initialize(h_a, N);
    h_b = (float *)malloc(size); initialize(h_b, N);
    h_c = (float *)malloc(size);
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);
    dot_product<<<1,N>>>(d_a, d_b, d_c);
    // Blocking copy also synchronizes with the kernel above.
    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);
    float sum = 0;
    for(int i=0; i<N; i++) {
        sum += h_c[i];
    }
    printf("Vector A: \n"); plotVector(h_a, N);
    printf("Vector B: \n"); plotVector(h_b, N);
    printf("Dot Product, A*B = %f\n", sum);
    // Fix: release device and host allocations (previously all leaked).
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    free(h_a); free(h_b); free(h_c);
    return 0;
}
11,017 | /* Molecular dynamics simulation linear code for binary Lennard-Jones liquid
under NVE ensemble; Author: You-Liang Zhu, Email: youliangzhu@ciac.ac.cn
Copyright: You-Liang Zhu
This code is free: you can redistribute it and/or modify it under the terms
of the GNU General Public License.*/
#include <ctype.h>
#include <cuda_runtime.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
// periodic boundary condition
// Aborts the process if the CUDA runtime has a pending error, prefixing the
// diagnostic with `msg` for context. No-op when no error is pending.
void checkCUDAError(const char *msg) {
  cudaError_t err = cudaGetLastError();
  if (err == cudaSuccess)
    return;
  fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
  exit(-1);
}
// Minimum-image convention for one coordinate: wraps x into
// [-box_len/2, box_len/2] assuming |x| < 1.5 * box_len.
__host__ __device__ float
pbc(float x, float box_len)
{
  float half = box_len * 0.5;
  if (x > half) {
    x -= box_len;
  } else if (x < -half) {
    x += box_len;
  }
  return x;
}
// randome number generator [0.0-1.0)
// Random number generator over [0.0, 1.0).
// Bug fix: the old version divided by RAND_MAX, so rand() == RAND_MAX yielded
// exactly 1.0, violating the half-open interval the callers assume. The ratio
// is now computed in double (exact for every rand() value) and clamped so the
// float rounding can never reach 1.0f.
float R2S() {
  double d = (double)rand() / ((double)RAND_MAX + 1.0);
  float fran = (float)d;
  if (fran >= 1.0f)
    fran = nextafterf(1.0f, 0.0f); // largest float strictly below 1.0
  return fran;
}
// initially generate the position and mass of particles
void init(unsigned int np, float4 *r, float4 *v, float3 box, float min_dis) {
for (unsigned int i = 0; i < np; i++) {
bool find_pos = false;
float4 ri;
while (!find_pos) {
ri.x = (R2S() - 0.5) * box.x;
ri.y = (R2S() - 0.5) * box.y;
ri.z = (R2S() - 0.5) * box.z;
find_pos = true;
for (unsigned int j = 0; j < i; j++) {
float dx = pbc(ri.x - r[j].x, box.x);
float dy = pbc(ri.y - r[j].y, box.y);
float dz = pbc(ri.z - r[j].z, box.z);
float r = sqrt(dx * dx + dy * dy + dz * dz);
if (r < min_dis) // a minimum safe distance to avoid the overlap of LJ
// particles
{
find_pos = false;
break;
}
}
}
if (R2S() > 0.5) // randomly generate the type of particle, 1.0 represent
// type A and 2.0 represent type B
ri.w = 1.0;
else
ri.w = 2.0;
r[i] = ri;
v[i].w = 1.0;
}
}
// first step integration of velocity verlet algorithm
// Velocity-Verlet step 1: half-kick the velocity with the current force,
// then drift the position one full step and wrap it back into the box.
// Mass is stored in v[i].w.
extern "C" __global__ void first_integration_kernel(unsigned int np, float dt,
                                                    float3 box, float4 *r,
                                                    float4 *v, float4 *f) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < np) {
    float4 pos = r[idx];
    float mass = v[idx].w;
    // Half-kick: v += (dt/2) * f / m
    v[idx].x += 0.5 * dt * f[idx].x / mass;
    v[idx].y += 0.5 * dt * f[idx].y / mass;
    v[idx].z += 0.5 * dt * f[idx].z / mass;
    // Drift: x += dt * v, wrapped by the periodic boundary condition.
    pos.x += dt * v[idx].x;
    pos.y += dt * v[idx].y;
    pos.z += dt * v[idx].z;
    r[idx].x = pbc(pos.x, box.x);
    r[idx].y = pbc(pos.y, box.y);
    r[idx].z = pbc(pos.z, box.z);
  }
}
// Host wrapper: launches the first velocity-Verlet half-step over all np
// particles and blocks until it completes, aborting on any CUDA error.
void first_integration(unsigned int np, float dt, float3 box, float4 *r,
                       float4 *v, float4 *f, unsigned int block_size) {
  dim3 grid_dim((np / block_size) + 1, 1, 1);
  dim3 block_dim(block_size, 1, 1);
  first_integration_kernel<<<grid_dim, block_dim>>>(np, dt, box, r, v, f);
  // block until the device has completed
  cudaDeviceSynchronize();
  // check if kernel execution generated an error
  checkCUDAError("kernel execution");
}
// non-bonded force calculation
// All-pairs Lennard-Jones forces with shared-memory tiling: each block
// cooperatively stages blockDim.x particle positions at a time, and every
// thread accumulates the force (xyz) and potential (w) on its own particle i.
// Particle type is encoded in position .w (1.0 or 2.0); the pair's LJ
// coefficients are chosen by the sum of the two types (2/3/4).
// Requires `sizeof(float4) * blockDim.x` bytes of dynamic shared memory.
extern "C" __global__ void force_calculation_kernel(unsigned int np, float3 box,
float3 lj1, float3 lj2,
float4 *r, float4 *f,
float rcutsq) {
extern __shared__ float4 spos[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
float4 force = make_float4(0.0, 0.0, 0.0, 0.0);
float4 ri = make_float4(0.0, 0.0, 0.0, 0.0);
if (i < np)
ri = r[i];
for (int start = 0; start < np; start += blockDim.x) {
// load data
// Stage the next tile of positions; out-of-range lanes stage zeros.
float4 posj = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (start + threadIdx.x < np)
posj = r[start + threadIdx.x];
// First barrier protects the previous iteration's reads of spos;
// second makes the freshly staged tile visible to all threads.
__syncthreads();
spos[threadIdx.x] = posj;
__syncthreads();
int end_offset = blockDim.x;
end_offset = min(end_offset, np - start);
if (i < np) {
for (int cur_offset = 0; cur_offset < end_offset; cur_offset++) {
float4 rj = spos[cur_offset];
int j = start + cur_offset;
/* particles have no interactions with themselves */
if (i == j)
continue;
/* calculated the shortest distance between particle i and j */
float dx = pbc(ri.x - rj.x, box.x);
float dy = pbc(ri.y - rj.y, box.y);
float dz = pbc(ri.z - rj.z, box.z);
float type = ri.w + rj.w;
float rsq = dx * dx + dy * dy + dz * dz;
/* compute force and energy if within cutoff */
if (rsq < rcutsq) {
// Select coefficients by pair type (types are 1.0 or 2.0, so
// the sum is always one of 2.0 / 3.0 / 4.0).
float lj1_ij, lj2_ij;
if (type == 2.0) // i=1.0, j=1.0
{
lj1_ij = lj1.x;
lj2_ij = lj2.x;
} else if (type == 3.0) // i=1.0, j=2.0; or i=2.0, j=1.0
{
lj1_ij = lj1.y;
lj2_ij = lj2.y;
} else if (type == 4.0) // i=2.0, j=2.0
{
lj1_ij = lj1.z;
lj2_ij = lj2.z;
}
float r2inv = float(1.0) / rsq;
float r6inv = r2inv * r2inv * r2inv;
float ffac = r2inv * r6inv *
(float(12.0) * lj1_ij * r6inv -
float(6.0) * lj2_ij); // force between particle i and j
float epot = r6inv * (lj1_ij * r6inv -
lj2_ij); // potential between particle i and j
force.x += ffac * dx;
force.y += ffac * dy;
force.z += ffac * dz;
force.w += epot;
}
}
}
}
if (i < np) {
f[i] = force;
}
}
// Host wrapper: computes all LJ pair forces, sizing the dynamic shared
// memory to one float4 tile per block, then blocks until completion.
// NOTE(review): the `epsilon`/`sigma` parameters are forwarded into the
// kernel's `lj1`/`lj2` coefficient slots — confirm callers pass
// precomputed coefficients rather than raw epsilon/sigma.
void force_calculation(unsigned int np, float3 box, float3 epsilon,
                       float3 sigma, float4 *r, float4 *f, float rcut,
                       unsigned int block_size) {
  dim3 grid_dim((np / block_size) + 1, 1, 1);
  dim3 block_dim(block_size, 1, 1);
  unsigned int shared_bytes = sizeof(float4) * block_size;
  force_calculation_kernel<<<grid_dim, block_dim, shared_bytes>>>(
      np, box, epsilon, sigma, r, f, rcut);
  // block until the device has completed
  cudaDeviceSynchronize();
  // check if kernel execution generated an error
  checkCUDAError("kernel execution");
}
// second step integration of velocity verlet algorithm
// Velocity-Verlet step 2: final half-kick of the velocity with the updated
// force. Mass is stored in v[i].w.
extern "C" __global__ void second_integration_kernel(unsigned int np, float dt,
                                                     float4 *v, float4 *f) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < np) {
    float mass = v[idx].w;
    v[idx].x += 0.5 * dt * f[idx].x / mass;
    v[idx].y += 0.5 * dt * f[idx].y / mass;
    v[idx].z += 0.5 * dt * f[idx].z / mass;
  }
}
// Host wrapper: launches the second velocity-Verlet half-kick over all np
// particles and blocks until it completes, aborting on any CUDA error.
void second_integration(unsigned int np, float dt, float4 *v, float4 *f,
                        unsigned int block_size) {
  dim3 grid_dim((np / block_size) + 1, 1, 1);
  dim3 block_dim(block_size, 1, 1);
  second_integration_kernel<<<grid_dim, block_dim>>>(np, dt, v, f);
  // block until the device has completed
  cudaDeviceSynchronize();
  // check if kernel execution generated an error
  checkCUDAError("kernel execution");
}
// system information collection for temperature, kinetic energy, potential and
// total energy
// First reduction pass: each block writes one partial sum to scratch[blockIdx]
// where .x accumulates sum(m*|v|^2) (twice the kinetic energy) and .y the
// per-particle potential stored in f[i].w. Every thread folds two particles
// (i and i + blockDim.x) before the shared-memory tree reduction.
// Assumes blockDim.x is a power of two and blockDim.x float2s of dynamic
// shared memory.
__global__ void compute_info_sums_kernel(unsigned int np, float4 *v, float4 *f,
float2 *scratch) {
extern __shared__ float2 sdata[];
int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
float2 tempo = make_float2(0.0, 0.0);
if (i < np) {
float4 vi = v[i];
float mass = vi.w; // mass lives in the velocity's w component
tempo.x = mass * (vi.x * vi.x + vi.y * vi.y + vi.z * vi.z);
tempo.y = f[i].w; // f.w carries the pair potential accumulated per particle
if (i + blockDim.x < np) {
vi = v[i + blockDim.x];
mass = vi.w;
tempo.x += mass * (vi.x * vi.x + vi.y * vi.y + vi.z * vi.z);
tempo.y += f[i + blockDim.x].w;
}
}
sdata[threadIdx.x] = tempo;
__syncthreads();
// Shared-memory tree reduction; the barrier sits outside the if so every
// thread in the block reaches it each iteration.
int offs = blockDim.x >> 1;
while (offs > 0) {
if (threadIdx.x < offs) {
sdata[threadIdx.x].x += sdata[threadIdx.x + offs].x;
sdata[threadIdx.x].y += sdata[threadIdx.x + offs].y;
}
offs >>= 1;
__syncthreads();
}
if (threadIdx.x == 0) {
scratch[blockIdx.x].x = sdata[0].x;
scratch[blockIdx.x].y = sdata[0].y;
}
}
// Final reduction pass: a SINGLE block folds the num_partial_sums per-block
// partials in scratch[] and writes [temperature, potential, total energy]
// into info[0..2]. blockDim.x must be a power of two; dynamic shared memory
// is blockDim.x * sizeof(float2).
__global__ void compute_info_final_kernel(unsigned int np, float *info,
float2 *scratch,
unsigned int num_partial_sums) {
extern __shared__ float2 sdata[];
// running grand total; only meaningful in thread 0
float2 final_sum = make_float2(0.0, 0.0);
// consume the partials in chunks of 2*blockDim.x entries
for (int start = 0; start < num_partial_sums; start += blockDim.x * 2) {
float2 tempo = make_float2(0.0, 0.0);
if (start + threadIdx.x < num_partial_sums) {
float2 scr = scratch[start + threadIdx.x];
tempo.x = scr.x;
tempo.y = scr.y;
// second entry of the pair handled by this thread, if present
if (start + threadIdx.x + blockDim.x < num_partial_sums) {
scr = scratch[start + threadIdx.x + blockDim.x];
tempo.x += scr.x;
tempo.y += scr.y;
}
}
sdata[threadIdx.x] = tempo;
__syncthreads();
// tree-reduce this chunk in shared memory
int offs = blockDim.x >> 1;
while (offs > 0) {
if (threadIdx.x < offs) {
sdata[threadIdx.x].x += sdata[threadIdx.x + offs].x;
sdata[threadIdx.x].y += sdata[threadIdx.x + offs].y;
}
offs >>= 1;
__syncthreads();
}
// only thread 0 carries the chunk result forward; the reduction loop's
// trailing __syncthreads() guarantees sdata[0] is final before this read
if (threadIdx.x == 0) {
final_sum.x += sdata[0].x;
final_sum.y += sdata[0].y;
}
}
if (threadIdx.x == 0) {
// final_sum.x accumulated m*|v|^2, so kinetic energy is half of it
float ekin = 0.5 * final_sum.x;
// 0.5 presumably corrects double-counting of pair potentials in f[i].w
// -- TODO confirm against the force kernel
float potential = 0.5 * final_sum.y;
// 3N-3 degrees of freedom (presumably center-of-mass momentum removed)
unsigned int nfreedom = 3 * np - 3;
// equipartition: T = 2*Ekin / nfreedom (reduced units)
float temp = 2.0 * ekin / float(nfreedom);
float energy = ekin + potential;
info[0] = temp;
info[1] = potential;
info[2] = energy;
}
}
// Two-pass GPU reduction of system observables. Pass 1 writes one partial
// sum per block into scratch; pass 2 runs a single block that folds those
// partials and stores temperature / potential / total energy in info[0..2].
void compute_info(unsigned int np, float4 *v, float4 *f, float2 *scratch,
float *info, unsigned int block_size) {
unsigned int n_blocks = (int)ceil((float)np / (float)block_size);
// each first-pass thread handles two particles, so use block_size/2
// threads per block and one float2 of shared memory per thread
compute_info_sums_kernel<<<dim3(n_blocks, 1, 1),
dim3(block_size / 2, 1, 1),
sizeof(float2) * block_size / 2>>>(np, v, f, scratch);
cudaDeviceSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
// second pass: one block folds all n_blocks partials
const int final_block_size = 512;
compute_info_final_kernel<<<dim3(1, 1, 1),
dim3(final_block_size, 1, 1),
sizeof(float2) * final_block_size>>>(np, info, scratch, n_blocks);
cudaDeviceSynchronize();
// check if kernel execution generated an error
checkCUDAError("kernel execution");
}
// output system information and frame in XYZ formation which can be read by VMD
// Append one frame in XYZ format (readable by VMD): a count/info header
// line, then one "A" or "B" line per particle depending on the type code
// stored in r[i].w (1.0 -> A, 2.0 -> B; other types are skipped).
void output(FILE *traj, unsigned int step, float *info, float4 *r,
unsigned int np) {
// info[] layout: [0]=temperature, [1]=potential, [2]=total energy
fprintf(traj, "%d\n step=%d temp=%20.8f pot=%20.8f ener=%20.8f\n", np,
step, info[0], info[1], info[2]);
for (unsigned int i = 0; i < np; i++) {
const float4 ri = r[i];
if (ri.w == 1.0)
fprintf(traj, "A %20.8f %20.8f %20.8f\n", ri.x, ri.y, ri.z);
else if (ri.w == 2.0)
fprintf(traj, "B %20.8f %20.8f %20.8f\n", ri.x, ri.y, ri.z);
}
}
// main function
// Entry point: builds a binary Lennard-Jones system, integrates it with
// velocity Verlet on the GPU, and periodically writes XYZ frames + info.
// Fixes vs. original: d_scratch was never freed, and two printf calls
// passed extra arguments their format strings did not consume.
int main(int argc, char **argv) {
/* phase seperation parameters for video:
unsigned int nsteps = 200000;
unsigned int nprint = 5000;
float3 epsilon = make_float3(1.0, 0.2, 1.0);
*/
// running parameters
unsigned int np = 2700; // the number of particles
unsigned int nsteps = 500; // the number of time steps
float dt = 0.001; // integration time step
float rcut = 3.0; // the cutoff radius of interactions
unsigned int nprint = 100; // period for data output
unsigned int block_size = 128; // the number of threads in a block
timeval start; // start time
timeval end; // end time
float3 box =
make_float3(15.0, 15.0, 15.0); // box size in x, y, and z directions
// pair parameters: .x for types (1,1), .y for (1,2); .z presumably for
// (2,2) -- the original comment repeated (1,2), confirm against kernel
float3 epsilon = make_float3(1.0, 0.5, 1.0);
float3 sigma = make_float3(1.0, 1.0, 1.0);
float min_dis =
sigma.x *
0.9; // the minimum distance between particles for system generation
// Lennard-Jones prefactors: lj1 = 4*eps*sigma^12, lj2 = 4*eps*sigma^6
float3 lj1, lj2;
lj1.x = 4.0 * epsilon.x * pow(sigma.x, int(12));
lj1.y = 4.0 * epsilon.y * pow(sigma.y, int(12));
lj1.z = 4.0 * epsilon.z * pow(sigma.z, int(12));
lj2.x = 4.0 * epsilon.x * pow(sigma.x, int(6));
lj2.y = 4.0 * epsilon.y * pow(sigma.y, int(6));
lj2.z = 4.0 * epsilon.z * pow(sigma.z, int(6));
// host memory allocation
float4 *h_r =
(float4 *)malloc(np * sizeof(float4)); // rx, ry, rz, type(0, 1, 2 ...)
float4 *h_v = (float4 *)malloc(np * sizeof(float4)); // vx, vy, vz, mass
float4 *h_f = (float4 *)malloc(np * sizeof(float4)); // fx, fy, fz, potential
float *h_info =
(float *)malloc(16 * sizeof(float)); // temperature, potential, energy ...
// device memory allocation
float4 *d_r;
float4 *d_v;
float4 *d_f;
float *d_info;
float2 *d_scratch;
cudaMalloc((void **)&d_r,
np * sizeof(float4)); // rx, ry, rz, type(0, 1, 2 ...)
cudaMalloc((void **)&d_v, np * sizeof(float4)); // vx, vy, vz, mass
cudaMalloc((void **)&d_f, np * sizeof(float4)); // fx, fy, fz, potential
cudaMalloc((void **)&d_info,
16 * sizeof(float)); // temperature, potential, energy ...
cudaMalloc((void **)&d_scratch,
(np / block_size + 1) * sizeof(float2)); // per-block partial sums
FILE *traj = fopen(
"traj.xyz", "w"); // trajectory file in XYZ format that can be open by VMD
/* generate system information */
printf("Starting simulation with %d atoms for %d steps.\n", np, nsteps);
printf("Generating system.\n"); // extra printf args removed (no specifiers)
init(np, h_r, h_v, box, min_dis);
cudaMemcpy(d_r, h_r, np * sizeof(float4), cudaMemcpyHostToDevice);
cudaMemcpy(d_v, h_v, np * sizeof(float4), cudaMemcpyHostToDevice);
gettimeofday(&start, NULL); // get start time
/* main MD loop */
printf("Running simulation.\n"); // extra printf args removed
for (unsigned int step = 0; step <= nsteps; step++) // running simulation loop
{
/* first integration for velverlet */
first_integration(np, dt, box, d_r, d_v, d_f, block_size);
/* force calculation; lj1/lj2 ride in the epsilon/sigma parameter slots
and the cutoff is passed pre-squared */
force_calculation(np, box, lj1, lj2, d_r, d_f, rcut * rcut, block_size);
/* compute temperature and potential */
compute_info(np, d_v, d_f, d_scratch, d_info, block_size);
/* second integration for velverlet */
second_integration(np, dt, d_v, d_f, block_size);
/* write output frames and system information, if requested */
if ((step % nprint) == 0) {
cudaMemcpy(h_r, d_r, np * sizeof(float4), cudaMemcpyDeviceToHost);
cudaMemcpy(h_info, d_info, 16 * sizeof(float), cudaMemcpyDeviceToHost);
output(traj, step, h_info, h_r, np);
printf("time step %d \n", step);
}
}
gettimeofday(&end, NULL); // get end time
long timeusr =
(end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec);
printf("time is %ld microseconds\n",
timeusr); // the spending time on simulation in microseconds
fclose(traj);
free(h_r);
free(h_v);
free(h_f);
free(h_info);
cudaFree(d_r);
cudaFree(d_v);
cudaFree(d_f);
cudaFree(d_info);
cudaFree(d_scratch); // BUG FIX: this buffer was leaked
return 0;
}
|
11,018 | #include "device_launch_parameters.h"
#include <iostream>
#include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
using namespace std;
// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, n).
__global__ void add(const float *a, const float *b, float *c, int n){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return; // guard the grid tail
    c[idx] = a[idx] + b[idx];
}
// Element-wise addition of two n x m row-major matrices; one thread per
// element, x indexing rows and y indexing columns.
__global__ void matadd(const float *a, const float *b, float *c, int n, int m){
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int col = blockDim.y * blockIdx.y + threadIdx.y;
    if (row >= n || col >= m) return; // guard partial edge blocks
    int idx = row * m + col;
    c[idx] = a[idx] + b[idx];
}
// Host-side verification that c == a + b element-wise for an n x m matrix.
// Prints a message and terminates the process on the first mismatch;
// otherwise prints a success message.
void check_matadd(const float *a, const float *b, const float *c, int n, int m){
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < m; ++col) {
            const int idx = row * m + col;
            if (c[idx] != a[idx] + b[idx]) {
                printf("Not equal !!! \n");
                exit(1);
            }
        }
    }
    printf("Check matadd success !!\n");
}
// Print a greeting from every GPU thread; str must be a device pointer
// to a NUL-terminated string.
__global__ void hello(const char *str){
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    printf("%s from gpu on thread %d\n", str, tid);
}
// Debug helper: every thread prints its 3-D thread coordinates, its
// block's coordinates, the block shape and the grid shape. Output order
// across threads is arbitrary.
__global__ void showgrid(){
printf("thread: %d, %d %d\nblock Idxs: %d, %d %d\nblock Dims: %d, %d %d\ngrid: %d, %d %d\n\n\n", threadIdx.x, threadIdx.y, threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z, blockDim.x, blockDim.y, blockDim.z, gridDim.x, gridDim.y, gridDim.z);
}
// Driver: allocates two n x m matrices, adds them on the GPU with matadd,
// and verifies the result on the host.
// Fixes vs. original: the kernel launch swapped the grid and block
// arguments (<<<threadPerBlock, blockPerGrid>>>), and host initialization
// used integer division rand()/(RAND_MAX), which is 0 for almost every
// sample.
int main(){
    int n = 1 << 14;
    int m = 1 << 14;
    int total = n * m;
    size_t size = (size_t)total * sizeof(float);
    float *ha = (float*)malloc(size);
    float *hb = (float*)malloc(size);
    float *hc = (float*)malloc(size);
    float *da = NULL, *db = NULL, *dc = NULL;
    cudaMalloc((void**)&da, size);
    cudaMalloc((void**)&db, size);
    cudaMalloc((void**)&dc, size);
    // BUG FIX: divide as float so values land in [0, 1] instead of {0, 1}
    for(int i = 0; i < total; i++){
        ha[i] = rand() / (float)RAND_MAX;
        hb[i] = rand() / (float)RAND_MAX;
    }
    cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice);
    cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice);
    clock_t st = clock();
    dim3 threadPerBlock(32, 16);
    dim3 blockPerGrid((n + threadPerBlock.x - 1) / threadPerBlock.x,
                      (m + threadPerBlock.y - 1) / threadPerBlock.y);
    // BUG FIX: execution configuration is <<<grid, block>>> -- the
    // original passed <<<threadPerBlock, blockPerGrid>>>
    matadd<<<blockPerGrid, threadPerBlock>>>(da, db, dc, n, m);
    clock_t ed = clock();
    // note: this times only the (asynchronous) launch, not the kernel
    cout<<"time used: "<<ed-st<<endl;
    cudaDeviceSynchronize();
    cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost);
    check_matadd(ha, hb, hc, n, m);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    free(ha);
    free(hb);
    free(hc);
    return 0;
}
// #define CHECK(call){
// const cudaError_t err = call
// if(err != cudaSuccess){
// printf("Error: %s:%d \n", __FILE__, __LINE__);
// printf("Code:%d reason: %s\n", err, cudaErrorString(err));
// exit(1);
// }
// }
/*
int main(){
int n = 6;
dim3 block(3);
dim3 grid((n + block.x - 1) / block.x);
printf("on cpu\n");
printf("block: %d, %d %d\n", block.x, block.y, block.z);
printf("grid: %d, %d %d\n", grid.x, grid.y, grid.z);
showgrid<<<grid, block>>> ();
cudaDeviceReset();
return 0;
}
*/
/*
int main(){
int threadPerBlock = 32;
int blockPerGrid = (1 + threadPerBlock - 1) / threadPerBlock;
char *s = new char[10];
scanf("%s", s);
size_t size = strlen(s) * sizeof(char);
char *gpus;
cudaMalloc((void**)&gpus, size);
cudaMemcpy(gpus, s, size, cudaMemcpyHostToDevice);
hello<<<blockPerGrid, threadPerBlock>>> (gpus);
cudaFree(gpus);
//cudaDeviceReset();
cudaDeviceSynchronize();
return 0;
}
*/
/*
int main(){
int n = 5000000;
size_t size = n * sizeof(float);
//cpu中给abc申请内存空间
float *ha = (float*)malloc(size);
float *hb = (float*)malloc(size);
float *hc = (float*)malloc(size);
float *hd = (float*)malloc(size);
//init
for(int i = 0; i < n; i++){
ha[i] = rand() / (float)(RAND_MAX);
hb[i] = rand() / (float)(RAND_MAX);
}
clock_t st = clock();
for(int i = 0; i < n; i++){
hd[i] = ha[i] + hb[i];
}
clock_t ed = clock();
//printf("Time on cpu is %f\n", ed-st);
cout<<"cpu: "<< ed - st<<endl;
float *d_A = NULL;
float *d_B = NULL;
float *d_C = NULL;
//给GPU中三个ABC申请显存空间
cudaMalloc((void **)&d_A, size);
cudaMalloc((void **)&d_B, size);
cudaMalloc((void **)&d_C, size);
//把数据从cpu内存送到gpu显存中
cudaMemcpy(d_A, ha, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, hb, size, cudaMemcpyHostToDevice);
st = clock();
//执行GPU kernel
int threadPerBlock = 256; //32的倍数
int blockPerGrid = (n + threadPerBlock - 1) / threadPerBlock; //最少线程块的个数!???
add<<<blockPerGrid, threadPerBlock>>>(d_A, d_B, d_C, n);
//CHECK(cudaMemcpy(d_C, hc, size, cudaMemcpyDeviceToHost));
cudaMemcpy(d_C, hc, size, cudaMemcpyDeviceToHost);
ed = clock();
cout<<"gpu: "<<ed - st<<endl;
//printf('Time on gpu is %f\n', ed - st);
double eps = 1e-5;
for(int i = 0; i < n; i++){
if(fabs(ha[i] + hb[i] - hc[i]) < eps){
fprintf(stderr, "result not same");
exit(EXIT_FAILURE);
}
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(ha);
free(hb);
free(hc);
//printf("test passed!\n");
return 0;
}
*/
/*
int main()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for(int i=0;i<deviceCount;i++)
{
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, i);
std::cout << "使用GPU device " << i << ": " << devProp.name << std::endl;
std::cout << "设备全局内存总量: " << devProp.totalGlobalMem / 1024 / 1024 << "MB" << std::endl;
std::cout << "SM的数量:" << devProp.multiProcessorCount << std::endl;
std::cout << "每个线程块的共享内存大小:" << devProp.sharedMemPerBlock / 1024.0 << " KB" << std::endl;
std::cout << "每个线程块的最大线程数:" << devProp.maxThreadsPerBlock << std::endl;
std::cout << "设备上一个线程块(Block)种可用的32位寄存器数量: " << devProp.regsPerBlock << std::endl;
std::cout << "每个EM的最大线程数:" << devProp.maxThreadsPerMultiProcessor << std::endl;
std::cout << "每个EM的最大线程束数:" << devProp.maxThreadsPerMultiProcessor / 32 << std::endl;
std::cout << "设备上多处理器的数量: " << devProp.multiProcessorCount << std::endl;
std::cout << "======================================================" << std::endl;
}
return 0;
}
*/ |
11,019 | #include <stdio.h>
// Shift the identity sequence left by one through shared memory, so that
// xdata[i] == i+1 for i < length-1; xdata[length-1] is left untouched.
// The original version raced: thread t wrote data[t-1] while thread t-1
// was also writing data[t-1], with no __syncthreads() anywhere, and every
// thread redundantly copied the whole array to global memory. It also
// retired thread length-1 early, so data[length-2] never received its
// shifted value. Fixed with a two-phase pattern separated by a barrier,
// each thread writing exactly one output element.
__global__ void shift(int *xdata, int length) {
    __shared__ int data[1024];
    int tid = threadIdx.x;
    if (tid < length) data[tid] = tid;          // phase 1: fill
    __syncthreads();                            // all of data[] is ready
    if (tid < length - 1) xdata[tid] = data[tid + 1]; // phase 2: shift out
}
// Host driver: initializes 0..1023, runs the shift kernel with one block
// of 1024 threads, and reports any position where h_data[i] != i+1.
int main() {
    int h_data[1024];
    for (int i = 0; i < 1024; ++i)
        h_data[i] = i;
    void *d_data;
    cudaMalloc(&d_data, 1024 * sizeof(int));
    cudaMemcpy(d_data, h_data, 1024 * sizeof(int), cudaMemcpyHostToDevice);
    shift<<<1, 1024>>>((int*) d_data, 1024);
    cudaMemcpy(h_data, d_data, 1024 * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_data);
    // lets make sure answer is correct
    for (int i = 0; i < 1023; ++i)
        if (h_data[i] != i + 1)
            printf("Differ at position %d value computed %d value expected %d\n", i, h_data[i], i + 1);
}
|
11,020 | #include <cstdlib>
#include <vector>
#include <string>
#include <iostream>
#include <algorithm>
#include <fstream>
#include <chrono>
#include <CL/cl.h>
// Absolute difference of two nanosecond timestamps, returned in seconds.
auto to_seconds(cl_ulong t1, cl_ulong t2){
    const cl_ulong lo = std::min(t1, t2);
    const cl_ulong hi = std::max(t1, t2);
    return (hi - lo) / 1000.0 / 1000.0 / 1000.0;
}
// Host-to-device bandwidth sweep: for element counts from szmin to szmax
// (doubling each step), times a single H2D cudaMemcpy and prints the size,
// elapsed seconds and achieved GB/s.
// NOTE(review): the file includes CL/cl.h but the API used below is the
// CUDA runtime -- presumably a leftover include; confirm the build setup.
int main(int argc, char** argv)
{
using T = float;
// sweep bounds in ELEMENTS of T, not bytes (szmax = 2^28 floats = 1 GiB)
size_t szmin = ((size_t)1) << 2;
size_t szmax = ((size_t)1) << 28;
// a single CLI argument pins the sweep to one size
if (argc > 1)
szmin = szmax = atoi(argv[1]);
int ndev = 0;
cudaGetDeviceCount(&ndev);
//for(int k=0; k<ndev; ++k)
// only the first device is measured
int k = 0;
{
cudaSetDevice(k);
cudaDeviceProp prop{};
cudaGetDeviceProperties(&prop, k);
std::cout << "Device: " << prop.name << "\n";
for(auto i=szmin; i<=szmax; i *= 2)
{
// source buffer is pageable and intentionally left uninitialized --
// only transfer speed matters here
T* ptr = new T[i];
void* device_ptr;
cudaError_t cudaStat = cudaMalloc(&device_ptr, i*sizeof(T));
// time ONLY the copy; cudaMemcpy on pageable memory is synchronous,
// so no extra device sync is needed between the two time points
auto t0 = std::chrono::high_resolution_clock::now();
cudaMemcpy(device_ptr, ptr, i*sizeof(T), cudaMemcpyHostToDevice);
auto t1 = std::chrono::high_resolution_clock::now();
//cudaMemcpy(hostArray,deviceArray,bytes,cudaMemcpyDeviceToHost);
cudaFree(device_ptr);
auto sec = std::chrono::duration_cast<std::chrono::nanoseconds>(t1-t0).count() / 1000.0 / 1000.0 / 1000.0;
auto GB = (i*sizeof(T))/1024.0 / 1024.0 / 1024.0;
// columns: size in KiB, elapsed seconds, bandwidth in GB/s
std::cout << i*sizeof(T)/1024 << " dt = " << sec << " " << GB/sec <<" GB/s\n";
delete[] ptr;
}
}
return 0;
}
11,021 | #include "includes.h"
#define NUM_THREADS 32
// Euclidean distance between row x of matrix A (n_a rows) and row y of
// matrix B (n_b rows), each row holding k features; the result goes to
// d[y * pitch_d + x]. Launch with gridDim = (n_a, n_b) and
// blockDim.x == NUM_THREADS. Pitches are in ELEMENTS (they index float
// arrays directly), not bytes.
// NOTE(review): parameter p is unused here -- presumably the Minkowski
// order used by a sibling kernel; confirm against callers.
__global__ void euclidean_kernel(const float * vg_a, size_t pitch_a, size_t n_a, const float * vg_b, size_t pitch_b, size_t n_b, size_t k, float * d, size_t pitch_d, float p)
{
// one block computes one (row-of-A, row-of-B) distance
size_t x = blockIdx.x;
size_t y = blockIdx.y;
// If an element is to be computed
if(x < n_a && y < n_b) {
// per-thread partial sums of squared differences
__shared__ float temp[NUM_THREADS];
temp[threadIdx.x] = 0.0;
// each thread strides across the k features
for(size_t offset = threadIdx.x; offset < k; offset += blockDim.x) {
float t = vg_a[x * pitch_a + offset] - vg_b[y * pitch_b + offset];
temp[threadIdx.x] += (t * t);
}
// Sync with other threads
__syncthreads();
// Reduce (tree reduction; assumes blockDim.x is a power of two)
for(size_t stride = blockDim.x >> 1; stride > 0; stride >>= 1) {
if(threadIdx.x < stride) {
temp[threadIdx.x] += temp[threadIdx.x + stride];
}
__syncthreads();
}
// Write to global memory
if(threadIdx.x == 0) {
d[y * pitch_d + x] = sqrt(temp[0]);
}
}
}
11,022 | #include <stdio.h>
#include <stdlib.h>
#include <cstdlib>
#include <assert.h>
#include <fstream>
#include <iostream>
#include <cmath>
#include <ctime>
#include "matrix_cuda.cuh"
using namespace std;
#define BLOCK_SIZE 16
double* WEIGHTS;
int ROW;
int COL;
// Uniform random double in [fMin, fMax], driven by rand() (so it follows
// whatever seed srand() set).
double fRand(double fMin, double fMax)
{
    const double u = (double)rand() / RAND_MAX; // u in [0, 1]
    return fMin + u * (fMax - fMin);
}
// Fill the ROW x COL input matrix `a` and the global COL x COL WEIGHTS
// matrix with uniform random values in [minVal, maxVal].
void init_mat(double* a, double minVal, double maxVal) {
    const int n_input = ROW * COL;
    for (int idx = 0; idx < n_input; ++idx)
        a[idx] = fRand(minVal, maxVal);
    const int n_weights = COL * COL;
    for (int idx = 0; idx < n_weights; ++idx)
        WEIGHTS[idx] = fRand(minVal, maxVal);
}
// Serial reference matmul: h_result (m x k) = h_a (m x n) * WEIGHTS (n x k).
// Note the right-hand operand is the global WEIGHTS matrix, not a parameter.
void cpu_matrix_mult(double *h_a, double *h_result, int m, int n, int k) {
    for (int row = 0; row < m; ++row)
    {
        for (int col = 0; col < k; ++col)
        {
            double acc = 0.0;
            for (int inner = 0; inner < n; ++inner)
            {
                acc += h_a[row * n + inner] * WEIGHTS[inner * k + col];
            }
            h_result[row * k + col] = acc;
        }
    }
}
// ODE right-hand side f = z * WEIGHTS, evaluated serially on the CPU.
// `t` is unused because the system is autonomous (time-invariant).
void Function(double t, double* z, double* f) {
// Serial Matmul
cpu_matrix_mult(z, f, ROW, COL, COL);
}
// One 4-stage Runge-Kutta step for dZ/dt = Z * WEIGHTS on the GPU.
//   input_mat  (ROW x COL): state at t_init (host).
//   output_mat (ROW x COL): state at t_stop (host).
//   previous_f0..f3: host copies of the four stage slopes, kept so the
//                    extrapolating parallelRK4() can reuse them later.
//   iteration: unused here; kept for signature parity with parallelRK4().
// Fixes vs. original: output_mat was never written (the caller consumed
// garbage), z0_cuda and outputs_cuda were leaked, the CUDA events were
// never destroyed, and deprecated cudaThreadSynchronize() was used.
void rk4(double t_init, double t_stop, double* input_mat, double* output_mat, int iteration,
double* previous_f0, double* previous_f1, double* previous_f2, double* previous_f3) {
float gpu_elapsed_time_ms;
// events to time the whole step
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
double dt = t_stop - t_init;
// device buffers: weight matrix, stage states z*, stage slopes f*, result
double *weights_cuda, *outputs_cuda;
double *z0_cuda, *z1_cuda, *z2_cuda, *z3_cuda;
double *f0_cuda, *f1_cuda, *f2_cuda, *f3_cuda;
cudaMalloc((void **) &weights_cuda, sizeof(double)*COL*COL);
cudaMalloc((void **) &z0_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &z1_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &z2_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &z3_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &f0_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &f1_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &f2_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &f3_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &outputs_cuda, sizeof(double)*ROW*COL);
cudaMemcpy(z0_cuda, input_mat, sizeof(double)*ROW*COL, cudaMemcpyHostToDevice);
cudaMemcpy(weights_cuda, WEIGHTS, sizeof(double)*COL*COL, cudaMemcpyHostToDevice);
unsigned int grid_rows = (ROW + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (COL + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
/// Stage 0: f0 = z0 * W
if(ROW == COL)
{
gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(z0_cuda, weights_cuda, f0_cuda, COL);
}
else
{
gpu_matrix_mult<<<dimGrid, dimBlock>>>(z0_cuda, weights_cuda, f0_cuda, ROW, COL, COL);
}
/// Stage 1: z1 = z0 + 0.5*dt*f0; f1 = z1 * W
gpu_matrix_add<<<2, (ROW * COL + 1) / 2>>>(z0_cuda, f0_cuda, z1_cuda, ROW*COL, 0.5 * dt);
if(ROW == COL)
{
gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(z1_cuda, weights_cuda, f1_cuda, COL);
}
else
{
gpu_matrix_mult<<<dimGrid, dimBlock>>>(z1_cuda, weights_cuda, f1_cuda, ROW, COL, COL);
}
/// Stage 2: z2 = z0 + 0.5*dt*f1; f2 = z2 * W
gpu_matrix_add<<<2, (ROW * COL + 1) / 2>>>(z0_cuda, f1_cuda, z2_cuda, ROW*COL, 0.5 * dt);
if(ROW == COL)
{
gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(z2_cuda, weights_cuda, f2_cuda, COL);
}
else
{
gpu_matrix_mult<<<dimGrid, dimBlock>>>(z2_cuda, weights_cuda, f2_cuda, ROW, COL, COL);
}
/// Stage 3: z3 = z0 + 0.5*dt*f2; f3 = z3 * W
// NOTE(review): classic RK4 uses z3 = z0 + dt*f2; the 0.5*dt here looks
// suspect, but parallelRK4()'s extrapolation coefficients appear tuned to
// this scheme, so it is deliberately left unchanged -- confirm against
// the method derivation.
gpu_matrix_add<<<2, (ROW * COL + 1) / 2>>>(z0_cuda, f2_cuda, z3_cuda, ROW*COL, 0.5 * dt);
if(ROW == COL)
{
gpu_square_matrix_mult<<<dimGrid, dimBlock>>>(z3_cuda, weights_cuda, f3_cuda, COL);
}
else
{
gpu_matrix_mult<<<dimGrid, dimBlock>>>(z3_cuda, weights_cuda, f3_cuda, ROW, COL, COL);
}
/// Collect stages: out = z0 + dt/6*f0 + dt/3*f1 + dt/3*f2 + dt/6*f3
gpu_matrix_add_multi<<<2, (ROW * COL + 1) / 2>>>(z0_cuda, f0_cuda, f1_cuda, f2_cuda, f3_cuda, outputs_cuda,
ROW*COL, 1.0, dt / 6.0, dt / 3.0, dt / 3.0, dt / 6.0);
// BUG FIX: return the advanced state to the caller (output_mat was never
// written before)
cudaMemcpy(output_mat, outputs_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
/// save previous K for the multistep predictor:
cudaMemcpy(previous_f0, f0_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
cudaMemcpy(previous_f1, f1_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
cudaMemcpy(previous_f2, f2_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
cudaMemcpy(previous_f3, f3_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize(); // replaces deprecated cudaThreadSynchronize()
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
// printf("Time elapsed on matrix multiplication of on GPU: %f ms.\n\n", gpu_elapsed_time_ms);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(z0_cuda);      // BUG FIX: was leaked
cudaFree(f0_cuda);
cudaFree(f1_cuda);
cudaFree(z1_cuda);
cudaFree(f2_cuda);
cudaFree(z2_cuda);
cudaFree(f3_cuda);
cudaFree(z3_cuda);
cudaFree(outputs_cuda); // BUG FIX: was leaked
cudaFree(weights_cuda);
}
// One extrapolated RK4 step that overlaps the four stage matmuls on
// separate CUDA streams: the stage inputs z1..z3 are PREDICTED from the
// previous step's stage slopes (previous_f0..f3), so all four products can
// run concurrently instead of sequentially.
//   iteration: unused; kept for signature parity with rk4().
// Fixes vs. original: output_mat was never filled from outputs_cuda;
// z0_cuda, outputs_cuda and the four pre_f* buffers were leaked; the four
// streams and both events were never destroyed; deprecated
// cudaThreadSynchronize() was used.
void parallelRK4(double t_init, double t_stop, double* input_mat, double* output_mat, int iteration,
double* previous_f0, double* previous_f1, double* previous_f2, double* previous_f3) {
cudaStream_t stream0, stream1, stream2, stream3;
cudaStreamCreate(&stream0);
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaStreamCreate(&stream3);
double dt = t_stop - t_init;
// device buffers: weights, result, stage states, stage slopes, and device
// copies of the previous step's slopes
double *weights_cuda, *outputs_cuda;
double *z0_cuda, *z1_cuda, *z2_cuda, *z3_cuda;
double *f0_cuda, *f1_cuda, *f2_cuda, *f3_cuda;
double *pre_f0, *pre_f1, *pre_f2, *pre_f3;
// Allocate memories
cudaMalloc((void **) &weights_cuda, sizeof(double)*COL*COL);
cudaMalloc((void **) &outputs_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &z0_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &f0_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &pre_f0, sizeof(double)*ROW*COL);
cudaMalloc((void **) &z1_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &f1_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &pre_f1, sizeof(double)*ROW*COL);
cudaMalloc((void **) &z2_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &f2_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &pre_f2, sizeof(double)*ROW*COL);
cudaMalloc((void **) &z3_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &f3_cuda, sizeof(double)*ROW*COL);
cudaMalloc((void **) &pre_f3, sizeof(double)*ROW*COL);
// Copy current state and previous stage slopes from host to device
cudaMemcpy(z0_cuda, input_mat, sizeof(double)*ROW*COL, cudaMemcpyHostToDevice);
cudaMemcpy(pre_f0, previous_f0, sizeof(double)*ROW*COL, cudaMemcpyHostToDevice);
cudaMemcpy(weights_cuda, WEIGHTS, sizeof(double)*COL*COL, cudaMemcpyHostToDevice);
cudaMemcpy(pre_f1, previous_f1, sizeof(double)*ROW*COL, cudaMemcpyHostToDevice);
cudaMemcpy(pre_f2, previous_f2, sizeof(double)*ROW*COL, cudaMemcpyHostToDevice);
cudaMemcpy(pre_f3, previous_f3, sizeof(double)*ROW*COL, cudaMemcpyHostToDevice);
unsigned int grid_rows = (ROW + BLOCK_SIZE - 1) / BLOCK_SIZE;
unsigned int grid_cols = (COL + BLOCK_SIZE - 1) / BLOCK_SIZE;
dim3 dimGrid(grid_cols, grid_rows);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
float gpu_elapsed_time_ms;
// events to time the whole step
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
// Predict stage inputs z1..z3 from the previous step's slopes so the four
// stage matmuls below are independent and can overlap across streams.
gpu_matrix_add_multi_four<<<2, (ROW * COL + 1) / 2, 0, stream1>>>(z0_cuda, pre_f0, pre_f1, pre_f3, z1_cuda,
ROW*COL, 1.0, dt * -0.75, dt * 0.5, dt * 0.75);
gpu_matrix_add_multi_four<<<2, (ROW * COL + 1) / 2, 0, stream2>>>(z0_cuda, pre_f0, pre_f1, pre_f2, z2_cuda,
ROW*COL, 1.0, dt * -1, dt * 2.0, dt * -0.5);
gpu_matrix_add_multi_three<<<2, (ROW * COL + 1) / 2, 0, stream3>>>(z0_cuda, pre_f0, pre_f3, z3_cuda,
ROW*COL, 1.0, dt * 0.5, dt * 0.5);
/// Stage 0: f0 = z0 * W (stream0)
if(ROW == COL)
{
gpu_square_matrix_mult<<<dimGrid, dimBlock, 0, stream0>>>(z0_cuda, weights_cuda, f0_cuda, COL);
}
else
{
gpu_matrix_mult<<<dimGrid, dimBlock, 0, stream0>>>(z0_cuda, weights_cuda, f0_cuda, ROW, COL, COL);
}
/// Stage 1: f1 = z1 * W (stream1)
if(ROW == COL)
{
gpu_square_matrix_mult<<<dimGrid, dimBlock, 0, stream1>>>(z1_cuda, weights_cuda, f1_cuda, COL);
}
else
{
gpu_matrix_mult<<<dimGrid, dimBlock, 0, stream1>>>(z1_cuda, weights_cuda, f1_cuda, ROW, COL, COL);
}
/// Stage 2: f2 = z2 * W (stream2)
if(ROW == COL)
{
gpu_square_matrix_mult<<<dimGrid, dimBlock, 0, stream2>>>(z2_cuda, weights_cuda, f2_cuda, COL);
}
else
{
gpu_matrix_mult<<<dimGrid, dimBlock, 0, stream2>>>(z2_cuda, weights_cuda, f2_cuda, ROW, COL, COL);
}
/// Stage 3: f3 = z3 * W (stream3)
if(ROW == COL)
{
gpu_square_matrix_mult<<<dimGrid, dimBlock, 0, stream3>>>(z3_cuda, weights_cuda, f3_cuda, COL);
}
else
{
gpu_matrix_mult<<<dimGrid, dimBlock, 0, stream3>>>(z3_cuda, weights_cuda, f3_cuda, ROW, COL, COL);
}
// all four slopes must be ready before they are combined
cudaStreamSynchronize(stream1);
cudaStreamSynchronize(stream2);
cudaStreamSynchronize(stream3);
/// Collect stages: out = z0 + dt/6*f0 + dt/3*f1 + dt/3*f2 + dt/6*f3
gpu_matrix_add_multi<<<2, (ROW * COL + 1) / 2, 0, stream0>>>(z0_cuda, f0_cuda, f1_cuda, f2_cuda, f3_cuda, outputs_cuda,
ROW*COL, 1.0, dt / 6.0, dt / 3.0, dt / 3.0, dt / 6.0);
cudaStreamSynchronize(stream0);
cudaDeviceSynchronize(); // replaces deprecated cudaThreadSynchronize()
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
// BUG FIX: return the advanced state to the caller (output_mat was never
// written before)
cudaMemcpy(output_mat, outputs_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
/// save previous K:
cudaMemcpy(previous_f0, f0_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
cudaMemcpy(previous_f1, f1_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
cudaMemcpy(previous_f2, f2_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
cudaMemcpy(previous_f3, f3_cuda, sizeof(double)*ROW*COL, cudaMemcpyDeviceToHost);
// compute time elapse on GPU computing
cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
printf("Time elapsed on matrix multiplication of on GPU: %f ms.\n\n", gpu_elapsed_time_ms);
// release everything (streams, events and several buffers were leaked
// in the original)
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaStreamDestroy(stream0);
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
cudaStreamDestroy(stream3);
cudaFree(z0_cuda);
cudaFree(f0_cuda);
cudaFree(pre_f0);
cudaFree(z1_cuda);
cudaFree(f1_cuda);
cudaFree(pre_f1);
cudaFree(z2_cuda);
cudaFree(f2_cuda);
cudaFree(pre_f2);
cudaFree(z3_cuda);
cudaFree(f3_cuda);
cudaFree(pre_f3);
cudaFree(outputs_cuda);
cudaFree(weights_cuda);
}
// Entry point: reads ROW/COL from stdin, then advances the system nStep
// times. The first 3 steps use plain rk4() to build up the stage history
// that parallelRK4() extrapolates from.
// Fixes vs. original: `*input_mat = *output_mat;` copied only element [0]
// instead of the whole ROW*COL state; scanf was unchecked; every heap
// allocation was leaked; the unused local `dt` was removed.
int main() {
srand(3333);
double t_init = 0;
double t_stop = 1;
printf("please type in row and col\n");
if (scanf("%d %d", &ROW, &COL) != 2 || ROW <= 0 || COL <= 0) {
printf("invalid input\n");
return 1;
}
//
// Input and Output shape: row * col
// WEIGHTS shape: col * col
//
WEIGHTS = new double[COL * COL];
// int nStep = ceil((t1 - t0) / dt);
int nStep = 100;
double *input_mat = new double[ROW * COL];
double *output_mat = new double[ROW * COL];
double *previous_f0 = new double[ROW * COL];
double *previous_f1 = new double[ROW * COL];
double *previous_f2 = new double[ROW * COL];
double *previous_f3 = new double[ROW * COL];
// Initialize our matrices
init_mat(input_mat, 0, 255);
for (int i = 0; i < nStep; i++) {
if (i < 3) {
rk4(t_init, t_stop, input_mat, output_mat, i, previous_f0, previous_f1, previous_f2, previous_f3);
}
else {
parallelRK4(t_init, t_stop, input_mat, output_mat, i, previous_f0, previous_f1, previous_f2, previous_f3);
}
// BUG FIX: carry the ENTIRE state into the next step; the original
// `*input_mat = *output_mat` copied a single element
for (int idx = 0; idx < ROW * COL; idx++)
input_mat[idx] = output_mat[idx];
}
delete[] WEIGHTS;
delete[] input_mat;
delete[] output_mat;
delete[] previous_f0;
delete[] previous_f1;
delete[] previous_f2;
delete[] previous_f3;
return 0;
}
|
11,023 | #pragma warning (disable : 4267)
#pragma warning (disable : 4244)
#include <thrust/random.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/random/normal_distribution.h>
#include <iostream>
#include <iomanip>
#include <cmath>
#include "Example_MC_BS.cuh"
// 32-bit integer bit-mixing hash (Robert Jenkins style), usable from both
// host and device; spreads the bits of `a` for use as an RNG seed.
__host__ __device__
unsigned int hashBS(unsigned int a)
{
    unsigned int h = a;
    h = (h + 0x7ed55d16) + (h << 12);
    h = (h ^ 0xc761c23c) ^ (h >> 19);
    h = (h + 0x165667b1) + (h << 5);
    h = (h + 0xd3a2646c) ^ (h << 9);
    h = (h + 0xfd7046c5) + (h << 3);
    h = (h ^ 0xb55a4f09) ^ (h >> 16);
    return h;
}
// Monte-Carlo estimator for a European call under Black-Scholes dynamics.
// Each thread draws N antithetic GBM terminal prices (sharing one normal
// draw between +W and -W to reduce variance) and returns its own
// discounted average payoff; summing these over threads and dividing by
// the thread count yields the price estimate.
struct estimate_BS : public thrust::unary_function<unsigned int,float>
{
__device__
float operator()(unsigned int thread_id)
{
    unsigned int N = 100000; // samples per thread
    // per-thread RNG seeded by the thread id, mapped to N(0,1)
    thrust::default_random_engine rng(thread_id);
    thrust::random::normal_distribution<float> ndist(0.0f, 1.0f);
    // model / contract parameters (identical for every thread)
    float S0 = 20.0f;   // spot
    float sig = 0.28f;  // volatility
    float r = 0.045f;   // risk-free rate
    float K = 21.0f;    // strike
    float T = 0.5f;     // maturity in years
    float sqrtT = sqrtf(T);
    float sig2 = sig*sig;
    float acc = 0;
    for(unsigned int s = 0; s < N; ++s)
    {
        float W = ndist(rng);
        // antithetic pair of terminal prices from one draw
        float up = S0 * expf((r - 0.5f*sig2)*T + sig*sqrtT*W);
        float dn = S0 * expf((r - 0.5f*sig2)*T - sig*sqrtT*W);
        // average the two call payoffs
        acc += (((up-K > 0.0f)? up-K:0.0f) + ((dn-K > 0.0f)? dn-K:0.0f))/2.0f;
    }
    // discount to present value
    acc *= expf(-r*T);
    // average over this thread's samples
    return acc / N;
}
};
// Price the option by averaging M independent per-thread estimates via a
// single transform_reduce over a counting iterator (each index seeds one
// estimate_BS evaluation).
void exmpl_thrust_MC_BS()
{
    const int M = 50000; // number of independent seeds
    float total = thrust::transform_reduce(thrust::counting_iterator<int>(0),
                                           thrust::counting_iterator<int>(M),
                                           estimate_BS(),
                                           0.0f,
                                           thrust::plus<float>());
    const float estimate = total / M;
    std::cout << std::setprecision(10);
    std::cout << "Option price is approximately " << estimate << std::endl;
    cudaDeviceReset();
}
11,024 | //Time-stamp: <2013-12-04 13:20:59 hamada>
#include <iostream>
#include <string.h>
#include <stdio.h>
#include <math.h>
#include <assert.h>
//#define NUM (1024*1024*1024/4) // 1GB
//#define NUM ((1024*1024/4)*(1024 + 512)) // 1.5GB
//#define NUM ((1024*1024/4)*(1024*2) -1) // 2GB-1
//#define NUM ((1024*1024/4)*(1024*2) ) // 2GB
//#define NUM ((1024*1024/4)) // 1MB
#define NUM ((1024*1024/4)*(1024*6-100) ) // 6GB-100MB
using namespace std;
// Sum all n elements of x with a single worker thread and store the
// result in x[0] (deliberately serial -- this file benchmarks memory, not
// compute).
// BUG FIX: the original guard `if(wid > 1) return;` let BOTH global
// threads 0 and 1 run the loop; thread 1 could re-read x[0] after thread
// 0 had already overwritten it with the sum -- a data race.
__global__ void kernel(int* x, int n)
{
    int tid = threadIdx.x;
    int bid = blockIdx.x;
    int wid = blockDim.x*bid + tid;
    if (wid != 0) return; // exactly one thread does the work
    int z = 0;
    // note: z can overflow for large n of non-unit values (fine for the
    // all-ones benchmark payload used here)
    for (size_t i = 0; i < (size_t)n; i++) z += x[i];
    x[0] = z;
}
#include <sys/time.h>
#include <sys/resource.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
// Fix vs. original: the locals were `static`, making the function
// non-reentrant and thread-unsafe for no benefit; plain automatics are
// equivalent and safe.
extern "C" double get_time(void)
{
    struct timeval tv;
    struct timezone tz;
    gettimeofday(&tv, &tz);
    return ((double)(tv.tv_sec + tv.tv_usec*1.0e-6));
}
// cudaMalloc wrapper: aborts via assert on failure and logs the elapsed
// allocation time to stdout.
void myCudaMalloc(void** val, size_t mem_size)
{
    const double t0 = get_time();
    const cudaError_t err = cudaMalloc(val, mem_size);
    assert(cudaSuccess == err);
    cout << "cudaMalloc: " << get_time() - t0 << endl;
}
// cudaMemcpy wrapper: aborts via assert on failure and logs the elapsed
// transfer time to stdout.
void myCudaMemcpy(void* dst, const void* src, size_t size, enum cudaMemcpyKind kind)
{
    const double t0 = get_time();
    const cudaError_t err = cudaMemcpy(dst, src, size, kind);
    assert(cudaSuccess == err);
    cout << "cudaMemcpy: " << get_time() - t0 << endl;
}
// Driver: fills a ~6 GB int array with ones, sums it on the GPU with a
// single worker thread, and compares against the host-computed sum.
// Fixes vs. original: hval2 was allocated, never used and never freed
// (pure leak -- removed); the huge host allocation is now checked; the
// size_t -> int narrowing at the kernel launch is made explicit (NUM fits
// in int).
int main( int argc, char** argv)
{
    int nb = 512;    // blocks (max 65535)
    int nthre = 128; // threads per block (max 512 on old devices)
    size_t nword = NUM;
    size_t mem_size = sizeof(int) * nword;
    printf("# nword: %zd \n", nword);
    printf("# threads: %d \n", nb*nthre);
    printf("mem_size: %zd Kbyte\n", mem_size >> 10);
    double t=0.;
    cudaError_t err;
    int* hval = (int*) malloc(mem_size);
    assert(hval != NULL); // a ~6 GB malloc can realistically fail
    int* dval = NULL;
    cout << "mem_size: " <<mem_size << endl;
    cout << "(size_t)mem_size:" <<(size_t)mem_size << endl;
    cout << sizeof(size_t) << endl;
    myCudaMalloc((void**)&dval, mem_size);
    // fill with ones and compute the reference sum on the host
    int z = 0;
    for(size_t i=0; i<nword; i++){hval[i] = 1; z += hval[i];}
    myCudaMemcpy(dval, hval, mem_size, cudaMemcpyHostToDevice);
    t = get_time();
    kernel<<< nb, nthre >>>(dval, (int)nword); // kernel takes int; NUM fits
    err = cudaThreadSynchronize();
    assert(cudaSuccess == err);
    cout << "GPU calc: " << get_time() - t << endl;
    myCudaMemcpy(hval, dval, mem_size, cudaMemcpyDeviceToHost);
    printf("GPU: %d\n", hval[0]);
    printf("HOS: %d\n", z);
    free(hval);
    cudaFree(dval);
    return (0);
}
|
11,025 | /**
* @brief Usage of pointers on host and device.
*
* In this example we allocate memory on the device for two pointers
* to floats, d_A and d_B, that point to the first element of a
* fixed block of n floats. This is an important first step to
* understanding how images are handles.
*
* Then, we allocate memory on the device for a set of pointers to
* pointers to floats, d_X. We let those pointers to floats be equal
* to the first elements of the previously uploaded arrays. This
* allows us to access multiple arrays of floats from a single array
* of pointers. This example will be required later on when we
* handle vectors of images.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
/**
* @brief Kernel demonstrating usage of pointers.
*
* @param d_X Pointer to pointer to floats
* @param d_A Pointer to floats
* @param d_B Pointer to floats
* @param[in] n Number of elements in input arrays
*/
__global__ void p_kernel(float ** d_X, float * d_A, float * d_B, int n){
printf("Addresses:\n");
printf("dX = %p\n", d_X);
printf("dA = %p\n", d_A);
printf("dB = %p\n", d_B);
printf("dX[0] = %p\n", d_X[0]);
// BUG FIX: this line printed d_X[1] but its label read "dX[0]"
printf("dX[1] = %p\n", d_X[1]);
// dereference the pointer table to recover the two arrays
float * devA = d_X[0];
float * devB = d_X[1];
printf("\nValues:\n");
for (int i=0; i<n; i++)
printf("A[%d] = %f\n", i, devA[i]);
for (int i=0; i<n; i++)
printf("B[%d] = %f\n", i, devB[i]);
}
int main(void) {
    // Demonstrates accessing two device arrays through a single mapped
    // array of device pointers.
    const int n = 10;                    // elements per array
    const int nn = n * sizeof(float);    // bytes per array
    float * h_A;
    float * h_B;
    float * d_A;
    float * d_B;
    float ** hst_ptr;
    // Host-side input arrays.
    h_A = (float*)malloc(nn);
    h_B = (float*)malloc(nn);
    // Mapped (zero-copy) pinned allocation so the device can read the
    // pointer table directly.
    cudaHostAlloc((void**)&hst_ptr, 2*sizeof(float*), cudaHostAllocMapped);
    for (int i=0; i<n; ++i) {
        h_A[i] = i + 1.0f;
        h_B[i] = 20.0f + i;
    }
    // Allocate space on the device for d_A and d_B, then upload.
    cudaMalloc((void**)&d_A, nn);
    cudaMalloc((void**)&d_B, nn);
    cudaMemcpy(d_A, h_A, nn, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, nn, cudaMemcpyHostToDevice);
    hst_ptr[0] = d_A;
    hst_ptr[1] = d_B;
    // Use the device alias of the mapped allocation.  Under UVA this is
    // the same address as hst_ptr, but querying it is correct everywhere.
    float ** dev_ptr = NULL;
    cudaHostGetDevicePointer((void**)&dev_ptr, hst_ptr, 0);
    p_kernel<<<1,1>>>(dev_ptr, d_A, d_B, n);
    // Kernel launches are asynchronous: wait so the device printf output
    // is flushed before the resources are freed and the process exits.
    cudaDeviceSynchronize();
    // Free the resources (fixed: second guard tested d_A instead of d_B).
    if (hst_ptr) cudaFreeHost(hst_ptr);
    if (d_A) cudaFree(d_A);
    if (d_B) cudaFree(d_B);
    if (h_A) free(h_A);
    if (h_B) free(h_B);
    return EXIT_SUCCESS;
}
|
11,026 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\
* Copyright (c) 2019 <GTEP> - All Rights Reserved *
* This file is part of HERMES Project. *
* Unauthorized copying of this file, via any medium is strictly prohibited. *
* Proprietary and confidential. *
* *
* Developers: *
* - Bismarck G. Souza Jr <bismarck@puc-rio.br> *
* - Nelson Inoue <inoue@puc-rio.br> *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
//
// o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o
// o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o
//
// C C C C C C U U U U D D D D D D D D D A A A A A A
// C C C C C C C C U U U U D D D D D D D D D D A A A A A A A A
// C C C C U U U U D D D D A A A A
// C C C C U U U U D D D D A A A A
// C C U U U U D D D D A A A A
// C C U U U U D D D D A A A A
// C C U U U U D D D D A A A A A A A A A A A A
// C C U U U U D D D D A A A A A A A A A A A A
// C C U U U U D D D D A A A A
// C C C C U U U U D D D D A A A A
// C C C C C C C C U U U U U U U U D D D D D D D D D D A A A A
// C C C C C C U U U U U U D D D D D D D D D A A A A
//
// o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o
// o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o+o
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
//----------------------------------------------------
// External Functions for one GPU Implementation
//----------------------------------------------------
extern "C" void EvaluateMmatrix(int Id, int BlockSizeX, int _iNumMeshNodes, int _iNumDofNode, int _inumDiaPart, double *K, double *M);
extern "C" void AssemblyStiffnessMatrixColor(dim3 blocksPerGrid, dim3 threadsPerBlock, int Id, int _iNumMeshNodes, int _iNumMeshElem, int _iNumDofNode, int _iNumElasMat, int numelcolor, int numelcolorprv,
int *connect, double *coord, double *prop, double *K, int *offsets, int offsets_size);
extern "C" void EvaluateStrainStateColor(dim3 blocksPerGrid, dim3 threadsPerBlock, int Id, int _iNumMeshNodes, int _iNumMeshElem, int _iNumDofNode, int _iNumElasMat, int numelcolor, int numelcolorprv,
int *connect, double *coord, double *U, double *strain);
extern "C" void EvaluateStressState(int Id, int BlockSizeX, int _iNumMeshElem, int _iNumElasMat, int *connect, int *LinkMeshColor, double *prop, double *strain, double *stress);
extern "C" void EvaluateNodalForceColor(dim3 blocksPerGrid, dim3 threadsPerBlock, int Id, int _iNumMeshNodes, int _iNumMeshElem, int _iNumDofNode, int _iNumElasMat, int numelcolornodalforce, int numelcolornodalforceprv,
int *connect, double *coord, int *LinkMeshMeshColor, int *LinkMeshCellColor, double *dP, double *B);
//=============================================================================
// Builds the 6x6 isotropic linear-elastic constitutive matrix for a 3D
// solid from the Young modulus E and Poisson ratio p.
__device__ void SolidMatPropMatrix(double E, double p, double C[6][6])
{
    int row, col;
    double sho;
    // Common stiffness factor of the isotropic material law.
    sho = (E*(1-p))/((1+p)*(1-2*p));
    const double offdiag = sho*(p/(1-p));             // normal-strain coupling
    const double shear   = sho*((1-2*p)/(2*(1-p)));   // shear diagonal term
    // Start from a zero matrix.
    for (row = 0; row < 6; row++)
        for (col = 0; col < 6; col++)
            C[row][col] = 0.;
    // Upper-left 3x3 block: normal-stress terms; lower-right diagonal: shear.
    for (row = 0; row < 3; row++) {
        for (col = 0; col < 3; col++)
            C[row][col] = (row == col) ? sho : offdiag;
        C[row+3][row+3] = shear;
    }
}
//=============================================================================
// Derivatives of the 8-node (trilinear) hexahedron shape functions with
// respect to the natural coordinates r, s, t at the point (r, s, t).
__device__ void DerivPhiRST(double r, double s, double t, double phi_r[8], double phi_s[8], double phi_t[8])
{
    const double rm = 1.0-r, rp = 1.0+r;
    const double sm = 1.0-s, sp = 1.0+s;
    const double tm = 1.0-t, tp = 1.0+t;
    // d(phi_i)/dr
    phi_r[0] = -0.125*sm*tm;
    phi_r[1] =  0.125*sm*tm;
    phi_r[2] =  0.125*sp*tm;
    phi_r[3] = -0.125*sp*tm;
    phi_r[4] = -0.125*sm*tp;
    phi_r[5] =  0.125*sm*tp;
    phi_r[6] =  0.125*sp*tp;
    phi_r[7] = -0.125*sp*tp;
    // d(phi_i)/ds
    phi_s[0] = -0.125*rm*tm;
    phi_s[1] = -0.125*rp*tm;
    phi_s[2] =  0.125*rp*tm;
    phi_s[3] =  0.125*rm*tm;
    phi_s[4] = -0.125*rm*tp;
    phi_s[5] = -0.125*rp*tp;
    phi_s[6] =  0.125*rp*tp;
    phi_s[7] =  0.125*rm*tp;
    // d(phi_i)/dt
    phi_t[0] = -0.125*rm*sm;
    phi_t[1] = -0.125*rp*sm;
    phi_t[2] = -0.125*rp*sp;
    phi_t[3] = -0.125*rm*sp;
    phi_t[4] =  0.125*rm*sm;
    phi_t[5] =  0.125*rp*sm;
    phi_t[6] =  0.125*rp*sp;
    phi_t[7] =  0.125*rm*sp;
}
//=============================================================================
// Computes the Jacobian of the trilinear hexahedral mapping at one
// integration point, along with its determinant and inverse.
//   phi_r/phi_s/phi_t : shape-function derivatives w.r.t. r, s, t (DerivPhiRST)
//   jac               : output 3x3 Jacobian (rows = d/dr, d/ds, d/dt)
//   X/Y/Z             : global coordinates of the 8 element nodes
//   detjac            : output determinant of jac
//   invjac            : output inverse of jac (cofactors / detjac)
// NOTE(review): detjac is used as a divisor with no zero/negative check --
// presumably elements are assumed non-degenerate; confirm upstream.
__device__ void Jacobian(double phi_r[8], double phi_s[8], double phi_t[8], double jac[3][3],
double X[8], double Y[8], double Z[8], double &detjac, double invjac[3][3])
{
int i, j;
// Zero the accumulator before summing nodal contributions.
for(i=0; i<3; i++)
for(j=0; j<3; j++)
jac[i][j] = 0.;
// Calculate the jacobian matrix by appropriately multiplying local
// derivatives by nodal coords
for(i=0; i<8; i++)
{
jac[0][0] += phi_r[i] * X[i];
jac[0][1] += phi_r[i] * Y[i];
jac[0][2] += phi_r[i] * Z[i];
jac[1][0] += phi_s[i] * X[i];
jac[1][1] += phi_s[i] * Y[i];
jac[1][2] += phi_s[i] * Z[i];
jac[2][0] += phi_t[i] * X[i];
jac[2][1] += phi_t[i] * Y[i];
jac[2][2] += phi_t[i] * Z[i];
}
// Jacobian determinant (cofactor expansion along the first row)
detjac = jac[0][0]*(jac[1][1]*jac[2][2]-jac[1][2]*jac[2][1])-jac[0][1]*(jac[1][0]*jac[2][2]-jac[1][2]*jac[2][0])+jac[0][2]*(jac[1][0]*jac[2][1]-jac[1][1]*jac[2][0]);
// Inverse Jacobian: adjugate (transposed cofactors) divided by detjac
invjac[0][0] = (jac[1][1]*jac[2][2] - jac[1][2]*jac[2][1]) /detjac;
invjac[0][1] =-(jac[0][1]*jac[2][2] - jac[0][2]*jac[2][1]) /detjac;
invjac[0][2] = (jac[0][1]*jac[1][2] - jac[0][2]*jac[1][1]) /detjac;
invjac[1][0] =-(jac[1][0]*jac[2][2] - jac[1][2]*jac[2][0]) /detjac;
invjac[1][1] = (jac[0][0]*jac[2][2] - jac[0][2]*jac[2][0]) /detjac;
invjac[1][2] =-(jac[0][0]*jac[1][2] - jac[0][2]*jac[1][0]) /detjac;
invjac[2][0] = (jac[1][0]*jac[2][1] - jac[1][1]*jac[2][0]) /detjac;
invjac[2][1] =-(jac[0][0]*jac[2][1] - jac[0][1]*jac[2][0]) /detjac;
invjac[2][2] = (jac[0][0]*jac[1][1] - jac[0][1]*jac[1][0]) /detjac;
}
//=============================================================================
// Maps local shape-function derivatives (w.r.t. r, s, t) to global
// derivatives (w.r.t. x, y, z) by applying the inverse Jacobian to each
// node's derivative triple.
__device__ void DerivXYZ(double invjac[3][3], double phi_r[8], double phi_s[8], double phi_t[8],
double deriv_x[8], double deriv_y[8], double deriv_z[8])
{
    for (int node = 0; node < 8; node++) {
        const double dr = phi_r[node];
        const double ds = phi_s[node];
        const double dt = phi_t[node];
        deriv_x[node] = invjac[0][0]*dr+invjac[0][1]*ds+invjac[0][2]*dt;
        deriv_y[node] = invjac[1][0]*dr+invjac[1][1]*ds+invjac[1][2]*dt;
        deriv_z[node] = invjac[2][0]*dr+invjac[2][1]*ds+invjac[2][2]*dt;
    }
}
//=============================================================================
// Fills the non-zero entries of the 6x24 strain-displacement matrix B for
// a hexahedral element; each node owns a 6x3 sub-block starting at column
// 3*node.  The caller must have zeroed B beforehand.
__device__ void Bmatrix(double deriv_x[8], double deriv_y[8], double deriv_z[8], double B[6][24])
{
    for (int node = 0; node < 8; node++) {
        const int c = 3*node;
        // column c (u_x dof)
        B[0][c]   = deriv_x[node];
        B[3][c]   = deriv_y[node];
        B[5][c]   = deriv_z[node];
        // column c+1 (u_y dof)
        B[1][c+1] = deriv_y[node];
        B[3][c+1] = deriv_x[node];
        B[4][c+1] = deriv_z[node];
        // column c+2 (u_z dof)
        B[2][c+2] = deriv_z[node];
        B[4][c+2] = deriv_y[node];
        B[5][c+2] = deriv_x[node];
    }
}
//=============================================================================
//=============================================================================
// Accumulates coeff * B^T * C * B into the 24x24 element stiffness
// matrix _k (called once per integration point; coeff is the Jacobian
// determinant times the Gauss weight).
__device__ void AssemblyK(double coeff, double C[6][6], double B[6][24], double _k[24][24])
{
    int row, col, m;
    double acc, CB[6][24];
    // First product: CB = C * B.
    for (row = 0; row < 6; row++) {
        for (col = 0; col < 24; col++) {
            CB[row][col] = 0.;
            for (m = 0; m < 6; m++) {
                CB[row][col] += C[row][m]*B[m][col];
            }
        }
    }
    // Second product, scaled and accumulated: _k += coeff * B^T * CB.
    for (row = 0; row < 24; row++) {
        for (col = 0; col < 24; col++) {
            acc = 0.;
            for (m = 0; m < 6; m++)
                acc += B[m][row]*CB[m][col];
            _k[row][col] += coeff*acc;
        }
    }
}
//==============================================================================
// Builds the diagonal (Jacobi) preconditioner M = 1/diag(K).  K is stored
// in a diagonal-band format; the main diagonal lives at band index
// (_inumDiaPart-1)/2.  One thread per degree of freedom.
__global__ void EvaluateMmatrixKernel(int _iNumMeshNodes, int _iNumDofNode, int _inumDiaPart, double *K, double *M)
{
    const int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
    const int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
    const int thread_id = (gridDim.x*blockDim.x)*yIndex + xIndex;
    const int ndof = _iNumDofNode*_iNumMeshNodes;   // total dof count
    const int off = (_inumDiaPart-1)/2;             // main-diagonal band index
    if (thread_id < ndof) {
        const double diag = K[thread_id + off*ndof];
        // Zero pivots stay zero in M instead of producing an inf.
        M[thread_id] = (diag != 0.) ? 1/diag : 0.;
    }
}
//=====================================================================================================================
// Host wrapper: builds the Jacobi preconditioner M = 1/diag(K) on device
// Id using a BlockSizeX x BlockSizeX block layout, then blocks until done.
// (Removed the dead `double time` local: it was assigned from clock() but
// never used because the timing printf was commented out.)
void EvaluateMmatrix(int Id, int BlockSizeX, int _iNumMeshNodes, int _iNumDofNode, int _inumDiaPart, double *K, double *M)
{
    cudaSetDevice(Id);
    dim3 threadsPerBlock(BlockSizeX, BlockSizeX);
    // Square grid sized so blocks*threads covers all _iNumDofNode*_iNumMeshNodes
    // dofs; the kernel bounds-checks the surplus tail threads.
    dim3 blocksPerGrid(int(sqrt(double(_iNumDofNode*_iNumMeshNodes))/BlockSizeX)+1, int(sqrt(double(_iNumDofNode*_iNumMeshNodes))/BlockSizeX)+1);
    EvaluateMmatrixKernel<<<blocksPerGrid, threadsPerBlock>>>(_iNumMeshNodes, _iNumDofNode, _inumDiaPart, K, M);
    cudaDeviceSynchronize();
}
//==============================================================================
// Assembles the element stiffness matrices of one element color into the
// global diagonal-format matrix K.  One thread per element of the color.
//   numno/numel          : total node / element counts of the mesh
//   numelcolor           : number of elements in this color
//   numelcolorprv        : offset of this color's first element in connect
//   connect              : color-ordered connectivity; column (i+3) holds the
//                          1-based node id of local node i, column 1 the
//                          1-based material id
//   coord                : nodal coordinates, column-major (x | y | z blocks)
//   prop                 : material properties (E block, then Poisson block)
//   K                    : global stiffness, diagonal format, 3*numno rows
//   offsets/offsets_size : sorted band offsets of the diagonal format
// Elements of one color share no nodes (graph coloring), so the "+=" into
// K needs no atomics -- presumably guaranteed by the coloring setup; confirm.
__global__ void AssemblyStiffnessMatrixKernel(int numno, int numel, int numelcolor, int numelcolorprv, int numdof, int nummat, int *connect, double *coord, double *prop, double *K, int *offsets, int offsets_size)
{
double r, s, t;
double xgaus[2], wgaus[2], E, p;
int no, row, col, off_full, off_part;
int ig, jg, kg;
int i, j, LM[24];
double X[8], Y[8], Z[8], C[6][6], phi_r[8], phi_s[8], phi_t[8], jac[3][3], invjac[3][3];
double detjac, deriv_x[8], deriv_y[8], deriv_z[8], B[6][24], k[24][24];
// offsets stores band offsets for both halves; halve to get the index of
// the main diagonal, used as the starting point of the band search below.
offsets_size /= 2;
const int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
const int thread_id = (gridDim.x*blockDim.x)*yIndex + xIndex;
if(thread_id < numelcolor) { // ------------------------------------------------------------------------------------
// Gather nodal coordinates and build the 24-entry location array LM
// (global dof ids of this element's 8 nodes x 3 dofs).
for(i=0; i<8; i++) {
no = connect[thread_id + numelcolorprv + (i+3)*numel]-1;
//if(thread_id==0) printf("%d\n", no);
X[i] = coord[no];
Y[i] = coord[no+numno];
Z[i] = coord[no+2*numno];
//printf("%f %f %f\n", X[i], Y[i], Z[i]);
LM[3*i] = 3*no;
LM[3*i+1] = 3*no+1;
LM[3*i+2] = 3*no+2;
}
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Zero the element stiffness accumulator and strain-displacement matrix.
for(i=0; i<24; i++)
for(j=0; j<24; j++)
k[i][j] = 0.;
for(i=0; i<6; i++)
for(j=0; j<24; j++)
B[i][j] = 0.;
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Material properties via the 1-based material id in connect column 1.
E = prop[(connect[thread_id + numelcolorprv + numel])-1];
p = prop[(connect[thread_id + numelcolorprv + numel])-1+nummat];
SolidMatPropMatrix(E, p, C);
//printf("%f %f %f %f %f %f %f %f %f %f %f %f\n", C[0][0], C[0][1], C[0][2], C[1][0], C[1][1], C[1][2], C[2][0], C[2][1], C[2][2], C[3][3], C[4][4], C[5][5]);
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Points of integration (2x2x2 Gauss rule, unit weights):
xgaus[0]=-0.577350269189626;
xgaus[1]= 0.577350269189626;
wgaus[0]= 1.;
wgaus[1]= 1.;
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
for(ig=0; ig<2; ig++) { // =================== Loop ig ===================
s=xgaus[ig];
for(jg=0; jg<2; jg++) { // =================== Loop jg ===================
r=xgaus[jg];
for(kg=0; kg<2; kg++) { // =================== Loop kg ===================
t=xgaus[kg];
// Shape function derivative:
DerivPhiRST(r, s, t, phi_r, phi_s, phi_t);
// Evaluate the Jacobian determinant and inverse Jacobian matrix:
Jacobian(phi_r, phi_s, phi_t, jac, X, Y, Z, detjac, invjac);
//if(Id == 0) printf("%f %f %f %f %f %f %f %f %f\n", jac[0][0], jac[0][1], jac[0][2], jac[1][0], jac[1][1], jac[1][2], jac[2][0], jac[2][1], jac[2][2]);
//if(Id == 0) printf("%f\n", detjac);
// Evaluate the global derivatives of the shape functions:
DerivXYZ(invjac, phi_r, phi_s, phi_t, deriv_x, deriv_y, deriv_z);
//printf("%f %f %f %f %f %f %f %f\n", deriv_z[0], deriv_z[1], deriv_z[2], deriv_z[3], deriv_z[4], deriv_z[5], deriv_z[6], deriv_z[7]);
// Evaluate the B matrix:
Bmatrix(deriv_x, deriv_y, deriv_z, B);
// Assembly the element K matrix (weight is 1, so coeff = detjac):
AssemblyK(detjac, C, B, k);
} // =================== Loop kg ===================
} // =================== Loop jg ===================
} // =================== Loop ig ===================
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/*for(i=0; i<24; i++)
for(j=0; j<24; j++)
printf("%f\n", k[i][j]);*/
// ------------------------------------------------------------------------------------------------------------
// Scatter k into the diagonal-format K: for each (row, col) pair find the
// band whose offset equals col-row by walking the sorted offsets array
// from the main diagonal.  NOTE(review): the while loop never terminates
// if col-row is not present in offsets -- assumes the sparsity pattern
// covers every element coupling; confirm against the matrix setup.
for(i=0; i<24; i++) {
row = LM[i];
off_part = offsets_size;
for(j=0; j<24; j++) {
col = LM[j];
off_full = col - row;
while (true) {
if (offsets[off_part] == off_full)
break;
if (offsets[off_part] < off_full)
off_part++;
else
off_part--;
}
K[row + off_part*3*numno] += k[i][j];
}
}
} // -------------------------------------------------------------------------------------------------------------
}
//=====================================================================================================================
// Host wrapper: assembles the stiffness contribution of one element color
// on device Id, then blocks until the kernel has finished.
void AssemblyStiffnessMatrixColor(dim3 blocksPerGrid, dim3 threadsPerBlock, int Id, int _iNumMeshNodes, int _iNumMeshElem, int _iNumDofNode, int _iNumElasMat, int numelcolor, int numelcolorprv,
int *connect, double *coord, double *prop, double *K, int *offsets, int offsets_size)
{
    cudaSetDevice(Id);
    AssemblyStiffnessMatrixKernel<<< blocksPerGrid, threadsPerBlock >>>(
        _iNumMeshNodes, _iNumMeshElem, numelcolor, numelcolorprv,
        _iNumDofNode, _iNumElasMat,
        connect, coord, prop, K, offsets, offsets_size);
    cudaDeviceSynchronize();
}
//==============================================================================
// Accumulates the strain components of one element color at all eight
// Gauss points.  One thread per element of the color.
//   D      : global displacement vector, 3 dofs per node
//   strain : per-Gauss-point strains, index = elem + comp*numel + gp*numel*6
//            (6 components Exx,Eyy,Ezz,Exy,Eyz,Ezx; gp = 0..7)
// The element slot is connect[thread_id + numelcolorprv]-1 (1-based id in
// connect column 0).  Elements of one color are disjoint, so the "+=" is
// presumably race-free -- confirm with the coloring setup.
// NOTE(review): wgaus is set but unused (weights are all 1).
__global__ void EvaluateStrainStateKernel(int numno, int numel, int numelcolor, int numelcolorprv, int numdof, int nummat, int *connect, double *coord, double *D, double *strain)
{
double r, s, t;
double xgaus[2], wgaus[2];
int ig, jg, kg;
int i, no, cont;
double X[8], Y[8], Z[8], phi_r[8], phi_s[8], phi_t[8], jac[3][3], invjac[3][3];
double detjac, deriv_x[8], deriv_y[8], deriv_z[8];
const int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
const int thread_id = (gridDim.x*blockDim.x)*yIndex + xIndex;
if(thread_id < numelcolor) { // ------------------------------------------------------------------------------------
// Gather the element's nodal coordinates.
for(i=0; i<8; i++) {
no = connect[thread_id + numelcolorprv + (i+3)*numel]-1;
X[i] = coord[no];
Y[i] = coord[no+numno];
Z[i] = coord[no+2*numno];
}
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Points of integration (2x2x2 Gauss rule):
xgaus[0]=-0.577350269189626;
xgaus[1]= 0.577350269189626;
wgaus[0]= 1.;
wgaus[1]= 1.;
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// cont counts the Gauss points 0..7 across the three nested loops.
cont = 0;
for(ig=0; ig<2; ig++) { // =================== Loop ig ===================
s=xgaus[ig];
for(jg=0; jg<2; jg++) { // =================== Loop jg ===================
r=xgaus[jg];
for(kg=0; kg<2; kg++) { // =================== Loop kg ===================
t=xgaus[kg];
// Shape function derivative:
DerivPhiRST(r, s, t, phi_r, phi_s, phi_t);
// Evaluate the Jacobian determinant and inverse Jacobian matrix:
Jacobian(phi_r, phi_s, phi_t, jac, X, Y, Z, detjac, invjac);
// Evaluate the global derivatives of the shape functions:
DerivXYZ(invjac, phi_r, phi_s, phi_t, deriv_x, deriv_y, deriv_z);
// *******************************************************************************************************
// Strain = sum over nodes of (global shape derivative) x (nodal displacement).
for(i=0; i<8; i++) {
// Exx
strain[connect[thread_id + numelcolorprv]-1+0*numel+cont*numel*6] += deriv_x[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1) ];
// Eyy
strain[connect[thread_id + numelcolorprv]-1+1*numel+cont*numel*6] += deriv_y[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+1];
// Ezz
strain[connect[thread_id + numelcolorprv]-1+2*numel+cont*numel*6] += deriv_z[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+2];
// Exy
strain[connect[thread_id + numelcolorprv]-1+3*numel+cont*numel*6] += deriv_y[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1) ] +
deriv_x[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+1];
// Eyz
strain[connect[thread_id + numelcolorprv]-1+4*numel+cont*numel*6] += deriv_z[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+1] +
deriv_y[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+2];
// Ezx
strain[connect[thread_id + numelcolorprv]-1+5*numel+cont*numel*6] += deriv_z[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1) ] +
deriv_x[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+2];
}
cont++;
} // =================== Loop kg ===================
} // =================== Loop jg ===================
} // =================== Loop ig ===================
} // -------------------------------------------------------------------------------------------------------------
}
//==============================================================================
// Accumulates element-averaged strains (mean of the 8 Gauss points, hence
// the /8 on every contribution) for one element color.  One thread per
// element of the color; layout: strain[elem + comp*numel], 6 components.
//   D : global displacement vector, 3 dofs per node
// Elements of one color are disjoint, so the "+=" is presumably
// race-free -- confirm with the coloring setup.
// NOTE(review): wgaus is set but unused (weights are all 1).
__global__ void EvaluateAverageStrainStateKernel(int numno, int numel, int numelcolor, int numelcolorprv, int numdof, int nummat, int *connect, double *coord, double *D, double *strain)
{
double r, s, t;
double xgaus[2], wgaus[2];
int ig, jg, kg;
int i, no;
double X[8], Y[8], Z[8], phi_r[8], phi_s[8], phi_t[8], jac[3][3], invjac[3][3];
double detjac, deriv_x[8], deriv_y[8], deriv_z[8];
const int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
const int thread_id = (gridDim.x*blockDim.x)*yIndex + xIndex;
if(thread_id < numelcolor) { // ------------------------------------------------------------------------------------
// Gather the element's nodal coordinates.
for(i=0; i<8; i++) {
no = connect[thread_id + numelcolorprv + (i+3)*numel]-1;
X[i] = coord[no];
Y[i] = coord[no+numno];
Z[i] = coord[no+2*numno];
}
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Points of integration (2x2x2 Gauss rule):
xgaus[0]=-0.577350269189626;
xgaus[1]= 0.577350269189626;
wgaus[0]= 1.;
wgaus[1]= 1.;
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
for(ig=0; ig<2; ig++) { // =================== Loop ig ===================
s=xgaus[ig];
for(jg=0; jg<2; jg++) { // =================== Loop jg ===================
r=xgaus[jg];
for(kg=0; kg<2; kg++) { // =================== Loop kg ===================
t=xgaus[kg];
// Shape function derivative:
DerivPhiRST(r, s, t, phi_r, phi_s, phi_t);
// Evaluate the Jacobian determinant and inverse Jacobian matrix:
Jacobian(phi_r, phi_s, phi_t, jac, X, Y, Z, detjac, invjac);
// Evaluate the global derivatives of the shape functions:
DerivXYZ(invjac, phi_r, phi_s, phi_t, deriv_x, deriv_y, deriv_z);
// *******************************************************************************************************
// Same strain sums as EvaluateStrainStateKernel, but divided by the
// 8 Gauss points so the result is the element average.
for(i=0; i<8; i++) {
// Exx
strain[connect[thread_id + numelcolorprv]-1+0*numel] += (deriv_x[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1) ])/8;
// Eyy
strain[connect[thread_id + numelcolorprv]-1+1*numel] += (deriv_y[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+1])/8;
// Ezz
strain[connect[thread_id + numelcolorprv]-1+2*numel] += (deriv_z[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+2])/8;
// Exy
strain[connect[thread_id + numelcolorprv]-1+3*numel] += (deriv_y[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1) ] +
deriv_x[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+1])/8;
// Eyz
strain[connect[thread_id + numelcolorprv]-1+4*numel] += (deriv_z[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+1] +
deriv_y[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+2])/8;
// Ezx
strain[connect[thread_id + numelcolorprv]-1+5*numel] += (deriv_z[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1) ] +
deriv_x[i]*D[3*(connect[thread_id + numelcolorprv + (i+3)*numel]-1)+2])/8;
}
} // =================== Loop kg ===================
} // =================== Loop jg ===================
} // =================== Loop ig ===================
} // -------------------------------------------------------------------------------------------------------------
}
//=====================================================================================================================
// Host wrapper: evaluates strains for one element color on device Id and
// blocks until the kernel has finished.  The element-averaged variant is
// the active one; the per-Gauss-point variant is kept for reference.
void EvaluateStrainStateColor(dim3 blocksPerGrid, dim3 threadsPerBlock, int Id, int _iNumMeshNodes, int _iNumMeshElem, int _iNumDofNode, int _iNumElasMat, int numelcolor, int numelcolorprv,
int *connect, double *coord, double *U, double *strain)
{
    cudaSetDevice(Id);
    //EvaluateStrainStateKernel <<< blocksPerGrid, threadsPerBlock >>> (_iNumMeshNodes, _iNumMeshElem, numelcolor, numelcolorprv, _iNumDofNode, _iNumElasMat, connect, coord, U, strain);
    EvaluateAverageStrainStateKernel <<< blocksPerGrid, threadsPerBlock >>>(
        _iNumMeshNodes, _iNumMeshElem, numelcolor, numelcolorprv,
        _iNumDofNode, _iNumElasMat, connect, coord, U, strain);
    cudaDeviceSynchronize();
}
//==============================================================================
// Computes stress = C * strain for every element at each of the eight
// Gauss points.  One thread per element.  Layout of strain/stress:
// index = elem + comp*numel + gp*numel*6 (6 components per Gauss point).
__global__ void EvaluateStressStateKernel(int numel, int nummat, int *connect, double *prop, double *strain, double *stress)
{
    int comp, m, gp;
    double E, p, C[6][6];
    const int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
    const int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
    const int thread_id = (gridDim.x*blockDim.x)*yIndex + xIndex;
    if (thread_id < numel) {
        // Material properties via the 1-based material id in connect column 1;
        // prop stores the E block first, the Poisson block nummat entries later.
        E = prop[(connect[thread_id + numel])-1];
        p = prop[(connect[thread_id + numel])-1+nummat];
        SolidMatPropMatrix(E, p, C);
        // Accumulate the matrix-vector product per Gauss point.
        for (gp = 0; gp < 8; gp++) {            // the 2x2x2 Gauss points
            for (m = 0; m < 6; m++) {           // strain component
                const double e = strain[thread_id + m*numel + gp*numel*6];
                for (comp = 0; comp < 6; comp++) // stress component
                    stress[thread_id + comp*numel + gp*numel*6] += C[comp][m]*e;
            }
        }
    }
}
//==============================================================================
// Computes element-averaged stress = C * strain (one set of 6 components
// per element).  One thread per element; layout: stress[elem + comp*numel].
__global__ void EvaluateAverageStressStateKernel(int numel, int nummat, int *connect, int *LinkMeshColor, double *prop, double *strain, double *stress)
{
    double E, p, C[6][6];
    const int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
    const int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
    const int thread_id = (gridDim.x*blockDim.x)*yIndex + xIndex;
    if (thread_id < numel) {
        // LinkMeshColor maps this element to its slot in the color-ordered
        // connectivity; connect[... + numel] is the 1-based material id.
        E = prop[(connect[LinkMeshColor[thread_id] + numel])-1];
        p = prop[(connect[LinkMeshColor[thread_id] + numel])-1+nummat];
        SolidMatPropMatrix(E, p, C);
        // Accumulate the 6x6 matrix-vector product.
        for (int m = 0; m < 6; m++) {            // strain component
            const double e = strain[thread_id + m*numel];
            for (int comp = 0; comp < 6; comp++) // stress component
                stress[thread_id + comp*numel] += C[comp][m]*e;
        }
    }
}
//=====================================================================================================================
// Host wrapper: zeroes the stress array and evaluates element-averaged
// stresses from strains on device Id, then blocks until done.
// (Removed the dead `double time` local: it was assigned from clock() but
// never used because the timing printf was commented out.)
void EvaluateStressState(int Id, int BlockSizeX, int numel, int nummat, int *connect, int *LinkMeshColor, double *prop, double *strain, double *stress)
{
    cudaSetDevice(Id);
    // The kernels accumulate with "+=", so stress must start from zero.
    cudaMemset(stress, 0, sizeof(double)*numel*6);
    dim3 threadsPerBlock(BlockSizeX, BlockSizeX);
    // Square grid covering numel threads; the kernels bounds-check the tail.
    dim3 blocksPerGrid(int(sqrt(double(numel))/BlockSizeX)+1, int(sqrt(double(numel))/BlockSizeX)+1);
    EvaluateAverageStressStateKernel <<<blocksPerGrid, threadsPerBlock>>>(numel, nummat, connect, LinkMeshColor, prop, strain, stress);
    // Per-Gauss-point variant (stresses at all 8 integration points):
    //EvaluateStressStateKernel <<<blocksPerGrid, threadsPerBlock>>>(numel, nummat, connect, prop, strain, stress);
    cudaDeviceSynchronize();
}
//==============================================================================
// Accumulates equivalent nodal forces from cell pressure increments dP for
// one element color.  One thread per element of the color.
//   LinkMeshMeshColor : maps color-ordered slot -> element index in connect
//   LinkMeshCellColor : maps color-ordered slot -> cell index into dP
//   B                 : output force vector, 3 dofs per node
// Force contribution per node i and Gauss point: grad(phi_i)*detjac*dP.
// The "+=" into B is not atomic, so presumably elements of one color share
// no nodes (that is what the coloring is for) -- confirm upstream.
// NOTE(review): wgaus is set but unused (weights are all 1).
__global__ void EvaluateNodalForceKernel(int numno, int numel, int numelcolor, int numelcolorprv, int numdof, int nummat, int *connect, double *coord, int *LinkMeshMeshColor, int *LinkMeshCellColor, double *dP, double *B)
{
double r, s, t;
double xgaus[2], wgaus[2];
int ig, jg, kg;
int i, no;
double X[8], Y[8], Z[8], phi_r[8], phi_s[8], phi_t[8], jac[3][3], invjac[3][3];
double detjac, deriv_x[8], deriv_y[8], deriv_z[8];
const int xIndex = blockIdx.x*blockDim.x + threadIdx.x;
const int yIndex = blockIdx.y*blockDim.y + threadIdx.y;
const int thread_id = (gridDim.x*blockDim.x)*yIndex + xIndex;
if(thread_id < numelcolor) { // ------------------------------------------------------------------------------------
// Gather the element's nodal coordinates via the color -> mesh mapping.
for(i=0; i<8; i++) {
no = connect[LinkMeshMeshColor[thread_id + numelcolorprv] + (i+3)*numel]-1;
X[i] = coord[no];
Y[i] = coord[no+numno];
Z[i] = coord[no+2*numno];
}
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Points of integration (2x2x2 Gauss rule):
xgaus[0]=-0.577350269189626;
xgaus[1]= 0.577350269189626;
wgaus[0]= 1.;
wgaus[1]= 1.;
// ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
for(ig=0; ig<2; ig++) { // =================== Loop ig ===================
s=xgaus[ig];
for(jg=0; jg<2; jg++) { // =================== Loop jg ===================
r=xgaus[jg];
for(kg=0; kg<2; kg++) { // =================== Loop kg ===================
t=xgaus[kg];
// Shape function derivative:
DerivPhiRST(r, s, t, phi_r, phi_s, phi_t);
// Evaluate the Jacobian determinant and inverse Jacobian matrix:
Jacobian(phi_r, phi_s, phi_t, jac, X, Y, Z, detjac, invjac);
// Evaluate the global derivatives of the shape functions:
DerivXYZ(invjac, phi_r, phi_s, phi_t, deriv_x, deriv_y, deriv_z);
// *******************************************************************************************************
// Scatter this Gauss point's force contribution to the 8 nodes.
for(i=0; i<8; i++) {
no = connect[LinkMeshMeshColor[thread_id + numelcolorprv] + (i+3)*numel]-1;
// Fx
B[3*no ] += deriv_x[i]*detjac*dP[LinkMeshCellColor[thread_id + numelcolorprv]];
// Fy
B[3*no+1] += deriv_y[i]*detjac*dP[LinkMeshCellColor[thread_id + numelcolorprv]];
// Fz
B[3*no+2] += deriv_z[i]*detjac*dP[LinkMeshCellColor[thread_id + numelcolorprv]];
}
} // =================== Loop kg ===================
} // =================== Loop jg ===================
} // =================== Loop ig ===================
} // -------------------------------------------------------------------------------------------------------------
}
//=====================================================================================================================
// Host wrapper: accumulates nodal forces for one element color on device
// Id, then blocks until the kernel has finished.
void EvaluateNodalForceColor(dim3 blocksPerGrid, dim3 threadsPerBlock, int Id, int _iNumMeshNodes, int _iNumMeshElem, int _iNumDofNode, int _iNumElasMat, int numelcolor, int numelcolorprv,
int *connect, double *coord, int *LinkMeshMeshColor, int *LinkMeshCellColor, double *dP, double *B)
{
    cudaSetDevice(Id);
    EvaluateNodalForceKernel <<< blocksPerGrid, threadsPerBlock >>>(
        _iNumMeshNodes, _iNumMeshElem, numelcolor, numelcolorprv,
        _iNumDofNode, _iNumElasMat,
        connect, coord, LinkMeshMeshColor, LinkMeshCellColor, dP, B);
    cudaDeviceSynchronize();
} |
11,027 | #include <stdio.h>
#include <cuda_runtime.h>
#include<device_launch_parameters.h>
// Inclusive prefix sum (Hillis-Steele) over blockDim.x+1 ints of d,
// launched as <<<1, count-1>>> for an array of `count` elements (element 0
// is already its own prefix sum).
//
// Fixes two defects of the original version:
//  1. Data race: `d[myIdx + diff] += d[myIdx]` let one thread read an
//     element another thread was concurrently writing in the same step.
//     The read is now separated from the write by a barrier.
//  2. `__syncthreads()` sat inside a while-loop whose trip count varied
//     per thread -- a divergent barrier is undefined behavior.  The loop
//     bound is now uniform (blockDim.x) and idle threads are predicated.
__global__ void scan(int *d)
{
    const int tid = threadIdx.x;
    for (int diff = 1; diff < blockDim.x + 1; diff <<= 1) {
        const bool active = (tid + diff < blockDim.x + 1);
        int partial = 0;
        if (active) partial = d[tid];          // read phase
        __syncthreads();                       // all reads done before any write
        if (active) d[tid + diff] += partial;  // write phase
        __syncthreads();                       // writes visible before next step
    }
}
int main()
{
    // Inclusive scan of 1..16 on the GPU; expected output h[i] = (i+1)(i+2)/2.
    const int count = 16;
    const int size = count * sizeof(int);
    int h[16];
    for (int i = 0; i < count; i++) {
        h[i] = i + 1;
    }
    int *d;
    cudaMalloc(&d, size);
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
    // count-1 threads: element 0 is already its own prefix sum.
    scan<<<1, count-1>>>(d);
    // cudaMemcpy on the default stream blocks until the kernel is done.
    cudaMemcpy(h, d, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < count; i++) {
        printf("%d: %d\n", i, h[i]);
    }
    cudaFree(d);   // was leaked in the original
    return 0;
}
|
11,028 | __device__
// Cubic B-spline basis function, single precision.  Support is |x| < 2:
// inner piece |x|^2(|x|-2)/2 + 2/3 for |x| < 1, outer piece -(|x|-2)^3/6
// for 1 <= |x| < 2, zero elsewhere.
float bspline03(float x) {
    float a = fabsf(x);
    if (a >= 2.f)
        return 0.f;                        // outside the support
    if (a >= 1.f) {                        // outer piece
        a -= 2.f;
        return a * a * a * (-1.f / 6.f);
    }
    return a * a * (a - 2.f) * 0.5f + 2.f / 3.f;  // inner piece
}
__device__
// Cubic B-spline basis function, double precision overload of the float
// version above; same piecewise definition with support |x| < 2.
double bspline03(double x) {
    double a = fabs(x);
    if (a >= 2.0)
        return 0.0;                        // outside the support
    if (a >= 1.0) {                        // outer piece
        a -= 2.0;
        return a * a * a * (-1.0 / 6.0);
    }
    return a * a * (a - 2.0) * 0.5 + 2.0 / 3.0;   // inner piece
}
|
11,029 | #include "includes.h"
// Reverses the first n ints of d in place, staging through dynamically
// sized shared memory.  Launch with >= n threads in one block and
// n*sizeof(int) bytes of dynamic shared memory.
//
// Fix: the original had no bounds guard, so any launch with
// blockDim.x > n read and wrote out of bounds.  The loads/stores are now
// predicated while the barrier stays outside the conditional so every
// thread in the block reaches it.
__global__ void dynamicReverse(int *d, int n)
{
    extern __shared__ int s[];
    int t = threadIdx.x;
    int tr = n - t - 1;
    if (t < n) s[t] = d[t];
    __syncthreads();
    if (t < n) d[t] = s[tr];
} |
11,030 | #include <stdio.h>
#include <iostream>
#include <ctime>
#include <cmath>
#include <fstream>
#include <unistd.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
/**
* CUDA Kernel Device code
*
* Computes the vector addition of A and B into C. The 3 vectors have the same
* number of elements numElements. Each thread adds k elements of vectors.
*/
__global__ void vectorAdd(const float *A, const float *B, float *C,
        const int numElements, const int k) {
    // Each thread sums a contiguous chunk of k elements starting at
    // threadIndex * k; the bounds check trims the final partial chunk.
    const int base = (blockDim.x * blockIdx.x + threadIdx.x) * k;
    for (int offset = 0; offset < k; offset++) {
        const int idx = base + offset;
        if (idx < numElements)
            C[idx] = A[idx] + B[idx];
    }
}
/* Abort the program with a message when a CUDA runtime call fails. */
static void checkCuda(cudaError_t err, const char *what) {
    if (err != cudaSuccess) {
        fprintf(stderr, "%s (error code %s)!\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
/* Abort unless h_C[i] == h_A[i] + h_B[i] within float tolerance. */
static void verifyElement(const float *h_A, const float *h_B,
        const float *h_C, int i) {
    if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) {
        fprintf(stderr, "Result verification failed at element %d!\n", i);
        exit(EXIT_FAILURE);
    }
}
int main(int argc, char** argv) {
    int numElements = 0;
    int k = 1;
    int opt = 0;
    // Parse -n (vector length) and -k (elements handled per thread).
    while ((opt = getopt(argc, argv, "n:k:")) != -1) {
        switch (opt) {
        case 'n': numElements = atoi(optarg);
            break;
        case 'k': k = atoi(optarg);
            break;
        case '?':
            // BUG FIX: the original passed the second half of the usage
            // text as an extra fprintf argument with no matching %s, so
            // it was silently dropped. Concatenate instead.
            fprintf(stderr, "Usage: ./addVector -n numElementsToAdd"
                    " -k numElementsToAddInOneThread\n");
            exit(-1);
        }
    }
    if ((numElements < 1) || (k < 1)) {
        fprintf(stderr, "Bad parameters!\n");
        exit(-1);
    }
    printf("Adding vectors of size %d ...\n", numElements);
    // Allocate the host input vectors.
    size_t size = numElements * sizeof (float);
    float *h_A = (float *) malloc(size);
    float *h_B = (float *) malloc(size);
    float *h_C = (float *) malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL) {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    // Initialize the host input vectors with values in [0, 1].
    srand(time(0));
    for (int i = 0; i < numElements; ++i) {
        h_A[i] = rand() / (float) RAND_MAX;
        h_B[i] = rand() / (float) RAND_MAX;
    }
    // Allocate the device vectors.
    float *d_A = NULL, *d_B = NULL, *d_C = NULL;
    checkCuda(cudaMalloc((void **) &d_A, size),
              "Failed to allocate device vector A");
    checkCuda(cudaMalloc((void **) &d_B, size),
              "Failed to allocate device vector B");
    checkCuda(cudaMalloc((void **) &d_C, size),
              "Failed to allocate device vector C");
    printf("Copy input data from the host memory to the CUDA device\n");
    checkCuda(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice),
              "Failed to copy vector A from host to device");
    checkCuda(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice),
              "Failed to copy vector B from host to device");
    // Launch config: ceil(numElements/k) logical threads, 512 per block.
    int threadsPerBlock = 512;
    int blocksPerGrid = ((numElements + k - 1) / k + threadsPerBlock - 1) / threadsPerBlock;
    // CUDA events for measuring the kernel execution time.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    printf("CUDA kernel launch with %d blocks of %d threads\n",
           blocksPerGrid, threadsPerBlock);
    cudaEventRecord(start);
    vectorAdd <<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements, k);
    cudaEventRecord(stop);
    checkCuda(cudaGetLastError(), "Failed to launch vectorAdd kernel");
    // Evaluating execution time.
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    // One add per element -> GFLOP/s.
    float bandwidth = numElements / milliseconds / 1e+6;
    printf("Copy output data from the CUDA device to the host memory\n");
    // BUG FIX: the original split this message across two fprintf
    // arguments with no %s in the format, so the error code never printed.
    checkCuda(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost),
              "Failed to copy vector C from device to host");
    // Verify the tail of the result. BUG FIX: guard against
    // numElements < 5, which previously produced negative indices.
    int tailStart = numElements > 5 ? numElements - 5 : 0;
    for (int i = tailStart; i < numElements; ++i)
        verifyElement(h_A, h_B, h_C, i);
    // Verify 10 random samples.
    srand(time(0));
    for (int counter = 0; counter < 10; counter++) {
        int i = (int) ( (rand() / (float) RAND_MAX) * numElements );
        if (i >= numElements) i = numElements - 1;  // rand()==RAND_MAX edge
        verifyElement(h_A, h_B, h_C, i);
    }
    // Free device global memory and timing events (events leaked before).
    checkCuda(cudaFree(d_A), "Failed to free device vector A");
    checkCuda(cudaFree(d_B), "Failed to free device vector B");
    checkCuda(cudaFree(d_C), "Failed to free device vector C");
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Free host memory.
    free(h_A);
    free(h_B);
    free(h_C);
    // Reset the device and exit.
    checkCuda(cudaDeviceReset(), "Failed to deinitialize the device");
    // Write performance data to file for the performance plot.
    std::ofstream performanceData;
    performanceData.open("performanceData.txt", std::ios::app);
    performanceData << log10(numElements) << "\t" << bandwidth << "\n";
    performanceData.close();
    printf("Done in %f milliseconds with computational performance %f GFLOPs\n",
           milliseconds, bandwidth);
    return 0;
}
|
11,031 | #include <stdio.h>
#define N 100
// One block per element: blockIdx.x (a built-in set by the CUDA runtime)
// selects the element this launch instance adds.
__global__ void add_block(int *res, int *a, int *b) {
    const int idx = blockIdx.x;
    // Device-side printf is allowed, but ordering across blocks is
    // unspecified.
    printf("%d\n", idx);
    res[idx] = a[idx] + b[idx];
}
// One thread per element within a single block.
__global__ void add_thread(int *res, int *a, int *b) {
    const int idx = threadIdx.x;
    res[idx] = a[idx] + b[idx];
}
int main(void) {
    int sum[N], lhs[N], rhs[N];
    // Seed the inputs: lhs[i] = i, rhs[i] = 2i.
    for (int i = 0; i < N; i++) {
        lhs[i] = i;
        rhs[i] = 2 * i;
    }
    // Allocate device buffers (error checking deliberately omitted in
    // this demo).
    int *dev_res, *dev_a, *dev_b;
    cudaMalloc(&dev_res, N * sizeof(int));
    cudaMalloc(&dev_a, N * sizeof(int));
    cudaMalloc(&dev_b, N * sizeof(int));
    // Copy the inputs onto the GPU.
    cudaMemcpy(dev_a, lhs, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, rhs, N * sizeof(int), cudaMemcpyHostToDevice);
    // <<<N,1>>> would run N single-thread blocks; <<<1,N>>> runs one block
    // of N threads. For this toy size the two are interchangeable — each
    // kernel instance identifies itself via blockIdx.x / threadIdx.x.
    // add_block<<<N,1>>>(dev_res, dev_a, dev_b);
    add_thread<<<1,N>>>(dev_res, dev_a, dev_b);
    // Bring the result back to the CPU.
    cudaMemcpy(sum, dev_res, N * sizeof(int), cudaMemcpyDeviceToHost);
    // Release device memory.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_res);
    // Print the sums.
    for (int i = 0; i < N; i++) {
        printf("%d, ", sum[i]);
    }
    printf("\n");
    return 0;
}
|
11,032 | #include <iostream>
#include <iomanip>
#include <ctime>
#include <cfloat>
#include <pthread.h>
#include <cuda.h>
#include <stdio.h>
using namespace std;
typedef double (*integrable)(double);
// Integrand used throughout this sample: f(x) = x^2.
__host__ __device__ double parabola(double x)
{
    return x * x;
}
// Work description for one integration worker over steps [bstep, estep).
// NOTE(review): appears unused in the visible code — likely left over from
// a pthread version (pthread.h is included); confirm before removing.
typedef struct integration_args_tag
{
unsigned long long bstep;   // first step index (inclusive)
unsigned long long estep;   // last step index (exclusive)
double h;                   // step width
double a;                   // lower bound of the full interval
double b;                   // upper bound of the full interval
integrable f;               // integrand
double result;              // partial sum produced by this worker
} integration_args;
// Left-Riemann integration of f over [a, b] with `steps` rectangles,
// accumulating per-thread partial sums in dynamic shared memory.
//
// NOTE(review): thread_count comes from blockDim.x, but thread_id is a
// *global* index. With several blocks, each block divides the WHOLE range
// among its own blockDim.x thread slots, so block 0 alone covers [a, b]
// and the other blocks do redundant work whose sums are never read. The
// result is still correct for the launch in main(), but only because the
// dynamic shared allocation there (total-thread count of doubles per
// block) keeps the global indexing of pre_sums in bounds. Confirm before
// reusing with any other launch configuration.
__global__ void integrate(double a, double b, integrable f, unsigned long long steps, double *result)
{
extern __shared__ double pre_sums[];
const unsigned long long thread_count = blockDim.x;   // threads PER BLOCK
unsigned long long thread_id = blockIdx.x * blockDim.x + threadIdx.x;   // global id
unsigned long long steps_per_thread = steps / thread_count;
unsigned long long bstep = thread_id * steps_per_thread;   // first step (inclusive)
unsigned long long estep = bstep + steps_per_thread;       // last step (exclusive)
pre_sums[thread_id] = 0;
double h = (b - a) / (steps * 1.0);   // rectangle width
for (unsigned long long i = bstep; i < estep; i++)
{
double x = a + i * h;
pre_sums[thread_id] += h * f(x);
}
__syncthreads();   // block-local barrier only; does not order other blocks
// Serial final reduction by global thread 0: reads the first thread_count
// entries of block 0's shared copy (the only block whose sums are used).
if(thread_id == 0)
{
*result = 0;
for (unsigned long long i = 0; i < thread_count; i++) *result += pre_sums[i];
}
}
// Device-resident pointer to the integrand; the host fetches its value via
// cudaMemcpyFromSymbol so the kernel receives a valid DEVICE function
// pointer (a host pointer to parabola would be invalid on the GPU).
__device__ integrable d_f = parabola;
// Integrate parabola over [0, 1] with 1e9 rectangles on the GPU and print
// the result plus the elapsed kernel time.
int main(int argc, char *argv[])
{
const unsigned int thread_count = 512;   // total threads across the grid
const unsigned long long steps = 1000000000;
double a = 0;
double b = 1;
unsigned long long steps_per_thread = steps / thread_count;   // NOTE(review): unused
double *d_result;
cudaMalloc(&d_result, sizeof(double));
integrable h_fun;
// Fetch the device-side address of parabola (see d_f above).
cudaMemcpyFromSymbol(&h_fun, d_f, sizeof(integrable));
cudaEvent_t begin, end;
cudaEventCreate(&begin);
cudaEventCreate(&end);
cudaEventRecord(begin);
int total_blocks = 4;
dim3 grid;
grid.x = total_blocks;
dim3 block_geom;
block_geom.x = thread_count / total_blocks;   // 128 threads per block
// Dynamic shared size is thread_count doubles PER BLOCK, which is what
// keeps the kernel's global-thread indexing of pre_sums in bounds (see
// the NOTE on integrate()).
integrate<<<grid, block_geom, sizeof(double) * thread_count>>>(a, b, h_fun, steps, d_result);
cudaEventRecord(end);
cudaEventSynchronize(end);
double sum;
cudaMemcpy(&sum, d_result, sizeof(double), cudaMemcpyDeviceToHost);
cout << fixed << setprecision(DBL_DIG) << sum << endl;
float time;
cudaEventElapsedTime(&time, begin, end);
cout << fixed << setprecision(DBL_DIG) << time / 1000 << " seconds." << endl;
cudaFree(d_result);
cudaEventDestroy(begin);
cudaEventDestroy(end);
return 0;
}
|
11,033 | #include "includes.h"
// Subtract the per-dimension offset x[i] from every walker: one thread per
// (dimension i, walker j) cell of the dim x nwl matrix xx, result in yy.
__global__ void shiftWalkers ( const int dim, const int nwl, const float *xx, const float *x, float *yy ) {
    const int dimIdx = threadIdx.x + blockDim.x * blockIdx.x;
    const int wlkIdx = threadIdx.y + blockDim.y * blockIdx.y;
    if ( dimIdx < dim && wlkIdx < nwl ) {
        const int flat = dimIdx + wlkIdx * dim;   // column-within-row layout
        yy[flat] = xx[flat] - x[dimIdx];
    }
}
11,034 | #include<iostream>
#include<cstdio>
using namespace std;
// Block-wise minimum: block blockIdx.x scans the 256-element slice
// a[256*blockIdx.x .. min(256*blockIdx.x+256, n)) sequentially (each block
// is launched with a single thread) and writes the minimum to b[blockIdx.x].
// Assumes 256*blockIdx.x < n, which holds when launched with
// ceil(n/256) blocks as the host code does.
__global__ void mini1(int *a,int *b,int n)
{
    int base = 256 * blockIdx.x;
    // BUG FIX: seed with the slice's first element. The original used the
    // magic sentinel 7888888, which returned wrong results for inputs
    // containing only larger values.
    int mini = a[base];
    int end = min(256 + base, n);
    for (int i = base + 1; i < end; i++)
    {
        if (a[i] < mini)
        {
            mini = a[i];
        }
    }
    b[blockIdx.x] = mini;
}
// Iteratively reduce the array [1..n] to its minimum on the GPU and print it.
int main()
{
    cout<<"Enter the size of array"<<endl;
    int n;
    cin>>n;
    // Heap allocation: the original used a variable-length array, which is
    // non-standard C++ and overflows the stack for large n.
    int *a = new int[n];
    for(int i=0;i<n;i++)
    {
        a[i]=i+1;
    }
    int *ad,*bd;
    int size=n*sizeof(int);
    cudaMalloc(&ad,size);
    cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
    int grids=(n+255)/256;
    cudaMalloc(&bd,grids*sizeof(int));
    dim3 block(1,1);   // one sequential thread per block (see mini1)
    while(n>1)
    {
        // BUG FIX: recompute the grid for the current problem size; the
        // original always relaunched with the initial block count.
        grids=(n+255)/256;
        mini1<<<grids,block>>>(ad,bd,n);
        n=grids;   // each block produced one partial minimum
        cudaMemcpy(ad,bd,n*sizeof(int),cudaMemcpyDeviceToDevice);
    }
    int ans;
    cudaMemcpy(&ans,ad,sizeof(int),cudaMemcpyDeviceToHost);
    cout<<"The minimum element is"<<ans<<endl;
    // The original leaked both device buffers and the host array.
    cudaFree(ad);
    cudaFree(bd);
    delete[] a;
    return 0;
}
|
11,035 | #include <stdio.h>
#define NB 1
#define TPB 256
// Print a greeting tagged with this thread's global index.
__global__ void hello()
{
    const int gid = blockIdx.x*blockDim.x + threadIdx.x;
    printf("Hello World! My thread ID is %2d \n", gid);
}
int main()
{
    // Launch NB blocks of TPB threads, each printing "Hello World".
    hello<<<NB, TPB>>>();
    // Block until the device has flushed its printf output.
    cudaDeviceSynchronize();
    return 0;
}
|
11,036 | /*
Test Programm nach aber nur auf der CPU:
https://www.thomas-krenn.com/de/wiki/CUDA_Programmierung
*/
#include<stdio.h>
#include<cuda.h>
#include<stdlib.h>
// Vars
// Host-Vars
int* h_A;
int* h_B;
int* h_C;
// Prototypes
void RandomInit(int* data, int n);
int CheckResults(int* A, int* B, int* C, int n);
// Kernel
// "Kernel" (CPU variant): element-wise sum C[i] = A[i] + B[i] for i in [0, N).
void VecAdd(const int* A, const int* B, int* C, int N) {
    for (int idx = 0; idx != N; ++idx)
        C[idx] = A[idx] + B[idx];
}
int main(void) {
    printf("Vector addtion\n");
    int N = 100000 * 10000;   // 1e9 elements, ~4 GB per vector
    size_t size = N * sizeof(int);
    // Allocate host memory (~12 GB total).
    h_A = (int*)malloc(size);
    h_B = (int*)malloc(size);
    h_C = (int*)malloc(size);
    // BUG FIX: allocations of this magnitude routinely fail; the original
    // dereferenced the pointers without checking.
    if (h_A == NULL || h_B == NULL || h_C == NULL) {
        printf("Fehler: malloc fehlgeschlagen\n");
        free(h_A);
        free(h_B);
        free(h_C);
        return 1;
    }
    // Fill both inputs with random values.
    RandomInit(h_A, N);
    RandomInit(h_B, N);
    // "Kernel" call (plain CPU loop in this variant).
    VecAdd(h_A, h_B, h_C, N);
    // Validate the result.
    if (CheckResults(h_A, h_B, h_C, N) == 0)
        printf("Alles ok!\n");
    else
        printf("Fehler\n");
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
// Vector mit Zufallszahlen füllen
// Fill the vector with pseudo-random values in [0, 100).
void RandomInit(int* data, int n) {
    for (int idx = 0; idx < n; ++idx)
        data[idx] = rand() % 100;
}
// Ergebnis Prüfen
// Return 0 when C == A + B element-wise over n entries, -1 on the first
// mismatch.
int CheckResults(int* A, int* B, int* C, int n) {
    for (int idx = 0; idx < n; ++idx) {
        if (C[idx] != A[idx] + B[idx])
            return -1;
    }
    return 0;
}
|
11,037 | /*
============================================================================
Name : ste.cu
Author : Tom Mertens
Version :
Copyright : HZB
Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
============================================================================
*/
#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>
#include <thrust/reduce.h>
#include <thrust/device_vector.h>
// 1/x for any arithmetic type, callable on host and device.
template <typename T> __host__ __device__ T reciprocal(const T &x)
{
    return static_cast<T>(1) / x;
}
// Function object so reciprocal() can be handed by value to STL and
// Thrust algorithms.
template <typename T> class ReciprocalFunctor {
public:
    __host__ __device__ T operator()(const T &value) {
        return reciprocal(value);
    }
};
/**
 * Apply op to every element of data and return the sum of the results,
 * computed entirely on the CPU with the STL.
 *
 * The input is taken by const reference — the original copied the whole
 * vector by value on every call.
 */
template <typename T, class OpClass> T transformAndSumCPU(const std::vector<T> &data, OpClass op)
{
    std::vector<T> temp(data.size());
    std::transform(data.begin(), data.end(), temp.begin(), op);
    return std::accumulate(temp.begin(), temp.end(), (T)0);
}
/**
 * Apply op to every element of data and return the sum, computed on the
 * GPU with Thrust: copy to the device, transform in place, then reduce.
 */
template <typename T, class OpClass> T transformAndSumGPU(std::vector<T> data, OpClass op)
{
    thrust::device_vector<T> devData(data.begin(), data.end());
    thrust::transform(devData.begin(), devData.end(), devData.begin(), op);
    return thrust::reduce(devData.begin(), devData.end());
}
/* Append workSize values 0.1*(i+1) to data; reserve up-front so the loop
 * does not trigger repeated reallocations. */
template<typename T> void initialize(std::vector<T> &data, unsigned workSize)
{
    data.reserve(data.size() + workSize);
    for (unsigned i = 0; i < workSize; i++)
        data.push_back( ((T)0.1)*(i+1) );
}
// Build the test vector, run the reciprocal-sum on both CPU and GPU, and
// print the two results for comparison.
template<typename T> void doCompute(unsigned workSize)
{
    std::vector<T> hostData;
    initialize(hostData, workSize);
    const T cpuResults = transformAndSumCPU(hostData, ReciprocalFunctor<T>());
    const T gpuResults = transformAndSumGPU(hostData, ReciprocalFunctor<T>());
    std::cout<<"transformAndSumCPU = "<<cpuResults<<std::endl;
    std::cout<<"transformAndSumGPU = "<<gpuResults<<std::endl;
}
int main(void)
{
    // Sum of reciprocals over 2^20 terms, single precision.
    const unsigned workSize = 1024 * 1024;
    doCompute<float>(workSize);
    return 0;
}
|
11,038 | #include <iostream>
#include <fstream>
#include <sstream>
#include <string>
#include <ctime>
#include <cstdint>
#include <thrust/reduce.h>
#include <cuda.h>
using namespace std;
// Iterative binary search on the sorted inclusive slice arr[l..r];
// returns the index of x, or -1 when it is absent.
__device__ int binarySearch(int* arr, int l, int r, int x)
{
    int lo = l, hi = r;
    while (lo <= hi)
    {
        int mid = lo + (hi - lo) / 2;
        if (arr[mid] == x)
            return mid;
        else if (arr[mid] < x)
            lo = mid + 1;
        else
            hi = mid - 1;
    }
    return -1;
}
/*__device__ int index;
__global__ void arrfind(int* adjlist, int start , int end,int entries,int find)
{
int threadID = blockIdx.x * blockDim.x + threadIdx.x;
if(threadID < entries)
{
if( adjlist[threadID] == find )
{
index = threadID;
}
}
}*/
// Per-edge common-neighbour count: thread adjindex handles the directed
// edge (adjver[adjindex] -> adjlist[adjindex]) and, for every neighbour of
// vertex1, binary-searches vertex2's sorted neighbour slice. Each triangle
// is counted 6 times (once per directed edge); the host divides by 6.
// Preconditions: d_counts zeroed before launch, and beginposition usable
// at index vertex+1 — NOTE(review): the accompanying main() as written
// neither zeroes d_counts nor allocates vertices+1 slots; callers must.
__global__ void Tricount(int* beginposition , int* adjlist ,int* d_counts,int* adjver ,int vertices , int entries)
{
int adjindex = blockIdx.x * blockDim.x + threadIdx.x;
//int vertex =0 ;
// INDENTIFY WHICH VERTEX THE THREAD IS
if( adjindex < entries )
{
int vertex1 = adjlist[adjindex];
// Neighbour count of vertex1; the last vertex's slice extends to the end
// of the adjacency array instead of using beginposition[vertices].
int sizeofarray1 = beginposition[ vertex1+1 ]-beginposition[ vertex1 ];
if( adjlist[adjindex]+1 == vertices)
{
sizeofarray1 = entries-beginposition[vertex1];
}
int vertex2 = adjver[adjindex];
int sizeofarray2 = beginposition[vertex2+1]-beginposition[vertex2];
if( vertex2+1 == vertices)
{
sizeofarray2 = entries-beginposition[vertex2];
}
// Start of vertex1's neighbour slice in adjlist.
int posofelement = beginposition[ adjlist[adjindex] ] ;
for(int i = 0 ; i < sizeofarray1 ; i++)
{
// Probe vertex2's sorted slice for this neighbour of vertex1.
int find = adjlist[ posofelement + i ];
int result = binarySearch (adjlist ,beginposition[vertex2] , beginposition[vertex2] + sizeofarray2 - 1 ,find);
if(result != -1)
{
//printf("found an triangle with vertex %d and vertex %d with vertex %d \n",adjlist[adjindex],vertex2,find);
d_counts[adjindex] = d_counts[adjindex] + 1;
}
}
}
}
// Parse the whitespace-separated adjacency file (hard-coded filename) into
// a flat adjacency array plus per-vertex begin offsets (CSR-like layout).
// Each line carries: neighbour id, vertex id, and an ignored third field.
// Vertices that never appear as a source reuse the previous offset so
// lookups stay valid. Always returns 1.
// NOTE(review): offsets after the first are recorded as adjlistpos-1 (one
// before the most recently appended neighbour) — confirm this convention
// matches how Tricount indexes beginposition.
int mmioread(int* adjlist , int* beginposition) {
string line;
string file1 = "facebook_combined_adj.tsv";
ifstream myfile (file1);
cout << endl;
cout << " reading " << file1 << " ... " <<endl;
cout <<endl;
long linecount =0;
// Column roles: 0 - adjlist entry, 1 - vertex id, 2 - N/A (ignored)
beginposition[0] = 0;
long adjlistpos = 0;
long beginlistpos = 1;
long prevnum = 0;
if (myfile.is_open())
{
while ( getline (myfile,line) )
{
istringstream buf(line);
long type =0;
for(string word; buf >> word; )
{
if( type == 0 ) // add adjlist
{
adjlist[adjlistpos] = stoi(word);
adjlistpos++;
type++;
}
else if( type == 1 ) // add begin pos
{
if(prevnum != stoi(word) )
{
// Vertex ids jumped by more than one: repeat the offset for every
// skipped (source-less) vertex.
if (prevnum+1 != stoi(word) )
{
//printf("now is %d but before was %d\n",stoi(word),prevnum );
for(int a = 0 ; a <stoi(word)-prevnum-1 ; a++)
{
beginposition[beginlistpos] = adjlistpos-1;
//printf("IN \n" );
//printf("putting %d at beginpos %d\n",int(adjlistpos-1),int(beginlistpos));
beginlistpos++;
}
}
beginposition[beginlistpos] = adjlistpos-1;
beginlistpos++;
prevnum = stoi(word);
}
type++;
}
else if (type == 2)
type++;
//forcount++;
}
linecount++;
}
myfile.close();
}
else cout << "Unable to open file";
return 1;
};
// Count triangles in the (hard-coded size) graph: build the edge arrays on
// the host, count per-edge triangles on the GPU, reduce, divide by 6.
int main(){
    int vertices = 4040;
    int entries = 176468;
    // BUG FIX: beginposition is indexed up to [vertices] (here and in the
    // kernel), so allocate vertices+1 slots; value-initialize so padding
    // slots are deterministic. The original allocated `vertices` and read
    // one past the end.
    int* h_beginposition = new int[vertices + 1]();
    int* h_adjlist = new int[entries];
    int* h_adjvertex = new int[entries];
    int* h_count = new int[entries];
    int* d_begin;
    int* d_adj;
    int* d_counts;
    int* d_adjvertex;
    cout << "Converting MMIO to array form..." << endl;
    clock_t startTime = clock();
    mmioread(h_adjlist, h_beginposition);
    // Expand the begin offsets into a per-edge source-vertex array.
    int pos = 0;
    for (int x = 1; x < vertices; x++)
    {
        int size = h_beginposition[x + 1] - h_beginposition[x];
        if (x + 1 == vertices)
            size = entries - h_beginposition[x];
        for (int y = 0; y < size; y++)
        {
            h_adjvertex[pos] = x;
            pos++;
        }
    }
    // BUG FIX: floating-point division — the original integer division
    // truncated sub-second timings to 0.
    double secondsPassed = double(clock() - startTime) / CLOCKS_PER_SEC;
    cout << "Transform complete : " << secondsPassed << " seconds have passed" << endl;
    cout << "Allocating space on GPU and transfer data..." << endl;
    cudaMalloc(&d_begin, (vertices + 1) * sizeof(int));
    cudaMalloc(&d_adj, entries * sizeof(int));
    cudaMalloc(&d_adjvertex, entries * sizeof(int));
    cudaMalloc((void**)&d_counts, entries * sizeof(int));
    // BUG FIX: the kernel increments d_counts in place, so it must start
    // zeroed; the original left this memset commented out and reduced
    // uninitialized memory.
    cudaMemset(d_counts, 0, entries * sizeof(int));
    cudaMemcpy(d_begin, h_beginposition, (vertices + 1) * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_adj, h_adjlist, entries * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_adjvertex, h_adjvertex, entries * sizeof(int), cudaMemcpyHostToDevice);
    int blocks = (entries / 1024) + 1;
    cout << "Now counting Triangles" << endl;
    Tricount<<<blocks, 1024>>>(d_begin, d_adj, d_counts, d_adjvertex, vertices, entries);
    cudaMemcpy(h_count, d_counts, entries * sizeof(int), cudaMemcpyDeviceToHost);
    cout << "Done..." << endl;
    cout << "Done with MEMCOPY...Now counting" << endl;
    // Each triangle was counted once per directed edge (6 times).
    int result = thrust::reduce(h_count, h_count + entries);
    printf("answer : %d \n", result / 6);
    cudaFree(d_begin);
    cudaFree(d_adj);
    cudaFree(d_counts);
    cudaFree(d_adjvertex);   // the original leaked this buffer
    delete[] h_beginposition;
    delete[] h_adjlist;
    delete[] h_adjvertex;
    delete[] h_count;
}
|
11,039 | #include <cuda.h>
#include <iostream>
#include <stdlib.h>
#include <unistd.h>
#include <time.h>
#include <stdio.h>
// defining minimum and maximum sizes
#define minSize 1024
#define maxSize 8192
using namespace std;
// function to print the current state
// Render the interior of the grid to stdout: two block characters per
// live cell, two spaces per dead cell, one line per row.
void StatePrint(int *a,long SIZE)
{
    for (unsigned row = 1; row < SIZE - 1; row++)
    {
        for (unsigned col = 1; col < SIZE - 1; col++)
        {
            if (a[row*SIZE+col] == 1)
                cout << char(219) << char(219);
            else
                cout << ' ' << ' ';
        }
        cout << endl;
    }
}
// function to build a random state in the grid
// Fill the interior of the grid with a random 0/1 state (border cells are
// left untouched).
void randomGrid(int *a,long SIZE)
{
    srand(time(NULL));
    for (int row = 1; row < SIZE - 1; row++)
        for (int col = 1; col < SIZE - 1; col++)
            a[row*SIZE+col] = rand() % 2;
}
// function to add a blinker to the grid
// Stamp a vertical blinker (3x1 line inside a 3x3 box) anchored at (i, j).
void addBlinker(int *a, int i, int j,long SIZE)
{
    const int pattern[3][3] = {{0, 1, 0}, {0, 1, 0}, {0, 1, 0}};
    for (int dr = 0; dr < 3; dr++)
        for (int dc = 0; dc < 3; dc++)
            a[(i + dr)*SIZE + j + dc] = pattern[dr][dc];
}
// function to add a Glider to the grid at some coordinates
// Stamp a glider (3x3 pattern) anchored at (i, j).
void addGlider(int *a, int i, int j,long SIZE)
{
    const int pattern[3][3] = {{0, 0, 1},
                               {1, 0, 1},
                               {0, 1, 1}};
    for (int dr = 0; dr < 3; dr++)
        for (int dc = 0; dc < 3; dc++)
            a[(i + dr)*SIZE + j + dc] = pattern[dr][dc];
}
// function to add a Glider gun to the grid at some coordinates
// Stamp a glider gun whose 11x38 bounding box is anchored at (i, j).
// The layout appears to be the Gosper glider gun — TODO confirm cells.
void addGliderGun(int *a, int i, int j,long SIZE)
{
int b[11][38] = {0};
// left square
b[5][1] = b[5][2] = 1;
b[6][1] = b[6][2] = 1;
// left ship
b[3][13] = b[3][14] = 1;
b[4][12] = b[4][16] = 1;
b[5][11] = b[5][17] = 1;
b[6][11] = b[6][15] = b[6][17] = b[6][18] = 1;
b[7][11] = b[7][17] = 1;
b[8][12] = b[8][16] = 1;
b[9][13] = b[9][14] = 1;
// right ship
b[1][25] = 1;
b[2][23] = b[2][25] = 1;
b[3][21] = b[3][22] = 1;
b[4][21] = b[4][22] = 1;
b[5][21] = b[5][22] = 1;
b[6][23] = b[6][25] = 1;
b[7][25] = 1;
// right square
b[3][35] = b[3][36] = 1;
b[4][35] = b[4][36] = 1;
// copy the stencil into the grid
for (int p = 0; p < 11; p++)
{
for (int q = 0; q < 38; q++)
{
a[(i + p)*SIZE+j + q] = b[p][q];
}
}
}
// Defining kernel function to simulate cellular automata
// Run 100 generations of Conway's Game of Life on the SIZE x SIZE grid
// `a`, using `b` as scratch; one thread per cell (flat 1D launch).
//
// NOTE(review): there is no grid-wide barrier between generations, so
// blocks may read neighbours from different generations; multi-block
// results are only approximate. A strict version launches one kernel per
// generation.
__global__ void cellular_automata(int *a,int *b,long SIZE)
{
    // number of new states to be generated per launch
    int loop=100;
    // flat thread id -> (row, col)
    long int tid=blockIdx.x*blockDim.x+threadIdx.x;
    long int row=tid/SIZE;
    long int col=tid%SIZE;
    // BUG FIX: border cells have no complete neighbourhood; the original
    // indexed out of bounds for row/col 0 and SIZE-1. Leave them fixed.
    if (row < 1 || row >= SIZE - 1 || col < 1 || col >= SIZE - 1)
        return;
    while(loop){
        // BUG FIX: reset the neighbour count every generation; the
        // original accumulated it across all 100 iterations, corrupting
        // every generation after the first.
        int count = 0;
        for (int i = -1; i < 2; i++)
        {
            for (int j = -1; j < 2; j++)
            {
                if (i != 0 || j != 0)
                    count += (a[(row + i)*SIZE +col + j] ? 1 : 0);
            }
        }
        // Standard B3/S23 rule.
        b[row*SIZE+col] = a[row*SIZE+col] == 1 ? count == 3 || count == 2 ? 1 : 0 : count == 3 ? 1 : 0;
        loop--;
        a[row*SIZE+col] = b[row*SIZE+col];
    }
}
// Benchmark driver: for grid sizes minSize..maxSize (doubling), time the
// host-to-device copy, the simulation kernels, and the device-to-host
// copy, and append a CSV row per size to parallel_data.txt.
// NOTE(review): the six events are re-created on every loop iteration and
// never destroyed, and `data` is never fclose()d — both leak.
int main()
{
int *a;
int *d_a, *d_b;
float e_time1,e_time2,e_time3,milliseconds,throughput;
FILE *data=fopen("parallel_data.txt","w");
for(long SIZE=minSize;SIZE<=maxSize;SIZE*=2)
{
// allocating space for grid
a=(int*)malloc(SIZE*SIZE*sizeof(int));
// initialising grid with a random state
randomGrid(a,SIZE);
//addGlider(a, 100, 100);
//addGliderGun(a, 225, 100);
//addBlinker(a, 125, 130);
//StatePrint(a);
// allocating memory in cuda device
cudaMalloc((void**)&d_a,SIZE*SIZE*sizeof(int));
cudaMalloc((void**)&d_b,SIZE*SIZE*sizeof(int));
// creating events to record different timings
cudaEvent_t start1,stop1,start2,stop2,start3,stop3;
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
// copying memory from host to device
cudaEventRecord(start1);
cudaMemcpy(d_a,a,SIZE*SIZE*sizeof(int),cudaMemcpyHostToDevice);
cudaEventRecord(stop1);
// calculating memory copy time
cudaEventSynchronize(stop1);
milliseconds=0;
cudaEventElapsedTime(&milliseconds, start1, stop1);
e_time1=(double)milliseconds/1000;
cout<<"HTOD:"<<e_time1<<endl;
cudaEventCreate(&start2);
cudaEventCreate(&stop2);
// running the simulation; each kernel launch runs 100 generations
// internally, and smaller grids are launched proportionally more often.
cudaEventRecord(start2);
for(int j=0;j<(maxSize/SIZE);j++)
{
cellular_automata<<<SIZE*SIZE/1024,1024>>>(d_a,d_b,SIZE);
}
cudaEventRecord(stop2);
//calculating the compute time
cudaEventSynchronize(stop2);
milliseconds=0;
cudaEventElapsedTime(&milliseconds, start2, stop2);
e_time2=(double)milliseconds/1000;
cout<<"Kernel:"<<e_time2<<endl;
cudaEventCreate(&start3);
cudaEventCreate(&stop3);
// copying back the results of the simulation from device to host
cudaEventRecord(start3);
cudaMemcpy(a,d_a,SIZE*SIZE*sizeof(int),cudaMemcpyDeviceToHost);
cudaEventRecord(stop3);
// calculating memory copy time
cudaEventSynchronize(stop3);
milliseconds=0;
cudaEventElapsedTime(&milliseconds, start3, stop3);
e_time3=(double)milliseconds/1000;
cout<<"DTOH:"<<e_time3<<endl;
// calculating throughput
// NOTE(review): the grid holds ints, but throughput scales by
// sizeof(float) — same byte count here, yet confirm the intent.
throughput=(sizeof(float)*maxSize*maxSize)/e_time2;
fprintf(data,"%lf,%lf,%lf,%lf\n",e_time1,e_time2*SIZE/maxSize,e_time3,throughput/1000000);
// deallocating cuda device memory
cudaFree(d_a);
cudaFree(d_b);
// deallocating grid space
free(a);
}
cout<<"ENDED";
return 0;
// End of the Program
}
|
11,040 | #include "includes.h"
// Animate a sine-based colour pattern into a float RGBA surface at time t.
// `pitch` is in bytes (cudaMallocPitch layout); each pixel is 4 floats.
__global__ void cuke_sinGrid(unsigned char *surface, int width, int height, size_t pitch, float t)
{
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    // in the case where, due to quantization into grids, we have
    // more threads than pixels, skip the threads which don't
    // correspond to valid pixels
    if (x >= width || y >= height) return;
    // get a pointer to the pixel at (x,y)
    float* pixel = (float *)(surface + y*pitch) + 4*x;
    // populate it — single precision throughout: the original's bare
    // double literals (0.5) and pow(..., 3.0f) forced double-precision
    // math on every pixel.
    float value_x = 0.5f + 0.5f*cosf(t + 10.0f*((2.0f*x)/width - 1.0f));
    float value_y = 0.5f + 0.5f*cosf(t + 10.0f*((2.0f*y)/height - 1.0f));
    pixel[0] = 0.5f*pixel[0] + 0.5f*(value_x*value_x*value_x); // red
    pixel[1] = 0.5f*pixel[1] + 0.5f*(value_y*value_y*value_y); // green
    pixel[2] = 0.5f + 0.5f*cosf(t); // blue
    pixel[3] = 1.0f; // alpha
}
11,041 | #include<stdio.h>
//Macro for checking cuda errors following a cuda launch or api call
#define cudaCheckError() { \
    cudaError_t e=cudaGetLastError(); \
    if(e!=cudaSuccess) { \
        printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
        exit(1); /* BUG FIX: the original exit(0) reported success on failure */ \
    } \
}
// Kernel
// Kernel: print the grid/block geometry and this thread's coordinates.
// The parameter n is kept for the commented-out loop variant and is
// otherwise unused.
__global__ void print(int n) {
    const int threadId = threadIdx.x;
    const int blockId = blockIdx.x;
    const int blockSize = blockDim.x;
    const int gridSize = gridDim.x;
    printf("Grid-Dimension: %i, Block-Dimension: %i\n Block-ID: %i, Thread-ID: %i\n\n", gridSize, blockSize, blockId, threadId);
}
// Main
// Main: launch 2048 single-thread blocks, each printing its coordinates.
int main(void) {
    int n = 25;
    print<<<2048, 1>>>(n);
    cudaCheckError();
    // Wait for all device printf output before exiting.
    cudaDeviceSynchronize();
    return 0;
}
|
11,042 | #include <stdio.h>
#include <math.h>
#include <iostream>
// SpMV for CSR storage: one thread per row accumulates the dot product of
// the row's stored entries with inVector and adds it into outVector[row].
// (Based on Lecture 16, Sparse Methods, slide 12.)
__global__ void spmv_csr_kernel(unsigned int dim, unsigned int *csrRowPtr,
        unsigned int *csrColIdx, float *csrData, float *inVector,
        float *outVector) {
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= dim) return;
    float acc = 0;
    const int first = csrRowPtr[row];
    const int last = csrRowPtr[row + 1];
    for (int j = first; j < last; j++) {
        acc += csrData[j] * inVector[csrColIdx[j]];
    }
    outVector[row] += acc;
}
// SpMV for JDS (jagged diagonal) storage: one thread per permuted row.
// jdsRowNNZ[row] is the nonzero count of permuted row `row`, and the i-th
// nonzero of that row lives at jdsData[jdsColStartIdx[i] + row]; results
// are scattered back to the original row order through jdsRowPerm.
// NOTE(review): assumes rows are sorted by decreasing NNZ so `row` is a
// valid offset inside every diagonal it touches — confirm against the
// host-side JDS builder.
__global__ void spmv_jds_kernel(unsigned int dim, unsigned int *jdsRowPerm,
unsigned int *jdsRowNNZ, unsigned int *jdsColStartIdx,
unsigned int *jdsColIdx, float *jdsData, float* inVector,
float *outVector) {
// INSERT KERNEL CODE HERE
int row = blockDim.x * blockIdx.x + threadIdx.x;
if(row < dim){
float dot = 0;
int row_start = 0;
int row_end = jdsRowNNZ[row];   // nonzeros in this permuted row
for (int i = row_start; i < row_end; i++){
// i-th jagged diagonal, element for this row.
dot += jdsData[jdsColStartIdx[i] + row] * inVector[jdsColIdx[jdsColStartIdx[i] + row]];
}
outVector[jdsRowPerm[row]] += dot;   // scatter to original row order
}
}
// Host wrapper: launch spmv_csr_kernel with 512-thread blocks covering
// all dim rows.
void spmv_csr(unsigned int dim, unsigned int *csrRowPtr, unsigned int *csrColIdx,
        float *csrData, float *inVector, float *outVector) {
    const unsigned int threads = 512;
    dim3 block(threads, 1, 1);
    dim3 grid((dim + threads - 1) / threads, 1, 1);
    spmv_csr_kernel<<<grid, block>>>(dim, csrRowPtr, csrColIdx, csrData, inVector, outVector);
}
// Host wrapper: launch spmv_jds_kernel with 512-thread blocks covering
// all dim rows.
void spmv_jds(unsigned int dim, unsigned int *jdsRowPerm, unsigned int *jdsRowNNZ,
        unsigned int *jdsColStartIdx, unsigned int *jdsColIdx, float *jdsData,
        float* inVector, float *outVector) {
    const unsigned int threads = 512;
    dim3 block(threads, 1, 1);
    dim3 grid((dim + threads - 1) / threads, 1, 1);
    spmv_jds_kernel<<<grid, block>>>(dim, jdsRowPerm, jdsRowNNZ, jdsColStartIdx, jdsColIdx, jdsData, inVector, outVector);
}
|
11,043 |
extern "C"
// Mandelbrot escape-time test for c = (coordArray[1], coordArray[0]).
// On entry result[0] holds the iteration budget; on exit *result holds
// the first iteration at which |z|^2 reached 4, or the budget if the
// point never escaped.
__global__ void checkConvergence(float *coordArray, int *result)
{
    float re = 0;
    float im = 0;
    const float cIm = coordArray[0];
    const float cRe = coordArray[1];
    const int maxSteps = result[0];
    for (int i = 0; i < maxSteps; i++) {
        // z <- z^2 + c, computed from the pre-update components.
        const float nextIm = 2 * (re * im);
        const float nextRe = re * re - (im * im);
        re = nextRe + cRe;
        im = nextIm + cIm;
        if (re * re + im * im >= 4.0) {
            *result = i;
            return;
        }
    }
    *result = maxSteps;
}
11,044 | #include "cuda.h"
#include "cuda_runtime.h"
#include "string.h"
#include "stdio.h"
#include "stdlib.h"
#include <chrono>
#include <iostream>
#include <iomanip>
using namespace std;
// Abort (exit code 1) if any preceding CUDA call left a sticky error.
// BUG FIX: the original printed __LINE__ through %s — undefined behaviour;
// the line number needs %d.
#define CUDA_DEBUG { \
    cudaError_t err = cudaGetLastError(); \
    if (err != cudaSuccess){ \
        printf("Error at %d %s: %s\n", __LINE__, __FILE__, cudaGetErrorString(err)); \
        exit(1); \
    } \
}
// C = A * B for square width x width matrices stored row-major as flat
// arrays; one thread per output element, N = width*width elements total.
__global__ void gpu_mat_mul(float* A, float* B, float* C, long width, long N)
{
    const long i = (blockIdx.x * blockDim.x + threadIdx.x);
    if(i >= N) return;
    const long row = i / width;
    const long col = i % width;
    float val = 0.0f;
    // BUG FIX: the row base must be offset by row*width elements; the
    // original used A + (i / width), which walked the first row shifted by
    // the row index instead of the output element's actual row.
    const float* a = A + row * width;
    const float* b = B + col;
    for(long j = 0; j < width; j++){
        val += a[j] * b[width * j];
    }
    C[i] = val;
}
// Benchmark square matrix multiply of the given size on the GPU and print
// the total wall-clock time (allocation + copies + kernel).
int main(int argc, char **argv){
auto begin = chrono::high_resolution_clock::now();
if(argc < 2){
printf("usage: ./%s <size> \n", argv[0]);
return 1;
}
long size = atol(argv[1]);
long N = size*size;   // total elements per matrix
size_t bytes = sizeof(float) * N;
float* A_h = (float*)malloc(bytes);
float* B_h = (float*)malloc(bytes);
float* C_h = (float*)malloc(bytes);
// Deterministic synthetic inputs in [0, 1).
for(long i = 0; i < N; i++){
*(A_h + i) = (float)i / (float)N;
*(B_h + i) = (float)i / (float)N;
}
float *A_d, *B_d, *C_d;
cudaMalloc((void**)&A_d, bytes);
cudaMalloc((void**)&B_d, bytes);
cudaMalloc((void**)&C_d, bytes);
CUDA_DEBUG
cudaMemcpy(A_d, A_h, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, bytes, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
CUDA_DEBUG
// Host inputs are no longer needed once copied; free early to cap
// peak host memory.
free(A_h);
free(B_h);
long threadsPerBlock = 1024;
long numBlocks = N / threadsPerBlock;
numBlocks += (N % threadsPerBlock > 0) ? 1 : 0;   // ceil division
gpu_mat_mul <<< numBlocks, threadsPerBlock >>> (A_d, B_d, C_d, size, N);
cudaDeviceSynchronize();
CUDA_DEBUG
// Result copied back only to include the transfer in the timing; it is
// discarded unverified (benchmark only).
cudaMemcpy(C_h, C_d, bytes, cudaMemcpyDeviceToHost);
cudaFree(C_d);
cudaFree(B_d);
cudaFree(A_d);
CUDA_DEBUG
free(C_h);
auto end = chrono::high_resolution_clock::now();
auto duration = chrono::duration_cast<chrono::milliseconds>(end - begin);
cout << "Blocksize * Gridsize: " << setw(2) << threadsPerBlock * numBlocks << " Matrix size: " << setw(9) << size;
cout << " Milliseconds taken: " << setw(15) << duration.count() << endl;
return 0;
}
|
11,045 | #include <cstdio>
#include <math.h>
// Kernel function to square a float
// Kernel: square each input element, one thread per element.
// Single-block launch assumed, so threadIdx.x alone indexes the arrays.
__global__
void square(float *d_out, float *d_in)
{
    const int i = threadIdx.x;
    const float v = d_in[i];
    d_out[i] = v * v;
}
int main(void)
{
    const int ARR_SIZE = 64;
    const int ARR_BYTES = ARR_SIZE*sizeof(float);
    // Build the input array 0, 1, ..., 63 on the host.
    float h_in[ARR_SIZE];
    float h_out[ARR_SIZE];
    for (int idx = 0; idx < ARR_SIZE; idx++)
        h_in[idx] = float(idx);
    // Allocate device buffers.
    float *d_in = NULL;
    float *d_out = NULL;
    cudaMalloc((void **) &d_in, ARR_BYTES);
    cudaMalloc((void **) &d_out, ARR_BYTES);
    // Copy input to the device.
    cudaMemcpy(d_in, h_in, ARR_BYTES, cudaMemcpyHostToDevice);
    // One block of 64 threads, one element each.
    square<<<1, ARR_SIZE>>>(d_out, d_in);
    // Copy the squared values back.
    cudaMemcpy(h_out, d_out, ARR_BYTES, cudaMemcpyDeviceToHost);
    // Print the results.
    for (int idx = 0; idx < ARR_SIZE; idx++)
        printf("%f, ", h_out[idx]);
    printf("\n");
    // Release device memory.
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
11,046 | #include <cstdio>
#include "WaypointsState.cuh"
// Derive a successor by setting the bit that records nondeterministic
// choice `ndc` taken by `process`.
__host__ __device__ void WaypointsState::successor_generation(WaypointsState *successor, unsigned int process, unsigned int ndc)
{
    const unsigned int bit = (kNondeterministicChoices * process) + ndc;
    successor->state = state | (1 << bit);
}
// Return true when `state` equals any of the 100 randomly generated
// "violation" values below. Branch-free: every equality contributes 0 or
// 1 to an integer sum and the result is true iff the sum is nonzero (the
// constants are distinct, so at most one term can be 1).
__host__ __device__ bool WaypointsState::violates()
{
/* How to generate 100 random 32 bit integers using numpy:
*
* import numpy as np
* rng = np.random.default_rng()
* rng.integers(np.iinfo(np.int32).min, np.iinfo(np.int32).max, 100, endpoint=True)
*
* Here is a little trick to save us from branching. Most probably, it could
* be improved using bitwise operators or something :)
*/
return 0 != ((state == -796893842) +
(state == 779999969) +
(state == -1168537298) +
(state == 1200431791) +
(state == 343263341) +
(state == -484020923) +
(state == 152254944) +
(state == 1580803326) +
(state == 742905064) +
(state == -1078233077) +
(state == -535171611) +
(state == 3801864) +
(state == -591946906) +
(state == -1735204896) +
(state == -2019895492) +
(state == -1892053421) +
(state == 1619348239) +
(state == -1582071078) +
(state == 975455264) +
(state == -1009488133) +
(state == -1986586248) +
(state == 1298121607) +
(state == 591829652) +
(state == 174446516) +
(state == 1146941867) +
(state == -973302145) +
(state == 1886697991) +
(state == -242879520) +
(state == 844493943) +
(state == -44194297) +
(state == -2052363332) +
(state == 2073759503) +
(state == 1427832783) +
(state == 1723743020) +
(state == -1153542822) +
(state == -311282643) +
(state == 1951362811) +
(state == 2021087655) +
(state == 1852558397) +
(state == 142999217) +
(state == 1551312664) +
(state == 1688262063) +
(state == -953275708) +
(state == 1267931653) +
(state == -424531623) +
(state == -360759171) +
(state == -1906136628) +
(state == -787884137) +
(state == -369036714) +
(state == -547100928) +
(state == 2062351048) +
(state == -241464430) +
(state == -330827675) +
(state == -1338579048) +
(state == 1000668082) +
(state == -1024509637) +
(state == -1157809220) +
(state == -1223390847) +
(state == -1226148451) +
(state == -1525868926) +
(state == -428231122) +
(state == 1686987859) +
(state == 1165889762) +
(state == -486440368) +
(state == 107898966) +
(state == -975841995) +
(state == -2120763899) +
(state == -298195237) +
(state == -480588986) +
(state == -1895479827) +
(state == -1480726272) +
(state == -426296827) +
(state == 1207040394) +
(state == 1794600435) +
(state == 1621471265) +
(state == -153254338) +
(state == 1752632236) +
(state == -1309418877) +
(state == 558531649) +
(state == -456823131) +
(state == 1704887816) +
(state == 990204918) +
(state == 1243623110) +
(state == 1383812845) +
(state == 1926782188) +
(state == 2124838199) +
(state == -721123533) +
(state == -22990483) +
(state == 1076265871) +
(state == 177235009) +
(state == -1657803211) +
(state == -206509875) +
(state == -330928781) +
(state == -946222203) +
(state == -305828449) +
(state == 1262699236) +
(state == -963772524) +
(state == -265091042) +
(state == 1933187888) +
(state == 1725002572));
}
// Human-readable form of the state: its raw integer value.
__host__ std::string WaypointsState::str()
{
    return std::to_string(state);
}
|
11,047 | // #define DEBUG
#ifdef DEBUG
#define FOURMB (2 * 1024 * 1024)
#define BYTES (FOURMB * sizeof(int))
#define NTHREADS 128
#define INITN 256
#else
#define FOURMB (2 * 1024 * 1024)
// #define FOURM
#define BYTES (FOURMB * sizeof(int))
#define NTHREADS 128
#define INITN 1024
#endif
// homework1程序
// TODO: GPU版本计算两个向量差的二范数
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <assert.h>
#define CUDA_CALL(x) \
{ \
const cudaError_t a = (x); \
if (a != cudaSuccess) \
{ \
printf("\nCUDA Error: %s (err_num = %d)\n", cudaGetErrorString(a), a); \
cudaDeviceReset(); \
assert(0); \
} \
}
// TODO: 定义GPU kernel函数,并在rbfComputeGPU中调用
// Final warp-level stage of the shared-memory sum reduction: collapses
// sdata[0..63] into sdata[0] without explicit barriers. `volatile` forces
// each shared-memory value to be re-read at every step.
// NOTE(review): this relies on implicit warp-synchronous execution, which
// is no longer guaranteed on Volta+ (independent thread scheduling); a
// __syncwarp()- or shuffle-based reduction is the safe modern form.
// Caller must ensure blockDim.x >= 64 so sdata[tid + 32] is in bounds
// (reduce() below calls this with tid < 32).
__device__ void wrapReduce(volatile int *sdata, int tid)
{
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
// Block-wide sum reduction: each block loads two elements per thread from
// g_idata, reduces them in dynamic shared memory, and writes its partial
// sum to g_odata[blockIdx.x].
// Launch: blockDim.x * sizeof(int) dynamic shared memory; assumes
// blockDim.x is a power of two >= 64 (wrapReduce reads sdata[tid + 32]).
__global__ void reduce(int *g_idata, int *g_odata, unsigned int n)
{
extern __shared__ int sdata[];
unsigned int tid = threadIdx.x;
// each block covers 2*blockDim.x inputs: the add-during-load below halves
// the number of barrier iterations needed in the tree phase
unsigned int i = blockIdx.x * blockDim.x * 2 + threadIdx.x;
sdata[tid] = (i < n) ? g_idata[i] : 0;
sdata[tid] += (i + blockDim.x < n) ? g_idata[i + blockDim.x] : 0;
__syncthreads();
// tree reduction in shared memory down to 64 live elements
for (unsigned int s = blockDim.x / 2; s > 32; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
// final 64 -> 1 handled warp-synchronously (see wrapReduce)
if (tid < 32)
wrapReduce(sdata, tid);
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
// Element-wise squared difference of two interleaved vectors.
// Layout (produced by rbfComputeGPU): per block, `input` holds
// blockDim.x/2 elements of vector A followed by blockDim.x/2 elements of
// vector B. Each thread stages one value into shared memory; the first
// half of the block then writes (a-b)^2 into the compacted output.
// Launch: dynamic shared memory = blockDim.x * sizeof(int). `len` is the
// logical vector length (total number of output elements); no bounds
// checks, so the grid must exactly cover 2*len inputs.
__global__ void norm2(int *input, int *output, int len)
{
extern __shared__ int smem[];
int tid = threadIdx.x;
int i = blockIdx.x * blockDim.x + threadIdx.x;
smem[tid] = input[i];
__syncthreads();
// thread t (t < blockDim.x/2) pairs smem[t] (from A) with
// smem[t + blockDim.x/2] (from B)
int trueblockDim = (blockDim.x >> 1);
if (tid < trueblockDim)
{
smem[tid] = (smem[tid] - smem[tid + trueblockDim]) * (smem[tid] - smem[tid + trueblockDim]);
output[blockIdx.x * trueblockDim + tid] = smem[tid];
}
}
// no fusion version:
// GPU version: returns sum_i (input1[i]-input2[i])^2, repeated 100 times
// for timing to mirror rbfComputeCPU. `len` is assumed to be a multiple of
// 2*NTHREADS (no tail handling anywhere in this pipeline).
// Pipeline: interleave inputs into d_idata1 ([A-chunk | B-chunk] per norm2
// block), norm2 writes squared differences into d_idata, then reduce folds
// d_idata down to a single int in d_odata[0].
__host__ int rbfComputeGPU(int *input1, int *input2, int len)
{
int *d_idata1;
// int *d_idata2;
int *d_idata;
int *d_odata;
int *d_intermediateSums;
int res = 0;
// reduce consumes 2 elements per thread; norm2 emits one output per 2 inputs
int nReduBlocks = len / NTHREADS / 2;
int n2NormBlocks = len / NTHREADS;
int calBytes = len * sizeof(int) * 2;
// TODO: allocate space on the GPU
// CUDA_CALL();
CUDA_CALL(cudaMalloc((void **)&d_idata1, calBytes));
// CUDA_CALL(cudaMalloc((void **)&d_idata2, calBytes));
CUDA_CALL(cudaMalloc((void **)&d_idata, calBytes / 2));
CUDA_CALL(cudaMalloc((void **)&d_odata, nReduBlocks * sizeof(int)));
CUDA_CALL(cudaMalloc((void **)&d_intermediateSums, sizeof(int) * nReduBlocks));
// TODO: copy the CPU inputs into GPU global memory
// Interleave in NTHREADS-sized chunks so each norm2 block sees its A-chunk
// and B-chunk contiguously. NOTE(review): one cudaMemcpy per chunk is
// slow; a device-side pack would avoid the many small transfers.
for (int i = 0; i < len; i += NTHREADS)
{
CUDA_CALL(cudaMemcpy(&d_idata1[i * 2], &input1[i], NTHREADS * sizeof(int), cudaMemcpyHostToDevice));
CUDA_CALL(cudaMemcpy(&d_idata1[i * 2 + NTHREADS], &input2[i], NTHREADS * sizeof(int), cudaMemcpyHostToDevice));
}
// CUDA_CALL(cudaMemcpy(d_idata1, input1, calBytes, cudaMemcpyHostToDevice));
// CUDA_CALL(cudaMemcpy(d_idata2, input2, calBytes, cudaMemcpyHostToDevice));
#ifdef DEBUG
int *test2norm;
test2norm = (int *)malloc(calBytes);
assert(test2norm);
#endif
struct timespec time_start = {0, 0}, time_end = {0, 0};
clock_gettime(CLOCK_REALTIME, &time_start);
// repeat 100 times to compare elapsed time against the CPU version
for (int idx = 0; idx < 100; idx++)
{
// res = 0;
// TODO: launch the GPU kernels computing the RBF between the two vectors
dim3 dimBlock(NTHREADS, 1, 1);
dim3 dimGrid(n2NormBlocks, 1, 1);
int smemSize = NTHREADS * sizeof(int);
// norm2 runs with double-width blocks (2*NTHREADS threads, 2x shared mem)
dim3 trydimBlock((NTHREADS << 1), 1, 1);
norm2<<<dimGrid, trydimBlock, (smemSize << 1)>>>(d_idata1, d_idata, len);
#ifdef DEBUG
CUDA_CALL(cudaMemcpy(test2norm, d_idata, calBytes, cudaMemcpyDeviceToHost));
#endif
// first reduction pass over the len squared differences
dimGrid.x = nReduBlocks;
reduce<<<dimGrid, dimBlock, smemSize>>>(d_idata, d_odata, len);
// fold the partial sums until a single value remains.
// NOTE(review): each pass launches ceil(s/NTHREADS) blocks although a
// block reduces 2*NTHREADS inputs; the surplus blocks write zero
// partials into d_odata. Appears harmless for these sizes -- confirm.
int s = nReduBlocks;
while (s > 1)
{
dim3 dimGrid((s + NTHREADS - 1) / NTHREADS, 1, 1);
CUDA_CALL(cudaMemcpy(d_intermediateSums, d_odata, s * sizeof(int), cudaMemcpyDeviceToDevice));
reduce<<<dimGrid, dimBlock, smemSize>>>(d_intermediateSums, d_odata, s);
CUDA_CALL(cudaGetLastError());
s /= (NTHREADS * 2);
}
// TODO: copy the GPU output back to the CPU
CUDA_CALL(cudaMemcpy(&res, d_odata, sizeof(int), cudaMemcpyDeviceToHost));
}
clock_gettime(CLOCK_REALTIME, &time_end);
double costTime = (time_end.tv_sec - time_start.tv_sec) * 1000 * 1000 * 1000 + time_end.tv_nsec - time_start.tv_nsec;
printf("GPU cal %d size cost:%.7lfms\n", len, costTime / 1000 / 1000);
// TODO: free the allocated GPU memory
#ifdef DEBUG
printf("test for 2norm in GPU:\n");
for (int i = 0; i < len; ++i)
{
if ((input1[i] - input2[i]) * (input1[i] - input2[i]) != test2norm[i])
printf("i:%d, test:%d, true:%d\n", i, test2norm[i], (input1[i] - input2[i]) * (input1[i] - input2[i]));
}
free(test2norm);
#endif
// CUDA_CALL(cudaFree(d_idata2));
CUDA_CALL(cudaFree(d_idata1));
CUDA_CALL(cudaFree(d_idata));
CUDA_CALL(cudaFree(d_odata));
CUDA_CALL(cudaFree(d_intermediateSums));
return res;
}
// cpu版本
// CPU reference: repeats the squared-L2-distance computation 100 times,
// prints the wall-clock time, and returns sum_i (input1[i]-input2[i])^2.
int rbfComputeCPU(int *input1, int *input2, int len)
{
    struct timespec t0 = {0, 0}, t1 = {0, 0};
    clock_gettime(CLOCK_REALTIME, &t0);
    int acc = 0;
    for (int rep = 0; rep < 100; rep++)
    {
        acc = 0;
        for (int k = 0; k < len; k++)
        {
            int diff = input1[k] - input2[k];
            acc += diff * diff;
        }
    }
    clock_gettime(CLOCK_REALTIME, &t1);
    double costTime = (t1.tv_sec - t0.tv_sec) * 1000 * 1000 * 1000 + t1.tv_nsec - t0.tv_nsec;
    printf("CPU cal %d size cost:%.7lfms\n", len, costTime / 1000 / 1000);
    return acc;
}
// Entry point: fills two FOURMB-element random byte vectors, then for sizes
// n = INITN, 4*INITN, ..., FOURMB compares the CPU and GPU squared-distance
// results, aborting on the first mismatch.
__host__ int main()
{
int *h_idata1, *h_idata2;
h_idata1 = (int *)malloc(BYTES);
h_idata2 = (int *)malloc(BYTES);
assert(h_idata1);
assert(h_idata2);
srand((unsigned)time(NULL));
// random values in [0, 255] keep the sum of squares within int range
for (int i = 0; i < FOURMB; i++)
{
h_idata1[i] = rand() & 0xff;
h_idata2[i] = rand() & 0xff;
}
printf("initialize ready\n");
// grow the problem size by 4x each round up to the full buffer
for (int n = INITN; n <= FOURMB; n *= 4)
{
printf("n=%d:\n", n);
int cpu_result = rbfComputeCPU(h_idata1, h_idata2, n);
int gpu_result = rbfComputeGPU(h_idata1, h_idata2, n);
if (cpu_result != gpu_result)
{
printf("ERROR happen when compute %d\n", n);
printf("cpu_result = %d,gpu_result = %d\n", cpu_result, gpu_result);
free(h_idata1);
free(h_idata2);
exit(1);
}
}
free(h_idata1);
free(h_idata2);
} |
11,048 | /* Command to compile on Windows:
nvcc .\lab5_2_2.cu -ccbin "C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Tools\MSVC\14.29.30133\bin\Hostx64\x64"
Output should be:
a: [22, 13, 16, 5]
b: [5, 22, 17, 37]
c: [27, 35, 33, 42]
*/
#include <stdio.h>
// Element-wise sum c = a + b for a single block; thread i handles element i.
__global__ void vector_add(int *c, int *a, int *b) {
    const int i = threadIdx.x;
    c[i] = a[i] + b[i];
}
void array2str(char *str, int *array, int n, int str_size) {
int written = 0;
written += snprintf(str + written, str_size - written, "[");
for (int idx = 0; idx < n - 1; idx++) {
written += snprintf(str + written, str_size - written, "%i, ", *(array + idx));
}
written += snprintf(str + written, str_size - written, "%i]", *(array + n - 1));
return;
}
// Adds two small integer vectors on the device and prints inputs and result.
int main(void) {
    const int N = 4;
    int a[N] = {22, 13, 16, 5};
    int b[N] = {5, 22, 17, 37};
    int c[N];

    // Show the operands before launching.
    char str_a[80];
    char str_b[80];
    array2str(str_a, a, N, 80);
    array2str(str_b, b, N, 80);
    printf("a: %s\n", str_a);
    printf("b: %s\n", str_b);

    // Device buffers for the two inputs and the sum.
    int *gpu_a;
    int *gpu_b;
    int *gpu_c;
    cudaMalloc((void**) &gpu_a, sizeof(int) * N);
    cudaMalloc((void**) &gpu_b, sizeof(int) * N);
    cudaMalloc((void**) &gpu_c, sizeof(int) * N);

    cudaMemcpy(gpu_a, a, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(gpu_b, b, sizeof(int) * N, cudaMemcpyHostToDevice);

    // One block, one thread per element.
    vector_add<<<1, N>>>(gpu_c, gpu_a, gpu_b);
    cudaMemcpy(c, gpu_c, sizeof(int) * N, cudaMemcpyDeviceToHost);

    // Release device memory before printing.
    cudaFree(gpu_a);
    cudaFree(gpu_b);
    cudaFree(gpu_c);

    char str_c[80];
    array2str(str_c, c, N, 80);
    printf("c: %s\n", str_c);
    return 0;
} |
11,049 | #include <stdio.h>
#include <math.h>
// Build a table of launch configurations to benchmark for an N x N problem.
// Each row holds 6 ints: {threads.x, threads.y, threads.z, blocks.x,
// blocks.y, blocks.z}. Two families are generated: first, halve threads.x
// while multiplying blocks.x (total thread count constant); second, reshape
// the thread block from (t,1) to (t/i,i) at a fixed block count.
// On return *inputRuns owns the malloc'd table and *length its row count.
void generateRuns(int *** inputRuns,int * length,int N){
// GetGPUProperties
cudaDeviceProp props;
cudaGetDeviceProperties(&props,0);
// Get maximum threads, blocks and grids
printf("Generating test runs using N= %d:\n",N);
printf("GPU Info\n");
printf("Name: %s\n",props.name);
printf("Max Threads Per Block %d\n",props.maxThreadsPerBlock);
printf("Max Threads Size %d %d %d\n",
props.maxThreadsDim[0],
props.maxThreadsDim[1],
props.maxThreadsDim[2]);
printf("Max Grid Size %d %d %d\n",
props.maxGridSize[0],
props.maxGridSize[1],
props.maxGridSize[2]);
// NOTE(review): only the major compute-capability digit is printed
printf("Compute Capability %d\n",props.major);
/* Get total number of tests- from linear to squared,
from threads to blocks */
// t = threads per block (capped by hardware); b = blocks to cover N*N
int t;
if(N*N<props.maxThreadsPerBlock)
t = N*N;
else
t= props.maxThreadsPerBlock;
// NOTE(review): always adds one extra block, even when N*N divides t evenly
int b = N*N/t+1;
// number of halvings from t down to 32, and from t down to 1, respectively
int threadsToBlocksTests = log2(t/32.0) + 1;
int linearToSquareTests = log2(t/1.0) + 1;
printf("%d %d\n",threadsToBlocksTests,linearToSquareTests);
*length = threadsToBlocksTests + linearToSquareTests;
// Allocate runs
int ** runs = (int**)malloc((*length)*sizeof(int*));
for(int i=0;i<*length;i++){
runs[i] = (int*)malloc(6*sizeof(int));
}
// Generate the block, grid, threads
// From linear to squared
// NOTE(review): the count variables appear cross-labeled relative to the
// loop comments (this loop runs threadsToBlocksTests times) -- confirm.
int j = 0;
int i;
for(i=1;j<threadsToBlocksTests;i*=2){
runs[j][0]= t/i;
runs[j][1]= 1;
runs[j][2]= 1;
runs[j][3]= b*i;
runs[j][4]= 1;
runs[j][5]= 1;
j++;
}
// From threads to blocks
for(i=1;j<*length;i*=2){
runs[j][0]= t/i;
runs[j][1]= i;
runs[j][2]= 1;
runs[j][3]= b;
runs[j][4]= 1;
runs[j][5]= 1;
j++;
}
*inputRuns = runs;
}
|
11,050 | __global__ void ewsum_kernel(float *d_a, float *d_w, float *d_out, int num_w, int width, int total_dim) {
// Element-wise scale: d_out[id] = d_a[id] * d_w[w_x]. d_a is treated as
// `width` rows of (total_dim / width) elements; consecutive groups of
// (width / num_w) rows share one of the num_w weights.
// Get the id and make sure it is within bounds
const int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= total_dim) {
return;
}
const int non_width = total_dim / width;
// row along the "width" axis for this element
const int x = id / non_width;
// rows per weight; w_x picks the weight shared by this row's group
const int num_sets = width / num_w;
const int w_x = x / num_sets;
d_out[id] = d_a[id] * d_w[w_x];
}
// Reduces d_a (width rows x non_width cols) by summing the num_w rows whose
// indices are congruent mod in_set, producing an in_set x non_width output.
__global__ void ewsum_sum_kernel(float *d_a, float *d_out, int num_w, int width, int total_dim) {
// out is (width / num_w) x (total_dim / width)
const int in_set = width / num_w;
const int non_width = total_dim / width;
const int out_total_dim = in_set * non_width;
// Get the id and make sure it is within bounds
const int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= out_total_dim) {
return;
}
// output coordinates: row out_x, column non_x_loc
const int out_x = id / non_width;
const int non_x_loc = id % non_width;
// TODO: this is probably slow...
// strided accumulation over the rows that fold onto out_x
float out = 0;
for (int i = out_x; i < width; i += in_set) {
out += d_a[i*non_width + non_x_loc];
}
d_out[id] = out;
}
// Backward companion of ewsum_kernel: output row x gets the error row
// (x % err_width) scaled by the weight its group maps to
// (w_x = x / (width / num_w)).
__global__ void ewsum_back_kernel(float *d_error, float *d_w, float *d_out,
int num_w, int err_width, int width, int total_dim) {
// Get the id and make sure it is within bounds
const int id = threadIdx.x + blockIdx.x * blockDim.x;
if (id >= total_dim) {
return;
}
const int non_width = total_dim / width;
const int x = id / non_width;
const int num_sets = width / num_w;
const int w_x = x / num_sets;
// wrap into the error tensor's row range
const int err_x = x % err_width;
const int non_x_loc = id % non_width;
d_out[id] = d_w[w_x] * d_error[err_x*non_width + non_x_loc];
}
|
11,051 | #define TILE_DIM 1024
// Single-block population standard deviation of `vector` (length `length`),
// written to result[0]. Launch with one block of TILE_DIM threads.
// Each thread accumulates sum and sum-of-squares over a TILE_DIM-strided
// slice, then a shared-memory tree combines the per-thread partials.
template<typename T>
__device__ void math_stdd(const T* vector, double* result, const int length) {
__shared__ double threadSum[TILE_DIM];
__shared__ double threadSquareSum[TILE_DIM];
int index = threadIdx.x;
// ceil(length / TILE_DIM) elements per thread
int partLength = (length + TILE_DIM - 1) / TILE_DIM;
double sum = 0;
double squareSum = 0;
for (int i = 0; i < partLength; i++) {
int valueIndex = i * TILE_DIM + index;
if (valueIndex < length) {
double value = vector[valueIndex];
sum += value;
squareSum += value * value;
}
}
threadSum[index] = sum;
threadSquareSum[index] = squareSum;
// Tree combine: at step d, each thread whose index is a multiple of 2d
// absorbs partner index+d. The barrier sits at the top of the loop,
// outside the divergent branch, which is what makes it legal; the
// `d < length` bound just skips useless rounds for short inputs.
for (int d = 1; d < TILE_DIM && d < length; d <<= 1) {
__syncthreads();
if (index % (d << 1) == 0) {
int valueIndex = index + d;
if (valueIndex < TILE_DIM) {
double value = threadSum[valueIndex];
double square = threadSquareSum[valueIndex];
sum += value;
squareSum += square;
threadSum[index] = sum;
threadSquareSum[index] = squareSum;
}
}
}
// population stddev: sqrt(E[x^2] - E[x]^2), expanded form
if (index == 0) {
result[0] = sqrt((squareSum - (sum * sum) / length) / length);
}
} |
11,052 | /*
* thrustAssignment.cpp
*
* Created on: Apr 8, 2019
* Author: zetser
*/
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <vector>
#include <algorithm>
// Create a CUDA event and record it on the default stream. The caller
// pairs two returned events with cudaEventElapsedTime to measure a span.
__host__ cudaEvent_t get_time(void) {
    cudaEvent_t marker;
    cudaEventCreate(&marker);
    cudaEventRecord(marker);
    return marker;
}
// Benchmark thrust::sort (device) against std::sort (host) on the same
// N random bytes; N defaults to 512*512 and may be overridden by argv[1].
int main(int argc, const char **argv) {
    uint32_t N = 512*512;
    if (argc >= 2) {
        N = atoi(argv[1]);
    }
    // Identical random contents in a thrust host vector and a std::vector.
    thrust::host_vector<uint8_t> H(N);
    std::vector<uint8_t> B(N);
    for(int i=0; i<N; i++) {
        H[i] = rand();
        B[i] = H[i];
    }
    // Time thrust::sort on the device (includes H2D copy, sort, D2H copy).
    cudaEvent_t startT, stopT;
    float deltaT;
    startT = get_time();
    thrust::device_vector<int> D = H;  // NOTE: widens uint8_t -> int on the device
    thrust::sort(D.begin(), D.end());
    thrust::copy(D.begin(), D.end(), H.begin());
    stopT = get_time();
    cudaEventSynchronize(stopT);
    cudaEventElapsedTime(&deltaT, startT, stopT);
    printf("thrust::sort sorted %d size array in: %fms\n", N, deltaT);
    // Time std::sort on the host using the same event mechanism.
    cudaEvent_t startT1, stopT1;
    float deltaT1;
    startT1 = get_time();
    std::sort(B.begin(), B.end());
    stopT1 = get_time();
    // BUG FIX: the event must complete before querying elapsed time;
    // without this cudaEventElapsedTime can fail with cudaErrorNotReady
    // and leave deltaT1 unset (the first measurement did synchronize).
    cudaEventSynchronize(stopT1);
    cudaEventElapsedTime(&deltaT1, startT1, stopT1);
    printf("std::sort sorted %d size array in: %fms\n", N, deltaT1);
    return 0;
}
|
11,053 | #define VERTICES 600
__constant__ float2 d_Vertices[VERTICES];
//tuning parameters
//block_size_x any sensible thread block size
//tile_size any sensible tile size value
//prefetch 0 or 1 for reusing constant memory from previous iteration
//#ifndef prefetch
//#define prefetch 0
//#endif
#ifndef use_bitmap
#define use_bitmap 0
#define coalesce_bitmap 0
#endif
#ifndef block_size_x
#define block_size_x 256
#endif
#ifndef tile_size
#define tile_size 1
#endif
/*
 * Crossing-number point-in-polygon test, tunable variant.
 * Each thread classifies tile_size points (strided by block_size_x) against
 * the VERTICES-vertex polygon in constant memory (d_Vertices).
 * Tunables: block_size_x, tile_size, method (0/1), use_bitmap (0/1/2),
 * coalesce_bitmap. Output: one int per point, or a packed 32-point bitmap.
 */
__global__ void cn_PnPoly(int* bitmap, float2* points, int n) {
    int ti = blockIdx.x * block_size_x * tile_size + threadIdx.x;
    if (ti < n) {

        // the crossing number counter, one per tiled point
        int cn[tile_size];
        float2 p[tile_size];
        #pragma unroll
        for (int k=0; k<tile_size; k++) {
            cn[k] = 0;
            p[k] = points[ti+k*block_size_x];
        }

        int k = VERTICES-1;

        // loop through all edges of the polygon (edge from vertex k to j)
        for (int j=0; j<VERTICES; k = j++) {
            float2 vj = d_Vertices[j];
            float2 vk = d_Vertices[k];
            #if method == 1
            // inverse slope of the edge, hoisted out of the tile loop
            float vb = (vj.x - vk.x) / (vj.y - vk.y);
            #endif

            #pragma unroll
            for (int i=0; i<tile_size; i++) {
                #if method == 0
                if ( ((vj.y>p[i].y) != (vk.y>p[i].y)) &&
                        (p[i].x < (vk.x-vj.x) * (p[i].y-vj.y) / (vk.y-vj.y) + vj.x) ) {
                    cn[i] = !cn[i];
                }
                #elif method == 1
                // BUG FIX: this branch previously indexed p[] and cn[] with
                // the polygon-vertex index k instead of the tile index i,
                // reading/writing past the tile_size-element local arrays
                // whenever k >= tile_size.
                int b = ((vk.y <= p[i].y) && (vj.y > p[i].y)) || ((vk.y > p[i].y) && (vj.y <= p[i].y));
                cn[i] += b && (p[i].x < vk.x + vb * (p[i].y - vj.y));
                #endif
            }
        }

        #if use_bitmap == 1
        int lane_index = threadIdx.x & (32 - 1);
        unsigned int bitstring[tile_size];
        #if coalesce_bitmap == 1
        __shared__ unsigned int block_output[tile_size*block_size_x/32];
        int warp_id = threadIdx.x/32;
        #endif
        #pragma unroll
        for (int k=0; k<tile_size; k++) {
            // write at my position in bitstring
            // NOTE(review): lane 0 shifts a 32-bit value by 32, which is
            // undefined; the intended shift is probably (31 - lane_index).
            // Left as-is pending confirmation of the expected bit order.
            bitstring[k] = (cn[k] & 1) << (32-lane_index);
            // sum the lane contributions across the warp
            // NOTE(review): mask-less __shfl_xor is removed on Volta+;
            // the _sync form should be used on modern architectures.
            #pragma unroll
            for (unsigned int s=16; s>0; s>>=1) {
                bitstring[k] += __shfl_xor(bitstring[k], s);
            }
            #if coalesce_bitmap == 1
            // store bitstring for this warp in shared buffer
            if (lane_index == 0) {
                block_output[warp_id+k*block_size_x/32] = bitstring[k];
            }
            #endif
        }
        // NOTE(review): this barrier sits inside `if (ti < n)`, which is
        // divergent for the tail block -- undefined behavior when n is not
        // a multiple of the block footprint.
        __syncthreads();
        #endif

        #pragma unroll
        for (int k=0; k<tile_size; k++) {
            #if use_bitmap == 0
            bitmap[ti+k*block_size_x] = (cn[k] & 1); // 0 if even (out), and 1 if odd (in)
            #elif use_bitmap == 1
            #if coalesce_bitmap == 0
            if (lane_index == 0) {
                bitmap[ti/32+k*block_size_x/32] = bitstring[k];
            }
            #elif coalesce_bitmap == 1
            // write back results in coalesced manner
            if (threadIdx.x < block_size_x/32) {
                bitmap[ti/32+k*block_size_x/32] = block_output[warp_id];
            }
            #endif
            #elif use_bitmap == 2
            // `==` binds before `&`, so this is cn[k] & (1==1) == cn[k] & 1
            if (cn[k] & 1 == 1) {
                bitmap[ti+k*block_size_x] = 1; // 0 if even (out), and 1 if odd (in)
            }
            #endif
        }
    }
}
// Naive crossing-number point-in-polygon test: one thread per query point,
// polygon vertices read from constant memory (d_Vertices).
__global__ void cn_PnPoly_naive(int* bitmap, float2* points, int n) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) {
        return;
    }
    float2 q = points[idx];
    // Walk every edge (prev -> cur); toggle parity on each ray crossing.
    int inside = 0;
    int prev = VERTICES - 1;
    for (int cur = 0; cur < VERTICES; prev = cur++) {
        float2 a = d_Vertices[cur];
        float2 b = d_Vertices[prev];
        bool straddles = (a.y > q.y) != (b.y > q.y);
        if (straddles && (q.x < (b.x - a.x) * (q.y - a.y) / (b.y - a.y) + a.x)) {
            inside = !inside;
        }
    }
    bitmap[idx] = inside; // 0 if even (out), and 1 if odd (in)
}
/*CPU version*/
/* CPU reference: crossing-number test of 20000 points p against the
   VERTICES-vertex polygon v; bitmap[i] gets 0 (outside) or 1 (inside). */
float pnpoly_cn(int *bitmap, float2 *v, float2 *p) {
    const int nvert = VERTICES;
    const int npoint = 20000;
    for (int i = 0; i < npoint; i++) {
        int c = 0;
        int j, k;
        for (j = 0, k = nvert-1; j < nvert; k = j++) {
            bool straddles = (v[j].y > p[i].y) != (v[k].y > p[i].y);
            if (straddles &&
                (p[i].x < (v[k].x-v[j].x) * (p[i].y-v[j].y) / (v[k].y-v[j].y) + v[j].x))
                c = !c;
        }
        bitmap[i] = c & 1;
    }
    return 0.0;
}
/*GPU version*/
/* GPU version of pnpoly_cn: one thread per point, block size block_size_x,
   polygon vertices taken from constant memory (d_Vertices). */
__global__ void pnpoly_cn_gpu(int *bitmap, float2 *points, int n) {
    int idx = blockIdx.x * block_size_x + threadIdx.x;
    if (idx >= n) {
        return;
    }
    float2 q = points[idx];
    int parity = 0;
    int prev = VERTICES - 1;
    for (int cur = 0; cur < VERTICES; prev = cur++) {
        float2 a = d_Vertices[cur];
        float2 b = d_Vertices[prev];
        bool straddles = (a.y > q.y) != (b.y > q.y);
        if (straddles && (q.x < (b.x - a.x) * (q.y - a.y) / (b.y - a.y) + a.x)) {
            parity = !parity;
        }
    }
    bitmap[idx] = parity;
}
|
11,054 | #define NUM_RND_BLOCKS 96
#define NUM_RND_THREADS_PER_BLOCK 128
#define NUM_RND_STREAMS (NUM_RND_BLOCKS * NUM_RND_THREADS_PER_BLOCK)
/*
* Defines for getting the values at the lower and upper 32 bits
* of a 64-bit number.
*/
#define LOW_BITS(x) ((x) & 0xffffffff)
#define HIGH_BITS(x) ((x) >> 32)
/*
* Number of iterations to run random number generator upon initialization.
*/
#define NUM_RND_BURNIN 100
/*
* CUDA grid dimensions for different types of kernels
*/
#define COPY_BLOCK_SIZE 16
#
// element-wise kernels use min(ceil(N / 512), 4096) blocks of 512 threads
#define MAX_VECTOR_OP_BLOCKS 4096
#define MAX_VECTOR_OP_THREADS_PER_BLOCK 512
#define NUM_VECTOR_OP_BLOCKS(N) (min(((N) + MAX_VECTOR_OP_THREADS_PER_BLOCK - 1)/MAX_VECTOR_OP_THREADS_PER_BLOCK, MAX_VECTOR_OP_BLOCKS))
#define NUM_VECTOR_OP_THREADS_PER_BLOCK(N) (min((N), MAX_VECTOR_OP_THREADS_PER_BLOCK))
#define PI 3.1415926535897932f
/*
 * Fill gData[0..numElements) with standard-normal samples via the
 * Box-Muller transform. Each of the NUM_RND_STREAMS threads owns one
 * multiply-with-carry RNG stream (multiplier rndMults[idx], 64-bit state
 * rndWords[idx]) and emits a pair of Gaussians per iteration; the updated
 * RNG state is written back at the end.
 */
__global__ void kRandomGaussian(unsigned int* rndMults, unsigned long long* rndWords, float* gData, unsigned int numElements) {
    const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned long long rndWord = rndWords[idx];
    const unsigned int rndMult = rndMults[idx];

    float rnd1, rnd2, R, T;
    for(unsigned int i = idx; i < numElements; i += 2*NUM_RND_STREAMS) {
        // two uniforms in (0, 1]; the +1 keeps rnd1 away from 0 so log is finite
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd1 = ((LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        rndWord = rndMult * LOW_BITS(rndWord) + HIGH_BITS(rndWord);
        rnd2 = ((LOW_BITS(rndWord)) + 1.0f) / 4294967296.0f;
        // Box-Muller: radius R and angle T yield two independent N(0,1) draws
        T = 2 * PI * rnd2;
        R = sqrtf(-2 * __logf(rnd1));  // BUG FIX: was (-2 * rnd1), not Gaussian
        gData[i] = R * __cosf(T);      // BUG FIX: was R * T; pairs with __sinf below
        if (i + NUM_RND_STREAMS < numElements)
            gData[i + NUM_RND_STREAMS] = R * __sinf(T);
    }
    rndWords[idx] = rndWord;
}
// Column-wise minimum: one block per column of `mat` (stored column-major:
// element i of column blockIdx.x is mat[blockIdx.x * height + i]), with 32
// threads cooperating per column. Assumes exactly 32 threads per block
// (min_vals has 32 slots and the strided loop steps by 32).
// NOTE(review): cur_min starts at 1.0f, so values greater than 1.0 can
// never be reported -- only correct if mat holds values <= 1 (e.g.
// probabilities). Confirm against callers before reusing elsewhere.
__global__ void kMinColumnwise(float* mat, float* target, unsigned int width, unsigned int height) {
__shared__ float min_vals[32];
float cur_min = 1.0f;
float val = 0;
// each thread scans a 32-strided slice of the column
for (unsigned int i = threadIdx.x; i < height; i += 32) {
val = mat[blockIdx.x * height + i];
if (val < cur_min)
cur_min = val;
}
min_vals[threadIdx.x] = cur_min;
__syncthreads();
// thread 0 folds the 32 per-thread minima and writes the column result
if (threadIdx.x == 0) {
cur_min = 1.0f;
for (unsigned int i = 0; i < 32; i++)
if (min_vals[i] < cur_min)
cur_min = min_vals[i];
target[blockIdx.x] = cur_min;
}
}
|
11,055 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <assert.h>
#include <math.h>
//Reading array A from input file inp.txt
// Growable int array: `array` holds `size` slots, of which `used` are
// filled. NOTE: insertArray grows one slot at a time, so after any insert
// size == used; main() relies on that invariant (it reads A.size as the
// element count), so the growth policy is intentionally kept.
typedef struct {
    int *array;
    size_t used;
    size_t size;
} Array;
// Allocate backing storage for `initialSize` ints and mark the array empty.
// Fix: the original never checked the allocation result.
void initArray(Array *a, size_t initialSize) {
    a->array = (int*) malloc(initialSize * sizeof(int));
    assert(a->array != NULL);
    a->used = 0;
    a->size = initialSize;
}
// Append one element, growing the buffer by one slot when full.
// Fix: the original assigned realloc's result without checking it, which
// on failure both leaks the old buffer and dereferences NULL.
void insertArray(Array *a, int element) {
    if (a->used == a->size) {
        a->size += 1;
        int *grown = (int*) realloc(a->array, a->size * sizeof(int));
        assert(grown != NULL);
        a->array = grown;
    }
    a->array[a->used++] = element;
}
// Build an Array from the comma/space-separated integers in "inp.txt".
// Returns an empty Array (capacity 1, used 0) if the file cannot be opened
// (after printing "error" to stdout).
Array initArrayA(){
FILE *fp;
char str[50000];
Array a;
initArray(&a, 1);
/* opening file for reading */
fp = fopen("inp.txt" , "r");
if(fp == NULL) {
printf("%s","error");
return a;
}
// parse line by line; strtok_r's " , " set splits on spaces and commas
while( fgets (str, 50000, fp)!=NULL ) {
/* writing content to stdout */
// printf("%s\n", str);
char* token;
char* rest = str;
while ((token = strtok_r(rest, " , ", &rest)))
insertArray(&a, atoi(token));
}
fclose(fp);
return a;
}
//Asserts for GPU errors
#define gpuErrorCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print file/line context for a failed CUDA call and optionally exit with
// the error code. Success is a silent no-op.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    printf("GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Histogram with global-memory atomics: element value v increments bin
// d_out[v/100]. Grid-stride loop, so any launch size covers `size`
// elements; d_out must be zeroed by the caller before launch.
__global__ void global_count_range_bins_kernel(int * d_out, int * d_in, int size)
{
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    // stride is the total number of threads in the grid; reusing threads
    // scales to any input size
    int stride = blockDim.x * gridDim.x;
    // BUG FIX: the original called __syncthreads() inside this loop.
    // Threads make different numbers of iterations, so part of the block
    // exits the loop while the rest waits at the barrier -- undefined
    // behavior. Global atomics need no barrier, so it is removed.
    for (; myId < size; myId += stride)
    {
        atomicAdd(&(d_out[d_in[myId]/100]), 1);
    }
}
// Histogram with shared-memory atomics: each block accumulates a private
// copy of the counts in sdata (one slot per thread; only bins 0..9 are
// used), then merges bins into the global d_out with one atomic per bin
// per block. Requires blockDim.x >= 10 and dynamic shared memory of
// blockDim.x * sizeof(int); d_out (10 ints) must be zeroed by the caller.
__global__ void shmem_count_range_bins_kernel(int * d_out, int * d_in, int size)
{
    extern __shared__ int sdata[];
    int myId = threadIdx.x + blockDim.x * blockIdx.x;
    int tid = threadIdx.x;
    // zero the block-private histogram
    sdata[tid] = 0;
    __syncthreads();
    // stride is the total number of threads in the grid
    int stride = blockDim.x * gridDim.x;
    // BUG FIX: the original placed __syncthreads() inside this loop;
    // threads iterate different numbers of times, so the barrier was only
    // reached by part of the block (undefined behavior). Atomics need no
    // barrier here.
    for (; myId < size; myId += stride)
    {
        atomicAdd(&(sdata[d_in[myId]/100]), 1);
    }
    // BUG FIX: a barrier is required so every thread's contribution to
    // sdata is visible before the merge below reads it.
    __syncthreads();
    // BUG FIX: d_out holds only 10 bins, but the original let every thread
    // write d_out[tid] -- out-of-bounds for tid >= 10.
    if (tid < 10)
        atomicAdd(&d_out[tid], sdata[tid]);
}
//kernel to perform parallel prefix sum
//assumes only 1 block (1 block can be utilized since we have only 10 elements)
// Inclusive prefix sum (Hillis-Steele) of d_in[0..size) into d_out,
// single-block version: launch with one block of >= size threads and
// blockDim.x * sizeof(int) dynamic shared memory.
__global__ void prefixsum(int *d_out, int * d_in, int size)
{
    extern __shared__ int sh_mem[];
    int tid = threadIdx.x;
    int myId = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the original loaded d_in[myId] unconditionally; pad with 0
    // for out-of-range threads instead of reading past the buffer.
    sh_mem[tid] = (myId < size) ? d_in[myId] : 0;
    __syncthreads();
    // BUG FIX: the original wrapped this loop in `if (myId < size)`, which
    // makes the barriers divergent, and updated sh_mem[tid] directly from
    // sh_mem[tid - d] -- racing reads against writes within the same step.
    // Read the partner into a register first, barrier, write, barrier.
    for (int d = 1; d < blockDim.x; d *= 2)
    {
        int addend = (tid >= d) ? sh_mem[tid - d] : 0;
        __syncthreads();
        sh_mem[tid] += addend;
        __syncthreads();
    }
    // BUG FIX: guard the store (the original wrote d_out[myId] unchecked).
    if (myId < size)
        d_out[myId] = sh_mem[tid];
}
//Function to call corresponding kernel based on memory usage
// Dispatch to the shared-memory or global-memory histogram kernel.
// d_out must hold the 10 bin counters; `size` is the element count of d_in.
void count_bins(int * d_out, int * d_in,
int size, bool usesSharedMemory)
{
const int maxThreadsPerBlock = 512;
int threads = maxThreadsPerBlock;
// handles non power of 2 inputs
int blocks = ceil(float(size) / float(maxThreadsPerBlock));
if (usesSharedMemory)
{
//fprintf(q2a, "shared kernel in count \n");
// dynamic shared memory: one int per thread (bins occupy slots 0..9)
shmem_count_range_bins_kernel<<<blocks, threads, threads * sizeof(int)>>>
(d_out, d_in, size);
// NOTE(review): unlike the global branch, no error check or sync
// follows this launch -- failures here would go unnoticed.
}
else
{
//fprintf(q2a, "global kernel in count \n");
global_count_range_bins_kernel<<<blocks, threads>>>(d_out, d_in, size);
gpuErrorCheck( cudaPeekAtLastError() );
gpuErrorCheck( cudaDeviceSynchronize() );
}
}
// Driver for three experiments over the integers in inp.txt, binned into
// 10 ranges of width 100:
//   q2a.txt: counts via global-memory atomics
//   q2b.txt: counts via the count_bins shared-memory path (see NOTE below)
//   q2c.txt: inclusive prefix sum over the q2a counts
int main(int argc, char **argv)
{
    FILE *q2a;
    FILE *q2b;
    FILE *q2c;
    q2a = fopen("q2a.txt", "w");
    q2b = fopen("q2b.txt", "w");
    q2c = fopen("q2c.txt", "w");
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        fprintf(q2a, "error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp devProps;
    if (cudaGetDeviceProperties(&devProps, dev) == 0)
    {
        fprintf(q2a, "Using device %d:\n", dev);
        fprintf(q2a, "%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
                devProps.name, (int)devProps.totalGlobalMem,
                (int)devProps.major, (int)devProps.minor,
                (int)devProps.clockRate);
    }
    // generate the input array on the host
    Array A = initArrayA();
    int * h_in = A.array;
    // A.size == A.used here because insertArray grows one slot per insert
    const int ARRAY_SIZE = A.size;
    const int ARRAY_BYTES = A.size * sizeof(int);
    fprintf(q2a, "Array size is %d\n", ARRAY_SIZE);
    // declare GPU memory pointers
    int * d_in, * d_out, * s_in, * s_out, *prefix_out, *prefix_in;
    // allocate GPU memory
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &s_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, 10*sizeof(int));
    cudaMalloc((void **) &s_out, 10*sizeof(int));
    // allocate memory for prefix sum, it has only 10 buckets
    cudaMalloc((void **) &prefix_out, 10*sizeof(int));
    cudaMalloc((void **) &prefix_in, 10*sizeof(int));
    // BUG FIX: cudaMalloc does not zero memory -- without these memsets the
    // atomicAdds accumulate onto whatever garbage was in the allocations.
    cudaMemset(d_out, 0, 10*sizeof(int));
    cudaMemset(s_out, 0, 10*sizeof(int));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float elapsedTime;
    // Problem 2a - Using Global Memory to get counts
    fprintf(q2a,"Using Global Memory to get counts\n");
    // transfer the input array to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    cudaEventRecord(start, 0);
    count_bins(d_out, d_in, ARRAY_SIZE, false);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    fprintf(q2a, "Using Global memory - average time elapsed: %f\n", elapsedTime);
    // copy back the counts from GPU
    int b[10];
    cudaMemcpy(&b, d_out, 10*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < 10; i++) {
        fprintf(q2a, "Global Memory counts returned by device B[%d]: %d\n", i, b[i]);
    }
    // Problem 2b - Using Shared Memory to get counts
    cudaMemcpy(s_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    fprintf(q2b, "Array size is %d\n", ARRAY_SIZE);
    fprintf(q2b,"Using Shared Memory to get counts\n");
    cudaEventRecord(start, 0);
    // NOTE(review): this passes usesSharedMemory=false, so part 2b actually
    // re-runs the global-memory kernel. Switching it to true should wait on
    // fixing shmem_count_range_bins_kernel's out-of-bounds merge first.
    count_bins(s_out, s_in, ARRAY_SIZE, false);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    fprintf(q2b, "Using Shared memory - average time elapsed: %f\n", elapsedTime);
    // copy back the counts from GPU
    int s[10];
    cudaMemcpy(&s, s_out, 10*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < 10; i++) {
        fprintf(q2b, "Shared Memory counts returned by device B[%d]: %d\n", i, s[i]);
    }
    // Problem 2c - Using Parallel Prefix SUM to calculate C
    fprintf(q2c, "Array size is %d\n", ARRAY_SIZE);
    // seed the scan with the counts computed in part 2a
    cudaMemcpy(prefix_in, b, 10 * sizeof(int), cudaMemcpyHostToDevice);
    fprintf(q2c, "Running Parallel Prefix Sum\n");
    cudaEventRecord(start, 0);
    prefixsum<<<1, 10, 10 * sizeof(int)>>>(prefix_out, prefix_in, 10);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    fprintf(q2c, "Using Parallel Prefix Sum - average time elapsed: %f\n", elapsedTime);
    gpuErrorCheck( cudaPeekAtLastError() );
    gpuErrorCheck( cudaDeviceSynchronize() );
    // copy back the counts from GPU
    int c[10];
    cudaMemcpy(&c, prefix_out, 10*sizeof(int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < 10; i++) {
        fprintf(q2c, "Parallel Prefix sum returned by device: %d\n", c[i]);
    }
    // free GPU memory allocation
    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(prefix_out);
    cudaFree(prefix_in);
    cudaFree(s_in);
    cudaFree(s_out);
    // release the host-side input as well
    free(h_in);
    return 0;
}
// Reference: https://developer.nvidia.com/blog/gpu-pro-tip-fast-histograms-using-shared-atomics-maxwell
|
11,056 | /*!
* \brief Sort.
*/
#include <iostream>
#include <thrust/sort.h>
#include <thrust/device_vector.h>
#define CUDA_CHECK(condition) \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
fprintf(stderr, "CUDA_CHECK error in line %d of file %s \
: %s \n", __LINE__, __FILE__, cudaGetErrorString(cudaGetLastError()) ); \
exit(EXIT_FAILURE); \
} \
} while(0);
// Exercise thrust's sort family on plain host arrays (no device involved).
void TestDataInCPU() {
    // 1) in-place ascending sort
    {
        const int N = 6;
        int data[N] = { 1, 4, 2, 8, 5, 7 };
        thrust::sort(data, data + N);
        for (int i = 0; i < N; i++)
            std::cout << data[i] << ", ";
    }
    std::cout << std::endl << "Finish Test1." << std::endl;
    // 2) stable descending sort via thrust::greater
    {
        const int N = 6;
        int data[N] = { 1, 4, 2, 8, 5, 7 };
        thrust::stable_sort(data, data + N, thrust::greater<int>());
        for (int i = 0; i < N; i++)
            std::cout << data[i] << ", ";
    }
    std::cout << std::endl << "Finish Test2." << std::endl;
    // 3) key/value sort: values are permuted to follow their keys
    {
        const int N = 6;
        int keys[N] = { 1, 4, 2, 8, 5, 7 };
        char values[N] = { 'a', 'b', 'c', 'd', 'e', 'f' };
        thrust::sort_by_key(keys, keys + N, values);
        for (int i = 0; i < N; i++)
            std::cout << values[i] << "(" << keys[i] << "), ";
        // keys   -> { 1, 2, 4, 5, 7, 8 }
        // values -> { 'a', 'c', 'b', 'e', 'f', 'd' }
    }
    std::cout << std::endl << "Finish Test3." << std::endl;
}
// Exercise thrust sorting with device-resident data: once through a
// thrust::device_vector, once through a raw cudaMalloc'd buffer adapted
// with thrust::device_ptr.
void TestDataInGPU() {
    // 4) device_vector round trip
    {
        const int N = 6;
        int seed[N] = { 1, 4, 2, 8, 5, 7 };
        thrust::device_vector<int> dev(6, 1);
        for (int i = 0; i < N; i++)
            dev[i] = seed[i];
        thrust::sort(dev.begin(), dev.end());
        for (int i = 0; i < N; i++)
            std::cout << dev[i] << ", ";
    }
    std::cout << std::endl << "Finish Test4." << std::endl;
    // 5) raw device pointer wrapped in thrust::device_ptr
    {
        const int N = 6;
        int host[N] = { 1, 4, 2, 8, 5, 7 };
        int *dev;
        CUDA_CHECK(cudaMalloc((void **)&dev, sizeof(int) * N));
        CUDA_CHECK(cudaMemcpy(dev, host, sizeof(int) * N, cudaMemcpyHostToDevice));
        thrust::device_ptr<int> dev_begin(dev);
        thrust::sort(dev_begin, dev_begin + N);
        CUDA_CHECK(cudaMemcpy(host, dev, sizeof(int) * N, cudaMemcpyDeviceToHost));
        for (int i = 0; i < N; i++)
            std::cout << host[i] << ", ";
        CUDA_CHECK(cudaFree(dev));
    }
    std::cout << std::endl << "Finish Test5." << std::endl;
}
// Run the host-side and device-side thrust sorting demos in order.
int main(void) {
TestDataInCPU();
TestDataInGPU();
return 0;
}
|
11,057 | #include "includes.h"
// Block-level accumulation of per-object occupancy values.
// Each thread stages sub_numClusters consecutive floats of its object's row
// (columns [clusterStart, clusterStart + sub_numClusters) of device_mapOcc,
// which has numClusters columns) into dynamic shared memory, padding with
// zeros for out-of-range objects; a tree reduction then sums across the
// block's threads, and thread 0 writes the partial sums into
// device_reduceOcc at the same column offset for row blockIdx.x.
// Requires: blockDim.x a power of two, and dynamic shared memory of
// blockDim.x * sub_numClusters * sizeof(float).
__global__ void cuda_accumulate_occ(float * device_mapOcc, int numObjs, int numClusters, int clusterStart, int sub_numClusters, float *device_reduceOcc) {
int objIndex = blockDim.x * blockIdx.x + threadIdx.x;
extern __shared__ float shared_objects[];
if(objIndex < numObjs) {
for(int i = 0;i < sub_numClusters; i++)
// for(int i = clusterStart + sub_numClusters - 1;i >= clusterStart; i--)
shared_objects[threadIdx.x * sub_numClusters + i] = device_mapOcc[objIndex * numClusters + i + clusterStart];
}
else {
// zero padding so the tree reduction needs no bounds checks
for(int i = 0;i < sub_numClusters; i++)
//for(int i = clusterStart + sub_numClusters - 1;i >= clusterStart; i--)
shared_objects[threadIdx.x * sub_numClusters + i] = 0;
}
__syncthreads();
// pairwise tree reduction over threads; stride halves each round
for(int i = (blockDim.x >> 1); i >= 1; i>>=1) {
if(threadIdx.x < i) {
for(int j = 0;j < sub_numClusters; j++) {
//for(int j = clusterStart + sub_numClusters - 1;j >= clusterStart; j--)
shared_objects[threadIdx.x * sub_numClusters + j] += shared_objects[(threadIdx.x + i) * sub_numClusters + j];
}
}
__syncthreads();
}
// row 0 of shared memory now holds the block's column sums
if(threadIdx.x == 0) {
for(int i = 0;i < sub_numClusters;i++) {
// for(int i = clusterStart + sub_numClusters - 1;i >= clusterStart; i--)
device_reduceOcc[blockIdx.x * numClusters + i + clusterStart] = shared_objects[i];
}
}
} |
11,058 | #include <cstdio>
#include <cstring>
#include <getopt.h>
//#include <pybind11.h>
#define BLOCK_SIZE 64
//#define CELL_SIZE 4
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.stride + col)
// Row-major matrix view: M(row, col) = elements[row * stride + col].
typedef struct {
int width; // columns in this view
int height; // rows in this view
int stride; // row pitch of the underlying storage (>= width for sub-views)
float* elements; // host or device pointer, depending on where the view lives
} Matrix;
// Get a matrix element
// Read A(row, col) from a row-major matrix with pitch A.stride.
__device__ float GetElement(const Matrix A, int row, int col)
{
    const int offset = row * A.stride + col;
    return A.elements[offset];
}
// Set a matrix element
// Write `value` into A(row, col) of a row-major matrix with pitch A.stride.
__device__ void SetElement(Matrix A, int row, int col,
                           float value)
{
    const int offset = row * A.stride + col;
    A.elements[offset] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix Asub of A that is
// located col sub-matrices to the right and row sub-matrices down
// from the upper-left corner of A
// View of the BLOCK_SIZE x BLOCK_SIZE tile of A at tile coordinates
// (row, col). The view shares A's storage (no copy) and keeps A's stride.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix tile;
    tile.width = BLOCK_SIZE;
    tile.height = BLOCK_SIZE;
    tile.stride = A.stride;
    tile.elements = A.elements + A.stride * BLOCK_SIZE * row + BLOCK_SIZE * col;
    return tile;
}
// Forward declaration of the matrix multiplication kernel
__global__ void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Host side of C = A * B: copies A and B to the device, launches the tiled
// MatMulKernel, and copies C back. Dimensions are assumed to be multiples
// of BLOCK_SIZE. NOTE(review): no CUDA error checking on any call here.
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.width = d_A.stride = A.width;
d_A.height = A.height;
size_t size = A.width * A.height * sizeof(float);
cudaMalloc(&d_A.elements, size);
cudaMemcpy(d_A.elements, A.elements, size,
cudaMemcpyHostToDevice);
Matrix d_B;
d_B.width = d_B.stride = B.width; d_B.height = B.height;
size = B.width * B.height * sizeof(float);
cudaMalloc(&d_B.elements, size);
cudaMemcpy(d_B.elements, B.elements, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.width = d_C.stride = C.width;
d_C.height = C.height;
size = C.width * C.height * sizeof(float);
cudaMalloc(&d_C.elements, size);
// Invoke kernel
// Blocks are (BLOCK_SIZE/4)^2 threads for BLOCK_SIZE^2 tiles: the kernel
// computes a 4x4 micro-tile per thread (see c[4][4] in MatMulKernel).
dim3 dimBlock(BLOCK_SIZE/4, BLOCK_SIZE/4);
dim3 dimGrid(B.width / BLOCK_SIZE, A.height / BLOCK_SIZE);
// Launched 10 times on the same buffers -- presumably a timing loop since
// the result is identical each run; TODO confirm intent.
for (int i = 0; i < 10; i++)
MatMulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Read C from device memory
cudaMemcpy(C.elements, d_C.elements, size,
cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A.elements);
cudaFree(d_B.elements);
cudaFree(d_C.elements);
}
// Matrix multiplication kernel called by MatMul()
// Register-tiled matrix multiply C = A * B.
// Each block computes a BLOCK_SIZE x BLOCK_SIZE tile of C; each thread
// computes a 4x4 sub-tile held in registers (hence the launch uses
// BLOCK_SIZE/4 threads per block dimension — see MatMul).
// Precondition: matrix dimensions are multiples of BLOCK_SIZE.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    // Block row and column
    int blockRow = blockIdx.y;
    int blockCol = blockIdx.x;
    // Each thread block computes one sub-matrix Csub of C
    Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
    // Thread row and column within Csub (in units of 4x4 sub-tiles)
    int row = threadIdx.y;
    int col = threadIdx.x;
    // Per-thread 4x4 accumulator, kept in registers
    float c[4][4];
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            c[i][j] = 0.0f;
    // Loop over all the BLOCK_SIZE-wide tile pairs of A and B that
    // contribute to this block's tile of C
    for (int m = 0; m < (A.width / BLOCK_SIZE); ++m) {
        // Get sub-matrix Asub of A
        Matrix Asub = GetSubMatrix(A, blockRow, m);
        // Get sub-matrix Bsub of B
        Matrix Bsub = GetSubMatrix(B, m, blockCol);
        // Shared memory used to store Asub and Bsub respectively
        __shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
        // Each thread stages its own 4x4 patch of both tiles
        for (int i = 0; i < (4); i++)
            for (int j = 0; j < (4); j++) {
                As[row * 4 + i][col * 4 + j] = GetElement(Asub, row * 4 + i, col * 4 + j);
                Bs[row * 4 + i][col * 4 + j] = GetElement(Bsub, row * 4 + i, col * 4 + j);
            }
        // Synchronize to make sure the sub-matrices are loaded
        // before starting the computation
        __syncthreads();
        // 4x4 register blocks of As (row band) and Bs (column band)
        float a[4][4];
        float b[4][4];
        for (int k = 0; k < (BLOCK_SIZE / 4); k++) {
            for (int i = 0; i < 4; i++)
                for (int j = 0; j < 4; j++) {
                    a[i][j] = As[(row) * 4 + i][(k) * 4 + j];
                    b[i][j] = Bs[(k) * 4 + i][(col) * 4 + j];
                }
            // 4x4-by-4x4 block product accumulated into c
            for (int i = 0; i < 4; i++) //row
                for (int j = 0; j < 4; j++) //col
                    for (int l = 0; l < 4; l++)
                        c[i][j] += a[i][l] * b[l][j];
        }
        // Synchronize to make sure that the preceding
        // computation is done before loading two new
        // sub-matrices of A and B in the next iteration
        __syncthreads();
    }
    // Write the 4x4 result sub-tile to device memory
    for (int i = 0; i < 4; i++)
        for (int j = 0; j < 4; j++)
            SetElement(Csub, row*4 + i, col*4 + j, c[i][j]);
}
// Build an h x w diagonal matrix with M(i,i) = i.
// BUG FIX: the original malloc'ed the buffer and only wrote the diagonal,
// leaving every off-diagonal entry uninitialized garbage (so the GPU
// product was garbage too). calloc gives a defined 0.0f everywhere.
void __create_matrix(Matrix *m, int h, int w)
{
    m->width = w;
    m->height = h;
    m->stride = w;
    m->elements = static_cast<float *>(calloc((size_t)h * w, sizeof(float)));
    for (int i = 0; i < h && i < w; i++) {
        m->elements[i * w + i] = (float)i;
    }
}
// Entry point. Options: -p print the result matrix, -s <n> set the square
// matrix size (must remain a multiple of BLOCK_SIZE for correct results).
int main(int argc, char *argv[])
{
    Matrix A, B, C;
    int matsz = 8192;
    int opt;
    int show_result = 0;
    while (-1 != (opt = getopt(argc, argv, "ps:"))) {
        switch (opt) {
        case 'p':
            show_result = 1;
            break;
        case 's':
            matsz = atoi(optarg);
            break;
        default:
            printf("-p show result\n-s matrix size\n");
            break;
        }
    }
    __create_matrix(&A, matsz, matsz);
    __create_matrix(&B, matsz, matsz);
    __create_matrix(&C, matsz, matsz);
    MatMul(A, B, C);
    if (show_result) {
        for (int i = 0; i < C.height; i++) {
            for (int j = 0; j < C.width; j++)
                printf("%.0f ", C.elements[i * C.stride + j]);
            printf("\n");
        }
    }
    // Release the host-side matrices (the original returned early and
    // leaked all three allocations).
    free(A.elements);
    free(B.elements);
    free(C.elements);
    return 0;
}
11,059 |
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
// Logistic sigmoid 1 / (1 + e^-x), using the fast device exp intrinsic.
inline __device__ float sigmoidGPU(const float& x)
{
    const float e = __expf(-x);
    return 1.0f / (1.0f + e);
}
// YOLOv3 head decode: apply sigmoid to x, y, objectness and class scores
// and exp() to w, h for every (cell, anchor).
// Layout is channel-planar: channel c of anchor z for cell (x, y) lives at
// (y * grid_w_ + x) + grid_h_ * grid_w_ * (z * (5 + numClasses) + c).
// Launched on a 3-D grid covering (grid_w_, grid_h_, numBBoxes).
__global__ void gpuYoloLayerV3(const float* input, float* output, const uint32_t grid_h_,
                               const uint32_t grid_w_, const uint32_t numOutputClasses,
                               const uint32_t numBBoxes)
{
    const uint32_t x_id = blockIdx.x * blockDim.x + threadIdx.x;
    const uint32_t y_id = blockIdx.y * blockDim.y + threadIdx.y;
    const uint32_t z_id = blockIdx.z * blockDim.z + threadIdx.z;
    // The grid is rounded up; drop out-of-range threads.
    if (x_id >= grid_w_ || y_id >= grid_h_ || z_id >= numBBoxes)
    {
        return;
    }
    const int stride = grid_h_ * grid_w_;  // one channel plane
    const int base = (y_id * grid_w_ + x_id)
                     + stride * (z_id * (5 + numOutputClasses));
    output[base]              = sigmoidGPU(input[base]);               // t_x
    output[base + stride]     = sigmoidGPU(input[base + stride]);      // t_y
    output[base + 2 * stride] = __expf(input[base + 2 * stride]);      // t_w
    output[base + 3 * stride] = __expf(input[base + 3 * stride]);      // t_h
    output[base + 4 * stride] = sigmoidGPU(input[base + 4 * stride]);  // objectness
    for (uint32_t c = 0; c < numOutputClasses; ++c)
    {
        const int k = base + (5 + c) * stride;                         // class score c
        output[k] = sigmoidGPU(input[k]);
    }
}
// Launch gpuYoloLayerV3 once per batch image on `stream`.
// `outputSize` is the per-image element count used to step the pointers.
// Returns cudaGetLastError() — this catches launch-configuration errors
// only; kernel execution is asynchronous on `stream`.
cudaError_t cudaYoloLayerV3(const void* input, void* output, const uint32_t& batchSize,
                            const uint32_t& n_grid_h_, const uint32_t& n_grid_w_,
                            const uint32_t& numOutputClasses, const uint32_t& numBBoxes,
                            uint64_t outputSize, cudaStream_t stream)
{
    dim3 threads_per_block(16, 16, 4);
    // Ceil-division instead of the original "/x + 1": that form launched a
    // whole extra layer of blocks whenever a dimension was an exact
    // multiple of the block size. The kernel's bounds check makes the two
    // forms produce identical results; this just avoids the waste.
    dim3 number_of_blocks((n_grid_w_ + threads_per_block.x - 1) / threads_per_block.x,
                          (n_grid_h_ + threads_per_block.y - 1) / threads_per_block.y,
                          (numBBoxes + threads_per_block.z - 1) / threads_per_block.z);
    for (uint32_t batch = 0; batch < batchSize; ++batch)
    {
        gpuYoloLayerV3<<<number_of_blocks, threads_per_block, 0, stream>>>(
            reinterpret_cast<const float*>(input) + (batch * outputSize),
            reinterpret_cast<float*>(output) + (batch * outputSize), n_grid_h_, n_grid_w_,
            numOutputClasses, numBBoxes);
    }
    return cudaGetLastError();
}
|
11,060 | #include "PeopleAllocation.cuh"
// 9-dimensional inner product of a preference vector and a feature vector.
__device__
float dot(float* preference, float* feature) {
    float sum = 0.0f;
    for (int k = 0; k < 9; ++k) {
        sum += preference[k] * feature[k];
    }
    return sum;
}
// Exchange the two floats pointed to by v1 and v2.
__device__
void swap(float* v1, float* v2) {
    const float t = *v1;
    *v1 = *v2;
    *v2 = t;
}
// One pass of a greedy "swap search". Each thread owns the strided index
// class {idx, idx+step, idx+2*step, ...}; for each owned person i it scans
// the later indices of the SAME class (j = i+step, i+2*step, ...) for a
// partner of a different type whose home/feature swap maximizes the
// combined score gain, then applies the best swap and refreshes both
// scores. Because i and j always share a residue class mod step, threads
// never touch each other's elements — but candidate partners are limited
// to that class, so this is a heuristic, not an exhaustive search.
__global__
void movePeopleGPUKernel(cuda_person* people, int numPeople) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int step = gridDim.x * blockDim.x;
    for (int loop = 0; loop < 1; ++loop) {  // single pass; loop kept for tuning
        for (int i = idx; i < numPeople; i += step) {
            float max_increase = 0.0f;
            int swap_id = -1;
            // Find the partner j giving the largest positive score gain:
            // gain = (score after swapping features) - (current scores).
            for (int j = i + step; j < numPeople; j += step) {
                if (people[i].type == people[j].type) continue;
                float increase = dot(people[j].preference, people[i].feature) + dot(people[i].preference, people[j].feature) - people[i].score - people[j].score;
                if (increase > max_increase) {
                    max_increase = increase;
                    swap_id = j;
                }
            }
            if (swap_id >= 0) {
                // Exchange home locations and feature vectors, then
                // recompute both cached scores.
                swap(&people[i].homeLocation_x, &people[swap_id].homeLocation_x);
                swap(&people[i].homeLocation_y, &people[swap_id].homeLocation_y);
                for (int k = 0; k < 9; ++k) {
                    swap(&people[i].feature[k], &people[swap_id].feature[k]);
                }
                people[i].score = dot(people[i].preference, people[i].feature);
                people[swap_id].score = dot(people[swap_id].preference, people[swap_id].feature);
            }
        }
    }
}
/**
 * Move people (CUDA version): copies the array to the device, runs the
 * swap-search kernel, and copies the (possibly modified) people back.
 */
void movePeopleGPUfunc(cuda_person* people, int numPeople) {
    // Allocate device memory. Typed as cuda_person* — the original
    // declared a float* for a cuda_person buffer and relied on casts.
    cuda_person* devPeople;
    cudaMalloc((void**)&devPeople, sizeof(cuda_person) * numPeople);
    // Copy the people data to the device.
    cudaMemcpy(devPeople, people, sizeof(cuda_person) * numPeople, cudaMemcpyHostToDevice);
    // Run the kernel.
    movePeopleGPUKernel<<<PEOPLE_ALLOCATION_GRID_SIZE, PEOPLE_ALLOCATION_BLOCK_SIZE>>>(devPeople, numPeople);
    // Copy the results back to the host (this blocking memcpy on the
    // default stream also waits for the kernel to finish).
    cudaMemcpy(people, devPeople, sizeof(cuda_person) * numPeople, cudaMemcpyDeviceToHost);
    // Release device memory.
    cudaFree(devPeople);
}
|
11,061 | #include <stdio.h>
#include <string.h>
#include <time.h>
// GPU kernel: x[i] = a*x[i] + b for every i < N, one thread per element.
// Launch with enough blocks to cover N (grid tail is bounds-checked).
__global__ void saxpy_gpu(int N, float a, float b, float* x){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    x[idx] = a * x[idx] + b;
}
// CPU reference: x[k] = a*x[k] + b for every k in [0, N).
__host__ void saxpy_cpu(int N, float a, float b, float* x){
    for (int k = 0; k < N; k++) {
        x[k] = a * x[k] + b;
    }
}
// Usage: <prog> N a b CPU|GPU [threadsPerBlock]
// Times a SAXPY-like update (x = a*x + b) on the chosen device and prints
// the first and last ten results.
int main(int argc, char *argv[]){
    // BUG FIX: the original dereferenced argv[1..4] with no argc check.
    if (argc < 5) {
        printf("usage: %s N a b CPU|GPU [threadsPerBlock]\n", argv[0]);
        return EXIT_FAILURE;
    }
    int i;
    int N = atoi(argv[1]);
    float a = atof(argv[2]);
    float b = atof(argv[3]);
    int numBytes = N*sizeof(float);
    // Host buffer (and device buffer for GPU mode)
    float* x_cpu = (float *)malloc(numBytes);
    float* x_GPU = NULL;
    // Initialize x[i] = i
    for(i=0;i<N;i++){
        x_cpu[i] = i;
    }
    // Get current time
    clock_t begin = clock();
    clock_t end;
    // Start the computing
    if(strcmp(argv[4], "GPU")==0){
        cudaMalloc((void**)&x_GPU, numBytes);
        cudaMemcpy(x_GPU, x_cpu, numBytes, cudaMemcpyHostToDevice);
        const int nThreadsPerBlocks = (argc==6)? atoi(argv[5]): 512;
        const int nBlocks = (N / nThreadsPerBlocks) + ( (N % nThreadsPerBlocks) == 0 ? 0 : 1);
        saxpy_gpu<<<nBlocks, nThreadsPerBlocks>>>(N, a, b, x_GPU);
        cudaMemcpy(x_cpu, x_GPU, numBytes, cudaMemcpyDeviceToHost);
        end = clock();
        cudaFree(x_GPU);   // freed after timing; original leaked it
    }else if(strcmp(argv[4], "CPU")==0){
        saxpy_cpu(N, a, b, x_cpu);
        end = clock();
    }else{
        printf("Please check your compute mode");
        free(x_cpu);
        return EXIT_FAILURE;
    }
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("Elapsed: %f seconds\n", time_spent);
    for(i = 0; i < 10; i++)
        printf("results %d : %f \n", i, x_cpu[i]);
    for(i = 10; i > 0; i--)
        printf("results %d : %f \n", N-i, x_cpu[N-i]);
    free(x_cpu);           // original leaked the host buffer too
    return 0;
}
|
11,062 | #include <stdio.h>
#define SIZE_TEXT (sizeof(text)-1)
#define SIZE_END (sizeof(end)-1)
__device__ char text[] =
"__ bottles of beer on the wall, __ bottles of beer!\n"
"Take one down, and pass it around, ## bottles of beer on the wall!\n\n";
__device__ char end[] =
"01 bottle of beer on the wall, 01 bottle of beer.\n"
"Take one down and pass it around, no more bottles of beer on the wall.\n"
"\n"
"No more bottles of beer on the wall, no more bottles of beer.\n"
"Go to the store and buy some more, 99 bottles of beer on the wall.";
// One thread per verse: thread x writes the verse for bottle (99 - x)
// into its own SIZE_TEXT-sized slot of `addr`. The template's "__" pairs
// are replaced with the current bottle count and "##" with the next one;
// the last verse (bottle == 1) copies the fixed `end` text and NUL-
// terminates the whole song buffer.
__global__
void bottle99(char *addr){
    int x = threadIdx.x;
    addr += x * SIZE_TEXT;   // this thread's output slot
    int bottle = 99 - x;
    if (bottle == 1) {
        for (int i=0; i<SIZE_END; i++) {
            addr[i] = end[i];
        }
        addr[SIZE_END] = '\0';   // terminator for the complete buffer
    } else {
        // Two-digit ASCII forms of the current and the next bottle count.
        char c1 = (bottle/10) + '0';
        char c2 = (bottle%10) + '0';
        char d1 = ((bottle-1)/10) + '0';
        char d2 = ((bottle-1)%10) + '0';
        // replace '__' by the current bottle number, and replace '##' by the next bottle number
        for (int i=0; i<SIZE_TEXT; i++) {
            int c = text[i];
            if (c == '_') {
                // write both digits, then skip the second placeholder char
                addr[i] = c1;
                addr[i+1] = c2;
                i++;
            } else if (c == '#') {
                addr[i] = d1;
                addr[i+1] = d2;
                i++;
            } else {
                addr[i] = text[i];
            }
        }
    }
}
// Generates the "99 bottles of beer" song on the GPU (one thread per
// verse) and prints it.
int main()
{
    char *buffer;
    char *d_buffer;
    // 98 templated verses plus the fixed ending plus a NUL terminator.
    int size = SIZE_TEXT * 98 + SIZE_END + 1;
    // Allocate host memory.
    buffer = new char[size];
    // Allocate device memory.
    cudaMalloc((void**)&d_buffer, size);
    // Launch one thread per verse (bottles 99 down to 1).
    bottle99<<<1, 99>>>(d_buffer);
    // Copy the generated text from device to host.
    cudaMemcpy(buffer, d_buffer, size, cudaMemcpyDeviceToHost);
    // Release device memory.
    cudaFree(d_buffer);
    // Print the song.
    puts(buffer);
    // BUG FIX: buffer came from new[], so it must be released with
    // delete[] — the original called free() on it (undefined behavior).
    delete[] buffer;
    cudaDeviceReset();
}
|
11,063 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
// KERNEL
// Element-wise vector add: C[i] = A[i] + B[i] for every i < N.
// One thread per element; launch with ceil(N / blockDim.x) blocks.
__global__ void Vadd(float* A, float* B, float* C, int N)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;   // grid tail guard
    C[idx] = A[idx] + B[idx];
}
// HOST
// HOST
// Allocates two random input vectors, adds them on the GPU, and checks
// the result against a CPU reference. Exact float comparison is used;
// both sides perform the same single-precision addition per element.
int main(int argc, char** argv)
{
    // Variables
    float *h_A, *h_B, *h_C, *d_A, *d_B, *d_C;
    int N = 5000000; // Number of floats per vector
    size_t size = N * sizeof(float);
    // Allocate vectors in host memory and device memory
    h_A = (float*)malloc(size);
    h_B = (float*)malloc(size);
    h_C = (float*)malloc(size);
    cudaMalloc((void**)&d_A, size);
    cudaMalloc((void**)&d_B, size);
    cudaMalloc((void**)&d_C, size);
    // Initialize input vectors with uniform values in [0, 1]
    for (int i = 0; i < N; ++i){
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
    }
    // Copy vectors from host memory to device memory
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    int threadsPerBlock = 256;
    // Ceil-divide N by the block size so the grid covers every element
    int blocks = N / threadsPerBlock;
    if (N % threadsPerBlock != 0) blocks++;
    Vadd <<< blocks, threadsPerBlock >>> (d_A, d_B, d_C, N);
    cudaDeviceSynchronize();
    // Copy result from device memory to host memory
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    bool correct = true;
    // Calculate solution on the host and compare
    for (int i = 0; i < N; i++)
    {
        if (h_C[i] != (h_A[i] + h_B[i]))
        {
            printf("ERROR: expected h_C[%i] = %f but received %f\n", i, h_A[i] + h_B[i], h_C[i]);
            correct = false;
            break;
        }
    }
    if (correct) printf("---PASSED---\n");
    // Free host and device memory
    cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    free(h_A); free(h_B); free(h_C);
}
|
11,064 | #include "includes.h"
// Valid 2-D correlation of `kernel` over `input`, written to `output`.
// All buffers use leading-dimension (column-pitch) indexing: element
// (r, c) is at [r + c * ld]. rowStep/colStep stride the output positions;
// one thread computes one output element.
__global__ void convolution(float* input, int inputRows, int inputCols, int inputLd, float* kernel, int kernelRows, int kernelCols, int kernelLd, int rowStep, int colStep, float* output, int outputLd) {
    int row = (blockIdx.y * blockDim.y + threadIdx.y) * rowStep;
    int col = (blockIdx.x * blockDim.x + threadIdx.x) * colStep;
    // Only positions where the kernel window fits entirely inside input.
    if (row <= inputRows - kernelRows && col <= inputCols - kernelCols) {
        // PERF FIX: accumulate in a register instead of read-modify-writing
        // global memory once per tap; addition order is unchanged, so the
        // result is bit-identical. Single store at the end.
        float acc = 0.0f;
        for (int i = 0; i < kernelRows; i++) {
            for (int j = 0; j < kernelCols; j++) {
                acc += kernel[i + j * kernelLd] * input[(row + i) + (col + j) * inputLd];
            }
        }
        output[row + col * outputLd] = acc;
    }
}
11,065 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <ctime>
#include <cmath>
#define N (1024*1024)
// Fill data[i] with tan(2*pi*i/N), one thread per element.
// No bounds guard: the launch config covers exactly N threads.
__global__ void kernel(float *data)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    float phase = 2.0f * 3.1415926f * (float) i / (float) N;
    data[i] = tanf(phase);
}
// Compares GPU (tanf kernel) timing against a CPU sinf loop and prints
// sampled values of the GPU result.
int main (int argc, char *argv[])
{
    //-------------------- GPU PART --------------------
    // Events bracket allocation + kernel + copy-back.
    float *a = new float [N];
    float *dev = NULL;
    cudaEvent_t start, stop;
    float gpuTime = 0.0f;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&dev, N * sizeof(float));
    // N is a multiple of 512, so N/512 blocks of 512 threads cover it exactly.
    kernel<<<dim3((N/512),1), dim3(512,1)>>> (dev);
    cudaMemcpy(a, dev, N * sizeof(float), cudaMemcpyDeviceToHost);
    // FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
    // is the supported equivalent (the blocking memcpy above already
    // synchronized, so this is belt-and-braces).
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpuTime, start, stop);
    printf("\n==================== GPU TIME ====================\n");
    printf("\nGPU compute time: %.2f milliseconds\n\n", gpuTime);
    cudaFree(dev);
    cudaEventDestroy(start);   // release timing events (originally leaked)
    cudaEventDestroy(stop);
    //-------------------- CPU PART --------------------
    int start2, time2;
    float *data2 = new float [N];
    start2 = clock();
    for (int idx2 = 0; idx2 < N; idx2++)
    {
        float x2 = 2.0f * 3.1415926f * (float) idx2 / (float) N;
        data2 [idx2] = sinf (x2);
    }
    time2 = clock() - start2;
    double time_CPU = time2;
    printf("==================== CPU TIME ====================\n");
    printf("\nCPU compute time: %f milliseconds\n\n", time_CPU);
    //-------------------- PRINT VALUES PART --------------------
    printf("===================== Values =====================\n");
    int idx = 0;
    printf("\nValue at point (zero):\na[%d] = %.5f\n", idx, a[idx]);
    idx = N/12;
    printf("\nValue at point (Pi/6):\na[%d] = %.5f\n", idx, a[idx]);
    idx = N/8;
    printf("\nValue at point (Pi/4):\na[%d] = %.5f\n", idx, a[idx]);
    idx = N/6;
    printf("\nValue at point (Pi/3):\na[%d] = %.5f\n", idx, a[idx]);
    idx = N/4;
    printf("\nValue at point (Pi/2):\na[%d] = %.5f\n", idx, a[idx]);
    printf("\n=======================================================\n");
    delete[] a;      // original leaked both host arrays
    delete[] data2;
    return 0;
}
11,066 | #include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#define Tile_size 2
int numARows, numAColumns;
int numBRows, numBColumns;
int numCRows, numCColumns;
// Tiled matrix multiply C = A * B using Tile_size x Tile_size shared
// tiles. Handles sizes that are not tile multiples: out-of-range tile
// elements are zero-filled, and the final store is bounds-checked.
__global__ void matrixMultiplyShared(float *A, float *B, float *C,
                                     int numARows, int numAColumns,
                                     int numBRows, int numBColumns,
                                     int numCRows, int numCColumns) {
    __shared__ float sA[Tile_size][Tile_size]; // tile of A staged in shared memory
    __shared__ float sB[Tile_size][Tile_size]; // tile of B staged in shared memory
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    float cvalue = 0.0;
    sA[threadIdx.y][threadIdx.x] = 0.0;
    sB[threadIdx.y][threadIdx.x] = 0.0;
    for (int k = 0; k < (((numAColumns - 1) / Tile_size) + 1); k++) {
        // Stage one tile of A (zero-padded past the matrix edge)
        if ((row < numARows) && (threadIdx.x + (k * Tile_size)) < numAColumns) {
            sA[threadIdx.y][threadIdx.x] = A[(row * numAColumns) + threadIdx.x + (k * Tile_size)];
        } else {
            sA[threadIdx.y][threadIdx.x] = 0.0;
        }
        // Stage one tile of B (zero-padded past the matrix edge)
        if (col < numBColumns && (threadIdx.y + k * Tile_size) < numBRows) {
            sB[threadIdx.y][threadIdx.x] = B[(threadIdx.y + k * Tile_size) * numBColumns + col];
        } else {
            sB[threadIdx.y][threadIdx.x] = 0.0;
        }
        __syncthreads(); // tiles fully loaded before anyone reads them
        for (int j = 0; j < Tile_size; ++j) // multiply elements of the tile
        {
            cvalue += sA[threadIdx.y][j] * sB[j][threadIdx.x];
        }
        // BUG FIX: barrier after the compute phase too. Without it, a fast
        // thread could start overwriting the shared tiles for iteration
        // k+1 while slower threads are still reading iteration k's data,
        // producing nondeterministic results.
        __syncthreads();
    }
    if (row < numCRows && col < numCColumns) // save final result into C
    {
        C[row * numCColumns + col] = cvalue;
    }
}
// Print a row x col row-major matrix, one matrix row per output line.
// BUG FIX: the original broke the line when i % col == 0 (and i != 0),
// which put col+1 elements on the first line and misaligned every row
// after it; the newline now follows every col-th element.
void printMat(int row, int col, float *Mat) {
    for (int i = 0; i < row * col; i++) {
        printf("%f ", *(Mat + i));
        if ((i + 1) % col == 0) {
            printf("\n");
        }
    }
}
// Host reference multiply: C = A * B where A is numARows x numAColumns,
// B is numBRows x numBColumns, C is numCRows x numCColumns (all
// row-major). Requires numAColumns == numBRows.
// BUG FIX: the original looped j over numAColumns and k over numCColumns,
// which is only correct for square matrices (and writes out of bounds
// otherwise); j must span C's columns and k the shared inner dimension.
void matMultiplyOnHost(float *A, float *B, float *C, int numARows,
                       int numAColumns, int numBRows, int numBColumns,
                       int numCRows, int numCColumns) {
    for (int i = 0; i < numCRows; i++) {
        for (int j = 0; j < numCColumns; j++) {
            C[i * numCColumns + j] = 0.0;
            for (int k = 0; k < numAColumns; k++) {
                C[i * numCColumns + j] += A[i * numAColumns + k] * B[k * numBColumns + j];
            }
        }
    }
}
// Interactive driver: reads the dimensions of A and B, fills them with
// random integer values in [10, 20], multiplies on the GPU with
// shared-memory tiling, and verifies against the host reference.
// Precondition (unchecked): numAColumns == numBRows.
int main(int argc, char **argv) {
    float *hostA, *hostB, *hostC;
    float *hostComputedC;
    float *deviceA, *deviceB, *deviceC;
    printf("\nEnter Rows and Columns of A:");
    scanf("%d %d", &numARows, &numAColumns);
    printf("\nEnter Rows and Columns of B:");
    scanf("%d %d", &numBRows, &numBColumns);
    hostA = (float *) malloc(sizeof(float) * numARows * numAColumns);
    hostB = (float *) malloc(sizeof(float) * numBRows * numBColumns);
    // Random integer entries in [lower, upper]
    int lower = 10.0, upper = 20.0;
    for (int i = 0; i < numARows * numAColumns; i++)
        hostA[i] = (rand() % (upper - lower + 1)) + lower;
    for (int i = 0; i < numBRows * numBColumns; i++)
        hostB[i] = (rand() % (upper - lower + 1)) + lower;
    printf("\nMatrix A Values:\n");
    printMat(numARows, numAColumns, hostA);
    printf("\n\nMatrix B Values:\n");
    printMat(numBRows, numBColumns, hostB);
    numCRows = numARows;
    numCColumns = numBColumns;
    hostC = (float *) malloc(sizeof(float) * numCRows * numCColumns);
    hostComputedC = (float *) malloc(sizeof(float) * numCRows * numCColumns);
    // Allocating GPU memory
    cudaMalloc((void **) &deviceA, sizeof(float) * numARows * numAColumns);
    cudaMalloc((void **) &deviceB, sizeof(float) * numBRows * numBColumns);
    cudaMalloc((void **) &deviceC, sizeof(float) * numCRows * numCColumns);
    // Copy memory to the GPU
    cudaMemcpy(deviceA, hostA, sizeof(float) * numARows * numAColumns, cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB, hostB, sizeof(float) * numBRows * numBColumns, cudaMemcpyHostToDevice);
    // Initialize the grid and block dimensions; the extra "+ 1" block per
    // axis covers non-multiple sizes (the kernel bounds-checks its store)
    dim3 dimGrid((numCColumns / Tile_size) + 1, (numCRows / Tile_size) + 1, 1);//Number of Blocks required
    dim3 dimBlock(Tile_size, Tile_size, 1);//Number of threads in each block
    matrixMultiplyShared<<<dimGrid, dimBlock>>>(deviceA, deviceB, deviceC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
    cudaError_t err1 = cudaPeekAtLastError();//To capture last error in function call
    cudaDeviceSynchronize();//To synchronize the device
    // Copy the results in GPU memory back to the CPU
    cudaMemcpy(hostC, deviceC, sizeof(float) * numCRows * numCColumns, cudaMemcpyDeviceToHost);
    printf("\nMatrix C From Device\n");
    printMat(numCRows, numCColumns, hostC);//Function Call
    matMultiplyOnHost(hostA, hostB, hostComputedC, numARows, numAColumns, numBRows, numBColumns, numCRows, numCColumns);
    printf("\nMatrix C From Host\n");
    printMat(numCRows, numCColumns, hostComputedC);
    printf("\n\n");
    // Compare the device result element-by-element against the host one
    for (int i = 0; i < numCColumns *
                        numCRows; i++)
    {
        if (hostComputedC[i] != hostC[i]) {
            printf("Mismatch at Row = %d Col = %d hostComputed[] = %f --device[] %f\n", i / numCColumns,
                   i % numCColumns, hostComputedC[i], hostC[i]);
            break;
        }
    }
    // NOTE(review): this printout multiplies the x-axis block count by
    // itself; the second factor should presumably use numCRows.
    printf("\nNumber of Blocks Created:%d \n", ((numCColumns / Tile_size) + 1) * ((numCColumns / Tile_size) + 1));
    printf("\nNumber of Threads Per Block: %d \n", (Tile_size * Tile_size));
    // Free the GPU memory
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    //Free the Pointer Memory
    free(hostA);
    free(hostB);
    free(hostC);
    free(hostComputedC);
    return 0;
}
|
11,067 | #include <cuda.h>
#include <stdio.h>
#include <string.h>
__global__ void CountSort(int*, int*, int, int);
// Host driver for the GPU counting sort: copies `arr` to the device,
// launches CountSort to histogram/rewrite it, and copies the result back.
// NOTE(review): CountSort appears to assume grid-wide synchronization via
// __syncthreads(), which is only block-scoped — verify its output.
__host__ void counting_sort(int* arr, int size, int max_val)
{
    int block_num = 1000;
    int thread_num_per_block = 1000;
    uint64_t histo_size = sizeof(int)*max_val;
    printf("size: %d\n", size);
    printf("max_val: %d\n", max_val);
    printf("block_num: %d\n", block_num);
    printf("thread_per_block: %d\n", thread_num_per_block);
    // Device copy of the input array
    int* darr;
    cudaMalloc(&darr, sizeof(int)*size);
    cudaMemcpy(darr, arr, sizeof(int)*size, cudaMemcpyHostToDevice);
    // Device histogram, zero-initialized
    int* dhisto;
    cudaMalloc(&dhisto, histo_size);
    cudaMemset(dhisto, 0, histo_size);
    printf("countsort start\n");
    CountSort<<<block_num, thread_num_per_block>>>(darr, dhisto, size, max_val);
    printf("countsort end\n");
    // Blocking copy: also waits for the kernel on the default stream
    cudaMemcpy(arr, darr, sizeof(int)*size, cudaMemcpyDeviceToHost);
    // BUG FIX: the original left these frees commented out and leaked
    // both device allocations on every call.
    cudaFree(dhisto);
    cudaFree(darr);
    printf("return to main\n");
}
// Parallel counting sort over darr (values expected in [0, max_val)).
// Phase 1: each (block, thread) pair histograms its contiguous slice of
// darr with atomics. Phase 2: thread (0,0) prefix-sums the histogram.
// Phase 3: each thread rewrites its slice of [0, max_val) into darr.
// NOTE(review): __syncthreads() only synchronizes within ONE block, but
// phases 2 and 3 need a grid-wide barrier across all 1000 blocks — the
// histogram may be incomplete when thread (0,0) scans it. Also phase 3
// writes darr[i] = i, ignoring the prefix-summed histogram entirely; that
// is only a sorted result when the input is a permutation of 0..size-1.
// Both points need confirming against the caller's expectations.
__global__ void CountSort(int* darr, int* dhisto, int size, int max_val) {
    uint64_t thread_per_block = blockDim.x;
    uint64_t total_block = gridDim.x;
    uint64_t bid = blockIdx.x;
    uint64_t tid = threadIdx.x;
    uint64_t size_per_block, bstart, size_per_thread, start, end;
    // update histogram in each block: the last block absorbs the size
    // remainder, and the last thread of a block absorbs the block's
    // remainder.
    if(size % total_block != 0 && bid == total_block - 1) {
        size_per_block = size / total_block + size % total_block;
        bstart = bid * (size / total_block);
        size_per_thread = size_per_block / thread_per_block;
        start = bstart + tid * size_per_thread;
        end = start + size_per_thread;
        if(size_per_block % thread_per_block != 0 &&
           tid == thread_per_block - 1) {
            end += size_per_block % thread_per_block;
        }
    }
    else {
        size_per_block = size / total_block;
        bstart = bid * size_per_block;
        size_per_thread = size_per_block / thread_per_block;
        start = bstart + tid * size_per_thread;
        end = start + size_per_thread;
        if(size_per_block % thread_per_block != 0 && tid == thread_per_block - 1) {
            end += size_per_block % thread_per_block;
        }
    }
    for(uint64_t i=start; i<end; i++) {
        atomicAdd(&dhisto[darr[i]], 1);
    }
    __syncthreads();
    // In-place inclusive prefix sum of the histogram, done serially by a
    // single thread.
    if(bid == 0 && tid == 0) {
        for(int i=1; i<max_val; i++) {
            dhisto[i] += dhisto[i-1];
        }
    }
    __syncthreads();
    // Rewrite phase: partition [0, max_val) across all threads of all
    // blocks and write each index value into darr.
    size_per_block = max_val / total_block;
    bstart = bid * size_per_block;
    size_per_thread = size_per_block / thread_per_block;
    start = bstart + tid * size_per_thread;
    end = start + size_per_thread;
    for(uint64_t i=start; i<end; i++) {
        darr[i] = i;
    }
    __syncthreads();
}
|
11,068 | #include<stdio.h>
#include<cuda.h>
#include<math.h>
#include<sys/time.h>
// Element-wise vector add: C[i] = A[i] + B[i] for i < N.
// One thread per element; grid tail is bounds-checked.
// FIX: removed the trailing __syncthreads() of the original — the kernel
// uses no shared memory and no thread reads another thread's output, so
// the barrier had no observable effect.
__global__
void Matadd(double* A, double* B, double* C, int N)
{
    int i = blockIdx.x* blockDim.x +threadIdx.x;
    if(i<N)
        C[i] = A[i] + B[i];
}
// Benchmarks GPU vector addition for sizes 2^10 .. 2^20, reporting an
// "inclusive" time (host<->device copies plus launch) and an "exclusive"
// time (launch only, as bracketed by gettimeofday), for 32 and then 1024
// threads per block.
int main()
{
    for(int j=10;j<=20;j++)
    {
        struct timeval start1,end1,start2,end2,start3,end3,start4,end4;
        float time1, time2,time3,time4;
        int i;
        int N = pow(2,j);
        size_t size = N * sizeof(double);
        //allocate input matrices hA, hB, hC,refC in host memory
        double* hA = (double*)malloc(size);
        double* hB = (double*)malloc(size);
        double* hC = (double*)malloc(size);
        double* refC = (double*)malloc(size);
        // Random integer inputs in [-10, 9]; refC holds the CPU reference
        for(i=0;i<N;i++)
        {
            hA[i] = rand()%20-10;
            hB[i] = rand()%20-10;
            refC[i] = hA[i] + hB[i];
        }
        //allocate memory on the device (GPU)
        double* dA;
        cudaMalloc(&dA,size);
        double* dB;
        cudaMalloc(&dB,size);
        double* dC;
        cudaMalloc(&dC,size);
        gettimeofday(&start1, NULL);
        //copy vectors from host memory to devie memory
        cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
        cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
        gettimeofday(&start2, NULL);
        //invoke GPU kernel with 32 threads per block (ceil-div grid)
        int threadsperblock = 32;
        int blockspergrid = (N + threadsperblock - 1)/ threadsperblock;
        Matadd<<<blockspergrid,threadsperblock>>>(dA,dB,dC,N);
        // NOTE(review): kernel launches are asynchronous, so end1/end3
        // mostly capture launch overhead; completion is forced by the
        // blocking copy-back below — confirm this is the intended metric.
        gettimeofday(&end1, NULL);
        //bring the result back from the device memory into the host array
        cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
        gettimeofday(&end2, NULL);
        time1 = ((end1.tv_sec*1000000 + end1.tv_usec) - (start1.tv_sec*1000000 + start1.tv_usec));
        time2 = ((end2.tv_sec*1000000 + end2.tv_usec) - (start2.tv_sec*1000000 + start2.tv_usec));
        printf("\n The inclusive time for 32 threads in microseconds for 2 to power %d is %f and exclusive time is %f respectively \n",j,time1,time2);
        gettimeofday(&start3, NULL);
        //copy vectors from host memory to devie memory
        cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
        cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
        gettimeofday(&start4, NULL);
        //invoke GPU kernel again with 1024 threads per block
        threadsperblock = 1024;
        blockspergrid = (N + threadsperblock - 1)/ threadsperblock;
        Matadd<<<blockspergrid,threadsperblock>>>(dA,dB,dC,N);
        gettimeofday(&end3, NULL);
        //bring the result back from the device memory into the host array
        cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
        gettimeofday(&end4, NULL);
        time3 = ((end3.tv_sec*1000000 + end3.tv_usec) - (start3.tv_sec*1000000 + start3.tv_usec));
        time4 = ((end4.tv_sec*1000000 + end4.tv_usec) - (start4.tv_sec*1000000 + start4.tv_usec));
        //printing the inclusive and exclusive time in microseconds
        printf("The inclusive time for 1024 threads for 2 to power %d in microseconds is %f and exclusive time is %f respectively \n",j,time3,time4);
        //release the host and device memory for this size
        free(hA);
        free(hB);
        free(hC);
        free(refC);
        cudaFree(dA);
        cudaFree(dB);
        cudaFree(dC);
    }
    return 0;
}
|
11,069 | #include "includes.h"
enum ComputeMode { ADD, SUB, MUL, DIV };
cudaError_t computeWithCuda(int *c, const int *a, const int *b, unsigned int size, ComputeMode mode);
// For each index (one thread per element, single block): if a[idx]
// compares exactly equal to 1.0, increment b[idx] by one.
__global__ void compareWithOneKernel(float* b, const double* a)
{
    int idx = threadIdx.x;
    if (a[idx] == 1) {
        b[idx] += 1;
    }
}
11,070 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
// Choose the thread count for the single-block launch: one thread per
// row for small problems, otherwise the 1024-thread block maximum.
int calculate_no_threads(int array_size){
    return (array_size < 256) ? array_size : 1024;
}
// Pretty-print a square array_size x array_size row-major matrix as
// "[\n{a,b,...,}\n...]\n", one brace-wrapped row per line.
void print_results(double *ARRAY, int array_size){
    printf("[\n");
    for(int r = 0; r < array_size; r++){
        printf("{");
        for(int c = 0; c < array_size; c++){
            printf("%1.1lf,", ARRAY[r * array_size + c]);
        }
        printf("}\n");
    }
    printf("]");
    printf("\n");
}
// Single-block matrix multiply: thread tid computes rows
// [startRow, endRow) of T = A * B (array_size x array_size, row-major).
// Thread 0 absorbs the remainder rows when array_size % no_threads != 0.
// CUDA_C is unused but kept for interface compatibility with callers.
__global__ void vector_dot_product(double *CUDA_A, double *CUDA_B, double *CUDA_C,double *CUDA_T,int array_size,int no_threads) {
    int tid = threadIdx.x;
    int row_count = array_size;
    int col_count = array_size;
    int batch = array_size / no_threads;
    int remainder = array_size % no_threads;
    int startRow = batch * tid;
    int endRow;
    if (startRow == 0) {
        endRow = startRow + batch + remainder;   // thread 0 takes the extra rows
    } else {
        startRow = startRow + remainder;         // shift past thread 0's extras
        endRow = startRow + batch;
    }
    int outBase = array_size * startRow;         // first output slot for this thread
    int outOffset = 0;
    // BUG FIX: accumulate in double — the original used a float
    // accumulator for products of doubles, silently discarding precision
    // in every output element.
    double product = 0.0;
    for (int row = startRow; row < endRow; row++) {
        for (int column = 0; column < array_size; column++) {
            for (int cell = 0; cell < array_size; cell++) {
                product = product + CUDA_A[row * col_count + cell] * CUDA_B[cell * row_count + column];
            }
            CUDA_T[outBase + outOffset++] = product;
            product = 0.0;
        }
    }
    __syncthreads();
}
// Multiplies two random array_size x array_size matrices on the GPU
// (single block; thread count from calculate_no_threads) and prints the
// inputs and the product.
int main(){
    //int array_size = 7900;
    int array_size = 3000;
    double *C, *A, *B, *T;
    double *CUDA_A, *CUDA_B, *CUDA_C, *CUDA_T;
    A = (double *)malloc(array_size * array_size * sizeof(double));
    B = (double *)malloc(array_size * array_size * sizeof(double));
    T = (double *)malloc((array_size*array_size) * sizeof(double));
    C = (double *)malloc(array_size * array_size * sizeof(double) );
    // Fill A and B with uniform random values in [0, 0.5]
    double a = 0.5;
    for(int i = 0; i<(array_size * array_size); i++){
        A[i] = ((double)rand()/(double)(RAND_MAX)) * a;
        B[i] = ((double)rand()/(double)(RAND_MAX)) * a;
    }
    // Allocate device memory
    cudaMalloc((void**)&CUDA_A, sizeof(double) * array_size * array_size);
    cudaMalloc((void**)&CUDA_B, sizeof(double) * array_size * array_size);
    cudaMalloc((void**)&CUDA_C, sizeof(double) * array_size * array_size);
    cudaMalloc((void**)&CUDA_T, sizeof(double) * (array_size*array_size));
    // Transfer data from host to device memory.
    // NOTE(review): T is copied up uninitialized — harmless since the
    // kernel overwrites every element, but the copy could be dropped.
    cudaMemcpy(CUDA_A, A, sizeof(double) * array_size * array_size, cudaMemcpyHostToDevice);
    cudaMemcpy(CUDA_B, B, sizeof(double) * array_size * array_size, cudaMemcpyHostToDevice);
    cudaMemcpy(CUDA_T, T, sizeof(double) * array_size * array_size, cudaMemcpyHostToDevice);
    printf("calculate_no_threads %d\n",calculate_no_threads(array_size));
    vector_dot_product<<<1,calculate_no_threads(array_size)>>>(CUDA_A, CUDA_B, CUDA_C, CUDA_T,array_size,calculate_no_threads(array_size));
    // NOTE(review): the kernel never writes CUDA_C, so C comes back
    // uninitialized and is never printed — dead buffer.
    cudaMemcpy(C, CUDA_C, sizeof(double) * array_size * array_size, cudaMemcpyDeviceToHost);
    cudaMemcpy(T, CUDA_T, sizeof(double) * (array_size*array_size), cudaMemcpyDeviceToHost);
    puts("DOT_PRODUCT");
    print_results(A,array_size);
    print_results(B,array_size);
    puts("MATRIX MULTI");
    print_results(T,array_size);
    // Deallocate device memory
    cudaFree(CUDA_A);
    cudaFree(CUDA_B);
    cudaFree(CUDA_C);
    cudaFree(CUDA_T);
    free(C);
    free(A);
    free(B);
    free(T);
    // Deallocate host memory
}
|
11,071 | #include "includes.h"
// Zero out rows symmetrically around `row` in a buffer laid out with row
// pitch `length`: for each y-offset tidy, clears elements
// (row + tidy, tidx) and (row - tidy, tidx).
// NOTE(review): tidy omits threadIdx.y, so all thread rows of a block use
// the same offset — confirm whether "+ threadIdx.y" was intended. Also
// (row - tidy) indexes before row 0 whenever tidy > row; verify callers
// guarantee row is large enough for the launched grid.
__global__ void zeroFillingKernel(float* idata, int row, int length, int height)
{
    int tidx = blockIdx.x * blockDim.x + threadIdx.x;
    int tidy = blockIdx.y * blockDim.y;
    if(tidx < length && tidy < height)
    {
        idata[tidx + (row+tidy) *length] = 0;
        idata[tidx + (row-tidy) *length] = 0;
    }
}
11,072 | // Program for Matrix Addition in CUDA
// For Hadoop-CUDA Lab
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
// One thread per element of an N x N row-major matrix: c = a + b.
// Launched on a 2-D grid; out-of-range threads do nothing.
__global__ void gpu_matrixadd(int *a,int *b, int *c, int N) {
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (row < N && col < N) {
        int idx = row * N + col;
        c[idx] = a[idx] + b[idx];
    }
}
// Reference host implementation of N x N matrix addition: c = a + b.
// Treats the matrices as flat arrays of N*N elements, since the
// element-wise sum is independent of traversal order.
void cpu_matrixadd(int *a,int *b, int *c, int N) {
    int total = N * N;
    for (int idx = 0; idx < total; idx++)
        c[idx] = a[idx] + b[idx];
}
// Interactive driver: adds two N x N matrices on the GPU and on the CPU,
// times both with CUDA events, and compares the results.
// Fixes vs. original: the grid was built as (Grid_Dim_x, Grid_Dim_x),
// ignoring the validated y value; the verification loop broke
// unconditionally after element 0; d (and, on repeat, all per-iteration
// allocations) leaked.
int main(int argc, char *argv[]) {
    char key;                              // user's repeat/terminate choice
    int i, j;                              // loop counters
    int Grid_Dim_x = 1, Grid_Dim_y = 1;    // grid structure values
    int Block_Dim_x = 1, Block_Dim_y = 1;  // block structure values
    int noThreads_x, noThreads_y;          // total threads in each dimension
    int noThreads_block;                   // number of threads in a block
    int N = 10;                            // size of array in each dimension
    int *a, *b, *c, *d;                    // host data (c = GPU result, d = CPU result)
    int *dev_a, *dev_b, *dev_c;            // device data
    int size;                              // number of bytes per matrix
    cudaEvent_t start, stop;               // CUDA events used to measure time
    float elapsed_time_ms;                 // applicable for asynchronous code also

    /* --------------------ENTER INPUT PARAMETERS AND DATA -----------------------*/
    do { // loop to repeat complete program
        printf ("Device characteristics -- some limitations (compute capability 1.0)\n");
        printf (" Maximum number of threads per block = 512\n");
        printf (" Maximum sizes of x- and y- dimension of thread block = 512\n");
        printf (" Maximum size of each dimension of grid of thread blocks = 65535\n");
        printf("Enter size of array in one dimension (square array), currently %d\n",N);
        scanf("%d",&N);
        do { // re-prompt until the launch configuration covers the matrix
            printf("\nEnter nuumber of blocks per grid in x dimension), currently %d : ",Grid_Dim_x);
            scanf("%d",&Grid_Dim_x);
            printf("\nEnter nuumber of blocks per grid in y dimension), currently %d : ",Grid_Dim_y);
            scanf("%d",&Grid_Dim_y);
            printf("\nEnter nuumber of threads per block in x dimension), currently %d : ",Block_Dim_x);
            scanf("%d",&Block_Dim_x);
            printf("\nEnter nuumber of threads per block in y dimension), currently %d : ",Block_Dim_y);
            scanf("%d",&Block_Dim_y);
            noThreads_x = Grid_Dim_x * Block_Dim_x;      // number of threads in x dimension
            noThreads_y = Grid_Dim_y * Block_Dim_y;      // number of threads in y dimension
            noThreads_block = Block_Dim_x * Block_Dim_y; // number of threads in a block
            if (noThreads_x < N) printf("Error -- number of threads in x dimension less than number of elements in arrays, try again\n");
            else if (noThreads_y < N) printf("Error -- number of threads in y dimension less than number of elements in arrays, try again\n");
            else if (noThreads_block > 512) printf("Error -- too many threads in block, try again\n");
            else printf("Number of threads not used = %d\n", noThreads_x * noThreads_y - N * N);
        } while (noThreads_x < N || noThreads_y < N || noThreads_block > 512);

        // BUG FIX: original used (Grid_Dim_x, Grid_Dim_x) here, silently
        // discarding the y value the loop above just validated.
        dim3 Grid(Grid_Dim_x, Grid_Dim_y);    // grid structure
        dim3 Block(Block_Dim_x, Block_Dim_y); // block structure, threads/block limited by specific device

        size = N * N * sizeof(int); // number of bytes in total in arrays
        a = (int*) malloc(size);    // dynamically allocated host arrays
        b = (int*) malloc(size);
        c = (int*) malloc(size);    // results from GPU
        d = (int*) malloc(size);    // results from CPU
        for (i = 0; i < N; i++)     // load arrays with some numbers
            for (j = 0; j < N; j++) {
                a[i * N + j] = i;
                b[i * N + j] = i;
            }

        /* ------------- COMPUTATION DONE ON GPU ----------------------------*/
        cudaMalloc((void**)&dev_a, size); // allocate memory on device
        cudaMalloc((void**)&dev_b, size);
        cudaMalloc((void**)&dev_c, size);
        cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
        cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
        // (the original also copied the uninitialized c into dev_c; that
        // transfer was pure overhead and is dropped here)

        cudaEventCreate(&start); // instrument code to measure start time
        cudaEventCreate(&stop);
        cudaEventRecord(start, 0);
        gpu_matrixadd<<<Grid,Block>>>(dev_a,dev_b,dev_c,N);
        cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost); // blocking copy also synchronizes
        cudaEventRecord(stop, 0); // instrument code to measure end time
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed_time_ms, start, stop);
        printf("Time to calculate results on GPU: %f ms.\n", elapsed_time_ms); // print out execution time

        /* ------------- COMPUTATION DONE ON HOST CPU ----------------------------*/
        cudaEventRecord(start, 0); // use same timing
        cpu_matrixadd(a,b,d,N);    // do calculation on host
        cudaEventRecord(stop, 0);  // instrument code to measure end time
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed_time_ms, start, stop);
        printf("Time to calculate results on CPU: %f ms.\n", elapsed_time_ms); // print out execution time

        /* ------------------- check device creates correct results -----------------*/
        // BUG FIX: the original loop broke unconditionally, so only c[0]
        // was ever compared; now every element is checked and we stop at
        // the first mismatch.
        for (i = 0; i < N * N; i++) {
            if (c[i] != d[i]) {
                printf("*********** ERROR in results, CPU and GPU create different answers ********\n");
                break;
            }
        }

        /* -------------- clean up (per iteration, so repeats don't leak) ----------*/
        free(a);
        free(b);
        free(c);
        free(d); // BUG FIX: d was never freed in the original
        cudaFree(dev_a);
        cudaFree(dev_b);
        cudaFree(dev_c);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);

        printf("\nEnter c to repeat, return to terminate\n");
        scanf("%c",&key); // consume the pending newline
        scanf("%c",&key); // read the actual choice
    } while (key == 'c'); // loop of complete program
    return 0;
}
|
11,073 | #include <stdlib.h>
#include <stdio.h>
#include <sys/time.h>
#define MAX_CHAINE 100
#define CALLOC(ptr, nr, type) if (!(ptr = (type *) calloc((size_t)(nr), sizeof(type)))) { \
printf("Erreur lors de l'allocation memoire \n") ; \
exit (-1); \
}
#define FOPEN(fich,fichier,sens) if ((fich=fopen(fichier,sens)) == NULL) { \
printf("Probleme d'ouverture du fichier %s\n",fichier); \
exit(-1); \
}
#define MIN(a, b) (a < b ? a : b)
#define MAX(a, b) (a > b ? a : b)
#define MAX_VALEUR 255
#define MIN_VALEUR 0
#define NBPOINTSPARLIGNES 15
#define MAX_DIM_GRID 65535
#define MAX_DIM_BLOCK 1024
#define false 0
#define true 1
#define boolean int
// Clock
#define initTimer struct timeval tv1, tv2; struct timezone tz
#define startTimer gettimeofday(&tv1, &tz)
#define stopTimer gettimeofday(&tv2, &tz)
#define tpsCalcul ((tv2.tv_sec-tv1.tv_sec)*1000000L + (tv2.tv_usec-tv1.tv_usec))
/* KERNEL CUDA */
/* Contrast-stretch kernel: res[i] = (image[i] - le_min) * etalement,
 * truncated back to int by the assignment. One thread per pixel; threads
 * with index >= N do nothing. */
__global__ void add_vec_scalaire_gpu(int *image, int *res, long N, int le_min, float etalement) {
    long idx = (long)blockIdx.x * (long)blockDim.x + (long)threadIdx.x;
    if (idx >= N)
        return;
    res[idx] = ((image[idx] - le_min) * etalement);
}
/*
 * Contrast-stretch ("etalement") of an ASCII PGM-style image on the GPU.
 * Usage: ./CodeSequentiel <path_to_image> [<blocksize>]
 * Reads the image at argv[1], stretches its pixel values toward the full
 * [MIN_VALEUR, MAX_VALEUR] range with add_vec_scalaire_gpu, and writes
 * the result to "<input>.new".
 */
int main(int argc, char *argv[]) {
/*========================================================================*/
/* Variable declarations and memory allocation                            */
/*========================================================================*/
if (argc < 2) {
printf("Usage: ./CodeSequentiel <path_to_image> [<blocksize>]\n");
exit(0);
}
long blocksize = 1; // default value
if (argc == 3) {
blocksize = atoi(argv[2]);
}
int i, n;
int LE_MIN = MAX_VALEUR; // running minimum pixel value (starts at the top)
int LE_MAX = MIN_VALEUR; // running maximum pixel value (starts at the bottom)
float ETALEMENT = 0.0; // stretch factor
int* image;
int* resultat;
int X, Y, x, y; // image dimensions and current read coordinates
int TailleImage; // total pixel count (X * Y)
int P; // pixel value currently being read
FILE* Src, * Dst;
char SrcFile[MAX_CHAINE];
char DstFile[MAX_CHAINE];
char ligne[MAX_CHAINE];
boolean inverse = false;
char *Chemin;
initTimer; //
/*========================================================================*/
/* Read command-line parameters                                           */
/*========================================================================*/
sscanf(argv[1], "%s", SrcFile);
sprintf(DstFile, "%s.new", SrcFile);
/*========================================================================*/
/* Determine the working directory                                        */
/*========================================================================*/
// NOTE(review): the CALLOC'd buffer is leaked immediately — getenv
// returns its own pointer and overwrites Chemin on the next line.
CALLOC(Chemin, MAX_CHAINE, char);
Chemin = getenv("PWD");
printf("Repertoire de travail : %s \n\n", Chemin);
/*========================================================================*/
/* Open the source and destination files                                  */
/*========================================================================*/
printf("Operations sur les fichiers\n");
FOPEN(Src, SrcFile, "r");
printf("\t Fichier source ouvert (%s) \n", SrcFile);
FOPEN(Dst, DstFile, "w");
printf("\t Fichier destination ouvert (%s) \n", DstFile);
/*========================================================================*/
/* Read the source file header (and echo it to the destination)           */
/*========================================================================*/
printf("\t Lecture entete du fichier source ");
for (i = 0; i < 2; i++) {
fgets(ligne, MAX_CHAINE, Src);
fprintf(Dst, "%s", ligne);
}
fscanf(Src, " %d %d\n", &X, &Y);
fprintf(Dst, " %d %d\n", X, Y);
fgets(ligne, MAX_CHAINE, Src); /* read the max-value line (255) */
fprintf(Dst, "%s", ligne);
printf(": OK \n");
/*========================================================================*/
/* Allocate memory for the source and result images                       */
/*========================================================================*/
TailleImage = X * Y;
CALLOC(image, TailleImage, int);
CALLOC(resultat, TailleImage, int);
for (i = 0;i < TailleImage;i++) {
image[i] = 0;
resultat[i] = 0;
}
x = 0;
y = 0;
printf("\t\t Initialisation de l'image [%d ; %d] : Ok \n", X, Y);
/*========================================================================*/
/* Read the file to fill the source image                                 */
/*========================================================================*/
while (!feof(Src)) {
n = fscanf(Src, "%d", &P);
// NOTE(review): this linear index looks wrong — pixel (x, y) of a
// row-major image should land at y*X + x, but y+x collapses rows, so
// most of `image` stays 0 while LE_MIN/LE_MAX are still correct.
image[y+x] = P;
LE_MIN = MIN(LE_MIN, P);
LE_MAX = MAX(LE_MAX, P);
x++;
if (n == EOF || (x == X && y == Y - 1)) {
break;
}
if (x == X) {
x = 0;
y++;
}
}
fclose(Src);
printf("\t Lecture du fichier image : Ok \n\n");
/*========================================================================*/
/* Compute the stretch factor                                             */
/*========================================================================*/
if (inverse) {
ETALEMENT = 0.2;
}
else {
ETALEMENT = (float)(MAX_VALEUR - MIN_VALEUR) / (float)(LE_MAX - LE_MIN);
}
/*========================================================================*/
/* Compute each new pixel value (on the GPU)                              */
/*========================================================================*/
int tailleVecteur = TailleImage;
long size = sizeof(int)*tailleVecteur;
int *cudaVec;
int *cudaRes;
// Select cuda GPU device to use (if multiple device)
cudaSetDevice(0);
if (cudaMalloc((void **)&cudaVec, size) == cudaErrorMemoryAllocation) {
printf("Allocation memoire qui pose probleme (cudaVec) \n");
}
if (cudaMalloc((void **)&cudaRes, size) == cudaErrorMemoryAllocation) {
printf("Allocation memoire qui pose probleme (cudaRes) \n");
}
long dimBlock = blocksize;
long dimGrid = tailleVecteur/blocksize;
if ((tailleVecteur % blocksize) != 0) {
dimGrid++; // round up so every pixel gets a thread
}
int res = cudaMemcpy(&cudaVec[0], &image[0], size, cudaMemcpyHostToDevice);
printf("Copy CPU -> GPU %d \n",res);
printf("dimBlock: %ld | dimGrid: %ld\n", dimBlock, dimGrid);
startTimer;
add_vec_scalaire_gpu<<<dimGrid, dimBlock>>>(cudaVec, cudaRes, tailleVecteur, LE_MIN, ETALEMENT);
cudaDeviceSynchronize();
stopTimer;
printf("Duration %ld", tpsCalcul);
cudaMemcpy(&resultat[0], &cudaRes[0], size, cudaMemcpyDeviceToHost);
cudaFree(cudaVec);
cudaFree(cudaRes);
/*========================================================================*/
/* Write the image to the result file, NBPOINTSPARLIGNES values per line  */
/*========================================================================*/
n = 0;
for (i = 0; i < TailleImage ; i++) {
fprintf(Dst, "%3d ", resultat[i]);
n++;
if (n == NBPOINTSPARLIGNES) {
n = 0;
fprintf(Dst, "\n");
}
}
fprintf(Dst, "\n");
fclose(Dst);
printf("\n");
/*========================================================================*/
/* End of the main program                                                */
/* NOTE(review): image/resultat are never freed (reclaimed at exit)       */
/*========================================================================*/
exit(0);
}
|
11,074 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
// Fill a rows x cols host matrix with pseudo-random values in [0, 10)
// using the C library rand(). Returns the same pointer it was given.
float *GPU_fill(float *matrix, int rows, int cols)
{
    int total = rows * cols;
    for (int idx = 0; idx < total; ++idx)
        matrix[idx] = rand() % 10;
    return matrix;
}
// Print a rows x cols matrix to stdout: tab-separated values, one row per
// line, followed by a trailing blank line.
void print(float *matrix, const int rows, const int cols)
{
    for (int r = 0; r < rows; r++)
    {
        for (int c = 0; c < cols; c++)
            printf("%f\t", matrix[r * cols + c]);
        printf("\n");
    }
    printf("\n");
}
// Naive GEMM: C (m x n) = A (m x k) * B (k x n), one thread per C element.
// Launch with a 2D grid of at least n x m threads (x -> columns, y -> rows);
// threads outside the output matrix return immediately.
__global__ void cuda_multiply(float *A, float *B, float *C, const int m, const int n, const int k)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= m || c >= n)
        return;
    // Accumulate the dot product in a register, then store once
    // (same additions in the same order as writing through C directly).
    float acc = 0;
    for (int p = 0; p < k; p++)
        acc += A[r * k + p] * B[p * n + c];
    C[r * n + c] = acc;
}
// Driver: read m, n, k, fill random host matrices, multiply on the GPU
// with cuda_multiply, time it with CUDA events, and print small results.
// Fixes vs. original: cpuA/cpuB were leaked and the CUDA events were
// never destroyed.
int main(void)
{
    srand(time(0)); // seed host RNG so GPU_fill varies per run
    int m = 0, n = 0, k = 0;
    printf("Enter: m, n and k...\n");
    scanf("%d %d %d", &m, &n, &k);
    float *A = NULL; // device buffers
    float *B = NULL;
    float *C = NULL;
    // Host buffers: A is m x k, B is k x n, C is m x n.
    float *cpuA = (float *) malloc(sizeof(float) * m * k);
    float *cpuB = (float *) malloc(sizeof(float) * k * n);
    float *cpuC = (float *) malloc(sizeof(float) * m * n);
    cudaMalloc((void **) &A, sizeof(float) * m * k);
    cudaMalloc((void **) &B, sizeof(float) * k * n);
    cudaMalloc((void **) &C, sizeof(float) * m * n);
    cpuA = GPU_fill(cpuA, m, k);
    cpuB = GPU_fill(cpuB, k, n);
    cudaMemcpy(A, cpuA, m * k * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(B, cpuB, k * n * sizeof(float), cudaMemcpyHostToDevice);
    if (n < 5) // only echo small matrices
    {
        printf("\nFirst matrix:\n");
        print(cpuA, m, k);
        printf("Second matrix:\n");
        print(cpuB, k, n);
    }
    printf("\nMatrices have been initialized.\n");
    // One thread per C element; round grid dims up to cover m x n.
    const int block_size = 16;
    unsigned int grid_rows = (m + block_size - 1) / block_size;
    unsigned int grid_cols = (n + block_size - 1) / block_size;
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(block_size, block_size);
    float gpu_elapsed_time_ms = 0.0;
    cudaEvent_t start;
    cudaEvent_t stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cuda_multiply<<<dimGrid, dimBlock>>>(A, B, C, m, n, k);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_elapsed_time_ms, start, stop);
    printf("Cuda Native execution time: %f ms.\n\n", gpu_elapsed_time_ms);
    cudaMemcpy(cpuC, C, m * n * sizeof(float), cudaMemcpyDeviceToHost);
    if (n < 5)
    {
        printf("Result:\n");
        print(cpuC, m, n);
    }
    // Release every resource we created (BUG FIX: cpuA/cpuB leaked and
    // the events were never destroyed in the original).
    free(cpuA);
    free(cpuB);
    free(cpuC);
    cudaFree(A);
    cudaFree(B);
    cudaFree(C);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
11,075 | #include "includes.h"
// Matrix multiply C = A * B for N x N row-major matrices: each thread
// computes one C element by walking a full row of A and column of B in
// global memory. BLOCK_SIZE only maps block indices to matrix coordinates
// (the launch is assumed to use BLOCK_SIZE x BLOCK_SIZE thread blocks);
// there is no shared-memory tiling in this kernel.
__global__ void Multiply_Matrix_GPU(float* A, float* B, float* C , int BLOCK_SIZE , int N) {
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
float total = 0.0;
int ia = N * BLOCK_SIZE * by + N * ty; // start of this thread's row in A
int ib = BLOCK_SIZE * bx + tx;         // start of this thread's column in B
for (int k = 0; k < N; k++) {
total += A[ia + k] * B[ib + k * N];
}
int ic = N * BLOCK_SIZE * by + BLOCK_SIZE * bx; // top-left of this block's C tile
// Result matrix
C[ic + N * ty + tx] = total;
} |
11,076 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <stdlib.h>
//NUMERO BLOQUES = INTEGERMASALTO(NUMVERTICES/MAXTRHEADSXBLOCK)
//VARIABLES GLOBALES
#define NUMVERTICES 5 //MAXIMUM 300
#define MAXTRHEADSXBLOCK 32 //[1 - 32]
//ID gpudevice that is used
int gpudev = 0;
//Graph representation with
int *EG; //Double array of edges |NUMVERTICES|
int *VG; //Double array of vertices
int C; //Current vertex INDEX
int NUMBEREDGES;
//MST edge list: Shows the path that is followed.
int *R1source;
int *R2destination;
int *R3weigth;
//Temporal arrays used for reduction results
int *T1weights;
int *T2indexes;
//------- FUNCIONES --------
// Append `content` to the text file `filename`, creating the file when it
// does not exist. Errors are reported but not fatal.
// Fixes vs. original: fopen's result was never checked (fputs on a NULL
// stream crashes), and the error path called strerror(EOF) — EOF is not
// an errno value, and <string.h> was not even included; perror reports
// the real cause from errno instead.
void writeFile(char* filename, char* content)
{
    FILE *fp;
    int error;
    // Open a text file in append mode (stream positioned at the end).
    fp = fopen(filename, "a+");
    if (fp == NULL)
    {
        printf("Error al escribir en archivo!\n");
        perror(filename); // BUG FIX: don't pass a NULL stream to fputs
        return;
    }
    error = fputs(content, fp);
    if (error == EOF)
    {
        printf("Error al escribir en archivo!\n");
        perror("fputs"); // BUG FIX: original printed strerror(EOF)
    }
    // Close the file
    fclose(fp);
}
//--------------------------------------------------------------
// Print a packed "double array" of 2 * NUMVERTICES ints as two lines:
// the first NUMVERTICES entries, a newline, then the second NUMVERTICES
// entries and a final newline.
void printDoubleArray(int *VX)
{
    int half = NUMVERTICES;
    for (int idx = 0; idx < 2 * half; idx++)
    {
        printf("%i ", VX[idx]);
        if (idx == half - 1)
            printf("\n"); // break between the two halves
    }
    printf("\n");
}
//------------------------
// Print VX[start..end] (both bounds inclusive), each value padded to
// width 2 and space-separated, followed by a newline.
void printArrayRange(int *VX,int start,int end)
{
    int idx = start;
    while (idx <= end)
    {
        printf("%2i ", VX[idx]);
        idx++;
    }
    printf("\n");
}
//------------------------
/*
*Function that creates the vaules of the graph
*in the strucutre.
*/
// Build a random graph into the global packed arrays and record the edge
// count in NUMBEREDGES.
//   VG: [0, N)   vertex ids; [N, 2N)  out-degree of each vertex.
//   EG: [0, E)   destination ids;  [E, 2E)  edge weights in [1, 100].
// Fixes vs. original: calloc takes (count, element_size) — the original
// passed count * element_size as the SECOND argument too, allocating far
// more memory than needed for both VG and EG.
void setGraph()
{
    // Packed double array -> 2 * NUMVERTICES elements of sizeof(int).
    VG = (int *) calloc(NUMVERTICES*2, sizeof(int)); // BUG FIX: element size
    int numberEdges = 0;
    int randValue = 0;
    // Initialise VG values
    for (int i = 0; i < NUMVERTICES; i++)
    {
        // Set the index of the vertex
        VG[i] = i;
        // Random out-degree in [2, NUMVERTICES-1]: it cannot be 0 because
        // every vertex must be connected; ensure at least 2 neighbours.
        randValue = rand() % (NUMVERTICES-2) +2;
        VG[i+NUMVERTICES] = randValue;
        // Keep track of the number of edges
        numberEdges = numberEdges + randValue;
    }
    // Save in the global variable
    NUMBEREDGES = numberEdges;
    printf("-- Source Vertex --\n");
    printDoubleArray(VG);
    printf("------\n");
    printf("TOTAL EDGES: %i\n",numberEdges);
    // Packed double array -> 2 * numberEdges elements (same calloc fix).
    EG = (int *) calloc(numberEdges*2, sizeof(int)); // BUG FIX: element size
    // Initialise the edge double-array values.
    int indxEdges = 0;
    for (int i = 0; i < NUMVERTICES; i++)
    {
        // Number of vertices to which vertex i has a path
        int numVerticesConn = VG[i+NUMVERTICES];
        // 1) Destination id: distinct per edge and never equal to the
        //    source i — indxDestination tracks j but jumps over i.
        // 2) Edge weight: random value in [1, 100].
        int indxDestination = 0;
        for (int j = 0; j < numVerticesConn; j++)
        {
            // indxDestination == j until the source index is reached;
            // from then on it stays one ahead of j (skips the self-edge).
            if (i == j)
            {
                indxDestination++;
            }
            EG[indxEdges] = indxDestination;
            EG[indxEdges+numberEdges] = rand() % (100) +1;
            indxEdges++;
            indxDestination++;
        }
    }
    printf("-- Destination vertex --\n");
    printArrayRange(EG,0,numberEdges-1);
    printf("-- Weigth of Edge --\n");
    printArrayRange(EG,numberEdges,(numberEdges*2)-1);
}
//--------------------------------
//Function that initializes values of R1,R2,R3 according to
//the Root vertex; and also define and initializes with 0s
//the temporal arrays
//--------------------------------
// Initialise the candidate MST edge list R1/R2/R3 (source, destination,
// weight) from the root vertex C. Destinations with no direct edge from
// C keep weight 0. Also uses the global VG/EG/NUMBEREDGES built by
// setGraph.
// Fixes vs. original: the callocs passed NUMVERTICES-1*sizeof(int) as
// the element size; operator precedence makes that NUMVERTICES minus
// sizeof(int) BYTES, badly under-allocating the arrays (heap overflow
// as soon as they are filled). calloc wants (count, sizeof(int)).
void setVariables()
{
    // R arrays have NUMVERTICES-1 entries: the final MST path always has
    // one edge less than the number of vertices.
    R1source = (int *)calloc(NUMVERTICES-1, sizeof(int));      // BUG FIX
    R2destination = (int *)calloc(NUMVERTICES-1, sizeof(int)); // BUG FIX
    R3weigth = (int *)calloc(NUMVERTICES-1, sizeof(int));      // BUG FIX
    // C's outgoing edges occupy EG[startIndex, startIndex + degree(C)):
    // sum the degrees of all vertices before C to find the slice start.
    int numDestinations = VG[C+NUMVERTICES];
    int startIndex = 0;
    for (int k = 0; k < C; k++)
    {
        startIndex = startIndex+VG[k+NUMVERTICES];
    }
    numDestinations = numDestinations+startIndex; // end of C's slice
    // Default-initialise one candidate edge from root C to every other
    // vertex; fill in the real weight when EG contains that edge.
    int indxValidDestinations = -1;
    for (int i = 0; i < NUMVERTICES; i++)
    {
        // Skip only the case where source and destination coincide.
        if (C != i)
        {
            indxValidDestinations++;
            R1source[indxValidDestinations] = C;
            R2destination[indxValidDestinations] = i;
            // Scan only C's slice of the edge array for destination i.
            for (int j = startIndex; j < numDestinations; j++)
            {
                int idDestino = EG[j];
                if (idDestino == i)
                {
                    // Edge found: record its weight.
                    R3weigth[indxValidDestinations] = EG[j+NUMBEREDGES];
                }
            }
        }
    }
}
//--------------------------------
//--------------------------------
// Kernel 1: per-block minimum over the candidate edge weights R3.
// Each thread i (< NUMVERTICES-1) compares its candidate weight r3[i]
// against the running block minimum t1[blockIdx.x]; when smaller (and
// non-zero, since 0 means "no edge") it stores the weight in t1 and the
// destination vertex r2[i] in t2.
// NOTE(review): threads of the same block read and write t1/t2
// concurrently with no atomics or synchronization, so this "min" is a
// data race — the surviving value depends on scheduling. A shared-memory
// or atomicMin-based reduction would be deterministic; confirm intent.
__global__ void kernel1(int *v, int *e, int *r1, int *r2, int *r3, int *c, int *t1, int *t2)
{
int idBloque = blockIdx.x;
// Global thread id (thread id within block + block id * threads/block)
int i = threadIdx.x + idBloque*blockDim.x;
// Only |V|-1 threads work: the R arrays have NUMVERTICES-1 entries.
if( i < NUMVERTICES-1 )
{
// Accept a weight smaller than the current one, but only a valid
// (non-zero) weight.
if(r3[i] < t1[idBloque] && r3[i] != 0 )
{
// Store the weight
t1[idBloque] = r3[i];
// Store the vertex index
t2[idBloque] = r2[i];
}// new minimum found
}//End if
}//End function kernel1
//--------------------------------
//--------------------------------
// Kernel 2: final min-reduction across the per-block results of kernel1,
// done in shared memory by a single block. Thread 0 publishes the overall
// minimum weight to weights[0] and its vertex id to indxs[0].
// NOTE(review): __syncthreads() sits inside `if (i < N)`, which is
// divergent whenever N < blockDim.x — all threads of the block should
// reach every barrier.
// NOTE(review): the tree reduction starts at salto = N/2 (integer
// division), so for non-power-of-two N some elements are never compared;
// verify numBlocks[0] is a power of two before trusting the result.
__global__ void kernel2(int *numBlocks, int *weights, int *indxs)
{
int N = numBlocks[0];
// Scratch space in shared memory
__shared__ int temporal[MAXTRHEADSXBLOCK];
__shared__ int tempids[MAXTRHEADSXBLOCK];
// Index of each thread within the single block
int i = threadIdx.x;
if(i < N)
{
// Copy the weight/id vectors into shared memory and synchronize.
temporal[i] = weights[i];
tempids[i] = indxs[i];
__syncthreads();
// Start of the parallel reduction: log2(N) iterations.
int salto = N/2;
while(salto)
{
// Only the lower half of the threads works each step.
if(i < salto)
{
// Keep the partner's entry when it has a smaller weight.
if( temporal[i+salto] < temporal[i] && temporal[i] != 0 )
{
temporal[i] = temporal[i+salto];
tempids[i] = tempids[i+salto];
}//End if
}//End if 2
__syncthreads();
salto = salto/2;
}//End while
// Thread 0 writes the final result to global memory.
if(i == 0)
{
weights[0] = temporal[0];
indxs[0] = tempids[0];
}//End if 2
}//End if 1
}//End function kernel2
//--------------------------------
//Comparing and update MST
//--------------------------------
// Kernel 3 (Prim relaxation): after vertex C joins the MST, compare each
// known candidate edge against the edges leaving C and keep the cheaper
// one: if weight(C -> n) < R3[n] then R1[n] = C, R3[n] = weight(C -> n).
// NOTE(review): indxValidDestinations is a per-thread local that is
// incremented at most once, so EVERY thread reads/writes r1[0]/r3[0]
// (racing with each other). It looks like the intended index is the
// per-vertex slot (i < C ? i : i-1), as computed serially in
// setVariables — confirm before relying on the results.
__global__ void kernel3(int *v, int *e, int *r1, int *r3, int *c, int *numEdges)
{
int NE = numEdges[0];
int C = c[0];
// C's outgoing edges occupy e[startIndex, startIndex + degree(C)):
// sum the degrees of all vertices before C (see setGraph layout).
int numDestinations = v[C+NUMVERTICES];
int startIndex = 0;
for(int k = 0; k < C; k++)
{
startIndex = startIndex+v[k+NUMVERTICES];
}//End for
numDestinations = numDestinations+startIndex;
int idBloque = blockIdx.x;
// Global thread id (thread id within block + block id * threads/block)
int i = threadIdx.x + idBloque*blockDim.x;
int indxValidDestinations = -1;
if(i < NUMVERTICES)
{
// Skip when source and destination are the same vertex.
if(C != i)
{
indxValidDestinations++;
// Scan only C's slice of the edge array.
for(int j = startIndex; j < numDestinations; j++)
{
int idDestino = e[j];
// Destination found: relax the candidate edge if cheaper.
if(idDestino == i)
{
if(e[j+NE] < r3[indxValidDestinations])
{
r3[indxValidDestinations] = e[j+NE];
r1[indxValidDestinations] = C;
}//End if
}//End if
}//End for 2
}//End if
}//End if 1
}//End function kernel3
//--------------------------------
//--------------------------------
// Host driver for the parallel Prim MST construction.
//   v / e   : packed vertex and edge arrays (layout in setGraph)
//   r1/r2/r3: candidate MST edge list (source, destination, weight),
//             NUMVERTICES-1 entries each
//   c       : index of the current/root vertex
// Repeats NUMVERTICES-1 times: kernel1 (per-block min over R3), kernel2
// (final min across blocks, only when numBloques > 1), then kernel3
// (relax candidate edges against the newly added vertex). Total GPU time
// is measured with CUDA events and appended to "resultados.txt".
void primMST(int *v, int *e, int *r1, int * r2, int *r3, int c)
{
// Size of the CUDA grid
int g_row = (int)ceil((float)NUMVERTICES/(float)MAXTRHEADSXBLOCK);
int g_col = (int)ceil((float)NUMVERTICES/(float)MAXTRHEADSXBLOCK);
int numBloques = g_row;
dim3 bloques(g_col,g_row);
dim3 hilos(MAXTRHEADSXBLOCK,MAXTRHEADSXBLOCK);
cudaEvent_t start, stop;
// NOTE(review): passing dim3 structs to printf "%i" is undefined
// behavior — print bloques.x / hilos.x explicitly instead.
// NOTE(review): hilos is 2D (MAXTRHEADSXBLOCK^2 threads) but the kernels
// only ever use threadIdx.x; a 1D block would launch far fewer threads.
printf("Bloques: %i == %i \n", bloques, numBloques);
printf("Hilos: %i \n", hilos);
printf("Grid (%d,%d)\n", g_row, g_col);
// Variables on the device
int *VGD, *VED, *R1D, *R2D, *R3D; //Arrays
int *T1D, *T2D;
int *CD, *NED; //Variables
// Host-side reduction buffers T1 (weights) / T2 (vertex ids), one slot
// per block. NOTE(review): calloc's second argument should be
// sizeof(int); numBloques*sizeof(int) over-allocates here.
T1weights = (int *)calloc(numBloques,numBloques*sizeof(int));
T2indexes = (int *)calloc(numBloques,numBloques*sizeof(int));
// Initialise the temporary weights with a very high value so that any
// real weight beats the initial one.
for(int i = 0; i < numBloques; i++)
{
T1weights[i] = 99999;
}//End for 1
// Print the initial candidate edge list (the upper bound is inclusive,
// so NUMVERTICES-2 prints all NUMVERTICES-1 entries).
printf("R1: \n");
printArrayRange(r1,0,NUMVERTICES-2);
printf("R2: \n");
printArrayRange(r2,0,NUMVERTICES-2);
printf("R3: \n");
printArrayRange(r3,0,NUMVERTICES-2);
// TRANSFER FROM HOST (CPU) TO DEVICE (GPU)
cudaSetDevice(gpudev);
cudaEventCreate(&start); cudaEventCreate(&stop);
cudaEventRecord(start,0);
// 1) Allocate memory for the device-side variables
cudaMalloc(&VGD, NUMVERTICES*2*sizeof(int) );
cudaMalloc(&VED, NUMBEREDGES*2*sizeof(int) );
cudaMalloc(&R1D, (NUMVERTICES-1)*sizeof(int) );
cudaMalloc(&R2D, (NUMVERTICES-1)*sizeof(int) );
cudaMalloc(&R3D, (NUMVERTICES-1)*sizeof(int) );
cudaMalloc(&T1D, (numBloques)*sizeof(int) );
cudaMalloc(&T2D, (numBloques)*sizeof(int) );
cudaMalloc(&CD, int(sizeof(int)) );
cudaMalloc(&NED, int(sizeof(int)) );
// MAIN LOOP: |NUMVERTICES|-1 iterations, one MST edge added per pass
int iCountVer = 0;
while(iCountVer < NUMVERTICES-1)
{
printf("---- %i) ----- \n", iCountVer);
printf("Current vertex: %i \n", c);
// 2) Copy data from host to device
cudaMemcpy(VGD,v,NUMVERTICES*2*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(VED,e,NUMBEREDGES*2*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(R1D,r1,(NUMVERTICES-1)*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(R2D,r2,(NUMVERTICES-1)*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(R3D,r3,(NUMVERTICES-1)*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(T1D,T1weights,numBloques*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(T2D,T2indexes,numBloques*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(CD,&c,sizeof(int),cudaMemcpyDefault);
cudaMemcpy(NED,&NUMBEREDGES,sizeof(int),cudaMemcpyDefault);
// 3) Run kernel 1 and write the per-block minima into T1 and T2
kernel1<<<bloques, hilos>>>(VGD,VED,R1D,R2D,R3D,CD,T1D,T2D);
// 4) Copy T1 and T2 back to the host; T1[0]/T2[0] are later folded
// into R2/R3 and T2[0]'s vertex becomes the next current vertex.
cudaMemcpy(T1weights,T1D,numBloques*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(T2indexes,T2D,numBloques*sizeof(int),cudaMemcpyDefault);
printf("\n Minimum weight found for each block (Global memory reduction) \n");
printf("Id: \n");
printArrayRange(T2indexes,0,numBloques-1);
printf("Weight: \n");
printArrayRange(T1weights,0,numBloques-1);
// Kernel 2 is only needed when there is more than one block
// (MAXTRHEADSXBLOCK > numBloques > 1).
if(numBloques > 1)
{
// Device-side copy of the block count
int *NBD;
// 1) Allocate device memory for it
cudaMalloc(&NBD, int(sizeof(int)) );
// 2) Copy data from host to device
cudaMemcpy(NBD,&numBloques,sizeof(int),cudaMemcpyDefault);
// 3) Run kernel2
printf("Invoke Kernel2\n");
kernel2<<<1,hilos>>>(NBD,T1D,T2D);
// 4) Copy data from device to host
cudaMemcpy(T1weights,T1D,numBloques*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(T2indexes,T2D,numBloques*sizeof(int),cudaMemcpyDefault);
printf("\n 2) Minimum weight found of each block (After shared memory reduction) \n");
printf("Id: \n");
printArrayRange(T2indexes,0,numBloques-1);
printf("Weight: \n");
printArrayRange(T1weights,0,numBloques-1);
// 5) NOTE(review): NBD is never cudaFree'd — leaks every iteration.
}//End if kernel2
printf("Minimum weight found: %i for vertex with ID: %i \n", T1weights[0], T2indexes[0]);
// ADDING THE NEW MST EDGE
// 1) Add the minimum-weight edge just found (weight T1[0], vertex
// T2[0]) to the MST by moving it to the first position of R2/R3.
// NOTE(review): always writing slot 0 overwrites the previous pick —
// verify against the "R2[C]?" doubts left by the original author.
r2[0] = T2indexes[0]; //R2[C] ?
r3[0] = T1weights[0]; //R3[C] ?
// 2) New current vertex: C = R2[T2[0]]
c = r2[T2indexes[0]];
// Copy the host data that was just modified back to the device
cudaMemcpy(R2D,r2,(NUMVERTICES-1)*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(R3D,r3,(NUMVERTICES-1)*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(CD,&c,sizeof(int),cudaMemcpyDefault);
// Kernel 3: compare and update the MST candidate edges
kernel3<<<bloques, hilos>>>(VGD,VED,R1D,R3D,CD,NED);
// Copy results from device to host
cudaMemcpy(r1,R1D,(NUMVERTICES-1)*sizeof(int),cudaMemcpyDefault);
cudaMemcpy(r3,R3D,(NUMVERTICES-1)*sizeof(int),cudaMemcpyDefault);
// Print the updated MST (upper bound inclusive, see note above)
printf("--- MST ACTUALIZADO ---: \n");
printf("R1: \n");
printArrayRange(r1,0,NUMVERTICES-2);
printf("R2: \n");
printArrayRange(r2,0,NUMVERTICES-2);
printf("R3: \n");
printArrayRange(r3,0,NUMVERTICES-2);
iCountVer++;
}//End while
// END OF THE |NUMVERTICES|-1 LOOP
// 5) Free device memory
cudaFree(VGD);
cudaFree(VED);
cudaFree(R1D);
cudaFree(R2D);
cudaFree(R3D);
cudaFree(T1D);
cudaFree(T2D);
cudaFree(CD);
cudaFree(NED);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float milliseconds = 0;
char strvertices[10];
char strtottime[15];
cudaEventElapsedTime(&milliseconds, start, stop);
// --- Write the results to a text file ---
// Convert the vertex count (int) to a string
sprintf(strvertices,"%d",NUMVERTICES);
// Convert the elapsed time (float) to a string
sprintf(strtottime,"%f",milliseconds);
// Append to the results file
writeFile("resultados.txt",strvertices);
writeFile("resultados.txt"," ");
writeFile("resultados.txt",strtottime);
writeFile("resultados.txt","\n");
}//End function primMST
//---- FIN FUNCIONES -----
//Inicio del programa
// Program entry point: build the random graph, pick vertex 2 as the MST
// root, initialise the candidate edge arrays, and run parallel Prim.
int main(int argc, char **argv)
{
setGraph();
// Set the root vertex of the MST
C = 2;
setVariables();
printf("IDs threads: \n");
primMST(VG,EG,R1source,R2destination,R3weigth,C);
printf("\n");
printf("Fin del programa V4\n");
}//End of main |
11,077 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <algorithm>
/* Every thread gets exactly one value in the unsorted array. */
#define THREADS 1024 // 2^9
#define BLOCKS 32768 // 2^15
#define NUM_VALS THREADS*BLOCKS
using namespace std;
// Print the wall-clock span between two clock() samples, in seconds.
void print_elapsed(clock_t start, clock_t stop) {
    double secs = (double)(stop - start) / CLOCKS_PER_SEC;
    printf("Elapsed time: %.3fs\n", secs);
}
/*
 * Print `length` ints from `arr`, space separated, followed by a newline.
 * Fix: the original passed int values to the float conversion "%1.3f",
 * which is undefined behavior and prints garbage; use "%d" instead.
 */
void array_print(int* arr, int length) {
    for (int i = 0; i < length; ++i) {
        printf("%d ", arr[i]);
    }
    printf("\n");
}
// Fill arr[0..length) with pseudo-random ints.
// NOTE(review): reseeds with time(NULL) on every call, so two calls within
// the same second produce identical sequences — confirm this is intended.
void array_fill(int* arr, int length) {
    srand(time(NULL));
    for (int i = 0; i < length; ++i)
        arr[i] = rand();
}
// Element-wise equality of two int arrays over the first `length` entries.
bool comparison_arrays(int* arr1, int* arr2, int length) {
    int idx = 0;
    while (idx < length) {
        if (arr1[idx] != arr2[idx]) {
            return false;
        }
        ++idx;
    }
    return true;
}
// Heap-allocate and return a copy of the first `length` ints of `sourse`.
// Caller owns the returned buffer and must release it with delete[].
int* get_copy_array(int* sourse, int length) {
    int* dest = new int[length];
    int i = length;
    while (i-- > 0) {
        dest[i] = sourse[i];
    }
    return dest;
}
// Smallest power of two that is >= x (returns 1 for any x <= 1).
int power_ceil(int x) {
    if (x <= 1) return 1;
    int p = 2;
    while (p < x) {
        p <<= 1;
    }
    return p;
}
/*
 * One compare-exchange pass of bitonic sort (device code).
 * Expects one thread per array element. `j` is the partner distance for
 * this pass; `k` is the size of the current bitonic subsequence, and bit
 * (i & k) selects the sort direction for element i. The lower-indexed
 * thread of each pair performs the exchange.
 */
__global__ void bitonic_sort_step(int* dev_values, int j, int k) {
    unsigned int self = threadIdx.x + blockDim.x * blockIdx.x;
    unsigned int partner = self ^ j;
    if (partner > self) {
        bool ascending = ((self & k) == 0);
        int a = dev_values[self];
        int b = dev_values[partner];
        // Swap when the pair violates this subsequence's direction.
        bool out_of_order = ascending ? (a > b) : (a < b);
        if (out_of_order) {
            dev_values[self] = b;
            dev_values[partner] = a;
        }
    }
}
/*
 * Sort `values` (NUM_VALS ints, host memory) ascending with a GPU bitonic
 * sort. NUM_VALS must be a power of two (it is: THREADS*BLOCKS).
 * Fix: CUDA API results and kernel-launch errors were silently ignored, so
 * any failure produced garbage output; they are now checked and reported.
 */
void bitonic_sort(int* values) {
    int* dev_values = NULL;
    const size_t size = NUM_VALS * sizeof(int);

    cudaError_t err = cudaMalloc((void**)&dev_values, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "bitonic_sort: cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return;
    }
    err = cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "bitonic_sort: H2D copy failed: %s\n", cudaGetErrorString(err));
        cudaFree(dev_values);
        return;
    }

    dim3 blocks(BLOCKS, 1);
    dim3 threads(THREADS, 1);
    // Major step k doubles each round; minor step j halves within a round.
    for (int k = 2; k <= NUM_VALS; k <<= 1) {
        for (int j = k >> 1; j > 0; j = j >> 1) {
            bitonic_sort_step <<<blocks, threads>>> (dev_values, j, k);
        }
    }
    // Catch launch-configuration errors from the kernel launches above.
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "bitonic_sort: kernel launch failed: %s\n", cudaGetErrorString(err));
        cudaFree(dev_values);
        return;
    }
    // Blocking copy: also synchronizes with the queued kernels.
    err = cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "bitonic_sort: D2H copy failed: %s\n", cudaGetErrorString(err));
    }
    cudaFree(dev_values);
}
/*
 * Return true iff `v`, viewed circularly, changes direction exactly twice —
 * i.e. it is a rotation of an increasing-then-decreasing sequence.
 * Equal neighbors do not count as a direction change.
 */
bool is_bitonic(int* v, int length) {
    bool falling = v[length - 1] > v[0];
    int turns = 0;
    for (int i = 0; i < length; i++) {
        if (turns > 2) {
            break;  // already too many inflections; answer cannot be true
        }
        bool now_falling = v[i] > v[(i + 1) % length];
        if (now_falling != falling) {
            turns++;
            falling = now_falling;
        }
    }
    return turns == 2;
}
/*
 * Driver: fill NUM_VALS random ints, sort a copy on the CPU as a reference,
 * run the GPU bitonic sort, then report validity and timing.
 * Fixes: the original prompted for an array length it never used (the size
 * is the compile-time constant NUM_VALS), so the misleading prompt is gone;
 * the malloc result is now checked; host buffers are released before exit.
 */
int main(void)
{
    clock_t start, stop;

    int* values = (int*)malloc(NUM_VALS * sizeof(int));
    if (values == NULL) {
        cout << "host allocation failed" << endl;
        return 1;
    }
    array_fill(values, NUM_VALS);

    // CPU-sorted copy used as the correctness reference below.
    int* reference = get_copy_array(values, NUM_VALS);
    sort(reference, reference + NUM_VALS);

    start = clock();
    bitonic_sort(values);
    stop = clock();

    cout << "is_bitonic: " << is_bitonic(values, NUM_VALS) << endl;
    cout << "is equals: " << comparison_arrays(values, reference, NUM_VALS) << endl;
    print_elapsed(start, stop);

    delete[] reference;  // allocated with new[] by get_copy_array
    free(values);
    return 0;
}
|
#include <cuda.h>
#include <stdio.h>
const int N = 524288;
const int threadsPerBlock = 1024;
// Ceiling division so any N is fully covered. The original formula
// "(N + threadsPerBlock) / threadsPerBlock - 1" under-counts blocks whenever
// N is not a multiple of threadsPerBlock (e.g. N=1000 would give 0 blocks).
// For the current N = 524288 the value is unchanged: 512.
int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
// device code
/*
 * Per-block partial dot product (device code).
 * Each thread accumulates a grid-strided slice of a[i]*b[i] into its
 * shared-memory slot; the block then tree-reduces the slots and thread 0
 * writes the block's partial sum to c[blockIdx.x].
 * Requires blockDim.x == threadsPerBlock and a power-of-two block size
 * (the halving reduction below depends on it).
 */
__global__ void skalarPro(float* a, float* b, float* c) {
    __shared__ float partial[threadsPerBlock];

    const int lane = threadIdx.x;

    // Grid-stride loop: covers all N elements regardless of grid size.
    float acc = 0;
    for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < N;
         idx += blockDim.x * gridDim.x) {
        acc += a[idx] * b[idx];
    }
    partial[lane] = acc;
    __syncthreads();

    // Tree reduction: halve the active range until one value remains.
    for (int half = blockDim.x / 2; half != 0; half /= 2) {
        if (lane < half) {
            partial[lane] += partial[lane + half];
        }
        __syncthreads();  // barrier outside the branch: all threads reach it
    }

    // Publish this block's partial sum.
    if (lane == 0) {
        c[blockIdx.x] = partial[0];
    }
}
// host code
// Host driver: computes the dot product of two N-element vectors on the GPU
// (per-block partial sums) and finishes the reduction on the CPU.
// Fixes: host/device allocations, copies and the kernel launch are now
// checked, and the long-deprecated cudaThreadExit() was replaced with its
// documented equivalent, cudaDeviceReset().
int main() {
    size_t size = N * sizeof(float);
    size_t size_c = blocksPerGrid * sizeof(float);

    // Allocate input vectors h_a and h_b (and the partial-sum buffer) on host.
    float* h_a = (float*)malloc(size);
    float* h_b = (float*)malloc(size);
    float* h_c = (float*)malloc(size_c);
    if (h_a == NULL || h_b == NULL || h_c == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    // Initialize input vectors.
    for (int i = 0; i < N; i++) {
        h_a[i] = i + 1;
        h_b[i] = N - i;
    }

    // Allocate vectors in device memory.
    float* d_a;
    float* d_b;
    float* d_c;
    if (cudaMalloc(&d_a, size) != cudaSuccess ||
        cudaMalloc(&d_b, size) != cudaSuccess ||
        cudaMalloc(&d_c, size_c) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    // Copy vectors from host memory to device memory.
    cudaMemcpy(d_a, h_a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, size, cudaMemcpyHostToDevice);

    // Invoke kernel, then check for launch-configuration errors.
    skalarPro<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    // Blocking copy of the per-block partial sums (also syncs with the kernel).
    err = cudaMemcpy(h_c, d_c, size_c, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "D2H copy failed: %s\n", cudaGetErrorString(err));
        return 1;
    }

    // Final reduction on the host; assumes blocksPerGrid is a power of two
    // (it is 512 for the current N / threadsPerBlock).
    int i = blocksPerGrid / 2;
    while (i != 0) {
        for (int j = 0; j < i; j++) {
            h_c[j] += h_c[j + i];
        }
        i /= 2;
    }
    printf("Das Ergebnis vom Skalarprodukt: %f\n", h_c[0]);

    // Free device memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    // Free host memory.
    free(h_a);
    free(h_b);
    free(h_c);
    cudaDeviceReset();  // replaces deprecated cudaThreadExit()
    return 0;
}
|
#include "includes.h"
/*
 * Threshold split (device code): for each of the N elements of frame
 * idxf-1 in xf, write the flag (value < threshold) into frame idxb of xb.
 * Grid-stride loop, so any launch configuration covers all N elements.
 * Precondition: idxf >= 1 (the kernel reads frame idxf - 1).
 */
__global__ void Split(float * xf, bool * xb, size_t idxf, size_t idxb, size_t N, float threshold)
{
    int stride = blockDim.x * gridDim.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < N) {
        xb[idxb * N + i] = (xf[(idxf - 1) * N + i] < threshold);
        i += stride;
    }
}
#include <stdio.h>
#include <cuda.h>
#define BLOCK_SIZE 16
__global__ void matmulKernel(float* mat_in1,float* mat_in2, float* mat_out,int mat_dim);
// Host driver for a width x width (10x10) matrix multiply on the GPU,
// followed by a CPU recomputation of the first row for visual comparison.
// Fixes: on allocation failure the program previously printed a message and
// then kept going, dereferencing null pointers — it now exits; the
// "firt row" typo in the output is also corrected.
int main() {
    float *h_M, *h_N, *h_P, *d_M, *d_N, *d_P;
    int i, width = 10;
    int size = width * width * sizeof(float);

    // BLOCK_SIZE x BLOCK_SIZE threads per block; round the grid up so the
    // whole matrix is covered even when width % BLOCK_SIZE != 0.
    dim3 block_dim(BLOCK_SIZE, BLOCK_SIZE, 1);
    int grid_size = width / BLOCK_SIZE;
    if (width % BLOCK_SIZE) grid_size++;
    dim3 grid_dim(grid_size, grid_size, 1);

    h_M = (float*)malloc(size);
    h_N = (float*)malloc(size);
    h_P = (float*)malloc(size);
    cudaMalloc((void**)&d_M, size);
    cudaMalloc((void**)&d_N, size);
    cudaMalloc((void**)&d_P, size);
    if (h_M == 0 || h_N == 0 || h_P == 0 || d_M == 0 || d_N == 0 || d_P == 0) {
        printf("memory locate fail!\n");
        return 1;  // was: fell through and used the null pointers
    }
    for (i = 0; i < width * width; i++) {
        h_M[i] = 1.2 * i;
        h_N[i] = 1.4 * i;
    }
    cudaMemcpy(d_M, h_M, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, h_N, size, cudaMemcpyHostToDevice);
    matmulKernel<<<grid_dim, block_dim>>>(d_M, d_N, d_P, width);
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy(h_P, d_P, size, cudaMemcpyDeviceToHost);

    printf("first row of the results matrix P:\n");
    for (i = 0; i < width; i++) {
        printf("%f, ", h_P[i]);
    }
    printf("\n");

    // CPU reference: recompute the first row of M*N for comparison.
    printf("the right answer should be:\n");
    for (i = 0; i < width; i++) {
        float sum = 0;
        for (int k = 0; k < width; k++) {
            sum += h_M[k] * h_N[k * width + i];
        }
        printf("%f, ", sum);
    }
    printf("\n");

    free(h_M); free(h_N); free(h_P);
    cudaFree(d_M); cudaFree(d_N); cudaFree(d_P);
    return 0;
}
/*
 * Naive dense matrix multiply (device code): each thread computes one
 * output element matP[row][col] = sum_k mat1[row][k] * mat2[k][col] for
 * square dim x dim matrices stored row-major. Threads that fall outside
 * the matrix bounds do nothing.
 */
__global__ void matmulKernel(float* mat1, float* mat2, float* matP, int dim) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= dim || col >= dim) {
        return;  // guard: grid may be larger than the matrix
    }
    float acc = 0.;
    for (int k = 0; k < dim; ++k) {
        acc += mat1[row * dim + k] * mat2[k * dim + col];
    }
    matP[row * dim + col] = acc;
}
|
// filename: ax.cu
// a simple CUDA kernel to add two vectors
extern "C"
{
    /*
     * Elementwise scale (device code): c[i] = a * b[i] for 0 <= i < lengthC.
     * One thread per element; threads past the end of the vector exit early.
     * C linkage so the kernel can be located by name from a driver-API host.
     */
    __global__ void ax_32(const int lengthC, const float a, const float *b, float *c)
    {
        int idx = threadIdx.x + blockIdx.x * blockDim.x;
        if (idx >= lengthC)
            return;  // grid tail guard
        c[idx] = a * b[idx];
    }
}
11,082 | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
extern "C" __global__ void default_function_kernel0(const float* __restrict__ Data,
const float* __restrict__ K0,
const float* __restrict__ K1,
const float* __restrict__ K2,
float* __restrict__ Output) {
float Output_local[2];
__shared__ float pad_temp_shared[128];
__shared__ float K0_shared[12];
__shared__ float K1_shared[3];
__shared__ float K2_shared[24];
for (int hh_inner_outer = 0; hh_inner_outer < 4; ++hh_inner_outer) {
Output_local[0] = 0.000000e+00f;
Output_local[1] = 0.000000e+00f;
for (int rr_outer = 0; rr_outer < 2; ++rr_outer) {
for (int rc_outer = 0; rc_outer < 4; ++rc_outer) {
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 16) + ((int)threadIdx.x))] = ((((1 - hh_inner_outer) <= (((int)blockIdx.y) * 4)) && (1 <= (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32))) ? Data[((((((((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) / 128) * 16384) + (rc_outer * 4096)) + (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 128) / 32) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32)) - 33)] : 0.000000e+00f);
if ((((int)threadIdx.z) * 2) < (12 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
if ((rc_outer * 4) < (16 - (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3))) {
K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = K0[((((rc_outer * 24) + ((((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3) * 6)) + (rr_outer * 3)) + (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) % 3))];
}
}
}
if (((int)threadIdx.x) < (3 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
if (((int)threadIdx.x) < (9 - ((int)threadIdx.z))) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[(((((((int)threadIdx.x) + ((int)threadIdx.z)) / 3) * 18) + (rr_outer * 3)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) % 3))];
}
}
}
if ((((int)threadIdx.z) * 3) < (24 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 3) {
if ((rr_outer * 3) < (6 - (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8))) {
K2_shared[((((int)threadIdx.z) * 3) + ((int)threadIdx.x))] = K2[((((rr_outer * 48) + ((((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8) * 16)) + (((int)blockIdx.z) * 8)) + (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) % 8))];
}
}
}
__syncthreads();
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 16) + ((int)threadIdx.x))] = (((1 - hh_inner_outer) <= (((int)blockIdx.y) * 4)) ? Data[((((((((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) / 128) * 16384) + (rc_outer * 4096)) + (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 128) / 32) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32)) - 32)] : 0.000000e+00f);
if ((((int)threadIdx.z) * 2) < (12 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
if ((rc_outer * 4) < (16 - (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3))) {
K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = K0[((((rc_outer * 24) + ((((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3) * 6)) + (rr_outer * 3)) + (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) % 3))];
}
}
}
if (((int)threadIdx.x) < (3 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
if (((int)threadIdx.x) < (9 - ((int)threadIdx.z))) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[((((((((int)threadIdx.x) + ((int)threadIdx.z)) / 3) * 18) + (rr_outer * 3)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) % 3)) + 6)];
}
}
}
if ((((int)threadIdx.z) * 3) < (24 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 3) {
if ((rr_outer * 3) < (6 - (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8))) {
K2_shared[((((int)threadIdx.z) * 3) + ((int)threadIdx.x))] = K2[((((rr_outer * 48) + ((((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8) * 16)) + (((int)blockIdx.z) * 8)) + (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) % 8))];
}
}
}
__syncthreads();
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 16) + ((int)threadIdx.x))] = ((((1 - hh_inner_outer) <= (((int)blockIdx.y) * 4)) && ((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32) < 31)) ? Data[((((((((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) / 128) * 16384) + (rc_outer * 4096)) + (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 128) / 32) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32)) - 31)] : 0.000000e+00f);
if ((((int)threadIdx.z) * 2) < (12 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
if ((rc_outer * 4) < (16 - (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3))) {
K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = K0[((((rc_outer * 24) + ((((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3) * 6)) + (rr_outer * 3)) + (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) % 3))];
}
}
}
if (((int)threadIdx.x) < (3 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
if (((int)threadIdx.x) < (9 - ((int)threadIdx.z))) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[((((((((int)threadIdx.x) + ((int)threadIdx.z)) / 3) * 18) + (rr_outer * 3)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) % 3)) + 12)];
}
}
}
if ((((int)threadIdx.z) * 3) < (24 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 3) {
if ((rr_outer * 3) < (6 - (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8))) {
K2_shared[((((int)threadIdx.z) * 3) + ((int)threadIdx.x))] = K2[((((rr_outer * 48) + ((((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8) * 16)) + (((int)blockIdx.z) * 8)) + (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) % 8))];
}
}
}
__syncthreads();
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 16) + ((int)threadIdx.x))] = ((1 <= (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32)) ? Data[((((((((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) / 128) * 16384) + (rc_outer * 4096)) + (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 128) / 32) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32)) - 1)] : 0.000000e+00f);
if ((((int)threadIdx.z) * 2) < (12 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
if ((rc_outer * 4) < (16 - (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3))) {
K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = K0[((((rc_outer * 24) + ((((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3) * 6)) + (rr_outer * 3)) + (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) % 3))];
}
}
}
if (((int)threadIdx.x) < (3 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
if (((int)threadIdx.x) < (6 - ((int)threadIdx.z))) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[((((((((int)threadIdx.x) + ((int)threadIdx.z)) / 3) * 18) + (rr_outer * 3)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) % 3)) + 18)];
}
}
}
if ((((int)threadIdx.z) * 3) < (24 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 3) {
if ((rr_outer * 3) < (6 - (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8))) {
K2_shared[((((int)threadIdx.z) * 3) + ((int)threadIdx.x))] = K2[((((rr_outer * 48) + ((((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8) * 16)) + (((int)blockIdx.z) * 8)) + (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) % 8))];
}
}
}
__syncthreads();
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 16) + ((int)threadIdx.x))] = ((bool)1 ? Data[(((((((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) / 128) * 16384) + (rc_outer * 4096)) + (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 128) / 32) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32))] : 0.000000e+00f);
if ((((int)threadIdx.z) * 2) < (12 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
if ((rc_outer * 4) < (16 - (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3))) {
K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = K0[((((rc_outer * 24) + ((((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3) * 6)) + (rr_outer * 3)) + (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) % 3))];
}
}
}
if (((int)threadIdx.x) < (3 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
if (((int)threadIdx.x) < (6 - ((int)threadIdx.z))) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[((((((((int)threadIdx.x) + ((int)threadIdx.z)) / 3) * 18) + (rr_outer * 3)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) % 3)) + 24)];
}
}
}
if ((((int)threadIdx.z) * 3) < (24 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 3) {
if ((rr_outer * 3) < (6 - (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8))) {
K2_shared[((((int)threadIdx.z) * 3) + ((int)threadIdx.x))] = K2[((((rr_outer * 48) + ((((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8) * 16)) + (((int)blockIdx.z) * 8)) + (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) % 8))];
}
}
}
__syncthreads();
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 16) + ((int)threadIdx.x))] = (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32) < 31) ? Data[((((((((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) / 128) * 16384) + (rc_outer * 4096)) + (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 128) / 32) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32)) + 1)] : 0.000000e+00f);
if ((((int)threadIdx.z) * 2) < (12 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
if ((rc_outer * 4) < (16 - (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3))) {
K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = K0[((((rc_outer * 24) + ((((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3) * 6)) + (rr_outer * 3)) + (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) % 3))];
}
}
}
if (((int)threadIdx.x) < (3 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
if (((int)threadIdx.x) < (6 - ((int)threadIdx.z))) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[((((((((int)threadIdx.x) + ((int)threadIdx.z)) / 3) * 18) + (rr_outer * 3)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) % 3)) + 30)];
}
}
}
if ((((int)threadIdx.z) * 3) < (24 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 3) {
if ((rr_outer * 3) < (6 - (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8))) {
K2_shared[((((int)threadIdx.z) * 3) + ((int)threadIdx.x))] = K2[((((rr_outer * 48) + ((((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8) * 16)) + (((int)blockIdx.z) * 8)) + (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) % 8))];
}
}
}
__syncthreads();
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 16) + ((int)threadIdx.x))] = ((((((int)blockIdx.y) * 4) < (31 - hh_inner_outer)) && (1 <= (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32))) ? Data[((((((((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) / 128) * 16384) + (rc_outer * 4096)) + (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 128) / 32) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32)) + 31)] : 0.000000e+00f);
if ((((int)threadIdx.z) * 2) < (12 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
if ((rc_outer * 4) < (16 - (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3))) {
K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = K0[((((rc_outer * 24) + ((((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3) * 6)) + (rr_outer * 3)) + (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) % 3))];
}
}
}
if (((int)threadIdx.x) < (3 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[((((((((int)threadIdx.x) + ((int)threadIdx.z)) / 3) * 18) + (rr_outer * 3)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) % 3)) + 36)];
}
}
if ((((int)threadIdx.z) * 3) < (24 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 3) {
if ((rr_outer * 3) < (6 - (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8))) {
K2_shared[((((int)threadIdx.z) * 3) + ((int)threadIdx.x))] = K2[((((rr_outer * 48) + ((((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8) * 16)) + (((int)blockIdx.z) * 8)) + (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) % 8))];
}
}
}
__syncthreads();
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 16) + ((int)threadIdx.x))] = (((((int)blockIdx.y) * 4) < (31 - hh_inner_outer)) ? Data[((((((((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) / 128) * 16384) + (rc_outer * 4096)) + (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 128) / 32) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32)) + 32)] : 0.000000e+00f);
if ((((int)threadIdx.z) * 2) < (12 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
if ((rc_outer * 4) < (16 - (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3))) {
K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = K0[((((rc_outer * 24) + ((((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3) * 6)) + (rr_outer * 3)) + (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) % 3))];
}
}
}
if (((int)threadIdx.x) < (3 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[((((((((int)threadIdx.x) + ((int)threadIdx.z)) / 3) * 18) + (rr_outer * 3)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) % 3)) + 42)];
}
}
if ((((int)threadIdx.z) * 3) < (24 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 3) {
if ((rr_outer * 3) < (6 - (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8))) {
K2_shared[((((int)threadIdx.z) * 3) + ((int)threadIdx.x))] = K2[((((rr_outer * 48) + ((((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8) * 16)) + (((int)blockIdx.z) * 8)) + (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) % 8))];
}
}
}
__syncthreads();
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
__syncthreads();
pad_temp_shared[((((int)threadIdx.z) * 16) + ((int)threadIdx.x))] = ((((((int)blockIdx.y) * 4) < (31 - hh_inner_outer)) && ((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32) < 31)) ? Data[((((((((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) / 128) * 16384) + (rc_outer * 4096)) + (((((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 128) / 32) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((((int)threadIdx.z) * 16) + ((int)threadIdx.x)) % 32)) + 33)] : 0.000000e+00f);
if ((((int)threadIdx.z) * 2) < (12 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 2) {
if ((rc_outer * 4) < (16 - (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3))) {
K0_shared[((((int)threadIdx.z) * 2) + ((int)threadIdx.x))] = K0[((((rc_outer * 24) + ((((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) / 3) * 6)) + (rr_outer * 3)) + (((((int)threadIdx.z) * 2) + ((int)threadIdx.x)) % 3))];
}
}
}
if (((int)threadIdx.x) < (3 - ((int)threadIdx.z))) {
if (((int)threadIdx.x) < 1) {
K1_shared[(((int)threadIdx.x) + ((int)threadIdx.z))] = K1[((((((((int)threadIdx.x) + ((int)threadIdx.z)) / 3) * 18) + (rr_outer * 3)) + ((((int)threadIdx.x) + ((int)threadIdx.z)) % 3)) + 48)];
}
}
if ((((int)threadIdx.z) * 3) < (24 - ((int)threadIdx.x))) {
if (((int)threadIdx.x) < 3) {
if ((rr_outer * 3) < (6 - (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8))) {
K2_shared[((((int)threadIdx.z) * 3) + ((int)threadIdx.x))] = K2[((((rr_outer * 48) + ((((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) / 8) * 16)) + (((int)blockIdx.z) * 8)) + (((((int)threadIdx.z) * 3) + ((int)threadIdx.x)) % 8))];
}
}
}
__syncthreads();
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[0]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[3]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[6]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[9]) * K1_shared[0]) * K2_shared[((int)threadIdx.z)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[1]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[4]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[7]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[10]) * K1_shared[1]) * K2_shared[(((int)threadIdx.z) + 8)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[(((int)threadIdx.x) * 2)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 1)] * K0_shared[2]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 32)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 33)] * K0_shared[5]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 64)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 65)] * K0_shared[8]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[0] = (Output_local[0] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 96)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
Output_local[1] = (Output_local[1] + (((pad_temp_shared[((((int)threadIdx.x) * 2) + 97)] * K0_shared[11]) * K1_shared[2]) * K2_shared[(((int)threadIdx.z) + 16)]));
}
}
Output[(((((((int)blockIdx.z) * 8192) + (((int)threadIdx.z) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((int)threadIdx.x) * 2))] = Output_local[0];
Output[((((((((int)blockIdx.z) * 8192) + (((int)threadIdx.z) * 1024)) + (((int)blockIdx.y) * 128)) + (hh_inner_outer * 32)) + (((int)threadIdx.x) * 2)) + 1)] = Output_local[1];
}
}
// Host-side launcher for the generated fused NCHW conv kernel. The grid and
// block shapes below are the exact configuration the kernel was specialized
// for by the code generator; do not change one without the other.
void Conv2dCpFusedNchwKernelLauncher(const float* U, const float* K0,
const float* K1, const float* K2, float* V){
    const dim3 grid(1, 8, 2);    // blocks: (x, y, z)
    const dim3 block(16, 1, 8);  // threads per block: (x, y, z)
    default_function_kernel0<<<grid, block>>>(U, K0, K1, K2, V);
    // Block the host until the kernel has finished so callers may read V.
    cudaDeviceSynchronize();
}
#endif
|
11,083 | #include "includes.h"
// Build a global item-frequency list from the flattened transaction item
// array: each block accumulates a private histogram in shared memory, then
// merges it into d_flist with one atomicAdd per histogram slot.
//
// d_transactions           : flattened item ids, one per element
// d_flist                  : global frequency counts, indexed by item id;
//                            must be zeroed by the host before launch
// num_items_in_transactions: total number of items in d_transactions
//
// NOTE(review): d_trans_offset and num_transactions are currently unused
// (the per-transaction walk was removed); the parameters are kept so the
// launch signature stays unchanged for existing callers.
// Assumes blockDim.x == BLOCK_SIZE — TODO confirm at the call site.
__global__ void makeFlist(unsigned int *d_trans_offset, unsigned int *d_transactions, unsigned int *d_flist, unsigned int num_transactions, unsigned int num_items_in_transactions){
    __shared__ unsigned int private_items[max_unique_items];
    const int tx = threadIdx.x;
    const int index = tx + blockDim.x * blockIdx.x;

    // Number of strided passes needed for one block to touch every histogram
    // slot. Integer ceil-division, hoisted out of the loops (the original
    // re-evaluated a double-precision ceil() in every loop condition).
    const int passes = (max_unique_items + BLOCK_SIZE - 1) / BLOCK_SIZE;

    // Zero the block-private histogram.
    for (int i = 0; i < passes; i++){
        int slot = tx + i * blockDim.x;
        if (slot < max_unique_items)
            private_items[slot] = 0;
    }
    __syncthreads();

    // One thread per item: fold this thread's item into the shared histogram.
    // Items >= max_unique_items are ignored, matching the original guard.
    if (index < num_items_in_transactions && d_transactions[index] < max_unique_items)
        atomicAdd(&private_items[d_transactions[index]], 1);
    __syncthreads();

    // Merge the private histogram into the global frequency list.
    // (The trailing __syncthreads() the original had after this loop was a
    // no-op at kernel exit and has been dropped.)
    for (int i = 0; i < passes; i++){
        int slot = tx + i * blockDim.x;
        if (slot < max_unique_items)
            atomicAdd(&d_flist[slot], private_items[slot]);
    }
}
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>

#include <algorithm>
#include <cstdio>
#include <cstdlib>
#include <vector>
// Small helper that times GPU work in milliseconds using a pair of CUDA
// events: the constructor creates the events, the destructor releases them.
struct GpuTimer
{
    cudaEvent_t start;
    cudaEvent_t stop;

    GpuTimer()
    {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }

    ~GpuTimer()
    {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }

    // Record the starting event on the default stream.
    void Start() { cudaEventRecord(start, 0); }

    // Record the stopping event on the default stream.
    void Stop() { cudaEventRecord(stop, 0); }

    // Wait for the stop event to complete, then return the start->stop
    // interval in milliseconds.
    float Elapsed()
    {
        float ms = 0.0f;
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&ms, start, stop);
        return ms;
    }
};
// Benchmark thrust::sort on the device for several input sizes. Keys are
// random bytes (rand() results truncated to char). Only the device-side sort
// is timed; generation and transfers are excluded.
//
// Fix: printf previously relied on a transitive declaration; <cstdio> is now
// included explicitly in this translation unit's include block.
int main (int argc, char **argv)
{
    std::vector<int> sizes;
    sizes.push_back(100000);
    sizes.push_back(1000000);
    sizes.push_back(10000000);
    for (std::vector<int>::iterator it = sizes.begin(); it != sizes.end(); it++) {
        const int N = *it;
        // Generate N random keys serially on the host.
        thrust::host_vector<char> h_vec(N);
        std::generate(h_vec.begin(), h_vec.end(), rand);
        // Transfer the data to the device.
        thrust::device_vector<char> d_vec = h_vec;
        // Time the device-side sort only.
        GpuTimer timer;
        timer.Start();
        thrust::sort(d_vec.begin(), d_vec.end());
        timer.Stop();
        // Move the sorted data back to the host.
        thrust::copy(d_vec.begin(), d_vec.end(), h_vec.begin());
        printf("Thrust sorted %d keys in %g ms\n", N, timer.Elapsed());
    }
    return 0;
}
|
11,085 | #include "Test_tools.cuh"
// Sum of signed element-wise differences a[i] - b[i] over the first `num`
// elements, accumulated in double precision.
//
// Fix: the loop previously iterated a hard-coded 3 elements, silently
// ignoring the `num` parameter (over-reading for num < 3, under-checking
// for num > 3).
//
// NOTE(review): positive and negative differences can cancel; if callers
// want an error magnitude they should accumulate fabs(a[i] - b[i]) instead.
template<class T>
double checkValue(T* a, T* b, int num) {
    double error = 0;
    for (int i = 0; i < num; i++) {
        double currentError = a[i] - b[i];  // difference computed in T, widened to double
        error += currentError;
    }
    return error;
}
template double checkValue(double* a, double* b, int num);
template double checkValue(float* a, float* b, int num);
template double checkValue(int* a, int* b, int num);
11,086 | // Name: H.G. Manesha Washani
// Student Id: 1432289
#include <stdio.h>
#include <stdlib.h>
#define N 6
/* The _global_ indicates a function that runs on the device and it called for host code. A kernel to add two integers */
/* Element-wise matrix addition, C = A + B. One thread per element: the
   2-D thread coordinates within a single N x N block select the element. */
__global__ void MatAdd(int A[][N], int B[][N], int C[][N]){
    int row = threadIdx.x;
    int col = threadIdx.y;
    C[row][col] = A[row][col] + B[row][col];
}
/* My code i used variable is int g and int h, because of that reason i changed given code variables */
/* Fill newmat with pseudo-random values in [1, 100], echoing the matrix to
   stdout one row per line, followed by a separator rule. */
void randmatfunc(int newmat[N][N]){
    for (int g = 0; g < N; g++){
        for (int h = 0; h < N; h++){
            int value = rand() % 100 + 1;
            printf("%d ", value);
            newmat[g][h] = value;
        }
        printf("\n");
    }
    printf("\n-----------------------------------\n");
}
// Generate two random N x N matrices, add them on the GPU with a single
// N x N thread block, and print the result.
//
// Fix: the original also cudaMemcpy'd the *uninitialized* host C to d_C
// before the launch — a useless transfer that reads indeterminate memory.
// d_C is output-only, so that copy has been removed.
int main(){
    // Host matrices: two random inputs and the result.
    int A[N][N];
    randmatfunc(A);
    int B[N][N];
    randmatfunc(B);
    int C[N][N];
    // Device copies, typed as pointers-to-row so the kernel can 2-D index.
    int (*d_A)[N], (*d_B)[N], (*d_C)[N];
    cudaMalloc((void**)&d_A, (N*N)*sizeof(int));
    cudaMalloc((void**)&d_B, (N*N)*sizeof(int));
    cudaMalloc((void**)&d_C, (N*N)*sizeof(int));
    // Copy only the inputs to the device.
    cudaMemcpy(d_A, A, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, (N*N)*sizeof(int), cudaMemcpyHostToDevice);
    // Launch one N x N block; each thread adds exactly one element.
    int numBlocks = 1;
    dim3 threadsPerBlock(N, N);
    MatAdd<<<numBlocks, threadsPerBlock>>>(d_A, d_B, d_C);
    // Copy the result back to the host (implicitly synchronizes).
    cudaMemcpy(C, d_C, (N*N)*sizeof(int), cudaMemcpyDeviceToHost);
    int g, h;
    printf("C = \n");
    for (g = 0; g < N; g++){
        for (h = 0; h < N; h++){
            printf("%d ", C[g][h]);
        }
        printf("\n");
    }
    // Release device memory.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    printf("\n");
    return 0;
}
|
11,087 | #ifdef _GLIBCXX_USE_INT128
#undef _GLIBCXX_USE_INT128
#endif
#ifdef _GLIBCXX_ATOMIC_BUILTINS
#undef _GLIBCXX_ATOMIC_BUILTINS
#endif
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
// compute minimum and maximum values in a single reduction
// minmax_pair stores the minimum and maximum
// values that have been encountered so far
// Running accumulator for a one-pass min/max reduction.
template <typename T>
struct minmax_pair
{
T min_val;  // smallest value encountered so far
T max_val;  // largest value encountered so far
};
// minmax_unary_op is a functor that takes in a value x and
// returns a minmax_pair whose minimum and maximum values
// are initialized to x.
template <typename T>
struct minmax_unary_op : public thrust::unary_function<T,T>
{
__host__ __device__
minmax_pair<T> operator()(const T& x) const {
minmax_pair<T> result;
result.min_val = x;
result.max_val = x;
return result;
}
};
// minmax_binary_op is a functor that accepts two minmax_pair
// structs and returns a new minmax_pair whose minimum and
// maximum values are the min() and max() respectively of
// the minimums and maximums of the input pairs
template <typename T>
struct minmax_binary_op : public thrust::binary_function<T,T,T>
{
__host__ __device__
minmax_pair<T> operator()(const minmax_pair<T>& x, const minmax_pair<T>& y) const {
minmax_pair<T> result;
result.min_val = thrust::min(x.min_val, y.min_val);
result.max_val = thrust::max(x.max_val, y.max_val);
return result;
}
};
// Compute the minimum and maximum of an integer array in a single pass with
// thrust::transform_reduce, then print them.
//
// Fixes: the array was declared int x[7] with only six initializers, so a
// stray zero-valued seventh element silently entered the reduction; the
// device vector was also declared <float> while the functors are <int>,
// forcing pointless int<->float conversions. Both are corrected below; the
// printed result (-4 and 7) is unchanged.
int main(void)
{
    // Host input.
    int x[6] = {-1, 2, 7, -3, -4, 5};
    // Transfer to device, keeping the element type int throughout.
    thrust::device_vector<int> d_x(x, x + 6);
    // Reduction operators.
    minmax_unary_op<int> unary_op;
    minmax_binary_op<int> binary_op;
    // Seed the reduction with the first element.
    minmax_pair<int> init = unary_op(d_x[0]);
    // Single fused transform + reduce pass over the data.
    minmax_pair<int> result = thrust::transform_reduce(d_x.begin(), d_x.end(), unary_op, init, binary_op);
    std::cout << result.min_val << std::endl;  // expected: -4
    std::cout << result.max_val << std::endl;  // expected: 7
    std::cout << "TEST PASSED\n";
    return 0;
}
|
11,088 | #include "includes.h"
// Luminance from B, G, R channels using the BT.709 luma coefficients
// (0.2126 R + 0.7152 G + 0.0722 B).
// FIX: use float literals — the bare double constants promoted the whole
// expression to double arithmetic on the device.
__device__ float rgb2Lum(float B, float G, float R)
{
    return B * 0.0722f + G * 0.7152f + R * 0.2126f;
}
// Find the maximum luminance over an interleaved 3-channel float image.
// `array` holds n floats of interleaved pixel data (3 channels per pixel);
// BLUE/GREEN/RED are channel offsets defined in includes.h — presumably
// 0/1/2, TODO confirm. Each thread scans pixels in a grid-stride pattern,
// the block reduces its partials in shared memory, and thread 0 merges the
// block maximum into *max under a spinlock on *mutex.
// Launch requirements: dynamic shared memory of blockDim.x * sizeof(float)
// (third <<<>>> argument); *mutex must be initialized to 0.
// NOTE(review): the halving reduction below only covers every element when
// blockDim.x is a power of two — confirm launch configuration.
__global__ void find_maximum_kernel(float *array, float *max, int *mutex, unsigned int n)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
// grid-stride: total threads in the grid
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
// const int size = blockSize;
extern __shared__ float cache[];
// per-thread running maximum; -1.0 assumes luminances are non-negative
float temp = -1.0;
// guard checks the last channel index of the pixel stays inside `array`
while(((index + offset)*3 + 2) < n) {
float B, G, R, L;
B = array[(index + offset)*3 + BLUE];
G = array[(index + offset)*3 + GREEN];
R = array[(index + offset)*3 + RED];
L = rgb2Lum(B, G, R);
temp = fmaxf(temp, L);
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
}
// i is uniform across the block, so this barrier is not divergent
__syncthreads();
i /= 2;
}
// one thread per block publishes the block maximum under a global spinlock
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*max = fmaxf(*max, cache[0]);
atomicExch(mutex, 0); //unlock
}
}
11,089 | #include <string>
#include <cstring>
#include <cstdlib>
#include <stdio.h>
#include <iostream>
#include "parse_graph.cuh"
#define SSSP_INF 1073741824
uint parse_graph::parse(
std::ifstream& inFile,
std::vector<initial_vertex>& initGraph,
const long long arbparam,
const bool nondirected ) {
const bool firstColumnSourceIndex = true;
std::string line;
char delim[3] = " \t"; //In most benchmarks, the delimiter is usually the space character or the tab character.
char* pch;
uint nEdges = 0;
unsigned int Additionalargc=0;
char* Additionalargv[ 61 ];
// Read the input graph line-by-line.
while( std::getline( inFile, line ) ) {
if( line[0] < '0' || line[0] > '9' ) // Skipping any line blank or starting with a character rather than a number.
continue;
char cstrLine[256];
std::strcpy( cstrLine, line.c_str() );
uint firstIndex, secondIndex;
pch = strtok(cstrLine, delim);
if( pch != NULL )
firstIndex = atoi( pch );
else
continue;
pch = strtok( NULL, delim );
if( pch != NULL )
secondIndex = atoi( pch );
else
continue;
uint theMax = std::max( firstIndex, secondIndex );
// some input source is src dst weight , some is reversed
uint srcVertexIndex = firstColumnSourceIndex ? firstIndex : secondIndex;
uint dstVertexIndex = firstColumnSourceIndex ? secondIndex : firstIndex;
if( initGraph.size() <= theMax )
initGraph.resize(theMax+1);
{ //This is just a block
// Add the neighbor. A neighbor wraps edges
neighbor nbrToAdd;
nbrToAdd.srcIndex = srcVertexIndex;
Additionalargc=0;
Additionalargv[ Additionalargc ] = strtok( NULL, delim );
while( Additionalargv[ Additionalargc ] != NULL ){
Additionalargc++;
Additionalargv[ Additionalargc ] = strtok( NULL, delim );
}
initGraph.at(srcVertexIndex).vertexValue.distance = ( srcVertexIndex != arbparam ) ? SSSP_INF : 0;
initGraph.at(dstVertexIndex).vertexValue.distance = ( dstVertexIndex != arbparam ) ? SSSP_INF : 0;
nbrToAdd.edgeValue.weight = ( Additionalargc > 0 ) ? atoi(Additionalargv[0]) : 1;
initGraph.at(dstVertexIndex).nbrs.push_back( nbrToAdd );
nEdges++;
}
if( nondirected ) {
// Add the edge going the other way
uint tmp = srcVertexIndex;
srcVertexIndex = dstVertexIndex;
dstVertexIndex = tmp;
//swap src and dest and add as before
neighbor nbrToAdd;
nbrToAdd.srcIndex = srcVertexIndex;
Additionalargc=0;
Additionalargv[ Additionalargc ] = strtok( NULL, delim );
while( Additionalargv[ Additionalargc ] != NULL ){
Additionalargc++;
Additionalargv[ Additionalargc ] = strtok( NULL, delim );
}
initGraph.at(srcVertexIndex).vertexValue.distance = ( srcVertexIndex != arbparam ) ? SSSP_INF : 0;
initGraph.at(dstVertexIndex).vertexValue.distance = ( dstVertexIndex != arbparam ) ? SSSP_INF : 0;
nbrToAdd.edgeValue.weight = ( Additionalargc > 0 ) ? atoi(Additionalargv[0]) : 1;
initGraph.at(dstVertexIndex).nbrs.push_back( nbrToAdd );
nEdges++;
}
}
return nEdges;
}
|
11,090 | #include<cuda_runtime.h>
#include<iostream>
#include<stdio.h>
#include<sys/time.h>
#include<assert.h>
using namespace std;
#define REAL double
#define BX 128
#define BY 2
#define BZ 1
#define GZ 1
const REAL cc = 0.4;
const REAL ce = 0.1;
const REAL cw = 0.1;
const REAL cs = 0.1;
const REAL cn = 0.1;
const REAL ct = 0.1;
const REAL cb = 0.1;
//Must be re-written, including all the parameters
// CPU reference implementation of the 7-point stencil: each step replaces
// every cell with 0.4 * itself plus 0.1 * each of its six axis neighbors,
// where out-of-range neighbors are replaced by the center value. Buffers
// are ping-ponged locally each step, so after an odd number of steps the
// result lives in the caller's B, after an even number in A. Returns 0.
int stencil(REAL *A, REAL *B, int nx, int ny, int nz, int steps)
{
#define IDX(i,j,k) ((i)*ny*nz+(j)*nz+(k))
	for (int s = 0; s < steps; s++) {
		for (int i = 0; i < nx; i++) {
			for (int j = 0; j < ny; j++) {
				for (int k = 0; k < nz; k++) {
					// Clamp each neighbor coordinate to the cell itself at the faces.
					int km = (k > 0)      ? k - 1 : k;
					int kp = (k < nz - 1) ? k + 1 : k;
					int jm = (j > 0)      ? j - 1 : j;
					int jp = (j < ny - 1) ? j + 1 : j;
					int im = (i > 0)      ? i - 1 : i;
					int ip = (i < nx - 1) ? i + 1 : i;
					// Same accumulation order as the original loop (center,
					// then k-, k+, j-, j+, i-, i+) for bit-identical results.
					B[IDX(i,j,k)] = 0.4*A[IDX(i,j,k)]
						+ 0.1*A[IDX(i,j,km)] + 0.1*A[IDX(i,j,kp)]
						+ 0.1*A[IDX(i,jm,k)] + 0.1*A[IDX(i,jp,k)]
						+ 0.1*A[IDX(im,j,k)] + 0.1*A[IDX(ip,j,k)];
				}
			}
		}
		// Swap roles: the freshly written buffer becomes next step's input.
		REAL *swap = A;
		A = B;
		B = swap;
	}
	return 0;
}
// Compare the interior points of two fields laid out as [z][y][x]
// (idx = z*nx*ny + y*nx + x) and report the first mismatch beyond 1e-5,
// or "Right" when all interior points agree.
void check(REAL *a, REAL *b, int nx, int ny, int nz) {
	int slice = nx * ny;
	for (int z = 1; z < nz-1; ++z) {
		for (int y = 1; y < ny-1; ++y) {
			// BUG FIX: the x loop was bounded by nz-1 instead of nx-1,
			// so for nx != nz it either skipped columns or read out of range.
			for (int x = 1; x < nx-1; ++x) {
				int idx = z * slice + y * nx + x;
				if (abs(a[idx]-b[idx]) > 1e-5) {
					cout << a[idx] << " " << b[idx] << endl;
					printf("%d\n", idx);
					printf("Wrong!!!!!!!!\n");
					return;
				}
			}
		}
	}
	printf("Right!!!!!!\n");
	return;
}
// Baseline GPU 7-point stencil: one thread per (i, j) column marches along
// its block's z-partition [kb, ke). Face cells clamp the missing neighbor
// index to the center cell `c`, matching the CPU reference. Neighbor
// indices are recomputed from the current `c` every iteration.
// Expects a 2D thread layout over (x, y) with gridDim.z z-partitions.
__global__ void baseline(REAL* A, REAL* B, int nx, int ny, int nz)
{
int i = threadIdx.x + blockDim.x*blockIdx.x;
int j = threadIdx.y + blockDim.y*blockIdx.y;
// first k-slice owned by this z-block
int kb = nz/gridDim.z*blockIdx.z;
int slice = nx*ny;
int k = kb;
//int k = kb > 0? kb: 1;
// one-past-last slice, clamped to nz
int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz;
// linear index of this thread's cell in slice k
int c = i + j*nx + k*slice;
if(i>=0 && i<nx && j>=0 && j<ny){
//#pragma unroll
for (; k < ke; k++){
// neighbor indices, clamped to c at the domain faces
int w = (i==0)?c:c-1;
int e = (i==nx-1)?c:c+1;
int n = (j==0)?c:c-nx;
int s = (j==ny-1)?c:c+nx;
int b = (k==0)?c:c-slice;
int t = (k==nz-1)?c:c+slice;
B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n]
+ct*A[t] + cb*A[b] + cc*A[c];
c += slice;
//if (k > 0 && k < nz-1 && i > 0 && i < nx-1 && j > 0 && j < ny-1){
// B[idx] = ce*A[idx+1] + cw*A[idx-1] + cs*A[idx+nx] + cn*A[idx-nx]
// +ct*A[idx+slice] + cb*A[idx-slice] + cc*A[idx];
// idx += slice;
}
}
}
// Optimized stencil: one thread per (i, j) column marches along k, keeping
// the bottom/center/top plane values in registers (b_b, b_c, b_t) so each
// cell's vertical neighbors are read from global memory only once.
// BUG FIX: the lateral neighbor indices w/e/n/s were computed once for the
// first k-slice and never advanced while c marched by `slice`, so every
// slice after the first read its east/west/north/south values from slice
// kb (the commented-out original recomputed them from idx each step). They
// now advance with c; the face clamp (w == c, etc.) is preserved because
// both indices advance by the same amount.
__global__ void baseopt(REAL* A, REAL* B, int nx, int ny, int nz)
{
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    int j = threadIdx.y + blockDim.y*blockIdx.y;
    int kb = nz/gridDim.z*blockIdx.z;
    int slice = nx*ny;
    int k = kb;
    int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz;
    int c = i + j*nx + k*slice;
    // Neighbor indices for the starting slice, clamped to c at the faces.
    int b = (k==0)?c:c-slice;
    int w = (i==0)?c:c-1;
    int e = (i==nx-1)?c:c+1;
    int n = (j==0)?c:c-nx;
    int s = (j==ny-1)?c:c+nx;
    int t;
    double b_b = A[b];
    double b_c = A[c];
    double b_t;
    if(i>=0 && i<nx && j>=0 && j<ny){
        #pragma unroll
        for (; k < ke; k++){
            t = (k==nz-1)?c:c+slice;
            b_t = A[t];
            B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n]
                +ct*b_t + cb*b_b + cc*b_c;
            // Shift the register plane window up by one slice.
            b_b = b_c;
            b_c = b_t;
            c += slice;
            // Advance the lateral neighbors together with c (the fix).
            w += slice;
            e += slice;
            n += slice;
            s += slice;
        }
    }
    return;
}
// Same register-plane stencil as baseopt, with A marked const __restrict__
// so loads can go through the read-only data cache.
// BUG FIX: as in baseopt, the lateral neighbor indices w/e/n/s were frozen
// at the first k-slice while c advanced by `slice` each iteration, making
// every slice after the first read stale east/west/north/south values.
// They now advance with c, preserving the face clamp (w == c stays w == c).
__global__ void roc(const REAL* __restrict__ A, REAL* B, int nx, int ny, int nz)
{
    int i = threadIdx.x + blockDim.x*blockIdx.x;
    int j = threadIdx.y + blockDim.y*blockIdx.y;
    int kb = nz/gridDim.z*blockIdx.z;
    int slice = nx*ny;
    int k = kb;
    int ke = (kb+nz/gridDim.z<nz)? kb+nz/gridDim.z : nz;
    int c = i + j*nx + k*slice;
    // Neighbor indices for the starting slice, clamped to c at the faces.
    int b = (k==0)?c:c-slice;
    int w = (i==0)?c:c-1;
    int e = (i==nx-1)?c:c+1;
    int n = (j==0)?c:c-nx;
    int s = (j==ny-1)?c:c+nx;
    int t;
    double b_b = A[b];
    double b_c = A[c];
    double b_t;
    if(i>=0 && i<nx && j>=0 && j<ny){
        #pragma unroll
        for (; k < ke; k++){
            t = (k==nz-1)?c:c+slice;
            b_t = A[t];
            B[c] = ce*A[e] + cw*A[w] + cs*A[s] + cn*A[n]
                +ct*b_t + cb*b_b + cc*b_c;
            // Shift the register plane window up by one slice.
            b_b = b_c;
            b_c = b_t;
            c += slice;
            // Advance the lateral neighbors together with c (the fix).
            w += slice;
            e += slice;
            n += slice;
            s += slice;
        }
    }
    return;
}
// Driver: reads NX NY NZ T from argv[2..5] and runs the GPU stencil,
// splitting large volumes into `num` z-partitions with one-layer halos so
// each partition fits on the device.
// NOTE(review): argv[1] is never read and argc is not validated — confirm
// the expected command line before relying on this.
int main(int argc, char **argv){
int NX = atoi(argv[2]);
int NY = atoi(argv[3]);
int NZ = atoi(argv[4]);
int T = atoi(argv[5]);
// Partition count: single partition for grids up to 600^3, otherwise 8.
int num = 8;
int NZ_ = NZ/num+2;
if (NX*NY*NZ <= 600*600*600) {
num = 1;
NZ_ = NZ;
}
// p1 / p2: z-extent of a regular partition and of the final partition.
int p1, p2;
if (NZ % num == 0) {
p1 = p2 = NZ/num;
} else {
p1 = NZ / (num-1);
p2 = NZ - p1*(num-1);
}
//int size = sizeof(REAL)*NX*NY*NZ;
int partsize1 = NX*NY*p1;
int partsize2 = NX*NY*p2;
REAL *host_A, *host_B;
int totalsize;
// Host buffers hold all partitions; split layouts reserve 2 extra halo
// slices (NX*NY each) per non-final partition.
if (num == 1) {
totalsize = NX*NY*NZ;
host_A = new REAL[totalsize];
host_B = new REAL[totalsize];
} else {
totalsize = (partsize1+2*NX*NY)*(num-1)+partsize2;
host_A = new REAL[totalsize];
host_B = new REAL[totalsize];
}
int size_ = NZ_*NY*NX;
// CPU reference buffers (the comparison below is currently commented out).
REAL *cpu_A = new REAL[NX*NY*NZ];
REAL *result_A = new REAL[NX*NY*NZ];
REAL *cpu_B = new REAL[NX*NY*NZ];
for (int i = 0; i < totalsize; ++i) {
host_A[i] = 1.0;
host_B[i] = 1.0;
}
//for (int k = 0; k < NZ; k++)
// for (int j = 0; j < NY; j++)
// for (int i = 0; i < NX; i++) {
// host_A[k*NY*NX+j*NX+i] = 1.0;
// host_B[k*NY*NX+j*NX+i] = 1.0;
// }
for (int k = 0; k < NZ; k++)
for (int j = 0; j < NY; j++)
for (int i = 0; i < NX; i++) {
//cout << k*NY*NX + j*NX + i << endl;
cpu_A[k*NY*NX+j*NX+i] = 1.0;
cpu_B[k*NY*NX+j*NX+i] = 1.0;
result_A[k*NY*NX+j*NX+i] = 1.0;
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
float elapsed_time;
float elapsed_timecopy;
double flops;
int index = 0;
int partsize;
cout << "start gpu computing..." << endl;
// Process each z-partition: copy in (with halos), run T stencil steps on
// the device, copy the result back, and advance to the next partition.
// First and last partitions carry one halo slice, interior ones carry two.
for (int i = 0; i < num; ++i) {
REAL *dev_A, *dev_B;
if (i == 0) {
partsize = partsize1+NX*NY;
NZ_ = p1+1;
} else if (i < num-1) {
partsize = partsize1+2*NX*NY;
NZ_ = p1+2;
} else {
partsize = partsize2+NX*NY;
NZ_ = p2+1;
}
if (num == 1) {
partsize = NX*NY*NZ;
NZ_ = NZ;
}
cudaMalloc(&dev_A, sizeof(REAL)*partsize);
cudaMalloc(&dev_B, sizeof(REAL)*partsize);
// cudaEvent_t startcopy,stopcopy;
// cudaEventCreate(&startcopy);
// cudaEventCreate(&stopcopy);
// cudaEventRecord(startcopy, 0);
cudaMemcpy(dev_A, host_A+index, sizeof(REAL)*partsize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_B, host_B+index, sizeof(REAL)*partsize, cudaMemcpyHostToDevice);
// cudaEventRecord(stopcopy,0);
// cudaEventSynchronize(stopcopy);
// cudaEventElapsedTime(&elapsed_timecopy, startcopy, stopcopy);
dim3 threadPerBlock(BX, BY, BZ); //128,1,1
dim3 blockPerGrid((NX+BX-1)/BX, (NY+BY-1)/BY, GZ); //512/128,512/1,1 = 4,512,1
///////////////////////////////////////////////////////////////
//baseline
// Ping-pong the device buffers; after each swap dev_A holds the most
// recent output, so the copy-back below is correct for any T.
for (int t = 0; t < T; t++){
roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ_);
REAL* tmp = dev_A;
dev_A = dev_B;
dev_B = tmp;
}
///////////////////////////////////////////////////////////////
if (cudaGetLastError() != cudaSuccess)
printf("cudawrong!!!\n");
cudaMemcpy(host_A+index, dev_A, sizeof(REAL)*partsize, cudaMemcpyDeviceToHost);
index += partsize;
cudaFree(dev_A);
cudaFree(dev_B);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
// CPU reference timing (the call itself is commented out).
struct timeval t1, t2;
gettimeofday(&t1, NULL);
// stencil(cpu_A, cpu_B, NX, NY, NZ, T);
gettimeofday(&t2, NULL);
float cpubaseline_time = (t2.tv_sec-t1.tv_sec)*1e3 + (t2.tv_usec-t1.tv_usec)*1e-3;
cout << "CPU time:" << cpubaseline_time/T << " ms" << endl;
/*
if (num == 1) {
check(cpu_A, host_A, NX, NY, NZ);
} else {
int index=0, partsize=0, idx=0;
for (int i = 0; i < num; ++i) {
if (i < num-1) partsize = partsize1;
else partsize = partsize2;
for (int j = 0; j < partsize; ++j) {
result_A[idx] = host_A[index+j];
idx++;
}
index += partsize+2*NX*NY;
}
check(cpu_A, result_A, NX, NY, NZ);
}*/
//printf("baseline: Gflops = %lf\n", flops);
printf("baseline: elapsed time = %f ms\n", elapsed_time);
// printf("baseline: elapsed timecopy = %f ms\n", elapsed_timecopy*num);
flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6;
flops /= elapsed_time;
/*
///////////////////////////////////////////////////////////////
//baseopt
cudaEventRecord(start, 0);
for (int t = 0; t < T; t++){
baseopt<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ);
REAL* tmp = dev_A;
dev_A = dev_B;
dev_B = tmp;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
if (cudaGetLastError() != cudaSuccess)
printf("baseopt: wrong!!!\n");
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("baseopt: elapsed time = %f ms\n", elapsed_time/T);
flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6;
flops /= elapsed_time;
//printf("baseopt: Gflops = %lf\n", flops);
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
//read-only data cache
cudaEventRecord(start, 0);
for (int t = 0; t < T; t++){
roc<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ);
REAL* tmp = dev_A;
dev_A = dev_B;
dev_B = tmp;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
if (cudaGetLastError() != cudaSuccess)
printf("read-only data cache: wrong!!!\n");
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("read-only data cache: elapsed time = %f ms\n", elapsed_time);
flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6;
flops /= elapsed_time;
//printf("read-only data cache: Gflops = %lf\n", flops);
///////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////
//share memory raw
cudaEventRecord(start, 0);
for (int t = 0; t < T; t++){
shm_raw<<<blockPerGrid, threadPerBlock>>>(dev_A, dev_B, NX, NY, NZ);
REAL* tmp = dev_A;
dev_A = dev_B;
dev_B = tmp;
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
if (cudaGetLastError() != cudaSuccess)
printf("share memory raw: wrong!!!\n");
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("share memory raw: elapsed time = %f ms\n", elapsed_time/T);
flops = 1.0*13*(NX-2)*(NY-2)*(NZ-2)*T/1.e+6;
flops /= elapsed_time;
//printf("share memory raw: Gflops = %lf\n", flops);
///////////////////////////////////////////////////////////////
cudaEventDestroy(start);
cudaEventDestroy(stop);
*/
// NOTE(review): host_A/host_B/cpu_A/cpu_B/result_A are never freed, and
// size_/elapsed_timecopy are unused in the live code path.
return 0;
}
|
11,091 | #include "includes.h"
#define SIZ 20
#define num_inp 4
using namespace std;
// A pair of vertex indices describing one graph edge.
// NOTE(review): the typedef name is `edges` (plural) for a single edge —
// confirm the intended naming with the callers.
typedef struct edge {
int first, second;
} edges;
// Gather selected rows of X into a contiguous mini-batch:
// X_batch[i] = X[sample_indices[i]] for each batch row i.
// Launch layout: one block per batch row (gridDim.x = batch size), one
// thread per column (`size` = row width in doubles).
__global__ void x_batch_kernel(double* X_batch, double * X, int * sample_indices, int size)
{
    int i = blockIdx.x;   // batch row
    int j = threadIdx.x;  // column within the row
    // Guard so launches with blockDim.x > size cannot write out of bounds.
    if (j < size)
        X_batch[i*size + j] = X[sample_indices[i] * size + j];
}
11,092 | /* Howto run ?
If executable name is 'reduce' then type the following in command terminal
./reduce 0 for global reduce
./reduce for shared memory reduce
*/
/* For global access, reduce run time - 0.911 ms approx
For shared memory access, reduce run time - 0.771 ms approx
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
/*
cuda.h - defines the public host functions and types for the CUDA driver API.
cuda_runtime_api.h - defines the public host functions and types for the CUDA
runtime API
cuda_runtime.h - defines everything cuda_runtime_api.h does, as well as built-in
type definitions and function overlays for the CUDA language extensions and
device intrinsic functions.*/
/*All info about the .h files is from stach overflow*/
/*If you were writing host code to be compiled with the host compiler which
* includes API calls, you would include either cuda.h or cuda_runtime_api.h.
* If you needed other CUDA language built-ins, like types, and were using the
* runtime API and compiling with the host compiler, you would include
* cuda_runtime.h
*/
// Tree-sum reduction done entirely in global memory: each block folds its
// blockDim.x-element segment of d_in in half repeatedly and thread 0
// writes the block's partial sum to d_out[blockIdx.x].
// WARNING: destructive — d_in is overwritten in place.
// Requires blockDim.x to be a power of two; an odd stage width would drop
// the last element of that stage.
__global__ void global_reduce_kernel(float * d_out, float * d_in){
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// do reduction in global mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
d_in[myId] += d_in[myId + s];
}
// s is uniform across the block, so every thread reaches this barrier
__syncthreads(); // make sure all adds at one stage are
//done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = d_in[myId];
}
}
// Tree-sum reduction staged through shared memory: each block loads its
// segment of d_in into sdata, folds it in half repeatedly, and thread 0
// writes the block's partial sum to d_out[blockIdx.x]. d_in is not
// modified (unlike global_reduce_kernel).
// Launch requirement: blockDim.x * sizeof(float) bytes of dynamic shared
// memory (3rd <<<>>> argument); blockDim.x must be a power of two.
__global__ void shmem_reduce_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t,
// shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
// s is uniform across the block, so this barrier is not divergent
__syncthreads(); // make sure all adds at one stage are
//done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
// Two-stage sum reduction of the `size` floats in d_in into *d_out.
// Stage 1 produces one partial per block into d_intermediate; stage 2 runs
// a single block over those partials.
// Preconditions: size is a multiple of 1024 and size/1024 <= 1024 (stage-2
// thread count), and both stage widths must be powers of two for the
// kernels' halving loops. d_intermediate needs >= size/1024 floats.
// d_in is clobbered when usesSharedMemory is false (global kernel reduces
// in place).
void reduce(float * d_out, float * d_intermediate, float * d_in, int size, bool usesSharedMemory)
{
// assumes that size is not greater than maxThreadsPerBlock^2
// and that size is a multiple of maxThreadsPerBlock
const int maxThreadsPerBlock = 1024;
int threads = maxThreadsPerBlock;
int blocks = size / maxThreadsPerBlock;
if (usesSharedMemory)
{
// dynamic shared memory: one float per thread
shmem_reduce_kernel<<<blocks, threads, threads *sizeof(float)>>>(d_intermediate, d_in);
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_intermediate, d_in);
}
// now we're down to one block left, so reduce it
threads = blocks; // launch one thread for each block in prev step
blocks = 1;
if (usesSharedMemory)
{
// NOTE(review): the final stage uses the global-memory kernel even in
// shared mode (the shmem call is commented out) — confirm intent.
// shmem_reduce_kernel<<<blocks, threads, threads *sizeof(float)>>>(d_out, d_intermediate);
global_reduce_kernel<<<blocks, threads>>>
(d_out, d_intermediate);
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_out, d_intermediate);
}
}
// Entry point: selects global vs shared-memory reduction from argv[1]
// (0 = global, 1 = shared), sums 2^20 ones on the GPU, and reports the
// elapsed time plus the device and host sums (expected: 1048576).
int main(int argc, char **argv)
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        fprintf(stderr, "error: no devices supporting CUDA.\n");
        exit(EXIT_FAILURE);
    }
    int dev = 0;
    cudaSetDevice(dev);

    // Print basic properties of the selected device.
    cudaDeviceProp devProps;
    if (cudaGetDeviceProperties(&devProps, dev) == 0)
    {
        printf("Using device %d:\n", dev);
        printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
               devProps.name, (int)devProps.totalGlobalMem,
               (int)devProps.major, (int)devProps.minor,
               (int)devProps.clockRate);
    }

    const int ARRAY_SIZE = 1 << 20;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);

    // Generate the input array on the host.
    // BUG FIX: h_in was a 4 MB stack array (float h_in[1<<20]), which can
    // overflow the default thread stack; allocate it on the heap instead.
    float *h_in = (float *)malloc(ARRAY_BYTES);
    if (h_in == NULL) {
        fprintf(stderr, "error: host allocation failed.\n");
        exit(EXIT_FAILURE);
    }
    float sum = 0.0f;
    for (int i = 0; i < ARRAY_SIZE; i++) {
        h_in[i] = 1.0f;   // deterministic input so the expected sum is known
        sum += h_in[i];
    }

    // Declare and allocate GPU memory, then upload the input.
    float *d_in, *d_intermediate, *d_out;
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
    cudaMalloc((void **) &d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);

    // argv arrives as strings; atoi converts the kernel selector for the
    // switch below.
    int whichKernel = 0;
    if (argc == 2) {
        whichKernel = atoi(argv[1]);
    }

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Launch the selected kernel once, bracketed by timing events.
    switch(whichKernel) {
    case 0:
        printf("Running global reduce\n");
        cudaEventRecord(start, 0);
        reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false);
        cudaEventRecord(stop, 0);
        break;
    case 1:
        printf("Running reduce with shared mem\n");
        cudaEventRecord(start, 0);
        reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true);
        cudaEventRecord(stop, 0);
        break;
    default:
        fprintf(stderr, "error: ran no kernel\n");
        exit(EXIT_FAILURE);
    }
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    // BUG FIX: elapsedTime was divided by 100 for "100 trials", but the
    // trial loop was reduced to a single call, so the reported time was
    // 100x too small; report the measured time directly.

    // Copy back the sum from the GPU (blocking, so no extra sync needed).
    float h_out;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("average time elapsed: %f\n", elapsedTime);
    printf("Reduce answer :%f",h_out);
    printf("Sum answer on host :%f",sum);

    // free GPU and host memory
    cudaFree(d_in);
    cudaFree(d_intermediate);
    cudaFree(d_out);
    free(h_in);
    return 0;
}
|
11,093 | #include <stdio.h>
// This is a kernel that does no real work but runs at least for a specified number of clocks
// Busy-wait kernel: spins on the device clock() until at least clock_count
// cycles have elapsed, then stores the measured cycle count to d_o[0].
// Every launched thread runs the loop and writes d_o[0] (last writer wins),
// so this is typically launched with a single thread.
// The device printf is debugging output only.
__global__ void clock_block(clock_t *d_o, clock_t clock_count)
{
unsigned int start_clock = (unsigned int) clock();
clock_t clock_offset = 0;
while (clock_offset < clock_count)
{
unsigned int end_clock = (unsigned int) clock();
// The code below should work like
// this (thanks to modular arithmetics):
//
// clock_offset = (clock_t) (end_clock > start_clock ?
// end_clock - start_clock :
// end_clock + (0xffffffffu - start_clock));
//
// Indeed, let m = 2^32 then
// end - start = end + m - start (mod m).
clock_offset = (clock_t)(end_clock - start_clock);
}
d_o[0] = clock_offset;
printf("End Clock Block\n");
}
|
11,094 | #include<stdio.h>
#include<cuda.h>
#define N 8000
// Zero-fill a[0..aLen-1]. Uses a global thread index so the grid may span
// multiple blocks (aLen can exceed the 1024 threads-per-block limit).
__global__ void fun(int *a, int aLen)
{
    unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id < aLen) a[id] = 0;
}

// a[id] += id for every element; after fun() this leaves a[i] == i.
__global__ void add(int *a, int aLen)
{
    unsigned int id = blockIdx.x * blockDim.x + threadIdx.x;
    if(id < aLen) a[id] += id;
}

int main()
{
    // BUG FIX: the kernels were launched as <<<1, N>>> with N = 8000, which
    // exceeds the 1024 threads-per-block hardware limit — both launches
    // failed and uninitialized memory was printed. Launch a multi-block
    // grid instead, with the kernels above computing a global index.
    int *da, i = 0;
    cudaMalloc(&da, sizeof(int) * N);

    int threads = 256;
    int blocks = (N + threads - 1) / threads;  // ceil-div grid sizing
    fun<<<blocks, threads>>>(da, N);
    add<<<blocks, threads>>>(da, N);
    if (cudaGetLastError() != cudaSuccess)
        printf("kernel launch failed\n");

    int a[N];
    // Blocking copy also synchronizes with the kernels before reading.
    cudaMemcpy(a, da, sizeof(int) * N, cudaMemcpyDeviceToHost);
    for(i = 0; i < N; i++)
    {
        printf("%d ", a[i]);
    }
    cudaFree(da); // previously leaked
    return 0;
}
11,095 | //parallel program
#include <iostream>
#include <fstream>
#include <string>
#include <cstdlib>
#include <ctime>
#include <vector>
#include <algorithm>
#include <curand_kernel.h>
#include <stdio.h>
#define K 401296
#define max(a, b) (((a) > (b)) ? (a) : (b))
#define min(a, b) (((a) < (b)) ? (a) : (b))
#define NT 4
#define PS 20
#define PS2 10
#define Y 365
#define EP 15
#define CHECK(res) if(res!=cudaSuccess){printf("Error:%d\n", __LINE__);exit(-1);}
// Clamp X into the closed interval [min, max].
__device__ double LIMIT(double min, double max, double X)
{
    if (X < min)
        return min;
    return (X > max) ? max : X;
}
// Piecewise-linear table lookup: x holds n/2 interleaved (abscissa, value)
// pairs, i.e. x[0],x[2],... are the breakpoints and x[1],x[3],... the
// values. Returns the value interpolated at X, clamped to the end values
// outside the table range. The loop scans every segment; the last
// bracketing segment wins.
__device__ double AFGEN(double *x, int n, double X)
{
double Y1 = 0;
// clamp below the first breakpoint
if (X <= x[0])
Y1 = x[1];
// clamp above the last breakpoint
if (X >= x[n - 2])
Y1 = x[n - 1];
for (int i = 2; i<n - 1;)
{
// segment [x[i-2], x[i]] brackets X: interpolate between its values
if ((x[i] >= X) && (X >= x[i - 2]))
{
double slope = (x[i + 1] - x[i - 1]) / (x[i] - x[i - 2]);
Y1 = x[i - 1] + (X - x[i - 2])*slope;
}
i += 2;
}
return Y1;
}
// Accumulate only the strictly positive entries of f[0..k-1].
__device__ double sum(double *f, int k)
{
    double total = 0;
    for (int idx = 0; idx < k; idx++)
    {
        if (f[idx] > 0)
            total += f[idx];
    }
    return total;
}
// Return the maximum element of f[0..k-1].
// BUG FIX: the original only recorded f[i+1] when it exceeded its
// immediate predecessor, so it returned the value after the *last local
// rise* (and 0 for a non-increasing array) rather than the true maximum.
__device__ double max2(double *f, int k)
{
    double best = f[0];
    for (int i = 1; i < k; i++)
    {
        if (f[i] > best)
            best = f[i];
    }
    return best;
}
//WOFOST model
// WOFOST-style crop growth simulation: integrates leaf-area index (LAI)
// day by day from emergence day param[0] through the year, driven by daily
// TMIN / TMAX / AVRAD series, and writes the LAI on the NT monitoring
// dates param[10..10+NT-1] into JCLAImoni. SPAN is the leaf life span
// parameter used for leaf senescence.
// NOTE(review): assumes TMIN/TMAX/AVRAD each hold Y (=365) daily values
// and that param has at least 10+NT entries — confirm against the caller.
__device__ void LAIcal(double SPAN, double *param, double *TMIN, double *TMAX, double *AVRAD, double *JCLAImoni)
{
double IDEM = param[0];
int IDAY = 0;
int DELT = 1;
// Crop parameter tables: interleaved (abscissa, value) pairs consumed by
// AFGEN; IL* are the total element counts of each table.
double DTSMTB[] = { 0.00, 0.00, 10.00, 0.00, 30.00, 20.00, 40.00, 30.00 };
int ILDTSM = sizeof(DTSMTB) / sizeof(double);
double TSUM1 = 1800;
double TSUM2 = 620;
double DVSI = 0.250;
double DVSEND = 2.00;
double TDWI = 65.00;
double RGRLAI = 0.0070;
double SLATB[] = { 0.00, 0.0045, 0.16, 0.0033, 0.61, 0.0030, 0.80, 0.0029, 1.00, 0.0025, 1.55, 0.0024, 2.02, 0.0018 };
int ILSLA = sizeof(SLATB) / sizeof(double);
double LAIEM = 0.10;
double TBASE = 15.0;
double KDIFTB[] = { 0.00, 0.40, 0.65, 0.40, 1.00, 0.60, 2.00, 0.60 };
int ILKDIF = sizeof(KDIFTB) / sizeof(double);
double EFFTB[] = { 10, 0.54, 40, 0.36 };
int ILEFF = sizeof(EFFTB) / sizeof(double);
double AMAXTB[] = { 0.00, 40.00, 1.00, 40, 1.90, 40, 2.00, 40.00 };
int ILAMAX = sizeof(AMAXTB) / sizeof(double);
double TMPFTB[] = { 0.00, 0.00, 12.00, 0.69, 18.00, 0.85, 24.00, 1.00, 30.00, 1.00, 36.00, 0.87, 42.00, 0.27 };
int ILTMPF = sizeof(TMPFTB) / sizeof(double);
double TMNFTB[] = { 0.00, 0.00, 3.00, 1.00 };
int ILTMNF = sizeof(TMNFTB) / sizeof(double);
double CVL = 0.754;
double CVO = 0.684;
double CVR = 0.754;
double CVS = 0.754;
double Q10 = 2.0;
double RML = 0.0200;
double RMO = 0.0030;
double RMR = 0.0100;
double RMS = 0.0150;
double RFSETB[] = { 0.00, 1.00, 2.00, 1.00 };
int ILRFSE = sizeof(RFSETB) / sizeof(double);
double FLTB[] = { 0.00, 0.65, 0.31, 0.60, 0.53, 0.57, 0.80, 0.35, 0.94, 0.14, 1.00, 0.10, 1.2, 0.00, 2.10, 0.00 };
int ILFL = sizeof(FLTB) / sizeof(double);
double FRTB[] = { 0.00, 0.50, 0.43, 0.45, 0.65, 0.40, 0.80, 0.37, 0.85, 0.27, 0.99, 0.10, 1.00, 0.00, 2.00, 0.00 };
int ILFR = sizeof(FRTB) / sizeof(double);
double FSTB[] = { 0.00, 0.35, 0.31, 0.40, 0.53, 0.43, 0.80, 0.637, 0.94, 0.553, 1.00, 0.10, 1.20, 0.00, 2.10, 0.00 };
int ILFS = sizeof(FSTB) / sizeof(double);
double FOTB[] = { 0.00, 0.00, 0.50, 0.00, 0.80, 0.013, 0.94, 0.316, 1.00, 0.80, 1.20, 1.000, 1.50, 1.000, 2.00, 1.00 };
int ILFO = sizeof(FOTB) / sizeof(double);
double RDRRTB[] = { 0.00, 0.000, 1.50, 0.000, 1.5001, 0.020, 2.00, 0.020 };
int ILRDRR = sizeof(RDRRTB) / sizeof(double);
double RDRSTB[] = { 0.00, 0.000, 1.50, 0.000, 1.5001, 0.020, 2.00, 0.020 };
int ILRDRS = sizeof(RDRSTB) / sizeof(double);
// State initialization at emergence: development stage, organ weights,
// and the per-leaf-class arrays LV (weight), SLA, LVAGE.
double IDANTH = -99;
double DVS = DVSI;
double TSUM = 0;
double FR = AFGEN(FRTB, ILFR, DVS);
double FL = AFGEN(FLTB, ILFL, DVS);
double FS = AFGEN(FSTB, ILFS, DVS);
double FO = AFGEN(FOTB, ILFO, DVS);
double SLA[Y]; SLA[0] = AFGEN(SLATB, ILSLA, DVS);
double LVAGE[Y]; LVAGE[0] = 0;
double ILVOLD = 1;
double WRT = FR*TDWI;
double TADW = (1 - FR)*TDWI;
double WST = FS*TADW;
double WSO = FO*TADW;
double WLV = FL*TADW;
LAIEM = WLV * SLA[0];
double LV[Y]; LV[0] = WLV;
double LASUM = LAIEM;
double LAIEXP = LAIEM;
double LAIMAX = LAIEM;
double LAI[Y]; LAI[int(IDEM) - 1] = LASUM;
// 7-day rolling window of minimum temperatures (-99 = not yet filled)
double TMINRA = 0;
double TMNSAV[7];
for (int i1 = 0; i1<7; i1++)
{
TMNSAV[i1] = -99;
}
double TEMP;
double DTEMP;
double GASS;
double MRES;
double DMI;
double DTSUM;
double DVR;
double AMAX;
double KDIF;
double EFF;
// 3-point Gauss quadrature abscissas/weights for daytime integration
double XGAUSS[3] = { 0.1127017, 0.5000000, 0.8872983 };
double WGAUSS[3] = { 0.2777778, 0.4444444, 0.2777778 };
double PI = 3.1415926;
double DALV;
// Daily integration loop from the emergence day to year end.
for (IDAY = IDEM - 1; IDAY < Y; IDAY++)
{
TEMP = (TMIN[IDAY] + TMAX[IDAY]) / 2;
DTEMP = (TMAX[IDAY] + TEMP) / 2;
// shift the 7-day TMIN window and average its filled entries
for (int i2 = 0; i2 < 6; i2++)
TMNSAV[i2] = TMNSAV[i2 + 1];
TMNSAV[6] = TMIN[IDAY];
TMINRA = 0;
int I4 = 0;
for (int i3 = 0; i3 < 7; i3++)
{
if (TMNSAV[i3] != -99)
{
TMINRA = TMINRA + TMNSAV[i3];
I4 = I4 + 1;
}
}
TMINRA = TMINRA / I4;
// phenological development rate from the temperature sum
DTSUM = AFGEN(DTSMTB, ILDTSM, TEMP);
if (DVS < 1)
DVR = DTSUM / TSUM1;
else
DVR = DTSUM / TSUM2;
AMAX = AFGEN(AMAXTB, ILAMAX, DVS);
AMAX = AMAX * AFGEN(TMPFTB, ILTMPF, DTEMP);
KDIF = AFGEN(KDIFTB, ILKDIF, DVS);
EFF = AFGEN(EFFTB, ILEFF, DTEMP);
// Gauss integration of canopy assimilation over the daylight hours
double DTGA = 0;
double DAYL;
for (int i5 = 0; i5 < 3; i5++)
{
double DEC = -asin(sin(23.45*0.0174533)*cos(2 * PI*(IDAY + 1 + 10) / Y));
double SINLD = sin(0.017453292*43.85)*sin(DEC);
double COSLD = cos(0.017453292*43.85)*cos(DEC);
double AOB = SINLD / COSLD;
DAYL = 12 * (1 + 2 * asin(AOB) / PI);
double HOUR = 12 + 0.5*DAYL*XGAUSS[i5];
double HOUR2 = SINLD + COSLD*cos(2 * PI*(HOUR + 12) / 24);
double SINB = max(0, HOUR2);
double DSINBE = 3600 * (DAYL*(SINLD + 0.4*(SINLD*SINLD + COSLD*COSLD*0.5)) + 12 * COSLD*(2 + 3 * 0.4*SINLD)*sqrt(1 - AOB*AOB) / PI);
double PAR = 0.5*AVRAD[IDAY] * SINB*(1 + 0.4*SINB) / DSINBE;
double SC = 1370 * (1 + 0.033*cos(2 * PI*(IDAY + 1) / Y));
double DSINB = 3600 * (DAYL*SINLD + 24 * COSLD*sqrt(1 - AOB*AOB) / PI);
double ANGOT = SC*DSINB;
double ATMTR = AVRAD[IDAY] / ANGOT;
// diffuse fraction of incoming radiation from atmospheric transmission
// NOTE(review): `0.35 < ATMTR&ATMTR <= 0.75` uses bitwise & — it still
// evaluates both comparisons, but confirm the intended precedence.
double FRDIF;
if (ATMTR > 0.75)
{
FRDIF = 0.23;
}
else
{
if (0.35 < ATMTR&ATMTR <= 0.75)
{
FRDIF = 1.33 - 1.46*ATMTR;
}
else
{
if (0.07 < ATMTR&ATMTR <= 0.35)
{
FRDIF = 1 - 2.3*(ATMTR - 0.07)*(ATMTR - 0.07);
}
else
FRDIF = 1;
}
}
double DIFPP = FRDIF*ATMTR*0.5*SC;
double PARDIF = min(PAR, SINB*DIFPP);
double PARDIR = PAR - PARDIF;
double SCV = 0.2;
double REFH = (1 - sqrt(1 - SCV)) / (1 + sqrt(1 - SCV));
double REFS = REFH * 2 / (1 + 1.6*SINB);
double KDIRBL = (0.5 / SINB)*KDIF / (0.8*sqrt(1 - SCV));
double KDIRT = KDIRBL*sqrt(1 - SCV);
// inner Gauss loop: assimilation at three canopy depths
double FGROS = 0;
for (int i6 = 0; i6 < 3; i6++)
{
double LAIC = LAI[IDAY] * XGAUSS[i6];
double VISDF = (1 - REFS)*PARDIF*KDIF*exp(-KDIF*LAIC);
double VIST = (1 - REFS)*PARDIR*KDIRT*exp(-KDIRT*LAIC);
double VISD = (1 - SCV)*PARDIR*KDIRBL*exp(-KDIRBL*LAIC);
double VISSHD = VISDF + VIST - VISD;
double FGRSH = AMAX*(1 - exp(-VISSHD*EFF / max(2.0, AMAX)));
double VISPP = (1 - SCV)*PARDIR / SINB;
double FGRSUN;
if (VISPP <= 0)
FGRSUN = FGRSH;
else
FGRSUN = AMAX*(1 - (AMAX - FGRSH)*(1 - exp(-VISPP*EFF / max(2.0, AMAX))) / (EFF*VISPP));
double FSLLA = exp(-KDIRBL*LAIC);
double FGL = FSLLA*FGRSUN + (1 - FSLLA)*FGRSH;
FGROS = FGROS + FGL*WGAUSS[i6];
}
FGROS = FGROS*LAI[IDAY];
DTGA = DTGA + FGROS*WGAUSS[i5];
}
DTGA = DTGA*DAYL;
DTGA = DTGA * AFGEN(TMNFTB, ILTMNF, TMINRA);
// gross assimilation, maintenance respiration, and net dry-matter growth
GASS = DTGA * 30 / 44;
double RMRES = (RMR*WRT + RML*WLV + RMS*WST + RMO*WSO)* AFGEN(RFSETB, ILRFSE, DVS);
double TEFF = pow(Q10, (TEMP - 25) / 10);
MRES = min(GASS, RMRES*TEFF);
double ASRC = GASS - MRES;
FR = AFGEN(FRTB, ILFR, DVS);
FL = AFGEN(FLTB, ILFL, DVS);
FS = AFGEN(FSTB, ILFS, DVS);
FO = AFGEN(FOTB, ILFO, DVS);
double CVF = 1 / ((FL / CVL + FS / CVS + FO / CVO)*(1 - FR) + FR / CVR);
DMI = CVF*ASRC;
// partition growth over roots, stems, storage organs, and leaves
double GRRT = FR*DMI;
double DRRT = WRT * AFGEN(RDRRTB, ILRDRR, DVS);
double GWRT = GRRT - DRRT;
double ADMI = (1 - FR)*DMI;
double GRST = FS*ADMI;
double DRST = AFGEN(RDRSTB, ILRDRS, DVS)*WST;
double GWST = GRST - DRST;
double GWSO = FO*ADMI;
double GRLV = FL*ADMI;
// leaf death from self-shading (above critical LAI) and from age > SPAN
// NOTE(review): the `&` conditions below read LV[I7]/LVAGE[I7] before
// testing I7 >= 0, so I7 == -1 indexes out of bounds — confirm inputs
// keep I7 non-negative here.
double LAICR = 3.2 / KDIF;
double DSLV = WLV * LIMIT(0, 0.03, 0.03*(LAI[IDAY] - LAICR) / LAICR);
int I7 = ILVOLD - 1;
while (DSLV > LV[I7] & I7 >= 0)
{
DSLV = DSLV - LV[I7];
I7 = I7 - 1;
}
DALV = 0;
if (LVAGE[I7] > SPAN & DSLV > 0 & I7 >= 0)
{
DALV = LV[I7] - DSLV;
DSLV = 0;
I7 = I7 - 1;
}
while (I7 >= 0 & LVAGE[I7] > SPAN)
{
DALV = DALV + LV[I7];
I7 = I7 - 1;
}
DALV = DALV / DELT;
double FYSDEL = max(0, (TEMP - TBASE) / (35 - TBASE));
double SLAT = AFGEN(SLATB, ILSLA, DVS);
// exponential LAI growth phase is source- or sink-limited
// NOTE(review): GLAIEX is only assigned when LAIEXP < 6 but is read
// unconditionally further below — confirm LAIEXP stays < 6 in practice.
double DTEFF;
double GLAIEX;
double GLASOL;
double GLA;
if (LAIEXP<6)
{
DTEFF = max(0, TEMP - TBASE);
GLAIEX = LAIEXP*RGRLAI*DTEFF;
GLASOL = GRLV*SLAT;
GLA = min(GLAIEX, GLASOL);
if (GRLV>0)
SLAT = GLA / GRLV;
}
// advance phenology; record anthesis day once DVS reaches 1
DVS = DVS + DVR*DELT;
TSUM = TSUM + DTSUM*DELT;
if (DVS >= 1 & IDANTH == -99)
{
IDANTH = IDAY - IDEM;
DVS = 1;
}
// remove today's dead leaf weight from the oldest leaf classes
double DSLVT = DSLV*DELT;
int I8 = ILVOLD - 1;
while (DSLVT > 0 & I8 >= 0)
{
if (DSLVT >= LV[I8])
{
DSLVT = DSLVT - LV[I8];
LV[I8] = 0;
I8 = I8 - 1;
}
else
{
LV[I8] = LV[I8] - DSLVT;
DSLVT = 0;
}
}
while (LVAGE[I8] >= SPAN&I8 >= 0)
{
LV[I8] = 0;
I8 = I8 - 1;
}
ILVOLD = I8 + 1;
// age all surviving leaf classes by one physiologic day and insert
// today's new growth as class 0
int I9;
for (I9 = ILVOLD - 1; I9 > -1; I9--)
{
LV[I9 + 1] = LV[I9];
SLA[I9 + 1] = SLA[I9];
LVAGE[I9 + 1] = LVAGE[I9] + FYSDEL*DELT;
}
ILVOLD = ILVOLD + 1;
LV[0] = GRLV*DELT;
SLA[0] = SLAT;
LVAGE[0] = 0;
// rebuild LAI as the SLA-weighted sum of leaf weights
LASUM = 0;
double tt = 0;
int I10;
for (I10 = 0; I10 < ILVOLD; I10++)
tt = tt + LV[I10] * SLA[I10];
LASUM = tt;
WLV = sum(LV, Y);
LAIEXP = LAIEXP + GLAIEX*DELT;
WRT = WRT + GWRT*DELT;
WST = WST + GWST*DELT;
WSO = WSO + GWSO*DELT;
TADW = WLV + WST + WSO;
LAI[IDAY + 1] = LASUM;
LAIMAX = max(LAI[IDAY + 1], LAIMAX);
// stop conditions: leaf-class overflow, maturity, or crop failure
if (ILVOLD > 364)
break;
if (DVS >= DVSEND)
break;
LAIMAX = max2(LAI, Y);
if (LAIMAX <= 0.002 & DVS > 0.5)
break;
}
// report LAI on the requested monitoring days
for (int i = 0; i < NT; i++)
{
JCLAImoni[i]=LAI[int(param[i+10])];
}
return;
}
// Find the index of the smallest element of a[0..n-1] (first occurrence on ties).
//
// Fix: always write min_idx (0 when a[0] is already the minimum).  The
// original only assigned it inside the loop, so a caller passing an
// uninitialized variable (as pso does for min_idx2) read garbage whenever
// element 0 held the minimum.
__device__ void min2(double *a, int n, int &min_idx)
{
    min_idx = 0;
    double min = a[0];
    for (int i = 1; i < n; i++)   // i=0 can be skipped: min/min_idx already cover it
    {
        if (min > a[i])
        {
            min = a[i];
            min_idx = i;
        }
    }
}
// PSO (particle swarm optimization) kernel: one thread optimizes one pixel.
//
// Each of the K threads fits a single scalar parameter (fed to LAIcal) so the
// simulated LAI at the NT monitoring days matches this pixel's measured series
// in MLAI.  The best value found is written to gbest[idx].
//
// param layout (set by the host): [2]/[3] = search range lo/hi, [4]/[5] =
// acceleration coefficients, [6]/[7] = inertia weight start/end, [8] =
// convergence tolerance, [9] = stall-epoch limit, [10..10+NT-1] = monitoring days.
// rand seeds curand so each thread gets an independent stream.
__global__ void pso(double *param, double *MLAI, double *TMIN, double *TMAX, double *AVRAD, long rand, double *gbest)
{
    curandState state;
    int idx = threadIdx.x+blockDim.x*blockIdx.x;
    if(idx<K)
    {
        long seed = rand;
        curand_init(seed, idx, 0, &state);
        double mv = (param[3] - param[2]) / 2;   // max velocity = half the search range
        // Copy this pixel's measured series into thread-local storage.
        // Fix: was MLAI[idx*2 + s] -- the host flattens the array as
        // MLAI1[p*NT + q], so the per-pixel stride must be NT.
        double MLAI1[NT];
        for (int s = 0; s < NT; s++)
        {
            MLAI1[s] = MLAI[idx*NT + s];
        }
        double vel[PS];
        double pos[PS];
        double pbest[PS2];
        double randnum1;
        double randnum2;
        // Random initial velocities/positions inside the search range; only
        // even-indexed particles seed the personal bests (pbest holds PS2 entries).
        for (int i = 0; i < PS; i++)
        {
            randnum1 = abs(curand_uniform_double(&state));
            vel[i] = 2*mv*randnum1 - mv;
            randnum2 = abs(curand_uniform_double(&state));
            pos[i] = (param[3] - param[2])*randnum2 + param[2];
            if(i%2==0)
            {
                pbest[i/2] = pos[i];
            }
        }
        double pbestval[PS2];
        double cost;
        double JCLAImoni[NT];
        // Initial fitness: RMSE-style distance between simulated and measured LAI.
        for (int i = 0; i < PS; i++)
        {
            LAIcal(pos[i], param, TMIN, TMAX, AVRAD, JCLAImoni);
            if(i%2==0)
            {
                cost=0;
                for (int j = 0; j < NT; j++)
                {
                    cost = cost + (MLAI1[j] - JCLAImoni[j])*(MLAI1[j] - JCLAImoni[j]);
                }
                pbestval[i/2] = sqrt(cost) / NT;
            }
        }
        int min_idx = 0;
        min2(pbestval, PS2, min_idx);
        double gbestval = pbestval[min_idx];
        gbest[idx] = pbest[min_idx];
        double cnt2 = 0;                 // consecutive stalled epochs
        double iwt[EP] = { param[6] };   // per-epoch inertia weight; iwt[j] is set each epoch before use
        double pout[PS2];
        double randnum3;
        double randnum4;
        double tmp1=0;
        double tr=0;
        for (int j = 0; j < EP; j++)
        {
            // Re-evaluate all particles (LAIcal's result is only consumed for even i).
            for (int i = 0; i < PS; i++)
            {
                LAIcal(pos[i], param, TMIN, TMAX, AVRAD, JCLAImoni);
                cost = 0;
                if(i%2==0)
                {
                    for (int jj = 0; jj < NT; jj++)
                    {
                        cost = cost + (MLAI1[jj] - JCLAImoni[jj])*(MLAI1[jj] - JCLAImoni[jj]);
                    }
                    pout[i/2] = sqrt(cost) / NT;
                }
            }
            tr = gbestval;   // global best before this epoch's update
            for (int i = 0; i < PS2; i++)
            {
                if (pbestval[i] >= pout[i])
                {
                    pbestval[i] = pout[i];
                    pbest[i] = pos[2*i];
                }
            }
            // Fix: was `int min_idx2;` uninitialized -- min2 previously never
            // wrote it when element 0 was already the minimum.
            int min_idx2 = 0;
            min2(pbestval, PS2, min_idx2);
            double iterbestval = pbestval[min_idx2];
            if (gbestval >= iterbestval)
            {
                gbestval = iterbestval;
                gbest[idx] = pbest[min_idx2];
            }
            // Linear ramp of the inertia weight from param[6] to param[7].
            // NOTE(review): j <= EP is always true inside this loop, so the
            // param[7] branch is dead; the threshold was probably meant to be
            // a ramp length (e.g. 15, matching the /14) -- confirm intent.
            if (j <= EP)
                iwt[j] = ((param[7] - param[6]) / 14 )*(j - 1) + param[6];
            else
                iwt[j] = param[7];
            double ac11;
            double ac22;
            double pbest2[PS];
            for (int i = 0; i < PS; i++)
            {
                // Expand pbest (PS2 entries) so each particle pair shares one.
                // Fix: was pbest[i] / pbest[i-1], which indexes past the
                // PS2-element array once i >= PS2; pair k = i/2 owns pbest[k].
                if(i%2==0)
                {
                    pbest2[i]=pbest[i/2];
                }
                else
                {
                    pbest2[i]=pbest[(i-1)/2];
                }
                randnum3=abs(curand_uniform_double(&state));
                randnum4=abs(curand_uniform_double(&state));
                ac11 = randnum3 * param[4];
                ac22 = randnum4 * param[5];
                // Fix: was iwt[i] -- indexing the per-epoch weight by the
                // particle number reads the wrong entry (and past the array
                // when PS > EP); the update rule uses the current epoch's weight.
                vel[i] = iwt[j] * vel[i] + ac11 * (pbest2[i] - pos[i]) + ac22 * (gbest[idx] - pos[i]);
                vel[i] = LIMIT(-mv, mv, vel[i]);
                pos[i] = pos[i] + vel[i];
                pos[i] = LIMIT(param[2], param[3], pos[i]);
            }
            // Early exit after param[9] consecutive epochs improving by less
            // than the tolerance param[8].
            tmp1 = abs(tr - gbestval);
            if (tmp1 > param[8])
                cnt2 = 0;
            else
            {
                if (tmp1 <= param[8])
                {
                    cnt2 = cnt2 + 1;
                    if (cnt2 >= param[9])
                        break;
                }
            }
        }
    }
}
using namespace std;
//main function
// Host driver: reads per-pixel measured LAI and one year of daily weather,
// launches the per-pixel PSO kernel, and writes the K fitted parameters to
// gbest_2015.txt.  K, NT and Y are compile-time sizes defined elsewhere in
// this file; CHECK() is the project's cudaError_t-checking macro.
int main()
{
srand((unsigned int)time(NULL));	// seed for the kernel's curand sequences
const int nop = NT + 10;	// 10 scalar settings followed by NT monitoring days
double param[nop];
param[0] = 154;	// start day (day of year)
param[1] = Y - param[0] + 1;	// number of simulated days
param[2] = 10;	// search range, lower bound
param[3] = 50;	// search range, upper bound
param[4] = 2.1;	// PSO acceleration coefficient (personal best)
param[5] = 1.6;	// PSO acceleration coefficient (global best)
param[6] = 0.9;	// inertia weight at the first epoch
param[7] = 0.6;	// inertia weight at the last epoch
param[8] = 1e-99;	// convergence tolerance on the global best
param[9] = 10;	// stalled epochs allowed before early termination
param[10] = 194;	// monitoring days (day of year) for the NT observations
param[11] = 210;
param[12] = 216;
param[13] = 259;
int i, j;
// measured LAI: K pixels x NT observation dates
double** MLAI= new double* [K];
for (i=0;i<K;i++)
{
MLAI[i]=new double [NT];
}
// NOTE(review): none of the input streams (fin..fin3) are checked for open
// failure -- a missing file silently leaves the arrays unread
ifstream fin("MLAI_2015.txt");
for (i = 0; i < K; i++)
{
for (j = 0; j<NT; j++)
{
fin >> MLAI[i][j];
}
}
fin.close();
// flatten to pixel-major layout (row p, column q -> p*NT+q) for the device
const int totalpix = K*NT;
double *MLAI1=new double [totalpix];
for (int p = 0; p < K; p++)
{
for (int q = 0; q < NT; q++)
{
MLAI1[p*NT+q] = MLAI[p][q];
}
}
double TMIN[Y];	// daily minimum temperature
ifstream fin1("TMIN_2015.txt");
for (i = 0; i<Y; i++)
fin1 >> TMIN[i];
fin1.close();
double TMAX[Y];	// daily maximum temperature
ifstream fin2("TMAX_2015.txt");
for (i = 0; i<Y; i++)
fin2 >> TMAX[i];
fin2.close();
double AVRAD[Y];	// daily global radiation
ifstream fin3("AVRAD_2015.txt");
for (i = 0; i<Y; i++)
fin3 >> AVRAD[i];
fin3.close();
// scale radiation by 1000 (presumably kJ -> J; confirm the input units)
for (int i = 0; i < Y; i++)
{
AVRAD[i] = AVRAD[i] * 1000;
}
double *gbest=new double [K];	// one fitted parameter per pixel (kernel output)
double *d_param;
double *d_MLAI;
double *d_TMIN;
double *d_TMAX;
double *d_AVRAD;
double *d_gbest;
// NOTE(review): device ordinal 5 is hard-coded and the return value is not
// checked -- confirm the target machine actually has a sixth GPU
cudaSetDevice(5);
// the event pair times everything from allocation through the D2H copy
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start, 0);
cudaError_t cudastate;
cudastate = cudaMalloc((void**)&d_param, sizeof(double)* nop); CHECK(cudastate)
cudastate = cudaMalloc((void**)&d_MLAI, sizeof(double)*totalpix); CHECK(cudastate)
cudastate = cudaMalloc((void**)&d_TMIN, sizeof(double)*Y); CHECK(cudastate)
cudastate = cudaMalloc((void**)&d_TMAX, sizeof(double)*Y); CHECK(cudastate)
cudastate = cudaMalloc((void**)&d_AVRAD, sizeof(double)*Y); CHECK(cudastate)
cudastate = cudaMalloc((void**)&d_gbest, sizeof(double)*K); CHECK(cudastate)
// NOTE(review): unlike the allocations above, these copies are not CHECKed
cudaMemcpy(d_param, param, sizeof(double)* nop, cudaMemcpyHostToDevice);
cudaMemcpy(d_MLAI, MLAI1, sizeof(double)*totalpix, cudaMemcpyHostToDevice);
cudaMemcpy(d_TMIN, TMIN, sizeof(double)*Y, cudaMemcpyHostToDevice);
cudaMemcpy(d_TMAX, TMAX, sizeof(double)*Y, cudaMemcpyHostToDevice);
cudaMemcpy(d_AVRAD, AVRAD, sizeof(double)*Y, cudaMemcpyHostToDevice);
// host-side copies are no longer needed once the data is on the device
for(i=0; i<K; ++i)
{
delete[] MLAI[i];
}
delete[] MLAI;
delete[] MLAI1;
// one thread per pixel; the extra block covers the remainder when K % thread != 0
int thread = 256;
int block = K / thread;
dim3 dimGrid(block + 1);
dim3 dimBlock(thread);
pso <<<dimGrid, dimBlock >>>(d_param, d_MLAI, d_TMIN, d_TMAX, d_AVRAD,rand(), d_gbest);
// synchronize to surface any kernel execution error before copying results
cudastate = cudaDeviceSynchronize(); CHECK(cudastate)
cudaMemcpy(gbest, d_gbest, sizeof(double)*K, cudaMemcpyDeviceToHost);
float GPU_time;
cudaEventRecord(end, 0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&GPU_time, start, end);
cout << "parallel time:"<< GPU_time/1000 << endl;	// elapsed ms -> seconds
// write one fitted value per line
// NOTE(review): fopen's result is not checked before use
FILE *p = fopen("gbest_2015.txt", "wt");
for (int i = 0; i<K; i++)
fprintf(p, "%4.2f\n", gbest[i]);
fclose(p);
delete[] gbest;
cudastate = cudaFree(d_param); CHECK(cudastate)
cudastate = cudaFree(d_MLAI); CHECK(cudastate)
cudastate = cudaFree(d_TMIN); CHECK(cudastate)
cudastate = cudaFree(d_TMAX); CHECK(cudastate)
cudastate = cudaFree(d_AVRAD); CHECK(cudastate)
cudastate = cudaFree(d_gbest); CHECK(cudastate)
}
|
11,096 | #include "includes.h"
#ifndef __CUDACC__
#define __CUDACC__
#endif
// Tiled square matrix multiply: P = M * N, all width x width, row-major.
// (The original header comment, "generate a random square matrix", did not
// describe this kernel.)
//
// Launch with blockDim = (TILE, TILE) and a grid of
// ceil(width/TILE) x ceil(width/TILE) blocks.  Widths that are not a
// multiple of TILE are now handled: partial tiles are zero-padded on load
// and out-of-range threads skip the final store (the original required
// width % 2 == 0 and an exactly matching grid).
__global__ void matMulKernel2(float* P, float* M, float* N, int width) {
    const int TILE = 2;                  // tile edge; must equal blockDim.x and blockDim.y
    __shared__ float Mds[TILE][TILE];
    __shared__ float Nds[TILE][TILE];
    int bx = blockIdx.x; int by = blockIdx.y;
    int tx = threadIdx.x; int ty = threadIdx.y;
    int row = by*TILE + ty; int col = bx*TILE + tx;
    float pVal = 0.0f;
    // round up so a partial final tile along the shared dimension is included
    for (int ph = 0; ph < (width + TILE - 1)/TILE; ++ph) {
        int mCol = ph*TILE + tx;         // column of M this thread stages
        int nRow = ph*TILE + ty;         // row of N this thread stages
        // zero-fill out-of-range elements so they contribute nothing
        Mds[ty][tx] = (row < width && mCol < width) ? M[row*width + mCol] : 0.0f;
        Nds[ty][tx] = (nRow < width && col < width) ? N[nRow*width + col] : 0.0f;
        __syncthreads();                 // whole tile staged before anyone reads it
        for (int k = 0; k < TILE; ++k)
            pVal += Mds[ty][k]*Nds[k][tx];
        __syncthreads();                 // everyone done reading before the next load
    }
    // threads past the matrix edge (partial boundary blocks) write nothing
    if (row < width && col < width)
        P[row*width + col] = pVal;
}
11,097 |
#ifndef block_size_x
#define block_size_x 128
#endif
#ifndef threshold
#define threshold 3
#endif
/*
* Helper function that does the reduction step of the algorithm
*
* This function reduces the values in a thread block to a single value
*
*/
// Block-wide tree reduction over two shared arrays at once:
//   sh_min[0] <- minimum of the per-thread values that are >= threshold
//               (values below threshold are treated as "no candidate")
//   sh_sum[0] <- sum of the per-thread counts
// Requires blockDim.x == block_size_x (a power of two) and must be called by
// every thread of the block, since it contains __syncthreads().
__device__ __forceinline__ void reduce_min_num(int *sh_min, int *sh_sum, int lmin, int lnum, int ti) {
    sh_min[ti] = lmin;
    sh_sum[ti] = lnum;
    __syncthreads();
    #pragma unroll
    for (unsigned int stride = block_size_x/2; stride > 0; stride >>= 1) {
        if (ti < stride) {
            int mine = sh_min[ti];
            int partner = sh_min[ti + stride];
            // adopt the partner's value only when it is a valid candidate
            // (>= threshold) and either we hold no valid candidate yet or
            // the partner's is smaller
            bool adopt = (partner >= threshold) && (mine < threshold || partner < mine);
            if (adopt) {
                sh_min[ti] = partner;
            }
            sh_sum[ti] += sh_sum[ti + stride];
        }
        __syncthreads();   // outside the divergent if: all threads reach it
    }
}
/*
* This kernel recomputes the degree of each node in the graph and computes
* the minimum degree of all nodes with at least one edge.
*
* output arguments:
*
* minimum an array containing a per-thread block minimum degree
*
* num_nodes will contain the per-thread block sum of the number of nodes
* with at least 'threshold' edges
*
* degrees contains the degree of all nodes and is updated with the current degree
*
* input arguments:
*
* row_idx is the index of the node
* col_idx is the index of the node to which this node has an edge, this index
* can be -1 if the edge has been removed
*
* prefix_sum contains the start index of each row, because elements can be removed
* this subtracting two consecutive numbers no longer indicates the degree
*
* n is the number of nodes in the graph
*/
__global__ void minimum_degree(int *minimum, int *num_nodes, int *degrees, int *row_idx, int *col_idx, int *prefix_sum, int n) {
int ti = threadIdx.x;
int i = blockIdx.x * block_size_x + ti;
// degree stays 0 for out-of-range threads (i >= n); they still take part in
// the block-wide reduction below, and since 0 < threshold they can never win
// the minimum nor add to num_nodes
int degree = 0;
if (i<n) {
//obtain indices for reading col_idx
int start = 0;
if (i>0) {
start = prefix_sum[i-1];
}
int end = prefix_sum[i];
// previous degree is an upper bound (edges are only removed, marked -1),
// so counting can stop early once it is reached
int max_degree = degrees[i];
//get the degree of this node
for (int k=start; k<end && degree < max_degree; k++) {
if (col_idx[k] != -1) {
degree++;
}
}
//update degrees array
degrees[i] = degree;
}
// NOTE: row_idx appears unused in this kernel -- presumably kept for
// signature symmetry with sibling kernels; confirm before removing
//start the reduce
//get the minimum value larger than 0
//and the total number of nodes with degree >= threshold (at least 'threshold' edges)
// reduce_min_num contains __syncthreads(), so this call must stay outside
// the if (i<n) branch and be reached by every thread of the block
__shared__ int sh_min[block_size_x];
__shared__ int sh_sum[block_size_x];
int lnum = 0;
if (degree >= threshold) {
lnum = 1;
}
reduce_min_num(sh_min, sh_sum, degree, lnum, ti);
//write output: one partial result per thread block, combined later by
//combine_blocked_min_num
if (ti == 0) {
minimum[blockIdx.x] = sh_min[0];
num_nodes[blockIdx.x] = sh_sum[0];
}
}
/*
* Helper kernel to combine per-thread block results into single values
*
* call with 1 thread block, block_size_x should be sufficiently large
*/
// Folds the n per-block partial results produced by minimum_degree into a
// single minimum and a single count, stored in-place at index 0.
// Launch with exactly one block; requires n <= block_size_x.
__global__ void combine_blocked_min_num(int *minimum, int *num_nodes, int n) {
    int ti = threadIdx.x;
    __shared__ int sh_min[block_size_x];
    __shared__ int sh_sum[block_size_x];
    // threads beyond n contribute neutral values: 0 is below threshold so it
    // never wins the minimum, and it adds nothing to the sum
    int lmin = (ti < n) ? minimum[ti] : 0;
    int lnum = (ti < n) ? num_nodes[ti] : 0;
    reduce_min_num(sh_min, sh_sum, lmin, lnum, ti);
    // thread 0 publishes the combined results
    if (ti == 0) {
        minimum[0] = sh_min[0];
        num_nodes[0] = sh_sum[0];
    }
}
|
11,098 | /*
* Sample program that uses CUDA to perform element-wise add of two
* vectors. Each element is the responsibility of a separate thread.
*
* compile with:
* nvcc -o addVectors addVectors.cu
* run with:
* ./addVectors
*/
#include <stdio.h>
//problem size (vector length):
#define N 1000000
// Element-wise vector add on the GPU: res[i] = a[i] + b[i].
// Each thread handles one index i; the guard covers the rounded-up last block.
__global__ void kernel(int* res, int* a, int* b) {
    int thread_id = threadIdx.x + blockIdx.x*blockDim.x;
    if(thread_id < N) {
        // Fix: the shipped line was `res[thread_id] = thread_id + thread_id;`
        // (debug leftover that bypassed the inputs, with the real addition
        // commented out); restore the addition the file documents.
        res[thread_id] = a[thread_id] + b[thread_id];
    }
}
// Abort the program with a readable message when a CUDA runtime call fails.
// Pass it the cudaError_t returned by any CUDA API function.
void check(cudaError_t retVal) {
    if(retVal == cudaSuccess)
        return;   // success: nothing to report
    fprintf(stderr, "ERROR: %s\n", cudaGetErrorString(retVal));
    exit(1);
}
// Host driver: allocates the vectors, copies the inputs to the GPU, times
// transfer + kernel + copy-back with CUDA events, and prints the elapsed time.
int main() {
    int* a;       //input arrays (on host)
    int* b;
    int* res;     //output array (on host)
    int* a_dev;   //input arrays (on GPU)
    int* b_dev;
    int* res_dev; //output array (on GPU)
    //allocate memory
    a = (int*) malloc(N*sizeof(int));
    b = (int*) malloc(N*sizeof(int));
    res = (int*) malloc(N*sizeof(int));
    check(cudaMalloc((void**) &a_dev, N*sizeof(int)));
    check(cudaMalloc((void**) &b_dev, N*sizeof(int)));
    check(cudaMalloc((void**) &res_dev, N*sizeof(int)));
    //set up contents of a and b
    for(int i=0; i<N; i++)
        a[i] = b[i] = i;
    //allocate timers
    cudaEvent_t start;
    check(cudaEventCreate(&start));
    cudaEvent_t stop;
    check(cudaEventCreate(&stop));
    //start timer
    check(cudaEventRecord(start,0));
    //transfer a and b to the GPU
    //Fix: these copies were commented out, leaving the device inputs
    //uninitialized for any kernel that actually reads them.
    check(cudaMemcpy(a_dev, a, N*sizeof(int), cudaMemcpyHostToDevice));
    check(cudaMemcpy(b_dev, b, N*sizeof(int), cudaMemcpyHostToDevice));
    //call the kernel
    int threads = 512; //# threads per block
    int blocks = (N+threads-1)/threads; //# blocks (N/threads rounded up)
    kernel<<<blocks,threads>>>(res_dev, a_dev, b_dev);
    //kernel launches do not return a cudaError_t; query it explicitly
    check(cudaGetLastError());
    //transfer res to the host (this blocking copy also synchronizes with the kernel)
    check(cudaMemcpy(res, res_dev, N*sizeof(int), cudaMemcpyDeviceToHost));
    //stop timer and print time
    check(cudaEventRecord(stop,0));
    check(cudaEventSynchronize(stop));
    float diff;
    check(cudaEventElapsedTime(&diff, start, stop));
    printf("time: %f ms\n", diff);
    //deallocate timers
    check(cudaEventDestroy(start));
    check(cudaEventDestroy(stop));
    /*
    //verify results
    for(int i=0; i<N; i++)
        if(a[i] + b[i] != res[i])
            printf("%d ", res[i]);
    printf("\n");
    */
    //free the memory
    free(a);
    free(b);
    free(res);
    check(cudaFree(a_dev));
    check(cudaFree(b_dev));
    check(cudaFree(res_dev));
}
|
11,099 | #include "includes.h"
// Element-wise add of two arrays laid out as rows of `size` elements:
// one block per row, one thread per element within the row, so
// c[row*size + col] = a[...] + b[...].
// Launch with blockDim.x == size.  The guard below keeps oversized blocks
// from spilling into the next row (the original had no bounds check, so
// blockDim.x > size produced overlapping, racing writes across rows).
__global__ void add(int *a, int *b, int *c,int size) {
    if (threadIdx.x < size) {
        int i = size*blockIdx.x + threadIdx.x;   // flat index of this element
        c[i] = a[i] + b[i];
    }
}
11,100 | #include "includes.h"
/*
* JCuda - Java bindings for NVIDIA CUDA driver and runtime API
* http://www.jcuda.org
*
*
* This code is based on the NVIDIA 'reduction' CUDA sample,
* Copyright 1993-2010 NVIDIA Corporation.
*/
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
// Numerically stable exponential shift: g_odata[i] = exp(g_idata[i] - g_idata[maxIndx]).
// Subtracting the (precomputed) maximum element keeps exp() from overflowing;
// maxIndx is the index of that maximum, found by an earlier reduction.
// One thread per element; the guard covers the rounded-up final block.
__global__ void normalize(double *g_idata, double *g_odata, unsigned int n, int maxIndx)
{
    const double peak = g_idata[maxIndx];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if(tid < n)
    {
        g_odata[tid] = exp(g_idata[tid] - peak);
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.