serial_no
int64
1
24.2k
cuda_source
stringlengths
11
9.01M
12,801
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
#include <curand.h>
#include <curand_kernel.h>

#define Nblock 1000
#define Nthread 1000
#define Ngrid 1

/* One curand state per global thread, seeded with a fixed seed (9999) so
 * runs are reproducible; the thread index is used as the subsequence. */
__global__ void setup(curandState *state) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init(9999, index, 0, &state[index]);
}

/* Each thread draws `iteration` uniform points in the unit square and
 * stores its local estimate of pi (4 * hits / trials) in sum[index]. */
__global__ void piEs(double *sum, int iteration, curandState *state) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int count = 0;
    for (int i = 0; i < iteration; i++) {
        double x = curand_uniform_double(&state[index]);
        double y = curand_uniform_double(&state[index]);
        if (x * x + y * y <= 1.0)
            count++;
    }
    sum[index] = 4.0 * count / iteration;
}

int main(void) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    int Athread = Nblock * Nthread; /* total number of estimating threads */
    int iter = 1000000;
    double pi = 0.0;

    double *piSumHost = (double *)malloc(Athread * sizeof(double));

    /* BUG FIX: every size below was passed as an element count instead of a
     * byte count, under-allocating each device buffer by 8x (or more for
     * curandState). */
    double *piSumDev;
    cudaMalloc((void **)&piSumDev, Athread * sizeof(double));
    cudaMemset(piSumDev, 0, Athread * sizeof(double));

    curandState *stateDev;
    cudaMalloc((void **)&stateDev, Athread * sizeof(curandState));

    cudaEventRecord(start);
    /* BUG FIX: the original launched <<<Ngrid, Nblock, Nthread>>>, i.e. one
     * block of 1000 threads with Nthread bytes of dynamic shared memory —
     * only 1000 of the Athread states/sums were ever written. */
    setup<<<Nblock, Nthread>>>(stateDev);
    piEs<<<Nblock, Nthread>>>(piSumDev, iter, stateDev);
    cudaEventRecord(stop);

    cudaMemcpy(piSumHost, piSumDev, Athread * sizeof(double),
               cudaMemcpyDeviceToHost);

    /* The stop event must complete before its timestamp can be read. */
    cudaEventSynchronize(stop);
    float runningTime = 0;
    cudaEventElapsedTime(&runningTime, start, stop);
    printf("CUDA done, took %f ms\n", runningTime);

    for (int i = 0; i < Athread; i++)
        pi += piSumHost[i];
    /* BUG FIX: average over every contributing thread, not just Nblock. */
    pi /= Athread;
    printf("Pi estimate = %.10lf\n", pi);

    free(piSumHost);
    cudaFree(piSumDev);
    cudaFree(stateDev); /* BUG FIX: stateDev was leaked */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
12,802
extern "C" { typedef struct { float e[4]; } array_11173; __device__ inline int threadIdx_x() { return threadIdx.x; } __device__ inline int threadIdx_y() { return threadIdx.y; } __device__ inline int threadIdx_z() { return threadIdx.z; } __device__ inline int blockIdx_x() { return blockIdx.x; } __device__ inline int blockIdx_y() { return blockIdx.y; } __device__ inline int blockIdx_z() { return blockIdx.z; } __device__ inline int blockDim_x() { return blockDim.x; } __device__ inline int blockDim_y() { return blockDim.y; } __device__ inline int blockDim_z() { return blockDim.z; } __device__ inline int gridDim_x() { return gridDim.x; } __device__ inline int gridDim_y() { return gridDim.y; } __device__ inline int gridDim_z() { return gridDim.z; } __global__ void lambda_40799(float*, float*); __global__ __launch_bounds__ (128 * 1 * 1) void lambda_40799(float* _40802_44104, float* _40803_44105) { int threadIdx_x_44114; int pthreadIdx_x_44114; int blockDim_x_44120; int pblockDim_x_44120; int blockIdx_x_44126; int pblockIdx_x_44126; int threadIdx_y_44132; int pthreadIdx_y_44132; int blockDim_y_44138; int pblockDim_y_44138; int blockIdx_y_44144; int pblockIdx_y_44144; int converge_44153; int pconverge_44153; int converge_44158; int pconverge_44158; float _44170; float p_44170; int converge_44176; int pconverge_44176; int converge_44180; int pconverge_44180; float _44190; float p_44190; int converge_44194; int pconverge_44194; int converge_44198; int pconverge_44198; float _44206; float p_44206; int converge_44211; int pconverge_44211; int converge_44215; int pconverge_44215; float _44223; float p_44223; int lower_44274; int plower_44274; int upper_44275; int pupper_44275; int step_44276; int pstep_44276; array_11173 tmp_44183_slot; array_11173* tmp_44183; tmp_44183 = &tmp_44183_slot; threadIdx_x_44114 = threadIdx_x(); pthreadIdx_x_44114 = threadIdx_x_44114; l44112: ; threadIdx_x_44114 = pthreadIdx_x_44114; blockDim_x_44120 = blockDim_x(); pblockDim_x_44120 = 
blockDim_x_44120; l44118: ; blockDim_x_44120 = pblockDim_x_44120; blockIdx_x_44126 = blockIdx_x(); pblockIdx_x_44126 = blockIdx_x_44126; l44124: ; blockIdx_x_44126 = pblockIdx_x_44126; threadIdx_y_44132 = threadIdx_y(); pthreadIdx_y_44132 = threadIdx_y_44132; l44130: ; threadIdx_y_44132 = pthreadIdx_y_44132; blockDim_y_44138 = blockDim_y(); pblockDim_y_44138 = blockDim_y_44138; l44136: ; blockDim_y_44138 = pblockDim_y_44138; blockIdx_y_44144 = blockIdx_y(); pblockIdx_y_44144 = blockIdx_y_44144; l44142: ; blockIdx_y_44144 = pblockIdx_y_44144; int _44146; _44146 = blockDim_x_44120 * blockIdx_x_44126; int _44147; _44147 = threadIdx_x_44114 + _44146; int _44148; _44148 = -2 + _44147; bool _44150; _44150 = _44148 < 0; if (_44150) goto l44151; else goto l44367; l44367: ; pconverge_44153 = _44148; goto l44152; l44151: ; pconverge_44153 = 0; goto l44152; l44152: ; converge_44153 = pconverge_44153; bool _44155; _44155 = 4096 <= converge_44153; if (_44155) goto l44156; else goto l44366; l44366: ; pconverge_44158 = converge_44153; goto l44157; l44156: ; pconverge_44158 = 4095; goto l44157; l44157: ; converge_44158 = pconverge_44158; int _44163; _44163 = blockDim_y_44138 * blockIdx_y_44144; int _44164; _44164 = threadIdx_y_44132 + _44163; int _44165; _44165 = 4096 * _44164; int _44166; _44166 = _44165 + converge_44158; float* idx_44167; idx_44167 = _40803_44105 + _44166; _44170 = __ldg(idx_44167); p_44170 = _44170; l44168: ; _44170 = p_44170; int _44172; _44172 = -1 + _44147; bool _44173; _44173 = _44172 < 0; if (_44173) goto l44174; else goto l44365; l44365: ; pconverge_44176 = _44172; goto l44175; l44174: ; pconverge_44176 = 0; goto l44175; l44175: ; converge_44176 = pconverge_44176; bool _44177; _44177 = 4096 <= converge_44176; if (_44177) goto l44178; else goto l44364; l44364: ; pconverge_44180 = converge_44176; goto l44179; l44178: ; pconverge_44180 = 4095; goto l44179; l44179: ; converge_44180 = pconverge_44180; int _44186; _44186 = _44165 + converge_44180; float* 
idx_44187; idx_44187 = _40803_44105 + _44186; float* _44184; _44184 = &tmp_44183->e[0]; *_44184 = _44170; _44190 = __ldg(idx_44187); p_44190 = _44190; l44188: ; _44190 = p_44190; bool _44191; _44191 = _44147 < 0; if (_44191) goto l44192; else goto l44363; l44363: ; pconverge_44194 = _44147; goto l44193; l44192: ; pconverge_44194 = 0; goto l44193; l44193: ; converge_44194 = pconverge_44194; bool _44195; _44195 = 4096 <= converge_44194; if (_44195) goto l44196; else goto l44362; l44362: ; pconverge_44198 = converge_44194; goto l44197; l44196: ; pconverge_44198 = 4095; goto l44197; l44197: ; converge_44198 = pconverge_44198; int _44202; _44202 = _44165 + converge_44198; float* idx_44203; idx_44203 = _40803_44105 + _44202; float* unroll_44200; unroll_44200 = &tmp_44183->e[1]; *unroll_44200 = _44190; _44206 = __ldg(idx_44203); p_44206 = _44206; l44204: ; _44206 = p_44206; int _44207; _44207 = 1 + _44147; bool _44208; _44208 = _44207 < 0; if (_44208) goto l44209; else goto l44361; l44361: ; pconverge_44211 = _44207; goto l44210; l44209: ; pconverge_44211 = 0; goto l44210; l44210: ; converge_44211 = pconverge_44211; bool _44212; _44212 = 4096 <= converge_44211; if (_44212) goto l44213; else goto l44360; l44360: ; pconverge_44215 = converge_44211; goto l44214; l44213: ; pconverge_44215 = 4095; goto l44214; l44214: ; converge_44215 = pconverge_44215; int _44219; _44219 = _44165 + converge_44215; float* _44217; _44217 = &tmp_44183->e[2]; float* idx_44220; idx_44220 = _40803_44105 + _44219; *_44217 = _44206; _44223 = __ldg(idx_44220); p_44223 = _44223; l44221: ; _44223 = p_44223; float* _44225; _44225 = &tmp_44183->e[3]; *_44225 = _44223; float _44227; _44227 = *_44184; float _44231; _44231 = _44227; float _44229; _44229 = *unroll_44200; float _44230; _44230 = _44229; bool _44232; _44232 = _44230 < _44231; if (_44232) goto l44233; else goto l44358; l44358: ; goto l44234; l44233: ; float _44350; _44350 = *_44184; float _44356; _44356 = _44350; float _44352; _44352 = 
*unroll_44200; float _44354; _44354 = _44352; *_44184 = _44354; *unroll_44200 = _44356; goto l44234; l44234: ; float _44236; _44236 = *_44217; float _44240; _44240 = _44236; float _44238; _44238 = *_44225; float _44239; _44239 = _44238; bool _44241; _44241 = _44239 < _44240; if (_44241) goto l44242; else goto l44348; l44348: ; goto l44243; l44242: ; float _44340; _44340 = *_44217; float _44346; _44346 = _44340; float _44342; _44342 = *_44225; float _44344; _44344 = _44342; *_44217 = _44344; *_44225 = _44346; goto l44243; l44243: ; float _44245; _44245 = *_44184; float _44249; _44249 = _44245; float _44247; _44247 = *_44217; float _44248; _44248 = _44247; bool _44250; _44250 = _44248 < _44249; if (_44250) goto l44251; else goto l44338; l44338: ; goto l44252; l44251: ; float _44330; _44330 = *_44184; float _44332; _44332 = *_44217; float _44336; _44336 = _44330; float _44334; _44334 = _44332; *_44184 = _44334; *_44217 = _44336; goto l44252; l44252: ; float _44254; _44254 = *unroll_44200; float _44258; _44258 = _44254; float _44256; _44256 = *_44225; float _44257; _44257 = _44256; bool _44259; _44259 = _44257 < _44258; if (_44259) goto l44260; else goto l44328; l44328: ; goto l44261; l44260: ; float _44320; _44320 = *unroll_44200; float _44326; _44326 = _44320; float _44322; _44322 = *_44225; float _44324; _44324 = _44322; *unroll_44200 = _44324; *_44225 = _44326; goto l44261; l44261: ; float _44263; _44263 = *unroll_44200; float _44267; _44267 = _44263; float _44265; _44265 = *_44217; float _44266; _44266 = _44265; bool _44268; _44268 = _44266 < _44267; if (_44268) goto l44269; else goto l44318; l44318: ; goto l44270; l44269: ; float _44310; _44310 = *unroll_44200; float _44316; _44316 = _44310; float _44312; _44312 = *_44217; float _44314; _44314 = _44312; *unroll_44200 = _44314; *_44217 = _44316; goto l44270; l44270: ; plower_44274 = 3; pupper_44275 = 3; pstep_44276 = 2; goto l44272; l44272: ; lower_44274 = plower_44274; upper_44275 = pupper_44275; step_44276 = 
pstep_44276; bool _44277; _44277 = lower_44274 < upper_44275; if (_44277) goto l44278; else goto l44302; l44302: ; int _44305; _44305 = _44165 + _44147; float _44303; _44303 = *unroll_44200; float* idx_44306; idx_44306 = _40802_44104 + _44305; float _44307; _44307 = _44303; *idx_44306 = _44307; return ; l44278: ; int _44282; _44282 = 1 + lower_44274; float* idx_44279; idx_44279 = &tmp_44183->e[lower_44274]; float _44280; _44280 = *idx_44279; float* idx_44283; idx_44283 = &tmp_44183->e[_44282]; float _44286; _44286 = _44280; float _44284; _44284 = *idx_44283; float _44285; _44285 = _44284; bool _44287; _44287 = _44285 < _44286; if (_44287) goto l44288; else goto l44301; l44301: ; goto l44289; l44288: ; float _44293; _44293 = *idx_44279; float _44299; _44299 = _44293; float _44295; _44295 = *idx_44283; float _44297; _44297 = _44295; *idx_44279 = _44297; *idx_44283 = _44299; goto l44289; l44289: ; int _44291; _44291 = lower_44274 + step_44276; plower_44274 = _44291; pupper_44275 = upper_44275; pstep_44276 = step_44276; goto l44272; } }
12,803
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
#include <cstdio>

void printProduct(const int *part2ResultsBasicStride);

using namespace std;

/* Part 1: find two (possibly identical-index) entries whose sum is 2020 and
 * write their product to bar[0]. Threads stride over the outer index. */
__global__ void part1DeviceCodeBasicStride(int *foo, int *bar, int N)
{
    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < N; i += stride) {
        for (int x = i; x > -1; x--) { // no more duplicate compares.
            if (foo[i] + foo[x] == 2020) {
                bar[0] = foo[i] * foo[x];
                return; // break
            }
        }
    }
}

/* Part 2: same idea for three entries summing to 2020. */
__global__ void part2DeviceCodeBasicStride(int *foo, int *bar, int N)
{
    int index = threadIdx.x;
    int stride = blockDim.x;
    for (int i = index; i < N; i += stride) {
        for (int x = i; x > -1; x--) { // no more duplicate compares.
            for (int y = x; y > -1; y--) { // no more duplicate compares.
                if (foo[i] + foo[x] + foo[y] == 2020) {
                    bar[0] = foo[i] * foo[x] * foo[y];
                    return; // break
                }
            }
        }
    }
}

int main()
{
    // Read the input on the host first so the managed buffer can be sized
    // to the actual number of values.
    ifstream inputFile("../input.txt");
    string line;
    vector<int> values;
    while (getline(inputFile, line)) {
        values.push_back(stoi(line));
    }
    inputFile.close();
    int N = (int)values.size();

    int *input;
    int *part1ResultsBasicStride;
    int *part2ResultsBasicStride;
    // BUG FIX: the original allocated sizeof(int) — a single element — for
    // the input buffer and then wrote N entries into it (heap overflow).
    cudaMallocManaged(&input, N * sizeof(int));                 // input data
    cudaMallocManaged(&part1ResultsBasicStride, sizeof(int));   // pt1 (stride)
    cudaMallocManaged(&part2ResultsBasicStride, sizeof(int));   // pt2 (stride)
    for (int i = 0; i < N; i++) {
        input[i] = values[i];
    }

    // GPU COMPUTE STARTS HERE:
    part1DeviceCodeBasicStride<<<1, 512>>>(input, part1ResultsBasicStride, N);
    part2DeviceCodeBasicStride<<<1, 512>>>(input, part2ResultsBasicStride, N);

    // Wait for GPU work to finish.
    cudaDeviceSynchronize();

    // ANSWERS -->
    printf("Part 1\n");
    printProduct(part1ResultsBasicStride);
    printf("Part 2\n");
    printProduct(part2ResultsBasicStride);

    cudaFree(input);
    cudaFree(part1ResultsBasicStride);
    cudaFree(part2ResultsBasicStride);
    return 0;
}

void printProduct(const int *results)
{
    printf("Product: %d\n\n", results[0]);
}
12,804
#include <stdio.h>

#define tw 2

/* Element-wise C = A + B for an n x n row-major matrix; one thread per
 * element, tw x tw threads per block. Both coordinates are guarded so the
 * kernel is correct even when n is not a multiple of tw. */
__global__ void matadd(int *a, int *b, int *c, int n) {
    int ix = tw * blockIdx.x + threadIdx.x;
    int iy = tw * blockIdx.y + threadIdx.y;
    /* BUG FIX: the old guard (idx < n*n) let out-of-row threads wrap into
     * the next row; check each coordinate instead. */
    if (ix < n && iy < n) {
        int idx = iy * n + ix;
        c[idx] = a[idx] + b[idx];
    }
}

int main(void) {
    int n;
    scanf("%d", &n);

    int a[n][n];
    int b[n][n];
    int c[n][n];
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            scanf("%d", &a[i][j]);
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            scanf("%d", &b[i][j]);

    int *a_d, *b_d, *c_d;
    cudaMalloc((void **)&a_d, n * n * sizeof(int));
    cudaMalloc((void **)&b_d, n * n * sizeof(int));
    cudaMalloc((void **)&c_d, n * n * sizeof(int));
    cudaMemcpy(a_d, a, n * n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b, n * n * sizeof(int), cudaMemcpyHostToDevice);

    /* BUG FIX: ceil-divide so odd n still gets full coverage (was n/2). */
    dim3 dimGrid((n + tw - 1) / tw, (n + tw - 1) / tw, 1);
    dim3 dimBlock(tw, tw, 1);
    matadd<<<dimGrid, dimBlock>>>(a_d, b_d, c_d, n);

    cudaMemcpy(c, c_d, n * n * sizeof(int), cudaMemcpyDeviceToHost);

    /* BUG FIX: the original printed &c[i] (row pointers) with %d; print the
     * actual matrix values instead. */
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++)
            printf("%d ", c[i][j]);
    printf("\n");

    /* BUG FIX: device buffers were leaked. */
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    return 0;
}
12,805
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>

/* Consecutive pixels computed by each thread along its row. */
#define BATCH_SIZE 5

/* Mandelbrot escape-iteration counts for a width x height view.
 * Grid layout: blockIdx.y selects the row; each thread fills BATCH_SIZE
 * consecutive pixels. Both coordinates are bounds-checked, so the kernel
 * works for any resolution, not just 1600x1200. */
__global__ void mandelKernel(int *d_img, int maxIter, float stepX, float stepY,
                             float lowerX, float lowerY, int width, int height)
{
    int thisX = (blockDim.x * blockIdx.x + threadIdx.x) * BATCH_SIZE;
    int thisY = blockIdx.y;
    if (thisY >= height)
        return;

    for (int j = 0; j < BATCH_SIZE; j++) {
        int px = thisX + j;
        if (px >= width)
            return;
        // Compute the coordinate from integers each time to avoid
        // accumulating floating-point error (see original pseudo code).
        float x = lowerX + px * stepX;
        float y = lowerY + thisY * stepY;
        float z_x = x;
        float z_y = y;
        int i;
        for (i = 0; i < maxIter; i++) {
            if (z_x * z_x + z_y * z_y > 4.f)
                break;
            float new_x = z_x * z_x - z_y * z_y;
            float new_y = 2.f * z_x * z_y;
            z_x = x + new_x;
            z_y = y + new_y;
        }
        d_img[thisY * width + px] = i;
    }
}

/* Host front-end: allocates device memory, launches the kernel, and copies
 * the resX * resY iteration counts back into img. */
void hostFE(float upperX, float upperY, float lowerX, float lowerY,
            int *img, int resX, int resY, int maxIterations)
{
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;

    int N = resX * resY;
    int *d_img;
    // BUG FIX: the original used cudaMallocPitch but then indexed the buffer
    // as tightly packed (hard-coded row stride 1600) and copied it back with
    // a plain cudaMemcpy — wrong results whenever pitch != width * 4.
    // A linear allocation matches how the buffer is actually addressed.
    cudaMalloc(&d_img, N * sizeof(int));

    // Four blocks per row; each thread covers BATCH_SIZE pixels.
    dim3 blockSize((resX + 4 * BATCH_SIZE - 1) / (4 * BATCH_SIZE));
    dim3 blockNum(4, resY);
    mandelKernel<<<blockNum, blockSize>>>(d_img, maxIterations, stepX, stepY,
                                          lowerX, lowerY, resX, resY);
    cudaDeviceSynchronize();

    cudaMemcpy(img, d_img, N * sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_img); // BUG FIX: d_img was leaked on every call
}
12,806
#include <stdio.h>

#define N 8192
#define THREAD_PER_BLOCK_SIDE_X 8
#define THREAD_PER_BLOCK_SIDE_Y 16
/* BUG FIX: parenthesized so the product survives adjacent operators. */
#define THREAD_PER_BLOCK (THREAD_PER_BLOCK_SIDE_X * THREAD_PER_BLOCK_SIDE_Y)
#define TYPE int
#define TYPE_S "int"

/* Writes out = transpose(in) for a size x size row-major matrix, staging
 * each block's elements through shared memory. */
__global__ void transpose(TYPE *in, TYPE *out, int size)
{
    /* [y][x] layout: a warp's consecutive threadIdx.x values map to
     * consecutive shared-memory banks. The original [x][y] layout made
     * lanes stride 16 words apart, causing bank conflicts. */
    __shared__ TYPE temp_matrix[THREAD_PER_BLOCK_SIDE_Y][THREAD_PER_BLOCK_SIDE_X];

    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;

    // copy submatrix in shared memory (coalesced read)
    temp_matrix[threadIdx.y][threadIdx.x] = in[row * size + col];
    __syncthreads();

    // copy submatrix (transposed) to main memory
    out[col * size + row] = temp_matrix[threadIdx.y][threadIdx.x];
}

/* Returns 1 iff b is the transpose of a (both side x side, row-major). */
int correct(TYPE *a, TYPE *b, int side)
{
    int i;
    for (i = 0; i < side * side; i++)
        if (a[i] != b[(i % side) * side + i / side])
            return 0;
    return 1;
}

int main()
{
    TYPE *h_in, *h_out;
    TYPE *d_in, *d_out;
    int size = N * N;
    int size_in_memory = size * sizeof(TYPE);
    int i;

    // timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // allocate memory in host and device
    h_in = (TYPE *)malloc(size_in_memory);
    h_out = (TYPE *)malloc(size_in_memory);
    cudaMalloc((void **)&d_in, size_in_memory);
    cudaMalloc((void **)&d_out, size_in_memory);

    // fill matrix in host
    for (i = 0; i < size; i++)
        h_in[i] = i;

    // transfer matrix from host to device
    cudaMemcpy(d_in, h_in, size_in_memory, cudaMemcpyHostToDevice);

    // transpose matrix in device
    dim3 grid, block;
    block.x = THREAD_PER_BLOCK_SIDE_X;
    block.y = THREAD_PER_BLOCK_SIDE_Y;
    grid.x = N / block.x;
    grid.y = N / block.y;

    cudaEventRecord(start);
    transpose<<<grid, block>>>(d_in, d_out, N);
    cudaEventRecord(stop);

    // transfer matrix from device to host
    cudaMemcpy(h_out, d_out, size_in_memory, cudaMemcpyDeviceToHost);

    // correctness test
    printf("\ncorrecteness: %d \n", correct(h_in, h_out, N));

    // free memory
    free(h_in);
    free(h_out);
    cudaFree(d_in);
    cudaFree(d_out);

    // showing Bandwidth (read + write => factor 2)
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("\nmatrix type: %s", TYPE_S);
    printf("\nblock: %d x %d", block.y, block.x);
    printf("\nmilliseconds: %f", milliseconds);
    printf("\nBandwidth: %f GB/s \n", 2 * size_in_memory / milliseconds / 1e6);

    /* BUG FIX: the CUDA events were never destroyed. */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
12,807
/********************************
 * Anfal Alyousufi
 * Intro to CUDA
 * -TECH TALK @ PHAZERO
 *
 *********************************/
#include "stdio.h"
#include "iostream"

// Classic first program: greet the world on standard output.
int main(void)
{
    std::cout << "Hello World!" << std::endl;
    return 0;
}
12,808
// Copyright (c) 2015 Patrick Diehl
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
extern "C" {

/* Three-point stencil over the interior elements [1, count[0]-2]:
 *   out[i] = s[0]*in[i-1] + s[1]*in[i] + s[2]*in[i+1]
 * Uses a grid-stride loop offset by one so the boundaries are untouched. */
__global__ void stencil(size_t* count, float* in, float* out, float* s)
{
    size_t first  = (blockDim.x * blockIdx.x + 1) + threadIdx.x;
    size_t stride = gridDim.x * blockDim.x;
    size_t last   = count[0] - 1;

    for (size_t i = first; i < last; i += stride) {
        float left   = in[i - 1];
        float center = in[i];
        float right  = in[i + 1];
        out[i] = s[0] * left + s[1] * center + s[2] * right;
    }
}

}
12,809
#include "includes.h"

/* For every index t < count, writes the Euclidean length
 * sqrt(element1[t]^2 + element2[t]^2) into output[t].
 * Expects a 2D grid of 1D blocks. */
__global__ void LengthFromElements(float* element1, float* element2, float* output, int count)
{
    // Flatten (blockIdx.y, blockIdx.x, threadIdx.x) into a global id:
    // full grid rows preceding ours, then blocks preceding ours in this row,
    // then our offset inside the block.
    int rowOffset   = blockDim.x * blockIdx.y * gridDim.x;
    int blockOffset = blockDim.x * blockIdx.x;
    int threadId    = rowOffset + blockOffset + threadIdx.x;

    if (threadId < count) {
        float e1 = element1[threadId];
        float e2 = element2[threadId];
        output[threadId] = sqrtf(e1 * e1 + e2 * e2);
    }
}
12,810
#include <stdio.h>
#include <stdlib.h>
//#include <cuda.h>
#include <cuda_runtime.h>

/**
 * @brief Minimal kernel that prints its thread index.
 *
 * Device-side `printf()` requires Compute Capability >= 2.
 */
__global__ void helloWorldKernel()
{
    printf( "Hi from CUDA: %u\n", threadIdx.x );
}

int main()
{
    /**
     * @attention The interleaving of device and host output is not
     * guaranteed — thread/kernel ordering is not kept!
     */
    helloWorldKernel <<<1,1>>> ();
    printf( "Hi from main\n" );
    helloWorldKernel <<<1,1>>> ();

    // Without this sync the process may exit before the device flushes
    // its printf buffer, and the kernels would appear to print nothing.
    cudaDeviceSynchronize();
    return 0;
}
12,811
#include "includes.h"

/* Writes y[i] = 2 * x[i], one element per thread; the element index is the
 * thread index within the (single) block. */
__global__ void double_value(double *x, double *y)
{
    int i = threadIdx.x;
    y[i] = 2. * x[i];
}
12,812
#include <cuda.h>
#include <stdio.h>

/// A device kernel function, just print out "Hello World"
__global__ void helloworld()
{
    printf("Hello World\n");
}

int main(int argc, char** argv)
{
    /// Run on the first GPU of the machine.
    cudaSetDevice(0);

    /// A single block containing a single thread.
    printf("Launch 1 block , each block has 1 thread");
    helloworld<<< 1, 1 >>>();
    printf("\n");

    /// Exercises left for the reader — uncomment and fill in the TODOs:
    ///   1 block,  3 threads:  helloworld<<< 1, /*TODO*/ >>>();
    ///   2 blocks, 1 thread:   helloworld<<< /*TODO*/, 1 >>>();
    ///   2 blocks, 3 threads:  helloworld<<< /*TODO*/, /*TODO*/ >>>();

    /// Why this? Try commenting it out: without the sync the process can
    /// exit before the device flushes its printf output.
    cudaDeviceSynchronize();
    return 0;
}
12,813
#include "includes.h"

#define MINVAL 1e-7

/* Swaps columns *i and *k of the square row-major matrix Dev_Mtr
 * (*Dev_size rows of *Dev_size entries); one thread handles one row. */
__global__ void Permute(double* Dev_Mtr, int* i, int* k, int* Dev_size)
{
    int row = blockDim.x * blockIdx.x + threadIdx.x;
    int n = *Dev_size;

    if (row < n) {
        double* lhs = &Dev_Mtr[row * n + (*i)];
        double* rhs = &Dev_Mtr[row * n + (*k)];
        double held = *lhs;
        *lhs = *rhs;
        *rhs = held;
    }
}
12,814
#include <iostream>
#include <ctime>
#include <cmath>
#include <cstdlib>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <device_functions.h>

/* BUG FIX: argument parenthesized so the macro is safe for expressions. */
#define rnd( x ) ((x) * std::rand() / RAND_MAX)
#define SPHERES 20
#define INF 2e10f
#define DIM 2048

struct Sphere {
    float r, b, g;   /* color */
    float radius;
    float x, y, z;   /* center */

    /* Depth at which the vertical ray through (ox, oy) hits this sphere;
     * writes the shading factor to *n. Returns -INF on a miss. */
    __device__ float hit(float ox, float oy, float *n) {
        float dx = ox - x;
        float dy = oy - y;
        if (dx * dx + dy * dy < radius * radius) {
            float dz = std::sqrt(radius * radius - dx * dx - dy * dy);
            *n = dz / radius;
            return dz + z;
        }
        return -INF;
    }
};

/* Writes the RGBA bitmap as a plain-text PPM (P3) image. */
void ppm_write(unsigned char* bitmap, int xdim, int ydim, FILE* fp) {
    fprintf(fp, "P3\n");
    fprintf(fp, "%d %d\n", xdim, ydim);
    fprintf(fp, "255\n");
    for (int y = 0; y < ydim; y++) {
        for (int x = 0; x < xdim; x++) {
            int i = x + y * xdim;
            fprintf(fp, "%d %d %d ", bitmap[4 * i], bitmap[4 * i + 1], bitmap[4 * i + 2]);
        }
        fprintf(fp, "\n");
    }
}

/* One thread per pixel: shade with the nearest sphere hit, black otherwise. */
__global__ void kernel(Sphere *s, unsigned char *ptr) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    int offset = x + y * blockDim.x * gridDim.x;
    float ox = x - DIM / 2;
    /* BUG FIX: was `y - DIM - 2`, an operator typo that pushed the whole
     * scene far off-center; the camera is centered, mirroring ox. */
    float oy = y - DIM / 2;

    float r = 0, g = 0, b = 0;
    float maxz = -INF;
    for (int i = 0; i < SPHERES; i++) {
        float n;
        float t = s[i].hit(ox, oy, &n);
        if (t > maxz) {        /* keep the closest (largest-depth) hit */
            float fscale = n;
            r = s[i].r * fscale;
            g = s[i].g * fscale;
            b = s[i].b * fscale;
            maxz = t;
        }
    }

    ptr[offset * 4 + 0] = (int)(r * 255);
    ptr[offset * 4 + 1] = (int)(g * 255);
    ptr[offset * 4 + 2] = (int)(b * 255);
    ptr[offset * 4 + 3] = 255;
}

int main(int argc, char *argv[]) {
    /* BUG FIX: guard the output-file argument instead of dereferencing
     * argv[1] unconditionally. */
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <output.ppm>" << std::endl;
        return 1;
    }
    FILE* fp = fopen(argv[1], "w");
    if (fp == NULL) {
        std::cerr << "cannot open output file" << std::endl;
        return 1;
    }

    unsigned char *dev_bitmap, *bitmap;
    Sphere *s;
    std::srand((unsigned int)std::time(NULL));

    cudaMalloc((void **)&dev_bitmap, sizeof(unsigned char) * DIM * DIM * 4);
    cudaMalloc((void **)&s, sizeof(Sphere) * SPHERES);

    /* Build a random scene on the host and upload it. */
    Sphere *temp_s = new Sphere[SPHERES];
    bitmap = (unsigned char *)malloc(sizeof(unsigned char) * DIM * DIM * 4);
    for (int i = 0; i < SPHERES; i++) {
        temp_s[i].r = rnd(1.0f);
        temp_s[i].g = rnd(1.0f);
        temp_s[i].b = rnd(1.0f);
        temp_s[i].x = rnd(2000.0f) - 1000;
        temp_s[i].y = rnd(2000.0f) - 1000;
        temp_s[i].z = rnd(2000.0f) - 1000;
        temp_s[i].radius = rnd(200.0f) + 40;
    }
    cudaMemcpy(s, temp_s, sizeof(Sphere) * SPHERES, cudaMemcpyHostToDevice);
    delete[] temp_s;

    dim3 grids(DIM / 16, DIM / 16);
    dim3 threads(16, 16);

    clock_t startTime = clock();
    kernel<<<grids, threads>>>(s, dev_bitmap);
    cudaDeviceSynchronize();
    clock_t endTime = clock();

    cudaMemcpy(bitmap, dev_bitmap, sizeof(unsigned char) * DIM * DIM * 4,
               cudaMemcpyDeviceToHost);
    ppm_write(bitmap, DIM, DIM, fp);

    cudaFree(dev_bitmap);
    cudaFree(s);
    free(bitmap); /* BUG FIX: the host bitmap was leaked */
    fclose(fp);

    double execute_time = (double)(endTime - startTime) / CLOCKS_PER_SEC;
    std::cout << "CUDA ray tracing: " << std::fixed << execute_time << " sec" << std::endl;
    return 0;
}
12,815
#include "Game.cuh" /** * Gets the index of the highest value in a list * Parameter outputs: the list to look through * Parameter numOutputs: the number of elements in * the outputs list * Returns: the index of the highest value in a list */ int getMaxIndex(double* outputs, int numOutputs){ // Gets the index with the highest int maxIndex=0; for(int output=0; output<numOutputs; output++){ if(outputs[maxIndex] <= outputs[output]){ maxIndex=output; } } return maxIndex; } /** * Changes an index into a move vector * Parameter index: the index to change into * a move vector * Returns: a vector corresponding to the move to be * made in the form (oldRow, oldCol, newRow, newCol) */ int* parseIndexToMove(int index){ // Gets the actual move from the max int* move=(int*)(calloc(4, sizeof(int))); for(int movePart=0; movePart<4; movePart++){ move[movePart]=index%8; index/=8; } return move; } /** * Changes the move into an index (for the output list) * This is the inverse of the parseIndexToMove function * and makes a 1-1 correspondance between the functions * Parameter move: the move vector (oldRow, oldCol, * newRow, newCol) * Returns: the corresponding index of the move */ int parseMoveToIndex(int* move){ int index=0; for(int part=0; part<4; part++){ index+=move[3-part]*pow(8, 3-part); } return index; } /** * Allows the user to input a move to play against the neural network * Parameter board: the board to make a move on * Parameter color: the player's color (that's making the move) * Returns: the winner's color if there is one -1 otherwise */ int makePlayerTurn(Piece** board, int color){ int winner=-1; int madeMove=0; char* buffer=(char*)calloc(80, sizeof(char)); do{ //Handles user input printf("Make a numerical move in the format: [oldRow oldCol newRow newCol]\n"); int oldRow=-1; int oldCol=-1; int newRow=-1; int newCol=-1; fgets(buffer, 78, stdin); oldRow=buffer[0]-48; oldCol=buffer[2]-48; newRow=buffer[4]-48; newCol=buffer[6]-48; // Validates the move and gets a winner if 
there is one if(isValidMove(board, oldRow, oldCol, newRow, newCol, color)){ if(movePiece(board, oldRow, oldCol, newRow, newCol)==KINGREWARD){ winner=color; } madeMove=1; } // An invalid move was made else{ printf("Invalid move made\n"); } }while(!madeMove); free(buffer); return winner; } /** * Makes the turn and updates the output vectors * Parameter board: the board to make a turn on * Parameter color: the color of the player whose turn it is * Parameter turn: the turn number * Parameter nn: the neural network to use * Parameter inputVector: a vector with enough space to hold * all one-hot encoded data values for the board * Parameter output: the rewards corresponding to the * output of the neural network * Parameter expected: the values that were rewarded * Parameter chosens: the chosen moves * Returns: whether or not there was a checkmate */ int makeTurn(Piece** board, int color, int turn, NeuralNet* nn, double* inputVector, double*** output, double** expected, int* chosens){ // Gets the expected output vector oneHotEncode(board, inputVector); feedForward(nn, &output[turn], inputVector); // Gets the next move int madeMove=0; int randomMove=((rand()+0.0)/RAND_MAX)<EXPLORATION; int winner=-1; do{ // If there should be a random move made if(randomMove){ // Gets the random move int random=rand(); int randomIndex=random%nn->neurons[nn->layers-1]; int* move=parseIndexToMove(randomIndex); // Validates, makes, and gets the reward for a move if(isValidMove(board, move[0], move[1], move[2], move[3], color)){ expected[turn][randomIndex]=movePiece(board, move[0], move[1], move[2], move[3])+TURNDEFICIT; chosens[turn]=randomIndex; if(expected[turn][randomIndex]==KINGREWARD+TURNDEFICIT){ winner=color; } madeMove=1; } // Penalizes invalid moves else{ expected[turn][randomIndex]=-1; } free(move); } // Otherwise choose the best move else{ int maxIndex=getMaxIndex(output[turn][nn->layers-1], nn->neurons[nn->layers-1]); int* move=parseIndexToMove(maxIndex); // Verifies the move and 
gets the reward for the move if(isValidMove(board, move[0], move[1], move[2], move[3], color)){ // Updates the reward expected[turn][maxIndex]= movePiece(board, move[0], move[1], move[2], move[3])+TURNDEFICIT; // Updates the chosen list chosens[turn]=maxIndex; if(expected[turn][maxIndex]==KINGREWARD+TURNDEFICIT){ winner=color; } madeMove=1; } // Penalizes invalid moves else{ expected[turn][maxIndex]=-1; } free(move); // Make a random move if the best move is invalid randomMove=1; } }while(!madeMove); return winner; } /** * Plays a single game * Parameter nn1: the first neural network * Parameter nn2: the second neural network * Parameter playerColor: the color of the player (-1 * if no player) * Parameter inputVector: the input vector allocated * with enough room for the one-hot encoded values of * the board * Parameter output1: the output values for the first * neural network * Parameter output2: the output values for the second * neural network * Parameter expected1: the reward values for the * first player * Parameter expected2: the reward values for the * second player * Parameter chosens1: the chosen move indices for the * first player * Parameter chosens2: the chosen move indices for the * second player * Parameter whiteTurns: the number of turns for the * first player * Parameter blackTurns: the number of turns for the * second player * Returns: the winner of the game (-1 if tie) */ int playGame(NeuralNet* nn1, NeuralNet* nn2, int playerColor, double* inputVector, double*** output1, double*** output2, double** expected1, double** expected2, int* chosens1, int* chosens2, int* whiteTurns, int* blackTurns){ Piece** board=makeChessBoard(); int winner; int color=0; do{ // Makes the player's turn if(color == playerColor){ printChessBoard(board); winner=makePlayerTurn(board, color); } // Makes white's turn else if(color==0){ //printf("White %d\n", *whiteTurns); winner=makeTurn(board, color, *whiteTurns, nn1, inputVector, output1, expected1, chosens1); 
(*whiteTurns)++; } // Makes black's turn else{ //printf("Black %d\n", *blackTurns); winner=makeTurn(board, color, *blackTurns, nn2, inputVector, output2, expected2, chosens2); (*blackTurns)++; } color=(color+1)%2; }while(winner<0 && (*whiteTurns)<TURNS && (*blackTurns)<TURNS); printChessBoard(board); freeChessBoard(board); return winner; } /** * Uses the bellman equation to alter the actual reward values that were * returned * Parameter nn: the neural network of the corresponding player * Parameter outputs: the outputs of the neural networks * Parameter expected: the reward values * Parameter chosens: the states that were chosen * Parameter numOutputs: the max number of outputs that could be chosen * Parameter won: whether or not the player corresponding to the reward * values has won * Parameter tie: whether or not the player corresponding to the reward * values has tied * Returns: nothing */ void alterExpected(NeuralNet* nn, double*** outputs, double** expected, int* chosens, int numOutputs, int won, int tie){ // The bellman equation to chain the actions together for(int output=numOutputs-1; output>=0; output--){ // Changes the non-chosen values to the output from the neural network // so no erroneous changes are made for(int reward=0; reward<nn->neurons[nn->layers-1]; reward++){ if(expected[output][reward]!=-1 && reward != chosens[output]){ expected[output][reward]=outputs[output][nn->layers-1][reward]; } } // Bellman equation on last state's chosen value if(output==numOutputs-1){ if(won){ expected[output][chosens[output]]+=DISCOUNT*WINREWARD; } else if(!tie){ expected[output][chosens[output]]+=DISCOUNT*LOSSREWARD; } } // Bellman equation on every other state's chosen value else{ expected[output][chosens[output]]+= DISCOUNT*expected[output+1][chosens[output+1]]; chosens[output+1]=-1; } } chosens[0]=-1; } /** * Trains the neural network * Parameter nn1: the first neural network to train * Parameter nn2: the second neural network to train * Parameter playerColor: 
the color of the player (or * -1 if there is no player) * Returns: nothing */ void train(NeuralNet* nn1, NeuralNet* nn2, int playerColor, char* file1, char* file2){ // The inputs used for both neural network feedforwards double* sharedInputs=(double*)calloc(nn1 -> neurons[0], sizeof(double*)); // Gets the output Matrices for both sides of the neural nets double*** output1=makeExpected(nn1, TURNS); double*** output2=makeExpected(nn2, TURNS); // Gets the reward Matrices for both sides of the neural nets double** expected1=makeActual(nn1, TURNS); double** expected2=makeActual(nn2, TURNS); // Gets the chosens vectors denoting what move was chosen int* chosens1=(int*)calloc(TURNS, sizeof(int)); int* chosens2=(int*)calloc(TURNS, sizeof(int)); for(int output=0; output<TURNS; output++){ chosens1[output]=-1; chosens2[output]=-1; } // Loops through infinite games and plays the game to train it for(int game=0; 1; game++){ // Serializes the neural networks every 5 games if(game%5==0){ printf("Serializing the neural networks\n"); serializeNeuralNet(nn1, file1); serializeNeuralNet(nn2, file2); } int* whiteTurns=(int*)calloc(1, sizeof(int)); int* blackTurns=(int*)calloc(1, sizeof(int)); // Plays a game printf("Training on game %d\n", game); int winner=playGame(nn1, nn2, playerColor, sharedInputs, output1, output2, expected1, expected2, chosens1, chosens2, whiteTurns, blackTurns); // Changes the rewards using the bellman equation alterExpected(nn1, output1, expected1, chosens1, *whiteTurns, winner==0, winner==-1); alterExpected(nn2, output2, expected2, chosens2, *blackTurns, winner==1, winner==-1); free(whiteTurns); free(blackTurns); // Backpropogates when there is no player if(playerColor==-1){ printf("Backpropogating\n"); backpropogate(nn1, output1, expected1, TURNS); backpropogate(nn2, output2, expected2, TURNS); } } }
12,816
#include "includes.h"

/*
 * One-block forward elimination (Gaussian elimination) on an augmented
 * matrix stored row-major with row stride (size+1).
 *
 * a_d : input augmented matrix
 * b_d : output matrix after elimination, same layout
 * size: number of rows; the shared tile is 16x16, so the launch must use a
 *       2-D block covering the matrix with size <= 16 -- TODO confirm the
 *       host-side launch enforces this.
 *
 * BUG FIXES:
 *  - A __syncthreads() is issued after the cooperative load; previously
 *    iteration i=1 could read tile entries other threads had not written.
 *  - Each iteration is split into a read phase and a write phase separated
 *    by barriers; previously the thread updating column (i-1) could
 *    overwrite temp[i+idy][i-1] while siblings in the row were still
 *    reading it to form var1.
 */
__global__ void forwardPropagation(float *a_d, float *b_d, int size)
{
    int idx = threadIdx.x;  // column index within the augmented matrix
    int idy = threadIdx.y;  // row index
    __shared__ float temp[16][16];

    // Stage the matrix into shared memory, one element per thread.
    temp[idy][idx] = a_d[(idy * (size + 1)) + idx];
    __syncthreads();

    for (int i = 1; i < size; i++)
    {
        bool active = (idy + i) < size;
        float newVal = 0.0f;
        if (active)
        {
            // Scale factor chosen so column (i-1) of row (i+idy) is
            // eliminated against pivot row (i-1). Formula preserved from
            // the original implementation.
            float var1 = (-1) * (temp[i - 1][i - 1] / temp[i + idy][i - 1]);
            newVal = temp[i - 1][idx] + ((var1) * (temp[i + idy][idx]));
        }
        __syncthreads();      // all reads complete before any write
        if (active)
            temp[i + idy][idx] = newVal;
        __syncthreads();      // writes visible before the next iteration
    }

    b_d[idy * (size + 1) + idx] = temp[idy][idx];
}
12,817
#include "includes.h"

/*
 * Element-wise sum with threshold replacement:
 *   res[i] = a[i] + b[i], except any sum strictly greater than k is
 *   replaced by p. One thread per element; threads whose global index
 *   falls at or beyond n do nothing.
 */
__global__ void callOperation(int *a, int *b, int *res, int k, int p, int n)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n) {
        int sum = a[idx] + b[idx];
        res[idx] = (sum > k) ? p : sum;
    }
}
12,818
#if GOOGLE_CUDA
#define EIGEN_USE_GPU

/*
 * Machine-generated (TVM-style) fused convolution kernel.
 *
 * Accumulates, for each output element, products of a padded input slice
 * (Data) with three small factorized filters (K0, K1, KC) over an 11-wide
 * "rr" reduction axis, a 4-wide "rs" axis and a 3x3 spatial window --
 * presumably a rank-decomposed (CP) separable convolution; confirm against
 * the generating schedule.
 *
 * Launch configuration is fixed by the launcher below:
 * grid (1, 4, 4), block (16, 8, 1); each thread produces a 2x2 tile of
 * outputs (Output_local) for each of two ww_inner_outer column halves.
 *
 * NOTE(review): all index arithmetic is compiler-generated; do not
 * hand-edit the expressions.
 */
__global__ void default_function_kernel0(const float* __restrict__ Data, const float* __restrict__ K0, const float* __restrict__ K1, const float* __restrict__ KC, float* __restrict__ Output) {
  // Per-thread 2x2 accumulator tile.
  float Output_local[4];
  // Cooperative staging buffers for the current input/filter slices.
  __shared__ float pad_temp_shared[512];
  __shared__ float K0_shared[8];
  __shared__ float K1_shared[2];
  __shared__ float KC_shared[1];
  for (int ww_inner_outer = 0; ww_inner_outer < 2; ++ww_inner_outer) {
    // Zero the accumulators for this column half.
    #pragma unroll
    for (int tt0_c_init = 0; tt0_c_init < 2; ++tt0_c_init) {
      #pragma unroll
      for (int tt1_c_init = 0; tt1_c_init < 2; ++tt1_c_init) {
        Output_local[((tt0_c_init * 2) + tt1_c_init)] = 0.000000e+00f;
      }
    }
    for (int rr_outer = 0; rr_outer < 11; ++rr_outer) {
      #pragma unroll
      for (int rs1_outer = 0; rs1_outer < 4; ++rs1_outer) {
        #pragma unroll
        for (int rh_outer = 0; rh_outer < 3; ++rh_outer) {
          #pragma unroll
          for (int rw_outer = 0; rw_outer < 3; ++rw_outer) {
            // Barrier before refilling shared buffers (previous iteration's
            // readers must be done).
            __syncthreads();
            // Cooperative load of the padded input tile; out-of-bounds
            // positions (the condition below) read as zero padding.
            #pragma unroll
            for (int ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner = 0; ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner < 4; ++ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) {
              pad_temp_shared[(((((int)threadIdx.y) * 64) + (((int)threadIdx.x) * 4)) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner)] = (((((((1 - (((((((int)threadIdx.y) * 64) + (((int)threadIdx.x) * 4)) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) % 128) / 16)) - rh_outer) <= (((int)blockIdx.y) * 8)) && ((((int)blockIdx.y) * 8) < ((33 - (((((((int)threadIdx.y) * 64) + (((int)threadIdx.x) * 4)) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) % 128) / 16)) - rh_outer))) && (((1 - rw_outer) - (((((int)threadIdx.x) * 4) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) % 16)) <= (ww_inner_outer * 16))) && ((ww_inner_outer * 16) < ((33 - rw_outer) - (((((int)threadIdx.x) * 4) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) % 16)))) ? Data[(((((((((((((((int)threadIdx.y) * 64) + (((int)threadIdx.x) * 4)) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) / 128) * 4096) + (rs1_outer * 1024)) + (((int)blockIdx.y) * 256)) + ((((((((int)threadIdx.y) * 64) + (((int)threadIdx.x) * 4)) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) % 128) / 16) * 32)) + (rh_outer * 32)) + (ww_inner_outer * 16)) + rw_outer) + (((((int)threadIdx.x) * 4) + ax0_ax1_ax2_ax3_ax4_fused_fused_fused_fused_inner_inner_inner) % 16)) - 33)] : 0.000000e+00f);
            }
            // A handful of designated threads load the filter slices.
            if (((int)threadIdx.x) < (8 - ((int)threadIdx.y))) {
              if (((int)threadIdx.x) < 1) {
                K0_shared[(((int)threadIdx.x) + ((int)threadIdx.y))] = K0[((((((((int)threadIdx.x) + ((int)threadIdx.y)) / 2) * 44) + ((((int)blockIdx.z) / 2) * 22)) + (((((int)threadIdx.x) + ((int)threadIdx.y)) % 2) * 11)) + rr_outer)];
              }
            }
            if (((int)threadIdx.x) < (2 - ((int)threadIdx.y))) {
              if (((int)threadIdx.x) < 1) {
                if (((((int)threadIdx.x) + ((int)threadIdx.y)) / 2) < (4 - rs1_outer)) {
                  K1_shared[(((int)threadIdx.x) + ((int)threadIdx.y))] = K1[(((((((((int)threadIdx.x) + ((int)threadIdx.y)) / 2) * 44) + (rs1_outer * 44)) + ((((int)blockIdx.z) % 2) * 22)) + (((((int)threadIdx.x) + ((int)threadIdx.y)) % 2) * 11)) + rr_outer)];
                }
              }
            }
            if (((int)threadIdx.x) < (1 - ((int)threadIdx.y))) {
              if (((int)threadIdx.x) < 1) {
                if (((int)threadIdx.x) < ((3 - rh_outer) - ((int)threadIdx.y))) {
                  KC_shared[(((int)threadIdx.x) + ((int)threadIdx.y))] = KC[(((((((int)threadIdx.x) * 33) + (((int)threadIdx.y) * 33)) + (rh_outer * 33)) + (rw_outer * 11)) + rr_outer)];
                }
              }
            }
            // Barrier: staging complete before accumulation reads it.
            __syncthreads();
            // Accumulate the 2x2 tile: input sample x K0 x K1 x KC.
            #pragma unroll
            for (int rs0_inner = 0; rs0_inner < 4; ++rs0_inner) {
              #pragma unroll
              for (int tt0_c = 0; tt0_c < 2; ++tt0_c) {
                #pragma unroll
                for (int tt1_c = 0; tt1_c < 2; ++tt1_c) {
                  Output_local[((tt0_c * 2) + tt1_c)] = (Output_local[((tt0_c * 2) + tt1_c)] + (((pad_temp_shared[(((rs0_inner * 128) + (((int)threadIdx.y) * 16)) + ((int)threadIdx.x))] * K0_shared[((rs0_inner * 2) + tt0_c)]) * K1_shared[tt1_c]) * KC_shared[0]));
                }
              }
            }
          }
        }
      }
    }
    // Write this column half's 2x2 accumulator tile back to global memory.
    #pragma unroll
    for (int tt0_inner_inner_inner = 0; tt0_inner_inner_inner < 2; ++tt0_inner_inner_inner) {
      #pragma unroll
      for (int tt1_inner_inner_inner = 0; tt1_inner_inner_inner < 2; ++tt1_inner_inner_inner) {
        Output[(((((((((((int)blockIdx.z) / 2) * 8192) + (tt0_inner_inner_inner * 4096)) + ((((int)blockIdx.z) % 2) * 2048)) + (tt1_inner_inner_inner * 1024)) + (((int)blockIdx.y) * 256)) + (((int)threadIdx.y) * 32)) + (ww_inner_outer * 16)) + ((int)threadIdx.x))] = Output_local[((tt0_inner_inner_inner * 2) + tt1_inner_inner_inner)];
      }
    }
  }
}

/*
 * Host-side launcher with the grid/block shape the generated kernel was
 * scheduled for. The trailing cudaDeviceSynchronize() blocks until the
 * kernel finishes. NOTE(review): neither the launch nor the sync is
 * error-checked; failures here are silent.
 */
void Conv2dRcpFusedNchwKernelLauncher(const float* U, const float* K0, const float* K1, const float* KC, float* V){
  dim3 gridDim0(1, 4, 4);
  dim3 blockDim0(16, 8, 1);
  default_function_kernel0<<<gridDim0, blockDim0>>>(U, K0, K1, KC, V);
  cudaDeviceSynchronize();
}
#endif
12,819
/*
 * FDTD E-field update: E += CE * (curl of H) for one timestep.
 *
 * Launch: 1-D grid covering Nx*Ny*Nz cells, with k the fastest-varying
 * axis of the flattened index. Dynamic shared memory: 3*(blockDim.x+1)
 * floats -- Hx/Hy/Hz are staged along k with one halo slot at the top end.
 *
 * NOTE(review): the halo slot hs[tk+1] holds valid data only when the
 * neighbouring thread also passed the (i,j,k > 0, i < Nx) guard; cells
 * adjacent to excluded boundary threads may read stale shared memory.
 * Preserved as-is -- confirm the j<Ny-1 / k<Nz-1 write guards keep such
 * cells from mattering.
 */
__global__ void update_e( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz, float *CEx, float *CEy, float *CEz ) {
    int tk = threadIdx.x;
    int idx = blockIdx.x*blockDim.x + tk;
    int Nyz = Ny*Nz;
    // Unflatten idx into (i,j,k); k varies fastest.
    int i = idx/Nyz;
    int j = ( idx - i*Nyz )/Nz;
    int k = idx - i*Nyz - j*Nz;

    // Shared staging of the H components along k, +1 halo slot each.
    extern __shared__ float hs[];
    float* hx = (float*) hs;
    float* hy = (float*) &hx[blockDim.x+1];
    float* hz = (float*) &hy[blockDim.x+1];

    if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
        hx[tk] = Hx[idx];
        hy[tk] = Hy[idx];
        hz[tk] = Hz[idx];
        // Last thread of the block also fetches the k+1 halo element.
        if ( tk==blockDim.x-1 ) {
            hx[tk+1] = Hx[idx+1];
            hy[tk+1] = Hy[idx+1];
        }
    }
    __syncthreads();

    if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
        // Yee-cell curl differences: j/i neighbours come from global
        // memory, k neighbours from the shared staging buffer.
        if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( Hz[idx+Nz] - hz[tk] - hy[tk+1] + hy[tk] );
        if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( hx[tk+1] - hx[tk] - Hz[idx+Nyz] + hz[tk] );
        if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyz] - hy[tk] - Hx[idx+Nz] + hx[tk] );
    }
}

/*
 * FDTD H-field update: H -= 0.5f * (curl of E) for one timestep.
 * Same launch/shared-memory contract as update_e, but the halo slot is at
 * the bottom end (k-1), fetched by thread 0.
 *
 * BUG FIX (performance): the curl coefficient was the double literal 0.5,
 * which promoted every update to double-precision arithmetic in an
 * otherwise all-float kernel; changed to 0.5f.
 */
__global__ void update_h( int Nx, int Ny, int Nz, float *Ex, float *Ey, float *Ez, float *Hx, float *Hy, float *Hz ) {
    int tk = threadIdx.x;
    int idx = blockIdx.x*blockDim.x + tk;
    int Nyz = Ny*Nz;
    int i = idx/Nyz;
    int j = ( idx - i*Nyz )/Nz;
    int k = idx - i*Nyz - j*Nz;

    extern __shared__ float es[];
    float* ex = (float*) es;
    float* ey = (float*) &ex[blockDim.x+1];
    float* ez = (float*) &ey[blockDim.x+1];

    if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
        ex[tk+1] = Ex[idx];
        ey[tk+1] = Ey[idx];
        ez[tk] = Ez[idx];
        // First thread of the block also fetches the k-1 halo element.
        if ( tk==0 ) {
            ex[0] = Ex[idx-1];
            ey[0] = Ey[idx-1];
        }
    }
    __syncthreads();

    if ( i > 0 && j > 0 && k > 0 && i < Nx ) {
        Hx[idx] -= 0.5f*( ez[tk] - Ez[idx-Nz] - ey[tk+1] + ey[tk] );
        Hy[idx] -= 0.5f*( ex[tk+1] - ex[tk] - ez[tk] + Ez[idx-Nyz] );
        Hz[idx] -= 0.5f*( ey[tk+1] - Ey[idx-Nyz] - ex[tk+1] + Ex[idx-Nz] );
    }
}
12,820
#include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

#define GRAPH_SIZE 2000

#define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b]
#define D(a, b) EDGE_COST(output, graph_size, a, b)

/* "Infinity": large enough to dominate real costs, small enough that
 * INF + INF (0x3ffffffe) still fits in a signed int without overflow. */
#define INF 0x1fffffff

/* Minimal error check for CUDA runtime calls. */
#define CUDA_CHECK(call)                                                    \
  do {                                                                      \
    cudaError_t err_ = (call);                                              \
    if (err_ != cudaSuccess) {                                              \
      fprintf(stderr, "CUDA error %s:%d: %s\n", __FILE__, __LINE__,         \
              cudaGetErrorString(err_));                                    \
      exit(EXIT_FAILURE);                                                   \
    }                                                                       \
  } while (0)

/* Fills `output` with a random directed graph: zero diagonal, edge cost
 * in [0,20] with probability ~1/2, otherwise INF (no edge). Fixed seed so
 * every run (CPU and GPU) sees the same graph. */
void generate_random_graph(int *output, int graph_size) {
  int i, j;

  srand(0xdadadada);

  for (i = 0; i < graph_size; i++) {
    for (j = 0; j < graph_size; j++) {
      if (i == j) {
        D(i, j) = 0;
      } else {
        int r;
        r = rand() % 40;
        if (r > 20) {
          r = INF;
        }

        D(i, j) = r;
      }
    }
  }
}

/* One Floyd-Warshall relaxation step for a fixed pivot k: every (i,j)
 * pair is relaxed through vertex k. All (i,j) may run in parallel for a
 * given k because row k and column k cannot improve at step k
 * (D(k,k) == 0), so concurrent reads of them are stable. */
__global__ void floyd_warshall_kernel(int k, int graph_size, int *output) {
  int i = blockIdx.y * blockDim.y + threadIdx.y;
  int j = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < graph_size && j < graph_size) {
    int via = output[i * graph_size + k] + output[k * graph_size + j];
    if (via < output[i * graph_size + j]) {
      output[i * graph_size + j] = via;
    }
  }
}

/* All-pairs shortest paths on the GPU (previously an unimplemented TODO,
 * which made the verification in main meaningless). Copies `graph` to the
 * device, launches one relaxation kernel per pivot k, and copies the
 * result back into `output`. Same contract as floyd_warshall_cpu. */
void floyd_warshall_gpu(const int *graph, int graph_size, int *output) {
  size_t bytes = sizeof(int) * (size_t)graph_size * graph_size;
  int *d_output;

  CUDA_CHECK(cudaMalloc((void **)&d_output, bytes));
  CUDA_CHECK(cudaMemcpy(d_output, graph, bytes, cudaMemcpyHostToDevice));

  dim3 block(16, 16);
  dim3 grid((graph_size + block.x - 1) / block.x,
            (graph_size + block.y - 1) / block.y);

  /* The k loop is inherently sequential; launches on the same stream are
   * ordered, so no explicit sync is needed between pivots. */
  for (int k = 0; k < graph_size; k++) {
    floyd_warshall_kernel<<<grid, block>>>(k, graph_size, d_output);
  }
  CUDA_CHECK(cudaGetLastError());

  /* Blocking copy also synchronizes with the launches above. */
  CUDA_CHECK(cudaMemcpy(output, d_output, bytes, cudaMemcpyDeviceToHost));
  CUDA_CHECK(cudaFree(d_output));
}

/* Reference CPU implementation of Floyd-Warshall. */
void floyd_warshall_cpu(const int *graph, int graph_size, int *output) {
  int i, j, k;

  memcpy(output, graph, sizeof(int) * graph_size * graph_size);

  for (k = 0; k < graph_size; k++) {
    for (i = 0; i < graph_size; i++) {
      for (j = 0; j < graph_size; j++) {
        if (D(i, k) + D(k, j) < D(i, j)) {
          D(i, j) = D(i, k) + D(k, j);
        }
      }
    }
  }
}

/* Runs both implementations on the same random graph, times each, and
 * compares the results byte-for-byte. */
int main(int argc, char **argv) {
#define TIMER_START() gettimeofday(&tv1, NULL)
#define TIMER_STOP()                                                        \
  gettimeofday(&tv2, NULL);                                                 \
  timersub(&tv2, &tv1, &tv);                                                \
  time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0

  struct timeval tv1, tv2, tv;
  float time_delta;

  int *graph, *output_cpu, *output_gpu;
  int size;

  size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE;

  graph = (int *)malloc(size);
  assert(graph);

  output_cpu = (int *)malloc(size);
  assert(output_cpu);
  memset(output_cpu, 0, size);

  output_gpu = (int *)malloc(size);
  assert(output_gpu);

  generate_random_graph(graph, GRAPH_SIZE);

  fprintf(stderr, "running on cpu...\n");
  TIMER_START();
  floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu);
  TIMER_STOP();
  fprintf(stderr, "%f secs\n", time_delta);

  fprintf(stderr, "running on gpu...\n");
  TIMER_START();
  floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu);
  TIMER_STOP();
  fprintf(stderr, "%f secs\n", time_delta);

  if (memcmp(output_cpu, output_gpu, size) != 0) {
    fprintf(stderr, "FAIL!\n");
  } else {
    fprintf(stderr, "Verification: OK\n");
  }

  free(graph);
  free(output_cpu);
  free(output_gpu);

  return 0;
}
12,821
#include "includes.h"

/*
 * Periodic-extension inverse DWT along rows (X direction), step 1.
 *
 * Reconstructs output samples from the approximation (src_A) and detail
 * (src_D) subbands using the reconstruction filter pairs c_lpr / c_hpr
 * (device constants defined elsewhere). Each thread produces
 * I_X_RESULT_STEPS pairs of adjacent output samples; `halo` extra column
 * blocks are staged on each side with periodic wrap-around.
 *
 * Shared memory layout: 2 * I_X_BLOCKDIM_Y rows of
 * (I_X_RESULT_STEPS + 2*halo) * I_X_BLOCKDIM_X floats -- A rows first,
 * then the D rows offset by I_X_BLOCKDIM_Y.
 *
 * BUG FIX: __syncthreads() used to execute inside `if (baseY < rows)`.
 * baseY depends on threadIdx.y, so threads of one block can disagree on
 * the predicate and some skipped the barrier -- undefined behavior. The
 * barrier is now hoisted out of the divergent region; the guarded work is
 * split into a load phase and a compute phase around it.
 */
__global__ void idwt_per_X_1(float *d_dst, float *src_A, float *src_D, int rows, int cols, int next_cols, int filt_len, int halo)
{
    extern __shared__ float s_Data[];

    //Offset to the left halo edge
    const int baseX = ((blockIdx.x * I_X_RESULT_STEPS) - halo) * I_X_BLOCKDIM_X + threadIdx.x;
    // even if the last pixel of cols+l2-1 takes a new block, rows can be maintained
    const int baseY = blockIdx.y * I_X_BLOCKDIM_Y + threadIdx.y;
    const int baseX1 = blockIdx.x * I_X_RESULT_STEPS * 2 * I_X_BLOCKDIM_X + 2 * threadIdx.x;

    const bool rowValid = (baseY < rows);
    const int l2 = filt_len / 2;

    if (rowValid) {
        src_A += baseY * cols + baseX;
        src_D += baseY * cols + baseX;
        d_dst += baseY * next_cols + baseX1; //To compensate the halo

        //Loading data to shared memory (left halo, periodic wrap)
        #pragma unroll
        for (int i = 0; i < halo; i++) {
            s_Data[(threadIdx.y*(I_X_RESULT_STEPS + 2 * halo)*I_X_BLOCKDIM_X) + threadIdx.x + i * I_X_BLOCKDIM_X] =
                (baseX + i * I_X_BLOCKDIM_X >= 0) ? src_A[i * I_X_BLOCKDIM_X] : src_A[i * I_X_BLOCKDIM_X + cols];
            s_Data[((threadIdx.y + I_X_BLOCKDIM_Y)*(I_X_RESULT_STEPS + 2 * halo)*I_X_BLOCKDIM_X) + threadIdx.x + i * I_X_BLOCKDIM_X] =
                (baseX + i * I_X_BLOCKDIM_X >= 0) ? src_D[i * I_X_BLOCKDIM_X] : src_D[i * I_X_BLOCKDIM_X + cols];
        }

        // main data and right halo (periodic wrap)
        #pragma unroll
        for (int i = halo; i < halo + I_X_RESULT_STEPS + halo; i++) {
            s_Data[(threadIdx.y*(I_X_RESULT_STEPS + 2 * halo)*I_X_BLOCKDIM_X) + threadIdx.x + i * I_X_BLOCKDIM_X] =
                ((baseX + i * I_X_BLOCKDIM_X) < cols) ? src_A[i * I_X_BLOCKDIM_X] : src_A[i * I_X_BLOCKDIM_X - cols];
            s_Data[((threadIdx.y + I_X_BLOCKDIM_Y)*(I_X_RESULT_STEPS + 2 * halo)*I_X_BLOCKDIM_X) + threadIdx.x + i * I_X_BLOCKDIM_X] =
                ((baseX + i * I_X_BLOCKDIM_X) < cols) ? src_D[i * I_X_BLOCKDIM_X] : src_D[i * I_X_BLOCKDIM_X - cols];
        }
    }

    // Barrier outside all divergent control flow (see BUG FIX note above).
    __syncthreads();

    //Compute and store results
    if (rowValid) {
        #pragma unroll
        for (int i = halo; i < halo + I_X_RESULT_STEPS; i++) {
            int pos_x = (baseX1 + 2 * (i - halo) * I_X_BLOCKDIM_X);
            if ((pos_x + 1) < (2 * cols + filt_len - 2)) {
                float temp_1 = 0, temp_2 = 0;
                // Upsample-by-2 reconstruction: even taps feed temp_1, odd
                // taps feed temp_2; A rows use c_lpr, D rows use c_hpr.
                for (int l = 0; l < l2; ++l) {
                    int t = 2 * l;
                    temp_1 += c_lpr[t] * s_Data[(threadIdx.y*(I_X_RESULT_STEPS + 2 * halo)*I_X_BLOCKDIM_X) + threadIdx.x + i * I_X_BLOCKDIM_X - l]
                            + c_hpr[t] * s_Data[((threadIdx.y + I_X_BLOCKDIM_Y)*(I_X_RESULT_STEPS + 2 * halo)*I_X_BLOCKDIM_X) + threadIdx.x + i * I_X_BLOCKDIM_X - l];
                    temp_2 += c_lpr[t + 1] * s_Data[(threadIdx.y*(I_X_RESULT_STEPS + 2 * halo)*I_X_BLOCKDIM_X) + threadIdx.x + i * I_X_BLOCKDIM_X - l]
                            + c_hpr[t + 1] * s_Data[((threadIdx.y + I_X_BLOCKDIM_Y)*(I_X_RESULT_STEPS + 2 * halo)*I_X_BLOCKDIM_X) + threadIdx.x + i * I_X_BLOCKDIM_X - l];
                }
                if ((pos_x >= l2 - 1) && (pos_x < (next_cols + l2 - 1)))
                    d_dst[2 * (i - halo) * I_X_BLOCKDIM_X - l2 + 1] = temp_1;
                if ((pos_x + 1 >= l2 - 1) && (pos_x + 1 < (next_cols + l2 - 1)))
                    d_dst[2 * (i - halo) * I_X_BLOCKDIM_X - l2 + 2] = temp_2;
            }
        }
    }
}
12,822
#include <time.h>
#include <stdio.h>
#include <stdlib.h> //rand, atoi
#include <sys/time.h>
#include <unistd.h>

#define MAX_N 10000000

// Checks a CUDA runtime status, reporting the call site and aborting on
// failure.
#define GPU_CHECKERROR(err) (gpuCheckError(err,__FILE__,__LINE__))
static void gpuCheckError(cudaError_t err, const char * file, int line){
    if(err != cudaSuccess ){
        // BUG FIX: report the call site passed in by the macro; the
        // original printed __FILE__ here, ignoring the 'file' parameter.
        // Errors now also go to stderr instead of stdout.
        fprintf(stderr, "%s in file %s at line %d\n",
                cudaGetErrorString(err), file, line);
        exit(EXIT_FAILURE);
    }
}

//Determines if A & B are co-prime using the Euclidean Algorithm.
//Returns 1 when gcd(A, B) == 1, otherwise 0.
//NOTE(review): identifiers beginning with a double underscore are
//reserved for the implementation; name kept for compatibility.
unsigned int __cop(unsigned int A, unsigned int B){
    unsigned int T;
    //Swap 'em so A >= B
    if(B>A){
        T=B;
        B=A;
        A=T;
    }
    //Euclid: (A, B) -> (B, A mod B) until B == 0; A is then the gcd
    while(B){
        T=B;
        B=A%B;
        A = T;
    }
    return A==1;
}

//Finds the number of co-prime pairs across two N-element vectors A and B
//(element i of A paired with element i of B).
unsigned int copV(unsigned int N, unsigned int * A, unsigned int * B){
    unsigned int numCop = 0;
    for(unsigned int i = 0 ; i < N; i++){
        numCop += __cop(A[i], B[i]);
    }
    return numCop;
}

//Calculates if corresponding numbers in two arrays are coprime
//and stores the result (0 or 1) back into the first array.
//One thread per element; threads at or beyond N do nothing.
__global__ void copV_GPU(unsigned int N, unsigned int * A, unsigned int * B){
    unsigned int n = blockDim.x * blockIdx.x + threadIdx.x;
    if(n >= N) return;
    int a = A[n];
    int b = B[n];
    unsigned int t;
    //Swap 'em so a >= b, then run Euclid's algorithm (same as __cop,
    //inlined because __cop is a host-only function)
    if(b>a){
        t=b;
        b=a;
        a=t;
    }
    while(b){
        t=b;
        b=a%b;
        a = t;
    }
    A[n] = (a==1);
}

//Performs per-block in-place sum reduction of array A; the block's total
//ends up in A[blockIdx.x * blockDim.x]. A must be padded (zero-filled) to
//a multiple of blockDim.x, and blockDim.x must be a power of two for the
//halving loop to cover every element.
__global__ void reduce_GPU(unsigned int * A){
    unsigned int tid = threadIdx.x;
    unsigned int idx = blockDim.x*blockIdx.x + threadIdx.x;
    for( unsigned int S = blockDim.x/2; S > 0; S/=2 ){
        if( tid < S )
            A[idx] += A[idx+S];
        //Make sure all threads have finished before we continue
        __syncthreads();
    }
}

//Code to display device properties. 
// http://gpucoder.livejournal.com/1064.html
// Pretty-prints the interesting fields of a cudaDeviceProp.
void printDevProp(cudaDeviceProp devProp)
{
    printf("Major revision number: %d\n", devProp.major);
    printf("Minor revision number: %d\n", devProp.minor);
    printf("Name: %s\n", devProp.name);
    printf("Total global memory: %lu\n", devProp.totalGlobalMem);
    printf("Total shared memory per block: %lu\n", devProp.sharedMemPerBlock);
    printf("Total registers per block: %d\n", devProp.regsPerBlock);
    printf("Warp size: %d\n", devProp.warpSize);
    printf("Maximum memory pitch: %lu\n", devProp.memPitch);
    printf("Maximum threads per block: %d\n", devProp.maxThreadsPerBlock);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of block: %d\n", i, devProp.maxThreadsDim[i]);
    for (int i = 0; i < 3; ++i)
        printf("Maximum dimension %d of grid: %d\n", i, devProp.maxGridSize[i]);
    printf("Clock rate: %d\n", devProp.clockRate);
    printf("Total constant memory: %lu\n", devProp.totalConstMem);
    printf("Texture alignment: %lu\n", devProp.textureAlignment);
    printf("Concurrent copy and execution: %s\n", (devProp.deviceOverlap ? "Yes" : "No"));
    printf("Number of multiprocessors: %d\n", devProp.multiProcessorCount);
    printf("Kernel execution timeout: %s\n", (devProp.kernelExecTimeoutEnabled ? "Yes" : "No"));
    return;
}

// Fills A and B with N random values in [0, MAX_N). Same draw order as
// the original inline loops (A[i] then B[i] for each i).
static void fillRandom(unsigned int *A, unsigned int *B, unsigned int N)
{
    for(unsigned int i = 0 ; i < N; i++){
        A[i] = rand() % MAX_N;
        B[i] = rand() % MAX_N;
    }
}

// Counts co-prime pairs across two vectors, once on the CPU and once on
// the GPU, timing both. Input: no args -> 1000 random pairs; one arg ->
// that many random pairs; two args -> pairs read from the file argv[2].
int main(int argc, char * argv[]){
    //Create an array on N
    unsigned int N;
    unsigned int N2;
    size_t sz;
    size_t sz2;
    unsigned int * h_A;
    unsigned int * h_B;
    // BUG FIX: nThreads/best_dev were uninitialized when no device won
    // the selection below; give them safe defaults.
    unsigned int nThreads = 256;
    srand(time(NULL));

    //Query device properties and select the best device.
    int dev_count, best_dev = 0;
    cudaDeviceProp dev_prop_curr;
    // BUG FIX: the original kept a POINTER to dev_prop_curr as "the best
    // so far", so after the first assignment every later device was
    // compared against itself and never selected. Keep a value copy.
    cudaDeviceProp dev_prop_max;
    dev_prop_max.maxThreadsPerBlock = 0;
    dev_prop_max.multiProcessorCount = 0;
    dev_prop_max.clockRate = 0;

    cudaGetDeviceCount( &dev_count );
    for( int i = 0; i < dev_count ; i++ ){
        cudaGetDeviceProperties( &dev_prop_curr , i);
        printf("\n-------------------Device %d----------------------\n",i);
        printDevProp( dev_prop_curr );
        //Select the best device (must beat the best on all three axes)
        if( (dev_prop_curr.maxThreadsPerBlock > dev_prop_max.maxThreadsPerBlock) &&
            (dev_prop_curr.multiProcessorCount > dev_prop_max.multiProcessorCount) &&
            (dev_prop_curr.clockRate > dev_prop_max.clockRate) ){
            best_dev = i;
            dev_prop_max = dev_prop_curr;
            nThreads = dev_prop_curr.maxThreadsPerBlock;
        }
    }
    cudaSetDevice(best_dev);
    printf("\nBest Device found %d\n",best_dev);

    if(argc < 2){
        //Fill the arrays with 1000 random numbers
        N = 1000;
        //Round N up to a multiple of nThreads for the reduction kernel
        N2=((N-1)/nThreads + 1)*nThreads;
        sz = sizeof( unsigned int ) * N;
        sz2 = sizeof( unsigned int ) * N2;
        h_A = (unsigned int *)malloc( sz2 );
        h_B = (unsigned int *)malloc( sz );
        printf("Argc=%d Filling array A & B with %d integers %d\n", argc-1, N, N2);
        fillRandom(h_A, h_B, N);
    }else if(argc == 2){
        //Fill the arrays with argv[1] random numbers
        N = (unsigned int) atoi( argv[1] );
        N2=((N-1)/nThreads + 1)*nThreads;
        sz = sizeof( unsigned int ) * N;
        sz2 = sizeof( unsigned int ) * N2;
        h_A = (unsigned int *)malloc( sz2 );
        h_B = (unsigned int *)malloc( sz );
        printf("\nArgc=%d Filling array A & B with %d integers and %d\n\n", argc-1, N, N2);
        fillRandom(h_A, h_B, N);
    }else{
        //Read from file
        printf( "Reading input from file %s\n",argv[2]);
        FILE *fr = fopen(argv[2], "r");
        if( !fr ){
            fprintf(stderr, "Can't open input file!\n");
            exit(1);
        }
        char oneword[100];
        int count = 0;
        //First count the number of entries in the file
        while(fscanf(fr,"%s",oneword) != EOF ){
            count ++;
        }
        //Tokens alternate A,B,A,B,... -> N pairs. NOTE(review): an odd
        //token count leaves one trailing A value unused.
        N = count/2;
        N2=((N-1)/nThreads + 1)*nThreads;
        rewind(fr);
        //Now allocate the arrays (A padded up to N2 for the reduction)
        sz = sizeof( unsigned int ) * N;
        sz2 = sizeof( unsigned int ) * N2;
        printf("Starting %d, %d\n", N,N2);
        h_A = (unsigned int *)malloc( sz2 );
        h_B = (unsigned int *)malloc( sz );
        count = 0;
        while(fscanf(fr,"%s",oneword) != EOF && (unsigned int)(count/2) < N ){
            if(count % 2){
                h_B[count/2] = atoi( oneword );
            }
            else{
                h_A[count/2] = atoi( oneword );
            }
            count ++;
        }
        fclose(fr);
    }

    //Guard against empty/invalid input before touching the GPU.
    if (N == 0 || !h_A || !h_B){
        fprintf(stderr, "No input values to process\n");
        exit(1);
    }

    //--------------------------------------------------------------------
    //Serial Version
    //--------------------------------------------------------------------
    struct timeval t0, t1, t2;
    gettimeofday(&t0,0);
    unsigned int numCoPrimes = copV(N, h_A, h_B);
    gettimeofday(&t1,0);
    float timdiff1 = (1000000.0*(t1.tv_sec - t0.tv_sec) + (t1.tv_usec - t0.tv_usec)) / 1000000.0;
    printf ("\nDone: time taken for serial version is %3.1f s\n", timdiff1);
    printf("Number co-primes found by CPU = %d\n\n", numCoPrimes);

    //--------------------------------------------------------------------
    //Parallel Version
    //--------------------------------------------------------------------
    printf("Best device %d\n",best_dev);
    //Allocate vectors in device memory
    gettimeofday(&t1,0);
    unsigned int * d_A;
    GPU_CHECKERROR( cudaMalloc(&d_A,sz2));
    unsigned int * d_B;
    GPU_CHECKERROR( cudaMalloc(&d_B,sz));
    GPU_CHECKERROR( cudaMemset((void *) &d_A[N],0,sz2-sz) ); //Pad with 0's
    //Copy vectors from host to device memory.
    //BUG FIX: the original copied sz2 bytes of h_A, overwriting the zero
    //padding just written above with uninitialized host memory; only the
    //N real elements (sz bytes) are copied now.
    GPU_CHECKERROR( cudaMemcpy(d_A,h_A,sz,cudaMemcpyHostToDevice));
    GPU_CHECKERROR( cudaMemcpy(d_B,h_B,sz,cudaMemcpyHostToDevice));

    unsigned int nBlocks = (N + nThreads - 1)/nThreads;
    copV_GPU<<< nBlocks, nThreads>>>(N,d_A,d_B);
    GPU_CHECKERROR( cudaGetLastError() ); //catch bad launch configurations
    cudaDeviceSynchronize();

    //Reduce the 0/1 flags; each block's total lands at the block's first
    //element (requires nThreads to be a power of two -- true for
    //maxThreadsPerBlock on all known devices).
    reduce_GPU<<< nBlocks, nThreads>>>(d_A);
    GPU_CHECKERROR( cudaGetLastError() );
    cudaDeviceSynchronize();

    //Fetch the results and add up the per-block totals.
    GPU_CHECKERROR( cudaMemcpy( (void *) h_A, (void *) d_A, sz2, cudaMemcpyDeviceToHost) );
    int sum = 0;
    for( unsigned int i = 0; i < N2 ; i++ ){
        if(!(i%nThreads)){
            sum += h_A[i];
        }
    }
    printf("Number co-primes found by GPU = %d Num reduced %d\n", sum, N2);
    gettimeofday(&t2,0);
    float timdiff2 = (1000000.0*(t2.tv_sec - t1.tv_sec) + (t2.tv_usec - t1.tv_usec)) / 1000000.0;
    //BUG FIX: report the actual thread count (the original printed a
    //hard-coded 0 here).
    printf ("done: time taken for parallel version is %3.1f s threads %d\n", timdiff2, nThreads);

    //Free device memory
    GPU_CHECKERROR( cudaFree(d_A) );
    GPU_CHECKERROR( cudaFree(d_B) );
    //Free Host memory
    free(h_A);
    free(h_B);
}
12,823
#include <stdint.h>
#include <stdio.h>

/* Lattice/work-group geometry.
 * WST/WSU/WSV : sub-cells per block edge along t/u/v; WS = threads/block.
 * CELL_LENGTH : lattice sites per sub-cell edge; CELL_SIZE = sites/sub-cell.
 * BLOCK_SIZE  : lattice sites handled by one thread block.
 * WLT/WLU/WLV : lattice sites per block edge along t/u/v. */
#define uint uint32_t
#define WST 8
#define WSU 8
#define WSV 8
#define WS (WST*WSU*WSV)
#define CELL_LENGTH 3
#define CELL_SIZE (CELL_LENGTH*CELL_LENGTH*CELL_LENGTH)
#define BLOCK_SIZE (WS*CELL_SIZE)
#define WLT (WST*CELL_LENGTH)
#define WLU (WSU*CELL_LENGTH)
#define WLV (WSV*CELL_LENGTH)
#define WS_MASK (WS-1)
#define TID_MASK (WST-1)
#define UID_MASK (WSU-1)
#define VID_MASK (WSV-1)
/* Duplicate of the include/define above; harmless (identical redefinition)
 * but could be removed. */
#include <stdint.h>
#define uint uint32_t

/* Xorshift-style PRNG with 128-bit state held in a uint4; advances the
 * state and returns the new 32-bit word. */
__device__ uint Rng4(uint4& state){
  uint t=state.w;
  t^= t << 11;
  t^= t >> 8;
  state.w=state.z; state.z=state.y; state.y=state.x;
  t ^= state.x;
  t ^= state.x>>19;
  state.x=t;
  return t;
}

/* Maps lattice coordinates (t,u,v) to the flat shared-lattice index:
 * bits 9+ select the 3x3x3 cell-local site (t%3, u%3, v%3), bits 0-8
 * select the sub-cell (t/3, u/3, v/3). Inverse of IndexToTUV. */
__device__ int TUVToIndex(int t, int u, int v){
  int index=0;
  index += (t%3)<<9;
  index += ((u%3)*3)<<9;
  index += ((v%3)*9)<<9;
  index += (t/3)+(u/3)*WST+(v/3)*WST*WSU;
  return index;
}

/* Recovers lattice coordinates (t,u,v) from a flat index produced by
 * TUVToIndex. */
__device__ void IndexToTUV(int index, int& t, int& u, int& v){
  t=(index>>9)%3;
  u=((index>>9)/3)%3;
  v=((index>>9)/9);
  t+= (index&0x7)*3;
  u+= ((index>>3)&0x7)*3;
  v+= ((index>>6)&0x7)*3;
}

/* Moves an index one step along the unit vector packed in `unit` (bits
 * 0-2 = +t/+u/+v components, bit 3 = subtract one from all three). Sets
 * OOB nonzero when any coordinate leaves [0, WL*-1], zero otherwise; the
 * returned index is only meaningful when OOB == 0. */
__device__ int AddUnitToIndex(int unit, int index, int& OOB){
  int dt = ((unit>>0)&0x1);
  int du = ((unit>>1)&0x1);
  int dv = ((unit>>2)&0x1);
  int dw = (unit>>3)&0x1;
  int t,u,v;
  IndexToTUV(index, t,u,v);
  t+= dt-dw;
  u+= du-dw;
  v+= dv-dw;
  /* Each term is 0 iff the coordinate is in range (integer division). */
  OOB = ((t+WLT)/WLT-1);
  OOB |= ((u+WLU)/WLU-1);
  OOB |= ((v+WLV)/WLV-1);
  int newIndex = TUVToIndex(t,u,v);
  return newIndex;
}

/* Combines two packed unit vectors a and b into c when the combination
 * is representable; returns 0 when invalid (overlapping components that
 * don't complement, or the disallowed results 0x3/0xc). */
__device__ uint GPUValidateAddUnitVectors(int a, int b, int& c){
  int valid;
  if((a|b) != 0xf && (a&b)) return 0;
  c = (((a|b)==0xf)?(a&b):(a|b));
  valid = (c==0x3||c==0xc)?0:1;
  return valid;
}

/* Unchecked combination of two packed unit vectors (same rule as above,
 * without validation). */
__device__ uint GPUAddUnitVectors(uint a, uint b){
  return (((a|b)==0xf)?(a&b):(a|b));
}

/* Forward transition ("expand") Monte Carlo move at `index`.
 *
 * Each occupied lattice byte packs, per the masks used below: bits 0-3 a
 * unit vector to the next site ("next"), bits 4-5 label bits, bit 6 a
 * flag referred to as "sl" -- presumably a stored-length/slack marker;
 * confirm against the host-side encoding. The move splits the bond `next`
 * into two bonds (newBond1, newBond2) through a randomly chosen empty
 * destination site, using the `trans` table to pick candidate bonds and
 * PRNG bits for tie-breaking. Aborts (returns) whenever the step leaves
 * the block's lattice region, the destination is occupied, or no slack
 * flag is available on either end. */
__device__ void TransForw(char* lattice, int index, uint* trans, uint4& rngState){
  int OOB;
  int latSiteComplete = lattice[index];
  if(!latSiteComplete) return;
  int next = latSiteComplete&0xf;
  int label = latSiteComplete&0x30;
  int sl = latSiteComplete&0x40;
  int newIndex = AddUnitToIndex(next, index, OOB);
  if(OOB) return;
  int newSiteComp = lattice[newIndex];
  int newSl=newSiteComp&0x40;
  /* Need an sl flag on at least one of the two sites. */
  if(sl+newSl==0) return;
  uint rand = Rng4(rngState);
  /* Pick one of the two table entries for this bond, then derive the
   * complementary second bond; rand bit 1 decides their order. */
  int newBond1 = (trans[next/4]>>(4*(2*(next%4)+(rand&0x1))))&0xf;
  int newBond2 = GPUAddUnitVectors((~newBond1)&0xf, next);
  int temp = newBond1;
  newBond1 = (rand&0x2)?newBond1:newBond2;
  newBond2 = (rand&0x2)?newBond2:temp;
  int destIndex = AddUnitToIndex(newBond1,index, OOB);
  if(OOB) return;
  int destSiteComp = lattice[destIndex];
  if(destSiteComp) return;
  /* Decide which end donates its sl flag (random if both have one). */
  int moveFirst;
  if(sl+newSl==0x80){
    moveFirst = (rand&0x4)>>2;
  }
  else if(sl) moveFirst = 1;
  else moveFirst = 0;
  // int t,u,v, tn, un, vn,td,ud,vd;
  // IndexToTUV(index, t,u,v);
  // IndexToTUV(newIndex,tn,un,vn);
  // IndexToTUV(destIndex,td,ud,vd);
  // printf("index=%i (%i,%i,%i) [%x], newIndex=%i (%i %i %i) [%x], destIndex=%i (%i %i %i) [%x]\n", index, t,u,v, latSiteComplete, newIndex, tn,un,vn, newSiteComp, destIndex, td,ud,vd, destSiteComp);
  destSiteComp = newBond2;
  if(moveFirst){
    latSiteComplete = newBond1|((label>>1)&0x10);
    destSiteComp |= label&0x10;
  }
  else{
    latSiteComplete = newBond1|label|sl;
    destSiteComp |= (newSiteComp&0x20)>>1;
    newSiteComp = newSiteComp&0x1f;
  }
  // printf("index=%i (%i,%i,%i) [%x], newIndex=%i (%i %i %i) [%x], destIndex=%i (%i %i %i) [%x]: %x=>%x+%x, %x\n", index, t,u,v, latSiteComplete, newIndex, tn,un,vn, newSiteComp, destIndex, td,ud,vd, destSiteComp, next, newBond1, newBond2, (trans[next/4]>>(4*(2*(next%4))))&0xff);
  lattice[index] = latSiteComplete;
  lattice[destIndex] = destSiteComp;
  if(!moveFirst) lattice[newIndex] = newSiteComp;
}

/* Backward transition ("contract") Monte Carlo move at `index`: merges
 * the bond at `index` with the following site's bond into a single bond
 * (validated by GPUValidateAddUnitVectors), freeing the intermediate
 * site and re-packing the label/sl bits onto one of the remaining ends
 * (random when both ends lack the sl flag). Mirrors TransForw. */
__device__ void TransBack(char* lattice, int index, uint* trans, uint4& rngState){
  int OOB;
  int latSiteComplete = lattice[index];
  int next = latSiteComplete&0xf;
  int label = latSiteComplete&0x30;
  int sl = latSiteComplete&0x40;
  if(!latSiteComplete) return;
  int srcIndex = AddUnitToIndex(next, index, OOB);
  if(OOB) return;
  int srcSiteComp = lattice[srcIndex];
  int srcNext = srcSiteComp&0xf;
  int srcLabel= srcSiteComp&0x30;
  int srcSl = srcSiteComp&0x40;
  int newNext;
  if(srcSl) return;
  if(!GPUValidateAddUnitVectors(next, srcNext, newNext)) return;
  int newIndex = AddUnitToIndex(newNext, index, OOB);
  if(OOB) return;
  int newSiteComp = lattice[newIndex];
  int newSiteSl = newSiteComp&0x40;
  /* Both ends already carrying the sl flag blocks the move. */
  if(sl+newSiteSl == 0x80) return;
  uint rand = Rng4(rngState);
  int moveFirst;
  if(sl+newSiteSl == 0x0){
    moveFirst = rand&0x1;
  }
  else if(sl == 0x40) moveFirst = 0;
  else moveFirst = 1;
  if(moveFirst){
    latSiteComplete = newNext|(label<<1)|srcLabel|0x40;
  }
  else{
    latSiteComplete = newNext|label|sl;
    newSiteComp = (newSiteComp&0x3f)|(srcLabel<<1)|0x40;
  }
  // printf("%x + %x -> %x\n", next, srcNext, newNext);
  lattice[srcIndex]=0;
  lattice[index] = latSiteComplete;
  lattice[newIndex] = newSiteComp;
}

/* Diffuses the sl flag (and its associated label bit) between a site and
 * its bond successor; only fires when exactly one of the two sites holds
 * the flag (newSiteSl + sl == 0x40). */
__device__ void DiffuseSL(char* lattice, int index){
  int OOB;
  int latSiteComplete = lattice[index];
  int next = latSiteComplete&0xf;
  int label = latSiteComplete&0x30;
  int sl = latSiteComplete&0x40;
  if(!latSiteComplete) return;
  int newIndex = AddUnitToIndex(next, index, OOB);
  if(OOB) return;
  int newSiteComp = lattice[newIndex];
  int newSiteLabel = newSiteComp&0x30;
  int newSiteSl = newSiteComp&0x40;
  if(newSiteSl + sl != 0x40) return;
  if(sl){
    /* Push the flag forward onto the successor. */
    newSiteComp = newSiteComp | ((label&0x10)<<1) | 0x40;
    latSiteComplete = next|((label>>1)&0x10);
  }
  else{
    /* Pull the flag back from the successor. */
    latSiteComplete = next|(label<<1)|((newSiteLabel>>1)&0x10)|0x40;
    newSiteComp = newSiteComp&0x1f;
  }
  // if(!sl){
  //   int t,u,v, tn, un, vn;
  //   IndexToTUV(index, t,u,v);
  //   IndexToTUV(newIndex,tn,un,vn);
  //   printf("sl=%i, next=%x, index=%i (%i,%i,%i), newIndex=%i (%i %i %i)\n", sl, next, index, t,u,v, newIndex, tn,un,vn);
  lattice[index] = latSiteComplete;
  lattice[newIndex] = newSiteComp;
  // }
}

/* Main Monte Carlo kernel: one block owns a BLOCK_SIZE region of the
 * global lattice. Phases:
 *  1. Gather the block's region from srcLattice into shared memory. The
 *     dtuv offset shifts the region origin (domain-decomposition shift
 *     between sweeps); the p/pSwitchNext bookkeeping walks the 8 wrapped
 *     octants of the shifted region.
 *  2. nStep sweeps; each sweep draws one random word (rejection-sampled
 *     below 8081*27^4 so four base-27 digits are unbiased) and applies
 *     TransForw / DiffuseSL / TransForw / TransBack, each at a random
 *     3x3x3 cell site, with block-wide barriers between sub-moves.
 *     NOTE(review): the extra bare __syncthreads() calls alongside the
 *     commented-out move sequence are leftovers; they are harmless
 *     (uniformly executed) and preserved as-is.
 *  3. Scatter the shared lattice back to dstLattice, re-tiled for the
 *     next offset dtuv_next: each of the 8 octants goes to the
 *     appropriate neighbouring block's slot.
 *  4. Persist both PRNG states.
 * seeds holds two uint4 PRNG states per thread (rngp for move decisions,
 * rngl for site selection); gTrans is the 4-word bond-splitting table;
 * NWT/NWU/NWV are the block-grid dimensions. */
__global__ void polmove(int nStep, uint4* seeds, char* srcLattice, char* dstLattice, uint* gTrans, int dtuv, int dtuv_next, uint NWT, uint NWU, uint NWV){
  __shared__ char lattice[BLOCK_SIZE];
  uint trans[4];
  int lid = threadIdx.x;
  int wid = blockIdx.x;
  int gid = wid * blockDim.x + lid;
  /* Block coordinates within the NWT x NWU x NWV grid. */
  int widt = wid%NWT;
  int widu = (wid/NWT)%NWU;
  int widv = wid/(NWU*NWT);
  uint4 rngl;
  uint4 rngp;
  uint site;
  /* Decompose the region-shift offset into per-axis shifts. */
  int dt = dtuv%WLT;
  int du = (dtuv/WLT)%(WLU);
  int dv = dtuv/(WLT*WLU);
  int p=0;
  int dtBlock=WLT-dt;
  int duBlock=WLU-du;
  int dvBlock=WLV-dv;
  int pSwitchNext=dtBlock*duBlock*dvBlock;
  int memOffSet=0;
  // printf("pSwitchNext=%i\n", pSwitchNext);
  int src;
  /* Phase 1: coalesced 4-byte-strided gather into the shared lattice,
   * mapping linear source offsets through the 8 shifted octants. */
  for(src=lid*4; src<BLOCK_SIZE; src += 4*WS){
    for(int i=0; i<4 && i+src<BLOCK_SIZE; i++){
      while(i+src>=pSwitchNext){
        memOffSet = pSwitchNext;
        p++;
        dtBlock = (p&0x1)?dt:(WLT-dt);
        duBlock = (p&0x2)?du:(WLU-du);
        dvBlock = (p&0x4)?dv:(WLV-dv);
        pSwitchNext += dtBlock*duBlock*dvBlock;
      }
      int offSet = src+i-memOffSet;
      int t = ((p&0x1)?(WLT-dt):0) + (offSet%dtBlock);
      int u = ((p&0x2)?(WLU-du):0) + ((offSet/dtBlock)%duBlock);
      int v = ((p&0x4)?(WLV-dv):0) + (offSet/(dtBlock*duBlock));
      int index = TUVToIndex(t,u,v);
      lattice[index]=srcLattice[src+i+wid*BLOCK_SIZE];
    }
  }
  for(int i=0; i<4; i++) trans[i] = gTrans[i];
  /* Scatter of lid bits into this thread's base cell index. */
  int indexStart = ((lid&0x1f)<<2)|((lid&0x60)>>5)|(lid&0x180);
  rngp = seeds[gid*2];
  rngl = seeds[gid*2+1];
  __syncthreads();
  /* Phase 2: the Monte Carlo sweeps. */
  for(int i=0; i<nStep; i++){
    // site = indexStart | ((Rng4(rngl)%27)<<9);
    // DiffuseSL(lattice, site);
    __syncthreads();
    /* Rejection-sample so the four %27 digits below are unbiased. */
    uint randLoc;
    do {
      randLoc = Rng4(rngl);
    }while(randLoc>=4294574721); /// 8081*27^4, so that we are getting good random numbers.
    // site = indexStart | ((Rng4(rngl)%27)<<9);
    // TransForw(lattice, site, trans, rngp);
    __syncthreads();
    //
    // site = indexStart | ((Rng4(rngl)%27)<<9);
    // DiffuseSL(lattice, site);
    __syncthreads();
    //
    // site = indexStart | ((Rng4(rngl)%27)<<9);
    // TransForw(lattice, site, trans, rngp);
    __syncthreads();
    //
    // site = indexStart | ((Rng4(rngl)%27)<<9);
    // TransBack(lattice, site, trans, rngp);
    __syncthreads();
    site = indexStart | ((randLoc%27)<<9);
    TransForw(lattice, site, trans, rngp);
    __syncthreads();
    randLoc /= 27;
    site = indexStart | ((randLoc%27)<<9);
    DiffuseSL(lattice, site);
    __syncthreads();
    randLoc /= 27;
    site = indexStart | ((randLoc%27)<<9);
    TransForw(lattice, site, trans, rngp);
    __syncthreads();
    randLoc /= 27;
    site = indexStart | ((randLoc%27)<<9);
    TransBack(lattice, site, trans, rngp);
    __syncthreads();
  }
  /* Phase 3: scatter back, re-tiled for the next offset; each octant p
   * lands in the neighbouring block selected by the p bits. */
  dt = dtuv_next%WLT;
  du = (dtuv_next/WLT)%(WLU);
  dv = dtuv_next/(WLT*WLU);
  memOffSet=0;
  // printf("????\n");
  for(int p=0; p<8; p++){
    int dtBlock = (p&0x1)?dt:(WLT-dt);
    int duBlock = (p&0x2)?du:(WLU-du);
    int dvBlock = (p&0x4)?dv:(WLV-dv);
    int dstWid = (widt+NWT-(((p>>0)&0x1)))%NWT;
    dstWid += ((widu+NWU-(((p>>1)&0x1)))%NWU)*NWT;
    dstWid += ((widv+NWV-(((p>>2)&0x1)))%NWV)*NWT*NWU;
    // if(lid==0)
    //   printf("p=%i, wid=(%i,%i,%i), dstWid=(%i,%i,%i)=%i\n", p,widt,widu,widv,(widt+NWT-(((p>>0)&0x1)))%NWT, ((widu+NWU-(((p>>1)&0x1)))%NWU), ((widv+NWV-(((p>>2)&0x1)))%NWV), dstWid);
    // if(lid==0 && wid==0)
    //   printf("block=(%i,%i,%i), p=%i\n", dtBlock, duBlock, dvBlock, p);
    for(int i=lid; i<dtBlock*duBlock*dvBlock; i+=WS){
      int t = i%dtBlock + ((p&0x1)?0:dt);
      int u = (i/dtBlock)%duBlock + ((p&0x2)?0:du);
      int v = i/(dtBlock*duBlock) + ((p&0x4)?0:dv);
      int dst = dstWid*BLOCK_SIZE+memOffSet+i;
      int index = TUVToIndex(t, u, v);
      // if(lid%55==0)
      //   printf("dstWid=%i,%i (p=%i), memOffSet=%i, i=%i, (%i,%i,%i)\n", dstWid, dst, p, memOffSet, i, t,u,v);
      dstLattice[dst] = lattice[index];
    }
    memOffSet += dtBlock*duBlock*dvBlock;
  }
  /* Phase 4: persist both PRNG states for the next launch. */
  seeds[gid*2]=rngp;
  seeds[gid*2+1]=rngl;
  __syncthreads();
}
12,824
#include <stdio.h>
#include <assert.h>

// Kernel: reverses an array of exactly gridDim.x * blockDim.x elements.
// Thread with global index g writes its input element to the mirrored
// position N-1-g (algebraically identical to the original per-block
// offset computation).
__global__ void reverseArrayBlock(int *out_d, int *in_d)
{
    int srcIdx = blockIdx.x * blockDim.x + threadIdx.x;
    int dstIdx = gridDim.x * blockDim.x - 1 - srcIdx;
    out_d[dstIdx] = in_d[srcIdx];
}

// Host driver: fills an array with 0..N-1, reverses it on the device,
// and asserts the round-tripped result is exactly reversed.
int main(int argc, char** argv)
{
    // 256K elements (1MB total), one element per thread
    const int elemCount = 256 * 1024;
    const int threadsPerBlock = 256;
    const int blockCount = elemCount / threadsPerBlock;
    const size_t bytes = blockCount * threadsPerBlock * sizeof(int);

    // host buffer and the two device buffers
    int *h_data = (int *) malloc(bytes);
    int *d_in = NULL;
    int *d_out = NULL;
    cudaMalloc((void **) &d_in, bytes);
    cudaMalloc((void **) &d_out, bytes);

    // initialise the input on the host
    for (int i = 0; i < elemCount; ++i)
        h_data[i] = i;

    // upload, reverse on the device, download
    cudaMemcpy(d_in, h_data, bytes, cudaMemcpyHostToDevice);
    reverseArrayBlock<<<blockCount, threadsPerBlock>>>(d_out, d_in);
    cudaMemcpy(h_data, d_out, bytes, cudaMemcpyDeviceToHost);

    // verify every element landed in its mirrored slot
    for (int i = 0; i < elemCount; i++)
        assert(h_data[i] == elemCount - 1 - i);

    // release device then host memory
    cudaFree(d_in);
    cudaFree(d_out);
    free(h_data);

    // reaching this point means all assertions passed
    printf("Correct!\n");
    return 0;
}
12,825
#include <stdio.h>

// Device-side greeting: every launched thread prints its own block and
// thread index via device printf.
__global__ void helloFromGPU()
{
    printf("Hello from GPU! BlockID: %d - ThreadID: %d.\n", blockIdx.x, threadIdx.x);
}

// Host entry point: greet from the CPU, then launch 2 blocks of 5 threads.
int main()
{
    printf("Hello from CPU!\n");

    dim3 grid(2);
    dim3 block(5);
    helloFromGPU<<<grid, block>>>();

    // Tearing down the context also flushes the device-side printf buffer.
    cudaDeviceReset();
    return 0;
}
12,826
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

// Threads per block for the matrix-multiply launch.
// FIX: previously returned 1, which made the kernel execute the entire
// O(n^3) multiply on a single GPU thread.
int calculate_no_threads(int array_size){
    return 256;
}

// Print a square array_size x array_size row-major matrix.
void print_results(double *ARRAY, int array_size){
    printf("[\n");
    for(int i = 0; i < array_size; i++){
        printf("{");
        for(int j = 0; j < array_size; j++){
            printf("%1.1f,", ARRAY[(i * array_size) + j]);
        }
        printf("}\n");
    }
    printf("]");
    printf("\n");
}

// Matrix product for square row-major matrices: T = C = A * B.
// Each thread owns one output cell and strides over the grid, so any
// launch configuration (even a single block) still computes the full result.
__global__ void vector_dot_product(double *CUDA_A, double *CUDA_B, double *CUDA_C, double *CUDA_T, int array_size, int no_threads)
{
    int total  = array_size * array_size;
    int stride = gridDim.x * blockDim.x;
    for (int cell = blockIdx.x * blockDim.x + threadIdx.x; cell < total; cell += stride) {
        int row = cell / array_size;
        int col = cell % array_size;
        double acc = 0.0;
        for (int j = 0; j < array_size; j++) {
            acc += CUDA_A[row * array_size + j] * CUDA_B[j * array_size + col];
        }
        CUDA_T[cell] = acc;
        // FIX: CUDA_C was copied back to the host but never written before.
        CUDA_C[cell] = acc;
    }
}

int main(){
    //int array_size = 7900;
    int array_size = 3000;
    double *C, *A, *B, *T;
    double *CUDA_A, *CUDA_B, *CUDA_C, *CUDA_T;
    size_t bytes = sizeof(double) * array_size * array_size;

    A = (double *)malloc(bytes);
    B = (double *)malloc(bytes);
    T = (double *)malloc(bytes);
    C = (double *)malloc(bytes);

    // Fill inputs with random values in [0, 0.5).
    double a = 0.5;
    for(int i = 0; i < (array_size * array_size); i++){
        A[i] = ((double)rand()/(double)(RAND_MAX)) * a;
        B[i] = ((double)rand()/(double)(RAND_MAX)) * a;
    }

    // Allocate device memory.
    cudaMalloc((void**)&CUDA_A, bytes);
    cudaMalloc((void**)&CUDA_B, bytes);
    cudaMalloc((void**)&CUDA_C, bytes);
    cudaMalloc((void**)&CUDA_T, bytes);

    // Transfer the inputs.  (T and C are pure outputs; the old code copied
    // uninitialized host T to the device for no reason.)
    cudaMemcpy(CUDA_A, A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(CUDA_B, B, bytes, cudaMemcpyHostToDevice);

    int no_threads = calculate_no_threads(array_size);
    printf("calculate_no_threads %d\n", no_threads);

    // Ceil-divide so every output cell gets a thread on the first pass.
    int no_blocks = (array_size * array_size + no_threads - 1) / no_threads;
    vector_dot_product<<<no_blocks, no_threads>>>(CUDA_A, CUDA_B, CUDA_C, CUDA_T, array_size, no_threads);

    cudaMemcpy(C, CUDA_C, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(T, CUDA_T, bytes, cudaMemcpyDeviceToHost);

    puts("DOT_PRODUCT");
    print_results(A, array_size);
    print_results(B, array_size);
    puts("MATRIX MULTI");
    print_results(T, array_size);

    cudaFree(CUDA_A);
    cudaFree(CUDA_B);
    cudaFree(CUDA_C);
    cudaFree(CUDA_T);
    free(C);
    free(A);
    free(B);
    free(T);
}
12,827
#include "cuda_runtime.h"
#include <stdio.h>
#include <iostream>
#include <time.h>

#define M 5
#define N 3

// Fill an int array with random values in [0, 100).
void init_rand(int *data, int size){
    for (int i = 0; i < size; ++i)
        data[i] = rand() % 100;
}

// Print an m x n row-major int matrix.  (Name kept for interface
// compatibility; despite "cplx" it prints plain ints.)
void print_cplx(int *data, int m, int n){
    for(int i = 0; i < m; i++){
        for(int j = 0; j < n; j++){
            printf("%d ", data[i*n + j]);
        }
        printf("\n");
    }
    printf("\n");
}

// Transpose an m x n matrix launched as <<<m, n>>>: one block per source
// row, one thread per source column.  X is m x n, X_T is n x m, row-major.
__global__ void transpose(int *X, int *X_T, int m, int n);

int main(void)
{
    int *d_a, *d_b;
    int *h_a, *h_b;
    time_t t;
    srand(time(&t));

    int memsize = sizeof(int) * M * N;
    h_a = (int*)malloc(memsize);
    h_b = (int*)malloc(memsize);
    // FIX: the third host/device buffer pair (h_c/d_c) was allocated and
    // freed without ever being used; it has been removed.

    init_rand(h_a, M*N);
    print_cplx(h_a, M, N);

    cudaMalloc((void **)&d_a, memsize);
    cudaMalloc((void **)&d_b, memsize);

    cudaMemcpy(d_a, h_a, memsize, cudaMemcpyHostToDevice);
    transpose<<<M, N>>>(d_a, d_b, M, N);
    cudaMemcpy(h_b, d_b, memsize, cudaMemcpyDeviceToHost);
    print_cplx(h_b, N, M);

    /* Roll back: transpose the transpose and print the original again. */
    cudaMemcpy(d_a, h_b, memsize, cudaMemcpyHostToDevice);
    transpose<<<N, M>>>(d_a, d_b, N, M);
    cudaMemcpy(h_a, d_b, memsize, cudaMemcpyDeviceToHost);
    print_cplx(h_a, M, N);

    free(h_a);
    free(h_b);
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}

__global__ void transpose(int *X, int *X_T, int m, int n){
    int idxX = threadIdx.x;   // source column
    int idxY = blockIdx.x;    // source row
    int idx_in  = idxX + n*idxY;   // X[idxY][idxX]
    int idx_out = idxY + m*idxX;   // X_T[idxX][idxY]
    X_T[idx_out] = X[idx_in];
}
12,828
#include <stdio.h>
#include <math.h>

// Number of elements.  FIX: was `const short N` — int avoids any 16-bit
// arithmetic in size and launch-configuration calculations.
const int N = 10240;

// CUDA kernel for vector addition: dev_c[i] = dev_a[i] + dev_b[i].
__global__ void Vector_Addition(const int *dev_a, const int *dev_b, int *dev_c)
{
    // Global id of the thread within the grid.
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)   // boundary check: the grid may be larger than N
        dev_c[tid] = dev_a[tid] + dev_b[tid];
}

int main(void)
{
    // Host arrays (static storage; ~120 KB total).
    int Host_a[N], Host_b[N], Host_c[N];
    // Device arrays.
    int *dev_a, *dev_b, *dev_c;

    // Allocate the memory on the GPU.
    cudaMalloc((void **)&dev_a, N * sizeof(int));
    cudaMalloc((void **)&dev_b, N * sizeof(int));
    cudaMalloc((void **)&dev_c, N * sizeof(int));

    // Fill the host arrays on the CPU.
    for (int i = 0; i < N; i++) {
        Host_a[i] = -i;    //sin(i)*sin(i);
        Host_b[i] = i * i; //cos(i)*cos(i);
    }

    // Copy host arrays to device arrays.
    cudaMemcpy(dev_a, Host_a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, Host_b, N * sizeof(int), cudaMemcpyHostToDevice);

    // FIX: ceil-divide the block count so every element is covered even
    // when N is not a multiple of 512 (the kernel guards the tail block).
    Vector_Addition<<<(N + 511) / 512, 512>>>(dev_a, dev_b, dev_c);

    // Copy back to host (a blocking copy also synchronises with the kernel).
    cudaMemcpy(Host_c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    //Display the result
    //for ( int i = 0; i<N; i++ )
    //    printf ("%d + %d = %d\n", Host_a[i] , Host_b[i] , Host_c[i] ) ;

    // Free the device memory.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
12,829
#include "includes.h"

using namespace std;

#define GAUSS_WIDTH 5
#define SOBEL_WIDTH 3

// In-memory representation of a P5 (grayscale) pnm image.
typedef struct images {
    char *pType;           // pnm magic string (only "P5" is supported here)
    int width;
    int height;
    int maxValColor;       // maximum sample value declared in the header
    unsigned char *data;   // width*height grayscale samples, row-major
} image;

// Apply the 3x3 Sobel operator to a grayscale image.
// One thread per pixel on a 2D launch; border pixels are left untouched.
//   in        - input samples, ih rows x iw columns, row-major
//   intensity - output gradient magnitude, clamped to [0, 255]
//   direction - output gradient angle in radians, atan2(gy, gx)
__global__ void applySobelFilter(unsigned char *in, unsigned char *intensity, float *direction, int ih, int iw)
{
    int x = (blockIdx.x * blockDim.x) + threadIdx.x;
    int y = (blockIdx.y * blockDim.y) + threadIdx.y;
    int gx, gy;

    // Skip the one-pixel border so the 3x3 window never reads out of bounds.
    if (x > 0 && x + 1 < iw && y > 0 && y + 1 < ih) {
        // Horizontal kernel [1 0 -1; 2 0 -2; 1 0 -1] (center column is zero).
        gx = 1 * in[(y - 1) * iw + (x - 1)] + (-1) * in[(y - 1) * iw + (x + 1)]
           + 2 * in[y * iw + (x - 1)]       + (-2) * in[y * iw + (x + 1)]
           + 1 * in[(y + 1) * iw + (x - 1)] + (-1) * in[(y + 1) * iw + (x + 1)];

        // Vertical kernel [1 2 1; 0 0 0; -1 -2 -1] (center row is zero).
        gy = 1 * in[(y - 1) * iw + (x - 1)] + 2 * in[(y - 1) * iw + x] + 1 * in[(y - 1) * iw + (x + 1)]
           + (-1) * in[(y + 1) * iw + (x - 1)] + (-2) * in[(y + 1) * iw + x] + (-1) * in[(y + 1) * iw + (x + 1)];

        // FIX: |gradient| can reach ~1443 (4*255*sqrt(2)); the old cast to
        // unsigned char wrapped instead of saturating.  Clamp to 255.
        // Also use the float overloads (sqrtf/atan2f) in this float kernel.
        float mag = sqrtf((float)gx * (float)gx + (float)gy * (float)gy);
        intensity[y * iw + x] = (unsigned char)(mag > 255.0f ? 255.0f : mag);
        direction[y * iw + x] = atan2f((float)gy, (float)gx);
    }
}
12,830
#include "includes.h"

// One column step (column k) of a Cholesky factorization on the matrix U
// (MATRIX_SIZE x MATRIX_SIZE, row-major; MATRIX_SIZE comes from includes.h).
// Global thread 0 takes the square root of the diagonal element, then
// block 0's threads divide the rest of row k by it, each thread striding
// by `stride` starting at its own threadIdx offset.
__global__ void chol_kernel_optimized_div(float * U, int k, int stride)
{
    // General (global) thread id.
    int tx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int j;
    unsigned int num_rows = MATRIX_SIZE;

    // Only global thread 0 updates the diagonal element.
    if (tx == 0) {
        U[k * num_rows + k] = sqrt(U[k * num_rows + k]);
        // No positivity check — "live life on the edge" per the original.
    }

    // FIX: the division loop below reads U[k*num_rows+k], which thread 0 of
    // block 0 has just written.  Without a barrier the other threads of
    // block 0 raced with that write and could divide by the pre-sqrt value.
    // __syncthreads() is placed in uniform control flow so every thread of
    // every block reaches it.
    __syncthreads();

    // Each thread of block 0 handles j = threadIdx.x + k + 1, then strides:
    // thread 0 does k+1, k+1+stride, ...; thread 1 does k+2, k+2+stride, ...
    int offset  = (k + 1);          // from the original sequential loop
    int jstart  = threadIdx.x + offset;
    int jstep   = stride;
    int jtop    = num_rows - 1;     // upper bound of the original loop
    int jbottom = (k + 1);          // lower bound of the original loop

    // Division step: only block 0 performs it.
    if (blockIdx.x == 0) {
        for (j = jstart; (j >= jbottom) && (j <= jtop); j += jstep) {
            U[k * num_rows + j] /= U[k * num_rows + k];
        }
    }
}
12,831
#include "includes.h"

// For each atom ii, find the nearby atoms (squared distance < 34.0) among
// the first num_atom entries of coord (xyz triples), count them into
// close_num[ii], and write their indices into row ii of close_idx using a
// block-level Blelloch exclusive scan over the 0/1 flags.
//   coord      - 3*num_atom floats, xyz per atom
//   close_num  - per-atom neighbour count (accumulated across scan passes)
//   close_flag - scratch 0/1 matrix; row idy belongs to this block's atom
//   close_idx  - row ii holds the neighbour indices, packed from the front
// Each block processes atoms ii = blockIdx.x, blockIdx.x + gridDim.x, ...
// NOTE(review): the scan assumes num_atom2 is a multiple of 2*blockDim.x
// and that close_idx rows hold 1024 slots — confirm against the caller.
__global__ void dist_calc ( float *coord, int *close_num, int *close_flag, int *close_idx, int num_atom, int num_atom2)
{
    // close_flag is a 1024 x num_atom2 int matrix initialized to 0.
    // close_idx: A num_atom x 200 int matrix, row i of which only the first close_num[i] elements are defined. (Otherwise it's -1).
    __shared__ float x_ref, y_ref, z_ref;   // coordinates of reference atom ii
    __shared__ int idz;                     // scan-segment counter (shared by the whole block)
    __shared__ int temp[2048];              // scan workspace, 2 entries per thread
    // Calc distance
    for (int ii = blockIdx.x; ii < num_atom; ii += gridDim.x) {
        // Thread 0 loads the reference atom's coordinates into shared memory.
        if (threadIdx.x == 0) {
            x_ref = coord[3*ii  ];
            y_ref = coord[3*ii+1];
            z_ref = coord[3*ii+2];
        }
        int idy = ii % gridDim.x; // This will be what row of close_flag this block is putting its value in.
        __syncthreads();          // make x_ref/y_ref/z_ref visible to all threads
        // Mark each atom jj as close (1) or not (0) to atom ii.
        for (int jj = threadIdx.x; jj < num_atom; jj += blockDim.x) {
            float r2t = (coord[3*jj  ] - x_ref) * (coord[3*jj  ] - x_ref)
                      + (coord[3*jj+1] - y_ref) * (coord[3*jj+1] - y_ref)
                      + (coord[3*jj+2] - z_ref) * (coord[3*jj+2] - z_ref);
            if (r2t < 34.0) {
                close_flag[idy*num_atom2+jj] = 1; // roughly 2 A + 2 A vdW + 2 * 1.8 A probe
            } else {
                close_flag[idy*num_atom2+jj] = 0;
            }
            // An atom is never its own neighbour.
            if (ii == jj) close_flag[idy*num_atom2+jj] = 0;
        }
        __syncthreads();
        // Do pre scan
        idz = 0;          // segment counter; incremented once per outer pass
        int temp_sum = 0; // running neighbour count before this segment
        // Scan num_atom2 flags in segments of 2*blockDim.x elements.
        for (int jj = threadIdx.x; jj < num_atom2; jj += 2 * blockDim.x) {
            int idx = jj % blockDim.x;
            int offset = 1;
            // Load this segment's pair of flags into the shared workspace.
            temp[2 * idx] = close_flag[idy * num_atom2 + 2 * blockDim.x * idz + 2 * idx];
            temp[2 * idx + 1] = close_flag[idy * num_atom2 + 2 * blockDim.x * idz + 2 * idx + 1];
            for (int d = 2 * blockDim.x>>1; d > 0; d >>= 1) { // up-sweep
                __syncthreads();
                if (idx < d) {
                    int ai = offset * (2 * idx + 1) - 1;
                    int bi = offset * (2 * idx + 2) - 1;
                    temp[bi] += temp[ai];
                }
                offset *= 2;
            }
            __syncthreads();
            // Snapshot the count accumulated by previous segments.
            temp_sum = close_num[ii];
            __syncthreads();
            if (idx == 0) {
                close_num[ii] += temp[2 * blockDim.x - 1]; // log the total number of 1's in this blockDim
                temp[2 * blockDim.x - 1] = 0;              // seed the exclusive down-sweep
            }
            __syncthreads();
            for (int d = 1; d < blockDim.x * 2; d *= 2) { //down-sweep
                offset >>= 1;
                __syncthreads();
                if (idx < d) {
                    int ai = offset * (2 * idx + 1) - 1;
                    int bi = offset * (2 * idx + 2) - 1;
                    int t = temp[ai];
                    temp[ai] = temp[bi];
                    temp[bi] += t;
                }
            }
            __syncthreads();
            // Finally assign the indices
            // temp[] now holds the exclusive prefix sum: each set flag's
            // slot within this atom's packed neighbour list.
            if (close_flag[idy * num_atom2 + 2 * blockDim.x * idz + 2 * idx] == 1) {
                close_idx[ii * 1024 + temp[2*idx] + temp_sum] = 2 * idx + 2 * blockDim.x * idz;
            }
            if (close_flag[idy * num_atom2 + 2 * blockDim.x * idz + 2 * idx + 1] == 1) {
                close_idx[ii * 1024 + temp[2*idx+1] + temp_sum] = 2*idx+1 + 2 * blockDim.x * idz;
            }
            idz++;          // advance to the next segment
            __syncthreads();
        }
    }
}
12,832
#include <iostream>
#include <cuda.h>
#include <ctime>
#include <stdlib.h>

// In-place element-wise addition: a[i] += b[i] for i < count.
__global__ void AddInts (int* a, int* b, const int count)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < count)   // guard the partially-filled tail block
        a[i] += b[i];
}

int main ()
{
    const int count = 1000;
    int *h_a = new int[count];
    int *h_b = new int[count];
    int *d_a;
    int *d_b;

    // Init host memory with random values.
    std::srand(time(NULL));
    for (int i = 0; i < count; ++i) {
        h_a[i] = rand() % 1000;
        h_b[i] = rand() % 1000;
    }

    std::cout << "Prior to calculation" << std::endl;
    for (int i = 0; i < 5; ++i) {
        std::cout << h_a[i] << " + " << h_b[i] << std::endl;
    }

    // FIX: test the cudaMalloc return code.  cudaMalloc does not set the
    // pointer to NULL on failure, so the old NULL checks never fired.
    if (cudaMalloc(&d_a, sizeof(int)*count) != cudaSuccess) {
        std::cout << "Fail to alloc GPU mem\n";
        delete[] h_a;
        delete[] h_b;
        return -1;
    }
    if (cudaMalloc(&d_b, sizeof(int)*count) != cudaSuccess) {
        std::cout << "Fail to alloc GPU mem\n";
        cudaFree(d_a);
        delete[] h_a;
        delete[] h_b;
        return -1;
    }

    // Copy initialized host memory to device.
    if (cudaMemcpy(d_a, h_a, count*sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
        std::cout << "Failed to move mem from host to device\n";
        cudaFree(d_a);
        cudaFree(d_b);
        delete[] h_a;
        delete[] h_b;
        return -1;   // FIX: previously fell through and launched the kernel anyway
    }
    if (cudaMemcpy(d_b, h_b, count*sizeof(int), cudaMemcpyHostToDevice) != cudaSuccess) {
        std::cout << "Failed to move mem from host to device\n";
        cudaFree(d_a);
        cudaFree(d_b);
        delete[] h_a;
        delete[] h_b;
        return -1;   // FIX: same missing-return bug
    }

    // Parallel calculation: ceil-divide into 256-thread blocks.
    AddInts<<< count/256+1, 256>>>(d_a, d_b, count);

    // Copy result from device to host.
    if (cudaMemcpy(h_a, d_a, count*sizeof(int), cudaMemcpyDeviceToHost) != cudaSuccess) {
        std::cout << "Failed to move data from device to host\n";
        cudaFree(d_a);
        cudaFree(d_b);
        delete[] h_a;
        delete[] h_b;
        return -1;
    }

    for (int i = 0; i < 5; ++i) {
        std::cout << "It's " << h_a[i] << std::endl;
    }

    cudaFree(d_a);
    cudaFree(d_b);
    delete[] h_a;   // FIX: host buffers were leaked
    delete[] h_b;
    return 0;
}
12,833
#include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <cuda.h> #define ACC_Jf 19.013f #define ACC_Kf 25.253f #define ACC_Lf 6503.0f __constant__ int d_excitations_number,d_ionizations_number, d_datapoints, d_block_mult; __constant__ float d_T_r_f; void gauss_integration_setup32_f(float *weights, float *x) { x[0]=1.3680690752591596E-03f; x[1]=7.1942442273659202E-03f; x[2]=1.7618872206246805E-02f; x[3]=3.2546962031130167E-02f; x[4]=5.1839422116973843E-02f; x[5]=7.5316193133715015E-02f; x[6]=1.0275810201602886E-01f; x[7]=1.3390894062985509E-01f; x[8]=1.6847786653489233E-01f; x[9]=2.0614212137961868E-01f; x[10]=2.4655004553388526E-01f; x[11]=2.8932436193468253E-01f; x[12]=3.3406569885893617E-01f; x[13]=3.8035631887393162E-01f; x[14]=4.2776401920860185E-01f; x[15]=4.7584616715613093E-01f; x[16]=5.2415383284386907E-01f; x[17]=5.7223598079139815E-01f; x[18]=6.1964368112606838E-01f; x[19]=6.6593430114106378E-01f; x[20]=7.1067563806531764E-01f; x[21]=7.5344995446611462E-01f; x[22]=7.9385787862038115E-01f; x[23]=8.3152213346510750E-01f; x[24]=8.6609105937014474E-01f; x[25]=8.9724189798397114E-01f; x[26]=9.2468380686628515E-01f; x[27]=9.4816057788302599E-01f; x[28]=9.6745303796886994E-01f; x[29]=9.8238112779375319E-01f; x[30]=9.9280575577263397E-01f; x[31]=9.9863193092474067E-01f; weights[0]=3.5093050047349198E-03f; weights[1]=8.1371973654528751E-03f; weights[2]=1.2696032654631021E-02f; weights[3]=1.7136931456510726E-02f; weights[4]=2.1417949011113720E-02f; weights[5]=2.5499029631187890E-02f; weights[6]=2.9342046739268091E-02f; weights[7]=3.2911111388180682E-02f; weights[8]=3.6172897054423871E-02f; weights[9]=3.9096947893535162E-02f; weights[10]=4.1655962113473763E-02f; weights[11]=4.3826046502202044E-02f; weights[12]=4.5586939347882056E-02f; weights[13]=4.6922199540401971E-02f; weights[14]=4.7819360039637472E-02f; weights[15]=4.8270044257364274E-02f; weights[16]=4.8270044257363830E-02f; weights[17]=4.7819360039637784E-02f; 
weights[18]=4.6922199540401846E-02f; weights[19]=4.5586939347881918E-02f; weights[20]=4.3826046502201850E-02f; weights[21]=4.1655962113473798E-02f; weights[22]=3.9096947893534850E-02f; weights[23]=3.6172897054424745E-02f; weights[24]=3.2911111388180932E-02f; weights[25]=2.9342046739267064E-02f; weights[26]=2.5499029631188164E-02f; weights[27]=2.1417949011113362E-02f; weights[28]=1.7136931456510799E-02f; weights[29]=1.2696032654631212E-02f; weights[30]=8.1371973654529653E-03f; weights[31]=3.5093050047351631E-03f; } __device__ float d_j_int_f(float E0,float E_j,float *B_vector) { float Eq=E_j/E0; float integrand=-logf(Eq)*B_vector[0]+B_vector[1]+Eq*(B_vector[2]+Eq*B_vector[3]); return integrand; } __device__ float d_k_int_f(float E0,float E1,float E_i,float *C_vector) { float E_i_pow=E_i*E_i, E0_pow=E0*E0, E1_pow=E1*E1, E1_prime=E0-E_i-E1, E1_prime_pow=E1_prime*E1_prime; float a=0.5f*(sqrtf(E0_pow+4.0f*E_i_pow)-E0); float b=a+E_i; float integrand=(1.0f/((E1+a)*(E1+b))+1.0f/((E1_prime+a)*(E1_prime+b)))*C_vector[0]; integrand+=2.0f*C_vector[1]/E0; integrand+=2.0f*C_vector[2]*(E0-E_i)/E0_pow; E0_pow*=E0; integrand+=3.0f*C_vector[3]*(E1_pow+E1_prime_pow)/E0_pow; E0_pow*=E0; E1_pow*=E1; E1_prime_pow*=E1_prime; integrand+=4.0f*C_vector[4]*(E1_pow+E1_prime_pow)/E0_pow; integrand*=0.5f/E_i; return integrand; } __device__ float d_l_int_f(float EGamma,float T_r,float *D_vector) { float exp_EG=expf(EGamma/T_r)-1.0f; float integrand=(D_vector[0]+D_vector[1]/EGamma)/exp_EG; return integrand; } //Carry out an integral for the collisional excitation coefficient __global__ void d_j_calc_f(float *d_params, float *E_j, float *B_vector, float *d_j_up, float *w, float *x) { //Calculate the integrand on each thread int node_num, integral_num; integral_num=threadIdx.x/d_datapoints; node_num=threadIdx.x % d_datapoints; integral_num+=blockIdx.x*d_block_mult; extern __shared__ float temp[]; //Integrand is stored in shared memory float *d_j_up_temp=&temp[0]; float 
lim=(E_j[integral_num]+2.0f*fabsf(d_params[1])+ACC_Jf*d_params[0]); float E0=x[node_num]*lim+E_j[integral_num]; float integrand=d_j_int_f(E0,E_j[integral_num],B_vector+integral_num*4)*w[node_num]; float fermi=1.0f/(1.0f+expf((E0-d_params[1])/d_params[0])); float fermi_m=1.0f/(1.0f+expf((E0-E_j[integral_num]-d_params[1])/d_params[0])); d_j_up_temp[threadIdx.x]=integrand*fermi*(1.0f-fermi_m); //Perform a standard reduction to integrate for (int d = d_datapoints>>1; d > 0; d >>= 1) { __syncthreads(); if (node_num<d) { d_j_up_temp[threadIdx.x] += d_j_up_temp[threadIdx.x+d]; } } __syncthreads(); if (node_num==0) { d_j_up[integral_num]=d_j_up_temp[threadIdx.x]*lim; } } //Carry out a double integral for the collisional ionization coefficient //Involves now two reductions and the intermediate results stored in main GPU memory __global__ void d_k_calc_f(float *d_params, float *E_i, float *C_vector,float *d_k_up, float *w, float *x) { //Calculate the integrand on each thread int node_num, integral_num; integral_num=threadIdx.x/d_datapoints; node_num=threadIdx.x % d_datapoints; integral_num+=blockIdx.x*d_block_mult; extern __shared__ float temp[]; float *d_k_up_temp=&temp[0]; float lim=(fabsf(d_params[1])+50.0f+ACC_Kf*d_params[0]); float E0prime=x[node_num]*lim; float E0=E0prime+E_i[integral_num]; float fermiE0=1.0f/(1.0f+exp((E0-d_params[1])/d_params[0])); float E1, integrand, fermiE1, fermiE0prime, int_w_w=0.0f; for (int idx=0;idx<d_datapoints;idx++) { E1=x[idx]*E0prime; integrand=d_k_int_f(E0,E1,E_i[integral_num],C_vector+integral_num*5); fermiE1=1.0f/(1.0f+exp((E1-d_params[1])/d_params[0])); fermiE0prime=1.0f/(1.0f+exp((E0prime-E1-d_params[1])/d_params[0])); int_w_w+=integrand*w[node_num]*w[idx]*E0prime*fermiE0*(1.0f-fermiE1)*(1.0f-fermiE0prime); } d_k_up_temp[threadIdx.x]=int_w_w; //Series of reductions over the first integral for (int d = d_datapoints>>1; d > 0; d >>= 1) { __syncthreads(); if (node_num<d) { d_k_up_temp[threadIdx.x] += d_k_up_temp[threadIdx.x+d]; } } 
__syncthreads(); if (node_num==0) { d_k_up[integral_num]=d_k_up_temp[threadIdx.x]*lim; } } //Carry out an integral for the photoionization coefficient and energy change due to photoionization //Single integral, similar to d_j_calc() __global__ void d_l_calc_f(float *d_params, float *E_i, float *D_vector, float *d_l, float *d_le, float *w, float *x) { int node_num, integral_num; integral_num=threadIdx.x/d_datapoints; node_num=threadIdx.x % d_datapoints; integral_num+=blockIdx.x*d_block_mult; extern __shared__ float temp[]; float *d_l_temp=&temp[0]; float *d_le_temp=&temp[blockDim.x]; float lim=ACC_Lf; if(d_params[1]>0.0f){lim+=d_params[1];} float EGammaPrime=x[node_num]*lim; float EGamma=EGammaPrime+E_i[integral_num]; float fermi_m=1.0f-1.0f/(1.0f+expf((EGammaPrime-d_params[1])/d_params[0])); float integrand=d_l_int_f(EGamma,d_T_r_f,D_vector+integral_num*2)*w[node_num]*fermi_m; d_l_temp[threadIdx.x]=integrand; d_le_temp[threadIdx.x]=integrand*EGammaPrime; for (int d = d_datapoints>>1; d > 0; d >>= 1) { __syncthreads(); if (node_num<d) { d_l_temp[threadIdx.x] += d_l_temp[threadIdx.x+d]; d_le_temp[threadIdx.x] += d_le_temp[threadIdx.x+d]; } } __syncthreads(); if (node_num==0) { d_l[integral_num]=d_l_temp[threadIdx.x]*lim; d_le[integral_num]=d_le_temp[threadIdx.x]*lim; } } void d_setup_f(float **d_params, float **d_B_vector, float **d_C_vector, float **d_D_vector, float **d_E_j, float **d_E_i, float **d_j, float **d_k, float **d_l, float **d_x, float **d_w, float *B_vector, float *C_vector, float *D_vector, float *E_j, float *E_i, float T_r, float *h_x, float *h_w, int ionizations_number, int excitations_number, int h_datapoints, cudaStream_t *streams, int h_block_mult) { cudaMalloc((void **)d_params,sizeof(float)*2); cudaMalloc((void **)d_B_vector,sizeof(float)*excitations_number*4); cudaMalloc((void **)d_C_vector,sizeof(float)*ionizations_number*5); cudaMalloc((void **)d_D_vector,sizeof(float)*ionizations_number*2); cudaMalloc((void 
**)d_E_j,sizeof(float)*excitations_number); cudaMalloc((void **)d_E_i,sizeof(float)*ionizations_number); cudaMalloc((void **)d_j,sizeof(float)*excitations_number); cudaMalloc((void **)d_k,sizeof(float)*ionizations_number); cudaMalloc((void **)d_l,2*sizeof(float)*ionizations_number); cudaMalloc((void **)d_x,sizeof(float)*h_datapoints); cudaMalloc((void **)d_w,sizeof(float)*h_datapoints); cudaMemcpyToSymbol(d_ionizations_number,&ionizations_number,sizeof(ionizations_number)); cudaMemcpyToSymbol(d_excitations_number,&excitations_number,sizeof(excitations_number)); cudaMemcpyToSymbol(d_datapoints,&h_datapoints,sizeof(h_datapoints)); cudaMemcpyToSymbol(d_block_mult,&h_block_mult,sizeof(h_block_mult)); cudaMemcpyToSymbol(d_T_r_f,&T_r,sizeof(T_r)); cudaMemcpy(*d_B_vector,B_vector,sizeof(float)*excitations_number*4,cudaMemcpyHostToDevice); cudaMemcpy(*d_C_vector,C_vector,sizeof(float)*ionizations_number*5,cudaMemcpyHostToDevice); cudaMemcpy(*d_D_vector,D_vector,sizeof(float)*ionizations_number*2,cudaMemcpyHostToDevice); cudaMemcpy(*d_E_j,E_j,sizeof(float)*excitations_number,cudaMemcpyHostToDevice); cudaMemcpy(*d_E_i,E_i,sizeof(float)*ionizations_number,cudaMemcpyHostToDevice); cudaMemcpy(*d_x,h_x,sizeof(float)*h_datapoints,cudaMemcpyHostToDevice); cudaMemcpy(*d_w,h_w,sizeof(float)*h_datapoints,cudaMemcpyHostToDevice); cudaStreamCreate(&streams[0]); cudaStreamCreate(&streams[1]); } void d_cleanup_f(float *d_params, float *d_B_vector, float *d_C_vector, float *d_E_j, float *d_E_i, float *d_j, float *d_k, float *d_l, float *d_x, float *d_w) { cudaFree(d_params); cudaFree(d_B_vector); cudaFree(d_C_vector); cudaFree(d_E_j); cudaFree(d_E_i); cudaFree(d_j); cudaFree(d_k); cudaFree(d_l); cudaFree(d_x); cudaFree(d_w); cudaDeviceReset(); } void d_calculate_rates_f(float *d_params,float *d_B_vector, float *d_C_vector, float *d_D_vector, float *d_E_j, float *d_E_i, float *d_j, float *d_k, float *d_l, float *d_x, float *d_w,float *h_params, float *h_j,float *h_k,float *h_l,float *h_w, 
float *h_x, float T_r, int ionizations_number,int excitations_number,int h_datapoints,cudaStream_t *streams, int h_block_mult) { cudaMemcpy(d_params,h_params,sizeof(float)*2,cudaMemcpyHostToDevice); d_j_calc_f<<<excitations_number/h_block_mult,h_datapoints*h_block_mult,h_datapoints*h_block_mult*sizeof(float),streams[0]>>>(d_params,d_E_j,d_B_vector,d_j,d_w,d_x); d_l_calc_f<<<ionizations_number/h_block_mult,h_datapoints*h_block_mult,2*h_datapoints*h_block_mult*sizeof(float),streams[0]>>>(d_params,d_E_i,d_D_vector,d_l,d_l+ionizations_number,d_w,d_x); d_k_calc_f<<<ionizations_number/h_block_mult,h_datapoints*h_block_mult,h_datapoints*h_block_mult*sizeof(float),streams[1]>>>(d_params,d_E_i,d_C_vector,d_k,d_w,d_x); cudaMemcpyAsync(h_j,d_j,sizeof(float)*excitations_number,cudaMemcpyDeviceToHost,streams[0]); cudaMemcpyAsync(h_l,d_l,2*sizeof(float)*ionizations_number,cudaMemcpyDeviceToHost,streams[0]); cudaMemcpyAsync(h_k,d_k,sizeof(float)*ionizations_number,cudaMemcpyDeviceToHost,streams[1]); cudaDeviceSynchronize(); } //CPU memory allocation void h_allocate_arrays_f(int ionizations_number, int excitations_number, int h_datapoints, float **h_params, float **E_i,float **E_j,float **B_vector, float **C_vector, float **D_vector, float **h_j, float **h_k, float **h_l, float **h_w, float **h_x) { *h_params=(float*)malloc(2*sizeof(float)); *E_i=(float*)malloc(ionizations_number*sizeof(float)); *E_j=(float*)malloc(excitations_number*sizeof(float)); *B_vector=(float*)malloc(excitations_number*4*sizeof(float)); *C_vector=(float*)malloc(ionizations_number*5*sizeof(float)); *D_vector=(float*)malloc(ionizations_number*2*sizeof(float)); *h_j=(float*)malloc(excitations_number*sizeof(float)); *h_k=(float*)malloc(ionizations_number*sizeof(float)); *h_l=(float*)malloc(2*ionizations_number*sizeof(float)); *h_x=(float*)malloc(h_datapoints*sizeof(float)); *h_w=(float*)malloc(h_datapoints*sizeof(float)); } float h_j_int_f(float E0,float E_j,float *B_vector) { float Eq=E_j/E0; float 
integrand=-logf(Eq)*B_vector[0]+B_vector[1]+Eq*(B_vector[2]+Eq*B_vector[3]); return integrand; } //Evaluate the differential cross section for collisional ionization //A Mott-type cross section, compatable with the BELI formula, is used float h_k_int_f(float E0,float E1,float E_i,float *C_vector) { float E_i_pow=E_i*E_i, E0_pow=E0*E0, E1_pow=E1*E1, E1_prime=E0-E_i-E1, E1_prime_pow=E1_prime*E1_prime; float a=0.5f*(sqrtf(E0_pow+4.0f*E_i_pow)-E0); float b=a+E_i; float integrand=(1.0f/((E1+a)*(E1+b))+1.0f/((E1_prime+a)*(E1_prime+b)))*C_vector[0]; integrand+=2.0f*C_vector[1]/E0; integrand+=2.0f*C_vector[2]*(E0-E_i)/E0_pow; E0_pow*=E0; integrand+=3.0f*C_vector[3]*(E1_pow+E1_prime_pow)/E0_pow; E0_pow*=E0; E1_pow*=E1; E1_prime_pow*=E1_prime; integrand+=4.0f*C_vector[4]*(E1_pow+E1_prime_pow)/E0_pow; integrand*=0.5f/E_i; return integrand; } //Evaluate photoionization cross section float h_l_int_f(float EGamma,float E_i, float T_r,float *D_vector) { float exp_EG=expf(EGamma/T_r)-1.0f; float integrand=(D_vector[0]+D_vector[1]/EGamma)/exp_EG; return integrand; } //Full collisional excitation calculation void h_j_gauss_integration_f(float T_e,float mu,float E_j,float *B_vector, int datapoints, float *h_j_up, float *weights, float *x) { float integrand=0.0f, E0, fermi, fermi_m, integ_temp; float region_difference=(E_j+2.0f*fabsf(mu)+ACC_Jf*T_e); int idx; for(idx=0;idx<datapoints;idx++) { E0=x[idx]*region_difference+E_j; integ_temp=h_j_int_f(E0,E_j,B_vector); fermi=1.0f/(1.0f+expf((E0-mu)/T_e)); fermi_m=1.0f/(1.0f+expf((E0-E_j-mu)/T_e)); integrand+=weights[idx]*integ_temp*fermi*(1.0f-fermi_m); } *h_j_up=integrand*region_difference; } void h_k_gauss_integration_f(float T_e,float mu,float E_i,float *C_vector, int datapoints, float *k_up, float *weights, float *x) { float integrand0=0.0f, integrand1, E0, E1, E0prime, fermiE0, fermiE1, fermiE0prime, integ_temp; float region_difference=(fabsf(mu)+50.0f+ACC_Kf*T_e); int idx0,idx1; for(idx0=0;idx0<datapoints;idx0++) { 
E0prime=x[idx0]*region_difference; E0=E0prime+E_i; integrand1=0.0f; for(idx1=0;idx1<datapoints;idx1++) { E1=x[idx1]*E0prime; integ_temp=h_k_int_f(E0, E1, E_i,C_vector)*weights[idx1]; fermiE0=1.0f/(1.0f+expf((E0-mu)/T_e)); fermiE1=1.0f/(1.0f+expf((E1-mu)/T_e)); fermiE0prime=1.0f/(1.0f+expf((E0prime-E1-mu)/T_e)); integrand1+=integ_temp*fermiE0*(1.0f-fermiE1)*(1.0f-fermiE0prime); } integrand0+=weights[idx0]*E0prime*integrand1; } *k_up=integrand0*region_difference; } void h_l_gauss_integration_f(float T_e,float mu,float E_i,float T_r,float *D_vector, int datapoints, float *h_l, float *h_le, float *weights, float *x) { float integrand0=0.0f, integrand1=0.0f, EGamma, EGammaPrime, fermi_m, integ_temp; float region_difference=ACC_Lf; if (mu>0.0f){region_difference+=mu;} int idx; for(idx=0;idx<datapoints;idx++) { EGammaPrime=x[idx]*region_difference; EGamma=EGammaPrime+E_i; fermi_m=1.0f-1.0f/(1.0f+expf((EGammaPrime-mu)/T_e)); integ_temp=h_l_int_f(EGamma,E_i,T_r,D_vector)*weights[idx]*fermi_m; integrand0+=integ_temp; integrand1+=integ_temp*EGammaPrime; } *h_l=integrand0*region_difference; *h_le=integrand1*region_difference; } //The following functions carry out sequential integration for all relevant states void h_j_gauss_integration_full_f(int excitations_number,float T_e,float mu,float *E_j,float *B_vector, int datapoints, float *h_j, float *weights, float *x) { int idx_j; for (idx_j=0;idx_j<excitations_number;idx_j++) { h_j_gauss_integration_f(T_e,mu,E_j[idx_j],B_vector+idx_j*4,datapoints,h_j+idx_j,weights,x); } } void h_k_gauss_integration_full_f(int ionizations_number,float T_e,float mu,float *E_i,float *C_vector, int datapoints, float *h_k, float *weights, float *x) { int idx_k; for (idx_k=0;idx_k<ionizations_number;idx_k++) { h_k_gauss_integration_f(T_e,mu,E_i[idx_k],C_vector+idx_k*5,datapoints,h_k+idx_k,weights,x); } } void h_l_gauss_integration_full_f(int ionizations_number,float T_e,float mu,float T_r,float *E_i,float *D_vector, int datapoints, float *h_l, float 
*weights, float *x) { int idx_l; for (idx_l=0;idx_l<ionizations_number;idx_l++) { h_l_gauss_integration_f(T_e,mu,E_i[idx_l],T_r,D_vector+idx_l*2,datapoints,h_l+idx_l,h_l+ionizations_number+idx_l,weights,x); } } int main(int argc, char *argv[]) { int h_datapoints=32, ionizations_number=10000, excitations_number=10000, idx, h_block_mult=1; if (argc>1){ionizations_number=atoi(argv[1]);} if (argc>2){excitations_number=atoi(argv[2]);} if (argc>3){h_block_mult=atoi(argv[3]);} float *h_params, *E_i, *E_j, *B_vector, *C_vector, *D_vector, T_r; float *h_j, *h_k, *h_l, *h_x, *h_w; float h_j2,h_k2,h_l2,h_le2; FILE *INPUTFILE1, *INPUTFILE2; clock_t h_start_t, h_end_t; float h_total_t; h_allocate_arrays_f(ionizations_number,excitations_number,h_datapoints,&h_params,&E_i,&E_j,&B_vector,&C_vector, &D_vector,&h_j, &h_k,&h_l,&h_w,&h_x); gauss_integration_setup32_f(h_w,h_x); h_params[0]=10.0f; h_params[1]=3.0f; T_r=300.0f; if ((INPUTFILE1=fopen("Test_Ionization_Coeffs.txt", "r"))==NULL) { printf("Cannot open file! Error!\n"); exit(2); } for(idx=0;idx<ionizations_number;idx++) { fscanf(INPUTFILE1,"%f %f %f %f %f %f %f %f", &E_i[idx], &C_vector[idx*5], &C_vector[idx*5+1], &C_vector[idx*5+2], &C_vector[idx*5+3], &C_vector[idx*5+4], &D_vector[idx*2], &D_vector[idx*2+1]); } fclose(INPUTFILE1); if ((INPUTFILE2=fopen("Test_Excitation_Coeffs.txt", "r"))==NULL) { printf("Cannot open file! 
Error!\n"); exit(2); } for(idx=0;idx<excitations_number;idx++) { fscanf(INPUTFILE2,"%f %f %f %f %f", &E_j[idx], &B_vector[idx*4], &B_vector[idx*4+1], &B_vector[idx*4+2], &B_vector[idx*4+3]); } fclose(INPUTFILE2); int device_count=0; cudaGetDeviceCount(&device_count); printf("Device count: %i\n",device_count); cudaSetDevice(0); //Run on device 0 by default - can be changed if multiple GPUs etc are present cudaStream_t streams[2]; float gpu_time, gpu_time1, gpu_time2; cudaEvent_t start, stop, start1, stop1, start2, stop2; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventCreate(&start1); cudaEventCreate(&stop1); cudaEventCreate(&start2); cudaEventCreate(&stop2); float *d_params, *d_B_vector, *d_C_vector, *d_D_vector, *d_E_j, *d_E_i, *d_j, *d_k, *d_l, *d_x, *d_w; d_setup_f(&d_params,&d_B_vector,&d_C_vector,&d_D_vector,&d_E_j,&d_E_i, &d_j, &d_k, &d_l, &d_x, &d_w,B_vector, C_vector, D_vector, E_j, E_i, T_r, h_x,h_w, ionizations_number,excitations_number, h_datapoints,streams, h_block_mult); h_start_t=clock(); cudaEventRecord(start); cudaEventRecord(start1,streams[0]); cudaEventRecord(start2,streams[1]); d_calculate_rates_f(d_params,d_B_vector, d_C_vector,d_D_vector, d_E_j, d_E_i, d_j, d_k, d_l, d_x, d_w,h_params,h_j,h_k,h_l,h_w,h_x,T_r, ionizations_number,excitations_number,h_datapoints,streams, h_block_mult); cudaEventRecord(stop); cudaEventRecord(stop1,streams[0]); cudaEventRecord(stop2,streams[1]); cudaEventSynchronize(stop); cudaEventSynchronize(stop1); cudaEventSynchronize(stop2); cudaEventElapsedTime(&gpu_time, start, stop); cudaEventElapsedTime(&gpu_time1, start1, stop1); cudaEventElapsedTime(&gpu_time2, start2, stop2); h_end_t=clock(); h_total_t=(float)(h_end_t-h_start_t)/CLOCKS_PER_SEC; printf("Time: %E CUDA times: %f %f %f\n", h_total_t, gpu_time, gpu_time1, gpu_time2); for(idx=0;idx<10;idx++) { h_j_gauss_integration_f(h_params[0],h_params[1],E_j[idx],B_vector+4*idx,h_datapoints,&h_j2, h_w, h_x); 
h_k_gauss_integration_f(h_params[0],h_params[1],E_i[idx],C_vector+5*idx,h_datapoints,&h_k2, h_w, h_x); h_l_gauss_integration_f(h_params[0],h_params[1],E_i[idx],T_r,D_vector+2*idx,h_datapoints,&h_l2, &h_le2, h_w, h_x); printf("%E %E %E %E %E %E %E %E\n",h_j[idx],h_j2,h_k[idx],h_k2,h_l[idx],h_l2,h_l[idx+ionizations_number],h_le2); } h_j_gauss_integration_f(h_params[0],h_params[1],E_j[excitations_number-1],B_vector+4*(excitations_number-1),h_datapoints,&h_j2, h_w, h_x); h_k_gauss_integration_f(h_params[0],h_params[1],E_i[ionizations_number-1],C_vector+5*(ionizations_number-1),h_datapoints,&h_k2, h_w, h_x); h_l_gauss_integration_f(h_params[0],h_params[1],E_i[ionizations_number-1],T_r,D_vector+2*(ionizations_number-1),h_datapoints,&h_l2, &h_le2, h_w, h_x); printf("%E %E %E %E %E %E %E %E\n",h_j[excitations_number-1],h_j2,h_k[ionizations_number-1],h_k2,h_l[ionizations_number-1],h_l2,h_l[2*ionizations_number-1],h_le2); d_cleanup_f(d_params, d_B_vector, d_C_vector, d_E_j, d_E_i, d_j, d_k, d_l, d_x, d_w); exit(0); }
12,834
// ***********************************************************************
//
// Demo program for education in subject
// Computer Architectures and Paralel Systems
// Petr Olivka, dep. of Computer Science, FEI, VSB-TU Ostrava
// email:petr.olivka@vsb.cz
//
// Example of CUDA Technology Usage
// Multiplication of elements in float array
//
// ***********************************************************************

#include <stdio.h>

// Function prototype from .cu file
void run_mult( float *pole, int L, float mult );

#define N 200

int main()
{
    // Array initialization on the host heap.
    float *prvky = new float[ N ];
    for ( int i = 0; i < N; i++ )
        prvky[ i ] = ( float ) i;

    // Multiply every element by 3.14 on the GPU (implemented in the .cu file).
    run_mult( prvky, N, ( float ) 3.14 );

    // Print result
    for ( int i = 0; i < N; i++ )
        printf( "%8.2f\n", prvky[ i ] );
    printf( "\n" );

    // BUGFIX: the array allocated with new[] was never released.
    delete[] prvky;
    return 0;
}
12,835
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>

// Number of elements processed by the vector-add demo.
const unsigned int SIZE = 1024;

// Element-wise addition kernel: out[idx] = in_a[idx] + in_b[idx].
// Expects a 1-D launch covering at least SIZE threads; extra threads are
// filtered by the bounds check.
__global__ void add(const int *in_a, const int *in_b, int *out)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < SIZE)
		out[idx] = in_a[idx] + in_b[idx];
}

int main(){
	// Host pointers for io data
	int *a = new int[SIZE];
	int *b = new int[SIZE];
	int *c = new int[SIZE];

	// Device pointers
	int *d_a, *d_b, *d_c;

	for (int i = 0; i < SIZE; i++){
		a[i] = i;
		b[i] = 2*i;
	}

	// Allocate memory on the device
	const unsigned int size = SIZE * sizeof(int);
	cudaMalloc((void**)&d_a, size);
	cudaMalloc((void**)&d_b, size);
	cudaMalloc((void**)&d_c, size);

	// Copy the input data to the device
	cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);

	// One block of SIZE threads (SIZE == 1024 fits the per-block limit).
	dim3 dimGrid(1);
	dim3 dimBlock(SIZE);
	add <<<dimGrid, dimBlock >>> (d_a, d_b, d_c);

	// BUGFIX: report launch failures instead of silently comparing garbage.
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess){
		std::cerr << "Kernel launch failed: " << cudaGetErrorString(err) << std::endl;
		return 1;
	}

	// cudaMemcpy synchronizes with the kernel before copying the result back.
	cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);

	for (int i = 0; i < SIZE; i++){
		if (a[i] + b[i] != c[i])
			return 1;
	}

	// BUGFIX: corrected misspelled success message ("Sucess!").
	std::cout << "Success!" << std::endl;

	cudaFree(d_a);
	cudaFree(d_b);
	cudaFree(d_c);
	delete[] a;
	delete[] b;
	delete[] c;
	return 0;
}
12,836
#include "includes.h"

// Stamp local_time[next[tid]] with the global time GTIME for every thread
// whose `next` entry holds a valid (non-negative) index. The caller must
// size the 1-D grid so that every element of `next` has exactly one thread.
__global__ void update_local_time(int *next, double *local_time, double GTIME)
{
    const unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int target = next[tid];

    // Negative entries mark slots with nothing to update.
    if (target >= 0) {
        local_time[target] = GTIME;
    }
}
12,837
#include "includes.h"

// Mark every column that belongs to the same group as `conflict_col_id` as
// deleted (-1) in `deleted_cols`. The threads of one block stride over the
// columns, so any 1-D block size works.
// NOTE(review): this overload writes through a short* while the __global__
// variant below uses int* — confirm the two call sites really want
// different element widths for the deleted-columns array.
__device__ void remove_cols(short *deleted_cols, int *col_group,
                            const int conflict_col_id,
                            const int total_dl_matrix_col_num) {
  for (int i = threadIdx.x; i < total_dl_matrix_col_num; i = i + blockDim.x) {
    if (col_group[i] == col_group[conflict_col_id]) {
      deleted_cols[i] = -1;
    }
  }
}

// Kernel entry point with the same group-based column-deletion logic as the
// __device__ helper above, but operating on an int deleted-columns array.
// Every launched block repeats the identical full pass, so a single block
// (or idempotent multi-block use) is expected.
__global__ void remove_cols(int *deleted_cols, int *col_group,
                            const int conflict_col_id,
                            const int total_dl_matrix_col_num) {
  for (int i = threadIdx.x; i < total_dl_matrix_col_num; i = i + blockDim.x) {
    if (col_group[i] == col_group[conflict_col_id]) {
      deleted_cols[i] = -1;
    }
  }
}
12,838
#include <stdio.h>

#define N 2048 * 2048 // Number of elements in each vector

/*
 * Optimize this already-accelerated codebase. Work iteratively,
 * and use nvprof to support your work.
 *
 * Aim to profile `saxpy` (without modifying `N`) running under
 * 20us.
 */

// Grid-stride SAXPY variant: c[i] = 2*a[i] + b[i].
// `stride` must equal gridDim.x * blockDim.x so that every element is
// visited exactly once regardless of the grid size.
__global__ void saxpy(int * a, int * b, int * c, int stride)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid < N; tid += stride) {
        c[tid] = a[tid] * 2 + b[tid];
    }
}

// Grid-stride fill: set every element of `a` to `val` on the device, so the
// managed pages are first touched on the GPU instead of migrating later.
__global__ void init(int *a, int val, int stride)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    for (; tid < N; tid += stride) {
        a[tid] = val;
    }
}

int main()
{
    int *a, *b, *c;
    // BUGFIX: byte counts belong in size_t, not int.
    size_t size = N * sizeof(int); // The total number of bytes per vector

    cudaMallocManaged(&a, size);
    cudaMallocManaged(&b, size);
    cudaMallocManaged(&c, size);

    // Size the grid from the SM count so the grid-stride loops saturate any GPU.
    int deviceId = 0;
    cudaGetDevice(&deviceId);
    int SMc = 1;
    cudaDeviceGetAttribute(&SMc, cudaDevAttrMultiProcessorCount, deviceId);

    int threads_per_block = 256;
    int number_of_blocks = 32 * SMc;
    int stride = threads_per_block * number_of_blocks;

    init<<<number_of_blocks, threads_per_block>>>(a, 2, stride);
    init<<<number_of_blocks, threads_per_block>>>(b, 1, stride);
    init<<<number_of_blocks, threads_per_block>>>(c, 0, stride);
    cudaDeviceSynchronize();

    saxpy <<< number_of_blocks, threads_per_block >>> ( a, b, c, stride );

    // BUGFIX: surface launch-configuration and execution errors instead of
    // printing whatever happens to be in `c` after a silent failure.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Launch error: %s\n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        printf("Execution error: %s\n", cudaGetErrorString(err));

    // Print out the first and last 5 values of c for a quality check
    for( int i = 0; i < 5; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");
    for( int i = N-5; i < N; ++i )
        printf("c[%d] = %d, ", i, c[i]);
    printf ("\n");

    cudaFree( a );
    cudaFree( b );
    cudaFree( c );
}
12,839
#include<stdio.h>
#include<iostream>
#include<cuda.h>
using namespace std;

// Abort with a readable message on any CUDA API failure.
void catchCudaError(cudaError_t error){
    if(error!=cudaSuccess) {
        printf("\n====== Cuda Error Code %i ======\n %s\n",error,cudaGetErrorString(error));
        exit(-1);
    }
}

//=====================================================================
#define N 400000
#define MAX_THREAD 1024

// Element-wise vector addition with a bounds guard (the grid is rounded up
// to whole blocks, so trailing threads must be filtered out).
__global__ void add(int *a, int *b, int *c){
    //Skip till required block + the required thread index in the block
    uint id = blockDim.x * blockIdx.x + threadIdx.x;
    if(id<N)
        c[id] = a[id] + b[id];
}

int main(){
    // BUGFIX: three N==400000 int arrays (~4.8 MB) in automatic storage risk
    // a stack overflow; allocate the host arrays on the heap instead.
    int *a = new int[N], *b = new int[N], *c = new int[N];
    int *d_a, *d_b, *d_c; //Device arrays
    clock_t start, end;

    cudaEvent_t d_start, d_end;
    catchCudaError(cudaEventCreate(&d_start));
    catchCudaError(cudaEventCreate(&d_end));

    //Allocate device memory(double ptr as assigning value to a pointer as defined in CUDA API)
    catchCudaError(cudaMalloc((void **)&d_a, N * sizeof(int)));
    catchCudaError(cudaMalloc((void **)&d_b, N * sizeof(int)));
    catchCudaError(cudaMalloc((void **)&d_c, N * sizeof(int)));

    //Initial values of a,b
    for(uint i=0; i<N; ++i){
        a[i] = i;
        b[i] = 2*i;
    }

    //Copy to Device
    catchCudaError(cudaMemcpy(d_a, a, N*sizeof(int), cudaMemcpyHostToDevice));
    catchCudaError(cudaMemcpy(d_b, b, N*sizeof(int), cudaMemcpyHostToDevice));

    catchCudaError(cudaEventRecord(d_start));
    //Max 1024 threads in each block(max 65,535)
    add <<< ceil(1.0*N/MAX_THREAD), MAX_THREAD >>>(d_a, d_b, d_c);
    catchCudaError(cudaEventRecord(d_end));

    //Copy to Host (implicitly synchronizes with the kernel)
    catchCudaError(cudaMemcpy(c, d_c, N*sizeof(int), cudaMemcpyDeviceToHost));

    //Waits till event is recorded
    catchCudaError(cudaEventSynchronize(d_end));

    start = clock();
    for(uint i=0; i<N; ++i)
        if(a[i]+b[i] != c[i]){
            printf("Incorrect vector addition\n");
            exit(-3);
        }
    end = clock();

    float time_taken = 1000.0* (end - start)/CLOCKS_PER_SEC;
    float d_time_taken;
    cudaEventElapsedTime(&d_time_taken, d_start, d_end);

    printf("Correct Vector addition\n");
    printf("Host time = %f ms\nDevice Time = %f ms\n", time_taken, d_time_taken);

    //Free device memory
    catchCudaError(cudaFree(d_a));
    catchCudaError(cudaFree(d_b));
    catchCudaError(cudaFree(d_c));

    // BUGFIX: release the timing events and the heap-allocated host arrays.
    catchCudaError(cudaEventDestroy(d_start));
    catchCudaError(cudaEventDestroy(d_end));
    delete[] a;
    delete[] b;
    delete[] c;
}

/*
Output
Correct Vector addition
Host time = 1.143000 ms
Device Time = 0.384800 ms
*/
12,840
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>
#include <time.h>

// Initialize a[i] = 2*i and b[i] = 2*i + 1.
// BUGFIX: the kernels now take the element count and bounds-check, so a grid
// rounded up to whole blocks cannot touch memory past the arrays.
__global__ void gInit(int *a, int *b, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n) {
        a[i] = 2 * i;
        b[i] = 2 * i + 1;
    }
}

// a[i] += b[i]  =>  a[i] = 2*i + (2*i + 1) = 4*i + 1
__global__ void gSum(int *a, int *b, int n)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < n)
        a[i] += b[i];
}

int main(int argc, char **argv)
{
    if (argc < 3) {
        fprintf(stderr, "USAGE: <prog> <log_2{N}> <num_threads>");
        return -1;
    }

    double timer_alloc;
    clock_t start_alloc, stop;
    cudaEvent_t startE, stopE;
    cudaEventCreate(&startE);
    cudaEventCreate(&stopE);
    float gpuTime = 0.0f;

    int N = 1 << atoi(argv[1]);
    if (N == 1)
        N = 30720; // log_2{N} == 0 selects the benchmark default size
    int num_threads = atoi(argv[2]);
    // BUGFIX: round the grid up; N / num_threads truncated, leaving up to
    // num_threads-1 trailing elements uninitialized and unsummed (and a
    // zero-block launch when num_threads > N).
    int num_blocks = (N + num_threads - 1) / num_threads;

    int *a, *b;
    int *a_h;

    start_alloc = clock();
    cudaMalloc((void **)&a, N * sizeof(int));
    cudaMalloc((void **)&b, N * sizeof(int));
    a_h = (int *)calloc(N, sizeof(int));

    gInit<<<num_blocks, num_threads>>>(a, b, N);
    cudaDeviceSynchronize();

    // Time only the summation kernel with CUDA events.
    cudaEventRecord(startE, 0);
    gSum<<<num_blocks, num_threads>>>(a, b, N);
    cudaDeviceSynchronize();
    cudaEventRecord(stopE, 0);
    cudaEventSynchronize(stopE);
    cudaEventElapsedTime(&gpuTime, startE, stopE);

    cudaMemcpy(a_h, a, N * sizeof(int), cudaMemcpyDeviceToHost);
    stop = clock();
    // Wall-clock time including allocation, both kernels and the copy back.
    timer_alloc = 1000 * ((double)(stop - start_alloc)) / (double)CLOCKS_PER_SEC;

    // for (int i = 0; i < N; i += N / 16)
    //     fprintf(stdout, "%d\n", a_h[i]);
    fprintf(stdout, "CUDA Elapsed time: %g ms (%g ms)\n", gpuTime, timer_alloc);

    // BUGFIX: destroy the timing events alongside the buffers.
    cudaEventDestroy(startE);
    cudaEventDestroy(stopE);
    cudaFree(a);
    cudaFree(b);
    free(a_h);
    return 0;
}
12,841
// file esempio sommavettore_gpu
#include "stdio.h"

#define N 32            // 100
#define NumThPerBlock 32 // 256
#define NumBlocks 1

// Element-wise sum kernel: d_c[tid] = d_a[tid] + d_b[tid] for tid < N.
__global__ void add(int *d_a, int *d_b, int *d_c)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
        d_c[tid] = d_a[tid] + d_b[tid];
}

int main( void )
{
    // Host copies of the three vectors.
    int a[N], b[N], c[N];
    // Device buffers (the pointers live on the host, the storage on the GPU).
    int *dev_a, *dev_b, *dev_c;

    // Reserve device memory for the three vectors.
    cudaMalloc((void**)&dev_a, N * sizeof(int));
    cudaMalloc((void**)&dev_b, N * sizeof(int));
    cudaMalloc((void**)&dev_c, N * sizeof(int));

    // Fill the inputs: a[i] = -i and b[i] = i^2.
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }

    // Upload the inputs to the device.
    cudaMemcpy(dev_a, a, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(int), cudaMemcpyHostToDevice);

    // One block of NumThPerBlock threads covers all N elements.
    add<<<NumBlocks, NumThPerBlock>>>(dev_a, dev_b, dev_c);

    // Wait for the kernel to finish before reading results back.
    cudaDeviceSynchronize();

    // Download the result vector into c.
    cudaMemcpy(c, dev_c, N * sizeof(int), cudaMemcpyDeviceToHost);

    // Show the sums.
    for (int i = 0; i < N; i++) {
        printf("%d + %d = %d\n", a[i], b[i], c[i]);
    }

    // Release device memory.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
12,842
/* Below code is based on
   https://github.com/NVIDIA-developer-blog/code-samples/tree/master/series/cuda-cpp/transpose.
   nvcc transpose_rectangle.cu -o transpose_rectangle */
#include <assert.h>
#include <stdio.h>

#define DEBUG

// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
  if (result != cudaSuccess) {
    fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
    assert(result == cudaSuccess);
  }
#endif
  return result;
}

// Problem dimensions: an nx x ny float matrix transposed in
// TILE_DIM x TILE_DIM tiles; the tiled kernels use TILE_DIM x BLOCK_ROWS
// thread blocks (each thread copies TILE_DIM / BLOCK_ROWS rows).
// NUM_REPS is the timing repetition count.
const int nx = 1024;
const int ny = 1024;
const int TILE_DIM = 32;
const int BLOCK_ROWS = 8;
const int NUM_REPS = 100;

// Check errors and print GB/s
// NOTE(review): only the first 256 elements are compared against the
// reference — a partial sanity check, not full verification.
void postprocess(const float* ref, const float* res, int n, float ms) {
  bool passed = true;
  for (int i = 0; i < 256; i++) {
    if (res[i] != ref[i]) {
      printf("%d %f %f\n", i, ref[i], res[i]);
      // printf("%25s\n", "*** FAILED ***");
      passed = false;
      break;
    }
  }
  // Effective bandwidth: one read plus one write of n floats per repetition.
  if (passed) printf("%20.2f\n", 2 * n * sizeof(float) * 1e-6 * NUM_REPS / ms);
}

// Original coalesced transpose
// Uses shared memory to achieve coalesing in both reads and writes
// Tile width == #banks causes shared memory bank conflicts.
// Expects full TILE_DIM x TILE_DIM thread blocks (no BLOCK_ROWS loop).
__global__ void transposeCoalescedRectangle_Orig(float* odata, const float* idata) {
  __shared__ float tile[TILE_DIM][TILE_DIM];

  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  int width = gridDim.x * TILE_DIM;
  int height = gridDim.y * TILE_DIM;

  if ((x < nx) && (y < ny)) {
    tile[threadIdx.y][threadIdx.x] = idata[y * width + x];
  }

  // All threads must finish filling the tile before any thread reads it.
  __syncthreads();

  x = blockIdx.y * TILE_DIM + threadIdx.x;  // transpose block offset
  y = blockIdx.x * TILE_DIM + threadIdx.y;

  if ((x < ny) && (y < nx)) {
    odata[y * height + x] = tile[threadIdx.x][threadIdx.y];
  }
}

// Naive transpose
// Simplest transpose; doesn't use shared memory.
// Global memory reads are coalesced but writes are not.
// Naive kernel: reads are coalesced, writes are strided (uncoalesced).
// Expects TILE_DIM x TILE_DIM thread blocks.
__global__ void transposeNaiveRectangle(float* odata, const float* idata) {
  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  int width = gridDim.x * TILE_DIM;
  int height = gridDim.y * TILE_DIM;

  if ((x < nx) && (y < ny)) {
    odata[(x)*height + y] = idata[width * y + (x)];
  }
}

// Shared
// Tiled transpose with TILE_DIM x BLOCK_ROWS thread blocks: each thread
// copies TILE_DIM / BLOCK_ROWS rows. The shared tile is unpadded, so the
// column-wise reads after the barrier cause shared-memory bank conflicts.
__global__ void transposeCoalescedRectangle(float* odata, const float* idata) {
  __shared__ float tile[TILE_DIM][TILE_DIM];

  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  int width = gridDim.x * TILE_DIM;
  int height = gridDim.y * TILE_DIM;

  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    if ((x < nx) && ((y + j) < ny)) {
      tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
    }
  }

  // All threads must finish filling the tile before any thread reads it.
  __syncthreads();

  x = blockIdx.y * TILE_DIM + threadIdx.x;  // transpose block offset
  y = blockIdx.x * TILE_DIM + threadIdx.y;

  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    if ((x < ny) && ((y + j) < nx)) {
      odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
    }
  }
}

// Same as transposeCoalescedRectangle, except the shared tile gets one
// padding column so the transposed (column-wise) reads hit distinct banks.
__global__ void transposeNoBankConflictsRectangle(float* odata, const float* idata) {
  __shared__ float tile[TILE_DIM][TILE_DIM + 1];

  int x = blockIdx.x * TILE_DIM + threadIdx.x;
  int y = blockIdx.y * TILE_DIM + threadIdx.y;
  int width = gridDim.x * TILE_DIM;
  int height = gridDim.y * TILE_DIM;

  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    if ((x < nx) && ((y + j) < ny)) {
      tile[threadIdx.y + j][threadIdx.x] = idata[(y + j) * width + x];
    }
  }

  __syncthreads();

  x = blockIdx.y * TILE_DIM + threadIdx.x;  // transpose block offset
  y = blockIdx.x * TILE_DIM + threadIdx.y;

  for (int j = 0; j < TILE_DIM; j += BLOCK_ROWS) {
    if ((x < ny) && ((y + j) < nx)) {
      odata[(y + j) * height + x] = tile[threadIdx.x][threadIdx.y + j];
    }
  }
}

// Benchmark driver: builds the input matrix and the reference transpose on
// the host, then times each kernel NUM_REPS times and reports effective
// bandwidth via postprocess().
int main(int argc, char** argv) {
  const int mem_size = nx * ny * sizeof(float);

  dim3 dimGrid(nx / TILE_DIM, ny / TILE_DIM, 1);
  dim3 dimBlock(TILE_DIM, TILE_DIM, 1);

  int devId = 0;
  if (argc > 1) devId = atoi(argv[1]);

  cudaDeviceProp prop;
  checkCuda(cudaGetDeviceProperties(&prop, devId));
  printf("\nDevice : %s\n", prop.name);
  printf("%d.%d\n", prop.major, prop.minor);
  printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n", nx, ny,
         TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
  printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n", dimGrid.x, dimGrid.y,
         dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
  checkCuda(cudaSetDevice(devId));

  float* h_idata = (float*)malloc(mem_size);
  float* h_cdata = (float*)malloc(mem_size);
  float* h_tdata = (float*)malloc(mem_size);
  float* gold = (float*)malloc(mem_size);

  float *d_idata, *d_cdata, *d_tdata;
  checkCuda(cudaMalloc(&d_idata, mem_size));
  checkCuda(cudaMalloc(&d_cdata, mem_size));
  checkCuda(cudaMalloc(&d_tdata, mem_size));

  // check parameters and calculate execution configuration
  // NOTE(review): taking either goto jumps to error_exit before the timing
  // events are created, so the cudaEventDestroy calls there receive
  // uninitialized handles — confirm this error path is acceptable.
  if (nx % TILE_DIM || ny % TILE_DIM) {
    printf("nx and ny must be a multiple of TILE_DIM\n");
    goto error_exit;
  }
  if (TILE_DIM % BLOCK_ROWS) {
    printf("TILE_DIM must be a multiple of BLOCK_ROWS\n");
    goto error_exit;
  }

  // host: fill the input with its own linear index.
  for (int j = 0; j < ny; j++) {
    for (int i = 0; i < nx; i++) {
      h_idata[j * nx + i] = j * nx + i;
    }
  }

  /* Print for tfjs sensor
  // correct result for error checking
  printf("\n[");
  for (int i = 0; i < ny; i++) {
    printf("\n");
    for (int j = 0; j < nx; j++) {
      printf("%d,",(int)h_idata[i*nx+j]);
    }
  }
  printf("\n],[64,64]);");
  */
  /*
  for (int j = 0; j < nx; j++) {
    printf("%d ",(int)h_idata[j]);
  }
  */

  // correct result for error checking
  for (int j = 0; j < ny; j++) {
    for (int i = 0; i < nx; i++) {
      gold[i * ny + j] = h_idata[j * nx + i];
    }
  }

  /* Print for tfjs sensor
  // correct result for error checking
  printf("\n[");
  for (int i = 0; i < nx; i++) {
    printf("\n");
    for (int j = 0; j < ny; j++) {
      printf("%d,",(int)gold[i*ny+j]);
    }
  }
  printf("\n],[64,64]);");
  */
  /*
  for (int j = 0; j < ny; j++) {
    printf("%d ",(int)gold[j]);
  }
  */

  printf("\nmem_size=%d\n\n", mem_size);

  // device
  checkCuda(cudaMemcpy(d_idata, h_idata, mem_size, cudaMemcpyHostToDevice));

  // events for timing
  cudaEvent_t startEvent, stopEvent;
  checkCuda(cudaEventCreate(&startEvent));
  checkCuda(cudaEventCreate(&stopEvent));
  float ms;

  // ------------
  // time kernels
  // ------------
  printf("%35s%20s\n", "Routine", "Bandwidth (GB/s)");

  {
    /*
    printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n", nx, ny,
           TILE_DIM, TILE_DIM, TILE_DIM, TILE_DIM);
    printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n", dimGrid.x, dimGrid.y,
           dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
    */
    // --------------
    // transposeNaiveRectangle
    // --------------
    printf("%35s", "transposeNaiveRectangle");
    checkCuda(cudaMemset(d_tdata, 0, mem_size));
    // warmup
    transposeNaiveRectangle<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda(cudaEventRecord(startEvent, 0));
    for (int i = 0; i < NUM_REPS; i++)
      transposeNaiveRectangle<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda(cudaEventRecord(stopEvent, 0));
    checkCuda(cudaEventSynchronize(stopEvent));
    checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent));
    checkCuda(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost));
    printf(" ms=%f\n", ms / NUM_REPS);
    postprocess(gold, h_tdata, nx * ny, ms);
  }

  {
    // The tiled kernels use TILE_DIM x BLOCK_ROWS blocks (shadowing the outer
    // launch configuration).
    dim3 dimGrid(nx / TILE_DIM, ny / TILE_DIM, 1);
    // dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
    dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
    /*
    printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n", nx, ny,
           TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
    printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n", dimGrid.x, dimGrid.y,
           dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
    */
    // ------------------
    // transposeCoalescedRectangle
    // ------------------
    printf("%35s", "transposeCoalescedRectangle");
    checkCuda(cudaMemset(d_tdata, 0, mem_size));
    // warmup
    transposeCoalescedRectangle<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda(cudaEventRecord(startEvent, 0));
    for (int i = 0; i < NUM_REPS; i++)
      transposeCoalescedRectangle<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda(cudaEventRecord(stopEvent, 0));
    checkCuda(cudaEventSynchronize(stopEvent));
    checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent));
    checkCuda(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost));
    printf(" ms=%f\n", ms / NUM_REPS);
    postprocess(gold, h_tdata, nx * ny, ms);
  }

  {
    dim3 dimGrid(nx / TILE_DIM, ny / TILE_DIM, 1);
    // dim3 dimBlock(TILE_DIM, TILE_DIM, 1);
    dim3 dimBlock(TILE_DIM, BLOCK_ROWS, 1);
    /*
    printf("Matrix size: %d %d, Block size: %d %d, Tile size: %d %d\n", nx, ny,
           TILE_DIM, BLOCK_ROWS, TILE_DIM, TILE_DIM);
    printf("dimGrid: %d %d %d. dimBlock: %d %d %d\n", dimGrid.x, dimGrid.y,
           dimGrid.z, dimBlock.x, dimBlock.y, dimBlock.z);
    */
    // ------------------
    // transposeNoBankConflictsRectangle
    // ------------------
    printf("%35s", "transposeNobankConflictsRectangle");
    checkCuda(cudaMemset(d_tdata, 0, mem_size));
    // warmup
    transposeNoBankConflictsRectangle<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda(cudaEventRecord(startEvent, 0));
    for (int i = 0; i < NUM_REPS; i++)
      transposeNoBankConflictsRectangle<<<dimGrid, dimBlock>>>(d_tdata, d_idata);
    checkCuda(cudaEventRecord(stopEvent, 0));
    checkCuda(cudaEventSynchronize(stopEvent));
    checkCuda(cudaEventElapsedTime(&ms, startEvent, stopEvent));
    checkCuda(cudaMemcpy(h_tdata, d_tdata, mem_size, cudaMemcpyDeviceToHost));
    printf(" ms=%f\n", ms / NUM_REPS);
    postprocess(gold, h_tdata, nx * ny, ms);
  }

error_exit:
  // cleanup
  checkCuda(cudaEventDestroy(startEvent));
  checkCuda(cudaEventDestroy(stopEvent));
  checkCuda(cudaFree(d_tdata));
  checkCuda(cudaFree(d_cdata));
  checkCuda(cudaFree(d_idata));
  free(h_idata);
  free(h_tdata);
  free(h_cdata);
  free(gold);
}
12,843
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>
#include <fstream>
#include <ctime>
#include <cstdlib>

/*
 * Class: CSCI563 - Introduction to Parallel Computing
 * Student: Zachary Nahman
 * Professor: Dr. Wu
 * Assignment: Course Project for Graduate Students
 * Due Date: 5/6/2018
 */

/*
 * Odd-even transposition sort on the GPU:
 *  - even phases compare/swap pairs (0,1), (2,3), ...
 *  - odd phases compare/swap pairs (1,2), (3,4), ...
 * Alternating the two phases N times guarantees a sorted array.
 * The result is compared against a sequential bubblesort reference.
 */

// CUDA kernel - even comparisons: thread i handles pair (i, i+1) for even i.
__global__ void even_swapper(int *X, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i % 2 == 0 && i < N-1){
        if(X[i+1] < X[i]){
            // switch in the x array
            int temp = X[i];
            X[i] = X[i+1];
            X[i+1] = temp;
        }
    }
}

// CUDA kernel - odd comparisons: thread i handles pair (i, i+1) for odd i.
// BUGFIX: the bound was `i < N-2`, which skipped the pair (N-2, N-1)
// whenever N is odd, so that last pair was never compared by either phase
// and the array could remain unsorted. The correct bound for any swap of
// (i, i+1) is `i < N-1`.
__global__ void odd_swapper(int *X, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i % 2 != 0 && i < N-1){
        if(X[i+1] < X[i]){
            // switch in the x array
            int temp = X[i];
            X[i] = X[i+1];
            X[i+1] = temp;
        }
    }
}

int main( int argc, char* argv[] )
{
    int N;
    // BUGFIX: previously execution continued with an uninitialized N after
    // printing the usage message; invalid invocations now exit immediately.
    if(argc == 2){
        N = atoi(argv[1]);
    } else if(argc == 1){
        std::cout << "No number entered for N - please run with N specified" << "\n";
        return 1;
    } else {
        std::cout << "Too many arguments entered, expected 1 - the array size" << "\n";
        return 1;
    }

    // Host buffers: the unsorted input plus one output copy per algorithm.
    size_t bytes = N * sizeof(int);
    int *h_input_array = (int*)malloc(bytes);
    int *h_output_array = (int*)malloc(bytes);
    int *seq_h_output_array = (int*)malloc(bytes);

    // Fill the host input array with integers between [1,1000].
    srand(time(0));
    for(int i = 0; i < N; i++){
        h_input_array[i] = (rand() % 1000) + 1;
    }

    // --- sequential bubblesort (reference implementation) ---
    for(int k = 0; k < N; k++){
        seq_h_output_array[k] = h_input_array[k];
    }
    clock_t seq_begin = clock();
    bool sorted = false;
    while(!sorted){
        bool swapped = false;
        for(int k = 0; k < N-1; k++){
            if(seq_h_output_array[k+1] < seq_h_output_array[k]){
                int temp = seq_h_output_array[k];
                seq_h_output_array[k] = seq_h_output_array[k+1];
                seq_h_output_array[k+1] = temp;
                swapped = true;
            }
        }
        if(!swapped){
            sorted = true;
        }
    }
    clock_t seq_end = clock();

    // --- parallel odd-even sort on the GPU ---
    // (the unused d_output_array allocation was removed; the sort is in-place
    // in d_input_array)
    int *d_input_array;
    cudaMalloc(&d_input_array, bytes);
    cudaMemcpy(d_input_array, h_input_array, bytes, cudaMemcpyHostToDevice);

    // Number of 32-thread blocks needed to give each element one thread.
    int blocksToLaunch = ceil(N/32.0);

    clock_t par_begin = clock();
    for(int i = 0; i < N; i++){
        even_swapper<<<blocksToLaunch, 32>>>(d_input_array, N);
        odd_swapper<<<blocksToLaunch, 32>>>(d_input_array, N);
    }
    // BUGFIX: kernel launches are asynchronous; without this sync the
    // "parallel time" measured only launch overhead, not the sort itself.
    cudaDeviceSynchronize();
    clock_t par_end = clock();

    // Copy the sorted array back to the host.
    cudaMemcpy(h_output_array, d_input_array, bytes, cudaMemcpyDeviceToHost);

    // Confirm both arrays are non-decreasing.
    bool seq_sorted = true;
    bool par_sorted = true;
    for(int i = 0; i < N-1; i++){
        if(h_output_array[i] > h_output_array[i+1]){
            par_sorted = false;
        }
        if(seq_h_output_array[i] > seq_h_output_array[i+1]){
            seq_sorted = false;
        }
    }
    if(seq_sorted){
        std::cout << "The sequential array is sorted properly!" << "\n";
    }else{
        std::cout << "The sequential array is NOT sorted properly!" << "\n";
    }
    if(par_sorted){
        std::cout << "The parallel array is sorted properly!" << "\n";
    }else{
        std::cout << "The parallel array is NOT sorted properly!" << "\n";
    }

    double seq_elapsed_secs = double(seq_end - seq_begin)/CLOCKS_PER_SEC;
    std::cout << "\n";
    std::cout << "Elapsed Time for Sequential Bubblesort: ";
    std::cout << seq_elapsed_secs;
    std::cout << " seconds";
    std::cout << "\n";

    double par_elapsed_secs = double(par_end - par_begin)/CLOCKS_PER_SEC;
    std::cout << "\n";
    std::cout << "Elapsed Time for Parallel Bubblesort: ";
    std::cout << par_elapsed_secs;
    std::cout << " seconds";
    std::cout << "\n";

    // Release device memory
    cudaFree(d_input_array);

    // Release host memory
    free(h_input_array);
    free(h_output_array);
    free(seq_h_output_array);

    return 0;
}
12,844
#include "includes.h"

// Mean-interpolation forward pass: for every target point n, average the
// features of its nnCount[b*N+n] neighbours (indices taken from nnIndex)
// and accumulate into `output`.
//
// Layouts implied by the indexing below:
//   nnIndex : [B, N, K]  neighbour indices into the M source points
//   nnCount : [B, N]     number of valid neighbours per target point
//   input   : [B, M, C]  source-point features
//   output  : [B, N, C]  interpolated features; written with +=, so the
//                        caller presumably zero-initializes it — TODO confirm
//
// Launch shape: blocks grid-stride over the B batches and threads stride
// over the N*C (point, channel) pairs, so any 1-D configuration is valid.
__global__ void mean_interpolate_forward(int B, int N, int M, int C, int K, const int* nnIndex, const int* nnCount, const float* input, float* output)
{
    for(int i=blockIdx.x;i<B;i+=gridDim.x) // one batch per grid-stride step
    {
        for(int j=threadIdx.x;j<N*C;j+=blockDim.x)
        {
            int n = j/C; // target point index
            int c = j%C; // channel index
            int nnSize = nnCount[i*N+n];

            // Accumulate input/nnSize per neighbour; if nnSize is 0 the loop
            // body never runs and output is left untouched.
            for(int k=0;k<nnSize;k++)
            {
                int m = nnIndex[i*N*K+n*K+k];
                output[i*N*C+j] += input[i*M*C+m*C+c]/nnSize;
            }
        }
    }
}
12,845
#include "includes.h"

// Per-element squared difference of two coordinate arrays:
//   coordinates_arr[tid] = (point_coordinate_1[tid] - point_coordinate_2[tid])^2
// Expects a single block whose thread count equals the number of coordinates
// (the original inline comment suggests 52 — TODO confirm at the launch site).
__global__ void calc2points(float* point_coordinate_1, float* point_coordinate_2 , float* coordinates_arr)
{
    int tid = threadIdx.x; // 52
    // PERF: square via multiplication instead of pow(), which promoted the
    // float operands to double and invoked the double-precision pow routine.
    float diff = point_coordinate_1[tid] - point_coordinate_2[tid];
    coordinates_arr[tid] = diff * diff;
}
12,846
#include<stdio.h>
#include<stdlib.h>
#include<cuda_runtime.h>

// No-op kernel used only to exercise the cudaLaunchKernel API; the computed
// index `i` is intentionally unused.
__global__ void dummy0()
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
}

// Demonstrates launching a parameterless kernel through the runtime's
// cudaLaunchKernel entry point instead of the <<<...>>> syntax, and prints
// the resulting error code.
int main(void)
{
    int N=1 << 20;
    unsigned int threads = 256;
    unsigned int blocks = (N + 255) / threads; // ceil(N / threads) for threads == 256

    // dummy0<<<blocks, threads>>>();

    // Empty argument list: dummy0 takes no parameters.
    // NOTE(review): a zero-length array is a compiler extension in C++ —
    // confirm the target compiler accepts it.
    void *args[] = {};
    printf("cudaLaunchKernel with 0 arguments\n");
    cudaError_t cudaError = cudaLaunchKernel((void*)dummy0, dim3(blocks), dim3(threads), args, 0, NULL);
    printf("cudaError:%d\n",cudaError);
    return 0;
}
12,847
// NOTE(review): despite the name, this is not an element-wise vector add.
// Every launched thread runs the identical five-deep loop nest (there is no
// threadIdx/blockIdx usage), so all threads redundantly repeat the same
// O(N^5) work. The flat index sums reach up to 5*(N-1) for `result` and
// 2*(N-1)/3*(N-1) for `l`/`r`, so the buffers are accessed past index N-1
// unless they are allocated much larger than N — confirm the intended
// semantics and buffer sizes with the caller before relying on this kernel.
__global__ void vecAdd(float *l, float *r, float *result, size_t N)
{
    for (size_t i = 0; i < N; ++i) {
        for (size_t j = 0; j < N; ++j) {
            for (size_t k = 0; k < N; ++k) {
                for (size_t v = 0; v < N; ++v) {
                    for (size_t u = 0; u < N; ++u) {
                        result[i + j + k + v + u] = l[i + j] + r[v + u + k];
                    }
                }
            }
        }
    }
}
12,848
#include <iostream>

// Kernel: compute a + b on the device and store the sum where c points.
__global__ void add_kernel(int a, int b, int *c)
{
    *c = a + b;
}

int main(int argc, char *argv[])
{
    // Query and print basic device info.
    int count;
    cudaGetDeviceCount(&count);

    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);

    std::cout << "Device Count: " << count << "\n";
    std::cout << "Name: " << prop.name << "\n";
    // BUGFIX: this value is the per-block thread limit, but it was printed
    // under the misleading label "mem".
    std::cout << "\t maxThreadsPerBlock: " << prop.maxThreadsPerBlock << "\n";

    int c;
    int *dev_c;

    // Allocate result storage on the device.
    // The returned pointer must not be dereferenced on the host.
    cudaMalloc((void**)&dev_c, sizeof(int));

    // Launch a single thread to add the two scalars on the GPU.
    add_kernel<<< 1, 1 >>> (2, 7, dev_c);

    // cudaMemcpy synchronizes with the kernel before copying the sum back.
    cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    std::cout << "SUM: " << c << "\n";

    cudaFree(dev_c);

    std::cout << "HELLO WORLD!" << "\n";
    return 0;
}
12,849
#include <stdio.h>

// Return true if chunk x,z is a slime chunk, false otherwise. Use s as seed.
// Reimplements Java's java.util.Random nextInt(10) on the chunk-derived seed.
// https://minecraft.gamepedia.com/Slime
// https://docs.oracle.com/javase/7/docs/api/java/util/Random.html
// http://developer.classpath.org/doc/java/util/Random-source.html
__device__ bool isSlimeChunk(long long s, long long x, long long z) {
  // Chunk seed scramble, masked to 48 bits like java.util.Random's state.
  unsigned long long seed = (s + (int) (x * x * 0x4c1906LL) + (int) (x * 0x5ac0dbLL) + (int) (z * z) * 0x4307a7LL + (int) (z * 0x5f24fLL) ^ 0x5E434E432LL) & ((1LL << 48) - 1);
  int bits, val;
  do {
    // One LCG step (Java's 0x5DEECE66D multiplier), then take the top 31 bits.
    seed = (seed * 0x5DEECE66DLL + 0xBLL) & ((1LL << 48) - 1);
    bits = (int)(seed >> 17);
    val = bits % 10;
  } while (bits - val + 9 < 0); // rejection step from Java's nextInt(bound)
  return val == 0; // roughly 1 in 10 chunks is a slime chunk
}

// Return the (floored, integer) square root of x.
// NOTE(review): this provides an int overload of sqrt; the host helper below
// relies on this overload being chosen for int arguments.
__device__ __host__ int sqrt(int x) {
  if (x == 0 || x == 1)
    return x;
  int i = 1, result = 1;
  while (result <= x) {
    ++i;
    result = i*i;
  }
  return i-1;
}

// Calculate the slime chunks in the initial row. This is different because the old values cannot be reused.
// Use x as the x coordinate, s as the seed and store the result in row.
// Each bit in row is one chunk, each int in row is one z coordinate.
__global__ void setInitialX(long long s, int x, int* row) {
  int z = blockIdx.y * blockDim.y + threadIdx.y;
  int current = 0;
  // Pack 32 consecutive x-chunks into one int, most significant bit first.
  for (int i = 0; i < 32; ++i) {
    current |= isSlimeChunk(s, x+i, z - 1875000) << (31-i);
  }
  row[z] = current;
}

// Same as setInitialX, except it reuses a few chunks (which were already calculated in the previous setX/setInitialX call).
__global__ void setX(long long s, int x, int* row) {
  int z = blockIdx.y * blockDim.y + threadIdx.y;
  int current = row[z] << 16; // 17 chunks are tested at a time so 16 need to be included in the next search again
  // 32-16 == 16
  for (int i = 16; i < 32; ++i) {
    current |= isSlimeChunk(s, x+i, z - 1875000) << (31-i);
  }
  row[z] = current;
}

// Count the amount of "1" bits in x and return it.
// This is only better if most bits are 0, which is the case for slime chunks (9/10 is 0 and 1/10 is 1).
// https://en.wikipedia.org/wiki/Hamming_weight
// TODO see if a lookup table is faster and if it is, implement it
__device__ int popcount(int x) {
  int count;
  // Kernighan's trick: each iteration clears the lowest set bit.
  for (count = 0; x; count++) {
    x &= x - 1;
  }
  return count;
}

// Count how many blocks are loaded and in a slime chunk at the same time.
// Take the slime chunks from row and return the result in n.
// currentMax is the current highest value. If the value cannot exceed this, the calculation will be stopped earlier to save time.
__global__ void count(int* row, unsigned short* n, int currentMax, short* sb, short* sbs) {
  // NOTE(review): x omits threadIdx.x, so every thread of a block shares the
  // same x — confirm the launch configuration uses blockDim.x == 1.
  int x = blockIdx.x * blockDim.x;
  int z = blockIdx.y * blockDim.y + threadIdx.y;
  // Avoid going too high and trying to access unavailable memory
  if (z >= 3750000) return;
  // Load the chunks from row: a 17-bit window starting at bit offset x.
  int chunks[17];
  for (int i=0; i<17; ++i) {
    chunks[i] = row[z+i] >> 15-x & 0x1FFFF; // parsed as (row[z+i] >> (15-x)) & 0x1FFFF
  }
  // First count all in a 17x17 area to simplify, if this doesn't succeed, don't check all different locations within that chunk.
  short sum = 0;
  for (int i=0; i<17; ++i) {
    sum += popcount(chunks[i]);
  }
  if (sum*256 <= currentMax) {
    n[16*z+x] = 0;
    return;
  }
  // Second simplification, exclude the blocks that are not in any of the spawnblocks and test again
  int cl[289]; // chunklist, 1 if a chunk is a slime chunk, otherwise 0 for each chunk in a 17x17 area
  for (int i=0; i<17; ++i) {
    for (int j=0; j<17; ++j) {
      cl[i*17+j] = chunks[i] >> (16-j) & 1;
    }
  }
  sum = 0;
  for (int i=0; i<289; ++i) {
    sum += cl[i]==1 ? sbs[i] : 0;
  }
  if (sum <= currentMax) {
    n[16*z+x] = 0;
    return;
  }
  // Calculate different rates for all heights and positions inside the chunk
  // Each value of j is a new height and/or position inside the chunk
  // Each value of i is the coordinates for a chunk around the chunk the player is in
  short highest = 0;
  for (int j=0; j<6144; ++j) { // 6144 == 16*16*24 player (x, z, y) positions
    sum = 0;
    for (int i=0; i<289; ++i) {
      sum += cl[i]==1 ? sb[j*289+i] : 0;
    }
    if (sum > highest) {
      highest = sum;
    }
  }
  if (highest > currentMax) {
    //printf("%u,%u - %u\n",x,z,highest);
    n[16*z+x] = highest;
  } else {
    n[16*z+x] = 0;
  }
  return;
}

// Fill f with the amount of blocks within spawn radius (24 < x <= 128) in each chunk, for each x and z coordinate in the chunk and 24 y coordinates
// it uses 17x17 chunks, not 16x16
__global__ void calcSpawnableBlocks(short int* f) {
  int thread = blockIdx.x * blockDim.x + threadIdx.x;
  // Decode the thread id into the player position inside the chunk (x, z)
  // and the height slot y; i is this position's base offset into f.
  int x = thread/384;
  int y = thread%384/16;
  int z = thread%16;
  int i = x*110976+z*6936 + y*289;
  // for (int xx=-128; xx<129; ++xx) {
  // for (int zz=-128; zz<129; ++zz) {
  for (int xx=-128; xx<145; ++xx) {
    for (int zz=-128; zz<145; ++zz) {
      // Integer distance computed with the custom int sqrt overload above.
      int t = sqrt((x-xx)*(x-xx) + (z-zz)*(z-zz) + y*y);
      if (24 < t && t <= 128) {
        // f[i + (xx+128)/17*17+(zz+128)/17]++;
        f[i + (xx+128)/16*17+(zz+128)/16]++;
      }
    }
  }
}

// Count blocks if slime can spawn there at any x, y, or z position of the player
void calcSecondSpawnableBlocks(short* a) {
  for (int i=0; i<17*17; ++i) a[i] = 0;
  for (int xx=-128; xx<145; ++xx) {
    for (int zz=-128; zz<145; ++zz) {
      bool temp = false;
      // temp becomes true as soon as any in-chunk player position is in range.
      for (int x=0; x<16 && !temp; ++x) {
        for (int z=0; z<16 && !temp; ++z) {
          if (sqrt((x-xx)*(x-xx) + (z-zz)*(z-zz)) <= 128) temp = true;
        }
      }
      if (temp) a[(xx+128)/16*17 + (zz+128)/16]++;
    }
  }
}

// Find the highest amount of slime chunks in an area and the location of those slime chunks
// c (count) is the amount of slime chunks in an area and l (location) is the location of those slime chunks.
// the highest 1024 will be returned in the first 1024 items in count and location // http://developer.download.nvidia.com/compute/cuda/1_1/Website/projects/reduction/doc/reduction.pdf __global__ void findHighest(unsigned short *c, unsigned short *o, int *l) { __shared__ short scount[1024]; __shared__ int slocation[1024]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; if(i >= 59999232) return; scount[tid] = c[i]; slocation[tid] = i; __syncthreads(); for(unsigned int s=blockDim.x/2; s>0; s>>=1) { if (tid < s) { if (scount[tid+s] > scount[tid]) { scount[tid] = scount[tid+s]; slocation[tid] = slocation[tid+s]; } } __syncthreads(); } if(tid==0) { o[blockIdx.x] = scount[0]; l[blockIdx.x] = slocation[0]; } } // Inefficient function to move the location and count of the highest chunk to index 0 of c and l void findHighestCpu(unsigned short* c, int* l, unsigned short* oc, int* cl) { cudaMemcpy(c, oc, 58608 * sizeof(short), cudaMemcpyDeviceToHost); cudaMemcpy(l, cl, 58608 * sizeof(int), cudaMemcpyDeviceToHost); unsigned short highest = 0; for (int i=0; i<58608; ++i) highest = c[i]>highest ? c[i] : highest; for (int i=0; i<58608; ++i) { if (c[i] == highest) { c[0] = c[i]; l[0] = l[i]; return; } } } void printError() { cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); } int main() { // Cuda variables int *chunks; // Each bit represents a chunk. 
1 if the chunk is a slime chunk, otherwise 0 cudaMalloc((void**)&chunks, sizeof(int) * 3750912); unsigned short *chunkCount; // The amount of blocks slimes can spawn on in a 17x17 area centered around the player cudaMalloc((void**)&chunkCount, sizeof(short) * 3750000 * 16); short int *spawnBlocks; // A list with the amount of spawnable blocks in each chunk for each position in the chunk and each height cudaMalloc((void**)&spawnBlocks, sizeof(short int) * 17*17 * 16*16*24); cudaMemset(spawnBlocks, 0, sizeof(short int) * 17*17 * 16*16*24); int *chunkLocation; // The location of the chunk corresponding to the count in chunkCount. Format is z*16+x cudaMalloc((void**)&chunkLocation, sizeof(int) * 3663 * 16); unsigned short *outCount; // The output of the findHighest function cudaMalloc((void**)&outCount, sizeof(short) * 3663 * 16); short *spawnBlocksSecond; // Combination of all possible heights and positions of spawnBlocks cudaMalloc((void**)&spawnBlocksSecond, sizeof(short) * 17 * 17); // Host variables for outCount and chunkLocation unsigned short *countt = (unsigned short*) malloc(58608 * sizeof(short)); int *location = (int*) malloc(58608 * sizeof(int)); // Get seed, minx and maxx from user input long long seed = 0; char temp[8] = {0}; int minx = -1875000; int maxx = 1875000; int highest = 0; printf("Enter seed: "); scanf("%d", &seed); printf("Enter x start (min is -1875000) ('d' for default -1875000): "); scanf("%s", temp); if (temp[0] != 'd') sscanf(temp, "%d", &minx); printf("Enter x end (max is 1875000) ('d' for default 1875000): "); scanf("%s", temp); if (temp[0] != 'd') sscanf(temp, "%d", &maxx); if (minx < -1875000 || maxx > 1875000 || maxx < minx) { printf("Illegal minx or maxx, range is -1875000 to 1875000 and minx must be smaller than maxx"); return 1; } printf("Enter highest to begin with: "); scanf("%d", &highest); printf("Calculating spawnable blocks\n"); calcSpawnableBlocks<<<6, 1024>>>(spawnBlocks); cudaDeviceSynchronize(); short *sbsCpu = (short*) 
malloc(17*17*sizeof(short)); printf("Calculating second spawnable blocks\n"); calcSecondSpawnableBlocks(sbsCpu); cudaDeviceSynchronize(); cudaMemcpy(spawnBlocksSecond, sbsCpu, 17*17*sizeof(short), cudaMemcpyHostToDevice); printf("Setting initial X\n"); setInitialX<<<dim3(1, 3663), dim3(1, 1024)>>>(seed, minx, chunks); cudaDeviceSynchronize(); printf("Counting slime chunks/blocks\n"); count<<<dim3(16, 3663), dim3(1, 1024)>>>(chunks,chunkCount,highest,spawnBlocks,spawnBlocksSecond); cudaDeviceSynchronize(); printf("Finding highest chunk\n"); findHighest<<<3663 * 16, 1024>>>(chunkCount, outCount, chunkLocation); cudaDeviceSynchronize(); findHighestCpu(countt, location, outCount, chunkLocation); if (countt[0] > highest) { printf("New highest in first row: %u - %u\n", countt[0], location[0]); highest = countt[0]; } else { printf("No new highest in first row, continuing with %d\n", highest); } printf("Starting computation of other chunks\n"); for (int i = minx+16; i < maxx; i += 16) { setX<<<dim3(1, 3663), dim3(1, 1024)>>>(seed, i, chunks); cudaDeviceSynchronize(); count<<<dim3(16, 3663), dim3(1, 1024)>>>(chunks,chunkCount,highest,spawnBlocks,spawnBlocksSecond); cudaDeviceSynchronize(); findHighest<<<3663 * 16, 1024>>>(chunkCount, outCount, chunkLocation); cudaDeviceSynchronize(); findHighestCpu(countt, location, outCount, chunkLocation); if (countt[0] > highest) { printf("New highest value: %u - %u with i %d, absolute location: %d, %d\n", countt[0], location[0], i, location[0]%16*16+i*16, location[0]/16*16-30000000); highest = countt[0]; } } // Freeing memory cudaFree(chunks); cudaFree(chunkCount); cudaFree(spawnBlocks); cudaFree(chunkLocation); cudaFree(outCount); cudaFree(spawnBlocksSecond); free(countt); free(location); free(sbsCpu); }
12,850
// Performance based GPU implementation of Matrix Multiply using NVIDIA CUDA Programming Language
// Sanil Rao 5/8/17 CS4444
#include<stdio.h>
#include<sys/time.h>
#include<stdlib.h>
#include<time.h>   // clock(), CLOCKS_PER_SEC, time() -- used below but previously not included
#include<math.h>   // fabs(), ceil()
#include<iostream>

// Tile edge length (threads per block dimension) for the optimized kernel; also the
// basis for the 1-D launch of the barebones kernel.
#define threads_per_block 2

using namespace std;

//----------------------------------- Structures and Globals ---------------------------------------------

// Dimensions (rows x columns) of one row-major 2D matrix.
typedef struct {
	int dimension1;
	int dimension2;
} ArrayMetadata2D;

// metadata variables describing dimensionalities of all data structures involved in the computation
ArrayMetadata2D A_MD, B_MD, C_MD;
// pointers for input and output arrays in the host memory
float *A, *B, *C, *C_CPU;
// pointers for input and output arrays in the device memory (NVIDIA DRAM)
float *A_GPU, *B_GPU, *C_GPU;

//----------------------------------- host function definitions -----------------------------------------
void allocateAndInitializeAB();
void computeCpuMMM();
void copyMatricesToGPU();
void copyResultFromGPU();
void compareHostAndGpuOutput();
void die(const char *error);
void check_error(cudaError e);

//----------------------------------- CUDA function definitions -----------------------------------------
// barebones GPU implementation launcher
void GPU_matrix_multiply();
// optimized GPU implementation launcher
void GPU_matrix_multiply_opt();

//---------------------------------------------------------------------------------------------------------

// Parses optional matrix dimensions from argv (defaults: 100 and square), runs the
// optimized GPU multiply, and reports elapsed wall-clock time.
// argv: [A_rows [A_cols [B_rows [B_cols]]]]
int main(int argc, char **argv) {
	A_MD.dimension1 = (argc > 1) ? atoi(argv[1]) : 100;
	A_MD.dimension2 = (argc > 2) ? atoi(argv[2]) : A_MD.dimension1;
	B_MD.dimension1 = (argc > 3) ? atoi(argv[3]) : A_MD.dimension2;
	B_MD.dimension2 = (argc > 4) ? atoi(argv[4]) : B_MD.dimension1;
	C_MD.dimension1 = A_MD.dimension1;
	C_MD.dimension2 = B_MD.dimension2;

	printf("Matrix A is %d-by-%d\n", A_MD.dimension1, A_MD.dimension2);
	printf("Matrix B is %d-by-%d\n", B_MD.dimension1, B_MD.dimension2);
	printf("Matrix C is %d-by-%d\n", C_MD.dimension1, C_MD.dimension2);

	allocateAndInitializeAB();

	// Timing the CPU implementation that was unmodified
	// matrix matrix multiplication in the CPU
	// clock_t start = clock();
	// computeCpuMMM();
	// clock_t end = clock();
	// double elapsed = (end - start) / (double) CLOCKS_PER_SEC;
	// printf("Computation time in the CPU: %f seconds\n", elapsed);

	// Timing the barebones GPU implementation
	//clock_t startGPU = clock();
	//GPU_matrix_multiply();
	//clock_t endGPU = clock();

	// Timing the optimized GPU implementation
	clock_t startGPU = clock();
	GPU_matrix_multiply_opt();
	clock_t endGPU = clock();
	double elapsedGPU = (endGPU - startGPU) / (double) CLOCKS_PER_SEC;
	printf("Computation time in the GPU: %f seconds\n", elapsedGPU);

	//compareHostAndGpuOutput();
	return 0;
}

// Optimized GPU kernel using tiling, shared memory, and a transposed B matrix
// (copyMatricesToGPU() transposes B in place before uploading it).
// 2-dimensional grid/block layout: one thread per output element, one block per tile.
// NOTE(review): every bounds check uses A_MD.dimension1 only, so this kernel assumes
// square matrices (dimension1 == dimension2) -- confirm before using rectangular inputs.
__global__ void mm_kernel_opt(float *A_GPU, float *B_GPU, float *C_GPU, ArrayMetadata2D A_MD)
{
	// tile coordinates of this block within the output matrix
	int block_id_row = blockIdx.y;
	int block_id_col = blockIdx.x;
	// accumulator for the single output element owned by this thread
	float val = 0.0f;
	// position of this thread inside its tile
	int row = threadIdx.y;
	int col = threadIdx.x;
	// shared-memory staging tiles for A and (transposed) B
	__shared__ float A[threads_per_block][threads_per_block];
	__shared__ float B[threads_per_block][threads_per_block];

	// walk the tiles along the shared (inner) dimension
	for (int j = 0; j < (threads_per_block + A_MD.dimension1 - 1) / threads_per_block; j++) {
		// stage one element of A's tile; pad out-of-range entries with zero
		if ((j * threads_per_block + col) < A_MD.dimension1 && (block_id_row * threads_per_block + row) < A_MD.dimension1)
			A[row][col] = A_GPU[(block_id_row * threads_per_block + row) * A_MD.dimension1 + j * threads_per_block + col];
		else
			A[row][col] = 0.0f;
		// stage one element of B's tile (B arrives transposed, hence the swapped indexing)
		if ((j * threads_per_block + row) < A_MD.dimension1 && (block_id_col * threads_per_block + col) < A_MD.dimension1)
			B[row][col] = B_GPU[(block_id_col * threads_per_block + col) * A_MD.dimension1 + (j * threads_per_block + row)];
		else
			B[row][col] = 0.0f;
		// barrier: both tiles must be fully staged before any thread computes
		__syncthreads();
		for (int c = 0; c < threads_per_block; c++) {
			val += A[row][c] * B[c][col];
		}
		// barrier: all threads must finish reading before the tiles are overwritten
		__syncthreads();
	}

	// Single store per output element. (The original issued this store inside the tile
	// loop, producing one redundant global write per tile iteration.)
	if ((block_id_row * threads_per_block + row) < A_MD.dimension1 && (block_id_col * threads_per_block + col) < A_MD.dimension1)
		C_GPU[((block_id_row * blockDim.y + row) * A_MD.dimension1) + (block_id_col * blockDim.x) + col] = val;
}

// Barebones GPU kernel, 1-dimensional grid and block size.
// Each thread owns one output column (its global thread id) and walks every row.
__global__ void mm_kernel(float *A_GPU, float *B_GPU, float *C_GPU, ArrayMetadata2D A_MD)
{
	// block id gives the value of each individual block which together make the whole grid;
	// blockDim gives the block size; threadIdx gives each thread within the block.
	int block_id = blockIdx.x;
	int global_thread_id = blockDim.x * block_id + threadIdx.x;
	// Guard: the launcher rounds the grid up, so trailing threads must not touch memory.
	// (The original had no guard and could write past the end of C_GPU.)
	if (global_thread_id >= A_MD.dimension2) return;
	int k, i;
	float val;
	for (i = 0; i < A_MD.dimension1; i++) {
		val = 0.0f;
		for (k = 0; k < A_MD.dimension2; k++) {
			val += A_GPU[i * A_MD.dimension2 + k] * B_GPU[k * A_MD.dimension2 + global_thread_id];
		}
		// store once per output element (the original stored inside the k loop,
		// issuing one redundant global write per multiply-add)
		C_GPU[i * A_MD.dimension2 + global_thread_id] = val;
	}
}

// Host wrapper for the optimized kernel: upload inputs, launch a 2-D grid of
// threads_per_block x threads_per_block tiles, download the result into C_CPU.
void GPU_matrix_multiply_opt()
{
	copyMatricesToGPU();
	dim3 block_size(threads_per_block, threads_per_block);
	dim3 grid_size(ceil(((float)A_MD.dimension1) / threads_per_block), ceil(((float)A_MD.dimension1) / threads_per_block));
	mm_kernel_opt<<<grid_size, block_size>>>(A_GPU, B_GPU, C_GPU, A_MD);
	check_error(cudaGetLastError()); // surface launch-configuration errors
	copyResultFromGPU();
}

// Host wrapper for the barebones kernel: 1-D grid sized by ceiling division so every
// output column gets a thread (the kernel guards the padded tail).
void GPU_matrix_multiply()
{
	copyMatricesToGPU();
	dim3 grid_size((B_MD.dimension2 + threads_per_block * threads_per_block - 1) / (threads_per_block * threads_per_block));
	dim3 block_size(threads_per_block * threads_per_block);
	mm_kernel<<<grid_size, block_size>>>(A_GPU, B_GPU, C_GPU, A_MD);
	check_error(cudaGetLastError()); // surface launch-configuration errors
	copyResultFromGPU();
}

// allocate and initialize A and B using a random number generator (values in [0, 1))
void allocateAndInitializeAB()
{
	size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
	A = (float*) malloc(sizeofA);
	srand(time(NULL));
	for (int i = 0; i < A_MD.dimension1; i++) {
		for (int j = 0; j < A_MD.dimension2; j++) {
			int index = i * A_MD.dimension2 + j;
			A[index] = (rand() % 1000) * 0.001;
		}
	}
	size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
	B = (float*) malloc(sizeofB);
	for (int i = 0; i < B_MD.dimension1; i++) {
		for (int j = 0; j < B_MD.dimension2; j++) {
			int index = i * B_MD.dimension2 + j;
			B[index] = (rand() % 1000) * 0.001;
		}
	}
}

// Allocate GPU memory for all matrices and copy A and B from host to device.
// B is transposed in place (host side) before upload because mm_kernel_opt expects a
// transposed B; if launching the barebones kernel, comment out the transpose loop.
// NOTE(review): the in-place transpose assumes B is square -- confirm for rectangular B.
void copyMatricesToGPU()
{
	size_t sizeofA = A_MD.dimension1 * A_MD.dimension2 * sizeof(float);
	check_error(cudaMalloc((void **) &A_GPU, sizeofA));
	check_error(cudaMemcpy(A_GPU, A, sizeofA, cudaMemcpyHostToDevice));
	size_t sizeofB = B_MD.dimension1 * B_MD.dimension2 * sizeof(float);
	check_error(cudaMalloc((void **) &B_GPU, sizeofB));
	int i, j;
	// transpose B in place by swapping across the main diagonal
	for (i = 0; i < B_MD.dimension1; i++) {
		int fi = i * B_MD.dimension2;
		for (j = 0; j < B_MD.dimension2; j++) {
			if (j > i) {
				int index = fi + j;
				int newindex = j * B_MD.dimension2 + i;
				float tmp = B[index];
				B[index] = B[newindex];
				B[newindex] = tmp;
			}
		}
	}
	check_error(cudaMemcpy(B_GPU, B, sizeofB, cudaMemcpyHostToDevice));
	size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
	check_error(cudaMalloc((void **) &C_GPU, sizeofC));
}

// copy results from C_GPU (device memory) into the freshly allocated C_CPU (host memory)
void copyResultFromGPU()
{
	size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
	C_CPU = (float*) malloc(sizeofC);
	check_error(cudaMemcpy(C_CPU, C_GPU, sizeofC, cudaMemcpyDeviceToHost));
}

// Straightforward CPU reference multiply: allocates C and fills C[i][j] with
// sum over k of A[i][k] * B[k][j]. Deliberately unblocked/naive.
void computeCpuMMM()
{
	size_t sizeofC = C_MD.dimension1 * C_MD.dimension2 * sizeof(float);
	C = (float*) malloc(sizeofC);
	for (int i = 0; i < A_MD.dimension1; i++) {
		int a_i = i * A_MD.dimension2;
		int c_i = i * C_MD.dimension2;
		for (int j = 0; j < B_MD.dimension2; j++) {
			int c_index = c_i + j;
			C[c_index] = 0;
			for (int k = 0; k < B_MD.dimension1; k++) {
				int a_index = a_i + k;
				int b_index = k * B_MD.dimension2 + j;
				C[c_index] += A[a_index] * B[b_index];
			}
		}
	}
}

// Compare the CPU result (C) with the GPU result (C_CPU) element-wise with a 0.01
// absolute tolerance, reporting every mismatch and a final verdict.
void compareHostAndGpuOutput()
{
	int totalElements = C_MD.dimension1 * C_MD.dimension2;
	int missmatchCount = 0;
	for (int i = 0; i < totalElements; i++) {
		if (fabs(C[i] - C_CPU[i]) > 0.01) {
			missmatchCount++;
			printf("mismatch at index %i: %f\t%f\n", i, C[i], C_CPU[i]);
		}
	}
	if (missmatchCount > 0) {
		printf("Computation is incorrect: outputs do not match in %d indexes\n", missmatchCount);
	} else {
		printf("Computation is correct: CPU and GPU outputs match\n");
	}
}

// Prints the specified error message and then exits
void die(const char *error)
{
	printf("%s", error);
	exit(1);
}

// If the specified error code refers to a real error, report it and quit the program
void check_error(cudaError e)
{
	if (e != cudaSuccess) {
		printf("\nCUDA error: %s\n", cudaGetErrorString(e));
		exit(1);
	}
}
12,851
#include <thrust/pair.h>
#include <thrust/transform.h>
#include <thrust/host_vector.h>
#include <thrust/copy.h>
#include <thrust/sort.h>
#include <thrust/device_vector.h>

// Number of elements in every array below.
#define N 10

// Zips two scalars into a thrust::pair; used with thrust::transform to build
// (key, value) pairs from the parallel input arrays A and B.
struct make_pair_functor {
	template<typename T>
	__host__ __device__
	thrust::pair<T, T> operator() (const T &x, const T &y) {
		return thrust::make_pair(x, y);
	}
};

// Projects a pair onto its first component (the key).
struct pair_to_vector_first {
	template<typename T>
	__host__ __device__
	int operator() (const T &x) {
		return x.first;
	}
};

// Projects a pair onto its second component (the value).
struct pair_to_vector_second {
	template<typename T>
	__host__ __device__
	int operator() (const T &x) {
		return x.second;
	}
};

// Device functor applied to every index i of the SORTED key/value arrays.
// Intent (from the branch structure): for each key, tally in counts[key] the number
// of distinct (key, value) runs, i.e. counts[keys[i]] grows whenever element i starts
// a new key or a new value within the same key.
// NOTE(review): the plain store `counts[keys[0]] = 1` by the i == 0 thread races with
// the atomicAdds issued by other threads on the same counter and can overwrite
// increments that already happened; the commented-out fences/spin below look like
// earlier attempts to order these writes. Verify the output before relying on it.
struct accumulate_diff {
	int *keys;
	int *values;
	int *counts;
	accumulate_diff(int *k, int *v, int *c) : keys(k), values(v), counts(c) {}
	template<typename T>
	__device__
	void operator() (const T &i) {
		// block-local scratch copy of counts; written here but never read back
		// in the current version (see the commented-out last line)
		__shared__ volatile int _sd[N];
		_sd[i] = counts[i];
		//__threadfence_system();
		if (i == 0) counts[keys[i]] = 1;
		//__threadfence_system();}
		if (i > 0) {
			//while (keys[i] != keys[i-1] && _sd[i] == 0);
			if (keys[i] != keys[i-1]) {
				// first element of a new key: count it
				//_sd[i] = 1;
				atomicAdd(&counts[keys[i]], 1);
				//__threadfence_system();
				//_sd[i] = 0;
			} else {
				if (values[i] != values[i-1]) {
					// same key, new value: one more distinct value for this key
					//printf("777777777777777 %d\n", keys[i]);
					//_sd[i] = 1;
					atomicAdd(&counts[keys[i]], 1);
					//__threadfence_system();
					//_sd[i] = 0;
				}
			}
		}
		//__threadfence_system();
		//counts[keys[i]] = _sd[keys[i]];
	}
};

// Driver: pair up A/B, sort the pairs lexicographically, split them back into flat
// key (C) / value (D) arrays, run accumulate_diff over every index on the device,
// then print pairs, keys, and the resulting per-index counts.
int main() {
	int A[N] = {1, 3, 3, 3, 3, 2, 1, 2, 2, 1};
	int B[N] = {9, 8, 7, 5, 6, 7, 8, 7, 6, 9};
	int C[N];
	int D[N];
	thrust::device_vector<int> counts(N, 0);
	typedef thrust::pair<int, int> P;
	thrust::host_vector<P> h_pairs(N);
	// zip A and B into pairs, then sort by (key, value)
	thrust::transform(A, A+N, B, h_pairs.begin(), make_pair_functor());
	thrust::sort(h_pairs.begin(), h_pairs.end());
	// unzip the sorted pairs back into flat arrays
	thrust::transform(h_pairs.begin(), h_pairs.end(), C, pair_to_vector_first());
	thrust::transform(h_pairs.begin(), h_pairs.end(), D, pair_to_vector_second());
	thrust::device_vector<int> c_vec(C, C+N);
	thrust::device_vector<int> d_vec(D, D+N);
	//thrust::reduce_by_key(C, C+7, thrust::constant_iterator<int>(1), C, )
	accumulate_diff acc(thrust::raw_pointer_cast(c_vec.data()), thrust::raw_pointer_cast(d_vec.data()), thrust::raw_pointer_cast(counts.data()));
	// launch one invocation of the functor per index 0..N-1
	thrust::for_each(thrust::counting_iterator<unsigned int>(0), thrust::counting_iterator<unsigned int>(N), acc);
	for (int i = 0; i < N; i++) {
		std::cout << h_pairs[i].first << ": " << h_pairs[i].second << "\n";
		std::cout << "=========" << C[i] << "\n";
		std::cout << "++++++++++++++" << counts[i] << "\n";
	}
}
12,852
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "sdf_gen.cuh"
#include <memory.h>
#include <stdlib.h>
#include <stdio.h>
#include <math.h>

// Release all buffers owned by the context: device/managed allocations via cudaFree,
// the host-side output via delete[].
CUDA_SDF::SDFGenerationContext::~SDFGenerationContext()
{
	cudaFree(data);
	cudaFree(sdf_data);
	//cudaFree(out_data);
	delete[] out_data;
}

// (Re)allocate the context's buffers for a width x height x numComponents image and
// copy img_data into the managed input buffer.
// NOTE(review): the frees at the top assume data/sdf_data/out_data start out null (or
// point at earlier allocations) -- the constructor is not visible here; confirm members
// are zero-initialized before the first call.
void CUDA_SDF::SDFGenerationContext::CopyImage(unsigned char* img_data)
{
	cudaFree(data);
	cudaFree(sdf_data);
	//cudaFree(out_data);
	delete[] out_data;
	size_t img_size = (size_t)width * (size_t)height * (size_t)numComponents;
	cudaMallocManaged(&data, img_size);
	cudaMallocManaged(&sdf_data, img_size * sizeof(float));
	//cudaMallocManaged(&out_data, img_size);
	out_data = new unsigned char[img_size];
	memcpy(data, img_data, img_size);
}

// Clamp d into [min, max]; usable from both host and device code.
template <typename type>
inline __host__ __device__ type clamp(type d, type min, type max)
{
	const type t = d < min ? min : d;
	return t > max ? max : t;
}

// Euclidean distance between two integer pixel coordinates.
inline __host__ __device__ float distance(int x1, int y1, int x2, int y2)
{
	float x = (float)x2 - (float)x1;
	float y = (float)y2 - (float)y1;
	return sqrt(x * x + y * y);
}

#define max(a,b) (((a) > (b)) ? (a) : (b))
#define min(a,b) (((a) < (b)) ? (a) : (b))

// Compute one signed-distance value per pixel: for pixel (threadIdx.x, blockIdx.x),
// scan outward in square rings (up to maxRadius) for the nearest pixel whose
// inside/outside state differs from this pixel's, and write the distance (negated
// for outside pixels) into `out`.
// Launch contract (see GenerateSDF): one block per row, blockDim.x == width, so the
// row stride blockDim.x equals the image width.
// NOTE(review): SAMPLE indexes with a single byte per pixel, so this assumes
// numComponents == 1 -- confirm against CopyImage's callers.
__global__ void sdf_calc(int width, int height, unsigned char* data, float* out)
{
	int x = threadIdx.x;   // column within the row
	int y = blockIdx.x;    // row index
	int stride = blockDim.x; // row stride; equals width under the launch in GenerateSDF
#define SAMPLE(_x, _y) data[_y * stride + _x]
#define WRITE(_x, _y, val) out[_y * stride + _x] = val
	bool isBaseSample = SAMPLE(x, y) > 0x00;
	const int maxRadius = 16;
	// start from the largest possible distance (the image diagonal)
	float dist = distance(0, 0, width, height);
	// set when any opposite-state pixel is found; the early-exit that would use it
	// is currently commented out, so every ring is always scanned
	bool foundSample = false;
	for (int ringSize = 1; ringSize <= maxRadius; ringSize++)
	{
		// ring bounds, clamped to the image so border pixels re-test edge rows/columns
		int fromX = clamp(x - ringSize, 0, width - 1);
		int toX = clamp(x + ringSize, 0, width - 1);
		int fromY = clamp(y - ringSize, 0, height - 1);
		int toY = clamp(y + ringSize, 0, height - 1);
		// ***
		// ---
		// ---
		// top edge of the ring
		for (int i = fromX; i <= toX; i++)
		{
			int uv_x = i;
			int uv_y = fromY;
			unsigned char sample = SAMPLE(uv_x, uv_y);
			if (sample > 0 && !isBaseSample || sample == 0 && isBaseSample)
			{
				dist = min(dist, distance(x, y, uv_x, uv_y));
				foundSample = true;
			}
		}
		// ---
		// ---
		// ***
		// bottom edge of the ring
		for (int i = fromX; i <= toX; i++)
		{
			int uv_x = i;
			int uv_y = toY;
			unsigned char sample = SAMPLE(uv_x, uv_y);
			if (sample > 0 && !isBaseSample || sample == 0 && isBaseSample)
			{
				dist = min(dist, distance(x, y, uv_x, uv_y));
				foundSample = true;
			}
		}
		// ---
		// *--
		// ---
		// left edge (corners already covered by the top/bottom passes)
		for (int i = fromY + 1; i < toY; i++)
		{
			int uv_x = fromX;
			int uv_y = i;
			unsigned char sample = SAMPLE(uv_x, uv_y);
			if (sample > 0 && !isBaseSample || sample == 0 && isBaseSample)
			{
				dist = min(dist, distance(x, y, uv_x, uv_y));
				foundSample = true;
			}
		}
		// ---
		// --*
		// ---
		// right edge (corners already covered by the top/bottom passes)
		for (int i = fromY + 1; i < toY; i++)
		{
			int uv_x = toX;
			int uv_y = i;
			unsigned char sample = SAMPLE(uv_x, uv_y);
			if (sample > 0 && !isBaseSample || sample == 0 && isBaseSample)
			{
				dist = min(dist, distance(x, y, uv_x, uv_y));
				foundSample = true;
			}
		}
		//if (foundSample)
		//{
		//	break;
		//}
	}
	//dist = 1.0f - (dist / (float)maxRadius);
	//dist = clamp(dist, 0.0f, 1.0f);
	// signed convention: negative outside the shape, positive inside
	if (!isBaseSample) dist = -dist;
	WRITE(x, y, dist);
#undef SAMPLE
#undef WRITE
}

// Run sdf_calc over the whole image (one block per row, one thread per column),
// then normalize the raw signed distances in ctx.sdf_data to [0, 255] bytes in
// ctx.out_data. The host reads the managed sdf_data only after the
// cudaDeviceSynchronize() below.
void CUDA_SDF::GenerateSDF(SDFGenerationContext& ctx)
{
	printf("Generating signed distance field.\n");
	int numThreads = ctx.width * ctx.height;
	int blockSize = ctx.width;
	// ceiling division; with blockSize == width this is exactly ctx.height blocks
	int numBlocks = (numThreads + blockSize - 1) / blockSize;
	//add << <numBlocks, blockSize >> > (N, x, y);
	sdf_calc << < numBlocks, blockSize >> > (ctx.width, ctx.height, ctx.data, ctx.sdf_data);
	cudaDeviceSynchronize();
	float maxDist = distance(0, 0, ctx.width, ctx.height);
	// find the value range, ignoring pixels that kept the sentinel max distance
	float sdf_min = maxDist;
	float sdf_max = -sdf_min;
	for (int i = 0; i < ctx.width * ctx.height; i++)
	{
		const float& val = ctx.sdf_data[i];
		//if the value is not a max distance sample
		if (val > -maxDist && val < maxDist)
		{
			sdf_min = min(sdf_min, val);
			sdf_max = max(sdf_max, val);
		}
	}
	printf("SDF min %f max %f\n", sdf_min, sdf_max);
	// linearly remap [sdf_min, sdf_max] -> [0, 255] into the byte output buffer
	// NOTE(review): if no pixel found an opposite sample, sdf_range can be <= 0 and
	// the division misbehaves -- confirm inputs always contain both states
	float sdf_range = sdf_max - sdf_min;
	for (int i = 0; i < ctx.width * ctx.height; i++)
	{
		const float& val = ctx.sdf_data[i];
		float normalized_val = (val - sdf_min) / sdf_range;
		//float normalized_val = val / sdf_range * 0.5f + 0.5f;
		normalized_val = clamp(normalized_val, 0.0f, 1.0f);
		ctx.out_data[i] = (unsigned char)(normalized_val * 255.0f);
	}
}
12,853
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/resource.h>

// Allocate an uninitialized array of N elements of the given type.
#define NewArray(Type, N) ((Type *)(malloc(N*sizeof(Type))))
// Check a CUDA call and abort with file/line context on failure.
#define cudaTry(cudaStatus) _cudaTry(cudaStatus, __FILE__, __LINE__)

// Report a CUDA error with its source location and exit; no-op on cudaSuccess.
void _cudaTry(cudaError_t cudaStatus, const char *fileName, int lineNumber) {
	if(cudaStatus != cudaSuccess) {
		fprintf(stderr, "%s in %s line %d\n", cudaGetErrorString(cudaStatus), fileName, lineNumber);
		exit(1);
	}
}

// GPU kernel: xm[i] = result of iterating y <- a*(y^3 - y) m times starting at x[i],
// with a = 5/2. One thread per element, guarded for the padded grid tail.
__global__ void f_kernel(uint n, uint m, float *x, float *xm) {
	uint i = blockIdx.x*blockDim.x + threadIdx.x; // nvcc built-ins
	if(i < n) {
		float a = 5.0f/2.0f; // 2.5 is exact in binary, identical to the double 5.0/2.0
		float y = x[i];
		for(uint j = 0; j < m; j++) {
			y = a*(y*y*y - y);
		}
		xm[i] = y;
	}
}

// Host wrapper for f_kernel: upload x, run the kernel, download the result into xm.
// x is left untouched.
void f(float *x, float *xm, uint n, uint m) {
	int size = n*sizeof(float);
	float *dev_x, *dev_xm;
	cudaTry(cudaMalloc((void**)(&dev_x), size));
	cudaTry(cudaMalloc((void**)(&dev_xm), size));
	cudaTry(cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice));
	f_kernel<<<ceil(n/256.0),256>>>(n, m, dev_x, dev_xm);
	cudaTry(cudaGetLastError()); // catch launch-configuration errors
	cudaTry(cudaMemcpy(xm, dev_xm, size, cudaMemcpyDeviceToHost));
	cudaTry(cudaFree(dev_x));
	cudaTry(cudaFree(dev_xm));
}

// CPU reference for f_kernel: same iteration, result stored in xm.
// BUG FIX: the original wrote the result back into x[i], leaving xm uninitialized,
// so the CPU and GPU paths returned their results in different arrays.
void f_cpu(float *x, float *xm, uint n, uint m) {
	float a = 5.0f/2.0f;
	for(uint i = 0; i < n; i++) {
		float y = x[i];
		for(uint j = 0; j < m; j++)
			y = a*(y*y*y - y);
		xm[i] = y;
	}
}

// Benchmark f (GPU) or f_cpu over n elements / m iterations and print user time
// and throughput. Inputs are the fixed ramp x[i] = (1+i)/(n+1).
void f_test(uint n, uint m, int gpu_flag) {
	float *x, *xm;
	struct rusage r0, r1;
	x = NewArray(float, n);
	xm = NewArray(float, n);
	for(uint i = 0; i < n; i++)
		x[i] = (1.0+i)/(n+1);
	getrusage(RUSAGE_SELF, &r0);
	if(gpu_flag)
		f(x, xm, n, m);
	else
		f_cpu(x, xm, n, m);
	getrusage(RUSAGE_SELF, &r1);
	double t_elapsed = (r1.ru_utime.tv_sec - r0.ru_utime.tv_sec)
		+ 1e-6*(r1.ru_utime.tv_usec - r0.ru_utime.tv_usec);
	printf("f%s(n, ...): t_elapsed = %10.3e, throughput = %10.3e\n",
		gpu_flag ? "" : "_cpu", t_elapsed, n*m/t_elapsed);
	free(x);  // the original leaked both buffers
	free(xm);
}

// GPU kernel: y[i] = a*x[i] + y[i], one thread per element, guarded tail.
__global__ void saxpy_kernel(uint n, float a, float *x, float *y) {
	uint i = blockIdx.x*blockDim.x + threadIdx.x; // nvcc built-ins
	if(i < n) {
		float yi = y[i];
		float xi = x[i];
		float out;
		//for (int j=0; j<10; j++) {
		out = a*xi + yi;
		//}
		y[i] = out;
	}
}

// Host wrapper for saxpy_kernel: upload x and y, run the kernel, download y.
void saxpy(uint n, float a, float *x, float *y) {
	int size = n*sizeof(float);
	float *dev_x, *dev_y; // will be allocated on the GPU.
	cudaTry(cudaMalloc((void**)(&dev_x), size));
	cudaTry(cudaMalloc((void**)(&dev_y), size));
	cudaTry(cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice));
	cudaTry(cudaMemcpy(dev_y, y, size, cudaMemcpyHostToDevice));
	saxpy_kernel<<<ceil(n/256.0),256>>>(n, a, dev_x, dev_y);
	cudaTry(cudaGetLastError()); // catch launch-configuration errors
	cudaTry(cudaMemcpy(y, dev_y, size, cudaMemcpyDeviceToHost));
	cudaTry(cudaFree(dev_x));
	cudaTry(cudaFree(dev_y));
}

// CPU reference for saxpy_kernel: y[i] = a*x[i] + y[i] in place.
void saxpy_cpu(uint n, float a, float *x, float *y) {
	for (int i=0; i< n; i++) {
		float yi = y[i];
		float xi = x[i];
		float out;
		//for (int j = 0; j<10; j++) {
		out = a*xi + yi;
		//}
		y[i] = out;
	}
}

// Benchmark saxpy (GPU) or saxpy_cpu over n elements with x[i] = i, y[i] = i*i.
void saxpy_test(uint n, float a, int gpu_flag) {
	float *x, *y;
	struct rusage r0, r1;
	x = NewArray(float, n);
	y = NewArray(float, n);
	for(uint i = 0; i < n; i++) {
		x[i] = i;
		y[i] = i*i;
	}
	getrusage(RUSAGE_SELF, &r0);
	if(gpu_flag)
		saxpy(n, a, x, y);
	else
		saxpy_cpu(n, a, x, y);
	getrusage(RUSAGE_SELF, &r1);
	double t_elapsed = (r1.ru_utime.tv_sec - r0.ru_utime.tv_sec)
		+ 1e-6*(r1.ru_utime.tv_usec - r0.ru_utime.tv_usec);
	printf("saxpy%s(n, ...): t_elapsed = %10.3e\n", gpu_flag ? "" : "_cpu", t_elapsed);
	free(x);  // the original leaked both buffers
	free(y);
}

// f_main: for command lines of the form
//   hw4 f n m
// or
//   hw4 f_cpu n m
void f_main(int argc, char **argv, int gpu_flag) {
	uint n, m;
	if(argc != 4) {
		fprintf(stderr, "usage: hw4 %s n m\n", argv[1]);
		exit(1);
	}
	n = strtoul(argv[2], NULL, 10);
	m = strtoul(argv[3], NULL, 10);
	f_test(n, m, gpu_flag);
}

// saxpy_main: for command lines of the form
//   hw4 saxpy n [a]
// or
//   hw4 saxpy_cpu n [a]
// The parameter 'a' is optional. If omitted, we set a=3.0.
void saxpy_main(int argc, char **argv, int gpu_flag) {
	int n;
	float a;
	if((argc < 3) || (4 < argc)) {
		fprintf(stderr, "usage: hw4 saxpy n [a]\n");
		exit(1);
	}
	n = strtoul(argv[2], NULL, 10);
	if(argc >= 4)
		a = atof(argv[3]);
	else
		a = 3.0;
	saxpy_test(n, a, gpu_flag);
}

// Dispatch on the test name (f / f_cpu / saxpy / saxpy_cpu).
int main(int argc, char **argv) {
	if(argc < 2) {
		fprintf(stderr, "usage: hw4 testName testArgs\n");
		exit(1);
	}
	if(strcmp(argv[1], "f") == 0)
		f_main(argc, argv, true);
	else if(strcmp(argv[1], "f_cpu") == 0)
		f_main(argc, argv, false);
	else if(strcmp(argv[1], "saxpy") == 0)
		saxpy_main(argc, argv, true);
	else if(strcmp(argv[1], "saxpy_cpu") == 0)
		saxpy_main(argc, argv, false);
	else {
		fprintf(stderr, "hw4, unrecognized test case: %s\n", argv[1]);
		exit(1);
	}
	exit(0);
}
12,854
/* * Default code in Managedcuda document */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> extern "C" { __global__ void multKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] * b[i]; } int main() { return 0; } }
12,855
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
#include <sys/time.h>

// Tree depth, launch geometry and problem size for the binomial pricer.
#define NUM_STEPS 2048
#define BLOCK_SIZE 128
#define MAX_OPTIONS 1024
#define ELEMS_PER_THREAD (NUM_STEPS/BLOCK_SIZE)
/* at T_N = NUM_STEPS, there are (NUM_STEPS + 1) leaves*/

// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double cpuSecond( )
{
    double sec;
    struct timeval tp;
    gettimeofday(&tp, NULL);
    sec = (double) tp.tv_sec + (double) tp.tv_usec*1e-6;
    return sec;
}

// cumulative standard normal distribution
// Polynomial approximation (Abramowitz & Stegun style constants), callable
// from both host and device.
// NOTE(review): abs(d) on a double relies on nvcc's host/device math
// overloads; in plain C this would pick the int abs() and truncate --
// confirm this translation unit is always compiled with nvcc.
__device__ __host__ double CND(double d)
{
    const double b = 0.2316419;
    const double a1 = 0.31938153;
    const double a2 = -0.356563782;
    const double a3 = 1.781477937;
    const double a4 = -1.821255978;
    const double a5 = 1.330274429;
    const double rsqrt2pi = 0.3989422804014327; // 1/sqrt(2*pi)
    double K = 1.0/(1.0 + b*abs(d));
    double ans;
    ans = K*(a1 + K*(a2 + K*(a3 + K*(a4 + K*a5) ) ) );
    ans *= rsqrt2pi*exp(-d*d/2.0);
    // The series approximates one tail; mirror the result for positive d.
    if (d > 0) { ans = 1.0 - ans; }
    return ans;
}

// Closed-form Black-Scholes price of a European CALL.
// S0 spot, X strike, r risk-free rate, v volatility, T expiry (years).
__host__ double BlackScholes(double S0, double X, double r, double v, double T)
{
    double sqrtT = sqrt(T);
    double d1 = ( log(S0/X) + (r + 0.5*v*v)*T )/(v*sqrtT);
    double d2 = d1 - v*sqrtT;
    double expRT = exp(-r*T);
    double CE;
    CE = S0*CND(d1) - X*expRT*CND(d2);
    return CE;
}

/* CPU version of the binomial model */
__host__ double binomial(double S, double X, double r, double v, double T, int N, double* V )
{
    // S current stock price; X strike price;
    // r, v the risk-free interest rate and volatility;
    // T the expiry (unit yr); N the number of time steps in the binomial model.
    // V[N+1] is the array to do the iteration;
    // In the end, V[0] stores the price of the call option
    double dt = T/(double) N;
    double vdt = v*sqrt(dt);
    double u = exp( vdt );          // per-step up factor
    double d = 1.0/u;               // per-step down factor
    double disc = exp( r * dt );    // discounting factor
    double discr = 1.0/disc;
    double pu = (disc - d)/(u - d); // risk-neutral/martingale probability
    double pd = 1.0 - pu;
    int i, j;
    double Si; // intermediate stock price in the node
    // initialize the CALL option value on expiry;
    for (i = 0; i <= N; i++) {
        Si = S * exp( vdt*(2*i - N) ); // S[0] lowest stock price at expiry.
        V[i] = (Si - X > 0) ? (Si - X) : 0; // Call option, use "max(X-Si, 0.0)" for puts
    }
    // iterate backward of the binomial tree (j the time step).
    for (j = N-1; j >= 0; j--) {
        for (i = 0; i <= j; i++) {
            V[i] = (pd * V[i] + pu * V[i+1]) * discr;
        }
    }
    return V[0];
}

/* 1D grid, and 1D block; blocksize << NUM_STEPS
   each block deals with only one option
   each thread deals with part of the binomial tree */
// Each thread keeps ELEMS_PER_THREAD consecutive tree nodes in the local
// array call_loc, plus one extra slot for its right neighbour's boundary
// value, which is exchanged through the shared array call_bound each sweep.
__global__ void binomialGPUv5(double* Sptr, double* Xptr, double* Cptr, double r, double v, double T)
{
    int tx = threadIdx.x;
    int bx = blockIdx.x;            // one option per block
    double S = Sptr[bx];
    double X = Xptr[bx];
    double dt = T/(double) NUM_STEPS;
    double vdt = v*sqrt(dt);
    double u = exp(vdt);
    double d = 1.0/u;
    double disc = exp( r * dt );    // discounting factor
    double discr = 1.0/disc;
    double pu = (disc - d)/(u - d); // risk-neutral/martingale probability
    double pd = 1.0 - pu;
    int i, j, k;
    double Si;
    double call_loc[ELEMS_PER_THREAD + 1]; //local array
    __shared__ double call_bound[BLOCK_SIZE + 1];
    // Payoff at expiry for this thread's slice of the leaves.
    for (i = 0; i < ELEMS_PER_THREAD; i++) {
        k = tx * ELEMS_PER_THREAD + i;
        Si = S * exp( vdt * (2*k - NUM_STEPS) );
        call_loc[i] = (Si - X > 0) ? (Si - X) : 0;
    }
    // The last thread also seeds the topmost leaf (node index NUM_STEPS).
    if (tx == BLOCK_SIZE - 1) {
        Si = S * exp( vdt * NUM_STEPS);
        call_bound[BLOCK_SIZE] = (Si - X > 0) ? (Si - X) : 0;
    }
    // Publish each slice's left boundary, then read the right neighbour's.
    call_bound[tx] = call_loc[0];
    __syncthreads();
    call_loc[ELEMS_PER_THREAD] = call_bound[tx + 1];
    __syncthreads();
    // Backward sweeps: thread tx performs (BLOCK_SIZE - tx)*ELEMS_PER_THREAD
    // sweeps before its slice falls outside the shrinking tree.
    // NOTE(review): the trip count depends on tx, so threads reach the
    // __syncthreads() calls below a different number of times once the
    // high-tx threads exit the loop -- formally undefined behaviour per the
    // CUDA programming guide; verify with compute-sanitizer (synccheck).
    for (j = 0; j < (BLOCK_SIZE - tx)*ELEMS_PER_THREAD; j++) {
        for (i = 0; i < ELEMS_PER_THREAD; i++) {
            call_loc[i] = (pd*call_loc[i] + pu*call_loc[i+1])*discr;
        }
        call_bound[tx] = call_loc[0];
        __syncthreads();
        call_loc[ELEMS_PER_THREAD] = call_bound[tx + 1];
        __syncthreads(); // must be there, otherwise, fail
    }
    // After all sweeps, thread 0's first node is the root = option price.
    if (tx == 0) { Cptr[bx] = call_loc[0]; }
}

/* general uniform random number between [low, high] */
double UniRand(double low, double high)
{
    double t = (double) rand() / (double) RAND_MAX;
    return (1.0 - t)*low + t*high;
}

/* test the GPU binomial model for European Call Pricing */
// Prices MAX_OPTIONS random calls on GPU and CPU, times both, and
// cross-checks GPU vs CPU binomial (abs tol 1e-8) and binomial vs
// Black-Scholes (aggregate relative tol 1e-4).
int main(int argc, char** argv)
{
    int OPT_N = MAX_OPTIONS;
    double * Sptr_h;
    double * Sptr_d;
    double * Xptr_h;
    double * Xptr_d;
    double * Cptr_h;
    double * Cptr_d;
    double * Cptr_c;
    double * Cptr_b; // from the Black-Scholes model
    int size = OPT_N*sizeof(double);
    int i;
    double * Vptr_h; // used by the CPU versions
    double r = 0.02; // risk-free rate
    double v = 0.30; // volatility
    double T = 1.00; // maturity
    cudaError_t error;
    double tStart;
    double tStop1, tStop2;
    double sumDel = 0, sumRef = 0;
    Sptr_h = (double *) malloc(size); // input
    Xptr_h = (double *) malloc(size); // input
    Cptr_h = (double *) malloc(size); // CPU version output
    Cptr_c = (double *) malloc(size); // GPU version output
    Cptr_b = (double *) malloc(size); // CPU Black-Scholes output
    Vptr_h = (double *) malloc((NUM_STEPS+1)*sizeof(double));
    // Random spot/strike pairs; rand() is never seeded, so runs repeat.
    for (i = 0; i < OPT_N; i++) {
        Sptr_h[i] = UniRand(5.0, 30.0);
        Xptr_h[i] = UniRand(1.0, 40.0);
    }
    cudaMalloc( (void **) &Sptr_d, size);
    cudaMalloc( (void **) &Xptr_d, size);
    cudaMalloc( (void **) &Cptr_d, size);
    cudaMemcpy(Sptr_d, Sptr_h, size, cudaMemcpyHostToDevice);
    cudaMemcpy(Xptr_d, Xptr_h, size, cudaMemcpyHostToDevice);
    dim3 dimGrid(MAX_OPTIONS);  // one block per option
    dim3 dimBlock(BLOCK_SIZE);
    printf("Depth of the tree NUM_STEPS = %d\n", NUM_STEPS);
    printf("grid structure: <<<%d, %d>>>\n", dimGrid.x, dimBlock.x);
    printf("Starting the GPU code...\n");
    tStart = cpuSecond();
    // NOTE(review): the sync sits after tStart, so any previously queued
    // work is billed to the GPU timing.
    cudaDeviceSynchronize();
    binomialGPUv5<<<dimGrid, dimBlock>>>(Sptr_d, Xptr_d, Cptr_d, r, v, T);
    // Blocking D2H copy doubles as synchronization before stopping the timer.
    cudaMemcpy(Cptr_c, Cptr_d, size, cudaMemcpyDeviceToHost);
    error = cudaPeekAtLastError();
    if (error != cudaSuccess) {
        printf("GPU code failed %s\n", cudaGetErrorString(error) );
        exit(-1);
    }
    else {
        tStop1 = cpuSecond() - tStart;
        printf("GPU code finished within %12.6f seconds\n", tStop1);
    }
    // CPU reference: same tree depth, reusing one scratch array.
    tStart = cpuSecond();
    for (i = 0; i < OPT_N; i++) {
        Cptr_h[i] = binomial(Sptr_h[i], Xptr_h[i], r, v, T, NUM_STEPS, Vptr_h);
    }
    tStop2 = cpuSecond() - tStart;
    printf("CPU code finished within %12.6f seconds\n", tStop2);
    printf("Speed up you got %8.2f\n", tStop2/tStop1);
    for (i = 0; i < OPT_N; i++) {
        Cptr_b[i] = BlackScholes(Sptr_h[i], Xptr_h[i], r, v, T);
    }
    printf("Compare the GPU and CPU binary model now...\n");
    // GPU and CPU run the identical recombination in double precision,
    // so a tight absolute tolerance is used here.
    for (i = 0; i < OPT_N; i++) {
        if ( abs( Cptr_h[i] - Cptr_c[i] ) > 1e-8 ) {
            printf("%d %8.2f %8.2f %12.6f %12.6f \n", i, Sptr_h[i], Xptr_h[i], Cptr_h[i], Cptr_c[i]);
            printf("comparion failed\n");
            exit(-1);
        }
    }
    printf("passed comparison between GPU and CPU binomial model\n");
    printf("Compare the binary model with Black-Scholes model now...\n");
    // Aggregate relative deviation of the binomial prices from Black-Scholes.
    for (i = 0; i < OPT_N; i++) {
        sumDel += abs( Cptr_h[i] - Cptr_b[i] );
        sumRef += Cptr_h[i];
    }
    if ( sumDel/sumRef > 1e-4 ) {
        printf("Black-Scholes VS Binomial comparison failed\n");
        exit(-1);
    }
    printf("passed comparison between binomial and Black-Scholes model\n");
    printf("Here are outputs for the first 10 lines\n");
    for (i = 0; i < 10; i++) {
        printf("%8.2f, %8.2f, %10.4f, %10.4f, %10.4f\n", Sptr_h[i], Xptr_h[i], Cptr_c[i], Cptr_h[i], Cptr_b[i]);
    }
    free(Sptr_h); free(Xptr_h); free(Cptr_h); free(Cptr_c); free(Cptr_b); free(Vptr_h);
    cudaFree(Sptr_d); cudaFree(Xptr_d); cudaFree(Cptr_d);
    cudaDeviceReset();
    return(0);
}
12,856
#include <stdio.h>

// Busy-wait on the device clock, kernel_time times, for clockRate ticks each.
// finish_clock = start_time + clockRate can overflow the signed int; the
// `wrapped` flag marks that case so the loop keeps spinning while the flag
// holds (it is cleared again based on the sign of clock()).
// NOTE(review): the wrap-handling logic is unusual -- verify its intent
// against the host-side use of this helper before relying on exact timing.
__device__ void clock_block(int kernel_time, int clockRate)
{
    int finish_clock;
    int start_time;
    for(int temp=0; temp<kernel_time; temp++){
        start_time = clock();
        finish_clock = start_time + clockRate;      // may wrap past INT_MAX
        bool wrapped = finish_clock < start_time;   // true iff the target wrapped
        while( clock() < finish_clock || wrapped)
            wrapped = clock()>0 && wrapped;         // clear flag once clock() is non-positive
    }
}

// Spin-wait experiment kernel.
// init and result are arrays of integers where result should end up
// being the result of incrementing all elements of init.
// They have n elements and are (n+1) long. The should wait for the
// first element to be set to zero
// NOTE(review): the while-loop below spins WHILE init[0] == 0, i.e. it
// releases when init[0] becomes NONZERO -- this contradicts the original
// comment above; confirm the intended release condition with the host side.
__global__ void superKernel(volatile int *init, int numThreads, int *result)
{
    int warp_size = 32;
    // lane index and warp index within the (x,y) thread block
    int threadID = (threadIdx.x + threadIdx.y * blockDim.x)%warp_size;
    int warpID = (threadIdx.x + threadIdx.y * blockDim.x)/warp_size;
    //clock_block(10,706000000);
    // count how many polls of the volatile flag this thread performs
    int count = 1;
    while(init[0]==0) count++;
    // lanes of warp 0 record their spin counts into result[1..numThreads]
    if(threadID<numThreads && warpID==0) result[threadID+1] = count;
    //__syncthreads(); //this will need to be a warp wide sync using (PTX barriers)
    // lane 0 of every warp publishes its count as the summary in result[0]
    if(threadID==0) result[0] = count;
}
12,857
#include <stdio.h> #include "cuda.h" #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) #define ceil(a,b) ((a) % (b) == 0 ? (a) / (b) : ((a) / (b)) + 1) void check_error (const char* message) { cudaError_t error = cudaGetLastError (); if (error != cudaSuccess) { printf ("CUDA error : %s, %s\n", message, cudaGetErrorString (error)); exit(-1); } } __global__ void __launch_bounds__ (128,2) sw4_1 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double a_mux1, a_mux2, a_mux3, a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4; double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4; double a_r1, b_r1; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 3 for (int k=2; k<=N-3; k+=2) { a_mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2]; a_mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] 
+ 3.0 * mu[k][j][i-1] * strx[i-1]; a_mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i]; a_mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2]; a_muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2]; a_muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1]; a_muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j]; a_muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2]; a_muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2]; a_muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1]; a_muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k]; a_muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2]; a_r1 = 1e0 / 6 * (strx[i] * ((2 * a_mux1 + la[k][j][i-1] * strx[i-1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i-2] * strx[i-2]) * (u_0[k][j][i-2] - u_0[k][j][i]) + (2 * a_mux2 + la[k][j][i-2] * strx[i-2] + la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i] + 3 * la[k][j][i-1] * strx[i-1]) * (u_0[k][j][i-1] - u_0[k][j][i]) + (2 * a_mux3 + la[k][j][i-1] * strx[i-1] + la[k][j][i+2] * strx[i+2] + 3 * la[k][j][i+1] * strx[i+1] + 3 * la[k][j][i] * strx[i]) * (u_0[k][j][i+1] - u_0[k][j][i]) + (2 * a_mux4 + la[k][j][i+1] * strx[i+1] - 3e0 / 4 * la[k][j][i] * strx[i] - 3e0 / 4 * la[k][j][i+2] * strx[i+2]) * (u_0[k][j][i+2] - u_0[k][j][i])) + stry[j] * (a_muy1 * (u_0[k][j-2][i] - u_0[k][j][i]) + a_muy2 * (u_0[k][j-1][i] - u_0[k][j][i]) + a_muy3 * (u_0[k][j+1][i] - 
u_0[k][j][i]) + a_muy4 * (u_0[k][j+2][i] - u_0[k][j][i])) + strz[k] * (a_muz1 * (u_0[k-2][j][i] - u_0[k][j][i]) + a_muz2 * (u_0[k-1][j][i] - u_0[k][j][i]) + a_muz3 * (u_0[k+1][j][i] - u_0[k][j][i]) + a_muz4 * (u_0[k+2][j][i] - u_0[k][j][i]))); a_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k][j][i-2] * (u_1[k][j-2][i-2] - u_1[k][j+2][i-2] + 8 * (-u_1[k][j-1][i-2] + u_1[k][j+1][i-2])) - 8 * (la[k][j][i-1] * (u_1[k][j-2][i-1] - u_1[k][j+2][i-1] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j+1][i-1]))) + 8 * (la[k][j][i+1] * (u_1[k][j-2][i+1] - u_1[k][j+2][i+1] + 8 * (-u_1[k][j-1][i+1] + u_1[k][j+1][i+1]))) - (la[k][j][i+2] * (u_1[k][j-2][i+2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j-1][i+2] + u_1[k][j+1][i+2])))); a_r1 += strx[i] * strz[k] * (1e0 / 144) * (la[k][j][i-2] * (u_2[k-2][j][i-2] - u_2[k+2][j][i-2] + 8 * (-u_2[k-1][j][i-2] + u_2[k+1][j][i-2])) - 8 * (la[k][j][i-1] * (u_2[k-2][j][i-1] - u_2[k+2][j][i-1] + 8 * (-u_2[k-1][j][i-1] + u_2[k+1][j][i-1]))) + 8 * (la[k][j][i+1] * (u_2[k-2][j][i+1] - u_2[k+2][j][i+1] + 8 * (-u_2[k-1][j][i+1] + u_2[k+1][j][i+1]))) - (la[k][j][i+2] * (u_2[k-2][j][i+2] - u_2[k+2][j][i+2] + 8 * (-u_2[k-1][j][i+2] + u_2[k+1][j][i+2])))); a_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k][j-2][i-2] - u_1[k][j-2][i+2] + 8 * (-u_1[k][j-2][i-1] + u_1[k][j-2][i+1])) - 8 * (mu[k][j-1][i] * (u_1[k][j-1][i-2] - u_1[k][j-1][i+2] + 8 * (-u_1[k][j-1][i-1] + u_1[k][j-1][i+1]))) + 8 * (mu[k][j+1][i] * (u_1[k][j+1][i-2] - u_1[k][j+1][i+2] + 8 * (-u_1[k][j+1][i-1] + u_1[k][j+1][i+1]))) - (mu[k][j+2][i] * (u_1[k][j+2][i-2] - u_1[k][j+2][i+2] + 8 * (-u_1[k][j+2][i-1] + u_1[k][j+2][i+1])))); a_r1 += strx[i] * strz[k] * (1e0 / 144) * (mu[k-2][j][i] * (u_2[k-2][j][i-2] - u_2[k-2][j][i+2] + 8 * (-u_2[k-2][j][i-1] + u_2[k-2][j][i+1])) - 8 * (mu[k-1][j][i] * (u_2[k-1][j][i-2] - u_2[k-1][j][i+2] + 8 * (-u_2[k-1][j][i-1] + u_2[k-1][j][i+1]))) + 8 * (mu[k+1][j][i] * (u_2[k+1][j][i-2] - u_2[k+1][j][i+2] + 8 * (-u_2[k+1][j][i-1] + u_2[k+1][j][i+1]))) - (mu[k+2][j][i] 
* (u_2[k+2][j][i-2] - u_2[k+2][j][i+2] + 8 * (-u_2[k+2][j][i-1] + u_2[k+2][j][i+1])))); uacc_0[k][j][i] = a1 * uacc_0[k][j][i] + cof * a_r1; b_mux1 = mu[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i-2] * strx[i-2]; b_mux2 = mu[k+1][j][i-2] * strx[i-2] + mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i] + 3.0 * mu[k+1][j][i-1] * strx[i-1]; b_mux3 = mu[k+1][j][i-1] * strx[i-1] + mu[k+1][j][i+2] * strx[i+2] + 3.0 * mu[k+1][j][i+1] * strx[i+1] + 3.0 * mu[k+1][j][i] * strx[i]; b_mux4 = mu[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k+1][j][i] * strx[i] - 3e0 / 4 * mu[k+1][j][i+2] * strx[i+2]; b_muy1 = mu[k+1][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] -3e0 / 4 * mu[k+1][j-2][i] * stry[j-2]; b_muy2 = mu[k+1][j-2][i] * stry[j-2] + mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j] + 3.0 * mu[k+1][j-1][i] * stry[j-1]; b_muy3 = mu[k+1][j-1][i] * stry[j-1] + mu[k+1][j+2][i] * stry[j+2] + 3.0 * mu[k+1][j+1][i] * stry[j+1] + 3.0 * mu[k+1][j][i] * stry[j]; b_muy4 = mu[k+1][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k+1][j][i] * stry[j] - 3e0 / 4 * mu[k+1][j+2][i] * stry[j+2]; b_muz1 = mu[k+1-1][j][i] * strz[k+1-1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k+1-2][j][i] * strz[k+1-2]; b_muz2 = mu[k+1-2][j][i] * strz[k+1-2] + mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k+1-1][j][i] * strz[k+1-1]; b_muz3 = mu[k+1-1][j][i] * strz[k+1-1] + mu[k+1+2][j][i] * strz[k+1+2] + 3.0 * mu[k+1+1][j][i] * strz[k+1+1] + 3.0 * mu[k+1][j][i] * strz[k+1]; b_muz4 = mu[k+1+1][j][i] * strz[k+1+1] - 3e0 / 4 * mu[k+1][j][i] * strz[k+1] - 3e0 /4 * mu[k+1+2][j][i] * strz[k+1+2]; b_r1 = 1e0 / 6 * (strx[i] * ((2 * b_mux1 + la[k+1][j][i-1] * strx[i-1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i-2] * strx[i-2]) * (u_0[k+1][j][i-2] - u_0[k+1][j][i]) + (2 * b_mux2 + la[k+1][j][i-2] * strx[i-2] + la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i] + 3 * la[k+1][j][i-1] * strx[i-1]) * 
(u_0[k+1][j][i-1] - u_0[k+1][j][i]) + (2 * b_mux3 + la[k+1][j][i-1] * strx[i-1] + la[k+1][j][i+2] * strx[i+2] + 3 * la[k+1][j][i+1] * strx[i+1] + 3 * la[k+1][j][i] * strx[i]) * (u_0[k+1][j][i+1] - u_0[k+1][j][i]) + (2 * b_mux4 + la[k+1][j][i+1] * strx[i+1] - 3e0 / 4 * la[k+1][j][i] * strx[i] - 3e0 / 4 * la[k+1][j][i+2] * strx[i+2]) * (u_0[k+1][j][i+2] - u_0[k+1][j][i])) + stry[j] * (b_muy1 * (u_0[k+1][j-2][i] - u_0[k+1][j][i]) + b_muy2 * (u_0[k+1][j-1][i] - u_0[k+1][j][i]) + b_muy3 * (u_0[k+1][j+1][i] - u_0[k+1][j][i]) + b_muy4 * (u_0[k+1][j+2][i] - u_0[k+1][j][i])) + strz[k+1] * (b_muz1 * (u_0[k+1-2][j][i] - u_0[k+1][j][i]) + b_muz2 * (u_0[k+1-1][j][i] - u_0[k+1][j][i]) + b_muz3 * (u_0[k+1+1][j][i] - u_0[k+1][j][i]) + b_muz4 * (u_0[k+1+2][j][i] - u_0[k+1][j][i]))); b_r1 += strx[i] * stry[j] * (1e0 / 144) * (la[k+1][j][i-2] * (u_1[k+1][j-2][i-2] - u_1[k+1][j+2][i-2] + 8 * (-u_1[k+1][j-1][i-2] + u_1[k+1][j+1][i-2])) - 8 * (la[k+1][j][i-1] * (u_1[k+1][j-2][i-1] - u_1[k+1][j+2][i-1] + 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j+1][i-1]))) + 8 * (la[k+1][j][i+1] * (u_1[k+1][j-2][i+1] - u_1[k+1][j+2][i+1] + 8 * (-u_1[k+1][j-1][i+1] + u_1[k+1][j+1][i+1]))) - (la[k+1][j][i+2] * (u_1[k+1][j-2][i+2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j-1][i+2] + u_1[k+1][j+1][i+2])))); b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (la[k+1][j][i-2] * (u_2[k+1-2][j][i-2] - u_2[k+1+2][j][i-2] + 8 * (-u_2[k+1-1][j][i-2] + u_2[k+1+1][j][i-2])) - 8 * (la[k+1][j][i-1] * (u_2[k+1-2][j][i-1] - u_2[k+1+2][j][i-1] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1+1][j][i-1]))) + 8 * (la[k+1][j][i+1] * (u_2[k+1-2][j][i+1] - u_2[k+1+2][j][i+1] + 8 * (-u_2[k+1-1][j][i+1] + u_2[k+1+1][j][i+1]))) - (la[k+1][j][i+2] * (u_2[k+1-2][j][i+2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1-1][j][i+2] + u_2[k+1+1][j][i+2])))); b_r1 += strx[i] * stry[j] * (1e0 / 144) * (mu[k+1][j-2][i] * (u_1[k+1][j-2][i-2] - u_1[k+1][j-2][i+2] + 8 * (-u_1[k+1][j-2][i-1] + u_1[k+1][j-2][i+1])) - 8 * (mu[k+1][j-1][i] * (u_1[k+1][j-1][i-2] - u_1[k+1][j-1][i+2] 
+ 8 * (-u_1[k+1][j-1][i-1] + u_1[k+1][j-1][i+1]))) + 8 * (mu[k+1][j+1][i] * (u_1[k+1][j+1][i-2] - u_1[k+1][j+1][i+2] + 8 * (-u_1[k+1][j+1][i-1] + u_1[k+1][j+1][i+1]))) - (mu[k+1][j+2][i] * (u_1[k+1][j+2][i-2] - u_1[k+1][j+2][i+2] + 8 * (-u_1[k+1][j+2][i-1] + u_1[k+1][j+2][i+1])))); b_r1 += strx[i] * strz[k+1] * (1e0 / 144) * (mu[k+1-2][j][i] * (u_2[k+1-2][j][i-2] - u_2[k+1-2][j][i+2] + 8 * (-u_2[k+1-2][j][i-1] + u_2[k+1-2][j][i+1])) - 8 * (mu[k+1-1][j][i] * (u_2[k+1-1][j][i-2] - u_2[k+1-1][j][i+2] + 8 * (-u_2[k+1-1][j][i-1] + u_2[k+1-1][j][i+1]))) + 8 * (mu[k+1+1][j][i] * (u_2[k+1+1][j][i-2] - u_2[k+1+1][j][i+2] + 8 * (-u_2[k+1+1][j][i-1] + u_2[k+1+1][j][i+1]))) - (mu[k+1+2][j][i] * (u_2[k+1+2][j][i-2] - u_2[k+1+2][j][i+2] + 8 * (-u_2[k+1+2][j][i-1] + u_2[k+1+2][j][i+1])))); uacc_0[k+1][j][i] = a1 * uacc_0[k+1][j][i] + cof * b_r1; } } } __global__ void __launch_bounds__ (128,2) sw4_2 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double (*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double a_mux1, a_mux2, a_mux3, 
a_mux4, a_muy1, a_muy2, a_muy3, a_muy4, a_muz1, a_muz2, a_muz3, a_muz4; double b_mux1, b_mux2, b_mux3, b_mux4, b_muy1, b_muy2, b_muy3, b_muy4, b_muz1, b_muz2, b_muz3, b_muz4; double a_r2, b_r2; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 3 for (int k=2; k<=N-3; k+=2) { double b_muy4; double b_muy3; double b_muy2; double _t_86_; double _t_84_; double b_muy1; double _t_82_; double _t_80_; double _t_81_; double _t_79_; double _t_83_; double _t_85_; double _t_87_; double b_r2; double b_muz2; double b_muz3; double b_muz1; double b_muz4; double b_mux2; double b_mux3; double b_mux1; double b_mux4; double _t_88_; double _t_74_; double a_muy4; double a_muy3; double a_muy2; double _t_13_; double _t_11_; double a_muy1; double _t_9_; double _t_7_; double _t_10_; double _t_6_; double _t_12_; double _t_14_; double _t_8_; double a_r2; double a_muz3; double _t_15_; double a_muz4; double a_muz2; double a_muz1; double a_mux2; double a_mux1; double a_mux3; double _t_1_; double a_mux4; double _t_21_; double _t_34_; double _t_60_; double _t_47_; double _t_48_; double _t_61_; double _t_53_; double _t_64_; double _t_69_; double _t_56_; double _t_51_; double _t_59_; double _t_72_; double _t_66_; double _t_20_; double _t_22_; double _t_35_; double _t_27_; double _t_38_; double _t_30_; double _t_40_; double _t_43_; double _t_46_; double _t_25_; double _t_33_; double _t_142_; double _t_134_; double _t_121_; double _t_126_; double _t_129_; double _t_137_; double _t_124_; double _t_132_; double _t_139_; double _t_145_; double _t_107_; double _t_94_; double _t_120_; double _t_93_; double _t_133_; double _t_95_; double _t_108_; double _t_100_; double _t_111_; double _t_103_; double _t_113_; double _t_116_; double _t_119_; double _t_106_; double _t_98_; double uacc_1kc0jc0ic0; double uacc_1kp1jc0ic0; b_muy4 = -3.0 / 4.0 * mu[k+1][j+2][i] * stry[j+2]; b_muy3 = mu[k+1][j+2][i] * stry[j+2]; b_muy3 += 3.0 * mu[k+1][j+1][i] * stry[j+1]; b_muy4 += mu[k+1][j+1][i] * stry[j+1]; b_muy2 = 
mu[k+1][j+1][i] * stry[j+1]; b_muy2 += 3.0 * mu[k+1][j][i] * stry[j]; b_muy3 += 3.0 * mu[k+1][j][i] * stry[j]; b_muy4 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j]; _t_86_ = 2.0 * b_muy4; b_muy2 += 3.0 * mu[k+1][j-1][i] * stry[j-1]; b_muy3 += mu[k+1][j-1][i] * stry[j-1]; _t_84_ = 2.0 * b_muy3; b_muy1 = mu[k+1][j-1][i] * stry[j-1]; b_muy1 -= 3.0 / 4.0 * mu[k+1][j][i] * stry[j]; b_muy1 -= 3.0 / 4.0 * mu[k+1][j-2][i] * stry[j-2]; b_muy2 += mu[k+1][j-2][i] * stry[j-2]; _t_82_ = 2.0 * b_muy2; _t_80_ = 2.0 * b_muy1; _t_80_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j]; _t_82_ += 3.0 * la[k+1][j][i] * stry[j]; _t_84_ += 3.0 * la[k+1][j][i] * stry[j]; _t_86_ -= 3.0 / 4.0 * la[k+1][j][i] * stry[j]; _t_80_ += la[k+1][j-1][i] * stry[j-1]; _t_82_ += 3.0 * la[k+1][j-1][i] * stry[j-1]; _t_84_ += la[k+1][j-1][i] * stry[j-1]; _t_82_ += la[k+1][j+1][i] * stry[j+1]; _t_84_ += 3.0 * la[k+1][j+1][i] * stry[j+1]; _t_86_ += la[k+1][j+1][i] * stry[j+1]; _t_80_ -= 3.0 / 4.0 * la[k+1][j-2][i] * stry[j-2]; _t_82_ += la[k+1][j-2][i] * stry[j-2]; _t_84_ += la[k+1][j+2][i] * stry[j+2]; _t_86_ -= 3.0 / 4.0 * la[k+1][j+2][i] * stry[j+2]; _t_81_ = u_1[k+1][j-2][i]; _t_81_ -= u_1[k+1][j][i]; _t_79_ = _t_80_ * _t_81_; _t_83_ = -u_1[k+1][j][i]; _t_83_ += u_1[k+1][j-1][i]; _t_79_ += _t_82_ * _t_83_; _t_85_ = -u_1[k+1][j][i]; _t_85_ += u_1[k+1][j+1][i]; _t_79_ += _t_84_ * _t_85_; _t_87_ = -u_1[k+1][j][i]; _t_87_ += u_1[k+1][j+2][i]; _t_79_ += _t_86_ * _t_87_; b_r2 = 1.0 / 6.0 * stry[j] * _t_79_; b_muz2 = 3.0 * mu[k+1][j][i] * strz[k+1]; b_muz3 = 3.0 * mu[k+1][j][i] * strz[k+1]; b_muz1 = -3.0 / 4.0 * mu[k+1][j][i] * strz[k+1]; b_muz4 = -3.0 / 4.0 * mu[k+1][j][i] * strz[k+1]; b_mux2 = 3.0 * mu[k+1][j][i] * strx[i]; b_mux3 = 3.0 * mu[k+1][j][i] * strx[i]; b_mux1 = -3.0 / 4.0 * mu[k+1][j][i] * strx[i]; b_mux4 = -3.0 / 4.0 * mu[k+1][j][i] * strx[i]; b_muz3 += mu[k+3][j][i] * strz[k+3]; b_muz4 -= 3.0 / 4.0 * mu[k+3][j][i] * strz[k+3]; b_muz1 += mu[k][j][i] * strz[k]; b_muz2 += 3.0 * mu[k][j][i] * strz[k]; b_muz3 += 
mu[k][j][i] * strz[k]; b_muz2 += mu[k+2][j][i] * strz[k+2]; b_muz3 += 3.0 * mu[k+2][j][i] * strz[k+2]; b_muz4 += mu[k+2][j][i] * strz[k+2]; _t_88_ = -b_muz3 * u_1[k+1][j][i]; _t_88_ -= b_muz4 * u_1[k+1][j][i]; _t_88_ += b_muz4 * u_1[k+3][j][i]; _t_88_ += b_muz3 * u_1[k+2][j][i]; b_muz1 -= 3.0 / 4.0 * mu[k-1][j][i] * strz[k-1]; _t_88_ -= b_muz1 * u_1[k+1][j][i]; b_muz2 += mu[k-1][j][i] * strz[k-1]; _t_88_ -= b_muz2 * u_1[k+1][j][i]; _t_88_ += b_muz2 * u_1[k][j][i]; _t_88_ += b_muz1 * u_1[k-1][j][i]; b_r2 += 1.0 / 6.0 * strz[k+1] * _t_88_; b_mux1 += mu[k+1][j][i-1] * strx[i-1]; b_mux2 += 3.0 * mu[k+1][j][i-1] * strx[i-1]; b_mux3 += mu[k+1][j][i-1] * strx[i-1]; b_mux2 += mu[k+1][j][i+1] * strx[i+1]; b_mux3 += 3.0 * mu[k+1][j][i+1] * strx[i+1]; b_mux4 += mu[k+1][j][i+1] * strx[i+1]; b_mux1 -= 3.0 / 4.0 * mu[k+1][j][i-2] * strx[i-2]; b_mux2 += mu[k+1][j][i-2] * strx[i-2]; _t_74_ = -b_mux1 * u_1[k+1][j][i]; _t_74_ -= b_mux2 * u_1[k+1][j][i]; _t_74_ += b_mux1 * u_1[k+1][j][i-2]; _t_74_ += b_mux2 * u_1[k+1][j][i-1]; b_mux3 += mu[k+1][j][i+2] * strx[i+2]; _t_74_ -= b_mux3 * u_1[k+1][j][i]; b_mux4 -= 3.0 / 4.0 * mu[k+1][j][i+2] * strx[i+2]; _t_74_ -= b_mux4 * u_1[k+1][j][i]; _t_74_ += b_mux3 * u_1[k+1][j][i+1]; _t_74_ += b_mux4 * u_1[k+1][j][i+2]; b_r2 += 1.0 / 6.0 * strx[i] * _t_74_; a_muy4 = -3.0 / 4.0 * mu[k][j][i] * stry[j]; a_muy4 -= 3.0 / 4.0 * mu[k][j+2][i] * stry[j+2]; a_muy3 = mu[k][j+2][i] * stry[j+2]; a_muy3 += 3.0 * mu[k][j][i] * stry[j]; a_muy3 += 3.0 * mu[k][j+1][i] * stry[j+1]; a_muy4 += mu[k][j+1][i] * stry[j+1]; a_muy2 = mu[k][j+1][i] * stry[j+1]; a_muy2 += 3.0 * mu[k][j][i] * stry[j]; _t_13_ = 2.0 * a_muy4; a_muy2 += 3.0 * mu[k][j-1][i] * stry[j-1]; a_muy3 += mu[k][j-1][i] * stry[j-1]; _t_11_ = 2.0 * a_muy3; a_muy1 = mu[k][j-1][i] * stry[j-1]; a_muy1 -= 3.0 / 4.0 * mu[k][j][i] * stry[j]; a_muy1 -= 3.0 / 4.0 * mu[k][j-2][i] * stry[j-2]; a_muy2 += mu[k][j-2][i] * stry[j-2]; _t_9_ = 2.0 * a_muy2; _t_7_ = 2.0 * a_muy1; _t_7_ -= 3.0 / 4.0 * la[k][j-2][i] * 
stry[j-2]; _t_9_ += la[k][j-2][i] * stry[j-2]; _t_11_ += la[k][j+2][i] * stry[j+2]; _t_13_ -= 3.0 / 4.0 * la[k][j+2][i] * stry[j+2]; _t_7_ += la[k][j-1][i] * stry[j-1]; _t_9_ += 3.0 * la[k][j-1][i] * stry[j-1]; _t_11_ += la[k][j-1][i] * stry[j-1]; _t_9_ += la[k][j+1][i] * stry[j+1]; _t_11_ += 3.0 * la[k][j+1][i] * stry[j+1]; _t_13_ += la[k][j+1][i] * stry[j+1]; _t_7_ -= 3.0 / 4.0 * la[k][j][i] * stry[j]; _t_9_ += 3.0 * la[k][j][i] * stry[j]; _t_11_ += 3.0 * la[k][j][i] * stry[j]; _t_13_ -= 3.0 / 4.0 * la[k][j][i] * stry[j]; _t_10_ = -u_1[k][j][i]; _t_10_ += u_1[k][j-1][i]; _t_6_ = _t_9_ * _t_10_; _t_12_ = -u_1[k][j][i]; _t_12_ += u_1[k][j+1][i]; _t_6_ += _t_11_ * _t_12_; _t_14_ = -u_1[k][j][i]; _t_14_ += u_1[k][j+2][i]; _t_6_ += _t_13_ * _t_14_; _t_8_ = -u_1[k][j][i]; _t_8_ += u_1[k][j-2][i]; _t_6_ += _t_7_ * _t_8_; a_r2 = 1.0 / 6.0 * stry[j] * _t_6_; a_muz3 = mu[k-1][j][i] * strz[k-1]; a_muz3 += mu[k+2][j][i] * strz[k+2]; a_muz3 += 3.0 * mu[k+1][j][i] * strz[k+1]; a_muz3 += 3.0 * mu[k][j][i] * strz[k]; _t_15_ = -a_muz3 * u_1[k][j][i]; _t_15_ += a_muz3 * u_1[k+1][j][i]; a_muz4 = mu[k+1][j][i] * strz[k+1]; a_muz4 -= 3.0 / 4.0 * mu[k][j][i] * strz[k]; a_muz4 -= 3.0 / 4.0 * mu[k+2][j][i] * strz[k+2]; _t_15_ -= a_muz4 * u_1[k][j][i]; _t_15_ += a_muz4 * u_1[k+2][j][i]; a_muz2 = mu[k+1][j][i] * strz[k+1]; a_muz2 += 3.0 * mu[k][j][i] * strz[k]; a_muz2 += 3.0 * mu[k-1][j][i] * strz[k-1]; a_muz1 = mu[k-1][j][i] * strz[k-1]; a_muz1 -= 3.0 / 4.0 * mu[k][j][i] * strz[k]; a_mux2 = 3.0 * mu[k][j][i] * strx[i]; a_mux1 = -3.0 / 4.0 * mu[k][j][i] * strx[i]; a_mux1 -= 3.0 / 4.0 * mu[k][j][i-2] * strx[i-2]; a_mux2 += mu[k][j][i-2] * strx[i-2]; a_mux3 = 3.0 * mu[k][j][i] * strx[i]; a_mux1 += mu[k][j][i-1] * strx[i-1]; a_mux2 += 3.0 * mu[k][j][i-1] * strx[i-1]; a_mux3 += mu[k][j][i-1] * strx[i-1]; _t_1_ = -a_mux1 * u_1[k][j][i]; _t_1_ += a_mux1 * u_1[k][j][i-2]; a_mux4 = -3.0 / 4.0 * mu[k][j][i] * strx[i]; a_mux2 += mu[k][j][i+1] * strx[i+1]; _t_1_ -= a_mux2 * u_1[k][j][i]; a_mux3 += 
3.0 * mu[k][j][i+1] * strx[i+1]; a_mux4 += mu[k][j][i+1] * strx[i+1]; _t_1_ += a_mux2 * u_1[k][j][i-1]; a_mux3 += mu[k][j][i+2] * strx[i+2]; _t_1_ -= a_mux3 * u_1[k][j][i]; a_mux4 -= 3.0 / 4.0 * mu[k][j][i+2] * strx[i+2]; _t_1_ -= a_mux4 * u_1[k][j][i]; _t_1_ += a_mux3 * u_1[k][j][i+1]; _t_1_ += a_mux4 * u_1[k][j][i+2]; a_r2 += 1.0 / 6.0 * strx[i] * _t_1_; a_muz1 -= 3.0 / 4.0 * mu[k-2][j][i] * strz[k-2]; _t_15_ -= a_muz1 * u_1[k][j][i]; a_muz2 += mu[k-2][j][i] * strz[k-2]; _t_15_ -= a_muz2 * u_1[k][j][i]; _t_15_ += a_muz2 * u_1[k-1][j][i]; _t_15_ += a_muz1 * u_1[k-2][j][i]; a_r2 += 1.0 / 6.0 * strz[k] * _t_15_; _t_21_ = 1.0 / 144.0 * strx[i] * stry[j]; _t_34_ = 1.0 / 144.0 * strx[i] * stry[j]; _t_60_ = 1.0 / 144.0 * stry[j] * strz[k]; _t_47_ = 1.0 / 144.0 * stry[j] * strz[k]; _t_48_ = la[k][j-2][i] * u_2[k-2][j-2][i]; _t_61_ = mu[k-2][j][i] * u_2[k-2][j-2][i]; _t_48_ -= la[k][j+2][i] * u_2[k-2][j+2][i]; _t_61_ -= mu[k-2][j][i] * u_2[k-2][j+2][i]; _t_48_ -= la[k][j-2][i] * u_2[k+2][j-2][i]; _t_61_ -= mu[k+2][j][i] * u_2[k+2][j-2][i]; _t_48_ += la[k][j+2][i] * u_2[k+2][j+2][i]; _t_61_ += mu[k+2][j][i] * u_2[k+2][j+2][i]; _t_53_ = -u_2[k+2][j-1][i]; _t_53_ += u_2[k-2][j-1][i]; _t_64_ = -u_2[k-2][j-1][i]; _t_53_ += 8.0 * u_2[k+1][j-1][i]; _t_69_ = 8.0 * -u_2[k+1][j-1][i]; _t_56_ = u_2[k-2][j+1][i]; _t_64_ += u_2[k-2][j+1][i]; _t_61_ += mu[k-2][j][i] * 8.0 * _t_64_; _t_56_ += 8.0 * u_2[k+1][j+1][i]; _t_69_ += 8.0 * u_2[k+1][j+1][i]; _t_69_ += u_2[k+1][j-2][i]; _t_51_ = u_2[k+1][j-2][i]; _t_69_ -= u_2[k+1][j+2][i]; _t_61_ += 8.0 * mu[k+1][j][i] * _t_69_; _t_59_ = u_2[k+1][j+2][i]; _t_56_ -= u_2[k+2][j+1][i]; _t_72_ = -u_2[k+2][j-1][i]; _t_72_ += u_2[k+2][j+1][i]; _t_61_ -= mu[k+2][j][i] * 8.0 * _t_72_; _t_53_ += 8.0 * -u_2[k-1][j-1][i]; _t_48_ -= 8.0 * la[k][j-1][i] * _t_53_; _t_66_ = 8.0 * -u_2[k-1][j-1][i]; _t_56_ += 8.0 * -u_2[k-1][j+1][i]; _t_48_ += 8.0 * la[k][j+1][i] * _t_56_; _t_66_ += 8.0 * u_2[k-1][j+1][i]; _t_51_ += -u_2[k-1][j-2][i]; _t_48_ += la[k][j-2][i] * 
8.0 * _t_51_; _t_66_ += u_2[k-1][j-2][i]; _t_59_ += -u_2[k-1][j+2][i]; _t_48_ -= la[k][j+2][i] * 8.0 * _t_59_; _t_66_ -= u_2[k-1][j+2][i]; _t_61_ -= 8.0 * mu[k-1][j][i] * _t_66_; _t_20_ = _t_47_ * _t_48_; _t_20_ += _t_60_ * _t_61_; _t_22_ = mu[k][j][i-2] * u_0[k][j-2][i-2]; _t_35_ = la[k][j-2][i] * u_0[k][j-2][i-2]; _t_22_ -= mu[k][j][i+2] * u_0[k][j-2][i+2]; _t_35_ -= la[k][j-2][i] * u_0[k][j-2][i+2]; _t_22_ -= mu[k][j][i-2] * u_0[k][j+2][i-2]; _t_35_ -= la[k][j+2][i] * u_0[k][j+2][i-2]; _t_22_ += mu[k][j][i+2] * u_0[k][j+2][i+2]; _t_35_ += la[k][j+2][i] * u_0[k][j+2][i+2]; _t_27_ = u_0[k][j-2][i-1]; _t_38_ = -u_0[k][j-2][i-1]; _t_38_ += u_0[k][j-2][i+1]; _t_35_ += la[k][j-2][i] * 8.0 * _t_38_; _t_30_ = u_0[k][j-2][i+1]; _t_27_ += 8.0 * -u_0[k][j-1][i-1]; _t_40_ = 8.0 * -u_0[k][j-1][i-1]; _t_30_ += 8.0 * -u_0[k][j-1][i+1]; _t_40_ += 8.0 * u_0[k][j-1][i+1]; _t_27_ += 8.0 * u_0[k][j+1][i-1]; _t_43_ = 8.0 * -u_0[k][j+1][i-1]; _t_30_ += 8.0 * u_0[k][j+1][i+1]; _t_43_ += 8.0 * u_0[k][j+1][i+1]; _t_27_ -= u_0[k][j+2][i-1]; _t_22_ -= 8.0 * mu[k][j][i-1] * _t_27_; _t_46_ = -u_0[k][j+2][i-1]; _t_30_ -= u_0[k][j+2][i+1]; _t_22_ += 8.0 * mu[k][j][i+1] * _t_30_; _t_46_ += u_0[k][j+2][i+1]; _t_35_ -= la[k][j+2][i] * 8.0 * _t_46_; _t_40_ += u_0[k][j-1][i-2]; _t_25_ = -u_0[k][j-1][i-2]; _t_25_ += u_0[k][j+1][i-2]; _t_22_ += mu[k][j][i-2] * 8.0 * _t_25_; _t_43_ += u_0[k][j+1][i-2]; _t_40_ -= u_0[k][j-1][i+2]; _t_35_ -= 8.0 * la[k][j-1][i] * _t_40_; _t_33_ = -u_0[k][j-1][i+2]; _t_33_ += u_0[k][j+1][i+2]; _t_22_ -= mu[k][j][i+2] * 8.0 * _t_33_; _t_20_ += _t_21_ * _t_22_; _t_43_ -= u_0[k][j+1][i+2]; _t_35_ += 8.0 * la[k][j+1][i] * _t_43_; _t_20_ += _t_34_ * _t_35_; a_r2 += _t_20_; _t_142_ = u_2[k+2][j-2][i]; _t_142_ -= u_2[k+2][j+2][i]; _t_142_ += 8.0 * -u_2[k+2][j-1][i]; _t_142_ += 8.0 * u_2[k+2][j+1][i]; _t_134_ = mu[k-1][j][i] * u_2[k-1][j-2][i]; _t_134_ -= mu[k-1][j][i] * u_2[k-1][j+2][i]; _t_134_ += 8.0 * mu[k+2][j][i] * _t_142_; _t_121_ = la[k+1][j-2][i] * u_2[k-1][j-2][i]; 
_t_121_ -= la[k+1][j+2][i] * u_2[k-1][j+2][i]; _t_126_ = u_2[k-1][j-1][i]; _t_126_ += 8.0 * u_2[k+2][j-1][i]; _t_129_ = u_2[k-1][j+1][i]; _t_129_ += 8.0 * u_2[k+2][j+1][i]; _t_137_ = -u_2[k-1][j-1][i]; _t_137_ += u_2[k-1][j+1][i]; _t_134_ += mu[k-1][j][i] * 8.0 * _t_137_; _t_124_ = u_2[k+2][j-2][i]; _t_132_ = u_2[k+2][j+2][i]; _t_121_ -= la[k+1][j-2][i] * u_2[k+3][j-2][i]; _t_134_ -= mu[k+3][j][i] * u_2[k+3][j-2][i]; _t_121_ += la[k+1][j+2][i] * u_2[k+3][j+2][i]; _t_134_ += mu[k+3][j][i] * u_2[k+3][j+2][i]; _t_126_ += 8.0 * -u_2[k][j-1][i]; _t_139_ = 8.0 * -u_2[k][j-1][i]; _t_129_ += 8.0 * -u_2[k][j+1][i]; _t_139_ += 8.0 * u_2[k][j+1][i]; _t_124_ += -u_2[k][j-2][i]; _t_121_ += la[k+1][j-2][i] * 8.0 * _t_124_; _t_139_ += u_2[k][j-2][i]; _t_132_ += -u_2[k][j+2][i]; _t_121_ -= la[k+1][j+2][i] * 8.0 * _t_132_; _t_139_ -= u_2[k][j+2][i]; _t_134_ -= 8.0 * mu[k][j][i] * _t_139_; _t_126_ -= u_2[k+3][j-1][i]; _t_121_ -= 8.0 * la[k+1][j-1][i] * _t_126_; _t_145_ = -u_2[k+3][j-1][i]; _t_129_ -= u_2[k+3][j+1][i]; _t_121_ += 8.0 * la[k+1][j+1][i] * _t_129_; _t_145_ += u_2[k+3][j+1][i]; _t_134_ -= mu[k+3][j][i] * 8.0 * _t_145_; _t_107_ = 1.0 / 144.0 * strx[i] * stry[j]; _t_94_ = 1.0 / 144.0 * strx[i] * stry[j]; _t_120_ = 1.0 / 144.0 * stry[j] * strz[k+1]; _t_93_ = _t_120_ * _t_121_; _t_133_ = 1.0 / 144.0 * stry[j] * strz[k+1]; _t_93_ += _t_133_ * _t_134_; _t_95_ = mu[k+1][j][i-2] * u_0[k+1][j-2][i-2]; _t_108_ = la[k+1][j-2][i] * u_0[k+1][j-2][i-2]; _t_95_ -= mu[k+1][j][i+2] * u_0[k+1][j-2][i+2]; _t_108_ -= la[k+1][j-2][i] * u_0[k+1][j-2][i+2]; _t_95_ -= mu[k+1][j][i-2] * u_0[k+1][j+2][i-2]; _t_108_ -= la[k+1][j+2][i] * u_0[k+1][j+2][i-2]; _t_95_ += mu[k+1][j][i+2] * u_0[k+1][j+2][i+2]; _t_108_ += la[k+1][j+2][i] * u_0[k+1][j+2][i+2]; _t_100_ = u_0[k+1][j-2][i-1]; _t_111_ = -u_0[k+1][j-2][i-1]; _t_111_ += u_0[k+1][j-2][i+1]; _t_108_ += la[k+1][j-2][i] * 8.0 * _t_111_; _t_103_ = u_0[k+1][j-2][i+1]; _t_100_ += 8.0 * -u_0[k+1][j-1][i-1]; _t_113_ = 8.0 * -u_0[k+1][j-1][i-1]; _t_103_ 
+= 8.0 * -u_0[k+1][j-1][i+1]; _t_113_ += 8.0 * u_0[k+1][j-1][i+1]; _t_100_ += 8.0 * u_0[k+1][j+1][i-1]; _t_116_ = 8.0 * -u_0[k+1][j+1][i-1]; _t_103_ += 8.0 * u_0[k+1][j+1][i+1]; _t_116_ += 8.0 * u_0[k+1][j+1][i+1]; _t_100_ -= u_0[k+1][j+2][i-1]; _t_95_ -= 8.0 * mu[k+1][j][i-1] * _t_100_; _t_119_ = -u_0[k+1][j+2][i-1]; _t_103_ -= u_0[k+1][j+2][i+1]; _t_95_ += 8.0 * mu[k+1][j][i+1] * _t_103_; _t_119_ += u_0[k+1][j+2][i+1]; _t_108_ -= la[k+1][j+2][i] * 8.0 * _t_119_; _t_106_ = -u_0[k+1][j-1][i+2]; _t_113_ -= u_0[k+1][j-1][i+2]; _t_106_ += u_0[k+1][j+1][i+2]; _t_95_ -= mu[k+1][j][i+2] * 8.0 * _t_106_; _t_116_ -= u_0[k+1][j+1][i+2]; _t_98_ = -u_0[k+1][j-1][i-2]; _t_113_ += u_0[k+1][j-1][i-2]; _t_108_ -= 8.0 * la[k+1][j-1][i] * _t_113_; _t_98_ += u_0[k+1][j+1][i-2]; _t_95_ += mu[k+1][j][i-2] * 8.0 * _t_98_; _t_93_ += _t_94_ * _t_95_; _t_116_ += u_0[k+1][j+1][i-2]; _t_108_ += 8.0 * la[k+1][j+1][i] * _t_116_; _t_93_ += _t_107_ * _t_108_; b_r2 += _t_93_; uacc_1kc0jc0ic0 = a1 * uacc_1[k][j][i]; uacc_1kc0jc0ic0 += cof * a_r2; uacc_1[k][j][i] = uacc_1kc0jc0ic0; uacc_1kp1jc0ic0 = a1 * uacc_1[k+1][j][i]; uacc_1kp1jc0ic0 += cof * b_r2; uacc_1[k+1][j][i] = uacc_1kp1jc0ic0; } } } __global__ void __launch_bounds__ (128,2) sw4_3 (double * uacc_in_0, double * uacc_in_1, double * uacc_in_2, double * __restrict__ u_in_0, double * __restrict__ u_in_1, double * __restrict__ u_in_2, double * __restrict__ mu_in, double * __restrict__ la_in, double * strx, double * stry, double * strz, int N) { //Determing the block's indices int blockdim_i= (int)(blockDim.x); int i0 = (int)(blockIdx.x)*(blockdim_i); int i = max (i0, 0) + (int)(threadIdx.x); int blockdim_j= (int)(blockDim.y); int j0 = (int)(blockIdx.y)*(blockdim_j); int j = max (j0, 0) + (int)(threadIdx.y); // Assumptions int a1 = 1; double h = 3.7; double cof = 1e0 / ( h * h); double (*uacc_0)[304][304] = (double (*)[304][304])uacc_in_0; double (*uacc_1)[304][304] = (double (*)[304][304])uacc_in_1; double (*uacc_2)[304][304] = (double 
(*)[304][304])uacc_in_2; double (*u_0)[304][304] = (double (*)[304][304])u_in_0; double (*u_1)[304][304] = (double (*)[304][304])u_in_1; double (*u_2)[304][304] = (double (*)[304][304])u_in_2; double (*mu)[304][304] = (double (*)[304][304])mu_in; double (*la)[304][304] = (double (*)[304][304])la_in; double mux1, mux2, mux3, mux4, muy1, muy2, muy3, muy4, muz1, muz2, muz3, muz4; double r1, r2, r3; if (i>=2 & j>=2 & i<=N-3 & j<=N-3) { #pragma unroll 10 for (int k=2; k<=N-3; k++) { mux1 = mu[k][j][i-1] * strx[i-1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i-2] * strx[i-2]; mux2 = mu[k][j][i-2] * strx[i-2] + mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i] + 3.0 * mu[k][j][i-1] * strx[i-1]; mux3 = mu[k][j][i-1] * strx[i-1] + mu[k][j][i+2] * strx[i+2] + 3.0 * mu[k][j][i+1] * strx[i+1] + 3.0 * mu[k][j][i] * strx[i]; mux4 = mu[k][j][i+1] * strx[i+1] - 3e0 / 4 * mu[k][j][i] * strx[i] - 3e0 / 4 * mu[k][j][i+2] * strx[i+2]; muy1 = mu[k][j-1][i] * stry[j-1] - 3e0 / 4 * mu[k][j][i] * stry[j] -3e0 / 4 * mu[k][j-2][i] * stry[j-2]; muy2 = mu[k][j-2][i] * stry[j-2] + mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j] + 3.0 * mu[k][j-1][i] * stry[j-1]; muy3 = mu[k][j-1][i] * stry[j-1] + mu[k][j+2][i] * stry[j+2] + 3.0 * mu[k][j+1][i] * stry[j+1] + 3.0 * mu[k][j][i] * stry[j]; muy4 = mu[k][j+1][i] * stry[j+1] - 3e0 / 4 * mu[k][j][i] * stry[j] - 3e0 / 4 * mu[k][j+2][i] * stry[j+2]; muz1 = mu[k-1][j][i] * strz[k-1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 / 4 * mu[k-2][j][i] * strz[k-2]; muz2 = mu[k-2][j][i] * strz[k-2] + mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k] + 3.0 * mu[k-1][j][i] * strz[k-1]; muz3 = mu[k-1][j][i] * strz[k-1] + mu[k+2][j][i] * strz[k+2] + 3.0 * mu[k+1][j][i] * strz[k+1] + 3.0 * mu[k][j][i] * strz[k]; muz4 = mu[k+1][j][i] * strz[k+1] - 3e0 / 4 * mu[k][j][i] * strz[k] - 3e0 /4 * mu[k+2][j][i] * strz[k+2]; r3 = 1e0 / 6 * (strx[i] * (mux1 * (u_2[k][j][i-2] - u_2[k][j][i]) + mux2 * (u_2[k][j][i-1] - u_2[k][j][i]) + mux3 * 
(u_2[k][j][i+1] - u_2[k][j][i]) + mux4 * (u_2[k][j][i+2] - u_2[k][j][i])) + stry[j] * (muy1 * (u_2[k][j-2][i] - u_2[k][j][i]) + muy2 * (u_2[k][j-1][i] - u_2[k][j][i]) + muy3 * (u_2[k][j+1][i] - u_2[k][j][i]) + muy4 * (u_2[k][j+2][i] - u_2[k][j][i])) + strz[k] * ((2 * muz1 + la[k-1][j][i] * strz[k-1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k-2][j][i] * strz[k-2]) * (u_2[k-2][j][i] - u_2[k][j][i]) + (2 * muz2 + la[k-2][j][i] * strz[k-2] + la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k] + 3 * la[k-1][j][i] * strz[k-1]) * (u_2[k-1][j][i] - u_2[k][j][i]) + (2 * muz3 + la[k-1][j][i] * strz[k-1] + la[k+2][j][i] * strz[k+2] + 3 * la[k+1][j][i] * strz[k+1] + 3 * la[k][j][i] * strz[k]) * (u_2[k+1][j][i] - u_2[k][j][i]) + (2 * muz4 + la[k+1][j][i] * strz[k+1] - 3e0 / 4 * la[k][j][i] * strz[k] - 3e0 / 4 * la[k+2][j][i] * strz[k+2]) * (u_2[k+2][j][i] - u_2[k][j][i]))); r3 += strx[i] * strz[k] * (1e0 / 144) * (mu[k][j][i-2] * (u_0[k-2][j][i-2] - u_0[k+2][j][i-2] + 8 * (-u_0[k-1][j][i-2] + u_0[k+1][j][i-2])) - 8 * (mu[k][j][i-1] * (u_0[k-2][j][i-1] - u_0[k+2][j][i-1] + 8 * (-u_0[k-1][j][i-1] + u_0[k+1][j][i-1]))) + 8 * (mu[k][j][i+1] * (u_0[k-2][j][i+1] - u_0[k+2][j][i+1] + 8 * (-u_0[k-1][j][i+1] + u_0[k+1][j][i+1]))) - (mu[k][j][i+2] * (u_0[k-2][j][i+2] - u_0[k+2][j][i+2] + 8 * (-u_0[k-1][j][i+2] + u_0[k+1][j][i+2])))); r3 += stry[j] * strz[k] * (1e0 / 144) * (mu[k][j-2][i] * (u_1[k-2][j-2][i] - u_1[k+2][j-2][i] + 8 * (-u_1[k-1][j-2][i] + u_1[k+1][j-2][i])) - 8 * (mu[k][j-1][i] * (u_1[k-2][j-1][i] - u_1[k+2][j-1][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k+1][j-1][i]))) + 8 * (mu[k][j+1][i] * (u_1[k-2][j+1][i] - u_1[k+2][j+1][i] + 8 * (-u_1[k-1][j+1][i] + u_1[k+1][j+1][i]))) - (mu[k][j+2][i] * (u_1[k-2][j+2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k-1][j+2][i] + u_1[k+1][j+2][i])))); r3 += strx[i] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_0[k-2][j][i-2] - u_0[k-2][j][i+2] + 8 * (-u_0[k-2][j][i-1] + u_0[k-2][j][i+1])) - 8 * (la[k-1][j][i] * (u_0[k-1][j][i-2] - 
u_0[k-1][j][i+2] + 8 * (-u_0[k-1][j][i-1] + u_0[k-1][j][i+1]))) + 8 * (la[k+1][j][i] * (u_0[k+1][j][i-2] - u_0[k+1][j][i+2] + 8 * (-u_0[k+1][j][i-1] + u_0[k+1][j][i+1]))) - (la[k+2][j][i] * (u_0[k+2][j][i-2] - u_0[k+2][j][i+2] + 8 * (-u_0[k+2][j][i-1] + u_0[k+2][j][i+1])))); r3 += stry[j] * strz[k] * (1e0 / 144) * (la[k-2][j][i] * (u_1[k-2][j-2][i] - u_1[k-2][j+2][i] + 8 * (-u_1[k-2][j-1][i] + u_1[k-2][j+1][i])) - 8 * (la[k-1][j][i] * (u_1[k-1][j-2][i] - u_1[k-1][j+2][i] + 8 * (-u_1[k-1][j-1][i] + u_1[k-1][j+1][i]))) + 8 * (la[k+1][j][i] * (u_1[k+1][j-2][i] - u_1[k+1][j+2][i] + 8 * (-u_1[k+1][j-1][i] + u_1[k+1][j+1][i]))) - (la[k+2][j][i] * (u_1[k+2][j-2][i] - u_1[k+2][j+2][i] + 8 * (-u_1[k+2][j-1][i] + u_1[k+2][j+1][i])))); uacc_2[k][j][i] = a1 * uacc_2[k][j][i] + cof * r3; } } } extern "C" void host_code (double *h_uacc_0, double *h_uacc_1, double *h_uacc_2, double *h_u_0, double *h_u_1, double *h_u_2, double *h_mu, double *h_la, double *h_strx, double *h_stry, double *h_strz, int N) { double *uacc_0; cudaMalloc (&uacc_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_0\n"); cudaMemcpy (uacc_0, h_uacc_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *uacc_1; cudaMalloc (&uacc_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_1\n"); cudaMemcpy (uacc_1, h_uacc_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *uacc_2; cudaMalloc (&uacc_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for uacc_2\n"); cudaMemcpy (uacc_2, h_uacc_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_0; cudaMalloc (&u_0, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_0\n"); cudaMemcpy (u_0, h_u_0, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_1; cudaMalloc (&u_1, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_1\n"); cudaMemcpy (u_1, h_u_1, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *u_2; cudaMalloc 
(&u_2, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for u_2\n"); cudaMemcpy (u_2, h_u_2, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *mu; cudaMalloc (&mu, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for mu\n"); cudaMemcpy (mu, h_mu, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *la; cudaMalloc (&la, sizeof(double)*N*N*N); check_error ("Failed to allocate device memory for la\n"); cudaMemcpy (la, h_la, sizeof(double)*N*N*N, cudaMemcpyHostToDevice); double *strx; cudaMalloc (&strx, sizeof(double)*N); check_error ("Failed to allocate device memory for strx\n"); cudaMemcpy (strx, h_strx, sizeof(double)*N, cudaMemcpyHostToDevice); double *stry; cudaMalloc (&stry, sizeof(double)*N); check_error ("Failed to allocate device memory for stry\n"); cudaMemcpy (stry, h_stry, sizeof(double)*N, cudaMemcpyHostToDevice); double *strz; cudaMalloc (&strz, sizeof(double)*N); check_error ("Failed to allocate device memory for strz\n"); cudaMemcpy (strz, h_strz, sizeof(double)*N, cudaMemcpyHostToDevice); dim3 blockconfig (16, 8); dim3 gridconfig (ceil(N, blockconfig.x), ceil(N, blockconfig.y), 1); sw4_1 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); sw4_2 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); sw4_3 <<<gridconfig, blockconfig>>> (uacc_0, uacc_1, uacc_2, u_0, u_1, u_2, mu, la, strx, stry, strz, N); cudaMemcpy (h_uacc_0, uacc_0, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_uacc_1, uacc_1, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaMemcpy (h_uacc_2, uacc_2, sizeof(double)*N*N*N, cudaMemcpyDeviceToHost); cudaFree (uacc_0); cudaFree (uacc_1); cudaFree (uacc_2); cudaFree (u_0); cudaFree (u_1); cudaFree (u_2); cudaFree (mu); cudaFree (la); cudaFree (strx); cudaFree (stry); cudaFree (strz); }
12,858
#include <iostream>
#include <fstream>
#include <iomanip>
#include <vector>
#include <string>
#include <algorithm>

// Scan all input files for the maximum radial velocity and write it to std::out.
// Each data line is expected to hold at least four columns; the 4th is VelX.
int main(int argc, char* argv[])
{
    if (argc == 1) {
        std::cerr << "Usage: " << argv[0] << " [input1.dat] [input2.dat] ...\n";
        return -1;
    }
    double x, y;
    for (int i = 1; i < argc; i++) {
        std::vector<double> v;
        std::ifstream is(argv[i]);
        if (!is) {
            std::cerr << "Cannot open " << argv[i] << "\n";
            continue;
        }
        std::string s;
        std::getline(is, s);                 // skip the header line
        // Test the extraction itself instead of is.good() before the read:
        // the old loop pushed a stale y once more after the final line.
        while (is >> x >> x >> x >> y) {     // 4th column is VelX
            std::getline(is, s);             // throw away until newline
            v.push_back(y);
        }
        if (v.empty()) {                     // max_element on an empty range is UB
            std::cerr << "No data in " << argv[i] << "\n";
            continue;
        }
        std::cout << *std::max_element(v.begin(), v.end()) << "\n";
    }
    return 0;
}
12,859
// Copyright (c) OpenMMLab. All rights reserved.

#include <cstdint>

namespace mmdeploy {
namespace operation {
namespace cuda {

// Element-wise type conversion: dst[i] = static_cast<To>(src[i]) for i in [0, n).
// Grid-stride loop, so any launch configuration covers the whole range.
template <typename From, typename To>
__global__ void _Cast(const From* src, To* dst, size_t n) {
  size_t first = threadIdx.x + static_cast<size_t>(blockIdx.x) * blockDim.x;
  for (size_t i = first; i < n; i += blockDim.x * gridDim.x) {
    dst[i] = static_cast<To>(src[i]);
  }
}

// Launches _Cast on `stream` with 256-thread blocks sized to cover n elements.
template <typename From, typename To>
void Cast(const From* src, To* dst, size_t n, cudaStream_t stream) {
  const size_t block = 256;
  const size_t grid = (n + block - 1) / block;  // ceil-div
  _Cast<<<grid, block, 0, stream>>>(src, dst, n);
}

// Explicit instantiation: bytes to floats.
template void Cast(const uint8_t*, float*, size_t, cudaStream_t);

}  // namespace cuda
}  // namespace operation
}  // namespace mmdeploy
12,860
#include <stdio.h>

// Warp-divergence demo.  mathKernel branches per thread (idx % 2), so both
// sides of the if/else serialize within every warp; mathKernel2 branches per
// warp (idx / warpSize), so each warp takes a single path.

__global__ void mathKernel(float *c) {
    // Global 1D index.  The original combined threadIdx.x/.y only and
    // ignored blockIdx, so every block wrote the same few elements.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float a = 0.0f;
    float b = 0.0f;
    if (idx % 2 == 0)
        a = 100.f;
    else
        b = 200.f;
    c[idx] = a + b;
}

__global__ void mathKernel2(float *c) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    float a = 0.0f;
    float b = 0.0f;
    if ((idx / warpSize) % 2 == 0)
        a = 100.f;
    else
        b = 200.f;
    c[idx] = a + b;
}

int main() {
    int dev = 1;   // NOTE(review): assumes a second GPU exists — confirm
    cudaSetDevice(dev);

    int nElem = 64;
    int size = nElem * sizeof(float);

    float *h_c = (float *)malloc(size);
    float *d_c;
    cudaMalloc((void **)&d_c, size);

    // Grid is sized in ELEMENTS, not bytes, and <<<grid, block>>> takes the
    // grid first: the original passed <<<block, grid>>> and derived the grid
    // from the byte count, launching 64 blocks of 4 threads.
    int blockSize = 64;
    dim3 block(blockSize, 1);
    dim3 grid((nElem + block.x - 1) / block.x, 1);

    mathKernel<<<grid, block>>>(d_c);
    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);   // blocking copy syncs with the kernel

    mathKernel2<<<grid, block>>>(d_c);
    cudaMemcpy(h_c, d_c, size, cudaMemcpyDeviceToHost);

    free(h_c);
    cudaFree(d_c);
    return 0;
}
12,861
#include "includes.h"

// Logistic sigmoid: 1 / (1 + e^-x).  __expf is the fast-math
// single-precision exponential (reduced accuracy, higher throughput).
__device__ float sigmoid(float x)
{
    // Float literals avoid promoting the addition/division to double.
    return 1.0f / (1.0f + __expf(-x));
}

// Zero-initializes a bias vector of `size` elements, one thread per element.
__global__ void initializeBiasKernel_sigmoid(float* b, int size)
{
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index < size) {           // guard: grid may overshoot size
        b[index] = 0.0f;          // float literal; 0.0 is a double constant
    }
}
12,862
#include <cstdio>
#include <cuda.h>
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/fill.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>

#define SIZE 10
#define V 0.1
#define T 1

using namespace std;

// One upwind step of 1D linear advection on a periodic grid:
// res[i] = f[i] + V*T * (f[i-1] - f[i]), with i-1 wrapping to SIZE-1.
__global__ void kernel(float *f, float *res) {
  int cur = threadIdx.x + blockDim.x * blockIdx.x;
  int prev = cur - 1;
  if (prev == -1)
    prev = SIZE - 1;                 // periodic boundary
  if (cur < SIZE) {                  // cur is never negative; only the upper bound matters
    res[cur] = f[cur] + (V * T) * (f[prev] - f[cur]);
  }
}

// y <- a*x + y, element-wise.
struct saxpy_functor {
  const float a;
  saxpy_functor(float _a) : a(_a) {}
  __host__ __device__ float operator()(float x, float y) { return a * x + y; }
};

// In-place SAXPY over two device vectors: y = a*x + y.
void saxpy(float a, thrust::device_vector<float> &x,
           thrust::device_vector<float> &y) {
  thrust::transform(x.begin(), x.end(), y.begin(), y.begin(), saxpy_functor(a));
}

int main() {
  cudaEvent_t start, stop;
  float elapsedTime;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  float F[SIZE];
  float DATA[SIZE];
  for (int i = 0; i < SIZE; i++) {
    DATA[i] = rand() % 10;           // unseeded rand(): same input every run
    F[i] = DATA[i];
  }

  // ---- Raw CUDA version: iterate the stencil entirely on the device ----
  float *d_cur, *d_next;
  cudaMalloc((void **)&d_cur, sizeof(float) * SIZE);
  cudaMalloc((void **)&d_next, sizeof(float) * SIZE);
  cudaMemcpy(d_cur, F, sizeof(float) * SIZE, cudaMemcpyHostToDevice);

  cudaEventRecord(start, 0);
  for (int i = 0; i < 100; i++) {
    kernel<<<1, SIZE>>>(d_cur, d_next);
    // Ping-pong the device buffers; the original copied the result to the
    // host and straight back on every iteration.
    float *tmp = d_cur;
    d_cur = d_next;
    d_next = tmp;
  }
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start, stop);
  cudaMemcpy(F, d_cur, sizeof(float) * SIZE, cudaMemcpyDeviceToHost);  // final field
  fprintf(stderr, "Time (Raw CUDA C) %g\n", elapsedTime);

  // ---- Thrust version: the same update written as SAXPY ----
  // h1 holds f, h2 holds the left neighbour scaled by V*T; one step is
  // f' = (1 - V*T)*f + V*T*f(shifted).
  thrust::host_vector<float> h1(SIZE);
  thrust::host_vector<float> h2(SIZE);
  for (int i = 0; i < SIZE; i++) {
    h1[i] = DATA[i];
    h2[i] = (i - 1 >= 0) ? DATA[i - 1] : DATA[SIZE - 1];
    h2[i] = h2[i] * V * T;
  }
  thrust::device_vector<float> d1 = h1;
  thrust::device_vector<float> d2 = h2;

  cudaEventRecord(start, 0);
  for (int j = 0; j < 100; j++) {
    saxpy(1 - V * T, d1, d2);
    h2 = d2;
    d1 = h2;
    // Rebuild the shifted, scaled neighbour vector for the next step.
    for (int i = 0; i < SIZE; i++) {
      h1[i] = (i - 1 >= 0) ? h2[i - 1] : h2[SIZE - 1];
      h1[i] = h1[i] * V * T;
    }
    d2 = h1;
  }
  cudaEventRecord(stop, 0);
  cudaEventSynchronize(stop);
  cudaEventElapsedTime(&elapsedTime, start, stop);
  fprintf(stderr, "Time (Trust SAXPY) %g\n", elapsedTime);

  // Cleanup: the original leaked both device buffers and both events.
  cudaFree(d_cur);
  cudaFree(d_next);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return 0;
}
12,863
#include <stdio.h>
#include <stdlib.h>

#define MAT_ROW 4
#define MAT_COL 6
#define CHANNELS 1
#define MASK_WIDTH 3
#define MASK_RADIUS MASK_WIDTH/2
#define O_TILE_WIDTH 12
#define BLOCK_WIDTH (O_TILE_WIDTH + (MASK_WIDTH-1))

/* Prints an n-by-m matrix stored row-major. */
void print_matrix(float* a, int n, int m)
{
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            printf("%f ", a[i*m+j]);
        }
        printf("\n");
    }
}

/* Fills an n-by-m matrix with 1.0 (deterministic test data). */
void fill_mat(float* a, int n, int m)
{
    for (int i = 0; i < n; i++) {
        for (int j = 0; j < m; j++) {
            a[i*m+j] = 1.0f;
        }
    }
}

/*
 * Tiled 2D convolution with halo cells staged in shared memory.
 * Each BLOCK_WIDTH x BLOCK_WIDTH block loads a padded input tile; the inner
 * O_TILE_WIDTH x O_TILE_WIDTH threads each produce one output pixel.
 */
__global__ void convolution_shared(float *in, float* out, const float* __restrict__ M, int height, int width, int channels)
{
    __shared__ float Ns[BLOCK_WIDTH][BLOCK_WIDTH];

    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row_o = blockIdx.y * O_TILE_WIDTH + ty;   // output coordinates
    int col_o = blockIdx.x * O_TILE_WIDTH + tx;
    int row_i = row_o - MASK_RADIUS;              // input coordinates (shifted for the halo)
    int col_i = col_o - MASK_RADIUS;

    for (int c = 0; c < channels; c++) {
        // Stage the padded tile; out-of-range cells become zero padding.
        if ((row_i >= 0) && (row_i < height) && (col_i >= 0) && (col_i < width)) {
            Ns[ty][tx] = in[(row_i*width + col_i)*channels + c];
        } else {
            Ns[ty][tx] = 0.0f;
        }
        __syncthreads();   // tile must be fully loaded before anyone reads it

        if (ty < O_TILE_WIDTH && tx < O_TILE_WIDTH) {
            float sum = 0.0f;
            for (int y = 0; y < MASK_WIDTH; y++) {
                for (int x = 0; x < MASK_WIDTH; x++) {
                    sum += Ns[ty + y][tx + x] * M[y*MASK_WIDTH + x];
                }
            }
            if (row_o < height && col_o < width) {
                out[(row_o * width + col_o) * channels + c] = sum;
            }
        }
        // Barrier before the next channel overwrites Ns; the original had
        // this commented out, racing when channels > 1.
        __syncthreads();
    }
}

int main()
{
    float *mat, *d_mat;
    float *mask, *d_mask;
    float *result, *d_result;

    int mat_size = MAT_ROW*MAT_COL*sizeof(float);
    int mask_size = MASK_WIDTH*MASK_WIDTH*sizeof(float);

    mat = (float*) malloc(mat_size);
    result = (float*) malloc(mat_size);
    mask = (float*) malloc(mask_size);
    fill_mat(mat, MAT_ROW, MAT_COL);
    fill_mat(mask, MASK_WIDTH, MASK_WIDTH);

    printf("Printing Matrix \n");
    print_matrix(mat, MAT_ROW, MAT_COL);
    printf("Printing Mask\n");
    print_matrix(mask, MASK_WIDTH, MASK_WIDTH);
    printf("\n");

    cudaMalloc((void**)&d_mat, mat_size);
    cudaMalloc((void**)&d_result, mat_size);
    cudaMalloc((void**)&d_mask, mask_size);
    cudaMemcpy(d_mat, mat, mat_size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mask, mask, mask_size, cudaMemcpyHostToDevice);

    // Each block produces an O_TILE_WIDTH-square of outputs, so the grid is
    // sized by O_TILE_WIDTH; the original divided by BLOCK_WIDTH, which
    // under-covers any matrix wider than one tile.
    dim3 my_block(BLOCK_WIDTH, BLOCK_WIDTH);
    dim3 my_grid((MAT_COL + O_TILE_WIDTH - 1)/O_TILE_WIDTH,
                 (MAT_ROW + O_TILE_WIDTH - 1)/O_TILE_WIDTH);
    convolution_shared<<<my_grid, my_block>>>(d_mat, d_result, d_mask, MAT_ROW, MAT_COL, CHANNELS);

    cudaMemcpy(result, d_result, mat_size, cudaMemcpyDeviceToHost);
    printf("Printing result\n");
    print_matrix(result, MAT_ROW, MAT_COL);

    // Release host and device memory (previously all leaked).
    free(mat);
    free(result);
    free(mask);
    cudaFree(d_mat);
    cudaFree(d_result);
    cudaFree(d_mask);
    return 0;
}
12,864
#include "includes.h"

// Fills a flattened h-by-w buffer so that every element holds its own
// column index: out_data[row * w + col] = col.
__global__ void index_init(int* out_data, int h, int w)
{
    const int total = h * w;
    const int stride = blockDim.x * gridDim.x;   // grid-stride loop
    for (int pos = threadIdx.x + blockIdx.x * blockDim.x; pos < total; pos += stride) {
        out_data[pos] = pos % w;                 // column within the row
    }
}
12,865
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <algorithm>
#include <cstdlib>
#include <cstdio>

#define SIZE 16 << 20

// compile with:
//   nvcc -m64 -arch=sm_35 thrustsort.cu -lcudart -O3 -o thrustsort
//   nvcc thrustsort.cu -o thrustsort

int main(void)
{
    const unsigned int N = SIZE;
    cudaSetDevice(0);

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Generate 16M random key/value pairs serially on the host.
    thrust::host_vector<int> h_vec_key(SIZE);
    thrust::host_vector<float> h_vec_value(SIZE);
    std::generate(h_vec_key.begin(), h_vec_key.end(), rand);
    std::generate(h_vec_value.begin(), h_vec_value.end(), rand);

    // Transfer data to the device.  The value vector must stay float: the
    // original declared it device_vector<int>, silently truncating every
    // value during the host-to-device copy.
    thrust::device_vector<int> d_vec_key = h_vec_key;
    thrust::device_vector<float> d_vec_value = h_vec_value;

    float elapsedTime;
    cudaDeviceSynchronize();          // quiesce before timing
    cudaEventRecord(start, 0);

    // Key/value sort on the device.
    thrust::sort_by_key(d_vec_key.begin(), d_vec_key.end(), d_vec_value.begin());

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);

    // Transfer results back to the host.
    thrust::copy(d_vec_key.begin(), d_vec_key.end(), h_vec_key.begin());
    thrust::copy(d_vec_value.begin(), d_vec_value.end(), h_vec_value.begin());

    cudaEventDestroy(start);
    cudaEventDestroy(stop);

    double dTimeSecs = 1.0e-3 * elapsedTime;
    printf("sortingNetworks-thrust, Throughput = %.4f MElements/s, Time = %.5f s, Size = %u elements, NumDevsUsed = %u\n",
           (1.0e-6 * (double)N / dTimeSecs), dTimeSecs, N, 1);
    printf("Processing time: %f (ms)\n", elapsedTime);
    return 0;
}
12,866
/*
 * A program that empirically estimates the probability of flipping 16 coins
 * and getting the sequence HHTHTHHHTHHHTHTH.  The estimate converges toward
 * (1/2)^16 as more sets are flipped.
 *
 * Random bits come from the cuRAND host API using the MTGP32 (Mersenne
 * Twister) generator: generate a batch of 32-bit integers on the GPU, copy
 * them back, and check each row of 16 values against the target sequence.
 *
 * Author: Joseph Osborne
 * Date: 11/30/2018
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>

#include <device_launch_parameters.h>
#include <cuda_runtime.h>
#include <curand.h>

long double goldenNum = powl((long double)(0.5), (long double)(16)); // (1/2)^16

void query_device();

int main(int argc, char **argv)
{
    // Number of batches to run, from the command line.
    unsigned long long int input = 0;
    if (argc < 2) {
        printf("USE ONE PARAMETER! How many times to run.\n");
        exit(1);
    }
    // strtoull matches the unsigned long long target; atol truncated to long.
    input = strtoull(argv[1], NULL, 10);

    printf("---------------------------------------------\n");
    query_device();   // print device info
    printf("---------------------------------------------\n");
    printf("--- Flipping [%llu] sets of 16 coins ---\n", (input * 1024));
    printf("---------------------------------------------\n\n");

    // Wall-clock timer over the whole experiment.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);

    // Desired sequence (H = 1, T = 0): HHT HT HHHT HHHT HT H
    long long succ = 0, fail = 0;   // 64-bit so huge run counts cannot overflow
    const int flips[] = { 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1 };

    // MTGP32 generator: every bit of each 32-bit output is random.
    // The original passed CURAND_ORDERING_PSEUDO_BEST (an ordering enum) as
    // the *seed*; seed and ordering are two separate calls.
    curandGenerator_t genGPU;
    curandCreateGenerator(&genGPU, CURAND_RNG_PSEUDO_MTGP32);
    curandSetPseudoRandomGeneratorSeed(genGPU, 1234ULL);   // fixed seed: reproducible runs
    curandSetGeneratorOrdering(genGPU, CURAND_ORDERING_PSEUDO_BEST);

    // 1024 rows of 16 coins = 16384 samples per batch ("the most efficient
    // use of MTGP32 is to generate a multiple of 16384 samples").
    const int x = 16, y = 1024, n = x * y;
    unsigned int GPU[n], *d_GPU;
    cudaMalloc(&d_GPU, n * sizeof(unsigned int));

    printf("Now THIS is pod racing!\n");
    for (unsigned long long z = 1; z <= input; z++) {
        // Generate one batch on the device and copy it back.
        curandGenerate(genGPU, d_GPU, n);
        cudaMemcpy(GPU, d_GPU, n * sizeof(unsigned int), cudaMemcpyDeviceToHost);

        for (int i = 0; i < y; ++i) {       // row: one set of 16 flips
            for (int j = 0; j < x; ++j) {   // column: flip within the set
                // Row-major offset.  The original used i + j, which made
                // neighbouring rows overlap and re-check the same words.
                int offset = i * x + j;
                GPU[offset] %= 2;           // reduce the random word to a coin
                if (GPU[offset] == flips[j]) {
                    if (j == 15) {          // whole sequence matched
                        succ++;
                        break;
                    }
                } else {
                    fail++;                 // first mismatch ends this set
                    break;
                }
            }
        }
    }

    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    float seconds = (milliseconds / 1000);

    // Probability estimate = matches / total sets.  The original divided by
    // fail alone, which only approximates the probability.
    long long total = succ + fail;
    long double ratio = total ? ((long double)succ / (long double)total) : 0.0L;
    long double diff = fabsl(ratio - goldenNum);   // fabsl: long double variant

    printf("\nMatches: %lld\nFailed : %lld\n\n", succ, fail);
    printf("Time (sec) : %f\n", seconds);
    printf("Achieved Ratio : %1.12Lf \nProbability : %1.12Lf\nDifference : %1.12Lf\n", ratio, goldenNum, diff);
    printf("---------------------------------------------\n");

    // Release resources BEFORE resetting the device; the original reset
    // first, invalidating the generator and allocation it then destroyed.
    curandDestroyGenerator(genGPU);
    cudaFree(d_GPU);
    cudaDeviceReset();
    return 0;
}

/* Prints basic properties of CUDA device 0. */
void query_device()
{
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount == 0) {
        printf("No CUDA support device found");
        return;   // nothing to query
    }
    int devNo = 0;
    cudaDeviceProp d_prop;
    cudaGetDeviceProperties(&d_prop, devNo);
    printf("Device %d: %s\n", devNo, d_prop.name);
    printf(" Number of multiprocessors: %d\n", d_prop.multiProcessorCount);
    printf(" Clock rate: %d\n", d_prop.clockRate);
    printf(" Compute capability: %d.%d\n", d_prop.major, d_prop.minor);
    printf(" Total amount of global memory: %4.2f KB\n", d_prop.totalGlobalMem / 1024.0);
}
12,867
#include <iostream>
#include <vector>

#define N 60000

using namespace std;

// Element-wise vector addition: c[i] = a[i] + b[i] for i in [0, N).
__global__ void add(float *a, float *b, float *c)
{
    // Full global index; the original used blockIdx.x alone, which only
    // works with one thread per block.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)                       // guard: grid may overshoot N
        c[tid] = a[tid] + b[tid];
}

int main(void)
{
    // Host vectors.
    vector<float> firstVec(N, 1.11f);
    vector<float> secondVec(N, 3.01f);
    vector<float> resultsVec(N, 0.0f);

    // GPU memory allocation.
    float *dev_a, *dev_b, *dev_c;
    cudaMalloc((void**)&dev_a, N * sizeof(float));
    cudaMalloc((void**)&dev_b, N * sizeof(float));
    cudaMalloc((void**)&dev_c, N * sizeof(float));

    // Copy data in direction HostToDevice.
    cudaMemcpy(dev_a, &firstVec[0], N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, &secondVec[0], N * sizeof(float), cudaMemcpyHostToDevice);

    // Launch with full 256-thread blocks; the original used one thread per
    // block (60000 blocks), leaving 31 of every 32 warp lanes idle.
    const int threads = 256;
    const int blocks = (N + threads - 1) / threads;   // ceil-div
    add<<<blocks, threads>>>(dev_a, dev_b, dev_c);

    // Copy results c[] in direction DeviceToHost (blocking: syncs with kernel).
    cudaMemcpy(&resultsVec[0], dev_c, N * sizeof(float), cudaMemcpyDeviceToHost);

    // Show results.
    for (int i = 0; i < 5; i++)
        cout << firstVec[i] << " + " << secondVec[i] << " = " << resultsVec[i] << endl;

    // Free GPU memory.
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    cudaDeviceReset();
    return 0;
}
12,868
#include "includes.h"

// For each tile, accumulates the stored per-image-pair shifts into a single
// displacement of `imageToTrack` relative to `referenceImage`.  bestShifts
// holds (imageCount - 1) consecutive pair shifts per tile; walking forward
// adds them, walking backward negates the forward sum.
__global__ void getOptimalShifts( float2 * __restrict__ optimalShifts, const float2 * __restrict__ bestShifts, int imageCount, int tileCountX, int tileCountY, int optimalShiftsPitch, int referenceImage, int imageToTrack)
{
    const int tx = blockIdx.x * blockDim.x + threadIdx.x;
    const int ty = blockIdx.y * blockDim.y + threadIdx.y;
    if (tx >= tileCountX || ty >= tileCountY)
        return;

    const int pairCount = imageCount - 1;
    const float2* pairShifts = bestShifts + (tx + ty * tileCountX) * pairCount;

    float2 total = make_float2(0, 0);
    if (referenceImage < imageToTrack) {
        // Walk forward from the reference: add each intermediate shift.
        for (int i = referenceImage; i < imageToTrack; i++) {
            total.x += pairShifts[i].x;
            total.y += pairShifts[i].y;
        }
    } else if (imageToTrack < referenceImage) {
        // Walk backward: subtract the intermediate shifts instead.
        for (int i = imageToTrack; i < referenceImage; i++) {
            total.x -= pairShifts[i].x;
            total.y -= pairShifts[i].y;
        }
    }

    // optimalShiftsPitch is in bytes: row ty starts optimalShiftsPitch * ty
    // bytes past the base pointer.
    float2* outRow = (float2*)((char*)optimalShifts + optimalShiftsPitch * ty);
    outRow[tx] = total;
}
12,869
// Este codigo calcula (u[i]-u_m)/u_d
// Normalizes a vector on the CPU and verifies the result against a
// recomputed reference.

#include <stdio.h>
#include <stdlib.h>   /* malloc, atoi, exit were previously used without a prototype */

/* Element-wise normalization: lu[idx] = (u[idx] - u_m) / u_d for idx in [0, n). */
void operacionCPU(float* u, float* lu, float u_m, float u_d, int n)
{
    for (int idx = 0; idx < n; idx++) {
        lu[idx] = (u[idx] - u_m) / u_d;
    }
}

int main(int argc, char** argv)
{
    unsigned int n;
    if (argc == 1) {
        n = 25;                 /* default length (the usage text says 10,000 —
                                   inconsistent in the original; kept as-is) */
    } else if (argc == 2) {
        n = atoi(argv[1]);
    } else {
        printf("\n Parametros no validos!"
               "\n Uso: ./derivCPU # Vector of longitud 10,000"
               "\n Uso: ./derivCPU <m> # Vector of longitud m"
               "\n");
        exit(1);                /* invalid usage is an error, not success */
    }

    const int u_m = 0;          /* normalization offset */
    const int u_d = 255;        /* normalization scale (8-bit range) */

    int size = n * sizeof(float);
    float* u_h = (float*)malloc(size);
    float* lu_h = (float*)malloc(size);
    if (u_h == NULL || lu_h == NULL) {
        printf("malloc failed\n");
        exit(1);
    }
    for (unsigned int i = 0; i < n; i++) {
        u_h[i] = i;
    }

    operacionCPU(u_h, lu_h, u_m, u_d, n);

    /* Verify element-wise against a recomputed reference.  Note: for i == 0
       both sides are 0, so errorRelativo is NaN and the comparisons are
       false — same (benign) behavior as the original. */
    const float toleranciaRelativa = 1e-4;
    for (unsigned int i = 0; i < n; i++) {
        float operBasic = (u_h[i] - u_m) / u_d;
        float errorRelativo = (operBasic - lu_h[i]) / operBasic;
        if (errorRelativo > toleranciaRelativa || errorRelativo < -toleranciaRelativa) {
            printf("PRUEBA FALLIDA\n\n");
            exit(1);            /* signal failure to the caller */
        }
    }
    printf("PRUEBA SUPERADA\n\n");

    if (n == 25) {              /* only echo the values for the tiny default case */
        for (unsigned int i = 0; i < n; i++) {
            printf("%10.8f\t", lu_h[i]);
        }
        printf("\n");
    }

    free(u_h);
    free(lu_h);
    return 0;
}
12,870
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>

using namespace std;

#define SIZE 1000

// Fills both SIZE x SIZE input matrices with the constant 5.5.
__global__ void allocateMemory(float *d_mat_A, float *d_mat_B)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < SIZE && col < SIZE) {
        d_mat_A[row * SIZE + col] = 5.5;
        d_mat_B[row * SIZE + col] = 5.5;
    }
}

// C = A * B for row-major SIZE x SIZE matrices.  Accumulates into a register
// and writes once, so d_mat_C needs no pre-zeroing; the original summed
// straight into uninitialized global memory (cudaMalloc does not zero).
__global__ void matrixMultiplication(float *d_mat_A, float *d_mat_B, float *d_mat_C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row < SIZE && col < SIZE) {
        float sum = 0.0f;
        for (int i = 0; i < SIZE; i++) {
            // Row of A selected by `row` (the original indexed A by `col`,
            // computing B^T-style products).
            sum += d_mat_A[row * SIZE + i] * d_mat_B[i * SIZE + col];
        }
        d_mat_C[row * SIZE + col] = sum;
    }
}

int main()
{
    float *h_mat_C = new float[SIZE*SIZE];
    float *d_mat_A, *d_mat_B, *d_mat_C;
    cudaMalloc((void**)&d_mat_A, SIZE*SIZE * sizeof(float));
    cudaMalloc((void**)&d_mat_B, SIZE*SIZE * sizeof(float));
    cudaMalloc((void**)&d_mat_C, SIZE*SIZE * sizeof(float));

    // Grid sized by ceil-div instead of the hard-coded (40, 2500) overkill.
    dim3 threadsPerBlock(256, 4);
    dim3 blocksPerGrid((SIZE + threadsPerBlock.x - 1) / threadsPerBlock.x,
                       (SIZE + threadsPerBlock.y - 1) / threadsPerBlock.y);

    allocateMemory<<<blocksPerGrid, threadsPerBlock>>>(d_mat_A, d_mat_B);
    matrixMultiplication<<<blocksPerGrid, threadsPerBlock>>>(d_mat_A, d_mat_B, d_mat_C);
    cudaDeviceSynchronize();

    cudaMemcpy(h_mat_C, d_mat_C, SIZE*SIZE * sizeof(float), cudaMemcpyDeviceToHost);

    // Print the top-left 5x5 corner as a sanity check.
    for (int i = 0; i < 5; i++) {
        for (int j = 0; j < 5; j++) {
            printf("%.8f \t", h_mat_C[i * SIZE + j]);
        }
        printf("\n");
    }

    cudaFree(d_mat_A);
    cudaFree(d_mat_B);
    cudaFree(d_mat_C);
    delete[] h_mat_C;
    return 0;
}
12,871
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

int DEBUG = 1;
char* data;

/* Returns a heap-allocated identity suffix list [0, 1, ..., len-1]. */
int* get_list(int len)
{
    int* suffix_list = (int*)malloc(len * sizeof(int));
    for (int i = 0; i < len; i++) {
        suffix_list[i] = i;
    }
    return suffix_list;
}

/* In-place quicksort of x[first..last], first element as pivot. */
void quicksort(int* x, int first, int last)
{
    if (first < last) {
        int pivot = first;
        int i = first;
        int j = last;
        while (i < j) {
            while (x[i] <= x[pivot] && i < last) i++;
            while (x[j] > x[pivot]) j--;
            if (i < j) {
                /* int temporary: the original swapped through a float,
                   which would corrupt values above 2^24 */
                int temp = x[i];
                x[i] = x[j];
                x[j] = temp;
            }
        }
        int temp = x[pivot];
        x[pivot] = x[j];
        x[j] = temp;
        quicksort(x, first, j - 1);
        quicksort(x, j + 1, last);
    }
}

/* Prints a space-separated list followed by a newline. */
void print_suffix_list(int* list, int len)
{
    for (int i = 0; i < len; i++) {
        printf("%d", list[i]);
        if (i != (len - 1)) printf(" ");
    }
    printf("\n");
}

/* Recursive top-down merge sort of a[i..j], using aux[] as scratch. */
void merge_sort(int i, int j, int a[], int aux[])
{
    if (j <= i) {
        return;                     /* empty or single-element range */
    }
    int mid = (i + j) / 2;
    merge_sort(i, mid, a, aux);     /* sort left half a[i..mid] */
    merge_sort(mid + 1, j, a, aux); /* sort right half a[mid+1..j] */

    /* Merge the two sorted halves into aux[i..j]. */
    int pointer_left = i;           /* next unconsumed element of the left half */
    int pointer_right = mid + 1;    /* next unconsumed element of the right half */
    for (int k = i; k <= j; k++) {
        if (pointer_left == mid + 1) {          /* left half exhausted */
            aux[k] = a[pointer_right++];
        } else if (pointer_right == j + 1) {    /* right half exhausted */
            aux[k] = a[pointer_left++];
        } else if (a[pointer_left] < a[pointer_right]) {
            aux[k] = a[pointer_left++];
        } else {
            aux[k] = a[pointer_right++];
        }
    }
    for (int k = i; k <= j; k++) {  /* copy the merged run back into a[] */
        a[k] = aux[k];
    }
}

int main(int argc, char* argv[])
{
    clock_t start_m, end_m;
    double runTime_m;
    int size_m = 50;
    int a[100], aux[100];

    /* rand() is not seeded, so the input is the same every run. */
    for (int i = 0; i < size_m; i++) {
        a[i] = rand() % 50;
    }

    start_m = clock();
    merge_sort(0, size_m - 1, a, aux);
    end_m = clock();
    runTime_m = (end_m - start_m) / (double)CLOCKS_PER_SEC;

    printf("Printing the sorted array:\n");
    for (int i = 0; i < size_m; i++)
        printf(" %d, ", a[i]);
    printf("\n");
    printf("Mergesort sequential size: %d, and runtime: %f\n", size_m, runTime_m);
    return 0;
}
12,872
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <iostream>

int num;  // element count of the most recently read data file (set by read_data)

/*
 * Read a data file laid out as: an int count on the first line, then that
 * many floats. Sets the global `num` to the count and returns a malloc'd
 * array the caller must free. Exits on open failure.
 */
float* read_data(char c1[]) {
    FILE* file = fopen(c1, "r");
    if (file == NULL) {  // bug fix: fopen() result was never checked
        fprintf(stderr, "Cannot open %s\n", c1);
        exit(EXIT_FAILURE);
    }
    fscanf(file, "%d", &num);
    float* fobj = (float*)malloc(sizeof(float) * num);
    float fint;
    for (int i = 0; i < num; ++i) {
        fscanf(file, "%f", &fint);
        fobj[i] = fint;
    }
    fclose(file);  // bug fix: the FILE handle was leaked
    return fobj;
}

/*
 * Add two float vectors on the GPU via thrust::transform, write the result
 * to argv[4], then read it back and compare against the expected output.
 * argv[1] = expected output, argv[2]/argv[3] = inputs, argv[4] = result file.
 */
int main(int argc, char* argv[]) {
    std::cout << num;  // still 0 here; kept from the original for output parity

    float* input1 = read_data(argv[3]);
    float* input2 = read_data(argv[2]);
    float* expectedoutput = read_data(argv[1]);

    // Stage the inputs in host vectors, then mirror them on the device.
    thrust::host_vector<float> h_vec1(num);
    for (int i = 0; i < num; ++i) h_vec1[i] = input1[i];
    thrust::host_vector<float> h_vec2(num);
    for (int i = 0; i < num; ++i) h_vec2[i] = input2[i];
    thrust::host_vector<float> h_out(num);

    thrust::device_vector<float> d_vec1 = h_vec1;
    thrust::device_vector<float> d_vec2 = h_vec2;
    thrust::device_vector<float> d_out(num);

    // Element-wise vector addition on the device.
    thrust::transform(d_vec1.begin(), d_vec1.end(), d_vec2.begin(),
                      d_out.begin(), thrust::plus<float>());
    h_out = d_out;

    // Write the result file: count first, then one value per line.
    FILE* outp = fopen(argv[4], "w");
    fprintf(outp, "%d", num);
    for (int i = 0; i < num; ++i) {
        fprintf(outp, "\n%.2f", h_out[i]);
    }
    fclose(outp);  // bug fix: the file was re-read below without being closed
                   // first, so verification could see unflushed/stale data

    // Read the result back and compare against the expected output.
    float* recievedoutput = read_data(argv[4]);
    int matchflag = 0;
    for (long i = 0; i < num; i++) {  // bug fix: was hard-coded to 3987 elements
        if (fabs(recievedoutput[i] - expectedoutput[i]) >= 0.001) {
            matchflag = i + 1;
            std::cout << " xxxzzz" << recievedoutput[i] << "ccc" << expectedoutput[i] << "\n";
            break;
        }
    }
    std::cout << "xxxxxx" << matchflag;
    if (matchflag == 0)
        std::cout << " matched";
    else
        std::cout << "not matched";

    free(input1);
    free(input2);
    free(expectedoutput);
    free(recievedoutput);
    return 0;
}
12,873
#ifndef _PRESCAN_CU_ #define _PRESCAN_CU_ // includes, kernels #include <assert.h> #define NUM_BANKS 16 #define LOG_NUM_BANKS 4 // MP4.2 - You can use any other block size you wish. #define BLOCK_SIZE 256 // MP4.2 - Host Helper Functions (allocate your own data structure...) // MP4.2 - Device Functions // MP4.2 - Kernel Functions // **===-------- MP4.2 - Modify the body of this function -----------===** // You may need to make multiple kernel calls, make your own kernel // function in this file, and then call them from here. void prescanArray(float *outArray, float *inArray, int numElements) { } // **===-----------------------------------------------------------===** #endif // _PRESCAN_CU_
12,874
#include "DeviceCalculations.cuh" #include <thrust/device_vector.h> #include <thrust/iterator/zip_iterator.h> #include <thrust/transform.h> #include <thrust/reduce.h> #include <curand.h> struct inside_circle { __device__ int8_t operator()(const thrust::tuple<float, float>& p) const { return (((thrust::get<0>(p) - 0.5) * (thrust::get<0>(p) - 0.5) + (thrust::get<1>(p) - 0.5) * (thrust::get<1>(p) - 0.5)) < 0.25) ? 1 : 0; } }; __host__ size_t calc_on_device(size_t numberOfPoints) { thrust::device_vector<float> pointsX(numberOfPoints); thrust::device_vector<float> pointsY(numberOfPoints); // Generate random points using cuRAND curandGenerator_t generator; curandCreateGenerator(&generator, /*CURAND_RNG_QUASI_DEFAULT*/CURAND_RNG_PSEUDO_DEFAULT); curandGenerateUniform(generator, thrust::raw_pointer_cast(pointsX.data()), numberOfPoints); curandGenerateUniform(generator, thrust::raw_pointer_cast(pointsY.data()), numberOfPoints); // Count points inside circle using reduction from Thrust thrust::device_vector<int8_t> insideCircle(numberOfPoints); auto first = thrust::make_zip_iterator(thrust::make_tuple(pointsX.begin(), pointsY.begin())); auto last = thrust::make_zip_iterator(thrust::make_tuple(pointsX.end() , pointsY.end() )); thrust::transform(first, last, insideCircle.begin(), inside_circle()); size_t total = thrust::reduce(insideCircle.begin(), insideCircle.end(), (size_t)0, thrust::plus<size_t>()); return total; }
12,875
/* Molecular dynamics simulation linear code for binary Lennard-Jones liquid under NVE ensemble; Author: You-Liang Zhu, Email: youliangzhu@ciac.ac.cn Copyright: You-Liang Zhu This code is free: you can redistribute it and/or modify it under the terms of the GNU General Public License.*/ #include <ctype.h> #include <cuda_runtime.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> // periodic boundary condition __global__ void myFirstKernel() { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < 9) printf("hello world, thread %d \n", idx); } int main() { timeval start; // start time timeval end; // end time gettimeofday(&start, NULL); // get start time dim3 dimGrid(1); // grid dim3 dimBlock(32); // block // kernel function myFirstKernel<<<dimGrid, dimBlock>>>(); // block until the device has completed cudaThreadSynchronize(); gettimeofday(&end, NULL); // get end time long timeusr = (end.tv_sec - start.tv_sec) * 1000000 + (end.tv_usec - start.tv_usec); printf("time is %ld microseconds\n", timeusr); // the spending time on simulation in microseconds return 0; }
12,876
#include <iostream> #include <random> #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/functional.h> // nvcc -O3 media_movel.cu -o t1 && ./t1 < in.txt struct MediaMovel { double *serie; MediaMovel(double *serie) : serie(serie){}; __host__ __device__ double operator()(const int &i) { if (i < 6) return 0; double mean = 0.0; for (int j = 0; j < 7; j++) { mean += serie[j + i - 6] / 7; } return mean; } }; int main() { thrust::host_vector<double> serie; while (std::cin.good()) { double t; std::cin >> t; serie.push_back(t); } thrust::device_vector<double> serie_device(serie); thrust::device_vector<double> media_movel(serie.size()); thrust::counting_iterator<int> iter(0); MediaMovel mm(serie_device.data().get()); thrust::transform(iter, iter + serie.size(), media_movel.begin(), mm); for (auto i = media_movel.begin(); i != media_movel.end(); i++) { std::cout << *i << "\n"; // este acesso é lento! -- GPU } }
12,877
/**
 * Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
 *
 * Please refer to the NVIDIA end user license agreement (EULA) associated
 * with this source code for terms and conditions that govern your use of
 * this software. Any use, reproduction, disclosure, or distribution of
 * this software and related documentation outside the terms of the EULA
 * is strictly prohibited.
 *
 */

/**
 * Vector addition: C = A + B.
 *
 * This sample is a very basic sample that implements element by element
 * vector addition. It is the same as the sample illustrating Chapter 2
 * of the programming guide with some additions like error checking.
 */

#include <stdio.h>
#include <stdlib.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

/**
 * CUDA Kernel Device code
 *
 * Computes the vector addition of A and B into C. The 3 vectors have the same
 * number of elements numElements. One thread per element; the bounds check
 * covers the grid tail when numElements is not a multiple of the block size.
 */
__global__ void VecAdd(float* A, float* B, float* C, int numElements)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i < numElements)
        C[i] = A[i] + B[i];
}

/**
 * Host main routine: allocate/initialize host and device buffers, run the
 * kernel, copy the result back and print it. Every CUDA call is checked.
 */
int main(void)
{
    // Error code to check return values for CUDA calls
    cudaError_t err = cudaSuccess;

    // Print the vector length to be used, and compute its size
    int numElements = 20;
    size_t size = numElements * sizeof(float);
    printf("[Vector addition of %d elements]\n", numElements);

    float* h_A = (float*) malloc(size);
    float* h_B = (float*) malloc(size);
    float* h_C = (float*) malloc(size);
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "Error allocating the host buffer!\n");
        return EXIT_FAILURE;
    }

    // initialize host vectors with uniform values in [0, 1]
    for (int i = 0; i < numElements; i++)
    {
        h_A[i] = rand() / (float)RAND_MAX;
        h_B[i] = rand() / (float)RAND_MAX;
        printf("init: %f | %f\n", h_A[i], h_B[i]);
    }

    float* d_A, *d_B, *d_C;
    // bug fix: `size` is a size_t; it was printed with %d (format mismatch),
    // corrected to %zu in the three allocation error messages below
    err = cudaMalloc((void**) &d_A, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Error allocating %zu bytes on the device!\nError: %s\n", size, cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    err = cudaMalloc((void**) &d_B, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Error allocating %zu bytes on the device!\nError: %s\n", size, cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    err = cudaMalloc((void**) &d_C, size);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Error allocating %zu bytes on the device!\nError: %s\n", size, cudaGetErrorString(err));
        return EXIT_FAILURE;
    }

    printf("copy vectors to the device!\n");
    err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Error copying buffer A to the device: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }
    err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Error copying buffer B to the device: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }

    // call kernel: ceil-div so the grid covers all elements
    int threadsPerBlock = 256;
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    VecAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Error invoking kernel function: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }

    // blocking device-to-host copy also synchronizes with the kernel
    err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Error copying result buffer C back to the host: %s\n", cudaGetErrorString(err));
        return EXIT_FAILURE;
    }

    for (int i = 0; i < numElements; i++)
    {
        printf("%d: %f + %f = %f\n", i, h_A[i], h_B[i], h_C[i]);
    }

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    printf("Done\n");
    return 0;
}
12,878
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>

#define N 16777216
#define THREADS_PER_BLOCK 1024

// Assignment step of k-means with k = 3 on 2-D points.
// `data` holds N records of 3 doubles each (x, y, and a third field that is
// read from the file but ignored here). Each thread assigns one point to its
// nearest centroid, scatters the point into the matching clstrN buffer
// (zeroing its slot in the other two), and accumulates per-cluster counts.
__global__ void KMeansClustering(double *centroids,double *data,double *clstr1,double *clstr2,double *clstr3,int n,int *noOfCPoints)
{
    // tid indexes the 3-double input record; index the 2-double output slot
    int tid = (blockIdx.x*blockDim.x +threadIdx.x)*3;
    if(tid<3*n){
        int index = (blockIdx.x*blockDim.x +threadIdx.x)*2;
        //__shared__ int clusters_point_count[3][THREADS_PER_BLOCK];
        __shared__ int s_cluster[3];
        // NOTE(review): every thread of the block writes these zeros (same
        // value, so benign), then the barrier makes the init visible.
        s_cluster[0] = 0;
        s_cluster[1] = 0;
        s_cluster[2] = 0;
        // for(int i=0;i<blockDim.x;i++){
        //  clusters_point_count[0][i] = 0;
        //  clusters_point_count[1][i] = 0;
        //  clusters_point_count[2][i] = 0;
        //}
        __syncthreads();
        double data_x = data[tid];
        double data_y = data[tid+1];
        double *cluster[3];
        // squared Euclidean distance to each of the three centroids
        double d_1 = pow(data_x-centroids[0],2)+pow(data_y-centroids[1],2);
        double d_2 = pow(data_x-centroids[2],2)+pow(data_y-centroids[3],2);
        double d_3 = pow(data_x-centroids[4],2)+pow(data_y-centroids[5],2);
        cluster[0] = clstr1;
        cluster[1] = clstr2;
        cluster[2] = clstr3;
        // argmin of (d_1, d_2, d_3) expressed as a nested ternary
        int clusterIndex = d_1 > d_2 ? d_2 > d_3 ? 2 : 1 : d_1 < d_3 ? 0: 2 ;
        // write the point into its cluster buffer; zero its slot elsewhere so
        // the later sum-reduction over each buffer only counts members
        for(int i=0;i<3;i++){
            if(i!=clusterIndex){
                double * clusterPtr = cluster[i];
                clusterPtr[index] = 0.0;
                clusterPtr[index+1] = 0.0;
            }else{
                double * clusterPtr = cluster[clusterIndex];
                clusterPtr[index] = data_x;
                clusterPtr[index+1] = data_y;
            }
        }
        atomicAdd(&s_cluster[clusterIndex],1);
        __syncthreads();
        // first three threads flush the block-local counts to global memory
        if(threadIdx.x < 3){
            atomicAdd(&noOfCPoints[threadIdx.x],s_cluster[threadIdx.x]);
        }
    }
}

// One round of a block-level pairwise sum over three (x, y) pair arrays at
// once; each block leaves its partial sums in elements [blockIdx.x*2] and
// [blockIdx.x*2+1] of each array, so the host calls this repeatedly to
// collapse N pairs down to a handful.
// NOTE(review): the __syncthreads() below sit inside divergent control flow
// (guarded by `tid < n` and by the shrinking `threadIdx.x < stride/2` loop
// condition), and elements past n are read uninitialized by the reduction --
// both are latent race/UB hazards worth confirming with compute-sanitizer.
__global__ void sumCluster(double *cluster1,double *cluster2,double *cluster3,int n){
    int tid = (blockIdx.x*blockDim.x +threadIdx.x)*2;
    __shared__ double shared_data_1[THREADS_PER_BLOCK*2];
    __shared__ double shared_data_2[THREADS_PER_BLOCK*2];
    __shared__ double shared_data_3[THREADS_PER_BLOCK*2];
    if(tid < n){
        // stage this thread's (x, y) pair from each cluster buffer
        shared_data_1[2*threadIdx.x] = cluster1[tid];
        shared_data_1[2*threadIdx.x+1] = cluster1[tid+1];
        shared_data_2[2*threadIdx.x] = cluster2[tid];
        shared_data_2[2*threadIdx.x+1] = cluster2[tid+1];
        shared_data_3[2*threadIdx.x] = cluster3[tid];
        shared_data_3[2*threadIdx.x+1] = cluster3[tid+1];
        __syncthreads();
    }
    int stride = blockDim.x;
    // tree reduction over pairs: adding `stride` to the doubled index reaches
    // the pair that lives stride/2 threads away
    while((stride >= 2) && (threadIdx.x < stride/2)){
        shared_data_1[2*threadIdx.x] += shared_data_1[2*threadIdx.x+stride];
        //addition for y
        shared_data_1[2*threadIdx.x+1]+=shared_data_1[2*threadIdx.x+stride+1];
        //addition for x
        shared_data_2[2*threadIdx.x]+=shared_data_2[2*threadIdx.x+stride];
        //addition for y
        shared_data_2[2*threadIdx.x+1]+=shared_data_2[2*threadIdx.x+stride+1];
        //addition for x
        shared_data_3[2*threadIdx.x]+=shared_data_3[2*threadIdx.x+stride];
        //addition for y
        shared_data_3[2*threadIdx.x+1]+=shared_data_3[2*threadIdx.x+stride+1];
        __syncthreads();
        stride = stride>>1;
    }
    // thread 0 publishes this block's (x, y) partial sums
    if(threadIdx.x == 0){
        cluster1[blockIdx.x*2] = shared_data_1[threadIdx.x];
        cluster1[blockIdx.x*2+1] = shared_data_1[threadIdx.x+1];
        cluster2[blockIdx.x*2] = shared_data_2[threadIdx.x];
        cluster2[blockIdx.x*2+1] = shared_data_2[threadIdx.x+1];
        cluster3[blockIdx.x*2] = shared_data_3[threadIdx.x];
        cluster3[blockIdx.x*2+1] = shared_data_3[threadIdx.x+1];
    }
}

// Abort with a diagnostic if a CUDA API call failed; lineNo is the caller's
// (approximate) source line.
void checkCudaError(cudaError_t error,int lineNo){
    if (error !=cudaSuccess) {
        printf("%s in %s at line %d\n", cudaGetErrorString(error),__FILE__,lineNo);
        exit(EXIT_FAILURE);
    }
}

// Host driver: load N points from CSV, pick 3 random initial centroids, then
// iterate assign-step kernel + multi-pass sum reduction until the centroids
// stop changing exactly. Reports centroids, throughput, and elapsed time.
int main(int argc, char *argv[])
{
    cudaSetDevice(0);
    FILE *inFile = fopen("16777216_CLUSTER_DATA.csv", "r");
    if(inFile == NULL){
        printf("Unable to read the data from the file");
        exit(1);
    }
    //Host memory allocation
    double *host_data = (double *)malloc(sizeof(double)*N*3);
    //CUDA memory allocation
    double *dev_data;
    cudaError_t error = cudaMalloc(&dev_data,N*3*sizeof(double));
    checkCudaError(error,__LINE__-1);
    for(int i =0;i<N;i++){
        fscanf(inFile, "%lf,%lf,%lf\n", &host_data[i*3],&host_data[i*3+1],&host_data[i*3+2]);
    }
    double *host_cluster_1 = (double *)calloc(N*2,sizeof(double));
    double *host_cluster_2 = (double *)calloc(N*2,sizeof(double));
    double *host_cluster_3 = (double *)calloc(N*2,sizeof(double));
    double *dev_c_1;
    double *dev_c_2;
    double *dev_c_3;
    error = cudaMalloc((void**)&dev_c_1,N*2*sizeof(double));
    checkCudaError(error,__LINE__-1);
    error = cudaMalloc((void**)&dev_c_2,N*2*sizeof(double));
    checkCudaError(error,__LINE__-1);
    error = cudaMalloc((void**)&dev_c_3,N*2*sizeof(double));
    checkCudaError(error,__LINE__-1);
    double* host_centroids = (double*)malloc(6*sizeof(double));
    double* dev_centroids;
    error = cudaMalloc((void**)&dev_centroids,6*sizeof(double));
    checkCudaError(error,__LINE__-1);
    //Randomly initialising K centroids for the clusters
    // (fixed seed keeps runs reproducible)
    srand(41);
    int index1 = (rand() % N )*3;
    host_centroids[0] = host_data[index1];
    host_centroids[1] = host_data[index1+1];
    int index2 = (rand() % N)*3;
    host_centroids[2] = host_data[index2];
    host_centroids[3] = host_data[index2+1];
    int index3 = (rand() % N)*3;
    host_centroids[4] = host_data[index3];
    host_centroids[5] = host_data[index3+1];
    printf("Initial Centroid Estimate\n");
    for(int i=0;i<=4;i+=2){
        printf("centroid[%d][0] = %lf centroid[%d][1] = %lf\n",i,host_centroids[i],i,host_centroids[i+1]);
    }
    //Data transfer to GPU
    /* error = cudaMemcpy(dev_centroids,host_centroids,6*sizeof(double),cudaMemcpyHostToDevice);
    checkCudaError(error,__LINE__-1);*/
    error = cudaMemcpy(dev_data,host_data,N*3*sizeof(double),cudaMemcpyHostToDevice);
    checkCudaError(error,__LINE__-1);
    int *h_noOfCPoints = (int*)calloc(3,sizeof(int));
    int *c_noOfCPoints;
    error = cudaMalloc((void**)&c_noOfCPoints,3*sizeof(int));
    checkCudaError(error,__LINE__-1);
    error = cudaMemcpy(c_noOfCPoints,h_noOfCPoints,3*sizeof(int),cudaMemcpyHostToDevice);
    checkCudaError(error,__LINE__-1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    double previous_centroids[6];
    int noOfIterations = 0;
    // Lloyd's iteration: repeat until centroids are bit-for-bit unchanged
    while(1){
        noOfIterations++;
        for(int i=0;i<6;i++){
            previous_centroids[i] = host_centroids[i] ;
        }
        error = cudaMemcpy(dev_centroids,host_centroids,6*sizeof(double),cudaMemcpyHostToDevice);
        checkCudaError(error,__LINE__-1);
        // reset per-cluster counts before each assignment pass
        for(int i=0;i<3;i++){
            h_noOfCPoints[i] = 0 ;
        }
        error = cudaMemcpy(c_noOfCPoints,h_noOfCPoints,3*sizeof(int),cudaMemcpyHostToDevice);
        checkCudaError(error,__LINE__-1);
        KMeansClustering<<<N/THREADS_PER_BLOCK,THREADS_PER_BLOCK>>>(dev_centroids,dev_data,dev_c_1,dev_c_2,dev_c_3,N,c_noOfCPoints);
        error = cudaGetLastError();
        checkCudaError(error,__LINE__-2);
        error = cudaMemcpy(h_noOfCPoints,c_noOfCPoints,3*sizeof(int),cudaMemcpyDeviceToHost);
        checkCudaError(error,__LINE__-1);
        //printf("\ncluster points %d %d %d\n",h_noOfCPoints[0],h_noOfCPoints[1],h_noOfCPoints[2]);
        // Multi-pass reduction: each pass shrinks the pair count by the block
        // size; the tail (< 32 pairs) is summed on the host.
        int blockSize = THREADS_PER_BLOCK;
        int temp = N;
        while(1){
            if(temp>blockSize){
                sumCluster<<<temp/blockSize,blockSize>>>(dev_c_1,dev_c_2,dev_c_3,temp*2);
                error = cudaGetLastError();
                checkCudaError(error,__LINE__-2);
            }
            else if (temp >= 32){
                sumCluster<<<1,temp>>>(dev_c_1,dev_c_2,dev_c_3,temp*2);
                error = cudaGetLastError();
                //printf("%d,%d\n",temp,blockSize);
                checkCudaError(error,__LINE__-2);
                break;
            }
            else{
                // too few pairs for a kernel: finish the sum on the host
                error = cudaMemcpy(host_cluster_1,dev_c_1,N*2*sizeof(double),cudaMemcpyDeviceToHost);
                checkCudaError(error,__LINE__-1);
                error = cudaMemcpy(host_cluster_2,dev_c_2,N*2*sizeof(double),cudaMemcpyDeviceToHost);
                checkCudaError(error,__LINE__-1);
                error = cudaMemcpy(host_cluster_3,dev_c_3,N*2*sizeof(double),cudaMemcpyDeviceToHost);
                checkCudaError(error,__LINE__-1);
                for(int i = 1 ; i < temp ; i++){
                    host_cluster_1[0] += host_cluster_1[2*i];
                    host_cluster_1[1] += host_cluster_1[2*i+1];
                    host_cluster_2[0] += host_cluster_2[2*i];
                    host_cluster_2[1] += host_cluster_2[2*i+1];
                    host_cluster_3[0] += host_cluster_3[2*i];
                    host_cluster_3[1] += host_cluster_3[2*i+1];
                }
                break;
            }
            if(temp > blockSize){
                temp = temp/blockSize;
            }
        }
        if(temp>=32){
            // final partial sums already sit in element 0 of each device buffer
            error = cudaMemcpy(host_cluster_1,dev_c_1,N*2*sizeof(double),cudaMemcpyDeviceToHost);
            checkCudaError(error,__LINE__-1);
            error = cudaMemcpy(host_cluster_2,dev_c_2,N*2*sizeof(double),cudaMemcpyDeviceToHost);
            checkCudaError(error,__LINE__-1);
            error = cudaMemcpy(host_cluster_3,dev_c_3,N*2*sizeof(double),cudaMemcpyDeviceToHost);
            checkCudaError(error,__LINE__-1);
        }
        double sumXcluster1 = host_cluster_1[0];
        double sumYcluster1 = host_cluster_1[1];
        double sumXcluster2 = host_cluster_2[0];
        double sumYcluster2 = host_cluster_2[1];
        double sumXcluster3 = host_cluster_3[0];
        double sumYcluster3 = host_cluster_3[1];
        // new centroid = mean of assigned points
        // NOTE(review): divides by zero if a cluster ends up empty -- confirm
        // the data set guarantees non-empty clusters
        host_centroids[0] = sumXcluster1/(double)h_noOfCPoints[0];
        host_centroids[1] = sumYcluster1/(double)h_noOfCPoints[0];
        host_centroids[2] = sumXcluster2/(double)h_noOfCPoints[1];
        host_centroids[3] = sumYcluster2/(double)h_noOfCPoints[1];
        host_centroids[4] = sumXcluster3/(double)h_noOfCPoints[2];
        host_centroids[5] = sumYcluster3/(double)h_noOfCPoints[2];
        //for(int i=0;i<=4;i+=2){
        //  printf("centroid[%d][0] = %lf centroid[%d][1] = %lf\n",i,host_centroids[i],i,host_centroids[i+1]);
        //}
        // exact (bitwise) convergence test on all 6 centroid components
        int count = 0;
        for(int i=0;i<6;i++){
            if(host_centroids[i] != previous_centroids[i]){
                break;
            }
            count++;
        }
        if(count == 6){
            break;
        }
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds, start, stop);
    for(int i=0;i<=4;i+=2){
        printf("centroid[%d][0] = %lf centroid[%d][1] = %lf\n",i,host_centroids[i],i,host_centroids[i+1]);
    }
    //Total no computations for one iteration
    //16 * N for kernel 1
    //2 * N for kernel 2
    double throughput = (24 * 2.0 * noOfIterations) *N/(1000*milliseconds);
    printf("\nThroughput is %lf MFLOPS",throughput);
    printf("\nTime is %f ms",milliseconds);
    return 0;
}
12,879
#include <cstdlib> #include <cstdint> namespace memory { namespace gpu { template <typename T, typename I> __global__ void fill_kernel(T* v, T value, I n) { std::size_t tid = threadIdx.x + blockDim.x*blockIdx.x; std::size_t grid_step = blockDim.x * gridDim.x; while(tid < n) { v[tid] = value; tid += grid_step; } } void fill8(uint8_t* v, uint8_t value, std::size_t n) { auto thread_dim = int{192}; dim3 dim_block(thread_dim); dim3 dim_grid(n/dim_block.x + (n%dim_block.x ? 1 : 0)); fill_kernel<uint8_t><<<dim_grid, dim_block>>>(v, value, n); }; void fill16(uint16_t* v, uint16_t value, std::size_t n) { auto thread_dim = int{192}; dim3 dim_block(thread_dim); dim3 dim_grid(n/dim_block.x + (n%dim_block.x ? 1 : 0)); fill_kernel<uint16_t><<<dim_grid, dim_block>>>(v, value, n); }; void fill32(uint32_t* v, uint32_t value, std::size_t n) { auto thread_dim = int{192}; dim3 dim_block(thread_dim); dim3 dim_grid(n/dim_block.x + (n%dim_block.x ? 1 : 0)); fill_kernel<uint32_t><<<dim_grid, dim_block>>>(v, value, n); }; void fill64(uint64_t* v, uint64_t value, std::size_t n) { auto thread_dim = int{192}; dim3 dim_block(thread_dim); dim3 dim_grid(n/dim_block.x + (n%dim_block.x ? 1 : 0)); fill_kernel<uint64_t><<<dim_grid, dim_block>>>(v, value, n); }; } // namespace gpu } // namespace memory
12,880
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>

#define seed 13
#define block_size 16

/* Print a size x size matrix, one row per line, fixed-width columns. */
void printMatrix(float *matrix, int size) {
    int i;
    for (i = 0; i < size * size; i++) {
        if (i % size == 0 && i != 0)
            printf("\n");
        printf("%10.1f", matrix[i]);
    }
    printf("\n");
}

/*
 * Naive matmul: one thread per output element, accumulating the dot
 * product straight from global memory. Bounds guard added so a grid
 * rounded up past N x N cannot write out of range.
 */
__global__ void version_1_matrixMul(float *dev_A, float *dev_B, float *dev_C, int N) {
    float partial = 0.0;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N || j >= N)  // robustness: guard the grid tail
        return;
    int k;
    for (k = 0; k < N; k++) {
        partial += dev_A[i * N + k] * dev_B[k * N + j];
    }
    dev_C[i * N + j] = partial;
}

/*
 * Tiled matmul: each block stages a block_size x block_size tile of A and B
 * into shared memory per step and accumulates partial dot products.
 * Requires matrix_size to be a multiple of block_size.
 */
__global__ void version_2_matrixMul(float *dev_A, float *dev_B, float *dev_C, int matrix_size) {
    __shared__ float A_tile[block_size][block_size];
    __shared__ float B_tile[block_size][block_size];
    float partial = 0.0;

    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int row = by * blockDim.y + ty;
    int col = bx * blockDim.x + tx;

    int m;
    for (m = 0; m < matrix_size / blockDim.x; m++) {
        A_tile[ty][tx] = dev_A[row * matrix_size + (m * block_size + tx)];
        B_tile[ty][tx] = dev_B[col + (m * block_size + ty) * matrix_size];
        __syncthreads();  // tile fully staged before anyone reads it

        int k;
        for (k = 0; k < blockDim.x; k++)
            partial += A_tile[ty][k] * B_tile[k][tx];
        __syncthreads();  // all reads done before the tile is overwritten
    }
    // bug fix: this store was inside the tile loop, issuing a redundant
    // global write per tile step; only the final accumulated value matters
    dev_C[row * matrix_size + col] = partial;
}

/*
 * Benchmark driver: fill two random matrices, run and time the naive and
 * tiled kernels (including host<->device transfers), print elapsed times.
 */
int main(int argc, char **argv) {
    srand(seed);
    if (argc != 2) {
        printf("Usage: \n");
        printf("/lab4 <matrixSize>");
        return 1;
    }
    int matrix_size = atoi(argv[1]);

    float *h_A = (float *)malloc(matrix_size * matrix_size * sizeof(float));
    float *h_B = (float *)malloc(matrix_size * matrix_size * sizeof(float));
    float *h_C = (float *)malloc(matrix_size * matrix_size * sizeof(float));

    int i, j;
    for (i = 0; i < matrix_size; i++) {
        for (j = 0; j < matrix_size; j++) {
            h_A[i * matrix_size + j] = (float)rand() / ((float)RAND_MAX / 10.0);
            h_B[i * matrix_size + j] = (float)rand() / ((float)RAND_MAX / 10.0);
        }
    }

    float *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, matrix_size * matrix_size * sizeof(float));
    cudaMalloc((void **)&d_B, matrix_size * matrix_size * sizeof(float));
    cudaMalloc((void **)&d_C, matrix_size * matrix_size * sizeof(float));

    dim3 Block(block_size, block_size, 1);
    dim3 Grid(matrix_size / Block.x, matrix_size / Block.y, 1);

    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    printf("=====This is naive version.======\n");
    cudaEventRecord(start, 0);
    cudaMemcpy(d_A, h_A, matrix_size * matrix_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, matrix_size * matrix_size * sizeof(float), cudaMemcpyHostToDevice);
    version_1_matrixMul<<<Grid, Block>>>(d_A, d_B, d_C, matrix_size);
    cudaMemcpy(h_C, d_C, matrix_size * matrix_size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);  // bug fix: elapsed time was read before the
                                 // stop event had completed
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("For naive version, the elapsed time is %.4f(ms).\n", elapsedTime);

    printf("=====This is tiled version.======\n");
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMemcpy(d_A, h_A, matrix_size * matrix_size * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, matrix_size * matrix_size * sizeof(float), cudaMemcpyHostToDevice);
    version_2_matrixMul<<<Grid, Block>>>(d_A, d_B, d_C, matrix_size);
    cudaMemcpy(h_C, d_C, matrix_size * matrix_size * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("For tiled version, the elapsed time is %.4f(ms).\n", elapsedTime);

    /* bug fix: the events were destroyed a second time here in the original */
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(h_A);
    free(h_B);
    free(h_C);
    return 0;
}
12,881
#include <iostream> #include <sstream> #include <string> #include <ctime> std::string zeropad_number(int n) { std::stringstream ss; ss << n; std::string str; ss >> str; int len = str.length(); for (int i = 0; i < 4 - len; i++) { str = "0" + str; } return str; }; int main(){ std::cout << zeropad_number(1) << std::endl; std::cout << zeropad_number(28) << std::endl; return 0; };
12,882
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <string.h> __global__ void toggle(char *a){ int tid = threadIdx.x; if (a[tid] >= 'A' && a[tid] <= 'Z') {a[tid]+=32;} else if(a[tid] >= 'a' && a[tid] <= 'z') {a[tid] -=32;} } int main(void){ int n,size; char a[100],*d_a; printf("Enter the string to be toggled:\n"); scanf("%s",a); n=strlen(a); size = sizeof(char); cudaMalloc((void **)&d_a,size*n); cudaMemcpy(d_a,a,size*n,cudaMemcpyHostToDevice); toggle <<<1,n>>> (d_a); cudaMemcpy(a,d_a,size*n,cudaMemcpyDeviceToHost); printf("%s",a); printf("\n"); cudaFree(d_a); return 0; }
12,883
/* Explicit finite-difference pricer for an American call option.
   A single thread block solves the pricing PDE backwards in time, with one
   thread per spot-grid node. */
#include <stdio.h>
#include "cuda_profiler_api.h"

/* Scratch space positions.
   The device scratch buffer is partitioned into max_ns-sized sections: three
   rows of delta (first-derivative) coefficients, three rows of gamma
   (second-derivative) coefficients, and one section used to return the final
   option values to the host. */
const int max_ns = 1024;
const int lower_delta_pos = 0;
const int mid_delta_pos = lower_delta_pos + max_ns;
const int upper_delta_pos = mid_delta_pos + max_ns;
const int lower_gamma_pos = upper_delta_pos + max_ns;
const int mid_gamma_pos = lower_gamma_pos + max_ns;
const int upper_gamma_pos = mid_gamma_pos + max_ns;
const int matrix_equal_pos = upper_gamma_pos + max_ns;
const int scratch_space_size = matrix_equal_pos + max_ns;

/* Print a CUDA error (if any) with a caller-supplied location tag. */
void print_cuda_error(cudaError_t err, char *at)
{
    if (err)
    {
        printf("Error from CUDA at : %s\n", at);
        printf("Message: %s\n", cudaGetErrorString(err));
    }
}

/* Intrinsic value of a call: max(0, s - k). */
__device__ float call_payoff(const float s, const float k)
{
    return fmaxf(0.0f, s - k);
}

/* Build finite-difference coefficients for a (possibly non-uniform) grid.
   Thread i fills the delta/gamma coefficient slots for node i.
   Only interior nodes get gamma coefficients; explicit_step's boundary
   branches never read the coefficient rows (and the host zeroes the scratch
   buffer before launch). */
__device__ void get_coeffs(const float *const grid,
                           float *const scratch,
                           const int ns,
                           const int i)
{
    /* Difference vs. the grid below (d0) and above (d1); boundaries borrow
       the nearest interior spacings. */
    float d0;
    float d1;
    if (i == 0)
    {
        d0 = grid[1] - grid[0];
        d1 = grid[2] - grid[1];
    }
    else if (i == (ns - 1))
    {
        d0 = grid[i - 1] - grid[i - 2];
        d1 = grid[i] - grid[i - 1];
    }
    else
    {
        d0 = grid[i] - grid[i - 1];
        d1 = grid[i + 1] - grid[i];
    }

    const float d1_p_d2 = d0 + d1;

    /* Delta coeffs */
    /* Middle: central first-derivative stencil. */
    if ((i != 0) & (i != (ns - 1)))
    {
        scratch[lower_delta_pos + i] = -d1 / (d0 * d1_p_d2);
        scratch[mid_delta_pos + i] = (d1 - d0) / (d0 * d1);
        scratch[upper_delta_pos + i] = d0 / (d1 * d1_p_d2);
    }
    /* Lower boundary: one-sided (forward) stencil. */
    else if (i == 0)
    {
        scratch[lower_delta_pos + i] = (-2.0f * d0 - d1) / (d0 * d1_p_d2);
        scratch[mid_delta_pos + i] = d1_p_d2 / (d0 * d1);
        scratch[upper_delta_pos + i] = -d0 / (d1 * d1_p_d2);
    }
    /* Upper boundary: one-sided (backward) stencil. */
    else if (i == (ns - 1))
    {
        scratch[lower_delta_pos + i] = d1 / (d0 * d1_p_d2);
        scratch[mid_delta_pos + i] = (-d0 - d1) / (d0 * d1);
        scratch[upper_delta_pos + i] = (d0 + 2.0f * d1) / (d1 * d1_p_d2);
    }

    /* Gamma coeffs */
    /* Middle only — boundary gamma slots are left untouched (see note above). */
    if ((i != 0) & (i != (ns - 1)))
    {
        scratch[lower_gamma_pos + i] = 2.0f / (d0 * d1_p_d2);
        scratch[mid_gamma_pos + i] = -2.0f / (d0 * d1);
        scratch[upper_gamma_pos + i] = 2.0f / (d1 * d1_p_d2);
    }

    /* Reached uniformly by every thread of the block (no divergent path
       skips it), so the barrier is safe. */
    __syncthreads();
}

/* Populate the matrix: one explicit time step, reading option values from
   tp1 (time t+1) and writing time-t values into matrix_equal. */
__device__ void explicit_step(float *const scratch,
                              float *const matrix_equal,
                              const float *const tp1,
                              const float *const grid,
                              const float half_sigma_sq,
                              const float r,
                              const float t_inc,
                              const int ns,
                              const int i)
{
    /* Boundary conditions */
    /* s = 0.0: pure discounting, no diffusion/drift terms. */
    if (i == 0)
    {
        const float b = -r * t_inc;
        matrix_equal[0] = (1.0f + b) * tp1[0];
    }
    /* s = s_max: linearity (zero gamma) assumption at the far boundary. */
    else if (i == (ns - 1))
    {
        const float r_s = r * grid[ns - 1];
        const float a = -r_s * t_inc;
        const float b = -(r - r_s) * t_inc;
        matrix_equal[ns - 1] = a * tp1[ns - 2];
        matrix_equal[ns - 1] += (1.0f + b) * tp1[ns - 1];
    }
    /* Interior: three-point stencil built from the precomputed coefficients. */
    else if (i < ns)
    {
        const float g = half_sigma_sq * grid[i] * grid[i];
        const float r_s = r * grid[i];
        const float a = ((scratch[lower_delta_pos + i] * r_s) + (scratch[lower_gamma_pos + i] * g)) * t_inc;
        const float b = ((scratch[mid_delta_pos + i] * r_s) + (scratch[mid_gamma_pos + i] * g) - r) * t_inc;
        const float c = ((scratch[upper_delta_pos + i] * r_s) + (scratch[upper_gamma_pos + i] * g)) * t_inc;
        matrix_equal[i] = a * tp1[i - 1];
        matrix_equal[i] += (1.0f + b) * tp1[i];
        matrix_equal[i] += c * tp1[i + 1];
    }
    /* All reads of tp1 neighbours happen before any thread's next step
       overwrites them; uniform barrier. */
    __syncthreads();
}

/* Single-block kernel: alternates the roles of two shared buffers so each
   explicit step reads one and writes the other (two steps per loop
   iteration). */
__global__ void explicit_scheme(const float *const grid,
                                float *const scratch,
                                const float half_sigma_sq,
                                const float r,
                                const float t_inc,
                                const float k,
                                const int ns,
                                const int nt)
{
    const int i = threadIdx.x;
    if (ns & 0x1f)
    {
        /* Only multiple of 32 space steps are supported (condition is the
           same for every thread, so this uniform early-out is barrier-safe). */
        return;
    }

    /* Move grid to shared memory, needed for off by 1 access and reused.
       NOTE(review): shared_equal first holds the grid, then is overwritten
       with option values inside the loop — get_coeffs must run before the
       first overwrite (it does). */
    __shared__ float shared_equal[max_ns];
    shared_equal[i] = grid[i];
    __syncthreads();

    /* Build grid based coeffs, completely parallel. */
    __shared__ float shared_tp1[max_ns];
    shared_tp1[i] = call_payoff(shared_equal[i], k);
    get_coeffs(shared_equal, scratch, ns, i);

    /* Solve back in time. Two half-iterations per loop pass; if nt is odd
       the final single step is dropped (NOTE(review): confirm intended). */
    for (unsigned int j = 0; j < nt >> 1; ++j)
    {
        explicit_step(scratch, shared_equal, shared_tp1, grid, half_sigma_sq, r, t_inc, ns, i);
        /* NOTE(review): the early-exercise floor compares the option value
           against call_payoff(value), i.e. the payoff of the option value
           itself; for an American exercise check one would expect the payoff
           at the spot grid node grid[i] — confirm intent. */
        shared_equal[i] = fmaxf(shared_equal[i], call_payoff(shared_equal[i], k));
        explicit_step(scratch, shared_tp1, shared_equal, grid, half_sigma_sq, r, t_inc, ns, i);
        shared_tp1[i] = fmaxf(shared_tp1[i], call_payoff(shared_tp1[i], k));
    }

    /* Return the result to the host through the scratch buffer. */
    scratch[matrix_equal_pos + i] = shared_tp1[i];
}

/* Host driver: sets up the grid and parameters, launches the kernel and
   prints (spot, value) pairs. */
void american_call_test()
{
    /* Pricing set up */
    printf("Pricing American Call\n");
    const unsigned int ns = 1024; /* Want multiples of warp size (32) */
    const float s = 100.0f;
    const float r = 0.05f;
    const float sigma = 0.2f;
    const float half_sigma_sq = 0.5f * sigma * sigma;
    const float k = 100.0f;
    const float t = 1.0f;
    /* Explicit-scheme stability bound drives the time step size. */
    const float t_inc = 0.9f / (static_cast<float>(ns * ns) * sigma * sigma);
    const int nt = static_cast<int>(t / t_inc) + 1;

    /* Build regular grid based at 0, spanning [0, 3*s). */
    float *grid = new float [ns];
    const float s_inc = (s * 3.0f) / ns;
    for (unsigned int i = 0; i < ns; ++i)
    {
        grid[i] = i * s_inc;
    }

    print_cuda_error(cudaSetDevice(0), "Set device");

    /* Prepare device memory */
    float *dev_grid;
    print_cuda_error(cudaMalloc((void **)&dev_grid, ns * sizeof(float)), "Malloc grid");
    print_cuda_error(cudaMemcpy(dev_grid, grid, ns * sizeof(float), cudaMemcpyHostToDevice), "Copy grid to device");

    float *dev_scratch;
    print_cuda_error(cudaMalloc((void **)&dev_scratch, scratch_space_size * sizeof(float)), "Malloc scratch");
    /* Zeroing matters: boundary coefficient slots are never written on device. */
    print_cuda_error(cudaMemset(dev_scratch, 0, scratch_space_size * sizeof(float)), "Clear scratch");

    /* Call kernels: single block, one thread per grid node. */
    cudaProfilerStart();
    explicit_scheme<<<1, ns>>>(dev_grid, dev_scratch, half_sigma_sq, r, t_inc, k, ns, nt);
    cudaProfilerStop();
    print_cuda_error(cudaGetLastError(), "Kernel execution");

    float *res = new float [ns];
    print_cuda_error(cudaMemcpy(res, &dev_scratch[matrix_equal_pos], ns * sizeof(float), cudaMemcpyDeviceToHost), "Copy grid to host");
    for (unsigned int i = 0; i < ns; ++i)
    {
        printf("%.2f: %.2f\n", grid[i], res[i]);
    }

    /* Clean up */
    print_cuda_error(cudaFree(dev_grid), "Free grid");
    print_cuda_error(cudaFree(dev_scratch), "Free scratch");
    print_cuda_error(cudaDeviceReset(), "Device reset");
    delete [] grid;
    delete [] res;
}

int main()
{
    american_call_test();
    return 0;
}
12,884
/*
 SAXPY accelerated with CUDA.
 In this version CUDA error checks and timing calls have been added.

 Compile with:
 nvcc -arch=sm_20 -O2 saxpy_cuda_timed.cu
*/

#include <cuda.h> /* CUDA runtime API */
#include <cstdio>
#include <sys/time.h>
#include <time.h>

/* Compute *result = x - y in fractional seconds.
   Returns 1 if the difference is negative, 0 otherwise.
   (y is modified by the carry normalisation below.) */
int timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
  struct timeval result0;

  /* Perform the carry for the later subtraction by updating y. */
  if (x->tv_usec < y->tv_usec)
    {
      int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
      y->tv_usec -= 1000000 * nsec;
      y->tv_sec += nsec;
    }
  if (x->tv_usec - y->tv_usec > 1000000)
    {
      int nsec = (y->tv_usec - x->tv_usec) / 1000000;
      y->tv_usec += 1000000 * nsec;
      y->tv_sec -= nsec;
    }

  /* Compute the time remaining to wait.
     tv_usec is certainly positive. */
  result0.tv_sec = x->tv_sec - y->tv_sec;
  result0.tv_usec = x->tv_usec - y->tv_usec;
  *result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;

  /* Return 1 if result is negative. */
  return x->tv_sec < y->tv_sec;
}

/* Host reference implementation: y = alpha*x + y, element by element. */
void saxpy_cpu(float *vecY, float *vecX, float alpha, int n)
{
  int i;
  for (i = 0; i < n; i++)
    vecY[i] = alpha * vecX[i] + vecY[i];
}

/* GPU kernel: one thread per element, with a guard for the grid tail. */
__global__ void saxpy_gpu(float *vecY, float *vecX, float alpha ,int n)
{
  int i;
  i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i<n)
    vecY[i] = alpha * vecX[i] + vecY[i];
}

int main(int argc, char *argv[])
{
  float *x_host, *y_host; /* arrays for computation on host*/
  float *x_dev, *y_dev;   /* arrays for computation on device */
  float *y_shadow;        /* host-side copy of device results */

  int n = 32*1024;
  float alpha = 0.5f;
  int nerror;

  size_t memsize;
  int i, blockSize, nBlocks;
  int error;

  double restime;
  struct timeval tdr0, tdr1;
  cudaEvent_t start, stop;
  float kernel_timer;

  memsize = n * sizeof(float);

  /* allocate arrays on host */
  x_host = (float *)malloc(memsize);
  y_host = (float *)malloc(memsize);
  y_shadow = (float *)malloc(memsize);

  /* allocate arrays on device.
     Note: the `if (error = ...)` pattern here is an intentional assignment
     inside the condition — a non-zero cudaError_t triggers the branch. */
  if(error = cudaMalloc((void **) &x_dev, memsize))
    {
      printf ("Error in cudaMalloc %d\n", error);
      exit (error);
    }
  if(error = cudaMalloc((void **) &y_dev, memsize))
    {
      printf ("Error in cudaMalloc %d\n", error);
      exit (error);
    }
  /* catch any errors */

  /* initialize arrays on host */
  for ( i = 0; i < n; i++)
    {
      x_host[i] = rand() / (float)RAND_MAX;
      y_host[i] = rand() / (float)RAND_MAX;
    }

  /* copy arrays to device memory (synchronous) */
  gettimeofday (&tdr0, NULL);
  if (error = cudaMemcpy(x_dev, x_host, memsize, cudaMemcpyHostToDevice))
    {
      printf ("Error %d\n", error);
      exit (error);
    }
  if (error = cudaMemcpy(y_dev, y_host, memsize, cudaMemcpyHostToDevice))
    {
      printf ("Error %d\n", error);
      exit (error);
    }

  /* set up device execution configuration:
     ceil-division so a partial final block covers the tail. */
  blockSize = 512;
  nBlocks = n / blockSize + (n % blockSize > 0);

  /* execute kernel (asynchronous!) — timed with CUDA events, which measure
     device-side execution independent of host wall-clock. */
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start, 0);
  saxpy_gpu<<<nBlocks, blockSize>>>(y_dev, x_dev, alpha, n);
  cudaEventRecord(stop, 0);
  cudaEventSynchronize( stop );
  cudaEventElapsedTime( &kernel_timer, start, stop );
  printf("Test Kernel took %f ms\n",kernel_timer);

  /* check success of kernel */
  if(error = cudaGetLastError())
    {
      printf ("Error detected after kernel %d\n", error);
      exit (error);
    }
  cudaEventDestroy(start);
  cudaEventDestroy(stop);

  /* retrieve results from device (synchronous) */
  if (error = cudaMemcpy(y_shadow, y_dev, memsize, cudaMemcpyDeviceToHost))
    {
      printf ("Error %d\n", error);
      exit (error);
    }

  /* check if GPU calculation completed without error */
  if (error = cudaDeviceSynchronize())
    {
      printf ("Error %d\n", error);
      exit (error);
    }
  gettimeofday (&tdr1, NULL);
  timeval_subtract (&restime, &tdr1, &tdr0);
  printf ("gpu kernel and memcopy %e s\n", restime);

  gettimeofday (&tdr0, NULL);
  /* execute host version (i.e. baseline reference results) */
  saxpy_cpu(y_host, x_host, alpha, n);
  gettimeofday (&tdr1, NULL);
  timeval_subtract (&restime, &tdr1, &tdr0);
  printf ("cpu kernel %e s\n", restime);

  /* check results.
     NOTE(review): this is an exact float comparison; GPU FMA contraction can
     legitimately produce bit-different results from the CPU, so a tolerance
     comparison may be more appropriate — confirm expectations. */
  nerror=0;
  for(i=0; i < n; i++)
    {
      if(y_shadow[i]!=y_host[i]) nerror=nerror+1;
    }
  printf("test comparison shows %d errors\n",nerror);

  /* free memory */
  cudaFree(x_dev);
  cudaFree(y_dev);
  free(x_host);
  free(y_host);
  free(y_shadow);

  return 0;
}
12,885
// Example for the Super Computing course
// Created by: Luciano P. Soares
// Modified by: Igor Montagner
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <cmath>

/* Element-wise sum of two device vectors: c[i] = a[i] + b[i]. */
__global__ void add(double *a, double *b, double *c, int N) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < N) {
        c[idx] = a[idx] + b[idx];
    }
}

/* Builds two vectors, sums them on the GPU and spot-checks 8 entries. */
int main() {
    const int n = 1 << 23;
    const int blocksize = 256;

    thrust::host_vector<double> A(n), B(n);
    for (int i = 0; i < n; i++) {
        A[i] = (double)i;
        B[i] = (double)n - i;
    }

    // Device-side copies plus an output vector; thrust manages the transfers.
    thrust::device_vector<double> A_d(A), B_d(B), C_d(n);

    const int nblocks = ceil((double)n / blocksize);
    add<<<nblocks, blocksize>>>(thrust::raw_pointer_cast(A_d.data()),
                                thrust::raw_pointer_cast(B_d.data()),
                                thrust::raw_pointer_cast(C_d.data()),
                                n);

    // Constructing a host_vector from a device_vector performs the D2H copy.
    thrust::host_vector<double> C(C_d);

    for (int i = 0; i < n; i++) {
        if (!(i % (n / 8))) {
            printf("a[%d] + b[%d] = c[%d] => ", i, i, i);
            printf("%6.1f + %6.1f = %6.1f\n", A[i], B[i], C[i]);
        }
    }
    return 0;
}
12,886
#include <cstdlib>
#include <iostream>
using namespace std;

// Element-wise vector addition: result[i] = a[i] + b[i], guarded for the
// partial block at the end of the grid.
__global__ void vecAdd_kernel(const float *a, const float *b, float *result, int n)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n)
        result[idx] = a[idx] + b[idx];
}

// Host driver: fills two n-element vectors with 0..n-1, adds them on the GPU
// and prints the first ten sums.
int main()
{
    const int n = 100;
    const size_t bytes = n * sizeof(float);

    float *a = new float[n];
    float *b = new float[n];
    float *result = new float[n];

    float *a_gpu, *b_gpu, *result_gpu;
    cudaMalloc((void **)&a_gpu, bytes);
    cudaMalloc((void **)&b_gpu, bytes);
    cudaMalloc((void **)&result_gpu, bytes);

    for (int i = 0; i < n; ++i) {
        a[i] = i;
        b[i] = i;
    }

    cudaMemcpy(a_gpu, a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(b_gpu, b, bytes, cudaMemcpyHostToDevice);

    const int block_size = 256;
    const int num_blocks = (n + block_size - 1) / block_size;  // ceil-div
    vecAdd_kernel<<<num_blocks, block_size>>>(a_gpu, b_gpu, result_gpu, n);

    cudaMemcpy(result, result_gpu, bytes, cudaMemcpyDeviceToHost);

    for (int x = 0; x < 10; x++)
        cout << result[x] << endl;

    delete[] a;
    delete[] b;
    delete[] result;
    cudaFree(a_gpu);
    cudaFree(b_gpu);
    cudaFree(result_gpu);
    return 0;
}
12,887
#include "includes.h"

#define N 18

// Element-wise sum c[i] = a[i] + b[i] for the N-element arrays this file
// works with (assumes all three arrays hold at least N doubles — confirm
// against callers). The bounds guard keeps threads of an over-provisioned
// launch from writing past the arrays; the original kernel had no check.
__global__ void sum(double *a, double *b, double *c)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if (index < N)
        c[index] = a[index] + b[index];
}
12,888
// Program computing the point-wise (Hadamard) product of two matrices.
// Input: matrix sizes M,N (which fixes the total thread count of the CUDA
// grid), thread rows per block, thread columns per block.
// NB: in this code the x direction indexes rows and y indexes columns.
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <time.h>
using namespace std;

// CPU reference: c[i][j] = a[i][j] * b[i][j] (row-major 1-D storage).
__host__ void calcolaProdPuntualeCPU(int *a, int *b, int *c, int m, int n) {
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++)
            c[i * n + j] = a[i * n + j] * b[i * n + j];
}

// Print an m x n row-major matrix.
__host__ void stampaMatrice(int *a, int m, int n) {
    for (int i = 0; i < m; i++) {
        for (int j = 0; j < n; j++)
            cout << a[i * n + j] << " ";
        cout << endl;
    }
}

// GPU kernel: one thread per matrix cell over the whole grid, guarded so
// padding threads from the ceil-divided grid do nothing.
__global__ void calcolaProdPuntualeGPU(int *a, int *b, int *c, int m, int n) {
    int indRiga = threadIdx.x + blockIdx.x * blockDim.x;  // row index
    int indCol = threadIdx.y + blockIdx.y * blockDim.y;   // column index
    if (indRiga < m && indCol < n)
        c[indRiga * n + indCol] = a[indRiga * n + indCol] * b[indRiga * n + indCol];
}

// Fill with pseudo-random values in [1, 10]. Seeding is done once in main:
// the original reseeded with time(NULL) on every call, which made both
// matrices identical when initialized within the same second.
void inizializzaMatrice(int *a, int m, int n) {
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++)
            a[i * n + j] = 1 + rand() % 10;
}

int main(int argc, char *argv[]) {
    int m, n, nRigheThreadsInBlocco, nColonneThreadsInBlocco;
    dim3 nBlocchiInGriglia_d3, nThreadsInBlocco_d3;  // default-constructed

    if (argc != 5) {
        // Defaults when the four CLI arguments are missing.
        m = 5;
        n = 5;
        nRigheThreadsInBlocco = 2;
        nColonneThreadsInBlocco = 2;
    } else {
        sscanf(argv[1], "%d", &m);                        // matrix rows
        sscanf(argv[2], "%d", &n);                        // matrix columns
        sscanf(argv[3], "%d", &nRigheThreadsInBlocco);    // thread rows per block
        sscanf(argv[4], "%d", &nColonneThreadsInBlocco);  // thread cols per block
    }

    // One thread per cell: the grid must cover m x n threads in total.
    // Ceil-divide so sizes not divisible by the block shape are still covered.
    int nRigheBlocchiInGriglia = (m + nRigheThreadsInBlocco - 1) / nRigheThreadsInBlocco;
    int nColonneBlocchiInGriglia = (n + nColonneThreadsInBlocco - 1) / nColonneThreadsInBlocco;

    nBlocchiInGriglia_d3.x = nRigheBlocchiInGriglia;  // x is rows in CUDA here
    nBlocchiInGriglia_d3.y = nColonneBlocchiInGriglia;
    nThreadsInBlocco_d3.x = nRigheThreadsInBlocco;
    nThreadsInBlocco_d3.y = nColonneThreadsInBlocco;

    // Host matrices as flat row-major arrays (sizes known only at runtime,
    // and the device mapping needs contiguous storage).
    int *h_a = (int *)malloc(m * n * sizeof(int));
    int *h_b = (int *)malloc(m * n * sizeof(int));
    int *h_c = (int *)malloc(m * n * sizeof(int));

    srand((unsigned int)time(NULL));  // seed once for the whole run
    inizializzaMatrice(h_a, m, n);
    inizializzaMatrice(h_b, m, n);

    calcolaProdPuntualeCPU(h_a, h_b, h_c, m, n);
    stampaMatrice(h_c, m, n);

    // Device buffers.
    int *d_a, *d_b, *d_c;
    cudaMalloc((void **)&d_a, m * n * sizeof(int));
    cudaMalloc((void **)&d_b, m * n * sizeof(int));
    cudaMalloc((void **)&d_c, m * n * sizeof(int));
    cudaMemset(d_c, 0, m * n * sizeof(int));  // optional

    cudaMemcpy(d_a, h_a, m * n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, m * n * sizeof(int), cudaMemcpyHostToDevice);

    // Kernel launch.
    calcolaProdPuntualeGPU<<<nBlocchiInGriglia_d3, nThreadsInBlocco_d3>>>(d_a, d_b, d_c, m, n);

    // Copy the result back and print it.
    int *copiedFromGPU = (int *)malloc(n * m * sizeof(int));
    cudaMemcpy(copiedFromGPU, d_c, n * m * sizeof(int), cudaMemcpyDeviceToHost);

    cout << "---------------------------------------------" << endl;
    stampaMatrice(copiedFromGPU, m, n);

    // Release device and host memory (the original leaked all of it).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    free(h_a);
    free(h_b);
    free(h_c);
    free(copiedFromGPU);
    return 0;
}
12,889
#include <iostream>
#include <vector>

// 1-D box blur along the row direction of an interleaved image
// (row-major, `channels` values per pixel). Each thread processes one
// channel sample via a grid-stride loop; pixels within half_kernel of the
// row edges are left untouched.
__global__ void convolution_1d_y(float *const input_image, int width, int channels,
                                 int kernel_size, float *result)
{
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockIdx.y;
    const int stride = gridDim.x * blockDim.x;
    const int offset = row * width * channels;
    const float kernel_weight = 1.0f / kernel_size;
    const int half_kernel = kernel_size / 2;

    // Individual threads, each dealing with one channel sample.
    for (; col >= half_kernel * channels && col < (width - half_kernel) * channels;
         col += stride) {
        // Accumulate locally, then write once. The original did
        // `result[...] += ...`, which silently required the output buffer to
        // be zero-initialised before the launch.
        float sum = 0.0f;
        for (int i = -half_kernel * channels; i <= half_kernel * channels; i += channels)
            sum += input_image[offset + col + i];
        result[offset + col] = sum * kernel_weight;
    }
}

// Launch wrapper: one grid row per image row, 256 threads per block,
// fixed 9-tap box kernel. The original hard-coded 3 channels, ignoring the
// `channels` argument; it is now honoured.
void blur_separable_gpu(float *const input_image, int width, int height,
                        int channels, float *result)
{
    const dim3 blockSize(256, 1);
    const dim3 gridSize((width * channels + blockSize.x - 1) / blockSize.x, height);
    convolution_1d_y<<<gridSize, blockSize>>>(input_image, width, channels,
                                              /*kernel_size=*/9, result);
}
12,890
#include <cuda.h>
#include <bits/stdc++.h>
#define BLOCK_SIZE 32
using namespace std;

// NOTE: despite the name, this fills the vector with the constant 1 (the
// `max` parameter is accepted but unused), making the reduction result
// trivially verifiable as `size`. Kept as-is to preserve behaviour.
void fill_vector_random (int *vec, int size, int max = 10){
    for (int i = 0; i < size; i++)
        vec[i] = 1;
}

// Debug helper: print every element followed by a separator line.
void print_vector (int *vec, int size){
    for (int i = 0; i < size; i++)
        cout << vec[i] << endl;
    cout << "___________" << endl;
}

// Sequential reference reduction (sum of all elements).
int vector_reduction_seq (int *vec, int size){
    int ans = 0;
    for (int i = 0; i < size; i++){
        ans += vec[i];
    }
    return ans;
}

// Tree reduction: each block consumes 2*BLOCK_SIZE input elements and writes
// one partial sum to out[blockIdx.x]. The `size` guard keeps partial blocks
// from reading past the input (the original kernel had no bounds check).
__global__ void vector_reduction_kernel (int *vec, int *out, int size){
    int pos = blockIdx.x * blockDim.x * 2 + threadIdx.x;
    __shared__ int svec[BLOCK_SIZE];
    int v = 0;
    if (pos < size) v = vec[pos];
    if (pos + blockDim.x < size) v += vec[pos + blockDim.x];
    svec[threadIdx.x] = v;
    __syncthreads();
    for (int i = blockDim.x / 2; i > 0; i >>= 1){
        if (threadIdx.x < i)
            svec[threadIdx.x] += svec[threadIdx.x + i];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = svec[0];
}

// GPU-assisted reduction: repeatedly collapse the vector by a factor of
// 2*BLOCK_SIZE on the device, then finish the small remainder on the CPU.
int vector_reduction_con (int *vec, int size){
    while (size >= BLOCK_SIZE * 2){
        // Ceil-division sized to the kernel's 2*BLOCK_SIZE consumption.
        // The original launched ceil(size/BLOCK_SIZE) blocks against an
        // output buffer of size/(2*BLOCK_SIZE) slots, so the surplus blocks
        // wrote out of bounds on the device.
        int blocks = (size + BLOCK_SIZE * 2 - 1) / (BLOCK_SIZE * 2);
        int *d_vec, *d_out;
        cudaMalloc (&d_vec, size * sizeof(int));
        cudaMalloc (&d_out, blocks * sizeof(int));
        cudaMemcpy (d_vec, vec, size * sizeof(int), cudaMemcpyHostToDevice);
        dim3 dimGrid (blocks, 1, 1);
        dim3 dimBlock (BLOCK_SIZE, 1, 1);
        vector_reduction_kernel<<<dimGrid, dimBlock>>> (d_vec, d_out, size);
        cudaDeviceSynchronize();
        size = blocks;
        cudaMemcpy (vec, d_out, size * sizeof(int), cudaMemcpyDeviceToHost);
        cudaFree (d_vec);
        cudaFree (d_out);
    }
    return vector_reduction_seq(vec, size);
}

int main(int argc, char **argv){
    if (argc < 2){
        cout << "Usage: ./reduction max_vector_size" << endl;
        return 0;
    }
    const int max_size = atoi(argv[1]);
    srand (time (NULL));
    ofstream x("x.mio"), y_seq ("y_seq.mio"), y_con ("y_con.mio");
    clock_t begin, end;
    double secs;
    int ans1, ans2;

    // Warm-up run so the first timed GPU call does not pay CUDA init cost.
    int first_vec[64];
    fill_vector_random(first_vec, 64);
    vector_reduction_con (first_vec, 64);

    for (int i = 64; i <= max_size; i += 64){
        // Heap storage instead of the original VLA `int vec[i]`, which could
        // overflow the stack for large sizes.
        vector<int> vec(i);
        fill_vector_random (vec.data(), i);
        x << i << endl;

        begin = clock();
        ans1 = vector_reduction_seq (vec.data(), i);
        end = clock();
        secs = double(end - begin) / CLOCKS_PER_SEC;
        y_seq << secs << endl;

        begin = clock();
        ans2 = vector_reduction_con (vec.data(), i);
        end = clock();
        secs = double(end - begin) / CLOCKS_PER_SEC;
        y_con << secs << endl;

        if (ans1 != ans2)
            cout << "SWW" << endl << ans1 << " " << ans2 << endl;
    }
    return 0;
}
12,891
#include "includes.h"

/* One schedule-date state update for a barrier-style payoff lattice.
   Grid layout: one block per spot node (blockIdx.x = spot index), one thread
   per state index (threadIdx.x); payoff is stored column-wise as
   payoff[spot_idx + state_idx * gridDim.x]. The update is done in place:
   each thread computes its new value from the old lattice, a block-wide
   barrier separates the reads from the writes. */
__global__ void interStep_k(float* payoff, size_t scheduleCounter, float dx, float Smin, size_t P1, size_t P2, float barrier)
{
    size_t spot_idx = blockIdx.x;
    size_t state_idx = threadIdx.x;

    // shared memory for the payoff for a fixed spot (each block corresponds to a spot_dx)
    //extern __shared__ float shared_payoff_x[];
    //shared_payoff_x[state_idx] = payoff[spot_idx + state_idx * gridDim.x];
    //__syncthreads();

    float temp = 0.0;
    // Effective lower state bound after scheduleCounter shifts, clamped at 0.
    size_t P1_k = P1 > scheduleCounter ? P1 - scheduleCounter : 0;
    // Spot value on a log-spaced grid starting at Smin (step dx in log space).
    float spot = Smin * expf(spot_idx * dx);

    // Top state: survives only if the spot is at/above the barrier.
    if (state_idx == P2) {
        temp = payoff[spot_idx + P2 * gridDim.x] * (spot >= barrier);
    }
    // Interior states: keep own value above the barrier, otherwise inherit
    // the next-higher state's value.
    else if (P1_k <= state_idx && state_idx < P2) {
        temp = payoff[spot_idx + state_idx * gridDim.x] * (spot >= barrier) + payoff[spot_idx + (state_idx + 1) * gridDim.x] * (spot < barrier);
    }
    // NOTE(review): when P1_k < P2 this branch is unreachable — the previous
    // branch already matches state_idx == P1_k and applies a different
    // formula. Confirm the intended branch ordering with the author.
    else if (state_idx == P1_k) {
        temp = payoff[spot_idx + P1_k * gridDim.x] * (spot < barrier);
    }

    // Barrier ensures every thread has finished reading the old lattice
    // (including the state_idx+1 neighbour) before any thread overwrites it.
    // Threads outside all branches write temp = 0. All threads reach this
    // barrier uniformly.
    __syncthreads();
    payoff[spot_idx + state_idx * gridDim.x] = temp;
}
12,892
#include "includes.h"

// Transposes a secondary_size x primary_size matrix in global memory into a
// primary_size x secondary_size output, reading through the read-only cache
// (__ldg). One thread per output element on a 2-D grid. The bounds guard is
// new: the original kernel wrote out of range whenever the grid was padded
// beyond the matrix dimensions.
__global__ void simple_corner_turn_kernel(float *d_input, float *d_output,
                                          int primary_size, int secondary_size){
    size_t primary = blockIdx.x * blockDim.x + threadIdx.x;
    size_t secondary = blockIdx.y * blockDim.y + threadIdx.y;
    if (primary >= (size_t)primary_size || secondary >= (size_t)secondary_size)
        return;
    d_output[primary * secondary_size + secondary] =
        (float) __ldg(&d_input[secondary * primary_size + primary]);
}
12,893
#include <stdio.h>
#include <stdlib.h>
//#include <cuda.h>

/*
** START of auxiliary functions
*/

// Matrix multiplication kernel: computes P = M * N for square Width x Width
// matrices, one thread per output element (single block).
__global__ void MatrixMulKernel ( int *Md, int *Nd, int *Pd, int Width ){
    //2D thread ID
    int tx = threadIdx.x;
    int ty = threadIdx.y;

    //Pvalue stores the Pd element computed by this thread
    int Pvalue = 0;
    for ( int k = 0; k < Width; ++k ){
        int Mdelement = Md[ ty * Width + k ];
        int Ndelement = Nd[ k * Width + tx ];
        Pvalue += Mdelement * Ndelement;
    }
    //Write matrix to device memory; each thread writes one element
    Pd[ ty * Width + tx ] = Pvalue;
}// End of Matrix multiplication kernel function

// Print a rows x columns matrix stored row-major.
void printMatrix ( int *M, int rows, int columns ){
    printf ( "\n %s: \n", "M" );
    for ( int v = 0; v < rows; v++ ){
        for ( int w = 0; w < columns; w++ ) {
            printf ( " %03d ", M[ v * columns + w ] );
        }
        printf ( " \n " );
    }
}//End of printMatrix function

// Host wrapper for square-matrix multiplication: copies M and N to the
// device, launches the kernel (one Width x Width block) and copies P back.
void MatrixMul( int *M, int *N, int *P, int Width ){
    int size = Width * Width * sizeof( int );
    int *Md = NULL;
    int *Nd = NULL;
    int *Pd = NULL;

    //Transfer M, N to device
    cudaMalloc( (void**) &Md, size );
    cudaMemcpy( Md, M, size, cudaMemcpyHostToDevice );
    cudaMalloc( (void**) &Nd, size );
    cudaMemcpy( Nd, N, size, cudaMemcpyHostToDevice );
    cudaMalloc( (void**) &Pd, size );

    //invoke kernel: NOTE a single block limits Width to the device's
    //max threads-per-block dimension.
    dim3 dimBlock( Width, Width );
    dim3 dimGrid( 1, 1 );
    MatrixMulKernel<<< dimGrid, dimBlock >>>( Md, Nd, Pd, Width );

    //transfer from device to host
    cudaMemcpy( P, Pd, size, cudaMemcpyDeviceToHost );

    //Free device matrices
    cudaFree( Md );
    cudaFree( Nd );
    cudaFree( Pd );
}//End of MatrixMul function

// Get width (number of columns) — the first integer in the file.
// Assumes space-separated integers; returns 0 when the file cannot be read.
int getMatWidth( char *filename ){
    int width = 0;
    FILE *ptr = fopen( filename, "r" );
    if ( ptr == 0 ){
        printf( "\n could not open file %s \n", filename );
    }
    else{
        fscanf( ptr, "%d", &width );
        // Fixed: the original called fclose(ptr) even when ptr was NULL.
        fclose( ptr );
    }
    return width;
}//end of getMatWidth function

// Get height (number of rows) — the second integer in the file.
// Returns 0 when the file cannot be read.
int getMatHeight( char *filename ){
    int height = 0, dummy;
    FILE *ptr = fopen( filename, "r" );
    if ( ptr == 0 ){
        printf( "\n could not open file %s \n", filename );
    }
    else{
        fscanf( ptr, "%d", &dummy );   // skip width
        fscanf( ptr, "%d", &height );
        fclose( ptr );                 // only closed when the open succeeded
    }
    return height;
}//end of getMatHeight function

// Read a cols x rows matrix from an open stream positioned at the start of
// the file (layout: cols rows v0 v1 ...). Returns a heap-allocated row-major
// array the caller must free, or NULL on a read/allocation failure.
// Fixed: the original wrote the elements through an uninitialized pointer.
int *loadMatrixFile( FILE *ptr, int cols, int rows ) {
    int total = cols * rows;
    int *z = (int *)malloc( total * sizeof( int ) );
    if ( z == NULL ) return NULL;

    int dummy;
    // skip the two dimension values
    if ( fscanf( ptr, "%d", &dummy ) != 1 ) { free( z ); return NULL; }
    if ( fscanf( ptr, "%d", &dummy ) != 1 ) { free( z ); return NULL; }

    for ( int w = 0; w < total; w++ ){
        if ( fscanf( ptr, "%d", &z[ w ] ) != 1 ) { free( z ); return NULL; }
    }
    return z;
}//END of loadMatrixFile function

/*
** END OF Auxiliary functions
*/

/*
** START OF MAIN FUNCTION
*/
int main ( int argc, char *argv[ ] )
{
    char *filename1 = argv[ 1 ];
    char *filename2 = argv[ 2 ];
    int *matA; //holds first matrix
    int *matB; //holds second matrix
    int sqWidth;

    if ( argc != 3 ) /* argc should be 3 for correct execution */
    {
        /* We print argv[0] assuming it is the program name */
        printf( "\nusage: %s matrixFile1 matrixFile2 \n\n", argv[ 0 ] );
        return 1;
    }

    //returns # of cols / rows of each matrix, zero otherwise
    int matWidthA = getMatWidth ( filename1 );
    int matHeightA = getMatHeight( filename1 );
    int matWidthB = getMatWidth ( filename2 );
    int matHeightB = getMatHeight( filename2 );

    //load matrices from files
    FILE *ptr1 = fopen( argv[ 1 ], "r" );
    FILE *ptr2 = fopen( argv[ 2 ], "r" );
    // Fixed: the original used && here, so a single failed open fell through
    // to the read path with a NULL stream.
    if ( ptr1 == 0 || ptr2 == 0 ) {
        printf( "\n could not open one of the following files: %s %s \n", argv[ 1 ], argv[ 2 ] );
        if ( ptr1 ) fclose( ptr1 );
        if ( ptr2 ) fclose( ptr2 );
        return 1;
    }

    matA = loadMatrixFile( ptr1, matWidthA, matHeightA );
    matB = loadMatrixFile( ptr2, matWidthB, matHeightB );
    // Fixed: files are closed exactly once (the original closed them again
    // after this block, which is undefined behaviour).
    fclose( ptr1 );
    fclose( ptr2 );

    if ( matA == NULL || matB == NULL ) {
        printf( "\n could not read matrix data \n" );
        free( matA );
        free( matB );
        return 1;
    }

    printMatrix( matA, matWidthA, matHeightA );
    printMatrix( matB, matWidthB, matHeightB );

    // Pick the larger width as the common square size.
    // Fixed: the original assigned matWidthB in both branches.
    if ( matWidthB > matWidthA )
        sqWidth = matWidthB;
    else
        sqWidth = matWidthA;

    printf( "\n DEBUG \n" );

    //make matrices square ones first before multiplying
    //MatrixMul( matA, matB, matC, sqWidth );
    //printMatrix( matC, sqWidth, sqWidth );

    free( matA );
    free( matB );
    return 0;
}
/*
** END OF MAIN FUNCTION
*/
12,894
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  /* strlen was used without this include (original bug) */

/* Each thread computes one term of the Nilakantha series for pi:
   pi = 3 + 4/(2*3*4) - 4/(4*5*6) + 4/(6*7*8) - ... */
__global__ void calPi(double *pi_D)
{
    int t_rank = (blockIdx.x * blockDim.x) + threadIdx.x;
    int x = 2 + (2 * t_rank);  /* 2, 4, 6, ... */
    double y = ((double)4 / x) * (double)1 / (x + 1) * (double)1 / (x + 2);
    if (t_rank % 2 != 0)       /* terms alternate in sign */
        y = -y;
    pi_D[t_rank] = y;
}

int main()
{
    printf("pi calculate...\n");
    int thread_size = 500, block_size = 5;
    int n_terms = thread_size * block_size;

    double *pi_D;
    double *pi_H = (double *)malloc(sizeof(double) * n_terms);
    cudaMalloc((void **)&pi_D, sizeof(double) * n_terms);

    calPi<<<block_size, thread_size>>>(pi_D);
    cudaMemcpy(pi_H, pi_D, n_terms * sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(pi_D);

    double pi = 3;
    for (int i = 0; i < n_terms; i++)
        pi = (double)pi + pi_H[i];
    free(pi_H);

    /* Format to a string and drop the last digit rather than letting printf
       round it. Fixed: the original char result[12] was too small for
       "3.14159265359" (13 chars) plus the terminator; snprintf with a
       generous buffer removes the overflow. */
    char result[32];
    snprintf(result, sizeof(result), "%.11lf", pi);
    result[strlen(result) - 1] = '\0';
    printf("calculated pi = %s \n", result);
    return 0;
}
12,895
#include "includes.h"

/* For each of `width` query columns, keep the k smallest of `height`
   candidate distances in sorted order (k-NN selection by insertion sort).
   Layout: dist and ind are height x width matrices stored row-major, so one
   thread walks its column with stride `width`. Indices written into `ind`
   are 1-based (candidate row l produces index l+1). One thread per column. */
__global__ void cuInsertionSort(float *dist, long *ind, int width, int height, int k){
    // Variables
    int l, i, j;
    float *p_dist;        // base of this thread's distance column
    long *p_ind;          // base of this thread's index column
    float curr_dist, max_dist;
    long curr_row, max_row;

    unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
    if (xIndex<width){
        // Pointer shift, initialization, and max value
        p_dist = dist + xIndex;
        p_ind = ind + xIndex;
        max_dist = p_dist[0];
        p_ind[0] = 1;     // 1-based index of row 0

        // Part 1 : insertion-sort the first k elements of the column.
        for (l=1; l<k; l++){
            curr_row = l * width;
            curr_dist = p_dist[curr_row];
            if (curr_dist<max_dist){
                // Find the insertion position i among the already-sorted
                // first l elements (linear scan from the front).
                i=l-1;
                for (int a=0; a<l-1; a++){
                    if (p_dist[a*width]>curr_dist){
                        i=a;
                        break;
                    }
                }
                // Shift elements [i, l) down one slot and insert.
                for (j=l; j>i; j--){
                    p_dist[j*width] = p_dist[(j-1)*width];
                    p_ind[j*width] = p_ind[(j-1)*width];
                }
                p_dist[i*width] = curr_dist;
                p_ind[i*width] = l+1;   // 1-based source row
            } else {
                // Already in place at the end of the sorted prefix.
                p_ind[l*width] = l+1;
            }
            // Largest of the sorted prefix now sits at row l.
            max_dist = p_dist[curr_row];
        }

        // Part 2 : insert each remaining candidate into the k sorted rows
        // when it beats the current k-th smallest (max_dist).
        max_row = (k-1)*width;
        for (l=k; l<height; l++){
            curr_dist = p_dist[l*width];
            if (curr_dist<max_dist){
                // Locate insertion position among the k sorted rows.
                i=k-1;
                for (int a=0; a<k-1; a++){
                    if (p_dist[a*width]>curr_dist){
                        i=a;
                        break;
                    }
                }
                // Shift the tail down (dropping the old k-th) and insert.
                for (j=k-1; j>i; j--){
                    p_dist[j*width] = p_dist[(j-1)*width];
                    p_ind[j*width] = p_ind[(j-1)*width];
                }
                p_dist[i*width] = curr_dist;
                p_ind[i*width] = l+1;
                // Refresh the cutoff from the new k-th smallest.
                max_dist = p_dist[max_row];
            }
        }
    }
}
12,896
/*********************************************************************
Copyright(c) 2020 LIMITGAME

Permission is hereby granted, free of charge, to any person
obtaining a copy of this softwareand associated documentation
files(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and /or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions :
The above copyright noticeand this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT.IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
----------------------------------------------------------------------
@file GenerateModelNormals.h
@brief Generate normals of model using vertices
@author minseob (https://github.com/rasidin)
**********************************************************************/
#include "CudaDeviceUtils.cuh"

namespace CUDAImageUtilities {

// One thread per triangle: build the face normal from its three vertices.
// InVertices holds 9 floats per triangle (3 vertices x xyz); OutNormals
// receives 3 floats per triangle.
__global__ void ComputeModelNormals(float* InVertices, float* OutNormals, unsigned int InNormalNum)
{
    unsigned int normalindex = blockIdx.x * blockDim.x + threadIdx.x;
    if (normalindex >= InNormalNum) return;

    const unsigned int base = normalindex * 3 * 3;  // 9 floats per triangle
    float3 v0 = make_float3(InVertices[base + 0], InVertices[base + 1], InVertices[base + 2]);
    float3 v1 = make_float3(InVertices[base + 3 + 0], InVertices[base + 3 + 1], InVertices[base + 3 + 2]);
    float3 v2 = make_float3(InVertices[base + 6 + 0], InVertices[base + 6 + 1], InVertices[base + 6 + 2]);

    float3 v01 = make_float3(v0.x - v1.x, v0.y - v1.y, v0.z - v1.z);
    // Fixed: the z component previously read `v2.z - v1.y` (typo), which
    // skewed every computed normal.
    float3 v21 = make_float3(v2.x - v1.x, v2.y - v1.y, v2.z - v1.z);

    float3 normal = cross(v01, v21);
    normal = normalize(normal);

    OutNormals[normalindex * 3 + 0] = normal.x;
    OutNormals[normalindex * 3 + 1] = normal.y;
    OutNormals[normalindex * 3 + 2] = normal.z;
}

// Host wrapper: uploads the vertices, computes one normal per triangle
// (VerticesNum / 3 triangles) and downloads the normals.
void GenerateModelNormals(float *InVertices, float *OutNormals, const int VerticesNum)
{
    // NOTE(review): these sizes are preserved from the original code; both
    // appear 3x larger than VerticesNum*3 floats in / (VerticesNum/3)*3
    // floats out would require — confirm the intended meaning of VerticesNum
    // with the callers before shrinking them.
    size_t datasize_in = VerticesNum * 3 * sizeof(float) * 3;
    size_t datasize_out = VerticesNum * sizeof(float) * 3;

    float* dIn;
    cudaMalloc(&dIn, datasize_in);
    cudaMemcpy(dIn, InVertices, datasize_in, cudaMemcpyHostToDevice);
    float* dOut;
    cudaMalloc(&dOut, datasize_out);

    const int normalnum = VerticesNum / 3;  // one normal per triangle
    dim3 threadsperblock(16, 1, 1);
    // Fixed: the original launched (VerticesNum + 15) / 16 * 16 blocks,
    // i.e. far more than needed; ceil-divide the normal count instead.
    dim3 numblocks((normalnum + 15) / 16, 1, 1);
    ComputeModelNormals<<<numblocks, threadsperblock>>>(dIn, dOut, normalnum);

    cudaMemcpy(OutNormals, dOut, datasize_out, cudaMemcpyDeviceToHost);
    cudaFree(dIn);
    cudaFree(dOut);
}

}
12,897
// nvcc -arch=sm_30 minimal.cu -run ; rm a.out
#define CUDA_BOTH __device__ __host__
#include <stdio.h>

CUDA_BOTH static double cubic_root(double x);

// Debug kernel (float flavour): prints the quartic coefficients. Not launched
// by main, but kept as part of this file's interface.
__global__ void fquartic(float a4, float a3, float a2, float a1, float a0)
{
    printf("fquartic %d, a4=%g a3=%g a2=%g a1=%g a0=%g \n", threadIdx.x, a4,a3,a2,a1,a0 );
}

// Debug kernel (double flavour): prints the quartic coefficients and a
// cubic_root sanity value, exercising the host/device helper from device code.
__global__ void dquartic(double a4, double a3, double a2, double a1, double a0)
{
    double a[5];
    a[4] = a4;
    a[3] = a3;
    a[2] = a2;
    a[1] = a1;
    a[0] = a0;
    printf("dquartic %d, a[4]=%g a[3]=%g a[2]=%g a[1]=%g a[0]=%g \n", threadIdx.x, a[4],a[3],a[2],a[1],a[0] );
    printf("cubic_root(27) %g \n", cubic_root(27) );
}

// Real cube root: unlike pow(x, 1/3.0) alone, this handles negative inputs
// by taking the root of |x| and restoring the sign.
CUDA_BOTH static double cubic_root(double x)
{
    const double t = pow(fabs(x), 1.0 / 3.0);
    return x >= 0.0 ? t : -t;
}

int main()
{
    // Coefficients of x^4 - 10x^3 + 35x^2 - 50x + 24 = (x-1)(x-2)(x-3)(x-4).
    double a[5];
    //float a[5] ;
    a[4] = 1.;
    a[3] = -10.;
    a[2] = 35.;
    a[1] = -50.;
    a[0] = 24.;

    dquartic<<<1, 1>>>( a[4], a[3], a[2], a[1], a[0] );

    // BUGFIX(robustness): the launch status was never checked and no explicit
    // sync existed; surface launch-config errors and make sure device printf
    // output is flushed before resetting the device.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "dquartic launch failed: %s\n", cudaGetErrorString(err));
    }
    cudaDeviceSynchronize();

    cudaDeviceReset();
    return 0;
}
12,898
#include <stdio.h> #include <math.h> #include <time.h> #include <unistd.h> #include <cuda_runtime_api.h> #include <errno.h> #include <unistd.h> /****************************************************************************** * The variable names and the function names of this program is same as provided by the university. The added variable and function are the only changes made to this program. * To compile: * nvcc -o linear71 linear71.cu -lm * * To run: * .linear71 * *****************************************************************************/ typedef struct point_t { double x; double y; } point_t; int n_data = 1000; __device__ int d_n_data = 1000; point_t data[] = { {82.45,150.46},{73.37,99.47},{67.74,139.29},{72.16,137.58}, {65.30,145.07},{82.16,148.47},{68.67,112.49},{83.79,161.36}, {84.52,153.89},{82.08,158.44},{69.99,135.14},{84.51,176.33}, {84.99,146.73},{79.83,129.63},{78.61,141.50},{89.46,147.77}, {16.10,54.34},{87.86,157.44},{89.36,165.13},{84.25,131.40}, {73.76,149.11},{53.37,120.23},{32.65,80.70},{18.11,57.29}, {84.26,139.52},{ 4.02,31.04},{59.09,109.06},{46.13,100.14}, {76.03,129.01},{38.20,90.55},{51.02,107.44},{58.19,97.79}, {88.59,150.85},{20.60,71.77},{ 8.19,35.78},{ 9.61,50.48}, {28.89,67.91},{ 4.93,35.28},{82.06,149.40},{57.92,116.68}, {58.45,100.27},{51.00,122.47},{28.30,53.49},{64.45,138.54}, {99.53,170.09},{ 5.92,27.31},{ 3.65,54.86},{70.12,132.64}, {50.44,100.13},{90.40,165.35},{48.92,94.82},{11.87,53.15}, {37.57,98.49},{65.17,119.66},{48.86,102.35},{45.86,90.97}, {38.81,78.45},{37.59,85.70},{87.53,164.66},{29.07,67.13}, {18.12,58.70},{50.96,99.24},{15.15,63.11},{70.18,121.80}, {63.62,113.14},{90.83,166.13},{ 4.48,38.81},{38.43,83.31}, {72.79,133.26},{22.38,51.73},{25.67,59.25},{14.05,55.24}, {44.69,97.15},{90.25,140.25},{88.51,157.93},{97.80,185.98}, {74.47,154.67},{48.49,114.02},{ 7.20,41.63},{54.57,114.41}, {54.65,112.13},{95.78,163.85},{ 4.82,35.68},{71.40,131.67}, {11.40,51.12},{37.04,99.79},{95.18,161.77},{ 5.14,35.58}, { 
5.35,28.90},{93.87,159.07},{72.62,141.94},{25.04,67.60}, {43.67,86.51},{85.14,155.38},{31.43,72.91},{31.01,86.90}, {35.43,82.49},{26.74,65.60},{78.68,145.54},{41.48,90.48}, {77.98,139.46},{42.61,96.09},{92.86,153.84},{83.90,163.50}, {27.63,80.20},{47.65,108.56},{10.11,38.32},{25.69,61.91}, {79.83,137.06},{39.61,86.60},{97.25,169.45},{35.94,96.42}, {60.77,130.01},{80.21,149.73},{23.90,66.45},{52.74,103.05}, {43.25,104.00},{76.81,129.03},{72.16,146.33},{91.57,172.00}, {68.48,141.40},{51.54,87.27},{ 7.90,28.89},{99.68,172.75}, {48.65,93.73},{24.68,76.75},{ 3.18,33.86},{59.69,116.68}, {67.10,147.15},{44.65,84.57},{43.59,90.99},{ 9.62,45.49}, {54.99,108.47},{72.77,148.88},{40.38,99.03},{14.45,55.98}, {88.97,167.83},{98.54,145.79},{61.04,120.12},{67.05,132.16}, {17.75,59.44},{ 5.30,34.23},{72.98,153.51},{99.80,175.07}, {29.63,68.05},{98.58,159.80},{17.02,51.63},{18.25,54.00}, {80.48,154.23},{45.47,92.53},{56.03,90.58},{46.51,86.95}, {88.97,159.83},{86.26,157.77},{91.91,162.33},{54.43,111.82}, { 2.13,47.66},{ 6.77,54.06},{85.91,159.88},{33.62,85.45}, {93.44,172.84},{20.76,65.09},{70.34,140.93},{49.11,98.04}, {43.81,76.76},{70.57,146.91},{18.42,56.31},{95.09,173.61}, {68.52,119.41},{47.42,116.42},{ 5.23,30.90},{32.31,79.12}, {41.39,95.35},{10.93,54.52},{19.69,49.72},{24.60,75.56}, {99.21,179.99},{13.97,71.15},{97.12,160.40},{13.17,52.07}, {99.66,179.48},{42.25,95.17},{83.49,156.19},{16.75,42.31}, { 1.19,31.66},{30.88,72.10},{98.60,178.99},{55.81,117.71}, {62.12,142.23},{ 6.09,43.64},{33.40,89.36},{81.87,154.61}, {74.97,139.11},{76.52,135.44},{55.19,106.92},{37.78,84.16}, {67.76,110.83},{15.71,39.34},{91.60,184.65},{18.83,45.24}, {73.15,154.11},{33.47,72.68},{44.80,100.12},{ 6.50,41.71}, { 9.66,59.27},{27.93,77.67},{24.80,93.83},{71.50,129.30}, {20.16,65.98},{27.35,72.63},{24.46,61.75},{41.88,69.19}, {78.69,158.61},{33.13,96.07},{97.98,171.91},{ 9.89,48.68}, { 1.34,45.10},{59.90,116.53},{20.14,57.09},{49.07,102.98}, 
{46.92,115.94},{61.01,125.38},{87.14,166.83},{91.70,161.23}, {90.08,170.42},{13.00,55.30},{80.93,147.78},{28.70,93.04}, {69.04,147.93},{89.94,172.60},{61.30,115.17},{74.46,147.75}, {56.55,107.46},{12.22,60.93},{32.25,63.82},{ 9.32,60.44}, {75.08,132.97},{ 8.92,49.90},{79.65,164.98},{53.12,105.06}, {40.37,99.92},{52.24,116.34},{ 8.85,61.77},{51.50,101.92}, {90.31,160.07},{43.49,97.16},{31.20,70.18},{81.02,154.89}, {57.10,128.57},{74.12,134.28},{ 6.27,44.36},{84.68,158.06}, {74.94,125.55},{ 0.23,45.24},{ 7.73,45.36},{ 8.93,36.47}, {94.15,177.63},{78.43,133.83},{93.15,158.88},{60.61,91.13}, {64.42,131.71},{63.01,137.62},{35.84,86.80},{ 5.15,51.39}, {96.12,173.38},{83.83,171.12},{51.39,97.40},{90.82,148.25}, {87.51,156.62},{71.48,141.47},{92.40,159.31},{35.89,96.75}, {12.32,45.29},{65.13,133.50},{ 7.64,39.79},{64.49,112.83}, {89.26,156.30},{ 2.35,53.20},{ 5.07,34.59},{36.59,75.43}, {77.92,147.01},{34.31,92.54},{51.65,140.84},{47.25,108.69}, {42.22,82.24},{20.37,72.83},{48.01,84.07},{30.45,81.64}, {58.80,113.41},{26.91,55.19},{78.30,140.41},{ 6.98,45.92}, {26.01,60.72},{ 4.42,36.07},{71.08,131.95},{81.60,156.62}, { 8.93,47.32},{59.53,113.25},{ 3.56,45.70},{50.12,110.80}, {68.30,128.99},{88.05,159.94},{17.20,76.09},{22.38,69.46}, {72.05,132.57},{46.85,96.47},{11.24,56.53},{21.07,50.74}, {69.14,145.04},{86.10,161.68},{54.67,99.95},{14.54,48.03}, {29.46,87.63},{36.10,94.68},{55.03,119.65},{97.51,176.48}, {55.71,108.25},{29.61,90.29},{51.74,117.78},{62.49,121.29}, {66.30,107.38},{92.40,169.05},{29.66,88.33},{56.89,101.63}, { 0.86,28.51},{46.99,95.14},{52.63,107.20},{ 9.93,45.14}, {34.71,78.83},{66.92,136.88},{49.73,92.20},{35.79,78.41}, { 3.45,50.39},{92.21,169.34},{63.55,129.44},{66.98,123.98}, {78.31,160.26},{ 9.27,37.60},{16.41,64.02},{14.91,56.34}, {73.28,146.50},{62.49,121.64},{11.70,65.29},{61.13,109.80}, {83.59,165.03},{47.86,95.75},{75.44,148.32},{19.49,56.86}, {83.96,136.91},{11.02,67.19},{73.54,137.23},{37.79,100.37}, {74.12,136.21},{49.02,90.01},{ 
4.20,35.61},{22.98,80.18}, {15.13,59.46},{74.24,135.97},{64.98,138.07},{95.38,156.99}, { 0.96,28.27},{49.79,101.80},{45.05,106.94},{72.87,104.34}, {80.08,158.58},{39.09,90.44},{22.85,55.77},{86.01,132.94}, {12.97,30.27},{ 3.72,39.84},{21.37,59.85},{ 3.98,40.09}, {88.17,167.27},{79.47,145.00},{22.85,83.02},{ 8.66,46.37}, {42.42,99.25},{11.51,60.26},{88.50,165.19},{65.09,137.74}, {72.93,131.02},{61.01,129.67},{31.07,71.19},{58.85,106.86}, {26.45,64.04},{29.21,84.58},{ 6.09,34.60},{ 2.96,50.09}, {79.23,136.63},{ 8.64,45.47},{89.34,162.17},{42.91,93.76}, {77.13,149.70},{36.17,90.02},{54.36,103.91},{89.87,156.14}, {58.97,127.01},{85.82,167.12},{43.68,113.21},{47.36,109.83}, { 2.62,38.18},{46.64,116.74},{16.62,46.72},{35.65,84.88}, {76.72,130.41},{26.28,77.58},{72.79,146.30},{59.98,110.93}, {66.07,124.68},{85.44,133.49},{44.78,83.32},{44.14,88.36}, { 5.17,30.16},{54.43,122.15},{61.50,115.96},{21.49,57.14}, {26.45,77.08},{64.23,105.45},{96.23,166.31},{48.37,95.50}, {84.28,160.74},{98.90,169.24},{30.67,66.89},{34.27,85.43}, {16.94,45.96},{56.26,128.66},{51.00,112.17},{47.93,120.55}, {69.30,143.53},{88.95,160.75},{88.39,164.59},{ 5.94,39.42}, {14.26,68.73},{63.02,128.11},{60.71,112.67},{77.12,144.23}, {42.26,97.65},{71.03,149.69},{10.37,54.94},{ 3.46,41.17}, {92.48,164.09},{ 8.15,35.01},{68.11,124.03},{99.93,182.90}, {86.28,164.63},{95.83,166.15},{ 5.67,57.96},{85.12,150.47}, {20.81,75.35},{ 2.59,40.69},{53.80,111.01},{86.91,158.82}, {23.57,56.59},{81.75,164.38},{22.17,59.22},{72.09,137.26}, {76.61,164.72},{73.93,131.06},{64.89,147.15},{ 8.26,52.98}, {77.28,152.49},{74.15,147.38},{80.35,144.15},{45.37,93.04}, {31.15,72.72},{95.41,166.56},{82.85,162.74},{93.12,165.29}, {66.67,129.25},{81.84,137.06},{42.81,102.39},{19.62,57.08}, {27.05,69.89},{66.96,141.64},{80.14,154.82},{77.71,151.39}, {98.02,165.87},{43.25,92.91},{64.00,118.43},{27.21,59.42}, {34.62,95.82},{18.73,66.30},{16.38,47.23},{13.72,53.03}, { 2.76,21.18},{84.93,153.77},{87.44,166.99},{ 5.58,37.23}, 
{93.82,166.99},{32.88,90.43},{44.83,111.28},{17.33,71.84}, {29.59,87.74},{52.50,110.19},{ 2.02,23.13},{10.42,49.94}, {94.00,183.68},{30.99,84.86},{76.77,141.52},{26.08,73.37}, {74.59,126.67},{13.91,61.98},{76.54,131.08},{59.96,129.10}, {46.40,106.89},{97.62,151.55},{74.51,111.40},{34.75,73.16}, {80.71,154.75},{66.20,128.03},{13.94,40.50},{18.57,58.34}, {16.96,56.97},{81.41,133.03},{11.74,58.02},{65.24,111.10}, {64.42,132.46},{47.07,91.53},{ 9.49,48.83},{30.62,67.99}, {14.26,44.08},{35.54,85.62},{18.04,72.29},{28.95,98.30}, {65.53,128.21},{31.80,80.17},{48.19,107.79},{91.82,178.72}, { 2.61,50.99},{ 9.41,46.78},{11.75,50.23},{86.25,152.75}, {84.82,137.10},{21.75,66.19},{51.42,117.08},{27.45,78.20}, { 7.55,46.34},{40.18,99.96},{48.38,97.02},{90.71,166.49}, {40.21,104.86},{85.96,144.13},{28.45,62.57},{87.00,156.68}, {14.76,52.01},{12.73,54.05},{ 8.40,30.53},{55.90,124.27}, {93.82,161.77},{ 7.36,47.71},{81.45,153.81},{15.87,48.68}, {65.15,126.32},{94.02,164.99},{51.32,98.69},{12.18,53.95}, {28.58,76.38},{75.81,131.24},{40.06,73.89},{56.95,102.56}, { 5.27,37.99},{83.67,142.75},{16.28,52.91},{20.70,35.76}, {87.65,148.45},{37.53,84.93},{38.38,71.71},{21.87,70.15}, {98.43,156.15},{13.14,72.22},{ 0.89,21.83},{54.30,96.47}, {28.05,93.27},{40.06,73.92},{85.33,155.67},{78.39,135.43}, {78.29,143.53},{ 6.68,48.63},{56.66,108.28},{41.08,72.70}, {78.96,147.93},{37.57,83.89},{79.31,145.44},{92.11,150.70}, {73.61,138.09},{27.58,63.65},{36.89,95.39},{73.65,134.42}, {21.40,75.96},{76.09,134.54},{66.79,128.97},{98.25,173.49}, {64.78,125.65},{94.15,150.48},{99.28,166.74},{97.23,169.32}, {20.06,65.36},{61.51,119.94},{ 4.02,58.03},{32.71,72.40}, {81.34,156.62},{42.73,84.31},{11.80,42.08},{61.37,114.16}, {99.06,181.94},{94.37,179.24},{52.27,116.42},{30.87,90.80}, {42.79,80.80},{93.69,147.33},{67.38,124.88},{22.36,63.47}, {49.43,117.10},{54.68,116.87},{99.67,193.00},{69.77,147.92}, {24.28,67.29},{49.86,100.41},{78.37,140.32},{78.03,147.24}, 
{18.21,61.72},{19.16,72.31},{81.68,153.39},{93.83,159.35}, {76.59,130.68},{52.08,108.48},{59.24,114.10},{90.89,168.10}, { 3.30,42.02},{10.55,43.70},{46.92,100.26},{63.08,119.95}, {70.84,134.26},{60.76,125.73},{55.62,109.24},{15.46,73.05}, {74.23,149.27},{26.60,82.40},{ 4.14,47.33},{72.49,127.61}, {41.11,93.62},{95.12,162.74},{ 5.18,44.31},{54.34,104.63}, { 8.05,30.00},{90.51,162.95},{45.71,90.03},{37.49,77.03}, {97.78,166.18},{52.21,119.75},{14.49,47.90},{23.31,58.51}, {24.29,67.32},{33.72,67.31},{ 8.56,41.46},{94.59,174.77}, {66.20,130.36},{28.39,76.43},{20.89,71.19},{85.45,136.79}, {85.61,142.45},{27.67,55.12},{41.29,86.29},{29.28,69.04}, {57.25,122.80},{ 6.56,45.30},{60.20,112.25},{84.09,144.25}, {97.82,157.26},{ 1.62,47.22},{37.40,76.17},{19.42,59.38}, {83.74,143.04},{81.13,150.39},{87.22,166.45},{50.60,108.20}, {70.05,147.53},{47.80,94.15},{45.91,105.79},{61.78,120.52}, {13.11,72.56},{37.43,99.42},{37.85,85.91},{15.87,57.76}, {65.49,140.24},{14.11,58.28},{11.31,59.60},{24.80,69.36}, {36.63,93.58},{91.21,147.99},{61.08,146.80},{35.48,58.34}, {54.82,113.98},{80.93,155.58},{56.82,107.15},{38.92,86.13}, {94.78,184.70},{79.23,139.92},{86.01,152.59},{57.01,121.54}, {39.58,96.26},{81.79,149.19},{17.44,42.42},{96.55,163.99}, {96.33,171.38},{66.48,127.35},{83.03,142.27},{21.57,74.86}, {63.00,118.36},{32.87,90.04},{16.13,51.71},{88.11,155.17}, { 7.40,29.32},{35.76,103.34},{11.21,65.18},{94.18,150.83}, {23.18,85.51},{55.22,117.37},{31.87,64.39},{54.25,111.38}, {56.52,111.22},{74.89,152.23},{98.31,193.90},{28.02,62.20}, {69.85,114.81},{51.99,98.74},{ 7.96,49.10},{77.78,141.76}, {12.15,44.59},{30.91,62.88},{38.67,82.49},{28.22,69.36}, {68.52,138.65},{ 5.79,30.43},{68.09,132.39},{42.05,95.70}, {22.47,66.77},{98.94,170.71},{ 2.21,41.82},{ 4.23,36.85}, {72.95,130.22},{ 5.72,39.85},{18.98,59.65},{82.71,164.52}, {25.76,84.61},{45.00,100.59},{52.37,121.13},{94.86,183.47}, {99.24,184.57},{25.70,71.98},{38.77,92.04},{32.30,93.67}, 
{92.81,176.51},{59.70,127.12},{49.47,111.06},{32.27,93.66}, {51.23,113.48},{86.92,155.44},{19.42,52.61},{ 4.23,55.05}, {14.55,47.34},{31.44,77.54},{38.96,97.88},{90.82,146.84}, {90.21,160.70},{20.25,73.58},{65.81,126.12},{85.24,166.37}, {18.43,78.38},{61.83,122.91},{96.43,173.09},{35.20,77.21}, {94.27,179.72},{38.66,93.33},{59.81,120.63},{22.72,55.84}, {95.34,171.29},{ 7.10,45.24},{10.56,43.26},{59.61,129.82}, {85.38,141.68},{81.49,133.59},{16.78,57.23},{27.71,65.19}, {34.88,73.94},{49.09,92.77},{49.75,105.73},{91.65,158.88}, {70.73,151.27},{18.05,68.53},{95.34,170.00},{76.50,133.10}, { 2.33,31.45},{60.03,102.32},{13.60,53.89},{52.12,97.60}, {58.80,127.56},{20.34,54.56},{15.48,56.33},{40.91,93.16}, {20.99,65.23},{76.05,128.21},{71.45,137.84},{29.67,86.72}, {43.66,84.36},{89.82,148.28},{48.44,106.55},{50.31,93.99}, {13.38,61.45},{49.11,102.92},{38.02,85.42},{10.03,45.76}, {11.30,46.16},{47.14,92.08},{16.51,46.01},{62.51,126.00}, {69.96,149.33},{55.16,110.34},{72.40,120.72},{ 2.51,44.67}, {69.40,125.13},{ 3.01,37.43},{17.79,60.57},{15.87,58.84}, { 1.35,44.57},{42.39,105.71},{89.37,154.86},{55.35,129.53}, { 8.80,47.82},{18.08,61.18},{15.11,63.97},{24.54,63.57}, {76.89,153.13},{ 5.35,29.49},{14.11,45.19},{36.72,92.87}, {83.69,167.04},{91.54,156.23},{18.21,59.17},{69.03,127.84}, {79.85,139.89},{74.79,145.94},{74.70,137.21},{96.06,172.21}, {35.03,89.17},{76.02,143.77},{61.20,112.20},{94.85,169.84}, {23.73,57.48},{ 9.52,53.46},{ 2.20,33.52},{89.67,150.08}, {22.35,81.01},{60.50,118.26},{75.89,126.14},{63.18,118.22}, {89.71,154.73},{19.60,36.96},{96.58,177.52},{53.72,114.16}, {72.10,148.54},{ 5.22,34.93},{ 4.59,31.68},{77.37,137.07}, {34.25,83.15},{77.21,143.58},{66.03,127.89},{79.36,143.24}, {50.58,113.56},{53.20,91.02},{40.28,102.91},{33.55,70.36}, {84.86,136.18},{92.14,166.33},{20.87,73.45},{19.78,50.49}, {60.79,133.64},{35.19,78.95},{36.78,89.51},{79.79,152.32}, {77.03,153.89},{62.22,119.01},{30.27,73.10},{68.55,139.53}, 
{78.63,155.84},{70.57,141.69},{21.34,62.09},{13.82,61.04},
{57.71,125.42},{70.10,135.80},{33.75,71.82},{ 8.10,24.70},
{ 7.97,20.36},{99.62,177.94},{61.96,124.03},{59.84,86.03},
{ 6.92,25.96},{58.05,93.59},{19.87,51.04},{52.27,95.82},
{78.97,149.47},{97.92,167.61},{50.73,95.03},{33.97,69.40},
{49.57,92.86},{91.48,171.48},{85.74,150.32},{57.90,131.07},
{76.31,135.99},{ 3.18,39.17},{89.70,174.49},{24.76,80.90},
{98.38,180.22},{72.31,134.82},{98.30,167.99},{60.53,133.04},
{17.61,61.16},{91.45,167.76},{96.45,190.83},{27.35,79.14},
{44.17,95.88},{84.85,154.04},{65.51,133.25},{55.81,99.39},
{30.97,100.98},{72.66,149.76},{ 6.61,46.97},{72.24,141.63},
{32.62,86.42},{94.97,149.19},{34.26,83.94},{67.17,122.29},
{10.06,54.72},{26.55,81.33},{ 1.42,42.87},{57.35,108.15},
{83.56,168.69},{66.75,126.86},{21.58,50.05},{29.57,78.12},
{90.77,156.82},{99.23,180.95},{53.72,96.08},{18.60,66.55},
{34.47,90.78},{16.06,73.57},{45.18,92.22},{39.41,89.93},
{45.16,106.32},{88.85,180.75},{41.20,87.77},{27.75,49.39},
{11.75,40.45},{ 8.14,42.90},{91.19,155.95},{52.97,91.85},
{98.85,172.11},{70.94,129.96},{63.27,115.87},{81.61,165.53},
{61.87,139.53},{88.25,165.17},{32.01,74.16},{30.38,68.49},
{39.41,101.93},{41.57,91.22},{63.90,120.81},{87.25,151.40},
{42.66,96.20},{25.67,80.13},{87.04,145.31},{55.69,102.92},
{10.13,61.05},{ 7.92,44.48},{34.58,73.42},{73.13,137.99},
{40.21,90.86},{72.55,132.36},{15.85,48.75},{49.42,106.04},
{88.93,166.91},{57.21,120.80},{45.50,96.14},{77.54,144.76},
{ 7.04,47.78},{33.27,69.61},{61.29,130.83},{34.89,97.04},
{67.35,114.88},{14.56,41.19},{ 0.05,42.80},{11.91,62.81},
{72.50,140.75},{29.16,67.27},{12.44,58.40},{70.85,139.97},
{95.44,156.19},{53.73,110.00},{58.34,128.98},{24.20,69.91}
};

/* Squared residual of the line y = m*x + c at one data point (host). */
double residual_error(double x, double y, double m, double c) {
  double e = (m * x) + c - y;
  return e * e;
}

/* Device copy of residual_error for use inside the kernel. */
__device__ double d_residual_error(double x, double y, double m, double c) {
  double e = (m * x) + c - y;
  return e * e;
}

/* Root-mean-square error of the line over all n_data points (host reference,
   used once to seed the search). */
double rms_error(double m, double c) {
  int i;
  double mean;
  double error_sum = 0;

  for(i=0; i<n_data; i++) {
    error_sum += residual_error(data[i].x, data[i].y, m, c);
  }

  mean = error_sum / n_data;
  return sqrt(mean);
}

/* One thread per data point: store that point's squared residual into
   error_sum_arr[i]. The launch must supply exactly n_data (1000) threads;
   there is no bounds check. */
__global__ void d_rms_error(double *m, double *c, double *error_sum_arr,
                            point_t *d_data) {
  int i = threadIdx.x + blockIdx.x * blockDim.x;

  error_sum_arr[i] = d_residual_error(d_data[i].x, d_data[i].y, *m, *c);
}

// Calculate the difference between two times. Returns zero on
// success and the time difference through an argument. It will
// be unsuccessful if the start time is after the end time.
int time_difference(struct timespec *start, struct timespec *finish,
                    long long int *difference) {
  long long int d_sec  = finish->tv_sec - start->tv_sec;
  long long int d_nsec = finish->tv_nsec - start->tv_nsec;

  if(d_nsec < 0 ) {
    d_sec--;
    d_nsec += 1000000000;
  }
  *difference = d_sec * 1000000000 + d_nsec;
  return !(*difference > 0);
}

/* Hill-climbing search for the least-RMS-error line through data[]:
   each iteration evaluates 8 neighbouring (m,c) candidates on the GPU and
   moves to the best one until no neighbour improves. */
int main() {
  int i;
  double bm = 1.3;                 /* current best slope */
  double bc = 10;                  /* current best intercept */
  double be;                       /* current best error */
  double dm[8];
  double dc[8];
  double e[8];
  double step = 0.01;
  double best_error = 999999999;
  int best_error_i;
  int minimum_found = 0;

  /* Offsets of the 8 neighbouring (m,c) candidates around the current best. */
  double om[] = {0,1,1, 1, 0,-1,-1,-1};
  double oc[] = {1,1,0,-1,-1,-1, 0, 1};

  struct timespec start, finish;
  long long int time_elapsed;

  clock_gettime(CLOCK_MONOTONIC, &start);

  cudaError_t error;
  double *d_dm;
  double *d_dc;
  double *d_error_sum_arr;
  point_t *d_data;

  be = rms_error(bm, bc);

  error = cudaMalloc(&d_dm, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }

  error = cudaMalloc(&d_dc, (sizeof(double) * 8));
  if(error){
    fprintf(stderr, "cudaMalloc on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }

  error = cudaMalloc(&d_error_sum_arr, (sizeof(double) * 1000));
  if(error){
    fprintf(stderr, "cudaMalloc on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }

  error = cudaMalloc(&d_data, sizeof(data));
  if(error){
    fprintf(stderr, "cudaMalloc on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }

  while(!minimum_found) {
    for(i=0;i<8;i++) {
      dm[i] = bm + (om[i] * step);
      dc[i] = bc + (oc[i] * step);
    }

    error = cudaMemcpy(d_dm, dm, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dm returned %d %s\n", error,
        cudaGetErrorString(error));
    }

    error = cudaMemcpy(d_dc, dc, (sizeof(double) * 8), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_dc returned %d %s\n", error,
        cudaGetErrorString(error));
    }

    error = cudaMemcpy(d_data, data, sizeof(data), cudaMemcpyHostToDevice);
    if(error){
      fprintf(stderr, "cudaMemcpy to d_data returned %d %s\n", error,
        cudaGetErrorString(error));
    }

    for(i=0;i<8;i++) {
      double h_error_sum_arr[1000];
      /* BUGFIX: error_sum_total was declared uninitialized and read by the
         += below on the first pass — undefined behaviour. Initialize to 0
         (which also makes the old end-of-loop reset redundant). */
      double error_sum_total = 0;
      double error_sum_mean;

      /* 100 blocks x 10 threads = 1000 threads, one per data point. */
      d_rms_error <<<100,10>>>(&d_dm[i], &d_dc[i], d_error_sum_arr, d_data);
      /* cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
         the supported replacement. */
      cudaDeviceSynchronize();

      error = cudaMemcpy(h_error_sum_arr, d_error_sum_arr,
        (sizeof(double) * 1000), cudaMemcpyDeviceToHost);
      if(error){
        fprintf(stderr, "cudaMemcpy to error_sum returned %d %s\n", error,
          cudaGetErrorString(error));
      }

      for(int j=0; j<n_data; j++) {
        error_sum_total += h_error_sum_arr[j];
      }

      error_sum_mean = error_sum_total / n_data;
      e[i] = sqrt(error_sum_mean);

      if(e[i] < best_error) {
        best_error = e[i];
        best_error_i = i;
      }
    }

    if(best_error < be) {
      /* A neighbour improved: move there and search again. */
      be = best_error;
      bm = dm[best_error_i];
      bc = dc[best_error_i];
    } else {
      minimum_found = 1;
    }
  }

  error = cudaFree(d_dm);
  if(error){
    fprintf(stderr, "cudaFree on d_dm returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }

  error = cudaFree(d_dc);
  if(error){
    fprintf(stderr, "cudaFree on d_dc returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }

  error = cudaFree(d_data);
  if(error){
    fprintf(stderr, "cudaFree on d_data returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }

  error = cudaFree(d_error_sum_arr);
  if(error){
    fprintf(stderr, "cudaFree on d_error_sum_arr returned %d %s\n", error,
      cudaGetErrorString(error));
    exit(1);
  }

  printf("minimum m,c is %lf,%lf with error %lf\n", bm, bc, be);

  clock_gettime(CLOCK_MONOTONIC, &finish);
  time_difference(&start, &finish, &time_elapsed);
  printf("Time elapsed was %lldns or %0.9lfs\n", time_elapsed,
    (time_elapsed/1.0e9));

  return 0;
}
12,899
#include "includes.h"

// Sets d_flags[i] = true wherever energy minus fatigue exceeds theta.
// Uses a grid-stride loop, so any launch configuration covers all n elements.
// Assumes n >= 0 (n is compared against an unsigned index).
__global__ void updatePhi_kernel(int n, bool* d_flags, float* d_energy, float* d_fatigue, float theta)
{
    unsigned int index = threadIdx.x + blockDim.x * blockIdx.x;
    unsigned int stride = blockDim.x * gridDim.x;

    while (index < n)
    {
        // Idiom fix: the comparison already yields the bool;
        // the former "cond ? true : false" was redundant.
        d_flags[index] = (d_energy[index] - d_fatigue[index]) > theta;
        index += stride;
    }
}
12,900
#include "includes.h"

// Device helper: fills buf[0..s) with a running cosine sum, squares the first
// half of the buffer in place, then folds the buffer into a single float.
// Purely a synthetic workload to keep the kernel non-empty.
__device__ float compute(int idx, float* buf, int s)
{
    // Running prefix sum of cosines, written into buf.
    float acc = 0.0f;
    for (int i = 0; i < s; ++i) {
        acc += cosf(i * 0.1f * idx);
        buf[i] = acc;
    }

    // Square the first half of the buffer in place.
    for (int i = 0; i < s / 2; ++i) {
        float v = buf[i];
        buf[i] = v * v;
    }

    // Fold from the top of the buffer downward. The descending order is
    // preserved on purpose: float summation order affects rounding.
    float total = 0.0f;
    for (int i = s - 1; i >= 1; --i) {
        total += buf[i - 1] / (fabsf(buf[i]) + 0.1f);
    }
    return total;
}

// One thread per output element; each thread owns an s-sized slice of buf.
__global__ void testWithGlobal(int n, int s, float* result, float* buf)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= n) {
        return;
    }
    result[tid] = compute(tid, buf + tid * s, s);
}