serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
7,401 | #include <iostream>
#include <math.h>
#include <stdio.h>
#include <time.h>
// Grid-stride kernel: each thread accumulates atan(r1) over its share of the
// n iterations and prints its per-thread total. The caller launches this with
// <<<1, 1>>>, so one thread does all n additions.
__global__ void op_single(int n, double r1) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int step = blockDim.x * gridDim.x;
    // Accumulate in double: atan(r1) is a double, and the previous float
    // accumulator both truncated each term and lost precision over ~4M adds.
    double total = 0.0;
    for (int i = index; i < n; i += step) {
        total += atan(r1);
    }
    printf("tot single: %f\n", total);
}
// Grid-stride kernel: thread t adds atan(r1) into elements t, t+stride,
// t+2*stride, ... of device_arr, covering all n elements for any grid size.
__global__ void op_multi(int n, double r1, float *device_arr) {
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += stride) {
        device_arr[i] += atan(r1);
    }
}
// Benchmark: sum 4M atan(r1) terms with a massively parallel kernel
// (per-element array adds reduced on the host) versus one GPU thread.
int main(int argc, char *argv[]) {
    srand(42);
    // Random operand in [0, 1], shared by both kernels.
    double r1 = ((double) rand()) / ((double) (RAND_MAX));
    int n_iterations = 4194304;

    // ---- multi-threaded version ----
    printf("running multi\n");
    int block_size = 1024;
    int blocks_per_grid = n_iterations / block_size;  // n is a multiple of 1024
    clock_t t = clock();
    size_t mem_size = n_iterations * sizeof(float);
    printf("using byte: %d\n", (int) mem_size);
    float *host_arr = (float *) malloc(mem_size);
    for (int i = 0; i < n_iterations; i++) {
        host_arr[i] = 0.0;
    }
    float *device_arr = NULL;
    cudaMalloc((void **) &device_arr, mem_size);
    cudaMemcpy(device_arr, host_arr, mem_size, cudaMemcpyHostToDevice);
    clock_t t2 = clock();
    printf("num grids: %d, num threads: %d\n", blocks_per_grid, block_size);
    op_multi<<<blocks_per_grid, block_size>>>(n_iterations, r1, device_arr);
    // Kernel launches are asynchronous: synchronize so t2 measures the kernel
    // itself rather than just the launch overhead.
    cudaDeviceSynchronize();
    t2 = clock() - t2;
    cudaMemcpy(host_arr, device_arr, mem_size, cudaMemcpyDeviceToHost);
    float sum = 0.0;
    for (int i = 0; i < n_iterations; i++) {
        sum += host_arr[i];
    }
    t = clock() - t;
    printf("tot multi: %f\n", sum);
    cudaFree(device_arr);
    free(host_arr);
    // Convert clock() ticks with CLOCKS_PER_SEC; the previous hard-coded
    // 1000000 is only correct where CLOCKS_PER_SEC happens to be 1e6.
    printf("It took GPU multi with malloc: %f s.\n", ((float) t) / CLOCKS_PER_SEC);
    printf("It took GPU multi kernel only: %f s.\n", ((float) t2) / CLOCKS_PER_SEC);

    // ---- single-thread version ----
    printf("running single\n");
    t = clock();
    op_single<<<1, 1>>>(n_iterations, r1);
    cudaDeviceSynchronize();
    printf("It took GPU single %f s.\n", ((float) (clock() - t)) / CLOCKS_PER_SEC);
    return 0;
}
|
7,402 | // Copyright 2016 Boris Dimitrov, Portola Valley, CA 94028.
// Questions? Contact http://www.facebook.com/boris
//
// This program counts all permutations of the sequence 1, 1, 2, 2, 3, 3, ..., n, n
// in which the two appearances of each m are separated by precisely m other numbers.
// These permutations are called Langford pairings. For n=3, only one such exists,
// (modulo left-to-right reversal): 2, 3, 1, 2, 1, 3. Pairings exist only for n
// that are congruent with 0 or 3 mod 4, and their count grows very rapidly with n.
// See http://www.dialectrix.com/langford.html or Knuth volume 4a page 1 (chapter 7).
//
// The crux of this program does not use off-chip memory; everything fits in GPU L2
// and (I hope) in CPU L1 cache. No floating point, just int ops and array derefs.
//
// Compile on ubuntu with cuda8rc as follows:
//
// nvcc -o langford -std=c++11 langford.cu
//
// Comparing this same algorithm on three different processors,
//
// 22nm i7-4790K 14nm E5-2699v4 16nm TitanX Pascal GPU
// 4c @ 4 GHz 22c @ 2.8 GHz 28c @ 1.82 GHz
// --------------------------------------------------------------------------
//
// n = 15 28.1 seconds 8.9 seconds 2.35 seconds
//
// n = 16 309.1 seconds 92.4 seconds 23.5 seconds
//
// n = 19 n/a 41.62 hours 10.72 hours
// --------------------------------------------------------------------------
// normalized 1.0x 3.25x 12.5x
// perf units
//
// cost per
// unit perf $335 $1,415 $96
// (fall 2016)
//
// Matching perf to hardware,
//
// 1.0x == Late-2014 iMac5k with i7-4790K CPU running all 4 cores at 4 GHz
//
// 3.25x == Early-2016 Xeon E5-2699 v4 CPU running all 22 cores at 2.8 GHz
// (overall about 3.25x faster than the reference iMac)
//
// 12.5x == Q3-2016 Titan X Pascal GPU running all 28 SM's at 1.82 GHz
//
// 25x == dual GPU (projection)
//
// Each GPU SM is 3.05x faster than its Xeon counterpart (2.67x with Turbo Boost).
// Overall, the $1,200 Pascal GPU is 3.9x faster than the $4,600 Broadwell CPU.
// That makes GPU 15x cheaper than CPU per unit of perf.
//
// This is GPU version z3 and the comparison was to CPU version z2
// (available upon request).
//
// The value of N is the only "input" to this program.
constexpr int n = 16;
static_assert(n >= 7, "so we can unroll n <= 6");
#include <iostream>
#include <chrono>
using namespace std;
using chrono::steady_clock;
#include <cuda_runtime.h>
// Thin wrapper so a chrono duration streams as a plain seconds count.
struct Seconds {
    std::chrono::duration<double> d;
    Seconds(std::chrono::duration<double> d_) : d(d_) {}
};
// Stream the wrapped duration as its double-valued second count.
std::ostream& operator << (std::ostream& out, const Seconds& seconds) {
    out << seconds.d.count();
    return out;
}
// Derived constants: the board has 2*n positions, one bit each.
constexpr int n_minus_1 = n - 1;
constexpr int two_n = 2 * n;
// n rounded up to the next multiple of 4, used to pad shared-memory rows.
constexpr int padded_n = ((n + 3) >> 2) << 2;
constexpr int64_t lsb = 1;
// Bit for the highest board position (2*n - 1), and a mask with all 2*n
// position bits set.
constexpr int64_t msb = lsb << (2 * n - 1);
constexpr int64_t full = msb | (msb - 1);
// Fewer threads per block => more threads can be packed in L2 => higher "occupancy",
// which would normally be a good thing, except... it causes higher power draw,
// and occupancy > 0.25 actually results in lower perf.
//
// for n=15, threads_per_block=12 is optimal, with 16, 8, and 32 all pretty close
// for n=16, threads_per_blocks=16 and 32 take 23.5 seconds; 12 takes 24.1 seconds; 8 takes 25 sec
// for n=19, threads_per_blocks=32 takes 13.13 hours; 16 takes 11 hours; 8 takes 10.7 hours
// so 8 works fine overall, but we can micro-optimize for some n
constexpr int threads_per_block = (n == 15) ? 12
: (n == 16) ? 16
: 8;
// Integer ceiling division: smallest k such that k * q >= p (requires q > 0).
constexpr int div_up(int p, int q) {
    return (p + q - 1) / q;
}
// We launch (n-1) * (2*n) * (2*n) * (2*n) threads. Each thread starts with a specific initial
// placement of m = 1, 2, 3, 4 and its job is to extend and complete that placement in every
// possible way for m = 5, 6, ..., 2*n. The placement of m = 1 is restricted to positions
// 0, 1, ..., n-2 in order to avoid double-counting left-to-right reversal twins.
//
// The threads are organized in a 3D grid of blocks, with each block comprising a small number
// of threads, say 8 or 16 or 32. The threads within a block share the L2 of their streaming
// multiprocessor. They divide that L2 into separate areas used for their stacks and heaps.
//
// All threads in a block share the placement of m = 3 and m = 4 which is directly given by
// the block's y and z coordinates. The block's x coordinate and each thread's x index together
// encode the placement of m = 1 and m = 2 in that thread.
//
// Alternative mappings (for example, a simple 1-D mapping) are possible, with similar perf.
// This 3-D one was chosen after the 1-D version bumped into some undocumented per-dimension
// limits in Cuda-8rc.
// Grid shape: x packs the (m=1, m=2) placement pairs across blocks/threads;
// y and z directly encode the left positions of m=3 and m=4.
constexpr int blocks_x = div_up((n-1)*(2*n), threads_per_block);
constexpr int blocks_y = 2*n;
constexpr int blocks_z = 2*n;
dim3 blocks(blocks_x, blocks_y, blocks_z);
// One result slot per launched thread, including padding threads that never
// write (their slots keep the host-initialized sentinel value).
constexpr int total_threads_padded = blocks_x * blocks_y * blocks_z * threads_per_block;
// Per-thread depth-first search over Langford placements. Each thread starts
// from a fixed placement of m = 1..4 (decoded from its grid coordinates) and
// counts all completions for m = 5..n; the count is written to its private
// slot in d_count. Threads whose decoded start is invalid write nothing,
// leaving the host's -1 sentinel in place.
__global__ void dfs(int64_t* d_count) {
// shared vars are per thread-block; the author expects them to be served from
// on-chip cache (local scalar vars are per thread and held in registers).
// NOTE(review): __shared__ normally maps to the SM's shared memory, not L2 —
// the original "L2" comments may be a Pascal-specific claim; confirm.
__shared__ int64_t l2_stacks[threads_per_block][padded_n << 1];
__shared__ int64_t l2_heaps[threads_per_block][padded_n];
int64_t a, m, pos_k, avail, cnt;
int top;
// Nomenclature:
//
// 0 <= me <= n-2 is the left pos for m = 1
// 0 <= me2 < 2*n is the left pos for m = 2, and me2r = me2 + 3 is the right pos
// 0 <= me3 < 2*n is the left pos for m = 3, and me3r = me3 + 4 is the right pos
// 0 <= me4 < 2*n is the left pos for m = 4, and me4r = me4 + 5 is the right pos
//
// By construction,
//
// me3 = blockIdx.y,
// me4 = blockIdx.z,
// me and me2 are packed, in a special way, into (blockIdx.x, threadIdx.x)
//
const int tmp = blockIdx.x * threads_per_block + threadIdx.x;
const int me = tmp / two_n;
if (me < n_minus_1) {
const int me2r = tmp - me * two_n + 3;
const int me3r = blockIdx.y + 4;
const int me4r = blockIdx.z + 5;
if (me2r < two_n && me3r < two_n && me4r < two_n) {
// each a_i has exactly the two bits for m = i's left and right positions
const int64_t a1 = (lsb << me) | (lsb << (me + 2));
const int64_t a2 = (lsb << (me2r - 3)) | (lsb << (me2r));
const int64_t a3 = (lsb << blockIdx.y) | (lsb << (me3r));
const int64_t a4 = (lsb << blockIdx.z) | (lsb << (me4r));
a = a1 | a2 | a3 | a4;
// are a1, a2, a3, a4 pairwise disjoint? that means we have valid placement for m=1,2,3,4
// (OR equals sum exactly when no bit is shared)
if (a == (a1 + a2 + a3 + a4)) {
// compute all positions where m = 5 can be placed, given that m=1,2,3,4 have been
// placed already in the positions given by a1, a2, a3, a4 above
avail = a ^ full; // invert a; note upper bits >= pos 2*n are all 1 (important
avail &= (avail >> 6); // for the correctness of these two lines as a block).
// can our valid placement for m=1,2,3,4 be continued for m=5?
if (avail) {
cnt = 0;
m = 5;
top = 0;
// record all possible continuations for m=5 into the stack and start DFS loop
auto& stack = l2_stacks[threadIdx.x];
auto& heap = l2_heaps[threadIdx.x];
stack[top++] = avail;
stack[top++] = m;
heap[m-1] = a;
// stack holds (avail, m) pairs; heap[m-1] caches the board "a" before m was placed
while (top) {
m = stack[top - 1];
avail = stack[top - 2];
// extract the lowest bit that is set to 1 in avail
pos_k = avail & ~(avail - 1);
// clear that bit
avail ^= pos_k;
// "pop" that bit from the hybrid stack s
if (avail) {
stack[top - 2] = avail;
} else {
top -= 2;
}
// place m in that position
a = heap[m-1] | pos_k | (pos_k << (m + 1));
++m;
// the "avail" computed below has bit "k" set to 1 if and only if
// both of the positions "k" and "k + m + 1" in "a" contain 0
avail = a ^ full;
avail &= (avail >> (m + 1));
if (avail) {
if (m == n) {
// we've found another langford pairing, count it
++cnt;
} else {
// push all possible ways to place m, to be explored in subsequent iterations
stack[top++] = avail;
stack[top++] = m;
heap[m-1] = a;
}
}
}
// Write this thread's result to off-chip memory.
const int blid = blockIdx.x + blocks_x * (blockIdx.y + blocks_y * blockIdx.z);
d_count[blid * threads_per_block + threadIdx.x] = cnt;
}
}
}
}
}
// "Checked do": no-op on success, otherwise print the failed action and the
// CUDA error string, then terminate the process.
void cdo(const char* txt, cudaError_t err) {
    if (err != cudaSuccess) {
        cout << "Failed to " << txt << " (Error code " << cudaGetErrorString(err) << ")\n" << flush;
        exit(-1);
    }
}
// Orchestrate the whole computation: allocate one counter slot per thread,
// launch the dfs kernel over the full initial-placement grid, reduce the
// per-thread counts on the host, and compare the total against the table of
// known Langford numbers.
void run() {
    int64_t* count;    // pinned host mirror of the per-thread results
    int64_t* d_count;  // device buffer, one slot per (padded) thread
    int64_t total;
    {
        cout << "\n";
        cout << "\n";
        cout << "------\n";
        cout << "Computing Langford number L(2,n) for n = " << n << ".\n";
        cout << "\n";
        cout << "GPU init " << flush;
    }
    auto t0 = steady_clock::now();
    auto seconds_since = [](decltype(t0) start) {
        return Seconds(steady_clock::now() - start);
    };
    cudaEvent_t start, stop;
    cdo("create start timer event",
        cudaEventCreate(&start));
    cdo("create stop timer event",
        cudaEventCreate(&stop, cudaEventBlockingSync));
    cdo("allocate host memory",
        cudaMallocHost(&count, total_threads_padded * sizeof(*count)));
    // -1 marks "no thread wrote here": the reduction below skips negative
    // slots, so padding threads that never ran do not contribute.
    for (int i=0; i<total_threads_padded; ++i) {
        count[i] = -1;
    }
    cdo("allocate GPU memory",
        cudaMalloc(&d_count, total_threads_padded * sizeof(*count)));
    // Fix: this transfer goes host -> device, so the direction flag must be
    // cudaMemcpyHostToDevice (it previously said cudaMemcpyDeviceToHost).
    cdo("copy initial values from host to GPU",
        cudaMemcpy(d_count, count, sizeof(*count) * total_threads_padded, cudaMemcpyHostToDevice));
    cdo("insert start event",
        cudaEventRecord(start));
    {
        cout << "took " << seconds_since(t0) << " sec.\n";
        cout << "\n";
        cout << "Dispatching " << total_threads_padded << " threads (" << threads_per_block << " per block, "
             << blocks.x << " x " << blocks.y << " x " << blocks.z << " blocks).\n" << flush;
    }
    auto t1 = steady_clock::now();
    dfs<<<blocks, threads_per_block>>>(d_count);
    // cudaPeekAtLastError reads without clearing: catches bad launch configs.
    cdo("DFS kernel launch",
        cudaPeekAtLastError());
    cdo("insert stop event",
        cudaEventRecord(stop));
    {
        cout << "\n";
        cout << "GPU computation " << flush;
    }
    cdo("wait for GPU computation to complete",
        cudaEventSynchronize(stop));
    float milliseconds = 0;
    cdo("compute GPU elapsed time",
        cudaEventElapsedTime(&milliseconds, start, stop));
    {
        cout << "took " << milliseconds / 1000.0 << " sec on GPU clock, "
             << seconds_since(t1) << " sec on host clock.\n";
        cout << "\n";
        cout << "CPU reduction " << flush;
    }
    auto t2 = steady_clock::now();
    cdo("copy result from GPU to host",
        cudaMemcpy(count, d_count, sizeof(*count) * total_threads_padded, cudaMemcpyDeviceToHost));
    total = 0;
    for (int i=0; i<total_threads_padded; ++i) {
        if (count[i] >= 0) {   // skip -1 sentinel slots of threads that never ran
            total += count[i];
        }
    }
    {
        // Reference values indexed by n; zero entries mean "no pairings exist
        // or value unknown" (pairings only exist for n ≡ 0 or 3 mod 4).
        int64_t known_results[64];
        memset(&known_results[0], 0, sizeof(known_results));
        known_results[3] = 1;
        known_results[4] = 1;
        known_results[7] = 26;
        known_results[8] = 150;
        known_results[11] = 17792;
        known_results[12] = 108144;
        known_results[15] = 39809640ll;
        known_results[16] = 326721800ll;
        known_results[19] = 256814891280ll;
        known_results[20] = 2636337861200ll;
        known_results[23] = 3799455942515488ll;
        known_results[24] = 46845158056515936ll;
        // beyond 23, count does not fit in 64 bits and is not definitively known!
        cout << "took " << seconds_since(t2) << " sec.\n";
        cout << "\n";
        cout << "Result " << total << " for n = " << n << " " << flush;
        cout << ((total == known_results[n]) ? "matches" : "** DOES NOT MATCH **")
             << " previously known result.\n";
        cout << "------\n";
        cout << "\n";
        cout << "\n";
    }
    cdo("free GPU memory for result",
        cudaFree(d_count));
    cdo("free host memory for result",
        cudaFreeHost(count));
}
// Entry point: n is a compile-time constant, so no arguments are read.
int main(int argc, char** argv) {
run();
return 0;
}
|
7,403 | /**
* @file add1.cu
* @brief this example is for testing cudaMemcpyFrom/ToSymbol
*
* @date Apr 27, 2011
* @author Magda Slawinska, magg __at_ gatech __dot_ edu
*/
#include <stdio.h>
#include <string.h> /* strcmp */
#include <cuda.h>
#define MAX 14
__device__ char name_device[MAX];
__device__ int tab_d[MAX];
__constant__ __device__ char hw[] = "Hello World!\n";
// One block per character (launched <<<MAX, 1>>>): copy hw[] into
// name_device and square tab_d in place, indexed by blockIdx.x.
__global__ void helloWorldOnDevice(void) {
int idx = blockIdx.x;
name_device[idx] = hw[idx];
tab_d[idx] *= tab_d[idx];
}
// Increment one element of tab_d per block (launched <<<MAX, 1>>>).
__global__ void inc(void) {
    tab_d[blockIdx.x] += 1;
}
// Exercise cudaMemcpyTo/FromSymbol: square tab_d on the device, read back the
// device-built "Hello World!" string, then increment tab_d and verify again.
int main(void) {
    int tab_h[MAX];
    int tab_h1[MAX];
    int i;
    char name_host[MAX];
    for (i = 0; i < MAX; i++)
        tab_h[i] = i;
    // symbol as a pointer
    cudaMemcpyToSymbol(tab_d, tab_h, sizeof(int) * MAX, 0,
            cudaMemcpyHostToDevice);
    helloWorldOnDevice <<< MAX, 1 >>> ();
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // supported equivalent.
    cudaDeviceSynchronize();
    // Copy all MAX bytes so the '\0' the kernel wrote from hw[] is included;
    // copying only 13 bytes left name_host unterminated, making the strcmp
    // below read an indeterminate byte.
    cudaMemcpyFromSymbol(name_host, name_device, sizeof(char) * MAX, 0,
            cudaMemcpyDeviceToHost);
    cudaMemcpyFromSymbol(tab_h1, tab_d, sizeof(int) * MAX, 0,
            cudaMemcpyDeviceToHost);
    printf("\n\nGot from GPU: %s\n", name_host);
    if (strcmp(name_host, "Hello World!\n") == 0)
        printf("Hello test: PASSED\n");
    else
        printf("Hello test: FAILED\n");
    for (i = 0; i < MAX; i++) {
        if (tab_h1[i] != (tab_h[i] * tab_h[i])) {
            printf("FAILED!\n");
            break;
        } else
            printf("tab_h1[%d] = %d\n", i, tab_h1[i]);
    }
    // ----------- pass the symbol itself again: string-name symbol lookup
    // (cudaMemcpyToSymbol("tab_d", ...)) was removed in CUDA 5.0.
    cudaMemcpyToSymbol(tab_d, tab_h, sizeof(int) * MAX, 0,
            cudaMemcpyHostToDevice);
    inc <<< MAX, 1 >>> ();
    cudaDeviceSynchronize();
    cudaMemcpyFromSymbol(tab_h1, tab_d, sizeof(int) * MAX, 0,
            cudaMemcpyDeviceToHost);
    for (i = 0; i < MAX; i++) {
        if (tab_h1[i] != (tab_h[i] + 1)) {
            printf("FAILED!\n");
            break;
        } else
            printf("tab_h1[%d] = %d\n", i, tab_h1[i]);
    }
    return 0;
}
|
7,404 | #include <iostream>
#include <limits>
// Read an int from std::cin into *in. If a previous extraction left the
// stream in a failed state, recover first: clear the error flags and discard
// the remainder of the offending line.
void GetInt(int *in)
{
    const bool needs_recovery = !std::cin;
    if (needs_recovery)
    {
        std::cin.clear();
        std::cin.ignore(std::numeric_limits<std::streamsize>::max(), '\n');
    }
    std::cin >> *in;
}
|
7,405 | /* Pi - CUDA version 1 - uses integers for CUDA kernels
* Author: Aaron Weeden, Shodor, May 2015
*/
#include <stdio.h> /* fprintf() */
#include <float.h> /* DBL_EPSILON() */
#include <math.h> /* sqrt() */
// Compute one rectangle of the quarter-circle Riemann sum per thread.
// threadId = threadIdx.x + offset indexes the rectangle; the store is guarded
// so threads past numRects write nothing.
__global__ void calculateAreas(int offset, const int numRects, const double width,
double *dev_areas) {
    const int threadId = threadIdx.x + offset;
    const double x = threadId * width;
    const double heightSq = 1.0 - x * x;
    double height;
    if (heightSq < DBL_EPSILON) {
        height = 0.0;  // clamp: avoid sqrt of a numerically negative value
    } else {
        height = sqrt(heightSq);
    }
    if (threadId < numRects) {
        dev_areas[threadId] = width * height;
    }
}
/* Approximate the area under sqrt(1 - x^2) with numRects rectangles computed
 * on the GPU, then reduce the per-rectangle areas on the host into *area.
 * Rectangles are processed in passes of at most maxThreadsPerBlock threads. */
void calculateArea(const int numRects, double *area) {
    double *areas = (double*)malloc(numRects * sizeof(double));
    double *dev_areas;
    int i = 0;
    cudaError_t err;
    if (areas == NULL) {
        fprintf(stderr, "malloc failed!\n");
        return; /* cannot continue without the host buffer */
    }
    err = cudaMalloc((void**)&dev_areas, (numRects * sizeof(double)));
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
    }
    int device;
    cudaGetDevice(&device);
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    printf("Max number of threads per block = %i \n", prop.maxThreadsPerBlock);
    if (numRects > prop.maxThreadsPerBlock) {
        /* Integer ceil-divide. The previous ceil((double)(a / b)) divided in
         * integer arithmetic first, truncating away the final partial pass. */
        int numPasses = (numRects + prop.maxThreadsPerBlock - 1) / prop.maxThreadsPerBlock;
        for (int pass = 0; pass < numPasses; pass++) {
            int offset = (prop.maxThreadsPerBlock * pass);
            /* Launch at most maxThreadsPerBlock threads per block; the old
             * <<<1, numRects>>> exceeded the hardware block limit here, which
             * is an invalid configuration (the kernel never runs). The kernel
             * guards threadId < numRects, so the last pass may be partial. */
            calculateAreas<<<1, prop.maxThreadsPerBlock>>>(offset, numRects, (1.0 / numRects), dev_areas);
        }
    }
    else {
        calculateAreas<<<1, numRects>>>(0, numRects, (1.0 / numRects), dev_areas);
    }
    err = cudaMemcpy(areas, dev_areas, (numRects * sizeof(double)),
            cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        /* message previously said "cudaMalloc" for this memcpy failure */
        fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    }
    (*area) = 0.0;
    for (i = 0; i < numRects; i++) {
        (*area) += areas[i];
    }
    cudaFree(dev_areas);
    free(areas);
}
|
7,406 | #include <iostream>
using namespace std;
// Report a CUDA error with its source location, then terminate the program.
static void HandleError(cudaError_t err, const char *file, int line) {
    if (err == cudaSuccess) {
        return;
    }
    cout << cudaGetErrorString(err) << " in "
         << file << " at line " << line << endl;
    exit( EXIT_FAILURE );
}
#define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ ))
//#define MAGNITUDE (1)
#define MAGNITUDE (1024 * 1024)
#define NUM_BLOCKS 8 * MAGNITUDE
#define NUM_THREADS 16
#define NUM_ELEM 100 * MAGNITUDE
// Writes 1111 * idx into data[idx] for every launched thread.
// NOTE(review): main() allocates a single int yet launches NUM_BLOCKS *
// NUM_THREADS threads, so nearly every write is out of bounds — the comment
// below suggests this is a deliberate error-handling demo; confirm before
// "fixing" the indexing.
__global__ void kernel_compute(int* data) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// invalid access
data[idx] = 1111 * idx;
}
// Demo driver: allocates one int, then launches a huge grid whose threads
// write far past it, so the error surfaces at cudaDeviceSynchronize() via
// HANDLE_ERROR. The out-of-bounds access appears intentional (see kernel).
int main(int argc, char *argv[]) {
int* data = NULL;
// equivalent to the DIE macro
HANDLE_ERROR(cudaMalloc(&data, 1 * sizeof(int)));
// launch kernel
kernel_compute<<<NUM_BLOCKS, NUM_THREADS>>>(data);
HANDLE_ERROR(cudaDeviceSynchronize());
return 0;
} |
7,407 | /**
* Vector addition: C = A + B.
* nvcc
*/
#include <stdio.h>
#include <cuda.h>
// For the CUDA runtime routines (prefixed with "cuda_")
//#include <cuda_runtime.h>
// #include <iostream>
// using namespace std;
// #define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"]<<endl"
#define myprint printf("[%s:%d] ",__FILE__,__LINE__)
#define nums_thread_pre_block 256
// Abort with a formatted diagnostic (error string, file, line) whenever a
// CUDA API call reports failure; no-op on cudaSuccess.
static void HandleError(cudaError_t err, const char *file, int line)
{
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
/**检测*/
#define checkError(err) (HandleError( err, __FILE__, __LINE__ ))
// Device helper: store a + b into the output reference c.
template<typename T>
__device__ void add1(const T &a, const T &b, T &c)
{
    c = a + b;
}
// Alternative device helper: return a + b by value.
template<typename T>
__device__ T add2(const T &a, const T &b)
{
    return a + b;
}
// Element-wise C = A + B through the add2 helper; one thread per element,
// with a bounds guard for the (possibly partial) last block.
template<typename T>
__global__ void vectorAdd(const T *A, const T *B, T *C, const int numElements)
{
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    if (tid < numElements)
    {
        C[tid] = add2<T>(A[tid], B[tid]);
    }
}
// Element-wise C = A + B staged through dynamic shared memory. Launch with
// blockDim.x * sizeof(T) bytes of dynamic shared memory (third <<<>>> arg).
// Only instantiate for a single T per translation unit: an
// `extern __shared__` array name collides across template instantiations.
template<typename T>
__global__ void shared_vectorAdd(const T *A,const T *B, T *C, const int numElements)
{
    extern __shared__ T sdatas[]; // dynamic shared-memory staging buffer
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int tid = threadIdx.x;
    const bool in_range = (idx < numElements);
    // global memory --> shared memory (guarded for the tail block)
    if (in_range)
        sdatas[tid] = A[idx] + B[idx];
    // The barrier must be reached by EVERY thread of the block. The original
    // early-returned out-of-range threads before __syncthreads(), which is
    // undefined behavior whenever the last block is only partially in range.
    __syncthreads();
    // shared memory --> global memory
    if (in_range)
        C[idx] = sdatas[tid];
}
/** Take host input arrays, run the vector add on the GPU, and copy the
 * result back into h_c. Returns 0; CUDA failures abort via checkError. */
template<typename T>
int addInference(
const T *h_a,
const T *h_b,
T *h_c,
const int n // total number of elements
)
{
size_t size=n*sizeof(T); // int size=n*sizeof(T);
// declare device pointers and allocate GPU memory
T *d_a=NULL,*d_b=NULL,*d_c=NULL;
checkError(cudaMalloc((void **)&d_a, size));
checkError(cudaMalloc((void **)&d_b, size));
checkError(cudaMalloc((void **)&d_c, size));
// CPU --> GPU
checkError(cudaMemcpy(d_a,h_a,size,cudaMemcpyHostToDevice));
checkError(cudaMemcpy(d_b,h_b,size,cudaMemcpyHostToDevice));
// launch the GPU computation
dim3 block(nums_thread_pre_block,1,1);
dim3 grid((n+nums_thread_pre_block-1)/nums_thread_pre_block,1,1);
cudaStream_t stream; // create a stream; without one the default stream is used
checkError(cudaStreamCreate(&stream));
// vectorAdd<<<grid,block,0,stream>>>(d_a,d_b,d_c,n); // global memory
shared_vectorAdd<<<grid,block,nums_thread_pre_block*sizeof(T),stream>>>(d_a,d_b,d_c,n); // shared memory
checkError(cudaGetLastError());// launch vectorAdd kernel
// checkError(cudaDeviceSynchronize());// CPU waits for GPU; needed with multiple streams
// GPU --> CPU (cudaMemcpy on this stream blocks until the kernel finishes)
checkError(cudaMemcpy(h_c,d_c,size,cudaMemcpyDeviceToHost));
// free GPU
checkError(cudaFree(d_a));
checkError(cudaFree(d_b));
checkError(cudaFree(d_c));
// destroy the stream
checkError(cudaStreamDestroy(stream));
return 0;
}
// Driver: build two random float vectors, add them on the GPU via
// addInference, and verify each element against the CPU sum.
int main()
{
myprint;
const int N=50000;
size_t size=N*sizeof(float);
// CPU
float *h_a=NULL,*h_b=NULL,*h_c=NULL;
// Allocate pinned host memory: cudaMallocHost is more efficient than malloc
// for host<->device transfers.
checkError(cudaMallocHost((void**)&h_a,size));
checkError(cudaMallocHost((void**)&h_b,size));
checkError(cudaMallocHost((void**)&h_c,size));
// fill inputs with values in [0, 1]
// Initialize the host input vectors
for (int i = 0; i < N; ++i)
{
h_a[i] = rand()/(float)RAND_MAX;
h_b[i] = rand()/(float)RAND_MAX;
}
// run the GPU add
addInference<float>(h_a,h_b,h_c,N);
// check results with an absolute tolerance (float rounding)
// Verify that the result vector is correct
for (int i = 0; i < N; ++i)
{
if (fabs(h_a[i] + h_b[i] - h_c[i]) > 1e-5)
{
fprintf(stderr, "Result verification failed at element %d!\n", i);
exit(EXIT_FAILURE);
}
}
printf("Test PASSED\n");
// free CPU
// free(h_a); // only if the buffer had been allocated with malloc
checkError(cudaFreeHost(h_a));
checkError(cudaFreeHost(h_b));
checkError(cudaFreeHost(h_c));
return 0;
} |
7,408 | #include<cstdlib>
#include<time.h>
#include<cuda.h>
#include<iostream>
#include<math.h> //Included just to use the Power function
#define BLOCK_SIZE 32
#define TILE_SIZE 32
#define MAX_MASK_WIDTH 10
__constant__ float M[MAX_MASK_WIDTH];
using namespace std;
//====== Function made to print vector =========================================
// Print the first `length` entries of A, separated by " | ", ending the line.
void printVector(float *A, int length)
{
    for (int i = 0; i < length; ++i)
        std::cout << A[i] << " | ";
    std::cout << std::endl;
}
//====== Function made to fill the vector with some given value ================
// Set each of the first `length` entries of A to `value`.
void fillVector(float *A, float value, int length)
{
    for (int i = 0; i != length; ++i)
        A[i] = value;
}
//====== Compare results =======================================================
// Compare A and B element-wise and print a single verdict line.
// The original printed the "equal" line unconditionally — even after having
// reported mismatches — so every run ended claiming the results were equal.
// Now exactly one verdict is printed: NOT-equal on the first mismatch,
// otherwise equal.
void compareVector (float *A, float *B,int n)
{
    for (int i=0; i<n; i++ )
    {
        if (A[i]!=B[i])
        {
            std::cout<<"## Secuential and Parallel results are NOT equal ##"<<std::endl;
            return;
        }
    }
    std::cout<<"== Secuential and Parallel results are equal =="<<std::endl;
}
//====== Serial Convolution ====================================================
// CPU reference: 1-D convolution of `input` (length `length`) with `mask`
// (length `mask_length`), zero-padded at both boundaries; result in `output`.
void serialConvolution(float *input, float *output, float *mask, int mask_length, int length)
{
    const int half = mask_length / 2;
    for (int i = 0; i < length; i++)
    {
        const int first = i - half;   // leftmost input index touched by the mask
        float acc = 0.0f;
        for (int j = 0; j < mask_length; j++)
        {
            const int k = first + j;
            if (k >= 0 && k < length)  // skip positions outside the signal
                acc += input[k] * mask[j];
        }
        output[i] = acc;
    }
}
//====== Basic convolution kernel ==============================================
// Basic 1-D convolution: one output element per thread, reading the mask M
// from global memory (this parameter shadows the __constant__ M above).
// The grid is sized with ceil(length/block), so the surplus threads of the
// last block must be guarded — previously they wrote P[i] out of bounds.
__global__ void convolutionBasicKernel(float *N, float *M, float *P,
int Mask_Width, int Width)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= Width)
        return;  // surplus thread: no output element to produce
    float Pvalue = 0;
    int N_start_point = i - (Mask_Width/2);
    for (int j = 0; j < Mask_Width; j++)
    {
        if (N_start_point + j >= 0 && N_start_point + j < Width)
        {
            Pvalue += N[N_start_point + j]*M[j];
        }
    }
    P[i] = Pvalue;
}
//====== Convolution kernel using constant memory and caching ==================
// 1-D convolution reading the mask from __constant__ memory M (broadcast to
// all lanes). The grid is sized with ceil(length/block), so surplus threads
// of the last block are guarded — previously they wrote P[i] out of bounds.
__global__ void convolutionKernelConstant(float *N, float *P, int Mask_Width,
int Width)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if (i >= Width)
        return;  // surplus thread: no output element to produce
    float Pvalue = 0;
    int N_start_point = i - (Mask_Width/2);
    for (int j = 0; j < Mask_Width; j++)
    {
        if (N_start_point + j >= 0 && N_start_point + j < Width)
        {
            Pvalue += N[N_start_point + j]*M[j];
        }
    }
    P[i] = Pvalue;
}
//===== Tiled Convolution kernel using shared memory ===========================
// Tiled convolution: each block stages its TILE_SIZE inputs plus left/right
// halos of n = Mask_Width/2 cells into shared memory, then convolves from the
// staged tile using the __constant__ mask M.
// NOTE(review): assumes blockDim.x == TILE_SIZE and Width is a multiple of
// blockDim.x — the unguarded body load and the unguarded store P[i] go out of
// bounds otherwise; confirm against the launch configuration.
__global__ void convolutionKernelShared(float *N, float *P, int Mask_Width,
int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float N_ds[TILE_SIZE + MAX_MASK_WIDTH - 1];
int n = Mask_Width/2;
// left halo: the last n threads fetch the cells just before this tile,
// zero-filling at the global left edge
int halo_index_left = (blockIdx.x - 1)*blockDim.x + threadIdx.x;
if (threadIdx.x >= blockDim.x - n)
{
N_ds[threadIdx.x - (blockDim.x - n)] =
(halo_index_left < 0) ? 0 : N[halo_index_left];
}
// tile body: one element per thread
N_ds[n + threadIdx.x] = N[blockIdx.x*blockDim.x + threadIdx.x];
// right halo: the first n threads fetch the cells just after this tile,
// zero-filling at the global right edge
int halo_index_right = (blockIdx.x + 1)*blockDim.x + threadIdx.x;
if (threadIdx.x < n)
{
N_ds[n + blockDim.x + threadIdx.x] =
(halo_index_right >= Width) ? 0 : N[halo_index_right];
}
// all staging writes must land before any thread reads the tile
__syncthreads();
float Pvalue = 0;
for(int j = 0; j < Mask_Width; j++)
{
Pvalue += N_ds[threadIdx.x + j]*M[j];
}
P[i] = Pvalue;
}
//====== A simplier tiled convolution kernel using shared memory and general cahching
// Simplified tiled convolution: each block stages only its own TILE_SIZE
// inputs (no halos); halo elements are read straight from global memory and
// are expected to be served by the L2/general cache after neighboring blocks
// touch them. Uses the __constant__ mask M.
// NOTE(review): assumes blockDim.x == TILE_SIZE and Width is a multiple of
// blockDim.x — N[i] and P[i] are unguarded otherwise; confirm against the
// launch configuration.
__global__ void convolutionKernelSharedSimplier(float *N, float *P, int Mask_Width,
int Width)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
__shared__ float N_ds[TILE_SIZE];
N_ds[threadIdx.x] = N[i];
// the whole tile must be staged before any thread reads a neighbor's slot
__syncthreads();
int This_tile_start_point = blockIdx.x * blockDim.x;
int Next_tile_start_point = (blockIdx.x + 1) * blockDim.x;
int N_start_point = i - (Mask_Width/2);
float Pvalue = 0;
for (int j = 0; j < Mask_Width; j ++)
{
int N_index = N_start_point + j;
if (N_index >= 0 && N_index < Width)
{
// inside this block's tile: read from shared memory; otherwise fall
// back to the global-memory (cached) read
if ((N_index >= This_tile_start_point)
&& (N_index < Next_tile_start_point))
{
Pvalue += N_ds[threadIdx.x+j-(Mask_Width/2)]*M[j];
} else
{
Pvalue += N[N_index] * M[j];
}
}
}
P[i] = Pvalue;
}
//===== Convolution kernel call ================================================
// Host wrapper for the basic kernel: copy input and mask to the device,
// launch one thread per output element, copy the result back, free buffers.
void convolutionCall (float *input, float *output, float *mask, int mask_length, int length)
{
    const size_t data_bytes = length * sizeof(float);
    const size_t mask_bytes = mask_length * sizeof(float);
    float block_size = BLOCK_SIZE; // float so ceil() below divides in double
    float *d_input;
    float *d_mask;
    float *d_output;
    cudaMalloc(&d_input, data_bytes);
    cudaMalloc(&d_mask, mask_bytes);
    cudaMalloc(&d_output, data_bytes);
    cudaMemcpy(d_input, input, data_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_mask, mask, mask_bytes, cudaMemcpyHostToDevice);
    dim3 dimGrid(ceil(length / block_size), 1, 1);
    dim3 dimBlock(block_size, 1, 1);
    convolutionBasicKernel<<<dimGrid, dimBlock>>>(d_input, d_mask, d_output, mask_length, length);
    cudaDeviceSynchronize();
    cudaMemcpy(output, d_output, data_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_input);
    cudaFree(d_mask);
    cudaFree(d_output);
}
//==============================================================================
// Host wrapper for the constant-memory kernel: the mask goes to the
// __constant__ symbol M instead of a device buffer.
void convolutionCallConstant (float *input, float *output, float *mask, int mask_length, int length)
{
    const size_t data_bytes = length * sizeof(float);
    float block_size = BLOCK_SIZE; // float so ceil() below divides in double
    float *d_input;
    float *d_output;
    cudaMalloc(&d_input, data_bytes);
    cudaMalloc(&d_output, data_bytes);
    cudaMemcpy(d_input, input, data_bytes, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(M, mask, mask_length * sizeof(float));
    dim3 dimGrid(ceil(length / block_size), 1, 1);
    dim3 dimBlock(block_size, 1, 1);
    convolutionKernelConstant<<<dimGrid, dimBlock>>>(d_input, d_output, mask_length, length);
    cudaDeviceSynchronize();
    cudaMemcpy(output, d_output, data_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_input);
    cudaFree(d_output);
}
//==============================================================================
// Host wrapper for the halo-staging tiled kernel (shared memory + constant
// mask).
void convolutionCallWithTilesComplex (float *input, float *output, float *mask, int mask_length, int length)
{
    const size_t data_bytes = length * sizeof(float);
    float block_size = BLOCK_SIZE; // float so ceil() below divides in double
    float *d_input;
    float *d_output;
    cudaMalloc(&d_input, data_bytes);
    cudaMalloc(&d_output, data_bytes);
    cudaMemcpy(d_input, input, data_bytes, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(M, mask, mask_length * sizeof(float));
    dim3 dimGrid(ceil(length / block_size), 1, 1);
    dim3 dimBlock(block_size, 1, 1);
    convolutionKernelShared<<<dimGrid, dimBlock>>>(d_input, d_output, mask_length, length);
    cudaDeviceSynchronize();
    cudaMemcpy(output, d_output, data_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_input);
    cudaFree(d_output);
}
//====== Convolution kernel call tiled version (the simplified one) ============
// Host wrapper for the simplified tiled kernel (shared tile, cached halos,
// constant mask).
void convolutionCallWithTiles (float *input, float *output, float *mask, int mask_length, int length)
{
    const size_t data_bytes = length * sizeof(float);
    float block_size = BLOCK_SIZE; // float so ceil() below divides in double
    float *d_input;
    float *d_output;
    cudaMalloc(&d_input, data_bytes);
    cudaMalloc(&d_output, data_bytes);
    cudaMemcpy(d_input, input, data_bytes, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(M, mask, mask_length * sizeof(float));
    dim3 dimGrid(ceil(length / block_size), 1, 1);
    dim3 dimBlock(block_size, 1, 1);
    convolutionKernelSharedSimplier<<<dimGrid, dimBlock>>>(d_input, d_output, mask_length, length);
    cudaDeviceSynchronize();
    cudaMemcpy(output, d_output, data_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_input);
    cudaFree(d_output);
}
//================= MAIN =======================================================
// Benchmark driver: for vector lengths 2^5 .. 2^20, run the serial reference
// and one parallel variant (selected by `op`), timing both and comparing the
// results.
int main ()
{
for(int i=5; i<=20;i++)//to execute the program many times just to get all the test values
{
cout<<"=> EXECUTION #"<<i-5<<endl;
unsigned int length = pow(2,i);
int mask_length = 5;
int op = 1; //To select which parallel version we want to execute
// 1 = basic parallel - 2 = parallel with constant memory
// 3 = parallel with shared memory and tiling - 4 = simplified tiled version
// NOTE(review): op is hard-coded to 1, so cases 2-4 below are dead code as
// written; change op (or read it from argv) to exercise them.
clock_t start, finish; //Clock variables
double elapsedSecuential, elapsedParallel, elapsedParallelConstant,
elapsedParallelSharedComplex,elapsedParallelSharedTiles, optimization;
float *A = (float *) malloc(length * sizeof(float));
float *mask = (float *) malloc(mask_length * sizeof(float));
float *Cserial = (float *) malloc(length * sizeof(float));
float *Cparallel = (float *) malloc(length * sizeof(float));
float *CparallelWithTiles = (float *) malloc(length * sizeof(float));
float *CparallelConstant = (float *) malloc (length * sizeof(float));
float *CparallelWithTilesComplex = (float *) malloc(length * sizeof(float));
// input = all 1s, mask = all 2s; outputs zeroed
fillVector(A,1.0,length);
fillVector(mask,2.0,mask_length);
fillVector(Cserial,0.0,length);
fillVector(Cparallel,0.0,length);
fillVector(CparallelWithTiles,0.0,length);
fillVector(CparallelConstant,0.0,length);
fillVector(CparallelWithTilesComplex,0.0,length);
//============================================================================
cout<<"Serial result"<<endl;
start = clock();
serialConvolution(A,Cserial,mask,mask_length,length);
finish = clock();
elapsedSecuential = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The Secuential process took: " << elapsedSecuential << " seconds to execute "<< endl;
//printVector(Cserial,length);
cout<<endl;
//============================================================================
switch (op)
{
case 1:
cout<<"==============================================================="<<endl;
cout<<"Paralelo basico"<<endl;
start = clock();
convolutionCall(A,Cparallel,mask,mask_length,length);
finish = clock();
elapsedParallel = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallel << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallel;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(Cparallel,length);
compareVector(Cserial,Cparallel,length);
cout<<endl;
break;
case 2:
cout<<"==============================================================="<<endl;
cout<<"Paralelo con memoria constante"<<endl;
start = clock();
convolutionCallConstant(A,CparallelConstant,mask,mask_length,length);
finish = clock();
elapsedParallelConstant = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallelConstant << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallelConstant;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(CparallelConstant,length);
compareVector(Cserial,CparallelConstant,length);
cout<<endl;
break;
case 3:
cout<<"==============================================================="<<endl;
cout<<"Paralelo con memoria compartida y Tiling"<<endl;
start = clock();
convolutionCallWithTilesComplex(A,CparallelWithTilesComplex,mask,mask_length,length);
finish = clock();
elapsedParallelSharedComplex = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallelSharedComplex << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallelSharedComplex;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(CparallelWithTilesComplex,length);
compareVector(Cserial,CparallelWithTilesComplex,length);
cout<<endl;
break;
case 4:
cout<<"==============================================================="<<endl;
cout<<"Parallel with shared memory result simplified"<<endl;
start = clock();
convolutionCallWithTiles(A,CparallelWithTiles,mask,mask_length,length);
finish = clock();
elapsedParallelSharedTiles = (((double) (finish - start)) / CLOCKS_PER_SEC );
cout<< "The parallel process took: " << elapsedParallelSharedTiles << " seconds to execute "<< endl;
optimization = elapsedSecuential/elapsedParallelSharedTiles;
cout<< "The acceleration we've got: " << optimization <<endl;
//printVector(CparallelWithTiles,length);
compareVector(Cserial,CparallelWithTiles,length);
cout<<endl;
break;
}
free(A);
free(mask);
free(Cserial);
free(Cparallel);
free(CparallelWithTiles);
free(CparallelConstant);
free(CparallelWithTilesComplex);
}
}
|
7,409 | #include "includes.h"
// Midpoint-rule integration of 4/(1+x^2) over [0,1]: summing r[] and scaling by
// width approximates pi. Grid-stride loop: each thread accumulates the heights
// of every n-th sub-interval into its own slot r[id] (r must be zeroed by the
// caller and hold one element per launched thread).
__global__ void gpuPi(double *r, double width, int n) {
	int idx = threadIdx.x + (blockIdx.x * blockDim.x);	// First interval handled by this thread.
	int id = idx;	// This thread's fixed output slot.
	double mid, height;	// Auxiliary variables.
	while (idx < n) {	// Don't overflow the conceptual interval count.
		// BUG FIX: the midpoint of interval [idx*width, (idx+1)*width] is
		// (idx + 0.5) * width; the original used 0.6, biasing every sample.
		mid = (idx + 0.5) * width;
		height = 4.0 / (1.0 + mid * mid);	// Integrand 4/(1+x^2).
		r[id] += height;	// Accumulate this thread's partial sum.
		idx += (blockDim.x * gridDim.x);	// Grid-stride update.
	}
}
7,410 | #include "stdio.h"
#include<iostream>
//Defining Number of elements in Array
#define N 5
//Defining vector addition function for CPU
// Element-wise sum of two N-length int arrays: h_c[i] = h_a[i] + h_b[i].
void cpuAdd(int *h_a, int *h_b, int *h_c) {
	for (int idx = 0; idx < N; ++idx)
		h_c[idx] = h_a[idx] + h_b[idx];
}
int main(void) {
	int h_a[N], h_b[N], h_c[N];
	// Build the inputs: a[i] = 2*i*i, b[i] = i.
	for (int i = 0; i < N; ++i) {
		h_a[i] = 2 * i * i;
		h_b[i] = i;
	}
	// CPU vector addition, then report each element.
	cpuAdd (h_a, h_b, h_c);
	printf("Vector addition on CPU\n");
	for (int i = 0; i < N; ++i)
		printf("The sum of %d element is %d + %d = %d\n", i, h_a[i], h_b[i], h_c[i]);
	return 0;
}
|
7,411 | #include <iostream>
#include <fstream>
#include <string>
#include <stdlib.h>
#include <iomanip>
void printGrid(float* grid, int rows, int cols);
/**
*Kernel will update the matrix to keep the heater cells constant.
*/
/**
 * Re-stamps the heater temperatures into the state grid so heater cells stay
 * constant between diffusion steps. One thread per cell; any non-zero cell in
 * heaterGrid is treated as a heater. (iteration is unused.)
 */
__global__ void copyHeaters(float* stateGrid, float* heaterGrid, int nRows, int nCols, int iteration) {
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= nRows || col >= nCols)
		return;
	int cell = row * nCols + col;
	float heat = heaterGrid[cell];
	if (heat != 0)
		stateGrid[cell] = heat;
}
// One explicit heat-diffusion step: each thread updates one cell of outGrid
// from its four neighbors in inGrid: Tnew = T + k*(up + down + left + right - 4T).
// Out-of-array neighbors fall back to the cell's own value (insulated edge).
// NOTE(review): neighbor indices are computed on the flattened array, so the
// left/right neighbors of a row-edge cell wrap into the adjacent row rather
// than being clamped — confirm this is the intended boundary treatment.
__global__ void updateGrid(float* inGrid, float* outGrid, float k, int nRows, int nCols) {
dim3 gIdx;
gIdx.y = blockIdx.y * blockDim.y + threadIdx.y; //row
gIdx.x = blockIdx.x * blockDim.x + threadIdx.x; // col
int i = gIdx.y;
int j = gIdx.x;
//Find these values from the inGrid (flat indices of the 4 neighbors)
int Tlft, Trite, Tup, Tdown;
if(gIdx.x < nCols && gIdx.y < nRows){
int currentPosition = i*nCols+j;
Tlft = currentPosition -1;
Trite = currentPosition +1;
Tup = currentPosition -nCols;
Tdown = currentPosition +nCols;
float Tnew = inGrid[currentPosition];
float Top, Tbottom, Tleft, Tright;
// Clamp to the cell's own temperature when the neighbor index leaves the array.
Tbottom = (Tdown >= nCols*nRows) ? Tnew : inGrid[Tdown];
Top = (Tup < 0) ? Tnew : inGrid[Tup];
Tright = (Trite >= nCols*nRows) ? Tnew : inGrid[Trite];
Tleft = (Tlft < 0) ? Tnew : inGrid[Tlft];
// Explicit update; k is the diffusion constant (stability requires small k).
Tnew = Tnew + k*(Top + Tbottom + Tleft + Tright - (4*Tnew));
outGrid[currentPosition] = Tnew;
}
}
/*------------------------------------------------------------------------------
readHeaterFile
Assumes heaterGrid points to a flattened 2d array of size [rows,cols]
Fille heaterGrid with heaters from the heater file
------------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
readHeaterFile
  Reads "<count>" followed by <row> <col> <temp> triples from fileName and
  stamps each temperature into the flattened [rows x cols] heaterGrid.
------------------------------------------------------------------------------*/
void readHeaterFile(const char* fileName, float* heaterGrid, int rows, int cols) {
	std::ifstream inFile(fileName);
	int numHeaters;
	inFile >> numHeaters;
	for(int h = 0; h < numHeaters; ++h) {
		int heaterRow, heaterCol;
		float temp;
		inFile >> heaterRow >> heaterCol >> temp;
		heaterGrid[heaterRow * cols + heaterCol] = temp;
	}
	inFile.close();
}
/*------------------------------------------------------------------------------
printGrid
------------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
printGrid
  Device-side debugging aid: dumps the [rows x cols] grid row-by-row with
  printf (serialized and slow — debugging only).
------------------------------------------------------------------------------*/
__device__ void printGrid(float* grid, int rows, int cols) {
	for(int r = 0; r < rows; ++r) {
		for(int c = 0; c < cols; ++c)
			printf("%f ", grid[r*cols+c]);
		printf("\n");
	}
}
/*------------------------------------------------------------------------------
printGridToFile
------------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
printGridToFile
  Writes the [rows x cols] grid to fileName, one row per line, each value
  fixed-point with 2 decimals in a 6-char field.
  FIX: fileName is now const char* — the caller passes a string literal
  ("output_two.txt"), which is an invalid conversion to char* in modern C++.
  const char* still accepts char* arguments, so callers are unaffected.
------------------------------------------------------------------------------*/
void printGridToFile(float* grid, int rows, int cols, const char* fileName) {
	std::ofstream outFile(fileName);
	outFile << std::fixed << std::setprecision(2);
	for(int i = 0; i < rows; ++i) {
		for(int j = 0; j < cols; ++j) {
			outFile << std::setw(6) << grid[i*cols+j] << " ";
		}
		outFile<< std::endl;
	}
	outFile.close();
}
/*------------------------------------------------------------------------------
main
------------------------------------------------------------------------------*/
/*------------------------------------------------------------------------------
main
  2D heat diffusion: reads heater positions, iterates copyHeaters+updateGrid
  with double buffering, writes the final grid to output_two.txt.
------------------------------------------------------------------------------*/
int main(int argc, char** argv) {
	if(argc != 6) {
		std::cout << "Usage: " << argv[0] <<
			" <numRows> <numCols> <k> <timesteps> <heaterFileName>" << std::endl;
		return 0;
	}
	//Input arguments
	int rows = atoi(argv[1]);
	int cols = atoi(argv[2]);
	float k = atof(argv[3]);
	int timeSteps = atoi(argv[4]);
	//Allocate heater grid
	int gridSize = rows * cols * sizeof(float);
	float* heaterGrid_h = (float*)malloc(gridSize);
	// BUG FIX: malloc leaves the grid uninitialized, and copyHeaters treats any
	// non-zero cell as a heater — the non-heater cells must start at zero.
	memset(heaterGrid_h, 0, gridSize);
	//Read in heater file
	readHeaterFile(argv[5], heaterGrid_h, rows, cols);
	float* heaterGrid_d; //device pointer
	cudaMalloc(&heaterGrid_d, gridSize);
	cudaMemcpy(heaterGrid_d, heaterGrid_h, gridSize, cudaMemcpyHostToDevice);
	//Input grid (all zeros initially)
	float* inGrid_h = (float*)malloc(gridSize);
	memset(inGrid_h, 0, gridSize);
	float* inGrid_d; //device pointer
	cudaMalloc(&inGrid_d, gridSize);
	cudaMemcpy(inGrid_d, inGrid_h, gridSize, cudaMemcpyHostToDevice);
	//Output grid
	float* outGrid_h = (float*)malloc(gridSize);
	memset(outGrid_h, 0, gridSize);
	float* outGrid_d; //device pointer
	cudaMalloc(&outGrid_d, gridSize);
	cudaMemcpy(outGrid_d, outGrid_h, gridSize, cudaMemcpyHostToDevice);
	dim3 bDim(16, 16);
	dim3 gDim;
	// BUG FIX: in the kernels gIdx.x indexes columns and gIdx.y indexes rows, so
	// the grid's x extent must be ceil(cols/16) — the original used rows for
	// both, dropping columns whenever cols > rows.
	gDim.x = (cols + bDim.x - 1) / bDim.x; //ceil(num_cols/16)
	gDim.y = (rows + bDim.y - 1) / bDim.y; //ceil(num_rows/16)
	for(int i = 0; i < timeSteps; ++i) {
		//re-stamp heater temps into inGrid_d
		copyHeaters<<<gDim, bDim>>>(inGrid_d, heaterGrid_d, rows, cols, i);
		//one diffusion step: inGrid_d -> outGrid_d
		updateGrid<<<gDim, bDim>>>(inGrid_d, outGrid_d, k, rows, cols);
		//swap buffers for the next step
		float* temp = inGrid_d;
		inGrid_d = outGrid_d;
		outGrid_d = temp;
	}
	//copy the final state back to the host and dump it
	cudaMemcpy(inGrid_h, inGrid_d, gridSize, cudaMemcpyDeviceToHost);
	printGridToFile(inGrid_h, rows, cols, "output_two.txt");
	// Release device and host buffers (the original leaked everything).
	cudaFree(heaterGrid_d);
	cudaFree(inGrid_d);
	cudaFree(outGrid_d);
	free(heaterGrid_h);
	free(inGrid_h);
	free(outGrid_h);
	return 0;
}
|
7,412 | #include <cuda.h>
#include <cstdio>
#include <cstdlib>
#define BSIZE2D 32
// EJERCICIOS
// (1) Implemente el matmul de GPU basico
// (2) Implemente el matmul de GPU usando memoria compartida
// (3) Compare el rendimiento de GPU Matmul vs el de CPU que hizo previamente
// GPU matmul basico
// GPU matmul basico (ejercicio 1): one thread per output element.
// C[i][j] = sum_k A[i][k] * B[k][j], row-major n x n matrices; threads outside
// the matrix do nothing, so any covering 2D launch works.
__global__ void kernel_matmul(int n, float *a, float *b, float *c){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < n && col < n) {
        float sum = 0.0f;
        for (int k = 0; k < n; ++k)
            sum += a[row*n + k] * b[k*n + col];
        c[row*n + col] = sum;
    }
}
// GPU matmul shared memory
// GPU matmul con memoria compartida (ejercicio 2): classic tiled algorithm.
// Each block computes a TS x TS tile of C, staging TS x TS tiles of A and B in
// shared memory per phase. Requires blockDim = (TS, TS); TS must match the
// BSIZE2D used at launch. Out-of-range elements are loaded as 0 so n need not
// be a multiple of TS. The +1 column pad avoids shared-memory bank conflicts.
__global__ void kernel_matmulsm(int n, float *a, float *b, float *c){
    const int TS = 32; // must match BSIZE2D launch configuration
    __shared__ float As[TS][TS + 1];
    __shared__ float Bs[TS][TS + 1];
    int tx = threadIdx.x, ty = threadIdx.y;
    int col = blockIdx.x * TS + tx;
    int row = blockIdx.y * TS + ty;
    float sum = 0.0f;
    for (int ph = 0; ph < (n + TS - 1) / TS; ++ph) {
        int ak = ph * TS + tx; // column of A loaded by this thread
        int bk = ph * TS + ty; // row of B loaded by this thread
        As[ty][tx] = (row < n && ak < n) ? a[row*n + ak] : 0.0f;
        Bs[ty][tx] = (bk < n && col < n) ? b[bk*n + col] : 0.0f;
        __syncthreads(); // tiles fully staged before use
        for (int k = 0; k < TS; ++k)
            sum += As[ty][k] * Bs[k][tx];
        __syncthreads(); // finish reads before the next phase overwrites tiles
    }
    if (row < n && col < n)
        c[row*n + col] = sum;
}
// Fills an n x n row-major matrix with uniform values in [0,1]. The seed is
// fixed (srand(1)), so every call produces the same sequence — reproducible runs.
void matrandom(int n, float *m){
    srand(1);
    for(int row=0; row<n; ++row){
        for(int col=0; col<n; ++col){
            m[row*n + col] = (float)rand()/((float)RAND_MAX);
        }
    }
}
// Prints a label line followed by the n x n matrix, two decimals per entry.
void printmat(int n, float *m, const char* msg){
    printf("%s\n", msg);
    for(int row=0; row<n; ++row){
        for(int col=0; col<n; ++col)
            printf("%.2f ", m[row*n + col]);
        printf("\n");
    }
}
// Recomputes C = A x B on the CPU into cgold and compares it against c.
// Returns 1 when every element matches within 0.01 absolute, 0 on the first
// mismatch (reported to stderr).
int verify(int n, float *a, float *b, float *c, float *cgold){
    const float error = 0.01f;
    for(int row=0; row<n; ++row){
        for(int col=0; col<n; ++col){
            float sum = 0.0f;
            for(int k=0; k<n; ++k)
                sum += a[row*n + k]*b[k*n + col];
            cgold[row*n + col] = sum;
            if(fabs(c[row*n + col] - cgold[row*n + col]) >= error){
                fprintf(stderr, "error: c[%i][%i] ---> c %f cgold %f\n", row, col, c[row*n+col], cgold[row*n+col]);
                return 0;
            }
        }
    }
    return 1;
}
// Driver: builds random n x n matrices, times the GPU matmul with CUDA events,
// and verifies the result against a CPU reference.
int main(int argc, char **argv){
    printf("GPU MATMUL\n");
    if(argc != 2){
        fprintf(stderr, "run as ./prog n\n");
        exit(EXIT_FAILURE);
    }
    int n = atoi(argv[1]);
    float msecs = 0.0f;
    // (1) host matrices
    float *a = new float[n*n];
    float *b = new float[n*n];
    float *c = new float[n*n];
    float *cgold = new float[n*n];
    printf("initializing A and B......."); fflush(stdout);
    matrandom(n, a);
    matrandom(n, b);
    if(n < 64){
        printmat(n, a, "mat a");
        printmat(n, b, "mat b");
    }
    printf("ok\n"); fflush(stdout);
    // (2) device matrices (the result buffer cd is written by the kernel, so
    // copying the uninitialized host c up — as the original did — was pointless)
    float *ad, *bd, *cd;
    cudaMalloc(&ad, sizeof(float)*n*n);
    cudaMalloc(&bd, sizeof(float)*n*n);
    cudaMalloc(&cd, sizeof(float)*n*n);
    cudaMemcpy(ad, a, sizeof(float)*n*n, cudaMemcpyHostToDevice);
    cudaMemcpy(bd, b, sizeof(float)*n*n, cudaMemcpyHostToDevice);
    // (3) run the matmul on the GPU, timed with events
    printf("computing C = A x B........"); fflush(stdout);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    dim3 block(BSIZE2D, BSIZE2D, 1);
    dim3 grid((n+BSIZE2D-1)/BSIZE2D, (n+BSIZE2D-1)/BSIZE2D, 1);
    cudaEventRecord(start);
    kernel_matmul<<<grid, block>>>(n, ad, bd, cd);
    //kernel_matmulsm<<<grid, block>>>(n, ad, bd, cd);
    cudaDeviceSynchronize();
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecs, start, stop);
    printf("ok: time: %f secs\n", msecs/1000.0f);
    // (4) result back to host
    printf("copying result to host....."); fflush(stdout);
    cudaMemcpy(c, cd, sizeof(float)*n*n, cudaMemcpyDeviceToHost);
    printf("ok\n"); fflush(stdout);
    if(n < 50){
        printmat(n, c, "mat c");
    }
    // (5) verify against the CPU reference
    printf("verifying result..........."); fflush(stdout);
    int ok = verify(n, a, b, c, cgold);
    // BUG FIX: the original leaked every allocation and both events (exit()
    // skips cleanup); release them on both paths.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(ad); cudaFree(bd); cudaFree(cd);
    delete[] a; delete[] b; delete[] c; delete[] cgold;
    if(!ok){
        fprintf(stderr, "error verifying result\n");
        exit(EXIT_FAILURE);
    }
    printf("ok\n");
    printf("done!\n");
    exit(EXIT_SUCCESS);
}
|
7,413 | #include "includes.h"
// Element-wise C = A + B, one element per thread. There is no bounds check —
// NOTE(review): the launch configuration must cover exactly the array length;
// confirm callers launch gridDim.x * blockDim.x == number of elements.
__global__ void VecAdd(const float* A, const float* B, float* C)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
7,414 | # include <stdio.h>
# include <stdint.h>
# include "cuda_runtime.h"
//compile nvcc *.cu -o test
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned int * duration, unsigned int *index);
void parametric_measure_global(int N, int iterations, int stride);
void measure_global();
// Entry point: select GPU 0, run the global-memory latency sweep, then tear
// the CUDA context down.
int main(){
	cudaSetDevice(0);
	measure_global();
	cudaDeviceReset();
	return 0;
}
// Sweeps power-of-two strides over a 1 GB pointer-chase array and measures
// global-memory load latency for each stride.
void measure_global() {
	int N, iterations, stride;
	//stride in element
	iterations = 1;
	N = 1024 * 1024* 1024/sizeof(unsigned int); //in element (1 GB of uints)
	for (stride = 1; stride <= N/2; stride*=2) {
		// BUG FIX: the array size in GB is N * sizeof(unsigned int) bytes / 2^30;
		// the original divided the element count by 2^30 and printed "0 GB".
		printf("\n=====%d GB array, cold cache miss, read 256 element====\n",
		       (int)((size_t)N * sizeof(unsigned int) / 1024 / 1024 / 1024));
		// Cast: stride * sizeof(...) is size_t, but %d expects int.
		printf("Stride = %d element, %d bytes\n", stride,
		       (int)(stride * sizeof(unsigned int)));
		parametric_measure_global(N, iterations, stride );
		printf("===============================================\n\n");
	}
}
// Builds an N-element pointer-chase array (h_a[i] = (i+stride) % N), runs the
// single-thread latency kernel, and prints 256 (index, cycle-count) pairs.
// N+2 elements are allocated: the kernel writes two sink slots past the chase
// array so the chase cannot be optimized away.
void parametric_measure_global(int N, int iterations, int stride) {
	cudaDeviceReset();
	int i;
	unsigned int * h_a;
	/* allocate arrays on CPU */
	h_a = (unsigned int *)malloc(sizeof(unsigned int) * (N+2));
	unsigned int * d_a;
	/* allocate arrays on GPU */
	cudaMalloc ((void **) &d_a, sizeof(unsigned int) * (N+2));
	/* initialize array elements on CPU with pointers into d_a. */
	for (i = 0; i < N; i++) {
		h_a[i] = (i+stride)%N;
	}
	h_a[N] = 0;
	h_a[N+1] = 0;
	/* copy array elements from CPU to GPU */
	// FIX: copy all N+2 initialized elements — the two trailing sink slots were
	// initialized above but never copied by the original (which copied only N).
	cudaMemcpy(d_a, h_a, sizeof(unsigned int) * (N+2), cudaMemcpyHostToDevice);
	unsigned int *h_index = (unsigned int *)malloc(sizeof(unsigned int)*256);
	unsigned int *h_timeinfo = (unsigned int *)malloc(sizeof(unsigned int)*256);
	unsigned int *duration;
	cudaMalloc ((void **) &duration, sizeof(unsigned int)*256);
	unsigned int *d_index;
	cudaMalloc( (void **) &d_index, sizeof(unsigned int)*256 );
	// FIX: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
	// supported equivalent (used throughout below).
	cudaDeviceSynchronize ();
	/* launch kernel: a single thread chases the pointers */
	dim3 Db = dim3(1);
	dim3 Dg = dim3(1,1,1);
	global_latency <<<Dg, Db>>>(d_a, N, iterations, duration, d_index);
	cudaDeviceSynchronize ();
	cudaError_t error_id = cudaGetLastError();
	if (error_id != cudaSuccess) {
		printf("Error kernel is %s\n", cudaGetErrorString(error_id));
	}
	/* copy results from GPU to CPU */
	cudaDeviceSynchronize ();
	cudaMemcpy((void *)h_timeinfo, (void *)duration, sizeof(unsigned int)*256, cudaMemcpyDeviceToHost);
	cudaMemcpy((void *)h_index, (void *)d_index, sizeof(unsigned int)*256, cudaMemcpyDeviceToHost);
	cudaDeviceSynchronize ();
	for(i=0;i<256;i++)
		printf("%d\t %d\n", h_index[i], h_timeinfo[i]);
	/* free memory on GPU */
	cudaFree(d_a);
	cudaFree(d_index);
	cudaFree(duration);
	/*free memory on CPU */
	free(h_a);
	free(h_index);
	free(h_timeinfo);
	cudaDeviceReset();
}
// Single-thread pointer-chase latency kernel: repeatedly loads j = my_array[j],
// timing each dependent load with clock(), and records the visited indices and
// per-load cycle counts into shared memory before publishing 256 samples.
// NOTE(review): s_index/s_tvalue hold 256 entries but the chase loop runs
// iterations*256 steps — out-of-bounds for iterations > 1; the only caller
// passes iterations = 1. Confirm before reusing with larger values.
__global__ void global_latency (unsigned int * my_array, int array_length, int iterations, unsigned int * duration, unsigned int *index) {
	unsigned int start_time, end_time;
	unsigned int j = 0;
	__shared__ unsigned int s_tvalue[256];
	__shared__ unsigned int s_index[256];
	int k;
	for(k=0; k<256; k++){
		s_index[k] = 0;
		s_tvalue[k] = 0;
	}
	for (k = 0; k < iterations*256; k++) {
		start_time = clock();
		j = my_array[j];          // dependent load — next address comes from this one
		s_index[k]= j;            // storing j makes the load's completion observable
		end_time = clock();
		s_tvalue[k] = end_time-start_time;
	}
	// Sink writes: keep the chase from being dead-code eliminated.
	my_array[array_length] = j;
	my_array[array_length+1] = my_array[j];
	for(k=0; k<256; k++){
		index[k]= s_index[k];
		duration[k] = s_tvalue[k];
	}
}
|
7,415 | #include "includes.h"
// Per-element comparison of a and b: result[i] = +value where a > b,
// -value where a < b, and 0 where equal. Index is flattened from a 2D grid
// of 1D blocks.
__global__ void detectChanges(float* a, float* b, float* result, int size, float value)
{
	int tid = blockDim.x * (blockIdx.y * gridDim.x + blockIdx.x) + threadIdx.x;
	if (tid >= size)
		return;
	float lhs = a[tid];
	float rhs = b[tid];
	if (lhs > rhs)
		result[tid] = value;
	else if (lhs < rhs)
		result[tid] = -value;
	else
		result[tid] = 0;
}
7,416 | extern "C"
{
__global__ void gfill(const int n, const double *a, double *c)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<n)
{
c[i] = a[0];
}
}
} |
7,417 | /******************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
******************************************************************************/
// Define your kernels in this file you may use more than one kernel if you
// need to
// INSERT KERNEL(S) HERE
//extern __shared__ unsigned int Hist[];
// Privatized histogram: each block accumulates into a shared-memory copy
// (dynamic shared allocation of numBins uints, sized at launch), then merges
// it into the global histogram with one atomicAdd per bin per block.
// Assumes every buffer[i] is a valid bin index in [0, numBins).
__global__ void HistogramKernel(unsigned int *buffer, unsigned int numBins,
unsigned int* histo, unsigned int size) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
//create and initialize shared-mem private histogram (strided so any blockDim works)
extern __shared__ unsigned int histo_private[];
for(int j = threadIdx.x; j < numBins; j += blockDim.x) {
histo_private[j] = 0;
}
__syncthreads();  // all bins zeroed before any thread accumulates
//fill shared mem histogram with a grid-stride loop over the input
while (i < size) {
atomicAdd(&(histo_private[(buffer[i])]), 1);
i += stride;
}
__syncthreads();  // block's private histogram complete before merging
//create final histogram using private histogram (one atomic per bin per block)
for(int j = threadIdx.x; j < numBins; j += blockDim.x) {
atomicAdd(&(histo[j]), histo_private[j]);
}
__syncthreads();
//naive implementation below (kept for reference): all threads contend on the
//global bins directly.
/*
int i = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while (i < size) {
unsigned int num = buffer[i];
atomicAdd(&histo[num], 1);
i += stride;
}
*/
}
/******************************************************************************
Setup and invoke your kernel(s) in this function. You may also allocate more
GPU memory if you need to
*******************************************************************************/
/******************************************************************************
Host wrapper: launches HistogramKernel with one private histogram (numBins
uints of dynamic shared memory) per 1024-thread block.
*******************************************************************************/
void histogram(unsigned int* input, unsigned int* bins, unsigned int num_elements,
		unsigned int num_bins) {
	const unsigned int BLOCK_SIZE = 1024;
	// FIX: exact integer ceil-division; the original's ceil(num_elements/(float)
	// BLOCK_SIZE) can misround for counts beyond float's 24-bit precision.
	unsigned int numBlocks = (num_elements + BLOCK_SIZE - 1) / BLOCK_SIZE;
	HistogramKernel<<<numBlocks, BLOCK_SIZE,
		sizeof(unsigned int)*num_bins>>>(input, num_bins, bins, num_elements);
}
|
7,418 | #include "includes.h"
// Tiled matrix transpose through shared memory. TILE_DIM and BLOCK_HEIGHT come
// from includes.h; each block stages a TILE_DIM x TILE_DIM tile (padded by one
// column — a common bank-conflict mitigation) and writes it back transposed.
// Expects blockDim = (TILE_DIM, BLOCK_HEIGHT), each thread covering
// TILE_DIM/BLOCK_HEIGHT rows of the tile.
// NOTE(review): after swapping block coordinates for the output phase, the
// bounds are still tested as `y + i < axis_1 && x < axis_0`; for non-square
// matrices (axis_0 != axis_1) the roles of the axes look swapped here —
// confirm against callers (correct as written when axis_0 == axis_1).
__global__ void naive_matrix_transpose(float *input, int axis_0, int axis_1, float *output)
{
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
int x = blockIdx.x * TILE_DIM + threadIdx.x;
int y = blockIdx.y * TILE_DIM + threadIdx.y;
for (int i = 0; i < TILE_DIM && y + i < axis_1 && x < axis_0; i += BLOCK_HEIGHT) {
tile[threadIdx.y + i][threadIdx.x] = input[(y + i) * axis_0 + x];
}
__syncthreads();
// Swap block coordinates: this block now writes the mirrored tile location.
x = blockIdx.y * TILE_DIM + threadIdx.x;
y = blockIdx.x * TILE_DIM + threadIdx.y;
for (int i = 0; i < TILE_DIM && y + i < axis_1 && x < axis_0; i += BLOCK_HEIGHT) {
output[(y + i) * axis_0 + x] = tile[(threadIdx.x)][threadIdx.y + i];
}
}
7,419 | #include <math.h>
#define ABS(x) ((x) > 0 ? (x) : - (x))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
// Cone-beam CT forward projection: one thread per detector cell (ia, ib).
// The source sits at distance SO from the rotation center and the detector at
// SD - SO, both rotated by `angle`; da/db are detector cell sizes, ai/bi cell
// offsets, and (nx, ny, nz) the image dimensions. For each cell the kernel
// walks the dominant axis (x-planes when the ray is more x-aligned, else
// y-planes), finds the 1-4 voxels the ray footprint can intersect per plane,
// and accumulates their bilinear contributions into proj[id].
// NOTE(review): weights wy/wz (and wx/wz) are derived from the footprint
// boundary crossings; the intricate index/weight algebra is kept exactly as
// written — verify numerically against a reference projector before changing.
__global__ void kernel_projection(float *proj, float *img, float angle, float SO, float SD, float da, int na, float ai, float db, int nb, float bi, int nx, int ny, int nz){
int ia = 16 * blockIdx.x + threadIdx.x;
int ib = 16 * blockIdx.y + threadIdx.y;
if (ia >= na || ib >= nb)
return;
int id = ia + ib * na;
proj[id] = 0.0f;
float x1, y1, z1, x2, y2, z2, x20, y20, cphi, sphi;
cphi = (float)cosf(angle);
sphi = (float)sinf(angle);
// Source position after rotation (z stays 0).
x1 = -SO * cphi;
y1 = -SO * sphi;
z1 = 0.0f;
x20 = SD - SO;
y20 = (ia + ai) * da; // locate the detector cell center before any rotation
x2 = x20 * cphi - y20 * sphi;
y2 = x20 * sphi + y20 * cphi;
z2 = (ib + bi) * db;
float x21, y21, z21; // offset between source and detector center
x21 = x2 - x1;
y21 = y2 - y1;
z21 = z2 - z1;
// y - z plane, where ABS(x21) > ABS(y21)
if (ABS(x21) > ABS(y21)){
// if (ABS(cphi) > ABS(sphi)){
float yi1, yi2, ky1, ky2, zi1, zi2, kz1, kz2;
int Yi1, Yi2, Zi1, Zi2;
// for each y - z plane, we calculate and add the contribution of related pixels
for (int ix = 0; ix < nx; ix++){
// calculate y indices of intersecting voxel candidates
ky1 = (y21 - da / 2 * cphi) / (x21 + da / 2 * sphi);
yi1 = ky1 * ((float)ix + 0.5f - x1 - nx / 2) + y1 + ny / 2;
Yi1 = (int)floor(yi1); // lower boundary of related voxels at y-axis
ky2 = (y21 + da / 2 * cphi) / (x21 - da / 2 * sphi);
yi2 = ky2 * ((float)ix + 0.5f - x1 - nx / 2) + y1 + ny / 2;
Yi2 = (int)floor(yi2); // upper boundary of related voxels at y-axis
// if (Yi1 < 0)
// Yi1 = 0;
// if (Yi2 >= ny)
// Yi2 = ny - 1;
// calculate z indices of intersecting voxel candidates
kz1 = (z21 - db / 2) / x21;
zi1 = kz1 * ((float)ix + 0.5f - x1 - nx / 2) + z1 + nz / 2;
Zi1 = (int)floor(zi1); // lower boundary of related voxels at y-axis
kz2 = (z21 + db / 2) / x21;
zi2 = kz2 * ((float)ix + 0.5f - x1 - nx / 2) + z1 + nz / 2;
Zi2 = (int)floor(zi2); // upper boundary of related voxels at y-axis
// if (Zi1 < 0)
// Zi1 = 0;
// if (Zi2 >= nz)
// Zi2 = nz - 1;
// calculate contribution of a voxel to the projection value
int iy, iz;
float wy1, wy2, wz1, wz2;
// Skip degenerate footprints (near-zero extent would divide by ~0 below).
if (ABS(yi2 - yi1) < 0.01f)
continue;
if (ABS(zi2 - zi1) < 0.01f)
continue;
wy1 = (MAX(Yi1, Yi2) - yi1) / (yi2 - yi1); wy2 = 1 - wy1;
wz1 = (MAX(Zi1, Zi2) - zi1) / (zi2 - zi1); wz2 = 1 - wz1;
// Yi1 == Yi2 && Zi1 == Zi2
if (Yi1 == Yi2 && Zi1 == Zi2)
{
iy = Yi1; iz = Zi1;
if (iy < ny && iy >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * 1.0f;
continue;
}
// Yi1 != Yi2 && Zi1 == Zi2
if (Yi1 != Yi2 && Zi1 == Zi2)
{
iy = Yi1; iz = Zi1;
if (iy < ny && iy >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wy1;
iy = Yi2; iz = Zi1;
if (iy < ny && iy >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wy2;
continue;
}
// Yi1 == Yi2 && Zi1 != Zi2
if (Yi1 == Yi2 && Zi1 != Zi2)
{
iy = Yi1; iz = Zi1;
if (iy < ny && iy >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wz1;
iy = Yi1; iz = Zi2;
if (iy < ny && iy >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wz2;
continue;
}
// Yi1 != Yi2 && Zi1 != Zi2
if (Yi1 != Yi2 && Zi1 != Zi2)
{
iy = Yi1; iz = Zi1;
if (iy < ny && iy >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wy1 * wz1;
iy = Yi1; iz = Zi2;
if (iy < ny && iy >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wy1 * wz2;
iy = Yi2; iz = Zi1;
if (iy < ny && iy >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wy2 * wz1;
iy = Yi2; iz = Zi2;
if (iy < ny && iy >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wy2 * wz2;
continue;
}
}
}
// x - z plane, where ABS(x21) <= ABS(y21)
else{
float xi1, xi2, kx1, kx2, zi1, zi2, kz1, kz2;
int Xi1, Xi2, Zi1, Zi2;
// for each y - z plane, we calculate and add the contribution of related pixels
for (int iy = 0; iy < ny; iy++){
// calculate y indices of intersecting voxel candidates
kx1 = (x21 - da / 2 * sphi) / (y21 + da / 2 * cphi);
xi1 = kx1 * ((float)iy + 0.5f - y1 - ny / 2) + x1 + nx / 2;
Xi1 = (int)floor(xi1); // lower boundary of related voxels at y-axis
kx2 = (x21 + da / 2 * sphi) / (y21 - da / 2 * cphi);
xi2 = kx2 * ((float)iy + 0.5f - y1 - ny / 2) + x1 + nx / 2;
Xi2 = (int)floor(xi2); // upper boundary of related voxels at y-axis
// if (Xi1 < 0)
// Xi1 = 0;
// if (Xi2 >= ny)
// Xi2 = ny - 1;
// calculate z indices of intersecting voxel candidates
kz1 = (z21 - db / 2) / y21;
zi1 = kz1 * ((float)iy + 0.5f - y1 - ny / 2) + z1 + nz / 2;
Zi1 = (int)floor(zi1); // lower boundary of related voxels at y-axis
kz2 = (z21 + db / 2) / y21;
zi2 = kz2 * ((float)iy + 0.5f - y1 - ny / 2) + z1 + nz / 2;
Zi2 = (int)floor(zi2); // upper boundary of related voxels at y-axis
// if (Zi1 < 0)
// Zi1 = 0;
// if (Zi2 >= nz)
// Zi2 = nz - 1;
// calculate contribution of a voxel to the projection value
int ix, iz;
float wx1, wx2, wz1, wz2;
// Skip degenerate footprints (near-zero extent would divide by ~0 below).
if (ABS(xi2 - xi1) < 0.01f)
continue;
if (ABS(zi2 - zi1) < 0.01f)
continue;
wx1 = (MAX(Xi1, Xi2) - xi1) / (xi2 - xi1); wx2 = 1 - wx1;
wz1 = (MAX(Zi1, Zi2) - zi1) / (zi2 - zi1); wz2 = 1 - wz1;
// Xi1 == Xi2 && Zi1 == Zi2
if (Xi1 == Xi2 && Zi1 == Zi2)
{
ix = Xi1; iz = Zi1;
if (ix < nx && ix >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * 1.0f;
continue;
}
// Xi1 != Xi2 && Zi1 == Zi2
if (Xi1 != Xi2 && Zi1 == Zi2)
{
ix = Xi1; iz = Zi1;
if (ix < nx && ix >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wx1;
ix = Xi2; iz = Zi1;
if (ix < nx && ix >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wx2;
continue;
}
// Xi1 == Xi2 && Zi1 != Zi2
if (Xi1 == Xi2 && Zi1 != Zi2)
{
ix = Xi1; iz = Zi1;
if (ix < nx && ix >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wz1;
ix = Xi1; iz = Zi2;
if (ix < nx && ix >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wz2;
continue;
}
// Xi1 != Xi2 && Zi1 != Zi2
if (Xi1 != Xi2 && Zi1 != Zi2)
{
ix = Xi1; iz = Zi1;
if (ix < nx && ix >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wx1 * wz1;
ix = Xi1; iz = Zi2;
if (ix < nx && ix >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wx1 * wz2;
ix = Xi2; iz = Zi1;
if (ix < nx && ix >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wx2 * wz1;
ix = Xi2; iz = Zi2;
if (ix < nx && ix >= 0 && iz < nz && iz >= 0)
proj[id] += img[ix + iy * nx + iz * nx * ny] * wx2 * wz2;
continue;
}
}
}
}
7,420 | #include "includes.h"
// Backward pass of a linear layer: dA = W^T * dZ. W is (nRowsW x nColsW) and
// dZ is (nRowsW x nColsdZ), both row-major; each thread produces one element
// of the (nColsW x nColsdZ) result.
__global__ void BackwardLinear(float *dZ, float *W, int nColsW, int nRowsW, int nColsdZ, float *dA)
{
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;
	if (row >= nColsW || col >= nColsdZ)
		return;
	float acc = 0;
	for (int k = 0; k < nRowsW; ++k)
		acc += W[k * nColsW + row] * dZ[k * nColsdZ + col];
	dA[row * nColsdZ + col] = acc;
}
7,421 | #include <stdio.h>
#include <stdlib.h>
#define THREAD_PER_BLOCK 70
__global__
// Plain n x n matrix multiply: each in-range thread computes one c[row][col].
void mult_matrix(int* a, int* b, int* c,int n)
{
	int col = blockDim.x*blockIdx.x + threadIdx.x;
	int row = blockDim.y*blockIdx.y + threadIdx.y;
	if (col >= n || row >= n)
		return;
	int acc = 0;
	for (int k = 0; k < n; ++k)
		acc += a[row*n + k] * b[k*n + col];
	c[row*n + col] = acc;
}
__global__
// Tiled n x n matrix multiply using shared memory; each block computes a
// THREAD_PER_BLOCK-square tile of c, staging tiles of a and b per phase.
// Preconditions: blockDim == (THREAD_PER_BLOCK, THREAD_PER_BLOCK) and n must
// be an exact multiple of THREAD_PER_BLOCK (no bounds checks on loads/stores).
// NOTE(review): with THREAD_PER_BLOCK = 70 this needs 70*70 = 4900 threads per
// block, far above CUDA's 1024-thread limit — as configured this kernel cannot
// launch; confirm the intended tile size (e.g. 16 or 32).
void mult_matrix_shared(int* a, int* b, int* c,int n)
{
	__shared__ float Mds[THREAD_PER_BLOCK][THREAD_PER_BLOCK];
	__shared__ float Nds[THREAD_PER_BLOCK][THREAD_PER_BLOCK];
	int bx = blockIdx.x; int by = blockIdx.y;
	int tx = threadIdx.x; int ty = threadIdx.y;
	int Row = by * THREAD_PER_BLOCK + ty;
	int Col = bx * THREAD_PER_BLOCK + tx;
	int Pvalue = 0;
	for (int ph = 0; ph < n/THREAD_PER_BLOCK; ++ph) {
		// Stage one tile of a and one of b, then synchronize before use.
		Mds[ty][tx] = a[Row*n + ph*THREAD_PER_BLOCK + tx];
		Nds[ty][tx] = b[(ph*THREAD_PER_BLOCK + ty)*n + Col];
		__syncthreads();
		for (int k = 0; k < THREAD_PER_BLOCK; ++k) {
			Pvalue += Mds[ty][k] * Nds[k][tx];
		}
		__syncthreads();  // finish reads before the next phase overwrites tiles
	}
	c[Row*n + Col] = Pvalue;
}
// Fills an n x n matrix with pseudo-random integers in [1, 5], row-major order.
void fill_mat(int* a,int n)
{
	for (int idx = 0; idx < n * n; ++idx)
		a[idx] = rand() % 5 + 1;
}
// Driver: multiplies two random 2000x2000 int matrices on the GPU and prints
// the kernel time measured with CUDA events.
int main()
{
	int *a,*b,*c;
	int *d_a,*d_b,*d_c;
	int mat_elem = 2000;
	int my_size = mat_elem*mat_elem*sizeof(int);
	float tiempo;
	cudaEvent_t inicio,final;
	cudaEventCreate(&inicio);
	cudaEventCreate(&final);
	a = (int*) malloc(my_size);
	b = (int*) malloc(my_size);
	c = (int*) malloc(my_size);
	fill_mat(a,mat_elem);
	fill_mat(b,mat_elem);
	printf("\n");
	cudaMalloc((void**)&d_a,my_size);
	cudaMalloc((void**)&d_b,my_size);
	cudaMalloc((void**)&d_c,my_size);
	cudaMemcpy(d_a,a,my_size,cudaMemcpyHostToDevice);
	cudaMemcpy(d_b,b,my_size,cudaMemcpyHostToDevice);
	// BUG FIX: the original launched THREAD_PER_BLOCK x THREAD_PER_BLOCK =
	// 70*70 = 4900 threads per block, above CUDA's 1024-thread limit, so the
	// launch failed and d_c was never computed. mult_matrix works with any
	// covering block shape; 32x32 = 1024 is the maximum legal square block.
	dim3 my_block(32,32);
	dim3 my_grid((mat_elem + my_block.x - 1)/my_block.x,(mat_elem + my_block.y - 1)/my_block.y);
	cudaEventRecord(inicio,0);
	// mult_matrix_shared requires blockDim == THREAD_PER_BLOCK tiles and
	// n % THREAD_PER_BLOCK == 0; see its notes before enabling it.
	mult_matrix<<<my_grid,my_block>>>(d_a, d_b, d_c,mat_elem);
	cudaEventRecord(final,0);
	cudaEventSynchronize(final);
	cudaEventElapsedTime(&tiempo,inicio,final);
	cudaMemcpy(c,d_c,my_size,cudaMemcpyDeviceToHost);
	printf("tiempo %d X %d, tam=%d : %0.15f\n",(int)my_block.x,(int)my_block.y,mat_elem,tiempo);
	// Release resources (the original leaked everything).
	cudaEventDestroy(inicio);
	cudaEventDestroy(final);
	cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
	free(a); free(b); free(c);
	return 0;
}
|
7,422 | #include "includes.h"
// Subtracts the fitted polynomial baseline from every OCT sample:
// OCTData[id] -= PolyValue[id % SizeZ]. PolyValue holds one fitted value per
// depth (Z) position. Expects the 4D launch layout decoded below
// (blockIdx.y = Y, blockIdx.x = X, blockIdx.z * blockDim.x + threadIdx.x = Z).
__global__ static void MinusByFittingFunction(int* OCTData, float* PolyValue, int SizeZ)
{
	// Subtract the fitted data here.
	int id = blockIdx.y * gridDim.x * gridDim.z * blockDim.x +	// Y => Y * 250 * (2 * 1024)
		blockIdx.x * gridDim.z * blockDim.x +			// X => X * (2 * 1024)
		blockIdx.z * blockDim.x +				// Z => (Z1 * 1024 + Z2)
		threadIdx.x;
	// First recover this sample's Z (depth) index.
	int idZ = id % SizeZ;
	// Subtract the predicted (fitted) value for that depth.
	OCTData[id] -= PolyValue[idZ];
}
7,423 | #include <stdio.h>
#include <stdlib.h>
// Prints a one-character label followed by the n floats, tab-separated, on one line.
void print_array(int n, char str, float *a) {
	printf("%c: ", str);
	int i = 0;
	while (i < n) {
		printf("\t%f", a[i]);
		++i;
	}
	printf("\n");
}
// CPU reference: element-wise c = a + b over n floats.
void vecadd(int n, float *a, float *b, float *c) {
	for (int idx = 0; idx < n; ++idx)
		c[idx] = a[idx] + b[idx];
}
// GPU vector add for one chunk: threads cover indices [offset, offset + grid
// span); the caller launches repeatedly with increasing offset to cover n.
__global__ void vecadd_gpu(int n, float *a, float *b, float *c, int offset) {
	int idx = offset + blockDim.x*blockIdx.x + threadIdx.x;
	if (idx < n)
		c[idx] = a[idx] + b[idx];
}
// Adds two 300M-element vectors on CPU and GPU and cross-checks the results.
// Because 300M elements need more blocks than the 65535 1D-grid limit, the GPU
// work is launched in chunks of max_bpg*tpb elements via the offset parameter.
int main() {
	int i;
	int n=300000000;
	float *a, *b, *c;
	// allocation in host memory
	a = (float *)malloc(n*sizeof(float));
	b = (float *)malloc(n*sizeof(float));
	c = (float *)malloc(n*sizeof(float));
	// initialize with uniform [0,1) values (default rand seed — deterministic)
	for(i=0; i<n; i++) {
		//a[i] = i+5.5;
		//b[i] = -1.2*i;
		a[i] = rand()/(RAND_MAX+1.);
		b[i] = rand()/(RAND_MAX+1.);
	}
	//print_array(n, 'a', a);
	//print_array(n, 'b', b);
	// CPU reference result in c
	vecadd(n, a, b, c);
	//printf("results from CPU\n");
	//print_array(n, 'c', c);
	// allocation in device memory
	float *a_dev, *b_dev, *c_dev;
	cudaMalloc((void**)&a_dev, n*sizeof(float));
	cudaMalloc((void**)&b_dev, n*sizeof(float));
	cudaMalloc((void**)&c_dev, n*sizeof(float));
	// copy arrays 'a' and 'b' to the device
	cudaMemcpy(a_dev, a, n*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(b_dev, b, n*sizeof(float), cudaMemcpyHostToDevice);
	// launch the kernel chunk by chunk (65535-block 1D grid limit)
	int tpb =256; // thread per block
	int max_bpg = 65535;  // max blocks per 1D grid
	int ng = n/(max_bpg*tpb); // number of full-size chunk launches
	for(i=0; i<ng; i++) {
		vecadd_gpu<<<max_bpg,tpb>>>(n, a_dev, b_dev, c_dev, i*max_bpg*tpb);
	}
	// tail launch for the remaining n % (max_bpg*tpb) elements
	if( n%(max_bpg*tpb)!=0 ) {
		int nn = n-ng*max_bpg*tpb;
		int bpg = nn%tpb==0 ? nn/tpb : nn/tpb+1;
		vecadd_gpu<<<bpg,tpb>>>(n, a_dev, b_dev, c_dev, ng*max_bpg*tpb);
	}
	// copy array 'c' back from the device to the host
	// (blocking memcpy — also synchronizes with the kernels above)
	float *c2;
	c2 = (float *)malloc(n*sizeof(float));
	cudaMemcpy(c2, c_dev, n*sizeof(float), cudaMemcpyDeviceToHost);
	//printf("results from GPU\n");
	//print_array(n, 'c', c2);
	printf("n=%d\n", n);
	printf("Check results..");
	// diff holds the first mismatch (loop breaks there) or the last element's diff
	float diff;
	for(i=0; i<n; i++) {
		diff = fabs(c2[i]-c[i]);
		if(diff > 1e-7) break;
	}
	if(diff > 1e-7) printf("Mismatch!\n");
	else printf("OK!\n");
	free(a);
	free(b);
	free(c);
	free(c2);
	cudaFree(a_dev);
	cudaFree(b_dev);
	cudaFree(c_dev);
	return 0;
}
|
7,424 |
/*** malloc stats ****/
// Device-global instrumentation counters for the GPU allocator, compiled in
// only when MALLOC_STATS is defined. All are plain (non-atomic) unsigned int
// __device__ globals — NOTE(review): concurrent updates from many threads
// would race unless incremented with atomics; confirm how callers update them.
#ifdef MALLOC_STATS
__device__ unsigned int numMallocs;
__device__ unsigned int numFrees;
__device__ unsigned int numPageAllocRetries;
__device__ unsigned int numLocklessSuccess;
__device__ unsigned int numWrongFileId;
__device__ unsigned int numRtMallocs;
__device__ unsigned int numRtFrees;
__device__ unsigned int numHT_Miss;
__device__ unsigned int numHT_Hit;
__device__ unsigned int numPreclosePush;
__device__ unsigned int numPrecloseFetch;
__device__ unsigned int numFlushedWrites;
__device__ unsigned int numFlushedReads;
__device__ unsigned int numTrylockFailed;
__device__ unsigned int numKilledBufferCache;
#endif
|
7,425 | #include "includes.h"
#define NO_HIDDEN_NEURONS 5
extern "C"
__global__ void deltasBatch(float *inputs, float *outputs, float *weights, float *weightsDeltas, int noInputs, int inputSize){
int gid = blockIdx.x * blockDim.x + threadIdx.x;
float sum=0;
int offsetDeltas = ((inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS+1)*gid;
int offsetInput = noInputs*inputSize*gid;
int offsetOutputs = noInputs*gid;
float activationHidden[NO_HIDDEN_NEURONS];
float error;
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
for(int imageIndex=0;imageIndex<=inputSize;imageIndex++){
weightsDeltas[offsetDeltas+(inputSize+1)*hidden+imageIndex]=0;
}
}
for(int hidden=0;hidden<=NO_HIDDEN_NEURONS;hidden++){
weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+hidden]=0;
}
for (int i=0;i<noInputs;i++){
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
sum=0;
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
sum+=inputs[offsetInput+i*inputSize+imageIndex]*weights[(inputSize+1)*hidden+imageIndex];
}
sum+=weights[(inputSize+1)*hidden+inputSize];
if(sum>0) activationHidden[hidden]=1;
else activationHidden[hidden]=0;
//activationHidden[hidden]=sum/(1+abs(sum));
}
sum=0;
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
sum+=activationHidden[hidden]*weights[(inputSize+1)*NO_HIDDEN_NEURONS+hidden];
}
sum+=weights[(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS];
if(sum>0)sum=1;
else sum=0;
sum=outputs[offsetOutputs+i]-sum;
if(sum!=0){
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+hidden]+=sum*activationHidden[hidden];
}
weightsDeltas[offsetDeltas+(inputSize+1)*NO_HIDDEN_NEURONS+NO_HIDDEN_NEURONS]+=sum;
for(int hidden=0;hidden<NO_HIDDEN_NEURONS;hidden++){
error=sum*weights[(inputSize+1)*NO_HIDDEN_NEURONS+hidden];
if(error>0)error=1;
else error=0;
error=error-activationHidden[hidden];
if(error!=0){
for(int imageIndex=0;imageIndex<inputSize;imageIndex++){
weightsDeltas[offsetDeltas+(inputSize+1)*hidden+imageIndex]+=error*inputs[offsetInput+i*inputSize+imageIndex];
}
weightsDeltas[offsetDeltas+(inputSize+1)*hidden+inputSize]+=error;
}
}
}
}
} |
7,426 | #include "includes.h"
// Tree-reduces up to blockDim.x pointDim-strided values into the block's first
// element of dest. Assumes a 2D grid of 1024x1 1D blocks; call repeatedly for
// each dimension with dest beginning at dimension d.
// NOTE(review): the zero-padding guard compares threadIdx.x (not the global
// index i) against numPoints — confirm callers' intent for multi-block grids.
__global__ void kernBlockWiseSum(const size_t numPoints, const size_t pointDim, double* dest) {
	int b = blockIdx.y * gridDim.x + blockIdx.x;
	int i = b * blockDim.x + threadIdx.x;
	__shared__ double blockSum[1024];
	// Load into shared memory, zero-padding lanes beyond the data.
	if(threadIdx.x >= numPoints) {
		blockSum[threadIdx.x] = 0;
	} else {
		blockSum[threadIdx.x] = dest[i * pointDim];
	}
	__syncthreads();
	// BUG FIX: the original loop's condition was `threadIdx.x < s`, so upper
	// threads left the loop — and skipped the __syncthreads() inside it — while
	// lower threads were still waiting on that barrier. A barrier in divergent
	// control flow is undefined behavior. Every thread must execute every
	// iteration's barrier; only the addition is predicated.
	for(int s = blockDim.x / 2; s > 0; s >>= 1) {
		if(threadIdx.x < s) {
			blockSum[threadIdx.x] += blockSum[threadIdx.x + s];
		}
		__syncthreads();
	}
	if(threadIdx.x == 0) {
		// Just do one global write (i == this block's first element here).
		dest[i * pointDim] = blockSum[0];
	}
}
7,427 |
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <curand_kernel.h>
#define D 5
#define TRIALS_PER_THREAD 2048
#define BLOCKS 256
#define THREADS 256
__global__ void mc_int(double *res, curandState *states) {
	// Monte Carlo estimate of the D-dimensional integral of exp(-|X|^2) over
	// the unit hypercube: each thread averages TRIALS_PER_THREAD samples and
	// stores its per-thread mean in res[tid].
	const unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
	curand_init(tid, 0, 0, &states[tid]);
	double acc = 0.0;
	double sample[D];
	for (int trial = 0; trial < TRIALS_PER_THREAD; trial++) {
		// Draw one uniform point in (0,1]^D.
		for (int d = 0; d < D; d++)
			sample[d] = curand_uniform(&states[tid]);
		// Evaluate exp(-sum_d x_d^2) at the sampled point.
		double expo = 0.0;
		for (int d = 0; d < D; d++)
			expo -= sample[d] * sample[d];
		acc += exp(expo) / TRIALS_PER_THREAD;
	}
	res[tid] = acc;
}
int main(int argc, char **argv) {
	// Host driver: launch the Monte Carlo kernel, average the per-thread
	// means, scale by the domain volume, and report wall/CPU time.
	double host[BLOCKS * THREADS];
	double *dev;
	curandState *states;
	double integral = 0.0;
	double vol = 1.0;
	clock_t ts = clock();
	struct timeval start, end;
	gettimeofday(&start, NULL);
	// Check every CUDA call: the original ignored all error codes, so a
	// failed allocation or launch silently produced garbage output.
	cudaError_t err;
	err = cudaMalloc((void**) &dev, BLOCKS * THREADS * sizeof(double));
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMalloc dev: %s\n", cudaGetErrorString(err));
		return 1;
	}
	err = cudaMalloc((void**)&states,
	                 BLOCKS * THREADS * sizeof(curandState));
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMalloc states: %s\n", cudaGetErrorString(err));
		return 1;
	}
	mc_int<<<BLOCKS, THREADS>>>(dev, states);
	err = cudaGetLastError();  // catches bad launch configuration
	if (err != cudaSuccess) {
		fprintf(stderr, "kernel launch: %s\n", cudaGetErrorString(err));
		return 1;
	}
	// The blocking copy also synchronizes with the kernel and surfaces any
	// asynchronous execution error.
	err = cudaMemcpy(host, dev, BLOCKS * THREADS * sizeof(double),
	                 cudaMemcpyDeviceToHost);
	if (err != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy: %s\n", cudaGetErrorString(err));
		return 1;
	}
	for(int i = 0; i < BLOCKS * THREADS; i++) {
		integral += host[i];
	}
	integral /= BLOCKS * THREADS;  // grand mean across all threads
	// Volume of the integration domain: the unit hypercube, so each side is 1.
	for (int j = 0; j < D; j++) {
		vol *= 1.0;
	}
	integral *= vol;
	gettimeofday(&end, NULL);
	double elapsed = ((end.tv_sec - start.tv_sec) * 1000000u +
	                  end.tv_usec - start.tv_usec) / 1.e6;
	ts = clock() - ts;
	printf("%ld clocks (%lf seconds)\n", ts, elapsed);
	printf("integral is: %lf\n", integral);
	cudaFree(dev);
	cudaFree(states);
}
|
7,428 | #include <cstdio>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
/**
* @brief display gpu information
* @param devProp device
*/
void dispGPUInfo(const cudaDeviceProp& devProp);
/**
* @brief display gpu information
* @param dev_id gpu id
* @return GPU information
*/
cudaDeviceProp getGPUInfo(const unsigned int& dev_id);
cudaDeviceProp getGPUInfo(const unsigned int& dev_id)
{
	std::printf("----------------GPU----------------\r\n");
	// Value-initialize so a failed query never returns uninitialized memory;
	// the original ignored the cudaGetDeviceProperties return code entirely.
	cudaDeviceProp devProp = cudaDeviceProp();
	cudaError_t err = cudaGetDeviceProperties(&devProp, dev_id);
	if (err != cudaSuccess) {
		std::fprintf(stderr, "cudaGetDeviceProperties(%u): %s\r\n",
		             dev_id, cudaGetErrorString(err));
	}
	return devProp;
}
void dispGPUInfo(const cudaDeviceProp& devProp)
{
	// Pretty-print the most commonly needed device properties.
	// BUG FIX: the first label contained a mojibake character ("ʹGPU name").
	std::printf("GPU name: %s\r\n", devProp.name);
	std::printf("number of SMs: %d\r\n", devProp.multiProcessorCount);
	std::printf("max grid size: %d x %d x %d\r\n", devProp.maxGridSize[0], devProp.maxGridSize[1], devProp.maxGridSize[2]);
	std::printf("size of shared memory per block: %f KB\r\n", devProp.sharedMemPerBlock / 1024.0);
	std::printf("max number of thread per block: %d\r\n", devProp.maxThreadsPerBlock);
	std::printf("max number of thread per SM: %d\r\n", devProp.maxThreadsPerMultiProcessor);
	std::printf("number of block per SM: %d\r\n", devProp.maxThreadsPerMultiProcessor / devProp.maxThreadsPerBlock);
	std::printf("warp size: %d\r\n", devProp.warpSize);
	// BUG FIX: this quantity is the maximum number of resident warps per SM;
	// the original label ("thread per SM per warp size") was misleading.
	std::printf("max number of warps per SM: %d\r\n",
	            devProp.maxThreadsPerMultiProcessor / devProp.warpSize);
}
7,429 | #include <stdio.h>
#include <cstdlib>
#include <iostream>
#include <cuda_runtime.h>
#include <float.h>
#include "cuda_preprocessing.cuh"
#include "util.cuh"
// One Boruvka round, edge-finding phase.
// Grid layout: blockIdx.x tiles the query vertices i (blockDim.x = 64 per
// block); blockIdx.y selects a 64-wide tile of candidate vertices j.
// Edge weight is the pi-adjusted squared Euclidean distance
//   d(i,j) = pi[i] + pi[j] + (x_i-x_j)^2 + (y_i-y_j)^2  (1-tree / Held-Karp).
// Each thread finds its best outgoing edge within the tile, then thread 0
// merges the block's candidates into the per-component minima under a
// per-component spinlock.
__global__ void boruvka_smallest_kernel(int n, float* x, float* y, float* pi, int* component, float* component_best, int* component_best_i, int* component_best_j, int* component_lock) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
// First candidate index of this block's j-tile.
int j = blockIdx.y * blockDim.x;
// Candidate-tile data staged in shared memory.
__shared__ float shared_x[64];
__shared__ float shared_y[64];
__shared__ float shared_pi[64];
// Per-thread best edge weight found in this tile.
__shared__ float shared_best[64];
// NOTE(review): the three arrays below hold integer ids in float storage;
// that is exact only while ids fit in a float mantissa (n < 2^24) — confirm.
__shared__ float shared_best_j[64];
__shared__ float shared_component[64];
__shared__ float shared_component_i[64];
if (j+threadIdx.x < n) {
shared_x[threadIdx.x] = x[j+threadIdx.x];
shared_y[threadIdx.x] = y[j+threadIdx.x];
shared_component[threadIdx.x] = component[j+threadIdx.x];
shared_pi[threadIdx.x] = pi[j+threadIdx.x];
}
if (i < n)
shared_component_i[threadIdx.x] = component[i];
shared_best[threadIdx.x] = FLT_MAX;
shared_best_j[threadIdx.x] = 0;
__syncthreads();
// NOTE(review): threads with i >= n return here and skip the second
// __syncthreads() below — a divergent barrier is formally undefined
// behavior even though it is often tolerated; verify on target hardware.
if (i >= n)
return;
float best = FLT_MAX;
int best_j = -1;
int component_i = component[i];
if (!(i >= n)) {
float pi_i = pi[i], x_i = x[i], y_i = y[i];
// Boundary tiles (array tail, or tiles containing i itself) take the guarded
// path; interior tiles skip the bounds/self tests for speed.
if (j + blockDim.x > n || (i >= j && i < j + blockDim.x)) {
for (int k = 0; k < 64; ++k) {
// Skip out-of-range slots, same-component vertices, and the self edge.
if (j+k >= n || shared_component[k] == component_i || j+k == i)
continue;
float d_ij = pi_i + shared_pi[k];
d_ij += (x_i - shared_x[k]) * (x_i - shared_x[k]);
d_ij += (y_i - shared_y[k]) * (y_i - shared_y[k]);
if (d_ij < best) {
best = d_ij;
best_j = j+k;
}
}
} else {
for (int k = 0; k < 64; ++k) {
if (shared_component[k] == component_i)
continue;
float d_ij = pi_i + shared_pi[k];
d_ij += (x_i - shared_x[k]) * (x_i - shared_x[k]);
d_ij += (y_i - shared_y[k]) * (y_i - shared_y[k]);
if (d_ij < best) {
best = d_ij;
best_j = j+k;
}
}
}
}
shared_best[threadIdx.x] = best;
shared_best_j[threadIdx.x] = best_j;
//printf("%d best %d %.3f\n", i, best_j, best);
__syncthreads();
// Thread 0 serially merges the block's 64 per-thread candidates into the
// global per-component minima.
if (threadIdx.x == 0) {
for (int k = 0; k < 64; ++k) {
int tmp_i = blockIdx.x * blockDim.x + k;
if (tmp_i >= n)
continue;
component_i = shared_component_i[k];
best = shared_best[k];
best_j = shared_best_j[k];
// Unsynchronized "<=" pre-check; re-checked under the lock below.
if (best < component_best[component_i] || best == component_best[component_i]) {
// Spin until this component's lock is acquired.
while (atomicExch(&component_lock[component_i], 1) != 0);
if (best < component_best[component_i]) {
component_best[component_i] = best;
component_best_i[component_i] = tmp_i;
component_best_j[component_i] = best_j;
} else if (abs(best - component_best[component_i]) < 0.0000001) {
// Near-tie: break deterministically on the lexicographically smallest
// (min endpoint, max endpoint) pair so every run picks the same edge.
int mi_c = min(tmp_i, best_j), ma_c = max(tmp_i, best_j);
int mi_o = min(component_best_i[component_i], component_best_j[component_i]), ma_o = max(component_best_i[component_i], component_best_j[component_i]);
if (mi_c < mi_o) {
component_best_i[component_i] = tmp_i;
component_best_j[component_i] = best_j;
} else if (mi_c == mi_o && ma_c < ma_o) {
component_best_i[component_i] = tmp_i;
component_best_j[component_i] = best_j;
}
}
component_lock[component_i] = 0;
// NOTE(review): the fence is issued AFTER the lock word is cleared;
// normally the fence should precede the release so the protected writes
// become globally visible first — verify the intended ordering.
__threadfence();
}
}
}
}
// One single-thread block per vertex: commit this round's chosen edges,
// adopt the pointer-doubled successor label as the new component label, and
// maintain the live-component count and the running tree length.
__global__ void boruvka_update_components(int n,
	int* component, int* successor, float* component_best, int* component_best_i, int* component_best_j, int* component_lock,
	int* degrees, float* L_T, int* components) {
	int i = blockIdx.x;
	int component_i = component[i];
	int component_j = component[component_best_j[component_i]];
	component[i] = successor[i];
	int vertex_ii = component_best_i[component_i];
	int vertex_ij = component_best_j[component_i];
	int vertex_jj = component_best_j[component_j];
	// Only the vertex that owns its component's winning edge commits it.
	if (i == vertex_ii) {
		if (vertex_jj == i) {
			// The two components selected each other: count the shared edge
			// exactly once, from the lower-indexed endpoint.
			if (i < vertex_ij) {
				atomicSub(components, 1);
				atomicAdd(L_T, component_best[component_i]);
			}
		} else {
			atomicSub(components, 1);
			atomicAdd(L_T, component_best[component_i]);
			// BUG FIX: these two calls were corrupted by a mojibake'd
			// "&degrees" (the source read "atomicAdd(°rees[i], 1)"),
			// which does not compile.
			atomicAdd(&degrees[i], 1);
			atomicAdd(&degrees[vertex_ij], 1);
			//printf("e %d %d %.3f\n", min(i, vertex_ij), max(i, vertex_ij), component_best[component_i]);
		}
		__threadfence();
	}
}
// One single-thread block per vertex: break 2-cycles created when two
// components selected each other's edge. The lower-indexed owning vertex
// rewrites its component's chosen endpoint into a self-loop.
__global__ void boruvka_remove_cycles(int n,
	int* component, float* component_best, int* component_best_i, int* component_best_j, int* component_lock,
	int* degrees, float* L_T, int* components) {
	int i = blockIdx.x;
	int comp = component[i];
	int partner_comp = component[component_best_j[comp]];
	int edge_src = component_best_i[comp];
	int edge_dst = component_best_j[comp];
	int partner_dst = component_best_j[partner_comp];
	// Only the vertex that owns its component's winning edge acts.
	if (i != edge_src)
		return;
	// Mutual selection forms a 2-cycle; the smaller endpoint collapses it.
	if (partner_dst == i && i < edge_dst)
		component_best_j[comp] = i;
}
// One single-thread block per vertex: two pointer-jumping hops. Each hop
// replaces a vertex's label with the label of its chosen edge's endpoint,
// halving the depth of the component forest; the host launches this
// repeatedly until the labels converge.
__global__ void boruvka_pointer_doubling(int n,
	int* component, float* component_best, int* component_best_i, int* component_best_j, int* component_lock,
	int* degrees, float* L_T, int* components) {
	int v = blockIdx.x;
	for (int hop = 0; hop < 2; ++hop)
		component[v] = component[component_best_j[component[v]]];
}
// For every leaf of the current tree (degree exactly 1), compute its
// distance to the second-nearest neighbor, and keep the global MAXIMUM of
// those second-nearest distances in closest[0] (endpoints in closest_i).
// Launched with one single-thread block per vertex.
__global__ void second_closest_find(int n,
float* x, float* y, float* pi, int* degrees, float* closest, int* closest_i, int* lock) {
int i = blockIdx.x;
// Only leaves participate.
if (degrees[i] != 1)
return;
// c_j1/d_j1: nearest neighbor; c_j2/d_j2: second nearest.
int c_j1 = 0, c_j2 = 0;
float d_j1 = FLT_MAX, d_j2 = FLT_MAX;
for (int j = 0; j < n; ++j) {
if (i == j)
continue;
// pi-adjusted squared distance, matching the edge weights used elsewhere.
float d_ij = pi[j] + pi[i];
d_ij += (x[j] - x[i]) * (x[j] - x[i]);
d_ij += (y[j] - y[i]) * (y[j] - y[i]);
if (d_ij < d_j1 && d_ij < d_j2) {
// New nearest: the previous nearest becomes the second nearest.
c_j2 = c_j1;
d_j2 = d_j1;
c_j1 = j;
d_j1 = d_ij;
} else if (d_ij < d_j2) {
c_j2 = j;
d_j2 = d_ij;
}
}
//printf("i sec %d %d %.3f %d %.3f %.3f\n", i, c_j1, d_j1, c_j2, d_j2, closest[0]);
// Racy pre-check, then re-check under the global spinlock before updating.
if (d_j2 > closest[0]) {
while (atomicExch(&lock[0], 1) != 0);
if (d_j2 > closest[0]) {
//printf("lait i sec %d %d %.3f %d %.3f %.3f\n", i, c_j1, d_j1, c_j2, d_j2, closest[0]);
closest[0] = d_j2;
closest_i[0] = c_j2;
closest_i[1] = i;
}
lock[0] = 0;
// NOTE(review): fence issued after the lock release; normally it should
// come before clearing the lock word — confirm intended ordering.
__threadfence();
}
}
// Single-thread kernel: commit the chosen "second closest" edge by bumping
// both endpoint degrees and adding its length to the total tree length.
__global__ void second_closest_set(float* closest, int* closest_i, int* degrees, float* L_T) {
	degrees[closest_i[0]] += 1;
	degrees[closest_i[1]] += 1;
	//printf("s %d %d %.3f\n", min(closest_i[0], closest_i[1]), max(closest_i[0], closest_i[1]), closest[0]);
	L_T[0] += closest[0];
}
// Build a minimum 1-tree (MST plus one extra edge at a leaf) under the
// pi-adjusted distances, entirely on the GPU.
// Gx/Gy/Gpi are device pointers of length n.
// Returns {total 1-tree length, per-vertex degrees}.
std::pair<float, std::vector<int>> gpu_boruvka_onetree(int n, float* Gx, float* Gy, float* Gpi) {
	// component[i] / successor[i] both start as the identity labeling.
	int* Gcomponent = NULL;
	cudaMalloc((void**)&Gcomponent, n * sizeof(int));
	std::vector<int> super_init;
	for (int i = 0; i < n; ++i)
		super_init.push_back(i);
	cudaMemcpy(Gcomponent, super_init.data(), n * sizeof(int), cudaMemcpyHostToDevice);
	int* Gsuccessor = NULL;
	cudaMalloc((void**)&Gsuccessor, n * sizeof(int));
	cudaMemcpy(Gsuccessor, super_init.data(), n * sizeof(int), cudaMemcpyHostToDevice);
	// Per-component spinlocks used by the minimum-edge reduction.
	int* Gvertex_lock = NULL;
	cudaMalloc((void**)&Gvertex_lock, n * sizeof(int));
	cudaMemset(Gvertex_lock, 0, n*sizeof(int));
	// Per-component best edge: weight and both endpoints.
	float* Gsmallest_add = NULL;
	cudaMalloc((void**)&Gsmallest_add, n * sizeof(float));
	int* Gsmallest_i = NULL;
	cudaMalloc((void**)&Gsmallest_i, n * sizeof(int));
	int* Gsmallest_j = NULL;
	cudaMalloc((void**)&Gsmallest_j, n * sizeof(int));
	int* Gdegrees = NULL;
	cudaMalloc((void**)&Gdegrees, n * sizeof(int));
	cudaMemset(Gdegrees, 0, n*sizeof(int));
	// Scalar accumulators: total tree length and live component count.
	float* GL_T = NULL;
	cudaMalloc((void**)&GL_T, 1 * sizeof(float));
	cudaMemset(GL_T, 0, 1*sizeof(float));
	int* Gcomponents = NULL;
	cudaMalloc((void**)&Gcomponents, 1 * sizeof(int));
	int tmp = n;
	cudaMemcpy(Gcomponents, &tmp, 1 * sizeof(int), cudaMemcpyHostToDevice);
	int components = n;
	dim3 dimBlock(64, 1);
	dim3 dimGrid(divup(n, 64), divup(n, 64));
	std::vector<float> inf(n, std::numeric_limits<float>::max());
	// Boruvka rounds: find each component's cheapest outgoing edge, break
	// mutual-selection 2-cycles, collapse labels by pointer doubling, then
	// merge components and accumulate the tree length.
	while (components > 1) {
		cudaMemcpy(Gsmallest_add, inf.data(), n * sizeof(float), cudaMemcpyHostToDevice);
		boruvka_smallest_kernel<<<dimGrid, dimBlock>>>(n, Gx, Gy, Gpi, Gcomponent, Gsmallest_add, Gsmallest_i, Gsmallest_j, Gvertex_lock);
		CHECK(cudaGetLastError());
		cudaDeviceSynchronize();
		boruvka_remove_cycles<<<dim3(n, 1), dim3(1, 1)>>>(n, Gcomponent, Gsmallest_add, Gsmallest_i, Gsmallest_j, Gvertex_lock, Gdegrees, GL_T, Gcomponents);
		CHECK(cudaGetLastError());
		cudaDeviceSynchronize();
		// O(log components) doubling passes suffice to flatten the chains.
		int n_pd = components;
		while (n_pd > 0) {
			boruvka_pointer_doubling<<<dim3(n, 1), dim3(1, 1)>>>(n, Gsuccessor, Gsmallest_add, Gsmallest_i, Gsmallest_j, Gvertex_lock, Gdegrees, GL_T, Gcomponents);
			CHECK(cudaGetLastError());
			cudaDeviceSynchronize();
			n_pd /= 2;
		}
		boruvka_update_components<<<dim3(n, 1), dim3(1, 1)>>>(n, Gcomponent, Gsuccessor, Gsmallest_add, Gsmallest_i, Gsmallest_j, Gvertex_lock, Gdegrees, GL_T, Gcomponents);
		CHECK(cudaGetLastError());
		cudaDeviceSynchronize();
		cudaMemcpy(&components, Gcomponents, 1 * sizeof(int), cudaMemcpyDeviceToHost);
		cudaDeviceSynchronize();
	}
	// Turn the MST into a 1-tree: among all leaves, add the second-nearest
	// edge of the leaf that maximizes that distance.
	float* Gclosest = NULL;
	cudaMalloc((void**)&Gclosest, 2 * sizeof(float));
	std::vector<float> minf(2, -std::numeric_limits<float>::max());
	cudaMemcpy(Gclosest, minf.data(), 2 * sizeof(float), cudaMemcpyHostToDevice);
	int* Gclosest_idx = NULL;
	cudaMalloc((void**)&Gclosest_idx, 2 * sizeof(int));
	second_closest_find<<<dim3(n, 1), dim3(1, 1)>>>(n, Gx, Gy, Gpi, Gdegrees, Gclosest, Gclosest_idx, Gvertex_lock);
	CHECK(cudaGetLastError());
	cudaDeviceSynchronize();
	second_closest_set<<<dim3(1, 1), dim3(1, 1)>>>(Gclosest, Gclosest_idx, Gdegrees, GL_T);
	CHECK(cudaGetLastError());
	cudaDeviceSynchronize();
	float return_length;
	cudaMemcpy(&return_length, GL_T, 1 * sizeof(float), cudaMemcpyDeviceToHost);
	cudaDeviceSynchronize();
	std::vector<int> d(n);
	// BUG FIX: the degree array holds ints; the original copied
	// n * sizeof(float), which only happened to work because
	// sizeof(int) == sizeof(float) on this platform.
	cudaMemcpy(d.data(), Gdegrees, n * sizeof(int), cudaMemcpyDeviceToHost);
	cudaDeviceSynchronize();
	cudaFree(Gcomponent);
	cudaFree(Gsuccessor);
	cudaFree(Gvertex_lock);
	cudaFree(Gsmallest_add);
	cudaFree(Gsmallest_i);
	cudaFree(Gsmallest_j);
	cudaFree(Gdegrees);
	cudaFree(GL_T);
	cudaFree(Gcomponents);
	cudaFree(Gclosest);
	cudaFree(Gclosest_idx);
	return {return_length, d};
}
// Held-Karp style subgradient ascent on the vertex penalties pi: repeatedly
// build a minimum 1-tree on the GPU, use the degree surplus (deg - 2) as the
// subgradient, and remember the pi vector that maximized the 1-tree lower
// bound. Returns that best pi.
std::vector<float> gpu_subgradient_opt_alpha(float* x, float* y, int n) {
float* Gpi = NULL;
cudaMalloc((void**)&Gpi, n * sizeof(float));
cudaMemset(Gpi, 0, n*sizeof(float));
float* Gx = NULL;
cudaMalloc((void**)&Gx, n * sizeof(float));
cudaMemcpy(Gx, x, n * sizeof(float), cudaMemcpyHostToDevice);
float* Gy = NULL;
cudaMalloc((void**)&Gy, n * sizeof(float));
cudaMemcpy(Gy, y, n * sizeof(float), cudaMemcpyHostToDevice);
std::vector<float> pi(n, 0), best_pi(n, 0);
// Initial 1-tree with pi = 0 seeds the bound and the starting subgradient.
auto [init_w, init_d] = gpu_boruvka_onetree(n, Gx, Gy, Gpi);
float best_w = init_w;
// v = current subgradient (deg - 2); last_v = previous one (for smoothing).
std::vector<int> last_v(n), v(n);
bool is_tour = true;
for (int i = 0; i < n; ++i) {
last_v[i] = init_d[i] - 2;
v[i] = last_v[i];
is_tour &= (last_v[i] == 0);
}
bool initial_phase = true;
int initial_period = 1000;
int period = initial_period;
// Step length t halves between periods; the period length also shrinks.
for (float t = 1.f; t > 0; t /= 2.f, period /= 2) {
for (int p = 1; t > 0 && p <= period; ++p) {
// Move pi along a 0.7/0.3 blend of the current and previous subgradients.
for (int i = 0; i < n; ++i) {
pi[i] += t * ( 0.7f * v[i] + 0.3f * last_v[i]);
//std::cout << pi[i] << " ";
}
//std::cout << std::endl;
cudaMemcpy(Gpi, pi.data(), n * sizeof(float), cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
last_v = v;
auto [w, d] = gpu_boruvka_onetree(n, Gx, Gy, Gpi);
// Convert the 1-tree length into the Held-Karp bound: w(pi) - 2*sum(pi).
for (int i = 0; i < n; ++i)
w -= 2 * pi[i];
is_tour = true;
for (int i = 0; i < n; ++i) {
v[i] = d[i] - 2;
is_tour &= (v[i] == 0);
}
//printf("upt %.3f %.3f %.3f %d %d\n", w, best_w, t, p, period, initial_period);
if (w > best_w) {
// Improved bound: remember pi; while in the aggressive initial phase the
// step grows, and finishing a period at an improvement extends the period.
best_w = w;
best_pi = pi;
if (initial_phase)
t *= 2.f;
if (p == period)
period *= 2;
} else if (initial_phase && p > initial_period / 2) {
// No improvement for half the initial period: switch to decaying steps.
initial_phase = false;
p = 0;
t = 0.75f * t;
}
}
}
cudaFree(Gpi);
cudaFree(Gx);
cudaFree(Gy);
return best_pi;
}
|
7,430 | #include "includes.h"
// Swap two floats in place (device-side helper for the flip kernels).
__device__ void exchange(float &a, float &b){
	const float saved = a;
	a = b;
	b = saved;
}
// Mirrors a 3-channel coordinate volume along any subset of axes.
// coords holds three dense dim_z*dim_y*dim_x channels back to back:
// channel 0 (z values) at offset 0, channel 1 (y) at +total, channel 2 (x)
// at +2*total. One thread per voxel of one channel.
__global__ void flip_3D(float* coords, size_t dim_z, size_t dim_y, size_t dim_x, int do_z, int do_y, int do_x){
	// Promote to size_t before multiplying so huge grids cannot overflow int.
	size_t index = (size_t)blockIdx.x * blockDim.x + threadIdx.x;
	size_t total = dim_x * dim_y * dim_z;
	size_t total_xy = dim_x * dim_y;
	if(index >= total)
		return;
	size_t id_x = index % dim_x;
	// BUG FIX: the row index must be reduced modulo dim_y; the original used
	// "% dim_x", which is wrong whenever the volume is not square in x/y.
	size_t id_y = (index / dim_x) % dim_y;
	size_t id_z = index / total_xy;
	// Each swap pair is handled by exactly one thread (the one in the lower
	// half of the flipped axis), and the three channels are disjoint memory,
	// so no synchronization is needed. (The original called __syncthreads()
	// inside divergent branches, which is undefined behavior.)
	if(do_x && id_x < (dim_x / 2)){
		exchange(coords[2 * total + id_z * total_xy + id_y * dim_x + id_x],
		         coords[2 * total + id_z * total_xy + id_y * dim_x + dim_x-1 - id_x]);
	}
	if(do_y && id_y < (dim_y / 2)){
		exchange(coords[total + id_z * total_xy + id_y * dim_x + id_x],
		         coords[total + id_z * total_xy + (dim_y-1 - id_y) * dim_x + id_x]);
	}
	if(do_z && id_z < (dim_z / 2)){
		exchange(coords[id_z * total_xy + id_y * dim_x + id_x],
		         coords[(dim_z-1 - id_z) * total_xy + id_y * dim_x + id_x]);
	}
}
7,431 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
// 2D convolution of an interleaved RGB image with a square pattern.
// One thread per output pixel: grid/block x indexes rows, y indexes columns.
// Out-of-image neighbors are clamped to the border (edge replication).
__global__ void pattern_blur_kernel(
    float *pattern, int pattern_size,
    unsigned char *in_img, int height, int width,
    unsigned char *out_img)
{
    int row = threadIdx.x + blockIdx.x * blockDim.x;
    int col = threadIdx.y + blockIdx.y * blockDim.y;
    // Guard the grid tail.
    if (row >= height || col >= width)
        return;
    int radius = (pattern_size - 1) / 2;
    // Convolve each of the three interleaved channels independently.
    for (int ch = 0; ch < 3; ++ch)
    {
        float acc = 0.0f;
        for (int di = -radius; di <= radius; ++di)
        {
            for (int dj = -radius; dj <= radius; ++dj)
            {
                // Clamp neighbor coordinates to the image bounds.
                int r = row + di;
                int c = col + dj;
                r = r < 0 ? 0 : (r >= height ? height - 1 : r);
                c = c < 0 ? 0 : (c >= width ? width - 1 : c);
                acc += pattern[(di + radius) * pattern_size + (dj + radius)]
                     * in_img[(r * width + c) * 3 + ch];
            }
        }
        // Saturate to [0, 255] before the narrowing cast.
        acc = acc < 0 ? 0 : (acc > 255 ? 255 : acc);
        out_img[(row * width + col) * 3 + ch] = (unsigned char) acc;
    }
}
int main(int argc, char *argv[])
{
    // Usage: normalized-pattern text file, 24-bit input BMP (138-byte header
    // assumed), output BMP path.
    if (argc < 4)
    {
        printf(
            "Usage: %s input_pattern.txt input_img.bmp output.bmp\n", argv[0]);
        return 1;
    }
    /* --- Read and normalize the convolution pattern. --- */
    FILE *input_pattern = fopen(argv[1], "r");
    if (!input_pattern)
    {
        fprintf(stderr, "cannot open %s\n", argv[1]);
        return 1;
    }
    int pattern_size;
    fscanf(input_pattern, "%d", &pattern_size);
    int pattern_sizeof = pattern_size * pattern_size * sizeof(float);
    float *h_pattern = (float *) malloc(pattern_sizeof);
    float pattern_sum = 0.0;
    for (int i = 0; i < pattern_size; ++i)
    {
        for (int j = 0; j < pattern_size; ++j)
        {
            float value;
            fscanf(input_pattern, "%f", &value);
            h_pattern[i * pattern_size + j] = value;
            pattern_sum += value;
        }
    }
    // Normalize so the pattern sums to 1; skip zero-sum patterns
    // (e.g. edge detectors) to avoid dividing by zero.
    if (pattern_sum)
    {
        for (int i = 0; i < pattern_size; ++i)
        {
            for (int j = 0; j < pattern_size; ++j)
            {
                h_pattern[i * pattern_size + j] /= pattern_sum;
            }
        }
    }
    fclose(input_pattern);
    /* --- Read the BMP image (header fields at fixed offsets). --- */
    FILE *input_img = fopen(argv[2], "rb");
    if (!input_img)
    {
        fprintf(stderr, "cannot open %s\n", argv[2]);
        return 1;
    }
    int width, height;
    unsigned short int bpp;
    unsigned char header[138];
    fseek(input_img, 18, 0);
    fread(&width, sizeof(int), 1, input_img);
    fseek(input_img, 22, 0);
    fread(&height, sizeof(int), 1, input_img);
    fseek(input_img, 28, 0);
    // BUG FIX: biBitCount is a 2-byte field; the original read a single byte
    // and left the high byte of bpp uninitialized, corrupting the padding
    // computation below.
    fread(&bpp, sizeof(unsigned short int), 1, input_img);
    fseek(input_img, 0, 0);
    fread(&header, sizeof(unsigned char), 138, input_img);
    int img_sizeof = height * width * 3 * sizeof(unsigned char);
    unsigned char *h_in_img = (unsigned char *) malloc(img_sizeof);
    // BMP rows are padded up to a 4-byte boundary.
    unsigned int padding_size = (int)((width * bpp + 31) / 32) * 4 - width * 3;
    unsigned char *h_padding = (unsigned char *) malloc(padding_size);
    for (int i = 0; i < height; ++i)
    {
        for (int j = 0; j < width; ++j)
        {
            unsigned char b, g, r;
            fread(&b, sizeof(unsigned char), 1, input_img);
            fread(&g, sizeof(unsigned char), 1, input_img);
            fread(&r, sizeof(unsigned char), 1, input_img);
            h_in_img[(i * width + j) * 3] = r;
            h_in_img[(i * width + j) * 3 + 1] = g;
            h_in_img[(i * width + j) * 3 + 2] = b;
        }
        if (padding_size)
        {
            // BUG FIX: read into the buffer, not into the pointer variable
            // (the original passed &h_padding, clobbering the pointer).
            fread(h_padding, padding_size, 1, input_img);
        }
    }
    fclose(input_img);
    unsigned char *h_out_img = (unsigned char *) malloc(img_sizeof);
    /* --- GPU convolution. --- */
    float *d_pattern;
    unsigned char *d_in_img;
    unsigned char *d_out_img;
    cudaSetDevice(0);
    cudaMalloc((void **) &d_pattern, pattern_sizeof);
    cudaMalloc((void **) &d_in_img, img_sizeof);
    cudaMalloc((void **) &d_out_img, img_sizeof);
    cudaMemcpy(d_pattern, h_pattern, pattern_sizeof, cudaMemcpyHostToDevice);
    cudaMemcpy(d_in_img, h_in_img, img_sizeof, cudaMemcpyHostToDevice);
    // Grid x covers rows, grid y covers columns — matches the kernel indexing.
    dim3 gridSize((int)(height / 16) + 1, int(width / 16) + 1);
    dim3 blockSize(16, 16);
    pattern_blur_kernel<<< gridSize, blockSize >>>(
        d_pattern, pattern_size,
        d_in_img, height, width,
        d_out_img);
    cudaDeviceSynchronize();
    cudaMemcpy(h_out_img, d_out_img, img_sizeof, cudaMemcpyDeviceToHost);
    /* --- Write the result: original header, BGR pixels plus row padding. --- */
    FILE *output_img = fopen(argv[3], "wb");
    if (!output_img)
    {
        fprintf(stderr, "cannot open %s\n", argv[3]);
        return 1;
    }
    fwrite(header, sizeof(unsigned char), 138, output_img);
    for (int i = 0; i < height; ++i)
    {
        for (int j = 0; j < width; ++j)
        {
            unsigned char r = h_out_img[(i * width + j) * 3];
            unsigned char g = h_out_img[(i * width + j) * 3 + 1];
            unsigned char b = h_out_img[(i * width + j) * 3 + 2];
            fwrite(&b, sizeof(unsigned char), 1, output_img);
            fwrite(&g, sizeof(unsigned char), 1, output_img);
            fwrite(&r, sizeof(unsigned char), 1, output_img);
        }
        if (padding_size)
        {
            // BUG FIX: write from the buffer, not from &h_padding.
            fwrite(h_padding, padding_size, 1, output_img);
        }
    }
    fflush(output_img);
    fclose(output_img);
    free(h_pattern);
    free(h_padding);
    free(h_in_img);
    free(h_out_img);
    cudaFree(d_pattern);
    cudaFree(d_in_img);
    cudaFree(d_out_img);
    return 0;
}
|
7,432 | #include "includes.h"
// Backward pass of log-softmax, one row per block:
//   grad[j] += adj[j] - softmax(val)[j] * sum_k adj[k]
// where val holds log-softmax outputs (so expf(val) is the softmax).
// Requires dynamic shared memory: _sum is placed at offset blockDim.x inside
// _share, so the launch must provide at least 2*blockDim.x floats — the
// first half appears unused here (presumably shared with sibling kernels;
// confirm against the launch site).
__global__ void gLogSoftmaxGrad(float* grad, const float* adj, const float* val, const int rows, const int cols) {
for(int bid = 0; bid < rows; bid += gridDim.x) {
int j = bid + blockIdx.x;
if(j < rows) {
extern __shared__ float _share[];
float* _sum = _share + blockDim.x;
float* gradRow = grad + j * cols;
const float* adjRow = adj + j * cols;
const float* valRow = val + j * cols;
// Phase 1: per-thread partial sums of adjRow over a strided column range.
_sum[threadIdx.x] = 0.0;
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols) {
_sum[threadIdx.x] += adjRow[id];
}
}
__syncthreads();
// Phase 2: tree reduction of _sum; the (len+1)>>1 skip handles
// non-power-of-two block widths.
int len = blockDim.x;
while(len != 1) {
__syncthreads();
int skip = (len + 1) >> 1;
if(threadIdx.x < (len >> 1))
_sum[threadIdx.x] += _sum[threadIdx.x + skip];
len = (len + 1) >> 1;
}
__syncthreads();
// Phase 3: apply the row update using the reduced sum in _sum[0].
for(int tid = 0; tid < cols; tid += blockDim.x) {
int id = tid + threadIdx.x;
if(id < cols)
gradRow[id] += adjRow[id] - (expf(valRow[id]) * _sum[0]);
}
}
}
}
7,433 | #include <stdio.h>
#include <math.h>
#include <stdint.h> //uint32_tは符号なしintで4バイトに指定
#include <stdlib.h> //記憶域管理を使うため
#include <cuda.h>
/*記号定数として横幅と縦幅を定義*/
#define width 1024
#define heigth 1024
/* BMP file structures */
#pragma pack(push,1)
typedef struct tagBITMAPFILEHEADER{ // File header at the start of a BMP file; 14 bytes with packing.
unsigned short bfType; // Format magic: "BM" (0x4d42).
uint32_t bfSize; // Total file size in bytes.
unsigned short bfReserved1; // Reserved fields; always 0.
unsigned short bfReserved2;
uint32_t bf0ffBits; // Byte offset from the file start to the pixel data.
}BITMAPFILEHEADER;
#pragma pack(pop)
typedef struct tagBITMAPINFOHEADER{ // BMP image-information header; 40 bytes.
uint32_t biSize; // Size of this header (40).
uint32_t biWidth; // Image width in pixels.
uint32_t biHeight; // Image height in pixels.
unsigned short biPlanes; // Always 1.
unsigned short biBitCount; // Bits per pixel; 8 in this program.
uint32_t biCompression; // Compression type; 0 for uncompressed BMP.
uint32_t biSizeImage; // Pixel-array size; conventionally 0 when biCompression is 0.
uint32_t biXPelsPerMeter; // Resolution fields; left 0 here.
uint32_t biYPelsPerMeter;
uint32_t biCirUsed; // Palette entries used; 0 = all.
uint32_t biCirImportant; // Important palette entries; 0 = all.
}BITMAPINFOHEADER;
typedef struct tagRGBQUAD{ // One palette entry (grayscale ramp in this program).
unsigned char rgbBlue;
unsigned char rgbGreen;
unsigned char rgbRed;
unsigned char rgbReserved;
}RGBQUAD;
/* CGH kernel: accumulate the interference (cosine) contribution of every
   object point into the hologram intensity at pixel (column j, row i). */
__global__ void func_cgh_gpu(int *x_d, int *y_d, float *z_d, float *lumi_intensity_d, int *points_d){
	int i, j, k;
	j=blockDim.x*blockIdx.x+threadIdx.x; // column (replaces the width loop)
	i=blockDim.y*blockIdx.y+threadIdx.y; // row (replaces the height loop)
	// Guard the grid tail: the launch may spawn more threads than pixels
	// whenever width/height is not a multiple of the block size.
	if(i >= heigth || j >= width)
		return;
	float interval=10.5F;  // pixel pitch
	float wave_len=0.633F; // light wavelength
	float wave_num=2.0*M_PI/wave_len; // wave number
	for(k=0; k<*points_d; k++){
		lumi_intensity_d[i*width+j]=lumi_intensity_d[i*width+j]+cosf(interval*wave_num*sqrt((j-x_d[k])*(j-x_d[k])+(i-y_d[k])*(i-y_d[k])+z_d[k]*z_d[k]));
	}
}
/* Main: compute a binary CGH (computer-generated hologram) from a 3D point
   cloud on the GPU and save it as an 8-bit grayscale BMP. */
int main(){
	BITMAPFILEHEADER bmpFh;
	BITMAPINFOHEADER bmpIh;
	RGBQUAD rgbQ[256];
	/* Host-side variables */
	int i, j;
	int points;            // number of object points
	float *lumi_intensity; // accumulated light intensity per pixel
	float min, max, mid;   // used for binarization
	unsigned char *img;    // 8-bit output image
	FILE *fp, *fp1;
	/* BITMAPFILEHEADER: 'B''M' magic; size = headers + 1024-byte palette
	   (256 gray levels x 4 bytes) + pixel data. */
	bmpFh.bfType =19778;
	bmpFh.bfSize =14+40+1024+(width*heigth);
	bmpFh.bfReserved1 =0;
	bmpFh.bfReserved2 =0;
	bmpFh.bf0ffBits =14+40+1024;
	/* BITMAPINFOHEADER: width x heigth, 8 bits per pixel, uncompressed. */
	bmpIh.biSize =40;
	bmpIh.biWidth =width;
	bmpIh.biHeight =heigth;
	bmpIh.biPlanes =1;
	bmpIh.biBitCount =8;
	bmpIh.biCompression =0;
	bmpIh.biSizeImage =0;
	bmpIh.biXPelsPerMeter =0;
	bmpIh.biYPelsPerMeter =0;
	bmpIh.biCirUsed =0;
	bmpIh.biCirImportant =0;
	/* Grayscale palette: entry i maps to gray level i. */
	for(i=0; i<256; i++){
		rgbQ[i].rgbBlue =i;
		rgbQ[i].rgbGreen =i;
		rgbQ[i].rgbRed =i;
		rgbQ[i].rgbReserved =0;
	}
	/* Read the binary 3D object file: point count, then x,y,z per point. */
	fp=fopen("cube284.3d","rb");
	if(fp==NULL){
		fprintf(stderr, "cannot open cube284.3d\n");
		return 1;
	}
	fread(&points, sizeof(int), 1, fp);
	/* VLAs sized by the point count just read. */
	int x[points];
	int y[points];
	float z[points];
	int x_buf, y_buf, z_buf; // temporary storage for one point
	/* Scale the points (x40) and shift them to the hologram-plane center;
	   z gets an additional 100000 stand-off distance. */
	for(i=0; i<points; i++){
		fread(&x_buf, sizeof(int), 1, fp);
		fread(&y_buf, sizeof(int), 1, fp);
		fread(&z_buf, sizeof(int), 1, fp);
		x[i]=x_buf*40+width*0.5;
		y[i]=y_buf*40+heigth*0.5;
		z[i]=((float)z_buf)*40+100000.0F;
	}
	fclose(fp);
	/* BUG FIX: the intensity buffer is an accumulator, so it must start at
	   zero. The original malloc left it uninitialized, and the kernel then
	   added cosine terms onto garbage values. */
	lumi_intensity=(float *)calloc(width*heigth, sizeof(float));
	/* Device-side variables */
	int *x_d, *y_d;
	float *z_d;
	float *lumi_intensity_d;
	int *points_d;
	dim3 block(32,32,1); // threads per block
	dim3 grid(ceil(width/block.x),ceil(heigth/block.y),1); // blocks per grid
	// dim3 grid((width+block.x-1)/block.x,(heigth+block.y-1)/block.y,1);
	/* Device allocations */
	cudaMalloc((void**)&x_d, points*sizeof(int));
	cudaMalloc((void**)&y_d, points*sizeof(int));
	cudaMalloc((void**)&z_d, points*sizeof(float));
	cudaMalloc((void**)&lumi_intensity_d, width*heigth*sizeof(float));
	cudaMalloc((void**)&points_d, sizeof(int));
	/* Host -> device transfers */
	cudaMemcpy(x_d, x, points*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(y_d, y, points*sizeof(int), cudaMemcpyHostToDevice);
	cudaMemcpy(z_d, z, points*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(lumi_intensity_d, lumi_intensity, width*heigth*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(points_d, &points, sizeof(int), cudaMemcpyHostToDevice);
	/* Launch the CGH kernel; the blocking copy below synchronizes with it. */
	func_cgh_gpu<<< grid, block >>>(x_d, y_d, z_d, lumi_intensity_d, points_d);
	/* Device -> host transfer of the accumulated intensity. */
	cudaMemcpy(lumi_intensity, lumi_intensity_d, width*heigth*sizeof(float), cudaMemcpyDeviceToHost);
	/* Release device memory. */
	cudaFree(x_d);
	cudaFree(y_d);
	cudaFree(z_d);
	cudaFree(lumi_intensity_d);
	cudaFree(points_d);
	/* Find the min and max intensity to derive the binarization threshold. */
	min=lumi_intensity[0];
	max=lumi_intensity[0];
	for(i=0; i<heigth; i++){
		for(j=0; j<width; j++){
			if(min>lumi_intensity[i*width+j]){
				min=lumi_intensity[i*width+j];
			}
			if(max<lumi_intensity[i*width+j]){
				max=lumi_intensity[i*width+j];
			}
		}
	}
	mid=(min+max)*0.5F; // midpoint threshold
	printf("min=%lf, max=%lf, mid=%lf\n", min, max, mid);
	img=(unsigned char *)malloc(sizeof(unsigned char)*width*heigth);
	/* Binarize against the threshold. BUG FIX: the original left img[i]
	   uninitialized when the intensity equaled mid exactly; >= mid now maps
	   to white. */
	for(i=0; i<width*heigth; i++){
		if(lumi_intensity[i]<mid){
			img[i]=0;
		}else{
			img[i]=255;
		}
	}
	/* Write the BMP: file header, info header, palette, then pixels. */
	fp1=fopen("root-gpu.bmp","wb");
	if(fp1==NULL){
		fprintf(stderr, "cannot open root-gpu.bmp for writing\n");
		free(lumi_intensity);
		free(img);
		return 1;
	}
	fwrite(&bmpFh, sizeof(bmpFh), 1, fp1);
	fwrite(&bmpIh, sizeof(bmpIh), 1, fp1);
	fwrite(&rgbQ[0], sizeof(rgbQ[0]), 256, fp1);
	fwrite(img, sizeof(unsigned char), width*heigth, fp1);
	printf("'root-gpu.bmp' was saved.\n\n");
	/* Release host memory. */
	free(lumi_intensity);
	free(img);
	fclose(fp1);
	return 0;
}
|
7,434 | // Simple vector addition, from the samples provided in the CUDA SDK.
// Author: Allen Porter <allen@thebends.org>
#include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
// Element-wise vector addition, C = A + B.
// Launched as a single block with one thread per element (no bounds check,
// so the launch must use exactly N threads).
__global__ void VecAdd(float* A, float* B, float* C) {
	const int idx = threadIdx.x;
	C[idx] = A[idx] + B[idx];
}
int main(int argc, char**argv)
{
	// Demo driver: add two small vectors on the GPU and print all three.
	int N = 10;
	size_t size = N * sizeof(float);
	// Input; Host memory
	float* h_A = (float*)malloc(size);
	float* h_B = (float*)malloc(size);
	if (h_A == NULL || h_B == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}
	for (int i = 0; i < N; i++) {
		h_A[i] = i;
		h_B[i] = i;
	}
	// Device memory
	float* d_A;
	cudaMalloc((void**)&d_A, size);
	float* d_B;
	cudaMalloc((void**)&d_B, size);
	float* d_C;
	cudaMalloc((void**)&d_C, size);
	// Copy from host to device
	cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
	cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
	// Invoke kernel: one block of N threads.
	VecAdd<<<1, N>>>(d_A, d_B, d_C);
	cudaError_t err = cudaGetLastError();  // catch bad launch configuration
	if (err != cudaSuccess) {
		fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
		return 1;
	}
	float* h_C = (float*)malloc(size);
	if (h_C == NULL) {
		fprintf(stderr, "host allocation failed\n");
		return 1;
	}
	// The blocking copy also synchronizes with the kernel.
	cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
	for (int i = 0; i < N; i++) {
		printf("%0.f ", h_A[i]);
	}
	printf("\n");
	for (int i = 0; i < N; i++) {
		printf("%0.f ", h_B[i]);
	}
	printf("\n");
	for (int i = 0; i < N; i++) {
		printf("%0.f ", h_C[i]);
	}
	printf("\n");
	// Release device AND host memory (the original leaked the host buffers).
	cudaFree(d_A);
	cudaFree(d_B);
	cudaFree(d_C);
	free(h_A);
	free(h_B);
	free(h_C);
	return 0;
}
|
7,435 | #include <cmath>
#include <fstream>
#include <iomanip>
#include <limits>
#include <stdexcept>
#include <string>
#include <vector>
#include <cfloat>
using namespace std;
// Simulation constants and small helpers shared by host and device code.
namespace param {
const int n_steps = 200000; // total simulation steps
const double dt = 60; // timestep [s]
const double eps = 1e-3; // softening term to avoid singular forces
const double G = 6.674e-11; // gravitational constant
// Time-varying mass of a "gravity device": oscillates within [m0, 1.5*m0].
__host__ __device__ double gravity_device_mass(double m0, double t) {
    return m0 + 0.5 * m0 * fabs(sin(t / 6000));
}
const double planet_radius = 1e7;
const double missile_speed = 1e6;
// Cost of a missile launched at time t.
double get_missile_cost(double t) { return 1e5 + 1e3 * t; }
}  // namespace param
__device__ __managed__ int n, planet, asteroid; // global
// Parses the n-body problem file: a header line "n planet asteroid" followed
// by one body per line (position qx qy qz, velocity vx vy vz, mass m, type).
// All vectors are resized to n. Throws std::runtime_error when the file
// cannot be opened — the original silently left every output untouched,
// propagating garbage into the simulation.
void read_input(const char* filename, int& n, int& planet, int& asteroid,
    std::vector<double>& qx, std::vector<double>& qy, std::vector<double>& qz,
    std::vector<double>& vx, std::vector<double>& vy, std::vector<double>& vz,
    std::vector<double>& m, std::vector<std::string>& type) {
    std::ifstream fin(filename);
    if (!fin) {
        throw std::runtime_error(std::string("cannot open input file: ") + filename);
    }
    fin >> n >> planet >> asteroid;
    qx.resize(n);
    qy.resize(n);
    qz.resize(n);
    vx.resize(n);
    vy.resize(n);
    vz.resize(n);
    m.resize(n);
    type.resize(n);
    for (int i = 0; i < n; i++) {
        fin >> qx[i] >> qy[i] >> qz[i] >> vx[i] >> vy[i] >> vz[i] >> m[i] >> type[i];
    }
}
// Writes the three answer lines required by the assignment output format:
// min distance, first hit step, then "device_id missile_cost".
// Doubles are printed in scientific notation at full round-trip precision.
void write_output(const char* filename, double min_dist, int hit_time_step,
    int gravity_device_id, double missile_cost) {
    std::ofstream fout(filename);
    const int full_precision = std::numeric_limits<double>::digits10 + 1;
    fout << std::scientific << std::setprecision(full_precision);
    fout << min_dist << '\n';
    fout << hit_time_step << '\n';
    fout << gravity_device_id << ' ' << missile_cost << '\n';
}
// One integration step for all bodies: each thread integrates body i against
// all n bodies (O(n^2) force evaluation), using the managed globals
// n/planet/asteroid declared above. Semi-implicit Euler: velocity first,
// then position.
__global__ void run_step_kernel(int step, double* qx, double* qy, double* qz, double* vx, double* vy, double* vz, double* m, int* type) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < n) {
double ax = 0;
double ay = 0;
double az = 0;
for (int j = 0; j < n; j++) {
if (j == i) continue;
double mj = m[j];
// type 1 = gravity device: its mass oscillates with simulation time.
if (type[j] == 1) {
mj = param::gravity_device_mass(mj, step * param::dt);
}
//__syncthreads();
double dx = qx[j] - qx[i];
double dy = qy[j] - qy[i];
double dz = qz[j] - qz[i];
// Softened |r|^3 for the inverse-square force; eps^2 avoids divide-by-zero.
double dist3 = pow(dx * dx + dy * dy + dz * dz + param::eps * param::eps, 1.5);
ax += param::G * mj * dx / dist3;
ay += param::G * mj * dy / dist3;
az += param::G * mj * dz / dist3;
//__syncthreads();
}
//__syncthreads();
// NOTE(review): positions are read above while other threads in the SAME
// launch may already be writing theirs below, so a thread may see a mix of
// old and new positions within one step — confirm whether this race is
// acceptable for the required accuracy.
vx[i] += ax * param::dt;
vy[i] += ay * param::dt;
vz[i] += az * param::dt;
qx[i] += vx[i] * param::dt;
qy[i] += vy[i] * param::dt;
qz[i] += vz[i] * param::dt;
}
}
// Euclidean distance between the points (x1,y1,z1) and (x2,y2,z2).
double dist(double x1, double y1, double z1, double x2, double y2, double z2)
{
    const double ddx = x1 - x2;
    const double ddy = y1 - y2;
    const double ddz = z1 - z2;
    return sqrt(ddx * ddx + ddy * ddy + ddz * ddz);
}
// Fraction of the segment direction along x: mathematically |dx| / dist,
// computed through the xy-plane intermediate exactly as its siblings do.
double project_x(double x1, double y1, double z1, double x2, double y2, double z2)
{
    double dx = x1 - x2;
    double dy = y1 - y2;
    double dz = z1 - z2;
    double full_len = sqrt(dx * dx + dy * dy + dz * dz);
    double planar_len = sqrt(dx * dx + dy * dy);
    // (planar/full) * (|dx|/planar) cancels to |dx|/full.
    return (planar_len / full_len) * (fabs(x1 - x2) / planar_len);
}
// Fraction of the segment direction along y: mathematically |dy| / dist,
// computed through the xy-plane intermediate exactly as its siblings do.
double project_y(double x1, double y1, double z1, double x2, double y2, double z2)
{
    double dx = x1 - x2;
    double dy = y1 - y2;
    double dz = z1 - z2;
    double full_len = sqrt(dx * dx + dy * dy + dz * dz);
    double planar_len = sqrt(dx * dx + dy * dy);
    // (planar/full) * (|dy|/planar) cancels to |dy|/full.
    return (planar_len / full_len) * (fabs(y1 - y2) / planar_len);
}
// Fraction of the segment direction along z: |dz| / dist.
double project_z(double x1, double y1, double z1, double x2, double y2, double z2)
{
    double dx = x1 - x2;
    double dy = y1 - y2;
    double dz = z1 - z2;
    double full_len = sqrt(dx * dx + dy * dy + dz * dz);
    return (fabs(z1 - z2) / full_len);
}
// Driver for the n-body homework: P1 (min planet-asteroid distance with
// devices massless) and P2 (first hit step) run concurrently on two CUDA
// streams; P3 searches for the cheapest gravity device to destroy.
// Usage: <input file> <output file>.
int main(int argc, char** argv) {
if (argc != 3) {
throw std::runtime_error("must supply 2 arguments");
}
//int n; // n: n bodies
std::vector<double> qx, qy, qz, vx, vy, vz, m;
std::vector<std::string> type;
vector<int> t_temp;
// NOTE(review): `distance` and `t_temp` appear unused in this function.
auto distance = [&](int i, int j) -> double {
double dx = qx[i] - qx[j];
double dy = qy[i] - qy[j];
double dz = qz[i] - qz[j];
return sqrt(dx * dx + dy * dy + dz * dz);
};
// read in
// n, planet, asteroid are presumably file-scope variables filled by
// read_input (declared outside this excerpt) — TODO confirm.
read_input(argv[1], n, planet, asteroid, qx, qy, qz, vx, vy, vz, m, type);
// t[i] == 1 marks body i as a gravity device, 0 otherwise.
int t[n];
for(int i=0; i<n; i++){
if(type[i] == "device")
t[i] = 1;
else
t[i] = 0;
}
int threadsPerBlock = 32;
int blocksPerGrid = (n + threadsPerBlock-1)/threadsPerBlock;
// convert std::vector to array
// for P1
double qx_h1[n], qy_h1[n], qz_h1[n];
copy(qx.begin(), qx.end(), qx_h1);
copy(qy.begin(), qy.end(), qy_h1);
copy(qz.begin(), qz.end(), qz_h1);
double vx_h1[n], vy_h1[n], vz_h1[n];
copy(vx.begin(), vx.end(), vx_h1);
copy(vy.begin(), vy.end(), vy_h1);
copy(vz.begin(), vz.end(), vz_h1);
double m_h1[n];
copy(m.begin(), m.end(), m_h1);
vector<int> devList; // device list for P3
for (int i = 0; i < n; i++) {
if (t[i] == 1) {
devList.push_back(i);
m_h1[i] = 0; // no device in problem 1
}
}
// for P2
double qx_h2[n], qy_h2[n], qz_h2[n];
copy(qx.begin(), qx.end(), qx_h2);
copy(qy.begin(), qy.end(), qy_h2);
copy(qz.begin(), qz.end(), qz_h2);
double vx_h2[n], vy_h2[n], vz_h2[n];
copy(vx.begin(), vx.end(), vx_h2);
copy(vy.begin(), vy.end(), vy_h2);
copy(vz.begin(), vz.end(), vz_h2);
double m_h2[n];
copy(m.begin(), m.end(), m_h2);
// device's variable
double *qx_dev1, *qy_dev1, *qz_dev1, *vx_dev1, *vy_dev1, *vz_dev1, *m_dev1;
int *t_dev1;
double *qx_dev2, *qy_dev2, *qz_dev2, *vx_dev2, *vy_dev2, *vz_dev2, *m_dev2;
int *t_dev2;
// Problem 1 & 2
double min_dist = std::numeric_limits<double>::infinity();
int hit_time_step = -2;
cudaSetDevice(0);
// stream1 carries the P1 simulation (devices massless), stream2 the P2 one.
cudaStream_t stream1, stream2;
cudaStreamCreate(&stream1);
cudaStreamCreate(&stream2);
cudaMalloc(&qx_dev1, sizeof(double)*n);
cudaMemcpyAsync(qx_dev1, qx_h1, sizeof(double)*n, cudaMemcpyHostToDevice, stream1);
cudaMalloc(&qy_dev1, sizeof(double)*n);
cudaMemcpyAsync(qy_dev1, qy_h1, sizeof(double)*n, cudaMemcpyHostToDevice, stream1);
cudaMalloc(&qz_dev1, sizeof(double)*n);
cudaMemcpyAsync(qz_dev1, qz_h1, sizeof(double)*n, cudaMemcpyHostToDevice, stream1);
cudaMalloc(&vx_dev1, sizeof(double)*n);
cudaMemcpyAsync(vx_dev1, vx_h1, sizeof(double)*n, cudaMemcpyHostToDevice, stream1);
cudaMalloc(&vy_dev1, sizeof(double)*n);
cudaMemcpyAsync(vy_dev1, vy_h1, sizeof(double)*n, cudaMemcpyHostToDevice, stream1);
cudaMalloc(&vz_dev1, sizeof(double)*n);
cudaMemcpyAsync(vz_dev1, vz_h1, sizeof(double)*n, cudaMemcpyHostToDevice, stream1);
cudaMalloc(&m_dev1, sizeof(double)*n);
cudaMemcpyAsync(m_dev1, m_h1, sizeof(double)*n, cudaMemcpyHostToDevice, stream1);
cudaMalloc(&t_dev1, sizeof(int)*n);
cudaMemcpyAsync(t_dev1, t, sizeof(int)*n, cudaMemcpyHostToDevice, stream1);
cudaMalloc(&qx_dev2, sizeof(double)*n);
cudaMemcpyAsync(qx_dev2, qx_h2, sizeof(double)*n, cudaMemcpyHostToDevice, stream2);
cudaMalloc(&qy_dev2, sizeof(double)*n);
cudaMemcpyAsync(qy_dev2, qy_h2, sizeof(double)*n, cudaMemcpyHostToDevice, stream2);
cudaMalloc(&qz_dev2, sizeof(double)*n);
cudaMemcpyAsync(qz_dev2, qz_h2, sizeof(double)*n, cudaMemcpyHostToDevice, stream2);
cudaMalloc(&vx_dev2, sizeof(double)*n);
cudaMemcpyAsync(vx_dev2, vx_h2, sizeof(double)*n, cudaMemcpyHostToDevice, stream2);
cudaMalloc(&vy_dev2, sizeof(double)*n);
cudaMemcpyAsync(vy_dev2, vy_h2, sizeof(double)*n, cudaMemcpyHostToDevice, stream2);
cudaMalloc(&vz_dev2, sizeof(double)*n);
cudaMemcpyAsync(vz_dev2, vz_h2, sizeof(double)*n, cudaMemcpyHostToDevice, stream2);
cudaMalloc(&m_dev2, sizeof(double)*n);
cudaMemcpyAsync(m_dev2, m_h2, sizeof(double)*n, cudaMemcpyHostToDevice, stream2);
cudaMalloc(&t_dev2, sizeof(int)*n);
cudaMemcpyAsync(t_dev2, t, sizeof(int)*n, cudaMemcpyHostToDevice, stream2);
bool P2 = true;
// Step the two simulations; after each step pull positions back and track
// the P1 minimum distance / P2 first collision step.
for (int step = 0; step <= param::n_steps; step++) {
if (step > 0) {
//cudaSetDevice(0);
run_step_kernel<<<blocksPerGrid, threadsPerBlock, 0, stream1>>>(step, qx_dev1, qy_dev1, qz_dev1, vx_dev1, vy_dev1, vz_dev1, m_dev1, t_dev1);
if(P2)
run_step_kernel<<<blocksPerGrid, threadsPerBlock, 0, stream2>>>(step, qx_dev2, qy_dev2, qz_dev2, vx_dev2, vy_dev2, vz_dev2, m_dev2, t_dev2);
//cudaDeviceSynchronize();
}
// NOTE(review): these async D2H copies target pageable host arrays and the
// host reads them immediately; this relies on cudaMemcpyAsync degrading to
// a synchronous copy for pageable memory — a cudaStreamSynchronize here
// would make the ordering explicit. TODO confirm.
cudaMemcpyAsync(qx_h1, qx_dev1, sizeof(double)*n, cudaMemcpyDeviceToHost, stream1);
cudaMemcpyAsync(qy_h1, qy_dev1, sizeof(double)*n, cudaMemcpyDeviceToHost, stream1);
cudaMemcpyAsync(qz_h1, qz_dev1, sizeof(double)*n, cudaMemcpyDeviceToHost, stream1);
double dx = qx_h1[planet] - qx_h1[asteroid];
double dy = qy_h1[planet] - qy_h1[asteroid];
double dz = qz_h1[planet] - qz_h1[asteroid];
min_dist = std::min(min_dist, sqrt(dx * dx + dy * dy + dz * dz));
if(P2){
cudaMemcpyAsync(qx_h2, qx_dev2, sizeof(double)*n, cudaMemcpyDeviceToHost, stream2);
cudaMemcpyAsync(qy_h2, qy_dev2, sizeof(double)*n, cudaMemcpyDeviceToHost, stream2);
cudaMemcpyAsync(qz_h2, qz_dev2, sizeof(double)*n, cudaMemcpyDeviceToHost, stream2);
double dx = qx_h2[planet] - qx_h2[asteroid];
double dy = qy_h2[planet] - qy_h2[asteroid];
double dz = qz_h2[planet] - qz_h2[asteroid];
// Collision: asteroid inside the planet radius; stop the P2 stream.
if (dx * dx + dy * dy + dz * dz < param::planet_radius * param::planet_radius) {
hit_time_step = step;
P2 = false;
}
}
}
cudaFree(qx_dev1);
cudaFree(qy_dev1);
cudaFree(qz_dev1);
cudaFree(vx_dev1);
cudaFree(vy_dev1);
cudaFree(vz_dev1);
cudaFree(m_dev1);
cudaFree(t_dev1);
cudaFree(qx_dev2);
cudaFree(qy_dev2);
cudaFree(qz_dev2);
cudaFree(vx_dev2);
cudaFree(vy_dev2);
cudaFree(vz_dev2);
cudaFree(m_dev2);
cudaFree(t_dev2);
cudaStreamDestroy(stream1);
cudaStreamDestroy(stream2);
// Problem 3
// TODO
// Re-simulate once per gravity device: fly a missile at it, zero its mass
// when the missile arrives, and keep the cheapest destruction that still
// prevents the collision.
int gravity_device_id = -999;
double missile_cost = DBL_MAX;
double *qx_dev3, *qy_dev3, *qz_dev3, *vx_dev3, *vy_dev3, *vz_dev3, *m_dev3;
int *t_dev3;
cudaSetDevice(1);
cudaMalloc(&qx_dev3, sizeof(double)*n);
cudaMalloc(&qy_dev3, sizeof(double)*n);
cudaMalloc(&qz_dev3, sizeof(double)*n);
cudaMalloc(&vx_dev3, sizeof(double)*n);
cudaMalloc(&vy_dev3, sizeof(double)*n);
cudaMalloc(&vz_dev3, sizeof(double)*n);
cudaMalloc(&m_dev3, sizeof(double)*n);
cudaMalloc(&t_dev3, sizeof(int)*n);
int hitNum = 0; // how many candidate runs still ended in a collision
for(int i=0; i<devList.size(); i++){
// Reload the pristine initial conditions for each candidate device.
read_input(argv[1], n, planet, asteroid, qx, qy, qz, vx, vy, vz, m, type);
double *qx_h3 = &qx[0];
double *qy_h3 = &qy[0];
double *qz_h3 = &qz[0];
double *vx_h3 = &vx[0];
double *vy_h3 = &vy[0];
double *vz_h3 = &vz[0];
double *m_h3 = &m[0];
cudaMemcpy(qx_dev3, qx_h3, sizeof(double)*n, cudaMemcpyHostToDevice);
cudaMemcpy(qy_dev3, qy_h3, sizeof(double)*n, cudaMemcpyHostToDevice);
cudaMemcpy(qz_dev3, qz_h3, sizeof(double)*n, cudaMemcpyHostToDevice);
cudaMemcpy(vx_dev3, vx_h3, sizeof(double)*n, cudaMemcpyHostToDevice);
cudaMemcpy(vy_dev3, vy_h3, sizeof(double)*n, cudaMemcpyHostToDevice);
cudaMemcpy(vz_dev3, vz_h3, sizeof(double)*n, cudaMemcpyHostToDevice);
cudaMemcpy(m_dev3, m_h3, sizeof(double)*n, cudaMemcpyHostToDevice);
cudaMemcpy(t_dev3, t, sizeof(int)*n, cudaMemcpyHostToDevice);
double qx_m, qy_m, qz_m; // position of missile
double travelDist = 0;
qx_m = qx[planet];
qy_m = qy[planet];
qz_m = qz[planet];
for (int step = 0; step <= param::n_steps; step++) {
if (step > 0) {
run_step_kernel<<<blocksPerGrid, threadsPerBlock>>>(step, qx_dev3, qy_dev3, qz_dev3, vx_dev3, vy_dev3, vz_dev3, m_dev3, t_dev3);
}
cudaMemcpy(qx_h3, qx_dev3, sizeof(double)*n, cudaMemcpyDeviceToHost);
cudaMemcpy(qy_h3, qy_dev3, sizeof(double)*n, cudaMemcpyDeviceToHost);
cudaMemcpy(qz_h3, qz_dev3, sizeof(double)*n, cudaMemcpyDeviceToHost);
// Advance the missile toward the candidate device.
// NOTE(review): the hit test below compares travelDist against the
// planet-device distance, so qx_m/qy_m/qz_m are never actually read
// elsewhere — confirm whether the missile track is needed at all.
qx_m += param::missile_speed * param::dt * project_x(qx_m, qy_m, qz_m, qx_h3[devList[i]], qy_h3[devList[i]], qz_h3[devList[i]]);
qy_m += param::missile_speed * param::dt * project_y(qx_m, qy_m, qz_m, qx_h3[devList[i]], qy_h3[devList[i]], qz_h3[devList[i]]);
qz_m += param::missile_speed * param::dt * project_z(qx_m, qy_m, qz_m, qx_h3[devList[i]], qy_h3[devList[i]], qz_h3[devList[i]]);
travelDist += param::missile_speed * param::dt;
if(travelDist >= dist(qx_h3[planet], qy_h3[planet], qz_h3[planet], qx_h3[devList[i]], qy_h3[devList[i]], qz_h3[devList[i]])){
m_h3[devList[i]] = 0; // device’s mass becomes zero after it is destroyed
double c = param::get_missile_cost(step * param::dt);
if(missile_cost > c){
missile_cost = c;
gravity_device_id = devList[i];
}
cudaMemcpy(m_dev3, m_h3, sizeof(double)*n, cudaMemcpyHostToDevice);
}
// determine hitting
double dx = qx_h3[planet] - qx_h3[asteroid];
double dy = qy_h3[planet] - qy_h3[asteroid];
double dz = qz_h3[planet] - qz_h3[asteroid];
if (dx * dx + dy * dy + dz * dz < param::planet_radius * param::planet_radius) {
hitNum++;
//printf("%d\n", hitNum);
break;
}
}
}
// No device saves the planet, or no collision ever happens: report "none".
if(hitNum == devList.size() || hit_time_step == -2){
gravity_device_id = -1;
missile_cost = 0;
}
cudaFree(qx_dev3);
cudaFree(qy_dev3);
cudaFree(qz_dev3);
cudaFree(vx_dev3);
cudaFree(vy_dev3);
cudaFree(vz_dev3);
cudaFree(m_dev3);
cudaFree(t_dev3);
write_output(argv[2], min_dist, hit_time_step, gravity_device_id, missile_cost);
}
|
7,436 | #include <stdio.h>
#include <math.h>
#include <iostream>
#include <fstream>
using namespace std;
const int N = 128;
const int ITERATIONS = 500;
const string filename = "result.txt";
// One Jacobi sweep of the 2-D Laplace stencil on an n x n grid.
// d_result receives the updated field; it is then copied back into d_array
// so the next launch can reuse the same input pointer.
// Launch: 1-D grid covering at least n*n threads.
__global__ void laplacian(int n, double *d_array, double *d_result)
{
int globalidx = threadIdx.x + blockIdx.x * blockDim.x;
int x = globalidx / n;
int y = globalidx - n * x;
if(globalidx < n * n) {
// Top row and left column are fixed boundary values (1.0).
if (y == 0 || x == 0)
d_result[y + x * n] = 1;
else {
// Average of the four neighbours; out-of-range neighbours count as 0.
d_result[y + x * n] = 0.25 * (
(y - 1 >= 0 ? d_array[(y - 1) + x * n] : 0) +
(y + 1 <= n - 1 ? d_array[(y + 1) + x * n] : 0) +
(x - 1 >= 0 ? d_array[y + (x - 1) * n] : 0) +
(x + 1 <= n - 1 ? d_array[y + (x + 1) * n] : 0));
}
}
__syncthreads();
// Fix: the copy-back was unguarded, so any thread with globalidx >= n*n
// wrote out of bounds whenever the grid overshot n*n.
// NOTE(review): __syncthreads() only orders threads within one block; the
// in-kernel copy-back can still race with neighbour reads from other
// blocks — a ping-pong buffer scheme would remove the copy entirely.
if (globalidx < n * n)
d_array[y + x * n] = d_result[y + x * n];
}
// Allocate a zero-initialised size x size grid (row-major) whose first row
// and first column hold the boundary value 1.0. Caller owns the memory.
double* create_grid(int size) {
double *grid = (double *) calloc(sizeof(double), size * size);
for (int col = 0; col < size; col++)
grid[col] = 1;          // first row
for (int row = 0; row < size; row++)
grid[row * size] = 1;   // first column
return grid;
}
// Write an n x n row-major matrix to `writer`: tab-separated values, one
// matrix row per line.
// Fix: the row stride used the file-global constant N instead of the `n`
// parameter, so any call with n != N indexed the wrong (or out-of-range)
// elements.
void write_to(ofstream& writer, double *res_i, int n)
{
for (int i = 0; i < n; i++) {
for (int j = 0; j < n; j++)
writer << res_i[i*n + j] << "\t";
writer << "\n";
}
}
// Driver: run ITERATIONS Jacobi sweeps on an N x N grid and write the
// result to `filename`.
int main() {
double *d_array;
double *d_result;
cudaMalloc(&d_array, sizeof(double) * N * N);
cudaMalloc(&d_result, sizeof(double) * N * N);
double *h_array = create_grid(N);
cudaMemcpy(d_array, h_array, sizeof(double) * N * N, cudaMemcpyHostToDevice);
int threadNum = 1024;
dim3 dimBlock(threadNum);
// Fix: ceil-divide so every cell is covered even when N*N is not a
// multiple of threadNum (identical grid size when it is).
dim3 dimGrid((N * N + threadNum - 1) / threadNum);
ofstream writer;
writer.open(filename);
for (int k = 0; k < ITERATIONS; k++) {
laplacian<<<dimGrid, dimBlock>>>(N, d_array, d_result);
}
// The blocking cudaMemcpy also synchronizes with the queued launches.
cudaMemcpy(h_array, d_array, sizeof(double)*N*N, cudaMemcpyDeviceToHost);
write_to(writer, h_array, N);
writer.close();
// Fix: release device and host allocations (previously leaked).
cudaFree(d_array);
cudaFree(d_result);
free(h_array);
return 0;
}
|
7,437 | #include <stdio.h>
#include <cuda.h>
#define N 1024 // must be a power of 2.
#define BLOCKSIZE N
// In-place tree reduction: the stride halves each round and partial sums
// collapse into the low half of the array; nelements[0] ends up holding the
// total. Correct only for a single block of N threads (N a power of two),
// since __syncthreads() cannot order threads across blocks.
__global__ void RKPlusNBy2(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (int off = N / 2; off; off /= 2) {
if (id < off)
nelements[id] += nelements[id + off];
__syncthreads();
}
if (id == 0)
// Fix: %d mis-declared the unsigned operand; use %u.
printf("GPU sum = %u\n", *nelements);
}
// In-place reduction, mirrored pairing: element id is summed with element
// (2*off - id - 1), i.e. the element reflected across the top of the active
// window. Single-block only (relies on __syncthreads for ordering).
__global__ void RKNminusI(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (int off = N / 2; off; off /= 2) {
if (id < off)
nelements[id] += nelements[2 * off - id - 1];
__syncthreads();
}
if (id == 0)
// NOTE(review): %d with an unsigned argument — %u would be correct.
printf("GPU sum = %d\n", *nelements);
}
// In-place reduction over consecutive pairs: the surviving partial sums
// stay spread out at stride N/off, so thread id works on slot N/off*id and
// adds its immediate neighbour at distance N/(2*off). Single-block only.
__global__ void RKConsecutive(unsigned *nelements) {
unsigned id = blockIdx.x * blockDim.x + threadIdx.x;
for (int off = N / 2; off; off /= 2) {
if (id < off)
nelements[N / off * id] += nelements[N / off * id + N / 2 / off];
__syncthreads();
}
if (id == 0)
// NOTE(review): %d with an unsigned argument — %u would be correct.
printf("GPU sum = %d\n", *nelements);
}
// Exercise the three single-block reduction variants on the same random
// input and compare against the CPU sum printed first.
int main() {
unsigned hnelements[N];
unsigned sum = 0;
for (unsigned ii = 0; ii < N; ++ii) {
hnelements[ii] = rand() % 20;
sum += hnelements[ii];
}
// Fix: %d mis-declared the unsigned operand; use %u.
printf("CPU sum = %u\n", sum);
unsigned nblocks = (N + BLOCKSIZE - 1) / BLOCKSIZE;
unsigned *nelements;
cudaMalloc(&nelements, N * sizeof(unsigned));
// Re-upload the pristine input before each kernel: every variant reduces
// in place and destroys the array.
cudaMemcpy(nelements, hnelements, N * sizeof(unsigned), cudaMemcpyHostToDevice);
RKPlusNBy2<<<nblocks, BLOCKSIZE>>>(nelements);
cudaMemcpy(nelements, hnelements, N * sizeof(unsigned), cudaMemcpyHostToDevice);
RKNminusI<<<nblocks, BLOCKSIZE>>>(nelements);
cudaMemcpy(nelements, hnelements, N * sizeof(unsigned), cudaMemcpyHostToDevice);
RKConsecutive<<<nblocks, BLOCKSIZE>>>(nelements);
cudaDeviceSynchronize();
// Fix: the device buffer was leaked.
cudaFree(nelements);
return 0;
}
|
7,438 | #include "includes.h"
//ADD TWO MATRICES
// Element-wise C = A + B over a ROW x COLUMNS row-major matrix.
// Launch with a 2-D grid: x spans columns, y spans rows.
__global__ void MatAdd(int *a, int *b, int *c, int ROW, int COLUMNS){
int ix = blockIdx.x * blockDim.x + threadIdx.x;  // column index
int iy = blockIdx.y * blockDim.y + threadIdx.y;  // row index
int idx = iy * COLUMNS + ix;
// Fix: the bounds were swapped (ix < ROW && iy < COLUMNS). Since ix
// indexes columns and iy indexes rows (idx = iy*COLUMNS + ix), the old
// guard read/wrote out of range for non-square matrices.
if (ix < COLUMNS && iy < ROW)
{
c[idx] = a[idx] + b[idx];
}
}
7,439 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Profiler demo: the branch condition depends on the warp id, so all 32
// lanes of a warp take the same path — no intra-warp divergence.
// a and b are intentionally dead stores; the kernel exists only to be
// inspected with a profiler (the compiler may eliminate the bodies).
__global__ void codeWithoutDivergence(){
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int a, b;
a = b = 0;
int warpId = gid / 32;
if(warpId % 2 == 0){
a = 100;
b = 50;
}
else {
a = 200;
b = 75;
}
}
// Profiler demo: the branch condition alternates per thread, so every warp
// splits 16/16 between the two paths — maximal intra-warp divergence.
// a and b are intentionally dead stores, as in codeWithoutDivergence.
__global__ void codeWithDivergence(){
int gid = blockIdx.x * blockDim.x + threadIdx.x;
int a, b;
a = b = 0;
if(gid % 2 == 0){
a = 100;
b = 50;
}
else {
a = 200;
b = 75;
}
}
// Launch both kernels so their warp behaviour can be compared in a profiler.
int main(){
int size = 1 << 22;
dim3 blockSize(128);
dim3 gridSize((size + blockSize.x - 1)/blockSize.x);
codeWithoutDivergence <<<gridSize, blockSize>>>();
cudaDeviceSynchronize();
codeWithDivergence <<<gridSize, blockSize>>>();
// Fix: wait for the second kernel as well — cudaDeviceReset tears down
// the context and previously raced with the still-running launch.
cudaDeviceSynchronize();
cudaDeviceReset();
return 0;
}
|
7,440 | #define _BSD_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <iostream>
#include <cstdio>
using namespace std;
#define cudaSucceeded(ans) { cudaAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA runtime call (used via the cudaSucceeded macro) and,
// unless abort=false, terminate with the error code as exit status.
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess) {
// Fix: the error string and the file name were concatenated with no
// separator, producing unreadable diagnostics like "...errormain.cu:12".
std::cerr << "cudaAssert failed: "
<< cudaGetErrorString(code)
<< " at " << file << ":" << line
<< std::endl;
if (abort) {
exit(code);
}
}
}
// Wall-clock time in microseconds since the epoch, truncated to int.
// NOTE(review): tv_sec*1000000 exceeds 32-bit range, so the returned value
// wraps; the callers only use differences of nearby samples, which remain
// correct for intervals shorter than ~35 minutes — confirm that assumption.
int useconds() {
struct timeval t;
gettimeofday(&t, NULL);
return t.tv_sec*1000000+t.tv_usec;
}
// Micro-benchmark: time cudaMalloc (+ a zero-byte cudaMemset and a sync)
// for allocation sizes 1 B .. 1 GiB, doubling each round, and print the
// average / min / max over num_tries repetitions.
int main() {
for (int i = 1; i < 1<<30; i *= 2) {
int *mem;
const int num_tries = 100;
int time = 0;
int min_time = 0x7FFFFFFF;
int max_time = 0;
for (int j = 0; j < num_tries; j++) {
int start = useconds();
cudaSucceeded(cudaMalloc(&mem, i));
// Zero-length memset: touches the allocation path without writing.
cudaSucceeded(cudaMemset(mem, 0, 0));
cudaSucceeded(cudaDeviceSynchronize());
int aft = useconds();
int this_time = aft-start;
time += this_time;
min_time = min_time < this_time ? min_time : this_time;
max_time = max_time < this_time ? this_time : max_time;
// cudaFree is deliberately outside the timed window.
cudaSucceeded(cudaFree(mem));
}
printf("%d bytes; average: %dus; min: %dus; max: %dus\n", i, time/num_tries, min_time, max_time);
}
}
|
7,441 | #include "includes.h"
extern "C"
// Invert all four channels of a w x h RGBA image in place.
// Launch with a 2-D grid: x spans columns, y spans rows.
__global__ void invert(uchar4* data, int w, int h)
{
int col = threadIdx.x+blockIdx.x*blockDim.x;
int row = threadIdx.y+blockIdx.y*blockDim.y;
if (col >= w || row >= h)
return;
int index = row*w+col;
uchar4 px = data[index];
px.x = 255 - px.x;
px.y = 255 - px.y;
px.z = 255 - px.z;
px.w = 255 - px.w;
data[index] = px;
}
7,442 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated compiler stress kernel (see the file banner: "Do not
// modify"). It evaluates a deeply nested float expression tree and prints
// the accumulated result; the specific constants and operations are
// arbitrary by construction. Launched as <<<1,1>>>.
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20) {
if (comp < var_2 / var_3 - var_4 + fabsf(+1.6968E21f)) {
for (int i=0; i < var_1; ++i) {
float tmp_1 = +0.0f - (var_6 / var_7 * -1.4756E-43f - powf(fmodf(-1.6327E-43f, (-1.1981E-42f / (-1.7529E-37f + +0.0f))), asinf(+1.7393E20f)));
comp += tmp_1 / -1.3600E35f + +1.6405E-35f + cosf((var_8 / (+1.7154E-43f - (-1.4625E9f - var_9 / var_10))));
if (comp >= (+1.5578E-43f - logf(log10f(-1.1290E23f)))) {
comp = var_11 * var_12 - (+1.5927E8f + +0.0f);
}
for (int i=0; i < var_5; ++i) {
comp = +1.5094E36f + (var_13 + ldexpf(tanhf(+1.4825E2f + var_14 * (var_15 * fmodf(powf(tanhf(fmodf((-1.9049E-35f / var_16), var_17 - var_18 + +1.5543E-42f)), (+1.3079E-36f * (-1.3300E36f / floorf(-1.6612E35f)))), atan2f(ldexpf(+1.1272E-41f, 2), logf(var_19 * -1.9125E-44f - var_20))))), 2));
}
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float buffer with every slot set to v.
// Caller owns the returned memory.
float* initPointer(float v) {
float *buf = (float*) malloc(sizeof(float)*10);
int i = 0;
while (i < 10) {
buf[i] = v;
++i;
}
return buf;
}
// Parse 21 numeric command-line arguments and feed them to the generated
// stress kernel on a single thread.
int main(int argc, char** argv) {
/* Program variables */
// Fix: the harness read argv[1..21] unconditionally; reject short command
// lines instead of dereferencing past argv[argc].
if (argc < 22) {
fprintf(stderr, "usage: %s <21 numeric arguments>\n", argv[0]);
return 1;
}
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
int tmp_6 = atoi(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21);
cudaDeviceSynchronize();
return 0;
}
|
7,443 | #include <stdio.h>
#include <cuda_profiler_api.h>
#include <cuda_runtime_api.h>
#include <stdlib.h>
#include <time.h>
#define DIMX 512
#define cudaCheck(e) do { \
if (cudaSuccess != (e)) { \
fprintf(stderr, "Cuda runtime error in line %d of file %s \
: %s \n", __LINE__, __FILE__, cudaGetErrorString(cudaGetLastError()) ); \
exit(EXIT_FAILURE); \
} \
} while(0);
// Block-level sum reduction operating directly (and destructively) on the
// input in global memory; the per-block result lands in out[blockIdx.x].
// Requires blockDim.x to be a power of two >= 64.
// NOTE(review): the tid<32 tail uses the classic volatile warp-synchronous
// pattern, which assumes lockstep warp execution (pre-Volta); on newer
// architectures __syncwarp()/shuffle intrinsics would be required for
// guaranteed correctness. The early `idx >= n` return also does not stop
// in-range threads of the last block from reading past n — confirm that
// callers always pass n as a multiple of blockDim.x.
template <typename DType>
__global__ void reduceGmem(DType* out, DType* in, size_t n) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx >= n) return;
int tid = threadIdx.x;
// Each block reduces its own contiguous window of `in`.
DType* idata = in + blockIdx.x * blockDim.x;
if(blockDim.x >= 1024 && tid < 512 ) idata[tid] += idata[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) idata[tid] += idata[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) idata[tid] += idata[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) idata[tid] += idata[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = idata;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = idata[0];
//printf("ID:%d, sum:%5f\n", blockIdx.x, idata[0]);
}
}
// Block-level sum reduction staged through static shared memory (input is
// left untouched); per-block result lands in out[blockIdx.x].
// Requires blockDim.x == DIMX and a power-of-two block size.
// NOTE(review): same volatile warp-synchronous tail as reduceGmem — assumes
// lockstep warps (pre-Volta semantics).
template <typename DType>
__global__ void reduceSmem(DType* out, DType* in, size_t n) {
size_t idx = threadIdx.x + blockIdx.x * blockDim.x;
__shared__ DType smem[DIMX];
int tid = threadIdx.x;
DType* idata = in + blockIdx.x * blockDim.x;
/// global mem. -> shared mem.
// Out-of-range lanes contribute a neutral 0, so partial last blocks are safe.
if(idx < n) smem[tid] = idata[tid];
else smem[tid] = 0;
__syncthreads();
if(blockDim.x >= 1024 && tid < 512 ) smem[tid] += smem[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) smem[tid] += smem[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) smem[tid] += smem[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) smem[tid] += smem[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = smem[0];
//printf("ID:%d, sum:%5f\n", blockIdx.x, idata[0]);
}
}
// Block-level sum reduction with 4-way unrolling: each thread first folds
// up to four strided global elements into a register, then the block
// reduces through static shared memory; per-block result goes to
// out[blockIdx.x]. Requires blockDim.x == DIMX (power of two).
// NOTE(review): the warp tail still uses the volatile warp-synchronous
// pattern (pre-Volta assumption), unchanged from the sibling kernels.
template <typename DType>
__global__ void reduceSmemUnroll(DType* out, DType* in, size_t n) {
__shared__ DType smem[DIMX];
int tid = threadIdx.x;
size_t idx = threadIdx.x + blockIdx.x * blockDim.x * 4;
/// global mem. -> shared mem.
DType tmp_sum = 0;
// Fix: the original only accumulated when all four lanes were in range
// (idx + 3*blockDim.x < n), silently dropping the tail elements whenever
// n was not a multiple of 4*blockDim.x. Load each stride individually.
for (int k = 0; k < 4; ++k) {
size_t j = idx + (size_t)k * blockDim.x;
if (j < n) tmp_sum += in[j];
}
smem[tid] = tmp_sum;
__syncthreads();
if(blockDim.x >= 1024 && tid < 512 ) smem[tid] += smem[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) smem[tid] += smem[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) smem[tid] += smem[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) smem[tid] += smem[tid + 64];
__syncthreads();
if(tid < 32) {
volatile DType* vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = smem[0];
}
}
// Same 4-way unrolled block reduction as reduceSmemUnroll, but the shared
// staging buffer is dynamic (size passed as the third launch parameter,
// must be >= blockDim.x * sizeof(DType)).
// NOTE(review): `extern __shared__` inside a template is known to clash if
// the kernel is instantiated for more than one DType in a TU (same symbol,
// different types); only <float> is used here. The full-quad guard also
// drops tail elements when n is not a multiple of 4*blockDim.x — confirm
// the callers always size n accordingly.
template <typename DType>
__global__ void reduceSmemUnrollDynamic(DType* out, DType* in, size_t n) {
extern __shared__ DType smem[]; //! dynamic shared memory
int tid = threadIdx.x;
size_t idx = threadIdx.x + blockIdx.x * blockDim.x * 4;
/// global mem. -> shared mem.
DType tmp_sum = 0;
if(idx + 3 * blockDim.x < n) {
DType a1 = in[idx];
DType a2 = in[idx + blockDim.x];
DType a3 = in[idx + blockDim.x*2];
DType a4 = in[idx + blockDim.x*3];
tmp_sum = a1 + a2 + a3 + a4;
}
smem[tid] = tmp_sum;
__syncthreads();
if(blockDim.x >= 1024 && tid < 512 ) smem[tid] += smem[tid+512];
__syncthreads();
if(blockDim.x >= 512 && tid < 256 ) smem[tid] += smem[tid + 256];
__syncthreads();
if(blockDim.x >= 256 && tid < 128 ) smem[tid] += smem[tid + 128];
__syncthreads();
if(blockDim.x >= 128 && tid < 64 ) smem[tid] += smem[tid + 64];
__syncthreads();
if(tid < 32) {
// Volatile warp-synchronous tail (pre-Volta lockstep assumption).
volatile DType* vsmem = smem;
vsmem[tid] += vsmem[tid + 32];
vsmem[tid] += vsmem[tid + 16];
vsmem[tid] += vsmem[tid + 8];
vsmem[tid] += vsmem[tid + 4];
vsmem[tid] += vsmem[tid + 2];
vsmem[tid] += vsmem[tid + 1];
}
if(tid == 0) {
out[blockIdx.x] = smem[0];
}
}
// Benchmark harness: runs the four reduction variants over 2^24 floats on
// two CUDA streams and compares each GPU total against a double-precision
// host sum. buffers[6] collects the four scalar results side by side.
// NOTE(review): the parameter is spelled `agv` (unused) — harmless typo.
int main(int argc, char* agv[]) {
srand(time(NULL));
cudaStream_t stream[2];
cudaCheck(cudaSetDevice(0)); //! CUDA Streams
for(int i = 0; i < 2; ++i) cudaCheck(cudaStreamCreate(&stream[i]));
cudaProfilerStart();
void * buffers[7];
const size_t N = 1 << 24;
float * pdata = new float[N];
float res = 0;
double res_check = 0;
for(size_t i = 0; i < N; ++i) {
//pdata[i] = 1;
pdata[i] = rand() / double(RAND_MAX) * 0.5;
res_check += pdata[i];
}
const int threads_per_block = DIMX;
const int num_blocks = (N + threads_per_block - 1) / threads_per_block;
const int num_blocks2 = (num_blocks + threads_per_block - 1) / threads_per_block;
printf("threads_per_block:%d, num_blocks:%d, %d\n", threads_per_block, num_blocks, num_blocks2);
/// allocate gpu mem.
cudaCheck(cudaMalloc(&buffers[0], sizeof(float)*num_blocks*threads_per_block));
cudaCheck(cudaMalloc(&buffers[1], sizeof(float)*num_blocks2 * threads_per_block));
cudaCheck(cudaMalloc(&buffers[2], sizeof(float)*num_blocks2));
cudaCheck(cudaMalloc(&buffers[3], sizeof(float)*num_blocks*threads_per_block));
cudaCheck(cudaMalloc(&buffers[4], sizeof(float)*num_blocks2*threads_per_block));
cudaCheck(cudaMalloc(&buffers[5], sizeof(float)*num_blocks2));
cudaCheck(cudaMalloc(&buffers[6], sizeof(float)*4));
/// pinned memory
// Second data set in pinned memory so stream[1]'s copies can be truly async.
float * c_buffer;
cudaCheck(cudaMallocHost(&c_buffer, sizeof(float)*N));
double cpu_res = 0.;
for(size_t i = 0 ; i < N; ++i) {
c_buffer[i] = rand() / double(RAND_MAX) * 0.1;
cpu_res += c_buffer[i];
}
printf("Starting reduction ...");
/// cpu mem. -> gpu mem.
cudaCheck(cudaMemcpyAsync(buffers[0], pdata, sizeof(float)*N, cudaMemcpyHostToDevice, stream[0]));
/// reduceGmem
// Three-level cascade: N -> num_blocks -> num_blocks2 -> 1 scalar.
reduceGmem<float><<<num_blocks, threads_per_block, 0, stream[0]>>>((float*)buffers[1], (float*)buffers[0], N);
reduceGmem<float><<<num_blocks2, threads_per_block, 0, stream[0]>>>((float*)buffers[2], (float*)buffers[1], num_blocks);
reduceGmem<float><<<1, threads_per_block, 0, stream[0]>>>((float*)buffers[6], (float*)buffers[2], num_blocks2);
// Clear the intermediates and re-upload, since reduceGmem destroyed its input.
cudaCheck(cudaMemsetAsync(buffers[1], 0, sizeof(float)*num_blocks2*threads_per_block, stream[0]));
cudaCheck(cudaMemsetAsync(buffers[2], 0, sizeof(float)*num_blocks2, stream[0]));
cudaCheck(cudaMemcpyAsync(buffers[0], pdata, sizeof(float)*N, cudaMemcpyHostToDevice, stream[0]));
/// reduceSmem
reduceSmem<float><<<num_blocks, threads_per_block, 0, stream[0]>>>((float*)buffers[1], (float*)buffers[0], N);
reduceSmem<float><<<num_blocks2, threads_per_block, 0, stream[0]>>>((float*)buffers[2], (float*)buffers[1], num_blocks);
reduceSmem<float><<<1, threads_per_block, 0, stream[0]>>>((float*)buffers[6]+1, (float*)buffers[2], num_blocks2);
/// stream[1]
cudaCheck(cudaMemcpyAsync(buffers[3], c_buffer, sizeof(float)*N, cudaMemcpyHostToDevice, stream[1]));
/// reduceSmemUnroll
// 4x unrolled variants launch a quarter of the blocks per level.
reduceSmemUnroll<float><<<num_blocks / 4, threads_per_block, 0, stream[1]>>>((float*)buffers[4], (float*)buffers[3], N);
reduceSmemUnroll<float><<<num_blocks2 / 16, threads_per_block, 0, stream[1]>>>((float*)buffers[5], (float*)buffers[4], num_blocks / 4);
reduceSmem<float><<<1, threads_per_block, 0, stream[1]>>>((float*)buffers[6]+2, (float*)buffers[5], num_blocks2 / 16);
/// reduceSmemUnrollDynamic
cudaCheck(cudaMemsetAsync(buffers[4], 0, sizeof(float)*num_blocks2*threads_per_block, stream[1]));
cudaCheck(cudaMemsetAsync(buffers[5], 0, sizeof(float)*num_blocks2, stream[1]));
cudaCheck(cudaMemcpyAsync(buffers[3], c_buffer, sizeof(float)*N, cudaMemcpyHostToDevice, stream[1]));
reduceSmemUnrollDynamic<float><<<num_blocks / 4, threads_per_block, sizeof(float)*threads_per_block, stream[1]>>>((float*)buffers[4], (float*)buffers[3], N);
reduceSmemUnrollDynamic<float><<<num_blocks2 / 16, threads_per_block, sizeof(float)*threads_per_block, stream[1]>>>((float*)buffers[5], (float*)buffers[4], num_blocks / 4);
reduceSmem<float><<<1, threads_per_block, 0, stream[1]>>>((float*)buffers[6]+3, (float*)buffers[5], num_blocks2 / 16);
/// compare results
// The blocking cudaMemcpy calls below also synchronize with both streams'
// queued work before each scalar is read.
cudaCheck(cudaMemcpy(&res, buffers[6], sizeof(float), cudaMemcpyDeviceToHost));
printf("Global memory GPU Sum:%5f CPU Sum:%5f\n", res, res_check);
cudaCheck(cudaMemcpy(&res, (float*)buffers[6]+1, sizeof(float), cudaMemcpyDeviceToHost));
printf("Shared memory GPU Sum:%5f CPU Sum:%5f\n", res, res_check);
cudaCheck(cudaMemcpy(&res, (float*)buffers[6]+2, sizeof(float), cudaMemcpyDeviceToHost));
printf("Shared memory Unroll GPU Sum:%5f CPU Sum:%5f\n", res, cpu_res);
cudaCheck(cudaMemcpy(&res, (float*)buffers[6]+3, sizeof(float), cudaMemcpyDeviceToHost));
printf("Shared memory Unroll Dynamic GPU Sum:%5f CPU Sum:%5f\n", res, cpu_res);
/// free
cudaDeviceSynchronize();
cudaProfilerStop();
for(auto & e: buffers) cudaCheck(cudaFree(e));
cudaCheck(cudaFreeHost(c_buffer));
delete [] pdata;
return 0;
}
|
7,444 | #include "includes.h"
/**
* Vector Addition - Simple addition using Cuda.
* Author - Malhar Bhatt
* Subject - High Performance Computing
*/
/** Function Add -
* Usage - Add 2 values
* Returns - Void
*/
// Store the sum of num1 and num2 into *ans (device memory).
__global__ void add( int num1, int num2, int *ans )
{
ans[0] = num1 + num2;
}
7,445 | #include <stdlib.h>
#include <stdio.h>
#include <cuda.h>
#include <stdlib.h>
// Fill a numRows x numCols row-major matrix with the test pattern
// mat[r][c] = r*2.1f + c*3.2f (evaluated in float, stored as double).
void fill_matrix(double *mat, unsigned numRows, unsigned numCols)
{
for(unsigned r = 0; r < numRows; r++)
{
double *row = mat + r*numCols;
for(unsigned c = 0; c < numCols; c++)
row[c] = r*2.1f + c*3.2f;
}
}
// Dump a numRows x numCols row-major matrix to "assignment2_3_out",
// formatted "%4.4f " per entry, one matrix row per line.
void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols)
{
const char *fname = "assignment2_3_out";
FILE *f = fopen(fname, "w");
// Fix: a failed fopen previously crashed on the first fprintf(NULL,...).
if (f == NULL)
{
fprintf(stderr, "could not open %s for writing\n", fname);
return;
}
for(unsigned i=0; i < numRows; i++)
{
for(unsigned j=0; j < numCols; j++)
fprintf(f,"%4.4f ", mat[i*numCols + j]);
fprintf(f,"\n");
}
fclose(f); }
// Tiled matrix multiply P = M * N for square Width x Width matrices; each
// block stages a full strip of M (one tile-row) and of N (one tile-column)
// into dynamic shared memory. Dynamic shared size must be
// 2 * TILE_WIDTH * Width * sizeof(double); launch with TILE_WIDTH x
// TILE_WIDTH blocks and Width assumed to be a multiple of TILE_WIDTH.
// NOTE(review): several points look suspect and deserve verification:
// (1) the __syncthreads() sits inside the loading loop but the compute
//     loop below only accumulates i < TILE_WIDTH terms, not the full Width
//     dot product; (2) the ds_N read index i*Width + tx does not match the
//     layout written by the loading loop (ty*TILE_WIDTH +
//     blockDim.y*TILE_WIDTH*p + tx). Left untouched here — confirm against
//     the assignment's reference output before relying on this kernel.
template<int TILE_WIDTH>
__global__ void MatrixMulKernel_col_maj(double* M, double* N, double* P, int Width) {
extern __shared__ double buffer[];
double *ds_M = &buffer[0]; // TILE_WIDTH WIDTH
double *ds_N = &buffer[TILE_WIDTH*Width]; // WIDTH TILE_WIDTH
//__shared__ float ds_M[Width][Width];
//__shared__ float ds_N[Width][Width];
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
int Row = by * blockDim.y + ty;
int Col = bx * blockDim.x + tx;
// Loop over the M and N tiles required to compute the P element
for (int p = 0; p < Width/TILE_WIDTH; ++p) {
// Collaborative loading of M and N tiles into shared memory
ds_M[ty*Width + tx + p*blockDim.x ] = M[Row*Width + p*TILE_WIDTH+tx];
ds_N[ty*TILE_WIDTH + blockDim.y*TILE_WIDTH*p + tx] = N[(p*TILE_WIDTH+ty)*Width + Col];
__syncthreads();
}
double Pvalue = 0;
for (int i = 0; i < TILE_WIDTH; ++i){
Pvalue += ds_M[ty*Width + i] * ds_N[i*Width + tx];
}
__syncthreads();
P[Row*Width+Col] = Pvalue;
}
// Driver: for N = 16 and N = 8192, build two N x N test matrices, multiply
// them with the tiled kernel, time the kernel with CUDA events, print the
// top-left 10x10 corners, and dump the result matrix to a file.
// NOTE(review): the log line says "assignment_2_1_out" but
// print_matrix_to_file writes "assignment2_3_out"; also the cudaEvent_t
// objects are never destroyed and err1 is never inspected.
int main(int argc,char **argv) {
int N_ll[2]; int N;
int loop, loop1, loop2; // loop variables
float time_spent;
N_ll[0]=16; N_ll[1]=8192;
for (loop=0;loop<2;loop++){
N=N_ll[loop];
size_t size = N *N* sizeof(double);
double*h_matA = (double*)malloc(size);
double*h_matB = (double*)malloc(size);
double*h_matC = (double*)malloc(size); // result
fill_matrix(h_matA,N,N);
fill_matrix(h_matB,N,N);
printf("\nMatrix A (first 10*10 inputs)\n");
for(loop1 = 0; loop1 < 10; loop1++){
for (loop2=0;loop2 < 10; loop2++)
printf("%f ", *(h_matA + N*loop1 + loop2));
printf("\n");
}
printf("\n\nMatrix B (first 10*10 inputs)\n");
for(loop1 = 0; loop1 < 10; loop1++){
for (loop2=0;loop2 < 10; loop2++)
printf("%f ", *(h_matB + N*loop1 + loop2));
printf("\n");
}
double* d_matA; cudaMalloc(&d_matA, size);
double* d_matB; cudaMalloc(&d_matB, size);
double* d_matC; cudaMalloc(&d_matC, size);
//GPU timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Copy vectors from host memory to device memory
cudaMemcpy(d_matA, h_matA, size,cudaMemcpyHostToDevice);
cudaMemcpy(d_matB, h_matB, size,cudaMemcpyHostToDevice);
// Invoke kernel
dim3 threadsPerBlock (16,16);
dim3 blocksPerGrid ((N + threadsPerBlock.x - 1) /threadsPerBlock.x,(N + threadsPerBlock.y - 1) /threadsPerBlock.y);
cudaEventRecord(start, 0);
// Dynamic shared memory: one TILE_WIDTH-strip of A and one of B.
size_t blocksize = 2 * N * 16;
(MatrixMulKernel_col_maj<16>)<<<blocksPerGrid, threadsPerBlock, sizeof(double)*blocksize>>>(d_matA,d_matB, d_matC, N);
cudaError_t err1 = cudaPeekAtLastError();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time_spent, start, stop);
printf("\nTime spent in col maj %f\n",time_spent);
// h_C contains the result in host memory
cudaMemcpy(h_matC, d_matC, size,cudaMemcpyDeviceToHost);
printf("\n\nMatrix C (first 10*10 outputs)\n");
for(loop1 = 0; loop1 < 10; loop1++){
for (loop2=0;loop2 < 10; loop2++)
printf("%f ", *(h_matC + N*loop1 + loop2));
printf("\n");
}
// Log outputs
printf("\nWritting to file assignment_2_1_out as Mat C");
print_matrix_to_file(h_matC,N,N);
// Free device memory
cudaFree(d_matA);
cudaFree(d_matB);
cudaFree(d_matC);
// Free host memory
free(h_matA);
free(h_matB);
free(h_matC);
}
return 0;
}
|
// Apply per-channel gamma correction v := v^invGamma in place over an
// interleaved 3-channel float image of width x height pixels.
// Launch with a 2-D grid: x spans columns, y spans rows.
__global__ void gammaKernel(float *ptr, int width, int height, float invGamma)
{
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x >= width || y >= height)
return;
float *px = ptr + 3 * (width * y + x);
for (int c = 0; c < 3; ++c)
px[c] = powf(px[c], invGamma);
}
extern "C"
// Host wrapper: upload the image, run gammaKernel over it, download the
// corrected pixels back into ptr.
void applyGamma(float *ptr, int width, int height, float invGamma)
{
// Fix: the byte count was computed in int, which overflows for images
// larger than ~178 megapixels; compute it in size_t.
size_t image_memory = (size_t)width * height * 3 * sizeof(*ptr);
float *gpuPtr = NULL;
cudaMalloc((void**) &gpuPtr, image_memory);
cudaMemcpy(gpuPtr, ptr, image_memory, cudaMemcpyHostToDevice);
dim3 threads(16, 16);
dim3 blocks((width + threads.x - 1) / threads.x, (height + threads.y - 1) / threads.y);
gammaKernel<<<blocks, threads>>>(gpuPtr, width, height, invGamma);
// The blocking copy also synchronizes with the kernel launch.
cudaMemcpy(ptr, gpuPtr, image_memory, cudaMemcpyDeviceToHost);
cudaFree(gpuPtr);
}
7,447 | #include <cstdio>
#include <cstdlib>
#include <thrust/count.h>
#include <cassert>
#include <thrust/sort.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <time.h>
#include <sstream>
#define MAXFEATURE 10
#define MAXDATA 2000000
#define MAXLABEL 20
#define MAXDEPTH 15
#define noDDEBUG
// One training sample: its feature vector and class label, plus a scratch
// field that calImpurity fills with the weighted Gini impurity of splitting
// the sorted range at this sample's position.
typedef struct {
double feature[MAXFEATURE];
int label;
double impurity;
} DataNode;
// Decision-tree node: internal nodes split on feature[featureId] against
// `threshold`; `label` carries the predicted class at leaves.
typedef struct ClassifyNode{
int featureId;
double threshold;
int label;
struct ClassifyNode *left, *right;
} ClassifyNode;
// For every candidate cut position `pos` in the sorted range [left, right),
// compute the weighted Gini impurity of splitting at pos (samples [left,pos)
// go left, [pos,right) go right) and store it in x_train[pos].impurity.
// Positions that cannot be cut (range ends, or equal feature values, which
// must stay on the same side) keep the sentinel value right-left.
// NOTE(review): the sentinel store below happens for every launched thread
// before any bounds check, so the grid must not exceed the x_train
// allocation — confirm the launch configuration upholds this.
__global__ void calImpurity (DataNode *x_train, int numLabel, int targetF,int left, int right) {
int pos = blockIdx.x * blockDim.x + threadIdx.x;
x_train[pos].impurity = right- left;
//no cut boundary
if (pos >= right || pos <= left) return;
//equal to targetFeature belong to right side
if (x_train[pos].feature[targetF] == x_train[pos-1].feature[targetF]) return;
//left
//compute each label appear times
double left_imp = 0;
{
double Labelcount[MAXLABEL] = {};
for (int i = left; i < pos; i++)
Labelcount[ x_train[i].label ]++;
//compute impurity
for (int i = 0; i < numLabel; i++)
left_imp += (Labelcount[i]/(pos-left))*(Labelcount[i]/(pos-left) );
//mul weight
left_imp = 1 - left_imp;
// Fix: left_imp is a double but was printed with %d (undefined behavior
// and garbage output); use %f.
if (left_imp < 0) printf("error:%f, pos = %d, left = %d, numLabel = %d\n",left_imp, pos, left, numLabel);
left_imp *= (pos-left);
}
//right
//compute each label appear times
double right_imp = 0;
{
double Labelcount[MAXLABEL] = {};
for (int i = pos; i < right; i++)
Labelcount[ x_train[i].label ]++;
//compute impurity
for (int i = 0; i < numLabel; i++)
right_imp += (Labelcount[i]/(right-pos)) * (Labelcount[i]/(right-pos)) ;
//mul weight
right_imp = 1-right_imp;
right_imp *= (right-pos);
}
x_train[pos].impurity = right_imp + left_imp;
return;
}
// Strict-weak ordering on DataNode by feature i; used to sort a segment
// before scoring candidate cuts on that feature.
struct cmp_feature {
int32_t i;
cmp_feature(int32_t i): i(i) {}
__host__ __device__ bool operator()(const DataNode &ldn, const DataNode &rdn) const
{
return ldn.feature[i] < rdn.feature[i];
}
};
// Orders DataNode by the cached split impurity written by calImpurity;
// used with thrust::min_element to find the best cut position.
struct cmp_impurity {
__host__ __device__ bool operator()(const DataNode &ldn,
const DataNode &rdn) const
{
return ldn.impurity < rdn.impurity;
}
};
// Orders DataNode by class label.
struct cmp_label {
__host__ __device__ bool operator()(const DataNode &ldn, const DataNode &rdn) const
{
return ldn.label < rdn.label;
}
};
// Predicate: true when the node carries label i (for thrust::count_if).
struct count_label {
int32_t i;
count_label(int32_t i): i(i) {}
__host__ __device__ bool operator()(const DataNode &dn) const
{
return dn.label == i;
}
};
// Recursively grows a CART-style decision tree over the device-resident
// samples x_train[left, right). `parent` (host memory) is filled in either as
// a leaf (featureId = -1, label set) or as an inner node with a feature id,
// a threshold and two heap-allocated children. Sorting/counting run on the
// device via Thrust; calImpurity scores every candidate cut in parallel.
void rec_buildTree(DataNode *x_train, ClassifyNode *parent, int numX, int numLabel, int numF, int left, int right, int depth ) {
    // Stop 1: segment too small -> majority-vote leaf.
    // reslabel is initialized (the original left it indeterminate when every
    // count was zero).
    if (right - left < 2) {
        int labelcount = 0, maxlabelcount = 0, reslabel = 0;
        for (int label = 0; label < numLabel; label++) {
            labelcount = thrust::count_if(thrust::device, x_train+left, x_train+right, count_label(label) );
            if (labelcount > maxlabelcount) {
                maxlabelcount = labelcount;
                reslabel = label;
            }
        }
        parent->featureId = -1;
        parent->left = NULL;
        parent->right = NULL;
        parent->label = reslabel;
        printf("dist cut ,depth = %d, set %d as label, rate = %lf, dist = %d\n", depth, reslabel, maxlabelcount/(double)(right-left), right-left);
        return;
    }
    // Stop 2: one label dominates (> 95% of the segment) -> leaf.
    for (int label = 0; label < numLabel; label++) {
        int labelcount = thrust::count_if(thrust::device, x_train+left, x_train+right, count_label(label) );
        if (labelcount/(double)(right-left) > 0.95) {
            parent->featureId = -1;
            parent->left = NULL;
            parent->right = NULL;
            parent->label = label;
            printf("rate cut depth = %d, set %d as label, rate = %lf, dist = %d\n", depth, label, labelcount/(double)(right-left), right-left);
            return;
        }
    }
    // Exhaustive search for the (feature, position) pair minimizing the
    // weighted Gini impurity computed by calImpurity.
    double best_min_impurity = 2147483647;
    int best_feature = 0;
    // double, not int: feature values are doubles, and the original's int
    // silently truncated every non-integer threshold stored in the tree.
    double best_threshold = 0;
    unsigned int best_pos = left;
    for (int targetF = 0; targetF < numF; targetF++) {
        thrust::sort(thrust::device, x_train + left, x_train + right, cmp_feature(targetF));
        calImpurity<<< (numX)/1024 + 1, (1<<10) >>>(x_train, numLabel, targetF, left, right);
        DataNode *min_impurity = thrust::min_element( thrust::device, x_train+left, x_train+right, cmp_impurity() );
        DataNode *cpu_min_impurity = (DataNode *)malloc( sizeof(DataNode) );
        cudaMemcpy( cpu_min_impurity, min_impurity, sizeof(DataNode), cudaMemcpyDeviceToHost );
        if (cpu_min_impurity->impurity < best_min_impurity) {
            best_min_impurity = cpu_min_impurity->impurity;
            best_feature = targetF;
            best_threshold = cpu_min_impurity->feature[targetF];
            best_pos = min_impurity - x_train;
        }
        free(cpu_min_impurity);
    }
    parent->threshold = best_threshold;
    parent->featureId = best_feature;
    unsigned int shreshold_pos = best_pos;
    // Re-sort on the winning feature so [left, shreshold_pos) / [shreshold_pos,
    // right) really are the two sides of the cut.
    // NOTE(review): thrust::sort is not stable, so with tied feature values the
    // recorded position may not match the re-sorted order exactly -- confirm.
    thrust::sort(thrust::device, x_train + left, x_train + right, cmp_feature(best_feature));
    // Degenerate cut (min_element picked the sentinel at `left`): recursing with
    // an unchanged range would never terminate, so emit a majority leaf instead.
    if (shreshold_pos <= (unsigned int)left || shreshold_pos >= (unsigned int)right) {
        int labelcount = 0, maxlabelcount = 0, reslabel = 0;
        for (int label = 0; label < numLabel; label++) {
            labelcount = thrust::count_if(thrust::device, x_train+left, x_train+right, count_label(label) );
            if (labelcount > maxlabelcount) {
                maxlabelcount = labelcount;
                reslabel = label;
            }
        }
        parent->featureId = -1;
        parent->left = NULL;
        parent->right = NULL;
        parent->label = reslabel;
        return;
    }
    // Recurse into both halves.
    ClassifyNode *left_child = (ClassifyNode *)malloc( sizeof(ClassifyNode) );
    parent->left = left_child;
    rec_buildTree(x_train, left_child, numX, numLabel, numF, left, shreshold_pos, depth+1);
    ClassifyNode *right_child = (ClassifyNode *)malloc( sizeof(ClassifyNode) );
    parent->right = right_child;
    rec_buildTree(x_train, right_child, numX, numLabel, numF, shreshold_pos, right, depth+1);
}
// Classifies one sample by walking the tree: leaves are marked featureId < 0
// and carry the label; inner nodes route on feature[featureId] vs. threshold.
int rec_predict(DataNode *target, ClassifyNode *clTree) {
    if (clTree->featureId < 0)
        return clTree->label;
    double value = target->feature[clTree->featureId];
    if (value < clTree->threshold)
        return rec_predict(target, clTree->left);
    if (value >= clTree->threshold)
        return rec_predict(target, clTree->right);
    // Both comparisons false only for NaN feature values.
    printf("something error\n");
    return -1;
}
// Classifies numX samples; returns a malloc'd array of predicted labels that
// the caller owns (and must free).
int * predict(DataNode *x_train, int numX, ClassifyNode *clTree) {
    int *labels = (int *)malloc( numX*sizeof(int) );
    for (int idx = 0; idx < numX; ++idx)
        labels[idx] = rec_predict(x_train + idx, clTree);
    return labels;
}
// In-place Fisher-Yates shuffle of the first n elements, driven by rand().
// Call srand() beforehand for a different permutation per run.
void shuffle(DataNode *array, size_t n)
{
    if (n <= 1)
        return;
    for (size_t i = 0; i + 1 < n; i++)
    {
        // Pick j uniformly-ish from [i, n) using the classic rand() idiom.
        size_t j = i + rand() / (RAND_MAX / (n - i) + 1);
        DataNode tmp = array[j];
        array[j] = array[i];
        array[i] = tmp;
    }
}
DataNode gl_x_train[MAXDATA];
// Driver: reads a header (H, W) then `numX` comma-separated rows of numF
// feature values plus a label from stdin, trains a forest of numT GPU-built
// trees, and reports training-set accuracy.
int main() {
int H, W;
scanf("%d%d", &H, &W);
printf("Size of training data = (%d, %d)\n",H, W);
// Fixed problem dimensions; H/W are only echoed, not used below.
int numX = 1000;//, label;
int numF = 10;
int numLabel = 10;
int numT = 1;
/*
int label;
for(int i = 0; i < H; i++)
for(int j = 0; j < W; j++) {
gl_x_train[numX].feature[0] = i;
gl_x_train[numX].feature[1] = j;
scanf("%d", &label);
gl_x_train[numX].label = label;
numX++;
}
*/
// Parse one CSV row per sample: replace commas with spaces, then stream-read
// numF doubles followed by the integer label.
for (int i = 0; i < numX; i++) {
static char line[1024];
scanf("%s", line);
for (int j = 0; line[j]; j++) {
if (line[j] == ',')
line[j] = ' ';
}
std::stringstream sin(line);
for ( int f = 0; f < numF; f++)
sin >> gl_x_train[i].feature[f];
sin >> gl_x_train[i].label;
}
#ifdef DDEBUG
for(int i = 0; i < numX; i++) {
for(int j = 0; j < numF; j++)
printf("%f ", gl_x_train[i].feature[j]);
printf("%d\n",gl_x_train[i].label);
}
#endif
/*
//copy data to gpu
cudaError_t st;
DataNode *gpu_x_train;
st = cudaMalloc( (void**)&gpu_x_train, numX*sizeof(DataNode) );
assert(st == cudaSuccess);
st = cudaMemcpy( gpu_x_train, gl_x_train, numX*sizeof(DataNode), cudaMemcpyHostToDevice );
assert(st == cudaSuccess);
//buildtree
ClassifyNode *classifyTree = (ClassifyNode *)malloc( sizeof(ClassifyNode) );
long start = clock();
rec_buildTree(gpu_x_train, classifyTree, numX, numLabel, numF, 0, 0, numX);
printf("time = %lf\n",(double)(clock()-start)/CLOCKS_PER_SEC);
*/
//buildforest
// Each tree trains on a fresh shuffle of the data, copied to the device.
float forest_rate = 1;
DataNode *gpu_x_train;
cudaMalloc( (void**)&gpu_x_train, forest_rate*numX*sizeof(DataNode) );
ClassifyNode * forest[numT];
srand(time(NULL));
for (int i = 0; i < numT; i++) {
forest[i] = (ClassifyNode *)malloc( sizeof(ClassifyNode) );
shuffle(gl_x_train, numX);
cudaMemcpy( gpu_x_train, gl_x_train, forest_rate*numX*sizeof(DataNode), cudaMemcpyHostToDevice );
long start = clock();
rec_buildTree(gpu_x_train, forest[i], (int)numX*forest_rate, numLabel, numF ,0 , numX*forest_rate, 0);
printf("time = %lf\n",(double)(clock()-start)/CLOCKS_PER_SEC);
}
//predict
// NOTE(review): with numT > 1 each loop iteration overwrites `res`, leaking
// all but the last prediction array -- confirm intent before widening numT.
int *res;
for (int f = 0; f < numT; f++) {
int incorrect = 0;
res = predict(gl_x_train, numX, forest[f]);
for (int i = 0; i < numX; i++)
if(res[i] != gl_x_train[i].label)
incorrect++;
printf("accuracy = %lf\n", (numX-incorrect)/(double)numX );
}
//free
//free(classifyTree);
free(res);
}
|
7,448 | #include "includes.h"
// Fills three parallel channel buffers with constant test values: one thread
// per element over a 2-D grid flattened to a linear index, tail guarded.
__global__ void kernel(float* red, float* green, float* blue, unsigned long N){
    const int gx = threadIdx.x + (blockIdx.x * blockDim.x);
    const int gy = threadIdx.y + (blockIdx.y * blockDim.y);
    const unsigned long tid = gx + (gy * blockDim.x * gridDim.x);
    if (tid < N) {
        red[tid] = .5;
        blue[tid] = .7;
        green[tid] = .2;
    }
} |
7,449 | #include <unistd.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#define BLOCKS 50000
#define THREADS 200
#define N (BLOCKS * THREADS)
#define CHECK(x) {\
cudaError_t code = (x);\
if(code != cudaSuccess) {\
printf("Error in %s, line %d: %s.\n",\
__FILE__,\
__LINE__,\
cudaGetErrorString(code) );\
exit(code);\
}\
}\
/* run the collatz conjecture and return the number of steps */
/* Runs the Collatz iteration for the seed stored in step[i] and replaces it
 * with the number of steps taken to reach 1. One thread per element; the
 * launch in main() covers the array exactly (BLOCKS * THREADS == N). */
__global__ void collatz(unsigned int* step) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int x = step[idx];
    /* Accumulate in a register: the original incremented step[idx] in global
     * memory on every iteration, one read-modify-write per step. */
    unsigned int count = 0;
    /* NOTE(review): 3x+1 can overflow 32 bits for large seeds -- unchanged
     * from the original; confirm N stays in a safe range. */
    while (x != 1) {
        if ((x % 2) == 0) {
            x = x / 2;
        } else {
            x = 3 * x + 1;
        }
        count++;
    }
    step[idx] = count;
}
/* Computes Collatz chain lengths for 1..N on the GPU and reports the longest. */
int main( ) {
    /* host-side buffer: seeds in, step counts out */
    unsigned int* cpu_steps = (unsigned int*)malloc(N * sizeof(unsigned int));
    if (cpu_steps == NULL) { /* the original dereferenced a potential NULL */
        printf("malloc failed\n");
        return 1;
    }
    unsigned int* gpu_steps;
    /* allocate space on the GPU */
    CHECK( cudaMalloc((void**) &gpu_steps, N * sizeof(unsigned int)) );
    /* seed slot i with starting value i+1 */
    for (int i = 0; i < N; i++) {
        cpu_steps[i] = i + 1;
    }
    CHECK( cudaMemcpy(gpu_steps,
                      cpu_steps,
                      N * sizeof(unsigned int),
                      cudaMemcpyHostToDevice) );
    /* run the collatz conjecture on all N items */
    collatz<<<BLOCKS, THREADS>>>(gpu_steps);
    CHECK(cudaPeekAtLastError());
    /* blocking copy back also synchronizes with the kernel */
    CHECK( cudaMemcpy(cpu_steps,
                      gpu_steps,
                      N * sizeof(unsigned int),
                      cudaMemcpyDeviceToHost) );
    CHECK( cudaFree(gpu_steps) );
    /* linear scan for the longest chain */
    unsigned int largest = cpu_steps[0], largest_i = 0;
    for (int i = 1; i < N; i++) {
        if (cpu_steps[i] > largest) {
            largest = cpu_steps[i];
            largest_i = i;
        }
    }
    printf("The longest collatz chain up to %d is %d with %d steps.\n",
           N, largest_i + 1, largest);
    free(cpu_steps); /* the original leaked this buffer */
    return 0;
}
|
7,450 | #include <stdlib.h>
#include <stdio.h>
#define checkCuda(error) __checkCuda(error, __FILE__, __LINE__)
// Fills d_buff[i] = i + 1 for i in [0, size). The grid is rounded up to a
// block multiple, so the tail must be guarded: the original wrote d_buff[ix]
// unconditionally, an out-of-bounds store for ix >= size.
__global__ void kernel(int *d_buff, int size){
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    if (ix < size)
        d_buff[ix] = ix+1;
}
extern "C" {
// Aborts with a diagnostic when a CUDA runtime call returned an error.
// Formats the error that was passed in: the original formatted
// cudaGetLastError() instead, which reports (and clears) a possibly
// different, already-stale error.
inline void __checkCuda(cudaError_t error, const char *file, const int line){
    if (error != cudaSuccess){
        printf("checkCuda error at %s:%i: %s\n", file, line, cudaGetErrorString(error));
        exit(-1);
    }
    return;
}
// Launches `kernel` to fill d_buff[0..size) with 1..size.
void callKernel(int *d_buff,int size){
    dim3 grid, block;
    block.x = 1024;
    grid.x = (size + block.x - 1) / block.x;
    // <<<grid, block>>>: the original passed <<<block, grid>>>, i.e. 1024
    // blocks of ceil(size/1024) threads -- wrong element coverage, and an
    // invalid launch (blockDim > 1024) once size exceeds 1024*1024.
    kernel<<<grid,block>>>(d_buff,size);
}
// Allocates the device buffer (*d_buff, `size` ints). The rank-related
// parameters are currently unused -- their allocations are commented out.
void init(int **d_buff, int **d_rank,int *rank,int size){
checkCuda(cudaMalloc((void**)d_buff,sizeof(int)*size));
//checkCuda(cudaMalloc((void**)d_rank,sizeof(int)));
//checkCuda(cudaMemcpy(*d_rank, rank,sizeof(int),cudaMemcpyHostToDevice));
}
// Rank 0 drains the device buffer to the host; every other rank uploads the
// host buffer and re-runs the fill kernel on its copy.
void MPI_standard(int *h_buff,int *d_buff,int rank, int size){
    if(rank==0){
        checkCuda(cudaMemcpy(h_buff, d_buff,sizeof(int)*size,cudaMemcpyDeviceToHost));
    }else{
        checkCuda(cudaMemcpy(d_buff, h_buff,sizeof(int)*size,cudaMemcpyHostToDevice));
        dim3 grid, block;
        block.x = 1024;
        grid.x = (size + block.x - 1) / block.x;
        // <<<grid, block>>>: the original swapped the two launch arguments.
        kernel<<<grid,block>>>(d_buff,size);
    }
}
// Copies an int buffer from GPU 0 to GPU 1 with cudaMemcpyPeerAsync and runs
// the fill kernel on the destination device. Buffers are allocated and freed
// within this call; contents are never read back (bandwidth experiment).
void transfer_intra_P2P(int n_buffer){
    int gpu1 = 0;
    int gpu2 = 1;
    int *d_buffer;
    int *d2_buffer;
    int i;
    dim3 grid, block;
    block.x = 1024;
    grid.x = (n_buffer + block.x - 1) / block.x;
    checkCuda(cudaSetDevice(gpu1));
    cudaStream_t stream_0;
    checkCuda(cudaStreamCreate(&stream_0));
    checkCuda(cudaMalloc(&d_buffer,sizeof(int)*n_buffer));
    checkCuda(cudaSetDevice(gpu2));
    checkCuda(cudaMalloc(&d2_buffer,sizeof(int)*n_buffer));
    for(i=0;i<1;i++){
        checkCuda(cudaMemcpyPeerAsync(d2_buffer,gpu2,d_buffer,gpu1,n_buffer*sizeof(int),stream_0));
        // Wait for the async peer copy before touching the destination.
        cudaDeviceSynchronize();
        // <<<grid, block>>>: the original swapped the two launch arguments.
        kernel<<<grid,block>>>(d2_buffer,n_buffer);
    }
    checkCuda(cudaFree(d2_buffer));
    checkCuda(cudaSetDevice(gpu1));
    // Destroy the stream created above; the original leaked it on every call.
    checkCuda(cudaStreamDestroy(stream_0));
    checkCuda(cudaFree(d_buffer));
}
// Moves an int buffer from GPU 0 to GPU 1 by staging through a host buffer
// (device->host->device), then runs the fill kernel on the destination.
void transfer_intra_standard(int n_buffer){
    int gpu1 = 0;
    int gpu2 = 1;
    int *d_buffer;
    int *d2_buffer;
    int *buffer;
    int i;
    dim3 grid, block;
    block.x = 1024;
    grid.x = (n_buffer + block.x - 1) / block.x;
    buffer = (int*) malloc(sizeof(int)*n_buffer);
    checkCuda(cudaSetDevice(gpu1));
    checkCuda(cudaMalloc(&d_buffer,sizeof(int)*n_buffer));
    checkCuda(cudaSetDevice(gpu2));
    checkCuda(cudaMalloc(&d2_buffer,sizeof(int)*n_buffer));
    for(i=0;i<1;i++){
        checkCuda(cudaSetDevice(gpu1));
        checkCuda(cudaMemcpy(buffer,d_buffer,n_buffer*sizeof(int),cudaMemcpyDeviceToHost));
        checkCuda(cudaSetDevice(gpu2));
        checkCuda(cudaMemcpy(d2_buffer,buffer,n_buffer*sizeof(int),cudaMemcpyHostToDevice));
        // <<<grid, block>>>: the original swapped the two launch arguments.
        kernel<<<grid,block>>>(d2_buffer,n_buffer);
        cudaDeviceSynchronize();
    }
    checkCuda(cudaSetDevice(gpu1));
    checkCuda(cudaFree(d_buffer));
    checkCuda(cudaSetDevice(gpu2));
    checkCuda(cudaFree(d2_buffer));
    free(buffer); // the original leaked the staging buffer on every call
}
// Thin wrapper over cudaSetDevice (return code intentionally ignored here).
void setDevice(int device){
cudaSetDevice(device);
}
// Copies `size` ints from device buffer d_buff into host buffer h_buff.
void getResult(int *d_buff, int *h_buff,int size){
checkCuda(cudaMemcpy(h_buff,d_buff,size*sizeof(int),cudaMemcpyDeviceToHost));
}
// Frees both device buffers.
// NOTE(review): init() never actually allocates *d_rank (that malloc is
// commented out), so this free relies on the caller's pointer being valid
// or NULL -- confirm at the call site.
void clean(int **d_buff, int **d_rank){
checkCuda(cudaFree(*d_buff));
checkCuda(cudaFree(*d_rank));
}
}
|
7,451 | /**
* 2DConvolution.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <sgrauerg@gmail.com>
* Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <unistd.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <cuda.h>
#define NUM_ITERATIONS 10
/* Problem size */
//#define NI 15000l
//#define NJ 15000l
/* Thread block dimensions */
#define DIM_THREAD_BLOCK_X 32
#define DIM_THREAD_BLOCK_Y 8
/* Can switch DATA_TYPE between float and double */
typedef double DATA_TYPE;
// Fills the NI x NJ row-major matrix A with pseudo-random values in [0, 1]
// drawn from rand() (deterministic unless srand() is called first).
void init(DATA_TYPE* A, int NI, int NJ)
{
    for (int row = 0; row < NI; ++row)
    {
        for (int col = 0; col < NJ; ++col)
        {
            A[row*NJ + col] = (DATA_TYPE)rand()/RAND_MAX;
        }
    }
}
// 3x3 convolution with a fixed stencil: B[i][j] is the weighted sum of the
// 3x3 neighborhood of A[i][j] (row-major, NI rows x NJ columns). Border
// cells (first/last row and column) are skipped, so B's border is left
// unwritten. j spans columns via the x grid axis, i spans rows via y.
__global__ void Convolution2D_kernel(DATA_TYPE *A, DATA_TYPE *B, int NI, int NJ)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
// Stencil coefficients (c<row><col> layout).
DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;
c11 = +0.2; c21 = +0.5; c31 = -0.8;
c12 = -0.3; c22 = +0.6; c32 = -0.9;
c13 = +0.4; c23 = +0.7; c33 = +0.10;
// Interior cells only; also guards the rounded-up grid tail.
if ((i < NI-1) && (j < NJ-1) && (i > 0) && (j > 0))
{
B[i * NJ + j] = c11 * A[(i - 1) * NJ + (j - 1)] + c21 * A[(i - 1) * NJ + (j + 0)] + c31 * A[(i - 1) * NJ + (j + 1)]
+ c12 * A[(i + 0) * NJ + (j - 1)] + c22 * A[(i + 0) * NJ + (j + 0)] + c32 * A[(i + 0) * NJ + (j + 1)]
+ c13 * A[(i + 1) * NJ + (j - 1)] + c23 * A[(i + 1) * NJ + (j + 0)] + c33 * A[(i + 1) * NJ + (j + 1)];
}
}
// Launches the 2-D convolution over an NI x NJ matrix and blocks until done.
void convolution2DCuda(DATA_TYPE* A, DATA_TYPE* B, int NI, int NJ)
{
    dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
    // block.x spans columns (j, range NJ) and block.y spans rows (i, range NI),
    // so the grid must be ceil(NJ/block.x) x ceil(NI/block.y). The original
    // swapped NI and NJ here, which only worked because main() sets NI == NJ.
    dim3 grid((size_t)ceil( ((float)NJ) / ((float)block.x) ), (size_t)ceil( ((float)NI) / ((float)block.y)) );
    Convolution2D_kernel<<<grid, block>>>(A, B, NI, NJ);
    // Wait for GPU to finish before the host reads B.
    cudaDeviceSynchronize();
}
// Benchmarks the 2-D convolution on a square matrix whose side is argv[1]
// (both NI and NJ come from the same argument). Compiled with -DUNMANAGED it
// uses explicit cudaMalloc/cudaMemcpy; otherwise unified (managed) memory.
// Prints the elapsed kernel-path time in milliseconds.
int main(int argc, char *argv[])
{
if(argc < 2){
printf("pls no troll\n");
return 1;
}
int NI = atoi(argv[1]);
int NJ = atoi(argv[1]);
DATA_TYPE* A;
DATA_TYPE* B;
float average_time = 0;
cudaEvent_t start, end;
// NOTE(review): `time` is declared but never used.
float time;
#ifndef UNMANAGED
cudaMallocManaged( &A, NI*NJ*sizeof(DATA_TYPE) );
cudaMallocManaged( &B, NI*NJ*sizeof(DATA_TYPE) );
//initialize the arrays
init(A, NI, NJ);
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
convolution2DCuda(A, B, NI, NJ);
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&average_time, start, end);
#else
// Explicit-copy path: the timed region includes the host-to-device copy.
DATA_TYPE *gA, *gB;
cudaMalloc( &gA, NI*NJ*sizeof(DATA_TYPE) );
cudaMalloc( &gB, NI*NJ*sizeof(DATA_TYPE) );
A = (DATA_TYPE *) malloc( NI*NJ*sizeof(DATA_TYPE) );
B = (DATA_TYPE *) malloc( NI*NJ*sizeof(DATA_TYPE) );
//initialize the arrays
init(A, NI, NJ);
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
cudaMemcpy(gA, A, NI*NJ*sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
convolution2DCuda(gA, gB, NI, NJ);
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&average_time, start, end);
cudaMemcpy(B, gB, NI*NJ*sizeof(DATA_TYPE), cudaMemcpyDeviceToHost);
#endif
printf("%f\n", average_time);
#ifndef UNMANAGED
cudaFree(A);
cudaFree(B);
#else
cudaFree(gA);
cudaFree(gB);
free(A);
free(B);
#endif
return 0;
}
|
7,452 | #include "cuda.h"
#include "stdio.h"
// Prints a single integer followed by a newline.
void printi(int i){
printf("%d\n", i);
}
// Sets the first n elements of `array` to 1.
void init_CPU_array(int* array, int n){
    for (int idx = 0; idx < n; ++idx)
        array[idx] = 1;
}
// Prints the first n elements, one per line (via printi).
void print_CPU_array(int array[], int n){
for(int i = 0; i < n; i++) {
printi(array[i]);
}
}
// Prints the first n elements space-separated, inserting a line break
// before every 16th element.
void print_CPU_matrix(int array[], int n){
for(int i = 0; i < n; i++) {
if(i % 16 == 0)
printf("%s\n", "");
printf("%d ", array[i]);
}
}
// Block-level tree reduction: sums up to blockDim.x (<= 10) elements of
// `arreglo` through shared memory and leaves each block's partial sum in
// result[blockIdx.x]. N is the total element count.
__global__ void sumador(int* arreglo, int* result, float N)
{
    __shared__ int compartida[10];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the load: the launch rounds up to blockDim.x threads, so lanes
    // past N must not read out of bounds (the original read arreglo[tid]
    // unconditionally). Zero is the identity for the sum.
    compartida[threadIdx.x] = (tid < N) ? arreglo[tid] : 0;
    __syncthreads();
    for(int i=1; pow((float)2,(float)i-1) < N; i++)
    {
        int acceso = pow((float)2,(float)i);
        int offset = pow((float)2, (float)i-1);
        if(threadIdx.x < (N/acceso) && (threadIdx.x * acceso + offset) < (N - blockIdx.x * blockDim.x))
        {
            compartida[threadIdx.x * acceso] = compartida[threadIdx.x * acceso] + compartida[threadIdx.x * acceso + offset];
            compartida[threadIdx.x * acceso + offset] = 0;
            printf("%s\n", "TRABAJO");
            result[blockIdx.x] = compartida[0];
        }
        printf("%s\n", "");
        // Barrier between reduction rounds (the original raced across
        // iterations). The loop bound depends only on N, which is uniform
        // across the block, so every thread reaches this barrier.
        __syncthreads();
    }
}
int* arreglo_suma1;
int* d_arreglo_suma1;
int* arreglo_result;
int* d_arreglo_suma2;
// Sums an array of N ones on the GPU via the shared-memory reduction kernel
// and prints the per-block partial results.
int main(int argc, char** argv){
int N = 8;
//##################################################################################
//############################## INICIALIZACION ####################################
arreglo_suma1 = (int*) malloc(N * sizeof(int));
cudaMalloc(&d_arreglo_suma1, N * sizeof(int));
arreglo_result = (int*) malloc(N * sizeof(int));
cudaMalloc(&d_arreglo_suma2, N * sizeof(int));
// Input is all ones, so the expected total is N.
init_CPU_array(arreglo_suma1, N);
cudaMemcpy(d_arreglo_suma1, arreglo_suma1, N * sizeof(int), cudaMemcpyHostToDevice);
// 10 threads/block matches the kernel's fixed 10-slot shared array.
int threads_per_block = 10;
int block_count = ceil((float)N / threads_per_block);
//##################################################################################
//################################ EJECUCIONES #####################################
dim3 miGrid1D_1(block_count,1);
dim3 miBloque1D_1(threads_per_block,1);
sumador<<<miGrid1D_1, miBloque1D_1>>>(d_arreglo_suma1, d_arreglo_suma2, N);
//###################################################################################
//################################### READ BACK #####################################
// Blocking copy also synchronizes with the kernel before printing.
cudaMemcpy(arreglo_result, d_arreglo_suma2, N * sizeof(int), cudaMemcpyDeviceToHost);
printf("%s\n", "RESULTADO DE LA SUMA:");
print_CPU_matrix(arreglo_result, N);
free(arreglo_suma1);
cudaFree (d_arreglo_suma1);
free(arreglo_result);
cudaFree (d_arreglo_suma2);
} |
7,453 | #include "includes.h"
#define MAX_CUDA_THREADS_PER_BLOCK 1024
// In-place interleaved-addressing max reduction: after the kernel, data[0]
// holds the maximum of the elements reduced by block 0.
// NOTE(review): __syncthreads() only synchronizes within one block, so the
// result is the global max only when a single block covers all of `data` --
// confirm the launch configuration at the call site.
__global__ void Max_Interleaved_Addressing_Global(float* data, int data_size){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    for(int stride=1; stride < data_size; stride *= 2) {
        // The barrier stays outside the divergent branch: the original nested
        // the whole loop (and its __syncthreads) inside `if (idx < data_size)`,
        // which is undefined behavior when part of a block skips it. The
        // `idx + stride < data_size` guard also prevents the original's
        // out-of-bounds read of data[idx + stride] at the tail.
        if (idx < data_size && idx % (2*stride) == 0 && idx + stride < data_size) {
            float lhs = data[idx];
            float rhs = data[idx + stride];
            data[idx] = lhs < rhs ? rhs : lhs;
        }
        __syncthreads();
    }
} |
// Batched matrix product over time steps: for each batch `index`, output row
// `row` and time step `col`, computes
//   v[index][row][col] = sum_j x_in[index][j][col] * w[j][row]
// with x_in laid out (batch, N_in, t_max), w laid out (N_in, N_out) and
// v laid out (batch, N_out, t_max), all row-major/flattened.
__global__ void batch_tensordot(int * x_in, float * w, float * v, int batchSize, int N_in, int N_out, int t_max)
{
int index = blockIdx.z * blockDim.z + threadIdx.z;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0;
// Guard all three axes: the grid is rounded up per dimension.
if (index < batchSize && row < N_out && col < t_max)
{
for (int j = 0; j < N_in; j++)
{
sum += x_in[index*N_in*t_max + j*t_max + col] * w[j*N_out + row];
}
v[index*N_out*t_max + row*t_max + col] = sum;
}
}
// In-place prefix sum along the time axis of v (batch, N_out, t_max).
// Only the col == 0 thread of each (index, row) pair does the work,
// sequentially, so rows are independent but each row is serial.
__global__ void batch_cumsum(float * v, int N_out, int t_max, int batchSize)
{
int index = blockIdx.z * blockDim.z + threadIdx.z;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (index < batchSize && row < N_out && col == 0){
for (int j = 1; j < t_max; j++){
v[index*N_out*t_max + row*t_max + j] += v[index*N_out*t_max + row*t_max + j-1];
}
}
}
// Finds the first time step where the accumulated potential v crosses
// th_val: records it in firing_t[index][row] and sets a single spike in
// x[index][row][t]. One serial scan per (index, row), done by the col == 0
// thread. Rows that never cross leave firing_t/x untouched.
__global__ void batch_thresholding(int batchSize, int * x, int * firing_t, float * v, int N_out, int t_max, float th_val)
{
int index = blockIdx.z * blockDim.z + threadIdx.z;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
if (index < batchSize && row < N_out && col == 0){
for (int j = 0; j < t_max; j++){
if (v[index*N_out*t_max + row*t_max + j] >= th_val){
firing_t[index*N_out + row] = j;
x[index*N_out*t_max + row*t_max + j] = 1;
break;
}
}
}
}
// Dense spiking layer: weights the input spikes (batch_tensordot), integrates
// the potential over time (batch_cumsum), then emits the first
// threshold-crossing spike per neuron (batch_thresholding).
void batch_dense(int * x_in, int * x_out, int * firing_t, float * w, int N_in, int N_out, int batchSize, float th_val, int t_max){
    float * v;
    cudaMalloc((void **) &v, sizeof(float)*batchSize*N_out*t_max);
    dim3 threadsPerBlock(8, 8, 16);
    dim3 blocksPerGrid(1, 1, 1);
    // Each grid axis is divided by its matching block axis. The original
    // divided batchSize by threadsPerBlock.x (8) instead of .z (16), which
    // over-launched blocks (correct only thanks to the kernels' guards).
    blocksPerGrid.z = ceil(float(batchSize)/float(threadsPerBlock.z));
    blocksPerGrid.y = ceil(float(N_out)/float(threadsPerBlock.y));
    blocksPerGrid.x = ceil(float(t_max)/float(threadsPerBlock.x));
    batch_tensordot<<<blocksPerGrid, threadsPerBlock>>>(x_in, w, v, batchSize, N_in, N_out, t_max);
    batch_cumsum<<<blocksPerGrid, threadsPerBlock>>>(v, N_out, t_max, batchSize);
    batch_thresholding<<<blocksPerGrid, threadsPerBlock>>>(batchSize, x_out, firing_t, v, N_out, t_max, th_val);
    // Release the scratch potential buffer; the original leaked it on every
    // call. (cudaFree waits for outstanding work on the device before
    // reclaiming the allocation.)
    cudaFree(v);
}
|
7,455 | /*
============================================================================
Name : hello-world.cu
Author : Raul Vidal (github Raulvo)
Version : 1.0
Copyright : Public Domain
Description : CUDA Hello World string reverse
============================================================================
*/
#include <iostream>
#include <string>
#include <numeric>
#include <stdlib.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
// Reverses the buffer in place: thread i swaps character i with its mirror
// (string_size - i - 1); threads past the midpoint or the buffer do nothing.
__global__ void helloWorldKernel(char* helloworld,size_t string_size) {
    const unsigned int lo = blockIdx.x*blockDim.x+threadIdx.x;
    const unsigned int hi = string_size - lo - 1;
    if (lo < hi && lo < string_size) {
        const char saved = helloworld[lo];
        helloworld[lo] = helloworld[hi];
        helloworld[hi] = saved;
    }
    __syncthreads(); /* Only needed if there are more than 32 threads */
}
// Reverses `helloworld` on the GPU and prints the result. The std::string
// argument itself is not modified; the reversed copy goes to std::cout.
void gpuHelloWorld(std::string& helloworld)
{
    char* hosthwascii = new char[helloworld.length()+1];
    size_t hwsize = helloworld.length();
    helloworld.copy(hosthwascii,helloworld.size());
    char* gpuhwascii;
    static const int BLOCK_SIZE = 32;
    // One thread per character, grid rounded up: the original launched a
    // single 32-thread block, which only reversed the outer 32 characters
    // of longer strings. At least one block so a tiny string still launches.
    int blocks = (int)((hwsize + BLOCK_SIZE - 1) / BLOCK_SIZE);
    if (blocks == 0) blocks = 1;
    CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuhwascii, sizeof(char)*hwsize));
    CUDA_CHECK_RETURN(cudaMemcpy(gpuhwascii, hosthwascii, sizeof(char)*hwsize, cudaMemcpyHostToDevice));
    helloWorldKernel<<<blocks, BLOCK_SIZE>>> (gpuhwascii,hwsize);
    // cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is the
    // modern equivalent.
    CUDA_CHECK_RETURN(cudaDeviceSynchronize());
    CUDA_CHECK_RETURN(cudaMemcpy(hosthwascii, gpuhwascii, sizeof(char)*hwsize, cudaMemcpyDeviceToHost));
    CUDA_CHECK_RETURN(cudaFree(gpuhwascii));
    hosthwascii[hwsize] = '\0';
    std::cout<<"gpu Hello World = "<< std::string(hosthwascii) << std::endl;
    delete[] hosthwascii;
}
// Reverses `helloworld` in place on the CPU and prints the result.
// Safe for empty and one-character strings: the original computed
// j = length()-1 on an unsigned value, which underflows for an empty string
// and made at(0) throw std::out_of_range.
void cpuHelloWorld(std::string& helloworld) {
    if (helloworld.length() >= 2) {
        char tmp = 0;
        for (unsigned int i = 0, j = helloworld.length()-1;
             i < j;
             i++, j--)
        {
            tmp = helloworld.at(i);
            helloworld.replace(i,1,&helloworld[j],1);
            helloworld.replace(j,1,&tmp,1);
        }
    }
    std::cout<<"cpu Hello World = "<< helloworld << std::endl;
}
// Reverses the same string twice: first on the CPU (modifying it in place),
// then on the GPU (which prints its own copy).
int main(void)
{
    std::string helloworld("dlroW olleH");
    cpuHelloWorld(helloworld);
    gpuHelloWorld(helloworld);
    return 0;
}
/**
* Check the return value of the CUDA runtime API call and exit
* the application if the call has failed.
*/
// Exits the program with a diagnostic (statement text, error string, code,
// and source location) when a CUDA runtime call did not succeed.
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
    if (err != cudaSuccess) {
        std::cerr << statement<<" returned " << cudaGetErrorString(err) << "("<<err<< ") at "<<file<<":"<<line << std::endl;
        exit (1);
    }
}
|
7,456 | #include "includes.h"
// Replaces each of the first n elements of vec with its absolute value.
// One thread per element; the rounded-up grid tail is guarded by xIndex < n.
__global__ void magnitude(float *vec, const int n)
{
unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
// NOTE(review): abs() on a float is assumed to resolve to a floating-point
// overload in device code here; fabsf would make the intent explicit.
if (xIndex < n) { vec[xIndex] = abs(vec[xIndex]); }
7,457 | /* Based on code from here: http://devblogs.nvidia.com/parallelforall/easy-introduction-cuda-c-and-c/ */
#include <stdio.h>
#include <stdlib.h>
/* Calculate SAXPY, single-precision vector math */
/* y[i]=a*x[i]+y[i] */
/* SAXPY: y[i] = a*x[i] + y[i], single precision, one thread per element.
 * The grid may be rounded up past n, so out-of-range threads exit early. */
__global__
void saxpy (int n, float a, float *x, float *y) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx >= n)
        return;
    y[idx] = a*x[idx] + y[idx];
}
/* Benchmarks SAXPY: optional argv[1] = vector length, argv[2] = repeat count.
 * Prints the launch grid size and two sample results. */
int main(int argc, char **argv) {
    float *x, *y, *dev_x, *dev_y;
    float a;
    long long N=(1000*1000*8),loops=1;
    if (argc>1) {
        N=atoll(argv[1]);
    }
    if (argc>2) {
        loops=atoll(argv[2]);
    }
    /* Allocate vectors on CPU */
    x=(float *)malloc(N*sizeof(float));
    y=(float *)malloc(N*sizeof(float));
    /* Allocate vectors on GPU */
    cudaMalloc((void **)&dev_x,N*sizeof(float));
    cudaMalloc((void **)&dev_y,N*sizeof(float));
    /* Initialize the host vectors. long long index: the original's int
     * counter overflowed once N exceeded 2^31. */
    for(long long k=0;k<N;k++) {
        x[k]=(float)k;
        y[k]=(float)(10.0*k);
    }
    cudaMemcpy(dev_x,x,N*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_y,y,N*sizeof(float),cudaMemcpyHostToDevice);
    /* %lld: (N+255)/256 is long long -- %d was undefined behavior */
    printf("Size: %lld\n",(N+255)/256);
    a=5.0;
    for(long long j=0;j<loops;j++) {
        /* Perform SAXPY */
        saxpy<<<(N+255)/256,256>>>(N,a,dev_x,dev_y);
    }
    /* make the host block until the device is finished */
    cudaDeviceSynchronize();
    /* check for error */
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    cudaMemcpy(y,dev_y,N*sizeof(float),cudaMemcpyDeviceToHost);
    /* spot-check two results: y[i] = a*x[i] + y[i] = 5i + 10i = 15i */
    int i=100;
    printf("y[%d]=%f, y[%lld]=%f\n",i,y[i],N-1,y[N-1]);
    cudaFree(dev_x);
    cudaFree(dev_y);
    free(x); /* the original leaked both host buffers */
    free(y);
    return 0;
}
|
7,458 | #include <iostream>
#include <fstream>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <algorithm>
#include <set>
#include <list>
#include <cuda.h>
#include <cuda_runtime.h>
#include <iomanip>
using namespace std;
// Adds x[tid] + y[tid] into z[tid], then unconditionally overwrites z[tid]
// with 100 -- so the observable result of this kernel is always 100 and the
// addition is dead code (apparently a demo/test kernel).
__global__ void temp1(int *x,int *y,int *z){
int thId = threadIdx.x;
z[thId]=x[thId]+y[thId];
z[thId]=100;
// No-op for the single-thread launch used in main(); harmless otherwise.
__syncthreads();
}
// Demo driver: launches temp1 on one thread and prints the result twice --
// once before the device-to-host copy (garbage) and once after (100),
// apparently to illustrate that kernel launches are asynchronous.
int main()
{
cudaSetDevice(0);
cudaDeviceReset();
int *x_d,*y_d,*z_d,*x,*y,*z;
x=(int*)malloc(sizeof(int));
y=(int*)malloc(sizeof(int));
z=(int*)malloc(sizeof(int));
*x=1;
*y=2;
cudaMalloc((void**)&x_d, sizeof(int));
cudaMalloc((void**)&y_d, sizeof(int));
cudaMalloc((void**)&z_d, sizeof(int));
cudaMemcpy(x_d, x, sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(y_d, y, sizeof(int), cudaMemcpyHostToDevice);
temp1<<<1,1>>>(x_d,y_d,z_d);
// *z has not been copied back yet: this prints uninitialized host memory.
cout<<"result="<<*z<<endl;
cudaMemcpy(z, z_d, sizeof(int), cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// After the blocking copy this prints 100 (the kernel's final store).
// NOTE(review): the host and device allocations are never freed.
cout<<"result="<<*z<<endl;
}
|
7,459 | #include "add.hh"
#include <cassert>
#include <stdexcept>
#include "graph.hh"
#include "ops-builder.hh"
#include "sigmoid-grad.hh"
#include "../runtime/node.hh"
#include "../memory/alloc.hh"
namespace ops
{
// Element-wise addition op node: takes the left operand's shape as its own
// output shape and registers both operands as predecessors.
Add::Add(Op* left, Op* right)
: Op("add", left->shape_get(), {left, right})
{}
// Compiles this node: fetches the already-compiled operands from the graph,
// allocates an output tensor sized like the left operand, and registers a
// runtime add node that depends on both operand nodes.
// Assumes both predecessors were compiled first and (per the shape chosen in
// the constructor) share the left operand's total length.
void Add::compile()
{
auto& g = Graph::instance();
auto& cleft = g.compiled(preds()[0]);
auto& cright = g.compiled(preds()[1]);
std::size_t len = cleft.out_shape.total();
Shape out_shape = cleft.out_shape;
dbl_t* out_data = tensor_alloc(len);
auto out_node = rt::Node::op_add(cleft.out_data, cright.out_data, out_data,
len,
{cleft.out_node, cright.out_node});
g.add_compiled(this, {out_node}, {out_data}, out_node, out_shape, out_data);
}
// Gradient of z = a + b with respect to either input is the upstream
// gradient itself, so dout is passed through unchanged for both children.
Op* Add::child_grad(std::size_t index, Op* dout)
{
assert(index < 2);
(void) index;
return dout;
}
}
|
7,460 | #include "includes.h"
// One Floyd-Warshall relaxation pass for pivot k: each thread handles cell
// (blockIdx.y, col) of the N x N distance matrix G, updating it (and the
// path matrix P) when routing through k is shorter. blockIdx.y selects the
// row; the shared `best` broadcasts G[row][k] to the whole block.
__global__ void _GPU_Floyd_kernel(int k, int *G,int *P, int N){//G will be the adjacency matrix, P will be path matrix
int col=blockIdx.x*blockDim.x + threadIdx.x;
if(col>=N)return;
int idx=N*blockIdx.y+col;
__shared__ int best;
if(threadIdx.x==0)
best=G[N*blockIdx.y+k];
__syncthreads();
// NOTE(review): the extra `> 10` caps (here and below) prune any path
// segment longer than 10 -- this deviates from plain Floyd-Warshall and
// looks like a debugging/experiment modification; confirm intent.
if(best==INF || best > 10)return;
int tmp_b=G[k*N+col];
if(tmp_b==INF || tmp_b > 10)return;
// if (cur > 1)
// return;
int cur = best + tmp_b;
if(cur<G[idx]){
G[idx]=cur;
P[idx]=k;
}
}
7,461 | //
// Created by gautam on 17/04/20.
//
#include "CLI.cuh"
// Constructs the CLI in its initial state: not done, empty current line.
CLI::CLI(): done(false), line("") {
// load tables
// get some stuff ready later
}
// Prompts with "> " and reads lines from stdin until a non-empty one arrives
// (testLine() trims, lower-cases and rejects blanks). Returns the normalized
// line, or "" when the user typed "exit" or "quit".
std::string CLI::readLine() {
    do {
        std::cout << "> ";
        std::cout.flush();
        std::getline(std::cin, line);
    } while (testLine());
    if(line == "exit" || line == "quit") {
        return "";
    }
    return line;
    // (removed the original's unused local `query`)
}
// Normalizes the current line in place (trim whitespace, lower-case) and
// reports whether it is empty, i.e. whether readLine() should re-prompt.
bool CLI::testLine() {
utils::trim(line);
utils::toLower(line);
return line.empty();
}
|
7,462 | /*
#include "SVD.cuh"
__host__ __device__
void computeSVD2(Mat3x3 mat, float3& eVals, float3& v0, float3& v1, float3& v2) {
float& a11 = mat.r0.x;
float& a12 = mat.r0.y;
float& a13 = mat.r0.z;
float& a21 = mat.r1.x;
float& a22 = mat.r1.y;
float& a23 = mat.r1.z;
float& a31 = mat.r2.x;
float& a32 = mat.r2.y;
float& a33 = mat.r2.z;
float c2 = -a11 - a22 - a33;
float c1 = a11 * a22 + a11 * a33 + a22 * a33 - a12 * a12 - a13 * a13 - a23 * a23;
float c0 = a11 * a23 * a23 + a22 * a13 * a13 + a33 * a12 * a12 - a11 * a22 * a33 - 2 * a12 * a13 * a23;
float p = c2 * c2 - 3 * c1;
float q = -27.0 * c0 / 2.0 - c2 * c2 * c2 + 9.0 * c1 * c2 / 2.0;
float temp = abs(27.0 * (0.25 * c1 * c1 * (p - c1) + c0 * (q + 27.0 * c0 / 4.0)));
float phi = atan2(sqrt(temp), q) / 3.0;
float cosPhi = cos(phi);
float sinPhi = sin(phi);
float x1 = 2 * cosPhi;
float x2 = -cosPhi - sqrt(3) * sinPhi;
float x3 = -cosPhi + sqrt(3) * sinPhi;
//eVals.x = x1 * sqrt(p) / 3 - c2 / 3.0;
//eVals.y = x2 * sqrt(p) / 3 - c2 / 3.0;
//eVals.z = x3 * sqrt(p) / 3 - c2 / 3.0;
float3 A1 = { a11,a21,a31 };
float3 A2 = { a12,a22,a32 };
float3 e1 = { 1,0,0 };
float3 e2 = { 0,1,0 };
//v0 = cross(A1 - eVals.x * e1, A2 - eVals.x * e2);
//v1 = cross(A1 - eVals.y * e1, A2 - eVals.y * e2);
//v2 = cross(A1 - eVals.z * e1, A2 - eVals.z * e2);
}
*/ |
7,463 | #include <stdio.h>
#define T 8 // As Threads
#define N 16
// Kernel: element-wise increment of an N x N matrix, B = A + 1.
// Expects a 2D launch; out-of-range threads (grid tail) do nothing.
__global__ void vecMatrix(int *A, int *B)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row < N && col < N)
    {
        int cell = row * N + col;
        B[cell] = A[cell] + 1;
    }
}
// Host driver: builds an N x N matrix, increments every element on the GPU,
// and prints the input and the result. Adds launch/sync error reporting that
// the original lacked (kernel failures were silently ignored); the
// success-path output is unchanged.
int main (int argc, char *argv[])
{
    int i, j;
    int size[N*N];                       // host copy of the result matrix
    int A[N][N];                         // host input matrix
    int sizearr = N*N * sizeof(int);
    int *Adefault, *B;                   // device input / output buffers
    for (i = 0; i < N; i++)
    {
        for (j = 0; j < N; j++)
        {
            A[i][j] = ((i*i) + 1) * (j+1);
            printf("%5d ", A[i][j]);
        }
    }
    printf("\n");
    cudaMalloc((void**)&Adefault, sizearr);
    cudaMalloc((void**)&B, sizearr);
    cudaMemcpy(Adefault, A, sizearr, cudaMemcpyHostToDevice);
    dim3 dimBlock(T, T);
    // ceil-division grid so every element is covered
    dim3 dimGrid((N + dimBlock.x - 1) / dimBlock.x, (N + dimBlock.y - 1) / dimBlock.y);
    vecMatrix<<<dimGrid, dimBlock>>>(Adefault, B);
    // surface launch-configuration and execution errors instead of ignoring them
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(size, B, sizearr, cudaMemcpyDeviceToHost);  // blocking copy also syncs
    cudaFree(Adefault);
    cudaFree(B);
    printf("Result\n");
    for (i = 0; i < N * N; i++)
    {
        printf("%5d ", size[i]);
    }
    printf("\n");
    return 0;
}
|
7,464 | #include <math.h>
#include <float.h>
// ****************************************************************************
// DEVICE FUNCTIONS
// ****************************************************************************
// vector calculations
// Euclidean length of the 2D vector (xCoord, yCoord).
__device__ float getVectorLength(float xCoord, float yCoord) {
    // sqrtf keeps the whole computation in single precision on the device
    // (the old double sqrt forced a float->double->float round trip).
    return sqrtf(xCoord * xCoord + yCoord * yCoord);
}
// return the angle of the vector
// Polar angle of the vector (xCoord, yCoord) in degrees, range (-180, 180].
__device__ float getVectorAngle(float xCoord, float yCoord) {
    // atan2f and a float pi avoid the double-precision promotion the
    // original double atan2 / M_PI expression incurred on the device
    float angleRad = atan2f(yCoord, xCoord);
    float angleDeg = (angleRad / (float)M_PI) * 180.0f;
    return angleDeg;
}
// Signed angle in degrees, range (-180, 180], from vector (x1,y1) to (x2,y2).
__device__ float getAngleBetween(float x1, float y1, float x2, float y2) {
    // normalize both vectors (epsilon guards against zero-length input)
    float length1 = sqrtf(x1 * x1 + y1 * y1) + FLT_EPSILON;
    float normX1 = x1 / length1;
    float normY1 = y1 / length1;
    float length2 = sqrtf(x2 * x2 + y2 * y2) + FLT_EPSILON;
    float normX2 = x2 / length2;
    float normY2 = y2 / length2;
    // single-precision atan2f; the old double atan2 / M_PI promoted the
    // whole expression to double on the device
    float angle = (atan2f(normY2, normX2) - atan2f(normY1, normX1)) / (float)M_PI * 180.0f;
    // correct angle at 180-degree overflow
    if (angle < -180.0f)
        angle += 360.0f;
    if (angle > 180.0f)
        angle -= 360.0f;
    return angle;
}
// ****************************************************************************
// HOST FUNCTIONS
// ****************************************************************************
// vector calculations
// Euclidean length of the 2D vector (xCoord, yCoord) — host reference version.
float getVectorLengthSerial(float xCoord, float yCoord) {
    const float squaredLength = xCoord * xCoord + yCoord * yCoord;
    return (float) sqrt(squaredLength);
}
// return the angle of the vector
// Polar angle of the vector (xCoord, yCoord) in degrees — host reference version.
float getVectorAngleSerial(float xCoord, float yCoord) {
    const float radians = atan2(yCoord, xCoord);
    const float degrees = (radians / M_PI) * 180.0f;
    return degrees;
}
// Signed angle in degrees, range (-180, 180], from vector (x1,y1) to (x2,y2)
// — host reference version.
float getAngleBetweenSerial(float x1, float y1, float x2, float y2) {
    // unit-length versions of both vectors (epsilon avoids division by zero)
    const float len1 = sqrt(x1 * x1 + y1 * y1) + FLT_EPSILON;
    const float ux1 = x1 / len1;
    const float uy1 = y1 / len1;
    const float len2 = sqrt(x2 * x2 + y2 * y2) + FLT_EPSILON;
    const float ux2 = x2 / len2;
    const float uy2 = y2 / len2;
    // signed angle from vector 1 to vector 2
    float angle = (atan2(uy2, ux2) - atan2(uy1, ux1)) / M_PI * 180.0f;
    // wrap back into (-180, 180]
    if (angle < -180.0f)
        angle += 360.0f;
    if (angle > 180.0f)
        angle -= 360.0f;
    return angle;
}
|
7,465 | //pass
//--gridDim=[196,1,1] --blockDim=[512,1,1]
// Kernel: increments data[i] for every i < N; one element per thread.
__global__
void incKernel(int *data, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;           // grid tail: nothing to do
    data[idx]++;
}
|
7,466 | #include <stdio.h>
// Intentionally empty kernel: used only to exercise a device launch.
__global__ void kernel(void) {
}
// Launches the empty kernel, then prints a greeting from the host.
int main(void) {
    kernel<<<1, 1>>>();            // asynchronous launch on the default stream
    printf("Hello World\n");
    return 0;
}
7,467 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#include <math_constants.h>
#include <cuda_runtime.h>
extern "C"
{
// Kernel: draw one truncated-normal sample per thread.
// vals[idx] ~ TruncatedNormal(mu[idx], sigma[idx]; [lo[idx], hi[idx]]).
// Two strategies: naive rejection from N(mu, sigma^2) when the truncation
// interval straddles the mean, or Robert's exponential-proposal
// accept/reject algorithm when both limits lie on the same side of it.
// Entries that fail to produce a sample within `maxtries` attempts keep
// their NaN initialization.
// NOTE(review): mu_len/sigma_len/lo_len/hi_len and nullnum are accepted but
// never read in this kernel -- presumably intended for parameter recycling;
// confirm against callers.
__global__ void
rtruncnorm_kernel(float *vals, int n,
float *mu, float *sigma,
float *lo, float *hi,
int mu_len, int sigma_len,
int lo_len, int hi_len,
int maxtries,
float nullnum,
int rng_a, // RNG seed constant
int rng_b, // RNG seed constant
int rng_c) // RNG seed constant
{
// Usual block/thread indexing...
int myblock = blockIdx.x + blockIdx.y * gridDim.x;
int blocksize = blockDim.x * blockDim.y * blockDim.z;
int subthread = threadIdx.z*(blockDim.x * blockDim.y) + threadIdx.y*blockDim.x + threadIdx.x;
int idx = myblock * blocksize + subthread;
// Notes/Hints from class
// i.e. threadIdx.x .y .z map these to a single index
//
// Check whether idx < N
if(idx < n) {
// initialize variables
int counter = 0; // counter for number of tries
int keeptrying = 1; // flag for when to stop the loop
int useRobert = 0; // flag to use Robert algorithm
float newnum; // initialize the number to be generated
vals[idx] = CUDART_NAN_F; // vals initializes to NaN
// Initialize random number generator
// (per-thread seed so every thread gets an independent stream)
curandState rng;
curand_init(rng_a*idx+rng_b,rng_c,0,&rng);
// Determine whether to use Robert algorithm.
// It will use the Robert's algorithm if the truncation limits are on the same side.
// std_lo/std_hi are the limits standardized to N(0,1) units.
float std_lo = (lo[idx] - mu[idx])/sigma[idx];
float std_hi = (hi[idx] - mu[idx])/sigma[idx];
if (std_lo * std_hi > 0) {
useRobert = 1;
}
// sampling by truncating random normal
if (!useRobert) {
while ((counter < maxtries) && (keeptrying)) {
counter = counter+1;
// Sample N(mu, sigma^2):
newnum = mu[idx] + sigma[idx]*curand_normal(&rng);
// if random number is within truncated space, do not reject, stop the loop
if ((newnum > lo[idx]) && (newnum < hi[idx]) ) {
keeptrying = 0;
vals[idx] = newnum;
} //end if. Else, try to generate another number
} // end while loop
} // end truncating random normal algorithm
// sampling using Robert algorithm
if (useRobert) {
float mu_minus; // truncation side
float alpha; //
float hitruncate;
float tmpunif;
float z;
float psi;
float tmparg; // temporary float for holding values to put in math functions
int negative = 0; // flag for whether truncating positive or negative side of normal
// we already know that std_lo and std_hi have the same sign;
// a negative-side interval is mirrored so sampling always happens
// on the positive tail, then reflected back at the end
if (std_lo < 0 ) {
negative = 1;
mu_minus = -std_hi;
hitruncate = -std_lo;
} else {
mu_minus = std_lo;
hitruncate = std_hi;
}
// alpha = (mu_minus + sqrtf(mu_minus^2+4))/2;
// optimal rate of the translated-exponential proposal
tmparg = mu_minus*mu_minus+4;
alpha = (mu_minus + sqrtf(tmparg))/2;
while((counter < maxtries) && keeptrying) {
counter = counter + 1;
// z = mu_minus + Exp(alpha)  (inverse-CDF sampling of the exponential)
tmpunif = curand_uniform(&rng);
z = mu_minus - __logf(tmpunif)/alpha;
// get psi (acceptance probability of the proposal z)
if (mu_minus < alpha) {
tmparg = -(alpha-z)*(alpha-z)/2;
psi = __expf(tmparg);
} else {
tmparg = -(alpha-z)*(alpha-z)/2 + (mu_minus-alpha)*(mu_minus-alpha)/2;
psi = __expf(tmparg);
}
// accept if U < psi, and if z is within the truncation area
tmpunif = curand_uniform(&rng);
if ((tmpunif < psi) && (z < hitruncate)) {
if (negative) {
newnum = mu[idx] - z*sigma[idx];
} else {
newnum = mu[idx] + z*sigma[idx];
}
vals[idx] = newnum;
keeptrying = 0;
}
} // end while loop
} // end if using Robert algorithm
// debugging purposes
// vals[idx] = (float) counter;
// vals[idx] = newnum;
} // end if(idx<n)
return;
} // end kernel
} // END extern "C"
// Other notes
// To get the random uniform curand_uniform(&rng)
// Setup the RNG:
// Sample:
// Sample the truncated normal
// mu for this index is mu[idx]
// sigma for this index is sigma[idx]
// a for this index is a[idx]
// b for this index is b[idx]
// X_i ~ Truncated-Normal(mu_i,sigma_i;[a_i,b_i])
|
7,468 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <cmath>
#include <cuda.h>
using namespace std;
#define GRID_SIZE 32
#define SHARED_MEM 16384
// Kernel: one kernel-smoothing step. Averages every y[i] whose x[i] lies
// within bandwidth h of the query point z, writing the mean to *returnVal.
// NOTE(review): smoothc() launches this with a single thread; the
// unsynchronized write to the __shared__ accumulator and the serial loop
// are only safe under that 1-thread configuration -- confirm before scaling up.
// NOTE(review): if no x[i] falls inside the window, count stays 0 and the
// final division produces inf/NaN -- verify callers tolerate that.
// zLoc is accepted but unused here.
__global__ void findY(float *x, float *y, int n, float h, float z, int zLoc, float *returnVal) {
// int col = blockIdx.x * blockDim.x + threadIdx.x;
// int row = blockIdx.y * blockDim.y + threadIdx.y;
__shared__ float sum;
sum = 0;
// float absVal = 0;
int count = 0;
for(int i = 0; i < n; i++) {
// absVal = abs(x[i] - z);
if(abs(x[i] - z) < h) {
//sum = atomicAdd(&sum, y[zLoc]);
sum += y[i];
// cuPrintf("sum = %d\n", sum);
count++;
}
}
*returnVal = sum / count;
// sum = 0;
// count = 0;
}
// Host driver: kernel-smooths (x, y) with bandwidth h, writing one smoothed
// value per input point into m. Fixes from review:
//  - device buffers are sized n*sizeof(float); the original hard-coded
//    80 bytes, which over-read the 40-byte host arrays and overflowed the
//    caller's float m[10] on the copy-back;
//  - the input copies are loop-invariant and are now done once, not n times;
//  - the kernel receives the real element count n instead of a hard-coded 10.
void smoothc(float *x, float *y, float *m, int n, float h) {
    // qsort(x,n,sizeof(float),compare_floats);
    dim3 dimGrid(1, 1);
    dim3 dimBlock(1, 1, 1);          // findY is written for a single thread
    size_t bytes = (size_t)n * sizeof(float);
    float *xChunk;
    float *yChunk;
    float *mdev;
    cudaMalloc((void **) &xChunk, bytes);
    cudaMalloc((void **) &yChunk, bytes);
    cudaMalloc((void **) &mdev, bytes);
    // inputs do not change between launches: copy them once, outside the loop
    cudaMemcpy(xChunk, x, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(yChunk, y, bytes, cudaMemcpyHostToDevice);
    for(int i = 0; i < n; i++) {
        // one launch per query point; result lands in mdev[i]
        findY<<<dimGrid, dimBlock>>>(xChunk, yChunk, n, h, x[i], i, &mdev[i]);
    }
    cudaMemcpy(m, mdev, bytes, cudaMemcpyDeviceToHost);   // blocking: also syncs
    cudaFree(xChunk);
    cudaFree(yChunk);
    cudaFree(mdev);
}
// Demo driver for smoothc(). The result array is now zero-initialized: the
// first print loop used to read (and print) uninitialized stack memory.
int main (int argc, char** argv) {
    float x[10] = {1,2,3,4,5,6,7,8,9,10};
    float y[10] = {11,12,13,14,15,16,17,18,19,20};
    float m[10] = {0};                   // zero-filled before the "before" dump
    for(int i = 0; i < 10; i++)
        cout << m[i] << endl;
    smoothc(x,y,m,10,3);
    cout<<"**********RETURN VALUES:***********"<<endl;
    for(int i = 0; i < 10; i++)
        cout << m[i] << endl;
    return 0;
}
|
7,469 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Device-wide spinlock: -1 means "free", otherwise holds the owner block id.
__device__ volatile int lock = -1;
// Global counter bumped once per warp of every block (stray ';' removed).
__device__ volatile int counter = 0;
// Kernel: exercises a two-level spinlock. Thread (0,0) of each block spins
// to acquire the global device-wide `lock`; then the first lane of each warp
// serializes on a block-local lock to increment the global `counter`.
// NOTE(review): counter++ is non-atomic and relies entirely on the lock
// protocol; on pre-Volta hardware, spinning on atomicCAS inside a warp can
// livelock. This looks like a deliberate stress test ("spinlol") -- confirm.
__global__ void spinlol()
{
__shared__ int intraCTAlock;
if (!threadIdx.x && !threadIdx.y)
intraCTAlock = -1;
__syncthreads();
if (!threadIdx.x && !threadIdx.y)
while (atomicCAS((int*)&lock, -1, blockIdx.x) != -1);
__syncthreads();
if (threadIdx.x % 32 == 0)
{
while (atomicCAS(&intraCTAlock, -1, 12) != -1);
counter++;
__threadfence();
atomicExch(&intraCTAlock, -1);
}
__syncthreads();
if (!threadIdx.x && !threadIdx.y)
atomicExch((int*)&lock, -1);
}
// Host driver: runs the spinlock kernel and reads back the device counter.
int main(int argc, char** argv)
{
    int hostcounter = -1;
    spinlol<<<60, 512>>>();
    cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
    printf("err = %s\n", cudaGetErrorString(cudaGetLastError()));
    // Pass the symbol itself: string symbol names were removed from the CUDA
    // runtime in CUDA 5.0, so the old "counter" form fails with
    // cudaErrorInvalidSymbol and leaves hostcounter at -1.
    cudaMemcpyFromSymbol(&hostcounter, counter, sizeof(int), 0, cudaMemcpyDeviceToHost);
    printf("counter = %d\n", hostcounter);
    return 0;
}
|
7,470 | //
// Created by igor on 26.03.2021.
//
#include "Vector2.cuh"
// Component-wise difference of two 2D vectors.
__host__ __device__ Vector2 operator-(const Vector2 &l, const Vector2 &r) {
    Vector2 diff = {l.x - r.x, l.y - r.y};
    return diff;
}
|
7,471 | #include <iostream>
#include "../ginkgo/GOrderList.h"
#include <thrust/device_vector.h>
#define def_dvec(t) thrust::device_vector<t>
using namespace std;
// Single-thread device test driver for gpu_ginkgo::OrderList<100, 6>.
// Walks the order-book structure through its life cycle -- creation, new
// orders, acks, cancels, swipes, an aggressive trade-through, and final
// clean-up -- printing the internal state after every step so the output
// can be inspected manually. pos/ppos/pnl/tqty accumulate position,
// pending position, PnL, and traded quantity via ggol.reset().
__global__ void test(){
int pos = 0, ppos = 0, pnl = 0, tqty;
// Creating an OrderList struct
gpu_ginkgo::OrderList<100, 6> ggol(true, 1024, 10);
ggol.getTime(1.5, 1.0);
printf("<<< CREATING A NEW ORDER LIST STRUCTURE >>>\n");
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Sending new Orders
printf("<<< SENDING FOUR NEW ORDERS >>>\n");
int q_lim = 25;
ggol.sendNewOrder(1024, q_lim);
ggol.sendNewOrder(1024, q_lim);
ggol.sendNewOrder(1025, q_lim);
ggol.sendNewOrder(1026, q_lim);
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Sending new Orders
printf("<<< SENDING ANOTHER ORDERS >>>\n");
q_lim = 20;
ggol.getTime(1.7, 1.0);
ggol.sendNewOrder(1026, q_lim);
ggol.sendNewOrder(1027, q_lim);
ggol.sendNewOrder(1028, q_lim);
ggol.sendNewOrder(1029, q_lim);
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Sending multiple orders to the last_level
printf("<<< SENDING ANOTHER ORDERS >>>\n");
ggol.getTime(2.0, 1.0);
q_lim = 2;
ggol.sendNewOrder(1029, q_lim);
q_lim = 2;
ggol.sendNewOrder(1029, q_lim);
q_lim = 2;
ggol.sendNewOrder(1029, q_lim);
q_lim = 2;
ggol.sendNewOrder(1029, q_lim);
q_lim = 2;
ggol.sendNewOrder(1029, q_lim);
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Acking new orders
// (only pending orders whose ack time has been reached get acked)
ggol.getTime(2.6, 1.0);
printf("<<< ACKING NEW ORDERS >>>");
for(auto j=ggol.porders.begin(); j!=ggol.porders.end(); ){
if(ggol.porders.at(j).acked_time < ggol.cur_time){
ggol.ackPendingOrder(j, 10);
}
else ggol.porders.increment(j);
}
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Canceling orders with price = 1026
// (both the pending and the acked books must be scanned)
printf("<<< CANCELING ORDERS WITH PRICE = 1026 >>>");
for(auto j=ggol.porders.begin(); j!=ggol.porders.end(); ){
if(ggol.porders.at(j).price == 1026){
ggol.cancelPendingOrder(j);
}
else ggol.porders.increment(j);
}
for(auto j=ggol.orders.begin(); j!=ggol.orders.end(); ){
if(ggol.orders.at(j).price == 1026){
ggol.cancelAckedOrder(j);
}
else ggol.orders.increment(j);
}
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Swipe orders with price = 1024
printf("<<< SWIPING ORDERS WITH PRICE = 1024 >>>");
for(auto j=ggol.orders.begin(); j!=ggol.orders.end(); ){
if(ggol.orders.at(j).price == 1024){
ggol.swipeAckedOrder(j);
}
else ggol.orders.increment(j);
}
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Acking new orders
ggol.getTime(2.71, 1.0);
printf("<<< ACKING NEW ORDERS >>>");
for(auto j=ggol.porders.begin(); j!=ggol.porders.end(); ){
if(ggol.porders.at(j).acked_time < ggol.cur_time){
ggol.ackPendingOrder(j, 10);
}
else ggol.porders.increment(j);
}
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// A trade comes with price = 1026, qty = 17
printf("<<< A AGGRESSIVE TRADE COMES WITH PRICE = 1026, QTY = 17 >>>");
int tv = 17, prc = 1026;
int book_size[100];
ggol.getTradedThrough(tv, prc, book_size);
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Canceling orders with price = 1029
ggol.getTime(2.71, 0.1);
printf("<<< CANCELING ORDERS WITH PRICE = 1029 >>>");
for(auto j=ggol.porders.begin(); j!=ggol.porders.end(); ){
if(ggol.porders.at(j).price == 1029){
ggol.cancelPendingOrder(j);
}
else ggol.porders.increment(j);
}
for(auto j=ggol.orders.begin(); j!=ggol.orders.end(); ){
if(ggol.orders.at(j).price == 1029){
ggol.cancelAckedOrder(j);
}
else ggol.orders.increment(j);
}
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Canceling orders with price = 1029
// (final sweep: drop every canceled order whose cancel time has passed)
ggol.getTime(3.91, 0.1);
printf("<<< CLEAN UP CANCEL ORDERS >>>");
for(auto j=ggol.corders.begin(); j!= ggol.corders.end(); ){
if(ggol.corders.at(j).cancel_time < ggol.cur_time){
ggol.cancelOrder(j);
}
else ggol.corders.increment(j);
}
ggol.showLevelQtyInfo();
ggol.showPendingOrderInfo();
ggol.showAckedOrderInfo();
ggol.showCanceledOrderInfo();
ggol.showUpdateInfo();
ggol.reset(pos, pnl, tqty);
printf("position = %d, pending position = %d, pnl = %d \n", pos, ppos, pnl);
printf("--------------------------------------------------------\n\n");
// Test finished
printf("\n <<< TEST FINISHED !!! >>>\n");
}
// Host entry point: launches the single-thread device test.
int main(){
    // throw-away device vector; allocating it initializes the CUDA/thrust
    // context before the launch (NOTE(review): appears to be its only
    // purpose -- it is never read)
    def_dvec(float) dev_out(1, 0);
    test<<<1, 1>>>();
    // Without this sync the host can exit before the kernel has executed,
    // discarding all of its printf output.
    cudaDeviceSynchronize();
    return 0;
}
7,472 | //#include "VoxelCopyToVAO.cuh"
//#include "COpenGLTypes.h"
//#include "CVoxelFunctions.cuh"
//#include "CSVOTypes.h"
//#include <cstdio>
//#include <cassert>
//#include "GIVoxelPages.h"
//
//__global__ void VoxCountPage(int& totalVox,
//
// const CVoxelPage* gVoxPages,
// const CVoxelGrid& gGridInfo,
// const uint32_t pageCount)
//{
// unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x;
// unsigned int pageId = globalId / GIVoxelPages::PageSize;
// unsigned int pageLocalId = (globalId - pageId * GIVoxelPages::PageSize);
//
// // All one normal means invalid voxel
// if(gVoxPages[pageId].dGridVoxPos[pageLocalId] != 0xFFFFFFFF)
// atomicAdd(&totalVox, 1);
//}
//
//__global__ void VoxCpyPage(// Two ogl Buffers for rendering used voxels
// CVoxelNormPos* voxelNormPosData,
// uchar4* voxelColorData,
// unsigned int& atomicIndex,
// const unsigned int maxBufferSize,
//
// // Per Obj Segment
// ushort2** gObjectAllocLocations,
//
// // Per obj
// unsigned int** gObjectAllocIndexLookup,
//
// // Per vox
// CVoxelAlbedo** gVoxelRenderData,
//
// // Page
// const CVoxelPage* gVoxPages,
// uint32_t pageCount,
// const CVoxelGrid& gGridInfo)
//{
// unsigned int globalId = threadIdx.x + blockIdx.x * blockDim.x;
// unsigned int pageId = blockIdx.x / GIVoxelPages::BlockPerPage;
// unsigned int pageLocalId = globalId - (pageId * GIVoxelPages::PageSize);
// unsigned int pageLocalSegmentId = pageLocalId / GIVoxelPages::SegmentSize;
// unsigned int segmentLocalVoxId = pageLocalId % GIVoxelPages::SegmentSize;
//
// // Skip whole segment if necessary
// if(ExpandOnlyOccupation(gVoxPages[pageId].dSegmentObjData[pageLocalSegmentId].packed) == SegmentOccupation::EMPTY) return;
// assert(ExpandOnlyOccupation(gVoxPages[pageId].dSegmentObjData[pageLocalSegmentId].packed) != SegmentOccupation::MARKED_FOR_CLEAR);
//
// // Data Read
// CVoxelPos voxPosPacked = gVoxPages[pageId].dGridVoxPos[pageLocalId];
//
// // All one normal means invalid voxel
// if(voxPosPacked != 0xFFFFFFFF)
// {
// CVoxelPos voxNormpacked = gVoxPages[pageId].dGridVoxNorm[pageLocalId];
//
// unsigned int index = atomicInc(&atomicIndex, 0xFFFFFFFF);
// assert(index < maxBufferSize);
//
// // Fetch obj Id to get color
// // ObjId Fetch
// ushort2 objectId;
// SegmentObjData objData = gVoxPages[pageId].dSegmentObjData[pageLocalSegmentId];
// objectId.x = objData.objId;
// objectId.y = objData.batchId;
// unsigned int cacheVoxelId = objData.voxStride + segmentLocalVoxId;
//
// voxelNormPosData[index] = uint2{voxPosPacked, voxNormpacked};
// voxelColorData[index] = gVoxelRenderData[objectId.y][cacheVoxelId];
// }
//}
//
|
7,473 | #include "cuda_runtime.h"
#include "stdio.h"
// Coordinate-format (COO) entry pairing a base point with a query point.
struct coo
{
int base_id;
int query_id;
float distance;
float for_align; // presumably padding to a 16-byte stride -- TODO confirm
};
// Kernel: atomically increments *ptr once per launched thread.
__global__ void Kernel(unsigned int * ptr){
atomicAdd(ptr, 1);
}
// Host driver: pre-allocates all managed buffers for a batched
// nearest-neighbour distance computation, runs a trivial counting kernel,
// then releases everything. (Comments translated from Chinese.)
int main()
{
size_t batch_len = 8192;
size_t data_num = 900000;
size_t data_dim = 34;
// check the device status
int device = 0;
cudaSetDevice(device);
printf("device checked\n");
// pre-allocate buffers
float *data_m, *result_m, *module_m;
coo *output_m;
unsigned int *take_num_m;
size_t data_size = data_num * data_dim * sizeof(float);
// _m means managed
cudaMallocManaged((void **)&data_m, data_size); // raw data
// memcpy(data_m, node_data, data_size);
// cudaMemPrefetchAsync(data_d,data_size,0,NULL);
cudaMallocManaged((void **)&module_m, data_num * sizeof(float)); // squared-norm data
// cudaMemPrefetchAsync(module_d,data_num*sizeof(float),0,NULL);
cudaMallocManaged((void **)&result_m, batch_len * batch_len * sizeof(float)); // distance results
// cudaMemPrefetchAsync(result_d,batch_num*batch_num*sizeof(float),0,NULL);
cudaMallocManaged((void **)&output_m, batch_len * batch_len * sizeof(coo)); // output coo entries
// cudaMemPrefetchAsync(output_d,batch_num*batch_num*sizeof(coo),0,NULL);
cudaMallocManaged((void **)&take_num_m, sizeof(int)); // number of edges taken
// cudaMemPrefetchAsync(take_num_m,sizeof(int),0,NULL);
Kernel<<<223,14>>>(take_num_m);
printf("pre-allocation done.\n");
// release buffers
// (cudaFree synchronizes with the outstanding kernel before freeing)
cudaFree(data_m);
cudaFree(result_m);
cudaFree(module_m);
cudaFree(output_m);
cudaFree(take_num_m);
return 0;
}
|
7,474 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <iostream>
#include <fstream>
#include <cstdlib>
#include <string>
#include <ctime>
#include <unistd.h>
#include <sys/time.h>
#define INPUT_BMP_FILE "lenabig.bmp"
#define OUTPUT_BMP_FILE_CPU "result-cpu.bmp"
#define OUTPUT_BMP_FILE_GPU "result-cuda.bmp"
#define FILTER_WINDOW_SIZE 9
#define BLOCK_SIZE 64
using namespace std;
//#pragma pack(push)
//#pragma pack(1)
// BMP file header, packed to match the on-disk layout (no struct padding).
struct FileHeader
{
char id[2];
unsigned int size;
int sth; // reserved fields in the BMP spec -- unused here
unsigned int offset;
} __attribute__ ((packed));
// Leading fields of the BMP DIB header; only size/width/height are read.
struct DibHeader
{
unsigned int dib_size;
unsigned int width;
unsigned int height;
} __attribute__ ((packed));
// One 24-bit BGR pixel, packed to 3 bytes as stored in the BMP file.
struct Pixel
{
unsigned char b;
unsigned char g;
unsigned char r;
} __attribute__ ((packed));
//#pragma pack(pop)
struct Times // execution times [ms]
{
float cuda;
float cudaOnlyComputation;
double cpu;
} executionTimes;
void medianFilterCpu(Pixel *input, Pixel *output, int width, int height);
Pixel medianFilterCpuHelper(Pixel *image, int width, int height, int y, int x);
unsigned char selection(unsigned char window[FILTER_WINDOW_SIZE]);
void medianFilterGpu(Pixel *input, Pixel *output, int width, int height);
__global__ void medianFilterCuda(Pixel *input, Pixel *output, int width, int height);
__device__ unsigned char selectionCuda(unsigned char window[FILTER_WINDOW_SIZE]);
void displayLastError(const string &msg);
//get time (for CPU)
// Wall-clock timestamp in milliseconds since the Unix epoch.
double get_timestamp()
{
    struct timeval tv;
    gettimeofday(&tv, NULL);
    unsigned long long micros = tv.tv_usec + tv.tv_sec * 1000000;
    return (double)micros / 1000.0;
}
// Host driver: loads a 24-bit BMP, median-filters it on both CPU and GPU,
// reports timings, and writes the two filtered images back out as BMPs.
int main(int argc, char *argv[])
{
//if(argc < 2)
//{
// cerr << "no input file specified" << endl;
// return 1;
//}
//opening input file
ifstream bmpFile(/*argv[1]*/INPUT_BMP_FILE, ios::in | ios::binary);
if(!bmpFile.is_open())
{
cerr << "file not opened" << endl;
exit(0);
}
//reading file headers
FileHeader header;
bmpFile.read((char*)&header, sizeof(header));
cout << "size:\t" << header.size << endl;
cout << "offset:\t" << header.offset << endl;
DibHeader dib;
bmpFile.read((char*)&dib, sizeof(dib));
cout << "DIB size:\t" << dib.dib_size << endl;
cout << "width:\t" << dib.width << endl;
cout << "height:\t" << dib.height << endl;
bmpFile.seekg(header.offset, ios_base::beg);
//reading image
// BMP stores rows bottom-up, each row padded to a 4-byte boundary;
// (for 3-byte pixels the pad happens to equal width % 4)
Pixel *image = new Pixel[dib.height*dib.width];
for(int y = dib.height - 1; y >= 0 ; y--)
{
for(int x = 0; x < dib.width; x++)
bmpFile.read((char*)&(image[y*dib.width + x]), sizeof(Pixel));
bmpFile.seekg(dib.width%4, ios_base::cur);
}
int devCnt;
cudaGetDeviceCount(&devCnt);
if(devCnt == 0)
{
perror("No CUDA devices available -- exiting.");
return 1;
}
Pixel *outputCpu = new Pixel[dib.width*dib.height];
Pixel *outputGpu = new Pixel[dib.width*dib.height];
// run the filters `num` times and average the recorded timings
int num = 1;
Times avgTimes = {0.0, 0.0, 0.0};
for(int i=0;i<num;i++)
{
medianFilterGpu(image, outputGpu, dib.width, dib.height);
medianFilterCpu(image, outputCpu, dib.width, dib.height);
avgTimes.cpu += executionTimes.cpu/(float)num;
avgTimes.cuda += executionTimes.cuda/(float)num;
avgTimes.cudaOnlyComputation += executionTimes.cudaOnlyComputation/(float)num;
}
cout << "times:\n";
cout << "CPU:\t\t" << avgTimes.cpu << endl;
cout << "GPU:\t\t" << avgTimes.cuda << endl;
cout << "GPU (computation):\t\t" << avgTimes.cudaOnlyComputation << endl;
//saving result bmp
// (the original headers are copied verbatim, then the filtered pixel
// rows are written bottom-up with the same 4-byte row padding)
ofstream bmpResultCpu(OUTPUT_BMP_FILE_CPU, ios::out | ios::binary);
ofstream bmpResultGpu(OUTPUT_BMP_FILE_GPU, ios::out | ios::binary);
char *buf = new char[header.offset];
char zerosBuf[4] = {0};
bmpFile.seekg(0, ios_base::beg);
bmpFile.read(buf, header.offset);
bmpResultCpu.write(buf, header.offset);
bmpResultGpu.write(buf, header.offset);
delete [] buf;
for(int y = dib.height - 1; y >= 0 ; y--)
{
for(int x = 0; x < dib.width; x++)
{
bmpResultCpu.write((char*)&(outputCpu[y*dib.width + x]), sizeof(Pixel));
bmpResultGpu.write((char*)&(outputGpu[y*dib.width + x]), sizeof(Pixel));
}
bmpResultCpu.write(zerosBuf, dib.width%4);
bmpResultGpu.write(zerosBuf, dib.width%4);
}
bmpResultCpu.close();
bmpResultGpu.close();
bmpFile.close();
cout << endl;
delete [] image;
delete [] outputCpu;
delete [] outputGpu;
return 0;
}
// CPU reference: median-filters every pixel of `input` into `output`,
// recording the elapsed wall time in executionTimes.cpu (milliseconds).
void medianFilterCpu(Pixel *input, Pixel *output, int width, int height)
{
    double start = get_timestamp();
    // each output pixel is independent, so traversal order is irrelevant
    for(int y = 0; y < height; y++)
    {
        for(int x = 0; x < width; x++)
        {
            output[y*width+x] = medianFilterCpuHelper(input, width, height, y, x);
        }
    }
    executionTimes.cpu = get_timestamp() - start;
}
// Computes the 3x3 median for one pixel, per channel.
// Border pixels are passed through unfiltered. The original guard covered
// only the top/left edges; at the right/bottom edges the (x+1)%width
// arithmetic silently wrapped the window to the opposite side of the image.
// With the symmetric guard every accessed neighbour is in range, so the
// modulo operations were dropped.
Pixel medianFilterCpuHelper(Pixel *image, int width, int height, int y, int x)
{
    if(x < 1 || y < 1 || x >= width - 1 || y >= height - 1)
        return image[y*width+x];
    Pixel p;
    unsigned char window[FILTER_WINDOW_SIZE];
    int idx;
    // red: gather the 3x3 neighbourhood, then take its median
    idx = 0;
    for(int dy = -1; dy <= 1; dy++)
        for(int dx = -1; dx <= 1; dx++)
            window[idx++] = image[(y+dy)*width + (x+dx)].r;
    p.r = selection(window);
    // green
    idx = 0;
    for(int dy = -1; dy <= 1; dy++)
        for(int dx = -1; dx <= 1; dx++)
            window[idx++] = image[(y+dy)*width + (x+dx)].g;
    p.g = selection(window);
    // blue
    idx = 0;
    for(int dy = -1; dy <= 1; dy++)
        for(int dx = -1; dx <= 1; dx++)
            window[idx++] = image[(y+dy)*width + (x+dx)].b;
    p.b = selection(window);
    return p;
}
// Median of the 9-element window via partial selection sort
// (http://en.wikipedia.org/wiki/Selection_algorithm). The window is
// partially reordered in place.
// To read the median at index SIZE/2, positions 0..SIZE/2 must all have
// been selected, so the loop bound is i <= SIZE/2. The original stopped at
// i < SIZE/2, leaving window[SIZE/2] unselected — e.g. for
// {1,2,3,4,9,5,6,7,8} it returned 9 instead of the median 5.
unsigned char selection(unsigned char window[FILTER_WINDOW_SIZE])
{
    for(int i = 0; i <= FILTER_WINDOW_SIZE / 2; i++)
    {
        int minIndex = i;
        unsigned char minValue = window[i];
        // find the smallest remaining element
        for(int j = i + 1; j < FILTER_WINDOW_SIZE; j++)
        {
            if(window[j] < minValue)
            {
                minIndex = j;
                minValue = window[j];
            }
        }
        // swap it into position i
        window[minIndex] = window[i];
        window[i] = minValue;
    }
    return window[FILTER_WINDOW_SIZE / 2];
}
// GPU path: uploads the image, runs the median-filter kernel, downloads the
// result, and records total and kernel-only times (ms) in executionTimes.
// Fixes: the original launched BLOCK_SIZE x BLOCK_SIZE = 64x64 = 4096
// threads per block, which exceeds the 1024-threads-per-block hardware
// limit, so every launch failed; the comp events were never destroyed; and
// deprecated cudaThreadSynchronize() was replaced.
void medianFilterGpu(Pixel *input, Pixel *output, int width, int height)
{
    cudaEvent_t start, stop, startComp, stopComp;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventCreate(&startComp);
    cudaEventCreate(&stopComp);
    cudaEventRecord(start, 0);
    size_t size = width * height * sizeof(Pixel);
    Pixel *deviceInputImage;
    cudaMalloc((void**)&deviceInputImage, size);
    displayLastError("input image memory allocation");
    cudaMemcpy(deviceInputImage, input, size, cudaMemcpyHostToDevice);
    displayLastError("input image memcpy");
    Pixel *deviceOutputImage;
    cudaMalloc((void**)&deviceOutputImage, size);
    displayLastError("output image memory allocation");
    // 16x16 = 256 threads per block (within the 1024 limit); the grid keeps
    // the original truncating division, so dimensions are assumed to be
    // multiples of the tile size, as before.
    const int tile = 16;
    dim3 dimBlock(tile, tile);
    dim3 dimGrid(width / tile, height / tile);
    cudaEventRecord(startComp, 0);
    medianFilterCuda<<<dimGrid, dimBlock>>>(deviceInputImage, deviceOutputImage, width, height);
    cudaEventRecord(stopComp, 0);
    cudaEventSynchronize(stopComp);
    cudaDeviceSynchronize();
    displayLastError("kernel");
    cudaMemcpy(output, deviceOutputImage, size, cudaMemcpyDeviceToHost);
    displayLastError("output image memcpy");
    cudaFree(deviceInputImage);
    displayLastError("freeing input image memory");
    cudaFree(deviceOutputImage);
    displayLastError("freeing output image memory");
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&executionTimes.cuda, start, stop);
    cudaEventElapsedTime(&executionTimes.cudaOnlyComputation, startComp, stopComp);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaEventDestroy(startComp);   // previously leaked
    cudaEventDestroy(stopComp);
}
//kernel
// Kernel: 3x3 median filter, one output pixel per thread (2D launch).
// Fixes: a grid-tail bounds guard was added, and the neighbourhood wrap now
// adds height/width before the modulo — in C, (row-1) % height is negative
// for row == 0, so the original indexed out of bounds along the top/left
// edges.
__global__ void medianFilterCuda(Pixel *input, Pixel *output, int width, int height)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= height || col >= width)
        return;
    // toroidally wrapped neighbour coordinates (row/col are in range here)
    int ys[3] = { (row - 1 + height) % height, row, (row + 1) % height };
    int xs[3] = { (col - 1 + width) % width, col, (col + 1) % width };
    unsigned char window[FILTER_WINDOW_SIZE];
    Pixel result;
    // red
    for (int k = 0; k < FILTER_WINDOW_SIZE; k++)
        window[k] = input[ys[k / 3] * width + xs[k % 3]].r;
    result.r = selectionCuda(window);
    // green
    for (int k = 0; k < FILTER_WINDOW_SIZE; k++)
        window[k] = input[ys[k / 3] * width + xs[k % 3]].g;
    result.g = selectionCuda(window);
    // blue
    for (int k = 0; k < FILTER_WINDOW_SIZE; k++)
        window[k] = input[ys[k / 3] * width + xs[k % 3]].b;
    result.b = selectionCuda(window);
    output[row * width + col] = result;
}
// Device median of the 9-element window via partial selection sort
// (http://en.wikipedia.org/wiki/Selection_algorithm). The window is
// partially reordered in place.
// Same fix as the host version: to read the median at index SIZE/2 the loop
// must also select position SIZE/2, so the bound is i <= SIZE/2 (the
// original's i < SIZE/2 returned a wrong median for some inputs).
__device__ unsigned char selectionCuda(unsigned char window[FILTER_WINDOW_SIZE])
{
    for(int i = 0; i <= FILTER_WINDOW_SIZE / 2; i++)
    {
        int minIndex = i;
        unsigned char minValue = window[i];
        // find the smallest remaining element
        for(int j = i + 1; j < FILTER_WINDOW_SIZE; j++)
        {
            if(window[j] < minValue)
            {
                minIndex = j;
                minValue = window[j];
            }
        }
        // swap it into position i
        window[minIndex] = window[i];
        window[i] = minValue;
    }
    return window[FILTER_WINDOW_SIZE / 2];
}
// Debug helper: when the body is uncommented it prints the most recent CUDA
// error together with the caller-supplied tag. Left disabled (no-op),
// presumably to keep timing runs quiet.
void displayLastError(const string &msg)
{
// cout << "Last Error (" << msg << "):\t" << cudaGetErrorString(cudaGetLastError()) << endl;
}
|
7,475 | #include <stdio.h>
#include <math.h>
#define N 512
#define MAX_ERR 1
// Kernel: element-wise out = a + b over n floats, grid-stride loop.
// Fix: the stride must be the total number of launched threads
// (blockDim.x * gridDim.x). The previous stride of blockDim.x alone made
// every block after the first redundantly re-process elements already
// covered by lower-numbered blocks.
__global__ void vector_add(float *out, float *a, float *b, int n){
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    int stride = blockDim.x * gridDim.x;
    for(int i = index; i < n; i += stride){
        //printf("i=%d \n", i);   // per-element debug trace, disabled
        out[i] = a[i] + b[i];
    }
}
// Host driver: adds two N-element vectors on the GPU and prints the result.
int main(){
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;
    // Allocate memory
    a = (float*)malloc(sizeof(float) * N);
    b = (float*)malloc(sizeof(float) * N);
    out = (float*)malloc(sizeof(float) * N);
    // Initialize array
    for(int i = 0; i < N; i++){
        a[i] = 1.0f; b[i] = 2.0f;
    }
    // Allocate device memory
    cudaMalloc((void**)&d_a, sizeof(float) * N);
    cudaMalloc((void**)&d_b, sizeof(float) * N);
    cudaMalloc((void**)&d_out, sizeof(float) * N);
    // Transfer data from host to device memory
    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);
    // Ceil-division launch: (N + 255) / 256 blocks of 256 threads.
    // The old (N + 256) / 256 always launched one extra, idle block.
    vector_add<<<(N + 255) / 256, 256>>>(d_out, d_a, d_b, N);
    // Blocking copy: also synchronizes with the kernel
    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);
    // Print results
    for(int i = 0; i < N; i++){
        //if(fabs(out[i] - a[i] - b[i]) < MAX_ERR )
        //printf("failed");
        printf("%i.- %f = %f + %f \n", i, out[i], a[i], b[i]);
    }
    // Cleanup after kernel execution
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    free(a);
    free(b);
    free(out);
    return 0;
}
|
7,476 | #include <cuda.h>
#include <device_launch_parameters.h>
// draw line as y = k*x+q
extern "C"
{
__constant__ int D_SIZE;
__constant__ float D_K;
__constant__ float D_Q;
//__constant__ float D_SCALE;
__constant__ float D_XSCALE;
__constant__ float D_YSCALE;
__constant__ float D_XMIN;
__constant__ float D_YMIN;
// Rasterize y = D_K*x + D_Q into a D_SIZE x D_SIZE ARGB pixel buffer.
// One thread per pixel column; the 2D grid of 1D blocks is flattened.
__global__ void DrawLineKernel(unsigned int *pixels, int count)
{
int tid = blockDim.x * blockIdx.y * gridDim.x
+ blockDim.x * blockIdx.x
+ threadIdx.x;
if (tid >= count)
return;
// Map the pixel column back to world x and evaluate the line.
float worldX = (tid / D_XSCALE) + D_XMIN;
float worldY = D_K * worldX + D_Q;
// Convert world y into a pixel row; the image origin is at the top.
int row = D_SIZE - (int)((worldY - D_YMIN) * D_YSCALE);
// Only paint rows that fall inside the image.
if (row < D_SIZE && row > 0)
pixels[row * D_SIZE + tid] = 0xFFFF0000;
}
} |
7,477 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform.h>
#include <thrust/fill.h>
#include <thrust/sequence.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>
#include <cstdio>
#include <cstdlib>
#include <sys/time.h>
// Wall-clock time in seconds with microsecond resolution (gettimeofday).
double wtime() {
struct timeval now;
gettimeofday(&now, NULL);
return now.tv_sec + now.tv_usec * 1E-6;
}
// Functor computing a*x + y for one zipped (x, y) element pair.
struct saxpy_functor
{
const float a;  // scale factor captured at construction
saxpy_functor(float _a) : a(_a) {}
__host__ __device__ float operator() (thrust::tuple<float&, float&> t)
{
return a * thrust::get<0>(t) + thrust::get<1>(t);
}
};
// y <- a*x + y over device vectors, as one fused transform on zipped pairs.
void saxpy(float a, thrust::device_vector<float> &x,
thrust::device_vector<float> &y)
{
typedef thrust::device_vector<float>::iterator FloatIt;
thrust::zip_iterator< thrust::tuple<FloatIt, FloatIt> > first =
thrust::make_zip_iterator(thrust::make_tuple(x.begin(), y.begin()));
thrust::zip_iterator< thrust::tuple<FloatIt, FloatIt> > last =
thrust::make_zip_iterator(thrust::make_tuple(x.end(), y.end()));
thrust::transform(first, last, y.begin(), saxpy_functor(a));
}
// Print a prefix line, then one row per index with the matching elements
// of both vectors.
__host__ void print_array(thrust::host_vector<float> &data1,
thrust::host_vector<float> &data2, int num_elem, const char *prefix)
{
printf("\n%s", prefix);
int i = 0;
while (i < num_elem) {
printf("\n%2d: %2.4f %2.4f ", i + 1, data1[i], data2[i]);
++i;
}
}
// Driver: runs thrust-based SAXPY on num_elem elements (argv[1], default 8)
// and reports the wall-clock time.
int main(int argc, const char **argv)
{
double time = wtime();
int num_elem = (argc > 1) ? std::atoi(argv[1]) : 8;
const float a = 2.0f;  // float literal; 2.0 was a silent double
thrust::host_vector<float> h1(num_elem);
thrust::host_vector<float> h2(num_elem);
thrust::sequence(h1.begin(), h1.end());
thrust::fill(h2.begin(), h2.end(), 0.0f);
// BUG FIX: `argc < 1` can never be true (argc counts the program name),
// so the arrays were never printed. Print them in the default small case.
if (argc <= 1)
print_array(h1, h2, num_elem, "Before Set");
thrust::device_vector<float> d1 = h1;
thrust::device_vector<float> d2 = h2;
saxpy(a, d1, d2);
// Copy results back to the host for display.
h2 = d2;
h1 = d1;
if (argc <= 1) {
print_array(h1, h2, num_elem, "After Set");
printf("\n");
}
time = wtime() - time;
printf("Time SAXPY thrust: %.6f s\n", time);
return 0;
}
|
7,478 | #include "includes.h"
// Scale every element of `data` by `probability` (inference-time dropout
// scaling). Launch-agnostic: the whole grid strides over the array.
__global__ void dropout_test(float* data, int size, float probability)
{
const int gid = threadIdx.x + blockIdx.x * blockDim.x;
const int stride = blockDim.x * gridDim.x;
// Grid-stride traversal with a tail guard.
for (int base = 0; base < size; base += stride)
{
const int idx = base + gid;
if (idx < size)
data[idx] = data[idx] * probability;
}
} |
7,479 | // In this assignment you will see how different mapping of threads
// to data cab affect the performance.
//
// Your task is to write two kernels for vector addition each
// accessing data differently and choose how many threads and blocs
// to use.
//
// In your first kernel each thread will process M elements of the
// resulting vector C. Each thread will access data with step 1. That
// is tread 0 of the first block will access data for C_0 up to C_(M-1).
// Next thread will access data for C_M up to C_(2*M-1) ... thread T
// will access C_(thread_id*M) up to C_((thread_id+1)*M-1).
// In the second kernel threads will access elements with step M.
// That is for thread 0: C_0, C_M, C_(2*M), ... ,C_((M-1)*M).
// In general threads with id i will access C_(f*M+i) for all
// integers 0 < f < M.
// NOTE: You should finish your vector addition assignment first, before
// doing this one.
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#define M 32
//----------------------------------------------------------------------
// TASK 1: Write kernel for vector addition where threads access data with
// step 1 and will calculate M elements of the vector C in total.
//
// To calculate the index of the data which given thread should operate
// on use pre-set variables threadIdx, blockIdx, blockDim and gridDim.
// Each thread adds M consecutive elements (stride 1 within a thread), so
// adjacent threads touch regions M apart -> uncoalesced warp accesses.
__global__ void vector_add_uncoalesced(float *d_C, float *d_A, float *d_B){
const int base = (blockIdx.x * blockDim.x + threadIdx.x) * M;
for (int k = 0; k < M; ++k) {
const int e = base + k;
d_C[e] = d_A[e] + d_B[e];
}
}
//----------------------------------------------------------------------
//----------------------------------------------------------------------
// TASK 2: Write kernel for vector addition where threads access data with
// step M and will calculate M elements of the vector C in total.
//
// To calculate the index of the data which given thread should operate
// on use pre-set variables threadIdx, blockIdx, blockDim and gridDim.
// write your kernel here
// Each thread adds M elements spaced blockDim.x apart, so on every
// iteration a warp touches one contiguous run -> coalesced accesses.
__global__ void vector_add_coalesced(float *d_C, float *d_A, float *d_B){
const int start = blockIdx.x * blockDim.x * M + threadIdx.x;
for (int k = 0; k < M; ++k) {
const int e = start + k * blockDim.x;
d_C[e] = d_A[e] + d_B[e];
}
}
//----------------------------------------------------------------------
// Minimal CUDA-event stopwatch for timing work on the default stream.
struct GpuTimer {
cudaEvent_t start;
cudaEvent_t stop;
// Create both events up front; released by the destructor.
GpuTimer() {
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer() {
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Mark the beginning of the timed region (default stream).
void Start() {
cudaEventRecord(start, 0);
}
// Mark the end of the timed region (default stream).
void Stop() {
cudaEventRecord(stop, 0);
}
// Wait for the stop event to complete, then return the Start->Stop
// interval in milliseconds.
float Elapsed() {
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
// Benchmark driver: times 10 launches each of the coalesced and
// uncoalesced vector-add kernels on N = 5120 elements, then spot-checks
// the result of the last kernel run.
int main(void) {
GpuTimer timer;
size_t N = 5120;
float *h_A, *h_B, *h_C;
h_A = (float*) malloc(N*sizeof(*h_A));
h_B = (float*) malloc(N*sizeof(*h_B));
h_C = (float*) malloc(N*sizeof(*h_C));
// Deterministic inputs: A[f] = B[f] = f + 1, C zeroed.
for(size_t f=0; f<N; f++) {
h_A[f] = f + 1.0;
h_B[f] = f + 1.0;
h_C[f] = 0;
}
// Select device 0 if present; bail out otherwise.
int deviceid = 0;
int devCount;
cudaGetDeviceCount(&devCount);
if(deviceid<devCount) cudaSetDevice(deviceid);
else return(1);
float *d_A, *d_B, *d_C;
cudaMalloc(&d_A, N*sizeof(*d_A));
cudaMalloc(&d_B, N*sizeof(*d_B));
cudaMalloc(&d_C, N*sizeof(*d_C));
cudaMemcpy(d_A, h_A, N*sizeof(*h_A), cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, N*sizeof(*h_B), cudaMemcpyHostToDevice);
timer.Start();
for(int f=0; f<10; f++){
//----------------------------------------------------------------------
// TASK 3: Configure vector_add_coalesced. You must take into account
// how many elements are processed per thread
// 5 blocks x 32 threads x M(=32) elements per thread = 5120 = N.
dim3 Gd(5,1,1);
dim3 Bd(32,1,1);
vector_add_coalesced<<<Gd, Bd>>>(d_C, d_A, d_B);
//----------------------------------------------------------------------
}
timer.Stop();
// Average over the 10 launches; Elapsed() synchronizes on the stop event.
printf("Vector addition with coalesced memory access execution time: %f\n", timer.Elapsed()/10.0);
timer.Start();
for(int f=0; f<10; f++){
//----------------------------------------------------------------------
// TASK 4: Configure vector_add_uncoalesced. You must take into account
// how many elements are processed per thread
// Same geometry as above: 5 x 32 threads covering all 5120 elements.
dim3 Gd(5,1,1);
dim3 Bd(32,1,1);
vector_add_uncoalesced<<<Gd, Bd>>>(d_C, d_A, d_B);
//----------------------------------------------------------------------
}
timer.Stop();
printf("Vector addition with uncoalesced memory access execution time: %f\n", timer.Elapsed()/10.0);
// Copy back the (last) result and print a few rows for manual checking.
cudaMemcpy(h_C, d_C, N*sizeof(float), cudaMemcpyDeviceToHost);
if(N<100000){
printf("Check:\n");
for(int f=0; f<10; f++){
printf("Is %f + %f = %f?\n", h_A[f], h_B[f], h_C[f]);
}
}
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
free(h_A);
free(h_B);
free(h_C);
return(0);
}
|
7,480 | #include <iostream>
// Single-thread device addition of two scalars: *c = *a + *b.
__global__ void add(int*a, int*b, int*c) {
int lhs = *a;
int rhs = *b;
*c = lhs + rhs;
}
// Adds two host integers on the GPU and prints the result.
int main(void){
int a = 1, b = 1, c;      // host values (result in c)
int *d_a, *d_b, *d_c;     // device mirrors
const int nbytes = sizeof(int);
// Device allocations.
cudaMalloc((void**)&d_a, nbytes);
cudaMalloc((void**)&d_b, nbytes);
cudaMalloc((void**)&d_c, nbytes);
// Ship the inputs to the device.
cudaMemcpy(d_a, &a, nbytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, &b, nbytes, cudaMemcpyHostToDevice);
// Single-thread launch, then fetch the sum back.
add<<<1,1>>>(d_a,d_b,d_c);
cudaMemcpy(&c, d_c, nbytes, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// Surface any launch/execution error.
cudaError_t error = cudaGetLastError();
if (error != cudaSuccess) {
printf("Error: %s\n", cudaGetErrorString(error));
}
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
printf("%d", c);
return 0;
}
|
7,481 | #include <stdio.h>
// Host-only hello world; no device code involved.
int main(void) {
printf("Hello World from CPU!\n");
return 0;
} |
7,482 | #include<stdio.h>
#include<stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Enumerate all CUDA devices and print name, memory clock, bus width and
// the derived theoretical bandwidth (DDR: 2 transfers per clock).
int main() {
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for (int dev = 0; dev < deviceCount; dev++) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, dev);
printf("Dispositivo : %d\n", dev);
printf(" Nombre : %s\n", prop.name);
printf(" Frecuencia Reloj : %d KHz\n", prop.memoryClockRate);
printf(" Ancho del Bus de Memoria : %d bits\n", prop.memoryBusWidth);
printf(" Ancho de Banda : %f GB/s\n\n",
2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
}
}
|
7,483 | #include "includes.h"
// 2D convolution of a single-channel 8-bit image with weights read from
// the __constant__ array `convolutionKernel` starting at kOffset.
// One thread per output pixel on a 2D grid.
// NOTE(review): the validity test below uses gridDim*blockDim as the image
// extent -- confirm every launch sizes the grid to match width/height.
__global__ void convolve(unsigned char *source, int width, int height, int paddingX, int paddingY, ssize_t kOffset, int kWidth, int kHeight, unsigned char *destination)
{
// Calculate our pixel's location
int x = (blockIdx.x * blockDim.x) + threadIdx.x;
int y = (blockIdx.y * blockDim.y) + threadIdx.y;
float sum = 0.0f;
int pWidth = kWidth/2;
int pHeight = kHeight/2;
// Accumulate only where the whole neighborhood lies inside the padded
// region; border pixels keep sum == 0.
if(x >= pWidth+paddingX &&
y >= pHeight+paddingY &&
x < (gridDim.x * blockDim.x)-pWidth-paddingX &&
y < (gridDim.y *blockDim.y )-pHeight-paddingY)
{
for(int j = -pHeight; j <= pHeight; j++)
{
for(int i = -pWidth; i <= pWidth; i++)
{
// Sample the weight for this location
int ki = (i+pWidth);
int kj = (j+pHeight);
float w = convolutionKernel[(kj * kWidth) + ki + kOffset];
sum += w * float(source[((y+j) * width) + (x+i)]);
}
}
}
// BUG FIX: guard the store. The grid may be larger than the image, and
// the original wrote destination[y*width + x] unconditionally, producing
// out-of-bounds writes for threads past the image edge.
if (x < width && y < height)
destination[(y * width) + x] = (unsigned char) sum;
} |
7,484 | #include <iostream>
#include <algorithm>
#include <list>
#include <vector>
#include <iterator>
#include <functional>
#include <time.h>
#include <chrono>
#include <cstdlib>
using namespace std;
// A subset of items: a bitmask of the members plus the subset's total
// weight and profit. Ordering compares by weight only (used when merging
// sorted subset lists).
struct triple
{
long long int set; // bit i set <=> item i belongs to the subset
long int w;        // total weight of the subset
long int p;        // total profit of the subset
// Default-construct the empty subset with zero weight and profit.
triple() : set(0), w(0), p(0) {}
bool operator< (const triple &t) const { return w < t.w; }
bool operator> (const triple &t) const { return w > t.w; }
};
//Input : V, the (weight, profit) pairs of all items
//Output : A = every subset of the FIRST half of the items, sorted by
//         weight ascending; B = every subset of the SECOND half, sorted
//         by weight DESCENDING (note the greater<> comparator below).
// Each list is built incrementally: extending every existing subset with
// the next item doubles the list, and each doubling is merged back in
// sorted order, so |A| = 2^(n/2) and |B| = 2^(n - n/2).
void merge_lists(vector<triple> &A, vector<triple> &B, vector< pair<long long int, long long int> > &V)
{
vector<triple> T_p, Tcopy;
triple t;
// Split the items: the first v1s go to A, the remaining v2s to B.
long long int v1s = V.size() >> 1, v2s = V.size() - v1s;
//Initialisation for A: start from the empty subset.
t.set = 0, t.w = t.p = 0;
A.push_back(t);
// Grow A one item at a time, keeping it sorted by weight ascending.
for (long long int i = 0; i < v1s; ++i)
{
T_p.clear();
Tcopy.clear();
// T_p = every current subset of A extended with item i.
for (long long int j = 0; j < (long long int)A.size(); ++j)
{
t.set = A[j].set + (1 << i);
t.w = A[j].w + V[i].first;
t.p = A[j].p + V[i].second;
T_p.push_back(t);
}
//Merge A, T_p (both already sorted, so the result stays sorted)
merge(A.begin(), A.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy));
A = Tcopy;
}
//Initialisation for B: start from the empty subset.
t.set = 0, t.w = t.p = 0;
B.push_back(t);
// Grow B the same way, but kept sorted by weight DESCENDING.
// NOTE(review): B's masks use bit (1 << i), i.e. local to the second
// half -- confirm consumers decode them with the v1s offset in mind.
for (long long int i = 0; i < v2s; ++i)
{
T_p.clear();
Tcopy.clear();
// T_p = every current subset of B extended with item (i + v1s).
for (long long int j = 0; j < (long long int)B.size(); ++j)
{
t.set = B[j].set + (1 << i);
t.w = B[j].w + V[i + v1s].first;
t.p = B[j].p + V[i + v1s].second;
T_p.push_back(t);
}
//Merge B, T_p with greater<> to preserve the descending order
merge(B.begin(), B.end(), T_p.begin(), T_p.end(), back_inserter(Tcopy), greater<struct triple>());
B = Tcopy;
}
}
// Reverse inclusive max-scan over the profits of B:
// maxB[i] = (max profit over B[i..end], index where that max occurs).
void maxScan(vector<triple> &B, vector< pair<int, long int> > &maxB)
{
const long int n = B.size();
// Seed the scan with the final element.
maxB[n - 1].first = B[n - 1].p;
maxB[n - 1].second = n - 1;
// Sweep backwards, carrying the best suffix entry along.
for (long int idx = n - 2; idx >= 0; idx--)
{
if (B[idx].p > maxB[idx + 1].first)
{
maxB[idx].first = B[idx].p;
maxB[idx].second = idx;
}
else
{
maxB[idx] = maxB[idx + 1];
}
}
}
// Two-pointer sweep combining A (sorted by weight ascending) with B
// (sorted by weight descending), where maxB is the reverse max-scan of
// B's profits: once A[i].w + B[j].w <= c, every element of B[j..N-1] is
// light enough, so maxB[j].first is the best feasible partner profit.
// Returns the best combined profit found.
// NOTE(review): bestValue is an int while the return type is long int,
// and maxB here is pair<int, long long int> vs pair<int, long int> in
// maxScan -- confirm the intended widths before reusing this helper.
long int generate_sets(vector<triple> &A, vector<triple> &B, const int &c,
vector< pair<int, long long int> > &maxB, long int N)
{
int bestValue = 0;
// (A's subset bitmask, index into B) of the current best combination.
pair<long long int, long long int> bestSet;
long long int i = 0, j = 0;
while (i < N && j < N)
{
// Too heavy: advance j toward lighter B elements (B is descending).
if (A[i].w + B[j].w > c)
{
++j;
if (j == N) break;
else continue;
}
// Feasible: best partner for A[i] is the max-profit suffix entry.
if (A[i].p + maxB[j].first > bestValue)
{
bestValue = A[i].p + maxB[j].first;
bestSet = make_pair(A[i].set, maxB[j].second);
}
++i;
}
return bestValue;
}
// Classic O(n*W) bottom-up 0/1 knapsack DP; prints the best profit.
// W: capacity; wt/val: item weights and profits (whole numbers that the
// caller happens to store in doubles); n: item count.
void dp_knapSack(long long int W, double wt[], double val[], long long int n)
{
long long int i, w;
// K[i][w] = best profit using the first i items within capacity w.
vector< vector<double> > K(n + 1, vector<double>(W + 1));
// Build table K[][] in bottom up manner
for (i = 0; i <= n; i++)
{
for (w = 0; w <= W; w++)
{
if (i == 0 || w == 0)
K[i][w] = 0;
else if (wt[i - 1] <= w)
{
// Explicit integral cast: the original indexed the vector
// with a raw double (w - wt[i-1]), relying on an implicit
// narrowing conversion.
long long int rem = w - (long long int)wt[i - 1];
K[i][w] = max(val[i - 1] + K[i - 1][rem], K[i - 1][w]);
}
else
K[i][w] = K[i - 1][w];
}
}
cout << "\n\n\tBest_DP: " << K[n][W] << endl;
}
//Input : Sorted Lists -> (A, B)
//Output : Partitioned Sorted Lists -> (Ak, Bk) with N/k elements each
// Copies k equal contiguous slices of A and B into the pre-sized block
// vectors Ak and Bk.
void list_to_blocks(vector<triple> &A, vector<triple> &B, vector< vector<triple> > &Ak,
vector< vector<triple> > &Bk, int k)
{
const long long int blockLen = A.size() / k;
for (long long int blk = 0; blk < k; ++blk)
{
vector<triple>::iterator aFirst = A.begin() + blk * blockLen;
vector<triple>::iterator bFirst = B.begin() + blk * blockLen;
copy(aFirst, aFirst + blockLen, back_inserter(Ak[blk]));
copy(bFirst, bFirst + blockLen, back_inserter(Bk[blk]));
}
}
//Input : Partitioned Sorted Lists -> (Ak, Bk)
//Output : maxA[i]/maxB[i] = maximum profit inside block i of each list.
void fsave_max_val(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk,
vector<double> &maxA, vector<double> &maxB)
{
//Needs to be dynamic if not equally partitioned (if N/k not an int)
long long int e = maxA.size(), i, j;
double Amax, Bmax;
// BUG FIX: `e` was listed in private(...), which gives every thread an
// UNINITIALIZED private copy, so the loop bound `i < e` read garbage
// whenever this was compiled with OpenMP. The read-only size must be
// shared (firstprivate would also work).
#pragma omp parallel for shared(Ak, Bk, maxA, maxB, e) private(i, j, Amax, Bmax)
for (i = 0; i < e; ++i)
{
Amax = Ak[i][0].p;
Bmax = Bk[i][0].p;
// Linear max over each block's profits.
for (j = 1; j < (long long int)Ak[i].size(); ++j)
{
Amax = (Amax < Ak[i][j].p) ? Ak[i][j].p : Amax;
Bmax = (Bmax < Bk[i][j].p) ? Bk[i][j].p : Bmax;
}
maxA[i] = Amax;
maxB[i] = Bmax;
}
}
//Input : Ak, Bk, capacity c, per-block maxima maxA/maxB
//Output : candidate[i] = B-blocks partially feasible with Ak[i];
//         bestValue raised by any pair that is provably fully feasible.
void prune(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector<double> &maxA,
vector<double> &maxB, vector< vector<int> > &candidate, double &bestValue)
{
int Z, Y;
int i, j, k = Ak.size(), e = Ak[0].size();
vector<int> maxValue(k);
// BUG FIX: `c` and `e` were in private(...), giving each thread
// UNINITIALIZED copies of the capacity and block length under OpenMP.
// They are read-only here, so they must be shared.
#pragma omp parallel for reduction(max:bestValue) shared(Ak, Bk, maxA, maxB, maxValue, candidate, c, e) private(i, j, Z, Y)
for (i = 0; i < k; ++i)
{
maxValue[i] = 0;
for (j = 0; j < k; ++j)
{
// Z: lightest-A + last-B element weight; Y: heaviest-A + first-B.
Z = Ak[i][0].w + Bk[j][e - 1].w;
Y = Ak[i][e - 1].w + Bk[j][0].w;
if (Y <= c)
{
// The whole pair is feasible: its best profit is known.
if (maxA[i] + maxB[j] > maxValue[i])
maxValue[i] = maxA[i] + maxB[j];
if (bestValue < maxValue[i])
bestValue = maxValue[i];
}
else if (Z <= c && Y > c)
candidate[i].push_back(j); // partially feasible: search later
}
}
}
//Input : Candidate Block Pairs -> candidate
//Output : Max[i][j][t] = (best profit, index) over Bk[l][j..end] for the
//         t-th candidate block l of Ak[i] (reverse inclusive max-scan).
// (bestValue is accepted for interface compatibility but not used here.)
void ssave_max_val(vector< vector<triple> > &Bk, vector< vector< vector< pair<double, long long int> > > > &Max,
vector< vector<int> > &candidate, double &bestValue)
{
int i, t, l, k = Bk.size();
int j, e = Bk[0].size();
// BUG FIX: `k` and `e` were in private(...): under OpenMP every thread
// saw UNINITIALIZED loop bounds. Share the read-only sizes instead.
#pragma omp parallel for shared(Bk, candidate, Max, k, e) private(i, j, t, l)
for (i = 0; i < k; ++i)
{
for (t = 0; t < (int)candidate[i].size(); ++t)
{
//l is the Index of The Block Partition B of the Candidate Block Pair (Bk[l])
l = candidate[i][t];
// Seed the scan with the last element and its index.
Max[i][e - 1][t].first = Bk[l][e - 1].p;
Max[i][e - 1][t].second = e - 1;
//Reverse Inclusive Max-Scan with argmax tracking
for (j = e - 2; j > -1; --j)
{
if (Bk[l][j].p > Max[i][j + 1][t].first)
{
Max[i][j][t].first = Bk[l][j].p;
Max[i][j][t].second = j;
}
else
{
Max[i][j][t].first = Max[i][j + 1][t].first;
Max[i][j][t].second = Max[i][j + 1][t].second;
}
}
}
}
}
//Input : candidate, Max (per-candidate reverse max-scans of B profits)
//Output : bestValue and the pair of subset bitmasks achieving it.
// For each block pair, a two-pointer sweep: X walks Ak[i] (ascending
// weight), Y walks Bk[l]; Max[i][Y][t] supplies the best feasible
// B-partner once the weight constraint holds.
void par_search(vector< vector<triple> > &Ak, vector< vector<triple> > &Bk, double c, vector< vector<int> > &candidate,
vector< vector< vector< pair<double, long long int> > > > &Max, double &bestValue)
{
int i, t, l, k = Ak.size();
long long int e = Ak[0].size(), X, Y;
vector<double> maxValue(k);
vector< pair<long long int, long long int> > Xi(k);
//Xi -> (Index ID of Subset A, Index ID of Subset B)
// BUG FIX: `e` and `k` were in private(...), so every thread's loop
// bounds were uninitialized garbage under OpenMP. They are read-only:
// share them. (The unused loop variable j was also dropped.)
#pragma omp parallel for shared(Ak, Bk, candidate, Max, Xi, maxValue, e, k) private(i, t, l, X, Y)
for (i = 0; i < k; ++i)
{
maxValue[i] = 0;
Xi[i].first = 0, Xi[i].second = 0;
for (t = 0; t < (int)candidate[i].size(); ++t)
{
l = candidate[i][t];
X = 0, Y = 0;
while (X < e && Y < e)
{
if (Ak[i][X].w + Bk[l][Y].w > c)
{
++Y;   // pair too heavy: move to a lighter B element
continue;
}
else if (Ak[i][X].p + Max[i][Y][t].first > maxValue[i])
{
maxValue[i] = Ak[i][X].p + Max[i][Y][t].first;
Xi[i].first = Ak[i][X].set;
Xi[i].second = Bk[l][Max[i][Y][t].second].set;
}
++X;
}
}
}
//Evaluate Maximum Profit from max(maxValue[i])
long long int X1 = Xi[0].first, X2 = Xi[0].second;
for (i = 0; i < k; ++i)
if (bestValue < maxValue[i])
{
bestValue = maxValue[i];
X1 = Xi[i].first;
X2 = Xi[i].second;
}
//The Subset ID from A, Subset ID from B which gives Maximum Profit (Best Value)
cout << "\n\tSubsets : " << X1 << ", " << X2 << endl;
cout << "\tBestvalue : " << bestValue << endl;
}
// Driver: random knapsack instance, meet-in-the-middle subset lists,
// block partitioning, pruning, and timing against the (disabled) DP.
int main()
{
//Input Data
int c = 0;
vector< pair<long long int, long long int> > V;
vector<double> wt_arr, p_arr;
srand(time(0));
//Number of Items
int num_items = 10;
// Random whole-number weights/profits; c accumulates the total weight.
for (int i = 0; i < num_items; ++i)
{
double wt = rand() % (long int)1e7;
double p = rand() % (long int)1e7;
c += wt;
V.push_back(make_pair(wt, p));
wt_arr.push_back(wt);
p_arr.push_back(p);
}
//Set capacity to half of the total weight
c /= 2;
printf("\n\tCapacity = %d\n", c);
//Computation & Timing
auto start = chrono::steady_clock::now();
/*
[Ak, Bk] -> Ak has k Blocks with N/k elements each
[maxA, maxB] -> maxI has one Element for each Block of List I
[candidate] -> candidate[i] is a Vector of Blocks of Bk, which are candidate solutions with Ak[i]
[Max[i][j][t], L[j][t]] -> Pair of Maximum Profit & Respective Index with reference to candidate[i]
*/
int k = 4; //Number of Partitions
// BUG FIX: this was `1 << (num_items >> 12)`, i.e. 1 << 0 == 1 for any
// realistic num_items, which makes e == 0, leaves every block empty and
// sends fsave_max_val indexing Ak[i][0] out of bounds. merge_lists
// builds 2^(num_items/2) subsets per half-list, so N must match that.
long long int N = 1LL << (num_items >> 1); //Subsets per half-list
long long int e = N / k; //Number of Elements per Block
double bestValue = -1;
vector<triple> A, B;
vector< vector<triple> > Ak(k, vector<triple>());
vector< vector<triple> > Bk(k, vector<triple>());
vector<double> maxA(k), maxB(k);
vector< vector<int> > candidate(k);
// NOTE(review): the innermost dimension (2) caps the candidate pairs
// ssave_max_val may index per block at 2, yet candidate[i] can hold up
// to k entries -- confirm this bound before enabling par_search.
vector< vector< vector< pair<double, long long int> > > > Max(k, vector< vector< pair<double, long long int> > >(e, vector< pair<double, long long int> >(2)));
merge_lists(A, B, V); //Currently Serial Merging
list_to_blocks(A, B, Ak, Bk, k); //Partition Lists to Blocks
fsave_max_val(Ak, Bk, maxA, maxB); //Per-block profit maxima
prune(Ak, Bk, c, maxA, maxB, candidate, bestValue);
ssave_max_val(Bk, Max, candidate, bestValue);
//par_search(Ak, Bk, c, candidate, Max, bestValue);
auto stop = chrono::steady_clock::now();
cout << "\n Computational Time (Parallel) : ";
cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0;
cout << " ms" << endl;
//Time the Serial DP Approach
start = chrono::steady_clock::now();
//dp_knapSack(c, &wt_arr[0], &p_arr[0], V.size());
stop = chrono::steady_clock::now();
cout << "\n Computational Time (DP Serial) : ";
cout << (int)(chrono::duration_cast<chrono::nanoseconds>(stop - start).count()) / 1000000.0;
cout << " ms" << endl;
cin.get();
return 0;
}
|
7,485 | #include <cuda.h>
#include <cstdlib>
#include <stdio.h>
#include <math.h>
#define DIM 4
#define MAX_THREADS 32
#define SHARED_MEM_CAPACITY (48 * 1024)
#define TILE_WIDTH 32
// Tiled matrix multiply C = A * B for square dim x dim matrices.
// Expects TILE_WIDTH x TILE_WIDTH thread blocks and a grid covering the
// whole matrix; uses 2 * TILE_WIDTH^2 floats of shared memory.
__global__ void matrix_multiplication
(float *A, float *B, float *C, int dim) {
// init the block index, thread index etc.
int bx = blockIdx.x; int by = blockIdx.y;
int tx = threadIdx.x; int ty = threadIdx.y;
// Output element owned by this thread (may lie outside the matrix).
int row = by * TILE_WIDTH + ty;
int col = bx * TILE_WIDTH + tx;
// Allocate the TILE on the shared memory.
__shared__ float A_sub[TILE_WIDTH][TILE_WIDTH];
__shared__ float B_sub[TILE_WIDTH][TILE_WIDTH];
// BUG FIX: the original wrapped the tile loop -- including BOTH
// __syncthreads() calls -- in `if (row < dim && col < dim)`. Threads
// past the matrix edge skipped the barriers while their block-mates
// waited: undefined behavior (hang/corruption) whenever
// dim % TILE_WIDTH != 0. All threads must run the loop and barriers;
// only the final store is guarded.
float sum = 0;
for (int k = 0; k < dim / TILE_WIDTH + 1; k++) {
// Load the tiles, substituting 0 for out-of-range elements so the
// dot product below is unaffected. (The row/col guards also fix an
// out-of-bounds read the original performed on edge rows.)
A_sub[ty][tx] =
(row < dim && k * TILE_WIDTH + tx < dim) ?
A[row * dim + (k * TILE_WIDTH + tx)] : 0;
B_sub[ty][tx] =
(col < dim && k * TILE_WIDTH + ty < dim) ?
B[(k * TILE_WIDTH + ty) * dim + col] : 0;
// Wait until the whole tile pair is resident.
__syncthreads();
// Accumulate this tile's contribution to the dot product.
for (int m = 0; m < TILE_WIDTH; m++) {
sum += A_sub[ty][m] * B_sub[m][tx];
}
// Wait until everyone is done reading before the next load.
__syncthreads();
}
// Guard the store: edge threads own no output element.
if (row < dim && col < dim) {
C[row * dim + col] = sum;
}
}
// Driver: multiplies two dim x dim matrices of 0.9 on the GPU, reports the
// event-timed duration and prints a corner of the result.
int main(int argc, char **argv) {
int dim;
if (argc == 2) {
dim = atoi(argv[1]);
} else {
dim = DIM;
}
int memSize = dim * dim * sizeof(float);
float *host_A, *host_B, *host_C;
float *dev_A, *dev_B, *dev_C;
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float milliseconds = 0;
host_A = (float *) malloc(memSize);
host_B = (float *) malloc(memSize);
host_C = (float *) malloc(memSize);
cudaMalloc(&dev_A, memSize);
cudaMalloc(&dev_B, memSize);
cudaMalloc(&dev_C, memSize);
for (int i = 0; i < dim * dim; i++) {
host_A[i] = 0.9;
host_B[i] = 0.9;
}
cudaMemcpy(dev_A, host_A, memSize, cudaMemcpyHostToDevice);
cudaMemcpy(dev_B, host_B, memSize, cudaMemcpyHostToDevice);
cudaEventRecord(start);
// BUG FIX: the kernel hard-codes TILE_WIDTH x TILE_WIDTH shared tiles,
// so the block must be exactly that shape. The old `threads = dim`
// path (dim < 32) left most of each shared tile uninitialized while
// the inner loop still read all TILE_WIDTH slots. Launch a
// ceil(dim/TILE_WIDTH)^2 grid of full tiles; the kernel guards edges.
int grid_side = (dim + TILE_WIDTH - 1) / TILE_WIDTH;
dim3 dimGrid(grid_side, grid_side);
dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
matrix_multiplication<<<dimGrid, dimBlock>>>
(dev_A, dev_B, dev_C, dim);
cudaMemcpy(host_C, dev_C, memSize, cudaMemcpyDeviceToHost);
cudaEventRecord(stop);
// BUG FIX: cudaEventElapsedTime requires the stop event to have
// completed; without this sync it fails with cudaErrorNotReady and
// leaves `milliseconds` meaningless.
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Cuda version\n");
printf("The execution time is %f milliseconds\n", milliseconds);
printf("\n");
printf("Result - for sanity check\n");
// BUG FIX: bound the sanity print by dim as well as DIM, otherwise a
// run with dim < DIM read host_C out of bounds.
int show = dim < DIM ? dim : DIM;
for (int i = 0; i < show; i++) {
for (int j = 0; j < show; j++) {
printf("%08f\t", host_C[i * dim + j]);
}
printf("\n");
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
free(host_A);
free(host_B);
free(host_C);
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_C);
return 0;
}
|
7,486 | /**
* Author: Nikolaus Mayer, 2014 (mayern@informatik.uni-freiburg.de)
* CUDA kernels
*/
#include <cmath> // std::ceil
/**
 * Kernel: doubles each of the first data_size floats of DATA in place.
 * One thread per element; extra threads exit on the bounds guard.
 */
__global__ void generic_CUDA_function__kernel( float* DATA,
                                               size_t data_size
                                             )
{
  const size_t idx = blockIdx.x * blockDim.x + threadIdx.x;
  if ( idx < data_size )
    DATA[idx] *= 2.0f;
}
/**
 * Host wrapper: launches the doubling kernel over data_size elements.
 * Uses integer ceil-division for the grid size -- the original
 * std::ceil(data_size/(float)block.x) loses precision once data_size
 * exceeds the float mantissa (~2^24) and can under-provision the grid.
 * Also skips the launch for data_size == 0, which would otherwise
 * request an invalid zero-sized grid.
 */
void generic_CUDA_function( float* DATA,
                            size_t data_size
                          )
{
  if ( data_size == 0 )
    return;
  /// CUDA kernel parameters
  const dim3 block(16, 1, 1);
  const dim3 grid((unsigned int)((data_size + block.x - 1) / block.x), 1, 1);
  /// Call kernel
  generic_CUDA_function__kernel<<<grid,block>>>( DATA, data_size );
}
|
7,487 | #include "includes.h"
// Element-wise logistic sigmoid: A[i] = 1 / (1 + e^-Z[i]) over an
// nRowsZ x nColsZ matrix stored contiguously; one thread per element.
__global__ void ForwardSigmoid(float* Z, int nRowsZ, int nColsZ, float* A)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < nRowsZ * nColsZ)
{
// Use expf and float literals: exp(double) plus the `1` integer
// promoted the whole expression to double, forcing every thread
// through the slow double-precision path for no accuracy benefit.
A[index] = 1.0f / (1.0f + expf(-Z[index]));
}
} |
7,488 | #include "includes.h"
// File-scope configuration caches (set elsewhere; not read by the kernel
// below).
static unsigned int GRID_SIZE_N;
static unsigned int GRID_SIZE_4N;
static unsigned int MAX_STATE_VALUE;
// Tip/Inner ("TI") likelihood-style kernel over 4 states x 4 rate
// categories: 16 doubles of per-site data, one site per block.
// Launch assumptions implied by the indexing: blockDim = (4, 4, 4),
// i.e. 64 threads per block.
// NOTE(review): semantics inferred from RAxML-style naming (extEV =
// 4x4 eigenvector matrix, tipX1 = per-site tip state codes, uX1/uX2 =
// precomputed tip lookup tables); tipX2 is accepted but unused here --
// confirm against the host-side caller.
__global__ static void cudaTIGammaKernel(double *extEV, double *x2, double *x3, unsigned char *tipX1, unsigned char *tipX2, double *r, double *uX1, double *uX2) {
__shared__ volatile double ump[64], x1px2[16], v[64];
// Flat thread id within the 4x4x4 block.
const int tid = (threadIdx.z * 16) + (threadIdx.y * 4) + threadIdx.x;
// Base of this block's 16-double site data plus the z-category row.
const int offset = 16 * blockIdx.x + threadIdx.z * 4;
const int squareId = threadIdx.z * 4 + threadIdx.y;
// Select the 16-entry lookup row for this site's tip state.
uX1 += 16 * tipX1[blockIdx.x];
ump[tid] = x2[offset + threadIdx.x] * r[tid];
__syncthreads();
// Pairwise tree-reduction of the 4 x-lanes into lane 0; the barriers
// order the shared-memory stages across the block.
if (threadIdx.x <= 1) {
ump[tid] += ump[tid + 2];
}
__syncthreads();
if (threadIdx.x == 0) {
ump[tid] += ump[tid + 1];
uX2[4 * blockIdx.x + threadIdx.y] = ump[tid];
x1px2[squareId] = uX1[squareId] * ump[tid];
}
__syncthreads();
// Expand through the eigenvector matrix, then reduce over y-lanes.
v[tid] = x1px2[squareId] * extEV[threadIdx.y * 4 + threadIdx.x];
__syncthreads();
if (threadIdx.y <= 1) {
v[tid] += v[tid + 8];
}
__syncthreads();
if (threadIdx.y == 0) {
v[tid] += v[tid + 4];
x3[offset + threadIdx.x] = v[tid];
}
} |
7,489 | #include <iostream>
#include <cstdlib>
#include <cstdio>
using namespace std;
// One explicit Euler step per particle, operating directly on global
// memory (the unoptimized baseline of this benchmark). Position advances
// by v*dt, velocity by a*dt, then the force accumulator is cleared.
__global__ void euler1 (float2 *pos, float2* vel, float2 *acc, float dt, float box) {
const int p = threadIdx.x + blockDim.x * blockIdx.x;
pos[p].x += vel[p].x * dt;
pos[p].y += vel[p].y * dt;
vel[p].x += acc[p].x * dt;
vel[p].y += acc[p].y * dt;
// Periodic wrapping against `box` is intentionally disabled.
// Reset the acceleration for the next force evaluation.
acc[p] = (float2){0.0f,0.0f};
}
// Euler step variant that stages each particle in shared memory before
// integrating (one slot per thread, so no cross-thread access and no
// barrier is required).
__global__ void euler2 (float2 *pos, float2* vel, float2 *acc, float dt, float box) {
const int p = threadIdx.x + blockDim.x * blockIdx.x;
const int lane = threadIdx.x;
__shared__ float2 sPos[192];
__shared__ float2 sVel[192];
__shared__ float2 sAcc[192];
// Stage this thread's particle.
sPos[lane] = pos[p];
sVel[lane] = vel[p];
sAcc[lane] = acc[p];
// Integrate in shared memory.
sPos[lane].x += sVel[lane].x * dt;
sPos[lane].y += sVel[lane].y * dt;
sVel[lane].x += sAcc[lane].x * dt;
sVel[lane].y += sAcc[lane].y * dt;
// Periodic wrapping against `box` is intentionally disabled.
// Write results back and clear the force accumulator.
pos[p] = sPos[lane];
vel[p] = sVel[lane];
acc[p] = (float2){0.0f,0.0f};
}
// Euler step variant that stages each particle in registers (fastest of
// the three variants in this benchmark): load, integrate, store.
__global__ void euler3 (float2 *pos, float2* vel, float2 *acc, float dt, float box) {
const int p = threadIdx.x + blockDim.x * blockIdx.x;
float2 r = pos[p];
float2 v = vel[p];
float2 a = acc[p];
r.x += v.x * dt;
r.y += v.y * dt;
v.x += a.x * dt;
v.y += a.y * dt;
// Periodic wrapping against `box` is intentionally disabled.
// Write the new state back and clear the force accumulator.
pos[p] = r;
vel[p] = v;
acc[p] = (float2){0.0f,0.0f};
}
// Abort with a labeled message if any earlier CUDA call or launch left an
// error behind (cudaGetLastError also clears the sticky error).
inline void checkCUDAError(const char *msg) {
const cudaError_t status = cudaGetLastError();
if (status == cudaSuccess)
return;
fprintf(stderr, "Cuda error: %s: %s.\n", msg,
cudaGetErrorString( status) );
exit(EXIT_FAILURE);
}
// Benchmark driver: runs each Euler kernel nTests times on N particles
// and reports per-launch averages plus relative improvements.
int main() {
cudaEvent_t start, stop;
float time1, time2, time3;
float dt = 0.001f;
int nTests = 99;
float box = 40.0f;
// const: fixed problem size (matches the 56 x 192 launch below, and
// avoids the non-standard VLA the stack arrays relied on).
const int N = 10752;
int buffer_size = N*sizeof(float2);
float2 pos[N];
float2 vel[N];
float2 acc[N];
// Random initial particle state in [0, 1).
for (int i=0; i<N; i++){
pos[i].x = (float)rand()/RAND_MAX;
pos[i].y = (float)rand()/RAND_MAX;
vel[i].x = (float)rand()/RAND_MAX;
vel[i].y = (float)rand()/RAND_MAX;
acc[i].x = (float)rand()/RAND_MAX;
acc[i].y = (float)rand()/RAND_MAX;
}
// Device buffers.
float2 *pos_d=0, *vel_d, *acc_d=0;
cudaMalloc((void**)&pos_d,buffer_size);
cudaMalloc((void**)&vel_d,buffer_size);
cudaMalloc((void**)&acc_d,buffer_size);
// Copy the initial state to the device.
cudaMemcpy( pos_d, pos, buffer_size, cudaMemcpyHostToDevice );
cudaMemcpy( vel_d, vel, buffer_size, cudaMemcpyHostToDevice );
cudaMemcpy( acc_d, acc, buffer_size, cudaMemcpyHostToDevice );
// Timing events.
cudaEventCreate(&start);
cudaEventCreate(&stop);
// --- Kernel 1: global-memory baseline ---------------------------------
cudaEventRecord(start, 0);
checkCUDAError("Start1");
for (int i=0; i<nTests; i++){
euler1 <<<56, 192>>> (pos_d, vel_d, acc_d, dt, box);
checkCUDAError("Euler1");
}
cudaEventRecord( stop, 0 );
checkCUDAError("Stop1");
// Block until the stop event lands, then read the elapsed time.
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time1, start, stop );
checkCUDAError("Time1");
// --- Kernel 2: shared-memory staging ----------------------------------
cudaEventRecord(start, 0);
checkCUDAError("Start2");
for (int i=0; i<nTests; i++){
euler2 <<<56, 192>>> (pos_d, vel_d, acc_d, dt, box);
checkCUDAError("Euler2");
}
cudaEventRecord( stop, 0 );
checkCUDAError("Stop2");
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time2, start, stop );
checkCUDAError("Time2");
// --- Kernel 3: register staging ---------------------------------------
cudaEventRecord(start, 0);
checkCUDAError("Start3");
for (int i=0; i<nTests; i++){
euler3 <<<56, 192>>> (pos_d, vel_d, acc_d, dt, box);
checkCUDAError("Euler3");
}
cudaEventRecord( stop, 0 );
checkCUDAError("Stop3");
cudaEventSynchronize( stop );
cudaEventElapsedTime( &time3, start, stop );
checkCUDAError("Time3");
// Report per-launch averages and the relative improvements.
cout << "Time without optimization: " << time1/nTests << "ms" << endl;
cout << "Time with shared mem optimization: " << time2/nTests << "ms" << endl;
cout << "Time with register optimization: " << time3/nTests << "ms" << endl;
cout << "Improvement of shared mem: " << (time1-time2)/time1*100 << "%" << endl;
cout << "Improvement of register mem: " << (time1-time3)/time1*100 << "%" << endl;
cudaEventDestroy( start );
cudaEventDestroy( stop );
// BUG FIX: the device buffers were never released.
cudaFree(pos_d);
cudaFree(vel_d);
cudaFree(acc_d);
return 0;
}
|
// Test kernel: each thread copies one element into shared memory, then
// every thread races to write a neighbor's slot into data[0].
// NOTE(review): there is no __syncthreads() between the shared-memory
// write and the read of myshared[tid + 1] (another thread's slot), and
// tid == 31 reads myshared[32], one past the array. This looks like a
// deliberate negative test for tools such as compute-sanitizer's
// racecheck/memcheck -- confirm before "fixing" it.
__global__ void testLocal(float *data) {
__shared__ float myshared[32];
int tid = threadIdx.x;
myshared[tid] = data[tid];
data[0] = myshared[tid + 1];
}
// Variant of testLocal with a 64-slot shared array and an extra
// write/read pair on overlapping slots.
// NOTE(review): as above, no __syncthreads() separates the cross-thread
// shared-memory writes and reads (tid and tid + 1 overlap between
// neighboring threads) -- apparently an intentional race-detection
// fixture; confirm before altering.
__global__ void testLocal2(float *data) {
__shared__ float myshared[64];
int tid = threadIdx.x;
myshared[tid] = data[tid];
data[0] = myshared[tid + 1];
myshared[tid + 1] = data[tid];
data[1] = myshared[tid];
}
|
7,491 | #include "includes.h"
// Element-wise vector addition: c[i] = a[i] + b[i].
// Indexes by threadIdx.x only, so a single launch covers one block's
// worth of elements.
__global__ void addKernel(int *c, const int *a, const int *b)
{
const int idx = threadIdx.x;
c[idx] = a[idx] + b[idx];
} |
7,492 | #include "includes.h"
// STUB: parallel prefix-sum (scan) over `len` floats -- the body is
// intentionally empty (course assignment template).
// NOTE(review): callers currently get no output written to `output`;
// implement (e.g. a work-efficient Brent-Kung scan, possibly with a
// second block-sum pass) before use.
__global__ void scan(float * input, float * output, int len) {
//@@ Modify the body of this function to complete the functionality of
//@@ the scan on the device
//@@ You may need multiple kernel calls; write your kernels before this
//@@ function and call them from here
}
7,493 | #include "includes.h"
// Copy a 2D array element-for-element. Both buffers use the same
// (ix * ny + iy) indexing, so loads and stores share one access pattern
// (column-style stride relative to the x thread dimension).
__global__ void copyCol(int *in, int *out, const int nx, const int ny)
{
    const unsigned int ix = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int iy = blockIdx.y * blockDim.y + threadIdx.y;
    // Grid may overhang the array on both axes; guard before touching memory.
    if (ix >= nx || iy >= ny)
        return;
    out[ix * ny + iy] = in[ix * ny + iy];
}
7,494 | #include <stdio.h>
#include <stdlib.h>
#include <algorithm>
#define BLOCK_SIZE_X 16
#define BLOCK_SIZE_Y 16
// Element-wise matrix addition C = A + B for a height x width matrix
// stored row-major. The x thread dimension walks rows, y walks columns.
__global__ void matAdd(int width, int height, const float* A, const float* B, float* C)
{
    const int row = threadIdx.x + blockIdx.x * blockDim.x;
    const int col = threadIdx.y + blockIdx.y * blockDim.y;
    // Grid may overhang the matrix; guard both axes before indexing.
    if (row >= height || col >= width)
        return;
    const int idx = row * width + col;
    C[idx] = A[idx] + B[idx];
}
// Abort with a diagnostic if a CUDA runtime call failed.
static void checkCuda(cudaError_t err, const char* what)
{
    if (err != cudaSuccess)
    {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Build two random height x width matrices, add them on the GPU with
// matAdd, and print the top-left corner of the result for inspection.
int main()
{
    int width = 1000;
    int height = 100;
    int numElements = width*height;
    size_t bytes = numElements * sizeof(float);
    float* h_A = (float*)calloc(numElements, sizeof(float));
    float* h_B = (float*)calloc(numElements, sizeof(float));
    float* h_C = (float*)calloc(numElements, sizeof(float));
    if (h_A == NULL || h_B == NULL || h_C == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    srand(1214134);
    for (int i = 0; i < numElements; i++)
    {
        h_A[i] = float(rand())/float(RAND_MAX + 1.0);
        h_B[i] = float(rand())/float(RAND_MAX + 1.0);
    }
    float* d_A;
    float* d_B;
    float* d_C;
    checkCuda(cudaMalloc((void**)&d_A, bytes), "cudaMalloc d_A");
    checkCuda(cudaMalloc((void**)&d_B, bytes), "cudaMalloc d_B");
    checkCuda(cudaMalloc((void**)&d_C, bytes), "cudaMalloc d_C");
    checkCuda(cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice), "copy A to device");
    checkCuda(cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice), "copy B to device");
    // Ceiling division so the grid covers the matrix exactly (the old
    // "/BLOCK + 1" form always launched one extra block row/column).
    // Thread x maps to rows (height), thread y to columns (width).
    dim3 numBlocks((height + BLOCK_SIZE_X - 1)/BLOCK_SIZE_X,
                   (width + BLOCK_SIZE_Y - 1)/BLOCK_SIZE_Y);
    dim3 threadsPerBlock(BLOCK_SIZE_X, BLOCK_SIZE_Y);
    matAdd<<<numBlocks, threadsPerBlock>>>(width, height, d_A, d_B, d_C);
    checkCuda(cudaGetLastError(), "matAdd launch");
    // Blocking copy also synchronizes with the kernel.
    checkCuda(cudaMemcpy(h_C, d_C, bytes, cudaMemcpyDeviceToHost), "copy C to host");
    for (int i = 0; i < std::min(5, height); i++)
    {
        for (int j = 0; j < std::min(5, width); j++)
        {
            int index = i*width + j;
            printf("%3.2f + %3.2f = %3.2f;\t", h_A[index], h_B[index], h_C[index]);
        }
        printf("...\n");
    }
    printf("...\n");
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
7,495 | #ifdef CALLBACK
int callback_main(int);
#else
int no_callback_main(int);
#endif
// Entry point: picks a CUDA device index from argv[1] (default 0) and
// forwards to the callback or non-callback demo main, selected at
// compile time via the CALLBACK macro.
// NOTE(review): atoi is used but no <stdlib.h>/<cstdlib> include is
// visible in this translation unit -- confirm it arrives via an
// indirect include, or add the header.
int main(int argc, const char **argv)
{
int device = argc > 1 ? atoi(argv[1]) : 0;
#ifdef CALLBACK
callback_main(device);
#else
no_callback_main(device);
#endif
return 0;
}
|
7,496 | #include "includes.h"
// Backprop through a linear layer: dA = W^T * dZ.
// W is (W_y_dim x W_x_dim) row-major and is read transposed here;
// dZ is (dZ_y_dim x dZ_x_dim); the output dA is (W_x_dim x dZ_x_dim).
__global__ void linearLayerBackprop(float* W, float* dZ, float *dA, int W_x_dim, int W_y_dim, int dZ_x_dim, int dZ_y_dim) {
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    const int dA_x_dim = dZ_x_dim;  // output width follows dZ
    const int dA_y_dim = W_x_dim;   // output height follows W's width (W is transposed)
    if (row >= dA_y_dim || col >= dA_x_dim)
        return;
    float acc = 0.0f;
    for (int k = 0; k < W_y_dim; k++) {
        acc += W[k * W_x_dim + row] * dZ[k * dZ_x_dim + col];
    }
    dA[row * dA_x_dim + col] = acc;
}
// OpenCL-style helper: flattened global thread index along `dimension`
// (0, 1 or 2). Any other dimension yields 0.
__device__
int get_global_id(int dimension) {
    switch (dimension) {
        case 0:  return blockIdx.x * blockDim.x + threadIdx.x;
        case 1:  return blockIdx.y * blockDim.y + threadIdx.y;
        case 2:  return blockIdx.z * blockDim.z + threadIdx.z;
        default: return 0;
    }
}
// OpenCL-style helper: total number of threads launched along
// `dimension` (0, 1 or 2). Any other dimension yields 0.
__device__
int get_global_size(int dimension) {
    switch (dimension) {
        case 0:  return gridDim.x * blockDim.x;
        case 1:  return gridDim.y * blockDim.y;
        case 2:  return gridDim.z * blockDim.z;
        default: return 0;
    }
}
|
7,498 | /* ----------------------------------------------------------------------------
* This file was automatically generated by SWIG (http://www.swig.org).
* Version 4.0.0
*
* This file is not intended to be easily readable and contains a number of
* coding conventions designed to improve portability and efficiency. Do not make
* changes to this file unless you know what you are doing--modify the SWIG
* interface file instead.
* ----------------------------------------------------------------------------- */
#ifdef __cplusplus
/* SwigValueWrapper is described in swig.swg */
// SWIG-generated helper (see swig.swg): holds a value type behind a
// heap pointer so it can cross the C wrapper boundary. Do not hand-edit;
// regenerate from the SWIG interface file instead.
template<typename T> class SwigValueWrapper {
// Owning pointer with move-on-assign semantics (pre-C++11 move emulation).
struct SwigMovePointer {
T *ptr;
SwigMovePointer(T *p) : ptr(p) { }
~SwigMovePointer() { delete ptr; }
// Steals rhs's pointer; frees the previously held object.
SwigMovePointer& operator=(SwigMovePointer& rhs) { T* oldptr = ptr; ptr = 0; delete oldptr; ptr = rhs.ptr; rhs.ptr = 0; return *this; }
} pointer;
// Copying a wrapper is deliberately disallowed (declared, not defined).
SwigValueWrapper& operator=(const SwigValueWrapper<T>& rhs);
SwigValueWrapper(const SwigValueWrapper<T>& rhs);
public:
SwigValueWrapper() : pointer(0) { }
// Assigning a T copies it onto the heap and takes ownership.
SwigValueWrapper& operator=(const T& t) { SwigMovePointer tmp(new T(t)); pointer = tmp; return *this; }
operator T&() const { return *pointer.ptr; }
T *operator&() { return pointer.ptr; }
};
// SWIG-generated: value-initialize a T (zero-initializes PODs).
template <typename T> T SwigValueInit() {
return T();
}
#endif
/* -----------------------------------------------------------------------------
* This section contains generic SWIG labels for method/variable
* declarations/attributes, and other compiler dependent labels.
* ----------------------------------------------------------------------------- */
/* template workaround for compilers that cannot correctly implement the C++ standard */
#ifndef SWIGTEMPLATEDISAMBIGUATOR
# if defined(__SUNPRO_CC) && (__SUNPRO_CC <= 0x560)
# define SWIGTEMPLATEDISAMBIGUATOR template
# elif defined(__HP_aCC)
/* Needed even with `aCC -AA' when `aCC -V' reports HP ANSI C++ B3910B A.03.55 */
/* If we find a maximum version that requires this, the test would be __HP_aCC <= 35500 for A.03.55 */
# define SWIGTEMPLATEDISAMBIGUATOR template
# else
# define SWIGTEMPLATEDISAMBIGUATOR
# endif
#endif
/* inline attribute */
#ifndef SWIGINLINE
# if defined(__cplusplus) || (defined(__GNUC__) && !defined(__STRICT_ANSI__))
# define SWIGINLINE inline
# else
# define SWIGINLINE
# endif
#endif
/* attribute recognised by some compilers to avoid 'unused' warnings */
#ifndef SWIGUNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define SWIGUNUSED __attribute__ ((__unused__))
# else
# define SWIGUNUSED
# endif
# elif defined(__ICC)
# define SWIGUNUSED __attribute__ ((__unused__))
# else
# define SWIGUNUSED
# endif
#endif
#ifndef SWIG_MSC_UNSUPPRESS_4505
# if defined(_MSC_VER)
# pragma warning(disable : 4505) /* unreferenced local function has been removed */
# endif
#endif
#ifndef SWIGUNUSEDPARM
# ifdef __cplusplus
# define SWIGUNUSEDPARM(p)
# else
# define SWIGUNUSEDPARM(p) p SWIGUNUSED
# endif
#endif
/* internal SWIG method */
#ifndef SWIGINTERN
# define SWIGINTERN static SWIGUNUSED
#endif
/* internal inline SWIG method */
#ifndef SWIGINTERNINLINE
# define SWIGINTERNINLINE SWIGINTERN SWIGINLINE
#endif
/* qualifier for exported *const* global data variables*/
#ifndef SWIGEXTERN
# ifdef __cplusplus
# define SWIGEXTERN extern
# else
# define SWIGEXTERN
# endif
#endif
/* exporting methods */
#if defined(__GNUC__)
# if (__GNUC__ >= 4) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)
# ifndef GCC_HASCLASSVISIBILITY
# define GCC_HASCLASSVISIBILITY
# endif
# endif
#endif
#ifndef SWIGEXPORT
# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
# if defined(STATIC_LINKED)
# define SWIGEXPORT
# else
# define SWIGEXPORT __declspec(dllexport)
# endif
# else
# if defined(__GNUC__) && defined(GCC_HASCLASSVISIBILITY)
# define SWIGEXPORT __attribute__ ((visibility("default")))
# else
# define SWIGEXPORT
# endif
# endif
#endif
/* calling conventions for Windows */
#ifndef SWIGSTDCALL
# if defined(_WIN32) || defined(__WIN32__) || defined(__CYGWIN__)
# define SWIGSTDCALL __stdcall
# else
# define SWIGSTDCALL
# endif
#endif
/* Deal with Microsoft's attempt at deprecating C standard runtime functions */
#if !defined(SWIG_NO_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_CRT_SECURE_NO_DEPRECATE)
# define _CRT_SECURE_NO_DEPRECATE
#endif
/* Deal with Microsoft's attempt at deprecating methods in the standard C++ library */
#if !defined(SWIG_NO_SCL_SECURE_NO_DEPRECATE) && defined(_MSC_VER) && !defined(_SCL_SECURE_NO_DEPRECATE)
# define _SCL_SECURE_NO_DEPRECATE
#endif
/* Deal with Apple's deprecated 'AssertMacros.h' from Carbon-framework */
#if defined(__APPLE__) && !defined(__ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES)
# define __ASSERT_MACROS_DEFINE_VERSIONS_WITHOUT_UNDERSCORES 0
#endif
/* Intel's compiler complains if a variable which was never initialised is
* cast to void, which is a common idiom which we use to indicate that we
* are aware a variable isn't used. So we just silence that warning.
* See: https://github.com/swig/swig/issues/192 for more discussion.
*/
#ifdef __INTEL_COMPILER
# pragma warning disable 592
#endif
/* Errors in SWIG */
#define SWIG_UnknownError -1
#define SWIG_IOError -2
#define SWIG_RuntimeError -3
#define SWIG_IndexError -4
#define SWIG_TypeError -5
#define SWIG_DivisionByZero -6
#define SWIG_OverflowError -7
#define SWIG_SyntaxError -8
#define SWIG_ValueError -9
#define SWIG_SystemError -10
#define SWIG_AttributeError -11
#define SWIG_MemoryError -12
#define SWIG_NullReferenceError -13
#define SWIG_exception_impl(DECL, CODE, MSG, RETURNNULL) \
{ throw std::logic_error("In " DECL ": " MSG); }
#include <stdexcept>
#define SWIGVERSION 0x040000
#define SWIG_VERSION SWIGVERSION
#define SWIG_as_voidptr(a) const_cast< void * >(static_cast< const void * >(a))
#define SWIG_as_voidptrptr(a) ((void)SWIG_as_voidptr(*a),reinterpret_cast< void** >(a))
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
// SWIG-wrapped helper: sort SIZE elements in ascending order, in place,
// on the device via thrust::sort. DATA must wrap a device pointer.
template<typename T>
void swig_thrust_sort(thrust::device_ptr<T> DATA, size_t SIZE) {
thrust::sort(DATA, DATA + SIZE);
}
#include <stdlib.h>
#ifdef _MSC_VER
# ifndef strtoull
# define strtoull _strtoui64
# endif
# ifndef strtoll
# define strtoll _strtoi64
# endif
#endif
// SWIG-generated POD view of an array crossing the C wrapper boundary:
// raw data pointer plus element count.
struct SwigArrayWrapper {
void* data;
size_t size;
};
// SWIG-generated: return an empty (NULL data, zero size) array wrapper.
SWIGINTERN SwigArrayWrapper SwigArrayWrapper_uninitialized() {
SwigArrayWrapper result;
result.data = NULL;
result.size = 0;
return result;
}
#ifdef __cplusplus
extern "C" {
#endif
// SWIG-generated C entry point: sorts the float array described by
// *farg1 on the device via swig_thrust_sort<float>. Throws (via
// SWIG_exception_impl) when the array is non-empty but its device
// pointer is NULL. Regenerate from the SWIG interface; do not hand-edit.
SWIGEXPORT void _wrap_sort(SwigArrayWrapper *farg1) {
thrust::device_ptr< float > arg1 ;
size_t arg2 ;
arg1 = thrust::device_ptr< float >(static_cast<float*>(farg1->data));
arg2 = farg1->size;
if (arg2 && !thrust::raw_pointer_cast(arg1)) {
SWIG_exception_impl("swig_thrust_sort< float >(thrust::device_ptr< float >,size_t)", SWIG_TypeError, \
"Array is not present on device", return ); \
}
swig_thrust_sort< float >(arg1,arg2);
}
#ifdef __cplusplus
}
#endif
|
7,499 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <iostream>
#include <ctime>
size_t N;
// Host-side generator: pseudo-random integer in [-5, 5], as float.
__host__ static __inline__ float rand_abs5() {
    int r = rand() % 11;       // 0 .. 10
    return (float)(r - 5);     // shift into -5 .. 5
}
// Binary functor for thrust::transform: returns a*x + y (SAXPY step).
struct saxpy_func {
    const int a;  // scale factor applied to x
    saxpy_func(int _a) : a(_a) {}
    __host__ __device__
    int operator()(const int& x, const int& y) const {
        return x * a + y;
    }
};
// Benchmark SAXPY (y = 2*x + y) with thrust on host and device vectors
// of 2^nElementExp random elements, then verify both sides agree.
int main(int argc, char **argv) {
    if (argc != 2) {
        printf("Usage: %s [nElementExp]\n", argv[0]);
        return -1;
    } else {
        // 2 << (e-1) == 2^e; parenthesized so precedence is explicit.
        N = 2 << (atoi(argv[1]) - 1);
    }
    printf("Running SAXPY on two random vectors of %zu elements\n", N);
    // generate host vectors and fill with random numbers between -5 and 5
    thrust::host_vector<int> h_x(N);
    thrust::host_vector<int> h_y(N);
    thrust::generate(h_x.begin(), h_x.end(), rand_abs5);
    thrust::generate(h_y.begin(), h_y.end(), rand_abs5);
    // create device vectors (implicit host->device copy)
    thrust::device_vector<int> d_x = h_x;
    thrust::device_vector<int> d_y = h_y;
    // y = 2*x + y, on both host and device
    clock_t start = std::clock();
    thrust::transform(h_x.begin(), h_x.end(),
                      h_y.begin(), h_y.begin(), saxpy_func(2));
    clock_t stop = std::clock();
    double host_dur = double(stop-start) / (CLOCKS_PER_SEC/1000);
    start = std::clock();
    thrust::transform(d_x.begin(), d_x.end(),
                      d_y.begin(), d_y.begin(), saxpy_func(2));
    stop = std::clock();
    double dev_dur = double(stop-start) / (CLOCKS_PER_SEC/1000);
    printf("\tHost vector operation took %f ms\n", host_dur);
    printf("\tDevice vector operation took %f ms\n", dev_dur);
    // Copy the device result back in one transfer; indexing d_y[i]
    // directly would issue one device->host copy per element.
    thrust::host_vector<int> h_res = d_y;
    bool allSame = true;
    size_t i;
    for (i = 0; i < N; i++) {
        if (h_res[i] != h_y[i]) {
            allSame = false;
            break;
        }
    }
    if (allSame) {
        printf("\tOperation on device and host vector produced same results\n");
        return 0;
    } else {
        printf("Element %zu not the same...\n", i);
        return -1;
    }
}
|
7,500 | /**
Author: Dimitriadis Vasileios 8404
Faculty of Electrical and Computer Engineering AUTH
3rd assignment at Parallel and Distributed Systems (7th semester)
This is a parallel implementation of mean shift algorithm using the
Gaussian probability density function.
**/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include <sys/time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 60000
#define DIMENSIONS 5
#define EPSILON 0.001
#define VAR 0.001 // =σ^2 variance
#define N_Threads 1024
struct timeval startwtime, endwtime;
double seq_time;
void getinput(double *x, char *filename);
__global__ void meanshift(double *dev_x, double *dev_y, int dim, double eps, double var);
__device__ double find_distance(double *y, int i, double *x, int j, int dim);
void show_results(double *y_new);
/* Load the dataset named on the command line, run the mean shift kernel
 * on the GPU (one block per point, N_Threads threads per block), time
 * it, and print the first few shifted points. */
int main(int argc, char **argv)
{
    if (argc != 2)
    {
        printf("Need as input a dataset to process\n");
        exit (1);
    }
    double *x = (double *)malloc(N * DIMENSIONS * sizeof(double));
    if (x == NULL)
    {
        printf("Failed to allocate data at x...\n");
        exit(1);
    }
    getinput(x, argv[1]);
    double *y = (double *)malloc(N * DIMENSIONS * sizeof(double));
    if (y == NULL)
    {
        printf("Failed to allocate data at y...\n");
        exit(1);
    }
    double *dev_x;
    cudaMalloc(&dev_x, N * DIMENSIONS * sizeof(double));
    double *dev_y;
    cudaMalloc(&dev_y, N * DIMENSIONS * sizeof(double));
    cudaMemcpy(dev_x, x, N * DIMENSIONS * sizeof(double), cudaMemcpyHostToDevice);
    /* Initialize y as x in gpu. */
    cudaMemcpy(dev_y, x, N * DIMENSIONS * sizeof(double), cudaMemcpyHostToDevice);
    cudaError_t error;
    /* Per thread: DIMENSIONS doubles of nominator plus 1 denominator. */
    size_t shared_size = N_Threads * DIMENSIONS + N_Threads;
    gettimeofday (&startwtime, NULL);
    meanshift<<<N, N_Threads, sizeof(double) * shared_size>>>(dev_x, dev_y, DIMENSIONS, EPSILON, VAR);
    /* Catch bad launch configurations immediately. */
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("Error at launch: %s\n", cudaGetErrorString(error));
        exit(1);
    }
    /* Kernel launches are asynchronous: wait for completion here so the
       measured time covers the computation, not just the launch. */
    cudaDeviceSynchronize();
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("Error at Sync: %s\n", cudaGetErrorString(error));
        exit(1);
    }
    gettimeofday (&endwtime, NULL);
    seq_time = (double)((endwtime.tv_usec - startwtime.tv_usec)/1.0e6
                        + endwtime.tv_sec - startwtime.tv_sec);
    cudaMemcpy(y, dev_y, N * DIMENSIONS * sizeof(double), cudaMemcpyDeviceToHost);
    error = cudaGetLastError();
    if (error != cudaSuccess)
    {
        printf("Error at copying back: %s\n", cudaGetErrorString(error));
        exit(1);
    }
    printf("Time needed for mean shift is %f sec\n", seq_time);
    show_results(y);
    free(x);
    free(y);
    cudaFree(dev_x);
    cudaFree(dev_y);
    return (0);
}
/* Read up to N tab-separated points of DIMENSIONS doubles, one point
 * per line, from `filename` into the row-major array x.
 * Fixes over the previous version: `token` was a malloc'd buffer that
 * was immediately overwritten by strtok (leak) and later free'd while
 * pointing into the line buffer (invalid free / undefined behavior);
 * the line buffer is now a plain stack array of a sane size. */
void getinput(double *x, char *filename)
{
    FILE *fin;
    int i = 0, j;
    char str[256];          /* line buffer: one point per line */
    char *token;            /* points into str; never free'd */
    fin = fopen(filename, "r");
    if (fin == NULL)
    {
        printf("Error opening the file...");
        exit(1);
    }
    while (fgets(str, sizeof(str), fin) != NULL && i < N)
    {
        token = strtok(str, "\t");  /* one dimension per token */
        j = 0;
        while (token != NULL && j < DIMENSIONS)
        {
            x[i*DIMENSIONS + j] = atof(token);
            token = strtok(NULL, "\t");
            j++;
        }
        i++;
    }
    fclose(fin);
}
/** Mean shift iteration kernel with a Gaussian kernel function.
    Launch layout: one block per data point (gridDim.x == number of
    points); the blockDim.x threads of a block cooperate on that point.
    Dynamic shared memory: blockDim.x * (dim + 1) doubles -- dim partial
    nominator entries plus one denominator slot per thread.
    NOTE(review): the tree reduction below assumes blockDim.x is a power
    of two, and the final per-dimension writes assume dim <= blockDim.x.
**/
__global__
void meanshift(double *dev_x, double *dev_y, int dim, double eps, double var)
{
int start, end;
// Every block is finding the new y until convergence.
int i = blockIdx.x;
int j = threadIdx.x;
int n = gridDim.x;
int n_th = blockDim.x;
/** Every thread processes a chunk of the data in order to find
distances between y_i and all x faster. If the number of elements
divides equally by the number of threads per block then each chunk is
n/blockDim.x elements; otherwise the first n % blockDim.x threads
take one extra element.
**/
int chunk = n / n_th;
if ((n % n_th) != 0)
{
if (j < (n % n_th))
{
chunk = chunk + 1;
start = chunk * j;
end = start + chunk;
}
else
{
start = chunk * j + (n % n_th);
end = start + chunk;
}
}
else
{
start = chunk * j;
end = start + chunk;
}
/** Each block has its own shared memory and the
size of it is number of threads multiplied by
(dimensions + 1) to store the values of nominators
and denominator that each thread finds.
**/
extern __shared__ double s[];
double *nominator = &s[0];
double *denominator = &s[n_th * dim];
__shared__ int converge;
converge = 0;
double distance = 0, k;
int l, r;
while (!converge)
{
//Initialize nominators and denominators as 0.
for (r=0; r<dim; r++)
{
nominator[j*dim + r] = 0;
}
denominator[j] = 0;
// Every thread is responsible of finding the new nominators
// and denominator in it's chunk.
for (l=start; l<end; l++)
{
distance = find_distance(dev_y, i, dev_x, l, dim);
// NOTE(review): the bandwidth check compares sqrt(distance)
// against the variance `var` -- confirm this cutoff is intended.
if (sqrt(distance) <= var)
{
k = exp(-distance / (2 * var)); //Gaussian probability density function.
}
else
{
k = 0;
}
for (r=0; r<dim; r++)
{
nominator[j*dim + r] += k * dev_x[l*dim + r];
}
denominator[j] += k;
}
__syncthreads();
// Tree reduction over shared memory: after it, thread slot 0 holds the
// block-wide nominator sums and denominator[0] the total weight.
for (l=n_th/2; l>0; l>>=1)
{
if (j < l)
{
for (r=0; r<dim; r++)
{
nominator[j*dim + r] += nominator[(j+l) * dim + r];
}
denominator[j] += denominator[j+l];
}
__syncthreads();
}
// Threads from 0 to dim-1 store in the first column
// of nominator the values of new y (weighted mean).
if (j < dim)
{
nominator[j] = nominator[j] / denominator[0];
}
__syncthreads();
// Only the first thread checks convergence (shift smaller than eps).
if (j == 0)
{
distance = 0;
for (r=0; r<dim; r++)
{
distance += pow(dev_y[i*dim + r] - nominator[r], 2);
}
if (sqrt(distance) < eps)
{
converge = 1;
}
}
__syncthreads();
// New y is stored in place of the previous y.
if (j < dim)
{
dev_y[i*dim + j] = nominator[j];
}
__syncthreads();
}
}
/* Squared Euclidean distance between point i of y and point j of x,
 * both row-major with `dim` columns. Returns the SQUARED distance;
 * callers apply sqrt where needed. */
__device__
double find_distance(double *y, int i, double *x, int j, int dim)
{
    double distance = 0;
    for (int l=0; l<dim; l++)
    {
        /* d*d instead of pow(d, 2): avoids a transcendental-capable
           pow call per coordinate on the device. */
        double d = y[i*dim + l] - x[j*dim + l];
        distance = distance + d * d;
    }
    return distance;
}
/* Print the first 20 shifted points, one point per line, all
 * DIMENSIONS coordinates space-separated. */
void show_results(double *y_new)
{
    for (int row = 0; row < 20; row++)
    {
        for (int col = 0; col < DIMENSIONS; col++)
        {
            printf("%f ", y_new[row*DIMENSIONS + col]);
        }
        printf("\n");
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.