serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
2,901 | #include "includes.h"
__global__ void unpack_bottom( const int x, const int y, const int halo_depth, double* field, double* buffer, const int depth)
{
// Copies a packed exchange buffer back into `field`. One thread per
// buffer element; the buffer holds `depth` rows of the row interior
// (row width minus the 2*halo_depth side cells).
const int row_width = x - 2*halo_depth;
const int tid = threadIdx.x+blockDim.x*blockIdx.x;
if(tid >= row_width*depth) return; // tail guard for partial last block
const int row = tid / row_width; // which buffered row this element is in
// Base offset into `field`: skip (halo_depth - depth) full rows, then
// re-insert the 2*halo_depth side cells skipped per buffered row.
const int base = x*(halo_depth - depth) + row*2*halo_depth;
field[base+tid] = buffer[tid];
} |
2,902 | #include "includes.h"
__global__ void callOperationSharedDynamic(int *a, int *res, int x, int n)
{
// Multiplies every element of the n x n matrix `a` by the scalar `x`,
// staging both input and result through dynamic shared memory.
// Launch with at least 2 * n * n * sizeof(int) bytes of dynamic shared mem.
int tidx = blockDim.x * blockIdx.x + threadIdx.x;
int tidy = blockDim.y * blockIdx.y + threadIdx.y;
if (tidx >= n || tidy >= n) {
return;
}
int tid = tidx * n + tidy;
extern __shared__ int data[];
int *s_a = data;
// BUG FIX: the result half of the shared buffer starts after the n*n
// input elements; the original offset used an unrelated global `size`,
// which overlaps the two halves whenever size*size != n*n.
int *s_res = &s_a[n * n];
__shared__ int s_x;
// Every thread stores the same value, so this write race is benign.
s_x = x;
s_a[tid] = a[tid];
s_res[tid] = s_a[tid] * s_x;
res[tid] = s_res[tid];
} |
2,903 | /* Code for COMP 605 HW5, problem 1
Code will calculate pi from the integral of
4/(1+x^2) on the bounds 0 - 1
using CUDA methodology.
Author: Jon Parsons
compile using
nvcc -o CudaPI.x cudapi.c
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
const int threadsPerBlock = 10;
const int blocksPerGrid = 10;
__device__ double f(double x) // Called from int_kernel on the device
/* Integrand f(x) = 4/(1+x^2); its definite integral over [0,1] is pi. */
{
return 4.0/(1.0 + x*x);
}
// Each thread evaluates one trapezoid contribution f(x_i) + f(x_i + h),
// where x_i = b + i*h. (Parameter `b` receives the LOWER bound `a` from
// TrapInt.) Threads past n do nothing.
__global__ void int_kernel(double *f_vals, double b, double h, int n)
{
int tidx = blockIdx.x * blockDim.x + threadIdx.x;
// BUG FIX: the index was previously cast to float before scaling,
// discarding double precision from the abscissa for large tidx.
double x_loc = b + (double) tidx*h;
if (tidx<n)
{
f_vals[tidx] = f(x_loc) + f(x_loc+h);
}
}
__host__ double TrapInt(double a, double b, int n)
/* Composite trapezoid rule for the integral of f over [a,b] with n
subintervals: each GPU thread evaluates one trapezoid, the host sums the
per-interval contributions and scales by h/2. */
{
double h = (b-a)/n;
double* f_vals = (double *) malloc(n*sizeof(double));
double* f_vals_loc;
cudaMalloc((void **)&f_vals_loc,(n*sizeof(double)));
/* BUG FIX: launch enough threads to cover all n intervals. The original
call was <<<10,10>>> (with the grid/block arguments also transposed by
name), so only 100 of the n buffer entries were ever written and the sum
below read uninitialized memory. */
int blocks = (n + threadsPerBlock - 1) / threadsPerBlock;
int_kernel<<<blocks,threadsPerBlock>>>(f_vals_loc, a, h, n);
/* BUG FIX: copy back doubles, not floats — the old byte count was half
the buffer. */
cudaMemcpy(f_vals,f_vals_loc, sizeof(double)*n, cudaMemcpyDeviceToHost);
double sum=0.0;
for (int i=0; i<n; i++) sum += f_vals[i];
sum *= h*0.5;
free(f_vals);
cudaFree(f_vals_loc);
return sum;
}
/*---------------------------------------------------*/
// Driver: integrates 4/(1+x^2) over [0,1] (the result approximates pi).
int main() {
double a = 0.0; // lower integration bound
double b = 1.0; // upper integration bound
int n = 10000000; // number of trapezoids
double integral = TrapInt(a, b, n);
printf ("Integral result %f\n ", integral);
return 0;
}
/*------------------------------------------------------------------*/
|
2,904 | /*************************************************************************
/* ECE 277: GPU Programmming 2021 Winter
/* Author and Instructer: Cheolhong An
/* Copyright 2020
/* University of California, San Diego
/*************************************************************************/
#define COLS 4
#define ROWS 4
#define RIGHT 0
#define DOWN 1
#define LEFT 2
#define UP 3
short *d_action;
int size = sizeof(int);
// Empty kernel launched once from agent_init; touches no device state
// (presumably a context warm-up — confirm intent with the course code).
__global__ void cuda_init() {}
// Chooses the next move for the single agent on the COLS x ROWS grid:
// walk RIGHT along the top row, then DOWN the last column, then LEFT.
// Writes the chosen action into d_action[0].
__global__ void cuda_agent(int2 *cstate, short *d_action) {
int idx = 0;
int pos_x = cstate[idx].x, pos_y = cstate[idx].y;
// BUG FIX: `action` was uninitialized when neither branch below fired
// (pos_y != 0 and pos_x != COLS-1), so garbage could reach d_action.
// Default to RIGHT so the output is always defined.
short action = RIGHT;
if (pos_y == 0) {
action = pos_x < COLS - 1 ? RIGHT : DOWN;
}
if (pos_x == COLS - 1) {
action = pos_y < ROWS - 2 ? DOWN : LEFT;
}
d_action[idx] = action;
}
// One-time setup: allocates the device action slot and launches the
// (empty) init kernel.
void agent_init() {
// allocate a short-type global memory, d_action ptr (allocated GPU)
// NOTE(review): `size` is sizeof(int) (4 bytes) while d_action points to
// short; the over-allocation is harmless but sizeof(short) would be exact.
cudaMalloc((void **)&d_action, size);
cuda_init <<<1, 1>>> ();
}
// Runs one agent step for the given device-resident state.
// Returns the DEVICE pointer d_action; the caller must cudaMemcpy the
// value back to the host before reading it (see the commented example below).
short* agent_action(int2* cstate) {
// invokes an CUDA kernel (cuda_agent), cstate ptr (allocated GPU)
cuda_agent <<<1, 1>>> (cstate, d_action);
return d_action;
}
// cudaMemcpy(&d_action, source, size, cudaMemcpyDeviceToHost);
|
2,905 | #include <stdio.h>
#include <cuda.h>
// Prints the flat global thread id of every thread in the launch.
__global__ void hello() {
    int id = blockDim.x * blockIdx.x + threadIdx.x;
    printf("my id is %d.\n", id);
}
// Launches 2048 threads (2 blocks of 1024) and waits for the device
// printf output to flush before exiting.
int main() {
    const dim3 threads(1024, 1, 1);
    hello<<<2, threads>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
2,906 | #include "includes.h"
// Computes c = a * b for square width x width row-major matrices, one
// output element per thread (column from x-dimension, row from y-dimension).
__global__ void matrixMult (int *a, int *b, int *c, int width)
{
int i, sum = 0;
int col = threadIdx.x + blockDim.x * blockIdx.x;
int row = threadIdx.y + blockDim.y * blockIdx.y;
// BUG FIX: the bounds check previously guarded only the accumulation
// loop — out-of-range threads still executed the store into c, writing
// past the end of the output buffer. Both the loop and the store are now
// inside the guard.
if(col < width && row < width)
{
for (i = 0; i< width; i++)
{
sum += a[row * width + i] * b[i * width + col];
}
c[row * width + col] = sum;
}
} |
2,907 | #include "triangle.cuh"
#include <iostream>
#include <fstream>
using namespace std;
// point class
// This constructor will help us create an instance of v3 from binary data found in the STL file.
// Builds a v3 from raw binary STL data: `facet` points at three
// consecutive little-endian 32-bit floats (x, y, z), widened to double.
v3::v3(char* facet)
{
float xx = *((float*)facet);
float yy = *((float*)facet + 1);
float zz = *((float*)facet + 2);
x = double(xx);
y = double(yy);
z = double(zz);
}
// Component-wise constructor.
v3::v3(double x, double y, double z) : x(x), y(y), z(z) {}
//triangle class
// A triangle is its three corner points, stored by value.
triangle::triangle(v3 p1, v3 p2, v3 p3) : p1(p1), p2(p2), p3(p3) {}
// util
__host__
/* Reads a binary STL file into `v`: 80-byte header, 4-byte little-endian
triangle count, then 50-byte facet records (12-byte normal, three 12-byte
vertices, 2-byte attribute). Facets whose normal has z == 0 are skipped.
Errors are reported to stdout; on failure the vector is left unchanged. */
void read_stl(string fname, vector <triangle>& v) {
ifstream myFile(
fname.c_str(), ios::in | ios::binary);
char header_info[80] = "";
char nTri[4];
// BUG FIX: initialize the count so a failed open/read does not leave it
// holding garbage and spin the loop below an arbitrary number of times.
unsigned nTriLong = 0;
unsigned count;
//read 80 byte header
if (myFile) {
myFile.read(header_info, 80);
cout << "header: " << header_info << endl;
}
else {
cout << "error" << endl;
}
//read 4-byte ulong
if (myFile) {
myFile.read(nTri, 4);
nTriLong = *((unsigned*)nTri);
cout << "Number of triangles in file: " << nTriLong << endl;
}
else {
cout << "error" << endl;
}
count = 0;
//now read in all the triangles
// (unsigned index: nTriLong is unsigned, avoids a signed/unsigned mix)
for (unsigned i = 0; i < nTriLong; i++) {
char facet[50];
if (myFile) {
//read one 50-byte triangle
myFile.read(facet, 50);
//populate each point of the triangle
//using v3::v3(char* bin);
//Ignore triangles that are parallel to some pixel ray
v3 norm(facet);
if (norm.z == 0) continue;
//facet + 12 skips the triangle's unit normal
v3 p1(facet + 12);
v3 p2(facet + 24);
v3 p3(facet + 36);
//add a new triangle to the array
v.push_back(triangle(p1, p2, p3));
count++;
}
}
cout << "Number of triangles added: " << count << endl;
return;
} |
2,908 | __global__ void BilinearInterpolationForward(const float* bottom_data,
const int* bs, const float* pos_data, float* top_data, const int* ts) {
// bs = bottom_data size, ps = pos_data size, ts = top_data size
// input position = -1~1
// pos_data[:,:,1,:] = x, pos_data[:,:,2,:] = y
// top_data size = (ps[0], ps[1], bs[2], ps[3])
// note: bs[3] === ps[3]
// NOTE(review): top_data is accumulated with += below, so the caller must
// zero-initialize it before launching this kernel — confirm at call site.
int index = blockIdx.x * blockDim.x + threadIdx.x;
int len = ts[0]*ts[1]*ts[2]*ts[3];
if (index >= len) return;
// get current index, [h,w,c,n]  (ts[0] is the fastest-varying dimension)
int h = index % ts[0];
int w = (index / ts[0]) % ts[1];
int c = (index / ts[0] / ts[1]) % ts[2];
int n = index / ts[0] / ts[1] / ts[2];
// Flat offsets of this location's x (channel 0) and y (channel 1)
// coordinates inside pos_data.
int xp = n*2*ts[1]*ts[0] + 0*ts[1]*ts[0] + w*ts[0] + h;
int yp = n*2*ts[1]*ts[0] + 1*ts[1]*ts[0] + w*ts[0] + h;
// Map the normalized coordinate from [-1, 1] into pixel space [0, size-1].
float w_new = (pos_data[xp]/2.0+0.5)*(float)(bs[1]-1);
float h_new = (pos_data[yp]/2.0+0.5)*(float)(bs[0]-1);
// calc neighbor pixel index, if > size or < size, do
// Accumulate the bilinearly weighted values of the (up to) four
// neighboring source pixels; out-of-bounds neighbors contribute zero.
float v = 0.0;
for (int x = floor(w_new); x <= ceil(w_new); x++) {
for (int y = floor(h_new); y <= ceil(h_new); y++) {
if (x < 0 || x>= bs[1] || y < 0 || y >= bs[0]){
v = 0.0;
}else{
v = bottom_data[n*bs[2]*bs[1]*bs[0] + c*bs[1]*bs[0] + x*bs[0] + y];
}
top_data[index] += v * (1-abs(w_new - (float)x)) * (1-abs(h_new - (float)y));
}
}
}
// Backward pass of the bilinear sampler: scatters top_diff into
// bottom_diff (weighted by the bilinear coefficients) and accumulates the
// position gradients into pos_diff. Uses atomicAdd because multiple output
// elements can touch the same source pixel / position slot.
// NOTE(review): bottom_diff and pos_diff are accumulated into, so both
// must be zero-initialized by the caller — confirm at call site.
__global__ void BilinearInterpolationBackward(const float* bottom_data,
const int* bs, const float* pos_data, const float* top_data, const int* ts, const float* top_diff, float* bottom_diff, float* pos_diff) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int len = ts[0]*ts[1]*ts[2]*ts[3];
if (index >= len) return;
// get current index, [h,w,c,n]
int h = index % ts[0];
int w = (index / ts[0]) % ts[1];
int c = (index / ts[0] / ts[1]) % ts[2];
int n = index / ts[0] / ts[1] / ts[2];
// Flat offsets of this location's x / y coordinate slots in pos_data.
int xp = n*2*ts[1]*ts[0] + 0*ts[1]*ts[0] + w*ts[0] + h;
int yp = n*2*ts[1]*ts[0] + 1*ts[1]*ts[0] + w*ts[0] + h;
// Same [-1,1] -> pixel-space mapping as the forward kernel.
float w_new = (pos_data[xp]/2.0+0.5)*(float)(bs[1]-1);
float h_new = (pos_data[yp]/2.0+0.5)*(float)(bs[0]-1);
float u = 0.0;
for (int x = floor(w_new); x <= ceil(w_new); x++) {
for (int y = floor(h_new); y <= ceil(h_new); y++) {
if (x >= 0 && x < bs[1] && y >= 0 && y < bs[0]){
atomicAdd(bottom_diff + n*bs[2]*bs[1]*bs[0] + c*bs[1]*bs[0] + x*bs[0] + y, top_diff[index] * (1-abs(w_new - (float)x)) * (1-abs(h_new - (float)y)) );
u = bottom_data[n*bs[2]*bs[1]*bs[0] + c*bs[1]*bs[0] + x*bs[0] + y];
// d/dx and d/dy of the bilinear weight: sign depends on which side of
// the sample point the neighbor lies.
atomicAdd(pos_diff + xp, top_diff[index] *u* (1-abs(h_new - (float)y)) * ((float)x >= w_new ? 1.0:-1.0 ) );
atomicAdd(pos_diff + yp, top_diff[index] *u* (1-abs(w_new - (float)x)) * ((float)y >= h_new ? 1.0:-1.0 ) );
}
}
}
} |
2,909 | #include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define NUM 10000000
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
/*struct num_array{
double num1;
double num2;
double result;
};*/
/*__device__ void xor(int *x, int *y)
{
int a = *x & *y;
int b = ~(*x) & ~(*y);
int r = ~a & ~b;
*x = r;
return;
}*/
// One step of a parallel XOR reduction over 2*num elements: thread i folds
// mem[i+num] into mem[i] (XOR built from AND/NOT), halving the active
// range. When the previous range was odd (o == 1), thread 0 of block 0
// moves the leftover element mem[2*num] down to mem[num] so the next round
// sees a contiguous range.
__global__ void calculate(int *mem, int num, int o)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
// BUG FIX: the guard was `i > num`, letting the extra thread i == num run,
// read mem[2*num] (out of the active range when the count is even) and
// race with other uses of mem[num].
if(i >= num)
return;
int a = *(mem + i) & *( mem + i + num);
int b = ~*(mem + i) & ~*(mem + i + num);
*(mem + i) = ~a & ~b;      // ~((x&y) | (~x&~y)) == x XOR y
if( blockIdx.x == 0 && threadIdx.x == 0 && o == 1){
*(mem + num ) = *(mem + 2*num );
}
}
// Host driver: fills `num` random ints, then repeatedly halves the array
// on the GPU with the XOR-folding kernel until the full XOR of all
// elements sits in ptr[0].
int main(int argc, char **argv)
{
struct timeval start, end, t_start, t_end;
int i,seed;
//struct num_array *pa;
int *ptr;
int *sptr;
int *gpu_mem;
unsigned long num = NUM; /*Default value of num from MACRO*/
int blocks;
if(argc == 3){
// NOTE(review): atoi returns int; a negative argument wraps to a huge
// unsigned long here, and the `num <= 0` guard below can then never
// fire (it only catches an exact 0) — consider strtoul with validation.
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
num = NUM;
seed = atoi(argv[2]);
}
else{
printf("Correct Usage - ./q2 {number of elements} {seed} \n");
return 1;
}
/* Allocate host (CPU) memory and initialize*/
srand(seed);
ptr = (int *)malloc(num * sizeof(int));
sptr = ptr;
for(i=0; i<num; ++i){
//pa = (struct num_array *) sptr;
//pa->num1 = (double) i + (double) i * 0.1;
//pa->num2 = pa->num1 + 1.0;
*sptr = rand();
//if( i == num - 1)
// printf("last no is %d \n", *sptr);
sptr += 1;
}
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
cudaMalloc(&gpu_mem, num * sizeof(int));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(gpu_mem, ptr, num * sizeof(int) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
gettimeofday(&start, NULL);
// i = number of element pairs this round; o = 1 when the previous round
// had an odd count and left one element at mem[2*i] for the kernel to
// fold back in.
int s_n = num/2;
int o = 0;
if(num % 2)
o = 1;
i = s_n;
while( i>=1)
{
blocks = i /1024;
if(i % 1024)
++blocks;
//printf("The call is %d %d \n",i,o);
calculate<<<blocks, dim3(1024,1,1)>>>(gpu_mem, i,o);
CUDA_ERROR_EXIT("kernel invocation");
// NOTE(review): kernel launches are asynchronous; without a
// cudaDeviceSynchronize() this timestamp does not bound kernel
// completion — confirm whether the timing is meant to be wall-accurate.
gettimeofday(&end, NULL);
//cudaDeviceSynchronize();
i = i + o;
if ( i % 2)
o = 1;
else
o = 0;
i = i/2;
}
/* Copy back result*/
cudaMemcpy(ptr, gpu_mem, num * sizeof(int) , cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
//printf("Total time = %ld microsecs Processsing =%ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
cudaFree(gpu_mem);
//sptr = ptr;
/*Print the result*/
//pa = (struct num_array *) (sptr + (num -1)*3*sizeof(double));
printf("xor sum = %d\n", *(ptr));
free(ptr);
}
|
2,910 | // a cuda app. we will convert this to opencl, and run it :-)
#include <iostream>
#include <memory>
#include <cassert>
using namespace std;
#include <cuda_runtime.h>
// Stores `value` into data[idx]. Only thread 0 of each block performs the
// write, so there is no cross-thread race within a block.
__global__ void setValue(float *data, int idx, float value) {
    if(threadIdx.x != 0) {
        return;
    }
    data[idx] = value;
}
// int main(int argc, char *argv[]) {
// int N = 1024;
// float *gpuFloats;
// cudaMalloc((void**)(&gpuFloats), N * sizeof(float));
// setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>(gpuFloats, 2, 123.0f);
// float hostFloats[4];
// cudaMemcpy(hostFloats, gpuFloats, 4 * sizeof(float), cudaMemcpyDeviceToHost);
// cout << "hostFloats[2] " << hostFloats[2] << endl;
// assert(hostFloats[2] == 123.0f);
// setValue<<<dim3(32, 1, 1), dim3(32, 1, 1)>>>(gpuFloats, 2, 222.0f);
// cudaMemcpy(hostFloats, gpuFloats, 4 * sizeof(float), cudaMemcpyDeviceToHost);
// cout << "hostFloats[2] " << hostFloats[2] << endl;
// assert(hostFloats[2] == 222.0f);
// hostFloats[2] = 444.0f;
// cudaMemcpy(gpuFloats, hostFloats, 4 * sizeof(float), cudaMemcpyHostToDevice);
// hostFloats[2] = 555.0f;
// cudaMemcpy(hostFloats, gpuFloats, 4 * sizeof(float), cudaMemcpyDeviceToHost);
// cout << "hostFloats[2] " << hostFloats[2] << endl;
// assert(hostFloats[2] == 444.0f);
// cudaFree(gpuFloats);
// return 0;
// }
|
2,911 | #include "cuda_MP7.cuh"
// Harness for MP7: builds correlated random bin-ids, histograms them with
// the CPU reference and the GPU implementation, and compares the results.
void cuda_MP7(int argc, char* argv[])
{
/* Case of 0 arguments: Default seed is used */
if (argc < 2) {
srand(0);
}
/* Case of 1 argument: Seed is specified as first command line argument */
else {
int seed = atoi(argv[1]);
srand(seed);
}
uint8_t *gold_bins = (uint8_t*)malloc(HISTO_HEIGHT*HISTO_WIDTH * sizeof(uint8_t));
// Use kernel_bins for your final result
uint8_t *kernel_bins = (uint8_t*)malloc(HISTO_HEIGHT*HISTO_WIDTH * sizeof(uint8_t));
// A 2D array of histogram bin-ids. One can think of each of these bins-ids as
// being associated with a pixel in a 2D image.
uint32_t **input = generate_histogram_bins();
cout << "Input example: " << endl;
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 14; j++) {
cout << input[i][j] << " ";
}
cout << endl;
}
// TIME_IT("ref_2dhisto",
// 50,
// ref_2dhisto(input, INPUT_HEIGHT, INPUT_WIDTH, gold_bins);)
ref_2dhisto(input, INPUT_HEIGHT, INPUT_WIDTH, gold_bins);
/* Include your setup code below (temp variables, function calls, etc.) */
// NOTE(review): d_input is declared uint32_t** but holds a FLAT device
// buffer; histo_kernel later dereferences it as an array of row pointers
// (d_input[ti][...]), which cannot work on a flat allocation — verify.
uint32_t **d_input = NULL;
cudaMalloc((void**)&d_input, INPUT_HEIGHT * INPUT_WIDTH * sizeof(uint32_t));
uint8_t *d_obins = NULL;
cudaMalloc((void**)&d_obins, HISTO_HEIGHT * HISTO_WIDTH * sizeof(uint8_t));
// NOTE(review): temp_bins is accumulated into by the kernel but is never
// zeroed (no cudaMemset) — confirm whether opt_2dhisto expects that.
uint32_t *temp_bins = NULL;
cudaMalloc((void**)&temp_bins, HISTO_HEIGHT * HISTO_WIDTH * sizeof(uint32_t));
// NOTE(review): alloc_2d pads each row to a 128-element pitch, so copying
// INPUT_HEIGHT*INPUT_WIDTH contiguous elements from &input[0][0] does not
// match the logical row layout — verify against alloc_2d's pitch.
cudaMemcpy(d_input, &(input[0][0]), INPUT_HEIGHT * INPUT_WIDTH * sizeof(uint32_t),
cudaMemcpyHostToDevice);
/* End of setup code */
/* This is the call you will use to time your parallel implementation */
// TIME_IT("opt_2dhisto",
// 50,
// opt_2dhisto( /*Define your own function parameters*/);)
opt_2dhisto(d_input, d_obins, temp_bins, INPUT_HEIGHT, INPUT_WIDTH);
/* Include your teardown code below (temporary variables, function calls, etc.) */
cudaMemcpy(kernel_bins, d_obins, HISTO_HEIGHT*HISTO_WIDTH * sizeof(uint8_t),
cudaMemcpyDeviceToHost);
cudaFree(temp_bins);
cudaFree(d_input);
cudaFree(d_obins);
/* End of teardown code */
int passed = 1;
cout << "Gold_Bins vs. Kernal_Bins" << endl;
for (int i = 0; i < HISTO_HEIGHT*HISTO_WIDTH; i++) {
if (gold_bins[i] != kernel_bins[i]) {
// NOTE(review): streaming uint8_t prints it as a character, not a
// number — cast to int for a readable mismatch report.
cout << i << " " << gold_bins[i] << " " << kernel_bins[i] << endl;
passed = 0;
break;
}
}
(passed) ? printf("\n Test PASSED\n") : printf("\n Test FAILED\n");
free(gold_bins);
free(kernel_bins);
}
// Reference (CPU) histogram: counts every bin-id in `input`, saturating
// each 8-bit counter at UINT8_MAX instead of letting it wrap. Always
// returns 0.
int ref_2dhisto(uint32_t *input[], size_t height, size_t width, uint8_t bins[])
{
    // Zero all HISTO_HEIGHT*HISTO_WIDTH counters before counting.
    memset(bins, 0, HISTO_HEIGHT*HISTO_WIDTH * sizeof(bins[0]));
    for (size_t row = 0; row < height; ++row)
    {
        for (size_t col = 0; col < width; ++col)
        {
            const uint32_t bin = input[row][col];
            // Saturate rather than roll over the 8-bit counter.
            if (bins[bin] < UINT8_MAX)
                ++bins[bin];
        }
    }
    return 0;
}
// Allocates a y_size-row 2D array whose rows are padded so each row pitch
// is (x_size + 128) & ~0x7F elements. Returns an array of row pointers
// into one contiguous, zero-initialized data block, or NULL if either
// allocation fails. Free with free(res[0]); free(res);.
void** alloc_2d(size_t y_size, size_t x_size, size_t element_size)
{
    const size_t pitch = (x_size + 128) & 0xFFFFFF80;
    uint8_t *data = (uint8_t*)calloc(pitch * y_size, element_size);
    void **rows = (void**)calloc(y_size, sizeof(void*));
    if (data == 0 || rows == 0)
    {
        // One of the two allocations failed: release whichever succeeded.
        free(data);
        free(rows);
        return 0;
    }
    for (size_t i = 0; i < y_size; ++i)
        rows[i] = data + (i * pitch * element_size);
    return rows;
}
// Generate another bin for the histogram. The bins are created as a random walk ...
// One step of the random walk over packed bin-ids: perturbs the low
// (width) and high (height) halves independently, clamps each to its
// valid range, and re-packs them.
static uint32_t next_bin(uint32_t pix)
{
// Split the packed id into its low HISTO_LOG bits and the remaining bits.
const uint16_t bottom = pix & ((1 << HISTO_LOG) - 1);
const uint16_t top = (uint16_t)(pix >> HISTO_LOG);
// NEXT/CLAMP are statement-like project macros (presumably supplying
// their own trailing semicolons — note the bare lines below); their exact
// perturbation semantics live in the project header.
int new_bottom = NEXT(bottom, SPREAD_BOTTOM)
CLAMP(new_bottom, 0, HISTO_WIDTH - 1)
int new_top = NEXT(top, SPREAD_TOP)
CLAMP(new_top, 0, HISTO_HEIGHT - 1)
// Re-pack the clamped halves into a single bin-id.
const uint32_t result = (new_bottom | (new_top << HISTO_LOG));
return result;
}
// Return a 2D array of histogram bin-ids. This function generates
// bin-ids with correlation characteristics similar to some actual images.
// The key point here is that the pixels (and thus the bin-ids) are *NOT*
// randomly distributed ... a given pixel tends to be similar to the
// pixels near it.
// Builds the INPUT_HEIGHT x INPUT_WIDTH array of bin-ids as a 2D random
// walk: the first row walks from its left neighbor; each later row seeds
// from the cell above and then walks from its left neighbor. This gives
// the spatial correlation of a natural image (neighbors are similar).
static uint32_t **generate_histogram_bins()
{
    uint32_t **input = (uint32_t**)alloc_2d(INPUT_HEIGHT, INPUT_WIDTH, sizeof(uint32_t));
    // Start the walk from the middle bin of the histogram.
    input[0][0] = HISTO_WIDTH / 2 | ((HISTO_HEIGHT / 2) << HISTO_LOG);
    for (int col = 1; col < INPUT_WIDTH; ++col)
        input[0][col] = next_bin(input[0][col - 1]);
    for (int row = 1; row < INPUT_HEIGHT; ++row)
    {
        input[row][0] = next_bin(input[row - 1][0]);
        for (int col = 1; col < INPUT_WIDTH; ++col)
            input[row][col] = next_bin(input[row][col - 1]);
    }
    return input;
}
/*
int gettimeofday(struct timeval * tp, struct timezone * tzp)
{
// Note: some broken versions only have 8 trailing zero's, the correct epoch has 9 trailing zero's
// This magic number is the number of 100 nanosecond intervals since January 1, 1601 (UTC)
// until 00:00:00 January 1, 1970
static const uint64_t EPOCH = ((uint64_t)116444736000000000ULL);
SYSTEMTIME system_time;
FILETIME file_time;
uint64_t time;
GetSystemTime(&system_time);
SystemTimeToFileTime(&system_time, &file_time);
time = ((uint64_t)file_time.dwLowDateTime);
time += ((uint64_t)file_time.dwHighDateTime) << 32;
tp->tv_sec = (long)((time - EPOCH) / 10000000L);
tp->tv_usec = (long)(system_time.wMilliseconds * 1000);
return 0;
}
*/
void opt_2dhisto(uint32_t **d_input, uint8_t *d_bins, uint32_t *d_temp_bins,
	size_t height, size_t width)
{
	/* This function should only contain a call to the GPU
	histogramming kernel. Any memory allocations and
	transfers must be done outside this function */
	unsigned int num_blocks = ceil((float)width / TILE_SIZE_MP7);
	histo_kernel << <num_blocks, 1024 >> > (d_input, d_temp_bins);
	histo_32to8_kernel << <1, 1024 >> >(d_bins, d_temp_bins, 1024);
	// FIX: cudaThreadSynchronize() has been deprecated for years; the
	// supported equivalent (already used elsewhere in this file) is
	// cudaDeviceSynchronize().
	cudaDeviceSynchronize();
}
// Per-block privatized histogram over a TILE_SIZE_MP7-column strip of the
// input, flushed to the global histogram with atomics.
// NOTE(review): private_bins is declared with HISTO_HEIGHT entries yet is
// indexed with ti and ti+512 for a 1024-thread block, and d_input is
// dereferenced as an array of row pointers with the THREAD id as the row
// (d_input[ti][...]). This only works if HISTO_HEIGHT, INPUT_HEIGHT and
// the launch shape line up exactly — verify against the project macros.
__global__ void histo_kernel(uint32_t **d_input, uint32_t *d_ouput)
{
__shared__ uint32_t private_bins[HISTO_HEIGHT];
unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int ti = threadIdx.x;
private_bins[ti] = 0;
__syncthreads();
int start_col = TILE_SIZE_MP7 * blockIdx.x;
for (int i = 0; i < TILE_SIZE_MP7; i++) {
if (start_col + i < INPUT_WIDTH)
atomicAdd(&private_bins[d_input[ti][start_col + i]], 1);
}
__syncthreads();
// Flush the block-private counts: each thread handles two bins.
atomicAdd(&(d_ouput[ti]), private_bins[ti]);
atomicAdd(&(d_ouput[ti + 512]), private_bins[ti + 512]);
}
// Flat-input variant of the privatized histogram: each block counts a
// TILE_SIZE_MP7-column strip into shared memory, then flushes to d_output
// with one atomicAdd per bin.
// NOTE(review): the row index is the THREAD id (d_input[ti * INPUT_WIDTH
// + ...]) and is not bounds-checked against INPUT_HEIGHT, while the bin
// writes are guarded by ti < HISTO_MAX — verify the launch shape matches
// INPUT_HEIGHT rows.
__global__ void histo_cuda_kernel(int *d_input, int *d_output)
{
__shared__ int private_bins[HISTO_MAX];
// unsigned int idx = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int ti = threadIdx.x;
if (ti < HISTO_MAX)
private_bins[ti] = 0;
__syncthreads();
int start_col = TILE_SIZE_MP7 * blockIdx.x;
for (int i = 0; i < TILE_SIZE_MP7; i++) {
if (start_col + i < INPUT_WIDTH)
atomicAdd(&(private_bins[d_input[ti * INPUT_WIDTH + (start_col + i)]]), 1);
}
__syncthreads();
if (ti < HISTO_MAX)
atomicAdd(&(d_output[ti]), private_bins[ti]);
}
// Narrows 32-bit histogram counts to 8-bit with saturation:
// d_ouput[idx] = min(d_temp[idx], UINT8_MAX) for idx < sz.
__global__ void histo_32to8_kernel(uint8_t *d_ouput, uint32_t *d_temp, const int sz)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx < sz)
	{
		// CLARITY FIX: the saturation value was written as
		// (uint8_t)UINT_MAX, which only equals 255 because the cast
		// truncates; say UINT8_MAX directly. A trailing __syncthreads()
		// at the end of the kernel was removed — it had no effect.
		d_ouput[idx] = (d_temp[idx] < UINT8_MAX)
			? (uint8_t)d_temp[idx]
			: (uint8_t)UINT8_MAX;
	}
}
// Self-contained smoke test: fills a random INPUT_HEIGHT x INPUT_WIDTH
// array, histograms it on CPU and GPU, and compares the two results.
void histo_cuda()
{
	cout << "Starting CUDA histo test..." << endl;
	int *input_array = (int*)malloc(INPUT_HEIGHT * INPUT_WIDTH * sizeof(int)); // (stray ';;' removed)
	int gold_histo[HISTO_MAX] = { 0 };
	int kernel_histo[HISTO_MAX] = { 0 };
	for (int i = 0; i < INPUT_HEIGHT; i++)
		for (int j = 0; j < INPUT_WIDTH; j++)
			input_array[i * INPUT_WIDTH + j] = rand() % (HISTO_MAX - 1);
	for (int i = 0; i < 10; i++) {
		for (int j = 0; j < 14; j++) {
			cout << input_array[i * INPUT_WIDTH + j] << " ";
		}
		cout << endl;
	}
	// CPU reference histogram.
	for (int i = 0; i < INPUT_HEIGHT; i++)
		for (int j = 0; j < INPUT_WIDTH; j++)
			gold_histo[input_array[i * INPUT_WIDTH + j]]++;
	cout << "Golden result..." << endl;
	for (int i = 0; i < 20; i++)
		cout << gold_histo[i] << " ";
	cout << endl;
	int *d_input = NULL;
	cudaMalloc((void**)&d_input, INPUT_HEIGHT * INPUT_WIDTH * sizeof(int));
	cudaMemcpy(d_input, input_array, INPUT_HEIGHT * INPUT_WIDTH *
		sizeof(int), cudaMemcpyHostToDevice);
	int *d_output = NULL;
	cudaMalloc((void**)&d_output, HISTO_MAX * sizeof(int));
	// BUG FIX: the kernel accumulates with atomicAdd, but d_output was
	// never zeroed — cudaMalloc memory is uninitialized.
	cudaMemset(d_output, 0, HISTO_MAX * sizeof(int));
	unsigned int num_blocks = ceil((float)INPUT_WIDTH / TILE_SIZE_MP7);
	histo_cuda_kernel << <num_blocks, 1024 >> > (d_input, d_output);
	// FIX: cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
	cudaDeviceSynchronize();
	cudaMemcpy(kernel_histo, d_output, HISTO_MAX * sizeof(int),
		cudaMemcpyDeviceToHost);
	cudaFree(d_input);
	cudaFree(d_output);
	// BUG FIX: the host input buffer leaked in the original.
	free(input_array);
	cout << "Kernel result..." << endl;
	for (int i = 0; i < 20; i++)
		cout << kernel_histo[i] << " ";
	cout << endl;
	bool passed = true;
	cout << "Gold_Bins vs. Kernal_Bins" << endl;
	for (int i = 0; i < HISTO_MAX; i++) {
		if (gold_histo[i] != kernel_histo[i]) {
			cout << i << ": " << gold_histo[i] << " vs. " << kernel_histo[i] << endl;
			passed = false;
			break;
		}
	}
	(passed) ? printf("\n Test PASSED\n") : printf("\n Test FAILED\n");
	cout << "Ending CUDA histo test..." << endl;
} |
2,912 | extern "C" __global__ void
// Element-wise complex multiply: x[i] *= y[i], where each float2 packs a
// complex number (.x = real part, .y = imaginary part).
// NOTE(review): no bounds check — the launch must provide exactly one
// thread per element; confirm against the host launch code.
mul_each(float2* x, float2* y)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
x[i] = make_float2(x[i].x * y[i].x - x[i].y * y[i].y, x[i].x * y[i].y + x[i].y * y[i].x);
} |
2,913 | #include "includes.h"
__global__ void matrix_transpose_k1(float* input,float* output,const int nx, const int ny)
{
// Writes output[gid] = input[threadIdx.x * blockDim.x + blockIdx.x],
// i.e. block and thread indices swap roles between the read and the write.
// NOTE(review): this only realizes a transpose when the launch uses a
// square layout with gridDim.x == blockDim.x == nx == ny — confirm
// against the host launch code (nx/ny are otherwise unused here).
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.x*blockDim.x;
//printf("gid : %d , offset : %d , index : %d ,value : %f \n", gid, offset, offset + blockIdx.x,input[offset + blockIdx.x]);
output[gid] = input[offset + blockIdx.x];
} |
2,914 | __global__ void kernelFunc() {
// Intentionally empty placeholder kernel: no device state is touched.
}
|
2,915 | #include <stdio.h>
/*
* Currently, `initializeElementsTo`, if executed in a thread whose
* `i` is calculated to be greater than `N`, will try to access a value
* outside the range of `a`.
*
* Refactor the kernel defintition to prevent our of range accesses.
*/
// Sets every element of `a` (length N) to initialValue. Threads whose
// global index falls past the end of the array return early, so any launch
// configuration that covers at least N threads is safe.
__global__ void initializeElementsTo(int initialValue, int *a, int N)
{
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N)
        return;
    a[idx] = initialValue;
}
// Allocates N managed ints, initializes them on the GPU, then verifies on
// the host that every element holds the expected value.
int main()
{
    const int N = 1000;                    // do not modify (exercise spec)
    const size_t threads_per_block = 256;  // fixed at 256 (exercise spec)
    // Ceiling division so every one of the N elements gets a thread.
    const size_t number_of_blocks =
        (N + threads_per_block - 1) / threads_per_block;

    int *a;
    const size_t size = N * sizeof(int);
    cudaMallocManaged(&a, size);

    const int initialValue = 6;
    initializeElementsTo<<<number_of_blocks, threads_per_block>>>(initialValue, a, N);
    cudaDeviceSynchronize();  // wait so the host can safely read `a`

    // Check that every element was initialized.
    for (int i = 0; i < N; ++i)
    {
        if (a[i] != initialValue)
        {
            printf("FAILURE: target value: %d\t a[%d]: %d\n", initialValue, i, a[i]);
            exit(1);
        }
    }
    printf("SUCCESS!\n");
    cudaFree(a);
} |
2,916 | #include "kernels.cuh"
__global__
// Dot product of x and y (length n), accumulated into *dot.
// Each thread sums a grid-strided slice into a double accumulator, parks
// the partial (narrowed to float) in shared memory, the block reduces it
// with a halving tree, and thread 0 adds the block result to *dot.
// NOTE(review): assumes blockDim.x <= 256 (cache size) and blockDim.x is
// a power of two (the halving loop requires it); *dot is presumably
// zeroed by the caller before launch — confirm at call site.
void dot_product_kernel(float *x, float *y, float *dot, unsigned int n){
unsigned int index = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int stride = blockDim.x * gridDim.x;
__shared__ float cache[256];
double temp = 0.0;
while(index < n){
temp += x[index] * y[index];
index += stride;
}
// Narrowing double -> float when storing the per-thread partial.
cache[threadIdx.x] = temp;
__syncthreads();
//reduction
unsigned int i = blockDim.x/2;
while(i !=0){
if(threadIdx.x < i){
cache[threadIdx.x] = cache[threadIdx.x] + cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
//atom operation
if(threadIdx.x == 0){
atomicAdd(dot,cache[0]);
}
}
|
2,917 | # include <math.h>
# include <time.h>
# include <stdio.h>
# include <stdlib.h>
# include <iostream>
# include <sys/time.h>
# include "cuda_runtime.h"
using namespace std;
const int DIM = 1024, AS = 32, BS = 32;
const float sigma = 1.0;
const bool PRINT_RESULT = false;
/*
RBF Kernel Implementation on CPU.
Param @ sigma: the parameter sigma of RBF kernel;
@ a, b, c: the input arrays and the output array;
@ dim, as, bs: the size of the array, a should be of size as * dim, and b should be of size bs * dim.
*/
/*
RBF Kernel Implementation on CPU (reference for the GPU versions).
Computes c[i*bs + j] = exp(-||a_i - b_j||^2 / (2*sigma^2)) for every pair
of rows a_i (as x dim) and b_j (bs x dim); c is row-major, as x bs.
*/
void RBF_CPU(float sigma, float a[][DIM], float b[][DIM], float c[], int dim, int as, int bs) {
    for (int i = 0; i < as; ++i) {
        for (int j = 0; j < bs; ++j) {
            // Squared Euclidean distance between row i of a and row j of b.
            float dist2 = 0.0;
            for (int k = 0; k < dim; ++k) {
                const float d = a[i][k] - b[j][k];
                dist2 += d * d;
            }
            c[i * bs + j] = exp(- dist2 / (2 * sigma * sigma));
        }
    }
}
/*
Simple RBF Kernel Implementation.
Param @ sigma: the parameter sigma of RBF kernel;
@ Md, Nd, Pd: the input values and the output array;
@ dim: the dimension of features;
@ Pdim: the width of the result array.
*/
/*
Simple RBF kernel: one thread per (tx, ty) output pair, launched as a
single block — so it requires as * bs <= 1024 threads.
Pd[tx*Pdim + ty] = exp(-||Md row tx - Nd row ty||^2 / (2*sigma^2)).
*/
__global__ void RBFKernel_Simple(float sigma, float *Md, float *Nd, float *Pd, int dim, int Pdim) {
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	float Pvalue = 0.0f;
	for (int k = 0; k < dim; ++ k) {
		float Mds = Md[tx * dim + k];
		float Nds = Nd[ty * dim + k];
		Pvalue += (Mds - Nds) * (Mds - Nds);
	}
	// FIX: use the single-precision expf(); the original double-precision
	// exp() forced a float->double->float round trip in a float kernel.
	Pd[tx * Pdim + ty] = expf(- Pvalue / (2.0f * sigma * sigma));
}
/*
Wrapper function of Simple RBF Kernel Implementation.
Param @ sigma: the parameter sigma of RBF kernel;
@ a, b, c: the input arrays and the output array;
@ dim, as, bs: the size of the array, a should be of size as * dim, and b should be of size bs * dim.
Return @ the elapsed time of GPU calculation.
*/
float RBF_CUDA_Simple(float sigma, float a[][DIM], float b[][DIM], float c[], int dim, int as, int bs) {
cudaEvent_t start, stop;
float elapsedTime = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Single-block launch below: requires as * bs <= 1024 (max threads/block).
dim3 block(as, bs);
float *M, *N, *P;
cudaMalloc((void **)&M, as * dim * sizeof(float));
cudaMalloc((void **)&N, bs * dim * sizeof(float));
cudaMalloc((void **)&P, as * bs * sizeof(float));
cudaMemcpy(M, a, as * dim * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(N, b, bs * dim * sizeof(float), cudaMemcpyHostToDevice);
// The events bracket only the kernel execution, not the copies.
cudaEventRecord(start, 0);
RBFKernel_Simple <<<1, block>>> (sigma, M, N, P, dim, bs);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(c, P, as * bs * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(M);
cudaFree(N);
cudaFree(P);
// NOTE(review): no CUDA error checking anywhere in this wrapper; a failed
// call surfaces only as wrong output.
return elapsedTime;
}
/*
RBF Kernel Implementation based on tilling (width 2).
Param @ sigma: the parameter sigma of RBF kernel;
@ Md, Nd, Pd: the input values and the output array;
@ dim: the dimension of features;
@ Pdim: the width of the result array.
*/
const int TILE_WIDTH_2 = 2;
/*
Tiled RBF kernel (tile width 2). blockDim must be
(TILE_WIDTH_2, TILE_WIDTH_2) and dim a multiple of TILE_WIDTH_2: each
iteration stages one tile of M rows and N rows in shared memory, then
accumulates the squared distance from the staged tiles.
*/
__global__ void RBFKernel_Tilling_2(float sigma, float *Md, float *Nd, float *Pd, int dim, int Pdim) {
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	float Pvalue = 0.0f;
	int Mrow = bx * TILE_WIDTH_2 + tx;
	int Nrow = by * TILE_WIDTH_2 + ty;
	__shared__ float Mds[TILE_WIDTH_2][TILE_WIDTH_2];
	__shared__ float Nds[TILE_WIDTH_2][TILE_WIDTH_2];
	for (int i = 0; i < dim / TILE_WIDTH_2; ++ i) {
		Mds[tx][ty] = Md[Mrow * dim + i * TILE_WIDTH_2 + ty];
		Nds[ty][tx] = Nd[Nrow * dim + i * TILE_WIDTH_2 + tx];
		__syncthreads();   // tile fully loaded before anyone reads it
		for (int k = 0; k < TILE_WIDTH_2; ++ k)
			Pvalue += (Mds[tx][k] - Nds[ty][k]) * (Mds[tx][k] - Nds[ty][k]);
		__syncthreads();   // everyone done reading before the tile is reused
	}
	// FIX: single-precision expf() instead of double-precision exp().
	Pd[Mrow * Pdim + Nrow] = expf(- Pvalue / (2.0f * sigma * sigma));
}
/*
Wrapper function of RBF Kernel Implementation based on tilling (width 2).
Param @ sigma: the parameter sigma of RBF kernel;
@ a, b, c: the input arrays and the output array;
@ dim, as, bs: the size of the array, a should be of size as * dim, and b should be of size bs * dim.
Return @ the elapsed time of GPU calculation.
*/
float RBF_CUDA_Tilling_2(float sigma, float a[][DIM], float b[][DIM], float c[], int dim, int as, int bs) {
cudaEvent_t start, stop;
float elapsedTime = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Grid is exact division: as, bs (and dim, inside the kernel) must be
// multiples of TILE_WIDTH_2 for full coverage.
dim3 grid(as / TILE_WIDTH_2, bs / TILE_WIDTH_2);
dim3 block(TILE_WIDTH_2, TILE_WIDTH_2);
float *M, *N, *P;
cudaMalloc((void **)&M, as * dim * sizeof(float));
cudaMalloc((void **)&N, bs * dim * sizeof(float));
cudaMalloc((void **)&P, as * bs * sizeof(float));
cudaMemcpy(M, a, as * dim * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(N, b, bs * dim * sizeof(float), cudaMemcpyHostToDevice);
// The events bracket only the kernel execution, not the copies.
cudaEventRecord(start, 0);
RBFKernel_Tilling_2 <<<grid, block>>> (sigma, M, N, P, dim, bs);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(c, P, as * bs * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(M);
cudaFree(N);
cudaFree(P);
// NOTE(review): no CUDA error checking in this wrapper.
return elapsedTime;
}
/*
RBF Kernel Implementation based on tilling (width 4).
Param @ sigma: the parameter sigma of RBF kernel;
@ Md, Nd, Pd: the input values and the output array;
@ dim: the dimension of features;
@ Pdim: the width of the result array.
*/
const int TILE_WIDTH_4 = 4;
/*
Tiled RBF kernel (tile width 4). blockDim must be
(TILE_WIDTH_4, TILE_WIDTH_4) and dim a multiple of TILE_WIDTH_4: each
iteration stages one tile of M rows and N rows in shared memory, then
accumulates the squared distance from the staged tiles.
*/
__global__ void RBFKernel_Tilling_4(float sigma, float *Md, float *Nd, float *Pd, int dim, int Pdim) {
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	float Pvalue = 0.0f;
	int Mrow = bx * TILE_WIDTH_4 + tx;
	int Nrow = by * TILE_WIDTH_4 + ty;
	__shared__ float Mds[TILE_WIDTH_4][TILE_WIDTH_4];
	__shared__ float Nds[TILE_WIDTH_4][TILE_WIDTH_4];
	for (int i = 0; i < dim / TILE_WIDTH_4; ++ i) {
		Mds[tx][ty] = Md[Mrow * dim + i * TILE_WIDTH_4 + ty];
		Nds[ty][tx] = Nd[Nrow * dim + i * TILE_WIDTH_4 + tx];
		__syncthreads();   // tile fully loaded before anyone reads it
		for (int k = 0; k < TILE_WIDTH_4; ++ k)
			Pvalue += (Mds[tx][k] - Nds[ty][k]) * (Mds[tx][k] - Nds[ty][k]);
		__syncthreads();   // everyone done reading before the tile is reused
	}
	// FIX: single-precision expf() instead of double-precision exp().
	Pd[Mrow * Pdim + Nrow] = expf(- Pvalue / (2.0f * sigma * sigma));
}
/*
Wrapper function of RBF Kernel Implementation based on tilling (width 4).
Param @ sigma: the parameter sigma of RBF kernel;
@ a, b, c: the input arrays and the output array;
@ dim, as, bs: the size of the array, a should be of size as * dim, and b should be of size bs * dim.
Return @ the elapsed time of GPU calculation.
*/
float RBF_CUDA_Tilling_4(float sigma, float a[][DIM], float b[][DIM], float c[], int dim, int as, int bs) {
cudaEvent_t start, stop;
float elapsedTime = 0;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Grid is exact division: as, bs (and dim, inside the kernel) must be
// multiples of TILE_WIDTH_4 for full coverage.
dim3 grid(as / TILE_WIDTH_4, bs / TILE_WIDTH_4);
dim3 block(TILE_WIDTH_4, TILE_WIDTH_4);
float *M, *N, *P;
cudaMalloc((void **)&M, as * dim * sizeof(float));
cudaMalloc((void **)&N, bs * dim * sizeof(float));
cudaMalloc((void **)&P, as * bs * sizeof(float));
cudaMemcpy(M, a, as * dim * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(N, b, bs * dim * sizeof(float), cudaMemcpyHostToDevice);
// The events bracket only the kernel execution, not the copies.
cudaEventRecord(start, 0);
RBFKernel_Tilling_4 <<<grid, block>>> (sigma, M, N, P, dim, bs);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cudaMemcpy(c, P, as * bs * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(M);
cudaFree(N);
cudaFree(P);
// NOTE(review): no CUDA error checking in this wrapper.
return elapsedTime;
}
/*
RBF Kernel Implementation based on tilling (width 8).
Param @ sigma: the parameter sigma of RBF kernel;
@ Md, Nd, Pd: the input values and the output array;
@ dim: the dimension of features;
@ Pdim: the width of the result array.
*/
const int TILE_WIDTH_8 = 8;
/*
Tiled RBF kernel (tile width 8). blockDim must be
(TILE_WIDTH_8, TILE_WIDTH_8) and dim a multiple of TILE_WIDTH_8: each
iteration stages one tile of M rows and N rows in shared memory, then
accumulates the squared distance from the staged tiles.
*/
__global__ void RBFKernel_Tilling_8(float sigma, float *Md, float *Nd, float *Pd, int dim, int Pdim) {
	int bx = blockIdx.x;
	int by = blockIdx.y;
	int tx = threadIdx.x;
	int ty = threadIdx.y;
	float Pvalue = 0.0f;
	int Mrow = bx * TILE_WIDTH_8 + tx;
	int Nrow = by * TILE_WIDTH_8 + ty;
	__shared__ float Mds[TILE_WIDTH_8][TILE_WIDTH_8];
	__shared__ float Nds[TILE_WIDTH_8][TILE_WIDTH_8];
	for (int i = 0; i < dim / TILE_WIDTH_8; ++ i) {
		Mds[tx][ty] = Md[Mrow * dim + i * TILE_WIDTH_8 + ty];
		Nds[ty][tx] = Nd[Nrow * dim + i * TILE_WIDTH_8 + tx];
		__syncthreads();   // tile fully loaded before anyone reads it
		for (int k = 0; k < TILE_WIDTH_8; ++ k)
			Pvalue += (Mds[tx][k] - Nds[ty][k]) * (Mds[tx][k] - Nds[ty][k]);
		__syncthreads();   // everyone done reading before the tile is reused
	}
	// FIX: single-precision expf() instead of double-precision exp().
	Pd[Mrow * Pdim + Nrow] = expf(- Pvalue / (2.0f * sigma * sigma));
}
/*
Wrapper function of RBF Kernel Implementation based on tilling (width 8).
Param @ sigma: the parameter sigma of RBF kernel;
@ a, b, c: the input arrays and the output array;
@ dim, as, bs: the size of the array, a should be of size as * dim, and b should be of size bs * dim.
Return @ the elapsed time of GPU calculation.
*/
float RBF_CUDA_Tilling_8(float sigma, float a[][DIM], float b[][DIM], float c[], int dim, int as, int bs) {
    cudaEvent_t start, stop;
    float elapsedTime = 0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // NOTE: grid truncates when as/bs are not multiples of TILE_WIDTH_8;
    // callers must pass divisible sizes.
    dim3 grid(as / TILE_WIDTH_8, bs / TILE_WIDTH_8);
    dim3 block(TILE_WIDTH_8, TILE_WIDTH_8);
    float *M, *N, *P;
    cudaMalloc((void **)&M, as * dim * sizeof(float));
    cudaMalloc((void **)&N, bs * dim * sizeof(float));
    cudaMalloc((void **)&P, as * bs * sizeof(float));
    cudaMemcpy(M, a, as * dim * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(N, b, bs * dim * sizeof(float), cudaMemcpyHostToDevice);
    // Events bracket the kernel only; host<->device copies are excluded.
    cudaEventRecord(start, 0);
    RBFKernel_Tilling_8 <<<grid, block>>> (sigma, M, N, P, dim, bs);
    // Surface launch-configuration errors instead of failing silently.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "RBFKernel_Tilling_8 launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaMemcpy(c, P, as * bs * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(M);
    cudaFree(N);
    cudaFree(P);
    return elapsedTime;
}
/*
RBF Kernel Implementation based on tilling (width 16).
Param @ sigma: the parameter sigma of RBF kernel;
@ Md, Nd, Pd: the input values and the output array;
@ dim: the dimension of features;
@ Pdim: the width of the result array.
*/
const int TILE_WIDTH_16 = 16;
// Launch contract: grid (rowsM/16, rowsN/16), block (16, 16). Assumes dim and
// both row counts are exact multiples of TILE_WIDTH_16 (no bounds guards).
__global__ void RBFKernel_Tilling_16(float sigma, float *Md, float *Nd, float *Pd, int dim, int Pdim) {
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Accumulate the squared distance in float; 0.0f avoids double promotion.
    float Pvalue = 0.0f;
    int Mrow = bx * TILE_WIDTH_16 + tx;
    int Nrow = by * TILE_WIDTH_16 + ty;
    __shared__ float Mds[TILE_WIDTH_16][TILE_WIDTH_16];
    __shared__ float Nds[TILE_WIDTH_16][TILE_WIDTH_16];
    for (int i = 0; i < dim / TILE_WIDTH_16; ++i) {
        // Stage one tile of each operand's feature rows into shared memory.
        Mds[tx][ty] = Md[Mrow * dim + i * TILE_WIDTH_16 + ty];
        Nds[ty][tx] = Nd[Nrow * dim + i * TILE_WIDTH_16 + tx];
        __syncthreads();  // tiles fully loaded before any thread reads them
        for (int k = 0; k < TILE_WIDTH_16; ++k)
            Pvalue += (Mds[tx][k] - Nds[ty][k]) * (Mds[tx][k] - Nds[ty][k]);
        __syncthreads();  // all reads done before the next iteration overwrites
    }
    // expf and float literals keep the whole computation in single precision
    // (the original exp/2 silently promoted to double).
    Pd[Mrow * Pdim + Nrow] = expf(-Pvalue / (2.0f * sigma * sigma));
}
/*
Wrapper function of RBF Kernel Implementation based on tilling (width 16).
Param @ sigma: the parameter sigma of RBF kernel;
@ a, b, c: the input arrays and the output array;
@ dim, as, bs: the size of the array, a should be of size as * dim, and b should be of size bs * dim.
Return @ the elapsed time of GPU calculation.
*/
float RBF_CUDA_Tilling_16(float sigma, float a[][DIM], float b[][DIM], float c[], int dim, int as, int bs) {
    cudaEvent_t start, stop;
    float elapsedTime = 0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // NOTE: grid truncates when as/bs are not multiples of TILE_WIDTH_16;
    // callers must pass divisible sizes.
    dim3 grid(as / TILE_WIDTH_16, bs / TILE_WIDTH_16);
    dim3 block(TILE_WIDTH_16, TILE_WIDTH_16);
    float *M, *N, *P;
    cudaMalloc((void **)&M, as * dim * sizeof(float));
    cudaMalloc((void **)&N, bs * dim * sizeof(float));
    cudaMalloc((void **)&P, as * bs * sizeof(float));
    cudaMemcpy(M, a, as * dim * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(N, b, bs * dim * sizeof(float), cudaMemcpyHostToDevice);
    // Events bracket the kernel only; host<->device copies are excluded.
    cudaEventRecord(start, 0);
    RBFKernel_Tilling_16 <<<grid, block>>> (sigma, M, N, P, dim, bs);
    // Surface launch-configuration errors instead of failing silently.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "RBFKernel_Tilling_16 launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaMemcpy(c, P, as * bs * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(M);
    cudaFree(N);
    cudaFree(P);
    return elapsedTime;
}
/*
RBF Kernel Implementation based on tilling (width 32).
Param @ sigma: the parameter sigma of RBF kernel;
@ Md, Nd, Pd: the input values and the output array;
@ dim: the dimension of features;
@ Pdim: the width of the result array.
*/
const int TILE_WIDTH_32 = 32;
// Launch contract: grid (rowsM/32, rowsN/32), block (32, 32) = 1024 threads.
// Assumes dim and both row counts are exact multiples of TILE_WIDTH_32.
__global__ void RBFKernel_Tilling_32(float sigma, float *Md, float *Nd, float *Pd, int dim, int Pdim) {
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    // Accumulate the squared distance in float; 0.0f avoids double promotion.
    float Pvalue = 0.0f;
    int Mrow = bx * TILE_WIDTH_32 + tx;
    int Nrow = by * TILE_WIDTH_32 + ty;
    __shared__ float Mds[TILE_WIDTH_32][TILE_WIDTH_32];
    __shared__ float Nds[TILE_WIDTH_32][TILE_WIDTH_32];
    for (int i = 0; i < dim / TILE_WIDTH_32; ++i) {
        // Stage one tile of each operand's feature rows into shared memory.
        Mds[tx][ty] = Md[Mrow * dim + i * TILE_WIDTH_32 + ty];
        Nds[ty][tx] = Nd[Nrow * dim + i * TILE_WIDTH_32 + tx];
        __syncthreads();  // tiles fully loaded before any thread reads them
        for (int k = 0; k < TILE_WIDTH_32; ++k)
            Pvalue += (Mds[tx][k] - Nds[ty][k]) * (Mds[tx][k] - Nds[ty][k]);
        __syncthreads();  // all reads done before the next iteration overwrites
    }
    // expf and float literals keep the whole computation in single precision
    // (the original exp/2 silently promoted to double).
    Pd[Mrow * Pdim + Nrow] = expf(-Pvalue / (2.0f * sigma * sigma));
}
/*
Wrapper function of RBF Kernel Implementation based on tilling (width 32).
Param @ sigma: the parameter sigma of RBF kernel;
@ a, b, c: the input arrays and the output array;
@ dim, as, bs: the size of the array, a should be of size as * dim, and b should be of size bs * dim.
Return @ the elapsed time of GPU calculation.
*/
float RBF_CUDA_Tilling_32(float sigma, float a[][DIM], float b[][DIM], float c[], int dim, int as, int bs) {
    cudaEvent_t start, stop;
    float elapsedTime = 0;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // NOTE: grid truncates when as/bs are not multiples of TILE_WIDTH_32;
    // callers must pass divisible sizes.
    dim3 grid(as / TILE_WIDTH_32, bs / TILE_WIDTH_32);
    dim3 block(TILE_WIDTH_32, TILE_WIDTH_32);
    float *M, *N, *P;
    cudaMalloc((void **)&M, as * dim * sizeof(float));
    cudaMalloc((void **)&N, bs * dim * sizeof(float));
    cudaMalloc((void **)&P, as * bs * sizeof(float));
    cudaMemcpy(M, a, as * dim * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(N, b, bs * dim * sizeof(float), cudaMemcpyHostToDevice);
    // Events bracket the kernel only; host<->device copies are excluded.
    cudaEventRecord(start, 0);
    RBFKernel_Tilling_32 <<<grid, block>>> (sigma, M, N, P, dim, bs);
    // Surface launch-configuration errors instead of failing silently.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess)
        fprintf(stderr, "RBFKernel_Tilling_32 launch failed: %s\n", cudaGetErrorString(launchErr));
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaMemcpy(c, P, as * bs * sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(M);
    cudaFree(N);
    cudaFree(P);
    return elapsedTime;
}
// Print the result array.
// Print the full n-by-m result matrix when PRINT_RESULT is set; otherwise
// print only the sum of all entries as a compact summary.
void prtResult(float res[], int n, int m) {
    if (!PRINT_RESULT) {
        float total = 0.0;
        for (int idx = 0; idx < n * m; ++idx)
            total += res[idx];
        printf("The sum of the matrix: %.2lf\n", total);
        return;
    }
    printf("Result: \n");
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < m; ++col)
            printf("%.2lf ", res[row * m + col]);
        printf("\n");
    }
}
// Benchmark driver: compares a CPU RBF-kernel baseline against the CUDA
// implementations (simple + tiled with widths 2/4/8/16/32), printing each
// variant's result summary and running time.
// NOTE(review): a, b and the c_* result arrays live on the stack; large
// AS/BS/DIM could overflow it — consider heap allocation. TODO confirm sizes.
int main() {
// Randomly generate array for testing.
srand(time(0));
float a[AS][DIM], b[BS][DIM];
// Entries are 1/k for random k in [1, 500], i.e. values in (0, 1].
for (int i = 0; i < AS; ++ i)
for (int j = 0; j < DIM; ++ j)
a[i][j] = 1.0 / (rand() % 500 + 1.0);
for (int i = 0; i < BS; ++ i)
for (int j = 0; j < DIM; ++ j)
b[i][j] = 1.0 / (rand() % 500 + 1.0);
// ============================================================ //
double duration; // execution time
timeval start, end; // since clock() is not accurate, we use gettimeofday(...) in 'sys/time.h' to calculate running time.
// => Baseline: CPU
float c_CPU[AS * BS];
gettimeofday(&start, 0);
RBF_CPU(sigma, a, b, c_CPU, DIM, AS, BS);
gettimeofday(&end, 0);
// Wall-clock elapsed time in microseconds.
duration = (end.tv_sec - start.tv_sec) * 1e6 + (end.tv_usec - start.tv_usec);
prtResult(c_CPU, AS, BS);
printf("CPU method running time: %.2f us\n", duration);
// ==> CUDA, Simple
float c_GPU_simple[AS * BS];
// GPU wrappers time the kernel with CUDA events and return milliseconds;
// the *1e3 below converts to microseconds for comparison with the CPU run.
float elapsedTime_simple = RBF_CUDA_Simple(sigma, a, b, c_GPU_simple, DIM, AS, BS);
prtResult(c_GPU_simple, AS, BS);
printf("GPU simple method running time: %.2f us\n", elapsedTime_simple * 1e3);
// ===> CUDA, Tilling (tile width = 2)
float c_GPU_tilling_2[AS * BS];
float elapsedTime_tilling_2 = RBF_CUDA_Tilling_2(sigma, a, b, c_GPU_tilling_2, DIM, AS, BS);
prtResult(c_GPU_tilling_2, AS, BS);
printf("GPU tilling method (width 2) running time: %.2f us\n", elapsedTime_tilling_2 * 1e3);
// ===> CUDA, Tilling (tile width = 4)
float c_GPU_tilling_4[AS * BS];
float elapsedTime_tilling_4 = RBF_CUDA_Tilling_4(sigma, a, b, c_GPU_tilling_4, DIM, AS, BS);
prtResult(c_GPU_tilling_4, AS, BS);
printf("GPU tilling method (width 4) running time: %.2f us\n", elapsedTime_tilling_4 * 1e3);
// ===> CUDA, Tilling (tile width = 8)
float c_GPU_tilling_8[AS * BS];
float elapsedTime_tilling_8 = RBF_CUDA_Tilling_8(sigma, a, b, c_GPU_tilling_8, DIM, AS, BS);
prtResult(c_GPU_tilling_8, AS, BS);
printf("GPU tilling method (width 8) running time: %.2f us\n", elapsedTime_tilling_8 * 1e3);
// ===> CUDA, Tilling (tile width = 16)
float c_GPU_tilling_16[AS * BS];
float elapsedTime_tilling_16 = RBF_CUDA_Tilling_16(sigma, a, b, c_GPU_tilling_16, DIM, AS, BS);
prtResult(c_GPU_tilling_16, AS, BS);
printf("GPU tilling method (width 16) running time: %.2f us\n", elapsedTime_tilling_16 * 1e3);
// ===> CUDA, Tilling (tile width = 32)
float c_GPU_tilling_32[AS * BS];
float elapsedTime_tilling_32 = RBF_CUDA_Tilling_32(sigma, a, b, c_GPU_tilling_32, DIM, AS, BS);
prtResult(c_GPU_tilling_32, AS, BS);
printf("GPU tilling method (width 32) running time: %.2f us\n", elapsedTime_tilling_32 * 1e3);
return 0;
}
|
2,918 | #include <cuda_runtime_api.h>
#define OFFSET_BANK(idx) ({ __typeof__ (idx) _idx = idx; ((_idx) + ((_idx) / 32)); })
// Whiten each activation with its channel's mean and variance:
// out[i] = (in[i] - mean[c]) * rsqrt(var[c] + epsilon).
// One thread per element of the (spatial, channel, batch) tensor.
__global__ void conv_diag_affine_white_var_fwd_batch_kernel(
    const float *in_act,
    int spatial_dim,
    int num_channels,
    int batch_size,
    const float *mean,
    const float *var,
    float epsilon,
    float *out_act)
{
  int i = threadIdx.x + blockIdx.x * blockDim.x;
  int spatial = i % spatial_dim;
  int chan = (i / spatial_dim) % num_channels;
  int batch = i / (spatial_dim * num_channels);
  // Guard against the partial last block.
  if (spatial >= spatial_dim || chan >= num_channels || batch >= batch_size) {
    return;
  }
  out_act[i] = (in_act[i] - mean[chan]) * rsqrtf(var[chan] + epsilon);
}
// Host wrapper: launch the whitening kernel with one thread per tensor
// element, 1024 threads per block, on the caller's stream (async).
extern "C" void neuralops_cuda_conv2d_whiten_fwd(
    const float *in_act,
    size_t spatial_dim,
    size_t num_channels,
    size_t batch_size,
    const float *mean,
    const float *var,
    float epsilon,
    float *out_act,
    cudaStream_t stream)
{
  int total = spatial_dim * num_channels * batch_size;
  int num_blocks = (total + 1023) / 1024;
  conv_diag_affine_white_var_fwd_batch_kernel<<<num_blocks, 1024, 0, stream>>>(
      in_act, spatial_dim, num_channels, batch_size, mean, var, epsilon, out_act);
}
// Accumulate per-channel means of `src` into mean[]. Layout of src (from the
// index arithmetic below): [batch][channel][spatial], spatial innermost.
// Each warp sums one 16*32-element strip of one channel; lane partials are
// combined with a shared-memory tree reduction, then lane 0 adds the
// normalized partial to mean[c] via atomicAdd.
// NOTE(review): mean[] is only accumulated into, never cleared here — the
// caller presumably zeroes it before launch; confirm at the call site.
__global__ void estimate_conv_mean_fast2_batch_kernel(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
float *mean)
{
// +32 slots of padding make room for OFFSET_BANK's skewed index (idx + idx/32).
__shared__ float mean_cache[1024+32];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
// Bank-conflict-avoiding shared-memory slot for this thread.
int bank_idx = OFFSET_BANK(threadIdx.x);
// Number of 16*32-element strips needed to cover one channel's spatial extent.
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int warp_idx = idx % 32;
int c = (idx / 32) % num_channels;
//int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
// Base spatial offset of this warp's strip (the lane offset is added in i0).
int u0 = ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int batch_idx = idx / (32 * num_channels * block_spatial_dim);
if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
float y = 0.0f;
/*int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
int u_limit = min(spatial_dim, u0 + 16*32);
for (int u = u0; u < u_limit; u += 32) {
int i = i0 + u;
y += src[i];
}*/
// Each lane strides by 32 through the strip; i_limit clips at the end of
// the channel's spatial range so the tail strip does not over-read.
int i0 = warp_idx + u0 + c * spatial_dim + batch_idx * spatial_dim * num_channels;
int i_limit = i0 + min(spatial_dim - warp_idx - u0, 16*32);
for (int v = 0; v < 16*32; v += 32) {
int i = i0 + v;
if (i < i_limit) {
y += src[i];
}
}
mean_cache[bank_idx] = y;
} else {
// Out-of-range threads contribute zero so the reduction tree stays valid.
mean_cache[bank_idx] = 0.0f;
}
__syncthreads();
// Pairwise tree reduction of each warp's 32 lane partials at strides
// 1, 2, 4, 8 (the final +16 fold happens in the last step below), with a
// block-wide barrier between levels.
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 2 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+1];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 4 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+2];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 8 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+4];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 16 == 0) {
mean_cache[bank_idx] += mean_cache[bank_idx+8];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 32 == 0 && u0 < spatial_dim) {
// Lane 0 folds in the remaining half-warp partial, normalizes by the
// population size (spatial_dim * batch_size), and accumulates.
float y = (mean_cache[bank_idx] + mean_cache[bank_idx+16]) / ((float)(spatial_dim) * (float)(batch_size));
atomicAdd(&mean[c], y);
}
}
}
// Host wrapper: dedicate one warp (32 lanes) to each 16*32-element strip of
// every channel and batch item, then launch the mean-accumulation kernel
// asynchronously on the caller's stream.
extern "C" void neuralops_cuda_conv2d_mean_fwd(
    const float *src,
    size_t spatial_dim,
    size_t num_channels,
    size_t batch_size,
    float *mean,
    cudaStream_t stream)
{
  int strips_per_channel = (spatial_dim + 16*32 - 1) / (16*32);
  int total_threads = 32 * num_channels * strips_per_channel * batch_size;
  int num_blocks = (total_threads + 1023) / 1024;
  estimate_conv_mean_fast2_batch_kernel<<<num_blocks, 1024, 0, stream>>>(
      src, spatial_dim, num_channels, batch_size, mean);
}
// Accumulate per-channel variances of `src` (layout [batch][channel][spatial])
// into var[], given precomputed per-channel means. Structure mirrors
// estimate_conv_mean_fast2_batch_kernel: one warp per 16*32-element strip,
// shared-memory tree reduction, one atomicAdd per warp.
// NOTE(review): var[] is only accumulated into — caller presumably zeroes it.
__global__ void estimate_conv_var_fast2_batch_kernel(
const float *src,
int spatial_dim,
int num_channels,
int batch_size,
const float *mean,
float *var)
{
// +32 slots of padding make room for OFFSET_BANK's skewed index (idx + idx/32).
__shared__ float var_cache[1024+32];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int bank_idx = OFFSET_BANK(threadIdx.x);
// Number of 16*32-element strips needed to cover one channel's spatial extent.
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int warp_idx = idx % 32;
int c = (idx / 32) % num_channels;
//int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
// Base spatial offset of this warp's strip (the lane offset is added in i0).
int u0 = ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int batch_idx = idx / (32 * num_channels * block_spatial_dim);
if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
float mean_c = mean[c];
float y = 0.0f;
/*int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
int u_limit = min(spatial_dim, u0 + 16*32);
for (int u = u0; u < u_limit; u += 32) {
int i = i0 + u;
float delta = src[i] - mean_c;
y += delta * delta;
}*/
// Each lane strides by 32; i_limit clips the tail strip at the channel end.
int i0 = warp_idx + u0 + c * spatial_dim + batch_idx * spatial_dim * num_channels;
int i_limit = i0 + min(spatial_dim - warp_idx - u0, 16*32);
for (int v = 0; v < 16*32; v += 32) {
int i = i0 + v;
if (i < i_limit) {
// Sum of squared deviations from the channel mean.
float delta = src[i] - mean_c;
y += delta * delta;
}
}
var_cache[bank_idx] = y;
} else {
// Out-of-range threads contribute zero so the reduction tree stays valid.
var_cache[bank_idx] = 0.0f;
}
__syncthreads();
// Pairwise tree reduction of each warp's 32 lane partials (strides 1,2,4,8;
// the final +16 fold is in the last step), barrier between levels.
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 2 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+1];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 4 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+2];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 8 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+4];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 16 == 0) {
var_cache[bank_idx] += var_cache[bank_idx+8];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 32 == 0 && u0 < spatial_dim) {
// NOTE(review): the normalizer applies a -1 correction to BOTH factors,
// i.e. (S-1)*(B-1) rather than S*B-1 — verify this is the intended
// unbiasing (the bwd kernels below use the same convention).
float y = (var_cache[bank_idx] + var_cache[bank_idx+16]) / ((float)(spatial_dim-1) * (float)(batch_size-1));
atomicAdd(&var[c], y);
}
}
}
// Host wrapper: one warp per 16*32-element strip of each channel and batch
// item; launches the variance-accumulation kernel on the caller's stream.
extern "C" void neuralops_cuda_conv2d_var_fwd(
    const float *src,
    size_t spatial_dim,
    size_t num_channels,
    size_t batch_size,
    const float *mean,
    float *var,
    cudaStream_t stream)
{
  int strips_per_channel = (spatial_dim + 16*32 - 1) / (16*32);
  int total_threads = 32 * num_channels * strips_per_channel * batch_size;
  int num_blocks = (total_threads + 1023) / 1024;
  estimate_conv_var_fast2_batch_kernel<<<num_blocks, 1024, 0, stream>>>(
      src, spatial_dim, num_channels, batch_size, mean, var);
}
// Batch-norm backward, variance term: accumulates into var_grad[c] the sum of
// out_delta * d/d(var) [(x - mu) * rsqrt(var + eps)], whose per-element value
// is -0.5 * (x - mu) * (var + eps)^(-3/2) — matching the loop body below.
// Same warp-per-strip layout and tree reduction as the forward stat kernels.
// NOTE(review): var_grad[] is only accumulated into — caller presumably zeroes it.
__global__ void conv_bnorm_bwd_var_batch_kernel(
const float *in_act,
int spatial_dim,
int num_channels,
int batch_size,
const float *out_delta,
const float *mean,
const float *var,
float epsilon,
float *var_grad)
{
// +32 padding for OFFSET_BANK's skewed index (idx + idx/32).
__shared__ float d_sigma_cache[1024+32];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int bank_idx = OFFSET_BANK(threadIdx.x);
// Number of 16*32-element strips covering one channel's spatial extent.
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int warp_idx = idx % 32;
int c = (idx / 32) % num_channels;
// Unlike the forward stat kernels, u0 here already includes the lane offset.
int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int batch_idx = idx / (32 * num_channels * block_spatial_dim);
if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
float mu = mean[c];
float sigma = var[c];
float inv_sqrt_sigma = rsqrtf(sigma + epsilon);
float d_sigma = 0.0f;
// Layout: [batch][channel][spatial], spatial innermost.
int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
int u_limit = min(spatial_dim, u0 + 16*32);
for (int u = u0; u < u_limit; u += 32) {
int i = i0 + u;
// inv_sqrt_sigma / (sigma + eps) == (sigma + eps)^(-3/2)
d_sigma += out_delta[i] * -0.5f * inv_sqrt_sigma / (sigma + epsilon) * (in_act[i] - mu);
}
d_sigma_cache[bank_idx] = d_sigma;
} else {
// Out-of-range threads contribute zero so the reduction tree stays valid.
d_sigma_cache[bank_idx] = 0.0f;
}
__syncthreads();
// Pairwise tree reduction of the 32 lane partials (strides 1,2,4,8;
// final +16 fold in the last step), barrier between levels.
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 2 == 0) {
d_sigma_cache[bank_idx] += d_sigma_cache[bank_idx+1];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 4 == 0) {
d_sigma_cache[bank_idx] += d_sigma_cache[bank_idx+2];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 8 == 0) {
d_sigma_cache[bank_idx] += d_sigma_cache[bank_idx+4];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 16 == 0) {
d_sigma_cache[bank_idx] += d_sigma_cache[bank_idx+8];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 32 == 0 && u0 < spatial_dim) {
// Warp leader publishes its (unnormalized) partial gradient.
float d_sigma = d_sigma_cache[bank_idx] + d_sigma_cache[bank_idx+16];
atomicAdd(&var_grad[c], d_sigma);
}
}
}
// Batch-norm backward, mean term: accumulates into mean_grad[c] the sum of
//   out_delta * (-rsqrt(var + eps))           (direct path through y)
// + var_grad  * (-2/((S-1)(B-1))) * (x - mu)  (indirect path through var)
// per element. Requires var_grad to be fully computed first (separate launch).
// Same warp-per-strip layout and tree reduction as the var kernel above.
// NOTE(review): mean_grad[] is only accumulated into — caller presumably zeroes it.
__global__ void conv_bnorm_bwd_mean_batch_kernel(
const float *in_act,
int spatial_dim,
int num_channels,
int batch_size,
const float *out_delta,
const float *mean,
const float *var,
const float *var_grad,
float epsilon,
float *mean_grad)
{
// +32 padding for OFFSET_BANK's skewed index (idx + idx/32).
__shared__ float d_mu_cache[1024+32];
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int bank_idx = OFFSET_BANK(threadIdx.x);
int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
int warp_idx = idx % 32;
int c = (idx / 32) % num_channels;
// u0 already includes the lane offset here.
int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
int batch_idx = idx / (32 * num_channels * block_spatial_dim);
if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
// Matches the (S-1)*(B-1) variance normalizer used in the forward pass.
float inv_var_norm = 1.0f / ((float)(spatial_dim - 1) * (float)(batch_size - 1));
float mu = mean[c];
float sigma = var[c];
float inv_sqrt_sigma = rsqrtf(sigma + epsilon);
float d_sigma = var_grad[c];
float d_mu = 0.0f;
// Layout: [batch][channel][spatial], spatial innermost.
int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
int u_limit = min(spatial_dim, u0 + 16*32);
for (int u = u0; u < u_limit; u += 32) {
int i = i0 + u;
d_mu += out_delta[i] * -inv_sqrt_sigma + d_sigma * -2.0f * inv_var_norm * (in_act[i] - mu);
}
d_mu_cache[bank_idx] = d_mu;
} else {
// Out-of-range threads contribute zero so the reduction tree stays valid.
d_mu_cache[bank_idx] = 0.0f;
}
__syncthreads();
// Pairwise tree reduction of the 32 lane partials (strides 1,2,4,8;
// final +16 fold in the last step), barrier between levels.
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 2 == 0) {
d_mu_cache[bank_idx] += d_mu_cache[bank_idx+1];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 4 == 0) {
d_mu_cache[bank_idx] += d_mu_cache[bank_idx+2];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 8 == 0) {
d_mu_cache[bank_idx] += d_mu_cache[bank_idx+4];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 16 == 0) {
d_mu_cache[bank_idx] += d_mu_cache[bank_idx+8];
}
}
__syncthreads();
if (c < num_channels && batch_idx < batch_size) {
if (threadIdx.x % 32 == 0 && u0 < spatial_dim) {
// Warp leader publishes its (unnormalized) partial gradient.
float d_mu = d_mu_cache[bank_idx] + d_mu_cache[bank_idx+16];
atomicAdd(&mean_grad[c], d_mu);
}
}
}
// Batch-norm backward, data term: writes the input gradient
//   in_delta = out_delta * rsqrt(var+eps)
//            + mean_grad / (S*B)
//            + var_grad * 2/((S-1)(B-1)) * (x - mu)
// per element. Requires mean_grad and var_grad fully computed first
// (separate launches). Uses the same warp-per-strip thread layout as the
// reduction kernels but needs no shared memory (pure elementwise writes);
// the unused OFFSET_BANK index from the template was removed.
__global__ void conv_bnorm_bwd_data_batch_kernel(
    const float *in_act,
    int spatial_dim,
    int num_channels,
    int batch_size,
    const float *out_delta,
    const float *mean,
    const float *mean_grad,
    const float *var,
    const float *var_grad,
    float epsilon,
    float *in_delta)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    int block_spatial_dim = (spatial_dim+16*32-1)/(16*32);
    int warp_idx = idx % 32;
    int c = (idx / 32) % num_channels;
    // u0 includes the lane offset; each lane strides by 32 through its strip.
    int u0 = warp_idx + ((idx / (32 * num_channels)) % block_spatial_dim) * (16*32);
    int batch_idx = idx / (32 * num_channels * block_spatial_dim);
    if (c < num_channels && u0 < spatial_dim && batch_idx < batch_size) {
        // Normalizers match the forward mean (S*B) and variance ((S-1)(B-1)).
        float inv_mean_norm = 1.0f / ((float)(spatial_dim) * (float)(batch_size));
        float inv_var_norm = 1.0f / ((float)(spatial_dim - 1) * (float)(batch_size - 1));
        float mu = mean[c];
        float d_mu = mean_grad[c];
        float sigma = var[c];
        float d_sigma = var_grad[c];
        // Layout: [batch][channel][spatial], spatial innermost.
        int i0 = c * spatial_dim + batch_idx * spatial_dim * num_channels;
        int u_limit = min(spatial_dim, u0 + 16*32);
        for (int u = u0; u < u_limit; u += 32) {
            int i = i0 + u;
            in_delta[i] = out_delta[i] * rsqrtf(sigma + epsilon) + d_mu * inv_mean_norm + d_sigma * 2.0f * inv_var_norm * (in_act[i] - mu);
        }
    }
}
// Host wrapper: run the three batch-norm backward passes in order on the same
// stream — var gradient, then mean gradient (consumes var_grad), then the
// data gradient (consumes both). Stream ordering serializes the launches.
extern "C" void neuralops_cuda_conv2d_batchnorm_bwd(
    const float *in_act,
    size_t spatial_dim,
    size_t num_channels,
    size_t batch_size,
    const float *out_delta,
    const float *mean,
    const float *var,
    float epsilon,
    float *mean_grad,
    float *var_grad,
    float *in_delta,
    cudaStream_t stream)
{
  int strips_per_channel = (spatial_dim + 16*32 - 1) / (16*32);
  int total_threads = 32 * num_channels * strips_per_channel * batch_size;
  int num_blocks = (total_threads + 1023) / 1024;
  conv_bnorm_bwd_var_batch_kernel<<<num_blocks, 1024, 0, stream>>>(
      in_act, spatial_dim, num_channels, batch_size, out_delta, mean, var, epsilon, var_grad);
  conv_bnorm_bwd_mean_batch_kernel<<<num_blocks, 1024, 0, stream>>>(
      in_act, spatial_dim, num_channels, batch_size, out_delta, mean, var, var_grad, epsilon, mean_grad);
  conv_bnorm_bwd_data_batch_kernel<<<num_blocks, 1024, 0, stream>>>(
      in_act, spatial_dim, num_channels, batch_size, out_delta, mean, mean_grad, var, var_grad, epsilon, in_delta);
}
|
2,919 | #include "cuda.h"
#include <stdio.h>
// Scatter: out[ii[p]-1] = update[p] for each of the n entries.
// Indices in ii are 1-based, hence the -1.
__global__ void ScatterNdOps_forward_kernel(double *out, const long long*ii, const double *update, int n){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    out[ii[tid] - 1] = update[tid];
}
// Zero-fill the first n entries of out, one thread per entry.
__global__ void setzero_kernel(double *out, int n){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        out[tid] = 0.0;
}
// Zero the N-element destination, then scatter the n updates into it.
// Both launches go to the default stream, so they execute in order.
void Gpu_ScatterNdOps_forward(double *out, const long long *ii,
        const double *update, int n, int N){
    const int threads = 64;
    setzero_kernel<<< (N - 1) / threads + 1, threads >>>(out, N);
    ScatterNdOps_forward_kernel<<< (n - 1) / threads + 1, threads >>>(out, ii, update, n);
}
// Gradient of the scatter: gather grad_out back at the 1-based scatter
// positions, one thread per update entry. (out/update are unused but kept
// for the caller's signature.)
__global__ void ScatterNdOps_backward_kernel(double *grad_update,
        const double *grad_out,
        const double *out, const long long *ii,
        const double *update, int n){
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) return;
    grad_update[tid] = grad_out[ii[tid] - 1];
}
// Backward pass for ScatterNd: clear grad_update, then gather grad_out at the
// scattered positions. Default-stream launches execute in order.
void Gpu_ScatterNdOps_backward(
        double *grad_update,
        const double *grad_out,
        const double *out, const long long *ii,
        const double *update, int n){
    const int threads = 64;
    int blocks = (n - 1) / threads + 1;
    setzero_kernel<<< blocks, threads >>>(grad_update, n);
    ScatterNdOps_backward_kernel<<< blocks, threads >>>(grad_update, grad_out, out, ii, update, n);
}
// Copy a single int64 count from device memory `m` into host memory `out`.
// cudaMemcpy without a stream is blocking, so the value is valid on return.
void get_ScatterNdOps_num(long long *out, const long long *m){
cudaMemcpy(out, m, sizeof(long long), cudaMemcpyDeviceToHost);
}
2,920 | #include<stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include<stdlib.h>
/* Thread structure: 1, m*n
This function initializes random values into an array according to the above thread structure
*/
/* Thread structure: 1, m*n
   Fill W (indexed as W[rows*col + row]) with uniform random values,
   one thread per entry. */
__global__ void init(double *W){
    int row = threadIdx.x;
    int col = threadIdx.y;
    int rows = blockDim.x;
    // Per-thread curand state seeded from the device clock; (row, col) give
    // each thread a distinct subsequence/offset.
    curandState rng;
    curand_init(clock64(), row, col, &rng);
    W[rows * col + row] = curand_uniform(&rng);
}
/* Thread structure: N, m*n
This function calculates the essentials of the next layer
provided the inputs
*/
/* Thread structure: N, m*n
   Computes one layer for each of the N samples: h = sigmoid(W*x + b).
   Each (mx, nx) thread adds one product term into h[Nx*m + mx]; the nx==0
   thread then applies the sigmoid to the accumulated sum. */
__global__ void next_layer(double *X, double *h, double *W, double *b){
    int mx = threadIdx.x;
    int nx = threadIdx.y;
    int m = blockDim.x, n = blockDim.y;
    int Nx = blockIdx.x;
    // Initializing h: only one thread per output unit seeds it with the bias.
    if(nx == 0){
        h[Nx*m + mx] = b[mx];
    }
    __syncthreads();
    // atomicAdd because all n threads of a row target the same accumulator.
    atomicAdd(&h[Nx*m + mx], X[Nx*n + nx] * W[m*nx + mx]);
    // BUG FIX: barrier so every partial product has been added before the
    // sigmoid reads the sum (previously a data race — the nx==0 thread could
    // apply the sigmoid while other threads were still accumulating).
    __syncthreads();
    // Applying sigmoid function here for the loss value
    double e;
    if(nx == 0){
        e = exp(h[Nx*m + mx]);
        h[Nx*m + mx] = e/(1 + e);
    }
    //printf("device: %d %lf\n", Nx*m + mx, h[Nx*m + mx]);
}
/* Thread structure: N, m*n
This function calculates the loss suffered by the layer
at any point in time
*/
/* Thread structure: N, m*n
   Accumulates into *loss the squared reconstruction error plus an L2
   penalty on W1/W2.
   NOTE(review): only block 0's thread zeroes *loss and __syncthreads() does
   not order blocks, so other blocks may accumulate before the reset lands;
   a host-side cudaMemset before launch would be more robust. */
__global__ void calc_loss(double *Z, double *Y, double *W1, double *W2, double *loss){
    int mx = threadIdx.x;
    int nx = threadIdx.y;
    int m = blockDim.x, n = blockDim.y;
    int Nx = blockIdx.x;
    double d = 0, lambda = 0.01;
    // BUG FIX: the original wrote `loss = 0;`, which nulled the local pointer
    // instead of clearing the accumulator it points to.
    if(Nx + mx + nx == 0)
        *loss = 0;
    __syncthreads();
    // Squared loss
    if(mx == 0){
        d = Z[Nx*n + nx] - Y[Nx*n + nx];
        d = d * d;
        printf("calc_loss Z: %d %lf\n", Nx*n+nx, Z[Nx*n+nx]);
    }
    // Loss functions second term
    if(Nx == 0){
        d += lambda * (W1[m*nx + mx] * W1[m*nx + mx] + W2[n*mx + nx] * W2[n*mx + nx]);
        printf("Index: W1: %d, W2: %d\n", m*nx + mx, m*nx + mx);
    }
    // BUG FIX: accumulate atomically — the plain `*loss += d` was a data race
    // (the original even flagged it). double atomicAdd requires SM60+.
    if(d != 0){
        atomicAdd(loss, d);
    }
    __syncthreads();
    if(Nx + mx + nx == 0)
        printf("calc_loss loss: %lf\n", *loss);
}
/* Function to check wrong memory
access and other errors
*/
/* Abort with a diagnostic naming `msg` if the most recent CUDA
   call or kernel launch left an error behind. */
void checkCUDAError(const char* msg) {
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess) {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(status));
        exit(EXIT_FAILURE);
    }
}
/* Thread structure: N, m*n
This function deals with the backpropogation and updating the values
according to the learning rate as set by the user
*/
/* Thread structure: N, m*n
   This function deals with the backpropogation and updating the values
   according to the learning rate as set by the user.
   Gradient-descent step for the two-layer sigmoid autoencoder: applies
   weight decay to W1/W2, then accumulates -eta * gradient into b2, W2, b1
   and W1 via atomicAdd.
   NOTE(review): W2 is read below while other blocks concurrently atomicAdd
   into it (no inter-block ordering), so updates use partially-updated
   weights; also eta=10 is a very aggressive step size — confirm intended. */
__global__ void update(double *X, double *Y, double *Z, double *W1, double *W2, double *b1, double *b2, double *h){
int mx = threadIdx.x;
int nx = threadIdx.y;
int m = blockDim.x, n = blockDim.y;
int Nx = blockIdx.x;
// eta: learning rate; lambda: weight-decay coefficient.
double eta = 10, lambda = 0.1;
double temp;
// Frob norm — weight decay applied once (only by block 0's threads).
if(Nx == 0){
atomicAdd(&W2[n*mx + nx], -lambda*W2[n*mx + nx]);
atomicAdd(&W1[m*nx + mx], -lambda*W1[m*nx + mx]);
}
__syncthreads();
// Loss for b2: dL/db2 = (Z - Y) * Z * (1 - Z)  (sigmoid derivative),
// scaled by -eta; one thread per output unit per sample.
if(mx == 0){ // for single m
temp = (Z[n*Nx + nx] - Y[n*Nx + nx]) * Z[n*Nx + nx] * (1 - Z[n*Nx + nx]) * (-eta);
printf("Temp: %.12lf\n", temp);
atomicAdd(&b2[nx], temp);
}
// Loss for W2: same delta times the hidden activation h.
temp = (Z[n*Nx + nx] - Y[n*Nx + nx]) * Z[n*Nx + nx] * (1 - Z[n*Nx + nx]) * h[m*Nx + mx] * (-eta);
atomicAdd(&W2[n*mx + nx], temp);
// Loss for b1: delta backpropagated through W2 and the hidden sigmoid.
temp = (Z[n*Nx + nx] - Y[n*Nx + nx]) * Z[n*Nx + nx] * (1 - Z[n*Nx + nx]) * h[m*Nx + mx] * (1 - h[m*Nx + mx]) * W2[n*mx + nx] * (-eta);
atomicAdd(&b1[mx], temp);
// Loss for W1: same backpropagated delta times each input component.
for(int it = 0; it < n; it++){
temp = (Z[n*Nx + nx] - Y[n*Nx + nx]) * Z[n*Nx + nx] * (1 - Z[n*Nx + nx]) * h[m*Nx + mx] * (1 - h[m*Nx + mx]) * W2[n*mx + nx] * X[Nx*n + it]* (-eta);
atomicAdd(&W1[m*it + mx], temp);
}
}
/* Pre training the model
*/
/* Pre-train one autoencoder stage: randomly initialize the weights that need
   it, then run T epochs of (forward, forward, update) over all N samples. */
void pre_train(int N, int m, int n, double *d_X, double *d_Y, double *d_Z, double *d_W1, double *d_W2, double *d_b1, double *d_b2, double *d_h, double *d_loss, int which_itr){
    const int T = 10;
    dim3 threads(m, n), threads_T(n, m);
    // W1/b1 only need random values on the very first call; later stages
    // reuse weights trained by an earlier pre_train.
    if(which_itr == 0){
        init<<<1, threads>>>(d_W1);
        init<<<1, m>>>(d_b1);
    }
    // The second layer is freshly initialized on every call.
    init<<<1, threads_T>>>(d_W2);
    init<<<1, n>>>(d_b2);
    // Wait for initialization before the training loop starts.
    cudaDeviceSynchronize();
    for(int epoch = 0; epoch < T; epoch++){
        printf("Iteration number: %d\n", epoch);
        // Forward: input -> hidden layer.
        next_layer<<<N, threads>>>(d_X, d_h, d_W1, d_b1);
        cudaDeviceSynchronize();
        //checkCUDAError("memory copy in next_layer");
        // Forward: hidden layer -> reconstruction.
        next_layer<<<N, threads_T>>>(d_h, d_Z, d_W2, d_b2);
        cudaDeviceSynchronize();
        //checkCUDAError("memory copy in second next_layer");
        // Backprop and parameter update for this epoch.
        update<<<N, threads>>>(d_X, d_Y, d_Z, d_W1, d_W2, d_b1, d_b2, d_h);
        cudaDeviceSynchronize();
        // Loss
        /*calc_loss<<<N, threads>>>(d_Z, d_Y, d_W1, d_W2, d_loss + t);
        cudaDeviceSynchronize();
        checkCUDAError("memory copy in calc_loss");
        */
    }
}
/* thread structure: N, m*n
Calculates the loss for the test image
as supplied by the user
*/
/* thread structure: N, m*n
   Test-time loss: squared reconstruction error plus an L2 penalty over all
   four weight matrices, accumulated into *loss.
   NOTE(review): as in calc_loss, only block 0 zeroes *loss and blocks are
   not ordered; prefer a host-side cudaMemset before launch. */
__global__ void calc_loss2(double *Z, double *Y, double *W1, double *W2, double *W3, double *W4, double *loss){
    int mx = threadIdx.x;
    int nx = threadIdx.y;
    int m = blockDim.x, n = blockDim.y;
    int Nx = blockIdx.x;
    double d = 0, lambda = 0.01;
    // BUG FIX: the original wrote `loss = 0;`, which nulled the local pointer
    // instead of clearing the accumulator it points to.
    if(Nx + mx + nx == 0)
        *loss = 0;
    __syncthreads();
    if(mx == 0){
        d = Z[Nx*n + nx] - Y[Nx*n + nx];
        d = d * d;
        printf("calc_loss Z: %d %lf\n", Nx*n+nx, Z[Nx*n+nx]);
    }
    if(Nx == 0){
        d += lambda * (W1[m*nx + mx] * W1[m*nx + mx] + W2[n*mx + nx] * W2[n*mx + nx]);
        d += lambda * (W3[m*nx + mx] * W3[m*nx + mx] + W4[n*mx + nx] * W4[n*mx + nx]);
        printf("Index: W1: %d, W2: %d\n", m*nx + mx, m*nx + mx);
    }
    // BUG FIX: accumulate atomically — the plain `*loss += d` was a data race.
    // double atomicAdd requires SM60+.
    if(d != 0){
        atomicAdd(loss, d);
    }
    __syncthreads();
    if(Nx + mx + nx == 0)
        printf("calc_loss loss: %lf\n", *loss);
}
/* Training the full sda after the pre training
*/
/* Fine-tune the stacked autoencoder after pre-training: T epochs of a full
   forward pass through all four layers. The joint weight update for this
   phase is not implemented yet (see UPDATE2 marker). */
void train(int N, int m, int n, double *d_X, double *d_Y, double *d_Z, double *d_W1, double *d_W2, double *d_b1, double *d_b2, double *d_h, double *d_loss, double *d_W3, double *d_b3, double *d_hh, double *d_W4, double *d_b4, double *d_hhh){
    const int T = 10;
    dim3 threads(m, n), threads_T(n, m);
    for(int epoch = 0; epoch < T; epoch++){
        next_layer<<<N, threads>>>(d_X, d_h, d_W1, d_b1);      // layer 1
        cudaDeviceSynchronize();
        next_layer<<<N, threads_T>>>(d_h, d_hh, d_W2, d_b2);   // layer 2
        cudaDeviceSynchronize();
        next_layer<<<N, threads>>>(d_hh, d_hhh, d_W3, d_b3);   // layer 3
        cudaDeviceSynchronize();
        next_layer<<<N, threads_T>>>(d_hhh, d_Z, d_W4, d_b4);  // layer 4
        cudaDeviceSynchronize();
        // update rule here
        // UPDATE2
    }
}
/* Function to calculate the result of the test image as
supplied by the user in order to test it
*/
/* Run a single forward pass of the trained four-layer network on the test
   input in d_X, leaving the reconstruction in d_Z. */
void test(int N, int m, int n, double *d_X, double *d_Z, double *d_W1, double *d_W2, double *d_b1, double *d_b2, double *d_h, double *d_loss, double *d_W3, double *d_b3, double *d_hh, double *d_W4, double *d_b4, double *d_hhh){
    dim3 threads(m, n), threads_T(n, m);
    next_layer<<<N, threads>>>(d_X, d_h, d_W1, d_b1);      // layer 1
    cudaDeviceSynchronize();
    next_layer<<<N, threads_T>>>(d_h, d_hh, d_W2, d_b2);   // layer 2
    cudaDeviceSynchronize();
    next_layer<<<N, threads>>>(d_hh, d_hhh, d_W3, d_b3);   // layer 3
    cudaDeviceSynchronize();
    next_layer<<<N, threads_T>>>(d_hhh, d_Z, d_W4, d_b4);  // layer 4
    cudaDeviceSynchronize();
    // calculating loss of test
    // Uncomment for checking what your loss is
    // calc_loss2<<<N, threads>>>(d_Z, d_Y, d_W1, d_W2, d_W3, d_W4, d_loss);
}
int main(){
    // Driver for greedy 3-stage pre-training of a stacked autoencoder.
    // stdin: "N n m", then N*n doubles for the corrupted image X, N*n doubles
    // for the clean target Y, and N*n doubles for a test image. Each scanned
    // value is echoed back (kept from the original behavior).
    int N, m, n, T;
    scanf("%d %d %d", &N, &n, &m);
    T = 10;  // number of loss samples kept per pre-train pass

    double *X, *Y, *h, *Z, *loss;
    double *test_img;
    // Host buffers.
    X = (double*)malloc(N*n*sizeof(double));
    Y = (double*)malloc(N*n*sizeof(double));
    h = (double*)malloc(N*m*sizeof(double));
    Z = (double*)malloc(N*n*sizeof(double));
    loss = (double*)malloc(T * sizeof(double));
    test_img = (double*)malloc(N*n*sizeof(double));

    // Device buffers, grouped by pre-train stage.
    double *d_X, *d_Y, *d_W1, *d_W2, *d_b1, *d_b2, *d_h, *d_loss, *d_Z;
    double *d_hY, *d_hZ, *d_W3, *d_W4, *d_b3, *d_b4, *d_hh;
    double *d_hhY, *d_hhZ, *d_hhh;
    // first pre train
    cudaMalloc((void**)&d_W1, sizeof(double) * n * m);
    cudaMalloc((void**)&d_W2, sizeof(double) * m * n);
    cudaMalloc((void**)&d_X, sizeof(double) * N * n);
    cudaMalloc((void**)&d_Y, sizeof(double) * N * n);
    cudaMalloc((void**)&d_Z, sizeof(double) * N * n);
    cudaMalloc((void**)&d_b1, sizeof(double) * m);
    cudaMalloc((void**)&d_b2, sizeof(double) * n);
    cudaMalloc((void**)&d_h, sizeof(double) * N * m);
    cudaMalloc((void**)&d_loss, sizeof(double) * T);
    // second pre train
    cudaMalloc((void**)&d_hY, sizeof(double) * N * m);
    cudaMalloc((void**)&d_hZ, sizeof(double) * N * m);
    cudaMalloc((void**)&d_W3, sizeof(double) * n * m);
    cudaMalloc((void**)&d_b3, sizeof(double) * m);
    cudaMalloc((void**)&d_hh, sizeof(double) * N * n);
    // third pre train
    cudaMalloc((void**)&d_hhY, sizeof(double) * N * n);
    cudaMalloc((void**)&d_hhZ, sizeof(double) * N * n);
    cudaMalloc((void**)&d_W4, sizeof(double) * m * n);
    cudaMalloc((void**)&d_b4, sizeof(double) * n);
    cudaMalloc((void**)&d_hhh, sizeof(double) * N * m);

    // Scanning the input corrupted image.
    for(int i = 0; i < N*n; i++){
        scanf("%lf", &X[i]);
        printf("%lf\n", X[i]);
    }
    // Scanning the original image.
    for(int i = 0; i < N*n; i++){
        scanf("%lf", &Y[i]);
        printf("%lf\n", Y[i]);
    }
    // Scanning the test image.
    for(int i = 0;i < N*n; i++){
        scanf("%lf", &test_img[i]);
        printf("%lf\n", test_img[i]);
    }
    // Copy X and Y vectors to device memory.
    cudaMemcpy(d_X, X, sizeof(double) * N * n, cudaMemcpyHostToDevice);
    cudaMemcpy(d_Y, Y, sizeof(double) * N * n, cudaMemcpyHostToDevice);
    // 2D thread layouts for the layer kernels.
    dim3 threads_T(n, m), threads(m, n);
    // first pre train
    pre_train(N, m, n, d_X, d_Y, d_Z, d_W1, d_W2, d_b1, d_b2, d_h, d_loss, 0);
    // calculate hY
    next_layer<<<N, threads_T>>>(d_Y, d_hY, d_W1, d_b1);
    // second pre train
    pre_train(N, n, m, d_h, d_hY, d_hZ, d_W2, d_W3, d_b2, d_b3, d_hh, d_loss, 1);
    // calculate hhY
    next_layer<<<N, threads>>>(d_hY, d_hhY, d_W2, d_b2);
    // third pre train
    pre_train(N, m, n, d_hh, d_hhY, d_hhZ, d_W3, d_W4, d_b3, d_b4, d_hhh, d_loss, 2);
    // Copy h and loss back from device memory to CPU memory.
    cudaMemcpy(h, d_h, sizeof(double) * N * m, cudaMemcpyDeviceToHost);
    cudaMemcpy(loss, d_loss, sizeof(double) * T, cudaMemcpyDeviceToHost);
    /******* TESTING IMAGE ***********/
    cudaMemcpy(d_X, test_img, sizeof(double) * N * n, cudaMemcpyHostToDevice);
    test(N, m, n, d_X, d_Z, d_W1, d_W2, d_b1, d_b2, d_h, d_loss, d_W3, d_b3, d_hh, d_W4, d_b4, d_hhh);
    /********************************/
    // Copy Z vector back from device memory to CPU memory and print it.
    cudaMemcpy(Z, d_Z, sizeof(double) * N * n, cudaMemcpyDeviceToHost);
    printf("\nZ\n");
    for(int i = 0;i < N*n; i++)
        printf("%0.12lf ", Z[i]);
    printf("\n");
    // Free device memory.
    cudaFree(d_W1); cudaFree(d_W2); cudaFree(d_b1); cudaFree(d_b2);
    cudaFree(d_X); cudaFree(d_Y); cudaFree(d_h); cudaFree(d_Z);
    cudaFree(d_loss);
    cudaFree(d_hY); cudaFree(d_hZ); cudaFree(d_W3); cudaFree(d_b3);
    cudaFree(d_hh); cudaFree(d_hhY); cudaFree(d_hhZ); cudaFree(d_W4);
    cudaFree(d_b4); cudaFree(d_hhh);
    // Free CPU memory. Fix: test_img was malloc'd but never freed (leak).
    free(X); free(Y); free(Z); free(h); free(loss);
    free(test_img);
    return 0;
}
|
2,921 | #include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <math.h>
#define N 20480
// declare the kernel
// DAXPY: y[i] += a * x[i] over the N-element vectors, one thread per element.
__global__ void daxpy(double a, double *x, double *y) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;  // guard the grid tail
    y[idx] += a * x[idx];
}
int main(void) {
    // Fill x and y with pseudo-random values in [0,1], then run y += a*x on
    // the device via unified memory.
    double *x, *y, a;
    int i;
    size_t size = N*sizeof(double);
    // allocate unified memory
    cudaMallocManaged(&x, size);
    cudaMallocManaged(&y, size);
    // Fix: the original seeded with srand() but drew values from random(),
    // which is seeded by srandom() — so the sequence was never seeded. Draw
    // from rand() to match the srand() call. (Unused locals m/tmp removed.)
    srand(time(NULL));
    a = (double)rand() / RAND_MAX;
    for (i=0; i<N; i++)
        x[i] = (double)rand() / RAND_MAX;
    for (i=0; i<N; i++)
        y[i] = (double)rand() / RAND_MAX;
    // launch the kernel: N (20480) is a multiple of 256, so N/256 blocks of
    // 256 threads cover every element exactly.
    daxpy<<<N/256,256>>>(a, x, y);
    cudaDeviceSynchronize();
    // deallocate unified memory
    cudaFree(x);
    cudaFree(y);
}
|
2,922 | #include<iostream>
#include<cstdlib>
#include<cmath>
#include<time.h>
using namespace std;
// c = A * b for an n x n matrix stored so that element (row, col) lives at
// a[col*n + row]; one thread computes one output row.
__global__ void matrixVectorMultiplication(int *a, int *b, int *c, int n)
{
    const int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= n)
        return;
    int acc = 0;
    for (int col = 0; col < n; ++col)
        acc += a[col * n + row] * b[col];
    c[row] = acc;
}
int main()
{
    // GPU matrix-vector multiply with a CPU reference check.
    int *a,*b,*c;
    int *a_dev,*b_dev,*c_dev;
    int n=10;
    a=new int[n*n];
    b=new int[n];
    c=new int[n];
    int *d=new int[n];
    int size=n*sizeof(int);
    // Fix: the matrix needs n*n ints = n*size bytes. The original allocated
    // and copied size*size = n*n*sizeof(int)*sizeof(int) bytes — sizeof(int)
    // times too much. (The unused 512x512 dim3 config was also removed; the
    // actual launch below uses a 1D configuration.)
    cudaMalloc(&a_dev,n*size);
    cudaMalloc(&b_dev,size);
    cudaMalloc(&c_dev,size);
    cout<<"\n\nMatrix is :\n\n";
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            a[i*n+j]= i*n+j+1; //rand()%n;
            cout<<a[i*n+j]<<" ";
        }
        b[i]= i+1; //rand()%n;
        cout<<"\n";
    }
    cout<<"\n\nVector is: \n\n";
    for(int i=0;i<n;i++)
        cout<<b[i]<<" ";
    cout<<"\n\n";
    cudaMemcpy(a_dev,a,n*size,cudaMemcpyHostToDevice);
    cudaMemcpy(b_dev,b,size,cudaMemcpyHostToDevice);
    // One thread per output row; ceil-style block count.
    matrixVectorMultiplication<<<n/256 +1,256>>>(a_dev,b_dev,c_dev,n);
    cudaMemcpy(c,c_dev,size,cudaMemcpyDeviceToHost);
    // CPU matrixVector multiplication (reference).
    clock_t t=clock();
    int sum=0;
    for(int row=0;row<n;row++)
    {
        sum=0;
        for(int col=0;col<n;col++)
        {
            sum=sum+a[col*n+row]*b[col];
        }
        d[row]=sum;
    }
    t=clock()-t;
    cout<<"\nCPU Time Elapsed: "<<((double)t); //((double)t)/CLOCKS_PER_SEC;
    int error=0;
    cout<<"\n\n";
    for(int i=0;i<n;i++){
        error+=d[i]-c[i];
        cout<<" gpu "<<c[i]<<" CPU "<<d[i]<<endl;
    }
    cout<<"\nError : "<<error<<"\n\n";
    // Fix: release device and host memory (the original leaked everything).
    cudaFree(a_dev); cudaFree(b_dev); cudaFree(c_dev);
    delete[] a; delete[] b; delete[] c; delete[] d;
    return 0;
}
/*
Output
==11960== NVPROF is profiling process 11960, command: ./a.out
Matrix is :
1 2 3 4 5 6 7 8 9 10
11 12 13 14 15 16 17 18 19 20
21 22 23 24 25 26 27 28 29 30
31 32 33 34 35 36 37 38 39 40
41 42 43 44 45 46 47 48 49 50
51 52 53 54 55 56 57 58 59 60
61 62 63 64 65 66 67 68 69 70
71 72 73 74 75 76 77 78 79 80
81 82 83 84 85 86 87 88 89 90
91 92 93 94 95 96 97 98 99 100
Vector is:
1 2 3 4 5 6 7 8 9 10
CPU Time Elapsed: 3
gpu 3355 CPU 3355
gpu 3410 CPU 3410
gpu 3465 CPU 3465
gpu 3520 CPU 3520
gpu 3575 CPU 3575
gpu 3630 CPU 3630
gpu 3685 CPU 3685
gpu 3740 CPU 3740
gpu 3795 CPU 3795
gpu 3850 CPU 3850
Error : 0
==11960== Profiling application: ./a.out
==11960== Profiling result:
Type Time(%) Time Calls Avg Min Max Name
GPU activities: 48.69% 4.1910us 1 4.1910us 4.1910us 4.1910us matrixVectorMultiplication(int*, int*, int*, int)
30.86% 2.6560us 2 1.3280us 1.0880us 1.5680us [CUDA memcpy HtoD]
20.45% 1.7600us 1 1.7600us 1.7600us 1.7600us [CUDA memcpy DtoH]
API calls: 99.71% 200.37ms 3 66.791ms 5.1320us 200.36ms cudaMalloc
0.16% 313.26us 97 3.2290us 124ns 206.98us cuDeviceGetAttribute
0.05% 104.47us 1 104.47us 104.47us 104.47us cuDeviceTotalMem
0.03% 64.413us 1 64.413us 64.413us 64.413us cuDeviceGetName
0.03% 62.632us 3 20.877us 19.062us 22.537us cudaMemcpy
0.02% 32.072us 1 32.072us 32.072us 32.072us cudaLaunchKernel
0.00% 4.1760us 1 4.1760us 4.1760us 4.1760us cuDeviceGetPCIBusId
0.00% 2.1090us 3 703ns 121ns 1.7920us cuDeviceGetCount
0.00% 988ns 2 494ns 137ns 851ns cuDeviceGet
0.00% 210ns 1 210ns 210ns 210ns cuDeviceGetUuid
*/ |
2,923 | // Copyright 2016 Massachusetts Institute of Technology. See LICENSE file for details.
// http://docs.nvidia.com/cuda/samples/6_Advanced/reduction/doc/reduction.pdf
// Block-level reduction over n strided elements using the `reduce` functor;
// each block writes one partial result to g_odata. Requires
// blockSize*sizeof(T) bytes of dynamic shared memory.
// NOTE(review): the `tid + K < n` guards protect uninitialized shared slots
// only when gridDim.x == 1 and the block starts at element 0 — confirm the
// launch code never gives a non-first block fewer than blockSize elements.
template <unsigned int blockSize, typename T, typename R>
__device__ void cuda_reduce(R reduce, size_t n, T *g_idata, T *g_odata, off_t incx, off_t incy, off_t incz, double a = 0) {
    extern __shared__ char vdata[];
    T* sdata = (T*) vdata;
    off_t grid_index = blockIdx.y*incy + blockIdx.z*incz;
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockSize) + tid;
    unsigned int gridSize = blockSize*gridDim.x;
    // Grid-stride accumulation of this thread's elements into its shared slot.
    if (i < n) {
        sdata[tid] = g_idata[incx*i + grid_index];
        i += gridSize;
    }
    while (i < n) {
        reduce(sdata[tid], g_idata[incx*i + grid_index]);
        i += gridSize;
    }
    __syncthreads();
    // Tree reduction in shared memory. Fix: every stage now ends with a
    // __syncthreads() placed OUTSIDE the divergent inner `if`. The original
    // ran the last six stages inside `if (tid < 32)` with no barriers at all,
    // relying on implicit warp-synchronous execution — a data race under
    // independent thread scheduling (Volta and newer).
    if (blockSize >= 512) { if (tid < 256 && tid + 256 < n) { reduce(sdata[tid], sdata[tid + 256]); } __syncthreads(); }
    if (blockSize >= 256) { if (tid < 128 && tid + 128 < n) { reduce(sdata[tid], sdata[tid + 128]); } __syncthreads(); }
    if (blockSize >= 128) { if (tid < 64 && tid + 64 < n) { reduce(sdata[tid], sdata[tid + 64]); } __syncthreads(); }
    if (blockSize >= 64) { if (tid < 32 && tid + 32 < n) { reduce(sdata[tid], sdata[tid + 32]); } __syncthreads(); }
    if (blockSize >= 32) { if (tid < 16 && tid + 16 < n) { reduce(sdata[tid], sdata[tid + 16]); } __syncthreads(); }
    if (blockSize >= 16) { if (tid < 8 && tid + 8 < n) { reduce(sdata[tid], sdata[tid + 8]); } __syncthreads(); }
    if (blockSize >= 8) { if (tid < 4 && tid + 4 < n) { reduce(sdata[tid], sdata[tid + 4]); } __syncthreads(); }
    if (blockSize >= 4) { if (tid < 2 && tid + 2 < n) { reduce(sdata[tid], sdata[tid + 2]); } __syncthreads(); }
    if (blockSize >= 2) { if (tid < 1 && tid + 1 < n) { reduce(sdata[tid], sdata[tid + 1]); } __syncthreads(); }
    // Thread 0 publishes the block partial; when a != 0 the sum variants
    // accumulate into the existing output instead of overwriting it.
    if (tid == 0) {
        off_t o = blockIdx.x + gridDim.x*(blockIdx.y + gridDim.y*blockIdx.z);
        if (a == 0)
            g_odata[o] = sdata[0];
        else
            g_odata[o] = a*g_odata[o] + sdata[0];
    }
}
// Binary reduction functors used by cuda_reduce: each folds v into acc.
struct Sum { template<typename T> __device__ void operator() (T& acc, const T v) { acc += v; } };
struct Min { template<typename T> __device__ void operator() (T& acc, const T v) { if (v < acc) acc = v; } };
struct Max { template<typename T> __device__ void operator() (T& acc, const T v) { if (v > acc) acc = v; } };
// C-linkage kernel entry points named cuda_<op>_<blockSize>_<type>: one
// wrapper per compile-time block size (512 down to 2) so the host can select
// the matching template instantiation at runtime. The sum variants carry an
// extra scalar `a` that cuda_reduce uses to blend into existing output.
extern "C" {
// Sum
__global__ void cuda_sum_512_double(size_t n, double* g_idata, double* g_odata, double a, off_t incx, off_t incy, off_t incz) { cuda_reduce<512>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_256_double(size_t n, double* g_idata, double* g_odata, double a, off_t incx, off_t incy, off_t incz) { cuda_reduce<256>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_128_double(size_t n, double* g_idata, double* g_odata, double a, off_t incx, off_t incy, off_t incz) { cuda_reduce<128>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_64_double(size_t n, double* g_idata, double* g_odata, double a, off_t incx, off_t incy, off_t incz) { cuda_reduce<64>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_32_double(size_t n, double* g_idata, double* g_odata, double a, off_t incx, off_t incy, off_t incz) { cuda_reduce<32>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_16_double(size_t n, double* g_idata, double* g_odata, double a, off_t incx, off_t incy, off_t incz) { cuda_reduce<16>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_8_double(size_t n, double* g_idata, double* g_odata, double a, off_t incx, off_t incy, off_t incz) { cuda_reduce<8>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_4_double(size_t n, double* g_idata, double* g_odata, double a, off_t incx, off_t incy, off_t incz) { cuda_reduce<4>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_2_double(size_t n, double* g_idata, double* g_odata, double a, off_t incx, off_t incy, off_t incz) { cuda_reduce<2>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_512_float(size_t n, float* g_idata, float* g_odata, float a, off_t incx, off_t incy, off_t incz) { cuda_reduce<512>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_256_float(size_t n, float* g_idata, float* g_odata, float a, off_t incx, off_t incy, off_t incz) { cuda_reduce<256>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_128_float(size_t n, float* g_idata, float* g_odata, float a, off_t incx, off_t incy, off_t incz) { cuda_reduce<128>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_64_float(size_t n, float* g_idata, float* g_odata, float a, off_t incx, off_t incy, off_t incz) { cuda_reduce<64>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_32_float(size_t n, float* g_idata, float* g_odata, float a, off_t incx, off_t incy, off_t incz) { cuda_reduce<32>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_16_float(size_t n, float* g_idata, float* g_odata, float a, off_t incx, off_t incy, off_t incz) { cuda_reduce<16>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_8_float(size_t n, float* g_idata, float* g_odata, float a, off_t incx, off_t incy, off_t incz) { cuda_reduce<8>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_4_float(size_t n, float* g_idata, float* g_odata, float a, off_t incx, off_t incy, off_t incz) { cuda_reduce<4>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
__global__ void cuda_sum_2_float(size_t n, float* g_idata, float* g_odata, float a, off_t incx, off_t incy, off_t incz) { cuda_reduce<2>(Sum(), n, g_idata, g_odata, incx, incy, incz, a); }
// Max
__global__ void cuda_maximum_512_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<512>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_256_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<256>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_128_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<128>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_64_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<64>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_32_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<32>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_16_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<16>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_8_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<8>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_4_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<4>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_2_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<2>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_512_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<512>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_256_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<256>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_128_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<128>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_64_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<64>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_32_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<32>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_16_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<16>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_8_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<8>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_4_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<4>(Max(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_maximum_2_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<2>(Max(), n, g_idata, g_odata, incx, incy, incz); }
// Min
__global__ void cuda_minimum_512_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<512>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_256_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<256>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_128_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<128>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_64_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<64>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_32_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<32>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_16_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<16>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_8_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<8>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_4_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<4>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_2_double(size_t n, double* g_idata, double* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<2>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_512_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<512>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_256_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<256>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_128_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<128>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_64_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<64>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_32_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<32>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_16_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<16>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_8_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<8>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_4_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<4>(Min(), n, g_idata, g_odata, incx, incy, incz); }
__global__ void cuda_minimum_2_float(size_t n, float* g_idata, float* g_odata, off_t incx, off_t incy, off_t incz) { cuda_reduce<2>(Min(), n, g_idata, g_odata, incx, incy, incz); }
}
|
2,924 | #include <stdio.h>
#include <time.h>
// Print the current UNIX timestamp and its human-readable form.
int main(void) {
    time_t t;
    time(&t);
    // Fix: time_t is not guaranteed to be long; cast explicitly for "%ld".
    printf("%ld\n", (long)t);
    // Fix: the original passed ctime()'s buffer directly as the printf format
    // string — undefined behavior if the text ever contains '%'. Print it
    // through a "%s" format instead (ctime's result already ends in '\n').
    printf("%s", ctime(&t));
}
2,925 | #include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define __DEBUG
#define VSQR 0.1
#define TSCALE 1.0
#define CUDA_CALL(err) __cudaSafeCall(err, __FILE__, __LINE__)
#define CUDA_CHK_ERR() __cudaCheckError(__FILE__, __LINE__)
/**************************************
* void __cudaSafeCall(cudaError err, const char *file, const int line)
* void __cudaCheckError(const char *file, const int line)
*
* These routines were taken from the GPU Computing SDK
* (http://developer.nvidia.com/gpu-computing-sdk) include file "cutil.h"
**************************************/
// Abort with file/line context when a CUDA API call reports failure.
// Compiled away entirely unless __DEBUG is defined.
inline void __cudaSafeCall(cudaError err, const char* file, const int line) {
#ifdef __DEBUG
    // (The original wrapped this in do { } while(0) plus MSVC pragmas to
    // silence the constant-condition warning; a plain if needs neither.)
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaSafeCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString(err));
        exit(-1);
    }
#endif // __DEBUG
}
// Report (and abort on) any error latched by the most recent CUDA call or
// kernel launch. Compiled away entirely unless __DEBUG is defined.
inline void __cudaCheckError(const char* file, const int line) {
#ifdef __DEBUG
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaCheckError() failed at %s:%i : %s.\n", file, line, cudaGetErrorString(err));
        exit(-1);
    }
    // A stricter variant would cudaThreadSynchronize() here and re-check,
    // at a performance cost; it is left disabled, as in the original.
#endif // __DEBUG
}
int tpdt(double* t, double dt, double end_time);
// Pebble forcing term: exponential decay of the impact p over time t.
// Fix: use double-precision exp() — the original called expf() on double
// operands, silently rounding the whole expression through float.
__device__ double f_gpu(double p, double t) { return -exp(-TSCALE * t) * p; }
// One finite-difference time step of the lake surface: un = step(uc, uo).
// Grid cells on the boundary are clamped to 0.
__global__ void evolve_gpu(double* un, double* uc, double* uo, double* pebbles, int n, double h, double dt, double t)
{
    // Fix: these indices were declared __shared__, so every thread in the
    // block raced on the same three variables and computed with whichever
    // thread's values happened to land last. They must be thread-local.
    const int idx = (blockIdx.y * gridDim.x + blockIdx.x) * (blockDim.x * blockDim.y) + (threadIdx.y * blockDim.x + threadIdx.x);
    const int i = idx / n;
    const int j = idx % n;
    if (i == 0 || i == n - 1 || j == 0 || j == n - 1)
        un[idx] = 0.;
    else
        un[idx] = 2 * uc[idx] - uo[idx] + VSQR * (dt * dt) * ((uc[idx - 1] + uc[idx + 1] + uc[idx + n] + uc[idx - n] + 0.25 * (uc[idx - 1 - n] + uc[idx - 1 + n] + uc[idx + 1 - n] + uc[idx + 1 + n]) - 5 * uc[idx]) / (h * h) + f_gpu(pebbles[idx], t));
}
// Time-step the n x n lake surface on the GPU until end_time, writing the
// final state into u and printing the elapsed kernel time.
// Launch layout: (n/nthreads)^2 blocks of nthreads x nthreads threads.
void run_gpu(double* u, double* u0, double* u1, double* pebbles, int n, double h, double end_time, int nthreads) {
    cudaEvent_t kstart, kstop;
    float ktime;
    double* gpu_uc;        // current step
    double* gpu_uo;        // previous step
    double* gpu_un;        // next step
    double* gpu_pebbles;   // forcing term
    /* Set up device timers */
    CUDA_CALL(cudaSetDevice(0));
    CUDA_CALL(cudaEventCreate(&kstart));
    CUDA_CALL(cudaEventCreate(&kstop));
    // Device buffers and initial state.
    cudaMalloc((void**)&gpu_uc, sizeof(double) * n * n);
    cudaMalloc((void**)&gpu_uo, sizeof(double) * n * n);
    cudaMalloc((void**)&gpu_un, sizeof(double) * n * n);
    cudaMalloc((void**)&gpu_pebbles, sizeof(double) * n * n);
    cudaMemcpy((void*)gpu_uo, (void*)u0, sizeof(double) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy((void*)gpu_uc, (void*)u1, sizeof(double) * n * n, cudaMemcpyHostToDevice);
    cudaMemcpy((void*)gpu_pebbles, (void*)pebbles, sizeof(double) * n * n, cudaMemcpyHostToDevice);
    double t = 0., dt = h / 2.;
    int grid_size = n / nthreads;
    int block_size = nthreads;
    dim3 grid(grid_size, grid_size);
    dim3 block(block_size, block_size);
    /* Start GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstart, 0));
    // Main loop: un <- step(uc, uo), then shift the time windows.
    while (1) {
        evolve_gpu<<<grid, block>>>(gpu_un, gpu_uc, gpu_uo, gpu_pebbles, n, h, dt, t);
        cudaMemcpy((void*)gpu_uo, (void*)gpu_uc, sizeof(double) * n * n, cudaMemcpyDeviceToDevice);
        cudaMemcpy((void*)gpu_uc, (void*)gpu_un, sizeof(double) * n * n, cudaMemcpyDeviceToDevice);
        if (!tpdt(&t, dt, end_time))
            break;
    }
    cudaMemcpy((void*)u, (void*)gpu_un, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
    /* Stop GPU computation timer */
    CUDA_CALL(cudaEventRecord(kstop, 0));
    CUDA_CALL(cudaEventSynchronize(kstop));
    CUDA_CALL(cudaEventElapsedTime(&ktime, kstart, kstop));
    printf("GPU computation: %f msec\n", ktime);
    // Fix: release the four device buffers (the original leaked all of them),
    // then the timing events.
    cudaFree(gpu_uc);
    cudaFree(gpu_uo);
    cudaFree(gpu_un);
    cudaFree(gpu_pebbles);
    CUDA_CALL(cudaEventDestroy(kstart));
    CUDA_CALL(cudaEventDestroy(kstop));
}
|
2,926 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated single-thread float-expression stress kernel: threads `comp`
// through nested branches/loops of transcendental operations and prints the
// final value. Code left byte-identical per the file's "Do not modify"
// header; comments only.
__global__
void compute(float comp, float var_1,float var_2,float var_3,float var_4,float var_5,float var_6,float var_7,int var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33) {
if (comp >= acosf(var_1 + (var_2 + coshf((-1.2501E0f * -0.0f - var_3 * var_4))))) {
if (comp >= (var_5 - (-0.0f / -1.2804E4f / var_6 - (var_7 + -0.0f)))) {
float tmp_1 = +1.9632E1f;
float tmp_2 = -1.4492E-42f / +1.6382E-41f + powf(log10f(atanf(var_9 + +1.0774E-36f + +1.7894E34f * var_10 - var_11)), sqrtf(+1.4381E34f));
comp = tmp_2 / tmp_1 + +1.7636E-19f - +0.0f * -1.8727E-36f;
// var_8 bounds the only loop in the kernel.
for (int i=0; i < var_8; ++i) {
comp += var_12 * (-0.0f + (var_13 - var_14));
comp = atanf(-0.0f - -1.2866E-37f / tanhf((var_15 / var_16 * (var_17 * +1.7017E-43f / -1.2719E-43f))));
}
if (comp >= (-1.9785E-35f + (+0.0f * +0.0f))) {
comp = +1.9481E-21f * +0.0f * (-1.0391E-43f / sinhf((var_18 * (var_19 - var_20 + var_21 / asinf((-1.5613E-35f + var_22))))));
}
if (comp < var_23 / (var_24 * (+1.5727E3f * (+1.8163E-36f * var_25)))) {
comp = (var_26 + var_27 + var_28);
float tmp_3 = -1.0908E36f;
float tmp_4 = (-1.8329E-44f * var_29 + var_30 * atan2f(+1.6353E35f, -1.8864E2f - (+1.9070E27f * var_31)));
comp = tmp_4 / tmp_3 * (-0.0f - -1.0533E34f / atanf(var_32 / +1.8426E-44f * var_33));
}
}
}
// Result is observed via device printf only.
printf("%.17g\n", comp);
}
// Allocate a 10-element float buffer with every slot set to v.
// NOTE(review): the caller owns the returned buffer (free() it); the size 10
// matches the generated harness — confirm before reusing elsewhere.
float* initPointer(float v) {
    float* buf = (float*) malloc(sizeof(float) * 10);
    for (int k = 0; k < 10; ++k)
        buf[k] = v;
    return buf;
}
// Parse 34 command-line values (33 floats + 1 int in position 9) and feed
// them to the single-thread compute kernel.
int main(int argc, char** argv) {
    /* Program variables */
    // Fix: the generated harness dereferenced argv[1]..argv[34] unchecked and
    // crashed when run with fewer arguments.
    if (argc < 35) {
        fprintf(stderr, "usage: %s v1 ... v34\n", argv[0]);
        return 1;
    }
    float tmp_1 = atof(argv[1]);
    float tmp_2 = atof(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    float tmp_6 = atof(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    int tmp_9 = atoi(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    float tmp_14 = atof(argv[14]);
    float tmp_15 = atof(argv[15]);
    float tmp_16 = atof(argv[16]);
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    float tmp_28 = atof(argv[28]);
    float tmp_29 = atof(argv[29]);
    float tmp_30 = atof(argv[30]);
    float tmp_31 = atof(argv[31]);
    float tmp_32 = atof(argv[32]);
    float tmp_33 = atof(argv[33]);
    float tmp_34 = atof(argv[34]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34);
    cudaDeviceSynchronize();
    return 0;
}
|
2,927 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
// Greatest common divisor via Euclid's algorithm.
__device__ unsigned long long int gcd(unsigned long long int a, unsigned long long int b){
    while (b != 0ULL) {
        unsigned long long int rem = a % b;
        a = b;
        b = rem;
    }
    return a;
}
// Pollard-rho-style factor search: each thread iterates x -> x*x+x+1 from the
// seed threadIdx.x (b advances two steps per a's one) and tests
// gcd(a-b, n) for a nontrivial factor of n, which is written to *d.
// NOTE(review): all threads read and write *d concurrently without atomics —
// the loop condition polls *d while other threads store to it, which is a
// data race for any launch wider than one thread; confirm intent or add
// atomic operations.
__global__ void MonteCarlo(unsigned long long int n, unsigned long long int *d){
unsigned long long int dtmp = 1;
unsigned long long int a = threadIdx.x;
unsigned long long int b = threadIdx.x;
// Keep iterating while neither this thread nor any other has found a
// divisor other than 1 or n itself.
while((dtmp==1||dtmp==n) && (*d==1||*d==n)){
a = a*a+a+1;
b = b*b+b+1;
b = b*b+b+1;
dtmp = gcd(a-b,n);
}
*d=dtmp;
}
// Usage: prog <n>. Launches the device factor search and prints the factor
// of n that it finds.
int main(int argc, char *argv[]){
    if(argc<2)
        exit(0);
    unsigned long long int n = atoll(argv[1]);
    unsigned long long int *ptrd;
    unsigned long long int d = 1;
    cudaMalloc((void**) &ptrd, sizeof(unsigned long long int));
    cudaMemcpy(ptrd, &d, sizeof(unsigned long long int),cudaMemcpyHostToDevice);
    MonteCarlo<<<1,5>>>(n,ptrd);
    cudaMemcpy(&d, ptrd, sizeof(unsigned long long int),cudaMemcpyDeviceToHost);
    // Fix: d is unsigned long long — "%lld" reinterpreted it as signed;
    // use the unsigned conversion specifier.
    printf("%llu\n",d);
    cudaFree(ptrd);
    return 0;
}
|
2,928 | #include "includes.h"
// One quicksort partition step per thread: thread idx partitions the segment
// x[dfirst[idx]..dlast[idx]] around x[first]. If the segment is not fully
// sorted afterwards, list[idx] receives the split point (j+1) so the host can
// schedule the sub-segments; otherwise list[idx] stays 0.
__global__ void quickSort(int *x, int *dfirst, int *dlast, int *list)
{
    int idx = threadIdx.x;
    int first = dfirst[idx];
    int last = dlast[idx];
    list[idx] = 0;
    if(first<last)
    {
        int pivot, j, temp, i;
        pivot = first;
        i = first;
        j = last;
        // Hoare-style sweep: advance i past elements <= pivot, retreat j past
        // elements > pivot, swapping mismatched pairs.
        while(i<j)
        {
            while(x[i]<=x[pivot] && i<last)
                i++;
            while(x[j] > x[pivot])
                j--;
            if(i<j)
            {
                temp = x[i];
                x[i] = x[j];
                x[j] = temp;
            }
        }
        // Place the pivot into its final slot j.
        temp = x[pivot];
        x[pivot] = x[j];
        x[j] = temp;
        // Fix: scan adjacent pairs strictly inside [first, last]. The original
        // loop ran i <= last and compared x[i] with x[i+1], reading x[last+1]
        // out of bounds on the final iteration.
        for(i=first; i<last; i++)
            if(x[i] > x[i+1])
            {
                list[idx] = j+1;
                break;
            }
    }
}
2,929 | #include "includes.h"
// Zero a border of nCropRows rows (top and bottom) and nCropCols columns
// (left and right) in each iH x iW plane. Launch: one block per plane,
// one (ty, tx) thread per pixel.
__global__ void batch_crop_kernel(float* input, const int nCropRows, const int nCropCols, const int iH, const int iW, const int nPlanes){
    const int plane = blockIdx.x;
    if (plane >= nPlanes)
        return;
    input += plane * iH * iW;
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    // Fix: guard BOTH coordinates before every write. The original checked
    // only ty in the row branch and only tx in the column branch, so any
    // blockDim larger than the plane extent wrote out of bounds.
    if (tx < iW && ty < iH && (ty > iH-nCropRows-1 || ty < nCropRows)) {
        input[ty*iW + tx] = 0;
    }
    if (tx < iW && ty < iH && (tx > iW-nCropCols-1 || tx < nCropCols)) {
        input[ty*iW + tx] = 0;
    }
}
2,930 | #include <stdio.h>
#include <assert.h>
#include <cuda.h>
// Exercise cudaMemcpy2D in host-to-host mode: copy 64 rows of 2 ints each
// out of a source pitched at 8 ints per row, then dump both buffers.
int main(int argc, char* argv[])
{
    const size_t nSrcInts = sizeof(int4) * 8 * 64;  // elements in p
    const size_t nDstVecs = 2 * 64;                 // int4 elements in q
    int* p = (int*) malloc(nSrcInts * sizeof(int));
    assert(p != NULL);
    int4* q = (int4*) malloc(nDstVecs * sizeof(int4));
    assert(q != NULL);
    // Fill the source with its own indices so pitch behavior is visible.
    for (size_t k = 0; k < nSrcInts; ++k)
        p[k] = (int) k;
    cudaError_t iRet = cudaMemcpy2D(q, 2 * sizeof(int), p, 8 * sizeof(int), 2 * sizeof(int), 64, cudaMemcpyHostToHost);
    printf("**********\niRet = %d\n**********\n", iRet);
    for (size_t k = 0; k < nSrcInts; ++k)
    {
        printf("%d ", p[k]);
    }
    printf("\n**********\n");
    for (size_t k = 0; k < nDstVecs; ++k)
    {
        printf("%d %d %d %d ", q[k].x, q[k].y, q[k].z, q[k].w);
    }
    printf("\n");
    free(q);
    free(p);
    return 0;
}
|
#ifndef THREADED_H
#define THREADED_H
#include<vector>
#include<thread>
// THREADED(class, function, num_of_threads): expands to the definition of
// class::functionThreaded(), which spawns num_of_threads std::threads each
// running this->functionSingle(), then joins them all.
#define THREADED(class,function, num_of_threads) \
void class::function##Threaded(){ \
std::vector< std::thread > threads; \
for(int i=0; i<num_of_threads; i++){ \
threads.push_back( std::thread([this](){this->function##Single();})); \
}; \
for(auto &t : threads){ \
t.join(); \
}; \
};
// THREADED_T(class, function, num_of_threads): same expansion for a class
// template with a single type parameter T (defines class<T>::functionThreaded).
#define THREADED_T(class,function, num_of_threads) \
template<typename T> \
void class<T>::function##Threaded(){ \
std::vector< std::thread > threads; \
for(int i=0; i<num_of_threads; i++){ \
threads.push_back( std::thread( \
[this](){this->function##Single();} \
)); \
} \
for(auto &t: threads){ \
t.join(); \
} \
}
#endif
|
2,932 | #include "stdio.h"
// Each thread overwrites its own element with the constant 10.
__global__
void testKernel(float* d_data)
{
    const int tid = threadIdx.x;
    d_data[tid] = 10;
}
// Launch a 32-thread kernel that sets every element to 10, copy the result
// back, and print it. Aborts with a message on any CUDA error.
void CallKernel()
{
    int threads = 32;
    dim3 gridSize(1, 1, 1);
    dim3 blockSize(threads, 1, 1);
    float* h_data;
    float* d_data;
    int dataLen = threads;
    h_data = (float *)malloc(sizeof(float) * dataLen);
    for (int i = 0; i < dataLen; i++)
    {
        h_data[i] = i;
    }
    cudaMalloc((void**)&d_data, sizeof(float) * dataLen);
    cudaMemcpy(d_data, h_data, sizeof(float) * dataLen, cudaMemcpyHostToDevice);
    puts("starting kernel");
    testKernel<<<gridSize, blockSize>>>(d_data);
    // Fix: check for launch-configuration errors immediately after the
    // launch; the original only checked once, at the very end, after the
    // buffers had already been freed.
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    puts("kernel finished");
    // cudaMemcpy is synchronizing, so execution errors surface here too.
    cudaMemcpy(h_data, d_data, sizeof(float) * dataLen, cudaMemcpyDeviceToHost);
    puts("data:");
    for (int i = 0; i < dataLen; i++)
    {
        printf(" %f\n", h_data[i]);
    }
    puts("end");
    free(h_data);
    cudaFree(d_data);
    error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
}
|
2,933 | #include "includes.h"
// Elementwise intersection of two integer masks: devOut[i] = A[i] * B[i].
// NOTE(review): there is no length parameter and therefore no bounds guard —
// the launch configuration must cover exactly the buffer length; confirm at
// the call site.
__global__ void Mask_Intersect_Kernel( int* A, int* B, int* devOut)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x;
    devOut[i] = A[i] * B[i];
}
2,934 | #ifdef __cplusplus
extern "C" {
#endif
// 2-D sample point; the pi kernel below tests x*x + y*y <= ray per point.
struct point{
float x;
float y;
};
__global__ void pi(const struct point* A, float* res, const int nbPoint, const float ray){
    /* Monte-Carlo pi helper: marks each point of A as inside (1.0f) or
     * outside (0.0f) the disc, writing the flags into res.
     *
     * Each thread handles 32 samples spaced blockDim.x apart, so one block
     * covers 32*blockDim.x points starting at 32*blockDim.x*blockIdx.x.
     * The original spelled the 32 strided accesses out by hand (i1..i31);
     * the loop below performs the exact same 32 stores.
     */
    const int idx = 32*blockDim.x * blockIdx.x + threadIdx.x;
    // Guard keeps every access idx + j*blockDim.x (j < 32) within bounds.
    if (idx < nbPoint-32*blockDim.x)
    {
        // NOTE(review): the squared distance is compared against `ray`
        // directly, so callers must pass the squared radius (or radius == 1)
        // — confirm at the call site.
        #pragma unroll
        for (int j = 0; j < 32; ++j) {
            const int i = idx + j*blockDim.x;
            res[i] = (A[i].x*A[i].x + A[i].y*A[i].y <= ray);
        }
    }
}
#ifdef __cplusplus
}
#endif
|
2,935 | #include <cstdio>
int main(int argc, char **argv) {
    // Allocates N device bytes, uploads a host buffer, then blocks on stdin
    // (useful for inspecting the allocation with external tools).
    const int N = 1024;
    char *tmpPtr = NULL;
    char tmpBuffer[N];
    // NOTE(review): tmpBuffer is intentionally left uninitialized; only the
    // allocation/copy mechanics matter here.
    cudaError_t err = cudaMalloc(&tmpPtr, N);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    err = cudaMemcpy(tmpPtr, tmpBuffer, N, cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed: %s\n", cudaGetErrorString(err));
    }
    while(getchar() != EOF);
    cudaFree(tmpPtr);  // release the device allocation (previously leaked)
    return 0;
}
|
2,936 | /* This program sorts an input array by bucket sort.
* Each bucket in turn is sorted using Parallel Bubble sort.
* The array consists of float numbers, all less than 1. To find the destination bucket,
* the float number is multiplied by 10 to get the first digit, which determines the bucket number.
* For eg., 0.1234 -> (int)(0.1234*10) = 1. Thus the bucket number for 0.1234 is 1.
* Thus the total number of buckets will be 10. (0-9)
* Implemented in CUDA.
*
*
*
* code by Anand Goyal. Dated: 12/13/2014
*/
#include<stdio.h>
#include<cuda.h>
#include<time.h>
#include<sys/time.h>
#define range 10
#define SIZE 5000
#define bucketLength (SIZE/range * 2)
__global__ void bucketSortKernel(float *inData, long size, float *outData)
{
    /* One block per bucket: block b collects every element whose first
     * decimal digit equals b, sorts its bucket with odd-even transposition
     * (parallel bubble) sort, then writes the bucket to outData.
     *
     * Fixes over the previous version:
     *  - localBucket is zero-initialized (slots past localCount were read
     *    uninitialized and sorted/written as garbage);
     *  - the bucket index is bounds-checked (overflow corrupted shared mem);
     *  - tid is reset before every phase (it was only reset once, so no
     *    thread did any comparisons after the first two phases);
     *  - a __syncthreads() barrier separates phases (phases raced before).
     */
    __shared__ float localBucket[bucketLength];
    __shared__ int localCount; /* next free slot within the bucket */
    int tid = threadIdx.x;
    int blockId = blockIdx.x;
    int offset = blockDim.x;
    int bucket, index, phase;
    float temp;

    if(tid == 0)
        localCount = 0;
    /* Zero the whole bucket so unused slots hold 0.0f, never garbage. */
    while(tid < bucketLength) {
        localBucket[tid] = 0.0f;
        tid += offset;
    }
    tid = threadIdx.x;
    __syncthreads();

    /* Block traverses the array and collects the elements of its bucket. */
    while(tid < size) {
        bucket = inData[tid] * 10;
        if(bucket == blockId) {
            index = atomicAdd(&localCount, 1);
            if(index < bucketLength)   /* drop overflow instead of OOB write */
                localBucket[index] = inData[tid];
        }
        tid += offset;
    }
    __syncthreads();

    /* Odd-even transposition sort of the bucket. */
    for(phase = 0; phase < bucketLength; phase++) {
        tid = threadIdx.x;
        if(phase % 2 == 0) {
            /* even phase: compare pairs (0,1), (2,3), ... */
            while((tid < bucketLength) && (tid % 2 == 0)) {
                if(localBucket[tid] > localBucket[tid + 1]) {
                    temp = localBucket[tid];
                    localBucket[tid] = localBucket[tid + 1];
                    localBucket[tid + 1] = temp;
                }
                tid += offset;
            }
        }
        else {
            /* odd phase: compare pairs (1,2), (3,4), ... */
            while((tid < bucketLength - 1) && (tid % 2 != 0)) {
                if(localBucket[tid] > localBucket[tid + 1]) {
                    temp = localBucket[tid];
                    localBucket[tid] = localBucket[tid + 1];
                    localBucket[tid + 1] = temp;
                }
                tid += offset;
            }
        }
        /* Barrier is at uniform control flow: all threads reach it each phase. */
        __syncthreads();
    }

    /* Write the sorted bucket to its slice of outData. */
    tid = threadIdx.x;
    while(tid < bucketLength) {
        outData[(blockIdx.x * bucketLength) + tid] = localBucket[tid];
        tid += offset;
    }
}
int main()
{
    /* Host driver: generates SIZE random floats in (0,1], bucket-sorts them
     * on the GPU (one block per bucket), and reports the elapsed time. */
    float *input, *output;
    float *d_input, *d_output;
    int i;
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    /* Each block sorts one bucket */
    const int numOfThreads = 4;
    const int numOfBlocks = range;
    input = (float *)malloc(sizeof(float) * SIZE);
    output = (float *)malloc(sizeof(float) * bucketLength * range);
    cudaMalloc((void**)&d_input, sizeof(float) * SIZE);
    cudaMalloc((void **)&d_output, sizeof(float) * bucketLength * range);
    cudaMemset(d_output, 0, sizeof(float) * bucketLength * range);
    /* Generate the input array: uniform floats in (0, 1] */
    srand(time(NULL));
    for(i = 0; i < SIZE; i++)
        input[i] = (float)(rand()%10000 + 1)/(float)10000;
    cudaEventRecord(start, 0);
    cudaMemcpy(d_input, input, sizeof(float) * SIZE, cudaMemcpyHostToDevice);
    bucketSortKernel<<<numOfBlocks, numOfThreads>>>(d_input, SIZE, d_output);
    /* Catch launch-configuration errors here; asynchronous kernel faults
     * surface at the blocking memcpy below. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("Kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(output, d_output, sizeof(float) * bucketLength * range, cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("Time : %3.1f ms \n", elapsedTime);
    /* Destroy the timing events (previously leaked). */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_input);
    cudaFree(d_output);
    free(input);
    free(output);
    return 0;
}
|
2,937 | #include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/copy.h>
#include <iostream>
// Thin callable wrapper around a unary float function pointer, so a runtime
// chosen function can be handed to thrust algorithms.
struct functor{
private:
    float (*m_fn)(const float&);  // function pointer to apply per element
public:
    functor(float (*g)(const float&)) : m_fn(g) {}
    __host__ __device__ float operator()(const float& x) const {
        return m_fn(x);
    }
};
// Unary map used for the demo: g(x) = 3*x; compiled for host and device.
__host__ __device__ float g(const float& x){return 3*x;}
// Device-resident pointer initialized with the *device* address of g; the
// host retrieves it via cudaMemcpyFromSymbol (taking &g on the host would
// yield the host address, which is not callable from device code).
__device__ float (*d_g)(const float&) = g;
int main(void){
// Host copy of the device function pointer stored in d_g.
float (*h_g)(const float&) = NULL;
// NOTE(review): return value unchecked; a failed symbol copy would leave
// h_g NULL and crash inside the kernel launched by thrust::transform.
cudaMemcpyFromSymbol(&h_g, d_g, sizeof(void *));
// Four ones -> transformed on the device through the fetched pointer.
thrust::device_vector<float> X(4,1);
thrust::transform(X.begin(), X.end(), X.begin(), functor(h_g));
// Prints the transformed values separated by commas.
thrust::copy_n(X.begin(), X.size(), std::ostream_iterator<float>(std::cout, ","));
std::cout << std::endl;
}
|
2,938 | #include <iostream>
#include <math.h>
#include <stdio.h>
#include <cuda.h>
using namespace std;
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Divergence of a Vector with variable coefficient- term in momentum eqn !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
__global__ void DivGPU(double* Dn, double* Phi, double* U, double* V, int row, int col,double delX,double delY){
    // One thread per grid node; k is the flattened (r, c) index.
    int k = blockIdx.x*blockDim.x + threadIdx.x;
    int c = k % col;
    int r = k / col;
    // Interior nodes only (this also rejects threads past the last row).
    if (c > 0 && c < (col-1) && r > 0 && r < (row-1)) {
        double phiC = Phi[k];
        double uC   = U[k];
        double vC   = V[k];
        // Convective face fluxes of (U*Phi, V*Phi) from two-point averages.
        double fE = 0.5*(U[k+1]*Phi[k+1]     + uC*phiC);
        double fW = 0.5*(U[k-1]*Phi[k-1]     + uC*phiC);
        double fN = 0.5*(V[k-col]*Phi[k-col] + vC*phiC);
        double fS = 0.5*(V[k+col]*Phi[k+col] + vC*phiC);
        Dn[k] = delX*(fN-fS) + delY*(fE-fW);
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Divergence of a Vector with No-coefficient- in continuity & source term !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
__global__ void DivergenceGPU(double* Dn, double* U, double* V,int row, int col, double delX, double delY){
    // One thread per grid node; k is the flattened (r, c) index.
    int k = blockIdx.x*blockDim.x + threadIdx.x;
    int c = k % col;
    int r = k / col;
    // Interior nodes only (this also rejects threads past the last row).
    if (c > 0 && c < (col-1) && r > 0 && r < (row-1)) {
        double uC = U[k];
        double vC = V[k];
        // Face-averaged velocities: east/west along x, north/south along y.
        double uE = 0.5*(U[k+1]   + uC);
        double uW = 0.5*(U[k-1]   + uC);
        double vN = 0.5*(V[k-col] + vC);
        double vS = 0.5*(V[k+col] + vC);
        Dn[k] = (uE-uW)*delY + (vN-vS)*delX;
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Laplacian of a Scalar !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
__global__ void LaplacianGPU(double* Ln, double *Phi, int row, int col, double delX, double delY){
    // One thread per grid node; k is the flattened (r, c) index.
    int k = blockIdx.x*blockDim.x + threadIdx.x;
    int c = k % col;
    int r = k / col;
    // Interior nodes only (this also rejects threads past the last row).
    if (c > 0 && c < (col-1) && r > 0 && r < (row-1)) {
        double phiC = Phi[k];
        // Face gradients: east/west along x, north/south along y.
        double gE = (Phi[k+1]   - phiC) / delX;
        double gW = (phiC - Phi[k-1])   / delX;
        double gN = (Phi[k-col] - phiC) / delY;
        double gS = (phiC - Phi[k+col]) / delY;
        Ln[k] = delX*(gN-gS) + delY*(gE-gW);
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Gradient !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
__global__ void gradientGPU(double* gradxPhi1,double* gradyPhi1,double* Phi,
int row, int col, double delX, double delY){
    // One thread per grid node; k is the flattened (r, c) index.
    int k = blockIdx.x*blockDim.x + threadIdx.x;
    int c = k % col;
    int r = k / col;
    // Interior nodes only (this also rejects threads past the last row).
    if (c > 0 && c < (col-1) && r > 0 && r < (row-1)) {
        double phiC = Phi[k];
        // Face-averaged values of Phi around node k.
        double phiE = 0.5*(Phi[k+1]   + phiC);
        double phiW = 0.5*(Phi[k-1]   + phiC);
        double phiN = 0.5*(Phi[k-col] + phiC);
        double phiS = 0.5*(Phi[k+col] + phiC);
        gradxPhi1[k] = (phiE-phiW)/delX;
        gradyPhi1[k] = (phiN-phiS)/delY;
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Divergence of a Vector with variable coefficient- term in momentum eqn !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
void Div(double* Dn, double* Phi, double* U, double* V, int row, int col,double delX,double delY){
    // CPU reference: divergence of (U*Phi, V*Phi) over interior nodes only.
    for (int i = 1; i < row - 1; ++i) {
        for (int j = 1; j < col - 1; ++j) {
            const int k = i*col + j;
            const double phiC = Phi[k];
            const double uC   = U[k];
            const double vC   = V[k];
            // Convective face fluxes from two-point averages.
            const double fE = 0.5*(U[k+1]*Phi[k+1]     + uC*phiC);
            const double fW = 0.5*(U[k-1]*Phi[k-1]     + uC*phiC);
            const double fN = 0.5*(V[k-col]*Phi[k-col] + vC*phiC);
            const double fS = 0.5*(V[k+col]*Phi[k+col] + vC*phiC);
            Dn[k] = delX*(fN-fS) + delY*(fE-fW);
        }
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Divergence of a Vector with No-coefficient- in continuity & source term !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
void Divergence(double* Dn, double* U, double* V,int row, int col, double delX, double delY){
    // CPU reference: divergence of (U, V) over interior nodes only.
    for (int i = 1; i < row - 1; ++i) {
        for (int j = 1; j < col - 1; ++j) {
            const int k = i*col + j;
            const double uC = U[k];
            const double vC = V[k];
            // Face-averaged velocities: east/west along x, north/south along y.
            const double uE = 0.5*(U[k+1]   + uC);
            const double uW = 0.5*(U[k-1]   + uC);
            const double vN = 0.5*(V[k-col] + vC);
            const double vS = 0.5*(V[k+col] + vC);
            Dn[k] = (uE-uW)*delY + (vN-vS)*delX;
        }
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Laplacian of a Scalar !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
void Laplacian(double* Ln, double *Phi, int row, int col, double delX, double delY){
    // CPU reference: five-point Laplacian of Phi over interior nodes only.
    for (int i = 1; i < row - 1; i++) {
        for (int j = 1; j < col - 1; j++) {
            const int k = i*col + j;
            const double phiC = Phi[k];
            // Face gradients: east/west along x, north/south along y.
            const double gE = (Phi[k+1]   - phiC) / delX;
            const double gW = (phiC - Phi[k-1])   / delX;
            const double gN = (Phi[k-col] - phiC) / delY;
            const double gS = (phiC - Phi[k+col]) / delY;
            Ln[k] = delX*(gN-gS) + delY*(gE-gW);
        }
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Gradient !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
void gradient(double* gradxPhi,double* gradyPhi,double* Phi,
int row, int col, double delX, double delY){
    // CPU reference: central-difference gradient of Phi over interior nodes.
    for (int i = 1; i < row - 1; ++i) {
        for (int j = 1; j < col - 1; ++j) {
            const int k = i*col + j;
            const double phiC = Phi[k];
            // Face-averaged values of Phi around node k.
            const double phiE = 0.5*(Phi[k+1]   + phiC);
            const double phiW = 0.5*(Phi[k-1]   + phiC);
            const double phiN = 0.5*(Phi[k-col] + phiC);
            const double phiS = 0.5*(Phi[k+col] + phiC);
            gradxPhi[k] = (phiE-phiW)/delX;
            gradyPhi[k] = (phiN-phiS)/delY;
        }
    }
}
|
2,939 | #include "includes.h"
const int Nthreads = 1024, NrankMax = 3, nt0max = 71, NchanMax = 1024;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// Power-iteration extraction of the top Nrank temporal components of each
// filter's nt0 x nt0 matrix wtw, with rank-1 deflation between components,
// writing the (sign-normalized) components back into W.
// One block per filter; thread tid handles time sample tid
// (assumes blockDim.x == nt0 — TODO confirm at the launch site).
__global__ void getW(const double *Params, double *wtw, double *W){
int Nfilt, nt0, tid, bid, i, t, Nrank,k, tmax;
double x, x0, xmax;
// sW: current estimates of the Nrank components; swtw: this filter's wtw
// copy, deflated in place; xN: broadcast slot for the norm.
volatile __shared__ double sW[nt0max*NrankMax], swtw[nt0max*nt0max], xN[1];
// Unpack sizes from the Params vector.
nt0 = (int) Params[4];
Nrank = (int) Params[6];
Nfilt = (int) Params[1];
tmax = (int) Params[11];
tid = threadIdx.x;
bid = blockIdx.x;
// Stage this filter's wtw block and its current W columns in shared memory.
for (k=0;k<nt0;k++)
swtw[tid + k*nt0] = wtw[tid + k*nt0 + bid * nt0 * nt0];
for (k=0;k<Nrank;k++)
sW[tid + k*nt0] = W[tid + bid * nt0 + k * nt0*Nfilt];
__syncthreads();
// for each svd component
for(k=0;k<Nrank;k++){
// 100 power-iteration steps: w <- normalize(swtw * w)
for (i=0;i<100;i++){
// compute projection of wtw (row tid dotted with the current component)
x = 0.0f;
for (t=0;t<nt0;t++)
x+= swtw[tid + t*nt0] * sW[t + k*nt0];
__syncthreads();
// skip the write-back on the last step so `x` keeps the final projection
if (i<99){
sW[tid + k*nt0] = x;
__syncthreads();
// thread 0 computes the norm (epsilon guards against a zero vector)
if (tid==0){
x0 = 0.00001f;
for(t=0;t<nt0;t++)
x0+= sW[t + k*nt0] * sW[t + k*nt0];
xN[0] = sqrt(x0);
}
__syncthreads();
sW[tid + k*nt0] = x/xN[0];
__syncthreads();
}
}
// now subtract off this svd from wtw (rank-1 deflation; `x` is the final
// projection of row tid onto the converged component)
for (t=0;t<nt0;t++)
swtw[tid + t*nt0] -= sW[t+k*nt0] * x;
__syncthreads();
}
// Fix the sign convention using the sample at tmax of the first component.
// NOTE(review): only sW[tid] (component 0) is sign-flipped here, while all
// Nrank components are written back below — confirm this is intended.
xmax = sW[tmax];
__syncthreads();
sW[tid] = - sW[tid] * copysign(1.0, xmax);
// now write W back
for (k=0;k<Nrank;k++)
W[tid + bid * nt0 + k * nt0*Nfilt] = sW[tid + k*nt0];
}
2,940 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/inner_product.h>
#include <thrust/sequence.h>
#include <bits/stdc++.h>
using namespace std;
int main(){
thrust::device_vector<int> d_B(5), d_A(5,15);
cout<<"begin\n\n";
for(auto b:d_B) cout<<b<<' ';
cout<<endl;
for(auto a:d_A) cout<<a<<' ';
cout<<endl;
thrust::sequence(d_B.begin(), d_B.end());
cout<< thrust::inner_product(d_A.begin(), d_A.end(), d_B.begin(), 0)<<endl;
}
|
2,941 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
using namespace std;
__global__ void global_get_flags(int* d_in, int* flags, int mask, int size) {
    // flags[i] = 1 when the masked bit of d_in[i] is 0, else 0.
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    // NOTE(review): threads with gid >= size still write 0 into flags; this
    // is safe only because the launch covers exactly the padded allocation
    // size — confirm at the call site.
    flags[gid] = (gid < size && (mask & d_in[gid]) == 0) ? 1 : 0;
    // barrier retained from the original (has no cross-thread effect here)
    __syncthreads();
}
// Per-block Blelloch exclusive scan of `flag` into d_out. Each block scans
// its own `size`-element slice; when `store` is true, the block total
// (last scan value + last flag) is written to scan_store[blockIdx.x].
// Assumes blockDim.x == size and size is a power of two — TODO confirm.
__global__ void prescan(int* d_out, int* scan_store, int* flag, int size, bool store) {
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
//Blelloch scan that only works on powers of 2. this is mitigated by each block running
//512 threads, and scan_store array is padded to powers of 2
// load inputs into memory
d_out[myId] = flag[myId];
__syncthreads();
//terrible log math to index to parent and children vice versa
//build up SUM (up-sweep: each level folds pairs h apart into their parent)
for (int h = 1; h < size; h *= 2) {
int index = h * 2 * tid + h * 2 - 1 + (size * blockIdx.x);
if (index < (size * (blockIdx.x + 1))) { //check if myid +step is smaller than size
d_out[index] += d_out[h * 2 * tid + h - 1 + (size * blockIdx.x)];
//NEED TO PAD INPUT ARRAY
}
// barrier is outside the divergent if: every thread reaches it each level
__syncthreads();
}
__syncthreads();
//clear the last element (root) before the down-sweep -> exclusive scan
if (tid == 0) {
d_out[(size * (blockIdx.x+1)) - 1] = 0;
}
__syncthreads();
//terrible log math to index to parent and children vice versa
//Build down SCAN (down-sweep: push partial sums back to the leaves)
for (int h = size / 2; h > 0; h /= 2) {
int index = h * 2 * tid + (h * 2) - 1 + (size * blockIdx.x);
int right = h * 2 * tid + (h * 1) - 1 + (size * blockIdx.x);
if (index < (size * (blockIdx.x+1))) {
int leftVal = d_out[right];
d_out[right] = d_out[index];
d_out[index] += leftVal;
}
__syncthreads();
}
__syncthreads();
//store the block total if this is the last thread of the block
// NOTE(review): `tid == size - 1` assumes blockDim.x == size; with fewer
// threads than `size` no thread would store the total — confirm launches.
if (tid == size -1 && store) {
scan_store[blockIdx.x] = d_out[(size * (blockIdx.x + 1)) - 1] + flag[(size * (blockIdx.x + 1)-1)];
}
__syncthreads();
}
__global__ void combine(int* results, int* mini, int size, int* numFalse, int* scan) {
    // Adds each block's base offset (from the second-level scan in `mini`)
    // to its per-block scan values, turning them into a global scan; the
    // last thread then publishes the total count of zero-bit keys.
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    // Per-block base fetched once by thread 0 and shared with the block.
    __shared__ int blockBase;
    if (threadIdx.x == 0) {
        blockBase = mini[blockIdx.x];
    }
    __syncthreads();
    if (gid < size) {
        results[gid] += blockBase;
    }
    __syncthreads();
    // Total zeros = global scan at the last element + its own flag.
    if (gid == size - 1) {
        *numFalse = results[size - 1] + scan[size - 1];
    }
    __syncthreads();
}
__global__ void shuffle(int* d_out, int* d_in, int* scan, int* numFalse, int size, int mask) {
    // Split-scatter step of the radix pass: keys with a 0 in the masked bit
    // go to their scan rank; keys with a 1 go after all the zeros.
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    int trueIdx = 0;
    if (gid < size)
        trueIdx = gid - scan[gid] + *numFalse;  // destination for 1-bit keys
    __syncthreads();
    if (gid < size) {
        int dst = ((mask & d_in[gid]) == 0) ? scan[gid] : trueIdx;
        d_out[dst] = d_in[gid];
    }
    __syncthreads();
}
__global__ void swap(int* d_in, int* d_out, int size) {
    // Copies d_out back into d_in so the next radix pass reads the shuffled
    // keys; done as a separate kernel to avoid read/write races.
    int gid = threadIdx.x + blockDim.x * blockIdx.x;
    __syncthreads();
    if (gid < size)
        d_in[gid] = d_out[gid];
    __syncthreads();
}
// Radix-sort driver: reads comma-separated ints from inp.txt, sorts them on
// the GPU by scanning/scattering one bit per pass, writes q4.txt.
// NOTE(review): only 10 passes are run (mask bits 0..9), so keys must fit
// in 10 bits — confirm against the expected input range.
int main() {
//READING ONLY 8192 for simplicity, no padding
/*
TODO: Account for each thread taking care of 2 elements in prescan
*/
vector<int> arr;
string line;
ifstream myfile("inp.txt");
ofstream outfile2("q4.txt");
if (myfile.is_open())
{
//gets next int
//int numin = 0;
while (getline(myfile, line, ','))
{
arr.push_back(stoi(line, nullptr));
//arr.push_back(2); DEBUG
//numin++;
}
myfile.close();
}
//size calculations plus padding
int size = arr.size();
//cout << arr.size() << endl;
const int maxThreadsPerBlock = 1024;
int threads = maxThreadsPerBlock;
int blocks = (size / maxThreadsPerBlock);
int padblocks = 1;
int mod = size % threads;
if (mod > 0) blocks++;
//padding for block cudamalloc: round block count up to a power of 2 so the
//second-level prescan (which needs a power-of-2 size) works
while (padblocks < blocks) padblocks *= 2;
//bootleg push 2 for everything, try to not shuffle the end
for (int i = 0; i < threads - mod; i++) {
arr.push_back(2);
}
//allocate device memory
int* d_in, * d_out, * scan, * flags, * scan_store, * scan_large, *numFalse;
//int* h_false = (int*)malloc(sizeof(int));
//allocate memory for full size of padded blocks, only write to actual values
cudaMalloc((void**)&flags, arr.size() * sizeof(int));
cudaMalloc((void**)&d_out, arr.size() * sizeof(int));
cudaMalloc((void**)&scan, arr.size() * sizeof(int));
cudaMalloc((void**)&scan_store, padblocks * sizeof(int));
cudaMalloc((void**)&scan_large, padblocks * sizeof(int));
cudaMalloc((void**)&numFalse, sizeof(int));
// treat pointer to start of vector as array pointer
cudaMalloc((void**)&d_in, arr.size() * sizeof(int));
cudaMemcpy(d_in, &arr[0], arr.size() * sizeof(int), cudaMemcpyHostToDevice);
///////////////////////////////////////////////////////////////////////////
//For each bit 0-999
// NOTE(review): no CUDA error checking anywhere in this loop; a failed
// launch would silently produce wrong output.
for (int i = 0, mask = 1; i < 10; i++, mask <<= 1) {
//MAP 0's to flags array
global_get_flags<<<blocks, threads>>>(d_in, flags, mask, size);
cudaDeviceSynchronize();
//do first scan on each block and store results in scan_store array
prescan<<<blocks, threads>>>(scan, scan_store, flags, threads, true);
cudaDeviceSynchronize();
//DEBUG
/*int* h_scan = (int*)malloc(sizeof(int) * blocks);
cudaMemcpy(h_scan, scan_store, sizeof(int) * blocks, cudaMemcpyDeviceToHost);
cout << "SCAN_STORE: " << h_scan[0];
for (int j = 1; j < blocks; j++) {
cout << "," << h_scan[j];
}
cout << endl;*/
//////////////////////////
//do secondary scan on array of scan results
prescan<<<1, blocks >>>(scan_large, NULL, scan_store, padblocks, false);
cudaDeviceSynchronize();
// Combine scans in parallel
combine<<<blocks, threads>>>(scan, scan_large, size, numFalse, flags);
cudaDeviceSynchronize();
//DEBUG
/*cudaMemcpy(h_false, numFalse, sizeof(int), cudaMemcpyDeviceToHost);
printf("number of false: %d\n", h_false[0]);*/
//shuffle to new values
shuffle<<<blocks, threads>>>(d_out, d_in, scan, numFalse, size, mask);
cudaDeviceSynchronize();
////move d_out to d_in to redo, must do in seperate kernel to avoid race conditions
swap<<<blocks, threads>>>(d_in, d_out, size);
cudaDeviceSynchronize();
}
/////////////////////////////////////////////////////////////////////////
//Copy results to host from device
int* ans_arr = (int*)malloc(sizeof(int) * (arr.size() + mod));
cudaMemcpy(ans_arr, d_in, sizeof(int) * arr.size(), cudaMemcpyDeviceToHost);
// output to file
if (outfile2.is_open())
{
//avoid comma at end of string
outfile2 << ans_arr[0];
//append integers up to original input size
for (int i = 1; i < size; i++) {
outfile2 << "," << ans_arr[i];
}
outfile2.close();
}
//free mem
cudaFree(d_in);
cudaFree(d_out);
cudaFree(scan);
cudaFree(scan_store);
cudaFree(scan_large);
cudaFree(flags);
cudaFree(numFalse);
free(ans_arr);
}
|
2,942 | /* 2dadvec_kernels.cu
*
* This file contains the kernels for the 2D advection DG method.
* We use K = number of elements
* and H = number of sides
*/
#define PI 3.14159
/***********************
*
* DEVICE VARIABLES
*
***********************/
/* These are always prefixed with d_ for "device" */
double *d_c; // holds coefficients for each element
double *d_quad_rhs; // the right hand side containing the quadrature contributions
double *d_left_riemann_rhs; // the right hand side containing the left riemann contributions
double *d_right_riemann_rhs; // the right hand side containing the right riemann contributions
// runge kutta variables
double *d_kstar;
double *d_k1;
double *d_k2;
double *d_k3;
double *d_k4;
// precomputed basis functions
// TODO: maybe making these 2^n makes sure the offsets are cached more efficiently? who knows...
// precomputed basis functions ordered like so
//
// [phi_1(r1, s1), phi_1(r2, s2), ... , phi_1(r_nq, s_nq) ]
// [phi_2(r1, s1), phi_2(r2, s2), ... , phi_2(r_nq, s_nq) ]
// [ . . . . ]
// [ . . . . ]
// [ . . . . ]
// [phi_np(r1, s1), phi_np(r2, s2), ... , phi_np(r_nq, s_nq)]
//
__device__ __constant__ double basis[2048];
// note: these are multiplied by the weights
__device__ __constant__ double basis_grad_x[2048];
__device__ __constant__ double basis_grad_y[2048];
// precomputed basis functions evaluated along the sides. ordered
// similarly to basis and basis_grad_{x,y} but with one "matrix" for each side
// starting with side 0. to get to each side, offset with:
// side_number * n_p * num_quad1d.
__device__ __constant__ double basis_side[1024];
__device__ __constant__ double basis_vertex[256];
// weights for 2d and 1d quadrature rules
__device__ __constant__ double w[32];
__device__ __constant__ double w_oned[16];
__device__ __constant__ double r1[32];
__device__ __constant__ double r2[32];
__device__ __constant__ double r_oned[32];
/* Host-side setters uploading precomputed tables into the __constant__
 * arrays declared above. `size` is the element count (doubles).
 * Fix: cudaMemcpyToSymbol must be given the symbol itself — the string-name
 * form (e.g. "basis") was removed from the runtime API in CUDA 5.0 and
 * fails with cudaErrorInvalidSymbol on modern toolkits. */
void set_basis(void *value, int size) {
    cudaMemcpyToSymbol(basis, value, size * sizeof(double));
}
void set_basis_grad_x(void *value, int size) {
    cudaMemcpyToSymbol(basis_grad_x, value, size * sizeof(double));
}
void set_basis_grad_y(void *value, int size) {
    cudaMemcpyToSymbol(basis_grad_y, value, size * sizeof(double));
}
void set_basis_side(void *value, int size) {
    cudaMemcpyToSymbol(basis_side, value, size * sizeof(double));
}
void set_basis_vertex(void *value, int size) {
    cudaMemcpyToSymbol(basis_vertex, value, size * sizeof(double));
}
void set_w(void *value, int size) {
    cudaMemcpyToSymbol(w, value, size * sizeof(double));
}
void set_w_oned(void *value, int size) {
    cudaMemcpyToSymbol(w_oned, value, size * sizeof(double));
}
void set_r1(void *value, int size) {
    cudaMemcpyToSymbol(r1, value, size * sizeof(double));
}
void set_r2(void *value, int size) {
    cudaMemcpyToSymbol(r2, value, size * sizeof(double));
}
void set_r_oned(void *value, int size) {
    cudaMemcpyToSymbol(r_oned, value, size * sizeof(double));
}
// tells which side (1, 2, or 3) to evaluate this boundary integral over
int *d_left_side_number;
int *d_right_side_number;
double *d_J; // jacobian determinant
double *d_min_J; // for the min sized jacobian
double *d_s_length; // length of sides
// the num_elem values of the x and y coordinates for the two vertices defining a side
// TODO: can i delete these after the lengths are precomputed?
// maybe these should be in texture memory?
double *d_s_V1x;
double *d_s_V1y;
double *d_s_V2x;
double *d_s_V2y;
// the num_elem values of the x and y partials
double *d_xr;
double *d_yr;
double *d_xs;
double *d_ys;
// the K indices of the sides for each element ranged 0->H-1
int *d_elem_s1;
int *d_elem_s2;
int *d_elem_s3;
// vertex x and y coordinates on the mesh which define an element
// TODO: can i delete these after the jacobians are precomputed?
// maybe these should be in texture memory?
double *d_V1x;
double *d_V1y;
double *d_V2x;
double *d_V2y;
double *d_V3x;
double *d_V3y;
// stores computed values at three vertices
double *d_Uv1;
double *d_Uv2;
double *d_Uv3;
// normal vectors for the sides
double *d_Nx;
double *d_Ny;
// index lists for sides
int *d_left_elem; // index of left element for side idx
int *d_right_elem; // index of right element for side idx
/***********************
*
* DEVICE FUNCTIONS
*
***********************/
/* flux function
*
* evaluates the flux f(u) at the point u.
*/
// Linear advection flux in x: f(u) = u.
__device__ double flux_x(double u) {
return u;
}
// Linear advection flux in y: f(u) = u.
__device__ double flux_y(double u) {
return u;
}
/* riemann solver
*
* evaluates the riemann problem over the boundary using Gaussian quadrature
* with Legendre polynomials as basis functions.
* Here: the simple central (average) flux 0.5*(uL + uR).
*/
__device__ double riemann(double u_left, double u_right) {
return 0.5 * (u_left + u_right);
}
/***********************
*
* INITIAL CONDITIONS
*
***********************/
/* initial condition function
*
* returns the value of the initial condition at point (x, y).
*/
__device__ double u0(double x, double y, int alpha) {
return x + y;//pow(x - y, alpha);
}
/* boundary exact
*
* returns the exact boundary conditions
*/
__device__ double boundary_exact(double x, double y, double t, int alpha) {
return x - 2*t + y;//u0(x, y, alpha);
}
/* u exact
*
* returns the exact value of u for error measurement.
* NOTE(review): ignores t, i.e. it returns the initial condition; this only
* matches boundary_exact (which advects with -2t) at t = 0 — confirm intent.
*/
__device__ double uexact(double x, double y, double t, int alpha) {
return u0(x, y, alpha);
}
/* initial conditions
*
* computes the coefficients for the initial conditions
* THREADS: num_elem
*/
__global__ void init_conditions(double *c, double *J,
                double *V1x, double *V1y,
                double *V2x, double *V2y,
                double *V3x, double *V3y,
                int n_quad, int n_p, int num_elem, int alpha) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num_elem) return;

    // Project u0 onto each of the n_p basis functions by quadrature.
    for (int i = 0; i < n_p; i++) {
        double u = 0.;
        for (int j = 0; j < n_quad; j++) {
            // map the canonical point (r1, r2) onto this element:
            // x = x2 * r + x3 * s + x1 * (1 - r - s)
            double x = r1[j] * V2x[idx] + r2[j] * V3x[idx] + (1 - r1[j] - r2[j]) * V1x[idx];
            double y = r1[j] * V2y[idx] + r2[j] * V3y[idx] + (1 - r1[j] - r2[j]) * V1y[idx];
            // accumulate w_j * u0(x_j, y_j) * phi_i(r_j, s_j)
            u += w[j] * u0(x, y, alpha) * basis[i * n_quad + j];
        }
        c[i * num_elem + idx] = u;
    }
}
/* find min jacobian
*
* returns the min jacobian inside of min_J.
* each block computes the min jacobian inside of that block and stores it in the
* blockIdx.x spot of the shared min_J variable.
* NOTE: this is fixed for 256 threads.
*/
__global__ void min_jacobian(double *J, double *min_J, int num_elem) {
    // Block-level min reduction over J; each block writes its minimum to
    // min_J[blockIdx.x] (the host must then reduce across blocks).
    // Fixes: the pair-fold previously compared J[i+256] but assigned J[i],
    // read J[i+256] without a bounds check, computed the stride start with a
    // spurious extra blockDim.x factor, and ran __syncthreads() inside a
    // divergent branch. Still assumes 256 threads per block.
    int tid = threadIdx.x;
    // Grid-stride start: each thread folds the pair (i, i+256) per step.
    int i = blockIdx.x * 256 * 2 + tid;
    __shared__ double s_min[256];

    // Seed with J[0] so threads that touch no data cannot affect the min.
    double m = (num_elem > 0) ? J[0] : 0.0;
    while (i < num_elem) {
        m = (m < J[i]) ? m : J[i];
        if (i + 256 < num_elem)
            m = (m < J[i + 256]) ? m : J[i + 256];
        i += gridDim.x * 256 * 2;
    }
    s_min[tid] = m;
    __syncthreads();

    // Shared-memory tree reduction; the barrier sits at uniform control flow.
    for (int s = 128; s > 0; s >>= 1) {
        if (tid < s)
            s_min[tid] = (s_min[tid] < s_min[tid + s]) ? s_min[tid] : s_min[tid + s];
        __syncthreads();
    }
    if (tid == 0) {
        min_J[blockIdx.x] = s_min[0];
    }
}
/***********************
*
* PRECOMPUTING
*
***********************/
/* side length computer
*
* precomputes the length of each side.
* THREADS: num_sides
*/
__global__ void preval_side_length(double *s_length,
                   double *s_V1x, double *s_V1y,
                   double *s_V2x, double *s_V2y,
                   int num_sides) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < num_sides) {
        // compute and store the length of the side
        double dx = s_V1x[idx] - s_V2x[idx];
        double dy = s_V1y[idx] - s_V2y[idx];
        // Fix: use double-precision sqrt with plain products; the previous
        // sqrtf(pow(..)) rounded the result through single precision.
        s_length[idx] = sqrt(dx * dx + dy * dy);
    }
}
/* jacobian computing
*
* precomputes the jacobian determinant for each element.
* THREADS: num_elem
*/
__global__ void preval_jacobian(double *J,
                double *V1x, double *V1y,
                double *V2x, double *V2y,
                double *V3x, double *V3y,
                int num_elem) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num_elem) return;

    // Edge vectors from vertex 1; the jacobian determinant of the mapping
    // x = x2*r + x3*s + x1*(1 - r - s) is their cross product:
    // (x2 - x1)(y3 - y1) - (x3 - x1)(y2 - y1)
    double e1x = V2x[idx] - V1x[idx];
    double e1y = V2y[idx] - V1y[idx];
    double e2x = V3x[idx] - V1x[idx];
    double e2y = V3y[idx] - V1y[idx];
    J[idx] = e1x * e2y - e2x * e1y;
}
/* evaluate normal vectors
*
* computes the normal vectors for each element along each side.
* THREADS: num_sides
*
*/
__global__ void preval_normals(double *Nx, double *Ny,
                   double *s_V1x, double *s_V1y,
                   double *s_V2x, double *s_V2y,
                   double *V1x, double *V1y,
                   double *V2x, double *V2y,
                   double *V3x, double *V3y,
                   int *left_side_number, int num_sides) {
    // Unit normal per side: the side vector rotated by 90 degrees and
    // normalized. Orientation is fixed later by preval_normals_direction.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < num_sides) {
        // components of the side vector
        double x = s_V2x[idx] - s_V1x[idx];
        double y = s_V2y[idx] - s_V1y[idx];
        // Fix: use double-precision sqrt with plain products; the previous
        // sqrtf(pow(..)) rounded the length through single precision.
        double length = sqrt(x * x + y * y);
        // store the rotated, normalized vector
        Nx[idx] = -y / length;
        Ny[idx] = x / length;
    }
}
// Orients each precomputed side normal so it points OUT of its left element.
// One thread per side. For a side, the midpoint of its two vertices and the
// opposite (third) vertex of the left element define an inward direction;
// if the current normal has a positive dot product with that inward vector,
// it is flipped.
__global__ void preval_normals_direction(double *Nx, double *Ny,
                                         double *V1x, double *V1y,
                                         double *V2x, double *V2y,
                                         double *V3x, double *V3y,
                                         int *left_elem, int *left_side_number, int num_sides) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < num_sides) {
        double new_x, new_y, dot;
        double initial_x, initial_y, target_x, target_y;
        double x, y;
        int left_idx, side;
        // get left side's vertices
        left_idx = left_elem[idx];
        side = left_side_number[idx];
        // get the normal vector
        x = Nx[idx];
        y = Ny[idx];
        // make it point the correct direction by learning the third vertex point
        // NOTE(review): target/initial stay uninitialized if side is outside
        // {0,1,2} — presumably guaranteed by the mesh setup; confirm.
        switch (side) {
            case 0:
                // side 0 joins V1-V2; opposite vertex is V3
                target_x = V3x[left_idx];
                target_y = V3y[left_idx];
                initial_x = (V1x[left_idx] + V2x[left_idx]) / 2.;
                initial_y = (V1y[left_idx] + V2y[left_idx]) / 2.;
                break;
            case 1:
                // side 1 joins V2-V3; opposite vertex is V1
                target_x = V1x[left_idx];
                target_y = V1y[left_idx];
                initial_x = (V2x[left_idx] + V3x[left_idx]) / 2.;
                initial_y = (V2y[left_idx] + V3y[left_idx]) / 2.;
                break;
            case 2:
                // side 2 joins V1-V3; opposite vertex is V2
                target_x = V2x[left_idx];
                target_y = V2y[left_idx];
                initial_x = (V1x[left_idx] + V3x[left_idx]) / 2.;
                initial_y = (V1y[left_idx] + V3y[left_idx]) / 2.;
                break;
        }
        // create the vector pointing towards the third vertex point (inward)
        new_x = target_x - initial_x;
        new_y = target_y - initial_y;
        // find the dot product between the normal and new vectors
        dot = x * new_x + y * new_y;
        // a positive dot product means the normal points inward: flip it
        if (dot > 0) {
            Nx[idx] *= -1;
            Ny[idx] *= -1;
        }
    }
}
__global__ void preval_partials(double *V1x, double *V1y,
                                double *V2x, double *V2y,
                                double *V3x, double *V3y,
                                double *xr, double *yr,
                                double *xs, double *ys, int num_elem) {
    // One thread per element: store the partial derivatives of the
    // reference-to-physical map x = x2*r + x3*s + x1*(1 - r - s),
    // used later by the chain rule. d/dr gives (v2 - v1); d/ds gives
    // (v3 - v1); likewise for y.
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= num_elem)
        return;
    double x1 = V1x[idx], y1 = V1y[idx];
    xr[idx] = V2x[idx] - x1;
    xs[idx] = V3x[idx] - x1;
    yr[idx] = V2y[idx] - y1;
    ys[idx] = V3y[idx] - y1;
}
/***********************
*
* MAIN FUNCTIONS
*
***********************/
/* riemann evaluation
*
* device function to solve the riemann problem.
*/
// Evaluates the numerical flux at one 1D integration point of a side.
// c_left/c_right: modal coefficients of the two elements sharing the side.
// Returns riemann(u_left, u_right), where each trace is the basis expansion
// evaluated on the side. right_idx == -1 marks a boundary side, in which
// case the right trace comes from boundary_exact at the physical point.
__device__ double eval_riemann(double *c_left, double *c_right,
                               double v1x, double v1y,
                               double v2x, double v2y,
                               double v3x, double v3y,
                               int j, // j, as usual, is the index of the integration point
                               int left_side, int right_side,
                               int left_idx, int right_idx,
                               int n_p, int n_quad1d,
                               int num_sides, double t, int alpha) {
    double u_left, u_right;
    int i;
    u_left = 0.;
    u_right = 0.;
    // left trace: sum of coefficients times the side-restricted basis at j
    for (i = 0; i < n_p; i++) {
        u_left += c_left[i] * basis_side[left_side * n_p * n_quad1d + i * n_quad1d + j];
    }
    // make all threads in the first warps be boundary sides
    if (right_idx == -1) {
        double r1_eval, r2_eval;
        double x, y;
        // we need the mapping back to the grid space
        // NOTE(review): r1_eval/r2_eval stay uninitialized if left_side is
        // outside {0,1,2} — presumably guaranteed by the mesh setup; confirm.
        switch (left_side) {
            case 0:
                // side 0: r runs over [0,1], s = 0
                r1_eval = 0.5 + 0.5 * r_oned[j];
                r2_eval = 0.;
                break;
            case 1:
                // side 1: the hypotenuse r + s = 1
                r1_eval = (1. - r_oned[j]) / 2.;
                r2_eval = (1. + r_oned[j]) / 2.;
                break;
            case 2:
                // side 2: r = 0; points traversed in reverse to match the
                // side's orientation
                r1_eval = 0.;
                r2_eval = 0.5 + 0.5 * r_oned[n_quad1d - 1 - j];
                break;
        }
        // x = x2 * r + x3 * s + x1 * (1 - r - s)
        x = v2x * r1_eval + v3x * r2_eval + v1x * (1 - r1_eval - r2_eval);
        y = v2y * r1_eval + v3y * r2_eval + v1y * (1 - r1_eval - r2_eval);
        // deal with the boundary element here: right trace is the exact
        // boundary value at the physical point
        u_right = boundary_exact(x, y, t, alpha);
    } else {
        // evaluate the right side at the integration point; the right
        // element walks the side's quadrature points in the opposite order,
        // hence index n_quad1d - 1 - j
        for (i = 0; i < n_p; i++) {
            u_right += c_right[i] * basis_side[right_side * n_p * n_quad1d + i * n_quad1d + n_quad1d - 1 - j];
        }
    }
    return riemann(u_left, u_right);
}
/* surface integral evaluation
*
* evaluate all the riemann problems for each element.
* THREADS: num_sides
*/
// Computes the surface (flux) integral contribution of side idx for every
// basis function, writing into both elements' riemann RHS vectors.
// The j-loop accumulates the [-1,1] quadrature; the sign difference between
// left and right reflects the opposite outward normals of the two elements.
__device__ void eval_surface(double *c_left, double *c_right,
                             double *left_riemann_rhs, double *right_riemann_rhs,
                             double len,
                             double v1x, double v1y,
                             double v2x, double v2y,
                             double v3x, double v3y,
                             int left_idx, int right_idx,
                             int left_side, int right_side,
                             double nx, double ny,
                             int n_quad1d, int n_p, int num_sides,
                             int num_elem, double t, int idx, int alpha) {
    int i, j;
    double s, left_sum, right_sum;
    // multiply across by the i'th basis function
    for (i = 0; i < n_p; i++) {
        left_sum = 0.;
        right_sum = 0.;
        // we're at the j'th integration point
        for (j = 0; j < n_quad1d; j++) {
            // solve the Riemann problem at this integration point
            s = eval_riemann(c_left, c_right,
                             v1x, v1y, v2x, v2y, v3x, v3y,
                             j, left_side, right_side, left_idx, right_idx,
                             n_p, n_quad1d, num_sides, t, alpha);
            // hoist the (normal . flux) * weight factor; the original
            // evaluated flux_x(s) and flux_y(s) twice per quadrature point
            double weighted_flux = (nx * flux_x(s) + ny * flux_y(s)) * w_oned[j];
            left_sum  += weighted_flux *
                         basis_side[left_side * n_p * n_quad1d + i * n_quad1d + j];
            // the right element traverses the side in reverse order
            right_sum += weighted_flux *
                         basis_side[right_side * n_p * n_quad1d + i * n_quad1d + n_quad1d - 1 - j];
        }
        // store this side's contribution in the riemann rhs vectors
        left_riemann_rhs[i * num_sides + idx] = -len / 2 * left_sum;
        right_riemann_rhs[i * num_sides + idx] = len / 2 * right_sum;
    }
}
/* flux boundary evaluation
*
* evaulates the flux at the boundaries by handling them somehow.
* THREADS: num_boundary
*/
/* volume integrals
*
* evaluates and adds the volume integral to the rhs vector
* THREADS: num_elem
*/
// Computes the volume integral of element idx for every basis function and
// stores it in quad_rhs. r_c holds the element's n_p modal coefficients;
// x_r, y_r, x_s, y_s are the precomputed partials of the reference map
// (from preval_partials), used here for the chain rule.
// NOTE(review): no explicit quadrature weight appears — presumably the 2D
// weights are folded into basis / basis_grad_{x,y}; confirm.
__device__ void eval_volume(double *r_c, double *quad_rhs,
                            double x_r, double y_r,
                            double x_s, double y_s,
                            int n_quad, int n_p, int num_elem, int idx) {
    int i, j, k;
    double sum, u;
    // evaluate the volume integral for each coefficient
    for (i = 0; i < n_p; i++) {
        sum = 0.;
        for (j = 0; j < n_quad; j++) {
            // Evaluate u at the integration point.
            u = 0.;
            for (k = 0; k < n_p; k++) {
                u += r_c[k] * basis[n_quad * k + j];
            }
            // Add to the sum
            // [fx fy] * [y_s, -y_r; -x_s, x_r] * [phi_x phi_y]
            // (the adjugate of the map's Jacobian converts reference-space
            // basis gradients to physical-space ones)
            sum += ( flux_x(u) * ( basis_grad_x[n_quad * i + j] * y_s
                                  -basis_grad_y[n_quad * i + j] * y_r)
                   + flux_y(u) * (-basis_grad_x[n_quad * i + j] * x_s
                                  + basis_grad_y[n_quad * i + j] * x_r));
        }
        // store the result (column-per-coefficient layout: stride num_elem)
        quad_rhs[i * num_elem + idx] = sum;
    }
}
/* evaluate error
*
* evaluates u at the three vertex points for output
* THREADS: num_elem
*/
// Evaluates (numerical - exact) solution at the element's three vertices,
// writing one error value per vertex into Uv1/Uv2/Uv3 at position idx.
__device__ void eval_error(double *c,
                           double v1x, double v1y,
                           double v2x, double v2y,
                           double v3x, double v3y,
                           double *Uv1, double *Uv2, double *Uv3,
                           int num_elem, int n_p, double t, int idx, int alpha) {
    // accumulate the basis expansion of u at each vertex
    double acc1 = 0., acc2 = 0., acc3 = 0.;
    for (int k = 0; k < n_p; k++) {
        acc1 += c[k] * basis_vertex[k * 3 + 0];
        acc2 += c[k] * basis_vertex[k * 3 + 1];
        acc3 += c[k] * basis_vertex[k * 3 + 2];
    }
    // subtract the exact solution at each vertex and store
    Uv1[idx] = acc1 - uexact(v1x, v1y, t, alpha);
    Uv2[idx] = acc2 - uexact(v2x, v2y, t, alpha);
    Uv3[idx] = acc3 - uexact(v3x, v3y, t, alpha);
}
/* evaluate u
*
* evaluates u at the three vertex points for output
* THREADS: num_elem
*/
// Evaluates u at the element's three vertices for output, writing one value
// per vertex into Uv1/Uv2/Uv3 at position idx.
__device__ void eval_u(double *c,
                       double *Uv1, double *Uv2, double *Uv3,
                       int num_elem, int n_p, int idx) {
    // accumulate the basis expansion of u at each vertex
    double acc1 = 0., acc2 = 0., acc3 = 0.;
    for (int k = 0; k < n_p; k++) {
        acc1 += c[k] * basis_vertex[k * 3 + 0];
        acc2 += c[k] * basis_vertex[k * 3 + 1];
        acc3 += c[k] * basis_vertex[k * 3 + 2];
    }
    // store result
    Uv1[idx] = acc1;
    Uv2[idx] = acc2;
    Uv3[idx] = acc3;
}
|
2,943 | //#include "xfasttrie-k-parallel.cuh"
//#include "Catch2/catch.hpp"
//#include "cuda/api_wrappers.h"
//
//#include "allocators/default_allocator.cuh"
//#include <cassert>
//#include <cooperative_groups.h>
//
//using XTrie = XFastTrieKParallel<unsigned char, int>;
//using XTrieKey = typename XTrie::key_type;
//using XTrie3 = XFastTrieKParallel<unsigned char, int, 3>;
//using XTrie3Key = typename XTrie3::key_type;
//using BigXTrie = XFastTrieKParallel<int, int>;
//using BigXTrieKey = typename BigXTrie::key_type;
//
//__global__ void XFastTrieKParallel_initialize_allocator_small(gpu::default_allocator* allocator, char* memory, int memory_size, XTrie3* xtrie)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
// if (block.thread_rank() == 0)
// new (allocator) gpu::default_allocator(memory, memory_size);
// block.sync();
// new (xtrie) XTrie3(block, *allocator);
//}
//
//__global__ void XFastTrieKParallel_initialize_allocator(gpu::default_allocator* allocator, char* memory, int memory_size, XTrie* xtrie)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
// if (block.thread_rank() == 0)
// new (allocator) gpu::default_allocator(memory, memory_size);
// block.sync();
// new (xtrie) XTrie(block, *allocator);
//}
//
//__global__ void XFastTrieKParallel_initialize_allocator_big(gpu::default_allocator* allocator, char* memory, int memory_size, BigXTrie* xtrie)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
// if (block.thread_rank() == 0)
// new (allocator) gpu::default_allocator(memory, memory_size);
// block.sync();
// new (xtrie) BigXTrie(block, *allocator);
//}
//
//template <typename Key, typename Value, std::size_t Universe>
//__device__ void XFastTrieKParallel_ensure_value(const XFastTrieKParallel<Key, Value, Universe>& trie, typename XFastTrieKParallel<Key, Value, Universe>::iterator it, int expected_value)
//{
// assert(it != trie.end());
// assert(it->second == expected_value);
//}
//
//__global__ void XFastTrieKParallel_test_insert_find_2(XTrie* triePtr)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
//
// XTrie& trie = *triePtr;
// auto convert = [](int value) -> XTrieKey { return value; };
// trie.insert(block, convert(3), 3);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(3)), 3);
// trie.insert(block, convert(1), 1);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(1)), 1);
// trie.insert(block, convert(6), 6);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(6)), 6);
// trie.insert(block, convert(5), 5);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(5)), 5);
//}
//
//__global__ void XFastTrieKParallel_test_insert_find(XTrie* triePtr)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
//
// XTrie& trie = *triePtr;
// auto convert = [](int value) -> XTrieKey { return value; };
// trie.insert(block, convert(255), 255);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(255)), 255);
// trie.insert(block, convert(13), 13);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(13)), 13);
// trie.insert(block, convert(251), 251);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(251)), 251);
// trie.insert(block, convert(15), 15);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(15)), 15);
//}
//
//__global__ void XFastTrieKParallel_test_insert_find_small(XTrie3* triePtr)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
//
// XTrie3& trie = *triePtr;
// auto convert = [](int value) -> XTrie3Key { return value; };
// trie.insert(block, convert(3), 3);
// if (block.thread_rank() == 0)
// trie.debug();
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(3)), 3);
// trie.insert(block, convert(1), 1);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(1)), 1);
// trie.insert(block, convert(6), 6);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(6)), 6);
// printf("==");
// trie.insert(block, convert(5), 5);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(5)), 5);
// trie.insert(block, convert(7), 7);
// auto it = trie.find(block, convert(7));
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(7)), 7);
// trie.insert(block, convert(4), 4);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(4)), 4);
// trie.insert(block, convert(0), 0);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(0)), 0);
//}
//
//__global__ void XFastTrieKParallel_test_insert_find_small_2(XTrie3* triePtr)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
//
// XTrie3& trie = *triePtr;
// auto convert = [](int value) -> XTrie3Key { return value; };
// trie.insert(block, convert(7), 7);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(7)), 7);
// trie.insert(block, convert(0), 0);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(0)), 0);
// trie.insert(block, convert(3), 3);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(3)), 3);
// trie.insert(block, convert(5), 5);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(5)), 5);
// trie.insert(block, convert(7), 7);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(7)), 7);
// trie.insert(block, convert(4), 4);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(4)), 4);
// trie.insert(block, convert(0), 0);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(0)), 0);
//}
//
//__global__ void XFastTrieKParallel_test_insert_find_small_increasing_order(XTrie3* triePtr)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
//
// XTrie3& trie = *triePtr;
// auto convert = [](int value) -> XTrie3Key { return value; };
// for (int i = 0; i != trie.size(); ++i)
// {
// trie.insert(block, convert(i), i);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(i)), i);
// }
//}
//
//__global__ void XFastTrieKParallel_test_insert_find_small_decreasing_order(XTrie3* triePtr)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
//
// XTrie3& trie = *triePtr;
// auto convert = [](int value) -> XTrie3Key { return value; };
// for (int i = trie.size() - 1; i != 0; --i)
// {
// trie.insert(block, convert(i), i);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(i)), i);
// }
//}
//
//__global__ void XFastTrieKParallel_test_predecessor_successor_small(XTrie3* triePtr)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
//
// XTrie3& trie = *triePtr;
// auto convert = [](int value) -> XTrie3Key { return value; };
// assert(trie.predecessor(block, convert(4)) == trie.end());
// assert(trie.successor(block, convert(4)) == trie.end());
//
// trie.insert(block, convert(2), 2);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(3)), 2);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(4)), 2);
// XFastTrieKParallel_ensure_value(trie, trie.successor(block, convert(1)), 2);
// assert(trie.predecessor(block, convert(1)) == trie.end());
// assert(trie.successor(block, convert(3)) == trie.end());
//
// trie.insert(block, convert(3), 3);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(3)), 3);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(4)), 3);
// XFastTrieKParallel_ensure_value(trie, trie.successor(block, convert(2)), 2);
//}
//
//__global__ void XFastTrieKParallel_test_insert_find_big(BigXTrie* triePtr)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
//
// BigXTrie& trie = *triePtr;
// auto convert = [](int value) -> BigXTrieKey { return value; };
// trie.insert(block, convert(3), 3);
// /*if (block.thread_rank() == 0)
// trie.debug();*/
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(3)), 3);
// trie.insert(block, convert(1), 1);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(1)), 1);
// trie.insert(block, convert(6), 6);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(6)), 6);
// trie.insert(block, convert(5), 5);
// XFastTrieKParallel_ensure_value(trie, trie.find(block, convert(5)), 5);
//}
//
//__global__ void XFastTrieKParallel_test_predecessor_successor(XTrie* triePtr)
//{
// cooperative_groups::thread_block block = cooperative_groups::this_thread_block();
//
// XTrie& trie = *triePtr;
// auto convert = [](int value) -> XTrieKey { return value; };
// assert(trie.predecessor(block, convert(128)) == trie.end());
// assert(trie.successor(block, convert(128)) == trie.end());
//
// trie.insert(block, convert(2), 2);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(128)), 2);
// XFastTrieKParallel_ensure_value(trie, trie.successor(block, convert(1)), 2);
// assert(trie.predecessor(block, convert(1)) == trie.end());
// assert(trie.successor(block, convert(3)) == trie.end());
//
// trie.insert(block, convert(13), 13);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(128)), 13);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(13)), 13);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(12)), 2);
// XFastTrieKParallel_ensure_value(trie, trie.successor(block, convert(3)), 13);
// assert(trie.successor(block, convert(128)) == trie.end());
//
// trie.insert(block, convert(251), 251);
// if (block.thread_rank() == 0)
// trie.debug();
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(128)), 13);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(253)), 251);
// XFastTrieKParallel_ensure_value(trie, trie.successor(block, convert(1)), 2);
// assert(trie.predecessor(block, convert(1)) == trie.end());
// XFastTrieKParallel_ensure_value(trie, trie.successor(block, convert(3)), 13);
// XFastTrieKParallel_ensure_value(trie, trie.successor(block, convert(128)), 251);
// XFastTrieKParallel_ensure_value(trie, trie.successor(block, convert(248)), 251);
// assert(trie.successor(block, convert(252)) == trie.end());
//
// trie.insert(block, convert(190), 190);
// if (block.thread_rank() == 0)
// trie.debug();
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(189)), 13);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(190)), 190);
// XFastTrieKParallel_ensure_value(trie, trie.predecessor(block, convert(250)), 190);
// XFastTrieKParallel_ensure_value(trie, trie.successor(block, convert(191)), 251);
//}
//
//SCENARIO("X-FAST-TRIE-K-PARALLEL", "[XFASTTRIE][KPARALLEL]")
//{
// int memory_size_allocated = 4 * 1024 * 1024;
// auto current_device = cuda::device::current::get();
// auto d_memory = cuda::memory::device::make_unique<char[]>(current_device, memory_size_allocated);
// auto d_allocator = cuda::memory::device::make_unique<gpu::default_allocator>(current_device);
// unsigned int number_of_warps = 2u;
//
// GIVEN("A X-fast trie for 2^3")
// {
// auto d_xtrie3 = cuda::memory::device::make_unique<XTrie3>(current_device);
// cuda::launch(
// XFastTrieKParallel_initialize_allocator_small,
// { 1u, 1u },
// d_allocator.get(), d_memory.get(), memory_size_allocated, d_xtrie3.get()
// );
//
// WHEN("We add different values")
// {
// THEN("We should be able to retrieve them")
// {
// cuda::launch(
// XFastTrieKParallel_test_insert_find_small,
// { 1u, number_of_warps * 32u },
// d_xtrie3.get()
// );
// }
// }
//
//
// WHEN("We try again")
// {
// THEN("We should be able to retrieve them")
// {
// cuda::launch(
// XFastTrieKParallel_test_insert_find_small_2,
// { 1u, number_of_warps * 32u },
// d_xtrie3.get()
// );
// }
// }
//
// WHEN("We try again in increasing order")
// {
// THEN("We should be able to retrieve them")
// {
// cuda::launch(
// XFastTrieKParallel_test_insert_find_small_increasing_order,
// { 1u, number_of_warps * 32u },
// d_xtrie3.get()
// );
// }
// }
//
// WHEN("We try again in decreasing order")
// {
// THEN("We should be able to retrieve them")
// {
// cuda::launch(
// XFastTrieKParallel_test_insert_find_small_decreasing_order,
// { 1u, number_of_warps * 32u },
// d_xtrie3.get()
// );
// }
// }
//
// WHEN("We add different values")
// {
// THEN("Predecessor and successor should be conformed")
// {
// cuda::launch(
// XFastTrieKParallel_test_predecessor_successor_small,
// { 1u, number_of_warps * 32u },
// d_xtrie3.get()
// );
// }
// }
// }
//
// GIVEN("A X-fast trie for 2^8")
// {
// auto d_xtrie = cuda::memory::device::make_unique<XTrie>(current_device);
// cuda::launch(
// XFastTrieKParallel_initialize_allocator,
// { 1u, number_of_warps * 32u },
// d_allocator.get(), d_memory.get(), memory_size_allocated, d_xtrie.get()
// );
//
// WHEN("We add different values")
// {
// THEN("We should be able to retrieve them")
// {
// cuda::launch(
// XFastTrieKParallel_test_insert_find,
// { 1u, number_of_warps * 32u },
// d_xtrie.get()
// );
// }
// }
//
// WHEN("We add different values")
// {
// THEN("We should be able to retrieve them")
// {
// cuda::launch(
// XFastTrieKParallel_test_insert_find_2,
// { 1u, number_of_warps * 32u },
// d_xtrie.get()
// );
// }
// }
//
// WHEN("We add different values")
// {
// THEN("Predecessor and successor should be conformed")
// {
// cuda::launch(
// XFastTrieKParallel_test_predecessor_successor,
// { 1u, number_of_warps * 32u },
// d_xtrie.get()
// );
// }
// }
// }
//
// GIVEN("A X-fast trie for 2^32")
// {
// auto d_xtrie = cuda::memory::device::make_unique<BigXTrie>(current_device);
// cuda::launch(
// XFastTrieKParallel_initialize_allocator_big,
// { 1u, number_of_warps * 32u },
// d_allocator.get(), d_memory.get(), memory_size_allocated, d_xtrie.get()
// );
//
// WHEN("We add different values")
// {
// THEN("We should be able to retrieve them")
// {
// cuda::launch(
// XFastTrieKParallel_test_insert_find_big,
// { 1u, number_of_warps * 32u },
// d_xtrie.get()
// );
// }
// }
// }
//}
|
2,944 | #include <stdio.h>
// Device-side greeting: every thread of the launch prints one line
// (the host launches it with a single thread).
__global__ void multi_thread(void)
{
    printf("Hello, world from the device!\n");
}
// Prints a greeting from the host, then launches a single-thread kernel to
// print one from the device. Launch and execution errors are now reported
// instead of silently ignored (a failed launch previously still exited 0
// with no device output).
int main(void)
{
    // greet from the host
    printf("Hello, world from the host!\n");
    // launch a kernel with a single thread to greet from the device
    multi_thread<<<1,1>>>();
    // surface launch-configuration errors immediately
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) {
        // block until the kernel finishes and its printf buffer is flushed
        err = cudaDeviceSynchronize();
    }
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        return 1;
    }
    return 0;
}
2,945 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Queries and prints the number of CUDA-capable devices.
// The API result is now checked: on failure devcount is meaningless, so the
// original would print garbage while exiting successfully.
int main()
{
    int devcount = 0;
    cudaError_t err = cudaGetDeviceCount(&devcount);
    if (err != cudaSuccess) {
        fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    printf("%i device(s) found...", devcount);
    return 0;
}
|
2,946 | ////////////////////////////////////////////////////////////////////////////////
//
// FILE: one_dim_convolution.cu
// DESCRIPTION: implements one-dimensional convolution in 3 ways
// AUTHOR: Dan Fabian
// DATE: 3/15/2020
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
#include <stdio.h>
using std::cout; using std::endl; using std::cin;
// constant size params
const int SIZE = 15000, MASK_SIZE = 1001, BLOCK_SIZE = 1024;
// gpu prototypes
__global__ void naive_conv(float *input, float *kernel, float *output,
int maskSize, int size); // naive implementation
__global__ void const_mem_conv(float *input, float *output,
int maskSize, int size); // uses constant memory
__global__ void shared_mem_conv(float *input, float *output,
int maskSize, int size); // uses shared and constant mem
// device constant used in const_mem_conv and shared_mem_conv
__constant__ float kernel_c[MASK_SIZE];
////////////////////////////////////////////////////////////////////////////////
//
// MAIN
// Drives one of three 1D-convolution kernels chosen interactively.
// Fix: device pointers are initialized to NULL. The original called
// cudaFree(kernel_d) even when the 'n' path never allocated it, passing an
// indeterminate pointer (undefined behavior); cudaFree(NULL) is a safe no-op.
int main()
{
    // host buffers; device pointers start NULL so the unconditional frees
    // at the end are safe even for never-allocated buffers
    float *input_d = NULL, input[SIZE], *kernel_d = NULL, kernel[MASK_SIZE], *output_d = NULL, output[SIZE];
    // identity kernel: a single 1 at the center, so output should equal input
    for (int i = 0; i < MASK_SIZE; ++i)
        if (i == MASK_SIZE / 2)
            kernel[i] = 1;
        else
            kernel[i] = 0;
    // init input array
    for (int i = 0; i < SIZE; ++i)
        input[i] = 1;
    // memory sizes to allocate
    int inOutMem = SIZE * sizeof(float), kernelMem = MASK_SIZE * sizeof(float);
    // determine which version will be ran before allocating mem
    cout << "Which version ('n': naive, 'c': constant mem, 's': shared mem): ";
    char c; cin >> c;
    // allocate memory on device; an explicit kernel buffer is only needed by
    // the naive version (the others read the mask from constant memory)
    cudaMalloc((void**)&input_d, inOutMem);
    if (c == 'n')
        cudaMalloc((void**)&kernel_d, kernelMem);
    cudaMalloc((void**)&output_d, inOutMem);
    // copy input (and, for the naive path, the mask) to the device
    cudaMemcpy(input_d, input, inOutMem, cudaMemcpyHostToDevice);
    if (c == 'n')
        cudaMemcpy(kernel_d, kernel, kernelMem, cudaMemcpyHostToDevice);
    // copy kernel to constant memory
    if (c != 'n')
        cudaMemcpyToSymbol(kernel_c, kernel, kernelMem);
    // call gpu func; ceil-div so the last partial block is still launched
    int gridSize = ceil(float(SIZE) / float(BLOCK_SIZE));
    if (c == 'n')
        naive_conv<<<gridSize, BLOCK_SIZE>>>(input_d, kernel_d, output_d, MASK_SIZE, SIZE);
    else if (c == 'c')
        const_mem_conv<<<gridSize, BLOCK_SIZE>>>(input_d, output_d, MASK_SIZE, SIZE);
    else if (c == 's')
        shared_mem_conv<<<gridSize, BLOCK_SIZE>>>(input_d, output_d, MASK_SIZE, SIZE);
    // copy device memory back to host (this memcpy synchronizes with the kernel)
    cudaMemcpy(output, output_d, inOutMem, cudaMemcpyDeviceToHost);
    // free memory (safe for pointers left NULL)
    cudaFree(input_d); cudaFree(kernel_d); cudaFree(output_d);
    // print result
    for (int i = 0; i < SIZE; ++i)
        cout << output[i] << ' ';
    cout << endl;
    cudaDeviceSynchronize();
}
////////////////////////////////////////////////////////////////////////////////
//
// KERNEL functions
////////////////////////////////////////
// naive implementation
// Naive 1D convolution: one thread per output element, mask read straight
// from global memory. Out-of-range taps at the array edges contribute 0.
__global__ void naive_conv(float *input, float *kernel, float *output,
                           int maskSize, int size)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int halo = maskSize / 2;
    float acc = 0;
    for (int i = 0; i < maskSize; ++i)
    {
        int src = idx - halo + i;
        if (src >= 0 && src < size)
            acc += input[src] * kernel[i];
    }
    // copy to global mem, guarding the ragged last block
    if (idx < size)
        output[idx] = acc;
}
////////////////////////////////////////
// faster implementation that uses constant memory
// 1D convolution reading the mask from __constant__ memory (kernel_c),
// which is broadcast-cached when all lanes read the same element.
__global__ void const_mem_conv(float *input, float *output,
                               int maskSize, int size)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int halo = maskSize / 2;
    float acc = 0;
    for (int i = 0; i < maskSize; ++i)
    {
        int src = idx - halo + i;
        if (src >= 0 && src < size)
            acc += input[src] * kernel_c[i];
    }
    // copy to global mem, guarding the ragged last block
    if (idx < size)
        output[idx] = acc;
}
////////////////////////////////////////
// implementation using shared and constant memory
// 1D convolution using a shared-memory tile for this block's elements plus
// the constant-memory mask. Taps inside the block's range hit the tile;
// halo taps fall back to global memory (served by L2 for neighbors).
__global__ void shared_mem_conv(float *input, float *output,
                                int maskSize, int size)
{
    // [lowerBound, upperBound) is the global index range cached in the tile
    int idx = blockDim.x * blockIdx.x + threadIdx.x,
        lowerBound = blockDim.x * blockIdx.x,
        upperBound = blockDim.x * (blockIdx.x + 1),
        start = idx - (maskSize / 2);
    // shared mem
    __shared__ float tile_s[BLOCK_SIZE];
    // load elem (tail threads past `size` load nothing; their tile slots are
    // never read because the `bound` check below excludes elem >= size)
    if (idx < size)
        tile_s[threadIdx.x] = input[idx];
    // barrier before any thread reads a neighbor's tile slot
    __syncthreads();
    float val = 0;
    bool bound;
    for (int i = 0, elem = start; i < maskSize; ++i, ++elem)
    {
        // check if its within array
        bound = 0 <= elem && elem < size;
        // if its within bounds, use shared mem
        if (lowerBound <= elem && elem < upperBound && bound)
            val += tile_s[elem - lowerBound] * kernel_c[i];
        // resort to global mem if needed
        else if (bound)
            val += input[elem] * kernel_c[i];
    }
    // copy to global mem
    if (idx < size)
        output[idx] = val;
}
2,947 | /******************************
* Tisma Miroslav 2006/0395
* Multiprocesorski sistemi
* domaci zadatak 6 - 1. zadatak
*******************************/
/**
* 1. Sastaviti program koji kvadrira elemente dvodimenzionalne matrice.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define NUM_OF_GPU_THREADS 256
// Squares every element of `matrix` in place, one thread per element.
// NOTE(review): there is no bounds check, so every launched thread
// dereferences its index — the caller must allocate at least
// gridDim.x * blockDim.x elements (main() over-allocates for this reason).
__global__ void matrixSquare(int *matrix) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    matrix[idx] = matrix[idx] * matrix[idx];
}
// Reads matrix dimensions, fills an m x n matrix with random values,
// squares it on the GPU, and writes both matrices to a file.
// Fixes: the row-break test used `% m` and the output indexing used stride
// m — both wrong for non-square matrices (rows have n columns); the host
// matrix was leaked; cudaThreadSynchronize() is deprecated.
int main(int argc, char *argv[]) {
    int i, j, m, n;
    int *h_matrix, *d_matrix;
    FILE *out;
    // prompts: output file name / number of rows / number of columns
    printf("Matrice ce biti upisane u fajl dz6_1_izl1.in\n");
    printf("Unesite broj vrsta matrice:\n");
    scanf("%d", &m);
    printf("Unesite broj kolona matrice:\n");
    scanf("%d", &n);
    out = fopen("dz6_1_izl1.in", "w");
    if (out == NULL) {
        // report file-open failure and abort
        printf("Greska pri otvaranju fajla!");
        exit(EXIT_FAILURE);
    }
    fprintf(out, "ORIGINALNA MATRICA:\n");
    h_matrix = (int*) malloc(m*n * sizeof(int));
    for (i = 0; i < m*n; i++) {
        // new output row every n values: a row has n columns
        // (the original used `i % m`, garbling non-square output)
        if (i % n == 0)
            fprintf(out, "\n");
        // random value in [-100, 99]
        h_matrix[i] = -100 + rand() % 200;
        fprintf(out, "%4d ", h_matrix[i]);
    }
    int dimGrid(m*n / NUM_OF_GPU_THREADS + 1);
    int dimBlock(NUM_OF_GPU_THREADS);
    // over-allocate to a multiple of the launch size: matrixSquare has no
    // bounds check, so every launched thread must own a valid element
    int size = (m*n * sizeof(int) / (dimGrid * dimBlock) + 1) * (dimGrid * dimBlock);
    cudaMalloc((void**)&d_matrix, size);
    cudaMemcpy(d_matrix, h_matrix, m*n * sizeof(int), cudaMemcpyHostToDevice);
    matrixSquare<<<dimGrid, dimBlock>>>(d_matrix);
    // cudaThreadSynchronize() is deprecated; use the modern equivalent
    cudaDeviceSynchronize();
    cudaMemcpy(h_matrix, d_matrix, m*n * sizeof(int), cudaMemcpyDeviceToHost);
    fprintf(out, "\n\nKVADRIRANA MATRICA\n\n");
    for (i = 0; i < m; i++) {
        // row-major stride is the column count n
        // (the original used m, wrong for non-square matrices)
        for (j = 0; j < n; j++)
            fprintf(out, "%5d ", h_matrix[i * n + j]);
        fprintf(out, "\n");
    }
    fclose(out);
    cudaFree(d_matrix);
    free(h_matrix);
    return EXIT_SUCCESS;
}
2,948 | #include <random>
#include <cmath>
#include <iostream>
#include <stdio.h>
#include <assert.h>
#include "cuda_runtime.h"
// Micro-benchmark probing shared-memory access patterns: threadIdx.x / 4
// maps four consecutive threads onto the same shared-memory word.
// NOTE(review): `sm` is read without ever being written (shared memory is
// uninitialized) and with a 128-thread block only fake_result[0..31] are
// written — acceptable for a timing probe, not for meaningful output.
__global__ void bankConflictTest(float* fake_result) {
    __shared__ float sm[128];
    int i = threadIdx.x / 4;
    fake_result[i] = sm[i];
}
// Launches the bank-conflict probe once on a single 128-thread block.
// Fix: the original never synchronized or checked the launch, so any
// failure (or even non-execution) went unnoticed before cudaFree tore the
// buffer down.
int main(int argc, char *argv[]) {
    float *fake_result_d = NULL;
    cudaMalloc((void**)&fake_result_d, sizeof(float) * 128);
    dim3 block(128);
    dim3 grid(1);
    bankConflictTest<<<grid, block>>>(fake_result_d);
    // surface launch-configuration errors, then wait for completion
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess)
        err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
    cudaFree(fake_result_d);
    return err == cudaSuccess ? 0 : 1;
}
2,949 | /**
* Yuri Gorokhov
* lab 5 - Modulus power of two
*/
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#define ITERATIONS 100000
#define THREADS 32
#define POW 30
__global__ void kernel_mod(int);
// Times kernel_mod for moduli 2^0 .. 2^(POW-1) and prints the moduli and
// elapsed times. Fixes: exact integer powers via `1 << i` instead of the
// double-returning pow() with implicit conversion, and the timing events
// are destroyed (the original leaked them).
int main (void) {
    cudaEvent_t start, stop;
    int input[POW];
    float output[POW];
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    for(int i = 0; i < POW; i++) {
        // 1 << i is the exact power of two (same values pow(2,i) produced,
        // without the double round-trip)
        input[i] = 1 << i;
        cudaEventRecord(start,0);
        kernel_mod<<<1,THREADS>>>(1 << i);
        cudaEventRecord(stop, 0);
        // block until `stop` is reached so the elapsed time is valid
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&output[i], start, stop);
    }
    printf("[");
    for(int i = 0; i < POW; i++) {
        printf("%i, ", input[i]);
    }
    printf("\n[");
    for(int i = 0; i < POW; i++) {
        printf("%f, ", output[i]);
    }
    // release the timing events
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
// Timing probe: repeatedly reads shared memory at (threadIdx.x % mod) to
// measure the cost of the modulus plus the resulting access pattern.
// NOTE(review): `A` is never written (the reads see uninitialized shared
// memory) and `temp` is dead after the loop, so the compiler may eliminate
// the loop entirely — which would skew the very timing this kernel measures.
__global__ void kernel_mod(int mod) {
    __shared__ float A[THREADS];
    int temp;
    // lane's shared-memory slot, selected by the modulus under test
    int target = threadIdx.x % mod;
    for(int i = 1; i <= ITERATIONS; i++) {
        temp = A[target];
    }
    __syncthreads();
}
|
2,950 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREADSPERBLOCK 4
int checkArray(int [], int [], int);
// CUDA example: finds row sums of an integer matrix m
// find1elt() finds the rowsum of one row of the nxn matrix m, storing the
// result in the corresponding position in the rowsum array rs; matrix
// stored as 1-dimensional, row-major order
__global__ void find1elt(int *m, int *rs, int n);
// Computes row sums of an n x n matrix on the GPU, checks against a CPU
// reference, and reports the elapsed kernel time.
// Fixes: deprecated cudaThreadSynchronize() replaced, timing events
// destroyed, checkRs freed, and an explicit return added (the original
// leaked the events/buffer and fell off the end of main).
int main(int argc, char **argv)
{
    /* variables for timing */
    cudaEvent_t start, stop;
    float time;
    if (argc != 3) {
        printf("Usage: ./SR [width of matrix] [threads per block]\n");
        exit(0);
    }
    int n = atoi(argv[1]); // number of matrix rows/cols
    int *hm, // host matrix
        *dm, // device matrix
        *hrs, // host rowsums
        *drs; // device rowsums
    int *checkRs; // CPU reference rowsums
    int msize = n * n * sizeof(int); // size of matrix in bytes
    int rssize = n * sizeof(int);
    int threadsPerBlock = atoi(argv[2]); // get threads per block
    // n/threadsPerBlock blocks are launched below (floor division), so a
    // non-divisible n leaves the tail rows uncomputed — warn about it
    if (n % threadsPerBlock != 0) {
        printf("Warning: width of matrix not divisible by # threads per block\n");
    }
    // allocate space for host matrix
    hm = (int *) malloc(msize);
    // create timer events
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // as a test, fill matrix with consecutive integers
    int t = 0,i,j;
    for (i = 0; i < n; i++) {
        for (j = 0; j < n; j++) {
            hm[i*n+j] = t++;
        }
    }
    // compute array of sums on CPU for checking
    checkRs = (int *) malloc(rssize);
    for (i=0; i<n; i++) {
        checkRs[i] = 0;
        for (j=0; j<n; j++) {
            checkRs[i] += hm[i*n+j];
        }
    }
    // allocate space for device matrix
    cudaMalloc((void **)&dm,msize);
    // copy host matrix to device matrix
    cudaMemcpy(dm,hm,msize,cudaMemcpyHostToDevice);
    // allocate host, device rowsum arrays
    hrs = (int *) malloc(rssize);
    cudaMalloc((void **)&drs,rssize);
    // record start timestamp
    cudaEventRecord(start, 0);
    // invoke the kernel: one thread per row
    find1elt<<<n/threadsPerBlock,threadsPerBlock>>>(dm,drs,n);
    // wait for kernel to finish (cudaThreadSynchronize is deprecated)
    cudaDeviceSynchronize();
    // copy row vector from device to host
    cudaMemcpy(hrs,drs,rssize,cudaMemcpyDeviceToHost);
    // get elapsed time
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Elapsed time = %f\n", time);
    // check results
    int diff = checkArray(hrs, checkRs, n);
    if (diff == 0) {
        printf("Arrays match\n");
    }
    else {
        printf("Arrays do not match\n");
    }
    // clean up
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(hm);
    cudaFree(dm);
    free(hrs);
    cudaFree(drs);
    free(checkRs);
    return 0;
}
// Compares two int arrays element-wise.
// Returns the number of positions where they differ (0 means identical).
int checkArray(int x[], int y[], int size) {
    int mismatches = 0;
    for (int k = 0; k < size; k++)
        mismatches += (x[k] != y[k]);
    return mismatches;
}
// Computes the sum of one matrix row per thread.
// m: n x n matrix in row-major order; rs: per-row output; n: matrix width.
// Fix: a bounds guard so threads past row n-1 never write beyond rs — the
// current host launch (n/threadsPerBlock blocks, floor division) never
// overshoots, but any rounded-up launch would have corrupted memory.
__global__ void find1elt(int *m, int *rs, int n)
{
    // this thread will handle row # rownum
    int rownum = blockDim.x * blockIdx.x + threadIdx.x;
    // guard the ragged tail of the grid
    if (rownum >= n)
        return;
    int sum = 0;
    for (int k = 0; k < n; k++)
        sum += m[rownum*n+k];
    rs[rownum] = sum;
}
|
2,951 | #include "includes.h"
// Device helper: dot product of row `row` of matrix_a with column `col`
// of matrix_b, both N x N row-major.
__device__ float rowcol_dot(float *matrix_a, float *matrix_b, int row, int col, int N)
{
    float acc = 0.0f;
    for (int idx = 0; idx < N; ++idx)
        acc += matrix_a[row*N + idx] * matrix_b[idx*N + col];
    return acc;
}
// Naive matrix multiply: each thread computes one element of the N x N
// row-major output.  blockIdx.x/threadIdx.x index rows, .y index columns.
// Fixed vs original: bounds guard so a grid padded beyond N in either
// dimension cannot write out of bounds.
__global__ void matrix_mult_ker(float * matrix_a, float * matrix_b, float * output_matrix, int N)
{
    int row = blockIdx.x*blockDim.x + threadIdx.x;
    int col = blockIdx.y*blockDim.y + threadIdx.y;
    if (row < N && col < N)
        output_matrix[col + row*N] = rowcol_dot(matrix_a, matrix_b, row, col, N);
}
2,952 | //THRUST
#include <thrust/tuple.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
//STL
#include <iostream>
#include <vector>
int N = 10;
thrust::tuple< int, const char * > tString( N, "thrust" );
// Demo: thrust::tuple element access, then copying one STL vector into a
// thrust host_vector and a device_vector, echoing each to stdout.
int main( void )
{
    // Tuple element access by index.
    std::cout << "The 1st value of tString is "
              << thrust::get< 0 >( tString ) << std::endl;

    // Source data: N copies of 0.1f in a plain STL vector.
    std::vector < float > vecStd( N, 0.1f );

    // STL -> host_vector assignment, then print one element per line.
    thrust::host_vector< float > h_vec( N );
    h_vec = vecStd;
    std::cout << "h_vec from vecStd :" << std::endl;
    thrust::copy( h_vec.begin(), h_vec.end(),
                  std::ostream_iterator< float >( std::cout, "\n" ) );

    // STL -> device_vector assignment, then print one element per line.
    thrust::device_vector< float > d_vec( N );
    d_vec = vecStd;
    std::cout << "d_vec from vecStd :" << std::endl;
    thrust::copy( d_vec.begin(), d_vec.end(),
                  std::ostream_iterator< float >( std::cout, "\n" ) );

    return 0;
}
|
2,953 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/*
 * Auto-generated stress kernel: repeatedly overwrites `comp` with chains of
 * single-precision arithmetic and libm calls, then prints the final value.
 * Earlier assignments to `comp` are intentionally discarded; the printed
 * number is only a reproducible floating-point fingerprint of the inputs.
 * Launched as compute<<<1,1>>>(...) from main below.
 */
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37) {
for (int i=0; i < var_1; ++i) {
comp = var_3 / (var_4 - (-1.6661E-42f + +0.0f));
float tmp_1 = var_5 - var_6;
float tmp_2 = +1.1214E-42f;
comp = tmp_2 / tmp_1 - +0.0f / sqrtf(var_7 * (-0.0f - (+1.2595E-41f - atan2f(var_8 / (var_9 + ldexpf(ldexpf(-0.0f, 2), 2)), ceilf(var_10 + var_11 - var_12 + var_13)))));
if (comp == (+1.8695E4f / var_14)) {
float tmp_3 = log10f(var_15 * (+1.5900E35f / (-1.4083E-37f - powf((var_16 - +1.0132E11f * var_17 * ldexpf(acosf((-1.5411E36f * atan2f(-1.5994E-35f / var_18 / var_19 - +1.5974E-44f - (var_20 / var_21), sqrtf(expf((-1.0387E-13f / (+1.1526E-36f / (+1.3835E-35f * (+0.0f + ceilf((var_22 / (+1.1241E20f + var_23 - var_24)))))))))))), 2)), (var_25 * (var_26 / +1.8262E-42f - -1.2852E8f * var_27))))));
float tmp_4 = +1.0357E22f;
comp += tmp_4 + tmp_3 + (+1.3414E12f * (+1.2422E29f * var_28 + var_29));
comp += (-1.5770E-35f * var_30);
}
for (int i=0; i < var_2; ++i) {
float tmp_5 = (var_31 - (var_32 - var_33 / expf(-1.3282E-35f * -1.3221E-36f)));
comp = tmp_5 + (var_34 / -1.7788E-44f + (+1.3019E35f + -0.0f - var_35 + var_36));
comp += -1.3912E35f + var_37 * -1.1422E17f;
}
}
printf("%.17g\n", comp);
}
// Allocate a 10-element float array on the host with every slot set to v.
// The caller owns the returned buffer and must free() it.
float* initPointer(float v) {
  float *buf = (float*) malloc(sizeof(float)*10);
  int i = 0;
  while (i < 10) {
    buf[i] = v;
    ++i;
  }
  return buf;
}
/*
 * Parse the 38 numeric command-line values (arg 1 float, args 2-3 int,
 * args 4-38 float) and forward them to the single-thread `compute` kernel.
 * Fixed vs original: argc is validated before indexing argv[1..38]
 * (the original dereferenced argv unchecked and crashed on short input).
 */
int main(int argc, char** argv) {
  if (argc < 39) {
    fprintf(stderr, "Usage: %s <38 numeric arguments>\n", argv[0]);
    return 1;
  }
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);
  float tmp_29 = atof(argv[29]);
  float tmp_30 = atof(argv[30]);
  float tmp_31 = atof(argv[31]);
  float tmp_32 = atof(argv[32]);
  float tmp_33 = atof(argv[33]);
  float tmp_34 = atof(argv[34]);
  float tmp_35 = atof(argv[35]);
  float tmp_36 = atof(argv[36]);
  float tmp_37 = atof(argv[37]);
  float tmp_38 = atof(argv[38]);
  /* Single-thread launch; the kernel prints its own result. */
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38);
  cudaDeviceSynchronize();  /* flush device printf before exit */
  return 0;
}
|
2,954 | #include <stdio.h>
#include <unistd.h>
struct IntSandwich {
int beginning;
int middle[1];
int end;
};
// Probe hostMem->middle[offset] from a single GPU thread.  Behavior is
// selected at compile time: -DR performs a volatile read, -DW writes 42,
// neither makes the kernel a no-op.  `offset` is deliberately unchecked so
// out-of-bounds accesses around the struct's middle[1] array can be
// exercised (e.g. under compute-sanitizer).
// NOTE(review): despite the name, hostMem is DEVICE memory — main below
// allocates it with cudaMalloc.
__global__ void access_offset_kernel(struct IntSandwich *hostMem, int offset) {
#ifdef R
volatile int i = hostMem->middle[offset];
#elif W
hostMem->middle[offset] = 42;
#endif
}
// Parse "-o <offset>" and launch access_offset_kernel on one thread against
// a freshly device-allocated IntSandwich.  Aborts on bad usage or on a
// failed device allocation (the original ignored the cudaMalloc result).
int main(int argc, char** argv) {
  if (argc != 3) {
    fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
    abort();
  }
  int offset = 0;
  int c;
  while ((c = getopt(argc, argv, "o:")) != -1) {
    switch(c) {
      case 'o':
        offset = atoi(optarg);
        break;
      default:
        fprintf(stderr, "Usage: %s -o <offset>\n", argv[0]);
        abort();
    }
  }
  // Device-resident buffer (renamed from the misleading "hostMem").
  struct IntSandwich *devMem = NULL;
  if (cudaMalloc((void**)&devMem, sizeof(struct IntSandwich)) != cudaSuccess) {
    fprintf(stderr, "cudaMalloc failed\n");
    abort();
  }
  access_offset_kernel<<<1,1>>>(devMem, offset);
  cudaFree(devMem);
  cudaDeviceReset();
  return 0;
}
|
2,955 | #include <iostream>
#include <algorithm>
#include <ctime>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <cuda.h>
using namespace std;
__global__ void binomial_kernel(double *S, double *kvpq, double *prices, int size, int nsteps, bool am_b, bool put_b);
double *binomial_model_gpu(double *S0, double K, double T, double t, double qd, double r, double sigma, int nsteps, bool am_b, bool put_b, int size);
double binomial_model_cpu(double S0, double K, double T, double t, double qd, double r, double sigma, int nsteps, bool am_b, bool put_b);
void fill_array(double *a, int len, double low, double high);
int ss = 1000;
// Benchmark: price `ss` options with `ss`-step binomial trees on the CPU and
// on the GPU and report wall-clock time for each.
// Fixed vs original: clock() deltas are converted to milliseconds before
// printing (raw ticks were labeled "milliseconds"), and both the spot array
// and the GPU result buffer are released (both leaked before).
int main(int argc, char **argv)
{
    double *S = new double[ss];
    double K = 60;
    double T = 200.0 / 365.0;
    double t = 0;
    double qd = 0.02;
    double r = 0.03;
    double sigma = 0.2;
    int nsteps = ss;
    bool am_b = true;   // American exercise
    bool put_b = false; // call options
    fill_array(S, ss, 0, 150);

    clock_t start = clock();
    for (int i = 0; i < ss; i++)
        binomial_model_cpu(S[i], K, T, t, qd, r, sigma, nsteps, am_b, put_b);
    clock_t end = clock();
    cout << "The CPU takes " << 1000.0 * (end - start) / CLOCKS_PER_SEC
         << " milliseconds to run " << ss << " binomial models with " << ss << " steps each" << endl;

    start = clock();
    double *prices = binomial_model_gpu(S, K, T, t, qd, r, sigma, nsteps, am_b, put_b, 1000);
    end = clock();
    cout << "The GPU takes " << 1000.0 * (end - start) / CLOCKS_PER_SEC
         << " milliseconds to run " << ss << " binomial models with " << ss << " steps each" << endl;

    delete[] prices;  // result buffer allocated by binomial_model_gpu (may be NULL)
    delete[] S;
    return 0;
}
// One thread prices one option via an nsteps-step binomial tree.
// kvpq = {K, sigma*sqrt(dt), discounted up-prob, discounted down-prob}.
// Fixed vs original: the root value is read BEFORE the scratch buffer is
// freed (the original did `delete SF; prices[id] = SF[0];` — a
// use-after-free), and the array form delete[] matches new[].
__global__ void binomial_kernel(double *S, double *kvpq, double *prices, int size, int nsteps, bool am_b, bool put_b)
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id >= size) return;
    // Per-thread scratch tree on the device heap; device operator new
    // returns NULL on failure, so retry as the original did.
    double *SF = NULL;
    while (!SF)
        SF = new double[nsteps + 1];
    // Initialize option values with the payoffs at expiry.
    for (int i = 0; i <= nsteps; i++)
    {
        double Si = S[id] * exp(kvpq[1] * (2.0 * i - nsteps));
        double intrinsic = put_b ? (kvpq[0] - Si) : (Si - kvpq[0]);
        SF[i] = intrinsic > 0 ? intrinsic : 0;
    }
    // Backward induction over the tree.
    for (int j = nsteps; j > 0; j--)
    {
        for (int i = 0; i < j; i++)
        {
            double cv = kvpq[3] * SF[i] + kvpq[2] * SF[i + 1]; // continuation value
            if (am_b)
            {
                // Early-exercise value (mirrors the CPU reference formula).
                double Su = SF[i] * exp(kvpq[1]);
                double sv = put_b ? (kvpq[0] - Su) : (Su - kvpq[0]);
                if (sv < 0) sv = 0;
                SF[i] = max(cv, sv);
            }
            else
                SF[i] = cv;
        }
    }
    prices[id] = SF[0];   // read the root, then free
    delete[] SF;
}
// Fill a[0..len-1] with len evenly spaced values from low to high inclusive.
// Fixed vs original: len == 1 stores `low` (was a division by zero in the
// step computation) and len <= 0 is a no-op.
void fill_array(double *a, int len, double low, double high)
{
    if (len <= 0) return;
    if (len == 1) { a[0] = low; return; }
    double step = (high - low) / (len - 1);
    for (int i = 0; i < len; i++)
        a[i] = low + step * i;
}
// Price `size` options on the GPU, one thread per option.
// Returns a heap-allocated array of `size` prices — caller frees with
// delete[] — or NULL on any CUDA failure.
// Fixed vs original: all three device buffers are freed on every path and
// the host result buffer is freed on failure (everything leaked before).
double *binomial_model_gpu(double *S0, double K, double T, double t, double qd, double r, double sigma, int nsteps, bool am_b, bool put_b, int size)
{
    double *s_values = NULL, *kvpq = NULL, *prices = NULL;
    double kvpq_cpu[4], *prices_cpu = new double[size];
    double dt = T / nsteps;
    double vdt = sigma * sqrt(dt);
    double pu = (exp((r - qd) * dt) - exp(-vdt)) / (exp(vdt) - exp(-vdt));
    double p = pu * exp(-r * dt);
    double q = (1.0 - pu) * exp(-r * dt);
    kvpq_cpu[0] = K;   // strike
    kvpq_cpu[1] = vdt; // sigma * sqrt(dt)
    kvpq_cpu[2] = p;   // discounted up probability
    kvpq_cpu[3] = q;   // discounted down probability
    // Allocate and populate device buffers; any failure aborts the run.
    bool ok = cudaMalloc(&s_values, sizeof(double) * size) == cudaSuccess
           && cudaMalloc(&kvpq, sizeof(double) * 4) == cudaSuccess
           && cudaMalloc(&prices, sizeof(double) * size) == cudaSuccess;
    if (!ok)
        cout << "[+] Unable to allocate GPU memory" << endl;
    else if (cudaMemcpy(s_values, S0, sizeof(double) * size, cudaMemcpyHostToDevice) != cudaSuccess
          || cudaMemcpy(kvpq, kvpq_cpu, sizeof(double) * 4, cudaMemcpyHostToDevice) != cudaSuccess)
    {
        cout << "[+] Error in moving memory to the GPU" << endl;
        ok = false;
    }
    if (ok)
    {
        // Grid padded so size need not divide the block size.
        binomial_kernel<<< (size / 128) + 1, 128 >>>(s_values, kvpq, prices, size, nsteps, am_b, put_b);
        // Blocking copy also synchronizes with the kernel.
        ok = cudaMemcpy(prices_cpu, prices, sizeof(double) * size, cudaMemcpyDeviceToHost) == cudaSuccess;
    }
    cudaFree(s_values);
    cudaFree(kvpq);
    cudaFree(prices);
    if (!ok)
    {
        delete[] prices_cpu;
        return NULL;
    }
    return prices_cpu;
}
// Host reference: price one option with an nsteps-step binomial tree.
// S0 spot, K strike, T expiry, qd dividend yield, r rate, sigma vol;
// am_b selects American exercise, put_b selects put payoff.
// Fixed vs original: the scratch array S is released before returning
// (it was leaked on every call).
double binomial_model_cpu(double S0, double K, double T, double t, double qd, double r, double sigma, int nsteps, bool am_b, bool put_b)
{
    double *S = new double[nsteps + 1];
    double dt = T / nsteps;
    double u = exp(sigma * sqrt(dt));  // up factor
    double d = 1 / u;                  // down factor
    double disc = exp(r * dt);         // one-step discount divisor
    double pu = (exp((r - qd) * dt) - d) / (u - d);  // risk-neutral up prob
    // Terminal payoffs at expiry.
    for (int i = 0; i <= nsteps; i++)
    {
        double Si = S0 * pow(u, 2 * i - nsteps);
        S[i] = put_b ? fmax(0.0, K - Si) : fmax(0.0, Si - K);
    }
    // Backward induction through the tree.
    for (int j = nsteps; j > 0; j--)
    {
        for (int i = 0; i < j; i++)
        {
            double cv = ((1 - pu) * S[i] + pu * S[i + 1]) / disc; // continuation
            if (am_b)
            {
                // Early-exercise value (same formula as the original).
                double Su = S[i] * u;
                double sv = put_b ? fmax(0.0, K - Su) : fmax(0.0, Su - K);
                S[i] = fmax(cv, sv);
            }
            else
                S[i] = cv;
        }
    }
    double price = S[0];
    delete[] S;   // was leaked in the original
    return price;
}
2,956 |
// #include <iostream>
#include <cuda_runtime.h>
// #include <string>
#include <stdio.h>
using namespace std;
// Minimal device-printf smoke test.  The commented-out std::string parameter
// documents why it was removed: host-only types cannot be kernel arguments.
__global__ void test(/*string name*/) {
printf("test_gpu\n");
}
// Launch the smoke-test kernel on one thread and wait for its output.
int main(){
    test<<<1, 1>>>(/*"Test"*/);
    cudaDeviceSynchronize();
    return 0;
}
|
2,957 | #include <stdlib.h>
#include <stdio.h>
#define CUDA_DEVICE (0)
#define NUM_THREADS (1<<13)
#define BLOCK_DIM (64)
#define GRID_DIM (NUM_THREADS/BLOCK_DIM)
#define NUM_BYTES (NUM_THREADS*4*sizeof(float))
// Compile and run with the commands:
// nvcc float4_test.cu
// ./a.out
//
// Failure occurs on my Tesla C870 card when KERNEL_INVOCATIONS is a large
// number (e.g. 100), and TEST_KERNEL is 1 or 3. Kernels 1 and 3 are those
// which write float4 values to device memory. Failure does not occur on
// my Quadro NVS 290 card for any of the kernels.
//
#define KERNEL_INVOCATIONS (100)
#define TEST_KERNEL (1)
// Copy one float4 per thread from g_in to g_out (16-byte vector load/store).
__global__ void testKernel1(float4* g_out, float4* g_in) {
    const int tid = blockIdx.x*blockDim.x + threadIdx.x;
    g_out[tid] = g_in[tid];
}
// Read one float4 per thread, scatter its components to four consecutive
// scalar floats in g_out.
__global__ void testKernel2(float* g_out, float4* g_in) {
    const int tid = BLOCK_DIM*blockIdx.x + threadIdx.x;
    const float4 v = g_in[tid];
    float* dst = g_out + 4*tid;
    dst[0] = v.x;
    dst[1] = v.y;
    dst[2] = v.z;
    dst[3] = v.w;
}
// Gather four consecutive scalar floats per thread and store them as one
// float4 in g_out.
__global__ void testKernel3(float4* g_out, float* g_in) {
    const int tid = BLOCK_DIM*blockIdx.x + threadIdx.x;
    const float* src = g_in + 4*tid;
    g_out[tid] = make_float4(src[0], src[1], src[2], src[3]);
}
// Copy four floats per thread using a strided (structure-of-arrays) layout:
// plane p of each thread lives at offset NUM_THREADS*p + tid.
__global__ void testKernel4(float* g_out, float* g_in) {
    const int tid = BLOCK_DIM*blockIdx.x + threadIdx.x;
    for (int plane = 0; plane < 4; ++plane)
        g_out[NUM_THREADS*plane + tid] = g_in[NUM_THREADS*plane + tid];
}
// Driver: fill NUM_THREADS*4 floats with their indices, round-trip them
// through the kernel selected by TEST_KERNEL, KERNEL_INVOCATIONS times,
// then verify every element survived the copy.
// Fixed vs original: cudaThreadSynchronize (deprecated, removed in CUDA 12)
// replaced by cudaDeviceSynchronize.
int main( int argc, char** argv) {
    cudaSetDevice(CUDA_DEVICE);
    float *input = (float *)malloc(NUM_BYTES);
    float *output = (float *)malloc(NUM_BYTES);
    void* d_input;
    void* d_output;
    cudaMalloc(&d_input, NUM_BYTES);
    cudaMalloc(&d_output, NUM_BYTES);
    // Values are small integers, exactly representable in float.
    for (int i = 0; i < NUM_THREADS*4; i++) {
        input[i] = i;
    }
    cudaMemcpy(d_input, input, NUM_BYTES, cudaMemcpyHostToDevice);
    dim3 gridDim(GRID_DIM, 1, 1);
    dim3 blockDim(BLOCK_DIM, 1, 1);
    for (int i = 0; i < KERNEL_INVOCATIONS; i++) {
        switch (TEST_KERNEL) {
        case 1:
            testKernel1 <<<gridDim, blockDim>>> ((float4 *)d_output, (float4 *)d_input);
            break;
        case 2:
            testKernel2 <<<gridDim, blockDim>>> ((float *)d_output, (float4 *)d_input);
            break;
        case 3:
            testKernel3 <<<gridDim, blockDim>>> ((float4 *)d_output, (float *)d_input);
            break;
        case 4:
            testKernel4 <<<gridDim, blockDim>>> ((float *)d_output, (float *)d_input);
            break;
        }
        cudaDeviceSynchronize();   // modern replacement for cudaThreadSynchronize
    }
    cudaMemcpy(output, d_output, NUM_BYTES, cudaMemcpyDeviceToHost);
    for (int i = 0; i < NUM_THREADS*4; i++) {
        if (output[i] != i) {
            printf("KERNEL=%d FAILED: elem #%d = %f\n", TEST_KERNEL, i, output[i]);
        }
    }
    free(input);
    free(output);
    cudaFree(d_input);
    cudaFree(d_output);
}
|
2,958 | #include<stdio.h>
// Device-side hello: every launched thread prints one line via device printf.
__global__ void device_greetings()
{
printf("Hello, world from the GPU!\n");
}
// Host prints a greeting, then launches a 2x4 grid of 8x16-thread blocks of
// device greetings and waits for all device printf output to flush.
// Fixed vs original: typo in the host message ("form" -> "from").
int main()
{
    printf("Hello, world from the host!\n");
    dim3 threadBlocks(8, 16);
    dim3 gridBlocks(2, 4);
    device_greetings<<<gridBlocks, threadBlocks>>>();
    cudaDeviceSynchronize();
    return 0;
}
2,959 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <fstream>
#include <cstdlib>
#include <math.h>
#define BLOCK_SIZE 16
#define FEATURE_LEN 128
using namespace std;
// Compute the mean and (population) standard deviation of the FEATURE_LEN
// values in v.  Note: despite its name, `var` receives the standard
// deviation sqrt(E[x^2] - mean^2), not the variance.
void calMeanVar(double* v, double& mean, double& var){
    double total = 0, sqTotal = 0;
    for (int i = 0; i < FEATURE_LEN; i++) {
        total += v[i];
        sqTotal += v[i] * v[i];
    }
    mean = total / FEATURE_LEN;
    var = sqrt(sqTotal / FEATURE_LEN - mean * mean);
}
// Standardize the FEATURE_LEN values in v in place (zero mean, unit
// deviation).
// Fixed vs original: a zero deviation (constant input vector) no longer
// divides by zero — the vector is left untouched instead of becoming
// inf/NaN.
void normalize(double* v){
    double mean = 0;
    double var = 0;
    calMeanVar(v, mean, var);
    if (var <= 0) return;   // constant input: nothing to scale, avoid /0
    for(int i = 0; i < FEATURE_LEN; i++){
        v[i] = (v[i] - mean) / var;
    }
}
cudaError_t MulWithCuda(double* A, double* B, double* C, int matrixSize);
// Computes the pairwise squared Euclidean distance matrix
// C[i][j] = ||A_i - B_j||^2 between FEATURE_LEN-dimensional feature rows of
// A and B (despite the name, this is not a matrix product).
// Launch layout: BLOCK_SIZE x BLOCK_SIZE threads per block; blockIdx.x tiles
// rows of A, blockIdx.y tiles rows of B.  Each block first stages
// BLOCK_SIZE complete feature rows of A and of B into shared memory,
// every thread copying `blockCol` chunks.
// NOTE(review): threads whose staging loads are skipped by the bounds checks
// leave their shared slots uninitialized; the distance loop still reads all
// FEATURE_LEN columns, but out-of-range results are never stored — confirm
// featureNum/FEATURE_LEN tiling before reusing this kernel elsewhere.
__global__ void matrixMulCUDA(double *A, double *B, double *C, int blockRow, int blockCol,int featureNum)
{
double ans=0;
//Need to copy A[bx*BLOCK_SIZE:(bx+1)*BLOCK_SIZE][:]
//Need to copy B[by*BLOCK_SIZE:(by+1)*BLOCK_SIZE][:]
__shared__ double AC[BLOCK_SIZE][FEATURE_LEN];//BLOCK_SIZE*blockCol
__shared__ double BC[BLOCK_SIZE][FEATURE_LEN];
//Each Thread response for #blockCol copy
for(int k=0;k<blockCol;k++)
{
if((blockIdx.x*BLOCK_SIZE+threadIdx.x)<featureNum&&(k*BLOCK_SIZE+threadIdx.y)<FEATURE_LEN)
AC[threadIdx.x][k*BLOCK_SIZE+threadIdx.y]=A[(blockIdx.x*BLOCK_SIZE+threadIdx.x)*FEATURE_LEN+(k*BLOCK_SIZE+threadIdx.y)];
//else
//printf("blockIdx.x blockIdx.y threadIdx.x threadIdx.y %d %d %d %d %d %d\n",blockIdx.x,blockIdx.y,threadIdx.x,threadIdx.y,blockIdx.x*BLOCK_SIZE+threadIdx.x,k*BLOCK_SIZE+threadIdx.y);
if((blockIdx.y*BLOCK_SIZE+threadIdx.x)<featureNum&&(k*BLOCK_SIZE+threadIdx.y)<FEATURE_LEN)
BC[threadIdx.x][k*BLOCK_SIZE+threadIdx.y]=B[(blockIdx.y*BLOCK_SIZE+threadIdx.x)*FEATURE_LEN+(k*BLOCK_SIZE+threadIdx.y)];
//else
//printf("blockIdx.x blockIdx.y threadIdx.x threadIdx.y %d %d %d %d %d %d\n",blockIdx.x,blockIdx.y,threadIdx.x,threadIdx.y,blockIdx.x*BLOCK_SIZE+threadIdx.x,k*BLOCK_SIZE+threadIdx.y);
}
__syncthreads();
/*
for(int i=0;i<16;i++)
{
for(int j=0;j<128;j++)
printf("%f ",BC[i][j]);
printf("\n");
}
*/
//if(blockIdx.x==8&&blockIdx.y==8)
// printf("blockIdx.x blockIdx.y threadIdx.x threadIdx.y %d %d %d %d\n",blockIdx.x,blockIdx.y,threadIdx.x,threadIdx.y);
//if(blockIdx.x==8&&blockIdx.y==8&&threadIdx.x==0&&threadIdx.y==0)
// printf("%f",ans);
// Accumulate the squared distance between feature row (bx,tx) of A and
// feature row (by,ty) of B.
for(int k=0;k<FEATURE_LEN;k++)
{
ans+=(AC[threadIdx.x][k]-BC[threadIdx.y][k])*(AC[threadIdx.x][k]-BC[threadIdx.y][k]);
}
//if(threadIdx.x==0&&threadIdx.y==0&&blockIdx.x==0&&blockIdx.y==0)
// printf("%f",ans);
if((blockIdx.x*BLOCK_SIZE+threadIdx.x)<featureNum&&(blockIdx.y*BLOCK_SIZE+threadIdx.y)<featureNum)
C[(blockIdx.x*BLOCK_SIZE+threadIdx.x)*featureNum+(blockIdx.y*BLOCK_SIZE+threadIdx.y)]=ans;
}
// Load two 400 x FEATURE_LEN feature sets, standardize each feature row,
// compute all pairwise squared distances on the GPU, and report the largest
// absolute deviation from the reference file (which stores unsquared
// distances, hence the tmp*tmp comparison).  Returns 0 on success.
// Fixed vs original: the error tracker now accumulates fabs() of the
// deviation — the original compared against abs() but stored the SIGNED
// difference, hiding large negative errors (and `abs` on a double can
// resolve to the integer overload).
extern "C" int mulMatrix()
{
    int featureNum = 400;
    fstream fa("img1_400.txt",fstream::in);
    fstream fb("img2_400.txt",fstream::in);
    fstream fc("400_distance.txt",fstream::in);
    // 2-D matrices stored row-major in 1-D host arrays.
    double* A = (double*)malloc(featureNum * FEATURE_LEN * sizeof(double));
    double* B = (double*)malloc(featureNum * FEATURE_LEN * sizeof(double));
    double* C = (double*)malloc(featureNum * featureNum * sizeof(double));
    // Read and standardize every feature row of both sets.
    for(int i = 0; i < featureNum; i++){
        for(int j = 0; j < FEATURE_LEN; j++){
            fa>>A[i * FEATURE_LEN + j];
            fb>>B[i * FEATURE_LEN + j];
        }
        normalize(A+i*FEATURE_LEN);
        normalize(B+i*FEATURE_LEN);
    }
    // Run the pairwise-distance computation on the GPU.
    cudaError_t cudaStatus = MulWithCuda(A,B,C,featureNum);
    if (cudaStatus != cudaSuccess) {fprintf(stderr, "mulWithCuda failed!");return 1;}
    // Compare against the reference distances.
    double err_max = 0;
    printf("%f\n",C[0]);
    for(int i = 0; i < featureNum; i++){
        for(int j = 0; j < featureNum; j++){
            double tmp;
            fc>>tmp;
            double diff = fabs(C[i*featureNum+j] - tmp*tmp);
            if (diff > err_max) err_max = diff;
        }
    }
    printf("%f\n",err_max);
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaDeviceReset failed!");return 1;}
    free(A);free(B);free(C);
    fa.close();fb.close();fc.close();
    return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Copy A and B (featureNum x FEATURE_LEN each) to the device, launch
// matrixMulCUDA over a featureNum x featureNum output grid, copy the result
// into C, and report GPU time via CUDA events.  Returns the last CUDA
// status; all device buffers are freed through the Error label.
// NOTE(review): the stop event is recorded after the D2H copy, so the
// printed time includes the result transfer, not just the kernel — confirm
// that is intended before comparing timings.
cudaError_t MulWithCuda(double* A, double* B, double* C, int featureNum)
{
double *dev_A;
double *dev_B;
double *dev_C;
cudaError_t cudaStatus;
cudaEvent_t start, stop;
float gpu_time = 0.0f;
// Launch a kernel on the GPU with one thread for each element.
// Ceil-divide both dimensions into BLOCK_SIZE tiles.
int blockRowNum=featureNum%BLOCK_SIZE?featureNum/BLOCK_SIZE+1:featureNum/BLOCK_SIZE;
int blockColNum=FEATURE_LEN%BLOCK_SIZE?FEATURE_LEN/BLOCK_SIZE+1:FEATURE_LEN/BLOCK_SIZE;
dim3 gridDime(blockRowNum,blockRowNum);
dim3 blockDime(BLOCK_SIZE,BLOCK_SIZE);
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for two matrix
cudaStatus = cudaMalloc((void**)&dev_A, featureNum * FEATURE_LEN * sizeof(double));
if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaMalloc A failed!");goto Error;}
cudaStatus = cudaMalloc((void**)&dev_B, featureNum * FEATURE_LEN * sizeof(double));
if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaMalloc B failed!");goto Error;}
cudaStatus = cudaMalloc((void**)&dev_C, featureNum * featureNum * sizeof(double));
if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaMalloc C failed!");goto Error;}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_A, A, featureNum * FEATURE_LEN * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaMemcpy A failed!");goto Error;}
cudaStatus = cudaMemcpy(dev_B, B, featureNum * FEATURE_LEN * sizeof(double), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaMemcpy B failed!");goto Error;}
// Set up timing
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
printf("Row,Col %d %d\n",blockRowNum,blockColNum);
matrixMulCUDA<<<gridDime,blockDime>>>(dev_A,dev_B,dev_C,blockRowNum,blockColNum,featureNum);
// copy result back
cudaStatus = cudaMemcpy(C, dev_C, featureNum * featureNum * sizeof(double), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaMemcpy C back failed!");goto Error;}
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching Kernel!\n", cudaStatus);goto Error;}
// Close timing
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&gpu_time, start, stop);
printf("Time spent: %.5f\n", gpu_time);
cudaEventDestroy(start);
cudaEventDestroy(stop);
Error:
cudaFree(dev_A);cudaFree(dev_B);cudaFree(dev_C);
return cudaStatus;
}
|
2,960 | #include <stdio.h>
#include <stdlib.h>
#include "gpu_functions.cuh"
// Host entry point: launch the single-thread hello kernel and block until
// its device printf output has been flushed.
__host__ void helloGPU(void) {
    __helloGPU<<<1, 1>>>();
    cudaDeviceSynchronize();
}
// Kernel body: prints from kernel scope, then calls the __device__ helper
// to demonstrate a device-side call chain.
__global__ void __helloGPU(void) {
printf("[gpu]> Hello world! (global)\n");
__helloGPUDevice();
}
// Device helper invoked from __helloGPU; prints its own greeting.
__device__ void __helloGPUDevice(void) {
printf("[gpu]> Hello world! (device)\n");
}
|
2,961 | __global__ void count_up() {
// Verification toy: counts x from 0 up to 11.  The //@ lines are contract
// annotations consumed by a verifier, not ordinary comments: the loop
// invariant x <= 11 plus the exit condition x > 10 establish x == 11.
//@ ensures x == 11
int x = 0;
//@ loop invariant x <= 11
while (x <= 10) {
x++;
}
}
|
2,962 | #include <iostream>
// Intentionally empty kernel: the launch exists only to initialize the CUDA
// context before the host prints.
__global__ void kernel( void ) {
}
// Launch the no-op kernel, then greet from the host.
int main( void ) {
    kernel<<<1, 1>>>();
    std::cout << "Hello, World!" << std::endl;
    return 0;
}
|
2,963 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <time.h>
#include <cuda_runtime.h>
#define BLOCK_SIZE 32
#define WA (10 * BLOCK_SIZE) // Matrix A width
#define HA (10 * BLOCK_SIZE) // Matrix A height
#define WB (20 * BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
// Fill data[0..size-1] with pseudo-random values uniformly drawn from [0, 1]
// using the C library rand() stream.
void MatInit(float* data, int size)
{
    for (int idx = 0; idx < size; ++idx)
        data[idx] = rand() / (float)RAND_MAX;
}
// Return a pointer to the top-left element of the BLOCK_SIZE x BLOCK_SIZE
// sub-matrix at block-row `index`, block-column `m` of a row-major matrix
// whose full row width is `width`.
__device__ float * GetSubMatrix(float *matrix, int m, int index, int width)
{
return matrix+width*BLOCK_SIZE*index+BLOCK_SIZE*m;
}
// Host reference GEMM: C = A (hA x wA) * B (wA x wB), all row-major.
// Accumulates each dot product in double for accuracy, stores as float.
void CpuMul(float* C, const float* A, const float* B, int hA, int wA, int wB)
{
    for (int r = 0; r < hA; ++r)
    {
        for (int c = 0; c < wB; ++c)
        {
            double acc = 0.0;
            for (int k = 0; k < wA; ++k)
                acc += (double)A[r * wA + k] * (double)B[k * wB + c];
            C[r * wB + c] = (float)acc;
        }
    }
}
// Naive GEMM on a 1-D launch: each thread produces one output element of
// C = A * B (A has row width wA, B and C have row width wB).  The flat
// thread index is decomposed into the output row and column; the launch is
// expected to supply exactly rows*wB threads.
__global__ void GpuMul1( float* C, float* A, float* B, int wA, int wB)
{
    const int flat = blockIdx.x * blockDim.x + threadIdx.x;
    const int r = flat / wB;
    const int c = flat % wB;
    float acc = 0.0f;
    for (int k = 0; k < wA; ++k)
        acc += A[r * wA + k] * B[k * wB + c];
    C[r * wB + c] = acc;
}
// Tiled GEMM: C = A * B using BLOCK_SIZE x BLOCK_SIZE shared-memory tiles,
// one tile phase per BLOCK_SIZE-wide strip of the inner dimension.
// Assumes wA, wB and the launch grid are exact multiples of BLOCK_SIZE
// (no boundary guards) — true for the WA/WB/HA macros in this file.
__global__ void GpuMul2( float* C, float* A, float* B, int wA, int wB)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
float sum = 0;
for (int m= 0; m<wA/BLOCK_SIZE; m++)
{
// Stage one tile of A (block-row by, strip m) and one tile of B
// (strip m, block-column bx), one element per thread.
float *subA=GetSubMatrix(A, m, by, wA);
float *subB=GetSubMatrix(B, bx, m, wB);
As[ty][tx] = *(subA+ wA * ty + tx);
Bs[ty][tx] = *(subB+ wB * ty + tx);
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
sum += As[ty][k] * Bs[k][tx];
__syncthreads();
}
float *subC=GetSubMatrix(C, bx, by, wB);
*(subC + wB * ty + tx)= sum;
}
// Benchmark three GEMM variants (naive 1-D GPU kernel, tiled shared-memory
// GPU kernel, host reference) on HA x WA times HB x WB matrices and print
// the CPU, GpuMul1 and GpuMul2 wall-clock times.
// Fixed vs original: the deprecated cudaThreadSynchronize / cudaThreadExit
// calls (removed in CUDA 12) are replaced by cudaDeviceSynchronize /
// cudaDeviceReset.
int main()
{
    // allocate host memory for matrices A and B
    int size_A = WA * HA;
    int mem_size_A = sizeof(float) * size_A;
    float* h_A = (float*) malloc(mem_size_A);
    int size_B = WB * HB;
    int mem_size_B = sizeof(float) * size_B;
    float* h_B = (float*) malloc(mem_size_B);
    clock_t start, finish;
    double time[3];
    MatInit(h_A, size_A);
    MatInit(h_B, size_B);
    float* d_A;
    cudaMalloc((void**) &d_A, mem_size_A);
    float* d_B;
    cudaMalloc((void**) &d_B, mem_size_B);
    // copy host memory to device
    cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) ;
    cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) ;
    // allocate device memory for result
    int size_C = WC * HC;
    int mem_size_C = sizeof(float) * size_C;
    float* d1_C;
    cudaMalloc((void**) &d1_C, mem_size_C);
    float* d2_C;
    cudaMalloc((void**) &d2_C, mem_size_C);
    // allocate host memory for the result
    float* h0_C = (float*) malloc(mem_size_C);
    float* h1_C = (float*) malloc(mem_size_C);
    float* h2_C = (float*) malloc(mem_size_C);
    // --- naive one-thread-per-element kernel ---
    start=clock();
    int threads1 = BLOCK_SIZE * BLOCK_SIZE;
    int grid1 = WC*HC/threads1;
    GpuMul1<<< grid1, threads1 >>>(d1_C, d_A, d_B, WA, WB);
    cudaDeviceSynchronize();
    finish=clock();
    time[1]=(double)(finish-start)/CLOCKS_PER_SEC;
    cudaMemcpy(h1_C, d1_C, mem_size_C, cudaMemcpyDeviceToHost);
    // --- shared-memory tiled kernel ---
    start=clock();
    dim3 threads2(BLOCK_SIZE, BLOCK_SIZE);
    dim3 grid2(WC / threads2.x, HC / threads2.y);
    GpuMul2<<< grid2, threads2 >>>(d2_C, d_A, d_B, WA, WB);
    cudaDeviceSynchronize();
    finish=clock();
    time[2]=(double)(finish-start)/CLOCKS_PER_SEC;
    cudaMemcpy(h2_C, d2_C, mem_size_C, cudaMemcpyDeviceToHost);
    // --- CPU reference ---
    start=clock();
    CpuMul(h0_C, h_A, h_B, HA, WA, WB);
    finish=clock();
    time[0]=(double)(finish-start)/CLOCKS_PER_SEC;
    // for(int i=0;i<WC*HC;i++)
    // printf("%f %f %f\n",h0_C[i],h1_C[i],h2_C[i]);
    printf("%f %f %f",time[0],time[1],time[2]);
    // clean up memory
    free(h_A);
    free(h_B);
    free(h0_C);
    free(h1_C);
    free(h2_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d1_C);
    cudaFree(d2_C);
    cudaDeviceReset();   // replaces deprecated cudaThreadExit
}
|
2,964 | #include <cuda.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#define rows 1000
#define cols 1000
#define Y 32
#define X 32
// Deterministically fill both rows x cols matrices: M1[k] = sin(k),
// M2[k] = cos(k).
__host__ void fill(double* M1, double* M2){
    const int total = rows * cols;
    for (int k = 0; k < total; k++) {
        M1[k] = sin(k);
        M2[k] = cos(k);
    }
}
// Print `message` when a CUDA call did not succeed; silent otherwise.
__host__ void checkStatus(cudaError_t& status,const char *message){
    if (status == cudaSuccess) return;
    printf("%s\n", message);
}
// Tiled matrix multiply: Mr = M1 * M2, all rows x cols row-major.
// Launch with blockDim = (X, Y); shared tiles are X columns by Y rows.
// Fixed vs original:
//  * __syncthreads() was executed inside thread-divergent control flow
//    (both under the boundary guard and inside the inner k-loop) — undefined
//    behavior whenever rows/cols are not multiples of the tile size, which
//    is the case here (1000 % 32 != 0);
//  * boundary tiles read M1/M2 out of bounds — out-of-range lanes now load
//    zeros, which contribute nothing to the dot product.
__global__ void mul(double* M1,double* M2,double* Mr){
    __shared__ double sM1[Y][X];
    __shared__ double sM2[Y][X];
    const int tx = threadIdx.x, ty = threadIdx.y;
    const int row = blockIdx.y * blockDim.y + ty;   // output row this thread owns
    const int col = blockIdx.x * blockDim.x + tx;   // output column this thread owns
    const int phases = (cols + X - 1) / X;          // tiles along the inner dimension
    double acc = 0.0;
    for (int ph = 0; ph < phases; ph++){
        const int aCol = ph * X + tx;   // column of M1 staged by this thread
        const int bRow = ph * Y + ty;   // row of M2 staged by this thread
        // Every thread stores (zero-padding out-of-range lanes) so the
        // barrier below is reached uniformly by the whole block.
        sM1[ty][tx] = (row < rows && aCol < cols) ? M1[row * cols + aCol] : 0.0;
        sM2[ty][tx] = (bRow < rows && col < cols) ? M2[bRow * cols + col] : 0.0;
        __syncthreads();
        for (int k = 0; k < X; k++)
            acc += sM1[ty][k] * sM2[k][tx];
        __syncthreads();   // keep tiles live until every thread finishes reading
    }
    if (row < rows && col < cols)
        Mr[row * cols + col] = acc;
}
// Driver: allocate two rows x cols input matrices and one output matrix on
// host and device, fill the inputs with sin/cos values, run the tiled
// multiply kernel, copy the product back, and print every element on
// success.  Each CUDA call's status is reported via checkStatus.
// NOTE(review): the D2H cudaMemcpy is a blocking call, so no explicit
// kernel synchronization is needed before reading h_Mr.
int main(int argc, char const *argv[]) {
cudaError_t status = cudaSuccess;
double *h_M1=NULL, *h_M2=NULL, *h_Mr=NULL;
int n = rows*cols*sizeof(double);
h_M1 = (double *)malloc(n);
h_M2 = (double *)malloc(n);
h_Mr = (double *)malloc(n);
fill(h_M1,h_M2);
double *d_M1=NULL, *d_M2=NULL, *d_Mr=NULL;
status = cudaMalloc((void**)&d_M1,n);
checkStatus(status,"Unallocated memory to d_M1");
status = cudaMalloc((void**)&d_M2,n);
checkStatus(status,"Unallocated memory to d_M2");
status = cudaMalloc((void**)&d_Mr,n);
checkStatus(status,"Unallocated memory to d_Mr");
if(d_M1 != NULL && d_M2 != NULL && d_Mr != NULL){
status = cudaMemcpy(d_M1,h_M1,n,cudaMemcpyHostToDevice);
checkStatus(status,"Impossible copy data to d_M1");
status = cudaMemcpy(d_M2,h_M2,n,cudaMemcpyHostToDevice);
checkStatus(status,"Impossible copy data to d_M2");
// 32x32 tiles; the grid ceil-divides both matrix dimensions.
dim3 blockS(32,32,1);
dim3 gridS(ceil((double)rows/32.0),ceil((double)cols/32.0),1);
mul<<<gridS,blockS>>>(d_M1,d_M2,d_Mr);
status = cudaMemcpy(h_Mr,d_Mr,n,cudaMemcpyDeviceToHost);
checkStatus(status,"Impossible copy data to h_Mr");
if(status == cudaSuccess) for(int k=0; k<rows*cols; k++) printf("%f ",h_Mr[k]);
}
if(d_M1 != NULL) cudaFree(d_M1);
if(d_M2 != NULL) cudaFree(d_M2);
if(d_Mr != NULL) cudaFree(d_Mr);
if(h_M1 != NULL) free(h_M1);
if(h_M2 != NULL) free(h_M2);
if(h_Mr != NULL) free(h_Mr);
return 0;
}
|
2,965 | /**
* 获取GPU属性
*/
#include <iostream>
using namespace std;
// Enumerate every CUDA device and print a subset of its properties.
// Fixed vs original: the final cudaSetDevice(count) passed an out-of-range
// id (valid ids are 0..count-1) — device 0 is selected instead, and only
// when a device exists.  Comments translated to English; the loop index is
// a plain int to avoid the signed/unsigned comparison with `count`.
int main(int argc, char const *argv[])
{
    cudaDeviceProp prop;
    int count = 0;
    // Number of CUDA-capable devices present.
    cudaGetDeviceCount(&count);
    for(int i = 0; i < count; ++i)
    {
        // Fetch this device's properties.
        cudaGetDeviceProperties(&prop, i);
        cout << "name: " << prop.name << endl;
        cout << "totalGlobalMem: " << prop.totalGlobalMem << endl;
        cout << "sharedMemPerBlock: " << prop.sharedMemPerBlock << endl;
        cout << "regsPerBlock: " << prop.regsPerBlock << endl;
        cout << "warpSize: " << prop.warpSize << endl;
        cout << "memPitch: " << prop.memPitch << endl;
        cout << "canMapHostMemory: " << prop.canMapHostMemory << endl;
        cout << "pciDeviceID: " << prop.pciDeviceID << endl;
        cout << "tccDriver: " << prop.tccDriver << endl;
        cout << "----------------------------------------------------"<< endl;
    }
    // Select the GPU used by subsequent CUDA calls (ids are zero-based).
    if (count > 0)
        cudaSetDevice(0);
    return 0;
}
2,966 |
// CUDA version of vector add example
#include <stdio.h>
#define RealType float
// Elementwise c = a + b over N entries, one thread per element, with a tail
// guard so any grid providing at least N threads is safe.
__global__ void vector_add(const RealType *a, const RealType *b, RealType *c, const int N)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N) return;
    c[idx] = a[idx] + b[idx];
}
// Report a failed CUDA API call by name; successful calls stay silent.
void check_status(cudaError_t status, const char *api_name)
{
    if (status == cudaSuccess) return;
    printf("%s failed\n", api_name);
}
// Driver for the vector-add example: allocate host/device arrays, fill the
// inputs, time the kernel with CUDA events, verify two sample outputs, and
// report kernel time plus effective bandwidth (2 reads + 1 write per
// element, hence the factor of 3).
// NOTE(review): host allocations are pageable; pinned memory
// (cudaMallocHost) would speed the copies — confirm before benchmarking.
int main()
{
const int N = 1024*1024;
RealType *host_a;
RealType *host_b;
RealType *host_c;
RealType *device_a;
RealType *device_b;
RealType *device_c;
size_t bytes = N*sizeof(RealType);
double array_mb = bytes/1024.0/1024.0;
printf("Array size (MiB) = %g\n",array_mb);
host_a = new RealType[N];
host_b = new RealType[N];
host_c = new RealType[N];
cudaError_t da_err = cudaMalloc(&device_a, bytes);
check_status(da_err,"cudaMalloc for a");
cudaError_t db_err = cudaMalloc(&device_b, bytes);
check_status(db_err,"cudaMalloc for b");
cudaError_t dc_err = cudaMalloc(&device_c, bytes);
check_status(dc_err,"cudaMalloc for c");
for (int i = 0; i < N; i++) {
host_a[i] = 1.0;
host_b[i] = 1.0 * i;
}
cudaMemcpy(device_a, host_a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(device_b, host_b, bytes, cudaMemcpyHostToDevice);
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Ceil-style grid sizing: one extra block covers the tail.
int blockSize = 1024;
int gridSize = (int)(double(N)/blockSize) + 1;
cudaEventRecord(start);
vector_add<<<gridSize, blockSize>>>(device_a, device_b, device_c, N);
cudaEventRecord(stop);
cudaMemcpy(host_c, device_c, bytes, cudaMemcpyDeviceToHost);
cudaEventSynchronize(stop);
printf("c[0] = %g\n",host_c[0]);
printf("c[N-1] = %g\n",host_c[N-1]);
float kernel_ms;
cudaEventElapsedTime(&kernel_ms, start, stop);
printf("kernel (ms) = %g\n",kernel_ms);
// Bandwidth: 3 array traversals (a read, b read, c write) per element.
double bw = 3*bytes*1e-6/kernel_ms;
printf("BW (GB/s) = %g\n",bw);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_c);
delete[] host_a;
delete[] host_b;
delete[] host_c;
return 0;
}
|
2,967 | #include <iostream>
// Block-wide statistics over x[0..N-1], folded into y via atomics:
//   y[0] += sum, y[1] += sum|x|, y[2] += sum x^2, y[3] = max|x| (max-norm),
//   y[4] = min, y[5] = max, y[6] += number of zeros.
// Caller must pre-initialize y (zeros; y[4] to a large value or x[0]).
// blockDim.x must be a power of two and <= 256 (shared array width).
// Fixed vs original:
//  * the max-norm accumulated the SIGNED value instead of its magnitude;
//  * threads whose global id was >= N never wrote their shared slots, yet
//    the tree reduction read them (uninitialized shared memory) — every
//    thread now publishes identity-valued partials;
//  * the reduction's __syncthreads() sat inside an `if (tid < N)` branch,
//    i.e. a barrier in divergent control flow.
__global__ void sharedMemoryKernel(const int* x, int* y, const int N) {
    __shared__ int partials[7][256];

    // Per-thread partials over a grid-stride pass; the initial values are
    // identities, so a thread that never iterates contributes nothing.
    int sum = 0, absSum = 0, sqrSum = 0, maxAbs = 0;
    int minVal = x[0], maxVal = 0, zeroCnt = 0;
    for (int gid = blockDim.x * blockIdx.x + threadIdx.x; gid < N; gid += gridDim.x * blockDim.x) {
        const int v = x[gid];
        const int a = v < 0 ? -v : v;
        sum += v;
        absSum += a;
        sqrSum += v * v;
        maxAbs = a > maxAbs ? a : maxAbs;   // magnitude, not the signed value
        minVal = v < minVal ? v : minVal;
        maxVal = v > maxVal ? v : maxVal;
        zeroCnt += (v == 0) ? 1 : 0;
    }

    // Every thread publishes its partials so the reduction below never
    // touches uninitialized shared memory.
    const int tid = threadIdx.x;
    partials[0][tid] = sum;
    partials[1][tid] = absSum;
    partials[2][tid] = sqrSum;
    partials[3][tid] = maxAbs;
    partials[4][tid] = minVal;
    partials[5][tid] = maxVal;
    partials[6][tid] = zeroCnt;
    __syncthreads();

    // Tree reduction; blockDim.x must be a power of two for full coverage.
    for (int half = blockDim.x / 2; half > 0; half /= 2) {
        if (tid < half) {
            partials[0][tid] += partials[0][tid + half];
            partials[1][tid] += partials[1][tid + half];
            partials[2][tid] += partials[2][tid + half];
            if (partials[3][tid + half] > partials[3][tid]) partials[3][tid] = partials[3][tid + half];
            if (partials[4][tid + half] < partials[4][tid]) partials[4][tid] = partials[4][tid + half];
            if (partials[5][tid + half] > partials[5][tid]) partials[5][tid] = partials[5][tid + half];
            partials[6][tid] += partials[6][tid + half];
        }
        __syncthreads();   // barrier reached uniformly by the whole block
    }

    // One atomic per statistic per block folds the result into y.
    if (tid == 0) {
        atomicAdd(y,     partials[0][0]);
        atomicAdd(y + 1, partials[1][0]);
        atomicAdd(y + 2, partials[2][0]);
        atomicMax(y + 3, partials[3][0]);
        atomicMin(y + 4, partials[4][0]);
        atomicMax(y + 5, partials[5][0]);
        atomicAdd(y + 6, partials[6][0]);
    }
}
template <typename T>
void printContainer(T container, int N) {
    // Print the first N elements separated (and terminated) by " | ".
    for (int idx = 0; idx < N; ++idx)
        std::cout << container[idx] << " | ";
}
int main() {
    int N = 5;
    int *x = (int *)malloc(sizeof(int) * N);
    int *y = (int *)malloc(sizeof(int) * 7);
    for (int i = 0; i < N; i++) {
        x[i] = i - N/2;
    }
    // BUG FIX: cuda_y was never initialized, so the kernel's atomicAdd /
    // atomicMin / atomicMax combined with device-memory garbage. Seed the
    // seven slots with their reduction identities and upload them.
    y[0] = y[1] = y[2] = y[6] = 0;   // additive slots
    y[3] = 0;                        // max-norm is always >= 0
    y[4] = 2147483647;               // INT_MAX identity for atomicMin
    y[5] = -2147483647 - 1;          // INT_MIN identity for atomicMax
    int *cuda_x;
    int *cuda_y;
    cudaMalloc(&cuda_x, sizeof(int) * N);
    cudaMalloc(&cuda_y, sizeof(int) * 7);
    cudaMemcpy(cuda_x, x, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(cuda_y, y, sizeof(int) * 7, cudaMemcpyHostToDevice);
    sharedMemoryKernel<<<256, 256>>>(cuda_x, cuda_y, N);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(y, cuda_y, sizeof(int) * 7, cudaMemcpyDeviceToHost);
    std::cout << "Input" << std::endl;
    printContainer(x, N);
    std::cout << std::endl;
    std::cout << "Sum of all entries: " << y[0] << std::endl;
    std::cout << "Sum of maximum values: " << y[1] << std::endl;
    std::cout << "Sum of squares: " << y[2] << std::endl;
    std::cout << "Max-norm: " << y[3] << std::endl;
    std::cout << "minimum value: " << y[4] << std::endl;
    std::cout << "maximum value: " << y[5] << std::endl;
    std::cout << "number of zeros: " << y[6] << std::endl;
    free(x);
    free(y);
    cudaFree(cuda_x);
    cudaFree(cuda_y);
    return EXIT_SUCCESS;
} |
2,968 | #include "includes.h"
// Bilinear downscale of an 8-bit texture: one thread per destination
// pixel (2-D grid/block), sampling the source texture four times and
// averaging with rounding. Assumes uchar_tex is configured for
// normalized-weight bilinear filtering with unnormalized coordinates
// (TODO confirm against the texture-object setup, not visible here).
__global__ void Subsample_Bilinear_uchar(cudaTextureObject_t uchar_tex, unsigned char *dst, int dst_width, int dst_height, int dst_pitch, int src_width, int src_height)
{
int xo = blockIdx.x * blockDim.x + threadIdx.x;
int yo = blockIdx.y * blockDim.y + threadIdx.y;
if (yo < dst_height && xo < dst_width)
{
// Scale factors and the source-space center of this destination pixel.
float hscale = (float)src_width / (float)dst_width;
float vscale = (float)src_height / (float)dst_height;
float xi = (xo + 0.5f) * hscale;
float yi = (yo + 0.5f) * vscale;
// 3-tap filter weights are {wh,1.0,wh} and {wv,1.0,wv}
float wh = min(max(0.5f * (hscale - 1.0f), 0.0f), 1.0f);
float wv = min(max(0.5f * (vscale - 1.0f), 0.0f), 1.0f);
// Convert weights to two bilinear weights -> {wh,1.0,wh} -> {wh,0.5,0} + {0,0.5,wh}
float dx = wh / (0.5f + wh);
float dy = wv / (0.5f + wv);
// Four offset taps; the hardware bilinear filter applies the weights.
int y0 = tex2D<unsigned char>(uchar_tex, xi-dx, yi-dy);
int y1 = tex2D<unsigned char>(uchar_tex, xi+dx, yi-dy);
int y2 = tex2D<unsigned char>(uchar_tex, xi-dx, yi+dy);
int y3 = tex2D<unsigned char>(uchar_tex, xi+dx, yi+dy);
// Average of the four taps with +2 for round-to-nearest.
dst[yo*dst_pitch+xo] = (unsigned char)((y0+y1+y2+y3+2) >> 2);
}
} |
2,969 | /**
* Assignment 06 Program - moving_average.cu
* Sarah Helble
* 10/06/17
*
* Calculates the average of each index and its neighbors
*
* Usage ./aout
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
// Execution Notes
// 512, 512 gives about equivalent times
// 512, 256 register is 2x faster
// 256, 256 register is slightly better
// 256, 128 register is slightly better
// 512, 128 register is slightly better
// 1024, 256 shared is slightly better
// 1024, 512 register is slightly better
//
// First run always seems to be bad (2x slower)
#define NUM_ELEMENTS 512
#define THREADS_PER_BLOCK 256
#define MAX_INT 30
/**
* Returns the current time
*/
__host__ cudaEvent_t get_time(void)
{
    // Create an event and immediately record it in the default stream,
    // returning it as a "timestamp" for later cudaEventElapsedTime calls.
    cudaEvent_t moment;
    cudaEventCreate(&moment);
    cudaEventRecord(moment);
    return moment;
}
/**
* Kernel function that takes a moving average of the values in
* @list and puts the results in @averages
* Uses registers to store the calculations.
*/
__global__ void average_using_registers(unsigned int *list, float *averages)
{
    // Moving average of each element with its immediate neighbors,
    // accumulated entirely in registers; one thread per element.
    const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= NUM_ELEMENTS) {
        return;
    }
    unsigned int total = list[idx];
    unsigned int count = 1;
    if (idx != 0) {               // left neighbor exists
        total += list[idx - 1];
        count++;
    }
    if (idx + 1 < NUM_ELEMENTS) { // right neighbor exists
        total += list[idx + 1];
        count++;
    }
    averages[idx] = (float) total / count;
}
/**
* Kernel function that takes a moving average of the values in
* @list and puts the results in @averages
* Uses shared memory to store the calculations.
*/
// Moving average of each element with its immediate neighbors, using
// shared-memory scratch arrays for the running sum and count.
// NOTE(review): the shared arrays are sized NUM_ELEMENTS but indexed by
// the GLOBAL index idx; this stays in bounds only because idx is guarded
// by NUM_ELEMENTS, and each block wastes the slots outside its slice —
// confirm this is intentional (it mirrors the register variant's result).
__global__ void average_using_shared(unsigned int *list, float *averages)
{
__shared__ unsigned int sums[NUM_ELEMENTS];
__shared__ unsigned int nums[NUM_ELEMENTS];
const unsigned int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
if(idx < NUM_ELEMENTS) {
// Each thread touches only its own shared slot, so no barrier is needed.
sums[idx] = list[idx];
nums[idx] = 1;
// If there is a previous element, add it to sum
if(idx > 0) {
sums[idx] = sums[idx] + list[idx - 1];
nums[idx] = nums[idx] + 1;
}
// If there is a next element, add it to sum
if((idx + 1) < NUM_ELEMENTS) {
sums[idx] = sums[idx] + list[idx + 1];
nums[idx] = nums[idx] + 1;
}
// Calculate the average
averages[idx] = (float) sums[idx] / nums[idx];
}
}
/**
* Fuction to handle the printing of results.
* @list is the original array
* @averages is the result
*/
/**
 * Prints each original value alongside its computed moving average.
 * @list is the original array
 * @averages is the result
 */
void print_results(unsigned int *list, float *averages)
{
    int i = 0;
    printf("\n");
    for (i = 0; i < NUM_ELEMENTS; i++) {
        /* BUG FIX: list[i] is unsigned int, so the conversion must be %u
           (it was %d, which is undefined for values above INT_MAX). */
        printf("Original value at index [%d]: %u, average: %f\n", i, list[i], averages[i]);
    }
    printf("\n");
}
/**
* Function that sets up everything for the kernel function
*
* @array_size size of array (total number of threads)
* @threads_per_block number of threads to put in each block
* @use_registers is 1 if registers should be used. Otherwise, will call
* kernel that uses shared memory
*/
/**
 * Allocates pinned host and device buffers, fills the input with random
 * values, runs one of the two averaging kernels, and prints the timing
 * and results.
 *
 * @use_registers selects the register kernel (true) or the shared-memory
 * kernel (false).
 */
void exec_kernel(bool use_registers)
{
    /* Byte sizes for the input (unsigned int) and output (float) arrays. */
    int array_size_in_bytes = (sizeof(unsigned int) * (NUM_ELEMENTS));
    int float_array_size_in_bytes = (sizeof(float) * (NUM_ELEMENTS));
    int i = 0;
    unsigned int *list;
    float *averages;
    /* Pinned host memory for faster transfers. */
    cudaMallocHost((void **)&list, array_size_in_bytes);
    cudaMallocHost((void **)&averages, float_array_size_in_bytes);
    /* Fill array with random numbers between 0 and MAX_INT. */
    for (i = 0; i < NUM_ELEMENTS; i++) {
        list[i] = (unsigned int) rand() % MAX_INT;
    }
    /* Device buffers and upload of the input. */
    unsigned int *d_list;
    float *d_averages;
    cudaMalloc((void **)&d_list, array_size_in_bytes);
    cudaMalloc((void **)&d_averages, float_array_size_in_bytes);
    cudaMemcpy(d_list, list, array_size_in_bytes, cudaMemcpyHostToDevice);
    /* One thread per element. */
    const unsigned int num_blocks = NUM_ELEMENTS/THREADS_PER_BLOCK;
    const unsigned int num_threads = NUM_ELEMENTS/num_blocks;
    /* Bracket the launch with events; elapsed time is the kernel duration. */
    float duration = 0;
    cudaEvent_t start_time = get_time();
    if (use_registers) {
        average_using_registers<<<num_blocks, num_threads>>>(d_list, d_averages);
    } else {
        average_using_shared<<<num_blocks, num_threads>>>(d_list, d_averages);
    }
    cudaEvent_t end_time = get_time();
    cudaEventSynchronize(end_time);
    cudaEventElapsedTime(&duration, start_time, end_time);
    /* Copy the results back (blocking, so the kernel has finished). */
    cudaMemcpy(averages, d_averages, float_array_size_in_bytes, cudaMemcpyDeviceToHost);
    /* BUG FIX: the message printed "msn" (typo for "ms"). */
    printf("\tDuration: %fms\n", duration);
    print_results(list, averages);
    /* BUG FIX: the two timing events were leaked on every call. */
    cudaEventDestroy(start_time);
    cudaEventDestroy(end_time);
    /* Free device and pinned host memory. */
    cudaFree(d_list);
    cudaFree(d_averages);
    cudaFreeHost(list);
    cudaFreeHost(averages);
}
/**
* Entry point for execution. Checks command line arguments
* then passes execution to subordinate function
*/
/**
 * Entry point: runs each kernel variant twice (the first run of either
 * variant tends to be slower) and separates the runs with a rule line.
 */
int main(int argc, char *argv[])
{
    printf("\n");
    const char *labels[4] = {
        "First Run of Averages Calculated using Shared Memory",
        "Second Run of Averages Calculated using Shared Memory",
        "First Run of Averages Calculated using Register Memory",
        "Second Run of Averages Calculated using Register Memory"
    };
    const bool use_regs[4] = { false, false, true, true };
    for (int run = 0; run < 4; run++) {
        printf("%s", labels[run]);
        exec_kernel(use_regs[run]);
        printf("-----------------------------------------------------------------\n");
    }
    return EXIT_SUCCESS;
}
|
2,970 | #define N 4000
#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
#include <stdio.h>
__global__ void matrixMult (float *a, float *b, float *c, int width) {
    // Naive square matrix multiply, one thread per output element:
    // c[row][col] = dot(row of a, col of b). Out-of-range threads exit.
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    if (col >= width || row >= width)
        return;
    float acc = 0.0f;
    for (int k = 0; k < width; ++k)
        acc += a[row * width + k] * b[k * width + col];
    c[row * width + col] = acc;
}
int main() {
    // Multiplies two N x N all-ones matrices on the GPU and reports the
    // average kernel time over nIter timed launches (after one warm-up).
    float *dev_a, *dev_b, *dev_c;
    float *a, *b, *c;
    a = (float*) malloc(N*N*sizeof(float));
    b = (float*) malloc(N*N*sizeof(float));
    c = (float*) malloc(N*N*sizeof(float));
    // Initialize matrices a and b: every element 1, so c should be all N.
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
        {
            a[i*N+j] = 1;
            b[i*N+j] = 1;
        }
    int size = N * N * sizeof(float);
    cudaMalloc((void **) &dev_a, size);
    cudaMalloc((void **) &dev_b, size);
    cudaMalloc((void **) &dev_c, size);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    int NumThreads = 32;
    dim3 dimGrid(DIV_UP(N,NumThreads), DIV_UP(N,NumThreads));
    dim3 dimBlock(NumThreads, NumThreads);
    // Warm-up launch so the timed loop excludes one-time startup cost.
    matrixMult<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
    cudaError_t error;
    cudaDeviceSynchronize();
    cudaEvent_t start;
    error = cudaEventCreate(&start);
    if (error != cudaSuccess)
        fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error));
    cudaEvent_t stop;
    error = cudaEventCreate(&stop);
    if (error != cudaSuccess)
        fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error));
    error = cudaEventRecord(start, NULL);
    if (error != cudaSuccess)
        fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error));
    int nIter = 10;
    for (int j = 0; j < nIter; j++)
    {
        matrixMult<<<dimGrid, dimBlock>>>(dev_a, dev_b, dev_c, N);
    }
    // Record the stop event and wait for it.
    error = cudaEventRecord(stop, NULL);
    if (error != cudaSuccess)
        fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error));
    error = cudaEventSynchronize(stop);
    if (error != cudaSuccess)
        fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error));
    float msecTotal = 0.0f;
    error = cudaEventElapsedTime(&msecTotal, start, stop);
    if (error != cudaSuccess)
        fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error));
    // Average per-launch time.
    float msecPerMatrixMul = msecTotal / nIter;
    printf ("msec %f\n",msecPerMatrixMul);
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    printf("%f, %f \n",c[0],c[N*N-1] );
    /* BUG FIX: events and host buffers were leaked, and main fell off
       the end without an explicit return. */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    free(a); free(b); free(c);
    return 0;
}
|
__global__ void update_core(double *f, double *g, double *c, int nx, int ny) {
    // Leapfrog-style wave update on the interior of a row-major nx x ny
    // grid; boundary cells are left untouched. One thread per cell.
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int row = idx / ny;
    int col = idx % ny;
    bool interior = (row > 0) && (col > 0) && (row < nx - 1) && (col < ny - 1);
    if (interior) {
        double laplacian = g[idx - ny] + g[idx + ny] + g[idx - 1] + g[idx + 1] - 4 * g[idx];
        f[idx] = c[idx] * laplacian + 2 * g[idx] - f[idx];
    }
}
__global__ void update_src(double *f, double val, int idx0) {
    // Single-thread source injection: only global thread 0 adds val at idx0.
    if (blockIdx.x * blockDim.x + threadIdx.x == 0) {
        f[idx0] += val;
    }
}
|
2,972 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
#define KERNEL_SIZE 20
__constant__ int kernel[KERNEL_SIZE];
__global__ void conv1d(int *input, int *output, int l) {
    // 1-D correlation with the constant-memory kernel[]; taps outside
    // [0, l) are treated as zero. One thread per output element.
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: guard the grid tail — the original wrote output[tid]
    // unconditionally, which is out of bounds whenever the launch is
    // rounded up past l.
    if (tid >= l) return;
    int r = KERNEL_SIZE / 2;
    int start = tid - r;
    int acc = 0;
    for (int j = 0; j < KERNEL_SIZE; j++) {
        int p = start + j;
        if (p >= 0 && p < l) {
            acc += input[p] * kernel[j];
        }
    }
    output[tid] = acc;
}
int main() {
    // Runs a 1-D convolution of an l-element random array against a
    // constant-memory kernel and reports the wall time.
    int l = 20480;
    int i;
    int *host_input, *host_kernel, *host_output;
    int *dev_input, *dev_output;
    cudaMalloc((void**)&dev_input, sizeof(int) * l);
    /* BUG FIX: dev_output was allocated with only KERNEL_SIZE ints even
       though the kernel writes l ints and the copy below reads l ints —
       a guaranteed out-of-bounds access on the device. */
    cudaMalloc((void**)&dev_output, sizeof(int) * l);
    /* Pinned host buffers for faster transfers. */
    cudaMallocHost((void**)&host_input, sizeof(int) * l);
    cudaMallocHost((void**)&host_kernel, sizeof(int) * KERNEL_SIZE);
    cudaMallocHost((void**)&host_output, sizeof(int) * l);
    for (i = 0; i < l; i++) {
        host_input[i] = round(rand());
    }
    for (i = 0; i < KERNEL_SIZE; i++) {
        host_kernel[i] = round(rand());
    }
    clock_t start_time = clock();
    cudaMemcpy(dev_input, host_input, sizeof(int) * l, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(kernel, host_kernel, sizeof(int) * KERNEL_SIZE);
    int block = 256;
    int grid = (l + block - 1) / block;   // ceil-div launch over l elements
    conv1d<<<grid, block>>>(dev_input, dev_output, l);
    cudaMemcpy(host_output, dev_output, sizeof(int) * l, cudaMemcpyDeviceToHost);
    clock_t end_time = clock();
    printf("Time consuming of 1D convolution of %d array with %d kernel is %f ms.\n", l, KERNEL_SIZE, static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);
    cudaFree(dev_input);
    cudaFree(dev_output);
    cudaFreeHost(host_input);
    cudaFreeHost(host_kernel);
    cudaFreeHost(host_output);
    return 0;
}
|
// Looks like a time-burning kernel (commonly used in stream/concurrency
// demos) — confirm against the caller; the control flow is deliberately
// opaque to keep the compiler from eliding the spin.
__global__ void clock_block(clock_t* d_o, long clock_count)
{
// NOTE(review): start_clock is captured but never used — presumably a
// leftover from a latency-measurement variant of this kernel.
clock_t start_clock = clock64();
volatile long clock_offset = 0;
volatile int i = 0;
// The inner while copies clock_count into clock_offset and decrements
// clock_count, so each entry terminates after one pass; the volatile
// qualifiers force the loads/stores to actually happen on every one of
// the 10,000,000 outer iterations.
for (i = 0; i < 10000000; i++)
while (clock_offset < clock_count)
{
clock_offset = clock_count--;
}
// Publish the last observed value so the loop has an observable effect.
d_o[0] = clock_offset;
}
|
2,974 | #include "includes.h"
// Separable convolution over an image using a shared-memory tile with an
// n = Mask_size/2 halo on each side: each thread loads four values (one
// per tile quadrant), the block synchronizes, then each thread convolves
// its pixel with Global_Mask (1-D mask applied in both axes; both the
// mask and BLOCKSIZE come from includes.h).
// NOTE(review): the tile is declared with Mask_size*2 extra cells per
// dimension but the loads only need 2*n = Mask_size — oversized but
// harmless; also assumes blockDim.x == blockDim.y == BLOCKSIZE (confirm
// at the launch site).
__global__ void sobelFilterShared(unsigned char *data, unsigned char *result, int width, int height){
// Data cache: threadIdx.x , threadIdx.y
const int n = Mask_size / 2;
__shared__ int s_data[BLOCKSIZE + Mask_size * 2 ][BLOCKSIZE + Mask_size * 2];
// global mem address of the current thread in the whole grid
const int pos = threadIdx.x + blockIdx.x * blockDim.x + threadIdx.y * width + blockIdx.y * blockDim.y * width;
// load cache (32x32 shared memory, 16x16 threads blocks)
// each threads loads four values from global memory into shared mem
// if in image area, get value in global mem, else 0
int x, y; // image based coordinate
// original image based coordinate
const int x0 = threadIdx.x + blockIdx.x * blockDim.x;
const int y0 = threadIdx.y + blockIdx.y * blockDim.y;
// case1: upper left
x = x0 - n;
y = y0 - n;
if ( x < 0 || y < 0 )
s_data[threadIdx.y][threadIdx.x] = 0;
else
s_data[threadIdx.y][threadIdx.x] = *(data + pos - n - (width * n));
// case2: upper right
x = x0 + n;
y = y0 - n;
if ( x > (width - 1) || y < 0 )
s_data[threadIdx.y][threadIdx.x + blockDim.x] = 0;
else
s_data[threadIdx.y][threadIdx.x + blockDim.x] = *(data + pos + n - (width * n));
// case3: lower left
x = x0 - n;
y = y0 + n;
if (x < 0 || y > (height - 1))
s_data[threadIdx.y + blockDim.y][threadIdx.x] = 0;
else
s_data[threadIdx.y + blockDim.y][threadIdx.x] = *(data + pos - n + (width * n));
// case4: lower right
x = x0 + n;
y = y0 + n;
if ( x > (width - 1) || y > (height - 1))
s_data[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = 0;
else
s_data[threadIdx.y + blockDim.y][threadIdx.x + blockDim.x] = *(data + pos + n + (width * n));
// Barrier: the tile must be fully populated before any thread convolves.
__syncthreads();
// convolution
int sum = 0;
x = n + threadIdx.x;
y = n + threadIdx.y;
for (int i = - n; i <= n; i++)
for (int j = - n; j <= n; j++)
sum += s_data[y + i][x + j] * Global_Mask[n + i] * Global_Mask[n + j];
// NOTE(review): sum is stored unclamped into an unsigned char — values
// outside [0,255] wrap; confirm whether saturation was intended.
result[pos] = sum;
} |
2,975 | #include <stdio.h>
__global__ void vector_add(int *a, int *b, int *c)
{
    /* Element-wise sum c[i] = a[i] + b[i], one thread per element.
       BUG FIX: the index ignored blockIdx (the TODO in the original asked
       for exactly this), so any multi-block launch would have had every
       block overwrite elements 0..blockDim.x-1. The caller must still not
       launch more threads than there are elements — this kernel has no
       length parameter to guard against a rounded-up grid. */
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    c[index] = a[index] + b[index];
}
#define dim 3
int main()
{
    // Adds two dim x dim matrices (stored as flat arrays) on the GPU.
    int *a, *b, *c;
    int *d_a, *d_b, *d_c;
    // The matrix is sent as one flat array of dim*dim ints.
    int size = dim * dim * sizeof( int );
    /* allocate space for device copies of a, b, c */
    cudaMalloc( (void **) &d_a, size );
    cudaMalloc( (void **) &d_b, size );
    cudaMalloc( (void **) &d_c, size );
    /* allocate space for host copies of a, b, c and setup input values */
    a = (int *)malloc( size );
    b = (int *)malloc( size );
    c = (int *)malloc( size );
    for( int i = 0; i < dim * dim; i++ )
    {
        a[i] = b[i] = i;
        c[i] = 0;
    }
    printf("A and B are:\n");
    for(int i=0; i< dim * dim; i++)
    {
        if(i%dim == 0)
            printf("\n");
        printf("%d ", a[i]);
    }
    /* copy inputs to device */
    cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
    /* BUG FIX: the block used 10 threads for dim*dim == 9 elements, so
       thread 9 wrote one element past the end of every device array.
       Launch exactly one thread per element instead. */
    dim3 dimBlock( dim * dim, 1 );
    dim3 dimGrid( 1, 1 );
    vector_add<<<dimGrid,dimBlock>>>( d_a, d_b, d_c);
    /* copy result back to host (blocking, so the kernel has finished) */
    cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
    printf("\n\nTheir sum =\n");
    for(int i=0; i< dim * dim; i++)
    {
        if(i%dim == 0)
            printf("\n");
        printf("%d ", c[i]);
    }
    free(a);
    free(b);
    free(c);
    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );
    return 0;
}
|
2,976 | /*
**********************************************
* CS314 Principles of Programming Languages *
* Spring 2020 *
**********************************************
*/
#include <stdio.h>
#include <stdlib.h>
// Course-assignment stub: the kernel body is intentionally left for the
// student to implement. From the signature, it appears to be one step of
// a strongest-neighbor scan over an edge list (src, dst, weight arrays,
// a distance/stride, and a made-changes flag) — semantics to be
// confirmed against the assignment handout; nothing here executes yet.
__global__ void strongestNeighborScan_gpu(int * src, int * oldDst, int * newDst, int * oldWeight, int * newWeight, int * madeChanges, int distance, int numEdges) {
/*YOUR CODE HERE*/
}
|
2,977 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define dT 0.2f
#define G 0.6f
#define BLOCK_SIZE 64
// Global variables
int num_planets;
int num_timesteps;
// Host arrays
float2* velocities;
float4* planets;
// Device arrays
float2* velocities_d;
float4* planets_d;
// Parse command line arguments
// Parses the command line: exactly one argument, the number of timesteps.
// Exits with an error message otherwise.
void parse_args(int argc, char** argv){
    if(argc != 2){
        /* BUG FIX: the usage message was misspelled ("Useage"). */
        printf("Usage: nbody num_timesteps\n");
        exit(-1);
    }
    num_timesteps = strtol(argv[1], 0, 10);
}
// Reads planets from planets.txt
// Reads planets.txt: the first line holds the planet count, then one
// planet per line as "x y vx vy m". Position goes to planets[].x/.y,
// velocity to velocities[], and the fifth field to planets[].z —
// presumably the mass, since it multiplies G in
// calculate_velocity_change_planet (confirm with the data file).
// NOTE(review): fgets/sscanf return values are unchecked, so a short or
// malformed file silently leaves entries uninitialized.
void read_planets(){
FILE* file = fopen("planets.txt", "r");
if(file == NULL){
printf("'planets.txt' not found. Exiting\n");
exit(-1);
}
char line[200];
fgets(line, 200, file);
sscanf(line, "%d", &num_planets);
// Host arrays sized from the header line.
planets = (float4*)malloc(sizeof(float4)*num_planets);
velocities = (float2*)malloc(sizeof(float2)*num_planets);
for(int p = 0; p < num_planets; p++){
fgets(line, 200, file);
sscanf(line, "%f %f %f %f %f",
&planets[p].x,
&planets[p].y,
&velocities[p].x,
&velocities[p].y,
&planets[p].z);
}
fclose(file);
}
// Writes planets to file
// Writes the final planet state to planets_out.txt, one planet per line
// as "x y vx vy z" (mirroring the input format read by read_planets).
// The timestep parameter is unused but kept for interface compatibility.
void write_planets(int timestep){
    char name[20];
    snprintf(name, sizeof(name), "planets_out.txt");
    /* BUG FIX: "wr+" is not a valid fopen mode string (modes must begin
       with r, w, or a and optionally add + and/or b); plain "w" is what
       this truncate-and-write use needs. Also check for failure instead
       of dereferencing a NULL FILE*. */
    FILE* file = fopen(name, "w");
    if(file == NULL){
        printf("could not open '%s' for writing\n", name);
        return;
    }
    for(int p = 0; p < num_planets; p++){
        fprintf(file, "%f %f %f %f %f\n",
        planets[p].x,
        planets[p].y,
        velocities[p].x,
        velocities[p].y,
        planets[p].z);
    }
    fclose(file);
}
// TODO 7. Calculate the change in velocity for p, caused by the interaction with q
// Velocity change on planet p caused by planet q over one timestep:
// dv = dT * G * m_q * r / |r|^3, where r = q.pos - p.pos and the mass is
// carried in q.z. The self/coincident case (r == 0) contributes nothing.
__device__ float2 calculate_velocity_change_planet(float4 p, float4 q){
    float2 r;
    r.x = q.x - p.x;
    r.y = q.y - p.y;
    if(r.x == 0 && r.y == 0) {
        float2 v = {0.0f, 0.0f};
        return v;
    }
    /* Use the single-precision sqrtf: the original sqrt promoted the
       whole expression to double in an otherwise float-only kernel. */
    float abs_dist = sqrtf(r.x*r.x + r.y*r.y);
    float dist_cubed = abs_dist*abs_dist*abs_dist;
    float2 dv;
    dv.x = dT*G*q.z/dist_cubed * r.x;
    dv.y = dT*G*q.z/dist_cubed * r.y;
    return dv;
}
// TODO 5. Calculate the change in velocity for my_planet, caused by the interactions with a block of planets
// Sums the velocity contributions on my_planet from one block-sized tile
// of planets staged in shared memory.
__device__ float2 calculate_velocity_change_block(float4 my_planet, float4* shared_planets){
    float2 dv = {0.0f, 0.0f};
    for(int k = 0; k < blockDim.x; k++) {
        float2 contrib = calculate_velocity_change_planet(my_planet, shared_planets[k]);
        dv.x += contrib.x;
        dv.y += contrib.y;
    }
    return dv;
}
// TODO 4. Update the velocities by calculating the planet interactions
// Tiled all-pairs velocity update: each block stages BLOCK_SIZE planets
// in shared memory at a time; every thread accumulates the force of each
// tile on its own planet.
// BUG FIX: the original read planets[thread_id] and planets[i+threadIdx.x]
// without bounds checks, going out of bounds whenever num_planets is not
// a multiple of BLOCK_SIZE. Out-of-range threads still execute the full
// loop (its trip count is uniform across the block), so __syncthreads()
// is never reached divergently.
__global__ void update_velocities(float4* planets, float2* velocities, int num_planets){
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    float4 my_planet = (thread_id < num_planets)
        ? planets[thread_id]
        : make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    __shared__ float4 shared_planets[BLOCK_SIZE];
    for(int i = 0; i < num_planets; i += blockDim.x) {
        int src = i + threadIdx.x;
        // Pad the tile with zero-mass planets: q.z == 0 makes
        // calculate_velocity_change_planet return (0, 0).
        shared_planets[threadIdx.x] = (src < num_planets)
            ? planets[src]
            : make_float4(0.0f, 0.0f, 0.0f, 0.0f);
        __syncthreads();
        float2 dv = calculate_velocity_change_block(my_planet, shared_planets);
        if (thread_id < num_planets) {
            velocities[thread_id].x += dv.x;
            velocities[thread_id].y += dv.y;
        }
        __syncthreads();  // tile must not be overwritten while still in use
    }
}
// TODO 7. Update the positions of the planets using the new velocities
// Euler position update: pos += v * dT for each planet.
// BUG FIX: guard the grid tail — the original wrote past the end of the
// arrays whenever num_planets was not a multiple of BLOCK_SIZE.
__global__ void update_positions(float4* planets, float2* velocities, int num_planets){
    int thread_id = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_id < num_planets) {
        planets[thread_id].x += velocities[thread_id].x * dT;
        planets[thread_id].y += velocities[thread_id].y * dT;
    }
}
int main(int argc, char** argv){
    // N-body driver: read input, simulate num_timesteps on the GPU,
    // copy the state back, and write the result file.
    parse_args(argc, argv);
    read_planets();
    // Device buffers + upload of the initial state.
    cudaMalloc(&planets_d, sizeof(float4)*num_planets);
    cudaMalloc(&velocities_d, sizeof(float2)*num_planets);
    cudaMemcpy(planets_d, planets, sizeof(float4)*num_planets, cudaMemcpyHostToDevice);
    cudaMemcpy(velocities_d, velocities, sizeof(float2)*num_planets, cudaMemcpyHostToDevice);
    // Round the grid up so every planet gets a thread.
    int num_blocks = num_planets/BLOCK_SIZE + ((num_planets%BLOCK_SIZE == 0) ? 0 : 1);
    // Main loop: velocities then positions each timestep (same stream,
    // so the kernels are implicitly ordered).
    for(int t = 0; t < num_timesteps; t++){
        update_velocities<<<num_blocks, BLOCK_SIZE>>>(planets_d, velocities_d, num_planets);
        update_positions<<<num_blocks, BLOCK_SIZE>>>(planets_d, velocities_d, num_planets);
    }
    // Blocking copies synchronize with the last kernel.
    cudaMemcpy(velocities, velocities_d, sizeof(float2)*num_planets, cudaMemcpyDeviceToHost);
    cudaMemcpy(planets, planets_d, sizeof(float4)*num_planets, cudaMemcpyDeviceToHost);
    write_planets(num_timesteps);
    /* BUG FIX: device and host allocations were leaked and main had no
       explicit return. */
    cudaFree(planets_d);
    cudaFree(velocities_d);
    free(planets);
    free(velocities);
    return 0;
}
|
2,978 | //pass
//--blockDim=1024 --gridDim=1
// Copies one element from *in to *out at this thread's lane, then makes
// *out alias *in — so a second call through the same pointers copies
// within a single buffer. Part of a verifier test (see the //pass header
// above), where the aliasing is the point.
__device__ void bar(char **in, char **out) {
char tmp = (*in)[threadIdx.x];
out[0][threadIdx.x] = tmp;
*out = *in;
}
// Appears to be a GPUVerify-style race test (per the //pass header):
// choice1/choice2 pick A and B in opposite orders depending on c, and
// the two bar() calls exercise the pointer swap bar performs internally.
__global__ void foo(char *A, char *B, char c)
{
char *choice1 = c ? A : B;
char *choice2 = c ? B : A;
bar(&choice1, &choice2);
bar(&choice1, &choice2);
}
|
2,979 | #include<stdio.h>
__global__ void kernel(int *d_o,int*d_i)
{
    // Shift-left-by-one copy of a 10-element array: d_o[i] = d_i[i+1].
    // BUG FIX: the original read d_i[index+1] for index == 9, one past
    // the end of the 10-element buffer; the last slot now clamps to the
    // final element instead of reading out of bounds.
    int index=threadIdx.x+blockIdx.x*blockDim.x;
    if(index<10)
    {
        int temp = (index+1 < 10) ? d_i[index+1] : d_i[index];
        __syncthreads();   // all reads complete before any write
        d_o[index]=temp;
        __syncthreads();
    }
}
int main()
{
    // Fills a 10-element array, runs the shift kernel, and prints
    // output/input side by side.
    const int N=10;
    int h_i[N];
    int h_o[N];
    for(int i=0;i<N;i++)
    {
        h_i[i]=i;
    }
    int *d_i;
    int *d_o;
    const int size=N*sizeof(int);
    cudaMalloc((void **) &d_i,size);
    cudaMalloc((void **) &d_o,size);
    cudaMemcpy(d_i,h_i,size,cudaMemcpyHostToDevice);
    kernel<<<1,N>>>(d_o,d_i);
    // Blocking copy synchronizes with the kernel.
    cudaMemcpy(h_o,d_o,size,cudaMemcpyDeviceToHost);
    for (int i=0;i<N;i++)
    {
        printf("%d",h_o[i]);
        printf("\t");
        printf("%d",h_i[i]);
        printf("\n");
    }
    /* BUG FIX: device buffers were leaked and main fell off the end. */
    cudaFree(d_i);
    cudaFree(d_o);
    return 0;
} |
2,980 | /////////////////////////////////////////////////////////////////////////
// Parallel Computing Assignment 3
// Chris Jimenez
// 5/1/14
// This CUDA program finds the max integer in an array of random integers.
// This program DOES use shared meemory and DOES take thread
// divergaence in to consideration. The number of integers is set to 8192.
//
/////////////////////////////////////////////////////////////////////////
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//define numebr of integers...
#define NUM_OF_INTEGERS 8192
//define max integer
#define MAX 100000
#define WARP_SIZE 32
///////////////////////////////////
/*The folllowing is dependent on whatever GPU this program is running on
if runnign on the NYU GPU's, the max threads per block is 512.
RUnning on a NVIDIA GeForce GT 650M(on personal machine), the max threads
per block is 1024
*/
#define THREADS_PER_BLOCK 512
#define NUM_BLOCKS NUM_OF_INTEGERS/THREADS_PER_BLOCK
/****** Function declarations */
void fill_array();
__global__ void get_max(int *array, int *max_results);
/********************************/
/////////////////////////////////////////////////////////
/*******************************************************/
/* Function fills the givne array a with random integers */
/* Seed the PRNG from the wall clock, then fill a[] with
   NUM_OF_INTEGERS values in [0, MAX). */
void fill_array(int *a){
    srand((unsigned) time(NULL));
    for(int i = 0; i < NUM_OF_INTEGERS; i++){
        a[i] = random() % MAX;
    }
}
/*******************************************************/
/* Kernel Function finds the max integer in given array by
using reduction technique. Ultimately, the largest
will be located at the 0th position of the array */
/* Block-level max reduction: each block reduces its slice of array[] in
   shared memory and writes one candidate max per block; the host reduces
   the per-block results. blockDim.x must be a power of two.

   BUG FIX: the original loop stopped once nTotalThreads reached
   WARP_SIZE and then published max[0] from EVERY thread — at that point
   max[0..31] still held 32 unreduced partial maxima, so the result was
   only the max of one partial lane (and relied on unsafe implicit warp
   synchrony anyway). The reduction now runs down to a single value with
   a proper barrier per step, and only thread 0 publishes it. */
__global__ void get_max(int *array, int *max_results){
    __shared__ int max[THREADS_PER_BLOCK];
    int index = threadIdx.x + (blockDim.x * blockIdx.x);
    max[threadIdx.x] = array[index];
    __syncthreads();
    for (int half = blockDim.x / 2; half > 0; half /= 2)
    {
        if (threadIdx.x < half)
        {
            int temp = max[threadIdx.x + half];
            if (temp > max[threadIdx.x]) {
                max[threadIdx.x] = temp;
            }
        }
        __syncthreads();
    }
    if (threadIdx.x == 0) {
        max_results[blockIdx.x] = max[0];
    }
}
/*******************************************************/
int main(int argc, char *argv[]){
    // Host/device buffers: the raw data plus one candidate max per block.
    int *h_array, *h_resultmax;
    int *d_array, *d_resultmax;
    printf("Initializing data...\n");
    h_array = (int *) malloc(NUM_OF_INTEGERS * sizeof(int));
    h_resultmax = (int *) malloc(NUM_BLOCKS * sizeof(int));
    fill_array(h_array);
    cudaMalloc((void **)&d_array, sizeof(int) * NUM_OF_INTEGERS);
    cudaMalloc((void **)&d_resultmax, sizeof(int) * NUM_BLOCKS);
    cudaMemcpy(d_array, h_array, sizeof(int) * NUM_OF_INTEGERS, cudaMemcpyHostToDevice);
    // GPU pass: one candidate max per block.
    get_max<<<NUM_BLOCKS, THREADS_PER_BLOCK>>>(d_array, d_resultmax);
    cudaMemcpy(h_resultmax, d_resultmax, sizeof(int) * NUM_BLOCKS, cudaMemcpyDeviceToHost);
    // Host pass: reduce the per-block candidates to the overall max.
    int best = h_resultmax[0];
    for (int b = 1; b < NUM_BLOCKS; b++) {
        if (h_resultmax[b] > best) {
            best = h_resultmax[b];
        }
    }
    printf("The max integer in the array is: %d\n", best);
    printf("Cleaning up...\n");
    free(h_array);
    free(h_resultmax);
    cudaFree(d_array);
    cudaFree(d_resultmax);
    return 0;
} |
__global__ void DotProd_kernel(float *result, const float* vec1, const float* vec2, int N)
{
    // Element-wise product result[i] = vec1[i] * vec2[i]; the guard
    // makes arbitrary N safe for any rounded-up launch.
    const unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= N)
        return;
    result[i] = vec1[i] * vec2[i];
}
|
2,982 | /***
* Ashutosh Dhar
* Department of Electrical and Computer Engineeing
* University of Illinois, Urbana-Champaign
*
*/
#include <cuda.h>
#include <iostream>
#include <cstdio>
#define THREADS_PER_SM 1
#define BLOCKS_PER_SM 1
int ITERATIONS;
int L2_CACHE_SIZE = 512*1024;
int DATA_SIZE;// (L2_CACHE_SIZE * ITERATIONS)
using namespace std;
// Pointer-chases through data[] (the host fills data[i] = i, so the walk
// is the identity), timing each dependent load with clock() and writing
// the average cycles-per-load into latency[0]. Intended for a
// single-thread, single-block launch (the shared accumulator is not
// protected against multiple writers).
__global__ void cache_latency(double *latency, int *data, int DATA_SIZE)
{
    __shared__ long long int run_latency;
    unsigned int start_t, stop_t;
    int load = 0;
    // BUG FIX: run_latency was accumulated without ever being set to
    // zero; shared memory starts with undefined contents, so the
    // reported latency included garbage.
    run_latency = 0;
    for(int i = 0; i < DATA_SIZE; i++)
    {
        start_t = clock();
        load = data[load];   // dependent load: next address comes from this one
        stop_t = clock();
        __syncthreads();
        run_latency += (stop_t - start_t);
        __syncthreads();
    }
    latency[0] = (double)(run_latency)/(DATA_SIZE);
}
int main(int argc, char **argv)
{
    // Driver: builds an identity pointer-chase array sized by the
    // iteration count, runs the latency kernel, and prints the result.
    if(argc <2)
    {
        cerr<<"Enter iterations!";
        return -1;
    }
    ITERATIONS = atoi(argv[1]);
    DATA_SIZE = L2_CACHE_SIZE * ITERATIONS;
    int *data;
    data = (int*) malloc(sizeof(int)*DATA_SIZE);
    srand(12);
    // data[i] = i makes the kernel's pointer chase the identity walk.
    for(int i=0; i<DATA_SIZE; i++)
    {
        data[i] = i;
    }
    double *latency;
    latency = (double*) malloc((sizeof(double)) *1);
    double *d_latency;
    int *d_data;
    cudaError_t errorFlag = cudaSuccess;
    errorFlag = cudaMalloc((void**) &d_latency, (sizeof(double)*1));
    if(errorFlag != cudaSuccess)
    {
        fprintf(stderr, "Failed to alloc memory (error code %s)!\n", cudaGetErrorString(errorFlag));
        exit(-1);
    }
    errorFlag = cudaMalloc((void**) &d_data, (sizeof(int)*DATA_SIZE));
    if(errorFlag != cudaSuccess)
    {
        fprintf(stderr, "Failed to alloc memory (error code %s)!\n", cudaGetErrorString(errorFlag));
        exit(-1);
    }
    errorFlag = cudaMemcpy(d_data, data, (sizeof(int)*DATA_SIZE), cudaMemcpyHostToDevice);
    if(errorFlag != cudaSuccess)
    {
        fprintf(stderr, "Failed to copyback (error code %s)!\n", cudaGetErrorString(errorFlag));
        exit(-1);
    }
    dim3 dimBlock(THREADS_PER_SM,1,1);
    dim3 dimGrid(BLOCKS_PER_SM,1,1);
    cache_latency<<<dimGrid,dimBlock>>>(d_latency,d_data,DATA_SIZE);
    cudaDeviceSynchronize();
    errorFlag = cudaGetLastError();
    if(errorFlag != cudaSuccess)
    {
        fprintf(stderr, "Kernel launch error! (error code %s)!\n", cudaGetErrorString(errorFlag));
        exit(-1);
    }
    errorFlag = cudaMemcpy(latency, d_latency, (sizeof(double)*1), cudaMemcpyDeviceToHost);
    if(errorFlag != cudaSuccess)
    {
        fprintf(stderr, "Failed to copyback (error code %s)!\n", cudaGetErrorString(errorFlag));
        exit(-1);
    }
    cout<<"\nLatency\n";
    cout<<": "<< latency[0]<<endl;
    cout<<endl;
    /* BUG FIX: host and device allocations were leaked. */
    cudaFree(d_latency);
    cudaFree(d_data);
    free(data);
    free(latency);
    return 0;
}
|
2,983 | #include <math.h>
#define SIGN(x) ((x) > 0.0 ? 1 : -1)
__global__ void init_image_kernel(float *img) {
  // Zero-fill the image volume. The linear index mirrors the original
  // layout: z (threadIdx.x) fastest, then y (blockIdx.y), then x.
  size_t idx = (size_t)threadIdx.x
             + (size_t)blockIdx.y * blockDim.x
             + (size_t)blockIdx.x * blockDim.x * gridDim.y;
  img[idx] = 0.0;
}
// Precomputes, for every (pixel x, pixel y, receiver n) triple:
//   cosAlpha = |cos of the angle between the receiver->pixel vector and
//              the receiver->origin vector|
//   tempc    = receiver->pixel distance minus lenR/cosAlpha
// Launch layout implied by the indexing: gridDim = (nx, ny) and
// blockDim.x = number of receivers (one thread per receiver) — confirm
// at the launch site.
__global__ void calculate_cos_alpha_and_tempc
(float *cosAlpha, float *tempc, float *xRange, float *yRange,
float *xReceive, float *yReceive, float lenR) {
size_t xi = blockIdx.x;
size_t yi = blockIdx.y;
size_t ni = threadIdx.x;
size_t idx = ni + yi*blockDim.x + xi*blockDim.x*gridDim.y;
// Vector from receiver ni to pixel (xi, yi).
float dx = xRange[xi] - xReceive[ni];
float dy = yRange[yi] - yReceive[ni];
// r0: receiver distance from the origin; rr0: receiver-to-pixel distance.
float r0 = sqrt(xReceive[ni]*xReceive[ni] + yReceive[ni]*yReceive[ni]);
float rr0 = sqrt(dx*dx + dy*dy);
// Normalized dot product of (receiver->origin) with (receiver->pixel).
cosAlpha[idx] = fabs((-xReceive[ni]*dx-yReceive[ni]*dy)/r0/rr0);
tempc[idx] = rr0 - lenR/cosAlpha[idx];
}
// Back-projects one receiver line (paDataLine) into the 3-D image using
// the precomputed cosAlpha/tempc tables: grid = (x, y), threadIdx.x = z.
// A voxel accumulates only if it falls inside the element's vertical
// acceptance cone; the sample index is derived from the travel time
// (rr0/vm - delayIdx) at sampling rate fs.
__global__ void backprojection_kernel_fast
(float *img, float *paDataLine,
float *cosAlpha_, float *tempc_, float *zRange,
float zReceive, float lenR, float elementHeight,
float vm, float delayIdx, float fs,
unsigned int ni, unsigned int nSteps, unsigned int lineLength) {
size_t xi = blockIdx.x;
size_t yi = blockIdx.y;
size_t zi = threadIdx.x;
// Precomputed tables are laid out (receiver, y, x) with nSteps receivers.
size_t precompIdx = ni + yi*nSteps + xi*nSteps*gridDim.y;
float dz = zRange[zi] - zReceive;
float cosAlpha = cosAlpha_[precompIdx];
float tempc = tempc_[precompIdx];
// Acceptance test: voxel within the element's vertical aperture.
if (fabs(dz/tempc) < fabs(elementHeight*cosAlpha/2.0/lenR)) {
/*size_t imgIdx = zi + yi*blockDim.x + xi*blockDim.x*gridDim.y;*/
// NOTE(review): the live index uses blockDim.y in the x term while the
// commented-out variant uses blockDim.x in both — these only agree when
// blockDim.y == blockDim.x (or the layout really is z + x*dim + y*dim^2);
// confirm against the host-side image layout.
size_t imgIdx = zi + xi*blockDim.y + yi*blockDim.x*gridDim.y;
// Total path length including the lens correction lenR/cosAlpha.
float rr0 = sqrt(tempc*tempc + dz*dz)*SIGN(tempc) + lenR/cosAlpha;
// Solid-angle weighting applied as a divisor below.
float angleWeightB = tempc/sqrt(tempc*tempc+dz*dz)*cosAlpha/(rr0*rr0);
// Convert travel time to a sample index in the data line.
size_t idx0 = lround((rr0/vm-delayIdx)*fs);
if (idx0 < lineLength) {
img[imgIdx] += paDataLine[idx0] / angleWeightB;
}
}
}
// 2-D back-projection with fully precomputed sample indices: for each
// voxel (x = blockIdx.x, y = blockIdx.y, z-slice = threadIdx.x) sum the
// weighted samples over all nSteps receiver positions. idxAll holds
// 1-based sample indices (hence the -1), presumably exported from
// MATLAB given the 'F' (column-major) layout noted below — confirm with
// the host code that fills idxAll.
__global__ void backprojection_2d_kernel_fast
(float *pa_img, float *pa_data, unsigned int *idxAll, float *angularWeight,
int nSteps, int nTimeSamples) {
size_t xi = blockIdx.x;
size_t yi = blockIdx.y;
size_t zi = threadIdx.x;
size_t imgIdx = xi + yi*gridDim.x + zi*gridDim.x*gridDim.y;
// all data arrays are in 'F' order
for (size_t iStep = 0; iStep < nSteps; iStep++) {
size_t idx = xi + yi*gridDim.x + iStep*gridDim.x*gridDim.y;
// pa_data is indexed (time sample, receiver step, z-slice).
pa_img[imgIdx] +=
pa_data[(size_t)(idxAll[idx] - 1 + nTimeSamples*iStep + zi*nTimeSamples*nSteps)] * angularWeight[idx];
}
}
|
2,984 | #include <cmath>
// In-place element-wise minimum: first[i] = min(first[i], second[i]).
// One element per thread; the thread index IS the element index, so the
// caller must launch exactly as many threads as there are elements.
__global__ void call_min(double* first, const double* second)
{
    const unsigned int i = threadIdx.x;
    const double a = first[i];
    const double b = second[i];
    first[i] = std::fmin(a, b);
}
|
2,985 | #include <stdio.h>
int main ( int argc, char *argv[ ] ) {
    /* Reads whitespace-separated integers from the file named in argv[1].
       The first two values are echoed as header entries ("A" lines); every
       later value is stored into z[] and echoed ("B" lines).
       Fixes vs. the original:
        - z was an UNINITIALIZED pointer that fscanf wrote through
          (undefined behavior); it is now a bounded local buffer;
        - the `while(!feof(...))` anti-pattern (which processes the last
          value twice) is replaced by checking fscanf's return value;
        - fclose() is no longer called when fopen() failed. */
    enum { Z_CAPACITY = 4096 };   /* max stored matrix elements */
    int x, y;
    int z[ Z_CAPACITY ];
    if ( argc != 2 ) /* argc should be 2 for correct execution */
    {
        /* We print argv[0] assuming it is the program name */
        printf( "\nusage: %s filenametoread \n\n", argv[0] );
    }
    else
    { //assumes space separate integer values e.g. -1 23 4 -56 6 77
        FILE *ptr = fopen( argv[ 1 ], "r" );
        if ( ptr == 0 )
            printf( "\n could not open file %s \n", argv[ 1 ] );
        else
        {
            y = 1;
            while ( fscanf( ptr, "%d", &x ) == 1 ) {
                if ( y < 3 ) {
                    printf( "\n A: y: %d MatEl: %d \n", y, x );
                }
                else if ( y - 3 < Z_CAPACITY ) {
                    printf( "\n B: y: %d MatEl: %d ", y, x );
                    z[ y - 3 ] = x;
                    printf( " z[ w ]: %d \n", z[ y - 3 ] );
                }
                y++;
            }
            fclose( ptr );
        }
    }
    return 0;
}
|
2,986 | #include "includes.h"
// Fill channel c_n of `delta` with the weighted-moment planes needed to
// merge components c_src1 and c_src2: per pixel, {w*x, w*y, w*x^2, w*y^2, w}
// when the pixel has positive weight and lies inside either source region,
// and all zeros otherwise. `delta` holds NUM_SUMS planes of `size` floats
// per channel; plane 4 is the weight plane.
__global__ void Prepare_1_MeansForJoin(float* input, int c_src1, int c_src2, int c_n, float* delta, int imageWidth, int imageHeight)
{
    int id = blockDim.x * blockIdx.y * gridDim.x
        + blockDim.x * blockIdx.x
        + threadIdx.x;
    int size = imageWidth * imageHeight;
    if (id >= size)
        return;
    int px = id % imageWidth;
    int py = id / imageWidth;
    // A pixel is inside a source when that source's weight plane is non-zero.
    bool insideSrc1 = delta[c_src1 * NUM_SUMS * size + 4 * size + id] != 0;
    bool insideSrc2 = delta[c_src2 * NUM_SUMS * size + 4 * size + id] != 0;
    float* dst = delta + c_n * NUM_SUMS * size + id;
    float w = input[id];
    if (w > 0 && (insideSrc1 || insideSrc2)) {
        // Pixel position normalized into [-1, 1].
        float2 pixPos = { 2.0f * px / imageWidth - 1, 2.0f * py / imageHeight - 1};
        dst[0 * size] = w * pixPos.x;            // w * pos
        dst[1 * size] = w * pixPos.y;
        dst[2 * size] = w * pixPos.x * pixPos.x; // w * pos^2
        dst[3 * size] = w * pixPos.y * pixPos.y;
        dst[4 * size] = w;                       // w
    }
    else
    {
        dst[0 * size] = 0;
        dst[1 * size] = 0;
        dst[2 * size] = 0;
        dst[3 * size] = 0;
        dst[4 * size] = 0;
    }
}
2,987 | #include "includes.h"
// Radix-sort helper: for the 4-bit digit at [startbit, startbit+4) of each
// key, find where each of the 16 digit values' run starts inside this
// block's 2*blockDim.x keys (written to blockOffsets) and how long each run
// is (written to counters, transposed across blocks for the later scan).
// Launch contract (from the code): each thread loads one uint2 (two keys);
// dynamic shared memory must hold 2*blockDim.x uints for sRadix1.
// NOTE(review): correctness presumes each digit value forms ONE contiguous
// run within the block (keys locally sorted by this digit) -- confirm
// against the calling sort pass.
__global__ void findRadixOffsets(uint2* keys, uint* counters, uint* blockOffsets, uint startbit, uint numElements, uint totalBlocks)
{
__shared__ uint sStartPointers[16];
extern __shared__ uint sRadix1[];
uint groupId = blockIdx.x;
uint localId = threadIdx.x;
uint groupSize = blockDim.x;
uint2 radix2;
// Each thread extracts the current 4-bit digit of its two keys into shared memory.
radix2 = keys[threadIdx.x + (blockIdx.x * blockDim.x)];
sRadix1[2 * localId] = (radix2.x >> startbit) & 0xF;
sRadix1[2 * localId + 1] = (radix2.y >> startbit) & 0xF;
// Finds the position where the sRadix1 entries differ and stores start
// index for each radix.
if(localId < 16)
{
sStartPointers[localId] = 0;
}
__syncthreads();
// A run starts wherever the digit differs from its left neighbor
// (checked once in the low half, once in the high half of sRadix1).
if((localId > 0) && (sRadix1[localId] != sRadix1[localId - 1]) )
{
sStartPointers[sRadix1[localId]] = localId;
}
if(sRadix1[localId + groupSize] != sRadix1[localId + groupSize - 1])
{
sStartPointers[sRadix1[localId + groupSize]] = localId + groupSize;
}
__syncthreads();
// Publish the 16 per-digit start offsets for this block.
if(localId < 16)
{
blockOffsets[groupId*16 + localId] = sStartPointers[localId];
}
__syncthreads();
// Compute the sizes of each block.
// Each run's length = start of the NEXT run minus its own start; the
// same boundary tests as above identify where a run ends.
if((localId > 0) && (sRadix1[localId] != sRadix1[localId - 1]) )
{
sStartPointers[sRadix1[localId - 1]] =
localId - sStartPointers[sRadix1[localId - 1]];
}
if(sRadix1[localId + groupSize] != sRadix1[localId + groupSize - 1] )
{
sStartPointers[sRadix1[localId + groupSize - 1]] =
localId + groupSize - sStartPointers[sRadix1[localId +
groupSize - 1]];
}
// The final run ends at the last element; close it out explicitly.
if(localId == groupSize - 1)
{
sStartPointers[sRadix1[2 * groupSize - 1]] =
2 * groupSize - sStartPointers[sRadix1[2 * groupSize - 1]];
}
__syncthreads();
// Store counts transposed (digit-major) so the global scan can stride by totalBlocks.
if(localId < 16)
{
counters[localId * totalBlocks + groupId] = sStartPointers[localId];
}
}
2,988 | #include "system.cuh"
#include <assert.h>
#include <string>
// Release a freshly created job: record its release time and move it
// from Created to Ready.
__device__ __host__ void job::job_release(int time) {
assert(_state == Created);
_release_time = time;
_state = Ready;
}
// Dispatch a Ready job onto a processor (Ready -> Running).
__device__ __host__ void job::activate() {
assert(_state == Ready);
_state = Running;
}
// Preempt a Running job back to the ready queue (Running -> Ready).
__device__ __host__ void job::preempt() {
assert(_state == Running);
_state = Ready;
}
// Record the completion (response) time of a job that has already reached
// Completed via run(); the state itself is not changed here.
__device__ __host__ void job::terminate(int time)
{
assert(_state == Completed);
_response_time = time;
// Deadline-miss reaction is not implemented yet.
if (missed())
{
//TODO: add simulator function
}
}
// Absolute deadline = release time + the owning task's relative deadline.
__device__ __host__ int job::get_deadline() {
return _task->_relative_deadline + _release_time;
}
// Remaining execution budget: task WCET minus cycles already executed.
__device__ __host__ int job::get_remain() { return _task->_execution_time - _executed_cycle; }
// True once run() has driven the job to the Completed state.
__device__ __host__ bool job::completed() { return _state == Completed; }
// A job has missed only once it is completed AND its recorded response
// time exceeds its absolute deadline; incomplete jobs never report a miss.
__device__ __host__ bool job::missed() {
    return completed() && _response_time > get_deadline();
}
// Execute one cycle of a Running job; transitions to Completed when the
// execution budget is exhausted.
__device__ void job::run()
{
assert(_state == Running);
_executed_cycle++;
if (get_remain() == 0)
{
_state = Completed;
}
}
// Accessor for the job's current lifecycle state.
STATE job::get_state() { return _state; }
2,989 | #include <stdlib.h>
#include <stdio.h>
// Element-wise c = a + b over the first n elements; one element per thread,
// with a guard for the partial block at the tail of the grid.
__global__ void vector_add(const float *a, const float *b, float *c, const size_t n){
  const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if(idx >= n)
    return;
  c[idx] = a[idx] + b[idx];
}
int main(){
    // Adds two 2^20-element vectors on the GPU and prints the first ten sums.
    // Fixes vs. the original:
    //  - cudaMalloc does NOT promise to leave the pointer NULL on failure,
    //    so the allocation check now tests the returned cudaError_t;
    //  - kernel-launch errors are polled via cudaGetLastError();
    //  - the host-allocation failure path frees whatever did get allocated.
    const int num_elements = 1<<20;
    const int num_bytes = num_elements*sizeof(float);
    float *device_array_a = 0;
    float *device_array_b = 0;
    float *device_array_c = 0;
    float *host_array_a = 0;
    float *host_array_b = 0;
    float *host_array_c = 0;
    // malloc the host arrays
    host_array_a = (float*)malloc(num_bytes);
    host_array_b = (float*)malloc(num_bytes);
    host_array_c = (float*)malloc(num_bytes);
    if((host_array_c == NULL)||(host_array_b == NULL)||(host_array_a == NULL)){
        printf("couldnt allocate that much memory\n");
        free(host_array_a);   // free(NULL) is a no-op, so this is safe
        free(host_array_b);
        free(host_array_c);
        exit(1);
    }
    cudaError_t err_a = cudaMalloc((void**)&device_array_a, num_bytes);
    cudaError_t err_b = cudaMalloc((void**)&device_array_b, num_bytes);
    cudaError_t err_c = cudaMalloc((void**)&device_array_c, num_bytes);
    if((err_a != cudaSuccess)||(err_b != cudaSuccess)||(err_c != cudaSuccess)){
        printf("couldnt allocate that much memory for the GPU\n");
        exit(1);
    }
    // inputs: a[i] = i, b[i] = uniform random in [0, 1)
    int i;
    for(i = 0; i< num_elements; i++){
        host_array_a[i]= (float)i;
        host_array_b[i] = rand()/((float)RAND_MAX+1.0);
    }
    cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(device_array_b, host_array_b, num_bytes, cudaMemcpyHostToDevice);
    // ceil-divide so a partial tail block is still launched
    const size_t block_size = 256;
    size_t grid_size = num_elements/block_size;
    if(num_elements%block_size) ++grid_size;
    vector_add<<<grid_size, block_size>>>(device_array_a, device_array_b, device_array_c, num_elements);
    // launch-configuration errors surface only through cudaGetLastError()
    cudaError_t launch_err = cudaGetLastError();
    if(launch_err != cudaSuccess){
        printf("kernel launch failed: %s\n", cudaGetErrorString(launch_err));
        exit(1);
    }
    // blocking copy also synchronizes with the kernel before we read results
    cudaMemcpy(host_array_c, device_array_c, num_bytes, cudaMemcpyDeviceToHost);
    for(i = 0; i< 10; i++)
        printf("result %d: %1.1f + %7.1f = %7.1f\n",i,host_array_a[i], host_array_b[i], host_array_c[i]);
    free(host_array_a);
    free(host_array_b);
    free(host_array_c);
    cudaFree(device_array_a);
    cudaFree(device_array_b);
    cudaFree(device_array_c);
    return 0;
}
2,990 | #include <fstream>
#include "native_kernel.h"
int main(int argc, char **argv)
{
    // Batch gradient descent for linear regression on the GPU.
    // Reads D (features), N (samples), X (stored feature-major: feature j of
    // sample i at xvalues[j*N + i]), y, the true weights and the initial
    // weights from ./values.txt, then runs `niter` update steps.
    //
    // Fixes vs. the original:
    //  - argc is checked before argv[1]/argv[2] are dereferenced;
    //  - each iteration used to cudaMemcpyToSymbol the HOST `weights`
    //    array, which is never modified, so constant memory (and the
    //    residuals) never changed across iterations; the UPDATED device
    //    weights are now copied into the symbol (device-to-device);
    //  - the unused `tempa` device allocation is removed, and all
    //    host/device allocations are released; main returns a status.
    if (argc < 3)
    {
        printf("usage: %s niter learning_rate\n", argv[0]);
        return 1;
    }
    std::ifstream values;
    values.open("./values.txt");
    int D,N;
    int niter = atoi(argv[1]);
    float learn = atof(argv[2]);
    float *xvalues,*y_actual,*real_weights,*weights;
    values>>D>>N;
    printf("N = %d D = %d",N,D);
    xvalues = new float [D*N];
    for(int i = 0 ; i<N;i++)
    {
        for (int j = 0; j<D;j++)
        {
            values>>xvalues[j*N + i];
        }
    }
    y_actual = new float[N];
    for(int i = 0; i<N;i++)
    {
        values>>y_actual[i];
    }
    real_weights = new float[D];
    for(int i = 0; i<D;i++)
    {
        values>>real_weights[i];
    }
    weights = new float[D];
    for(int i = 0; i<D;i++)
    {
        values>>weights[i];
    }
    float *d_xval, *d_yval, *d_weights, *d_error, *h_error, *d_gradient;
    cudaMalloc((void**)&d_xval,sizeof(float)*D*N);
    cudaMalloc((void**)&d_weights,sizeof(float)*D);
    cudaMalloc((void**)&d_gradient,sizeof(float)*D);
    cudaMalloc((void**)&d_yval,sizeof(float)*N);
    cudaMalloc((void**)&d_error,sizeof(float)*N);
    cudaMemcpy(d_xval,xvalues,sizeof(float)*D*N,cudaMemcpyHostToDevice);
    cudaMemcpy(d_yval,(void *)y_actual,sizeof(float)*N,cudaMemcpyHostToDevice);
    cudaMemcpy(d_weights,(void *)weights,sizeof(float)*D,cudaMemcpyHostToDevice);
    for (int count = 0; count < niter; count++)
    {
        // publish the CURRENT device-side weights to constant memory
        cudaMemcpyToSymbol(c_weights,d_weights,sizeof(float)*D,0,cudaMemcpyDeviceToDevice);
        // residuals: error_i = <w, x_i> - y_i
        getdotError<<<iDivUp(N,1024),1024>>>(N,D,d_xval,d_yval,d_error);
        // gradient_j = reduce over samples of x_ij * error_i
        for(int i = 0; i<D;i++)
            better_reduce_kernel<256><<<20,256,256*sizeof(float)>>>(d_xval + i*N,d_error,d_gradient+i,N,D);
        update_weights<<<iDivUp(D,128),128>>> (d_weights,d_gradient,learn);
        printf("Error = \n");
    }
    h_error = new float[N];
    cudaMemcpy(h_error,d_error,sizeof(float)*N,cudaMemcpyDeviceToHost);
    cudaFree(d_xval);
    cudaFree(d_weights);
    cudaFree(d_gradient);
    cudaFree(d_yval);
    cudaFree(d_error);
    delete[] xvalues;
    delete[] y_actual;
    delete[] real_weights;
    delete[] weights;
    delete[] h_error;
    return 0;
}
|
2,991 | // This example introduces CUDA's abstraction of data parallel computational
// "kernels", or __global__ functions. A __global__ function acts like the
// main() function of a GPU program, and is allowed to manipulate device
// memory directly.
#include <stdlib.h>
#include <stdio.h>
// "kernels" or __global__ functions are the entry points to code that executes on the GPU
// The keyword __global__ indicates to the compiler that this function is a GPU entry point.
// __global__ functions must return void, and may only be called or "launched" from code that
// executes on the CPU.
// GPU entry point: every thread writes the constant 7 into the array slot
// matching its global thread index. The caller must launch exactly one
// thread per element (no bounds parameter is available to guard with).
__global__ void kernel(int *array)
{
  // global index = this block's offset plus the thread's offset inside it
  const int index = blockDim.x * blockIdx.x + threadIdx.x;
  array[index] = 7;
}
int main(void)
{
    // Fills a 256-element device array with 7 on the GPU, copies it back
    // and prints every element.
    // Fixes vs. the original:
    //  - the allocation-failure path now releases whichever allocation
    //    succeeded before returning;
    //  - the launch is checked via cudaGetLastError() (kernel launches do
    //    not report errors directly);
    //  - main returns an explicit status in every path.
    int num_elements = 256;
    int num_bytes = num_elements * sizeof(int);
    // pointers to host & device arrays
    int *device_array = 0;
    int *host_array = 0;
    host_array = (int*)malloc(num_bytes);
    cudaMalloc((void**)&device_array, num_bytes);
    if(host_array == 0 || device_array == 0)
    {
        printf("couldn't allocate memory\n");
        free(host_array);        // both calls tolerate null/0 pointers
        cudaFree(device_array);
        return 1;
    }
    // 128 threads (4 warps) per block is a good default; 256 elements
    // divide evenly into 2 blocks, so no tail guard is needed here.
    int block_size = 128;
    int grid_size = num_elements / block_size;
    kernel<<<grid_size,block_size>>>(device_array);
    if(cudaGetLastError() != cudaSuccess)
    {
        printf("kernel launch failed\n");
        free(host_array);
        cudaFree(device_array);
        return 1;
    }
    // the blocking copy synchronizes with the kernel before we read results
    cudaMemcpy(host_array, device_array, num_bytes, cudaMemcpyDeviceToHost);
    for(int i=0; i < num_elements; ++i)
    {
        printf("%d ", host_array[i]);
    }
    printf("\n");
    // deallocate memory
    free(host_array);
    cudaFree(device_array);
    return 0;
}
|
2,992 | #include<stdlib.h>
#include<stdio.h>
/* The purpose of these microkernels is to
offer the user a sanity check. These microkernels
take the exact same parameters as their "real"
implementations and perform simple modifications
so the user can be sure the kernel is unpacking
and modifying the parameters the correct way. */
// Sanity-check microkernel: unpacks the same packed parameter block as the
// real compute kernel and applies trivial +1 updates so the caller can
// verify the unpacking layout.
__device__ void ComputeTest(void* params){
//Params| &table | offset |
//Bytes | 8 | 4 |
void *table = *((void**)params);
int offset = *((int*)(((void**)params)+1));
//Extract all the values.
// Table layout (as decoded here): two leading ints np (particles) and nd
// (dimensions), then double arrays. NOTE(review): `pos = mass + 1` skips
// only ONE double past mass -- presumably a single shared mass value, not
// a per-particle array; confirm against the packing code.
int np = *((int*) table);
int nd = *(((int*) table)+1);
int size = np * nd;
double *mass = (double*)(((int*)table)+2);
double *pos = mass + 1;
double *vel = pos + size;
double *acc = vel + size;
double *f = acc + size;
double *pe = f + size;
double *ke = pe + size;
int j;
// lane index within the warp; each lane handles particle (offset + lane)
int tid = threadIdx.x % 32;
int k = offset + tid;
//Compute all the potential energy and forces.
// NOTE(review): index = j + k*nd mixes a particle loop index j with a
// dimension stride nd -- looks transposed relative to the usual
// j*nd + dim layout; confirm this matches the real kernel's indexing.
for(j=0; j<np; j++){
if(k == j){ continue; }
int index = j + k *nd;
f[index] += 1;
pe[index] += 1;
ke[index] += 1;
}
}
|
2,993 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#define SIZE 1024
// Element-wise c = a + b over the first n elements, guarded by n.
// Generalized: the original computed the index from threadIdx.x alone, so
// only a single block's worth of elements could ever be processed. Using
// the full global index is backward compatible (blockIdx.x == 0 reproduces
// the old behavior) and supports multi-block launches.
__global__ void VectoAdd(int *a, int *b, int *c, int n)
{
	int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i<n)
	{
		c[i] = a[i] + b[i];
	}
}
// dout[i] = din[i]^2, one element per thread.
// Generalized to the standard global index so multi-block launches address
// distinct elements; a single-block launch (as used by main below) is
// unchanged since blockIdx.x == 0. No length parameter exists, so the
// caller must still launch exactly one thread per element.
__global__ void square(float *dout, float* din)
{
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	float f = din[idx];
	dout[idx] = f*f;
}
int main()
{
	// Squares 64 floats on the GPU and prints the results, four per line.
	const int ARRAY_SIZE = 64;
	const int ARRAY_BITES = ARRAY_SIZE*sizeof(float);
	// host-side input and output buffers
	float hin[ARRAY_SIZE];
	float hout[ARRAY_SIZE];
	for (int i = 0; i < ARRAY_SIZE; i++)
		hin[i] = float(i);
	// device buffers
	float* din = 0;
	float* dout = 0;
	cudaMalloc((void**)&din, ARRAY_BITES);
	cudaMalloc((void**)&dout, ARRAY_BITES);
	// upload, square in a single block, download
	cudaMemcpy(din,hin, ARRAY_BITES, cudaMemcpyHostToDevice);
	square<<<1,ARRAY_SIZE>>>(dout,din);
	cudaMemcpy(hout,dout, ARRAY_BITES, cudaMemcpyDeviceToHost);
	// print four values per row
	for (int i = 0; i < ARRAY_SIZE; i++)
	{
		printf("%f", hout[i]);
		printf(((i%4) !=3)? "\t":"\n");
	}
	// release device memory
	cudaFree(din);
	cudaFree(dout);
	return 0;
}
|
2,994 | #include "includes.h"
// Deliberately empty kernel body.
// NOTE(review): SOURCE gives no purpose -- presumably a warm-up /
// launch-overhead measurement stub (the commented-out shared-memory
// declaration suggests a planned filtering kernel); confirm with callers.
__global__ void EmptyKernel() {
//extern __shared__ thrust::complex<float> filter_products[];
}
2,995 | #include "includes.h"
// SGD-style update of a linear layer's parameters:
//   W -= lr * (dZ * A^T) / nColsdZ, and (by the column-0 threads)
//   b -= lr * rowsum(dZ) / nColsdZ.
// Thread mapping: row indexes dZ rows (output units), col indexes A rows
// (input units); each thread owns one W entry.
// Memory layout implied by the indexing: dZ is (nRowsdZ x nColsdZ) row-major
// and A is (nRowsA x nColsdZ) row-major, i.e. the A^T product is formed by
// walking both matrices along their shared nColsdZ (batch) dimension.
__global__ void UpdateParamsLinear(float *dZ, float *A, int nRowsdZ, int nColsdZ, int nRowsA, float lr, float *W, float *b)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float dWValue = 0, dbValue = 0;
if (row < nRowsdZ && col < nRowsA)
{
// dW[row][col] = sum over the batch of dZ[row][i] * A[col][i]
for (int i = 0; i < nColsdZ; i++)
{
dWValue += dZ[row * nColsdZ + i] * A[col * nColsdZ + i];
}
W[row * nRowsA + col] = W[row * nRowsA + col] - lr * dWValue / nColsdZ;
// Only one thread per row (col == 0) updates that row's bias.
if (col == 0)
{
for (int i = 0; i < nColsdZ; i++)
{
dbValue += dZ[row * nColsdZ + i];
}
b[row] = b[row] - lr * dbValue / nColsdZ;
}
}
}
2,996 | #include "includes.h"
// Element-wise out = xs + ys for the first N elements; one element per
// thread with an early-exit guard for the grid tail.
__global__ void VecAdd(const float *xs, const float *ys, float *out, const unsigned int N)
{
	const unsigned int i = blockDim.x * blockIdx.x + threadIdx.x;
	if (i >= N)
		return;
	out[i] = xs[i] + ys[i];
}
2,997 | #include <stdio.h>
// Abort the process with a diagnostic when a CUDA runtime call failed;
// no-op on cudaSuccess.
void handle_error( cudaError_t error, const char* message)
{
  if (error == cudaSuccess)
    return;
  fprintf(stderr,"ERROR: %s : %s\n",message,cudaGetErrorString(error));
  exit(-1);
}
// Tree-style shared-memory sum reduction: *result = sum of array[0..blockDim.x).
// Launch contract (from the code): single block, dynamic shared memory of
// blockDim.x * sizeof(int), one array element per thread.
// NOTE(review): the halving loop requires blockDim.x to be a power of two
// (odd sizes silently drop elements) -- confirm callers guarantee this.
__global__ void reduce_add (int * array, int * result){
// Here's how to do an O(ln N) reduce in pure
// low-level CUDA.
extern __shared__ int cache[];
int thread_id=threadIdx.x;
// Stage this thread's element into shared memory.
cache[thread_id]=array[thread_id];
int reduction_index=blockDim.x/2;
// Barrier: all stores must land before any thread reads a partner's slot.
__syncthreads();
while (reduction_index!=0){
// Lower half accumulates the upper half, halving the active range each pass.
if (thread_id<reduction_index){
cache[thread_id]+=cache[thread_id+reduction_index];
}
__syncthreads();
reduction_index/=2;
}
// Thread 0 holds the final total.
if (thread_id==0) {
*result=cache[0];
}
}
|
2,998 | #include <stdio.h>
#include <math.h>
// One Jacobi step of the heat/Laplace equation on a 2-D grid:
// each interior cell becomes the average of its four neighbors
// (missing neighbors at the edges simply contribute nothing);
// the x == 0 column is clamped to the Dirichlet boundary value 1.
// Thread mapping: one thread per cell over the full blockDim * gridDim extent.
__global__ void heat_step(float * d_out, float * d_in)
{
// int block_x = blockIdx.x;
// int block_y = blockIdx.y;
int x_glob;
int y_glob;
// full grid extent in each axis (the launch must tile the array exactly)
int x_total_dim = blockDim.x * gridDim.x;
int y_total_dim = blockDim.y * gridDim.y;
int location;
x_glob = blockDim.x * blockIdx.x + threadIdx.x;
y_glob = blockDim.y * blockIdx.y + threadIdx.y;
location = y_glob * x_total_dim + x_glob;
// accumulate 1/4 of each in-bounds neighbor
d_out[location] = 0;
if (x_glob > 0)
{
d_out[location] += 0.25 * d_in[location - 1];
}
if (x_glob < (x_total_dim - 1))
{
d_out[location] += 0.25 * d_in[location + 1];
}
if (y_glob > 0)
{
d_out[location] += 0.25 * d_in[location - x_total_dim];
}
if (y_glob < (y_total_dim - 1))
{
d_out[location] += 0.25 * d_in[location + x_total_dim];
}
// fixed boundary condition: the left edge is held at 1
if (x_glob == 0)
{
d_out[location] = 1;
}
}
int main()
{
    // Iterates the heat_step stencil on a 200 x 200 grid and writes the
    // final field to out_laplace.txt.
    //
    // Fix vs. the original: heat_step's signature is (output, input), but
    // the first launch passed (d_in, d_out), so the very first step read
    // the UNINITIALIZED d_out buffer (and the zero-filled d_in was used as
    // the output). The ping-pong order is now d_in -> d_out -> d_in, and
    // the result is copied from d_in, which holds the state after the last
    // (even-numbered) step.
    const int N=200;
    const int M=200;
    const int ARRAY_SIZE = N * M;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
    const int Niter = 1000;
    size_t counter = 0;
    FILE * writefile;
    writefile=fopen("out_laplace.txt", "w");
    // initial condition: everything zero (heat_step pins x == 0 to 1)
    float h_start[ARRAY_SIZE];
    for(int i=0; i<ARRAY_SIZE; i++)
    {
        h_start[i] = 0;
    }
    float h_out[ARRAY_SIZE];
    float * d_in;
    float * d_out;
    cudaMalloc((void **) &d_in, ARRAY_BYTES);
    cudaMalloc((void **) &d_out, ARRAY_BYTES);
    cudaMemcpy(d_in, h_start, ARRAY_BYTES, cudaMemcpyHostToDevice);
    while (counter<Niter)
    {
        // ping-pong: write d_out from d_in, then d_in from d_out
        heat_step<<<dim3(10,10), dim3(N/10,M/10)>>>(d_out, d_in);
        heat_step<<<dim3(10,10), dim3(N/10,M/10)>>>(d_in, d_out);
        counter=counter+2;
    }
    // after an even number of steps the latest state lives in d_in
    cudaMemcpy(h_out, d_in, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    for(int i=0; i<N; i++)
    {
        for(int j=0; j<M; j++)
        {
            fprintf(writefile,"%e\t", h_out[i * M + j]);
        }
        fprintf(writefile, "\n");
    }
    fclose(writefile);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}
|
2,999 | #include<iostream>
// Writes A_t = transpose(A) for an N x N row-major matrix,
// one element per thread, with a tail guard on both axes.
__global__ void transKernel(int *A, int *A_t, int N){
	const int col = threadIdx.x + blockIdx.x*blockDim.x;
	const int row = threadIdx.y + blockIdx.y*blockDim.y;
	if(col >= N || row >= N)
		return;
	A_t[col*N + row] = A[row*N + col];
}
int main(){
	// Transposes a random 256 x 256 int matrix on the GPU.
	//
	// Fixes vs. the original:
	//  - the launch configuration was <<<threadsPerBlock, blocksPerGrid>>>;
	//    the FIRST chevron argument is the grid and the SECOND is the block.
	//    The swapped form only worked by numeric coincidence (32x32 = 1024
	//    threads, exactly the per-block limit) and breaks for other N;
	//  - the two N*N matrices (~256 KB each) are heap-allocated instead of
	//    consuming ~512 KB of stack.
	const int N = 256;
	const int size = N*N*sizeof(int);
	int *A  = new int[N*N];
	int *At = new int[N*N];
	for(int i = 0; i < N; i++){
		for(int j = 0; j < N; j++){
			A[i*N + j] = rand()%10;
		}
	}
	int *d_A, *d_At;
	cudaMalloc((void **)&d_A, size);
	cudaMalloc((void **)&d_At, size);
	cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
	dim3 threadsPerBlock (8,8);
	dim3 blocksPerGrid (N/8, N/8);
	// grid first, block second
	transKernel<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_At, N);
	cudaMemcpy(At, d_At, size, cudaMemcpyDeviceToHost);
	cudaFree(d_A);
	cudaFree(d_At);
	delete[] A;
	delete[] At;
	return 0;
}
|
3,000 | #include "includes.h"
// Accumulates running means over the n_since_out samples seen so far using
// the incremental formula  mean_new = (mean_old * (k-1) + sample) / k  for
// pressure, density (Rho), horizontal momentum (Mh, 3 components per cell)
// and vertical momentum (Wh, nv+1 interface levels per column).
// Thread mapping (from the indexing): id = flat column index guarded by
// num; blockIdx.y = vertical level, gridDim.y = nv levels.
// Fix vs. the original: the Mh x-component line read
//   `+ Mh_d[3*id*nv + 3*lev] + 0)` -- the `+ 0` sat OUTSIDE the subscript.
// Numerically harmless (it adds zero and index +0 is the same slot), but
// the bracket is now placed inside to match the +1/+2 components.
__global__ void update_mean(double* pressure_mean_d, double* pressure_d, double* Rho_mean_d, double* Rho_d, double* Mh_mean_d, double* Mh_d, double* Wh_mean_d, double* Wh_d, int n_since_out, int num) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    int nv = gridDim.y;
    int lev = blockIdx.y;
    if (id < num) {
        pressure_mean_d[id * nv + lev] =
            1.0 / n_since_out
            * (pressure_mean_d[id * nv + lev] * (n_since_out - 1) + pressure_d[id * nv + lev]);
        Rho_mean_d[id * nv + lev] =
            1.0 / n_since_out
            * (Rho_mean_d[id * nv + lev] * (n_since_out - 1) + Rho_d[id * nv + lev]);
        Mh_mean_d[3 * id * nv + 3 * lev + 0] =
            1.0 / n_since_out
            * (Mh_mean_d[3 * id * nv + 3 * lev + 0] * (n_since_out - 1)
               + Mh_d[3 * id * nv + 3 * lev + 0]);
        Mh_mean_d[3 * id * nv + 3 * lev + 1] =
            1.0 / n_since_out
            * (Mh_mean_d[3 * id * nv + 3 * lev + 1] * (n_since_out - 1)
               + Mh_d[3 * id * nv + 3 * lev + 1]);
        Mh_mean_d[3 * id * nv + 3 * lev + 2] =
            1.0 / n_since_out
            * (Mh_mean_d[3 * id * nv + 3 * lev + 2] * (n_since_out - 1)
               + Mh_d[3 * id * nv + 3 * lev + 2]);
        Wh_mean_d[id * (nv + 1) + lev] =
            1.0 / n_since_out
            * (Wh_mean_d[id * (nv + 1) + lev] * (n_since_out - 1) + Wh_d[id * (nv + 1) + lev]);
        // The topmost level also folds in the extra (nv+1)-th interface.
        if (lev == nv - 1) {
            Wh_mean_d[id * (nv + 1) + lev + 1] =
                1.0 / n_since_out
                * (Wh_mean_d[id * (nv + 1) + lev + 1] * (n_since_out - 1)
                   + Wh_d[id * (nv + 1) + lev + 1]);
        }
    }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.