serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
19,701 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
// Matrix dimensions (row-major storage).
const int N = 3;        // number of rows
const int M = 4;        // number of columns
const int SIZE = M * N; // total element count
// Element-wise matrix addition c = a + b for a rows x cols matrix in
// row-major layout. Launch with a 2-D grid covering (cols, rows).
__global__ void matrixAdd(int *c, const int *a, const int *b, int cols, int rows)
{
    const int y = blockIdx.y * blockDim.y + threadIdx.y; // row index
    const int x = blockIdx.x * blockDim.x + threadIdx.x; // column index
    // Guard: grids rarely divide the matrix evenly.
    if (y < rows && x < cols)
    {
        const int idx = y * cols + x; // flattened row-major offset
        c[idx] = a[idx] + b[idx];
    }
}
// Fills two N x M matrices with random digits, adds them on the GPU and
// prints the flattened result. Returns 0 on success, 1 on host OOM.
int main()
{
    // Seed the host RNG used to fill the input matrices.
    srand((unsigned)time(NULL));
    int a[N][M], b[N][M], *c;
    int *dev_a, *dev_b, *dev_c;
    c = (int *)malloc(SIZE * sizeof(int));
    if (c == NULL)
    {
        fprintf(stderr, "host malloc failed\n");
        return 1;
    }
    cudaMalloc((void **)&dev_a, SIZE * sizeof(int));
    cudaMalloc((void **)&dev_b, SIZE * sizeof(int));
    cudaMalloc((void **)&dev_c, SIZE * sizeof(int));
    // Fill the operands with small random values.
    for (int row = 0; row < N; ++row)
        for (int col = 0; col < M; ++col)
        {
            a[row][col] = rand() % 10;
            b[row][col] = rand() % 10;
        }
    cudaMemcpy(dev_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice);
    dim3 dimBlock(M, N);
    // BUG FIX: the original used (int)ceil(M / dimBlock.x); M / dimBlock.x is
    // INTEGER division, so ceil() was a no-op and any remainder was dropped.
    // Use the standard integer ceil-division idiom instead.
    dim3 dimGrid((M + dimBlock.x - 1) / dimBlock.x, (N + dimBlock.y - 1) / dimBlock.y);
    matrixAdd <<< dimGrid, dimBlock >>>(dev_c, dev_a, dev_b, M, N);
    // Surface launch-configuration errors; the blocking memcpy below also
    // synchronizes with the kernel before the result is read.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(c, dev_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < SIZE; ++i)
        printf("c[%d] = %d\n", i, c[i]);
    // Free the Host array memory
    free(c);
    // Free the Device array memory
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
19,702 | /*
3D IoU Calculation and Rotated NMS(modified from 2D NMS written by others)
Written by Shaoshuai Shi
All Rights Reserved 2018.
*/
#include <stdio.h>
#define THREADS_PER_BLOCK 16
#define DIVUP(m, n) ((m) / (n) + ((m) % (n) > 0))
#define DEBUG
const int THREADS_PER_BLOCK_NMS = sizeof(unsigned long long) * 8;
const float EPS = 1e-8;
// Minimal 2-D point with the vector arithmetic needed by the polygon
// clipping code below.
struct Point {
    float x, y;
    __device__ Point() {}
    __device__ Point(float _x, float _y) : x(_x), y(_y) {}
    // Overwrites both coordinates in place.
    __device__ void set(float _x, float _y) {
        x = _x;
        y = _y;
    }
    // Component-wise vector addition.
    __device__ Point operator +(const Point &b) const {
        return Point(x + b.x, y + b.y);
    }
    // Component-wise vector subtraction.
    __device__ Point operator -(const Point &b) const {
        return Point(x - b.x, y - b.y);
    }
};
// 2-D cross product: the z component of a x b.
__device__ inline float cross(const Point &a, const Point &b){
    return a.x * b.y - b.x * a.y;
}
// Orientation test: cross product of (p1 - p0) and (p2 - p0).
__device__ inline float cross(const Point &p1, const Point &p2, const Point &p0){
    float ax = p1.x - p0.x, ay = p1.y - p0.y;
    float bx = p2.x - p0.x, by = p2.y - p0.y;
    return ax * by - bx * ay;
}
// Returns 1 when the axis-aligned bounding boxes of segments (p1,p2)
// and (q1,q2) overlap — a cheap pre-filter before the exact test.
__device__ int check_rect_cross(const Point &p1, const Point &p2, const Point &q1, const Point &q2){
    bool overlap_x = min(p1.x, p2.x) <= max(q1.x, q2.x) &&
                     min(q1.x, q2.x) <= max(p1.x, p2.x);
    bool overlap_y = min(p1.y, p2.y) <= max(q1.y, q2.y) &&
                     min(q1.y, q2.y) <= max(p1.y, p2.y);
    return overlap_x && overlap_y;
}
// Tests whether point p lies inside the rotated box (5) [x1, y1, x2, y2,
// angle] (with a small tolerance). The point is rotated about the box
// center so the test reduces to an axis-aligned containment check.
__device__ inline int check_in_box2d(const float *box, const Point &p){
    //params: box (5) [x1, y1, x2, y2, angle]
    const float MARGIN = 1e-5;
    float center_x = (box[0] + box[2]) / 2;
    float center_y = (box[1] + box[3]) / 2;
    // Use the single-precision overloads: box[4] is a float, and the double
    // cos()/sin() used originally forced float->double->float round trips.
    float angle_cos = cosf(box[4]), angle_sin = sinf(box[4]); // rotate the point in the opposite direction of box
    float rot_x = (p.x - center_x) * angle_cos + (p.y - center_y) * angle_sin + center_x;
    float rot_y = -(p.x - center_x) * angle_sin + (p.y - center_y) * angle_cos + center_y;
    return (rot_x > box[0] - MARGIN && rot_x < box[2] + MARGIN && rot_y > box[1] - MARGIN && rot_y < box[3] + MARGIN);
}
// Computes the intersection of segments (p0, p1) and (q0, q1).
// Returns 1 and stores the crossing point in `ans` when the segments
// properly cross; returns 0 otherwise.
__device__ inline int intersection(const Point &p1, const Point &p0, const Point &q1, const Point &q0, Point &ans){
// fast exclusion
if (check_rect_cross(p0, p1, q0, q1) == 0) return 0;
// check cross standing
float s1 = cross(q0, p1, p0);
float s2 = cross(p1, q1, p0);
float s3 = cross(p0, q1, q0);
float s4 = cross(q1, p1, q0);
// Endpoints of each segment must lie strictly on opposite sides of the
// other segment's supporting line for a proper crossing.
if (!(s1 * s2 > 0 && s3 * s4 > 0)) return 0;
// calculate intersection of two lines
float s5 = cross(q1, p1, p0);
if(fabs(s5 - s1) > EPS){
// Interpolate along (q0, q1) using the signed areas s1 and s5.
ans.x = (s5 * q0.x - s1 * q1.x) / (s5 - s1);
ans.y = (s5 * q0.y - s1 * q1.y) / (s5 - s1);
}
else{
// Near-degenerate fallback: solve the two implicit line equations
// a*x + b*y + c = 0 directly.
// NOTE(review): D is not guarded against ~0 (parallel lines); the
// strict-crossing test above presumably excludes that case — confirm.
float a0 = p0.y - p1.y, b0 = p1.x - p0.x, c0 = p0.x * p1.y - p1.x * p0.y;
float a1 = q0.y - q1.y, b1 = q1.x - q0.x, c1 = q0.x * q1.y - q1.x * q0.y;
float D = a0 * b1 - a1 * b0;
ans.x = (b0 * c1 - b1 * c0) / D;
ans.y = (a1 * c0 - a0 * c1) / D;
}
return 1;
}
// Rotates p in place about `center`, using the precomputed cosine/sine of
// the rotation angle (same rotation convention as check_in_box2d).
__device__ inline void rotate_around_center(const Point &center, const float angle_cos, const float angle_sin, Point &p){
    float dx = p.x - center.x;
    float dy = p.y - center.y;
    p.set(dx * angle_cos + dy * angle_sin + center.x,
          -dx * angle_sin + dy * angle_cos + center.y);
}
// Compares two points by polar angle around `center` (used to order the
// clipped polygon's vertices).
__device__ inline int point_cmp(const Point &a, const Point &b, const Point &center){
    float angle_a = atan2(a.y - center.y, a.x - center.x);
    float angle_b = atan2(b.y - center.y, b.x - center.x);
    return angle_a > angle_b;
}
// Computes the overlap area of two rotated rectangles by explicit polygon
// clipping: collect all edge-edge intersection points plus each box's
// corners that lie inside the other box, sort them by angle around their
// centroid, then integrate the polygon area with the shoelace formula.
__device__ inline float box_overlap(const float *box_a, const float *box_b){
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
// The angles are negated here; rotate_around_center applies the matching
// inverse convention used throughout this file.
float a_x1 = box_a[0], a_y1 = box_a[1], a_x2 = box_a[2], a_y2 = box_a[3], a_angle = -box_a[4];
float b_x1 = box_b[0], b_y1 = box_b[1], b_x2 = box_b[2], b_y2 = box_b[3], b_angle = -box_b[4];
Point center_a((a_x1 + a_x2) / 2, (a_y1 + a_y2) / 2);
Point center_b((b_x1 + b_x2) / 2, (b_y1 + b_y2) / 2);
//#ifdef DEBUG
// printf("a: (%.3f, %.3f, %.3f, %.3f, %.3f), b: (%.3f, %.3f, %.3f, %.3f, %.3f)\n", a_x1, a_y1, a_x2, a_y2, a_angle,
// b_x1, b_y1, b_x2, b_y2, b_angle);
// printf("center a: (%.3f, %.3f), b: (%.3f, %.3f)\n", center_a.x, center_a.y, center_b.x, center_b.y);
//#endif
// Axis-aligned corners; slot [4] later duplicates [0] to close the ring.
Point box_a_corners[5];
box_a_corners[0].set(a_x1, a_y1);
box_a_corners[1].set(a_x2, a_y1);
box_a_corners[2].set(a_x2, a_y2);
box_a_corners[3].set(a_x1, a_y2);
Point box_b_corners[5];
box_b_corners[0].set(b_x1, b_y1);
box_b_corners[1].set(b_x2, b_y1);
box_b_corners[2].set(b_x2, b_y2);
box_b_corners[3].set(b_x1, b_y2);
// get oriented corners
float a_angle_cos = cos(a_angle), a_angle_sin = sin(a_angle);
float b_angle_cos = cos(b_angle), b_angle_sin = sin(b_angle);
for (int k = 0; k < 4; k++){
//#ifdef DEBUG
// printf("before corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
//#endif
rotate_around_center(center_a, a_angle_cos, a_angle_sin, box_a_corners[k]);
rotate_around_center(center_b, b_angle_cos, b_angle_sin, box_b_corners[k]);
//#ifdef DEBUG
// printf("corner %d: a(%.3f, %.3f), b(%.3f, %.3f) \n", k, box_a_corners[k].x, box_a_corners[k].y, box_b_corners[k].x, box_b_corners[k].y);
//#endif
}
// Close the rings so edge i is (corner[i], corner[i+1]).
box_a_corners[4] = box_a_corners[0];
box_b_corners[4] = box_b_corners[0];
// get intersection of lines
// Up to 4x4 edge crossings plus up to 8 contained corners -> 16 slots.
Point cross_points[16];
Point poly_center;
int cnt = 0, flag = 0;
poly_center.set(0, 0);
for (int i = 0; i < 4; i++){
for (int j = 0; j < 4; j++){
flag = intersection(box_a_corners[i + 1], box_a_corners[i], box_b_corners[j + 1], box_b_corners[j], cross_points[cnt]);
if (flag){
poly_center = poly_center + cross_points[cnt];
cnt++;
}
}
}
// check corners
// Corners of one box that lie inside the other are also vertices of the
// intersection polygon.
for (int k = 0; k < 4; k++){
if (check_in_box2d(box_a, box_b_corners[k])){
poly_center = poly_center + box_b_corners[k];
cross_points[cnt] = box_b_corners[k];
cnt++;
}
if (check_in_box2d(box_b, box_a_corners[k])){
poly_center = poly_center + box_a_corners[k];
cross_points[cnt] = box_a_corners[k];
cnt++;
}
}
// NOTE(review): cnt can be 0 for disjoint boxes, making these divisions
// 0/0; the resulting NaNs feed the (empty) sort and a zero-iteration area
// loop, so the final return is still 0 — confirm this is relied upon.
poly_center.x /= cnt;
poly_center.y /= cnt;
// sort the points of polygon (bubble sort by angle around the centroid)
Point temp;
for (int j = 0; j < cnt - 1; j++){
for (int i = 0; i < cnt - j - 1; i++){
if (point_cmp(cross_points[i], cross_points[i + 1], poly_center)){
temp = cross_points[i];
cross_points[i] = cross_points[i + 1];
cross_points[i + 1] = temp;
}
}
}
//#ifdef DEBUG
// printf("cnt=%d\n", cnt);
// for (int i = 0; i < cnt; i++){
// printf("All cross point %d: (%.3f, %.3f)\n", i, cross_points[i].x, cross_points[i].y);
// }
//#endif
// get the overlap areas (shoelace formula, fan-triangulated from point 0)
float area = 0;
for (int k = 0; k < cnt - 1; k++){
area += cross(cross_points[k] - cross_points[0], cross_points[k + 1] - cross_points[0]);
}
return fabs(area) / 2.0;
}
// iou_bev => iou3d
// 3-D IoU of two rotated boxes given their BEV footprints and z extents.
// When ignore_height is set, only the 2-D (BEV) overlap is used.
__device__ inline float iou3d(const float *box_a, const float *box_b, const float* box_a_z, const float* box_b_z, bool ignore_height){
// params: box_a (5) [x1, y1, x2, y2, angle]
// params: box_b (5) [x1, y1, x2, y2, angle]
// params: box_a_z (2) [z1min, z1max]
// params: box_b_z (2) [z2min, z2max]
float sa = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1]);
float sb = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1]);
float s_overlap = box_overlap(box_a, box_b);
// compute the height
// BUG FIX: clamp to zero — when the boxes do not overlap along z the raw
// difference is negative, which previously produced a negative overlap
// volume and a nonsensical (negative) IoU.
float intersection_height = fmaxf(fminf(box_a_z[1], box_b_z[1]) - fmaxf(box_a_z[0], box_b_z[0]), 0.0f);
//compute the volume
if (!ignore_height) {
float vol_a = sa * (box_a_z[1] - box_a_z[0]);
float vol_b = sb * (box_b_z[1] - box_b_z[0]);
float vol_overlap = s_overlap * intersection_height;
// Intersection over union, guarded against division by ~0.
return vol_overlap / fmaxf(vol_a + vol_b - vol_overlap, EPS);
} else {
// NOTE(review): denominator is sa + sb (sum of areas), not the union
// sa + sb - s_overlap — confirm this is the intended metric here.
return s_overlap / fmaxf(sa + sb, EPS);
}
}
// All-pairs 3-D IoU: thread (y, x) computes iou3d(boxes_a[y], boxes_b[x])
// and writes it to ans_iou[y * num_b + x]. Launch with 2-D blocks of
// THREADS_PER_BLOCK x THREADS_PER_BLOCK threads tiling (num_b, num_a).
__global__ void boxes_iou_3d_kernel(bool ignore_height, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
// params: num_a : number of boxes in boxes_a
// params: boxes_a (M, 7) [x, y, z, w, l, h, angle]
// params: num_b : number of boxes in boxes_b
// params: boxes_b (N, 7) [x, y, z, w, l, h, angle]
// NOTE(review): the code below reads CENTERS from indices 3..5 and SIZES
// from indices 0..2, i.e. an effective layout of [w, l, h, x, y, z, ry],
// which contradicts the comment above — confirm against the caller.
const int a_idx = blockIdx.y * THREADS_PER_BLOCK + threadIdx.y;
const int b_idx = blockIdx.x * THREADS_PER_BLOCK + threadIdx.x;
// Guard the grid tail.
if (a_idx >= num_a || b_idx >= num_b){
return;
}
const float * cur_box_a = boxes_a + a_idx * 7;
const float * cur_box_b = boxes_b + b_idx * 7;
// Convert each box to the [x1, y1, x2, y2, angle] + [zmin, zmax] form
// expected by iou3d.
float box_a_tmp[5];
float box_b_tmp[5];
float box_a_z_tmp[2];
float box_b_z_tmp[2];
// [x, y, z, w, l ,h, r]
box_a_tmp[0] = cur_box_a[3] - cur_box_a[0] / 2; // x1,
box_a_tmp[1] = cur_box_a[4] - cur_box_a[1] / 2; // y1
box_a_tmp[2] = cur_box_a[3] + cur_box_a[0] / 2; // x2
box_a_tmp[3] = cur_box_a[4] + cur_box_a[1] / 2; // y2
box_a_tmp[4] = cur_box_a[6]; // ry
box_a_z_tmp[0] = cur_box_a[5] - cur_box_a[2] / 2; // z1min
box_a_z_tmp[1] = cur_box_a[5] + cur_box_a[2] / 2; // z1max
box_b_tmp[0] = cur_box_b[3] - cur_box_b[0] / 2; // x1,
box_b_tmp[1] = cur_box_b[4] - cur_box_b[1] / 2; // y1
box_b_tmp[2] = cur_box_b[3] + cur_box_b[0] / 2; // x2
box_b_tmp[3] = cur_box_b[4] + cur_box_b[1] / 2; // y2
box_b_tmp[4] = cur_box_b[6]; // ry
box_b_z_tmp[0] = cur_box_b[5] - cur_box_b[2] / 2; // z1min
box_b_z_tmp[1] = cur_box_b[5] + cur_box_b[2] / 2; // z1max
float cur_iou_3d = iou3d( &box_a_tmp[0], &box_b_tmp[0], &box_a_z_tmp[0], &box_b_z_tmp[0], ignore_height);
ans_iou[a_idx * num_b + b_idx] = cur_iou_3d;
}
// Rotated-NMS suppression-mask kernel. Each block compares a row tile of
// up to THREADS_PER_BLOCK_NMS boxes (blockIdx.y) against a column tile
// (blockIdx.x). Thread t handles row box t and emits one 64-bit mask word
// whose bit i is set when column box i overlaps it (3-D IoU) above
// nms_overlap_thresh. Launch with 1-D blocks of THREADS_PER_BLOCK_NMS.
__global__ void nms3d_kernel(const int boxes_num, const float nms_overlap_thresh,
const float *boxes, unsigned long long *mask){
//params: boxes (N, 7) [x, y, z, w, l ,h, ry] z-up coordinate system
//params: mask (N, N/THREADS_PER_BLOCK_NMS)
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
// Clip tile sizes at the tail of the box array.
const int row_size = fminf(boxes_num - row_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
const int col_size = fminf(boxes_num - col_start * THREADS_PER_BLOCK_NMS, THREADS_PER_BLOCK_NMS);
// Stage the column tile's raw 7-float boxes in shared memory.
__shared__ float block_boxes[THREADS_PER_BLOCK_NMS * 7];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 7 + 0] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 0];
block_boxes[threadIdx.x * 7 + 1] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 1];
block_boxes[threadIdx.x * 7 + 2] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 2];
block_boxes[threadIdx.x * 7 + 3] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 3];
block_boxes[threadIdx.x * 7 + 4] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 4];
block_boxes[threadIdx.x * 7 + 5] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 5];
block_boxes[threadIdx.x * 7 + 6] = boxes[(THREADS_PER_BLOCK_NMS * col_start + threadIdx.x) * 7 + 6];
}
// Barrier between the shared-memory write above and the reads below.
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = THREADS_PER_BLOCK_NMS * row_start + threadIdx.x;
const float *cur_box = boxes + cur_box_idx * 7;
// Convert the row box to the [x1,y1,x2,y2,angle] + z-range form used by
// iou3d (centers read from indices 3..5, sizes from 0..2 — see the
// layout note on boxes_iou_3d_kernel).
float cur_box_tmp[5];
float cur_box_z_tmp[2];
cur_box_tmp[0] = cur_box[3] - cur_box[0] / 2; // x1,
cur_box_tmp[1] = cur_box[4] - cur_box[1] / 2; // y1
cur_box_tmp[2] = cur_box[3] + cur_box[0] / 2; // x2
cur_box_tmp[3] = cur_box[4] + cur_box[1] / 2; // y2
cur_box_tmp[4] = cur_box[6]; // ry
cur_box_z_tmp[0] = cur_box[5] - cur_box[2] / 2; // z1min
cur_box_z_tmp[1] = cur_box[5] + cur_box[2] / 2; // z1max
int i = 0;
unsigned long long t = 0;
int start = 0;
// On the diagonal tile, only compare against boxes that come later so a
// box never suppresses itself or an earlier box.
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
const float *block_box_ptr = block_boxes + i * 7;
float block_box_tmp[5];
float block_box_z_tmp[2];
block_box_tmp[0] = block_box_ptr[3] - block_box_ptr[0] / 2; // x1,
block_box_tmp[1] = block_box_ptr[4] - block_box_ptr[1] / 2; // y1
block_box_tmp[2] = block_box_ptr[3] + block_box_ptr[0] / 2; // x2
block_box_tmp[3] = block_box_ptr[4] + block_box_ptr[1] / 2; // y2
block_box_tmp[4] = block_box_ptr[6]; // ry
block_box_z_tmp[0] = block_box_ptr[5] - block_box_ptr[2] / 2; // z1min
block_box_z_tmp[1] = block_box_ptr[5] + block_box_ptr[2] / 2; // z1max
float cur_iou_3d = iou3d(&cur_box_tmp[0], &block_box_tmp[0], &cur_box_z_tmp[0], &block_box_z_tmp[0], false);
if (cur_iou_3d > nms_overlap_thresh){
t |= 1ULL << i;
}
}
// One mask word per (row box, column tile) pair.
const int col_blocks = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
mask[cur_box_idx * col_blocks + col_start] = t;
}
}
// Host launcher for boxes_iou_3d_kernel: one thread per (box_a, box_b)
// pair, tiled into THREADS_PER_BLOCK x THREADS_PER_BLOCK blocks.
// All pointers must be device pointers; launch is asynchronous.
void boxesIouGPUKernelLauncher(bool ignore_height, const int num_a, const float *boxes_a, const int num_b, const float *boxes_b, float *ans_iou){
dim3 blocks(DIVUP(num_b, THREADS_PER_BLOCK), DIVUP(num_a, THREADS_PER_BLOCK)); // blockIdx.x(col), blockIdx.y(row)
dim3 threads(THREADS_PER_BLOCK, THREADS_PER_BLOCK); // 16 x 16 threads per block (THREADS_PER_BLOCK == 16, not 256)
boxes_iou_3d_kernel<<<blocks, threads>>>(ignore_height, num_a, boxes_a, num_b, boxes_b, ans_iou);
}
// Host launcher for nms3d_kernel: a square grid of box tiles, each block
// THREADS_PER_BLOCK_NMS threads wide. `boxes` and `mask` are device
// pointers; the launch is asynchronous.
void nms3dGPUKernelLauncher(const int boxes_num, const float nms_overlap_thresh, const float *boxes, unsigned long long * mask ){
    const int num_tiles = DIVUP(boxes_num, THREADS_PER_BLOCK_NMS);
    dim3 blocks(num_tiles, num_tiles);
    dim3 threads(THREADS_PER_BLOCK_NMS);
    nms3d_kernel<<<blocks, threads>>>(boxes_num, nms_overlap_thresh, boxes, mask);
}
|
19,703 | #include "includes.h"
// Exposure + gamma correction of a single channel value:
// (val * 2^f_stop)^(1/gamma).
__device__ float gamma_correction(float f_stop, float gamma, float val)
{
    // Use exp2f for 2^f_stop and a float literal for the exponent: the
    // original powf(2, f_stop) and (1.0/gamma) mixed double constants into
    // an otherwise all-float pipeline, forcing double-precision promotion.
    return powf(val * exp2f(f_stop), 1.0f / gamma);
}
// Per-pixel gamma tone-mapping of a 3-channel interleaved image.
// Launch with a 2-D grid covering (width, height); channels/depth are
// accepted for interface compatibility but the layout is fixed at 3
// interleaved channels addressed via the RED/GREEN/BLUE offsets.
__global__ void tonemap_gamma(float* imageIn, float* imageOut, int width, int height, int channels, int depth, float f_stop, float gamma)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    // Guard the grid tail.
    if (row >= height || col >= width)
        return;
    int base = (row * width + col) * 3; // first channel of this pixel
    imageOut[base + BLUE] = gamma_correction(f_stop, gamma, imageIn[base + BLUE]);
    imageOut[base + GREEN] = gamma_correction(f_stop, gamma, imageIn[base + GREEN]);
    imageOut[base + RED] = gamma_correction(f_stop, gamma, imageIn[base + RED]);
}
19,704 | /*
For DIRECTED GRAPH
*/
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define MAX_NODE 100000000
#define DEBUG 0
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a CUDA error with its source location; terminates the process
// with the error code when `abort` is set (the default).
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
__device__ volatile int Cx[MAX_NODE];
__device__ volatile int PQ[MAX_NODE];
//K in parallel
// One thread per priority queue (K queues total). Pops the minimum-cost
// node from queue `id`, restores the heap property, removes the node from
// the open list and appends it to the shared expansion buffer.
// Queue k occupies the segment PQ[k*ceil(N/K) ...]; PQ_size[k] is its
// live entry count and Cx holds the heap keys.
__global__ void extractMin(int* PQ_size, int* expandNodes,int* expandNodes_size,int* openList,int N,int K){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id<K && PQ_size[id]>0){
//extract min from PQ
int front = id* ( (N+K-1)/K );
int node = PQ[front];
// printf("extract min %d %d\n",id,node);
// restructure the heap: move the last element to the root, then sift down
PQ[front]=PQ[front+PQ_size[id]-1];
PQ_size[id]-=1;
int pqIndex = 0;
while(2*pqIndex+1 < PQ_size[id]){
// Only the left child exists.
if(2*pqIndex+2 >= PQ_size[id]){
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]]){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else
break;
}
else{
// Both children exist: swap with the smaller one (ties go left).
if( Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+1]] && Cx[PQ[front+2*pqIndex+1]] <= Cx[PQ[front+2*pqIndex+2]] ){
int swap = PQ[front + 2*pqIndex+1];
PQ[front + 2*pqIndex+1] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+1;
}
else if(Cx[PQ[front+pqIndex]] > Cx[PQ[front+2*pqIndex+2]] && Cx[PQ[front+2*pqIndex+2]] <= Cx[PQ[front+2*pqIndex+1]] ){
int swap = PQ[front + 2*pqIndex+2];
PQ[front + 2*pqIndex+2] = PQ[front +pqIndex];
PQ[front + pqIndex] = swap;
pqIndex = 2*pqIndex+2;
}
else{
break;
}
}
}
//removed from openList
openList[node] = -1;
//added to expand next (atomic append across the K threads)
int len = atomicAdd(expandNodes_size,1);
expandNodes[len]=node;
}
}
//for K in parallel
// Expansion phase of the parallel A*: one thread per node extracted by
// extractMin. For each outgoing edge it relaxes the child's cost Cx under
// a per-child spin lock, updates the parent pointer, and either sifts the
// child up inside the queue that already holds it or flags it for
// insertion (nVFlag) by the next insertPQ pass.
// The diff_* parameters describe a CSR of edge updates; they are unused
// when flagDiff is 0 (the only way this kernel is launched in this file).
__global__ void A_star_expand(int* off,int* edge,unsigned int* W,int* Hx,int* parent,
int* expandNodes,int* expandNodes_size, int* lock ,int* flagfound,int* openList,
int N,int E, int K,int dest,int* nVFlag,int* PQ_size,
int flagDiff,int* diff_off,int* diff_edge,int* diff_weight ){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id< *expandNodes_size ){
int node = expandNodes[id];
//reach dest
if(node == dest){
atomicOr(flagfound,1);
// *flagfound = 1;
printf("found %d\n",id);
}
// expand: CSR edge range [off[node], off[node+1]) — the last node's
// range ends at E.
int start = off[node];
int end = E;
if(node!=N-1)
end = off[node+1];
if(DEBUG)
printf("%d$expand %d:\n",id,start);
while(start < end){
int child = edge[start];
//deleted edges are encoded as negative child ids
if(child<0){
start++;
continue;
}
//array L initilaized with 0
//get the lock for child to update C(x)
//loop till acquire the lock (spin on atomicCAS)
bool leaveLoop = false;
while(leaveLoop==false){
if(atomicCAS(&lock[child],0,1)==0){
//critical section: relax Cx[child] using g(node) + w + h(child),
//where g(node) = Cx[node] - Hx[node]
if( Cx[child] > (Cx[node] - Hx[node])+ W[start]+ Hx[child] ){
Cx[child] = (Cx[node] - Hx[node])+ W[start]+ Hx[child];
// publish Cx before the parent pointer becomes visible
__threadfence();
parent[child] = node;
if(DEBUG)
printf("exp: %d %d\n",node,child);
if(openList[child]>=0){
//update operating on one thread: child already sits in queue Kq,
//find its position and sift it up after the key decrease
if(DEBUG)
printf("upd: %d %d\n",node,child);
int Kq = openList[child];
int front = Kq*( (N+K-1)/K );
int index = -1;
for(int i=front;i<front+PQ_size[Kq];i++){
if(PQ[i]==child){
index = i;
}
}
// NOTE(review): two suspect details here — (a) `index > 0`
// skips the sift when the child sits at global slot 0, and
// (b) the parent is computed as (i-1)/2 on the GLOBAL index
// rather than front + (local-1)/2, which looks wrong for any
// queue with front > 0. Confirm against the heap layout.
if(index > 0){
int i = index;
while(i > front){
if( Cx[PQ[(i-1)/2]] > Cx[PQ[i]] ){
int swap = PQ[i];
PQ[i] = PQ[(i-1)/2];
PQ[(i-1)/2] = swap;
i = (i-1)/2;
}
else
break;
}
}
if(DEBUG)
printf("out of while\n");
}else{
nVFlag[child]=1;
//add only once
}
}
//end critical section
leaveLoop = true;
atomicCAS(&lock[child],1,0);
// if(DEBUG)
// printf("im not deadlocked\n");
}
// NOTE(review): __syncthreads() here sits inside divergent control
// flow (the id / lock-acquisition branches), which is undefined
// behavior and a potential hang — it looks unnecessary for the
// per-child lock protocol; confirm and consider removing.
__syncthreads();
if(DEBUG)
printf("%d$ stuck here\n",id);
}
start++;
}
if(DEBUG)
printf("%d outside while\n",id);
}
}
//N threads
// One thread per vertex: compacts the per-vertex flag array into a dense
// list of vertices to be inserted into the priority queues. `nvSize` is
// the shared output counter (atomic append).
__global__ void setNV(int* nextFlag,int* nextV,int* nvSize,int N){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid >= N)
        return;
    if (nextFlag[tid] == 1){
        int slot = atomicAdd(nvSize, 1);
        nextV[slot] = tid;
    }
}
//for K in parallel
// One thread per priority queue. Thread id consumes elements id, id+K,
// id+2K, ... of the freshly generated vertex list nextV, appends each to
// its own heap segment (sift-up on the Cx keys) and records the owning
// queue index in openList.
__global__ void insertPQ(int* PQS,int* nextV,int* nVsize,int K,int N,int* openList){
int id = blockIdx.x*blockDim.x+threadIdx.x;
if(id < K){
// printf("id: %d\n",id);
int front = id*( (N+K-1)/K );
int i = id;
// debug trace: how many vertices are inserted this round
if(id==0)
printf("insert: %d\n",*nVsize);
while(i<*nVsize){
// append at the end of this thread's segment
PQ[front+PQS[id]]= nextV[i];
PQS[id]+=1;
//add in openList
openList[nextV[i]] = id;
//printf("insert: %d, %d\n",nextV[i],PQS[id]);
// sift the new element up until its parent's key is not larger
if(PQS[id]>1){
int index = PQS[id]-1;
while(index>0){
if(Cx[PQ[front+ (index-1)/2]] > Cx[PQ[front+index]]){
int swap = PQ[front+index];
PQ[front+index]=PQ[front+ (index-1)/2];
PQ[front+ (index-1)/2] = swap;
index = (index-1)/2;
}
else
break;
}
}
i += K;
}
}
}
//for K in parallel
// One thread per queue: if any queue still holds a node cheaper than the
// destination's current cost, clear *flagEnd so the search continues.
__global__ void checkMIN(int* PQ_size,int* flagEnd,int dest,int N,int K){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid >= K || PQ_size[tid] <= 0)
        return;
    int front = tid * ((N + K - 1) / K); // start of this thread's queue segment
    int node = PQ[front];                // queue minimum
    //check if atleast one min, dont end the a*
    printf("%d ",Cx[node]);
    if (Cx[dest] > Cx[node]){
        atomicAnd(flagEnd, 0);
    }
}
// Single-thread kernel: prints the final cost of the destination node.
__global__ void printCX(int dest){
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid != 0)
        return;
    printf("cost: %d\n",Cx[dest]);
}
// Driver for the K-queue parallel A*: reads the CSR graph from graph.txt
// and heuristics from Hx.txt, runs extract/expand/insert rounds until the
// destination's cost is provably minimal, then prints the path (reversed,
// via parent pointers).
int main(){
//the K PQ
int K ;
scanf("%d\n",&K);
int startNode,endNode;
scanf("%d %d",&startNode,&endNode);
FILE* fgraph = fopen("graph.txt","r");
int N,E;
fscanf(fgraph,"%d %d\n",&N,&E);
// Host-side CSR graph, costs and bookkeeping arrays.
int* H_offset = (int*)malloc(sizeof(int)*N);
int* H_edges = (int*)malloc(sizeof(int)*E);
unsigned int* H_weight = (unsigned int*)malloc(sizeof(unsigned int)*E);
int* H_hx = (int*)malloc(sizeof(int)*N);
int* H_cx = (int*)malloc(sizeof(int)*N);
int* H_parent = (int*)malloc(sizeof(int)*N);
int* H_PQ = (int*)malloc(sizeof(int)*N);
int* H_openList = (int*)malloc(sizeof(int)*N);
int* H_PQ_size = (int*)malloc(sizeof(int)*K);
//for diff
int* H_diff_edges = (int*)malloc(sizeof(int)*E);
int* H_diff_offset = (int*)malloc(sizeof(int)*N);
int* H_diff_weight = (int*)malloc(sizeof(int)*E);
memset(H_PQ_size,0,sizeof(int)*K);
memset(H_parent,-1,sizeof(int)*N);
memset(H_openList,-1,sizeof(int)*N);
//init cx to "infinity"
for(int i=0;i<N;i++){
H_cx[i]=INT_MAX;
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%d",&H_edges[i]);
}
for(int i=0;i<N;i++){
fscanf(fgraph,"%d",&H_offset[i]);
}
for(int i=0;i<E;i++){
fscanf(fgraph,"%u",&H_weight[i]);
}
FILE* fhx = fopen("Hx.txt","r");
for(int i=0;i<N;i++){
fscanf(fhx,"%d",&H_hx[i]);
}
fclose(fgraph);
fclose(fhx);
printf("completed input\n");
//init Host var
int* H_flagEnd = (int*)malloc(sizeof(int));
int* H_flagfound = (int*)malloc(sizeof(int));
int* H_a0 = (int*)malloc(sizeof(int));
//required coz if many tries to add same in diff threads high low lower
int* H_nVFlag = (int*)malloc(sizeof(int)*N);
memset(H_nVFlag,-1,sizeof(int)*N);
*H_flagEnd = 0;
*H_flagfound = 0;
*H_a0 = 0;
//insert startNode in PQ[0]
H_cx[startNode]=H_hx[startNode];
H_PQ[0]=startNode;
H_PQ_size[0]=1;
H_openList[startNode]=0;
//graph struture
int* D_offset;
int* D_edges ;
unsigned int* D_weight;
int* D_hx;
int* D_parent;
//Priority queue size
int* D_PQ_size;
//flag if in openList(contains which PQ)
int* D_openList;
//lock for nodes
int* D_lock;
//Diff structure
int* D_diff_edges;
int* D_diff_offset;
int* D_diff_weight;
//next nodes flag
int* D_nVFlag;
//next nodes array to insert PQ
int* D_nV;
int* D_nV_size;
//nodes to be expanded ( extracted from PQ )
int* D_expandNodes;
int* D_expandNodes_size;
//flag to end while loop and found the destination
int* D_flagEnd;
int* D_flagfound;
gpuErrchk ( cudaMalloc(&D_offset,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_edges,sizeof(int)*E) );
gpuErrchk ( cudaMalloc(&D_weight,sizeof(unsigned int)*E) );
gpuErrchk ( cudaMalloc(&D_hx,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_parent,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_PQ_size,sizeof(int)*K) );
gpuErrchk ( cudaMalloc(&D_openList,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_lock,sizeof(int)*N) );
//diff csr
gpuErrchk ( cudaMalloc(&D_diff_edges,sizeof(int)*E) );
gpuErrchk ( cudaMalloc(&D_diff_offset,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_diff_weight,sizeof(int)*E) );
//for next set of vertices to add in PQ
gpuErrchk ( cudaMalloc(&D_nV,sizeof(int)*N) );
gpuErrchk ( cudaMalloc(&D_nV_size,sizeof(int)) );
gpuErrchk ( cudaMalloc(&D_nVFlag,sizeof(int)*N) );
//next nodes to expand
gpuErrchk ( cudaMalloc(&D_expandNodes,sizeof(int)*K) ); //changed to K
gpuErrchk ( cudaMalloc(&D_expandNodes_size,sizeof(int)) );
//flag to end search
gpuErrchk( cudaMalloc(&D_flagEnd,sizeof(int)) );
gpuErrchk( cudaMalloc(&D_flagfound,sizeof(int)) );
gpuErrchk ( cudaMemcpy(D_offset,H_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_edges,H_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_weight,H_weight,sizeof(unsigned int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_hx,H_hx,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_parent,H_parent,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_openList,H_openList,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_diff_edges,H_diff_edges,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_diff_offset,H_diff_offset,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_diff_weight,H_diff_weight,sizeof(int)*E,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_PQ_size,H_PQ_size,sizeof(int)*K,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpyToSymbol(Cx,H_cx, sizeof(int)*N, 0, cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpyToSymbol(PQ,H_PQ, sizeof(int)*N, 0, cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagEnd,H_flagEnd,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_flagfound,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk ( cudaMemset(D_lock,0,sizeof(int)*N) );
int flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
int numThreads = 512;
int numBlocks = (K+numThreads-1)/numThreads;
int N_numBlocks = (N+numThreads-1)/numThreads;
//DO A* initailly on whole graph
while(*H_flagEnd==0 && flag_PQ_not_empty==1){
//extract min
extractMin<<<numBlocks,numThreads>>>(D_PQ_size, D_expandNodes,D_expandNodes_size,D_openList,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
// BUG FIX: the last argument (diff_weight) was D_diff_offset in the
// original, passing the offset array where the weight array belongs.
// Harmless only while flagDiff is false; wrong data otherwise.
A_star_expand<<<numBlocks,numThreads>>>(D_offset,D_edges,D_weight,D_hx,D_parent,
D_expandNodes,D_expandNodes_size, D_lock ,D_flagfound,D_openList,
N,E,K,endNode,D_nVFlag,D_PQ_size,
false,D_diff_offset,D_diff_edges,D_diff_weight );
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
if(DEBUG)
printf("expand over\n");
//gen from flag D_nV
//for N in parallel
setNV<<<N_numBlocks,numThreads>>>(D_nVFlag,D_nV,D_nV_size,N);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
insertPQ<<<numBlocks,numThreads>>>(D_PQ_size,D_nV,D_nV_size,K,N,D_openList);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
//cpy flagend and flagEmpty
gpuErrchk( cudaMemcpy(H_flagfound,D_flagfound, sizeof(int),cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy(H_PQ_size,D_PQ_size, sizeof(int)*K,cudaMemcpyDeviceToHost) );
//reset nVFlag
gpuErrchk( cudaMemcpy(D_nVFlag,H_nVFlag,sizeof(int)*N,cudaMemcpyHostToDevice) );
//reset next insert array
gpuErrchk( cudaMemcpy(D_nV_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(D_expandNodes_size,H_a0,sizeof(int),cudaMemcpyHostToDevice) );
flag_PQ_not_empty = 0;
for(int i=0;i<K;i++){
if(H_PQ_size[i]>0)
flag_PQ_not_empty=1;
}
//check for mins: once the destination is found, stop only when no queue
//still holds a node cheaper than the destination's cost
if( *H_flagfound==1 && flag_PQ_not_empty==1){
//end
gpuErrchk( cudaMemcpy(D_flagEnd,H_flagfound,sizeof(int),cudaMemcpyHostToDevice) );
checkMIN<<< numBlocks,numThreads >>>(D_PQ_size,D_flagEnd,endNode,N,K);
gpuErrchk(cudaPeekAtLastError() );
cudaDeviceSynchronize();
gpuErrchk( cudaMemcpy(H_flagEnd,D_flagEnd, sizeof(int),cudaMemcpyDeviceToHost) );
printf("\ninside MIN\n");
}
}
printCX<<<1,1>>>(endNode);
gpuErrchk( cudaMemcpy(H_parent,D_parent, sizeof(int)*N,cudaMemcpyDeviceToHost) );
if(*H_flagfound==1){
//walk the parent chain back from the destination
int p = endNode;
while(H_parent[p]!=-1){
printf("%d ",p);
p = H_parent[p];
}
printf("%d\n",startNode);
}
else{
printf("not found\n");
}
// Release device and host memory (the original leaked everything).
cudaFree(D_offset); cudaFree(D_edges); cudaFree(D_weight);
cudaFree(D_hx); cudaFree(D_parent); cudaFree(D_PQ_size);
cudaFree(D_openList); cudaFree(D_lock);
cudaFree(D_diff_edges); cudaFree(D_diff_offset); cudaFree(D_diff_weight);
cudaFree(D_nV); cudaFree(D_nV_size); cudaFree(D_nVFlag);
cudaFree(D_expandNodes); cudaFree(D_expandNodes_size);
cudaFree(D_flagEnd); cudaFree(D_flagfound);
free(H_offset); free(H_edges); free(H_weight); free(H_hx); free(H_cx);
free(H_parent); free(H_PQ); free(H_openList); free(H_PQ_size);
free(H_diff_edges); free(H_diff_offset); free(H_diff_weight);
free(H_flagEnd); free(H_flagfound); free(H_a0); free(H_nVFlag);
return 0;
}
|
19,705 | #include "graph.hh"
#include "node.hh"
#include <algorithm>
#include <cassert>
#include <map>
#include <set>
#include <string>
#include <iostream>
namespace rt
{
// The graph owns its nodes: release every remaining one on destruction.
Graph::~Graph()
{
    for (auto node : nodes_)
        delete node;
}
// Registers `node` with the graph, which takes ownership of it.
void Graph::add(Node* node)
{
    assert(node != nullptr);
    nodes_.push_back(node);
}
// Removes `node` and, recursively, all of its successors from the graph,
// unlinking it from its predecessors' successor lists and deleting it.
void Graph::remove(Node* node)
{
    assert(node);
    auto it = std::find(nodes_.begin(), nodes_.end(), node);
    assert(it != nodes_.end());
    nodes_.erase(it);
    // Unlink this node from each predecessor's successor list.
    for (auto x : node->preds)
    {
        auto pred_it = std::find(x->succs.begin(), x->succs.end(), node);
        assert(pred_it != x->succs.end());
        x->succs.erase(pred_it);
    }
    // BUG FIX: iterate over a snapshot of the successor list. The recursive
    // remove(x) erases x from node->succs (node is one of x's
    // predecessors), which invalidated the range-for iterator in the
    // original code — undefined behavior / skipped successors.
    auto succs = node->succs;
    for (auto x : succs)
        remove(x);
    delete node;
}
// Returns a copy of the node list; the graph retains ownership of the
// nodes themselves.
const std::vector<Node*> Graph::nodes() const
{
    return nodes_;
}
namespace
{
    // Inserts `node` and all of its transitive predecessors into `set`.
    void add_preds(Node* node, std::set<Node*>& set)
    {
        if (!set.insert(node).second)
            return; // already visited
        for (auto pred : node->preds)
            add_preds(pred, set);
    }
    // Predecessors of `node` restricted to the members of `graph`.
    std::vector<Node*> get_preds(Node* node, std::set<Node*>& graph)
    {
        std::vector<Node*> res;
        for (auto pred : node->preds)
            if (graph.count(pred) != 0)
                res.push_back(pred);
        return res;
    }
    // Successors of `node` restricted to the members of `graph`.
    std::vector<Node*> get_succs(Node* node, std::set<Node*>& graph)
    {
        std::vector<Node*> res;
        for (auto succ : node->succs)
            if (graph.count(succ) != 0)
                res.push_back(succ);
        return res;
    }
}
/**
* L ← Empty list that will contain the sorted elements
* S ← Set of all nodes with no incoming edge
* while S is non-empty do
* remove a node n from S
* add n to tail of L
* for each node m with an edge e from n to m do
* remove edge e from the graph
* if m has no other incoming edges then
* insert m into S
* if graph has edges then
* return error (graph has at least one cycle)
* else
* return L (a topologically sorted order)
*/
// Kahn's algorithm (see pseudo-code above) over the predecessor closure of
// `vals`. Throws std::runtime_error when the closure contains a cycle.
std::vector<Node*> Graph::topological_sort(const std::vector<Node*>& vals)
{
std::vector<Node*> res;
// Working set: every requested node plus all transitive predecessors.
std::set<Node*> graph;
for (auto n : vals)
add_preds(n, graph);
// S: nodes with no incoming edge inside the working set.
std::vector<Node*> s;
for (auto x : graph)
if (get_preds(x, graph).empty())
s.push_back(x);
while (!s.empty())
{
Node* next = s.back();
s.pop_back();
res.push_back(next);
// A successor whose only remaining in-set predecessor is `next`
// becomes ready once `next` leaves the working set.
for (auto succ : get_succs(next, graph))
{
if (get_preds(succ, graph).size() == 1)
s.push_back(succ);
}
graph.erase(next);
}
// Anything left in the working set sits on a cycle.
if (!graph.empty())
throw std::runtime_error {"Topological sort failed"};
return res;
}
namespace
{
    // Returns a stable dot-graph label for `node`, creating and memoizing
    // one of the form "<opname>:<counter>" on first use.
    std::string op_name(const Node* node,
                        std::map<const Node*, std::string>& names)
    {
        auto cached = names.find(node);
        if (cached != names.end())
            return cached->second;
        std::string label;
        if (node->type == Node::OP_NOP)
            label = "nop";
        else
            label = Node::OP_NAMES[node->type];
        // names.size() grows with each new node, so the suffix is unique.
        label += ":" + std::to_string(names.size());
        names[node] = label;
        return label;
    }
}
// Renders the graph as a DOT graph with one edge per (node, successor)
// pair, labelling nodes via op_name.
utils::DotGraph Graph::to_dot_graph() const
{
    std::map<const Node*, std::string> names;
    utils::DotGraph g;
    for (auto node : nodes_)
    {
        for (auto succ : node->succs)
            g.add_edge(op_name(node, names), op_name(succ, names));
    }
    return g;
}
}
|
19,706 | #include<stdlib.h>
#include<stdio.h>
#include<iostream>
#include<string>
#include<math.h>
#include<fstream>
#include<sstream>
#include<cuda.h>
#include<time.h>
#define SIZE 82000
using namespace std;
size_t threadsPerBlock;
size_t numberOfBlocks;
int deviceId;
enum color {
white,
black,
grew
};
typedef struct edge_node
{
int vex;
edge_node *next;
} edge_node;
typedef struct vex_node
{
int vex_num;
color col;
edge_node *edges;
edge_node *tail;
} vex_node;
int graph_size;
// Grid-stride scan over the first N vertex slots: reports whether vertex
// numbers x and y already exist and, if so, the slot index of each.
// Outputs (findx/findy/tmpx/tmpy) must be device-accessible.
__global__ void search_kernel(int N,vex_node *g,bool *findx,bool *findy,int *tmpx,int *tmpy,int x,int y)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = tid; i < N; i += stride)
    {
        if (g[i].vex_num == x)
        {
            *findx = true;
            *tmpx = i;
        }
        if (g[i].vex_num == y)
        {
            *findy = true;
            *tmpy = i;
        }
    }
}
// Dumps every vertex with its adjacency list to stdout. The maximum
// out-degree is tracked (but, as in the original, never printed).
void printgraph(vex_node *g)
{
    int max = 0;
    for (int i = 0; i < graph_size; i++)
    {
        printf("No.%d: num=%d,edges:", i, g[i].vex_num);
        int degree = 0;
        for (edge_node *p = g[i].edges; p != NULL; p = p->next)
        {
            printf("%d ", g[p->vex].vex_num);
            degree++;
        }
        if (max < degree)
            max = degree;
        printf("\n");
    }
}
// Reads an edge list of "x y" pairs from `filename` and builds an
// adjacency-list graph in the managed array `g`. Existing vertex numbers
// are located with a GPU scan (search_kernel); new ones are appended.
// Sets the global `graph_size` to the number of distinct vertices.
void creategraph(char *filename,vex_node *g)
{
    int x=1,y;
    int sta;                 // number of vertex slots used so far
    int *d_tmpx,*d_tmpy;     // slot indices of x and y (managed memory)
    bool *d_findx,*d_findy;  // "already present" flags (managed memory)
    cudaMallocManaged(&d_tmpx,sizeof(int));
    cudaMallocManaged(&d_tmpy,sizeof(int));
    cudaMallocManaged(&d_findx,sizeof(bool));
    cudaMallocManaged(&d_findy,sizeof(bool));
    *d_tmpx=0;
    *d_tmpy=0;
    sta=0;
    edge_node *q;
    ifstream in(filename);
    // BUG FIX: the original looped on !in.eof() and extracted inside the
    // body, which re-processes the last record (or garbage) after the final
    // read fails. Looping on the extraction itself stops exactly at EOF or
    // on malformed input.
    while(in>>x>>y)
    {
        // map old point: scan the existing slots for x and y on the GPU
        *d_findx=false;
        *d_findy=false;
        numberOfBlocks=sta/threadsPerBlock+1;
        search_kernel<<<numberOfBlocks, threadsPerBlock>>>(sta,g,d_findx,d_findy,d_tmpx,d_tmpy,x,y);
        cudaDeviceSynchronize();
        // add a new slot for x if it was not found
        if(!*d_findx)
        {
            *d_tmpx=sta;
            g[*d_tmpx].vex_num=x;
            g[*d_tmpx].col=white;
            g[*d_tmpx].edges=NULL;
            g[*d_tmpx].tail=NULL;
            sta++;
        }
        // add a new slot for y if it was not found
        if(!*d_findy)
        {
            *d_tmpy=sta;
            g[*d_tmpy].vex_num=y;
            g[*d_tmpy].col=white;
            g[*d_tmpy].edges=NULL;
            // BUG FIX: the original reset g[*d_tmpx].tail here (copy-paste
            // error), truncating x's edge list; the new slot is y's.
            g[*d_tmpy].tail=NULL;
            sta++;
        }
        // append the edge x -> y at the tail of x's adjacency list
        edge_node *p=g[*d_tmpx].tail;
        cudaMallocManaged(&q, sizeof(edge_node));
        q->vex=*d_tmpy;
        q->next=NULL;
        if(p==NULL)
        {
            g[*d_tmpx].edges=q;
            g[*d_tmpx].tail=q;
        }
        else
        {
            p->next=q;
            g[*d_tmpx].tail=q;
        }
        // sanity check of the vertex-number mapping
        if(y!=g[*d_tmpy].vex_num)
            printf("Error:map wrong!\n");
        printf("current vex number: %d\n",sta);
    }
    graph_size=sta;
}
// One BFS expansion step: every frontier ('grew') vertex pushes its
// neighbours onto the frontier and is then retired to 'black'.  *change is
// set whenever a white vertex was newly reached, so the host relaunches
// until a pass makes no progress.
//
// Fix vs. original: the retire step was the no-op comparison
// `g[i].col==black;` instead of an assignment, so finished vertices were
// re-expanded on every subsequent launch.
__global__ void bfs_kernel(vex_node *g, int N, bool *change)
{
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    int gridStride = gridDim.x * blockDim.x;
    for (int i = index; i < N; i += gridStride)
    {
        if (g[i].col == grew)
        {
            edge_node *p = g[i].edges;
            while (p != NULL)
            {
                if (g[p->vex].col == white) {
                    *change = true;  // at least one vertex newly reached
                }
                g[p->vex].col = grew;
                p = p->next;
            }
            g[i].col = black;  // was `==` (comparison with no effect)
        }
    }
}
// Host-side BFS driver: for each still-white vertex, seed a new traversal
// and launch bfs_kernel repeatedly until a pass discovers no new vertex.
// Counts the total number of expansion passes across all components.
// Fix vs. original: the unified-memory 'change' flag was leaked.
void bfs(vex_node *g)
{
    bool *change;
    int total = 0;  // total expansion passes over all components
    cudaMallocManaged(&change, sizeof(bool));
    for (int i = 0; i < graph_size; i++)
    {
        if (g[i].col == white)
        {
            g[i].col = grew;  // seed a new component
            *change = true;
            while (*change)
            {
                *change = false;
                total++;
                cudaDeviceSynchronize();  // make the flag write visible to the device
                bfs_kernel<<<numberOfBlocks, threadsPerBlock>>>(g, graph_size, change);
                cudaDeviceSynchronize();
            }
        }
    }
    cudaFree(change);
    printf("\ntotal bfs times:%d\n", total);
}
// Build the graph from the edge-list file given as argv[1], print it, run
// BFS over every component, and report wall-clock times for both phases.
// Fixes vs. original: missing input-file argument check, leaked unified
// memory for the vertex array, and a missing return value.
int main(int argc, char *argv[])
{
    if (argc < 2)
    {
        printf("usage: %s <edge-list-file>\n", argv[0]);
        return 1;
    }
    int numberOfSMs;
    cudaGetDevice(&deviceId);
    cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
    threadsPerBlock = 256;
    numberOfBlocks = 32 * numberOfSMs;  // plenty of blocks to saturate the device
    clock_t begin, end;
    double time1, time2;
    vex_node *g;
    size_t size = SIZE * sizeof(vex_node);
    cudaMallocManaged(&g, size);
    begin = clock();
    creategraph(argv[1], g);
    end = clock();
    time1 = (double)(end - begin) / CLOCKS_PER_SEC;
    printgraph(g);
    begin = clock();
    bfs(g);
    end = clock();
    time2 = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("\nread time:%.8fs", time1);
    printf("\nbfs time:%.8fs\n", time2);
    cudaFree(g);
    return 0;
}
|
19,707 | #include <iostream>
#include <getopt.h>
#define no_argument 0
#define required_argument 1
#define optional_argument 2
// Minimal getopt_long_only demo: parses --version/--help/--metric_set/--eval
// (and the short forms -v/-h/-m/-e) and reports which option was hit.
// Fixes vs. original:
//  * the 'version' long_opts entry had no matching case and no short-option
//    letter, so --version was parsed but silently ignored;
//  * --eval takes a required argument but never printed optarg;
//  * the comment above opterr contradicted the code (1 ENABLES messages).
int main(int argc, char * argv[])
{
    std::cout << "Hello" << std::endl;
    const struct option long_opts[] =
    {
        {"version",    no_argument,       0, 'v'},
        {"help",       no_argument,       0, 'h'},
        {"metric_set", required_argument, 0, 'm'},
        {"eval",       required_argument, 0, 'e'},
        {0, 0, 0, 0},
    };
    int index;
    int iarg = 0;
    // Leave getopt's own error messages enabled.
    opterr = 1;
    while (iarg != -1)
    {
        iarg = getopt_long_only(argc, argv, "e:m:hv", long_opts, &index);
        switch (iarg)
        {
        case 'v':
            std::cout << "You hit version" << std::endl;
            break;
        case 'e':
            std::cout << "You hit eval: " << optarg << std::endl;
            break;
        case 'm':
            std::cout << "You hit metric_set: " << optarg << std::endl;
            break;
        case 'h':
            std::cout << "You hit help" << std::endl;
            break;
        }
    }
    std::cout << "GoodBye!" << std::endl;
    return 0;
}
19,708 | #include <stdio.h>
#include <algorithm>
#include <cmath>
// Mish activation, vectorized: each thread processes 4 consecutive floats per
// grid-stride step via one float4 load/store.
// Preconditions: tx/aten_mul are 16-byte aligned and n is a multiple of 4
// (the caller uses n = 1<<28, which satisfies both).
// Fix vs. original: tanh/log1p/exp resolved to the double-precision overloads,
// forcing a float->double->float round trip per element; the single-precision
// forms are used now.
__global__
void mish(int n, float* tx, float* aten_mul) {
  for (int i = (threadIdx.x + blockDim.x * blockIdx.x) * 4; i < n; i += gridDim.x * blockDim.x * 4) {
    float4 tx4 = __ldg(reinterpret_cast<float4*>(tx + i));
    // mish(x) = x * tanh(softplus(x)),  softplus(x) = log(1 + e^x)
    tx4.x = tx4.x * tanhf(log1pf(expf(tx4.x)));
    tx4.y = tx4.y * tanhf(log1pf(expf(tx4.y)));
    tx4.z = tx4.z * tanhf(log1pf(expf(tx4.z)));
    tx4.w = tx4.w * tanhf(log1pf(expf(tx4.w)));
    *reinterpret_cast<float4*>(aten_mul + i) = tx4;
  }
}
// Round-up integer division: smallest q with q * u >= t, for non-negative t
// and positive u (the classic (t + u - 1) / u trick).
template<typename T, typename U>
constexpr T ceildiv(T t, U u) {
  return (t + (u - 1)) / u;
}
// Benchmark the mish kernel: 500 timed launches over 2^28 floats, then a
// correctness check against a host-side reference value.
// Fixes vs. original: the reference mish(3.0) is loop-invariant and was being
// recomputed 2^28 times; the CUDA events were never destroyed.
int main() {
  constexpr int N = 1 << 28;
  float *x, *y, *d_x, *d_y;
  x = (float*)malloc(N * sizeof(float));
  y = (float*)malloc(N * sizeof(float));
  cudaMalloc(&d_x, N * sizeof(float));
  cudaMalloc(&d_y, N * sizeof(float));
  for (int i = 0; i < N; i++) {
    x[i] = 3.0f;
    y[i] = 2.0f;
  }
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaMemcpy(d_x, x, N * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, y, N * sizeof(float), cudaMemcpyHostToDevice);
  constexpr int blockSize = 512;
  // /4 because each thread handles four elements per step.
  constexpr int nBlocks = ceildiv(N, blockSize) / 4;
  float millis = 0.0f;
  float temp = 0.0f;
  for (int i = 0; i < 500; i++) {
    cudaEventRecord(start);
    mish<<<nBlocks, blockSize>>>(N, d_x, d_y);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&temp, start, stop);
    millis += temp;
  }
  millis = millis / 500;
  cudaMemcpy(y, d_y, N * sizeof(float), cudaMemcpyDeviceToHost);
  // Host reference mish(3.0), hoisted out of the comparison loop.
  const float mv = 3.0f * tanhf(std::log1p(std::exp(3.0)));
  float maxError = 0.0f;
  for (int i = 0; i < N; i++) {
    maxError = std::max(maxError, std::abs(mv - y[i]));
  }
  printf("max error: %f\n", maxError);
  printf("duration (ms): %f\n", millis);
  printf("effective bandwidth (gb/s): %f\n", (float)N * sizeof(float) * 3 / millis / 1e6);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_x);
  cudaFree(d_y);
  free(x);
  free(y);
  return 0;
}
|
19,709 | #include <iostream>
#include <string>
#include <vector>
#include <fstream>
#include <sstream>
#include <cuda.h>
#define THREAD_COUNT 1024
// Max device memory : 4 GB
#define MAX_MEMORY ((long long)4e9)
// Parse a MatrixMarket-like edge-list file into CSR form (row_ptr/col_ind).
// Both output arrays are allocated here with cudaMallocHost (pinned), so the
// caller must release them with cudaFreeHost.  Self-loops are dropped and
// every surviving edge is stored in both directions (undirected graph);
// num_edges is rewritten to the number of kept undirected edges.
// Throws a const char* message if the file cannot be opened.
void read_graph(std::string fname, int *&row_ptr, int *&col_ind, int &num_nodes, int &num_edges, bool zero_based = false)
{
std::ifstream input(fname.c_str());
if (input.fail())
throw "No file is found in the current path!";
// Skip '%' comment/header lines; the first non-comment line holds the sizes.
std::string line = "%";
while (line.find("%") != std::string::npos)
{
getline(input, line);
}
std::istringstream ss(line.c_str());
// rows == cols for a graph, so num_nodes is deliberately read twice.
ss >> num_nodes >> num_nodes >> num_edges;
int edge_cnt = 0;
int v1, v2;
std::vector< std::vector<int> > adj_list(num_nodes);
for (int i = 0; i < num_edges; i++)
{
getline(input, line);
std::istringstream inp(line.c_str());
inp >> v1 >> v2;
if (!zero_based)
{
v1--; // make it 0 based
v2--;
}
// Drop self-loops; keep both directions of every other edge.
if (v1 != v2)
{
adj_list[v1].push_back(v2); // add the edge v1->v2
adj_list[v2].push_back(v1); // add the edge v2->v1
edge_cnt++;
}
}
input.close();
num_edges = edge_cnt;
// Pinned host buffers: 2*num_edges directed entries for the undirected graph.
cudaMallocHost((void **)&row_ptr, sizeof(int) * (num_nodes + 1));
cudaMallocHost((void **)&col_ind, sizeof(int) * (2 * num_edges));
row_ptr[0] = 0;
int index = 0;
for (int v = 0; v < num_nodes; v++)
{
row_ptr[v + 1] = adj_list[v].size(); // assign number of edges going from node v
for (int i = 0; i < (int)adj_list[v].size(); i++)
{
col_ind[index] = adj_list[v][i]; // put all edges in order wrt row_ptr
index++;
}
}
for (int v = 1; v < num_nodes + 1; v++)
{ // cumulative sum turns per-node counts into CSR offsets
row_ptr[v] += row_ptr[v - 1];
}
}
// Per-source BFS plus a Brandes-style backward sweep.  One block handles one
// source vertex s at a time (s strides by gridDim.x).  results holds four
// n-wide sections: [0,n) degree, [n,2n) #vertices within distance 2,
// [2n,3n) closeness, [3n,4n) betweenness (accumulated across sources).
// dist/sigma/delta are per-block scratch rows of n entries each, initialised
// by the host to -1 / 0 / 0 and restored at the end of each source iteration.
// NOTE(review): dist/sigma updates in the BFS loop and the shared `improved`
// flag are written without atomics (benign-race style); sigma in particular
// can be miscounted when two predecessors relax a node in the same pass —
// compare against a CPU reference before trusting exact values.
__global__
void cent_kernel(float *results, int *dist, int *sigma, float *delta, int *rp, int *ci, int n) {
__shared__ int level;     // current BFS frontier depth
__shared__ int improved;  // did this pass reach any new node?
for(int s = blockIdx.x; s < n; s += gridDim.x) {
if(threadIdx.x == 0) {
results[s] = rp[s + 1] - rp[s]; // degree 1
level = 0;
improved = 1;
dist[blockIdx.x * n + s] = 0;   // source is at distance 0
sigma[blockIdx.x * n + s] = 1;  // one shortest path to itself
}
__syncthreads();
// BFS: expand one level per pass until no node's distance improves.
// `improved` is shared, so the while-condition is uniform per block and the
// barrier below is reached by every thread.
while(improved == 1) {
if(threadIdx.x == 0) improved = 0;
for(int node = threadIdx.x; node < n; node += blockDim.x) {
for(int edge = rp[node]; edge < rp[node + 1]; edge++) {
int &adj = ci[edge];
if(dist[(blockIdx.x * n) + adj] == level && dist[(blockIdx.x * n) + node] == -1) {
dist[(blockIdx.x * n) + node] = level + 1;
improved = 1;
}
if(dist[(blockIdx.x * n) + adj] == level && dist[(blockIdx.x * n) + node] == level + 1) {
// NOTE(review): float cast added into an int accumulator — the cast is a
// no-op here; sigma stays integer arithmetic.
sigma[(blockIdx.x * n) + node] += (float) sigma[(blockIdx.x * n) + adj];
}
}
}
if(threadIdx.x == 0) level++;
__syncthreads();
}
int dist_sum = 0;
int dist2_cnt = 0;
// DISTANCE ADDER: serial pass by thread 0 over this source's dist row.
if(threadIdx.x == 0) {
for(int i = 0; i < n; i++) {
if(dist[(blockIdx.x * n) + i] > 0) {
if(dist[(blockIdx.x * n) + i] <= 2) dist2_cnt++;
dist_sum += dist[(blockIdx.x * n) + i];
}
}
results[n + s] = dist2_cnt; // degree 2
results[2 * n + s] = (float) n / dist_sum; // closeness cent.
}
// Backward sweep: walk levels from the deepest back to the source,
// accumulating path dependencies into delta and betweenness into results.
while(level > 0) {
for(int node = threadIdx.x; node < n; node += blockDim.x) {
if(dist[blockIdx.x * n + node] == level){
for(int edge = rp[node]; edge < rp[node + 1]; edge++) {
int adj = ci[edge];
if(dist[(blockIdx.x * n) + adj] + 1 == dist[(blockIdx.x * n) + node]) {
atomicAdd(&delta[(blockIdx.x * n) + adj], (sigma[(blockIdx.x * n) + adj] * 1.0) / sigma[(blockIdx.x * n) + node] * (1 + delta[(blockIdx.x * n) + node]));
}
}
// /2 because each undirected shortest path is seen from both endpoints.
atomicAdd(&results[3 * n + node], delta[(blockIdx.x * n) + node] / 2);
}
}
if(threadIdx.x == 0) level--;
__syncthreads();
}
// Restore the scratch rows for the next source handled by this block.
// NOTE(review): every thread runs this full n-loop redundantly.
for(int i = 0; i < n; i++) {
dist[(blockIdx.x * n) + i] = -1;
sigma[(blockIdx.x * n) + i] = 0;
delta[(blockIdx.x * n) + i] = 0;
}
}
}
// Allocate device scratch (per-block dist/sigma/delta rows of n entries each
// plus the 4*n results buffer), time cent_kernel with CUDA events, and return
// the results in pinned host memory (caller frees with cudaFreeHost).
// BLOCK_COUNT caps total scratch at MAX_MEMORY bytes: three 4-byte n-entry
// arrays per block.  time_taken is reported in milliseconds.
// NOTE(review): cudaEventRecord(end) is issued after cudaDeviceSynchronize(),
// so the measured interval also includes the host-side sync.
float* compute_centralities(int *rp, int *ci, int n, float &time_taken) {
const int BLOCK_COUNT = MAX_MEMORY / (4 * 3 * n);
int *sigma, *dist;
float *delta, *d_results;
cudaMalloc((void **)&d_results, sizeof(float) * n * 4);
cudaMalloc((void **)&sigma, sizeof(int) * n * BLOCK_COUNT);
cudaMalloc((void **)&dist, sizeof(int) * n * BLOCK_COUNT);
cudaMalloc((void **)&delta, sizeof(float) * n * BLOCK_COUNT);
// Byte-wise memset: -1 gives 0xFFFFFFFF == -1 for ints; 0 is valid for both.
cudaMemset(dist, -1, sizeof(int) * n * BLOCK_COUNT);
cudaMemset(sigma, 0, sizeof(int) * n * BLOCK_COUNT);
cudaMemset(delta, 0, sizeof(float) * n * BLOCK_COUNT);
cudaMemset(d_results, 0, sizeof(float) * 4 * n);
cudaEvent_t start, end;
cudaEventCreate(&start);
cudaEventCreate(&end);
cudaEventRecord(start);
cent_kernel<<<BLOCK_COUNT, THREAD_COUNT>>>(d_results, dist, sigma, delta, rp, ci, n);
cudaDeviceSynchronize();
cudaEventRecord(end);
cudaEventSynchronize(end);
cudaEventElapsedTime(&time_taken, start, end);
float *results;
// Pinned host buffer; ownership passes to the caller.
cudaMallocHost((void **)&results, sizeof(float) * n * 4);
cudaMemcpy(results, d_results, sizeof(float) * n * 4, cudaMemcpyDeviceToHost);
cudaFree(sigma);
cudaFree(dist);
cudaFree(delta);
cudaFree(d_results);
cudaDeviceSynchronize();
return results;
}
// Entry point: read the graph from disk, copy its CSR arrays to device 0,
// run the centrality kernel, and print all four measures for every vertex.
int main()
{
    cudaSetDevice(0);
    std::string path = "../data/wave.mtx";
    int *h_row_ptr, *h_col_ind;
    int n_nodes, n_edges;
    read_graph(path, h_row_ptr, h_col_ind, n_nodes, n_edges);
    printf("[INFO] Graph is read: %s.\n", path.c_str());
    // Device-side CSR copies.
    int *d_row_ptr, *d_col_ind;
    cudaMalloc((void **)&d_row_ptr, sizeof(int) * (n_nodes + 1));
    cudaMalloc((void **)&d_col_ind, sizeof(int) * (n_edges * 2));
    printf("[INFO] CUDA memory parameters are allocated for kernel function.\n");
    cudaMemcpy(d_row_ptr, h_row_ptr, sizeof(int) * (n_nodes + 1), cudaMemcpyHostToDevice);
    cudaMemcpy(d_col_ind, h_col_ind, sizeof(int) * (n_edges * 2), cudaMemcpyHostToDevice);
    printf("[INFO] CUDA memory parameters are set for kernel function.\n");
    float kernel_ms;
    float *scores = compute_centralities(d_row_ptr, d_col_ind, n_nodes, kernel_ms);
    printf("[INFO] Kernel function is finished.\n");
    printf("Centrality Results:\n");
    for (int v = 0; v < n_nodes; v++)
    {
        printf("%.5f; %.5f; %.5f; %.5f\n ", scores[v], scores[n_nodes + v], scores[2 * n_nodes + v], scores[3 * n_nodes + v]);
    }
    printf("[INFO] Kernel call is ended in: %.5f ms.\n", kernel_ms);
    // results/row_ptr/col_ind were cudaMallocHost'ed; device arrays cudaMalloc'ed.
    cudaFreeHost(scores);
    cudaFreeHost(h_row_ptr);
    cudaFreeHost(h_col_ind);
    cudaFree(d_row_ptr);
    cudaFree(d_col_ind);
    return 0;
}
|
19,710 | #include "includes.h"
// Ages every active neuron and prunes its connections whose age exceeds
// maxAge.  A neuron left without live connections is deactivated and all of
// its per-neuron statistics are reset.  One thread per neuron; the grid may
// be laid out in 2D, so the index flattens (blockIdx.y, blockIdx.x).
__global__ void RemoveEdgesKernel( int *connection, int *age, int maxAge, int *activityFlag, float *winningFraction, int *winningCount, float *utility, float *localError, int *neuronAge, int maxCells )
{
    // Flatten the (possibly 2D) grid into a single neuron index.
    int threadId = blockDim.x * blockIdx.y * gridDim.x
                 + blockDim.x * blockIdx.x
                 + threadIdx.x;
    if (threadId >= maxCells) return;
    if (activityFlag[threadId] != 1) return;
    neuronAge[threadId] = neuronAge[threadId] + 1;
    // Scan this neuron's row of the connection matrix, dropping stale edges.
    int liveEdges = 0;
    for (int c = 0; c < maxCells; c++)
    {
        int connId = threadId * maxCells + c;
        if (connection[connId] != 1) continue;
        if (age[connId] <= maxAge)
        {
            liveEdges++;
        }
        else
        {
            // Edge expired: remove it and reset its age.
            connection[connId] = 0;
            age[connId] = 0;
        }
    }
    // A neuron with no surviving edges is retired and its stats cleared.
    if (liveEdges == 0)
    {
        activityFlag[threadId] = 0;
        localError[threadId] = 0.00f;
        neuronAge[threadId] = 0;
        winningFraction[threadId] = 0.00f;
        winningCount[threadId] = 0;
        utility[threadId] = 0.00f;
    }
}
19,711 | # include <bits/stdc++.h>
# include <cuda.h>
#define SIZE 60000000// Global Size
#define BLOCK_SIZE 1024
using namespace std;
//::::::::::::::::::::::::::::::::::::::::::GPU::::::::::::::::::::::::::::::::
// :::: Kernel
// Block-level sum reduction with sequential addressing: each block sums up to
// BLOCK_SIZE inputs in shared memory and writes one partial to g_odata.
// Out-of-range lanes load 0.0 so the reduction tree needs no edge cases.
__global__ void KernelNormalVec(double *g_idata,double *g_odata,int l){
    __shared__ double partial[BLOCK_SIZE];
    unsigned int lane = threadIdx.x;
    unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    partial[lane] = (gid < l) ? g_idata[gid] : 0.0;
    __syncthreads();
    // Halve the active span each pass; sequential addressing keeps the
    // active lanes contiguous.
    for (unsigned int span = blockDim.x / 2; span > 0; span >>= 1) {
        if (lane < span) {
            partial[lane] += partial[lane + span];
        }
        __syncthreads();
    }
    // Lane 0 publishes this block's partial sum.
    if (lane == 0) g_odata[blockIdx.x] = partial[0];
}
// :::: Calls
// Host driver: iteratively reduces SIZE doubles on the device until a single
// value remains; Total[0] then holds the sum.  Each pass shrinks the problem
// from temp elements to ceil(temp/BLOCK_SIZE) partials, which are copied
// device-to-device to become the next pass's input.
// NOTE(review): both device buffers are SIZE elements and the full SIZE is
// copied every pass even though only the live prefix matters — correct but
// wasteful; there is also no CUDA error checking anywhere.
void d_VectorMult(double *Vec1,double *Total){
double * d_Vec1;
double * d_Total;
double Blocksize=BLOCK_SIZE; // Block of 1Dim
cudaMalloc((void**)&d_Vec1,SIZE*sizeof(double));
cudaMalloc((void**)&d_Total,SIZE*sizeof(double));
cudaMemcpy(d_Vec1,
Vec1,SIZE*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(d_Total,Total,SIZE*sizeof(double),cudaMemcpyHostToDevice);
int temp=SIZE;
while(temp>1){
dim3 dimBlock(Blocksize,1,1);
// double division, so this is a true ceiling for any temp.
int grid=ceil(temp/Blocksize);
dim3 dimGrid(grid,1,1);
KernelNormalVec<<<dimGrid,dimBlock>>>(d_Vec1,d_Total,temp);
cudaDeviceSynchronize();
// Feed this pass's partials back in as the next pass's input.
cudaMemcpy(d_Vec1,d_Total,SIZE*sizeof(double),cudaMemcpyDeviceToDevice);
temp=ceil(temp/Blocksize);
}
cudaMemcpy(Total,d_Total,SIZE*sizeof(double),cudaMemcpyDeviceToHost);
cudaFree(d_Vec1);
cudaFree(d_Total);
}
//::::::::::::::::::::::::::::::::::::::::::CPU::::::::::::::::::::::::::::::::
// Serial reference: add all SIZE elements of Vec1 into *all
// (accumulates onto the existing value of *all).
void h_sum_vec(double *Vec1, double *all){
    double acc = *all;
    for (int i = 0; i < SIZE; i++) {
        acc += Vec1[i];
    }
    *all = acc;
}
//:::::::::::::::::::::::::::: Rutinary Functions
// Set every one of the SIZE entries of Vec to Value.
void Fill_vec(double *Vec,double Value){
    for (int i = 0; i < SIZE; i++) {
        Vec[i] = Value;
    }
}
// Print the whole vector, ten bracketed entries per line.
void Show_vec(double *Vec){
    for (int i = 0; i < SIZE; i++) {
        bool startNewRow = (i % 10 == 0) && (i != 0);
        if (startNewRow) {
            cout << endl;
        }
        cout << "[" << Vec[i] << "] ";
    }
    cout << endl;
}
// Compare the two scalar results (first elements) within a 0.1 tolerance.
void Checksum(double *Answer1 , double *Answer2){
    bool matches = fabs(Answer1[0] - Answer2[0]) < 0.1;
    if (matches) cout << "Nice Work Guy" << endl;
    else cout << "BAD Work Guy" << endl;
}
// :::::::::::::::::::::::::::::::::::Clock Function::::::::::::::::::::::::::::
// Difference between two clock() samples, converted to seconds.
double diffclock(clock_t clock1,clock_t clock2){
    double ticks = clock2 - clock1;
    return ticks / (CLOCKS_PER_SEC / 1); // divide by 1000 for milliseconds
}
// :::::::::::::::::::::::::::::::::::::::Main::::::::::::::::::::::::::::::::.
// Compare a sequential host sum of SIZE doubles against the GPU reduction.
// Fixes vs. original: *Total1 was read before ever being written (h_sum_vec
// accumulates onto it), so the serial result came from uninitialised heap
// memory; Total1 was also never freed.
int main(){
    double T1, T2; // elapsed seconds for the serial / parallel runs
    double *Vec1 = (double*)malloc((SIZE)*sizeof(double));   // input data
    double *Total2 = (double*)malloc((SIZE)*sizeof(double)); // GPU result buffer
    double *Total1 = (double*)malloc(sizeof(double));        // serial result
    // Fill the containers vectors of data
    Fill_vec(Vec1, 1.0);
    Fill_vec(Total2, 0.0);
    *Total1 = 0.0; // h_sum_vec accumulates, so start from zero
    // Sequential run
    clock_t start = clock();
    h_sum_vec(Vec1, Total1);
    clock_t end = clock();
    T1 = diffclock(start, end);
    cout << "Serial Result: " << *Total1 << " At " << T1 << ",Seconds" << endl;
    // Parallel run
    start = clock();
    d_VectorMult(Vec1, Total2);
    end = clock();
    T2 = diffclock(start, end);
    cout << "Parallel Result: " << Total2[0] << " At " << T2 << ",Seconds" << endl;
    cout << "Total Acceleration: " << T1/T2 << ",X" << endl;
    Checksum(Total1, Total2);
    // Release host memory
    free(Vec1);
    free(Total2);
    free(Total1);
    return 0;
}
|
19,712 | /*
To compile:
nvcc --arch=sm_60 -O3 -o mandelbrot mandelbrot.c -lm
To create an image with 4096 x 4096 pixels
./mandelbrot 4096 4096
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
int writeMandelbrot(const char *fileName, int width, int height, float *img, int minI, int maxI);
// Q2a: add include for CUDA header file here:
#define MXITER 1000
typedef struct {
double r;
double i;
}complex_t;
// Iterate z <- z^2 + c starting from z = c; return the iteration at which
// |z| first exceeds 2 (escape), or MXITER if c appears to stay bounded.
__device__ int testpoint(complex_t c){
  complex_t z = c;
  int iter;
  for (iter = 0; iter < MXITER; iter++) {
    // Complex square plus c; the old real part must feed the imaginary update.
    double newRe = (z.r * z.r) - (z.i * z.i) + c.r;
    z.i = z.r * z.i * 2. + c.i;
    z.r = newRe;
    if ((z.r * z.r + z.i * z.i) > 4.0) {
      return iter;
    }
  }
  return iter;
}
// One thread per pixel (m = real axis, n = imaginary axis): record the
// escape-iteration count for pixel (m, n) into count in row-major order.
__global__ void mandelbrotKernel(int Nre, int Nim, complex_t cmin, complex_t dc, float *count){
  int m = threadIdx.x + blockIdx.x*blockDim.x;
  int n = threadIdx.y + blockIdx.y*blockDim.y;
  if (m >= Nre || n >= Nim) return;  // grid may overhang the image
  complex_t c;
  c.r = cmin.r + dc.r*m;
  c.i = cmin.i + dc.i*n;
  count[m + n*Nre] = (float) testpoint(c);
}
// Render an Nre x Nim Mandelbrot image on the GPU, time the kernel with CUDA
// events, and write the result to mandelbrot.ppm.
// usage: ./mandelbrot 4096 4096
// Fixes vs. original: the device buffer and both events were leaked, and the
// unreachable `return 0` after exit(0) is gone.
int main(int argc, char **argv){
  int Nre = (argc==3) ? atoi(argv[1]): 4096;
  int Nim = (argc==3) ? atoi(argv[2]): 4096;
  // host-side iteration counts
  float *count = (float*) malloc(Nre*Nim*sizeof(float));
  // Bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm= .125547;
  const float diam = 0.151579;
  complex_t cmin;
  complex_t cmax;
  complex_t dc;
  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;
  // step sizes so the grid spans the box endpoints inclusively
  dc.r = (cmax.r-cmin.r)/(Nre-1);
  dc.i = (cmax.i-cmin.i)/(Nim-1);
  cudaEvent_t start, end;
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  float *c_count;
  cudaMalloc(&c_count, Nre*Nim*sizeof(float));
  // 16x16 blocks with a ceil-div grid so every pixel is covered.
  int BX = 16, BY = 16;
  int GX = (Nre+BX-1)/BX, GY = (Nim+BY-1)/BY;
  dim3 B(BX,BY,1);
  dim3 G(GX,GY,1);
  cudaEventRecord(start);
  mandelbrotKernel <<< G, B >>> (Nre, Nim, cmin, dc, c_count);
  cudaEventRecord(end);
  // The blocking copy also synchronizes with the kernel.
  cudaMemcpy(count, c_count, Nre*Nim*sizeof(float), cudaMemcpyDeviceToHost);
  float elapsed;
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&elapsed, start, end);
  elapsed /= 1000.;  // ms -> s
  printf("elapsed = %f\n", elapsed);
  printf("Printing mandelbrot.ppm...");
  writeMandelbrot("mandelbrot.ppm", Nre, Nim, count, 0, 80);
  printf("done.\n");
  // Release device and host resources before exiting.
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  cudaFree(c_count);
  free(count);
  return 0;
}
/* Write a width x height RGB image (3 bytes per pixel, row-major) as a
 * binary P6 PPM file.
 * Fix vs. original: fopen failure was not checked, so an unwritable path
 * crashed in fprintf; the error is now reported and the function returns. */
void saveppm(const char *filename, unsigned char *img, int width, int height){
  FILE *f = fopen(filename, "wb");
  if (f == NULL) {
    fprintf(stderr, "saveppm: cannot open %s for writing\n", filename);
    return;
  }
  /* PPM header: magic, dimensions, max channel value */
  fprintf(f, "P6 %d %d %d\n", width, height, 255);
  /* image payload: 3 bytes per pixel */
  fwrite(img, 3, width*height, f);
  fclose(f);
}
// Map iteration counts to an RGB palette and write the image via saveppm.
// The count is rescaled to I in [0, 768*sqrt((max-min)/(maxI-minI))]; each
// palette band fades one channel.  Pixels whose I falls outside every band
// (I >= 2048, or the gap [1024,1536) overlapping band 4) keep the calloc'd
// black.  Always returns 1.
int writeMandelbrot(const char *fileName, int width, int height, float *img, int minI, int maxI){
int n, m;
// calloc zero-fills, so unmapped pixels default to black.
unsigned char *rgb = (unsigned char*) calloc(3*width*height, sizeof(unsigned char));
for(n=0;n<height;++n){
for(m=0;m<width;++m){
int id = m+n*width;
int I = (int) (768*sqrt((double)(img[id]-minI)/(maxI-minI)));
// change this to change palette
if(I<256) rgb[3*id+2] = 255-I;
else if(I<512) rgb[3*id+1] = 511-I;
else if(I<768) rgb[3*id+0] = 767-I;
else if(I<1024) rgb[3*id+0] = 1023-I;
else if(I<1536) rgb[3*id+1] = 1535-I;
else if(I<2048) rgb[3*id+2] = 2047-I;
}
}
saveppm(fileName, rgb, width, height);
free(rgb);
return 1;
}
|
19,713 | #include "includes.h"
// Accumulate one output element of a "valid" 2D convolution (strictly a
// cross-correlation: the kernel is not flipped) at (conv_col, conv_row).
// d_matrix is size_x wide; the output grid is max_col wide.
__device__ void convolution(int conv_col, int conv_row, float *d_kernel, int k_size, float *d_matrix, int size_x, int size_y, float *d_conv, int max_row, int max_col){
    int out = conv_col + conv_row * max_col;
    d_conv[out] = 0;
    for (int kr = 0; kr < k_size; kr++) {
        for (int kc = 0; kc < k_size; kc++) {
            float weight = d_kernel[kc + kr * k_size];
            float sample = d_matrix[(conv_col + kc) + (conv_row + kr) * size_x];
            d_conv[out] += weight * sample;
        }
    }
}
// One thread per output element; threads outside the valid region exit early.
__global__ void valid_convolution(float *d_kernel, int k_size, float *d_matrix, int size_x, int size_y, float *d_conv, int max_row, int max_col){
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (row >= max_row || col >= max_col) return;
    convolution(col, row, d_kernel, k_size, d_matrix, size_x, size_y, d_conv, max_row, max_col);
}
19,714 | /*
* Connected Components in the GPU
* Paper Source: An Optimized Union-Find Algorithm for Connected
* Components Labeling Using GPUs
* Adapted from: https://github.com/victormatheus/CCL-GPU
* Modified by: Imanol Luengo <imaluengo@gmail.com>
*/
typedef unsigned int uint32;
#define MAX_UINT32 0xFFFFFFFF
// Union-find root lookup: chase parent links to the root, then relink the
// starting entry directly to it ("fast linking" — one-level path compression).
__device__
uint32 find(uint32* buf, uint32 x) {
    uint32 start = x;
    uint32 root = x;
    while (buf[root] != root) {
        root = buf[root];
    }
    buf[start] = root;
    return root;
}
// Lock-free union of the sets containing g1 and g2.  Finds both roots and
// links the larger-labelled root to the smaller via atomicMin; if another
// thread raced us, atomicMin's returned old value becomes the new candidate
// root and the loop retries until the link sticks (or both roots coincide).
__device__
void findAndUnion(uint32* buf, uint32 g1, uint32 g2) {
bool done;
uint32 old;
do {
g1 = find(buf, g1);
g2 = find(buf, g2);
if (g1 < g2) {
// Try to attach g2 under g1; success iff nobody changed buf[g2] first.
old = atomicMin(&buf[g2], g1);
done = (old == g2);
g2 = old;
} else if (g2 < g1) {
old = atomicMin(&buf[g1], g2);
done = (old == g1);
g1 = old;
} else {
// Same root: already in one set.
done = true;
}
} while ( !done );
}
// Per-block union-find labelling in shared memory.
// Shared layout: s_buffer[0 .. bsize) holds per-pixel labels (initialised to
// self-links), s_buffer[bsize .. 2*bsize) holds the pixel values — so the
// launcher must pass 2 * blockDim-volume * sizeof(uint32) of dynamic shared
// memory (launch site not visible here; confirm).
// Each in-bounds thread unions itself with its -x/-y/-z neighbour when the
// values match, then rewrites its block-local root as a global voxel index.
// NOTE(review): out-of-bounds threads return before the later __syncthreads
// calls — safe only if the remaining threads still satisfy the barrier
// semantics on the target architecture; verify with synccheck.
__global__
void uf_local(const uint32* in, uint32* out, int3 shape) {
int3 p;
p.z = blockIdx.z * blockDim.z + threadIdx.z;
p.y = blockIdx.y * blockDim.y + threadIdx.y;
p.x = blockIdx.x * blockDim.x + threadIdx.x;
long image_plane = (shape.y * shape.x);
long block_plane = (blockDim.y * blockDim.x);
uint32 gidx = p.z * image_plane + p.y * shape.x + p.x;
uint32 bidx = threadIdx.z * block_plane + \
threadIdx.y * blockDim.x + \
threadIdx.x;
long bsize = blockDim.z * blockDim.y * blockDim.x;
extern __shared__ uint32 s_buffer[];
bool in_limits = p.z < shape.z && p.y < shape.y && p.x < shape.x;
// Each label starts as its own root; out-of-volume slots get value 0.
s_buffer[bidx] = bidx;
s_buffer[bsize + bidx] = in_limits? in[p.z * image_plane + p.y * shape.x + p.x] : 0;
__syncthreads();
if ( !in_limits ) {return;}
uint32 v = s_buffer[bsize + bidx];
// Union with the -x, -y, -z neighbours (in that order) when values match.
if ( threadIdx.x > 0 && s_buffer[bsize + bidx - 1] == v ) {
findAndUnion(s_buffer, bidx, bidx - 1);
}
__syncthreads();
if ( threadIdx.y > 0 && s_buffer[bsize + bidx - blockDim.x] == v ) {
findAndUnion(s_buffer, bidx, bidx - blockDim.x);
}
__syncthreads();
if ( threadIdx.z > 0 && s_buffer[bsize + bidx - block_plane] == v ) {
findAndUnion(s_buffer, bidx, bidx - block_plane);
}
__syncthreads();
// Convert the block-local root (f) back into a global voxel index.
uint32 f = find(s_buffer, bidx);
uint32 aux = f % block_plane;
uint32 fz = f / block_plane;
uint32 fy = aux / blockDim.x;
uint32 fx = aux % blockDim.x;
out[gidx] = (blockIdx.z * blockDim.z + fz) * image_plane + \
(blockIdx.y * blockDim.y + fy) * shape.x + \
(blockIdx.x * blockDim.x + fx);
}
// Merge labels across block seams: only threads on a block's leading face
// (threadIdx == 0 along an axis) look backwards to the neighbouring block
// and union when the underlying voxel values match.
__global__
void uf_global(const uint32* in, uint32* out, int3 shape) {
    int3 p;
    p.z = blockIdx.z * blockDim.z + threadIdx.z;
    p.y = blockIdx.y * blockDim.y + threadIdx.y;
    p.x = blockIdx.x * blockDim.x + threadIdx.x;
    long plane = (shape.y * shape.x);
    uint32 gidx = p.z * plane + p.y * shape.x + p.x;
    // Out-of-volume threads have nothing to merge.
    if (p.z >= shape.z || p.y >= shape.y || p.x >= shape.x) {
        return;
    }
    uint32 v = in[gidx];
    if (p.z > 0 && threadIdx.z == 0 && in[gidx - plane] == v) {
        findAndUnion(out, gidx, gidx - plane);
    }
    if (p.y > 0 && threadIdx.y == 0 && in[gidx - shape.x] == v) {
        findAndUnion(out, gidx, gidx - shape.x);
    }
    if (p.x > 0 && threadIdx.x == 0 && in[gidx - 1] == v) {
        findAndUnion(out, gidx, gidx - 1);
    }
}
// Flatten every label to its set's root so each connected component ends
// with one canonical id.
__global__
void uf_final(uint32* labels, int3 shape) {
    int3 p;
    p.z = blockIdx.z * blockDim.z + threadIdx.z;
    p.y = blockIdx.y * blockDim.y + threadIdx.y;
    p.x = blockIdx.x * blockDim.x + threadIdx.x;
    if (p.z >= shape.z || p.y >= shape.y || p.x >= shape.x) return;
    long gidx = p.z * shape.y * shape.x + p.y * shape.x + p.x;
    labels[gidx] = find(labels, gidx);
}
|
19,715 | #include <iostream>
#include <cstdio>
#include <cstdlib>
// #include <helper_cuda.h>
// #include <helper_string.h>
/* Run with only HOST code *\
// Say goodbye to the universe
int main(void) {
printf("Heat death boogaloo!\n");
return 0;
}
*/
/* Run with DEVICEEEEEEE code */
// Empty kernel: exists only to demonstrate a __global__ launch.
__global__ void mykernel(void) {
}
// Launch the (empty) kernel once, then print from the host.
// Fix vs. original: the launch was never synchronized, so the process could
// exit before the kernel ran and any launch failure went unnoticed.
int main(void) {
    mykernel<<<1,1>>>();
    cudaDeviceSynchronize();
    printf("HEat death boogaloo Universe!\n");
    return 0;
}
/*
blah blah comment not conflicting
__global__ indicates function that 1) runs on DEVICE , 2) called from the HOST
<<< >>> indicates function that runs on DEVICE and is called from the HOST
*////////
|
19,716 | /*
Program to add 2 matrics of size M * N in CUDA C++
Using grid of one block
Block contains M*N threads arranged in M rows and N columns
*/
#include<iostream>
#include "cuda.h"
#include "cuda_runtime.h"
#define M 2
#define N 9
// Element-wise add of two row-major M x N matrices.  Launched as a single
// block of (M, N) threads, so (threadIdx.x, threadIdx.y) maps to (row, col).
__global__ void matAdd(int* a, int* b, int* c)
{
    int i = threadIdx.x * blockDim.y + threadIdx.y;
    c[i] = a[i] + b[i];
}
// Print an M x N row-major matrix, one row per line, then a blank line.
__host__ void print_matrix(int* matrix)
{
    for (int row = 0; row < M; ++row)
    {
        for (int col = 0; col < N; ++col)
        {
            std::cout << matrix[row * N + col] << ' ';
        }
        std::cout << "\n";
    }
    std::cout << "\n";
}
/*
 Add two M x N matrices with one block of M*N threads.
 Fixes vs. original:
  * `new int[size]` used the byte count (M*N*sizeof(int)) as the element
    count, over-allocating each host matrix by a factor of sizeof(int);
  * scalar `delete` on memory from `new[]` is undefined behaviour — delete[].
*/
int main()
{
    const int count = M * N;               // number of elements
    const int size = count * sizeof(int);  // number of bytes (device copies)
    int* a = new int[count];
    int* b = new int[count];
    int* c = new int[count];
    for(int i=0; i<M; ++i)
    {
        for(int j=0; j<N; ++j)
        {
            a[i*N + j] = i; //Fill your own values here
            b[i*N + j] = j; //Fill your own values here
        }
    }
    std::cout<<"MATRIX A =\n";
    print_matrix(a);
    std::cout<<"MATRIX B =\n";
    print_matrix(b);
    /* Device buffers */
    int *d_a, *d_b, *d_c;
    cudaMalloc((void**)&d_a, size);
    cudaMalloc((void**)&d_b, size);
    cudaMalloc((void**)&d_c, size);
    /* Copy inputs host -> device */
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    /*
     One block of M*N threads arranged as an M x N rectangle; the kernel
     flattens with idx = threadIdx.x * blockDim.y + threadIdx.y.
    */
    dim3 blockSize(M, N);
    matAdd<<<1, blockSize>>>(d_a, d_b, d_c);
    cudaDeviceSynchronize();
    /* Copy result back to the host */
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    std::cout<<"A + B =\n";
    print_matrix(c);
    std::cout<<'\n';
    /* Cleanup device and host memory */
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    delete[] a;
    delete[] b;
    delete[] c;
    return 0;
}
|
19,717 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sequence.h>
#include <thrust/transform.h>
#include <math.h>
#include <stdio.h>
#define N 64
using namespace thrust::placeholders;
// Unary functor mapping x -> sqrt(x), callable from host or device code
// (used as the transform operator in main).
struct SqrtOf {
__host__ __device__
float operator()(float x) {
return sqrt(x);
}
};
// Build x = linspace(0, 1, N) on the device, compute |x - ref| in two thrust
// passes (squared distance then sqrt), and print both vectors on the host.
int main() {
    const float ref = 0.5;
    thrust::device_vector<float> d_x(N);
    thrust::device_vector<float> d_dist(N);
    // 0, 1, 2, ...  then rescale into [0, 1].
    thrust::sequence(d_x.begin(), d_x.end());
    thrust::transform(d_x.begin(), d_x.end(), d_x.begin(), _1 / (N - 1));
    // (x - ref)^2, then elementwise square root.
    thrust::transform(d_x.begin(), d_x.end(), d_dist.begin(), (_1 - ref)*(_1 - ref));
    thrust::transform(d_dist.begin(), d_dist.end(), d_dist.begin(), SqrtOf());
    thrust::host_vector<float> h_x = d_x;
    thrust::host_vector<float> h_dist = d_dist;
    for (int i = 0; i < N; ++i) {
        printf("x=%3.3f, dist=%3.3f\n", h_x[i], h_dist[i]);
    }
    return 0;
}
|
19,718 | #include <stdio.h>
// Each launched thread prints its own thread/block ids.  Device-side printf
// output is buffered until the host synchronizes.
__global__ void cuda_hello_world() {
printf("Hello World from GPU! [ThreadID = %d, BlockID = %d]\n",
threadIdx.x,
blockIdx.x);
}
// Launch one block of 256 threads, then block until the device finishes so
// the buffered printf output is flushed before the process exits.
int main() {
cuda_hello_world<<<1, 256>>>();
cudaDeviceSynchronize();
return 0;
}
|
19,719 | #include "includes.h"
// Elementwise backward pass: out = (in - one_hot) / batches for each of the
// `size` entries, one thread per element.
// (This matches the softmax+cross-entropy gradient form (p - y)/B — confirm
// against the forward pass, which is not visible here.)
__global__ void naive_backward_cross_entropy(float *in, int *one_hot_classes, float batches, int size, float *out)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < size) {
        out[idx] = (in[idx] - one_hot_classes[idx]) / batches;
    }
}
19,720 | #include "includes.h"
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size */
// Thread block size
#define BLOCK_SIZE 16
/* Matrices */
float A[MAXN][MAXN], B[MAXN][MAXN];
/* junk */
#define randm() 4|2[uid]&3
/* Prototype */
/* ------------------ Cuda Code --------------------- */
/****** You will replace this routine with your own parallel version *******/
/* Provided global variables are MAXN, N, A[][] and B[][],
* defined in the beginning of this code. B[][] is initialized to zeros.
*/
/* returns a seed for srand based on the time */
// Per-column (blockIdx.x) standard deviation: each thread squares its
// element's deviation from the column mean into dynamic shared memory, an
// interleaved tree reduction sums them along threadIdx.y, and thread 0
// writes sqrt(sum / N).  Requires blockDim.y * sizeof(float) dynamic shared
// memory — launcher not visible here, confirm.
// NOTE(review): the guard `tid + s < N` compares against the matrix size N
// rather than blockDim.y, and `tid % (2*s)` is the divergent/interleaved
// reduction variant — verify behaviour when blockDim.y != N.
__global__ void matrixSD(float* d_in, float* d_mean, float* d_sd, int N)
{
extern __shared__ float sdata1[];
//each thread loads one element from global to shared mem
int idx_x = blockIdx.x * blockDim.x + threadIdx.x;
int idx_y = blockIdx.y * blockDim.y + threadIdx.y;
unsigned int tid = threadIdx.y;
unsigned int i = idx_y * N + idx_x;
// squared deviation from this column's mean
sdata1[tid] = powf(d_in[i] - d_mean[blockIdx.x], 2.0);
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.y; s *= 2)
{
if(tid +s < N)
{
if(tid % (2*s) == 0)
{
sdata1[tid] += sdata1[tid + s];
}
}
__syncthreads();
}
// write result for this block to global mem
if(tid == 0)
d_sd[blockIdx.x] = sqrtf(sdata1[0]/(float) N);
} |
19,721 | //Alfred Shaker
//10/23/2015
//Homework 2
#include <stdio.h>
// Print driver version, SM count, an estimated CUDA-core count derived from
// the compute capability, memory sizes, and launch limits for one device.
// Fix vs. original: only Fermi/Kepler/Maxwell were recognised, so Pascal,
// Volta, Turing and Ampere GPUs printed "Unknown device type".  Cores-per-SM
// values follow NVIDIA's compute-capability table.
void printDeviceProperties(cudaDeviceProp devProp)
{
    //get the cuda driver version (encoded as 1000*major + 10*minor)
    int driverVersion = 0;
    cudaDriverGetVersion(&driverVersion);
    printf("Version Number: %d\n", driverVersion/1000);
    //get the number of multiprocessors
    int mp = devProp.multiProcessorCount;
    printf("Number of MultipPrcessors: %d\n", mp);
    //cores per SM depend on the architecture (compute capability major.minor)
    int cores = 0;
    switch(devProp.major)
    {
    case 2: //Fermi: 48 cores/SM on 2.1, 32 on 2.0
        cores = mp * ((devProp.minor == 1) ? 48 : 32);
        printf("Number of cores: %d\n", cores);
        break;
    case 3: //Kepler
        cores = mp * 192;
        printf("Number of cores: %d\n", cores);
        break;
    case 5: //Maxwell
        cores = mp * 128;
        printf("Number of cores: %d\n", cores);
        break;
    case 6: //Pascal: 64 cores/SM on GP100 (6.0), 128 on GP10x (6.1/6.2)
        cores = mp * ((devProp.minor == 0) ? 64 : 128);
        printf("Number of cores: %d\n", cores);
        break;
    case 7: //Volta (7.0) and Turing (7.5): 64 cores/SM
        cores = mp * 64;
        printf("Number of cores: %d\n", cores);
        break;
    case 8: //Ampere: 64 cores/SM on GA100 (8.0), 128 on GA10x (8.6/8.7)
        cores = mp * ((devProp.minor == 0) ? 64 : 128);
        printf("Number of cores: %d\n", cores);
        break;
    default:
        printf("Unknown device type\n");
        break;
    }
    int kb = 1024;
    int mb = kb * kb;
    //total global memory in megabytes
    int globalMemory = devProp.totalGlobalMem / mb;
    printf("Total Global Memory: %d mb\n", globalMemory);
    //shared memory per block in kilobytes
    int sharedMemory = devProp.sharedMemPerBlock / kb;
    printf("Shared Memory Per Block: %d kb\n", sharedMemory);
    //maximum number of threads per block
    int maxThreads = devProp.maxThreadsPerBlock;
    printf("Maximum Threads Per Block: %d\n", maxThreads);
    //x-dimension limits only, matching the original report
    int maxBlockDim = devProp.maxThreadsDim[0];
    printf("Maximum Size of Block Dimensions: %d\n", maxBlockDim);
    int maxGridDim = devProp.maxGridSize[0];
    printf("Maximum Size of Grid Dimensions: %d\n", maxGridDim);
}
// Enumerate every CUDA device and dump its properties.
int main()
{
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    printf("Total number of Devices: %d\n", deviceCount);
    // Query and print each device in turn (1-based in the banner).
    for (int dev = 0; dev < deviceCount; ++dev)
    {
        printf("Showing info for device number %d\n", dev + 1);
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printDeviceProperties(props);
    }
    return 0;
}
|
19,722 | #include <stdlib.h>
#include <stdio.h>
#include <vector>
#include <numeric>
#include <iostream>
#include<chrono>
// Here you can set the device ID that was assigned to you
#define MYDEVICE 0
constexpr bool DEBUG = false;
constexpr size_t BLOCK_SIZE = 512;
constexpr size_t SHARE_BLOCK_SIZE = 2*512;
// Pseudo-random double in [0, 1] (rand() spans 0..RAND_MAX inclusive).
double random_double(void)
{
    const double raw = rand();
    return raw / RAND_MAX;
}
// Simple utility function to check for CUDA runtime errors
void checkCUDAError(const char *msg);
// Part 1 of 6: implement the kernel
// One partial sum per block via an "origami" fold: each pass mirrors the top
// half of the logical shared range onto the bottom half (s_index pairs with
// sdata_size-1-s_index), halving sdata_size until one element remains.
// __syncthreads_or keeps every thread looping in lockstep until all `loop`
// flags drop.  Thread 0 writes the block's partial to per_block_results.
// NOTE(review): sdata has SHARE_BLOCK_SIZE (= 2*BLOCK_SIZE) slots but only
// blockDim.x of them are written before the fold; if the launch uses
// BLOCK_SIZE threads the upper half is read uninitialised on the first pass.
// The fold guard also admits r_index == sdata_size (one past the live
// range).  Verify with initcheck before trusting the sums.
__global__ void block_sum(const double *input,
double *per_block_results,
const size_t n)
{
//fill me
__shared__ double sdata[SHARE_BLOCK_SIZE];
int g_index = threadIdx.x + blockIdx.x * blockDim.x;
int s_index = threadIdx.x;
// Out-of-range lanes contribute 0 so the fold needs no edge cases.
sdata[s_index] = (g_index < n) ? input[g_index] : 0.;
// Synchronize (ensure all the data is available)
__syncthreads();
int sdata_size = SHARE_BLOCK_SIZE;
bool loop = (n == 1) ? false : true;
while (__syncthreads_or(loop) ) {
int r_index = sdata_size-1-s_index;
// reduction origami: fold the mirrored top half onto the bottom half
if(r_index >= (sdata_size+1)/2 && r_index <= sdata_size) {
sdata[s_index] += sdata[r_index];
// sdata[r_index] = 0;
}
sdata_size = (sdata_size+1) / 2;
if(sdata_size==1) loop = false;
}
if (s_index==0)
per_block_results[blockIdx.x] = sdata[s_index];
if (DEBUG) {
if (blockIdx.x < 20 && (g_index < n) && s_index < 3 )
printf("per_block_results[%d]] %f %d\n",blockIdx.x,per_block_results[blockIdx.x],s_index);
}
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
// Sum an array on the host (reference) and on the device (two-stage
// block_sum reduction), reporting both results and timings.
int main(int argc, char* argv[])
{
    // Problem size: default 2^18 elements, overridable from the command line.
    const int num_elements = (argc > 1) ? std::atoi(argv[1]) : 1<<18;
    srand(time(NULL));
    // Generate the input on the host; the ramp 0..n-1 makes the sum checkable.
    std::vector<double> h_input(num_elements);
    for(int i = 0; i < h_input.size(); ++i) {
        h_input[i] = double(i);
        if (DEBUG) std::cout << " " << h_input[i];
    }
    if (DEBUG) std::cout << std::endl;
    {
        // CPU reference sum, timed for comparison.
        auto t0 = std::chrono::high_resolution_clock::now();
        const double host_result = std::accumulate(h_input.begin(), h_input.end(), 0.);
        auto t1 = std::chrono::high_resolution_clock::now();
        std::chrono::duration<float> d = t1 - t0;
        std::cerr << "Host sum: " << host_result << " in " << d.count() << " s" << std::endl;
    }
    {
        auto t0 = std::chrono::high_resolution_clock::now();
        // Device input buffer. NOTE(review): h_input is pageable memory, so
        // this cudaMemcpyAsync is not truly asynchronous, but it is still
        // ordered before the kernels on the default stream, which is all
        // correctness requires here.
        size_t memSize = num_elements*sizeof(double);
        double *d_input = 0;
        cudaMalloc( &d_input,memSize );
        cudaMemcpyAsync( d_input, &h_input[0], memSize, cudaMemcpyHostToDevice );
        auto t = std::chrono::high_resolution_clock::now();
        {
            auto t1 = std::chrono::high_resolution_clock::now();
            std::chrono::duration<float> d = t1 - t0;
            std::cout << "h2d cudaMemcpyAsync " << d.count() << " s" << std::endl;
        }
        dim3 num_blocks, block_size;
        block_size.x = BLOCK_SIZE;
        num_blocks.x = ( num_elements + block_size.x - 1 )/block_size.x;  // ceil-div
        // One partial sum per block.
        double *d_partial_sums_and_total = 0;
        memSize = num_blocks.x*sizeof(double);
        cudaMalloc( &d_partial_sums_and_total,memSize );
        t = std::chrono::high_resolution_clock::now();
        {
            // Stage 1: per-block partial sums.
            block_sum<<<num_blocks,block_size>>>(d_input, d_partial_sums_and_total, num_elements);
            auto t1 = std::chrono::high_resolution_clock::now();
            std::chrono::duration<float> d = t1 - t0;
            std::cout << "block_sum<<<num_blocks,block_size>>> " << d.count() << " s " << std::chrono::duration<float>(t1-t).count() << " s" << std::endl;
        }
        t = std::chrono::high_resolution_clock::now();
        {
            // Stage 2: reduce the partials with a single block.
            // NOTE(review): this pass loads only the first block_size.x
            // partials, so it assumes num_blocks.x <= BLOCK_SIZE (true for
            // the default 2^18 elements). Larger inputs would need another
            // reduction level -- TODO confirm intended input range.
            block_sum<<<1,block_size>>>(d_partial_sums_and_total, &d_partial_sums_and_total[0], num_blocks.x);
            auto t1 = std::chrono::high_resolution_clock::now();
            std::chrono::duration<float> d = t1 - t0;
            std::cout << "block_sum<<<1,block_size>>> " << d.count() << " s " << std::chrono::duration<float>(t1-t).count() << " s" << std::endl;
        }
        // check if kernel execution generated an error
        checkCUDAError("kernel execution");
        t = std::chrono::high_resolution_clock::now();
        {
            // Copy the result back to the host.
            double device_result = 0;
            memSize = sizeof(double);
            // BUG FIX: the original used cudaMemcpyAsync here and then read
            // device_result with no synchronization. Use a blocking cudaMemcpy
            // so the value is guaranteed to be on the host before printing.
            cudaMemcpy(&device_result, &d_partial_sums_and_total[0], memSize, cudaMemcpyDeviceToHost);
            // Deallocate device memory.
            cudaFree(d_input);
            cudaFree(d_partial_sums_and_total);
            auto t1 =std::chrono::high_resolution_clock::now();
            std::chrono::duration<float> d = t1 - t0;
            std::cout << "Device sum: " << device_result << " in " << d.count() << " s " << std::chrono::duration<float>(t1-t).count() << " s" << std::endl;
        }
    }
    return 0;
}
// Abort with a diagnostic message if the most recent CUDA runtime call
// (or a prior asynchronous launch) left an error pending.
void checkCUDAError(const char *msg)
{
    const cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        std::cerr << "Cuda error: " << msg << " " << cudaGetErrorString(status) << std::endl;
        exit(-1);
    }
}
|
19,723 | #include <iostream>
// Element-wise c[i] = a[i] + b[i]; one thread per element with a bounds
// guard. The 100-iteration loop rewrites the same value and exists only to
// lengthen the kernel's runtime for the surrounding benchmark.
__global__ void vectorAdd(int *a, int *b, int *c, int n){
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n)
        return;
    for (int rep = 0; rep < 100; ++rep) {
        c[idx] = a[idx] + b[idx];
    }
}
// Benchmark driver: interleaves vectorAdd launches with host-side prefix
// sums so GPU and CPU work can overlap between cudaDeviceSynchronize calls.
int main(void){
    int * a, * b, * c;
    int * d_a, * d_b, * d_c;
    int * temp;
    int n = 1<<24;
    // BUG FIX: the original allocated n*sizeof(int) ints (4x too much memory);
    // new int[n] already allocates n ints.
    a = new int[n];
    b = new int[n];
    c = new int[n];
    temp = new int[n];
    cudaMalloc(&d_a, n*sizeof(int));
    cudaMalloc(&d_b, n*sizeof(int));
    cudaMalloc(&d_c, n*sizeof(int));
    for(int i=0;i<n;i++){
        a[i] = 3;
        b[i] = 5;
    }
    int blockSize = 256;
    int numBlocks = n/256;  // n is a power of two, so this divides exactly
    cudaMemcpy(d_a, a, n*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, n*sizeof(int), cudaMemcpyHostToDevice);
    vectorAdd<<<numBlocks,blockSize>>>(d_a,d_b,d_c,n);
    cudaDeviceSynchronize();
    // CPU busywork: inclusive prefix sum of a.
    temp[0] = a[0];
    for(int i=1;i<n;i++)
        temp[i] = temp[i-1]+a[i];
    vectorAdd<<<numBlocks, blockSize>>>(d_a,d_b,d_c,n);
    cudaDeviceSynchronize();
    temp[0] = a[0];
    for(int i=1;i<n;i++)
        temp[i] = temp[i-1]+a[i];
    cudaDeviceSynchronize();
    temp[0] = b[0];
    for(int i=1;i<n;i++)
        temp[i] = temp[i-1]+b[i];
    vectorAdd<<<numBlocks, blockSize>>>(d_a,d_b,d_c,n);
    cudaDeviceSynchronize();
    cudaMemcpy(c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
    temp[0] = c[0];
    for(int i=1;i<n;i++)
        temp[i] = temp[i-1]+c[i];
    vectorAdd<<<numBlocks, blockSize>>>(d_a,d_b,d_c,n);
    cudaDeviceSynchronize();
    vectorAdd<<<numBlocks, blockSize>>>(d_a,d_b,d_c,n);
    cudaDeviceSynchronize();
    // BUG FIX: the original called cudaFree() on the new[]-allocated host
    // arrays (undefined behavior) and leaked the device buffers, and used
    // scalar delete on the array temp. Free each allocation with its
    // matching deallocator.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    delete[] a;
    delete[] b;
    delete[] c;
    delete[] temp;
    return 0;
}
|
19,724 | #include <iostream>
using namespace std;
#include <thrust/reduce.h>
#include <thrust/sequence.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
// Iota fill: each element of a receives its own index (a[i] = i).
__global__ void fillKernel(int *a, int n) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= n) return;
    a[idx] = idx;
}
// Host wrapper: launch fillKernel over n elements using 512-thread blocks.
void fill(int *d_a, int n) {
    const int threads = 512;
    const int blocks = (n + threads - 1) / threads;  // ceil-div
    fillKernel <<< blocks, threads >>> (d_a, n);
}
// Fill a device vector with 0..N-1 via a custom kernel, reduce it with
// Thrust, and compare against the same sum accumulated on the host.
int main() {
    const int N = 50000;
    thrust::device_vector<int> a(N);
    fill(thrust::raw_pointer_cast(&a[0]), N);
    const int sumA = thrust::reduce(a.begin(), a.end(), 0);
    int sumCheck = 0;
    for (int v = 0; v < N; ++v) {
        sumCheck += v;
    }
    cout << (sumA == sumCheck ? "ok" : "fail") << endl;
    return 0;
}
19,725 | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <complex.h>
#include <cuda_runtime.h>
#include <utility>
#include <sys/time.h>
#define K 3
#define BLCH 8
#define BLCW 32
__constant__ float filter[K*K];
/*
 * CPU tiled "naive" convolution reference: for each of imgN images, walks
 * the bh x bw tiles of the image (interior, right edge, bottom edge, and
 * bottom-right corner handled separately) and accumulates an nF x nF
 * filter response into `out`. Always returns 0.
 *
 * NOTE(review) -- several index computations look suspect and should be
 * verified against an independent reference before this function is used:
 *  - `inm += mj;` / `ind += mj;` (and similarly `inf += fj; inm += fj;`,
 *    `inm += fi * imgW;`) mutate the running index by the loop variable on
 *    every iteration, so the added offset grows cumulatively (0,1,3,6,...)
 *    instead of being base+mj / base+fj -- TODO confirm intended.
 *  - the center-tile section uses `inf = fi * convW` while the other three
 *    sections use `inf = fi * nF`; `f` holds nF*nF coefficients, so the
 *    convW stride would index far out of bounds -- verify.
 *  - the bottom-tile section offsets columns by `(j-1)*bw` where the other
 *    sections use `j*bw` / `(bbw-1)*bw`; at j==0 this is negative -- verify.
 */
int compute_tiled_naive(float *img, float *f, float * out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW) {
//compute tile num
int bbw = imgW / bw;
int bbh = imgH / bh;
for (int i = 0; i < imgN; i++){
// per-image base offsets into out (con) and img (imgg)
int con = i * convW * convH;
int imgg = i * imgW * imgH;
//compute center tiles
for (int j = 0; j < bbh-1; j++){
for (int k = 0; k < bbw-1; k++){
for (int mi = 0; mi < bh; mi++){
int inm = imgW * (j*bh + mi) + k*bw;
int ind = convW * (j*bh + mi) + k*bw;
for (int mj = 0; mj < bw; mj++){
// NOTE(review): cumulative, see header
inm += mj;
ind += mj;
for (int fi = 0; fi < nF; fi++){
// NOTE(review): convW stride into the nF*nF filter, see header
int inf = fi * convW;
inm += fi * imgW;
for (int fj = 0; fj < nF; fj++){
inf += fj;
inm += fj;
out[ind + con] += img[inm + imgg] * f[inf];
}
}
}
}
}
}
//compute right most tiles
for (int j = 0; j < bbh-1; j++){
for (int mi = 0; mi < bh; mi++){
int inm = imgW * (j*bh + mi) + (bbw-1)*bw;
int ind = convW * (j*bh + mi) + (bbw-1)*bw;
// edge tile: shrink width so the filter window stays inside the image
for (int mj = 0; mj < (bw - nF + 1); mj++){
inm += mj;
ind += mj;
for (int fi = 0; fi < nF; fi++){
int inf = fi * nF;
inm += fi * imgW;
for (int fj = 0; fj < nF; fj++){
inf += fj;
inm += fj;
out[ind + con] += img[inm + imgg] * f[inf];
}
}
}
}
}
//compute bottom tiles
for (int j = 0; j < bbw-1; j++){
for (int mi = 0; mi < (bh -nF + 1); mi++){
// NOTE(review): (j-1)*bw is negative at j==0, see header
int inm = imgW * ((bbh-1)*bh + mi) + (j-1)*bw;
int ind = convW * ((bbh-1)*bh + mi) + (j-1)*bw;
for (int mj = 0; mj < bw; mj++){
inm += mj;
ind += mj;
for (int fi = 0; fi < nF; fi++){
int inf = fi * nF;
inm += fi * imgW;
for (int fj = 0; fj < nF; fj++){
inf += fj;
inm += fj;
out[ind + con] += img[inm + imgg] * f[inf];
}
}
}
}
}
//compute the final tile
for (int mi = 0; mi < (bh -nF + 1); mi++){
int inm = imgW * ((bbh-1)*bh + mi) + (bbw-1)*bw;
int ind = convW * ((bbh-1)*bh + mi) + (bbw-1)*bw;
for (int mj = 0; mj < (bw - nF + 1); mj++){
inm += mj;
ind += mj;
for (int fi = 0; fi < nF; fi++){
int inf = fi * nF;
inm += fi * imgW;
for (int fj = 0; fj < nF; fj++){
inf += fj;
inm += fj;
out[ind + con] += img[inm + imgg] * f[inf];
}
}
}
}
}
return 0;
}
// Naive direct convolution: one thread per output pixel, looping over all
// imgN images, with the nF x nF filter read from constant memory.
// Fixes two defects in the original:
//  * the bounds guard tested idX against convH and idY against convW --
//    the reverse of how they are used in the row-major index idY*convW+idX
//    (benign only when convH == convW);
//  * `inm += fi * imgW` accumulated the row offset across filter rows, so
//    from the third filter row on the input index drifted below the window.
// bh/bw are unused here but kept so the launch-site signature is unchanged.
__global__ void compute_gpu_naive(float *img, float *out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW){
    int idX = blockDim.x * blockIdx.x + threadIdx.x;  // output column
    int idY = blockDim.y * blockIdx.y + threadIdx.y;  // output row
    if (idX >= convW || idY >= convH)
        return;
    for (int i = 0; i < imgN; i++){
        int con = i * convW * convH;   // per-image output plane offset
        int imgg = i * imgW * imgH;    // per-image input plane offset
        int ind = idY * convW + idX + con;
        int inm = idY * imgW + idX + imgg;
        float acc = 0.0f;
        for (int fi = 0; fi < nF; fi++){
            for (int fj = 0; fj < nF; fj++){
                acc += img[inm + fi * imgW + fj] * filter[fi * nF + fj];
            }
        }
        out[ind] += acc;
    }
}
// Tiled convolution without shared memory: block (bx,by) owns the bh x bw
// tile at that grid position and thread (tx,ty) computes one output pixel.
// Tiles on the right/bottom image edge shrink by nF-1 so the filter window
// stays inside the image -- this unifies the four per-edge branches of the
// original. Fixes several defects in the original:
//  * `ind = convW * (bx*bh + ty) + ...` used ty (column) where the row
//    position tx was intended (the matching `inm` used tx);
//  * the filter was indexed with `inf = fi * convW`, but `filter` holds
//    only K*K floats in constant memory -- the stride must be nF;
//  * `inf += fj; inm += fj;` accumulated the column offset across the fj
//    loop (adding 0,1,2 cumulatively) instead of indexing base+fj;
//  * the __syncthreads() calls were removed: this kernel uses no shared
//    memory, so they only cost time.
__global__ void compute_gpu_tiled(float *img, float *out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW){
    int tx = threadIdx.x;  // row within the tile
    int ty = threadIdx.y;  // column within the tile
    int bx = blockIdx.x;   // tile row
    int by = blockIdx.y;   // tile column
    int bbw = imgW / bw;   // tiles per image row
    int bbh = imgH / bh;   // tiles per image column
    // Edge tiles compute fewer pixels so the window never leaves the image.
    int txMax = (bx == bbh - 1) ? (bh - nF + 1) : bh;
    int tyMax = (by == bbw - 1) ? (bw - nF + 1) : bw;
    if (bx >= bbh || by >= bbw || tx >= txMax || ty >= tyMax)
        return;
    for (int i = 0; i < imgN; i++){
        int con = i * convW * convH;   // per-image output plane offset
        int imgg = i * imgW * imgH;    // per-image input plane offset
        int inm = imgW * (bx*bh + tx) + by*bw + ty + imgg;
        int ind = convW * (bx*bh + tx) + by*bw + ty + con;
        float acc = 0.0f;
        for (int fi = 0; fi < nF; fi++){
            for (int fj = 0; fj < nF; fj++){
                acc += img[inm + fi*imgW + fj] * filter[fi*nF + fj];
            }
        }
        out[ind] += acc;
    }
}
//remember the threads number to be larger than tile size
/*
 * Tiled convolution with a shared-memory staging buffer: each block first
 * copies its tile (plus halo) of the input into `sm`, then every thread
 * computes one output pixel from shared memory. The four branches handle
 * interior, right-edge, bottom-edge and corner tiles; bx/by are uniform
 * across a block, so the __syncthreads() calls inside the branches are
 * reached by all threads of a block and are legal.
 *
 * NOTE(review) -- indexing that should be verified before trusting output:
 *  - sm is sized (BLCH+K-1)*(BLCW+K-1) but addressed as sm[tx*bw+ty], i.e.
 *    with row stride bw rather than bw+nF-1, so halo columns (ty >= bw)
 *    alias the next row's cells -- TODO confirm.
 *  - the loads read img[inm] without adding the per-image offset imgg, so
 *    every iteration of the i loop appears to re-read image 0 -- verify.
 *  - `ind = convW * (bx*bh + ty) + ...` uses ty where the row position tx
 *    looks intended (compare `inm`, which uses tx) -- verify.
 *  - `inf = fi * convW` strides into the K*K constant filter by the conv
 *    width; other kernels in this file use fi*nF -- looks out of bounds.
 *  - `inf += fj; ins += fj;` accumulate the column offset across the fj
 *    loop (0,1,3 instead of 0,1,2) -- verify.
 */
__global__ void compute_gpu_sm(float *img, float *out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW){
int tx = threadIdx.x;
int ty = threadIdx.y;
int bx = blockIdx.x;
int by = blockIdx.y;
int nT = blockDim.x;
int nB = gridDim.x;
int bbw = imgW / bw;
int bbh = imgH / bh;
// tile + halo staging buffer (see NOTE(review) about its addressing)
__shared__ float sm[ (BLCH + K - 1) * (BLCW + K - 1) ];
for (int i = 0; i < imgN; i++){
int con = i * convW * convH;
int imgg = i * imgW * imgH;
//compute center tiles
if(bx < (bbh-1) && by < (bbw-1)){
if(tx < (bh+nF-1) && ty < (bw+nF-1)){
int inm = imgW * (bx*bh + tx) + by*bw + ty;
//set shared memory
sm[tx*bw+ty] = img[inm];
}
__syncthreads();
if(tx < bh && ty < bw){
int ind = convW * (bx*bh + ty) + by*bw + ty;
for (int fi = 0; fi < nF; fi++){
int inf = fi * convW;
int ins = (tx + fi)*bw + ty;
for (int fj = 0; fj < nF; fj++){
inf += fj;
ins += fj ;
out[ind + con] += sm[ins] * filter[inf];
}
}
}
__syncthreads();
}
//compute right most tiles
if(bx < (bbh-1) && by == (bbw-1)){
if(tx < (bh+nF-1) && ty < bw){
int inm = imgW * (bx*bh + tx) + by*bw + ty;
//set shared memory
sm[tx*bw+ty] = img[inm];
}
__syncthreads();
if(tx < bh && ty < (bw - nF + 1)){
int ind = convW * (bx*bh + ty) + by*bw + ty;
for (int fi = 0; fi < nF; fi++){
int inf = fi * convW;
int ins = (tx + fi)*bw + ty;
for (int fj = 0; fj < nF; fj++){
inf += fj;
ins += fj;
out[ind + con] += sm[ins] * filter[inf];
}
}
}
__syncthreads();
}
//compute the bottom tiles
if(bx == (bbh-1) && by < (bbw-1)){
if(tx < bh && ty < (bw+nF-1)){
int inm = imgW * (bx*bh + tx) + by*bw + ty;
//set shared memory
sm[tx*bw+ty] = img[inm];
}
__syncthreads();
if(tx < (bh -nF + 1) && ty < bw){
int ind = convW * (bx*bh + ty) + by*bw + ty;
for (int fi = 0; fi < nF; fi++){
int inf = fi * convW;
int ins = (tx + fi)*bw + ty;
for (int fj = 0; fj < nF; fj++){
inf += fj;
ins += fj;
out[ind + con] += sm[ins] * filter[inf];
}
}
}
__syncthreads();
}
//compute the final tile
if(bx == (bbh-1) && by == (bbw-1)){
if(tx < bh && ty < bw){
int inm = imgW * (bx*bh + tx) + by*bw + ty;
//set shared memory
sm[tx*bw+ty] = img[inm];
}
__syncthreads();
if(tx < (bh - nF + 1) && ty < (bw - nF + 1)){
int ind = convW * (bx*bh + ty) + by*bw + ty;
for (int fi = 0; fi < nF; fi++){
int inf = fi * convW;
int ins = (tx + fi)*bw + ty;
for (int fj = 0; fj < nF; fj++){
inf += fj;
ins += fj;
out[ind + con] += sm[ins] * filter[inf];
}
}
}
__syncthreads();
}
}
}
// Benchmark driver for compute_gpu_tiled: builds random images and a random
// K x K filter, runs the tiled kernel 10000 times and reports the timing.
int main(int argc, char **argv){
    // Problem geometry.
    int imgH = 2048;
    int imgW = 2048;
    int imgN = 10;
    int blcH = BLCH;
    int blcW = BLCW;
    int k = K;
    int s = 1;  // stride
    int nB = (imgH * imgW) / (blcH * blcW);
    int nT = blcW * blcH;
    // BUG FIX: compute_gpu_tiled indexes blockIdx.x/.y as the 2D tile
    // position and threadIdx.x/.y as the position inside a tile, but the
    // original launched with scalar <<<nB, nT>>>, leaving blockIdx.y and
    // threadIdx.y always 0 so only one tile column was ever computed.
    dim3 grid(imgH / blcH, imgW / blcW);
    dim3 block(blcH, blcW);
    int imgDims = imgH * imgW * imgN;
    int imgSize = imgDims * sizeof(float);
    srand (time(NULL));
    // Host image stack with pixel values in [0, 1).
    float *h_img = new float[imgDims];
    for(int i=0; i<imgDims; i++){
        h_img[i] = (float)(rand()%10485)/10485;
    }
    // Random filter, copied to constant memory below.
    int filterDims = k * k;
    int filterSize = filterDims * sizeof(float);
    float *f = new float[filterDims];
    for(int i=0; i<filterDims; i++){
        f[i] = (float)(rand()%10485)/10485;
    }
    // Valid-convolution output size per image.
    int convH = ( (imgH - k) / s ) + 1;
    int convW = ( (imgW - k) / s ) + 1;
    // BUG FIX: the kernels write one convH x convW plane per image (offset
    // i*convH*convW), so the buffer must cover all imgN planes; the original
    // allocated a single plane and the kernels wrote far past it.
    int convDims = convH * convW * imgN;
    int convSize = convDims * sizeof(float);
    float *h_convolved = new float[convDims];
    for(int i=0; i<convDims; i++){
        h_convolved[i] = 0.0;
    }
    // Device buffers.
    float *d_img;
    float *d_convolved;
    cudaMalloc((void **) &d_img, imgSize);
    cudaMemcpy(d_img, h_img, imgSize, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(filter, f, filterSize);
    cudaMalloc((void **) &d_convolved, convSize);
    cudaMemcpy(d_convolved, h_convolved, convSize, cudaMemcpyHostToDevice);
    struct timeval starttime, endtime;
    double elapsed = 0.0;
    for (int i = 0; i<10000; i++){
        gettimeofday(&starttime,NULL);
        // call the kernel
        compute_gpu_tiled<<<grid, block>>>(d_img, d_convolved, blcH, blcW, imgH, imgW, imgN, k, convH, convW);
        // BUG FIX: kernel launches are asynchronous; without this sync the
        // loop only measured launch overhead, not the convolution itself.
        cudaDeviceSynchronize();
        gettimeofday(&endtime,NULL);
        elapsed += ((endtime.tv_sec-starttime.tv_sec)*1000000 + endtime.tv_usec-starttime.tv_usec)/1000000.0;
    }
    cudaMemcpy(h_convolved, d_convolved, convSize, cudaMemcpyDeviceToHost);
    printf("Input imgH: %d imgW: %d imgN: %d\n", imgH, imgW, imgN);
    printf("Tile width: %d height: %d\n", blcW, blcH);
    printf("Block number: %d, block size: %d \n", nB, nT);
    printf("time: %f \n", elapsed);
    // BUG FIX: release the device buffers (previously leaked) and use
    // delete[] for the array news; also free the filter host array.
    cudaFree(d_img);
    cudaFree(d_convolved);
    cudaDeviceReset();
    delete[] h_img;
    delete[] h_convolved;
    delete[] f;
    return 0;
}
|
19,726 | // fermi
/*
* Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C" {
__global__ void zeromeanVerticallyKernel(const int h, const int w, float* output, const float* input);
}
/*
 * Per column j: subtract the mean of the even-indexed rows from every even
 * row and the mean of the odd-indexed rows from every odd row.
 * BUG FIX: for odd h the original loop (i < h-1, step 2) never visited the
 * last row, even though meanEven was already divided by (h+1)/2 -- the
 * count that includes that row. The sum and the output write for row h-1
 * are now handled explicitly, so odd heights are zero-meaned correctly;
 * behavior for even h is unchanged.
 */
__global__ void zeromeanVerticallyKernel(const int h, const int w, float* output, const float* input) {
    const int bj = blockIdx.x;
    const int wtj = threadIdx.y;
    const int ttj = threadIdx.x;
    const int nrThreadsW = min(1024, w);
    const int nrThreadsNrThreadsW = min(32, nrThreadsW);
    // Flatten the 2D thread index; each participating thread owns column j.
    const int tj = wtj * nrThreadsNrThreadsW + ttj;
    if (tj >= nrThreadsW) {
        return;
    }
    const int j = bj * nrThreadsW + tj;
    if (j >= w) {
        return;
    }
    float sumEven = 0.0f;
    float sumOdd = 0.0f;
    // Accumulate even/odd rows pairwise; stops before the last row if h is odd.
    for (int i = 0; i < h - 1; i += 2) {
        sumEven += input[j + i * w];
        sumOdd += input[j + (i + 1) * w];
    }
    const bool oddHeight = (h & 1) != 0;
    if (oddHeight) {
        sumEven += input[j + (h - 1) * w];  // trailing even-indexed row
    }
    const float meanEven = sumEven / ((h + 1) / 2);  // #even rows = ceil(h/2)
    const float meanOdd = sumOdd / (h / 2);          // #odd rows  = floor(h/2)
    for (int i = 0; i < h - 1; i += 2) {
        output[j + i * w] = input[j + i * w] - meanEven;
        output[j + (i + 1) * w] = input[j + (i + 1) * w] - meanOdd;
    }
    if (oddHeight) {
        output[j + (h - 1) * w] = input[j + (h - 1) * w] - meanEven;
    }
}
|
19,727 | /*
* The MIT License (MIT)
* This file is part of waifu2x-converter-cpp
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
/* -*- mode: c++ -*- */
#define UNROLL9(F) \
F(0); \
F(1); \
F(2); \
F(3); \
F(4); \
F(5); \
F(6); \
F(7); \
F(8); \
#define UNROLL8x3x3(F) \
F(0,0,0); \
F(0,0,1); \
F(0,0,2); \
F(0,1,0); \
F(0,1,1); \
F(0,1,2); \
F(0,2,0); \
F(0,2,1); \
F(0,2,2); \
\
F(1,0,0); \
F(1,0,1); \
F(1,0,2); \
F(1,1,0); \
F(1,1,1); \
F(1,1,2); \
F(1,2,0); \
F(1,2,1); \
F(1,2,2); \
\
F(2,0,0); \
F(2,0,1); \
F(2,0,2); \
F(2,1,0); \
F(2,1,1); \
F(2,1,2); \
F(2,2,0); \
F(2,2,1); \
F(2,2,2); \
\
F(3,0,0); \
F(3,0,1); \
F(3,0,2); \
F(3,1,0); \
F(3,1,1); \
F(3,1,2); \
F(3,2,0); \
F(3,2,1); \
F(3,2,2); \
\
F(4,0,0); \
F(4,0,1); \
F(4,0,2); \
F(4,1,0); \
F(4,1,1); \
F(4,1,2); \
F(4,2,0); \
F(4,2,1); \
F(4,2,2); \
\
F(5,0,0); \
F(5,0,1); \
F(5,0,2); \
F(5,1,0); \
F(5,1,1); \
F(5,1,2); \
F(5,2,0); \
F(5,2,1); \
F(5,2,2); \
\
F(6,0,0); \
F(6,0,1); \
F(6,0,2); \
F(6,1,0); \
F(6,1,1); \
F(6,1,2); \
F(6,2,0); \
F(6,2,1); \
F(6,2,2); \
\
F(7,0,0); \
F(7,0,1); \
F(7,0,2); \
F(7,1,0); \
F(7,1,1); \
F(7,1,2); \
F(7,2,0); \
F(7,2,1); \
F(7,2,2); \
#define UNROLL8(F) \
F(0); \
F(1); \
F(2); \
F(3); \
F(4); \
F(5); \
F(6); \
F(7); \
#define UNROLL8x3(F) \
F(0,0); \
F(0,1); \
F(0,2); \
F(0,3); \
F(0,4); \
F(0,5); \
F(0,6); \
F(0,7); \
\
F(1,0); \
F(1,1); \
F(1,2); \
F(1,3); \
F(1,4); \
F(1,5); \
F(1,6); \
F(1,7); \
\
F(2,0); \
F(2,1); \
F(2,2); \
F(2,3); \
F(2,4); \
F(2,5); \
F(2,6); \
F(2,7); \
#define UNROLL10x3(F) \
F(0,0); \
F(0,1); \
F(0,2); \
F(0,3); \
F(0,4); \
F(0,5); \
F(0,6); \
F(0,7); \
F(0,8); \
F(0,9); \
\
F(1,0); \
F(1,1); \
F(1,2); \
F(1,3); \
F(1,4); \
F(1,5); \
F(1,6); \
F(1,7); \
F(1,8); \
F(1,9); \
\
F(2,0); \
F(2,1); \
F(2,2); \
F(2,3); \
F(2,4); \
F(2,5); \
F(2,6); \
F(2,7); \
F(2,8); \
F(2,9); \
#define BLOCK_SIZE 8
extern __shared__ float shared_buf[];
/*
 * One image row of a 3x3 convolution over nInputPlanes input planes,
 * shared by the filter_i32/i64/i128 wrappers below.
 * As used here: blockIdx.x selects the row yi, and threadIdx.x (lid) is the
 * output-plane index, so blockDim.x must equal nOutputPlanes. Dynamic
 * shared memory (shared_buf) must hold 3*nInputPlanes*(BLOCK_SIZE+2)
 * floats: three input rows of one horizontal tile plus a one-pixel halo on
 * each side; rows above/below the image are clamped at the border.
 * NOTE(review): the weight indexing (w[X * 128]) hard-codes an output-plane
 * stride of 128 regardless of nOutputPlanes -- TODO confirm this matches
 * the host-side weight packing for the 32/64-plane instantiations.
 */
template <int nInputPlanes>
__device__ void
filter(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
unsigned int yi = blockIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += yi * in_step;
/* rows above/below the current one, clamped at the top/bottom border */
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
/* carve the dynamic shared buffer into three row tiles with halo */
float *shared_ptr = shared_buf;
float *in_block0_base = shared_ptr;
shared_ptr += nInputPlanes*(BLOCK_SIZE+2);
float *in_block1_base = shared_ptr;
shared_ptr += nInputPlanes*(BLOCK_SIZE+2);
float *in_block2_base = shared_ptr;
shared_ptr += nInputPlanes*(BLOCK_SIZE+2);
/* skip the left halo column so index -1 is valid */
float *in_block0 = in_block0_base + nInputPlanes;
float *in_block1 = in_block1_base + nInputPlanes;
float *in_block2 = in_block2_base + nInputPlanes;
int lid = threadIdx.x;
float bv = biases[lid];
for (int xi0=0; xi0<wsz; xi0+=BLOCK_SIZE) {
/*for (unsigned int op=0; op<nOutputPlanes; op++) thread */
{
int op = lid;
int rem = wsz - xi0;
__syncthreads();
/* stage this x-tile (3 rows, float2 per thread) into shared memory */
if (lid < nInputPlanes/2) {
int bi;
int lid2 = lid*2;
for (bi=0; bi<BLOCK_SIZE; bi++) {
int xi = xi0 + bi;
if (xi == wsz) {
break;
}
/* load to shared */
*(float2*)&in_block0[bi*nInputPlanes + lid2] = *(float2*)&in01[xi*nInputPlanes + lid2];
*(float2*)&in_block1[bi*nInputPlanes + lid2] = *(float2*)&in11[xi*nInputPlanes + lid2];
*(float2*)&in_block2[bi*nInputPlanes + lid2] = *(float2*)&in21[xi*nInputPlanes + lid2];
}
/* right halo: clamp at the image's right border */
{
int xi = xi0 + bi;
if (xi == wsz) {
*(float2*)&in_block0[bi*(int)nInputPlanes + lid2] = *(float2*)&in01[(xi-1)*(int)nInputPlanes + lid2];
*(float2*)&in_block1[bi*(int)nInputPlanes + lid2] = *(float2*)&in11[(xi-1)*(int)nInputPlanes + lid2];
*(float2*)&in_block2[bi*(int)nInputPlanes + lid2] = *(float2*)&in21[(xi-1)*(int)nInputPlanes + lid2];
} else {
*(float2*)&in_block0[bi*(int)nInputPlanes + lid2] = *(float2*)&in01[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block1[bi*(int)nInputPlanes + lid2] = *(float2*)&in11[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block2[bi*(int)nInputPlanes + lid2] = *(float2*)&in21[xi*(int)nInputPlanes + lid2];
}
}
/* left halo: clamp at the image's left border */
{
int xi = xi0-1;
if (xi == -1) {
*(float2*)&in_block0[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in01[lid2];
*(float2*)&in_block1[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in11[lid2];
*(float2*)&in_block2[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in21[lid2];
} else {
*(float2*)&in_block0[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in01[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block1[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in11[xi*(int)nInputPlanes + lid2];
*(float2*)&in_block2[-1*(int)nInputPlanes + (int)lid2] = *(float2*)&in21[xi*(int)nInputPlanes + lid2];
}
}
}
__syncthreads();
/* fast path: full 8-pixel tile, fully unrolled via the macros above */
if (rem >= BLOCK_SIZE) {
#define DECL_PTR(y,x) float *p##y##x = &in_block##y[nInputPlanes * (x-1)];
UNROLL10x3(DECL_PTR);
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
float sum4 = 0;
float sum5 = 0;
float sum6 = 0;
float sum7 = 0;
{
const float *w0 = weight + lid;
/* two input planes per iteration (the .x/.y halves of each float2) */
for (int ip = 0; ip < nInputPlanes; ip++) {
#define LOAD_INPUT2(y,x) float2 i##y##x##_2 = *(float2*)&p##y##x[ip];
UNROLL10x3(LOAD_INPUT2);
#define LOAD_COEF(X) float w_##X = w[X * 128];
#define CALC(IDX,Y,I0,I1,I2,I3,I4,I5,I6,I7) \
sum0 += w_##IDX * i##Y##I0; \
sum1 += w_##IDX * i##Y##I1; \
sum2 += w_##IDX * i##Y##I2; \
sum3 += w_##IDX * i##Y##I3; \
sum4 += w_##IDX * i##Y##I4; \
sum5 += w_##IDX * i##Y##I5; \
sum6 += w_##IDX * i##Y##I6; \
sum7 += w_##IDX * i##Y##I7;
{
#define LOAD_INPUT1X(Y,X) float i##Y##X = i##Y##X##_2.x;
UNROLL10x3(LOAD_INPUT1X);
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(0,0,0,1,2,3,4,5,6,7);
CALC(1,0,1,2,3,4,5,6,7,8);
CALC(2,0,2,3,4,5,6,7,8,9);
CALC(3,1,0,1,2,3,4,5,6,7);
CALC(4,1,1,2,3,4,5,6,7,8);
CALC(5,1,2,3,4,5,6,7,8,9);
CALC(6,2,0,1,2,3,4,5,6,7);
CALC(7,2,1,2,3,4,5,6,7,8);
CALC(8,2,2,3,4,5,6,7,8,9);
}
}
ip++;
{
#define LOAD_INPUT1Y(Y,X) float i##Y##X = i##Y##X##_2.y;
UNROLL10x3(LOAD_INPUT1Y);
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(0,0,0,1,2,3,4,5,6,7);
CALC(1,0,1,2,3,4,5,6,7,8);
CALC(2,0,2,3,4,5,6,7,8,9);
CALC(3,1,0,1,2,3,4,5,6,7);
CALC(4,1,1,2,3,4,5,6,7,8);
CALC(5,1,2,3,4,5,6,7,8,9);
CALC(6,2,0,1,2,3,4,5,6,7);
CALC(7,2,1,2,3,4,5,6,7,8);
CALC(8,2,2,3,4,5,6,7,8,9);
}
}
}
/* bias + leaky ReLU (negative slope 0.1), then store */
#define RELU(BI) \
{ \
float *out = packed_output + (yi*wsz + (xi0+BI))*nOutputPlanes; \
\
{ \
int opIndex = lid; \
float v = sum##BI; \
v += bv; \
\
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
\
v = ltz * 0.1f + mtz; \
\
out[opIndex] = v; \
} \
}
UNROLL8(RELU);
#undef DECL_PTR
#undef LOAD_COEF
#undef CALC
#undef LOAD_INPUT2
#undef LOAD_INPUT1X
#undef LOAD_INPUT1Y
#undef RELU
}
} else {
/* slow path: partial tile at the right edge, scalar per pixel */
for (int bi=0; bi<BLOCK_SIZE; bi++) {
int xi = xi0+bi;
if (xi == wsz) {
break;
}
const float *w0 = weight + lid;
float sum = 0;
for (int ip=0; ip<nInputPlanes; ip++) {
float i00, i01, i02;
float i10, i11, i12;
float i20, i21, i22;
i00 = in_block0[(bi-1)*nInputPlanes+ip];
i10 = in_block1[(bi-1)*nInputPlanes+ip];
i20 = in_block2[(bi-1)*nInputPlanes+ip];
i01 = in_block0[bi*nInputPlanes+ip];
i11 = in_block1[bi*nInputPlanes+ip];
i21 = in_block2[bi*nInputPlanes+ip];
i02 = in_block0[(bi+1)*nInputPlanes+ip];
i12 = in_block1[(bi+1)*nInputPlanes+ip];
i22 = in_block2[(bi+1)*nInputPlanes+ip];
const float *w = w0;
sum += w[(9*ip+0) * 128]*i00;
sum += w[(9*ip+1) * 128]*i01;
sum += w[(9*ip+2) * 128]*i02;
sum += w[(9*ip+3) * 128]*i10;
sum += w[(9*ip+4) * 128]*i11;
sum += w[(9*ip+5) * 128]*i12;
sum += w[(9*ip+6) * 128]*i20;
sum += w[(9*ip+7) * 128]*i21;
sum += w[(9*ip+8) * 128]*i22;
}
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes;
/* bias + leaky ReLU, matching the fast path */
{
float v = sum;
v += bv;
float mtz = max(v, 0.0f);
float ltz = min(v, 0.0f);
v = ltz * 0.1f + mtz;
out[op] = v;
}
}
}
}
}
}
/* Kernel entry point for layers with 32 input planes; thin wrapper around
 * the shared filter<> template above. */
extern "C" __global__ void
filter_i32(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
filter<32>(packed_input, packed_output, nOutputPlanes, biases, hsz, wsz, weight);
}
/* Kernel entry point for layers with 64 input planes; thin wrapper around
 * the shared filter<> template above. */
extern "C" __global__ void
filter_i64(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
filter<64>(packed_input, packed_output, nOutputPlanes, biases, hsz, wsz, weight);
}
/* Kernel entry point for layers with 128 input planes; thin wrapper around
 * the shared filter<> template above. */
extern "C" __global__ void
filter_i128(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
int nOutputPlanes,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight)
{
filter<128>(packed_input, packed_output, nOutputPlanes, biases, hsz, wsz, weight);
}
#if __CUDA_ARCH__ >= 300
/* Warp-wide sum via shuffle-down; after the final step lane 0 holds the
 * total of all 32 lanes (other lanes hold partial sums). The participation
 * mask is hard-coded to the full warp, so all 32 lanes must be active when
 * this is called. Requires SM30+ (guarded by the #if). */
static inline __device__ float
warp_sum(float v) {
v += __shfl_down_sync(0xFFFFFFFF, v, 1);
v += __shfl_down_sync(0xFFFFFFFF, v, 2);
v += __shfl_down_sync(0xFFFFFFFF, v, 4);
v += __shfl_down_sync(0xFFFFFFFF, v, 8);
v += __shfl_down_sync(0xFFFFFFFF, v, 16);
return v;
}
#endif
template <int nInputPlanes,
int nOutputPlanes>
void __device__
filter_weight_blocking(const float * __restrict__ packed_input,
float * __restrict__ packed_output,
const float * __restrict__ biases,
unsigned int hsz,
unsigned int wsz,
const float * __restrict__ weight,
int ib0,
int ob0)
{
#define INPUT_BLOCK_SIZE 32
#define OUTPUT_BLOCK_SIZE 64 // == blockDim.x
#define X_BLOCK_SIZE 8
unsigned int yi = blockIdx.x;
size_t in_step = wsz * nInputPlanes;
const float *inp = packed_input;
inp += yi * in_step;
const float *in0p = inp - in_step;
if (yi == 0) {
in0p = inp;
}
const float *in1p = inp;
const float *in2p = inp + in_step;
if (yi == hsz-1) {
in2p = in1p;
}
const float *in01 = in0p;
const float *in11 = in1p;
const float *in21 = in2p;
__shared__ float shared_buf_base[INPUT_BLOCK_SIZE * (X_BLOCK_SIZE+2) * 3];
float *in_block0_base = shared_buf_base + INPUT_BLOCK_SIZE * (BLOCK_SIZE+2) * 0;
float *in_block1_base = shared_buf_base + INPUT_BLOCK_SIZE * (BLOCK_SIZE+2) * 1;
float *in_block2_base = shared_buf_base + INPUT_BLOCK_SIZE * (BLOCK_SIZE+2) * 2;
float *in_block0 = in_block0_base + INPUT_BLOCK_SIZE;
float *in_block1 = in_block1_base + INPUT_BLOCK_SIZE;
float *in_block2 = in_block2_base + INPUT_BLOCK_SIZE;
int lid = threadIdx.x;
{ // ib0
{ // ob0
int op = lid + ob0;
float bv = biases[op];
for (int xi0=0; xi0<wsz; xi0+=BLOCK_SIZE) {
float *out_base = packed_output + (yi*wsz + xi0)*nOutputPlanes + op;
float *linp0 = in_block0 + lid;
float *linp1 = in_block1 + lid;
float *linp2 = in_block2 + lid;
__syncthreads();
int rem = wsz - xi0;
const float *inb0 = in01 + ib0+lid;
const float *inb1 = in11 + ib0+lid;
const float *inb2 = in21 + ib0+lid;
if (rem > 8 && xi0 != 0) {
if (lid < INPUT_BLOCK_SIZE) {
linp0[-1*INPUT_BLOCK_SIZE] = linp0[7*INPUT_BLOCK_SIZE];
linp1[-1*INPUT_BLOCK_SIZE] = linp1[7*INPUT_BLOCK_SIZE];
linp2[-1*INPUT_BLOCK_SIZE] = linp2[7*INPUT_BLOCK_SIZE];
linp0[0*INPUT_BLOCK_SIZE] = linp0[8*INPUT_BLOCK_SIZE];
linp1[0*INPUT_BLOCK_SIZE] = linp1[8*INPUT_BLOCK_SIZE];
linp2[0*INPUT_BLOCK_SIZE] = linp2[8*INPUT_BLOCK_SIZE];
}
__syncthreads();
if (lid < INPUT_BLOCK_SIZE) {
int bi;
#pragma unroll
for (bi=1; bi<X_BLOCK_SIZE+1; bi++) {
int xi = xi0 + bi;
/* load to shared */
linp0[bi*INPUT_BLOCK_SIZE] = inb0[xi*nInputPlanes];
linp1[bi*INPUT_BLOCK_SIZE] = inb1[xi*nInputPlanes];
linp2[bi*INPUT_BLOCK_SIZE] = inb2[xi*nInputPlanes];
}
}
} else {
if (lid < INPUT_BLOCK_SIZE) {
int bi;
for (bi=0; bi<X_BLOCK_SIZE; bi++) {
int xi = xi0 + bi;
if (xi == wsz) {
break;
}
/* load to shared */
linp0[bi*INPUT_BLOCK_SIZE] = inb0[xi*nInputPlanes];
linp1[bi*INPUT_BLOCK_SIZE] = inb1[xi*nInputPlanes];
linp2[bi*INPUT_BLOCK_SIZE] = inb2[xi*nInputPlanes];
}
{
int xi = xi0 + bi;
if (xi == wsz) {
linp0[bi*(int)INPUT_BLOCK_SIZE] = inb0[(xi-1)*(int)nInputPlanes];
linp1[bi*(int)INPUT_BLOCK_SIZE] = inb1[(xi-1)*(int)nInputPlanes];
linp2[bi*(int)INPUT_BLOCK_SIZE] = inb2[(xi-1)*(int)nInputPlanes];
} else {
linp0[bi*(int)INPUT_BLOCK_SIZE] = inb0[xi*(int)nInputPlanes];
linp1[bi*(int)INPUT_BLOCK_SIZE] = inb1[xi*(int)nInputPlanes];
linp2[bi*(int)INPUT_BLOCK_SIZE] = inb2[xi*(int)nInputPlanes];
}
}
{
int xi = xi0-1;
if (xi == -1) {
linp0[-1*(int)INPUT_BLOCK_SIZE] = inb0[0];
linp1[-1*(int)INPUT_BLOCK_SIZE] = inb1[0];
linp2[-1*(int)INPUT_BLOCK_SIZE] = inb2[0];
} else {
linp0[-1*(int)INPUT_BLOCK_SIZE] = inb0[xi*(int)nInputPlanes];
linp1[-1*(int)INPUT_BLOCK_SIZE] = inb1[xi*(int)nInputPlanes];
linp2[-1*(int)INPUT_BLOCK_SIZE] = inb2[xi*(int)nInputPlanes];
}
}
}
}
__syncthreads();
const float *w0 = weight + op;
if (rem >= BLOCK_SIZE) {
#define DECL_PTR(y,x) float *p##y##x = &in_block##y[INPUT_BLOCK_SIZE * (x-1)];
UNROLL10x3(DECL_PTR);
float sum0 = 0;
float sum1 = 0;
float sum2 = 0;
float sum3 = 0;
float sum4 = 0;
float sum5 = 0;
float sum6 = 0;
float sum7 = 0;
for (int ip1 = 0; ip1 < INPUT_BLOCK_SIZE; ip1+=2) {
int ip = ip1 + ib0;
#define LOAD_INPUT2(y,x) float2 i##y##x##_2 = *(float2*)&p##y##x[ip1];
UNROLL10x3(LOAD_INPUT2);
#define LOAD_COEF(X) float w_##X = w[X * 128];
#define CALC(SYM,IDX,Y,I0,I1,I2,I3,I4,I5,I6,I7) \
sum0 += w_##IDX * i##Y##I0##_2.SYM; \
sum1 += w_##IDX * i##Y##I1##_2.SYM; \
sum2 += w_##IDX * i##Y##I2##_2.SYM; \
sum3 += w_##IDX * i##Y##I3##_2.SYM; \
sum4 += w_##IDX * i##Y##I4##_2.SYM; \
sum5 += w_##IDX * i##Y##I5##_2.SYM; \
sum6 += w_##IDX * i##Y##I6##_2.SYM; \
sum7 += w_##IDX * i##Y##I7##_2.SYM;
{
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(x, 0,0,0,1,2,3,4,5,6,7);
CALC(x, 1,0,1,2,3,4,5,6,7,8);
CALC(x, 2,0,2,3,4,5,6,7,8,9);
CALC(x, 3,1,0,1,2,3,4,5,6,7);
CALC(x, 4,1,1,2,3,4,5,6,7,8);
CALC(x, 5,1,2,3,4,5,6,7,8,9);
CALC(x, 6,2,0,1,2,3,4,5,6,7);
CALC(x, 7,2,1,2,3,4,5,6,7,8);
CALC(x, 8,2,2,3,4,5,6,7,8,9);
}
}
ip++;
{
const float *w = (w0 + (ip * 128) * 9);
UNROLL9(LOAD_COEF);
{
CALC(y, 0,0,0,1,2,3,4,5,6,7);
CALC(y, 1,0,1,2,3,4,5,6,7,8);
CALC(y, 2,0,2,3,4,5,6,7,8,9);
CALC(y, 3,1,0,1,2,3,4,5,6,7);
CALC(y, 4,1,1,2,3,4,5,6,7,8);
CALC(y, 5,1,2,3,4,5,6,7,8,9);
CALC(y, 6,2,0,1,2,3,4,5,6,7);
CALC(y, 7,2,1,2,3,4,5,6,7,8);
CALC(y, 8,2,2,3,4,5,6,7,8,9);
}
}
}
#define RELU(BI) \
{ \
\
{ \
float v = sum##BI + out_base[BI*nOutputPlanes]; \
v += bv; \
\
float mtz = max(v, 0.0f); \
float ltz = min(v, 0.0f); \
\
v = ltz * 0.1f + mtz; \
\
out_base[BI*nOutputPlanes] = v; \
} \
}
if ((ib0+INPUT_BLOCK_SIZE) == nInputPlanes) {
UNROLL8(RELU);
} else if (ib0 == 0) {
out_base[nOutputPlanes*0] = sum0;
out_base[nOutputPlanes*1] = sum1;
out_base[nOutputPlanes*2] = sum2;
out_base[nOutputPlanes*3] = sum3;
out_base[nOutputPlanes*4] = sum4;
out_base[nOutputPlanes*5] = sum5;
out_base[nOutputPlanes*6] = sum6;
out_base[nOutputPlanes*7] = sum7;
} else {
out_base[nOutputPlanes*0] += sum0;
out_base[nOutputPlanes*1] += sum1;
out_base[nOutputPlanes*2] += sum2;
out_base[nOutputPlanes*3] += sum3;
out_base[nOutputPlanes*4] += sum4;
out_base[nOutputPlanes*5] += sum5;
out_base[nOutputPlanes*6] += sum6;
out_base[nOutputPlanes*7] += sum7;
}
} else {
for (int bi=0; bi<X_BLOCK_SIZE; bi++) {
int xi = xi0+bi;
if (xi == wsz) {
break;
}
float sum = 0;
for (int ip1=0; ip1<INPUT_BLOCK_SIZE; ip1++) {
int ip = ib0 + ip1;
float i00, i01, i02;
float i10, i11, i12;
float i20, i21, i22;
i00 = in_block0[(bi-1)*INPUT_BLOCK_SIZE+ip1];
i10 = in_block1[(bi-1)*INPUT_BLOCK_SIZE+ip1];
i20 = in_block2[(bi-1)*INPUT_BLOCK_SIZE+ip1];
i01 = in_block0[bi*INPUT_BLOCK_SIZE+ip1];
i11 = in_block1[bi*INPUT_BLOCK_SIZE+ip1];
i21 = in_block2[bi*INPUT_BLOCK_SIZE+ip1];
i02 = in_block0[(bi+1)*INPUT_BLOCK_SIZE+ip1];
i12 = in_block1[(bi+1)*INPUT_BLOCK_SIZE+ip1];
i22 = in_block2[(bi+1)*INPUT_BLOCK_SIZE+ip1];
sum += w0[(9*ip+0) * 128]*i00;
sum += w0[(9*ip+1) * 128]*i01;
sum += w0[(9*ip+2) * 128]*i02;
sum += w0[(9*ip+3) * 128]*i10;
sum += w0[(9*ip+4) * 128]*i11;
sum += w0[(9*ip+5) * 128]*i12;
sum += w0[(9*ip+6) * 128]*i20;
sum += w0[(9*ip+7) * 128]*i21;
sum += w0[(9*ip+8) * 128]*i22;
}
float *out = packed_output + (yi*wsz + xi)*nOutputPlanes;
if ((ib0+INPUT_BLOCK_SIZE) == nInputPlanes) {
/* last */
float v = sum + out[op];
v += bv;
float mtz = max(v, 0.0f);
float ltz = min(v, 0.0f);
v = ltz * 0.1f + mtz;
out[op] = v;
} else if (ib0 == 0) {
out[op] = sum;
} else {
out[op] += sum;
}
}
}
}
}
}
}
/* Kernel entry point: 3x3 convolution stage with 128 input planes and
 * 128 output planes.  All arguments are forwarded unchanged to the
 * weight-blocking template kernel, which does the actual work. */
extern "C" __global__
void
filter_i128_o128(const float * __restrict__ packed_input,
                 float * __restrict__ packed_output,
                 const float * __restrict__ biases,
                 unsigned int hsz,
                 unsigned int wsz,
                 const float * __restrict__ weight,
                 int ib0,
                 int ob0)
{
    filter_weight_blocking<128,128>(packed_input, packed_output, biases,
                                    hsz, wsz, weight, ib0, ob0);
}
/* Kernel entry point: 3x3 convolution stage with 64 input planes and
 * 128 output planes, delegating to the weight-blocking template. */
extern "C" __global__
void
filter_i64_o128(const float * __restrict__ packed_input,
                float * __restrict__ packed_output,
                const float * __restrict__ biases,
                unsigned int hsz,
                unsigned int wsz,
                const float * __restrict__ weight,
                int ib0,
                int ob0)
{
    filter_weight_blocking<64,128>(packed_input, packed_output, biases,
                                   hsz, wsz, weight, ib0, ob0);
}
/* Kernel entry point: 3x3 convolution stage with 64 input planes and
 * 64 output planes, delegating to the weight-blocking template. */
extern "C" __global__
void
filter_i64_o64(const float * __restrict__ packed_input,
               float * __restrict__ packed_output,
               const float * __restrict__ biases,
               unsigned int hsz,
               unsigned int wsz,
               const float * __restrict__ weight,
               int ib0,
               int ob0)
{
    filter_weight_blocking<64,64>(packed_input, packed_output, biases,
                                  hsz, wsz, weight, ib0, ob0);
}
/* 3x3 convolution: 128 input planes -> 1 output plane.
 * One block per output row (yi = blockIdx.x); 128 threads, one per input
 * plane.  Each thread keeps a sliding 3x3 window of its plane in registers
 * (lin00..lin22), multiplies by its 9 weights, and the per-plane partial
 * sums are reduced across the 128 threads via sum_buffer / SUM_RELU.
 * Bias is added and a leaky activation (negative slope 0.1) is applied
 * before the single output value is stored. */
extern "C" __global__ void
filter_i128_o1(const float * __restrict__ packed_input,
               float * __restrict__ packed_output,
               float * __restrict__ biases,
               unsigned int hsz,
               unsigned int wsz,
               float * __restrict__ weight)
{
    int nInputPlanes = 128;
    int nOutputPlanes = 1;
    {
        unsigned int yi = blockIdx.x;
        size_t in_step = wsz * nInputPlanes;
        const float *inp = packed_input;
        inp += yi * in_step;
        /* rows above/below the current one, clamped at the image edges */
        const float *in0p = inp - in_step;
        if (yi == 0) {
            in0p = inp;
        }
        const float *in1p = inp;
        const float *in2p = inp + in_step;
        if (yi == hsz-1) {
            in2p = in1p;
        }
        const float *in01 = in0p;
        const float *in11 = in1p;
        const float *in21 = in2p;
        unsigned int lid = threadIdx.x;
        float bv0 = biases[0];
        /* 128 item */
        /* x : (1width/group) */
        /* y : (2height/group) */
        /* iplane : 1plane / 1item * 128plane */
        __shared__ float shared_buf[128 * 10];
        /* sliding 3x3 window of this thread's input plane */
        float lin00;
        float lin01;
        float lin02;
        float lin10;
        float lin11;
        float lin12;
        float lin20;
        float lin21;
        float lin22;
        /* last 128 floats of shared_buf used for the cross-plane reduction */
        float *sum_buffer = shared_buf + 128*9;
#define OUT1_LOAD_WEIGHT(I,Y,X) float w##I##Y##X = weight[(I*16 + lid)*9 + Y*3 + X];
        /* each thread loads the 9 weights for its input plane */
        float w00 = weight[lid*9 + 0];
        float w01 = weight[lid*9 + 1];
        float w02 = weight[lid*9 + 2];
        float w10 = weight[lid*9 + 3];
        float w11 = weight[lid*9 + 4];
        float w12 = weight[lid*9 + 5];
        float w20 = weight[lid*9 + 6];
        float w21 = weight[lid*9 + 7];
        float w22 = weight[lid*9 + 8];
        const float *pin01 = in01 + lid;
        const float *pin02 = in01 + nInputPlanes + lid;
        const float *pin11 = in11 + lid;
        const float *pin12 = in11 + nInputPlanes + lid;
        const float *pin21 = in21 + lid;
        const float *pin22 = in21 + nInputPlanes + lid;
        /* prime the window: left edge is clamped (column -1 == column 0) */
        lin01 = pin01[0];
        lin02 = pin01[0];
        lin11 = pin11[0];
        lin12 = pin11[0];
        lin21 = pin21[0];
        lin22 = pin21[0];
        /* Per-column body: shift the 3x3 window one column right, load the
         * new right column (clamped on the right edge when REDGE), form the
         * weighted sum, then reduce across all 128 threads. */
#define OUT1_BODY(LEDGE,REDGE,SUM_RELU) \
        { \
            float sum = 0; \
            { \
                lin00 = lin01; \
                lin01 = lin02; \
                \
                lin10 = lin11; \
                lin11 = lin12; \
                \
                lin20 = lin21; \
                lin21 = lin22; \
                \
                if (REDGE) { \
                    lin02 = lin01; \
                    lin12 = lin11; \
                    lin22 = lin21; \
                } else { \
                    lin02 = pin02[xi*128]; \
                    lin12 = pin12[xi*128]; \
                    lin22 = pin22[xi*128]; \
                } \
                \
                sum += w00 * lin00; \
                sum += w10 * lin10; \
                sum += w20 * lin20; \
                \
                sum += w01 * lin01; \
                sum += w11 * lin11; \
                sum += w21 * lin21; \
                \
                sum += w02 * lin02; \
                sum += w12 * lin12; \
                sum += w22 * lin22; \
                \
            } \
            __syncthreads(); \
            sum_buffer[lid] = sum; \
            __syncthreads(); \
            if (lid < 64) { \
                float v2 = sum_buffer[lid+64]; \
                sum_buffer[lid] += v2; \
            } \
            __syncthreads(); \
            SUM_RELU(0); \
        }

        /* Final 64 -> 1 reduction + bias + leaky activation + store.
         * Kepler+ (sm_30) uses warp shuffles via warp_sum (defined earlier
         * in this file); older archs fall back to shared-memory halving. */
#if __CUDA_ARCH__ >= 300
#define SUM_RELU(OI) \
        if (lid < 32) { \
            float v0 = sum_buffer[lid] + sum_buffer[lid+32]; \
            float sum = warp_sum(v0); \
            \
            if (lid == 0) { \
                float v = sum; \
                float *out = packed_output + (yi*wsz + xi)*nOutputPlanes; \
                v += bv##OI; \
                float mtz = max(v, 0.0f); \
                float ltz = min(v, 0.0f); \
                v = ltz * 0.1f + mtz; \
                out[OI] = v; \
            } \
        } \

#else
#define SUM_RELU(OI) \
        if (lid < 32) { \
            sum_buffer[lid] += sum_buffer[lid+32]; \
        } \
        __syncthreads(); \
        if (lid < 16) { \
            sum_buffer[lid] += sum_buffer[lid+16]; \
        } \
        __syncthreads(); \
        if (lid < 8) { \
            sum_buffer[lid] += sum_buffer[lid+8]; \
        } \
        __syncthreads(); \
        if (lid < 4) { \
            sum_buffer[lid] += sum_buffer[lid+4]; \
        } \
        __syncthreads(); \
        if (lid < 2) { \
            sum_buffer[lid] += sum_buffer[lid+2]; \
        } \
        __syncthreads(); \
        if (lid == 0) { \
            float sum = sum_buffer[0] + sum_buffer[1]; \
            float v = sum; \
            float *out = packed_output + (yi*wsz + xi)*nOutputPlanes; \
            v += bv##OI; \
            float mtz = max(v, 0.0f); \
            float ltz = min(v, 0.0f); \
            v = ltz * 0.1f + mtz; \
            out[OI] = v; \
        }
#endif
        /* interior columns, then the right edge with clamping */
        for (int xi=0; xi<wsz-1; xi++) {
            OUT1_BODY(0,0,SUM_RELU);
        }
        {
            int xi = wsz-1;
            OUT1_BODY(0,1,SUM_RELU);
        }
    }
}
/* 3x3 convolution: 1 input plane -> 32 output planes.
 * One block per row (yi = blockIdx.x), 256 threads.  The row (plus its
 * clamped neighbours above/below) is staged in shared memory in tiles of
 * 256 pixels with one halo pixel on each side.  Each thread then computes
 * 8 of the 32 output planes (ooff) for one column (xoff); bias and a
 * leaky activation (negative slope 0.1) are applied before the store. */
extern "C" __global__ void
filter_i1_o32(const float * __restrict__ packed_input,
              float * __restrict__ packed_output,
              float * __restrict__ biases,
              unsigned int hsz,
              unsigned int wsz,
              float * __restrict__ weight)
{
    //int nInputPlanes = 1;
    int nOutputPlanes = 32;
    unsigned int yi = blockIdx.x;
    unsigned int lid = threadIdx.x;
    size_t in_step = wsz;
    const float *inp = packed_input;
    inp += in_step * yi;
    /* rows above/below the current one, clamped at the image edges */
    const float *in0p = inp - in_step;
    if (yi == 0) {
        in0p = inp;
    }
    const float *in1p = inp;
    const float *in2p = inp + in_step;
    if (yi == hsz-1) {
        in2p = in1p;
    }
    const float *in01 = in0p;
    const float *in11 = in1p;
    const float *in21 = in2p;
    /* 256-pixel tile plus one halo pixel on each side, per row */
    __shared__ float in_block0_base[256+2];
    __shared__ float in_block1_base[256+2];
    __shared__ float in_block2_base[256+2];
    float *in_block0 = in_block0_base + 1;
    float *in_block1 = in_block1_base + 1;
    float *in_block2 = in_block2_base + 1;
    /* 256 item / group */
    /* x : (64width/group) */
    /* 32 oplane : (8weight/item * 4item)*/
    unsigned int xoff = lid / 4U;
    unsigned int ooff = (lid % 4U) * 8;
    /* each thread loads 8x3x3 weights for its 8 output planes */
#define IN1_LOAD_COEF(O,Y,X) \
    float w##O##Y##X = weight[9 * (O + ooff) + (Y*3) + X];
    UNROLL8x3x3(IN1_LOAD_COEF);
    for (int xi0=0; xi0<wsz; xi0+=256) {
        /* load */
        __syncthreads();
        {
            int xi = xi0 + lid;
            if (xi < wsz) {
                in_block0[lid] = in01[xi0 + lid];
                in_block1[lid] = in11[xi0 + lid];
                in_block2[lid] = in21[xi0 + lid];
            }
            /* left halo: clamp at the image's left edge */
            if (lid == 0) {
                if (xi == 0) {
                    in_block0[-1] = in01[0];
                    in_block1[-1] = in11[0];
                    in_block2[-1] = in21[0];
                } else {
                    in_block0[-1] = in01[xi-1];
                    in_block1[-1] = in11[xi-1];
                    in_block2[-1] = in21[xi-1];
                }
            }
            /* right halo: clamp at the image's right edge */
            if (xi == wsz-1) {
                in_block0[lid+1] = in01[xi];
                in_block1[lid+1] = in11[xi];
                in_block2[lid+1] = in21[xi];
            }
            if ((lid == 255) && (xi < wsz-1)) {
                in_block0[256] = in01[xi+1];
                in_block1[256] = in11[xi+1];
                in_block2[256] = in21[xi+1];
            }
        }
        __syncthreads();
        /* 4 passes x 64 columns = 256 columns per tile */
        for (int xi1_base=0; xi1_base<4; xi1_base++) {
            {
                int xi1 = xi1_base*64 + xoff;
                int xi = xi0 + xi1;
                if (xi < wsz) {
#define IN1_DECLSUM(O) float sum##O = 0;
#define IN1_CALC(O,Y,X) sum##O += in_block##Y[xi1+X-1] * w##O##Y##X;
                    /* bias + leaky activation, then store output plane */
#define IN1_RELU(O) { \
                    float v = sum##O; \
                    int opIndex = ooff + O; \
                    float bv = biases[opIndex]; \
                    v += bv; \
                    float mtz = max(v, 0.0f); \
                    float ltz = min(v, 0.0f); \
                    v = ltz * 0.1f + mtz; \
                    out[opIndex] = v; \
                    }
                    UNROLL8(IN1_DECLSUM);
                    UNROLL8x3x3(IN1_CALC);
                    float *out = packed_output + (yi*wsz + xi) * nOutputPlanes;
                    UNROLL8(IN1_RELU);
                }
            }
        }
    }
}
/* blockDim.x == 192 */
/* 3x3 convolution: 3 input planes -> 32 output planes.
 * One block per row (yi = blockIdx.x), 192 threads = 3 input planes x
 * 32 output planes + reduction lanes.  64-column tiles of the three
 * interleaved input planes are staged in shared memory with a 3-float
 * halo per side.  Threads 0..95 each compute one (input plane, output
 * plane) partial sum; threads 0..31 then fold the 3 partials per output
 * plane, add the bias, and apply the leaky activation (slope 0.1). */
extern "C" __global__ void
filter_i3_o32(const float * __restrict__ packed_input,
              float * __restrict__ packed_output,
              float * __restrict__ biases,
              unsigned int hsz,
              unsigned int wsz,
              float * __restrict__ weight)
{
    int nInputPlanes = 3;
    int nOutputPlanes = 32;
    unsigned int yi = blockIdx.x;
    unsigned int lid = threadIdx.x;
    size_t in_step = wsz * nInputPlanes;
    const float *inp = packed_input;
    inp += in_step * yi;
    /* rows above/below the current one, clamped at the image edges */
    const float *in0p = inp - in_step;
    if (yi == 0) {
        in0p = inp;
    }
    const float *in1p = inp;
    const float *in2p = inp + in_step;
    if (yi == hsz-1) {
        in2p = in1p;
    }
    const float *in01 = in0p;
    const float *in11 = in1p;
    const float *in21 = in2p;
    /* 64 columns x 3 planes, plus a 3-float halo on each side */
    __shared__ float in_block0_base[(64+2)*3];
    __shared__ float in_block1_base[(64+2)*3];
    __shared__ float in_block2_base[(64+2)*3];
    __shared__ float sum_buffer[192];
    float *in_block0 = in_block0_base + 3;
    float *in_block1 = in_block1_base + 3;
    float *in_block2 = in_block2_base + 3;
    /* 192 item / group */
    /* load 192 item */
    /* 3 iplane : */
    /* x : (64width/group) */
    /* 32 oplane : (8weight/item * 4item)*/
    unsigned int ioff = lid / 32U;   /* which of the 3 input planes */
    unsigned int ooff = lid % 32U;   /* which of the 32 output planes */
    /* each thread loads its 9 weights for (input plane ioff, output ooff) */
#define I3_O32_LOAD_COEF(I) \
    float w##I = weight[9*nOutputPlanes*ioff+ooff+I*nOutputPlanes];
    UNROLL9(I3_O32_LOAD_COEF);
    for (int xi0=0; xi0<wsz; xi0+=64) {
        /* load */
        int nelem = min(wsz - xi0, 64);
        int nload = nelem * 3;   /* floats to stage (3 planes interleaved) */
        if (lid < nload) {
            int xi = xi0*3 + lid;
            in_block0[lid] = in01[xi];
            in_block1[lid] = in11[xi];
            in_block2[lid] = in21[xi];
            /* left halo: clamp at the image's left edge */
            if (lid < 3) {
                if (xi <= 2) {
                    /* left edge */
                    in_block0[-3+(int)lid] = in01[lid];
                    in_block1[-3+(int)lid] = in11[lid];
                    in_block2[-3+(int)lid] = in21[lid];
                } else {
                    /* 0, 1, 2 */
                    in_block0[-3+(int)lid] = in01[-3+(int)xi];
                    in_block1[-3+(int)lid] = in11[-3+(int)xi];
                    in_block2[-3+(int)lid] = in21[-3+(int)xi];
                }
            }
            /* right halo: clamp at the image's right edge */
            if (xi >= wsz*3-3) {
                /* right edge */
                in_block0[lid+3] = in01[xi];
                in_block1[lid+3] = in11[xi];
                in_block2[lid+3] = in21[xi];
            } else if (lid >= 189) {
                /* 189, 190, 191 */
                in_block0[lid+3] = in01[xi+3];
                in_block1[lid+3] = in11[xi+3];
                in_block2[lid+3] = in21[xi+3];
            }
        }
        __syncthreads();
        for (int xi1=0; xi1<nelem; xi1++) {
            int xi = xi0 + xi1;
            if (lid < 96) { // 3input x 32output
                float sum = 0;
                sum += w0 * in_block0[(xi1 - 1)*3+(int)ioff];
                sum += w1 * in_block0[(xi1    )*3+(int)ioff];
                sum += w2 * in_block0[(xi1 + 1)*3+(int)ioff];
                sum += w3 * in_block1[(xi1 - 1)*3+(int)ioff];
                sum += w4 * in_block1[(xi1    )*3+(int)ioff];
                sum += w5 * in_block1[(xi1 + 1)*3+(int)ioff];
                sum += w6 * in_block2[(xi1 - 1)*3+(int)ioff];
                sum += w7 * in_block2[(xi1    )*3+(int)ioff];
                sum += w8 * in_block2[(xi1 + 1)*3+(int)ioff];
                sum_buffer[lid] = sum;
            }
            __syncthreads();
            if (lid < 32) {
                int oi = lid;
                float v = 0;
                float *out = packed_output + (yi*wsz + xi) * nOutputPlanes;
                /* 96 to 32 reduction */
                v += sum_buffer[32 * 0 + lid];
                v += sum_buffer[32 * 1 + lid];
                v += sum_buffer[32 * 2 + lid];
                float bv = biases[oi];
                v += bv;
                float mtz = max(v, 0.0f);
                float ltz = min(v, 0.0f);
                v = ltz * 0.1f + mtz;
                out[oi] = v;
            }
            __syncthreads();
        }
    }
}
/* blockDim.x == 128 */
/* 3x3 convolution: 128 input planes -> 3 output planes.
 * One block per row (yi = blockIdx.x), 128 threads, one per input plane.
 * Each thread keeps a sliding 3x3 window of its plane in registers and
 * 27 weights (9 per output plane).  For each column, the 128 per-plane
 * partial sums are reduced through sum_buffer and finished by the
 * SUM_RELU macro defined earlier in this file (bias + leaky activation,
 * slope 0.1), once per output plane. */
extern "C" __global__ void
filter_i128_o3(const float * __restrict__ packed_input,
               float * __restrict__ packed_output,
               float * __restrict__ biases,
               unsigned int hsz,
               unsigned int wsz,
               float * __restrict__ weight)
{
    int nInputPlanes = 128;
    int nOutputPlanes = 3;
    unsigned int yi = blockIdx.x;
    unsigned int lid = threadIdx.x;
    size_t in_step = wsz * nInputPlanes;
    const float *inp = packed_input;
    inp += in_step * yi;
    /* rows above/below the current one, clamped at the image edges */
    const float *in0p = inp - in_step;
    if (yi == 0) {
        in0p = inp;
    }
    const float *in1p = inp;
    const float *in2p = inp + in_step;
    if (yi == hsz-1) {
        in2p = in1p;
    }
    const float *in01 = in0p;
    const float *in11 = in1p;
    const float *in21 = in2p;
    /* sliding 3x3 window of this thread's input plane */
    float lin00, lin01, lin02;
    float lin10, lin11, lin12;
    float lin20, lin21, lin22;
    __shared__ float sum_buffer[128];
    /* 128 item / group */
    /* load 128 item (load 3elem/item) */
    /* 128 iplane
     * 1 input
     * 3 output (27coeff)
     */
    int ioff = lid;
    float bv0 = biases[0];
    float bv1 = biases[1];
    float bv2 = biases[2];
    /* 9 weights per output plane for this thread's input plane */
#define I128_O3_LOAD_COEF(I) \
    float w0##I = weight[9*0*nInputPlanes + I*nInputPlanes + ioff]; \
    float w1##I = weight[9*1*nInputPlanes + I*nInputPlanes + ioff]; \
    float w2##I = weight[9*2*nInputPlanes + I*nInputPlanes + ioff];
    UNROLL9(I128_O3_LOAD_COEF);
    /* prime the window: left edge clamped (column -1 == column 0) */
    lin01 = lin02 = in01[lid];
    lin11 = lin12 = in11[lid];
    lin21 = lin22 = in21[lid];
    /* byte-offset walk along the row avoids re-multiplying each iteration */
    int addroff = 0;
    char *p0 = (char*)(in01 + lid + nInputPlanes);
    char *p1 = (char*)(in11 + lid + nInputPlanes);
    char *p2 = (char*)(in21 + lid + nInputPlanes);
    for (int xi=0; xi<wsz; xi++) {
        /* shift window one column right */
        lin00 = lin01;
        lin01 = lin02;
        lin10 = lin11;
        lin11 = lin12;
        lin20 = lin21;
        lin21 = lin22;
        if (xi == wsz-1) {
            /* nop */
        } else {
            lin02 = *(float *)(p0 + addroff);
            lin12 = *(float *)(p1 + addroff);
            lin22 = *(float *)(p2 + addroff);
        }
        addroff += nInputPlanes * sizeof(float);
        /* weighted sum for output plane OI, then 128 -> 1 reduction and
         * bias + leaky activation via SUM_RELU (defined earlier) */
#define I128_O3(OI) \
        { \
            float sum = 0; \
            sum += w##OI##0 * lin00; \
            sum += w##OI##1 * lin01; \
            sum += w##OI##2 * lin02; \
            \
            sum += w##OI##3 * lin10; \
            sum += w##OI##4 * lin11; \
            sum += w##OI##5 * lin12; \
            \
            sum += w##OI##6 * lin20; \
            sum += w##OI##7 * lin21; \
            sum += w##OI##8 * lin22; \
            \
            __syncthreads(); \
            sum_buffer[lid] = sum; \
            \
            /* 128 to 1 */ \
            __syncthreads(); \
            if (lid < 64) { \
                sum_buffer[lid] += sum_buffer[lid + 64]; \
            } \
            __syncthreads(); \
            \
            SUM_RELU(OI); \
        }
        I128_O3(0);
        I128_O3(1);
        I128_O3(2);
    }
}
|
19,728 | /*
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
#include <cstdio>
#include <vector>
using namespace std;
const char *sSampleName = "P2P (Peer-to-Peer) GPU Bandwidth Latency Test";
//Macro for checking cuda errors following a cuda launch or api call
// Note: cudaGetLastError() both returns and resets the runtime's last-error
// state, so each check reports only errors raised since the previous check.
// On failure the process exits immediately with EXIT_FAILURE.
#define cudaCheckError() { \
 cudaError_t e=cudaGetLastError(); \
 if(e!=cudaSuccess) { \
 printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e)); \
 exit(EXIT_FAILURE); \
 } \
 }
// For every ordered device pair (src, dst), query whether src can access
// dst's memory peer-to-peer and report the result.
void checkP2Paccess(int numGPUs)
{
    for (int src = 0; src < numGPUs; src++)
    {
        cudaSetDevice(src);
        for (int dst = 0; dst < numGPUs; dst++)
        {
            if (src == dst)
                continue;   // a device trivially accesses itself
            int access = 0;
            cudaDeviceCanAccessPeer(&access, src, dst);
            printf("Device=%d %s Access Peer Device=%d\n", src, access ? "CAN" : "CANNOT", dst);
        }
    }
    printf("\n***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.\nSo you can see lesser Bandwidth (GB/s) in those cases.\n\n");
}
// Enable peer access from every device to every other device that reports
// P2P capability toward it.
void enableP2P(int numGPUs)
{
    for (int src = 0; src < numGPUs; src++)
    {
        cudaSetDevice(src);
        for (int peer = 0; peer < numGPUs; peer++)
        {
            int access = 0;
            cudaDeviceCanAccessPeer(&access, src, peer);
            if (!access)
                continue;
            cudaDeviceEnablePeerAccess(peer, 0);
            cudaCheckError();
        }
    }
}
// Disable previously enabled peer access for every capable device pair.
void disableP2P(int numGPUs)
{
    for (int src = 0; src < numGPUs; src++)
    {
        cudaSetDevice(src);
        for (int peer = 0; peer < numGPUs; peer++)
        {
            int access = 0;
            cudaDeviceCanAccessPeer(&access, src, peer);
            if (!access)
                continue;
            cudaDeviceDisablePeerAccess(peer);
            // Swallow the error (e.g. access was never enabled) so a later
            // cudaCheckError() does not trip on it.
            cudaGetLastError();
        }
    }
}
// Measure and print a numGPUs x numGPUs matrix of unidirectional copy
// bandwidth (GB/s).  Element [i][j] times `repeat` cudaMemcpyPeerAsync
// copies of numElems ints from device j's buffer into device i's buffer,
// bracketed by CUDA events recorded from device i.
void outputBandwidthMatrix(int numGPUs)
{
    int numElems=10000000;   // ints per buffer (40 MB)
    int repeat=5;            // copies averaged per device pair
    vector<int *> buffers(numGPUs);
    vector<cudaEvent_t> start(numGPUs);
    vector<cudaEvent_t> stop(numGPUs);
    // One device buffer and one start/stop event pair per GPU.
    for (int d=0; d<numGPUs; d++)
    {
        cudaSetDevice(d);
        cudaMalloc(&buffers[d],numElems*sizeof(int));
        cudaCheckError();
        cudaEventCreate(&start[d]);
        cudaCheckError();
        cudaEventCreate(&stop[d]);
        cudaCheckError();
    }
    vector<double> bandwidthMatrix(numGPUs*numGPUs);
    for (int i=0; i<numGPUs; i++)
    {
        cudaSetDevice(i);
        for (int j=0; j<numGPUs; j++)
        {
            // Drain outstanding work so the timed window covers only the
            // copies below.
            cudaDeviceSynchronize();
            cudaCheckError();
            cudaEventRecord(start[i]);
            for (int r=0; r<repeat; r++)
            {
                // destination = device i, source = device j
                cudaMemcpyPeerAsync(buffers[i],i,buffers[j],j,sizeof(int)*numElems);
            }
            cudaEventRecord(stop[i]);
            cudaDeviceSynchronize();
            cudaCheckError();
            float time_ms;
            cudaEventElapsedTime(&time_ms,start[i],stop[i]);
            double time_s=time_ms/1e3;
            // Total bytes moved across all repeats, in GB.
            double gb=numElems*sizeof(int)*repeat/(double)1e9;
            bandwidthMatrix[i*numGPUs+j]=gb/time_s;
        }
    }
    // Column header (source device indices).
    printf("   D\\D");
    for (int j=0; j<numGPUs; j++)
    {
        printf("%6d ", j);
    }
    printf("\n");
    for (int i=0; i<numGPUs; i++)
    {
        printf("%6d ",i);
        for (int j=0; j<numGPUs; j++)
        {
            printf("%6.02f ", bandwidthMatrix[i*numGPUs+j]);
        }
        printf("\n");
    }
    // Release per-device buffers and events.
    for (int d=0; d<numGPUs; d++)
    {
        cudaSetDevice(d);
        cudaFree(buffers[d]);
        cudaCheckError();
        cudaEventDestroy(start[d]);
        cudaCheckError();
        cudaEventDestroy(stop[d]);
        cudaCheckError();
    }
}
// Measure and print a numGPUs x numGPUs matrix of bidirectional copy
// bandwidth (GB/s).  For each pair (i, j), copies are issued in both
// directions simultaneously on two streams owned by device i, so the
// byte count doubles relative to the unidirectional test.
void outputBidirectionalBandwidthMatrix(int numGPUs)
{
    int numElems=10000000;   // ints per buffer (40 MB)
    int repeat=5;            // copy pairs averaged per device pair
    vector<int *> buffers(numGPUs);
    vector<cudaEvent_t> start(numGPUs);
    vector<cudaEvent_t> stop(numGPUs);
    vector<cudaStream_t> stream0(numGPUs);
    vector<cudaStream_t> stream1(numGPUs);
    // One buffer, one event pair, and two streams per GPU.
    for (int d=0; d<numGPUs; d++)
    {
        cudaSetDevice(d);
        cudaMalloc(&buffers[d],numElems*sizeof(int));
        cudaCheckError();
        cudaEventCreate(&start[d]);
        cudaCheckError();
        cudaEventCreate(&stop[d]);
        cudaCheckError();
        cudaStreamCreate(&stream0[d]);
        cudaCheckError();
        cudaStreamCreate(&stream1[d]);
        cudaCheckError();
    }
    vector<double> bandwidthMatrix(numGPUs*numGPUs);
    for (int i=0; i<numGPUs; i++)
    {
        cudaSetDevice(i);
        for (int j=0; j<numGPUs; j++)
        {
            // Drain outstanding work before the timed window.
            cudaDeviceSynchronize();
            cudaCheckError();
            cudaEventRecord(start[i]);
            for (int r=0; r<repeat; r++)
            {
                // j -> i on stream0 and i -> j on stream1, concurrently.
                cudaMemcpyPeerAsync(buffers[i],i,buffers[j],j,sizeof(int)*numElems,stream0[i]);
                cudaMemcpyPeerAsync(buffers[j],j,buffers[i],i,sizeof(int)*numElems,stream1[i]);
            }
            cudaEventRecord(stop[i]);
            cudaDeviceSynchronize();
            cudaCheckError();
            float time_ms;
            cudaEventElapsedTime(&time_ms,start[i],stop[i]);
            double time_s=time_ms/1e3;
            // Factor 2: bytes move in both directions.
            double gb=2.0*numElems*sizeof(int)*repeat/(double)1e9;
            bandwidthMatrix[i*numGPUs+j]=gb/time_s;
        }
    }
    // Column header (peer device indices).
    printf("   D\\D");
    for (int j=0; j<numGPUs; j++)
    {
        printf("%6d ", j);
    }
    printf("\n");
    for (int i=0; i<numGPUs; i++)
    {
        printf("%6d ",i);
        for (int j=0; j<numGPUs; j++)
        {
            printf("%6.02f ", bandwidthMatrix[i*numGPUs+j]);
        }
        printf("\n");
    }
    // Release per-device buffers, events, and streams.
    for (int d=0; d<numGPUs; d++)
    {
        cudaSetDevice(d);
        cudaFree(buffers[d]);
        cudaCheckError();
        cudaEventDestroy(start[d]);
        cudaCheckError();
        cudaEventDestroy(stop[d]);
        cudaCheckError();
        cudaStreamDestroy(stream0[d]);
        cudaCheckError();
        cudaStreamDestroy(stream1[d]);
        cudaCheckError();
    }
}
// Measure and print a numGPUs x numGPUs matrix of small-transfer latency
// (microseconds).  Element [i][j] is the mean time of a 1-byte
// cudaMemcpyPeerAsync from device j to device i over `repeat` iterations.
void outputLatencyMatrix(int numGPUs)
{
    int repeat=10000;   // 1-byte copies averaged per device pair
    vector<int *> buffers(numGPUs);
    vector<cudaEvent_t> start(numGPUs);
    vector<cudaEvent_t> stop(numGPUs);
    // One 1-byte buffer and one event pair per GPU.
    for (int d=0; d<numGPUs; d++)
    {
        cudaSetDevice(d);
        cudaMalloc(&buffers[d],1);
        cudaCheckError();
        cudaEventCreate(&start[d]);
        cudaCheckError();
        cudaEventCreate(&stop[d]);
        cudaCheckError();
    }
    vector<double> latencyMatrix(numGPUs*numGPUs);
    for (int i=0; i<numGPUs; i++)
    {
        cudaSetDevice(i);
        for (int j=0; j<numGPUs; j++)
        {
            // Drain outstanding work before the timed window.
            cudaDeviceSynchronize();
            cudaCheckError();
            cudaEventRecord(start[i]);
            for (int r=0; r<repeat; r++)
            {
                cudaMemcpyPeerAsync(buffers[i],i,buffers[j],j,1);
            }
            cudaEventRecord(stop[i]);
            cudaDeviceSynchronize();
            cudaCheckError();
            float time_ms;
            cudaEventElapsedTime(&time_ms,start[i],stop[i]);
            // ms -> us, divided by iteration count.
            latencyMatrix[i*numGPUs+j]=time_ms*1e3/repeat;
        }
    }
    // Column header (source device indices).
    printf("   D\\D");
    for (int j=0; j<numGPUs; j++)
    {
        printf("%6d ", j);
    }
    printf("\n");
    for (int i=0; i<numGPUs; i++)
    {
        printf("%6d ",i);
        for (int j=0; j<numGPUs; j++)
        {
            printf("%6.02f ", latencyMatrix[i*numGPUs+j]);
        }
        printf("\n");
    }
    // Release per-device buffers and events.
    for (int d=0; d<numGPUs; d++)
    {
        cudaSetDevice(d);
        cudaFree(buffers[d]);
        cudaCheckError();
        cudaEventDestroy(start[d]);
        cudaCheckError();
        cudaEventDestroy(stop[d]);
        cudaCheckError();
    }
}
// Driver: enumerate devices, report P2P capability and groupings, then run
// the unidirectional/bidirectional bandwidth and latency matrices with P2P
// disabled and enabled.
int main(int argc, char **argv)
{
    int numGPUs;
    cudaGetDeviceCount(&numGPUs);
    printf("[%s]\n", sSampleName);
    //output devices
    for (int i=0; i<numGPUs; i++)
    {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop,i);
        printf("Device: %d, %s, pciBusID: %x, pciDeviceID: %x, pciDomainID:%x\n",i,prop.name, prop.pciBusID, prop.pciDeviceID, prop.pciDomainID);
    }
    checkP2Paccess(numGPUs);
    //compute cliques
    // NOTE(review): this is a greedy grouping — device i pulls in every
    // later unclaimed device j with P2P access from i; access between the
    // other members is not verified.
    vector<vector<int> > cliques;
    vector<bool> added(numGPUs,false);
    for (int i=0; i<numGPUs; i++)
    {
        if (added[i]==true)
            continue;  //already processed
        //create new clique with i
        vector<int> clique;
        added[i]=true;
        clique.push_back(i);
        for (int j=i+1; j<numGPUs; j++)
        {
            int access;
            cudaDeviceCanAccessPeer(&access,i,j);
            if (access)
            {
                clique.push_back(j);
                added[j]=true;
            }
        }
        cliques.push_back(clique);
    }
    printf("P2P Cliques: \n");
    for (int c=0; c<(int)cliques.size(); c++)
    {
        printf("[");
        for (int j=0; j<(int)cliques[c].size()-1; j++)
        {
            printf("%d ",cliques[c][j]);
        }
        printf("%d]\n",cliques[c][cliques[c].size()-1]);
    }
    // Each matrix is measured twice: once with P2P disabled (falls back to
    // staging through the host) and once with P2P enabled.
    printf("Unidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
    outputBandwidthMatrix(numGPUs);
    enableP2P(numGPUs);
    printf("Unidirectional P2P=Enabled Bandwidth Matrix (GB/s)\n");
    outputBandwidthMatrix(numGPUs);
    disableP2P(numGPUs);
    printf("Bidirectional P2P=Disabled Bandwidth Matrix (GB/s)\n");
    outputBidirectionalBandwidthMatrix(numGPUs);
    enableP2P(numGPUs);
    printf("Bidirectional P2P=Enabled Bandwidth Matrix (GB/s)\n");
    outputBidirectionalBandwidthMatrix(numGPUs);
    disableP2P(numGPUs);
    printf("P2P=Disabled Latency Matrix (us)\n");
    outputLatencyMatrix(numGPUs);
    enableP2P(numGPUs);
    printf("P2P=Enabled Latency Matrix (us)\n");
    outputLatencyMatrix(numGPUs);
    // cudaDeviceReset causes the driver to clean up all state. While
    // not mandatory in normal operation, it is good practice.  It is also
    // needed to ensure correct operation when the application is being
    // profiled. Calling cudaDeviceReset causes all profile data to be
    // flushed before the application exits
    cudaDeviceReset();
    printf("\nNOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.\n");
    exit(EXIT_SUCCESS);
}
|
19,729 |
// Single-precision natural logs: ln(2*pi) and ln(pi).
#define LOG_2_PI 1.83787706640935f
#define LOG_PI 1.144729885849400f
// Round k up to the nearest multiple of mult; returns k unchanged when it
// is already a multiple.  Intended for non-negative k and positive mult.
__device__ int d_next_multiple(int k, int mult) {
    int rem = k % mult;
    return rem ? k + (mult - rem) : k;
}
// Block-cooperative copy of `total` floats from in_buf to out_buf.
// Thread `tid` handles elements tid, tid + blockDim.x, tid + 2*blockDim.x, ...
// No synchronization is performed here; callers must barrier as needed.
__device__ void copy_chunks(float* in_buf, float* out_buf,
                            unsigned int tid, unsigned int total) {
    unsigned int idx = tid;
    while (idx < total) {
        out_buf[idx] = in_buf[idx];
        idx += blockDim.x;
    }
}
// Block-cooperative copy of an ncols x nrows matrix from the densely packed
// in_buf into out_buf, whose rows are `stride` floats apart (stride >= ncols).
// Threads stride over the flat element index; no synchronization here.
__device__ void copy_chunks_strided(float* in_buf, float* out_buf,
                                    unsigned int tid, unsigned int ncols,
                                    unsigned int nrows, unsigned int stride) {
    unsigned int total = ncols * nrows;
    for (unsigned int idx = tid; idx < total; idx += blockDim.x) {
        unsigned int row = idx / ncols;
        unsigned int col = idx % ncols;
        out_buf[row * stride + col] = in_buf[idx];
    }
}
// Architecture-dispatched atomic float add.
__device__ inline void atomic_add(float* address, float value){
#if __CUDA_ARCH__ >= 200 // for Fermi, atomicAdd supports floats
    atomicAdd(address, value);
#elif __CUDA_ARCH__ >= 110
    // Pre-Fermi fallback: emulate a float atomic add with atomicExch.
    // The inner atomicExch claims the cell (leaving 0 behind), the outer one
    // deposits cell+old; the loop spins until the deposit lands on a 0 cell.
    // NOTE(review): concurrent readers can transiently observe 0 in *address
    // while an add is in flight — acceptable only for accumulate-then-read
    // patterns; confirm all call sites read after a synchronization point.
    float old = value;
    while ((old = atomicExch(address, atomicExch(address, 0.0f)+old))!=0.0f);
#endif
}
|
19,730 | // ----------------------------------------------------------------------------
// CUDA code to compute minimun distance between n points
//
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include<limits>
#include<float.h>
#define MAX_POINTS 1048576
#define block_size 1024
// ----------------------------------------------------------------------------
// Kernel Function to compute distance between all pairs of points
// Input:
// X: X[i] = x-coordinate of the ith point
// Y: Y[i] = y-coordinate of the ith point
// n: number of points
// Output:
// D: D[0] = minimum distance
//
// Number of blocks that have published their per-block minimum to D.
// The last block to increment this folds D[] into the global minimum D[0].
__device__ unsigned int block_count = 0;

// Kernel: thread i computes the minimum Euclidean distance from point i to
// every point j > i; the block then reduces those thread minima in shared
// memory and writes its block minimum to D[blockIdx.x].  The last block to
// finish folds all per-block minima into D[0].
//
// Preconditions:
//   - launched with blockDim.x == block_size (a power of two),
//   - D has at least gridDim.x elements,
//   - n >= 2.
//
// Fixes vs. the previous version: __syncthreads() is never executed on a
// divergent path (the old code put the whole reduction inside the
// `curr_i < n-1` guard and a barrier inside a modulo-branch, which is
// undefined behavior), the reduction no longer drops the last element
// (the old bound `< stop_index-1` skipped it), a __threadfence() makes the
// per-block result visible before signalling completion, and block_count
// is reset so the kernel can be launched again.
__global__ void minimum_distance(float * X, float * Y, volatile float * D, int n) {
    // global index of the point this thread owns
    unsigned int curr_i = blockIdx.x * blockDim.x + threadIdx.x;
    // FLT_MAX is the identity for the min reduction, so out-of-range
    // threads participate harmlessly
    float minimum_dist = FLT_MAX;
    // per-block scratch for the tree reduction
    __shared__ float block_local_mins[block_size];
    // shared so every thread in the block sees the "last block" verdict
    __shared__ bool last_blk;

    if (curr_i < n-1)
    {
        // distance from point curr_i to every later point
        for (int j = curr_i+1; j < n; j++)
        {
            float dx = X[curr_i] - X[j];
            float dy = Y[curr_i] - Y[j];
            float curr_dist = sqrtf(dx*dx + dy*dy);
            if (curr_dist < minimum_dist)
            {
                minimum_dist = curr_dist;
            }
        }
    }

    // Every thread writes its value and reaches every barrier below.
    block_local_mins[threadIdx.x] = minimum_dist;
    __syncthreads();

    // Standard power-of-two tree reduction over the whole block.
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (threadIdx.x < s)
        {
            if (block_local_mins[threadIdx.x + s] < block_local_mins[threadIdx.x])
            {
                block_local_mins[threadIdx.x] = block_local_mins[threadIdx.x + s];
            }
        }
        __syncthreads();
    }

    if (threadIdx.x == 0)
    {
        D[blockIdx.x] = block_local_mins[0];
        // Publish the write to D before other blocks can observe the
        // incremented counter.
        __threadfence();
        unsigned int inc_val = atomicInc(&block_count, gridDim.x);
        last_blk = (inc_val == (gridDim.x - 1));
    }
    __syncthreads();

    // The last block to arrive folds the per-block minima into D[0].
    if (last_blk && threadIdx.x == 0)
    {
        for (unsigned int i = 1; i < gridDim.x; i++)
        {
            if (D[i] < D[0])
            {
                D[0] = D[i];
            }
        }
        block_count = 0;   // reset for a possible relaunch
    }
}
// ----------------------------------------------------------------------------
// Host function to compute minimum distance between points
// Input:
//    X: X[i] = x-coordinate of the ith point
//    Y: Y[i] = y-coordinate of the ith point
//    n: number of points (must be >= 2)
// Output:
//    returns the minimum pairwise Euclidean distance
//
// Brute-force O(n^2) scan over all pairs.  The previous version contained a
// redundant inner loop (j from i+1 to i+2) that recomputed the first pair's
// distance before the real scan; it has been removed — results are identical.
float minimum_distance_host(float * X, float * Y, int n) {
    float min_distance = FLT_MAX;   // identity for the min
    for (int i = 0; i < n-1; i++) {
        for (int j = i+1; j < n; j++) {
            float dx = X[j]-X[i];
            float dy = Y[j]-Y[i];
            float Dij = sqrtf(dx*dx+dy*dy);
            if (Dij < min_distance) min_distance = Dij;
        }
    }
    return min_distance;
}
// ----------------------------------------------------------------------------
// Enumerate all CUDA devices and print their key capability limits.
void print_device_properties() {
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    printf("------------------------------------------------------------\n");
    printf("Number of GPU devices found = %d\n", deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp props;
        cudaGetDeviceProperties(&props, dev);
        printf("[Device: %1d] Compute Capability %d.%d.\n", dev, props.major, props.minor);
        printf(" ... multiprocessor count = %d\n", props.multiProcessorCount);
        printf(" ... max threads per multiprocessor = %d\n", props.maxThreadsPerMultiProcessor);
        printf(" ... max threads per block = %d\n", props.maxThreadsPerBlock);
        printf(" ... max block dimension = %d, %d, %d (along x, y, z)\n",
               props.maxThreadsDim[0], props.maxThreadsDim[1], props.maxThreadsDim[2]);
        printf(" ... max grid size = %d, %d, %d (along x, y, z)\n",
               props.maxGridSize[0], props.maxGridSize[1], props.maxGridSize[2]);
        printf(" ... warp size = %d\n", props.warpSize);
        printf(" ... clock rate = %d MHz\n", props.clockRate/1000);
    }
    printf("------------------------------------------------------------\n");
}
// ----------------------------------------------------------------------------
// Main program - initializes random points, computes the minimum pairwise
// distance on the GPU, validates against a CPU reference, and reports
// timings for transfers and both computations.
//
// Fixes vs. the previous version: the GPU result printf passed the POINTER
// hmin_dist to %e (undefined behavior — garbage output) instead of
// hmin_dist[0]; hmin_dist was never freed and the CUDA events were never
// destroyed; argv was parsed twice; unused locals removed; main now
// returns 0 explicitly.
int main(int argc, char* argv[]) {
    // Host data
    float * hVx;          // host x-coordinate array
    float * hVy;          // host y-coordinate array
    float * hmin_dist;    // per-block minima copied back (result in [0])
    // Device data
    float * dVx;          // device x-coordinate array
    float * dVy;          // device y-coordinate array
    float * dmin_dist;    // per-block minima; kernel leaves global min in [0]
    // Timing variables
    cudaEvent_t start, stop;               // GPU timing
    struct timespec cpu_start, cpu_stop;   // CPU timing
    float time_array[10];
    // Other variables
    int i, size, num_points;
    float min_distance, sqrtn;
    int seed = 0;

    // Print device properties
    print_device_properties();

    // Select device 0 when one exists; otherwise warn and carry on so the
    // failure mode is at least visible.
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount > 0) {
        cudaSetDevice(0);
    } else {
        printf("Warning: No GPU device found ... results may be incorrect\n");
    }

    // Timing initializations
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Check input (parse once, validate range)
    if (argc != 2) {
        printf("Use: %s <number of points>\n", argv[0]);
        exit(0);
    }
    num_points = atoi(argv[argc-1]);
    if (num_points < 2) {
        printf("Minimum number of points allowed: 2\n");
        exit(0);
    }
    if (num_points > MAX_POINTS) {
        printf("Maximum number of points allowed: %d\n", MAX_POINTS);
        exit(0);
    }

    // Allocate host coordinate arrays
    size = num_points * sizeof(float);
    hVx = (float *) malloc(size);
    hVy = (float *) malloc(size);
    hmin_dist = (float *) malloc(size);

    // Initialize points uniformly in [0, sqrt(n))^2 so expected minimum
    // distance is roughly scale-independent.
    srand48(seed);
    sqrtn = (float) sqrt(num_points);
    for (i = 0; i < num_points; i++) {
        hVx[i] = sqrtn * (float)drand48();
        hVy[i] = sqrtn * (float)drand48();
    }

    // Allocate device coordinate arrays
    cudaMalloc(&dVx, size);
    cudaMalloc(&dVy, size);
    cudaMalloc(&dmin_dist, size);

    // Copy coordinate arrays from host memory to device memory
    cudaEventRecord( start, 0 );
    cudaMemcpy(dVx, hVx, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dVy, hVy, size, cudaMemcpyHostToDevice);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[0]), start, stop);

    // Invoke kernel: one thread per point, ceil-div for the grid size.
    cudaEventRecord( start, 0 );
    int num_blocks = num_points/(block_size) + ((num_points % (block_size)) !=0);
    minimum_distance<<<num_blocks,block_size>>>(dVx,dVy,dmin_dist,num_points);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[1]), start, stop);

    // Copy result (global minimum in element 0) back to the host
    cudaEventRecord( start, 0 );
    cudaMemcpy(hmin_dist, dmin_dist, sizeof(float), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&(time_array[2]), start, stop);

    // Compute minimum distance on host to check device computation
    clock_gettime(CLOCK_REALTIME, &cpu_start);
    min_distance = minimum_distance_host(hVx, hVy, num_points);
    clock_gettime(CLOCK_REALTIME, &cpu_stop);
    time_array[3] = 1000*((cpu_stop.tv_sec-cpu_start.tv_sec)
                          +0.000000001*(cpu_stop.tv_nsec-cpu_start.tv_nsec));

    // Print results (hmin_dist[0], not the pointer — the old code invoked
    // undefined behavior by passing a float* to %e)
    printf("Number of Points    = %d\n", num_points);
    printf("GPU Host-to-device  = %f ms \n", time_array[0]);
    printf("GPU Device-to-host  = %f ms \n", time_array[2]);
    printf("GPU execution time  = %f ms \n", time_array[1]);
    printf("CPU execution time  = %f ms\n", time_array[3]);
    printf("Min. distance (GPU) = %e\n", hmin_dist[0]);
    printf("Min. distance (CPU) = %e\n", min_distance);
    printf("Relative error      = %e\n", fabs(min_distance-hmin_dist[0])/min_distance);

    // Free device memory
    cudaFree(dVx);
    cudaFree(dVy);
    cudaFree(dmin_dist);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    // Free host memory
    free(hVx);
    free(hVy);
    free(hmin_dist);
    return 0;
}
|
19,731 | #include<stdio.h>
// Enumerate every CUDA device, print its name and compute capability,
// then make `gpu_device` the active device for subsequent CUDA calls.
extern "C" void GPUDeviceInfo(const int gpu_device)
{
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    if (deviceCount <= 0)
    {
        printf("No CUDA device found \n");
    }
    else
    {
        printf("#########\n");
        for (int dev = 0; dev < deviceCount; ++dev)
        {
            cudaDeviceProp props;
            cudaGetDeviceProperties(&props, dev);
            printf("Found CUDA device %d : %s\n", dev, props.name);
            printf("Compute capability : %d.%d\n", props.major, props.minor);
            printf("#########\n");
        }
    }
    printf("Selecting device %d\n", gpu_device);
    cudaSetDevice(gpu_device);
}
|
19,732 | //#include "scale.h"
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
#include <math.h> /* fabs */
#include <string.h>
#include <stdlib.h>
#include <sstream>
#include <unordered_map>
using namespace std;
#define THREADS_PER_BLOCK 256
#define STREAM_COUNT 4
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
int flag;
// Print a readable diagnostic when a CUDA call fails; optionally terminate
// the process with the error code (used via the gpuErrchk macro).
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess) return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
// Print "index value" for each of the nov entries of arr, one per line.
void printArray(int *arr, int nov){
    for (int idx = 0; idx < nov; ++idx)
        cout << idx << " " << arr[idx] << endl;
}
// One thread per vertex v: count walks v -> n1 -> n2 -> v of length 3 in the
// CSR graph (xadj: nov+1 row offsets, adj: neighbor ids), rejecting the
// trivial immediate return (n2 == v). Writes the per-vertex count to output.
__global__ void kernel3(int* adj, int* xadj, int* output, int nov){
    int v = threadIdx.x + (blockIdx.x * blockDim.x);
    if (v >= nov) return;
    int walks = 0;
    for (int p1 = xadj[v]; p1 < xadj[v + 1]; p1++){
        int n1 = adj[p1];                       // v --> n1
        for (int p2 = xadj[n1]; p2 < xadj[n1 + 1]; p2++){
            int n2 = adj[p2];                   // v --> n1 --> n2
            if (n2 == v) continue;              // disallow bouncing straight back
            for (int p3 = xadj[n2]; p3 < xadj[n2 + 1]; p3++){
                if (adj[p3] == v){              // v --> n1 --> n2 --> v: closed
                    walks += 1;
                    break;
                }
            }
        }
    }
    output[v] = walks;
}
// One thread per vertex `index`: counts walks of length 4,
// index -> n1 -> n2 -> n3 -> index, in the CSR graph (xadj: nov+1 row
// offsets, adj: concatenated neighbor lists), with the revisit constraints
// listed inline (n2 != index, n3 != index, n3 != n1). The per-vertex count
// is written to output[index].
__global__ void kernel4(int* adj, int* xadj, int* output, int nov){
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if(index < nov){
        //int *marked = new int[n];
        //memset(marked, -1, n * sizeof(int)); // this could perhaps be removed
        int localcount = 0;
        // int round = 0;
        // 0-->  start of the walk at `index`
        int s0 = xadj[index];
        int e0 = xadj[index+1];
        for(int i=s0; i < e0; i++){
            // 0 --> 1
            int neighbour_1 = adj[i];
            int s1 = xadj[neighbour_1];
            int e1 = xadj[neighbour_1+1];
            for(int j=s1;j < e1; j++){
                // 0 --> 1 --> 2
                int neighbour_2 = adj[j];
                //eliminate 0 == 2 (no immediate backtrack to the start)
                if (neighbour_2 == index) continue;
                int s2 = xadj[neighbour_2];
                int e2 = xadj[neighbour_2+1];
                for(int k=s2; k < e2; k++){
                    // 0 --> 1 --> 2 --> 3
                    int neighbour_3 = adj[k];
                    //eliminate 3 == 0 (the walk must not close one step early)
                    if (neighbour_3 == index) continue;
                    // eliminate 3 == 1 (no revisiting the first hop)
                    if (neighbour_3 == neighbour_1) continue;
                    int s3 = xadj[neighbour_3];
                    int e3 = xadj[neighbour_3+1];
                    for(int n=s3; n < e3; n++){
                        //0 -->1 -->2 -->3 -->4: count if the 4th hop returns home
                        int neighbour_4 = adj[n];
                        if (neighbour_4 == index){
                            localcount+=1;
                            break; // adjacency lists hold at most one edge back
                        }
                    }
                }
            }
        }
        output[index] = localcount;
    }
}
// One thread per vertex `index`: counts walks of length 5,
// index -> n1 -> n2 -> n3 -> n4 -> index, in the CSR graph (xadj: nov+1 row
// offsets, adj: concatenated neighbor lists). Every intermediate vertex is
// forbidden from being `index`, and later hops are forbidden from revisiting
// earlier ones (see the inline eliminations). Result goes to output[index].
__global__ void kernel5(int* adj, int* xadj, int* output, int nov){
    int index = threadIdx.x + (blockIdx.x * blockDim.x);
    if(index < nov){
        //int *marked = new int[n];
        //memset(marked, -1, n * sizeof(int)); // this could perhaps be removed
        int localcount = 0;
        // int round = 0;
        // 0-->  start of the walk at `index`
        int s0 = xadj[index];
        int e0 = xadj[index+1];
        for(int i=s0; i < e0; i++){
            // 0 --> 1
            int neighbour_1 = adj[i];
            int s1 = xadj[neighbour_1];
            int e1 = xadj[neighbour_1+1];
            for(int j=s1;j < e1; j++){
                // 0 --> 1 --> 2
                int neighbour_2 = adj[j];
                //eliminate 0 == 2
                if (neighbour_2 == index) continue;
                int s2 = xadj[neighbour_2];
                int e2 = xadj[neighbour_2+1];
                for(int k=s2; k < e2; k++){
                    // 0 --> 1 --> 2 --> 3
                    int neighbour_3 = adj[k];
                    //eliminate 3 == 0
                    if (neighbour_3 == index) continue;
                    // eliminate 3 == 1
                    if (neighbour_3 == neighbour_1) continue;
                    int s3 = xadj[neighbour_3];
                    int e3 = xadj[neighbour_3+1];
                    for(int n=s3; n < e3; n++){
                        //0 -->1 -->2 -->3 -->4
                        int neighbour_4 = adj[n];
                        //eliminate 4 == 0
                        if (neighbour_4 == index) continue;
                        // eliminate 4 == 1
                        if (neighbour_4 == neighbour_1) continue;
                        // eliminate 4 == 2
                        if (neighbour_4 == neighbour_2) continue;
                        int s4 = xadj[neighbour_4];
                        int e4 = xadj[neighbour_4+1];
                        for(int o=s4; o < e4; o++){
                            //0 -->1 -->2 -->3 -->4 --> 5: count when it returns home
                            int neighbour_5 = adj[o];
                            if (neighbour_5 == index){
                                localcount+=1;
                                break;
                            }
                        }
                    }
                }
            }
        }
        output[index] = localcount;
    }
}
// Launch the cycle-counting kernel for cycle length n (3, 4 or 5) on the
// CSR graph (xadj: nov+1 offsets, adj: nnz neighbor ids) and report either
// the per-vertex counts (flag == 0) or the elapsed kernel time (flag == 1).
void wrapper(int *xadj, int *adj, int n, int nov, int nnz){
    cudaSetDevice(0);
    int *adj_d;
    int *xadj_d;
    int *output_d;
    int *output_h = new int[nov];
    int numBlock = (nov + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; // ceil-div
    cudaEvent_t start, stop;
    float elapsedTime;
    gpuErrchk(cudaMalloc((void**)&adj_d, (nnz) * sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&xadj_d, (nov + 1) * sizeof(int)));
    gpuErrchk(cudaMalloc((void**)&output_d, (nov) * sizeof(int)));
    gpuErrchk(cudaMemcpy(adj_d, adj, (nnz) * sizeof(int), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(xadj_d, xadj, (nov + 1) * sizeof(int), cudaMemcpyHostToDevice));
    // Create BOTH events before timing. The original created `stop` only
    // after the kernel AND the D2H result copy, so the reported "GPU scale"
    // time also included the copy back to the host.
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    if (n == 3) kernel3<<<numBlock, THREADS_PER_BLOCK>>>(adj_d, xadj_d, output_d, nov);
    else if (n == 4) kernel4<<<numBlock, THREADS_PER_BLOCK>>>(adj_d, xadj_d, output_d, nov);
    else if (n == 5) kernel5<<<numBlock, THREADS_PER_BLOCK>>>(adj_d, xadj_d, output_d, nov);
    cudaEventRecord(stop, 0);
    gpuErrchk(cudaDeviceSynchronize());
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    gpuErrchk(cudaMemcpy(output_h, output_d, (nov) * sizeof(int), cudaMemcpyDeviceToHost));
    if (flag == 0) printArray(output_h, nov);
    if (flag == 1) printf("GPU scale took: %f s\n", elapsedTime / 1000);
    // Release everything; the original leaked output_d, output_h and both events.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(adj_d);
    cudaFree(xadj_d);
    cudaFree(output_d);
    delete[] output_h;
}
// Read an edge-list file (one "a b" pair per line), build an undirected CSR
// graph (xadj: row offsets, adj: neighbor ids) and run the k-cycle counting
// wrapper on it.
//
// NOTE(review): the number of vertices is derived from the maximum id seen
// in the SECOND column only — confirm the input format guarantees
// max(a) <= max(b).
void read_mtxbin(string fname, int k){
    ifstream infile(fname);
    int a, b;
    int nnv = 0;                              // total adjacency entries (2 per edge)
    unordered_map<int, vector<int> > hashmap; // vertex id -> neighbor list
    int maxElement = -1;
    while (infile >> a >> b)
    {
        nnv += 2;
        hashmap[a].push_back(b);
        hashmap[b].push_back(a);
        if (b > maxElement) {
            maxElement = b;
        }
    }
    int nov = maxElement + 1;                 // vertex count (ids assumed 0-based)
    int * adj = new int[nnv];
    int * xadj = new int[nov + 1];
    xadj[0] = 0;
    int j = 0;
    int maxSize = -1;
    for (int i = 0; i < nov; i++) {
        auto current = hashmap.find(i);
        if (current == hashmap.end()) {
            xadj[i + 1] = xadj[i];            // isolated vertex: empty range
        }
        else {
            int size = current->second.size();
            maxSize = max(size, maxSize);
            xadj[i + 1] = xadj[i] + size;
            for (auto val : current->second) {
                adj[j] = val;
                j++;
            }
        }
    }
    wrapper(xadj, adj, k, nov, nnv);
    // Fix: the original leaked both CSR arrays on every call.
    delete[] adj;
    delete[] xadj;
}
// Entry point: <binary> <edge-list file> <cycle length k> <flag>
// flag selects the output of wrapper(): 0 = per-vertex counts, 1 = timing.
// Fixed: the original dereferenced argv[1..3] without checking argc.
int main(int argc, char *argv[]){
    if (argc != 4) {
        cerr << "Usage: " << argv[0] << " <mtx file> <k> <flag>" << endl;
        return 1;
    }
    char* fname = argv[1];
    int k = atoi(argv[2]);
    flag = atoi(argv[3]);
    read_mtxbin(fname, k);
    return 0;
}
|
19,733 | #include "includes.h"
// Element-wise mask difference: devOut[i] = 0 where A[i] == B[i], else 1.
// NOTE(review): there is no bounds check, so the launch configuration must
// cover exactly the array length (gridDim.x * blockDim.x == element count)
// — confirm against the callers.
__global__ void Mask_Difference_Kernel( int* A, int* B, int* devOut)
{
    const int idx = blockDim.x*blockIdx.x + threadIdx.x;
    // Branchless form of the original if/else.
    devOut[idx] = (A[idx] == B[idx]) ? 0 : 1;
}
19,734 | #include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <stdlib.h>
#include <ctime>
using namespace std;
// In-place element-wise addition: a[i] += b[i] for i in [0, count).
__global__ void AddInts(int * a, int* b, int count)
{
    // Fixed: the original computed `blockIdx.x * blockDim.x * threadIdx.x`
    // (multiply instead of add), which maps every thread 0 to index 0 and
    // leaves most elements unprocessed.
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    if (id < count)
    {
        a[id]+=b[id];
    }
}
// Fill two host arrays with random ints, add them on the GPU with AddInts,
// and print the first few results. Every CUDA call is checked; on error the
// already-acquired resources are released before exiting.
int main(int argc, char const *argv[])
{
    srand (time(NULL));
    int count = 100;
    int * h_a = new int[count];
    int * h_b = new int[count];
    for (int i = 0; i < count; i++)
    {
        h_a[i] = rand() % 1000;
        h_b[i] = rand() % 1000;
    }
    cout << "Prior to addition:" << endl;
    for (int i = 0; i < 5; i++)
    {
        cout << h_a[i] << " " << h_b[i] << endl;
    }
    int *d_a, *d_b;
    if (cudaMalloc(&d_a, sizeof(int)*count) != cudaSuccess)
    {
        cout<< "Could not allocate d_a" << endl;
        return 1;
    }
    if (cudaMalloc(&d_b, sizeof(int)*count) != cudaSuccess)
    {
        cout<< "Could not allocate d_b" << endl;
        cudaFree(d_a);
        return 1;
    }
    if (cudaMemcpy(d_a, h_a, sizeof(int) * count, cudaMemcpyHostToDevice)!=cudaSuccess)
    {
        cout<< "Could not copy d_a" << endl;
        cudaFree(d_a);
        cudaFree(d_b);
        return 1;
    }
    if (cudaMemcpy(d_b, h_b, sizeof(int) * count, cudaMemcpyHostToDevice)!=cudaSuccess)
    {
        cout<< "Could not copy d_b" << endl;
        cudaFree(d_a);
        cudaFree(d_b);
        return 1;
    }
    // Ceil-div grid so all `count` elements are covered.
    AddInts <<<count/256+1, 256>>>(d_a,d_b,count);
    // The blocking memcpy also synchronizes with the kernel.
    if (cudaMemcpy(h_a, d_a, sizeof(int) * count, cudaMemcpyDeviceToHost)!=cudaSuccess)
    {
        cout<< "Could not copy back from device" << endl;
        cudaFree(d_a);
        cudaFree(d_b);
        delete[] h_a;
        delete[] h_b;
        return 1;
    }
    for (int i = 0; i < 5; i++)
    {
        cout <<"It is: " << h_a[i] << endl;
    }
    // Fixed: the original leaked d_a and d_b on the success path.
    cudaFree(d_a);
    cudaFree(d_b);
    delete[] h_a;
    delete[] h_b;
    return 0;
}
|
19,735 | #include "cuda_runtime.h"
//#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// CUDA kernel
// Flag "green-dominant" pixels: result[i] = 1 when the green channel exceeds
// twice the red OR twice the blue channel, otherwise 0.
__global__
void vecCompare(int *R, int *G, int *B, int *result, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;            // guard the grid tail
    bool greenish = (G[i] > 2 * R[i]) || (G[i] > 2 * B[i]);
    result[i] = greenish ? 1 : 0;
}
// Run vecCompare over a height*width image split into R/G/B channel arrays
// and write the 0/1 "green-dominant" mask into `green`.
//
// WARNING: this function calls free() on the caller's r, g and b buffers —
// they must be heap pointers from malloc and must not be used after this
// call. NOTE(review): presumably intentional ownership transfer; confirm
// against the callers.
extern "C"
void compareMatrices(int height, int width, int*r, int*g, int*b, int*green)
{
    // Size of vectors
    int n = height * width;
    // Device input vectors
    int *d_r;
    int *d_g;
    int *d_b;
    //Device output vector
    int *d_green;
    // Size, in bytes, of each vector
    size_t bytes = n*sizeof(int);
    // Allocate memory for each vector on GPU
    cudaMalloc(&d_r, bytes);
    cudaMalloc(&d_g, bytes);
    cudaMalloc(&d_b, bytes);
    cudaMalloc(&d_green, bytes);
    // Copy host vectors to device
    cudaMemcpy( d_r, r, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( d_g, g, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy( d_b, b, bytes, cudaMemcpyHostToDevice);
    int blockSize, gridSize;
    // Number of threads in each thread block
    blockSize = 1024;
    // Number of thread blocks in grid (ceil so the whole image is covered)
    gridSize = (int)ceil((float)n/blockSize);
    // Execute the kernel
    vecCompare<<<gridSize, blockSize>>>(d_r, d_g, d_b, d_green, n);
    // Copy array back to host (blocking copy also synchronizes with the kernel)
    cudaMemcpy( green, d_green, bytes, cudaMemcpyDeviceToHost );
    // Release device memory
    cudaFree(d_r);
    cudaFree(d_g);
    cudaFree(d_b);
    cudaFree(d_green);
    // Release host memory (takes ownership of the input channel buffers)
    free(r);
    free(g);
    free(b);
}
|
19,736 | #include <cstdio>
#include <iostream>
#include <vector>
#include <cmath>
#include <thrust/device_vector.h>
#include <thrust/extrema.h>
using namespace std;
#define CSC(call) do { cudaError_t res = call; if (res != cudaSuccess) { fprintf(stderr, "CUDA Error in %s:%d: %s\n", __FILE__, __LINE__, cudaGetErrorString(res)); exit(0); } } while (0)
// Partial-pivoting row swap: exchange rows `step` and `maxPos` of the
// column-major matrix (cols[col*n + row]) and the matching entries of the
// RHS vector. The virtual column index `n` addresses the vector; columns
// below `step` are already finalized and are skipped.
__global__ void kernelSwap(int n, double *cols, double *vec, int step, int maxPos) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int col = tid; col <= n; col += stride) {
        if (col < step) continue;
        if (col == n) {
            double t = vec[step];
            vec[step] = vec[maxPos];
            vec[maxPos] = t;
        } else {
            double t = cols[col * n + step];
            cols[col * n + step] = cols[col * n + maxPos];
            cols[col * n + maxPos] = t;
        }
    }
}
// Elimination step of Gaussian elimination on the column-major matrix
// (cols[col*n + row]) and RHS vector: for every row below `step`, subtract
// coef * pivot-row from it, over all columns right of `step` plus the
// virtual column `n` (the vector).
__global__ void kernelModify(int n, double *cols, double *vec, int step) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int idy = blockDim.y * blockIdx.y + threadIdx.y;
    int offsetx = blockDim.x * gridDim.x;
    int offsety = blockDim.y * gridDim.y;
    for (int row = idx; row < n; row += offsetx) {
        if (row <= step) {
            continue;
        }
        // Hoisted: the elimination coefficient depends only on the row, not
        // the column; the original recomputed it in the inner loop. The
        // pivot column (col == step) is never written here, so the operands
        // stay constant for the whole inner loop.
        double coef = cols[step * n + row] / cols[step * n + step];
        for (int col = idy; col <= n; col += offsety) {
            if (col <= step) {
                continue;
            }
            if (col == n) {
                vec[row] = vec[row] - coef * vec[step];
            }
            else {
                cols[col * n + row] = cols[col * n + row] - coef * cols[col * n + step];
            }
        }
    }
}
// Comparator ordering doubles by absolute value, so thrust::max_element
// selects the entry with the largest magnitude (the pivot).
struct compareKeyValue {
    __host__ __device__
    bool operator()(double lhs, double rhs) {
        return fabs(lhs) < fabs(rhs);
    }
};
// Solve the n x n linear system A x = b via Gaussian elimination with
// partial pivoting on the GPU (thrust selects each pivot; kernelSwap and
// kernelModify perform the row swap and elimination), then back-substitute
// on the host. The matrix is stored COLUMN-major: cols[col * n + row].
int main() {
    ios_base::sync_with_stdio(false);
    int n;
    cin >> n;
    double *cols = new double[n * n];   // column-major matrix
    double *vec = new double[n];        // right-hand side
    // Input arrives row by row but is stored column-major.
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            cin >> cols[col * n + row];
        }
    }
    for (int row = 0; row < n; row++) {
        cin >> vec[row];
    }
    double *devCols, *devVec;
    CSC(cudaMalloc(&devCols, sizeof(double) * n * n));
    CSC(cudaMalloc(&devVec, sizeof(double) * n));
    CSC(cudaMemcpy(devCols, cols, sizeof(double) * n * n, cudaMemcpyHostToDevice));
    CSC(cudaMemcpy(devVec, vec, sizeof(double) * n, cudaMemcpyHostToDevice));
    int maxPos;
    // Forward elimination: one pivot per step.
    for (int step = 0; step < n - 1; step++) {
        // step*(n+1) addresses the diagonal element (row=step, col=step), so
        // the max-magnitude search runs over rows step..n-1 of column `step`.
        thrust::device_ptr<double> devPtr = thrust::device_pointer_cast(devCols + step * (n + 1));
        thrust::device_ptr<double> maxPtr = thrust::max_element(devPtr, devPtr + (n - step), compareKeyValue());
        maxPos = &maxPtr[0] - &devPtr[0] + step;   // absolute row index of the pivot
        kernelSwap<<<256, 256>>>(n, devCols, devVec, step, maxPos);
        CSC(cudaGetLastError());
        kernelModify<<<dim3(16, 16), dim3(16, 16)>>>(n, devCols, devVec, step);
        CSC(cudaGetLastError());
    }
    CSC(cudaMemcpy(cols, devCols, sizeof(double) * n * n, cudaMemcpyDeviceToHost));
    CSC(cudaMemcpy(vec, devVec, sizeof(double) * n, cudaMemcpyDeviceToHost));
    CSC(cudaFree(devCols));
    CSC(cudaFree(devVec));
    // Back-substitution on the now upper-triangular system.
    vector<double> res(n);
    double tmp;
    for (int row = n - 1; row >= 0; row--) {
        tmp = 0;
        for (int col = row + 1; col < n; col++) {
            tmp += cols[col * n + row] * res[col];
        }
        res[row] = (vec[row] - tmp) / cols[row * n + row];
    }
    cout.precision(10);
    cout.setf(ios::scientific);
    for (int i = 0; i < n; i++) {
        cout << res[i] << " ";
    }
    cout << endl;
    delete[] cols;
    delete[] vec;
    return 0;
}
|
19,737 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
//CUDA RunTime API
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include "device_functions.h"
#define THREAD_NUM 256
#define raw_row 512*680
#define raw_column 224
#define MATRIX_SIZE raw_row*raw_column
const int blocks_num = 16;
//// __global__ м˷
//__global__ static void matMultCUDA(Imagdata* imagdata, CovData* imagecov, cBYTE* datatemp, clock_t* time)
//{
//
// //ʾĿǰ thread ǵڼ thread 0 ʼ㣩
// const int tid = threadIdx.x;
//
// //ʾĿǰ thread ڵڼ block 0 ʼ㣩
// const int bid = blockIdx.x;
//
// //// ֻ÷
// //// bid tid thread Ӧü row column
// //const int idx = bid * THREAD_NUM + tid;
// //const int row = idx / n;
// //const int column = idx % n;
//
// //¼㿪ʼʱ
//
// //ֻ thread 0 threadIdx.x = 0 ʱм¼ÿ block ¼ʼʱ估ʱ
// if (tid == 0) time[bid] = clock();
// if (tid == 0)
// printf("Hello thread %d, f=%f\n", tid);
//
// /*
// Э̿ʼ
// */
//
// long count;
// long count2;
//
// imagecov->row = imagdata->Dim;
// imagecov->column = imagdata->Dim;
//
// for (int i = 0; i < imagdata->Dim; ++i) {
// count = 0;
// int j = 0;
// for (j = 0; j < imagdata->imgSize; ++j) {
// count += imagdata->data[i * imagdata->imgSize + j];
// }
// int mean = count / imagdata->imgSize;
// for (j = 0; j < imagdata->imgSize; ++j) {
// datatemp[i * imagdata->imgSize + j] = imagdata->data[i * imagdata->imgSize + j] - mean;
// }
// }
// //cout << "(imgcompute.cpp)ؾֵ\n" << endl;
// printf("(imgcompute.cpp)ؾֵ...\n");
// //-----------------------------------------------------------------------------------------------------------
// //˲ʱΪ45롣Ϊ21롣ѭμ룬ʱΪ12s
//
// for (int i = 0; i < imagdata->Dim; ++i)
// {
// int j;
// for (j = i; j < imagdata->Dim; ++j)
// {
// count2 = 0;
// int k;
// //for (k = 0; k < imagdata->imgSize; ++k) {
//
// // count2 += datatemp[i * imagdata->imgSize + k] * datatemp[j * imagdata->imgSize + k];
// //}
// for (k = 0; k < imagdata->imgSize; ++k) {
//
// count2 += datatemp[i * imagdata->imgSize + k] * datatemp[j * imagdata->imgSize + k];
// }
// imagecov->data[i * (imagecov->row) + j] = count2 / (imagdata->imgSize - 1);
// if (i != j) {
// imagecov->data[j * (imagecov->row) + i] = count2 / (imagdata->imgSize - 1);
// }
// }
// }
//
// delete[]datatemp;
// printf("(imgcompute.cpp)Э\n");
//
// /*
// Э̽
// */
//
//
// //ʱ,¼ֻ thread 0 threadIdx.x = 0 ʱУÿ block ¼ʼʱ估ʱ
// if (tid == 0)
// {
// time[bid + blocks_num] = clock();
// }
//
//}
//
//
//__global__ void cudatest() {
// printf("success!");
//}
//
//
//extern "C" void cuda_juzhen(RawImag * myraw, Imagdata * imagdata, CovData * imagecov)
//{
// clock_t* time;
//
// printf("1\n");
//
// //CPUGPUڴ ͬһṹҪֿʵ
// //CPUڴ
// imagecov->data = new covBYTE[sizeof(covBYTE) * imagdata->Dim * imagdata->Dim];
// cBYTE* datatemp = new cBYTE[sizeof(cBYTE) * imagdata->rawSize]; // rawSize 512x680x224,8000W
//
// printf("2\n");
//
//
// //cudaMalloc ȡһԿڴ
// //ṹڴҪṹ屾Լṹÿһ
//
//
// Imagdata* cuda_imagdata;
// CovData* cuda_imagecov;
// cBYTE* cuda_datatemp;
//
// cuda_imagdata = new Imagdata[sizeof(Imagdata)]; // ṹ飨ָ룩ڴ
// cuda_imagecov = new CovData[sizeof(CovData)];
// cuda_datatemp = new cBYTE[sizeof(cBYTE)];
//
//
// //memset(&cuda_imagdata, 0, sizeof(Imagdata));
// //memset(&cuda_imagecov, 0, sizeof(CovData));
// //memset(&cuda_datatemp, 0, sizeof(cBYTE));
//
//
// cuda_imagdata->Col = imagdata->Col;
// cuda_imagdata->Row = imagdata->Row;
// cuda_imagdata->Dim = imagdata->Dim;
// cuda_imagdata->rawSize = imagdata->rawSize;
// cuda_imagdata->imgSize = imagdata->imgSize;
//
// cuda_imagecov->row = imagdata->Dim;
// cuda_imagecov->column = imagdata->Dim;
//
//
// cuda_imagdata->data = new covBYTE[sizeof(covBYTE) * imagdata->Dim * imagdata->Dim];;
// cuda_datatemp = new cBYTE[sizeof(cBYTE) * imagdata->rawSize]; // rawSize 512x680x224,8000W;
//
// //cudaMalloc((void**)&cuda_imagdata->Row, sizeof(int));
// //cudaMalloc((void**)&cuda_imagdata->Col, sizeof(int));
// //cudaMalloc((void**)&cuda_imagdata->Dim, sizeof(int));
// //cudaMalloc((void**)&cuda_imagdata->rawSize, sizeof(int));
// //cudaMalloc((void**)&cuda_imagdata->imgSize, sizeof(int));
//
// //cudaMalloc((void**)&cuda_imagdata, sizeof(Imagdata));
// cudaMalloc((void**)&cuda_imagdata->data, sizeof(cBYTE) * imagdata->rawSize);
//
// printf("2.1\n");
//
// //cudaMalloc((void**)&cuda_imagecov->row, sizeof(int));
// //cudaMalloc((void**)&cuda_imagecov->column, sizeof(int));
//
//
// //cudaMalloc((void**)&cuda_imagecov, sizeof(CovData));
//
// cuda_imagecov->data = new cBYTE[sizeof(cBYTE) * imagdata->rawSize];
// cudaMalloc((void**)&cuda_imagecov->data, sizeof(covBYTE) * imagdata->Dim * imagdata->Dim);
//
// printf("2.3\n");
//
// cudaMalloc((void**)&cuda_datatemp, sizeof(cBYTE) * imagdata->rawSize);
//
// cudaMalloc((void**)&time, sizeof(clock_t) * blocks_num * 2);
//
// printf("3\n");
//
//
// //cudaMemcpy ľƵԿڴ
// //cudaMemcpyHostToDevice - ڴ渴ƵԿڴ
// //cudaMemcpyDeviceToHost - Կڴ渴Ƶڴ
//
// //cudaMemcpy(cuda_imagdata, imagdata, sizeof(cBYTE) * imagdata->rawSize, cudaMemcpyHostToDevice);
// cudaMemcpy(cuda_imagdata->data, imagdata->data, sizeof(cBYTE) * imagdata->rawSize, cudaMemcpyHostToDevice);
//
// //cudaMemcpy(cuda_imagecov, imagecov, sizeof(covBYTE) * imagdata->rawSize, cudaMemcpyHostToDevice);
// cudaMemcpy(cuda_imagecov->data, imagecov->data, sizeof(covBYTE) * imagdata->rawSize, cudaMemcpyHostToDevice);
//
// cudaMemcpy(cuda_datatemp, datatemp, sizeof(cBYTE) * imagdata->rawSize, cudaMemcpyHostToDevice);
//
// printf("4\n");
//
// // CUDA ִк <<<block Ŀ, thread Ŀ, shared memory С>>>(...);
// //matMultCUDA <<< blocks_num, THREAD_NUM>>> (cuda_imag;data, cuda_imagecov, cuda_datatemp, time);
// cudatest << < 1, 64 >> > ;
//
// cudaError_t cudaStatus = cudaGetLastError();
// if (cudaStatus != cudaSuccess)
// {
// fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
// }
//
//
// printf("5\n");
//
// /*ѽʾоƬƻڴ*/
//
// clock_t time_use[blocks_num * 2];
//
// //cudaMemcpy Դиƻڴ
// Imagdata* new_imagdata;
// CovData* new_imagecov;
//
// new_imagdata = new Imagdata[sizeof(Imagdata)]; // ṹ飨ָ룩ڴ
// new_imagecov = new CovData[sizeof(CovData)];
//
// //cudaMemcpy(new_imagdata, cuda_imagdata, sizeof(cBYTE) * imagdata->rawSize, cudaMemcpyDeviceToHost);
// cudaMemcpy(new_imagdata->data, cuda_imagdata->data, sizeof(cBYTE) * imagdata->rawSize, cudaMemcpyDeviceToHost);
// //cudaMemcpy(new_imagecov, cuda_imagecov, sizeof(covBYTE) * imagdata->rawSize, cudaMemcpyDeviceToHost);
// cudaMemcpy(new_imagecov->data, cuda_imagecov->data, sizeof(covBYTE) * imagdata->rawSize, cudaMemcpyDeviceToHost);
//
// cudaMemcpy(&time_use, time, sizeof(clock_t) * blocks_num * 2, cudaMemcpyDeviceToHost);
//
// printf("6\n");
//
// //Free
// cudaFree(cuda_imagdata->data);
// //cudaFree(cuda_imagdata);
//
// cudaFree(cuda_imagecov->data);
// //cudaFree(cuda_imagdata);
//
// cudaFree(cuda_datatemp);
// cudaFree(time);
//
// //ÿ block Ŀʼʱ䣬Ľʱȡʱ
// clock_t min_start, max_end;
//
// min_start = time_use[0];
//
// max_end = time_use[blocks_num];
//
// for (int i = 1; i < blocks_num; i++)
// {
// if (min_start > time_use[i]) min_start = time_use[i];
//
// if (max_end < time_use[i + blocks_num]) max_end = time_use[i + blocks_num];
// }
//
// //˺ʱ
// clock_t final_time = max_end - min_start;
//
// printf("gputime: %d\n", final_time);
//
//}
|
19,738 | /* *
* Copyright 1993-2012 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*/
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <fstream>
using namespace std;
int patient_count;
int icd_count;
int* patients_host;
int* icds_host;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} }
// Release the pinned host buffers allocated by read_patients()/read_icds().
// Must only be called after both readers have run, since cudaFreeHost on an
// unallocated pointer would fail the CUDA_CHECK_RETURN macro.
void free_all() {
    CUDA_CHECK_RETURN(cudaFreeHost(patients_host));
    CUDA_CHECK_RETURN(cudaFreeHost(icds_host));
}
// Count the data rows of the patients CSV (header skipped), allocate a
// pinned host buffer of 4 ints per patient, then re-read the file and parse
// the first four comma-separated integer fields of each row into
// patients_host. Updates the global patient_count.
void read_patients() {
    patient_count = 0;
    string line;
    ifstream file("csv_data/patients_sorted_short.csv");
    if (file.is_open()) {
        // skip first line (header)
        getline(file, line);
        while (getline(file, line)) {
            patient_count ++;
        }
    }
    printf("%d patients read\n", patient_count);
    CUDA_CHECK_RETURN(cudaHostAlloc((void**) &patients_host, patient_count * 4 * sizeof(int), cudaHostAllocDefault));
    patient_count = 0;
    file.clear();              // clear the EOF flag before rewinding
    file.seekg(0, ios::beg);
    if (file.is_open()) {
        // skip first line (header)
        getline(file, line);
        while (getline(file, line)) {
            char * dup = strdup(line.c_str());
            char * record = strtok(dup, ",");
            // Guard against short rows: the original called atoi(NULL) on
            // malformed lines.
            for (int i = 0; i < 4 && record != NULL; i++) {
                patients_host[patient_count * 4 + i] = atoi(record);
                record = strtok(NULL, ",");
            }
            free(dup);         // fix: the original leaked every strdup'd line
            patient_count++;
        }
    }
}
// Count the data rows of the ICD CSV (header skipped), allocate a pinned
// host buffer of one int per row, then re-read the file and parse the first
// comma-separated integer field of each row into icds_host. Updates the
// global icd_count.
void read_icds() {
    icd_count = 0;
    string line;
    ifstream file("csv_data/icds.csv");
    if (file.is_open()) {
        // skip first line (header)
        getline(file, line);
        while (getline(file, line)) {
            icd_count ++;
        }
    }
    printf("%d ICDs read\n", icd_count);
    CUDA_CHECK_RETURN(cudaHostAlloc((void**) &icds_host, icd_count * sizeof(int), cudaHostAllocDefault));
    icd_count = 0;
    file.clear();              // clear the EOF flag before rewinding
    file.seekg(0, ios::beg);
    if (file.is_open()) {
        // skip first line (header)
        getline(file, line);
        while (getline(file, line)) {
            char * dup = strdup(line.c_str());
            char * record = strtok(dup, ",");
            // Guard empty lines: strtok returns NULL and atoi(NULL) is UB.
            icds_host[icd_count] = (record != NULL) ? atoi(record) : 0;
            free(dup);         // fix: the original leaked every strdup'd line
            icd_count++;
        }
    }
}
// Load both CSV inputs into pinned host buffers, then release them.
int main(int argc, char* argv[]) {
    read_patients();
    read_icds();
    free_all();
    return 0;
}
|
19,739 | #include "includes.h"
// For each of `size` partitions decide whether it must be split: it is
// oversized (> maxPartSize) and either splitting is forced or both halves
// can still exceed the minimum size (> 2 * minPartSize). Writes 1/0 into
// splitsToMake.
__global__ void MarkSplits(int size, bool force, int minPartSize, int maxPartSize, int* partSizes, int* splitsToMake) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= size) return;
    int partSize = partSizes[i];
    bool split = (partSize > maxPartSize) && (force || partSize > minPartSize * 2);
    splitsToMake[i] = split ? 1 : 0;
}
19,740 | #include <cuda.h>
#include <iostream>
#include <cstdlib>
using namespace std;
#define THREADSPERBLOCK 1024
// __global__ void mandel(char *d_vet, int MAX_N, int MAX_COL, int MAX_ROW) {
// One thread per character cell: run the Mandelbrot escape-time iteration
// for cell (r, c) of a MAX_ROW x MAX_COL grid mapped onto the region
// [-1.5, 0.5] x [-1, 1], and write '#' (did not escape within MAX_NUM
// iterations) or '.' into the row-major output buffer.
__global__ void mandel(char *d_vet, int MAX_ROW, int MAX_COL, int MAX_NUM) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= MAX_ROW * MAX_COL) return;
    int c = idx % MAX_COL;
    int r = idx / MAX_COL;
    int n = 0;
    float x = 0, y = 0, tmp;
    // z <- z^2 + c0 until |z|^2 >= 4 or the iteration budget is spent.
    while ((x*x) + (y*y) < 4 && ++n < MAX_NUM) {
        tmp = x*x - y*y + ((float)c * 2 / MAX_COL - 1.5);
        y = x * y * 2 + ((float)r * 2 / MAX_ROW - 1);
        x = tmp;
    }
    d_vet[idx] = (n == MAX_NUM ? '#' : '.');
}
// Render an ASCII Mandelbrot set of MAX_ROW x MAX_COL cells with MAX_NUM
// iterations per cell (all taken from argv) and print it to stdout.
int main(int argc, char **argv) {
    int MAX_ROW;
    int MAX_COL;
    int MAX_NUM;
    if (argc == 4) {
        MAX_ROW = atoi(argv[1]);
        MAX_COL = atoi(argv[2]);
        MAX_NUM = atoi(argv[3]);
    }
    else
        exit(1);
    int MAX_TAM = MAX_ROW * MAX_COL;
    size_t i, size = MAX_TAM * sizeof(char);
    int nBlocks = ( MAX_TAM + THREADSPERBLOCK - 1 ) / THREADSPERBLOCK; // ceil-div
    char *h_vet = (char *)malloc(size);
    // Fixed: the original also malloc'd d_vet on the host and immediately
    // overwrote the pointer with cudaMalloc, leaking the host buffer.
    char *d_vet = NULL;
    cudaSetDevice(0);
    cudaMalloc((void**)&d_vet, size);
    mandel<<<nBlocks, THREADSPERBLOCK>>>(d_vet, MAX_ROW, MAX_COL, MAX_NUM);
    // Blocking copy also synchronizes with the kernel.
    cudaMemcpy(h_vet, d_vet, size, cudaMemcpyDeviceToHost);
    cudaFree(d_vet);
    for(i = 0; i < MAX_TAM; ++i) {
        cout << h_vet[i];
        if (i % MAX_COL == MAX_COL-1)
            cout << endl;
    }
    free(h_vet);
    return 0;
}
|
19,741 | #include <stdio.h>
// Print the 1D block ("group") and thread id of the calling thread.
__global__ void report(){
    printf("My group id is %d, and my thread id is %d\n", blockIdx.x, threadIdx.x);
}
// Print the full 3D block ("group") and thread coordinates of the caller.
__global__ void report_in_detail(){
    printf("My group id is (%d,%d,%d), and my thread id is (%d,%d,%d)\n",
           blockIdx.x, blockIdx.y, blockIdx.z,
           threadIdx.x, threadIdx.y, threadIdx.z);
}
// Launch report_in_detail with two different grid/block shapes and wait for
// the device printf output after each launch.
int main(){
    report_in_detail<<<3,2>>>();
    // cudaThreadSynchronize() is deprecated; use cudaDeviceSynchronize().
    cudaDeviceSynchronize(); // let the CPU wait until the GPU has finished
    printf("-----------\n");
    report_in_detail<<<dim3(1,1,3),dim3(1,2,1)>>>();
    // Fixed: without this sync the process could exit before the second
    // launch's device printf output is flushed.
    cudaDeviceSynchronize();
    return 0;
}
19,742 | #include "includes.h"
// Grid-stride matrix transpose: dst (colsdst columns wide) receives the
// transpose of src (colssrc columns wide); n is the element count of dst.
__global__ void gpu_transpose(const float* src, float* dst, int colssrc, int colsdst, int n) {
    int stride = blockDim.x * gridDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride) {
        int row = i / colsdst;
        int col = i % colsdst;
        // Element (row, col) of dst comes from (col, row) of src.
        dst[i] = src[col * colssrc + row];
    }
}
19,743 | #include <cuda_runtime.h>
// Minimal smoke test: allocate 100 bytes on the device and release them.
int main() {
    int* buf = nullptr;
    cudaMalloc(&buf, 100);
    cudaFree(buf);
    return 0;
}
19,744 | #include "includes.h"
// Evaluate, in place, the rational polynomial
//   p(x) = 5 + x(7 - x(9 + x(5 + x(5 + x)))) - 1/x + 3/x^2 + x/5
// for each of the N elements of poli.
__global__ void poli_warp(float* poli, const int N) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < N) {
        float x = poli[idx];
        poli[idx] = 5 + x * ( 7 - x * (9 + x * (5 + x * (5 + x))))- 1.0f/x + 3.0f/(x*x) + x/5.0f;
    }
    // Fixed: the original ended with an unconditional `poli[idx] = x;`,
    // which (a) wrote out of bounds for idx >= N and (b) overwrote the
    // computed result with the original input for every valid index.
}
19,745 |
// Element-wise vector addition: out[i] = in1[i] + in2[i] for i in [0, len).
__global__ void vecAdd(float * in1, float * in2, float * out, int len) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= len) return;   // guard the grid tail
    out[idx] = in1[idx] + in2[idx];
}
#include <iostream>
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
// Demonstrate thrust::sort on a device vector: sort eight hard-coded values
// ascending (default comparator), then descending (thrust::greater).
int main(void)
{
    const int initial[8] = {6, 3, 7, 5, 9, 0, 8, 1};
    thrust::device_vector<int> data(8);
    for (int i = 0; i < 8; i++)
        data[i] = initial[i];
    thrust::sort(data.begin(), data.end());
    std::cout<<"ascending"<<std::endl;
    for (int i = 0; i < data.size(); i++)
        std::cout<<data[i]<<std::endl;
    thrust::sort(data.begin(), data.end(), thrust::greater<int>());
    std::cout<<"descending"<<std::endl;
    for (int i = 0; i < data.size(); i++)
        std::cout<<data[i]<<std::endl;
}
|
19,747 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#define BLOCKSIZE 4 // Number of threads in each thread block
// CUDA kernel. Each thread takes care of one element of a
// Forward difference: out[i] = in[i+1] - in[i] for i in [0, n-1).
// One thread per output element; the last input element has no successor.
__global__ void diffKernel( float *in, float *out, int n )
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n - 1) return;
    out[idx] = in[idx + 1] - in[idx];
}
// Compute the forward difference of a fixed input array on the GPU and
// print both the input and the (n-1)-element result.
int main( int argc, char* argv[] )
{
    int i;
    float input[] = {4, 5, 6, 7, 19, 10, 0, 4, 2, 3, 1, 7, 9, 11, 45, 23, 99, 29};
    // Careful: sizeof-based count only works for statically allocated arrays.
    int n = sizeof(input) / sizeof(float);
    // Host input vector
    float *h_in = input;
    // Host output vector (one element shorter than the input)
    float *h_out = (float *) malloc((n - 1) * sizeof(float));
    // Device vectors
    float *d_in;
    float *d_out;
    // Size, in bytes, of the input vector
    size_t bytes = n * sizeof(float);
    // Allocate memory for each vector on GPU
    cudaMalloc(&d_in, bytes);
    cudaMalloc(&d_out, bytes - sizeof(float));
    // Copy host data to device
    cudaMemcpy( d_in, h_in, bytes, cudaMemcpyHostToDevice);
    // Number of threads in each thread block
    int blockSize = BLOCKSIZE;
    // Number of thread blocks in grid (ceil-div)
    int gridSize = ceil( n / (float) BLOCKSIZE);
    // Execute the kernel.
    // Fixed: the original launched <<<blockSize, gridSize>>>, i.e. it passed
    // the threads-per-block count as the GRID dimension and vice versa. The
    // execution configuration is <<<grid, block>>>.
    diffKernel<<<gridSize, blockSize>>>(d_in, d_out, n);
    // Copy array back to host (blocking copy also synchronizes)
    cudaMemcpy( h_out, d_out, bytes - sizeof(float), cudaMemcpyDeviceToHost );
    // Show the result
    printf("The original array is: ");
    for(i = 0; i < n; i ++)
        printf("%4.0f,", h_in[i] );
    printf("\n\nThe diff array is: ");
    for(i = 0; i < n - 1; i++)
        printf("%4.0f,", h_out[i] );
    puts("");
    // Release device memory
    cudaFree(d_in);
    cudaFree(d_out);
    // Release host memory
    free(h_out);
    return 0;
}
|
19,748 | #include <iostream>
using namespace std;
// Print CUDA driver/runtime versions, global memory usage, and a property
// summary for every CUDA device visible to the runtime.
int main() {
    int ver;
    cudaDriverGetVersion(&ver);
    cout << "DRIVER VERSION: " << ver << "\n";
    cudaRuntimeGetVersion (&ver);
    cout << "RUNTIME VERSION: " << ver << "\n";
    cout << "\n";
    // Memory totals are reported for the current device, in bytes; printed as MB.
    size_t total_mem, free_mem;
    cudaMemGetInfo(&free_mem, &total_mem);
    cout << "TOTAL MEMORY: " << total_mem * 1e-6f << "MB\n";
    cout << "FREE MEMORY: " << free_mem * 1e-6f << "MB\n";
    cout << "\n";
    cudaDeviceProp prop;
    int numberOfDevices;
    cudaGetDeviceCount(&numberOfDevices);
    cout << "Number of devices: " << numberOfDevices << "\n";
    for (int i = 0; i < numberOfDevices; i++) {
        cout << "\n";
        cudaGetDeviceProperties(&prop, i);
        cout << "Name: " << prop.name << endl;
        // clockRate is reported in kHz; * 1e-6f converts to GHz.
        cout << "Clock rate: " << prop.clockRate * 1e-6f << "Ghz\n";
        cout << "Device compute capability: " << prop.major << "." << prop.minor << "\n";
        cout << "Multiprocessor count: " << prop.multiProcessorCount << "\n";
        cout << "Total global mem: " << prop.totalGlobalMem * 1e-6f << "MB\n";
        cout << "Max threads per MProcessor: " << prop.maxThreadsPerMultiProcessor << "\n";
        cout << "Shared memory per Block: " << prop.sharedMemPerBlock * 1e-6f << "MB\n";
        cout << "Warp size: " << prop.warpSize << "\n";
        cout << "Max threads pen Block: " << prop.maxThreadsPerBlock << "\n";
        cout << "Max block dimension: (" << prop.maxThreadsDim[0] << ", " << prop.maxThreadsDim[1] << ", " << prop.maxThreadsDim[2] << ")\n";
        cout << "Max grid dimension: (" << prop.maxGridSize[0] << ", " << prop.maxGridSize[1] << ", " << prop.maxGridSize[2] << ")\n";
        cout << "Kernel execution timeout is " << (prop.kernelExecTimeoutEnabled ? "on" : "off") << "\n";
        // NOTE(review): deviceOverlap is a deprecated field (asyncEngineCount
        // supersedes it) — kept to preserve the program's output.
        cout << "Device overlap is " << (prop.deviceOverlap ? "on" : "off") << "\n";
    }
    cout << endl;
    return 0;
}
19,749 | #include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <chrono>
#include <thrust/extrema.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
// Reads one discarded leading value followed by 2517 daily prices from stdin,
// mirrors them to the GPU, and reports mean / min / max statistics for the
// full window and for the most recent 365 entries.
int main() {
    double stocks;
    // First extraction is intentionally overwritten by the loop below
    // (kept from the original program's input format).
    std::cin >> stocks;
    thrust::host_vector<double> host;
    for (int i = 0; i < 2517; ++i) {
        std::cin >> stocks;
        host.push_back(stocks);
    }
    // Constructing the device_vector copies the data to the GPU.
    thrust::device_vector<double> dev(host);
    const double total = thrust::reduce(dev.begin(), dev.end());
    const double mean = total / 2517;
    std::cout << "Média: " << mean << "\n";
    // Entries [2152, 2517) are the most recent 365 samples.
    const double totalYear = thrust::reduce(dev.begin() + 2152, dev.end());
    const double mean365 = totalYear / 365;
    std::cout << "Média último ano: " << mean365 << "\n";
    const auto allExtrema = thrust::minmax_element(dev.begin(), dev.end());
    const auto yearExtrema = thrust::minmax_element(dev.end() - 365, dev.end());
    std::cout << "Mínimo 10 anos: " << *(allExtrema.first) << "\n";
    std::cout << "Máximo 10 anos: " << *(allExtrema.second) << "\n";
    std::cout << "Mínimo último ano: " << *(yearExtrema.first) << "\n";
    std::cout << "Máximo último ano: " << *(yearExtrema.second) << "\n";
}
|
19,750 | #define t_max 1
#define t 1
/*
(u[0][0][0][1][0]=(a*((((u[-3][0][0][0][0]+(u[0][-3][0][0][0]+u[0][0][-3][0][0]))*-2.0)+(((u[-2][0][0][0][0]+(u[0][-2][0][0][0]+u[0][0][-2][0][0]))*15.0)+((u[-1][0][0][0][0]+(u[0][-1][0][0][0]+u[0][0][-1][0][0]))*-60.0)))+((u[0][0][0][0][0]*20.0)+(((u[1][0][0][0][0]+(u[0][1][0][0][0]+u[0][0][1][0][0]))*30.0)+((u[2][0][0][0][0]+(u[0][2][0][0][0]+u[0][0][2][0][0]))*-3.0))))))
*/
/*
 * Auto-generated (stencil-compiler style) CUDA kernel: applies one sweep of a
 * 5th-order "upstream" 3D finite-difference stencil, reading a 16-point
 * neighbourhood of u_0_0 and writing the weighted combination (scaled by `a`)
 * into u_0_1 at _idx9.
 * `t` and `t_max` are file-level macros fixed to 1, so every t-dependent term
 * in the index arithmetic below is a compile-time constant.
 * NOTE(review): u_0_1_out and the thdblks_* / chunk_*_max variables are
 * computed but never used here — presumably kept for the generator's host
 * wrapper; confirm against the generated calling code.
 */
__global__ void upstream_5_3d(double * * u_0_1_out, double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c)
{
//double * const u__u_0[16] = { u_0_0, u_0_1 } ;
int _idx0;
int _idx1;
int _idx10;
int _idx11;
int _idx12;
int _idx13;
int _idx14;
int _idx15;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int _idx7;
int _idx8;
int _idx9;
int chunk_idx_x;
int chunk_idx_x_max;
int chunk_idx_y;
int chunk_idx_y_max;
int chunk_idx_z;
int chunk_idx_z_max;
int idx_1_2;
int size_1_1;
int size_1_2;
//int t;
int thd_idx_x;
int thd_idx_y;
int thd_idx_z;
int thdblks_idx_x;
int thdblks_idx_x_max;
int thdblks_idx_y;
int thdblks_idx_y_max;
int thdblks_idx_z;
int thdblks_idx_z_max;
/*
Initializations
*/
// blockIdx.y encodes both the y and z tiles: it is decomposed below into
// idx_1_2 (z tile) and tmp (y tile) using size_1_2 = z_max / blockDim.z.
int tmp;
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
// Each thread owns a run of `c` consecutive x positions and a single (y, z)
// position.
chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x)));
chunk_idx_x_max=(chunk_idx_x+c);
chunk_idx_y=(threadIdx.y+(tmp*blockDim.y));
chunk_idx_y_max=(chunk_idx_y+1);
chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
chunk_idx_z_max=(chunk_idx_z+1);
thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
thdblks_idx_x_max=(thdblks_idx_x+tbx);
thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y)));
thdblks_idx_y_max=(thdblks_idx_y+tby);
thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z)));
thdblks_idx_z_max=(thdblks_idx_z+tbz);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */
/* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */
/*
for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
thd_idx_z=chunk_idx_z;
thd_idx_y=chunk_idx_y;
// Iterate over this thread's run of c x positions.
for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1)
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0])
*/
// _idx0 is the only flat offset computed from scratch (closed form in the
// generated comment below); every other _idx is derived from it by adding
// constant x/y/z strides of the padded array.
/* _idx0 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)) */
_idx0=(((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t));
/* _idx1 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+3) */
_idx1=(((_idx0-(3*x_max))-(15*t))+3);
/* _idx2 = (((((((((thd_idx_z*x_max)+((5*t)*thd_idx_z))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(15*t))+3) */
_idx2=((((_idx0+(((-3*x_max)-(15*t))*y_max))-((15*t)*x_max))-(75*(t*t)))+3);
/* _idx3 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+1) */
_idx3=(_idx0+1);
/* _idx4 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+1)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(5*t))+3) */
_idx4=((_idx1+x_max)+(5*t));
/* _idx5 = ((((((((((((thd_idx_z+1)*x_max)+((5*t)*thd_idx_z))+(5*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(5*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(25*(t*t)))+(15*t))+3) */
_idx5=(((_idx2+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
/* _idx6 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+2) */
_idx6=(_idx3+1);
/* _idx7 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+2)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(10*t))+3) */
_idx7=((_idx4+x_max)+(5*t));
/* _idx8 = ((((((((((((thd_idx_z+2)*x_max)+((5*t)*thd_idx_z))+(10*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(10*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(50*(t*t)))+(15*t))+3) */
_idx8=(((_idx5+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
/* _idx9 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+3) */
_idx9=(_idx3+2);
/* _idx10 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+4) */
_idx10=(_idx3+3);
/* _idx11 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+4)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(20*t))+3) */
_idx11=((_idx9+x_max)+(5*t));
/* _idx12 = ((((((((((((thd_idx_z+4)*x_max)+((5*t)*thd_idx_z))+(20*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(20*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(100*(t*t)))+(15*t))+3) */
_idx12=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
/* _idx13 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+5) */
_idx13=(_idx3+4);
/* _idx14 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+5)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(25*t))+3) */
_idx14=((_idx11+x_max)+(5*t));
/* _idx15 = ((((((((((((thd_idx_z+5)*x_max)+((5*t)*thd_idx_z))+(25*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(25*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(125*(t*t)))+(15*t))+3) */
_idx15=(((_idx12+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
// The stencil itself: weighted sum of the -3..+2 neighbours along each axis
// around the centre point _idx9, written into the second time level.
u_0_1[_idx9]=(a*((((u_0_0[_idx0]+(u_0_0[_idx1]+u_0_0[_idx2]))*-2.0)+(((u_0_0[_idx3]+(u_0_0[_idx4]+u_0_0[_idx5]))*15.0)+((u_0_0[_idx6]+(u_0_0[_idx7]+u_0_0[_idx8]))*-60.0)))+((u_0_0[_idx9]*20.0)+(((u_0_0[_idx10]+(u_0_0[_idx11]+u_0_0[_idx12]))*30.0)+((u_0_0[_idx13]+(u_0_0[_idx14]+u_0_0[_idx15]))*-3.0)))));
}
}
}
}
/*
 * Auto-generated companion kernel to upstream_5_3d: seeds the grids before
 * the stencil sweep. For each point this thread owns it writes 0.1 into the
 * 16-point stencil neighbourhood of time level u__u_0[t-1] and 1.1 at the
 * centre point of time level u__u_0[t].
 * `t` is a file-level macro fixed to 1, so only slots 0 and 1 of u__u_0 are
 * ever indexed (the array is declared with 16 slots; the remaining entries
 * are value-initialized to null and unused).
 * NOTE(review): `a` and the thdblks_* / chunk_*_max variables are unused
 * here — presumably kept so the generator emits a uniform signature.
 */
__global__ void initialize(double * u_0_0, double * u_0_1, double a, int x_max, int y_max, int z_max, int tbx, int tby, int tbz, int c)
{
double * const u__u_0[16] = { u_0_0, u_0_1 } ;
int _idx0;
int _idx1;
int _idx10;
int _idx11;
int _idx12;
int _idx13;
int _idx14;
int _idx15;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int _idx7;
int _idx8;
int _idx9;
int chunk_idx_x;
int chunk_idx_x_max;
int chunk_idx_y;
int chunk_idx_y_max;
int chunk_idx_z;
int chunk_idx_z_max;
int idx_1_2;
int size_1_1;
int size_1_2;
//int t;
int thd_idx_x;
int thd_idx_y;
int thd_idx_z;
int thdblks_idx_x;
int thdblks_idx_x_max;
int thdblks_idx_y;
int thdblks_idx_y_max;
int thdblks_idx_z;
int thdblks_idx_z_max;
/*
Initializations
*/
// blockIdx.y encodes both the y and z tiles; decompose it exactly as the
// stencil kernel does so both kernels visit the same points.
int tmp;
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
chunk_idx_x=(c*(threadIdx.x+(blockDim.x*blockIdx.x)));
chunk_idx_x_max=(chunk_idx_x+c);
chunk_idx_y=(threadIdx.y+(tmp*blockDim.y));
chunk_idx_y_max=(chunk_idx_y+1);
chunk_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
chunk_idx_z_max=(chunk_idx_z+1);
thdblks_idx_x=(tbx*(threadIdx.x+(blockDim.x*blockIdx.x)));
thdblks_idx_x_max=(thdblks_idx_x+tbx);
thdblks_idx_y=(tby*(threadIdx.y+(tmp*blockDim.y)));
thdblks_idx_y_max=(thdblks_idx_y+tby);
thdblks_idx_z=(tbz*(threadIdx.z+(idx_1_2*blockDim.z)));
thdblks_idx_z_max=(thdblks_idx_z+tbz);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in thdblks[t=t, s=(tbx, tby, tbz)][0] */
/* Index bounds calculations for iterators in chunk[t=t, s=(c, 1, 1)][0] */
/*
for POINT thd[t=t, s=(1, 1, 1)][0] of size [1, 1, 1] in chunk[t=t, s=(:, :, :)][0] parallel 1 <level 2> schedule default { ... }
*/
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
thd_idx_z=chunk_idx_z;
thd_idx_y=chunk_idx_y;
// Iterate over this thread's run of c x positions.
for (thd_idx_x=chunk_idx_x; thd_idx_x<(chunk_idx_x_max-0); thd_idx_x+=1)
{
/* Index bounds calculations for iterators in thd[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=thd[t=?, s=?][0]][0]=stencil(u[t=t, s=thd[t=?, s=?][0]][0])
*/
// _idx0 is computed in closed form (generated comment below); the remaining
// offsets are derived incrementally using the padded array's strides.
/* _idx0 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t)) */
_idx0=(((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t));
u__u_0[(t-1)][_idx0]=0.1;
/* _idx1 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+1) */
_idx1=(_idx0+1);
u__u_0[(t-1)][_idx1]=0.1;
/* _idx2 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+2) */
_idx2=(_idx1+1);
u__u_0[(t-1)][_idx2]=0.1;
/* _idx3 = (((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+3) */
_idx3=(((_idx1-(3*x_max))-(15*t))+2);
u__u_0[(t-1)][_idx3]=0.1;
/* _idx4 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+1)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(5*t))+3) */
_idx4=((_idx3+x_max)+(5*t));
u__u_0[(t-1)][_idx4]=0.1;
/* _idx5 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+2)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(10*t))+3) */
_idx5=((_idx4+x_max)+(5*t));
u__u_0[(t-1)][_idx5]=0.1;
/* _idx6 = (((((((((thd_idx_z*x_max)+((5*t)*thd_idx_z))*y_max)+(((((5*t)*thd_idx_z)+thd_idx_y)+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(15*t))+3) */
_idx6=((((_idx1+(((-3*x_max)-(15*t))*y_max))-((15*t)*x_max))-(75*(t*t)))+2);
u__u_0[(t-1)][_idx6]=0.1;
/* _idx7 = ((((((((((((thd_idx_z+1)*x_max)+((5*t)*thd_idx_z))+(5*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(5*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(25*(t*t)))+(15*t))+3) */
_idx7=(((_idx6+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
u__u_0[(t-1)][_idx7]=0.1;
/* _idx8 = ((((((((((((thd_idx_z+2)*x_max)+((5*t)*thd_idx_z))+(10*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(10*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(50*(t*t)))+(15*t))+3) */
_idx8=(((_idx7+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
u__u_0[(t-1)][_idx8]=0.1;
/* _idx9 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+3) */
_idx9=(_idx1+2);
u__u_0[(t-1)][_idx9]=0.1;
/* _idx10 = ((((((((((((thd_idx_z+4)*x_max)+((5*t)*thd_idx_z))+(20*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(20*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(100*(t*t)))+(15*t))+3) */
_idx10=(((_idx9+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
u__u_0[(t-1)][_idx10]=0.1;
/* _idx11 = ((((((((((((thd_idx_z+5)*x_max)+((5*t)*thd_idx_z))+(25*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(25*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(125*(t*t)))+(15*t))+3) */
_idx11=(((_idx10+((x_max+(5*t))*y_max))+((5*t)*x_max))+(25*(t*t)));
u__u_0[(t-1)][_idx11]=0.1;
/* _idx12 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+4)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(20*t))+3) */
_idx12=((_idx9+x_max)+(5*t));
u__u_0[(t-1)][_idx12]=0.1;
/* _idx13 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+5)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(25*t))+3) */
_idx13=((_idx12+x_max)+(5*t));
u__u_0[(t-1)][_idx13]=0.1;
/* _idx14 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+4) */
_idx14=(_idx1+3);
u__u_0[(t-1)][_idx14]=0.1;
/* _idx15 = ((((((((((((thd_idx_z+3)*x_max)+((5*t)*thd_idx_z))+(15*t))*y_max)+((((((5*t)*thd_idx_z)+thd_idx_y)+(15*t))+3)*x_max))+((25*(t*t))*thd_idx_z))+((5*t)*thd_idx_y))+thd_idx_x)+(75*(t*t)))+(15*t))+5) */
_idx15=(_idx1+4);
u__u_0[(t-1)][_idx15]=0.1;
// Seed the centre point of the next time level.
u__u_0[t][_idx9]=1.1;
}
}
}
}
|
19,751 | #include "cuda_runtime.h"
// Empty program: does nothing beyond verifying the translation unit builds
// against the CUDA runtime header.
int main(){
    return 0;
}
|
19,752 | #include <stdio.h>
#define N 64
#define TPB 32
// A scaling function to convert integers 0,1,...,N-1 to evenly spaced floats
// Map the integer index i in [0, n-1] onto an evenly spaced float in [0, 1].
__device__ float scale(int i, int n)
{
    const float numerator = (float)i;
    return numerator / (n - 1);
}
// Compute the distance between 2 points on a line.
// Compute the distance between 2 points on a line.
// Uses sqrtf (the float overload) instead of sqrt so the computation stays in
// single precision; the original promoted to double and back for no benefit.
__device__ float distance(float x1, float x2)
{
    return sqrtf((x2 - x1) * (x2 - x1));
}
// For each index i < len, write |scale(i, len) - ref| into d_out[i] and print
// the computation (device-side printf, debugging only).
__global__ void distanceKernel(float* d_out, float ref, int len)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard: grids are commonly rounded up past len, so surplus threads must
    // not write out of bounds (the original had no bounds check).
    if (i >= len)
        return;
    const float x = scale(i, len);
    d_out[i] = distance(x, ref);
    printf("i = %2d: dist from %f to %f is %f.\n", i, ref, x, d_out[i]);
}
// Driver: computes distances from N evenly spaced points to a reference
// value on the GPU.
int main()
{
    // Choose a reference value from which distances are measured.
    const float ref = 0.5;
    // Declare a pointer for an array of floats
    float* d_out = 0;
    // Allocate device memory for d_out
    cudaMalloc(&d_out, N * sizeof(float));
    // Launch kernel. Block count is rounded up with (N + TPB - 1) / TPB so
    // all N elements are covered even when N is not a multiple of TPB —
    // exactly the fix the original's own comment recommended.
    distanceKernel<<<(N + TPB - 1) / TPB, TPB>>>(d_out, ref, N);
    // Wait for device to finish (and flush device-side printf) before exiting
    cudaDeviceSynchronize();
    // Free the memory (Don't forget!!)
    cudaFree(d_out);
    return 0;
}
19,753 | #include "includes.h"
// Normalize pixels[idx] by the sum of column idx's values in a CSC matrix.
// colPointers[idx]..colPointers[idx+1] delimits column idx's entries in
// `values`; the + 1e-6f guard avoids division by zero for empty columns.
__global__ void divideByCSCColSums(const float *values, const int *colPointers, float *pixels, const size_t n)
{
    const size_t col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col >= n)
        return;
    const size_t first = colPointers[col];
    const size_t last = colPointers[col + 1];
    float colSum = 0.0f;
    for (size_t r = first; r < last; ++r)
        colSum += values[r];
    pixels[col] /= colSum + 1e-6f;
}
19,754 | #include "includes.h"
// Sobel edge detection, one 2x2 output tile per thread.
// Each thread loads its 4x4 input neighbourhood into a private array, applies
// the two 3x3 Sobel operators at the four interior positions, and writes
// 255/0 per pixel depending on whether the squared gradient magnitude
// exceeds `thresh`.
// NOTE(review): despite the name, regArr is a per-thread local array, not
// __shared__ memory — no data is exchanged between threads.
__global__ void sobelEdgeDetectionSharedMem2(int *input, int *output, int width, int height, int thresh) {
    int regArr[4][4];
    int i = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
    int j = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
    if ( i > 0 && j > 0 && i < width - 1 && j < height - 1)
    {
        // Load the 4x4 neighbourhood (rows j-1..j+2, columns i-1..i+2).
        regArr[0][0] = input[width * (j-1) + i - 1];
        regArr[0][1] = input[width * (j-1) + i    ];
        regArr[0][2] = input[width * (j-1) + i + 1];
        regArr[0][3] = input[width * (j-1) + i + 2];
        regArr[1][0] = input[width * (j)   + i - 1];
        regArr[1][1] = input[width * (j)   + i    ];
        regArr[1][2] = input[width * (j)   + i + 1];
        regArr[1][3] = input[width * (j)   + i + 2];
        regArr[2][0] = input[width * (j+1) + i - 1];
        regArr[2][1] = input[width * (j+1) + i    ];
        regArr[2][2] = input[width * (j+1) + i + 1];
        regArr[2][3] = input[width * (j+1) + i + 2];
        regArr[3][0] = input[width * (j+2) + i - 1];
        regArr[3][1] = input[width * (j+2) + i    ];
        regArr[3][2] = input[width * (j+2) + i + 1];
        regArr[3][3] = input[width * (j+2) + i + 2];
        // The original called __syncthreads() here. It was removed: no shared
        // memory is used, so there is nothing to synchronize, and a barrier
        // inside this divergent branch (border/out-of-range threads never
        // reach it) is undefined behavior.
        int sum1 = 0, sum2 = 0, magnitude;
        int num = 3;
        for(int xind = 1; xind < num; xind++)
        {
            for(int yind = 1; yind < num; yind++)
            {
                sum1 = regArr[xind+1][yind-1] - regArr[xind-1][yind-1]
                     + 2 * regArr[xind+1][yind  ] - 2 * regArr[xind-1][yind  ]
                     + regArr[xind+1][yind+1] - regArr[xind-1][yind+1];
                sum2 = regArr[xind-1][yind-1] + 2 * regArr[xind][yind-1] + regArr[xind+1][yind-1]
                     - regArr[xind-1][yind+1] - 2 * regArr[xind][yind+1] - regArr[xind+1][yind+1];
                // Squared magnitude — thresh must be on the squared scale.
                magnitude = sum1 * sum1 + sum2 * sum2;
                if(magnitude > thresh)
                    output[(j + yind - 1) * width + (i + xind - 1)] = 255;
                else
                    output[(j + yind - 1) * width + (i + xind - 1)] = 0;
            }
        }
    }
}
19,755 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
// In-place element-wise addition a[i] += b[i] for the chunk starting at
// `offset`; N is the total element count and bounds all writes.
__global__
void add_vec(int *a,int *b, int offset,int N)
{
    const int idx = offset + blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= N)
        return;
    a[idx] += b[idx];
}
// Set every one of the N elements of `data` to `val`.
template <typename T>
void fill_arr(T *data,T val,int N){
    for (T *p = data; p != data + N; ++p) {
        *p = val;
    }
}
// Adds two 2^28-element int vectors on the GPU, pipelining H2D copies,
// kernel work, and D2H copies across 16 CUDA streams.
int main(){
    int N = 1<<28;
    int *a,*b,*d_a,*d_b;
    int int_size = N * sizeof(int);
    // Allocate PINNED host memory: cudaMemcpyAsync on pageable memory
    // silently degrades to a staged (effectively synchronous) copy, which
    // defeats the whole multi-stream overlap below.
    cudaMallocHost((void **)&a, int_size);
    fill_arr(a,1,N);
    cudaMallocHost((void **)&b, int_size);
    fill_arr(b,2,N);
    cudaMalloc((void **)&d_a,int_size);
    cudaMalloc((void **)&d_b,int_size);
    int threads = 256;
    cudaStream_t *i_stream;
    int num_stream = 16;
    int StreamSize = N/num_stream;
    // Bytes per chunk. The arrays hold int, so size with sizeof(int); the
    // original used sizeof(float), which only worked because the two types
    // happen to have the same size.
    size_t StreamBytes = StreamSize * sizeof(int);
    // Enough blocks to cover one chunk. The original launched
    // <<<256, 16>>> (grid/block reversed), so only 4096 of the 2^24
    // elements in each chunk were ever processed.
    int blocks = (StreamSize + threads - 1) / threads;
    i_stream = new cudaStream_t[num_stream];
    for (int i = 0; i < num_stream; i++){
        cudaStreamCreate(&i_stream[i]);
    }
    // Per-stream pipeline: copy-in, compute, copy-out of each chunk are
    // ordered within its own stream and overlap across streams. (The
    // original issued a second, redundant D2H copy of every chunk after
    // this loop; that duplicate was removed.)
    for (int i = 0; i < num_stream; i++){
        int offset = i * StreamSize;
        cudaMemcpyAsync(&d_a[offset],&a[offset],StreamBytes,cudaMemcpyHostToDevice,i_stream[i]);
        cudaMemcpyAsync(&d_b[offset],&b[offset],StreamBytes,cudaMemcpyHostToDevice,i_stream[i]);
        add_vec<<< blocks, threads, 0, i_stream[i] >>>(d_a,d_b,offset,N);
        cudaMemcpyAsync(&a[offset],&d_a[offset],StreamBytes,cudaMemcpyDeviceToHost,i_stream[i]);
    }
    // Wait for all streams to drain before tearing anything down.
    cudaDeviceSynchronize();
    for (int i = 0; i < num_stream; i++){
        cudaStreamDestroy(i_stream[i]);
    }
    delete [] i_stream;
    // Pinned allocations must be released with cudaFreeHost, not free().
    cudaFreeHost(a); cudaFreeHost(b); cudaFree(d_a); cudaFree(d_b);
}
|
19,756 | //#include <data_types/timeseries.hpp>
//#include <data_types/fourierseries.hpp>
//#include <data_types/candidates.hpp>
//#include <data_types/filterbank.hpp>
//#include <transforms/dedisperser.hpp>
//#include <transforms/resampler.hpp>
//#include <transforms/folder.hpp>
//#include <transforms/ffter.hpp>
//#include <transforms/dereddener.hpp>
//#include <transforms/spectrumformer.hpp>
//#include <transforms/birdiezapper.hpp>
//#include <transforms/peakfinder.hpp>
//#include <transforms/distiller.hpp>
//#include <transforms/harmonicfolder.hpp>
//#include <transforms/scorer.hpp>
//#include <utils/exceptions.hpp>
//#include <utils/utils.hpp>
//#include <utils/stats.hpp>
//#include <utils/stopwatch.hpp>
//#include <utils/progress_bar.hpp>
//#include <utils/cmdline.hpp>
//#include <utils/output_stats.hpp>
//#include <string>
//#include <iostream>
//#include <stdio.h>
//#include <unistd.h>
//#include "cuda.h"
//#include "cufft.h"
//#include "pthread.h"
//#include <cmath>
//#include <map>
//
//
//int main4(int argc, char **argv)
//{
// std::map<std::string,Stopwatch> timers;
// timers["reading"] = Stopwatch();
// timers["dedispersion"] = Stopwatch();
// timers["searching"] = Stopwatch();
// timers["folding"] = Stopwatch();
// timers["total"] = Stopwatch();
// timers["total"].start();
//
// CmdLineOptions args;
// if (!read_cmdline_options(args,argc,argv))
// ErrorChecker::throw_error("Failed to parse command line arguments.");
//
// int nthreads = std::min(Utils::gpu_count(),args.max_num_threads);
// nthreads = std::max(1,nthreads);
//
// if (args.verbose)
// std::cout << "Using file: " << args.infilename << std::endl;
// std::string filename(args.infilename);
//
// //Stopwatch timer;
// if (args.progress_bar)
// printf("Reading data from %s\n",args.infilename.c_str());
//
// timers["reading"].start();
// SigprocFilterbank filobj(filename);
// timers["reading"].stop();
//
// if (args.progress_bar){
// printf("Complete (execution time %.2f s)\n",timers["reading"].getTime());
// }
//
// Dedisperser dedisperser(filobj,nthreads);
// if (args.killfilename!=""){
// if (args.verbose)
// std::cout << "Using killfile: " << args.killfilename << std::endl;
// dedisperser.set_killmask(args.killfilename);
// }
//
// if (args.verbose)
// std::cout << "Generating DM list" << std::endl;
// dedisperser.generate_dm_list(args.dm_start,args.dm_end,args.dm_pulse_width,args.dm_tol);
// std::vector<float> dm_list = dedisperser.get_dm_list();
//
// if (args.verbose){
// std::cout << dm_list.size() << " DM trials" << std::endl;
// for (int ii=0;ii<dm_list.size();ii++)
// std::cout << dm_list[ii] << std::endl;
// std::cout << "Executing dedispersion" << std::endl;
// }
//
// if (args.progress_bar)
// printf("Starting dedispersion...\n");
//
// timers["dedispersion"].start();
// PUSH_NVTX_RANGE("Dedisperse",3)
// DispersionTrials<unsigned char> trials = dedisperser.dedisperse();
// POP_NVTX_RANGE
// timers["dedispersion"].stop();
//
// if (args.progress_bar)
// printf("Complete (execution time %.2f s)\n",timers["dedispersion"].getTime());
//
// unsigned int size;
// if (args.size==0)
// size = Utils::prev_power_of_two(filobj.get_nsamps());
// else
// //size = std::min(args.size,filobj.get_nsamps());
// size = args.size;
// if (args.verbose)
// std::cout << "Setting transform length to " << size << " points" << std::endl;
//
// AccelerationPlan acc_plan(args.acc_start, args.acc_end, args.acc_tol,
// args.acc_pulse_width, size, filobj.get_tsamp(),
// filobj.get_cfreq(), filobj.get_foff());
//
//
// //Multithreading commands
// timers["searching"].start();
// std::vector<Worker*> workers(nthreads);
// std::vector<pthread_t> threads(nthreads);
// DMDispenser dispenser(trials);
// if (args.progress_bar)
// dispenser.enable_progress_bar();
//
// for (int ii=0;ii<nthreads;ii++){
// workers[ii] = (new Worker(trials,dispenser,acc_plan,args,size,ii));
// pthread_create(&threads[ii], NULL, launch_worker_thread, (void*) workers[ii]);
// }
//
// DMDistiller dm_still(args.freq_tol,true);
// HarmonicDistiller harm_still(args.freq_tol,args.max_harm,true,false);
// CandidateCollection dm_cands;
// for (int ii=0; ii<nthreads; ii++){
// pthread_join(threads[ii],NULL);
// dm_cands.append(workers[ii]->dm_trial_cands.cands);
// }
// timers["searching"].stop();
//
// if (args.verbose)
// std::cout << "Distilling DMs" << std::endl;
// dm_cands.cands = dm_still.distill(dm_cands.cands);
// dm_cands.cands = harm_still.distill(dm_cands.cands);
//
// CandidateScorer cand_scorer(filobj.get_tsamp(),filobj.get_cfreq(), filobj.get_foff(),
// fabs(filobj.get_foff())*filobj.get_nchans());
// cand_scorer.score_all(dm_cands.cands);
//
// if (args.verbose)
// std::cout << "Setting up time series folder" << std::endl;
//
// MultiFolder folder(dm_cands.cands,trials);
// timers["folding"].start();
// if (args.progress_bar)
// folder.enable_progress_bar();
//
// if (args.npdmp > 0){
// if (args.verbose)
// std::cout << "Folding top "<< args.npdmp <<" cands" << std::endl;
// folder.fold_n(args.npdmp);
// }
// timers["folding"].stop();
//
// if (args.verbose)
// std::cout << "Writing output files" << std::endl;
// //dm_cands.write_candidate_file("./old_cands.txt");
//
// int new_size = std::min(args.limit,(int) dm_cands.cands.size());
// dm_cands.cands.resize(new_size);
//
// CandidateFileWriter cand_files(args.outdir);
// cand_files.write_binary(dm_cands.cands,"candidates.peasoup");
//
// OutputFileWriter stats;
// stats.add_misc_info();
// stats.add_header(filename);
// stats.add_search_parameters(args);
// stats.add_dm_list(dm_list);
//
// std::vector<float> acc_list;
// acc_plan.generate_accel_list(0.0,acc_list);
// stats.add_acc_list(acc_list);
//
// std::vector<int> device_idxs;
// for (int device_idx=0;device_idx<nthreads;device_idx++)
// device_idxs.push_back(device_idx);
// stats.add_gpu_info(device_idxs);
// stats.add_candidates(dm_cands.cands,cand_files.byte_mapping);
// timers["total"].stop();
// stats.add_timing_info(timers);
//
// std::stringstream xml_filepath;
// xml_filepath << args.outdir << "/" << "overview.xml";
// stats.to_file(xml_filepath.str());
//
// return 0;
//}
|
19,757 | #include<stdio.h>
#include<stdlib.h>
#include<cuda.h>
#define N 3
#define BLOCK_DIM 3
// Element-wise addition of two N x N row-major matrices: c = a + b.
// The two printfs (thread coordinates and flat index) are debug output kept
// from the original.
__global__ void matrixAdd(int *a,int *b,int *c)
{
    const int col = blockDim.x * blockIdx.x + threadIdx.x;
    const int row = blockDim.y * blockIdx.y + threadIdx.y;
    const int index = row * N + col;
    printf("\n%d\t%d",threadIdx.x,threadIdx.y);
    printf("\nIndex val:%d\n",index);
    if (row < N && col < N)
        c[index] = a[index] + b[index];
}
// Adds two fixed 3x3 matrices on the GPU and prints the result.
int main()
{
    int x[3][3]={1,2,3,4,5,6,7,8,9};
    int y[3][3]={0,1,2,3,4,5,6,7,8};
    int z[3][3];
    int i=0,j=0;
    int *dev_a,*dev_b,*dev_c;
    cudaMalloc((void**)&dev_a,sizeof(x));
    cudaMalloc((void**)&dev_b,sizeof(y));
    cudaMalloc((void**)&dev_c,sizeof(z));
    cudaMemcpy(dev_a,x,sizeof(x),cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b,y,sizeof(y),cudaMemcpyHostToDevice);
    // (The original also copied the uninitialized z into dev_c; that copy was
    // removed — dev_c is write-only as far as the kernel is concerned.)
    dim3 dimBlock(BLOCK_DIM,BLOCK_DIM );
    // Integer ceiling division. The original wrapped an already-truncated
    // integer quotient in ceil(), which is a no-op; (N + BLOCK_DIM - 1) /
    // BLOCK_DIM rounds up correctly for any N.
    dim3 dimGrid((N + BLOCK_DIM - 1) / BLOCK_DIM, (N + BLOCK_DIM - 1) / BLOCK_DIM);
    matrixAdd<<<dimGrid,dimBlock>>>(dev_a,dev_b,dev_c);
    // Blocking copy: synchronizes with the kernel before reading results.
    cudaMemcpy(z,dev_c,sizeof(z),cudaMemcpyDeviceToHost);
    printf("\noutput\n");
    for(i=0;i<3;i++)
    {
        for(j=0;j<3;j++)
        {
            printf("\n%d",z[i][j]);
        }
    }
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
19,758 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <inttypes.h>
#include <math.h>
#define gettime(t) clock_gettime(CLOCK_MONOTONIC_RAW, t)
#define get_sub_seconde(t) (1e-9*(double)t.tv_nsec)
/** return time in second
*/
/** Return the current monotonic wall-clock time in seconds (nanosecond
 *  resolution), or 0 if the clock query fails. */
__host__ double get_elapsedtime(void)
{
    struct timespec now;
    // Same clock the file-level gettime macro wraps; expanded inline here.
    if (clock_gettime(CLOCK_MONOTONIC_RAW, &now) != 0)
        return 0;
    return (double)now.tv_sec + 1e-9 * (double)now.tv_nsec;
}
// Fill the size x size matrices: A and B with pseudo-random values from the
// fixed seed 2019 (reproducible runs), C with zeros. The rand() call order
// (A then B, row-major) is part of the observable behaviour and is preserved.
__host__ void init(double* A, double* B, double* C, int size)
{
    srand(2019);
    for (int row = 0; row < size; row++)
    {
        for (int col = 0; col < size; col++)
        {
            const int at = row * size + col;
            A[at] = rand();
            B[at] = rand();
            C[at] = 0.0;
        }
    }
}
// Naive CPU reference: C = A * B for row-major size x size matrices.
void mult(double* A, double* B, double* C, int size)
{
    for (int row = 0; row < size; row++)
    {
        for (int col = 0; col < size; col++)
        {
            double acc = 0.;
            for (int k = 0; k < size; k++)
                acc += A[row * size + k] * B[k * size + col];
            C[row * size + col] = acc;
        }
    }
}
// Naive dense matrix multiply: each thread computes one element
// C[x][y] = dot(row x of A, column y of B) for N x N row-major matrices.
__global__ void kernel(double* A, double* B, double* C, int N)
{
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    // The launch grid is rounded up to a multiple of the block size, so
    // out-of-range threads must exit. The original had no guard (out-of-
    // bounds writes whenever N is not a multiple of 32) and additionally
    // made every thread write C[0] = 9999999 — a data race left over from
    // debugging that could corrupt the final C[0]. Both are fixed here.
    if (x >= N || y >= N)
        return;
    int id = x * N + y;
    double sum = 0.;
    for (int i = 0; i < N; ++i)
    {
        sum += A[x * N + i] * B[i * N + y];
    }
    C[id] = sum;
}
// Multiplies two N x N random matrices on the GPU, timing the kernel plus
// result copy with CUDA events. N comes from argv[1] (default 1000).
int main(int argc, char** argv){
    int N = 0;
    double *A_h = NULL;
    double *B_h = NULL;
    double *C_h = NULL;
    //double t0 = 0., t1 = 0., duration = 0.;
    N = (argc < 2)?1000:atoi(argv[1]);
    fprintf(stdout, "Matrix Multiplication\n Size: %dx%d\n", N, N);
    // 32x32 thread blocks; grid rounded up to cover N in both dimensions
    // (the kernel guards against the overshoot).
    dim3 DimBlock(32, 32, 1);
    dim3 DimGrid(ceil(N/32.), ceil(N/32.), 1);
    // Host memory allocation
    A_h = (double*) malloc(sizeof(double) * N * N);
    B_h = (double*) malloc(sizeof(double) * N * N);
    C_h = (double*) malloc(sizeof(double) * N * N);
    // Value initialization
    init(A_h, B_h, C_h, N);
    // Device allocations, aborting on any failure
    cudaError_t error;
    double *A_d = NULL;
    double *B_d = NULL;
    double *C_d = NULL;
    error = cudaMalloc((void**)&A_d, N*N*sizeof(double));
    if(error != cudaSuccess)
        abort();
    error = cudaMalloc((void**)&B_d, N*N*sizeof(double));
    if(error != cudaSuccess)
        abort();
    error = cudaMalloc((void**)&C_d, N*N*sizeof(double));
    if(error != cudaSuccess)
        abort();
    // Host-to-device transfers (with error handling)
    error = cudaMemcpy(A_d, A_h, N*N*sizeof(double), cudaMemcpyHostToDevice);
    if(error != cudaSuccess)
        abort();
    error = cudaMemcpy(B_d, B_h, N*N*sizeof(double), cudaMemcpyHostToDevice);
    if(error != cudaSuccess)
        abort();
    error = cudaMemcpy(C_d, C_h, N*N*sizeof(double), cudaMemcpyHostToDevice);
    if(error != cudaSuccess)
        abort();
    // First row of C before the kernel (all zeros from init)
    for(int i=0; i<N; i++)
    {
        printf("%lf ", C_h[i]);
    }
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    printf("%d %d %d\n%d %d %d", DimGrid.x, DimGrid.y, DimGrid.z, DimBlock.x, DimBlock.y, DimBlock.z);
    kernel<<<DimGrid, DimBlock>>>(A_d, B_d, C_d, N);
    // Launch-configuration errors only surface via cudaGetLastError.
    error = cudaGetLastError();
    if(error != cudaSuccess)
        abort();
    cudaMemcpy(C_h, C_d, N*N*sizeof(double), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf("Time to generate: %3.1f ms \n", time);
    // Release timing events and device memory
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(A_d);
    cudaFree(B_d);
    cudaFree(C_d);
    for(int i = 0;i < N; i++)
    {
        printf("%lf ", C_h[i]);
    }
    // Release host memory (the original leaked all three matrices)
    free(A_h);
    free(B_h);
    free(C_h);
    // Compute multiplication (CPU reference, kept disabled)
    //t0 = get_elapsedtime();
    //mult(A_h, B_h, C_h, N);
    //t1 = get_elapsedtime();
    //duration = (t1 - t0);
    //uint64_t nb_op = N * N * N;
    //fprintf(stdout, "Performance results: \n");
    //fprintf(stdout, "  Time: %lf s\n", duration);
    //fprintf(stdout, "  MFlops: %.2f\n", (nb_op / duration)*1E-6);
    return 0;
}
|
19,759 | #include "includes.h"
// Placeholder kernel: takes no arguments and performs no work.
// NOTE(review): presumably a stub awaiting an image-difference implementation.
__global__ void differenceImg_gpu()
{
} |
19,760 | #include <stdio.h>
// Minimal demo kernel: each launched thread prints "kernel0" via device printf.
__global__
void kernel0(void) {
printf("kernel0\n");
}
int main() {
    // Launch a single thread of the demo kernel.
    kernel0 <<<1,1>>> ();
    // BUG FIX: device printf output is buffered; without a synchronization
    // point before the process exits, the message may never be flushed to
    // stdout.
    cudaDeviceSynchronize();
    return 0;
}
|
19,761 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <math.h>
#include <cuda.h>
// function to integrate
#define F(x) (x*x)
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), __FILE__,__LINE__); exit(-1);}
const long blocks_per_grid = 64;
const long threads_per_block = 128; // Must be a power of 2 for reduction
__device__ double rn(unsigned long * seed)
{
// Park-Miller-style linear congruential generator: advances *seed in place
// and returns a pseudo-random double in [0, 1).
const unsigned long multiplier = 16807;
const unsigned long modulus = 2147483647;
unsigned long next = (multiplier * (*seed)) % modulus;
*seed = next;
return (double) next / modulus;
}
// Monte Carlo table-lookup kernel: each thread samples random x in [0,1),
// linearly interpolates F_vals at x, and accumulates into its own
// shared-memory slot; the block then tree-reduces its slots into
// sums[blockIdx.x].  Requires blockDim.x == threads_per_block and a
// power-of-2 block size (see the reduction loop).
__global__ void lookup(double *F_vals, long F_len, double interval,
long total_lookups, double *sums) {
// A per-block cache. Each thread i writes to sum_cache[i]
__shared__ double sum_cache[threads_per_block];
long i,j,k;
double x, f;
unsigned long seed;
int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
int cache_id = threadIdx.x;
// BUG FIX: shared memory is uninitialized; without this store the '+='
// below starts from garbage, and threads that execute zero loop
// iterations fold garbage into the reduction.
sum_cache[cache_id] = 0.0;
seed = 10000*threadIdx.x + 10* blockIdx.x + threadIdx.x;
for (i=thread_id; i < total_lookups; i += gridDim.x*blockDim.x) {
// Randomly sample a continuous value for x
x = (double) rn(&seed);
// Find the indices that bound x on the grid
j = x / interval;
k = j+1;
// Calculate interpolation factor
f = (k*interval - x) / (k*interval - j*interval);
// Interpolate and accumulate result
sum_cache[cache_id] += F_vals[j+1] - f * (F_vals[j+1] - F_vals[j]);
}
__syncthreads();
// Naive tree reduction (power-of-2 blockDim required)
for (i=blockDim.x/2; i != 0; i /= 2) {
if (cache_id < i)
sum_cache[cache_id] += sum_cache[cache_id + i];
__syncthreads();
}
if (cache_id == 0)
sums[blockIdx.x] = sum_cache[0];
}
int main(int argc, char* argv[]) {
    // Monte Carlo estimate of the mean of F over [0,1] via table lookup with
    // linear interpolation.  argv[1] = #lookups, argv[2] = #gridpoints.
    long n_lookups = (argc < 2) ? 10000000 : atol(argv[1]);
    long F_len = (argc < 3) ? 250000000 : atol(argv[2]);
    // Discrete values for F(x)
    double *F_vals, *dev_F_vals;
    // interval for linearly-spaced grid
    double interval = (double) 1 / (F_len - 1);
    // Sum of random lookups on F_vals
    double sum = 0;
    // One partial sum per block; reduced on the host afterwards.
    double *sums, *dev_sums;
    // Timing
    cudaEvent_t start, stop;
    float elapsed_time;
    long i;
    printf("Running %0.2e lookups with %0.2e gridpoints in a %0.2f MB array...\n",
    (double) n_lookups, (double) F_len, (double) F_len*sizeof(double)/1e6);
    CUDA_CALL( cudaEventCreate( &start ) );
    CUDA_CALL( cudaEventCreate( &stop ) );
    sums = (double *) malloc( blocks_per_grid*sizeof(double) );
    F_vals = (double *) malloc(F_len*sizeof(double));
    // Populate values for F(x) on grid
    for (i=0; i<F_len; i++) {
        F_vals[i] = F(i*interval);
    }
    // Malloc and zero dev_sums
    CUDA_CALL( cudaMalloc( (void**)&dev_sums, blocks_per_grid*sizeof(double) ) );
    CUDA_CALL( cudaMemset( (void*) dev_sums, 0, blocks_per_grid*sizeof(double) ) );
    // Malloc and copy to dev_F_vals
    CUDA_CALL( cudaMalloc( (void**)&dev_F_vals, F_len*sizeof(double)) );
    CUDA_CALL( cudaMemcpy( dev_F_vals, F_vals, F_len*sizeof(double), cudaMemcpyHostToDevice ) );
    CUDA_CALL( cudaEventRecord( start, 0 ) );
    lookup<<<blocks_per_grid,threads_per_block>>>(dev_F_vals, F_len, interval, n_lookups, dev_sums);
    CUDA_CALL( cudaEventRecord( stop, 0 ) );
    CUDA_CALL( cudaEventSynchronize( stop ) );
    // Copy dev_sums to sums
    CUDA_CALL( cudaMemcpy( sums, dev_sums, blocks_per_grid*sizeof(double), cudaMemcpyDeviceToHost ));
    // Get cumulative sum
    for (i=0; i<blocks_per_grid; i++) {
        sum += sums[i];
    }
    // BUG FIX: cudaEventElapsedTime reports milliseconds, but the original
    // printed the raw value labelled as seconds and computed the rate from
    // the millisecond value.  Convert once and use that everywhere.
    CUDA_CALL( cudaEventElapsedTime( &elapsed_time, start, stop ) );
    double elapsed_s = elapsed_time / 1000.0;
    printf("Result: %0.6f\n", sum / n_lookups);
    printf("Time: %0.2e s\n", elapsed_s);
    printf("Rate: %0.2e lookups/s\n", n_lookups / elapsed_s);
    // Cleanup
    CUDA_CALL( cudaEventDestroy( start ) );
    CUDA_CALL( cudaEventDestroy( stop ) );
    CUDA_CALL( cudaFree( dev_F_vals ) );
    CUDA_CALL( cudaFree( dev_sums ) );
    free(F_vals);
    free(sums);
    return 0;
}
|
19,762 | #include <stdio.h>
#include <cuda.h>
__device__ int sumg = 0;
// Demo kernel: doubles then increments its by-value argument and atomically
// accumulates the result into the device-global counter sumg.
__global__ void K(int num) {
num += num;
++num;
atomicAdd(&sumg, num);
// NOTE(review): the shared accumulator below is written by every thread in
// the block without atomics, and the barrier only separates the zeroing
// store from the racy '+=' updates -- 'sum' is a data race and its final
// value is unspecified.  It is also never read; presumably left over from
// an exercise -- confirm before removing.
__shared__ int sum;
sum = 0;
__syncthreads();
sum += num;
}
int main() {
    // Launch the kernel 100 times (5 blocks x 32 threads), waiting for each
    // launch to finish before issuing the next.
    unsigned iteration = 0;
    while (iteration < 100) {
        K<<<5, 32>>>(iteration);
        cudaDeviceSynchronize();
        ++iteration;
    }
    return 0;
}
|
19,763 | #include <stdio.h>
#include <stdlib.h>
/**
* =============== Comparação entre os tempos de execução: ===============
* Sequencial: 1m18.116s
* Paralelo: 0m25.027s
* Paralelo (GPU - OpenMP): 0m15.604s
* Paralelo (GPU - CUDA): 0m1.534s
*
* =============== Métricas relacionas as versões em GPU ===============
* OpenMP:
* warps_launched: 535592
* warp_execution_efficiency: 98.95%
*
* CUDA:
* warps_launched: 127008
* warp_execution_efficiency: 99.21%
*/
// Square row-major matrix multiply: c = a * b, one output element per thread.
// Out-of-range threads (when the grid over-covers the matrix) exit early.
__global__ void mm(double* a, double* b, double* c, int width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= width || col >= width)
        return;
    // Dot product of row 'row' of a with column 'col' of b.
    double acc = 0;
    for (int k = 0; k < width; k++)
        acc += a[row * width + k] * b[k * width + col];
    c[row * width + col] = acc;
}
int main()
{
    // 2000x2000 double-precision matmul on the GPU with a[i][j]=i, b[i][j]=j.
    int width = 2000;
    double *a = (double*) malloc (width * width * sizeof(double));
    double *b = (double*) malloc (width * width * sizeof(double));
    double *c = (double*) malloc (width * width * sizeof(double));
    for(int i = 0; i < width; i++) {
        for(int j = 0; j < width; j++) {
            a[i * width + j] = i;
            b[i * width + j] = j;
            c[i * width + j] = 0;
        }
    }
    int size = width * width * sizeof(double);
    double *cuda_a, *cuda_b, *cuda_c;
    cudaMalloc((void **) &cuda_a, size);
    cudaMemcpy(cuda_a, a, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &cuda_b, size);
    cudaMemcpy(cuda_b, b, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &cuda_c, size);
    // One thread per output element; 32x32 blocks with ceil-div grid.
    int block_size = 32;
    int dim = (width - 1) / block_size + 1;
    dim3 dimGrid(dim, dim, 1);
    dim3 dimBlock(block_size, block_size, 1);
    mm<<<dimGrid, dimBlock>>>(cuda_a, cuda_b, cuda_c, width);
    // Blocking D2H copy also synchronises with the kernel.
    cudaMemcpy(c, cuda_c, size, cudaMemcpyDeviceToHost);
    cudaFree(cuda_a);
    cudaFree(cuda_b);
    cudaFree(cuda_c);
    // BUG FIX: the host buffers were never released, and main had no
    // explicit return value.
    free(a);
    free(b);
    free(c);
    return 0;
}
|
19,764 | #include "includes.h"
// Elementwise update: out[i] -= in[i] for every i in [0, n), one element per
// thread with a tail guard.
// NOTE(review): despite the name this is NOT an exclusive prefix scan --
// confirm against callers before renaming or "fixing" the behaviour.
__global__ void exclusive_scan(unsigned int *in,unsigned int *out, int n)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n)
{
out[i] -= in[i];
}
} |
19,765 | __global__ void
// Dense row-major matrix product: c = a x b, one output element per thread.
// Expects a 2D launch where the y index selects a row of a and the x index a
// column of b; out-of-range threads are filtered by the guard below.
// NOTE(review): b_rows is unused -- presumably it must equal a_columns for
// the product to be well-formed; confirm with callers.
mat_dot(float *a, float *b, float *c,
int a_rows, int a_columns, int b_rows, int b_columns)
{
const int i = blockDim.y * blockIdx.y + threadIdx.y,
j = blockDim.x * blockIdx.x + threadIdx.x;
if (i < a_rows && j < b_columns)
{
// Dot product of row i of a with column j of b.
float c_at_ij = 0;
for (int k = 0; k < a_columns; k++)
c_at_ij += a[i * a_columns + k] * b[k * b_columns + j];
c[i * b_columns + j] = c_at_ij;
}
}
|
19,766 | #include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime_api.h>
#define BASE_TYPE float
// Matrix product c = a x b where a is (rows x N) and b is (N x M), both
// row-major; one output element per thread.  No bounds guard: the launch
// configuration must exactly cover the output.
__global__ void mult(const BASE_TYPE *a, const BASE_TYPE *b, BASE_TYPE *c, const int N, const int M)
{
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    BASE_TYPE acc = 0;
    for (int k = 0; k < N; k++)
        acc += a[row * N + k] * b[k * M + col];
    // c has M columns.
    c[row * M + col] = acc;
}
// Allocate an N x N row-major matrix whose element at flat index k is k.
// Caller owns the returned buffer (delete[]).
BASE_TYPE* gen_array(const int N)
{
    BASE_TYPE *data = new BASE_TYPE[N * N];
    for (int k = 0; k < N * N; k++)
        data[k] = k;
    return data;
}
// Print the N x N row-major matrix, one row per line, followed by a blank
// line.
void print_array(BASE_TYPE *a, const int N)
{
    for (int row = 0; row < N; row++)
    {
        for (int col = 0; col < N; col++)
            printf("%5.0f ", a[row *N + col]);
        printf("\n");
    }
    printf("\n");
}
// Allocate 'size' bytes on the device into *dev and, when 'host' is non-NULL,
// seed the allocation from it.  Throws the failing cudaError_t on any error.
void cuda_init_array(BASE_TYPE **dev, const BASE_TYPE *host, const size_t size)
{
    cudaError_t status = cudaMalloc((void **)dev, size);
    if (status != cudaSuccess)
        throw status;
    if (host == NULL)
        return;
    status = cudaMemcpy(*dev, host, size, cudaMemcpyHostToDevice);
    if (status != cudaSuccess)
        throw status;
}
// Choose a single-block launch configuration: one N x N block covers the
// whole matrix (so N*N must not exceed the device's max threads per block).
// Prints the chosen dimensions for debugging.
void cuda_init_grid_and_block(dim3 *grid, dim3 *block, const int N)
{
*grid = dim3(1);
*block = dim3(N, N, 1);
printf("Block %d %d %d\n", block->x, block->y, block->z);
printf("Grid %d %d %d\n", grid->x, grid->y, grid->z);
}
int main()
{
    // Multiply two generated 10x10 matrices on the GPU and print the result
    // (the product overwrites host_a).
    const int N = 10;
    const size_t size = N * N * sizeof(BASE_TYPE);
    cudaError_t err;
    dim3 threadsPerBlock, blocksPerGrid;
    cuda_init_grid_and_block(&blocksPerGrid, &threadsPerBlock, N);
    BASE_TYPE *host_a = gen_array(N), *host_b = gen_array(N);
    BASE_TYPE *dev_a, *dev_b, *dev_c;
    // BUG FIX: host_b was never checked (only host_a was).
    if (host_a == NULL || host_b == NULL)
    {
        fprintf(stderr, "Failed to allocate host vectors!\n");
        exit(EXIT_FAILURE);
    }
    print_array(host_a, N);
    print_array(host_b, N);
    try
    {
        cuda_init_array(&dev_a, host_a, size);
        cuda_init_array(&dev_b, host_b, size);
        cuda_init_array(&dev_c, NULL, size);
    }
    catch (cudaError_t err)
    {
        fprintf(stderr, "Failed to allocate device (error code: %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    mult<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c, N, N);
    err = cudaMemcpy(host_a, dev_c, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
    {
        // BUG FIX: this is a copy failure, not an allocation failure.
        fprintf(stderr, "Failed to copy result from device (error code: %s)!\n", cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
    print_array(host_a, N);
    cudaFree(dev_a);
    cudaFree(dev_b);
    // BUG FIX: dev_c and host_b were previously leaked.
    cudaFree(dev_c);
    delete[] host_a;
    delete[] host_b;
    return 0;
} |
19,767 | # include <stdlib.h>
# include <cuda.h>
#include<stdio.h>
const int N = 1024;
// Rotate the N-element array left by one: dev_a[tid] <- dev_a[(tid+1)%N].
// Every thread first loads its right neighbour's value; the barrier ensures
// all loads complete before any thread overwrites its own slot.
// Assumes a single block of exactly N threads (launched as f<<<1, N>>>).
__global__ void f(long long int *dev_a) {
unsigned int tid = threadIdx.x;
long long int temp = dev_a[(tid+1)%N];
__syncthreads();
dev_a[tid] = temp;
}
int main(void) {
    // Initialise 0..N-1, rotate it left by one on the GPU, and print it.
    long long int host_a[N];
    long long int *dev_a;
    cudaMalloc((void**)&dev_a, N * sizeof(long long int));
    for(int i = 0 ; i < N ; i++) {
        host_a[i] = i;
    }
    cudaMemcpy(dev_a, host_a, N * sizeof(long long int), cudaMemcpyHostToDevice);
    f<<<1, N>>>(dev_a);
    cudaMemcpy(host_a, dev_a, N * sizeof(long long int), cudaMemcpyDeviceToHost);
    for(int i = 0 ; i < N ; i++) {
        // BUG FIX: "%d" with a long long argument is undefined behaviour;
        // use the matching "%lld" conversion specifier.
        printf("%lld ", host_a[i]);
    }
    printf("\n");
    // BUG FIX: release the device buffer (previously leaked).
    cudaFree(dev_a);
    return 0;
}
|
19,768 | /* Daniel Parker
* University of Reading
* 215 Parallel algorithms for Bioinformatics
*
* random.cu - generate some random strings for testing
*/
#include <stdlib.h>
#include <time.h>
// Fill 'string' with len pseudo-random characters drawn from 'b'..'e'
// (mirrors the original: 'a' + (rand() % 4 + 1)).  The buffer is NOT
// NUL-terminated; the caller must supply exactly len bytes.
void generate(char string[], int len) {
    for (int pos = 0; pos < len; ++pos) {
        int offset = rand() % 4 + 1;
        string[pos] = (char)('a' + offset);
    }
}
|
19,769 | // nnCount: B*M
// nnDist: B*M*nnSample
// Weight: B*M*nnSample
// Compute per-neighbour interpolation weights.
// Flattened shapes: nnCount: B*M, nnDist: B*M*nnSample, Weight: B*M*nnSample.
// Blocks stride over the batch dimension B; threads stride over the M query
// points; only the first nnCount[i*M+j] of the nnSample slots are written.
__global__ void cal_weight(int B, int M, int nnSample, int weightType, float radius,
const int* nnCount, const float* nnDist, float* Weight)
{
// get the neighbor indices
for(int i=blockIdx.x;i<B;i+=gridDim.x)
{
for(int j=threadIdx.x;j<M;j+=blockDim.x)
{
int K = nnCount[i*M+j];
for(int k=0;k<K;k++)
{
// Clamp the distance away from zero so the division below is safe.
float dist = max(nnDist[i*M*nnSample+j*nnSample+k],1e-15);
if (weightType==0)
{
// NOTE(review): this computes 1/dist, not 1/sqrt(dist) as the
// trailing comment claims -- confirm which is intended.
Weight[i*M*nnSample+j*nnSample+k] = float(1)/dist; // inverse sqrt distance
}
else
{
// Linearly decaying weight, zero beyond 'radius'.
Weight[i*M*nnSample+j*nnSample+k] = max(0.0, 1 - dist/radius); // bilinear like
}
}
}
}
}
// Host-side launcher for cal_weight: one block per batch element (grid of B),
// 1024 threads striding over the M query points.
// NOTE(review): no error check after the launch -- consider adding
// cudaGetLastError() if failures here need to surface.
void buildSampleWeightLauncher(int B, int M, int nnSample, int weightType, float radius,
const int* nnCount, const float* nnDist, float* Weight)
{
cal_weight<<<B,1024>>>(B, M, nnSample, weightType, radius, nnCount, nnDist, Weight);
}
|
19,770 | #include "includes.h"
// Add a scaled background to the source image in place:
// src[x + w*n] += scale * bkg[x + w*n] for x < n, w < nwl (row stride n).
__global__ void combineSourceAndBackground ( const int nwl, const int n, const float scale, float *src, const float *bkg ) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int w = threadIdx.y + blockDim.y * blockIdx.y;
    if ( x >= n || w >= nwl ) return;
    int idx = x + w * n;
    src[idx] += scale * bkg[idx];
} |
19,771 | #include <cuda.h>
#include <stdio.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <string>
#include <vector>
//#define BLOCK_WIDTH 512
// Debug helper: each in-range thread prints one entry of the size x (size+1)
// augmented matrix (array of device row pointers).
__global__ void printMatrix(float **d_matrix, int size) {
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    bool rowOk = (row >= 0) && (row < size);
    bool colOk = (col >= 0) && (col < size + 1);
    if (rowOk && colOk) {
        printf("i is %d, j is %d, %f \n", row, col, d_matrix[row][col]);
    }
}
// Normalise the pivot row: divide entries [pivot, size] of row 'pivot' by the
// pivot value so the leading entry becomes 1.
// BUG FIX: 'firstElement' was declared int, silently truncating the float
// pivot read back by the caller (e.g. 0.5 -> 0, causing division by zero and
// corrupting the elimination); it is now float.  The caller already passes a
// float, so the call site is unchanged.
__global__ void changeFirstElementToOne(float **d_matrix, int pivot, int size, float firstElement) {
    int i = (blockIdx.x * blockDim.x) + threadIdx.x;
    int j = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (i == pivot) {
        if (j >= pivot && j < size+1) {
            d_matrix[i][j] = d_matrix[i][j] / firstElement;
        }
    }
}
// Forward elimination step: for every row below the (already normalised)
// pivot row, subtract d_matrix[row][pivot] times the pivot row from it, for
// columns to the right of the pivot.
__global__ void eliminationKernel(float **d_matrix, int pivot, int size) {
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    bool rowBelowPivot = (row > pivot) && (row < size);
    bool colRightOfPivot = (col > pivot) && (col < size + 1);
    if (rowBelowPivot && colRightOfPivot) {
        d_matrix[row][col] -= d_matrix[row][pivot] * d_matrix[pivot][col];
    }
}
// Zero the pivot column for every row below the pivot row (done separately
// from eliminationKernel, which skips the pivot column itself).
__global__ void setPivotColumnToZero(float **d_matrix, int pivot, int size) {
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (row > pivot && row < size && col == pivot) {
        d_matrix[row][col] = 0.0;
    }
}
// Back-substitution step: fold the solved value of row 'subLine' into the
// right-hand side (column 'size') of every row above it.
__global__ void backSubstitution(float **d_matrix, int subLine, int size) {
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (row >= 0 && row < subLine && col == size) {
        d_matrix[row][col] -= d_matrix[row][subLine] * d_matrix[subLine][size];
    }
}
// Zero column 'subLine' in every row above row 'subLine' after its value has
// been folded into the right-hand side by backSubstitution.
__global__ void setSubColToZero(float **d_matrix, int subLine, int size) {
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (row >= 0 && row < subLine && col == subLine) {
        d_matrix[row][col] = 0.0;
    }
}
// Copy the right-hand-side column (the solution vector after elimination and
// back substitution) into the flat result array.
__global__ void writeToDResult(float **d_matrix, int size, float *d_result) {
    int row = (blockIdx.x * blockDim.x) + threadIdx.x;
    int col = (blockIdx.y * blockDim.y) + threadIdx.y;
    if (row >= 0 && row < size && col == size) {
        d_result[row] = d_matrix[row][col];
    }
}
int main(void) {
    // Solve a dense linear system by GPU Gauss-Jordan elimination.
    // Input file "test100.txt": the system size followed by the
    // size x (size+1) augmented matrix [A|b].
    std::vector<int> int_list;
    std::string line_;
    std::ifstream file_("test100.txt");
    if (!file_) {
        std::cout << "Cannot open file.\n";
        return 0;
    }
    int size; // size of the matrix and vectors
    file_ >> size;
    float **matrix; // matrix of the linear system
    matrix = new float*[size];
    for (int i = 0; i < size; i++) {
        matrix[i] = new float[size+1];
    }
    for (int i = 0; i < size; i++) {
        for (int j = 0; j < size + 1; j++) {
            file_ >> matrix[i][j];
        }
    }
    // d_matrix is a device array of device row pointers; d_matrix_h is its
    // host-side mirror so individual rows remain reachable from the host.
    float * result, * d_result; // result vector
    float **d_matrix;
    float **d_matrix_h;
    d_matrix_h = (float**)malloc(size * sizeof(float *));
    cudaMalloc((void **) &d_result, size * sizeof(float));
    cudaMalloc((void **) &d_matrix, size * sizeof(float*));
    result = (float *)malloc(size * sizeof(float));
    // Copy each augmented-matrix row to the device.
    for (int i = 0; i < size; i++) {
        cudaMalloc((void**)&(d_matrix_h[i]), (size+1) * sizeof(float));
        cudaMemcpy(d_matrix_h[i], matrix[i], (size+1) * sizeof(float), cudaMemcpyHostToDevice);
    }
    cudaMemcpy(d_matrix, d_matrix_h, size * sizeof(float *), cudaMemcpyHostToDevice);
    dim3 threadsPerBlock(8, 8);
    dim3 numBlocks((size - 1 + threadsPerBlock.x)/threadsPerBlock.x, (size + 1 - 1 + threadsPerBlock.y)/threadsPerBlock.y);
    struct timespec cudalustart = {0,0};
    struct timespec cudaluend = {0,0};
    clock_gettime(CLOCK_REALTIME,&cudalustart);
    // Forward elimination: normalise each pivot row, then eliminate below it.
    for (int i = 0; i < size; i++) {
        float firstElement;
        cudaMemcpy(&firstElement, &d_matrix_h[i][i], sizeof(float), cudaMemcpyDeviceToHost);
        changeFirstElementToOne<<<numBlocks, threadsPerBlock>>>(d_matrix, i, size, firstElement);
        eliminationKernel<<<numBlocks, threadsPerBlock>>>(d_matrix, i, size);
        setPivotColumnToZero<<<numBlocks, threadsPerBlock>>>(d_matrix, i, size);
    }
    // Back substitution from the last row upwards.
    for (int i = size - 1; i > 0; i--) {
        backSubstitution<<<numBlocks, threadsPerBlock>>>(d_matrix, i, size);
        setSubColToZero<<<numBlocks, threadsPerBlock>>>(d_matrix, i, size);
    }
    // Extract the solution column and copy it back to the host.
    writeToDResult<<<numBlocks, threadsPerBlock>>>(d_matrix, size, d_result);
    cudaMemcpy(result, d_result, size * sizeof(float), cudaMemcpyDeviceToHost);
    clock_gettime(CLOCK_REALTIME,&cudaluend);
    std::cout<<"The time is "<<(cudaluend.tv_sec-cudalustart.tv_sec)*1000+(cudaluend.tv_nsec-cudalustart.tv_nsec)/1000000<<"ms\n";
    // BUG FIX: 'matrix' and its rows were allocated with new[] but released
    // with free() (undefined behaviour), the row arrays were leaked, and the
    // per-row device buffers in d_matrix_h were never cudaFree'd.
    for (int i = 0; i < size; i++) {
        delete[] matrix[i];
        cudaFree(d_matrix_h[i]);
    }
    delete[] matrix;
    free(result);
    free(d_matrix_h);
    cudaFree(d_matrix);
    cudaFree(d_result);
    return 0;
}
|
19,772 | #include <stdio.h>
#include <stdlib.h>
#define DEBUG
// Full 2D convolution: c = a (*) h, where a is a_rows x a_cols, h is
// h_rows x h_cols, and c is (a_rows+h_rows-1) x (a_cols+h_cols-1), all
// row-major.  One output element per thread.
__global__ void convol2D (float *a, float *h, float *c, int a_rows, int a_cols, int h_rows, int h_cols)
{
    // Output coordinates handled by this thread.
    int index_x = blockIdx.x * blockDim.x + threadIdx.x;
    int index_y = blockIdx.y * blockDim.y + threadIdx.y;
    int c_rows = a_rows + h_rows - 1;
    int c_cols = a_cols + h_cols - 1;
    // BUG FIX (1/3): the grid generally over-covers the output, and the
    // original kernel had no guard, so excess threads wrote past the end of
    // c.  Filter them out here.
    if (index_x >= c_cols || index_y >= c_rows)
        return;
    // BUG FIX (2/3): the accumulator was declared int, truncating every
    // partial product of the float inputs; it must be float.
    float sum = 0;
    for (int i = 0; i < h_rows; i++)
    {
        for (int j = 0; j < h_cols; j++)
        {
            // Only accumulate where the flipped kernel overlaps a.
            if ((index_y-i) >= 0 && (index_y-i) <= (a_rows-1) && (index_x-j) >= 0 && (index_x-j) <= (a_cols-1))
                sum = sum + (a[(index_y-i)*a_cols+(index_x-j)] * h[i*h_cols + j]);
        }
    }
    // BUG FIX (3/3): the output was indexed with gridDim.x*blockDim.x as the
    // row stride, which differs from the c_cols stride the host uses to read
    // c back whenever the grid over-covers the output; use c_cols.
    c[index_y*c_cols + index_x] = sum;
}
int main (int argc, char *argv[])
{
    // 2D convolution driver: reads matrix A and kernel H from the input file
    // (two blocks of space-separated floats separated by a blank line),
    // convolves them on the GPU, and prints the
    // (a_rows+h_rows-1) x (a_cols+h_cols-1) result.
    if(argc != 2)
    {
        fprintf(stderr,"Incorrect arguments passed.\nUse \"./2dconvol.o <input_file>\"\n");
        exit(1);
    }
    FILE *f;
    int a_cols = 1;
    int h_cols = 1;
    int a_rows = 0;
    int h_rows = 0;
    int c_cols = 0;
    int c_rows = 0;
    float *a_h = 0;
    float *a_d = 0;
    float *hinv_h = 0;
    float *hinv_d = 0;
    float *c_h = 0;
    float *c_d = 0;
    size_t a_size = 0;
    size_t h_size = 0;
    size_t c_size = 0;
    dim3 block_size;
    dim3 grid_size;
    int i=0,j=0;
    // BUG FIX: these were declared char; fgetc() returns int, and on
    // platforms where plain char is unsigned the EOF comparisons below can
    // never succeed, spinning the scan loops forever.
    int junk, junk_old;
    //Opening File
    f = fopen(argv[1],"r");
    // First pass: count A's rows (newlines) and columns (spaces in row 0),
    // stopping at the blank line (two consecutive newlines) that ends A.
    junk = fgetc(f);
    while (junk != EOF)
    {
        if(junk == '\n')
        {
            a_rows++;
        }
        else if(junk == 0x20 && a_rows == 0)   // BUG FIX: logical && (was bitwise &)
        {
            a_cols++;
        }
        junk_old = junk;
        junk = fgetc(f);
        if(junk == '\n' && junk == junk_old)
        {
            break;
        }
    }
    // Second pass: same counting for H, up to end of file.
    junk = fgetc(f);
    while (junk != EOF)
    {
        if(junk == '\n')
        {
            h_rows++;
        }
        else if(junk == 0x20 && h_rows == 0)
        {
            h_cols++;
        }
        junk = fgetc(f);
    }
    //Calculating op dimensions
    c_rows = a_rows + h_rows - 1;
    block_size.y = c_rows > 32 ? 32 : c_rows;
    c_cols = a_cols + h_cols - 1;
    block_size.x = c_cols > 16 ? 16 : c_cols;
    grid_size.y = (c_rows/32)+1;
    grid_size.x = (c_cols/16)+1;
#ifdef DEBUG
    printf("Size of A: %dx%d\n",a_rows,a_cols);
    printf("Size of H: %dx%d\n",h_rows,h_cols);
    printf("Size of C: %dx%d\n",c_rows,c_cols);
    printf("Size of grid: %dx%d\n",grid_size.y,grid_size.x);
    printf("Size of block: %dx%d\n",block_size.y,block_size.x);
#endif
    //Calculating the sizes of all the involved matrices
    a_size = a_rows * a_cols *sizeof(float);
    h_size = h_rows * h_cols *sizeof(float);
    c_size = c_rows * c_cols *sizeof(float);
    //Allocating memory on host
    a_h = (float *) malloc(a_size);
    hinv_h = (float *) malloc(h_size);
    c_h = (float *) malloc(c_size);
    //Rewinding file to read the actual data
    rewind(f);
    //Reading all the data matrices
    for(i = 0;i<a_rows;i++)
    {
        for (j = 0; j<a_cols;j++)
            fscanf(f,"%f",&a_h[i*a_cols + j]);
    }
    for(i = 0 ; i<h_rows;i++)
    {
        for (j = 0; j<h_cols ;j++)
        {
            fscanf(f,"%f",&hinv_h[i*h_cols + j]);
        }
    }
#ifdef DEBUG
    for(i = 0;i<a_rows;i++)
    {
        for (j = 0; j<a_cols;j++)
            printf("%f ",a_h[i*a_cols + j]);
        printf("\n");
    }
    for(i = 0;i<h_rows;i++)
    {
        for (j = 0; j<h_cols;j++)
        {
            printf("%f ",hinv_h[i*h_cols + j]);
        }
        printf("\n");
    }
    printf("Completed Loading Matrices...\n");
#endif
    //cudaMalloc to allocate required matrices on the device
    cudaMalloc((void **)&a_d,a_size);
    cudaMalloc((void **)&hinv_d,h_size);
    cudaMalloc((void **)&c_d,c_size);
    //Copying input data from the Host to Device
    cudaMemcpy(a_d,a_h,a_size,cudaMemcpyHostToDevice);
    cudaMemcpy(hinv_d,hinv_h,h_size,cudaMemcpyHostToDevice);
    //Setting Op matrix to all zeros
    cudaMemset(c_d,0,c_size);
    //Convolution function
    convol2D<<<grid_size,block_size>>>(a_d,hinv_d,c_d,a_rows,a_cols,h_rows,h_cols);
    // BUG FIX: cudaThreadSynchronize() is long deprecated; use the
    // equivalent cudaDeviceSynchronize().
    cudaDeviceSynchronize();
    //Copy the output matrix from the Device to host
    cudaMemcpy(c_h,c_d,c_size,cudaMemcpyDeviceToHost);
    //Print Output
    for(i=0;i<c_rows;i++)
    {
        for(j=0;j<c_cols;j++)
        {
            printf("%f ",c_h[i*c_cols + j]);
        }
        printf("\n");
    }
    //Freeing all the allocated memory from the device
    cudaFree(a_d);
    cudaFree(hinv_d);
    cudaFree(c_d);
    //Freeing all the allocated memory from the host
    free(a_h);
    free(hinv_h);
    free(c_h);
    fclose(f);
    return 0;
}
|
19,773 | /* CPU Based Wallsolver
nvcc wallsolverCPU.cu -o testCPU
*/
#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define SPACE_LENGTH 5 // Spaces Size of rows / columns
#define SPACE_WIDTH 5
#define NUM_SPACES 25
#define WALL_LENGTH 4 // Walls size of rows/colums
#define WALL_WIDTH 4
#define NUM_WALLS 16
#define POSSIBLE_DIRECTIONS 4 // Possible directions for traversing/finding neighbors
typedef enum wall {
UP, DOWN, LEFT, RIGHT
} wall;
typedef enum status {
UNEXPLORED, VISITED, EXPLORED
} status;
typedef struct space {
bool up, down, left, right, start, finish;
int parent;
int distance;
status state;
} space;
typedef struct nextSpace {
int index;
int distance;
} nextSpace;
typedef struct nextMove {
int space;
int playerScore;
int oppScore;
int wallIdx;
wall newDir;
} nextMove;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/*
set a checkpoint and show the (natural) running time in seconds
*/
/*
Report the wall-clock time elapsed since the global startTime checkpoint:
prints it and returns it as fractional seconds.
*/
double report_running_time() {
    gettimeofday(&endTime, &Idunno);
    long sec_diff = endTime.tv_sec - startTime.tv_sec;
    long usec_diff = endTime.tv_usec - startTime.tv_usec;
    // Borrow one second when the microsecond part underflows.
    if (usec_diff < 0) {
        --sec_diff;
        usec_diff += 1000000;
    }
    printf("Running time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
    return (double)(sec_diff*1.0 + usec_diff/1000000.0);
}
bool checkWallCollisions(wall *walls, int idx) {
    /* Return true when the wall at 'idx' overlaps a neighbouring wall, i.e.
       two adjacent cells whose walls point at each other occupy the same edge.
       BUG FIX: the right/down neighbour guards used 'j < 4' and 'i < 4',
       which are always true on the 4x4 wall grid; on the last column/row
       this wrapped to the next row's wall (wrong logic) and, at idx 15, read
       walls[16]/walls[19] past the end of the array.  The guards now exclude
       the last column/row, mirroring the left/up guards.
    */
    int i = idx / WALL_LENGTH;   // wall row
    int j = idx % WALL_WIDTH;    // wall column
    bool colUp = false;
    bool colDown = false;
    bool colLeft = false;
    bool colRight = false;
    if (j < WALL_WIDTH - 1) {
        colRight = (walls[idx] == RIGHT) && (walls[idx + 1] == LEFT);
    }
    if (j > 0) {
        colLeft = (walls[idx] == LEFT) && (walls[idx - 1] == RIGHT);
    }
    if (i < WALL_LENGTH - 1) {
        colDown = (walls[idx] == DOWN) && (walls[idx + WALL_WIDTH] == UP);
    }
    if (i > 0) {
        colUp = (walls[idx] == UP) && (walls[idx - WALL_LENGTH] == DOWN);
    }
    // Returns true if there is a collision
    return (colUp || colDown || colLeft || colRight);
}
/* Apply the wall layout to the per-space mobility flags of the board.
Each wall cell touches four spaces (top-left/top-right/bottom-left/
bottom-right); depending on the wall's orientation the corresponding edges
between those spaces are closed.  Flags are only ever cleared (each
'if (flag) flag = ...' keeps an already-closed edge closed), so the border
edges set up by boardInit stay closed.
NOTE(review): the wall-direction -> closed-edge mapping below (e.g. UP
closes TL.right) is non-obvious -- confirm it matches the intended wall
semantics. */
void generateBoard(space *board, wall *walls) {
/* Generate the board
For each wall, identify the board spaces that it effects
Determine the effect of each affected space's mobility
*/
int numSpaces = SPACE_LENGTH * SPACE_WIDTH;
for (int i = 0; i < WALL_WIDTH; i++) {
for (int j = 0; j < WALL_LENGTH; j++) {
int idx = (i * WALL_LENGTH) + j;
//printf("Maze Generated: %d - %d\n", idx, walls[idx]);
// Determine the 4 adjacent spaces to this wall
// (the '+ i' skew converts wall coordinates to space coordinates,
// since the space grid is one column wider than the wall grid)
int TL = idx + i;
int TR = TL +1;
int BL = TL + SPACE_LENGTH;
int BR = BL +1;
if (board[TL].right) board[TL].right = (walls[idx] != UP);
if (board[TL].down) board[TL].down = (walls[idx] != LEFT);
if (board[TR].left) board[TR].left = board[TL].right;
if (board[TR].down) board[TR].down = (walls[idx] != RIGHT);
if (board[BL].right) board[BL].right = (walls[idx] != DOWN);
if (board[BL].up) board[BL].up = board[TL].down;
if (board[BR].left) board[BR].left = board[BL].right;
if (board[BR].up) board[BR].up = board[TR].down;
}
}
// Mark the fixed start (top-left) and finish (bottom-right) spaces.
board[0].start = true;
board[numSpaces - 1].finish = true;
}
// Reset every space to a blank, fully-open, unexplored state; spaces on the
// board border get the edge that points off-board closed.
void boardInit(space *board) {
    printf("Initialize Board\n");
    for (int row = 0; row < SPACE_LENGTH; row++) {
        for (int col = 0; col < SPACE_WIDTH; col++) {
            int idx = (row * SPACE_WIDTH) + col;
            space *s = &board[idx];
            // Branchless border handling (avoids divergence on a GPU port).
            s->up = (row != 0);
            s->left = (col != 0);
            s->down = (row != (SPACE_WIDTH - 1));
            s->right = (col != (SPACE_LENGTH - 1));
            s->start = false;
            s->finish = false;
            s->parent = -1;
            s->distance = 0;
            s->state = UNEXPLORED;
        }
    }
}
// Randomly orient every wall (fixed seed for reproducibility), then re-roll
// any wall that overlaps a neighbour until the layout is collision-free.
void generateWalls(wall *walls) {
    srand(1024);
    for (int idx = 0; idx < WALL_LENGTH * WALL_WIDTH; idx++) {
        walls[idx] = (wall)(rand() % 4);
        printf("IDX %d - %d\n", idx, walls[idx]);
    }
    for (int idx = 0; idx < WALL_LENGTH * WALL_WIDTH; idx++) {
        while (checkWallCollisions(walls, idx)) {
            printf("IDX No Overlap: %d - %d\n", idx, walls[idx]);
            walls[idx] = (wall)(rand() % 4);
        }
    }
}
//output board configuration by numbers
// Dump the open/closed edge flags of every space, one line per space.
void outputBoard(space *in) {
    for (int idx = 0; idx < SPACE_WIDTH * SPACE_LENGTH; idx++) {
        printf("outputBoard: Space #: %d, UP: %d, DOWN: %d, LEFT: %d, RIGHT: %d \n", idx, in[idx].up, in[idx].down, in[idx].left, in[idx].right);
    }
}
//display board configuration visually
// Render the maze as ASCII art: '*' for walls/borders, 's' start, 'e' end.
void displayBoard(space *in)
{
    int bWidth = 1+2*SPACE_WIDTH;    // rendered rows
    int bLength = 1+2*SPACE_LENGTH;  // rendered columns
    // BUG FIX: the canvas was declared [bLength][bWidth] but is indexed
    // everywhere as [row < bWidth][col < bLength]; that only worked because
    // the board is square.  The dimensions now match the indexing.
    // NOTE(review): a few index expressions below (the vertical border and
    // the 'e' marker) still mix bWidth/bLength and rely on squareness --
    // confirm before supporting non-square boards.
    char boardArray[bWidth][bLength];
    printf("\n");
    printf("Display Board:\n");
    printf("(s = starting position; e = end goal)\n");
    // Blank the canvas.
    for (int i=0; i<bWidth; i++)
    {
        for (int j=0;j<bLength;j++)
        {
            boardArray[i][j] = ' ';
        }
    }
    // Draw each space's up/right edge marks.
    for (int i=0; i<SPACE_WIDTH; i++)
    {
        for (int j=0; j< SPACE_LENGTH; j++)
        {
            int idx = (i*SPACE_WIDTH)+j;
            if (in[idx].up==1)
            {
                boardArray[2*i][2*j+1] = '*';
                boardArray[2*i][2*j+2] = '*';
            }
            else
                boardArray[2*i][2*j+1] = ' ';
            if (in[idx].right == 1)
            {
                boardArray[2*i+1][2*j+2] = '*';
                boardArray[2*i+2][2*j+2] = '*';
                boardArray[2*i][2*j+2] = '*';
            }
            else
                boardArray[2*i+1][2*j+2] = ' ';
        }
    }
    // Vertical borders.
    for (int i = 0; i<bWidth; i++)
    {
        boardArray[i][0] = '*';
        boardArray[i][bWidth-1] = '*';
    }
    // Horizontal borders.
    for (int i = 0; i< bWidth; i++)
    {
        for (int j=0; j<bLength; j++)
        {
            if (i == 0)
                boardArray[i][j] = '*';
            if (i == bWidth-1)
                boardArray[i][j] = '*';
        }
    }
    // Mark start and end cells.
    boardArray[1][1] = 's';
    boardArray[bLength-2][bWidth-2] = 'e';
    // Print the canvas.
    for (int i=0; i<bWidth; i++)
    {
        for (int j = 0; j<bLength; j++)
        {
            printf("%c", boardArray[i][j]);
        }
        printf("\n");
    }
    printf("\n");
}
// Print the best wall move recorded for each candidate space.
void outputResults(nextMove *results, int numResults) {
    printf("----- RESULTS -----\n");
    int idx = 0;
    while (idx < numResults) {
        nextMove *m = &results[idx];
        printf("Best Move for Space %d\n", m->space);
        printf("Move wall %d to direction %d\n", m->wallIdx, m->newDir);
        printf("Player Score: %d, Opponent Score: %d\n\n", m->playerScore, m->oppScore);
        ++idx;
    }
}
/* Used strictly for debugging. This should not be used in the final version */
/* Debug-only helper: dump each space's four neighbour indices (-1 = none). */
void printAdjList(int adjList[][POSSIBLE_DIRECTIONS]) {
    const int numSpaces = SPACE_LENGTH * SPACE_WIDTH;
    for (int idx = 0; idx < numSpaces; ++idx) {
        printf("Space #%d's neighbors: UP: %d, DOWN: %d, LEFT: %d, RIGHT: %d \n", idx,
        adjList[idx][0], adjList[idx][1], adjList[idx][2], adjList[idx][3]);
    }
}
/* Set all neighbors to -1, then cycle through and add neighbors for each space
All spaces marked with -1 afterwards means the neighbor is invalid and can be ignored
*/
/* Build the neighbour table for the square grid: every slot is first marked
-1 (invalid), then the up/down/left/right neighbours that actually exist
are filled in. */
void initializeAdjList(int adjList[][POSSIBLE_DIRECTIONS]) {
    const int numSpaces = SPACE_LENGTH * SPACE_WIDTH;
    for (int s = 0; s < numSpaces; ++s)
        for (int d = 0; d < POSSIBLE_DIRECTIONS; ++d)
            adjList[s][d] = -1;
    for (int s = 0; s < numSpaces; ++s) {
        if (s >= SPACE_WIDTH)
            adjList[s][0] = s - SPACE_LENGTH;          // up
        if (s < (numSpaces - SPACE_WIDTH))
            adjList[s][1] = s + SPACE_LENGTH;          // down
        if (s % SPACE_WIDTH != 0)
            adjList[s][2] = s - 1;                     // left
        if (s % SPACE_WIDTH != (SPACE_WIDTH - 1))
            adjList[s][3] = s + 1;                     // right
    }
    // printAdjList(adjList);
}
/* Pick the cheapest unvisited neighbour of space 'idx': crossing an open edge
   costs 1, crossing a wall costs WALL_COST.  Returns {index, distance};
   index is -1 when every neighbour is missing or already visited.
   BUG FIX: the DOWN branch compared with 'WALL_COST <= min' while the other
   three directions compared strictly, so a later DOWN move could displace an
   equally-cheap move already chosen; all four directions now use the same
   strict comparison. */
nextSpace findMinimum(space *in, int adjList[][POSSIBLE_DIRECTIONS], int idx) {
    const int WALL_COST = 3;
    int min = 9999;
    int min_idx = -1;
    nextSpace next;
    for (int j = 0; j < POSSIBLE_DIRECTIONS; ++j) {
        int neighbor = adjList[idx][j];
        if (neighbor == -1 || in[neighbor].state == VISITED)
            continue;
        // A set flag (up/down/left/right) means the move crosses a wall.
        bool blocked;
        switch (j) {
            case 0:  blocked = in[idx].up;    break;
            case 1:  blocked = in[idx].down;  break;
            case 2:  blocked = in[idx].left;  break;
            default: blocked = in[idx].right; break;
        }
        int cost = blocked ? WALL_COST : 1;
        if (cost < min) {
            min = cost;
            min_idx = neighbor;
        }
    }
    next.index = min_idx;
    next.distance = min;
    return next;
}
// Clear per-search bookkeeping (parent links and visit state) so
// shortestPath() can be run again on the same board.
void resetSpaces(space *in) {
    const int total = SPACE_LENGTH * SPACE_WIDTH;
    for (int idx = 0; idx < total; ++idx) {
        in[idx].parent = -1;
        in[idx].state = UNEXPLORED;
    }
    return;
}
/* Greedy walk from space idxIn toward the finish space, backtracking to the
parent whenever a dead end (all neighbours visited/invalid) is reached.
Wall crossings cost 3, open moves cost 1 (see findMinimum).  Prints and
returns the accumulated distance, then prints the path by walking the
parent links back to the start.
NOTE(review): assumes the finish is reachable; if it is not, the first
while-loop below never terminates -- confirm callers guarantee this. */
int shortestPath(space *in, int idxIn = 0) {
int adjList[SPACE_LENGTH*SPACE_WIDTH][POSSIBLE_DIRECTIONS];
initializeAdjList(adjList);
int i = idxIn;
nextSpace next;
int distance;
// If shortestPath is used multiple times then we need to reset the parent & state.
resetSpaces(in);
// Iterate through the board until we reach the finish node.
while (!in[i].finish) {
// Run greedy shortest path on all of the current space's neighbors.
in[i].state = VISITED;
int tmp = i;
next = findMinimum(in, adjList, i);
i = next.index;
if (i == -1) {
// Dead end: back out to the parent and continue from there.
i = in[tmp].parent;
}
else {
in[i].parent = tmp;
in[i].distance = in[in[i].parent].distance + next.distance;
}
}
distance = in[i].distance;
printf("Total distance: %d\n", distance);
// Print the discovered path by following the parent chain to the start.
while (!in[i].start) {
printf("Space #%d\n", i);
i = in[i].parent;
}
printf("Space #%d\n", i);
return distance;
}
/*
*/
/* Trial move: temporarily point wall 'wallIdx' in direction 'newDir', score
the resulting board (player path from move[moveIdx].space, opponent path
from oppPos), and record the move in move[moveIdx] when it improves the
player's path or worsens the opponent's.  The wall array is always restored
before returning; no-op (same direction) and colliding moves are rejected
up front. */
void moveWall(wall *in, int wallIdx, wall newDir, nextMove *move, int moveIdx, int oppPos) {
wall oldDir = in[wallIdx]; // Temp store the old wall direction
bool sameDir = in[wallIdx] == newDir; // Set the walls[idx] to the new direction
in[wallIdx] = newDir;
bool collision = checkWallCollisions(in, wallIdx);
// If same direction or a collision
// Reset to old direction and return -1
if (sameDir || collision) {
in[wallIdx] = oldDir;
return;
}
printf("No collision - Space: %d, WallID: %d\n", move[moveIdx].space, wallIdx);
// Build a scratch board reflecting the trial wall layout.
space *board = (space *)malloc(sizeof(space) * NUM_SPACES);
boardInit(board);
generateBoard(board, in);
int playerScore = 0;
playerScore = shortestPath(board, move[moveIdx].space);
int oppScore = 0;
oppScore = shortestPath(board, oppPos);
printf("Space: %d, WallID: %d, Wall New Dir: %d, Player Score: %d, Opp Score: %d\n", move[moveIdx].space, wallIdx, newDir, playerScore, oppScore);
// Keep the move if it helps the player or hurts the opponent.
if (playerScore < move[moveIdx].playerScore || oppScore > move[moveIdx].oppScore) {
move[moveIdx].playerScore = playerScore;
move[moveIdx].oppScore = oppScore;
move[moveIdx].wallIdx = wallIdx;
move[moveIdx].newDir = newDir;
}
// Reset to the old direction
in[wallIdx] = oldDir;
free(board);
}
void moveAllWalls(wall *walls, int oppIdx, nextMove *results, int resultsIdx) {
    /* Try re-orienting every wall in every possible direction, recording in
       results[resultsIdx] any move that improves the player's path or hurts
       the opponent's.
       BUG FIX: the direction loop ran 'j < 3', so RIGHT (the fourth wall
       orientation) was never evaluated even though the comment promises all
       directions; moveWall() already rejects the no-op same-direction case,
       so iterating the full enum is safe.
    */
    for (int i = 0; i < (WALL_LENGTH * WALL_WIDTH); i++) {
        for (int j = 0; j < POSSIBLE_DIRECTIONS; j++) {
            // Check player (walls, wallID, direction, playerSpace, oppSpace)
            moveWall(walls, i, (wall) j, results, resultsIdx, oppIdx);
        }
    }
}
/*
***********************************************************
*** Inputs: space *in (our board) *
*** idx (the space # we're finding neighbors for) *
***********************************************************
*** Output: 12-element int array with indexes to all *
*** potential neighbors. -1 is entered for any *
*** elements that are unnecessary. *
***********************************************************
*** Purpose: Find all possible neighbors a space can *
*** visit on the current turn. If a wall is *
*** encountered after a step is taken then the *
*** adjacent neighbors are added to the array. *
***********************************************************
*/
/* Collect up to 12 reachable neighbor spaces for `idx` within a 3-step walk.
   Walks up/down/left/right from idx; a wall met after the first step ends the
   straight walk and spills into a sideways scan from the space before the
   wall; a wall met on the very first step costs WALL_COST and ends that
   direction immediately (the blocked space is still recorded).
   Returns a malloc'd array of NUM_NEIGHBORS ints; unused slots are set to -1
   and are always contiguous at the tail. Caller must free() the result. */
int* findNeighbors(space *in, int idx) {
    int const NUM_NEIGHBORS = 12;
    int *neighbors = (int *)malloc(NUM_NEIGHBORS*sizeof(int));
    int const MAX_DISTANCE = 3;                  // movement budget per direction
    int numSpaces = SPACE_LENGTH * SPACE_WIDTH;
    int count = 0;                               // budget spent so far in this direction
    int i = idx;                                 // cursor into the board
    int neighborIdx = 0;                         // next free slot in neighbors[]
    int const WALL_COST = 3;                     // a first-step wall exhausts the budget
    // Grab neighbors going up
    while (count < MAX_DISTANCE) {
        // Don't gather upward neighbors if index is in the top row.
        if (idx < SPACE_WIDTH)
            break;
        i -= SPACE_WIDTH;
        if (i >= numSpaces || i < 0) {
            break;
        }
        else if (in[i].down && count > 0) {
            // Wall reached after at least one step: fan out sideways from the
            // space just before the wall.
            i += SPACE_WIDTH;
            int tmp = i;            // saved cursor/budget so the right scan can rewind
            int tmpCount = count;
            // Gather leftward neighbors along this row.
            while (count < MAX_DISTANCE) {
                i--;
                // NOTE(review): in[i] is read and the column-0 test applied only
                // AFTER the decrement; if i was already in column 0 this wraps to
                // the previous row's rightmost space — confirm that starting
                // column 0 is unreachable on this path.
                if (in[i].right || (i % SPACE_WIDTH) == 0) {
                    count += MAX_DISTANCE;   // exhaust the budget so the outer loop exits too
                    break;
                }
                else {
                    neighbors[neighborIdx++] = i;
                    count++;
                }
            }
            // Gather rightward neighbors along this row.
            i = tmp;
            count = tmpCount;
            while (count < MAX_DISTANCE) {
                i++;
                if (in[i].left || (i % SPACE_WIDTH == (SPACE_WIDTH - 1))) {
                    count += MAX_DISTANCE;
                    break;
                }
                else {
                    neighbors[neighborIdx++] = i;
                    count++;
                }
            }
        }
        else if (in[i].down && count == 0) {
            // Wall on the very first step: record the space, end this direction.
            count += WALL_COST;
            neighbors[neighborIdx++] = i;
        }
        else {
            count++;
            neighbors[neighborIdx++] = i;
        }
    }
    // Grab neighbors going down (mirror of the upward scan)
    count = 0;
    i = idx;
    while (count < MAX_DISTANCE) {
        // Don't grab downward neighbors if index is in the bottom row.
        // NOTE(review): this guard tests `i` while the upward scan tests `idx`;
        // behavior is the same on the first pass but the asymmetry looks
        // unintentional — confirm.
        if (i >= (numSpaces - SPACE_WIDTH))
            break;
        i += SPACE_WIDTH;
        if (i >= numSpaces || i < 0) {
            break;
        }
        else if (in[i].up && count > 0) {
            // Wall reached after a step: fan out sideways from the space before it.
            i -= SPACE_WIDTH;
            int tmp = i;
            int tmpCount = count;
            // Gather leftward neighbors along this row.
            while (count < MAX_DISTANCE) {
                i--;
                if (in[i].right || (i % SPACE_WIDTH) == 0) {
                    count += MAX_DISTANCE;
                    break;
                }
                else {
                    neighbors[neighborIdx++] = i;
                    count++;
                }
            }
            // Gather rightward neighbors along this row.
            i = tmp;
            count = tmpCount;
            while (count < MAX_DISTANCE) {
                i++;
                if (in[i].left || (i % SPACE_WIDTH == (SPACE_WIDTH - 1))) {
                    count += MAX_DISTANCE;
                    break;
                }
                else {
                    neighbors[neighborIdx++] = i;
                    count++;
                }
            }
        }
        else if (in[i].up && count == 0) {
            // Wall on the first step: record the space and stop.
            count += WALL_COST;
            neighbors[neighborIdx++] = i;
        }
        else {
            count++;
            neighbors[neighborIdx++] = i;
        }
    }
    // Grab neighbors going left
    count = 0;
    i = idx;
    while (count < MAX_DISTANCE) {
        // Don't gather leftward neighbors if index is in the left column.
        if ((idx % SPACE_WIDTH == 0))
            break;
        i--;
        if (i >= numSpaces || i < 0) {
            break;
        }
        else if (in[i].right && count > 0) {
            // Wall reached after a step: fan out vertically from the space before it.
            i++;
            int tmp = i;
            int tmpCount = count;
            // Gather upward neighbors along this column.
            while (count < MAX_DISTANCE) {
                i -= SPACE_WIDTH;
                if (i < 0 || in[i].down) {
                    count += MAX_DISTANCE;
                    break;
                }
                else {
                    neighbors[neighborIdx++] = i;
                    count++;
                }
            }
            // Gather downward neighbors along this column.
            i = tmp;
            count = tmpCount;
            while (count < MAX_DISTANCE) {
                i += SPACE_WIDTH;
                // NOTE(review): in[i].up is evaluated BEFORE the range test; if
                // the previous i was in the bottom row, i is now >= numSpaces and
                // this reads out of bounds — confirm reachable states.
                if (in[i].up || (i >= (numSpaces - SPACE_WIDTH))) {
                    count += MAX_DISTANCE;
                    break;
                }
                else {
                    neighbors[neighborIdx++] = i;
                    count++;
                }
            }
        }
        else if (in[i].right && count == 0) {
            // Wall on the first step: record the space and stop.
            count += WALL_COST;
            neighbors[neighborIdx++] = i;
        }
        else {
            count++;
            neighbors[neighborIdx++] = i;
        }
    }
    // Grab neighbors going right
    count = 0;
    i = idx;
    while (count < MAX_DISTANCE) {
        // Don't gather rightward neighbors if index is in the right column.
        if (idx % SPACE_WIDTH == (SPACE_WIDTH - 1))
            break;
        i++;
        if (i >= numSpaces || i < 0) {
            break;
        }
        else if (in[i].left && count > 0) {
            // Wall reached after a step: fan out vertically from the space before it.
            i--;
            int tmp = i;
            int tmpCount = count;
            // Gather upward neighbors along this column.
            while (count < MAX_DISTANCE) {
                i -= SPACE_WIDTH;
                if (i < 0 || in[i].down) {
                    count += MAX_DISTANCE;
                    break;
                }
                else {
                    neighbors[neighborIdx++] = i;
                    count++;
                }
            }
            // Gather downward neighbors along this column.
            i = tmp;
            count = tmpCount;
            while (count < MAX_DISTANCE) {
                i += SPACE_WIDTH;
                // NOTE(review): same out-of-range read risk as the leftward scan —
                // in[i].up is tested before the bounds check.
                if (in[i].up || (i >= (numSpaces - SPACE_WIDTH))) {
                    count += MAX_DISTANCE;
                    break;
                }
                else {
                    neighbors[neighborIdx++] = i;
                    count++;
                }
            }
        }
        else if (in[i].left && count == 0) {
            // Wall on the first step: record the space and stop.
            count += WALL_COST;
            neighbors[neighborIdx++] = i;
        }
        else {
            count++;
            neighbors[neighborIdx++] = i;
        }
    }
    // Set all unused spaces in our return array to -1 to indicate they don't exist
    for (i = neighborIdx; i < NUM_NEIGHBORS; ++i)
        neighbors[i] = -1;
    return neighbors;
}
/* Build the list of candidate moves from `pos`.
   Returns a malloc'd array of nextMove (caller frees); writes the number of
   entries through `numSpaces` when non-NULL.
   BUG FIX: the original never wrote *numSpaces (the assignment was commented
   out), so callers had no way to learn how many moves were returned. */
nextMove *getPossibleMoves(space *board, int pos, int *numSpaces) {
    // Count the number of reachable spaces at pos.
    int *neighbors = findNeighbors(board, pos);
    int possibleSpaces = 0;
    for (int i = 0; i < 12; i++) {
        printf("Neighbors for space %d: %d\n", pos, neighbors[i]);
        if (neighbors[i] != -1) possibleSpaces++;
    }
    nextMove *moves = (nextMove *) malloc( sizeof(nextMove) * possibleSpaces );
    int j = 0;
    for (int i = 0; i < 12 && j < possibleSpaces; i++) {
        // findNeighbors keeps -1 entries contiguous at the tail, so the first
        // -1 marks the end of the valid neighbors.
        if (neighbors[i] == -1 ) break;
        moves[j].space = neighbors[i];
        moves[j].playerScore = 100; // Intentionally high preset
        moves[j].oppScore = -1;
        moves[j].wallIdx = -1;
        moves[j].newDir = (wall) 0;
        j++;
    }
    free(neighbors);
    if (numSpaces != NULL)
        *numSpaces = possibleSpaces;   // report the move count to the caller
    return moves;
}
/* Return the move with the largest (oppScore - playerScore) margin.
   BUG FIX: when possibleSpaces == 0 the original returned an uninitialized
   struct and computed `diff` from garbage; `best` is now zero-initialized. */
nextMove pickBestMove(nextMove *moves, int possibleSpaces) {
    nextMove best;
    // Deterministic fallback for an empty move list.
    best.space = 0;
    best.playerScore = 0;
    best.oppScore = 0;
    best.wallIdx = 0;
    best.newDir = (wall) 0;
    if (possibleSpaces > 0) {
        best = moves[0];
    }
    int diff = best.oppScore - best.playerScore;
    // moves[0] is already the incumbent, so start scanning at 1.
    for (int i = 1; i < possibleSpaces; ++i) {
        int tmpdiff = moves[i].oppScore - moves[i].playerScore;
        if (tmpdiff > diff) {
            best = moves[i];
            diff = tmpdiff;
        }
    }
    return best;
}
/* Driver: builds a random board, inspects neighbor sets, then evaluates every
   wall move for each reachable space and reports the best one.
   BUG FIXES: the first two findNeighbors() results were leaked when the
   pointer was reassigned; the size report printed sizeof() results with %d and
   the POINTER size for the neighbors buffer. */
int main(int argc, char const *argv[])
{
    int playerPos = 0;
    int oppPos = 0;
    int numSpaces = SPACE_LENGTH * SPACE_WIDTH;
    int spaceSize = sizeof(space) * numSpaces;
    int numWalls = WALL_LENGTH * WALL_WIDTH;
    int wallSize = sizeof(wall) * numWalls;
    // Malloc the array of wall / board
    wall *walls = (wall *)malloc(wallSize);
    space *board = (space *)malloc(spaceSize);
    // Initialize, zero out the board
    boardInit(board);
    // Generate walls
    generateWalls(walls);
    generateBoard(board, walls);
    // Start the timer
    gettimeofday(&startTime, &Idunno);
    // display board
    outputBoard(board);  // display by numbers
    displayBoard(board); // display visually
    // Get neighbors of a few sample spaces; each findNeighbors() call
    // allocates a fresh array, so free before reassigning (was leaked).
    int *neighbors = findNeighbors(board, 7);
    int i;
    for (i = 0; i < 12; ++i) {
        printf("Neighbors for space #7: %d\n", neighbors[i]);
    }
    free(neighbors);
    neighbors = findNeighbors(board, 17);
    for (i = 0; i < 12; ++i) {
        printf("Neighbors for space #17: %d\n", neighbors[i]);
    }
    free(neighbors);
    neighbors = findNeighbors(board, playerPos);
    for (i = 0; i < 12; ++i) {
        printf("Neighbors for space #%d: %d\n", playerPos, neighbors[i]);
    }
    // Count the number of possible spaces = # of blocks
    int possibleSpaces = 0;
    for (int i = 0; i < 12; i++) {
        if (neighbors[i] != -1) {
            possibleSpaces++;
        }
    }
    // Malloc an array nextMove[ # of neighbors ]
    nextMove *moves = (nextMove *)malloc( sizeof(nextMove) * possibleSpaces );
    printf("DEBUG: successful malloc of results\n");
    // Zero-out the results array and set each move.space to the neighbor space
    int j = 0;
    for (int i = 0; i < 12 && j < possibleSpaces; i++) {
        if (neighbors[i] != -1) {
            printf("Init results array. Moves[%d], Space: %d\n", j, neighbors[i]);
            moves[j].space = neighbors[i];
            moves[j].playerScore = 100; // Intentionally high preset
            moves[j].oppScore = -1;
            moves[j].wallIdx = -1;
            moves[j].newDir = (wall) 0;
            j++;
        }
    }
    /* start counting time */
    gettimeofday(&startTime, &Idunno);
    /* For each possible space --> move all walls in every possible direction
       and record the one that most improves the path differential. */
    for (int i = 0; i < possibleSpaces; i++) {
        moveAllWalls(walls, oppPos, moves, i);
    }
    /* check the total running time */
    report_running_time();
    outputResults(moves, possibleSpaces);
    // Pick the best move.
    nextMove bestMove = pickBestMove(moves, possibleSpaces);
    printf("Best Move: %d\n", bestMove.space);
    // %zu for size_t operands; report the neighbors BUFFER size (12 ints), not
    // the pointer size.
    printf("Memory Sizes - walls: %d, board: %d, nextMove (maximum): %zu, neighbors: %zu\n", wallSize, spaceSize, sizeof(nextMove) * 12, 12 * sizeof(int));
    free(walls);
    free(board);
    free(neighbors);
    free(moves);
    return 0;
}
|
19,774 | #include "includes.h"
#define DOUBLE
#ifdef DOUBLE
#define Complex cufftDoubleComplex
#define Real double
#define Transform CUFFT_Z2Z
#define TransformExec cufftExecZ2Z
#else
#define Complex cufftComplex
#define Real float
#define Transform CUFFT_C2C
#define TransformExec cufftExecC2C
#endif
#define TILE_DIM 8
// synchronize blocks
// Copy a row segment from one region of a 3-D (nx*ny*nz, row-major) volume
// into the opposite half-shifted region of `out`.
// NOTE(review): the source offset uses nx*ny/4 and nx/2 while the destination
// uses full strides — this suggests `in` is packed at half resolution in x and
// y, but that cannot be confirmed from this file; check the allocation site.
// NOTE(review): the destination adds `td` (blockDim.x) to the element offset
// while the source does not — verify the intended half-row shift.
__global__ void copy_0(int nx, int ny, int nz, Real *in, Real *out) {
    int tj = threadIdx.x;   // element position within the row segment
    int td = blockDim.x;    // segment width (threads per block)
    // source index: block (x = row within plane, y = plane)
    int jj = (blockIdx.y*nx*ny/4 + blockIdx.x*nx/2);
    // destination index: shifted by nz/2 planes and ny/2 rows
    int jj1 = ((blockIdx.y+nz/2)*nx*ny + (blockIdx.x+ny/2)*nx);
    out[jj1+tj+td] = in[jj+tj];
} |
19,775 |
#ifdef _WIN32
# define EXPORT __declspec(dllexport)
#else
# define EXPORT
#endif
// Negates x on the device, writing the result to device memory at *r.
void __global__ file1_kernel(int x, int* r)
{
    *r = -x;
}
// Launches file1_kernel and returns -x as computed on the GPU.
// BUG FIX: the original kernel took `int&` — a reference to a HOST stack
// variable. Writing through a host address from device code is undefined
// behavior and the host copy was never updated, so the function always
// returned 0. Route the result through device memory instead.
EXPORT int file1_launch_kernel(int x)
{
    int r = 0;
    int* d_r = 0;
    if (cudaMalloc((void**)&d_r, sizeof(int)) != cudaSuccess)
        return r;   // allocation failed; fall back to the neutral value
    file1_kernel<<<1, 1>>>(x, d_r);
    // cudaMemcpy is blocking, so it also synchronizes with the kernel.
    cudaMemcpy(&r, d_r, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_r);
    return r;
}
|
19,776 | #include <cuda_runtime_api.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
using namespace std;
// Creates a non-blocking CUDA stream (does not synchronize with stream 0).
// BUG FIX: the creation status was ignored; on failure the caller received an
// uninitialized handle. Report the error and return a null stream instead.
cudaStream_t createStreamWithFlags(){
    cudaStream_t stream = 0;
    cudaError_t status = cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking);
    if (status != cudaSuccess) {
        fprintf(stderr, "cudaStreamCreateWithFlags failed: %s\n",
                cudaGetErrorString(status));
        stream = 0;   // null stream signals failure to the caller
    }
    return stream;
}
|
19,777 | #include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include "answer.cuh"
// Compute vector sum C = A+B
//CUDA kernel. Each thread performes one pair-wise addition
// Element-wise vector sum c = a + b.
// Grid-stride loop: each thread handles indices gid, gid+gridSize, ..., so
// any launch configuration (including <<<1,1>>>) produces a correct result.
__global__ void vector_add(float *a, float *b, float *c, int N)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    const int gridSize = blockDim.x * gridDim.x;
    for (int k = gid; k < N; k += gridSize) {
        c[k] = a[k] + b[k];
    }
}
/* experiment with N */
/* how large can it be? */
#define THREADS_PER_BLOCK 1000
/* Host wrapper: computes c = a + b on the GPU for N floats.
   BUG FIXES: the kernel was launched <<<1,1>>> (a single thread doing all N
   additions — the intended multi-block launch was commented out), and the
   deprecated cudaThreadSynchronize() was used. */
void add(float *a, float *b, float *c, int N)
{
    float *d_a, *d_b, *d_c;
    int size = N * sizeof( float );
    /* allocate space for device copies of a, b, c */
    cudaMalloc( (void **) &d_a, size );
    cudaMalloc( (void **) &d_b, size );
    cudaMalloc( (void **) &d_c, size );
    /* copy inputs to device */
    cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
    /* launch with enough blocks to cover N (ceil division); the kernel's
       grid-stride loop handles any remainder */
    vector_add<<< (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK, THREADS_PER_BLOCK >>>( d_a, d_b, d_c, N);
    /* wait for the kernel (cudaThreadSynchronize is deprecated) */
    cudaDeviceSynchronize();
    /* copy result back to host */
    cudaMemcpy( c, d_c, size, cudaMemcpyDeviceToHost );
    /* clean up */
    cudaFree( d_a );
    cudaFree( d_b );
    cudaFree( d_c );
    return;
}
|
19,778 | #include<stdio.h>
#include<stdlib.h>
#include<ctype.h>
#include<math.h>
#include<time.h>
// One Euler step result per thread: y[i] = y0 + delta_t * sum[i], where sum[i]
// is the precomputed cumulative sum of the derivative (built on the host).
// Note: the initial condition y0 = 4 is fixed inside the kernel.
__global__ void euler_method(float *y, float *sum, float delta_t, int N)
{
    int y0 = 4;
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= N) return;   // guard: grid may overshoot N
    y[gid] = y0 + delta_t * sum[gid];
}
float edo_resuelta(float t);
float edo_original(float t);
void sumatoria(float *sum, float delta_t, int N);
/* Benchmarks the GPU Euler integration for six step sizes, writing timings to
   the file "1_c_time".
   BUG FIXES: sum_dev was cudaMalloc'd every iteration but never freed (device
   memory leak that grows as delta_t shrinks); the CUDA events were never
   destroyed. */
int main(){
    FILE *fp_time;
    int j, N, counter = 0;
    float delta_t[6] = {powf(10, -1), powf(10, -2), powf(10, -3), powf(10, -4),
                        powf(10, -5), powf(10, -6)};
    float *y, *y_dev, *sum, *sum_dev;
    int block_size, grid_size;
    fp_time = fopen("1_c_time", "w");
    cudaEvent_t ct1, ct2;
    float dt;
    cudaEventCreate(&ct1); cudaEventCreate(&ct2);
    for(j = 0 ; j < 6 ; j++)
    {
        block_size = 256;
        N = 10 / delta_t[j];   // number of steps to cover t in [0, 10]
        grid_size = (int)ceil((float) N / block_size);
        cudaMalloc(&y_dev, sizeof(float) * N);
        cudaMalloc(&sum_dev, sizeof(float) * N);
        sum = (float*) malloc(sizeof(float) * N);
        y = (float*) malloc(sizeof(float) * N);
        // Host precomputes the cumulative derivative sums.
        sumatoria(sum, delta_t[j], N);
        cudaEventRecord(ct1);
        cudaMemcpy(sum_dev, sum, N *sizeof(float), cudaMemcpyHostToDevice);
        euler_method<<<grid_size,block_size>>>(y_dev, sum_dev, delta_t[j], N);
        cudaEventRecord(ct2);
        cudaMemcpy(y, y_dev, N *sizeof(float), cudaMemcpyDeviceToHost);
        cudaEventSynchronize(ct2);
        cudaEventElapsedTime(&dt, ct1, ct2);
        counter++; printf("Tiempo que demora en HYBRID = %f [ms] para delta numero %d\n", dt, counter);
        fprintf(fp_time, "%f %f ",delta_t[j], dt);
        free(y);
        free(sum);
        cudaFree(y_dev);
        cudaFree(sum_dev);   // was leaked on every iteration
    }
    // Release the timing events (previously leaked).
    cudaEventDestroy(ct1); cudaEventDestroy(ct2);
    fclose(fp_time);
    return 0;
}
/* Fill sum[0..N-1] with the running (inclusive) sums of the derivative
   f(t) = 9t^2 - 4t + 5 sampled at t = 0, delta_t, 2*delta_t, ... */
void sumatoria(float *sum, float delta_t, int N){
    float acc = edo_original(0);
    sum[0] = acc;
    for (int k = 1; k < N; ++k) {
        acc = acc + edo_original(k * delta_t);
        sum[k] = acc;
    }
}
/* Derivative being integrated: f(t) = 9t^2 - 4t + 5. */
float edo_original(float t)
{
    return 9 * (powf(t, 2)) - 4 * t + 5;
}
/* Closed-form solution, for comparison: y(t) = 3t^3 - 2t^2 + 5t + 4. */
float edo_resuelta(float t)
{
    return 3 * (powf(t, 3)) - 2 * (powf(t, 2)) + 5 * t + 4;
}
|
19,779 | /**********************************************************************************
This code performs a calculation of pi using the monte carlo method
using cuda GPU parallelisation.
Created by: George Tall
Email: george.tall@seh.ox.ac.uk
/*********************************************************************************/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand.h>
// Marks which random points fall inside the unit quarter-circle: one thread
// per point, tallying hits per block into d_A[blockIdx.x] via atomicAdd.
// d_A must be zeroed by the caller before launch.
// BUG FIX: N is now passed by value — the old code received `&N`, a HOST
// address, which must never be dereferenced in device code — and the kernel is
// bounds-guarded so the grid may overshoot N.
__global__ void point_test(int N, float *d_x, float *d_y, float *d_R, float *d_A){
    long int index = blockDim.x*blockIdx.x + threadIdx.x;
    if (index >= N) return;             // guard the rounded-up grid
    d_R[index] = d_x[index]*d_x[index] + d_y[index]*d_y[index];
    if (d_R[index] < 1.0f) atomicAdd(&d_A[blockIdx.x], 1.0f);
}
// Single-block tree reduction: sums d_A[0..n-1] into d_A[0].
// Launch with ONE block whose thread count is a power of two >= n and with
// blockDim.x * sizeof(float) bytes of dynamic shared memory.
// BUG FIX: out-of-range lanes now load 0 so a non-power-of-two n is handled;
// the shared-memory adds need no atomics because `tid < d` lanes are disjoint.
__global__ void area_reduction(float *d_A, int n){
    extern __shared__ float shared_array[];
    int tid = threadIdx.x;
    shared_array[tid] = (tid < n) ? d_A[tid] : 0.0f;   // zero-pad the tail
    __syncthreads();
    for(long int d = blockDim.x/2; d > 0; d /= 2){
        if(tid < d){
            shared_array[tid] += shared_array[tid+d];
        }
        __syncthreads();   // uniform: every thread reaches this each pass
    }
    if(tid == 0){
        d_A[0] = shared_array[0];
    }
}
int main() {
    // N random points in the unit square; the fraction inside the quarter
    // circle estimates pi/4.
    int N = 6536;
    float area=0;
    int nThreads = 256;
    // BUG FIX: nBlocks = N/256 silently dropped the last N % 256 points; round
    // up instead (point_test bounds-checks the overshoot).
    int nBlocks = (N + nThreads - 1) / nThreads;
    int deviceid = 0; // using GPU with id 0
    int devCount;
    cudaGetDeviceCount(&devCount);
    if(deviceid<devCount) {
        cudaSetDevice(deviceid);
    }
    else return(1);
    curandGenerator_t gen;
    // Host/device buffers; d_R holds squared radii, d_A per-block hit counts.
    float *h_x, *h_y, *h_A;
    float *d_x, *d_y, *d_R, *d_A;
    h_x = (float*)malloc(N*sizeof(float));
    h_y = (float*)malloc(N*sizeof(float));
    h_A = (float*)malloc(nBlocks*sizeof(float));
    cudaMalloc((void**)&d_x, N*sizeof(float));
    cudaMalloc((void**)&d_y, N*sizeof(float));
    cudaMalloc((void**)&d_R, N*sizeof(float));
    cudaMalloc((void**)&d_A, nBlocks*sizeof(float));
    // BUG FIX: cudaMalloc does not zero memory; the tallies must start at 0.
    cudaMemset(d_A, 0, nBlocks*sizeof(float));
    // Pseudo-random generator with a fixed seed for reproducibility.
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL);
    curandGenerateUniform(gen, d_x, N);
    curandGenerateUniform(gen, d_y, N);
    point_test<<<nBlocks, nThreads>>>(N, d_x, d_y, d_R, d_A);
    cudaDeviceSynchronize();
    // BUG FIX: the old launch used nBlocks/nThreads (= 0) blocks, so the
    // reduction never ran. Use one block sized to the next power of two.
    int pad = 1;
    while (pad < nBlocks) pad <<= 1;
    area_reduction<<<1, pad, pad*sizeof(float)>>>(d_A, nBlocks);
    // Copy results back (x/y too, for the host-side cross-check below).
    cudaMemcpy(h_x, d_x, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_A, d_A, nBlocks*sizeof(float), cudaMemcpyDeviceToHost);
    for(int i = 0; i < nBlocks; i++){
        printf("%f \n",h_A[i]);
    }
    area = h_A[0];   // h_A[0] now holds the total hit count
    printf("\nPi from reduction:\t%f\n", (4.0*area)/(float)N);
    // Host-side recount of the same samples as a correctness check.
    area = 0;
    for(int i = 0; i < N; i++){
        if(h_x[i]*h_x[i] + h_y[i]*h_y[i] < 1.0f) area++;
    }
    printf("\nPi from host:\t%f\n", (4.0*area)/(float)N);
    // Free memory on host and device (and the RNG, previously leaked).
    curandDestroyGenerator(gen);
    cudaFree(d_x); cudaFree(d_y); cudaFree(d_R); cudaFree(d_A);
    free(h_x); free(h_y); free(h_A);
    return(0);
}
|
19,780 | #include <stdio.h>
#include <vector>
#include <algorithm>
#include <functional>
#include <cuda_runtime.h>
#include <cstdlib>
#include <string>
#include <map>
#include <vector>
#include <math.h>
#include <cuda.h>
#include <float.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/gather.h>
#include <thrust/execution_policy.h>
#include <iostream>
#include <thrust/binary_search.h>
#include <thrust/random.h>
#include <curand.h>
#include <time.h>
#include <stdlib.h>
#define THREADS_PER_BLOCK 256
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic when a CUDA call fails; optionally terminate
// the process with the error code (default). Used via the gpuErrchk macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;   // fast path: nothing to report
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Device functor mapping an integer index to a reproducible uniform float.
// A default_random_engine is constructed identically on every call and
// discarded `idx` steps, so each index yields the same draw across launches.
// NOTE(review): uniform_real_distribution's default range is [0, 1) — confirm
// that matches the sampling interval expected by the resampling code.
struct GenUnifRands
{
__device__
float operator () (int idx)
{
thrust::default_random_engine randEng;
thrust::uniform_real_distribution<float> uniDist;
// Skip ahead so consecutive indices get decorrelated draws.
randEng.discard(idx);
return uniDist(randEng);
}
};
// For every point, find its nearest point (squared xyz distance) with a
// strictly higher density estimate, searching the point's own voxel and its
// listed neighbor voxels; writes the chosen parent index and its distance.
// Launch: one block per voxel (3-D grid flattened via yz_idx/z_idx), each
// block striding over its voxel's points THREADS_PER_BLOCK at a time.
__global__
void segmentation_kernel(float* device_xyz,float* device_rgb,int* device_offset,
                         int* neighbor_id,float* device_pdens,int yz_idx,
                         int z_idx,int* parents,float* distances){
    int block_i = blockIdx.x;
    int block_j = blockIdx.y;
    int block_k = blockIdx.z;
    int blockId = yz_idx * block_i + z_idx* block_j + block_k ;
    int threadId = threadIdx.x;
    int num_threads = THREADS_PER_BLOCK;
    // Points of this voxel live in [offset[blockId], offset[blockId+1]).
    int my_num_pts = device_offset[blockId + 1] - device_offset[blockId];
    float3 nbr_xyz;
    float3 my_xyz;
    int my_idx;
    int nbr_idx;
    // Iterate over all points in voxel, num_threads points at a time.
    for(int i = 0 ; i < my_num_pts ; i+= num_threads){
        // BUG FIX: was `>`, which let the thread with i + threadId ==
        // my_num_pts process the FIRST point of the next voxel — wrong
        // neighborhood, and a write race with that point's rightful thread.
        if(threadId + i >= my_num_pts){
            return;
        }
        my_idx = device_offset[blockId] + i + threadId;
        my_xyz = *(float3*) &device_xyz[3*my_idx];
        // Scan the neighbouring voxels (slot list includes this voxel).
        float min_distance = 10000000.0;
        // BUG FIX: was declared float; this is an array index, and float loses
        // integer precision above 2^24 points.
        int current_parent = my_idx;
        for(int j = 0 ; j < 7 ; j++){
            if(neighbor_id[7*blockId+j] == -1){   // -1 marks a missing neighbor
                continue;
            }
            int nbr_num_pts = device_offset[neighbor_id[7*blockId +j] + 1] - device_offset[neighbor_id[7*blockId + j]];
            // Iterate over all the points in the neighbouring voxel.
            for(int k = 0; k < nbr_num_pts; k++){
                nbr_idx = device_offset[neighbor_id[blockId*7 + j]] + k;
                nbr_xyz = *(float3*) &device_xyz[3*nbr_idx];
                if(nbr_idx == my_idx){
                    continue;   // skip self
                }
                float xyz_dist = pow(my_xyz.x - nbr_xyz.x,2.0) + pow((my_xyz.y - nbr_xyz.y),2.0) +
                                 pow((my_xyz.z - nbr_xyz.z),2.0);
                // Parent must be denser than us and closer than the best so far.
                if(device_pdens[nbr_idx] > device_pdens[my_idx] && xyz_dist < min_distance){
                    min_distance = xyz_dist;
                    current_parent = nbr_idx;
                }
            }
        }
        distances[my_idx] = min_distance;
        parents[my_idx] = current_parent;
    }
}
// Per point, accumulate a Gaussian-kernel density estimate (pdensity) and an
// importance weight (imp_wt = squared distance between the point's 6-D
// xyz+rgb feature and its kernel-weighted neighborhood sum) over the point's
// voxel and its listed neighbor voxels.
// Launch: one block per voxel (3-D grid flattened via yz_idx/z_idx), each
// block striding over its voxel's points THREADS_PER_BLOCK at a time.
__global__
void compute_weights_kernel(float *device_xyz, float *device_rgb, int *device_offset,
                            int *neighbor_id, int yz_idx, int z_idx, float *imp_wt,
                            float* pdensity)
{
    int block_i = blockIdx.x;
    int block_j = blockIdx.y;
    int block_k = blockIdx.z;
    int blockId = yz_idx * block_i + z_idx* block_j + block_k ;
    int threadId = threadIdx.x;
    int num_threads = THREADS_PER_BLOCK;
    float pdensity_sum = 0.0;
    float nbr_feature[6];   // kernel-weighted [x,y,z,r,g,b] neighborhood sums
    float norm_sum = 0.0;
    int my_num_pts = device_offset[blockId + 1] - device_offset[blockId];
    float3 nbr_rgb;
    float3 nbr_xyz;
    float3 my_rgb;
    float3 my_xyz;
    float sigma_sq = 0.00005;   // kernel bandwidth (squared)
    float Aij_ew;
    float Aij_ew2;
    float lambda = 0.5;         // xyz-vs-rgb blend for the density kernel
    // Iterate over all points in voxel, num_threads points at a time.
    for(int i = 0 ; i < my_num_pts ; i+= num_threads){
        pdensity_sum = 0.0;
        norm_sum = 0.0;
        nbr_feature[0] = 0.0;
        nbr_feature[1] = 0.0;
        nbr_feature[2] = 0.0;
        nbr_feature[3] = 0.0;
        nbr_feature[4] = 0.0;
        nbr_feature[5] = 0.0;
        // BUG FIX: was `>`, which let the thread with i + threadId ==
        // my_num_pts process the first point of the NEXT voxel and race with
        // that point's rightful thread.
        if(threadId + i >= my_num_pts){
            return;
        }
        my_xyz = *(float3*) &device_xyz[3*(device_offset[blockId] + i + threadId)];
        my_rgb = *(float3*) &device_rgb[3*(device_offset[blockId] + i + threadId)];
        // Scan the neighbouring voxels (slot list includes this voxel).
        for(int j = 0 ; j < 7 ; j++){
            if(neighbor_id[7*blockId+j] == -1){   // -1 marks a missing neighbor
                continue;
            }
            int nbr_num_pts = device_offset[neighbor_id[7*blockId +j] + 1] - device_offset[neighbor_id[7*blockId + j]];
            // Iterate over all the points in the neighbouring voxel.
            for(int k = 0; k < nbr_num_pts; k++){
                nbr_xyz = *(float3*) &device_xyz[3*(device_offset[neighbor_id[blockId*7 + j]] + k)];
                nbr_rgb = *(float3*) &device_rgb[3*(device_offset[neighbor_id[blockId*7 + j]] + k)];
                float xyz_dist = pow(my_xyz.x - nbr_xyz.x,2.0) + pow((my_xyz.y - nbr_xyz.y),2.0) +
                                 pow((my_xyz.z - nbr_xyz.z),2.0);
                float rgb_dist = pow((my_rgb.x - nbr_rgb.x),2.0) + pow((my_rgb.y - nbr_rgb.y),2.0) +
                                 pow((my_rgb.z - nbr_rgb.z),2.0);
                // Geometric kernel (feature sums) and blended kernel (density).
                Aij_ew = __expf(-1.0f * (xyz_dist/sigma_sq));
                Aij_ew2 = __expf(-1.0f * ((lambda * xyz_dist) + ((1 - lambda) * rgb_dist))/(0.5 * sigma_sq));
                pdensity_sum += Aij_ew2;
                nbr_feature[0] += Aij_ew * nbr_xyz.x;
                nbr_feature[1] += Aij_ew * nbr_xyz.y;
                nbr_feature[2] += Aij_ew * nbr_xyz.z;
                nbr_feature[3] += Aij_ew * nbr_rgb.x;
                nbr_feature[4] += Aij_ew * nbr_rgb.y;
                nbr_feature[5] += Aij_ew * nbr_rgb.z;
            }
        }
        // Importance weight: squared 6-D distance to the weighted sums.
        norm_sum += pow(my_xyz.x - nbr_feature[0],2.0);
        norm_sum += pow(my_xyz.y - nbr_feature[1],2.0);
        norm_sum += pow(my_xyz.z - nbr_feature[2],2.0);
        norm_sum += pow(my_rgb.x - nbr_feature[3],2.0);
        norm_sum += pow(my_rgb.y - nbr_feature[4],2.0);
        norm_sum += pow(my_rgb.z - nbr_feature[5],2.0);
        pdensity[device_offset[blockId] + i + threadId] = pdensity_sum;
        imp_wt[device_offset[blockId] + i + threadId] = norm_sum;
    }
}
/* Computes per-point importance weights and densities on the GPU, then draws
   num_samples weighted random samples; writes the sampled indices to
   sample_arr and their densities to pdens.
   BUG FIXES: the four cudaMalloc'd buffers were never freed (leaked on every
   call), and kernel launch errors were never checked. */
void cuda_resampling(int num_pts, int num_voxels, float *flattenXYZ,
                     float *flattenRGB, int *voxel_offset, int *neighbor_ids,
                     int x_idx, int y_idx, int z_idx,int num_samples,uint *sample_arr,float *pdens)
{
    float *device_xyz, *device_rgb;
    int *device_offset, *device_neighbor_ids;
    cudaMalloc(&device_xyz, num_pts*3*sizeof(float));
    cudaMalloc(&device_rgb, num_pts*3*sizeof(float));
    cudaMalloc(&device_offset, (num_voxels+1)*sizeof(int));
    cudaMalloc(&device_neighbor_ids , num_voxels * 7 * sizeof(int));
    thrust::device_vector<float> dev_imp_wt(num_pts);
    thrust::device_vector<float> dev_pdensity(num_pts);
    thrust::host_vector<float> host_imp_wt(num_pts);
    thrust::host_vector<float> host_pdensity(num_pts);
    float* imp_wt = thrust::raw_pointer_cast(dev_imp_wt.data());
    float* pdensity = thrust::raw_pointer_cast(dev_pdensity.data());
    gpuErrchk(cudaMemcpy(device_offset,voxel_offset, (num_voxels+1)*sizeof(int),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(device_xyz, flattenXYZ, num_pts*3*sizeof(float),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(device_rgb, flattenRGB, num_pts*3*sizeof(float),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(device_neighbor_ids, neighbor_ids, num_voxels*7*sizeof(int),cudaMemcpyHostToDevice));
    int yz_idx = y_idx * z_idx;
    // One block per voxel; threads stride over each voxel's points.
    dim3 gridDim(x_idx,y_idx,z_idx);
    dim3 blockDim(THREADS_PER_BLOCK,1,1);
    compute_weights_kernel<<<gridDim,blockDim>>>(device_xyz,device_rgb,device_offset,device_neighbor_ids,yz_idx,z_idx,imp_wt,pdensity);
    gpuErrchk(cudaGetLastError());   // surface launch-configuration errors
    printf("finished computing weights! \n");
    cudaDeviceSynchronize();
    thrust::copy(dev_imp_wt.begin(),dev_imp_wt.end(),host_imp_wt.begin());
    thrust::copy(dev_pdensity.begin(),dev_pdensity.end(),host_pdensity.begin());
    /* WEIGHTED SAMPLING */
    printf("====================\n");
    // step 1: normalize the importance weights so they sum to 1
    float norm_sum = thrust::reduce(dev_imp_wt.begin(),dev_imp_wt.end());
    float norm_factor = 1.0/norm_sum;
    thrust::device_vector<float> temp(dev_imp_wt.size());
    thrust::fill(temp.begin(),temp.end(),norm_factor);
    thrust::transform(dev_imp_wt.begin(),dev_imp_wt.end(),temp.begin(),dev_imp_wt.begin(),thrust::multiplies<float>());
    // step 2: compute prefix sum (inclusive scan) -> CDF
    thrust::device_vector<float> wts_rs(dev_imp_wt.size());
    thrust::inclusive_scan(dev_imp_wt.begin(),dev_imp_wt.end(),wts_rs.begin());
    // step 3: generate uniform random numbers
    srand(time(NULL));
    int seed = rand();
    thrust::device_vector<float> d_unifrands(num_samples);
    thrust::transform( thrust::make_counting_iterator(seed), thrust::make_counting_iterator(seed + num_samples),
                       d_unifrands.begin(),GenUnifRands());
    // step 4: invert the CDF — each uniform draw picks a point index
    thrust::device_vector<unsigned int> samples(num_samples);
    thrust::lower_bound(wts_rs.begin(),wts_rs.end(),d_unifrands.begin(),d_unifrands.end(),samples.begin());
    thrust::host_vector<unsigned int> h_samples(num_samples);
    thrust::copy(samples.begin(),samples.end(),h_samples.begin());
    uint* pc_samples = thrust::raw_pointer_cast(h_samples.data());
    memcpy(sample_arr, pc_samples,num_samples*sizeof(uint));
    // step 5: gather the densities of the sampled indices
    thrust::device_vector<float> dev_pdensity_rs(num_samples);
    thrust::host_vector<float> host_pdensity_rs(num_samples);
    thrust::gather(thrust::device,samples.begin(),samples.end(),dev_pdensity.begin(),dev_pdensity_rs.begin());
    thrust::copy(dev_pdensity_rs.begin(),dev_pdensity_rs.end(),host_pdensity_rs.begin());
    float* host_pdensity_rs_ptr = thrust::raw_pointer_cast(host_pdensity_rs.data());
    memcpy(pdens, host_pdensity_rs_ptr,num_samples*sizeof(float));
    printf("finished weighted sampling of points\n");
    // Release the raw device buffers (previously leaked on every call).
    cudaFree(device_xyz);
    cudaFree(device_rgb);
    cudaFree(device_offset);
    cudaFree(device_neighbor_ids);
    return;
}
/* Density-peak segmentation: assigns each point a parent (nearest denser
   point), then flattens the parent pointers num_gather times so every point
   points at its cluster root; roots are written to parents_ptr.
   BUG FIXES: the five cudaMalloc'd buffers were never freed (leaked on every
   call), and kernel launch errors were never checked. */
void cuda_segmentation(int num_pts, int num_voxels, float* pdens, float *flattenXYZ,
                       float *flattenRGB, int *voxel_offset, int *neighbor_ids,
                       int x_idx, int y_idx, int z_idx,int* parents_ptr)
{
    float *device_xyz, *device_rgb, *device_pdens;
    int *device_offset, *device_neighbor_ids;
    cudaMalloc(&device_xyz, num_pts*3*sizeof(float));
    cudaMalloc(&device_rgb, num_pts*3*sizeof(float));
    cudaMalloc(&device_offset, (num_voxels+1)*sizeof(int));
    cudaMalloc(&device_neighbor_ids , num_voxels * 7 * sizeof(int));
    cudaMalloc(&device_pdens, num_pts*sizeof(float));
    thrust::device_vector<int> dev_parents(num_pts);
    thrust::host_vector<int> host_parents(num_pts);
    thrust::device_vector<float> dev_distances(num_pts);
    thrust::host_vector<float> host_distances(num_pts);
    int* parents = thrust::raw_pointer_cast(dev_parents.data());
    float* distances = thrust::raw_pointer_cast(dev_distances.data());
    gpuErrchk(cudaMemcpy(device_offset,voxel_offset, (num_voxels+1)*sizeof(int),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(device_xyz, flattenXYZ, num_pts*3*sizeof(float),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(device_rgb, flattenRGB, num_pts*3*sizeof(float),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(device_neighbor_ids, neighbor_ids, num_voxels*7*sizeof(int),cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(device_pdens, pdens, num_pts * sizeof(float), cudaMemcpyHostToDevice));
    int yz_idx = y_idx * z_idx;
    // One block per voxel; threads stride over each voxel's points.
    dim3 gridDim(x_idx,y_idx,z_idx);
    dim3 blockDim(THREADS_PER_BLOCK,1,1);
    segmentation_kernel<<<gridDim,blockDim>>>(device_xyz,device_rgb,device_offset,device_neighbor_ids,
                                              device_pdens,yz_idx,z_idx,parents,distances);
    gpuErrchk(cudaGetLastError());   // surface launch-configuration errors
    cudaDeviceSynchronize();
    printf("finished segmentation!\n");
    // Pointer-jumping: repeatedly replace each parent with its grandparent so
    // chains collapse toward the cluster roots.
    int num_gather = 6;
    thrust::device_vector<int> temp_1(num_pts);
    thrust::device_vector<int> temp_2(num_pts);
    thrust::copy(dev_parents.begin(),dev_parents.end(),temp_1.begin());
    for(int i = 0 ; i < num_gather ; i++){
        thrust::gather(thrust::device, dev_parents.begin(),dev_parents.end(),temp_1.begin(),temp_2.begin());
        thrust::copy(temp_2.begin(),temp_2.end(),temp_1.begin());
        thrust::copy(temp_2.begin(),temp_2.end(),dev_parents.begin());
    }
    printf("finished tree cutting!\n");
    thrust::copy(dev_parents.begin(),dev_parents.end(),host_parents.begin());
    int* host_parents_ptr = thrust::raw_pointer_cast(host_parents.data());
    memcpy(parents_ptr, host_parents_ptr,num_pts*sizeof(int));
    // Release the raw device buffers (previously leaked on every call).
    cudaFree(device_xyz);
    cudaFree(device_rgb);
    cudaFree(device_offset);
    cudaFree(device_neighbor_ids);
    cudaFree(device_pdens);
    return;
}
|
19,781 | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <vector>
#include "demo.cuh"
// Element-wise C[i] = A[i] + B[i]; one thread per element.
// No bounds guard: the launch configuration must cover exactly the array
// length (block_num * block_size == element count).
__global__ void add_kernel(float* A, float* B, float* C)
{
    const unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
    C[gid] = A[gid] + B[gid];
}
/* Smoke test: adds two 100-element float vectors on the GPU and prints the
   copied-back inputs; returns 0 always.
   BUG FIX: every cudaMemcpy that moves FLOAT data was sized with
   sizeof(int) — equal on common platforms but wrong by type; use
   sizeof(float) throughout. */
int testCUDA()
{
    int num = 100;
    float a[num];
    float b[num];
    float c[num];
    float a_test[num];   // read-back buffer to verify the H2D copy
    std::vector<float> a_vec;
    std::vector<float> b_vec;
    std::vector<float> c_vec;
    int block_num = 20;
    int block_size = num / block_num;   // 5 threads/block; covers num exactly
    cudaError_t cudaStatus;
    for (int i = 0; i < num; i++)
    {
        a[i] = i;
        b[i] = i;
        a_vec.emplace_back(i);
        b_vec.emplace_back(i);
    }
    float *a_d, *b_d, *c_d;
    cudaMalloc((void **)&a_d, sizeof(float) * num);
    cudaMalloc((void **)&b_d, sizeof(float) * num);
    cudaMalloc((void **)&c_d, sizeof(float) * num);
    // Copy from the vectors; sizes are in float units (was sizeof(int)).
    cudaMemcpy(a_d, a_vec.data(), num * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_vec.data(), num * sizeof(float), cudaMemcpyHostToDevice);
    add_kernel<<<block_num, block_size>>>(a_d, b_d, c_d);
    cudaStatus = cudaGetLastError();
    if(cudaStatus != cudaSuccess)
    {
        std::cout << "add_kernel failed: " << cudaGetErrorString(cudaStatus) << std::endl;
        goto Error;
    }
    // Blocking cudaMemcpy below also synchronizes with the kernel.
    cudaStatus = cudaMemcpy(a_test, a_d, num * sizeof(float), cudaMemcpyDeviceToHost);
    if(cudaStatus != cudaSuccess)
    {
        std::cout << "cuda memcpy failed: " << cudaGetErrorString(cudaStatus) << std::endl;
        goto Error;
    }
    for (int i=0; i<num; i++)
    {
        std::cout << i << ": " << *(a_test + i) << std::endl;
    }
    cudaStatus = cudaMemcpy(c, c_d, num * sizeof(float), cudaMemcpyDeviceToHost);
    if(cudaStatus != cudaSuccess)
    {
        std::cout << "cuda memcpy failed: " << cudaGetErrorString(cudaStatus) << std::endl;
        goto Error;
    }
Error:
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    return 0;
}
|
19,782 | /*
autor fredy m
uaem
desonses@gmail.com para mas comentarios
*/
#include <device_functions.h>
#include <stdio.h>
#include <stdlib.h>
#include <vector_types.h>
#include <cuda.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define N 33
/*
realiza la transpuesta de una matriz
*/
// definicio de memoria constante CUDA
__constant__ float dev_A[N][N];
//GLOBAL: func desde el host y ejecutada en el kernel(DEVICE)
// Computes B = A^T for the NxN matrix stored in constant memory dev_A.
// BUG FIX: the kernel now uses global 2D indices with a bounds guard, so
// it works with any block/grid shape covering NxN. The previous version
// assumed a single N x N thread block, which for N = 33 means 1089
// threads -- above the 1024 threads-per-block limit, so the launch
// silently failed and hst_B was printed uninitialized.
__global__ void transpuesta(float *dev_B)
{
    int columna = blockIdx.x * blockDim.x + threadIdx.x;
    int fila = blockIdx.y * blockDim.y + threadIdx.y;
    if (columna < N && fila < N)
    {
        // each thread writes one element of the transposed matrix
        dev_B[columna + N * fila] = dev_A[columna][fila];
    }
}
int main(int argc, char** argv)
{
    float *hst_A, *hst_B;
    float *dev_B;
    int size = N * N * sizeof(float);
    // host allocations
    hst_A = (float*)malloc(size);
    hst_B = (float*)malloc(size);
    // device allocation for the result
    cudaMalloc((void**)&dev_B, size);
    // fill the input matrix with 1..N*N
    for (int i = 0; i < N*N; i++)
    {
        hst_A[i] = float(i) + 1;
    }
    // copy the input into constant memory
    cudaError_t error = cudaMemcpyToSymbol(dev_A, hst_A, size);
    if (error != cudaSuccess) {
        printf("Error Memori const\n");
    }
    // BUG FIX: tile the NxN domain with 16x16 = 256-thread blocks instead
    // of one oversized N x N block (illegal for N > 32).
    const int TILE = 16;
    dim3 hilos(TILE, TILE);
    dim3 bloques((N + TILE - 1) / TILE, (N + TILE - 1) / TILE);
    transpuesta <<<bloques, hilos >>> (dev_B);
    // copy the result back (cudaMemcpy synchronizes with the kernel)
    cudaMemcpy(hst_B, dev_B, size, cudaMemcpyDeviceToHost);
    printf("Matriz original:\n");
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++)
        {
            printf("%.2f\t", hst_A[j + i * N]);
        }
        printf("\n");
    }
    printf("Matriz transpuesta:\n");
    for (int i = 0; i < N; i++)
    {
        for (int j = 0; j < N; j++)
        {
            printf("%.2f\t", hst_B[j + i * N]);
        }
        printf("\n");
    }
    // BUG FIX: release device and host buffers (previously leaked)
    cudaFree(dev_B);
    free(hst_A);
    free(hst_B);
    printf("\n pulsa INTRO para salir:\n");
    fflush(stdin);
    char tecla = getchar();
    (void)tecla;
    return 0;
}
|
19,783 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: Xu Chen (email: hello_master1954@163.com) 2018-09-12
*/
#include "Dropout.h"
#include "Dropout.cuh"
#include "Loss.cuh"
#include "../XDevice.h"
#ifdef USE_CUDA
// the CUDA stuff
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda.h>
#endif
namespace nts{ // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
/*
dropout function (Cuda kernel)
>> x - input data pointer
>> y - output data pointer
>> m - mask indicator to set zero
>> s - the scale factor
>> size - size of input/output
*/
/* elementwise dropout: y[i] = x[i] * m[i] * s, one thread per element */
__global__
void KernelDropoutCompute(DTYPE * x, DTYPE * y, DTYPE * m, DTYPE s, int size)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    y[idx] = x[idx] * m[idx] * s;
}
/*
dropout function (Cuda version)
>> x - input tensor
>> y - output tensor
>> mask - mask tensor to set 0
>> scaleFactor - the scale factor
*/
void _CudaDropout(const XTensor * x, XTensor * y, const XTensor * mask, DTYPE scaleFactor)
{
    /* only the default dense DTYPE path is implemented */
    if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
        CheckNTErrors(!x->isSparse && !y->isSparse, "the activation function (rectify) does not support sparse matrices.");
        CheckNTErrors(x->unitNum && y->unitNum, "we require two vectors with the same length.");
        /* pick a 1D launch configuration sized to x->unitNum elements */
        int gridSize[3], blockSize[3];
        GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
        /* switch to x's device, launch, then restore the previous device */
        int devIDBackup;
        ProtectCudaDev(x->devID, devIDBackup);
        KernelDropoutCompute<<<dim3(gridSize[0]), dim3(blockSize[0])>>>((DTYPE*)x->data, (DTYPE*)y->data, (DTYPE*)mask->data, scaleFactor, x->unitNum);
        BacktoCudaDev(x->devID, devIDBackup);
    }
    else
        ShowNTErrors("TODO!");
}
/*
backward computation of dropout function (Cuda kernel)
dE/dx = dE/dy * dy/dx
>> dedy - dE/dy
>> dedx - dE/dx
>> m - mask indicator to set zero
>> s - the scale factor
>> size - size of input/output
*/
/* dropout gradient: dedx[i] = dedy[i] * m[i] * s, one thread per element */
__global__
void KernelDropoutBackward(DTYPE * dedy, DTYPE * dedx,
                           DTYPE * m, DTYPE s, int size)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    dedx[idx] = dedy[idx] * m[idx] * s;
}
/*
backward computation of dropout function (Cuda version)
dE/dx = dE/dy * dy/dx
>> y - output of the dropout function
>> x - input of the dropout function
>> dedy - dE/dy
>> dedx - dE/dx
>> mask - mask tensor to set 0
>> scaleFactor - the scale factor
*/
void _CudaDropoutBackward(const XTensor * y, const XTensor * x,
                          const XTensor * dedy, XTensor * dedx,
                          const XTensor * mask, DTYPE scaleFactor)
{
    int gridSize[3], blockSize[3];
    /* only the default dense DTYPE path is implemented */
    if(x->dataType == DEFAULT_DTYPE && y->dataType == DEFAULT_DTYPE){
        /* pick a 1D launch configuration sized to x->unitNum elements */
        GDevs.GetCudaThread(x->devID, x->unitNum, gridSize, blockSize);
        /* switch to x's device, launch, then restore the previous device */
        int devIDBackup;
        ProtectCudaDev(x->devID, devIDBackup);
        /* dE/ds = dE/dy * dy/ds */
        KernelDropoutBackward<<<dim3(gridSize[0]),dim3(blockSize[0])>>>
                             ((DTYPE*)dedy->data, (DTYPE*)dedx->data,
                              (DTYPE*)mask->data, scaleFactor, x->unitNum);
        BacktoCudaDev(x->devID, devIDBackup);
    }
    else
        ShowNTErrors("TODO!");
}
#endif
} // namespace nts(NiuTrans.Tensor) |
19,784 | #include <cuda.h>
#include <assert.h>
#include <stdio.h>
/*
 * Forward pass of a partitioned convolution with fused bias add and ReLU.
 * From the indexing below: input/out are laid out channel-major with the
 * batch dimension innermost, split into `partition` channel groups
 * (blockIdx.z selects the group). Each block computes a
 * filter_per_block x input_per_block tile of outputs for one output pixel,
 * staging input and weights in shared memory. Dynamic shared memory must
 * hold input_per_block + 2 * filter_per_block floats.
 */
template <int input_per_thread, int filter_per_thread, int input_per_block, int filter_per_block>
__global__ static void _cwc_kern_convolutional_forward_propagate(const int strides, const int border, const int batch,
    float* input, const int rows, const int cols, const int channels_per_partition, const int partition,
    float* out, const int out_rows, const int out_cols,
    float* filter, const int filter_rows, const int filter_cols, const int count,
    float* const biases)
{
    /* launch-shape preconditions */
    assert(gridDim.x * partition * filter_per_block * input_per_block == out_cols * batch * count);
    assert(gridDim.y == out_rows);
    assert(gridDim.z == partition);
    extern __shared__ float shared[];
    /* shared scratch layout: [input tile | weight tile | bias tile] */
    float* shared_block = &shared[0];
    float* shared_weights = &shared[input_per_block];
    float* shared_bias = &shared[input_per_block + filter_per_block];
    /* per-thread register accumulators */
    float prod[filter_per_thread][input_per_thread];
    assert(input_per_block == input_per_thread * blockDim.x);
    assert(filter_per_block == filter_per_thread * blockDim.y);
    const int thidx = threadIdx.x + threadIdx.y * blockDim.x;
    const int thcnt = blockDim.x * blockDim.y;
    int c, i, j, x, y;
    #pragma unroll
    for (i = 0; i < filter_per_thread; i++)
        #pragma unroll
        for (j = 0; j < input_per_thread; j++)
            prod[i][j] = 0;
    /* decode which output pixel / batch slice / filter group this block owns */
    const int origin_x = blockIdx.x % out_cols;
    const int origin_y = blockIdx.y;
    const int input_group_count = batch / input_per_block;
    const int input_group_idx = (blockIdx.x % (out_cols * input_group_count)) / out_cols;
    const int filter_group_idx = blockIdx.z * count / (filter_per_block * partition) + blockIdx.x / (out_cols * input_group_count); // for the partitioned filter group
    input += (blockIdx.z * channels_per_partition * rows * cols + origin_y * strides * cols + origin_x * strides) * batch + input_group_idx * input_per_block;
    assert(thcnt >= input_per_block);
    assert(thcnt >= filter_per_block);
    if (thidx < filter_per_block)
        shared_bias[thidx] = biases[filter_group_idx * filter_per_block + thidx];
    /* clip the filter window against the image borders */
    const int start_x = max(origin_x * strides - border, 0) - (origin_x * strides - border);
    const int end_x = min(origin_x * strides - border + filter_cols, cols) - (origin_x * strides - border);
    const int start_y = max(origin_y * strides - border, 0) - (origin_y * strides - border);
    const int end_y = min(origin_y * strides - border + filter_rows, rows) - (origin_y * strides - border);
    filter += filter_group_idx * filter_per_block;
    /* accumulate over channels of this partition and the clipped window */
    for (c = 0; c < channels_per_partition; c++)
    {
        for (y = start_y; y < end_y; y++)
            for (x = start_x; x < end_x; x++)
            {
                /* cooperative load of one input slice / weight slice */
                if (thidx < input_per_block)
                    shared_block[thidx] = input[((y - border) * cols + x - border) * batch + thidx];
                if (thidx < filter_per_block)
                    shared_weights[thidx] = filter[(y * filter_cols + x) * count + thidx];
                __syncthreads();
                #pragma unroll
                for (i = 0; i < filter_per_thread; i++)
                    #pragma unroll
                    for (j = 0; j < input_per_thread; j++)
                        prod[i][j] += shared_block[j + threadIdx.x * input_per_thread] * shared_weights[i + threadIdx.y * filter_per_thread];
                /* barrier before the tiles are overwritten next iteration */
                __syncthreads();
            }
        input += rows * cols * batch;
        filter += filter_rows * filter_cols * count;
    }
    const int outcnt = out_rows * out_cols * batch;
    out += (filter_group_idx * filter_per_block + threadIdx.y * filter_per_thread) * outcnt + (origin_y * out_cols + origin_x) * batch + input_group_idx * input_per_block + threadIdx.x * input_per_thread;
    #pragma unroll
    for (i = 0; i < filter_per_thread; i++)
    {
        const float bias = shared_bias[i + threadIdx.y * filter_per_thread];
        /* fused bias + ReLU on the way out */
        #pragma unroll
        for (j = 0; j < input_per_thread; j++)
            out[j] = max(0.0, prod[i][j] + bias);
        out += outcnt;
    }
}
// Benchmark harness: launches the templated convolution kernel once over
// synthetic data. Weights and biases are left uninitialized on purpose
// (only timing/launch validity matters here).
// NOTE(review): `in`/`out` are allocated for a 256 batch but only 128 is
// filled and the kernel is launched with batch=128 -- presumably headroom;
// confirm before shrinking the allocations.
int main(int argc, char** argv)
{
    float* in = 0;
    float* out = 0;
    cudaMalloc(&in, sizeof(float) * (55 * 55 * 96 * 256));
    cudaMalloc(&out, sizeof(float) * (27 * 27 * 256 * 256));
    float* in_host = 0;
    float* out_host = 0;
    int i, j, c, k;
    cudaMallocHost(&in_host, sizeof(float) * 55 * 55 * 96 * 128);
    for (i = 0; i < 55; i++)
        for (j = 0; j < 55; j++)
            for (c = 0; c < 96; c++)
                for (k = 0; k < 128; k++)
                    in_host[i * 55 * 96 * 128 + j * 96 * 128 + c * 128 + k] = c * k;
    cudaMemcpy(in, in_host, sizeof(float) * 55 * 55 * 96 * 128, cudaMemcpyHostToDevice);
    cudaMallocHost(&out_host, sizeof(float) * 27 * 27 * 256 * 128);
    for (i = 0; i < 27; i++)
        for (j = 0; j < 27; j++)
            for (c = 0; c < 256; c++)
                for (k = 0; k < 128; k++)
                    out_host[i * 27 * 256 * 128 + j * 256 * 128 + c * 128 + k] = c * k;
    cudaMemcpy(out, out_host, sizeof(float) * 27 * 27 * 256 * 128, cudaMemcpyHostToDevice);
    float* w = 0;
    cudaMalloc(&w, sizeof(float) * (256 * 96 / 2) * 5 * 5);
    float* biases = 0;
    cudaMalloc(&biases, sizeof(float) * 256);
    dim3 thread_per_block(64 / 4, 32 / 8);
    dim3 num_blocks(27 * 2 * 256 / (32 * 2), 27, 2);
    // shared scratch = input_per_block + 2 * filter_per_block floats
    int shared_memory_size = sizeof(float) * (64 + 32 * 2);
    cudaFuncSetCacheConfig(_cwc_kern_convolutional_forward_propagate<4, 8, 64, 32>, cudaFuncCachePreferL1);
    _cwc_kern_convolutional_forward_propagate
    <4, 8, 64, 32>
    <<<num_blocks, thread_per_block, shared_memory_size>>>
    (2, 1, 128,
     in, 55, 55, 96 / 2, 2,
     out, 27, 27,
     w, 5, 5, 256,
     biases);
    // BUG FIX: launch/execution errors were never surfaced; the program
    // also exited without waiting for the kernel to finish.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    cudaFree(biases);
    cudaFree(w);
    cudaFree(out);
    cudaFree(in);
    cudaFreeHost(out_host);
    cudaFreeHost(in_host);
    return 0;
}
|
// Brute-force nearest-neighbor search: for every point in pc2, find the
// closest point in pc1 (squared Euclidean distance).
// args: [0]=grid blocks (unused here), [1]=threads per block,
//       [2]=|pc1|, [3]=|pc2|. Point clouds are stored planar:
//       all x's, then all y's, then all z's.
// result row 0 gets the 1-based index of the nearest pc1 point
// (presumably 1-based for a MATLAB-style caller -- TODO confirm);
// result row 1 gets the squared distance.
__global__ void KNNSearch( float * result, const int * args, const float * pc1, const float * pc2)
{
    int cudaNumThreads = args[1];
    int pc1NumPts = args[2];
    int pc2NumPts = args[3];
    int pc2Idx = blockIdx.x * cudaNumThreads + threadIdx.x;
    if (pc2Idx < pc2NumPts) {
        // BUG FIX: the coordinate loads were previously performed BEFORE
        // the bounds check, so out-of-range threads read pc2 out of bounds.
        float currPtX = pc2[pc2NumPts * 0 + pc2Idx];
        float currPtY = pc2[pc2NumPts * 1 + pc2Idx];
        float currPtZ = pc2[pc2NumPts * 2 + pc2Idx];
        int nnIdx = 0;
        float nnDist = 100000.0f;
        for (int i = 0; i < pc1NumPts; i++) {
            float otherPtX = pc1[pc1NumPts * 0 + i];
            float otherPtY = pc1[pc1NumPts * 1 + i];
            float otherPtZ = pc1[pc1NumPts * 2 + i];
            float checkDist = (currPtX - otherPtX) * (currPtX - otherPtX) +
                              (currPtY - otherPtY) * (currPtY - otherPtY) +
                              (currPtZ - otherPtZ) * (currPtZ - otherPtZ);
            if (checkDist < nnDist) {
                nnDist = checkDist;
                nnIdx = i;
            }
        }
        result[pc2NumPts * 0 + pc2Idx] = nnIdx + 1;
        result[pc2NumPts * 1 + pc2Idx] = nnDist;
    }
}
19,786 | #include "includes.h"
// Author: Jose F. Martinez Rivera
// Course: ICOM4036 - 040
// Professor: Wilson Rivera Gallego
// Assignment 2 - CUDA Implementation
#define V 8
#define E 11
#define MAX_WEIGHT 1000000
#define TRUE 1
#define FALSE 0
typedef int boolean;
// A directed edge from vertex u to vertex v.
typedef struct
{
    int u;
    int v;
} Edge;
// A graph vertex with a visited flag for the traversal.
typedef struct
{
    int title;
    boolean visited;
} Vertex;
// Returns the weight of the edge (u, v) by linear scan over the E edges,
// or MAX_WEIGHT when no such edge exists.
__device__ __host__ int findEdge(Vertex u, Vertex v, Edge *edges, int *weights)
{
    for (int e = 0; e < E; e++)
    {
        if (edges[e].u == u.title && edges[e].v == v.title)
            return weights[e];
    }
    return MAX_WEIGHT;
}
// One relaxation round of the parallel shortest-path step: each thread
// owns one unvisited vertex u, marks it visited, and relaxes every
// outgoing edge (u, v) against the tentative distances in updateLength.
__global__ void Find_Vertex(Vertex *vertices, Edge *edges, int *weights, int *length, int *updateLength)
{
    int u = threadIdx.x;
    if(vertices[u].visited == FALSE)
    {
        vertices[u].visited = TRUE;
        for(int v = 0; v < V; v++)
        {
            // Weight of the edge u -> v (MAX_WEIGHT when absent)
            int weight = findEdge(vertices[u], vertices[v], edges, weights);
            if(weight < MAX_WEIGHT)
            {
                // BUG FIX: the previous read-compare-write on
                // updateLength[v] raced between threads relaxing the same
                // destination vertex; atomicMin performs the same
                // "keep the smaller distance" update safely.
                atomicMin(&updateLength[v], length[u] + weight);
            }
        }
    }
}
19,787 | #include<stdio.h>
#include<stdlib.h>
#include<stdbool.h>
#include<string.h>
#include<cuda.h>
#include<time.h>
#include<cuda_runtime_api.h>
#include<device_launch_parameters.h>
#include <device_functions.h>
#define MASK_WIDTH 3 //Here MASK_WIDTH = MASK_HEIGHT = 2*N + 1 where N is half-width of the chosen square mask
#define BLOCK_WIDTH 32
// Despite the name, this is a 3-channel MEDIAN filter: for each pixel and
// channel it builds a 256-bin histogram of the MASK_WIDTH x MASK_WIDTH
// neighborhood (clamping out-of-image samples to the nearest border pixel)
// and writes the median intensity.
__global__ void convolution_kernel(unsigned char *input_img, unsigned char *output_img, int height, int width)
{
    // NOTE: the outer row/col declarations are shadowed by the ones inside
    // the channel loop below; only the inner pair is ever used.
    int x,y,row,col,chan,i,j;
    unsigned char pixval;
    // per-thread intensity histogram (local memory)
    int freq[256];
    for(chan=0;chan<3;chan++) //3 Channel Image
    {
        int tx = threadIdx.x;
        int ty = threadIdx.y;
        int row = blockIdx.x*BLOCK_WIDTH + tx;
        int col = blockIdx.y*BLOCK_WIDTH + ty;
        if(row < height && col < width)
        {
            for(i=0;i<256;i++)freq[i]=0;
            // scan the mask window centered on (row, col)
            for(x=row-MASK_WIDTH/2;x<=row+MASK_WIDTH/2;x++)
            {
                for(y=col-MASK_WIDTH/2;y<=col+MASK_WIDTH/2;y++)
                {
                    if((x >= 0) && (x < height) && (y>=0) && (y < width))
                    {
                        // in-bounds sample
                        pixval = input_img[(x*width + y)*3 + chan];
                    }
                    else
                    {
                        // out-of-bounds: clamp to the nearest corner/edge
                        // pixel. NOTE(review): the comparisons use strict
                        // < / > so x==height or y==width falls through to
                        // the final corner case -- confirm this clamping
                        // at exact boundaries is intended.
                        if(x<0 && y<0)
                        {
                            pixval = input_img[chan];
                        }
                        else if(x<0 && y<width)
                        {
                            pixval = input_img[3*y + chan];
                        }
                        else if(x<0)
                        {
                            pixval = input_img[3*(width-1) + chan];
                        }
                        else if(x<height && y<0)
                        {
                            pixval = input_img[x*width*3 + chan];
                        }
                        else if(x<height && y>width)
                        {
                            pixval = input_img[(x*width +width-1)*3 + chan];
                        }
                        else if(x>height && y<0)
                        {
                            pixval = input_img[width*(height-1)*3 + chan];
                        }
                        else if(x>height && y<width)
                        {
                            pixval = input_img[(width*(height-1)+y)*3 + chan];
                        }
                        else
                        {
                            pixval = input_img[(width*(height-1) + (width-1))*3 + chan];
                        }
                    }
                    freq[pixval]++;
                }
            }
            // walk the histogram to the median count; i is the median value
            j=0;
            for(i=0;i<256;i++)
            {
                j=j+freq[i];
                if(j>((MASK_WIDTH*MASK_WIDTH)/2))break;
            }
            output_img[(row*width + col)*3 + chan] = i;
        }
    }
}
// Reads a binary PPM (P6) image. On success returns a malloc'd buffer of
// 3*width*height bytes and fills the out-parameters; on any failure prints
// a message and returns NULL.
// NOTE(review): the PPM header stores "width height" but the values are
// read into (height, width) in that order; writeImg mirrors the swap so a
// round trip is self-consistent -- confirm before changing either side.
unsigned char* readImg(char *filename,int *height_out, int *width_out, int *maxval_out)
{
    FILE *fptr;
    char buf[16];
    int height,width,maxval;
    unsigned char *input_img;
    fptr = fopen(filename,"rb");
    if(!fptr)
    {
        printf("Unable to open file '%s'\n",filename);
        return NULL;
    }
    // magic number line (e.g. "P6")
    if (!fgets(buf, sizeof(buf), fptr))
    {
        printf("Error reading format\n");
        fclose(fptr); // BUG FIX: the handle leaked on every early return
        return NULL;
    }
    // skip comment lines starting with '#'
    int c = getc(fptr);
    while(c == '#'){
        while(getc(fptr)!='\n');
        c = getc(fptr);
    }
    ungetc(c, fptr);
    if(fscanf(fptr,"%d %d",&height,&width) !=2){
        printf("ERROR Reading Dimension\n");
        fclose(fptr);
        return NULL;
    }
    if(fscanf(fptr,"%d",&maxval)!=1){
        printf("ERROR Reading MAXDEPTH\n");
        fclose(fptr);
        return NULL;
    }
    // consume the single whitespace after the header
    while(fgetc(fptr) != '\n');
    printf("%d\t%d\t%d\n",height,width,maxval);
    int pix = width*height;
    input_img = (unsigned char*)(malloc((3*pix)*sizeof(unsigned char)));
    if (fread(input_img,sizeof(unsigned char),3*pix, fptr) != (size_t)(3*pix))
    {
        printf("Error loading image '%s'\n", filename);
        free(input_img); // BUG FIX: the pixel buffer leaked on a short read
        fclose(fptr);
        return NULL;
    }
    *height_out = height;
    *width_out = width;
    *maxval_out = maxval;
    fclose(fptr);
    printf("Image read successfully\n");
    return input_img;
}
// Writes output_img as a binary PPM (P6) file named "output.ppm".
// Returns 1 on success, 0 if the file could not be opened.
int writeImg(int width, int height, int maxval, unsigned char *output_img)
{
    FILE *fptr;
    fptr = fopen("output.ppm","wb");
    if(!fptr)
    {
        printf("Error opening file\n");
        return 0;
    }
    fprintf(fptr,"P6\n");
    // NOTE(review): PPM stores "width height" but height is written first
    // here; readImg reads the pair in the same swapped order, so the round
    // trip is self-consistent -- confirm before changing either side.
    fprintf(fptr, "%d %d\n",height,width);
    // rgb component depth
    fprintf(fptr, "%d\n",maxval);
    int pix = 3*width*height;
    // pixel data
    int j;
    if((j=fwrite(output_img,sizeof(unsigned char),pix, fptr))!=pix)
    {
        printf("ERROR WRITING %d\n",j);
    }
    fclose(fptr);
    return 1;
}
// Driver: read a PPM, run the median-filter kernel, time it with CUDA
// events, write the result to output.ppm, and print the elapsed time.
int main(int argc, char **argv)
{
    int height,width,maxval;
    unsigned char *input_img,*output_img;
    unsigned char *dev_input_img,*dev_output_img;
    input_img = readImg(argv[1], &height, &width, &maxval);
    // BUG FIX: readImg returns NULL on failure; the previous code kept
    // going and passed the NULL pointer to cudaMemcpy.
    if(input_img == NULL)
        return 1;
    int pix = width*height;
    cudaMalloc((void**)&dev_input_img,3*pix*sizeof(unsigned char));
    cudaMalloc((void**)&dev_output_img,3*pix*sizeof(unsigned char));
    cudaMemcpy(dev_input_img,input_img,3*pix*sizeof(unsigned char),cudaMemcpyHostToDevice);
    // one BLOCK_WIDTH x BLOCK_WIDTH block per image tile
    int BlockX = ceil(((float)height)/BLOCK_WIDTH);
    int BlockY = ceil(((float)width)/BLOCK_WIDTH);
    dim3 dimGrid(BlockX, BlockY,1);
    dim3 dimBlock(BLOCK_WIDTH,BLOCK_WIDTH,1);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    convolution_kernel<<<dimGrid,dimBlock>>>(dev_input_img, dev_output_img, height, width);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);
    float parallel_time = 0;
    cudaEventElapsedTime(&parallel_time, start, stop);
    cudaDeviceSynchronize();
    output_img = (unsigned char*)(malloc(3*pix*sizeof(unsigned char)));
    cudaMemcpy(output_img,dev_output_img,3*pix*sizeof(unsigned char),cudaMemcpyDeviceToHost);
    int i = writeImg(width,height,maxval,output_img);
    if(i==0)
        return 1;
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
    {
        // print the CUDA error message and exit
        printf("CUDA error: %s\n", cudaGetErrorString(error));
        exit(-1);
    }
    printf("%f\n",parallel_time);
    // BUG FIX: release host and device buffers (previously leaked)
    free(input_img);
    free(output_img);
    cudaFree(dev_input_img);
    cudaFree(dev_output_img);
    return 0;
}
19,788 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (see the file header: "Do
// not modify"). It threads a single accumulator `comp` through nested
// loops and branches of float arithmetic, then prints the result with
// full precision; only comments are added here.
__global__
void compute(float comp, int var_1,int var_2,float var_3,float var_4,float var_5,float var_6,int var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float* var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26,float var_27,float var_28,float var_29,float var_30,float var_31,float var_32,float var_33,float var_34,float var_35,float var_36,float var_37,float var_38,float var_39,float var_40,float var_41,float var_42) {
for (int i=0; i < var_1; ++i) {
  for (int i=0; i < var_2; ++i) {
    if (comp < -1.3991E35f + var_3 * +1.4162E9f / var_4) {
      if (comp <= -1.1995E-9f - -1.7017E-37f / var_5 / (var_6 + +1.8188E-35f)) {
        float tmp_1 = (var_8 / var_9 - +0.0f);
        comp += tmp_1 / (+1.9210E35f / var_10 * -1.7824E35f + (-1.4496E-37f * expf((var_11 - fmodf(+1.2371E-44f - (-1.4016E-36f + var_12), (+1.4381E36f / +1.3855E34f * +1.6777E-27f))))));
        // var_13 is a 10-element buffer from initPointer; var_7 bounds the walk
        for (int i=0; i < var_7; ++i) {
          var_13[i] = +0.0f * powf((-1.0946E5f * (+1.9144E-43f + var_14 - var_15 - var_16 / var_17)), var_18 - (-1.0446E-35f + var_19 * (+1.2310E-35f / (var_20 / var_21))));
          float tmp_2 = +1.2608E-22f;
          comp = tmp_2 * var_13[i] + (+1.0021E35f - floorf((+1.6976E36f / var_22 - (var_23 - tanhf(var_24 / (var_25 * (var_26 * -0.0f)))))));
        }
        if (comp <= (+0.0f * (var_27 - +1.3131E34f))) {
          comp += var_28 - +1.9225E36f;
        }
        if (comp > (var_29 - (-1.7616E-41f * (var_30 - var_31)))) {
          comp = var_32 + -1.3167E36f - (-1.4523E1f * var_33 / (var_34 / var_35));
          float tmp_3 = +1.6180E-44f / var_36 - (var_37 + var_38 - (-1.8663E-37f + -0.0f));
          comp = tmp_3 / (-1.2382E35f * powf((+1.7628E-42f + var_39 / ldexpf(var_40 / coshf(-1.5175E35f / var_41), 2)), var_42 - +0.0f));
        }
      }
    }
  }
}
// final value printed from the device (single-thread launch)
printf("%.17g\n", comp);
}
// Allocates a 10-element float buffer with every slot set to v.
// Ownership passes to the caller (freed implicitly at process exit here).
float* initPointer(float v) {
  float* buf = (float*) malloc(sizeof(float)*10);
  for (int k = 0; k < 10; ++k)
    buf[k] = v;
  return buf;
}
// Auto-generated harness: converts argv[1..43] to the kernel's scalar and
// pointer arguments and runs `compute` on a single thread.
int main(int argc, char** argv) {
/* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float tmp_5 = atof(argv[5]);
  float tmp_6 = atof(argv[6]);
  float tmp_7 = atof(argv[7]);
  int tmp_8 = atoi(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float tmp_12 = atof(argv[12]);
  float tmp_13 = atof(argv[13]);
  // 10-element device-visible... NOTE(review): this buffer is host malloc'd
  // yet dereferenced in the kernel -- relies on unified addressing; confirm.
  float* tmp_14 = initPointer( atof(argv[14]) );
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  float tmp_18 = atof(argv[18]);
  float tmp_19 = atof(argv[19]);
  float tmp_20 = atof(argv[20]);
  float tmp_21 = atof(argv[21]);
  float tmp_22 = atof(argv[22]);
  float tmp_23 = atof(argv[23]);
  float tmp_24 = atof(argv[24]);
  float tmp_25 = atof(argv[25]);
  float tmp_26 = atof(argv[26]);
  float tmp_27 = atof(argv[27]);
  float tmp_28 = atof(argv[28]);
  float tmp_29 = atof(argv[29]);
  float tmp_30 = atof(argv[30]);
  float tmp_31 = atof(argv[31]);
  float tmp_32 = atof(argv[32]);
  float tmp_33 = atof(argv[33]);
  float tmp_34 = atof(argv[34]);
  float tmp_35 = atof(argv[35]);
  float tmp_36 = atof(argv[36]);
  float tmp_37 = atof(argv[37]);
  float tmp_38 = atof(argv[38]);
  float tmp_39 = atof(argv[39]);
  float tmp_40 = atof(argv[40]);
  float tmp_41 = atof(argv[41]);
  float tmp_42 = atof(argv[42]);
  float tmp_43 = atof(argv[43]);
  // single-thread launch; sync so the device printf flushes before exit
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29,tmp_30,tmp_31,tmp_32,tmp_33,tmp_34,tmp_35,tmp_36,tmp_37,tmp_38,tmp_39,tmp_40,tmp_41,tmp_42,tmp_43);
  cudaDeviceSynchronize();
  return 0;
}
|
19,789 | #include "includes.h"
/* Tiled matrix multiply, C(MxN) = A * B, accumulating in double.
   A is row-major MxK. NOTE(review): B is indexed as B[col * K + ...] and
   staged transposed into SB, so B is expected column-major (or already
   transposed) -- confirm against the caller. TILE_WIDTH comes from
   includes.h; blockDim must be TILE_WIDTH x TILE_WIDTH. */
__global__ void matmul_double(double* A, double* B , double* C, int M, int N, int K)
{
  int bx = blockIdx.x ;
  int by = blockIdx.y ;
  int tx = threadIdx.x ;
  int ty = threadIdx.y ;
  int row = by * TILE_WIDTH + ty ;
  int col = bx * TILE_WIDTH + tx ;
  // +1 padding on the inner dimension avoids shared-memory bank conflicts
  __shared__ double SA[TILE_WIDTH][TILE_WIDTH+1] ;
  __shared__ double SB[TILE_WIDTH][TILE_WIDTH+1] ;
  double Csub = 0;
  // sweep the K dimension one TILE_WIDTH slab at a time
  for (int i = 0; i < (K-1)/TILE_WIDTH +1 ; ++i)
  {
    /* code */
    if ( (row < M) && (i * TILE_WIDTH + tx < K ) ){
      SA[ty][tx] = A[row*K + i * TILE_WIDTH + tx] ;
    }
    else{
      // zero-pad the partial tile at the matrix edges
      SA[ty][tx] = 0;
    }
    if ( (col < N ) && ( i * TILE_WIDTH + ty < K)){
      SB[tx][ty] = B[ col * K + i*TILE_WIDTH + ty] ;
    }
    else{
      SB[tx][ty] = 0;
    }
    // barrier: both tiles fully loaded before use
    __syncthreads() ;
    for (int k = 0; k < TILE_WIDTH; ++k){
      Csub += SA[ty][k]*SB[tx][k] ;
    }
    // barrier: all reads done before the tiles are overwritten
    __syncthreads() ;
  }
  //C[row*n + col] = Csub ;
  if ( (row < M ) && ( col < N )){
    C[ row * N + col] = Csub ;
  }
}
19,790 | #include "includes.h"
// C = A + B for n x n row-major matrices; one thread per element,
// launched over a 2D grid, with a guard for the ragged tail.
__global__ void matrixAddKernel(float* A, float* B, float* C, int n)
{
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= n || c >= n)
        return;
    int idx = r * n + c;
    C[idx] = A[idx] + B[idx];
}
19,791 | #include"stdio.h"
#include<cuda_runtime.h>
#include <sys/time.h>
#define N 1024
// Kernel definition
// NOTE: despite the historical name, this stores the elementwise PRODUCT
// A[i]*B[i] into C[i]; the 1000-iteration loop just repeats the same
// store so the timing loop in main() has work to measure.
// Single-block launch assumed (indexes by threadIdx.x only).
__global__ void VecAdd(float* A, float* B, float* C)
{
    int i = threadIdx.x;
    for (int rep = 0; rep < 1000; rep++)
    {
        C[i] = (A[i] * B[i]);
    }
}
// Wall-clock time in microseconds since the Unix epoch.
long getCurrentTime()
{
    struct timeval now;
    gettimeofday(&now, NULL);
    return now.tv_sec * 1000000 + now.tv_usec;
}
// Host reference for one element: writes A[i]*B[i] into C[i], repeated
// 1000 times to mirror the GPU kernel's timing loop.
void cpu_VecAdd(int i, float* A, float* B, float* C)
{
    for (int rep = 0; rep < 1000; rep++)
    {
        C[i] = (A[i] * B[i]);
    }
}
// Times the element-product kernel on the GPU against the CPU reference
// and prints both durations in microseconds.
int main()
{
    // Kernel invocation with N threads
    printf("Hello,World\n");
    float *A=new float[N],*B=new float[N],*C=new float[N];
    for(int i=0;i<N;i++)
    {
        A[i]=i;
        B[i]=2*i;
    }
    size_t size = N * sizeof(float);
    float* d_A;
    cudaMalloc(&d_A, size);
    float* d_B;
    cudaMalloc(&d_B, size);
    float* d_C;
    cudaMalloc(&d_C, size);
    float *e=new float[N];
    // GPU timing covers transfers + kernel + readback (the final blocking
    // cudaMemcpy also synchronizes with the kernel)
    long st=getCurrentTime();
    cudaMemcpy(d_A, A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, size, cudaMemcpyHostToDevice);
    VecAdd<<<1, N>>>(d_A, d_B, d_C);
    cudaMemcpy(C, d_C, size, cudaMemcpyDeviceToHost);
    long ed=getCurrentTime();
    printf("gpu running time:%ld\n",ed-st);
    st=getCurrentTime();
    for(int i=0;i<N;i++)
        cpu_VecAdd(i,A,B,e);
    ed=getCurrentTime();
    printf("cpu running time:%ld\n",ed-st);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // BUG FIX: the four new[] allocations were never released
    delete[] A;
    delete[] B;
    delete[] C;
    delete[] e;
    printf("\n");
    return 0;
}
|
19,792 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// 1 block of threads --> 8 values, grid = 1
// Prints each thread's index and the element it reads.
// Valid only for a single-block launch: threadIdx.x alone indexes input.
__global__ void unique_idx_calc_threadIdx(int * input)
{
    const int tid = threadIdx.x;
    printf("threadIdx : %d, value : %d \n", tid, input[tid]);
}
// 4 blocks, each block - 4 threads.
// Computes the flat global index (block offset + thread index) for a 1D
// multi-block launch and prints the launch coordinates plus the value read.
__global__ void unique_gid_calculation(int * input)
{
    const int gid = blockIdx.x * blockDim.x + threadIdx.x;
    printf("blockIdx.x : %d, threadIdx.x : %d, blockDim.x : %d, gridDim.x: %d, value : %d \n", blockIdx.x, threadIdx.x, blockDim.x , gridDim.x, input[gid]);
}
// Demo driver: uploads 16 ints and launches 4 blocks of 4 threads so every
// element is printed once with its global index.
int main()
{
    const int array_size = 16;
    const int array_byte_size = sizeof(int) * array_size;
    int h_data[] = {23, 9, 4, 53, 64, 12, 1, 33, 22, 11, 9, 12, 13, 89, 90, 77};
    // echo the host data first for comparison
    for (int i = 0; i < array_size; i++)
        printf("%d ", h_data[i]);
    printf ("\n \n");
    int * d_data;
    cudaMalloc((void **)&d_data, array_byte_size);
    cudaMemcpy(d_data, h_data, array_byte_size, cudaMemcpyHostToDevice);
    dim3 block(4);
    dim3 grid(4);
    unique_gid_calculation<<<grid, block>>>(d_data);
    // wait for device printf output before tearing the context down
    cudaDeviceSynchronize();
    cudaDeviceReset();
}
|
19,793 | //
// Created by hina on 2021-03-13.
//
#include "activation.cuh"
// ReLU activation: pass positive inputs through, clamp the rest to zero.
__device__ float activation::func_relu(float input_num)
{
    if (input_num > 0)
        return input_num;
    return 0.0f;
}
// ReLU derivative: 1 on the strictly-positive side, 0 elsewhere.
__device__ float activation::deriv_relu(float input_num)
{
    if (input_num > 0)
        return 1.0f;
    return 0.0f;
}
19,794 | #include "includes.h"
// Bias gradient: one thread per row sums pre_grad[i, 0..cols) into
// output[i] (reduction over the column/batch axis of a rows x cols
// row-major matrix).
__global__ void bias_grad(float *pre_grad, float *output, int rows, int cols) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= rows) return;
    // IMPROVED: accumulate in a register instead of a global-memory
    // read-modify-write every iteration; same result (identical float
    // addition order), far fewer global transactions.
    float sum = 0.0f;
    for (int k = 0; k < cols; k++) {
        sum += pre_grad[i * cols + k];
    }
    output[i] = sum;
}
19,795 | #include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#include <unistd.h>
#define HASH_STEP 720
#define WARP_SIZE 32
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Reports a failed CUDA call with file/line context and (by default)
// aborts with the error code as the exit status.
// BUG FIX: `file` is now const char* -- __FILE__ expands to a string
// literal, which cannot bind to a non-const char* in C++11 and later.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code != cudaSuccess)
    {
        fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
        if (abort) exit(code);
    }
}
// Each thread fills one row of (numDim+1) floats in d_random_r: numDim
// uniform samples r_i followed by an offset b_i scaled into [0, HASH_STEP].
__global__ void generate_randoms(float *d_random_r, int numDim, unsigned long seed) {
    const int row = threadIdx.x + blockIdx.x * blockDim.x;
    curandState state;
    // per-thread subsequence so rows are independent for the same seed
    curand_init(seed, row, 0, &state);
    float *dst = &d_random_r[row * (numDim + 1)];
    for (int d = 0; d < numDim; d++) {
        dst[d] = curand_uniform(&state); // ri (0,1]
    }
    dst[numDim] = curand_uniform(&state) * HASH_STEP; // bi in (0, HASH_STEP]
}
// Dot product of two numDim-length float vectors.
__device__ float matrix_multiplication(float *d_data, float *d_random_r, int numDim) {
    float acc = 0;
    for (int d = 0; d < numDim; d++) {
        acc += d_data[d] * d_random_r[d];
    }
    return acc;
}
/* LSH bucket assignment: code = floor((<x, r> + b) / HASH_STEP), where r/b
   come from the thread's row of d_random_r.
   NOTE(review): idx/numDim selects the data vector while idx*(numDim+1)
   selects the random row, and the `seed` parameter is never used here --
   confirm the intended (object, hash-function) mapping against the launch
   loop in lsh(). */
__global__ void hash_f(float *d_data, int *d_hash_codes, float *d_random_r, int numDim, unsigned long seed) {
int idx = threadIdx.x + blockIdx.x*blockDim.x;
d_hash_codes[idx] = floorf((matrix_multiplication(&d_data[(idx/numDim)*numDim],&d_random_r[idx*(numDim+1)],numDim)
+ d_random_r[idx*(numDim+1)+numDim])/HASH_STEP);
}
// First radix pass: each thread scans its 64 contiguous hash codes and
// counts digit values 0 and 1 at position (numDim - digit). Counts are
// stored two slots ahead so the host's prefix sum yields exclusive offsets.
__global__ void initial_count(int *d_hash_codes, int *d_offsets, int digit, int numDim, int NTHREADS) {
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    int zeros = 0, ones = 0;
    for (int i = 0; i < 64; i++) {
        int code = d_hash_codes[idx*numDim*64 + i*numDim + numDim - digit];
        if (code == 0)
            zeros++;
        else if (code == 1)
            ones++;
    }
    d_offsets[idx*2+2] = zeros; // First 2 slots are 0 in the table
    d_offsets[idx*2+3] = ones;  // Same
}
// Subsequent radix passes: like initial_count, but the 64 codes per thread
// are addressed indirectly through the current permutation in d_ids.
__global__ void lsd_count(int *d_hash_codes, int *d_offsets, int *d_ids, int digit, int numDim, int NTHREADS) {
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    int zeros = 0, ones = 0;
    for (int i = 0; i < 64; i++) {
        int code = d_hash_codes[d_ids[idx*64+i]*numDim + numDim - digit];
        if (code == 0)
            zeros++;
        else if (code == 1)
            ones++;
    }
    d_offsets[idx*2+2] = zeros; // First 2 slots are 0 in the table
    d_offsets[idx*2+3] = ones;  // Same
}
/* Scatter phase of the first radix pass: using the prefix-summed offsets,
   each thread writes the ids of its 64 codes into d_ids_1 -- zeros packed
   first, ones after all zeros (d_offsets[2*NTHREADS] is the global zero
   count). Stable within each thread's chunk. */
__global__ void initialize_sorting(int *d_hash_codes, int *d_offsets, int *d_ids_1, int digit, int numDim, int NTHREADS) {
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int offset_zero = d_offsets[idx*2];
int offset_one = d_offsets[idx*2+1]+d_offsets[2*NTHREADS]; // d_offsets[2*NTHREADS] holds the total number of zeros
for(int i=0;i<64;i++) {
if(d_hash_codes[idx*numDim*64+i*numDim+numDim-digit] == 0) {
d_ids_1[offset_zero] = idx*64+i;
offset_zero++;
}
else if(d_hash_codes[idx*numDim*64+i*numDim+numDim-digit] == 1){
d_ids_1[offset_one] = idx*64+i;
offset_one++;
}
}
// At this point offset_zero + offset_one = 64
}
/* Scatter phase of later radix passes: reads the current permutation from
   d_ids_1 and writes the re-ordered permutation to d_ids_2 (ping-pong
   buffers), using the prefix-summed per-thread offsets like
   initialize_sorting. */
__global__ void radix_sort(int *d_hash_codes, int *d_offsets, int *d_ids_1, int *d_ids_2, int digit, int numDim, int NTHREADS) {
int idx = threadIdx.x + blockIdx.x*blockDim.x;
int offset_zero = d_offsets[idx*2];
int offset_one = d_offsets[idx*2+1]+d_offsets[2*NTHREADS]; // d_offsets[2*NTHREADS] holds the total number of zeros
for(int i=0;i<64;i++) {
if(d_hash_codes[d_ids_1[idx*64+i]*numDim+numDim-digit] == 0) {
d_ids_2[offset_zero] = d_ids_1[idx*64+i];
offset_zero++;
}
else if(d_hash_codes[d_ids_1[idx*64+i]*numDim+numDim-digit] == 1){
d_ids_2[offset_one] = d_ids_1[idx*64+i];
offset_one++;
}
}
// At this point offset_zero + offset_one = 64
}
// Gather pass: materializes the sorted table by copying each numDim-int
// hash code to its sorted position; 64 codes handled per thread.
__global__ void write_sorted_hash_codes(int *d_hash_codes, int *d_sorted_hash_codes, int *d_ids, int numDim) {
    int idx = threadIdx.x + blockIdx.x*blockDim.x;
    for (int i = 0; i < 64; i++) {
        const int src = d_ids[idx*64 + i] * numDim;
        const int dst = (idx*64 + i) * numDim;
        for (int j = 0; j < numDim; j++) {
            d_sorted_hash_codes[dst + j] = d_hash_codes[src + j];
        }
    }
}
// End-to-end LSH pipeline: generates numDim hash digits per object on the
// GPU, then radix-sorts the codes (LSD, one binary digit per pass) using
// host-side prefix sums between passes.
// BUG FIX: every host and device allocation was previously leaked; all
// buffers are released before returning.
void lsh(int numObjects, int numDim, float *h_data) {
    /* Generate hash codes */
    int NTHREADS = ceil((float)numObjects*numDim/(float)(64*WARP_SIZE)); //Concurrent threads per loop
    int BLOCK_SIZE = 256;
    int GRID_SIZE = ceil((float)NTHREADS/(float)BLOCK_SIZE);
    dim3 grid(GRID_SIZE,1);
    dim3 threads(BLOCK_SIZE,1);
    printf("NTHREADS:%d GRID_SIZE:%d BLOCK_SIZE:%d\n", NTHREADS,GRID_SIZE,BLOCK_SIZE);
    float *d_random_r;
    cudaMalloc((void**)&d_random_r,NTHREADS*(numDim+1)*sizeof(float)); // numDim+1 because ri has numDim values and bi has 1
    float *d_data;
    cudaMalloc((void**)&d_data,numObjects*numDim*sizeof(float));
    cudaMemcpy(d_data,h_data,numObjects*numDim*sizeof(float),cudaMemcpyHostToDevice);
    int *d_hash_codes;
    cudaMalloc((void**)&d_hash_codes, numObjects*numDim*sizeof(int));
    for(int i=0;i<64*WARP_SIZE;i++) {
        generate_randoms<<<grid,threads>>>(d_random_r,numDim,time(NULL));
        hash_f<<<grid,threads>>>(&d_data[i*NTHREADS],&d_hash_codes[i*NTHREADS],d_random_r,numDim,time(NULL));
    }
    cudaFree(d_random_r);
    cudaFree(d_data);
    int *h_hash_codes;
    h_hash_codes = (int *)malloc(numObjects*numDim*sizeof(int));
    cudaMemcpy(h_hash_codes,d_hash_codes,numObjects*numDim*sizeof(int),cudaMemcpyDeviceToHost);
    /* Sort hash codes */
    NTHREADS = ceil((float)numObjects/(float)64); //Concurrent threads per loop
    BLOCK_SIZE = 256;
    GRID_SIZE = ceil((float)NTHREADS/(float)BLOCK_SIZE);
    dim3 grid_2(GRID_SIZE,1);
    dim3 threads_2(BLOCK_SIZE,1);
    printf("NTHREADS:%d GRID_SIZE:%d BLOCK_SIZE:%d\n", NTHREADS,GRID_SIZE,BLOCK_SIZE);
    int *d_ids_1;
    cudaMalloc((void**)&d_ids_1,numObjects*sizeof(int));
    int *d_ids_2;
    cudaMalloc((void**)&d_ids_2,numObjects*sizeof(int));
    int *d_offsets;
    cudaMalloc((void**)&d_offsets,(2*NTHREADS+2)*sizeof(int));
    int *d_sorted_hash_codes;
    cudaMalloc((void**)&d_sorted_hash_codes, numObjects*numDim*sizeof(int));
    initial_count<<<grid_2,threads_2>>>(d_hash_codes,d_offsets,1,numDim,NTHREADS);
    cudaDeviceSynchronize();
    int *h_offsets;
    h_offsets = (int *)malloc((2*NTHREADS+2)*sizeof(int));
    cudaMemcpy(h_offsets,d_offsets,(2*NTHREADS+2)*sizeof(int),cudaMemcpyDeviceToHost);
    //Calculate offsets serially (interleaved exclusive prefix sums)
    h_offsets[0] = 0;
    h_offsets[1] = 0;
    for(int j=2;j<(2*NTHREADS+2);j++) {
        h_offsets[j] += h_offsets[j-2];
    }
    cudaMemcpy(d_offsets,h_offsets,(2*NTHREADS+2)*sizeof(int),cudaMemcpyHostToDevice);
    initialize_sorting<<<grid_2,threads_2>>>(d_hash_codes,d_offsets,d_ids_1,1,numDim,NTHREADS);
    // remaining digits: ping-pong the id permutation between d_ids_1/d_ids_2
    for(int i=1;i<numDim;i++) {
        if (i%2!=0) {
            lsd_count<<<grid_2,threads_2>>>(d_hash_codes,d_offsets,d_ids_1,(i+1),numDim,NTHREADS);
            cudaMemcpy(h_offsets,d_offsets,(2*NTHREADS+2)*sizeof(int),cudaMemcpyDeviceToHost);
            //Calculate offsets serially
            h_offsets[0] = 0;
            h_offsets[1] = 0;
            for(int j=2;j<(2*NTHREADS+2);j++) {
                h_offsets[j] += h_offsets[j-2];
            }
            cudaMemcpy(d_offsets,h_offsets,(2*NTHREADS+2)*sizeof(int),cudaMemcpyHostToDevice);
            radix_sort<<<grid_2, threads_2>>>(d_hash_codes,d_offsets,d_ids_1,d_ids_2,(i+1),numDim,NTHREADS);
        }
        else {
            lsd_count<<<grid_2,threads_2>>>(d_hash_codes,d_offsets,d_ids_2,(i+1),numDim,NTHREADS);
            cudaMemcpy(h_offsets,d_offsets,(2*NTHREADS+2)*sizeof(int),cudaMemcpyDeviceToHost);
            //Calculate offsets serially
            h_offsets[0] = 0;
            h_offsets[1] = 0;
            for(int j=2;j<(2*NTHREADS+2);j++) {
                h_offsets[j] += h_offsets[j-2];
            }
            cudaMemcpy(d_offsets,h_offsets,(2*NTHREADS+2)*sizeof(int),cudaMemcpyHostToDevice);
            radix_sort<<<grid_2, threads_2>>>(d_hash_codes,d_offsets,d_ids_2,d_ids_1,(i+1),numDim,NTHREADS);
        }
    }
    // NOTE(review): after the loop the final permutation lives in d_ids_2
    // only when numDim is even (last pass i = numDim-1 odd); for odd numDim
    // it ends in d_ids_1 -- confirm d_ids_2 is correct here.
    write_sorted_hash_codes<<<grid_2, threads_2>>>(d_hash_codes,d_sorted_hash_codes,d_ids_2,numDim);
    int *h_sorted_hash_codes;
    h_sorted_hash_codes = (int *)malloc(numObjects*numDim*sizeof(int));
    cudaMemcpy(h_sorted_hash_codes,d_sorted_hash_codes,numObjects*numDim*sizeof(int),cudaMemcpyDeviceToHost);
    // BUG FIX: release everything (the sorted result is not returned, so
    // freeing it preserves observable behavior)
    free(h_hash_codes);
    free(h_offsets);
    free(h_sorted_hash_codes);
    cudaFree(d_hash_codes);
    cudaFree(d_ids_1);
    cudaFree(d_ids_2);
    cudaFree(d_offsets);
    cudaFree(d_sorted_hash_codes);
}
int main(int argc, char** argv) {
    /* Driver: reads numObjects x numDim row-major floats from data.bin,
     * runs lsh() on them, and reports wall-clock time.
     * Usage: ./prog <numObjects> <numDim> */
    if (argc < 3) {
        fprintf(stderr, "Usage: %s numObjects numDim\n", argv[0]);
        return 1;
    }
    int numObjects = atoi(argv[1]);
    int numDim = atoi(argv[2]);
    FILE *dataset;
    float *h_data;
    h_data = (float *)malloc((size_t)numObjects*numDim*sizeof(float));
    dataset = fopen("/export/home/dhmtasos/Ergasia4/data.bin", "rb");
    if(dataset == NULL) {
        printf("Error opening data.bin\n");
        /* Original fell through and called fread on a NULL stream. */
        free(h_data);
        return 1;
    }
    size_t a = fread(h_data, sizeof(float), (size_t)numObjects*numDim, dataset);
    if(a != (size_t)numObjects*numDim) {
        printf("Error reading data from data.bin\n");
    }
    fclose(dataset);
    struct timeval first, second, lapsed;
    struct timezone tzp;
    gettimeofday(&first, &tzp);
    lsh(numObjects,numDim,h_data);
    gettimeofday(&second, &tzp);
    /* Borrow one second so the microsecond subtraction never underflows. */
    if(first.tv_usec>second.tv_usec){
        second.tv_usec += 1000000;
        second.tv_sec--;
    }
    lapsed.tv_usec = second.tv_usec - first.tv_usec;
    lapsed.tv_sec = second.tv_sec - first.tv_sec;
    /* tv_sec/tv_usec are not int on POSIX; %d was a format mismatch. */
    printf("Time elapsed: %ld, %ld s\n", (long)lapsed.tv_sec, (long)lapsed.tv_usec);
    free(h_data);
    return 0;
}
19,796 | __host__ __device__
int get_raw(int idx, int K_k) {
    // Row index of flat offset idx in a row-major matrix with K_k columns.
    int row = idx / K_k;
    return row;
}
__host__ __device__
int get_col(int idx, int K_k) {
    // Column index of flat offset idx in a row-major matrix with K_k columns.
    int col = idx % K_k;
    return col;
}
__host__ __device__
void get_mul(double* res, double* a, double* b, int idx, int K_m, int K_n, int K_k ) {
    // Accumulate one element of res = a * b into res, where a is K_m x K_n,
    // b is K_n x K_k and res is K_m x K_k, all row-major. idx is the flat
    // index of the output element this call produces (K_m itself is unused
    // but kept for the caller's signature).
    int m = idx / K_k;   // output row
    int k = idx % K_k;   // output column
    for (int n = 0; n < K_n; n++)
        // BUG FIX: the original read a[idx] — an index into the K_m x K_k
        // output shape — instead of a's (m, n) element, so the loop computed
        // a(m,k) * sum_n b(n,k) rather than the dot product row(a,m).col(b,k).
        res[m * K_k + k] += a[m * K_n + n] * b[n * K_k + k];
}
// Matrix-multiply kernel: one thread per element of the K_m x K_k output.
// Launch with at least K_m*K_k threads total; extras fall through the guard.
__global__
void mmul(double* a, double* b, double* res, int K_m, int K_n, int K_k) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid >= K_m * K_k)
        return;
    get_mul(res, a, b, tid, K_m, K_n, K_k);
}
19,797 | #include <stdio.h>
#include <cuda.h>
#define n 10
__global__ void add(int*a, int*max)
{
    // Parallel max of the n-element array a, folded into *max.
    // *max must be seeded by the caller with some array element (main uses
    // a[0]), so the atomic fold can only raise it to the true maximum.
    // BUG FIX: the original had every in-range thread run a full serial scan
    // with plain (non-atomic) writes to *max — correct only for the <<<1,1>>>
    // launch and racy for anything wider. atomicMax is correct for any
    // launch configuration and gives the same result under <<<1,1>>>.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
    {
        atomicMax(max, a[i]);
    }
}
int main()
{
    // Reads n integers from stdin, finds their maximum on the GPU, prints it.
    int a[n];
    int i;
    int max;
    int* dev_a;
    int* dev_max;
    // Read input before allocating device memory so a bad value cannot
    // leak the allocations.
    printf("\narray elements:\n");
    for(i=0;i<n;i++)
    {
        // BUG FIX: the original ignored scanf's return value, silently
        // running the kernel on uninitialized stack garbage on bad input.
        if (scanf("%d",&a[i]) != 1)
        {
            fprintf(stderr, "Error: expected %d integers on stdin\n", n);
            return 1;
        }
    }
    max = a[0];   // seed with a real element so the device fold is well-defined
    cudaMalloc((void**)&dev_max, sizeof(int));
    cudaMalloc((void**)&dev_a, n * sizeof(int));
    cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_max, &max, sizeof(int), cudaMemcpyHostToDevice);
    add<<<1,1>>>(dev_a, dev_max);
    // Blocking D2H copy doubles as the synchronization point for the kernel.
    cudaMemcpy(&max, dev_max, sizeof(int), cudaMemcpyDeviceToHost);
    printf("\nMax is %d\n",max);
    cudaFree(dev_max);
    cudaFree(dev_a);
    cudaDeviceReset();
    return 0;
}
|
19,798 | #include<bits/stdc++.h>
int main (void) {
    // Emit the canonical greeting on stdout and exit successfully.
    fputs("Hello World!\n", stdout);
    return 0;
}
|
19,799 | #include "includes.h"
// Backward pass of tanh: dx[i] *= (1 - x[i]^2) for the first n elements.
// NOTE(review): this formula presumes x holds tanh *outputs* (the usual
// convention for this gradient) — confirm against the forward pass.
// Supports 2-D grids: global thread id is linear in (blockIdx.y, blockIdx.x).
__global__ void bcnn_op_cuda_tanh_grad_kernel(int n, float *x, float *dx)
{
    int idx = threadIdx.x + blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x);
    if (idx >= n)
        return;
    float y = x[idx];
    dx[idx] *= 1.0f - y * y;
}
19,800 | // Copyright (c) 2012-2017 VideoStitch SAS
// Copyright (c) 2018 stitchEm
#include <cstdio>
int main() {
  // Queries every visible CUDA device, requires them all to share one compute
  // capability, prints it, and also returns it through the exit code.
  int deviceCount = 0;
  cudaError_t status = cudaGetDeviceCount(&deviceCount);
  if (status != cudaSuccess) {
    fprintf(stderr, "cudaGetDeviceCount failed: %s\n", cudaGetErrorString(status));
    return 1;
  }
  if (deviceCount == 0) {
    fprintf(stderr, "No CUDA device found!\n");
    return 1;
  }
  cudaDeviceProp props;
  int prevMajor = 0;
  int prevMinor = 0;
  for (int dev = 0; dev < deviceCount; ++dev) {
    status = cudaGetDeviceProperties(&props, dev);
    if (status != cudaSuccess) {
      fprintf(stderr, "cudaGetDeviceProperties failed: %s\n", cudaGetErrorString(status));
      return 1;
    }
    // Only a single target arch is supported, so all devices must match.
    if (dev > 0 && (props.major != prevMajor || props.minor != prevMinor)) {
      fprintf(stderr, "Multiple CUDA arch not supported at the moment\n");
      return 1;
    }
    prevMajor = props.major;
    prevMinor = props.minor;
  }
  printf("CUDA compute capability: %d%d", props.major, props.minor);
  // The exit code carries the number: nvcc --run on Windows echoes the
  // filename through cl.exe, so stdout alone is not reliable cross-platform.
  return props.major * 10 + props.minor;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.