serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
9,501 | #include <stdio.h>
int main()
{
    // Round-trip three integers through device global memory and print them.
    int h_in[3] = {1, 2, 3};
    int h_out[3] = {0,};
    int *d_buf;

    // Allocate a 3-int buffer on the device.
    cudaMalloc((void **)&d_buf, 3 * sizeof(int));

    // Host -> device copy of the input.
    cudaMemcpy(d_buf, h_in, 3 * sizeof(int), cudaMemcpyHostToDevice);
    // Device -> host copy into the output buffer.
    cudaMemcpy(h_out, d_buf, 3 * sizeof(int), cudaMemcpyDeviceToHost);

    for (int i = 0; i < 3; i++)
        printf("%d\n", h_out[i]);

    cudaFree(d_buf);
    return 0;
}
|
9,502 | #include "sum-reduction.cuh"
#define THREADS_PER_BLOCK_SUM_REDUCTION 512U
// Single-block sum reduction (classic tree reduction in shared memory).
// Accumulates all n elements of list and overwrites list[0] with the total.
// Template parameter blockSize must equal blockDim.x and be a power of two.
template <int blockSize>
__global__ void cuSumReduce__(int *list, int n)
{
// One partial sum per thread; statically sized from the template parameter.
__shared__ int s_data[blockSize];
int tid = threadIdx.x;
int i = threadIdx.x;
// Each thread strides over the input adding two elements per iteration.
int gridSize = blockSize * 2;
int mySum = 0;
while (i < n)
{
mySum += list[i];
// Second element of the pair, guarded for the ragged tail of the input.
if (i + blockSize < n)
{
mySum += list[i + blockSize];
}
i += gridSize;
}
s_data[tid] = mySum;
__syncthreads();
// Tree reduction: the blockSize comparisons are resolved at compile time,
// so dead halving steps are removed entirely from the generated code.
if (blockSize >= 512)
{
if (tid < 256)
{
s_data[tid] = mySum = mySum + s_data[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256)
{
if (tid < 128)
{
s_data[tid] = mySum = mySum + s_data[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128)
{
if (tid < 64)
{
s_data[tid] = mySum = mySum + s_data[tid + 64];
}
__syncthreads();
}
// Final warp: the volatile pointer forces every store/load to actually hit
// shared memory between steps. NOTE(review): this relies on the pre-Volta
// implicit warp-synchronous execution model; on SM70+ (independent thread
// scheduling) each step should be separated by __syncwarp() or rewritten
// with warp shuffles -- confirm the target architecture.
if (tid < 32)
{
volatile int *smem = s_data;
if (blockSize >= 64)
{
smem[tid] = mySum = mySum + smem[tid + 32];
}
if (blockSize >= 32)
{
smem[tid] = mySum = mySum + smem[tid + 16];
}
if (blockSize >= 16)
{
smem[tid] = mySum = mySum + smem[tid + 8];
}
if (blockSize >= 8)
{
smem[tid] = mySum = mySum + smem[tid + 4];
}
if (blockSize >= 4)
{
smem[tid] = mySum = mySum + smem[tid + 2];
}
if (blockSize >= 2)
{
smem[tid] = mySum = mySum + smem[tid + 1];
}
}
// Thread 0 publishes the block total over the first input element.
if (threadIdx.x == 0)
{
list[0] = s_data[0];
}
}
// Round x up to the nearest power of two using the bit-smearing trick:
// after decrementing, OR-folding propagates the highest set bit into every
// lower position, and incrementing yields the next power of two.
int nextPowerOf2__(int x)
{
    x -= 1;
    for (int shift = 1; shift <= 16; shift <<= 1)
        x |= x >> shift;
    return x + 1;
}
// Choose a launch configuration for cuSumReduce__: a single block whose
// thread count is the smallest power of two able to cover n elements (each
// thread initially sums two elements, hence (n-1)>>1), clamped to at least
// 1 thread and at most THREADS_PER_BLOCK_SUM_REDUCTION.
void getThreadAndBlockCountFoSumReduction(int n, int &blocks, int &threads)
{
    // The original ended this statement with a comma operator (`,`) instead
    // of a semicolon -- it happened to work but was a typo.
    threads = max(1, nextPowerOf2__((n - 1) >> 1)); //threads must be power of 2
    threads = min(threads, THREADS_PER_BLOCK_SUM_REDUCTION);
    blocks = 1;
    // printf("block = %d, threads = %d, n = %d, p =%d\n", blocks, threads, n, p );
}
// Host entry point: sums the n ints in device array d_list in place, leaving
// the total in d_list[0]. Dispatches to the template instantiation matching
// the chosen (power-of-two) thread count.
void sum_reduce(int *d_list, int n)
{
int threads, blocks;
getThreadAndBlockCountFoSumReduction(n, blocks, threads);
// NOTE(review): the kernel declares its shared array statically from the
// template parameter, so this dynamic shared-memory size (and its use of
// sizeof(float) for an int buffer -- same size, but misleading) appears to
// be unused; confirm before removing.
int smemSize = (threads <= 32) ? 2 * threads * sizeof(float) : threads * sizeof(float);
switch (threads)
{
case 512:
cuSumReduce__<512> <<< blocks, threads, smemSize >>>(d_list, n); break;
case 256:
cuSumReduce__<256> <<< blocks, threads, smemSize >>>(d_list, n); break;
case 128:
cuSumReduce__<128> <<< blocks, threads, smemSize >>>(d_list, n); break;
case 64:
cuSumReduce__<64> <<< blocks, threads, smemSize >>>(d_list, n); break;
case 32:
cuSumReduce__<32> <<< blocks, threads, smemSize >>>(d_list, n); break;
case 16:
cuSumReduce__<16> <<< blocks, threads, smemSize >>>(d_list, n); break;
case 8:
cuSumReduce__<8> <<< blocks, threads, smemSize >>>(d_list, n); break;
case 4:
cuSumReduce__<4> <<< blocks, threads, smemSize >>>(d_list, n); break;
case 2:
cuSumReduce__<2> <<< blocks, threads, smemSize >>>(d_list, n); break;
case 1:
cuSumReduce__<1> <<< blocks, threads, smemSize >>>(d_list, n); break;
}
}
|
9,503 | #include<iostream>
using namespace std;
// Empty placeholder kernel: launching it exercises the CUDA runtime without
// doing any work.
__global__ void kernel() {
}
int main(){
    // Launch the empty kernel with a single thread, then greet from the host.
    kernel<<<1,1>>>();
    cout << "Hello, World" << endl;
    return 0;
}
|
9,504 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <iostream>
//
////this kernel is an example of inefficient memory access.
////this implementation considers only a 1D grid, and assumes that the mask width is an odd number
//__global__ void convolution_1d_dram(float * input, float* mask, float* output, int array_lenght, int mask_width)
//{
// int thread_index = blockIdx.x*blockDim.x + threadIdx.x;
// float temp_value = 0;
//
// ////we can assigne thread_index-mask_width/2 to automatic variable to save memory bandwidth as shown in following code segment
// int offset = thread_index - mask_width / 2;
// for (int i = 0; i < mask_width; i++)
// {
// if ((offset + i) >= 0 && (offset + i) < array_lenght)
// {
// temp_value += input[offset + i] * mask[i];
// }
// }
//
// /*for (int i = 0; i < mask_width; i++)
// {
// if ((thread_index+i-mask_width/2)>= 0 && (thread_index + i- mask_width / 2) < array_lenght)
// {
// temp_value += input[thread_index + i - mask_width / 2] * mask[i];
// }
// }
// */
// output[thread_index] = temp_value;
// //printf("%d thread value %f \n", thread_index, temp_value);
//}
//
//void print_array(float* my_array, int size)
//{
// for (int i = 0; i < size; i++)
// {
// if (i % 32 == 0)
// {
// printf("\n");
// }
// printf("%.2f,",my_array[i]);
// }
//
// printf("\n");
//}
//
//void run_code_convolution_1()
//{
// int array_lenght = 128 * 2;
// int mask_width = 32;
//
// int array_byte_size = sizeof(float)*array_lenght;
// int mask_byte_size = sizeof(float)*mask_width;
//
// float *h_input_array, *h_mask, *h_output;
// float *d_input_array, *d_mask, *d_output;
//
// //host memory allocation
// h_input_array = (float*)malloc(array_byte_size);
// h_output = (float*)malloc(array_byte_size);
// h_mask = (float*)malloc(mask_byte_size);
//
// //initialize array
// for (int i = 0; i < array_lenght; i++)
// {
// h_input_array[i] = 1.0f;
// }
//
// //initialize mask
// for (int i = 0; i < mask_width; i++)
// {
// h_mask[i] = 1.0f;
// }
//
// dim3 grid(32);
// dim3 block((array_lenght) / grid.x);
//
// //device memory allocation
// cudaMalloc((float**)&d_input_array, array_byte_size);
// cudaMalloc((float**)&d_mask, mask_byte_size);
// cudaMalloc((float**)&d_output, array_byte_size);
//
// //transfer the initiazed arrays to device
// cudaMemcpy(d_input_array, h_input_array, array_byte_size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_mask, h_mask, mask_byte_size, cudaMemcpyHostToDevice);
//
// //kernel launch
// convolution_1d_dram << <grid, block >> > (d_input_array, d_mask, d_output, array_lenght, mask_width);
// //test_kernel << <grid,block >>> (d_input_array);
// cudaDeviceSynchronize();
//
// //copy the output back to the host
// cudaMemcpy(h_output, d_output, array_byte_size, cudaMemcpyDeviceToHost);
//
// //print the arrays
// //printf("Printing input :\n ");
// //print_array(h_input_array,array_lenght);
//
// //printf("Printing mask :\n ");
// //print_array(h_mask, mask_width);
//
// /*printf("Printing output :\n ");
// print_array(h_output, array_lenght);*/
//
// //free the device memory
// cudaFree(d_input_array);
// cudaFree(d_output);
// cudaFree(d_mask);
//
// //free the host memory
// free(h_input_array);
// free(h_output);
// free(h_mask);
//}
//
////int main()
////{
//// run_code_convolution_1();
//// system("pause");
//// return 0;
////} |
9,505 | //**********************************************************************
// *
// University Of North Carolina Charlotte *
// *
//Program: Convolution *
//Description: This program is to do convolution calculation *
// - CUDA *
// - Direct convolution with global memory *
// *
//File Name: naivecon.c , naiveconv_kernel.cl *
//File Version: 1.0 *
//Baseline: Homework_2 *
// *
//Course: ECGR 6090 Heterogeneous Computing *
// *
//Programmed by: Yu Liu *
//Under Suppervision of: Dr. Hamed Tabkhi *
// *
//Input file: images/viptraffic0.ppm ... images/viptraffic119.ppm *
//Output file: none *
//**********************************************************************
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <time.h>
#define BLOCKSIZE 256
#define HEIGHT 160
#define WIDTH 120
#define FLTSIZE 9 //filter size
#define PADDING 0
#define STRIDE 1
//**********************************************************************
// Function Name: convolution (Kernel) *
// Description: - Execute direct(naive) convolution *
// - CUDA_global memory *
// Input file: none *
// Output file: none *
// Return: none *
//**********************************************************************
// One thread per (channel-interleaved) output column and output row.
// col spans WIDTH*3 (R,G,B interleaved); row spans image rows.
__global__ void convolution(unsigned char *image_d, unsigned char *output_d, float* filter, int convWidth, int convHeight, int filerSize)
{
int i, j, col, row;
// NOTE(review): r,g,b are ints accumulating float products -- each product
// is truncated toward zero before accumulation; confirm this is intended.
int r,g,b;
col = blockIdx.x * blockDim.x + threadIdx.x; //image width *3
row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < (WIDTH - PADDING + 1)*3 && row < (HEIGHT - PADDING + 1))
{
r = 0;
g = 0;
b = 0;
for (i = 0; i < filerSize; i++)
{
for (j = 0; j < filerSize; j++)
{
// NOTE(review): col already indexes interleaved channels, yet the
// horizontal filter offset is `+ j` rather than `+ 3*j`, so successive
// taps appear to read alternating channels; likewise the output index
// uses convWidth without the *3 channel factor. Looks like an indexing
// bug -- verify against a reference convolution before relying on the
// numeric output.
r += filter[i*filerSize + j] * image_d[3 * (row + i)*WIDTH + col + j]; //R
g += filter[i*filerSize + j] * image_d[3 * (row + i)*WIDTH + col + j + 1];//G
b += filter[i*filerSize + j] * image_d[3 * (row + i)*WIDTH + col + j + 2]; //B
}
}
output_d[row * convWidth + col] = r;
output_d[row * convWidth + col + 1] = g;
output_d[row * convWidth + col + 2] = b;
}
}
//**********************************************************************
// Function Name: decode_image *
// Description: - read image in ppm formate, read the data of array *
// named frame[] *
// Input file: image file : viptrafficX.ppm *
// Output file: none *
// Return: 0 if success *
//**********************************************************************
// Read the pixel payload of a binary (P6) PPM file into frame.
// Returns 0 on success, 1 if the file cannot be opened.
int decode_image(unsigned char frame[HEIGHT * WIDTH * 3], char filename[])
{
    FILE *pFile;
    // Open in binary mode: P6 pixel data is raw bytes, and text mode would
    // mangle it on some platforms.
    pFile = fopen(filename, "rb");
    if (pFile == NULL)
    {
        fprintf(stderr, "Cannot open image file %s\n", filename);
        return 1;
    }
    //In ppm file, the first 15 bytes are content of "p6,120 160, 255", image data is from 16th bytes
    fseek(pFile, 15L, SEEK_SET);
    // Read exactly the pixel payload. The original requested
    // HEIGHT*WIDTH*3 + 15 bytes, overflowing the caller's
    // HEIGHT*WIDTH*3-byte buffer by 15 bytes.
    fread(frame, sizeof(unsigned char), HEIGHT * WIDTH * 3, pFile);
    fclose(pFile);
    return 0;
}
//**********************************************************************
// Function Name:randomInit *
// Description: - Generate random value to an float array *
// *
// Input file: none *
// Output file: none *
// Return: kernel file size *
//**********************************************************************
// Fill data with pseudo-random values in [0, 1), quantized to 1/range steps
// (e.g. range=255 yields multiples of 1/255). Always returns 0.
int randomInit(float* data, int size, int range)
{
    srand(time(NULL));
    for (int i = 0; i < size; i++)
    {
        data[i] = (rand() % range) / (float)range;
    }
    //for (i = 0; i < size; i++) printf("%f;", data[i]); // for debugging
    return 0;
}
//**********************************************************************
// Function Name:Main *
// Description: - Main function on host, configure the kernel parameter*
// and run kernel *
// Input file: none *
// Output file: none *
// Return: 0 if success *
//**********************************************************************
//**********************************************************************
// Function Name:Main *
// Description: - Configure kernel parameters, run the convolution *
// kernel over 120 traffic images and report timing *
// Return: EXIT_SUCCESS *
//**********************************************************************
int main(void)
{
    int fltsz = FLTSIZE;
    int convWidth = (WIDTH - FLTSIZE + 2 * PADDING) / STRIDE + 1; //convolution width with padding
    int convHeight = (HEIGHT - FLTSIZE + 2 * PADDING) / STRIDE + 1; //convolution height with padding
    int imagecount = 0; //counter for 120 images
    unsigned char *image_d, *output_d;
    float *filter_d;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float kernelExecTimeNs = 0;
    float timer;
    float* filter = (float*)malloc(FLTSIZE*FLTSIZE * sizeof(float));
    unsigned char* image = (unsigned char*)malloc(HEIGHT * WIDTH * sizeof(unsigned char) * 3);
    unsigned char* output = (unsigned char*)malloc(convHeight * convWidth * 3 * sizeof(unsigned char));
    randomInit(filter, FLTSIZE*FLTSIZE, 255); //initialize filter
    cudaMalloc((void**)&image_d, HEIGHT*WIDTH * sizeof(unsigned char) * 3);
    cudaMalloc((void**)&output_d, convHeight * convWidth * 3 * sizeof(unsigned char));
    // The filter must live in device memory: the original passed the host
    // malloc() pointer straight to the kernel, which dereferences it on the
    // GPU (illegal address access).
    cudaMalloc((void**)&filter_d, FLTSIZE*FLTSIZE * sizeof(float));
    cudaMemcpy(filter_d, filter, FLTSIZE*FLTSIZE * sizeof(float), cudaMemcpyHostToDevice);
    while (imagecount < 120)
    {
        char filename[50];//file length upto 50
        sprintf(filename, "images/viptraffic%d.ppm", imagecount);//read viptrafficX.ppm
        decode_image(image, filename); //get image data from file
        imagecount++;
        //Copy from host to device
        cudaMemcpy(image_d, image, HEIGHT*WIDTH * sizeof(unsigned char) * 3, cudaMemcpyHostToDevice);
        // A BLOCKSIZE x BLOCKSIZE (256x256 = 65536 threads) block exceeds the
        // 1024-threads-per-block hardware limit, so the original launch would
        // fail with an invalid-configuration error. Use a 16x16 tile
        // (256 threads) and size the grid to cover the interleaved image.
        dim3 dimBlock(16, 16);
        dim3 dimGrid((WIDTH*3 + dimBlock.x - 1) / dimBlock.x, (HEIGHT + dimBlock.y - 1) / dimBlock.y);
        cudaEventRecord(start, 0);
        convolution <<<dimGrid, dimBlock >>> (image_d, output_d, filter_d, convWidth, convHeight, fltsz);//Block-thread
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        //Copy from device to host
        cudaMemcpy(output, output_d, convHeight * convWidth * 3 * sizeof(unsigned char), cudaMemcpyDeviceToHost);
        cudaEventElapsedTime(&timer, start, stop);
        kernelExecTimeNs += timer;
    }
    //Free memory allocation (the original also leaked the host filter buffer)
    cudaFree(filter_d);
    cudaFree(output_d);
    cudaFree(image_d);
    free(filter);
    free(output);
    free(image);
    printf("Cumputing done! Golbal memory applied in CUDA.\n");
    printf("Image amount:%d; Image size:%d x %d; Padding:%d; Stride:%d; Filter Size:%d.\n", imagecount, WIDTH, HEIGHT, PADDING, STRIDE, FLTSIZE);
    printf("Kernel Execution time: %f milli seconds\n", kernelExecTimeNs);
    //system("pause");
    return EXIT_SUCCESS;
}
|
9,506 | /*
Array with backwards dependencies. Order Violation. Data Race in line 37. Inter Region Data Race.
*/
#include <stdio.h>
// Macro for checking errors in CUDA API calls
#define cudaErrorCheck(call) \
do{ \
cudaError_t cuErr = call; \
if(cudaSuccess != cuErr){ \
printf("CUDA Error - %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(cuErr));\
exit(0); \
} \
}while(0)
// Grid dimension
#define B 100
// Iterations per block
#define T 512
// Host pointer
int *countervar;
// Initialization
// Zero the host-side counter array before it is copied to the device.
// Always returns 0.
int init(){
    int i = 0;
    while (i < B*T) {
        countervar[i] = 0;
        i++;
    }
    return 0;
}
// Kernel
// Each block walks its own T-element segment sequentially, setting
// countervar[i] = countervar[i-1] + 1. The first element of block b's
// segment reads the last element of block b-1's segment, which that block
// may not have written yet: this is the deliberate inter-block order
// violation / data race this example exists to demonstrate (see the file
// header comment). Do not "fix" the race.
__global__ void count(int *countervar){
for(int i = blockIdx.x * T; i < blockIdx.x * T + T; i++){
if(i!=0){
countervar[i] = countervar[i-1] + 1;
}
}
}
// Verifying result
// Report whether the racy kernel produced any slot where countervar[i] != i.
// Always returns 0; the verdict is printed, not returned.
int check(){
    bool mismatch = false;
    for (int i = 0; i < B*T; i++) {
        mismatch = mismatch || (countervar[i] != i);
    }
    printf("Memory Access Issue visible: %s\n", mismatch ? "true" : "false");
    return 0;
}
// Main program
// Main program: host-init, copy to device, run the (intentionally racy)
// kernel with one thread per block, copy back and report whether the race
// was visible this run.
int main(){
// Device pointer for counter variable
int *d_count;
// Allocation of host counter variable
countervar = (int *) malloc(B*T*sizeof(int));
// Initialization of the counter variable
init();
// Allocation of GPU memory
cudaErrorCheck( cudaMalloc(&d_count, B*T*sizeof(int)));
// Copying the counter variable from the host to the device
cudaErrorCheck( cudaMemcpy(d_count,countervar,B*T*sizeof(int),cudaMemcpyHostToDevice));
// Launch B blocks of a single thread each; each block serially fills its
// own T-element segment (see count()).
count<<<B,1>>>(d_count);
// Check for errors in kernel launch (e.g. invalid execution configuration paramters)
cudaErrorCheck( cudaGetLastError());
// Check for errors on the GPU after control is returned to CPU
cudaErrorCheck( cudaDeviceSynchronize());
// Copying the counter variable from the device to the host
cudaErrorCheck( cudaMemcpy(countervar,d_count,B*T*sizeof(int),cudaMemcpyDeviceToHost));
// Verifying result
check();
// Freeing GPU memory
cudaErrorCheck( cudaFree(d_count));
// Freeing CPU memory
free(countervar);
return 0;
}
|
9,507 | #include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include "mesh.cuh"
#include "matrix_functions.cuh"
#include "sparse_struc.cuh"
#include "minmax.cuh"
//--------------------------------------------PROTOTIPES--------------------------
void ih_jh_sh_compute(int *ih, int *jh, double *sh, struct mesh *mesh);
void hs_compute(struct sparse *h, struct mesh *mesh, double *hs);
//--------------------------------------------BODIES------------------------------
// Build the density-filter data for a topology-optimization mesh: allocates
// and fills the COO triplets (ih, jh, sh), assembles them into the sparse
// matrix h, then computes the per-row sums hs used for normalization.
// All out-parameters are allocated here with malloc; caller owns them.
// NOTE(review): allocation results are unchecked -- assumes malloc succeeds.
void prepare_filter(int **ih, int **jh, double **sh, struct mesh *mesh, double **hs, struct sparse *h) {
// Triplet count: one entry per element pair within the rmin neighborhood.
int size =(int)mesh->nelx*mesh->nely * (int)pow((2 * (mesh->rmin - 1) + 1), 2);
(*ih) = (int*)malloc(size * sizeof(int));
(*jh) = (int*)malloc(size * sizeof(int));
(*sh) = (double*)malloc(size * sizeof(double));
ih_jh_sh_compute((*ih), (*jh), (*sh), mesh);
prepare_sparse((*ih), (*jh), (*sh), h, size);
// One row-sum slot per distinct row index (sized by the largest row id).
(*hs) = (double*)malloc((max_value(h->row,h->nnz)* sizeof(double)));
hs_compute(h, mesh, (*hs));
}
// Fill the COO triplets of the distance-weighted filter matrix: for each
// element e1 and each element e2 within the rmin radius, weight
// sh = max(0, rmin - dist(e1, e2)). Element ids are 1-based (MATLAB-style).
void ih_jh_sh_compute(int *ih, int *jh, double *sh, struct mesh *mesh) {
int size, e1, e2, k, i2_max, i2_min, j2_max, j2_min;
size = (int) mesh->nelx*mesh->nely * (int)pow((2 * (mesh->rmin - 1) + 1), 2);
// Pre-fill: unused trailing triplets become (1, 1, 0.0), i.e. harmless
// zero contributions to entry (1,1).
for (int i = 0; i < size; i++)
ih[i] = 1;
for (int i = 0; i < size; i++)
jh[i] = 1;
for (int i = 0; i < size; i++)
sh[i] = 0;
k = 0;
for (int i1 = 0; i1 < mesh->nelx; i1++) {
for (int j1 = 0; j1 < mesh->nely; j1++) {
// 1-based id of the "center" element.
e1 = i1*mesh->nely + (j1 + 1);
// Clamp the rmin neighborhood window to the mesh bounds (1-based).
i2_min = (int)maxVal((i1 + 1) - (mesh->rmin - 1), 1);
i2_max = (int)minVal((i1 + 1) + (mesh->rmin - 1), mesh->nelx);
j2_min = (int)maxVal((j1 + 1) - (mesh->rmin - 1), 1);
j2_max = (int)minVal((j1 + 1) + (mesh->rmin - 1), mesh->nely);
for (int i2 = (i2_min - 1);i2 < i2_max;i2++) {
for (int j2 = (j2_min - 1);j2 < j2_max; j2++) {
e2 = i2*mesh->nely + (j2 + 1);
ih[k] = e1;
jh[k] = e2;
// Linear hat weight: decays with Euclidean distance, zero beyond rmin.
sh[k] = maxVal(0, mesh->rmin - sqrt(pow(i1 - i2, 2) + pow(j1 - j2, 2)));
k++;
}
}
}
}
}
// Compute per-row sums of the sparse filter matrix: hs[k] receives the sum
// of consecutive h->val entries sharing the same h->row value. Assumes the
// triplets are grouped by row (as produced by prepare_sparse).
// NOTE(review): the mesh parameter is unused; kept for interface stability.
void hs_compute(struct sparse *h, struct mesh *mesh, double *hs) {
    int k = 0, size = h->nnz;
    if (size <= 0)
        return; // nothing to sum
    double tmp_sum = h->val[0];
    for (int i = 1; i < size; i++) {
        if (h->row[i] == h->row[i-1]) {
            tmp_sum += h->val[i];
        } else {
            // Row changed: flush the finished group and start the next one.
            hs[k] = tmp_sum;
            tmp_sum = h->val[i];
            k++;
        }
    }
    // Flush the final row group. The original loop ran to i == size and read
    // h->row[size] / h->val[size], one element past the end of the arrays.
    hs[k] = tmp_sum;
}
|
9,508 | /*
* Cluster and Multicore Programming
* Department of Informatics
* Faculty of Sciences
* University of Lisbon
* November 30, 2019
* João David n49448
*/
#include <assert.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define GRAPH_SIZE 2000
#define EDGE_COST(graph, graph_size, a, b) graph[a * graph_size + b]
#define D(a, b) EDGE_COST(output, graph_size, a, b)
#define INF 0x1fffffff
// Build a reproducible random adjacency matrix: zero diagonal, edge costs in
// [0, 20], and costs drawn above 20 replaced by INF (edge absent). The fixed
// seed makes the graph identical across runs.
void generate_random_graph(int *output, int graph_size) {
    srand(0xdadadada);
    for (int i = 0; i < graph_size; i++) {
        for (int j = 0; j < graph_size; j++) {
            if (i == j) {
                D(i, j) = 0;
                continue;
            }
            int cost = rand() % 40;
            D(i, j) = (cost > 20) ? INF : cost;
        }
    }
}
// Pick the square thread-block side length: prefer 16, use 8 only when 8
// (but not 16) divides GRAPH_SIZE exactly; otherwise default to 16 and rely
// on the kernel's bounds check to handle the ragged edge.
int calculate_blockSideLen() {
    if (GRAPH_SIZE % 16 != 0 && GRAPH_SIZE % 8 == 0)
        return 8;
    return 16;
}
// One Floyd-Warshall relaxation step for pivot k: each thread owns cell
// (i, j) and relaxes D(i, j) against D(i, k) + D(k, j). The pivot row and
// column values for this block's tile are staged in dynamic shared memory
// (2 * blockSideLen ints, passed at launch).
__global__ void gpu_compute_floyd_warshall(int k, int graph_size, int *output, int blockSideLen) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if(i < GRAPH_SIZE && j < GRAPH_SIZE){
extern __shared__ int shared[];
int* frozenZoneHoriz = &shared[0];
int* frozenZoneVert = &shared[blockSideLen];
// Edge threads stage the pivot column D(i, k) and pivot row D(k, j)
// once per block; the k-th row/column is invariant at iteration k, so
// reading it while other blocks write D is safe.
if(threadIdx.y == 0){
frozenZoneHoriz[threadIdx.x] = D(i, k);
}
if(threadIdx.x == 0){
frozenZoneVert[threadIdx.y] = D(k, j);
}
// NOTE(review): this __syncthreads() sits inside the bounds-check branch;
// that is only safe because the launch config makes the guard uniform per
// block (GRAPH_SIZE divisible by blockSideLen, or whole blocks out of
// range) -- confirm if the launch parameters ever change.
__syncthreads();
if (frozenZoneHoriz[threadIdx.x] + frozenZoneVert[threadIdx.y] < D(i, j)) {
D(i, j) = frozenZoneHoriz[threadIdx.x] + frozenZoneVert[threadIdx.y];
}
}
}
// All-pairs shortest paths on the GPU: copies the graph to the device, runs
// one kernel launch per pivot k (the implicit stream ordering serializes the
// k iterations), then copies the result back. The blocking cudaMemcpy at the
// end also synchronizes with the last kernel.
// NOTE(review): no CUDA error checking -- a failed launch would go unnoticed.
void floyd_warshall_gpu(const int *graph, int graph_size, int *output) {
int blockSideLen = calculate_blockSideLen();
int gridSideLen = ceil((double)GRAPH_SIZE / (double)blockSideLen);
dim3 threadsPerBlock(blockSideLen, blockSideLen);
dim3 numBlocks(gridSideLen, gridSideLen);
int *dev;
int size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE;
cudaMalloc(&dev, size);
cudaMemcpy(dev, graph, size, cudaMemcpyHostToDevice);
for (int k = 0; k < GRAPH_SIZE; k++) {
// Dynamic shared memory: pivot row + pivot column tiles for the block.
gpu_compute_floyd_warshall<<<numBlocks, threadsPerBlock, sizeof(int) * blockSideLen * 2>>>(k, GRAPH_SIZE, dev, blockSideLen);
}
cudaMemcpy(output, dev, size, cudaMemcpyDeviceToHost);
cudaFree(dev);
}
// Reference host implementation: standard O(V^3) Floyd-Warshall. output
// starts as a copy of the adjacency matrix and is relaxed in place.
void floyd_warshall_cpu(const int *graph, int graph_size, int *output) {
    memcpy(output, graph, sizeof(int) * graph_size * graph_size);
    for (int k = 0; k < graph_size; k++) {
        for (int i = 0; i < graph_size; i++) {
            for (int j = 0; j < graph_size; j++) {
                int through_k = D(i, k) + D(k, j);
                if (through_k < D(i, j)) {
                    D(i, j) = through_k;
                }
            }
        }
    }
}
// Driver: build a reproducible random graph, solve all-pairs shortest paths
// on CPU and GPU, time both with gettimeofday, and compare the two results
// byte-for-byte (both paths work on ints, so exact equality is expected).
int main(int argc, char **argv) {
#define TIMER_START() gettimeofday(&tv1, NULL)
#define TIMER_STOP() \
gettimeofday(&tv2, NULL); \
timersub(&tv2, &tv1, &tv); \
time_delta = (float)tv.tv_sec + tv.tv_usec / 1000000.0
struct timeval tv1, tv2, tv;
float time_delta;
int *graph, *output_cpu, *output_gpu;
int size;
size = sizeof(int) * GRAPH_SIZE * GRAPH_SIZE;
graph = (int *)malloc(size);
assert(graph);
output_cpu = (int *)malloc(size);
assert(output_cpu);
memset(output_cpu, 0, size);
output_gpu = (int *)malloc(size);
assert(output_gpu);
generate_random_graph(graph, GRAPH_SIZE);
fprintf(stderr, "running on cpu...\n");
TIMER_START();
floyd_warshall_cpu(graph, GRAPH_SIZE, output_cpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
fprintf(stderr, "running on gpu...\n");
TIMER_START();
floyd_warshall_gpu(graph, GRAPH_SIZE, output_gpu);
TIMER_STOP();
fprintf(stderr, "%f secs\n", time_delta);
// Exact comparison is valid here: both implementations do identical
// integer arithmetic.
if (memcmp(output_cpu, output_gpu, size) != 0) {
fprintf(stderr, "FAIL!\n");
} else {
printf("OK\n");
}
return 0;
}
|
9,509 |
// Software double-precision atomicAdd built on 64-bit atomicCAS, the
// standard fallback for GPUs without a native double atomicAdd (hardware
// support arrived with compute capability 6.0). Returns the value stored at
// *address before the addition, matching the built-in atomicAdd contract.
__device__ double cuda_atomicAdd(double *address, double val)
{
double assumed,old=*address;
do {
// Reinterpret the doubles as 64-bit integers so atomicCAS can compare
// and swap them; the CAS succeeds only if *address still holds
// 'assumed', otherwise we retry with the freshly observed value.
assumed=old;
old=
__longlong_as_double
(
atomicCAS(
(unsigned long long int*)
address,
__double_as_longlong(assumed),
__double_as_longlong(val+assumed)));
}while (assumed!=old);
//printf("NEW ATOMIC ADD\n");
return old;
}
|
9,510 | /*
* Example program to demonstrate how to use the kernel tuner to tune
* parameters in the host code of GPU programs, such as the number of
* streams, in combination with parameters in the kernel
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#ifndef num_streams
#define num_streams 1
#endif
#ifndef grid_size_x
#define grid_size_x 1
#endif
#ifndef grid_size_y
#define grid_size_y 1
#endif
extern "C" {
#include "convolution.cu"
/*
 * Host wrapper tuned by the Kernel Tuner: runs convolution_kernel split
 * across num_streams CUDA streams, overlapping H2D copies, kernels and D2H
 * copies. Events enforce strict ordering of the input copies between
 * consecutive streams. Returns the measured GPU time in milliseconds, or
 * -1.0 when the launch failed due to resource limits.
 * Relies on symbols from the included convolution.cu: image/input/filter
 * dimensions, block sizes, border_height, d_filter and convolution_kernel.
 */
float convolution_streams(float *output, float *input, float *filter) {
float *h_output = output;
float *h_input = input;
float *h_filter = filter;
float *d_output;
float *d_input;
cudaError_t err;
err = cudaMalloc((void **)&d_output, image_width*image_height*sizeof(float));
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaMalloc: %s\n", cudaGetErrorString(err));
}
err = cudaMemset(d_output, 0, image_width*image_height*sizeof(float));
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaMemset: %s\n", cudaGetErrorString(err));
}
err = cudaMalloc((void **)&d_input, input_width*input_height*sizeof(float));
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaMalloc: %s\n", cudaGetErrorString(err));
}
cudaStream_t stream[num_streams];
cudaEvent_t event_htod[num_streams];
for (int i=0; i<num_streams; i++) {
err = cudaStreamCreate(&stream[i]);
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaStreamCreate: %s\n", cudaGetErrorString(err));
}
err = cudaEventCreate(&event_htod[i]);
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err));
}
}
cudaEvent_t start;
err = cudaEventCreate(&start);
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err));
}
cudaEvent_t stop;
err = cudaEventCreate(&stop);
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaEventCreate: %s\n", cudaGetErrorString(err));
}
//make sure there have been no errors
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "Error after memory setup in convolution_streams: %s\n", cudaGetErrorString(err));
}
dim3 threads(block_size_x, block_size_y, block_size_z);
dim3 grid(grid_size_x, grid_size_y);
//lines per stream, input data per stream, and border size
int lps = (image_height / num_streams);
int dps = lps * input_width;
int border = border_height * input_width;
//start timing
cudaDeviceSynchronize();
cudaEventRecord(start, 0);
// Filter goes to the d_filter constant-memory symbol via stream[0].
// NOTE(review): the kernel below is also handed the host pointer 'filter'
// as an argument -- presumably it reads the d_filter symbol and ignores
// that argument; confirm against convolution.cu.
err = cudaMemcpyToSymbolAsync(d_filter, h_filter, filter_width*filter_height*sizeof(float), 0, cudaMemcpyHostToDevice, stream[0]);
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaMemcpyToSymbolAsync: %s\n", cudaGetErrorString(err));
}
//streamed copy of input data with strict order among streams, stream[0] also copies border
for (int k=0; k<num_streams; k++) {
if (k == 0) {
err = cudaMemcpyAsync(d_input, h_input, (border + dps)*sizeof(float), cudaMemcpyHostToDevice, stream[k]);
}
else {
// Each stream waits until the previous stream's input slice landed, so
// the kernel in stream k can safely read into its lower border region.
err = cudaStreamWaitEvent(stream[k], event_htod[k-1], 0);
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaStreamWaitEvent htod k-1: %s\n", cudaGetErrorString(err));
}
err = cudaMemcpyAsync(d_input +border+k*dps, h_input +border+k*dps, dps*sizeof(float), cudaMemcpyHostToDevice, stream[k]);
}
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaMemcpyHostToDevice: %s\n", cudaGetErrorString(err));
}
err = cudaEventRecord(event_htod[k], stream[k]);
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaEventRecord htod: %s\n", cudaGetErrorString(err));
}
}
//start the kernel in each stream
for (int k=0; k<num_streams; k++) {
convolution_kernel<<<grid, threads, 0, stream[k]>>>(d_output+k*lps*image_width, d_input +k*dps, filter);
}
//streamed copy of the output data back to the host
for (int k=0; k<num_streams; k++) {
err = cudaMemcpyAsync(h_output + k*lps*image_width, d_output + k*lps*image_width, lps*image_width*sizeof(float), cudaMemcpyDeviceToHost, stream[k]);
if (err != cudaSuccess) {
fprintf(stderr, "Error in cudaMemcpyDeviceToHost: %s\n", cudaGetErrorString(err));
}
}
//mark the end of the computation
cudaEventRecord(stop, 0);
//wait for all to finish and get time
cudaDeviceSynchronize();
float time = 0.0;
cudaEventElapsedTime(&time, start, stop);
// NOTE(review): h_output aliases output, so this memcpy copies a buffer
// onto itself (overlapping src/dst is formally UB for memcpy) -- it looks
// like a leftover and could be removed; verify callers never pass a
// distinct staging buffer.
memcpy(output, h_output, image_width*image_height*sizeof(float));
//cleanup
cudaFree(d_output);
cudaFree(d_input);
for (int k=0; k<num_streams; k++) {
cudaStreamDestroy(stream[k]);
cudaEventDestroy(event_htod[k]);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
//make sure there have been no errors
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
//this bit is necessary because the Kernel Tuner currently can't decide whether
//it's OK to silently skip an error or break execution when calling C functions
const char *error_string = cudaGetErrorString(err);
// NOTE(review): only the first 10 characters ("too many r") are compared;
// intentional prefix match, but brittle if the message wording changes.
if (strncmp("too many resources requested for launch", error_string, 10) == 0) {
time = -1.0;
} else {
fprintf(stderr, "Error at the end of convolution_streams: %s\n", error_string);
exit(1);
}
}
return time;
}
}
|
9,511 | /***************************************************************************//**
* \file intermediatePressure.cu
* \author Christopher Minar (minarc@oregonstate.edu)
* \brief kernels to generate the right hand side of the poission equation
*/
#include "intermediatePressure.h"
/**
* \namespace kernels
* \brief Contains all the custom-written CUDA kernels.
*/
namespace kernels
{
/*
* Generate the right hand side of the pressure equation when no body is present
* param rhs2 right hand side of the pressure eq
* param uhat intermediate velocity
* param ym yminus boundary velocities
* param yp yplus boundary velocities
* param xm xminus boundary velocities
* param xp xplus boundary velocities
* param dx distance between nodes in the x direction (measured between node sides, where u velocites are stored)
* param dy distance between nodes in the y direction (measured between node top/bot, where v velocites are stored)
* param nx number of cells in x direction
* param ny number of cells in y direction
*/
__global__
void intermediatePressureNoBody(double *rhs2, double *uhat, double *ym, double *yp, double *xm, double *xp, double *dx, double *dy, int nx, int ny)
{
// One thread per pressure cell; excess threads exit immediately.
if (threadIdx.x + blockDim.x * blockIdx.x >= nx*ny)
return;
// ip: flat cell index; I/J: cell column/row; iu/iv index the u and v
// components inside the packed uhat array (v components start after the
// (nx-1)*ny u components).
int ip = threadIdx.x + blockDim.x * blockIdx.x,
I = ip % nx,
J = ip / nx,
iu = (nx-1)*J + I,
iv = (nx-1)*ny + nx*J +I;
// Accumulate the divergence of uhat for this cell, substituting the
// prescribed boundary velocities on domain edges.
double temp = 0;
//EAST
//if not on the east wall and east is outside the body, add east term
if (I != nx-1)//not at east boundry
temp -= uhat[iu]/dx[I];
else if (I == nx-1)//at east boundry
temp -= xp[J]/dx[I];
//WEST
//if not on west wall and west is outside the body, add west term
if (I != 0)//not at west boundary
temp += uhat[iu - 1]/dx[I];
else if (I == 0)//at the west boundary
temp += xm[J]/dx[I];
//NORTH
//if not on north wall and north is outside the body, add north term
if (J != ny-1)//not at north boundry
temp -= uhat[iv]/dy[J];
else if (J == ny-1)//at north boundry
temp -= yp[(nx-1)+I]/dy[J];
//SOUTH
//if not on south wall and south is outside the body, add south term
if (J != 0)//not at south boundry
temp += uhat[iv-nx]/dy[J];
else if (J == 0)//at south boundry
temp += ym[(nx-1)+I]/dy[J];
rhs2[ip] = temp;
}
}
|
9,512 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 10000000
#define MAX_ERR 1e-6
// Element-wise out[i] = a[i] - b[i]; one thread per element, with a bounds
// guard for the ragged final block.
__global__ void vector_sub(float *out, float *a, float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    out[i] = a[i] - b[i];
}
int main(){
    // Subtract two constant vectors on the GPU and verify the result on the
    // host: out = a - b with a = 3.0 and b = 2.0 everywhere.
    size_t bytes = sizeof(float) * N;
    float *h_a, *h_b, *h_out;
    float *d_a, *d_b, *d_out;

    // Host buffers.
    h_a = (float*)malloc(bytes);
    h_b = (float*)malloc(bytes);
    h_out = (float*)malloc(bytes);

    // Fill the operands with known constants.
    for (int i = 0; i < N; i++) {
        h_a[i] = 3.0f;
        h_b[i] = 2.0f;
    }

    // Device buffers.
    cudaMalloc((void**)&d_a, bytes);
    cudaMalloc((void**)&d_b, bytes);
    cudaMalloc((void**)&d_out, bytes);

    // Host -> device transfers.
    cudaMemcpy(d_a, h_a, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, bytes, cudaMemcpyHostToDevice);

    // Launch: one thread per element, one extra block for the remainder.
    int block_size = 256;
    int grid_size = (N / block_size) + 1;
    vector_sub<<<grid_size,block_size>>>(d_out, d_a, d_b, N);

    // Device -> host transfer (blocking, so it also syncs with the kernel).
    cudaMemcpy(h_out, d_out, bytes, cudaMemcpyDeviceToHost);

    // TODO: implement another vector sub using resulted output and h_c
    // Verify a - out == b within tolerance for every element.
    for (int i = 0; i < N; i++) {
        float diff = h_a[i] - h_out[i] - h_b[i];
        assert(fabs(diff) < MAX_ERR);
    }
    printf("out[0] = %f\n", h_out[0]);
    printf("PASSED\n");

    // Release device then host memory.
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);
    free(h_a);
    free(h_b);
    free(h_out);
}
|
9,513 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cooperative_groups.h>
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <math.h>
#include <iostream>
using namespace cooperative_groups;
// Reduces a thread group to a single element
// Reduces a thread group to a single element.
// temp must point to shared memory with room for at least g.size() ints, and
// g.size() is assumed to be a power of two (the halving loop). Only thread 0
// of the group returns the complete sum; other threads return partials.
__device__ int reduce_sum( thread_group g, int *temp, int val)
{
int lane = g.thread_rank();
// Each thread adds its partial sum[i] to sum[lane + i]
for(int i = g.size()/2; i > 0 ; i /= 2)
{
temp[lane] = val;
// wait for all threads to store
g.sync();
if(lane < i)
{
val += temp[lane + i];
}
// wait for all threads to load
g.sync();
}
// only thread 0 will return full sum
return val;
}
// Create partial sums from the original array
// Grid-stride partial sum over input[0..n). The main loop reads the input as
// int4 vectors (four ints per 16-byte load) for memory throughput -- this
// assumes input is 16-byte aligned, which cudaMalloc/cudaMallocManaged
// guarantee for the buffer start. A scalar tail loop covers the final n % 4
// elements, which the original version silently dropped whenever n was not a
// multiple of 4.
__device__ int thread_sum(int *input, int n)
{
    int sum = 0;
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    // Vectorized body: element groups [4i, 4i+3] for i in [0, n/4).
    for (int i = tid; i < n / 4; i += stride)
    {
        int4 in = ((int4*)input)[i];
        sum += in.x + in.y + in.z + in.w;
    }
    // Scalar tail: elements [4*(n/4), n), shared across the grid.
    for (int i = (n / 4) * 4 + tid; i < n; i += stride)
    {
        sum += input[i];
    }
    return sum;
}
// Grid-wide sum of input[0..n) into *sum. Each thread computes a vectorized
// partial (thread_sum), each block reduces its partials in dynamic shared
// memory (temp, sized by the launch's third argument -- at least
// blockDim.x ints), and each block leader folds its block total into *sum
// with atomicAdd. *sum must be zeroed by the caller before launch.
__global__ void sum_reduction( int *sum, int *input, int n )
{
// Create partial sums from the array
int my_sum = thread_sum(input, n);
// Dynamic shared memory allocation
extern __shared__ int temp[];
// Identifier for a Thread Block
auto g = this_thread_block();
// Reduce each Thread Block
int block_sum = reduce_sum(g, temp, my_sum);
// Collect the partial result from each Thread Block
if (g.thread_rank() == 0)
{
atomicAdd(sum, block_sum);
}
}
// Fill the vector with the constant 1 (randomization left commented out so
// the reduction result stays predictable).
void initialize_vector(int *v, int n) {
    int i = 0;
    while (i < n) {
        v[i++] = 1;//rand() % 10;
    }
}
// Sum a vector of 8192 ones on the GPU using cooperative-groups reduction
// and verify the total on the host.
int main()
{ // Vector size
    int n = 1<<13;
    size_t bytes = n * sizeof(int);
    // Result scalar and input vector, in unified memory so both host and
    // device can touch them directly.
    int *sum;
    int *data;
    cudaMallocManaged(&sum, sizeof(int));
    cudaMallocManaged(&data, bytes);
    // Initialize vector
    initialize_vector(data, n);
    // The kernel accumulates with atomicAdd, so the result slot must start
    // at zero -- cudaMallocManaged does not guarantee zeroed memory.
    *sum = 0;
    // Thread Block size
    int TB_SIZE = 256;
    // Grid size
    int GRID_SIZE = (n + TB_SIZE - 1) / TB_SIZE;
    // Dynamic shared memory: reduce_sum only indexes temp[0..blockDim.x), so
    // one int per thread in a block suffices. The original passed
    // n * sizeof(int) (32 KB here), wasting shared memory and limiting how
    // large n could grow before exceeding the per-block limit.
    sum_reduction<<< GRID_SIZE, TB_SIZE, TB_SIZE * sizeof(int)>>> (sum, data, n);
    // Wait for the kernel before reading the managed result on the host.
    cudaDeviceSynchronize();
    assert(*sum == 8192);
    printf("COMPLTED SUCCESSFULLY\n");
    // Release the unified-memory allocations (the original leaked both).
    cudaFree(data);
    cudaFree(sum);
    return 0;
}
9,514 | #include <stdio.h>
#include <stdlib.h>
#define N 20
#define THREADS_PER_BLOCK 512
#define BLOCKS (N + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK
__global__ void find_primes(int *a, int n);
// Compute all primes below N on the GPU (one thread per candidate) and
// print them, reporting kernel and transfer timings.
int main(int argc, char *argv[]) {
    float total_time, comp_time;
    cudaEvent_t total_start, total_stop, comp_start, comp_stop;
    cudaEventCreate(&total_start);
    cudaEventCreate(&total_stop);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_stop);
    /*
     * Host's array (calloc zero-fills, so non-prime slots default to 0)
     */
    int *array;
    array = (int*) calloc(N, sizeof(int));
    /*
     * Start counting total time
     */
    cudaEventRecord(total_start);
    /*
     * Device's array
     */
    int *dev_array;
    cudaMalloc(&dev_array, N * sizeof(int));
    /*
     * Zero the device array: cudaMalloc does not initialize memory, and the
     * kernel only writes the slots of primes, so composite slots would
     * otherwise hold garbage after the copy back.
     */
    cudaMemset(dev_array, 0, N * sizeof(int));
    /*
     * Start counting kernel time
     */
    cudaEventRecord(comp_start);
    /*
     * Kernel call
     */
    find_primes<<<BLOCKS, THREADS_PER_BLOCK>>>(dev_array, N);
    /*
     * Kernel time count
     */
    cudaEventRecord(comp_stop);
    cudaEventSynchronize(comp_stop);
    cudaEventElapsedTime(&comp_time, comp_start, comp_stop);
    /*
     * Copy the flags from device memory to host memory
     */
    cudaMemcpy(array, dev_array, N*sizeof(int), cudaMemcpyDeviceToHost);
    /*
     * Total time count
     */
    cudaEventRecord(total_stop);
    cudaEventSynchronize(total_stop);
    cudaEventElapsedTime(&total_time, total_start, total_stop);
    /*
     * Free memory on device
     */
    cudaFree(dev_array);
    cudaEventDestroy(comp_start);
    cudaEventDestroy(comp_stop);
    cudaEventDestroy(total_start);
    cudaEventDestroy(total_stop);
    /*
     * GPU timing
     */
    printf("N: %d, blocks: %d, total_threads: %d\n", N, BLOCKS, THREADS_PER_BLOCK*BLOCKS);
    printf("Total time (ms): %f\n", total_time);
    printf("Kernel time (ms): %f\n", comp_time);
    printf("Data transfer time (ms): %f\n", total_time-comp_time);
    /*
     * Printing primes
     */
    (void) printf("\n\nPrimes: [ ");
    int i;
    /*
     * array has N elements (indices 0..N-1); the original loop's `i <= N`
     * read one element past the end of the buffer. The kernel's `idx < n`
     * guard never marks index N anyway, so the range [2, N) is the correct
     * one.
     */
    for (i=2; i<N; ++i)
        if (array[i])
            (void) printf("%d ", i);
    (void) printf("]\n\n");
    return 0;
}
/*
* Function: find_primes
* --------------------
* Finds all prime numbers in specific range
*
 * a: pointer of the array that will contain ones and zeros (1: is prime, 0: not a prime)
 * n: number of elements in the array (the range); only indices 2..n-1 are examined
*
*/
// One thread per candidate number idx in [2, n): trial-divides by
// 2..idx/2 and stores the verdict in a[idx] (1 = prime, 0 = composite).
__global__ void find_primes(int *a, int n){
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx > 1 && idx < n){
		int is_prime = 1;
		int j;
		for (j=2; j<idx/2+1; ++j){
			// `j != idx` was redundant: j <= idx/2 < idx for idx >= 2.
			if (!(idx % j)){
				is_prime = 0;
				break;
			}
		}
		// Fix: always store the verdict. The device buffer comes from
		// cudaMalloc and is not zero-initialized, so composite slots
		// must be written explicitly too.
		a[idx] = is_prime;
	}
}
9,515 | #include <stdio.h>
// __global__ marks a function launched from the host that runs on the GPU.
// Every launched thread prints one greeting.
__global__ void helloFromGPU (void)
{
	printf("Hello World from GPU!\n");
}
int main(void)
{
	// Greeting from the host side first.
	printf("Hello World from CPU!\n");
	// <<<1, 10>>> launches one block of 10 device threads.
	helloFromGPU <<<1, 10>>>();
	// Tears down the device context; also flushes device-side printf output.
	cudaDeviceReset();
	return 0;
}
|
9,516 | /////// /////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on CPU.
// Straight accumulation in double precision.
////////////////////////////////////////////////////////////////////////////
#include <iostream>
#include <cmath>
using namespace std;
// CPU reference for kernel 4.
// For each (Sub_Block, Sub_Thread) pair, runs a (End_B x End_A) DP over two
// sequences stored in B (offsets derived from Start_A/Start_B), with match
// score K4_S1, mismatch K4_S2, gap K4_S3, taking min at each cell
// (Levenshtein-style when S1=0, S2=S3=1). The final cell is written to
// Kernel_4_output[Sub_Thread + Sub_Block * K3_Report * K3_Report].
void Kernel_4_CPU (int* B, int* Kernel_4_output, int* Start_A, int* Start_B, int* Length_Seq_K4,
		int K3_Length, int K3_Report, int K3_Safety, int K_3_R,
		int Start_Th1, int End_Th1,int K4_S1, int K4_S2,int K4_S3)
{
	// DP scratch matrix, (K3_Length+1)^2 cells per safety slot.
	int *D;
	D = new int [K3_Safety*(K3_Length+1)*(K3_Length+1)];
	for (int i=0; i<(K3_Safety*(K3_Length+1)*(K3_Length+1)); i++)
		D[i]=0;
	// Boundary conditions: first row and first column hold gap penalties.
	for (int i=0; i<(K3_Length+1); i++)
	{
		D[i]=i*K4_S3;
		D[i*(K3_Length+1)]=i*K4_S3;
	}
	for(int Sub_Block =0; Sub_Block < K_3_R; Sub_Block ++)
	{
		for (int Sub_Thread=Start_Th1; Sub_Thread<End_Th1; Sub_Thread++)
		{
			// Offsets of the two packed sequences and their lengths.
			int A_Loc = Start_A[Sub_Thread] * K3_Safety * K3_Length + Sub_Block * K3_Safety * K3_Length * K3_Report;
			int B_Loc = Start_B[Sub_Thread] * K3_Safety * K3_Length + Sub_Block * K3_Safety * K3_Length * K3_Report;
			int End_A = Length_Seq_K4[Start_A[Sub_Thread] + Sub_Block * K3_Report];
			int End_B = Length_Seq_K4[Start_B[Sub_Thread] + Sub_Block * K3_Report];
			for (int i = 0; i<End_B; i++ )
			{
				for (int j = 0; j<End_A; j++)
				{
					int D_Sim;
					int Num = i *(K3_Length+1) + j ;     // cell (i, j)
					int Num1 = (i+1)*(K3_Length+1) + j ; // cell (i+1, j)
					// Diagonal move: match or substitution.
					if (B[A_Loc + j]==B[B_Loc + i])
						D_Sim = D[Num]+K4_S1;
					else
						D_Sim=D[Num]+K4_S2;
					int F = D[Num+1] + K4_S3;  // gap in one sequence
					int E = D[Num1] + K4_S3;   // gap in the other
					D[Num1+1] = std::min(std::min(E,F),D_Sim);
				}
			}
			int Index_1 = Sub_Thread + Sub_Block * K3_Report * K3_Report;
			int Num1 = (End_B)*(K3_Length+1) + End_A-1 ;
			Kernel_4_output[Index_1] = D[Num1+1];
		}
	}
	// Fix: the DP scratch buffer was leaked on every call.
	delete[] D;
}
9,517 | #include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform.h>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <algorithm>
#include <cstdlib>
#include <cstdio>
#include <string>
#include <cstring>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <stdio.h>
// The std::chrono namespace provides timer functions in C++
#include <chrono>
// std::ratio provides easy conversions between metric units
#include <ratio>
// // Provide some namespace shortcuts
// using std::cout;
// using std::chrono::high_resolution_clock;
// using std::chrono::duration;
//using namespace std;
// Elementwise product of a zipped (a, b) pair of floats.
struct f_mult
{
	template <typename Tuple>
	__host__ __device__
	float operator()(Tuple v)
	{
		float a = thrust::get<0>(v);
		float b = thrust::get<1>(v);
		return a * b;
	}
};
// Jacobi update for one unknown: nextX = (b - rowSum) / diag + x.
struct f_nextx
{
	template <typename Tuple>
	__host__ __device__
	float operator()(Tuple v)
	{
		float b    = thrust::get<0>(v);
		float s    = thrust::get<1>(v);
		float diag = thrust::get<2>(v);
		float x    = thrust::get<3>(v);
		return (b - s) / diag + x;
	}
};
// Maps a flat index into the n-wide matrix to its quotient group (idx / n).
struct divF: thrust::unary_function<int, int>
{
	int n;
	divF(int n_) : n(n_) {}
	__host__ __device__
	int operator()(int idx)
	{
		int group = idx / n;
		return group;
	}
};
// Maps a flat index to its offset within a length-n group (idx % n).
struct modF: thrust::unary_function<int, int>
{
	int n;
	modF(int n_) : n(n_) {}
	__host__ __device__
	int operator()(int idx)
	{
		int offset = idx % n;
		return offset;
	}
};
// struct diag_index : public thrust::unary_function<int,int>
// {
// diag_index(int rows) : rows(rows){}
// __host__ __device__
// int operator()(const int index) const
// {
// return (index*rows + (index%rows));
// }
// const int rows;
// };
// Maps i to the flat index of the i-th diagonal element, i*n + i.
struct dmF: thrust::unary_function<int, int>
{
	int n;
	dmF(int n_) : n(n_) {}
	__host__ __device__
	int operator()(int i)
	{
		return i * (n + 1);  // same as i*n + i
	}
};
typedef thrust::counting_iterator<int> countIt;
typedef thrust::transform_iterator<divF, countIt> columnIt;
typedef thrust::transform_iterator<modF, countIt> rowIt;
typedef thrust::transform_iterator<dmF, countIt> diagIt;
// One Jacobi sweep on the device: dnextX = (db - dA * dx_rowsum) / diag(dA) + dx.
// `temp`, `outkey` and `sum` are caller-provided scratch so no allocation
// happens per iteration. dA is the flattened size x size matrix.
void solve(thrust::device_vector<float>& dx, thrust::device_vector<float>& dA, thrust::device_vector<float>& db,
	thrust::device_vector<float>& dnextX, int size, thrust::device_vector<float>& temp, thrust::device_vector<int>&outkey,
	thrust::device_vector<float>&sum)
{
	// Index generators over the flattened matrix: group (idx/size),
	// offset within group (idx%size), and diagonal positions.
	countIt first = thrust::make_counting_iterator(0);
	columnIt cv_begin = thrust::make_transform_iterator(first, divF(size));
	columnIt cv_end = cv_begin + (size * size);
	rowIt rv_begin = thrust::make_transform_iterator(first, modF(size));
	rowIt rv_end = rv_begin + (size * size);
	diagIt dg_begin = thrust::make_transform_iterator(first, dmF(size));
	diagIt dg_end = dg_begin + (size);
	// temp[idx] = dA[idx] * dx[idx % size]  (every product of a row sum).
	thrust::transform(make_zip_iterator(
			make_tuple(
				dA.begin(),
				thrust::make_permutation_iterator(dx.begin(), rv_begin))),
		make_zip_iterator(
			make_tuple(
				dA.end(),
				thrust::make_permutation_iterator(dx.end(), rv_end))),
		temp.begin(),
		f_mult());
	// Segmented sums of temp, one per group key (idx / size).
	thrust::reduce_by_key(cv_begin, cv_end, temp.begin(), outkey.begin(), sum.begin());
	// dnextX[i] = (db[i] - sum[i]) / dA[i][i] + dx[i]
	thrust::transform(
		make_zip_iterator(
			make_tuple(
				db.begin(),
				sum.begin(),
				thrust::make_permutation_iterator(dA.begin(), dg_begin),
				dx.begin())),
		make_zip_iterator(
			make_tuple(
				db.end(),
				sum.end(),
				thrust::make_permutation_iterator(dA.end(), dg_end),
				dx.end())),
		dnextX.begin(),
		f_nextx());
}
// Jacobi iterative solver driver.
// Usage: <size> <input matrix+rhs file> <output file>
int main(int argc, char ** argv) {
	// Fix: argv[1..3] were read without checking argc (UB / crash).
	if (argc < 4) {
		std::cerr << "usage: " << argv[0] << " size input_file output_file\n";
		return 1;
	}
	int maxIterations = 100;
	int size = std::stoi(argv[1], 0, 10);
	std::cout << "size="<<size<<"\n";
	thrust::host_vector<float> A_flat(size*size);
	thrust::host_vector<float> hb(size);
	thrust::host_vector<float> hx(size);
	thrust::host_vector<float> hnextX(size);
	int n = size;
	// Read the n x n matrix followed by the right-hand side vector.
	std::string rfile = argv[2];
	std::ifstream fin(rfile);
	for (int i = 0; i < n; i++) {
		for (int j = 0; j < n; j++) {
			fin >> A_flat[i*n+j];
		}
	}
	for (int i = 0; i < n; i++) {
		fin >> hb[i];
	}
	fin.close();
	std::cout << "Read benchmark file "<<rfile<<std::endl;
	int size2=size*size;
	// Device-side problem data plus pre-allocated per-iteration scratch.
	thrust::device_vector<float> dA(size2);
	thrust::device_vector<float> dx(size);
	thrust::device_vector<float> db(size);
	thrust::device_vector<float> dnextX(size);
	thrust::device_vector<float> temp(size*size);
	thrust::device_vector<int> outkey(size);
	thrust::device_vector<float> sum(size);
	dA = A_flat;
	db = hb;
	// Initial guess x = 0.
	thrust::fill(dx.begin(), dx.end(), 0);
	thrust::fill(dnextX.begin(), dnextX.end(), 0);
	cudaEvent_t start;
	cudaEvent_t stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start);
	int count = 1;
	// Ping-pong between dx and dnextX each iteration.
	for (; (count < maxIterations) ; count++)
	{
		if (count % 2) {
			// odd
			solve(dnextX, dA, db, dx, size, temp, outkey, sum);
		}
		else {
			// even
			solve(dx, dA, db, dnextX, size, temp, outkey, sum);
		}
	}
	cudaEventRecord(stop);
	cudaEventSynchronize(stop);
	// Get the elapsed time in milliseconds
	float ms = 0;
	cudaEventElapsedTime(&ms, start, stop);
	// Fix: CUDA events were never destroyed.
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	std::cout << std::endl << "Iterations:" << count << std::endl;
	printf("%f\n", ms);
	hx = dx;
	hnextX = dnextX;
	// Write the solution vector.
	std::string wfile = argv[3];
	std::ofstream fout(wfile);
	for (int i = 0; i < n; i++)
	{
		fout << std::fixed<<hx[i] << " ";
	}
	fout << std::endl;
	fout.close();
	// Residual check on the host: c = A * x, compare against b.
	float * c = new float [size];
	float maxError = 0;
	float total_err = 0;
	for(int i = 0; i < size; i++) {
		c[i] = 0;
		for(int j = 0; j < size; j++)
		{
			c[i] += A_flat[i*size+j] * hx[j];
		}
		maxError = fmax(maxError, fabs(c[i] - hb[i]));
		total_err += fabs(c[i] - hb[i]);
	}
	total_err = total_err / size;
	std::cout << "\n==== max error: "<<maxError<<"\n";
	std::cout << "==== avg error: "<<total_err<<"\n";
	delete[] c;
	return 0;
}
|
9,518 | /**
* Segmented Scan CUDA sample
*
* Original:
* "Efficient Parallel Scan Algorithms for GPUs",
* Shubhabrata Sengupta,Mark Harris, Michael Garland.
* https://research.nvidia.com/sites/default/files/publications/nvr-2008-003.pdf
*
* via
* aokomoriuta san
* http://qiita.com/aokomoriuta/items/3c2a80181a01c7f22e7f
*
* Using a template kernel.cu in NVIDIA Cuda Toolkit 5.5
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Binary scan operator: addition.
template<typename T>
class AddOp {
public:
	static __device__ inline T apply(const T a, const T b) {
		T result = a + b;
		return result;
	}
};
// Binary scan operator: maximum.
template<typename T>
class MaxOp {
public:
	static __device__ inline T apply(const T a, const T b) {
		return (a < b) ? b : a;
	}
};
cudaError_t segmentedScanTest(int *c, const int *a, const int *b, unsigned int size);
// One hop of the warp-level segmented scan: lane `tid` pulls in the value
// `offset` lanes back unless a segment head sits at this position, and the
// head flags are OR-combined so heads propagate forward with the scan.
template<typename T, class OP>
__device__ void compute_segscan(volatile T *p, volatile int *hd,
		const unsigned int tid, const unsigned int offset) {
	const unsigned int lane = tid & 31;
	if (lane >= offset) {
		if (!hd[tid])
			p[tid] = OP::apply(p[tid - offset], p[tid]);
		hd[tid] = hd[tid] | hd[tid - offset];
	}
}
/**
 * Figure 6: warp-level segmented scan.
 * Applies the log-step hops at offsets 1, 2, 4, 8, 16.
 * Kind == 0 returns the inclusive result; otherwise the exclusive one
 * (previous lane's value, 0 for lane 0).
 */
template<typename T, int Kind, class OP>
__device__ T segscan_warp(volatile T *p, volatile int *hd,
		const unsigned int tid = threadIdx.x) {
	const unsigned int lane = tid & 31;
#pragma unroll
	for (unsigned int offset = 1; offset <= 16; offset <<= 1)
		compute_segscan<T,OP>(p, hd, tid, offset);
	if (Kind == 0)
		return p[tid];
	return (lane > 0) ? p[tid - 1] : 0;
}
// Stages src into dst, then runs the warp-level segmented scan in place;
// with Kind == 0 the inclusive result already lands in dst[tid].
template <typename T, class OP>
__global__ void segscan_warp_kernel(const T *src, T *dst, int *flag) {
	const unsigned int tid = threadIdx.x;
	dst[tid] = src[tid];
	segscan_warp<int, 0, OP>(dst, flag, tid);
}
/**
 * Figure 3
 *
 * Inclusive Hillis-Steele scan of `p` across one warp using OP.
 * The Kind template parameter is unused here (kept for symmetry with the
 * segmented variants).
 * NOTE(review): relies on implicit warp-synchronous execution over the
 * volatile buffer — a pre-Volta idiom; on GPUs with independent thread
 * scheduling this needs __syncwarp() or cooperative groups. Confirm the
 * target architecture.
 */
template<typename T, int Kind, class OP>
__device__ T scan_warp(volatile T *p,
		const unsigned int tid = threadIdx.x) {
	const int lane = tid & 31;   /* lane index within the warp */
	if (lane >= 1)
		p[tid] = OP::apply(p[tid - 1], p[tid]);
	if (lane >= 2)
		p[tid] = OP::apply(p[tid - 2], p[tid]);
	if (lane >= 4)
		p[tid] = OP::apply(p[tid - 4], p[tid]);
	if (lane >= 8)
		p[tid] = OP::apply(p[tid - 8], p[tid]);
	if (lane >= 16)
		p[tid] = OP::apply(p[tid - 16], p[tid]);
	return p[tid];   /* inclusive result for this lane */
}
/**
 * Figure 7
 *
 * Warp-level segmented scan, variant 2: head flags are first converted to
 * lane indices and a max-scan locates each lane's nearest segment head
 * (`mindex`); the scan hops then simply never reach past that head.
 * Kind == 0 -> inclusive result; otherwise exclusive (0 at segment heads).
 */
template<typename T, int Kind, class OP>
__device__ T segscan_warp2(volatile T *p, volatile int *hd,
		const unsigned int tid = threadIdx.x) {
	const unsigned int lane = tid & 31;
	/* Turn each head flag into the lane index of that head. */
	if (hd[tid])
		hd[tid] = lane;
	/* mindex = lane of the closest head at or before this lane. */
	int mindex = scan_warp<T,Kind, MaxOp<T> >(hd,tid);
	/* Standard log-step scan, clipped so no hop crosses the head. */
	if (lane >= mindex + 1)
		p[tid] = OP::apply(p[tid - 1],p[tid]);
	if (lane >= mindex + 2)
		p[tid] = OP::apply(p[tid - 2], p[tid]);
	if (lane >= mindex + 4)
		p[tid] = OP::apply(p[tid - 4], p[tid]);
	if (lane >= mindex + 8)
		p[tid] = OP::apply(p[tid - 8], p[tid]);
	if (lane >= mindex + 16)
		p[tid] = OP::apply(p[tid - 16], p[tid]);
	if (Kind == 0)
		return p[tid];
	else
		/* Exclusive: previous lane's value, or 0 at a segment head. */
		return (lane > 0 && mindex != lane) ? p[tid - 1] : 0;
}
// Stages src into dst and runs the variant-2 warp segmented scan in place.
template <typename T, class OP>
__global__ void segscan_warp2_kernel(const T *src, T *dst, int *flag) {
	const unsigned int lane_id = threadIdx.x;
	dst[lane_id] = src[lane_id];
	dst[lane_id] = segscan_warp2<int, 0, OP>(dst, flag, lane_id);
}
/**
 * Figure 10
 *
 * Block-level segmented scan built from warp-level scans:
 *  1) scan each warp independently;
 *  2) collect one (total, flag) pair per warp;
 *  3) scan those per-warp pairs with warp 0;
 *  4) each lane whose segment is still "open" at its warp boundary adds
 *     the carry from the preceding warps.
 * Both p and hd are scanned/overwritten in place; barrier placement is
 * load-bearing, keep the __syncthreads() exactly where they are.
 */
template<typename T, int Kind, class OP>
__device__ T segscan_block(volatile T *p, volatile int *hd,
		const unsigned int tid = threadIdx.x) {
	const unsigned int warpid = tid >> 5;
	const unsigned int warp_first = warpid << 5;
	const unsigned int warp_last = warp_first + 31;
	// step 1a: a warp is "open" if no segment head sits at its first lane
	bool warp_is_open = (hd[warp_first] == 0);
	__syncthreads();
	// step 1b: independent segmented scan inside each warp
	T val = segscan_warp2<T,Kind, OP>(p, hd, tid);
	// step 2a: last lane's value is the warp total
	T warp_total = p[warp_last];
	// step 2b: did any head appear in this warp (or was it closed on entry)?
	int warp_flag = hd[warp_last] != 0 || !warp_is_open;
	bool will_accumulate = warp_is_open && hd[tid] == 0;
	__syncthreads();
	// step 2c: compact one (total, flag) pair per warp into slots 0..warps-1
	if (tid == warp_last) {
		p[warpid] = warp_total;
		hd[warpid] = warp_flag;
	}
	__syncthreads();
	// step 3: warp 0 scans the per-warp pairs
	if (warpid == 0)
		segscan_warp2<T,0, OP>(p, hd, tid);
	__syncthreads();
	// step 4: fold the carry from preceding warps into open-segment lanes
	if (warpid != 0 && will_accumulate)
		val = OP::apply( p[tid - 1] , val);
	p[tid] = val;
	__syncthreads();
	return val;
}
// Stages src into dst and runs the block-level segmented scan in place.
template <typename T, class OP>
__global__ void segscan_block_kernel(const T *src, T *dst, int *flag) {
	const unsigned int idx = threadIdx.x;
	dst[idx] = src[idx];
	dst[idx] = segscan_block<int, 0, OP>(dst, flag, idx);
}
// Variant that scans the values in static shared memory instead of global.
// NOTE(review): only the value array lives in shared memory; the head
// flags `flag` are still scanned/overwritten in place in global memory —
// confirm that is intended before reusing flag across launches.
template <typename T, class OP, unsigned int SIZE>
__global__ void segscan_block_kernel_smem(const T *src, T *dst, int *flag) {
	const unsigned int tid = threadIdx.x;
	__shared__ T smem[SIZE];
	smem[tid] = src[tid];
	dst[tid] = segscan_block<int,0, OP>(smem, flag, tid);
}
// Host reference: inclusive segmented scan. The running sum restarts
// wherever flag[i] != 0; flag[0] is irrelevant because position 0 always
// starts the first segment.
template<typename T>
void segmentedScanCpu( const T *src, T *dst, int *flag, const unsigned int size ) {
	// Fix: the old code wrote dst[0] even when size == 0 (out of bounds).
	if (size == 0)
		return;
	dst[0] = src[0];
	for ( unsigned int i=1; i<size; i++ ) {
		dst[i] = flag[i] ? src[i] : dst[i-1] + src[i];
	}
}
int main() {
	const int arraySize = 1024;
	int src[arraySize];
	int hd[arraySize]={0};
	int dst[arraySize] = { 0 };
	int dstCpu[arraySize] = { 0 };
	// Input: values 0..n-1 with a segment head every 4th element.
	for ( int i=0; i<arraySize; i++) {
		src[i] = i;
		hd[i] = (i % 4)==0 ? 1 : 0;
	}
	cudaError_t cudaStatus = segmentedScanTest(dst, src, hd, arraySize);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "addWithCuda failed!");
		return 1;
	}
	// Show the first 32 entries of input, flags, and GPU output.
	const char *fmt = "%4d";
	int col = 0;
	while (col < 32) { printf(fmt, src[col]); ++col; }
	puts("");
	col = 0;
	while (col < 32) { printf(fmt, hd[col]); ++col; }
	puts("");
	col = 0;
	while (col < 32) { printf(fmt, dst[col]); ++col; }
	puts("");
	// Compare the whole array against the CPU reference.
	segmentedScanCpu(src, dstCpu, hd, arraySize);
	for ( int i=0; i<arraySize; i++ ) {
		if ( dstCpu[i] != dst[i] ) {
			puts("compared... not ok");
			break;
		}
	}
	// cudaDeviceReset must be called before exiting in order for profiling and
	// tracing tools such as Nsight and Visual Profiler to show complete traces.
	cudaStatus = cudaDeviceReset();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaDeviceReset failed!");
		return 1;
	}
	return 0;
}
// Helper function for using CUDA to add vectors in parallel.
// Runs one segmented-scan kernel on the device.
//   c: host output buffer, a: host input values, b: host head flags,
//   size: element count (also used as the block size of the launch).
// Returns the first failing CUDA status, or cudaSuccess. All device
// buffers are released through the shared Error: exit path.
cudaError_t segmentedScanTest(int *c, const int *a, const int *b, unsigned int size) {
	int *dev_a = 0;
	int *dev_b = 0;
	int *dev_c = 0;
	cudaError_t cudaStatus;
	// Choose which GPU to run on, change this on a multi-GPU system.
	cudaStatus = cudaSetDevice(0);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,
				"cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
		goto Error;
	}
	// Device buffers: output, input values, head flags.
	cudaStatus = cudaMalloc((void**) &dev_c, size * sizeof(int));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		goto Error;
	}
	cudaStatus = cudaMalloc((void**) &dev_a, size * sizeof(int));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		goto Error;
	}
	cudaStatus = cudaMalloc((void**) &dev_b, size * sizeof(int));
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMalloc failed!");
		goto Error;
	}
	// Copy input vectors from host memory to GPU buffers.
	cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int),
			cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
	cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int),
			cudaMemcpyHostToDevice);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
	// Launch one block of `size` threads; alternate kernels kept for
	// experimentation with the other scan variants.
	// segscan_warp_kernel<int, AddOp<int> ><<<1, 32>>>(dev_a, dev_c, dev_b);
	//scan_warp_max_kernel<<<1, 32>>>(dev_b, dev_c);
	//segscan_warp2_kernel<<<1, 32>>>(dev_a, dev_c, dev_b);
	segscan_block_kernel<int, AddOp<int> ><<<1, size>>>(dev_a, dev_c, dev_b);
	//segscan_block_kernel_smem<int, 2048><<<1, size>>>(dev_a, dev_c, dev_b);
	// Check for any errors launching the kernel
	cudaStatus = cudaGetLastError();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, " launch failed: %s\n",
				cudaGetErrorString(cudaStatus));
		goto Error;
	}
	// cudaDeviceSynchronize waits for the kernel to finish, and returns
	// any errors encountered during the launch.
	cudaStatus = cudaDeviceSynchronize();
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr,
				"cudaDeviceSynchronize returned error code %d after launching addKernel!\n",
				cudaStatus);
		goto Error;
	}
	// Copy output vector from GPU buffer to host memory.
	cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int),
			cudaMemcpyDeviceToHost);
	if (cudaStatus != cudaSuccess) {
		fprintf(stderr, "cudaMemcpy failed!");
		goto Error;
	}
	// Shared cleanup for both the success and every failure path.
	Error: cudaFree(dev_c);
	cudaFree(dev_a);
	cudaFree(dev_b);
	return cudaStatus;
}
|
9,519 | #include "includes.h"
// One thread per pixel: smoothness weights w = 1 / (|grad|^(2 - p) + eps),
// with fast paths for the exact norms p == 2 (uniform), 1 and 0.
__global__ void Compute_weightx_weighty2_Kernel(float* weightx, float* weighty, const float* absIx, const float* absIy, int nPixels, float norm_for_smooth_term, float eps)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	if (idx >= nPixels)
		return;
	const float ax = absIx[idx];
	const float ay = absIy[idx];
	if (norm_for_smooth_term == 2)
	{
		// Quadratic penalty: constant weights.
		weightx[idx] = 1.0f;
		weighty[idx] = 1.0f;
	}
	else if (norm_for_smooth_term == 1)
	{
		weightx[idx] = 1.0f / (ax + eps);
		weighty[idx] = 1.0f / (ay + eps);
	}
	else if (norm_for_smooth_term == 0)
	{
		weightx[idx] = 1.0f / (ax * ax + eps);
		weighty[idx] = 1.0f / (ay * ay + eps);
	}
	else
	{
		// General fractional norm.
		weightx[idx] = 1.0f / (pow(ax, 2.0f - norm_for_smooth_term) + eps);
		weighty[idx] = 1.0f / (pow(ay, 2.0f - norm_for_smooth_term) + eps);
	}
}
9,520 | #include <bits/stdc++.h>
#include <cuda.h>
#include <fstream>
#define num_threads 1024
using namespace std;
// One BFS level: each thread takes one frontier vertex v and relaxes its
// CSR neighbours edges[startpos[v] .. startpos[v] + ad_siz[v] - 1].
// atomicCAS claims unvisited vertices (-1) exactly once; claimed vertices
// are appended to the next frontier via atomicAdd on next_que_size.
__global__ void level_bfs(int * que , int que_size , int *next_que , int *next_que_size , int *distance , int * ad_siz , int* edges ,int * startpos )
{
	int tid = blockIdx.x * blockDim.x + threadIdx.x;
	if(tid<que_size)
	{
		int v = que[tid];
		// Fix: iterate ad_siz[v] edges (`<`, not `<=`), and index the
		// distance/queue by the NEIGHBOUR vertex edges[i], not by the
		// edge-array position i (edges[] was previously never read).
		for(int i = startpos[v] ; i < startpos[v] + ad_siz[v] ; i++ )
		{
			int u = edges[i];
			if(atomicCAS(&(distance[u]) , -1 , distance[v] + 1) == -1)
			{
				int pos = atomicAdd(next_que_size , 1);
				next_que[pos] = u;
			}
		}
	}
}
// Level-synchronous BFS driver: reads a CSR graph (edges, startpos,
// ad_siz) from the file named by argv[1], runs BFS from vertex 0, and
// prints each vertex's distance (-1 = unreachable).
int main(int argc, char *argv[])
{
	ifstream input(argv[1]);
	int num_vertices , num_edges;
	input>>num_vertices;
	input>>num_edges;
	int *edges = (int*)malloc(num_edges*sizeof(int));
	int *startpos = (int*)malloc(num_vertices*sizeof(int));
	int *ad_siz = (int*)malloc(num_vertices*sizeof(int));
	int *dist = (int*)malloc(num_vertices*sizeof(int));
	int *que = (int*)malloc(num_vertices*sizeof(int));
	int *que_size =(int*)malloc(sizeof(int));
	int *next_que_size =(int*)malloc(sizeof(int));
	for(int i=0;i<num_edges;i++)
	{
		input>>edges[i];
	}
	for(int i =0; i<num_vertices ; i++)
	{
		input>>startpos[i];
	}
	for(int i =0; i<num_vertices ; i++)
	{
		input>>ad_siz[i];
	}
	// -1 marks "not visited"; source vertex 0 seeds the frontier.
	for(int i = 0 ; i < num_vertices ; i++)
		dist[i] = -1;
	dist[0] = 0;
	que[0] = 0;
	*que_size = 1;
	*next_que_size = 0;
	int *d_dist , *d_edges ,*d_start_pos , *d_ad_siz , *d_que, *d_next_que , *d_que_size , *d_next_que_size;
	cudaMalloc((void**)&d_dist , num_vertices*sizeof(int) );
	cudaMalloc((void**)&d_start_pos , num_vertices*sizeof(int) );
	cudaMalloc((void**)&d_ad_siz , num_vertices*sizeof(int) );
	cudaMalloc((void**)&d_que , num_vertices*sizeof(int) );
	cudaMalloc((void**)&d_next_que , num_vertices*sizeof(int) );
	cudaMalloc((void**)&d_edges , num_edges*sizeof(int) );
	cudaMalloc((void**)&d_que_size , sizeof(int) );
	cudaMalloc((void**)&d_next_que_size , sizeof(int) );
	cudaMemcpy(d_dist , dist , num_vertices*sizeof(int) , cudaMemcpyHostToDevice );
	cudaMemcpy(d_start_pos , startpos , num_vertices*sizeof(int) , cudaMemcpyHostToDevice );
	cudaMemcpy(d_ad_siz , ad_siz , num_vertices*sizeof(int) , cudaMemcpyHostToDevice );
	cudaMemcpy(d_que , que , num_vertices*sizeof(int) , cudaMemcpyHostToDevice );
	// Fix: the edge array has num_edges entries, not num_vertices.
	cudaMemcpy(d_edges , edges , num_edges*sizeof(int) , cudaMemcpyHostToDevice );
	cudaMemcpy(d_que_size , que_size , sizeof(int) , cudaMemcpyHostToDevice );
	cudaMemcpy(d_next_que_size , next_que_size , sizeof(int) , cudaMemcpyHostToDevice );
	// Process one frontier per iteration until it is empty.
	while(*que_size>0)
	{
		long num_blocks = (*que_size+num_threads-1)/num_threads;
		level_bfs<<<num_blocks , num_threads >>>(d_que , *que_size , d_next_que , d_next_que_size , d_dist , d_ad_siz , d_edges , d_start_pos);
		// Pull back the next frontier's size (implicitly syncs), then
		// reset the device counter to 0 for the following level.
		cudaMemcpy( que_size , d_next_que_size , sizeof(int) , cudaMemcpyDeviceToHost );
		cudaMemcpy( d_next_que_size , next_que_size , sizeof(int) , cudaMemcpyHostToDevice );
		swap(d_next_que , d_que);
	}
	cudaMemcpy( dist , d_dist , num_vertices*sizeof(int) , cudaMemcpyDeviceToHost );
	for(int i = 0 ; i < num_vertices ; i++)
	{
		cout<<i<<": "<<dist[i]<<endl;
	}
	// Fix: release device and host allocations (previously leaked).
	cudaFree(d_dist); cudaFree(d_start_pos); cudaFree(d_ad_siz);
	cudaFree(d_que); cudaFree(d_next_que); cudaFree(d_edges);
	cudaFree(d_que_size); cudaFree(d_next_que_size);
	free(edges); free(startpos); free(ad_siz);
	free(dist); free(que); free(que_size); free(next_que_size);
	return 0;
}
9,521 | #include <cuda.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <math.h>
#define TILE_WIDTH 16
// Tiled matrix multiply P = M * N with shared-memory tiles.
// M is j x k, N is k x l, P is j x l (all row-major).
// Launch with TILE_WIDTH x TILE_WIDTH blocks; grid.x must cover the l
// columns and grid.y the j rows. Out-of-range tile entries are zero-padded
// so partial edge tiles are handled correctly.
__global__ void TiledMatrixMulKernel(float* M, float* N, float* P, int j, int k, int l)
{
	__shared__ float ds_M[TILE_WIDTH][TILE_WIDTH];
	__shared__ float ds_N[TILE_WIDTH][TILE_WIDTH];
	int bx = blockIdx.x; int by = blockIdx.y;
	int tx = threadIdx.x; int ty = threadIdx.y;
	int Row = by * TILE_WIDTH + ty;   // output row this thread computes
	int Col = bx * TILE_WIDTH + tx;   // output column this thread computes
	float Pvalue = 0;
	// Loop over the M and N tiles required to compute the P element
	for (int ph = 0; ph < (k - 1)/TILE_WIDTH + 1; ++ph) {
		// Collaborative loading of M and N tiles into shared memory
		if (Row < j && ph * TILE_WIDTH + tx < k) {
			ds_M[ty][tx] = M[Row*k + ph*TILE_WIDTH + tx];
		} else {
			ds_M[ty][tx] = 0.0;   // zero-pad outside the matrix
		}
		if (Col < l && ph * TILE_WIDTH + ty < k) {
			ds_N[ty][tx] = N[(ph*TILE_WIDTH + ty)*l + Col];
		} else {
			ds_N[ty][tx] = 0.0;
		}
		__syncthreads();   // tiles fully loaded before use
		for (int i = 0; i < TILE_WIDTH; ++i) {
			Pvalue += ds_M[ty][i] * ds_N[i][tx];
		}
		__syncthreads();   // done reading before the next tile overwrites
	}
	if (Row < j && Col < l) {
		P[Row * l + Col] = Pvalue;
	}
}
// Fill a row-major height x width matrix with pseudo-random values in [0, 100).
void generateMat(float *m, size_t height, size_t width){
	int r, c;
	for (r = 0; r < height; r++)
		for (c = 0; c < width; c++)
			m[r*width + c] = rand() % 100;
}
// Print a row-major height x width matrix, one row per line, with a
// trailing blank line.
void printMat(float *m, size_t height, size_t width) {
	for (int r = 0; r < height; r++){
		for (int c = 0; c < width; c++) {
			printf("%f ", m[r*width + c]);
		}
		printf("\n");
	}
	printf("\n");
}
// Tiled matmul driver. Usage: <j> <k> <l> for M (j x k) * N (k x l).
int main(int argc, char**argv){
	int j,k,l;
	// Fix: three dimension arguments are read, so argc must be >= 4
	// (the old `argc >= 3` check dereferenced argv[3] == NULL).
	if (argc >= 4) {
		j = strtol(argv[1], NULL, 10);
		k = strtol(argv[2], NULL, 10);
		l = strtol(argv[3], NULL, 10);
	} else {
		return 0;
	}
	srand(time(NULL));
	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	float * m, *n, *p;
	float * d_m, *d_p, *d_n;
	long mSize = j * k * sizeof(float);
	long nSize = k * l * sizeof(float);
	long pSize = j * l * sizeof(float);
	cudaMalloc((void**)&d_m, mSize);
	cudaMalloc((void**)&d_n, nSize);
	cudaMalloc((void**)&d_p, pSize);
	m = (float *)malloc(mSize);
	n = (float *)malloc(nSize);
	p = (float *)malloc(pSize);
	generateMat(m, j, k);
	generateMat(n, k, l);
	cudaMemcpy(d_m, m, mSize, cudaMemcpyHostToDevice);
	cudaMemcpy(d_n, n, nSize, cudaMemcpyHostToDevice);
	dim3 threadDims(TILE_WIDTH, TILE_WIDTH, 1);
	// Fix: grid.x must cover the l output COLUMNS and grid.y the j output
	// ROWS to match the kernel's Col = bx*TILE+tx (< l) and
	// Row = by*TILE+ty (< j); the two were swapped.
	dim3 blockDims(ceil(l * 1.0/ TILE_WIDTH), ceil(j * 1.0/TILE_WIDTH), 1);
	TiledMatrixMulKernel<<<blockDims, threadDims>>>(d_m, d_n, d_p, j, k, l);
	// cudaThreadSynchronize is deprecated; cudaDeviceSynchronize replaces it.
	cudaDeviceSynchronize();
	cudaMemcpy(p, d_p, pSize, cudaMemcpyDeviceToHost);
	// Fix: cudaGetLastError clears the error, so capture it once instead
	// of calling it a second time (which would print cudaSuccess).
	cudaError_t err = cudaGetLastError();
	if (err != cudaSuccess) {
		printf("Error %d\n", err);
		exit(-1);
	}
	cudaEventRecord(stop, 0);
	cudaEventSynchronize(stop);
	float elapsedTime;
	cudaEventElapsedTime(&elapsedTime, start, stop);
	cudaEventDestroy(start);
	cudaEventDestroy(stop);
	printf("The elapsed time is %f s\n", elapsedTime / 1000.0);
	free(n); free(m); free(p);
	cudaFree(d_n);
	cudaFree(d_m);
	cudaFree(d_p);
}
|
9,522 | /*
Write a CUDA program that includes a host function to compute the minimum value in a N dimensional square matrix, N being 16.
*/
// Author: Naoki Atkins
#include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <stdlib.h>
#include <limits.h>
#define N 16
// Produces a pseudo-random value seeded from the wall clock plus `rank`,
// shaped into roughly the 1000+ range (two-digit base, x10, +1000, +rank).
__host__ int generator(int rank){
	srand(time(NULL) + rank);
	int value = rand();
	if (value < 0) {          // unreachable in practice: rand() is non-negative
		value = abs(value);
	} else if (value < 10) {
		value += 10;          // force at least two digits
	} else if (value >= 100) {
		value %= 100;         // clip to two digits
	}
	value = value * 10 + 1000 + rank;
	return value;
}
// Linear scan over the flattened N x N matrix for its smallest element.
__host__ int computeMin(int* matrix) {
	int best = INT_MAX;
	for (int idx = 0; idx < N * N; ++idx) {
		if (matrix[idx] < best)
			best = matrix[idx];
	}
	return best;
}
// Builds a random N x N matrix on the host and reports its minimum.
int main(int argc, char** argv) {
	int i, j;
	int offset;
	int *mat = (int *)malloc(N * N * sizeof(int));
	// Fix: malloc was unchecked before being written through.
	if (mat == NULL) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	printf("Generating a random matrix...\n");
	for (i = 0; i < N; ++i) {
		for (j = 0; j < N; ++j) {
			offset = i * N + j;
			mat[offset] = generator(offset);
			printf("%d ", mat[offset]);
		}
		printf("\n");
	}
	printf("Calculating the minimum...\n");
	int min;
	min = computeMin(mat);
	printf("Min is %d\n", min);
	free(mat);  /* fix: matrix was leaked */
	return 0;
}
|
9,523 | #include <stdio.h>
#include <stdlib.h>
#include <chrono>
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#define BlockSize 28
const int INF = 1000000000;
void input(char *inFileName);
void output(char *outFileName);
void block_FW(int B);
int ceil(int a, int b);
void cal(char* d,size_t pitch,int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height,cudaStream_t stream);
void cpu(int B, int Round, int block_start_x, int block_start_y, int block_width, int block_height);
int n, m;
static int* d;
double io_time = 0;
double comp_time = 0;
double mem_time = 0;
// Blocked Floyd-Warshall driver. Usage: <input file> <output file>.
// Reports compute / memcpy / file-IO times on stdout.
int main(int argc, char* argv[]) {
	// Fix: argv[1] and argv[2] were read without checking argc.
	if (argc < 3) {
		fprintf(stderr, "usage: %s input_file output_file\n", argv[0]);
		return 1;
	}
	auto io_beg = std::chrono::high_resolution_clock::now();
	input(argv[1]);
	auto io_end = std::chrono::high_resolution_clock::now();
	io_time += std::chrono::duration<double>(io_end-io_beg).count();
	int B = BlockSize;
	block_FW(B);
	io_beg = std::chrono::high_resolution_clock::now();
	output(argv[2]);
	io_end = std::chrono::high_resolution_clock::now();
	io_time += std::chrono::duration<double>(io_end-io_beg).count();
	std::cout<< comp_time <<" "<<mem_time<<" "<<io_time;
	return 0;
}
// Reads a binary graph file: n, m, then m (src, dst, weight) int triples,
// and builds the n x n distance matrix (0 on the diagonal, INF elsewhere).
void input(char* infile) {
	FILE* file = fopen(infile, "rb");
	// Fix: fopen/fread results were unchecked; a bad file caused UB.
	if (file == NULL) {
		fprintf(stderr, "cannot open input file\n");
		exit(1);
	}
	if (fread(&n, sizeof(int), 1, file) != 1 ||
	    fread(&m, sizeof(int), 1, file) != 1) {
		fprintf(stderr, "malformed input header\n");
		exit(1);
	}
	d = new int[n*n];
	for (int i = 0; i < n; ++ i) {
		for (int j = 0; j < n; ++ j) {
			if (i == j) {
				d[i*n+j] = 0;
			} else {
				d[i*n+j] = INF;
			}
		}
	}
	int pair[3];
	for (int i = 0; i < m; ++ i) {
		if (fread(pair, sizeof(int), 3, file) != 3) {
			fprintf(stderr, "malformed edge record\n");
			exit(1);
		}
		d[pair[0]*n+pair[1]] = pair[2];
	}
	fclose(file);
}
// Clamps every distance that overflowed past INF back to INF, then dumps
// the n x n matrix to the file as raw ints.
void output(char *outFileName) {
	FILE *outfile = fopen(outFileName, "w");
	for (int idx = 0; idx < n * n; ++idx) {
		if (d[idx] >= INF)
			d[idx] = INF;
	}
	fwrite(d, sizeof(int), n*n, outfile);
	fclose(outfile);
}
// Smallest integer >= a / b (assumes b > 0 and a >= 0).
int ceil(int a, int b) {
	int q = (a + b - 1) / b;
	return q;
}
__global__ void kernel(char* d,size_t pitch,int block_x,
int block_y,int k,int n,int B,int round);
__global__ void kernel_simple(char* d,size_t pitch,int block_x,
int block_y,int k,int n,int B);
// Blocked Floyd-Warshall on the GPU with B x B tiles.
// Each round r: phase 1 updates the pivot block (r, r); phases 2/3 update
// the pivot row/column strips; phase 4 updates the four remaining
// quadrants. Uses a pitched device allocation padded to round*B rows so
// every tile is full-sized. Also accumulates comp/mem timing globals.
void block_FW(int B) {
	int round = ceil(n, B);
	char *device_d;
	size_t pitch;
	// cudaMalloc(&device_d,sizeof(int)*n*n);
	// cudaMemcpy(device_d,d,sizeof(int)*n*n,cudaMemcpyHostToDevice);
	cudaMallocPitch(&device_d,&pitch,sizeof(int)*round*B,round*B);
	auto mem_beg = std::chrono::high_resolution_clock::now();
	// Only the live n x n region is copied; the padding stays untouched.
	cudaMemcpy2D(device_d,pitch,d,sizeof(int)*n,sizeof(int)*n,n,cudaMemcpyHostToDevice);
	auto mem_end = std::chrono::high_resolution_clock::now();
	// cudaStream_t streams[4];
	// for(int i=0;i<4;i++)
	// 	cudaStreamCreate(streams+i);
	auto comp_beg = std::chrono::high_resolution_clock::now();
	for (int r = 0; r < round; ++r) {
		dim3 dimBlock(B,B);
		dim3 dimGrid(1,1);
		/* Phase 1: pivot block, one k step per launch */
		for (int k = r * B; k < (r+1) * B && k < n; ++k)
			kernel_simple<<<dimGrid,dimBlock>>>(device_d,pitch,r,r,k,n,B);
		// First round: fold the H2D copy time (overlapped with phase 1)
		// into mem_time; this `mem_end` shadows the outer one on purpose.
		if(r==0){
			auto mem_end = std::chrono::high_resolution_clock::now();
			mem_time += std::chrono::duration<double>(mem_end-mem_beg).count();
		}
		// for (int k = r * B; k < (r+1) * B && k < n; ++k){
		// 	for(int i=0;i<round;i++){
		// 		if(i!=r){
		// 			kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,i,r,0,n,B,r);
		// 		}
		// 	}
		// 	for(int j=0;j<round;j++){
		// 		if(j!=r){
		// 			kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,r,j,0,n,B,r);
		// 		}
		// 	}
		// }
		/* Phases 2/3: the pivot row and pivot column strips */
		dimGrid = dim3(1,r);
		kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,r,0,0,n,B,r);
		dimGrid = dim3(1,round-r-1);
		kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,r,r+1,0,n,B,r);
		dimGrid = dim3(r,1);
		kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,0,r,0,n,B,r);
		dimGrid = dim3(round-r-1,1);
		kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,r+1,r,0,n,B,r);
		// for (int k = r * B; k < (r+1) * B && k < n; ++k) {
		// 	for(int i=0;i<round;i++){
		// 		for(int j=0;j<round;j++){
		// 			if(i!=r&&j!=r){
		// 				kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,i,j,0,n,B,r);
		// 			}
		// 		}
		// 	}
		// }
		/* Phase 4: the four remaining quadrants */
		dimGrid = dim3(r,r);
		kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,0,0,0,n,B,r);
		dimGrid = dim3(round-r-1,r);
		kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,r+1,0,0,n,B,r);
		dimGrid = dim3(r,round-r-1);
		kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,0,r+1,0,n,B,r);
		dimGrid = dim3(round-r-1,round-r-1);
		kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,r+1,r+1,0,n,B,r);
		// dimGrid = dim3(round,round);
		// kernel<<<dimGrid,dimBlock,0>>>(device_d,pitch,0,0,0,n,B,r);
		// std::cout<< std::chrono::duration<double>(comp_end-comp_beg).count()<<"\n";
	}
	auto comp_end = std::chrono::high_resolution_clock::now();
	comp_time += std::chrono::duration<double>(comp_end-comp_beg).count();
	// cudaStreamSynchronize(0);
	mem_beg = std::chrono::high_resolution_clock::now();
	// Copy only the live n x n region back (cudaMemcpy2D syncs first).
	cudaMemcpy2D(d,sizeof(int)*n,device_d,pitch,sizeof(int)*n,n,cudaMemcpyDeviceToHost);
	mem_end = std::chrono::high_resolution_clock::now();
	mem_time += std::chrono::duration<double>(mem_end-mem_beg).count();
	// cudaMemcpy(d,device_d,sizeof(int)*n*n,cudaMemcpyDeviceToHost);
}
__device__ inline int gmin(int a,int b){
    // Minimum of two ints; the ternary compiles to a predicated select,
    // so this is as branch-free as the original arithmetic trick.
    return (a > b) ? b : a;
}
// Blocked APSP (Floyd–Warshall-style) min-plus relaxation: updates one
// B x B tile of the pitched int matrix `d` against pivot block `r`, i.e.
// d[i][j] = min(d[i][j], d[i][k] + d[k][j]) for all k in pivot block r.
// (block_x, block_y) offsets blockIdx so one launch can cover a rectangle
// of tiles. Parameter `k` is unused (shadowed by the loop variable below).
// NOTE(review): no i<n / j<n guards — assumes n is padded to a multiple of
// B and blockDim == (BlockSize, BlockSize); confirm at the launch sites.
__global__ void kernel(char* d,size_t pitch,int block_x,
int block_y,int k,int n,int B,int r){
const int i = (block_x+blockIdx.x)*B+threadIdx.x;
const int j = (block_y+blockIdx.y)*B+threadIdx.y;
// const int idx = threadIdx.y*blockDim.x*threadIdx.x;
// p: running distances for this tile; d_i_k / d_k_j: pivot column/row
// tiles; d_i_j: original values so the write-back can be conditional.
__shared__ int p[BlockSize][BlockSize];
__shared__ int d_i_k[BlockSize][BlockSize];
__shared__ int d_k_j[BlockSize][BlockSize];
__shared__ int d_i_j[BlockSize][BlockSize];
int* d_i = (int*)(d+pitch*i);
p[threadIdx.x][threadIdx.y] = d_i[j];
d_i_j[threadIdx.x][threadIdx.y] = p[threadIdx.x][threadIdx.y];
d_i_k[threadIdx.x][threadIdx.y] = d_i[r*B+threadIdx.y];
d_k_j[threadIdx.x][threadIdx.y] = ((int*)(d+pitch*(r*B+threadIdx.x)))[j];
// Pivot block may be partial when n is not a multiple of B.
const int k_max = gmin((r+1) * B , n);
__syncthreads();
#pragma unroll
for (int k = r * B; k < k_max; ++k) {
// int* d_k = (int*)(d+pitch*k);
p[threadIdx.x][threadIdx.y] = gmin( p[threadIdx.x][threadIdx.y],
d_i_k[threadIdx.x][k-r*B]+d_k_j[k-r*B][threadIdx.y] );
}
// Write back only on improvement to avoid redundant global stores.
if(d_i_j[threadIdx.x][threadIdx.y]>p[threadIdx.x][threadIdx.y]){
d_i[j]=p[threadIdx.x][threadIdx.y];
}
// int new_d = ((int*)(d+pitch*(i)))[k]+((int*)(d+pitch*k))[j];
// if(((int*)(d+pitch*i))[j]>new_d && j<n && i<n){
// ((int*)(d+pitch*i))[j]=new_d;
// }
}
// Single-pivot min-plus relaxation for one B x B tile:
// d[i][j] = min(d[i][j], d[i][k] + d[k][j]) for the fixed pivot index k.
// NOTE(review): the shared-memory loads below run only in the
// threadIdx.x==0 and threadIdx.x==1 lanes, so blockDim.x must be >= 2;
// also no i<n / j<n guards — assumes a padded matrix. Confirm at launch.
__global__ void kernel_simple(char* d,size_t pitch,int block_x,
int block_y,int k,int n,int B){
const int i = block_x*B+threadIdx.x;
const int j = block_y*B+threadIdx.y;
// const int idx = threadIdx.y*blockDim.x*threadIdx.x;
__shared__ int p[BlockSize][BlockSize];
// d_k_j: pivot row k restricted to this tile's columns;
// d_i_k: pivot column k for this tile's rows.
__shared__ int d_k_j[BlockSize];
__shared__ int d_i_k[BlockSize];
if(threadIdx.x==0)
d_k_j[threadIdx.y] = ((int*)(d+pitch*k))[j];
else if(threadIdx.x==1)
d_i_k[threadIdx.y] = ((int*)(d+pitch*(block_x*B+threadIdx.y)))[k];
int* d_i = (int*)(d+pitch*i);
p[threadIdx.x][threadIdx.y] = d_i[j];
__syncthreads();
// int* d_k_j = (int*)(d+pitch*k);
int new_d = d_i_k[threadIdx.x]+d_k_j[threadIdx.y];
if(p[threadIdx.x][threadIdx.y]>new_d){
d_i[j]=new_d;
}
} |
9,524 | #include<iostream>
__global__ void kernel(void) {
    // Intentionally empty; main() launches it only to exercise the runtime.
}
// Launches a no-op kernel, then enumerates every CUDA device and prints a
// short property summary. Returns 0.
int main()
{
    kernel<<<1,1>>>();
    // Launches are asynchronous; wait so any launch failure surfaces early.
    cudaDeviceSynchronize();
    printf("Hello world!\n");
    int count;
    cudaGetDeviceCount(&count);
    printf("Count:\t%d\n", count);
    cudaDeviceProp prop;
    for( int i=0; i<count; i++) {
        cudaGetDeviceProperties(&prop,i);
        //print info
        printf("Name:\t%s\n", prop.name);
        // Fixed: the memory-size fields are size_t — %d was undefined
        // behavior on LP64; use %zu.
        printf("Global Mem:\t%zu\n", prop.totalGlobalMem);
        printf("Const Mem:\t%zu\n", prop.totalConstMem);
        printf("Shared Mem:\t%zu\n", prop.sharedMemPerBlock);
        printf("Mem Pitch:\t%zu\n", prop.memPitch);
        printf("Max Threads:\t%d\n", prop.maxThreadsPerBlock);
        // Fixed: capability is conventionally major.minor (args were
        // swapped) and the label was misspelled.
        printf("Compute Capability:\t%d.%d\n", prop.major, prop.minor);
        // Fixed: the format string had two %d but only one argument (UB).
        printf("Processor Num:\t%d\n", prop.multiProcessorCount);
    }
    return 0;
} |
9,525 | #include "includes.h"
__global__ void Bprop2(const float* out, const float* layer1, float* dsyn2, const int count, const float alpha)
{
    // Accumulate one weight-gradient contribution into the 256x4 matrix
    // dsyn2 for sample `count`: grad[row][col] = out[col]*layer1[row]*alpha.
    const int row = blockDim.y * blockIdx.y + threadIdx.y; // hidden unit (256)
    const int col = blockDim.x * blockIdx.x + threadIdx.x; // output unit (4)
    const float contrib = out[col] * layer1[256 * count + row] * alpha;
    // atomicAdd: several launches/samples may target the same weight.
    atomicAdd(&dsyn2[row * 4 + col], contrib);
} |
9,526 | #define t_max 1
#define t 1
/*
(u[0][0][0][1][0]=((((u[1][0][0][0][0]+(u[-1][0][0][0][0]+u[0][1][0][0][0]))+(u[0][-1][0][0][0]+(u[0][0][1][0][0]+u[0][0][-1][0][0])))*0.25)-u[0][0][0][0][0]))
*/
// Auto-generated 7-point Laplacian-style stencil sweep with time-skewed
// index arithmetic (t is pinned to 1 by the #defines above).
// Writes u_0_1[center] = 0.25*(sum of the 6 axis neighbours of u_0_0)
//                        - u_0_0[center] for this thread's grid point.
// NOTE(review): u_0_1_out and several locals (p_idx_*_max, size_1_1) are
// unused generator artifacts; left untouched on purpose.
__global__ void laplacian(float * * u_0_1_out, float * u_0_0, float * u_0_1, int x_max, int y_max, int z_max)
{
//float * const u__u_0[16] = { u_0_0, u_0_1 } ;
int _idx0;
int _idx1;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int idx_1_2;
int p_idx_x;
int p_idx_x_max;
int p_idx_y;
int p_idx_y_max;
int p_idx_z;
int p_idx_z_max;
int size_1_1;
int size_1_2;
//int t;
int tmp;
/*
Initializations: blockIdx.y is folded so it encodes both a y-tile and a
z-tile (idx_1_2 = z tile, tmp = y tile).
*/
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x));
p_idx_x_max=(p_idx_x+1);
p_idx_y=(threadIdx.y+(tmp*blockDim.y));
p_idx_y_max=(p_idx_y+1);
p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
p_idx_z_max=(p_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0])
*/
/* _idx0 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+2) */
_idx0=(((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+2);
/* _idx1 = ((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x) */
_idx1=(_idx0-2);
/* _idx2 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+2)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+4)*t))+p_idx_x)+1) */
_idx2=(((_idx1+x_max)+(2*t))+1);
/* _idx3 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+(((((2*p_idx_z)+2)*t)+p_idx_y)*x_max))+(((4*p_idx_z)+4)*(t*t)))+((2*p_idx_y)*t))+p_idx_x)+1) */
_idx3=(((_idx1-x_max)-(2*t))+1);
/* _idx4 = (((((((((p_idx_z+2)*x_max)+(((2*p_idx_z)+4)*t))*y_max)+((((((2*p_idx_z)+4)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+8)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */
_idx4=((((_idx3+((x_max+(2*t))*y_max))+(((2*t)+1)*x_max))+(4*(t*t)))+(2*t));
/* _idx5 = ((((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(((((2*p_idx_z)*t)+p_idx_y)+1)*x_max))+((4*p_idx_z)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */
_idx5=((((_idx1+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
/* _idx6 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */
_idx6=(_idx1+1);
u_0_1[_idx6]=((((u_0_0[_idx0]+(u_0_0[_idx1]+u_0_0[_idx2]))+(u_0_0[_idx3]+(u_0_0[_idx4]+u_0_0[_idx5])))*0.25)-u_0_0[_idx6]);
}
}
// Auto-generated companion to `laplacian`: seeds u_0_0 with 0.1 at the 7
// stencil positions of this thread's point and marks u_0_1 with 1.1 at the
// centre. Index expressions mirror the generator's time-skewed layout.
// NOTE(review): u__u_0, p_idx_*_max and size_1_1 are unused generator
// artifacts; left untouched on purpose.
__global__ void initialize(float * u_0_0, float * u_0_1, int x_max, int y_max, int z_max)
{
float * const u__u_0[16] = { u_0_0, u_0_1 } ;
int _idx0;
int _idx1;
int _idx2;
int _idx3;
int _idx4;
int _idx5;
int _idx6;
int idx_1_2;
int p_idx_x;
int p_idx_x_max;
int p_idx_y;
int p_idx_y_max;
int p_idx_z;
int p_idx_z_max;
int size_1_1;
int size_1_2;
//int t;
int tmp;
/*
Initializations: blockIdx.y encodes both a y-tile and a z-tile.
*/
size_1_1=(y_max/blockDim.y);
size_1_2=(z_max/blockDim.z);
idx_1_2=(blockIdx.y/size_1_2);
tmp=(blockIdx.y-(idx_1_2*size_1_2));
p_idx_x=(threadIdx.x+(blockDim.x*blockIdx.x));
p_idx_x_max=(p_idx_x+1);
p_idx_y=(threadIdx.y+(tmp*blockDim.y));
p_idx_y_max=(p_idx_y+1);
p_idx_z=(threadIdx.z+(idx_1_2*blockDim.z));
p_idx_z_max=(p_idx_z+1);
/*
Implementation
*/
/*
for t = 1..t_max by 1 parallel 1 <level 0> schedule { ... }
*/
//for (t=1; t<=t_max; t+=1)
{
/* Index bounds calculations for iterators in p[t=t, s=(1, 1, 1)][0] */
/*
u[t=(t+1), s=p[t=?, s=?][0]][0]=stencil(u[t=t, s=p[t=?, s=?][0]][0])
*/
/* _idx0 = ((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x) */
_idx0=((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x);
u_0_0[_idx0]=0.1;
/* _idx1 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+(((((2*p_idx_z)+2)*t)+p_idx_y)*x_max))+(((4*p_idx_z)+4)*(t*t)))+((2*p_idx_y)*t))+p_idx_x)+1) */
_idx1=(((_idx0-x_max)-(2*t))+1);
u_0_0[_idx1]=0.1;
/* _idx2 = ((((((((p_idx_z*x_max)+((2*p_idx_z)*t))*y_max)+(((((2*p_idx_z)*t)+p_idx_y)+1)*x_max))+((4*p_idx_z)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */
_idx2=((((_idx0+((( - x_max)-(2*t))*y_max))-((2*t)*x_max))-(4*(t*t)))+1);
u_0_0[_idx2]=0.1;
/* _idx3 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */
_idx3=(_idx0+1);
u_0_0[_idx3]=0.1;
/* _idx4 = (((((((((p_idx_z+2)*x_max)+(((2*p_idx_z)+4)*t))*y_max)+((((((2*p_idx_z)+4)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+8)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+1) */
_idx4=(((_idx3+((x_max+(2*t))*y_max))+((2*t)*x_max))+(4*(t*t)));
u_0_0[_idx4]=0.1;
/* _idx5 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+2)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+4)*t))+p_idx_x)+1) */
_idx5=((_idx3+x_max)+(2*t));
u_0_0[_idx5]=0.1;
/* _idx6 = (((((((((p_idx_z+1)*x_max)+(((2*p_idx_z)+2)*t))*y_max)+((((((2*p_idx_z)+2)*t)+p_idx_y)+1)*x_max))+(((4*p_idx_z)+4)*(t*t)))+(((2*p_idx_y)+2)*t))+p_idx_x)+2) */
_idx6=(_idx0+2);
u_0_0[_idx6]=0.1;
u_0_1[_idx3]=1.1;
}
}
|
9,527 | #include <iostream>
//3d_array.cu
#include <stdlib.h>
#include <stdio.h>
// Device code
__global__ void MyKernel(cudaPitchedPtr devPitchedPtr,cudaExtent extent)
{
    // One thread per (x, y) cell, one block per z-slice: each writes the
    // float x + 10y + 100z into the pitched 3D allocation.
    const int x = threadIdx.x;
    const int y = threadIdx.y;
    const int z = blockIdx.x;
    printf("%d %d %d\n", x, y, z);
    // Rows are padded to `pitch` bytes; one z-slice spans pitch*height bytes.
    char * base = (char*) devPitchedPtr.ptr;
    const size_t rowPitch = devPitchedPtr.pitch;
    const size_t slicePitch = rowPitch * extent.height;
    float * cell = (float*)(base + z * slicePitch + y * rowPitch + x * sizeof(float));
    *cell = x + (y*10) + (z*100);
}
#define X 10
#define Y 5
#define Z 3
// Demo of cudaMalloc3D/cudaMemcpy3D: fills a host X*Y*Z array, round-trips
// it through a pitched device allocation (MyKernel overwrites each cell with
// a coordinate-encoding value), and prints the result. Returns 0.
// NOTE(review): extent.width is X*sizeof(float) while the C array's
// contiguous axis is Z — the device thus interprets the buffer transposed
// relative to the [x][y][z] subscripts; confirm that is intended.
int main(void)
{
    cudaSetDevice(0);
    float array[X][Y][Z];
    float result[X][Y][Z];
    cudaError_t status = cudaSuccess;
    //initialise array
    for (int x = 0; x < X; x++)
    {
        for (int y = 0; y < Y; y++)
        {
            for (int z = 0; z < Z; z++)
            {
                array[x][y][z] = 1.0;
            }
        }
    }
    for (int x = 0; x < X; x++)
    {
        for (int y = 0; y < Y; y++)
        {
            for (int z = 0; z < Z; z++)
            {
                result[x][y][z] = 0.0;
            }
        }
    }
    //allocate memory on device for a 3D matrix (width = contiguous bytes)
    cudaExtent extent;
    extent.width = X *sizeof(float);
    extent.height = Y;
    extent.depth = Z;
    cudaPitchedPtr mem_device;
    status = cudaMalloc3D(&mem_device,extent);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Malloc: %s\n", cudaGetErrorString(status));
    }
    //copy memory to device
    cudaMemcpy3DParms p = { 0 };
    p.srcPtr = make_cudaPitchedPtr((void*)array, X * sizeof(float), X, Y);
    p.dstPtr = mem_device;
    p.extent = extent;
    p.kind = cudaMemcpyHostToDevice;
    status = cudaMemcpy3D(&p);
    if (status != cudaSuccess)
    {
        fprintf(stderr, "MemcpyHtD: %s\n", cudaGetErrorString(status));
    }
    //run 3d kernel: one block per z-slice, one thread per (x, y)
    dim3 block_dim(X, Y);
    MyKernel<<<Z, block_dim>>>(mem_device, extent);
    // Added: launches fail silently — check the launch, then wait so the
    // in-kernel printf output is flushed and execution errors surface.
    status = cudaGetLastError();
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Kernel launch: %s\n", cudaGetErrorString(status));
    }
    status = cudaDeviceSynchronize();
    if (status != cudaSuccess)
    {
        fprintf(stderr, "Kernel exec: %s\n", cudaGetErrorString(status));
    }
    //copy result array back to host
    cudaMemcpy3DParms q = {0};
    q.srcPtr = mem_device;
    q.dstPtr = make_cudaPitchedPtr((void*)result, X * sizeof(float), X, Y);
    q.extent = extent;
    q.kind = cudaMemcpyDeviceToHost;
    status = cudaMemcpy3D(&q);
    if(status != cudaSuccess)
    {
        fprintf(stderr, "MemcpyDtoH: %s\n", cudaGetErrorString(status));
    }
    for (int x = 0; x < X; x++)
    {
        for (int y = 0; y < Y; y++)
        {
            for (int z = 0; z < Z; z++)
            {
                printf("%f ", result[x][y][z]);
            }
            printf("\n");
        }
        printf("\n");
    }
    cudaFree(mem_device.ptr);
    return 0;
}
|
9,528 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Auto-generated floating-point stress kernel (the file header says "Do not
// modify" — documented only, logic untouched). Evaluates a fixed expression
// tree over the scalar inputs, fills var_23[0..var_2) inside the loop, and
// prints the final `comp` with full precision.
// NOTE(review): var_23 must be a device-accessible pointer; the driver below
// hands it a host malloc'd buffer, which the device cannot legally write.
__global__
void compute(float comp, float var_1,int var_2,float var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float* var_23,float var_24,float var_25,float var_26,float var_27,float var_28) {
if (comp >= +1.8035E-44f * var_1 - ldexpf(sinhf(+1.7007E6f), 2)) {
float tmp_1 = var_3 + -1.2161E24f;
comp = tmp_1 + var_4 / powf(coshf(sinhf(+1.0028E36f + (+1.4320E35f - var_5 / (var_6 + var_7)))), (-1.2913E35f / -1.0767E25f - sinf(var_8 + var_9)));
if (comp > (-1.9370E36f / coshf((var_10 + var_11)))) {
comp = var_12 * logf(var_13 / (var_14 + var_15 / -1.0214E-44f));
comp += var_16 * (var_17 / (var_18 * log10f(-1.8316E36f + var_19 / (var_20 * (var_21 - var_22)))));
}
for (int i=0; i < var_2; ++i) {
var_23[i] = (var_24 * fabsf(+1.6748E-43f));
comp += var_23[i] * log10f((+1.8275E-35f * +1.5333E34f));
}
if (comp == -1.2292E35f / (+1.4097E-14f * -1.3440E-43f / fabsf(var_25 - var_26 + +1.1116E-37f))) {
comp = (var_27 - logf(+1.7451E8f));
comp += (+1.0251E-44f * var_28);
}
}
printf("%.17g\n", comp);
}
// Allocates n floats (default 10, preserving the original contract) all
// initialized to v. Returns NULL on allocation failure; otherwise the
// caller owns the buffer and must free() it.
float* initPointer(float v, int n = 10) {
    float *ret = (float*) malloc(sizeof(float) * n);
    // Added: a failed malloc previously caused a NULL-deref in the loop.
    if (ret == NULL)
        return NULL;
    for (int i = 0; i < n; ++i)
        ret[i] = v;
    return ret;
}
// Driver for the generated `compute` kernel: parses 29 positional arguments
// and launches a single-thread kernel.
// NOTE(review): argv[1..29] are read without any argc check — invoking with
// fewer arguments is undefined behavior.
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
int tmp_3 = atoi(argv[3]);
float tmp_4 = atof(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
// NOTE(review): tmp_24 is host heap memory passed to the kernel as var_23;
// the device write loop in `compute` would fault unless unified/managed
// addressing applies — confirm the intended platform (cudaMalloc/managed
// memory would be the portable fix).
float* tmp_24 = initPointer( atof(argv[24]) );
float tmp_25 = atof(argv[25]);
float tmp_26 = atof(argv[26]);
float tmp_27 = atof(argv[27]);
float tmp_28 = atof(argv[28]);
float tmp_29 = atof(argv[29]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27,tmp_28,tmp_29);
cudaDeviceSynchronize();
return 0;
}
|
9,529 | // http://csweb.cs.wfu.edu/bigiron/LittleFE-CUDA-TrapezoidalRule/build/html/cudaAlg.html
// This program implements trapezoidal integration for a function
// f(x) over the interval [c,d] using N subdivisions. This program
// runs on a host and device (NVIDIA graphics chip with cuda
// certification). The function f(x) is implemented as a callable
// function on the device. The kernel computes the sums f(xi)+f(xi+deltaX).
// The host function computes of the individual sums computed on the
// device and multiplies by deltaX/2.
#include <iostream>
#include <ctime>
using namespace std;
#include <cuda.h>
#include <math_constants.h>
#include <cuda_runtime.h>
// Integrand, callable on the GPU device: f(a) = a^2 + 2a + 3.
// (Expression kept exactly as written — re-associating it would change
// float rounding and thus the computed integral bit pattern.)
__device__ float myfunction(float a)
{
return a*a+2.0f*a + 3.0f;
}
// kernel function to compute the summation used in the trapezoidal
// rule for numerical integration
// __global__ __device__ void integratorKernel(float *a, float c, float deltaX, int N)
__global__ void integratorKernel(float *a, float c, float deltaX, int N)
{
    // One thread per subinterval: slot i receives f(x_i) + f(x_i + deltaX);
    // the host later sums these and multiplies by deltaX/2 (trapezoid rule).
    const int slot = blockIdx.x * blockDim.x + threadIdx.x;
    if (slot >= N)
        return;
    const float left = c + (float)slot * deltaX;
    a[slot] = myfunction(left) + myfunction(left + deltaX);
}
// cudaIntegrate() is the host function that sets up the
// computation of the integral of f(x) over the interval
// [c,d].
// cudaIntegrate() computes the trapezoidal-rule integral of f(x) over
// [c, d] with N subdivisions: the kernel writes f(x_i) + f(x_i + deltaX)
// per slot and the host sums and scales by deltaX/2. Exits the process on
// any CUDA failure (matching the original error style).
__host__ float cudaIntegrate(float c, float d, int N)
{
    // deltaX
    float deltaX = (d-c)/N;
    // error code variable
    cudaError_t errorcode = cudaSuccess;
    // size of the arrays in bytes
    int size = N*sizeof(float);
    // allocate array on host and device
    float* a_h = (float *)malloc(size);
    float* a_d;
    if (( errorcode = cudaMalloc((void **)&a_d,size))!= cudaSuccess)
    {
        cout << "cudaMalloc(): " << cudaGetErrorString(errorcode) << endl;
        exit(1);
    }
    // do calculation on device; ceil-divide N into 256-thread blocks
    int block_size = 256;
    int n_blocks = N/block_size + ( N % block_size == 0 ? 0:1);
    integratorKernel <<< n_blocks, block_size >>> (a_d, c, deltaX, N);
    // Added: kernel launches fail silently — surface configuration errors
    // here instead of as a confusing cudaMemcpy failure below.
    if ((errorcode = cudaGetLastError()) != cudaSuccess)
    {
        cout << "integratorKernel launch: " << cudaGetErrorString(errorcode) << endl;
        exit(1);
    }
    // copy results from device to host (synchronizes with the kernel)
    if((errorcode = cudaMemcpy(a_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost))!=cudaSuccess)
    {
        cout << "cudaMemcpy(): " << cudaGetErrorString(errorcode) << endl;
        exit(1);
    }
    // add up results; every interior point appears in two slots, which the
    // deltaX/2 factor accounts for
    float sum = 0.0;
    for(int i=0; i<N; i++) sum += a_h[i];
    sum *= deltaX/2.0;
    // clean up
    free(a_h);
    cudaFree(a_d);
    return sum;
}
// utility host function converting a clock() difference into MILLIseconds.
// (The previous comment said "micro seconds", but the arithmetic below —
// ticks / (CLOCKS_PER_SEC/1000) — yields milliseconds, as `diffms` hints.)
__host__ double diffclock(clock_t clock1, clock_t clock2)
{
// clock1 is the later sample; result is (clock1 - clock2) scaled to ms
double diffticks = clock1-clock2;
double diffms = diffticks/(CLOCKS_PER_SEC/1000);
return diffms;
}
// host main program: integrates f over [0, 1] with 1000 subdivisions and
// reports the answer plus wall-clock time.
int main()
{
    clock_t start = clock();
    float answer = cudaIntegrate(0.0,1.0,1000);
    clock_t end = clock();
    cout << "The answer is " << answer << endl;
    cout << "Computation time: " << diffclock(end,start);
    // Fixed label: diffclock returns milliseconds, not micro seconds.
    cout << " milliseconds" << endl;
    return 0;
} |
9,530 | #include "includes.h"
__global__ void ComputeOffsetOfMatrixA(const int32_t* col_sum, int32_t* output, int32_t N) {
    // Each block fills its own N-wide row of `output` with the negated
    // column sums; threads stride across the row cooperatively.
    int32_t* row = output + blockIdx.x * N;
    for (int32_t i = threadIdx.x; i < N; i += blockDim.x) {
        row[i] = -col_sum[i];
    }
} |
9,531 | #include "includes.h"
__global__ void GetOutlet(double *h, double *houtlet, double *u, double *vout, double *v, double *vout2, int M, int N, int t) {
} |
9,532 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <string.h>
#include <iostream>
using namespace std;
__global__ void removeMultiple(int * playground, int N, int startNum);
// Sieve driver: marks composites on the GPU (as -1), then writes the primes
// up to N (from argv[1]) to "<N>.txt" and reports the count and timing.
int main(int argc, char * argv[]) {
    double time_taken;
    clock_t start, end;
    // Added: argv[1] was read unconditionally.
    if (argc < 2) {
        fprintf(stderr, "usage: %s N\n", argv[0]);
        return 1;
    }
    int N = (unsigned int) atoi(argv[1]);
    int startNum = 2;
    size_t groundSize = (N-1)*sizeof(int);
    // playground[i] holds candidate i+2; the GPU copy gets composites set
    // to -1.
    int * playground = (int *)calloc(N-1, sizeof(int));
    for(int i = 0; i < N-1; i ++ ) {
        playground[i] = i + 2;
    }
    int *d_playground;
    cudaMalloc((void **)&d_playground, groundSize);
    cudaMemcpy(d_playground, playground, groundSize, cudaMemcpyHostToDevice);
    int threadNum = 256;
    int blockNum = (N -1 + threadNum -1)/threadNum;
    int wI = 0;
    start = clock();
    while(startNum < (N+1)/2) {
        // NOTE(review): this tests the untouched HOST copy, so it never
        // observes GPU-side markings and never skips a launch; kept as-is
        // for behavior compatibility.
        if(playground[startNum-2] != -1)
            removeMultiple<<< blockNum, threadNum >>>(d_playground, N, startNum);
        startNum ++;
        wI ++;
    }
    // Fixed: launches are asynchronous — without this the timer measured
    // only launch overhead, not the sieve itself.
    cudaDeviceSynchronize();
    end = clock();
    int *h_playgroundResult = (int *)malloc(groundSize);
    cudaMemcpy(h_playgroundResult, d_playground, groundSize, cudaMemcpyDeviceToHost);
    FILE * fp;
    char filename[15];
    sprintf(filename, "%d.txt", N);
    fp = fopen(filename, "a");
    int numPrime = 0;
    for(int i = 0; i < N-1; i++) {
        if(h_playgroundResult[i] != -1) {
            // playground[i] still holds i+2, the surviving prime value.
            fprintf(fp, "%d ", playground[i] );
            numPrime ++;
        }
    }
    // Fixed: the file handle and host buffers were leaked.
    fclose(fp);
    cout<<"the number of prime is "<<numPrime<<endl;
    cudaFree(d_playground);
    free(playground);
    free(h_playgroundResult);
    time_taken = ((double)(end - start))/ CLOCKS_PER_SEC;
    printf("Time taken for %s is %lf\n", "GPU", time_taken);
    return 0;
}
__global__ void removeMultiple(int * playground, int N, int startNum) {
    // Mark composites: any entry strictly after startNum's slot that is a
    // surviving multiple of startNum becomes -1.
    const int ix = threadIdx.x + blockDim.x*blockIdx.x;
    if (ix >= N-1 || ix <= startNum - 1)
        return;
    const int value = playground[ix];
    if (value != -1 && value % startNum == 0) {
        playground[ix] = -1;
    }
}
|
9,533 |
// Cudafy_Test.RuneCalc
extern "C" __global__ void calc_r(int n, int* build, int buildLen0, int buildLen1, int* stat, int statLen0, int* mult, int multLen0, int multLen1, int* flat, int flatLen0, int flatLen1, int* res, int resLen0, int resLen1);
// Cudafy_Test.RuneCalc
// Cudafy_Test.RuneCalc
// Grid-stride over builds: for each build i, walk its 6 rune slots in order;
// every slot rewrites all 8 result stats, so the final row equals the
// contribution of the last slot alone. This mirrors the original unrolled
// code exactly — same stores, same order, same final state.
extern "C" __global__ void calc_r(int n, int* build, int buildLen0, int buildLen1, int* stat, int statLen0, int* mult, int multLen0, int multLen1, int* flat, int flatLen0, int flatLen1, int* res, int resLen0, int resLen1)
{
	int num = 0;
	for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
	{
		for (int slot = 0; slot < 6; ++slot)
		{
			num = build[(i) * buildLen1 + slot];
			for (int s = 0; s < 8; ++s)
			{
				res[(i) * resLen1 + s] = stat[s] * mult[num * multLen1 + s] + flat[num * flatLen1 + s];
			}
		}
	}
}
|
9,534 | #include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
// Block-level sum reduction: each block reduces 2*blockDim.x consecutive
// inputs (first add happens during the load) via a shared-memory tree, and
// writes its partial sum to g_odata[blockIdx.x].
// Launch with blockDim.x * sizeof(float) bytes of dynamic shared memory and
// g_odata sized to gridDim.x floats.
__global__ void reduce4(float *g_idata, float *g_odata) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * (blockDim.x * 2) + threadIdx.x;
    // First add during load halves the number of shared-memory rounds.
    sdata[tid] = g_idata[i] + g_idata[i + blockDim.x];
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) { sdata[tid] += sdata[tid + s]; }
        __syncthreads();
    }
    // Fixed: the original never published the block result, so g_odata was
    // never written and the whole reduction was discarded.
    if (tid == 0) g_odata[blockIdx.x] = sdata[0];
}
// Benchmarks reduce4 over 1e8 random floats and prints the elapsed time.
int main(void) {
    int N = 100000000;
    float *g_indata_host, *g_indata_device, *g_outdata_host, *g_outdata_device;
    // reduce4 consumes 2*blockDim.x inputs per block, so size the grid and
    // the per-block output accordingly (the original launched one block per
    // 256 inputs and allocated a single output float).
    const int threads = 256;
    const int blocks = (N + threads * 2 - 1) / (threads * 2);
    // NOTE(review): N is not a multiple of 2*threads, so the tail block
    // still reads slightly past the input; pad N or add a guard in reduce4
    // to be fully safe.
    g_indata_host = (float *) malloc(N * sizeof(float));
    g_outdata_host = (float *) malloc(blocks * sizeof(float));
    cudaMalloc(&g_indata_device, N * sizeof(float));
    cudaMalloc(&g_outdata_device, blocks * sizeof(float));
    for (auto i = 0; i < N; i++) {
        g_indata_host[i] = static_cast <float> (rand()) / static_cast <float> (RAND_MAX);
    }
    cudaMemcpy(g_indata_device, g_indata_host, N * sizeof(float), cudaMemcpyHostToDevice);
    // This is where the code is run
    auto start = high_resolution_clock::now();
    // Fixed: the dynamic shared-memory size was omitted (0 bytes), so every
    // sdata[] access in the kernel was out of bounds.
    reduce4<<<blocks, threads, threads * sizeof(float)>>>(g_indata_device, g_outdata_device);
    // Fixed: launches are asynchronous — synchronize so the timer measures
    // the reduction, not just the launch overhead.
    cudaDeviceSynchronize();
    auto stop = high_resolution_clock::now();
    auto duration = duration_cast<microseconds>(stop - start);
    std::cout << "Time taken by function: "
              << duration.count() << " microseconds" << std::endl;
    cudaFree(g_indata_device);
    cudaFree(g_outdata_device);
    free(g_indata_host);
    free(g_outdata_host);
} |
9,535 | #include <iostream>
#include <cuda.h>
using std::cout;
using std::endl;
__global__ void my_kernel()
{
    // Deliberately empty: exists only so a launch can be demonstrated.
}
int main(int argc, char *argv[])
{
    // Greet, launch the empty kernel once, and wait for it to finish.
    cout << "Hello world!! I will call a CUDA kernel now!!" << endl;
    my_kernel<<<1,1,0>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
9,536 | //
// Created by Peter Rigole on 2019-05-03.
//
#ifndef AXONBITS_TESTOUTPUTPROCESSOR_H
#define AXONBITS_TESTOUTPUTPROCESSOR_H
#include "../../OutputProcessor.cuh"
#include "../../NeuralNet.cuh"
// Test double for OutputProcessor: accepts a NeuralNet but performs no
// output handling at all.
class TestOutputProcessor : public OutputProcessor {
public:
    // Forwards the network to the OutputProcessor base; adds no state.
    TestOutputProcessor(NeuralNet* neuralNet_init) : OutputProcessor(neuralNet_init) {}
    // No-op: this test processor deliberately ignores all output.
    void processOutput() {
    }
};
#endif //AXONBITS_TESTOUTPUTPROCESSOR_H
|
9,537 | #include <stdio.h>
__device__ void device_strcpy(char *dst, const char *src) {
    // Copy bytes up to and including the terminating NUL.
    for (;;) {
        const char ch = *src++;
        *dst++ = ch;
        if (ch == '\0')
            break;
    }
}
__global__ void kernel(char *A) {
    // Write the greeting into device buffer A (needs >= 14 bytes).
    device_strcpy(A, "Hello, World!");
}
int main() {
    // Round-trip a greeting through device memory to prove the kernel ran.
    char host_buf[32];
    char *dev_buf;
    cudaMalloc((void**)&dev_buf, 32);
    kernel<<<1,1>>>(dev_buf);
    // cudaMemcpy synchronizes with the preceding launch before copying.
    cudaMemcpy(host_buf, dev_buf, 32, cudaMemcpyDeviceToHost);
    cudaFree(dev_buf);
    puts(host_buf);
}
|
9,538 | #include <stdio.h>
// Enumerates CUDA devices and prints general, memory, and multiprocessor
// information for each.
int main( void )
{
    cudaDeviceProp prop;
    int count;
    // Fixed: 4 bytes cannot hold capabilities like "10.2" (5 bytes with the
    // terminator), so sprintf overflowed the stack buffer. Use a roomier
    // buffer and snprintf.
    char str[16];
    cudaGetDeviceCount( &count);
    if (count == 0)
    {
        printf("No CUDA capable devices found.\n");
    }
    for (int i = 0; i < count; i++)
    {
        cudaGetDeviceProperties( &prop, i);
        printf(" --- General Information for device %d ---\n", i );
        printf("Name: %s\n", prop.name );
        printf("\n");
        snprintf(str, sizeof(str), "%d.%d", prop.major, prop.minor);
        printf("Compute capability : %14s\n",str);
        // clockRate is reported in kHz; /1e6 converts to GHz.
        printf("Clock rate : %14.2f (GHz)\n", prop.clockRate/1000000.0);
        printf("\n");
        printf(" --- Memory Information for device %d ---\n", i );
        //printf("Total global mem : %14.1f (bytes)\n", (double) prop.totalGlobalMem );
        //printf("Total global mem : %14.1f (kb)\n", prop.totalGlobalMem/1024.0);
        //printf("Total global mem : %14.1f (mb)\n", prop.totalGlobalMem/(1024.0*1024.0));
        printf("Total global mem : %14.1f (gb)\n", prop.totalGlobalMem/(1024.0*1024.0*1024.0));
        printf("\n");
        printf( " --- MP Information for device %d ---\n", i );
        printf( "Multiprocessor count : %14d\n",
        prop.multiProcessorCount );
        printf("Shared mem per mp : %14.1f (kb)\n", prop.sharedMemPerBlock/1024. );
        printf("Registers per mp : %14.1f (kb)\n", prop.regsPerBlock/1024. );
        printf("Threads in warp : %14d\n", prop.warpSize );
        printf("Max threads per block : %14d\n",
        prop.maxThreadsPerBlock );
        printf( "Max thread dimensions: (%d, %d, %d)\n",
        prop.maxThreadsDim[0], prop.maxThreadsDim[1],
        prop.maxThreadsDim[2] );
        printf( "Max grid dimensions : %d, %d, %d\n",
        prop.maxGridSize[0], prop.maxGridSize[1],
        prop.maxGridSize[2] );
        printf( "\n" );
    }
    return 0;
}
|
9,539 | #include <stdio.h>
#include <math.h>
// Simple define to index into a 1D array from 2D space
#define I2D(num, c, r) ((r)*(num)+(c))
__global__
void step_kernel_mod(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
    // 2D explicit heat-diffusion step, one thread per interior grid point.
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    int i = blockIdx.y * blockDim.y + threadIdx.y;
    // Leave boundaries untouched and discard threads outside the domain.
    if (j <= 0 || i <= 0 || j >= nj-1 || i >= ni-1)
        return;
    // Linear offsets for the central point and its 4 neighbours.
    int center = I2D(ni, i, j);
    int left   = I2D(ni, i-1, j);
    int right  = I2D(ni, i+1, j);
    int below  = I2D(ni, i, j-1);
    int above  = I2D(ni, i, j+1);
    // Second differences along each axis.
    float d2tdx2 = temp_in[left]-2*temp_in[center]+temp_in[right];
    float d2tdy2 = temp_in[below]-2*temp_in[center]+temp_in[above];
    // Forward-Euler update scaled by the diffusivity factor.
    temp_out[center] = temp_in[center]+fact*(d2tdx2 + d2tdy2);
}
void step_kernel_ref(int ni, int nj, float fact, float* temp_in, float* temp_out)
{
    // CPU reference for the heat step; identical stencil to the GPU kernel.
    // Point (i, j) lives at linear offset j*ni + i (I2D with column i, row j),
    // so the x-neighbours are +/-1 and the y-neighbours are +/-ni away.
    for ( int j=1; j < nj-1; j++ ) {
        for ( int i=1; i < ni-1; i++ ) {
            const int center = j*ni + i;
            const float d2tdx2 = temp_in[center-1]-2*temp_in[center]+temp_in[center+1];
            const float d2tdy2 = temp_in[center-ni]-2*temp_in[center]+temp_in[center+ni];
            temp_out[center] = temp_in[center]+fact*(d2tdx2 + d2tdy2);
        }
    }
}
// Runs nstep heat-diffusion steps on the CPU (reference) and on the GPU,
// then compares the two results against an error bound.
int main()
{
    int istep;
    int nstep = 200; // number of time steps
    // Specify our 2D dimensions
    const int ni = 200;
    const int nj = 100;
    float tfac = 8.418e-5; // thermal diffusivity of silver
    float *temp1_ref, *temp2_ref, *temp1, *temp2, *temp_tmp;
    const int size = ni * nj * sizeof(float);
    temp1_ref = (float*)malloc(size);
    temp2_ref = (float*)malloc(size);
    // Managed memory: both the host init loop and the kernel touch it.
    cudaMallocManaged(&temp1, size);
    cudaMallocManaged(&temp2, size);
    // Initialize with random data
    for( int i = 0; i < ni*nj; ++i) {
        temp1_ref[i] = temp2_ref[i] = temp1[i] = temp2[i] = (float)rand()/(float)(RAND_MAX/100.0f);
    }
    // Execute the CPU-only reference version
    for (istep=0; istep < nstep; istep++) {
        step_kernel_ref(ni, nj, tfac, temp1_ref, temp2_ref);
        // swap the temperature pointers
        temp_tmp = temp1_ref;
        temp1_ref = temp2_ref;
        temp2_ref= temp_tmp;
    }
    dim3 tblocks(32, 16, 1);
    dim3 grid((nj/tblocks.x)+1, (ni/tblocks.y)+1, 1);
    cudaError_t ierrSync, ierrAsync;
    // Execute the modified version using same data
    for (istep=0; istep < nstep; istep++) {
        step_kernel_mod<<< grid, tblocks >>>(ni, nj, tfac, temp1, temp2);
        ierrSync = cudaGetLastError();
        ierrAsync = cudaDeviceSynchronize(); // Wait for the GPU to finish
        if (ierrSync != cudaSuccess) { printf("Sync error: %s\n", cudaGetErrorString(ierrSync)); }
        if (ierrAsync != cudaSuccess) { printf("Async error: %s\n", cudaGetErrorString(ierrAsync)); }
        // swap the temperature pointers
        temp_tmp = temp1;
        temp1 = temp2;
        temp2= temp_tmp;
    }
    float maxError = 0;
    // Output should always be stored in the temp1 and temp1_ref at this point
    for( int i = 0; i < ni*nj; ++i ) {
        // Fixed: use fabsf — with <math.h>, plain `abs` can bind to the
        // integer overload, truncating every sub-1.0 discrepancy to zero
        // and silently hiding real failures.
        float err = fabsf(temp1[i]-temp1_ref[i]);
        if (err > maxError) { maxError = err; }
    }
    // Check and see if our maxError is greater than an error bound
    if (maxError > 0.0005f)
        printf("Problem! The Max Error of %.5f is NOT within acceptable bounds.\n", maxError);
    else
        printf("The Max Error of %.5f is within acceptable bounds.\n", maxError);
    free( temp1_ref );
    free( temp2_ref );
    cudaFree( temp1 );
    cudaFree( temp2 );
    return 0;
}
|
9,540 | #include <cuda_runtime.h>
#define a(i, j) ((i)*s+(j))
#define b(i, j) ((i)*s+(j))
#define c(i, j) ((i)*s+(j))
extern "C"{
// Naive dense multiply of square s x s row-major matrices: C = A * B.
// One thread computes one output element; launch a 2-D grid covering
// at least s x s threads (out-of-range threads do nothing).
__global__ void matrixMul(int *A, int *B, int *C, int s)
{
    int r = blockIdx.y * blockDim.y + threadIdx.y;  // output row
    int c = blockIdx.x * blockDim.x + threadIdx.x;  // output column
    if (r >= s || c >= s)
        return;
    int acc = 0;
    // Dot product of row r of A with column c of B.
    for (int k = 0; k < s; ++k)
        acc += A[r * s + k] * B[k * s + c];
    C[r * s + c] = acc;
}
}
|
9,541 | #include "includes.h"
__constant__ float *c_Kernel;
// Row convolution fused with horizontal 2x downsampling: each block
// stages a halo'd row segment of d_Src (width imageW) in shared memory,
// then writes ROWS_RESULT_STEPS outputs per thread into d_Dst
// (width n_imageW), sampling the staged data at stride 2.
// ROWS_RESULT_STEPS / ROWS_BLOCKDIM_X / ROWS_BLOCKDIM_Y come from
// includes.h. c_Kernel is declared above as a *pointer* in constant
// memory — NOTE(review): the filter taps themselves therefore live in
// global memory; confirm that is intended (usually the taps are the
// __constant__ array).
__global__ void convolutionRowsKernel_down_smp( float *d_Dst, float *d_Src, int imageW, int n_imageW, int imageH, int filter_Rad, int Halo_steps )
{
extern __shared__ float s_Data[];
//Offset to the left halo edge
const int baseX = (blockIdx.x * 2 * ROWS_RESULT_STEPS - Halo_steps) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseX1 = (blockIdx.x * ROWS_RESULT_STEPS) * ROWS_BLOCKDIM_X + threadIdx.x;
const int baseY = blockIdx.y * ROWS_BLOCKDIM_Y + threadIdx.y;
// NOTE(review): the __syncthreads() below sits inside this conditional;
// if only some rows of a block satisfy baseY < imageH the barrier is
// divergent, which is undefined behavior. Safe only when the launch
// guarantees whole blocks are in-range — confirm the host-side grid.
if (baseY < imageH)
{
d_Src += baseY * imageW + baseX;
d_Dst += baseY * n_imageW + baseX1;
//Load left halo (reads before column 0 are clamped to 0)
#pragma unroll
for (int i = 0; i < Halo_steps; ++i)
{
s_Data[(threadIdx.y*(2 * ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X >= 0) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Load right halo and main data (reads past imageW are clamped to 0)
#pragma unroll
for (int i = Halo_steps; i < Halo_steps + 2 * ROWS_RESULT_STEPS + Halo_steps; ++i)
{
s_Data[(threadIdx.y*(2 * ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + threadIdx.x + i * ROWS_BLOCKDIM_X] = (baseX + i * ROWS_BLOCKDIM_X < imageW) ? d_Src[i * ROWS_BLOCKDIM_X] : 0;
}
//Compute and store results
__syncthreads();
#pragma unroll
for (int i = 0; i < ROWS_RESULT_STEPS; ++i)
{
float sum = 0;
if (baseX1 + i * ROWS_BLOCKDIM_X < n_imageW)
{
// Convolve around the 2x-strided sample position (downsampling).
#pragma unroll
for (int j = -filter_Rad; j <= filter_Rad; ++j)
{
sum += c_Kernel[filter_Rad - j] * s_Data[(threadIdx.y*(2 * ROWS_RESULT_STEPS + 2 * Halo_steps)*ROWS_BLOCKDIM_X) + (Halo_steps + 2 * i) * ROWS_BLOCKDIM_X + threadIdx.x * 2 + j];
}
d_Dst[i * ROWS_BLOCKDIM_X] = sum;
}
}
}
}
9,542 | #include "includes.h"
// Write-offset benchmark kernel, unrolled by 2: each thread reads two
// aligned elements of A and B and writes their sums to C shifted by
// `offset`. Pairs whose second write would land at or past n are
// skipped entirely (same guard as the original benchmark).
__global__ void writeOffsetUnroll2(float *A, float *B, float *C, const int n, int offset)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int k = i + offset;
    if (k + blockDim.x >= n)
        return;
    C[k] = A[i] + B[i];
    C[k + blockDim.x] = A[i + blockDim.x] + B[i + blockDim.x];
}
9,543 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <device_functions.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
CUcontext hContext = 0;
#define CUDA_CHECK( fn ) do { \
CUresult status = (fn); \
if ( CUDA_SUCCESS != status ) { \
const char* errstr; \
cuGetErrorString(status, &errstr); \
printf("CUDA Driver Failure (line %d of file %s):\n\t%s returned 0x%x (%s)\n", __LINE__, __FILE__, #fn, status, errstr); \
exit(EXIT_FAILURE); \
} \
} while (0)
// Report the average per-launch time and an estimated throughput figure.
// `ms` is the total elapsed milliseconds accumulated over `repeat`
// launches; the flop estimate assumes 16 operations per output across
// N + 15 outputs.
void gflops(const char* ident, int N, float ms, int repeat)
{
    float msPerIter = ms / repeat;
    double flopsPerIter = (N + 16 - 1) * 16.0;
    double gigaFlops = (flopsPerIter * 1.0e-9f) / (msPerIter / 1000.0f);
    printf("ms = %f \n", msPerIter);
    printf("%s GFLOPS: %.2f (size: %d, iterations: %d)\n", ident, gigaFlops, N, repeat);
}
// Host driver: loads a precompiled convolution kernel ("conv_kernel_32"
// from conv.cubin) via the CUDA Driver API, times repeated launches,
// and compares the device result Y against a CPU reference T.
int main()
{
//-----------------sample_data_config---------------------
int N = 1024032;//1024032;//1023985;
int M = 16;//16;
int P = 1024000;
size_t sizeSampleFloat = N * 4;
size_t sizeFilterFloat = M * 4;//16 * 4;
size_t sizeResultFloat = P * 4;
dim3 threads(32, 1, 1);
dim3 grid(2000, 1, 1);
cudaError_t error;
char deviceName[32];
int count, ordinal, major, minor;
CUdevice hDevice;
CUevent hStart, hStop;
CUdeviceptr devH, devX, devY;
// ------Initialize the Driver API and find a device-----
CUDA_CHECK(cuInit(0));
CUDA_CHECK(cuDeviceGetCount(&count));
for (ordinal = 0; ordinal < count; ordinal++)
{
CUDA_CHECK(cuDeviceGet(&hDevice, ordinal));
CUDA_CHECK(cuDeviceGetAttribute(&major, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, hDevice));
CUDA_CHECK(cuDeviceGetAttribute(&minor, CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, hDevice));
CUDA_CHECK(cuDeviceGetName(deviceName, sizeof(deviceName), hDevice));
// NOTE(review): requiring BOTH major >= 5 and minor >= 2 rejects
// devices like 6.0, 7.0 or 8.0 even though they are newer than 5.2;
// the error text below also says "compute 5.0". Confirm intent.
if (major >= 5 && minor >= 2)
{
//printf("Using: Id:%d %s (%d.%d)\n\n", ordinal, deviceName, major, minor);
break;
}
}
if (ordinal == count)
{
printf("No compute 5.0 device found, exiting.\n");
exit(EXIT_FAILURE);
}
//-----------------device_test------------------------
int device = 0;
error = cudaSetDevice(0);
if (error != cudaSuccess)
{
printf("device error");
exit(EXIT_FAILURE);
}
else printf("device: %d \n", device);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, 0);
if (error != cudaSuccess)
{
printf("DeviceProperties error");
exit(EXIT_FAILURE);
}
//-----------------------host----------------------------
// H = filter taps, X = input samples, Y = device result, T = CPU reference.
float* H = (float*)malloc(sizeFilterFloat);
float* X = (float*)malloc(sizeSampleFloat);
float* Y = (float*)malloc(sizeResultFloat);
float* T = (float*)malloc(sizeResultFloat);
for (int i = 0; i < N ; i++)
{
X[i] = (float)rand()/1000;
}
for (int i = 0; i < M; i++)
{
H[i] = (float)rand()/1000;
}
for (int i = 0; i < P; i++) //
{
Y[i] = (float)0.0;
T[i] = (float)0.0;
}
// CPU reference: direct convolution with the filter reversed
// (T[i] = sum_j H[j-1] * X[i + 16 - j]).
for (int i = 0; i < P; i++)
{
int k = i;
for (int j = 16; j > 0; j--)
{
T[i] += H[j - 1] * X[k];
k++;
}
}
//-----------------------Dev----------------------------
CUDA_CHECK(cuCtxCreate(&hContext, 0, hDevice));
CUDA_CHECK(cuEventCreate(&hStart, CU_EVENT_BLOCKING_SYNC)); // CU_EVENT_DEFAULT
CUDA_CHECK(cuEventCreate(&hStop, CU_EVENT_BLOCKING_SYNC));
CUDA_CHECK(cuMemAlloc(&devH, sizeFilterFloat));
CUDA_CHECK(cuMemAlloc(&devX, sizeSampleFloat));
CUDA_CHECK(cuMemAlloc(&devY, sizeResultFloat));
CUDA_CHECK(cuMemcpyHtoD(devH, H, sizeFilterFloat));
CUDA_CHECK(cuMemcpyHtoD(devX, X, sizeSampleFloat));
//---------------------Kernel----------------------------
printf("Computing result using CUDA Kernel...\n");
// Load the cubin
CUmodule hModule;
CUDA_CHECK(cuModuleLoad(&hModule, "conv.cubin"));
// Load the kernel function
CUfunction hKernel;
CUDA_CHECK(cuModuleGetFunction(&hKernel, hModule, "conv_kernel_32"));
void * params[] = {&devH, &devX, &devY};
int repeat = 20;
float totalTime = 0;
// Launch the kernel repeat times.. but break it up into pieces so as not to lock things up.
// NOTE(review): hStart/hStop were already created above — these two
// calls leak the first pair of events.
CUDA_CHECK(cuEventCreate(&hStart, CU_EVENT_BLOCKING_SYNC)); // CU_EVENT_DEFAULT
CUDA_CHECK(cuEventCreate(&hStop, CU_EVENT_BLOCKING_SYNC));
// NOTE(review): `repeat -= r` with r == repeat means this while loop
// always executes exactly one pass.
while (repeat > 0)
{
float ms;
int r = repeat;
CUDA_CHECK(cuEventRecord(hStart, NULL));
for (int i = 0; i < repeat; i++)
CUDA_CHECK(cuLaunchKernel(hKernel, grid.x, 1, 1, threads.x, 1, 1, 0, 0, params, 0));
CUDA_CHECK(cuEventRecord(hStop, NULL));
CUDA_CHECK(cuEventSynchronize(hStop));
CUDA_CHECK(cuEventElapsedTime(&ms, hStart, hStop));
totalTime += ms;
//gflops("conv_kernel_32", N, ms, repeat);
repeat -= r;
}
//CUDA_CHECK(cuLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
//CUDA_CHECK(cuLaunchKernel(hKernel, grid.x, grid.y, 1, threads.x, 1, 1, 0, 0, params, 0));
CUDA_CHECK(cuModuleUnload(hModule));
printf("first time done\n");
// Copy result from device to host
CUDA_CHECK(cuMemcpyDtoH(Y, devY, sizeResultFloat));
CUDA_CHECK(cuMemcpyDtoH(H, devH, sizeFilterFloat));
CUDA_CHECK(cuMemcpyDtoH(X, devX, sizeSampleFloat));
// Dump the first 1024 outputs, then report any mismatch beyond 1e-2.
for (int i = 1024*0; i<1024*1; i++)
printf("Y[%d] = %f --- and --- T[%d] = %f delta = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
for (int i = 1024*0; i<P; i++)
{
if (Y[i] - T[i] > 1e-2)
printf("Y[%d] = %f --- but --- T[%d] = %f delta = %f\n", i, Y[i], i, T[i], T[i] - Y[i]);
}
//-----------------------free----------------------------
// Cleanup and shutdown of cuda
// NOTE(review): T is never freed (leak).
CUDA_CHECK(cuMemFree(devH));
CUDA_CHECK(cuMemFree(devX));
CUDA_CHECK(cuMemFree(devY));
free(H);
free(X);
free(Y);
CUDA_CHECK(cuEventDestroy(hStart));
CUDA_CHECK(cuEventDestroy(hStop));
//CUBLAS_CHECK( cublasDestroy(hCublas) );
//hCublas = 0;
CUDA_CHECK(cuCtxDestroy(hContext));
hContext = 0;
cudaDeviceReset();
printf("done\n");
return EXIT_SUCCESS;
}
|
9,544 | #include <stdio.h>
// Toy single-thread kernel: store a + b into the location c points at.
__global__ void myGpuFunction(int a, int b, int *c)
{
    c[0] = a + b;
}
// Launch the toy kernel once, fetch the result, and print it.
int main(void)
{
    int result;
    int *dev_result;
    cudaMalloc((void**)&dev_result, sizeof(int));
    myGpuFunction<<<1, 1>>>(2, 2, dev_result);
    // Blocking copy also synchronizes with the kernel launch.
    cudaMemcpy(&result, dev_result, sizeof(int), cudaMemcpyDeviceToHost);
    printf("the Simple program to check make the GPU calls : Sum %d\n", result);
    cudaFree(dev_result);
    return 0;
}
|
9,545 | #include "includes.h"
// Per-pixel update of the interleaved field z (3 floats per pixel):
// components 0/1 are updated from the forward differences of component 2
// and rescaled by 1 / (1 + tau * |gradient|). One thread per pixel on an
// nx x ny grid; out-of-range threads exit immediately.
__global__ void zupdate_inter(float *z, float tau, int nx, int ny)
{
    int gx = blockIdx.x * blockDim.x + threadIdx.x;
    int gy = blockIdx.y * blockDim.y + threadIdx.y;
    if (gx >= nx || gy >= ny)
        return;
    int idx = gx + gy * nx;
    // Forward differences of component 2; zero at right/bottom borders.
    float dx = (gx < nx - 1) ? z[3 * (idx + 1) + 2] - z[3 * idx + 2] : 0.0f;
    float dy = (gy < ny - 1) ? z[3 * (idx + nx) + 2] - z[3 * idx + 2] : 0.0f;
    // Rescaling factor derived from the gradient magnitude.
    float scale = 1.0f / (1.0f + tau * sqrtf(dx * dx + dy * dy));
    z[3 * idx + 0] = (z[3 * idx + 0] + tau * dx) * scale;
    z[3 * idx + 1] = (z[3 * idx + 1] + tau * dy) * scale;
}
9,546 |
// Per-component L1 contribution: |x - y|.
__device__ float manhattan_distance_gpu(float x, float y) {
    float diff = x - y;
    return fabsf(diff);
}
// Per-component squared-L2 contribution: (x - y)^2.
// (Callers accumulate these without taking a square root.)
__device__ float euclidean_distance_gpu(float x, float y) {
    float diff = x - y;
    return diff * diff;
}
// Naive all-pairs distance kernel: thread (x, y) computes the distance
// between dataset row x and query row y, writing it to
// distances[y * dataset_n + x]. distance_algorithm == 1 selects L1,
// anything else selects squared L2.
__global__ void distances_kernel_naive(float* dataset, float* to_predict, int dataset_n, int dimension,
    int to_predict_n, float* distances, int distance_algorithm) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;    // dataset example
    int query = blockIdx.y * blockDim.y + threadIdx.y;  // example to predict
    if (row >= dataset_n || query >= to_predict_n)
        return;
    const float* sample = dataset + row * dimension;
    const float* target = to_predict + query * dimension;
    float acc = 0;
    // distance_algorithm is uniform across the launch, so the branch is
    // hoisted out of the accumulation loop (same result, same order).
    if (distance_algorithm == 1) {
        for (int d = 0; d < dimension; ++d)
            acc += manhattan_distance_gpu(target[d], sample[d]);
    } else {
        for (int d = 0; d < dimension; ++d)
            acc += euclidean_distance_gpu(target[d], sample[d]);
    }
    distances[query * dataset_n + row] = acc;
}
// Distance kernel that first stages each query (test) example in shared
// memory: the threads of a row cooperatively copy one query vector, then
// each thread computes up to `to_calc` distances against dataset rows.
// distances has to_predict_n rows of dataset_n entries.
// Shared memory required: blockDim.y * dimension * sizeof(float).
// NOTE(review): the early `return`s below sit before __syncthreads();
// if only some threads of a block take them the barrier is divergent
// (undefined behavior). Safe only when the launch keeps whole blocks
// in-range — confirm the host-side grid.
__global__ void distances_kernel_test_in_shared_naive(float* dataset, float* to_predict, int dataset_n, int dimension,
int to_predict_n, float* distances, int distance_algorithm) {
extern __shared__ float shared_test[];
int dataset_i = blockIdx.x * blockDim.x + threadIdx.x;
int to_pred_i = blockIdx.y * blockDim.y + threadIdx.y;
if (to_pred_i >= to_predict_n){
return;
}
// Each of the (up to) 1024 x-threads copies `to_calc` consecutive
// components of the query vector.
int to_calc = dimension / 1024 + 1;
// Load the test example into shared memory
for (int i = 0; i < to_calc; i++) {
if (threadIdx.x * to_calc + i < dimension) {
shared_test[
dimension * threadIdx.y + threadIdx.x * to_calc + i
] = to_predict[
dimension * to_pred_i + threadIdx.x * to_calc + i
];
}
}
__syncthreads();
if (dataset_i >= dataset_n){
return;
}
// NOTE(review): `to_calc` is reused here as the number of dataset rows
// handled per thread, which only matches when blockDim.x == 1024 —
// confirm against the launch configuration.
int dataset_i_to_calc;
for (int tc = 0; tc < to_calc; tc++) {
dataset_i_to_calc = (dataset_i * to_calc) + tc;
if (dataset_i_to_calc < dataset_n) {
// Accumulate directly into global memory (one read-modify-write
// per component).
distances[to_pred_i * dataset_n + dataset_i_to_calc] = 0;
for (int i = 0; i < dimension; i++) {
if (distance_algorithm == 1) {
distances[to_pred_i * dataset_n + dataset_i_to_calc] += manhattan_distance_gpu(
dataset[(dataset_i_to_calc) * dimension + i],
shared_test[threadIdx.y * dimension + i]
);
} else {
distances[to_pred_i * dataset_n + dataset_i_to_calc] += euclidean_distance_gpu(
dataset[(dataset_i_to_calc) * dimension + i],
shared_test[threadIdx.y * dimension + i]
);
}
}
}
}
}
// Shared-memory distance kernel: stages each query vector in shared
// memory (dimension * blockDim.y floats), then each thread computes the
// distances for dataset rows {dataset_i, dataset_i + 1024, ...},
// accumulating in a register and writing once per row.
// The inner loops walk the vector in 32-wide chunks with a per-lane
// rotation ((j + threadIdx.x) % 32) over the chunk.
// NOTE(review): the early `return`s precede __syncthreads(); divergent
// if only part of a block is out of range — confirm the launch keeps
// whole blocks in-range.
__global__ void distances_test_in_shared(const float* __restrict__ dataset, const float* __restrict__ to_predict,
int dataset_n, int dimension, int to_predict_n, float* distances, int distance_algorithm) {
extern __shared__ float shared_test[];
int dataset_i = blockIdx.x * blockDim.x + threadIdx.x;
int to_pred_i = blockIdx.y * blockDim.y + threadIdx.y;
if (to_pred_i >= to_predict_n) {
return;
}
// Number of 1024-wide passes needed to copy one query vector.
int to_calc = dimension / 1024;
if (dimension % 1024 != 0) to_calc += 1;
// Load the test example into shared memory
for (int i = 0; i < to_calc; i++) {
if (i * 1024 + threadIdx.x < dimension) {
shared_test[
dimension * threadIdx.y + i * 1024 + threadIdx.x
] = to_predict[
dimension * to_pred_i + i * 1024 + threadIdx.x
];
}
}
__syncthreads();
if (dataset_i >= dataset_n){
return;
}
// Each thread may have to compute more than one distance
// (when dataset_n > 1024); rows are strided by 1024.
to_calc = dataset_n / 1024;
if (dataset_n % 1024 != 0) to_calc += 1;
float distance;
int dataset_i_to_calc_dist;
for (int tc = 0; tc < to_calc; tc++) {
dataset_i_to_calc_dist = (1024 * tc) + dataset_i;
if (dataset_i_to_calc_dist < dataset_n) {
distance = 0;
for (int i = 0; i < (dimension / 32) + 1; i++) {
for (int j = 0; j < 32; j++) {
// Rotate the starting lane within each 32-wide chunk;
// every in-range component is still visited exactly once.
if (i * 32 + ((j + threadIdx.x) % 32) < dimension) {
if (distance_algorithm == 1) {
distance += manhattan_distance_gpu(
dataset[(dataset_i_to_calc_dist) * dimension + i * 32 + ((j + threadIdx.x) % 32)],
shared_test[threadIdx.y * dimension + i * 32 + ((j + threadIdx.x) % 32)]
);
} else {
distance += euclidean_distance_gpu(
dataset[(dataset_i_to_calc_dist) * dimension + i * 32 + ((j + threadIdx.x) % 32)],
shared_test[threadIdx.y * dimension + i * 32 + ((j + threadIdx.x) % 32)]
);
}
}
}
}
distances[to_pred_i * dataset_n + dataset_i_to_calc_dist] = distance;
}
}
}
// Variant of the shared-memory kernel for a TRANSPOSED dataset: the
// dataset is stored column-major (dataset[i * dataset_n + row]), so
// consecutive threads reading component i of consecutive rows access
// contiguous global memory. Query vectors are staged in shared memory
// as in the other variants; results accumulate in a register.
// NOTE(review): same divergent-return-before-__syncthreads caveat as
// the sibling kernels; also `to_calc` (sized for copying the query) is
// reused as the per-thread row count, which assumes blockDim.x == 1024.
__global__ void distances_kernel_test_in_shared_transposed(const float* __restrict__ dataset, const float* __restrict__ to_predict,
int dataset_n, int dimension, int to_predict_n, float* distances, int distance_algorithm) {
extern __shared__ float shared_test[];
int dataset_i = blockIdx.x * blockDim.x + threadIdx.x;
int to_pred_i = blockIdx.y * blockDim.y + threadIdx.y;
if (to_pred_i >= to_predict_n){
return;
}
int to_calc = dimension / 1024;
if (dimension % 1024 != 0) to_calc += 1;
// Load the test example into shared memory
for (int i = 0; i < to_calc; i++) {
if (i * 1024 + threadIdx.x < dimension) {
shared_test[
dimension * threadIdx.y + i * 1024 + threadIdx.x
] = to_predict[
dimension * to_pred_i + i * 1024 + threadIdx.x
];
}
}
__syncthreads();
if (dataset_i >= dataset_n){
return;
}
float distance;
int dataset_i_to_calc;
for (int tc = 0; tc < to_calc; tc++) {
dataset_i_to_calc = (dataset_i * to_calc) + tc;
if (dataset_i_to_calc < dataset_n) {
distance = 0;
// NOTE(review): this zeroing write is redundant — the final value
// is stored unconditionally below.
distances[to_pred_i * dataset_n + dataset_i_to_calc] = 0;
for (int i = 0; i < dimension; i++) {
if (distance_algorithm == 1) {
distance += manhattan_distance_gpu(
dataset[i * dataset_n + dataset_i_to_calc],
shared_test[threadIdx.y * dimension + i]
);
} else {
distance += euclidean_distance_gpu(
dataset[i * dataset_n + dataset_i_to_calc],
shared_test[threadIdx.y * dimension + i]
);
}
}
distances[to_pred_i * dataset_n + dataset_i_to_calc] = distance;
}
}
}
|
9,547 | #include <exception>
#include <stack>
#include <mutex>
#include <memory>
#include <iostream>
// Exception thrown by threadsafe_stack::pop() when the stack is empty.
struct empty_stack : std::exception {
    const char* what() const throw() {
        return "empty stack";
    }
};
// A std::stack<T> wrapper that serializes every operation through an
// internal mutex, so individual calls are safe from multiple threads.
// pop() reports emptiness by throwing empty_stack rather than returning
// a sentinel, closing the classic empty()/pop() race window.
template<typename T>
class threadsafe_stack {
private:
    std::stack<T> data;
    // mutable so the const copy-ctor and empty() can still lock.
    mutable std::mutex m;
public:
    // Default constructor: empty stack.
    threadsafe_stack() {}
    // Copy constructor: lock the source while duplicating its contents.
    threadsafe_stack(const threadsafe_stack& other) {
        std::lock_guard<std::mutex> lock(other.m);
        data = other.data;
    }
    // Assignment is deliberately deleted.
    threadsafe_stack& operator=(const threadsafe_stack&) = delete;
    // Push a value under the lock.
    void push(T new_value) {
        std::lock_guard<std::mutex> lock(m);
        data.push(new_value);
    }
    // Pop the top element, returned via shared_ptr.
    // Throws empty_stack if there is nothing to pop.
    std::shared_ptr<T> pop() {
        std::lock_guard<std::mutex> lock(m);
        if (data.empty()) throw empty_stack();
        std::shared_ptr<T> const result(std::make_shared<T>(data.top()));
        data.pop();
        return result;
    }
    // Pop the top element into `value`. Throws empty_stack when empty.
    void pop(T& value) {
        std::lock_guard<std::mutex> lock(m);
        if (data.empty()) throw empty_stack();
        value = data.top();
        data.pop();
    }
    // True when no elements are stored (snapshot under the lock).
    bool empty() const {
        std::lock_guard<std::mutex> lock(m);
        return data.empty();
    }
};
int main() {
threadsafe_stack<int>si;
si.push(5);
si.pop();
si.push(3);
if (!si.empty()) {
int x;
si.pop(x);
std::cout << x << std::endl;
}
return 0;
} |
9,548 | #include <stdio.h>
#define ELEMS 20
// Element-wise addition c[i] = a[i] + b[i], one thread per element.
// No bounds guard: the launch must supply exactly one thread per element.
__global__ void kernel_vecadd(int *a, int *b, int *c){
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i] = a[i] + b[i];
}
// Host driver: adds two ELEMS-long vectors on the GPU and prints the
// result. Fixed: all host and device buffers are now released before
// exit (the original leaked every allocation).
int main(){
    int *ha,*hb,*hc;   // host input/input/output
    int *da,*db,*dc;   // device mirrors
    int i;
    int size = ELEMS*sizeof(int);
    ha = (int*)malloc(size);
    hb = (int*)malloc(size);
    hc = (int*)malloc(size);
    cudaMalloc((void **)&da,size);
    cudaMalloc((void **)&db,size);
    cudaMalloc((void **)&dc,size);
    // a = 0..N-1, b = N-1..0, so every sum equals ELEMS-1.
    for (i=0;i<ELEMS;i++){
        ha[i] = i;
        hb[i] = ELEMS-i-1;
    }
    cudaMemcpy(da,ha,size,cudaMemcpyHostToDevice);
    cudaMemcpy(db,hb,size,cudaMemcpyHostToDevice);
    // 2 blocks x 10 threads = exactly ELEMS threads (kernel has no guard).
    kernel_vecadd<<<2,10>>>(da,db,dc);
    cudaMemcpy(hc,dc,size,cudaMemcpyDeviceToHost);
    for (i=0;i<ELEMS;i++)
        printf("%2d + %2d = %2d\n",ha[i],hb[i],hc[i]);
    // Release device and host memory (previously leaked).
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    free(ha);
    free(hb);
    free(hc);
    return 0;
}
9,549 | // Created by luozhiwang (luozw1994@outlook.com)
// Date: 2020/1/2
#include <cuda.h>
#include <vector>
#include <random>
// 此章节主要是关于稀疏矩阵计算,对应不同类型的稀疏矩阵有不同的存储格式。
// 主要是介绍为主,没什么代码。此处就是Dense-Matrix转CSR,ELL,COO格式
// Dense matrix backed by a vector-of-vectors, used as the source format
// for the sparse conversions (CSR / COO / ELL) below.
class Matrix{
public:
    int row;      // number of rows
    int column;   // number of columns
    int num;      // unused; kept for interface compatibility
    std::vector<std::vector<float>> data;
    // Deep-copy `data` and record its dimensions (assumes a non-empty,
    // rectangular input).
    Matrix(const std::vector<std::vector<float>> &data){
        this->row = (int)data.size();
        this->column = (int)data[0].size();
        this->data = data;
    }
    // Dump the full matrix to stdout.
    void show(){
        printf(" =================== Origin Matrix ===================>\n");
        for (int r = 0; r < this->row; ++r){
            for (int c = 0; c < this->column; ++c){
                printf("%.3f ", data[r][c]);
            }
            printf("\n");
        }
        printf("\n");
    }
};
// Compressed Sparse Row form of a dense Matrix: `data` holds the
// non-zeros row by row, `col_idx` their column indices, and
// row_ptr[r] .. row_ptr[r+1] delimits row r's entries.
class CSR{
public:
    int column;
    int row;
    std::vector<int> col_idx;
    std::vector<int> row_ptr;
    std::vector<float> data;
    CSR(const Matrix &matrix){
        this->column = (int)matrix.data[0].size();
        this->row = (int)matrix.data.size();
        int nnz = 0;            // running non-zero count
        row_ptr.push_back(0);   // row 0 starts at offset 0
        for (int r = 0; r < this->row; ++r){
            for (int c = 0; c < this->column; ++c){
                float v = matrix.data[r][c];
                if (v != 0){
                    ++nnz;
                    data.push_back(v);
                    col_idx.push_back(c);
                }
            }
            row_ptr.push_back(nnz);  // end offset of row r
        }
    }
    // Print the three CSR arrays.
    void show(){
        printf(" =================== CSR ===================>\n");
        printf("CSR data ===> ");
        for (size_t i = 0; i < data.size(); ++i){
            printf("%.3f ", data[i]);
        }
        printf("\nCSR col_idx ===> ");
        for (size_t i = 0; i < col_idx.size(); ++i){
            printf("%d ", col_idx[i]);
        }
        printf("\nCSR row_ptr ===> ");
        for (size_t i = 0; i < row_ptr.size(); ++i){
            printf("%d ", row_ptr[i]);
        }
        printf("\n\n");
    }
};
// Coordinate (COO) form of a dense Matrix: parallel arrays of values,
// column indices, and row indices, one entry per non-zero.
class COO{
public:
    int column;
    int row;
    std::vector<int> col_idx;
    std::vector<int> row_idx;
    std::vector<float> data;
    COO(const Matrix &matrix){
        this->column = matrix.column;
        this->row = matrix.row;
        for (int r = 0; r < this->row; ++r){
            for (int c = 0; c < this->column; ++c){
                float v = matrix.data[r][c];
                if (v != 0){
                    data.push_back(v);
                    col_idx.push_back(c);
                    row_idx.push_back(r);
                }
            }
        }
    }
    // Print the three COO arrays. (The last label reads "row_ptr" for
    // historical reasons; it lists row_idx.)
    void show(){
        printf(" =================== COO ===================>\n");
        printf("COO data ===> ");
        for (size_t i = 0; i < data.size(); ++i){
            printf("%.3f ", data[i]);
        }
        printf("\nCOO col_idx ===> ");
        for (size_t i = 0; i < col_idx.size(); ++i){
            printf("%d ", col_idx[i]);
        }
        printf("\nCOO row_ptr ===> ");
        for (size_t i = 0; i < row_idx.size(); ++i){
            printf("%d ", row_idx[i]);
        }
        printf("\n\n");
    }
};
// ELLPACK form of a dense Matrix: every row's non-zeros (and their
// column indices) are padded to the length of the widest row, producing
// two dense rows x max_len arrays.
class ELL{
public:
    std::vector<std::vector<float>> data;
    std::vector<std::vector<int>> col_idx;
    ELL(const Matrix &matrix){
        int max_len = 0;
        // Collect each row's non-zeros and track the widest row.
        for (int r = 0; r < matrix.row; ++r){
            std::vector<int> tmp_col;
            std::vector<float> tmp_data;
            for (int c = 0; c < matrix.column; ++c){
                float tmp = matrix.data[r][c];
                if (tmp != 0){
                    // Fixed: removed a stray debug printf of the column
                    // index that polluted stdout during construction.
                    tmp_col.push_back(c);
                    tmp_data.push_back(tmp);
                }
            }
            if(max_len < (int)tmp_data.size()){
                max_len = (int)tmp_data.size();
            }
            data.push_back(tmp_data);
            col_idx.push_back(tmp_col);
        }
        // Pad every row out to max_len with value 0 / column 0.
        for (int r = 0; r < (int)data.size(); ++r){
            for (int c = (int)data[r].size(); c < max_len; ++c){
                data[r].push_back(0);
                col_idx[r].push_back(0);
            }
        }
    }
    // Print the padded value array beside the padded column-index array.
    void show(){
        printf(" =================== ELL ===================>\n");
        for (int r = 0; r < (int)data.size(); ++r){
            for (int c = 0; c < (int)data[0].size(); ++c){
                printf("%.3f ", data[r][c]);
            }
            printf("    ");
            for (int c = 0; c < (int)col_idx[0].size(); ++c){
                printf("%d ", col_idx[r][c]);
            }
            printf("\n");
        }
        printf("\n");
    }
};
const int ROW = 10;
const int COL = 10;
// Build a random sparse ROW x COL matrix (each entry non-zero with
// probability ~0.1, drawn uniformly from [0, 10)) and print it in
// dense, CSR, COO, and ELL form.
int main(int args, char **argv){
    std::default_random_engine e;
    std::uniform_real_distribution<float > probability(0, 1);
    std::uniform_real_distribution<float > number(0, 10);
    std::vector<std::vector<float>> data;
    for (int i = 0; i < ROW; ++i){
        std::vector<float> row;
        for (int j = 0; j < COL; ++j){
            // Same RNG call sequence as before: probability first, the
            // value only when the entry is kept.
            if (probability(e) < 0.1){
                row.push_back(number(e));
            } else {
                row.push_back(0);
            }
        }
        data.push_back(row);
    }
    Matrix matrix{data};
    matrix.show();
    CSR csr{matrix};
    csr.show();
    COO coo{matrix};
    coo.show();
    ELL ell(matrix);
    ell.show();
}
9,550 | /* =======================================================
Student: Patricia Wilthew
The basic SDH algorithm implementation for 3D data
To compile: nvcc SDH.c -o SDH
=======================================================
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#define BOX_SIZE 23000
/*
Structure: atom.
Descriptors for single atom in the tree.
*/
typedef struct atomdesc
{
double x_pos;
double y_pos;
double z_pos;
} atom;
/*
Structure: bucket.
Size of the buckets.
*/
typedef struct hist_entry
{
long long d_cnt;
} bucket;
cudaError_t err;
long long PDH_acnt;
double PDH_res;
int num_buckets, PDH_threads;
bucket *histogram;
atom *atom_list;
struct timezone Idunno;
struct timeval startTime, endTime;
/*
Method: distance.
Distance of two points (x1, y1, z1) and (x2, y2, z2).
*/
__device__
double distance(double x1, double y1, double z1, double x2, double y2, double z2)
{
    // Euclidean norm of the component-wise differences.
    double dx = x1 - x2;
    double dy = y1 - y2;
    double dz = z1 - z2;
    return sqrt(dx*dx + dy*dy + dz*dz);
}
/*
Method: PDH_on_gpu.
SDH solution in GPU threads.
*/
__global__
void PDH_on_gpu(double *x, double *y, double *z, bucket *hist,
                int PDH_acnt, double PDH_res, int num_buckets)
{
    // Each thread owns one atom and histograms its distances to all
    // later atoms into a per-block shared partial histogram, which is
    // then flushed to the global histogram with atomics.
    // Launch: 1-D grid covering >= PDH_acnt threads, dynamic shared
    // memory of num_buckets * sizeof(unsigned int) bytes.
    extern __shared__ unsigned int SHMOut[];
    int t_id, b_id, t, s;
    int i, h_pos;
    double x1, y1, z1, x2, y2, z2, d;
    t_id = threadIdx.x;
    b_id = blockIdx.x;
    t = b_id*blockDim.x + t_id;   // global atom index for this thread
    // Initialize Shared Memory to Zero.
    for (s = 0; s < (num_buckets + blockDim.x - 1)/blockDim.x; s++)
    {
        if (t_id + s*blockDim.x < num_buckets)
        {
            SHMOut[t_id + s*blockDim.x] = 0;
        }
    }
    // FIX: barrier so no thread starts counting into buckets another
    // thread has not zeroed yet (this barrier was missing).
    __syncthreads();
    // FIX: guard the grid tail — the last block can contain threads with
    // t >= PDH_acnt, which previously read x[t]/y[t]/z[t] out of bounds.
    if (t < PDH_acnt)
    {
        // The t-th datum of b-th input data block.
        x1 = x[t];
        y1 = y[t];
        z1 = z[t];
        // Pair atom t with every later atom (each pair counted once).
        for (i = t + 1; i < PDH_acnt; i++)
        {
            x2 = x[i];
            y2 = y[i];
            z2 = z[i];
            d = distance(x1, y1, z1, x2, y2, z2);
            h_pos = (int) (d / PDH_res);
            atomicAdd(&SHMOut[h_pos], 1);
        }
    }
    __syncthreads();
    // Write results to Global Memory.
    // NOTE: d_cnt is long long; the 32-bit atomicAdd updates its low
    // word, which is correct on little-endian GPUs while each bucket's
    // total stays below 2^32.
    for (s = 0; s < (num_buckets + blockDim.x - 1)/blockDim.x; s++)
    {
        if (t_id + s*blockDim.x < num_buckets)
        {
            atomicAdd((unsigned int *)&hist[t_id + s*blockDim.x].d_cnt,
                      SHMOut[t_id + s*blockDim.x]);
        }
    }
}
/*
Method: p2p_distance.
Distance of two points in the atom_list.
*/
double p2p_distance(atom *atom_list, int ind1, int ind2)
{
    // Euclidean distance between the two atoms' coordinates.
    double dx = atom_list[ind1].x_pos - atom_list[ind2].x_pos;
    double dy = atom_list[ind1].y_pos - atom_list[ind2].y_pos;
    double dz = atom_list[ind1].z_pos - atom_list[ind2].z_pos;
    return sqrt(dx*dx + dy*dy + dz*dz);
}
/*
Method: PDH_baseline.
Brute-force SDH solution in a single CPU thread.
*/
int PDH_baseline(atom *atom_list, bucket *histogram, long long PDH_acnt, double PDH_res)
{
    int i, j;
    // Every unordered pair (i, j) with i < j contributes one count to
    // the bucket its distance falls in.
    for (i = 0; i < PDH_acnt; i++)
    {
        for (j = i + 1; j < PDH_acnt; j++)
        {
            double d = p2p_distance(atom_list, i, j);
            int h_pos = (int)(d / PDH_res);
            histogram[h_pos].d_cnt++;
        }
    }
    return 0;
}
/*
Method: report_running_time.
Set a checkpoint and show the (natural) running time in seconds.
*/
double report_running_time()
{
    // Reads the global startTime set by the caller; records endTime now.
    long sec_diff, usec_diff;
    gettimeofday(&endTime, &Idunno);
    sec_diff = endTime.tv_sec - startTime.tv_sec;
    usec_diff = endTime.tv_usec - startTime.tv_usec;
    // Borrow a second when the microsecond difference is negative.
    if (usec_diff < 0)
    {
        sec_diff -= 1;
        usec_diff += 1000000;
    }
    printf("Running time: %ld.%06ld\n", sec_diff, usec_diff);
    return sec_diff + usec_diff / 1000000.0;
}
/*
Method: output_histogram.
Print the counts in all buckets of the histogram.
*/
void output_histogram(bucket *histogram, int num_buckets)
{
    long long total_cnt = 0;
    for (int i = 0; i < num_buckets; i++)
    {
        // Start a fresh line (with the bucket index) every 5 buckets.
        if (i % 5 == 0)
            printf("\n%02d: ", i);
        printf("%15lld ", histogram[i].d_cnt);
        total_cnt += histogram[i].d_cnt;
        // After the last bucket, print the grand total as a sanity check;
        // otherwise print the column separator.
        if (i == num_buckets - 1)
            printf("\n T:%lld \n", total_cnt);
        else
            printf("| ");
    }
}
/*
Method: catch_error.
Prints any CUDA error to stdout.
*/
void catch_error(cudaError_t error)
{
    if (error)
    {
        // Fixed: report the error that was passed in. The original read
        // the global `err`, which only worked because callers happened
        // to assign it immediately before calling this function.
        printf("Error: %s\n", cudaGetErrorString(error));
    }
}
// Host driver: generates PDH_acnt random atoms in a BOX_SIZE^3 cube and
// computes the spatial distance histogram on the GPU (the CPU reference
// path is left commented out).
int main(int argc, char **argv)
{
if (argc <= 3)
{
printf("Usage: ./SDH {# Atoms} {# Buckets} {# Threads}\n");
exit(1);
}
if (atoi(argv[3]) < 32)
{
printf("Number of threads must be greater or equal to 32.\n");
exit(1);
}
// NOTE(review): the usage text calls argv[2] "# Buckets", but it is
// parsed with atof as the bucket RESOLUTION (PDH_res); the bucket
// count is derived from it below. Confirm the intended CLI contract.
PDH_acnt = atoi(argv[1]);
PDH_res = atof(argv[2]);
PDH_threads = atoi(argv[3]);
// Variables declaration;
float time = 0;
int i;
double *x, *y, *z, *d_x, *d_y, *d_z;
bucket *d_histogram, *h_histogram;
// bucket *difference_histogram;
// Variables initialization and mem allocation.
// 1.732 ~ sqrt(3): the largest possible distance is the cube diagonal.
num_buckets = (int)(BOX_SIZE * 1.732 / PDH_res) + 1;
atom_list = (atom *)malloc(sizeof(atom) * PDH_acnt);
histogram = (bucket *)malloc(sizeof(bucket) * num_buckets);
x = (double *)malloc(sizeof(double)*PDH_acnt);
y = (double *)malloc(sizeof(double)*PDH_acnt);
z = (double *)malloc(sizeof(double)*PDH_acnt);
h_histogram = (bucket *)malloc(sizeof(bucket) * num_buckets);
// difference_histogram = (bucket *)malloc(sizeof(bucket) * num_buckets);
err = cudaSuccess;
srand(1);
// Generate data following a uniform distribution.
for (i = 0; i < PDH_acnt; i++)
{
x[i] = atom_list[i].x_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
y[i] = atom_list[i].y_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
z[i] = atom_list[i].z_pos = ((double)(rand()) / RAND_MAX) * BOX_SIZE;
}
/*
printf("----CPU----");
// Start counting time.
gettimeofday(&startTime, &Idunno);
// Call CPU single thread version to compute the histogram.
PDH_baseline(atom_list, histogram, PDH_acnt, PDH_res);
// Check the total running time.
report_running_time();
// Print out the histogram.
output_histogram(histogram, num_buckets);
*/
/* My part of the project */
// Initialize h_histogram with zeroes.
for (i = 0; i < num_buckets; i++)
{
h_histogram[i].d_cnt = 0;
}
// Allocate memory in device for single dim arrays.
err = cudaMalloc((void **)&d_x, PDH_acnt * sizeof(double)); catch_error(err);
err = cudaMalloc((void **)&d_y, PDH_acnt * sizeof(double)); catch_error(err);
err = cudaMalloc((void **)&d_z, PDH_acnt * sizeof(double)); catch_error(err);
// Allocate memory in device for histogram.
err = cudaMalloc(&d_histogram, num_buckets * sizeof(bucket)); catch_error(err);
// Copy single dim arrays to device.
err = cudaMemcpy(d_x, x, PDH_acnt * sizeof(double), cudaMemcpyHostToDevice); catch_error(err);
err = cudaMemcpy(d_y, y, PDH_acnt * sizeof(double), cudaMemcpyHostToDevice); catch_error(err);
err = cudaMemcpy(d_z, z, PDH_acnt * sizeof(double), cudaMemcpyHostToDevice); catch_error(err);
// Copy zeroed histogram from host to device.
err = cudaMemcpy(d_histogram, h_histogram, num_buckets * sizeof(bucket),
cudaMemcpyHostToDevice); catch_error(err);
// Recording variables.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Start to record.
cudaEventRecord( start, 0);
// Call GPU version.
// Dynamic shared memory: one unsigned int per bucket, matching the
// kernel's SHMOut array.
PDH_on_gpu<<<(PDH_acnt - 1 + PDH_threads)/PDH_threads,
PDH_threads,
num_buckets * sizeof(int)>>>(d_x, d_y, d_z,
d_histogram,
PDH_acnt,
PDH_res,
num_buckets);
// Stop recording.
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Copy histogram from device to host.
err = cudaMemcpy(h_histogram, d_histogram, num_buckets * sizeof(bucket),
cudaMemcpyDeviceToHost); catch_error(err);
// Print out the histogram.
output_histogram(h_histogram, num_buckets);
// Output the total running time.
printf("******** Total Running Time of Kernel = %.5f sec *******\n", time/1000.0);
/*
printf("\n----Difference between histograms:\n");
// Print the difference between the histograms.
for (i = 0; i < num_buckets; i++)
{
difference_histogram[i].d_cnt = abs(histogram[i].d_cnt - h_histogram[i].d_cnt);
}
// Print out the histograms' difference.
output_histogram(difference_histogram, num_buckets);
*/
// Free memory.
cudaFree(d_x);
cudaFree(d_y);
cudaFree(d_z);
cudaFree(d_histogram);
free(histogram);
free(h_histogram);
free(atom_list);
free(x);
free(y);
free(z);
return 0;
}
|
9,551 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
|
9,552 | #include "nms_kern.cuh"
#include <algorithm>
#include <cassert>
namespace {
#if __CUDACC_VER_MAJOR__ >= 9
#define __shfl_down(x, y) __shfl_down_sync(0xffffffffu, x, y)
#endif
// each thread computs one bit
const int THREADS_PER_BLOCK = 64;
const int WARP_SIZE = 32;
// use aligned structure for large memory transaction
struct __align__(16) Box {
float x0, y0, x1, y1;
};
//! return whether IoU(a, b) > thresh
//! return whether IoU(a, b) > thresh
__device__ __forceinline__ bool box_iou(Box a, Box b, float thresh) {
    // Intersection rectangle, clamped to zero for disjoint boxes.
    float iw = max(min(a.x1, b.x1) - max(a.x0, b.x0), 0.f);
    float ih = max(min(a.y1, b.y1) - max(a.y0, b.y0), 0.f);
    float inter = iw * ih;
    float areaA = (a.x1 - a.x0) * (a.y1 - a.y0);
    float areaB = (b.x1 - b.x0) * (b.y1 - b.y0);
    // inter / union > thresh, rearranged to avoid the division.
    return inter > (areaA + areaB - inter) * thresh;
}
//! store uint64_t with cache streaming
//! store uint64_t with cache streaming
// (st.cs: streaming hint — the value is unlikely to be re-read soon,
// so avoid displacing other cache lines)
__device__ __forceinline__ void store_u64_cs(uint64_t* ptr, uint64_t val) {
asm volatile("st.cs.u64 [%0], %1;" : : "l"(ptr), "l"(val));
}
//! load uint64_t with cache streaming
//! load uint64_t with cache streaming
// (ld.cs: streaming hint — data is read once, keep it out of the way
// of reusable cache lines)
__device__ __forceinline__ uint64_t load_u64_cs(const uint64_t* ptr) {
uint64_t val;
asm volatile("ld.cs.u64 %0, [%1];" : "=l"(val) : "l"(ptr));
return val;
}
// NMS phase 1: build the pairwise-overlap bit matrix.
// Block (col, row) compares box group `row` against box group `col`
// (THREADS_PER_BLOCK = 64 boxes per group); bit i of
// dev_mask[box * dev_mask_width + col] is set when IoU(box, group-col
// box i) exceeds the threshold. Groups below the diagonal are skipped
// since overlap is symmetric.
__global__ void kern_gen_mask(
const int nr_boxes, const float nms_overlap_thresh, const Box* dev_boxes,
const int dev_mask_width, uint64_t* dev_mask) {
const int box_group_row = blockIdx.y, box_group_col = blockIdx.x;
if (box_group_row > box_group_col)
return;
// Clamp both group sizes at the tail of the box list.
const int row_nr_boxes = min(
nr_boxes - box_group_row * THREADS_PER_BLOCK, THREADS_PER_BLOCK),
col_nr_boxes = min(
nr_boxes - box_group_col * THREADS_PER_BLOCK, THREADS_PER_BLOCK);
// Stage the column group once per block.
__shared__ Box block_boxes[THREADS_PER_BLOCK];
if (threadIdx.x < col_nr_boxes) {
block_boxes[threadIdx.x] =
dev_boxes[THREADS_PER_BLOCK * box_group_col + threadIdx.x];
}
__syncthreads();
if (threadIdx.x < row_nr_boxes) {
const int cur_box_idx = THREADS_PER_BLOCK * box_group_row + threadIdx.x;
Box cur_box = dev_boxes[cur_box_idx];
uint64_t result = 0;
// On a diagonal block only compare against later boxes, so each
// pair is evaluated exactly once.
const int start = (box_group_row == box_group_col) ? threadIdx.x + 1
: // blocks on diagnal
0;
for (int i = start; i < col_nr_boxes; ++i) {
result |= static_cast<uint64_t>(
box_iou(cur_box, block_boxes[i], nms_overlap_thresh))
<< i;
}
// Streaming store: each mask word is written once and read later
// by the indexing kernel.
store_u64_cs(&dev_mask[cur_box_idx * dev_mask_width + box_group_col], result);
}
}
//! branchless bool -> bit mask: true -> 0xffffffff, false -> 0
__device__ __forceinline__ uint32_t bool_as_u32_mask(bool v) {
    return v ? ~0u : 0u;
}
//! return min value of val in current warp, broadcast to all threads.
//! NOTE(review): broadcasts through shared `ans`; the only caller launches
//! a single-warp block (<<<1, WARP_SIZE>>>), which this relies on — confirm
//! before reusing with larger blocks.
__device__ __forceinline__ uint32_t warp_reduce_min_brdcst(uint32_t val) {
__shared__ uint32_t ans;
static_assert(WARP_SIZE == 32, "warp size != 32");
#pragma unroll
// shuffle-down tree reduction; lane 0 ends up holding the warp minimum
for (uint32_t offset = WARP_SIZE / 2; offset; offset /= 2)
val = min(val, __shfl_down(val, offset));
if (!threadIdx.x)
ans = val;
__syncthreads();
return ans;
}
//! arguments for a deferred dst[i] |= src[i] sweep (see bitwise_or_single_warp)
struct BitwiseOrArgs {
uint64_t* dst;       // destination words, updated in place
const uint64_t* src; // words OR-ed into dst
uint32_t size;       // number of 64-bit words to process
};
//! dst[i] |= src[i] for i in [0, size), work strided across one warp's lanes
__device__ __forceinline__ void bitwise_or_single_warp(BitwiseOrArgs args) {
uint64_t* __restrict__ dst = args.dst;
const uint64_t* __restrict__ src = args.src;
uint32_t size = args.size;
for (uint32_t i = threadIdx.x; i < size; i += WARP_SIZE) {
dst[i] |= load_u64_cs(&src[i]);
}
}
//! Serial NMS selection pass over the precomputed overlap masks.
//! Launched as a single warp (<<<1, WARP_SIZE>>>); repeatedly picks the
//! lowest-indexed box not yet removed, appends its index to out_idx, and
//! ORs that box's overlap-mask row into rm_mask to suppress everything
//! it overlaps. Stops after max_output boxes or when all are removed.
__global__ void kern_gen_indices(
uint32_t nr_boxes, uint32_t max_output, uint32_t overlap_mask_width,
const uint64_t* __restrict__ overlap_mask, uint64_t* __restrict__ rm_mask,
uint32_t* __restrict__ out_idx, uint32_t* __restrict__ out_size) {
__shared__ uint32_t out_pos;
__shared__ BitwiseOrArgs bitwise_or_args;
const uint32_t nr_box_blocks = DIVUP(nr_boxes, 64);
if (!threadIdx.x) {
uint32_t cnt = nr_box_blocks * 64 - nr_boxes;
// mark the padded boxes as having been removed
rm_mask[nr_box_blocks - 1] = ((1ull << cnt) - 1) << (64 - cnt);
out_pos = 0;
}
__syncthreads();
// each lane scans one 64-box word; th0_box_block_id tracks lane 0's word
uint32_t box_block_id = threadIdx.x, th0_box_block_id = 0;
while (th0_box_block_id < nr_box_blocks) {
bool in_range = box_block_id < nr_box_blocks;
// out-of-range lanes clamp their index to 0 and read a dummy word
uint64_t cur_mask = ~rm_mask[box_block_id & bool_as_u32_mask(in_range)];
// lanes with no live box contribute UINT32_MAX to the min
uint32_t min_box_block_id = warp_reduce_min_brdcst(
box_block_id | bool_as_u32_mask(!(in_range && cur_mask)));
if (min_box_block_id + 1) {
// min_box_block_id != UINT32_MAX, so at least one thread finds a
// un-removed box
if (min_box_block_id == box_block_id) {
// exactly one thread can take this path
uint32_t box_id_in_block = __ffsll(cur_mask) - 1,
box_id = box_block_id * 64 + box_id_in_block;
// so this box would not be processed again
rm_mask[box_block_id] |= 1ull << box_id_in_block;
// stage the OR sweep; executed by the whole warp below
bitwise_or_args.dst = &rm_mask[box_block_id];
bitwise_or_args.src =
&overlap_mask[box_id * overlap_mask_width + box_block_id];
bitwise_or_args.size = nr_box_blocks - box_block_id;
out_idx[out_pos++] = box_id;
}
__syncthreads();
if (out_pos == max_output)
break;
// suppress every box that overlaps the one just selected
bitwise_or_single_warp(bitwise_or_args);
// skip the blocks before min_box_block_id
th0_box_block_id = min_box_block_id;
box_block_id = min_box_block_id + threadIdx.x;
} else {
// no live box in this window; advance the whole warp
th0_box_block_id += WARP_SIZE;
box_block_id += WARP_SIZE;
}
}
if (out_pos < max_output) {
// fill the values after out_pos
uint32_t val = out_idx[out_pos - 1];
for (uint32_t i = out_pos + threadIdx.x; i < max_output; i += WARP_SIZE) {
out_idx[i] = val;
}
}
if (!threadIdx.x) {
*out_size = out_pos;
}
}
} // anonymous namespace
//! Host wrapper: launch kern_gen_mask on `stream` with a
//! DIVUP(n,64) x DIVUP(n,64) grid of 64-thread blocks.
void mgb::opr::standalone::nms::launch_gen_mask(
const int nr_boxes, const float nms_overlap_thresh, const float* dev_boxes,
const int dev_mask_width, uint64_t* dev_mask, cudaStream_t stream) {
dim3 blocks(DIVUP(nr_boxes, THREADS_PER_BLOCK), DIVUP(nr_boxes, THREADS_PER_BLOCK));
dim3 threads(THREADS_PER_BLOCK);
// boxes are reinterpreted as the 16-byte-aligned Box struct (x0,y0,x1,y1)
kern_gen_mask<<<blocks, threads, 0, stream>>>(
nr_boxes, nms_overlap_thresh, reinterpret_cast<const Box*>(dev_boxes),
dev_mask_width, dev_mask);
}
//! Host wrapper: run the single-warp selection kernel on `stream`.
void mgb::opr::standalone::nms::launch_gen_indices(
int nr_boxes, int max_output, int overlap_mask_width,
const uint64_t* overlap_mask, uint64_t* rm_mask, uint32_t* out_idx,
uint32_t* out_size, cudaStream_t stream) {
kern_gen_indices<<<1, WARP_SIZE, 0, stream>>>(
nr_boxes, max_output, overlap_mask_width, overlap_mask, rm_mask, out_idx,
out_size);
}
// vim: ft=cuda syntax=cuda.doxygen
|
9,553 | #include <stdio.h>
#include <stdint.h>
// #define DEBUG
#define UINT uint32_t
#define MAXN 1024
#define MULSIDE 16 // each block has size SIDE x SIDE
#define MULBLK (MAXN / MULSIDE) // divide C into BLK x BLK blocks
// function for debugging.
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with its source location; terminates the
// process unless abort is false. Used via the gpuErrchk macro.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;
    fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Tiled MAXN x MAXN matrix multiply: C = A * B.
// Each MULSIDE x MULSIDE thread block computes one tile of C, staging tiles
// of A and B in shared memory; the B tile is stored transposed on load.
// All matrices are pitched allocations (cudaMallocPitch), hence the
// byte-offset row addressing below.
__global__ void mul_kernel(UINT* A, UINT* B, UINT* C, size_t pitcha, size_t pitchb, size_t pitchc){
__shared__ UINT left[MULSIDE][MULSIDE];
__shared__ UINT right[MULSIDE][MULSIDE];
int gridx = blockIdx.x;
int gridy = blockIdx.y;
int localx = threadIdx.x;
int localy = threadIdx.y;
int globalx = gridx * MULSIDE + localx;//x for C
int globaly = gridy * MULSIDE + localy;//y for C
UINT result = 0;
for(int block = 0; block < MULBLK; block++){
// recommended way to address cuda matrix is to use pitch, syntax below:
// T* pElement = (T*)((char*)BaseAddress + Row * pitch) + Column;
// also, when loading, transpose the right matrix for temporal locality
left[localx][localy] = *((UINT*)((char*)A + globalx * pitcha) + (block*MULSIDE + localy));
right[localy][localx] = *((UINT*)((char*)B + (block*MULSIDE + localx) * pitchb) + globaly);
__syncthreads();
// accumulate this tile's contribution to C[globalx][globaly]
for(int k = 0; k < MULSIDE; k++){
result += left[localx][k] * right[localy][k];
}
// barrier before the next iteration overwrites the shared tiles
__syncthreads();
}
*((UINT*)((char*)C + globalx * pitchc) + globaly) = result;
}
// Copy a packed host MAXN x MAXN matrix into a pitched device allocation.
void copyto(UINT* dst, UINT* src, size_t pitch){
    // fix: the source (host) rows are tightly packed, so spitch must be the
    // packed row width, not the device pitch. The original passed `pitch`
    // for both, which is only correct when the pitch happens to equal
    // MAXN*sizeof(UINT).
    gpuErrchk(cudaMemcpy2D((void*)dst, pitch, (void *)src, MAXN*sizeof(UINT), MAXN*sizeof(UINT), MAXN, cudaMemcpyHostToDevice));
}
// Copy a pitched device matrix back into a packed host MAXN x MAXN array.
void copyback(UINT* dst, UINT* src, size_t pitch){
    // fix: the destination (host) rows are tightly packed, so dpitch is the
    // packed row width; the device pitch applies only to the source side.
    gpuErrchk(cudaMemcpy2D((void*)dst, MAXN*sizeof(UINT), (void *)src, pitch, MAXN*sizeof(UINT), MAXN, cudaMemcpyDeviceToHost));
}
// Zero a pitched device matrix: MAXN rows of MAXN UINTs each.
void cuClear(UINT* dst, size_t pitch){
gpuErrchk(cudaMemset2D((void*)dst, pitch, 0, MAXN*sizeof(UINT), MAXN));
}
// Zero an entire MAXN x MAXN matrix.
// fix: the original called memset(A, 0, MAXN) on every iteration, which
// cleared only the first MAXN *bytes* of the whole array (MAXN times over)
// and left the rest of the matrix untouched.
void clear(UINT A[][MAXN]){
    for (int i = 0; i < MAXN; i++) {
        memset(A[i], 0, MAXN * sizeof(UINT));
    }
}
// Fill the top-left N x N corner of A with a deterministic pseudo-random
// sequence seeded by c: state <- (state^2 + c + row + col) mod N*N,
// starting from state = 2.
void rand_gen(UINT c, int N, UINT A[][MAXN]) {
    UINT state = 2;
    UINT mod = N * N;
    for (int row = 0; row < N; row++) {
        for (int col = 0; col < N; col++) {
            state = (state * state + c + row + col) % mod;
            A[row][col] = state;
        }
    }
}
// Dump the top-left N x N corner of A to stderr, one bracketed row per line.
void print_matrix(int N, UINT A[][MAXN]) {
    for (int r = 0; r < N; r++) {
        fprintf(stderr, "[");
        for (int c = 0; c < N; c++) {
            fprintf(stderr, " %u", A[r][c]);
        }
        fprintf(stderr, " ]\n");
    }
}
// Order-sensitive hash of the top-left N x N corner of A
// (Knuth multiplicative mixing constant 2654435761).
UINT signature(int N, UINT A[][MAXN]) {
    UINT h = 0;
    for (int r = 0; r < N; r++)
        for (int c = 0; c < N; c++)
            h = (h + A[r][c]) * 2654435761LU;
    return h;
}
UINT A[MAXN][MAXN], B[MAXN][MAXN], C[MAXN][MAXN];
// Read N and two seeds from stdin, build two MAXN x MAXN matrices, multiply
// them on the GPU (the kernel always works over the full MAXN extent; the
// unused area is zero), and print a hash of the N x N corner of the product.
int main() {
int N;
uint32_t S1, S2;
scanf("%d %u %u", &N, &S1, &S2);
rand_gen(S1, N, A);
rand_gen(S2, N, B);
size_t pitcha, pitchb, pitchc;
UINT *devA, *devB, *devC;
gpuErrchk(cudaMallocPitch(&devA, &pitcha, MAXN*sizeof(UINT), MAXN));
gpuErrchk(cudaMallocPitch(&devB, &pitchb, MAXN*sizeof(UINT), MAXN));
gpuErrchk(cudaMallocPitch(&devC, &pitchc, MAXN*sizeof(UINT), MAXN));
copyto(devA, (UINT*)A, pitcha);
copyto(devB, (UINT*)B, pitchb);
cuClear(devC, pitchc);
// one MULSIDE x MULSIDE thread tile per tile of C
mul_kernel <<< dim3(MULBLK,MULBLK), dim3(MULSIDE,MULSIDE) >>> (devA, devB, devC, pitcha, pitchb, pitchc);//AB
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
copyback((UINT*)C, devC, pitchc);
#ifdef DEBUG
print_matrix(N, A);
print_matrix(N, B);
print_matrix(N, C);
#endif
printf("%u\n", signature(N, C));
return 0;
} |
9,554 |
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* NVIDIA Corporation and its licensors retain all intellectual property and
* proprietary rights in and to this software and related documentation.
* Any use, reproduction, disclosure, or distribution of this software
* and related documentation without an express license agreement from
* NVIDIA Corporation is strictly prohibited.
*
* Please refer to the applicable NVIDIA end user license agreement (EULA)
* associated with this source code for terms and conditions that govern
* your use of this NVIDIA software.
*
*/
#include <stdio.h>
#include <sys/time.h>
#include <cuda.h>
#define HANDLE_ERROR( err ) (gpuCheckError( err, __FILE__, __LINE__ ))
// Print the CUDA error with its source location and abort.
// Invoked through the HANDLE_ERROR macro.
static void gpuCheckError( cudaError_t err,
                           const char *file,
                           int line ) {
    if (err == cudaSuccess)
        return;
    printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line );
    exit( EXIT_FAILURE );
}
/*#define N 10000
#define FULL_DATA_SIZE (N*100)*/
/*__global__ void kernel( int *a, int *b, int *c ) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < N) {
int idx1 = (idx + 1) % 256;
int idx2 = (idx + 2) % 256;
float as = (a[idx] + a[idx1] + a[idx2]) / 3.0f;
float bs = (b[idx] + b[idx1] + b[idx2]) / 3.0f;
c[idx] = (as + bs) / 2;
}
}*/
// Trial-division primality test: one thread per candidate in A[0..max).
// Each prime found atomically increments count[cnt] (cnt = chunk index,
// so each chunk of the input accumulates into its own counter).
__global__ void primeP_gpu (int max, int *A, int *count, int cnt)
{
int n = blockDim.x * blockIdx.x + threadIdx.x;
// do nothing if we are not in the useable space of
// threads (see kernel launch call: you may be creating
// more threads than you need)
if (n >= max) return;
unsigned int a = A[n];
int i;
// find the smallest divisor >= 2; the loop exits with i == a iff a is prime
for (i = 2; i < a; i++)
{
if (a % i == 0 && i != a)
break;
}
if (a == i) {
// don't do this: threads overwrite each other's values
// causing undercount:
// *count = *count + 1;
// instead, use atomic operations:
atomicAdd(&count[cnt], 1);
}
}
/* Read integers (one per line) from the file named in argv[1], then count
   the primes among them on the GPU, splitting the data into 100 chunks
   processed alternately on two CUDA streams to overlap copy and compute. */
int main( int argc, char *argv[] ) {
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaStream_t stream0, stream1;
    int *host_a, *host_c;
    int *dev_a0;
    int *dev_a1;
    int *dev_c0;
    int *dev_c1;
    char *filename;
    if(argc < 2)
    {
        printf("too few arguments");
        return 0;
    }
    /* fix: +1 for the terminating NUL; the original allocation was one byte
       short, so strcpy wrote past the end of the buffer */
    filename = (char *) malloc(strlen(argv[1]) + 1);
    strcpy(filename, argv[1]);
    FILE *file;
    /* fix: getline() requires a NULL (or malloc'd) buffer pointer; the
       original passed an uninitialized pointer (undefined behavior) */
    char *input = NULL;
    size_t len = 0;
    int i=0;
    int maxTested = 100000000;
    /* allocate host locked (pinned) memory, required for cudaMemcpyAsync */
    HANDLE_ERROR( cudaHostAlloc( (void**)&host_a,
                                 maxTested * sizeof(int),
                                 cudaHostAllocDefault ) );
    file = fopen(filename, "r");
    /* fix: loop on getline's return value instead of !feof(file); the feof
       pattern stores the final line a second time after hitting EOF */
    while (getline(&input, &len, file) != -1)
    {
        host_a[i++] = atoi(input);
    }
    /* fix: release the file handle and line buffer (both were leaked) */
    fclose(file);
    free(input);
    free(filename);
    int data_size = i;
    /* split the data into 100 chunks of N integers each */
    int N = data_size/100;
    cudaDeviceProp prop;
    int whichDevice;
    HANDLE_ERROR( cudaGetDevice( &whichDevice ) );
    HANDLE_ERROR( cudaGetDeviceProperties( &prop, whichDevice ) );
    if (!prop.deviceOverlap) {
        printf( "Device will not handle overlaps, so no speed up from streams\n" );
        return 0;
    }
    /* start the timers */
    HANDLE_ERROR( cudaEventCreate( &start ) );
    HANDLE_ERROR( cudaEventCreate( &stop ) );
    /* initialize the streams */
    HANDLE_ERROR( cudaStreamCreate( &stream0 ) );
    HANDLE_ERROR( cudaStreamCreate( &stream1 ) );
    /* allocate the memory on the GPU: one input chunk per stream, plus a
       100-slot counter array per stream (one counter per chunk) */
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a0,
                              N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a1,
                              N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c0,
                              100 * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c1,
                              100 * sizeof(int) ) );
    HANDLE_ERROR( cudaMemset( (void*)dev_c0, 0,
                              100 * sizeof(int) ) );
    HANDLE_ERROR( cudaMemset( (void*)dev_c1, 0,
                              100 * sizeof(int) ) );
    HANDLE_ERROR( cudaHostAlloc( (void**)&host_c,
                                 100 * sizeof(int),
                                 cudaHostAllocDefault ) );
    HANDLE_ERROR( cudaEventRecord( start, 0 ) );
    unsigned int threads_per_block = 1024;
    unsigned int num_blocks = ceil (N / (1.0*threads_per_block) );
    /* now loop over full data, in bite-sized chunks: two chunks per
       iteration, one per stream */
    int cnt =0;
    for (int i=0; i<data_size; i+= N*2) {
        /* enqueue copy of the even chunk for stream0 */
        HANDLE_ERROR( cudaMemcpyAsync( dev_a0, host_a+i,
                                       N * sizeof(int),
                                       cudaMemcpyHostToDevice,
                                       stream0 ) );
        /* enqueue kernel in stream0 */
        primeP_gpu<<<num_blocks,threads_per_block,0,stream0>>>( N, dev_a0, dev_c0, cnt );
        /* enqueue copy of the odd chunk for stream1 */
        HANDLE_ERROR( cudaMemcpyAsync( dev_a1, host_a+i+N,
                                       N * sizeof(int),
                                       cudaMemcpyHostToDevice,
                                       stream1 ) );
        /* enqueue kernel in stream1 */
        primeP_gpu<<<num_blocks,threads_per_block,0,stream1>>>( N, dev_a1, dev_c1, cnt);
        /* enqueue copies of each chunk's counter back to locked memory */
        HANDLE_ERROR( cudaMemcpyAsync( host_c+cnt, dev_c0+cnt,
                                       sizeof(int),
                                       cudaMemcpyDeviceToHost,
                                       stream0 ) );
        HANDLE_ERROR( cudaMemcpyAsync( host_c+cnt+1, dev_c1+cnt,
                                       sizeof(int),
                                       cudaMemcpyDeviceToHost,
                                       stream1 ) );
        cnt +=2;
    }
    HANDLE_ERROR( cudaStreamSynchronize( stream0 ) );
    HANDLE_ERROR( cudaStreamSynchronize( stream1 ) );
    /* total = sum of the 100 per-chunk counters */
    int h_numPrimes=0;
    for(int i=0;i<100;i++)
    {
        h_numPrimes += host_c[i];
    }
    HANDLE_ERROR( cudaEventRecord( stop, 0 ) );
    HANDLE_ERROR( cudaEventSynchronize( stop ) );
    HANDLE_ERROR( cudaEventElapsedTime( &elapsedTime,
                                        start, stop ) );
    printf( "Time taken: %3.1f s\n", elapsedTime/1000 );
    printf("Number of primes: %d\n", h_numPrimes);
    /* cleanup the streams and memory */
    HANDLE_ERROR( cudaFreeHost( host_a ) );
    HANDLE_ERROR( cudaFree( dev_a0 ) );
    HANDLE_ERROR( cudaFree( dev_a1 ) );
    HANDLE_ERROR( cudaFreeHost( host_c ) );
    HANDLE_ERROR( cudaFree( dev_c0 ) );
    HANDLE_ERROR( cudaFree( dev_c1 ) );
    HANDLE_ERROR( cudaStreamDestroy( stream0 ) );
    HANDLE_ERROR( cudaStreamDestroy( stream1 ) );
    return 0;
}
|
9,555 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <device_functions.h>
#define FP_TYPE float
/* Kernel for vector addition */
/* Saturated SIMD add of two packed 16-bit lanes via the PTX vadd2 op.
   NOTE(review): the asm references %0 as the trailing addend operand but
   declares it write-only ("=r"); it likely should be "+r" so %0 has a
   defined value on input — confirm before relying on this helper. */
static __device__ __inline__ int __shortadd2(const int value_a, const int value_b)
{
int ret;
asm("{vadd2.s32.s32.s32.sat %0, %1, %2, %0;}" : "=r"(ret) : "r"(value_a) , "r"(value_b));
return ret;
}
/* Element-wise vector add z = x + y, wrapped in an artificial
   10000-iteration loop per element (benchmark workload; "#pragma unroll 1"
   and the always-true "i != -2" guard discourage the compiler from
   collapsing the loop). */
__global__ void Vec_add(FP_TYPE x[], FP_TYPE y[], FP_TYPE z[], int n) {
/* blockDim.x = threads_per_block */
/* First block gets first threads_per_block components. */
/* Second block gets next threads_per_block components, etc. */
int i = blockDim.x * blockIdx.x + threadIdx.x;
/* block_count*threads_per_block may be >= n */
if (i < n) {
//~ for (int j = 0; j<1000; j++)
float x_val = x[i];
float y_val = y[i];
float z_val;
#pragma unroll 1
for (int k = 0; k<10000; k++)
{ //z_val = __shortadd2(x_val,y_val);
if(i != -2)
z_val = x_val + y_val;
}
// z_val = __vadd2(x_val, y_val);
//~ asm("{vadd2.s32.s32.s32.sat %0, %1, %2, %0;}" : "=r"(z_val) : "r"(x_val) , "r"(y_val));
z[i] = z_val;
}
} /* Vec_add */
/* Host code */
/* Host driver: time the Vec_add benchmark kernel on n elements and print
   the elapsed time plus the average of the result vector. */
int main(int argc, char* argv[]) {
    int n, i;
    FP_TYPE *h_x, *h_y, *h_z;
    FP_TYPE *d_x, *d_y, *d_z;
    int threads_per_block;
    int block_count;
    size_t size;
    cudaEvent_t start, stop;
    float elapsedTime;
    /* Get number of components in vector */
    if (argc != 2) {
        fprintf(stderr, "usage: %s <vector order>\n", argv[0]);
        exit(0);
    }
    n = strtol(argv[1], NULL, 10);
    size = n*sizeof(FP_TYPE);
    /* Allocate input vectors in host memory */
    h_x = (FP_TYPE*) malloc(size);
    h_y = (FP_TYPE*) malloc(size);
    h_z = (FP_TYPE*) malloc(size);
    /* Initialize input vectors (h_y pseudo-random in [-4, 0]) */
    for (i = 0; i < n; i++) {
        int k = rand();
        h_x[i] = (i+1)%50;
        h_y[i] = -((n-k)%5);
    }
    /* Allocate vectors in device memory */
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    cudaMalloc(&d_z, size);
    /* Copy vectors from host memory to device memory */
    cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
    /* Define block size: enough blocks to cover all n elements */
    threads_per_block = 256;
    block_count = (n + threads_per_block - 1)/threads_per_block;
    cudaEventCreate(&start);
    cudaEventRecord(start,0);
    Vec_add<<<block_count, threads_per_block>>>(d_x, d_y, d_z, n);
    /* fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize()
       is the supported equivalent */
    cudaDeviceSynchronize();
    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start,stop);
    printf("Elapsed time : %f ms\n" ,elapsedTime);
    cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
    /* Average the result on the host */
    double avg_sum = 0.0;
    for (i = 0; i < n; i++)
        avg_sum = avg_sum + h_z[i];
    avg_sum/= n;
    printf("%.3lf \n",avg_sum );
    /* fix: destroy the timing events (they were leaked) */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    /* Free device memory */
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    /* Free host memory */
    free(h_x);
    free(h_y);
    free(h_z);
    return 0;
} /* main */
|
9,556 | #include "includes.h"
/*
* Copyright (c) Facebook, Inc. and its affiliates.
* All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
// Deliberately slow kernel: every element performs 100000 dependent
// read-modify-write passes over the buffer (grid-stride over [0, sz)).
__global__ void _slowKernel(char* ptr, int sz) {
    const int stride = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < sz; idx += stride) {
        for (int iter = 0; iter < 100000; ++iter) {
            ptr[idx] += ptr[(idx + 1007) % sz] + iter;
        }
    }
} |
9,557 | #include <curand.h>
#include <curand_kernel.h>
#define DIM 1600
#define PI 3.14159265
// floor(log2(N)) for N > 0, computed by counting the highest set bit.
// NOTE: shadows the math-library log2 inside device code.
__device__ int log2(int N){
int k = N, i = 0;
while(k) {
k >>= 1;
i++;}
return i - 1;
}
// Bit-reverse the low log2(N) bits of n (N must be a power of two).
__device__ int reverse(int N, int n) {
    // fix (perf): log2(N) is loop-invariant; the original recomputed it
    // twice per loop iteration.
    const int bits = log2(N);
    int p = 0;
    for (int j = 1; j <= bits; j++) {
        if (n & (1 << (bits - j)))
            p |= 1 << (j - 1);
    }
    return p;
}
// Bit-reversal permutation of column x (length `row`) of the complex image,
// using the *_d_out arrays as scratch before writing back in place.
__device__ void ordina_x(float *complex_r, float *complex_i,
float *real_d_out, float *imagi_d_out,
int row, int col, int x) {
int N = row, a;
for(int i = 0; i < N; i++){
a = reverse((int)N, i);
real_d_out[i*col + x] = complex_r[a*col + x];
imagi_d_out[i*col + x] = complex_i[a*col + x];}
// copy the permuted column back in place
for(int j = 0; j < N; j++){
complex_r[j*col + x] = real_d_out[j*col + x];
complex_i[j*col + x] = imagi_d_out[j*col + x];}
}
// Bit-reversal permutation of row y of the complex image, via scratch.
// NOTE(review): the length is taken from `row` although the loop walks
// `col`-strided row elements — correct only when the padded image is
// square (rowF == colF); confirm for non-square inputs.
__device__ void ordina_y(float *complex_r, float *complex_i,
float *real_d_out, float *imagi_d_out,
int row, int col, int y) {
int N = row, a;
for(int i = 0; i < N; i++){
a = reverse((int)N, i);
real_d_out[y*col + i] = complex_r[y*col + a];
imagi_d_out[y*col + i] = complex_i[y*col + a];}
// copy the permuted row back in place
for(int j = 0; j < N; j++){
complex_r[y*col + j] = real_d_out[y*col + j];
complex_i[y*col + j] = imagi_d_out[y*col + j];}
}
// In-place radix-2 DIT FFT butterflies down column x (input must already be
// bit-reversed by ordina_x). Twiddles are exp(-2*pi*i*k/N) with N = row;
// N must be a power of two.
__device__ void Func_FFT_X(float *complex_r, float *complex_i,
int row, int col, int x){
int n = 1, N = row;
int a = N/2;
float temp_real, temp_imagi;
float t_r, t_i, a_r, a_i;
// one pass per FFT stage; butterfly span doubles each stage
for(int j = 0; j < log2(N); j++){
for (int i = 0; i < N; i++) {
if(!(i & n)) {
// butterfly between rows i and i+n of column x
temp_real = complex_r[x + (i * col)];
temp_imagi = complex_i[x + (i * col)];
a_r = cos((-2) * ((i * a) % (n * a)) * PI / N);
a_i = sin((-2) * ((i * a) % (n * a)) * PI / N);
t_r = (a_r*complex_r[x + (i + n)*col]) - (a_i*complex_i[x + (i + n)*col]);
t_i = (a_i*complex_r[x + (i + n)*col]) + (a_r*complex_i[x + (i + n)*col]);
complex_r[x + (i * col)] += t_r;
complex_i[x + (i * col)] += t_i;
complex_r[x + (i + n)*col] = temp_real - t_r;
complex_i[x + (i + n)*col] = temp_imagi - t_i;}
}
n *= 2;
a = a/2;
}
}
// In-place radix-2 DIT FFT butterflies along row y (input must already be
// bit-reversed by ordina_y). Twiddles are exp(-2*pi*i*k/N) with N = col;
// N must be a power of two.
__device__ void Func_FFT_Y(float *complex_r, float *complex_i,
int row, int col, int y){
int n = 1, N = col;
int a = N/2;
float temp_real, temp_imagi;
float t_r, t_i, a_r, a_i;
// one pass per FFT stage; butterfly span doubles each stage
for(int j = 0; j < log2(N); j++){
for (int i = 0; i < N; i++) {
if(!(i & n)) {
// butterfly between columns i and i+n of row y
temp_real = complex_r[i + (y * col)];
temp_imagi = complex_i[i + (y * col)];
a_r = cos(-2 * ((i * a) % (n * a)) * PI/ N);
a_i = sin(-2 * ((i * a) % (n * a)) * PI/ N);
t_r = (a_r*complex_r[(i + n) + y*col]) - (a_i*complex_i[(i + n) + y*col]);
t_i = (a_i*complex_r[(i + n) + y*col]) + (a_r*complex_i[(i + n) + y*col]);
complex_r[i + (y * col)] += t_r;
complex_i[i + (y * col)] += t_i;
complex_r[(i + n) + y*col] = temp_real - t_r;
complex_i[(i + n) + y*col] = temp_imagi - t_i;}
}
n *= 2;
a = a/2;
}
}
// Column pass of the 2D FFT: one thread per column x of the padded
// colF x rowF image. Converts RGB to luma, zero-pads to (colF, rowF),
// bit-reverses and FFTs the column, then swaps the top/bottom halves
// (fftshift along y). The r/g/b_dataC outputs are unused here; they are
// written by FFT_Y.
__global__ void FFT_X(unsigned char *R_input, unsigned char *G_input,
unsigned char *B_input, size_t i_size,
float *complex_r, float *complex_i,
float *real_d_out, float *imagi_d_out,
unsigned char *r_dataC, unsigned char *g_dataC,
unsigned char *b_dataC, unsigned long col, unsigned long row,
unsigned long colF, unsigned long rowF ) {
int x = threadIdx.x + (blockIdx.x * blockDim.x);
float temp;
if(x < col){
// real part = BT.601 luma of the pixel; imaginary part = 0
for (int i = 0; i < row; i++) {
complex_r[x + (i * colF)] = 0.2989 * R_input[x + (i * i_size)] + 0.587 * G_input[x + (i * i_size)] + 0.1140 * B_input[x + (i * i_size)];
complex_i[x + (i * colF)] = 0;}
// zero-pad rows below the source image
for (int i = row; i < rowF; i++) {
complex_r[x + (i * colF)] = 0;
complex_i[x + (i * colF)] = 0;}
}else{
// columns beyond the source image are entirely padding
for (int i = 0; i < rowF; i++) {
complex_r[x + (i * colF)] = 0;
complex_i[x + (i * colF)] = 0;}
}
ordina_x(complex_r, complex_i, real_d_out, imagi_d_out, rowF, colF, x);
Func_FFT_X(complex_r, complex_i, rowF, colF, x);
// fftshift in y: swap the top and bottom halves of the column
for (int i = 0; i < rowF/2; i++){
temp = complex_r[x + (i * colF)];
complex_r[x + (i * colF)] = complex_r[x + ((i + rowF/2) * colF)];
complex_r[x + ((i + rowF/2) * colF)] = temp;
temp = complex_i[x + (i * colF)];
complex_i[x + (i * colF)] = complex_i[x + ((i + rowF/2) * colF)];
complex_i[x + ((i + rowF/2) * colF)] = temp;}
}
// Row pass of the 2D FFT: one thread per row y. Bit-reverses and FFTs the
// row, applies fftshift along x, then writes the log-magnitude spectrum
// (20*log10(|F|)) of the centered col x row crop into all three output
// channels. The R/G/B input pointers are unused in this pass.
__global__ void FFT_Y(unsigned char *R_input, unsigned char *G_input,
unsigned char *B_input, size_t i_size,
float *complex_r, float *complex_i,
float *real_d_out, float *imagi_d_out,
unsigned char *r_dataC, unsigned char *g_dataC,
unsigned char *b_dataC, unsigned long col, unsigned long row,
unsigned long colF, unsigned long rowF ) {
int y = threadIdx.x + (blockIdx.x * blockDim.x);
float temp;
ordina_y(complex_r, complex_i, real_d_out, imagi_d_out, rowF, colF, y);
Func_FFT_Y(complex_r, complex_i, rowF, colF, y);
// fftshift in x: swap the left and right halves of the row
for (int i = 0; i < colF/2; i++) {
temp = complex_r[i + (y * colF)];
complex_r[i + (y * colF)] = complex_r[(i + colF/2) + (y * colF)];
complex_r[(i + colF/2) + (y * colF)] = temp;
temp = complex_i[i + (y * colF)];
complex_i[i + (y * colF)] = complex_i[(i + colF/2) + (y * colF)];
complex_i[(i + colF/2) + (y * colF)] = temp;}
unsigned char v;
// crop bounds: keep the centered col x row window of the padded spectrum
int a = (colF/2) - (col/2);
int temp_b = (rowF/2) - (row/2);
if( y >= temp_b)
for (int i = a; i < (colF/2) + (col/2); i++) {
v = (unsigned char)(20*log10(sqrt((complex_r[i + (y * colF)]*complex_r[i + (y * colF)]) + (complex_i[i + (y * colF)]*complex_i[i + (y * colF)]))));
r_dataC[(i - a ) + (y - temp_b) * i_size] = v;
g_dataC[(i - a) + (y - temp_b) * i_size] = v;
b_dataC[(i - a) + (y - temp_b) * i_size] = v;}
}
|
9,558 | #ifdef _WIN32
# define NOMINMAX
#endif
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <assert.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
// Read `len` floats from `filename`, wrapping back to the start of the
// file when it runs out of values, and shift every value down by 5.
// The caller owns (and frees) the returned buffer.
float* read_array(const char* filename, int len) {
    float *x = (float*) malloc(len * sizeof(float));
    FILE *fp = fopen(filename, "r");
    if (fp == NULL) {
        // fix: the original dereferenced a NULL FILE* when the file was missing
        fprintf(stderr, "read_array: cannot open %s\n", filename);
        exit(1);
    }
    for (int i = 0; i < len; i++) {
        int r = fscanf(fp, "%f", &x[i]);
        if (r == EOF) {
            rewind(fp);
            // fix: re-read after wrapping; the original left this element
            // uninitialized at each wrap point
            r = fscanf(fp, "%f", &x[i]);
        }
        x[i] -= 5;
    }
    fclose(fp);
    return x;
}
// CPU reference: sum `len` floats in double precision, then narrow the
// total into *reference.
void computeSum( float* reference, float* idata, const unsigned int len)
{
    reference[0] = 0;
    double acc = 0;
    for (unsigned int k = 0; k < len; ++k)
        acc += idata[k];
    *reference = acc;
}
// Sum N floats from problem1.inp with thrust::reduce on the GPU, compare
// against a CPU reference within a tolerance, and print N, the sum, and
// the inclusive (alloc + copy + reduce) time in ms.
int main( int argc, char** argv)
{
if(argc != 2) {
fprintf(stderr, "usage: ./problem2 N\n");
exit(1);
}
int num_elements = atoi(argv[1]);
float* h_data=read_array("problem1.inp",num_elements);
float reference = 1.0f;
computeSum(&reference , h_data, num_elements);
//start inclusive timing
float time;
cudaEvent_t startIn,stopIn;
cudaEventCreate(&startIn);
cudaEventCreate(&stopIn);
cudaEventRecord(startIn, 0);
int size = num_elements*sizeof(float);
float *d_in;
assert(cudaSuccess == cudaMalloc((void**)&d_in, size));
//copy the memory to device
assert(cudaSuccess == cudaMemcpy(d_in, h_data, size, cudaMemcpyHostToDevice));
//set up the pointer
thrust::device_ptr<float> dev_ptr(d_in);
//perform the thrust reduction
// NOTE(review): the init value is double but the functor is
// thrust::plus<float>, so accumulation is effectively float — confirm
// this mix is intended.
double result = thrust::reduce(dev_ptr,dev_ptr + num_elements, (double) 0.0,thrust::plus<float>());
//stop inclusive timing
cudaEventRecord(stopIn, 0);
cudaEventSynchronize(stopIn);
cudaEventElapsedTime(&time, startIn, stopIn);
cudaEventDestroy(startIn);
cudaEventDestroy(stopIn);
// Run accuracy test
float epsilon = 0.3f;
unsigned int result_regtest = (abs(result - reference) <= epsilon);
if(!result_regtest) printf("Test failed device: %f host: %f\n",result,reference);
//print the outputs
printf("%d\n%f\n%f\n",num_elements, result, time);
//printf("%f\n", time);
// cleanup memory
cudaFree(d_in);
//cudaFree(d_out);
free( h_data);
return 0;
}
|
9,559 | #include <stdio.h>
const int rSize = 4;
const int num_blocks = 2;
const int num_threads = 64;
// Smallest power of 2 larger than n
// Round n up to the next power of two (n itself when already a power of
// two): classic decrement, smear the high bit right, then increment.
int getHashTableSize(int n) {
    n -= 1;
    for (int shift = 1; shift <= 16; shift <<= 1)
        n |= n >> shift;
    return n + 1;
}
// Count R-keys per hash bucket (hash = key & (hSize-1), hSize a power of
// two) using atomic increments; one thread per key.
__global__ void calculateBucketSizes(int* rKeys, int rSize, int* bucketSizes, int hSize) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < rSize) {
int rKey = rKeys[idx];
int hKey = rKey & (hSize - 1);
atomicAdd(&(bucketSizes[hKey]), 1);
// debug only: this read is not atomic w.r.t. other threads, so the
// printed running count may be stale
printf("%d: %d\n", rKey, bucketSizes[hKey]);
}
}
// Exclusive prefix scan over the bucket sizes: bucket i starts after all
// earlier buckets, with each entry occupying two ints (key, value).
void calculateBucketPositions(int*bucketSizes, int* bucketPositions, int hSize) {
    bucketPositions[0] = 0;
    int offset = 0;
    for (int i = 1; i < hSize; ++i) {
        offset += 2 * bucketSizes[i - 1];
        bucketPositions[i] = offset;
    }
}
// Build phase: insert every key of R into the bucketed hash table.
// Each entry occupies two ints (key, value); a slot is claimed by bumping
// bucketPositions[hKey] atomically.
__global__ void buildPhase(int* rKeys, int rSize, int hSize, int* bucketSizes, int* bucketPositions, int* hashTable) {
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (int i = idx; i < rSize; i += stride) {
        // fix: was rKeys[idx], which re-inserted the same key whenever the
        // grid-stride loop made more than one pass per thread
        int rKey = rKeys[i];
        int hKey = rKey & (hSize - 1);
        // fix: claim the slot with the atomicAdd return value; the original
        // read bucketPositions[hKey] non-atomically before the add, so two
        // threads hashing to the same bucket could claim the same slot
        int pos = atomicAdd(&(bucketPositions[hKey]), 2);
        // TODO: Handle duplicates
        hashTable[pos] = rKey;
        hashTable[pos+1] = rKey; // TODO: Assume value = key for now
    }
}
// Probe phase: for each key of S, scan its bucket (pairs of ints) in the
// hash table and record a match into joined[idx].
// NOTE(review): the idx bound uses rSize — this assumes |S| == |R| (see the
// matching TODO in hashJoin); confirm before using unequal relations.
__global__ void probePhase(int* joined, int* joinedSize, int* sKeys, int rSize, int hSize, int* hashTable, int* bucketPositions, int* bucketSizes) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < rSize) {
int sKey = sKeys[idx];
int hKey = sKey & (hSize - 1);
int pos = bucketPositions[hKey];
int len = bucketSizes[hKey];
// stride of 2: each hash-table entry is a (key, value) pair
for (int i = pos; i < pos + 2 * len; i += 2) {
if (hashTable[i] == sKey) {
printf("Writing %d to %d\n", sKey, idx);
joined[idx] = sKey; // TODO: Figure out how to write to array in parallel
}
}
}
}
// GPU hash join of r and s on their key column (column 0).
// Pipeline: count bucket sizes on the device -> prefix-scan bucket start
// positions on the host -> build the hash table from R -> probe with S ->
// copy matches back and print them.
void hashJoin(int** r, int** s) {
// Get keys of r
int h_rKeys[rSize];
for (int i = 0; i < rSize; i++) h_rKeys[i] = r[i][0];
int* d_rKeys;
cudaMalloc((void**) &d_rKeys, sizeof(int) * rSize);
cudaMemcpy(d_rKeys, h_rKeys, sizeof(int) * rSize, cudaMemcpyHostToDevice);
// Get size of hashtable
int hSize = getHashTableSize(rSize);
// Get size of each bucket in hash table
int *d_bucketSizes;
cudaMalloc((void**) &d_bucketSizes, sizeof(int) * hSize);
cudaMemset(d_bucketSizes, 0, sizeof(int) * hSize);
printf("Bucket sizes:\n");
calculateBucketSizes<<<num_blocks, num_threads>>>(d_rKeys, rSize, d_bucketSizes, hSize);
cudaDeviceSynchronize();
int h_bucketSizes[hSize];
cudaMemcpy(h_bucketSizes, d_bucketSizes, sizeof(int) * hSize, cudaMemcpyDeviceToHost);
// Stores the position of each bucket in the hash table
int h_bucketPositions[hSize];
calculateBucketPositions(h_bucketSizes, h_bucketPositions, hSize);
printf("Bucket positions:\n");
for (int i = 0; i < hSize; i++) printf("%d ", h_bucketPositions[i]);
printf("\n");
// The build phase bumps its copy of the positions while claiming slots,
// so a separate temp copy is uploaded for it
int* d_tempBucketPositions;
cudaMalloc((void**) &d_tempBucketPositions, sizeof(int) * hSize);
cudaMemcpy(d_tempBucketPositions, h_bucketPositions, sizeof(int) * hSize, cudaMemcpyHostToDevice);
// Scan R and create in-memory hash table
int* hashTable;
cudaMalloc((void**) &hashTable, 2 * sizeof(int) * rSize);
buildPhase<<<num_blocks, num_threads>>>(d_rKeys, rSize, hSize, d_bucketSizes, d_tempBucketPositions, hashTable);
cudaDeviceSynchronize();
// Get keys of s
int h_sKeys[rSize]; // TODO: Assume two relations are the same size for now
for (int i = 0; i < rSize; i++) h_sKeys[i] = s[i][0];
int* d_sKeys;
cudaMalloc((void**) &d_sKeys, sizeof(int) * rSize);
cudaMemcpy(d_sKeys, h_sKeys, sizeof(int) * rSize, cudaMemcpyHostToDevice);
// Scan S, look up join key in hash table, and add tuple to output if match found
int* d_joined; // TODO: just print out list of join keys for now
cudaMalloc((void**) &d_joined, sizeof(int) * rSize);
int* d_joinedSize;
cudaMalloc((void**) &d_joinedSize, sizeof(int));
cudaMemset(d_joinedSize, 0, sizeof(int));
// The probe phase needs the original (un-bumped) bucket start positions
int* d_bucketPositions;
cudaMalloc((void**) &d_bucketPositions, sizeof(int) * hSize);
cudaMemcpy(d_bucketPositions, h_bucketPositions, sizeof(int) * hSize, cudaMemcpyHostToDevice);
probePhase<<<num_blocks, num_threads>>>(d_joined, d_joinedSize, d_sKeys, rSize, hSize, hashTable, d_bucketPositions, d_bucketSizes);
cudaDeviceSynchronize();
int h_joined[rSize];
cudaMemcpy(h_joined, d_joined, sizeof(int) * rSize, cudaMemcpyDeviceToHost);
int h_joinedSize[1];
cudaMemcpy(h_joinedSize, d_joinedSize, sizeof(int), cudaMemcpyDeviceToHost);
printf("Final joined result:\n");
for (int i = 0; i < rSize; i++) {
if (h_joined[i] != 0) {
printf("%d ", h_joined[i]);
}
}
printf("\n");
}
// Print a relationSize x 2 relation to stdout, one tuple per line.
void printRelation(int** relation, int relationSize) {
    printf("Relation:\n");
    for (int row = 0; row < relationSize; row++) {
        for (int col = 0; col < 2; col++)
            printf("%d ", relation[row][col]);
        printf("\n");
    }
}
// Build a relationSize x 2 relation where both columns hold the same key
// and keys are consecutive, starting at (rand() % 5) + 1.
// Caller owns the returned rows and the row-pointer array.
int** generateRelation(int relationSize) {
    int** rel = (int**) malloc(relationSize * sizeof(int*));
    for (int i = 0; i < relationSize; i++)
        rel[i] = (int*) malloc(2 * sizeof(int));
    // TODO: randomize (kept from the original)
    int count = rand() % 5;
    for (int i = 0; i < relationSize; i++) {
        ++count;
        rel[i][0] = count;
        rel[i][1] = count;
    }
    return rel;
}
// Build two small demo relations, print them, and join them on the key column.
int main() {
    int** left = generateRelation(rSize);
    printRelation(left, rSize);
    int** right = generateRelation(rSize);
    printRelation(right, rSize);
    hashJoin(left, right);
    return 0;
}
|
9,560 | #include <iostream>
#include "cuda_runtime.h"
//定义矢量长度
const int N = 64 * 256;
// 定义每个Block中包含的Thread数量
const int threadsPerBlock = 256;
// 定义每个Grid中包含的Block数量, 这里32 < 64, 是为了模拟线程数量不足的情况
const int blocksPerGrid = 32;
// 核函数:矢量点积
// Kernel: vector dot product.
// Each block accumulates per-thread partial products in shared memory,
// tree-reduces them, and writes one partial sum to c[blockIdx.x]; the host
// finishes the reduction across blocks.
__global__ void dot(float* a, float* b, float* c)
{
// Shared scratch for this block's partial products, one slot per thread
// (each block has its own instance of this shared array).
__shared__ float cache[threadsPerBlock];
// Global thread index
int tid = blockIdx.x * blockDim.x + threadIdx.x;
// Index within the block
int cacheIndex = threadIdx.x;
// Grid-stride accumulation handles N larger than the total thread count
// (the grid is deliberately launched with fewer threads than N).
float temp = 0.0f;
while (tid < N)
{
temp += a[tid] * b[tid];
tid += gridDim.x * blockDim.x;
}
// Store this thread's partial sum
cache[cacheIndex] = temp;
// Barrier: all partial sums must be in shared memory before reducing
__syncthreads();
// Tree reduction; requires threadsPerBlock to be a power of two
int i = threadsPerBlock / 2;
while (i != 0)
{
if (cacheIndex < i)
cache[cacheIndex] += cache[cacheIndex + i];
__syncthreads();
i /= 2;
}
// Thread 0 publishes the block's partial sum
if (cacheIndex == 0)
c[blockIdx.x] = cache[0];
}
// Host driver: compute the dot product of two all-ones vectors of length N
// on the GPU (expected result 16384) and print it.
int main()
{
    // Host buffers; c holds one partial sum per block.
    float a[N];
    float b[N];
    // fix: was sized threadsPerBlock (256) although only blocksPerGrid (32)
    // entries are ever written or read
    float c[blocksPerGrid];
    for (size_t i = 0; i < N; i++)
    {
        a[i] = 1.f;
        b[i] = 1.f;
    }
    // Allocate device memory.
    float* dev_a = nullptr;
    float* dev_b = nullptr;
    float* dev_c = nullptr;
    cudaMalloc((void**)&dev_a, N * sizeof(float));
    cudaMalloc((void**)&dev_b, N * sizeof(float));
    cudaMalloc((void**)&dev_c, blocksPerGrid * sizeof(float));
    // Copy the input vectors host -> device.
    cudaMemcpy(dev_a, a, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, N * sizeof(float), cudaMemcpyHostToDevice);
    // fix: launch with the named constants instead of the magic literals
    // 32/256, so the launch geometry always matches the buffer sizes
    dot<<<blocksPerGrid, threadsPerBlock>>>(dev_a, dev_b, dev_c);
    // Copy the per-block partial sums back to the host.
    cudaMemcpy(c, dev_c, blocksPerGrid * sizeof(float), cudaMemcpyDeviceToHost);
    // Finish the reduction on the CPU.
    for (size_t i = 1; i < blocksPerGrid; i++)
        c[0] += c[i];
    std::cout << "The ground truth is 16384, our answer is " << c[0] << std::endl;
    // Release device memory.
    cudaFree(dev_a); cudaFree(dev_b); cudaFree(dev_c);
    system("pause"); // Windows-only pause; prints a harmless error elsewhere
    return 0;
}
|
9,561 | #include <stdlib.h>
#include <stdio.h>
#include <string.h>
// Abort with a diagnostic when a CUDA API call fails (see CUDA_CHECK).
static void cuda_checker(cudaError_t err, const char *file, int line ) {
    if (err == cudaSuccess)
        return;
    printf("%s in %s at line %d\n", cudaGetErrorString(err), file, line);
    exit(EXIT_FAILURE);
}
#define CUDA_CHECK(err) (cuda_checker(err, __FILE__, __LINE__ ))
const int M = 2048, K = 2048, N = 2048;
const int TS = 32;
// Naive GEMM: C = A * B with column-major storage (element (r,c) at
// index r + c*rows). One thread per output element; no shared-memory
// tiling (TS is used only for the launch geometry in main).
__global__ void myGEMM1(const float* A,
const float* B,
float* C) {
// Thread identifiers
const int globalRow = threadIdx.x + blockIdx.x * blockDim.x;
const int globalCol = threadIdx.y + blockIdx.y * blockDim.y;
// printf("Hello from block %d, thread %d\n", globalRow, globalCol);
// Compute a single element (loop over K)
float acc = 0.0f;
for (int k=0; k<K; k++) {
acc += A[k*M + globalRow] * B[globalCol*K + k];
}
// Store the result
C[globalCol*M + globalRow] = acc;
}
// Driver: builds two all-ones 2048x2048 matrices, multiplies them on the GPU,
// and reports the kernel time. Fixes vs. original: the stop event is recorded
// immediately after the kernel (the original included the D2H copy in the
// measured time), the launch is checked with cudaGetLastError, events are
// destroyed, and the host buffers are freed.
int main(int argc, const char **argv) {
    // Host buffers (M*K, K*N, M*N); M == K == N here so all are M*N floats.
    float *a, *b, *c;
    a = (float*) malloc(sizeof(float) * M * K);
    b = (float*) malloc(sizeof(float) * K * N);
    c = (float*) malloc(sizeof(float) * M * N);
    float *dev_a, *dev_b, *dev_c;
    // All-ones inputs: every element of C should come out equal to K.
    for(int i = 0; i < M*N; i++) {
        a[i] = 1;
        b[i] = 1;
    }
    CUDA_CHECK( cudaMalloc((void**)&dev_a, M * N * sizeof(float)) );
    CUDA_CHECK( cudaMalloc((void**)&dev_b, M * N * sizeof(float)) );
    CUDA_CHECK( cudaMalloc((void**)&dev_c, M * N * sizeof(float)) );
    CUDA_CHECK( cudaMemcpy(dev_a, a, M * N * sizeof(float), cudaMemcpyHostToDevice) );
    CUDA_CHECK( cudaMemcpy(dev_b, b, M * N * sizeof(float), cudaMemcpyHostToDevice) );
    // TS x TS threads per block; M and N are multiples of TS so the grid
    // covers the matrix exactly.
    dim3 threadsPerBlock(TS, TS);
    dim3 numBlocks(M / TS, N / TS);
    float time;
    cudaEvent_t start, stop;
    CUDA_CHECK(cudaEventCreate(&start));
    CUDA_CHECK(cudaEventCreate(&stop));
    CUDA_CHECK(cudaEventRecord(start, 0));
    myGEMM1<<<numBlocks, threadsPerBlock>>>(dev_a, dev_b, dev_c);
    // Launch-configuration errors surface here, not from the launch itself.
    CUDA_CHECK(cudaGetLastError());
    // Record stop before the D2H copy so the timing covers the kernel only.
    CUDA_CHECK(cudaEventRecord(stop, 0));
    CUDA_CHECK(cudaEventSynchronize(stop));
    CUDA_CHECK(cudaEventElapsedTime(&time, start, stop));
    CUDA_CHECK( cudaMemcpy(c, dev_c, M * N * sizeof(float), cudaMemcpyDeviceToHost) );
    printf("Time to generate: %3.1f ms \n", time);
    CUDA_CHECK(cudaEventDestroy(start));
    CUDA_CHECK(cudaEventDestroy(stop));
    CUDA_CHECK( cudaFree(dev_a) );
    CUDA_CHECK( cudaFree(dev_b) );
    CUDA_CHECK( cudaFree(dev_c) );
    // Release host buffers (leaked in the original).
    free(a);
    free(b);
    free(c);
    cudaDeviceReset();
    return 0;
}
9,562 | #include "includes.h"
// Element-wise product: sum[i] = vector1[i] * vector2[i] for i < size.
// Despite the name this performs no reduction; a separate pass must
// accumulate sum[] to obtain the actual dot product.
__global__ void dotProduct_CUDA_double(double *sum, int size, double *vector1, double *vector2){
    // Flat global thread index; one thread handles one element.
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail.
    if (i >= size) {
        return;
    }
    sum[i] = vector1[i] * vector2[i];
}
9,563 | extern "C"{
#define RGB2GRAY_CONST_ARR_SIZE 3
#define STRONG_EDGE 255
#define NON_EDGE 0.0
#define KERNEL_SIZE 7
//*****************************************************************************************
// CUDA Gaussian Filter Implementation
//*****************************************************************************************
///
/// \brief Apply gaussian filter. This is the CUDA kernel for applying a gaussian blur to an image.
///
///
/// \brief Apply gaussian filter. This is the CUDA kernel for applying a
/// gaussian blur to an image. One thread per pixel; the KERNEL_SIZE^2
/// coefficient array is staged into shared memory first.
///
/// Fix vs. original: kernelSum and the three channel accumulators were read
/// (via +=) without ever being initialized -- undefined behavior that could
/// corrupt every output pixel. They now start at 0.0.
///
__global__ void cu_apply_gaussian_filter(float3 *in_pixels, float3 *out_pixels, int rows, int cols, double *in_kernel)
{
    //copy kernel array from global memory to a shared array
    //(every thread redundantly writes the same values; benign but simple)
    __shared__ double kernel[KERNEL_SIZE][KERNEL_SIZE];
    for (int i = 0; i < KERNEL_SIZE; ++i) {
        for (int j = 0; j < KERNEL_SIZE; ++j) {
            kernel[i][j] = in_kernel[i * KERNEL_SIZE + j];
        }
    }
    __syncthreads();
    //determine id of thread which corresponds to an individual pixel
    int pixNum = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixNum >= 0 && pixNum < rows * cols) {
        // Accumulators MUST start at zero (the original left them
        // uninitialized and then used +=).
        double kernelSum = 0.0;
        double redPixelVal = 0.0;
        double greenPixelVal = 0.0;
        double bluePixelVal = 0.0;
        //Apply Kernel to each pixel of image
        for (int i = 0; i < KERNEL_SIZE; ++i) {
            for (int j = 0; j < KERNEL_SIZE; ++j) {
                //check edge cases, if within bounds, apply filter
                if (((pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)) >= 0)
                    && ((pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)) <= rows*cols-1)
                    && (((pixNum % cols) + j - ((KERNEL_SIZE-1)/2)) >= 0)
                    && (((pixNum % cols) + j - ((KERNEL_SIZE-1)/2)) <= (cols-1))) {
                    redPixelVal += kernel[i][j] * in_pixels[pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)].x;
                    greenPixelVal += kernel[i][j] * in_pixels[pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)].y;
                    bluePixelVal += kernel[i][j] * in_pixels[pixNum + ((i - ((KERNEL_SIZE - 1) / 2))*cols) + j - ((KERNEL_SIZE - 1) / 2)].z;
                    kernelSum += kernel[i][j];
                }
            }
        }
        //update output image; normalize by the sum of the taps that were
        //actually in bounds (the center tap is always in bounds, so
        //kernelSum is nonzero for valid kernels)
        out_pixels[pixNum].x = redPixelVal / kernelSum;
        out_pixels[pixNum].y = greenPixelVal / kernelSum;
        out_pixels[pixNum].z = bluePixelVal / kernelSum;
    }
}
//*****************************************************************************************
// CUDA Intensity Gradient Implementation
//*****************************************************************************************
///
/// \brief Compute gradient (first order derivative x and y). This is the CUDA kernel for taking the derivative of color contrasts in adjacent images.
///
///
/// \brief Compute the x/y intensity gradients of an RGB image, one thread per
/// pixel. `offset` is the row stride (image width) and `parser_length` the
/// number of rows. Central differences are used in the interior, one-sided
/// differences on the first/last columns; the first and last rows are skipped
/// entirely by the guard below. The per-channel gradients are collapsed to a
/// single luminance gradient using ITU-R BT.601 weights (0.2989/0.5870/0.1140).
///
__global__
void cu_compute_intensity_gradient(float3 *in_pixels, float *deltaX_channel, float *deltaY_channel, int parser_length, int offset)
{
    // compute delta X ***************************
    // deltaX = f(x+1) - f(x-1)
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    /* condition here skips first and last row */
    if ((idx > offset) && (idx < (parser_length * offset) - offset))
    {
        float deltaXred = 0;
        float deltaYred = 0;
        float deltaXgreen = 0;
        float deltaYgreen = 0;
        float deltaXblue = 0;
        float deltaYblue = 0;
        /* first column: one-sided forward differences */
        if((idx % offset) == 0)
        {
            // gradient at the first pixel of each line
            // note: at the edge pix[idx-1] does NOT exist
            deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx].x);
            deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx].y);
            deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx].z);
            // gradient at the first pixel of each line
            // note: at the edge pix[idx-1] does NOT exist
            deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx].x);
            deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx].y);
            deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx].z);
        }
        /* last column: one-sided backward differences */
        else if((idx % offset) == (offset - 1))
        {
            deltaXred = (float)(in_pixels[idx].x - in_pixels[idx-1].x);
            deltaXgreen = (float)(in_pixels[idx].y - in_pixels[idx-1].y);
            deltaXblue = (float)(in_pixels[idx].z - in_pixels[idx-1].z);
            deltaYred = (float)(in_pixels[idx].x - in_pixels[idx-offset].x);
            deltaYgreen = (float)(in_pixels[idx].y - in_pixels[idx-offset].y);
            deltaYblue = (float)(in_pixels[idx].z - in_pixels[idx-offset].z);
        }
        /* gradients where NOT edge: central differences */
        else
        {
            deltaXred = (float)(in_pixels[idx+1].x - in_pixels[idx-1].x);
            deltaXgreen = (float)(in_pixels[idx+1].y - in_pixels[idx-1].y);
            deltaXblue = (float)(in_pixels[idx+1].z - in_pixels[idx-1].z);
            deltaYred = (float)(in_pixels[idx+offset].x - in_pixels[idx-offset].x);
            deltaYgreen = (float)(in_pixels[idx+offset].y - in_pixels[idx-offset].y);
            deltaYblue = (float)(in_pixels[idx+offset].z - in_pixels[idx-offset].z);
        }
        // collapse RGB gradients to luminance (BT.601 weights)
        deltaX_channel[idx] = (float)(0.2989 * deltaXred + 0.5870 * deltaXgreen + 0.1140 * deltaXblue);
        deltaY_channel[idx] = (float)(0.2989 * deltaYred + 0.5870 * deltaYgreen + 0.1140 * deltaYblue);
    }
}
//*****************************************************************************************
// CUDA Gradient Magnitude Implementation
//*****************************************************************************************
///
/// \brief Compute magnitude of gradient(deltaX & deltaY) per pixel.
///
///
/// \brief Gradient magnitude per pixel: sqrt(dx^2 + dy^2), computed in double
/// precision, with +0.5 added so a later truncating cast rounds to nearest.
/// One thread per pixel over parser_length * offset pixels.
///
__global__
void cu_magnitude(float *deltaX, float *deltaY, float *out_pixel, int parser_length, int offset)
{
    // Flat pixel index for this thread.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= parser_length * offset) {
        return;
    }
    const double gx = (double)deltaX[idx];
    const double gy = (double)deltaY[idx];
    out_pixel[idx] = (float)(sqrt(gx * gx + gy * gy) + 0.5);
}
//*****************************************************************************************
// CUDA Non Maximal Suppression Implementation
//*****************************************************************************************
///
/// \brief Non Maximal Suppression
/// If the centre pixel is not greater than neighboured pixels in the direction,
/// then the center pixel is set to zero.
/// This process results in one pixel wide ridges.
///
///
/// \brief Non Maximal Suppression
/// If the centre pixel is not greater than neighboured pixels in the direction,
/// then the center pixel is set to zero.
/// This process results in one pixel wide ridges.
///
/// One thread per pixel; `offset` is the row stride, `parser_length` the row
/// count. For each interior pixel the gradient direction is classified into
/// one of 8 octants from the signs/relative sizes of deltaX/deltaY, and the
/// magnitude is linearly interpolated (factor `alpha`) between the two
/// neighbors on either side along that direction (mag1 ahead, mag2 behind).
///
__global__ void cu_suppress_non_max(float *mag, float *deltaX, float *deltaY, float *nms, int parser_length, int offset)
{
    const float SUPPRESSED = 0;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= 0 && idx < parser_length * offset)
    {
        float alpha;   // interpolation weight between the two neighbor pixels
        float mag1, mag2;  // interpolated magnitudes ahead of / behind idx
        // put zero all boundaries of image
        // TOP edge line of the image
        if((idx >= 0) && (idx <offset))
            nms[idx] = 0;
        // BOTTOM edge line of image
        else if((idx >= (parser_length-1)*offset) && (idx < (offset * parser_length)))
            nms[idx] = 0;
        // LEFT & RIGHT edge line
        else if(((idx % offset)==0) || ((idx % offset)==(offset - 1)))
        {
            nms[idx] = 0;
        }
        else // not the boundaries
        {
            // if magnitude = 0, no edge
            if(mag[idx] == 0)
                nms[idx] = SUPPRESSED;
            else{
                if(deltaX[idx] >= 0)
                {
                    if(deltaY[idx] >= 0) // dx >= 0, dy >= 0
                    {
                        if((deltaX[idx] - deltaY[idx]) >= 0) // direction 1 (SEE, South-East-East)
                        {
                            alpha = (float)deltaY[idx] / deltaX[idx];
                            mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
                            mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
                        }
                        else // direction 2 (SSE)
                        {
                            alpha = (float)deltaX[idx] / deltaY[idx];
                            mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
                            mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
                        }
                    }
                    else // dx >= 0, dy < 0
                    {
                        if((deltaX[idx] + deltaY[idx]) >= 0) // direction 8 (NEE)
                        {
                            alpha = (float)-deltaY[idx] / deltaX[idx];
                            mag1 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
                            mag2 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
                        }
                        else // direction 7 (NNE)
                        {
                            alpha = (float)deltaX[idx] / -deltaY[idx];
                            mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
                            mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
                        }
                    }
                }
                else
                {
                    if(deltaY[idx] >= 0) // dx < 0, dy >= 0
                    {
                        if((deltaX[idx] + deltaY[idx]) >= 0) // direction 3 (SSW)
                        {
                            alpha = (float)-deltaX[idx] / deltaY[idx];
                            mag1 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset-1];
                            mag2 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset+1];
                        }
                        else // direction 4 (SWW)
                        {
                            alpha = (float)deltaY[idx] / -deltaX[idx];
                            mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx+offset-1];
                            mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx-offset+1];
                        }
                    }
                    else // dx < 0, dy < 0
                    {
                        if((-deltaX[idx] + deltaY[idx]) >= 0) // direction 5 (NWW)
                        {
                            alpha = (float)deltaY[idx] / deltaX[idx];
                            mag1 = (1-alpha)*mag[idx-1] + alpha*mag[idx-offset-1];
                            mag2 = (1-alpha)*mag[idx+1] + alpha*mag[idx+offset+1];
                        }
                        else // direction 6 (NNW)
                        {
                            alpha = (float)deltaX[idx] / deltaY[idx];
                            mag1 = (1-alpha)*mag[idx-offset] + alpha*mag[idx-offset-1];
                            mag2 = (1-alpha)*mag[idx+offset] + alpha*mag[idx+offset+1];
                        }
                    }
                }
                // non-maximal suppression
                // compare mag1, mag2 and mag[t]
                // if mag[t] is smaller than one of the neighbours then suppress it
                if((mag[idx] < mag1) || (mag[idx] < mag2))
                    nms[idx] = SUPPRESSED;
                else
                {
                    nms[idx] = mag[idx];
                }
            } // END OF ELSE (mag != 0)
        } // END OF FOR(j)
    } // END OF FOR(i)
}
//*****************************************************************************************
// CUDA Hysteresis Implementation
//*****************************************************************************************
///
/// \brief This is a helper function that runs on the GPU.
///
/// It checks if the eight immediate neighbors of a pixel at a given index are above
/// a low threshold, and if they are, sets them to strong edges. This effectively
/// connects the edges.
///
///
/// \brief Promote each of the 8 immediate neighbors of `idx` to a strong edge
/// when it clears the low threshold and has not already been stamped 255.0.
/// Caller must guarantee idx is an interior pixel so all neighbors exist.
///
__device__
void trace_immed_neighbors(float *out_pixels, float *in_pixels,
                           int idx, float t_low, int img_width)
{
    /* offsets of the 8 neighbors, in NW, N, NE, W, E, SW, S, SE order */
    const int offsets[8] = {
        -img_width - 1, -img_width, -img_width + 1,
        -1,                          1,
         img_width - 1,  img_width,  img_width + 1
    };
    for (int k = 0; k < 8; ++k) {
        const int nbr = idx + offsets[k];
        /* connect weak neighbors above t_low that are not already 255.0 */
        if (in_pixels[nbr] >= t_low && in_pixels[nbr] != 255.0) {
            out_pixels[nbr] = STRONG_EDGE;
        }
    }
}
///
/// \brief CUDA implementation of Canny hysteresis high thresholding.
///
/// This kernel is the first pass in the parallel hysteresis step.
/// It launches a thread for every pixel and checks if the value of that pixel
/// is above a high threshold. If it is, the thread marks it as a strong edge (set to 1)
/// in a pixel map and sets the value to the channel max. If it is not, the thread sets
/// the pixel map at the index to 0 and zeros the output buffer space at that index.
///
/// The output of this step is a mask of strong edges and an output buffer with white values
/// at the mask indices which are set.
///
///
/// \brief First hysteresis pass: high thresholding. One thread per pixel.
/// Pixels above t_high are marked 1.0 in the strong-edge mask and painted
/// STRONG_EDGE in the output; all others get mask 0.0 and NON_EDGE output.
///
__global__
void cu_hysteresis_high(float *out_pixels, float *in_pixels, float *strong_edge_mask,
                        float t_high, int img_height, int img_width)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= img_height * img_width) {
        return;
    }
    /* apply high threshold */
    const bool strong = in_pixels[idx] > t_high;
    strong_edge_mask[idx] = strong ? 1.0 : 0.0;
    out_pixels[idx] = strong ? STRONG_EDGE : NON_EDGE;
}
///
/// \brief CUDA implementation of Canny hysteresis low thresholding.
///
/// This kernel is the second pass in the parallel hysteresis step.
/// It launches a thread for every pixel, but skips the first and last rows and columns.
/// For surviving threads, the pixel at the thread ID index is checked to see if it was
/// previously marked as a strong edge in the first pass. If it was, the thread checks
/// their eight immediate neighbors and connects them (marks them as strong edges)
/// if the neighbor is above the low threshold.
///
/// The output of this step is an output buffer with both "strong" and "connected" edges
/// set to whtie values. This is the final edge detected image.
///
///
/// \brief Second hysteresis pass: low thresholding. One thread per interior
/// pixel (borders skipped). Where the first pass marked a strong edge, the
/// 8 neighbors are examined and connected if they clear t_low.
///
__global__
void cu_hysteresis_low(float *out_pixels, float *in_pixels, float *strong_edge_mask,
                       float t_low, int img_height, int img_width)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int col = idx % img_width;
    /* interior pixels only: skip first/last row and first/last column */
    const bool interior = (idx > img_width)
                       && (idx < (img_height * img_width) - img_width)
                       && (col > 0)
                       && (col < img_width - 1);
    if (interior && (1.0 == strong_edge_mask[idx])) {
        /* this pixel was previously found to be a strong edge */
        trace_immed_neighbors(out_pixels, in_pixels, idx, t_low, img_width);
    }
}
// Single-pass hysteresis: one thread per interior pixel. A pixel above t_high
// is painted 255.0 and pulls in weak neighbors above t_low; otherwise it is
// cleared to 0.0. Pixels already stamped 255.0 are left untouched.
__global__ void hysteresis_kernel(float* out_pixels,float * in_pixels,float t_low,float t_high,int img_height,int img_width){
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int col = idx % img_width;
    // Guard clauses: skip first/last row and first/last column.
    if (idx <= img_width) return;
    if (idx >= (img_height * img_width) - img_width) return;
    if (col <= 0 || col >= img_width - 1) return;
    float pixel = in_pixels[idx];
    if (pixel == 255.0) return;   // already a committed strong edge
    if (pixel > t_high) {
        out_pixels[idx] = (float)255.0;
        trace_immed_neighbors(out_pixels, in_pixels, idx, t_low, img_width);
    } else {
        out_pixels[idx] = 0.0;
    }
}
} |
9,564 | #include<iostream>
#include<vector>
#include<cstdlib>
const int M = 7; // size of the mask
__constant__ double mask[M];
// 1-D convolution with a constant-memory mask of length M. Each block stages
// blockDim.x input elements plus an M-wide right-hand halo into dynamically
// sized shared memory (third launch parameter must be
// (blockDim.x + M) * sizeof(double)). Input `arr` must be padded so that
// arr[gid .. gid+M-1] is readable for every output index.
__global__ void convolution_kernel(double *arr, double *output, int N){
    // Global index of this thread's output element.
    const auto gid = blockDim.x * blockIdx.x + threadIdx.x;
    // Dynamically allocated shared staging buffer (size set at launch).
    extern __shared__ double sharedArray[];
    // Every thread stages its own element...
    sharedArray[threadIdx.x] = arr[gid];
    // ...and the first M threads stage the halo past the block's end.
    // (Equivalent to the original `tid + blockDim.x < blockDim.x + M`.)
    if (threadIdx.x < M) {
        sharedArray[threadIdx.x + blockDim.x] = arr[gid + blockDim.x];
    }
    __syncthreads();
    // Dot the mask against the staged window starting at this thread's slot.
    double acc = 0.0;
    for (auto k = 0; k < M; k++) {
        acc += sharedArray[threadIdx.x + k] * mask[k];
    }
    output[gid] = acc;
}
// Driver: builds a zero-padded random input of N = 2^20 doubles, a random
// mask of length M in constant memory, runs the shared-memory convolution
// kernel, and copies the result back.
int main(){
    const int N = 1048576;               // number of output elements (2^20)
    const int Npad = N + M;              // input length including the halo
    const size_t bytesN    = N * sizeof(double);
    const size_t bytesMask = M * sizeof(double);
    const size_t bytesPad  = Npad * sizeof(double);
    std::vector<double> h_array(Npad);
    std::vector<double> h_mask(M);
    std::vector<double> h_output(N);
    // Zero the halo (M/2 slots on the left, the rest on the right);
    // random values elsewhere.
    for(auto i = 0; i < Npad; i++){
        const bool in_halo = (i < M/2) || (i >= N + (M/2));
        h_array[i] = in_halo ? 0 : rand()%100;
    }
    for(auto& coeff : h_mask){ coeff = rand()%10; }
    double *d_array, *d_output;
    cudaMalloc(&d_array, bytesPad);
    cudaMalloc(&d_output, bytesN);
    cudaMemcpy(d_array, h_array.data(), bytesPad, cudaMemcpyHostToDevice);
    // Mask lives in __constant__ memory; uploaded via the symbol API.
    cudaMemcpyToSymbol(mask, h_mask.data(), bytesMask);
    const int threadsPerBlock = 256;
    const int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;
    // Shared allocation: one slot per thread plus the M-wide halo.
    const size_t sharedMem = (threadsPerBlock + M) * sizeof(double);
    convolution_kernel<<<blocksPerGrid, threadsPerBlock, sharedMem>>>(d_array, d_output, N);
    cudaMemcpy(h_output.data(), d_output, bytesN, cudaMemcpyDeviceToHost);
    // Uncomment to print the output
    // for(auto& v : h_output){ std::cout << v << std::endl; }
    cudaFree(d_array);
    cudaFree(d_output);
    return 0;
}
|
// 2-D tiled convolution: each block stages its tile plus a halo of width
// n = Mask_Width/2 into shared memory, then convolves with mask M.
//
// NOTE(review): N_ds is a fixed 7x7 shared tile while the staging indices
// reach up to n + blockDim + threadIdx; this only stays in bounds when
// blockDim and Mask_Width satisfy blockDim + 2*n <= 7 -- confirm the launch
// configuration. The Tile_Size parameter is accepted but never used, and
// bottom/right halo checks compare row indices against Width (not a height)
// -- presumably the image is square; verify against the caller.
__global__ void
convolution(float *N , float *M, float *P , int Tile_Size, int Mask_Width , int Width)
{
    // Global output coordinates for this thread.
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    int l = blockIdx.y * blockDim.y + threadIdx.y;
    __shared__ float N_ds[7][7];
    int n = Mask_Width/2;
    // Global coordinates of the same thread shifted one block in each
    // direction; used to fetch halo cells (clamped to 0 outside the image).
    int halo_index_left = (blockIdx.x - 1) * blockDim.x + threadIdx.x;
    int halo_index_top = (blockIdx.y - 1) * blockDim.y + threadIdx.y;
    int halo_index_right = (blockIdx.x +1) * blockDim.x + threadIdx.x;
    int halo_index_bottom = (blockIdx.y + 1) * blockDim.y + threadIdx.y;
    // Center: every thread stages its own element at (n+ty, n+tx).
    N_ds[n + threadIdx.y][n + threadIdx.x]= N[(blockIdx.y * blockDim.y + threadIdx.y)*Width + (blockIdx.x * blockDim.x + threadIdx.x)];
    // Top-left corner + top and left strips staged by the bottom-right threads.
    if(threadIdx.x >= blockDim.x-n && threadIdx.y >= blockDim.y - n) {
        N_ds[threadIdx.y -(blockDim.y - n)][threadIdx.x -(blockDim.x - n)] = (halo_index_left < 0 || halo_index_top < 0)?0:N[halo_index_top* Width + halo_index_left];
        N_ds[threadIdx.y -(blockDim.y - n)][threadIdx.x -(blockDim.x - n) + n] = (halo_index_top<0)?0:N[halo_index_top*Width + (blockDim.x*blockIdx.x + threadIdx.x)];
        N_ds[threadIdx.y -(blockDim.y - n) + n][threadIdx.x -(blockDim.x - n)] = (halo_index_left<0)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_left];
    }
    // Top-right corner region.
    if(threadIdx.x < n && threadIdx.y >= blockDim.y - n){
        N_ds[threadIdx.y -(blockDim.y - n)][n + blockDim.x + threadIdx.x] = (halo_index_right >= Width || halo_index_top < 0)?0:N[halo_index_top*Width + halo_index_right];
        N_ds[threadIdx.y -(blockDim.y - n)][threadIdx.x -(blockDim.x - n) + n] = (halo_index_top<0)?0:N[halo_index_top*Width + (blockDim.x*blockIdx.x + threadIdx.x)];
        N_ds[threadIdx.y -(blockDim.y - n) + n][n + blockDim.x + threadIdx.x] = (halo_index_right >= Width)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_right];
    }
    // Bottom-left corner region.
    if(threadIdx.y < n && threadIdx.x >= blockDim.x - n){
        N_ds[n + blockDim.y + threadIdx.y][threadIdx.x -(blockDim.x - n)] = (halo_index_bottom >= Width || halo_index_left < 0)?0:N[halo_index_bottom*Width + halo_index_left];
        N_ds[n + blockDim.y + threadIdx.y][threadIdx.x -(blockDim.x - n) + n] = (halo_index_bottom >= Width)?0:N[halo_index_bottom*Width + (blockDim.x*blockIdx.x + threadIdx.x)];
        N_ds[threadIdx.y -(blockDim.y - n) + n][threadIdx.x -(blockDim.x - n)] = (halo_index_left < 0)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_left];
    }
    // Bottom-right corner region.
    if(threadIdx.x < n && threadIdx.y < n){
        N_ds[n + blockDim.y + threadIdx.y][n + blockDim.x + threadIdx.x] = (halo_index_right >= Width || halo_index_bottom >= Width)?0:N[halo_index_bottom*Width + halo_index_right];
        N_ds[n + blockDim.y + threadIdx.y][threadIdx.x -(blockDim.x - n) + n] = (halo_index_bottom >= Width)?0:N[halo_index_bottom*Width + (blockDim.x*blockIdx.x + threadIdx.x)];
        N_ds[threadIdx.y -(blockDim.y - n) + n][n + blockDim.x + threadIdx.x] = (halo_index_right >=Width)?0:N[(blockDim.y*blockIdx.y + threadIdx.y)*Width + halo_index_right];
    }
    // Edge strips (bottom, right, top, left) for threads not in corner regions.
    if(threadIdx.y < n && threadIdx.x > n && threadIdx.x <= blockDim.x-n) N_ds[n + blockDim.y + threadIdx.y][n + threadIdx.x] = (halo_index_bottom >=Width)?0:N[(halo_index_bottom*Width) + (blockIdx.x * blockDim.x + threadIdx.x)];
    if(threadIdx.x < n && threadIdx.y > n && threadIdx.y <= blockDim.y-n) N_ds[n + threadIdx.y][n + blockDim.x + threadIdx.x] = (halo_index_right >=Width)?0:N[(blockDim.y * blockIdx.y + threadIdx.y)*Width + (halo_index_right)];
    if(threadIdx.y >= blockDim.y - n && threadIdx.x > n && threadIdx.x <= blockDim.x-n) N_ds[threadIdx.y -(blockDim.y - n)][n+threadIdx.x] = (halo_index_top < 0)?0:N[(halo_index_top*Width) + (blockDim.x*blockIdx.x + threadIdx.x)];
    if(threadIdx.x >= blockDim.x - n && threadIdx.y > n && threadIdx.y <= blockDim.y-n) N_ds[n+threadIdx.y][threadIdx.x -(blockDim.x - n)] = (halo_index_left < 0)?0:N[(blockDim.y * blockIdx.y + threadIdx.y)*Width + halo_index_left];
    // All staging writes must land before any thread reads the tile.
    __syncthreads();
    // Accumulate the Mask_Width x Mask_Width window for this output element.
    float Pvalue = 0;
    for(int i =0; i < Mask_Width; i++){
        for(int j =0; j < Mask_Width ; j++) {
            Pvalue += N_ds[threadIdx.y + i][threadIdx.x + j] * M[(i*Mask_Width) + j];
        }
    }
    P[(l*Width) + k] = Pvalue;
}
|
9,566 | // the code initializes and computes jacobi inside the kernel function
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#define NUM_THREADS 5
// save matrix to file
void save_gnuplot( double *M, size_t dim );
// return the elapsed time
double seconds( void );
// print the matrix M
void print_matrix(int rows, int cols, double *M);
// ===============================================================================
// KERNEL FUNCTIONS
// Initialize the (dimension+2)^2 Jacobi grids: 0.5 in the interior, a linear
// ramp (increment per row) along the left and bottom borders of both buffers.
//
// NOTE(review): idx/i/j are computed per thread but then i and j are
// immediately reused as loop counters, so EVERY thread redundantly writes the
// entire matrix rather than one element each. The concurrent writes store
// identical values, but this serializes the work and the __syncthreads()
// calls only synchronize within one block -- confirm this kernel is intended
// to be launched with a single block, or parallelize it per element.
__global__ void matrix_init(int iterations, int dimension, double * d_matrix, double * d_matrix_new)
{
    int idx = ( blockIdx.x * blockDim.x ) + threadIdx.x; // local index for each thread
    // global indexes for the matrices (idx = i*dimension+j)
    int i = idx / dimension;
    int j = idx % dimension;
    double increment;
    //fill initial values (interior cells = 0.5)
    for( i = 1; i <= dimension; ++i ){
        for( j = 1; j <= dimension; ++j ){
            d_matrix[ ( i * ( dimension + 2 ) ) + j ] = 0.5;
        }
    }
    __syncthreads();
    // set up borders: linear ramp on the left and bottom edges
    increment = 100.0 / ( dimension + 1 );
    for( i=1; i <= dimension+1; ++i ){
        d_matrix[ i * ( dimension + 2 ) ] = i * increment; //setting left border
        d_matrix[ ( ( dimension + 1 ) * ( dimension + 2 ) ) + ( dimension + 1 - i ) ] = i * increment; //setting bottom border
        d_matrix_new[ i * ( dimension + 2 ) ] = i * increment;
        d_matrix_new[ ( ( dimension + 1 ) * ( dimension + 2 ) ) + ( dimension + 1 - i ) ] = i * increment;
    }
    __syncthreads();
}
// jacobi method
extern __shared__ double * ptr[];
// Jacobi iteration kernel: each in-range thread updates one interior cell
// from the 4-neighbor average, `iterations` times, ping-ponging two buffers.
//
// NOTE(review): several constructs here look broken and should be revisited:
// s_matrix is declared from the extern shared array `ptr` but is immediately
// reassigned to the global pointer d_matrix, so shared memory is never used;
// byte_dimension uses sizeof(double*) where sizeof(double) is presumably
// meant; the buffer swap mutates per-thread local pointer copies, so no other
// thread (or the host) observes the swap; and __syncthreads() only
// synchronizes one block, not the whole grid, so cross-block neighbor reads
// race between iterations. The final `d_matrix = s_matrix` writes a local
// parameter copy and has no effect outside the kernel.
__global__ void jacobi(int iterations, int dimension, double * d_matrix, double * d_matrix_new)
{
    double * tmp_matrix;
    size_t byte_dimension = sizeof(double*) * ( dimension + 2 ) * ( dimension + 2 );
    double * s_matrix = (double*)&ptr[byte_dimension];
    s_matrix = d_matrix;
    int idx = ( blockIdx.x * blockDim.x ) + threadIdx.x; // local index for each thread
    // global indexes for the matrices (idx = i*dimension+j)
    int i = idx / dimension;
    int j = idx % dimension;
    if( i > 0 && i < (dimension+1) && j > 0 && j < (dimension+2)){
        // if ( (idx < (dimension + 2) * (dimension + 2)) && i > 0 && j > 0){
        for(int it = 0; it < iterations; ++it ){
            // This is a row dominant program.
            d_matrix_new[ ( i * ( dimension + 2 ) ) + j ] = ( 0.25 ) *
                ( s_matrix[ ( ( i - 1 ) * ( dimension + 2 ) ) + j ] +
                  s_matrix[ ( i * ( dimension + 2 ) ) + ( j + 1 ) ] +
                  s_matrix[ ( ( i + 1 ) * ( dimension + 2 ) ) + j ] +
                  s_matrix[ ( i * ( dimension + 2 ) ) + ( j - 1 ) ] );
            // swap the pointers (local copies only -- see NOTE above)
            tmp_matrix = s_matrix;
            s_matrix = d_matrix_new;
            d_matrix_new = tmp_matrix;
            __syncthreads();
        }
        d_matrix = s_matrix;
    }
}
// ===============================================================================
// Driver for the GPU Jacobi solver. Parses dim/iterations/peek coordinates,
// allocates the two (dim+2)^2 device buffers, runs the init and jacobi
// kernels, and reports the peeked element plus elapsed time.
// Fixes vs. original: buffer sizing used sizeof(double*) where sizeof(double)
// is meant (only coincidentally equal on LP64), and the grid size used
// truncating division, leaving up to NUM_THREADS-1 cells uncovered when the
// element count is not a multiple of NUM_THREADS.
int main(int argc, char* argv[]){
    // timing variables
    double t_start, t_end;
    double *h_matrix;                 // host pointer
    double *d_matrix, *d_matrix_new;  // device pointers
    size_t dimension = 0, iterations = 0, row_peek = 0, col_peek = 0;
    size_t byte_dimension = 0;
    // check on input parameters
    if(argc != 5) {
        fprintf(stderr,"\nwrong number of arguments. Usage: ./a.out dim it n m\n");
        return 1;
    }
    dimension = atoi(argv[1]);
    iterations = atoi(argv[2]);
    row_peek = atoi(argv[3]);
    col_peek = atoi(argv[4]);
    printf("matrix size = %zu\n", dimension);
    printf("number of iterations = %zu\n", iterations);
    printf("element for checking = Mat[%zu,%zu]\n",row_peek, col_peek);
    if((row_peek > dimension) || (col_peek > dimension)){
        fprintf(stderr, "Cannot Peek a matrix element outside of the matrix dimension\n");
        fprintf(stderr, "Arguments n and m must be smaller than %zu\n", dimension);
        return 1;
    }
#ifdef DEBUG
    if(dimension>10){
        printf("Choose a smaller dimension for debug.\n");
        return 2;
    }
#endif
    // sizeof(double), not sizeof(double*): these buffers hold doubles.
    byte_dimension = sizeof(double) * ( dimension + 2 ) * ( dimension + 2 );
    h_matrix = ( double* )malloc( byte_dimension );
    cudaMalloc( (void **) &d_matrix, byte_dimension );     // allocates memory on the GPU
    cudaMalloc( (void **) &d_matrix_new, byte_dimension );
    t_start = seconds();
    // Ceil-divide so every one of the (dim+2)^2 cells gets a thread even when
    // the count is not a multiple of NUM_THREADS.
    size_t total_cells = (dimension + 2) * (dimension + 2);
    size_t num_blocks = (total_cells + NUM_THREADS - 1) / NUM_THREADS;
    // call kernel functions
    matrix_init<<< num_blocks, NUM_THREADS >>>(iterations, dimension, d_matrix, d_matrix_new);
    jacobi<<< num_blocks, NUM_THREADS >>>(iterations, dimension, d_matrix, d_matrix_new);
    // copy from gpu to cpu (blocking; also synchronizes with the kernels)
    cudaMemcpy( h_matrix, d_matrix, byte_dimension, cudaMemcpyDeviceToHost );
    t_end = seconds();
#ifdef DEBUG
    print_matrix(dimension+2, dimension+2, h_matrix);
#endif
    printf( "\nelapsed time = %f seconds\n", t_end - t_start );
    printf( "\nmatrix[%zu,%zu] = %f\n", row_peek, col_peek, h_matrix[ ( row_peek + 1 ) * ( dimension + 2 ) + ( col_peek + 1 ) ] );
    // save_gnuplot( h_matrix, dimension );
    // free the memory
    free( h_matrix );
    cudaFree( d_matrix );
    cudaFree( d_matrix_new );
    return 0;
}
// ===============================================================================
// Print a rows x cols matrix of doubles to stdout, one row per line,
// each value with one decimal place, followed by a blank line.
void print_matrix(int rows, int cols, double *M) {
    for (int r = 0; r < rows; r++) {
        for (int c = 0; c < cols; c++) {
            printf("%.1f ", (double)M[r * cols + c]);
        }
        printf("\n");
    }
    printf("\n");
}
// Dump the (dimension+2)^2 grid to "solution.dat" in gnuplot format:
// one "x<TAB>y<TAB>value" line per cell, with y negated and grid step h.
void save_gnuplot( double *M, size_t dimension ){
    const double h = 0.1;
    const size_t side = dimension + 2;
    FILE *file = fopen( "solution.dat", "w" );
    for( size_t row = 0; row < side; ++row ){
        for( size_t col = 0; col < side; ++col ){
            fprintf(file, "%f\t%f\t%f\n", h * col, -h * row, M[ ( row * side ) + col ] );
        }
    }
    fclose( file );
}
// A simple walltime timer: seconds (with microsecond resolution) since the
// Unix epoch, as a double.
double seconds(){
    struct timeval now;
    gettimeofday( &now, (struct timezone *)0 );
    return (double)now.tv_sec + (double)now.tv_usec / 1000000.0;
}
|
9,567 | /*************************************************************************
> File Name: 02cudahashtable.cu
> Author: dong xu
> Mail: gwmxyd@163.com
> Created Time: 2016年04月02日 星期四 21时24分04秒
************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <time.h>
#include "cuda_runtime.h"
struct Entry{
unsigned int key;
void* value;
struct Entry* next;
};
struct Table{
size_t count;
struct Entry **entries;
struct Entry *pool;
struct Entry *firstFree;
};
// Initialize a hash table with `entries` buckets (zeroed) and a pool of
// `elements` pre-allocated Entry nodes; firstFree points at the pool start.
// `table` must be a valid pointer owned by the caller. (The original code
// malloc'd into the local parameter when table was NULL -- the allocation
// leaked and never reached the caller, so a NULL table now simply returns.)
void init_table(struct Table* table,int entries,int elements)
{
    if(table == NULL)
        return;
    table->count = entries;
    table->entries = (struct Entry**)calloc(entries,sizeof(struct Entry*));
    table->pool = (struct Entry*)malloc(elements * sizeof(struct Entry));
    table->firstFree = table->pool;
}
// Release the entry pool and the bucket array.
// Note: does not free `table` itself; that stays with the caller.
void free_table(struct Table* table)
{
    free(table->pool);
    free(table->entries);
}
// Bucket index for `key`: simple modulo hashing over `count` buckets.
size_t hash(const unsigned int key,const size_t count)
{
    return (size_t)(key % count);
}
// Insert (key, value) at the head of the bucket chosen by hash(key).
// Nodes come from the pre-allocated pool via firstFree; no bounds check is
// done, so the caller must not insert more than the pool size.
void add_to_table(struct Table *table,const unsigned int key,void*value)
{
    if(table == NULL){
        printf("Error:table is null\n");
        return;
    }
    // Pop the next free node from the pool.
    struct Entry* node = table->firstFree++;
    node->key = key;
    node->value = value;
    // Push onto the front of the bucket's chain.
    size_t bucket = hash(key,table->count);
    node->next = table->entries[bucket];
    table->entries[bucket] = node;
}
#define SIZE 100*1024*1024
#define ELEMENTS (SIZE/sizeof(unsigned int))
// Walk every bucket chain, re-hash each stored key, and report any entry
// sitting in the wrong bucket; finally check the total node count against
// the expected ELEMENTS.
void verify_table(const struct Table *table)
{
    int total = 0;
    for(size_t bucket = 0; bucket < table->count; bucket++){
        for(struct Entry* node = table->entries[bucket]; node != NULL; node = node->next){
            ++total;
            if(hash(node->key,table->count) != bucket)
                printf("%d hashed to %ld,but was located at %ld\n",node->key,hash(node->key,table->count),bucket);
        }
    }
    if(total != ELEMENTS)
        printf("Hash Error!\n");
    else
        printf("Hash Success!\n");
}
#define HASH_ENTRIES 1024
// Build a table over ELEMENTS pseudo-random keys and verify it.
// Fixes vs. original: the key buffer was allocated as SIZE*sizeof(unsigned
// int) bytes even though SIZE is already a byte count (4x over-allocation),
// and it was hashed while completely uninitialized (undefined behavior).
// The buffer is now sized SIZE bytes (= ELEMENTS unsigned ints), checked for
// allocation failure, and filled with rand() before timing starts.
int main()
{
    unsigned int *buffer = (unsigned int*)malloc(SIZE);
    int i;
    clock_t start,stop;
    if(buffer == NULL){
        printf("Error:out of memory\n");
        return 1;
    }
    // Deterministic-seedable pseudo-random keys; hashing them is now
    // well-defined behavior.
    for(i=0;i<ELEMENTS;i++)
        buffer[i] = (unsigned int)rand();
    start = clock();
    struct Table* table = (struct Table*)malloc(sizeof(struct Table));
    init_table(table,HASH_ENTRIES,ELEMENTS);
    for(i=0;i<ELEMENTS;i++){
        add_to_table(table,buffer[i],(void*)NULL);
    }
    stop = clock();
    float elapsedTime = (float)(stop-start)/(float)CLOCKS_PER_SEC*1000.0f;
    printf("Time to hash:%3.1f ms\n",elapsedTime);
    verify_table(table);
    free_table(table);
    free(buffer);
    if(table != NULL)
        free(table);
    return 0;
}
|
9,568 | #include "includes.h"
// Rectify bounding boxes for cropped (and possibly horizontally flipped)
// image patches. `boxes` holds 4 planes of (num_locs x num_images) floats in
// xmin, ymin, xmax, ymax order; blocks stride over locations and threads
// stride over images. Flipped images (flip > 0.5) mirror x-coordinates as
// 256/patch_width - x (so 256 appears to be the source image size -- confirm),
// then all coordinates are shifted by the per-image crop offsets, normalized
// by the patch dimensions.
__global__ void kRectifyBoundingBox( float* boxes, float* width_offset, float* height_offset, float* flip, int num_images, int patch_width, int patch_height, int num_locs) {
  // Grid-stride over box locations: one block per location at a time.
  for (int loc_id = blockIdx.x; loc_id < num_locs; loc_id += gridDim.x) {
    // Base pointers of the four coordinate planes for this location.
    float *xmin_block = boxes + num_images * loc_id,
          *ymin_block = boxes + num_images * (loc_id + num_locs),
          *xmax_block = boxes + num_images * (loc_id + num_locs * 2),
          *ymax_block = boxes + num_images * (loc_id + num_locs * 3);
    // Block-stride over images: one thread per image at a time.
    for (int image_id = threadIdx.x; image_id < num_images; image_id += blockDim.x) {
      // Mirror x-coordinates for flipped images (note xmin/xmax swap roles).
      float xmin = (flip[image_id] > 0.5) ? (256.0/patch_width - xmax_block[image_id]) : xmin_block[image_id],
            xmax = (flip[image_id] > 0.5) ? (256.0/patch_width - xmin_block[image_id]) : xmax_block[image_id],
            ymin = ymin_block[image_id],
            ymax = ymax_block[image_id],
            wo = width_offset[image_id],
            ho = height_offset[image_id];
      // Translate by the crop offset, expressed in patch-relative units.
      xmin_block[image_id] = xmin - wo / patch_width;
      xmax_block[image_id] = xmax - wo / patch_width;
      ymin_block[image_id] = ymin - ho / patch_height;
      ymax_block[image_id] = ymax - ho / patch_height;
    }
  }
}
9,569 | #include "includes.h"
// Flatten a 3-D grid of 3-D blocks into a single global thread id:
// linearize the block index, scale by threads-per-block, then add the
// linearized thread index within the block.
__device__ unsigned int getGid3d3d(){
    int block = blockIdx.x
              + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int local = threadIdx.x
              + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    return block * (blockDim.x * blockDim.y * blockDim.z) + local;
}
// Forward difference of complex (double2) samples over `stride` elements:
// out[g] = (data[g+stride] - data[g]) / dx, applied to real and imaginary
// parts separately. The tail, where g+stride runs past gsize, falls back to
// data[g]/dx.
__global__ void derive(double2 *data, double2 *out, int stride, int gsize, double dx){
    int g = getGid3d3d();
    if (g >= gsize) {
        return;
    }
    if (g + stride < gsize){
        out[g].x = (data[g+stride].x - data[g].x)/dx;
        out[g].y = (data[g+stride].y - data[g].y)/dx;
    } else {
        // No forward neighbor available.
        out[g].x = data[g].x/dx;
        out[g].y = data[g].y/dx;
    }
}
9,570 | #include "includes.h"
// Element-wise addition of two n x n matrices: C = A + B.
// Expects a 2-D launch with one thread per element; out-of-range threads exit.
__global__ void matrixAdd(float *A, float *B, float *C, int n)
{
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    // Guard the grid tail in both dimensions.
    if (r >= n || c >= n)
        return;
    const int idx = r * n + c;
    C[idx] = A[idx] + B[idx];
}
9,571 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
// Register-blocked matrix multiply C = A * B with double-buffered prefetch of
// A tiles through shared memory. BLOCK_SIZE and VECTOR_SIZE are macros
// defined elsewhere in the project; As/As2 hold one BLOCK_SIZE x BLOCK_SIZE
// tile of A each, stored transposed (column index varies fastest via
// ty + BLOCK_SIZE*tx), and each thread accumulates BLOCK_SIZE outputs in the
// cv[] register array.
//
// NOTE(review): several spots look suspect and should be confirmed against a
// working reference: the first prefetch strides by `Ap[a * ii]` but the
// in-loop prefetch strides by `Ap[b * ii]` (b is the B-tile offset -- likely
// a typo for the A leading dimension); the loop variables `a` and `b` shadow
// the int parameters `a` (rows) and `b`; the unrolled prefetch assumes
// BLOCK_SIZE is a multiple of 4 and blockDim of a specific shape; and the
// first __syncthreads() happens inside the `if (ROW < a && COL < d)` branch,
// which deadlocks/Is UB if any thread of the block fails the bound check.
__global__ void matrixMultiplicationKernel(float *A, float* B, float* C, int a, int b, int d) {
  // Block index
  int bx = blockIdx.x;
  int by = blockIdx.y;
  // Thread index
  int tx = threadIdx.x;
  int ty = threadIdx.y;
  int ROW = by*blockDim.y+ty;
  int COL = bx*blockDim.x+tx;
  // First check if the thread exceeds the matrix dimensions
  if (ROW < a && COL < d) {
    // Declaration of the shared memory array As used to store the sub-
    // matrix of A (two buffers, swapped each iteration for prefetching)
    __shared__ float As[BLOCK_SIZE * BLOCK_SIZE];
    __shared__ float As2[BLOCK_SIZE * BLOCK_SIZE];
    float *prefetch = As;
    float *prefetch2 = As2;
    // Declaration of the shared memory array Bs used to
    // store the sub-matrix of B
    // __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
    // Per-thread accumulator: one partial C column segment in registers.
    float cv[BLOCK_SIZE];
    for (int ii = 0; ii < BLOCK_SIZE; ii++) {
      cv[ii] = 0;
    }
    // Index of the first sub-matrix of A processed by the block
    int aBegin = a * BLOCK_SIZE * by;
    // Index of the last sub-matrix of A processed by the block
    int aEnd = aBegin + a - 1;
    // Step size used to iterate through the sub-matrices of A
    int aStep = BLOCK_SIZE;
    // Index of the first sub-matrix of B processed by the block
    int bBegin = BLOCK_SIZE * VECTOR_SIZE * bx;
    // Step size used to iterate through the sub-matrices of B
    int bStep = BLOCK_SIZE * d;
    int cBegin = d * BLOCK_SIZE * by + VECTOR_SIZE * BLOCK_SIZE * bx;
    // Csub is used to store the element of the block sub-matrix
    // that is computed by the thread
    // float Csub = 0;
    // Prime the pipeline: stage the first A tile (transposed) into shared.
    float *Ap = &A[aBegin + a * ty +tx];
    float *ap = &prefetch[ty + BLOCK_SIZE * tx];
#pragma unroll
    for(int ii = 0; ii < BLOCK_SIZE; ii+=4){
      ap[ii] = Ap[a * ii];
    }
    __syncthreads();
    // Loop over all the sub-matrices of A and B
    // required to compute the block sub-matrix
    // (NB: loop vars `a`/`b` shadow the int parameters of the same names)
    for (int a = aBegin, b = bBegin;
         a <= aEnd;
         a += aStep, b += bStep) {
      // Load the matrices from device memory
      // to shared memory; each thread loads
      // one element of each matrix
      Ap = &A[a + aStep + a * ty +tx];
      float *ap2 = &prefetch2[ty + BLOCK_SIZE * tx];
#pragma unroll
      for(int ii = 0; ii < BLOCK_SIZE; ii+=4){
        ap2[ii] = Ap[b * ii];
      }
      // Consume the tile staged in the previous iteration.
      ap = &prefetch[0];
      float *bp = &B[b + BLOCK_SIZE * ty + tx];
#pragma unroll
      for (int ii = 0; ii < BLOCK_SIZE; ii++) {
        float bv = bp[0];
        for (int jj = 0; jj < BLOCK_SIZE; jj++) {
          cv[jj] += ap[jj]*bv;
          ap += BLOCK_SIZE;
          bp += d;
        }
      }
      // Synchronize to make sure the matrices are loaded
      __syncthreads();
      // swap As and As2 (double-buffer flip)
      float *prefetch_temp = prefetch;
      prefetch = prefetch2;
      prefetch2 = prefetch_temp;
    }
    // Write the block sub-matrix to device memory;
    // each thread writes one element
    float *Cp = &C[cBegin];
    Cp += BLOCK_SIZE * ty + tx;
    int cStep = d;
#pragma unroll
    for(int ii=0; ii<BLOCK_SIZE; ii++){
      Cp[0] = cv[ii]; Cp += cStep;
    }
  }
}
9,572 | #include <cuda.h>
#define NP_MAX 7
// Important variables for GPU shit
float *d_u;
float *d_f;
float *d_x;
float *d_mesh;
float *d_r;
float *d_w;
// Runge-Kutta time integration storage
float *d_kstar;
float *d_k1;
float *d_k2;
float *d_k3;
float *d_k4;
/* legendre polynomials
*
* Calculates the value of P_i(x)
*/
// Closed-form Legendre polynomial P_i(x) for i in [0, 10].
// Returns -1 as a silent sentinel for any other i; callers are expected to
// keep i <= Np (with Np+1 <= NP_MAX elsewhere in this file).
__device__ float legendre(float x, int i) {
    switch (i) {
    case 0: return 1.;
    case 1: return x;
    case 2: return (3.*powf(x,2) -1.) / 2.;
    case 3: return (5.*powf(x,3) - 3.*x) / 2.;
    case 4: return (35.*powf(x,4) - 30.*powf(x,2) + 3.)/8.;
    case 5: return (63.*powf(x,5) - 70.*powf(x,3) + 15.*x)/8.;
    case 6: return (231.*powf(x,6) - 315.*powf(x,4) + 105.*powf(x,2) -5.)/16.;
    case 7: return (429.*powf(x,7) - 693.*powf(x,5) + 315.*powf(x,3) - 35.*x)/16.;
    case 8: return (6435.*powf(x,8) - 12012.*powf(x,6) + 6930.*powf(x,4) - 1260.*powf(x,2) + 35.)/128.;
    case 9: return (12155.*powf(x,9) - 25740.*powf(x,7) + 18018*powf(x,5) - 4620.*powf(x,3) + 315.*x)/128.;
    case 10: return (46189.*powf(x,10) - 109395.*powf(x,8) + 90090.*powf(x,6) - 30030.*powf(x,4) + 3465.*powf(x,2) - 63.)/256.;
    }
    return -1;
}
/* legendre polynomials derivatives
*
* Calculates the value of d/dx P_i(x)
*/
// Closed-form derivative d/dx P_i(x) of the Legendre polynomials, i in [0, 10].
// Returns -1 as a silent sentinel for unsupported i (same convention as legendre()).
__device__ float legendreDeriv(float x, int i) {
    switch (i) {
    case 0: return 0.;
    case 1: return 1.;
    case 2: return 3.*x;
    case 3: return (15.*powf(x,2) - 3.) / 2.;
    case 4: return (140.*powf(x,3) - 60*x)/8.;
    case 5: return (315.*powf(x,4) - 210.*powf(x,2) + 15.)/8.;
    case 6: return (1386.*powf(x,5) - 1260.*powf(x,3) + 210.*x)/16.;
    case 7: return (3003.*powf(x,6) - 3465.*powf(x,4) + 945.*powf(x,2) - 35.)/16.;
    case 8: return (51480.*powf(x,7) - 72072.*powf(x,5) + 27720.*powf(x,3) - 2520.*x)/128.;
    case 9: return (109395.*powf(x,8) - 180180.*powf(x,6) + 90090.*powf(x,4) - 13860.*powf(x,2) + 315.)/128.;
    case 10: return (461890.*powf(x,9) - 875160.*powf(x,7) + 540540.*powf(x,5) - 120120.*powf(x,3) + 6930.*x)/256.;
    }
    return -1;
}
/* flux function f(u)
*
* evaluate the flux function f(u)
*/
__device__ float flux(float u) {
    // Linear advection flux f(u) = a*u with wave speed a = 2*pi.
    const float a = 2.*3.14159;
    return a * u;
}
/* initilialize the mesh nodes
*
* ideally, this should be done on the GPU, but meh
*/
__global__ void initMesh(float *mesh, float dx, float a, int K) {
    // Uniform grid of K node positions: mesh[i] = a + i*dx.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= K)
        return;
    mesh[i] = a + dx * i;
}
/* initialize flux
*
* for these periodic boundary conditions, you need to set the flux
* on the ghost state to be something.
*/
/* initialize flux
 *
 * for these periodic boundary conditions, you need to set the flux
 * on the ghost state to be something.
 *
 * Evaluates u at the right edge of the last element (sum of its Legendre
 * coefficients, i.e. all P_i(1) = 1) and stores flux(u) at both ends of f.
 * NOTE: no thread-index guard -- every launched thread does the same work,
 * so this is presumably meant to run as <<<1,1>>>.
 * NOTE(review): cl has NP_MAX (=7) slots but the loops run Np+1 times, so
 * Np must be <= 6 -- confirm callers respect this.
 */
__global__ void initFlux(float *u, float *f, int K, int Np) {
    float cl[NP_MAX];
    float ul;
    int i;
    // Coefficients of the last (K-th) element; row i holds coefficient i.
    for (i = 0; i < Np+1; i++) {
        cl[i] = u[(K + 1)*i + K];
    }
    // u at the right endpoint: P_i(1) = 1 for all i, so just sum coefficients.
    ul = 0;
    for (i = 0; i < Np+1; i++) {
        ul += cl[i];
    }
    f[K+1] = flux(ul);
    f[0] = f[K+1];   // periodic: wrap the flux around
}
/* flux calculations for each node
*
* | endpoint - f0 - f1 - ... - fm-1 - endpoint |
*
* That is, fi is the flux between nodes i and i+1, making f a m+1 length vector.
* Store results into f
*/
/* flux calculations for each node
 *
 * | endpoint - f0 - f1 - ... - fm-1 - endpoint |
 *
 * That is, fi is the flux between nodes i and i+1, making f a m+1 length vector.
 * Store results into f
 *
 * Parameters aspeed and time are currently unused.
 */
__global__ void calcFlux(float *u, float *f, float aspeed, float time, int K, int Np) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int i;
    float ul, ur;
    float cl[NP_MAX], cr[NP_MAX];   // left/right element coefficients (needs Np+1 <= NP_MAX)
    if (idx < K+2) {
        // periodic: interface 0 reuses the wrapped flux stored in f[K+1]
        if (idx == 0) {
            f[idx] = f[K+1];
        }
        if (idx > 0) {
            for (i = 0; i < Np+1; i++) {
                cl[i] = u[(K + 1)*i + idx - 1];
                cr[i] = u[(K + 1)*i + idx];
            }
            // Left value: u at x=1 of the left element (P_i(1) = 1)
            ul = 0;
            for (i = 0; i < Np+1; i++) {
                ul += cl[i];
            }
            // Evaluate flux
            ul = flux(ul);
            // Right value: u at x=-1 of the right element (P_i(-1) = (-1)^i)
            ur = 0;
            for (i = 0; i < Np+1; i++) {
                ur += powf(-1, i) * cr[i];
            }
            // Evaluate flux
            ur = flux(ur);
            // Upwind flux: the wave speed is positive, so the left state is
            // always taken. NOTE(review): ur is computed but never used here.
            f[idx] = ul;
        }
    }
}
/* initial condition function
*
* returns the value of the intial condition at point x
*/
__device__ float u0(float x) {
    // Initial condition: unit box on (-0.25, 0.25), zero elsewhere.
    //return sinf(2*3.14159*x);
    return (x > -0.25 && x < 0.25) ? 1 : 0;
}
/* intialize the ghost state
*
* since we have periodic boundary conditions, make the ghost state
* think that it's just the first element.
*/
/* intialize the ghost state
 *
 * since we have periodic boundary conditions, make the ghost state
 * think that it's just the first element.
 *
 * Zeroes every coefficient of the ghost element (column K).
 * NOTE(review): the commented-out code suggests it once copied element 0's
 * coefficients (true periodic ghost); currently it just writes zeros.
 * No thread-index guard: every launched thread repeats the same writes.
 */
__global__ void initUPeriodic(float *u, int K, int Np) {
    int i;
    for (i = 0; i < Np+1; i++) {
        u[i*(K + 1) + K] = 0;//u[i*(K + 1)];
    }
}
/* calculate the initial data for U
*
* needs to interpolate u0 with legendre polynomials to grab the right coefficients.
*/
/* calculate the initial data for U
 *
 * Projects u0 onto the Legendre basis of each element by Gauss quadrature:
 *   c_i = (2i+1)/2 * sum_j w_j u0(x_j) P_i(r_j)
 * One thread per element; coefficient i of element idx lands at
 * u[i*(K+1) + idx].
 */
__global__ void initU(float *u, float *x, float *w, float *r, float dx, int K, int Np) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int i, j;
    float xi, uval;
    if (idx < K) {
        for (i = 0; i < Np+1; i++) {
            uval = 0.;
            for (j = 0; j < Np+1; j++) {
                // Map the reference quadrature point r[j] in [-1,1] into
                // element idx: xi = x_idx + dx*(r_j + 1)/2.
                xi = x[idx] + dx*(r[j] + 1.)/2.;
                uval += w[j] * u0(xi) * legendre(r[j], i);
            }
            // Normalization factor (2i+1)/2 from the Legendre inner product.
            u[i*(K + 1) + idx] = (2.*i + 1.)/2. * uval;
        }
    }
}
/* right hand side calculations
*
* Calculates the flux integral
* int_k (u * vprime) dx
* and adds it to the flux boundary integral.
* Store results into k, the RK variable
*/
/* right hand side calculations
 *
 * Calculates the flux integral
 *      int_k (u * vprime) dx
 * and adds it to the flux boundary integral, scaled by dt:
 *   k_i = dt * (2i+1)/dx * ( -f_right + (-1)^i f_left + sum_j w_j P'_i(r_j) f(u(r_j)) )
 * Store results into kstar, the RK variable. Parameter `a` is unused.
 * Requires Np+1 <= NP_MAX for the local arrays.
 */
__global__ void rhs(float *c, float *kstar, float *f, float *w, float *r, float a, float dt, float dx, int K, int Np) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int i,j, k;
    float rhs[NP_MAX], register_c[NP_MAX];
    float lflux, rflux, u;
    if (idx < (K + 1)) {
        // Read the global u into a register variable and set rhs = 0.
        for (i = 0; i < Np+1; i++) {
            register_c[i] = c[i*(K + 1) + idx];
        }
        // Read the flux contributions at this element's two interfaces.
        lflux = f[idx];
        rflux = f[idx+1];
        // Perform quadrature W*P'*f(U) at integration points
        for (i = 0; i < Np+1; i++) {
            rhs[i] = 0.;
            for (j = 0; j < Np+1; j++) {
                // Evaluate u(r_j) from the Legendre expansion
                u = 0.;
                for (k = 0; k < Np+1; k++) {
                    u += legendre(r[j], k) * register_c[k];
                }
                // rhs = sum w_j P'(r_j) flux(u_j)
                rhs[i] += w[j] * legendreDeriv(r[j], i) * flux(u);
            }
        }
        // Store result; (-1)^i comes from evaluating P_i at the left endpoint.
        for (i = 0; i < Np+1; i++) {
            kstar[(K + 1)*i + idx] = dt*(((2.*i+1.) / dx) * (-rflux + powf(-1.,i) * lflux + rhs[i]));
        }
    }
}
/* tempstorage for RK4
*
* I need to store u + alpha * k_i into some temporary variable called k*.
*/
__global__ void rk4_tempstorage(float *u, float *kstar, float*k, float alpha, float dt, int Np, int K) {
    // Intermediate RK4 state: kstar = u + alpha * k, elementwise over all
    // (Np+1)*K coefficients. Parameter dt is unused here.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= (Np + 1) * K)
        return;
    kstar[i] = u[i] + alpha * k[i];
}
/* rk4
*
* computes the runge-kutta solution
* u_n+1 = u_n + k1/6 + k2/3 + k3/3 + k4/6
*/
__global__ void rk4(float *u, float *k1, float *k2, float *k3, float *k4, int Np, int K) {
    // Final RK4 combination: u += k1/6 + k2/3 + k3/3 + k4/6.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    if (i >= (Np + 1) * K)
        return;
    u[i] += k1[i]/6. + k2[i]/3. + k3[i]/3. + k4[i]/6.;
}
|
9,573 | #include "includes.h"
__global__ void addNccValues(const float* prevData, float* result, int slices)
{
    // Each slice stores three values (cross, normA, normB) consecutively;
    // accumulate NCC = cross / sqrt(normA*normB) into result[slice].
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= slices)
        return;
    const float denom = prevData[3 * idx + 1] * prevData[3 * idx + 2];
    float ncc = 0;
    if (denom > 0)                       // guard against divide-by-zero/NaN
        ncc = prevData[3 * idx] / sqrtf(denom);
    result[idx] += ncc;
} |
9,574 | #include<stdio.h>
#include<cuda.h>
#define NUM 327133
//Check Error
#define printError(func) \
{ \
cudaError_t E = func; \
if(E != cudaSuccess) \
{ \
printf( "\nError at line: %d ", __LINE__); \
printf( "\nError: %s ", cudaGetErrorString(E)); \
} \
} \
//Kernel
__global__ void add(float* A, float* B, float* C)
{
    // Elementwise C = A + B over NUM entries; guard the partial tail block.
    unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < NUM)
        C[idx] = A[idx] + B[idx];
}
//To check the output to see if it matches
// Returns 1 iff C[i] == A[i] + B[i] (exact float comparison) for every
// index in [0, NUM); returns 0 at the first mismatch.
int checkSum(float* A, float* B, float* C)
{
    for (int i = 0; i < NUM; ++i)
    {
        if (C[i] != A[i] + B[i])
            return 0;
    }
    return 1;
}
// Host driver: fills two float arrays with rand(), adds them on the GPU,
// and validates against a CPU reference.
int main()
{
    float* A;
    float* B;
    float* C;
    float* deviceA;
    float* deviceB;
    float* deviceC;
    A = (float*) malloc( NUM * sizeof(float));
    B = (float*) malloc( NUM * sizeof(float));
    C = (float*) malloc( NUM * sizeof(float));
    for(int i=0; i<NUM; i++)
    {
        A[i] = rand();
        B[i] = rand();
    }
    printError(cudaMalloc((void **)&deviceA, NUM * sizeof(float)));
    printError(cudaMalloc((void **)&deviceB, NUM * sizeof(float)));
    printError(cudaMalloc((void **)&deviceC, NUM * sizeof(float)));
    // FIX: the memcpys were previously unchecked.
    printError(cudaMemcpy(deviceA, A, NUM * sizeof(float), cudaMemcpyHostToDevice));
    printError(cudaMemcpy(deviceB, B, NUM * sizeof(float), cudaMemcpyHostToDevice));
    add<<<ceil(NUM/1024.0), 1024>>>(deviceA, deviceB, deviceC);
    // FIX: kernel launches do not return cudaError_t; fetch launch errors
    // explicitly, otherwise a failed launch silently leaves C as garbage.
    printError(cudaGetLastError());
    printError(cudaMemcpy(C, deviceC, NUM * sizeof(float), cudaMemcpyDeviceToHost));
    if(checkSum(A, B, C))
        printf("\nResult of 2 array sum is correct\n");
    else
        printf("\nResult of 2 array sum is wrong\n");
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    free(A);
    free(B);
    free(C);
    return 0;  // previously fell off the end of main
}
|
9,575 | #include <stdio.h>//printf
#define MIN(a,b) (((a)<(b))?(a):(b))
#define MAX(a,b) (((a)>(b))?(a):(b))
static __constant__ const char md5_salt_prefix[] = "$1$";
static __constant__ const char b64t[] = "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
static __constant__ const unsigned char fillbuf[64] = { 0x80, 0 };
// Streaming MD5 state: chaining values A-D, a 64-bit processed-byte count
// split across total[0..1], the count of buffered bytes, and a 128-byte
// input buffer. The union lets md5_finish_ctx append the bit-length as
// 32-bit words directly.
struct md5_ctx{
    unsigned int A;
    unsigned int B;
    unsigned int C;
    unsigned int D;
    unsigned int total[2];   // low/high words of the total byte count
    unsigned int buflen;     // bytes currently pending in buffer
    union{
        char buffer[128];
        unsigned int buffer32[32];
    };
};
#define FF(b, c, d) (d ^ (b & (c ^ d)))
#define FG(b, c, d) FF (d, b, c)
#define FH(b, c, d) (b ^ c ^ d)
#define FI(b, c, d) (c ^ (b | ~d))
// Core MD5 compression function: absorbs `len` bytes (a multiple of 64)
// from `buffer` into the chaining values ctx->A..D and advances the 64-bit
// length counter. Words are read directly from memory, so this assumes a
// little-endian word layout (true on NVIDIA GPUs).
__device__ __forceinline__ void md5_process_block (const void *buffer, size_t len, struct md5_ctx *ctx){
    unsigned int correct_words[16];
    const unsigned int *words = (const unsigned int *)buffer;
    size_t nwords = len / sizeof (unsigned int);
    const unsigned int *endp = words + nwords;
    unsigned int A = ctx->A;
    unsigned int B = ctx->B;
    unsigned int C = ctx->C;
    unsigned int D = ctx->D;
    // 64-bit length update with carry from the low word.
    unsigned int lolen = len;
    ctx->total[0] += lolen;
    ctx->total[1] += (len >> 32) + (ctx->total[0] < lolen);
    while (words < endp){
        unsigned int *cwp = correct_words;
        unsigned int A_save = A;
        unsigned int B_save = B;
        unsigned int C_save = C;
        unsigned int D_save = D;
// Round-1 OP also copies the message word into correct_words for reuse.
#define OP(a, b, c, d, s, T) \
    a += FF (b, c, d) + (*cwp++ = (*words)) + T; \
    ++words; \
    CYCLIC (a, s); \
    a += b;
// Left-rotate w by s bits.
#define CYCLIC(w, s) (w = (w << s) | (w >> (32 - s)))
        /* Round 1. */
        OP (A, B, C, D, 7, 0xd76aa478);
        OP (D, A, B, C, 12, 0xe8c7b756);
        OP (C, D, A, B, 17, 0x242070db);
        OP (B, C, D, A, 22, 0xc1bdceee);
        OP (A, B, C, D, 7, 0xf57c0faf);
        OP (D, A, B, C, 12, 0x4787c62a);
        OP (C, D, A, B, 17, 0xa8304613);
        OP (B, C, D, A, 22, 0xfd469501);
        OP (A, B, C, D, 7, 0x698098d8);
        OP (D, A, B, C, 12, 0x8b44f7af);
        OP (C, D, A, B, 17, 0xffff5bb1);
        OP (B, C, D, A, 22, 0x895cd7be);
        OP (A, B, C, D, 7, 0x6b901122);
        OP (D, A, B, C, 12, 0xfd987193);
        OP (C, D, A, B, 17, 0xa679438e);
        OP (B, C, D, A, 22, 0x49b40821);
#undef OP
// Rounds 2-4 index the saved words via k.
#define OP(f, a, b, c, d, k, s, T) \
    a += f (b, c, d) + correct_words[k] + T; \
    CYCLIC (a, s); \
    a += b;
        /* Round 2. */
        OP (FG, A, B, C, D, 1, 5, 0xf61e2562);
        OP (FG, D, A, B, C, 6, 9, 0xc040b340);
        OP (FG, C, D, A, B, 11, 14, 0x265e5a51);
        OP (FG, B, C, D, A, 0, 20, 0xe9b6c7aa);
        OP (FG, A, B, C, D, 5, 5, 0xd62f105d);
        OP (FG, D, A, B, C, 10, 9, 0x02441453);
        OP (FG, C, D, A, B, 15, 14, 0xd8a1e681);
        OP (FG, B, C, D, A, 4, 20, 0xe7d3fbc8);
        OP (FG, A, B, C, D, 9, 5, 0x21e1cde6);
        OP (FG, D, A, B, C, 14, 9, 0xc33707d6);
        OP (FG, C, D, A, B, 3, 14, 0xf4d50d87);
        OP (FG, B, C, D, A, 8, 20, 0x455a14ed);
        OP (FG, A, B, C, D, 13, 5, 0xa9e3e905);
        OP (FG, D, A, B, C, 2, 9, 0xfcefa3f8);
        OP (FG, C, D, A, B, 7, 14, 0x676f02d9);
        OP (FG, B, C, D, A, 12, 20, 0x8d2a4c8a);
        /* Round 3. */
        OP (FH, A, B, C, D, 5, 4, 0xfffa3942);
        OP (FH, D, A, B, C, 8, 11, 0x8771f681);
        OP (FH, C, D, A, B, 11, 16, 0x6d9d6122);
        OP (FH, B, C, D, A, 14, 23, 0xfde5380c);
        OP (FH, A, B, C, D, 1, 4, 0xa4beea44);
        OP (FH, D, A, B, C, 4, 11, 0x4bdecfa9);
        OP (FH, C, D, A, B, 7, 16, 0xf6bb4b60);
        OP (FH, B, C, D, A, 10, 23, 0xbebfbc70);
        OP (FH, A, B, C, D, 13, 4, 0x289b7ec6);
        OP (FH, D, A, B, C, 0, 11, 0xeaa127fa);
        OP (FH, C, D, A, B, 3, 16, 0xd4ef3085);
        OP (FH, B, C, D, A, 6, 23, 0x04881d05);
        OP (FH, A, B, C, D, 9, 4, 0xd9d4d039);
        OP (FH, D, A, B, C, 12, 11, 0xe6db99e5);
        OP (FH, C, D, A, B, 15, 16, 0x1fa27cf8);
        OP (FH, B, C, D, A, 2, 23, 0xc4ac5665);
        /* Round 4. */
        OP (FI, A, B, C, D, 0, 6, 0xf4292244);
        OP (FI, D, A, B, C, 7, 10, 0x432aff97);
        OP (FI, C, D, A, B, 14, 15, 0xab9423a7);
        OP (FI, B, C, D, A, 5, 21, 0xfc93a039);
        OP (FI, A, B, C, D, 12, 6, 0x655b59c3);
        OP (FI, D, A, B, C, 3, 10, 0x8f0ccc92);
        OP (FI, C, D, A, B, 10, 15, 0xffeff47d);
        OP (FI, B, C, D, A, 1, 21, 0x85845dd1);
        OP (FI, A, B, C, D, 8, 6, 0x6fa87e4f);
        OP (FI, D, A, B, C, 15, 10, 0xfe2ce6e0);
        OP (FI, C, D, A, B, 6, 15, 0xa3014314);
        OP (FI, B, C, D, A, 13, 21, 0x4e0811a1);
        OP (FI, A, B, C, D, 4, 6, 0xf7537e82);
        OP (FI, D, A, B, C, 11, 10, 0xbd3af235);
        OP (FI, C, D, A, B, 2, 15, 0x2ad7d2bb);
        OP (FI, B, C, D, A, 9, 21, 0xeb86d391);
        // Davies-Meyer feed-forward.
        A += A_save;
        B += B_save;
        C += C_save;
        D += D_save;
    }
    ctx->A = A;
    ctx->B = B;
    ctx->C = C;
    ctx->D = D;
}
__device__ __forceinline__ void md5_init_ctx (struct md5_ctx *ctx){
    // Standard MD5 initial chaining values; counters start at zero.
    ctx->A = 0x67452301;
    ctx->B = 0xefcdab89;
    ctx->C = 0x98badcfe;
    ctx->D = 0x10325476;
    ctx->buflen = 0;
    ctx->total[0] = 0;
    ctx->total[1] = 0;
}
__device__ __forceinline__ void md5_read_ctx (const struct md5_ctx *ctx, void *resbuf){
    // Emit the digest as the four chaining words A, B, C, D in order.
    unsigned int *out = (unsigned int *) resbuf;
    out[0] = ctx->A;
    out[1] = ctx->B;
    out[2] = ctx->C;
    out[3] = ctx->D;
}
// Buffers input bytes into ctx->buffer without ever flushing a full block;
// the data is only compressed later by md5_finish_ctx. This therefore only
// works while the total buffered input stays within the 128-byte buffer.
// NOTE(review): after copying `add` bytes, the *remaining* `len` is still
// added to buflen without being copied (the last line) -- that looks like a
// bug that would corrupt the hash whenever input overflows the buffer;
// confirm total input per context is always < 128 bytes in this usage.
__device__ __forceinline__ void md5_process_bytes (const void *buffer, size_t len, struct md5_ctx *ctx){
    size_t left_over = ctx->buflen;
    size_t add = MIN(len, 128 - left_over);
    memcpy (&ctx->buffer[left_over], buffer, add);
    ctx->buflen += add;
    buffer = (const char *) buffer + add;
    len -= add;
    ctx->buflen += len;
}
// Finalize: pad the buffered input (0x80 then zeros, from fillbuf) out to
// 56 mod 64, append the 64-bit bit-length little-endian, compress, and
// write the 16-byte digest into resbuf.
__device__ __forceinline__ void md5_finish_ctx (struct md5_ctx *ctx, void *resbuf){
    unsigned int bytes = ctx->buflen;
    size_t pad;
    // Fold the buffered byte count into the 64-bit total (with carry).
    ctx->total[0] += bytes;
    ctx->total[1] += (ctx->total[0] < bytes);
    // Pad so the 8 length bytes complete a 64-byte block.
    pad = bytes >= 56 ? 64 + 56 - bytes : 56 - bytes;
    memcpy (&ctx->buffer[bytes], fillbuf, pad);
    // Length in *bits*: total<<3, spread across two 32-bit words.
    ctx->buffer32[(bytes + pad) / 4] = (ctx->total[0] << 3);
    ctx->buffer32[(bytes + pad + 4) / 4] = (ctx->total[1] << 3) | (ctx->total[0] >> 29);
    md5_process_block (ctx->buffer, bytes + pad + 8, ctx);
    md5_read_ctx (ctx, resbuf);
}
// Single-thread MD5-crypt ($1$) password hash, following the classic
// md5-crypt construction (alternate sum, then 1000 strengthening rounds).
// Writes the permuted-base64 digest, NUL-terminated, into `buffer`
// (22 chars + NUL). Intended to be launched as <<<1,1>>>.
__global__ void get_it(char* key, char* salt, char* buffer, int key_len, int salt_len){
    unsigned char alt_result[16];
    size_t cnt;
    char *cp;
    struct md5_ctx ctx;
    struct md5_ctx alt_ctx;
    // Main context: key, "$1$", salt.
    md5_init_ctx (&ctx);
    md5_process_bytes (key, key_len, &ctx);
    md5_process_bytes (md5_salt_prefix, sizeof (md5_salt_prefix) - 1, &ctx);
    md5_process_bytes (salt, salt_len, &ctx);
    // Alternate sum: MD5(key | salt | key).
    md5_init_ctx (&alt_ctx);
    md5_process_bytes (key, key_len, &alt_ctx);
    md5_process_bytes (salt, salt_len, &alt_ctx);
    md5_process_bytes (key, key_len, &alt_ctx);
    md5_finish_ctx (&alt_ctx, alt_result);
    // Mix in key_len bytes of the alternate sum, 16 at a time.
    for (cnt = key_len; cnt > 16; cnt -= 16)
        md5_process_bytes (alt_result, 16, &ctx);
    md5_process_bytes (alt_result, cnt, &ctx);
    // NOTE(review): zeroing the first digest byte makes the odd-bit branch
    // below feed a NUL byte, which appears to mirror the md5-crypt
    // reference's "\0 or first key byte" step -- confirm against reference.
    *alt_result = 0;
    for (cnt = key_len; cnt > 0; cnt >>= 1)
        md5_process_bytes ((cnt & 1) != 0 ? (const void *) alt_result : (const void *) key, 1, &ctx);
    md5_finish_ctx (&ctx, alt_result);
    // 1000 strengthening rounds with the key/salt interleaving pattern.
#pragma unroll
    for (cnt = 0; cnt < 1000; ++cnt){
        md5_init_ctx (&ctx);
        if ((cnt & 1) != 0)
            md5_process_bytes (key, key_len, &ctx);
        else
            md5_process_bytes (alt_result, 16, &ctx);
        if (cnt % 3 != 0)
            md5_process_bytes (salt, salt_len, &ctx);
        if (cnt % 7 != 0)
            md5_process_bytes (key, key_len, &ctx);
        if ((cnt & 1) != 0)
            md5_process_bytes (alt_result, 16, &ctx);
        else
            md5_process_bytes (key, key_len, &ctx);
        md5_finish_ctx (&ctx, alt_result);
    }
    // Emit the digest in md5-crypt's permuted byte order, 6 bits per char.
    cp = buffer;
#define b64_from_24bit(b2,b1,b0,N) \
{ \
    unsigned int w = (b2 << 16) | (b1 << 8) | b0; \
    for(int i=0;i<N;i++){ \
        *cp++ = b64t[w & 0x3f]; \
        w >>= 6; \
    } \
}
    b64_from_24bit (alt_result[0], alt_result[6], alt_result[12], 4);
    b64_from_24bit (alt_result[1], alt_result[7], alt_result[13], 4);
    b64_from_24bit (alt_result[2], alt_result[8], alt_result[14], 4);
    b64_from_24bit (alt_result[3], alt_result[9], alt_result[15], 4);
    b64_from_24bit (alt_result[4], alt_result[10], alt_result[5], 4);
    b64_from_24bit (0, 0, alt_result[11], 2);
    *cp = 0;
}
// Host driver: hash the fixed key/salt on the GPU and print the result.
int main(){
    char* key;
    char* salt;
    char* buffer;
    cudaMalloc((void**)&salt, 32 * sizeof(char));
    cudaMalloc((void**)&key, 32 * sizeof(char));
    cudaMemcpy(key,"qwertyui",9 * sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(salt,"8UbX8cck",9 * sizeof(char), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&buffer,32 * sizeof(char));
    // BUG FIX: key_len was 18, but only 8 key bytes ("qwertyui") were ever
    // copied to the device -- the extra 10 bytes were uninitialized device
    // memory, making the hash nondeterministic.
    get_it<<<1,1>>>(key,salt,buffer,8,8);
    char ans[64];
    // cudaMemcpy is blocking, so no extra synchronization is needed here.
    cudaMemcpy(ans,buffer,32 * sizeof(char),cudaMemcpyDeviceToHost);
    printf("%s\n",ans);
    // FIX: device allocations were previously leaked.
    cudaFree(key);
    cudaFree(salt);
    cudaFree(buffer);
    return 0;
}
|
9,576 | /*
* file name: histogram.cu
*
* CPE810A: Homework 2: Implement a histogram routine using atomic operations and shared memory in
CUDA.
*
* Yupeng Cao, 10454637
*
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>   /* strcmp() used in main */
#include <time.h>
#define BLOCK_SIZE 256
/*
*********************************************************************
function name: hist_GPU
parameters:
vector: input vector data on device
hist_cpu: save results on device
bin
Size
Note: count histogram by using GPU
*********************************************************************
*/
/*
*********************************************************************
function name: hist_GPU
parameters:
    d_vec: input vector data on device (values expected in [0, 1024))
    d_hist: output histogram on device; must be zeroed before launch
    bin: number of bins (power of two; must be <= 1024, otherwise
         bin_range becomes 0 and the index computation divides by zero)
    Size: number of input elements
Note: privatized histogram -- each block accumulates into a shared-memory
      copy, then merges atomically into d_hist. Launch with dynamic shared
      memory of sizeof(int)*bin.
*********************************************************************
*/
__global__ void hist_GPU(int* d_vec, int* d_hist, int bin, int Size) {
    unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int bin_range = 1024 / bin;   // width of each bin
    extern __shared__ int histo_s[];
    // Zero this block's private histogram.
    for (unsigned int binIdx = threadIdx.x; binIdx < bin; binIdx += blockDim.x) {
        histo_s[binIdx] = 0;
    }
    __syncthreads();
    // Grid-stride accumulation into shared memory.
    for (unsigned int i = tid; i < Size; i += blockDim.x * gridDim.x) {
        atomicAdd(&(histo_s[d_vec[i] / bin_range]), 1);
    }
    __syncthreads();
    // Merge the private histogram into the global one.
    for (unsigned int binIdx = threadIdx.x; binIdx < bin; binIdx += blockDim.x) {
        atomicAdd(&(d_hist[binIdx]), histo_s[binIdx]);
    }
}
/*
*********************************************************************
function name: hist_CPU
parameters:
vector: input vector data
hist_cpu: save results
bin
Size
Note: count histogram by using CPU
*********************************************************************
*/
// Sequential reference histogram: values in [0, 1024) are bucketed into
// `bin` equal-width bins of width 1024/bin; counts accumulate in hist_cpu.
void hist_CPU(int* vector, int* hist_cpu, int bin, int Size){
    const int width = 1024 / bin;
    for (int i = 0; i < Size; ++i)
        hist_cpu[vector[i] / width]++;
}
/*
*********************************************************************
function name: check_input
parameters:
binNum: Input Bin
vecNum: Data (vector) Size
Note: if binNum isn't 2^N or vecNum < 0, input is invalid.
*********************************************************************
*/
/*
*********************************************************************
function name: check_input
parameters:
    binNum: Input Bin (must be a positive power of two)
    vecNum: Data (vector) Size (must be >= 0)
returns: 1 when both inputs are valid, -1 otherwise.
Note: the kernels compute the bin width as 1024/binNum, so binNum == 0
      would divide by zero -- the original (binNum & (binNum-1)) == 0 test
      wrongly accepted 0 (and negative values); guard binNum <= 0 too.
*********************************************************************
*/
int check_input(int binNum, int vecNum){
    if (binNum <= 0 || (binNum & (binNum - 1)) != 0){
        printf("Invalid bin number \n");
        printf("bin must be 2^n \n");
        return -1;
    }
    if (vecNum < 0){
        printf("Invalid vector size \n");
        printf("vector size must be >= 0 \n");
        return -1;
    }
    return 1;
}
/*
*********************************************************************
Main Function
*********************************************************************
*/
/*
*********************************************************************
Main Function: ./execute_file -i binNum vecNum
Builds a random vector in [0, 1024), histograms it on GPU and CPU, and
compares the results.
*********************************************************************
*/
int main(int argc, char *argv[])
{
    // input parameter and data check
    if ( argc != 4 )
    {
        printf("Error input Parameter \n");
        printf("Please input BinNum and VecDim \n");
        return 0;
    }
    if (strcmp(argv[1], "-i") == 0){
        printf("Input Data\n");
    }else{
        printf("Please Follow Format to Run Program: ./execute_file -i binNum vecNum\n");
        return -1;
    }
    int bin = atoi(argv[2]);
    int Size = atoi(argv[3]);
    if (check_input(bin, Size) == 1){
        printf("Input is Valid \n\n");
    }else{
        return -1;
    }
    // initialize vector (pinned host memory); values stay in [0, 1024)
    int *vector;
    cudaMallocHost((void **) &vector, sizeof(int)*Size);
    srand((unsigned)time(NULL));
    for (int i = 0; i < Size; ++i){
        vector[i] = rand() % 1024;
    }
    // Host result buffers hold one counter per bin.
    // BUG FIX: these were calloc(Size, ...); when bin > Size the comparison
    // loop below read/wrote out of bounds.
    int* hist_cpu = (int*)calloc(bin, sizeof(int));
    int* hist_gpu = (int*)calloc(bin, sizeof(int));
    // allocate memory on device
    int *d_vec, *d_hist;
    cudaMalloc((void **)&d_vec, sizeof(int)*Size);
    cudaMalloc((void **)&d_hist, sizeof(int)*bin);
    // transfer vector from host to device
    cudaMemcpy(d_vec, vector, sizeof(int)*Size, cudaMemcpyHostToDevice);
    // BUG FIX: cudaMemset takes a *byte* count; passing `bin` cleared only
    // bin bytes and left most of d_hist uninitialized.
    cudaMemset(d_hist, 0, sizeof(int)*bin);
    // prepare for recording the execution time
    float gpu_time_ms, cpu_time_ms;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // count histogram by using GPU (dynamic shared memory = one int per bin)
    cudaEventRecord(start, 0);
    dim3 dimBlock(BLOCK_SIZE);
    dim3 dimGrid(128);
    hist_GPU<<< dimGrid, dimBlock, sizeof(int)*bin>>>(d_vec, d_hist, bin, Size);
    cudaDeviceSynchronize();
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&gpu_time_ms, start, stop);
    printf("Counting histogram by using GPU: %f ms.\n", gpu_time_ms);
    cudaMemcpy(hist_gpu, d_hist, sizeof(int)*bin, cudaMemcpyDeviceToHost);
    // count histogram by using CPU
    cudaEventRecord(start, 0);
    hist_CPU(vector, hist_cpu, bin, Size);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&cpu_time_ms, start, stop);
    printf("Counting histogram by using CPU: %f ms.\n", cpu_time_ms);
    // validate results computed by GPU with shared memory
    int all_ok = 1;
    for (int i = 0; i < bin; ++i)
    {
        if(hist_gpu[i] != hist_cpu[i])
        {
            all_ok = 0;
        }
    }
    if (all_ok == 1){
        printf("all results are correct!\n");
    }else{
        printf("Wrong Error!\n");
    }
    // free memory
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_vec);
    cudaFree(d_hist);
    cudaFreeHost(vector);
    // BUG FIX: hist_cpu/hist_gpu came from calloc, so they must be released
    // with free(), not cudaFreeHost() (which is undefined for malloc'd memory).
    free(hist_cpu);
    free(hist_gpu);
    return 0;
}
|
9,577 | //#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//
//#include <stdio.h>
//#include <iostream>
//#include <time.h>
//
//void initialize_my_data(int * ip, int size)
//{
// time_t t;
// srand((unsigned)time(&t));
//
// for (size_t i = 0; i < size; i++)
// {
// //ip[i] = (float)(rand() & 0xFF) / 10.0f;
// ip[i] = 0;
// }
//}
//
//__global__ void add_1_to_array(int * a, int nx)
//{
// int ix = threadIdx.x + blockIdx.x*blockDim.x;
// int iy = threadIdx.y + blockIdx.y*blockDim.y;
//
// int index = iy * nx + ix;
// a[index] += 1;
//}
//
//void print_array(int * x,int size)
//{
// for (size_t i = 0; i < size; i++)
// {
// printf("%d,",x[i]);
// }
//}
//
//void run_code()
//{
// int element_Count = 32 * 32;
// int nx = 32;
// size_t number_bytes = element_Count * sizeof(float);
//
// int *h_a, *gpu_ref;
// h_a = (int *)malloc(number_bytes);
// gpu_ref = (int *)malloc(number_bytes);
//
// //initialize array with values
// initialize_my_data(h_a, element_Count);
// memset(gpu_ref, 0, number_bytes);
//
// int *d_a;
// cudaMalloc((int **)&d_a, number_bytes);
//
// cudaMemcpy(d_a, h_a, number_bytes, cudaMemcpyHostToDevice);
//
// dim3 block(16, 16);
// dim3 grid(nx / block.x, (element_Count / nx) / block.y);
//
// add_1_to_array << < grid, block >> > (d_a, nx);
//
// //wait computation in device to finish
// cudaDeviceSynchronize();
//
// cudaMemcpy(gpu_ref, d_a, number_bytes, cudaMemcpyDeviceToHost);
// //print_array(gpu_ref,element_Count);
//
// cudaFree(d_a);
//
// free(h_a);
// free(gpu_ref);
//}
//
////int main()
////{
//// run_code();
//// system("pause");
//// return 0;
////} |
9,578 | #include <stdio.h>
#include <float.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <ctime>
#include <cmath>
//#define VERBOSE
//#define DEBUG
/***********************************/
/* COST FUNCTION - CPU & GPU CASES */
/***********************************/
/***********************************/
/* COST FUNCTION - CPU & GPU CASES */
/***********************************/
// Classic Rosenbrock function over M variables:
//   sum_{i=0}^{M-2} 100*(x_i^2 - x_{i+1})^2 + (x_i - 1)^2
__host__ __device__ double Rosenbrock(double * h_x, int M) {
    double total = 0.f;
    for (int i = 0; i + 1 < M; ++i) {
        double quad = (h_x[i] * h_x[i] - h_x[i+1]);
        double lin  = (h_x[i] - 1.f);
        total += 100.f * quad * quad + lin * lin;
    }
    return total;
}
/*******************************/
/* GRADIENT DESCENT - GPU CASE */
/*******************************/
// Analytic gradient component dF/dx_i of the Rosenbrock function
// (first, last, and interior coordinates have different forms).
__device__ double F_xn(double * d_x, int i, int dim) {
    if (i == 0)
        return -400.f * (d_x[1] - d_x[0] * d_x[0]) * d_x[0] + 2.f * (d_x[0] - 1.f);
    if (i == dim-1)
        return 200.f * (d_x[dim-1] - d_x[dim-2] * d_x[dim-2]);
    return -400.f * d_x[i] * (d_x[i+1] - d_x[i] * d_x[i]) + 2.f * (d_x[i] - 1.f)
         + 200.f * (d_x[i] - d_x[i-1] * d_x[i-1]);
}
// Diagonal Hessian entry d2F/dx_i^2 of the Rosenbrock function.
__device__ double F_xn_xn(double * d_x, int i, int dim) {
    if (i == 0)
        return 1200.f * d_x[0] * d_x[0] - 400.f * d_x[1] + 2;
    if (i == dim-1)
        return 200;
    return 200.f + 1200.f * d_x[i] * d_x[i] - 400.f * d_x[i+1] + 2.f;
}
// Off-diagonal Hessian entry d2F/(dx_i dx_{i+1}) = -400*x_i.
__device__ double F_xn_xn_plus_1(double * d_x, int i) {
    return -400.f * d_x[i];
}
// Off-diagonal Hessian entry d2F/(dx_i dx_{i-1}) = -400*x_{i-1}.
__device__ double F_xn_xn_minus_1(double * d_x, int i) {
    return -400.f * d_x[i-1];
}
// --- Version using analytical gradient (Rosenbrock function)
// One thread per coordinate (shifted by `offset` so a small grid can cover
// a large dim over several launches). Stores the descent direction -grad F
// in d_g and its squared components in d_g_norm.
__global__ void RosenbrockGradientGPU(double * d_x, double * d_g, double * d_g_norm, int dim, int offset) {
    int gid = blockDim.x*blockIdx.x + threadIdx.x + offset;
    if (gid >= dim)
        return;
    double g = -F_xn(d_x, gid, dim);
    d_g[gid] = g;
    d_g_norm[gid] = g * g;
}
// Fills one row of the (tridiagonal) Rosenbrock Hessian per thread, shifted
// by `offset` to cover large dims with small grids.
__global__ void RosenbrockHessianGPU(double * d_x, double * d_h, int dim, int offset) {
    int gid = blockDim.x*blockIdx.x + threadIdx.x + offset;
    if (gid >= dim)
        return;
    d_h[gid*dim + gid] = F_xn_xn(d_x, gid, dim);
    if (gid < dim - 1)
        d_h[gid*dim + gid + 1] = F_xn_xn_plus_1(d_x, gid);
    if (gid > 0)
        d_h[gid*dim + gid - 1] = F_xn_xn_minus_1(d_x, gid);
}
// v_norm[i] = v[i]^2, offset-shifted like the other kernels above.
__global__ void VectorNormGPU(double * v, double * v_norm, int dim, int offset) {
    int gid = blockDim.x*blockIdx.x + threadIdx.x + offset;
    if (gid < dim)
        v_norm[gid] = v[gid] * v[gid];
}
/*******************/
/* STEP - GPU CASE */
/*******************/
/*******************/
/* STEP - GPU CASE */
/*******************/
// Computes, on device 0, the negated gradient d_g of the Rosenbrock
// function at d_x, its squared components d_g_norm, and the dense dim x dim
// Hessian d_h (all copied back to the host arrays passed in).
// NOTE(review): device buffers are allocated, copied, and freed on every
// call, and the surrounding Newton loop calls this once per iteration --
// the transfers likely dominate; consider hoisting allocations out.
void ComputeGradientHessian(double * d_x, double * d_g, double * d_h, double * d_g_norm, int dim, int blocks, int threads) {
    double * d_x_cuda = NULL;
    double * d_g_cuda = NULL;
    double * d_g_norm_cuda = NULL;
    double * d_h_cuda = NULL;
    cudaSetDevice(0);
    cudaMalloc(&d_x_cuda, sizeof(double)*dim);
    cudaMalloc(&d_g_cuda, sizeof(double)*dim);
    cudaMalloc(&d_g_norm_cuda, sizeof(double)*dim);
    cudaMalloc(&d_h_cuda, sizeof(double)*dim*dim);
    cudaMemcpy(d_x_cuda, d_x, sizeof(double)*dim, cudaMemcpyHostToDevice);
    // Sweep the index space in chunks of blocks*threads coordinates.
    int offset = 0;
    while (offset < dim) {
        RosenbrockGradientGPU<<<blocks, threads>>>(d_x_cuda, d_g_cuda, d_g_norm_cuda, dim, offset);
        RosenbrockHessianGPU<<<blocks, threads>>>(d_x_cuda, d_h_cuda, dim, offset);
        cudaDeviceSynchronize();
        offset += blocks*threads;
    }
    cudaMemcpy(d_g, d_g_cuda, sizeof(double)*dim, cudaMemcpyDeviceToHost);
    cudaMemcpy(d_g_norm, d_g_norm_cuda, sizeof(double)*dim, cudaMemcpyDeviceToHost);
    cudaMemcpy(d_h, d_h_cuda, sizeof(double)*dim*dim, cudaMemcpyDeviceToHost);
    cudaFree(d_x_cuda);
    cudaFree(d_g_cuda);
    cudaFree(d_g_norm_cuda);
    cudaFree(d_h_cuda);
}
// Squares each component of v on the GPU: v_norm[i] = v[i]*v[i].
// (Like ComputeGradientHessian, this allocates/frees per call.)
void computeNorm(double * v, double * v_norm, int dim, int blocks, int threads) {
    double * dev_v = NULL;
    double * dev_norm = NULL;
    cudaSetDevice(0);
    cudaMalloc(&dev_norm, sizeof(double)*dim);
    cudaMalloc(&dev_v, sizeof(double)*dim);
    cudaMemcpy(dev_v, v, sizeof(double)*dim, cudaMemcpyHostToDevice);
    // Sweep the index space in chunks of blocks*threads coordinates.
    for (int offset = 0; offset < dim; offset += blocks*threads) {
        VectorNormGPU<<<blocks, threads>>>(dev_v, dev_norm, dim, offset);
        cudaDeviceSynchronize();
    }
    cudaMemcpy(v_norm, dev_norm, sizeof(double)*dim, cudaMemcpyDeviceToHost);
    cudaFree(dev_norm);
    cudaFree(dev_v);
}
// sqrt of the sum of the entries of v (used as a norm, given squared inputs).
double squareRootOfSum(double * v, int dim) {
    double total = 0;
    for (int k = 0; k < dim; ++k)
        total += v[k];
    return sqrt(total);
}
// Elementwise C = A + B; C may alias A or B (each slot is read then written).
void vectorAdd(double* A, double* B, double* C, int dim) {
    for (int k = 0; k < dim; ++k)
        C[k] = A[k] + B[k];
}
// Row-major flattening of (i, j) in an n-column matrix.
int transform(int i, int j, int n) {
    return j + i * n;
}
// Solve L*Y = B where L is unit lower bidiagonal (only the first
// subdiagonal of L is referenced; the unit diagonal is implicit).
void forwardSubstitution(double* B, double* L, double* Y, int n) {
    Y[0] = B[0];
    for (int i = 1; i < n; ++i)
        Y[i] = B[i] - L[transform(i, i-1, n)] * Y[i-1];
}
// Solve D*Z = Y for a diagonal matrix stored as the vector D.
void substitution(double* Y, double* D, double* Z, int n) {
    for (int i = 0; i < n; ++i)
        Z[i] = Y[i] / D[i];
}
// Solve L^T X = Z where L^T is unit upper bidiagonal (only the first
// subdiagonal of L is referenced; the unit diagonal is implicit).
void backwardSubstitution(double* Z, double* L, double* X, int n) {
    X[n-1] = Z[n-1];
    for (int i = n - 2; i >= 0; --i)
        X[i] = Z[i] - L[transform(i+1, i, n)] * X[i+1];
}
// LDL^T factorization of a symmetric *tridiagonal* matrix A (only the
// diagonal and first subdiagonal of A are read):
//   D[0]      = A[0][0]
//   L[i][i-1] = A[i][i-1] / D[i-1]
//   D[i]      = A[i][i] - L[i][i-1]^2 * D[i-1]
// BUG FIX: the original computed D[i-1] *inside* the loop using
// L[transform(i-1, i-2, n)] and D[i-2]; at i == 1 that reads L[...][-1]
// and D[-1] out of bounds, and the subsequent division A[i][i-1]/D[i-1]
// used the corrupted value (the post-loop D[0] = A[0][0] came too late).
void ldl(double* A, double* L, double* D, int n){
    D[0] = A[transform(0, 0, n)];
    for (int i = 1; i < n; ++i) {
        L[transform(i, i-1, n)] = A[transform(i, i-1, n)] / D[i-1];
        D[i] = A[transform(i, i, n)]
             - L[transform(i, i-1, n)] * L[transform(i, i-1, n)] * D[i-1];
    }
}
// Solve A X = B for a symmetric tridiagonal A via the A = L D L^T
// factorization: forward substitution (L Y = B), diagonal solve (D Z = Y),
// then backward substitution (L^T X = Z). Scratch storage is heap-allocated
// per call and released before returning.
void solve(double* A, double* B, double* X, int n) {
    double* L = new double[n*n];   // unit bidiagonal factor (subdiagonal used)
    double* D = new double[n];     // diagonal factor
    double* Y = new double[n];
    double* Z = new double[n];
    ldl(A, L, D, n);
    forwardSubstitution(B, L, Y, n);
    substitution(Y, D, Z, n);
    backwardSubstitution(Z, L, X, n);
    delete[] Z;
    delete[] Y;
    delete[] D;
    delete[] L;
}
/****************************************/
/* GRADIENT DESCENT FUNCTION - GPU CASE */
/****************************************/
// x0 - Starting point
// tol - Termination tolerance
// maxiter - Maximum number of allowed iterations
// alpha - Step size
// dxmin - Minimum allowed perturbations
// Newton iteration on the Rosenbrock function.
// Usage: prog <n> <tol> <blocks> <threads>
//   n       - problem dimension
//   tol     - gradient-norm stopping tolerance (may be fractional)
//   blocks/threads - GPU launch configuration per sweep
int main(int argc, char** argv)
{
    // FIX: argv was previously indexed without checking argc.
    if (argc < 5) {
        printf("usage: %s n tol blocks threads\n", argv[0]);
        return 1;
    }
    double timeSec;
    clock_t begin, end;
    int i;
    int iter = 0;
    int n = atoi(argv[1]);
    // BUG FIX: tol was parsed with atoi(), so fractional tolerances such as
    // "0.001" or "1e-6" collapsed to 0 or 1 and the loop effectively never
    // (or immediately) terminated.
    double tol = atof(argv[2]);
    int blocks = atoi(argv[3]);
    int threads = atoi(argv[4]);
    double * d_x = new double[n];
    double * delta_x = new double[n];
    double * d_g = new double[n];
    double * d_g_norm = new double[n];
    double * delta_x_norm = new double[n];
    double * d_h = new double[n*n];
    for (i = 0; i < n; i++) {
        d_x[i] = i+1;        // starting point (1, 2, ..., n)
    }
    begin = clock();
    do {
        ComputeGradientHessian(d_x, d_g, d_h, d_g_norm, n, blocks, threads);
        solve(d_h, d_g, delta_x, n);          // Newton step: H * dx = -grad
        vectorAdd(d_x, delta_x, d_x, n);
        computeNorm(delta_x, delta_x_norm, n, blocks, threads);
        iter++;
    } while (squareRootOfSum(d_g_norm, n) > tol);
    end = clock();
    timeSec = (end - begin)/static_cast<double>(CLOCKS_PER_SEC);
    printf("Blocks %d. Threads %d. Elapsed time is %f seconds\n", blocks, threads, timeSec);
    // FIX: the work arrays were previously leaked.
    delete[] d_x;
    delete[] delta_x;
    delete[] d_g;
    delete[] d_g_norm;
    delete[] delta_x_norm;
    delete[] d_h;
    return 0;
} |
9,579 | #include<cuda.h>
#include<stdio.h>
#include<time.h>
#define SIZE 1000
// Parallel minimum of a[0..blockDim.x-1] into *c. Intended for a single-
// block launch (<<<1, SIZE>>>), which makes __syncthreads() a full barrier.
// BUG FIX: the original had every thread write *c = a[0] and then do an
// unsynchronized, non-atomic compare-and-store, so the final value depended
// on scheduling and was usually wrong.
__global__ void min(int *a,int *c)
{
    int i = threadIdx.x;
    if (i == 0)
        *c = a[0];       // seed the result exactly once
    __syncthreads();     // make the seed visible before anyone compares
    atomicMin(c, a[i]);  // race-free minimum across the block
}
// Host driver: fill a[] with i+8 (so a[0] == 8 is the minimum), reduce on
// the GPU, and print the result.
int main()
{
    int i;
    srand(time(NULL));
    int a[SIZE];
    int c;
    int *dev_a, *dev_c;
    cudaMalloc((void **) &dev_a, SIZE*sizeof(int));
    // dev_c holds a single int result (was over-allocated SIZE ints).
    cudaMalloc((void **) &dev_c, sizeof(int));
    for(i=0;i<SIZE;i++)
    {
        a[i] = i+8;
    }
    cudaMemcpy(dev_a,a, SIZE*sizeof(int),cudaMemcpyHostToDevice);
    min<<<1,SIZE>>>(dev_a,dev_c);
    // BUG FIX: the original copied SIZE*sizeof(int) bytes into a single int
    // on the stack, corrupting the host stack.
    cudaMemcpy(&c,dev_c,sizeof(int),cudaMemcpyDeviceToHost);
    printf("\nmin = %d ",c);
    cudaFree(dev_a);
    cudaFree(dev_c);
    return 0;
}
|
9,580 | #include<stdlib.h>
#include<stdio.h>
#include<string.h>
#include<math.h>
#include<cuda.h>
#include<time.h>
// One vertex of the compressed adjacency-list graph: 'start' indexes the
// first outgoing edge in the flat edge array, 'num_edges' is the out-degree.
struct node
{
int start; // offset of this vertex's first edge in the edge list
int num_edges; // number of outgoing edges
};
// One BFS level: each frontier vertex relaxes its outgoing edges, assigns
// cost+1 to unvisited neighbours, and flags them as the next frontier.
// *g_over is set when any vertex joined the next frontier (host loop guard).
// BUG FIX: the global thread id was computed with a hard-coded block size
// (blockIdx.x*128); use blockDim.x so any launch shape works.
__global__
void Kernel(node *g_node,int *g_edges,bool *g_frontier,bool *g_visited,int *g_cost,bool *g_over,int num_nodes)
{
int tid=blockIdx.x*blockDim.x+threadIdx.x;
if(tid<num_nodes&&g_frontier[tid])
{
g_frontier[tid]=false;
g_visited[tid]=true;
// Scan this vertex's slice of the flat edge array.
for(int i=g_node[tid].start;i<g_node[tid].start+g_node[tid].num_edges;i++)
{
int id=g_edges[i];
if(!g_visited[id])
{
g_cost[id]=g_cost[tid]+1;
g_frontier[id]=true;
// Tell the host at least one vertex joined the next frontier.
*g_over=true;
}
}
}
}
// Reads a graph from stdin (vertex table, source vertex, edge list), runs a
// level-synchronous BFS on the GPU, and prints each vertex's depth (cost).
// BUG FIXES: the grid was fixed at 128 blocks (covers at most 16384
// vertices) and is now sized from num_vertex; the uninitialized counter 'k'
// was removed; deprecated cudaThreadSynchronize() replaced; all device and
// host buffers are now released instead of leaked.
void bfs_graph(int num_vertex,int num_edges_list)
{
int i,vertex,edges,id,cost,source;
scanf("%d",&num_vertex);
node *h_node=(node*)malloc(sizeof(node)*num_vertex);
bool *h_frontier=(bool*)malloc(sizeof(bool)*num_vertex);
bool *h_visited=(bool*)malloc(sizeof(bool)*num_vertex);
int *h_cost = (int*)malloc( sizeof(int)*num_vertex);
for(i=0;i<num_vertex;i++)
{
scanf("%d %d",&vertex,&edges);
h_node[i].start=vertex;
h_node[i].num_edges=edges;
h_frontier[i]=false;
h_visited[i]=false;
h_cost[i]=-1;
}
scanf("%d",&source);
h_frontier[source]=true;
h_cost[source]=0;
scanf("%d",&num_edges_list);
int* h_edges=(int*)malloc(sizeof(int)*num_edges_list);
for(i=0;i<num_edges_list;i++)
{
scanf("%d %d",&id,&cost);
h_edges[i]=id;
}
clock_t begin,end;
double time;
begin=clock();
node *d_node;
cudaMalloc((void**)&d_node,sizeof(node)*num_vertex);
cudaMemcpy(d_node,h_node,sizeof(node)*num_vertex,cudaMemcpyHostToDevice);
int *d_edges,*d_cost;
cudaMalloc((void**)&d_edges,sizeof(int)*num_edges_list);
cudaMemcpy(d_edges,h_edges,sizeof(int)*num_edges_list,cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_cost,sizeof(int)*num_vertex);
cudaMemcpy(d_cost,h_cost,sizeof(int)*num_vertex,cudaMemcpyHostToDevice);
bool *d_frontier,*d_visited;
cudaMalloc((void**)&d_frontier,sizeof(bool)*num_vertex);
cudaMemcpy(d_frontier,h_frontier,sizeof(bool)*num_vertex,cudaMemcpyHostToDevice);
cudaMalloc((void**)&d_visited,sizeof(bool)*num_vertex);
cudaMemcpy(d_visited,h_visited,sizeof(bool)*num_vertex,cudaMemcpyHostToDevice);
bool *d_over;
cudaMalloc((void**)&d_over,sizeof(bool));
// Size the grid to cover every vertex (ceil division).
int num_threads=128;
int num_blocks=(num_vertex+num_threads-1)/num_threads;
dim3 grid(num_blocks,1,1);
dim3 threads(num_threads,1,1);
bool stop;
// One kernel launch per BFS level, until no vertex joins the frontier.
do
{
stop=false;
cudaMemcpy(d_over,&stop,sizeof(bool),cudaMemcpyHostToDevice);
Kernel<<<grid,threads,0>>>(d_node,d_edges,d_frontier,d_visited,d_cost,d_over,num_vertex);
cudaDeviceSynchronize();
cudaMemcpy(&stop,d_over,sizeof(bool),cudaMemcpyDeviceToHost);
} while(stop);
cudaMemcpy(h_cost,d_cost,sizeof(int)*num_vertex,cudaMemcpyDeviceToHost);
for(i=0;i<num_vertex;i++)
printf("%d cost:%d\n",i,h_cost[i]);
end=clock();
time=(double)(end-begin)/CLOCKS_PER_SEC;
printf("\n%f",time);
// Release device and host buffers.
cudaFree(d_node);
cudaFree(d_edges);
cudaFree(d_cost);
cudaFree(d_frontier);
cudaFree(d_visited);
cudaFree(d_over);
free(h_node);
free(h_frontier);
free(h_visited);
free(h_cost);
free(h_edges);
}
// Entry point: the argument placeholders are overwritten by the scanf
// calls inside bfs_graph.
int main()
{
int vertices=0;
int edge_entries=0;
bfs_graph(vertices,edge_entries);
return 0;
} |
9,581 | #include <cuda_runtime.h>
#define UINT64 long long
// Element-wise modular addition: c[i] = (a[i] + b[i]) mod m[i],
// one element per thread.
__global__ void
sumKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    c[i] = ( a[i] + b[i] ) % m[i];
}
// Element-wise modular subtraction: c[i] = (a[i] - b[i]) mod m[i].
// m[i] is added first so the left operand stays non-negative.
__global__ void
diffKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    c[i] = ( m[i] + a[i] - b[i] ) % m[i];
}
// Element-wise modular multiplication: c[i] = (a[i] * b[i]) mod m[i].
// Note: the product is formed in 64-bit signed arithmetic and can
// overflow for large operands.
__global__ void
mulKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    c[i] = ( a[i] * b[i] ) % m[i];
}
// Element-wise modular division by brute force: finds the smallest i in
// [0, m) with i*b congruent to a (mod m) and stores it in c; c is left
// untouched when no such i exists. O(m) per element.
__global__ void
divKernel ( UINT64 * a, UINT64 * b, UINT64 * m, UINT64 * c )
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    UINT64 mod = m[ idx ];
    UINT64 target = a[ idx ] % mod;
    UINT64 mult = b[ idx ];
    for ( int i = 0; i < mod; i++ )
    {
        if ( ( i * mult + mod ) % mod == target )
        {
            c[ idx ] = i;
            break;
        }
    }
}
// Dispatches one element-wise modular-arithmetic kernel over (a, b) mod m.
// operationType: 1 = add, 2 = subtract, 3 = multiply, 4 = divide.
// BUG FIX: operationType 4 previously launched mulKernel (copy-paste error)
// instead of divKernel.
void doOperation( UINT64 * aDev, UINT64 * bDev, UINT64 * mDev, UINT64 * cDev, int operationType, const dim3 & threads, const dim3 & blocks )
{
    switch ( operationType )
    {
    case 1:
        sumKernel<<<blocks, threads>>> (aDev, bDev, mDev, cDev);
        break;
    case 2:
        diffKernel<<<blocks, threads>>> (aDev, bDev, mDev, cDev);
        break;
    case 3:
        mulKernel<<<blocks, threads>>> (aDev, bDev, mDev, cDev);
        break;
    case 4:
        divKernel<<<blocks, threads>>> (aDev, bDev, mDev, cDev);
        break;
    }
}
|
9,582 | #include "includes.h"
// Histogram pass: one thread per pixel atomically bumps the occupancy
// counter of the grid cell its hash maps to; out-of-range hashes and
// out-of-range threads are ignored.
__global__ void g_countCellOcc(uint *_hash, uint *_cellOcc, uint _pixCount, uint _hashCellCount)
{
    uint gid = blockDim.x * blockIdx.x + threadIdx.x;
    if(gid >= _pixCount)
        return;
    uint cell = _hash[gid];
    if(cell < _hashCellCount)
        atomicAdd(&_cellOcc[cell], 1);
} |
9,583 |
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
//#include"formats.h"
// Aborts the process with a diagnostic if the CUDA runtime has recorded an
// error since the last check; 'msg' labels the failing call site.
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if( cudaSuccess == err )
        return;
    fprintf(stderr, "ERROR[CUDA]:%s{%s}.\n", msg, cudaGetErrorString( err ) );
    exit(EXIT_FAILURE);
}
// Uploads the 4-byte-per-pixel rectangle (rgbleft, rgbtop, rgbwidth,
// rgbheight) from a host frame of width x height pixels into the same
// position of a device frame, one cudaMemcpy per row.
inline void copy_shmrgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated throuth cuMemAlloc()
int rgbleft,int rgbtop,
int rgbwidth,int rgbheight,
int width,int height)
{
    int stride = width << 2;          // bytes per full source/dest row
    int row_bytes = rgbwidth << 2;    // bytes actually copied per row
    for (int h = rgbtop; h < rgbtop + rgbheight; h++) {
        int at = h * stride + (rgbleft << 2);
        cudaMemcpy(devmem + at, rgbs + at, row_bytes, cudaMemcpyHostToDevice);
    }
}
// TEST ONLY: uploads a tightly-packed host patch (patch_width x
// patch_height, 4 bytes/pixel) into the rectangle at (patch_left,
// patch_top) of a device frame that is 'width' pixels wide.
inline void copy_caprgb_to_device(unsigned char*rgbs,
unsigned char*devmem,//already allocated throuth cuMemAlloc()
int patch_left,int patch_top,
int patch_width,int patch_height,
int width,int height)
{
    int dst_stride = width << 2;
    int src_stride = patch_width << 2;
    unsigned char *dst = devmem + ((patch_top * width + patch_left) << 2);
    unsigned char *src = rgbs;
    for (int h = 0; h < patch_height; h++) {
        cudaMemcpy(dst, src, src_stride, cudaMemcpyHostToDevice);
        dst += dst_stride;
        src += src_stride;
    }
}
// Converts one row of a BGRX (4 bytes/pixel) device image to NV12 in
// 'oyuv'. Launched with one thread per patch row (threadIdx.x selects the
// row). Even rows write luma plus the U samples of the shared chroma row;
// odd rows write luma plus the V samples. 'ostride' is the luma row pitch
// and 'ovstride' the luma plane height, so the interleaved UV plane starts
// at oyuv + ovstride*ostride.
__global__ void
convert_line_rgb_to_nv12(unsigned char*devrgb,int rgbstride,/*device mem*/
unsigned char*oyuv,int ostride,int ovstride,/*device mem*/
int width,int left,int top)
{
int curline=threadIdx.x;
unsigned char*rgb_p=devrgb+(curline+top)*rgbstride*4;
unsigned char*luma_p=oyuv+(curline+top)*ostride;
// Two source rows share one chroma row, hence the >>1.
unsigned char*chroma_p=oyuv+(ovstride*ostride)+((curline+top)>>1)*ostride;
int r,g,b;
int y,u,v;
int j;
if(curline%2==0){
//even line: luma + U samples
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
// U sample at the even byte of the interleaved UV pair.
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
}
}
}else{
//odd line: luma + V samples
for(j=left;j<width+left;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(j%2==0){
// V sample at the odd byte following its U partner.
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
}
}
}
}
//FIXME
// Intended to convert one row of a planar YV12 device image to NV12
// (one thread per row).
// NOTE(review): this kernel appears broken, consistent with its FIXME tag:
// the chroma writes ignore the loop index (*(nv12_chroma_p) and
// *(nv12_chroma_p+1) always hit the same two bytes), the half-resolution
// U/V planes are read with the full-resolution index j, and the chroma row
// offset uses curpos>>1 although the planar chroma pitch is istride/2.
// Verify against a YV12 reference before relying on it.
__global__ void
convert_line_yv12_to_nv12(unsigned char*pdev,int istride,
unsigned char*oyuv,int ostride,
int width,int height)
{
int curline=threadIdx.x;
int yv12_luma_siz = istride*height;
int yv12_chroma_siz = yv12_luma_siz>>2; // each chroma plane is 1/4 of luma
int curpos=curline*istride;
unsigned char*yv12_luma_p=pdev+curpos;
unsigned char*yv12_v_p=pdev+yv12_luma_siz+(curpos>>1); // YV12 stores V first
unsigned char*yv12_u_p=pdev+yv12_luma_siz+yv12_chroma_siz+(curpos>>1);
curpos=curline*ostride;
unsigned char*nv12_luma_p=oyuv+curpos;
unsigned char*nv12_chroma_p=oyuv+(height*ostride)+(curpos>>1);
char val;
int j;
for(j=0;j<width;j++){
val=*(yv12_luma_p+j);
*(nv12_luma_p+j)=val;
val=*(yv12_u_p+j);
*(nv12_chroma_p)=val;
val=*(yv12_v_p+j);
*(nv12_chroma_p+1)=val;
}
}
// Uploads the BGRX patch (left, top, width, height) of a host frame into
// device memory and converts it to NV12 into 'oyuv' (one kernel thread per
// patch row). 'rgbwidth'/'rgbheight' describe the full source frame;
// 'ostride' is the NV12 luma pitch.
extern "C" void load_rgb_bgrx_cuda(
unsigned char* oyuv,/*device*/
unsigned char* devrgb,/*device */
unsigned char*rgb, /*input data host*/
int left,int top,int width,int height,//rgb patch rect
int rgbwidth,int rgbheight,//rgb data size
int ostride //yuv data height<pixel>
)
{
//Copy data from shared memory to the device.
#if 1
// Read rects from shm region.
copy_shmrgb_to_device((unsigned char*)rgb,
(unsigned char*)devrgb,//already allocated throuth cuMemAlloc()
left,top,
width,height,
rgbwidth,rgbheight);
#else
//for TEST :read rects from capture file.
copy_caprgb_to_device((unsigned char*)rgb,
(unsigned char*)devrgb,//already allocated throuth cuMemAlloc()
left,top,
width,height,
rgbwidth,rgbheight);
#endif
// The UV plane starts after rgbheight luma rows.
int ovstride=rgbheight;
// fprintf(stderr,"rgbwidth:%d ostride:%d ovstride:%d, width:%d, left:%d, top:%d\n",rgbwidth,ostride,ovstride,width,left,top);
convert_line_rgb_to_nv12<<<1,height>>>(devrgb,rgbwidth,
oyuv,ostride,ovstride,
width,left,top);
// NOTE(review): cudaThreadSynchronize() is deprecated in favour of
// cudaDeviceSynchronize().
cudaThreadSynchronize();
checkCUDAError("Convert BGRA to NV12\n");
}
// Uploads a planar YV12 frame from host memory into the device buffer
// 'devyv12' (restriding istride -> ostride), then launches one thread per
// row to repack it as NV12 into 'oyuv'.
// BUG FIX: 'devyv12' is device memory (see the parameter comment and the
// matching RGB path), so the plain host memcpy() calls were undefined
// behaviour; every row now goes through cudaMemcpy(HostToDevice). The
// deprecated cudaThreadSynchronize() was also replaced.
extern "C" void load_yuv_yv12_cuda(
unsigned char* oyuv,/*device*/
unsigned char* devyv12,/*device */
unsigned char*iyuv, /*input data host*/
int width,int height,/*real size*/
int istride,int ostride
)
{
int in_luma_siz=istride*height;
int out_luma_siz=ostride*height;
int in_chroma_siz=in_luma_siz>>2;
int out_chroma_siz=out_luma_siz>>2;
unsigned char*in_luma_p=iyuv;
unsigned char*out_luma_p=devyv12;
unsigned char*in_v_p=iyuv+in_luma_siz; // YV12: V plane precedes U
unsigned char*out_v_p=devyv12+out_luma_siz;
unsigned char*in_u_p=iyuv+in_luma_siz+in_chroma_siz;
unsigned char*out_u_p=devyv12+out_luma_siz+out_chroma_siz;
int j;
for(j=0;j<height;j++){
//y
cudaMemcpy(out_luma_p+j*ostride,in_luma_p+j*istride,width,cudaMemcpyHostToDevice);
}
for(j=0;j<(height>>1);j++){
//v
cudaMemcpy(out_v_p+((j*ostride)>>1),in_v_p+((j*istride)>>1),width>>1,cudaMemcpyHostToDevice);
//u
cudaMemcpy(out_u_p+((j*ostride)>>1),in_u_p+((j*istride)>>1),width>>1,cudaMemcpyHostToDevice);
}
convert_line_yv12_to_nv12<<<1,height>>>(devyv12,istride,
oyuv,ostride,
width,height);
cudaDeviceSynchronize();
checkCUDAError("Convert YV12 to NV12\n");
}
/***************************************************/
/***************************************************/
/***************************************************/
/***************************************************/
extern"C"{
// Converts one RGB pixel to YUV; results are written through the y/u/v
// pointers. The active branch uses full-range coefficients with chroma
// biased by +128; the disabled branch is a limited-range YCbCr variant.
inline void rgb2yuv_pixel(
unsigned char r,
unsigned char g,
unsigned char b,
unsigned char*y,
unsigned char*u,
unsigned char*v
){
#if 0
//YCbCr
*y=(0.257*r)+(0.504*g)+(0.098*b)+16;
*u=-(0.148 * r) - (0.291 * g) + (0.439 * b) + 128;
*v=(0.439*r)-(0.368*g)+(0.071*b)+128;
#else
//YUV, Intel IPP coefficients (original comment labelled this "BT.709";
//the luma weights 0.299/0.587/0.114 are BT.601-style — verify)
*y= 0.299*r + 0.587*g + 0.114*b;
*u= -0.169*r - 0.331*g + 0.5*b+128;
*v= 0.5*r - 0.419*g - 0.081*b+128;
#endif
}
/*For Test*/
// CPU reference: converts a tightly-packed BGRX patch into the NV12 frame
// 'yuv' at rectangle (left, top, width, height). Even rows contribute the
// U samples of each chroma pair, odd rows the V samples (no averaging).
// Assumes left/top/width/height are even; 'ostride' is the luma pitch and
// 'rgbheight' the full luma plane height.
void load_rgb_bgrx_(unsigned char*yuv,unsigned char*rgb,
int left,int top,int width,int height,//patch rectangle
int rgbheight,
int ostride)
{
//assert left top width height are even;
//
// Byte offset of the interleaved UV plane.
int luma_off=ostride*rgbheight;
unsigned char*luma_p;
unsigned char*chroma_p;
unsigned char*rgb_p;
int r,g,b;
int y,u,v;
// fprintf(stderr,"LOAD {x:%d, y:%d, w:%d, h:%d, ww:%d, hh:%d }\n",left,top,width,height,stride,vstride);
int i,j;
for(i=top;i<height+top;i++){
//rows (two consecutive rows share one chroma row)
rgb_p=rgb+width*(i-top)*4;
luma_p=yuv+ostride*i;
chroma_p=yuv+luma_off+ostride*(i/2);
for(j=left;j<width+left;j++){
#if 1
b=*(rgb_p+(j-left)*4);
g=*(rgb_p+(j-left)*4+1);
r=*(rgb_p+(j-left)*4+2);
#else
#endif
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
if(i%2==0 && j%2==0){
// U sample at the even byte of the chroma pair.
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
}
if(i%2==1 && j%2==0){
// V sample at the odd byte of the chroma pair.
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
}
}
}
}
/* Test variant: BGRX -> NV12 conversion that averages the four chroma
 * samples of each 2x2 pixel block.
 * BUG FIXES: 'au'/'av' were read before ever being initialized (undefined
 * behaviour); they are now zeroed before use and at the start of each row
 * pair. The averaged pair is also written after BOTH columns of the block
 * have been accumulated (j odd) — previously it was written at j even,
 * when only two of the four samples had been summed. */
void load_rgb_bgrx_2(unsigned char*yuv,unsigned char*rgb,
int left,int top,int width,int height,//patch rectangle
int rgbheight,
int ostride)
{
//assert left top width height are even;
//
int luma_off=ostride*rgbheight; // start of the interleaved UV plane
unsigned char*luma_p0,*luma_p1;
unsigned char*chroma_p;
unsigned char*rgb_p0,*rgb_p1;
int au=0;//running sum of the block's four U samples
int av=0;//running sum of the block's four V samples
unsigned char r,g,b;
unsigned char y,u,v;
int i,j;
for(i=top;i<height+top;i+=2){
//rows, two at a time (they share one chroma row)
rgb_p0=rgb+width*(i-top)*4;
rgb_p1=rgb+width*(i-top+1)*4;
luma_p0=yuv+ostride*i;
luma_p1=yuv+ostride*(i+1);
chroma_p=yuv+luma_off+ostride*(i/2);
au=av=0;
for(j=left;j<width+left;j++){
b=*(rgb_p0+(j-left)*4);
g=*(rgb_p0+(j-left)*4+1);
r=*(rgb_p0+(j-left)*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p0+j)=(char)y&0xff;
au+=u;
av+=v;
///////////
b=*(rgb_p1+(j-left)*4);
g=*(rgb_p1+(j-left)*4+1);
r=*(rgb_p1+(j-left)*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p1+j)=(char)y&0xff;
au+=u;
av+=v;
if(j%2==1){
// Four samples accumulated: store the block average as a U,V pair.
*(chroma_p+j-1)=(au>>2)&0xff;
*(chroma_p+j)=(av>>2)&0xff;
av=au=0;
}
}
}
}
/*
void load_rgb_bgrx(unsigned char*yuv,unsigned char*rgb,
int left,int top,int width,int height,//patch rectangle
int rgbheight,
int ostride)
*/
// Converts the BGRX patch at (pleft, ptop, pwidth, pheight) of a
// width x height frame into NV12 in 'nv12'. Rows are processed in pairs;
// each chroma pair takes U from the even row's pixel and V from the odd
// row's pixel at even columns (the commented-out '+=' lines are the
// remains of an abandoned 2x2 averaging scheme — cf. load_rgb_bgrx_2).
// Strides of 0 mean "tightly packed". Assumes the patch bounds are even.
void load_rgb_bgrx(
unsigned char*bgrx,
unsigned char*nv12,
int pleft,int ptop,int pwidth,int pheight,//rgb patch rect
int width,int height,//rgb data size
int sstride,
int dstride //yuv data stride<pixel>
)
{
//assert left top width height are even;
//
if (sstride == 0)
sstride = width;
if (dstride == 0)
dstride = width;
// Byte offset of the interleaved UV plane.
int luma_off=dstride*height;
unsigned char*luma_p0,*luma_p1;
unsigned char*chroma_p;
unsigned char*rgb_p0,*rgb_p1;
int au;//(u1+u2+u3+u4)/4
int av;//
unsigned char r,g,b;
unsigned char y,u,v;
// fprintf(stderr,"LOAD {x:%d, y:%d, w:%d, h:%d, ww:%d, hh:%d }\n",left,top,width,height,stride,vstride);
int i,j;
for(i=ptop;i<pheight+ptop;i+=2){
//rows, two at a time (they share one chroma row)
rgb_p0=bgrx+sstride*(i)*4;
rgb_p1=bgrx+sstride*(i+1)*4;
luma_p0=nv12+dstride*i;
luma_p1=nv12+dstride*(i+1);
chroma_p=nv12+luma_off+dstride*(i/2);
for(j=pleft;j<pwidth+pleft;j++){
b=*(rgb_p0+j*4);
g=*(rgb_p0+j*4+1);
r=*(rgb_p0+j*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p0+j)=(char)y&0xff;
au=u; // U taken from the even row only
// av=v;
///////////
b=*(rgb_p1+j*4);
g=*(rgb_p1+j*4+1);
r=*(rgb_p1+j*4+2);
rgb2yuv_pixel(r,g,b,&y,&u,&v);
*(luma_p1+j)=(char)y&0xff;
// au+=u;
av=v; // V taken from the odd row only
if(j%2==0){
*(chroma_p+j)=au&0xff;
*(chroma_p+j+1)=av&0xff;
// av=au=0;
}
}
}
}
#if 0
void load_rgb_bgrx__(
unsigned char*bgrx,
unsigned char*nv12,
int pleft,int ptop,int pwidth,int pheight,//rgb patch rect
int width,int height,//rgb data size
int sstride,
int dstride //yuv data stride<pixel>
)
{
unsigned char*luma_p=nv12;
unsigned char*chroma_p;
unsigned char*rgb_p=bgrx;
if (sstride == 0)
sstride = width;
if (dstride == 0)
dstride = width;
chroma_p=luma_p+dstride*height;
unsigned char b,g,r;
unsigned char y,u,v;
int i,j;
for(i=ptop;i<pheight;i+=2){//vertical
//==============
rgb_p=bgrx+i*sstride*4;
luma_p=nv12+dstride*i;
chroma_p=nv12+dstride+height+dstride*(i/2);
for(j=pleft;j<pwidth+pleft;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
// if(j%2==0){
u= -0.169*r - 0.331*g + 0.5*b+128;
*(chroma_p+j)=(char)u&0xff;
// }
}
//odd line
rgb_p+=sstride*4;
luma_p+=dstride;
for(j=pleft;j<pwidth+pleft;j++){
b=*(rgb_p+j*4);
g=*(rgb_p+j*4+1);
r=*(rgb_p+j*4+2);
y= 0.299*r + 0.587*g + 0.114*b;
*(luma_p+j)=(char)y&0xff;
// if(j%2==0){
v= 0.5*r - 0.419*g - 0.081*b+128;
*(chroma_p+j+1)=(char)v&0xff;
// }
}
// }
}
}
#endif
/* Repacks planar YV12 (Y plane, then V plane, then U plane at quarter
 * size) into a semi-planar layout: Y plane followed by chroma rows that
 * hold one V,U byte pair per two luma columns. A stride of 0 means
 * "tightly packed" (stride = width). */
void load_yuv_yv12(unsigned char*yv12,unsigned char*nv12,int width,int height,int sstride,int dstride)
{
if (sstride == 0)
sstride = width;
if (dstride == 0)
dstride = width;
unsigned char*src_y=yv12;
unsigned char*src_v=yv12+sstride*height;
unsigned char*src_u=src_v+sstride*height/4;
unsigned char*dst_y=nv12;
unsigned char*dst_c=nv12+dstride*height;
int row, col;
/* Luma: straight row-by-row copy. */
for (row = 0; row < height; row++)
memcpy(dst_y + row*dstride, src_y + row*sstride, width);
/* Chroma: interleave one V and one U byte per two luma columns. */
for (row = 0; row < height/2; row++){
for (col = 0; col < width; col += 2){
dst_c[row*dstride + col] = src_v[row*(sstride/2) + (col>>1)];
dst_c[row*dstride + col + 1] = src_u[row*(sstride/2) + (col>>1)];
}
}
}
/* Copies a semi-planar NV12 frame between buffers with different strides.
 * A stride of 0 means "tightly packed" (stride = width).
 * BUG FIXES: the chroma planes were located at width*stride instead of
 * height*stride (wrong whenever width != height), and each chroma row was
 * copied as width/2 bytes with an ostride/2 pitch — NV12 chroma rows are
 * full width (U and V bytes interleaved) at the luma pitch. */
void load_yuv_nv12(unsigned char*inyuv, unsigned char*outyuv,int width,int height,int istride,int ostride)
{
if(istride==0)
istride=width;
if(ostride==0)
ostride=width;
unsigned char*inyuv_chroma=inyuv+height*istride;
unsigned char*outyuv_chroma=outyuv+height*ostride;
int y;
for(y=0;y<height;y++){
memcpy(outyuv+y*ostride,inyuv+y*istride,width);
}
for(y=0;y<height/2;y++){
memcpy(outyuv_chroma+y*ostride,inyuv_chroma+y*istride,width);
}
}
}//extern "C"
|
9,584 | // compute.cu
// Collin Beaudoin November 2020
// driver and kernel call
#include <stdio.h>
/*********************************************************
This section is used to declare global variables
*********************************************************/
#define THREADS_PER_BLOCK 128
/*********************************************************
 This is the kernel function of the code. This is where
 the GPU will be calculating the heat diffusion of the rod,
 using an even/odd schedule: a cell is only updated on time
 steps of matching parity.
 @parameter c_d: This is the heat diffusion array
 @parameter arrSize: This is the size of the array
 @parameter timeStep: This is the current time step
 @parameter timeSteps: This is the amount of steps to
 calculate
 BUG FIXES: the bounds check admitted x == arrSize, letting
 the last thread read and write one element past the array;
 and __syncthreads() sat inside a divergent branch. The
 barrier was also unnecessary (each sub-step is a separate
 kernel launch), so it has been removed.
*********************************************************/
__global__ void compute_d (float *c_d, int arrSize, int timeStep, int timeSteps)
{
    //DECLARE VARS
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    //CHECK THAT POSITION EXISTS (strictly inside the array)
    if (x < arrSize) {
        //CHECK IF THIS VALUE SHOULD BE CALCULATED CURRENTLY
        if (x % 2 == timeStep % 2 && x <= timeStep)
        {
            //SKIP IF THE CALCULATIONS ARE DONE FOR THIS SECTION
            if (timeStep > timeSteps && x <= (timeStep - timeSteps - arrSize))
            {
            } else
            {
                //CALCULATE HEAT DIFFUSION BASED ON POSITION
                if (x == 0)
                {
                    // Left boundary is held at 100 degrees.
                    c_d[x] = (100.0 + c_d[x + 1]) / 2.0;
                } else if (x == arrSize - 1)
                {
                    c_d[x] = (c_d[x - 1] + c_d[x]) / 2.0;
                } else
                {
                    c_d[x] = (c_d[x - 1] + c_d[x + 1]) / 2.0;
                }
            }
        }
    }
}
/*********************************************************
 Host driver for the heat-diffusion kernel: copies the rod
 to the GPU, launches one kernel per sub-step of the
 pipelined schedule, then copies the result back.
 @parameter metalRod: This is rod to compute
 @parameter arrSize: This is the size of the rod
 @parameter timeSteps: This is the amount of steps to
 calculate
*********************************************************/
extern "C" void computeArr (float *metalRod, int arrSize, int timeSteps)
{
    float *rod_d;
    int step;
    int totalSteps = (2 * (timeSteps - 1)) + arrSize;
    //ALLOCATE AND FILL THE DEVICE BUFFER
    cudaMalloc ((void**) &rod_d, sizeof(float) * arrSize);
    cudaMemcpy (rod_d, metalRod, sizeof(float) * arrSize, cudaMemcpyHostToDevice);
    //ONE LAUNCH PER SUB-STEP
    for (step = 0; step < totalSteps; step++)
    {
        compute_d <<< ceil((float) arrSize/THREADS_PER_BLOCK), THREADS_PER_BLOCK >>> (rod_d, arrSize, step, timeSteps);
    }
    //CHECK FOR ERRORS
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        printf ("CUDA error: %s\n", cudaGetErrorString(err));
    //COPY THE RESULT BACK TO THE HOST AND CLEAN UP
    cudaMemcpy (metalRod, rod_d, sizeof(float) * arrSize, cudaMemcpyDeviceToHost);
    cudaFree (rod_d);
}
|
9,585 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <iostream>
#include <algorithm>
// Placeholder entry point.
int main()
{
    //refer to other branches for code samples.
    return 0;
}
|
9,586 | #include <stdio.h>
// Primality test: each thread's element d_in[id] holds a number on entry
// and is overwritten with 1 (prime) or 0 (composite) on exit. Assumes the
// launch grid covers the array exactly (no bounds parameter is available).
// BUG FIXES: the global index was blockIdx.x + threadIdx.x, so different
// (block, thread) pairs collided on the same elements and most of the
// array was never examined — it is now blockIdx.x * blockDim.x +
// threadIdx.x. Also, 2 was reported composite because the even-number test
// ran before a num == 2 special case.
__global__ void checkPrime(int * d_in)
{
    //get global thread id
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    //num is now in local memory, much quicker
    int num = d_in[id];
    //couple of corner cases
    if(num == 0 || num == 1)
    {
        d_in[id] = 0;
        return;
    }
    //2 is the only even prime
    if(num == 2)
    {
        d_in[id] = 1;
        return;
    }
    //assume prime until proven otherwise
    d_in[id] = 1;
    //quicker to check 2 on its own
    //then we can count up in 2s (only need to check odd numbers) starting from 3
    if(num % 2 == 0)
    {
        d_in[id] = 0;
    }
    else
    {
        //only need to check upto ceil of sqrt(num)
        //do sqrt here not in loop to stop it being evaluated each time round
        int sqrtNum = (int)sqrt((float)num);
        for(int i = 3; i < sqrtNum + 1; i += 2)
        {
            if(num % i == 0)
            {
                d_in[id] = 0;
                return;
            }
        }
    }
}
// Marks every number in [0, ARRAY_SIZE) as prime/composite on the GPU and
// prints the primes.
// BUG FIX: h_in used to be a stack array; above roughly 10^6 ints it blew
// the stack and crashed (the "crashes over 1000000" mystery). It now lives
// on the heap and is freed before exit.
int main(int argc, char ** argv)
{
    const int ARRAY_SIZE = 100000;
    const int ARRAY_BYTES = ARRAY_SIZE * sizeof(int);
    //generate the input array on the host (heap, not stack)
    int * h_in = (int *) malloc(ARRAY_BYTES);
    if (h_in == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        h_in[i] = i;
    }
    //declare GPU memory pointers
    int * d_in;
    //allocate GPU memory
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    //transfer the array to the GPU
    cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
    //launch the kernel (ARRAY_SIZE must stay a multiple of the block size)
    checkPrime<<<ARRAY_SIZE/100, 100>>>(d_in);
    //copy back the result array to the CPU
    cudaMemcpy(h_in, d_in, ARRAY_BYTES, cudaMemcpyDeviceToHost);
    //print out the resulting array of primes
    for (int i = 0; i < ARRAY_SIZE; i++)
    {
        if(h_in[i])
            printf("%d\n", i);
    }
    cudaFree(d_in);
    free(h_in);
    return 0;
} |
9,587 | #include <stdio.h>
#include <math.h>
#include <sys/time.h>
// Stores the current wall-clock time (seconds since the epoch, with
// microsecond resolution) into *wcTime; always returns 0.0.
double get_walltime_(double* wcTime) {
    struct timeval now;
    gettimeofday(&now, NULL);
    *wcTime = (double) now.tv_sec + (double) now.tv_usec / 1000000.0;
    return 0.0;
}
// Thin wrapper exposing the unsuffixed name; delegates to get_walltime_
// and discards its (always 0.0) return value.
void get_walltime(double* wcTime) {
    (void) get_walltime_(wcTime);
}
// Row-major dense matrix: element (row, col) lives at
// elements[row * width + col].
typedef struct {
int width; // number of columns
int height; // number of rows
float* elements; // row-major storage, width * height floats
} Matrix;
// Thread block size
#define BLOCK_SIZE 8
// Forward declaration of the matrix multiplication kernel
__global__
void MatMulKernel(const Matrix, const Matrix, Matrix);
// Matrix multiplication - Host code: computes C = A * B on the GPU.
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE (the kernel
// performs no bounds checks).
void MatMul(const Matrix A, const Matrix B, Matrix C)
{
    // Device-side copies of the three matrices.
    Matrix devA, devB, devC;
    devA.width = A.width; devA.height = A.height;
    devB.width = B.width; devB.height = B.height;
    devC.width = C.width; devC.height = C.height;
    size_t bytesA = A.width * A.height * sizeof(float);
    size_t bytesB = B.width * B.height * sizeof(float);
    size_t bytesC = C.width * C.height * sizeof(float);
    // Upload the inputs.
    cudaMalloc(&devA.elements, bytesA);
    cudaMemcpy(devA.elements, A.elements, bytesA, cudaMemcpyHostToDevice);
    cudaMalloc(&devB.elements, bytesB);
    cudaMemcpy(devB.elements, B.elements, bytesB, cudaMemcpyHostToDevice);
    // Allocate the output.
    cudaMalloc(&devC.elements, bytesC);
    // One thread per output element.
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid(B.width / dimBlock.x, A.height / dimBlock.y);
    MatMulKernel<<<dimGrid, dimBlock>>>(devA, devB, devC);
    // Download the result and release device memory.
    cudaMemcpy(C.elements, devC.elements, bytesC, cudaMemcpyDeviceToHost);
    cudaFree(devA.elements);
    cudaFree(devB.elements);
    cudaFree(devC.elements);
}
// Matrix multiplication kernel called by MatMul(): thread (row, col)
// computes one element of C as the dot product of A's row and B's column.
// No bounds checks — the grid must tile the output exactly.
__global__ void MatMulKernel(Matrix A, Matrix B, Matrix C)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    float acc = 0;
    for (int k = 0; k < A.width; ++k) {
        acc += A.elements[row * A.width + k] * B.elements[k * B.width + col];
    }
    C.elements[row * C.width + col] = acc;
}
// main function allocating the matrices and calling the multiplication function.
// Builds two 1000x1000 matrices whose main diagonals are 1 and 2 (all other
// entries stay zero from calloc), multiplies them on the GPU, sums the
// result as a sanity check, and reports wall-clock time and Mflops.
int main(int argc, char** argv)
{
int m=1000, n=1000;
int row, col;
float sum = 0;
float tmp;
double delta, finish, start;
double flops, nd;
Matrix A, B, C;
A.width = m; A.height = n;
B.width = m; B.height = n;
C.width = m; C.height = n;
// calloc zero-fills, so only the diagonal entries set below are non-zero.
A.elements = (float*) calloc ((A.width)*(A.height), sizeof(float));
B.elements = (float*) calloc ((B.width)*(B.height), sizeof(float));
C.elements = (float*) calloc ((C.width)*(C.height), sizeof(float));
col = 0;
for (row=0; row<A.width; row++) {
//for (col=0; col<A.height; col++) {
// col == row on every iteration, so this writes the main diagonal.
A.elements[row*A.width+col] = 1;
B.elements[row*A.width+col] = 2;
col++;
//}
}
// start time measurement
get_walltime(&start);
MatMul(A, B, C);
// stop time measurement
get_walltime(&finish);
// Sum all elements of C; for these diagonal inputs C = 2*I, so the
// expected total is 2 * n.
for (row=0; row<C.width; row++) {
tmp = 0;
for (col=0; col<C.height; col++) {
tmp += C.elements[row*C.width+col];
}
sum += tmp;
}
printf("Sum of all elements of C is: %f\n", sum);
// calculating time delta and Mflops
delta = (finish - start);
nd = (double) n;
// Standard dense-matmul operation count: 2n^3 - n^2.
flops = (2.*nd*nd*nd-nd*nd)/delta/1000000. ;
printf(">>>>> finish: %f\n", finish);
printf(">>>>> delta: %f\n", delta );
printf(">>>>> Mflops: %f\n", flops );
free(A.elements);
free(B.elements);
free(C.elements);
return 0;
}
|
9,588 | #include <stdio.h>
#define N 10
// Element-wise vector add: block i computes c[i] = a[i] + b[i]
// (launched with N blocks of one thread each).
__global__ void sum(int *a,
                    int *b, int *c)
{
    int idx = blockIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Adds two N-element vectors on the GPU and prints the result.
// BUG FIX: the three device buffers were never released; they are now
// freed before returning.
int main( void ) {
    int host_a[N];
    int host_b[N];
    int host_c[N];
    // Inputs: a[i] = b[i] = i, so the expected output is c[i] = 2*i.
    for (int i=0; i<N; i++) {
        host_a[i] = i;
        host_b[i] = i;
    }
    int *dev_a, *dev_b, *dev_c;
    cudaMalloc(&dev_a, sizeof(int) * N);
    cudaMalloc(&dev_b, sizeof(int) * N);
    cudaMalloc(&dev_c, sizeof(int) * N);
    cudaMemcpy(dev_a, host_a, sizeof(int) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, sizeof(int) * N,
               cudaMemcpyHostToDevice);
    // One block per element.
    sum<<<N, 1>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(host_c, dev_c, sizeof(int) * N, cudaMemcpyDeviceToHost);
    for (int i=0; i<N; i++) {
        printf("%d ", host_c[i]);
    }
    printf("\n");
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
} |
9,589 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
// Element-wise sum of two flat 'size'-element arrays: C[i] = A[i] + B[i].
void CPU_Matrix_Add(const int* A,const int* B, int* C, const int& size){
    int idx = 0;
    while (idx < size) {
        C[idx] = A[idx] + B[idx];
        ++idx;
    }
}
// Element-wise difference of two flat 'size'-element arrays:
// C[i] = A[i] - B[i].
void CPU_Matrix_Minus(const int* A, const int* B, int* C, const int& size){
    int idx = 0;
    while (idx < size) {
        C[idx] = A[idx] - B[idx];
        ++idx;
    }
}
// Dense size x size row-major matrix product accumulated into C.
// C must be zero-initialized by the caller (results are added, not set).
void CPU_Matrix_Multiply(const int* A,const int* B,int* C, const int& size){
    for(int row=0;row<size;row++){
        for(int col=0;col<size;col++){
            int acc=C[row*size+col];
            for(int k=0;k<size;k++)
                acc+=A[row*size+k]*B[k*size+col];
            C[row*size+col]=acc;
        }
    }
}
// One thread per element: C[tid] = A[tid] + B[tid] (single-block launch).
// BUG FIX: 'size' was declared const int& — a kernel parameter passed by
// reference refers to HOST memory, which device code must never touch;
// kernel scalars must be passed by value. Call sites are unchanged.
__global__ void GPU_Matrix_Add(const int* A,const int* B, int* C, const int size){
    int id=threadIdx.x;
    C[id]=A[id]+B[id];
}
// One thread per element: C[tid] = A[tid] - B[tid] (single-block launch).
// BUG FIX: 'size' is now passed by value — a const int& kernel parameter
// points at host memory, which device code must never dereference.
__global__ void GPU_Matrix_Minus(const int* A, const int* B, int* C, const int size){
    int id=threadIdx.x;
    C[id]=A[id]-B[id];
}
// One thread per output element of the size x size product; thread id maps
// to (row, col) = (id / size, id % size).
// BUG FIXES: 'size' is now passed by value (a const int& kernel parameter
// points at host memory, so every dereference on the device was invalid),
// and the dot product is accumulated instead of overwritten — the original
// "C[id] = ..." kept only the k = size-1 term.
__global__ void GPU_Matrix_Multiply(const int* A,const int* B,int* C, const int size){
    int id=threadIdx.x;
    int i=id/size;
    int j=id%size;
    int acc=0;
    for(int k=0;k<size;k++)
        acc+=A[i*size+k]*B[k*size+j];
    C[id]=acc;
}
// Prints a size x size row-major matrix, one row per line, followed by a
// blank line.
void print(const int* m,const int& size){
    for(int row=0;row<size;row++){
        for(int col=0;col<size;col++)
            printf("%d ",m[row*size+col]);
        printf("\n");
    }
    printf("\n");
}
// Exercises the CPU and GPU matrix routines on a pair of 5x5 matrices and
// prints each result.
// BUG FIXES: the cudaStatus checks were inverted (cudaSuccess == 0, so
// "!cudaStatus" fired on SUCCESS and the error markers printed on every
// healthy run), and the final device-to-host copy had its arguments
// swapped (cudaMemcpy(dd, d, ..., cudaMemcpyDeviceToHost) named the
// device pointer as destination, so the GPU product was never read back).
int main()
{
    const int size=5;
    const int matrixSize=size*size;
    const int a[matrixSize]={ 1,0,0,0,0,
                              0,1,0,0,0,
                              0,0,1,0,0,
                              0,0,0,1,0,
                              0,0,0,0,1};
    const int b[matrixSize]={ 1,2,3,4,5,
                              0,1,0,0,0,
                              0,0,1,0,0,
                              0,0,0,1,0,
                              0,0,0,0,1};
    int c[matrixSize]={0};
    int d[matrixSize]={0};
    int *ad=0,*bd=0,*cd=0,*dd=0;
    cudaError_t cudaStatus;
    cudaStatus=cudaSetDevice(0);
    if(cudaStatus!=cudaSuccess) printf("-1");
    cudaStatus=cudaMalloc((void**)&ad,matrixSize*sizeof(int));
    cudaStatus=cudaMalloc((void**)&bd,matrixSize*sizeof(int));
    cudaStatus=cudaMalloc((void**)&cd,matrixSize*sizeof(int));
    cudaStatus=cudaMalloc((void**)&dd,matrixSize*sizeof(int));
    if(cudaStatus!=cudaSuccess) printf("-2");
    cudaStatus=cudaMemcpy(ad,a,matrixSize*sizeof(int),cudaMemcpyHostToDevice);
    cudaStatus=cudaMemcpy(bd,b,matrixSize*sizeof(int),cudaMemcpyHostToDevice);
    cudaStatus=cudaMemcpy(cd,c,matrixSize*sizeof(int),cudaMemcpyHostToDevice);
    cudaStatus=cudaMemcpy(dd,d,matrixSize*sizeof(int),cudaMemcpyHostToDevice);
    if(cudaStatus!=cudaSuccess) printf("-3");
    // CPU reference results.
    CPU_Matrix_Add(a,b,c,matrixSize);
    print(c,size);
    CPU_Matrix_Minus(a,b,c,matrixSize);
    print(c,size);
    CPU_Matrix_Multiply(a,b,d,size);
    print(d,size);
    // GPU results (one thread per element).
    GPU_Matrix_Add<<<1,matrixSize>>>(ad,bd,cd,matrixSize);
    cudaMemcpy(c,cd,matrixSize*sizeof(int),cudaMemcpyDeviceToHost);
    print(c,size);
    GPU_Matrix_Minus<<<1,matrixSize>>>(ad,bd,cd,matrixSize);
    cudaMemcpy(c,cd,matrixSize*sizeof(int),cudaMemcpyDeviceToHost);
    print(c,size);
    GPU_Matrix_Multiply<<<1,matrixSize>>>(ad,bd,dd,size);
    cudaMemcpy(d,dd,matrixSize*sizeof(int),cudaMemcpyDeviceToHost);
    print(d,size);
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
    cudaFree(dd);
    // cudaDeviceReset must be called before exiting in order for profiling
    // and tracing tools such as Nsight and Visual Profiler to show
    // complete traces.
    cudaStatus = cudaDeviceReset();
    return 0;
}
|
9,590 | #include "includes.h"
// Vertical 1-D convolution (valid mode): each output element i accumulates
// kL input samples spaced one row (iW) apart, weighted by its plane's
// filter taps. Grid-stride loop over the n output elements; y must be
// pre-initialized since results are added with +=.
__global__ void conv_vertical_naive_output(const int n, float *y, const float *x, const float *w, const int iH, const int iW, const int kL)
{
    const int oH = iH - kL + 1;
    const int step = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += step) {
        const int plane = i / (oH * iW);
        const int x_base = plane * iH * iW + i % (oH * iW);
        const int w_base = plane * kL;
        for (int k = 0; k < kL; k++) {
            y[i] += w[w_base + k] * x[x_base + k * iW];
        }
    }
} |
9,591 | #include "includes.h"
// Device code for ICP computation
// Currently working only on performing rotation and translation using cuda
#ifndef _ICP_KERNEL_H_
#define _ICP_KERNEL_H_
#define TILE_WIDTH 256
#endif // #ifndef _ICP_KERNEL_H_
// For each transformed point i, computes the Euclidean distance to its
// matched reference point data_*[index_d[i]] and stores it in
// distance_d[i]. One thread per point.
__global__ void CalculateDistanceAllPoints(double * data_x_d, double * data_y_d, double * data_z_d, double * transformed_data_x_d, double * transformed_data_y_d, double * transformed_data_z_d, int * index_d, double * distance_d, int size_data)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if(i >= size_data)
        return;
    int match = index_d[i];
    double dx = data_x_d[match] - transformed_data_x_d[i];
    double dy = data_y_d[match] - transformed_data_y_d[i];
    double dz = data_z_d[match] - transformed_data_z_d[i];
    distance_d[i] = sqrt(pow(dx,2) + pow(dy,2) + pow(dz,2));
} |
9,592 | #include "includes.h"
using namespace std;
// In-place: replaces each x[i] by its squared deviation from *mean,
// (x[i] - *mean)^2, using a thread-strided loop within a single block.
__global__ void variance(int* n, double *x, double *mean)
{
    const int count = *n;
    const double mu = *mean;
    for (int i = threadIdx.x; i < count; i += blockDim.x) {
        const double dev = x[i] - mu;
        x[i] = dev * dev;
    }
} |
9,593 | // Device code
///////////////////////////////////////////////////////////////////////////////
//! Kernel to modify vertex positions
//! @param data data in global memory
///////////////////////////////////////////////////////////////////////////////
// One simulation step for one agent, executed by the (x, y) thread.
//
// Data layout as used below:
//   agent[i] : .x,.y = position, .z = heading (radians), .w = speed
//   ids[i]   : .x = agent id written into d_world, .y = status
//              (1 = active; reset to -1 here otherwise; 2/3 = migrate to the
//              previous/next node), .w = per-agent step counter
//   d_world  : flat occupancy grid, row-major with stride world_width
//              (0 = free cell, otherwise the occupying agent's id)
//
// Movement rule: try to advance along the current heading; if the target cell
// is occupied, retry at +45-degree increments (counter-clockwise) up to 315
// degrees; if every direction is blocked, advance anyway (collision allowed)
// and set ids.w = 9. Near the world border the heading is turned +90 degrees
// ("bounce") and the occupancy check is skipped for that step.
//
// NOTE(review): there is no guard on x/y, yet the launcher rounds the grid up
// (agent_width/8 + 1 blocks per axis), so edge threads index past the agent
// arrays — out of bounds; confirm and add a bounds check. Also, threads
// read/write shared d_world cells without atomics, so two agents can claim
// the same cell within one step.
__global__ void kernel(float4 *agent, float4 *ids, int *d_world, int world_width, int world_height, int agent_width, int world_height_node, int pid)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int y = blockIdx.y*blockDim.y + threadIdx.y;
float PI = 3.14159265358979323846;
int rebote = 0;      // "bounce" flag: set when the agent turns away from the border
int dist_prev = 1;   // look-ahead multiplier for the occupancy probe
int limit_width = world_width/6;    // border margin on x
int limit_height = world_height/6;  // border margin on y
// Update the status of agents that changed node (status 2/3) in the last
// iteration: anything not active (.y != 1) is reset to -1.
if(ids[y*agent_width+x].y != 1 )
ids[y*agent_width+x].y = -1;
ids[y*agent_width+x].w += 1;
// Agent movement.
// First option: keep the same direction; the world is checked to see whether
// the next coordinates are available, otherwise the agent rotates 45 degrees
// counter-clockwise and retries.
if(ids[y*agent_width+x].y == 1)// check that the agent is active
{
// Check whether the next coordinate is near the border:
// next position = current position + (cos(theta) * velocity), probed
// two steps ahead (factor 2).
int ccx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z) * 2 * agent[y*agent_width+x].w) );
int ccy = round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z) * 2 * agent[y*agent_width+x].w) );
if( ccx < limit_width || ccx > world_width - limit_width || ccy < limit_height || ccy > world_height - limit_height )
{
// If the next coordinate would leave the world, turn 90 degrees ("bounce").
agent[y*agent_width+x].z += PI/2;
rebote = 1;
}
// Compute the current cell so it can be marked as free after moving.
// NOTE(review): the "- 1" row offset suggests a 1-based y convention — confirm.
int cx_old = round( agent[y*agent_width+x].x ) ;
int cy_old = ( round( agent[y*agent_width+x].y ) - 1 ) * world_width ;
if ( cy_old < 0 )
cy_old = 0 ;
int coord_old = cy_old + cx_old ;
// Check that the new position is not occupied; on a bounce an exception is
// made and the collision is allowed. If the next position is occupied, keep
// rotating 45 degrees counter-clockwise. world_width is used because the 2-D
// world is flattened into a 1-D array.
int cx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z) * dist_prev *agent[y*agent_width+x].w) ) ;
int cy = ( round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z) * dist_prev *agent[y*agent_width+x].w) ) - 1 )* world_width ;
int coord = cx + cy ;
if( d_world[coord] == 0 || rebote )
{
agent[y*agent_width+x].x = agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z) *agent[y*agent_width+x].w) ;
agent[y*agent_width+x].y = agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z) *agent[y*agent_width+x].w) ;
d_world[coord] = ids[y*agent_width+x].x ;
d_world[coord_old] = 0;
}
else{
// Blocked: try heading + PI/4.
cx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + PI/4) * dist_prev * agent[y*agent_width+x].w) ) ;
cy = ( round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + PI/4) * dist_prev *agent[y*agent_width+x].w) ) -1 ) * world_width ;
coord = cy + cx ;
if( d_world[coord] == 0)
{
agent[y*agent_width+x].x = agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + PI/4) *agent[y*agent_width+x].w) ;
agent[y*agent_width+x].y = agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + PI/4) *agent[y*agent_width+x].w) ;
d_world[coord] = ids[y*agent_width+x].x ;
d_world[coord_old] = 0;
}
else{
// Blocked: try heading + PI/2.
cx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 2*PI/4) * dist_prev *agent[y*agent_width+x].w) ) ;
cy = ( round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 2*PI/4) * dist_prev *agent[y*agent_width+x].w) ) - 1) * world_width;
coord = cy + cx ;
if( d_world[coord] == 0)
{
agent[y*agent_width+x].x = agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 2*PI/4) *agent[y*agent_width+x].w) ;
agent[y*agent_width+x].y = agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 2*PI/4) *agent[y*agent_width+x].w) ;
d_world[coord] = ids[y*agent_width+x].x ;
d_world[coord_old] = 0;
}
else{
// Blocked: try heading + 3*PI/4.
cx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 3*PI/4) * dist_prev *agent[y*agent_width+x].w) ) ;
cy = ( round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 3*PI/4) * dist_prev *agent[y*agent_width+x].w) ) -1 ) * world_width ;
coord = cy + cx ;
if( d_world[coord] == 0)
{
agent[y*agent_width+x].x = agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 3*PI/4) *agent[y*agent_width+x].w) ;
agent[y*agent_width+x].y = agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 3*PI/4) *agent[y*agent_width+x].w) ;
d_world[coord] = ids[y*agent_width+x].x ;
d_world[coord_old] = 0;
}
else{
// Blocked: try heading + PI.
cx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + PI) * dist_prev *agent[y*agent_width+x].w) ) ;
cy = ( round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + PI) * dist_prev *agent[y*agent_width+x].w) ) -1 ) * world_width;
coord = cy + cx ;
if( d_world[coord] == 0)
{
agent[y*agent_width+x].x = agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + PI) *agent[y*agent_width+x].w) ;
agent[y*agent_width+x].y = agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + PI) *agent[y*agent_width+x].w) ;
d_world[coord] = ids[y*agent_width+x].x ;
d_world[coord_old] = 0;
}
else{
// Blocked: try heading + 5*PI/4.
cx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 5*PI/4) * dist_prev *agent[y*agent_width+x].w) ) ;
cy = ( round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 5*PI/4) * dist_prev *agent[y*agent_width+x].w) ) - 1 ) * world_width;
coord = cy + cx ;
if( d_world[coord] == 0)
{
agent[y*agent_width+x].x = agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 5*PI/4) *agent[y*agent_width+x].w) ;
agent[y*agent_width+x].y = agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 5*PI/4) *agent[y*agent_width+x].w) ;
d_world[coord] = ids[y*agent_width+x].x ;
d_world[coord_old] = 0;
}
else{
// Blocked: try heading + 3*PI/2.
cx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 6*PI/4) * dist_prev *agent[y*agent_width+x].w) ) ;
cy = ( round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 6*PI/4) * dist_prev *agent[y*agent_width+x].w) ) -1 ) * world_width;
coord = cy + cx ;
if( d_world[coord] == 0)
{
agent[y*agent_width+x].x = agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 6*PI/4) *agent[y*agent_width+x].w) ;
agent[y*agent_width+x].y = agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 6*PI/4) *agent[y*agent_width+x].w) ;
d_world[coord] = ids[y*agent_width+x].x ;
d_world[coord_old] = 0;
}
else{
// Blocked: try heading + 7*PI/4 (last free-cell attempt).
cx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 7*PI/4) * dist_prev *agent[y*agent_width+x].w) ) ;
cy = ( round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 7*PI/4) * dist_prev *agent[y*agent_width+x].w) ) - 1 ) * world_width;
coord = cy + cx ;
if( d_world[coord] == 0)
{
agent[y*agent_width+x].x = agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z + 7*PI/4) *agent[y*agent_width+x].w) ;
agent[y*agent_width+x].y = agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z + 7*PI/4) *agent[y*agent_width+x].w) ;
d_world[coord] = ids[y*agent_width+x].x ;
d_world[coord_old] = 0;
}
else{
// If every surrounding position is occupied, the agent could stay put and
// re-mark its own cell as occupied:
//d_world[coord_old] = 1;
// Instead: advance in the original direction even though it collides.
int cx = round( agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z) * dist_prev *agent[y*agent_width+x].w) ) ;
int cy = ( round( agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z) * dist_prev *agent[y*agent_width+x].w) ) - 1 )* world_width ;
int coord = cx + cy ;
agent[y*agent_width+x].x = agent[y*agent_width+x].x + ( cos(agent[y*agent_width+x].z) *agent[y*agent_width+x].w) ;
agent[y*agent_width+x].y = agent[y*agent_width+x].y + ( sin(agent[y*agent_width+x].z) *agent[y*agent_width+x].w) ;
d_world[coord] = ids[y*agent_width+x].x ;
d_world[coord_old] = 0;
ids[y*agent_width+x].w = 9;  // marks a forced (colliding) move
}
} //7*PI/4
} //6*PI/4
}//5*PI/4
}//PI
}//3*PI/4
}//PI/2
}//PI/4
// Check whether the agent should be handled by another node in the next
// iteration, according to its y coordinate relative to this node's slab.
if( round(agent[y*agent_width+x].y) < (pid * world_height_node) )
{
ids[y*agent_width+x].y = 2;  // migrate to the previous node
d_world[coord_old] = 0; // the world cell becomes empty
}
else if( round(agent[y*agent_width+x].y) > ( (pid + 1) * world_height_node ) )
{
ids[y*agent_width+x].y = 3;  // migrate to the next node
d_world[coord_old] = 0; // the world cell becomes empty
}
}//if active
}
// CUDA computation on each node
// No MPI here, only CUDA
// Host-side launcher: runs one simulation step of `kernel` over the agent grid.
// Fixed 8x8 thread blocks; the grid is over-provisioned by one block per axis
// so every agent cell is covered.
// NOTE(review): the kernel has no x/y bounds guard, so the extra edge threads
// index past the agent arrays — confirm and guard in the kernel.
extern "C" void launch_kernel(float4 *d_agents_in, float4 *d_agents_ids, int *d_world, int world_width, int world_height, int agent_width, int agent_height, int world_height_node, int pid)
{
const int bw = 8;
const int bh = 8;
dim3 threads(bw, bh, 1);
dim3 blocks(agent_width / threads.x + 1, agent_height / threads.y + 1, 1);
kernel<<<blocks, threads>>>(d_agents_in, d_agents_ids, d_world, world_width, world_height, agent_width, world_height_node, pid);
}
/*
extern "C" void launch_kernel_init(float4* pos_ini, float4* d_world, unsigned int agent_width, unsigned int window_width)
{
// execute the kernel
dim3 block(8, 8, 1);
dim3 grid(agent_width / block.x, agent_width / block.y, 1);
kernel_init<<< grid, block>>>(pos_ini, d_world, agent_width, window_width);
}
*/
|
9,594 | #include "includes.h"
// Backpropagation for a "One" layer: element-wise product of the current
// delta with the layer weights, written into the next delta.
// Launch layout: blockIdx.x = row, blockIdx.y = channel; the block's threads
// stride across the columns of that (channel, row) slice.
__global__ void g_One_backpropagation( float* _curDelta, float* _w, float* _nextDelta, int rows, int cols, int channels)
{
const int row     = blockIdx.x;
const int channel = blockIdx.y;
const int base    = channel * rows * cols + row * cols;
const int wBase   = channel * cols;    // weights are per-channel, per-column
// Each thread handles every blockDim.x-th column of this row.
for (int col = threadIdx.x; col < cols; col += blockDim.x) {
_nextDelta[base + col] = _curDelta[base + col] * _w[wBase + col];
}
}
9,595 | #include <stdio.h>
#define N 800
// CPU reference: element-wise sum of two N x N int matrices, row-major.
void add(int *X, int *Y, int *Z)
{
for (int r = 0; r < N; r++) {
const int base = r * N;
for (int c = 0; c < N; c++) {
Z[base + c] = X[base + c] + Y[base + c];
}
}
}
// Element-wise sum of two N x N int matrices on the device, flattened.
//
// Fix: the original indexed with threadIdx.x/.y only (no blockIdx, no bound
// check), so the 1-D <<<32, 32>>> launch in main() computed just 32 of the
// N*N elements. This version flattens the thread id over a 1-D grid of
// (possibly 2-D) blocks and grid-strides over all N*N elements, so it is
// correct for any launch configuration, including the existing one.
__global__ void add_kernel(int *X, int *Y, int *Z)
{
int tid = blockIdx.x * (blockDim.x * blockDim.y)
+ threadIdx.y * blockDim.x + threadIdx.x;
int nthreads = gridDim.x * blockDim.x * blockDim.y;
for (int k = tid; k < N * N; k += nthreads)
Z[k] = X[k] + Y[k];
}
// Driver: builds two N x N matrices, adds them on the GPU, times the kernel,
// and reports elapsed time plus a checksum of the result.
//
// Fixes vs. the original:
//  * host matrices moved from the stack to the heap (three 800x800 int
//    arrays are ~7.3 MB and overflow a typical stack),
//  * CUDA allocations and the kernel launch are error-checked,
//  * the checksum that was computed and then discarded is now printed,
//  * events are destroyed and main returns 0 on success instead of -1.
int main()
{
const int total = N * N;
const size_t bytes = (size_t)total * sizeof(int);
// Host matrices (heap-allocated; see note above).
int *X = (int*)malloc(bytes);
int *Y = (int*)malloc(bytes);
int *Z = (int*)malloc(bytes);
if (X == NULL || Y == NULL || Z == NULL) {
fprintf(stderr, "host allocation failed\n");
return 1;
}
for (int i = 0; i < N; i++)
for (int j = 0; j < N; j++) {
X[i*N+j] = 0;
Y[i*N+j] = 1;
}
int *d_X, *d_Y, *d_Z;
cudaError_t err = cudaMalloc((void**)&d_X, bytes);
if (err == cudaSuccess) err = cudaMalloc((void**)&d_Y, bytes);
if (err == cudaSuccess) err = cudaMalloc((void**)&d_Z, bytes);
if (err != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed: %s\n", cudaGetErrorString(err));
return 1;
}
// Events bracket only the kernel launch for timing.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaMemcpy(d_X, X, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_Y, Y, bytes, cudaMemcpyHostToDevice);
dim3 dimGrid(32, 1, 1);
dim3 dimBlock(32, 1, 1);
cudaEventRecord(start);
add_kernel<<<dimGrid, dimBlock>>>(d_X, d_Y, d_Z);
cudaEventRecord(stop);
err = cudaGetLastError();  // catches bad launch configurations
if (err != cudaSuccess)
fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
cudaMemcpy(Z, d_Z, bytes, cudaMemcpyDeviceToHost);  // blocking: syncs with the kernel
cudaEventSynchronize(stop);
float ms = 0;
cudaEventElapsedTime(&ms, start, stop);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(d_X);
cudaFree(d_Y);
cudaFree(d_Z);
long long sum = 0;
for (int k = 0; k < total; k++)
sum += Z[k];
printf("Checksum: %lld (expected %d)\n", sum, total);
printf("Time used: %f milliseconds\n", ms);
free(X);
free(Y);
free(Z);
return 0;
}
|
9,596 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Prints each thread's 3-D thread and block indices from the device.
// Device-side printf output is buffered on the GPU and appears at the next
// synchronizing call (cudaDeviceSynchronize in main).
__global__ void hello_cuda() {
printf("threadIdx = (%d, %d, %d), blockIdx = (%d, %d, %d)\n", threadIdx.x, threadIdx.y,
threadIdx.z, blockIdx.x, blockIdx.y, blockIdx.z);
}
// Launches hello_cuda over a 16x4 logical thread layout (8x2 threads per
// block, 2x2 blocks) and waits for the device printf output to flush.
int main() {
const int totalX = 16;   // total threads along x
const int totalY = 4;    // total threads along y
dim3 threadsPerBlock(8, 2);
dim3 numBlocks(totalX / threadsPerBlock.x, totalY / threadsPerBlock.y);
hello_cuda<<<numBlocks, threadsPerBlock>>>();
cudaDeviceSynchronize();   // wait for the kernel; flushes device-side printf
cudaDeviceReset();
return 0;
}
9,597 | #include <stdio.h>
#include <stdlib.h>
#define N 20
#define BLOCK_DIM 20
double mat_a[N][N]; // matrix A (input)
double mat_b[N][N]; // matrix B (input)
double mat_result[N][N]; // matrix C = A + B (output)
// File-scope loop counters, shared by the init/print helpers below.
int i,j,m;
// When non-zero, the result matrices are printed after the sum.
int flag;
// Device kernel: element-wise sum C = A + B over N x N doubles stored
// row-major in flat arrays. Threads outside the matrix do nothing.
__global__ void suma(double *A, double *B, double *C){
const int col = blockIdx.x * blockDim.x + threadIdx.x;
const int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < N && row < N) {
const int idx = row * N + col;
C[idx] = A[idx] + B[idx];
}
}
void inicializa_matrices();
void imprime_matrices();
// Driver: fills A and B, sums them on the GPU into mat_result, optionally
// prints all three matrices (controlled by argv[1]), then releases memory.
//
// Fixes vs. the original:
//  * argv[1] is only read when present — the original dereferenced it
//    unconditionally and segfaulted when run with no arguments,
//  * grid size uses integer ceil-divide — ceil(N/dimBlock.x) truncated
//    before ceil() was applied, so it was a no-op.
int main(int argc, char *argv[]){
// Initialize input matrices A and B.
inicializa_matrices();
// Print results? Optional argv[1]; defaults to "no printing" when absent.
flag = (argc > 1) ? atoi(argv[1]) : 0;
// Device-side buffers.
int size = N*N*sizeof(double);
double *pA, *pB, *pC;
cudaMalloc((void**)&pA, size);
cudaMalloc((void**)&pB, size);
cudaMalloc((void**)&pC, size);
// Copy the input matrices from host to device.
cudaMemcpy(pA, mat_a, size, cudaMemcpyHostToDevice);
cudaMemcpy(pB, mat_b, size, cudaMemcpyHostToDevice);
dim3 dimBlock(BLOCK_DIM, BLOCK_DIM);
// Ceil-divide so any N is fully covered.
dim3 dimGrid((N + BLOCK_DIM - 1) / BLOCK_DIM, (N + BLOCK_DIM - 1) / BLOCK_DIM);
suma<<<dimGrid,dimBlock>>>(pA,pB,pC);
// Blocking copy: also synchronizes with the kernel.
cudaMemcpy(mat_result, pC, size, cudaMemcpyDeviceToHost);
if (flag != 0){
imprime_matrices();
}
cudaFree(pA);
cudaFree(pB);
cudaFree(pC);
return 0;
}
// Fill the global input matrices: A[i][j] = i + j, B[i][j] = i * j.
// Both are filled in a single pass; uses the file-scope counters i and j,
// matching the rest of the file.
void inicializa_matrices()
{
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++) {
mat_a[i][j] = i + j;
mat_b[i][j] = i*j;
}
}
}
void imprime_matrices()
{ printf("Matriz A \n");
for (i = 0; i < N; i++) {
printf("\n");
for (j = 0; j < N; j++)
printf("%8.2f ", mat_a[i][j]);
}
printf("\n\n\n");
printf("Matriz B \n");
for (i = 0; i < N; i++) {
printf("\n");
for (j = 0; j < N; j++)
printf("%8.2f ", mat_b[i][j]);
}
printf("\n\n\n");
printf("Matriz C = A + B\n");
for (i = 0; i < N; i++) {
printf("\n");
for (j = 0; j < N; j++)
printf("%8.2f ", mat_result[i][j]);
}
printf("\n\n");
}
|
9,598 | #include "includes.h"
// Negative log-likelihood gather: y[i] = -x[i * x_stride + target[i]],
// one thread per sample. No bounds guard, so the launch must match the
// batch size exactly — NOTE(review): confirm at the call site.
__global__ void nllLoss(float *x, int x_stride, float *y, int* target) {
const int sample = threadIdx.x + blockIdx.x * blockDim.x;
y[sample] = -x[sample * x_stride + target[sample]];
}
9,599 | #include <stdio.h>
#include <stdlib.h>
// Element-wise vector add: Res[b] = Vec1[b] + Vec2[b]. Launched as
// <<<length+1, 1>>>, so blockIdx.x indexes the element directly.
// Fix: the original computed Vec2 + Vec2 and never read Vec1.
__global__ void VecAdd(int *Vec1, int *Vec2, int *Res){
Res[blockIdx.x] = Vec1[blockIdx.x] + Vec2[blockIdx.x];
}
// Adds two (length+1)-element integer vectors on the GPU and prints the
// result, one value per tab-separated column.
int main(){
int n, k;
size_t bytes;
int *d_a;
int *d_b;
int *d_res;
n = 10;
bytes = (n + 1) * sizeof(int);   // vectors hold indices 0..n inclusive
int* a = (int*)malloc(bytes);
int* b = (int*)malloc(bytes);
int* res = (int*)malloc(bytes);
for (k = 0; k <= n; k++) {
a[k] = k;
b[k] = k;
}
cudaMalloc((void**)&d_a, bytes);
cudaMalloc((void**)&d_b, bytes);
cudaMalloc((void**)&d_res, bytes);
cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, bytes, cudaMemcpyHostToDevice);
// One single-thread block per element.
VecAdd<<<n + 1, 1>>>(d_a, d_b, d_res);
cudaMemcpy(res, d_res, bytes, cudaMemcpyDeviceToHost);
for (k = 0; k <= n; k++) {
printf("%d\t", res[k]);
}
printf("\n");
free(a);
free(b);
free(res);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_res);
return 0;
}
|
9,600 | #include "includes.h"
// Warm-up kernel: offset vector add, C[i + offset] = A[i] + B[i].
// The guard protects the shifted store index against running past n.
__global__ void warmup(float *A, float *B, float *C, const int n, int offset)
{
const unsigned int src = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int dst = src + offset;
if (dst < n) {
C[dst] = A[src] + B[src];
}
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.