serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
701
|
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// functie kernel prin care adunam doi arrays
// Element-wise vector addition: x[i] += y[i] for i in [0, n).
// Expected launch: 1-D grid with at least n threads total; threads past
// the end are guarded out below.
__global__ void vector_add(float *x, float *y, int n) {
    // Flat global index: block offset plus lane within the block
    // (equivalent to the loop variable of a serial for).
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    // Bounds guard: the grid is usually rounded up past n.
    if (i < n) {
        x[i] += y[i];
    }
}
// Driver: adds two 65536-element vectors in unified memory on the GPU
// and prints the first ten results.
// Fixes vs. original: cudaMallocManaged is now checked via its returned
// cudaError_t (the pointer values alone are not a reliable failure
// signal), and launch-configuration errors are caught with
// cudaGetLastError(). Comments translated to English.
int main(void)
{
    const int num_elements = 1 << 16;
    const int num_bytes = num_elements * sizeof(float);
    float *x = NULL, *y = NULL;
    // Unified memory, accessible from both host and device.
    if (cudaMallocManaged(&x, num_bytes) != cudaSuccess ||
        cudaMallocManaged(&y, num_bytes) != cudaSuccess) {
        fprintf(stderr, "[HOST & DEVICE] cudaMallocManaged failed\n");
        return 1;
    }
    // Initialize x and y on the host.
    for (int i = 0; i < num_elements; i++) {
        x[i] = 4;
        y[i] = 2;
    }
    // Threads per block.
    const size_t block_size = 256;
    // Number of blocks, rounded up so a partial block covers the tail.
    size_t blocks_no = (num_elements + block_size - 1) / block_size;
    vector_add<<<blocks_no, block_size>>>(x, y, num_elements);
    // Catch bad-launch errors immediately (a launch itself returns void).
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "vector_add launch failed\n");
        return 1;
    }
    // Wait for the GPU threads to finish (analogous to pthread_join).
    cudaDeviceSynchronize();
    for (int i = 0; i < 10; ++i) {
        printf("Result %d: %1.1f + %1.1f = %1.3f\n", i, x[i] - y[i],
               y[i], x[i]);
    }
    // Release the unified-memory buffers.
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
702
|
#include "includes.h"
// True when the thread maps to a cell on the domain boundary: first or
// last lane of the block (x edges), first block, or block 479 (y edges).
// The parameters shadow the CUDA builtins on purpose — callers pass
// blockIdx.x / blockDim.x / threadIdx.x explicitly.
// NOTE(review): the bottom edge is hard-coded as y == 479, i.e. a fixed
// grid of 480 blocks (rows) is assumed — confirm against the launch
// configuration.
__device__ bool checkBoundary(int blockIdx, int blockDim, int threadIdx){
int x = threadIdx;
int y = blockIdx;
return (x == 0 || x == (blockDim-1) || y == 0 || y == 479);
}
// Central-difference divergence of a 2-D field stored flat in u_dimX /
// u_dimY, scaled by r_sStep (presumably 1 / (2 * spatial step) — TODO
// confirm at the call site). Launch contract implied by the indexing:
// one block per row, so blockDim.x is the row width.
__global__ void mDivergence_TwoDim(float *div, float *u_dimX, float *u_dimY, float r_sStep) {
// Boundary cells are skipped so the +/-1 and +/-row neighbours below
// never index outside the arrays.
if(checkBoundary(blockIdx.x, blockDim.x, threadIdx.x)) return;
int Idx = blockIdx.x * blockDim.x + threadIdx.x;
int Left = Idx - 1;
int Right = Idx + 1;
// One row up / down in the flattened layout (row stride == blockDim.x).
int Top = Idx + blockDim.x;
int Bottom = Idx - blockDim.x;
// div = (du/dx + dv/dy) * r_sStep via central differences.
div[Idx] = ((u_dimX[Right]-u_dimX[Left])+(u_dimY[Top]-u_dimY[Bottom]))*r_sStep;
}
|
703
|
#define RADIUS 2
#define BLOCK_SIZE 32 // needs to be equal to blockDim.x
__constant__ double WEIGHT = 1. / (2.*RADIUS + 1);
__global__ void smoothing_stencil_1d(double *in, double *out, int length)
/**
Smoothing stencil in one dimension using average over the radius
defined by RADIUS .
Inspired by http://www.orangeowlsolutions.com/archives/1119 .
Grid/block contract: blockDim.x must equal BLOCK_SIZE.
NOTE(review): there is no `gindex < length` guard, so the launch must
tile `length` exactly (gridDim.x * BLOCK_SIZE == length) or the last
block reads/writes out of bounds — confirm at the call site.
*/
{
// Shared tile: BLOCK_SIZE interior cells plus a RADIUS-wide halo on
// each side.
__shared__ double temp[BLOCK_SIZE + 2 * RADIUS];
int gindex = threadIdx.x + blockIdx.x * blockDim.x;
// Local index shifted past the left halo.
int lindex = threadIdx.x + RADIUS;
// Read input elements into shared memory
temp[lindex] = in[gindex];
// The first RADIUS threads additionally fill both halos.
if (threadIdx.x < RADIUS) {
if (gindex < RADIUS) {
// Left halo falls before the array start: clamp to in[0].
temp[lindex - RADIUS] = in[0];
} else {
temp[lindex - RADIUS] = in[gindex - RADIUS];
}
// NOTE(review): this clamps the right halo for every thread of the
// last block (gindex >= length - BLOCK_SIZE), not only the final
// RADIUS cells — verify this is the intended edge handling.
if (gindex >= length - BLOCK_SIZE) {
temp[lindex + BLOCK_SIZE] = in[length-1];
} else {
temp[lindex + BLOCK_SIZE] = in[gindex + BLOCK_SIZE];
}
}
// All interior/halo writes must land before any thread reads the tile.
__syncthreads();
// Apply the stencil
double result = 0;
for (int offset = -RADIUS; offset <= RADIUS; offset++)
result += temp[lindex + offset];
// Store the result
out[gindex] = result * WEIGHT;
}
#define W4 0.0001338306246147
#define W3 0.0044318616200313
#define W2 0.0539911274207044
#define W1 0.2419714456566007
#define W0 0.3989434693560978
// __device__ __constant__ double GAUSSIAN_SMOOTHING_WEIGHT[] =
// {
// 0.0001338306246147,
// 0.0044318616200313,
// 0.0539911274207044,
// 0.2419714456566007,
// 0.3989434693560978,
// 0.2419714456566007,
// 0.0539911274207044,
// 0.0044318616200313,
// 0.0001338306246147,
// };
#define GAUSSIAN_FILTER_RADIUS 4
#define GAUSSIAN_BLOCK_SIZE 32
// template<int GAUSSIAN_FILTER_RADIUS, int GAUSSIAN_BLOCK_SIZE>
// 1-D Gaussian smoothing with radius GAUSSIAN_FILTER_RADIUS (= 4) using
// a shared-memory tile; the taps W0..W4 are the sampled Gaussian
// weights defined above.
// Grid/block contract: blockDim.x must equal GAUSSIAN_BLOCK_SIZE.
// NOTE(review): __syncthreads() executes inside a grid-stride loop; if
// `length` is not a multiple of blockDim.x * gridDim.x, threads leave
// the loop at different trip counts and the barrier becomes divergent —
// confirm the launch configuration rules this out.
// Fixes vs. original: removed the unused local `offset` and the dead
// commented-out weight table / loop.
__global__ void gaussian_smoothing_1d(double *in, double *out, int length)
{
    // Tile: GAUSSIAN_BLOCK_SIZE interior cells plus a radius-wide halo
    // on each side.
    __shared__ double temp[GAUSSIAN_BLOCK_SIZE + 2*GAUSSIAN_FILTER_RADIUS];
    int lindex;
    for (int gindex = blockIdx.x * blockDim.x + threadIdx.x;
         gindex < length;
         gindex += blockDim.x * gridDim.x)
    {
        lindex = threadIdx.x + GAUSSIAN_FILTER_RADIUS;
        temp[lindex] = in[gindex];
        // The first RADIUS threads also fill both halos.
        if (threadIdx.x < GAUSSIAN_FILTER_RADIUS) {
            // left edge
            if (gindex < GAUSSIAN_FILTER_RADIUS) {
                // local edge is the global edge: clamp to the first sample
                temp[lindex - GAUSSIAN_FILTER_RADIUS] = in[0];
            } else {
                temp[lindex - GAUSSIAN_FILTER_RADIUS] =
                    in[gindex - GAUSSIAN_FILTER_RADIUS];
            }
            // right edge
            if (gindex >= length - GAUSSIAN_BLOCK_SIZE) {
                // local edge is the global edge: clamp to the last sample
                temp[lindex + GAUSSIAN_BLOCK_SIZE] = in[length - 1];
            } else {
                temp[lindex + GAUSSIAN_BLOCK_SIZE] = in[gindex + GAUSSIAN_BLOCK_SIZE];
            }
        }
        // All tile writes must complete before any thread applies the filter.
        __syncthreads();
        // Symmetric 9-tap convolution about the centre sample.
        out[gindex] = temp[lindex - 4] * W4
            + temp[lindex - 3] * W3
            + temp[lindex - 2] * W2
            + temp[lindex - 1] * W1
            + temp[lindex] * W0
            + temp[lindex + 1] * W1
            + temp[lindex + 2] * W2
            + temp[lindex + 3] * W3
            + temp[lindex + 4] * W4;
    }
}
|
704
|
/*
This is a demonstration of how crazily simple using `thrust` is compared
to using the lower-level runtime API.
*/
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/sort.h>
#include <ctime>
#include <cstdio>
// Pseudo-random digit in [0, 9], driven by the C library PRNG
// (deterministic for a given srand seed).
int myrand() {
    int raw = rand();
    return raw % 10;
}
// Demo driver: fill a host vector with random digits, sort it on the
// device via thrust, and print the sorted result tab-separated.
int main() {
    const int count = 1024;
    // Host-side buffer of `count` random values in [0, 9].
    thrust::host_vector<int> h(count);
    thrust::generate(h.begin(), h.end(), myrand);
    // Construction from a host_vector performs the host->device copy
    // (essentially a cudaMemcpy under the hood).
    thrust::device_vector<int> d = h;
    // Sort the data on the device.
    thrust::sort(d.begin(), d.end());
    // Assignment copies the sorted data back to the host.
    h = d;
    // Print the results.
    for (int i = 0; i < count; ++i) {
        printf("%d\t", h[i]);
    }
}
|
705
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
//Number of elements of the inpu layers, that correspond to the number of pixels of a picture
#define PIXELS 3073
//Number of elements of the first hidden layer
#define HIDDEN_LAYER_1 2000
//Number of elements of the second hidden layer
#define HIDDEN_LAYER_2 450
//Number of elements of the output layer
#define OUTPUT_LAYER 10
//Learning rate of the algorithm
#define LEARNING_RATE 0.01
//Numbers of elements to use for training
#define ELEMENTS 1000
/*
* Function that given a vector and its size, print it
* In:
* f: vector of doubles to be printed
* N: size of the vector
*/
/*
 * Print every entry of a vector of doubles, one value per line.
 * In:
 *   f: vector to print
 *   N: number of elements
 */
void print_vector(double *f, int N){
    int i;
    //Walk the whole vector and print each value on its own line
    for (i = 0; i < N; ++i){
        printf("%f\n", f[i]);
    }
}
/*
* Function that given the value of the previous layer of a neural network, and its transition matrix
* to the new layer, calculates the net value of the layer
* In:
* input: vector that represents the previous layer of the layer to calculate
* matrix: transition matrix with the weigths of the neural network
* result: vector to store the results. It represents the layer to be calculated
* input_size: size of the previous layer
* hidden_size: size of the calculated layer
*
*/
/*
 * Forward-propagate one layer of the network: for each target neuron i,
 * result[i] = SUM_j input[j] * matrix[j*hidden_size + i], i.e. the
 * weight matrix is stored row-major with input_size rows and
 * hidden_size columns.
 * In:
 *   input: values of the previous layer (input_size elements)
 *   matrix: transition weights (input_size x hidden_size)
 *   result: output buffer for the new layer (hidden_size elements)
 *   input_size: size of the previous layer
 *   hidden_size: size of the layer being computed
 */
void get_layer(double *input, double *matrix, double *result,int input_size, int hidden_size){
    for (int i = 0; i < hidden_size; ++i){
        //Accumulate the weighted sum for target neuron i
        double acc = 0.0;
        for (int j = 0; j < input_size; ++j){
            acc += input[j] * matrix[j*hidden_size + i];
        }
        result[i] = acc;
    }
}
/*
* Function that apply the sigmoid function to every element of a vector
* In:
* double: vector to apply the signmoid function to every element
* N: size of the vector
*/
/*
 * Apply the logistic sigmoid f(x) = 1 / (1 + e^(-x)) to every element
 * of the vector, in place.
 * In:
 *   f: vector to transform
 *   N: number of elements
 */
void sigmoid(double *f, int N){
    int i;
    for (i = 0; i < N; ++i){
        //Squash each value into (0, 1)
        f[i] = 1.0 / (1.0 + exp(-f[i]));
    }
}
/*
* Function that normalize the input, so all the values are equally important. Normalize is the process
* to transform every element of a vector to its correspondent value beetwen 0 and 1
* In:
* c: vector with the numbers between 0 and 255, each one which corresponds to a pixel of the input image
* f: vector so save the normalized vector
* N: size of the vectors
*/
/*
 * Normalize raw pixel bytes into doubles via (x - Min) / (Max - Min),
 * with Min = 1 and Max = 255 as in the original formulation, so each
 * output lands (approximately) in [0, 1].
 * In:
 *   c: input bytes, one per pixel
 *   f: output buffer for the normalized values
 *   N: number of elements
 */
void process_input(unsigned char *c, double *f, int N){
    int i;
    for (i = 0; i < N; ++i){
        //Shift past the minimum, then scale by the range (255 - 1)
        f[i] = (c[i] - 1) / 254.0;
    }
}
/*
* Function that returns the index corresponding to the maximum element of an array
* In:
* f: vector of values
* N: size of vector
* Out:
* int corresponding to the index of the maximum value
*/
/*
 * Return the index of the largest element of the array; ties keep the
 * earliest index.
 * In:
 *   f: vector of values
 *   N: size of the vector
 * Out:
 *   index of the maximum value
 */
int max_index(double *f, int N){
    int best = 0;
    //Scan the remainder, promoting any strictly larger value
    for (int i = 1; i < N; ++i){
        if (f[i] > f[best])
            best = i;
    }
    return best;
}
/*
* Function that calculate the error of the neural network
* In:
* f: output vector of the neural network
* output: expected value
* N: size of the vector
* Out:
* double corresponding to the calculated error of the NN
*/
/*
 * Compute the total squared error of the network output against a
 * one-hot target: E = (1/2) * SUM (d_i - o_i)^2, where d_i is 1 at the
 * expected class index and 0 elsewhere.
 * In:
 *   f: output vector of the neural network
 *   output: index of the expected class
 *   N: size of the vector
 * Out:
 *   double corresponding to the calculated error of the NN
 * Fix: the original malloc'd a scratch one-hot array and never freed it
 * (a leak per call); the target value is now computed inline and no
 * allocation is needed.
 */
double error(double *f, int output, int N) {
    double sum = 0.0;
    for (int i = 0; i < N; ++i) {
        //Target is 1 for the expected class, 0 for every other neuron
        double target = (i == output) ? 1.0 : 0.0;
        double diff = target - f[i];
        sum += diff * diff;
    }
    return 0.5 * sum;
}
/*
* Function that calculate the error of the output layers
* In:
* f: value of the output neurons
* output: expected value
* N: size of the vector
* error_array: vector with the calculated error of every neuron
*
*/
/*
 * Compute the per-neuron error of the output layer against a one-hot
 * target: S_i = (d_i - o_i) * o_i * (1 - o_i), where d_i is 1 at the
 * expected class and 0 elsewhere.
 * In:
 *   f: values of the output neurons
 *   output: index of the expected class
 *   N: size of the vector
 *   error_array: receives the calculated error of every neuron
 * Fix: the original malloc'd a scratch one-hot array and never freed it
 * (a leak per call); the target value is now computed inline.
 */
void error_output(double *f, int output, int N, double *error_array){
    for (int i = 0; i < N; ++i){
        //Target is 1 for the expected class, 0 otherwise
        double target = (i == output) ? 1.0 : 0.0;
        error_array[i] = (target - f[i]) * f[i] * (1 - f[i]);
    }
}
/* Function that calculates the error of the hidden layers
* In:
* f: hidden layer calculated values
* error_array: error vector, to save the error of every neuron in the hidden layer
* next_layer_error: error vector of the next layer, neccessary to calculate the error of a hidden layer
* layer_size: size of the hidden layer
* next_size: size of the next layer
* transition_matrix: transition matrix to propagate values from hidden to next layer
*/
/* Back-propagate the error into a hidden layer.
 * The error of hidden neuron i is S_i = o_i * (1 - o_i) * SUM_j(W_ij * S_j),
 * where S_j is the error of the next layer.
 * In:
 *   f: values of the hidden layer
 *   error_array: receives the error of every neuron in this layer
 *   next_layer_error: error vector of the next layer
 *   layer_size: size of this hidden layer
 *   next_size: size of the next layer
 *   transition_matrix: weights from this layer to the next
 */
void error_hidden_layer(double *f, double* error_array, double *next_layer_error, int layer_size, int next_size, double *transition_matrix){
    for (int i = 0; i < layer_size; ++i){
        //Accumulate SUM_j W_ij * S_j over the next layer
        double backprop_sum = 0.0;
        for (int j = 0; j < next_size; ++j){
            backprop_sum += transition_matrix[i*next_size + j] * next_layer_error[j];
        }
        //Scale by the sigmoid derivative o_i * (1 - o_i)
        error_array[i] = f[i] * (1 - f[i]) * backprop_sum;
    }
}
/*
* Function that calculates the variation of weigths of a neural network
* In:
* error_vector: error vector of the layer
* gradient: variance of the weights for every element
* layer: value of the elements of the layer
* N: rows of the transition matrix
* M: columns ot the transition matrix
*
*/
/*
 * Compute the weight-update (gradient) matrix for one transition:
 * gradient[i][j] = LEARNING_RATE * S_j * O_i, where S_j is the error of
 * the target neuron and O_i the value of the source neuron.
 * In:
 *   error_vector: error vector of the target layer (M elements)
 *   gradient: receives the weight deltas (row-major, N x M)
 *   layer: values of the source layer (N elements)
 *   N: rows of the transition matrix
 *   M: columns of the transition matrix
 */
void calculate_error(double *error_vector, double* gradient, double *layer, int N, int M){
    for (int i = 0; i < N; ++i){
        //Row i corresponds to source neuron O_i
        for (int j = 0; j < M; ++j){
            gradient[i*M + j] = LEARNING_RATE * error_vector[j] * layer[i];
        }
    }
}
/*
* Function that sums two matrix and store it directly in the first matrix
* In:
* transition: first matrix
* gradient: second matrix
* N: rows of the matrix
* M: columns of the matrix
*/
/*
 * Element-wise in-place sum of two matrices: transition += gradient.
 * In:
 *   transition: first matrix (modified in place)
 *   gradient: second matrix
 *   N: rows of the matrices
 *   M: columns of the matrices
 */
void sum_matrix(double *transition, double *gradient, int N, int M){
    //Both matrices share a flat row-major layout, so one pass over the
    //N*M entries is equivalent to the nested row/column walk
    int total = N * M;
    for (int k = 0; k < total; ++k){
        transition[k] += gradient[k];
    }
}
/*
* Function that reads a file, stores every vector of it, and then apply backpropagation
* In:
* file: name of the file with the data
* buffer: vector where every pixel will be stored
* transition_matrix_1: transition vector from input layer to hidden layer 1
* transition_matrix_2: transition vector from hidden layer 1 to hidden layer 2
* transition_matrix_3: transition vector from hidden layer 2 to output layer
* elements: number of elements to use for training
*/
/*
 * Read training records from a binary file and run one backpropagation
 * pass per record, timing each iteration with CUDA events.
 * In:
 *   file: name of the file with the data
 *   buffer: scratch vector for the pixels of one record
 *   transition_matrix_1: weights from input layer to hidden layer 1
 *   transition_matrix_2: weights from hidden layer 1 to hidden layer 2
 *   transition_matrix_3: weights from hidden layer 2 to output layer
 *   elements: number of records to use for training
 * Fixes vs. original: fopen is checked for NULL and the handle is now
 * closed; the CUDA timing events are destroyed; and every
 * per-iteration allocation is freed (the three gradient matrices and
 * both hidden-layer error arrays leaked on every record).
 */
void read_file(char *file, unsigned char* buffer,double *transition_matrix_1,double *transition_matrix_2, double *transition_matrix_3, int elements){
    //The data file is in binary mode
    FILE *f = fopen(file, "rb");
    if (f == NULL) {
        fprintf(stderr, "Could not open %s\n", file);
        return;
    }
    //Expected output (class label) of the current record
    unsigned char expected_output;
    int i = 0;
    //Accumulated time, used to report the average per classification
    float total_time = 0.0;
    //Each record is exactly PIXELS bytes (one byte per pixel), so one
    //fread of PIXELS bytes consumes one full record
    while(1 == fread(buffer,PIXELS,1,f) && i < elements){
        //Start timing this record
        float tiempo1;
        cudaEvent_t inicio1, fin1;
        cudaEventCreate(&inicio1);
        cudaEventCreate(&fin1);
        cudaEventRecord( inicio1, 0 );
        //The first byte of the record is the expected class
        expected_output = buffer[0];
        //Once saved, that slot is reused as the bias input
        buffer[0] = 1;
        //Allocate the layers of the network
        double *hidden_layer_1 = (double*)malloc((HIDDEN_LAYER_1+1)*sizeof(double));
        double *hidden_layer_2 = (double*)malloc((HIDDEN_LAYER_2+1)*sizeof(double));
        double *output_layer = (double*)malloc(OUTPUT_LAYER*sizeof(double));
        //Normalize the data
        double *input = (double*)malloc(PIXELS*sizeof(double));
        process_input(buffer,input,PIXELS);
        //Forward pass: input -> hidden 1, then sigmoid, then set the bias
        get_layer(input,transition_matrix_1,hidden_layer_1,PIXELS,HIDDEN_LAYER_1);
        sigmoid(hidden_layer_1,HIDDEN_LAYER_1+1);
        hidden_layer_1[HIDDEN_LAYER_1] = 1;
        //Forward pass: hidden 1 -> hidden 2, then sigmoid, then set the bias
        get_layer(hidden_layer_1,transition_matrix_2,hidden_layer_2,HIDDEN_LAYER_1+1,HIDDEN_LAYER_2);
        sigmoid(hidden_layer_2,HIDDEN_LAYER_2+1);
        hidden_layer_2[HIDDEN_LAYER_2] = 1;
        //Forward pass: hidden 2 -> output, then sigmoid
        get_layer(hidden_layer_2,transition_matrix_3,output_layer,HIDDEN_LAYER_2+1,OUTPUT_LAYER);
        sigmoid(output_layer,OUTPUT_LAYER);
        //Error of the output layer
        double *errors_array = (double*)malloc(OUTPUT_LAYER* sizeof(double));
        error_output(output_layer,expected_output,OUTPUT_LAYER,errors_array);
        //Weight update for transition matrix 3
        double *transition_matrix_3_gradient = (double*)malloc((HIDDEN_LAYER_2+1)*OUTPUT_LAYER*sizeof(double));
        calculate_error(errors_array,transition_matrix_3_gradient,hidden_layer_2,HIDDEN_LAYER_2+1,OUTPUT_LAYER);
        //Weight update for transition matrix 2
        //NOTE(review): the layer_size/next_size arguments in the two
        //error_hidden_layer calls below look swapped relative to the
        //function's signature — kept as-is to preserve the original
        //behavior; confirm against the intended math.
        double *hidden_layer_array_2 = (double*)malloc((HIDDEN_LAYER_2+1)* sizeof(double));
        error_hidden_layer(hidden_layer_2,hidden_layer_array_2,errors_array,OUTPUT_LAYER,HIDDEN_LAYER_2+1,transition_matrix_3);
        double *transition_matrix_2_gradient = (double*)malloc((HIDDEN_LAYER_1+1)*HIDDEN_LAYER_2*sizeof(double));
        calculate_error(hidden_layer_array_2,transition_matrix_2_gradient,hidden_layer_1,HIDDEN_LAYER_1+1,HIDDEN_LAYER_2);
        //Weight update for transition matrix 1
        double *hidden_layer_array_1 = (double*)malloc((HIDDEN_LAYER_1+1)* sizeof(double));
        error_hidden_layer(hidden_layer_1,hidden_layer_array_1,hidden_layer_array_2,HIDDEN_LAYER_2,HIDDEN_LAYER_1+1,transition_matrix_2);
        double *transition_matrix_1_gradient = (double*)malloc(PIXELS*HIDDEN_LAYER_1*sizeof(double));
        calculate_error(hidden_layer_array_1,transition_matrix_1_gradient,input,PIXELS,HIDDEN_LAYER_1);
        //Apply every update only after all gradients are computed
        sum_matrix(transition_matrix_1,transition_matrix_1_gradient,PIXELS,HIDDEN_LAYER_1);
        sum_matrix(transition_matrix_2,transition_matrix_2_gradient,HIDDEN_LAYER_1+1,HIDDEN_LAYER_2);
        sum_matrix(transition_matrix_3,transition_matrix_3_gradient,HIDDEN_LAYER_2+1,OUTPUT_LAYER);
        //Next record
        ++i;
        //Release everything allocated for this iteration
        free(hidden_layer_1);
        free(hidden_layer_2);
        free(output_layer);
        free(input);
        free(errors_array);
        free(hidden_layer_array_1);
        free(hidden_layer_array_2);
        free(transition_matrix_1_gradient);
        free(transition_matrix_2_gradient);
        free(transition_matrix_3_gradient);
        //Stop timing and accumulate
        cudaEventRecord( fin1, 0);
        cudaEventSynchronize( fin1 );
        cudaEventElapsedTime( &tiempo1, inicio1, fin1 );
        cudaEventDestroy( inicio1 );
        cudaEventDestroy( fin1 );
        total_time += tiempo1;
    }
    fclose(f);
    //Report the average time per classification
    total_time /= elements;
    printf ("Tiempo promedio por clasificacion: %f\n", total_time);
}
/*
* Function that randomly initialize all values off the transiction matrix
* In:
* matrix: transition matrix of the neural network
* N: rows of the matrix
* M: columns of the matrix
*/
/*
 * Randomly initialize every weight of a transition matrix with a value
 * in (-1, 1): a coin flip chooses the sign, then a uniform value in
 * [0, 1) with 1e-6 resolution chooses the magnitude.
 * In:
 *   matrix: transition matrix of the neural network (row-major, N x M)
 *   N: rows of the matrix
 *   M: columns of the matrix
 */
void init_layer(double *matrix, int N, int M){
    for (int i = 0; i < N; ++i){
        for (int j = 0; j < M; ++j){
            //Sign is drawn first, magnitude second (order matters for
            //reproducibility of the rand() stream)
            int negative = rand() % 2;
            double magnitude = (rand() % 1000000) / 1000000.0;
            matrix[i*M + j] = negative ? -magnitude : magnitude;
        }
    }
}
/*
* Function that prints the value of the transition matrix
* In:
* matrix: transition matrix
* N: rows of the matrix
* M: columns of the matrix
*/
/*
 * Print a transition matrix: one row per line, values space-separated.
 * In:
 *   matrix: transition matrix (row-major, N x M)
 *   N: rows of the matrix
 *   M: columns of the matrix
 */
void print_layer(double *matrix, int N, int M){
    for (int row = 0; row < N; ++row){
        for (int col = 0; col < M; ++col){
            printf("%f ", matrix[row*M + col]);
        }
        printf("\n");
    }
}
// Entry point: parse the element count, allocate and initialize the
// three transition matrices, run the training via read_file, and report
// the total elapsed time (measured with CUDA events).
// Fixes vs. original: mallocs are checked, the matrices/buffer are
// freed, the timing events are destroyed, and main returns a value.
// (Spanish user-facing strings kept byte-for-byte.)
int main(int argc, char *argv[]){
    //Seed the PRNG
    srand(time(NULL));
    //Validate the arguments
    if ( argc != 2 ) {
        /* We print argv[0] assuming it is the program name */
        printf( "Error se debe ejecutar: %s <N>\n", argv[0] );
        exit(0);
    }
    //Number of training elements, taken from the command line
    int elements = atoi(argv[1]);
    printf("Se va a entrenar con %d elementos\n",elements);
    //Allocate the transition matrices and the record buffer
    double *transition_matrix_1 = (double*)malloc(PIXELS*HIDDEN_LAYER_1*sizeof(double));
    double *transition_matrix_2 = (double*)malloc((HIDDEN_LAYER_1+1)*HIDDEN_LAYER_2*sizeof(double));
    double *transition_matrix_3 = (double*)malloc((HIDDEN_LAYER_2+1)*OUTPUT_LAYER*sizeof(double));
    unsigned char *buffer = (unsigned char*)malloc(PIXELS*sizeof(unsigned char));
    if (!transition_matrix_1 || !transition_matrix_2 || !transition_matrix_3 || !buffer) {
        fprintf(stderr, "malloc failed\n");
        return 1;
    }
    //Randomly initialize the weights
    init_layer(transition_matrix_1,PIXELS,HIDDEN_LAYER_1);
    init_layer(transition_matrix_2,HIDDEN_LAYER_1+1,HIDDEN_LAYER_2);
    init_layer(transition_matrix_3,HIDDEN_LAYER_2+1,OUTPUT_LAYER);
    //Start timing the whole run
    float tiempo1;
    cudaEvent_t inicio1, fin1;
    cudaEventCreate(&inicio1);
    cudaEventCreate(&fin1);
    cudaEventRecord( inicio1, 0 );
    //Run the training
    read_file("data_batch_1.bin",buffer,transition_matrix_1,transition_matrix_2,transition_matrix_3,elements);
    //Stop timing
    cudaEventRecord( fin1, 0);
    cudaEventSynchronize( fin1 );
    cudaEventElapsedTime( &tiempo1, inicio1, fin1 );
    cudaEventDestroy( inicio1 );
    cudaEventDestroy( fin1 );
    //Report and clean up (the original leaked all four buffers)
    printf("Tiempo total del programa: %f ms\n", tiempo1);
    free(buffer);
    free(transition_matrix_1);
    free(transition_matrix_2);
    free(transition_matrix_3);
    return 0;
}
|
706
|
/**********************************************************************
* DESCRIPTION:
* Serial Concurrent Wave Equation - C Version
* This program implements the concurrent wave equation
*********************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#define MAXPOINTS 1000000
#define MAXSTEPS 1000000
#define MINPOINTS 20
#define PI 3.14159265
void check_param(void);
void init_line(void);
void update (void);
void printfinal (void);
int nsteps, /* number of time steps */
tpoints, /* total points along string */
rcode; /* generic return code */
float values[MAXPOINTS]; /* values at time t */
/**********************************************************************
* Checks input values from parameters
*********************************************************************/
/**********************************************************************
 * Checks input values from parameters; prompts interactively until the
 * file-scope globals tpoints and nsteps are within their valid ranges.
 * Fix: scanf now uses a field-width limit ("%19s") so oversized input
 * cannot overflow the 20-byte buffer.
 *********************************************************************/
void check_param(void)
{
    char tchar[20];
    /* check number of points, number of iterations */
    while ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS)) {
        printf("Enter number of points along vibrating string [%d-%d]: "
               ,MINPOINTS, MAXPOINTS);
        scanf("%19s", tchar);
        tpoints = atoi(tchar);
        if ((tpoints < MINPOINTS) || (tpoints > MAXPOINTS))
            printf("Invalid. Please enter value between %d and %d\n",
                   MINPOINTS, MAXPOINTS);
    }
    while ((nsteps < 1) || (nsteps > MAXSTEPS)) {
        printf("Enter number of time steps [1-%d]: ", MAXSTEPS);
        scanf("%19s", tchar);
        nsteps = atoi(tchar);
        if ((nsteps < 1) || (nsteps > MAXSTEPS))
            printf("Invalid. Please enter value between 1 and %d\n", MAXSTEPS);
    }
    printf("Using points = %d, steps = %d\n", tpoints, nsteps);
}
/**********************************************************************
* Initialize points on line
*********************************************************************/
/**********************************************************************
 * Device-side initialization: values[i] = sin(2*pi * i/num_of_points).
 * One thread per point, 1-D launch.
 * Fix: added a bounds guard — grids are normally rounded up past
 * num_of_points, and unguarded tail threads wrote out of range.
 * NOTE(review): this divides by num_of_points while the host-side
 * init_line divides by (tpoints - 1) — confirm which is intended if
 * this kernel is ever wired into the pipeline.
 *********************************************************************/
__global__
void init(float *values, int num_of_points){
    int index = blockIdx.x*blockDim.x + threadIdx.x;
    if (index >= num_of_points)
        return;
    float x = 1.0*(float)index / (float)num_of_points;
    values[index] = sin (6.2831853f * x);
}
/**********************************************************************
 * Initialize points on the line: values[j] = sin(2*pi * j/(tpoints-1)),
 * i.e. one full sine period sampled across the string.
 *********************************************************************/
void init_line(void)
{
    /* Calculate initial values based on sine curve */
    float fac = 2.0 * PI;
    float denom = tpoints - 1;
    for (int j = 0; j < tpoints; j++) {
        float x = (float)j / denom;
        values[j] = sin (fac * x);
    }
}
/**********************************************************************
* Calculate new values using wave equation
*********************************************************************/
/**********************************************************************
 * Calculate new values using wave equation
 *********************************************************************/
// One thread per string point. Iterates the recurrence
// new = 2*val - old + b*val for num_of_iterations steps, entirely in
// registers, then writes the final value back.
__global__
void do_math(float *values, int num_of_points, int num_of_iterations)
{
int index = blockIdx.x*blockDim.x + threadIdx.x;
// Skip out-of-range threads and index 0 (a fixed endpoint). The last
// point IS updated here; the host re-zeroes both endpoints afterwards.
if (index < num_of_points && index > 0)
{
float old_val, val, new_val,a,b;
// NOTE(review): b = -0.18 presumably bakes in the (c*dt/dx)^2 term of
// the discretized wave equation for a fixed step — confirm against the
// serial reference.
a = 2.0;
b = -0.18;
// Both "previous" and "current" start from the same initial sample.
old_val = val = values[index];
for (int i = num_of_iterations; i >0; i--)
{
new_val = (a * val) - old_val + ((b) *val);
old_val = val;
val = new_val;
}
values[index] = val;
}
}
/**********************************************************************
* Update all values along line a specified number of times
*********************************************************************/
/**********************************************************************
 * Update all values along the line for all time steps: copy `values`
 * to the device, run do_math with 1024-thread blocks (grid rounded up),
 * copy the result back, and pin both endpoints to zero.
 * Fix: cudaMalloc and the kernel launch are now checked (the original
 * ignored every CUDA result).
 *********************************************************************/
void update()
{
    float *arr;
    int size = tpoints * sizeof(float);
    if (cudaMalloc((void**)&arr, size) != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed\n");
        return;
    }
    cudaMemcpy(arr, values, size, cudaMemcpyHostToDevice);
    /* Update values for each time step; ceil(tpoints/1024) blocks */
    do_math<<<((tpoints + 1023) >> 10), 1024>>>(arr, tpoints, nsteps);
    /* Launches return void; check for configuration errors explicitly */
    if (cudaGetLastError() != cudaSuccess) {
        fprintf(stderr, "do_math launch failed\n");
    }
    /* Blocking copy also synchronizes with the kernel */
    cudaMemcpy(values, arr, size, cudaMemcpyDeviceToHost);
    cudaFree(arr);
    /* Both string endpoints are held fixed at zero */
    values[0] = values[tpoints-1] = 0.0;
}
/**********************************************************************
* Print final results
*********************************************************************/
/**********************************************************************
 * Print final results, ten values per line.
 *********************************************************************/
void printfinal()
{
    int printed = 0;
    while (printed < tpoints) {
        printf("%6.4f ", values[printed]);
        ++printed;
        /* Break the line after every tenth value */
        if (printed % 10 == 0)
            printf("\n");
    }
}
/*
 * Write the current values to output_<n>.txt: the first line is
 * `point`, followed by the tpoints values space-separated on one line.
 * Fix: the result of fopen is now checked before writing (the original
 * dereferenced a possibly-NULL FILE*).
 */
void output_data(int n,int point){
    char filename[100];
    FILE *fp;
    sprintf(filename, "output_%d.txt", n);
    fp = fopen(filename, "w");
    if (fp == NULL) {
        fprintf(stderr, "cannot open %s for writing\n", filename);
        return;
    }
    fprintf(fp, "%d\n", point);
    for (int i = 0; i < tpoints; i++) {
        fprintf(fp, "%6.4f ", values[i]);
    }
    fprintf(fp, "\n");
    fclose(fp);
}
/**********************************************************************
* Main program
*********************************************************************/
/**********************************************************************
 * Main program: reads tpoints and nsteps from the command line,
 * validates them, runs the simulation, prints and writes the results.
 * Fix: argc is checked before argv[1]/argv[2] are dereferenced (the
 * original crashed when run without arguments).
 *********************************************************************/
int main(int argc, char *argv[])
{
    if (argc < 3) {
        fprintf(stderr, "usage: %s <tpoints> <nsteps>\n", argv[0]);
        return 1;
    }
    sscanf(argv[1],"%d",&tpoints);
    sscanf(argv[2],"%d",&nsteps);
    check_param();
    printf("Initializing points on the line...\n");
    init_line();
    printf("Updating all points for all time steps...\n");
    update();
    printf("Printing final results...\n");
    printfinal();
    output_data(1,tpoints);
    printf("\nDone.\n\n");
    return 0;
}
|
707
|
#include"rbsspfvehicletracker.cuh"
//==============================================================================
LaserScan * d_scan=NULL;
LaserScan h_scan;
EgoMotion h_egomotion;
//==============================================================================
// Build the geometric box model of a vehicle hypothesis from its pose
// (x, y, theta) and half-extents (lf front, lb back, wl left, wr right):
// computes the four corner positions, the side lengths, the laser-beam
// index of each corner (bid, from the corner bearing and the angular
// density of the scan), the sensor origin expressed in the vehicle
// frame (ox, oy), and which one or two edges face the sensor (eid;
// -1 marks "no edge").
__host__ __device__
void deviceBuildModel(VehicleState & state, double & density)
{
double c=cos(state.theta);
double s=sin(state.theta);
// Sensor origin (0,0) transformed into the vehicle frame.
state.ox=-c*state.x-s*state.y;
state.oy=s*state.x-c*state.y;
// Corners 0..3: front-left, front-right, back-right, back-left.
state.cx[0]=c*state.lf-s*state.wl+state.x; state.cy[0]=s*state.lf+c*state.wl+state.y;
state.cx[1]=c*state.lf+s*state.wr+state.x; state.cy[1]=s*state.lf-c*state.wr+state.y;
state.cx[2]=-c*state.lb+s*state.wr+state.x; state.cy[2]=-s*state.lb-c*state.wr+state.y;
state.cx[3]=-c*state.lb-s*state.wl+state.x; state.cy[3]=-s*state.lb+c*state.wl+state.y;
// Edge lengths: 0/2 are the width edges, 1/3 the length edges.
state.cl[0]=state.cl[2]=state.wl+state.wr;
state.cl[1]=state.cl[3]=state.lf+state.lb;
// Beam index of each corner: bearing in [0, 2*PI) over angular density.
state.bid[0]=(atan2(state.cy[0],state.cx[0])+PI)/density;
state.bid[1]=(atan2(state.cy[1],state.cx[1])+PI)/density;
state.bid[2]=(atan2(state.cy[2],state.cx[2])+PI)/density;
state.bid[3]=(atan2(state.cy[3],state.cx[3])+PI)/density;
// Classify the sensor position into one of nine regions around the box
// to decide which edges are visible (front=0, right=1, back=2, left=3).
if(state.ox>state.lf)
{
if(state.oy>state.wl)
{
state.eid[0]=0;state.eid[1]=3;
}
else if(state.oy<-state.wr)
{
state.eid[0]=0;state.eid[1]=1;
}
else
{
state.eid[0]=0;state.eid[1]=-1;
}
}
else if(state.ox<-state.lb)
{
if(state.oy>state.wl)
{
state.eid[0]=2;state.eid[1]=3;
}
else if(state.oy<-state.wr)
{
state.eid[0]=2;state.eid[1]=1;
}
else
{
state.eid[0]=2;state.eid[1]=-1;
}
}
else
{
if(state.oy>state.wl)
{
state.eid[0]=3;state.eid[1]=-1;
}
else if(state.oy<-state.wr)
{
state.eid[0]=1;state.eid[1]=-1;
}
else
{
// Sensor inside the box footprint: no visible edge.
state.eid[0]=-1;state.eid[1]=-1;
}
}
return;
}
// Score one visible edge of the vehicle model against the laser scan,
// accumulating a log-weight into state.weight (tempered by `anneal`)
// and counting supporting hits in state.count. Optionally records the
// indices of beams that hit the edge into beamid/beamnum (capped at
// MAXEDGEPOINT). If uncertainflag is set, edges whose size uncertainty
// exceeds UNCERTAINTHRESH are skipped entirely.
__host__ __device__
void deviceMeasureEdge(VehicleState & state, int edgeid, LaserScan * scan, double anneal, int * beamnum, int * beamid, bool uncertainflag)
{
// No visible edge in this slot.
if(state.eid[edgeid]<0)
{
return;
}
if(uncertainflag)
{
// Skip edges whose corresponding dimension is still too uncertain.
switch(state.eid[edgeid])
{
case 0:
if(state.dlf>UNCERTAINTHRESH)
{
return;
}
break;
case 1:
if(state.dwr>UNCERTAINTHRESH)
{
return;
}
break;
case 2:
if(state.dlb>UNCERTAINTHRESH)
{
return;
}
break;
case 3:
if(state.dwl>UNCERTAINTHRESH)
{
return;
}
break;
default:
break;
}
}
// The edge runs from corner `starteid` to the next corner (mod 4).
int starteid=state.eid[edgeid];
int endeid=(state.eid[edgeid]+1)%4;
int startbid=state.bid[starteid];
int endbid=state.bid[endeid];
// Unwrap across the angular seam so the beam interval is contiguous.
if(startbid>endbid)
{
endbid+=scan->beamnum;
}
int totalbeam=(endbid-startbid)+1;
// Too few beams cover this edge: mark it invisible.
if(totalbeam<=UNCERTAINTHRESH_CNT)
{
state.eid[edgeid]=-1;
}
// Edge direction (dx1, dy1) and its inward unit normal (dx2, dy2).
double dx1=state.cx[endeid]-state.cx[starteid];
double dy1=state.cy[endeid]-state.cy[starteid];
double dx2=-dy1/state.cl[starteid];
double dy2=dx1/state.cl[starteid];
double density=2*PI/scan->beamnum;
for(int i=startbid;i<=endbid;i++)
{
// P[0..3]: range breakpoints of the piecewise beam-likelihood model.
double P[4]={MAXBEAMLENGTH,MAXBEAMLENGTH,MAXBEAMLENGTH,MAXBEAMLENGTH};
int tmpid=i%scan->beamnum;
double bear=tmpid*density-PI;
double c=cos(bear);
double s=sin(bear);
double tmpx=c*dx1+s*dy1;
double tmpy=s*dx1-c*dy1;
// Skip beams parallel to the edge (no intersection).
if(tmpy!=0)
{
// beta: range at which this beam intersects the edge line.
double beta=tmpx/tmpy*(c*state.cy[starteid]-s*state.cx[starteid])+(c*state.cx[starteid]+s*state.cy[starteid]);
if(beta>=MINBEAMLENGTH&&beta<=MAXBEAMLENGTH)
{
P[2]=beta;
// Margins around the expected range, from re-intersecting the
// edge shifted along its normal.
// NOTE(review): in the beta<NEARESTRING branch gamma0 and gamma2
// are computed from the identical expression (dy2*beta), which
// looks like a copy-paste slip — confirm against the intended
// near-range margins.
double gamma0,gamma1,gamma2;
if(beta<NEARESTRING)
{
gamma0=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*beta)-s*(state.cx[starteid]+dx2*beta))+c*(state.cx[starteid]+dx2*beta)+s*(state.cy[starteid]+dy2*beta)));
gamma1=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*2)-s*(state.cx[starteid]+dx2*2))+c*(state.cx[starteid]+dx2*2)+s*(state.cy[starteid]+dy2*2)));
gamma2=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*beta)-s*(state.cx[starteid]+dx2*beta))+c*(state.cx[starteid]+dx2*beta)+s*(state.cy[starteid]+dy2*beta)));
}
else
{
gamma0=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*MARGIN0)-s*(state.cx[starteid]+dx2*MARGIN0))+c*(state.cx[starteid]+dx2*MARGIN0)+s*(state.cy[starteid]+dy2*MARGIN0)));
gamma1=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*MARGIN1)-s*(state.cx[starteid]+dx2*MARGIN1))+c*(state.cx[starteid]+dx2*MARGIN1)+s*(state.cy[starteid]+dy2*MARGIN1)));
gamma2=fabs(beta-(tmpx/tmpy*(c*(state.cy[starteid]+dy2*MARGIN2)-s*(state.cx[starteid]+dx2*MARGIN2))+c*(state.cx[starteid]+dx2*MARGIN2)+s*(state.cy[starteid]+dy2*MARGIN2)));
}
// Clamp the breakpoints into the valid range window.
P[1]=P[2]-gamma0>=MINBEAMLENGTH?P[2]-gamma0:MINBEAMLENGTH;
P[3]=P[2]+gamma1<=MAXBEAMLENGTH?P[2]+gamma1:MAXBEAMLENGTH;
P[0]=P[2]-gamma2>=MINBEAMLENGTH?P[2]-gamma2:MINBEAMLENGTH;
// Piecewise log-weight with smooth (Gaussian) blending at the
// breakpoints, all relative to WEIGHT0.
double tmplogweight;
if(scan->length[tmpid]<=P[0])
{
double delta=scan->length[tmpid]-P[0];
double w1=WEIGHT0-WEIGHT0;
double w2=WEIGHT1-WEIGHT0;
tmplogweight=w1+(w2-w1)*exp(-delta*delta/0.01);
}
else if(scan->length[tmpid]<=P[1])
{
double delta=scan->length[tmpid]-P[1];
double w1=WEIGHT1-WEIGHT0;
double w2=WEIGHT2-WEIGHT0;
tmplogweight=w1+(w2-w1)*exp(-delta*delta/0.01);
}
else if(scan->length[tmpid]<=P[3])
{
// A return inside the hit band supports the edge.
if(beta>=NEARESTRING)
{
if(beamnum!=NULL&&beamid!=NULL&&totalbeam>UNCERTAINTHRESH_CNT)
{
if((*beamnum)<MAXEDGEPOINT)
{
beamid[*beamnum]=tmpid;
(*beamnum)++;
}
}
state.count++;
}
double delta=scan->length[tmpid]-P[2];
double w1=WEIGHT2-WEIGHT0;
double w2=2*w1;
tmplogweight=w1+(w2-w1)*exp(-delta*delta/0.01);
}
else
{
double delta=scan->length[tmpid]-P[3];
double w1=WEIGHT3-WEIGHT0;
double w2=WEIGHT2-WEIGHT0;
tmplogweight=w1+(w2-w1)*exp(-delta*delta/0.01);
}
// Annealed accumulation into the particle's log-weight.
state.weight+=tmplogweight/anneal;
}
}
}
}
// Apply the ego-vehicle motion compensation to a particle's pose:
// rotate (x, y) by dtheta, translate by (dx, dy), and turn the heading
// by dtheta as well.
__host__ __device__
void deviceEgoMotion(VehicleState & state, EgoMotion & egomotion)
{
    const double c = cos(egomotion.dtheta);
    const double s = sin(egomotion.dtheta);
    // Rotation first, then translation.
    const double moved_x = c * state.x - s * state.y + egomotion.dx;
    const double moved_y = s * state.x + c * state.y + egomotion.dy;
    state.x = moved_x;
    state.y = moved_y;
    state.theta += egomotion.dtheta;
}
// Propagate a particle one time step (egomotion.dt, in milliseconds)
// with an Ackermann-style motion model and then apply the ego-motion
// compensation. Three cases: stationary (v == 0), straight driving
// (curvature k == 0), and an arc of radius 1/k; `a` appears to be a
// slip/steering angle added to the heading — TODO confirm its meaning
// against VehicleState's definition.
__host__ __device__
void deviceAckermannModel(VehicleState & state0, VehicleState & state1, EgoMotion & egomotion)
{
state1=state0;
// Stationary: only the ego frame moves.
if(state1.v==0)
{
deviceEgoMotion(state1,egomotion);
return;
}
double c=cos(state1.theta);
double s=sin(state1.theta);
// Zero curvature: straight-line displacement (dt is ms, hence /1000).
if(state1.k==0)
{
state1.x=state1.x+c*state1.v*egomotion.dt/1000;
state1.y=state1.y+s*state1.v*egomotion.dt/1000;
state1.a=0;
deviceEgoMotion(state1,egomotion);
return;
}
// Circular arc: heading direction before the step...
double c0=cos(state1.theta+state1.a);
double s0=sin(state1.theta+state1.a);
state1.omega=state1.v*state1.k;
double dtheta=state1.omega*egomotion.dt/1000;
state1.theta+=dtheta;
// ...and after, with the chord computed from the turn radius R = 1/k.
double c1=cos(state1.theta+state1.a);
double s1=sin(state1.theta+state1.a);
double R=1/state1.k;
state1.x=state1.x+R*(-s0+s1);
state1.y=state1.y+R*(c0-c1);
deviceEgoMotion(state1,egomotion);
return;
}
//==============================================================================
// Initialize one thrust minstd_rand engine per particle slot from the
// host-supplied seed array. 1-D launch, one thread per slot; excess
// threads exit early.
__global__
void kernelSetRandomSeed(int * seed, thrust::minstd_rand * rng, int tmppnum)
{
GetThreadID_1D(id);
// Bounds guard: the grid may be larger than tmppnum.
if(id>=tmppnum)
{
return;
}
rng[id]=thrust::minstd_rand(seed[id]);
return;
}
// Geometry-refinement step of the particle filter: each of the tmppnum
// output particles copies a source particle (upsampled uniformly from
// the pnum inputs), perturbs its shape parameters (theta, wl, wr, lf,
// lb) uniformly within the configured offsets clamped to the state
// constraints, rebuilds the box model, and scores its two visible
// edges against the scan. 1-D launch, one thread per output particle.
__global__
void kernelGeometryModel(LaserScan * scan, int pnum, VehicleState * particle, int tmppnum, VehicleState * tmpparticle, thrust::minstd_rand * rng, ObjectStateOffset objectstateoffset, StateConstrain stateconstrain, EgoMotion egomotion)
{
GetThreadID_1D(id);
if(id>=tmppnum)
{
return;
}
// Map output slot id back to a source particle (uniform upsampling).
double index=double(pnum)/double(tmppnum);
int pid=int(id*index);
tmpparticle[id]=particle[pid];
// Perturb theta only while its search window is still wider than the
// target precision.
if(objectstateoffset.thetaoff>objectstateoffset.thetaprec)
{
double thetamin=tmpparticle[id].theta-objectstateoffset.thetaoff;thetamin=thetamin>stateconstrain.thetamin?thetamin:stateconstrain.thetamin;
double thetamax=tmpparticle[id].theta+objectstateoffset.thetaoff;thetamax=thetamax<stateconstrain.thetamax?thetamax:stateconstrain.thetamax;
tmpparticle[id].theta=thrust::random::uniform_real_distribution<double>(thetamin,thetamax)(rng[id]);
}
// Uniform draws for each half-extent, clamped to the constraints.
double wlmin=tmpparticle[id].wl-objectstateoffset.wloff;wlmin=wlmin>stateconstrain.wlmin?wlmin:stateconstrain.wlmin;
double wlmax=tmpparticle[id].wl+objectstateoffset.wloff;wlmax=wlmax<stateconstrain.wlmax?wlmax:stateconstrain.wlmax;
tmpparticle[id].wl=thrust::random::uniform_real_distribution<double>(wlmin,wlmax)(rng[id]);
double wrmin=tmpparticle[id].wr-objectstateoffset.wroff;wrmin=wrmin>stateconstrain.wrmin?wrmin:stateconstrain.wrmin;
double wrmax=tmpparticle[id].wr+objectstateoffset.wroff;wrmax=wrmax<stateconstrain.wrmax?wrmax:stateconstrain.wrmax;
tmpparticle[id].wr=thrust::random::uniform_real_distribution<double>(wrmin,wrmax)(rng[id]);
double lfmin=tmpparticle[id].lf-objectstateoffset.lfoff;lfmin=lfmin>stateconstrain.lfmin?lfmin:stateconstrain.lfmin;
double lfmax=tmpparticle[id].lf+objectstateoffset.lfoff;lfmax=lfmax<stateconstrain.lfmax?lfmax:stateconstrain.lfmax;
tmpparticle[id].lf=thrust::random::uniform_real_distribution<double>(lfmin,lfmax)(rng[id]);
double lbmin=tmpparticle[id].lb-objectstateoffset.lboff;lbmin=lbmin>stateconstrain.lbmin?lbmin:stateconstrain.lbmin;
double lbmax=tmpparticle[id].lb+objectstateoffset.lboff;lbmax=lbmax<stateconstrain.lbmax?lbmax:stateconstrain.lbmax;
tmpparticle[id].lb=thrust::random::uniform_real_distribution<double>(lbmin,lbmax)(rng[id]);
// Rebuild the corner/edge model, reset score, and measure both edges.
deviceBuildModel(tmpparticle[id],egomotion.density);
tmpparticle[id].weight=0;
tmpparticle[id].count=0;
deviceMeasureEdge(tmpparticle[id],0,scan,objectstateoffset.anneal,NULL,NULL,0);
deviceMeasureEdge(tmpparticle[id],1,scan,objectstateoffset.anneal,NULL,NULL,0);
return;
}
// Motion-refinement kernel: one thread per temporary (hypothesis) particle.
// Launch: 1D grid with at least tmppnum threads.
// Perturbs the motion state (v, omega, slip angle a) of an index-mapped source
// particle -- Gaussian jitter in plain particle-filter mode (egomotion.pfflag
// set) or a clamped uniform window in SSPF mode -- derives a consistent
// curvature k, propagates the particle one time step with the Ackermann model,
// and scores the moved particle against the scan.
__global__
void kernelMotionModel(LaserScan * scan, int pnum, VehicleState * particle, int tmppnum, VehicleState * tmpparticle, thrust::minstd_rand * rng, ObjectStateOffset objectstateoffset, StateConstrain stateconstrain, EgoMotion egomotion)
{
GetThreadID_1D(id);
if(id>=tmppnum)
{
return;
}
// Map this hypothesis back onto a source particle (tmppnum >= pnum).
double index=double(pnum)/double(tmppnum);
int pid=int(id*index);
tmpparticle[id]=particle[pid];
if(egomotion.pfflag)
{
// PF mode: Gaussian jitter around the current v and omega, clamped.
tmpparticle[id].v=thrust::random::normal_distribution<double>(tmpparticle[id].v,objectstateoffset.voff)(rng[id]);
tmpparticle[id].v=tmpparticle[id].v>stateconstrain.vmin?tmpparticle[id].v:stateconstrain.vmin;
tmpparticle[id].v=tmpparticle[id].v<stateconstrain.vmax?tmpparticle[id].v:stateconstrain.vmax;
tmpparticle[id].omega=thrust::random::normal_distribution<double>(tmpparticle[id].omega,objectstateoffset.omegaoff)(rng[id]);
tmpparticle[id].omega=tmpparticle[id].omega>stateconstrain.omegamin?tmpparticle[id].omega:stateconstrain.omegamin;
tmpparticle[id].omega=tmpparticle[id].omega<stateconstrain.omegamax?tmpparticle[id].omega:stateconstrain.omegamax;
}
else
{
// SSPF mode: uniform resampling inside [value-off, value+off], clamped.
double vmin=tmpparticle[id].v-objectstateoffset.voff;vmin=vmin>stateconstrain.vmin?vmin:stateconstrain.vmin;
double vmax=tmpparticle[id].v+objectstateoffset.voff;vmax=vmax<stateconstrain.vmax?vmax:stateconstrain.vmax;
tmpparticle[id].v=thrust::random::uniform_real_distribution<double>(vmin,vmax)(rng[id]);
double omegamin=tmpparticle[id].omega-objectstateoffset.omegaoff;omegamin=omegamin>stateconstrain.omegamin?omegamin:stateconstrain.omegamin;
double omegamax=tmpparticle[id].omega+objectstateoffset.omegaoff;omegamax=omegamax<stateconstrain.omegamax?omegamax:stateconstrain.omegamax;
tmpparticle[id].omega=thrust::random::uniform_real_distribution<double>(omegamin,omegamax)(rng[id]);
}
// Curvature k = omega/v, clamped; v == 0 would divide by zero, so fall back
// to the mid-range curvature. omega is then recomputed for consistency.
if(tmpparticle[id].v==0)
{
tmpparticle[id].k=(stateconstrain.kmin+stateconstrain.kmax)/2;
}
else
{
tmpparticle[id].k=tmpparticle[id].omega/tmpparticle[id].v;
if(tmpparticle[id].k<stateconstrain.kmin)
{
tmpparticle[id].k=stateconstrain.kmin;
}
if(tmpparticle[id].k>stateconstrain.kmax)
{
tmpparticle[id].k=stateconstrain.kmax;
}
}
tmpparticle[id].omega=tmpparticle[id].v*tmpparticle[id].k;
// Turning geometry: R is the turn radius, phi bounds the slip angle a.
// The constant 4.0 appears to be a fixed vehicle length in meters -- TODO
// confirm. phi is only read below when omega != 0, and omega = v*k != 0
// implies k != 0, so phi is always initialized before use.
double R,phi;
if(tmpparticle[id].k!=0)
{
R=1/fabs(tmpparticle[id].k);
phi=atan2(4.0,R);
}
// Constrain the slip-angle window by turn direction.
if(tmpparticle[id].omega>0)
{
stateconstrain.amin=-MAXANGLEOFFSET;
stateconstrain.amax=phi;
stateconstrain.amax=stateconstrain.amax>stateconstrain.amin?stateconstrain.amax:stateconstrain.amin;
}
else if(tmpparticle[id].omega<0)
{
stateconstrain.amax=MAXANGLEOFFSET;
stateconstrain.amin=-phi;
stateconstrain.amin=stateconstrain.amin<stateconstrain.amax?stateconstrain.amin:stateconstrain.amax;
}
else if(tmpparticle[id].omega==0)
{
stateconstrain.amin=0;
stateconstrain.amax=0;
}
// Sample the slip angle a within the direction-dependent window.
if(egomotion.pfflag)
{
tmpparticle[id].a=thrust::random::normal_distribution<double>(tmpparticle[id].a,objectstateoffset.aoff)(rng[id]);
tmpparticle[id].a=tmpparticle[id].a>stateconstrain.amin?tmpparticle[id].a:stateconstrain.amin;
tmpparticle[id].a=tmpparticle[id].a<stateconstrain.amax?tmpparticle[id].a:stateconstrain.amax;
}
else
{
double amin=tmpparticle[id].a-objectstateoffset.aoff;amin=amin>stateconstrain.amin?amin:stateconstrain.amin;
double amax=tmpparticle[id].a+objectstateoffset.aoff;amax=amax<stateconstrain.amax?amax:stateconstrain.amax;
tmpparticle[id].a=thrust::random::uniform_real_distribution<double>(amin,amax)(rng[id]);
}
// Propagate one step, rebuild the model, and score the moved particle; the
// weight/count are copied back to the stationary hypothesis so resampling
// sees the post-motion likelihood. Final flag is 1 here vs 0 in the geometry
// kernel -- TODO confirm flag semantics in deviceMeasureEdge.
VehicleState movedparticle;
deviceAckermannModel(tmpparticle[id],movedparticle,egomotion);
deviceBuildModel(movedparticle,egomotion.density);
movedparticle.weight=0;
movedparticle.count=0;
deviceMeasureEdge(movedparticle,0,scan,objectstateoffset.anneal,NULL,NULL,1);
deviceMeasureEdge(movedparticle,1,scan,objectstateoffset.anneal,NULL,NULL,1);
tmpparticle[id].weight=movedparticle.weight;
tmpparticle[id].count=movedparticle.count;
return;
}
// Applies one Ackermann motion step to every particle in place, then rebuilds
// its geometric model at the scan's angular density.
// Launch: 1D grid with at least pnum threads.
__global__
void kernelMotionUpdate(int pnum, VehicleState * particle, EgoMotion egomotion)
{
    GetThreadID_1D(id);
    if(id<pnum)
    {
        // In-place update: source and destination are the same particle slot.
        deviceAckermannModel(particle[id],particle[id],egomotion);
        deviceBuildModel(particle[id],egomotion.density);
    }
}
//==============================================================================
// Host-side systematic (low-variance) resampling of the temporary hypothesis
// population back into at most RQPN particles, plus computation of the
// weighted-mean state estimate and its per-dimension spread.
//   pnum          [out] number of particles written to d_particle
//   d_particle    [out] device buffer receiving the resampled set
//   tmppnum       [in]  size of the temporary population
//   d_tmpparticle [in]  device buffer of scored hypotheses (weights are logs)
//   estimate      [out] weighted mean state; d* fields hold the largest
//                       one-sided deviation of the resampled set from the mean
void sampleParticle(int & pnum, VehicleState * d_particle, int & tmppnum, VehicleState * d_tmpparticle, VehicleState & estimate)
{
VehicleState h_particle[RQPN];
VehicleState h_tmpparticle[MAXPN];
bool h_flag[MAXPN];
// Pull the scored hypotheses to the host.
cudaMemcpy(h_tmpparticle,d_tmpparticle,sizeof(VehicleState)*tmppnum,cudaMemcpyDeviceToHost);
// Find the log-weight range and mark every hypothesis as not-yet-emitted.
double maxlogweight=h_tmpparticle[0].weight;
double minlogweight=h_tmpparticle[0].weight;
for(int j=0;j<tmppnum;j++)
{
if(maxlogweight<h_tmpparticle[j].weight)
{
maxlogweight=h_tmpparticle[j].weight;
}
if(minlogweight>h_tmpparticle[j].weight)
{
minlogweight=h_tmpparticle[j].weight;
}
h_flag[j]=1;
}
// Scale log-weights into roughly [-30, 30] so exp() below cannot overflow
// or underflow to zero.
double maxscale=maxlogweight<=30?1:30/maxlogweight;
double minscale=minlogweight>=-30?1:-30/minlogweight;
double scale=maxscale<minscale?maxscale:minscale;
// Exponentiate and build the cumulative (unnormalized) weight sum in place.
for(int j=0;j<tmppnum;j++)
{
h_tmpparticle[j].weight=exp(h_tmpparticle[j].weight*scale);
if(j>0)
{
h_tmpparticle[j].weight+=h_tmpparticle[j-1].weight;
}
}
// Systematic resampling: planpnum evenly spaced sample points with a single
// shared random offset in [0, weightstep).
int planpnum=tmppnum<RQPN?tmppnum:RQPN;
double weightstep=1.0/planpnum;
int accuracy=1000000;
double samplebase=(rand()%accuracy)*weightstep/accuracy;
double weightsum=h_tmpparticle[tmppnum-1].weight;
pnum=0;
// Reset the running weighted-mean accumulator.
estimate.weight=0;
estimate.x=0;estimate.y=0;estimate.theta=0;
estimate.wl=0;estimate.wr=0;estimate.lf=0;estimate.lb=0;
estimate.a=0;estimate.v=0;estimate.k=0;estimate.omega=0;
estimate.count=0;
VehicleState minstate,maxstate;
for(int j=0, k=0;j<planpnum;j++)
{
double sample=samplebase+j*weightstep;
// Advance k to the first hypothesis whose normalized cumulative weight
// covers this sample point.
while(k<tmppnum)
{
if(sample>h_tmpparticle[k].weight/weightsum)
{
k++;
continue;
}
// A hypothesis is emitted at most once; repeated hits merge their
// weight into the already-emitted copy.
if(h_flag[k])
{
h_flag[k]=0;
h_particle[pnum]=h_tmpparticle[k];
h_particle[pnum].weight=weightstep;
// Track per-dimension extrema of the emitted particles for the
// spread computation below.
if(pnum==0)
{
minstate.x=h_particle[pnum].x;maxstate.x=h_particle[pnum].x;
minstate.y=h_particle[pnum].y;maxstate.y=h_particle[pnum].y;
minstate.theta=h_particle[pnum].theta;maxstate.theta=h_particle[pnum].theta;
minstate.wl=h_particle[pnum].wl;maxstate.wl=h_particle[pnum].wl;
minstate.wr=h_particle[pnum].wr;maxstate.wr=h_particle[pnum].wr;
minstate.lf=h_particle[pnum].lf;maxstate.lf=h_particle[pnum].lf;
minstate.lb=h_particle[pnum].lb;maxstate.lb=h_particle[pnum].lb;
minstate.a=h_particle[pnum].a;maxstate.a=h_particle[pnum].a;
minstate.v=h_particle[pnum].v;maxstate.v=h_particle[pnum].v;
minstate.k=h_particle[pnum].k;maxstate.k=h_particle[pnum].k;
minstate.omega=h_particle[pnum].omega;maxstate.omega=h_particle[pnum].omega;
}
else
{
minstate.x=minstate.x<h_particle[pnum].x?minstate.x:h_particle[pnum].x;
maxstate.x=maxstate.x>h_particle[pnum].x?maxstate.x:h_particle[pnum].x;
minstate.y=minstate.y<h_particle[pnum].y?minstate.y:h_particle[pnum].y;
maxstate.y=maxstate.y>h_particle[pnum].y?maxstate.y:h_particle[pnum].y;
minstate.theta=minstate.theta<h_particle[pnum].theta?minstate.theta:h_particle[pnum].theta;
maxstate.theta=maxstate.theta>h_particle[pnum].theta?maxstate.theta:h_particle[pnum].theta;
minstate.wl=minstate.wl<h_particle[pnum].wl?minstate.wl:h_particle[pnum].wl;
maxstate.wl=maxstate.wl>h_particle[pnum].wl?maxstate.wl:h_particle[pnum].wl;
minstate.wr=minstate.wr<h_particle[pnum].wr?minstate.wr:h_particle[pnum].wr;
maxstate.wr=maxstate.wr>h_particle[pnum].wr?maxstate.wr:h_particle[pnum].wr;
minstate.lf=minstate.lf<h_particle[pnum].lf?minstate.lf:h_particle[pnum].lf;
maxstate.lf=maxstate.lf>h_particle[pnum].lf?maxstate.lf:h_particle[pnum].lf;
minstate.lb=minstate.lb<h_particle[pnum].lb?minstate.lb:h_particle[pnum].lb;
maxstate.lb=maxstate.lb>h_particle[pnum].lb?maxstate.lb:h_particle[pnum].lb;
minstate.a=minstate.a<h_particle[pnum].a?minstate.a:h_particle[pnum].a;
maxstate.a=maxstate.a>h_particle[pnum].a?maxstate.a:h_particle[pnum].a;
minstate.v=minstate.v<h_particle[pnum].v?minstate.v:h_particle[pnum].v;
maxstate.v=maxstate.v>h_particle[pnum].v?maxstate.v:h_particle[pnum].v;
minstate.k=minstate.k<h_particle[pnum].k?minstate.k:h_particle[pnum].k;
maxstate.k=maxstate.k>h_particle[pnum].k?maxstate.k:h_particle[pnum].k;
minstate.omega=minstate.omega<h_particle[pnum].omega?minstate.omega:h_particle[pnum].omega;
maxstate.omega=maxstate.omega>h_particle[pnum].omega?maxstate.omega:h_particle[pnum].omega;
}
pnum++;
}
else
{
h_particle[pnum-1].weight+=weightstep;
}
// Accumulate the weighted mean incrementally (normalized after the loop).
estimate.weight+=weightstep;
estimate.x+=h_particle[pnum-1].x*weightstep;
estimate.y+=h_particle[pnum-1].y*weightstep;
estimate.theta+=h_particle[pnum-1].theta*weightstep;
estimate.wl+=h_particle[pnum-1].wl*weightstep;
estimate.wr+=h_particle[pnum-1].wr*weightstep;
estimate.lf+=h_particle[pnum-1].lf*weightstep;
estimate.lb+=h_particle[pnum-1].lb*weightstep;
estimate.a+=h_particle[pnum-1].a*weightstep;
estimate.v+=h_particle[pnum-1].v*weightstep;
estimate.k+=h_particle[pnum-1].k*weightstep;
estimate.omega+=h_particle[pnum-1].omega*weightstep;
estimate.count+=h_particle[pnum-1].count*weightstep;
break;
}
}
// Normalize by the total accumulated weight; omega is recomputed from v*k to
// keep the estimate kinematically consistent.
estimate.x/=estimate.weight;
estimate.y/=estimate.weight;
estimate.theta/=estimate.weight;
estimate.wl/=estimate.weight;
estimate.wr/=estimate.weight;
estimate.lf/=estimate.weight;
estimate.lb/=estimate.weight;
estimate.a/=estimate.weight;
estimate.v/=estimate.weight;
estimate.k/=estimate.weight;
estimate.omega=estimate.v*estimate.k;
estimate.count/=estimate.weight;
estimate.weight/=estimate.weight;
// Spread = largest one-sided deviation from the mean in each dimension.
estimate.dx=std::max(estimate.x-minstate.x,maxstate.x-estimate.x);
estimate.dy=std::max(estimate.y-minstate.y,maxstate.y-estimate.y);
estimate.dtheta=std::max(estimate.theta-minstate.theta,maxstate.theta-estimate.theta);
estimate.dwl=std::max(estimate.wl-minstate.wl,maxstate.wl-estimate.wl);
estimate.dwr=std::max(estimate.wr-minstate.wr,maxstate.wr-estimate.wr);
estimate.dlf=std::max(estimate.lf-minstate.lf,maxstate.lf-estimate.lf);
estimate.dlb=std::max(estimate.lb-minstate.lb,maxstate.lb-estimate.lb);
estimate.da=std::max(estimate.a-minstate.a,maxstate.a-estimate.a);
estimate.dv=std::max(estimate.v-minstate.v,maxstate.v-estimate.v);
estimate.dk=std::max(estimate.k-minstate.k,maxstate.k-estimate.k);
estimate.domega=std::max(estimate.omega-minstate.omega,maxstate.omega-estimate.omega);
// Rebuild the mean particle's box model and upload the resampled set.
deviceBuildModel(estimate,h_egomotion.density);
cudaMemcpy(d_particle,h_particle,sizeof(VehicleState)*pnum,cudaMemcpyHostToDevice);
return;
}
// CALRATIO: ratio of the current search range to the target precision for one
// state dimension; vratio accumulates the product over all dimensions and
// maxratio tracks the largest single ratio (used for the annealing factor).
#define CALRATIO(ratio, vratio, maxratio, maxrange, minrange) \
ratio=maxrange/minrange; vratio*=ratio; maxratio=ratio>maxratio?ratio:maxratio;
// CALZOOM: per-iteration shrink factor so that after N iterations the search
// range maxrange contracts to minrange (zoom = 2^(-log2(max/min)/N)).
#define CALZOOM(zoom, maxrange, minrange, N) \
zoom=log(maxrange/minrange)/log(2)/N;zoom=1/pow(2,zoom);
// Scaling-Series Particle Filter pass over the geometry dimensions
// (theta, wl, wr, lf, lb). Computes the number of halving iterations N needed
// to shrink all search windows to their target precisions, derives the
// per-dimension zoom factors and the annealing schedule, then alternates
// kernelGeometryModel (expand pnum -> pnum*SPN hypotheses) with
// sampleParticle (contract back) while tightening the windows each iteration.
// A final pass runs at exactly the target precisions with anneal = 1.
void SSPF_GeometryModel(LaserScan * scan, int & pnum, VehicleState * d_particle, VehicleState * d_tmpparticle, thrust::minstd_rand * d_rng, VehicleState & estimate, ObjectStateOffset & objectstateoffset, EgoMotion & egomotion)
{
double ratio=1,vratio=1,maxratio=1;
CALRATIO(ratio,vratio,maxratio,objectstateoffset.thetaoff,objectstateoffset.thetaprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.wloff,objectstateoffset.wlprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.wroff,objectstateoffset.wrprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.lfoff,objectstateoffset.lfprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.lboff,objectstateoffset.lbprec);
// Initial annealing factor from the widest dimension; decays toward 1 over N
// iterations via annealratio.
objectstateoffset.anneal=maxratio*maxratio;
double N=log(vratio)/log(2);
CALZOOM(objectstateoffset.thetazoom,objectstateoffset.thetaoff,objectstateoffset.thetaprec,N);
CALZOOM(objectstateoffset.wlzoom,objectstateoffset.wloff,objectstateoffset.wlprec,N);
CALZOOM(objectstateoffset.wrzoom,objectstateoffset.wroff,objectstateoffset.wrprec,N);
CALZOOM(objectstateoffset.lfzoom,objectstateoffset.lfoff,objectstateoffset.lfprec,N);
CALZOOM(objectstateoffset.lbzoom,objectstateoffset.lboff,objectstateoffset.lbprec,N);
objectstateoffset.annealratio=pow(objectstateoffset.anneal,-1/N);
// Only the heading constraint is tied to the current estimate here; the
// remaining fields rely on StateConstrain's defaults -- presumably set by its
// constructor (TODO confirm).
StateConstrain stateconstrain;
stateconstrain.thetamin=estimate.theta-objectstateoffset.thetaoff;
stateconstrain.thetamax=estimate.theta+objectstateoffset.thetaoff;
int tmppnum;
for(int i=1;i<=N;i++)
{
tmppnum=pnum*SPN;
GetKernelDim_1D(blocknum,threadnum,tmppnum);
kernelGeometryModel<<<blocknum,threadnum>>>(scan,pnum,d_particle,tmppnum,d_tmpparticle,d_rng,objectstateoffset,stateconstrain,egomotion);
sampleParticle(pnum,d_particle,tmppnum,d_tmpparticle,estimate);
// Shrink every search window and relax the annealing for the next round.
objectstateoffset.thetaoff*=objectstateoffset.thetazoom;
objectstateoffset.wloff*=objectstateoffset.wlzoom;
objectstateoffset.wroff*=objectstateoffset.wrzoom;
objectstateoffset.lfoff*=objectstateoffset.lfzoom;
objectstateoffset.lboff*=objectstateoffset.lbzoom;
objectstateoffset.anneal*=objectstateoffset.annealratio;
}
{
// Final refinement at exactly the target precisions, no annealing.
objectstateoffset.thetaoff=objectstateoffset.thetaprec;
objectstateoffset.wloff=objectstateoffset.wlprec;
objectstateoffset.wroff=objectstateoffset.wrprec;
objectstateoffset.lfoff=objectstateoffset.lfprec;
objectstateoffset.lboff=objectstateoffset.lbprec;
objectstateoffset.anneal=1;
tmppnum=pnum*SPN;
GetKernelDim_1D(blocknum,threadnum,tmppnum);
kernelGeometryModel<<<blocknum,threadnum>>>(scan,pnum,d_particle,tmppnum,d_tmpparticle,d_rng,objectstateoffset,stateconstrain,egomotion);
sampleParticle(pnum,d_particle,tmppnum,d_tmpparticle,estimate);
}
}
// Scaling-Series Particle Filter pass over the motion dimensions (a, v,
// omega), mirroring SSPF_GeometryModel. In plain particle-filter mode
// (egomotion.pfflag) the iterative refinement is skipped and a single pass
// over MAXPN hypotheses is run instead.
void SSPF_MotionModel(LaserScan * scan, int & pnum, VehicleState * d_particle, VehicleState * d_tmpparticle, thrust::minstd_rand * d_rng, VehicleState & estimate, ObjectStateOffset & objectstateoffset, EgoMotion & egomotion)
{
double ratio=1,vratio=1,maxratio=1;
CALRATIO(ratio,vratio,maxratio,objectstateoffset.aoff,objectstateoffset.aprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.voff,objectstateoffset.vprec);
CALRATIO(ratio,vratio,maxratio,objectstateoffset.omegaoff,objectstateoffset.omegaprec);
objectstateoffset.anneal=maxratio*maxratio;
double N=log(vratio)/log(2);
CALZOOM(objectstateoffset.azoom,objectstateoffset.aoff,objectstateoffset.aprec,N);
CALZOOM(objectstateoffset.vzoom,objectstateoffset.voff,objectstateoffset.vprec,N);
CALZOOM(objectstateoffset.omegazoom,objectstateoffset.omegaoff,objectstateoffset.omegaprec,N);
objectstateoffset.annealratio=pow(objectstateoffset.anneal,-1/N);
StateConstrain stateconstrain;
// NOTE(review): stateconstrain's fields are read here (std::max/std::min)
// before being assigned in this function; this relies on StateConstrain's
// constructor providing sensible defaults -- TODO confirm.
if(!(egomotion.pfflag))
{
stateconstrain.amin=std::max(stateconstrain.amin,estimate.a-objectstateoffset.aoff);
stateconstrain.amax=std::min(stateconstrain.amax,estimate.a+objectstateoffset.aoff);
stateconstrain.vmin=std::max(stateconstrain.vmin,estimate.v-objectstateoffset.voff);
stateconstrain.vmax=std::min(stateconstrain.vmax,estimate.v+objectstateoffset.voff);
stateconstrain.kmin=std::max(stateconstrain.kmin,estimate.k-objectstateoffset.koff);
stateconstrain.kmax=std::min(stateconstrain.kmax,estimate.k+objectstateoffset.koff);
stateconstrain.omegamin=std::max(stateconstrain.omegamin,estimate.omega-objectstateoffset.omegaoff);
stateconstrain.omegamax=std::min(stateconstrain.omegamax,estimate.omega+objectstateoffset.omegaoff);
}
int tmppnum;
// Iterative window-shrinking refinement runs only in SSPF mode.
for(int i=1;i<=N&&!(egomotion.pfflag);i++)
{
tmppnum=pnum*SPN;
GetKernelDim_1D(blocknum,threadnum,tmppnum);
kernelMotionModel<<<blocknum,threadnum>>>(scan,pnum,d_particle,tmppnum,d_tmpparticle,d_rng,objectstateoffset,stateconstrain,egomotion);
sampleParticle(pnum,d_particle,tmppnum,d_tmpparticle,estimate);
objectstateoffset.aoff*=objectstateoffset.azoom;
objectstateoffset.voff*=objectstateoffset.vzoom;
objectstateoffset.omegaoff*=objectstateoffset.omegazoom;
objectstateoffset.anneal*=objectstateoffset.annealratio;
}
{
// Final pass: SSPF mode shrinks to target precision; PF mode uses the full
// MAXPN population with the current jitter widths.
if(!(egomotion.pfflag))
{
objectstateoffset.aoff=objectstateoffset.aprec;
objectstateoffset.voff=objectstateoffset.vprec;
objectstateoffset.omegaoff=objectstateoffset.omegaprec;
objectstateoffset.anneal=1;
tmppnum=pnum*SPN;
}
else
{
objectstateoffset.anneal=1;
tmppnum=MAXPN;
}
GetKernelDim_1D(blocknum,threadnum,tmppnum);
kernelMotionModel<<<blocknum,threadnum>>>(scan,pnum,d_particle,tmppnum,d_tmpparticle,d_rng,objectstateoffset,stateconstrain,egomotion);
// Advance the accepted hypotheses one motion step before resampling.
kernelMotionUpdate<<<blocknum,threadnum>>>(tmppnum,d_tmpparticle,egomotion);
sampleParticle(pnum,d_particle,tmppnum,d_tmpparticle,estimate);
}
}
//==============================================================================
// Lazily allocates the device-side laser scan buffer; a no-op when it is
// already allocated.
extern "C" void cuda_InitLaserScan()
{
    if(d_scan!=NULL)
    {
        return;
    }
    cudaMalloc(&(d_scan),sizeof(LaserScan));
}
// Uploads a new laser scan to the device and updates the cached ego motion:
// when a previous pose is available, its displacement is rotated into the new
// scan's local frame to obtain the relative motion (dx, dy, dtheta) and the
// elapsed time dt; the new pose then becomes the reference for the next scan.
extern "C" void cuda_SetLaserScan(LaserScan & laserScan)
{
    cudaMemcpy(d_scan,&laserScan,sizeof(LaserScan),cudaMemcpyHostToDevice);
    h_scan=laserScan;
    if(h_egomotion.validflag)
    {
        // World-frame displacement of the previous pose relative to the new one.
        double worldDx=h_egomotion.x-laserScan.x;
        double worldDy=h_egomotion.y-laserScan.y;
        // Rotate the displacement into the new scan's frame.
        double cosTheta=cos(laserScan.theta);
        double sinTheta=sin(laserScan.theta);
        h_egomotion.dx=cosTheta*worldDx+sinTheta*worldDy;
        h_egomotion.dy=-sinTheta*worldDx+cosTheta*worldDy;
        h_egomotion.dtheta=h_egomotion.theta-laserScan.theta;
        h_egomotion.dt=laserScan.timestamp-h_egomotion.timestamp;
    }
    // Cache the new pose as the reference for the next update.
    h_egomotion.x=laserScan.x;
    h_egomotion.y=laserScan.y;
    h_egomotion.theta=laserScan.theta;
    h_egomotion.timestamp=laserScan.timestamp;
    h_egomotion.validflag=1;
    // Angular spacing between adjacent beams.
    h_egomotion.density=2*PI/laserScan.beamnum;
}
// Releases the device-side laser scan buffer allocated by cuda_InitLaserScan.
extern "C" void cuda_FreeLaserScan()
{
CUDAFREE(d_scan);
}
//==============================================================================
// Allocates all device-side buffers for one tracker and seeds one RNG per
// temporary-particle slot. d_particle holds the resampled set (<= RQPN
// entries); d_tmpparticle and d_rng are sized for the maximum temporary
// population MAXPN.
extern "C" void cuda_OpenTracker(TrackerDataContainer & trackerDataContainer)
{
    trackerDataContainer.pnum=0;
    cudaMalloc(&(trackerDataContainer.d_particle),sizeof(VehicleState)*RQPN);
    cudaMalloc(&(trackerDataContainer.d_tmpparticle),sizeof(VehicleState)*MAXPN);
    cudaMalloc(&(trackerDataContainer.d_rng),sizeof(thrust::minstd_rand)*MAXPN);
    // Heap-allocate the seed staging buffer: MAXPN ints on the stack risks a
    // stack overflow for large particle budgets.
    int * h_seed=new int[MAXPN];
    thrust::generate(h_seed,h_seed+MAXPN,rand);
    int * d_seed;
    cudaMalloc(&(d_seed),sizeof(int)*MAXPN);
    cudaMemcpy(d_seed,h_seed,sizeof(int)*MAXPN,cudaMemcpyHostToDevice);
    GetKernelDim_1D(blocks,threads,MAXPN);
    kernelSetRandomSeed<<<blocks,threads>>>(d_seed,trackerDataContainer.d_rng,MAXPN);
    CUDAFREE(d_seed);
    delete[] h_seed;
}
// Releases all device buffers owned by the tracker container.
extern "C" void cuda_CloseTracker(TrackerDataContainer & trackerDataContainer)
{
CUDAFREE(trackerDataContainer.d_particle);
CUDAFREE(trackerDataContainer.d_tmpparticle);
CUDAFREE(trackerDataContainer.d_rng);
}
//==============================================================================
// First-frame geometry estimation: runs an SSPF geometry pass seeded with a
// single particle (the initial detection already in
// trackerResultContainer.estimate), clamps the resulting per-dimension
// spreads, then rebuilds the model and extracts the matched edge point ids
// for the result container.
extern "C" void cuda_InitGeometry(TrackerDataContainer & trackerDataContainer, TrackerResultContainer & trackerResultContainer)
{
// objectstateoffset relies on its constructor defaults for the search
// windows -- TODO confirm.
ObjectStateOffset objectstateoffset;
EgoMotion egomotion=h_egomotion;
egomotion.pfflag=0;
trackerDataContainer.pnum=1;
cudaMemcpy(trackerDataContainer.d_particle,&(trackerResultContainer.estimate),sizeof(VehicleState),cudaMemcpyHostToDevice);
SSPF_GeometryModel(d_scan,trackerDataContainer.pnum,trackerDataContainer.d_particle,trackerDataContainer.d_tmpparticle,trackerDataContainer.d_rng,trackerResultContainer.estimate,objectstateoffset,egomotion);
// Lower-bound each geometry sigma at MINSIGMA, then flag dimensions whose
// spread reaches UNCERTAINTHRESH as MAXSIGMA (i.e. effectively unknown).
trackerResultContainer.estimate.dwl=trackerResultContainer.estimate.dwl>MINSIGMA?trackerResultContainer.estimate.dwl:MINSIGMA;
trackerResultContainer.estimate.dwr=trackerResultContainer.estimate.dwr>MINSIGMA?trackerResultContainer.estimate.dwr:MINSIGMA;
trackerResultContainer.estimate.dlf=trackerResultContainer.estimate.dlf>MINSIGMA?trackerResultContainer.estimate.dlf:MINSIGMA;
trackerResultContainer.estimate.dlb=trackerResultContainer.estimate.dlb>MINSIGMA?trackerResultContainer.estimate.dlb:MINSIGMA;
trackerResultContainer.estimate.dwl=trackerResultContainer.estimate.dwl<UNCERTAINTHRESH?trackerResultContainer.estimate.dwl:MAXSIGMA;
trackerResultContainer.estimate.dwr=trackerResultContainer.estimate.dwr<UNCERTAINTHRESH?trackerResultContainer.estimate.dwr:MAXSIGMA;
trackerResultContainer.estimate.dlf=trackerResultContainer.estimate.dlf<UNCERTAINTHRESH?trackerResultContainer.estimate.dlf:MAXSIGMA;
trackerResultContainer.estimate.dlb=trackerResultContainer.estimate.dlb<UNCERTAINTHRESH?trackerResultContainer.estimate.dlb:MAXSIGMA;
// Re-measure both edges on the host-side scan copy, collecting the matched
// point ids/counts for output.
deviceBuildModel(trackerResultContainer.estimate,egomotion.density);
trackerResultContainer.estimate.weight=0;
trackerResultContainer.estimate.count=0;
trackerResultContainer.edgepointnum[0]=0;
deviceMeasureEdge(trackerResultContainer.estimate,0,&h_scan,1,&(trackerResultContainer.edgepointnum[0]),trackerResultContainer.edgepointid[0],1);
trackerResultContainer.edgepointnum[1]=0;
deviceMeasureEdge(trackerResultContainer.estimate,1,&h_scan,1,&(trackerResultContainer.edgepointnum[1]),trackerResultContainer.edgepointid[1],1);
}
// Second-frame initialization: estimates the motion state with an SSPF motion
// pass, refines geometry with an SSPF geometry pass, then fuses the previous
// and current geometry estimates per dimension as a product of Gaussians:
//   mean  = (m_pre*s_cur^2 + m_cur*s_pre^2) / (s_pre^2 + s_cur^2)
//   sigma = sqrt(s_pre^2 * s_cur^2 / (s_pre^2 + s_cur^2))
extern "C" void cuda_InitMotion(TrackerDataContainer & trackerDataContainer, TrackerResultContainer & trackerResultContainer)
{
VehicleState preestimate=trackerResultContainer.estimate;
VehicleState curestimate=preestimate;
ObjectStateOffset objectstateoffset;
objectstateoffset.thetaoff=objectstateoffset.thetaprec;
// For dimensions that already converged below their precision, pin the
// search window to the precision.
if(preestimate.dwl<objectstateoffset.wlprec)
{
objectstateoffset.wloff=objectstateoffset.wlprec;
}
if(preestimate.dwr<objectstateoffset.wrprec)
{
objectstateoffset.wroff=objectstateoffset.wrprec;
}
if(preestimate.dlf<objectstateoffset.lfprec)
{
objectstateoffset.lfoff=objectstateoffset.lfprec;
}
if(preestimate.dlb<objectstateoffset.lbprec)
{
objectstateoffset.lboff=objectstateoffset.lbprec;
}
EgoMotion egomotion=h_egomotion;
egomotion.pfflag=0;
trackerDataContainer.pnum=1;
cudaMemcpy(trackerDataContainer.d_particle,&(preestimate),sizeof(VehicleState),cudaMemcpyHostToDevice);
SSPF_MotionModel(d_scan,trackerDataContainer.pnum,trackerDataContainer.d_particle,trackerDataContainer.d_tmpparticle,trackerDataContainer.d_rng,curestimate,objectstateoffset,egomotion);
// Positional spread comes from the motion pass; the geometry pass below
// would overwrite it.
double dx=curestimate.dx;
double dy=curestimate.dy;
double dtheta=curestimate.dtheta;
trackerDataContainer.pnum=1;
cudaMemcpy(trackerDataContainer.d_particle,&(curestimate),sizeof(VehicleState),cudaMemcpyHostToDevice);
SSPF_GeometryModel(d_scan,trackerDataContainer.pnum,trackerDataContainer.d_particle,trackerDataContainer.d_tmpparticle,trackerDataContainer.d_rng,curestimate,objectstateoffset,egomotion);
trackerResultContainer.estimate=curestimate;
// Clamp the current sigmas before fusion (see function header for formulas).
curestimate.dwl=curestimate.dwl>MINSIGMA?curestimate.dwl:MINSIGMA;
curestimate.dwr=curestimate.dwr>MINSIGMA?curestimate.dwr:MINSIGMA;
curestimate.dlf=curestimate.dlf>MINSIGMA?curestimate.dlf:MINSIGMA;
curestimate.dlb=curestimate.dlb>MINSIGMA?curestimate.dlb:MINSIGMA;
curestimate.dwl=curestimate.dwl<UNCERTAINTHRESH?curestimate.dwl:MAXSIGMA;
curestimate.dwr=curestimate.dwr<UNCERTAINTHRESH?curestimate.dwr:MAXSIGMA;
curestimate.dlf=curestimate.dlf<UNCERTAINTHRESH?curestimate.dlf:MAXSIGMA;
curestimate.dlb=curestimate.dlb<UNCERTAINTHRESH?curestimate.dlb:MAXSIGMA;
trackerResultContainer.estimate.dx=dx;trackerResultContainer.estimate.dy=dy;trackerResultContainer.estimate.dtheta=dtheta;
// Per-dimension Gaussian fusion of previous and current geometry.
trackerResultContainer.estimate.wl=(preestimate.wl*curestimate.dwl*curestimate.dwl+curestimate.wl*preestimate.dwl*preestimate.dwl)/(preestimate.dwl*preestimate.dwl+curestimate.dwl*curestimate.dwl);
trackerResultContainer.estimate.dwl=sqrt((preestimate.dwl*preestimate.dwl*curestimate.dwl*curestimate.dwl)/(preestimate.dwl*preestimate.dwl+curestimate.dwl*curestimate.dwl));
trackerResultContainer.estimate.dwl=trackerResultContainer.estimate.dwl>MINSIGMA?trackerResultContainer.estimate.dwl:MINSIGMA;
trackerResultContainer.estimate.wr=(preestimate.wr*curestimate.dwr*curestimate.dwr+curestimate.wr*preestimate.dwr*preestimate.dwr)/(preestimate.dwr*preestimate.dwr+curestimate.dwr*curestimate.dwr);
trackerResultContainer.estimate.dwr=sqrt((preestimate.dwr*preestimate.dwr*curestimate.dwr*curestimate.dwr)/(preestimate.dwr*preestimate.dwr+curestimate.dwr*curestimate.dwr));
trackerResultContainer.estimate.dwr=trackerResultContainer.estimate.dwr>MINSIGMA?trackerResultContainer.estimate.dwr:MINSIGMA;
trackerResultContainer.estimate.lf=(preestimate.lf*curestimate.dlf*curestimate.dlf+curestimate.lf*preestimate.dlf*preestimate.dlf)/(preestimate.dlf*preestimate.dlf+curestimate.dlf*curestimate.dlf);
trackerResultContainer.estimate.dlf=sqrt((preestimate.dlf*preestimate.dlf*curestimate.dlf*curestimate.dlf)/(preestimate.dlf*preestimate.dlf+curestimate.dlf*curestimate.dlf));
trackerResultContainer.estimate.dlf=trackerResultContainer.estimate.dlf>MINSIGMA?trackerResultContainer.estimate.dlf:MINSIGMA;
trackerResultContainer.estimate.lb=(preestimate.lb*curestimate.dlb*curestimate.dlb+curestimate.lb*preestimate.dlb*preestimate.dlb)/(preestimate.dlb*preestimate.dlb+curestimate.dlb*curestimate.dlb);
trackerResultContainer.estimate.dlb=sqrt((preestimate.dlb*preestimate.dlb*curestimate.dlb*curestimate.dlb)/(preestimate.dlb*preestimate.dlb+curestimate.dlb*curestimate.dlb));
trackerResultContainer.estimate.dlb=trackerResultContainer.estimate.dlb>MINSIGMA?trackerResultContainer.estimate.dlb:MINSIGMA;
// Rebuild the model for the fused estimate and extract matched edge points.
deviceBuildModel(trackerResultContainer.estimate,egomotion.density);
trackerResultContainer.estimate.weight=0;
trackerResultContainer.estimate.count=0;
trackerResultContainer.edgepointnum[0]=0;
deviceMeasureEdge(trackerResultContainer.estimate,0,&h_scan,1,&(trackerResultContainer.edgepointnum[0]),trackerResultContainer.edgepointid[0],1);
trackerResultContainer.edgepointnum[1]=0;
deviceMeasureEdge(trackerResultContainer.estimate,1,&h_scan,1,&(trackerResultContainer.edgepointnum[1]),trackerResultContainer.edgepointid[1],1);
}
// Per-frame tracker update. Two regimes:
//  - converged (tight x/y/theta spread and enough supporting points): restart
//    from the single previous estimate with wide motion windows and run in
//    SSPF mode (pfflag = 0);
//  - uncertain: keep the existing particle population and run a plain
//    particle-filter step with narrow jitter (pfflag = 1).
// After the motion pass, a well-supported track gets its geometry re-refined
// and fused with the previous estimate (product of Gaussians, as in
// cuda_InitMotion); otherwise the previous geometry is kept unchanged.
// Returns the pfflag actually used (true = tracker still in uncertain mode).
extern "C" bool cuda_UpdateTracker(TrackerDataContainer & trackerDataContainer, TrackerResultContainer & trackerResultContainer)
{
VehicleState preestimate=trackerResultContainer.estimate;
VehicleState curestimate=preestimate;
ObjectStateOffset objectstateoffset;
objectstateoffset.thetaoff=objectstateoffset.thetaprec;
// Pin already-converged geometry windows to their precisions.
if(preestimate.dwl<objectstateoffset.wlprec)
{
objectstateoffset.wloff=objectstateoffset.wlprec;
}
if(preestimate.dwr<objectstateoffset.wrprec)
{
objectstateoffset.wroff=objectstateoffset.wrprec;
}
if(preestimate.dlf<objectstateoffset.lfprec)
{
objectstateoffset.lfoff=objectstateoffset.lfprec;
}
if(preestimate.dlb<objectstateoffset.lbprec)
{
objectstateoffset.lboff=objectstateoffset.lbprec;
}
EgoMotion egomotion=h_egomotion;
// Convergence test on the previous frame's spreads and support count.
if(preestimate.dx<=2*UNCERTAINTHRESH&&preestimate.dy<=2*UNCERTAINTHRESH&&preestimate.dtheta<=UNCERTAINTHRESH_ANG&&preestimate.count>=UNCERTAINTHRESH_CNT)
{
objectstateoffset.aoff=DEG2RAD(30);
objectstateoffset.voff=10;
objectstateoffset.koff=0.5;
objectstateoffset.omegaoff=DEG2RAD(60);
egomotion.pfflag=0;
trackerDataContainer.pnum=1;
cudaMemcpy(trackerDataContainer.d_particle,&(preestimate),sizeof(VehicleState),cudaMemcpyHostToDevice);
}
else
{
objectstateoffset.aoff=DEG2RAD(5);
objectstateoffset.voff=2;
objectstateoffset.koff=0.05;
objectstateoffset.omegaoff=DEG2RAD(3);
egomotion.pfflag=1;
}
SSPF_MotionModel(d_scan,trackerDataContainer.pnum,trackerDataContainer.d_particle,trackerDataContainer.d_tmpparticle,trackerDataContainer.d_rng,curestimate,objectstateoffset,egomotion);
// Precedence note: && binds tighter than ||, so this reads
// count>=10 OR (tight x AND tight y AND tight theta AND enough points).
if(curestimate.count>=10||curestimate.dx<=2*UNCERTAINTHRESH&&curestimate.dy<=2*UNCERTAINTHRESH&&curestimate.dtheta<=UNCERTAINTHRESH_ANG&&curestimate.count>=UNCERTAINTHRESH_CNT)
{
// Positional spread comes from the motion pass; the geometry pass below
// would overwrite it.
double dx=curestimate.dx;
double dy=curestimate.dy;
double dtheta=curestimate.dtheta;
trackerDataContainer.pnum=1;
cudaMemcpy(trackerDataContainer.d_particle,&(curestimate),sizeof(VehicleState),cudaMemcpyHostToDevice);
SSPF_GeometryModel(d_scan,trackerDataContainer.pnum,trackerDataContainer.d_particle,trackerDataContainer.d_tmpparticle,trackerDataContainer.d_rng,curestimate,objectstateoffset,egomotion);
trackerResultContainer.estimate=curestimate;
// Clamp the current sigmas before fusion.
curestimate.dwl=curestimate.dwl>MINSIGMA?curestimate.dwl:MINSIGMA;
curestimate.dwr=curestimate.dwr>MINSIGMA?curestimate.dwr:MINSIGMA;
curestimate.dlf=curestimate.dlf>MINSIGMA?curestimate.dlf:MINSIGMA;
curestimate.dlb=curestimate.dlb>MINSIGMA?curestimate.dlb:MINSIGMA;
curestimate.dwl=curestimate.dwl<UNCERTAINTHRESH?curestimate.dwl:MAXSIGMA;
curestimate.dwr=curestimate.dwr<UNCERTAINTHRESH?curestimate.dwr:MAXSIGMA;
curestimate.dlf=curestimate.dlf<UNCERTAINTHRESH?curestimate.dlf:MAXSIGMA;
curestimate.dlb=curestimate.dlb<UNCERTAINTHRESH?curestimate.dlb:MAXSIGMA;
trackerResultContainer.estimate.dx=dx;trackerResultContainer.estimate.dy=dy;trackerResultContainer.estimate.dtheta=dtheta;
// Per-dimension Gaussian fusion of previous and current geometry:
// mean = (m1*s2^2 + m2*s1^2)/(s1^2+s2^2), sigma = sqrt(s1^2*s2^2/(s1^2+s2^2)).
trackerResultContainer.estimate.wl=(preestimate.wl*curestimate.dwl*curestimate.dwl+curestimate.wl*preestimate.dwl*preestimate.dwl)/(preestimate.dwl*preestimate.dwl+curestimate.dwl*curestimate.dwl);
trackerResultContainer.estimate.dwl=sqrt((preestimate.dwl*preestimate.dwl*curestimate.dwl*curestimate.dwl)/(preestimate.dwl*preestimate.dwl+curestimate.dwl*curestimate.dwl));
trackerResultContainer.estimate.dwl=trackerResultContainer.estimate.dwl>MINSIGMA?trackerResultContainer.estimate.dwl:MINSIGMA;
trackerResultContainer.estimate.wr=(preestimate.wr*curestimate.dwr*curestimate.dwr+curestimate.wr*preestimate.dwr*preestimate.dwr)/(preestimate.dwr*preestimate.dwr+curestimate.dwr*curestimate.dwr);
trackerResultContainer.estimate.dwr=sqrt((preestimate.dwr*preestimate.dwr*curestimate.dwr*curestimate.dwr)/(preestimate.dwr*preestimate.dwr+curestimate.dwr*curestimate.dwr));
trackerResultContainer.estimate.dwr=trackerResultContainer.estimate.dwr>MINSIGMA?trackerResultContainer.estimate.dwr:MINSIGMA;
trackerResultContainer.estimate.lf=(preestimate.lf*curestimate.dlf*curestimate.dlf+curestimate.lf*preestimate.dlf*preestimate.dlf)/(preestimate.dlf*preestimate.dlf+curestimate.dlf*curestimate.dlf);
trackerResultContainer.estimate.dlf=sqrt((preestimate.dlf*preestimate.dlf*curestimate.dlf*curestimate.dlf)/(preestimate.dlf*preestimate.dlf+curestimate.dlf*curestimate.dlf));
trackerResultContainer.estimate.dlf=trackerResultContainer.estimate.dlf>MINSIGMA?trackerResultContainer.estimate.dlf:MINSIGMA;
trackerResultContainer.estimate.lb=(preestimate.lb*curestimate.dlb*curestimate.dlb+curestimate.lb*preestimate.dlb*preestimate.dlb)/(preestimate.dlb*preestimate.dlb+curestimate.dlb*curestimate.dlb);
trackerResultContainer.estimate.dlb=sqrt((preestimate.dlb*preestimate.dlb*curestimate.dlb*curestimate.dlb)/(preestimate.dlb*preestimate.dlb+curestimate.dlb*curestimate.dlb));
trackerResultContainer.estimate.dlb=trackerResultContainer.estimate.dlb>MINSIGMA?trackerResultContainer.estimate.dlb:MINSIGMA;
}
else
{
// Track not yet reliable: keep the previous geometry, adopt only motion.
trackerResultContainer.estimate=curestimate;
trackerResultContainer.estimate.wl=preestimate.wl;trackerResultContainer.estimate.dwl=preestimate.dwl;
trackerResultContainer.estimate.wr=preestimate.wr;trackerResultContainer.estimate.dwr=preestimate.dwr;
trackerResultContainer.estimate.lf=preestimate.lf;trackerResultContainer.estimate.dlf=preestimate.dlf;
trackerResultContainer.estimate.lb=preestimate.lb;trackerResultContainer.estimate.dlb=preestimate.dlb;
}
// Rebuild the model for the final estimate and extract matched edge points.
deviceBuildModel(trackerResultContainer.estimate,egomotion.density);
trackerResultContainer.estimate.weight=0;
trackerResultContainer.estimate.count=0;
trackerResultContainer.edgepointnum[0]=0;
deviceMeasureEdge(trackerResultContainer.estimate,0,&h_scan,1,&(trackerResultContainer.edgepointnum[0]),trackerResultContainer.edgepointid[0],1);
trackerResultContainer.edgepointnum[1]=0;
deviceMeasureEdge(trackerResultContainer.estimate,1,&h_scan,1,&(trackerResultContainer.edgepointnum[1]),trackerResultContainer.edgepointid[1],1);
return egomotion.pfflag;
}
//==============================================================================
|
708
|
#include "includes.h"
// Normalizes an integer CDF into [0, 1]: out[i] = in[i] / in[n-1].
// Launch: 1D grid with at least n threads. The last CDF entry is treated as
// the total count and should be non-zero.
__global__ void normalize_cdf( unsigned int* d_input_cdf, float* d_output_cdf, int n )
{
    int global_index_1d = ( blockIdx.x * blockDim.x ) + threadIdx.x;
    if ( global_index_1d < n )
    {
        // Compute the scale inside the bounds check so out-of-range threads
        // never dereference d_input_cdf (avoids an out-of-bounds read when
        // n == 0 and a redundant global load per excess thread).
        const float normalization_constant = 1.f / d_input_cdf[ n - 1 ];
        unsigned int input_value = d_input_cdf[ global_index_1d ];
        d_output_cdf[ global_index_1d ] = input_value * normalization_constant;
    }
}
|
709
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
__global__ void findSubstr(char *str, char *substr, int *len)
{
    // One thread per candidate start offset: thread x compares
    // str[x .. x + *len - 1] against substr and reports a match via printf.
    int start = threadIdx.x;
    int n = *len;
    bool match = true;
    for (int j = 0; j < n; j++)
    {
        if (str[start + j] != substr[j])
        {
            match = false;
            break;
        }
    }
    if (match)
        printf("Substring present at index %i\n", start);
}
int main()
{
    cudaError_t err;

    // Read the haystack and the needle from stdin.
    char *str = (char *) calloc(BUFSIZ, sizeof(char));
    printf("Enter the String\n");
    scanf("%[^\n]%*c", str);
    int strLen = strlen(str);
    char *substr = (char *) calloc(BUFSIZ, sizeof(char));
    printf("Enter the substring\n");
    scanf("%[^\n]%*c", substr);
    int subStrLen = strlen(substr);

    // Device copies of both strings plus the substring length.
    char *dStr, *dSubStr;
    int *dLen;
    cudaMalloc(&dStr, strLen);
    cudaMalloc(&dSubStr, subStrLen);
    cudaMalloc(&dLen, sizeof(int));
    cudaMemcpy(dStr, str, strLen, cudaMemcpyHostToDevice);
    cudaMemcpy(dSubStr, substr, subStrLen, cudaMemcpyHostToDevice);
    cudaMemcpy(dLen, &subStrLen, sizeof(int), cudaMemcpyHostToDevice);

    // One thread per possible match position.
    findSubstr<<<1, strLen - subStrLen + 1>>>(dStr, dSubStr, dLen);
    err = cudaGetLastError();
    if(err != cudaSuccess)
    {
        printf("The error string: %s\n", cudaGetErrorString(err));
    }
    // Wait for the kernel so its device-side printf output is flushed
    // before the process exits (the original never synchronized).
    cudaDeviceSynchronize();

    cudaFree(dStr);
    cudaFree(dSubStr);
    cudaFree(dLen);
    // Release the host buffers (leaked in the original).
    free(str);
    free(substr);
    return 0;
}
|
710
|
#include "includes.h"
/*
Copyright 2014-2015 Dake Feng, Peri LLC, dakefeng@gmail.com
This file is part of TomograPeri.
TomograPeri is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
TomograPeri is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with TomograPeri. If not, see <http://www.gnu.org/licenses/>.
*/
#define blockx 16
#define blocky 16
__global__ void _weightTRightkernel_cuda(int num_slices, int num_grid, float beta, float *dev_F, float *dev_G, float*dev_wg3, float *dev_recon)
{
    // One thread per slice: update the right-edge voxel of that slice with
    // the weighted contribution of its three boundary neighbours.
    const int slice = blockIdx.x*blockDim.x + threadIdx.x;
    if (slice >= num_slices)
        return;
    const int center = (num_grid-1) + slice*num_grid*num_grid;
    // Neighbours participating in the right-boundary regularization term.
    const int neigh[3] = { center - 1, center + num_grid, center + num_grid - 1 };
    for (int q = 0; q < 3; q++) {
        const float w2 = 2*beta*dev_wg3[q];
        dev_F[center] += w2;
        dev_G[center] -= w2*(dev_recon[center] + dev_recon[neigh[q]]);
    }
}
|
711
|
// Date March 26 2029
// Programmer: Hemanta Bhattarai
// Program: To add two arrays
#include<stdio.h>
#include<stdlib.h> //for random numbers
// device kernal
// Device kernel: element-wise sum C = A + B, one thread per element
// (single-block launch; threadIdx.x is the element index).
__global__ void vecAdd(int *A, int *B, int *C)
{
    int idx = threadIdx.x;
    C[idx] = A[idx] + B[idx];
}
int main()
{
    // forward declaration of the host helper defined below
    int get_random();

    int *hA, *hB, *hC, *dA, *dB, *dC;
    int size_of_array;

    printf("Enter the size of array");
    scanf("%d",&size_of_array);
    int size = sizeof(int) * size_of_array;

    // host allocations
    hA = (int*)malloc(size);
    hB = (int*)malloc(size);
    hC = (int*)malloc(size);
    // device allocations
    cudaMalloc(&dA,size);
    cudaMalloc(&dB,size);
    cudaMalloc(&dC,size);

    // fill the inputs with random values
    for(int i=0; i<size_of_array; ++i) hA[i] = get_random();
    for(int i=0; i<size_of_array; ++i) hB[i] = get_random();

    // Copy only the inputs to the device.  The original also copied the
    // uninitialized hC buffer into dC, which was useless: the kernel
    // overwrites every element of dC.
    cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);

    // launch one thread per element (single block)
    vecAdd<<<1,size_of_array>>>(dA,dB,dC);

    // blocking copy back; also synchronizes with the kernel
    cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);

    for(int i=0; i< size_of_array; ++i) printf("%d + %d = %d\n", hA[i], hB[i], hC[i]);

    // free host memory
    free(hA);
    free(hB);
    free(hC);
    // free device memory
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    return 0;
}
//random number generator
// Pseudo-random integer in the inclusive range [1, 100].
int get_random()
{
    int r = rand() % 100;
    return r + 1;
}
|
712
|
//#include "crop_cuda.h"
//
//#include <stdio.h>
//#include <cstdlib>
//#include <math.h>
//#include <iostream>
//
//#include "../common/macro.h"
//
//
//namespace va_cv {
//
//texture<unsigned char, 2> srcTexture2D;
//__constant__ int rect[4];
//
//
//extern "C" __global__ void kernel_crop_grey(unsigned char *dst ) {
// // map from threadIdx/BlockIdx to pixel position(on dst)
// int dst_x = threadIdx.x;
// int dst_y = blockIdx.x;
//
// if (dst_x <= rect[2] && dst_y <= rect[3]){
// int dst_ofs = dst_y * rect[2] + dst_x;
// int src_x = dst_x;
// int src_y = dst_y;
// dst[dst_ofs] = tex2D(srcTexture2D, src_x, src_y);
// }
//}
//
//
//void CropCuda::crop_cuda_grey_int8(const unsigned char *src, int src_width, int src_height,
// unsigned char *dst,
// int crop_left, int crop_top, int crop_width, int crop_height) {
// // crop rect, use const value
// int rect_vec[4] = {crop_left, crop_top, crop_width, crop_height};
// cudaMemcpyToSymbol( rect, rect_vec, sizeof(int) * 4);
// // 每個block處理一行數據,一共height個block
//// const int threadsPerBlock = crop_width;
//// const int blocksPerGrid = crop_height;
//// dim3 grids(blocksPerGrid);
//// dim3 threads(threadsPerBlock);
//
//
// int dst_size = crop_width * crop_height;
// int src_size = src_width * src_height;
// unsigned char *dev_src, *dev_dst;
// cudaMalloc( (void**)&dev_dst, dst_size * sizeof(unsigned char) ) ;
// cudaMalloc( (void**)&dev_src, src_size * sizeof(unsigned char) ) ;
// cudaMemcpy( dev_src, src, src_size * sizeof(unsigned char), cudaMemcpyHostToDevice );
//
// cudaChannelFormatDesc desc = cudaCreateChannelDesc<unsigned char>();
// cudaBindTexture2D( NULL, srcTexture2D, dev_src, desc, src_width, src_height,
// sizeof(unsigned char) * src_width );
//
// kernel_crop_grey<<<crop_height,crop_width>>>( dev_dst );
//
// cudaMemcpy(dst, dev_dst, dst_size * sizeof(unsigned char), cudaMemcpyDeviceToHost);
//
// cudaUnbindTexture( srcTexture2D );
// cudaFree(dev_dst);
// cudaFree(dev_src);
//}
//
//}
|
713
|
/* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Maryam Dehnavi, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2019 Bogdan Simion and Maryam Dehnavi
* -------------
*/
// Assignment stub: intended to sum `val` across the lanes of a warp.
// Currently returns the input unchanged (left for the student to implement).
__inline__ __device__
float warpReduceSum(float val)
{
return val;
}
// Assignment stub: intended to sum `val` across an entire thread block.
// Currently returns the input unchanged (left for the student to implement).
__inline__ __device__
float blockReduceSum(float val)
{
return val;
}
// Assignment stub: dot product of g_idata1 and g_idata2 (N elements)
// accumulated into g_odata.  Body intentionally empty in this skeleton.
__global__ void dot_kernel8(float *g_idata1, float *g_idata2, float *g_odata,
int N)
{
}
// Assignment stub: dot product variant; body intentionally empty in this skeleton.
__global__ void dot_kernel9(float *g_idata1, float *g_idata2, float *g_odata,
int N)
{
}
// Assignment stub: dot product variant; body intentionally empty in this skeleton.
__global__ void dot_kernel10(float *g_idata1, float *g_idata2, float *g_odata,
int N)
{
}
|
714
|
#include "includes.h"
__global__ void kEltwiseLogregCost(float* predmap, float* indmap, float*indlogpred, float* correctprobs, int numCases, int numTasks, int per_thread_case) {
    // Element-wise logistic-regression cost: one block per task, each thread
    // handling a contiguous slice of `per_thread_case` cases.
    const float EPSILON = 1e-20; // probability floor so log() stays finite
    const int task = blockIdx.x;
    if (task >= numTasks) {
        return;
    }
    const int first = threadIdx.x * per_thread_case;
    const int last = min(first + per_thread_case, numCases);
    for (int c = first; c < last; ++c) {
        const int pos = task * numCases + c;
        // sigmoid of the raw prediction (fast-math device intrinsics)
        float p = __fdividef(1.0f, 1.0f + __expf(-predmap[pos]));
        // probability the model assigned to the true label
        float prob = (indmap[pos] == 1) ? p : (1 - p);
        prob = fmaxf(prob, EPSILON);
        indlogpred[pos] = __logf(prob);
        correctprobs[pos] = prob;
    }
}
|
715
|
// Seed all primal iterates (xbar, xcur, xn) with the input image and zero
// the dual variables (y1, y2).  2-D launch over pixels; the channel loop
// runs inside each thread.
__global__ void init(float* xbar, float* xcur, float* xn, float* y1, float* y2, float* img, int w, int h, int nc) {
    int px = threadIdx.x + blockDim.x * blockIdx.x;
    int py = threadIdx.y + blockDim.y * blockIdx.y;
    if (px >= w || py >= h)
        return;
    for (int c = 0; c < nc; c++) {
        int idx = px + w * py + w * h * c;
        float v = img[idx];
        xbar[idx] = v;
        xn[idx] = v;
        xcur[idx] = v;
        y1[idx] = 0.f;
        y2[idx] = 0.f;
    }
}
|
716
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
inline double seconds() {
    struct timeval now;
    struct timezone tz;
    gettimeofday(&now, &tz);
    return (double)now.tv_sec + (double)now.tv_usec * 1.e-6;
}
int total_size = 1024 * 1024; // 1MB
// Time cudaMalloc/cudaFree of `num` buffers of `size` bytes each, where
// num * size == total_size.  Prints the elapsed wall-clock time per phase.
void test(int size) {
    // Guard against a non-positive or oversized request: size == 0 divides by
    // zero, and size > total_size makes num == 0 (a zero-length VLA, UB).
    if (size <= 0 || size > total_size) {
        fprintf(stderr, "invalid size: %d (must be 1..%d)\n", size, total_size);
        return;
    }
    double iStart, iElaps;
    int num = total_size / size;
    float *d[num];
    iStart = seconds();
    for (int i = 0; i < num; i++) {
        cudaMalloc((float **)&d[i], size);
    }
    iElaps = seconds() - iStart;
    printf("cudaMalloc(%d) x %d Time elapsed %f sec\n", size, num, iElaps);
    iStart = seconds();
    for (int i = 0; i < num; i++) {
        cudaFree(d[i]);
    }
    iElaps = seconds() - iStart;
    printf("cudaFree(%d) x %d Time elapsed %f sec\n", size, num, iElaps);
}
int main(int argc, char **argv) {
    printf("%s Starting...\n", argv[0]);
    // The size argument is mandatory; the original dereferenced argv[1]
    // without checking argc and crashed when it was missing.
    if (argc < 2) {
        fprintf(stderr, "Usage: %s <allocation size in bytes>\n", argv[0]);
        return 1;
    }
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    cudaSetDevice(dev);
    int size = atoi(argv[1]);
    test(size);
    return (0);
}
|
717
|
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
// Fill a numRows x numCols row-major matrix with the deterministic pattern
// mat[i][j] = i*2.1f + j*3.2f (float arithmetic, widened to double on store).
void fill_matrix(double *mat, unsigned numRows, unsigned numCols)
{
    for (unsigned r = 0; r < numRows; r++) {
        double *row = mat + (size_t)r * numCols;
        for (unsigned c = 0; c < numCols; c++)
            row[c] = r*2.1f + c*3.2f;
    }
}
// Write the numRows x numCols matrix to the fixed file "assignment2_2_out",
// one row per line, each value formatted as "%4.4f ".
void print_matrix_to_file(double *mat, unsigned numRows, unsigned numCols)
{
    const char *fname = "assignment2_2_out";
    FILE *f = fopen(fname, "w");
    // fopen can fail (permissions, disk full); the original dereferenced the
    // NULL handle and crashed.
    if (f == NULL) {
        fprintf(stderr, "could not open %s for writing\n", fname);
        return;
    }
    for (unsigned i = 0; i < numRows; i++)
    {
        for (unsigned j = 0; j < numCols; j++)
            fprintf(f, "%4.4f ", mat[i*numCols + j]);
        fprintf(f, "\n");
    }
    fclose(f);
}
__global__ void MatrixMulKernel_col_maj(double* M, double* N, double* P, int Width) {
    // Row index of the P element (and of M)
    int Row = blockIdx.y*blockDim.y+threadIdx.y;
    // Column index of P (and of N)
    int Col = blockIdx.x*blockDim.x+threadIdx.x;
    if ((Row < Width) && (Col < Width)) {
        // Accumulate in double: the original used a float accumulator for
        // double matrices, silently losing precision on every product.
        double Pvalue = 0;
        for (int k = 0; k < Width; ++k) {
            Pvalue += M[Row*Width+k]*N[k*Width+Col];
        }
        P[Row*Width+Col] = Pvalue;
    }
}
__global__ void MatrixMulKernel_row_maj(double* M, double* N, double* P, int Width) {
    // NOTE(review): threadIdx.x/.y are swapped relative to the col_maj kernel
    // above — presumably deliberate, to compare memory-access patterns in the
    // assignment; confirm before "fixing" the index mapping.
    int Row = blockIdx.y*blockDim.y+threadIdx.x;
    int Col = blockIdx.x*blockDim.x+threadIdx.y;
    if ((Row < Width) && (Col < Width)) {
        // Accumulate in double (the original used a lossy float accumulator
        // for double matrices).
        double Pvalue = 0;
        for (int k = 0; k < Width; ++k) {
            Pvalue += M[Row*Width+k]*N[k*Width+Col];
        }
        P[Row*Width+Col] = Pvalue;
    }
}
int main(int argc,char **argv) {
    int N = 8192;
    size_t size = (size_t)N * N * sizeof(double);
    int thread_dim_ll[8];
    int thread_dim;
    double*h_matA = (double*)malloc(size);
    double*h_matB = (double*)malloc(size);
    double*h_matC = (double*)malloc(size); // result
    int loop, loop1, loop2; // loop variables
    float time_spent;
    fill_matrix(h_matA,N,N);
    fill_matrix(h_matB,N,N);
    printf("Thread dims\n");
    // candidate block edge lengths: 4, 8, 16, ..., 512
    for (loop=0;loop<8;loop++){
        thread_dim_ll[loop]=pow(2,2+loop);
    }
    printf("\nMatrix A (first 10*10 inputs)\n");
    for(loop1 = 0; loop1 < 10; loop1++){
        for (loop2=0;loop2 < 10; loop2++)
            printf("%f ", *(h_matA + N*loop1 + loop2));
    }
    printf("\n\nMatrix B (first 10*10 inputs)\n");
    for(loop1 = 0; loop1 < 10; loop1++){
        for (loop2=0;loop2 < 10; loop2++)
            printf("%f ", *(h_matB + N*loop1 + loop2));
    }
    double* d_matA; cudaMalloc(&d_matA, size);
    double* d_matB; cudaMalloc(&d_matB, size);
    double* d_matC; cudaMalloc(&d_matC, size);
    // GPU timing
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Copy matrices from host memory to device memory
    cudaMemcpy(d_matA, h_matA, size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_matB, h_matB, size,cudaMemcpyHostToDevice);
    for (loop = 0; loop < 8; loop++){
        thread_dim=thread_dim_ll[loop];
        // Square blocks above 32x32 exceed the 1024 threads-per-block limit,
        // so the launch would fail; skip those configurations.
        if (thread_dim * thread_dim > 1024) {
            printf("\nSkipping threadsPerBlock %d (would exceed 1024 threads per block)\n", thread_dim);
            continue;
        }
        // BUG FIX: the original wrote `dim3 threadsPerBlock = (thread_dim,thread_dim);`
        // — the comma operator made that dim3(thread_dim, 1, 1), so the kernel
        // never ran with square blocks.
        dim3 threadsPerBlock(thread_dim, thread_dim);
        dim3 blocksPerGrid ((N + threadsPerBlock.x - 1) /threadsPerBlock.x,(N + threadsPerBlock.y - 1) /threadsPerBlock.y);
        cudaEventRecord(start, 0);
        MatrixMulKernel_col_maj<<<blocksPerGrid, threadsPerBlock>>>(d_matA,d_matB, d_matC, N);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&time_spent, start, stop);
        printf("\nTime spent in col maj %f with threadsPerBlock %d \n",time_spent,thread_dim);
    }
    // h_matC contains the result in host memory
    cudaMemcpy(h_matC, d_matC, size,cudaMemcpyDeviceToHost);
    printf("\n\nMatrix C (first 10*10 outputs)\n");
    for(loop1 = 0; loop1 < 10; loop1++){
        for (loop2=0;loop2 < 10; loop2++)
            printf("%f ", *(h_matC + N*loop1 + loop2));
    }
    // Log outputs
    printf("\nWritting to file assignment_2_1_out as Mat C");
    print_matrix_to_file(h_matC,N,N);
    // Free timing events and device memory
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_matA);
    cudaFree(d_matB);
    cudaFree(d_matC);
    // Free host memory
    free(h_matA);
    free(h_matB);
    free(h_matC);
    return 0;
}
|
718
|
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#define A1 0.31938153
#define A2 -0.356563782
#define A3 1.781477937
#define A4 -1.821255978
#define A5 1.330274429
#define RSQRT2PI 0.3989422804
// Cumulative normal distribution via the Abramowitz & Stegun polynomial
// approximation (device-side, double precision).
__device__ double cndGPU(double d)
{
    const double K = 1.0 / (1.0 + 0.2316419 * fabs(d));
    const double poly = K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5))));
    double cnd = RSQRT2PI * exp(- 0.5 * d * d) * poly;
    // approximation above is for the negative tail; mirror for d > 0
    if (d > 0)
        cnd = 1.0 - cnd;
    return cnd;
}
///////////////////////////////////////////////////////////////////////////////
// Black-Scholes formula for both call and put
///////////////////////////////////////////////////////////////////////////////
// Closed-form Black-Scholes price of one option; both the call and the put
// premium are written through the reference parameters.
__device__ void BlackScholesBodyGPU
(
double& CallResult,
double& PutResult,
double S, //Stock price
double X, //Option strike
double T, //Option years
double R, //Riskless rate
double V //Volatility rate
)
{
    const double sqrtT = sqrt(T);
    const double d1 = (log(S / X) + (R + 0.5 * V * V) * T) / (V * sqrtT);
    const double d2 = d1 - V * sqrtT;
    const double CNDD1 = cndGPU(d1);
    const double CNDD2 = cndGPU(d2);
    const double expRT = exp(- R * T);
    // call and put share the same intermediates
    CallResult = S * CNDD1 - X * expRT * CNDD2;
    PutResult = X * expRT * (1.0 - CNDD2) - S * (1.0 - CNDD1);
}
////////////////////////////////////////////////////////////////////////////////
//Process an array of optN options on GPU
////////////////////////////////////////////////////////////////////////////////
// Price an array of optN options.  Grid-stride loop: every option is
// processed exactly once no matter how small the launch grid is.
__global__ void BlackScholesGPU(
double *d_CallResult,
double *d_PutResult,
double *d_StockPrice,
double *d_OptionStrike,
double *d_OptionYears,
double Riskfree,
double Volatility,
int optN
)
{
    const int stride = blockDim.x * gridDim.x;
    for (int opt = blockDim.x * blockIdx.x + threadIdx.x; opt < optN; opt += stride) {
        BlackScholesBodyGPU(d_CallResult[opt], d_PutResult[opt],
                            d_StockPrice[opt], d_OptionStrike[opt],
                            d_OptionYears[opt], Riskfree, Volatility);
    }
}
// Uniform pseudo-random double in [low, high] driven by rand():
// linear interpolation between low (t = 0) and high (t = 1).
double RandDouble(double low, double high){
    const double t = (double)rand() / (double)RAND_MAX;
    return (1.0 - t) * low + t * high;
}
#define OPT_N 400000
const int NUM_ITERATIONS = 512;
const int OPT_SZ = OPT_N * sizeof(double);
const double RISKFREE = 0.02;
const double VOLATILITY = 0.30;
int main()
{
    // host buffers: results and option parameters
    double * h_CallResultGPU = (double *)malloc(OPT_SZ);
    double * h_PutResultGPU = (double *)malloc(OPT_SZ);
    double * h_StockPrice = (double *)malloc(OPT_SZ);
    double * h_OptionStrike = (double *)malloc(OPT_SZ);
    double * h_OptionYears = (double *)malloc(OPT_SZ);
    double
    //Results calculated by GPU
    *d_CallResult,
    *d_PutResult,
    //GPU instance of input data
    *d_StockPrice,
    *d_OptionStrike,
    *d_OptionYears;
    cudaMalloc((void **)&d_CallResult, OPT_SZ);
    cudaMalloc((void **)&d_PutResult, OPT_SZ);
    cudaMalloc((void **)&d_StockPrice, OPT_SZ);
    cudaMalloc((void **)&d_OptionStrike, OPT_SZ);
    cudaMalloc((void **)&d_OptionYears, OPT_SZ);
    srand(5347);
    //Generate options set
    int i;
    for(i = 0; i < OPT_N; i++)
    {
        h_CallResultGPU[i] = 0.0;
        h_PutResultGPU[i] = -1.0;
        h_StockPrice[i] = RandDouble(5.0, 30.0);
        h_OptionStrike[i] = RandDouble(1.0, 100.0);
        h_OptionYears[i] = RandDouble(0.25, 10.0);
    }
    cudaMemcpy(d_StockPrice, h_StockPrice, OPT_SZ, cudaMemcpyHostToDevice);
    cudaMemcpy(d_OptionStrike, h_OptionStrike, OPT_SZ, cudaMemcpyHostToDevice);
    cudaMemcpy(d_OptionYears, h_OptionYears, OPT_SZ, cudaMemcpyHostToDevice);
    for(i = 0; i < NUM_ITERATIONS; i++){
        // BUG FIX: the original passed d_OptionStrike and d_StockPrice in
        // swapped order, so every option was priced with spot and strike
        // exchanged.  The kernel expects (..., StockPrice, OptionStrike, ...).
        BlackScholesGPU<<<256, 128>>>(
            d_CallResult,
            d_PutResult,
            d_StockPrice,
            d_OptionStrike,
            d_OptionYears,
            RISKFREE,
            VOLATILITY,
            OPT_N
        );
    }
    cudaMemcpy(h_CallResultGPU, d_CallResult, OPT_SZ, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_PutResultGPU, d_PutResult, OPT_SZ, cudaMemcpyDeviceToHost);
    // release device and host memory (the original leaked everything)
    cudaFree(d_CallResult);
    cudaFree(d_PutResult);
    cudaFree(d_StockPrice);
    cudaFree(d_OptionStrike);
    cudaFree(d_OptionYears);
    free(h_CallResultGPU);
    free(h_PutResultGPU);
    free(h_StockPrice);
    free(h_OptionStrike);
    free(h_OptionYears);
    return 0;
}
|
719
|
/* File: vec_add.cu
* Purpose: Implement vector addition on a gpu using cuda
*
* Compile: nvcc [-g] [-G] -o vec_add vec_add.cu
* Run: ./vec_add
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
// Element-wise sum z = x + y; one thread per element, with a tail guard for
// threads past the end of the vectors.
__global__ void Vec_add(unsigned int x[], unsigned int y[], unsigned int z[], int n) {
    int gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= n)
        return;
    z[gid] = x[gid] + y[gid];
}
int main(int argc, char* argv[]) {
    int n, m;
    unsigned int *h_x, *h_y, *h_z;
    unsigned int *d_x, *d_y, *d_z;
    size_t size;
    /* Define vector length: n * m elements in total */
    n = 2621440;
    m = 320;
    size = (size_t)m * n * sizeof(unsigned int);
    // Allocate the host vectors (~3 GiB each) and fail cleanly instead of
    // writing through a NULL pointer when the allocation is refused.
    h_x = (unsigned int*) malloc(size);
    h_y = (unsigned int*) malloc(size);
    h_z = (unsigned int*) malloc(size);
    if (h_x == NULL || h_y == NULL || h_z == NULL) {
        fprintf(stderr, "host allocation of %zu bytes failed\n", size);
        return 1;
    }
    for (int i = 0; i < n * m; i++) {
        h_x[i] = i+1;
        h_y[i] = n-i;
    }
    printf("Input size = %d\n", n * m);
    // Event creation
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float time1 = 0;
    /* Allocate vectors in device memory */
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    cudaMalloc(&d_z, size);
    /* Copy vectors from host memory to device memory */
    cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
    // Start timer
    cudaEventRecord( start, 0 );
    /* Kernel call: ceiling-division grid so any element count is fully
       covered, not only exact multiples of 256 */
    int threads = 256;
    int blocks = (n * m + threads - 1) / threads;
    Vec_add<<<blocks, threads>>>(d_x, d_y, d_z, n * m);
    // End timer
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &time1, start, stop );
    cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
    printf("Execution time = %f ms\n", time1);
    /* Free timing events and device memory */
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    /* Free host memory */
    free(h_x);
    free(h_y);
    free(h_z);
    return 0;
} /* main */
|
720
|
#include "includes.h"
// Repeat each input value `factor` times: expanded[i] = values[i / factor].
// Grid-stride loop, so any launch configuration covers all outputs.
__global__ void expandKernel(double* values, int n_original, int factor, double* expanded){
    int tid0 = threadIdx.x + blockIdx.x*blockDim.x ;
    int stride = blockDim.x*gridDim.x ;
    for ( int tid = tid0 ; tid < n_original*factor ; tid += stride){
        // Plain integer division replaces the original
        // floor(double(tid)/factor): identical for non-negative tid, without
        // the int -> double -> floor round trip.
        expanded[tid] = values[tid / factor] ;
    }
}
|
721
|
#include<stdio.h>
// c = a + b element-wise; threads with an index beyond n do nothing.
__global__ void vecadd(float *a, float *b, float *c, int n)
{
    int gid = threadIdx.x + blockDim.x*blockIdx.x;
    if (gid >= n)
        return;
    c[gid] = a[gid] + b[gid];
}
int main(){
    int n;
    scanf("%d",&n);
    // BUG FIX: host data must be float to match the kernel's float pointers;
    // the original used int arrays, printed them with %d, and copied only
    // sizeof(int) bytes in each direction.
    float a[n], b[n], c[n];
    for(int i=0; i<n; i++)
        scanf("%f",&a[i]);
    for(int i=0;i<n; i++)
        scanf("%f",&b[i]);
    float *da,*db,*dc;
    int size = n*sizeof(float);
    cudaMalloc((void **) &da,size);
    cudaMalloc((void **) &db,size);
    cudaMalloc((void **) &dc,size);
    // copy the full arrays, not just sizeof(int) bytes
    cudaMemcpy(da,a,size,cudaMemcpyHostToDevice);
    cudaMemcpy(db,b,size,cudaMemcpyHostToDevice);
    // 32 threads per block to match the ceil(n/32) grid; the original's
    // 15 threads/block left most elements uncomputed.
    vecadd<<<(int)ceil(n/32.0),32>>>(da,db,dc,n);
    cudaMemcpy(c,dc,size,cudaMemcpyDeviceToHost);
    for(int i=0; i<n; i++)
        printf("%f ",c[i]);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    return 0;
}
|
722
|
#include "includes.h"
// Write each element's own global index into it: v[i] = i for i < size.
__global__ void fill(int * v, std::size_t size)
{
    const auto gid = blockIdx.x * blockDim.x + threadIdx.x;
    if (gid >= size)
        return;
    v[gid] = gid;
}
|
723
|
#include "includes.h"
/*
CUDA MATRIX NORMALIZATION
MOHAMMED ARBAAZ SHAREEF
A2077541
ASSIGNMENT-4
INTRODUCTION TO PARALLEL AND DISTRIBUTED COMPUTING
*/
// Including all the required libraries
/* Program Parameters */
#define MAXN 8000 /* Max value of N */
int N; /* Matrix size (runtime value, expected <= MAXN) */
/* Host matrices, flattened MAXN x MAXN, statically allocated */
float A[MAXN*MAXN], B[MAXN*MAXN];
// Default launch configuration: 32 blocks of 64 threads each.
int numBlocks = 32;
int numThreadsPerBlock = 64;
/* junk: obfuscated leftover macro, unused in this file */
#define randm() 4|2[uid]&3
/* (stale comment) returns a seed for srand based on the time */
// One thread per column: compute the column's mean and standard deviation
// of d_A (stored column-major, element (row, col) at d_A[col*n+row]) and
// write the normalized values transposed into d_B.
__global__ void normCalc (float *d_A, float *d_B, int n) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (col < n){
        // BUG FIX: the original declared row/mu/sigma as `__shared__ int` —
        // every thread of the block raced on the same three variables, and
        // the float accumulations were truncated to int.  These are
        // per-column values, so per-thread locals of the right type are
        // correct, and the divergent __syncthreads() calls are gone too.
        int row;
        float mu = 0.0f;
        for (row = 0; row < n; row++)
            mu += d_A[col*n+row];
        mu /= (float) n;
        float sigma = 0.0f;
        for (row = 0; row < n; row++)
            sigma += powf(d_A[col*n+row] - mu, 2.0f);
        sigma /= (float) n;
        sigma = sqrtf(sigma);
        for (row = 0; row < n; row++) {
            if (sigma == 0.0f)
                d_B[row*n+col] = 0.0f;
            else
                d_B[row*n+col] = (d_A[col*n+row] - mu) / sigma;
        }
    }
}
|
724
|
#include <cuda_runtime.h>
#include <stdio.h>
#include "includes/kernel.cuh"
#include "includes/utils.cuh"
// Per-element transforms accumulated along each row: identity, square,
// x-weighted and y-weighted intensity (the four "L" feature tables).
#define L1Func(I, x, y) (I)
#define L2Func(I, x, y) (powf(I, 2))
#define LxFunc(I, x, y) (x * I)
#define LyFunc(I, x, y) (y * I)
// Kernel template: one thread per row; the thread walks its row and stores
// the running sum of func(pixel, x, y) at every position.
#define RowCumSum(name, func) \
__global__ void name(const float *image, float *rowCumSum, int colNumberM, \
int rowNum) { \
float sum = 0; \
int xIndex = threadIdx.x + blockIdx.x * blockDim.x; \
if (xIndex >= rowNum) return; \
for (int i = 0; i < colNumberM; ++i) { \
sum += func(image[xIndex * colNumberM + i], i, xIndex); \
rowCumSum[xIndex * colNumberM + i] = sum; \
} \
}
// Use the macro to create the kernels that compute rows of the L tables
RowCumSum(calcL1RowCumSum, L1Func);
RowCumSum(calcL2RowCumSqrSum, L2Func);
RowCumSum(calcLxRowCumGradntSum, LxFunc);
RowCumSum(calcLyRowCumGradntSum, LyFunc);
// Sum the L tables by column: adds the previous row's totals into each row,
// turning per-row cumulative sums into a 2-D summed-area table.
// NOTE(review): callers pass the same pointer for rowCumSum and SumTable, so
// row i-1 already holds the accumulated total when row i is processed — the
// in-place aliasing is what makes this a column prefix sum.  One thread per
// column; confirm the aliasing is intended before refactoring.
__global__ void calcSumTable(const float *rowCumSum, float *SumTable,
int rowNumberN, int colNumberM) {
int xIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (xIndex >= colNumberM) return;
for (int i = 1; i < rowNumberN; i++) {
SumTable[i * colNumberM + xIndex] +=
rowCumSum[(i - 1) * colNumberM + xIndex];
}
}
// Helper: sum of the Kx x Ky window whose top-left corner is (startX, startY),
// read from a summed-area table via the standard four-corner formula.
// startX/startY are pre-decremented to address the row/column just outside
// the window; the `< 0` guards handle windows touching the image border.
__device__ float computeS(float *sumTable, int rowNumberN, int colNumberM,
int startX, int startY, int Kx, int Ky) {
startX--;
startY--;
float S =
sumTable[startX + Kx + (Ky + startY) * colNumberM] -
(startX < 0 ? 0 : sumTable[startX + (Ky + startY) * colNumberM]) -
(startY < 0 ? 0 : sumTable[startX + Kx + startY * colNumberM]) +
(startX < 0 || startY < 0 ? 0 : sumTable[startX + startY * colNumberM]);
return S;
}
// For every candidate window position (startX, startY), derive the window's
// four features (mean, variance, x-gradient, y-gradient) from the four
// summed-area tables and store the 4-D Euclidean distance to the template's
// features.  2-D launch over valid window positions.
__global__ void calculateFeatureDifference(float *templateFeatures,
int colNumberM, int rowNumberN,
float *l1SumTable, float *l2SumTable,
float *lxSumTable, float *lySumTable,
int Kx, int Ky, float *differences) {
// number of valid window positions along each axis
int widthLimit = colNumberM - Kx + 1;
int heightLimit = rowNumberN - Ky + 1;
float meanVector;
float varianceVector;
float xGradientVector;
float yGradientVector;
int startX = threadIdx.x + blockIdx.x * blockDim.x;
int startY = threadIdx.y + blockIdx.y * blockDim.y;
if (startX >= widthLimit || startY >= heightLimit) return;
// window sums of I and I^2 give mean and variance
float S1D =
computeS(l1SumTable, rowNumberN, colNumberM, startX, startY, Kx, Ky);
float S2D =
computeS(l2SumTable, rowNumberN, colNumberM, startX, startY, Kx, Ky);
meanVector = S1D / (Kx * Ky);
varianceVector = S2D / (Kx * Ky) - powf(meanVector, 2);
// x- and y-weighted window sums give the gradient features
float SxD =
computeS(lxSumTable, rowNumberN, colNumberM, startX, startY, Kx, Ky);
xGradientVector = 4 * (SxD - (startX + Kx / 2.0) * S1D) / (Kx * Kx * Ky);
float SyD =
computeS(lySumTable, rowNumberN, colNumberM, startX, startY, Kx, Ky);
yGradientVector = 4 * (SyD - (startY + Ky / 2.0) * S1D) / (Ky * Ky * Kx);
// 4-D Euclidean distance between window and template feature vectors
differences[startX + startY * widthLimit] = norm4df(
templateFeatures[0] - meanVector, templateFeatures[1] - varianceVector,
templateFeatures[2] - xGradientVector,
templateFeatures[3] - yGradientVector);
}
// Build the four summed-area tables (L1 = intensity, L2 = squared intensity,
// Lx/Ly = x-/y-weighted intensity) for the M x N image I on the device, and
// accumulate the template T's four reference features on the host.
// Ownership of the device tables is returned through *sumTable; the caller
// frees them with cudaFree.
void preprocess(const float *I, const float *T, int M, int N, int Kx, int Ky,
SumTable *sumTable, float *featuresT, int STableThread) {
float *l1SumTable;
float *l2SumTable;
float *lxSumTable;
float *lySumTable;
AllocateCudaMem(&l1SumTable, sizeof(float) * M * N);
AllocateCudaMem(&l2SumTable, sizeof(float) * M * N);
AllocateCudaMem(&lxSumTable, sizeof(float) * M * N);
AllocateCudaMem(&lySumTable, sizeof(float) * M * N);
float *dev_I;
AllocateCudaMem(&dev_I, sizeof(float) * M * N);
cudaMemcpy(dev_I, I, sizeof(float) * M * N, cudaMemcpyHostToDevice);
// Use streams to ensure the order: each table's row pass and column pass
// are enqueued in the same stream, so the four tables build independently
// but each table's two passes stay ordered.
cudaStream_t l1Stream, l2Stream, lxStream, lyStream;
cudaStreamCreate(&l1Stream); // creates a new asynchronous stream
cudaStreamCreate(&l2Stream);
cudaStreamCreate(&lxStream);
cudaStreamCreate(&lyStream);
// Compute the L tables row by row (cumulative sums along each row)
int rowBlocksize = (N + STableThread - 1) / STableThread;
int sumTableBlocksize = (M + STableThread - 1) / STableThread;
calcL1RowCumSum<<<rowBlocksize, STableThread, 0, l1Stream>>>(
dev_I, l1SumTable, M, N);
calcL2RowCumSqrSum<<<rowBlocksize, STableThread, 0, l2Stream>>>(
dev_I, l2SumTable, M, N);
calcLxRowCumGradntSum<<<rowBlocksize, STableThread, 0, lxStream>>>(
dev_I, lxSumTable, M, N);
calcLyRowCumGradntSum<<<rowBlocksize, STableThread, 0, lyStream>>>(
dev_I, lySumTable, M, N);
// then sum by column (in-place: source and destination alias)
calcSumTable<<<sumTableBlocksize, STableThread, 0, l1Stream>>>(
l1SumTable, l1SumTable, N, M);
calcSumTable<<<sumTableBlocksize, STableThread, 0, l2Stream>>>(
l2SumTable, l2SumTable, N, M);
calcSumTable<<<sumTableBlocksize, STableThread, 0, lxStream>>>(
lxSumTable, lxSumTable, N, M);
calcSumTable<<<sumTableBlocksize, STableThread, 0, lyStream>>>(
lySumTable, lySumTable, N, M);
// NOTE(review): streams are destroyed while their work may still be queued;
// the cudaDeviceSynchronize below is what guarantees completion — confirm
// this ordering is intended.
cudaStreamDestroy(l1Stream);
cudaStreamDestroy(l2Stream);
cudaStreamDestroy(lxStream);
cudaStreamDestroy(lyStream);
// Calculate features for the template (plain host loops, overlapping the
// asynchronous device work above)
for (int i = 0; i < Ky; i++) {
for (int j = 0; j < Kx; j++) {
featuresT[0] += T[i * Kx + j];
featuresT[1] += T[i * Kx + j] * T[i * Kx + j];
featuresT[2] += j * T[i * Kx + j];
featuresT[3] += i * T[i * Kx + j];
}
}
featuresT[0] /= (float)(Kx * Ky);
featuresT[1] = featuresT[1] / (float)(Kx * Ky) - featuresT[0] * featuresT[0];
// 4/Kx^2Ky*(Sx(D)-x*S1(D)), where x = Kx/2
// = 4/Kx^2Ky*(f2-Kx/2*f0*Kx*Ky)
// = 4/Kx^2Ky*f2-2*f0
featuresT[2] = 4.0 / (Kx * Kx * Ky) * featuresT[2] - 2.0 * featuresT[0];
featuresT[3] = 4.0 / (Ky * Kx * Ky) * featuresT[3] - 2.0 * featuresT[0];
cudaDeviceSynchronize();
sumTable->l1SumTable = l1SumTable;
sumTable->l2SumTable = l2SumTable;
sumTable->lxSumTable = lxSumTable;
sumTable->lySumTable = lySumTable;
}
// Locate the smallest element of an N-row x M-column row-major array;
// its column index is written to *x and its row index to *y.
void getMinimum(float *target, int M, int N, int *x, int *y) {
    int bestRow = 0;
    int bestCol = 0;
    float best = target[0];
    for (int row = 0; row < N; row++) {
        const float *line = target + row * M;
        for (int col = 0; col < M; col++) {
            if (line[col] < best) {
                best = line[col];
                bestRow = row;
                bestCol = col;
            }
        }
    }
    *x = bestCol;
    *y = bestRow;
}
// Template matching entry point: find the position (*x, *y) in the
// Iw x Ih image I whose Tw x Th window best matches template T, by minimal
// feature-vector distance computed on the GPU.
void GetMatch(float *I, float *T, int Iw, int Ih, int Tw, int Th, int *x,
int *y) {
int STableThread;
int maxThreadsPerBlock;
GetDeviceInfo(&maxThreadsPerBlock, &STableThread);
SumTable sumTable;
float featuresT[4] = {0, 0, 0, 0};
// build summed-area tables on the device and template features on the host
preprocess(I, T, Iw, Ih, Tw, Th, &sumTable, featuresT, STableThread);
float *dev_difference;
float *difference;
float *dev_featuresT;
// one difference value per valid window position
size_t difference_size = sizeof(float) * (Iw - Tw + 1) * (Ih - Th + 1);
difference = (float *)malloc(difference_size);
AllocateCudaMem(&dev_featuresT, sizeof(float) * 4);
AllocateCudaMem(&dev_difference, difference_size);
cudaMemcpy(dev_featuresT, featuresT, sizeof(float) * 4,
cudaMemcpyHostToDevice);
// 2-D launch: 32-wide blocks, height chosen to fill maxThreadsPerBlock
int differenceThreadsX = 32;
int differenceThreadsY = maxThreadsPerBlock / differenceThreadsX;
// Iw - Tw + 1 + differenceThreadsX - 1
dim3 differenceBlockSize((Iw - Tw + differenceThreadsX) / differenceThreadsX,
// Ih - Th + 1 + differenceThreadsY - 1
(Ih - Th + differenceThreadsY) / differenceThreadsY);
calculateFeatureDifference<<<differenceBlockSize,
dim3(differenceThreadsX, differenceThreadsY)>>>(
dev_featuresT, Iw, Ih, sumTable.l1SumTable, sumTable.l2SumTable,
sumTable.lxSumTable, sumTable.lySumTable, Tw, Th, dev_difference);
// blocking copy back (also synchronizes with the kernel)
cudaMemcpy(difference, dev_difference, difference_size,
cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
// host-side argmin over the difference map
getMinimum(difference, Iw - Tw + 1, Ih - Th + 1, x, y);
cudaFree(sumTable.l1SumTable);
cudaFree(sumTable.l2SumTable);
cudaFree(sumTable.lxSumTable);
cudaFree(sumTable.lySumTable);
cudaFree(dev_difference);
cudaFree(dev_featuresT);
free(difference);
}
|
725
|
#include "graphs.cuh"
#include <ctime>
#include <algorithm>
using namespace graphs;
__global__
void stage1(bool * status, int * d_q_curlen, int * d_q_nexlen, int * d_S_len, int * d_ends_len, int * d_q_cur, int * d_q_next, int * d_sigma, int * d_delta, int * d_S, int * d_ends, int * d_dist,int* d_depth, int * d_no_nodes, Edge * d_edges){
    // BFS expansion step: each thread takes one current-frontier vertex,
    // claims unvisited neighbours via atomicCAS on their distance, appends
    // them to the next-frontier queue, and accumulates shortest-path counts
    // (sigma) with atomicAdd.
    int id = threadIdx.x + blockIdx.x*blockDim.x;
    if(id<*d_q_curlen)
    {
        int current = d_q_cur[id];
        for(int i=0;i<d_edges[current].no_neigh;i++)
        {
            int nb = d_edges[current].neighbours[i];
            // first thread to see an unvisited (INT_MAX) neighbour claims it
            if(atomicCAS(&d_dist[nb],INT_MAX,d_dist[current]+1)==INT_MAX)
            {
                int slot = atomicAdd(d_q_nexlen,1);
                d_q_next[slot]=nb;
            }
            if(d_dist[nb]==(d_dist[current]+1))
                atomicAdd(&d_sigma[nb],d_sigma[current]);
        }
    }
    // BUG FIX: the original called __syncthreads() inside the divergent
    // branch above — undefined behaviour; the barrier at the very end of the
    // kernel served no purpose and has been removed.
}
__global__
void stage1_1(bool * status, int * d_q_curlen, int * d_q_nexlen, int * d_S_len, int * d_ends_len, int * d_q_cur, int * d_q_next, int * d_sigma, int * d_delta, int * d_S, int * d_ends, int * d_dist,int* d_depth, int * d_no_nodes, Edge * d_edges)
{
    // Promote the next-frontier queue to the current queue and append it to
    // the BFS traversal order S (one thread per queued vertex).
    int id = threadIdx.x + blockIdx.x*blockDim.x;
    if(id<*d_q_nexlen)
    {
        d_q_cur[id]=d_q_next[id];
        d_S[id+*d_S_len]=d_q_next[id];
    }
    // BUG FIX: removed the __syncthreads() that sat inside the divergent
    // branch — undefined behaviour, and useless at the end of the kernel.
}
// Single-thread helper (launched <<<1,1>>>): derive the maximum BFS depth
// from the distance of the last vertex pushed onto the traversal order S.
__global__
void single(int * d_depth, int * d_dist, int * d_S, int * d_S_len){
*d_depth=d_dist[d_S[*d_S_len-1]]-1;
}
// Single-thread bookkeeping between BFS levels (launched <<<1,1>>>):
// record the end offset of the freshly discovered level, advance the level
// count, swap queue lengths, and reset the next-queue counter.
__global__
void singleThread(int * d_ends, int * d_ends_len, int * d_q_nexlen, int * d_q_curlen, int * d_S_len){
d_ends[*d_ends_len]=d_ends[*d_ends_len-1]+*d_q_nexlen;
*d_ends_len = *d_ends_len + 1;
*d_q_curlen=*d_q_nexlen;
*d_S_len+=*d_q_nexlen;
*d_q_nexlen=0;
}
__global__
void stage2_2(int * d_delta, int * d_dist, int * d_sigma, int * d_S, Edge * d_edges, const int offset, const int itr){
    // Dependency-accumulation step of Brandes' algorithm for one BFS level:
    // thread idx handles vertex w = d_S[offset + idx] and sums the
    // contributions of its successors (neighbours one level deeper).
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if(idx <= itr){
        int tid = idx + offset;
        int w = d_S[tid];
        float dsw = 0;
        int sw = d_sigma[w];
        for(int i = 0; i < d_edges[w].no_neigh; i++){
            int v = d_edges[w].neighbours[i];
            if(d_dist[v] == d_dist[w] + 1){
                dsw += ((float) sw * (1 + d_delta[v])) / d_sigma[v];
            }
        }
        d_delta[w] = (int)dsw;
    }
    // BUG FIX: removed the __syncthreads() that ended the divergent branch —
    // a barrier in divergent control flow is undefined behaviour, and no
    // barrier is needed at the end of the kernel.
}
namespace graphs{
    // Runs a level-synchronous parallel BFS from node 0 (stage1/stage1_1),
    // then accumulates dependencies backwards over the BFS levels (stage2_2),
    // in the style of Brandes' betweenness centrality.
    // Returns the elapsed wall-clock time and the maximum per-node delta.
    Results calculateBC(Edge * h_edges, int no_nodes, int threads){
        clock_t begin = clock();
        // Threads per block, capped by the node count.
        // (Replaces the original `#define size t` macro: a macro named `size`
        // silently rewrites any later use of that common identifier.)
        const int block_size = min(no_nodes, threads);
        int * d_q_curlen, * d_q_nexlen, * d_depth, * d_S_len, * d_ends_len, * d_no_nodes, h_q_nexlen;
        int * d_q_cur, * d_q_next, * d_sigma, * d_delta, *h_delta, * d_S, * d_ends, * d_dist, * h_ends, h_depth;
        int * h_dis = new int[no_nodes];
        h_ends = new int[no_nodes];
        for(int cc=0;cc<no_nodes;cc++)
        {
            h_ends[cc] = 0;
            h_dis[cc] = INT_MAX;   // INT_MAX marks "not yet visited"
        }
        h_dis[0] = 0;              // BFS source is node 0
        Edge * d_edges;
        bool * d_status;
        cudaMalloc((void **)&d_q_curlen, sizeof(int));
        cudaMalloc((void **)&d_q_nexlen, sizeof(int));
        cudaMalloc((void **)&d_depth, sizeof(int));
        cudaMalloc((void **)&d_S_len, sizeof(int));
        cudaMalloc((void **)&d_ends_len, sizeof(int));
        cudaMalloc((void **)&d_no_nodes, sizeof(int));
        cudaMalloc((void **)&d_status, sizeof(bool));
        cudaMalloc((void **)&d_q_cur, no_nodes*sizeof(int));
        cudaMalloc((void **)&d_q_next, no_nodes*sizeof(int));
        cudaMalloc((void **)&d_sigma, no_nodes*sizeof(int));
        cudaMalloc((void **)&d_delta, no_nodes*sizeof(int));
        cudaMalloc((void **)&d_S, no_nodes*sizeof(int));
        cudaMalloc((void **)&d_ends, no_nodes*sizeof(int));
        cudaMalloc((void **)&d_dist, no_nodes*sizeof(int));
        cudaMalloc((void **)&d_edges, no_nodes*sizeof(Edge));
        cudaMemset(d_delta, 0, no_nodes*sizeof(int));
        int One = 1;
        int Zero = 0;
        int Two = 2;
        int initEnd[2] = {0, 1};
        // Initialize: queue holds only the source, sigma[0] = 1, S = [0].
        cudaMemcpy(d_q_curlen, &One, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_q_nexlen, &Zero, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_S_len, &One, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_ends_len, &Two, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_ends, initEnd, 2*sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_no_nodes, &no_nodes, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_q_cur, &Zero, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_sigma, &One, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_S, &Zero, sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_dist, h_dis, no_nodes*sizeof(int), cudaMemcpyHostToDevice);
        cudaMemcpy(d_edges, h_edges, no_nodes*sizeof(Edge), cudaMemcpyHostToDevice);
        // Level-synchronous BFS: expand the frontier until it is empty.
        while(1){
            int blocks = ceil((float)no_nodes/block_size);
            stage1<<<blocks,block_size>>>(d_status,d_q_curlen,d_q_nexlen,d_S_len,d_ends_len,d_q_cur,d_q_next,d_sigma,d_delta,d_S,d_ends,d_dist,d_depth,d_no_nodes,d_edges);
            cudaMemcpy(&h_q_nexlen, d_q_nexlen, sizeof(int), cudaMemcpyDeviceToHost);
            if(h_q_nexlen==0){
                // Frontier exhausted: record the final depth on the device.
                single<<<1, 1>>>(d_depth, d_dist, d_S, d_S_len);
                break;
            }
            stage1_1<<<blocks,block_size>>>(d_status,d_q_curlen,d_q_nexlen,d_S_len,d_ends_len,d_q_cur,d_q_next,d_sigma,d_delta,d_S,d_ends,d_dist,d_depth,d_no_nodes,d_edges);
            singleThread<<<1, 1>>>(d_ends, d_ends_len, d_q_nexlen, d_q_curlen, d_S_len);
        }
        cudaMemcpy(&h_depth,d_depth,sizeof(int),cudaMemcpyDeviceToHost);
        cudaMemcpy(h_ends,d_ends,no_nodes * sizeof(int),cudaMemcpyDeviceToHost);
        // Walk the BFS levels backwards, accumulating dependencies per level.
        int counter = h_depth;
        int offset;
        while(counter >= 0){
            offset = h_ends[counter];
            int itr = h_ends[counter + 1] - 1 - offset;
            int blocks = ceil((float)(itr+1)/block_size);
            stage2_2<<<blocks, block_size>>>(d_delta, d_dist, d_sigma, d_S, d_edges, (const int)offset, (const int)itr);
            counter --;
        }
        h_delta = new int[no_nodes];
        cudaMemcpy(h_delta, d_delta, no_nodes * sizeof(int),cudaMemcpyDeviceToHost);
        // Release every device buffer (the original leaked d_no_nodes,
        // d_status and d_edges).
        cudaFree(d_q_curlen);
        cudaFree(d_q_nexlen);
        cudaFree(d_depth);
        cudaFree(d_S_len);
        cudaFree(d_ends_len);
        cudaFree(d_no_nodes);
        cudaFree(d_status);
        cudaFree(d_q_cur);
        cudaFree(d_q_next);
        cudaFree(d_sigma);
        cudaFree(d_delta);
        cudaFree(d_S);
        cudaFree(d_ends);
        cudaFree(d_dist);
        cudaFree(d_edges);
        clock_t end = clock();
        double elapsed_secs = double(end - begin) / (CLOCKS_PER_SEC);
        Results res;
        res.time = elapsed_secs;
        res.delta = *max_element(h_delta, h_delta + no_nodes);
        // Free the host-side scratch arrays (also leaked before); h_delta is
        // only released after max_element has consumed it.
        delete [] h_dis;
        delete [] h_ends;
        delete [] h_delta;
        return res;
    }
}
|
726
|
#include "includes.h"
__global__ void kernel_m(unsigned int * ind, unsigned int *scand, unsigned int shift, const unsigned int ne)
{
    // Radix-split predicate: scand[i] = 1 when bit `shift` of ind[i] is clear,
    // 0 when it is set. One thread per element, guarded against the grid tail.
    const unsigned int gid = threadIdx.x + blockDim.x * blockIdx.x;
    if (gid < ne) {
        const unsigned int bit = (ind[gid] >> shift) & 1u;
        scand[gid] = bit ? 0u : 1u;
    }
}
|
727
|
#include <stdio.h>
__global__
void k_lu_f32(int n, float* A, int stride_row, int stride_col){
    // In-place LU factorization (Doolittle / Gaussian elimination) of an
    // n x n matrix, executed sequentially by a single thread. After the call,
    // U occupies the upper triangle and L's multipliers (unit diagonal
    // implicit) the strict lower triangle.
    for (int pivot = 0; pivot < n; ++pivot) {
        for (int row = pivot + 1; row < n; ++row) {
            // Negated multiplier that zeroes A[row][pivot].
            float mult = -A[row*stride_col + pivot*stride_row] / A[pivot*stride_col + pivot*stride_row];
            for (int col = pivot + 1; col < n; ++col) {
                A[row*stride_col + col*stride_row] = A[row*stride_col + col*stride_row] + A[pivot*stride_col + col*stride_row] * mult;
            }
            // Store the positive multiplier in L's slot.
            A[row*stride_col + pivot*stride_row] = -mult;
        }
    }
}
//Solves LX=A, whereas L is a lower triangular matrix and X an unknown Matrix. It is assumed that L is stored inside A=LU with ones on the diagonal
__global__
void k_lu_solve_lower_f32(int n, int m, const float* L, int stride_row_l,int stride_col_l, const float* A, int stride_row_a, int stride_col_a, float* X, int stride_row_x, int stride_col_x){
    // Forward substitution: solves L X = A, one thread per right-hand-side
    // column. L is unit lower triangular (implicit ones on the diagonal), as
    // produced by k_lu_f32, so no division is needed.
    const int TILE_SIZE = 64;
    const int col = blockIdx.x * TILE_SIZE + threadIdx.x;
    if (col < m) {
        const int xBase = col * stride_row_x;
        const int aBase = col * stride_row_a;
        for (int row = 0; row < n; ++row) {
            float acc = A[aBase + row*stride_col_a];
            for (int k = 0; k < row; ++k) {
                acc -= L[row*stride_col_l + k*stride_row_l] * X[xBase + k*stride_col_x];
            }
            X[xBase + row*stride_col_x] = acc;
        }
    }
}
//Solves UX=A, whereas U is a upprt triangular matrix coming from lu factorization and X an unknown Matrix
__global__
void k_lu_solve_upper_f32(int n, int m,const float* U, int stride_row_u,int stride_col_u, const float* A, int stride_row_a, int stride_col_a, float* X, int stride_row_x, int stride_col_x){
    // Backward substitution: solves U X = A, one thread per right-hand-side
    // column, where U is the upper triangle of an LU factorization.
    const int TILE_SIZE = 64;
    const int col = blockIdx.x * TILE_SIZE + threadIdx.x;
    if (col < m) {
        const int xBase = col * stride_row_x;
        const int aBase = col * stride_row_a;
        for (int row = n - 1; row >= 0; --row) {
            float acc = A[aBase + row*stride_col_a];
            for (int k = n - 1; k > row; --k) {
                acc -= U[row*stride_col_u + k*stride_row_u] * X[xBase + k*stride_col_x];
            }
            // U has a real (non-unit) diagonal, so divide here.
            acc /= U[row*stride_col_u + row*stride_row_u];
            X[xBase + row*stride_col_x] = acc;
        }
    }
}
//Solves AX=B
__host__
void lu_solve_f32_device(int n, int m, float* A_d, int stride_row_a, int stride_col_a, float* B_d, int stride_row_b, int stride_col_b, float* X_d, int stride_row_x, int stride_col_x){
    // Solves A X = B on the device, where A_d holds a packed LU factorization
    // (see k_lu_f32): first L Y = B (forward), then U X = Y (backward).
    const int block = 64;                       // must match TILE_SIZE inside the kernels
    const int grid = (m + block - 1) / block;   // integer ceil-div over RHS columns
    float* Y;
    cudaMalloc((void**)&Y, sizeof(float)*n*m);  // intermediate Y, row-major n x m
    k_lu_solve_lower_f32<<<grid, block>>>(n,m,A_d,stride_row_a,stride_col_a,B_d,stride_row_b,stride_col_b,Y,1,m);
    // BUG FIX: the upper solve used to launch with 1 thread per block while
    // the kernel computes its column as blockIdx.x*64 + threadIdx.x, so only
    // every 64th column of X was ever written. Launch a full 64-thread block.
    k_lu_solve_upper_f32<<<grid, block>>>(n,m,A_d,stride_row_a,stride_col_a,Y,1,m,X_d,stride_row_x,stride_col_x);
    cudaFree(Y);
}
|
728
|
#include <stdio.h>
#include <stdlib.h>
/* =================================== scan_cuda.cu ===================================
a[39999999] = 799999980000000.000000
real 0m2.485s
user 0m1.233s
sys 0m1.130s
==27669== NVPROF is profiling process 27669, command: ./scan_cuda
a[39999999] = 799999980000000.000000
==27669== Profiling application: ./scan_cuda
==27669== Profiling result:
Time(%) Time Calls Avg Min Max Name
51.51% 464.14ms 2 232.07ms 452.72us 463.69ms [CUDA memcpy HtoD]
41.72% 375.87ms 2 187.94ms 362.41us 375.51ms [CUDA memcpy DtoH]
5.10% 45.915ms 1 45.915ms 45.915ms 45.915ms scan_cuda(double*, double*, int)
1.68% 15.118ms 1 15.118ms 15.118ms 15.118ms add_cuda(double*, double*, int)
==27669== API calls:
Time(%) Time Calls Avg Min Max Name
77.63% 902.05ms 4 225.51ms 94.699us 463.95ms cudaMemcpy
22.23% 258.28ms 2 129.14ms 8.6740us 258.27ms cudaMalloc
0.06% 717.03us 2 358.52us 40.549us 676.49us cudaFree
0.04% 483.83us 90 5.3750us 292ns 205.79us cuDeviceGetAttribute
0.02% 255.32us 2 127.66us 33.083us 222.24us cudaLaunch
0.01% 92.437us 1 92.437us 92.437us 92.437us cuDeviceTotalMem
0.01% 65.884us 1 65.884us 65.884us 65.884us cuDeviceGetName
0.00% 13.031us 6 2.1710us 360ns 9.7500us cudaSetupArgument
0.00% 6.7530us 2 3.3760us 1.4020us 5.3510us cudaConfigureCall
0.00% 2.7420us 2 1.3710us 990ns 1.7520us cuDeviceGetCount
0.00% 1.1170us 2 558ns 499ns 618ns cuDeviceGet
=================================== scan.c ===================================
a[39999999] = 799999980000000.000000
real 0m0.511s
user 0m0.216s
sys 0m0.287s
*/
__global__ void scan_cuda(double* a, double *s, int width) {
    // Per-block Hillis-Steele inclusive scan of `a` (in place), publishing
    // each block's total into s[blockIdx.x+1] for a host-side scan of block
    // sums. Expects blockDim.x <= 1024 (size of the shared buffer).
    int t = threadIdx.x;
    int b = blockIdx.x*blockDim.x;
    double partial;
    __shared__ double p[1024];
    // BUG FIX: tail threads of the last block used to leave p[t]
    // uninitialized (and later read a[t+b] out of bounds); zero-fill instead.
    p[t] = (b + t < width) ? a[t+b] : 0.0;
    __syncthreads();
    for(int i = 1; i < blockDim.x; i = i * 2) {
        // Read and write phases are separated by barriers so every thread
        // sees the previous iteration's values.
        if (t >= i){
            partial = p[t] + p[t-i];
        }
        __syncthreads();
        if (t >= i){
            p[t] = partial;
        }
        __syncthreads();
    }
    if (b + t < width){
        a[t+b] = p[t];
    }
    // BUG FIX: the last block used to write s[gridDim.x], one element past
    // the end of the s array (host allocates exactly gridDim.x doubles).
    if (t == blockDim.x-1 && blockIdx.x + 1 < gridDim.x){
        s[blockIdx.x+1] = p[t];
    }
}
__global__ void add_cuda(double *a, double *s, int width) {
    // Uniform-add phase: offset every element of this block by the scanned
    // sum of all preceding blocks (s[blockIdx.x]).
    const int gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid < width) {
        a[gid] += s[blockIdx.x];
    }
}
int main()
{
    // Inclusive prefix-sum of 40M doubles: per-block scan on the GPU,
    // scan of block sums on the host, then a uniform-add pass on the GPU.
    int width = 40000000;
    int size = width * sizeof(double);
    int block_size = 1024;
    int num_blocks = (width-1)/block_size+1;       // ceil-div
    int s_size = (num_blocks * sizeof(double));
    double *a = (double*) malloc (size);
    double *s = (double*) malloc (s_size);
    for(int i = 0; i < width; i++)
        a[i] = i;
    double *d_a, *d_s;
    // allocate vectors "a" and "s" on the device
    cudaMalloc((void**) &d_a, size);
    cudaMalloc((void**) &d_s, s_size);
    // copy vector "a" to the device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    // launch configuration (dimGrid and dimBlock)
    dim3 dimGrid(num_blocks, 1, 1);
    dim3 dimBlock(block_size, 1, 1);
    // per-block scan kernel
    scan_cuda <<<dimGrid,dimBlock>>>(d_a, d_s, width);
    // copy block sums "s" back to the host (blocking memcpy also syncs)
    cudaMemcpy(s, d_s, s_size, cudaMemcpyDeviceToHost);
    // scan the block sums on the host
    s[0] = 0;
    for (int i = 1; i < num_blocks; i++)
        s[i] += s[i-1];
    // copy the scanned block sums back to the device
    cudaMemcpy(d_s, s, s_size, cudaMemcpyHostToDevice);
    // uniform-add kernel
    add_cuda <<<dimGrid,dimBlock>>>(d_a, d_s, width);
    // copy the result vector "a" back to the host
    cudaMemcpy(a, d_a, size, cudaMemcpyDeviceToHost);
    printf("\na[%d] = %f\n",width-1,a[width-1]);
    cudaFree(d_a);
    cudaFree(d_s);
    // BUG FIX: the host buffers were leaked.
    free(a);
    free(s);
    return 0;
}
|
729
|
extern "C"
__global__ void grav(int n, double G,
        double *mass, double *posX, double *posY,
        double *rForceX, double *rForceY)
{
    // One thread per ordered pair (i, j): accumulate on body i the 2-D
    // gravitational pull exerted by body j. Launch with a 2-D grid covering
    // n x n threads. Requires SM60+ for double-precision atomicAdd.
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int j = blockIdx.y * blockDim.y + threadIdx.y;
    if (i < n && j < n && i != j)
    {
        double relX = posX[j] - posX[i];
        double relY = posY[j] - posY[i];
        double dist2 = relX * relX + relY * relY;
        // G*m_i*m_j / r^3: multiplying by the relative components then
        // yields the force vector (rsqrt(d2^3) = 1/r^3).
        double scl = G * mass[i] * mass[j] * rsqrt(dist2 * dist2 * dist2);
        // BUG FIX: for a fixed i, every j-thread updates rForceX[i]
        // concurrently; a plain `+=` is a data race that silently drops
        // contributions. Accumulate atomically instead.
        atomicAdd(&rForceX[i], relX * scl);
        atomicAdd(&rForceY[i], relY * scl);
    }
}
|
730
|
#include <memory>
#include <iostream>
#include <cuda_runtime.h>
int main(void)
{
    // Report how many CUDA-capable devices are visible to the runtime.
    int device_count = 0;
    // BUG FIX: the API call result was ignored; on failure device_count
    // would be reported as if it were valid.
    cudaError_t err = cudaGetDeviceCount(&device_count);
    if (err != cudaSuccess) {
        std::cerr << "cudaGetDeviceCount failed: " << cudaGetErrorString(err) << std::endl;
        return 1;
    }
    std::cout << "There are " << device_count << " gpus on this computer" << std::endl;
}
|
731
|
#include "includes.h"
#define DOUBLE
#ifdef DOUBLE
#define Complex cufftDoubleComplex
#define Real double
#define Transform CUFFT_Z2Z
#define TransformExec cufftExecZ2Z
#else
#define Complex cufftComplex
#define Real float
#define Transform CUFFT_C2C
#define TransformExec cufftExecC2C
#endif
#define TILE_DIM 8
// synchronize blocks
__global__ void spread_y_i_r(Real* src, Real* dst)
{
    // Copy kernel: reads from a source whose y-rows are laid out with twice
    // the grid width (gridDim.x * 2 blocks per row) and writes a densely
    // packed destination, effectively gathering the first half of each row.
    const unsigned int dstIdx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    const unsigned int srcIdx = (blockIdx.y * gridDim.x * 2 + blockIdx.x) * blockDim.x + threadIdx.x;
    dst[dstIdx] = src[srcIdx];
}
|
732
|
#include <stdlib.h>
#include <stdio.h>
/*
__global__ void kernel10(int *a)
{
printf("Hello from thread %d in block %d\n", threadIdx.x, blockIdx.x);
}
*/
__global__ void kernel10(int *a)
{
    // Demo kernel: each thread prints its 2-D coordinates within its block
    // and its block's 2-D coordinates within the grid. The argument `a` is
    // unused (kept for signature compatibility with the launch site).
    printf("Hello from thread %d and %d in block %d and %d \n", threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y);
}
int main() {
    // Launch a 3x2 grid of 4x2 blocks (48 threads), each printing its
    // coordinates. The buffer round-trip is a demo; the kernel ignores it.
    const int n = 20;
    const int memSize = n * sizeof(int);
    int *host_buf = (int*) malloc(n * sizeof(*host_buf));
    int *dev_buf;
    cudaMalloc((void**)&dev_buf, memSize);
    cudaMemcpy(dev_buf, host_buf, memSize, cudaMemcpyHostToDevice);
    dim3 block(4, 2);
    dim3 grid(3, 2);
    kernel10<<<grid, block>>>(dev_buf);
    // Blocking copy also synchronizes, flushing the device-side printf.
    cudaMemcpy(host_buf, dev_buf, memSize, cudaMemcpyDeviceToHost);
    cudaFree(dev_buf);
    free(host_buf);
    return 0;
}
|
733
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#define N 5
__global__ void CUDAStrCopy(char *A, char C[N])
{
    // Copy A into C while converting each character to upper case
    // (ASCII value - 32), one thread per character; print the result.
    // Assumes A holds lowercase letters — TODO confirm at the call site.
    const int idx = threadIdx.x;
    C[idx] = A[idx] - 32;
    printf("%c\t", C[idx]);
}
int main()
{
    // Fill A with 'a', upper-case it on the GPU, and time the whole
    // transfer + kernel round trip with CUDA events.
    char A[N];
    char C[N];
    char *pa, *pc;
    for(int i = 0; i < N; i++)
        A[i] = 'a';
    printf("C = \n");
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    cudaMalloc((void**)&pa, N * sizeof(char));
    cudaMalloc((void**)&pc, N * sizeof(char));
    cudaMemcpy(pa, A, N * sizeof(char), cudaMemcpyHostToDevice);
    cudaError_t error = cudaGetLastError();
    if(error != cudaSuccess)
        printf("CUDA Error1: %s\n", cudaGetErrorString(error));
    CUDAStrCopy<<<1, N>>>(pa, pc);
    error = cudaGetLastError();
    if(error != cudaSuccess)
        printf("CUDA Error2: %s\n", cudaGetErrorString(error));
    cudaMemcpy(C, pc, N * sizeof(char), cudaMemcpyDeviceToHost);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    /*
    printf("Value of C in host after kernel execution\n");
    for(int i = 0; i < N; i++)
        printf("%c\n", C[i]);
    */
    printf("Time taken = %f", elapsedTime);
    cudaFree(pa);
    cudaFree(pc);
    // BUG FIX: the timing events were never destroyed (resource leak);
    // also removed an unused local `int i;`.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("\n");
    return 0;
}
|
734
|
__global__ void ch1(unsigned char* Pout, unsigned char* Pin, int width, int height) {
    // RGB -> luminance greyscale (0.21 R + 0.71 G + 0.07 B),
    // one thread per output pixel; interleaved 3-channel input.
    const int channels = 3;
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= width || y >= height) return;   // guard the grid tail
    const int greyIdx = y * width + x;
    const int rgbIdx  = greyIdx * channels;
    const unsigned char r = Pin[rgbIdx];
    const unsigned char g = Pin[rgbIdx + 1];
    const unsigned char b = Pin[rgbIdx + 2];
    Pout[greyIdx] = 0.21f*r + 0.71f*g + 0.07f*b;
}
__global__ void gpu_blur(unsigned char* Pout, unsigned char* Pin, int width, int height){
    // 7x7 box blur (radius 3) over a single-channel image; border pixels
    // average only the in-bounds part of the window.
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    const int radius = 3;
    if (x >= width || y >= height) return;
    int sum = 0;
    int count = 0;
    for (int dy = -radius; dy <= radius; ++dy) {
        for (int dx = -radius; dx <= radius; ++dx) {
            const int ny = y + dy;
            const int nx = x + dx;
            if (ny >= 0 && ny < height && nx >= 0 && nx < width) {
                sum += Pin[ny * width + nx];
                ++count;
            }
        }
    }
    Pout[y * width + x] = (unsigned char) (sum / count);
}
__global__ void gpu_grey_and_blur(unsigned char* Pout, unsigned char* Pin, int width, int height){
    // Fused kernel: greyscale-convert the RGB input into Pout, then 3x3 box
    // blur Pout in place.
    // NOTE(review): __syncthreads() only synchronizes within a block. The
    // blur phase reads Pout pixels written by NEIGHBOURING blocks, which may
    // not have finished their greyscale phase yet — results at block borders
    // are not guaranteed. A race-free version needs two kernel launches.
    int channels = 3;
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = threadIdx.y + blockIdx.y * blockDim.y;
    // check if pixel within range
    if (col < width && row < height){
        int gOffset = row * width + col;
        int rgbOffset = gOffset * channels;
        unsigned char r = Pin[rgbOffset  ];
        unsigned char g = Pin[rgbOffset+1];
        unsigned char b = Pin[rgbOffset+2];
        Pout[gOffset] = 0.21f*r + 0.71f*g + 0.07f*b;
    }
    // Barrier between the write and read of Pout (block-local only, see note).
    __syncthreads();
    unsigned char k_size = 1;   // blur radius 1 -> 3x3 window
    int pixVal = 0;
    int pixels = 0;
    if (col < width && row < height){
        for(int blurRow = -k_size; blurRow < k_size+1; ++blurRow){
            for(int blurCol = -k_size; blurCol < k_size+1; ++blurCol){
                int curRow = row + blurRow;
                int curCol = col + blurCol;
                // only accumulate in-bounds neighbours
                if (curRow > -1 && curRow < height && curCol > -1 && curCol < width){
                    pixVal += Pout[curRow * width + curCol];
                    pixels++;
                }
            }
        }
    }
    // Barrier so all reads complete before the in-place overwrite below
    // (again block-local only).
    __syncthreads();
    if (col < width && row < height) {
        Pout[row * width + col] = (unsigned char) (pixVal / pixels);
    }
}
__global__ void gpu_grey_and_thresh(unsigned char* Pout, unsigned char* Pin, int width, int height){
    // Fused greyscale conversion + binary threshold: output is 255 when the
    // greyscale value exceeds 157, otherwise 0.
    const int channels = 3;
    const unsigned char thresh = 157;
    const int x = threadIdx.x + blockIdx.x * blockDim.x;
    const int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= width || y >= height) return;   // guard the grid tail
    const int outIdx = y * width + x;
    const int inIdx  = outIdx * channels;
    // Truncate to unsigned char BEFORE comparing, matching the original
    // integer threshold semantics.
    const unsigned char gval = 0.21f*Pin[inIdx] + 0.71f*Pin[inIdx+1] + 0.07f*Pin[inIdx+2];
    Pout[outIdx] = (gval > thresh) ? 255 : 0;
}
// C-linkage host wrappers so non-CUDA translation units can launch the
// kernels above with an explicit grid/block configuration.
extern "C" void cuda_ch1(unsigned char* Pout, unsigned char* Pin, int width, int height, dim3 numBlocks, dim3 numThreads) {
    ch1 <<< numBlocks, numThreads >>> (Pout, Pin, width, height);
}
// Box blur of a single-channel image (see gpu_blur).
extern "C" void cuda_blur(unsigned char* Pout, unsigned char* Pin, int width, int height, dim3 numBlocks, dim3 numThreads) {
    gpu_blur <<< numBlocks, numThreads >>> (Pout, Pin, width, height);
}
// Fused greyscale + blur (see the block-border caveat on gpu_grey_and_blur).
extern "C" void cuda_grey_and_blur(unsigned char* Pout, unsigned char* Pin, int width, int height, dim3 numBlocks, dim3 numThreads) {
    gpu_grey_and_blur <<< numBlocks, numThreads >>> (Pout, Pin, width, height);
}
// Fused greyscale + binary threshold (cutoff 157).
extern "C" void cuda_grey_and_thresh(unsigned char* Pout, unsigned char* Pin, int width, int height, dim3 numBlocks, dim3 numThreads) {
    gpu_grey_and_thresh <<< numBlocks, numThreads >>> (Pout, Pin, width, height);
}
|
735
|
#include <cuda.h>
#include <iostream>
// Wrap every CUDA Driver API call: on failure, print the numeric CUresult
// with file/line context and abort. The macro captures call-site location.
#define checkCudaErrors(err)  __checkCudaErrors (err, __FILE__, __LINE__)
inline void __checkCudaErrors( CUresult err, const char *file, const int line )
{
    // Driver API errors are numeric; cuGetErrorString is not used here, so
    // only the raw code is reported.
    if( CUDA_SUCCESS != err) {
        fprintf(stderr,
                "CUDA Driver API error = %04d from file <%s>, line %i.\n",
                err, file, line );
        exit(-1);
    }
}
int main() {
    // Minimal CUDA Driver API demo: initialize the driver, count devices,
    // then query device 0's name and total memory.
    checkCudaErrors(cuInit(0));           // must precede any other driver call
    int device_count = 0;
    checkCudaErrors(cuDeviceGetCount(&device_count));
    std::cout << "Hello World!" << std::endl;
    std::cout << "Find " << device_count << " device(s)." << std::endl;
    CUdevice device;
    checkCudaErrors(cuDeviceGet(&device, 0));
    char name[100];
    checkCudaErrors(cuDeviceGetName(name, 100, device));
    size_t bytes = 0;
    checkCudaErrors(cuDeviceTotalMem(&bytes, device));
    std::cout << name << " has total memory of " << bytes << " bytes." << std::endl;
    return 0;
}
|
736
|
__global__ void kernel(float4* a, const cudaTextureObject_t* tex){
    // Sample the 3-D texture selected by this block's index at coordinates
    // (0.1, 0.2, 0.3) and store the texel in a[0].
    // Float-suffixed literals avoid implicit double->float conversion
    // (tex3D takes float coordinates).
    // NOTE(review): every block overwrites the same a[0]; with more than one
    // block the surviving value is whichever block wrote last.
    a[0] = tex3D<float4>(tex[blockIdx.x], 0.1f, 0.2f, 0.3f);
}
|
737
|
/*
* purpose: model the event of infection with avian flu virus
* substrain H7N9 known to also affect humans; infection
* will be said to occur when a random number from the
* interval [0,1] falls into a certain range representing
* the fraction of lung/bronchi/bronchiole cells within all
* types of cells forming the body; in case of infection, the
* progress of the virus is modeled in terms of numbers of
* infected cells growing over time;
* n.b. here the immune system is taken into account via a
* term counteracting the increase in nmb_infected_cells[]
* n.b.2 again, the generalized variant is considered,
* allowing all kinds of parallel infections;
* n.b.3 here the time of action of immune system components
* is modeled every hour while viral distribution updates
* are taken into account only every 14 hours following
* the known H7N9 transmission period
* n.b.4 here the core part of the simulation is run on the
* GPU with basic usage of managed unified memory;
* n.b.5 the standard math lib needs to be replaced with the
* CUDA-internal math library, which is automatically
* included when calling nvcc, so just dropping -lm
* on the link line will facilitate this replacement
* result: H7N9 infection initially starts with exponential growth
* and becomes downregulated by the modulating function of
* the immune system; much more frequent counteraction
* does render the growth-pattern now fan-shaped; however
* individual cells won't be synchronized that perfectly, so
* the times of real updates will vary to some extent probably
* leaving behind an overall gaussian-like average impression
* of viral invasion and defense;
* GPU-ported results are identical to the orignial CPU-only
* results and the math lib is perfectly substituted from
* CUDA internal libs;
* compilation: nvcc ./h7n9_v4.cu
* usage: ./a.out > ./h7n9_v4.gpu.0.dat
* tail -499 ./h7n9_v4.gpu.0.dat > ./h7n9_v4.gpu.1.dat
* gnuplot "./h7n9_v4.gpu.gnuplot"
* gs ./h7n9_v4.gpu.eps
*/
/*
* doi: 10.3109/03014460.2013.807878
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <string.h>
#define NMB_CELL_TYPES 54
#define NMB_INITIAL_INFECTION_ATTEMPTS 100
#define H7N9_TISSUE_TARGET 25
#define H7N9_REPRODUCTION_NMB 22
#define H7N9_TRANSMISSION_TIME 14
#define H7N9_TISSUE_SURVIVAL_PERCENTAGE 35
#define TIME_STEP_IN_HOURS 1
#define NMB_TIME_STEPS_2STUDY 500
#define HOURS_2ACTIVATE_IMMUNE_SYSTEM 24
/*
* prototype declarations to avoid including additional local header files
*/
void initialize(float *, char **);
void random_infection(float, float *, float *);
void monitor_infection_progress(float *, float *, float *);
/*
* GPU kernel
*/
// One thread per cell type (launched as a single block of NMB_CELL_TYPES
// threads); only the H7N9 target tissue gets real updates, the other lanes
// run no-op arithmetic. `exp` here resolves to the CUDA device math library.
__global__ void update_nmb_infected_cells(int time,
        float *sick_cell_type_count,
        float *nmb_infected_cells)
{
    int i;
    i = threadIdx.x;
    if (i == H7N9_TISSUE_TARGET) {
        if ((time % H7N9_TRANSMISSION_TIME) == 0) {
            /*
             * at this point, all currently infected cells will die and
             * release their load of new mature H7N9 virus particles;
             */
            sick_cell_type_count[i] -= nmb_infected_cells[i];
            nmb_infected_cells[i] *= (float) H7N9_REPRODUCTION_NMB;
        }
        /*
         * consider counteraction of the immune system after an
         * initial lag time for alerting; effective action will be
         * down-scaling of nmb_infected_cells[]
         */
        // Gaussian-in-time damping; the constant sets the immune response
        // strength — tuned for this model, do not change casually.
        if (time >= HOURS_2ACTIVATE_IMMUNE_SYSTEM) {
            nmb_infected_cells[i] *= exp(-0.000010969388 * time * time);
        }
    } else {
        /*
         * just dummy actions of potential interest for
         * cross-infections;
         */
        sick_cell_type_count[i] += 0.0;
        nmb_infected_cells[i] *= 1.0;
    }
}
/*
* host main
*/
// Host driver: build the tissue tables, attempt random infections, and if
// the lung/bronchi tissues get hit, simulate disease progression on the GPU.
int main()
{
    int i, got_hit_by_h7n9;
    float healthy_tot_nmb_cells, *healthy_cell_type_count, *initial_infection;
    float *sick_cell_type_count, *nmb_infected_cells;
    char **healthy_cell_type_tissue;
    time_t t;
    /*
     * initialize reference data with values from the literature
     */
    srand((unsigned) time(&t));
    healthy_cell_type_count = (float *) malloc(NMB_CELL_TYPES * sizeof(float));
    healthy_cell_type_tissue = (char **) malloc(NMB_CELL_TYPES * sizeof(char *));
    for (i=0; i<NMB_CELL_TYPES; i++) {
        healthy_cell_type_tissue[i] = (char *) malloc(300 * sizeof(char));
    }
    initialize(healthy_cell_type_count, healthy_cell_type_tissue);
    /*
     * let us also allocate another array, sick_cell_type_count[],
     * for modelling an ongoing infection; initially this array will be
     * quasi-identical to healthy_cell_type_count[], except that for a
     * particular tissue the count (number of still healthy cells) will be
     * lower than its corresponding counterpart in healthy_cell_type_count[]
     * and with progress of the disease this gap will become larger and larger;
     * at the same instance we may also introduce another array,
     * nmb_infected_cells[], that keeps track of the current number of cells
     * actually infected (hence still living) for each of the known tissues;
     * this way we can take into account that whenever new cells get infected
     * others must have died, hence distinguish between cells carrying on
     * the spread of the virus and cells already extinguished and thus just
     * missing in the overall count of functioning cells;
     * n.b. since these arrays will be used on the GPU we shall make use
     *      of managed unified memory via cudaMallocManaged()
     */
    // NOTE(review): the cudaMallocManaged() return codes are not checked;
    // a failed allocation would surface only as a crash later.
    cudaMallocManaged(&sick_cell_type_count, NMB_CELL_TYPES * sizeof(float));
    cudaMallocManaged(&nmb_infected_cells, NMB_CELL_TYPES * sizeof(float));
    /*
     * compute total number of cells including all various tissues
     */
    healthy_tot_nmb_cells = (float) 0;
    for (i=0; i<NMB_CELL_TYPES; i++) {
        printf("%6d%12.2e%*c%-s\n", i, healthy_cell_type_count[i],
               5, ' ', healthy_cell_type_tissue[i]);
        healthy_tot_nmb_cells += healthy_cell_type_count[i];
    }
    printf("*** healthy: sum of all cells %12.2e ***\n", healthy_tot_nmb_cells);
    /*
     * fill a vector with all 0 except one position where a -1 shall
     * reflect infection of that particular tissue
     */
    initial_infection = (float *) malloc(NMB_CELL_TYPES * sizeof(float));
    random_infection(healthy_tot_nmb_cells,
                     healthy_cell_type_count, initial_infection);
    //printf("*** infection vector ***\n");
    //for (i=0; i<NMB_CELL_TYPES; i++) {
    //    printf("%6d%6.0f\n", i, initial_infection[i]);
    //}
    /*
     * give it a couple of attempts of initial infection, in particular
     * NMB_INITIAL_INFECTION_ATTEMPTS times, and see whether any of them
     * will affect lung/bronchi tissue as this is the entry point of H7N9;
     * in case of really hitting lung/bronchi, variable got_hit_by_h7n9 will
     * be set to 1, otherwise got_hit_by_h7n9 = 0 shall signal no infection
     * has taken place;
     */
    got_hit_by_h7n9 = 0;
    for (i=0; i<NMB_INITIAL_INFECTION_ATTEMPTS; i++) {
        random_infection(healthy_tot_nmb_cells,
                         healthy_cell_type_count, initial_infection);
        // indices 21..31 are the lung/bronchi/bronchiole entries of the
        // tissue table built in initialize(); any -1 among them means a hit
        if (( initial_infection[21] + initial_infection[22]
            + initial_infection[23] + initial_infection[24]
            + initial_infection[25] + initial_infection[26]
            + initial_infection[27] + initial_infection[28]
            + initial_infection[29] + initial_infection[30]
            + initial_infection[31]) < 0 ) {
            got_hit_by_h7n9 = 1;
        }
    }
    if (got_hit_by_h7n9 == 1) {
        printf("*** infected with H7N9 ***\n");
    }
    else {
        printf("*** not infected with H7N9 ***\n");
    }
    /*
     * depending on whether or not we have emerged as "infected" let us
     * enter a special routine to model progress of the disease;
     */
    if (got_hit_by_h7n9 == 1) {
        monitor_infection_progress(healthy_cell_type_count,
                                   sick_cell_type_count, nmb_infected_cells);
    }
    /*
     * and don't forget to free all allocated memory
     * n.b. now we have two instances of cudaMallocManaged() types !
     */
    free(initial_infection);
    cudaFree(nmb_infected_cells);
    cudaFree(sick_cell_type_count);
    for (i=NMB_CELL_TYPES-1; i>=0; i--) {
        free(healthy_cell_type_tissue[i]);
    }
    free(healthy_cell_type_tissue);
    free(healthy_cell_type_count);
    return(0);
}
// Populate the two parallel tissue tables: ctc[i] receives the literature
// cell count and ct[i] (pre-allocated, >= 300 chars) the tissue name.
// The two literal arrays below must stay index-aligned — entry i of one
// describes entry i of the other.
void initialize(float *ctc, char **ct)
{
    int i;
    const float cell_type_count[] = { 500000.00e05,
                                        1490.00e05,
                                        1230.00e05,
                                         806.00e05,
                                         703.00e05,
                                        1610.00e05,
                                           4.94e05,
                                       15800.00e05,
                                          84.80e05,
                                   263000000.00e05,
                                      517000.00e05,
                                    14500000.00e05,
                                       11000.00e05,
                                        7110.00e05,
                                     7530000.00e05,
                                       40000.00e05,
                                       20000.00e05,
                                      103000.00e05,
                                     2410000.00e05,
                                      963000.00e05,
                                      241000.00e05,
                                      386000.00e05,
                                      699000.00e05,
                                      290000.00e05,
                                       43200.00e05,
                                       76800.00e05,
                                     1410000.00e05,
                                       17400.00e05,
                                       33000.00e05,
                                     1370000.00e05,
                                        4490.00e05,
                                       10300.00e05,
                                    30000000.00e05,
                                     1000000.00e05,
                                       29500.00e05,
                                        2500.00e05,
                                      150000.00e05,
                                    18500000.00e05,
                                         481.00e05,
                                      329000.00e05,
                                     1370000.00e05,
                                       25800.00e05,
                                       38000.00e05,
                                       36200.00e05,
                                      167000.00e05,
                                         104.00e05,
                                       10900.00e05,
                                       11800.00e05,
                                       67600.00e05,
                                       17700.00e05,
                                       70200.00e05,
                                           8.70e05,
                                      100000.00e05,
                                    25400000.00e05};
    const char* cell_type[] = {"adipose tissue: adipocytes",
                               "articular cartilage: femoral cartilage cells",
                               "articular cartilage: humeral head cartilage cells",
                               "articular cartilage: talus cartilage cells",
                               "biliary system: biliary ducts epithelial cells",
                               "biliary system: gallbladder epithelial cells",
                               "biliary system: gallbladder interstitial Cajal-like cells",
                               "biliary system: gallbladder smooth myocytes",
                               "biliary system: gallbladder other stromal cells",
                               "blood: erythrocytes",
                               "blood: leucocytes",
                               "blood: platelets",
                               "bone: cortical osteocytes",
                               "bone: trabecular osteocytes",
                               "bone marrow: nucleated cells",
                               "heart: connective tissue cells",
                               "heart: muscle cells",
                               "kidney: glomerulus cells",
                               "liver: hepatocytes",
                               "liver: kupffer cells",
                               "liver: stellate cells",
                               "lung bronchi bronchioles: alveolar cells type I",
                               "lung bronchi bronchioles: alveolar cells type II",
                               "lung bronchi bronchioles: alveolar macrophages",
                               "lung bronchi bronchioles: basal cells",
                               "lung bronchi bronchioles: ciliated cells",
                               "lung bronchi bronchioles: endothelial cells",
                               "lung bronchi bronchioles: goblet cells",
                               "lung bronchi bronchioles: indeterminate bronchial bronchiolar cells",
                               "lung bronchi bronchioles: interstitial cells",
                               "lung bronchi bronchioles: other bronchial bronchiolar secretory cells",
                               "lung bronchi bronchioles: preciliated cells",
                               "nervous system: glial cells",
                               "nervous system: neurons",
                               "pancreas: islet cells",
                               "skeletal muscle: muscle fibers",
                               "skeletal muscle: satellite cells",
                               "skin: dermal fibroblasts",
                               "skin: dermal mast cells",
                               "skin: epidermal corneocytes",
                               "skin: epidermal nucleate cells",
                               "skin: epidermal Langerhans cells",
                               "skin: epidermal melanocytes",
                               "skin: epidermal Merkel cells",
                               "small intestine: enterocytes",
                               "stomach: G-cells",
                               "stomach: parietal cells",
                               "suprarenal gland: medullary cells",
                               "suprarenal gland: zona fasciculata cells",
                               "suprarenal gland: zona glomerularis cells",
                               "suprarenal gland: zona reticularis cells",
                               "thyroid: clear cells",
                               "thyroid: follicular cells",
                               "vessels: endothelial cells"};
    // Copy both tables into the caller-provided buffers.
    for (i=0; i<NMB_CELL_TYPES; i++) {
        ctc[i] = cell_type_count[i];
        strcpy(ct[i], cell_type[i]);
    }
    return;
}
/*
 * Pick a tissue at random, weighted by its share of the total cell count:
 * zero the whole infection[] vector, then mark exactly one entry with -1.
 * Interval layout over [0,1] is as before (tissue i owns a sub-range of
 * width cell_type_count[i]/tot_nmb_cells).
 */
void random_infection(float tot_nmb_cells,
                      float *cell_type_count, float *infection)
{
    int i;
    float random_number, lower_bound, upper_bound;
    random_number = (float) rand() / (float) RAND_MAX;
    /* clear first so early exit below leaves a well-defined vector */
    for (i=0; i<NMB_CELL_TYPES; i++) {
        infection[i] = (float) 0;
    }
    lower_bound = 0.0;
    upper_bound = 0.0;
    for (i=0; i<NMB_CELL_TYPES; i++) {
        upper_bound += cell_type_count[i] / tot_nmb_cells;
        /*
         * BUG FIX: the original used closed intervals [lower, upper] on both
         * sides, so a random_number landing exactly on a shared boundary
         * marked TWO adjacent tissues; and float rounding could leave the
         * final upper_bound below 1.0, marking none. Use half-open intervals
         * and let the last interval absorb the remainder.
         */
        if ((random_number >= lower_bound) &&
            ((random_number < upper_bound) || (i == NMB_CELL_TYPES-1))) {
            infection[i] = (float) -1;
            return;
        }
        lower_bound = upper_bound;
    }
    return;
}
// Simulate the disease hour by hour: seed one infected cell in the target
// tissue, launch the per-tissue update kernel each time step, and stop
// early once the healthy fraction falls below the survival threshold.
void monitor_infection_progress(float *healthy_cell_type_count,
                                float *sick_cell_type_count,
                                float *nmb_infected_cells)
{
    int i, time;
    float healthy_fraction;
    dim3 thrds_per_block, blcks_per_grid;
    /*
     * initialize with appropriate data relevant for time 0, ie the time of
     * de-novo infection with H7N9
     */
    for (i=0; i<NMB_CELL_TYPES; i++) {
        sick_cell_type_count[i] = healthy_cell_type_count[i];
        nmb_infected_cells[i] = 0.0;
    }
    // Patient zero cell: one infected cell in the H7N9 target tissue.
    sick_cell_type_count[H7N9_TISSUE_TARGET] -= 1.0;
    nmb_infected_cells[H7N9_TISSUE_TARGET] = 1.0;
    /*
     * simulate a time span of interest to follow/survey the evolution
     * of cell counts in case of H7N9 infection
     */
    for (i=1; i<NMB_TIME_STEPS_2STUDY; i++) {
        time = i * TIME_STEP_IN_HOURS;
        /*
         * consider the most general case of updating all tissue types
         * in terms of current cell counts, regardless of whether or not
         * they are directly implicated in H7N9 infection; this update
         * of all the NMB_CELL_TYPES types of different tissues may be
         * done concurrently, so a perfect task to be outsourced to the
         * GPU, thus we need to set up an appropriate kernel execution
         * configuration, hence one block of NMB_CELL_TYPES threads;
         */
        thrds_per_block.x = (int) NMB_CELL_TYPES;
        blcks_per_grid.x = (int) 1;
        update_nmb_infected_cells<<<blcks_per_grid, thrds_per_block>>>(time,
                                              sick_cell_type_count,
                                              nmb_infected_cells);
        // Required before the host touches the managed arrays again.
        cudaDeviceSynchronize();
        printf("%6d%12.3e\n", time, nmb_infected_cells[H7N9_TISSUE_TARGET]);
        /*
         * and in case we drop below a critical level of required
         * healthy cells abort and declare the organism dead
         */
        healthy_fraction = sick_cell_type_count[H7N9_TISSUE_TARGET] /
                           healthy_cell_type_count[H7N9_TISSUE_TARGET];
        if (100.0 * healthy_fraction < (float) H7N9_TISSUE_SURVIVAL_PERCENTAGE) {
            printf("*** this organism died %6d hours post infection ***\n", time);
            printf("*** nmb_infected_cells[X] %14.6e ***\n", nmb_infected_cells[H7N9_TISSUE_TARGET]);
            printf("*** sick_cell_type_count[X] %14.6e ***\n", sick_cell_type_count[H7N9_TISSUE_TARGET]);
            printf("*** healthy_fraction %14.6e ***\n", healthy_fraction);
            break;
        }
    }
    return;
}
|
738
|
#include "arguments.hh"
#include <algorithm>
// Wrap an existing argument vector.
Arguments::Arguments(const std::vector<std::string>& args)
    : args_(args)
{}

// Build from the classic main() signature; argv[0] (the program name)
// is kept, matching the vector-based constructor's semantics.
Arguments::Arguments(int argc, char** argv)
{
    args_.assign(argv, argv + argc);
}
// Read-only access to the stored argument list (including argv[0]).
const std::vector<std::string>& Arguments::args_get() const
{
    return args_;
}
// True when the short option "-s" appears verbatim among the arguments.
bool Arguments::has_option(char s) const
{
    const std::string query{'-', s};
    return std::find(args_.begin(), args_.end(), query) != args_.end();
}

// True when the long option "--name" appears verbatim among the arguments.
bool Arguments::has_option(const std::string& l) const
{
    const std::string query = "--" + l;
    return std::find(args_.begin(), args_.end(), query) != args_.end();
}

// True when either the short or the long spelling is present.
bool Arguments::has_option(char s, const std::string& l) const
{
    if (has_option(s))
        return true;
    return has_option(l);
}
// Value following the short option "-s", or "" when the option is absent
// or is the last argument (no value after it).
std::string Arguments::get_option(char s) const
{
    const std::string query{'-', s};
    auto pos = std::find(args_.begin(), args_.end(), query);
    if (pos == args_.end())
        return "";
    ++pos;
    return pos == args_.end() ? std::string() : *pos;
}

// Value following the long option "--name", same absent/last-arg rules.
std::string Arguments::get_option(const std::string& l) const
{
    const std::string query = "--" + l;
    auto pos = std::find(args_.begin(), args_.end(), query);
    if (pos == args_.end())
        return "";
    ++pos;
    return pos == args_.end() ? std::string() : *pos;
}

// Long spelling wins; fall back to the short one when the long form
// yields nothing.
std::string Arguments::get_option(char s, const std::string& l) const
{
    const std::string byLong = get_option(l);
    if (!byLong.empty())
        return byLong;
    return get_option(s);
}
// Number of stored arguments (argv[0] included).
std::size_t Arguments::size() const
{
    return args_.size();
}
// Unchecked element access; i must be < size().
const std::string& Arguments::operator[](std::size_t i) const
{
    return args_[i];
}
|
739
|
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include <cuda_runtime.h>
#define WIDTH 1024
#define TILE_WIDTH 16
#define BLOCKSPERGRID WIDTH / TILE_WIDTH
int N[WIDTH][WIDTH] = {0};
int T[WIDTH][WIDTH] = {0};
__global__ void transpose(int *Nd, int *Td);
__device__ int GetElement(int *matrix, int row, int col);
__device__ void SetElement(int *matrix, int row, int col, int value);
__device__ int *GetSubMatrix(int *matrix, int blockrow, int blockcol);
/*
 * Transposes a WIDTH x WIDTH random int matrix twice: once on the CPU
 * (in place, on the global N) and once on the GPU (Nd -> Td, from the
 * pre-transpose copy of N), timing both, then verifies the two agree.
 */
int main(int argc, char *argv[])
{
    float elapsedTime;
    // Fill N with random values in [1, 255].
    for (int i = 0; i < WIDTH; ++i)
    {
        for (int j = 0; j < WIDTH; ++j)
        {
            N[i][j] = (int)(rand() % 255 + 1);
        }
    }
    // Original
    size_t size = WIDTH * WIDTH * sizeof(int);
    int *Nd, *Td;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Copy N to the device BEFORE the CPU transposes it in place, so the
    // GPU transposes the original matrix.
    cudaMalloc((void **)&Nd, size);
    cudaMemcpy(Nd, N, size, cudaMemcpyHostToDevice);
    cudaMalloc((void **)&Td, size);
    // CPU in-place transpose (upper-triangle swap), wall-clock timed.
    struct timeval starttime, endtime;
    gettimeofday(&starttime, NULL);
    for (int i = 0; i < WIDTH; i++)
    {
        for (int j = i + 1; j < WIDTH; j++)
        {
            int temp = N[i][j];
            N[i][j] = N[j][i];
            N[j][i] = temp;
        }
    }
    gettimeofday(&endtime, NULL);
    double executime;
    executime = (endtime.tv_sec - starttime.tv_sec) * 1000.0;
    executime += (endtime.tv_usec - starttime.tv_usec) / 1000.0;
    printf("CPU time: %13lf msec\n", executime);
    // One thread per matrix element; WIDTH is a multiple of TILE_WIDTH,
    // so the grid exactly covers the matrix.
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    dim3 dimGrid(BLOCKSPERGRID, BLOCKSPERGRID);
    cudaEventRecord(start, 0);
    transpose<<<dimGrid, dimBlock>>>(Nd, Td);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("GPU time: %13f msec\n", elapsedTime);
    // NOTE(review): this check runs AFTER the launch, so the message text
    // "before kernel call" is misleading; it reports launch/runtime errors.
    cudaError_t cuda_err = cudaGetLastError();
    if (cudaSuccess != cuda_err)
    {
        printf("before kernel call: error = %s\n", cudaGetErrorString(cuda_err));
        exit(1);
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaMemcpy(T, Td, size, cudaMemcpyDeviceToHost);
    // Compare the CPU-transposed N against the GPU result T.
    // NOTE(review): `break` only leaves the inner loop, so one mismatch
    // per row can still be printed.
    int pass = 1;
    for (int i = 0; i < WIDTH; ++i)
    {
        for (int j = 0; j < WIDTH; ++j)
        {
            if (N[i][j] != T[i][j])
            {
                printf("N[%d][%d] = %d T[%d][%d] = %d\n", i, j, N[i][j], i, j, T[i][j]);
                pass = 0;
                break;
            }
        }
    }
    printf("Test %s\n", (pass) ? "PASSED" : "FAILED");
    cudaFree(Nd);
    cudaFree(Td);
    return 0;
}
// Transposes the WIDTH x WIDTH matrix Nd into Td, one thread per element.
// The launch configuration must cover WIDTH in both x and y; no bounds
// check is performed (WIDTH is a multiple of TILE_WIDTH).
__global__ void transpose(int *Nd, int *Td)
{
    int col = blockIdx.x * TILE_WIDTH + threadIdx.x;
    int row = blockIdx.y * TILE_WIDTH + threadIdx.y;
    Td[col * WIDTH + row] = Nd[row * WIDTH + col];
}
// Row-major accessor helpers over a WIDTH x WIDTH int matrix
// (unused by the transpose kernel above; kept for API compatibility).
__device__ int GetElement(int *matrix, int y, int x)
{
    return matrix[y * WIDTH + x];
}
__device__ void SetElement(int *matrix, int y, int x, int value)
{
    matrix[y * WIDTH + x] = value;
}
// Pointer to the top-left element of the (block_y, block_x) tile of size
// TILE_WIDTH x TILE_WIDTH.
__device__ int *GetSubMatrix(int *matrix, int block_y, int block_x)
{
    return matrix + (block_y * WIDTH + block_x) * TILE_WIDTH;
}
|
740
|
#include <iostream>
#include <stdio.h>
#include <math.h>
// Stub kernel: accepts an int and performs no work (body intentionally empty).
__global__ void vectorAdd(int a) {
}
|
741
|
#include <iostream>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
const size_t GRID_SIZE = 100;
const size_t BLOCK_SIZE = 256;
// Kernel: per-block partial dot products of cVectorA . cVectorB.
// Each thread accumulates a grid-stride partial sum into shared memory,
// the block reduces it, and thread 0 writes the block's partial result
// to dotProductSebagian[blockIdx.x] (which must have gridDim.x slots).
// BUGFIX: the original never wrote to `cache` and did no reduction, so
// it published uninitialized shared memory as the result.
__global__
void dotProduct(
    const float *cVectorA,
    const float *cVectorB,
    float *dotProductSebagian,
    const int cJumlahElemen)
{
    __shared__ float cache[ BLOCK_SIZE ];
    // Grid-stride accumulation so any grid size covers all elements.
    float sum = 0.0f;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x;
         i < cJumlahElemen;
         i += gridDim.x * blockDim.x) {
        sum += cVectorA[i] * cVectorB[i];
    }
    cache[threadIdx.x] = sum;
    __syncthreads();
    // Shared-memory tree reduction; BLOCK_SIZE must be a power of two.
    for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (threadIdx.x < stride)
            cache[threadIdx.x] += cache[threadIdx.x + stride];
        __syncthreads();
    }
    // The block's partial dot product ends up in cache[0].
    if (threadIdx.x == 0) dotProductSebagian[blockIdx.x] = cache[0];
}
// Host driver: builds two random float vectors, computes their dot
// product on the GPU (one partial sum per block, reduced on the host),
// and validates against a CPU reference.
int main(void)
{
    // Use GPU number 0.
    cudaSetDevice(0);
    const int kJumlahElemen = GRID_SIZE * BLOCK_SIZE;
    size_t ukuran_vector_bytes_ = kJumlahElemen * sizeof(float);
    std::cout << "[Penjumlahan vector dengan jumlah elemen " << kJumlahElemen << std::endl;
    // Host buffers: two inputs plus one partial result per block.
    float *h_A_ = (float *)malloc(ukuran_vector_bytes_);
    float *h_B_ = (float *)malloc(ukuran_vector_bytes_);
    float *h_C_ = (float *)malloc(GRID_SIZE * sizeof(float));
    if (h_A_ == NULL || h_B_ == NULL || h_C_ == NULL)
    {
        std::cerr << "Failed to allocate host vectors!\n";
        exit(-1);
    }
    // Fill the inputs with uniform random values in [0, 1].
    srand(time(NULL));
    for (int i = 0; i < kJumlahElemen; ++i)
    {
        h_A_[i] = rand()/(float)RAND_MAX;
        h_B_[i] = rand()/(float)RAND_MAX;
    }
    float *d_A_ = NULL;
    float *d_B_ = NULL;
    float *d_C_ = NULL;
    cudaMalloc((void **)&d_A_, ukuran_vector_bytes_);
    cudaMalloc((void **)&d_B_, ukuran_vector_bytes_);
    cudaMalloc((void **)&d_C_, GRID_SIZE * sizeof(float));
    // Copy the inputs from the host to the CUDA device.
    std::cout << "Salin input dari host ke CUDA device\n";
    cudaMemcpy(d_A_, h_A_, ukuran_vector_bytes_, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B_, h_B_, ukuran_vector_bytes_, cudaMemcpyHostToDevice);
    // One partial dot product per block.
    dim3 block(BLOCK_SIZE, 1, 1);
    dim3 grid(GRID_SIZE, 1, 1);
    std::cout << "Peluncuran kernel Cuda dengan ukuran " << GRID_SIZE << " block " << BLOCK_SIZE << " threads\n";
    dotProduct<<<grid,block>>>(d_A_,d_B_,d_C_,kJumlahElemen);
    // Check for launch-configuration errors.
    cudaError_t err_ = cudaGetLastError();
    if (err_ != cudaSuccess)
    {
        std::cerr << "Gagal meluncurkan kernel Cuda (error code " << cudaGetErrorString(err_) << ")!\n";
        exit(-1);
    }
    // Blocking D2H copy: also synchronizes with the kernel.
    std::cout << "Salin data dari CUDA device ke host memory\n";
    cudaMemcpy(h_C_, d_C_, GRID_SIZE * sizeof(float), cudaMemcpyDeviceToHost);
    // Reduce the per-block partial sums on the host.
    float resultGPU = 0.0;
    for (int i=0;i<GRID_SIZE;i++) resultGPU += h_C_[i];
    // CPU reference dot product.
    float resultCPU = 0.0;
    for (int i=0;i<kJumlahElemen;i++) resultCPU += h_A_[i] * h_B_[i];
    std::cout << "GPU = " << resultGPU << std::endl;
    std::cout << "CPU = " << resultCPU << std::endl;
    // Loose tolerance: float accumulation order differs between CPU/GPU.
    if (fabs(resultGPU - resultCPU) < 1e-1)
        std::cout << "Test PASSED\n";
    else
        std::cout << "Test FAILED\n";
    cudaFree(d_A_);
    cudaFree(d_B_);
    cudaFree(d_C_);
    free(h_A_);
    free(h_B_);
    free(h_C_);
    cudaDeviceReset();
    std::cout << "Done\n";
    return 0;
}
|
742
|
// Valid-mode 2D convolution (cross-correlation) over column-major
// matrices with leading dimensions inputLd / kernelLd / outputLd.
// Each thread produces output[row, col] for one (row, col) position
// stepped by rowStep/colStep; threads outside the valid region exit.
__global__ void convolution(float* input, int inputRows, int inputCols, int inputLd,
                            float* kernel, int kernelRows, int kernelCols, int kernelLd,
                            int rowStep, int colStep, float* output, int outputLd) {
    int row = (blockIdx.y * blockDim.y + threadIdx.y) * rowStep;
    int col = (blockIdx.x * blockDim.x + threadIdx.x) * colStep;
    if (row > inputRows - kernelRows || col > inputCols - kernelCols)
        return;
    // Accumulate locally, store the finished value once.
    float acc = 0.0f;
    for (int i = 0; i < kernelRows; ++i) {
        for (int j = 0; j < kernelCols; ++j) {
            acc += kernel[i + j * kernelLd] * input[(row + i) + (col + j) * inputLd];
        }
    }
    output[row + col * outputLd] = acc;
}
extern "C" {
// Host launcher for the convolution kernel on the given stream.
// BUGFIX: in the kernel blockIdx.x indexes COLUMNS and blockIdx.y indexes
// ROWS, but the grid was built with x sized from inputRows and y from
// inputCols — leaving part of non-square inputs uncomputed. The grid
// dimensions are now matched to the kernel's indexing.
void Matrix_convolution(float* input, int inputRows, int inputCols, int inputLd,
                        float* kernel, int kernelRows, int kernelCols, int kernelLd,
                        int rowStep, int colStep, float* output, int outputLd, cudaStream_t stream) {
    dim3 blockDim(32, 32);
    dim3 gridDim((inputCols + blockDim.x - 1) / blockDim.x,
                 (inputRows + blockDim.y - 1) / blockDim.y);
    convolution <<<gridDim, blockDim, 0, stream>>> (input, inputRows, inputCols, inputLd,
                                                    kernel, kernelRows, kernelCols, kernelLd,
                                                    rowStep, colStep, output, outputLd);
}
}
|
743
|
//
//#include "cuda_runtime.h"
//#include "device_launch_parameters.h"
//#include <stdlib.h>
//#include <stdio.h>
//
//__global__ void Read_texture_obj_kernel(float *iptr, cudaTextureObject_t tex) {
// int x = threadIdx.x + blockIdx.x * blockDim.x;
// int y = threadIdx.y + blockIdx.y * blockDim.y;
// int offset = x + y * blockDim.x * gridDim.x;
//
// float c = tex1Dfetch<float>(tex, offset);
// iptr[offset] = c;
//}
//
//
//cudaTextureObject_t *TexObjFloat1D(float *devPtr, int length)
//{
// // create texture object
// cudaResourceDesc resDesc;
// memset(&resDesc, 0, sizeof(resDesc));
// resDesc.resType = cudaResourceTypeLinear;
// resDesc.res.linear.devPtr = devPtr;
// resDesc.res.linear.desc.f = cudaChannelFormatKindFloat;
// resDesc.res.linear.desc.x = 32; // bits per channel
// resDesc.res.linear.sizeInBytes = length * sizeof(float);
//
// cudaTextureDesc texDesc;
// memset(&texDesc, 0, sizeof(texDesc));
// texDesc.readMode = cudaReadModeElementType;
//
// cudaTextureObject_t *tex = (cudaTextureObject_t *)malloc(sizeof(cudaTextureObject_t));
// cudaCreateTextureObject(tex, &resDesc, &texDesc, NULL);
// return tex;
//}
|
744
|
// Copies a[i] into b[i], one element per thread. The launch must supply
// exactly one thread per element: no bounds check is performed.
__global__ void simple_copy(const double *a, double *b) {
    const size_t idx = threadIdx.x + blockDim.x * blockIdx.x;
    b[idx] = a[idx];
}
|
745
|
#include <dirent.h>
#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
//#include <time.h>
#include <sys/time.h>
#include <sys/types.h>
#include <unistd.h>
#define MAX(x, y) (((x) > (y)) ? (x) : (y))
#define MIN(x, y) (((x) < (y)) ? (x) : (y))
#define MAX_STR_LEN 256
struct ponto_capturado{
int TID;
char *clazz;
int time;
double lat, lon;
int gid;
int stopId;
};
struct trajetoria{
ponto_capturado** pontos;
int qntdPontos;
};
trajetoria** trajetorias;
trajetoria* readTrajFile(char*);
double* trajectoryRawer(trajetoria*);
double euclidean(double *p1, double *p2);
void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors);
double distance(double*, int, double*, int);
/*
 * Loads every "*.traj" file under ./trajetorias, converts each trajectory
 * to a flat 4-doubles-per-point array, and fills a symmetric pairwise MSM
 * similarity matrix.
 */
int main(int argc, char *argv[]) {
    int file_count = 0;
    int len;
    DIR * dirp;
    struct dirent * entry;
    // First pass: count the ".traj" files so the arrays can be sized.
    dirp = opendir("./trajetorias");
    while ((entry = readdir(dirp)) != NULL) {
        len = strlen(entry->d_name);
        // BUGFIX: require len > 4 before reading d_name[len - 5]; short
        // names ("." and "..") previously caused an out-of-bounds read.
        if (len > 4 && entry->d_type == DT_REG && strcmp(".traj", &(entry->d_name[len - 5])) == 0) {
            file_count++;
        }
    }
    closedir(dirp);
    trajetorias = (trajetoria**) malloc(file_count*sizeof(trajetoria*));
    // Second pass: load each trajectory file.
    DIR* FD;
    struct dirent* in_file;
    if (NULL == (FD = opendir ("./trajetorias"))) {
        fprintf(stderr, "Error : Failed to open input directory\n");
        return 1;
    }
    int fileCounter = 0;
    while ((in_file = readdir(FD))) {
        len = strlen (in_file->d_name);
        if (len > 4 && in_file->d_type == DT_REG && strcmp (".traj", &(in_file->d_name[len - 5])) == 0) {
            char filePath[1024];
            sprintf( filePath, "%s/%s", "./trajetorias", in_file->d_name );
            trajetorias[fileCounter++] = readTrajFile(filePath);
        }
    }
    closedir(FD); // BUGFIX: directory handle was never closed
    printf("Qntd arquivos lidos %d\n", file_count);
    double** allDistances = (double**) malloc(file_count*sizeof(double*));
    double** rawTrajs = (double**) malloc(file_count*sizeof(double*));
    for(int k = 0;k<file_count;k++) {
        rawTrajs[k] = trajectoryRawer(trajetorias[k]);
    }
    for(int k = 0;k<file_count;k++) {
        allDistances[k] = (double*) malloc(file_count*sizeof(double));
    }
    printf("Trajetorias transformadas %d\n", file_count);
    // Fill the symmetric similarity matrix; only k < l pairs are computed.
    for(int k = 0;k<file_count;k++) {
        allDistances[k][k] = 0.0;
        for(int l = 0;l<file_count;l++) {
            if(k<l) {
                double similarity = distance(rawTrajs[k], trajetorias[k]->qntdPontos,
                                             rawTrajs[l], trajetorias[l]->qntdPontos);
                allDistances[k][l] = similarity;
                allDistances[l][k] = similarity;
            }
        }
    }
    // BUGFIX: release everything — rawTrajs, allDistances, the point
    // arrays and each point's class string all leaked before; new'd
    // objects are now matched with delete.
    for(int i = 0; i < file_count;i++) {
        if(trajetorias[i]) {
            for(int j = 0; j < trajetorias[i]->qntdPontos;j++) {
                free(trajetorias[i]->pontos[j]->clazz);
                delete trajetorias[i]->pontos[j];
            }
            free(trajetorias[i]->pontos);
            delete trajetorias[i];
        }
        free(rawTrajs[i]);
        free(allDistances[i]);
    }
    free(rawTrajs);
    free(allDistances);
    free(trajetorias);
    return 0;
}
/*
 * Symmetric MSM similarity between two raw trajectories (4 doubles per
 * point: lat, lon, startTime, endTime) of N and M points respectively.
 * Returns (parityAB + parityBA) / (N + M).
 */
double distance(double* trajA, int N, double* trajB, int M) {
    double* aScore = (double*)malloc(N * sizeof(double));
    double* bScore = (double*)malloc((size_t)N * M * sizeof(double));
    double* semanticsDescriptors = (double*)malloc(2 * 2 * sizeof(double));
    // GEO: threshold, weight
    semanticsDescriptors[0] = 0.0;
    semanticsDescriptors[1] = 0.5;
    // TIME: threshold, weight
    semanticsDescriptors[2] = 0.0;
    semanticsDescriptors[3] = 0.5;
    msm(trajA, N, trajB, M, aScore, bScore, semanticsDescriptors);
    // A -> B parity: best match of each A point (precomputed by msm).
    double parityAB = 0.0;
    for (int i = 0; i < N; i++) {
        parityAB += aScore[i];
    }
    // B -> A parity: best match of each B point.
    // BUGFIX: the old loops iterated over A's points and re-derived
    // aScore, double-counting parityAB; we must take, for every B point j,
    // the maximum score over A's points i (columns of bScore).
    double parityBA = 0.0;
    for (int j = 0; j < M; j++) {
        double maxScore = 0.0;
        for (int i = 0; i < N; i++) {
            maxScore = MAX(maxScore, bScore[i * M + j]);
        }
        parityBA += maxScore;
    }
    double similarity = (parityAB + parityBA) / (N + M);
    free(semanticsDescriptors);
    free(bScore);
    free(aScore);
    return similarity;
}
/*
 * MSM matching step. For every point i of trajectory A, scores it against
 * every point j of trajectory B on two dimensions:
 *   - time: 1 if the normalized interval-overlap distance is within
 *     timeThreshold, weighted by timeWeight;
 *   - space: 1 if the Euclidean lat/lon distance is within geoThreshold,
 *     weighted by geoWeight.
 * Writes the full score matrix to bScore[i * lengthB + j] and the best
 * score per A point to aScore[i].
 * Layout: 4 doubles per point (lat, lon, startTime, endTime);
 * semanticsDescriptors = {geoThreshold, geoWeight, timeThreshold, timeWeight}.
 */
void msm(double* trajA, int lengthA, double* trajB, int lengthB, double* aScore, double* bScore, double* semanticsDescriptors) {
    for(int i = 0; i < lengthA; i++) {
        double latGeoA = trajA[i * 4];
        double lonGeoA = trajA[i * 4 + 1];
        double startTimeA = trajA[i * 4 + 2];
        double endTimeA = trajA[i * 4 + 3];
        double geoThreshold = semanticsDescriptors[0];
        double timeThreshold = semanticsDescriptors[2];
        double geoWeight = semanticsDescriptors[1];
        double timeWeight = semanticsDescriptors[3];
        double maxScore = 0.0;
        for (int j = 0; j < lengthB; j++) {
            double latGeoB = trajB[j * 4];
            double lonGeoB = trajB[j * 4 + 1];
            double startTimeB = trajB[j * 4 + 2];
            double endTimeB = trajB[j * 4 + 3];
            // Temporal score: only intervals that actually overlap can match.
            double timeScore = 0.0;
            if(startTimeA < endTimeB && startTimeB < endTimeA ) {
                double overlap = MIN(endTimeA, endTimeB) - MAX(startTimeA, startTimeB);
                if(overlap > 0.0) {
                    // timeDistance is 0 when the intervals coincide and
                    // approaches 1 when they barely touch.
                    double duration = MAX(endTimeA, endTimeB) - MIN(startTimeA, startTimeB);
                    double timeDistance = 1 - (overlap / duration);
                    timeScore = (timeDistance <= timeThreshold ? 1 : 0) * timeWeight;
                }
            }
            // Spatial score: threshold on the Euclidean lat/lon distance.
            double geoB[] = {latGeoB, lonGeoB};
            double geoA[] = {latGeoA, lonGeoA};
            double geoScore = (euclidean(geoB, geoA) <= geoThreshold ? 1 : 0) * geoWeight;
            double sumScore = timeScore + geoScore;
            if(sumScore > maxScore) {
                maxScore = sumScore;
            }
            bScore[i * lengthB + j] = sumScore;
        }
        aScore[i] = maxScore;
    }
}
/*
 * Loads one ".traj" file. Each line holds one captured point:
 *   TID;class;time;lat;lon;gid[;stopId]
 * Two passes: count the lines, then parse them. Returns a newly allocated
 * trajetoria (caller frees the points) or NULL on error.
 */
trajetoria* readTrajFile(char *filePath) {
    /* FileStream for the trajectory file */
    FILE *trajFile;
    /* shared buffer for every line in the file */
    char *buf = (char*) malloc(MAX_STR_LEN);
    char *tmp;
    if (buf == NULL) {
        printf ("No memory\n");
        return NULL;
    }
    if ( ( trajFile = fopen( filePath, "r" ) ) == NULL )
    {
        printf( "File could not be opened: %s.\n", filePath );
        free(buf); /* BUGFIX: the line buffer leaked on every exit path */
        return NULL;
    }
    /* First pass: count lines to size the point array. */
    int pointsCounter = 0;
    while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL) {
        pointsCounter++;
    }
    fclose(trajFile);
    ponto_capturado **traj = (ponto_capturado**) malloc(pointsCounter*sizeof(ponto_capturado*));
    trajetoria* trajetoria = new struct trajetoria;
    trajetoria->pontos = traj;
    trajetoria->qntdPontos = pointsCounter;
    if ( ( trajFile = fopen( filePath, "r" ) ) == NULL ) {
        printf( "File could not be opened: %s.\n", filePath );
        free(buf);
        free(traj);
        delete trajetoria;
        return NULL;
    }
    /* Second pass: parse every line into a ponto_capturado. */
    int i = 0;
    while (fgets(buf, MAX_STR_LEN - 1, trajFile) != NULL)
    {
        /* Strip the trailing newline; skip blank lines and keep the
           point count consistent (the old code crashed on them). */
        size_t lineLen = strlen(buf);
        if (lineLen > 0 && buf[lineLen - 1] == '\n') {
            buf[lineLen - 1] = '\0';
        }
        if (buf[0] == '\0') {
            trajetoria->qntdPontos--;
            continue;
        }
        tmp = strtok(buf, ";");
        traj[i] = new ponto_capturado();
        traj[i]->TID = atoi(tmp);
        tmp = strtok(NULL, ";");
        int len = strlen(tmp);
        traj[i]->clazz = (char*)malloc(len + 1);
        strcpy(traj[i]->clazz, tmp);
        tmp = strtok(NULL, ";");
        traj[i]->time = atoi(tmp);
        tmp = strtok(NULL, ";");
        traj[i]->lat = atof(tmp);
        tmp = strtok(NULL, ";");
        traj[i]->lon = atof(tmp);
        tmp = strtok(NULL, ";");
        traj[i]->gid = atoi(tmp);
        tmp = strtok(NULL, ";");
        /* BUGFIX: the old test was inverted (tmp[0] == '\0'), so stopId
           was always 0; parse it when the trailing token is present and
           non-empty. */
        if ((tmp != NULL) && (tmp[0] != '\0')) {
            traj[i]->stopId = atoi(tmp);
        } else {
            traj[i]->stopId = 0;
        }
        i++;
    }
    fclose(trajFile);
    free(buf); /* BUGFIX: previously leaked */
    return trajetoria;
}
// Flattens a trajetoria into a contiguous array of 4 doubles per point:
// [lat, lon, startTime, endTime] where endTime = time + 30.
// Caller owns the returned buffer.
double* trajectoryRawer(trajetoria* trajetoria) {
    const int n = trajetoria->qntdPontos;
    double* raw = (double*)malloc(4 * n * sizeof(double));
    for (int i = 0; i < n; i++) {
        double* row = raw + 4 * i;
        row[0] = trajetoria->pontos[i]->lat;
        row[1] = trajetoria->pontos[i]->lon;
        row[2] = trajetoria->pontos[i]->time;
        row[3] = trajetoria->pontos[i]->time + 30;
    }
    return raw;
}
/*
 * Euclidean distance between two 2-D points given as {x, y} arrays.
 * BUGFIX: the old code called abs() on doubles, which truncates the
 * fractional part when only the integer overload is visible; the absolute
 * value is redundant anyway since the differences are squared.
 */
double euclidean(double *p1, double *p2) {
    double dx = p1[0] - p2[0];
    double dy = p1[1] - p2[1];
    return sqrt(dx * dx + dy * dy);
}
|
746
|
/* matrixmul.cu
*
* Jonathan Lehman
* February 22, 2012
*/
#include <cuda.h>
#include <stdio.h>
#include <math.h>
#include <sys/time.h>
__global__
void mult( float*, float*, float*, int, int, int, int, int);
void buildArrays( int, int );
void checkArgs(int, char**);
void checkGPUCapabilities(int, int, int, int, int);
int nearestDivInt(int, int);
//set block size
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 16
#endif
//user input
int GRID_WIDTH;
int GRID_HEIGHT;
int MATRIX_A_HEIGHT;
int MATRIX_A_WIDTH;
int MATRIX_B_HEIGHT;
int MATRIX_B_WIDTH;
int TOTAL_ELEM;
int MAT_A_ELEM;
int MAT_B_ELEM;
int MAT_C_ELEM;
// Keep track of the time.
cudaEvent_t start, stop;
float elapsedTime;
//arrays
float* a;
float* b;
float* c;
/*
 * matrixmul driver. Reads six positional arguments (grid W/H, matrix A
 * H/W, matrix B H/W), coerces the dimensions so the tiled kernel's
 * assumptions hold (sizes divisible by the block size, B width equal to
 * grid width * block size), multiplies two random matrices on the GPU
 * and prints the elapsed kernel time.
 */
int main( int argc, char *argv[] ){
    float *dev_a, *dev_b, *dev_c;
    //check validity of arguments
    checkArgs(argc, argv);
    //assign variables
    GRID_WIDTH = atoi(argv[1]);
    GRID_HEIGHT = atoi(argv[2]);
    MATRIX_A_HEIGHT = atoi(argv[3]);
    MATRIX_A_WIDTH = atoi(argv[4]);
    MATRIX_B_HEIGHT = atoi(argv[5]);
    MATRIX_B_WIDTH = atoi(argv[6]);
    //check that multiplication is valid
    if(MATRIX_A_WIDTH != MATRIX_B_HEIGHT){
        fprintf(stderr, "\nmatrixmul: Matrix A width, %d, must equal Matrix B height, %d, otherwise these matrices cannot be multiplied\n", MATRIX_A_WIDTH, MATRIX_B_HEIGHT );
        exit(1);
    }
    //make sure dimensions of C matrix are divisible by block size
    if(nearestDivInt(MATRIX_A_WIDTH, BLOCK_SIZE) != MATRIX_A_WIDTH){
        MATRIX_A_WIDTH = nearestDivInt(MATRIX_A_WIDTH, BLOCK_SIZE);
        if(MATRIX_A_WIDTH == 0){
            MATRIX_A_WIDTH = BLOCK_SIZE;
        }
        MATRIX_B_HEIGHT = MATRIX_A_WIDTH;
        printf("Matrix A width and Matrix B height must be divisible by the block dimension %d\nChanging the dimensions of Matrix A to %d x %d (HxW) and Matrix B to %d x % d (HxW)\n", BLOCK_SIZE, MATRIX_A_HEIGHT, MATRIX_A_WIDTH, MATRIX_B_HEIGHT, MATRIX_B_WIDTH);
    }
    MAT_A_ELEM = MATRIX_A_WIDTH * MATRIX_A_HEIGHT;
    MAT_B_ELEM = MATRIX_B_WIDTH * MATRIX_B_HEIGHT;
    //check that matrixA is divisible by block size, if not change dimensions
    if(nearestDivInt(MAT_A_ELEM, BLOCK_SIZE * BLOCK_SIZE) != MAT_A_ELEM){
        MATRIX_A_HEIGHT = nearestDivInt(MATRIX_A_HEIGHT, BLOCK_SIZE * BLOCK_SIZE);
        if(MATRIX_A_HEIGHT == 0){
            MATRIX_A_HEIGHT = BLOCK_SIZE * BLOCK_SIZE;
        }
        printf("Matrix A not divisible by the block size, %d\nChanging the dimensions of Matrix A to %d x %d (HxW)\n", BLOCK_SIZE * BLOCK_SIZE, MATRIX_A_HEIGHT, MATRIX_A_WIDTH);
    }
    //check that matrixB is divisible by block size, if not change dimensions
    if(nearestDivInt(MAT_B_ELEM, BLOCK_SIZE * BLOCK_SIZE) != MAT_B_ELEM){
        MATRIX_B_WIDTH = nearestDivInt(MATRIX_B_WIDTH, BLOCK_SIZE * BLOCK_SIZE);
        if(MATRIX_B_WIDTH == 0){
            MATRIX_B_WIDTH = BLOCK_SIZE * BLOCK_SIZE;
        }
        printf("Matrix B not divisible by the block size, %d\nChanging the dimensions of Matrix B to %d x %d (HxW)\n", BLOCK_SIZE * BLOCK_SIZE, MATRIX_B_HEIGHT, MATRIX_B_WIDTH);
    }
    //need to ensure that the gridwidth is the same as this value, to ensure that the multiplier will work in ALL instances
    if(MATRIX_B_WIDTH != GRID_WIDTH * BLOCK_SIZE){
        MATRIX_B_WIDTH = GRID_WIDTH * BLOCK_SIZE;
        printf("Matrix B width must equal the grid width, %d, times the block size, %d\nChanging the dimensions of Matrix B to %d x %d (HxW)\n", GRID_WIDTH, BLOCK_SIZE, MATRIX_B_HEIGHT, MATRIX_B_WIDTH);
    }
    MAT_A_ELEM = MATRIX_A_WIDTH * MATRIX_A_HEIGHT;
    MAT_B_ELEM = MATRIX_B_WIDTH * MATRIX_B_HEIGHT;
    MAT_C_ELEM = MATRIX_A_HEIGHT * MATRIX_B_WIDTH;
    TOTAL_ELEM = MAT_A_ELEM + MAT_B_ELEM + MAT_C_ELEM;
    //check that there are no more elements in the resultant matrix than threads to calculate them
    if(GRID_WIDTH * BLOCK_SIZE * GRID_HEIGHT * BLOCK_SIZE < MAT_C_ELEM){
        printf("There must be more threads in the grid, %d, than elements in the resulting matrix, %d\n", GRID_WIDTH * BLOCK_SIZE * GRID_HEIGHT * BLOCK_SIZE, MAT_C_ELEM);
        exit(1);
    }
    //check that GPU can handle arguments
    checkGPUCapabilities(GRID_WIDTH, GRID_HEIGHT, BLOCK_SIZE, BLOCK_SIZE, TOTAL_ELEM);
    /* Initialize the source arrays here. */
    a = new float[MAT_A_ELEM];
    b = new float[MAT_B_ELEM];
    c = new float[MAT_C_ELEM];
    //fill arrays a and b with random floats
    buildArrays(MAT_A_ELEM, MAT_B_ELEM);
    //check if there will be enough blocks to handle matrix size (if not some threads will take on more than one element)
    int reps = ceil((double)(MAT_C_ELEM) / (BLOCK_SIZE * BLOCK_SIZE * GRID_WIDTH * GRID_HEIGHT));
    /* Allocate global device memory. */
    cudaMalloc( (void **)&dev_a, sizeof(float) * MAT_A_ELEM );
    cudaMalloc( (void **)&dev_b, sizeof(float) * MAT_B_ELEM );
    cudaMalloc( (void **)&dev_c, sizeof(float) * MAT_C_ELEM );
    /* Copy the host values to global device memory. */
    cudaMemcpy( dev_a, a, sizeof(float) * MAT_A_ELEM, cudaMemcpyHostToDevice );
    cudaMemcpy( dev_b, b, sizeof(float) * MAT_B_ELEM, cudaMemcpyHostToDevice);
    /* Start the timer. */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord( start, 0 );
    /* Execute the kernel. */
    dim3 block(BLOCK_SIZE, BLOCK_SIZE); //threads w x h
    dim3 grid(GRID_WIDTH, GRID_HEIGHT); //blocks w x h
    mult<<<grid, block>>>(dev_a, dev_b, dev_c, MATRIX_A_WIDTH, MATRIX_B_WIDTH, MATRIX_A_HEIGHT, reps, MAT_C_ELEM);
    /* Wait for the kernel to complete. Needed for timing.
     * (cudaThreadSynchronize is deprecated; cudaDeviceSynchronize is the
     * supported equivalent.) */
    cudaDeviceSynchronize();
    /* Stop the timer and print the resulting time. */
    cudaEventRecord( stop, 0 );
    cudaEventSynchronize( stop );
    cudaEventElapsedTime( &elapsedTime, start, stop );
    printf( "Time: %f secs\n", elapsedTime / 1000 );
    /* Get result from device. */
    cudaMemcpy(c, dev_c, sizeof(float) * MAT_C_ELEM, cudaMemcpyDeviceToHost);
    //print any cuda error messages
    const char* errorString = cudaGetErrorString(cudaGetLastError());
    printf("GPU Error: %s\n", errorString);
    //destroy cuda events
    cudaEventDestroy( start );
    cudaEventDestroy( stop );
    /* Free the allocated device memory. */
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    /* BUGFIX: a, b and c were allocated with new[] but released with
     * free(), which is undefined behavior; use delete[]. */
    delete[] a;
    delete[] b;
    delete[] c;
}
/*
 * Tiled matrix multiply: C = A * B, computed through BLOCK_SIZE x
 * BLOCK_SIZE shared-memory tiles. When the grid is smaller than C,
 * reps > 1 makes each thread produce several C elements.
 * BUGFIX: cTot was declared once outside the reps loop and never reset,
 * so with reps > 1 each later C element accumulated the sums of all
 * earlier ones; the accumulator is now reset every repetition.
 * NOTE(review): the shared tiles are written as sh[threadX][threadY] but
 * read as shA[i][threadX] / shB[threadY][i], and the C index uses
 * wB * threadX + threadY — the x/y roles look transposed relative to the
 * loads; verify the output against a CPU reference multiply.
 */
__global__
void mult( float *a, float *b, float *c, int wA , int wB, int hA, int reps, int size)
{
    //grid dimensions (# blocks)
    int gridW = gridDim.x;
    int gridH = gridDim.y;
    //block id
    int blockX = blockIdx.x;
    int blockY = blockIdx.y;
    //thread id
    int threadX = threadIdx.x;
    int threadY = threadIdx.y;
    //values to iterate through submatrix blocks
    int aStart;
    int aSize;
    int aStop;
    int bStart;
    int bSize;
    //shared memory for each block (A and B matrices)
    __shared__ float shA[BLOCK_SIZE][BLOCK_SIZE];
    __shared__ float shB[BLOCK_SIZE][BLOCK_SIZE];
    //loop through number of times matrix elements fill more than an entire grid
    for(int i = 0; i < reps; i++){
        //accumulator for this repetition's C element (reset every rep)
        float cTot = 0;
        //A blocks
        // index of first submatrix of A (account for if doesnt fit on initial grid)
        if(hA > gridH * BLOCK_SIZE){
            aStart = wA * BLOCK_SIZE * (blockY + gridW * i);
        }
        else{
            aStart = wA * BLOCK_SIZE * blockY;
        }
        // size of each submatrix of A
        aSize = BLOCK_SIZE;
        // index of last submatrix of A
        aStop = aStart + wA - 1;
        //B blocks
        // index of first submatrix of B (account for if doesnt fit on initial grid)
        if(wB > gridW * BLOCK_SIZE){
            bStart = BLOCK_SIZE * (blockX + gridH * i);
        }
        else{
            bStart = BLOCK_SIZE * blockX;
        }
        // size of each submatrix of B
        bSize = BLOCK_SIZE * wB;
        // loop through submatrices for a and b by specified steps
        for (int aVal = aStart, bVal = bStart; aVal <= aStop; aVal += aSize, bVal += bSize){
            int aIndex = aVal + wA * threadY + threadX;
            int bIndex = bVal + wB * threadY + threadX;
            //load memory for matrices a and b into shared memory
            shA[threadX][threadY] = a[aIndex];
            shB[threadX][threadY] = b[bIndex];
            __syncthreads();
            for (int k = 0; k < BLOCK_SIZE; k++){
                cTot += shA[k][threadX] * shB[threadY][k];
            }
            __syncthreads();
        }
        //store values to correct index in c
        int cVal = wB * BLOCK_SIZE * blockY + BLOCK_SIZE * blockX;
        int index = cVal + wB * threadX + threadY + (gridW * gridH * BLOCK_SIZE * BLOCK_SIZE * i);
        if(index < size){
            c[index] = cTot;
        }
    }
}
// Populates the global matrices a and b with pseudo-random floats in
// [0, 1]. Fixed seeds (200 for a, 300 for b) keep runs reproducible.
void buildArrays( int mat_a_size, int mat_b_size ){
    srand( 200 );
    for(int i = 0; i < mat_a_size; i++){
        a[i] = rand() / (float)RAND_MAX;
    }
    srand( 300 );
    for(int i = 0; i < mat_b_size; i++){
        b[i] = rand() / (float)RAND_MAX;
    }
}
// Validates the six positional command-line arguments: each must be a
// positive, non-zero integer that fits in an int. Exits on any violation.
// NOTE(review): INT_MAX requires <limits.h>/<climits>, which this file
// does not include directly — confirm it arrives transitively.
void checkArgs(int argc, char *argv[]){
    if(argc != 7){
        fprintf(stderr, "\nmatrixmul: Incorrect number of arguments. matrixmul requires 6 arguments not %d\nCorrect usage: \"matrixmul grid-width grid-height matA-height matA-width matB-height matB-width\"\n", argc - 1);
        exit(1);
    }
    for(int i = 1; i < 7; i++){
        char* endptr;
        long value = strtol(argv[i], &endptr, 10);
        // Reject values that would overflow an int.
        if(value >= INT_MAX){
            fprintf(stderr, "\nmatrixmul: Overflow. Invalid argument %d for matrixmul, '%s'.\nThe argument must be a valid, positive, non-zero integer less than %d.\n", i, argv[i], INT_MAX);
            exit(1);
        }
        // Reject non-numeric, zero, and negative values.
        if(!(value > 0) || (*endptr)){
            fprintf(stderr, "\nmatrixmul: Invalid argument %d for matrixmul, '%s'. The argument must be a valid, positive, non-zero integer.\n", i, argv[i]);
            exit(1);
        }
    }
}
/*
 * Verifies that the current GPU can satisfy the requested launch: enough
 * global memory for `size` floats (size already totals all three
 * matrices), threads per block, block dimensions and grid dimensions.
 * Exits with an error message on the first unsatisfied requirement.
 * BUGFIX: the out-of-memory message printed (size * sizeof(float)) * 3,
 * overstating the requirement threefold; it now reports the same value
 * the check actually tests.
 */
void checkGPUCapabilities(int gridW, int gridH, int blockW, int blockH, int size){
    //check what GPU is being used
    int devId;
    cudaGetDevice( &devId );
    //get device properties for GPU being used
    cudaDeviceProp gpuProp;
    cudaGetDeviceProperties( &gpuProp, devId );
    //check if GPU has enough memory to handle the 3 arrays
    if(gpuProp.totalGlobalMem < (size * sizeof(float))){
        fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU does not have enough memory to handle the data size: %ld. It can only handle data sizes up to %ld.\n", (long)(size * sizeof(float)), gpuProp.totalGlobalMem);
        exit(1);
    }
    //check if GPU can handle the number of threads per block
    if(gpuProp.maxThreadsPerBlock < (blockW * blockH)){
        fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d threads per block, not %d.\n", gpuProp.maxThreadsPerBlock, (blockW * blockH));
        exit(1);
    }
    //check that GPU can handle the number of threads in the block width
    if(gpuProp.maxThreadsDim[0] < blockW){
        fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d threads as the block width of each block, not %d.\n", gpuProp.maxThreadsDim[0], blockW );
        exit(1);
    }
    //check that GPU can handle the number of threads in the block height
    if(gpuProp.maxThreadsDim[1] < blockH){
        fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d threads as the block height of each block, not %d.\n", gpuProp.maxThreadsDim[1], blockH );
        exit(1);
    }
    //check that GPU can handle the number of blocks in the grid width
    if(gpuProp.maxGridSize[0] < gridW){
        fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d blocks as the grid width of each grid, not %d.\n", gpuProp.maxGridSize[0], gridW );
        exit(1);
    }
    //check that GPU can handle the number of blocks in the grid height
    if(gpuProp.maxGridSize[1] < gridH){
        fprintf(stderr, "\nmatrixmul: Insufficient GPU. GPU can only handle %d blocks as the grid height of each grid, not %d.\n", gpuProp.maxGridSize[1], gridH );
        exit(1);
    }
}
// Rounds initVal to the nearest multiple of divBy; exact ties round down.
int nearestDivInt(int initVal, int divBy){
    int lower = (initVal / divBy) * divBy;
    int upper = lower + divBy;
    return (abs(initVal - lower) <= abs(upper - initVal)) ? lower : upper;
}
|
747
|
#include <type_traits>
// Intentionally empty program; always exits with status 0.
int main(int, char*[]) { return 0; }
|
748
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
// Writes out[i] = in[i] + in[i] for each of the `size` elements.
// BUGFIX: the guard used `IDx > size`, which let the thread with
// IDx == size write one element past the end of both arrays; it must
// be `>=`.
__global__ void sumaDatos(int* in, int* out, int size)
{
    int IDx = blockIdx.x * blockDim.x + threadIdx.x;
    if (IDx >= size) return;
    out[IDx] = in[IDx] + in[IDx];
}
/*
 * Doubles 100M ints on the GPU: fills a host buffer, copies it to the
 * device, launches sumaDatos and copies the result back.
 */
int main(int argc, char **argv)
{
    // 100M ints: ~400 MB per buffer on host and device.
    int datosCount = 100000000;
    int* h_datos = (int*)malloc(datosCount * sizeof(int));
    int* h_datosout = (int*)malloc(datosCount * sizeof(int));
    int* d_datos;
    int* d_datosout;
    cudaMalloc(&d_datos, datosCount * sizeof(int));
    cudaMalloc(&d_datosout, datosCount * sizeof(int));
    for (int i = 0; i < datosCount; i++)
    {
        h_datos[i] = i * 2;
    }
    cudaMemcpy(d_datos, h_datos, datosCount * sizeof(int), cudaMemcpyHostToDevice);
    // Ceiling-divide so a partial block covers the tail (the kernel
    // bounds-checks), instead of unconditionally adding a block.
    int numthreads = 256;
    int numbloques = (datosCount + numthreads - 1) / numthreads;
    sumaDatos<<<numbloques, numthreads>>>(d_datos, d_datosout, datosCount);
    // The blocking D2H copy also synchronizes with the kernel.
    cudaMemcpy(h_datosout, d_datosout, datosCount * sizeof(int), cudaMemcpyDeviceToHost);
    // BUGFIX: release device and host buffers (previously leaked).
    cudaFree(d_datos);
    cudaFree(d_datosout);
    free(h_datos);
    free(h_datosout);
    printf("FIN\n");
    return 0;
}
|
749
|
#include<iostream>
#include<ctime>
using namespace std;
#define R 32
#define C 32
//#define BY_R
// Stages each thread's flat index through a shared R x C tile indexed
// [row][column] and copies it back out to `data`.
// Expects a single block of R x C threads and an R*C-int output buffer.
__global__ void by_row(int* data) {
    const unsigned int flat = threadIdx.y * blockDim.x + threadIdx.x;
    __shared__ int cache[R][C];
    cache[threadIdx.y][threadIdx.x] = flat;
    __syncthreads();
    data[flat] = cache[threadIdx.y][threadIdx.x];
}
// Same as by_row but indexes the shared tile [column][row]: with R == C
// == 32, a warp's threads stride 32 ints apart and hit the same shared-
// memory bank, demonstrating bank-conflicted access.
__global__ void by_column(int* data) {
    const unsigned int flat = threadIdx.y * blockDim.x + threadIdx.x;
    __shared__ int cache[R][C];
    cache[threadIdx.x][threadIdx.y] = flat;
    __syncthreads();
    data[flat] = cache[threadIdx.x][threadIdx.y];
}
/*
 * Times one launch of the shared-memory demo kernel (by_row when BY_R is
 * defined, by_column otherwise) with a single R x C block.
 */
int main() {
    clock_t start, end;
    int *a_dev;
    dim3 block(R, C);
    // BUGFIX: sizeof(R*C) is sizeof(int) == 4 bytes; the kernels write a
    // full R*C int matrix, so allocate R * C * sizeof(int) bytes.
    cudaMalloc((void**)&a_dev, R * C * sizeof(int));
    start = clock();
#ifdef BY_R
    by_row<<<1, block>>>(a_dev);
    cout << "gpu by_row ";
#else
    by_column<<<1, block>>>(a_dev);
    cout << "gpu by_column ";
#endif
    cudaDeviceSynchronize();
    end = clock();
    // NOTE(review): clock() returns ticks, not microseconds; the printed
    // unit is only accurate when CLOCKS_PER_SEC == 1e6.
    cout << end - start << "us" << endl;
    // BUGFIX: free the device buffer (previously leaked).
    cudaFree(a_dev);
    return 0;
}
|
750
|
#include <iostream>
#include <math.h>
#include <ctime>
#include <cmath>
#include <stdlib.h>
#include <fstream>
#include <sstream>
#define PI 3.14159265358979323846
// One-step transition density used for the stochastic-mesh weights:
// evaluates a normal density with mean (r - delta - sigma^2/2)*delta_t
// and standard deviation sigma*sqrt(delta_t) on the increment
// Xnew - Xold. The commented-out lines are the lognormal variant that
// applies the same density to log-price ratios — presumably X here
// already holds log-prices (TODO confirm against the mesh construction).
__device__ double densityW(double Xold, double Xnew, double sigma, double r, double delta, double delta_t){
    double f=0, x=0;
    // Standardized increment.
    //x=(1/(sigma*sqrt(delta_t)))*(log(Xnew)-log(Xold)-(r-delta-0.5*sigma*sigma)*delta_t);
    x=(1/(sigma*sqrt(delta_t)))*(Xnew-Xold-(r-delta-0.5*sigma*sigma)*delta_t);
    // Normal pdf value at x, scaled by 1/(sigma*sqrt(delta_t)).
    //f= (1/(sigma*sqrt(delta_t)*Xnew))*(1/(sqrt(2*PI)))*exp(-0.5*x*x); // this is the transition density
    f= (1/(sigma*sqrt(delta_t)))*(1/(sqrt(2*PI)))*exp(-0.5*x*x);
    return f;
}
// Address of element (i, j) in a row-major matrix with row length b.
// The `m` parameter is unused (kept for API parity with callers).
__device__ double* two_dim_indexW(double* vector, int i, int j, double m, int b){
    return &vector[i * b + j];
}
// Address of element (i, j, k) in a row-major [*, b, num_assets] array.
// The `m` parameter is unused (kept for API parity with older layouts).
__device__ double* three_dim_indexW(double* matrix, int i, int j, int k, double m, int b, int num_assets){
    return &matrix[(i * b + j) * num_assets + k];
}
// Kahan-compensated sum of the first b entries of sortvector; the
// running compensation term reduces floating-point rounding error
// versus a naive accumulation.
__device__ double kahansum(double* sortvector, int b){
    double sum = 0.0, compensation = 0.0;
    for (int i = 0; i < b; i++) {
        double adjusted = sortvector[i] - compensation;
        double next = sum + adjusted;
        compensation = (next - sum) - adjusted;
        sum = next;
    }
    return sum;
}
// One thread per flat index over (m-1) * b * b entries: computes the
// unnormalised mesh weight for moving from node k at time step i to
// node j at time step i+1, as the product over all assets of the
// per-asset transition densities, and stores it in tempW_device[idx].
__global__ void valuesKernel(double* tempW_device ,double m,int b, double* sigma_device,double* delta_device,double r, double delta_t,double* X_device,int num_assets){
    int idx =blockDim.x*blockIdx.x + threadIdx.x;
    int m_int=(int)m;
    if(idx<(m_int-1)*b*b){
        double w;
        // Decompose the flat index: i = time step, j = node at step i+1,
        // k = node at step i.
        int i=idx/(b*b);
        int j=idx/b;
        if(j>(b-1)){
            j=j%b;
        }
        int k=idx%b;
        // Product across assets: start at 1 and multiply in each asset's
        // one-step density from X[i][k][asset] to X[i+1][j][asset].
        w=1;
        for(int jjj=0; jjj<num_assets; jjj++){
            w = w * densityW(*three_dim_indexW(X_device, (i), k, jjj, m, b, num_assets), *three_dim_indexW(X_device, i+1, j, jjj, m, b, num_assets), sigma_device[jjj], r, delta_device[jjj], delta_t);
        }
        tempW_device[idx]=w;
    }
}
// One thread per (time step, node) pair: Kahan-sums one b-long row of
// tempW_device into weight_denominator_device[idx]. These sums become
// the normalising denominators for the mesh weights.
__global__ void sumweightsKernel(double* tempW_device , int b, double* weight_denominator_device, double m){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    int m_int = (int)m;
    if (idx >= (m_int - 1) * b)
        return;
    // Compensated (Kahan) summation over this thread's row.
    double sum = 0.0, comp = 0.0;
    int begin = idx * b;
    for (int i = begin; i < begin + b; i++) {
        double y = tempW_device[i] - comp;
        double t = sum + y;
        comp = (t - sum) - y;
        sum = t;
    }
    weight_denominator_device[idx] = sum;
}
// One thread per flat index over m * b * b entries: fills the normalised
// mesh-weight array W_device. Time step 0 gets weight 1 for j == 0 and 0
// otherwise; later steps scale the raw densities in tempW_device by
// b / (per-node denominator from sumweightsKernel).
__global__ void meshweightsKernel(double* W_device, double m, int b, double* sigma_device, double* delta_device, double r, double delta_t, double* X_device, int num_assets, double* weight_denominator_device, double* tempW_device){
    double wdenominator;
    int idx =blockDim.x*blockIdx.x + threadIdx.x;
    int m_int=(int)m;
    if(idx<b*b*m_int){
        // Decompose the flat index: i = time step, k and j = mesh nodes
        // (same layout as the tempW_device written by valuesKernel).
        int i=idx/(b*b);
        int k=idx/b;
        if(k>(b-1)){
            k=k%b;
        }
        int j=idx%b;
        if(i==0){
            if(j==0){
                *three_dim_indexW(W_device, i, k, j, m, b, b)=1;
            }// all weights from the starting node are equal to 1
            else{
                *three_dim_indexW(W_device, i, k, j, m, b, b)=0;
            }
        }
        if(i>0){
            // Normalise: weight = b * density / (sum of densities for
            // node k at this step).
            wdenominator= *two_dim_indexW(weight_denominator_device, i-1, k, m-1, b);
            *three_dim_indexW(W_device, (i), k, j, m, b, b)=(((double)b) * (*three_dim_indexW(tempW_device, i-1, k, j, m-1, b, b)))/wdenominator;
        }
    }
}
// Abort with the CUDA error string and the failing line number.
static void meshweights_check(cudaError_t error, int line){
    if (error != cudaSuccess)
    {
        std::cout << cudaGetErrorString(error) << std::endl;
        printf("found at line %d\n", line);
        exit(1);
    }
}

/*
 * Builds the mesh-weight matrix W (m*b*b doubles, flattened) and the
 * per-(step, node) normalisation constants weight_denominator ((m-1)*b
 * doubles) on the GPU from the simulated paths X (m*b*num_assets doubles).
 *
 * Pipeline (default stream): valuesKernel -> sumweightsKernel ->
 * meshweightsKernel. The intermediate raw-weight buffer now lives only on
 * the device: the original code allocated an *uninitialised* host tempW,
 * copied it to the device, and bounced every intermediate result
 * device->host->device between kernels — all of that is unnecessary.
 * Every allocation, copy and launch is now error-checked.
 */
void meshweights(double* W, double m, int b, double sigma[], double delta[], double r, double delta_t, double* X, int num_assets, double* weight_denominator){
    int m_int = (int)m;
    // Flattened element counts for each buffer.
    int temp_N = (m_int - 1) * b * b;   // raw (unnormalised) pairwise weights
    int X_N = m_int * b * num_assets;   // simulated state paths
    int W_N = m_int * b * b;            // final mesh weights
    int w_N = (m_int - 1) * b;          // per-(step, node) denominators
    int sigma_N = num_assets;
    int delta_N = num_assets;

    double *X_device, *W_device, *weight_denominator_device;
    double *sigma_device, *delta_device, *tempW_device;

    // Allocate device buffers; a sticky earlier error would otherwise make
    // every later call fail mysteriously, so check each one.
    meshweights_check(cudaMalloc((void**)&X_device, X_N * sizeof(double)), __LINE__);
    meshweights_check(cudaMalloc((void**)&W_device, W_N * sizeof(double)), __LINE__);
    meshweights_check(cudaMalloc((void**)&sigma_device, sigma_N * sizeof(double)), __LINE__);
    meshweights_check(cudaMalloc((void**)&delta_device, delta_N * sizeof(double)), __LINE__);
    meshweights_check(cudaMalloc((void**)&weight_denominator_device, w_N * sizeof(double)), __LINE__);
    // tempW is produced entirely by valuesKernel on the device: no host
    // shadow buffer and no upload of uninitialised memory.
    meshweights_check(cudaMalloc((void**)&tempW_device, temp_N * sizeof(double)), __LINE__);

    // Upload the inputs. W and weight_denominator are uploaded too, so any
    // entries the kernels leave untouched keep the caller's values, matching
    // the original behaviour.
    meshweights_check(cudaMemcpy(X_device, X, X_N * sizeof(double), cudaMemcpyHostToDevice), __LINE__);
    meshweights_check(cudaMemcpy(W_device, W, W_N * sizeof(double), cudaMemcpyHostToDevice), __LINE__);
    meshweights_check(cudaMemcpy(sigma_device, sigma, sigma_N * sizeof(double), cudaMemcpyHostToDevice), __LINE__);
    meshweights_check(cudaMemcpy(delta_device, delta, delta_N * sizeof(double), cudaMemcpyHostToDevice), __LINE__);
    meshweights_check(cudaMemcpy(weight_denominator_device, weight_denominator, w_N * sizeof(double), cudaMemcpyHostToDevice), __LINE__);

    // Preserved from the original implementation; the kernels shown here do
    // not visibly use device-side malloc — TODO confirm it is still needed.
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, 80000000 * sizeof(double));

    // Stage 1: one thread per (step, source, target) triple computes a raw weight.
    dim3 VgridDim((int)ceil(temp_N / 512.0));
    dim3 VblockDim(512);
    valuesKernel<<<VgridDim, VblockDim>>>(tempW_device, m, b, sigma_device, delta_device, r, delta_t, X_device, num_assets);
    meshweights_check(cudaGetLastError(), __LINE__);
    meshweights_check(cudaDeviceSynchronize(), __LINE__);

    // Stage 2: sum each run of b raw weights into its denominator.
    dim3 sgridDim((int)ceil(w_N / 512.0));
    dim3 sblockDim(512);
    sumweightsKernel<<<sgridDim, sblockDim>>>(tempW_device, b, weight_denominator_device, m);
    meshweights_check(cudaGetLastError(), __LINE__);
    meshweights_check(cudaDeviceSynchronize(), __LINE__);

    // Stage 3: normalise the raw weights into the mesh-weight matrix.
    dim3 mgridDim((int)ceil(W_N / 512.0));
    dim3 mblockDim(512);
    meshweightsKernel<<<mgridDim, mblockDim>>>(W_device, m, b, sigma_device, delta_device, r, delta_t, X_device, num_assets, weight_denominator_device, tempW_device);
    meshweights_check(cudaGetLastError(), __LINE__);
    meshweights_check(cudaDeviceSynchronize(), __LINE__);

    // Copy the results back to the caller's buffers.
    meshweights_check(cudaMemcpy(W, W_device, W_N * sizeof(double), cudaMemcpyDeviceToHost), __LINE__);
    meshweights_check(cudaMemcpy(weight_denominator, weight_denominator_device, w_N * sizeof(double), cudaMemcpyDeviceToHost), __LINE__);

    cudaFree(X_device);
    cudaFree(sigma_device);
    cudaFree(delta_device);
    cudaFree(W_device);
    cudaFree(weight_denominator_device);
    cudaFree(tempW_device);
}
|
751
|
#include "includes.h"
// Marks, in C, every vertex id that appears in D[0..ecount): C[D[tid]] = 1.
// Launched on a 2-D grid of 2-D blocks; each thread handles one entry of D.
__global__ void calculateCircuitGraphVertexData( unsigned int * D,unsigned int * C,unsigned int ecount){
    // Flatten (blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x) into one id.
    unsigned int threadsPerBlock = blockDim.x * blockDim.y;
    unsigned int linearBlock = gridDim.x * blockIdx.y + blockIdx.x;
    unsigned int linearThread = blockDim.x * threadIdx.y + threadIdx.x;
    unsigned int tid = linearBlock * threadsPerBlock + linearThread;
    if (tid >= ecount)
        return;
    // atomicExch avoids a read-modify-write race when many threads hit the
    // same vertex id.
    unsigned int vertex = D[tid];
    atomicExch(C + vertex, 1);
}
|
752
|
#include "includes.h"
// Element-wise modulo: output[i] = (int)input[i] % divisor, as float.
// Launched on a 2-D grid of 1-D blocks; one thread per element, tail guarded.
__global__ void ModuloKernel(float* input, int divisor, float* output, int size)
{
    // Linear index over the (gridDim.x x gridDim.y) grid of 1-D blocks.
    int idx = (blockIdx.y * gridDim.x + blockIdx.x) * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    int truncated = (int)input[idx];
    output[idx] = (float)(truncated % divisor);
}
|
753
|
/*
* @Author: grantmcgovern
* @Date: 2015-09-11 12:19:51
* @Last Modified by: grantmcgovern
* @Last Modified time: 2015-09-13 15:42:09
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/*
* File_Packet
*
* Contains a small data packet of
* the file info (data + size) to
* help with dynamic allocation.
*
*/
struct File_Packet {
	char *file_data;  /* heap-allocated file contents; the reader NUL-terminates it */
	int file_size;    /* number of bytes read (excludes the trailing NUL) */
};
/*
* get_filename_length(char *[])
*
* Computes the length of command line
* argument filename to store the filename
* as a string, and null-terminate it.
*
*/
/*
 * get_filename_length(char *[])
 *
 * Returns the length of the filename passed as the first command-line
 * argument (filename[1]), excluding the terminating NUL.
 * Uses strlen instead of the original hand-rolled scan loop.
 */
int get_filename_length(char *filename[]) {
	return (int)strlen(filename[1]);
}
/*
* check_command_line_args(int)
*
* Checks to see whether used used proper
* number of command line arguments.
*/
/*
 * check_command_line_args(int)
 *
 * Requires exactly one user-supplied argument (the filename):
 * argc == 1 means none were given, argc > 2 means too many.
 * Prints a message and exits on violation.
 */
void check_command_line_args(int argc) {
	if(argc == 1 || argc > 2) {
		printf("Invalid Number of Arguments\n");
		exit(1);
	}
}
/*
* read_encrypted_file(char*, int)
*
* Takes command line args passed from main
* and opens the file, reading the data, then
* bulding a character array.
*/
/*
 * read_encrypted_file(char*[], int)
 *
 * Opens the file named by args[1] in binary mode, reads its entire
 * contents into a heap-allocated, NUL-terminated buffer, and returns a
 * File_Packet with the data and its size. Exits with a message on any
 * failure (missing file, out of memory, short read).
 */
struct File_Packet read_encrypted_file(char *args[], int length) {
	int filename_length = get_filename_length(args);
	char filename[filename_length + 1];
	// Null terminate the end to ensure no weird chars
	filename[filename_length] = '\0';
	// Prevents buffer overflow, copies filename
	strncpy(filename, args[1], filename_length);

	FILE *file = fopen(filename, "rb");
	if(!file) {
		printf("%s\n", "File does not exist");
		exit(1);
	}

	/* Use fseek()/ftell() to discover the file size up front. */
	fseek(file, 0, SEEK_END);
	long file_size = ftell(file);
	fseek(file, 0, SEEK_SET);

	char *file_data = (char *)(malloc(file_size + 1));
	if(!file_data) {
		fclose(file);
		printf("%s\n", "Out of memory");
		exit(1);
	}
	/* fread was previously unchecked; a short read would leave garbage. */
	if(file_size > 0 && fread(file_data, file_size, 1, file) != 1) {
		fclose(file);
		printf("%s\n", "File read failed");
		exit(1);
	}
	fclose(file);
	file_data[file_size] = 0;

	struct File_Packet packet;
	packet.file_data = file_data;
	packet.file_size = (int)file_size;
	return packet;
}
/*
* caesar_cipher(char*)
*
* Takes a character array of the file contents
* and converts each character to its decrypted
* state by first casting to int, decrementing by
* 1, then casting back to a char.
*/
/*
 * caesar_cipher
 *
 * One thread per character (launched elsewhere as <<<1, file_size>>>).
 * Undoes the +1 Caesar shift: each output byte is the input byte minus one.
 */
__global__ void caesar_cipher(char *file_data, char *dev_decrypted_file_data) {
	// threadIdx.x indexes the character this thread decrypts.
	int idx = threadIdx.x;
	dev_decrypted_file_data[idx] = (char)((int)file_data[idx] - 1);
}
/*
* print_decrypted_message(char *)
*
* Recieves the memory block back from CUDA,
* and prints the decrypted message.
*/
/*
 * print_decrypted_message(char *)
 *
 * Prints the decrypted text followed by a newline, then terminates the
 * whole program with status 0. Does not return.
 */
void print_decrypted_message(char *decrypted_file_data) {
	puts(decrypted_file_data);
	exit(0);
}
/*
* MAIN
*/
/*
 * MAIN
 *
 * Validates argv, loads the encrypted file, decrypts it on the GPU with a
 * one-thread-per-character kernel, then prints the plaintext.
 *
 * Fixes over the original: the host result buffer is now NUL-terminated
 * before being printed with %s (it previously read past the end), the
 * deprecated cudaThreadSynchronize() is replaced, and host/device memory
 * is released before print_decrypted_message(), which calls exit().
 */
int main(int argc, char *argv[]) {
	// First check command line args are valid
	check_command_line_args(argc);
	// Get file contents
	struct File_Packet packet = read_encrypted_file(argv, argc);
	// Get file length (chars)
	int file_size = packet.file_size;
	// Compute size of memory block we'll need
	int size = file_size * sizeof(char);
	// Local memory: +1 so the buffer printed with %s is always terminated
	// (the kernel only writes file_size bytes).
	char *file_data = packet.file_data;
	char decrypted_file_data[file_size + 1];
	decrypted_file_data[file_size] = '\0';
	// Device memory
	char *dev_file_data;
	char *dev_decrypted_file_data;
	// Allocate memory on the GPU and upload the ciphertext
	cudaMalloc((void**)&dev_file_data, size);
	cudaMalloc((void**)&dev_decrypted_file_data, size);
	cudaMemcpy(dev_file_data, file_data, size, cudaMemcpyHostToDevice);
	// Decrypt the message on the GPU: one thread per character, one block
	caesar_cipher<<<1, file_size>>>(dev_file_data, dev_decrypted_file_data);
	// Wait for the kernel (also surfaces launch/execution errors);
	// cudaThreadSynchronize() is deprecated.
	cudaDeviceSynchronize();
	cudaMemcpy(decrypted_file_data, dev_decrypted_file_data, size, cudaMemcpyDeviceToHost);
	// Release resources first: print_decrypted_message() never returns.
	cudaFree(dev_file_data);
	cudaFree(dev_decrypted_file_data);
	free(file_data);
	// Print the result and exit with success
	print_decrypted_message(decrypted_file_data);
	exit(0);
}
|
754
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define BLOCK_SIZE 1024
// обработка ошибок
#define CSC(call) \
do { \
cudaError_t res = call; \
if (res != cudaSuccess) { \
fprintf(stderr, "ERROR in %s:%d. Message: %s\n", \
__FILE__, __LINE__, cudaGetErrorString(res)); \
exit(0); \
} \
} while(0)
// Bitonic merge performed in shared memory: runs all half-cleaner stages of
// one merge (span mergeSize <= BLOCK_SIZE) inside each block.
__global__ void bitonic_merge(int *host_data, int n, int mergeSize, int dist) { // shared-memory variant
int id = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
int id_in_block = threadIdx.x; // index within the block
int tmp, power_of_2;
// shared memory is allocated per block; it is much faster than global memory
// and the indexing does not differ between threads
__shared__ int shared_mem_block[BLOCK_SIZE];
for (int block_part = 0; (block_part < n); block_part += offset, id += offset) { // walk over chunks of blocks (the data may not fit in one grid)
if (block_part + blockIdx.x * blockDim.x >= n) // past the requested element count: nothing to sort
break;
__syncthreads(); // block-wide GPU barrier before (re)filling shared memory
shared_mem_block[id_in_block] = host_data[block_part + blockIdx.x * blockDim.x + id_in_block];
for (int half_cleaner_index = mergeSize; half_cleaner_index >= 2; half_cleaner_index >>= 1) { // half-cleaner stages
__syncthreads();
power_of_2 = half_cleaner_index >> 1;
// (x & 1) - 1 is non-zero (true) exactly when the bit is 0, i.e. when an
// arrow starts at this element and it must be compared with its partner.
if ((((id_in_block / (power_of_2)) & 1) - 1) && // should this element be compared (does an arrow start here)?
(((id / dist) & 1) == 0 && shared_mem_block[id_in_block] > shared_mem_block[id_in_block + (power_of_2)]) || // ascending arrow and the element is larger
(((id / dist) & 1) == 1 && shared_mem_block[id_in_block] < shared_mem_block[id_in_block + (power_of_2)]))) { // descending arrow and the element is smaller
// swap the two elements
tmp = shared_mem_block[id_in_block];
shared_mem_block[id_in_block] = shared_mem_block[id_in_block + (power_of_2)];
shared_mem_block[id_in_block + (power_of_2)] = tmp;
}
__syncthreads();
host_data[block_part + blockIdx.x * blockDim.x + id_in_block] = shared_mem_block[id_in_block];
}
}
}
// Applies one bitonic half-cleaner stage using global memory (used while the
// merge span is still larger than a thread block).
__global__ void bitonic_half_cleaner(int *host_data, int n, int half_cleaner_index, int dist) { // apply a bitonic half-cleaner using global memory
int id_in_block = blockIdx.x * blockDim.x + threadIdx.x;
int offset = blockDim.x * gridDim.x;
int tmp, power_of_2 = half_cleaner_index >> 1;
for (int i = id_in_block; i < n; i += offset) {
// (x & 1) - 1 is non-zero (true) exactly when the bit is 0, i.e. when an
// arrow starts at this element and it must be compared with its partner.
if ((((i / (power_of_2)) & 1) - 1) && // should this element be compared (does an arrow start here)?
((((i / dist) & 1) == 0 && host_data[i] > host_data[i + (power_of_2)]) || // ascending arrow and the element is larger
(((i / dist) & 1) == 1 && host_data[i] < host_data[i + (power_of_2)]))) { // descending arrow and the element is smaller
// swap the two elements
tmp = host_data[i];
host_data[i] = host_data[i + (power_of_2)];
host_data[i + (power_of_2)] = tmp;
}
}
}
/*
 * Reads a binary int count n followed by n ints from stdin, sorts them on the
 * GPU with bitonic sort (padding to a power of two with INT_MAX so padding
 * sorts last), and writes the n sorted ints to stdout in binary.
 * Fixes over the original: fread/malloc results are checked, cudaMemcpy
 * calls go through the CSC error-check macro, and the "memorry" typo is gone.
 */
int main() {
    int n;
    /* Abort quietly on truncated input. */
    if (fread(&n, sizeof(int), 1, stdin) != 1)
        return 0;
    /* Smallest power of two strictly greater than n (one doubling too many
       for exact powers of two; harmless — the surplus is INT_MAX padding). */
    int power = 0;
    for (int i = n; i > 0; i = i >> 1)
        ++power;
    int power_of_2 = 1 << power;
    /* Host buffer, padded up to power_of_2 elements. */
    int *host_data = (int *)malloc(power_of_2 * sizeof(int));
    if (host_data == NULL)
        return 0;
    /* GPU global-memory buffer. */
    int *device_data = NULL;
    CSC(cudaMalloc(&device_data, power_of_2 * sizeof(int)));
    /* Read the payload and pad the tail with INT_MAX. */
    if ((int)fread(host_data, sizeof(int), n, stdin) != n) {
        free(host_data);
        return 0;
    }
    for (int i = n; i < power_of_2; i++)
        host_data[i] = INT_MAX;
    CSC(cudaMemcpy(device_data, host_data, power_of_2 * sizeof(int), cudaMemcpyHostToDevice));
    /* Bitonic sort: grow the merge span; once it fits in a block, finish the
       remaining half-cleaner stages in shared memory via bitonic_merge. */
    int half_cleaner_index;
    for (int mergeSize = 2; mergeSize <= power_of_2; mergeSize <<= 1) {
        if (mergeSize <= BLOCK_SIZE) { /* whole merge fits in one block */
            half_cleaner_index = mergeSize;
        } else { /* too wide for shared memory: run half-cleaners globally */
            for (half_cleaner_index = mergeSize; half_cleaner_index > BLOCK_SIZE; half_cleaner_index >>= 1)
                bitonic_half_cleaner<<<dim3(BLOCK_SIZE / 2), dim3(BLOCK_SIZE)>>>(device_data, power_of_2, half_cleaner_index, mergeSize);
        }
        bitonic_merge<<<dim3(BLOCK_SIZE / 2), dim3(BLOCK_SIZE)>>>(device_data, power_of_2, half_cleaner_index, mergeSize);
    }
    CSC(cudaMemcpy(host_data, device_data, n * sizeof(int), cudaMemcpyDeviceToHost));
    fwrite(host_data, sizeof(int), n, stdout);
    free(host_data);
    CSC(cudaFree(device_data));
    return 0;
}
|
755
|
#include "includes.h"
// For each voxel of the c_size.x * c_size.y * c_size.z volume, reads the
// first three of `len` interleaved float components from pin and writes the
// Euclidean norm of that 3-vector to pout. One thread per voxel (3-D launch).
__global__ void useNoTexture(float* pin, float* pout, int len)
{
    // 3-D coordinates of this thread's voxel.
    unsigned int vx = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int vy = blockIdx.y * blockDim.y + threadIdx.y;
    unsigned int vz = blockIdx.z * blockDim.z + threadIdx.z;
    // Linear voxel index (x fastest, then y, then z).
    unsigned int voxel = vx + c_size.x * (vy + vz * c_size.y);
    float compX = pin[len * voxel + 0];
    float compY = pin[len * voxel + 1];
    float compZ = pin[len * voxel + 2];
    // powf kept (rather than a*a) to match the original bit-for-bit.
    pout[voxel] = sqrtf(powf(compX, 2) + powf(compY, 2) + powf(compZ, 2));
}
|
756
|
#include "includes.h"
// Grid-stride repack: copies n records of 6 floats (v3) into a stride-8
// layout (v4), filling destination slots 0-2 and 4-6 and leaving slots
// 3 and 7 of each record untouched.
__global__ void copy( float *v4, const float *v3, const int n ) {
    const int stride = blockDim.x * gridDim.x;
    for (int rec = blockIdx.x * blockDim.x + threadIdx.x; rec < n; rec += stride) {
        const float *src = v3 + rec * 6;
        float *dst = v4 + rec * 8;
        dst[0] = src[0];
        dst[1] = src[1];
        dst[2] = src[2];
        dst[4] = src[3];
        dst[5] = src[4];
        dst[6] = src[5];
    }
}
|
757
|
#include "includes.h"
// Fills a symmetric 0/1 adjacency matrix (Nsegs x Nsegs) between segment ids
// stored one per pixel in maskBuffer (a cols x rows image, row-major).
// One thread per pixel; a 2-pixel border is skipped so the +1 and -cols
// neighbour reads stay in bounds.
__global__ void FillAdjacencyMatrix(float* adj_mat , float* maskBuffer , int size , int cols , int rows ,int Nsegs){
int idx = blockDim.x*blockIdx.y*gridDim.x + blockDim.x*blockIdx.x + threadIdx.x;
int icol = idx % cols; // pixel column
int irow = idx / cols; // pixel row
int seg_id1=-1;
if (idx<size){
if (icol<cols-2 && irow<rows-2 && irow>1 && icol>1){
seg_id1 = maskBuffer[idx]; // this pixel's segment id (float -> int truncation)
if (seg_id1!=maskBuffer[idx+1]){
// horizontal neighbour in a different segment: mark both directions
adj_mat[ (int)maskBuffer[idx+1] + seg_id1*Nsegs ]=1;
adj_mat[ seg_id1 + Nsegs*(int)maskBuffer[idx+1] ]=1; /// it can happen that a->b, but b->a wont appear...
}
// NOTE(review): the vertical neighbour is only tested when the horizontal
// one matched ("else if"), so a pixel bordering two different segments only
// records the horizontal one — verify this is intentional.
else if (seg_id1!=maskBuffer[idx-cols]){
adj_mat[ (int)maskBuffer[idx-cols] + seg_id1*Nsegs ]=1;
adj_mat[ seg_id1 + Nsegs*(int)maskBuffer[idx-cols] ]=1; /// it can happen that a->b, but b->a wont appear...
}
}
}
}
|
758
|
extern "C" {
// Thin device wrappers around the CUDA built-in coordinate variables so the
// machine-generated kernel below can call them as ordinary functions.
__device__ inline int threadIdx_x() { return threadIdx.x; }
__device__ inline int threadIdx_y() { return threadIdx.y; }
__device__ inline int threadIdx_z() { return threadIdx.z; }
__device__ inline int blockIdx_x() { return blockIdx.x; }
__device__ inline int blockIdx_y() { return blockIdx.y; }
__device__ inline int blockIdx_z() { return blockIdx.z; }
__device__ inline int blockDim_x() { return blockDim.x; }
__device__ inline int blockDim_y() { return blockDim.y; }
__device__ inline int blockDim_z() { return blockDim.z; }
__device__ inline int gridDim_x() { return gridDim.x; }
__device__ inline int gridDim_y() { return gridDim.y; }
__device__ inline int gridDim_z() { return gridDim.z; }
__global__ void lambda_35652(float*, float*);
// Machine-generated kernel (SSA-style temporaries, goto-based control flow;
// do not hand-edit the logic). Computes a 5-point stencil on a 2048 x 2048
// float grid: for the cell at (x, y) = (blockDim.x*blockIdx.x + threadIdx.x,
// blockDim.y*blockIdx.y + threadIdx.y) it loads the centre and its four
// neighbours (x±1, y±1), each index clamped to [0, 2047], via __ldg, and
// stores 0.2 * (sum of the five samples) to _35656_38562[2048*y + x].
__global__ __launch_bounds__ (128 * 1 * 1) void lambda_35652(float* _35655_38561, float* _35656_38562) {
int threadIdx_x_38568;
int pthreadIdx_x_38568;
int blockDim_x_38574;
int pblockDim_x_38574;
int blockIdx_x_38580;
int pblockIdx_x_38580;
int _38586;
int p_38586;
int _38592;
int p_38592;
int _38598;
int p_38598;
int _38601;
int p_38601;
int converge_38608;
int pconverge_38608;
int converge_38613;
int pconverge_38613;
int converge_38621;
int pconverge_38621;
int converge_38625;
int pconverge_38625;
float _38635;
float p_38635;
int converge_38640;
int pconverge_38640;
int converge_38644;
int pconverge_38644;
int converge_38648;
int pconverge_38648;
int converge_38652;
int pconverge_38652;
float _38658;
float p_38658;
int converge_38661;
int pconverge_38661;
int converge_38665;
int pconverge_38665;
int converge_38668;
int pconverge_38668;
int converge_38672;
int pconverge_38672;
float _38678;
float p_38678;
int converge_38684;
int pconverge_38684;
int converge_38688;
int pconverge_38688;
int converge_38691;
int pconverge_38691;
int converge_38695;
int pconverge_38695;
float _38701;
float p_38701;
int converge_38704;
int pconverge_38704;
int converge_38708;
int pconverge_38708;
int converge_38713;
int pconverge_38713;
int converge_38717;
int pconverge_38717;
float _38723;
float p_38723;
// Fetch thread/block coordinates through the device wrapper functions.
threadIdx_x_38568 = threadIdx_x();
pthreadIdx_x_38568 = threadIdx_x_38568;
l38566: ;
threadIdx_x_38568 = pthreadIdx_x_38568;
blockDim_x_38574 = blockDim_x();
pblockDim_x_38574 = blockDim_x_38574;
l38572: ;
blockDim_x_38574 = pblockDim_x_38574;
blockIdx_x_38580 = blockIdx_x();
pblockIdx_x_38580 = blockIdx_x_38580;
l38578: ;
blockIdx_x_38580 = pblockIdx_x_38580;
_38586 = threadIdx_y();
p_38586 = _38586;
l38584: ;
_38586 = p_38586;
_38592 = blockDim_y();
p_38592 = _38592;
l38590: ;
_38592 = p_38592;
_38598 = blockIdx_y();
p_38598 = _38598;
l38596: ;
_38598 = p_38598;
_38601 = blockDim_y();
p_38601 = _38601;
l38599: ;
_38601 = p_38601;
int _38602;
_38602 = blockDim_x_38574 * blockIdx_x_38580;
int _38603;
_38603 = threadIdx_x_38568 + _38602;
// Each if/goto pair below clamps an index into [0, 2047].
bool _38605;
_38605 = _38603 < 0;
if (_38605) goto l38606; else goto l38761;
l38761: ;
pconverge_38608 = _38603;
goto l38607;
l38606: ;
pconverge_38608 = 0;
goto l38607;
l38607: ;
converge_38608 = pconverge_38608;
bool _38610;
_38610 = 2048 <= converge_38608;
if (_38610) goto l38611; else goto l38760;
l38760: ;
pconverge_38613 = converge_38608;
goto l38612;
l38611: ;
pconverge_38613 = 2047;
goto l38612;
l38612: ;
converge_38613 = pconverge_38613;
int _38615;
_38615 = _38592 * _38598;
int gid_y_38616;
gid_y_38616 = _38586 + _38615;
int _38617;
_38617 = -1 + gid_y_38616;
bool _38618;
_38618 = _38617 < 0;
if (_38618) goto l38619; else goto l38759;
l38759: ;
pconverge_38621 = _38617;
goto l38620;
l38619: ;
pconverge_38621 = 0;
goto l38620;
l38620: ;
converge_38621 = pconverge_38621;
bool _38622;
_38622 = 2048 <= converge_38621;
if (_38622) goto l38623; else goto l38758;
l38758: ;
pconverge_38625 = converge_38621;
goto l38624;
l38623: ;
pconverge_38625 = 2047;
goto l38624;
l38624: ;
converge_38625 = pconverge_38625;
int _38630;
_38630 = 2048 * converge_38625;
int _38631;
_38631 = _38630 + converge_38613;
float* idx_38632;
idx_38632 = _35655_38561 + _38631;
_38635 = __ldg(idx_38632);
p_38635 = _38635;
l38633: ;
_38635 = p_38635;
int _38636;
_38636 = -1 + _38603;
bool _38637;
_38637 = _38636 < 0;
if (_38637) goto l38638; else goto l38757;
l38757: ;
pconverge_38640 = _38636;
goto l38639;
l38638: ;
pconverge_38640 = 0;
goto l38639;
l38639: ;
converge_38640 = pconverge_38640;
bool _38641;
_38641 = 2048 <= converge_38640;
if (_38641) goto l38642; else goto l38756;
l38756: ;
pconverge_38644 = converge_38640;
goto l38643;
l38642: ;
pconverge_38644 = 2047;
goto l38643;
l38643: ;
converge_38644 = pconverge_38644;
bool _38645;
_38645 = gid_y_38616 < 0;
if (_38645) goto l38646; else goto l38755;
l38755: ;
pconverge_38648 = gid_y_38616;
goto l38647;
l38646: ;
pconverge_38648 = 0;
goto l38647;
l38647: ;
converge_38648 = pconverge_38648;
bool _38649;
_38649 = 2048 <= converge_38648;
if (_38649) goto l38650; else goto l38754;
l38754: ;
pconverge_38652 = converge_38648;
goto l38651;
l38650: ;
pconverge_38652 = 2047;
goto l38651;
l38651: ;
converge_38652 = pconverge_38652;
int _38653;
_38653 = 2048 * converge_38652;
int _38654;
_38654 = _38653 + converge_38644;
float* idx_38655;
idx_38655 = _35655_38561 + _38654;
_38658 = __ldg(idx_38655);
p_38658 = _38658;
l38656: ;
_38658 = p_38658;
if (_38605) goto l38659; else goto l38753;
l38753: ;
pconverge_38661 = _38603;
goto l38660;
l38659: ;
pconverge_38661 = 0;
goto l38660;
l38660: ;
converge_38661 = pconverge_38661;
bool _38662;
_38662 = 2048 <= converge_38661;
if (_38662) goto l38663; else goto l38752;
l38752: ;
pconverge_38665 = converge_38661;
goto l38664;
l38663: ;
pconverge_38665 = 2047;
goto l38664;
l38664: ;
converge_38665 = pconverge_38665;
if (_38645) goto l38666; else goto l38751;
l38751: ;
pconverge_38668 = gid_y_38616;
goto l38667;
l38666: ;
pconverge_38668 = 0;
goto l38667;
l38667: ;
converge_38668 = pconverge_38668;
bool _38669;
_38669 = 2048 <= converge_38668;
if (_38669) goto l38670; else goto l38750;
l38750: ;
pconverge_38672 = converge_38668;
goto l38671;
l38670: ;
pconverge_38672 = 2047;
goto l38671;
l38671: ;
converge_38672 = pconverge_38672;
int _38673;
_38673 = 2048 * converge_38672;
int _38674;
_38674 = _38673 + converge_38665;
float* idx_38675;
idx_38675 = _35655_38561 + _38674;
_38678 = __ldg(idx_38675);
p_38678 = _38678;
l38676: ;
_38678 = p_38678;
int _38680;
_38680 = 1 + _38603;
bool _38681;
_38681 = _38680 < 0;
if (_38681) goto l38682; else goto l38749;
l38749: ;
pconverge_38684 = _38680;
goto l38683;
l38682: ;
pconverge_38684 = 0;
goto l38683;
l38683: ;
converge_38684 = pconverge_38684;
bool _38685;
_38685 = 2048 <= converge_38684;
if (_38685) goto l38686; else goto l38748;
l38748: ;
pconverge_38688 = converge_38684;
goto l38687;
l38686: ;
pconverge_38688 = 2047;
goto l38687;
l38687: ;
converge_38688 = pconverge_38688;
if (_38645) goto l38689; else goto l38747;
l38747: ;
pconverge_38691 = gid_y_38616;
goto l38690;
l38689: ;
pconverge_38691 = 0;
goto l38690;
l38690: ;
converge_38691 = pconverge_38691;
bool _38692;
_38692 = 2048 <= converge_38691;
if (_38692) goto l38693; else goto l38746;
l38746: ;
pconverge_38695 = converge_38691;
goto l38694;
l38693: ;
pconverge_38695 = 2047;
goto l38694;
l38694: ;
converge_38695 = pconverge_38695;
int _38696;
_38696 = 2048 * converge_38695;
int _38697;
_38697 = _38696 + converge_38688;
float* idx_38698;
idx_38698 = _35655_38561 + _38697;
_38701 = __ldg(idx_38698);
p_38701 = _38701;
l38699: ;
_38701 = p_38701;
if (_38605) goto l38702; else goto l38745;
l38745: ;
pconverge_38704 = _38603;
goto l38703;
l38702: ;
pconverge_38704 = 0;
goto l38703;
l38703: ;
converge_38704 = pconverge_38704;
bool _38705;
_38705 = 2048 <= converge_38704;
if (_38705) goto l38706; else goto l38744;
l38744: ;
pconverge_38708 = converge_38704;
goto l38707;
l38706: ;
pconverge_38708 = 2047;
goto l38707;
l38707: ;
converge_38708 = pconverge_38708;
int _38709;
_38709 = 1 + gid_y_38616;
bool _38710;
_38710 = _38709 < 0;
if (_38710) goto l38711; else goto l38743;
l38743: ;
pconverge_38713 = _38709;
goto l38712;
l38711: ;
pconverge_38713 = 0;
goto l38712;
l38712: ;
converge_38713 = pconverge_38713;
bool _38714;
_38714 = 2048 <= converge_38713;
if (_38714) goto l38715; else goto l38742;
l38742: ;
pconverge_38717 = converge_38713;
goto l38716;
l38715: ;
pconverge_38717 = 2047;
goto l38716;
l38716: ;
converge_38717 = pconverge_38717;
int _38718;
_38718 = 2048 * converge_38717;
int _38719;
_38719 = _38718 + converge_38708;
float* idx_38720;
idx_38720 = _35655_38561 + _38719;
_38723 = __ldg(idx_38720);
p_38723 = _38723;
l38721: ;
_38723 = p_38723;
// Accumulate 0.2 * each of the five samples and store the result.
float _38731;
_38731 = 2.000000e-01f * _38658;
int _38724;
_38724 = 2048 * gid_y_38616;
float _38738;
_38738 = 2.000000e-01f * _38723;
float _38734;
_38734 = 2.000000e-01f * _38678;
float _38736;
_38736 = 2.000000e-01f * _38701;
float _38729;
_38729 = 2.000000e-01f * _38635;
int _38725;
_38725 = _38724 + _38603;
float _38730;
_38730 = 0.000000e+00f + _38729;
float* idx_38726;
idx_38726 = _35656_38562 + _38725;
float _38732;
_38732 = _38730 + _38731;
float _38735;
_38735 = _38732 + _38734;
float _38737;
_38737 = _38735 + _38736;
float _38739;
_38739 = _38737 + _38738;
*idx_38726 = _38739;
return ;
}
}
|
759
|
#include <iostream>
#include <cuda_runtime.h>
#define CHECK(call) { \
const cudaError_t error = call; \
if (error != cudaSuccess) { \
std::cout << "Error: " << __FILE__ << ":" \
<< __LINE__ << std::endl \
<< cudaGetErrorString(error) << std::endl; \
exit(1); \
} \
}
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, numElements).
// One thread per element; the guard handles the grid tail.
__global__ void vectorAdd(const float *A, const float *B, float *C, int numElements) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= numElements) {
        return;
    }
    C[idx] = A[idx] + B[idx];
}
// Allocates two random input vectors, adds them on the GPU, verifies the
// result on the host against the reference sum, and reports PASS/FAIL.
// Fix over the original: the three 50000-float host arrays (~600 KB total)
// now live on the heap instead of main's stack frame, and are freed.
int main(void) {
    const int numElements = 50000;
    size_t size = numElements * sizeof(float);
    std::cout << "[Vector addition of " << numElements << " elements]" << std::endl;
    // Host buffers on the heap (large arrays are risky on the stack).
    float *h_A = new float[numElements];
    float *h_B = new float[numElements];
    float *h_C = new float[numElements];
    for (int i = 0; i < numElements; ++i) {
        h_A[i] = rand()/(float)RAND_MAX;
        h_B[i] = rand()/(float)RAND_MAX;
    }
    float *d_A, *d_B, *d_C;
    CHECK(cudaMalloc((void **)&d_A, size));
    CHECK(cudaMalloc((void **)&d_B, size));
    CHECK(cudaMalloc((void **)&d_C, size));
    CHECK(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
    CHECK(cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice));
    int threadsPerBlock = 256;
    // Ceil-division so the last partial block is still launched.
    int blocksPerGrid = (numElements + threadsPerBlock - 1) / threadsPerBlock;
    std::cout << "CUDA kernel launch with " << blocksPerGrid
              << " blocks of " << threadsPerBlock << " threads" << std::endl;
    vectorAdd<<<blocksPerGrid, threadsPerBlock>>>(d_A, d_B, d_C, numElements);
    CHECK(cudaGetLastError());
    std::cout << "Copy output data from the CUDA device to the host memory" << std::endl;
    // The blocking cudaMemcpy also synchronizes with the kernel.
    CHECK(cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost));
    // Verify each element against the host-side reference sum.
    for (int i = 0; i < numElements; ++i) {
        if (fabs(h_A[i] + h_B[i] - h_C[i]) > 1e-5) {
            std::cerr << "Result verification failed at element " << i << std::endl;
            exit(EXIT_FAILURE);
        }
    }
    std::cout << "Test PASSED" << std::endl;
    CHECK(cudaFree(d_A));
    CHECK(cudaFree(d_B));
    CHECK(cudaFree(d_C));
    delete[] h_A;
    delete[] h_B;
    delete[] h_C;
    std::cout << "Done" << std::endl;
    return 0;
}
|
760
|
/* game.cu
* Jonathan Lehman
* April 17, 2012
*
* Compile with: nvcc -o game game.cu
*
*/
#include <cuda.h>
#include <stdio.h>
#include <assert.h>
#include <stdlib.h>
#include <strings.h>
#include <math.h>
#include <sys/time.h>
#define DEBUG 0 /* set DEBUG to flag to 1 for debugging, set flag to 0 for optimized performance */
#define ROWS 6 /* number of rows in connect-4 */
#define COLS 7 /* number of columns in connect-4 */
#define RED -1 /* value for red tokens and red player */
#define BLACK 1 /* value for black tokens and black player */
#define HUMANPLAYER 1 /* by default we assume that human player plays black */
#define numB 5 /*power number of blocks in grid raised to (for width and height each) */
#define numTX 2
#define numTY 1
using namespace std;
typedef struct
{
	int board[ROWS][COLS]; /* -1 token red player, 1 token black player, 0 empty square */
	int currentplayer; /* -1 red player, 1 black player */
	int tokensonboard; /* counts the number of tokens on the board */
}Game; /* full game state; small enough to pass by value (the kernel takes it by value) */
typedef struct
{
	int row; /* row coordinate of square */
	int col; /* column coordinate of square */
	int token; /* -1 red token, 1 black token */
} Move; /* one token drop */
/* The game state (struct Game *game) is passed as a reference
parameter (pointer) to the functions in order to avoid that the
entire board state is copied with each function call */
void InitGame(Game *game); /* set up empty board and initial player */
void MakeMove(Game *game, Move move); /* make a move */
void UndoMove(Game *game, Move move); /* undo a move */
int Win(Game *game, int player); /* checks if player (red=-1, black=1) has won game */
int Draw(Game *game); /* checks if game ended in a draw */
void PossibleMoves(Game *game, int *number_of_moves, Move moves[]); /* computes the possible moves , number_of_moves returns the number of available moves, moves[] contains list of moves */
void DisplayBoard(Game *game); /* print board state on screen */
void DisplayMove(Move move); /* print move on screen */
//int Utility(struct Game *game); /* returns the Utility of a non-terminal board state */
void EnterMove(Move *move, Game *game); /* reads in a move from the keyboard */
int Row(Game *game, int col); /* computes the row on which token ends when dropped in column col */
__global__ void generate(Game, Move*);
__device__ int Evaluate(Game*);
__device__ int Win2(Game *game, int player); /* checks if player (red=-1, black=1) has won game */
__device__ int Draw2(Game *game); /* checks if game ended in a draw */
void checkGPUCapabilities(int, int, int, int, int);
// Keep track of the gpu time.
cudaEvent_t start, stop;
float elapsedTime;
/*
 * Game loop: alternately lets the GPU pick a move (generate kernel) and,
 * with -p, a human enter one, until the game is won or drawn.
 *
 * Fixes over the original:
 *  - compMove was a NULL pointer that was dereferenced by MakeMove/DisplayMove
 *    (the device->host copy was commented out); it is now a host Move filled
 *    in via cudaMemcpy after the kernel.
 *  - cudaMalloc((void**)&dev_game.board, ...) wrote a device pointer over the
 *    in-struct int[ROWS][COLS] array, corrupting the by-value game state; the
 *    struct already carries the board when passed by value, so that
 *    allocation (and its matching cudaFree) is gone.
 *  - dev_move is freed and the timing events are destroyed each iteration.
 *  - stray "hi6" debug print removed.
 */
int main(int argc, char *argv[])
{
	int i;
	Game game;
	int playagainsthuman=0; /* computer plays against itself (0) or against human (1) */
	for (i=1; i<argc; i++) /* iterate through all command line arguments */
	{
		if(strcmp(argv[i],"-p")==0) /* if command line argument -p human opponent */
		{
			playagainsthuman=1;
			printf("Human player plays black\n");
		}
		if(strcmp(argv[i],"-h")==0) /* if command line argument -h print help */
		{
			printf("game [-p] [-h]\n-p for play against human player\n-h for help\n");
			return 0; /* quit program */
		}
	}
	InitGame(&game); /* set up board */
	while( !Draw(&game) && !Win(&game,RED) && !Win(&game,BLACK)) /* no draw or win */
	{
		Move move;
		Move compMove; /* filled from the device after the kernel runs */
		Move *dev_move = NULL;
		DisplayBoard(&game); /* display board state */
		int threadX = 49;
		int threadY = 7;
		int gridSize = pow(7, numB);
		/* check that GPU can handle arguments */
		checkGPUCapabilities(gridSize, gridSize, threadX, threadY, gridSize * gridSize);
		/* Only the result slot needs device memory: the game state travels
		   by value (its board array is part of the struct). */
		cudaMalloc((void **)&dev_move, sizeof(Move));
		/* Start the timer. */
		cudaEventCreate(&start);
		cudaEventCreate(&stop);
		cudaEventRecord(start, 0);
		/* Execute the kernel. */
		dim3 block(threadX, threadY); /* threads w x h */
		dim3 grid(gridSize, gridSize); /* blocks w x h */
		generate<<<grid, block>>>(game, dev_move); /* current game config; kernel writes its best move */
		/* Wait for the kernel to complete. Needed for timing. */
		cudaDeviceSynchronize();
		/* Stop the timer. */
		cudaEventRecord(stop, 0);
		cudaEventSynchronize(stop);
		cudaEventElapsedTime(&elapsedTime, start, stop);
		/* Fetch the computed move BEFORE using it (this copy was missing,
		   leaving compMove NULL when it was dereferenced below). */
		cudaMemcpy(&compMove, dev_move, sizeof(Move), cudaMemcpyDeviceToHost);
		/* print any cuda error messages */
		const char* errorString = cudaGetErrorString(cudaGetLastError());
		printf("GPU Error: %s\n", errorString);
		/* Release per-iteration device resources. */
		cudaEventDestroy(start);
		cudaEventDestroy(stop);
		cudaFree(dev_move);
		MakeMove(&game,compMove); /* make move */
		DisplayMove(compMove); /* display move */
		if (playagainsthuman) /* human player */
		{
			DisplayBoard(&game); /* show board state after computer moved */
			if (!Draw(&game) && !Win(&game,RED)) /* no draw and no computer win */
			{
				EnterMove(&move,&game); /* human player enters her move */
				MakeMove(&game,move); /* make move */
			}
		} /* end of if humanplayer */
	} /* end of while not draw or win */
	DisplayBoard(&game); /* display board state */
	if (Draw(&game))
		printf("the game ended in a draw\n");
	if (Win(&game, RED))
		printf("player red won the game\n");
	if (Win(&game, BLACK))
		printf("player black won the game\n");
	return 0;
} /* end of main */
/*
 * Validate that the active GPU can execute a launch of gridW x gridH blocks
 * of blockW x blockH threads over 'size' data elements.  Prints a diagnostic
 * and exits the process on the first limit that is exceeded.
 *
 * Fixes: the memory check previously compared against size*sizeof(long) but
 * reported size*sizeof(float)*3, and the messages said "nqueens" (leftover
 * from another program); the check and the message now agree, and use %zu
 * for the size_t quantities.
 */
void checkGPUCapabilities(int gridW, int gridH, int blockW, int blockH, int size){
    //check what GPU is being used
    int devId;
    cudaGetDevice( &devId );
    //get device properties for GPU being used
    cudaDeviceProp gpuProp;
    cudaGetDeviceProperties( &gpuProp, devId );
    //check if GPU has enough memory (report the same quantity that is tested)
    size_t required = (size_t)size * sizeof(long);
    if(gpuProp.totalGlobalMem < required){
        fprintf(stderr, "\ngame: Insufficient GPU. GPU does not have enough memory to handle the data size: %zu. It can only handle data sizes up to %zu.\n", required, (size_t)gpuProp.totalGlobalMem);
        exit(1);
    }
    //check if GPU can handle the number of threads per block
    if(gpuProp.maxThreadsPerBlock < (blockW * blockH)){
        fprintf(stderr, "\ngame: Insufficient GPU. GPU can only handle %d threads per block, not %d.\n", gpuProp.maxThreadsPerBlock, (blockW * blockH));
        exit(1);
    }
    //check that GPU can handle the number of threads in the block width
    if(gpuProp.maxThreadsDim[0] < blockW){
        fprintf(stderr, "\ngame: Insufficient GPU. GPU can only handle %d threads as the block width of each block, not %d.\n", gpuProp.maxThreadsDim[0], blockW );
        exit(1);
    }
    //check that GPU can handle the number of threads in the block height
    if(gpuProp.maxThreadsDim[1] < blockH){
        fprintf(stderr, "\ngame: Insufficient GPU. GPU can only handle %d threads as the block height of each block, not %d.\n", gpuProp.maxThreadsDim[1], blockH );
        exit(1);
    }
    //check that GPU can handle the number of blocks in the grid width
    if(gpuProp.maxGridSize[0] < gridW){
        fprintf(stderr, "\ngame: Insufficient GPU. GPU can only handle %d blocks as the grid width of each grid, not %d.\n", gpuProp.maxGridSize[0], gridW );
        exit(1);
    }
    //check that GPU can handle the number of blocks in the grid height
    if(gpuProp.maxGridSize[1] < gridH){
        fprintf(stderr, "\ngame: Insufficient GPU. GPU can only handle %d blocks as the grid height of each grid, not %d.\n", gpuProp.maxGridSize[1], gridH );
        exit(1);
    }
}
/* Reset the game to its starting state: empty board, red to move. */
void InitGame(Game *game)
{
    for (int r = 0; r < ROWS; r++)
        for (int c = 0; c < COLS; c++)
            game->board[r][c] = 0;      /* 0 marks an empty square */
    game->currentplayer = RED;          /* red player starts */
    game->tokensonboard = 0;
}
/* Apply 'move' to the board, flip the player to move, and bump the token count. */
void MakeMove(Game *game, Move move)
{
#if DEBUG
    assert(game->board[move.row][move.col] == 0);       /* target square empty */
    if (move.row > 0)
        assert(game->board[move.row - 1][move.col] != 0); /* token rests on support */
    assert(game->currentplayer == move.token);          /* it is this player's turn */
#endif
    game->board[move.row][move.col] = move.token;
    game->currentplayer = -game->currentplayer;         /* RED/BLACK are +1/-1 */
    game->tokensonboard += 1;
}
/* Reverse a previous MakeMove: clear the square, flip the player, drop the count. */
void UndoMove(Game *game, Move move)
{
#if DEBUG
    assert(game->board[move.row][move.col] != 0);       /* square holds a token */
    if (move.row < ROWS - 1)
        assert(game->board[move.row + 1][move.col] == 0); /* nothing stacked above */
    assert(game->currentplayer != move.token);          /* the mover is not to move */
#endif
    game->board[move.row][move.col] = 0;
    game->currentplayer = -game->currentplayer;         /* RED/BLACK are +1/-1 */
    game->tokensonboard -= 1;
}
/* A draw requires a full board (42 tokens) with neither side having four in a row. */
int Draw(Game *game)
{
    return (game->tokensonboard >= 42) && !Win(game, RED) && !Win(game, BLACK);
}
/*
 * Return 1 if 'player' has four tokens in a row anywhere on the board, else 0.
 * Every cell is tried as the start of a run in each of four directions
 * (up, right, up-right, down-right); a start is skipped when the run's
 * end cell would fall outside the board.
 */
int Win(Game *game, int player)
{
    const int dRow[4] = { 1, 0, 1, -1 };
    const int dCol[4] = { 0, 1, 1,  1 };
    for (int d = 0; d < 4; d++)
    {
        for (int i = 0; i < ROWS; i++)
        {
            for (int j = 0; j < COLS; j++)
            {
                int endRow = i + 3 * dRow[d];
                int endCol = j + 3 * dCol[d];
                if (endRow < 0 || endRow >= ROWS || endCol >= COLS)
                    continue;               /* run would leave the board */
                int run = 0;
                while (run < 4 &&
                       game->board[i + run * dRow[d]][j + run * dCol[d]] == player)
                    run++;
                if (run == 4)
                    return 1;               /* four in a row found */
            }
        }
    }
    return 0;                               /* no win for player */
}
/*
 * Enumerate the legal moves for the player to move.
 * number_of_moves receives the count; moves[] receives one entry per
 * non-full column (the token lands on the first empty row).
 */
void PossibleMoves(Game *game, int *number_of_moves, Move moves[])
{
    int count = 0;
    for (int col = 0; col < COLS; col++)
    {
        int row = Row(game, col);       /* first empty square in this column */
        if (row < ROWS)                 /* column still has room */
        {
            moves[count].row = row;
            moves[count].col = col;
            moves[count].token = game->currentplayer;
            count++;
        }
    }
    *number_of_moves = count;
}
/* Print the board, top row first: 'X' for black (-1), '0' for red (+1), blank for empty. */
void DisplayBoard(Game *game)
{
    for (int r = ROWS - 1; r >= 0; r--)
    {
        for (int c = 0; c < COLS; c++)
        {
            int token = game->board[r][c];
            if (token == -1)
                printf("X|");
            else if (token == 1)
                printf("0|");
            else if (token == 0)
                printf(" |");
        }
        printf("\n");
    }
    printf("--------------\n0|1|2|3|4|5|6\n\n");
}
/* Print a move as "<symbol>->(row,col)"; 'X' is the -1 token, '0' otherwise. */
void DisplayMove(Move move)
{
    printf("%c->(%d,%d)\n", move.token == -1 ? 'X' : '0', move.row, move.col);
}
/* Prompt the human player until a valid, non-full column is chosen, then fill *move. */
void EnterMove(Move *move, Game *game)
{
    int col = 0;
    int row = 0;
    for (;;)
    {
        do
        {
            printf("\nEnter the column [0-6] of token: ");
            scanf("%d", &col);
        } while ((col < 0) || (col >= COLS));   /* reject out-of-range columns */
        row = Row(game, col);                   /* landing row in that column */
        if (row < ROWS)
            break;                              /* column has room: accept */
        printf("column %d is full\n", col);
    }
    printf("your move 0->(%d,%d)\n", row, col);
    move->row = row;
    move->col = col;
    move->token = HUMANPLAYER;
}
/* Row where a token dropped in 'col' comes to rest; ROWS means the column is full. */
int Row(Game *game, int col)
{
    int r;
    for (r = 0; r < ROWS && game->board[r][col] != 0; r++)
        ;
    return r;
}
__device__
/* Terminal-state utility from red's point of view: +10 red win, -10 black win,
   0 for a draw or any other position.  (A draw implies neither side has won,
   so checking the wins first is equivalent to the draw-first ordering.) */
int Evaluate(Game *game)
{
    int score = 0;
    if (Win2(game, RED))
        score = 10;             /* maximum utility for winning */
    else if (Win2(game, BLACK))
        score = -10;            /* minimum utility for losing */
    return score;
}
__device__
/* Device-side draw test: full board (42 tokens) and no winner on either side. */
int Draw2(Game *game)
{
    return (game->tokensonboard >= 42) && !Win2(game, RED) && !Win2(game, BLACK);
}
__device__
/*
 * Device-side mirror of Win(): return 1 if 'player' has four in a row.
 * Each cell is tried as the start of a run in four directions
 * (up, right, up-right, down-right); starts whose run would leave the
 * board are skipped.
 */
int Win2(Game *game, int player)
{
    const int dRow[4] = { 1, 0, 1, -1 };
    const int dCol[4] = { 0, 1, 1,  1 };
    for (int d = 0; d < 4; d++)
    {
        for (int i = 0; i < ROWS; i++)
        {
            for (int j = 0; j < COLS; j++)
            {
                int endRow = i + 3 * dRow[d];
                int endCol = j + 3 * dCol[d];
                if (endRow < 0 || endRow >= ROWS || endCol >= COLS)
                    continue;               /* run would leave the board */
                int run = 0;
                while (run < 4 &&
                       game->board[i + run * dRow[d]][j + run * dCol[d]] == player)
                    run++;
                if (run == 4)
                    return 1;               /* four in a row found */
            }
        }
    }
    return 0;                               /* no win for player */
}
__global__
/*
 * Move-generation kernel (work in progress).  The intended search logic is
 * not implemented yet; every thread currently stores the same fixed move
 * into *move.  All threads write identical values, so the concurrent
 * unsynchronized writes are benign, but a real implementation will need a
 * reduction to pick the best move.
 *
 * Note: 'game' is passed by value; only game.board points at device memory.
 */
void generate(Game game, Move *move){
    move->row = 1;
    move->col = 2;
    move->token = 1;
}
|
761
|
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <math.h>
#include <iostream>
/*
 * Euclidean distance between two 2-D points.
 * Fixed to stay in single precision: the original used the double-precision
 * sqrt(pow(...)) pair, forcing float->double->float conversions on every
 * call; dx*dx + dy*dy with sqrtf is both faster and at least as accurate.
 */
__device__ float distance(float2 x1, float2 x2){
    float dx = x1.x - x2.x;
    float dy = x1.y - x2.y;
    return sqrtf(dx * dx + dy * dy);
}
/* One thread per point: write the distance of data_in[i] from the origin
   into data_out[i].  Standard 1-D launch; the i < n guard handles the
   partial last block. */
__global__ void distance_kernel(float2 *data_in, float *data_out, int n){
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < n){
/* reference point: the origin */
float2 ref;
ref.x = 0.0;
ref.y = 0.0;
data_out[i] = distance(data_in[i], ref);
}
}
/* Fill h_in with n sample points: x ascends linearly, y = sin(x).
   The divisor is kept in double, matching the original arithmetic. */
void init_host_data(float2* h_in, int n){
    const double scale = (n - 1) * M_PI * 100;
    for (int i = 0; i < n; ++i) {
        h_in[i].x = (float)i / scale;
        h_in[i].y = sin(h_in[i].x);
    }
}
/*
 * Driver: build N sample points on the host, compute each point's distance
 * from the origin on the GPU, and print the results.
 * Fix: the print loop read h_in[1].y for every row instead of h_in[i].y.
 */
int main(){
    float *d_out = NULL;
    float2 *d_in = NULL;
    float2 *h_in = NULL;
    float *h_out = NULL;
    int N = 4096;
    int TPB = 32;                           // threads per block
    size_t in_size = N*2*sizeof(float);     // float2 == two floats
    size_t out_size = N*sizeof(float);
    h_in = (float2*)malloc(in_size);
    h_out = (float*)malloc(out_size);
    // allocate device memory
    cudaMalloc((void**)&d_in, in_size);
    cudaMalloc((void**)&d_out, out_size);
    init_host_data(h_in, N);
    // copy host input to the device
    cudaMemcpy(d_in, h_in, in_size, cudaMemcpyHostToDevice);
    distance_kernel<<<(N + TPB - 1)/TPB, TPB>>>(d_in, d_out, N);
    // copy the results back; this blocking copy also waits for the kernel
    cudaMemcpy(h_out, d_out, out_size, cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
    for(int i = 0; i < N; i++){
        // was h_in[1].y — printed the same y coordinate on every line
        std::cout<<i<<":<"<<h_in[i].x<<","<<h_in[i].y<<">, dist:"<<h_out[i]<<std::endl;
    }
    free(h_in);
    free(h_out);
    return 0;
}
|
762
|
#include <cuda_runtime.h>
#include <cstddef>
#include <utility>
#include <string>
#include <system_error>
#if defined(__CUDACC__)
#define DEVICE __device__
#define HOST __host__
#else
#define DEVICE
#define HOST
#endif
#define DEVICE_HOST DEVICE HOST
/*!
\brief std::error_code category for cudaError
*/
/* Concrete std::error_category whose codes are CUDA runtime cudaError values;
   messages come straight from cudaGetErrorString. */
class cuda_error_category_impl : public std::error_category {
public:
~cuda_error_category_impl() noexcept override = default;
const char* name() const noexcept override { return "cudaError"; }
std::string message(int ev) const override {
return cudaGetErrorString(static_cast<cudaError_t>(ev));
}
};
/* Process-wide singleton instance of the category above. */
inline std::error_category const& cuda_error_category() {
static cuda_error_category_impl sCudaErrorCategory;
return sCudaErrorCategory;
}
/* Adapter so a raw cudaError converts to std::error_code (used with the
   is_error_code_enum specialization below for implicit conversions). */
inline std::error_code make_error_code(cudaError e) noexcept {
return std::error_code(static_cast<int>(e), cuda_error_category());
}
/* Opt cudaError into the std::error_code machinery. */
namespace std {
template <>
struct is_error_code_enum<cudaError> : public true_type {};
} // namespace std
/*!
\brief General mathematical vector class.
*/
/* Fixed-size mathematical vector over T.  Primary template: plain element
   array 'm' with operator[] access; the 2/3/4 specializations below add
   named component accessors.  Usable in both host and device code. */
template <typename T, std::size_t Size>
struct vec {
using value_type = T;
using size_type = std::size_t;
using reference = value_type&;
using const_reference = value_type const&;
value_type m[Size];
DEVICE_HOST reference operator[](size_type index) { return m[index]; }
DEVICE_HOST const_reference operator[](size_type index) const { return m[index]; }
/* Element-wise constructor: exactly Size arguments. */
template <class... Ts, std::enable_if_t<sizeof...(Ts) == Size>* = nullptr>
explicit constexpr DEVICE_HOST vec(Ts... ts) noexcept
: m{std::forward<Ts>(ts)...} {}
/* Broadcast constructor: every component set to 'a'. */
explicit constexpr DEVICE_HOST vec(value_type a) noexcept {
for (size_type i = 0; i < Size; ++i) m[i] = a;
}
/* Broadcast assignment: every component set to 'a'. */
constexpr DEVICE_HOST vec& operator=(value_type a) noexcept {
for (size_type i = 0; i < Size; ++i) m[i] = a;
return *this;
};
constexpr /*DEVICE_HOST*/ vec() noexcept = default; // annotations ignored on explicitly default?
}; // struct vec<T, Size>
// In-place component-wise addition for the primary vec template.
template <typename T, std::size_t Size>
constexpr DEVICE_HOST vec<T, Size>& operator+=(vec<T, Size>& lhs, vec<T, Size> const& rhs) noexcept {
  for (std::size_t k = 0; k != Size; ++k) lhs.m[k] += rhs.m[k];
  return lhs;
}
// Component-wise sum; the by-value first operand doubles as the accumulator.
template <typename T, std::size_t Size>
constexpr DEVICE_HOST vec<T, Size> operator+(vec<T, Size> lhs, vec<T, Size> const& rhs) noexcept {
  for (std::size_t k = 0; k != Size; ++k) lhs.m[k] = lhs.m[k] + rhs.m[k];
  return lhs;
}
using vec2d = vec<double, 2>;
using vec3d = vec<double, 3>;
using vec4d = vec<double, 4>;
/*!
\brief Enable \ref vec specializations to use a union for custom accessors.
*/
/* Proxy used inside the union of the vec<T,2/3/4> specializations: shares
   the layout of the full element array but reads only slot Index, so 'v.x'
   etc. work without breaking the union's common-initial-sequence rules. */
template <typename T, std::size_t Size, std::size_t Index>
struct component_accessor {
using value_type = T;
value_type m[Size];
/* Broadcast this component into a vec of any size. */
template <std::size_t M>
constexpr DEVICE_HOST operator vec<T, M>() const noexcept { return {m[Index]}; }
constexpr DEVICE_HOST value_type get() const noexcept { return m[Index]; }
constexpr DEVICE_HOST operator T() const noexcept { return m[Index]; }
}; // struct component_accessor<T, Size, I>
// In-place addition of one component slot into another (possibly different) slot.
template <typename T, std::size_t Size, std::size_t IndexA, std::size_t IndexB>
constexpr DEVICE_HOST component_accessor<T, Size, IndexA>&
operator+=(component_accessor<T, Size, IndexA>& lhs,
           component_accessor<T, Size, IndexB> const& rhs) noexcept {
  lhs.m[IndexA] += rhs.m[IndexB];
  return lhs;
}
// Sum of two component slots, returned in the copied left-hand accessor.
template <typename T, std::size_t Size, std::size_t IndexA, std::size_t IndexB>
constexpr DEVICE_HOST component_accessor<T, Size, IndexA>
operator+(component_accessor<T, Size, IndexA> lhs,
          component_accessor<T, Size, IndexB> const& rhs) noexcept {
  lhs.m[IndexA] = lhs.m[IndexA] + rhs.m[IndexB];
  return lhs;
}
/* 2-component specialization: same storage as the primary template plus
   named accessors x/y via the union of component_accessor proxies. */
template <typename T>
struct vec<T, 2> {
using value_type = T;
using size_type = std::size_t;
using reference = value_type&;
using const_reference = value_type const&;
enum { Size = 2 };
union {
value_type m[Size];
// component_accessor has the same size and layout as the array m above,
// thus m, x, and y can be used without invalidating the union.
component_accessor<value_type, Size, 0> x;
component_accessor<value_type, Size, 1> y;
};
DEVICE_HOST reference operator[](size_type index) { return m[index]; }
DEVICE_HOST const_reference operator[](size_type index) const { return m[index]; }
/* Element-wise constructor: exactly two arguments. */
template <class... Ts, std::enable_if_t<sizeof...(Ts) == Size>* = nullptr>
explicit constexpr DEVICE_HOST vec(Ts... ts) noexcept
: m{std::forward<Ts>(ts)...} {}
/* Broadcast constructor / assignment: both components set to 'a'. */
explicit constexpr DEVICE_HOST vec(value_type a) noexcept {
m[0] = m[1] = a;
}
constexpr DEVICE_HOST vec& operator=(value_type a) noexcept {
m[0] = m[1] = a;
return *this;
};
constexpr /*DEVICE_HOST*/ vec() noexcept = default; // annotations ignored on explicitly default?
}; // struct vec<T, 2>
// In-place component-wise addition, 2-component specialization.
template <typename T>
constexpr DEVICE_HOST vec<T, 2>& operator+=(vec<T, 2>& lhs, vec<T, 2> const& rhs) noexcept {
  lhs.m[0] += rhs.m[0];
  lhs.m[1] += rhs.m[1];
  return lhs;
}
// Component-wise sum; the by-value left operand holds the result.
template <typename T>
constexpr DEVICE_HOST vec<T, 2> operator+(vec<T, 2> lhs, vec<T, 2> const& rhs) noexcept {
  lhs.m[0] = lhs.m[0] + rhs.m[0];
  lhs.m[1] = lhs.m[1] + rhs.m[1];
  return lhs;
}
/* 3-component specialization with named accessors x/y/z. */
template <typename T>
struct vec<T, 3> {
using value_type = T;
using size_type = std::size_t;
using reference = value_type&;
using const_reference = value_type const&;
enum { Size = 3 };
union {
value_type m[Size];
// component_accessor has the same size and layout as the array m above,
// thus m, x, y, and z can be used without invalidating the union.
component_accessor<value_type, Size, 0> x;
component_accessor<value_type, Size, 1> y;
component_accessor<value_type, Size, 2> z;
};
DEVICE_HOST reference operator[](size_type index) { return m[index]; }
DEVICE_HOST const_reference operator[](size_type index) const { return m[index]; }
/* Element-wise constructor: exactly three arguments. */
template <class... Ts, std::enable_if_t<sizeof...(Ts) == Size>* = nullptr>
explicit constexpr DEVICE_HOST vec(Ts... ts) noexcept
: m{std::forward<Ts>(ts)...} {}
/* Broadcast constructor / assignment: all three components set to 'a'. */
explicit constexpr DEVICE_HOST vec(value_type a) noexcept {
m[0] = m[1] = m[2] = a;
}
constexpr DEVICE_HOST vec& operator=(value_type a) noexcept {
m[0] = m[1] = m[2] = a;
return *this;
};
constexpr /*DEVICE_HOST*/ vec() noexcept = default; // annotations ignored on explicitly default?
}; // struct vec<T, 3>
// In-place component-wise addition, 3-component specialization.
template <typename T>
constexpr DEVICE_HOST vec<T, 3>& operator+=(vec<T, 3>& lhs, vec<T, 3> const& rhs) noexcept {
  lhs.m[0] += rhs.m[0];
  lhs.m[1] += rhs.m[1];
  lhs.m[2] += rhs.m[2];
  return lhs;
}
// Component-wise sum; the by-value left operand holds the result.
template <typename T>
constexpr DEVICE_HOST vec<T, 3> operator+(vec<T, 3> lhs, vec<T, 3> const& rhs) noexcept {
  lhs.m[0] = lhs.m[0] + rhs.m[0];
  lhs.m[1] = lhs.m[1] + rhs.m[1];
  lhs.m[2] = lhs.m[2] + rhs.m[2];
  return lhs;
}
/* 4-component specialization with named accessors x/y/z/w. */
template <typename T>
struct vec<T, 4> {
using value_type = T;
using size_type = std::size_t;
using reference = value_type&;
using const_reference = value_type const&;
enum { Size = 4 };
union {
value_type m[Size];
// component_accessor has the same size and layout as the array m above,
// thus m, x, y, z, and w can be used without invalidating the union.
component_accessor<value_type, Size, 0> x;
component_accessor<value_type, Size, 1> y;
component_accessor<value_type, Size, 2> z;
component_accessor<value_type, Size, 3> w;
};
DEVICE_HOST reference operator[](size_type index) { return m[index]; }
DEVICE_HOST const_reference operator[](size_type index) const { return m[index]; }
/* Element-wise constructor: exactly four arguments. */
template <class... Ts, std::enable_if_t<sizeof...(Ts) == Size>* = nullptr>
explicit constexpr DEVICE_HOST vec(Ts... ts) noexcept
: m{std::forward<Ts>(ts)...} {}
/* Broadcast constructor / assignment: all four components set to 'a'. */
explicit constexpr DEVICE_HOST vec(value_type a) noexcept {
m[0] = m[1] = m[2] = m[3] = a;
}
constexpr DEVICE_HOST vec& operator=(value_type a) noexcept {
m[0] = m[1] = m[2] = m[3] = a;
return *this;
};
constexpr /*DEVICE_HOST*/ vec() noexcept = default; // annotations ignored on explicitly default?
}; // struct vec<T, 4>
// In-place component-wise addition, 4-component specialization.
template <typename T>
constexpr DEVICE_HOST vec<T, 4>& operator+=(vec<T, 4>& lhs, vec<T, 4> const& rhs) noexcept {
  lhs.m[0] += rhs.m[0];
  lhs.m[1] += rhs.m[1];
  lhs.m[2] += rhs.m[2];
  lhs.m[3] += rhs.m[3];
  return lhs;
}
// Component-wise sum; the by-value left operand holds the result.
template <typename T>
constexpr DEVICE_HOST vec<T, 4> operator+(vec<T, 4> lhs, vec<T, 4> const& rhs) noexcept {
  lhs.m[0] = lhs.m[0] + rhs.m[0];
  lhs.m[1] = lhs.m[1] + rhs.m[1];
  lhs.m[2] = lhs.m[2] + rhs.m[2];
  lhs.m[3] = lhs.m[3] + rhs.m[3];
  return lhs;
}
/* Move-only RAII owner of a CUDA allocation.  Prefers managed (unified)
   memory and falls back to plain cudaMalloc when managed memory is not
   supported; any other allocation failure terminates the process. */
class buffer {
public:
using size_type = std::size_t;
void* get() noexcept { return ptr_; }
operator void*() noexcept { return ptr_; }
/* cudaMemAttachGlobal vs cudaMemAttachHost for cudaMallocManaged. */
enum class attachment_points { global, host };
buffer(size_type sizeBytes, attachment_points attachment = attachment_points::global) {
cudaError err = cudaMallocManaged(&ptr_, sizeBytes,
attachment == attachment_points::global ? cudaMemAttachGlobal
: cudaMemAttachHost);
if (err != cudaSuccess && err != cudaErrorNotSupported) std::terminate();
/* Managed memory unavailable on this platform: fall back to device memory. */
if (err == cudaErrorNotSupported) {
err = cudaMalloc(&ptr_, sizeBytes);
if (err != cudaSuccess) std::terminate();
}
}
~buffer() noexcept { cudaFree(ptr_); } /* cudaFree(nullptr) is a no-op */
friend void swap(buffer& a, buffer& b) noexcept { std::swap(a.ptr_, b.ptr_); }
/* Move operations swap with the default-constructed (nullptr) state. */
buffer(buffer&& other) { swap(*this, other); }
buffer& operator=(buffer&& other) {
swap(*this, other);
return *this;
}
buffer() noexcept = default;
buffer(buffer const&) = delete;
buffer& operator=(buffer const&) = delete;
private:
void* ptr_{nullptr};
}; // class buffer
/* Typed convenience wrapper over buffer: allocates count*sizeof(T) bytes and
   exposes the pointer as T*.  Same move-only semantics as buffer. */
template <class T>
class typed_buffer : public buffer {
public:
using value_type = T;
using size_type = std::size_t;
T* get() noexcept { return reinterpret_cast<T*>(buffer::get()); }
operator T*() noexcept { return reinterpret_cast<T*>(buffer::get()); }
typed_buffer(size_type count, attachment_points attachment = attachment_points::global)
: buffer{count * sizeof(T), attachment} {}
~typed_buffer() noexcept = default;
friend void swap(typed_buffer& a, typed_buffer& b) noexcept {
using std::swap;
swap(static_cast<buffer&>(a), static_cast<buffer&>(b));
}
typed_buffer(typed_buffer&& other) : buffer{std::move(other)} {}
typed_buffer& operator=(typed_buffer&& other) {
buffer::operator=(std::move(other));
return *this;
}
typed_buffer() noexcept = default;
typed_buffer(typed_buffer const&) = delete;
typed_buffer& operator=(typed_buffer const&) = delete;
}; // class typed_buffer
/* N-body kernel stubs: each computes its flat thread index from a 2-D grid
   and returns; the physics is not implemented yet, so no output array is
   written.  Shared-memory size is supplied at launch (one vec3d per thread). */
__global__ void calculate_accelerations(std::size_t numBodies, vec3d* positions, vec3d* accel) {
std::size_t const bid = blockIdx.y * gridDim.x + blockIdx.x;
std::size_t const tid = threadIdx.x;
std::size_t const idx = bid * blockDim.x + tid;
extern __shared__ vec3d shmem[]; // 128 threads per block, make dynamic
shmem[tid] = vec3d{0.0, 0.0, 0.0};
if (idx >= numBodies) return;
} // calculate_accelerations
/* Stub: will advance positions by one step of size deltaTime. */
__global__ void update_positions(std::size_t numBodies, double deltaTime, vec3d* positions,
vec3d* velocities, vec3d* accel, vec3d* accel0) {
std::size_t const bid = blockIdx.y * gridDim.x + blockIdx.x;
std::size_t const tid = threadIdx.x;
std::size_t const idx = bid * blockDim.x + tid;
if (idx >= numBodies) return;
} // update_positions
/* Stub: will advance velocities using current and previous accelerations. */
__global__ void update_velocities(std::size_t numBodies, double deltaTime, vec3d* velocities,
vec3d* accel, vec3d* accel0) {
std::size_t const bid = blockIdx.y * gridDim.x + blockIdx.x;
std::size_t const tid = threadIdx.x;
std::size_t const idx = bid * blockDim.x + tid;
if (idx >= numBodies) return;
} // update_velocities
/* Stub: will reduce kinetic/potential energy pairs into 'energies'.
   NOTE(review): the host currently reads energies[0..255] after launching
   this kernel — the stub never writes them; confirm once implemented. */
__global__ void compute_energies(std::size_t numBodies, vec3d* positions, vec3d* velocities,
vec3d* accel0, vec2d* energies) {
std::size_t const bid = blockIdx.y * gridDim.x + blockIdx.x;
std::size_t const tid = threadIdx.x;
std::size_t const idx = bid * blockDim.x + tid;
extern __shared__ vec3d shmem[]; // 128 threads per block, make dynamic
shmem[tid] = vec3d{0.0, 0.0, 0.0};
if (idx >= numBodies) return;
} // compute_energies
#include <cstdlib>
#include <fstream>
#include <iostream>
/*
 * N-body driver: read bodies from an input file, run a velocity-Verlet-style
 * loop on the GPU, and report energies every 10 steps.
 *
 * Fixes:
 *  - the host reduction reads energies[0..255] (one per launched block), but
 *    the buffer only held numBodies (128) entries -> out-of-bounds; the
 *    buffer is now sized to at least 256;
 *  - kernel launches are asynchronous, yet the host read the managed
 *    'energies' memory with no synchronization; cudaDeviceSynchronize() is
 *    now issued before each host-side reduction;
 *  - ifs.clear() before the rewind so pre-C++11 libraries seek correctly
 *    after hitting EOF.
 */
int main() {
  std::ifstream ifs{"../../../nbody_data/input128"};
  if (!ifs) {
    std::cout << "cannot open ../../../nbody_data/input128\n";
    return EXIT_FAILURE;
  }
  // First pass: count non-empty lines to learn the body count.
  int numBodies = 0;
  std::string line;
  while (!ifs.eof()) {
    std::getline(ifs, line);
    if (!line.empty()) numBodies += 1;
  }
  ifs.clear();   // drop eof/fail bits so the rewind below is portable
  ifs.seekg(0);
  std::cout << "numBodies: " << numBodies << "\n";
  typed_buffer<vec3d> positions(numBodies);
  typed_buffer<vec3d> velocities(numBodies);
  typed_buffer<vec3d> accelerations(numBodies);
  typed_buffer<vec3d> accelerations0(numBodies);
  // compute_energies is launched with 256 blocks and the host sums 256
  // partial results, so the buffer must hold at least 256 entries.
  typed_buffer<vec2d> energies(numBodies > 256 ? numBodies : 256,
                               buffer::attachment_points::host);
  int particleIndex;
  double mass;
  for (std::size_t i = 0; i < static_cast<std::size_t>(numBodies); ++i) {
    ifs >> particleIndex >> mass;
    ifs >> positions[i][0] >> positions[i][1] >> positions[i][2];
    ifs >> velocities[i][0] >> velocities[i][1] >> velocities[i][2];
  }
  dim3 threads, grid;
  threads.x = (numBodies < 128) ? numBodies : 128;
  grid.x = (numBodies / 128) + 1;
  calculate_accelerations<<<grid, threads, sizeof(vec3d) * threads.x>>>(numBodies, positions,
                                                                       accelerations);
  compute_energies<<<256, 128, sizeof(vec2d) * threads.x>>>(numBodies, positions, velocities,
                                                            accelerations, energies);
  cudaDeviceSynchronize();  // kernels are async: finish before the host reads 'energies'
  for (int i = 1; i < 256; ++i) {
    energies[0].x += energies[i].x;
    energies[0].y += energies[i].y;
  }
  std::cout << "Energies:" << energies[0].x + energies[0].y << "\t" << energies[0].x << "\t"
            << energies[0].y << "\n";
  vec2d energies0 = energies[0];
  double const dt = 1e-3;
  double tend = 1.0;
  double t = 0.0;
  int k = 0;
  while (t < tend) {
    update_positions<<<grid, threads>>>(numBodies, dt, positions, velocities, accelerations,
                                        accelerations0);
    calculate_accelerations<<<grid, threads, sizeof(vec3d) * threads.x>>>(numBodies, positions,
                                                                          accelerations);
    update_velocities<<<grid, threads>>>(numBodies, dt, velocities, accelerations, accelerations0);
    t += dt;
    k += 1;
    if (k % 10 == 0) {
      compute_energies<<<256, 128, sizeof(vec2d) * threads.x>>>(numBodies, positions, velocities,
                                                                accelerations, energies);
      cudaDeviceSynchronize();  // wait for the energy kernel before the host reduction
      for (int i = 1; i < 256; ++i) {
        energies[0].x += energies[i].x;
        energies[0].y += energies[i].y;
      }
      std::cout << "t= " << t << " E= " << energies[0].x + energies[0].y << " " << energies[0].x
                << " " << energies[0].y << " dE = "
                << (((energies[0].x + energies[0].y) - (energies0.x + energies0.y)) /
                    (energies0.x + energies0.y))
                << "\n";
      energies0 = energies[0];
    }
  }
  return 0;
}
|
763
|
#include "includes.h"
/*
 * Block-level sum reduction (neighbored pairs, index-remapped to avoid warp
 * divergence).  Each block reduces its blockDim.x-sized slice of int_array
 * in place and writes the block total to temp_array[blockIdx.x].
 *
 * Fix: the bounds guard was 'gid > size', which let gid == size through and
 * read/wrote one element past the end of int_array; it is now 'gid >= size'.
 *
 * NOTE(review): threads past the guard return before the __syncthreads()
 * inside the loop, so a partial last block would hit the barrier with a
 * divergent thread set — this kernel assumes size is a multiple of
 * blockDim.x; confirm at the call site.
 */
__global__ void reduction_neighbored_pairs_improved_1( int * int_array, int * temp_array, int size)
{
    int tid = threadIdx.x;
    int gid = blockDim.x * blockIdx.x + threadIdx.x;
    //local data block pointer
    int * i_data = int_array + blockDim.x * blockIdx.x;
    if (gid >= size)
        return;
    for (int offset = 1; offset <= blockDim.x / 2; offset *= 2)
    {
        // remap so consecutive threads own consecutive active pairs
        int index = 2 * offset * tid;
        if (index < blockDim.x)
        {
            i_data[index] += i_data[index + offset];
        }
        __syncthreads();
    }
    if (tid == 0)
    {
        // i_data[0] == int_array[gid] for tid 0: the block's partial sum
        temp_array[blockIdx.x] = int_array[gid];
    }
}
|
764
|
/*
@EECE528 Project - BDD Parallelization
@Authors: Yu Lei, Haotian Zhang
@Date: 2017/12/3
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define MAXNODENUM 160000
#define MAXLINE 256 /* Maximum length of each input line read. */
/* One BDD node: 'index' is the variable level (INFINITY for the 0/1 leaves),
   'value' is 0/1 at a leaf and -1 at internal nodes. */
typedef struct bddNode_ {
float index;
int value;
struct bddNode_ *lowChild;
struct bddNode_ *highChild;
} bddNode;
/* A BDD: node/level counts plus the root and the two shared terminal leaves. */
typedef struct bddTree_ {
int totalNodeNum;
int totalLevels;
bddNode *topNode;
bddNode *zeroLeaf;
bddNode *oneLeaf;
} bddTree;
/* Bookkeeping for the apply operation (capacity vs. nodes used so far). */
typedef struct applyManager_ {
int maxNodeNum;
int currentSpaceNum;
} applyManager;
/* Unique-table of (index, lowChild, highChild) triples already created,
   used by check_node() to deduplicate nodes during apply. */
typedef struct pattern_{
int size;
float index[MAXNODENUM];
bddNode* left[MAXNODENUM];
bddNode* right[MAXNODENUM];
}pattern;
/* Global unique-table instance; reset (size = 0) in main before apply. */
pattern patterns;
/* Initialise an empty BDD: zero counts and freshly allocated 0/1 terminal
   leaves.  Leaves carry index INFINITY so they sort after every variable. */
void bddTreeInit(bddTree *bdd) {
    bdd->totalNodeNum = 0;
    bdd->totalLevels = 0;
    bddNode *zero = (bddNode*)malloc(sizeof(bddNode));
    bddNode *one  = (bddNode*)malloc(sizeof(bddNode));
    zero->index = INFINITY;
    zero->value = 0;
    zero->lowChild = NULL;
    zero->highChild = NULL;
    one->index = INFINITY;
    one->value = 1;
    one->lowChild = NULL;
    one->highChild = NULL;
    bdd->zeroLeaf = zero;
    bdd->oneLeaf = one;
}
/* Initialise the apply bookkeeping: record the node budget, nothing used yet. */
void applyManagerInit(applyManager *mgr, int maxNodes){
    mgr->maxNodeNum = maxNodes;
    mgr->currentSpaceNum = 0;
}
/* Parse a BDD from a text file.  First line: "<nodeTotal> <levelTotal>";
   each following line: "<nodeNum> <nodeIndex> <lowChild> <highChild>",
   where child -10 means the 0-leaf, -11 the 1-leaf, and any other value the
   nodeNum of an earlier line.  The last line becomes the root.
   Returns NULL if the file cannot be opened. */
bddTree* readBDD(char *filename) {
FILE *f;
bddTree *bdd;
int nodeTotal;
int levelTotal;
int nodeNum;
int nodeIndex;
int lowC;
int highC;
f = fopen(filename,"r");
if (!f) {
fprintf(stderr, "cannot open file \"%s\"\n", filename);
return NULL;
}
bdd = (bddTree*)malloc(sizeof(bddTree));
bddTreeInit(bdd);
char linebuf[MAXLINE];
/* NOTE(review): header fgets/sscanf results are unchecked — an empty or
   malformed file leaves nodeTotal/levelTotal uninitialized; verify inputs. */
fgets(linebuf,MAXLINE,f);
sscanf(linebuf, "%d %d", &nodeTotal, &levelTotal);
/* nodeNum -> node pointer lookup for resolving child references.
   NOTE(review): sized 10000 while MAXNODENUM is 160000 — inputs with
   nodeNum >= 10000 would overflow this array; confirm input bounds. */
bddNode *array[10000];
bdd->totalNodeNum = nodeTotal;
bdd->totalLevels = levelTotal;
while (fgets(linebuf, MAXLINE, f) != NULL) {
sscanf(linebuf, "%d %d %d %d", &nodeNum, &nodeIndex, &lowC, &highC);
bddNode *newNode;
newNode = (bddNode*)malloc(sizeof(bddNode));
newNode->index = nodeIndex;
newNode->value = -1; /* internal node marker */
if (lowC == -10) {
newNode->lowChild = bdd->zeroLeaf;
} else if (lowC == -11) {
newNode->lowChild = bdd->oneLeaf;
} else {
newNode->lowChild = array[lowC];
}
if (highC == -10) {
newNode->highChild = bdd->zeroLeaf;
} else if (highC == -11) {
newNode->highChild = bdd->oneLeaf;
} else {
newNode->highChild = array[highC];
}
array[nodeNum] = newNode;
/* root is whichever node the last line defines */
bdd->topNode = newNode;
}
fclose(f);
return bdd;
}
/* Pre-order dump of the subgraph rooted at 'node'; leaves (index INFINITY)
   are printed as children but never descended into. */
void printNode(bddNode *node) {
    printf("Node: %f children: \t%f \t%f.\n", node->index, node->lowChild->index, node->highChild->index);
    bddNode *kids[2] = { node->lowChild, node->highChild };
    for (int k = 0; k < 2; ++k) {
        if (kids[k]->index != INFINITY)
            printNode(kids[k]);
    }
}
/* Print a header with the node count, then the whole graph from the root. */
void printBDD(bddTree *bdd) {
    printf("\nPrinting bdd:\n");
    printf("Total nodes in bdd: %d\n", bdd->totalNodeNum);
    printNode(bdd->topNode);
}
/* Post-order free of the subgraph below 'node' (leaves are skipped; they are
   owned by the bddTree and released in freeBDD).
   NOTE(review): if internal nodes are ever shared between parents (a DAG, as
   applyBDDs can produce via its left==right shortcut), this walk visits and
   frees a shared node once per parent — double free.  Confirm callers only
   pass proper trees. */
void recursFree(bddNode *node) {
if (node->lowChild->index != INFINITY) {
recursFree(node->lowChild);
}
if (node->highChild->index != INFINITY) {
recursFree(node->highChild);
}
free(node);
}
/* Release an entire BDD: every internal node, both terminal leaves, and the
   tree header itself. */
void freeBDD(bddTree *bdd) {
    recursFree(bdd->topNode);
    free(bdd->oneLeaf);
    free(bdd->zeroLeaf);
    free(bdd);
}
// void addNew(int *size) {
// }
/* Unique-table lookup-or-insert: if (index,left,right) already exists in the
   global 'patterns' table, return its slot; otherwise append it and return 0.
   NOTE(review): the return contract is ambiguous — a hit in slot 0 is
   indistinguishable from "newly inserted", and the caller (applyBDDs) treats
   0 as "not found".  Fixing this means changing the contract to e.g. -1 for
   "inserted", together with the caller. */
int check_node(float index, bddNode *left, bddNode *right){
    for (int i = 0; i < patterns.size; ++i) {
        if (patterns.index[i] == index &&
            patterns.left[i] == left &&
            patterns.right[i] == right)
            return i;
    }
    int slot = patterns.size;
    patterns.index[slot] = index;
    patterns.left[slot] = left;
    patterns.right[slot] = right;
    patterns.size = slot + 1;
    return 0;
}
/*
 * Recursive AND of two BDDs (Shannon expansion on the smaller variable
 * index).  Returns a node in 'result' representing node1 AND node2.
 *
 * Fixes:
 *  - the equal-index branch never set newNodeIndex (it was used
 *    uninitialized when building the result node); it is now node1->index;
 *  - 'newNodeIndex == node2->index;' was a no-op comparison instead of an
 *    assignment in the node1->index > node2->index branch;
 *  - the result node was malloc'd before the left==right reduction check,
 *    leaking one node on every reduction; allocation now happens after it.
 */
bddNode* applyBDDs(bddTree *result, bddNode *node1, bddNode *node2, applyManager *appMan){
    bddNode *left, *right;
    float newNodeIndex;
    int checkNode = 0;
    /* Terminal cases: both operands are leaves -> AND of their values. */
    if(node1->value == 0 && node2->value == 0){
        return result->zeroLeaf;
    }else if(node1->value == 0 && node2->value == 1){
        return result->zeroLeaf;
    }else if(node1->value == 1 && node2->value == 0){
        return result->zeroLeaf;
    }else if(node1->value == 1 && node2->value == 1){
        return result->oneLeaf;
    }
    /* Expand on the smaller variable index (leaves have index INFINITY). */
    if(node1->index == node2->index){
        left = applyBDDs(result, node1->lowChild, node2->lowChild, appMan);
        right = applyBDDs(result, node1->highChild, node2->highChild, appMan);
        newNodeIndex = node1->index;            /* was missing: used uninitialized */
    }else if (node1->index < node2->index){
        left = applyBDDs(result, node1->lowChild, node2, appMan);
        right = applyBDDs(result, node1->highChild, node2, appMan);
        newNodeIndex = node1->index;
    }else{ /* node1->index > node2->index */
        left = applyBDDs(result, node1, node2->lowChild, appMan);
        right = applyBDDs(result, node1, node2->highChild, appMan);
        newNodeIndex = node2->index;            /* was '==' (no-op comparison) */
    }
    /* Reduction rule: identical children make this node redundant. */
    if(left == right){
        return left;
    }
    /* Allocate only when a node is actually needed (was leaked above). */
    bddNode *newNode = (bddNode*)malloc(sizeof(bddNode));
    checkNode = check_node(newNodeIndex, left, right);
    if(checkNode){
        /* Known (index,left,right) triple: copy the stored pattern. */
        newNode->index = patterns.index[checkNode];
        newNode->value = -1;
        newNode->lowChild = patterns.left[checkNode];
        newNode->highChild = patterns.right[checkNode];
    }
    else{
        newNode->index = newNodeIndex;
        newNode->value = -1;
        newNode->lowChild = left;
        newNode->highChild = right;
    }
    return newNode;
}
// Read two BDDs from the files named on the command line, combine them with
// applyBDDs and print a probe of the inputs' node indices.
int main(int argc, char* argv[]) {
    bddTree *bdd1, *bdd2;
    bddTree *bddResult;
    if (argc !=3) {
        fprintf(stderr,"usage: a.out file1 file2\n");
        exit(1);
    }
    bdd1 = readBDD(argv[1]);
    bdd2 = readBDD(argv[2]);
    // printBDD(bdd1);
    printf("bdd1 index: %lf and %lf \n",bdd1->topNode->lowChild->index, bdd2->topNode->highChild->index);
    bddResult = (bddTree*)malloc(sizeof(bddTree));
    bddTreeInit(bddResult);
    // printBDD(bdd2);
    applyManager *appMan;
    appMan = (applyManager*)malloc(sizeof(applyManager));
    // Size the apply manager by the worst-case product of the input depths.
    applyManagerInit(appMan, (int)pow(2, (bdd1->totalLevels + bdd2->totalLevels)));
    // Reset the global unique-node table consulted by check_node.
    patterns.size = 0;
    bddResult->topNode = applyBDDs(bddResult, bdd1->topNode, bdd2->topNode, appMan);
    // printBDD(bdd1);
    // printBDD(bdd2);
    // printBDD(bddResult);
    // NOTE(review): only the tree structs are freed here; the node graphs
    // built by readBDD/applyBDDs leak (freeBDD exists but is unused) —
    // confirm whether node ownership allows calling it safely.
    free(bdd1);
    free(bdd2);
    free(bddResult);
    free(appMan);
    return 0;
}
|
765
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <vector>
#include <stdio.h>
#include <ctime>
#include <fstream>
using namespace std;
vector<char> add_data(vector<char> data);
void copy_data_to_gpu(char* data, int size);
// Grow a host buffer by 1 MiB per step and benchmark copying the whole
// buffer to the GPU at each size (1..50 MiB).
int main()
{
    vector<char> data;
    int size = 0;
    for (int step = 1; step <= 50; step++)
    {
        data = add_data(data);
        size += 1048576;                    // one more MiB now present
        copy_data_to_gpu(&data[0], size);
    }
    data.clear();
    data.shrink_to_fit();
}
// Append 1 MiB (1048576 bytes) of pseudo-random digits in [0, 9] to `data`
// and return the grown vector. rand() is deliberately left unseeded so the
// generated payload is reproducible across runs.
vector<char> add_data(vector<char> data)
{
    int random_integer;
    // Reserve the final size up front so the 2^20 push_backs cause no
    // intermediate reallocations (previously reallocated repeatedly).
    data.reserve(data.size() + 1048576);
    for (int i = 0; i < 1048576; i++)
    {
        random_integer = rand() % 10;
        data.push_back(random_integer);
    }
    return data;
}
// Benchmark host<->device transfers of a `size`-byte buffer: run 100
// round-trip copies and append the mean clock-tick counts (to-device then
// from-device) to result.txt.
void copy_data_to_gpu(char* data, int size)
{
    ofstream outfile;
    outfile.open("result.txt", ofstream::out | ofstream::app);
    outfile.seekp(0, ios::end);
    char* dev_data;
    // BUGFIX: accumulate in double — the previous int elapsedTime/int sums
    // truncated each per-iteration duration before averaging.
    double sumTo = 0, sumFrom = 0;
    for (int i = 0; i < 100; i++)
    {
        cudaMalloc((void**)&dev_data, size);
        clock_t begin = clock();
        cudaMemcpy(dev_data, data, size, cudaMemcpyHostToDevice);
        cudaDeviceSynchronize();
        clock_t end = clock();
        sumTo += double(end - begin);
        begin = clock();
        cudaMemcpy(data, dev_data, size, cudaMemcpyDeviceToHost);
        cudaDeviceSynchronize();
        end = clock();
        sumFrom += double(end - begin);
        cudaFree(dev_data);
    }
    outfile << sumTo / 100 << " " << sumFrom / 100 << endl;
    outfile.close();
}
|
766
|
#include "includes.h"
#ifndef _KERNEL_H
#define _KERNEL_H
typedef struct Node {
int starting;
int no_of_edges;
}Node;
#endif
// Mark every node in [0, no_of_nodes) as visited; one thread per node.
__global__ void test1(bool* d_graph_visited, int no_of_nodes) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= no_of_nodes) return;   // guard the grid tail
    d_graph_visited[idx] = true;
}
|
767
|
/*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
// Relative-position-encoded attention score, forward (query side).
// For each (query*local_size pair, head) cell, accumulates the dot product
// of the query feature vector with the lookup-table row selected by the
// floored, clamped relative position. One thread contributes ONE hdim
// channel (blockIdx.z) of the dot product, hence the atomicAdd on output.
__global__ void rpe_q_forward(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *output) {
    // dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int head_idx = blockIdx.y;
    int hdim_idx = blockIdx.z;
    // Guard the grid tail on all three axes.
    if (index >= total_query_num * local_size ||
        head_idx >= nhead ||
        hdim_idx >= hdim) return;
    // 1. Obtain query features.
    int query_idx = index / local_size;
    query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    // 2. Obtain quantize relative position: floor(relpos) clamped to [0, l-1].
    relpos += index;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    // 3. Obtain output position; all hdim lanes (blockIdx.z) target the same
    //    output cell, so the partial products must be added atomically.
    output += index * nhead + head_idx;
    atomicAdd(
        output,
        query_features[0] * lookup_table[0]);
}
// Host-side launcher for rpe_q_forward.
// Grid: (ceil(total_query_num*local_size / THREADS_PER_BLOCK), nhead, hdim);
// block: THREADS_PER_BLOCK threads. Tensor layouts:
//   query_batch_cnt [b], relpos [total_query_num, local_size],
//   lookup_table [l, nhead, hdim], query_features [total_query_num, nhead, hdim],
//   output [total_query_num, local_size, nhead].
void rpe_q_launcher(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *output){
    dim3 grid_dim(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    dim3 block_dim(THREADS_PER_BLOCK);
    rpe_q_forward<<<grid_dim, block_dim>>>(
        b, total_query_num, local_size, nhead, hdim, l,
        query_batch_cnt,
        relpos, lookup_table, query_features,
        output);
}
// Backward pass of rpe_q_forward: scatter grad_out into the gradients of
// both operands of the forward dot product. Thread layout and pointer
// arithmetic mirror the forward kernel exactly; atomics are required
// because many (index, hdim) lanes touch the same table rows.
__global__ void rpe_q_backward(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *grad_out, float * grad_lookup_table, float * grad_query_features) {
    // dim3 blocks(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int head_idx = blockIdx.y;
    int hdim_idx = blockIdx.z;
    // Guard the grid tail on all three axes.
    if (index >= total_query_num * local_size ||
        head_idx >= nhead ||
        hdim_idx >= hdim) return;
    // 1. Obtain query features (and matching gradient slot).
    int query_idx = index / local_size;
    query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    grad_query_features += query_idx * nhead * hdim + head_idx * hdim + hdim_idx;
    // 2. Obtain quantize relative position — must match the forward pass.
    relpos += index;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim + hdim_idx;
    // 3. Obtain output position.
    grad_out += index * nhead + head_idx;
    // d(out)/d(query) = table entry; d(out)/d(table) = query entry.
    atomicAdd(
        grad_query_features,
        grad_out[0] * lookup_table[0]);
    atomicAdd(
        grad_lookup_table,
        grad_out[0] * query_features[0]);
}
// Host-side launcher for rpe_q_backward; launch geometry is identical to
// rpe_q_launcher so forward and backward visit the same cells.
// Tensor layouts:
//   grad_out [total_query_num, local_size, nhead],
//   grad_lookup_table [l, nhead, hdim],
//   grad_query_features [total_query_num, nhead, hdim].
void rpe_q_grad_launcher(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *grad_out, float* grad_lookup_table, float* grad_query_features){
    dim3 grid_dim(DIVUP(total_query_num * local_size, THREADS_PER_BLOCK), nhead, hdim);
    dim3 block_dim(THREADS_PER_BLOCK);
    rpe_q_backward<<<grid_dim, block_dim>>>(
        b, total_query_num, local_size, nhead, hdim, l,
        query_batch_cnt, relpos, lookup_table, query_features,
        grad_out, grad_lookup_table, grad_query_features);
}
|
768
|
#include "includes.h"
// Element-wise multiply of fully-connected activations by a dropout mask.
// Grid-stride style loop so any launch geometry covers all `len` elements.
__global__ void g_FullConnectDropout(float * outputs, float * drop, int len)
{
    const int stride = blockDim.x * gridDim.x;
    for (int base = 0; base < len; base += stride)
    {
        const int idx = base + blockIdx.x * blockDim.x + threadIdx.x;
        if (idx < len)
            outputs[idx] *= drop[idx];
    }
}
|
769
|
#include <stdio.h>
#define N 2250
#define T 512
// Write the reversal of a into b: b[i] = a[N-1-i]; one thread per element.
__global__ void vecReverse(int *a, int *b){
    const int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if (idx >= N) return;   // tail guard for the last partial block
    b[idx] = a[N - 1 - idx];
}
// Fill a[0..N) with 0..N-1, reverse it on the GPU and print the result.
int main(int argc, char *argv[]){
    int size = N * sizeof(int);
    int a[N], b[N], *devA, *devB;
    // Ceil-division covers both the exact-multiple and the remainder case,
    // so the old `if (N % T != 0)` branch is unnecessary.
    int blocks = (N + T - 1) / T;
    // Assign the number
    for (int i = 0; i < N; i++){
        a[i] = i;
    }
    cudaMalloc((void**)&devA, size);
    cudaMalloc((void**)&devB, size);
    cudaMemcpy(devA, a, size, cudaMemcpyHostToDevice);
    vecReverse<<<blocks,T>>>(devA,devB);
    // The blocking device->host copy also synchronizes with the kernel.
    cudaMemcpy(b, devB, size, cudaMemcpyDeviceToHost);
    cudaFree(devA);
    cudaFree(devB);
    for (int i = 0; i < N; i++){
        printf("%d ",b[i]);
    }
    printf("\n");
}
|
770
|
#include "cuda_runtime.h"
#include <stdio.h>
#include <memory.h>
#define N 33 * 1024
#define threadsPerBlock 256
#define blocksPerGrid (N + threadsPerBlock - 1) / threadsPerBlock
#define RADIUS 2
// Signal/image element type
typedef int element;
// 1D MEDIAN FILTER implementation
// signal - input signal
// result - output signal
// N - length of the signal
// Each block stages blockDim.x samples plus a 2*RADIUS halo in shared
// memory; each thread then partially selection-sorts its (2*RADIUS+1)-wide
// window just far enough to extract the median. The caller must guarantee
// RADIUS readable samples on both sides of `signal` (see the wrapper, which
// passes dev_extension + RADIUS).
__global__ void _medianfilter(const element* signal, element* result)
{
    __shared__ element cache[threadsPerBlock + 2 * RADIUS];
    element window[5];   // 2*RADIUS + 1 entries, hard-coded for RADIUS == 2
    int gindex = threadIdx.x + blockDim.x * blockIdx.x;
    int lindex = threadIdx.x + RADIUS;
    // Reads input elements into shared memory
    cache[lindex] = signal[gindex];
    // The first RADIUS threads additionally fetch the left and right halos.
    if (threadIdx.x < RADIUS)
    {
        cache[lindex - RADIUS] = signal[gindex - RADIUS];
        cache[lindex + threadsPerBlock] = signal[gindex + threadsPerBlock];
    }
    __syncthreads();
    // Copy this thread's window out of shared memory into registers.
    for (int j = 0; j < 2 * RADIUS + 1; ++j)
        window[j] = cache[threadIdx.x + j];
    // Orders elements (only half of them): selecting the RADIUS+1 smallest
    // is sufficient to know the median.
    for (int j = 0; j < RADIUS + 1; ++j)
    {
        // Finds position of minimum element
        int min = j;
        for (int k = j + 1; k < 2 * RADIUS + 1; ++k)
            if (window[k] < window[min])
                min = k;
        // Puts found minimum element in its place
        const element temp = window[j];
        window[j] = window[min];
        window[min] = temp;
    }
    // Gets result - the middle element
    result[gindex] = window[RADIUS];
}
// 1D MEDIAN FILTER wrapper
// signal - input signal
// result - output signal
// N - length of the signal
// Median-filter wrapper: build a mirrored RADIUS-wide extension of the
// signal on the host, upload it, run the kernel (10x, as a benchmark loop)
// and download the filtered result.
// signal - input signal, result - output signal, N - length of the signal.
void medianfilter(element* signal, element* result)
{
    element *dev_extension, *dev_result;
    // Check arguments
    if (!signal || N < 1)
        return;
    // Treat special case N = 1
    if (N == 1)
    {
        if (result)
            result[0] = signal[0];
        return;
    }
    // Allocate memory for signal extension
    element* extension = (element*)malloc((N + 2 * RADIUS) * sizeof(element));
    // Check memory allocation
    if (!extension)
        return;
    // Create signal extension. BUGFIX: use RADIUS and sizeof(element)
    // instead of the hard-coded '2' and sizeof(int), so the code stays
    // correct if RADIUS or the element type ever changes.
    cudaMemcpy(extension + RADIUS, signal, N * sizeof(element), cudaMemcpyHostToHost);
    for (int i = 0; i < RADIUS; ++i)
    {
        extension[i] = signal[RADIUS - 1 - i];          // mirror left edge
        extension[N + RADIUS + i] = signal[N - 1 - i];  // mirror right edge
    }
    cudaMalloc((void**)&dev_extension, (N + 2 * RADIUS) * sizeof(element));
    cudaMalloc((void**)&dev_result, N * sizeof(element));
    // Copies signal to device
    cudaMemcpy(dev_extension, extension, (N + 2 * RADIUS) * sizeof(element), cudaMemcpyHostToDevice);
    // Call median filter implementation (offset past the left halo)
    for (int i = 0; i < 10; ++i)
        _medianfilter<<<blocksPerGrid, threadsPerBlock>>>(dev_extension + RADIUS, dev_result);
    // Copies result to host
    cudaMemcpy(result, dev_result, N * sizeof(element), cudaMemcpyDeviceToHost);
    // Free memory
    free(extension);
    cudaFree(dev_extension);
    cudaFree(dev_result);
}
// Generate a periodic 1..5 test signal, median-filter it on the GPU with
// event-based timing, and dump the result to result.txt.
int main()
{
    int *Signal, *result;
    float elapsedTime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    FILE *fp;
    Signal = (int *)malloc(N * sizeof(int));
    result = (element *)malloc(N * sizeof(element));
    for (int i = 0; i < N; i++)
    {
        Signal[i] = i % 5 + 1;
    }
    cudaEventRecord(start, 0);
    medianfilter(Signal, result);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("%.3lf ms\n", elapsedTime);
    fp = fopen("result.txt", "w");
    if (fp == NULL)
    {
        // BUGFIX: bail out instead of writing through a NULL FILE*.
        printf("OPEN FILE FAILS!\n");
        free(Signal);
        free(result);
        return 1;
    }
    for (int i = 0; i < N; i ++)
        fprintf(fp, "%d ", result[i]);
    fclose(fp);
    // Release host buffers and timing events (previously leaked).
    free(Signal);
    free(result);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
771
|
#include "includes.h"
// Backprop step through one 256x256 hidden layer. Launched on a 2-D grid
// covering (i, j) in [0,256)^2:
//   dsynH[i][j]          += dlayer1[offset][j] * layer1[offset-1][i] * alpha
//   dlayer1[offset-1][i] += layer1[offset][j]  * synH[i][j]
// Atomics are needed because all j-threads of a given i update the same
// dlayer1 entry (and vice versa for dsynH with multiple launches).
// NOTE(review): the second term multiplies layer1 (activations) rather than
// dlayer1 (gradients) — confirm this is the intended quantity.
__global__ void BpropH(const float* layer1, float* dlayer1, const float* synH, float* dsynH, const float alpha, const int offset)
{
    int i = blockDim.x*blockIdx.x + threadIdx.x; //256
    int j = blockDim.y*blockIdx.y + threadIdx.y; //256
    atomicAdd(&dsynH[i*256 + j] , dlayer1[offset*256 + j] * layer1[(offset-1)*256 + i] * alpha);
    atomicAdd(&dlayer1[(offset-1)*256 + i] , layer1[offset*256 + j] * synH[i*256 + j]);
}
|
772
|
#include <iostream>
#include <cuda.h>
#include <chrono>
#define ITER 200
// Abort with a diagnostic when a CUDA call failed; `x` is the caller's
// source line (pass __LINE__). No-op on cudaSuccess.
void checkCudaError(cudaError_t msg, int x)
{
    if (msg == cudaSuccess)
        return;
    fprintf(stderr, "line: %d %s\n", x, cudaGetErrorString(msg));
    exit(1);
}
// Time host->device copies of growing buffers (i*100 floats, i = 1..ITER)
// and print the per-size durations.
int main()
{
    float *s, *dev_s;
    int i, j;
    std::chrono::time_point<std::chrono::system_clock> start, end;
    double time;
    double array[ITER];
    for (i = 1; i <= ITER; i+=1) {
        s = (float *)malloc(sizeof(float)*i*100);
        checkCudaError(cudaMalloc((void**)&dev_s, sizeof(float)*i*100), __LINE__);
        for (j = 0; j < i*100; j++) {
            s[j] = j;
        }
        start = std::chrono::system_clock::now();
        checkCudaError(cudaMemcpy(dev_s, s, sizeof(float)*i*100, cudaMemcpyHostToDevice), __LINE__);
        // BUGFIX: cudaThreadSynchronize() is deprecated; use the supported
        // cudaDeviceSynchronize() equivalent.
        checkCudaError(cudaDeviceSynchronize(), __LINE__);
        end = std::chrono::system_clock::now();
        time = std::chrono::duration_cast<std::chrono::nanoseconds>(end - start).count();
        array[i-1] = time;
        free(s);
        checkCudaError(cudaFree(dev_s), __LINE__);
    }
    for (i = 0; i < ITER; i++) {
        // BUGFIX: the measured duration is in nanoseconds, not seconds.
        std::cout << (i+1)*100 << " float : " << array[i] << " ns." << std::endl;
    }
    return 0;
}
|
773
|
// Includes
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <fstream>
#include <ctime>
// Definitions
#define M_PI 3.14276
#define c 299792458
#define mu0 M_PI*4e-7
#define eta0 c*mu0
// CPU function for source calculation
// Stage 1 (source): inject the impulse E0 at node (x, y) of the flattened
// NY-wide grid. Ports 1 and 4 receive +E0; ports 2 and 3 receive -E0.
// The 1-D row-major indexing matches the GPU port of this code.
void stageSource(double* V1, double* V2, double* V3, double* V4, int x, int y, double E0, int NY) {
    const int node = x * NY + y;   // flattened index of the excited node
    V1[node] += E0;
    V2[node] -= E0;
    V3[node] -= E0;
    V4[node] += E0;
} // end func
// CPU function
// Stage 2 (scatter): at every node compute the junction current
// I = (V1 + V4 - V2 - V3)/2 and replace each port voltage with its
// scattered value (V1,V4 subtract I; V2,V3 add I). `Z` is kept for
// interface compatibility but is not used by this implementation.
void stageScatter(double* V1, double* V2, double* V3, double* V4, int NX, int NY, double Z) {
    for (int x = 0; x < NX; x++) {
        for (int y = 0; y < NY; y++) {
            const int n = x * NY + y;
            const double I = (V1[n] + V4[n] - V2[n] - V3[n]) / 2;
            // V_scattered = 2*V_incident -/+ I - V_incident = V_incident -/+ I
            V1[n] = (2 * V1[n] - I) - V1[n];
            V2[n] = (2 * V2[n] + I) - V2[n];
            V3[n] = (2 * V3[n] + I) - V3[n];
            V4[n] = (2 * V4[n] - I) - V4[n];
        }
    }
} // end func
// CPU Function
// CPU Function
// Stage 3 (connect): exchange scattered pulses between adjacent nodes
// (V2<->V4 across x-neighbours, V1<->V3 across y-neighbours), then apply
// the reflection coefficients at the four grid edges. The internal
// exchange must complete before the boundary pass.
void stageConnect(double* V1, double* V2, double* V3, double* V4, // Arrays
    int NX, int NY, // Array arguments
    double rXmin, double rXmax, double rYmin, double rYmax) { // Boundary conditions
    /* Stage 3: Connect */
    // Variables
    double tempV = 0;
    // Connect internals: swap V2 of node x with V4 of its x-1 neighbour.
    for (int x = 1; x < NX; x++) {
        for (int y = 0; y < NY; y++) {
            tempV = V2[x * NY + y];
            V2[x * NY + y] = V4[(x - 1) * NY + y];
            V4[(x - 1) * NY + y] = tempV;
        }
    }
    // Swap V1 of node y with V3 of its y-1 neighbour.
    for (int x = 0; x < NX; x++) {
        for (int y = 1; y < NY; y++) {
            tempV = V1[x * NY + y];
            V1[x * NY + y] = V3[x * NY + y - 1];
            V3[x * NY + y - 1] = tempV;
        }
    }
    // Connect boundaries: reflect the outward-travelling pulses.
    for (int x = 0; x < NX; x++) {
        V3[x * NY + NY - 1] = rYmax * V3[x * NY + NY - 1];
        V1[x * NY] = rYmin * V1[x * NY]; // V1[x * NY + 0] = rYmin * V1[x * NY + 0];
    }
    for (int y = 0; y < NY; y++) {
        V4[(NX - 1) * NY + y] = rXmax * V4[(NX - 1) * NY + y];
        V2[y] = rXmin * V2[y]; // V2[0 * NY + y] = rXmin * V2[0 * NY + y];
    }
} // end func
// CPU reference TLM simulation: NT time steps of source -> scatter ->
// connect over an NX x NY grid, logging the probe voltage to CPU.csv.
int main() {
    // Start timer
    std::clock_t start = std::clock();
    /* Variables */
    // Changable variables
    int NX = 200; // number of X
    int NY = 200; // number of Y
    int NT = 8192; // number of Times/Iterations
    double dl = 1;
    // Port-voltage arrays, value-initialized to 0 (1-D row-major layout).
    double* V1 = new double[int(NX * NY)]();
    double* V2 = new double[int(NX * NY)]();
    double* V3 = new double[int(NX * NY)]();
    double* V4 = new double[int(NX * NY)]();
    // Variables and coefficients
    // Scatter coefficient
    double Z = eta0 / sqrt(2.);
    // Boundary connect Coefficiants
    double rXmin = -1;
    double rXmax = -1;
    double rYmin = -1;
    double rYmax = -1;
    // impulse parameters
    double dt = dl / (sqrt(2.) * c);
    double width = 20 * dt * sqrt(2.);
    double delay = 100 * dt * sqrt(2.);
    // input position
    int Ein[] = { 10,10 };
    // output/reading position
    int Eout[] = { 15,15 };
    // file output
    std::ofstream output("CPU.csv");
    for (int n = 0; n < NT; n++) {
        // Gaussian impulse amplitude at this time step
        double E0 = (1 / sqrt(2.)) * exp(-(n * dt - delay) * (n * dt - delay) / (width * width));
        /* Stage 1: Source */
        stageSource(V1, V2, V3, V4, Ein[0], Ein[1], E0, NY);
        /* Stage 2: Scatter */
        stageScatter(V1, V2, V3, V4, NX, NY, Z);
        /* Stage 3: Connect */
        stageConnect(V1, V2, V3, V4, NX, NY, rXmin, rXmax, rYmin, rYmax);
        output << n * dt << "," << V2[Eout[0] * NY + Eout[1]] + V4[Eout[0] * NY + Eout[1]] << std::endl;
        if (n % 100 == 0)
            std::cout << n << std::endl;
    } // End of loop
    output.close();
    // BUGFIX: release the four new[]'d arrays (previously leaked).
    delete[] V1;
    delete[] V2;
    delete[] V3;
    delete[] V4;
    std::cout << "Done: " << ((std::clock() - start) / (double)CLOCKS_PER_SEC) << std::endl;
    std::cin.get();
} // end main
// EOF
|
774
|
// Pairwise force kernel over a KD-tree-partitioned particle set. Each GPU
// block handles one (cell0, cell1) pair of tree blocks; each thread decodes
// one particle-particle interaction from a flat interaction index. Forces
// (FX/FY/FZ), repulsion-only forces (RFX/..), bending moments (MX/..),
// neighbor counts (NN) and convergence flags are accumulated with atomics.
extern "C"
__global__ void forceCompute(float* pX,float* pY,float* pZ,
float* nX,float* nY,float* nZ,
float* FX,float* FY,float* FZ,
float* RFX,float* RFY,float* RFZ,
float* MX,float* MY,float* MZ,
float* sDRadius_squared, float* relaxed,
int* NN,
int* CID,
int* hasConverged,
int* allNeighborsHaveConverged,
int* allNeighborsHaveConvergedPreviously,
// KD Tree specs
// per Block
int* stBl0, int* nPtBl0,
int* stBl1, int* nPtBl1,
int* blLevel,
// per GPU Block
int* idBl, int* offsBl,
// for all
int* dotIndexes,
// Integration specs
float r_0,
float k_align,
float k_bend,
float radiusTresholdInteract,
float ka,
float pa,
float pr,
float maxDisplacementPerStep
)
{
extern __shared__ int array[];
int iGPUBlock=blockIdx.x;
int iThread=threadIdx.x;
// Thread 0 stages this GPU block's tree-cell descriptor in shared memory
// so every thread can read it after the barrier.
if (iThread==0) {
int idBloc=idBl[iGPUBlock];
array[0]=offsBl[iGPUBlock];
array[1]=stBl0[idBloc];
array[2]=nPtBl0[idBloc];
array[3]=stBl1[idBloc];
array[4]=nPtBl1[idBloc];
array[5]=blLevel[idBloc];
}
__syncthreads();
int offsInteraction = array[0];
int startIndexBl0 = array[1];
int nPtBlock0 = array[2];
int startIndexBl1 = array[3];
int nPtBlock1 = array[4];
int blockLevel = array[5];
int interactionToBeComputed = iThread+offsInteraction;
int iPt1=-1;
int iPt2=-1;
// Level 0: all pairs within one cell (n*(n-1)/2); deeper levels: the full
// cross product between the two cells.
int totalNumberOfInteractions = (blockLevel==0)*(nPtBlock0*(nPtBlock0-1)/2)+
(blockLevel>0)*nPtBlock0*nPtBlock1;
if (interactionToBeComputed<totalNumberOfInteractions) {
// It's not an overflow
int ind0, ind1;
if (blockLevel==0) {
// Invert the triangular-number layout to recover the (ind0, ind1) pair
// from the flat within-cell interaction index.
ind0 = nPtBlock0 - 2 - floor(sqrtf(-8*interactionToBeComputed + 4*nPtBlock0*(nPtBlock0-1)-7)/2.0 - 0.5);
ind1 = interactionToBeComputed + ind0 + 1 - nPtBlock0*(nPtBlock0-1)/2 + (nPtBlock0-ind0)*((nPtBlock0-ind0)-1)/2;
startIndexBl1=startIndexBl0;
} else {
ind1=interactionToBeComputed/nPtBlock0;
ind0=interactionToBeComputed-ind1*nPtBlock0;
}
iPt1 = dotIndexes[startIndexBl0+ind0];
iPt2 = dotIndexes[startIndexBl1+ind1];
// that's a correct interaction
float dx=pX[iPt2]-pX[iPt1];
float dy=pY[iPt2]-pY[iPt1];
float dz=pZ[iPt2]-pZ[iPt1];
float dist_Squared=(dx*dx+dy*dy+dz*dz);
int idC1=CID[iPt1];
int idC2=CID[iPt2];
// CID == -1 marks a "superdot"; regular interaction needs two real cells.
if ((idC1!=-1)&&(idC2!=-1)) {
if (dist_Squared<radiusTresholdInteract*radiusTresholdInteract*r_0*r_0) {
// Propagate non-convergence to interacting partners.
if (hasConverged[iPt1]==0) {allNeighborsHaveConverged[iPt2]=0;}
if (hasConverged[iPt2]==0) {allNeighborsHaveConverged[iPt1]=0;}
//float rfx, rfy, rfz;
float kr=pa*ka/pr*powf(1,pr-pa);
float dist = sqrtf(dist_Squared);
float r=dist/r_0;
// Repulsive magnitude: power law beyond r_0, linear ramp inside it.
float f_rep=(-pr*kr/powf(r, pr+1))*(r>1.0)-(r<=1.0)*((1.0-r)*maxDisplacementPerStep+pr*kr/powf(1.0, pr+1));
dx=dx/dist;dy=dy/dist;dz=dz/dist;
float fx1=dx*f_rep;float fx2=-dx*f_rep;
float fy1=dy*f_rep;float fy2=-dy*f_rep;
float fz1=dz*f_rep;float fz2=-dz*f_rep;
if (idC1==idC2){
// iPt1 et 2 sont voisins (same cell => neighbors)
atomicAdd(& NN[iPt1], 1);
atomicAdd(& NN[iPt2], 1);
// Neighbor
// Record the repulsion-only contribution separately.
atomicAdd(& RFX[iPt1], fx1);atomicAdd(& RFX[iPt2], fx2);
atomicAdd(& RFY[iPt1], fy1);atomicAdd(& RFY[iPt2], fy2);
atomicAdd(& RFZ[iPt1], fz1);atomicAdd(& RFZ[iPt2], fz2);
float f_attract=(pa*ka/powf(r, pa+1))*(r>1.0)+(r<=1.0)*(pr*kr/powf(1.0, pr+1));
fx1+=dx*f_attract;fx2-=dx*f_attract;
fy1+=dy*f_attract;fy2-=dy*f_attract;
fz1+=dz*f_attract;fz2-=dz*f_attract;
// Get data (normals of both particles)
float nX1=nX[iPt1];float nX2=nX[iPt2];
float nY1=nY[iPt1];float nY2=nY[iPt2];
float nZ1=nZ[iPt1];float nZ2=nZ[iPt2];
// Alignment term flattens the pair along their summed normals.
float iFlatten = k_align*(dx*(nX1+nX2)+dy*(nY1+nY2)+dz*(nZ1+nZ2));
fx1+=iFlatten*nX1;
fy1+=iFlatten*nY1;
fz1+=iFlatten*nZ1;
fx2-=iFlatten*nX2;
fy2-=iFlatten*nY2;
fz2-=iFlatten*nZ2;
// Bending moments from the normal components along the pair axis.
float iPerpend1=-k_bend*(dx*nX1+dy*nY1+dz*nZ1);
float iPerpend2=-k_bend*(dx*nX2+dy*nY2+dz*nZ2);
atomicAdd(& MX[iPt1], iPerpend1*dx);atomicAdd(& MX[iPt2], iPerpend2*dx);
atomicAdd(& MY[iPt1], iPerpend1*dy);atomicAdd(& MY[iPt2], iPerpend2*dy);
atomicAdd(& MZ[iPt1], iPerpend1*dz);atomicAdd(& MZ[iPt2], iPerpend2*dz);
} else {
// Different cells: purely repulsive contact, mark both as relaxed.
relaxed[iPt1]=1.0;
relaxed[iPt2]=1.0;
}
atomicAdd(& FX[iPt1], fx1);atomicAdd(& FX[iPt2], fx2);
atomicAdd(& FY[iPt1], fy1);atomicAdd(& FY[iPt2], fy2);
atomicAdd(& FZ[iPt1], fz1);atomicAdd(& FZ[iPt2], fz2);
}
} else if (idC1*idC2<=0){
// Exactly one of the pair is a superdot (CID == -1).
int idSD, idND;
if (idC1<0) {idSD=iPt1;idND=iPt2;} else {idSD=iPt2;idND=iPt1;}
if ((dist_Squared-sDRadius_squared[idSD]<0)&&(allNeighborsHaveConvergedPreviously[idND]==0)) {
// superdot is touched! atomic is unnecessary here
allNeighborsHaveConverged[idSD]=0;
}
}
}
}
/*
if ((iTx<LX)&&(iTy<LY)) {
int iPtx=pStX[blockIdx.x]+iTx;
int iPty=pStY[blockIdx.x]+iTy;
int compute=0;
if (level==0) {
compute=1;
} else {
int kS1=kSo[iPtx];
int kS2=kSo[iPty];
if ((kS1+kS2)==((1<<(level))-1)) {
compute=1;
}
}
if (compute==1) {
int iPt1 = io[iPtx];//threadIdx.x+blockIdx.x*blockDim.x;
int iPt2 = io[iPty];//threadIdx.y+blockIdx.y*blockDim.y;
//atomicAdd(& NN[iPt1], 1);
//float r_0=#r_0;
float kr=pa*ka/pr*powf(1,pr-pa);
if (iPt1!=iPt2)
{
float dx=pX[iPt2]-pX[iPt1];
float dy=pY[iPt2]-pY[iPt1];
float dz=pZ[iPt2]-pZ[iPt1];
float dist_Squared=(dx*dx+dy*dy+dz*dz);
if (dist_Squared<radiusTresholdInteract*radiusTresholdInteract*r_0*r_0) {
// Get data
float nX1=nX[iPt1];float nX2=nX[iPt2];
float nY1=nY[iPt1];float nY2=nY[iPt2];
float nZ1=nZ[iPt1];float nZ2=nZ[iPt2];
int idC1=CID[iPt1];
int idC2=CID[iPt2];
//
float f_rep, f_attract=0;
float fx, fy, fz;
//float rfx, rfy, rfz;
float dist = sqrt(dist_Squared);
float r=dist/r_0;
f_rep=(-pr*kr/powf(r, pr+1))*(r>1.0)-(r<=1.0)*((1.0-r)*maxDisplacementPerStep+pr*kr/powf(1.0, pr+1));
// for convergence tests
if (hasConverged[iPt2]==0) {allNeighborsHaveConverged[iPt1]=0;}
//atomicMin(& allNeighborsHaveConverged[iPt1], hasConverged[iPt2]);
dx=dx/dist;dy=dy/dist;dz=dz/dist;
fx=dx*f_rep;
fy=dy*f_rep;
fz=dz*f_rep;
if (idC1==idC2){ //&&(dist_Squared<radiusTresholdNeighbor*r_0*radiusTresholdNeighbor*r_0)) {
// Neighbor
// iPt1 et 2 sont voisins
atomicAdd(& NN[iPt1], 1);
atomicAdd(& RFX[iPt1], fx);
atomicAdd(& RFY[iPt1], fy);
atomicAdd(& RFZ[iPt1], fz);
f_attract=(pa*ka/powf(r, pa+1))*(r>1.0)+(r<=1.0)*(pr*kr/powf(1.0, pr+1));
fx=dx*(f_rep+f_attract);
fy=dy*(f_rep+f_attract);
fz=dz*(f_rep+f_attract);
float iFlatten = k_align*(dx*(nX1+nX2)+dy*(nY1+nY2)+dz*(nZ1+nZ2));
fx+=iFlatten*nX1;
fy+=iFlatten*nY1;
fz+=iFlatten*nZ1;
float iPerpend1=-k_bend*(dx*nX1+dy*nY1+dz*nZ1);
atomicAdd(& MX[iPt1], iPerpend1*dx);
atomicAdd(& MY[iPt1], iPerpend1*dy);
atomicAdd(& MZ[iPt1], iPerpend1*dz);
} else {
relaxed[iPt1]=1.0;
}
atomicAdd(& FX[iPt1], fx);
atomicAdd(& FY[iPt1], fy);
atomicAdd(& FZ[iPt1], fz);
}
}
}
}*/
//rfx=dx*(f_rep);
//rfy=dy*(f_rep);
//rfz=dz*(f_rep);
// Attractif
// pX[iPt1]=0;
// pX[iPt2]=0;
//if (idC1==idC2) {
// f_attract=(pa*ka/powf(r, pa+1))*(r>1.0);
//f_attract=0;
//}//*prodScalNorm;
|
775
|
#include "includes.h"
// Warping-LCSS matching-score kernel. Launch geometry: threadIdx.x selects
// the parameter triple (reward, penalty, accepted distance), blockIdx.x the
// template and blockIdx.y the stream. The full t_len x s_len DP matrix is
// rolled into a (t_len + 2)-entry window; mss[j] receives the best score
// after consuming stream symbol j.
__global__ void wlcss_cuda_kernel(int32_t *d_mss, int32_t *d_mss_offsets, int32_t *d_ts, int32_t *d_ss, int32_t *d_tlen, int32_t *d_toffsets, int32_t *d_slen, int32_t *d_soffsets, int32_t *d_params, int32_t *d_3d_cost_matrix){
    int32_t params_idx = threadIdx.x;
    int32_t template_idx = blockIdx.x;
    int32_t stream_idx = blockIdx.y;
    int32_t t_len = d_tlen[template_idx];
    int32_t s_len = d_slen[stream_idx];
    int32_t t_offset = d_toffsets[template_idx];
    int32_t s_offset = d_soffsets[stream_idx];
    // Output slot for this (params, template, stream) combination.
    int32_t d_mss_offset = d_mss_offsets[params_idx*gridDim.x*gridDim.y+template_idx*gridDim.y+stream_idx];
    int32_t *mss = &d_mss[d_mss_offset];
    // Rolling DP window, zero-initialized by device-side new.
    // NOTE(review): the allocation is not checked for NULL before use.
    int32_t *tmp_window = new int32_t[(t_len + 2)]();
    int32_t *t = &d_ts[t_offset];
    int32_t *s = &d_ss[s_offset];
    int32_t reward = d_params[params_idx*3];
    int32_t penalty = d_params[params_idx*3+1];
    int32_t accepteddist = d_params[params_idx*3+2];
    int32_t tmp = 0;
    for(int32_t j=0;j<s_len;j++){
        for(int32_t i=0;i<t_len;i++){
            // Symbol mismatch cost from the 26x26 cost matrix.
            int32_t distance = d_3d_cost_matrix[s[j]*26 + t[i]];;
            if (distance <= accepteddist){
                // Close enough: extend the match.
                tmp = tmp_window[i]+reward;
            } else{
                // Otherwise take the best of the three warping moves, each
                // paying a distance-weighted penalty.
                tmp = max(tmp_window[i]-penalty*distance,
                        max(tmp_window[i+1]-penalty*distance,
                        tmp_window[t_len+1]-penalty*distance));
            }
            tmp_window[i] = tmp_window[t_len+1];
            tmp_window[t_len+1] = tmp;
        }
        tmp_window[t_len] = tmp_window[t_len+1];
        mss[j] = tmp_window[t_len+1];
        tmp_window[t_len+1] = 0;
    }
    delete [] tmp_window;
}
|
776
|
#include <stdio.h>
#include <stdlib.h>
// In-place tree reduction of one block's slice of `a` (the blockDim.x
// elements starting at blockIdx.x*blockDim.x, clipped to `width`); thread 0
// writes the block's partial sum into s[blockIdx.x]. `a` is destroyed.
// NOTE(review): the halving loop assumes blockDim.x is a power of two (the
// host launches 1024) — with other block sizes elements would be dropped.
__global__ void sum_cuda(double* a, double *s, int width) {
    int t = threadIdx.x;
    int b = blockIdx.x*blockDim.x;
    int i;
    for(i = blockDim.x/2; i > 0; i /= 2) {
        // Fold the upper half onto the lower half; the width check guards
        // the final partial block.
        if(t < i && b+t+i < width)
            a[t+b] += a[t+b+i];
        __syncthreads();   // reached by all threads — the add is guarded, not the barrier
    }
    if(t == 0)
        s[blockIdx.x] = a[t+b];
}
// Sum 40M doubles on the GPU: per-block partial sums via sum_cuda, final
// accumulation of the partials on the host.
int main()
{
    const int width = 40000000;
    const int bytes = width * sizeof(double);
    const int block_size = 1024;
    const int num_blocks = (width - 1) / block_size + 1;
    const int partial_bytes = num_blocks * sizeof(double);
    double *a = (double*) malloc (bytes);
    double *s = (double*) malloc (partial_bytes);
    for (int i = 0; i < width; i++)
        a[i] = i;
    double *d_a, *d_s;
    // allocate device buffers and upload the input
    cudaMalloc((void **) &d_a, bytes);
    cudaMemcpy(d_a, a, bytes, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_s, partial_bytes);
    // launch geometry
    dim3 dimGrid(num_blocks, 1, 1);
    dim3 dimBlock(block_size, 1, 1);
    // per-block partial sums on the GPU
    sum_cuda<<<dimGrid,dimBlock>>>(d_a, d_s, width);
    // bring the partials back (blocking copy also syncs with the kernel)
    cudaMemcpy(s, d_s, partial_bytes, cudaMemcpyDeviceToHost);
    // finish the reduction on the CPU
    for (int blk = 1; blk < num_blocks; blk++)
        s[0] += s[blk];
    //printf("\nSum = %f\n",s[0]);
    cudaFree(d_a);
    cudaFree(d_s);
    free(a);
    free(s);
    a = s = NULL;
}
|
777
|
#include <cuda.h>
#include <stdio.h>
// Double every element of a[0..n): a[i] *= 2. One thread per element.
__global__
void scaleit_kernel(double *a,int n)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n)
        return;           // tail guard
    a[idx] *= 2.0l;
}
// Fill h_a with 1..n, double every element on the GPU and verify the result.
int main(int argc, char **argv)
{
    double *h_a, *d_a;
    int i,n=16384;
    dim3 block, grid;
    /* Allocate Host Pointer (BUGFIX: check the host malloc too) */
    h_a = (double*)malloc(n*sizeof(double));
    if ( h_a == NULL )
    {
        fprintf(stderr,"Failed to malloc!\n");
        exit(1);
    }
    for(i=0; i<n; i++)
    {
        h_a[i] = i+1;
    }
    /* Allocate Device Pointer.
       BUGFIX: cudaMalloc reports failure through its return code and does
       not promise to null the pointer, so test the status, not d_a. */
    if ( cudaMalloc((void**)&d_a, n*sizeof(double)) != cudaSuccess )
    {
        fprintf(stderr,"Failed to malloc!\n");
        exit(1);
    }
    /* Decompose Problem: ceil-division stays correct even if n is not a
       multiple of the block size (the kernel guards the tail). */
    block = dim3(1024, 1, 1);
    grid = dim3((n + block.x - 1)/block.x, 1, 1);
    /* Copy from Host to Device */
    cudaMemcpy(d_a, h_a, n*sizeof(double),cudaMemcpyHostToDevice);
    /* Launch Compute Kernel */
    scaleit_kernel<<<grid,block>>>(d_a,n);
    /* Copy from Device to Host (blocking copy also syncs with the kernel) */
    cudaMemcpy(h_a, d_a, n*sizeof(double),cudaMemcpyDeviceToHost);
    for(i=0;i<n;i++)
    {
        if(h_a[i] != (2.0l * (i+1)))
        {
            fprintf(stderr, "Error! %d: %lf\n",i,h_a[i]);
            exit(1);
        }
    }
    fprintf(stdout, "Correct!\n");
    /* Free Device Pointer */
    cudaFree(d_a);
    /* Free Host Pointer */
    free(h_a);
    return 0;
}
|
778
|
/*
#include "SDFDevice.cuh"
__host__
SDFDevice::SDFDevice(DistancePrimitive** primitives, SDModification** modifications, size_t modificationCount) : primitives(primitives), modifications(modifications), modificationCount(modificationCount)
{
}
__host__
SDFDevice::~SDFDevice()
{
}
__device__ float
SDFDevice::distanceFromPoint(glm::vec3 position)
{
float distance = selectDistanceFunction(primitives[0], position);
for (int i = 0; i < modificationCount; ++i)
{
float distance2 = selectDistanceFunction(primitives[i + 1], position);
distance = selectModificationFunction(modifications[i], distance, distance2);
}
return distance;
}
__device__ float
SDFDevice::selectDistanceFunction(DistancePrimitive* primitive, glm::vec3 position)
{
switch (primitive->functionId)
{
case 0:
{
SDSphere *sphereCast = ((SDSphere*)primitive);
float distance = distanceFromSphere(glm::vec4(position, 1), sphereCast->transform, sphereCast->radius);
return distance;
}
case 1:
{
SDTorus *torusCast = ((SDTorus*)primitive);
float distance2 = distanceFromTorus(glm::vec4(position, 1), torusCast->transform, torusCast->dimensions);
return distance2;
}
}
return 0.0f;
}
*/
/*
__device__ inline float
distanceFromSphere(glm::vec3 position, float radius, glm::vec3 point)
{
return GLMUtil::length(point - position) - radius;
}
*/
/*
__host__ __device__ inline float
SDFDevice::distanceFromSphere(glm::vec4 point, glm::mat4 transform, float radius)
{
point = transform * point;
return GLMUtil::length(glm::vec3(point)) - radius;
}
/*
__device__ inline float
distanceFromTorus(glm::vec3 position, glm::vec2 dimensions, glm::vec3 point)
{
point -= position;
glm::vec2 q = glm::vec2(GLMUtil::length(glm::vec2(point.x, point.y)) - dimensions.x, point.z);
return GLMUtil::length(q) - dimensions.y;
}
*/
/*
__host__ __device__ inline float
SDFDevice::distanceFromTorus(glm::vec4 point, glm::mat4 transform, glm::vec2 dimensions)
{
point = transform * point;
glm::vec2 q = glm::vec2(GLMUtil::length(glm::vec2(point.x, point.y)) - dimensions.x, point.z);
return GLMUtil::length(q) - dimensions.y;
}
__device__ float
SDFDevice::selectModificationFunction(SDModification* modification, float distance1, float distance2)
{
switch (modification->functionId)
{
case 0:
//PlaceSDPrimitive *placeCast = ((PlaceSDPrimitive*)modification);
return placeModification(distance1, distance2);
case 1:
return carveModification(distance1, distance2);
}
return 0;
}
__device__ inline float
SDFDevice::placeModification(float originalDistance, float modifierDistance)
{
return fminf(originalDistance, modifierDistance);
}
__device__ inline float
SDFDevice::carveModification(float originalDistance, float modifierDistance)
{
return fmaxf(originalDistance, -modifierDistance);
}
*/
|
779
|
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
// Iterator over this thread's column indices. Pass -1 to get the thread's
// first column (blockDim.x*blockIdx.x + threadIdx.x); pass the previous
// value to advance by gridDim.x. Returns -1 once the index reaches ncols.
// NOTE(review): the advance step is gridDim.x (blocks), not
// blockDim.x*gridDim.x (total threads) as in a conventional grid-stride
// loop — confirm this matches the intended launch geometry.
__device__ int get_index_x (int ncols, int index ) {
    if (index == -1) {
        index = blockDim.x * blockIdx.x + threadIdx.x;
    } else {
        index += gridDim.x;
    }
    if (index >= ncols) index = -1;
    return index;
}
// Iterator over this thread's row indices (y axis); same protocol as
// get_index_x: -1 seeds, previous value advances by gridDim.y, -1 signals
// exhaustion once nrows is reached.
// NOTE(review): step is gridDim.y, not blockDim.y*gridDim.y — confirm.
__device__ int get_index_y (int nrows, int index ) {
    if (index == -1) {
        index = blockDim.y * blockIdx.y + threadIdx.y;
    } else {
        index += gridDim.y;
    }
    if (index >= nrows) index = -1;
    return index;
}
// Iterator over this thread's slice indices (z axis); same protocol as
// get_index_x: -1 seeds, previous value advances by gridDim.z, -1 signals
// exhaustion once depth is reached.
// NOTE(review): step is gridDim.z, not blockDim.z*gridDim.z — confirm.
__device__ int get_index_z (int depth, int index ) {
    if (index == -1) {
        index = blockDim.z * blockIdx.z + threadIdx.z;
    } else {
        index += gridDim.z;
    }
    if (index >= depth) index = -1;
    return index;
}
// Gaussian similarity of x and y with spread sigma.
// NOTE(review): the prefactor is 1/sqrt(2*pi*sigma); a normalized Gaussian
// pdf would use 1/(sigma*sqrt(2*pi)) (as Gaussian_prime's sigma^3 factor
// suggests) — confirm which normalization is intended.
__device__ float Gaussian (float x, float y, float sigma) {
    return (1/sqrt(2*M_PI*sigma)) * exp((-pow(x - y,2)) / (2*pow(sigma,2)));
}
// Derivative of the Gaussian with respect to x:
// -((x-y)/(sigma^3*sqrt(2*pi))) * exp(-(x-y)^2 / (2*sigma^2)).
__device__ float Gaussian_prime (float x, float y, float sigma) {
    return ( -(x - y) / ((pow(sigma,3))*sqrt(2*M_PI)) ) * exp((-pow(x - y,2)) / (2*pow(sigma,2)));
}
// Normalized shifted-correlation kernel: for every (shift idm, row idy,
// slice idz) triple this thread owns, out[idm + idy*msize + idz*nrows*msize]
// receives the mean Gaussian similarity between row x and row y shifted by
// m = marray[idm], weighted by wx/wy.
// NOTE(review): idm is not re-seeded between idy iterations, so after the
// inner loop exhausts, later rows of this thread do no work — confirm the
// launch geometry makes that intentional. cn == 0 yields an inf/NaN output.
__global__ void NCC( float *out, const float *x, const float *y, const float *wx, const float *wy, const int *marray, float sigma, int msize, int ncols, int nrows, int depth) {
    float sum = 0;
    int i = 0;
    int idm = get_index_x(msize, -1);
    int idy = get_index_y(nrows, -1);
    int idz = get_index_z(depth, -1);
    // BUGFIX: guard the marray read — idm is -1 for threads with no work,
    // and marray[-1] was an out-of-bounds access.
    int m = (idm >= 0) ? marray[idm] : 0;
    int cn = 1;
    while(idz >= 0) {
        while (idy >= 0) {
            while(idm >= 0) {
                sum = 0;
                cn = 0;
                for (i=m; i < ncols; i++) {
                    // BUGFIX: reject i-m == ncols as well (was '> ncols'),
                    // which read one element past the row when m < 0.
                    if (i < 0 || i-m >= ncols) {
                        continue;
                    }
                    sum += Gaussian (x[i + idy*ncols + idz*nrows*ncols] * wx[i + idm*ncols], y[i-m + idy*ncols + idz*nrows*ncols] * wy[i-m + idm*ncols], sigma);
                    cn = cn + 1;
                }
                out[idm + idy*msize + idz*nrows*msize] = ( 1/((float)cn) ) * sum;
                idm = get_index_x(msize, idm);
                if (idm >= 0) m = marray[idm];  // BUGFIX: no read once exhausted
            }
            idy = get_index_y (nrows, idy);
        }
        idz = get_index_z (depth, idz);
    }
}
// Derivative counterpart of NCC: same sweep over (shift, row, slice), but
// accumulates Gaussian_prime instead of Gaussian. Result layout matches NCC.
// NOTE(review): y is indexed with abs(i-m) here while NCC uses i-m; given
// the i < 0 skip, i-m is always >= 0, so abs() is redundant but kept.
// NOTE(review): idm is not reset when idy/idz advance (see NCC).
__global__ void NCC_prime( float *out, const float *x, const float *y, const float *wx, const float *wy, const int *marray, float sigma, int msize, int ncols, int nrows, int depth) {
    float sum = 0;
    int i = 0;
    int idm = get_index_x(msize, -1);
    int idy = get_index_y(nrows, -1);
    int idz = get_index_z(depth, -1);
    // Fix: the original read marray[idm] unconditionally; idm is -1 for
    // threads with no shift assigned, making that an out-of-bounds read.
    int m = (idm >= 0) ? marray[idm] : 0;
    int cn = 1;
    while(idz >= 0) {
        while (idy >= 0) {
            while(idm >= 0) {
                sum = 0;
                cn = 0;
                for (i=m; i < ncols; i++) {
                    // Fix: bound is i-m >= ncols (was > ncols), which let
                    // i-m == ncols read one element past the end of the row.
                    if (i < 0 || i-m >= ncols) {
                        continue;
                    }
                    sum += Gaussian_prime (x[i + idy*ncols + idz*nrows*ncols] * wx[i + idm*ncols], y[abs(i-m) + idy*ncols + idz*nrows*ncols] * wy[i-m + idm*ncols], sigma);
                    cn = cn + 1;
                }
                // Fix: guard the empty-overlap case (cn == 0 produced NaN via 0 * inf).
                out[idm + idy*msize + idz*nrows*msize] = (cn > 0) ? sum / (float)cn : 0.0f;
                idm = get_index_x(msize, idm);
                if (idm >= 0) m = marray[idm];  // was an unconditional OOB read at sweep end
            }
            idy = get_index_y (nrows, idy);
        }
        idz = get_index_z (depth, idz);
    }
}
|
780
|
// https://github.com/thrust/thrust/blob/8551c97870cd722486ba7834ae9d867f13e299ad/examples/sum_rows.cu
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/random.h>
#include <iostream>
// convert a linear index to a row index
// Functor mapping a flat element index of a row-major R x C matrix to its
// row number: i -> i / C. Used as the key iterator for reduce_by_key.
template <typename T>
struct linear_index_to_row_index : public thrust::unary_function<T,T>
{
    T C; // number of columns
    __host__ __device__
    linear_index_to_row_index(T C) : C(C) {}
    __host__ __device__
    T operator()(T i) { return i / C; }
};
// Number of timed repetitions per benchmark measurement.
const int NUM_REPS = 10;
// CUDA events used to time the reduce_by_key runs (created in main).
cudaEvent_t startEvent, stopEvent;
// Elapsed time of the most recent measurement, in milliseconds.
float ms;
// Benchmarks thrust::reduce_by_key computing per-row sums of an R x C
// row-major matrix stored flat in `array`. Prints the average time per run
// ((ms / NUM_REPS) * 1e3, i.e. microseconds) and returns 0.
// Uses the file-scope events startEvent/stopEvent and the file-scope `ms`.
int test(thrust::device_vector<int>& array, int R, int C)
{
// int R = 5; // number of rows
// int C = 8; // number of columns
// allocate storage for row sums and indices
thrust::device_vector<int> row_sums(R);
thrust::device_vector<int> row_indices(R);
// untimed warm-up run so the timed loop below excludes one-time costs
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)) + (R*C),
array.begin(),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<int>());
cudaEventRecord(startEvent, 0);
for (int i = 0; i < NUM_REPS; i++) {
// compute row sums by summing values with equal row indices
thrust::reduce_by_key
(thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)),
thrust::make_transform_iterator(thrust::counting_iterator<int>(0), linear_index_to_row_index<int>(C)) + (R*C),
array.begin(),
row_indices.begin(),
row_sums.begin(),
thrust::equal_to<int>(),
thrust::plus<int>());
}
cudaEventRecord(stopEvent, 0);
// block until the timed work completes, then read the elapsed time
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&ms, startEvent, stopEvent);
printf("%15.3f\n", (ms / NUM_REPS) * 1e3 );
return 0;
}
// Runs the row-sum benchmark over a 2^THEPOWER-element random array for
// every split (segments = 2^p, segment size = 2^(THEPOWER-p)) with p in
// [start, THEPOWER].
void dothatbench(int THEPOWER, int start) {
    const int last_pow = THEPOWER;
    const int total_elems = 1 << THEPOWER;
    printf("Benchmarking Thrust %i.%i.%i TotalElems=%i\n",
    THRUST_MAJOR_VERSION, THRUST_MINOR_VERSION, THRUST_SUBMINOR_VERSION, total_elems);
    // fill the device array with random two-digit values
    thrust::default_random_engine rng;
    thrust::uniform_int_distribution<int> dist(10, 99);
    thrust::device_vector<int> array(total_elems);
    for (size_t k = 0; k < array.size(); k++)
        array[k] = dist(rng);
    printf("initialized array\n");
    // sweep every rows/cols split of the same total element count
    for (int powy = start; powy <= last_pow; powy++) {
        const int powx = THEPOWER - powy;
        const int num_segments = 1 << powy;
        const int segment_size = 1 << powx;
        char buf[16];
        snprintf(buf, 16, "2^%i 2^%i", powy, powx);
        printf("%15s", buf);
        test(array, num_segments, segment_size);
    }
}
// Entry point: create the shared timing events, run the benchmark sweeps,
// then tear the events down.
int main(int argc, char** argv) {
    cudaEventCreate(&startEvent);
    cudaEventCreate(&stopEvent);
    const int sweeps[] = {20, 26};
    for (int s = 0; s < 2; ++s)
        dothatbench(sweeps[s], 0);
    cudaEventDestroy(startEvent);
    cudaEventDestroy(stopEvent);
}
|
781
|
// pi1.cu
/*
* A simple CUDA-enabled program that approximates \pi using monte-carlo
* sampling. This version generates all the random numbers at the start,
* then launches kernels to use them.
*/
#include <stdio.h>
#include <curand.h>
// One thread Monte-Carlo-tests `trials` (x, y) pairs drawn from its private
// slice of d_rands and writes the in-unit-circle fraction to d_out[thread].
__global__ void pi(float* d_out, float* d_rands, int rands_per_kernel, int trials) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    const int base = tid * rands_per_kernel;  // start of this thread's slice
    int hits = 0;
    for (int t = 0; t < trials; ++t) {
        const float px = d_rands[base + 2*t];
        const float py = d_rands[base + 2*t + 1];
        if (px*px + py*py <= 1.0f)
            ++hits;
    }
    d_out[tid] = float(hits) / float(trials);
}
// Host driver: generates all random samples up front with cuRAND, launches
// the sampling kernel, and averages the per-thread hit fractions into pi.
int main() {
    const int N_BLOCKS = 1024;
    const int N_THREADS = 512;
    const int N_KERNELS = N_BLOCKS * N_THREADS;
    const int N_TRIALS = 100;
    const int N_RANDS_PER_TRIAL = 2;
    const int N_RANDS = N_KERNELS * N_TRIALS * N_RANDS_PER_TRIAL;
    float* d_pis = NULL;
    float* d_rands = NULL;
    cudaMalloc(&d_pis, N_KERNELS * sizeof(float));
    cudaMalloc(&d_rands, N_RANDS * sizeof(float));
    // fill d_rands with uniform samples
    curandGenerator_t prng;
    curandCreateGenerator(&prng, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(prng, 0);
    curandGenerateUniform(prng, d_rands, N_RANDS);
    curandDestroyGenerator(prng);
    pi<<<N_BLOCKS, N_THREADS>>>(d_pis, d_rands, N_TRIALS*N_RANDS_PER_TRIAL, N_TRIALS);
    float* h_pis = (float*) malloc(N_KERNELS*sizeof(float));
    // blocking copy in the default stream — also waits for the kernel
    cudaMemcpy(h_pis, d_pis, N_KERNELS*sizeof(float), cudaMemcpyDeviceToHost);
    float avg = 0.0;
    for (int k = 0; k < N_KERNELS; ++k)
        avg += h_pis[k];
    avg /= N_KERNELS;
    printf("pi = %f\n", 4*avg);
    free(h_pis);
    cudaFree(d_pis);
    cudaFree(d_rands);
    return 0;
}
|
782
|
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define BLOCK_SIZE 16
// One thread per pixel: iterates z <- z^2 + c with c derived from the pixel
// position and writes the escape iteration count (maxIterations if z never
// leaves the |z|^2 <= 4 disc) to device_img.
__global__ void mandelKernel(int* device_img, float lowerX, float lowerY, float stepX, float stepY, int width, int height, int maxIterations)
{
    // pixel coordinates from the 2D launch; partial edge tiles bounds-checked
    const unsigned int px = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int py = blockIdx.y * blockDim.y + threadIdx.y;
    if (px >= width || py >= height)
        return;
    const float c_re = lowerX + px * stepX;
    const float c_im = lowerY + py * stepY;
    float z_re = c_re;
    float z_im = c_im;
    int iter = 0;
    while (iter < maxIterations && z_re * z_re + z_im * z_im <= 4.f) {
        const float t_re = z_re * z_re - z_im * z_im;
        const float t_im = 2.f * z_re * z_im;
        z_re = c_re + t_re;
        z_im = c_im + t_im;
        ++iter;
    }
    device_img[py * width + px] = iter;
}
// Host front-end function that allocates the memory and launches the GPU kernel
// Host front-end: allocates the device image, launches mandelKernel over a
// grid covering the full resX x resY image, and copies the iteration counts
// back into the caller's `img` buffer.
void hostFE (float upperX, float upperY, float lowerX, float lowerY, int* img, int resX, int resY, int maxIterations)
{
    int* device_img;
    float stepX = (upperX - lowerX) / resX;
    float stepY = (upperY - lowerY) / resY;
    size_t bytes = (size_t)resX * resY * sizeof(int);
    cudaMalloc((void **)&device_img, bytes);
    dim3 blockSize(BLOCK_SIZE, BLOCK_SIZE);
    // Fix: ceil-divide so resolutions that are not multiples of BLOCK_SIZE
    // still get full coverage (the old floor-division grid dropped the
    // partial edge tiles; the kernel already bounds-checks them).
    dim3 numBlock((resX + BLOCK_SIZE - 1) / BLOCK_SIZE,
                  (resY + BLOCK_SIZE - 1) / BLOCK_SIZE);
    mandelKernel<<<numBlock, blockSize>>>(device_img, lowerX, lowerY, stepX, stepY, resX, resY, maxIterations);
    cudaDeviceSynchronize();
    // Fix: copy straight into the caller's buffer — the old intermediate
    // host_img was malloc'd but never freed (leak) and added a redundant memcpy.
    cudaMemcpy(img, device_img, bytes, cudaMemcpyDeviceToHost);
    cudaFree(device_img);
}
|
783
|
#include "includes.h"
// For each of the n entries of row `row`, replaces the value x with 1 - x.
// Element i of the row lives at M[i * height + row] (stride `height`
// between consecutive i). Grid-stride loop over the n entries.
__global__ void bitflip_kernel(float* M, int height, int row, int n) {
    const unsigned int first = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;  // total thread count
    for (unsigned int col = first; col < n; col += stride) {
        const unsigned int at = col * height + row;
        M[at] = 1 - M[at];
    }
}
|
784
|
#include "includes.h"
// Matrix-vector product: d_c[i] = sum_k d_a[i + k*N] * d_b[k], i.e. element
// (i, k) of the matrix is stored at i + k*N. One thread per output element;
// N is a file-scope constant defined elsewhere.
__global__ void MatrVectMul(int *d_c, int *d_a, int *d_b)
{
    const int row = blockIdx.x*blockDim.x+threadIdx.x;
    if (row >= N)
        return;
    int acc = 0;  // accumulate in a register, single store at the end
    for (int k = 0; k < N; k++)
        acc += d_a[row + k*N] * d_b[k];
    d_c[row] = acc;
}
|
785
|
#include <cuda_runtime.h>
#include <float.h>
#include <limits.h>
#include <iostream>
// One compare-exchange pass of a bitonic sorting network: thread i pairs
// with i ^ j and orders the pair ascending or descending depending on bit k
// of i. Only the lower index of each pair performs the exchange.
__global__ void bitonic_sort_step(float *dev_values, int j, int k){
    const unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    const unsigned int partner = i ^ j;
    if (partner <= i)
        return;  // the partner thread handles this pair
    const bool ascending = ((i & k) == 0);
    const float a = dev_values[i];
    const float b = dev_values[partner];
    if (ascending ? (a > b) : (a < b)) {
        dev_values[i] = b;
        dev_values[partner] = a;
    }
}
// Compare-exchange pass that keeps dev_followers[] in lockstep with
// dev_values[]: whenever two key values swap, their payloads swap too.
__global__ void bitonic_sort_step_with_follower(float *dev_values, int *dev_followers, int j, int k){
    const unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    const unsigned int partner = i ^ j;
    if (partner <= i)
        return;  // the partner thread handles this pair
    const bool ascending = ((i & k) == 0);
    const float a = dev_values[i];
    const float b = dev_values[partner];
    if (ascending ? (a > b) : (a < b)) {
        dev_values[i] = b;
        dev_values[partner] = a;
        const int fa = dev_followers[i];
        dev_followers[i] = dev_followers[partner];
        dev_followers[partner] = fa;
    }
}
// Pads the slots beyond `length` with sentinel maxima (FLT_MAX/INT_MAX) so
// the padded tail sorts to the end during the bitonic passes.
__global__ void max_padding(float *dev_values, int *dev_followers, int length){
    const unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < length)
        return;  // real data: leave untouched
    dev_values[idx] = FLT_MAX;
    dev_followers[idx] = INT_MAX;
}
// Sorts `values` ascending in place with a bitonic sorting network.
// Preconditions (not checked): NUM_VALS is a power of two and
// BLOCKS * THREADS == NUM_VALS, so each element has exactly one thread.
void bitonic_sort(float *values, int NUM_VALS, int BLOCKS, int THREADS){
float *dev_values;
size_t size = NUM_VALS * sizeof(float);
cudaMalloc((void **)&dev_values, size);
cudaMemcpy(dev_values, values, size, cudaMemcpyHostToDevice);
dim3 blocks(BLOCKS, 1);
dim3 threads(THREADS, 1);
// k: size of the bitonic sequences being merged; j: compare distance.
// Each launch is one global compare-exchange pass; the serialization
// between launches in the default stream acts as the cross-block barrier.
for(int k = 2; k <= NUM_VALS; k <<= 1){
for(int j = (k >> 1); j > 0; j >>= 1){
bitonic_sort_step<<<blocks, threads>>>(dev_values, j, k);
}
}
// blocking copy — also synchronizes with the last kernel launch
cudaMemcpy(values, dev_values, size, cudaMemcpyDeviceToHost);
cudaFree(dev_values);
}
// Key/payload bitonic sort: sorts `values` ascending and permutes
// `followers` identically. NUM_VALS is the padded power-of-two size;
// NUM_ORIGINAL the real element count — slots in [NUM_ORIGINAL, NUM_VALS)
// are filled with FLT_MAX/INT_MAX sentinels so they sort to the tail.
// NOTE(review): every cudaError_t is captured into `error` but never acted
// on (the debug prints are commented out).
void bitonic_sort_with_follower(float *values, int *followers, int NUM_ORIGINAL, int NUM_VALS, int BLOCKS, int THREADS){
float *dev_values;
int *dev_followers;
size_t size_values = NUM_VALS * sizeof(float);
size_t size_followers = NUM_VALS * sizeof(int);
cudaError_t error = cudaMalloc((void **)&dev_values, size_values);
//std::cout<<error<<std::endl;
error = cudaMalloc((void **)&dev_followers, size_followers);
//std::cout<<error<<std::endl;
error = cudaMemcpy(dev_values, values, size_values, cudaMemcpyHostToDevice);
//std::cout<<error<<std::endl;
error = cudaMemcpy(dev_followers, followers, size_followers, cudaMemcpyHostToDevice);
//std::cout<<error<<std::endl;
dim3 blocks(BLOCKS, 1);
dim3 threads(THREADS, 1);
// overwrite the slack beyond NUM_ORIGINAL with sentinel maxima
max_padding<<<blocks, threads>>>(dev_values, dev_followers, NUM_ORIGINAL);
// standard bitonic network: k = merge size, j = compare distance
for(int k = 2; k <= NUM_VALS; k <<= 1){
for(int j = (k >> 1); j > 0; j >>= 1){
bitonic_sort_step_with_follower<<<blocks, threads>>>(dev_values, dev_followers, j, k);
}
}
error = cudaMemcpy(values, dev_values, size_values, cudaMemcpyDeviceToHost);
//std::cout<<error<<std::endl;
error = cudaMemcpy(followers, dev_followers, size_followers, cudaMemcpyDeviceToHost);
//std::cout<<dev_values<<std::endl;
//std::cout<<dev_followers<<std::endl;
error = cudaFree(dev_values);
//std::cout<<error<<std::endl;
cudaFree(dev_followers);
//std::cout<<error<<std::endl;
}
|
786
|
#include "includes.h"
// Van Leer radial advection update for one (ring i, sector j) cell:
// accumulates into Qbase the net flux of Q through the cell's inner (Rinf)
// and outer (Rsup) radial faces over time step dt, scaled by the inverse
// cell surface. PI is a file-scope constant defined elsewhere.
// Grid: x covers the nsec azimuthal sectors, y covers the nrad rings.
__global__ void VanLeerRadialKernel (double *Rinf, double *Rsup, double *QRStar, double *DensStar, double *Vrad, double *LostByDisk, int nsec, int nrad, double dt, int OpenInner, double *Qbase, double *invSurf)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
double varq, dtheta;
if (i<nrad && j<nsec){
dtheta = 2.0*PI/(double)nsec;
// flux in through the inner face minus flux out through the outer face.
// NOTE(review): row i+1 is read below, so QRStar/DensStar/Vrad must hold
// at least (nrad+1)*nsec entries — confirm against the allocations.
varq = dt*dtheta*Rinf[i]*QRStar[i*nsec + j]* DensStar[i*nsec + j]*Vrad[i*nsec + j];
varq -= dt*dtheta*Rsup[i]*QRStar[(i+1)*nsec + j]* DensStar[(i+1)*nsec + j]*Vrad[(i+1)*nsec + j];
Qbase[i*nsec + j] += varq*invSurf[i];
// innermost ring with an open boundary: record the material that left
if (i==0 && OpenInner)
LostByDisk[j] = varq;
}
}
|
787
|
#include <bits/stdc++.h>
#include <chrono>
using namespace std;
// Grid-stride loop: replaces every negative element of arr[0..n) with its
// absolute value; non-negative elements are left untouched.
__global__ void kernel(float *arr, int n) {
    const int first = blockDim.x * blockIdx.x + threadIdx.x;  // global thread id
    const int stride = blockDim.x * gridDim.x;                // total thread count
    for (int pos = first; pos < n; pos += stride) {
        if (arr[pos] < 0)
            arr[pos] = abs(arr[pos]);
    }
}
// Benchmark driver: for each thread-count configuration and each test file,
// reads n floats, runs the abs-kernel on the GPU and appends the elapsed
// time to log_gpu.txt.
int main() {
    vector<int> v {32, 128, 512, 1024};
    for (int idx = 0; idx < (int)v.size(); ++idx) {
        for (int j = 0; j < 16; ++j) {
            string str = to_string(j);
            string name = "tests/" + str + ".t";
            freopen(name.c_str(), "r", stdin);
            freopen("log_gpu.txt", "a", stdout);
            long long n;
            scanf("%lld", &n);
            cout << "GPU:\n";
            cout << "n: " << n << "\n";
            cout << "Threads: " << v[idx] << ", " << v[idx] << "\n";
            float *arr = (float *)malloc(sizeof(float) * n);
            for(long long i = 0; i < n; ++i) {
                scanf("%f", &arr[i]);
            }
            float *dev_arr;
            cudaMalloc(&dev_arr, sizeof(float) * n);
            cudaMemcpy(dev_arr, arr, sizeof(float) * n, cudaMemcpyHostToDevice);
            auto start = chrono::steady_clock::now();
            // NOTE: the kernel takes `int n`; values above INT_MAX truncate.
            kernel<<<v[idx], v[idx]>>>(dev_arr, n);
            // Fix: kernel launches are asynchronous — without this sync the
            // timer measured only the launch overhead, not the kernel itself.
            cudaDeviceSynchronize();
            auto end = chrono::steady_clock::now();
            cudaMemcpy(arr, dev_arr, sizeof(float) * n, cudaMemcpyDeviceToHost);
            cudaFree(dev_arr);
            free(arr);
            cout << ((double)chrono::duration_cast<chrono::microseconds>(end - start).count()) / 1000.0 << "ms\n\n";
        }
    }
    return 0;
}
|
788
|
#include "includes.h"
// Block-wise tree reduction: sums a blockDim.x-sized segment of `input` in
// place and writes the block's partial sum to output[blockIdx.x].
// `input` is clobbered; `len` guards the tail block's out-of-range reads.
__global__ void totalWithThreadSync(float *input, float *output, int len) {
//@@ Compute reduction for a segment of the input vector
int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
// halve the active stride each pass; threads tid < j fold element i+j into i
for(unsigned int j = blockDim.x/2; j > 0; j = j/2)
{
if(tid < j)
{
if ((i + j) < len)
input[i] += input[i+j];
else
input [i] += 0.0;
}
// barrier is outside the divergent branch, as required — all threads reach it
__syncthreads();
}
// thread 0 of each block holds the block total at input[blockIdx.x * blockDim.x]
if(tid == 0)
{
output[blockIdx.x] = input[i];
}
}
|
789
|
#include <stdio.h>
#include "cuda.h"
#include "cuda_runtime.h"
// Define matrix width
#define N 100
#define BLOCK_DIM 32
#define SIGMA 20.0
// Define tile size
#define TILE_WIDTH 2
// Non shared version
// Element-wise Gaussian similarity over a length x length grid:
// C[t] = exp(-(A[t]-B[t])^2 / (2*sigma^2)) with t = length*y + x.
__global__ void computeMatrix(float *dVectorA, float *dVectorB, float *dVectorC, int length, float sigma)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= length || row >= length)
        return;  // outside the matrix
    const int tid = length * row + col;
    float diff = dVectorA[tid] - dVectorB[tid];
    diff = (diff*diff)/(2*(sigma*sigma));
    dVectorC[tid] = exp(-diff);
}
// Shared version doesn't work
// NOTE(review): left byte-for-byte as found. Problems visible in this code:
// Ads is written at [threadIdx.y][threadIdx.x] but read at [threadIdx.x][k]
// (transposed); `tmp` is overwritten (not accumulated) on every k iteration;
// and dVectorC is stored inside the k loop, so only the last k survives.
// The non-shared computeMatrix above is purely element-wise, so a faithful
// shared-memory version would need a different structure entirely.
// Do not use this kernel as-is.
__global__ void computeMatrixShared(float *dVectorA, float *dVectorB, float *dVectorC, int length, float sigma)
{
__shared__ float Ads[TILE_WIDTH][TILE_WIDTH];
__shared__ float Bds[TILE_WIDTH][TILE_WIDTH];
float tmp = 0;
unsigned int col = TILE_WIDTH*blockIdx.x + threadIdx.x;
unsigned int row = TILE_WIDTH*blockIdx.y + threadIdx.y;
for(int m = 0; m < length/TILE_WIDTH; m++)
{
Ads[threadIdx.y][threadIdx.x] = dVectorA[row * length +(m * TILE_WIDTH + threadIdx.x)];
Bds[threadIdx.y][threadIdx.x] = dVectorB[(m*TILE_WIDTH + threadIdx.y) * length + col];
// Synchronize all threads
__syncthreads();
for(int k = 0; k < TILE_WIDTH; k++)
{
tmp = Ads[threadIdx.x][k] + Bds[k][threadIdx.y];
tmp = (tmp*tmp)/(2*(sigma*sigma));
dVectorC[row * length + col] = exp(-tmp);
}
// Synchronize all threads
__syncthreads();
}
}
// Host driver: fills two N*N input arrays, computes the element-wise
// Gaussian similarity matrix on the GPU and prints it.
int main()
{
    cudaSetDevice(0);
    int totalLength = N * N;
    // NOTE: ~40 KB per array on the stack; fine for N=100, heap-allocate if N grows.
    float hVectorA[totalLength];
    float hVectorB[totalLength];
    float hVectorC[totalLength];
    float *dVectorA = NULL;
    float *dVectorB = NULL;
    float *dVectorC = NULL;
    // Fill arrays
    for (int i = 0; i < totalLength; i++)
    {
        hVectorA[i] = 2*i;
        hVectorB[i] = 1*i;
    }
    int size = sizeof(float) * totalLength;
    // Transfert A and B to device
    cudaMalloc((void**) &dVectorA, size);
    cudaMalloc((void**) &dVectorB, size);
    cudaMalloc((void**) &dVectorC, size);
    cudaMemcpy(dVectorA, hVectorA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dVectorB, hVectorB, size, cudaMemcpyHostToDevice);
    // -- Grid mapping
    // Fix: computeMatrix uses 2D (x, y) indexing over an N x N matrix, so
    // the launch must be 2D. The previous 1D launch left every thread with
    // y == 0, so only the first row of the matrix was ever computed.
    dim3 threads(BLOCK_DIM, BLOCK_DIM);
    dim3 blocks((N + BLOCK_DIM - 1) / BLOCK_DIM,
                (N + BLOCK_DIM - 1) / BLOCK_DIM);
    // -- Kernel invocation code
    computeMatrix<<<blocks, threads>>>(dVectorA, dVectorB, dVectorC, N, SIGMA);
    // Transfert C from device to host (blocking copy — also syncs the kernel)
    cudaMemcpy(hVectorC, dVectorC, size, cudaMemcpyDeviceToHost);
    for (int i = 0; i < totalLength; i++)
        printf("%0.1f\t", hVectorC[i]);
    printf("\n");
    // Free memories
    cudaFree(dVectorA);
    cudaFree(dVectorB);
    cudaFree(dVectorC);
    return 0;
}
|
790
|
//template<typename T>
//__device__ void sliceRows(const T* matrix, const int from, const int to, T* result,
// const int numRows, const int numColumns) {
//
// int bx = blockIdx.x;
// int by = blockIdx.y;
// int tx = threadIdx.x;
// int ty = threadIdx.y;
//
// int row = by * blockDim.y + ty;
// int col = bx * blockDim.x + tx;
//
// int resultNumRows = to - from;
// if (row < resultNumRows && col < numColumns) {
// int ij = row * numColumns + col;
// result[ij] = matrix[row * numColumns + from + col];
// }
//}
// Copies columns [from, to) of `matrix` (numRows x numColumns, row-major)
// into `result` (numRows x (to-from), row-major). One thread per output cell
// via the 2D launch configuration.
template<typename T>
__device__ void sliceColumns(const T* matrix, const int from, const int to, T* result,
const int numRows, const int numColumns) {
    const int outCols = to - from;
    const int r = blockIdx.y * blockDim.y + threadIdx.y;
    const int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= numRows || c >= outCols)
        return;  // outside the result slice
    result[r * outCols + c] = matrix[r * numColumns + from + c];
}
|
791
|
#include "includes.h"
// One step of a naive parallel prefix sum: arr[i] += arr[i - step].
// NOTE(review): __syncthreads() only synchronises within a block, so this is
// only race-free when a step's reads and writes stay inside one block —
// confirm the launch configuration guarantees that.
__global__ void prefixSum(float* arr,int step){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    // Fixes two defects in the original:
    //  1. `int temp` truncated the float value being carried forward.
    //  2. Threads with i < step returned before __syncthreads(), leaving the
    //     remaining threads at a barrier not all threads reach (UB).
    float temp = 0.0f;
    const bool active = (i >= step);
    if (active)
        temp = arr[i - step];
    __syncthreads();
    if (active)
        arr[i] += temp;
}
|
792
|
#include <cstdio>
#include <cstdlib>
// Inclusive (Hillis-Steele) scan of pos[0..range) using tmp as scratch;
// thread i owns element i. Restructured so every participating thread
// reaches both barriers: the original returned early (`if (i<j) return;`)
// before later __syncthreads() calls, which is undefined behaviour.
// NOTE(review): the caller invokes this only for threads with i < range,
// i.e. from inside a divergent branch — threads with i >= range skip these
// barriers entirely. Verify the launch makes that safe (e.g. hoist the call
// out of the branch, or size the block to the scanning threads).
__device__ void count(int *pos, int *tmp, int range, int i) {
    for (int j = 1; j < range; j <<= 1) {
        tmp[i] = pos[i];
        __syncthreads();
        if (i >= j)
            pos[i] += tmp[i - j];
        __syncthreads();  // tmp is rewritten next iteration: wait for all reads
    }
}
// Single-block bucket sort of kptr[0..n) with keys in [0, range).
// bucptr: per-key counts; pos: inclusive prefix sums of the counts;
// tmp: scratch for the scan. After counting, each thread i rewrites kptr[i]
// to the bucket whose prefix interval contains i, yielding sorted keys.
// NOTE(review): correctness relies on a single-block launch with
// blockDim.x >= n — __syncthreads() cannot order work across blocks.
// NOTE(review): pos[i] = bucptr[i] reads the counters without a barrier
// after the atomicAdds, so counts may be incomplete; a __syncthreads()
// before the `if (i<range)` block looks necessary — verify with racecheck.
// NOTE(review): count() is called from inside a divergent branch yet
// contains barriers — fragile; see the note on count().
__global__ void bucket_sort(int *bucptr, int *kptr, int *pos, int *tmp, int n, int range){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i>=n) return;
if (i<range) bucptr[i] = 0;
__syncthreads();
atomicAdd(&bucptr[kptr[i]], 1);
if (i<range) {
pos[i] = bucptr[i];
count(pos, tmp, range, i);
}
__syncthreads();
// pos[j-1] <= i < pos[j] means element i belongs to bucket j
for (int j=0; j<range; j++) {
__syncthreads();
if (j==0 && i<pos[j]) {
kptr[i] = j;
return;
}
else if (i<pos[j] && i>=pos[j-1]) {
kptr[i] = j;
return;
}
}
}
// Driver: fills `key` with n random values in [0, range), sorts them on the
// GPU with bucket_sort, and prints the array before and after.
int main() {
int n = 50;
int range = 5;
int *key, *bucket;
// managed memory: accessible from host (printing) and device (kernel)
cudaMallocManaged(&key, n*sizeof(int));
cudaMallocManaged(&bucket, range*sizeof(int));
for (int i=0; i<n; i++) {
key[i] = rand() % range;
printf("%d ",key[i]);
}
printf("\n");
// single block of m >= n threads — bucket_sort requires a one-block launch
const int m = 64;
int *pos, *tmp;
cudaMallocManaged(&pos, range*sizeof(int));
cudaMallocManaged(&tmp, range*sizeof(int));
bucket_sort<<<1, m>>>(bucket, key, pos, tmp, n, range);
// wait for the kernel before reading `key` back on the host
cudaDeviceSynchronize();
for (int i=0; i<n; i++) {
printf("%d ",key[i]);
}
printf("\n");
cudaFree(key);
cudaFree(bucket);
cudaFree(pos);
cudaFree(tmp);
}
|
793
|
#include <iostream>
#include <math.h>
#include <stdio.h>
// Element-wise vector addition: c[i] = x[i] + y[i] for i in [0, n).
__global__ void add(int n, float *x, float *y, float *c) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = x[i] + y[i];
    }
}
// Initialise the first n slots of x with 1.0f and of y with 2.0f.
void FillWithData(int n, float* x, float* y) {
    for (int idx = 0; idx < n; ++idx) {
        x[idx] = 1.0f;
        y[idx] = 2.0f;
    }
}
// Set every element of x[0..n) to `value`.
void FillWith(int n, float value, float* x) {
    for (int idx = 0; idx < n; ++idx)
        x[idx] = value;
}
// Abort the process if the most recent CUDA API call or kernel launch
// reported an error.
void CheckCudaError() {
    const cudaError_t status = cudaGetLastError();
    if (status == cudaSuccess)
        return;
    printf("Error: %s\n", cudaGetErrorString(status));
    exit(-1);
}
// Host driver: adds two N-element vectors on the GPU and prints ~100
// sampled results.
int main(void) {
    int N = 1<<20;
    float *x, *y, *c;
    float *d_x, *d_y, *d_c;
    int size = N * sizeof(float);
    x = (float*) malloc(size);
    y = (float*) malloc(size);
    c = (float*) malloc(size);
    FillWithData(N, x, y);
    FillWith(N, 0.0f, c);
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    cudaMalloc(&d_c, size);
    cudaMemcpy(d_x, x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, size, cudaMemcpyHostToDevice);
    // Fix: ceil-divide the grid. The old N/(1024-1) was a garbled version of
    // the (N + threads - 1) / threads pattern and could mis-size the grid.
    int threads = 1024;
    int blocks = (N + threads - 1) / threads;
    add<<<blocks, threads>>>(N, d_x, d_y, d_c);
    CheckCudaError();
    cudaDeviceSynchronize();
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    int i = 0;
    int sample_rate = N / 100;
    for (i = 0; i < N; i=i+sample_rate) {
        printf("Value %d - %f + %f = %f\n" , i, x[i], y[i], c[i]);
    }
    // Free memory — the original leaked both c and d_c.
    free(x); free(y); free(c);
    cudaFree(d_x); cudaFree(d_y); cudaFree(d_c);
    return 0;
}
|
794
|
#include <stdio.h>
// Prints basic capability and memory properties for every CUDA device.
int main() {
    const int kb = 1024;
    const int mb = kb * kb;
    const int gb = mb * kb;
    int deviceCount = 0;
    cudaGetDeviceCount(&deviceCount);
    for (int dev = 0; dev < deviceCount; ++dev) {
        cudaDeviceProp prop;
        cudaGetDeviceProperties(&prop, dev);
        printf("\nDevice %d - GPU Card name : %s\n", dev, prop.name);
        printf("Compute Capabilities : %d.%d\n", prop.major, prop.minor);
        printf("Maximum Block Dimensions : %d x %d x %d\n",
        prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]);
        printf("Maximum Grid dimensions : %d x %d x %d\n",
        prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf("Total global memory : %0.2lf GB\n", prop.totalGlobalMem *1.0 / gb);
        printf("Total Constant memory : %0.2lf KB\n", prop.totalConstMem *1.0 / kb);
        printf("Shared memory per block : %0.2lf KB\n", prop.sharedMemPerBlock *1.0 / kb);
        printf("Warp size : %d\n", prop.warpSize);
    }
}
|
795
|
#include <iostream>
//#include <Cuda.h>
#include<curand.h>
#include<curand_kernel.h>
int n = 200;
using namespace std;
// Draw one uniform float sample from the RNG state stored at slot `ind`,
// writing the advanced state back to global memory.
__device__ float generate( curandState* globalState, int ind )
{
    curandState state = globalState[ind];   // work on a local copy
    const float sample = curand_uniform( &state );
    globalState[ind] = state;               // persist the advanced state
    return sample;
}
// Initialise one curandState per thread from `seed`, one subsequence each.
// Fix: use the global thread index. The original used threadIdx.x only,
// which gives duplicate ids (and duplicate sequences) whenever gridDim.x > 1;
// for single-block launches the behaviour is unchanged.
__global__ void setup_kernel ( curandState * state, unsigned long seed )
{
    int id = blockIdx.x * blockDim.x + threadIdx.x;
    curand_init ( seed, id, 0, &state[id] );
}
// Single-thread kernel (launched <<<1,1>>> in main): fills N[0..40000)
// with pseudo-random values, each folded into range by repeatedly
// subtracting (n*n - 1) until k <= n*n - 1.
// NOTE(review): generate() is called with state indices up to 39999, so
// globalState must hold at least 40000 *initialised* states — confirm the
// setup launch actually ran for all of them.
__global__ void kernel(float* N, curandState* globalState, int n)
{
// generate random numbers
for(int i=0;i<40000;i++)
{
// scale the uniform sample to [0, 100000), then fold into range
int k = generate(globalState, i) * 100000;
while(k > n*n-1)
{
k-=(n*n-1);
}
N[i] = k;
}
}
// Driver: seeds 40000 RNG states, generates 40000 values on the GPU and
// prints them.
int main()
{
int N=40000;
curandState* devStates;
cudaMalloc ( &devStates, N*sizeof( curandState ) );
// setup seeds
// NOTE(review): N = 40000 threads in a single block exceeds the CUDA limit
// of 1024 threads per block, so this launch should fail and leave devStates
// uninitialised — check cudaGetLastError() and split across blocks to fix.
setup_kernel <<< 1, N >>> ( devStates,unsigned(time(NULL)) );
float N2[40000];
float* N3;
cudaMalloc((void**) &N3, sizeof(float)*N);
kernel<<<1,1>>> (N3, devStates, n);
// blocking copy — also synchronises with the kernel launch above
cudaMemcpy(N2, N3, sizeof(float)*N, cudaMemcpyDeviceToHost);
for(int i=0;i<N;i++)
{
cout<<N2[i]<<endl;
}
// NOTE(review): devStates and N3 are never cudaFree'd (leak until exit)
return 0;
}
|
796
|
#include <stdio.h>
#include <iostream>
#include <string>
#include <fstream>
#include <sstream>
#include <vector>
#include <algorithm>
#include <numeric>
#include <thrust/complex.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
using namespace std;
const int MAX_THREADS = 1024;
// Log a failed CUDA call (without aborting) and hand the status back so
// call sites can still branch on it.
inline cudaError_t checkCuda(cudaError_t result)
{
    if (result == cudaSuccess)
        return result;
    cout << "CUDA Runtime Error: " << cudaGetErrorName(result) << " - " << cudaGetErrorString(result) << endl;
    return result;
}
// For each point, counts how many other points lie within `neighborRadius`
// (Euclidean distance) and writes the count to radiusNeighborCount[index].
// `inputs` is packed xyz (3 doubles per point); one thread per point.
__global__ void radiusKernel(double *inputs, int pointCount, double neighborRadius, int *radiusNeighborCount) {
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    if (index >= pointCount) return;
    double x = inputs[3 * index];
    double y = inputs[3 * index + 1];
    double z = inputs[3 * index + 2];
    // Fix: accumulate locally and store once. The original incremented the
    // global slot per hit, which also silently depended on the cudaMalloc'd
    // buffer starting at zero (cudaMalloc does not zero memory).
    int neighbors = 0;
    for (int i = 0; i < pointCount; i++) {
        if (index == i) continue;
        double dx = x - inputs[3 * i];
        double dy = y - inputs[3 * i + 1];
        double dz = z - inputs[3 * i + 2];
        // Fix: double-precision sqrt. The original called sqrtf, rounding the
        // distance to float and misclassifying points near the radius boundary.
        double neighborDistance = sqrt(dx * dx + dy * dy + dz * dz);
        if (neighborDistance <= neighborRadius) {
            neighbors++;
        }
    }
    radiusNeighborCount[index] = neighbors;
}
// Per-point geometric feature extraction. For each point (one thread each),
// builds the local 3x3 covariance matrix S of its neighbourhood — via kNN
// (vicinityAlgo == 1, using neighborCount) or a fixed-distance radius search
// (vicinityAlgo == 2, using neighborRadius and the per-point counts
// precomputed by radiusKernel) — then evaluates the closed-form cubic
// eigenvalue solution in complex arithmetic (thrust::complex) and writes six
// features per point into `features`: linearity, planarity, sphericity,
// cbrt(l1*l2*l3), anisotropy, eigenentropy.
__global__ void kernel(
double *inputs, double *features, double *bestNeighbors, double *bestNeighborsIndeces, double *OO,
int pointCount, int vicinityAlgo, int neighborCount, double neighborRadius,
double *radiusBestNeighborsIndeces, double *radiusOO, int *radiusNeighborCount
) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index >= pointCount) return;
int i, j, k;
double x = inputs[3 * index];
double y = inputs[3 * index + 1];
double z = inputs[3 * index + 2];
double neighborX;
double neighborY;
double neighborZ;
double neighborDistance;
// mi: centroid of the neighbourhood; S: 3x3 covariance matrix
double mi[3];
mi[0] = 0.0; mi[1] = 0.0; mi[2] = 0.0;
float S[3][3];
if (vicinityAlgo == 1) {
// --- kNN branch: maintain a sorted list of the neighborCount nearest
// distances (and their point indices) by insertion.
for (i = 0; i < neighborCount; i++) {
bestNeighbors[index * neighborCount + i] = INFINITY;
bestNeighborsIndeces[index * neighborCount + i] = INFINITY;
}
for (i = 0; i < pointCount; i++) {
if (index == i) continue;
neighborX = inputs[3 * i];
neighborY = inputs[3 * i + 1];
neighborZ = inputs[3 * i + 2];
neighborDistance = sqrtf(
(x - neighborX) * (x - neighborX) +
(y - neighborY) * (y - neighborY) +
(z - neighborZ) * (z - neighborZ)
);
//if (index == 0) printf("%g ", neighborDistance);
// insertion: shift worse entries down to make room
for (j = 0; j < neighborCount; j++) {
if (neighborDistance < bestNeighbors[index * neighborCount + j]) {
for (k = neighborCount - 1; k > j; k--) {
bestNeighbors[index * neighborCount + k] = bestNeighbors[index * neighborCount + k - 1];
bestNeighborsIndeces[index * neighborCount + k] = bestNeighborsIndeces[index * neighborCount + k - 1];
}
bestNeighbors[index * neighborCount + j] = neighborDistance;
bestNeighborsIndeces[index * neighborCount + j] = i;
break;
}
}
}
// centroid of the k nearest neighbours
for (i = 0; i < neighborCount; i++) {
mi[0] += inputs[3 * (int)bestNeighborsIndeces[index * neighborCount + i] + 0] / neighborCount;
mi[1] += inputs[3 * (int)bestNeighborsIndeces[index * neighborCount + i] + 1] / neighborCount;
mi[2] += inputs[3 * (int)bestNeighborsIndeces[index * neighborCount + i] + 2] / neighborCount;
//if (index == 0) printf("%f\t%f\t%f\n", mi[0], mi[1], mi[2]);
}
// OO: neighbour coordinates centred on the centroid
for (i = 0; i < neighborCount; i++) {
OO[index * neighborCount * 3 + (i * 3 + 0)] = inputs[3 * (int)bestNeighborsIndeces[index * neighborCount + i] + 0] - mi[0];
OO[index * neighborCount * 3 + (i * 3 + 1)] = inputs[3 * (int)bestNeighborsIndeces[index * neighborCount + i] + 1] - mi[1];
OO[index * neighborCount * 3 + (i * 3 + 2)] = inputs[3 * (int)bestNeighborsIndeces[index * neighborCount + i] + 2] - mi[2];
//if (index == 2000) printf("%d: %f %f %f\n", i, OO[i * 3 + 0], OO[i * 3 + 1], OO[i * 3 + 2]);
}
// S = (1/k) * OO^T * OO
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
S[i][j] = 0.0;
for (k = 0; k < neighborCount; k++) {
S[i][j] += OO[index * neighborCount * 3 + (k * 3 + j)] * OO[index * neighborCount * 3 + (k * 3 + i)];
}
S[i][j] /= neighborCount;
//if (index == 2000) printf("%.17g ", S[i][j]);
}
//if (index == 2000) printf("\n");
}
}
else if (vicinityAlgo == 2) {
// --- fixed-distance branch: this point's neighbour indices live in a
// shared packed array at offset previousIndecesSum (sum of earlier counts).
int previousIndecesSum = 0;
if (radiusNeighborCount[index] == 0) {
return;
}
for (j = 0; j < index; j++) {
previousIndecesSum += radiusNeighborCount[j];
//if (index == 2) printf("%d: %d\n", index, previousIndecesSum);
}
int neighborsFoundCount = 0;
for (i = 0; i < pointCount; i++) {
if (index == i) continue;
neighborX = inputs[3 * i];
neighborY = inputs[3 * i + 1];
neighborZ = inputs[3 * i + 2];
neighborDistance = sqrtf(
(x - neighborX) * (x - neighborX) +
(y - neighborY) * (y - neighborY) +
(z - neighborZ) * (z - neighborZ)
);
if (neighborDistance <= neighborRadius) {
radiusBestNeighborsIndeces[previousIndecesSum + neighborsFoundCount] = i;
neighborsFoundCount++;
}
}
// centroid of the in-radius neighbours
for (i = 0; i < radiusNeighborCount[index]; i++) {
mi[0] += inputs[3 * (int)radiusBestNeighborsIndeces[previousIndecesSum + i] + 0] / radiusNeighborCount[index];
mi[1] += inputs[3 * (int)radiusBestNeighborsIndeces[previousIndecesSum + i] + 1] / radiusNeighborCount[index];
mi[2] += inputs[3 * (int)radiusBestNeighborsIndeces[previousIndecesSum + i] + 2] / radiusNeighborCount[index];
//if (index == 2000) printf("%f\t%f\t%f\n", mi[0], mi[1], mi[2]);
}
// radiusOO uses a coordinate-major layout (all x's, then y's, then z's)
// — note this differs from the kNN branch's point-major OO layout.
for (i = 0; i < radiusNeighborCount[index]; i++) {
radiusOO[3 * previousIndecesSum + i + 0 * radiusNeighborCount[index]] =
inputs[3 * (int)radiusBestNeighborsIndeces[previousIndecesSum + i] + 0] - mi[0];
radiusOO[3 * previousIndecesSum + i + 1 * radiusNeighborCount[index]] =
inputs[3 * (int)radiusBestNeighborsIndeces[previousIndecesSum + i] + 1] - mi[1];
radiusOO[3 * previousIndecesSum + i + 2 * radiusNeighborCount[index]] =
inputs[3 * (int)radiusBestNeighborsIndeces[previousIndecesSum + i] + 2] - mi[2];
//if (index == 2000) printf("%d: %g %f %f\n", i,
// radiusOO[3 * previousIndecesSum + i + 0 * radiusNeighborCount[index]],
// radiusOO[3 * previousIndecesSum + i + 1 * radiusNeighborCount[index]],
// radiusOO[3 * previousIndecesSum + i + 2 * radiusNeighborCount[index]]);
}
// covariance from the centred coordinates
for (i = 0; i < 3; i++) {
for (j = 0; j < 3; j++) {
S[i][j] = 0.0;
for (k = 0; k < radiusNeighborCount[index]; k++) {
S[i][j] +=
radiusOO[3 * previousIndecesSum + i * radiusNeighborCount[index] + k] *
radiusOO[3 * previousIndecesSum + j * radiusNeighborCount[index] + k];
}
S[i][j] /= radiusNeighborCount[index];
}
}
}
// --- closed-form eigenvalues of the symmetric 3x3 matrix S
// (a..f are its distinct entries); evaluated via the complex cubic formula.
// NOTE(review): S is accumulated in float but the formula runs in double —
// precision is limited by the float accumulation.
thrust::complex<double> im = thrust::complex<double>(0.0, 1.0f);
double a = S[0][0];
double b = S[1][1];
double c = S[2][2];
double d = S[0][1];
double e = S[0][2];
double f = S[1][2];
double lambda1, lambda2, lambda3;
thrust::complex<double> lambdaPart1 = thrust::pow(2 * a*a*a - 3 * a*a*b - 3 * a*a*c +
thrust::sqrt(thrust::complex<double>(4) * thrust::pow(thrust::complex<double>(-a*a + a*b + a*c - b*b + b*c - c*c - 3 * d*d - 3 * e*e - 3 * f*f, 0.0), thrust::complex<double>(3)) +
thrust::pow(thrust::complex<double>(2 * a*a*a - 3 * a*a*b - 3 * a*a*c - 3 * a*b*b + 12 * a*b*c - 3 * a*c*c + 9 * a*d*d + 9 * a*e*e -
18 * a*f*f + 2 * b*b*b - 3 * b*b*c - 3 * b*c*c + 9 * b*d*d - 18 * b*e*e +
9 * b*f*f + 2 * c*c*c - 18 * c*d*d + 9 * c*e*e + 9 * c*f*f + 54 * d*e*f), thrust::complex<double>(2))) -
3 * a*b*b + 12 * a*b*c - 3 * a*c*c + 9 * a*d*d + 9 * a*e*e - 18 * a*f*f + 2 * b*b*b -
3 * b*b*c - 3 * b*c*c + 9 * b*d*d - 18 * b*e*e + 9 * b*f*f + 2 * c*c*c -
18 * c*d*d + 9 * c*e*e + 9 * c*f*f + 54 * d*e*f, thrust::complex<double>(1 / 3.0));
thrust::complex<double> lambdaPart2 = -a*a + a*b + a*c - b*b + b*c - c*c - 3 * d*d - 3 * e*e - 3 * f*f;
lambda1 = (1/(3*cbrt(2.0)) * lambdaPart1 -
cbrt(2.0) * lambdaPart2 /
(3.0 * lambdaPart1) +
(a+b+c)/3.0)
.real()
;
lambda2 = ((-(1.0 + im * sqrt(3.0)) / (6.0 * cbrt(2.0))) * lambdaPart1 +
(1.0 - im * sqrt(3.0)) * lambdaPart2 /
(3.0 * thrust::pow(thrust::complex<double>(2), 2 / 3.0) * lambdaPart1) +
(a + b + c) / 3.0)
.real()
;
lambda3 = ((-(1.0 - im * sqrt(3.0))/(6.0 * cbrt(2.0))) * lambdaPart1 +
(1.0 + im * sqrt(3.0)) * lambdaPart2 /
(3.0 * thrust::pow(thrust::complex<double>(2), 2/3.0) * lambdaPart1) +
(a + b + c) / 3.0)
.real()
;
if (index == 32) printf("%d: %.17g\n", index, lambda1);
if (index == 32) printf("%.17g\n", lambda2);
if (index == 32) printf("%.17g\n", lambda3);
if (index == 0) printf("\n");
// features derived from the (assumed ordered) eigenvalues
features[6 * index + 0] = (lambda1 - lambda2) / lambda1;
features[6 * index + 1] = (lambda2 - lambda3) / lambda1;
features[6 * index + 2] = lambda3 / lambda1;
features[6 * index + 3] = cbrt(lambda1 * lambda2 * lambda3);
features[6 * index + 4] = (lambda1 - lambda3) / lambda1;
features[6 * index + 5] = -((lambda1 * log(lambda1)) + (lambda2 * log(lambda2)) + (lambda3 * log(lambda3)));
// TODO: check if the lambda order is correct, i.e. l1 >= l2 >= l3
// TODO: check feature values (sometimes eigenentropy is -nan(ind))
}
// Program entry: parses the command line, loads a point cloud from the input
// file (one "x y z" triple per line), computes six per-point eigenvalue-based
// features on the GPU, and writes "x y z f0..f5" rows to the output file.
//
// Options:
//   -i <file>   input point file
//   -o <file>   output file
//   -n <int>    k-nearest-neighbor vicinity with k = <int>
//   -r <float>  fixed-distance vicinity with the given radius
// Exactly one of -n / -r must be supplied.
int main(int argc, char* argv[])
{
    int i, j;
    string inputName;
    string outputName;
    ifstream inputFile;
    ofstream outputFile;
    int vicinityAlgo = 0; // 1: kNN, 2: FDN
    int neighborCount = 0;
    double neighborRadius = 0.0;
    // Parse command-line flags; each flag consumes the following argument.
    for (i = 1; i < argc; ++i) {
        if (i + 1 < argc) {
            if (string(argv[i]) == "-i") {
                inputName = argv[++i];
            }
            if (string(argv[i]) == "-o") {
                outputName = argv[++i];
            }
            if (string(argv[i]) == "-n") {
                neighborCount = stoi(argv[++i]);
                vicinityAlgo += 1;
            }
            if (string(argv[i]) == "-r") {
                neighborRadius = stod(argv[++i]);
                vicinityAlgo += 2;
            }
        }
    }
    if (vicinityAlgo == 0) {
        cout << "ERROR: No vicinity algorithm parameters specified!" << endl;
    }
    else if (vicinityAlgo == 3) {
        cout << "ERROR: Too many vicinity algorithm parameters specified!" << endl;
    }
    else if (neighborCount <= 0 && neighborRadius <= 0.0) {
        cout << "ERROR: Incorrect vicinity algorithm parameters!" << endl;
    }
    else {
        cout << "Neighbor count : " << neighborCount << endl;
        cout << "Neighbor radius: " << neighborRadius << endl;
        inputFile.open(inputName);
        outputFile.open(outputName);
        // BUGFIX: fail loudly instead of silently producing an empty result
        // when either file cannot be opened.
        if (!inputFile.is_open() || !outputFile.is_open()) {
            cout << "ERROR: Cannot open input or output file!" << endl;
            return 1;
        }
        // Count lines to size the point buffers.
        // BUGFIX: the former (int) cast truncated the count for very large
        // files even though the variable itself is 64-bit.
        // NOTE(review): a final line without a trailing '\n' is not counted —
        // confirm input files are newline-terminated.
        unsigned long long int inputFileLineNumber = count(
            istreambuf_iterator<char>(inputFile),
            istreambuf_iterator<char>(),
            '\n'
        );
        inputFile.seekg(0);
        cout << "Points count: " << inputFileLineNumber << endl;
        string inputLine;
        double *inputs = (double*)malloc(3 * inputFileLineNumber * sizeof(double));
        int lineCounter = 0;
        while (getline(inputFile, inputLine)) {
            stringstream stream(inputLine);
            string s;
            for (i = 0; i < 3; i++) {
                getline(stream, s, ' ');
                // BUGFIX: parse with stod — stof rounded the coordinates to
                // float precision before storing them in a double array.
                inputs[lineCounter * 3 + i] = stod(s);
            }
            lineCounter++;
        }
        checkCuda(cudaDeviceReset());
        checkCuda(cudaSetDevice(0));
        double *cudaInputs;
        checkCuda(cudaMalloc((double**)&cudaInputs, 3 * inputFileLineNumber * sizeof(double)));
        checkCuda(cudaMemcpy(cudaInputs, inputs, 3 * inputFileLineNumber * sizeof(double), cudaMemcpyHostToDevice));
        int *cudaRadiusNeighborCount;
        checkCuda(cudaMalloc((int**)&cudaRadiusNeighborCount, inputFileLineNumber * sizeof(int)));
        int *radiusNeighborCount = (int*)calloc(inputFileLineNumber, sizeof(int));
        int radiusNeighborCountTotal = 0;
        if (vicinityAlgo == 2) {
            // Fixed-distance vicinity: first count each point's neighbors so
            // the exact amount of scratch memory can be allocated below.
            dim3 threadsPerBlock(inputFileLineNumber);
            dim3 blocksPerGrid(1);
            if (inputFileLineNumber > MAX_THREADS) {
                int divisor = (int)ceil((float)inputFileLineNumber / MAX_THREADS);
                threadsPerBlock.x = (int)ceil(1.0 * inputFileLineNumber / divisor);
                blocksPerGrid.x = divisor;
            }
            radiusKernel <<< blocksPerGrid, threadsPerBlock >>> (cudaInputs, inputFileLineNumber, neighborRadius, cudaRadiusNeighborCount);
            checkCuda(cudaMemcpy(radiusNeighborCount, cudaRadiusNeighborCount, inputFileLineNumber * sizeof(int), cudaMemcpyDeviceToHost));
            radiusNeighborCountTotal = accumulate(radiusNeighborCount, radiusNeighborCount + inputFileLineNumber, 0);
            cout << "Total neighbors: " << radiusNeighborCountTotal << endl;
        }
        // Rough estimate (MB) of the device memory the feature kernel needs.
        long double potentialMemory = (
            3 * inputFileLineNumber / 1024.0 / 1024.0 * sizeof(double)
            + 6 * inputFileLineNumber / 1024.0 / 1024.0 * sizeof(double)
            + (vicinityAlgo == 1 ? 1 : 0) * neighborCount / 1024.0 * inputFileLineNumber / 1024.0 * sizeof(double)
            + (vicinityAlgo == 1 ? 1 : 0) * neighborCount / 1024.0 * inputFileLineNumber / 1024.0 * sizeof(double)
            + (vicinityAlgo == 1 ? 1 : 0) * 3 * neighborCount / 1024.0 * inputFileLineNumber / 1024.0 * sizeof(double)
            + (vicinityAlgo == 2 ? 1 : 0) * radiusNeighborCountTotal / 1024.0 / 1024.0 * sizeof(double)
            + (vicinityAlgo == 2 ? 1 : 0) * 3 * radiusNeighborCountTotal / 1024.0 / 1024.0 * sizeof(double)
        );
        cout << "potentialMemory: " << potentialMemory << " MB" << endl;
        double *features = (double*)malloc(6 * inputFileLineNumber * sizeof(double));
        double *cudaFeatures;
        double *cudaBestNeighbors;
        double *cudaBestNeighborsIndeces;
        double *cudaOO;
        double *cudaRadiusBestNeighborsIndeces;
        double *cudaRadiusOO;
        checkCuda(cudaMalloc((double**)&cudaFeatures, 6 * inputFileLineNumber * sizeof(double)));
        checkCuda(cudaMalloc((double**)&cudaBestNeighbors, neighborCount * inputFileLineNumber * sizeof(double)));
        checkCuda(cudaMalloc((double**)&cudaBestNeighborsIndeces, neighborCount * inputFileLineNumber * sizeof(double)));
        checkCuda(cudaMalloc((double**)&cudaOO, 3 * neighborCount * inputFileLineNumber * sizeof(double)));
        checkCuda(cudaMalloc((double**)&cudaRadiusBestNeighborsIndeces, radiusNeighborCountTotal * sizeof(double)));
        checkCuda(cudaMalloc((double**)&cudaRadiusOO, 3 * radiusNeighborCountTotal * sizeof(double)));
        dim3 threadsPerBlock(inputFileLineNumber);
        dim3 blocksPerGrid(1);
        if (inputFileLineNumber > MAX_THREADS) {
            int divisor = (int)ceil((float)inputFileLineNumber / MAX_THREADS);
            threadsPerBlock.x = (int)ceil(1.0 * inputFileLineNumber / divisor);
            blocksPerGrid.x = divisor;
        }
        kernel <<<blocksPerGrid, threadsPerBlock >>> (
            cudaInputs, cudaFeatures, cudaBestNeighbors, cudaBestNeighborsIndeces, cudaOO,
            inputFileLineNumber, vicinityAlgo, neighborCount, neighborRadius,
            cudaRadiusBestNeighborsIndeces, cudaRadiusOO, cudaRadiusNeighborCount
        );
        checkCuda(cudaPeekAtLastError());
        checkCuda(cudaMemcpy(features, cudaFeatures, 6 * inputFileLineNumber * sizeof(double), cudaMemcpyDeviceToHost));
        // One output row per point: the coordinates followed by the six
        // computed features, tab-separated.
        for (i = 0; i < inputFileLineNumber; i++) {
            for (j = 0; j < 3; j++) {
                outputFile << inputs[i * 3 + j] << "\t";
            }
            for (j = 0; j < 6; j++) {
                outputFile << features[i * 6 + j] << (j == 5 ? "" : "\t");
            }
            outputFile << endl;
        }
        // BUGFIX: release every buffer — the original leaked five device
        // allocations and the host neighbor-count array.
        cudaFree(cudaFeatures);
        cudaFree(cudaInputs);
        cudaFree(cudaBestNeighbors);
        cudaFree(cudaBestNeighborsIndeces);
        cudaFree(cudaOO);
        cudaFree(cudaRadiusBestNeighborsIndeces);
        cudaFree(cudaRadiusOO);
        cudaFree(cudaRadiusNeighborCount);
        free(radiusNeighborCount);
        free(features);
        free(inputs);
    }
    cout << endl << "DONE";
    cin.ignore();
    return 0;
}
|
797
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#define E 2.71828182845904523536
// One explicit-Euler step, in place, for m independent ODEs
//   y_k'(t) = 4*t - y_k(t) + 3 + k,   with fixed time step dt = 1e-3.
// `step` is the current step index, so t = dt * step. Launch with a 1D grid
// covering at least m threads; excess threads exit via the bounds guard.
__global__ void euler_step(float * array, int m, int step) {
    // BUGFIX/perf: dt is a compile-time constant; the original computed
    // powf(10,-3) in every thread on every step, which wastes cycles and is
    // not guaranteed to round to exactly 1e-3f.
    const float dt = 1e-3f;
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId < m) {
        array[tId] = array[tId] + dt * (4 * (dt * step) - array[tId] + 3 + tId);
    }
}
// Host driver: integrates 1e8 independent ODEs for 1000 Euler steps on the
// GPU, once per candidate block size, timing each run with CUDA events and
// reporting the mean squared error against the exact solution at t = 1,
// which is y_k(1) = e^{-1} + 3 + k.
int main() {
    cudaEvent_t start, stop;
    int e_s = 100000000;              // number of ODE systems
    int n_i = 1000;                   // Euler steps (dt = 1e-3 -> t = 1)
    int block[4] = {64, 128, 256, 512};
    for (int m = 0; m < 4; m++) {
        int block_size = block[m];
        int grid_size = (int) ceil((float)e_s / block_size);
        float error = 0;
        float elapsed = 0;
        float *resultados = (float *) malloc(e_s * sizeof(float));
        float *d_r;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        // Initial condition: y_k(0) = k.
        for (int k = 0; k < e_s; k++)
            resultados[k] = k;
        cudaMalloc(&d_r, e_s * sizeof(float));
        cudaMemcpy(d_r, resultados, e_s * sizeof(float), cudaMemcpyHostToDevice);
        // Time only the kernel launches; the event pair brackets the loop.
        cudaEventRecord(start, 0);
        for (int n = 0; n < n_i; n++)
            euler_step<<<grid_size, block_size>>>(d_r, e_s, n);
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaMemcpy(resultados, d_r, e_s * sizeof(float), cudaMemcpyDeviceToHost);
        // Accumulate squared deviation from the analytic solution.
        for (int g = 0; g < e_s; g++)
            error = error + powf(resultados[g] - ((1 / E) + 4 - 1 + g), 2);
        printf("Executed with %d blocks\n", block[m]);
        printf("The elapsed time in gpu was %.2f ms \n", elapsed);
        printf("Mean squared error: %f \n", error / e_s);
        free(resultados);
        cudaFree(d_r);
    }
    return 0;
}
|
798
|
#include "includes.h"
// Builds one red/cyan anaglyph pixel from a left/right grayscale image pair:
// the left image feeds the red channel, the horizontally pre-shifted right
// image feeds green and blue. Launch with a 2D grid covering width x height;
// out-of-range threads exit via the bounds guard.
// NOTE(review): the inputs are float* but are stored into uchar channels
// without scaling — this assumes pixel values already lie in [0,255]; confirm
// against the caller.
__global__ void createAnaglyph_kernel(uchar4 *out_image, const float *left_image, const float *right_image, int width, int height, int pre_shift) {
    const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
    const int x_right = x - pre_shift;
    const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
    uchar4 temp;
    if (x < width && y < height) {
        temp.x = left_image[__mul24(y, width) + x];
        // BUGFIX: column 0 is a valid coordinate; the original `x_right > 0`
        // test wrongly blacked out the right image's first column.
        if (x_right >= 0 && x_right < width) {
            temp.y = right_image[__mul24(y, width) + x_right];
            temp.z = temp.y;
        } else {
            // Shifted sample falls outside the right image: no cyan signal.
            temp.y = 0;
            temp.z = 0;
        }
        temp.w = 255;  // fully opaque
        out_image[__mul24(y, width) + x] = temp;
    }
}
|
799
|
#include <cstdio>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <chrono>
#define CUDA_RANGE 1000
#define BLOCK_SIZE 512
#define WARP_SIZE 32
// One sample of the objective function: the abscissa x and its value y = f(x).
struct point {
    float x;
    float y;
};
// Total GPU threads (2^20) and threads per block used by the search launch.
const int THREADS = 1 << 20;
const int THREADS_PER_BLOCK = 512;
// Objective being maximized: f(x) = cos(3*x^4) * sin(5*x^2) * sin(5x)^2.
__device__ float func(float x) {
    const float quartic = powf(x, 4.0f);
    const float quadratic = powf(x, 2.0f);
    return cosf(3.0f * quartic) * sinf(5.0f * quadratic) * powf(sinf(5.0f * x), 2.0f);
}
// Returns the sample with the larger function value (argmax helper for the
// block-level reduction).
__device__ point max(point a, point b) {
    return (a.y > b.y) ? a : b;
}
// Searches [0, CUDA_RANGE] for the maximum of func. Each thread scans its
// own sub-interval with step 1e-4, then the block reduces the per-thread
// maxima in shared memory; thread 0 writes the block maximum to
// outData[blockIdx.x]. Requires blockDim.x == BLOCK_SIZE (a power of two).
__global__ void kernel(point *outData) {
    __shared__ point data[BLOCK_SIZE];
    int thread_id = threadIdx.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    // Width of each thread's sub-interval and its [x, end] bounds.
    float part = (float) CUDA_RANGE / (float) (blockDim.x * gridDim.x);
    float end = part * (float) (idx + 1);
    float x = part * (float) idx;
    point result{
        x,
        func(x)
    };
    // Linear scan of this thread's sub-interval.
    while (x <= end) {
        float y = func(x);
        if (y > result.y) {
            result.y = y;
            result.x = x;
        }
        x += 1E-4f;
    }
    data[thread_id] = result;
    __syncthreads();
    // BUGFIX: the original unrolled the last six reduction steps without any
    // synchronization, relying on implicit warp-lockstep execution. Under
    // Volta+ independent thread scheduling that is a data race on data[].
    // Reduce all the way down with a block barrier between steps instead.
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (thread_id < s)
            data[thread_id] = max(data[thread_id], data[thread_id + s]);
        __syncthreads();
    }
    if (thread_id == 0)
        outData[blockIdx.x] = data[0];
}
// Host driver: launches the block-wise maximum search, copies the per-block
// results back via thrust, reduces them on the CPU to the global maximum,
// and reports the argmax and total wall-clock duration.
int main() {
    auto start = std::chrono::steady_clock::now();
    thrust::host_vector<point> hostMaxima(THREADS / THREADS_PER_BLOCK);
    thrust::device_vector<point> deviceMaxima(THREADS / THREADS_PER_BLOCK);
    point *rawMaxima = thrust::raw_pointer_cast(&deviceMaxima[0]);
    kernel<<<THREADS / THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(rawMaxima);
    // thrust::copy synchronizes with the kernel before reading device memory.
    thrust::copy(deviceMaxima.begin(), deviceMaxima.end(), hostMaxima.begin());
    // Fold the per-block maxima into a single best sample.
    point best{
        -1,
        -std::numeric_limits<float>::infinity()
    };
    for (int i = 0; i < hostMaxima.size(); i++) {
        if (hostMaxima[i].y > best.y) {
            best = hostMaxima[i];
        }
    }
    auto end = std::chrono::steady_clock::now();
    std::cout << "X: " << best.x << " Y: " << best.y << std::endl;
    std::cout << "Duration: " << std::chrono::duration_cast<std::chrono::milliseconds>(end - start).count() << "ms"
              << std::endl;
    return EXIT_SUCCESS;
}
|
800
|
/// LSU EE 7700-2 (Spring 2013), GPU Microarchitecture
//
/// Homework 3
//
// Assignment in: http://www.ece.lsu.edu/koppel/gp/2013/hw03.pdf
//
/// Your Name:
#include <pthread.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <ctype.h>
#include <time.h>
#include <new>
#include <cuda_runtime.h>
// N: vector length and matrix dimension (vertices have N elements, the
// transformation matrix is N x N).
#define N 4
/// CUDA API Error-Checking Wrapper
///
// Wraps a CUDA runtime call: on any failure it prints the numeric code and
// message and exits immediately (CUDA errors are sticky, so continuing after
// one would only produce cascading failures).
#define CE(call)                                                              \
{                                                                             \
  const cudaError_t rv = call;                                                \
  if ( rv != cudaSuccess )                                                    \
    {                                                                         \
      printf("CUDA error %d, %s\n",rv,cudaGetErrorString(rv));                \
      exit(1);                                                                \
    }                                                                         \
}
// Current wall-clock time (CLOCK_REALTIME) in seconds as a double, with the
// nanosecond field folded into the fractional part.
double
time_fp()
{
  struct timespec now;
  clock_gettime(CLOCK_REALTIME, &now);
  return ((double) now.tv_sec) + ((double) now.tv_nsec) * 1e-9;
}
// Make it easy to switch between float and double for vertex and matrix
// elements.
//
typedef float Elt_Type;
// An N-element vertex. 16-byte aligned so that (with N == 4 floats) the GPU
// can move a whole vertex with a single vectorized access.
struct __align__(16) Vertex
{
  Elt_Type a[N];
};
// All problem state shared by the host and device code paths. One copy
// lives in host memory ("app"); a second is copied into device constant
// memory ("d_app") before the kernel launch.
struct App
{
  int num_threads;              // total worker threads (CPU or GPU)
  Elt_Type matrix[N][N];        // transformation applied to each vertex
  int array_size; // Number of vertices.
  bool find_minimum_magnitude; // For problem 2.
  Vertex *v_in, *v_out;         // host-side input/output arrays
  Vertex *d_v_in, *d_v_out;     // device-side input/output arrays
};
// In host address space.
App app;
// In device constant address space.
__constant__ App d_app;
// The entry point for the GPU code.
//
// Applies d_app.matrix (N x N) to every vertex of d_app.d_v_in, writing the
// transformed vertices to d_app.d_v_out. Work is distributed over all
// launched threads with a grid-stride-style loop of stride num_threads.
__global__ void
cuda_thread_start()
{
  // Compute an id number that will be in the range from 0 to num_threads-1.
  //
  const int tid = threadIdx.x + blockIdx.x * blockDim.x;

  // BUGFIX/perf: the original gave each thread a contiguous chunk of
  // array_size / num_threads elements, which (a) silently skipped the
  // remainder whenever array_size was not a multiple of num_threads and
  // (b) made consecutive threads touch far-apart addresses, defeating
  // memory coalescing. Striding by num_threads covers every element and
  // lets adjacent threads access adjacent vertices.
  for ( int h=tid; h<d_app.array_size; h += d_app.num_threads )
    {
      Vertex p = d_app.d_v_in[h];
      Vertex q;
      // q = matrix * p (dense N x N matrix-vector product).
      for ( int i=0; i<N; i++ )
        {
          q.a[i] = 0;
          for ( int j=0; j<N; j++ ) q.a[i] += d_app.matrix[i][j] * p.a[j];
        }
      d_app.d_v_out[h] = q;
    }
}
// Prints capability and resource information for the CUDA device in use,
// plus the per-thread resource usage of the cuda_thread_start kernel.
// Exits the program if no CUDA-capable GPU is present.
void
print_gpu_info()
{
  // Get information about GPU and its ability to run CUDA.
  //
  int device_count;
  cudaGetDeviceCount(&device_count); // Get number of GPUs.
  if ( device_count == 0 )
    {
      fprintf(stderr,"No GPU found, exiting.\n");
      exit(1);
    }

  int dev = 0;
  CE(cudaGetDevice(&dev));
  printf("Using GPU %d\n",dev);

  cudaDeviceProp cuda_prop; // Properties of cuda device (GPU, cuda version).

  /// Print information about the available GPUs.
  //
  {
    CE(cudaGetDeviceProperties(&cuda_prop,dev));
    // clockRate is reported in kHz, so /1e6 yields GHz.
    printf
      ("GPU %d: %s @ %.2f GHz WITH %d MiB GLOBAL MEM\n",
       dev, cuda_prop.name, cuda_prop.clockRate/1e6,
       int(cuda_prop.totalGlobalMem >> 20));
    printf
      ("GPU %d: CC: %d.%d MP: %2d TH/WP: %3d TH/BL: %4d\n",
       dev, cuda_prop.major, cuda_prop.minor,
       cuda_prop.multiProcessorCount,
       cuda_prop.warpSize,
       cuda_prop.maxThreadsPerBlock
       );
    printf
      ("GPU %d: SHARED: %5d CONST: %5d # REGS: %5d\n",
       dev,
       int(cuda_prop.sharedMemPerBlock), int(cuda_prop.totalConstMem),
       cuda_prop.regsPerBlock
       );
  }

  // Per-kernel resource usage (registers, shared/const/local bytes) as
  // reported by the CUDA runtime for our kernel.
  cudaFuncAttributes cfa_prob1; // Properties of code to run on device.
  CE( cudaFuncGetAttributes(&cfa_prob1,cuda_thread_start) );

  // Print information about time_step routine.
  //
  printf("\nCUDA Routine Resource Usage:\n");
  printf("  Our CUDA Thread: %6zd shared, %zd const, %zd loc, %d regs; "
         "%d max threads per block.\n",
         cfa_prob1.sharedSizeBytes,
         cfa_prob1.constSizeBytes,
         cfa_prob1.localSizeBytes,
         cfa_prob1.numRegs,
         cfa_prob1.maxThreadsPerBlock);
}
// CPU-side worker: the pthread analogue of cuda_thread_start. Each thread
// transforms a contiguous slice of app.v_in into app.v_out by multiplying
// every vertex in its slice by app.matrix. The thread id arrives encoded
// in the opaque pthread argument.
void*
pt_thread_start(void *arg)
{
  const int tid = (ptrdiff_t) arg;
  printf("Hello from %d\n",tid);

  // Even partition of the array; any remainder is ignored, matching the
  // GPU path's division.
  const int per_thread = app.array_size / app.num_threads;
  const int first = per_thread * tid;
  const int last = first + per_thread;

  for ( int h=first; h<last; h++ )
    {
      const Vertex in = app.v_in[h];
      Vertex out;
      for ( int row=0; row<N; row++ )
        {
          out.a[row] = 0;
          for ( int col=0; col<N; col++ )
            out.a[row] += app.matrix[row][col] * in.a[col];
        }
      app.v_out[h] = out;
    }

  return NULL;
}
// Program entry: parse command-line arguments, initialize the input vertices
// and transformation matrix with random values, run the transform either via
// pthreads on the CPU or as a CUDA kernel on the GPU, then print timing,
// throughput, and (for Problem 2) a minimum-magnitude check.
int
main(int argc, char **argv)
{
  // Examine argument 1, block size, if negative, find minimum magnitude.
  //
  const int arg1_int = argc < 2 ? 1 : atoi(argv[1]);
  const bool find_mag = arg1_int < 0;
  const int num_blocks = abs(arg1_int);
  const bool use_pthreads = false;

  // For Problem 2.
  app.find_minimum_magnitude = find_mag;

  // Examine argument 2, number of threads per block.
  //
  const int thd_per_block = argc < 3 ? 1 : atoi(argv[2]);
  app.num_threads = use_pthreads ? -arg1_int : num_blocks * thd_per_block;

  // Examine argument 3, size of array in MiB. Fractional values okay.
  //
  app.array_size = argc < 4 ? 1 << 20 : int( atof(argv[3]) * (1<<20) );

  if ( app.num_threads <= 0 || app.array_size <= 0 )
    {
      printf("Usage: %s [ NUM_PTHREADS | - NUM_CUDA_BLOCKS ] [THD_PER_BLOCK] [DATA_SIZE_MiB]\n",
             argv[0]);
      exit(1);
    }

  if ( !use_pthreads )
    print_gpu_info();

  const int array_size_bytes = app.array_size * sizeof(app.v_in[0]);

  // Allocate storage for CPU copy of data.
  //
  app.v_in = new Vertex[app.array_size];
  app.v_out = new Vertex[app.array_size];

  // Allocate storage for GPU copy of data.
  //
  CE( cudaMalloc( &app.d_v_in,  app.array_size * sizeof(Vertex) ) );
  CE( cudaMalloc( &app.d_v_out, app.array_size * sizeof(Vertex) ) );

  printf
    ("\nPreparing for %d %s threads operating on %d vectors of %d elements.\n",
     app.num_threads,
     use_pthreads ? "CPU" : "GPU",
     app.array_size, N);

  // Initialize input array.
  //
  for ( int i=0; i<app.array_size; i++ )
    for ( int j=0; j<N; j++ ) app.v_in[i].a[j] = drand48();

  // Initialize transformation matrix.
  //
  for ( int i=0; i<N; i++ )
    for ( int j=0; j<N; j++ )
      app.matrix[i][j] = drand48();

  double elapsed_time_s;
  int minimum_mag_index = 0; // For Problem 2.
  Elt_Type minimum_mag_val = 0; // For Problem 2.

  if ( use_pthreads )
    {
      const double time_start = time_fp();

      // Allocate a structure to hold pthread thread ids.
      //
      pthread_t* const ptid = new pthread_t[app.num_threads];

      // Set up a pthread attribute, used for specifying options.
      //
      pthread_attr_t attr;
      pthread_attr_init(&attr);
      pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);

      // Launch the threads.  The thread index is smuggled through the
      // void* argument (decoded in pt_thread_start).
      //
      for ( int i=0; i<app.num_threads; i++ )
        pthread_create(&ptid[i], &attr, pt_thread_start, (void*)i);

      // Wait for each thread to finish.
      //
      for ( int i=0; i<app.num_threads; i++ )
        pthread_join( ptid[i], NULL );

      elapsed_time_s = time_fp() - time_start;
    }
  else
    {
      // Prepare events used for timing.
      //
      cudaEvent_t gpu_start_ce, gpu_stop_ce;
      CE(cudaEventCreate(&gpu_start_ce));
      CE(cudaEventCreate(&gpu_stop_ce));

      // Copy input array from CPU to GPU.
      //
      CE( cudaMemcpy
          ( app.d_v_in, app.v_in, array_size_bytes, cudaMemcpyHostToDevice ) );

      // Copy App structure to GPU.  This publishes the device pointers and
      // matrix into constant memory (d_app) for the kernel.
      //
      CE( cudaMemcpyToSymbol
          ( d_app, &app, sizeof(app), 0, cudaMemcpyHostToDevice ) );

      // Measure execution time starting "now", which is after data
      // set to GPU.
      //
      CE(cudaEventRecord(gpu_start_ce,0));

      printf("Launching with %d blocks of %d threads.\n",
             num_blocks, thd_per_block);

      // Tell CUDA to start our threads on the GPU.
      //
      cuda_thread_start<<<num_blocks,thd_per_block>>>();

      // Stop measuring execution time now, which is before is data
      // returned from GPU.
      //
      CE(cudaEventRecord(gpu_stop_ce,0));
      CE(cudaEventSynchronize(gpu_stop_ce));
      float cuda_time_ms = -1.1;
      CE(cudaEventElapsedTime(&cuda_time_ms,gpu_start_ce,gpu_stop_ce));
      elapsed_time_s = cuda_time_ms * 0.001;

      // Copy output array from GPU to CPU.
      //
      CE( cudaMemcpy
          ( app.v_out, app.d_v_out, array_size_bytes, cudaMemcpyDeviceToHost) );

      // PROBLEM 2
      //
      // Insert code for reading magnitude information and having
      // CPU finish up finding the minimum.
      minimum_mag_index = -1;  // PROBLEM 2 - change this line.
      minimum_mag_val = 0.0;   // PROBLEM 2 - change this line too.
    }

  // Bytes moved (read + write) and floating-point ops, for the rate report.
  const double data_size = app.array_size * sizeof(Vertex) * 2;
  const double fp_op_count = app.array_size * ( 2 * N * N - N );

  printf("Elapsed time for %d threads and %d elements is %.3f µs\n",
         app.num_threads, app.array_size, 1e6 * elapsed_time_s);

  printf("Rate %.3f GFLOPS, %.3f GB/s\n",
         1e-9 * fp_op_count / elapsed_time_s,
         1e-9 * data_size / elapsed_time_s);

  if ( app.find_minimum_magnitude )
    {
      // Compute correct answer.  (Serial reference check for Problem 2.)
      Elt_Type min_val = 0;
      int min_idx = -1;
      for ( int h=0; h<app.array_size; h++ )
        {
          Vertex p = app.v_in[h];
          Vertex q;
          for ( int i=0; i<N; i++ )
            {
              q.a[i] = 0;
              for ( int j=0; j<N; j++ ) q.a[i] += app.matrix[i][j] * p.a[j];
            }
          // Magnitude of the transformed vertex (sqrt of sum of squares).
          Elt_Type sos = 0; for(int i=0; i<N; i++ ) sos+= q.a[i]*q.a[i];
          Elt_Type mag = sqrt(sos);
          if ( min_idx < 0 || mag < min_val ) { min_val = mag; min_idx = h; }
        }
      Elt_Type diff = fabs(min_val-minimum_mag_val);
      printf
        ("\nMinimum mag is %s, %d %s %d (correct) %.4f %s %.4f (correct)\n",
         diff < 1e-5 ? "correct" : "**wrong**",
         minimum_mag_index,
         min_idx == minimum_mag_index ? "==" : "!=",
         min_idx,
         minimum_mag_val,
         min_val == minimum_mag_val ? "==" : diff < 1e-5 ? "~" : "!=",
         min_val
         );
    }
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.