serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
1,701
|
/* String literals must not be bound to mutable char*: writing through such a
   pointer is undefined behavior and the conversion is ill-formed in C++11+.
   Callers only read these (printf), so const-qualifying is backward compatible. */
const char *title = "Little's algorithm";
/* English gloss: "Little's algorithm - a method for solving the travelling
   salesman problem" (the literal itself is user-facing and kept unchanged). */
const char *description = "Алгоритм Литтла - метод решения задачи коммивояжера";
/*
Алгоритм Литтла применяют для поиска решения задачи коммивояжера в виде гамильтонова контура.
Данный алгоритм используется для поиска оптимального гамильтонова контура в графе, имеющем N вершин,
причем каждая вершина i связана с любой другой вершиной j двунаправленной дугой.
Каждой дуге приписан вес Сi,j, причем веса дуг неотрицательны (Сi,j≥0).
Веса дуг образуют матрицу стоимости. Все элементы по диагонали матрицы приравнивают
к бесконечности (Сj,j=∞).
В случае, если пара вершин i и j не связана между собой (граф не полносвязный), то соответствующему элементу
матрицы стоимости приписываем вес, равный длине минимального пути между вершинами i и j.
Если в итоге дуга (i, j) войдет в результирующий контур, то ее необходимо заменить соответствующим ей путем.
Матрицу оптимальных путей между всеми вершинами графа можно получить применив алгоритм Данцига или Флойда.
Алгоритм Литтла является частным случаем применения метода "ветвей и границ" для конкретной задачи.
Общая идея тривиальна: нужно разделить огромное число перебираемых вариантов на классы и получить оценки
(снизу – в задаче минимизации, сверху – в задаче максимизации) для этих классов, чтобы иметь возможность
отбрасывать варианты не по одному, а целыми классами.
Трудность состоит в том, чтобы найти такое разделение на классы (ветви) и такие оценки (границы),
чтобы процедура была эффективной.
Алгоритм Литтла
В каждой строке матрицы стоимости найдем минимальный элемент и вычтем его из всех элементов строки.
Сделаем это и для столбцов.
Получим матрицу стоимости, каждая строка и каждый столбец которой содержат хотя бы один нулевой элемент.
Для каждого нулевого элемента матрицы cij рассчитаем коэффициент Гi,j, который равен сумме наименьшего элемента i строки
(исключая элемент Сi,j=0) и наименьшего элемента j столбца.
Проверяем, что не существует однозначных путей - то есть с одним входом и выходом
Если такой путь есть, то выбираем его
иначе Из всех коэффициентов Гi,j выберем такой, который является максимальным Гk,m=max{Гi,j}.
В гамильтонов контур вносится соответствующая дуга (k,m).
Удаляем k-тую строку и столбец m.
Поменяем на бесконечность значение элемент Сr,l для всех путей (l,...,k,m,...r) из добавленных дуг,
содержащих дугу (k,m) (иначе может образоваться простой цикл).
Повторяем алгоритм шага 1, пока порядок матрицы не станет равным одному.
Получаем гамильтонов контур.
В ходе решения ведется постоянный подсчет текущего значения нижней границы.
Нижняя граница равна сумме всех вычтенных элементов в строках и столбцах.
Итоговое значение нижней границы должно совпасть с длиной результирующего контура.
*/
#include <iostream>
#include <cstdio>
#include <cstdlib>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <ctype.h>
#include <limits.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#define assert( bool )
/* Return 1 when the string is NULL, empty, or consists only of whitespace;
   return 0 as soon as any non-whitespace character is found. */
int strempty(const char *p)
{
	if (p == NULL)
		return 1;
	while (*p != '\0') {
		if (!isspace(*p))
			return 0;
		p++;
	}
	return 1;
}
/* Destructive tokenizer on separator `c`, re-entrant via caller state `*m`.
   On the first call pass the string in `s`; on later calls pass s == NULL and
   the saved position in *m is used.  The separator is overwritten with '\0'.
   Returns the next token, or NULL when the input is exhausted.  Unlike
   strtok(), empty tokens between adjacent separators are returned. */
char *mystrtok(char **m, char *s, char c)
{
	char *start = (s != NULL) ? s : *m;
	if (*start == '\0')
		return NULL;               /* nothing left to scan */
	char *sep = strchr(start, c);
	if (sep != NULL) {
		*sep = '\0';               /* terminate the token in place */
		*m = sep + 1;              /* resume after the separator */
	} else {
		*m = start + strlen(start); /* park state at the terminator */
	}
	return start;
}
#ifndef max
/* Classic unsafe max macro: both arguments are evaluated twice, so operands
   must be free of side effects.  Guarded so a platform definition wins. */
#define max( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef min
/* Classic unsafe min macro: same double-evaluation caveat as max. */
#define min( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
/*
 * Count, for every column and every row of the n x n cost matrix, how many
 * non-forbidden (!= LONG_MAX) entries remain, capping the count at 2.
 * Work item id < n counts column id into islice[id]; work item id >= n counts
 * row (id - n) into islice[id].  Grid-stride loop over 2*n items, any launch
 * configuration is valid.  Step (b) then enqueues entries with count == 1.
 */
__global__ void global_queue_oneway_a(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < 2*n; id += blockDim.x*gridDim.x) {
		islice[id] = 0;
		if (id < n){
			/* Column id: stop counting once two open entries are seen. */
			for (int i = 0; islice[id] < 2 && i < n; i++) {
				if (matrix[i*n + id] != LONG_MAX) {
					islice[id]++;
				}
			}
		}
		else {
			/* Row (id - n).  BUGFIX: the original tested `im[0] < 2` here,
			 * reading unrelated shared state (and racing with other threads)
			 * instead of capping this thread's own counter as the symmetric
			 * column branch does. */
			for (int j = 0; islice[id] < 2 && j < n; j++) {
				if (matrix[(id - n)*n + j] != LONG_MAX) {
					islice[id]++;
				}
			}
		}
	}
}
/*
 * Single-thread pass (the `id < 1` loop restricts execution to one thread):
 * for every row/column that step (a) counted exactly one open entry for,
 * push that entry's flat matrix index onto the level-n queue.  qsize[n] is a
 * stack pointer into `queue` that grows downwards (pre-decrement push).
 * islice[0..n-1] hold column counts, islice[n..2n-1] hold row counts.
 * NOTE(review): if islice[k] == 1 but no open entry is found, the scan index
 * ends at n and an out-of-row index is pushed - presumably step (a)
 * guarantees this cannot happen; confirm.
 */
__global__ void global_queue_oneway_b(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < 1; id += blockDim.x*gridDim.x) {
for (int k = 0; k < 2 * n; k++){
if (islice[k] == 1){
if (k < n){
/* k is a column: locate its single open row i. */
int i; for (i = 0; i < n; i++){
if (matrix[i*n + k] != LONG_MAX)
break;
}
queue[--qsize[n]] = i*n + k;
}
else {
/* k - n is a row: locate its single open column j. */
int j; for (j = 0; j < n; j++){
if (matrix[(k - n)*n + j] != LONG_MAX)
break;
}
queue[--qsize[n]] = (k - n)*n + j;
}
}
}
}
}
/*
Добавление запрещённых переходов.
Шаг первый.
Последнюю добавленную дугу помещаем в середину массива.
Массив наращиваем слева и справа.
Шаг второй.
Запрещаем все дуги ведущие из правой половины массива в левую половину массива.
*/
/*
 * Step one of adding forbidden transitions (single thread).
 * Builds, in islice, the chain of previously added arcs that contains the
 * most recently added arc: the sentinel index n (the newest entry of the
 * from/to stacks at this level) is placed at position `rank`, then the array
 * grows leftwards with arcs chaining onto the path head and rightwards with
 * arcs chaining onto the path tail.  Outputs: im[0]/im[1] = left/right
 * bounds of the built segment.  Entries of from/to at indices n..rank-1 are
 * the arcs added so far; lower indices are not scanned (loops stop at n).
 */
__global__ void global_add_forbidden_a(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < 1; id += blockDim.x*gridDim.x) {
im[0] = im[1] = rank;
/* Seed the middle of the work array with the newest arc's stack index. */
islice[--im[0]] = islice[im[1]++] = n;
while(1==1){
/* Grow left: find an arc whose head feeds the current leftmost arc's tail. */
int id1; for(id1 = rank; id1-->n ; ) if (to[id1]==from[islice[im[0]]]) break;
if (id1>n) islice[--im[0]] = id1; else break;
}
while(1==1){
/* Grow right: find an arc whose tail follows the current rightmost arc's head. */
int id2; for(id2 = rank; id2-->n ; ) if (from[id2]==to[islice[im[1]-1]]) break;
if (id2>n) islice[im[1]++] = id2; else break;
}
}
}
/*
 * Step two (one work item per left/right pair): forbid every arc leading
 * from the right half of the chain back into the left half, preventing a
 * premature subtour through the newly added arc.  islice[im[0]..rank-1] is
 * the left half and islice[rank..im[1]-1] the right half (indices into the
 * from/to stacks).  The reverse scans translate original vertex numbers
 * into current-matrix row/column positions via rows[]/cols[].
 */
__global__ void global_add_forbidden_b(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < (rank-im[0])*(im[1]-rank); id += blockDim.x*gridDim.x) {
int id1 = islice[rank - (id%(rank-im[0])) - 1];
int id2 = islice[rank + (id/(rank-im[0]))];
int i; for (i = n; i-- > 0;) if (rows[i] == to[id2]) break; /* Row index, -1 when the row was already removed */
int j; for (j = n; j-- > 0;) if (cols[j] == from[id1]) break; /* Column index, -1 when the column was already removed */
if (i != -1 && j != -1) matrix[i*n + j] = LONG_MAX;
}
}
/*
Удаление строки im[0] и столбца im[1], соответствующих последней добавленной дуге (im[0],im[1])
*/
/*
 * Remove row im[0] and column im[1] (the last added arc) from the n x n
 * matrix, writing the compacted (n-1) x (n-1) result into matrix_1.
 * Grid-stride loop: each work item copies one destination cell, shifting
 * source indices past the deleted row/column.
 */
__global__ void global_matrix_trunc(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	const int m = n - 1;                 /* order of the truncated matrix */
	for (int cell = blockDim.x*blockIdx.x + threadIdx.x; cell < m*m; cell += blockDim.x*gridDim.x) {
		const int i = cell / m;          /* destination row */
		const int j = cell % m;          /* destination column */
		const int si = (i < im[0]) ? i : i + 1;   /* skip deleted row */
		const int sj = (j < im[1]) ? j : j + 1;   /* skip deleted column */
		matrix_1[cell] = matrix[si*n + sj];
	}
}
/*
 * Single-thread pass: push onto the level-n queue every flat gamma index in
 * [0, im[0]] whose value equals the maximal coefficient lm[1] (both computed
 * by global_gamma_max_index_of_max_a/b; im[0] is the largest argmax, so no
 * equal value exists beyond it).  qsize[n] is a downward-growing stack
 * pointer into `queue`.
 */
__global__ void global_queue_indexes_of_max(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
/* Collect all indices holding the maximal coefficient */
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < 1; id += blockDim.x*gridDim.x) {
for (int i = 0; i < (im[0] + 1); i++) {
if (lm[1] == gamma[i]) queue[--qsize[n]] = i;
}
}
}
/*
Нахождение максимального индекса максимального элемента массива gamma
Возвращаемые значения:
im[0] - индекс максимального элемента
lm[1] - значение максимального элемента
*/
/*
 * Per-row argmax over gamma (one work item per row, grid-stride).
 * The `<=` comparison deliberately keeps the LAST maximum, so the largest
 * flat index wins within the row.  Outputs for row id:
 *   islice[id]    - flat index of the row's maximal coefficient
 *   lslice[n+id]  - the maximal value itself
 * Step (b) reduces these n partial results to a single global maximum.
 */
__global__ void global_gamma_max_index_of_max_a(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < n; id += blockDim.x*gridDim.x) {
islice[id] = id*n;
lslice[n + id] = gamma[islice[id]];
for (int i = 1; i < n; i++) {
if (lslice[n + id] <= gamma[id*n + i]) {
islice[id] = id*n + i;
lslice[n + id] = gamma[islice[id]];
}
}
}
}
/*
 * Single-thread final reduction over the per-row results of step (a):
 * picks the greatest value, breaking ties toward the larger flat index.
 * Outputs: im[0] = flat index of the global maximum, lm[1] = its value.
 */
__global__ void global_gamma_max_index_of_max_b(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < 1; id += blockDim.x*gridDim.x) {
im[0] = islice[0];
lm[1] = lslice[n];
for (int i = 1; i < n; i++) {
if ((lm[1] < lslice[n + i]) || ((lm[1] == lslice[n + i]) && (im[0] < islice[i]))) {
im[0] = islice[i];
lm[1] = lslice[n + i];
}
}
}
}
/*
Для каждого нулевого элемента матрицы cij рассчитаем коэффициент Гi,j,
который равен сумме наименьшего элемента i строки (исключая элемент Сi,j=0)
и наименьшего элемента j столбца.
Возвращаемые значения:
gamma - массив рассчитанных коэффициентов
Массив gamma представляет собой расчёт минимальной цены въезда и выезда из города
*/
/*
 * For every zero cell (i,j) of the reduced matrix compute the Little
 * coefficient gamma[i*n+j] = (min of row i excluding cell (i,j)) +
 * (min of column j excluding cell (i,j)); LONG_MAX operands (city cannot be
 * entered/left) are handled so the sum never overflows.  Non-zero cells get
 * the sentinel LONG_MIN so they never win the subsequent argmax.
 * Grid-stride loop over all n*n cells.
 */
__global__ void global_calc_gamma(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
/* Compute the coefficients in parallel across threads */
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < n*n; id += blockDim.x*gridDim.x) {
if (matrix[id] == 0) {
int i = id / n; /* Row index */
int j = id % n; /* Column index */
long x = matrix[i*n + ((j + 1) % n)]; /* Start from the next element, skipping the zero cell itself */
long y = matrix[((i + 1) % n)*n + j]; /* Start from the next element, skipping the zero cell itself */
for (int k = 2; k < n; k++){
x = min(x, matrix[i*n + ((j + k) % n)]);
y = min(y, matrix[((i + k) % n)*n + j]);
}
if ((x == LONG_MAX) && (y == LONG_MAX)) gamma[id] = LONG_MAX; /* City can be neither entered nor left */
else if (x == LONG_MAX) gamma[id] = y; /* City cannot be entered */
else if (y == LONG_MAX) gamma[id] = x; /* City cannot be left */
else gamma[id] = x + y;
}
else gamma[id] = LONG_MIN;
}
}
/*
 * Subtract the per-row minimum (precomputed into lslice[row] by
 * global_min_by_row) from every reachable cell of the n x n matrix.
 * LONG_MAX marks a forbidden arc and is left untouched.
 * Grid-stride loop over all n*n cells.
 */
__global__ void global_sub_by_row(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	const int stride = blockDim.x*gridDim.x;
	for (int cell = blockDim.x*blockIdx.x + threadIdx.x; cell < n*n; cell += stride) {
		const int row = cell / n;
		if (matrix[cell] != LONG_MAX) {
			matrix[cell] = matrix[cell] - lslice[row];
		}
	}
}
/*
 * Subtract the per-column minimum (precomputed into lslice[col] by
 * global_min_by_col) from every reachable cell of the n x n matrix.
 * LONG_MAX marks a forbidden arc and is left untouched.
 * (The original comment said "rows" here - this kernel works on columns.)
 */
__global__ void global_sub_by_col(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	const int stride = blockDim.x*gridDim.x;
	for (int cell = blockDim.x*blockIdx.x + threadIdx.x; cell < n*n; cell += stride) {
		const int col = cell % n;
		if (matrix[cell] != LONG_MAX) {
			matrix[cell] = matrix[cell] - lslice[col];
		}
	}
}
/*
 * Column-minimum reduction: one work item per column (grid-stride).
 * Writes the minimum of column `col` into lslice[col].
 */
__global__ void global_min_by_col(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	for (int col = blockDim.x*blockIdx.x + threadIdx.x; col < n; col += blockDim.x*gridDim.x) {
		long best = matrix[col];          /* row 0 seeds the reduction */
		for (int row = 1; row < n; row++) {
			const long v = matrix[row*n + col];
			if (v < best) best = v;
		}
		lslice[col] = best;
	}
}
/*
 * Row-minimum reduction: one work item per row (grid-stride).
 * Writes the minimum of row `row` into lslice[row].
 */
__global__ void global_min_by_row(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	for (int row = blockDim.x*blockIdx.x + threadIdx.x; row < n; row += blockDim.x*gridDim.x) {
		long best = matrix[row*n];        /* column 0 seeds the reduction */
		for (int col = 1; col < n; col++) {
			const long v = matrix[row*n + col];
			if (v < best) best = v;
		}
		lslice[row] = best;
	}
}
/*
 * One step of forward reachability propagation used by the host-side graph
 * connectivity check.  For each column id, inherit the largest marker of any
 * row i that can reach it (matrix[i*n+id] != LONG_MAX).  The previous
 * iteration's markers live in islice[n..2n-1]/lslice[n..2n-1] (the host
 * copies them there before each launch); the updated markers are written to
 * islice[0..n-1]/lslice[0..n-1].  The host iterates this n times.
 */
__global__ void global_next_by_row(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < n; id += blockDim.x*gridDim.x) {
for (int i = 0; i < n; i++) {
if (matrix[i*n + id] != LONG_MAX) {
lslice[id] = max(lslice[id], lslice[i+n]);
islice[id] = max(islice[id], islice[i+n]);
}
}
}
}
/*
 * One step of backward reachability propagation (mirror of
 * global_next_by_row).  For each row id, inherit the largest marker of any
 * column j reachable from it (matrix[id*n+j] != LONG_MAX).  Previous markers
 * are read from islice[n..2n-1]/lslice[n..2n-1]; updated markers are written
 * to islice[0..n-1]/lslice[0..n-1].  The host iterates this n times.
 */
__global__ void global_prev_by_col(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < n; id += blockDim.x*gridDim.x) {
for (int j = 0; j < n; j++) {
if (matrix[id*n + j] != LONG_MAX){
lslice[id] = max(lslice[id], lslice[j+n]);
islice[id] = max(islice[id], islice[j+n]);
}
}
}
}
/*
 * Single-thread reduction: lm[0] = min over lslice[0..n-1] and
 * im[0] = min over islice[0..n-1].  Used by the connectivity check to see
 * whether any vertex was left unmarked (minimum marker == 0).
 */
__global__ void global_min_by_dim(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	if (blockDim.x*blockIdx.x + threadIdx.x == 0) {
		long lbest = lslice[0];
		int ibest = islice[0];
		for (int k = 1; k < n; k++){
			if (lslice[k] < lbest) lbest = lslice[k];
			if (islice[k] < ibest) ibest = islice[k];
		}
		lm[0] = lbest;
		im[0] = ibest;
	}
}
/*
 * Single-thread total of the per-level lower bounds: lm[0] = sum of
 * lbound[1..rank].  Called from the host only when the matrix has been
 * reduced to order n == 1, so the first loop (i = 1; i < n) never executes
 * there.  NOTE(review): for n > 1 that loop would overwrite lbound[1..n-1]
 * with matrix[(n-1)*i] - the intent of that indexing is unclear from this
 * file; confirm before reusing this kernel at other matrix orders.
 */
__global__ void global_sum_lbound(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < 1; id += blockDim.x*gridDim.x) {
lm[0] = 0;
for (int i = 1; i < n; i++) {
lbound[i] = matrix[(n - 1)*i];
}
for (int i = 1; i <= rank; i++){
lm[0] += lbound[i];
}
}
}
/*
 * Single-thread update: add the cost of the matrix cell at the top of the
 * level-n queue to the level-n lower bound.  queue[qsize[n]] is the most
 * recently popped flat index (the host advances qsize[n] before relaunch).
 */
__global__ void global_add_lbound(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	if (blockDim.x*blockIdx.x + threadIdx.x == 0) {
		lbound[n] += matrix[queue[qsize[n]]];
	}
}
/* Single-thread reset of the level-n lower-bound accumulator to zero,
   run at the start of each reduction phase. */
__global__ void global_sum_lbound_begin(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	if (blockDim.x*blockIdx.x + threadIdx.x == 0) {
		lbound[n] = 0;
	}
}
/* Single-thread accumulation step: add all n row/column minima currently in
   lslice[0..n-1] to the level-n lower bound. */
__global__ void global_sum_lbound_step(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	if (blockDim.x*blockIdx.x + threadIdx.x == 0) {
		long total = 0;
		for (int i = 0; i < n; i++) {
			total += lslice[i];
		}
		lbound[n] += total;
	}
}
/* Zero the first n entries of both scratch arrays (islice and lslice),
   one work item per entry, grid-stride. */
__global__ void global_slice_clear(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	const int stride = blockDim.x*gridDim.x;
	for (int i = blockDim.x*blockIdx.x + threadIdx.x; i < n; i += stride) {
		islice[i] = 0;
		lslice[i] = 0;
	}
}
/* Intentionally empty placeholder kernel marking the end of the lower-bound
   accumulation phase; kept for symmetry with global_sum_lbound_begin/_step. */
__global__ void global_sum_lbound_end(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
}
/*
 * Single-thread scan: set im[0] = 1 if any of lslice[0..n-1] equals
 * LONG_MAX (a fully forbidden row/column was found), otherwise im[0] = 0.
 */
__global__ void global_check_infinity(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
	if (blockDim.x*blockIdx.x + threadIdx.x == 0) {
		int found = 0;
		for (int i = 0; i < n && found == 0; i++) {
			if (lslice[i] == LONG_MAX) {
				found = 1;
			}
		}
		im[0] = found;
	}
}
/*
 * Single-thread initialization for a fresh search: zero the root lower
 * bound, set identity row/column index maps for the full matrix, and set
 * both level-n queue stack pointers to the queue capacity n*n*n (the queue
 * is pushed with pre-decrement, so "full capacity" means "empty").
 */
__global__ void global_initialize(int *queue, int *qsize, long *lbound, long *gamma, int *islice, long *lslice, long *matrix_1, long *matrix, int *rows, int *cols, int *from, int *to, int *im, long *lm, int n, int rank){
for (int id = blockDim.x*blockIdx.x + threadIdx.x; id < 1; id += blockDim.x*gridDim.x) {
lbound[0] = 0;
for (int i = 0; i < n; i++) rows[i] = i;
for (int i = 0; i < n; i++) cols[i] = i;
qsize[n + 1] = n*n*n;
qsize[n] = qsize[n + 1];
}
}
/*
В случае неправильных параметров возвращённая лучшая цена имеет LONG_MAX значение
*/
/*
 * Host driver for Little's branch-and-bound TSP solver on the GPU.
 * Parameters:
 *   gridSize/blockSize - launch configuration overrides (0 = auto-derived
 *                        from the problem size, capped at 15);
 *   data      - host n x n cost matrix (rank*rank longs), LONG_MAX = forbidden arc;
 *   bestFrom/bestTo - output arrays (rank ints each) with the arcs of the best tour;
 *   bestPrice - output best tour cost; stays LONG_MAX on invalid input;
 *   rank      - matrix order (number of vertices).
 * Recursion is simulated with explicit per-level stacks (matrix/rows/cols/
 * queue/qsize/lbound); n is the current level (= current matrix order).
 * NOTE(review): all cudaMalloc/cudaFree results are stored in `err` but
 * never checked, and kernel launches are never followed by
 * cudaGetLastError() - failures are silently ignored.
 */
__host__ void host_little(int gridSize, int blockSize, long *data, int *bestFrom, int *bestTo, long *bestPrice, int rank)
{
cudaError_t err;
int n; /* Order of the current matrix / current stack level */
long **matrix; /* Stack of per-level cost matrices */
int **rows; /* Stack of per-level row index maps */
int **cols; /* Stack of per-level column index maps */
long *gamma; /* Array of zero-cell coefficients */
int *queue; /* Stack of queues of element indices */
int *qsize; /* Stack pointers of the index queues */
long *lbound; /* Stack of computed lower bounds */
/* The arc (index) stacks are stored in the order of removal from the matrix */
/* Indices are recorded relative to the current matrix size */
/* and must be remapped back to the original matrix size */
int *from; /* Stack of arc tails (indices) in removal order */
int *to; /* Stack of arc heads (indices) in removal order */
int *im;
long *lm;
int *islice;
long *lslice;
int ivalue[2];
long lvalue[2];
int *ibuffer;
long *lbuffer;
n = rank;
ibuffer = (int*)malloc(n*n*sizeof(int));
lbuffer = (long*)malloc(n*n*sizeof(long));
matrix = (long**)malloc((n + 1)*sizeof(long*));
rows = (int**)malloc((n + 1)*sizeof(int*));
cols = (int**)malloc((n + 1)*sizeof(int*));
/* NOTE(review): matrix[0]/rows[0]/cols[0] are never allocated; kernels
   receive matrix[n-1] as matrix_1 even when n == 1, and must not
   dereference it at that level - TODO confirm. */
for (int i = 1; i <= n; i++) err = cudaMalloc((void**)&matrix[i], i*i*sizeof(long));
for (int i = 1; i <= n; i++) err = cudaMalloc((void**)&rows[i], i*sizeof(int));
for (int i = 1; i <= n; i++) err = cudaMalloc((void**)&cols[i], i*sizeof(int));
err = cudaMalloc((void**)&im, 2 * sizeof(int));
err = cudaMalloc((void**)&lm, 2 * sizeof(long));
err = cudaMalloc((void**)&islice, 2*n*sizeof(int));
err = cudaMalloc((void**)&lslice, 2*n*sizeof(long));
err = cudaMalloc((void**)&lbound ,(n + 1)*sizeof(long));
err = cudaMalloc((void**)&from, n*sizeof(int));
err = cudaMalloc((void**)&to, n*sizeof(int));
err = cudaMalloc((void**)&queue, n*n*n * sizeof(int));
err = cudaMalloc((void**)&qsize ,(n + 2)*sizeof(int));
err = cudaMalloc((void**)&gamma,n*n*sizeof(long));
cudaMemcpy(matrix[n], data, n*n*sizeof(long), cudaMemcpyHostToDevice);
global_initialize <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
*bestPrice = LONG_MAX;
/* Auto-derived launch configuration: ~rank^(1/3) blocks/threads, capped at 15 */
int blocks = (gridSize > 0)? gridSize : min(max(1, (int)pow((double)rank, 0.333333333333333)), 15);
int threads = (blockSize > 0)? blockSize : min(max(1, (int)pow((double)rank, 0.333333333333333)), 15);
ivalue[1] = 1;
printf(" Check Graph by rows \n");
/* Check graph connectivity along the rows */
global_slice_clear <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(islice, &ivalue[1], sizeof(int), cudaMemcpyHostToDevice);
for (int i = 1; i <= n; i++)
{
/* Save the previous markers into islice[n..2n-1], then propagate one step */
cudaMemcpy(&islice[n], islice, n*sizeof(int), cudaMemcpyDeviceToDevice);
global_slice_clear <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
global_next_by_row <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
}
cudaMemcpy(ivalue, islice, sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[0] == 0) {
fprintf(stderr, "Wrong Graph\n"); fflush(stderr);
goto the_end;
}
printf(" Check Graph by columns \n");
/* Check graph connectivity along the columns */
global_slice_clear <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(islice, &ivalue[1], sizeof(int), cudaMemcpyHostToDevice);
for (int i = 1; i <= n; i++) {
cudaMemcpy(&islice[n], islice, n*sizeof(int), cudaMemcpyDeviceToDevice);
global_slice_clear <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
global_prev_by_col <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
}
cudaMemcpy(ivalue, islice, sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[0] == 0) {
fprintf(stderr, "Wrong Graph\n"); fflush(stderr);
goto the_end;
}
printf(" Check Graph by rows \n");
/* Second pass: propagate markers without clearing, then take the minimum */
global_slice_clear <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(islice, &ivalue[1], sizeof(int), cudaMemcpyHostToDevice);
for (int i = 1; i <= n; i++)
{
cudaMemcpy(&islice[n], islice, n*sizeof(int), cudaMemcpyDeviceToDevice);
global_next_by_row <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
}
global_min_by_dim <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(ivalue, im, sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[0] == 0) {
fprintf(stderr, "Wrong Graph\n"); fflush(stderr);
goto the_end;
}
printf(" Check Graph by columns \n");
/* Same second pass along the columns */
global_slice_clear <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(islice, &ivalue[1], sizeof(int), cudaMemcpyHostToDevice);
for (int i = 1; i <= n; i++) {
cudaMemcpy(&islice[n], islice, n*sizeof(int), cudaMemcpyDeviceToDevice);
global_prev_by_col <<< blocks, threads >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
}
global_min_by_dim <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(ivalue, im, sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[0] == 0) {
fprintf(stderr, "Wrong Graph\n"); fflush(stderr);
goto the_end;
}
printf("Graph is ok\n");
/* Main branch-and-bound loop; n is the current stack level / matrix order */
while (n > 0 && n <= rank) {
printf("Matrix rank :\t%d\n", n);
/* Debug dump of the current matrix */
cudaMemcpy(lbuffer, matrix[n], n*n*sizeof(long), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++){
printf("%ld%s", lbuffer[i*n + j], ((j == n - 1) ? "\n" : "\t"));
}
}
/* Per-phase launch configurations derived from the work size, capped at 15 */
int blocks0 = (gridSize > 0)? gridSize : min(max(1, (int)pow((double)(rank-n), 0.6666666666666)), 15);
int threads0 = (blockSize > 0)? blockSize : min(max(1, (int)pow((double)(rank-n), 0.6666666666666)), 15);
int blocks1 = (gridSize > 0)? gridSize : min(max(1, (int)pow((double)n, 0.333333333333333)), 15);
int threads1 = (blockSize > 0)? blockSize : min(max(1, (int)pow((double)n, 0.333333333333333)), 15);
int blocks2 = (gridSize > 0)? gridSize : min(max(1, (int)pow((double)n, 0.66666666666666)), 15);
int threads2 = (blockSize > 0)? blockSize : min(max(1, (int)pow((double)n, 0.66666666666666)), 15);
global_sum_lbound_begin <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
if (n > 1) {
printf(" global_add_forbidden \n");
/* Forbid arcs that would close a premature subtour */
global_add_forbidden_a <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
global_add_forbidden_b <<< blocks0, threads0 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(lbuffer, matrix[n], n*n*sizeof(long), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++){
printf("%ld%s", lbuffer[i*n + j], ((j == n - 1) ? "\n" : "\t"));
}
}
/* Reset the level-n queue to empty */
cudaMemcpy(&qsize[n], &qsize[n + 1], sizeof(int), cudaMemcpyDeviceToDevice);
printf(" global_min_by_row \n");
/* Find per-row minima in parallel */
global_min_by_row <<< blocks1, threads1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
global_check_infinity <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(ivalue, im, sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[0] == 0) {
printf(" global_sub_by_row \n");
/* Subtract the row minima in parallel */
global_sub_by_row <<< blocks2, threads2 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(lbuffer, matrix[n], n*n*sizeof(long), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++){
printf("%ld%s", lbuffer[i*n + j], ((j == n - 1) ? "\n" : "\t"));
}
}
printf(" global_sum_lbound_step \n");
global_sum_lbound_step <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
}
printf(" global_min_by_col \n");
/* Find per-column minima in parallel */
global_min_by_col <<< blocks1, threads1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
global_check_infinity <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(ivalue, im, sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[0] == 0) {
printf(" global_sub_by_col \n");
/* Subtract the column minima in parallel */
global_sub_by_col <<< blocks2, threads2 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(lbuffer, matrix[n], n*n*sizeof(long), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++){
printf("%ld%s", lbuffer[i*n + j], ((j == n - 1) ? "\n" : "\t"));
}
}
printf(" global_sum_lbound_step \n");
global_sum_lbound_step <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
}
printf(" global_sum_lbound_end \n");
global_sum_lbound_end <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(lbuffer, &lbound[n], sizeof(long), cudaMemcpyDeviceToHost);
printf("%ld\n", lbuffer[0]);
printf(" global_queue_oneway \n");
/* Enqueue forced arcs: rows/columns with a single remaining open entry */
global_queue_oneway_a <<< blocks1, threads1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
global_queue_oneway_b <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
/* NOTE(review): ibuffer holds n*n ints; assumes no more than n*n indices
   are ever queued per level - confirm. */
cudaMemcpy(ivalue, &qsize[n], 2 * sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[1] > ivalue[0]) cudaMemcpy(ibuffer, &queue[ivalue[0]], (ivalue[1] - ivalue[0])*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < (ivalue[1] - ivalue[0]); i++) printf("%d%s", ibuffer[i], (i == (ivalue[1] - ivalue[0]) - 1) ? "\n" : "\t");
cudaMemcpy(ivalue, &qsize[n], 2 * sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[0] == ivalue[1]) {
printf(" global_calc_gamma \n");
/* Compute the gamma coefficients in parallel */
global_calc_gamma <<< blocks2, threads2 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(lbuffer, gamma, n*n*sizeof(long), cudaMemcpyDeviceToHost);
for (int i = 0; i < n; i++){
for (int j = 0; j < n; j++){
printf("%ld%s", lbuffer[i*n + j], ((j == n - 1) ? "\n" : "\t"));
}
}
/* Find the largest index of the maximal coefficient in parallel */
global_gamma_max_index_of_max_a <<< blocks1, threads1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
global_gamma_max_index_of_max_b <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(lvalue, lm, 2 * sizeof(long), cudaMemcpyDeviceToHost);
if (lvalue[1] != LONG_MIN)
{
printf(" global_queue_indexes_of_max \n");
/* Collect all indices holding the maximal coefficient */
global_queue_indexes_of_max <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
cudaMemcpy(ivalue, &qsize[n], 2 * sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[1] > ivalue[0]) cudaMemcpy(ibuffer, &queue[ivalue[0]], (ivalue[1] - ivalue[0])*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < (ivalue[1] - ivalue[0]); i++) printf("%d%s", ibuffer[i], (i == (ivalue[1] - ivalue[0]) - 1) ? "\n" : "\t");
}
}
else {
/* Forced arcs were queued: keep only the topmost one and count its cost */
ivalue[0] = ivalue[1] - 1;
cudaMemcpy(&qsize[n], ivalue, sizeof(int), cudaMemcpyHostToDevice);
printf(" global_add_lbound \n");
global_add_lbound <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
}
/* All queued indices must now be processed recursively */
/* Instead of real recursion we work with the explicit stack */
}
else {
/* n == 1: a complete tour candidate has been built */
cudaMemcpy(lvalue, matrix[n], sizeof(long), cudaMemcpyDeviceToHost);
if (lvalue[0] != LONG_MAX){
cudaMemcpy(from, rows[n], n*sizeof(int), cudaMemcpyDeviceToDevice);
cudaMemcpy(to, cols[n], n*sizeof(int), cudaMemcpyDeviceToDevice);
printf(" global_sum_lbound \n");
/* Sum the accumulated lower bounds into the tour cost */
global_sum_lbound <<< 1, 1 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n - 1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
/* Compare the current cost with the best found so far */
cudaMemcpy(lvalue, lm, sizeof(long), cudaMemcpyDeviceToHost);
if (lvalue[0] < bestPrice[0]){
bestPrice[0] = lvalue[0];
cudaMemcpy(bestFrom, from, rank * sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(bestTo, to, rank * sizeof(int), cudaMemcpyDeviceToHost);
}
printf("Current Price\t: %ld\n", bestPrice[0]);
}
n++;
}
/* Return from the "recursive" call: pop exhausted levels */
/* Instead of real recursion we work with the explicit stack */
while ((n <= rank)) {
cudaMemcpy(ivalue, &qsize[n], 2 * sizeof(int), cudaMemcpyDeviceToHost);
if (ivalue[0] == ivalue[1]) {
printf(" Return from Recursion \n");
n++;
continue;
}
break;
}
if (n > rank) break;
/* Take the next index from the level-n queue */
cudaMemcpy(ivalue, &qsize[n], sizeof(int), cudaMemcpyDeviceToHost);
cudaMemcpy(&ivalue[1], &queue[ivalue[0]], sizeof(int), cudaMemcpyDeviceToHost);
ivalue[0]++;
cudaMemcpy(&qsize[n], ivalue, sizeof(int), cudaMemcpyHostToDevice);
int id = ivalue[1];
ivalue[0] = id / n; /* Row index */
ivalue[1] = id % n; /* Column index */
cudaMemcpy(im, ivalue, 2 * sizeof(int), cudaMemcpyHostToDevice);
/* Record the chosen arc in original-vertex numbering at stack slot n-1 */
cudaMemcpy(&from[n - 1], &rows[n][ivalue[0]], sizeof(int), cudaMemcpyDeviceToDevice);
cudaMemcpy(&to[n - 1], &cols[n][ivalue[1]], sizeof(int), cudaMemcpyDeviceToDevice);
printf(" global_matrix_trunc \n");
/* Remove the chosen row and column, descending one level */
if (ivalue[0] > 0) cudaMemcpy(rows[n - 1], rows[n], ivalue[0] * sizeof(int), cudaMemcpyDeviceToDevice);
if (ivalue[0] < (n - 1)) cudaMemcpy(&rows[n - 1][ivalue[0]], &rows[n][ivalue[0] + 1], (n - ivalue[0] - 1) * sizeof(int), cudaMemcpyDeviceToDevice);
if (ivalue[1] > 0) cudaMemcpy(cols[n - 1], cols[n], ivalue[1] * sizeof(int), cudaMemcpyDeviceToDevice);
if (ivalue[1] < (n - 1)) cudaMemcpy(&cols[n - 1][ivalue[1]], &cols[n][ivalue[1] + 1], (n - ivalue[1] - 1) * sizeof(int), cudaMemcpyDeviceToDevice);
global_matrix_trunc <<< blocks2, threads2 >>>(queue, qsize, lbound, gamma, islice, lslice, matrix[n-1], matrix[n], rows[n], cols[n], from, to, im, lm, n, rank);
n--;
}
/* Restore n = rank after normal loop exit (the early-error gotos skip this) */
n--;
the_end:
/* Release previously allocated resources */
free(ibuffer);
free(lbuffer);
for (int i = 1; i <= n; i++) err = cudaFree(matrix[i]);
for (int i = 1; i <= n; i++) err = cudaFree(rows[i]);
for (int i = 1; i <= n; i++) err = cudaFree(cols[i]);
free(matrix);
free(rows);
free(cols);
err = cudaFree(gamma);
err = cudaFree(lbound);
err = cudaFree(queue);
err = cudaFree(qsize);
err = cudaFree(from);
err = cudaFree(to);
err = cudaFree(islice);
err = cudaFree(lslice);
err = cudaFree(im);
err = cudaFree(lm);
/* Self-assignment silences the unused-but-set warning on err */
err = err;
}
/* Entry point for the Little's-algorithm TSP solver.
 * Usage: prog [-g gridSize] [-b blockSize] <input.csv> <output.csv>
 * The input is a ';'-separated cost matrix; empty cells mean "no arc".
 * Fixes in this revision:
 *  - size_t fields of cudaDeviceProp were printed with %d (undefined
 *    behavior on LP64); they now use %zu;
 *  - the positional file-name arguments are re-validated after option
 *    parsing ("-g N -b M" alone used to pass the argc<3 check);
 *  - bestPrice is initialized so an early failure inside host_little
 *    is reported as "no path" instead of reading an indeterminate value. */
int main(int argc, char* argv[])
{
	int gridSize = 0;
	int blockSize = 0;
	printf("Title :\t%s\n", title); fflush(stdout);
	if (argc < 3) {
		printf("Usage :\t%s [-g <gridSize>] [-b <blockSize>] <inputfilename> <outputfilename>\n", argv[0]); fflush(stdout);
		printf("\tinputfilename - source matrix of path prices or empty\n"); fflush(stdout);
		printf("\toutputfilename - output best path point-to-point segments\n"); fflush(stdout);
		exit(-1);
	}
	int argId = 1;
	for(; argId < argc && argv[argId][0]=='-' ; argId++){
		switch(argv[argId][1]){
		case 'g':
			gridSize = atoi(argv[++argId]);
			break;
		case 'b':
			blockSize = atoi(argv[++argId]);
			break;
		}
	}
	/* Re-check that both positional arguments survived option parsing. */
	if (argId + 2 > argc) {
		printf("Usage :\t%s [-g <gridSize>] [-b <blockSize>] <inputfilename> <outputfilename>\n", argv[0]); fflush(stdout);
		exit(-1);
	}
	char *inputFileName = argv[argId++];
	char *outputFileName = argv[argId++];
	char buffer[4096];
	char *tok;
	char *p;
	int n;                      /* Rank of the cost matrix */
	long *matrix;               /* Flattened n*n cost matrix */
	int i, j;
	long bestPrice = LONG_MAX;  /* LONG_MAX == "no tour found yet" */
	int *bestFrom;              /* Arc tails, in removal order */
	int *bestTo;                /* Arc heads, in removal order */
	printf("Input File Name :\t%s\n", inputFileName); fflush(stdout);
	printf("Output File Name :\t%s\n", outputFileName); fflush(stdout);
	FILE *fs = fopen(inputFileName, "r");
	if (fs == NULL) {
		fprintf(stderr, "File open error (%s)\n", inputFileName); fflush(stderr);
		exit(-1);
	}
	n = 0;
	/* Fill the matrix from the file; host-side only.
	 * Two passes: the first determines the matrix rank,
	 * the second reads the actual data. */
	for (i = 0; (tok = fgets(buffer, sizeof(buffer), fs)) != NULL; i++)
	{
		j = 0;
		for (tok = mystrtok(&p, tok, ';'); tok != NULL; tok = mystrtok(&p, NULL, ';'))
		{
			j++;
		}
		n = max(n, j);
	}
	n = max(n, i);
	matrix = (long *)malloc(n*n*sizeof(long));
	bestFrom = (int *)malloc((n + 1)*sizeof(int));
	bestTo = (int *)malloc((n + 1)*sizeof(int));
	fseek(fs, 0, SEEK_SET);
	for (i = 0; (tok = fgets(buffer, sizeof(buffer), fs)) != NULL; i++)
	{
		j = 0;
		for (tok = mystrtok(&p, tok, ';'); tok != NULL; tok = mystrtok(&p, NULL, ';'))
		{
			/* Empty cells are forbidden paths */
			matrix[n*i + j++] = strempty(tok) ? LONG_MAX : atol(tok);
		}
		/* Pad short rows with "forbidden" */
		for (; j < n; j++) matrix[n*i + j] = LONG_MAX;
	}
	/* Pad missing trailing rows with "forbidden" */
	for (j = 0; j < (n - i)*n; j++) matrix[n*i + j] = LONG_MAX;
	for (i = 0; i < n; i++) matrix[n*i + i] = LONG_MAX; /* forbid self-loops */
	fclose(fs);
	printf("Matrix rank :\t%d\n", n);
	for (i = 0; i < n; i++){
		for (j = 0; j < n; j++){
			printf("%ld%s", matrix[i*n + j], ((j == n - 1) ? "\n" : "\t"));
		}
	}
	fflush(stdout);
	// Find/set the device.
	int device_size = 0;
	cudaGetDeviceCount(&device_size);
	for (i = 0; i < device_size; ++i)
	{
		cudaDeviceProp cudaDeviceProp;
		cudaGetDeviceProperties(&cudaDeviceProp, i);
		printf("Running on GPU %d (%s)\n", i, cudaDeviceProp.name);
		printf("Device has ECC support enabled %d\n",cudaDeviceProp.ECCEnabled);
		printf("Number of asynchronous engines %d\n",cudaDeviceProp.asyncEngineCount);
		printf("Device can map host memory with cudaHostAlloc/cudaHostGetDevicePointer %d\n",cudaDeviceProp.canMapHostMemory);
		printf("Clock frequency in kilohertz %d\n",cudaDeviceProp.clockRate);
		printf("Compute mode (See cudaComputeMode) %d\n",cudaDeviceProp.computeMode);
		printf("Device can possibly execute multiple kernels concurrently %d\n",cudaDeviceProp.concurrentKernels);
		printf("Device can concurrently copy memory and execute a kernel. Deprecated. Use instead asyncEngineCount. %d\n",cudaDeviceProp.deviceOverlap);
		printf("Device is integrated as opposed to discrete %d\n",cudaDeviceProp.integrated);
		printf("Specified whether there is a run time limit on kernels %d\n",cudaDeviceProp.kernelExecTimeoutEnabled);
		printf("Size of L2 cache in bytes %d\n",cudaDeviceProp.l2CacheSize);
		printf("Major compute capability %d\n",cudaDeviceProp.major);
		printf("Maximum size of each dimension of a grid %d\n",cudaDeviceProp.maxGridSize[0]);
		printf("Maximum size of each dimension of a grid %d\n",cudaDeviceProp.maxGridSize[1]);
		printf("Maximum size of each dimension of a grid %d\n",cudaDeviceProp.maxGridSize[2]);
		printf("Maximum 1D surface size %d\n",cudaDeviceProp.maxSurface1D);
		printf("Maximum 1D layered surface dimensions %d\n",cudaDeviceProp.maxSurface1DLayered[0]);
		printf("Maximum 1D layered surface dimensions %d\n",cudaDeviceProp.maxSurface1DLayered[1]);
		printf("Maximum 2D surface dimensions %d\n",cudaDeviceProp.maxSurface2D[0]);
		printf("Maximum 2D surface dimensions %d\n",cudaDeviceProp.maxSurface2D[1]);
		printf("Maximum 2D layered surface dimensions %d\n",cudaDeviceProp.maxSurface2DLayered[0]);
		printf("Maximum 2D layered surface dimensions %d\n",cudaDeviceProp.maxSurface2DLayered[1]);
		printf("Maximum 2D layered surface dimensions %d\n",cudaDeviceProp.maxSurface2DLayered[2]);
		printf("Maximum 3D surface dimensions %d\n",cudaDeviceProp.maxSurface3D[0]);
		printf("Maximum 3D surface dimensions %d\n",cudaDeviceProp.maxSurface3D[1]);
		printf("Maximum 3D surface dimensions %d\n",cudaDeviceProp.maxSurface3D[2]);
		printf("Maximum Cubemap surface dimensions %d\n",cudaDeviceProp.maxSurfaceCubemap);
		printf("Maximum Cubemap layered surface dimensions %d\n",cudaDeviceProp.maxSurfaceCubemapLayered[0]);
		printf("Maximum Cubemap layered surface dimensions %d\n",cudaDeviceProp.maxSurfaceCubemapLayered[1]);
		printf("Maximum 1D texture size %d\n",cudaDeviceProp.maxTexture1D);
		printf("Maximum 1D layered texture dimensions %d\n",cudaDeviceProp.maxTexture1DLayered[0]);
		printf("Maximum 1D layered texture dimensions %d\n",cudaDeviceProp.maxTexture1DLayered[1]);
		printf("Maximum size for 1D textures bound to linear memory %d\n",cudaDeviceProp.maxTexture1DLinear);
		printf("Maximum 1D mipmapped texture size %d\n",cudaDeviceProp.maxTexture1DMipmap);
		printf("Maximum 2D texture dimensions %d\n",cudaDeviceProp.maxTexture2D[0]);
		printf("Maximum 2D texture dimensions %d\n",cudaDeviceProp.maxTexture2D[1]);
		printf("Maximum 2D texture dimensions if texture gather operations have to be performed %d\n",cudaDeviceProp.maxTexture2DGather[0]);
		printf("Maximum 2D texture dimensions if texture gather operations have to be performed %d\n",cudaDeviceProp.maxTexture2DGather[1]);
		printf("Maximum 2D layered texture dimensions %d\n",cudaDeviceProp.maxTexture2DLayered[0]);
		printf("Maximum 2D layered texture dimensions %d\n",cudaDeviceProp.maxTexture2DLayered[1]);
		printf("Maximum 2D layered texture dimensions %d\n",cudaDeviceProp.maxTexture2DLayered[2]);
		printf("Maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory %d\n",cudaDeviceProp.maxTexture2DLinear[0]);
		printf("Maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory %d\n",cudaDeviceProp.maxTexture2DLinear[1]);
		printf("Maximum dimensions (width, height, pitch) for 2D textures bound to pitched memory %d\n",cudaDeviceProp.maxTexture2DLinear[2]);
		printf("Maximum 2D mipmapped texture dimensions %d\n",cudaDeviceProp.maxTexture2DMipmap[0]);
		printf("Maximum 2D mipmapped texture dimensions %d\n",cudaDeviceProp.maxTexture2DMipmap[1]);
		printf("Maximum 3D texture dimensions %d\n",cudaDeviceProp.maxTexture3D[0]);
		printf("Maximum 3D texture dimensions %d\n",cudaDeviceProp.maxTexture3D[1]);
		printf("Maximum 3D texture dimensions %d\n",cudaDeviceProp.maxTexture3D[2]);
		printf("Maximum alternate 3D texture dimensions %d\n",cudaDeviceProp.maxTexture3DAlt[0]);
		printf("Maximum alternate 3D texture dimensions %d\n",cudaDeviceProp.maxTexture3DAlt[1]);
		printf("Maximum alternate 3D texture dimensions %d\n",cudaDeviceProp.maxTexture3DAlt[2]);
		printf("Maximum Cubemap texture dimensions %d\n",cudaDeviceProp.maxTextureCubemap);
		printf("Maximum Cubemap layered texture dimensions %d\n",cudaDeviceProp.maxTextureCubemapLayered[0]);
		printf("Maximum Cubemap layered texture dimensions %d\n",cudaDeviceProp.maxTextureCubemapLayered[1]);
		printf("Maximum size of each dimension of a block %d\n",cudaDeviceProp.maxThreadsDim[0]);
		printf("Maximum size of each dimension of a block %d\n",cudaDeviceProp.maxThreadsDim[1]);
		printf("Maximum size of each dimension of a block %d\n",cudaDeviceProp.maxThreadsDim[2]);
		printf("Maximum number of threads per block %d\n",cudaDeviceProp.maxThreadsPerBlock);
		printf("Maximum resident threads per multiprocessor %d\n",cudaDeviceProp.maxThreadsPerMultiProcessor);
		/* memPitch and the remaining byte-size/alignment fields are size_t:
		 * print them with %zu, not %d. */
		printf("Maximum pitch in bytes allowed by memory copies %zu\n",cudaDeviceProp.memPitch);
		printf("Global memory bus width in bits %d\n",cudaDeviceProp.memoryBusWidth);
		printf("Peak memory clock frequency in kilohertz %d\n",cudaDeviceProp.memoryClockRate);
		printf("Minor compute capability %d\n",cudaDeviceProp.minor);
		printf("Number of multiprocessors on device %d\n",cudaDeviceProp.multiProcessorCount);
		printf("PCI bus ID of the device %d\n",cudaDeviceProp.pciBusID);
		printf("PCI device ID of the device %d\n",cudaDeviceProp.pciDeviceID);
		printf("PCI domain ID of the device %d\n",cudaDeviceProp.pciDomainID);
		printf("32-bit registers available per block %d\n",cudaDeviceProp.regsPerBlock);
		printf("Shared memory available per block in bytes %zu\n",cudaDeviceProp.sharedMemPerBlock);
		printf("Device supports stream priorities %d\n",cudaDeviceProp.streamPrioritiesSupported);
		printf("Alignment requirements for surfaces %zu\n",cudaDeviceProp.surfaceAlignment);
		printf("1 if device is a Tesla device using TCC driver, 0 otherwise %d\n",cudaDeviceProp.tccDriver);
		printf("Alignment requirement for textures %zu\n",cudaDeviceProp.textureAlignment);
		printf("Pitch alignment requirement for texture references bound to pitched memory %zu\n",cudaDeviceProp.texturePitchAlignment);
		printf("Constant memory available on device in bytes %zu\n",cudaDeviceProp.totalConstMem);
		printf("Global memory available on device in bytes %zu\n",cudaDeviceProp.totalGlobalMem);
		printf("Device shares a unified address space with the host %d\n",cudaDeviceProp.unifiedAddressing);
		printf("Warp size in threads %d\n",cudaDeviceProp.warpSize);
		fflush(stdout);
	}
	host_little(gridSize, blockSize, matrix, bestFrom, bestTo, &bestPrice, n);
	cudaDeviceReset();
	/* Report the results */
	if (bestPrice != LONG_MAX){
		printf("Best Path\t: "); for (int i = 0; i < n; i++) printf("(%d,%d)%s", bestFrom[i], bestTo[i], ((i < (n - 1)) ? "," : "\n"));
		printf("Best Price\t: %ld\n", bestPrice);
		fs = fopen(outputFileName, "w");
		if (fs == NULL) {
			fprintf(stderr, "File open error (%s)\n", outputFileName); fflush(stderr);
			exit(-1);
		}
		for (int i = 0; i < n; i++) fprintf(fs, "%d;%d\n", bestFrom[i], bestTo[i]);
		fclose(fs);
	}
	free(matrix);
	free(bestFrom);
	free(bestTo);
	fflush(stdout);
	if (bestPrice == LONG_MAX) exit(-1);
	exit(0);
}
|
1,702
|
#include <iostream>
#include <cuda.h>
using namespace std;
int *a, *b; // host data
int *c, *c2; // results
// GPU element-wise vector addition: C[i] = A[i] + B[i].
// Bounds guard added: when gridDim.x*blockDim.x does not divide N exactly,
// the trailing threads must not write past the end of the arrays.
__global__ void vecAdd(int *A,int *B,int *C,int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}
// CPU reference implementation of vector addition: C1[i] = A1[i] + B1[i].
// Bug fix: this used to MULTIPLY the elements, so the "CPU Vector add"
// result never matched the GPU vecAdd kernel.
void vecAdd_h(int *A1,int *B1, int *C1, int N)
{
    for(int i=0;i<N;i++)
        C1[i] = A1[i] + B1[i];
}
// Benchmark driver: compares GPU vs CPU vector addition on 10M ints.
// Fixes: the previous block_size of 4000 exceeds the hardware limit of
// 1024 threads per block, so the kernel never launched; the deprecated
// cudaThreadSynchronize() is replaced by cudaDeviceSynchronize(); the
// four leaked host buffers are now freed.
int main(int argc,char **argv)
{
    printf("Begin \n");
    int n=10000000;
    int nBytes = n*sizeof(int);
    // 1000 <= 1024 (max threads/block) and divides n exactly, so the grid
    // covers the data with no partial tail block.
    int block_size = 1000;
    int block_no = n/block_size;
    a = (int *)malloc(nBytes);
    b = (int *)malloc(nBytes);
    c = (int *)malloc(nBytes);
    c2 = (int *)malloc(nBytes);
    int *a_d,*b_d,*c_d;
    for(int i=0;i<n;i++)
        a[i]=i,b[i]=i;
    printf("Allocating device memory on host..\n");
    cudaMalloc((void **)&a_d,nBytes);
    cudaMalloc((void **)&b_d,nBytes);
    cudaMalloc((void **)&c_d,nBytes);
    printf("Copying to device..\n");
    cudaMemcpy(a_d,a,nBytes,cudaMemcpyHostToDevice);
    cudaMemcpy(b_d,b,nBytes,cudaMemcpyHostToDevice);
    clock_t start_d=clock();
    printf("Doing GPU Vector add\n");
    vecAdd<<<block_no,block_size>>>(a_d,b_d,c_d,n);
    // cudaThreadSynchronize() is deprecated; this also surfaces any
    // asynchronous kernel-execution errors before we stop the clock.
    cudaDeviceSynchronize();
    clock_t end_d = clock();
    clock_t start_h = clock();
    printf("Doing CPU Vector add\n");
    vecAdd_h(a,b,c2,n);
    clock_t end_h = clock();
    double time_d = (double)(end_d-start_d)/CLOCKS_PER_SEC;
    double time_h = (double)(end_h-start_h)/CLOCKS_PER_SEC;
    cudaMemcpy(c,c_d,nBytes,cudaMemcpyDeviceToHost);
    printf("%d %f %f\n",n,time_d,time_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    // Release the host-side buffers (previously leaked).
    free(a);
    free(b);
    free(c);
    free(c2);
    return 0;
}
|
1,703
|
#include "includes.h"
// Initialize every entry of ph[0..size) to psi_min using a grid-stride
// loop, so any grid/block configuration covers the whole array.
__global__ void PondHeadInit(double *ph, int size) {
  const int stride = blockDim.x * gridDim.x;
  for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < size; idx += stride) {
    ph[idx] = psi_min;
  }
}
|
1,704
|
#include "includes.h"
// Per-thread partial sums for comparing two complex-valued datasets
// (apparently FSC-style: cross term plus the two power terms).
// For its assigned idx, each thread walks ny rows, reads an interleaved
// (real, imag) pair from each input at the same location, and — only when
// BOTH squared amplitudes exceed their thresholds — accumulates:
//   top            += Re(d1 * conj(d2))
//   sum_data1_amps += |d1|^2
//   sum_data2_amps += |d2|^2
// The three sums are written to device_soln[idx*3 .. idx*3+2].
// NOTE(review): idx = threadIdx.x + blockIdx.x*MAX_THREADS + offset is not
// bounds-checked, and the rindex/iindex layout assumes nx counts
// interleaved floats (real at even, imag at odd offsets) — confirm against
// the caller. nz only appears via the (2*idx/nx)*ny*nz plane stride.
__global__ void fsc_tomo_cmp_kernal(const float* data1, const float* data2, float* device_soln, const float data1threshold, const float data2threshold, const int nx, const int ny, const int nz, const int offset)
{
const uint x=threadIdx.x;
const uint y=blockIdx.x;
int idx = x + y*MAX_THREADS + offset;
float sum_data1_amps = 0.0;
float sum_data2_amps = 0.0;
float top = 0.0;
for(int i = 0; i < ny; i++){
//int index = i*nx + idx % nx + ((idx/nx)*ny*nz); //for coalesing
int rindex = i*nx + 2*(idx % nx/2) + (2*idx/nx)*ny*nz;
int iindex = i*nx + 2*(idx % nx/2)+ 1 + (2*idx/nx)*ny*nz;
float data1_r = data1[rindex];
float data1_i = data1[iindex];
float data2_r = data2[rindex];
float data2_i = data2[iindex];
// Threshold on squared amplitude of BOTH inputs before accumulating.
if((data1_r* data1_r + data1_i*data1_i) > data1threshold && (data2_r* data2_r + data2_i*data2_i) > data2threshold){
sum_data1_amps += (data1_r* data1_r + data1_i*data1_i);
sum_data2_amps += (data2_r* data2_r + data2_i*data2_i);
top += (data1_r*data2_r + data1_i*data2_i);
}
}
// Pack the three partial sums contiguously for a host-side final reduce.
device_soln[idx*3] = top;
device_soln[idx*3 +1] = sum_data1_amps;
device_soln[idx*3 +2] = sum_data2_amps;
}
|
1,705
|
// 1 / (1 + e^(-x))
extern "C"
__global__ void logistic(size_t n, double *result, double *x)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = 1.0 / (1.0 + exp(-x[i]));
}
}
|
1,706
|
#include "includes.h"
// Intentionally empty kernel — presumably launched only so the
// "compute restriction" subphase shows up as a named range in profiler
// timelines; it performs no work. TODO(review): confirm with callers.
__global__ void profileSubphaseComputeRestriction_kernel() {}
|
1,707
|
#include<stdio.h>
#include<iostream>
using namespace std;
// Enumerates CUDA devices and prints their key properties.
// The loop deliberately runs past the last device (dCount+3) to exercise
// the error path of cudaGetDeviceProperties.
// Fix: on failure, `prop` is left uninitialized — printing it was
// undefined behavior. We now report the error and skip that index.
int main() {
    int dCount;
    cudaGetDeviceCount(&dCount);
    for(int i=0; i<dCount+3; i++)
    {
        cudaDeviceProp prop;
        cudaError_t err = cudaGetDeviceProperties(&prop, i);
        if(err != cudaSuccess)
        {
            cout<<"yes"<<endl;
            continue;  // prop holds garbage here; don't print it
        }
        printf("CUDA Device#%d\n", i);
        printf("Device name:%s\n", prop.name);
        printf("multiProcessorCount:%d\n", prop.multiProcessorCount);
        printf("maxThreadsPerBlock:%d\n", prop.maxThreadsPerBlock);
        printf("warpSize:%d\n", prop.warpSize);
        printf("maxThreadsDim[3]:%d, %d, %d\n",
            prop.maxThreadsDim[0],
            prop.maxThreadsDim[1],
            prop.maxThreadsDim[2]);
        printf("maxGridSize[3]:%d, %d, %d\n",
            prop.maxGridSize[0],
            prop.maxGridSize[1],
            prop.maxGridSize[2]);
    }
    return 0;
}
|
1,708
|
#include <cstdint>
#include <cstdio>
// Algorithm parameters.
const double escape_radius = 2.5;
const int max_iterations = 60; // Must be divisible by 3.
const int blocks_size_x = 8;
const int blocks_size_y = 10;
const int threads_size_x = 32;
const int threads_size_y = 32;
typedef struct {
uint8_t red;
uint8_t green;
uint8_t blue;
} color_t;
// Counts the number of iterations for the series to diverge.
// Escape-time test for point (x, y): iterate z -> z^2 + (x, y) starting
// from z = 0 and count the steps until |z| leaves the escape radius or
// the iteration budget is exhausted.
__device__ uint8_t count_iterations(double x, double y) {
    double u = 0.0, v = 0.0;
    uint8_t n = 0;
    while (n < max_iterations) {
        if (!(u * u + v * v < escape_radius * escape_radius)) break;
        const double next_u = u * u - v * v + x;
        const double next_v = 2 * u * v + y;
        u = next_u;
        v = next_v;
        ++n;
    }
    return n;
}
// Linerarly scales the interval [0:size] to [start:stop].
// Map index in [0, size] linearly onto the interval [start, stop].
__device__ double scale(int index, int size, double start, double stop)
{
    const double span = stop - start;
    return span * index / size + start;  // same evaluation order as before
}
// Paints the pixel at coordinates (x_index, y_index) of an image buffer.
// Write one RGB pixel at (x_index, y_index) into a row-major 3-bytes-per-
// pixel image buffer whose rows are x_size pixels wide.
__device__ void paint(uint8_t* buffer, int x_index, int y_index, int x_size, color_t color) {
    uint8_t* px = buffer + 3 * (x_index + (y_index * x_size));
    px[0] = color.red;
    px[1] = color.green;
    px[2] = color.blue;
}
// Converts a number of iterations to a RGB color.
// Map an escape-time iteration count to an RGB color: points that never
// diverge (iterations == max_iterations) are black; diverging points ramp
// red, then red+green, then toward white as the count grows.
__device__ color_t palette(uint8_t iterations) {
    uint8_t r = 0, g = 0, b = 0;
    if (iterations >= max_iterations) {
        return { r, g, b };  // in the set: solid black
    }
    if (iterations < max_iterations / 3) {
        r = 255 * 3 * iterations / max_iterations;
    } else if (iterations < 2 * max_iterations / 3) {
        r = 255;
        g = 255 * 3 * (iterations - (max_iterations / 3)) / max_iterations;
    } else {
        r = 255;
        g = 255;
        b = 255 * 3 * (iterations - (2 * max_iterations / 3)) / max_iterations;
    }
    return { r, g, b };
}
// Partially renders the image buffer for a single thread.
// Render kernel: each thread walks its grid-stride subset of pixels,
// maps the pixel to a point in [x_start,x_stop] x [y_start,y_stop],
// and paints the palette color of its escape-time count.
__global__ void render_thread(uint8_t *buffer,
        int x_size, double x_start, double x_stop,
        int y_size, double y_start, double y_stop)
{
    const int first_col = threadIdx.x + (blockIdx.x * blockDim.x);
    const int first_row = threadIdx.y + (blockIdx.y * blockDim.y);
    const int col_step = blockDim.x * gridDim.x;
    const int row_step = blockDim.y * gridDim.y;
    for (int row = first_row; row < y_size; row += row_step) {
        const double y = scale(row, y_size, y_start, y_stop);
        for (int col = first_col; col < x_size; col += col_step) {
            const double x = scale(col, x_size, x_start, x_stop);
            paint(buffer, col, row, x_size, palette(count_iterations(x, y)));
        }
    }
}
// Renders the entire image buffer.
// Host entry point: allocates the device image, launches the render
// kernel, and copies the finished RGB buffer back.
// Fix: CUDA calls were previously unchecked — a failed allocation or a
// bad launch would silently produce an uninitialized image. Errors are
// now reported on stderr (requires <cstdio>).
extern "C" void render(uint8_t *buffer,
        int x_size, double x_start, double x_stop,
        int y_size, double y_start, double y_stop)
{
    uint8_t* gpu_buffer = nullptr;
    size_t size = sizeof(uint8_t) * 3 * x_size * y_size;
    cudaError_t err = cudaMalloc(&gpu_buffer, size);
    if (err != cudaSuccess) {
        fprintf(stderr, "render: cudaMalloc failed: %s\n", cudaGetErrorString(err));
        return;
    }
    render_thread<<<
        dim3(blocks_size_x, blocks_size_y),
        dim3(threads_size_x, threads_size_y)>>>(gpu_buffer,
            x_size, x_start, x_stop,
            y_size, y_start, y_stop);
    // Launch-configuration errors surface here, not at the <<<>>> itself.
    err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "render: kernel launch failed: %s\n", cudaGetErrorString(err));
    // cudaMemcpy synchronizes with the kernel and surfaces execution errors.
    err = cudaMemcpy(buffer, gpu_buffer, size, cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
        fprintf(stderr, "render: copy-back failed: %s\n", cudaGetErrorString(err));
    cudaFree(gpu_buffer);
}
|
1,709
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
// Print a readable message when a CUDA API call fails.
// Fix: the error string was passed AS the printf format; an error text
// containing '%' would have been interpreted as conversion specifiers
// (classic format-string bug). It is now passed as an argument.
void check(cudaError_t e) {
    if(e != cudaSuccess) {
        printf("%s\n", cudaGetErrorString(e));
    }
}
// One thread per element: c[i] = a[i] + b[i].
// Must be launched with exactly as many threads as elements (single block).
__global__ void addArrayGPU(int* a, int* b, int* c) {
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Adds two 5-element arrays on the GPU and prints the result.
// Fix: the check() helper above was defined but never used, so every
// CUDA failure was silently ignored; all runtime calls are now checked
// and cudaGetLastError() catches launch-configuration errors.
int main() {
    const int count = 5;
    int ha[] = { 1, 2, 3, 4, 5 };
    int hb[] = { 10, 20, 30, 40, 50};
    int hc[count];
    int *da, *db, *dc;
    int size = sizeof(int)*count;
    check(cudaMalloc(&da, size));
    check(cudaMalloc(&db, size));
    check(cudaMalloc(&dc, size));
    check(cudaMemcpy(da, ha, size, cudaMemcpyHostToDevice));
    check(cudaMemcpy(db, hb, size, cudaMemcpyHostToDevice));
    addArrayGPU<<<1, count>>>(da, db, dc);
    check(cudaGetLastError());  // surfaces launch errors
    // The blocking copy-back also synchronizes with the kernel.
    check(cudaMemcpy(hc, dc, size, cudaMemcpyDeviceToHost));
    printf("%d %d %d %d %d",
        hc[0],
        hc[1],
        hc[2],
        hc[3],
        hc[4]);
    check(cudaFree(da));
    check(cudaFree(db));
    check(cudaFree(dc));
    return 0;
}
|
1,710
|
// Seed the domain with a repeating 1, 0, 2 pattern along x.
// Cleanup: removed the dead blockXThreadSize/blockYThreadSize locals and
// the no-op `threadIdx.x % blockDim.x` (always threadIdx.x).
// NOTE(review): `pitch` and `block_y_step` are accepted but unused, and
// rows are addressed with blockDim.x rather than the pitch — confirm the
// launch configuration makes these agree with the allocation.
__global__ void init_kernel(int * domain, int pitch, int block_y_step)
{
    int tx = threadIdx.x;
    int ty = (blockIdx.y * blockDim.y) + threadIdx.y;
    switch (tx % 3) {
    case(0):
        domain[ tx + ty * blockDim.x] = 1;
        break;
    case(1):
        domain[ tx + ty * blockDim.x] = 0;
        break;
    case(2):
        domain[ tx + ty * blockDim.x] = 2;
        break;
    }
}
// Reads a cell at (x+dx, y+dy)
__device__ int read_cell(int * source_domain, int x, int y, int dx, int dy,
int domain_x, int domain_y, int pitch)
{
x = (x + dx + domain_x ) % domain_x; // Wrap around
y = (y + dy + domain_y ) % domain_y;
return source_domain[y * (pitch / sizeof(int)) + x];
}
// Write `value` to the cell at (x+dx, y+dy); coordinates wrap around
// torus edges. `pitch` is the row stride in BYTES.
__device__ void write_cell(int * dest_domain, int x, int y, int dx, int dy,
    int domain_x, int domain_y, int pitch, int value)
{
    const int col = (x + dx + domain_x) % domain_x;
    const int row = (y + dy + domain_y) % domain_y;
    dest_domain[row * (pitch / sizeof(int)) + col] = value;
}
// Compute kernel
__global__ void life_kernel(int * source_domain, int * dest_domain, int domain_x, int domain_y, int pitch)
{
extern __shared__ int shared_data[];
int tx = threadIdx.x ;
/* 0-511 */
/* 0-127 */
/*
global memory
X ---------> Y
00210210210210210210210210210210210210210210210210210210210210210210210210210020100210211002210210210210210210020100020002210210 |
01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 |
11000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 V
11000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001
*/
/* 0-31 */
/* 0-124 step 4 + 0 - 3 */
/* 0-127 */
int ty = blockIdx.y * blockDim.y + (threadIdx.y);
/* 0 -127 */
int shared_tx = tx;
/* 1 - 4 */
int shared_ty = ty % blockDim.y + 1;
// load shared;
/*
127
0 /
// Shared memory |
0 00210210210210210210210210210210210210210210210210210210210210210210210210210020100210211002210210210210210210020100020002210210
X(shared_x=0, shared_y= 1) ---------> Y
1 00210210210210210210210210210210210210210210210210210210210210210210210210210020100210211002210210210210210210020100020002210210 |
2 01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 |
3 11000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001 V
4 11000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001
5 11000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001
*/
/* 0-127 + (1-4)*128 */
shared_data[shared_tx + (shared_ty)*blockDim.x ] = read_cell(source_domain, tx, ty, 0, 0, domain_x, domain_y, pitch);
if (shared_ty == 1) {
/* 0-127 + 0 */
shared_data[shared_tx + (shared_ty-1)*blockDim.x ] = read_cell(source_domain, tx, ty, 0, -1, domain_x, domain_y, pitch);
}
if (shared_ty == 4) {
/* 0-127 + 5*blockDim.x */
shared_data[shared_tx + (shared_ty+1)*blockDim.x ] = read_cell(source_domain, tx, ty, 0, 1,
domain_x, domain_y, pitch);
}
#if 0
if ( (threadIdx.x == 0) && (threadIdx.y==0) && (blockIdx.y==0 )) {
int i;
for (i=0;i<768;i++) {
write_cell(dest_domain, i%blockDim.x, i/blockDim.x, 0,0,domain_x,domain_y,pitch,(shared_data[i]+1)%10);
}
}
return;
#endif
// Read cell
// int myself=0;
int myself = shared_data[shared_tx + (shared_ty)*blockDim.x];
// TODO: Read the 8 neighbors and count number of blue and red
int blue=0;
int red=0;
int adjacent_count=0;
for (int i=0; i<9;i++) {
if (i==4) /* itself */ {
continue;
}
int x = i % 3 - 1;
int y = (int) (i / 3) - 1;
// In C, mod of negative is negative
int near = shared_data[(((x+shared_tx+blockDim.x)%blockDim.x) + ((shared_ty+y)*blockDim.x))];
switch (near) {
case (1):
red++;
break;
case (2):
blue++;
break;
default:
break;
}
if ( (i+1)%2==0) {
if (near>0) {
adjacent_count++;
}
}
}
int total_near = blue+red;
int new_value = myself;
// rules
if ((total_near)>3) {
new_value = 0;
}
if (adjacent_count==1) {
new_value = 0;
}
if ((total_near)==3 && (myself==0)) {
if (blue>red) {
new_value=2;
}
else {
new_value=1;
}
}
write_cell(dest_domain, tx, ty, 0,0,domain_x,domain_y,pitch,new_value);
return;
}
|
1,711
|
#ifndef _GPU_CUDA_COMMON_CU__
#define _GPU_CUDA_COMMON_CU__
#include <stdio.h>
namespace SiddhiGpu
{
// Device-side string equality: true iff s1 and s2 are identical.
__device__ bool cuda_strcmp(const char *s1, const char *s2)
{
	// if(!s1 || !s2) return false; TODO: uncomment
	while (*s1 == *s2) {
		if (*s1 == '\0') return true;  // matched all the way to the terminator
		++s1;
		++s2;
	}
	return false;
}
// True iff s2 is a prefix of s1.
// Fix: the old loop tested *(s2+1), which read one byte PAST the
// terminator when s2 was empty, and it returned false for an empty s2
// even though the empty string is a prefix of everything.
__device__ bool cuda_prefix(char *s1, char *s2)
{
	if(!s1 || !s2) return false;
	for ( ; *s2 != '\0'; ++s1, ++s2) {
		if (*s1 != *s2) return false;  // mismatch (or s1 ended first)
	}
	return true;  // consumed all of s2
}
// True iff s2 occurs as a substring of s1 (naive window scan).
// Fix: the inner comparison indexed s1[i+j-1], reading s1[-1] at the
// first window and comparing a window shifted one position left of the
// intended one. It now compares s1[i+j] against s2[j].
__device__ bool cuda_contains(const char *s1, const char *s2)
{
	if(!s1 || !s2) return false;
	int size1 = 0;
	int size2 = 0;
	while (s1[size1]!='\0')
		size1++;
	while (s2[size2]!='\0')
		size2++;
	if (size1==size2)
		return cuda_strcmp(s1, s2);
	if (size1<size2)
		return false;
	// Slide a size2-wide window over s1.
	for (int i=0; i<size1-size2+1; i++)
	{
		bool failed = false;
		for (int j=0; j<size2; j++)
		{
			if (s1[i+j]!=s2[j])
			{
				failed = true;
				break;
			}
		}
		if (! failed)
			return true;
	}
	return false;
}
}
#endif
|
1,712
|
/*
============================================================================
Name : juliaset.cu
Author : Wolfgang
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <iostream>
#include <numeric>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
static void CheckCudaErrorAux (const char *, unsigned, const char *, cudaError_t);
#define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value)
/**
* CUDA kernel that computes reciprocal values for a given vector
*/
/*
__global__ void reciprocalKernel(float *data, unsigned vectorSize) {
unsigned idx = blockIdx.x*blockDim.x+threadIdx.x;
if (idx < vectorSize)
data[idx] = 1.0/data[idx];
}
*/
/**
* Host function that copies the data and launches the work on GPU
*/
/*
float *gpuReciprocal(float *data, unsigned size)
{
float *rc = new float[size];
float *gpuData;
CUDA_CHECK_RETURN(cudaMalloc((void **)&gpuData, sizeof(float)*size));
CUDA_CHECK_RETURN(cudaMemcpy(gpuData, data, sizeof(float)*size, cudaMemcpyHostToDevice));
static const int BLOCK_SIZE = 32;
const int blockCount = (size+BLOCK_SIZE-1)/BLOCK_SIZE;
reciprocalKernel<<<blockCount, BLOCK_SIZE>>> (gpuData, size);
CUDA_CHECK_RETURN(cudaMemcpy(rc, gpuData, sizeof(float)*size, cudaMemcpyDeviceToHost));
CUDA_CHECK_RETURN(cudaFree(gpuData));
return rc;
}
*/
// Complex addition, components passed separately: c = a + b.
__device__ void cadd(float a_real, float a_imag, float b_real, float b_imag, float* c_real, float* c_imag)
{
	const float re = a_real + b_real;
	const float im = a_imag + b_imag;
	*c_real = re;
	*c_imag = im;
}
// Complex multiplication, components passed separately: c = a * b.
__device__ void cmul(float a_real, float a_imag, float b_real, float b_imag, float* c_real, float* c_imag)
{
	const float re = (a_real*b_real)-(a_imag*b_imag);
	const float im = (a_real*b_imag)+(a_imag*b_real);
	*c_real = re;
	*c_imag = im;
}
// Magnitude of the complex number a: |a| = sqrt(re^2 + im^2).
// Fix: sqrt(pow(...)) promoted everything to double in a float kernel;
// hypotf is the single-precision routine and also avoids intermediate
// overflow/underflow of the squared terms.
__device__ void cbetr(float a_real, float a_imag, float* btr)
{
	*btr = hypotf(a_real, a_imag);
}
// Quaternion addition, components passed separately: c = a + b.
__device__ void qadd(float a_real, float a_i, float a_j, float a_k, float b_real, float b_i, float b_j, float b_k, float* c_real, float* c_i, float* c_j, float* c_k)
{
	const float w = a_real + b_real;
	const float x = a_i + b_i;
	const float y = a_j + b_j;
	const float z = a_k + b_k;
	*c_real = w;
	*c_i = x;
	*c_j = y;
	*c_k = z;
}
// Quaternion Hamilton product: c = a * b.
// Fix: the i/j/k components had wrong signs (every term was subtracted),
// which does not match the Hamilton product
//   (w1,x1,y1,z1)*(w2,x2,y2,z2) =
//     (w1w2 - x1x2 - y1y2 - z1z2,
//      w1x2 + x1w2 + y1z2 - z1y2,
//      w1y2 - x1z2 + y1w2 + z1x2,
//      w1z2 + x1y2 - y1x2 + z1w2).
__device__ void qmul(float a_real, float a_i, float a_j, float a_k, float b_real, float b_i, float b_j, float b_k, float* c_real, float* c_i, float* c_j, float* c_k)
{
	*c_real = (a_real*b_real)-(a_i*b_i)-(a_j*b_j)-(a_k*b_k);
	*c_i = (a_real*b_i)+(a_i*b_real)+(a_j*b_k)-(a_k*b_j);
	*c_j = (a_real*b_j)-(a_i*b_k)+(a_j*b_real)+(a_k*b_i);
	*c_k = (a_real*b_k)+(a_i*b_j)-(a_j*b_i)+(a_k*b_real);
}
// Quaternion norm: |a| = sqrt(w^2 + x^2 + y^2 + z^2).
// Fix: sqrt(pow(...)) promoted to double in a float kernel; use plain
// float multiplies and sqrtf instead.
__device__ void qbetr(float a_real, float a_i, float a_j, float a_k, float* btr)
{
	*btr = sqrtf(a_real*a_real + a_i*a_i + a_j*a_j + a_k*a_k);
}
// Julia-set escape times for the constant c = (c_real, c_imag).
// One thread per pixel: the flat thread index x maps to pixel (n, m) via
// n = x/N, m = x%N; the sample point starts at (x_start, y_start) and
// steps by `granularity`. The iteration count is stored as a float in A[x].
// NOTE(review): A[x] is written unconditionally — the launch must supply
// exactly N*M threads (M is otherwise unused here); confirm with callers.
__global__ void calc_CJuliaset(float* A, float c_real, float c_imag, int number_of_iterations, float x_start, float y_start, float granularity, int N, int M)
{
// Block index
//int bx = blockIdx.x;
//int by = blockIdx.y;
// Thread index
//int tx = threadIdx.x;
//int ty = threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
//int y = blockDim.y * blockIdx.y + threadIdx.y;
int n = x/N;
int m = x-(n*N);
int step=0;
float temp_real=0;
float temp_imag=0;
// Iteration starts from the pixel's sample point (Julia-set convention).
float z_real=x_start-(n*granularity);
float z_imag=y_start-(m*granularity);
while(step < number_of_iterations)
{
cmul(z_real, z_imag, z_real, z_imag, &temp_real, &temp_imag); //function to calculate JuliaSet z(1) = z(0)² + c
cadd(temp_real, temp_imag, c_real, c_imag, &z_real, &z_imag);
cbetr(z_real, z_imag, &temp_real);
// |z| > 2 guarantees divergence.
if(temp_real>2)
{
break; //is NOT considered as in JuliaSet
}
//z.real=temp.real;
//z.imag=temp.imag;
step++;
}
A[x] = step;
}
// Mandelbrot escape times over a window anchored at (x_start, y_start)
// with per-pixel step `granularity`. One thread per pixel (n = x/N,
// m = x%N); z starts at 0 and c is the pixel's sample point. The count is
// stored in A[x].
// NOTE(review): A[x] is written unconditionally — the launch must supply
// exactly N*M threads (M is otherwise unused here); confirm with callers.
__global__ void calc_CMandelbrot(int* A, int number_of_iterations, float x_start, float y_start, float granularity, int N, int M)
{
// Block index
//int bx = blockIdx.x;
//int by = blockIdx.y;
// Thread index
//int tx = threadIdx.x;
//int ty = threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
//int y = blockDim.y * blockIdx.y + threadIdx.y;
int n = x/N;
int m = x-(n*N);
int step=0;
float temp_real=0;
float temp_imag=0;
// Mandelbrot convention: c is the pixel, z starts at the origin.
float c_real=x_start-(n*granularity);//0;
float c_imag=y_start-(m*granularity);//0;
float z_real=0;//x_start-(n*granularity);
float z_imag=0;//y_start-(m*granularity);
while(step < number_of_iterations)
{
cmul(z_real, z_imag, z_real, z_imag, &temp_real, &temp_imag); //function to calculate JuliaSet z(1) = z(0)² + c
cadd(temp_real, temp_imag, c_real, c_imag, &z_real, &z_imag);
cbetr(z_real, z_imag, &temp_real);
// |z| > 2 guarantees divergence.
if(temp_real>2)
{
break; //is NOT considered as in JuliaSet
}
//z.real=temp.real;
//z.imag=temp.imag;
step++;
}
A[x] = step;
}
// Mandelbrot escape times over the fixed window [-2, 2] x [-2, 2],
// sampled on an N x M pixel grid. One thread per pixel (n = x/N is the
// row, m = x%N the column); the count is stored in A[x].
// NOTE(review): gran_n divides by (M-1) while n ranges with N — confirm
// the intended N/M orientation. A[x] is written unconditionally, so the
// launch must supply exactly N*M threads.
__global__ void calc_CMandelbrot(int* A, long number_of_iterations, int N, int M)
{
// Block index
//int bx = blockIdx.x;
//int by = blockIdx.y;
// Thread index
//int tx = threadIdx.x;
//int ty = threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
//int y = blockDim.y * blockIdx.y + threadIdx.y;
int n = x/N;
int m = x-(n*N);
// Step sizes that span the 4-unit-wide window across the pixel grid.
float gran_n = 4./(M-1);
float gran_m = 4./(N-1);
int step=0;
float temp_real=0;
float temp_imag=0;
float c_imag=2-(n*gran_n);//0;
float c_real=-2+(m*gran_m);//0;
float z_real=0;//x_start-(n*granularity);
float z_imag=0;//y_start-(m*granularity);
while(step < number_of_iterations)
{
cmul(z_real, z_imag, z_real, z_imag, &temp_real, &temp_imag); //function to calculate MandelbrotSet z(1) = z(0)² + c
cadd(temp_real, temp_imag, c_real, c_imag, &z_real, &z_imag);
cbetr(z_real, z_imag, &temp_real);
// |z| > 2 guarantees divergence.
if(temp_real>2)
{
break; //is NOT considered as in MandelbrotSet
}
//z.real=temp.real;
//z.imag=temp.imag;
step++;
}
A[x] = step;
}
// Mandelbrot escape times on a pixel_x x pixel_y grid.
// NOTE(review): despite the signature, start_x/start_y/end_x/end_y and
// `device` are IGNORED — the window is hard-coded to [-2, 2] x [-2, 2]
// exactly like the (N, M) overload above. Either wire the start/end
// parameters into gran_n/gran_m/c_* or drop them; confirm caller intent.
// A[x] is written unconditionally, so the launch must supply exactly
// pixel_x*pixel_y threads.
__global__ void calc_CMandelbrot(int* A, long number_of_iterations, int pixel_x, int pixel_y, float start_x, float start_y, float end_x, float end_y, int device)
{
// Block index
//int bx = blockIdx.x;
//int by = blockIdx.y;
// Thread index
//int tx = threadIdx.x;
//int ty = threadIdx.y;
int x = blockDim.x * blockIdx.x + threadIdx.x;
//int y = blockDim.y * blockIdx.y + threadIdx.y;
int n = x/pixel_x;
int m = x-(n*pixel_x);
// Hard-coded 4-unit window (see NOTE above).
float gran_n = 4./(pixel_y-1);
float gran_m = 4./(pixel_x-1);
int step=0;
float temp_real=0;
float temp_imag=0;
float c_imag=2-(n*gran_n);//0;
float c_real=-2+(m*gran_m);//0;
float z_real=0;//x_start-(n*granularity);
float z_imag=0;//y_start-(m*granularity);
while(step < number_of_iterations)
{
cmul(z_real, z_imag, z_real, z_imag, &temp_real, &temp_imag); //function to calculate MandelbrotSet z(1) = z(0)² + c
cadd(temp_real, temp_imag, c_real, c_imag, &z_real, &z_imag);
cbetr(z_real, z_imag, &temp_real);
// |z| > 2 guarantees divergence.
if(temp_real>2)
{
break; //is NOT considered as in MandelbrotSet
}
//z.real=temp.real;
//z.imag=temp.imag;
step++;
}
A[x] = step;
}
/* Renders the Mandelbrot set once per available CUDA device and prints
 * the per-pixel iteration counts.
 * Fixes in this revision:
 *  - the pointer tables were allocated with `devices` BYTES instead of
 *    devices * sizeof(int*);
 *  - h_A[i] was never allocated, so the copy-back wrote through a wild
 *    pointer;
 *  - d_A[i] received a host malloc that cudaMalloc immediately
 *    overwrote (leak);
 *  - the device buffer held size/devices bytes but the kernel writes —
 *    and the copy-back read — the FULL image (out-of-bounds);
 *  - cudaFree() was handed the host pointer table instead of the device
 *    allocation;
 *  - the caller's `iterations` was ignored in favor of a hard-coded 10000.
 * NOTE(review): start_x/start_y/end_x/end_y are still unused because the
 * kernel overload invoked here hard-codes the [-2,2] window. */
void startCalc(float start_x, float start_y, float end_x, float end_y, int pixel_x, int pixel_y, int iterations)
{
	clock_t prgstart, prgende;
	int O = 1;
	size_t size = pixel_x * pixel_y * O * sizeof(int);
	int devices = 0;
	cudaError_t error;
	error = cudaGetDeviceCount(&devices);
	if (error != cudaSuccess)
	{
		printf("cudaGetDeviceCount returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
		exit(EXIT_FAILURE);
	}
	printf("DeviceCount: %d\n", devices);
	int **h_A = (int**)malloc(devices * sizeof(int*));
	int **d_A = (int**)malloc(devices * sizeof(int*));
	int i;
	for(i=0; i<devices; i++)
	{
		h_A[i] = (int*)malloc(size);
		error = cudaSetDevice(i);
		if (error != cudaSuccess)
		{
			printf("cudaSetDeviceCount returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
			exit(EXIT_FAILURE);
		}
		error = cudaMalloc((void **) &d_A[i], size);
		if (error != cudaSuccess)
		{
			printf("cudaMalloc d_A returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__);
			exit(EXIT_FAILURE);
		}
		int BLOCK_SIZE = 512;
		int blockCount = (pixel_x*pixel_y)/BLOCK_SIZE;
		prgstart=clock();
		/* Honor the caller's iteration budget. */
		calc_CMandelbrot<<<blockCount, BLOCK_SIZE>>>(d_A[i], (long)iterations, pixel_x, pixel_y);
		cudaMemcpy(h_A[i], d_A[i], size, cudaMemcpyDeviceToHost);
		cudaFree(d_A[i]);
		prgende=clock(); /* CPU time at the end of this device's run */
		printf("Laufzeit %.2f Sekunden\n",(float)(prgende-prgstart) / CLOCKS_PER_SEC);
		for(int a=0; a<pixel_x*pixel_y; a++)
		{
			if(a%pixel_x==0)
			{
				printf("\n");
			}
			printf(" %d ", h_A[i][a]);
		}
		free(h_A[i]);
	}
	free(h_A);
	free(d_A);
}
// Entry point: render a 64x64 Mandelbrot iteration map over the complex
// window [-2,2] x [-2,2] with an iteration budget of 1024.
int main(int argc, char* argv[])
{
    startCalc(-2, 2, -2, 2, 64, 64, 1024);
    return 0;
}
/**
 * Check the return value of a CUDA runtime API call and exit the
 * application if the call has failed; no-op on success.
 */
static void CheckCudaErrorAux (const char *file, unsigned line, const char *statement, cudaError_t err)
{
    if (err != cudaSuccess) {
        std::cerr << statement << " returned " << cudaGetErrorString(err)
                  << "(" << err << ") at " << file << ":" << line << std::endl;
        exit(1);
    }
}
|
1,713
|
// O(N) operations
#include <stdio.h>
#include <iostream>
using namespace std;
// Abort with file/line context and the CUDA error string if a runtime call fails.
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(1); \
} \
}
// BS: threads per block (32); N: number of scan input elements (1024).
const int BS = 1 << 5;
const int N = 1 << 10;
// Up-sweep (reduce) phase of a Blelloch scan, applied in passes.
// 'chunk' is the element stride at which this pass starts combining;
// partial sums are built in place up the tree.
__global__ void up_sweep(float *g_idata, const int chunk)
{
    int threadId = threadIdx.x;
    int entireId = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = chunk;
    for (int d = blockDim.x >> 1; d > 0; d >>= 1) // build sum in place up the tree
    {
        __syncthreads();
        if (threadId < d)
        {
            int ai = offset * (2 * (entireId / blockDim.x * d + threadId) + 1) - 1;
            int bi = offset * (2 * (entireId / blockDim.x * d + threadId) + 2) - 1;
            // Valid indices are 0..N-1 (N is a power of two here, so the
            // buffer holds exactly N floats). The original guard used
            // '<= N', which permitted a one-past-the-end access.
            if (!((0 <= ai && ai < N) && (0 <= bi && bi < N)))
                continue;
            g_idata[bi] += g_idata[ai];
        }
        offset <<= 1;
    }
}
// Clear the final element (index pow2-1) ahead of the down-sweep phase of an
// exclusive Blelloch scan; intended to be launched with a single thread.
__global__ void assign_zero(float *g_idata, const long long pow2)
{
// C
g_idata[pow2 - 1] = 0; // clear the last element
}
// Down-sweep phase of a Blelloch exclusive scan, applied in passes; 'chunk'
// is the element stride at which this pass starts redistributing sums.
__global__ void down_sweep(float *g_idata, const int chunk)
{
    int threadId = threadIdx.x;
    int entireId = blockIdx.x * blockDim.x + threadIdx.x;
    int offset = chunk;
    // Scale the offset up to the top of this pass's tree.
    for (int d = blockDim.x >> 1; d > 0; d >>= 1)
        offset <<= 1;
    for (int d = 1; d < blockDim.x; d <<= 1) // traverse down tree & build scan
    {
        offset >>= 1;
        __syncthreads();
        if (threadId < d)
        {
            int ai = offset * (2 * (entireId / blockDim.x * d + threadId) + 1) - 1;
            int bi = offset * (2 * (entireId / blockDim.x * d + threadId) + 2) - 1;
            // Valid indices are 0..N-1; the original '<= N' guard permitted
            // a one-past-the-end access (the buffer holds exactly N floats
            // since N is a power of two here).
            if (!((0 <= ai && ai < N) && (0 <= bi && bi < N)))
                continue;
            float t = g_idata[ai];
            g_idata[ai] = g_idata[bi];
            g_idata[bi] += t;
        }
    }
    __syncthreads();
}
// Driver: build the sequence 1..N, run the up-sweep passes on the GPU, and
// print the last ten partial sums. Fixes over the original: CUDA calls are
// now routed through CHECK (the macro already existed but was unused here),
// the pow2-padding of the host buffer is zero-initialized instead of being
// copied uninitialized, and all allocations are released.
int main()
{
    // Round N up to the next power of two so the scan tree is complete.
    long long pow2 = 1;
    while (pow2 < N)
        pow2 <<= 1;
    float *v = (float *)malloc(sizeof(float) * pow2);
    for (long long i = 0; i < pow2; i++)
        v[i] = (i < N) ? (float)(i + 1) : 0.0f;   // zero padding beyond N
    float *g_idata;
    CHECK(cudaMalloc((void **)&g_idata, sizeof(float) * pow2));
    CHECK(cudaMemcpy(g_idata, v, sizeof(float) * pow2, cudaMemcpyDefault));
    { // calc: one up-sweep pass per factor of BS in the tree height
        long long chunkSize;
        for (chunkSize = 1; chunkSize < pow2; chunkSize *= BS)
        {
            up_sweep<<<(pow2 + BS - 1) / BS, BS>>>(g_idata, chunkSize);
            CHECK(cudaDeviceSynchronize());
        }
        /*
        assign_zero<<<1, 1>>>(g_idata, pow2);
        CHECK(cudaDeviceSynchronize());
        for (chunkSize /= BS; chunkSize > 0; chunkSize /= BS)
        {
        down_sweep<<<(pow2 + BS - 1) / BS, BS>>>(g_idata, chunkSize);
        CHECK(cudaDeviceSynchronize());
        }
        */
    }
    float *res = (float *)malloc(sizeof(float) * pow2);
    CHECK(cudaMemcpy(res, g_idata, sizeof(float) * pow2, cudaMemcpyDefault));
    CHECK(cudaFree(g_idata));   // device buffer was leaked in the original
    for (int i = N - 10; i < N; i++)
    {
        cout << i << " " << (long long)res[i] << "\n";
    }
    cout << flush;
    free(v);
    free(res);
}
|
1,714
|
//
// Created by heidies on 7/5/18.
//
#include <cuda_runtime.h>
#include <iostream>
using namespace std;
// Enumerate CUDA devices and print the properties of device 0.
// Fixes over the original: exits when cudaGetDeviceCount fails (the original
// printed FAIL and carried on querying a device anyway), corrects the
// "Deteced"/"that support" typos in the output, and replaces pow(1024.0, 3)
// with an explicit product so no <cmath> include is relied upon transitively.
int main(int argc, char **argv){
    cout << "Starting... " << endl;
    int deviceCount = 0;
    cudaError_t error_id = cudaGetDeviceCount(&deviceCount);
    if (error_id != cudaSuccess){
        cout << "cudaGetDeviceCount returned " << int(error_id) << endl;
        cout << "-> " << cudaGetErrorString(error_id) << endl;
        cout << "Result = FAIL" << endl;
        exit(EXIT_FAILURE);   // querying properties below would fail anyway
    }
    if (deviceCount == 0){
        cout << "There is no available device that supports CUDA" << endl;
    }
    else{
        cout << "Detected " << deviceCount << " CUDA Capable device(s)" << endl;
    }
    int dev, driverVersion = 0, runtimeVersion = 0;
    dev = 0;
    cudaSetDevice(dev);
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, dev);
    cout << "Device " << dev << "\"" << deviceProp.name << "\"" << endl;
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    // Versions are encoded as major*1000 + minor*10.
    cout << "  CUDA Driver Version / Runtime Version " << driverVersion / 1000 << "." << (driverVersion %100) / 10 << "/" <<
            runtimeVersion / 1000 << "." << (runtimeVersion%100) / 10 << endl;
    cout << "  CUDA Capability Major/Minor version number: " << deviceProp.major << "." << deviceProp.minor << endl;
    cout << "  Total amount of global memory: " << (float)deviceProp.totalGlobalMem/(1024.0 * 1024.0 * 1024.0) << " GBytes" <<
            "(" << (unsigned long long) deviceProp.totalGlobalMem << " bytes)" << endl;
    cout << "  GPU Clock rate: " << deviceProp.clockRate * 1e-3f << " MHz" << "(" <<
            deviceProp.clockRate * 1e-6f << " GHz)" << endl;
    cout << "  Memory Clock rate: " << deviceProp.memoryClockRate * 1e-3f << " Mhz" << endl;
    cout << "  Memory Bus Width: " << deviceProp.memoryBusWidth << "-bit" << endl;
    if (deviceProp.l2CacheSize)
        cout << "  L2 Cache Size: " << deviceProp.l2CacheSize << " bytes" << endl;
    cout << "  Max Texture Dimension Size (x, y, z) 1D=(" << deviceProp.maxTexture1D << "), " << "2D=(" <<
            deviceProp.maxTexture2D[0] << ", " << deviceProp.maxTexture2D[1] << "), " << "3D=(" << deviceProp.maxTexture3D[0] << ", " <<
            deviceProp.maxTexture3D[1] << ", " << deviceProp.maxTexture3D[2] << ")" << endl;
    cout << "  Max Layered Texture Size (dim) x layers 1D=(" << deviceProp.maxTexture1DLayered[0] << ") x " <<
            deviceProp.maxTexture1DLayered[1] << "2D=(" << deviceProp.maxTexture2DLayered[0] << ", " << deviceProp.maxTexture2DLayered[1] << ") x " <<
            deviceProp.maxTexture2DLayered[2] << endl;
    cout << "  Total amount of constant memory: " << deviceProp.totalConstMem << " bytes" << endl;
    cout << "  Total amount of shared memory per block: " << deviceProp.sharedMemPerBlock << " bytes" << endl;
    cout << "  Total number of registers available per block: " << deviceProp.regsPerBlock << endl;
    cout << "  Warp size: " << deviceProp.warpSize << endl;
    cout << "  Number of multiprocessors: " << deviceProp.multiProcessorCount << endl;
    cout << "  Maximum number of warps per multiprocessor: " << deviceProp.maxThreadsPerMultiProcessor / 32 << endl;
    cout << "  Maximum number of threads per multiprocessor: " << deviceProp.maxThreadsPerMultiProcessor << endl;
    cout << "  Maximum number of threads per block: " << deviceProp.maxThreadsPerBlock << endl;
    cout << "  Maximum sizes of each dimension of a block: " << deviceProp.maxThreadsDim[0] << " x " <<
            deviceProp.maxThreadsDim[1] << " x " << deviceProp.maxThreadsDim[2] << endl;
    cout << "  Maximum sizes of each dimension of a grid: " << deviceProp.maxGridSize[0] << " x " <<
            deviceProp.maxGridSize[1] << " x " << deviceProp.maxGridSize[2] << endl;
    cout << "  Maximum memory pitch: " << deviceProp.memPitch << " bytes" << endl;
    exit(EXIT_SUCCESS);
}
|
1,715
|
#include "includes.h"
// One compare-exchange pass of odd-even transposition sort: each thread owns
// the adjacent pair (p, p+1), where p is even when odd==0 and odd when
// odd==1. Pairs that would straddle the end of the array are skipped.
__global__ void sortKernelSimple(int *arr, int arr_len, int odd)
{
    int p = 2 * (blockIdx.x * blockDim.x + threadIdx.x) + odd;
    if (p >= arr_len - 1)
        return;
    int lhs = arr[p];
    int rhs = arr[p + 1];
    if (lhs > rhs)
    {
        // Out of order: exchange the pair.
        arr[p] = rhs;
        arr[p + 1] = lhs;
    }
}
|
1,716
|
#include <iostream>
#include <cuda_runtime.h> // CUDA routines prefixed with cuda_
#include <stdio.h>
#include <time.h> // time() for timing functions
// cuda_runtime.h includes
// stdlib.h -> rand(), RAND_MAX, malloc, calloc EXIT_FAILURE, EXIT_SUCCESS, exit() (among others)
// Add two equally-sized vectors (1D) element-wise.
// Results are contained in the third vector, C.
// Parameters:
// A, B, C: float *
// Returns:
// void
__global__ void
vecAddKernel(const float *A, const float *B, float *C)
{
// built-in variables blockDim, blockIdx, threadIdx
// allow us to assign one thread per operation
int i = blockDim.x * blockIdx.x + threadIdx.x;
// NOTE(review): there is no bounds guard here. The ceil-div launch in main()
// creates extra threads whenever the element count is not a multiple of the
// block size, and each such thread reads/writes out of bounds. Consider
// passing the element count and guarding with `if (i < n)`.
C[i] = A[i] + B[i];
// TODO throw exc?
}
// This function utilizes device intrinsics to compute the addition
// (round-to-nearest float add); otherwise identical to vecAddKernel.
__global__ void
vecAddIntrinsicKernel(const float *A, const float *B, float *C)
{
// thread ID, one for each operation
int i = blockDim.x * blockIdx.x + threadIdx.x;
// NOTE(review): no bounds guard — same out-of-bounds risk as vecAddKernel
// when the grid over-covers the data; consider an `if (i < n)` guard.
// round-nearest
C[i] = __fadd_rn(A[i], B[i]);
}
// Return the current wall-clock time as nanoseconds since the epoch (UTC).
long get_nanos(void)
{
    struct timespec now;
    timespec_get(&now, TIME_UTC);
    return now.tv_nsec + (long)now.tv_sec * 1000000000L;
}
// Host driver: build two random input vectors, add them on the GPU with a
// plain kernel and with the __fadd_rn intrinsic kernel, verify both results
// against the CPU sum, and release all memory.
// Fixes over the original:
//  * calloc was called as calloc(nElements, vecSize), allocating
//    nElements * vecSize bytes (~2.5 GB per array) instead of vecSize;
//  * kernel launches are asynchronous, so a cudaDeviceSynchronize() is now
//    issued before reading the end timestamp — the original timed only the
//    launch overhead.
int main()
{
    // TODO make dynamic via CLI input
    cudaError_t cudaErrResult = cudaSuccess;    // result of each CUDA routine
    int nElements = 25000;                      // number of elements per vector
    size_t vecSize = nElements * sizeof(float); // size in bytes per vector
    // pointers for host and device vectors
    float *hostA = NULL;
    float *hostB = NULL;
    float *hostC = NULL;
    float *devA = NULL;
    float *devB = NULL;
    float *devC = NULL;
    // time variables for use in timing functions
    long nSecondsStart;
    long nSecondsEnd;
    // host memory allocation; zero-initialized. Element size is sizeof(float),
    // NOT vecSize (that was the over-allocation bug).
    hostA = (float*)calloc(nElements, sizeof(float));
    hostB = (float*)calloc(nElements, sizeof(float));
    hostC = (float*)calloc(nElements, sizeof(float));
    if (hostA == NULL || hostB == NULL || hostC == NULL)
    {
        std::cout << "ERROR: COULD NOT ALLOCATE HOST MEMORY!" << std::endl;
        exit(EXIT_FAILURE);
    }
    // CUDA device memory allocation; cast the ADDRESS of each float* to void**.
    cudaErrResult = cudaMalloc((void **)&devA, vecSize);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "CUDA ERROR: " << cudaErrResult
                  << " COULD NOT ALLOCATE DEVICE MEMORY FOR ARRAY A!"
                  << std::endl
                  << "Error: " << cudaGetErrorString(cudaErrResult)
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    cudaErrResult = cudaMalloc((void **)&devB, vecSize);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "CUDA ERROR: " << cudaErrResult
                  << " COULD NOT ALLOCATE DEVICE MEMORY FOR ARRAY B!"
                  << std::endl
                  << "Error: " << cudaGetErrorString(cudaErrResult)
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    cudaErrResult = cudaMalloc((void **)&devC, vecSize);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "CUDA ERROR: " << cudaErrResult
                  << " COULD NOT ALLOCATE DEVICE MEMORY FOR ARRAY C!"
                  << std::endl
                  << "Error: " << cudaGetErrorString(cudaErrResult)
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    // initialize host input arrays with random numbers in [0, 1]
    for (int i=0; i<nElements; ++i)
    {
        hostA[i] = rand()/(float)RAND_MAX;
        hostB[i] = rand()/(float)RAND_MAX;
    }
    printf("Just sanity checking host arrays...\n");
    printf("hostA first element: %8.6f\n", hostA[0]);
    printf("hostA last element: %8.6f\n", hostA[nElements-1]);
    printf("hostB first element: %8.6f\n", hostB[0]);
    printf("hostB last element: %8.6f\n", hostB[nElements-1]);
    // copy host input arrays to the device
    cudaErrResult = cudaMemcpy(devA, hostA, vecSize, cudaMemcpyHostToDevice);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "FAILURE COPYING hostA TO devA!"
                  << std::endl
                  << "Error: " << cudaGetErrorString(cudaErrResult)
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    cudaErrResult = cudaMemcpy(devB, hostB, vecSize, cudaMemcpyHostToDevice);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "FAILURE COPYING hostB TO devB!"
                  << std::endl
                  << "Error: " << cudaGetErrorString(cudaErrResult)
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    // launch configuration: ceil-div so every element is covered
    int threadsPerBlock = 256;
    int blocksPerGrid = (nElements + threadsPerBlock - 1) / threadsPerBlock;
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    printf("Kernel launched with vanilla addition\n");
    nSecondsStart = get_nanos();
    vecAddKernel<<<blocksPerGrid, threadsPerBlock>>>(devA, devB, devC);
    // Launches are async: wait for completion so the timer is meaningful.
    cudaDeviceSynchronize();
    nSecondsEnd = get_nanos();
    printf("Finished in ... %ld nanoseconds\n", (nSecondsEnd-nSecondsStart));
    cudaErrResult = cudaGetLastError();
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "FAILED TO LAUNCH KERNEL! "
                  << "(error code " << cudaGetErrorString(cudaErrResult) << ")"
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    // copy the device result back to the host
    cudaErrResult = cudaMemcpy(hostC, devC, vecSize, cudaMemcpyDeviceToHost);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "FAILED TO COPY VECTOR C FROM DEVICE TO HOST! "
                  << "(error code " << cudaGetErrorString(cudaErrResult) << ")"
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    // Verify that the result vector is correct
    for (int i = 0; i < nElements; ++i)
    {
        if (fabs(hostA[i] + hostB[i] - hostC[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    // call using __fadd_rn intrinsic
    printf("CUDA kernel launch with %d blocks of %d threads\n", blocksPerGrid, threadsPerBlock);
    printf("Kernel launched with __fadd_rn instrinsic addition\n");
    nSecondsStart = get_nanos();
    vecAddIntrinsicKernel<<<blocksPerGrid, threadsPerBlock>>>(devA, devB, devC);
    cudaDeviceSynchronize();   // see timing note above
    nSecondsEnd = get_nanos();
    printf("Finished in ... %ld nanoseconds\n", (nSecondsEnd-nSecondsStart));
    cudaErrResult = cudaGetLastError();
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "FAILED TO LAUNCH KERNEL! "
                  << "(error code " << cudaGetErrorString(cudaErrResult) << ")"
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    cudaErrResult = cudaMemcpy(hostC, devC, vecSize, cudaMemcpyDeviceToHost);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "FAILED TO COPY VECTOR C FROM DEVICE TO HOST! "
                  << "(error code " << cudaGetErrorString(cudaErrResult) << ")"
                  << std::endl;
        exit(EXIT_FAILURE);
    }
    // Verify that the result vector is correct
    for (int i = 0; i < nElements; ++i)
    {
        if (fabs(hostA[i] + hostB[i] - hostC[i]) > 1e-5)
        {
            fprintf(stderr, "Result verification failed at element %d!\n", i);
            exit(EXIT_FAILURE);
        }
    }
    // free CUDA device memory
    cudaErrResult = cudaFree(devA);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "CUDA ERROR: COULD NOT FREE DEVICE MEMORY!" << std::endl;
        exit(EXIT_FAILURE);
    }
    cudaErrResult = cudaFree(devB);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "CUDA ERROR: COULD NOT FREE DEVICE MEMORY!" << std::endl;
        exit(EXIT_FAILURE);
    }
    cudaErrResult = cudaFree(devC);
    if (cudaErrResult != cudaSuccess)
    {
        std::cout << "CUDA ERROR: COULD NOT FREE DEVICE MEMORY!" << std::endl;
        exit(EXIT_FAILURE);
    }
    // free host memory
    free(hostA);
    free(hostB);
    free(hostC);
    return 0;
}
|
1,717
|
#include <stdio.h>
/*
* Scopo: somma due interi
*
* Tasks:
* * Uso di un kernel
* * allocazione delle memoria GPU
* * Trasferimento di un intero dalla GPU al processore
*/
// Note this keyword: it defines a kernel, i.e. a routine that runs
// on the GPU.
__global__ void dark(void)
{
// Intentionally empty: "Today I don't feel like doing anything — and
// indeed I do nothing."
}
// As above, but this time the kernel is extremely complex ;)
// Stores the sum of the two host-supplied integers into device memory at *c.
__global__ void add(int a, int b, int *c)
{
*c = a + b;
}
int main(void)
{
// Add 2 + 4 on the GPU and print the result on the host.
int c;
int *dev_c;
// Allocate device memory for the single-int result
cudaMalloc( (void**)&dev_c, sizeof(int) );
// Launch the kernel (one block, one thread)
add<<<1,1>>>(2,4, dev_c);
// The number is still on the GPU; it must be copied back to host memory
// before it can be used (cudaMemcpy blocks until the copy completes)
cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost );
//printf("Benvenuto nel magico mondo delle GPU. Guarda il file 'somma.cu' per capire di cosa si tratta.\n");
printf("2 + 4 = %d\n", c);
// It is always good practice to free memory after using it
cudaFree( dev_c );
return 0;
}
|
1,718
|
////////////////////////////////////////////////////////////////////////////
// Calculate scalar products of VectorN vectors of ElementN elements on CPU.
// Straight accumulation in double precision.
////////////////////////////////////////////////////////////////////////////
#include <iostream>
// Maintain an ascending top-K list of scores on the CPU.
// Scans Max_GPU[0..Number); each score that exceeds both 1 and the current
// minimum (Max_CPU[0]) replaces that minimum — together with its source
// location (Location[i]) and the current Row — and is then bubbled upward
// until the K1_Max_Report-entry list is ascending again.
void Kernel_1_Max_CPU(int *Max_GPU, int *Max_CPU, int *A_Location, int *B_Location, int *Location,
int K1_Max_Report, int Number, int Row)
{
    for (int i = 0; i < Number; ++i)
    {
        // Skip scores that are too small or don't beat the current minimum.
        if (Max_GPU[i] <= 1 || Max_GPU[i] <= Max_CPU[0])
            continue;
        // Replace the current minimum (slot 0) with the new candidate.
        Max_CPU[0]    = Max_GPU[i];
        A_Location[0] = Location[i];
        B_Location[0] = Row;
        // Bubble the new entry up until the list is ascending again.
        for (int j = 0; j + 1 < K1_Max_Report; ++j)
        {
            if (Max_CPU[j] <= Max_CPU[j + 1])
                break;
            int m = Max_CPU[j];
            int a = A_Location[j];
            int b = B_Location[j];
            Max_CPU[j]    = Max_CPU[j + 1];
            A_Location[j] = A_Location[j + 1];
            B_Location[j] = B_Location[j + 1];
            Max_CPU[j + 1]    = m;
            A_Location[j + 1] = a;
            B_Location[j + 1] = b;
        }
    }
}
|
1,719
|
#include <stdint.h>
// Galois field multiplication
// Multiply a and b in GF(2^8) using the AES reduction polynomial
// x^8 + x^4 + x^3 + x + 1 (Russian-peasant style, 8 rounds).
__device__ uint8_t gmul( uint8_t a, uint8_t b )
{
    uint8_t product = 0;
    for (uint8_t bit = 0; bit < 8; bit++) {
        if (b & 1)
            product ^= a;
        // Remember whether the shift below will overflow into the x^8 term.
        uint8_t carry = (a & 0x80);
        a <<= 1;
        if (carry)
            a ^= 0x1b; /* x^8 + x^4 + x^3 + x + 1 */
        b >>= 1;
    }
    return product;
}
// Key scheduling
// Run each of the four bytes of a key-schedule word through the S-box.
__device__ uchar4 key_subBytes( uchar4 word, uint8_t *sbox )
{
    uchar4 substituted;
    substituted.x = sbox[word.x];
    substituted.y = sbox[word.y];
    substituted.z = sbox[word.z];
    substituted.w = sbox[word.w];
    return substituted;
}
// XOR two key words together; the round constant is folded into the first
// byte only (as in the AES key expansion).
__device__ uchar4 xorTransformation( uchar4 word1, uchar4 word2, uint8_t rcon )
{
    uchar4 combined;
    combined.x = rcon ^ word1.x ^ word2.x;
    combined.y = word1.y ^ word2.y;
    combined.z = word1.z ^ word2.z;
    combined.w = word1.w ^ word2.w;
    return combined;
}
// Encryption
// AES SubBytes: apply the S-box to all 16 bytes of the state, one column
// (uchar4) at a time.
__device__ void enc_subBytes( uchar4 *state, uint8_t *sbox )
{
    for (int col = 0; col < 4; ++col)
    {
        state[col].x = sbox[state[col].x];
        state[col].y = sbox[state[col].y];
        state[col].z = sbox[state[col].z];
        state[col].w = sbox[state[col].w];
    }
}
// AES ShiftRows: row r of the 4x4 state (lanes .x/.y/.z/.w across the four
// column vectors state[0..3]) is rotated left by r positions; row 0 (.x)
// is untouched.
__device__ void shiftRows( uchar4 *state )
{
uchar4 temp;
// First row
// NOTHING HAPPENS
// Second row: rotate left by 1 column.
temp.x = state[0].y;
temp.y = state[1].y;
temp.z = state[2].y;
temp.w = state[3].y;
state[0].y = temp.y;
state[1].y = temp.z;
state[2].y = temp.w;
state[3].y = temp.x;
// Third row: rotate left by 2 columns.
temp.x = state[0].z;
temp.y = state[1].z;
temp.z = state[2].z;
temp.w = state[3].z;
state[0].z = temp.z;
state[1].z = temp.w;
state[2].z = temp.x;
state[3].z = temp.y;
// Fourth row: rotate left by 3 columns.
temp.x = state[0].w;
temp.y = state[1].w;
temp.z = state[2].w;
temp.w = state[3].w;
state[0].w = temp.w;
state[1].w = temp.x;
state[2].w = temp.y;
state[3].w = temp.z;
}
// AES MixColumns: multiply each state column by the fixed polynomial
// {03}x^3 + {01}x^2 + {01}x + {02} over GF(2^8).
__device__ void mixColumns( uchar4 *state )
{
    for (int col = 0; col < 4; ++col)
    {
        // Snapshot the column so every output byte uses the old values.
        uchar4 in = state[col];
        state[col].x = gmul(in.x,2) ^ gmul(in.y,3) ^ in.z ^ in.w;
        state[col].y = gmul(in.y,2) ^ gmul(in.z,3) ^ in.w ^ in.x;
        state[col].z = gmul(in.z,2) ^ gmul(in.w,3) ^ in.x ^ in.y;
        state[col].w = gmul(in.w,2) ^ gmul(in.x,3) ^ in.y ^ in.z;
    }
}
// AES AddRoundKey: XOR the round key (the 4 words starting at
// keys[4*round_number]) into the state, column by column.
__device__ void addRoundKey( uchar4 *state, uchar4 *keys, uint8_t round_number )
{
    for (int col = 0; col < 4; ++col)
    {
        uchar4 k = keys[4*round_number + col];
        state[col].x ^= k.x;
        state[col].y ^= k.y;
        state[col].z ^= k.z;
        state[col].w ^= k.w;
    }
}
// Decryption
// AES InvSubBytes: apply the inverse S-box to all 16 bytes of the state,
// one column (uchar4) at a time.
__device__ void invSubBytes( uchar4 *state, uint8_t *inv_sbox )
{
    for (int col = 0; col < 4; ++col)
    {
        state[col].x = inv_sbox[state[col].x];
        state[col].y = inv_sbox[state[col].y];
        state[col].z = inv_sbox[state[col].z];
        state[col].w = inv_sbox[state[col].w];
    }
}
// AES InvShiftRows: row r of the 4x4 state is rotated RIGHT by r positions,
// undoing shiftRows; row 0 (.x) is untouched.
__device__ void invShiftRows( uchar4 *state )
{
uchar4 temp;
// First row
// NOTHING HAPPENS
// Second row: rotate right by 1 column.
temp.x = state[0].y;
temp.y = state[1].y;
temp.z = state[2].y;
temp.w = state[3].y;
state[0].y = temp.w;
state[1].y = temp.x;
state[2].y = temp.y;
state[3].y = temp.z;
// Third row: rotate by 2 columns (self-inverse).
temp.x = state[0].z;
temp.y = state[1].z;
temp.z = state[2].z;
temp.w = state[3].z;
state[0].z = temp.z;
state[1].z = temp.w;
state[2].z = temp.x;
state[3].z = temp.y;
// Fourth row: rotate right by 3 columns (= left by 1).
temp.x = state[0].w;
temp.y = state[1].w;
temp.z = state[2].w;
temp.w = state[3].w;
state[0].w = temp.y;
state[1].w = temp.z;
state[2].w = temp.w;
state[3].w = temp.x;
}
// AES InvMixColumns: multiply each state column by the inverse polynomial
// {0b}x^3 + {0d}x^2 + {09}x + {0e} over GF(2^8).
__device__ void invMixColumns( uchar4 *state )
{
    for (int col = 0; col < 4; ++col)
    {
        // Snapshot the column so every output byte uses the old values.
        uchar4 in = state[col];
        state[col].x = gmul(in.x,14) ^ gmul(in.y,11) ^ gmul(in.z,13) ^ gmul(in.w,9);
        state[col].y = gmul(in.y,14) ^ gmul(in.z,11) ^ gmul(in.w,13) ^ gmul(in.x,9);
        state[col].z = gmul(in.z,14) ^ gmul(in.w,11) ^ gmul(in.x,13) ^ gmul(in.y,9);
        state[col].w = gmul(in.w,14) ^ gmul(in.x,11) ^ gmul(in.y,13) ^ gmul(in.z,9);
    }
}
|
1,720
|
// Author: Ayush Kumar
// Roll No: 170195
// Compile: nvcc -g -G -arch=sm_61 -std=c++11 assignment5-p5.cu -o assignment5-p5
#include <cmath>
#include <cstdlib>
#include <cuda.h>
#include <iostream>
#include <sys/time.h>
const uint64_t N = (256);
#define BLOCK_SIZE_X 32
#define BLOCK_SIZE_Y 8
#define BLOCK_SIZE_Z 4
#define THRESHOLD (0.000001)
using std::cerr;
using std::cout;
using std::endl;
// Naive 3D 6-point stencil: every interior cell of d_out receives 0.8 times
// the sum of its six face neighbours in d_in; boundary cells are untouched.
__global__ void kernel1(float* d_in, float* d_out) {
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int z = blockIdx.z * blockDim.z + threadIdx.z;
  const bool interior =
      y >= 1 && x >= 1 && z >= 1 && y < (N-1) && x < (N-1) && z < (N-1);
  if (interior) {
    const uint64_t c = y*N*N + x*N + z;   // flat index of this cell
    d_out[c] = 0.8 * (d_in[c - N*N] + d_in[c + N*N] +
                      d_in[c - N]   + d_in[c + N] +
                      d_in[c - 1]   + d_in[c + 1]);
  }
}
// TODO: Edit the function definition as required
// Shared-memory variant of the 6-point stencil: each block stages a halo'd
// tile of d_in into shared memory, then computes 0.8 * (sum of the six face
// neighbours) per interior cell.
__global__ void kernel2(float* d_in, float* d_out) {
// Block row and column
int block_i = blockIdx.y;
int block_j = blockIdx.x;
int block_k = blockIdx.z;
// Thread row and column within B_sub
int thread_i = threadIdx.y;
int thread_j = threadIdx.x;
int thread_k = threadIdx.z;
int i = block_i * BLOCK_SIZE_Y + thread_i;
int j = block_j * BLOCK_SIZE_X + thread_j;
int k = block_k * BLOCK_SIZE_Z + thread_k;
// if (i < 1 || i >= N-1 || j < 1 || j >= N-1 || k < 1 || k >= N-1) return;
if (i >= 1 && j >= 1 && k >= 1 && i < (N-1) && j < (N-1) && k < (N-1)) {
// Each thread block computes one sub-matrix B_sub of B
float* d_in_sub = d_in + (block_i*BLOCK_SIZE_Y)*N*N + (block_j*BLOCK_SIZE_X)*N + block_k*BLOCK_SIZE_Z;
float* d_out_sub = d_out + (block_i*BLOCK_SIZE_Y)*N*N + (block_j*BLOCK_SIZE_X)*N + block_k*BLOCK_SIZE_Z;
__shared__ float tmp[BLOCK_SIZE_Y+2][BLOCK_SIZE_X+2][BLOCK_SIZE_Z+2];
// thread_i++; thread_j++; thread_k++;
// load everything from d_in_sub to tmp
// NOTE(review): the halo loads below index d_in_sub with thread_i-1 /
// thread_i+1 (and likewise for j/k), which for edge threads of a block
// reaches outside this block's sub-volume; the interior guard above does
// not cover the halo addresses. Verify these accesses stay in bounds.
tmp[1+thread_i][1+thread_j][1+thread_k] = d_in_sub[thread_i*N*N + thread_j*N + thread_k];
tmp[1+thread_i-1][1+thread_j][1+thread_k] = d_in_sub[(thread_i-1)*N*N + thread_j*N + thread_k];
tmp[1+thread_i+1][1+thread_j][1+thread_k] = d_in_sub[(thread_i+1)*N*N + thread_j*N + thread_k];
tmp[1+thread_i][1+thread_j-1][1+thread_k] = d_in_sub[thread_i*N*N + (thread_j-1)*N + thread_k];
tmp[1+thread_i][1+thread_j+1][1+thread_k] = d_in_sub[thread_i*N*N + (thread_j+1)*N + thread_k];
tmp[1+thread_i][1+thread_j][1+thread_k-1] = d_in_sub[thread_i*N*N + thread_j*N + thread_k-1];
tmp[1+thread_i][1+thread_j][1+thread_k+1] = d_in_sub[thread_i*N*N + thread_j*N + thread_k+1];
// NOTE(review): this __syncthreads() sits inside a conditional that
// boundary threads of the block do NOT enter — CUDA requires the barrier
// to be reached by every thread of the block, so this is potential
// undefined behavior. Consider hoisting the barrier out of the if.
__syncthreads();
float d_out_value =
0.8 * (tmp[1+thread_i-1][1+thread_j][1+thread_k] + tmp[1+thread_i+1][1+thread_j][1+thread_k] +
tmp[1+thread_i][1+thread_j-1][1+thread_k] + tmp[1+thread_i][1+thread_j+1][1+thread_k] +
tmp[1+thread_i][1+thread_j][1+thread_k-1] + tmp[1+thread_i][1+thread_j][1+thread_k+1]);
__syncthreads();
d_out_sub[thread_i*N*N + thread_j*N + thread_k] = d_out_value;
}
}
// CPU reference implementation of the 6-point stencil, used to validate the
// GPU kernels: every interior cell of 'out' gets 0.8 times the sum of its
// six face neighbours in 'in'.
__host__ void stencil(float* in, float* out) {
  const uint64_t plane = N * N;                 // elements per i-slice
  for (uint64_t i = 1; i + 1 < N; i++) {
    for (uint64_t j = 1; j + 1 < N; j++) {
      for (uint64_t k = 1; k + 1 < N; k++) {
        const uint64_t c = i*plane + j*N + k;
        out[c] = 0.8 * (in[c - plane] + in[c + plane] +
                        in[c - N]     + in[c + N] +
                        in[c - 1]     + in[c + 1]);
      }
    }
  }
}
// Compare two size^3 volumes element-wise (using the global N stride, which
// matches the buffers' layout) and report whether any entry differs.
__host__ void check_result(float* w_ref, float* w_opt, uint64_t size) {
  bool wrong = false;
  for (uint64_t i = 0; i < size && !wrong; i++) {
    for (uint64_t j = 0; j < size && !wrong; j++) {
      for (uint64_t k = 0; k < size && !wrong; k++) {
        wrong = (w_ref[i*N*N + j*N + k] != w_opt[i*N*N + j*N + k]);
      }
    }
  }
  if (wrong) {
    cout << "Diffs found!" << endl;
  } else {
    cout << "No differences found between base and test versions\n";
  }
}
// Wall-clock time via gettimeofday(), returned as fractional seconds.
double rtclock() { // Seconds
  struct timeval now;
  struct timezone tz;
  const int rc = gettimeofday(&now, &tz);
  if (rc != 0) {
    cout << "Error return from gettimeofday: " << rc << "\n";
  }
  return now.tv_sec + now.tv_usec * 1.0e-6;
}
int main() {
  uint64_t SIZE = N * N * N;
  // Element counts, not byte counts: the original passed
  // sizeof(float)*N*N*N to new[], over-allocating each buffer 4x.
  // Value-initialize (trailing "()") so boundary cells -- which the
  // stencils never write -- compare equal to the cudaMemset-zeroed GPU
  // results in check_result.
  float* h_in = new float[SIZE]();
  float* h_out = new float[SIZE]();
  float* h_k1_out = new float[SIZE]();
  float* h_k2_out = new float[SIZE]();
  for (int i = 0; i < N; i++) {
    for (int j = 0; j < N; j++) {
      for (int k = 0; k < N; k++) {
        h_in[i*N*N + j*N + k] = rand() % 10;
      }
    }
  }
  // CPU reference run (timed with rtclock).
  double clkbegin = rtclock();
  stencil(h_in, h_out);
  double clkend = rtclock();
  double cpu_time = clkend - clkbegin;
  cout << "Stencil time on CPU: " << cpu_time * 1000 << " msec" << endl;
  cudaError_t status;
  cudaEvent_t start, end;
  float kernel_time;
  float *d_in, *d_out;
  cudaMalloc(&d_in, SIZE * sizeof(float));
  cudaMalloc(&d_out, SIZE * sizeof(float));
  // Kernel 1: events bracket H2D copy + kernel + D2H copy.
  dim3 threads_in_block1(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
  dim3 blocks_in_grid1(N/threads_in_block1.x, N/threads_in_block1.y, N/threads_in_block1.z);
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start, 0);
  /************** CUDA **************/
  cudaMemcpy(d_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemset(d_out, 0, SIZE * sizeof(float));
  kernel1<<<blocks_in_grid1, threads_in_block1>>>(d_in, d_out);
  cudaMemcpy(h_k1_out, d_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
  /************** CUDA **************/
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&kernel_time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  check_result(h_out, h_k1_out, N);
  std::cout << "Kernel 1 time (ms): " << kernel_time << "\n";
  status = cudaGetLastError();
  if (status != cudaSuccess) {
    cerr << cudaGetErrorString(status) << endl;
  }
  // Kernel 2: same measurement protocol as kernel 1.
  dim3 threads_in_block2(BLOCK_SIZE_X, BLOCK_SIZE_Y, BLOCK_SIZE_Z);
  dim3 blocks_in_grid2(N/threads_in_block2.x, N/threads_in_block2.y, N/threads_in_block2.z);
  cudaEventCreate(&start);
  cudaEventCreate(&end);
  cudaEventRecord(start, 0);
  /************** CUDA **************/
  cudaMemcpy(d_in, h_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemset(d_out, 0, SIZE * sizeof(float));
  kernel2<<<blocks_in_grid2, threads_in_block2>>>(d_in, d_out);
  cudaMemcpy(h_k2_out, d_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
  /************** CUDA **************/
  cudaEventRecord(end, 0);
  cudaEventSynchronize(end);
  cudaEventElapsedTime(&kernel_time, start, end);
  cudaEventDestroy(start);
  cudaEventDestroy(end);
  check_result(h_out, h_k2_out, N);
  std::cout << "Kernel 2 time (ms): " << kernel_time << "\n";
  status = cudaGetLastError();
  if (status != cudaSuccess) {
    cerr << cudaGetErrorString(status) << endl;
  }
  // Release device and host buffers.
  cudaFree(d_in);
  cudaFree(d_out);
  delete[] h_in;
  delete[] h_out;
  delete[] h_k1_out;
  delete[] h_k2_out;
  return EXIT_SUCCESS;
}
|
1,721
|
#include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
// NOTE(review): despite its name, this kernel computes the matrix
// product C = A * B (A is ha x wa, B is wa x wb). It is launched with a
// single block of wb threads; thread `id` produces column `id` of C,
// looping over all ha rows and accumulating the wa-long dot product.
__global__ void add(int *A, int *B, int *C, int ha, int wa, int wb) {
// Get the 1D Array index of the matrix
int id = threadIdx.x;
int sum;
for (int i = 0; i < ha; ++i) {
sum = 0;
for (int j = 0; j < wa; ++j){
sum += (A[i*wa + j] * B[j*wb + id]);
}
C[i*wb + id] = sum;
}
}
int main(){
    // Host matrices live in fixed 100-element stack buffers, so reject
    // dimensions that would overflow them (the original had no guard).
    int a[100], b[100], c[100], n1, m1, n2, m2;
    printf("Enter m1: ");
    scanf("%d",&m1);
    printf("Enter n1: ");
    scanf("%d",&n1);
    if (m1 <= 0 || n1 <= 0 || m1*n1 > 100){
        printf("invalid dimensions\n");
        exit(0);
    }
    printf("Enter Matrix 1:\n");
    for(int i=0;i<n1*m1;i++)
        scanf("%d",&a[i]);
    printf("Enter m2: ");
    scanf("%d",&m2);
    if (m2 != n1){
        printf("cannot be multiplied\n");
        exit(0);
    }
    printf("Enter n2: ");
    scanf("%d",&n2);
    if (n2 <= 0 || m2*n2 > 100 || m1*n2 > 100){
        printf("invalid dimensions\n");
        exit(0);
    }
    printf("Enter Matrix 2:\n");
    for(int i=0;i<n2*m2;i++)
        scanf("%d",&b[i]);
    int *d_a,*d_b,*d_c;
    cudaMalloc((void**)&d_a,sizeof(int)*n1*m1);
    cudaMalloc((void**)&d_b,sizeof(int)*n2*m2);
    cudaMalloc((void**)&d_c,sizeof(int)*m1*n2);
    cudaMemcpy(d_a,&a,sizeof(int)*n1*m1,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,&b,sizeof(int)*n2*m2,cudaMemcpyHostToDevice);
    // One thread per output column (the "add" kernel computes C = A*B).
    add<<<1, n2>>>(d_a, d_b, d_c, m1, n1, n2);
    cudaMemcpy(&c,d_c,sizeof(int)*n2*m1,cudaMemcpyDeviceToHost);
    for(int i=0;i<m1*n2;i++){
        if (i % n2 == 0)
            printf("\n");
        printf("%d ",c[i]);
    }
    printf("\n");
    // Release all three device buffers (the original leaked d_c).
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
1,722
|
#include "includes.h"
// One thread per (i, j) cell of a dim x nwl grid: cc[t] is set to 1.0f
// when xx[t] lies strictly inside the open interval (bn[2*i], bn[2*i+1]),
// else 0.0f.
__global__ void arrayOf2DConditions ( const int dim, const int nwl, const float *bn, const float *xx, float *cc ) {
  const int i = threadIdx.x + blockDim.x * blockIdx.x;
  const int j = threadIdx.y + blockDim.y * blockIdx.y;
  if ( i >= dim || j >= nwl ) return;
  const int t = i + j * dim;
  const float lo = bn[0 + i * 2];
  const float hi = bn[1 + i * 2];
  cc[t] = ( lo < xx[t] ) * ( xx[t] < hi );
}
|
1,723
|
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <iostream>
#include <string.h>
#include <algorithm>
#include <stdlib.h>
//#define N 4
#define BLOCK_SIZE 4
#define GRID_SIZE 2
using namespace std;
__device__ volatile int g_mutex;
// Abort the process with a diagnostic whenever a CUDA API call fails;
// msg identifies the failing call site.
void cuda_error_check(cudaError_t err , const char *msg )
{
    if (err == cudaSuccess)
        return;
    printf("The error is %s, %s \n", cudaGetErrorString(err), msg );
    exit(1);
}
// Software grid-wide barrier: thread 0 of each block atomically bumps the
// global counter g_mutex and spins until it reaches goalVal (the number of
// participating blocks), then the rest of the block waits at
// __syncthreads(). Works only if all participating blocks are resident on
// the GPU simultaneously; g_mutex is declared volatile so the spin re-reads
// it from memory each iteration.
// NOTE(review): the __threadfence() is commented out, so global writes made
// before the barrier are not guaranteed visible to other blocks after it --
// confirm callers do not rely on such visibility.
__device__ void __gpu_sync(int goalVal)
{
//thread ID in a block
int tid_in_block = threadIdx.x * blockDim.y + threadIdx.y;
// only thread 0 is used for synchronization
if (tid_in_block == 0)
{
atomicAdd((int *)&g_mutex, 1);
while(g_mutex != goalVal)
{
//Do nothing here
}
}
//__threadfence();
__syncthreads();
}
// Demonstration kernel for the software grid barrier: block 0 records the
// value of g_mutex seen BEFORE the barrier, all other blocks record the
// value seen AFTER __gpu_sync(4) (4 = total number of blocks in the
// 2x2 grid launched from main). The off/off_x/off_y arithmetic is computed
// but unused in the active code paths -- it appears to be leftover from the
// commented-out experiments below.
__global__ void matrix(int *d_a)
{
// Global row/column of this thread in the full (grid*block) matrix.
int row = (blockIdx.y * blockDim.y) + threadIdx.y ;
int col = (blockIdx.x * blockDim.x) + threadIdx.x ;
//int L1 = blockDim.x ;
//int L2 = blockDim.y ;
// Flat index into d_a laid out row-major over the whole launch.
int offset = col + row * blockDim.x * gridDim.x ;
int blockid = blockIdx.y * gridDim.x + blockIdx.x ;
int off = col + row * gridDim.x - blockDim.x*blockid ;
off += (blockDim.x - gridDim.x ) * threadIdx.y;
int off_x = off - (threadIdx.y * blockDim.x) ;
int off_y = (off - threadIdx.x) / blockDim.x ;
//while(blockid!=g_mutex ){}
if(blockid==0)
d_a[offset] = g_mutex ;
__gpu_sync(4);
if(blockid!=0)
d_a[offset] = g_mutex ;
/*
if(blockid == g_mutex && off==0)
{ atomicAdd((int *)&g_mutex, 1);
}*/
}
int main(int argc , char **argv)
{
    // Demonstrates the software grid barrier: block 0 stores g_mutex's
    // pre-barrier value, the remaining blocks store its post-barrier value,
    // and the resulting matrix is printed block-by-block.
    int a[(GRID_SIZE*GRID_SIZE)*BLOCK_SIZE*BLOCK_SIZE];
    int j;
    for(j=0;j<(GRID_SIZE*GRID_SIZE)*BLOCK_SIZE*BLOCK_SIZE;j++)
    {
        a[j]=0;
    }
    int *d_a ;
    cuda_error_check(cudaSetDevice(0) , "cudaSetDevice failed!" );
    cuda_error_check(cudaMalloc((void **)&d_a , (GRID_SIZE*GRID_SIZE)*BLOCK_SIZE*BLOCK_SIZE* sizeof(int)),"cudaMalloc Failed!");
    cuda_error_check(cudaMemcpy(d_a , a , (GRID_SIZE*GRID_SIZE)*BLOCK_SIZE*BLOCK_SIZE * sizeof(int) , cudaMemcpyHostToDevice),"cudaMemcpy H-D failed!");
    dim3 dimBlock(BLOCK_SIZE , BLOCK_SIZE );
    dim3 dimGrid(GRID_SIZE , GRID_SIZE);
    matrix<<< dimGrid , dimBlock >>>(d_a);
    // The blocking D2H copy below also synchronizes with the kernel.
    cuda_error_check(cudaMemcpy(a , d_a , (GRID_SIZE*GRID_SIZE)*BLOCK_SIZE*BLOCK_SIZE * sizeof(int) , cudaMemcpyDeviceToHost),"cudaMemcpy D-H failed!");
    for(j=0;j<(GRID_SIZE*GRID_SIZE)*BLOCK_SIZE*BLOCK_SIZE;j++)
    {
        if(j%(BLOCK_SIZE*GRID_SIZE) == 0)
            cout<<endl;
        if(j%(GRID_SIZE*BLOCK_SIZE*BLOCK_SIZE)==0)
            cout<<endl;
        if(j%BLOCK_SIZE ==0)
            cout<<" ";
        cout.width( 3 );
        cout<<a[j]<<" ";
    }
    printf("\n\n");
    // The original leaked d_a; release it before exiting.
    cudaFree(d_a);
    return 0;
}
/*
if(blockIdx.x == 0 && blockIdx.y == 1)
d_a[offset] = offset;
else
d_a[offset] = -1;
if(row==0)
d_a[offset] = -1 * offset;
if(col==0)
d_a[offset] = -1 * offset/(BLOCK_SIZE*BLOCK_SIZE);
*/
|
1,724
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#include <sys/time.h>
#define SIZE 102400
#define MOD 102399
#define STEP 128
/* ARRAY A INITIALIZER */
void init_a(int * a)
{
    /* Fill all SIZE entries with 1 so that, after the kernel's
       scatter-add, check_a can expect a[i] == i + 1. */
    for (int idx = 0; idx < SIZE; idx++)
        a[idx] = 1;
}
/* ARRAY B INITIALIZER */
void init_b(int * b)
{
    /* Scatter the values 0..SIZE-2 over positions that hop STEP apart
       modulo MOD. STEP (128) and MOD (102399) are coprime, so each of
       the first MOD slots is written exactly once; the final slot gets
       SIZE-1 explicitly, making b a permutation of 0..SIZE-1. */
    int pos = 0;
    for (int val = 0; val < SIZE - 1; val++)
    {
        b[pos] = val;
        pos = (pos + STEP) % MOD;
    }
    b[SIZE - 1] = SIZE - 1;
}
/* CHECKING A VALUES */
/* Verify the kernel applied each scatter-add exactly once, i.e.
   a[i] == i + 1 for every element. Returns 1 on success, 0 otherwise.
   Stops at the first mismatch (the original kept scanning the whole
   array after a failure was already known). */
int check_a(int * a)
{
    for (int i = 0; i < SIZE; i++)
    {
        if (a[i] != (i + 1))
        {
            return 0;
        }
    }
    return 1;
}
/* CUDA FUNCTION */
/* CUDA FUNCTION */
/* Grid-stride scatter-add: a[b[j]] += b[j] for every j < N.
   Uses the GLOBAL thread id so the N updates are partitioned across all
   launched blocks. The previous version started its loop at threadIdx.x
   in every block, so with the 16 blocks launched from main all blocks
   re-applied the same updates via unsynchronized read-modify-writes,
   corrupting a[] (check_a expects each update applied exactly once).
   Since init_b makes b a permutation, no two iterations touch the same
   a[] element, so no atomics are needed. */
__global__ void mykernel(int * a, int * b, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int total = blockDim.x * gridDim.x;
    for (int j = i; j < N; j += total)
    {
        int tmp = b[j];
        a[tmp] = a[tmp] + tmp;
    }
}
int main(int argc, char * argv[])
{
    int sz_in_bytes = SIZE*sizeof(int);
    int * h_a = (int *)malloc(sz_in_bytes);
    int * h_b = (int *)malloc(sz_in_bytes);
    int *d_a, *d_b;
    // h_a = all ones, h_b = permutation of 0..SIZE-1 (see init_b).
    init_a(h_a);
    init_b(h_b);
    cudaMalloc((void**)&d_a, sz_in_bytes);
    cudaMalloc((void**)&d_b, sz_in_bytes);
    cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice);
    dim3 nBlocks;
    dim3 nThperBlock;
    nBlocks.x = 16;
    nThperBlock.x = 1024;
    // Time the kernel only; cudaDeviceSynchronize makes the stop
    // timestamp meaningful since launches are asynchronous.
    struct timeval tv_start, tv_stop;
    gettimeofday(&tv_start, NULL);
    mykernel<<< nBlocks , nThperBlock >>>(d_a, d_b, SIZE);
    cudaDeviceSynchronize();
    gettimeofday(&tv_stop, NULL);
    cudaMemcpy(h_a, d_a, sz_in_bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    int correct = check_a(h_a);
    if(0 == correct)
    {
        printf("\n\n ******************** \n ***/!\\ ERROR /!\\ *** \n ******************** \n\n");
    }
    else
    {
        printf("\n\n ******************** \n ***** SUCCESS! ***** \n ******************** \n\n");
    }
    free(h_a);
    free(h_b);
    int nsec = tv_stop.tv_sec - tv_start.tv_sec;
    int nusec = tv_stop.tv_usec - tv_start.tv_usec;
    if(nusec < 0){
        nusec = nusec + 1000000;
        nsec = nsec - 1;
    }
    printf("time = %d s,%d us", nsec, nusec);
    // The original returned 1, which signals failure to the shell even
    // on success.
    return 0;
}
|
1,725
|
#include<stdio.h>
#include<cuda.h>
#define N 1024
#define BLOCKSIZE 64
__device__ volatile unsigned k2counter; // try removing volatile: the code may hang.
// Reset the barrier counter before K2 runs; launched as <<<1,1>>> from
// main so the store happens exactly once.
__global__ void K2init() {
k2counter = 0;
}
// Software global barrier across all N threads of the grid: every thread
// bumps k2counter (wrapping bound N+1 is never hit since exactly N
// increments occur), then spins until all N increments are visible.
// Relies on k2counter being volatile so the spin re-reads memory, and on
// all N/BLOCKSIZE blocks being resident simultaneously -- otherwise the
// spin deadlocks.
__global__ void K2() {
unsigned id = blockDim.x * blockIdx.x + threadIdx.x;
printf("This is before: %d\n", id);
// global barrier start
atomicInc((unsigned *)&k2counter, N + 1);
while (k2counter != N)
;
// global barrier end
printf("This is after the global barrier: %d\n", id);
}
int main() {
  // Zero the device-side counter, then launch N threads that all meet at
  // the software global barrier inside K2.
  K2init<<<1, 1>>>();
  const unsigned numBlocks = N / BLOCKSIZE;
  K2<<<numBlocks, BLOCKSIZE>>>();
  cudaDeviceSynchronize();
  return 0;
}
|
1,726
|
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
#include<iostream>
#include<limits.h>
#include<algorithm>
#include<sys/time.h>
using namespace std;
#define INF INT_MAX-1
#define NS 1024
int m;
int rowSize;
int tilesize[3] = {2, 2, INT_MAX};
// Dump the top-left 32x32 corner of the rowSize-wide matrix, one
// tab-separated row per line.
void print_matrix(float *d)
{
    for (int r = 0; r < 32; r++)
    {
        for (int c = 0; c < 32; c++)
            printf("%0.1f\t", d[r*rowSize + c]);
        puts("");
    }
}
// "D" (fully independent) tile update of the tiled Floyd-Warshall: one
// thread per (i, j) cell of the X tile; k sweeps the rows of the V tile.
// NOTE(review): threads that fail the bounds checks return before the
// __syncthreads() below -- a divergent barrier; confirm launches never
// produce such threads for in-range tiles.
__global__ void Dloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + xColStart;
if (j >= rowSize)
return;
//int i = xRowStart + rowSize*blockIdx.y;
int i = blockIdx.y * blockDim.y + threadIdx.y + xRowStart;
if (i >= rowSize)
return;
if(currSize <= NS) {
// NOTE(review): local_a holds only NS (1024) floats but is indexed with
// i*rowSize + j, which exceeds NS for realistic rowSize -- this looks
// like an out-of-bounds shared-memory access; verify intent.
__shared__ float local_a[NS];
//local_a[i*rowSize + j] = d_a[i*rowSize + j];
for(int k = vRowStart; k < (vRowStart + currSize); k++) {
local_a[i*rowSize + j] = d_a[i*rowSize + j];
local_a[i*rowSize + k] = d_a[i*rowSize + k];
local_a[k*rowSize + j] = d_a[k*rowSize + j];
}
__syncthreads();
// Relaxation: shortest path i->j via intermediate vertex k.
for(int k = vRowStart; k < (vRowStart + currSize); k++) {
if(i != j && j != k && i != k)
local_a[i*rowSize + j] = fmin(local_a[i*rowSize + j], local_a[i*rowSize + k] + local_a[k*rowSize + j]);
}
__syncthreads();
//for(int k = vRowStart; k < (vRowStart + currSize); k++) {
d_a[i*rowSize + j] = local_a[i*rowSize + j];
//}
} else {
// Large-tile path: thread 0 publishes d_a[i][k] via shared memory per k.
// NOTE(review): intermed is declared int but stores float distances --
// fractional parts are truncated; confirm this is intentional.
__shared__ int intermed;
for(int k = vRowStart; k < (vRowStart + currSize); k++) {
if (threadIdx.x == 0) {
intermed = d_a[i*rowSize + k];
}
__syncthreads();
if(i != j && j != k && i != k)
d_a[i*rowSize + j] = fmin(d_a[i*rowSize + j], intermed + d_a[k*rowSize+j]);
}
}
}
// Launch Dloop_FW over a currSize x currSize tile: up to 1024 threads
// per block along x, one grid row per tile row.
void FW_D_loop(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
    const int threadsPerBlock = (currSize <= 1024) ? currSize : 1024;
    dim3 blocksPerGrid(currSize / threadsPerBlock, currSize);
    Dloop_FW<<<blocksPerGrid, threadsPerBlock>>>(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
    //cudaThreadSynchronize();
}
// Recursive "D" case of the tiled Floyd-Warshall: when the tile-size
// table says this level is small enough, run the GPU loop kernel;
// otherwise recurse on an r x r grid of sub-tiles for each of the r
// choices of the intermediate tile k, synchronizing between k steps.
void DFW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
    const int r = tilesize[d];
    if (r > currSize) {
        FW_D_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
        return;
    }
    const int sub = currSize / r;
    for (int k = 0; k < r; k++) {
        for (int i = 0; i < r; i++) {
            for (int j = 0; j < r; j++) {
                DFW(d_a, xRowStart + i*sub, xColStart + j*sub, uRowStart + i*sub, uColStart + k*sub, vRowStart + k*sub, vColStart + j*sub, sub, d+1, rowSize);
            }
        }
        cudaThreadSynchronize();
    }
}
// "C" (column) tile update of the tiled Floyd-Warshall: one thread per
// row i of the X tile; j and k are swept sequentially inside the thread.
__global__ void Cloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int i = blockIdx.x * blockDim.x + threadIdx.x + xRowStart;
if(i >= rowSize)
return;
if(currSize <= NS) {
// NOTE(review): local_a holds only NS (1024) floats but is indexed with
// i*rowSize + j -- appears out of bounds for realistic rowSize; verify.
__shared__ float local_a[NS];
for(int k = vRowStart; k < (vRowStart + currSize); k++) {
for(int j = xColStart; j < (xColStart + currSize); j++) {
local_a[i*rowSize + j] = d_a[i*rowSize + j];
local_a[i*rowSize + k] = d_a[i*rowSize + k];
local_a[k*rowSize + j] = d_a[k*rowSize + j];
}
}
__syncthreads();
// Relaxation: shortest path i->j via intermediate vertex k.
for(int k = vRowStart; k < (vRowStart + currSize); k++)
{
for(int j = xColStart; j < (xColStart + currSize); j++)
{
if(i != j && j != k && i != k)
local_a[i*rowSize + j] = fmin(local_a[i*rowSize + j], local_a[i*rowSize + k] + local_a[k*rowSize + j]);
}
}
__syncthreads();
//for(int k = vRowStart; k < (vRowStart + currSize); k++) {
for(int j = xColStart; j < (xColStart + currSize); j++) {
d_a[i*rowSize + j] = local_a[i*rowSize + j];
}
//}
} else
{
// Large-tile path: thread 0 publishes d_a[k][j] via shared memory.
// NOTE(review): intermed is int but stores float distances (truncation).
__shared__ int intermed;
for(int k = vRowStart; k < (vRowStart + currSize); k++)
{
for(int j = xColStart; j < (xColStart + currSize); j++)
{
if (threadIdx.x == 0) {
intermed = d_a[k*rowSize+j];
}
__syncthreads();
if(i != j && j != k && i != k)
d_a[i*rowSize + j ] = fmin( d_a[i*rowSize + j ], d_a[i*rowSize + k] + intermed);
}
}
}
}
// Launch Cloop_FW with one thread per tile row (up to 1024 per block).
void FW_C_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
    const int threadsPerBlock = (currSize <= 1024) ? currSize : 1024;
    const int noOfBlocks = currSize / threadsPerBlock;
    Cloop_FW<<<noOfBlocks,threadsPerBlock>>>(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
    //cudaThreadSynchronize();
}
// Recursive "C" (column-panel) case of the tiled Floyd-Warshall. For each
// intermediate tile k: first solve the r sub-tiles in column k (CFW), then
// update the remaining columns with the independent D case. The
// cudaThreadSynchronize() calls order the dependent kernel waves.
void CFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_C_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize/r;
for(int k=1; k<=r; k++) {
for(int i=1; i<=r; i++) {
CFW(d_a, xRowStart + (i-1)*newsize, xColStart + (k-1)*newsize, uRowStart + (i-1)*newsize, uColStart + (k-1)*newsize, vRowStart + (k-1)*newsize, vColStart + (k-1)*newsize, newsize, d+1, rowSize);
}
cudaThreadSynchronize();
for(int i=1; i<=r; i++) {
for(int j=1; j<=r; j++) {
if(j != k)
DFW(d_a, xRowStart + (i-1)*newsize, xColStart + (j-1)*newsize, uRowStart + (i-1)*newsize, uColStart + (k-1)*newsize, vRowStart + (k-1)*newsize, vColStart + (j-1)*newsize, newsize, d+1, rowSize);
}
}
cudaThreadSynchronize();
}
}
}
// "B" (row) tile update of the tiled Floyd-Warshall: one thread per
// column j of the X tile; i and k are swept sequentially inside the
// thread.
__global__ void Bloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
int j = blockIdx.x * blockDim.x + threadIdx.x + xColStart;
if(j >= rowSize)
return;
if(currSize <= NS) {
// NOTE(review): local_a holds only NS (1024) floats but is indexed with
// i*rowSize + j -- appears out of bounds for realistic rowSize; verify.
__shared__ float local_a[NS];
for(int k = vRowStart; k < (vRowStart + currSize); k++) {
for(int i = xRowStart; i < (xRowStart + currSize); i++) {
local_a[i*rowSize + j] = d_a[i*rowSize + j];
local_a[i*rowSize + k] = d_a[i*rowSize + k];
local_a[k*rowSize + j] = d_a[k*rowSize + j];
}
}
__syncthreads();
// Relaxation: shortest path i->j via intermediate vertex k.
for(int k = vRowStart; k < (vRowStart + currSize); k++)
{
for(int i = xRowStart; i < (xRowStart + currSize); i++)
{
if(i != j && j != k && i != k)
local_a[i*rowSize + j] = fmin(local_a[i*rowSize + j], local_a[i*rowSize + k] + local_a[k*rowSize + j]);
}
}
__syncthreads();
//for(int k = vRowStart; k < (vRowStart + currSize); k++) {
for(int i = xRowStart; i < (xRowStart + currSize); i++) {
d_a[i*rowSize + j] = local_a[i*rowSize + j];
}
//}
} else
{
// Large-tile path: thread 0 publishes d_a[i][k] via shared memory.
// NOTE(review): intermed is int but stores float distances (truncation).
__shared__ int intermed;
for(int k = vRowStart; k < (vRowStart + currSize); k++)
{
for(int i = xRowStart; i < (xRowStart + currSize); i++)
{
if (threadIdx.x == 0) {
intermed = d_a[i*rowSize+k];
}
__syncthreads();
if(i != j && j != k && i != k)
d_a[i*rowSize + j ] = fmin(intermed + d_a[k*rowSize + j], d_a[i*rowSize+j]);
}
}
}
}
// Launch Bloop_FW with one thread per tile column (up to 1024 per block).
void FW_B_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
    const int threadsPerBlock = (currSize < 1024) ? currSize : 1024;
    const int noOfBlocks = currSize / threadsPerBlock;
    Bloop_FW<<<noOfBlocks,threadsPerBlock>>>(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
    //cudaThreadSynchronize();
}
// Recursive "B" (row-panel) case of the tiled Floyd-Warshall. For each
// intermediate tile k: first solve the r sub-tiles in row k (BFW), then
// update the remaining rows with the independent D case. The
// cudaThreadSynchronize() calls order the dependent kernel waves.
void BFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_B_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize/r;
for(int k=1; k<=r; k++) {
for(int j=1; j<=r; j++) {
BFW(d_a, xRowStart + (k-1)*newsize, xColStart + (j-1)*newsize, uRowStart + (k-1)*newsize, uColStart + (k-1)*newsize, vRowStart + (k-1)*newsize, vColStart + (j-1)*newsize, newsize, d+1, rowSize);
}
cudaThreadSynchronize();
for(int i=1; i<=r; i++) {
for(int j=1; j<=r; j++) {
if(i != k)
DFW(d_a, xRowStart + (i-1)*newsize, xColStart + (j-1)*newsize, uRowStart + (i-1)*newsize, uColStart + (k-1)*newsize, vRowStart + (k-1)*newsize, vColStart + (j-1)*newsize, newsize, d+1, rowSize);
}
}
cudaThreadSynchronize();
}
}
}
// "A" (diagonal) tile update of the tiled Floyd-Warshall, launched as
// <<<1,1>>> from FW_A_loop: one thread sweeps k, i and j sequentially
// over the whole tile.
__global__ void Aloop_FW(float *d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
/*int col = blockIdx.x * blockDim.x + threadIdx.x;
if(col >= rowSize)
return;
*/
if(currSize <= NS) {
// NOTE(review): local_a holds only NS (1024) floats but is indexed with
// i*rowSize + j -- appears out of bounds for realistic rowSize; verify.
__shared__ float local_a[NS];
for(int k = vRowStart; k < (vRowStart + currSize); k++) {
for(int i = xRowStart; i < (xRowStart + currSize); i++) {
for(int j = xColStart; j < (xColStart + currSize); j++) {
local_a[i*rowSize + j] = d_a[i*rowSize + j];
local_a[i*rowSize + k] = d_a[i*rowSize + k];
local_a[k*rowSize + j] = d_a[k*rowSize + j];
}
}
}
__syncthreads();
// Relaxation: shortest path i->j via intermediate vertex k.
for(int k = vRowStart; k < (vRowStart + currSize); k++)
{
for(int i = xRowStart; i < (xRowStart + currSize); i++)
{
for(int j = xColStart; j < (xColStart + currSize); j++)
{
if(i != j && j != k && i != k)
local_a[i*rowSize + j] = fmin(local_a[i*rowSize + j], local_a[i*rowSize + k] + local_a[k*rowSize + j]);
}
}
}
__syncthreads();
//for(int k = vRowStart; k < (vRowStart + currSize); k++) {
for(int i = xRowStart; i < (xRowStart + currSize); i++) {
for(int j = xColStart; j < (xColStart + currSize); j++) {
d_a[i*rowSize + j] = local_a[i*rowSize + j];
}
}
//}
} else
{
// Large-tile path: classic triple loop directly on global memory.
for(int k = vRowStart; k < (vRowStart + currSize); k++)
{
for(int i = xRowStart; i < (xRowStart + currSize); i++)
{
for(int j = xColStart; j < (xColStart + currSize); j++)
{
if(i != j && j != k && i != k)
d_a[i*rowSize+j] = fmin( d_a[i*rowSize+k] + d_a[k*rowSize+j] ,d_a[i*rowSize+j]);
}
}
}
}
}
// Launch the diagonal-tile kernel with a single thread; Aloop_FW loops
// over the whole tile itself.
void FW_A_loop(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int rowSize)
{
Aloop_FW<<<1,1>>>(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
}
// Top-level recursive tiled Floyd-Warshall. For each intermediate tile k:
// solve the diagonal tile (AFW), then its row panel (BFW) and column
// panel (CFW), and finally the remaining independent tiles (DFW). The
// cudaThreadSynchronize() calls separate the dependent waves.
void AFW(float* d_a, int xRowStart, int xColStart, int uRowStart, int uColStart, int vRowStart, int vColStart, int currSize, int d, int rowSize)
{
int r = tilesize[d];
if (r > currSize)
FW_A_loop(d_a, xRowStart, xColStart, uRowStart, uColStart, vRowStart, vColStart, currSize, rowSize);
else
{
int newsize = currSize/r;
for(int k=1; k<=r; k++) {
AFW(d_a, xRowStart + (k-1)*newsize, xColStart + (k-1)*newsize, uRowStart + (k-1)*newsize, uColStart + (k-1)*newsize, vRowStart + (k-1)*newsize, vColStart + (k-1)*newsize, newsize, d+1, rowSize);
for(int j=1; j<=r; j++) {
if(j != k)
BFW(d_a, xRowStart + (k-1)*newsize, xColStart + (j-1)*newsize, uRowStart + (k-1)*newsize, uColStart + (k-1)*newsize, vRowStart + (k-1)*newsize, vColStart + (j-1)*newsize, newsize, d+1, rowSize);
}
for(int i=1; i<=r; i++) {
if(i != k)
CFW(d_a, xRowStart + (i-1)*newsize, xColStart + (k-1)*newsize, uRowStart + (i-1)*newsize, uColStart + (k-1)*newsize, vRowStart + (k-1)*newsize, vColStart + (k-1)*newsize, newsize, d+1, rowSize);
}
cudaThreadSynchronize();
for(int i=1; i<=r; i++) {
for(int j=1; j<=r; j++) {
if(i != k && j != k)
DFW(d_a, xRowStart + (i-1)*newsize, xColStart + (j-1)*newsize, uRowStart + (i-1)*newsize, uColStart + (k-1)*newsize, vRowStart + (k-1)*newsize, vColStart + (j-1)*newsize, newsize, d+1, rowSize);
}
}
cudaThreadSynchronize();
}
}
}
int main(int argc, char *argv[])
{
    float *d_a;
    float *a;
    int i,j;
    cudaError_t err = cudaSuccess;
    // Matrix dimension comes from the command line; guard the access.
    if (argc < 2)
    {
        printf("usage: %s <rowSize>\n", argv[0]);
        return 1;
    }
    rowSize = atoi(argv[1]);
    int colSize = rowSize;
    size_t totalSize = (size_t)rowSize*colSize*sizeof(float);
    a = (float *) malloc(totalSize);
    if (!a)
    {
        printf("Unable to allocate memory for host array\n");
        return 1;
    }
    // Every kernel indexes d_a as a dense rowSize-wide array and the
    // copies below use plain cudaMemcpy, so allocate linearly. (The
    // original used cudaMallocPitch but ignored the returned pitch
    // everywhere, which corrupts the layout whenever
    // pitch != rowSize*sizeof(float).)
    err = cudaMalloc((void **)&d_a, totalSize);
    if(err != cudaSuccess || !d_a)
    {
        printf("%s-%d",cudaGetErrorString(err),3);
        free(a);
        return 1;
    }
    // Initialize: zero diagonal, pseudo-random non-negative weights
    // elsewhere.
    for(i = 0; i < rowSize;i++)
        for (j=0;j<colSize;j++)
        {
            if (i == j){
                a[i*rowSize+j] = 0;
            }
            else {
                a[i*rowSize+j] = (i+j)%5? (i+j) : (i+j)%7;
            }
        }
    err = cudaMemcpy(d_a, a, totalSize, cudaMemcpyHostToDevice);
    struct timeval tv1, tv2;
    gettimeofday(&tv1, NULL);
    AFW(d_a,0,0,0,0,0,0,rowSize,0, rowSize);
    gettimeofday(&tv2, NULL);
    printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec));
    err = cudaMemcpy(a, d_a, totalSize, cudaMemcpyDeviceToHost);
    print_matrix(a);
    // The original leaked both buffers.
    cudaFree(d_a);
    free(a);
    return 0;
}
|
1,727
|
// example of using CUDA streams
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <chrono>
using namespace std::chrono;
__global__
void initWith(float num, float *a, int N)
{
  // Grid-stride fill: every element of a becomes num, regardless of the
  // launch configuration.
  const int start = threadIdx.x + blockIdx.x * blockDim.x;
  const int step = blockDim.x * gridDim.x;
  for (int idx = start; idx < N; idx += step)
    a[idx] = num;
}
// Host-side sequential counterpart of the initWith kernel: fill all N
// entries of a with num.
void initWithNoStream(float num, float *a, int N)
{
  for (int idx = 0; idx < N; ++idx)
    a[idx] = num;
}
__global__
void addVectorsInto(float *result, float *a, float *b, int N)
{
  // Grid-stride element-wise sum: result[i] = a[i] + b[i].
  const int start = threadIdx.x + blockIdx.x * blockDim.x;
  const int step = blockDim.x * gridDim.x;
  for (int idx = start; idx < N; idx += step)
    result[idx] = a[idx] + b[idx];
}
// Verify every element equals target exactly; exit(1) with a message on
// the first mismatch, otherwise print the success line.
void checkElementsAre(float target, float *vector, int N)
{
  for (int idx = 0; idx < N; idx++)
  {
    if (vector[idx] == target)
      continue;
    printf("FAIL: vector[%d] - %0.0f does not equal %0.0f\n", idx, vector[idx], target);
    exit(1);
  }
  printf("Success! All values calculated correctly.\n");
}
int main(int argc, char** argv)
{
  // argv[1]: size exponent (N = 2 << exp); argv[2]: init mode 0-4.
  if (argc < 3)
  {
    printf("usage: %s <size_exponent> <init_mode 0-4>\n", argv[0]);
    return 1;
  }
  int deviceId;
  int numberOfSMs;
  cudaGetDevice(&deviceId);
  cudaDeviceGetAttribute(&numberOfSMs, cudaDevAttrMultiProcessorCount, deviceId);
  char* pEnd;
  const int N = 2<<strtol(argv[1], &pEnd, 10);
  //const int N = 2<<24;
  size_t size = N * sizeof(float);
  float *a;
  float *b;
  float *c;
  float *d;
  float *e;
  float *f;
  float *g;
  cudaMallocManaged(&a, size);
  cudaMallocManaged(&b, size);
  cudaMallocManaged(&c, size);
  cudaMallocManaged(&d, size);
  cudaMallocManaged(&e, size);
  cudaMallocManaged(&f, size);
  cudaMallocManaged(&g, size);
  // Migrate the managed buffers to the GPU up front to avoid page faults.
  cudaMemPrefetchAsync(a, size, deviceId);
  cudaMemPrefetchAsync(b, size, deviceId);
  cudaMemPrefetchAsync(c, size, deviceId);
  cudaMemPrefetchAsync(d, size, deviceId);
  cudaMemPrefetchAsync(e, size, deviceId);
  cudaMemPrefetchAsync(f, size, deviceId);
  cudaMemPrefetchAsync(g, size, deviceId);
  size_t threadsPerBlock;
  size_t numberOfBlocks;
  threadsPerBlock = 256;
  numberOfBlocks = 32 * numberOfSMs;
  cudaError_t addVectorsErr;
  cudaError_t asyncErr;
  /*
   * Create streams so the initWith launches can run concurrently.
   */
  cudaStream_t stream1, stream2, stream3, stream4, stream5, stream6, stream7;
  cudaStreamCreate(&stream1);
  cudaStreamCreate(&stream2);
  cudaStreamCreate(&stream3);
  cudaStreamCreate(&stream4);
  cudaStreamCreate(&stream5);
  cudaStreamCreate(&stream6);
  cudaStreamCreate(&stream7);
  /*
   * Initialization experiments: mode 0 is all-CPU, mode 4 is all-GPU on
   * separate streams, modes 1-3 mix the two (and deliberately leave some
   * buffers untouched, so only modes 0 and 4 produce f == 5).
   */
  int which = strtol(argv[2], &pEnd, 10);
  if (which == 0) {
    initWithNoStream(1.25, a, N);
    initWithNoStream(1.25, b, N);
    initWithNoStream(0, c, N);
    initWithNoStream(0, d, N);
    initWithNoStream(1.25, e, N);
    initWithNoStream(0, f, N);
    initWithNoStream(1.25, g, N);
  }
  if (which == 1) {
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream1>>>(3.1, a, N);
    initWithNoStream(4.25, b, N);
    initWithNoStream(0, c, N);
  }
  if (which == 2) {
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream1>>>(3.1, a, N);
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream2>>>(4.25, b, N);
    initWithNoStream(0, c, N);
  }
  if (which == 3) {
    initWithNoStream(3.1, a, N);
    initWithNoStream(4.25, b, N);
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream3>>>(0, c, N);
  }
  if (which == 4) {
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream1>>>(1.25, a, N);
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream2>>>(1.25, b, N);
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream3>>>(0, c, N);
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream4>>>(0, d, N);
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream5>>>(1.25, e, N);
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream6>>>(0, f, N);
    initWith<<<numberOfBlocks, threadsPerBlock, 0, stream7>>>(1.25, g, N);
  }
  auto start = high_resolution_clock::now();
  addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(c, a, b, N);
  addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(d, c, e, N);
  addVectorsInto<<<numberOfBlocks, threadsPerBlock>>>(f, d, g, N);
  // Kernel launches are asynchronous: wait for completion so the timer
  // measures execution (the original stopped the clock immediately and
  // only measured launch overhead).
  cudaDeviceSynchronize();
  auto stop = high_resolution_clock::now();
  auto duration = duration_cast<microseconds>(stop - start);
  std::cout<< "Time in seconds: " << duration.count()/1E6 << std::endl;
  addVectorsErr = cudaGetLastError();
  if(addVectorsErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(addVectorsErr));
  asyncErr = cudaDeviceSynchronize();
  if(asyncErr != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(asyncErr));
  // Migrate f back to the host before the CPU-side check.
  cudaMemPrefetchAsync(f, size, cudaCpuDeviceId);
  checkElementsAre(5, f, N);
  /*
   * Destroy streams when they are no longer needed.
   */
  cudaStreamDestroy(stream1);
  cudaStreamDestroy(stream2);
  cudaStreamDestroy(stream3);
  cudaStreamDestroy(stream4);
  cudaStreamDestroy(stream5);
  cudaStreamDestroy(stream6);
  cudaStreamDestroy(stream7);
  cudaFree(a);
  cudaFree(b);
  cudaFree(c);
  cudaFree(d);
  cudaFree(e);
  cudaFree(f);
  cudaFree(g);
}
|
1,728
|
#include<stdio.h>
#include<time.h>
#include<time.h>
#include<stdlib.h>
#include<math.h>
// Initialize a[i] = 2i and b[i] = 3i for the first n global threads
// (c is unused here; func2 fills it).
__global__ void func1(int *c,int *a,int *b,int n)
{
    const int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    a[idx] = 2 * idx;
    b[idx] = 3 * idx;
}
// Element-wise sum: c[i] = a[i] + b[i] for the first n global threads.
__global__ void func2(int *c,int *a,int *b,int n)
{
    const int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n)
        return;
    c[idx] = a[idx] + b[idx];
}
int main()
{
    int n = 5;
    int a[n], b[n], c[n];
    int *d_a, *d_b, *d_c;
    // The original launched with 2048 threads per block, exceeding the
    // 1024-thread hardware limit, so both kernels failed silently and c
    // held garbage. Use a valid configuration that still covers n.
    int threads = 256;
    int blocks = (n + threads - 1) / threads;
    cudaMalloc((void **)&d_a, n*sizeof(int));
    cudaMalloc((void **)&d_b, n*sizeof(int));
    cudaMalloc((void **)&d_c, n*sizeof(int));
    // func1 writes a and b entirely on the device, func2 sums them into
    // c, so no host->device copies (or the original's intermediate
    // free/realloc round-trips) are needed.
    func1<<<blocks, threads>>>(d_c,d_a,d_b,n);
    cudaDeviceSynchronize();
    func2<<<blocks, threads>>>(d_c,d_a,d_b,n);
    cudaDeviceSynchronize();
    cudaMemcpy(&c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&a, d_a, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&b, d_b, n*sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    for (int i = 0; i < n; i++)
    {
        printf("c =%d\n",c[i]);
    }
    return 0;
}
|
1,729
|
#include "includes.h"
// Each thread runs an independent shift-register PRNG seeded from
// seeds[idx] and writes its outputs into randoms[] at a fixed stride of
// 32768 entries, skipping positions beyond size.
__global__ void GenerateRandoms(int size, int iterations, unsigned int *randoms, unsigned int *seeds) {
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int stride = 32768;
    unsigned int z = seeds[idx];
    int pos = idx;
    for (int iter = 0; iter < iterations; ++iter)
    {
        if (pos < size)
        {
            const unsigned int b = (((z << 13) ^ z) >> 19);
            z = (((z & UINT_MAX) << 12) ^ b);
            randoms[pos] = z;
            pos += stride;
        }
    }
}
|
1,730
|
//fonte: https://github.com/carloschilazo/CUDA_GA/blob/master/program.cu
#include <iostream>
#include <cstdlib>
#include <stdio.h>
#include <cuda.h>
#include <time.h>
using namespace std;
/* Cannot use built-in functions, need to rewrite pow function so it can run on the device, kinda reinventing the wheel over here
Im not sure if CUDA has something in its SDK, however better build it myself to avoid any overhead
0^0 even though is not defined, im treating it as 1
*/
/* Device-side integer power: result = base^exponent by repeated
   multiplication. 0^0 (and any exponent <= 0) yields 1, matching the
   file's stated convention. */
__device__ void calculate_exponent(int base,int exponent,long &result){
    result = 1L;
    for (int e = 0; e < exponent; ++e) {
        result *= (long)base;
    }
}
// Fitness evaluation: each thread scores individualsPerThread consecutive
// individuals. An individual's score is the sum over its genes of
// gene_value ^ (number_genes - 1 - gene_index), computed with the
// device-side calculate_exponent helper. Threads are laid out along the
// y dimension of blocks/grid (see the index math below).
__global__ void evaluate(int *input, int totalSizeOfArray, int number_genes, int individualsPerThread, int number_blocks, int threads_per_block, long *scores){
/*global position in population array index calculation */
int startingPosition = (blockIdx.y * threads_per_block * number_genes * individualsPerThread) + (threadIdx.y * number_genes * individualsPerThread);
if(startingPosition>=totalSizeOfArray){
return; //*return if thread is useless, the final block may have some threads that will not compute any data therefore we return early */
}
/*global position in scores array index calculation */
int startingPosition_scores = (blockIdx.y * threads_per_block * individualsPerThread) + (threadIdx.y * individualsPerThread);
long acumulated = 0L;
long temp = 0L;
for(int counter_individuals=0;counter_individuals<individualsPerThread;counter_individuals++){
// Stop once this thread's slice runs past the end of the population.
if(startingPosition + (counter_individuals*number_genes) >= totalSizeOfArray){
return;
}
for(int counter_gene=0;counter_gene<number_genes;counter_gene++){
int base = startingPosition + (counter_individuals*number_genes) + counter_gene;
calculate_exponent(input[base],(number_genes-1)-counter_gene,temp);
acumulated += temp;
}
scores[startingPosition_scores+counter_individuals] = acumulated;
acumulated=0L;
}
}
/* Absolute deviation |desired_number - actual_result| using
   unsigned-safe branching (no signed subtraction that could wrap). */
__device__ void determine_fitness_solution(unsigned long desired_number, unsigned long actual_result, unsigned long &deviation){
    if (desired_number > actual_result) {
        deviation = desired_number - actual_result;
    } else if (actual_result > desired_number) {
        deviation = actual_result - desired_number;
    } else {
        deviation = 0;
    }
}
/* Each thread inspects its slice of the scores array and writes into
   *solution_found_flag the index of any individual whose score matches
   desired_number exactly or deviates from it by less than
   acceptable_error. Threads are laid out along the y dimension. */
__global__ void scan_for_solution(long *scores_array, int number_individuals, int individuals_per_thread, int threads_per_block, int *solution_found_flag, unsigned long desired_number, int acceptable_error){
    int base = (blockIdx.y * threads_per_block * individuals_per_thread) + (threadIdx.y * individuals_per_thread);
    if (base >= number_individuals) {
        return; /* thread past the end of the population: no work */
    }
    for (int off = 0; off < individuals_per_thread; off++) {
        int pos = base + off;
        if (pos >= number_individuals) {
            return;
        }
        unsigned long score = scores_array[pos];
        unsigned long deviation;
        determine_fitness_solution(desired_number, score, deviation);
        if (deviation == 0 || deviation < acceptable_error) {
            *solution_found_flag = pos;
        }
    }
}
/* Host driver: builds a random population, scores every individual on the
   GPU (`evaluate`), then scans the scores (`scan_for_solution`) for one whose
   score lies within the acceptable error window of `desired_number`.
   Prints the index of a matching individual, or -1 when none matched.
   Fix: all host and device allocations were previously leaked; they are now
   released before returning. */
int main(){
    /* define settings */
    const unsigned int number_genes = 10;
    const unsigned int number_individuals = 10000000;
    const unsigned int threads_per_block_evaluation = 500; //DO NOT FORGET: BLOCK IS 1 thread width, and threads_per_block height, MAX 512
    const unsigned int individuals_per_thread_evaluation = 50;
    /* desired algorithm result and acceptable error */
    const unsigned long desired_number = 123456;
    const unsigned int acceptable_error_window = 1000; /* So result can be +- acceptable_error_window */
    /* allocate and randomly initialize memory for population */
    int *population_array_host = new int[number_genes*number_individuals];
    int *population_array_device;
    srand ( time(NULL) );
    for(int contador=0;contador<number_genes*number_individuals;contador++){
        population_array_host[contador] = ( rand() % 10 ); /* gene values 0..9 */
    }
    size_t memory_for_population = number_genes*number_individuals*sizeof(int);
    cudaMalloc((void **) &population_array_device, memory_for_population);
    /* allocate and zeroise scores array, avoid any future issues with non initialized arrays */
    long *scores_array_host = new long[number_individuals];
    long *scores_array_device;
    for(int contador=0;contador<number_individuals;contador++){
        scores_array_host[contador] = 0L;
    }
    size_t memory_for_scores = number_individuals*sizeof(long);
    cudaMalloc((void **) &scores_array_device, memory_for_scores);
    /* allocate and initialize the result flag: -1 means "no solution found",
       otherwise it holds the index of the matching individual */
    int *solution_found_host = new int;
    *solution_found_host = -1;
    int *solution_found_device;
    size_t memory_solution_found = sizeof(int);
    cudaMalloc((void **) &solution_found_device, memory_solution_found);
    /* we move data from host to device */
    cudaMemcpy(population_array_device, population_array_host, memory_for_population, cudaMemcpyHostToDevice);
    cudaMemcpy(scores_array_device, scores_array_host, memory_for_scores, cudaMemcpyHostToDevice);
    cudaMemcpy(solution_found_device, solution_found_host, memory_solution_found, cudaMemcpyHostToDevice);
    /* grid/block dimensions for evaluation (1-wide, N-high, see kernel) */
    unsigned int blocks_required_evaluation = number_individuals/(threads_per_block_evaluation *individuals_per_thread_evaluation) +
        (number_individuals%(threads_per_block_evaluation *individuals_per_thread_evaluation) == 0 ? 0:1);
    dim3 grid_evaluation(1,blocks_required_evaluation); /* in terms of blocks */
    dim3 block_evaluation(1,threads_per_block_evaluation); /* in terms of threads*/
    /* grid/block dimensions for scanning the scores */
    const unsigned int individuals_per_thread_scan_scores = 50;
    const unsigned int threads_per_block_scan_scores = 511; // remember block is 1 thread width and threads_per_block_scan_scores height
    const unsigned int blocks_required_scan_scores = (number_individuals/ (individuals_per_thread_scan_scores * threads_per_block_scan_scores)) +
        (number_individuals%(threads_per_block_scan_scores * individuals_per_thread_scan_scores) == 0 ? 0:1);
    dim3 grid_scan_scores(1,blocks_required_scan_scores); // in terms of blocks
    dim3 block_scan_scores(1,threads_per_block_scan_scores); // in terms of threads
    /* output parameters */
    cout << "-Algorithm parameters-" << endl;
    cout << "Individuals: " << number_individuals << endl;
    cout << "Genes per individual: " << number_genes << endl;
    cout << "Individuals computed per thread: " << individuals_per_thread_evaluation << endl;
    cout << "-Computing distribution for evaluation-" << endl;
    cout << "Blocks required: " << blocks_required_evaluation << endl;
    cout << "Threads per block: " << threads_per_block_evaluation << endl;
    cout << "Total number of threads: " << blocks_required_evaluation*threads_per_block_evaluation << endl;
    cout << "-Computing distribution for scan_results-" << endl;
    cout << "Individuals (scores) evaluated per thread: " << individuals_per_thread_scan_scores << endl;
    cout << "Threads per block: " << threads_per_block_scan_scores << endl;
    cout << "Blocks required: " << blocks_required_scan_scores << endl;
    cout << endl << "Algorithm Start" << endl;
    /* score every individual, then scan scores; the blocking cudaMemcpy below
       implicitly synchronizes with both kernel launches */
    evaluate <<< grid_evaluation, block_evaluation >>> (population_array_device, number_genes*number_individuals, number_genes, individuals_per_thread_evaluation, blocks_required_evaluation, threads_per_block_evaluation, scores_array_device);
    scan_for_solution <<< grid_scan_scores, block_scan_scores >>> (scores_array_device, number_individuals, individuals_per_thread_scan_scores, threads_per_block_scan_scores, solution_found_device, desired_number, acceptable_error_window);
    cudaMemcpy(solution_found_host, solution_found_device, memory_solution_found, cudaMemcpyDeviceToHost);
    cout << *solution_found_host << endl;
    /* release device and host memory (previously leaked) */
    cudaFree(population_array_device);
    cudaFree(scores_array_device);
    cudaFree(solution_found_device);
    delete[] population_array_host;
    delete[] scores_array_host;
    delete solution_found_host;
    return 0;
}
|
1,731
|
#include "includes.h"
/* No-op kernel (empty body); presumably used to warm up the device or to
   measure bare launch overhead — TODO confirm with the caller. */
__global__ void empty() {}
|
1,732
|
#include <cuda_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/sort.h>
/* Sorts `numElements` unsigned ints ascending on the device with thrust.
   When output_ptr is non-null the input is first copied there and the copy is
   sorted, leaving dev_ptr untouched; otherwise dev_ptr is sorted in place.
   Both pointers must reference device memory. */
extern void sort_uint_internal(void* dev_ptr, unsigned numElements, void* output_ptr)
{
    unsigned* target = (unsigned*)(output_ptr ? output_ptr : dev_ptr);
    if(output_ptr)
        cudaMemcpy(output_ptr, dev_ptr, numElements * sizeof(unsigned), cudaMemcpyDeviceToDevice);
    thrust::device_ptr<unsigned> first(target);
    thrust::stable_sort(first, first + numElements);
}
/* Sorts `numElements` doubles ascending on the device with thrust.
   When output_ptr is non-null the input is first copied there and the copy is
   sorted, leaving dev_ptr untouched; otherwise dev_ptr is sorted in place.
   Both pointers must reference device memory. */
extern void sort_double_internal(void* dev_ptr, unsigned numElements, void* output_ptr)
{
    double* target = (double*)(output_ptr ? output_ptr : dev_ptr);
    if(output_ptr)
        cudaMemcpy(output_ptr, dev_ptr, numElements * sizeof(double), cudaMemcpyDeviceToDevice);
    thrust::device_ptr<double> first(target);
    thrust::stable_sort(first, first + numElements);
}
|
1,733
|
#include "includes.h"
__global__ void kSigmoid(const int nThreads, float const *input, float *output){
	/* Computes the value of the sigmoid function f(x) = 1/(1 + e^-x)
	   element-wise over `nThreads` elements, using a grid-stride loop so any
	   launch configuration covers the whole range.
	   Inputs:
	       input: array
	       output: array, the results of the computation are to be stored here
	   Fix: the previous version used double literals and std::exp, silently
	   promoting the whole expression to double; use float literals and expf. */
	for (int i = blockIdx.x * blockDim.x + threadIdx.x;
	     i < nThreads;
	     i += blockDim.x * gridDim.x)
	{
		output[i] = 1.0f / (1.0f + expf(-input[i]));
	}
}
|
1,734
|
// Elapsed Real Time for input-4.txt: 1.381 seconds
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <cuda_runtime.h>
// Size of the square we're looking for.
#define SQUARE_WIDTH 6
#define SQUARE_HEIGHT 6
// Maximum width of a row. Makes it easier to allocate the whole
// grid contiguously.
#define MAX_WIDTH 16384
// Type used for a row of the grid. Makes it easier to declare the
// grid as a pointer.
typedef char Row[ MAX_WIDTH ];
// Size of the grid of characters.
int rows, cols;
// Grid of letters.
Row *grid;
// Kernel, run by each thread to count complete squares in parallel.
// Each thread owns one starting row r0 and scans every column, counting
// SQUARE_WIDTH x SQUARE_HEIGHT windows that contain all 26 letters.
// Fixes vs. the previous version:
//  * gpuResults[r0] was written even when r0 >= rows (the grid is rounded
//    up), an out-of-bounds write; such threads now exit immediately.
//  * hard-coded 6s replaced with the SQUARE_WIDTH/SQUARE_HEIGHT macros.
__global__ void countSquares( int rows, int cols, bool report, int *gpuResults, Row *gridCpy ) {
  // Unique starting row for this thread.
  int r0 = blockDim.x * blockIdx.x + threadIdx.x;
  // Threads past the last row have no result slot: do not touch gpuResults.
  if ( r0 >= rows )
    return;
  int c = 0;
  // Only rows with a full square below them can contribute.
  if ( r0 + SQUARE_HEIGHT - 1 < rows ) {
    for ( int col = 0; col + SQUARE_WIDTH <= cols; col++ ) {
      // One bucket per letter; a bucket is cleared to '0' once seen.
      char letters[26] = {'a','b','c','d','e','f','g','h','i','j','k','l','m',
                          'n','o','p','q','r','s','t','u','v','w','x','y','z'};
      for ( int i = r0; i < r0 + SQUARE_HEIGHT; i++ )
        for ( int j = col; j < col + SQUARE_WIDTH; j++ )
          for ( int k = 0; k < 26; k++ )
            if ( gridCpy[i][j] == letters[k] )
              letters[k] = '0';
      // The window is complete iff every bucket was cleared.
      bool check = true;
      for ( int k = 0; k < 26; k++ ) {
        if ( letters[k] != '0' ) {
          check = false;
          break;
        }
      }
      if ( check ) {
        c += 1;
        if ( report )
          printf("%d %d\n", r0, col);
      }
    }
  }
  gpuResults[r0] = c; // number of complete squares starting at row r0
}
// Read the grid of characters from stdin into the global `grid`,
// setting the global `rows`/`cols` dimensions first.
// NOTE(review): scanf and malloc return values are unchecked — malformed
// input or allocation failure goes undetected; confirm inputs are trusted.
void readGrid() {
// Read grid dimensions.
scanf( "%d%d", &rows, &cols );
if ( cols > MAX_WIDTH ) {
fprintf( stderr, "Input grid is too wide.\n" );
exit( EXIT_FAILURE );
}
// Make space to store the grid as a big, contiguous array.
grid = (Row *) malloc( rows * sizeof( Row ) );
// Read each row of the grid as a string, then copy everything
// but the null terminator into the grid array.
int rowCount = 0;
char buffer[ MAX_WIDTH + 1 ];
while ( rowCount < rows ) {
scanf( "%s", buffer );
memcpy( grid[ rowCount++ ], buffer, cols );
}
}
// Write the given diagnostic to stderr (with a trailing newline) and
// terminate the program with a failure status.
static void fail( char const *message ) {
  fputs( message, stderr );
  fputc( '\n', stderr );
  exit( 1 );
}
// Emit the command-line usage string on stdout, then terminate unsuccessfully.
static void usage() {
  fputs( "usage: square [report]\n", stdout );
  exit( 1 );
}
//main
// Parses the optional "report" argument, reads the grid, counts complete
// letter squares on the GPU, and prints the total.
// NOTE(review): cudaMalloc/cudaMemcpy results are unchecked; only the kernel
// launch is verified via cudaGetLastError. The blocking cudaMemcpy below
// implicitly synchronizes with the kernel, so no explicit sync is needed.
int main( int argc, char *argv[] ) {
// If there's an argument, it better be "report"
bool report = false;
if ( argc == 2 ) {
if ( strcmp( argv[ 1 ], "report" ) != 0 )
usage();
report = true;
}
readGrid();
// Need to add code to allocate memory on the device and copy the grid
// over.
Row *gridCpy = NULL;
cudaMalloc( (void **)&gridCpy, rows * sizeof( Row ) );
cudaMemcpy( gridCpy, grid, rows * sizeof( Row ), cudaMemcpyHostToDevice );
//allocate memory for results array
int *gpuResults = NULL;
cudaMalloc((void **)&gpuResults, rows * sizeof(int));
// Block and grid dimensions.
int threadsPerBlock = 250;
// Round up.
int blocksPerGrid = ( rows + threadsPerBlock - 1 ) / threadsPerBlock;
// Run our kernel on these block/grid dimensions
countSquares<<<blocksPerGrid, threadsPerBlock>>>( rows, cols, report, gpuResults, gridCpy );
if ( cudaGetLastError() != cudaSuccess )
fail( "Failure in CUDA kernel execution." );
int *results = (int *) malloc( rows * sizeof( int ) );
// Need to add code to copy the results list back to the host and
// add them up.
cudaMemcpy( results, gpuResults, rows * sizeof(int), cudaMemcpyDeviceToHost);
int total = 0;
for(int i = 0; i < rows; i++) {
total += results[i];//add all integers in results array to get total
}
printf( "Squares: %d\n", total );
// Free memory on the device and the host.
free(grid);
free(results);
cudaFree(gridCpy);
cudaFree(gpuResults);
cudaDeviceReset();
return 0;
}
|
1,735
|
//Vector Addition using CUDA.
//Winter 2020
//High Performance Computing.
#include <string> //For stoi.
#include <iostream> //For stdout.
#include <cstdlib> //For random number generator.
#include <chrono> //For getting time.
#include <climits> //For maximum n.
#include <cmath>
#include "cuda_runtime.h" //For Windows support.
#include "device_launch_parameters.h"
//The type that is used for the calculations.
typedef int type;
//Define constants for min/max.
#define RANDOMIZE_MIN -10
#define RANDOMIZE_MAX 10
#define DEVICE_NUM 0
//Cuda calculator which will run in each thread.
//Each thread adds `num_calcs` consecutive elements: c[i] = a[i] + b[i].
//The caller pads the arrays up to blocks*threads*num_calcs elements, so no
//per-element bounds check is needed.
//
//Fixes vs. the previous version:
// * The three `extern __shared__` declarations all alias the SAME dynamic
//   shared-memory allocation in CUDA, and the kernel was launched with no
//   dynamic shared memory at all, so every shared access was out of bounds.
// * The sum read uninitialized c_shared and never used `a` at all.
//Shared memory buys nothing for a streaming element-wise add, so the kernel
//now operates directly on global memory.
__global__ void cuda_calculator(type* a, type* b, type* c, int num_calcs)
{
	//Contiguous range owned by this thread.
	int start_index = (threadIdx.x + blockIdx.x * blockDim.x) * num_calcs;
	int end_index = start_index + num_calcs;
	//Element-wise addition over this thread's slice.
	for(int i = start_index; i < end_index; i++)
		c[i] = a[i] + b[i];
}
//Cuda addition which runs the cuda program.
//Allocates device buffers (padded so every thread has a full slice), copies
//the inputs over, launches cuda_calculator, and copies the result back.
//times[0]/times[1]/times[2] receive host->device transfer, kernel, and
//device->host transfer durations in seconds.
//NOTE(review): the padding tail of cu_vec_a/cu_vec_b is never initialized;
//the kernel reads it, but those results land in the (discarded) padded part
//of cu_vec_c — confirm this is intentional.
//NOTE(review): the early return on a launch error leaks the three device
//buffers. cudaMalloc/cudaMemcpy results are unchecked.
int cuda_addition(type* a, type* b, type* c, int n, int blocks,
int threads, double times[3])
{
//Create pointers for the GPU memory allocation
type* cu_vec_a;
type* cu_vec_b;
type* cu_vec_c;
//Calculate the number of elements that each kernel will handle (round up).
int num_calcs = std::ceil((double) n / (((double) blocks) * ((double) threads)));
//Calculate the padding (for output matrix to avoid conditionals in kernel.
int padding_size = (int)(num_calcs * blocks * threads) - n ;
//Allocate memory on the device for the arrays.
cudaMalloc((void**) &cu_vec_a, sizeof(type) * (n + padding_size));
cudaMalloc((void**) &cu_vec_b, sizeof(type) * (n + padding_size));
cudaMalloc((void**) &cu_vec_c, sizeof(type) * (n + padding_size));
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
//Capture the beginning time before the data transfer (from host).
auto begin_transfer_to = std::chrono::high_resolution_clock::now();
//Copy the data, and the size from the main memory to VRAM.
cudaMemcpy(cu_vec_a, a, ((int) sizeof(type)) * n, cudaMemcpyHostToDevice);
cudaMemcpy(cu_vec_b, b, ((int) sizeof(type)) * n, cudaMemcpyHostToDevice);
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
//Calculate the total time in seconds that it took to transfer data to the device
auto total_transfer_to = std::chrono::high_resolution_clock::now() - begin_transfer_to;
times[0] = std::chrono::duration<double> (total_transfer_to).count();
//Get the device properties for optimizing the memory usage.
cudaDeviceProp stats;
cudaGetDeviceProperties(&stats, DEVICE_NUM);
//Get the shared memory size.
//NOTE(review): queried and printed only; the value is not used in the launch.
size_t shared_mem_size = stats.sharedMemPerBlock;
std::cerr << "Shared Mem size is " << shared_mem_size << " bytes" << std::endl;
//Capture the beginning time before the calculations.
auto begin_calcs_only = std::chrono::high_resolution_clock::now();
//Launch the addition kernel on the device.
cuda_calculator<<<blocks, threads>>>(cu_vec_a, cu_vec_b, cu_vec_c, num_calcs);
//Check if we got any errors.
if(cudaGetLastError() != cudaSuccess)
return EXIT_FAILURE;
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
//Calculate the total time in seconds that it took to calculate.
auto total_calcs_only = std::chrono::high_resolution_clock::now() - begin_calcs_only;
times[1] = std::chrono::duration<double> (total_calcs_only).count();
//Capture the beginning time before the calculations.
auto begin_transfer_from = std::chrono::high_resolution_clock::now();
//Copy the results back from Vram to main ram.
cudaMemcpy(c, cu_vec_c, ((int) sizeof(type)) * n, cudaMemcpyDeviceToHost);
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
//Calculate the total time in seconds that it took to transfer back to host.
auto total_transfer_from = std::chrono::high_resolution_clock::now() - begin_transfer_from;
times[2] = std::chrono::duration<double> (total_transfer_from).count();
//Deallocate memory in the GPU.
cudaFree(cu_vec_a);
cudaFree(cu_vec_b);
cudaFree(cu_vec_c);
//Wait for the thread to finish execution.
cudaDeviceSynchronize();
return EXIT_SUCCESS;
}
//Sequential addition function.
//Adds `a` and `b` element-wise into `c` and returns the elapsed wall time in
//seconds, measured with the high-resolution clock.
double seq_addition(type* a, type* b, type* c, int size)
{
    //Start the clock before any work.
    auto t0 = std::chrono::high_resolution_clock::now();
    //Element-wise sum.
    int i = 0;
    while(i < size)
    {
        c[i] = a[i] + b[i];
        ++i;
    }
    //Stop the clock and convert the span to seconds.
    auto elapsed = std::chrono::high_resolution_clock::now() - t0;
    return std::chrono::duration<double>(elapsed).count();
}
//Sequential subtraction function (used for residual matrix).
//Computes c[i] = a[i] - b[i] for every index in [0, size).
void seq_subtraction(type* a, type* b, type* c, int size)
{
    for(int idx = 0; idx < size; ++idx)
    {
        c[idx] = a[idx] - b[idx];
    }
}
//Returns false if first and second aren't equal, true otherwise.
//Compares the two arrays element by element over [0, size).
bool are_equal(type* first, type* second, int size)
{
    bool same = true;
    for(int idx = 0; idx < size && same; ++idx)
        same = (first[idx] == second[idx]);
    return same;
}
//A function which randomizes the vector; by default it uses values in
//[RANDOMIZE_MIN, RANDOMIZE_MAX].
//Fix: the min/max parameters were previously ignored — the body always used
//the macros, so callers passing custom bounds silently got the default range.
//Each element is now drawn uniformly from [min, max].
void randomize(type* vec, int size, int min = RANDOMIZE_MIN, int max = RANDOMIZE_MAX)
{
    //Perform this to ensure the random number generation is truly random.
    std::srand(std::chrono::system_clock::now().time_since_epoch().count());
    //Iterate through, and generate random numbers for each index.
    for(int i = 0; i < size; i++)
        vec[i] = (type)(min + std::rand() % (max - min + 1));
}
//Print the given vector to stdout, elements separated by " | ".
//Fix: with size == 0 the previous version read vec[-1]; an empty vector now
//just prints a newline.
void dump(type* vec, int size)
{
    if(size < 1)
    {
        std::cout << std::endl;
        return;
    }
    //Print all but the last element with a separator.
    for(int i = 0; i < size - 1; i++)
        std::cout << std::scientific << vec[i] << " | " ;
    //Print the last item in a different format and add a new line.
    std::cout << std::scientific << vec[size - 1] << std::endl;
}
//A function which will be called when there is an error.
//Prints the error message followed by the usage guide, and returns
//EXIT_FAILURE so the caller can write `return error(...)` from main.
int error(std::string msg)
{
    //The error itself, then the usage guide line by line.
    std::cout << "Error: " << msg << std::endl;
    std::cout << std::endl << "Usage Guide:" << std::endl;
    std::cout << "\t* ./a.out <Size of Vectors> <Number of Blocks> <Number of Threads>"
              << " <Output Mode>" << std::endl;
    std::cout << "\t* Output mode is either \'q\' (quiet) or \'v\' (verbose)" << std::endl;
    std::cout << "\t* Number of blocks and threads are for the GPU." << std::endl;
    return EXIT_FAILURE;
}
//Main method which parses the arguments, and runs the program.
//Usage: ./a.out <n> <blocks> <threads> <q|v>
//Runs the addition sequentially and on the GPU, verifies the results match,
//prints timings, and (in verbose mode) dumps all vectors.
//Fix: `delete[] vec_a, vec_b, ...` used the comma operator and only freed
//vec_a; every array is now deleted individually.
int main(int argc, char** argv)
{
//Define values for parameters.
int n, blocks, threads;
bool verbose;
//Check for invalid number of args.
if(argc != 5)
return error("Invalid number of arguments.");
//Parse the arguments.
try
{
n = std::stoi(argv[1]);
blocks = std::stoi(argv[2]);
threads = std::stoi(argv[3]);
}
catch(...) //If we get here, there was an error in the arguments.
{
return error("Invalid arguments, could not parse.");
}
//Check the print mode.
if(std::string(argv[4]) == "q" || std::string(argv[4]) == "v")
//If the mode is valid and set to v, set verbose to true, false otherwise.
verbose = (std::string(argv[4]) == "v" ? true : false);
else
//If we get here an invalid mode was passed.
return error("Invalid print mode.");
//Check for invalid threads / blocks / n sizes.
if(n < 1 || blocks < 1 || threads < 1)
return error("Invalid arguments. All parameters should be positive.");
//Check if we're gonna get overflow.
//NOTE(review): this is always false for an int; std::stoi above already
//throws (and is caught) when the argument exceeds INT_MAX.
if(n > INT_MAX)
return error("Integer Overflow, please reduce N.");
//Allocate memory for the input vectors.
type* vec_a = new type[n];
type* vec_b = new type[n];
//Randomize the input vectors.
randomize(vec_a, n);
randomize(vec_b, n);
//Allocate output matrices for the sequential and cuda executions.
type* vec_c_seq = new type[n];
type* vec_c_cuda = new type[n];
//Perform the sequential addition.
double seq_time = seq_addition(vec_a, vec_b, vec_c_seq, n);
//Perform the cuda addition, and capture the timings.
double times[3];
int stat = cuda_addition(vec_a, vec_b, vec_c_cuda, n, blocks, threads, times);
//Check the status.
if(stat == EXIT_FAILURE)
return error("Failed to execute kernel.");
//Check if the cuda and sequential results are not equal (error).
if(!are_equal(vec_c_seq, vec_c_cuda, n))
{
std::cout << "Error: Output vectors were not equal." << std::endl
<< "ErrorInfo: N=" << n << " Blocks=" << blocks
<< " Threads=" << threads << std::endl;
}
//Print the timing results, and the input arguments.
std::cout << "[Cuda_Transfer_To_Device_Seconds]=" << std::scientific << times[0]
<< " [Cuda_Transfer_To_Host_Seconds]=" << std::scientific << times[2]
<< " [Cuda_Calculation_Time_Seconds]=" << std::scientific << times[1]
<< " [Sequential_Time_Seconds]=" << std::scientific << seq_time
<< " [N]=" << n << " [Blocks]=" << blocks
<< " [Threads]=" << threads
<< std::endl;
//Allocate memory for residual vector.
type* residual = new type[n];
//Check if we're in verbose output mode.
if(verbose)
{
//Calculate residual vector for sequential implementation vs cuda.
seq_subtraction(vec_c_seq, vec_c_cuda, residual, n);
//Print out the inputs, calculations and residual vector.
std::cout << std::endl << "Printing out the First Vector:" << std::endl;
dump(vec_a, n);
std::cout << "\nPrinting out the Second Vector:" << std::endl;
dump(vec_b, n);
std::cout << "\nPrinting out the Addition results (Sequential):" << std::endl;
dump(vec_c_seq, n);
std::cout << "\nPrinting out the Addition results (Cuda):" << std::endl;
dump(vec_c_cuda, n);
std::cout << "\nPrinting out the residual matrix (Seq - Cuda):" << std::endl;
dump(residual, n);
}
//Deallocate the memory in the heap (each array individually).
delete[] vec_a;
delete[] vec_b;
delete[] vec_c_seq;
delete[] vec_c_cuda;
delete[] residual;
return EXIT_SUCCESS;
}
|
1,736
|
#include "includes.h"
__global__ void kDot(const int nThreads, const float *m1, const float *m2, float *output, const int m1_rows , const int m1_columns, const int m2_columns ){
/* Computes the product of two matrices: m1 x m2.
Inputs:
m1: array, left matrix of size m1_rows x m1_columns
m2: array, right matrix of size m1_columns x m2_columns (the number of rows in the right matrix
must be equal to the number of the columns in the left one)
output: array, the results of the computation are to be stored here:
m1 * m2, product of two arrays m1 and m2, a matrix of size m1_rows x m2_columns
m1_rows: int, number of rows in the left matrix m1
m1_columns: int, number of columns in the left matrix m1
m2_columns: int, number of columns in the right matrix m2
*/
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < nThreads;
i += blockDim.x * gridDim.x)
{
int r = (int)i / m2_columns;
int c = i % m2_columns;
float t_output = 0.f;
for( int k = 0; k < m1_columns; ++k ) {
t_output += m1[ r * m1_columns + k ] * m2[ k * m2_columns + c ];
}
output[i] = t_output;
}
}
|
1,737
|
#include <cuda_runtime.h>
#include <stdio.h>
/* Doubles every element of `vec` in place; one element per thread, with a
   bounds guard for threads past the end. */
__global__ void doublevector(int* vec, int N)
{
    const int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (gid >= N)
        return;
    vec[gid] = vec[gid] * 2;
}
/* Initializes vec[i] = i; one element per thread, with a bounds guard for
   threads past the end. */
__global__ void init(int* vec, int N)
{
    const int gid = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (gid >= N)
        return;
    vec[gid] = gid;
}
/* Host-side verification: true iff vec[i] == 2*i for every i in [0, N). */
bool check(int* vec, int N)
{
    bool ok = true;
    for (int i = 0; i < N && ok; ++i)
        ok = (vec[i] == i * 2);
    return ok;
}
/* Allocates a managed vector, fills it and doubles it on the GPU, then
   verifies the result on the host.
   Fix: on cudaMallocManaged failure execution previously continued and the
   kernels dereferenced an invalid pointer; we now bail out immediately. */
int main(int argc, char** argv)
{
    const int N = 4096;
    int* vec;
    // managed memory in cuda
    cudaError_t result = cudaMallocManaged(&vec, N * sizeof(int));
    if (result != cudaSuccess) {
        printf("cudaMalloc failed\n");
        return 1;
    }
    // initialize the vector on the gpu (4 blocks, N/4 = 1024 threads each)
    init<<<4, N/4>>>(vec, N);
    // double every element with the same launch configuration
    doublevector<<<4,N/4>>>(vec, N);
    // block the cpu for the gpu to finish execution
    cudaDeviceSynchronize();
    if (!check(vec, N)) {
        printf("test failed\n");
    }
    cudaFree(vec);
    return 0;
}
|
1,738
|
#include "includes.h"
/* Paints every pixel listed in `indices` opaque black (RGBA 0,0,0,255) in the
   uchar4 buffer `ptr`. Grid-stride loop so any launch size covers all `size`
   entries. */
__global__ void DrawObstacles(uchar4 *ptr, int* indices, int size) {
    const int stride = blockDim.x * gridDim.x;
    for (int t = threadIdx.x + blockIdx.x * blockDim.x; t < size; t += stride) {
        const int px = indices[t];
        ptr[px].x = 0;
        ptr[px].y = 0;
        ptr[px].z = 0;
        ptr[px].w = 255;
    }
}
|
1,739
|
#include "includes.h"
/* ==================================================================
Programmers:
Kevin Wagner
Elijah Malaby
John Casey
Omptimizing SDH histograms for input larger then global memory
==================================================================
*/
#define BOX_SIZE 23000 /* size of the data box on one dimension */
/* descriptors for single atom in the tree */
typedef struct atomdesc {
float x_pos;
float y_pos;
float z_pos;
} atom;
unsigned long long * histogram; /* list of all buckets in the histogram */
unsigned long long PDH_acnt; /* total number of data points */
int block_size; /* Number of threads per block */
int num_buckets; /* total number of buckets in the histogram */
float PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
unsigned long long * histogram_GPU; /* device copy of the (per-block) histogram */
unsigned long long * temp_histogram_GPU; /* scratch device histogram — usage not visible here, TODO confirm */
atom * atom_list_GPU; /* device copy of the atom list */
/* Device helper: for this thread's atom in block_a, accumulates the distance
   to every atom of block_b into the (shared) histogram; bucket index is
   floor(distance / resolution).
   NOTE(review): no bounds check on the bucket index — distances beyond
   num_buckets*resolution would write out of range; presumably the input box
   size guarantees this cannot happen — confirm.
   NOTE(review): sqrt on these float expressions evaluates in double. */
__device__ void block_to_block (atom * block_a, atom * block_b, int b_length, unsigned long long * histogram, float resolution) {
atom me = block_a[threadIdx.x];
for(int i = 0; i < b_length; i++)
atomicAdd(&(histogram[(int)(sqrt((me.x_pos - block_b[i].x_pos) * (me.x_pos - block_b[i].x_pos) +
(me.y_pos - block_b[i].y_pos) * (me.y_pos - block_b[i].y_pos) +
(me.z_pos - block_b[i].z_pos) * (me.z_pos - block_b[i].z_pos)) / resolution)]),
1);
}
/* Spatial distance histogram kernel. Each block owns a contiguous chunk of
   atoms, accumulates pair distances into a dynamically-sized shared histogram
   (intra-block pairs first, then this block vs. every later block via
   block_to_block), and finally writes its shared histogram into its own
   num_buckets-wide slice of histogram_GPU for host-side reduction.
   Requires dynamic shared memory of num_buckets * sizeof(unsigned long long).
   NOTE(review): for the last block the trailing block_to_block call computes a
   non-positive length, so its loop appears to do nothing — confirm intended.
   NOTE(review): no bounds check on h_pos against num_buckets. */
__global__ void GPUKernelFunction (unsigned long long PDH_acnt, float PDH_res, atom * atom_list_GPU, unsigned long long * histogram_GPU, int num_buckets) {
extern __shared__ unsigned long long SHist[];
/* assign register values */
int i, h_pos;
float dist;
atom * my_block = &atom_list_GPU[blockIdx.x * blockDim.x];
atom temp_atom_1 = my_block[threadIdx.x];
/* zero the shared histogram cooperatively before use */
for(h_pos=threadIdx.x; h_pos < num_buckets; h_pos+=blockDim.x)
SHist[h_pos] = 0;
__syncthreads();
/* loop through all points in atom list calculating distance from current point to all further points */
for (i = threadIdx.x + 1; i < blockDim.x && i+blockIdx.x*blockDim.x < PDH_acnt; i++)
{
atom temp_atom_2 = my_block[i];
dist = sqrt((temp_atom_1.x_pos - temp_atom_2.x_pos) * (temp_atom_1.x_pos - temp_atom_2.x_pos) +
(temp_atom_1.y_pos - temp_atom_2.y_pos) * (temp_atom_1.y_pos - temp_atom_2.y_pos) +
(temp_atom_1.z_pos - temp_atom_2.z_pos) * (temp_atom_1.z_pos - temp_atom_2.z_pos));
h_pos = (int)(dist / PDH_res);
atomicAdd(&(SHist[h_pos]), 1);
}
__syncthreads();
/* this block vs. each full later block, then vs. the (possibly short) last one */
for(i=blockIdx.x+1; i < gridDim.x-1; i++)
block_to_block(my_block,
&atom_list_GPU[i*blockDim.x],
blockDim.x,
SHist,
PDH_res);
block_to_block(my_block,
&atom_list_GPU[i*blockDim.x],
PDH_acnt-i*blockDim.x, // Last block may be small
SHist,
PDH_res);
__syncthreads();
/* publish this block's shared histogram to its slice of global memory */
for(h_pos = threadIdx.x; h_pos < num_buckets; h_pos += blockDim.x)
*(histogram_GPU+(num_buckets*blockIdx.x)+h_pos) += SHist[h_pos];
}
|
1,740
|
#include<cuda_runtime.h>
#include<stdio.h>
/* Demo: prints the grid size needed to cover nElem = 1024 elements for block
   sizes 1024, 512, 256 and 128, then resets the device.
   (Original comments were mojibake; replaced with English equivalents.) */
int main(int argc, char **arg) {
// total number of data elements
int nElem = 1024;
// define the grid and block dimensions
dim3 block(1024);
dim3 grid((nElem+block.x-1)/block.x);
printf("grid.x %d block.x %d \n", grid.x, block.x);
// shrink the block and recompute the grid (ceil-div)
block.x = 512;
grid.x = (nElem+block.x-1)/block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// shrink the block and recompute the grid
block.x = 256;
grid.x = (nElem+block.x-1)/block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// shrink the block and recompute the grid
block.x = 128;
grid.x = (nElem+block.x-1)/block.x;
printf("grid.x %d block.x %d \n", grid.x, block.x);
// reset the device
cudaDeviceReset();
return 0;
}
|
1,741
|
#include<iostream>
#include<cuda.h>
#include<stdlib.h>
#include<algorithm>
#include<thrust/sort.h>
#include<math.h>
#include<stdio.h>
using namespace std;
/* KD-tree node stored in the fixed global array Maptree (heap layout:
   children of node t are 2t and 2t+1). Fields per KDDpartition below. */
struct tree{
int id; // node's own index in Maptree
int leftid; // index of left child (rows below the median)
int parent; // index of parent node
float filter; // median value used to split at this node
int rightid; // index of right child (rows at/above the median)
int pos; // dimension (column) the split was made on
int startpos; // first row of this node's range in the data array
int endpos; // one-past-last row of this node's range — TODO confirm inclusive/exclusive
}Maptree[30];
/* One thread per candidate point: Euclidean distance between row
   (threadIdx.x + start) of `data` and `query`, over features 1..count-1
   (column 0 holds the point id). Writes the id and distance at threadIdx.x
   in `id` / `dis`. */
__global__ void distance(float *data,float *query,float *dis,int *id,int count,int start){
    const int row = threadIdx.x + start;
    float acc = 0;
    for(int f = 1; f < count; f++){
        const float d = data[row*count+f] - query[f];
        acc += d * d;
    }
    id[threadIdx.x] = data[row*count+0];
    dis[threadIdx.x] = sqrt(acc);
}
/* One thread per sample: atomically bumps counter[0] whenever the predicted
   class s2[t] matches the true class s1[t]. */
__global__ void Accuracy(int *s1,int *s2,int *counter){
    const int tid = threadIdx.x;
    if(s1[tid] == s2[tid]){
        atomicAdd(&counter[0], 1);
    }
}
/* Recursively builds the KD tree over rows [front, N) of `data` (row-major,
   `count` columns, column 0 = point id), recording the split structure in the
   global Maptree. `points` is the leaf-size threshold; `time` is the current
   node index. `index` is unused (kept for interface compatibility).
   Fixes vs. the previous version:
    * mean/var were malloc'd and then accumulated into without being zeroed
      (reads of uninitialized memory — undefined behaviour); calloc now
      zero-initializes them.
    * every scratch buffer (decide, mean, var, cdata) leaked on each call;
      they are now freed before recursing.
   NOTE(review): means/variances divide by N rather than (N - front) even
   though only rows [front, N) are summed — preserved as-is, confirm. */
void KDDpartition(float *index,float *data,int points,int count,int front,int N,int time){
    Maptree[time].id = time;
    int Noofitems = Maptree[time].endpos - Maptree[time].startpos;
    if(Noofitems<points){
        return ; /* leaf: small enough, stop splitting */
    }
    /* per-dimension column copies plus running mean/variance */
    float **decide = (float **)malloc(count*sizeof(float*));
    float *mean = (float *)calloc(count,sizeof(float));
    float *var = (float *)calloc(count,sizeof(float));
    for(int i=0;i<count;i++){
        decide[i] = (float *)malloc(N*sizeof(float));
        for(int j=front;j<N;j++){
            decide[i][j] = data[j*count+i];
            mean[i] += decide[i][j];
        }
        mean[i] = mean[i]/N;
    }
    for(int i=0;i<count;i++){
        for(int j=front;j<N;j++){
            var[i] +=(decide[i][j]-mean[i])*(decide[i][j]-mean[i]);
        }
        var[i] = var[i]/N;
    }
    /* split on the dimension with the largest variance (skip id column 0) */
    float Max = 0;
    int pos = 0;
    for(int i=1;i<count;i++){
        if(Max<var[i]){
            Max = var[i];
            pos = i;
        }
    }
    float *cdata = (float *)malloc(N*count*sizeof(float));
    /* median of the chosen dimension over [front, N) */
    sort(decide[pos]+front,decide[pos]+N);
    int mid = (N-front)/2;
    float Median = decide[pos][front+mid];
    int start,last;
    start = Maptree[time].startpos;
    last = Maptree[time].endpos;
    Maptree[time].filter = Median;
    Maptree[time].pos = pos;
    /* partition: rows below the median fill cdata from the front, the rest
       fill it from the back */
    for(int i=front;i<N;i++){
        if(data[i*count+pos]<Median){
            for(int j=0;j<count;j++){
                cdata[start*count+j] = data[i*count+j];
            }
            start++;
        }
        else{
            for(int j=0;j<count;j++){
                cdata[last*count+j] = data[i*count+j];
            }
            last--;
        }
    }
    /* record children (heap layout: 2t and 2t+1) and their row ranges */
    int left = 2*time;
    int right = 2*time+1;
    Maptree[time].leftid = left;
    Maptree[time].rightid = right;
    Maptree[left].parent = time;
    Maptree[right].parent = time;
    Maptree[left].startpos = front;
    Maptree[left].endpos = last;
    Maptree[right].startpos = start;
    Maptree[right].endpos = Maptree[time].endpos;
    /* copy the partitioned rows back into data */
    for(int i=front;i<N;i++){
        for(int j=0;j<count;j++){
            data[i*count+j] = cdata[i*count+j];
        }
    }
    /* release all scratch memory before recursing (previously leaked) */
    for(int i=0;i<count;i++){
        free(decide[i]);
    }
    free(decide);
    free(mean);
    free(var);
    free(cdata);
    KDDpartition(index,data,points,count,Maptree[left].startpos,last+1,left);
    KDDpartition(index,data,points,count,Maptree[right].startpos,Maptree[right].endpos+1,right);
}
/* KNN classification over the KD tree: for each of the m query rows, descends
   Maptree to a small node, computes distances to that node's points on the
   GPU, takes the k nearest, majority-votes a class (1..3 map to the three
   Iris species by id range), then computes accuracy on the GPU and prints it
   together with the accumulated GPU time.
   NOTE(review): the per-query device buffers (gid/gdis/gdata/gquery) and the
   host buffers id/dis are allocated each iteration and never freed — leaks.
   NOTE(review): fclass[i] stays uninitialized when s1[i] matches none of the
   three labels; `s` is unused. */
void search(float *data,float *query,int points,int count,int N,int m,int time,int k,string s[],string s1[]){
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float ms = 0;
int noofelements = Maptree[time].endpos - Maptree[time].startpos;
int x = time;
int *fclass = (int *)malloc(m*sizeof(int));
int *res = (int *)malloc(m*sizeof(int));
float *line = (float *)malloc(count*sizeof(float));
for(int i=0;i<m;i++){
/* map the text label to a numeric class */
if(s1[i]=="Iris-setosa"){
fclass[i] = 1;
}
if(s1[i]=="Iris-versicolor"){
fclass[i] = 2;
}
if(s1[i]=="Iris-virginica"){
fclass[i] = 3;
}
/* copy this query's feature row into `line` */
for(int j=0;j<count;j++){
line[j] = query[i*count+j];
}
/* descend the tree until the node holds fewer than `points` rows */
while(noofelements>points){
int dim = Maptree[x].pos;
float Median = Maptree[x].filter;
if(query[i*count+dim]<Median){
x = Maptree[x].leftid;
}
else{
x = Maptree[x].rightid;
}
noofelements = Maptree[x].endpos - Maptree[x].startpos;
}
/* back up one level so the search window is a bit larger */
x = Maptree[x].parent;
int st = Maptree[x].startpos;
int et = Maptree[x].endpos;
float *gdata,*gquery,*dis,*gdis;
int *id,*gid;
id = (int *)malloc(N*sizeof(int));
dis = (float *)malloc(N*sizeof(float));
float milliseconds = 0;
/* time the alloc+copy+kernel+copy-back sequence with CUDA events */
cudaEventRecord(start,0);
cudaMalloc(&gid,N*sizeof(int));
cudaMalloc(&gdis,N*sizeof(float));
cudaMalloc(&gdata,N*count*sizeof(float));
cudaMalloc(&gquery,count*sizeof(float));
cudaMemcpy(gdata,data,N*count*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(gquery,line,count*sizeof(float),cudaMemcpyHostToDevice);
/* one thread per point in the window [st, et) */
distance<<<1,(et-st)>>>(gdata,gquery,gdis,gid,count,st);
cudaMemcpy(dis,gdis,N*sizeof(float),cudaMemcpyDeviceToHost);
cudaMemcpy(id,gid,N*sizeof(int),cudaMemcpyDeviceToHost);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&milliseconds, start, stop);
ms += milliseconds;
/* order candidates by distance (host-side thrust on raw pointers) */
thrust::sort_by_key(dis, dis + (et-st), id);
/* vote among the k nearest, using the Iris id ranges as class labels */
int count1,count2,count3;
count1 = count2 = count3 = 0;
for(int j=0;j<k;j++){
if(id[j]<=50 && id[j]>0){
count1++;
}
if(id[j]>50 && id[j]<=100){
count2++;
}
if(id[j]<=150 && id[j]>100){
count3++;
}
}
if(count1>count2){
if(count1>count3){
res[i] = 1;
}
else{
res[i] = 3;
}
}
else{
if(count2>count3){
res[i] = 2;
}
else{
res[i] = 3;
}
}
/* reset to the root for the next query */
x = time;
noofelements = Maptree[x].endpos - Maptree[x].startpos;
}
/* compare predictions against labels on the GPU */
int *gclass,*ggsres,*gcounter;
int counter[1];
counter[0] = 0;
cudaMalloc(&gclass,m*sizeof(int));
cudaMalloc(&ggsres,m*sizeof(int));
cudaMalloc(&gcounter,1*sizeof(int));
cudaMemcpy(gclass,fclass,m*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(ggsres,res,m*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(gcounter,counter,1*sizeof(int),cudaMemcpyHostToDevice);
Accuracy<<<1,m>>>(gclass,ggsres,gcounter);
cudaMemcpy(counter,gcounter,1*sizeof(int),cudaMemcpyDeviceToHost);
float acc = counter[0]*100;
acc = acc/m;
printf("Total Execution time %f in millisecond\n",ms);
cout<<"Accuracy of KD tree implementation of KNN "<<acc<<"% \n";
}
////////////////////////
void searchprediction(float *data,float *query,int points,int count,int N,int m,int time,int k,string s[],string s1[]){
    // Classify a single query point: descend the KD tree to a candidate
    // region, compute distances on the GPU, and vote among the k nearest.
    // Fix over the original: all host/device scratch buffers are released
    // before returning (they previously leaked).
    int noofelements = Maptree[time].endpos - Maptree[time].startpos;
    int x = time;
    string sf = "";
    // Descend until the node holds <= points elements, then back up one level.
    while(noofelements>points){
        int dim = Maptree[x].pos;
        float Median = Maptree[x].filter;
        if(query[dim]<Median){
            x = Maptree[x].leftid;
        }
        else{
            x = Maptree[x].rightid;
        }
        noofelements = Maptree[x].endpos - Maptree[x].startpos;
    }
    x = Maptree[x].parent;
    int st = Maptree[x].startpos;
    int et = Maptree[x].endpos;
    float *gdata,*gquery,*dis,*gdis;
    int *id,*gid;
    id = (int *)malloc(N*sizeof(int));
    dis = (float *)malloc(N*sizeof(float));
    cudaMalloc(&gid,N*sizeof(int));
    cudaMalloc(&gdis,N*sizeof(float));
    cudaMalloc(&gdata,N*count*sizeof(float));
    cudaMalloc(&gquery,count*sizeof(float));
    cudaMemcpy(gdata,data,N*count*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(gquery,query,count*sizeof(float),cudaMemcpyHostToDevice);
    // NOTE: single-block launch; assumes (et-st) fits within one block.
    distance<<<1,(et-st)>>>(gdata,gquery,gdis,gid,count,st);
    cudaMemcpy(dis,gdis,N*sizeof(float),cudaMemcpyDeviceToHost);
    cudaMemcpy(id,gid,N*sizeof(int),cudaMemcpyDeviceToHost);
    thrust::sort_by_key(dis, dis + (et-st), id);
    // Vote among the k nearest; ids 1-50 / 51-100 / 101-150 map to the
    // three Iris classes.
    int count1,count2,count3;
    count1 = count2 = count3 = 0;
    for(int j=0;j<k;j++){
        if(id[j]<=50 && id[j]>0){
            count1++;
        }
        if(id[j]>50 && id[j]<=100){
            count2++;
        }
        if(id[j]<=150 && id[j]>100){
            count3++;
        }
    }
    if(count1>count2){
        sf = (count1>count3) ? "Iris-setosa" : "Iris-virginica";
    }
    else{
        sf = (count2>count3) ? "Iris-versicolor" : "Iris-virginica";
    }
    cout<<"Predicted output for random point"<<sf<<"\n";
    free(id);
    free(dis);
    cudaFree(gid);
    cudaFree(gdis);
    cudaFree(gdata);
    cudaFree(gquery);
}
////////////////////////
int main(){
    // Driver: read the Iris training set (input.txt) and the test set
    // (test.txt), build the KD tree over the training rows, evaluate accuracy
    // on the test rows, then classify one random point.
    // Fixes over the original: fopen results are checked, both files are
    // closed, the header scan cannot loop forever at EOF, and all host
    // buffers are freed.
    int points = 20;   // leaf-size threshold used during tree descent
    int k = 15;        // neighbours considered per query
    cout<<"KDD Tree implementation of KNN Algorithm\n";
    int N = 135;       // number of training rows
    int count = 0;     // features per row, discovered from the first line
    FILE *fp = fopen("input.txt","r");
    if(fp==NULL){
        printf("Unable to open input.txt\n");
        return 1;
    }
    // Count the commas on the first line to discover the feature count.
    int hc = 0;
    while(hc!='\n'){
        hc = getc(fp);
        if(hc==EOF){
            printf("input.txt is empty\n");
            fclose(fp);
            return 1;
        }
        if(hc==','){
            count++;
        }
    }
    string s[N];
    float *data = (float *)malloc(N*count*sizeof(float));
    for(int i=0;i<N;i++){
        for(int j=0;j<count;j++){
            fscanf(fp,"%f",&data[i*count+j]);
            fgetc(fp);                 // skip the separator after the number
        }
        // The remainder of the line is the class label.
        char c = fgetc(fp);
        while(c!='\n'){
            s[i]+=c;
            c = fgetc(fp);
        }
    }
    fclose(fp);
    int m =15;                         // number of test rows
    float *query = (float *)malloc(m*count*sizeof(float));
    string s1[m];
    FILE *op = fopen("test.txt","r");
    if(op==NULL){
        printf("Unable to open test.txt\n");
        free(data);
        free(query);
        return 1;
    }
    for(int i=0;i<m;i++){
        for(int j=0;j<count;j++){
            fscanf(op,"%f",&query[i*count+j]);
            fgetc(op);
        }
        char c = fgetc(op);
        while(c!='\n'){
            s1[i] += c;
            c = fgetc(op);
        }
    }
    fclose(op);
    // Seed index: (group id, first feature) per row, consumed by KDDpartition.
    float *index = (float *)malloc(N*2*sizeof(float));
    for(int i=0;i<N;i++){
        index[i*2+0] = 1;
        index[i*2+1] = data[i*count+0];
    }
    // Root node of the KD tree covers the whole training set.
    Maptree[1].id = 1;
    Maptree[1].leftid = 0;
    Maptree[1].filter = 0;
    Maptree[1].rightid = 0;
    Maptree[1].pos = 0;
    Maptree[1].parent = -1;
    Maptree[1].startpos = 0;
    Maptree[1].endpos = 134;
    KDDpartition(index,data,points,count,0,N,1);
    search(data,query,points,count,N,m,1,k,s,s1);
    // Classify one random point as a smoke test of the prediction path.
    srand(time(0));
    float *point = (float *)malloc(count*sizeof(float));
    for(int j=0;j<count;j++){
        point[j] = (j<count-1) ? rand()%8 : rand()%3;
    }
    searchprediction(data,point,points,count,N,m,1,k,s,s1);
    cudaDeviceSynchronize();
    free(point);
    free(index);
    free(query);
    free(data);
    return 0;
}
|
1,742
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
/**
1.5[MB]
div == 4, size = * 48000
2.0[MB]
div == 8, size = * 32000
2.4[MB]
div == 8, size = * 37000
**/
__global__ void __add(float* a,float* b,int size,int div){
    // In-place vector add: each thread touches one slot in each of the `div`
    // equal chunks the vector is split into.
    const int tid = blockDim.x * blockIdx.x + threadIdx.x;
    #pragma unroll
    for(int chunk = 0; chunk < div; chunk++){
        const int off = tid + (size*chunk)/div;
        a[off] += b[off];
    }
}
static float elapsed(struct timeval tv0,struct timeval tv1){
    /* Seconds between tv0 and tv1, with microsecond resolution. */
    float secs  = (float)(tv1.tv_sec  - tv0.tv_sec);
    float usecs = (float)(tv1.tv_usec - tv0.tv_usec);
    return secs + usecs * 0.000001f;
}
int main(){
    // Bandwidth micro-benchmark: repeatedly add a ones-vector into an
    // accumulator vector on the device, then verify and time the whole run.
    // Fixes over the original: printf format specifiers now match their
    // argument types (%u for unsigned, %zu for size_t — the old %d was UB),
    // signed/unsigned loop mismatches are removed, and all buffers are freed.
    struct timeval t0,t1;
    gettimeofday(&t0,NULL);
    float *h_a = NULL;
    float *h_b = NULL;
    float *d_a = NULL;
    float *d_b = NULL;
    int div = 8;
    int threadNum = 1024;
    unsigned int size = (threadNum*div) * 32000;
    int blockNum = size/(threadNum*div);
    printf("blockNum : %d\n",blockNum);
    printf("threadNum : %d\n",threadNum);
    printf("size : %u\n",size);
    printf("vector size : %zu\n",sizeof(float)*size);
    int ite = 4000;
    cudaMalloc((void**)&d_a,sizeof(float)*size);
    cudaMalloc((void**)&d_b,sizeof(float)*size);
    h_a = (float*)malloc(sizeof(float)*size);
    h_b = (float*)malloc(sizeof(float)*size);
    for(unsigned int i = 0 ; i < size ; i ++){
        h_a[i] = 0.0f;
        h_b[i] = 1.0f;
    }
    dim3 threads(threadNum,1,1);
    dim3 blocks(blockNum,1,1);
    cudaMemcpy(d_a,h_a,sizeof(float)*size,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,h_b,sizeof(float)*size,cudaMemcpyHostToDevice);
    for(int i = 0 ; i < ite ; i ++){
        __add<<<blocks,threads>>>(d_a,d_b,size,div);
    }
    cudaMemcpy(h_a,d_a,sizeof(float)*size,cudaMemcpyDeviceToHost);
    int pass = 1;
    unsigned int firstFailedIndex = 0;
    for(unsigned int i = 0 ; i < size ; i ++){
        // 4000 additions of 1.0f are exactly representable in float,
        // so an exact comparison is valid here.
        if(h_a[i] != (float)ite){
            firstFailedIndex = i;
            pass = 0;
            break;
        }
    }
    if(pass){
        printf("Result test PASS!\n");
    }else{
        printf("Result test Failed\n");
        printf("h_a[%u] == %f\n",firstFailedIndex,h_a[firstFailedIndex]);
    }
    // Release resources (the original leaked both host and device buffers).
    cudaFree(d_a);
    cudaFree(d_b);
    free(h_a);
    free(h_b);
    gettimeofday(&t1,NULL);
    printf("TIME RESULT : %f\n",elapsed(t0,t1));
    return 0;
}
|
1,743
|
extern "C"
__global__
void add (long n, double *a, double *b){
    // In-place element-wise vector add: a[i] += b[i] for indices below n.
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx < n)
        a[idx] += b[idx];
}
|
1,744
|
#include "includes.h"
#define NUM 100
__global__ void add (int *a, int *b, int *c)
{
    // One block per element: c = a + b at this block's index.
    const int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
|
1,745
|
#include "includes.h"
__device__ float machine_eps_flt() {
    // Bit-increment 1.0f to the next representable float via a type-punning
    // union; the difference is the single-precision machine epsilon.
    typedef union {
        int i32;
        float f32;
    } flt_32;
    flt_32 v;
    v.f32 = 1.f;
    v.i32 += 1;
    return v.f32 - 1.f;
}
__device__ double machine_eps_dbl() {
    // Bit-increment 1.0 to the next representable double via a type-punning
    // union; the difference is the double-precision machine epsilon.
    typedef union {
        long long i64;
        double d64;
    } dbl_64;
    dbl_64 v;
    v.d64 = 1.;
    v.i64 += 1;
    return v.d64 - 1.;
}
// Single-thread probe of floating-point limits: writes machine epsilon and
// approximate max/min magnitudes (found by doubling/halving until
// overflow/underflow) for float and double into fvals/dvals at the
// EPS/MAX/MIN indices (defined in includes.h). Only global thread 0 works.
__global__ void calc_consts(float *fvals, double *dvals) {
int i = threadIdx.x + blockIdx.x*blockDim.x;
if (i==0) {
fvals[EPS] = machine_eps_flt();
dvals[EPS]= machine_eps_dbl();
float xf, oldxf;
double xd, oldxd;
xf = 2.; oldxf = 1.;
xd = 2.; oldxd = 1.;
/* double until overflow */
/* Note that real fmax is somewhere between xf and oldxf */
while (!isinf(xf)) {
oldxf *= 2.;
xf *= 2.;
}
while (!isinf(xd)) {
oldxd *= 2.;
xd *= 2.;
}
dvals[MAX] = oldxd;
fvals[MAX] = oldxf;
/* half until overflow */
/* Note that real fmin is somewhere between xf and oldxf */
xf = 1.; oldxf = 2.;
xd = 1.; oldxd = 2.;
while (xf != 0.) {
oldxf /= 2.;
xf /= 2.;
}
while (xd != 0.) {
oldxd /= 2.;
xd /= 2.;
}
dvals[MIN] = oldxd;
fvals[MIN] = oldxf;
}
return;
}
|
1,746
|
#include "includes.h"
// For each of nDims planes of N int3 entries, remap every component through
// the reindex table, wrapping values into [0, maxValue) and preserving the
// whole-period offset. A component of -1 passes through unchanged when
// ignoreValue is true. Grid-stride over the N entries of each plane.
// Fix over the original: the identical wrap-and-lookup logic was written out
// three times (for .x, .y, .z); it is factored into one local lambda.
__global__ void devInverseReindexInt3Bool(int N, int3 *destArray, int3 *srcArray, unsigned int *reindex, int realSize, int nDims, int maxValue, bool ignoreValue)
{
    // Wrap one coordinate into range, look it up, and restore its offset.
    auto remap = [&](int tmp) -> int {
        if (tmp == -1 && ignoreValue)
            return -1;
        int addValue = 0;
        while (tmp >= maxValue) {
            tmp -= maxValue;
            addValue += maxValue;
        }
        while (tmp < 0) {
            tmp += maxValue;
            addValue -= maxValue;
        }
        return (int) reindex[tmp] + addValue;
    };
    for (unsigned int n = 0; n < nDims; n++) {
        for (int i = blockIdx.x*blockDim.x + threadIdx.x; i < N; i += gridDim.x*blockDim.x) {
            int3 src = srcArray[i + n*realSize];
            int3 dst;
            dst.x = remap(src.x);
            dst.y = remap(src.y);
            dst.z = remap(src.z);
            destArray[i + n*realSize] = dst;
        }
    }
}
|
1,747
|
#include <stdio.h>
#include <iostream>
#include <fstream>
#include <random>
#define N 10000
#define MIN_POS 1688
using namespace std;
typedef struct
{
float charge;
int index;
} cell;
#define CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
if (error != cudaSuccess) { \
cout << cudaGetErrorString(error) << endl; \
} \
} while (0)
// Tree reduction (interleaved addressing) keeping the cell with the smallest
// charge in each block; each block's winner is written to g_odata[blockIdx.x].
// Requires blockDim.x * sizeof(cell) bytes of dynamic shared memory.
// NOTE(review): there is no i < N bounds guard, so a partial last block reads
// past the end of g_idata and may compare against garbage — confirm the
// launch pads the input, or add an element-count parameter and guard.
__global__ void reduce0(cell *g_idata, cell *g_odata)
{
extern __shared__ cell sdata[];
// each thread loads one element from global to shared mem
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
sdata[tid] = g_idata[i];
__syncthreads();
// do reduction in shared mem
for(unsigned int s=1; s < blockDim.x; s *= 2)
{
if (tid % (2*s) == 0)
{
// keep the smaller-charge cell of the pair
sdata[tid] = (sdata[tid].charge < sdata[tid + s].charge)? sdata[tid]: sdata[tid + s];
}
__syncthreads();
}
// write result for this block to global mem
if (tid == 0)
g_odata[blockIdx.x] = sdata[0];
}
int main()
{
    // Find the cell with the smallest charge: one planted minimum (charge 1)
    // at MIN_POS, everything else >= 50.
    // Fixes over the original: the result copy used N*sizeof(float) for a
    // cell array (type/size mismatch), and out[0] alone was printed even
    // though it only covers block 0 — a final host-side reduction over the
    // gridSize per-block winners is required. Buffers are also freed now.
    cell *a, *out, *dev_a, *dev_out;
    a = (cell*)malloc(N*sizeof(cell));
    out = (cell*)malloc(N*sizeof(cell));
    CUDA_CHECK(cudaMalloc(&dev_a, N*sizeof(cell)));
    CUDA_CHECK(cudaMalloc(&dev_out, N*sizeof(cell)));
    for (int i=0; i<N; i++)
    {
        if (i==MIN_POS) a[i].charge = 1;
        else a[i].charge = (i%15)+50;
        a[i].index = i;
    }
    CUDA_CHECK(cudaMemcpy(dev_a, a, N*sizeof(cell), cudaMemcpyHostToDevice));
    int blockSize = 256;                                // # threads
    int gridSize = N/blockSize + (N % blockSize != 0);  // # blocks (ceil-div)
    int sharedBytes = blockSize*sizeof(cell);
    reduce0<<<gridSize,blockSize,sharedBytes>>>(dev_a, dev_out);
    cudaDeviceSynchronize();
    // check for errors
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
    }
    CUDA_CHECK(cudaMemcpy(out, dev_out, gridSize*sizeof(cell), cudaMemcpyDeviceToHost));
    // Reduce the per-block winners to the global minimum on the host.
    cell best = out[0];
    for (int b=1; b<gridSize; b++)
        if (out[b].charge < best.charge) best = out[b];
    cout << "Min charge:" << best.charge << endl;
    cout << "Min index:" << best.index << endl;
    free(a);
    free(out);
    cudaFree(dev_a);
    cudaFree(dev_out);
}
|
1,748
|
#include <stdio.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
//this is the function that finds the min within the matrix
__global__ void getminimum(unsigned *da, unsigned* minValue){
    // Flatten the 2-D thread index and fold this element into *minValue;
    // atomicMin serialises concurrent updates safely.
    const int idx = threadIdx.x * blockDim.y + threadIdx.y;
    atomicMin(minValue, da[idx]);
}
//fill matrix with random nums
__global__ void generateRandomMatrix(unsigned *da, float x, int n)
{
    // Fill one matrix slot per thread with a pseudo-random value; the RNG is
    // seeded from the device clock. Parameters x and n are unused but kept
    // for the existing call sites.
    const int idx = threadIdx.x * blockDim.y + threadIdx.y;
    curandState_t rng;
    curand_init(clock64(), 0, 0, &rng);
    da[idx] = curand(&rng) % 100 + idx;
}
//set min as max int value
__global__ void makeMax(unsigned *min){
    // Single-thread kernel: seed the running minimum with the largest int
    // so any real matrix value will replace it.
    *min = INT_MAX;
}
__global__ void square ( unsigned *matrix, unsigned *result, unsigned matrixsize) {
    // Each thread accumulates one row of result += matrix * matrix.
    const unsigned row = threadIdx.x * blockDim.y + threadIdx.y;
    for (unsigned col = 0; col < matrixsize; col++) {
        unsigned acc = result[row * matrixsize + col];
        for (unsigned t = 0; t < matrixsize; t++) {
            acc += matrix[row * matrixsize + t] * matrix[t * matrixsize + col];
        }
        result[row * matrixsize + col] = acc;
    }
}
// Zero result[0..size). Fix over the original: every thread redundantly wrote
// all `size` elements (a benign but wasteful all-threads race); each thread
// now clears a grid-strided subset, with identical final contents.
__global__ void initializeResult(unsigned *result, unsigned size){
    for(unsigned i = threadIdx.x + blockIdx.x * blockDim.x; i < size; i += blockDim.x * gridDim.x){
        result[i] = 0;
    }
}
//PART 1 findMIN
//PART 1 findMIN
__host__ void findMin(unsigned *matrix, dim3 block){
    // Find and print the minimum element of the device-resident matrix.
    // Fix over the original: it copied uninitialised host memory to the
    // device before makeMax overwrote it anyway — that read of an
    // indeterminate value is removed.
    unsigned *min, *C_min;                               // host / device result
    min = (unsigned *)malloc(1 * sizeof(unsigned));
    cudaMalloc((void **)&C_min, 1 * sizeof(unsigned));
    makeMax<<<1,1>>>(C_min);                             // seed with INT_MAX
    getminimum<<<1, block>>>(matrix, C_min);             // atomicMin over all elements
    cudaMemcpy(min, C_min, 1 * sizeof(unsigned), cudaMemcpyDeviceToHost);
    printf("Min Value is %d\n", min[0]);
    free(min);
    cudaFree(C_min);
}
#define N 16
//PART 2 findSquare
//PART 2 findSquare
__host__ void findSquare(unsigned *matrix){
    // Compute matrix^2 (N x N) on the device and print it.
    // Fixes over the original: the result buffer holds N*N elements but only
    // the first N were zeroed, so `square` accumulated into uninitialised
    // memory; and both scratch buffers leaked.
    unsigned *result, *hresult;
    hresult = (unsigned *)malloc(N * N * sizeof(unsigned));
    cudaMalloc(&result, N * N * sizeof(unsigned));
    initializeResult<<<1, N>>>(result, N * N);          // zero the FULL buffer
    square<<<1, N>>>(matrix, result, N);
    cudaMemcpy(hresult, result, N * N * sizeof(unsigned), cudaMemcpyDeviceToHost);
    printf("Resulting square Matrix\n");
    for(unsigned ii = 0; ii < N; ++ii){
        for(unsigned jj = 0; jj < N; ++jj){
            //printf("%2d ", hresult[ii * N + jj]);
        }
        printf("\n");
    }
    free(hresult);
    cudaFree(result);
}
int main() {
    // Fill an N x N matrix with random values on the device, then run the two
    // exercises: global minimum and matrix square.
    // Fix over the original: the matrix buffers were never released.
    dim3 block(N, N, 1);
    unsigned *matrix, *hmatrix;        // device / host copies of the matrix
    cudaMalloc(&matrix, N * N * sizeof(unsigned));
    hmatrix = (unsigned *)malloc(N * N * sizeof(unsigned));
    generateRandomMatrix<<<1, block>>>(matrix, 5, 30);
    cudaMemcpy(hmatrix, matrix, N * N * sizeof(unsigned), cudaMemcpyDeviceToHost);
    printf("starting matrix\n");
    for (unsigned ii = 0; ii < N; ++ii) {
        for (unsigned jj = 0; jj < N; ++jj) {
            // printf("%2d ", hmatrix[ii * N + jj]);
        }
        printf("\n");
    }
    findMin(matrix, block);
    findSquare(matrix);
    free(hmatrix);
    cudaFree(matrix);
    return 0;
}
|
1,749
|
#include <cstdio>
#include <cstdlib>
// error checking macro
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
struct list_elem {
int key;
list_elem *next;
};
// Allocate num_bytes of CUDA managed (unified) memory into ptr, so the same
// pointer can be dereferenced on both host and device.
// NOTE(review): the cudaMallocManaged return code is ignored here; failures
// only surface later through cudaCheckErrors in the caller.
template <typename T>
void alloc_bytes(T &ptr, size_t num_bytes){
cudaMallocManaged(&ptr, num_bytes);
}
__host__ __device__
void print_element(list_elem *list, int ele_num){
    // Walk ele_num links from the head and print that node's key.
    list_elem *node = list;
    while (ele_num-- > 0)
        node = node->next;
    printf("key = %d\n", node->key);
}
// Kernel wrapper so the shared __host__ __device__ print_element can be
// exercised from the GPU as well as the host.
__global__ void gpu_print_element(list_elem *list, int ele_num){
print_element(list, ele_num);
}
const int num_elem = 5;
const int ele = 3;
// Build a num_elem-node linked list in managed memory, then print node
// `ele`'s key once from the host and once from the device.
int main(){
list_elem *list_base, *list;
alloc_bytes(list_base, sizeof(list_elem));
list = list_base;
// Append nodes 0..num_elem-1. Note this also allocates one extra trailing
// node whose key is never set, and the list is never freed before exit.
for (int i = 0; i < num_elem; i++){
list->key = i;
alloc_bytes(list->next, sizeof(list_elem));
list = list->next;}
print_element(list_base, ele);
gpu_print_element<<<1,1>>>(list_base, ele);
cudaDeviceSynchronize();
cudaCheckErrors("cuda error!");
}
|
1,750
|
#include <cuda.h>
__device__ float phi(float eig1, float eig2, float gamma) {
    // Eigenvalue-ratio weight for the vesselness measure:
    // (eig1/eig2)^gamma when eig1 is negative, zero otherwise.
    return (eig1 < 0.f) ? __powf(eig1/eig2, gamma) : 0.f;
}
__device__ float omega(float eig1, float eig2, float gamma, float alpha) {
    // Second eigenvalue-ratio weight: tapers from 1 down to 0 as eig1 grows
    // from -|eig2| toward |eig2|/alpha.
    const float e2 = abs(eig2);
    if (eig1 <= 0.f)
        return __powf(1.f + eig1/e2, gamma);
    if (eig1 < e2/alpha)
        return __powf(1.f - alpha*eig1/e2, gamma);
    return 0.f;
}
// Vesselness device kernel
__global__ void vesselness3DKernel(
const int num_elements,
const float * eig1,
const float * eig2,
const float * eig3,
float * V,
float gamma,
float alpha
){
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i >= num_elements) return;
if (eig2[i] < 0)
V[i] = abs(eig3[i]) * phi(eig2[i],eig3[i],gamma) * omega(eig1[i],eig2[i],gamma,alpha);
else
V[i] = 0;
}
|
1,751
|
#include <stdio.h>
//#include <stdlib.h>
#define DATA_SIZE 10
// Accumulate data[0..*size) into *sum (one element per thread).
// Fixes over the original: `sum += data[thi]` advanced the POINTER `sum`
// instead of adding to the value it points at (the result was never
// written), and concurrent threads must use atomicAdd so updates are not
// lost. The bound now uses *size instead of the hard-coded DATA_SIZE macro
// (identical at the existing call site, but correct for other sizes too).
__global__ void cusum(int *data,int *size,float *sum){
    int thi=threadIdx.x;
    if(thi < *size){
        atomicAdd(sum, (float)data[thi]);
    }
}
int main(int argc,char *argv[]){
    // Sum DATA_SIZE random ints on the GPU and print the result.
    // Fix over the original: the dev_size upload copied sizeof(float) bytes
    // into an int destination; it now copies sizeof(int).
    int *list;
    int *dev_list;
    int i;
    int size=DATA_SIZE;
    int *dev_size;
    float sum=0;
    float *dev_sum;
    cudaError_t err;
    srand(1);
    list=(int *)malloc(sizeof(int)*DATA_SIZE);
    for(i=0;i<DATA_SIZE;i++){
        list[i]=rand()%20;
        printf("%d\n",list[i]);
    }
    err=cudaMalloc((void **)&dev_list,sizeof(int)*DATA_SIZE);
    err=cudaMalloc((void **)&dev_sum,sizeof(float));
    err=cudaMalloc((void **)&dev_size,sizeof(int));
    err=cudaMemcpy(dev_list,list,sizeof(int)*DATA_SIZE,cudaMemcpyHostToDevice);
    err=cudaMemcpy(dev_sum,&sum,sizeof(float),cudaMemcpyHostToDevice);
    err=cudaMemcpy(dev_size,&size,sizeof(int),cudaMemcpyHostToDevice);
    if(err != cudaSuccess){
        printf("cudaMalloc Error\n");
    }
    cusum<<<1,10>>>(dev_list,dev_size,dev_sum);
    err=cudaMemcpy(&sum,dev_sum,sizeof(float),cudaMemcpyDeviceToHost);
    if(err != cudaSuccess){
        printf("cudaMalloc Error\n");
    }
    cudaFree(dev_sum);
    cudaFree(dev_list);
    cudaFree(dev_size);
    free(list);
    printf("sum=%f\n",sum);
    return 0;
}
|
1,752
|
#include "includes.h"
__global__ void drawHeart(int CIRCLE_SEGMENTS, float *xx, float*yy) {
    // Sample one point of a scaled parametric heart curve per thread.
    const float scale = 0.5f;
    const int idx = threadIdx.y*CIRCLE_SEGMENTS + threadIdx.x;
    const float theta = 2.0f * 3.1415926f * (float)idx / (float)CIRCLE_SEGMENTS;
    const float s = sinf(theta);
    xx[idx] = scale * 16.0f * s * s * s;
    yy[idx] = -1 * scale * (13.0f * cosf(theta) - 5.0f * cosf(2.0f * theta) - 2 * cosf(3.0f * theta) - cosf(4.0f * theta));
}
|
1,753
|
// First stage of a quadratic line search: each block accumulates its partial
// of slope = -sum_i invMass_i * (posqP_i . force_i) over a grid-strided atom
// range and writes it to blockSlope[blockIdx.x]. Forces arrive as 64-bit
// integers scaled by 2^32, stored as x/y/z planes of paddedNumAtoms entries.
// Requires blockDim.x floats of dynamic shared memory.
extern "C" __global__ void kNMLQuadraticMinimize1_kernel( int numAtoms, int paddedNumAtoms, float4 *posqP, float4 *velm, long long *force, float *blockSlope ) {
/* Compute the slope along the minimization direction. */
extern __shared__ float slopeBuffer[];
float slope = 0.0f;
for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) {
const float invMass = velm[atom].w;
const float4 xp = posqP[atom];
// Convert the scaled integer force components back to float.
const float fx = ( float )force[atom] / ( float )0x100000000;
const float fy = ( float )force[atom + 1 * paddedNumAtoms] / ( float )0x100000000;
const float fz = ( float )force[atom + 2 * paddedNumAtoms] / ( float )0x100000000;
slope -= invMass * ( xp.x * fx + xp.y * fy + xp.z * fz );
}
slopeBuffer[threadIdx.x] = slope;
__syncthreads();
// Thread 0 already holds its own partial in `slope`; fold in the rest.
if( threadIdx.x == 0 ) {
for( int i = 1; i < blockDim.x; i++ ) {
slope += slopeBuffer[i];
}
blockSlope[blockIdx.x] = slope;
}
}
// Second stage: sum the per-block slopes, fit a quadratic through the last
// two potential energies to pick a new step length lambda, then shift every
// atom by (lambda - oldLambda) along the stored direction posqP, scaled by
// inverse mass. Shared memory must hold gridDim.x floats; lambdaval[0]
// receives the chosen lambda.
extern "C" __global__ void kNMLQuadraticMinimize2_kernel( int numAtoms, float currentPE, float lastPE, float invMaxEigen, float4 *posq, float4 *posqP, float4 *velm, float *blockSlope, float *lambdaval ) {
/* Load the block contributions into shared memory. */
extern __shared__ float slopeBuffer[];
for( int block = threadIdx.x; block < gridDim.x; block += blockDim.x ) {
slopeBuffer[block] = blockSlope[block];
}
__syncthreads();
/* Compute the scaling coefficient. */
if( threadIdx.x == 0 ) {
float slope = 0.0f;
for( int i = 0; i < gridDim.x; i++ ) {
slope += slopeBuffer[i];
}
float lambda = invMaxEigen;
float oldLambda = lambda;
// Quadratic model: solve for the vertex; fall back to halving the step
// when the curvature term vanishes or the solved step is non-positive.
float a = ( ( ( lastPE - currentPE ) / oldLambda + slope ) / oldLambda );
if( a != 0.0f ) {
const float b = slope - 2.0f * a * oldLambda;
lambda = -b / ( 2.0f * a );
} else {
lambda = 0.5f * oldLambda;
}
if( lambda <= 0.0f ) {
lambda = 0.5f * oldLambda;
}
// Broadcast the step delta to all threads via shared-memory slot 0.
slopeBuffer[0] = lambda - oldLambda;
/* Store variables for retrival */
lambdaval[0] = lambda;
}
__syncthreads();
/* Remove previous position update (-oldLambda) and add new move (lambda). */
const float dlambda = slopeBuffer[0];
for( int atom = threadIdx.x + blockIdx.x * blockDim.x; atom < numAtoms; atom += blockDim.x * gridDim.x ) {
const float factor = velm[atom].w * dlambda;
float4 pos = posq[atom];
pos.x += factor * posqP[atom].x;
pos.y += factor * posqP[atom].y;
pos.z += factor * posqP[atom].z;
posq[atom] = pos;
}
}
|
1,754
|
#include <iostream>
#include <string>
#include <fstream>
#include <stdio.h>
using namespace std;
typedef uchar4 ImageType;
typedef double4 ClastersPos;
typedef double DistanceType;
__constant__ int QUANTITY;
__constant__ ClastersPos POSITIONS[32];
void setZero(ClastersPos * pos, int clastersNum) {
    // Reset every cluster accumulator (x/y/z sums and the member count in w).
    for (int c = 0; c < clastersNum; c++) {
        pos[c].x = 0;
        pos[c].y = 0;
        pos[c].z = 0;
        pos[c].w = 0;
    }
}
// Euclidean distance between a pixel's x/y/z components and a cluster centre.
// NOTE(review): computed with single-precision sqrtf although the return type
// is double — presumably a deliberate speed/precision trade-off; confirm the
// precision loss is acceptable before relying on exact convergence behaviour.
__device__ double dist(ImageType f, ClastersPos u)
{
return sqrtf((f.x - u.x) * (f.x - u.x) + (f.y - u.y) * (f.y - u.y) + (f.z - u.z) * (f.z - u.z));
}
bool hasChanged(const ClastersPos * const old, const ClastersPos * const nw, const int n)
{
    // True when any of the n cluster centres moved (exact component-wise compare).
    for (int k = 0; k < n; k++)
    {
        const bool same = old[k].x == nw[k].x && old[k].y == nw[k].y
                       && old[k].z == nw[k].z && old[k].w == nw[k].w;
        if (!same)
            return true;
    }
    return false;
}
// Assignment step of k-means over an n x m image: each thread walks a 2-D
// grid-strided range, finds the nearest of the QUANTITY cluster centres held
// in constant memory, and stores that cluster's index in the pixel's .w.
__global__ void kernel(ImageType *data, int n, int m)
{
// Calculate normalized texture coordinates
int x = blockIdx.x * blockDim.x + threadIdx.x;
int y = blockIdx.y * blockDim.y + threadIdx.y;
int xOffset = blockDim.x * gridDim.x;
int yOffset = blockDim.y * gridDim.y;
for (int i = y; i < m; i += yOffset)
{
for (int j = x; j < n; j += xOffset)
{
ImageType threadValue = data[i * n + j];
// Start from the largest finite double so any real distance wins.
DistanceType minDist = 1.7976931348623158e+308;
DistanceType tempDist;
// NOTE(review): tempClaster stays uninitialized if QUANTITY is 0 —
// verify callers always upload at least one cluster centre.
int tempClaster;
for (int claster = 0; claster < QUANTITY; claster++)
{
tempDist = dist(threadValue, POSITIONS[claster]);
if (tempDist < minDist)
{
minDist = tempDist;
tempClaster = claster;
}
}
data[i * n + j].w = tempClaster;
}
}
}
// K-means colour clustering of a raw binary image: reads width/height and
// uchar4 pixels, iterates assignment (GPU) + centre update (CPU) until the
// centres stop moving, then writes the labelled image back out (cluster id
// in each pixel's .w channel).
int main()
{
int width, height;
string input, output;
cin >> input;
cin >> output;
ifstream fin(input, std::ios::binary | std::ios::in);
fin.read((char *) &width, 4);
fin.read((char *) &height, 4);
ImageType *host_data = new ImageType [width * height];
fin.read((char *) host_data, sizeof(ImageType) * height * width);
fin.close();
int clastersNum;
cin >> clastersNum;
// Initial centres are the colours of user-chosen pixel coordinates.
ClastersPos * host_positions = new ClastersPos [clastersNum];
for (int i = 0, x, y; i < clastersNum; i++)
{
cin >> x >> y;
host_positions[i].x = host_data[y * width + x].x;
host_positions[i].y = host_data[y * width + x].y;
host_positions[i].z = host_data[y * width + x].z;
}
ImageType * device_data;
cudaMalloc((void **) &device_data, sizeof(ImageType) * height * width);
cudaMemcpy(device_data, host_data, sizeof(ImageType) * height * width, cudaMemcpyHostToDevice);
// NOTE(review): device_positions is allocated but never used or freed —
// the centres actually travel through the POSITIONS constant buffer below.
ClastersPos * device_positions;
cudaMalloc((void **) &device_positions, sizeof(ClastersPos) * clastersNum);
cudaMemcpyToSymbol(QUANTITY, &clastersNum, sizeof(int));
ClastersPos * host_output = new ClastersPos [clastersNum];
while (true)
{
// Upload the current centres, run the assignment kernel, pull labels back.
cudaMemcpyToSymbol(POSITIONS, host_positions, clastersNum * sizeof(ClastersPos));
kernel<<<dim3(16,16), dim3(16,16)>>>(device_data, width, height);
cudaMemcpy(host_data, device_data, sizeof(ImageType) * height * width, cudaMemcpyDeviceToHost);
// cudaDeviceSynchronize();
// Update step on the host: average each cluster's members
// (w accumulates the member count).
setZero(host_output, clastersNum);
for (int i = 0; i < height; i++)
for (int j = 0, claster, ind; j < width; j++)
{
claster = host_data[i * width + j].w;
ind = i * width + j;
host_output[claster].x += host_data[ind].x;
host_output[claster].y += host_data[ind].y;
host_output[claster].z += host_data[ind].z;
host_output[claster].w += 1;
}
for (int i = 0; i < clastersNum; i++)
{
host_output[i].x /= host_output[i].w;
host_output[i].y /= host_output[i].w;
host_output[i].z /= host_output[i].w;
}
// Converged when no centre moved at all (exact comparison).
if (!hasChanged(host_positions, host_output, clastersNum)) break;
for (int i = 0; i < clastersNum; i++)
{
host_positions[i] = host_output[i];
}
}
// printClastersPos(host_data, width, height);
std::ofstream fout(output, std::ios::binary | std::ios::out);
fout.write((char *) &width, 4);
fout.write((char *) &height, 4);
fout.write((char *) host_data, sizeof(ImageType) * height * width);
fout.close();
return 0;
}
|
1,755
|
//CUDA reduction algorithm. simple approach
//Tom Dale
//11-20-18
#include <iostream>
#include <random>
using namespace std;
#define N 100000//number of input values
#define R 100//reduction factor
#define F (1+((N-1)/R))//how many values will be in the final output
//basicRun will F number of threads go through R number of values and put the average in z[tid]
//basicRun: F threads each average R consecutive input values into z[tid]
// Fix over the original: the guard was `tid > F`, which let thread F through
// and read R elements past the end of the buffered input.
__global__ void basicRun(double *a,double *z){
    int tid = blockDim.x*blockIdx.x + threadIdx.x;
    if(tid >= F) return;
    double sum = 0;
    for(int i = 0;i < R;i++){   // sum this thread's R-element group
        sum += a[i+tid*R];
    }
    z[tid] = sum/R;             // store the group's average
}
int main(){
    // Reduce N values by a factor of R on the GPU and report the timing.
    // Fixes over the original: `a` was malloc'd for only N doubles while the
    // wrap-around loop writes up to bufferedSize-1 (heap overflow whenever
    // N%R != 0); the stop event is synchronized before the elapsed-time
    // query; and the host buffers are freed.
    int bufferedSize = N + (N%R);   // smallest length >= N evenly split by R
    double *a,*z;
    a = (double*)malloc(sizeof(double)*bufferedSize);
    z = (double*)malloc(sizeof(double)*F);
    for(int i =0;i< N;i++){
        //a[i]= rand() % 10;
        a[i] = i;
    }
    // Wrap-around buffer: the padding slots repeat the first few values.
    for(int i = 0;i<(N%R);i++){
        a[N+i] =a[i];
    }
    double *dev_a,*dev_z;
    cudaMalloc((void**)&dev_a,sizeof(double)*bufferedSize);
    cudaMalloc((void**)&dev_z,sizeof(double)*F);
    int blockSize = 1024 ;                        // threads per block
    int gridSize =1+(bufferedSize-1)/blockSize;   // blocks per grid (ceil-div)
    cudaEvent_t start,stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    cudaMemcpy(dev_a,a,sizeof(double)*bufferedSize,cudaMemcpyHostToDevice);
    basicRun<<<gridSize,blockSize>>>(dev_a,dev_z);
    cudaEventRecord(stop);
    cudaMemcpy(z,dev_z,sizeof(double)*F,cudaMemcpyDeviceToHost);
    cout << endl << endl << gridSize << " blocks used to reduce " << N << " by " << R << " to get " << F << " values"<< endl;
    // Ensure `stop` has completed before asking for the elapsed time.
    cudaEventSynchronize(stop);
    float milliseconds = 0;
    cudaEventElapsedTime(&milliseconds,start,stop);
    cout << "This run took " << milliseconds << " miliseconds to compute." << endl;
    cudaFree(dev_a);
    cudaFree(dev_z);
    free(a);
    free(z);
}
|
1,756
|
#include<cuda.h>
#include<stdio.h>
void initializeArray(int*,int);
void stampaArray(int*, int);
void equalArray(int*, int*, int);
// Round-trip test: copy an N-element array host -> device -> host and verify
// the copy matches (N comes from argv[1], default 20).
int main(int argn, char * argv[])
{
// total number of array elements
int N;
int *A_host; // array stored on the host
int *A_device; // array stored on the device
int *copy; // array into which we copy the data back from the device
int size; // size in bytes of each array
if(argn==1)
N=20;
else
N=atoi(argv[1]);
printf("**********\tPROGRAMMA INIZIALE\t**********\n");
printf("copia di %d elementi dalla CPU alla GPU e viceversa\n\n", N);
// size in bytes of each array
size=N*sizeof(int);
// host allocations
A_host=(int*)malloc(size);
copy=(int*)malloc(size);
// device allocation
cudaMalloc((void**)&A_device,size);
// initialize the data on the host
initializeArray(A_host, N);
// copy the data from host to device
cudaMemcpy(A_device, A_host, size, cudaMemcpyHostToDevice);
// copy the results back from device to host
cudaMemcpy(copy, A_device, size, cudaMemcpyDeviceToHost);
printf("array sull'host\n");
stampaArray(A_host,N);
printf("array ricopiato dal device\n");
stampaArray(copy,N);
// correctness check
equalArray(copy, A_host,N);
// free host memory
free(A_host);
free(copy);
// free device memory
cudaFree(A_device);
exit(0);
}
void initializeArray(int *array, int n)
{
    /* Fill the array with its own indices: array[i] = i. */
    for (int k = 0; k < n; k++)
        array[k] = k;
}
void stampaArray(int* array, int n)
{
    /* Print the n elements separated by spaces, then a newline. */
    for (int k = 0; k < n; k++)
        printf("%d ", array[k]);
    printf("\n");
}
/* Compare the first n elements of a and b and report whether they match.
   Fix over the original: the scan had no bound, so two fully-equal arrays
   made it read past the end of both buffers (undefined behaviour). */
void equalArray(int* a, int*b, int n)
{
    int i=0;
    while(i<n && a[i]==b[i])
        i++;
    if(i<n)
        printf("I risultati dell'host e del device sono diversi\n");
    else
        printf("I risultati dell'host e del device coincidono\n");
}
|
1,757
|
#include "matrix.cuh"
matrix_t* matrix_constructor(unsigned int rows, unsigned int cols)
{
    /* Allocate a rows x cols matrix (header plus inline float payload),
       initialized to all zeros. Caller owns the result. */
    matrix_t* out = (matrix_t*)malloc(sizeof(matrix_t) + sizeof(float) * rows * cols);
    assert(out != NULL);
    out->rows = rows;
    out->cols = cols;
    set_matrix(out, 0.0);
    return out;
}
float matrix_get(matrix_t* m, unsigned int x, unsigned int y)
{
    /* Row-major element read: row x, column y. */
    assert(m != NULL);
    unsigned int offset = x * m->cols + y;
    return m->matrix[offset];
}
void matrix_set(matrix_t* m, unsigned int x, unsigned int y, float value)
{
    /* Row-major element write: row x, column y. */
    assert(m != NULL);
    unsigned int offset = x * m->cols + y;
    m->matrix[offset] = value;
}
matrix_t* matrix_add(matrix_t* m1, matrix_t* m2)
{
    /* Element-wise sum of two equally-sized matrices. Caller frees result. */
    assert(m1 != NULL && m2 != NULL);
    assert(m1->rows == m2->rows && m1->cols == m2->cols);
    matrix_t* out = matrix_constructor(m1->rows, m1->cols);
    for (int r = 0; r < m1->rows; r++)
    {
        for (int c = 0; c < m1->cols; c++)
        {
            matrix_set(out, r, c, matrix_get(m1, r, c) + matrix_get(m2, r, c));
        }
    }
    return out;
}
matrix_t* matrix_subtract(matrix_t* m1, matrix_t* m2)
{
    /* Element-wise difference m1 - m2 of two equally-sized matrices. */
    assert(m1 != NULL && m2 != NULL);
    assert(m1->rows > 0 && m2->rows > 0 && m1->cols > 0 && m2->cols > 0);
    assert(m1->rows == m2->rows && m1->cols == m2->cols);
    matrix_t* out = matrix_constructor(m1->rows, m1->cols);
    for (int r = 0; r < m1->rows; r++)
    {
        for (int c = 0; c < m1->cols; c++)
        {
            matrix_set(out, r, c, matrix_get(m1, r, c) - matrix_get(m2, r, c));
        }
    }
    return out;
}
matrix_t* matrix_multiply(matrix_t* m1, matrix_t* m2)
{
    /* Dense matrix product; inner dimensions must agree. Caller frees. */
    if(!(m1->rows > 0 && m2->rows > 0 && m1->cols > 0 && m2->cols > 0))
    {
        /* Debug aid retained from the original: dump sizes before the assert fires. */
        printf("%d %d %d %d", m1->rows, m2->rows, m1->cols, m2->cols);
    }
    assert(m1 != NULL && m2 != NULL);
    assert(m1->rows > 0 && m2->rows > 0 && m1->cols > 0 && m2->cols > 0);
    assert(m1->cols == m2->rows);
    matrix_t* out = matrix_constructor(m1->rows, m2->cols);
    for (int r = 0; r < out->rows; r++)
    {
        for (int c = 0; c < out->cols; c++)
        {
            float acc = matrix_get(out, r, c);
            for (int t = 0; t < m1->cols; t++)
                acc += matrix_get(m1, r, t) * matrix_get(m2, t, c);
            matrix_set(out, r, c, acc);
        }
    }
    return out;
}
matrix_t* matrix_scalar_multiply(matrix_t* m, float scalar)
{
    /* Return a new matrix with every element multiplied by scalar. */
    assert(m!= NULL);
    assert(m->rows > 0 && m->cols > 0);
    matrix_t* out = matrix_constructor(m->rows, m->cols);
    for (int r = 0; r < m->rows; r++)
    {
        for (int c = 0; c < m->cols; c++)
        {
            matrix_set(out, r, c, matrix_get(m, r, c) * scalar);
        }
    }
    return out;
}
matrix_t* matrix_transpose(matrix_t* m)
{
    /* Return a new cols x rows matrix with m's rows and columns swapped.
       Improvement over the original: it copied m's whole payload via
       copy_matrix only to overwrite every element anyway; building the
       transpose directly gives the same result without the extra copy. */
    assert(m!= NULL);
    assert(m->rows > 0 && m->cols > 0);
    matrix_t* transpose = matrix_constructor(m->cols, m->rows);
    int i, j;
    for(i=0; i<m->rows; i++)
    {
        for(j=0; j<m->cols; j++)
        {
            matrix_set(transpose, j, i, matrix_get(m, i, j));
        }
    }
    return transpose;
}
void set_matrix(matrix_t* m, float val)
{
    /* Fill every element of m with val. */
    assert(m != NULL);
    assert(m->rows > 0 && m->cols > 0);
    for (int r = 0; r < m->rows; r++)
    {
        for (int c = 0; c < m->cols; c++)
        {
            matrix_set(m, r, c, val);
        }
    }
}
void set_matrix_index(matrix_t* m)
{
    /* Fill each element with its own row-major flat index (debug helper). */
    assert(m != NULL);
    assert(m->rows > 0 && m->cols > 0);
    for (int r = 0; r < m->rows; r++)
    {
        for (int c = 0; c < m->cols; c++)
        {
            matrix_set(m, r, c, r * m->cols + c);
        }
    }
}
matrix_t* copy_matrix(matrix_t* m)
{
    /* Deep copy: fresh allocation with the same dimensions and payload. */
    matrix_t* out = matrix_constructor(m->rows, m->cols);
    memcpy(out->matrix, m->matrix, sizeof(float)*m->rows*m->cols);
    return out;
}
void print_matrix(matrix_t* m)
{
    /* Dump the dimensions then the elements, one row per line. */
    printf("%dx%d\n", m->rows, m->cols);
    for (int r = 0; r < m->rows; r++)
    {
        for (int c = 0; c < m->cols; c++)
        {
            printf("%f ", matrix_get(m, r, c));
        }
        printf("\n");
    }
}
/* Release a matrix previously obtained from matrix_constructor.
 * NOTE(review): only the matrix_t struct itself is freed here. If
 * matrix_constructor allocates the `matrix` data buffer as a separate
 * allocation (rather than embedding it in the same block), that buffer
 * leaks — confirm against matrix_constructor, which is not visible in
 * this file chunk. */
void free_matrix(matrix_t* m)
{
assert(m != NULL);
free(m);
}
/* Return a new matrix with the logistic sigmoid 1/(1+e^-x) applied
 * element-wise; m is left untouched. Caller owns the result. */
matrix_t* matrix_sigmoid(matrix_t* m)
{
    matrix_t* out = copy_matrix(m);
    int r, c;
    for(r = 0; r < m->rows; r++)
    {
        for(c = 0; c < m->cols; c++)
        {
            double v = matrix_get(out, r, c);
            matrix_set(out, r, c, 1.0 / (1.0 + exp(-1.0 * v)));
        }
    }
    return out;
}
/* Return a new matrix of element-wise sigmoid derivatives,
 * s(x) * (1 - s(x)) with s the logistic function; m is untouched.
 * Caller owns the result. */
matrix_t* matrix_sigmoid_gradient(matrix_t* m)
{
    matrix_t* out = copy_matrix(m);
    int r, c;
    for(r = 0; r < m->rows; r++)
    {
        for(c = 0; c < m->cols; c++)
        {
            float sig = 1.0 / (1.0 + exp(-1.0 * matrix_get(out, r, c)));
            matrix_set(out, r, c, sig * (1 - sig));
        }
    }
    return out;
}
/* Return a new matrix with every entry squared; m is left untouched.
 * Caller owns the result.
 * Uses a plain multiply instead of pow(x, 2): the exact double product
 * of two floats rounds to float identically, so results are unchanged
 * while avoiding a libm call per cell. */
matrix_t* matrix_square(matrix_t* m)
{
    matrix_t* copy = copy_matrix(m);
    int i, j;
    for(i=0; i<m->rows; i++)
    {
        for(j=0; j<m->cols; j++)
        {
            float v = matrix_get(copy, i, j);
            matrix_set(copy, i, j, v * v);
        }
    }
    return copy;
}
/* Return a new (rows x cols+1) matrix: column 0 filled with `value`,
 * followed by all columns of m shifted right by one. Caller owns it. */
matrix_t* matrix_prepend_col(matrix_t* m, float value)
{
    matrix_t* out = matrix_constructor(m->rows, m->cols + 1);
    unsigned int r, c;
    /* New leading column of constants. */
    for(r = 0; r < out->rows; r++)
        matrix_set(out, r, 0, value);
    /* Original data, shifted one column to the right. */
    for(r = 0; r < m->rows; r++)
        for(c = 0; c < m->cols; c++)
            matrix_set(out, r, c + 1, matrix_get(m, r, c));
    return out;
}
/* Return a new (rows x cols-1) matrix equal to m with its first
 * column dropped. Caller owns the result. */
matrix_t* matrix_remove_col(matrix_t* m)
{
    matrix_t* out = matrix_constructor(m->rows, m->cols - 1);
    unsigned int r, c;
    for(r = 0; r < out->rows; r++)
    {
        for(c = 0; c < out->cols; c++)
        {
            /* Shift every column of m one position to the left. */
            matrix_set(out, r, c, matrix_get(m, r, c + 1));
        }
    }
    return out;
}
/* Return a new (rows+1 x cols) matrix: row 0 filled with `value`,
 * followed by all rows of m shifted down by one. Caller owns it. */
matrix_t* matrix_prepend_row(matrix_t* m, float value)
{
    matrix_t* out = matrix_constructor(m->rows + 1, m->cols);
    unsigned int r, c;
    /* New leading row of constants. */
    for(c = 0; c < out->cols; c++)
        matrix_set(out, 0, c, value);
    /* Original data, shifted one row down. */
    for(r = 0; r < m->rows; r++)
        for(c = 0; c < m->cols; c++)
            matrix_set(out, r + 1, c, matrix_get(m, r, c));
    return out;
}
/* Return a new (rows-1 x cols) matrix equal to m with its first row
 * dropped. Caller owns the result. */
matrix_t* matrix_remove_row(matrix_t* m)
{
    matrix_t* out = matrix_constructor(m->rows - 1, m->cols);
    unsigned int r, c;
    for(r = 0; r < out->rows; r++)
    {
        for(c = 0; c < out->cols; c++)
        {
            /* Shift every row of m one position up. */
            matrix_set(out, r, c, matrix_get(m, r + 1, c));
        }
    }
    return out;
}
/* Extract row `row` of m as a new 1 x cols vector. Caller owns it. */
matrix_t* row_to_vector(matrix_t* m, unsigned int row)
{
    matrix_t* vec = matrix_constructor(1, m->cols);
    unsigned int c;
    for(c = 0; c < m->cols; c++)
    {
        vector_set(vec, c, matrix_get(m, row, c));
    }
    return vec;
}
/* Extract column `col` of m as a new 1 x rows vector. Caller owns it. */
matrix_t* col_to_vector(matrix_t* m, unsigned int col)
{
    matrix_t* vec = matrix_constructor(1, m->rows);
    unsigned int r;
    for(r = 0; r < m->rows; r++)
    {
        vector_set(vec, r, matrix_get(m, r, col));
    }
    return vec;
}
/* Element-wise (Hadamard) product of two equally shaped matrices;
 * returns a new matrix owned by the caller.
 * Preconditions (assert-checked): non-NULL inputs, positive and
 * matching dimensions. */
matrix_t* matrix_cell_multiply(matrix_t* m1, matrix_t* m2)
{
    assert(m1 != NULL && m2 != NULL);
    assert(m1->rows > 0 && m2->rows > 0 && m1->cols > 0 && m2->cols > 0);
    assert(m1->rows == m2->rows && m1->cols == m2->cols);
    matrix_t* out = matrix_constructor(m1->rows, m1->cols);
    int r, c;
    for(r = 0; r < m1->rows; r++)
    {
        for(c = 0; c < m1->cols; c++)
        {
            float prod = matrix_get(m1, r, c) * matrix_get(m2, r, c);
            matrix_set(out, r, c, prod);
        }
    }
    return out;
}
/*
 * Load a rows x cols matrix from a CSV file: each of the first `rows`
 * lines must contain at least `cols` comma-separated numeric fields.
 * Aborts (assert) if the file cannot be opened, ends early, or a line
 * has too few fields. Caller owns the returned matrix.
 */
matrix_t* load_from_file(const char* filename, unsigned int rows, unsigned int cols)
{
    matrix_t* m = matrix_constructor(rows, cols);
    char* line = NULL;
    size_t n = 0;
    FILE* stream = fopen(filename, "rb");
    assert(stream != NULL); /* fail loudly instead of crashing in getline */
    unsigned int i, j;
    for(i=0; i<rows; i++)
    {
        /* Let getline grow and reuse one buffer across iterations.
         * The original freed `line` each pass without resetting it to
         * NULL, handing getline a dangling pointer next time round —
         * undefined behavior. */
        int ret = getline(&line, &n, stream);
        assert(ret != EOF);
        char* tmp = strtok(line, ",");
        for(j=0; j<cols; j++)
        {
            assert(tmp != NULL);
            matrix_set(m, i, j, atof(tmp));
            tmp = strtok(NULL, ",");
        }
    }
    free(line);     /* single release of the getline buffer */
    fclose(stream); /* the original leaked the FILE handle */
    return m;
}
/* Return the arithmetic mean of all entries of m.
 * Fixes an uninitialized-accumulator bug: `sum` previously started
 * from indeterminate stack garbage. */
float matrix_average(matrix_t* m)
{
    assert(m != NULL);
    assert(m->rows > 0 && m->cols > 0);
    int i, j;
    float sum = 0.0f;
    for(i=0; i<m->rows; i++)
    {
        for(j=0; j<m->cols; j++)
        {
            sum += matrix_get(m, i, j);
        }
    }
    return sum / (m->rows * m->cols);
}
/* Print m's shape to stdout as "RxC" followed by a newline. */
void print_matrix_dimensions(matrix_t* m)
{
    printf("%dx%d\n", m->rows, m->cols);
}
/*
 * Build a rows x cols matrix of pseudo-random floats drawn uniformly
 * from [-range, range) with 0.001 granularity. Caller owns the result.
 * The RNG is now seeded once per process: the original called
 * srand(time(NULL)) on every invocation, so calls within the same
 * second produced identical matrices.
 */
matrix_t* matrix_random(unsigned int rows, unsigned int cols, float range)
{
    static int seeded = 0;
    if(!seeded)
    {
        srand(time(NULL));
        seeded = 1;
    }
    matrix_t *m = matrix_constructor(rows, cols);
    unsigned int i, j;
    for(i=0; i<rows; i++)
    {
        for(j = 0; j<cols; j++)
        {
            /* random in [0, 1), then mapped to [-range, range). */
            float random = ((float)(rand() % 1000)) / (float)1000;
            matrix_set(m, i, j, random * 2 * range - range);
        }
    }
    return m;
}
/* Total bytes attributed to m: the struct plus rows*cols floats of
 * element storage. */
unsigned int matrix_memory_size(matrix_t* m)
{
    unsigned int elems = m->rows * m->cols;
    return sizeof(matrix_t) + elems * sizeof(float);
}
|
1,758
|
/*
 * Dual-variable update step of a primal-dual iteration (name "primal"
 * kept for compatibility with callers): per pixel and channel, take a
 * gradient-ascent step y += sigma * grad(xbar) using forward
 * differences (zero past the right/bottom edge), then project (y1,y2)
 * back onto the unit disc by dividing by max(1, |y|).
 * Launch with a 2D grid of 2D blocks covering the w x h image; the nc
 * channels are looped inside each thread.
 * Uses fmaxf: the original fmax promoted every projection to double.
 */
__global__ void primal(float *y1, float *y2, float *xbar, float sigma, int w, int h, int nc) {
    int x = threadIdx.x + blockDim.x * blockIdx.x;
    int y = threadIdx.y + blockDim.y * blockIdx.y;
    if (x < w && y < h) {
        int i;
        float x1, x2, val, norm;
        for (int z = 0; z < nc; z++) {
            i = x + w * y + w * h * z;
            val = xbar[i];
            /* Forward differences in x and y; zero at the image border. */
            x1 = (x+1<w) ? (xbar[(x+1) + w * y + w * h * z] - val) : 0.f;
            x2 = (y+1<h) ? (xbar[x + w * (y+1) + w * h * z] - val) : 0.f;
            x1 = y1[i] + sigma * x1;
            x2 = y2[i] + sigma * x2;
            norm = sqrtf(x1*x1+x2*x2);
            y1[i] = x1 / fmaxf(1.f, norm);
            y2[i] = x2 / fmaxf(1.f, norm);
        }
    }
}
|
1,759
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <assert.h>
#define NTHREADS 120
#define CUDA_CALL(x) \
{ \
const cudaError_t a = (x); \
if (a != cudaSuccess) \
{ \
printf("\nCUDA Error: %s (err_num = %d)\n", cudaGetErrorString(a), a); \
cudaDeviceReset(); \
assert(0); \
} \
}
#pragma pack(1)
typedef struct
{
short type; //0x4d42
int size; //width*
short reserved1;
short reserved2;
int offset;
} BMPHeader;
typedef struct
{
int size;
int width;
int height;
short planes;
short bitsPerPixel;
unsigned compression;
unsigned imageSize;
int xPelsPerMeter;
int yPelsPerMeter;
int clrUsed;
int clrImportant;
} BMPInfoHeader;
typedef struct
{
unsigned char r, g, b, alaph;
} mypoint;
/*
 * Save a width x height RGB image (`dst`, one mypoint per pixel) to
 * `name` as an uncompressed 24-bit BMP.
 * Rows are written in storage order (row 0 first); BMP files with a
 * positive height field are conventionally bottom-up, so most viewers
 * show the image vertically flipped — behavior kept from the original.
 * On failure the pixel buffer is freed and the process exits with a
 * failure status (the original exited with EXIT_SUCCESS and printed a
 * "load" message for what is a write error).
 */
void SaveBMPFile(mypoint *dst, unsigned int width, unsigned int height, const char *name)
{
    FILE *fd = fopen(name, "wb");
    if (fd == NULL)
    {
        /* The original dereferenced a NULL FILE* when fopen failed. */
        printf("***Cannot open BMP file for writing.***\n");
        free(dst);
        exit(EXIT_FAILURE);
    }
    /* BMP scanlines are padded to multiples of 4 bytes; for the 640-px
     * images produced by this program the pad is 0, so existing output
     * is byte-identical. */
    unsigned int pad = (4 - (width * 3) % 4) % 4;
    unsigned int imageBytes = (width * 3 + pad) * height;
    BMPHeader hdr;
    BMPInfoHeader InfoHdr;
    hdr.type = 0x4d42;            /* "BM" magic */
    hdr.size = imageBytes + sizeof(hdr) + sizeof(InfoHdr);
    hdr.reserved1 = 0;
    hdr.reserved2 = 0;
    hdr.offset = sizeof(hdr) + sizeof(InfoHdr);
    InfoHdr.size = sizeof(InfoHdr);
    InfoHdr.width = width;
    InfoHdr.height = height;
    InfoHdr.planes = 1;
    InfoHdr.bitsPerPixel = 24;
    InfoHdr.compression = 0;
    InfoHdr.imageSize = imageBytes;
    InfoHdr.xPelsPerMeter = 0;
    InfoHdr.yPelsPerMeter = 0;
    InfoHdr.clrUsed = 0;
    InfoHdr.clrImportant = 0;
    fwrite(&hdr, sizeof(BMPHeader), 1, fd);
    fwrite(&InfoHdr, sizeof(BMPInfoHeader), 1, fd);
    for (unsigned int y = 0; y < height; y++)
    {
        for (unsigned int x = 0; x < width; x++)
        {
            /* BMP stores pixel channels in BGR order. */
            fputc(dst[y * width + x].b, fd);
            fputc(dst[y * width + x].g, fd);
            fputc(dst[y * width + x].r, fd);
        }
        for (unsigned int p = 0; p < pad; p++)
            fputc(0, fd);
    }
    if (ferror(fd))
    {
        printf("***BMP write error.***\n");
        fclose(fd);
        free(dst);
        exit(EXIT_FAILURE);
    }
    printf("BMP file saved successfully!\n");
    fclose(fd);
}
/*
 * Fill one pixel of a hard-coded 640-wide image with a diagonal
 * red/green gradient; blue takes the remainder so r+g+b ~= 256.
 * Thread mapping (from the launch in main): x = threadIdx.x (column),
 * y = blockIdx.x (row).
 */
__global__ void draw(mypoint *img)
{
    int x = threadIdx.x;
    int y = blockIdx.x;
    int r = (int)((x / 640.0-y / 1105.5125) * 256.0);
    int g = (int)(y / 2.165);
    int b = 256 - r - g;
    if (r < 0 || g < 0 || b < 0)
        r = g = b = 0;
    /* When r == g == 0 (e.g. pixel 0,0), b is 256 and previously
     * wrapped to 0 in the unsigned char store; clamp to 255 instead. */
    if (b > 255)
        b = 255;
    img[y * 640 + x].r = r;
    img[y * 640 + x].g = g;
    img[y * 640 + x].b = b;
}
/*
 * Render a 640x640 gradient on the GPU, time kernel + copy-back with
 * CLOCK_REALTIME, and save the result as gpu.bmp.
 */
__host__ int main()
{
    int width = 640, height = 640;
    int sizeofimg = width * height * sizeof(mypoint);
    mypoint *img = (mypoint *)malloc(sizeofimg);
    assert(img);
    mypoint *cudaimg;
    CUDA_CALL(cudaMalloc((void **)&cudaimg, sizeofimg));
    struct timespec time_start = {0, 0}, time_end = {0, 0};
    clock_gettime(CLOCK_REALTIME, &time_start);
    /* width blocks of height threads; the kernel reads threadIdx.x as
     * the column and blockIdx.x as the row (square image, so the two
     * extents coincide). */
    draw<<<width,height>>>(cudaimg);
    /* Surface launch-configuration errors; the original ignored them. */
    CUDA_CALL(cudaGetLastError());
    /* The blocking copy also synchronizes with the kernel, so the
     * timing below covers kernel + transfer. */
    CUDA_CALL(cudaMemcpy(img, cudaimg, sizeofimg, cudaMemcpyDeviceToHost));
    clock_gettime(CLOCK_REALTIME, &time_end);
    double costTime = (time_end.tv_sec - time_start.tv_sec) * 1000 * 1000 * 1000 + time_end.tv_nsec - time_start.tv_nsec;
    printf("GPU cal cost:%.7lfms\n", costTime / 1000 / 1000);
    SaveBMPFile(img, width, height, "gpu.bmp");
    CUDA_CALL(cudaFree(cudaimg));
    free(img);
    return 0;
}
|
1,760
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 1
#define THREADS_PER_SM 1
#define BLOCKS_NUM 1
#define TOTAL_THREADS (THREADS_PER_BLOCK*BLOCKS_NUM)
#define WARP_SIZE 32
#define REPEAT_TIMES 4096
// GPU error check
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with its source location; optionally
// terminate the process with the error code as exit status.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true){
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Atomic-latency microbenchmark kernel. Each thread performs REPEAT_TIMES
// dependent atomicAdd operations on data1: atomicAdd returns the previous
// value, which becomes the next index, so iterations form a serialized
// chain and the elapsed clocks measure latency rather than throughput.
// The loop is bracketed by reads of the per-SM %clock register; per-thread
// start/stop values and the final data1[0] are written back for the host.
// NOTE(review): the kernel name "max_flops" is inherited from a template
// this was derived from; no floating-point work happens here.
template <class T>
__global__ void max_flops(uint32_t *startClk, uint32_t *stopClk, T *data1, T *res) {
int gid = blockIdx.x*blockDim.x + threadIdx.x;
//register T s1 = data1[gid];
//register T s2 = data2[gid];
//register T result = 0;
uint32_t index = 0;
int32_t offset = 10;
// synchronize all threads
asm volatile ("bar.sync 0;");
// start timing: sample the SM clock register
uint32_t start = 0;
asm volatile ("mov.u32 %0, %%clock;" : "=r"(start) :: "memory");
//printf("%ld \n", &data1[0]);
for (int j=0 ; j<REPEAT_TIMES ; ++j) {
// Dependent chain: the returned old value is the next index.
index = atomicAdd(&data1[index], offset);
//printf("index = %d", index);
}
// synchronize all threads
asm volatile("bar.sync 0;");
// stop timing
uint32_t stop = 0;
asm volatile("mov.u32 %0, %%clock;" : "=r"(stop) :: "memory");
// write time and data back to memory so the chain is not optimized away
startClk[gid] = start;
stopClk[gid] = stop;
res[gid] = data1[0];
}
/*
 * Host driver for the atomic-latency microbenchmark: builds a cyclic
 * index chain in data1 (data1[i] = (i+stride) % REPEAT_TIMES), runs the
 * kernel, and reports average clocks per dependent atomicAdd.
 * Adds the host/device frees the original leaked.
 */
int main(){
    uint32_t *startClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    uint32_t *stopClk = (uint32_t*) malloc(TOTAL_THREADS*sizeof(uint32_t));
    int32_t *data1 = (int32_t*) malloc(REPEAT_TIMES*sizeof(int32_t));
    int32_t *res = (int32_t*) malloc(TOTAL_THREADS*sizeof(int32_t));
    uint32_t *startClk_g;
    uint32_t *stopClk_g;
    int32_t *data1_g;
    int32_t *res_g;
    int32_t stride = 1;
    /* Cyclic pointer-chase pattern for the dependent atomic chain. */
    for (int32_t i=0; i<(REPEAT_TIMES); i++)
        data1[i] = (i+stride)%REPEAT_TIMES;
    gpuErrchk( cudaMalloc(&startClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&stopClk_g, TOTAL_THREADS*sizeof(uint32_t)) );
    gpuErrchk( cudaMalloc(&data1_g, REPEAT_TIMES*sizeof(int32_t)) );
    gpuErrchk( cudaMalloc(&res_g, TOTAL_THREADS*sizeof(int32_t)) );
    gpuErrchk( cudaMemcpy(data1_g, data1, REPEAT_TIMES*sizeof(int32_t), cudaMemcpyHostToDevice) );
    max_flops<int32_t><<<BLOCKS_NUM,THREADS_PER_BLOCK>>>(startClk_g, stopClk_g, data1_g, res_g);
    gpuErrchk( cudaPeekAtLastError() );
    /* Blocking copies also synchronize with the kernel. */
    gpuErrchk( cudaMemcpy(startClk, startClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(stopClk, stopClk_g, TOTAL_THREADS*sizeof(uint32_t), cudaMemcpyDeviceToHost) );
    gpuErrchk( cudaMemcpy(res, res_g, TOTAL_THREADS*sizeof(int32_t), cudaMemcpyDeviceToHost) );
    float latency;
    latency = ((float)(stopClk[0]-startClk[0]))/((float)(REPEAT_TIMES));
    printf("int32 latency = %f (clk)\n", latency);
    printf("Total Clk number = %u \n", stopClk[0]-startClk[0]);
    /* Release everything; the original leaked all eight buffers. */
    gpuErrchk( cudaFree(startClk_g) );
    gpuErrchk( cudaFree(stopClk_g) );
    gpuErrchk( cudaFree(data1_g) );
    gpuErrchk( cudaFree(res_g) );
    free(startClk);
    free(stopClk);
    free(data1);
    free(res);
    return 0;
}
|
1,761
|
#include <cuda_runtime.h>
#include <stdio.h>
//#include <stdbool.h>
extern "C" void gray_parallel(unsigned char* h_in, unsigned char* h_out, int elems, int rows, int cols);
// BGR -> grayscale, 2D-grid variant: blockIdx.y selects the row and
// (blockIdx.x, threadIdx.x) span the columns. Each in-range thread
// converts one pixel using BT.601-style weights on the 3-byte BGR
// triplet at d_in[3*index ..].
__global__ void kernel1(unsigned char* d_in, unsigned char* d_out, int rows, int cols){
    int col = threadIdx.x + blockIdx.x * blockDim.x;
    int row = blockIdx.y;
    int pixel = col + row * cols;
    if (pixel < rows * cols)
    {
        int src = 3 * pixel;
        double gray = 0.144 * d_in[src]
                    + 0.587 * d_in[src + 1]
                    + 0.299 * d_in[src + 2];
        d_out[pixel] = (unsigned char)gray;
    }
}
// BGR -> grayscale, 1D-grid variant: one thread per pixel addressed by
// the flat global index; same weighting as kernel1.
__global__ void kernel2(unsigned char* d_in, unsigned char* d_out, int rows, int cols){
    int pixel = threadIdx.x + blockIdx.x * blockDim.x;
    if (pixel < rows * cols)
    {
        int src = 3 * pixel;
        double gray = 0.144 * d_in[src]
                    + 0.587 * d_in[src + 1]
                    + 0.299 * d_in[src + 2];
        d_out[pixel] = (unsigned char)gray;
    }
}
// Kernel Calling Function
// Convert an interleaved 3-channel image (h_in, `elems` bytes) to an
// 8-bit grayscale image (h_out, rows*cols bytes) on the GPU, choosing
// between a 2D-grid and a 1D-grid kernel.
extern "C" void gray_parallel(unsigned char* h_in, unsigned char* h_out, int elems, int rows, int cols){
    int checkgrid2D=1;
    // One thread per pixel of a row; grid.y walks the rows.
    dim3 block(cols,1,1);
    // Ceil-divide columns by the block width. The original wrote
    // `cols+block.x-1/block.x`, which parses as cols + block.x - (1/block.x)
    // and launched ~2*cols blocks per row; the kernel's bounds check kept
    // the output correct, but nearly all of those blocks were wasted.
    dim3 grid((cols + block.x - 1) / block.x, rows, 1);
    unsigned char* d_in;
    unsigned char* d_out;
    cudaMalloc((void**) &d_in, elems);
    cudaMalloc((void**) &d_out, rows*cols);
    printf("rows_kernel: %d \n", rows);
    printf("cols_kernel: %d \n", cols);
    cudaMemcpy(d_in, h_in, elems*sizeof(unsigned char), cudaMemcpyHostToDevice);
    if(checkgrid2D==1)
    {
        kernel1<<<grid,block>>>(d_in, d_out, rows, cols);
        printf("use 2D grid 1D block\n");
    }
    else
    {
        kernel2<<<rows,cols>>>(d_in, d_out, rows, cols);
        printf("use 1D grid 1D block\n");
    }
    // Blocking copy synchronizes with the kernel before reading results.
    cudaMemcpy(h_out, d_out, rows*cols*sizeof(unsigned char), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}
|
1,762
|
#include <stdio.h>
#include<cuda_runtime.h>
#include <time.h>
#include <cuda.h>
// CUDA runtime
// Helper functions and utilities to work with CUDA
#define N 256
//#define M 256
//__global__ĺ߱δ뽻CPUãGPUִ
// Square matrix product dev_c = dev_a * dev_b (row-major, Width x Width).
// One thread per output element; threads outside the matrix do nothing,
// so the grid may safely over-cover Width.
__global__ void matrix_mult(float *dev_a, float* dev_b, float* dev_c, int Width)
{
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= Width || col >= Width)
        return;
    float acc = 0;
    for (int k = 0; k < Width; k++)
        acc += dev_a[row * Width + k] * dev_b[k * Width + col];
    dev_c[row * Width + col] = acc;
}
int main(void)
{
    /* Host matrices. `static` keeps the ~768 KB of buffers off the
     * stack (the original declared three 256 KB automatic arrays, a
     * stack-overflow risk). Entries are constants, so every product
     * entry should be N * 1.0 * 0.01. */
    static float host_a[N][N];
    static float host_b[N][N];
    static float host_c[N][N];
    for (int i = 0; i<N; i++)
        for (int j = 0; j<N; j++)
            host_a[i][j] = 1.0f;
    for (int i = 0; i<N; i++)
        for (int j = 0; j<N; j++)
            host_b[i][j] = 0.01f;
    cudaError_t err = cudaSuccess;
    /* Allocate device buffers, checking each call: the original only
     * inspected the last return value, masking earlier failures. */
    float *dev_a, *dev_b, *dev_c;
    err = cudaMalloc((void **)&dev_a, sizeof(float)* N*N);
    if (err != cudaSuccess)
    {
        printf("the cudaMalloc on GPU is failed");
        return 1;
    }
    err = cudaMalloc((void **)&dev_b, sizeof(float)* N*N);
    if (err != cudaSuccess)
    {
        printf("the cudaMalloc on GPU is failed");
        return 1;
    }
    err = cudaMalloc((void **)&dev_c, sizeof(float)* N*N);
    if (err != cudaSuccess)
    {
        printf("the cudaMalloc on GPU is failed");
        return 1;
    }
    printf("SUCCESS");
    /* Upload the inputs. */
    cudaMemcpy(dev_a, host_a, sizeof(float)* N*N, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, host_b, sizeof(float)* N*N, cudaMemcpyHostToDevice);
    /* 32x32 = 1024 threads per block; round the grid up to cover N. */
    #define BLOCK_WIDTH 32
    int NumBlocks = N / BLOCK_WIDTH;
    if ( N%BLOCK_WIDTH ) NumBlocks++;
    dim3 dimGrid(NumBlocks, NumBlocks);
    dim3 dimBlock(BLOCK_WIDTH, BLOCK_WIDTH);
    clock_t start = clock();
    matrix_mult<<< dimGrid, dimBlock >>>(dev_a, dev_b, dev_c, N);
    /* The blocking copy synchronizes with the kernel, so the timing
     * below covers kernel + copy-back. */
    cudaMemcpy(&host_c, dev_c, sizeof(host_c), cudaMemcpyDeviceToHost);
    clock_t end = clock();
    float time = (float)(end - start) / CLOCKS_PER_SEC;
    printf("%f seconds\n", time);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return 0;
}
|
1,763
|
#include<iostream>
using namespace std;
// Square matrix product cd = ad * bd (row-major n x n).
// Adds the bounds guard the original lacked: threads beyond the matrix
// previously read and wrote out of bounds whenever the launch grid
// over-covered n.
__global__ void multiply(int *ad,int *bd,int *cd,int n)
{
    int row=blockIdx.y*blockDim.y+threadIdx.y;
    int col=blockIdx.x*blockDim.x+threadIdx.x;
    if(row<n && col<n)
    {
        int sum=0;
        for(int i=0;i<n;i++)
        {
            sum=sum+ad[row*n+i]*bd[i*n+col];
        }
        cd[row*n+col]=sum;
    }
}
int main()
{
    cout<<"Enter the size"<<endl;
    int n;
    cin>>n;
    // Heap allocation replaces the original variable-length 2D stack
    // arrays (a compiler extension in C++ and a stack-overflow risk for
    // large n). Row-major n x n layout matches the kernel's indexing.
    int size=n*n*sizeof(int);
    int *a=new int[n*n];
    int *b=new int[n*n];
    int *c=new int[n*n];
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            a[i*n+j]=3+i;
            b[i*n+j]=2+j;
        }
    }
    int *ad,*bd,*cd;
    cudaEvent_t start,end;
    cudaMalloc(&ad,size);
    cudaMemcpy(ad,a,size,cudaMemcpyHostToDevice);
    cudaMalloc(&bd,size);
    cudaMemcpy(bd,b,size,cudaMemcpyHostToDevice);
    cudaMalloc(&cd,size);
    // One thread per output element. The original launched
    // multiply<<<grid,size>>>, passing the byte count as the block
    // dimension — an invalid configuration, so the kernel never ran —
    // and used a 3D grid (n,n,n) although the kernel only uses x and y.
    dim3 grid(n,n,1);
    dim3 block(1,1,1);
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaEventRecord(start);
    multiply<<<grid,block>>>(ad,bd,cd,n);
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    float time=0;
    cudaEventElapsedTime(&time,start,end);
    cudaMemcpy(c,cd,size,cudaMemcpyDeviceToHost);
    for(int i=0;i<n;i++)
    {
        for(int j=0;j<n;j++)
        {
            cout<<c[i*n+j]<<" ";
        }
        cout<<endl;
    }
    cout<<"The time required is "<<time<<endl;
    // Release resources the original leaked.
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(ad);
    cudaFree(bd);
    cudaFree(cd);
    delete[] a;
    delete[] b;
    delete[] c;
}
|
1,764
|
#include "includes.h"
// Divide each element of a row-major (row x col) matrix by the vector
// entry for its row: matrix[r][c] /= vector[r]. One thread per element;
// launch with at least row*col threads total.
__global__ void divide_by_vector(float *matrix, float *vector, unsigned int row, unsigned int col) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < row * col) {
        matrix[tid] = matrix[tid] / vector[tid / col];
    }
}
|
1,765
|
#include <cstdio>
#include <cstddef>
#include <cfloat>
#include <chrono>
#ifndef ARRAY_SIZE
#define ARRAY_SIZE 100
#endif
#ifndef ARRAY_TYPE
#define ARRAY_TYPE double
#endif
#ifndef BLOCK_NUM
#define BLOCK_NUM 100
#endif
#ifndef BLOCK_SIZE
#define BLOCK_SIZE 512
#endif
#ifndef WINDOW_SIZE
#define WINDOW_SIZE 4
#endif
#ifndef NTIMES
#define NTIMES 10
#endif
#ifndef LOOP_TIME
#define LOOP_TIME 1000000
#endif
#define cudaErrorCheck(call) \
do { \
cudaError_t cuErr = call; \
if (cudaSuccess != cuErr) { \
printf("CUDA Error - %s:%d: '%s, %s'\n", __FILE__, __LINE__, \
cudaGetErrorName(cuErr), cudaGetErrorString(cuErr)); \
exit(0); \
} \
} while(0)
#ifdef SHADOW_MEMORY
// One shadow-memory word per 8 bytes of the monitored array, holding a
// small window of recent accesses. Each 64-bit cell packs (see insertSM):
//   bits 48..63  thread ID
//   bits  6..47  clock value (42 bits)
//   bit   5      write flag
//   bits  3..4   log2 of the access size (decoded via offsetPatterns)
//   bits  0..2   byte offset within the 8-byte granule
class ShadowMemory {
private:
// Decoding table for the 2-bit size field: 1, 2, 4 or 8 bytes.
static unsigned offsetPatterns[4];
// The access window; EMPTY cells are unused.
unsigned long long int bits[WINDOW_SIZE];
public:
static const unsigned long long int EMPTY = 0;
ShadowMemory() {
for (unsigned i = 0; i < WINDOW_SIZE; i++) {
bits[i] = EMPTY;
}
}
// insertSM writes cells directly, so it needs access to `bits`.
friend __device__ __noinline__ void insertSM(ptrdiff_t address,
unsigned threadID, bool isWrite,
unsigned size);
// Accessors below decode the packed fields of cell `index`.
unsigned getThreadID(unsigned index) {
return (unsigned)(this->bits[index] >> 48);
}
unsigned long long int getClock(unsigned index) {
return (this->bits[index] >> 6) & 0x000003FFFFFFFFFF;
}
bool isWrite(unsigned index) {
return ((this->bits[index] >> 5) & 0x0000000000000001) ==
0x0000000000000001
? true
: false;
}
unsigned getAccessSize(unsigned index) {
unsigned patternIndex =
(this->bits[index] >> 3) & 0x0000000000000003;
return offsetPatterns[patternIndex];
}
unsigned getAddressOffset(unsigned index) {
return (unsigned)(this->bits[index] & 0x0000000000000007);
}
// Dump every cell of this shadow word in human-readable form.
void outputSM() {
for (unsigned i = 0; i < WINDOW_SIZE; i++) {
printf(
"Cell ID = %d, Thread ID = %d, Clock = %lld, Access mode = %s, Access size = "
"%d, Offset = %d\n",
i, getThreadID(i), getClock(i), isWrite(i) ? "write" : "read",
getAccessSize(i), getAddressOffset(i));
}
}
};
// Decoded access sizes for the 2-bit size field of a shadow cell.
unsigned ShadowMemory::offsetPatterns[] = {1, 2, 4, 8};
// Number of shadow words: one per 8 bytes of the monitored array.
unsigned smSize =
((unsigned)ARRAY_SIZE * sizeof(ARRAY_TYPE) + 7) / 8;
// Device-global pointer to the shadow region (set from host via cudaMemcpyToSymbol).
__device__ ShadowMemory *sm;
// Record one memory access in the shadow word covering `address`
// (a byte offset into the monitored array). The access is packed into a
// 64-bit cell — threadID | clock | write-flag | log2(size) | offset —
// and stored in the first EMPTY slot of the word's window, or, if the
// window is full, into a slot chosen from the address (overwriting an
// older entry).
// NOTE(review): `clock` is the fixed marker 0xC0DA here, not a real
// timestamp — the 42-bit field exists but is not populated from %clock.
__device__ __noinline__ void insertSM(ptrdiff_t address,
unsigned threadID, bool isWrite,
unsigned size) {
// Which shadow word, and where inside its 8-byte granule.
unsigned index = address / 8;
unsigned offset = address % 8;
unsigned clock = 0xC0DA;
// Encode size = 2^encodedSize by counting trailing zero bits.
unsigned encodedSize = 0;
while (!(size & 0x0000000000000001)) {
encodedSize++;
size >>= 1;
}
// Pack fields MSB-first: threadID(16) | clock(42) | write(1) | size(2) | offset(3).
unsigned long long int bit = 0x0000000000000000;
bit |= (threadID & 0x000000000000FFFF);
bit <<= 42;
bit |= (clock & 0x000003FFFFFFFFFF);
bit <<= 1;
bit |= (isWrite ? 0x0000000000000001 : 0x0000000000000000);
bit <<= 2;
bit |= encodedSize;
bit <<= 3;
bit |= (offset & 0x0000000000000007);
// Scan the window for the first EMPTY slot (volatile reads so we see
// other threads' stores).
unsigned nextIndex = WINDOW_SIZE;
for (unsigned i = 0; i < WINDOW_SIZE; i++) {
unsigned long long int temp;
temp = *(volatile unsigned long long int*)(&sm[index].bits[i]);
if (temp == ShadowMemory::EMPTY && nextIndex == WINDOW_SIZE) {
nextIndex = i;
}
}
// Window full: evict the slot derived from the address.
if (nextIndex == WINDOW_SIZE) {
nextIndex = (address >> 3) % WINDOW_SIZE;
}
#ifdef USE_CAS
// Atomic publication of the packed cell.
atomicExch(&sm[index].bits[nextIndex], bit);
#else
// Plain volatile store (racy by design; compared against USE_CAS).
*(volatile unsigned long long int*)(&sm[index].bits[nextIndex]) = bit;
#endif
}
// Dump up to `limit` shadow words (every `stride`-th word, staying
// inside `size`) using each word's own outputSM formatter.
void printShadowMemory(ShadowMemory* sm, unsigned size, unsigned limit = 10, unsigned stride = 1) {
    for (unsigned n = 0; n < limit; n++) {
        unsigned word = n * stride;
        if (word >= size)
            break;
        sm[word].outputSM();
    }
}
#endif
// Grid-stride initialization of the benchmark buffers:
// array1[i] = i (so the hot element array1[5] holds 5.0), array2 zeroed.
__global__ void initialize(double *array1, double *array2, unsigned size1, unsigned size2) {
    unsigned step = gridDim.x * blockDim.x;
    unsigned first = blockDim.x * blockIdx.x + threadIdx.x;
    for (unsigned i = first; i < size1; i += step) {
        array1[i] = (double)i;
    }
    for (unsigned i = first; i < size2; i += step) {
        array2[i] = 0;
    }
}
// Contention benchmark: every thread reads the single hot element
// array1[5] LOOP_TIME times, accumulating into an unsigned sum that is
// written to array2[threadID]. With SHADOW_MEMORY enabled, each read is
// also recorded in the shadow memory via insertSM.
// array1[5] holds 5.0 (set by initialize); the double is truncated into
// the unsigned accumulator on each addition.
// NOTE(review): size1 is unused — the hot index 5 is hard-coded.
__global__ void conflictAccess(double *array1, double* array2, unsigned size1) {
unsigned threadID = blockDim.x * blockIdx.x + threadIdx.x;
unsigned sum = 0;
for (int i = 0; i < LOOP_TIME; i++) {
sum += array1[5];
#ifdef SHADOW_MEMORY
insertSM(5 * sizeof(ARRAY_TYPE), threadID, false, sizeof(ARRAY_TYPE));
#endif
}
array2[threadID] = sum;
}
/*
 * Benchmark driver: initializes the arrays, runs conflictAccess
 * NTIMES+1 times (run 0 is a warmup excluded from the statistics),
 * validates the per-thread sums against the expected value, optionally
 * dumps the shadow memory, and reports min/max/average runtimes.
 */
int main() {
    ARRAY_TYPE *array1OnDevice, *array2OnDevice;
#ifdef SHADOW_MEMORY
    ShadowMemory *smOnDevice;
#endif
    double executionTime[NTIMES + 1];
    unsigned array2Size = BLOCK_NUM * BLOCK_SIZE;
    cudaErrorCheck(cudaMalloc(&array1OnDevice, sizeof(ARRAY_TYPE) * ARRAY_SIZE));
    cudaErrorCheck(cudaMalloc(&array2OnDevice, sizeof(ARRAY_TYPE) * array2Size));
#ifdef SHADOW_MEMORY
    cudaErrorCheck(cudaMalloc(&smOnDevice, sizeof(ShadowMemory) * smSize));
    /* Publish the shadow region's address to the device-global `sm`. */
    cudaErrorCheck(cudaMemcpyToSymbol(sm, &smOnDevice, sizeof(ShadowMemory*), 0, cudaMemcpyHostToDevice));
#endif
    initialize<<<BLOCK_NUM, BLOCK_SIZE>>>(array1OnDevice, array2OnDevice, ARRAY_SIZE, array2Size);
    cudaErrorCheck(cudaGetLastError());
#ifdef SHADOW_MEMORY
    /* Zero-initialized host mirror, uploaded to clear the device region. */
    ShadowMemory* smOnHost = new ShadowMemory[smSize]();
    cudaErrorCheck(cudaMemcpy(smOnDevice, smOnHost, sizeof(ShadowMemory) * smSize, cudaMemcpyHostToDevice));
#endif
    for (int i = 0; i <= NTIMES; i++) {
        auto startTime = std::chrono::high_resolution_clock::now();
        conflictAccess<<<BLOCK_NUM, BLOCK_SIZE>>>(array1OnDevice, array2OnDevice, ARRAY_SIZE);
        cudaErrorCheck(cudaDeviceSynchronize());
        cudaErrorCheck(cudaGetLastError());
        auto endTime = std::chrono::high_resolution_clock::now();
        std::chrono::duration<double> elapsedTime = endTime - startTime;
        executionTime[i] = elapsedTime.count();
    }
    ARRAY_TYPE* array2OnHost = new ARRAY_TYPE[array2Size];
    cudaErrorCheck(cudaMemcpy(array2OnHost, array2OnDevice, sizeof(ARRAY_TYPE) * array2Size, cudaMemcpyDeviceToHost));
    /* Every thread reads array1[5] == 5.0 LOOP_TIME times (truncated to
     * unsigned per addition), so each sum should be 5 * LOOP_TIME. */
    ARRAY_TYPE expectedResult = 0;
    for (unsigned i = 0; i < LOOP_TIME; i++) {
        expectedResult += 5;
    }
    unsigned errorNum = 0;
    for (unsigned i = 0; i < array2Size; i++) {
        if (array2OnHost[i] != expectedResult) {
            printf("Mismatch at %d, expected = %f, actual = %f\n", i, expectedResult, array2OnHost[i]);
            errorNum++;
        }
    }
    if (errorNum == 0) {
        printf("The calculation on the device is correct\n");
    } else {
        /* Fixed typo: "erroenous" -> "erroneous". */
        printf("Total amount of erroneous results = %d\n", errorNum);
    }
    cudaFree(array1OnDevice);
    cudaFree(array2OnDevice);
#ifdef SHADOW_MEMORY
    cudaErrorCheck(cudaMemcpy(smOnHost, smOnDevice, sizeof(ShadowMemory) * smSize, cudaMemcpyDeviceToHost));
    printShadowMemory(smOnHost, smSize);
    cudaFree(smOnDevice);
    delete[] smOnHost;      /* leaked in the original */
#endif
    delete[] array2OnHost;  /* leaked in the original */
    double minTime = DBL_MAX, maxTime = 0, averageTime = 0;
    for (unsigned i = 1; i <= NTIMES; i++) {
        /* Independent min/max updates. The original used `else if`, so
         * maxTime stayed 0 whenever every sample also lowered minTime
         * (e.g. a monotonically decreasing series). */
        if (executionTime[i] < minTime) {
            minTime = executionTime[i];
        }
        if (executionTime[i] > maxTime) {
            maxTime = executionTime[i];
        }
        averageTime += executionTime[i];
    }
    averageTime /= NTIMES;
    printf("Evaluation Result: \n");
    printf("Min: %f Max: %f Average: %f\n", minTime, maxTime, averageTime);
    return 0;
}
|
1,766
|
#include "includes.h"
// Vertical 5-tap (1/16, 1/4, 3/8, 1/4, 1/16) pyramid-downsampling pass:
// output pixel (ix, iy) of the w x h result is a weighted column sum
// around row iy*2 of the input, which is assumed to be 2w x 2h
// (stride 2w) — TODO confirm against the caller.
// Source rows are clamped to [0, 2h-1]: the original read rows
// iy*2-2 .. iy*2+2 unguarded, going out of bounds at the top (iy == 0)
// and bottom (iy == h-1) borders — the commented-out "&& y>2" condition
// suggests the author was aware of the issue.
__global__ void PyrDown_y_g(u_int8_t *ptGrayIn,u_int8_t *ptGrayOut, int w, int h)
{
    int ix = blockIdx.x*blockDim.x + threadIdx.x;
    int iy = blockIdx.y*blockDim.y + threadIdx.y;
    if(ix<w && iy<h)
    {
        int rAbove2 = max(iy*2 - 2, 0);
        int rAbove1 = max(iy*2 - 1, 0);
        int rBelow1 = min(iy*2 + 1, 2*h - 1);
        int rBelow2 = min(iy*2 + 2, 2*h - 1);
        float p_2 = ptGrayIn[ix*2 + rAbove2*w*2]/16.0f;
        float p_1 = ptGrayIn[ix*2 + rAbove1*w*2]/4.0f;
        float p0  = 3.0f*ptGrayIn[ix*2 + iy*2*w*2]/8.0f;
        float pp1 = ptGrayIn[ix*2 + rBelow1*w*2]/4.0f;
        float pp2 = ptGrayIn[ix*2 + rBelow2*w*2]/16.0f;
        int output = p_2 + p_1 + p0 + pp1 + pp2;
        ptGrayOut[ix+iy*w] = min(output,255);
    }
}
|
1,767
|
// add two numbers
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
const int NBLOCK = 1;
const int NTHREAD = 1;
// Single-thread kernel: compute a + b on the device and store the sum
// through c.
__global__ void add(int a,int b,int *c){
    c[0] = a + b;
}
/*
 * Add two integers on the GPU and print the result. Every CUDA call is
 * now checked; the original silently ignored all return codes, so a
 * failed launch printed garbage.
 */
int main(void){
    int a = 2;
    int b = 7;
    int c = 0;
    int *c_dev = NULL;
    if (cudaMalloc( (void**)&c_dev,sizeof(int) ) != cudaSuccess){
        fprintf(stderr, "cudaMalloc failed\n");
        return 1;
    }
    add<<<NBLOCK,NTHREAD>>>(a,b,c_dev);
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess){
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
        cudaFree(c_dev);
        return 1;
    }
    /* Blocking copy synchronizes with the kernel before reading c. */
    if (cudaMemcpy(&c,c_dev,sizeof(int),cudaMemcpyDeviceToHost) != cudaSuccess){
        fprintf(stderr, "cudaMemcpy failed\n");
        cudaFree(c_dev);
        return 1;
    }
    printf("%d + %d = %d \n",a,b,c);
    cudaFree(c_dev);
    return 0;
}
|
1,768
|
#include "gemm.cuh"
#include <stdio.h>
//assumes that diagonal matrix has diagonal of one in reality (see ldl algorithm packed storage)
// Computes C = beta*C + alpha*D*L for the lower triangle, where L is
// unit-lower-triangular with its diagonal stored implicitly (choi/LDL
// packed form) and D is diagonal. One thread per column, BLOCK_WIDTH
// threads per block.
__global__
void k_choi_diag_lower_gemm_f32(int n, float alpha, const float* D, int stride_row_d, int stride_col_d,const float* L, int stride_row_l, int stride_col_l, float beta, float* C, int stride_row_c, int stride_col_c){
    const int BLOCK_WIDTH=256; //threads per block
    int col=blockIdx.x*BLOCK_WIDTH+threadIdx.x;
    if (col<n){
        // Strictly-below-diagonal entries:
        // C[i][col] = beta*C[i][col] + alpha * D[i][i] * L[i][col].
        // Each matrix is indexed with its own strides; the original used
        // C's strides for D and L, which was only correct when all three
        // layouts happened to coincide.
        for (int i=n-1;i>col;i--){
            float d=D[i*stride_col_d+i*stride_row_d];
            int ci=i*stride_col_c+col*stride_row_c;
            C[ci]*=beta;
            C[ci]+=alpha*d*L[i*stride_col_l+col*stride_row_l];
        }
        // Diagonal entry: L's implicit diagonal is 1, so the product's
        // diagonal is D[col][col]. The original reused the loop variable
        // `d` here, which was uninitialized when col == n-1 (the loop
        // body never ran) and stale otherwise.
        float dDiag=D[col*stride_col_d+col*stride_row_d];
        int ci=col*stride_col_c+col*stride_row_c;
        C[ci]*=beta;
        C[ci]+=alpha*dDiag;
    }
}
//Multiplies D*L
// Computes C = D*L row-by-row: every one of the k entries in row `r` of
// L is scaled by the diagonal element D[r][r]. One thread per row,
// TILE_SIZE threads per block.
__global__
void k_choi_dl_gemm(int n, int k, const float* D, int stride_row_d, int stride_col_d, const float* L, int stride_row_l, int stride_col_l, float* C, int stride_row_c, int stride_col_c){
    const int TILE_SIZE=64;
    int r = blockIdx.x*TILE_SIZE + threadIdx.x;
    if (r >= n)
        return;
    float diag = D[r*stride_row_d + r*stride_col_d];
    for (int c = 0; c < k; c++){
        C[r*stride_col_c + c*stride_row_c] = diag * L[r*stride_col_l + c*stride_row_l];
    }
}
//Multiplies L*D=A whereas L is a lower triangular and D a diagonal matrix. Expects LD to be stored in choi form. Input matrices are padded if they are not a multiple of block size bsmx and bsmy
// Host wrapper: C = beta*C + alpha*D*L on the device, all three n x n
// matrices in row-stride-1 / column-stride-n layout (as passed to the
// kernel below).
__host__
void diag_lower_gemm_ldl_f32(int n, float alpha, const float* D_h,const float* L_h, float beta, float* C_h){
    float* D_d;
    float* L_d;
    float* C_d;
    float bsmx=256; //blocksize x
    int sizeC=sizeof(float)*n*n;
    int sizeD=sizeof(float)*n*n;
    int sizeL=sizeof(float)*n*n;
    cudaMalloc((void**) &C_d, sizeC);
    cudaMalloc((void**) &D_d,sizeD);
    cudaMalloc((void**) &L_d,sizeL);
    dim3 threadLayout=dim3(bsmx,1,1);
    dim3 grid=dim3(ceil(n/bsmx),1,1);
    cudaError_t copy1=cudaMemcpy((void*) D_d, (void*) D_h, sizeD,cudaMemcpyHostToDevice);
    cudaError_t copy2=cudaMemcpy((void*) L_d, (void*) L_h, sizeL,cudaMemcpyHostToDevice);
    // Upload C as well: the kernel reads C (it scales by beta), so the
    // original — which left C_d uninitialized — mixed device garbage
    // into the result whenever beta != 0.
    cudaError_t copy3=cudaMemcpy((void*) C_d, (void*) C_h, sizeC,cudaMemcpyHostToDevice);
    if ((copy1==cudaSuccess) && (copy2==cudaSuccess) && (copy3==cudaSuccess)){
        k_choi_diag_lower_gemm_f32<<<grid,threadLayout>>> (n, alpha, D_d, 1,n,L_d,1,n,beta,C_d,1,n);
        // Only copy results back if the kernel was actually launched;
        // the original copied C_d back unconditionally.
        cudaMemcpy((void*) C_h, (void*) C_d,sizeC,cudaMemcpyDeviceToHost);
    }
    // Free unconditionally: the original leaked D_d/L_d on copy failure.
    cudaFree(D_d);
    cudaFree(L_d);
    cudaFree(C_d);
}
//Solves LX=A, whereas L is a lower triangular matrix and X an unknown Matrix
// Forward substitution: solve L*X = A for X, where L is lower
// triangular (n x n) and A, X are n x m. Each thread processes one
// column of X; TILE_SIZE threads per block.
__global__
void k_solve_lower_f32(int n, int m, const float* L, int stride_row_l,int stride_col_l, const float* A, int stride_row_a, int stride_col_a, float* X, int stride_row_x, int stride_col_x){
    const int TILE_SIZE=64;
    int col = blockIdx.x*TILE_SIZE + threadIdx.x;
    if (col >= m)
        return;
    int baseX = col*stride_row_x;
    int baseA = col*stride_row_a;
    for (int i = 0; i < n; i++){
        float acc = A[baseA + i*stride_col_a];
        for (int j = 0; j < i; j++){
            acc -= L[i*stride_col_l + j*stride_row_l] * X[baseX + j*stride_col_x];
        }
        X[baseX + i*stride_col_x] = acc / L[i*stride_col_l + i*stride_row_l];
    }
}
//Solves (LD)X=A, whereas L is a lower triangular matrix, D a diagonal matrix and X an unknown Matrix. Assumes the following:
//L has ones along the diagonal
//
// Forward substitution for the packed LDL ("choi") form: solves
// (L*D)*X = A, where L is unit-lower-triangular and its diagonal slots
// hold D's entries. Each thread processes one column of X; TILE_SIZE
// threads per block.
__global__
void k_choi_solve_lower_f32(int n, int m, const float* L, int stride_row_l,int stride_col_l, const float* A, int stride_row_a, int stride_col_a, float* X, int stride_row_x, int stride_col_x){
    const int TILE_SIZE=64;
    int col = blockIdx.x*TILE_SIZE + threadIdx.x;
    if (col >= m)
        return;
    int baseX = col*stride_row_x;
    int baseA = col*stride_row_a;
    for (int i = 0; i < n; i++){
        float acc = A[baseA + i*stride_col_a];
        for (int j = 0; j < i; j++){
            // L[i][j] * D[j][j] * X[j][col]; D[j][j] sits on L's diagonal.
            acc -= L[i*stride_col_l + j*stride_row_l]
                 * L[j*stride_col_l + j*stride_row_l]
                 * X[baseX + j*stride_col_x];
        }
        X[baseX + i*stride_col_x] = acc / L[i*stride_col_l + i*stride_row_l];
    }
}
// Experimental ("temp") variant of the forward-substitution kernel:
// each thread loops over n column offsets, solving L*X = A for column
// bx*TILE_SIZE + tx + a on pass a.
// NOTE(review): the guard tests (bx*TILE_SIZE+tx) < n while the column
// actually accessed is (bx*TILE_SIZE+tx+a), so later passes can index
// past column n-1; also multiple passes can touch the same column.
// Looks unfinished ("weitermachen" = "continue" in the original
// comment) — confirm intent before reuse.
__global__
void k_solve_lower_f32_temp(int n, const float* L, int stride_row_l,int stride_col_l, const float* A, int stride_row_a, int stride_col_a, float* X, int stride_row_x, int stride_col_x){
const int TILE_SIZE=64;
int tx=threadIdx.x;
int bx=blockIdx.x;
for (int a=0;a<n;a++){
// Base offsets of the column handled on this pass.
int bpx=(bx*TILE_SIZE+tx+a)*stride_row_x;
int bpa=(bx*TILE_SIZE+tx+a)*stride_row_a;// original note: "weitermachen" (continue here)
if ((bx*TILE_SIZE+tx)<n){
// Standard forward substitution down the column.
for (int i=0;i<n;i++){
float sum=A[bpa+i*stride_col_a];
for (int j=0;j<i;j++){
sum-=L[i*stride_col_l+j*stride_row_l]*X[bpx+j*stride_col_x];
}
sum/=L[i*stride_col_l+i*stride_row_l];
X[bpx+i*stride_col_x]=sum;
}
}
}
}
//Solves UX=A, whereas U is a upprt triangular matrix and X an unknown Matrix
// Backward substitution: solve U*X = A for X, where U is upper
// triangular (n x n) and A, X are n x m. Each thread processes one
// column of X; TILE_SIZE threads per block.
__global__
void k_solve_upper_f32(int n, int m,const float* U, int stride_row_u,int stride_col_u, const float* A, int stride_row_a, int stride_col_a, float* X, int stride_row_x, int stride_col_x){
    const int TILE_SIZE=64;
    int col = blockIdx.x*TILE_SIZE + threadIdx.x;
    if (col >= m)
        return;
    int baseX = col*stride_row_x;
    int baseA = col*stride_row_a;
    for (int i = n-1; i >= 0; i--){
        float acc = A[baseA + i*stride_col_a];
        for (int j = n-1; j > i; j--){
            acc -= U[i*stride_col_u + j*stride_row_u] * X[baseX + j*stride_col_x];
        }
        X[baseX + i*stride_col_x] = acc / U[i*stride_col_u + i*stride_row_u];
    }
}
//Solves UX=A, whereas U is a upper triangular matrix coming from ldl decomposition and X an unknown Matrix
// Backward substitution for an upper-triangular factor from an LDL
// decomposition: like k_solve_upper_f32 but with no division — the
// diagonal is treated as 1 (unit triangular). Each thread processes
// one column of X; TILE_SIZE threads per block.
__global__
void k_choi_solve_upper_f32(int n, int m,const float* U, int stride_row_u,int stride_col_u, const float* A, int stride_row_a, int stride_col_a, float* X, int stride_row_x, int stride_col_x){
    const int TILE_SIZE=64;
    int col = blockIdx.x*TILE_SIZE + threadIdx.x;
    if (col >= m)
        return;
    int baseX = col*stride_row_x;
    int baseA = col*stride_row_a;
    for (int i = n-1; i >= 0; i--){
        float acc = A[baseA + i*stride_col_a];
        for (int j = n-1; j > i; j--){
            acc -= U[i*stride_col_u + j*stride_row_u] * X[baseX + j*stride_col_x];
        }
        X[baseX + i*stride_col_x] = acc;
    }
}
// Single-thread kernel: compute C = inverse of the upper-triangular
// matrix A, proceeding from the last row upward. For each row i:
//   C[i][i] = 1 / A[i][i]
//   C[i][j] = C[j][j] * ( -sum_{k=i..j-1} A[k][j] * C[i][k] )  for j > i
// NOTE(review): intended for tiny matrices only — no parallelism, and
// the whole computation runs in one thread; launch as <<<1,1>>>.
__global__
void k_upper_inverse_ldl_f32(int n, const float* A, int stride_row_a,int stride_col_a, float* C, int stride_row_c, int stride_col_c){
for (int i=n-1; i>= 0; i--){
float factor;
for (int j=i;j<n;j++){
if (j!=i){
// Off-diagonal entry: accumulate the negated partial products
// along row i of C against column j of A, then scale by the
// already-computed reciprocal diagonal C[j][j].
float sum=0.0;
for (int k=i;k<j;k++){
sum-=A[k*stride_col_a+j*stride_row_a]*C[i*stride_col_c+k*stride_row_c];
//printf("j:%d\n",j);
//printf("%f times %f",A[i*stride_col_a+j*stride_row_a],C[i*stride_col_c+k*stride_row_c]);
}
C[i*stride_col_c+j*stride_row_c]=sum*C[j*stride_col_c+j*stride_row_c];
}
else{
// Diagonal entry: reciprocal of A's diagonal.
factor=A[i*stride_col_a+i*stride_row_a];
C[i*stride_col_c+i*stride_row_c]=1/factor;
}
}
}
}
// Debug kernel: print an n x m strided matrix from device code, one
// row per line (%.7g). Launch as <<<1,1>>>.
// NOTE(review): the `name` parameter is unused — the commented-out line
// shows an attempt with an invalid "%name" format. Printing it with %s
// would require `name` to be a device-valid pointer; callers visible in
// this file pass host string literals, so confirm before enabling.
__global__
void k_printmatrix(const char* name, int n, int m, const float* A, int stride_row_a, int stride_col_a){
//printf("matrix:%name\n",name);
printf("matrix:\n");
for (int i=0;i<n;i++){
for (int j=0;j<m;j++){
printf("%.7g \t",A[i*stride_col_a+j*stride_row_a]);
}
printf("\n");
}
}
// Single-thread, in-place LDL^T factorization of the symmetric matrix A
// (packed "choi" form on exit): D's entries overwrite the diagonal, the
// unit-lower-triangular L (diagonal implicit) overwrites the strict
// lower triangle, and the strict upper triangle is zeroed. The update
// order (column j fully finished before column j+1) is essential to the
// in-place scheme. Launch as <<<1,1>>>; intended for small matrices.
__global__
void k_choi_single_f32(int n, float* A, int stride_row, int stride_col){
for (int j=0;j<n;j++){
// D[j][j] = A[j][j] - sum_k L[j][k]^2 * D[k][k]
float sum=0.0;
for (int i=0;i<j;i++){
sum-=A[j*stride_col+i*stride_row]*A[j*stride_col+i*stride_row]*A[i*stride_col+i*stride_row];
}
//printf("sum:%f vs. %f and sum %f and diagonal %d\n",sum,A[j*stride_col+j*stride_row],sum+A[j*stride_col+j*stride_row],j);
A[j*stride_col+j*stride_row]+=sum;
float D_inv=1.0/A[j*stride_col+j*stride_row];
// L[i][j] = (A[i][j] - sum_t L[i][t]*L[j][t]*D[t][t]) / D[j][j]
for (int i=j+1;i<n;i++){
float sum=0.0;
for (int t=0;t<j;t++){
sum-=A[i*stride_col+t*stride_row]*A[j*stride_col+t*stride_row]*A[t*stride_col+t*stride_row];
}
A[i*stride_col+j*stride_row]+=sum;
A[i*stride_col+j*stride_row]*=D_inv;
A[j*stride_col+i*stride_row]=0.0; //can be removed if we do not want to have zeros on upper triangular part
}
}
}
// Copy an m x n strided matrix `source` into `dest`, one thread per
// row; TILE_SIZE threads per block.
__global__
void k_dcopy(int m,int n, float* source, int stride_row_source, int stride_col_source, float* dest, int stride_row_dest, int stride_col_dest){
    const int TILE_SIZE=32;
    int r = blockIdx.x*TILE_SIZE + threadIdx.x;
    if (r >= m)
        return;
    for (int c = 0; c < n; c++){
        dest[r*stride_col_dest + c*stride_row_dest] =
            source[r*stride_col_source + c*stride_row_source];
    }
}
//Solves LX=A, whereas L is a lower triangular matrix and X an unknown Matrix
// Launch k_solve_lower_f32 to solve L*X = A on the device (L lower
// triangular n x n; A, X are n x m, all already resident in device
// memory). One thread per column of X, 64 threads per block.
__host__
void solve_lower_f32_v1_device(int n, int m, const float* L_d, int stride_row_l,int stride_col_l, const float* A_d, int stride_row_a, int stride_col_a, float* X_d, int stride_row_x, int stride_col_x){
    const float blockX = 64; //blocksize x
    dim3 threads = dim3(blockX, 1, 1);
    dim3 blocks = dim3(ceil(m/blockX), 1, 1);
    k_solve_lower_f32<<<blocks,threads>>>(n, m, L_d, stride_row_l, stride_col_l, A_d, stride_row_a, stride_col_a, X_d, stride_row_x, stride_col_x);
}
/*Solves LX=A, whereas L is a lower triangular matrix with dimension nxn and an unknown Matrix X with dimension nxm
Transforms value matrix A
*/
// Blocked forward substitution: repeatedly solves a d x d diagonal block
// L11 * X1 = A1, then updates the remaining right-hand side A2 -= L21 * X1
// via gemm and advances all block pointers; a remainder block (n % d) is
// solved last. NOTE(review): the trailing printf looks like leftover debug
// output.
__host__
void solve_lower_f32_device(int n, int m, float* L_d, int stride_row_l,int stride_col_l, float* A_d, int stride_row_a, int stride_col_a, float* X_d, int stride_row_x, int stride_col_x){
    int d=3;        // diagonal block size
    int rem=n%d;    // rows left over after the full blocks
    int k=n/d;      // number of full d x d diagonal blocks
    float bsmx=64; //blocksize x
    float* A1=A_d;                  // current block of the right-hand side
    float* A2=A1+d*stride_col_a;    // remaining rows below A1
    float* L11=L_d;                 // current diagonal block of L
    float* L21=L11+d*stride_col_l;  // panel of L below the diagonal block
    float* L22=L21+d*stride_row_l;  // next diagonal block
    float* X1=X_d;                  // solution rows for the current block
    for (int i=0;i<k;i++){
        dim3 threadLayout=dim3(bsmx,1,1);
        dim3 grid=dim3(ceil(m/bsmx),1,1);
        k_solve_lower_f32<<<grid,threadLayout>>>(d,m, L11, stride_row_l,stride_col_l, A1, stride_row_a,stride_col_a, X1, stride_row_x, stride_col_x);
        // A2 -= L21 * X1 : eliminate the solved rows from the remaining RHS.
        gemm_f32_device(n-d, m, d, -1.0, L21, stride_row_l,stride_col_l,X1, stride_row_x,stride_col_x, 1.0, A2,stride_row_a, stride_col_a);
        // Advance to the next block row.
        n-=d;
        X1+=d*stride_col_x;
        L11=L22;
        L21=L11+d*stride_col_l;
        L22=L21+d*stride_row_l;
        A1=A2;
        A2+=d*stride_col_a;
    }
    if (rem!=0){
        d=rem;
        dim3 threadLayout=dim3(bsmx,1,1);
        dim3 grid=dim3(ceil(m/bsmx),1,1);
        printf("d:%d\n",d);
        k_solve_lower_f32<<<grid,threadLayout>>>(d,m, L11, stride_row_l,stride_col_l, A1, stride_row_a,stride_col_a, X1, stride_row_x, stride_col_x);
    }
}
/*Solves L*D*X=A, whereas L is a lower triangular matrix with dimension nxn, D a diagonal matrix and an unknown Matrix X with dimension nxm
Transforms value matrix A.
Might not be useful, because we need a domain specific function gemdm that multiplies three matrices
*/
/*
__host__
void choi_solve_lower_f32_device(int n, int m, float* L_d, int stride_row_l,int stride_col_l, float* A_d, int stride_row_a, int stride_col_a, float* X_d, int stride_row_x, int stride_col_x){
int d=3;
int rem=n%d;
int k=n/d;
float bsmx=64; //blocksize x
dim3 threadLayout=dim3(bsmx,1,1);
dim3 grid=dim3(ceil(m/bsmx),1,1);
float* A1=A_d;
float* A2=A1+d*stride_col_a;
float* L11=L_d;
float* L21=L11+d*stride_col_l;
float* L22=L21+d*stride_row_l;
float* X1=X_d;
for (int i=0;i<k;i++){
//k_printmatrix<<<1,1>>>("L_d",d,d,L11,stride_row_l,stride_col_l);
k_choi_solve_lower_f32<<<grid,threadLayout>>>(d,m, L11, stride_row_l,stride_col_l, A1, stride_row_a,stride_col_a, X1, stride_row_x, stride_col_x);
//k_printmatrix<<<1,1>>>("L21",n-d,d,L21,stride_row_l,stride_col_l);
//k_printmatrix<<<1,1>>>("X",d,m,X1,stride_row_x,stride_col_x);
//k_printmatrix<<<1,1>>>("A2",n-d,m,A2,stride_row_a,stride_col_a);
gemdm_f32_device(n-d, m, d, -1.0, L21, stride_row_l,stride_col_l,D,stride_row_d,stride_col_d,X1, stride_row_x,stride_col_x, 1.0, A2,stride_row_a, stride_col_a);
//k_printmatrix<<<1,1>>>("A2",n-d,m,A2,stride_row_a,stride_col_a);
//k_printmatrix<<<1,1>>>("A_d",4,8,A_d,stride_row_a,stride_col_a);
n-=d;
X1+=d*stride_col_x;
L11=L22;
L21=L11+d*stride_col_l;
L22=L21+d*stride_row_l;
A1=A2;
A2+=d*stride_col_a;
}
if (rem!=0){
d=rem;
//printf("d:%d\n",d);
k_choi_solve_lower_f32<<<grid,threadLayout>>>(d,m, L11, stride_row_l,stride_col_l, A1, stride_row_a,stride_col_a, X1, stride_row_x, stride_col_x);
}
printf("done choi solve\n");
}
*/
// Launches the single-threaded kernel that computes the inverse of the upper
// (LDL) factor of A into C. (The previous header comment, "Solves LX=A",
// described a different routine.)
__host__
void upper_inverse_ldl_f32_device(int n, const float* A_d, int stride_row_a,int stride_col_a, float* C_d, int stride_row_c, int stride_col_c){
    k_upper_inverse_ldl_f32<<<1, 1>>>(n,
                                      A_d, stride_row_a, stride_col_a,
                                      C_d, stride_row_c, stride_col_c);
}
// Single-threaded variant: solves L X = A via the temporary kernel.
__host__
void solve_lower_f32_v2_device(int n, const float* L_d, int stride_row_l,int stride_col_l, const float* A_d, int stride_row_a, int stride_col_a, float* X_d, int stride_row_x, int stride_col_x){
    k_solve_lower_f32_temp<<<1, 1>>>(n,
                                     L_d, stride_row_l, stride_col_l,
                                     A_d, stride_row_a, stride_col_a,
                                     X_d, stride_row_x, stride_col_x);
}
//Solves AX=B for matrices A,X and B. A is in LDL format
// Two-stage solve: forward-substitutes with the lower factor into a scratch
// matrix Y (row-major n x m), then back-substitutes with the upper factor
// into X.
__host__
void choi_solve_f32_device(int n, int m, const float* A_d, int stride_row_a,int stride_col_a, const float* B_d, int stride_row_b, int stride_col_b, float* X_d, int stride_row_x, int stride_col_x){
    float bsmx=64;
    float* Y;
    // NOTE(review): cudaMalloc result is unchecked; a failure here would pass
    // an invalid pointer to the kernels below.
    cudaMalloc((void**)&Y,sizeof(float)*n*m);
    k_choi_solve_lower_f32<<<ceil(m/bsmx),bsmx>>>(n,m,A_d,stride_row_a,stride_col_a,B_d,stride_row_b,stride_col_b,Y,1,m);
    // NOTE(review): this launch uses 1 thread per block while the forward
    // solve above uses bsmx -- looks like an oversight; confirm intended.
    k_choi_solve_upper_f32<<<ceil(m/bsmx),1>>>(n,m,A_d,stride_row_a,stride_col_a,Y,1,m,X_d,stride_row_x,stride_col_x); //replace with non kernel version
    cudaFree(Y);
}
// Blocked in-place LDL^T factorization of the n x n matrix A on the device.
// Processes BLOCKSIZE-sized diagonal blocks: factors the block (k_choi_single),
// solves for the panel below it, and applies a rank-d update to the trailing
// submatrix; a remainder block is factored last.
__host__
void choi_f32_device(int n, float* A_d, int stride_row, int stride_col){
    int BLOCKSIZE=32;
    int d=BLOCKSIZE;    // current diagonal block size
    int q=n/d;          // number of full blocks
    int rem=n%d;        // remainder block size
    float* A11=A_d;                 // current diagonal block
    float* A21=A_d+d*stride_col;    // panel below the diagonal block
    int sizeT1=sizeof(float)*d*d;
    int sizeT2=sizeof(float)*n*(n-d);
    float* temp1;
    float* temp2;
    cudaMalloc((void**) &temp1,sizeT1);
    cudaMalloc((void**) &temp2,sizeT2);
    cudaMemset(temp1,0,sizeT1);
    // BUG FIX: previously this passed (void**)&temp2 -- the address of the
    // host pointer variable -- instead of the device pointer itself, so the
    // scratch buffer was never cleared (and the host stack was clobbered).
    cudaMemset(temp2,0,sizeT2);
    for (int i=0;i<q;i++){
        //Calculate L11: factor the d x d diagonal block in place.
        k_choi_single_f32<<<1,1>>>(d, A11,stride_row, stride_col);
        //Calculate L21: stage the panel into temp2, then solve against L11.
        k_dcopy<<<ceil((n-d)/32.0),32>>>(n-d,d,A21,stride_row,stride_col,temp2,n-d,1);
        k_choi_solve_lower_f32<<<ceil((n-d)/64.0),64>>>(d, n-d, A11, stride_row,stride_col, temp2, 1, n-d, A21, stride_col, stride_row);
        //Calculate L22: trailing update A22 -= L21 * (D * L21^T).
        k_choi_dl_gemm<<<ceil(d/64.0),64>>>(d, n-d, A11, stride_row, stride_col, A21, stride_col,stride_row,temp2, 1, n-d);
        A11+=d*stride_row+d*stride_col;
        gemm_f32_device(n-d, n-d, d, -1.0, A21, stride_row, stride_col, temp2, 1, n-d, 1.0, A11,stride_row, stride_col);
        A21=A11+d*stride_col;
        n-=d;
    }
    if (rem!=0){
        d=rem;
        k_choi_single_f32<<<1,1>>>(d, A11,stride_row, stride_col);
    }
    // BUG FIX: the scratch buffers were previously leaked on every call.
    cudaFree(temp1);
    cudaFree(temp2);
}
// TODO(review): unimplemented stub with an unnamed float* parameter;
// presumably a planned host-side LDL entry point -- confirm before use.
void ldl(float* ){
}
|
1,769
|
#include <cuda.h>
#include <cuda_runtime.h>
#include "ColorConverterKernels.cuh"
#include "../errorCheck.cuh"
// Builds a 256-bin histogram of the byte buffer `data` (length `size`) into
// the global array `hist`. Each block accumulates a private shared-memory
// histogram, then merges it into `hist` with atomics. Requires
// blockDim.x >= 256 and a zero-initialized `hist` (the caller memsets it).
__global__ void kernelCalcHist(unsigned char* data, unsigned int* hist,
		unsigned int size) {
	// Shared memory for this block's local histogram
	__shared__ unsigned int temp[256];
	// Thread i of the block zeroes bin i
	if (threadIdx.x < 256)
		temp[threadIdx.x] = 0;
	__syncthreads(); // all 256 bins are now cleared
	// Get index
	// i = x + y * width;
	// The pixel processed by this thread.
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	// Accumulate into the shared-memory histogram
	if (i < size)
		atomicAdd(&temp[data[i]], 1);
	// Wait until all threads have finished their local accumulation.
	__syncthreads();
	// The first 256 threads of each block merge the local bins globally
	if (threadIdx.x < 256)
		atomicAdd(&(hist[threadIdx.x]), temp[threadIdx.x]);
}
// Launches the histogram kernel over one thread per input byte.
void cudaCalcHist_dev(unsigned char* dev_data, unsigned int* dev_hist,
		unsigned int size) {
	const int threads = 1024;
	int blocks = size / threads;
	if (size % threads != 0)
		blocks++;
	kernelCalcHist<<<blocks, threads>>>(dev_data, dev_hist, size);
	CudaCheckError();
}
// Host entry point: computes a 256-bin histogram of `data` on the GPU.
// Returns false (doing nothing) for inputs larger than 1 MiB.
bool cudaCalcHist(unsigned char* data, unsigned int* hist, unsigned int size) {
	if (size > 1024 * 1024) {
		return false;
	}
	const unsigned int histSize = 256;
	const unsigned int histBuffSize = histSize * sizeof(int);
	unsigned char* dev_data;
	unsigned int* dev_hist;
	// Device buffers: input bytes plus a zeroed histogram.
	CudaSafeCall(cudaMalloc((void**) &dev_data, size));
	CudaSafeCall(cudaMalloc((void**) &dev_hist, histBuffSize));
	CudaSafeCall(cudaMemset(dev_hist, 0, histBuffSize));
	CudaSafeCall(cudaMemcpy(dev_data, data, size, cudaMemcpyHostToDevice));
	cudaCalcHist_dev(dev_data, dev_hist, size);
	CudaSafeCall(cudaMemcpy(hist, dev_hist, histBuffSize, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaFree(dev_data));
	CudaSafeCall(cudaFree(dev_hist));
	return true;
}
// Converts interleaved 3-byte color pixels to 8-bit luma, one thread per
// pixel, using ITU-R BT.601 weights (0.299 / 0.587 / 0.114). The 0.299
// weight is applied to byte [3*i+2] -- assumes BGR byte order; TODO confirm
// against the capture source.
__global__ void kernelRGBToGray(unsigned char* dev_rgbData,
		unsigned char* dev_grayData, unsigned int imgSize) {
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	if (i < imgSize)
		// BUG FIX: the first weight was 0.144, a typo for BT.601's 0.114;
		// with 0.144 the weights summed to 1.03 and bright pixels could
		// overflow the unsigned char result.
		dev_grayData[i] = dev_rgbData[3 * i] * 0.114
				+ dev_rgbData[3 * i + 1] * 0.587
				+ dev_rgbData[3 * i + 2] * 0.299;
}
// Launches the color-to-gray kernel over one thread per pixel.
void cudaRGBToGray_dev(unsigned char* dev_rgbData, unsigned char* dev_grayData,
		unsigned int imgSize) {
	const int threads = 1024;
	int blocks = imgSize / threads;
	if (imgSize % threads != 0)
		blocks++;
	kernelRGBToGray<<<blocks, threads>>>(dev_rgbData, dev_grayData, imgSize);
	CudaCheckError();
}
// Host entry point: converts a 3-byte-per-pixel color image to grayscale.
// Returns false (doing nothing) for images larger than 1 Mpixel.
bool cudaRGBToGray(unsigned char* host_rgbData, unsigned char* host_grayData,
		unsigned int imgSize) {
	if (imgSize > 1024 * 1024)
		return false;
	unsigned char* dev_rgbData;
	unsigned char* dev_grayData;
	const unsigned int rgbBytes = imgSize * 3; // 3 bytes per pixel
	CudaSafeCall(cudaMalloc((void**) &dev_rgbData, rgbBytes));
	CudaSafeCall(cudaMalloc((void**) &dev_grayData, imgSize));
	CudaSafeCall(cudaMemcpy(dev_rgbData, host_rgbData, rgbBytes, cudaMemcpyHostToDevice));
	cudaRGBToGray_dev(dev_rgbData, dev_grayData, imgSize);
	CudaSafeCall(cudaMemcpy(host_grayData, dev_grayData, imgSize, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaFree(dev_rgbData));
	CudaSafeCall(cudaFree(dev_grayData));
	return true;
}
// Converts YUY2 (YUYV 4:2:2) to packed RGB. Each thread handles one
// macropixel: two image pixels (y1, y2) sharing one U/V pair. `imgSize` is
// the pixel count; the YUY2 buffer is 2 bytes/pixel, RGB is 3 bytes/pixel.
__global__ void kernelYUY2ToRGB(unsigned char* dev_yuy2Data,
		unsigned char* dev_rgbData, unsigned int imgSize) {
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	i *= 2; // index of the first of this thread's two pixels
	// NOTE(review): imgSize is unsigned, so imgSize - 1 wraps for
	// imgSize == 0; callers should guard against empty images.
	if (i < imgSize - 1) {
		unsigned char* yuy2Buff = dev_yuy2Data + i * 2;
		unsigned char* rgbBuff = dev_rgbData + i * 3;
		int u, v, y1, y2;
		y1 = *yuy2Buff++;
		u = *yuy2Buff++;
		y2 = *yuy2Buff++;
		v = *yuy2Buff++;
		// Integer operation of ITU-R standard for YCbCr is (from Wikipedia)
		// https://en.wikipedia.org/wiki/YUV#Y.27UV422_to_RGB888_conversion
		u = u - 128;
		v = v - 128;
		*rgbBuff++ = CLIP(y1 + 45 * v / 32);
		*rgbBuff++ = CLIP(y1 - (11 * u + 23 * v) / 32);
		*rgbBuff++ = CLIP(y1 + 113 * u / 64);
		*rgbBuff++ = CLIP(y2 + 45 * v / 32);
		*rgbBuff++ = CLIP(y2 - (11 * u + 23 * v) / 32);
		*rgbBuff++ = CLIP(y2 + 113 * u / 64);
	}
}
// Launches the YUY2-to-RGB kernel; each thread converts one macropixel
// (two pixels), hence the grid covers imgSize / 2 threads.
void cudaYUY2ToRGB_dev(unsigned char* dev_yuy2Data, unsigned char* dev_rgbData,
		unsigned int imgSize) {
	int blockSize = 1024;
	int nBlocks = (imgSize / 2) / blockSize
			+ ((imgSize / 2) % blockSize == 0 ? 0 : 1);
	kernelYUY2ToRGB<<<nBlocks, blockSize>>>(dev_yuy2Data, dev_rgbData, imgSize);
	// Consistency fix: every other launcher in this file checks the launch
	// for errors; this one previously did not.
	CudaCheckError();
}
// Host entry point: converts a YUY2 image of imgSize pixels to packed RGB.
bool cudaYUY2ToRGB(unsigned char* host_yuy2Data, unsigned char* host_rgbData,
		unsigned int imgSize) {
	unsigned char* dev_rgbData;
	unsigned char* dev_yuy2Data;
	const unsigned int yuy2Bytes = imgSize * 2; // 2 bytes per pixel
	const unsigned int rgbBytes = imgSize * 3;  // 3 bytes per pixel
	CudaSafeCall(cudaMalloc((void**) &dev_rgbData, rgbBytes));
	CudaSafeCall(cudaMalloc((void**) &dev_yuy2Data, yuy2Bytes));
	CudaSafeCall(cudaMemcpy(dev_yuy2Data, host_yuy2Data, yuy2Bytes,
			cudaMemcpyHostToDevice));
	cudaYUY2ToRGB_dev(dev_yuy2Data, dev_rgbData, imgSize);
	CudaSafeCall(cudaMemcpy(host_rgbData, dev_rgbData, rgbBytes, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaFree(dev_rgbData));
	CudaSafeCall(cudaFree(dev_yuy2Data));
	return true;
}
// Extracts the Y (luma) byte of each YUY2 pixel (every second byte of the
// interleaved Y0 U Y1 V stream), one thread per pixel.
__global__ void kernelYUY2ToGray(unsigned char* dev_yuy2Data,
		unsigned char* dev_grayData, unsigned int imgSize) {
	int i = threadIdx.x + blockIdx.x * blockDim.x;
	// BUG FIX: threads of the final partial block previously read and wrote
	// past the end of both buffers; guard like the other kernels here.
	if (i < imgSize)
		dev_grayData[i] = dev_yuy2Data[i * 2];
}
// Launches the YUY2-to-gray kernel over one thread per pixel.
void cudaYUY2ToGray_dev(unsigned char* dev_yuy2Data,
		unsigned char* dev_grayData, unsigned int imgSize) {
	const int threads = 1024;
	int blocks = imgSize / threads;
	if (imgSize % threads != 0)
		blocks++;
	kernelYUY2ToGray<<<blocks, threads>>>(dev_yuy2Data, dev_grayData,
			imgSize);
	CudaCheckError();
}
// Host entry point: extracts the luma channel of a YUY2 image of imgSize
// pixels into an 8-bit grayscale buffer.
bool cudaYUY2ToGray(unsigned char* host_yuy2Data, unsigned char* host_grayData,
		unsigned int imgSize) {
	unsigned char* dev_grayData;
	unsigned char* dev_yuy2Data;
	const unsigned int yuy2Bytes = imgSize * 2; // 2 bytes per pixel
	CudaSafeCall(cudaMalloc((void**) &dev_grayData, imgSize));
	CudaSafeCall(cudaMalloc((void**) &dev_yuy2Data, yuy2Bytes));
	CudaSafeCall(cudaMemcpy(dev_yuy2Data, host_yuy2Data, yuy2Bytes,
			cudaMemcpyHostToDevice));
	cudaYUY2ToGray_dev(dev_yuy2Data, dev_grayData, imgSize);
	CudaSafeCall(cudaMemcpy(host_grayData, dev_grayData, imgSize, cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaFree(dev_grayData));
	CudaSafeCall(cudaFree(dev_yuy2Data));
	return true;
}
// Maps an 8-bit grayscale image to pseudo-color RGB: the gray value selects
// an HSV hue between minH and maxH (h: 0-359, 0 = red, 120 = green) at full
// saturation and value, which is then converted to RGB. Gray 0 maps to black
// and values above maxGray are clamped.
__global__ void kernelGrayToPseudoColor(unsigned char* dev_inputImg,
		unsigned char* dev_pseudoColor, int imgSize, int maxGray, int minH,
		int maxH) {
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx < imgSize) {
		// BUG FIX: r/g/b were uninitialized; if h fell outside every branch
		// below (possible when minH/maxH put h < 0 or h >= 360), undefined
		// values were written to the output.
		int h, s, v, r = 0, b = 0, g = 0;
		// s: 0 - 100, v: 0 - 100
		s = 100;
		v = 100;
		int grayVal = dev_inputImg[idx];
		if (grayVal > maxGray)
			grayVal = maxGray;
		if (grayVal > 0) {
			h = (100 * grayVal / maxGray) * (maxH - minH) / 100 + minH;
			// HSV to RGB: hue angle 0-360 in 1-degree steps; the ramp
			// slope is 4.25 per degree across each 60-degree segment.
			if (h < 61) {
				r = 255;
				b = 0;
				g = 4.25 * h;
			} else if (h < 121) {
				g = 255;
				b = 0;
				r = 255 - (4.25 * (h - 60));
			} else if (h < 181) {
				r = 0;
				g = 255;
				b = 4.25 * (h - 120);
			} else if (h < 241) {
				r = 0;
				b = 255;
				g = 255 - (4.25 * (h - 180));
			} else if (h < 301) {
				g = 0;
				b = 255;
				r = 4.25 * (h - 240);
			} else if (h < 360) {
				r = 255;
				g = 0;
				b = 255 - (4.25 * (h - 300));
			}
			// Apply saturation (with s fixed at 100 this is a no-op).
			int diff;
			s = 100 - s; // invert
			diff = ((255 - r) * s) / 100;
			r = r + diff;
			diff = ((255 - g) * s) / 100;
			g = g + diff;
			diff = ((255 - b) * s) / 100;
			b = b + diff;
			// Apply value (brightness), simple 0..100% scaling.
			r = (r * v) / 100;
			g = (g * v) / 100;
			b = (b * v) / 100;
			dev_pseudoColor[idx * 3] = r;
			dev_pseudoColor[idx * 3 + 1] = g;
			dev_pseudoColor[idx * 3 + 2] = b;
		} else {
			// Gray value 0 -> black output pixel.
			dev_pseudoColor[idx * 3] = 0;
			dev_pseudoColor[idx * 3 + 1] = 0;
			dev_pseudoColor[idx * 3 + 2] = 0;
		}
	}
}
// Float-input overload of the pseudo-color mapping. When scaledFloat is set
// the input is interpreted as a [0,1]-scaled intensity and expanded to 0-255
// before quantization; otherwise the raw float value is truncated to int.
__global__ void kernelGrayToPseudoColor(float* dev_inputImg,
		unsigned char* dev_pseudoColor, int imgSize, int maxGray, int minH,
		int maxH, bool scaledFloat) {
	int idx = threadIdx.x + blockIdx.x * blockDim.x;
	if (idx < imgSize) {
		// BUG FIX: r/g/b were uninitialized; h outside every branch below
		// previously wrote undefined values.
		int h, s, v, r = 0, b = 0, g = 0;
		// s: 0 - 100, v: 0 - 100
		s = 100;
		v = 100;
		// BUG FIX: the input was truncated to int *before* the 255 scaling,
		// so scaled floats in [0,1) always collapsed to 0; scale first.
		float gray = dev_inputImg[idx];
		if (scaledFloat)
			gray *= 255.0f;
		int grayVal = gray;
		if (grayVal > maxGray)
			grayVal = maxGray;
		if (grayVal > 0) {
			h = (100 * grayVal / maxGray) * (maxH - minH) / 100 + minH;
			// HSV to RGB: hue angle 0-360 in 1-degree steps; ramp slope
			// 4.25 per degree across each 60-degree segment.
			if (h < 61) {
				r = 255;
				b = 0;
				g = 4.25 * h;
			} else if (h < 121) {
				g = 255;
				b = 0;
				r = 255 - (4.25 * (h - 60));
			} else if (h < 181) {
				r = 0;
				g = 255;
				b = 4.25 * (h - 120);
			} else if (h < 241) {
				r = 0;
				b = 255;
				g = 255 - (4.25 * (h - 180));
			} else if (h < 301) {
				g = 0;
				b = 255;
				r = 4.25 * (h - 240);
			} else if (h < 360) {
				r = 255;
				g = 0;
				b = 255 - (4.25 * (h - 300));
			}
			// Apply saturation (with s fixed at 100 this is a no-op).
			int diff;
			s = 100 - s; // invert
			diff = ((255 - r) * s) / 100;
			r = r + diff;
			diff = ((255 - g) * s) / 100;
			g = g + diff;
			diff = ((255 - b) * s) / 100;
			b = b + diff;
			// Apply value (brightness), simple 0..100% scaling.
			r = (r * v) / 100;
			g = (g * v) / 100;
			b = (b * v) / 100;
			dev_pseudoColor[idx * 3] = r;
			dev_pseudoColor[idx * 3 + 1] = g;
			dev_pseudoColor[idx * 3 + 2] = b;
		} else {
			// Non-positive intensity -> black output pixel.
			dev_pseudoColor[idx * 3] = 0;
			dev_pseudoColor[idx * 3 + 1] = 0;
			dev_pseudoColor[idx * 3 + 2] = 0;
		}
	}
}
// Launches the pseudo-color kernel for an 8-bit grayscale device image.
void cudaGrayToPseudoColor_dev(unsigned char* dev_grayImg,
		unsigned char* dev_pseudoColor, int imgSize, int maxGray, int minH,
		int maxH) {
	const int threads = 1024;
	int blocks = imgSize / threads + (imgSize % threads ? 1 : 0);
	kernelGrayToPseudoColor<<<blocks, threads>>>(dev_grayImg, dev_pseudoColor,
			imgSize, maxGray, minH, maxH);
	CudaCheckError();
}
// Host entry point: maps an 8-bit grayscale host image to a pseudo-color
// RGB host image via the device kernel.
void cudaGrayToPseudoColor(unsigned char* host_grayImg,
		unsigned char* host_pseudoColor, int imgSize, int maxGray, int minH,
		int maxH) {
	unsigned char* dev_grayImg;
	unsigned char* dev_pseudoColor;
	const int rgbBytes = imgSize * 3; // 3 bytes per output pixel
	CudaSafeCall(cudaMalloc((void**) &dev_grayImg, imgSize));
	CudaSafeCall(cudaMalloc((void**) &dev_pseudoColor, rgbBytes));
	CudaSafeCall(cudaMemcpy(dev_grayImg, host_grayImg, imgSize, cudaMemcpyHostToDevice));
	cudaGrayToPseudoColor_dev(dev_grayImg, dev_pseudoColor, imgSize, maxGray,
			minH, maxH);
	CudaSafeCall(cudaMemcpy(host_pseudoColor, dev_pseudoColor, rgbBytes,
			cudaMemcpyDeviceToHost));
	CudaSafeCall(cudaFree(dev_grayImg));
	CudaSafeCall(cudaFree(dev_pseudoColor));
}
|
1,770
|
#include <cstdio>
#include <fstream>
#include <iostream>
#include <math.h>
using namespace std;
// Lid-driven cavity configuration: 41 x 41 grid over a 2 x 2 domain.
int nx = 41;
int ny = 41;
int grid_size = nx * ny;
int SIZE = grid_size * sizeof(float);
int nt = 700;   // number of time steps
int nit = 50;   // pressure Poisson iterations per time step
float c = 1.0;
float dx = 2.0 / (nx - 1);
float dy = 2.0 / (ny - 1);
// BUG FIX: rho was declared `int`, silently truncating any non-integer
// density; every kernel takes it as a float parameter.
float rho = 1.0;
float nu = 0.1;   // kinematic viscosity
float dt = 0.001; // time step size
// CUDA configs
int n_threads = 1024; // per block
int n_blocks = (nx * ny + n_threads - 1) / n_threads;
// Writes the u, v and p fields to "cavity_cu_results.txt": one labeled,
// whitespace-separated row per field.
__host__ void writeFile(float* u, float* v, float* p) {
	ofstream fs("cavity_cu_results.txt");
	float* fields[3] = { u, v, p };
	const char* labels[3] = { "u ", "v ", "p " };
	for (int f = 0; f < 3; f++) {
		fs << labels[f];
		for (int i = 0; i < grid_size; i++)
			fs << fields[f][i] << " ";
		fs << "\n";
	}
	fs.close();
}
// Computes the source term b of the pressure Poisson equation at every
// interior point of the flattened nx x ny grid (index i = x + y * nx).
// Threads on any boundary row/column (or past the grid) return early.
__global__ void build_up_b(float *u, float *v, float *b, int nx, int ny, float dx, float dy, float dt, float rho) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if (i < nx) return;                     // bottom row
	else if (i % nx == 0) return;           // left column
	else if (i % nx == nx - 1) return;      // right column
	else if (i > (ny - 1) * nx - 1) return; // top row and anything past the grid
	// Central differences for all first derivatives (stencil offsets:
	// +-1 in x, +-nx in y).
	b[i] = (rho * ( 1.0 / dt *
		((u[i+1] - u[i-1]) /
		(2.0 * dx) + (v[i+nx] - v[i-nx]) / (2.0 * dy)) -
		((u[i+1] - u[i-1]) / (2.0 * dx)) * ((u[i+1] - u[i-1]) / (2.0 * dx)) -
		2.0 * ((u[i+nx] - u[i-nx]) / (2.0 * dy) *
		(v[i+1] - v[i-1]) / (2.0 * dx)) -
		((v[i+nx] - v[i-nx]) / (2.0 * dy)) * ((v[i+nx] - v[i-nx]) / (2.0 * dy))));
}
// One Jacobi iteration of the pressure Poisson equation: interior points are
// updated from the previous iterate pn, then the pressure boundary
// conditions are applied (zero-gradient on the side/bottom walls, p = 0 on
// the top row).
__global__ void pressure_poisson(float *p, float *pn, float *b, int nx, int ny, float dx, float dy) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	// Interior update (not on any of the four boundaries).
	if ((i >= nx) && (i % nx != 0) && (i % nx != nx - 1) && (i <= nx * (ny - 1) - 1)) {
		p[i] = (((pn[i+1] + pn[i-1]) * pow(dy, 2) +
			(pn[i+nx] + pn[i-nx]) * pow(dx, 2)) /
			(2 * (pow(dx, 2) + pow(dy, 2))) -
			pow(dx, 2) * pow(dy, 2) / (2 * (pow(dx, 2) + pow(dy, 2))) *
			b[i]);
	}
	if (i >= nx * ny) return;
	// Boundary conditions.
	if (i % nx == nx - 1) p[i] = p[i-1];      // dp/dx = 0 at right wall
	else if (i % nx == 0) p[i] = p[i+1];      // dp/dx = 0 at left wall
	else if (i < nx) p[i] = p[i+nx];          // dp/dy = 0 at bottom wall
	else if (i >= nx*(ny - 1)) p[i] = 0.0;    // p = 0 at the lid
}
// Advances the u velocity one time step at interior points (backward-
// difference convection, central pressure gradient, central diffusion),
// then applies boundary conditions: u = 0 on the side and bottom walls,
// u = 1 on the moving lid (top row).
__global__ void cavity_flow_u_update(float *u, float *un, float *vn, float *p, int nx, int ny, float nu, float dx, float dy, float dt, float rho) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if ((i >= nx) && (i % nx != nx - 1) && (i % nx != 0) && (i < nx * (ny - 1))) {
		u[i] = (un[i] -
			un[i] * dt / dx *
			(un[i] - un[i-1]) -
			vn[i] * dt / dy *
			(un[i] - un[i-nx]) -
			dt/ (2.0 * rho * dx) * (p[i+1] - p[i-1]) +
			nu * (dt / pow(dx, 2.0) *
			(un[i+1] - 2.0 * un[i] + un[i-1]) +
			dt / pow(dy, 2.0) *
			(un[i+nx] - 2.0 * un[i] + un[i-nx])));
	}
	if(i >= nx*ny) return;
	// Boundary conditions.
	if(i%nx == nx-1) u[i] = 0.0;
	else if(i < nx) u[i] = 0.0;
	else if(i%nx == 0) u[i] = 0.0;
	else if(i >= nx*(ny-1)) u[i] = 1.0; // lid moves with unit velocity
}
// Advances the v velocity one time step at interior points (backward-
// difference convection, central pressure gradient, central diffusion),
// then applies v = 0 on all four walls.
__global__ void cavity_flow_v_update(float *v, float *vn, float *un, float *p, int nx, int ny, float nu, float dx, float dy, float dt, float rho) {
	int i = blockIdx.x * blockDim.x + threadIdx.x;
	if ((i >= nx) && (i % nx != nx - 1) && (i % nx != 0) && (i < nx * (ny - 1))) {
		v[i] = (vn[i] -
			// BUG FIX: the x-direction convection of v must be advected by
			// the u velocity (un), mirroring the u-momentum update above;
			// the original multiplied by vn here.
			un[i] * dt / dx *
			(vn[i] - vn[i-1]) -
			vn[i] * dt / dy *
			(vn[i] - vn[i-nx]) -
			dt / (2 * rho * dy) * (p[i+nx] - p[i-nx]) +
			nu * (dt / pow(dx, 2) *
			(vn[i+1] - 2 * vn[i] + vn[i-1]) +
			dt / pow(dy, 2) *
			(vn[i+nx] - 2.0 * vn[i] + vn[i-nx])));
	}
	if(i >= nx*ny) return;
	// Boundary conditions: v vanishes on every wall.
	if(i%nx == nx-1) v[i] = 0.0;
	else if(i < nx) v[i] = 0.0;
	else if(i%nx == 0) v[i] = 0.0;
	else if(i >= nx*(ny-1)) v[i] = 0.0;
}
// Time-marching driver: nt outer steps, each with nit Jacobi iterations of
// the pressure Poisson solve. un/vn/pn hold the previous iterates and b the
// Poisson source term; all scratch buffers live in unified memory.
void cavity_flow(float *u, float *v, float *p) {
	float *un, *vn, *pn, *b;
	cudaMallocManaged(&un, SIZE);
	cudaMallocManaged(&vn, SIZE);
	cudaMallocManaged(&pn, SIZE);
	cudaMallocManaged(&b, SIZE);
	// cudaMemset takes an int fill byte; the 0.0 literal converts to 0,
	// which also zero-fills floats.
	cudaMemset(un, 0.0, SIZE);
	cudaMemset(vn, 0.0, SIZE);
	cudaMemset(pn, 0.0, SIZE);
	cudaMemset(b, 0.0, SIZE);
	for (int n = 0; n < nt; n++) {
		// Snapshot the current velocities before updating in place.
		cudaMemcpy(un, u, SIZE, cudaMemcpyDeviceToDevice);
		cudaMemcpy(vn, v, SIZE, cudaMemcpyDeviceToDevice);
		build_up_b<<<n_blocks, n_threads>>>(u, v, b, nx, ny, dx, dy, dt, rho);
		cudaDeviceSynchronize();
		// Iterate the pressure Poisson equation to (approximate) convergence.
		for (int q = 0; q < nit; q++) {
			cudaMemcpy(pn, p, SIZE, cudaMemcpyDeviceToDevice);
			pressure_poisson<<<n_blocks, n_threads>>>(p, pn, b, nx, ny, dx, dy);
			cudaDeviceSynchronize();
		}
		// The two momentum updates read only un/vn/p, so they can be
		// launched back to back before synchronizing.
		cavity_flow_u_update<<<n_blocks, n_threads>>>(u, un, vn, p, nx, ny, nu, dx, dy, dt, rho);
		cavity_flow_v_update<<<n_blocks, n_threads>>>(v, vn, un, p, nx, ny, nu, dx, dy, dt, rho);
		cudaDeviceSynchronize();
	}
	cudaFree(un);
	cudaFree(vn);
	cudaFree(pn);
	cudaFree(b);
}
// Entry point: allocate the fields in unified memory, run the simulation,
// dump the results, and release everything.
int main() {
	float *u, *v, *p;
	cudaMallocManaged(&u, SIZE);
	cudaMallocManaged(&v, SIZE);
	cudaMallocManaged(&p, SIZE);
	// Zero-initialize all three fields (fill byte 0 zero-fills floats).
	cudaMemset(u, 0, SIZE);
	cudaMemset(v, 0, SIZE);
	cudaMemset(p, 0, SIZE);
	cavity_flow(u, v, p);
	writeFile(u, v, p);
	cudaFree(u);
	cudaFree(v);
	cudaFree(p);
	return 0;
}
|
1,771
|
#include "includes.h"
// Dot product of vectors a and b of length N (N comes from includes.h --
// not visible here). Each thread accumulates a grid-strided partial product
// into the global scratch array temp; each block then tree-reduces its slice
// of temp and thread 0 writes the block result to c[blockIdx.x]. The host
// must sum c[] afterwards. temp must hold gridDim.x * blockDim.x ints.
__global__ void dot(int *a, int *b, int *temp, int *c)
{
	int outputIndex = blockIdx.x * blockDim.x + threadIdx.x;
	int i = outputIndex;
	int result = 0;
	/* multiplication step: compute partial sum */
	while(i < N)
	{
		result += a[i] * b[i];
		i += blockDim.x * gridDim.x;
	}
	temp[outputIndex] = result;
	/* wait for all threads to be done multiplying */
	__syncthreads();
	/* reduction step: sum all entries in the block and write to c */
	/* this requires that blockDim.x be a power of two! */
	i = blockDim.x / 2;
	while (i != 0)
	{
		/* only threads 0 through i-1 are busy */
		if (threadIdx.x < i)
		{
			/* sum our output element with the one half a block away */
			temp[outputIndex] += temp[outputIndex + i];
		}
		/* wait for all threads within the block */
		__syncthreads();
		i /= 2;
	}
	/* thread 0 writes the result for this block */
	if (threadIdx.x == 0)
	{
		c[blockIdx.x] = temp[outputIndex];
	}
}
|
1,772
|
#include "includes.h"
// Copies the first spine-sum entry of each block into the per-block offset
// array. The bound is inclusive: bid == nr_total_blocks is also written, so
// d_off receives nr_total_blocks + 1 entries -- NOTE(review): presumably an
// intentional CSR-style end offset; confirm d_off is sized accordingly.
__global__ static void mprts_update_offsets(int nr_total_blocks, uint* d_off, uint* d_spine_sums)
{
	int bid = threadIdx.x + THREADS_PER_BLOCK * blockIdx.x;
	if (bid <= nr_total_blocks) {
		d_off[bid] = d_spine_sums[bid * CUDA_BND_STRIDE + 0];
	}
}
|
1,773
|
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <iostream>
#include <chrono>
#include <thrust/functional.h>
#include <thrust/iterator/constant_iterator.h>
// Reads pairs of closing prices (MSFT, AAPL) from stdin and uses Thrust on
// the device to compute the mean and population variance of their
// element-wise difference.
int main() {
	std::vector<double> stocks_ms, stocks_aapl;
	// BUG FIX: the original looped on `while (std::cin)`, which runs one
	// extra iteration after the stream fails and pushes a stale/indeterminate
	// pair; testing the extraction itself avoids that off-by-one.
	double mstf, aapl;
	while (std::cin >> mstf >> aapl) {
		stocks_aapl.push_back(aapl);
		stocks_ms.push_back(mstf);
	}
	int N = stocks_aapl.size();
	std::cerr << stocks_aapl.size() << "\n";
	thrust::device_vector<double> stocks_ms_dev(stocks_ms), stocks_aapl_dev (stocks_aapl);
	thrust::device_vector<double> diff(N);
	// diff = ms - aapl, element-wise
	thrust::transform(stocks_ms_dev.begin(), stocks_ms_dev.end(),stocks_aapl_dev.begin(),diff.begin(),thrust::minus<double>());
	double soma = thrust::reduce(diff.begin(),diff.end());
	double media = soma/stocks_ms.size();
	// Center the differences, square in place, then sum for the variance.
	thrust::device_vector<double> diff_media_dev(N);
	thrust::transform(diff.begin(),diff.end(),thrust::make_constant_iterator(media),diff_media_dev.begin(),thrust::minus<double>());
	thrust::transform(diff_media_dev.begin(),diff_media_dev.end(),diff_media_dev.begin(),diff_media_dev.begin(),thrust::multiplies<double>());
	// Renamed from `std`, which shadowed the std namespace.
	double variance = thrust::reduce(diff_media_dev.begin(),diff_media_dev.end());
	variance = variance / N;
	std::cout << "Media: " << media << " Var: " << variance << "\n";
	return 0;
}
|
1,774
|
#include "NeuralNetwork.cuh"
/**
* Creates a neural network with the specified number of layers
* and neurons
* Parameter layers: the number of layers in the neural network
* Parameter neurons: an array with the number of neurons for each layer
* Returns: a NeuralNet with the specified layers/neurons
*/
NeuralNet* createNeuralNet(int layers, int* neurons, activation** activations){
	// Seeds the randomizer
	srand(time(NULL));
	// Creates the neural net in unified (managed) memory so both host and
	// device code can touch it.
	// NOTE(review): none of the cudaMallocManaged calls below are checked;
	// an allocation failure would crash on the following write.
	NeuralNet* nn;
	cudaMallocManaged(&nn, sizeof(NeuralNet));
	// Sets the attributes of the neural net
	nn->layers = layers;
	cudaMallocManaged(&nn->neurons, layers*sizeof(int));
	for(int layer=0; layer<layers; layer++){
		nn->neurons[layer] = neurons[layer];
	}
	// Allocates memory for the activation function enum
	// (one array per non-input layer).
	cudaMallocManaged(&nn->activations,
		(layers - 1) * sizeof(activation*));
	// Allocates memory for the weights/biases and assigns random values.
	// weights[layer] is a neurons[layer] x neurons[layer+1] jagged array;
	// biases[layer] has one entry per neuron of layer+1.
	cudaMallocManaged(&nn->biases, (layers - 1) * sizeof(double*));
	cudaMallocManaged(&nn->weights, (layers - 1) * sizeof(double**));
	for(int layer = 0; layer < layers-1; layer++){
		cudaMallocManaged(&nn->activations[layer],
			neurons[layer + 1] *sizeof(activation));
		cudaMallocManaged(&nn->biases[layer],
			neurons[layer+1] * sizeof(double));
		cudaMallocManaged(&nn->weights[layer],
			neurons[layer] * sizeof(double*));
		// Biases and activations for each destination neuron; biases are
		// uniform random in [0, 1].
		for(int neuron1 = 0; neuron1 < neurons[layer+1]; neuron1++){
			nn->biases[layer][neuron1] = double(rand())/RAND_MAX;
			nn->activations[layer][neuron1] =
				activations[layer][neuron1];
		}
		// One weight row per source neuron, random in [0, 1].
		for(int neuron1 = 0; neuron1 < neurons[layer]; neuron1++){
			cudaMallocManaged(&nn->weights[layer][neuron1],
				neurons[layer+1] * sizeof(double));
			for(int neuron2 = 0; neuron2 < neurons[layer+1];
				neuron2++){
				nn->weights[layer][neuron1][neuron2] =
					double(rand())/RAND_MAX;
			}
		}
	}
	return nn;
}
// Releases every buffer allocated by createNeuralNet, innermost first, and
// finally the NeuralNet struct itself.
void freeNeuralNet(NeuralNet* nn){
	int lastLayer = nn->layers - 1;
	for(int layer = 0; layer < lastLayer; layer++){
		// Per-source-neuron weight rows for this layer.
		for(int n1 = 0; n1 < nn->neurons[layer]; n1++){
			cudaFree(nn->weights[layer][n1]);
		}
		cudaFree(nn->activations[layer]);
		cudaFree(nn->biases[layer]);
		cudaFree(nn->weights[layer]);
	}
	cudaFree(nn->neurons);
	cudaFree(nn->activations);
	cudaFree(nn->biases);
	cudaFree(nn->weights);
	cudaFree(nn);
}
// Dumps the network structure to stdout: per layer, its neuron count, the
// activations and biases feeding into it (none for the input layer), and the
// outgoing weight matrix (none for the output layer).
void printNeuralNet(NeuralNet* nn){
	printf("Layers = %d\n", nn->layers);
	for(int layer = 0; layer < nn->layers; layer++){
		printf("\nLayer %d Neurons %d\n", layer, nn->neurons[layer]);
		// Prints the activations and biases (indexed layer-1, since these
		// arrays only exist for non-input layers).
		if(layer != 0){
			printf("Activations for this layer:\n");
			for(int neuron1 = 0; neuron1 < nn->neurons[layer];
				neuron1++){
				printf("%d\t", nn->activations[layer-1][neuron1]);
			}
			printf("\n");
			printf("Biases for this layer:\n");
			for(int neuron1 = 0; neuron1 < nn->neurons[layer];
				neuron1++){
				printf("%f\t", nn->biases[layer-1][neuron1]);
			}
			printf("\n");
		}
		else{
			printf("No biases in this layer\n");
		}
		// Prints the weights (rows = this layer's neurons, columns = next
		// layer's neurons).
		if(layer != nn->layers - 1){
			printf("Weights for this layer:\n");
			for(int neuron1 = 0; neuron1 < nn->neurons[layer];
				neuron1++){
				for(int neuron2 = 0;
					neuron2 < nn->neurons[layer+1];
					neuron2++){
					printf("%f\t", nn->weights[layer]\
						[neuron1][neuron2]);
				}
				printf("\n");
			}
		}
	}
}
|
1,775
|
/*
***** vecadd.cu *****
CUDA program to add two vectors.
Compile: nvcc -o vecadd vecadd.cu
Usage: vecadd [N], where N is vector length
Author: John M. Weiss, Ph.D.
CSC433/533 Computer Graphics - Fall 2016.
Modifications:
*/
#include <chrono>
#include <ctime>
#include <cmath>
#include <iostream>
using namespace std;
// Sequential (host/CPU) reference: element-wise vector addition c = a + b.
void add_seq( int *a, int *b, int *c, int n )
{
    int i = 0;
    while ( i < n )
    {
        c[i] = a[i] + b[i];
        ++i;
    }
}
// CUDA kernel: element-wise vector addition, one thread per element.
__global__ void add_par( int *a, int *b, int *c, int n )
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    if ( idx >= n ) return; // guard the partial final block
    c[idx] = a[idx] + b[idx];
}
// Fills array[0..n-1] with pseudo-random ints in the range [0, n).
void random_ints( int array[], int n )
{
    for ( int idx = 0; idx < n; ++idx )
        array[idx] = rand() % n;
}
// Benchmarks sequential vs. CUDA vector addition for n elements
// (n from argv[1], default 1024).
int main( int argc, char** argv )
{
    int n = 1024;
    if ( argc > 1 ) n = atoi( argv[1] );
    // alloc host memory for vectors a, b, c
    int size = n * sizeof( int );
    int *a = ( int * )malloc( size );
    int *b = ( int * )malloc( size );
    int *c_seq = ( int * )malloc( size );
    int *c_par = ( int * )malloc( size );
    // fill arrays a and b with random ints
    srand( time( NULL ) );
    random_ints( a, n );
    random_ints( b, n );
    // add vectors sequentially and time it
    auto c = chrono::system_clock::now();
    add_seq( a, b, c_seq, n );
    chrono::duration<double> d_cpu = chrono::system_clock::now() - c;
    // alloc device memory for vectors a, b, c
    int *d_a, *d_b, *d_c;
    cudaMalloc( ( void ** )&d_a, size );
    cudaMalloc( ( void ** )&d_b, size );
    cudaMalloc( ( void ** )&d_c, size );
    // copy vectors a and b to device
    cudaMemcpy( d_a, a, size, cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, size, cudaMemcpyHostToDevice );
    // launch add() kernel on GPU with M threads per block, (N+M-1)/M blocks
    int nThreads = 64; // should be multiple of 32 (up to 1024)
    int nBlocks = ( n + nThreads - 1 ) / nThreads;
    c = chrono::system_clock::now();
    add_par<<< nBlocks, nThreads >>>(d_a, d_b, d_c, n);
    // BUG FIX: kernel launches are asynchronous; without this sync the
    // "GPU time" below only measured launch overhead, not the computation.
    cudaDeviceSynchronize();
    chrono::duration<double> d_gpu = chrono::system_clock::now() - c;
    // copy vector sum back to host
    cudaMemcpy( c_par, d_c, size, cudaMemcpyDeviceToHost );
#if 0
    cout << "\na: ";
    for ( int i = 0; i < n; i++ ) cout << " " << a[i];
    cout << "\nb: ";
    for ( int i = 0; i < n; i++ ) cout << " " << b[i];
    cout << "\nc_seq:";
    for ( int i = 0; i < n; i++ ) cout << " " << c_seq[i];
    cout << "\nc_par:";
    for ( int i = 0; i < n; i++ ) cout << " " << c_par[i];
#endif
    cout << "\n\nBenchmarks: CPU " << d_cpu.count() << " sec, GPU " << d_gpu.count() << " sec\n\n";
    // cleanup
    free( a ); free( b ); free( c_seq ); free( c_par );
    cudaFree( d_a ); cudaFree( d_b ); cudaFree( d_c );
    return 0;
}
|
1,776
|
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <stdlib.h>
#include <arpa/inet.h>
#include <math.h>
#include "cs_dbg.h"
#include "cs_cuda.h"
#include "cs_helper.h"
#include "cs_perm_generic.h"
// #define CUDA_DBG
// #define CUDA_DBG1
// Scatter permutation (int): output[idxp[i]] = input[i] for i in
// [0, tbl_size). The loop strides by the CUDA_MAX_THREADS macro rather than
// gridDim.x * blockDim.x -- NOTE(review): assumes h_block_adj configures the
// launch so the total thread count matches/covers this stride; confirm
// against cs_helper.
__global__ void d_do_permutation_generic_f1 ( int *input, int *output,
	int *idxp, int tbl_size )
{
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;
	while ( t_idx < tbl_size )
	{
		output[ idxp[ t_idx ]] = input[ t_idx ] ;
		t_idx += CUDA_MAX_THREADS ;
	}
}
/*
perform
target(per(i))=orig(i)
target: outcome vector
orig: the original vector
per: permutation vector
*/
void
h_do_permutation_generic_f1 ( int *d_input, int *d_output, int *d_perm_tbl,
	int tbl_size )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ;	// computed by h_block_adj below
#ifdef CUDA_DBG1
	printf("%s: din %p dout %p perm %p tblsize %d\n", __func__,
		d_input, d_output, d_perm_tbl, tbl_size ) ;
#endif
	h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
	d_do_permutation_generic_f1 <<< nBlocks, nThreadsPerBlock >>> ( d_input,
		d_output, d_perm_tbl, tbl_size ) ;
	// FIX: cudaThreadSynchronize() is deprecated in the CUDA runtime API;
	// cudaDeviceSynchronize() is its documented drop-in replacement.
	cudaDeviceSynchronize() ;
#ifdef CUDA_OBS
	dbg_p_d_data_i("h_do_permutation_generic_f1 perm before", d_input, tbl_size ) ;
	dbg_p_d_data_i("h_do_permutation_generic_f1 perm after", d_output, tbl_size ) ;
#endif
}
// Gather permutation (int): output[i] = input[idxp[i]] for i in
// [0, tbl_size). Strides by the CUDA_MAX_THREADS macro -- NOTE(review):
// assumes the launch configured by h_block_adj matches this stride; confirm
// against cs_helper.
__global__ void d_do_permutation_generic_f2 ( int *input, int *output,
	int *idxp, int tbl_size )
{
	int t_idx = blockIdx.x*blockDim.x + threadIdx.x;
	while ( t_idx < tbl_size )
	{
		output[t_idx] = input[ idxp[t_idx]] ;
		t_idx += CUDA_MAX_THREADS ;
	}
}
/*
perform
target(i)=orig((per(i))
target: outcome vector
orig: the original vector
per: permutation vector
*/
void
h_do_permutation_generic_f2 ( int *d_input, int *d_output, int *d_perm_tbl,
	int tbl_size )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ;	// computed by h_block_adj below
#ifdef CUDA_DBG1
	printf("%s: din %p dout %p perm %p size %d \n", __func__,
		d_input, d_output, d_perm_tbl, tbl_size ) ;
#endif
	h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
	d_do_permutation_generic_f2 <<< nBlocks, nThreadsPerBlock >>> ( d_input,
		d_output, d_perm_tbl, tbl_size ) ;
	// FIX: cudaThreadSynchronize() is deprecated in the CUDA runtime API;
	// cudaDeviceSynchronize() is its documented drop-in replacement.
	cudaDeviceSynchronize() ;
#ifdef CUDA_OBS
	dbg_p_d_data_i("h_do_permutation_generic_f2 perm before", d_input, tbl_size ) ;
	dbg_p_d_data_i("h_do_permutation_generic_f2 perm after", d_output, tbl_size ) ;
#endif
}
// same logic as above but the data types are float
// float overload of the scatter permutation kernel:
// output[idxp[i]] = input[i] for i in [0, tbl_size), grid-strided by
// CUDA_MAX_THREADS.
__global__ void d_do_permutation_generic_f1 ( float *input, float *output,
	int *idxp, int tbl_size )
{
	for ( int i = blockIdx.x * blockDim.x + threadIdx.x ; i < tbl_size ;
		i += CUDA_MAX_THREADS )
	{
		output[ idxp[ i ]] = input[ i ] ;
	}
}
/*
perform
target(per(i))=orig(i)
target: outcome vector
orig: the original vector
per: permutation vector
*/
/*
 * float overload of the scatter host wrapper:
 * d_output[d_perm_tbl[i]] = d_input[i] for i in [0, tbl_size).
 * All three pointers are device pointers.
 *
 * Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
 * the supported equivalent.
 */
void
h_do_permutation_generic_f1 ( float *d_input, float *d_output, int *d_perm_tbl,
	int tbl_size )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ;	// filled in by h_block_adj()
#ifdef CUDA_DBG1
	printf("%s: din %p dout %p perm %p tblsize %d\n", __func__,
		d_input, d_output, d_perm_tbl, tbl_size ) ;
#endif
	h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
	d_do_permutation_generic_f1 <<< nBlocks, nThreadsPerBlock >>> ( d_input,
		d_output, d_perm_tbl, tbl_size ) ;
	cudaDeviceSynchronize() ;
#ifdef CUDA_OBS
	dbg_p_d_data_f("h_do_permutation_generic_f1 perm before", d_input, tbl_size ) ;
	dbg_p_d_data_f("h_do_permutation_generic_f1 perm after", d_output, tbl_size ) ;
#endif
}
// float overload of the gather permutation kernel:
// output[i] = input[idxp[i]] for i in [0, tbl_size), grid-strided by
// CUDA_MAX_THREADS.
__global__ void d_do_permutation_generic_f2 ( float *input, float *output,
	int *idxp, int tbl_size )
{
	for ( int i = blockIdx.x * blockDim.x + threadIdx.x ; i < tbl_size ;
		i += CUDA_MAX_THREADS )
	{
		output[ i ] = input[ idxp[ i ]] ;
	}
}
/*
perform
target(i)=orig((per(i))
target: outcome vector
orig: the original vector
per: permutation vector
*/
/*
 * float overload of the gather host wrapper:
 * d_output[i] = d_input[d_perm_tbl[i]] for i in [0, tbl_size).
 * All three pointers are device pointers.
 *
 * Fixes: the debug printf carried two %d specifiers but only one integer
 * argument (undefined behavior); and cudaThreadSynchronize() is deprecated
 * in favor of cudaDeviceSynchronize().
 */
void
h_do_permutation_generic_f2 ( float *d_input, float *d_output, int *d_perm_tbl,
	int tbl_size )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ;	// filled in by h_block_adj()
#ifdef CUDA_DBG1
	printf("%s: din %p dout %p perm %p tblsize %d\n", __func__,
		d_input, d_output, d_perm_tbl, tbl_size ) ;
#endif
	h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
	d_do_permutation_generic_f2 <<< nBlocks, nThreadsPerBlock >>> ( d_input,
		d_output, d_perm_tbl, tbl_size ) ;
	cudaDeviceSynchronize() ;
#ifdef CUDA_OBS
	dbg_p_d_data_f("h_do_permutation_generic_f2 perm before", d_input, tbl_size ) ;
	dbg_p_d_data_f("h_do_permutation_generic_f2 perm after", d_output, tbl_size ) ;
#endif
}
// Builds the inverse permutation: output[idxp[i]] = i for i in [0, tbl_size),
// grid-strided by CUDA_MAX_THREADS.
__global__ void d_do_permutation_generic_inverse ( int *output,
	int *idxp, int tbl_size )
{
	for ( int i = blockIdx.x * blockDim.x + threadIdx.x ; i < tbl_size ;
		i += CUDA_MAX_THREADS )
	{
		output[ idxp[ i ]] = i ;
	}
}
/*
perform
target(per(i))=(i)
target: outcome vector
orig: the original vector
per: permutation vector
*/
/*
 * Host wrapper: writes the inverse of d_perm_tbl into d_output, i.e.
 * d_output[d_perm_tbl[i]] = i for i in [0, tbl_size). Both pointers are
 * device pointers.
 *
 * Fix: cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() is
 * the supported equivalent.
 */
void
h_do_permutation_generic_inverse ( int *d_output, int *d_perm_tbl,
	int tbl_size )
{
	int nThreadsPerBlock = CUDA_MAX_THREADS_P_BLK ;
	int nBlocks ;	// filled in by h_block_adj()
#ifdef CUDA_DBG1
	printf("%s: dout %p perm %p tblsize %d\n", __func__,
		d_output, d_perm_tbl, tbl_size ) ;
#endif
	h_block_adj ( tbl_size, nThreadsPerBlock, &nBlocks ) ;
	d_do_permutation_generic_inverse <<< nBlocks, nThreadsPerBlock >>> (
		d_output, d_perm_tbl, tbl_size ) ;
	cudaDeviceSynchronize() ;
#ifdef CUDA_OBS
	dbg_p_d_data_i("h_do_permutation_generic_inverse perm before", d_perm_tbl, tbl_size ) ;
	dbg_p_d_data_i("h_do_permutation_generic_inverse perm after", d_output, tbl_size ) ;
#endif
}
|
1,777
|
#include "includes.h"
// Assigns each pixel to one cell of an 8x8 spatial grid of clusters and
// quantizes its filter responses to integers via INTCONFACTOR.
// Launched on a 2-D grid covering width x height pixels.
__global__ void assignInitialClusters_64(int width, int height, int nPixels, int clusterCount, int* cluster, int filterCount, float* responses, int* intResponses) {
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col >= width || row >= height) return;
    int pix = row * width + col;
    int blockW = (width - 1) / 8 + 1;   // ceil(width / 8)
    int blockH = (height - 1) / 8 + 1;  // ceil(height / 8)
    cluster[pix] = (row / blockH) * 8 + (col / blockW);
    // Quantize every filter response for this pixel (responses are stored
    // plane-by-plane, nPixels entries per filter).
    for (int f = 0; f < filterCount; f++) {
        int idx = pix + f * nPixels;
        intResponses[idx] = (int)(INTCONFACTOR * responses[idx]);
    }
}
|
1,778
|
#include <stdlib.h>
#include <stdio.h>
#define ARR_SIZE 10
// c[i] = a[i] + b[i]; one element per block (launched <<<ARR_SIZE, 1>>>).
__global__ void add(int *a, int *b, int *c) {
    int idx = blockIdx.x;
    if (idx >= ARR_SIZE)
        return;
    c[idx] = a[idx] + b[idx];
}
// Host driver: fills two ARR_SIZE-element vectors, adds them on the GPU
// (one block per element) and prints each resulting sum.
int main() {
    int h_A[ARR_SIZE], h_B[ARR_SIZE], h_C[ARR_SIZE];
    int *d_A, *d_B, *d_C;
    size_t bytes = ARR_SIZE * sizeof(int);
    // Populate the input vectors.
    for (int k = 0; k < ARR_SIZE; k++) {
        h_A[k] = k;
        h_B[k] = k + 1;
    }
    // Allocate the three device buffers.
    cudaMalloc((void**) &d_A, bytes);
    cudaMalloc((void**) &d_B, bytes);
    cudaMalloc((void**) &d_C, bytes);
    // Host -> device copies.
    cudaMemcpy(d_A, h_A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, bytes, cudaMemcpyHostToDevice);
    // Launch ARR_SIZE single-thread blocks.
    add<<<ARR_SIZE, 1>>> (d_A, d_B, d_C);
    // Device -> host copy (blocking, so no explicit sync is needed).
    cudaMemcpy(h_C, d_C, bytes, cudaMemcpyDeviceToHost);
    for (int k = 0; k < ARR_SIZE; k++) {
        printf ("[%d] -> %d + %d = %d\n", k, h_A[k], h_B[k], h_C[k]);
    }
    // Release device memory.
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    return 0;
}
|
1,779
|
#define N 1200
#define THREADS 1024
#include <stdio.h>
#include <math.h>
__global__ void vecAdd(int *a, int *b, int *c);
/*
 * Adds two N-element vectors on the GPU and prints the result.
 *
 * Fix: the original launch was <<<(int)ceil(THREADS/N), N>>>. With
 * THREADS=1024 and N=1200, THREADS/N is integer division = 0, so it asked
 * for 0 blocks of 1200 threads — both invalid (threads/block max is 1024),
 * so the kernel never executed and c[] stayed zero. The launch now uses
 * ceil(N/THREADS) blocks of THREADS threads; the kernel's i < N guard
 * handles the tail.
 */
int main(){
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size;
    size = N*sizeof(int);
    cudaMalloc((void**) &dev_a, size);
    cudaMalloc((void**) &dev_b, size);
    cudaMalloc((void**) &dev_c, size);
    a = (int *)malloc(size);
    b = (int *)malloc(size);
    c = (int *)malloc(size);
    for(int i = 0; i < N; i++){
        a[i] = b[i] = i;
        c[i] = 0;
    }
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    // ceil-divide the element count over THREADS-wide blocks.
    vecAdd<<<(N + THREADS - 1)/THREADS, THREADS>>>(dev_a, dev_b, dev_c);
    cudaMemcpy(c, dev_c, size, cudaMemcpyDeviceToHost);
    for(int i = 0; i < N; i++){
        printf("c[%d] = %d\n", i, c[i]);
    }
    free(a);
    free(b);
    free(c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    exit(0);
}
// c[i] = a[i] + b[i] with a bounds guard; logs which thread handled i.
__global__ void vecAdd(int *a, int *b, int *c){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= N) return;
    c[idx] = a[idx] + b[idx];
    printf("Sou a thread %d em %d\n", threadIdx.x, idx);
}
|
1,780
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand_kernel.h>
// Computes b[i] = average of a[i .. i+n-1].
//
// Fix: the original body ran unguarded in every launched thread, so a
// <<<256, 256>>> launch made 65536 threads each atomicAdd the same window
// into b[i] and race on the unsynchronized b[i] /= n — the result was
// neither the sum nor the average. The work is serial per call, so only
// global thread 0 performs it (and no atomics are then needed).
__global__ void n_avg(int *a, int *b, int i, int n) {
    if (blockIdx.x * blockDim.x + threadIdx.x != 0) return;
    for (int j = i; j < i + n; j++) {
        b[i] += a[j];
    }
    b[i] /= n;
}
/*
 * Computes the n-point moving average of an m-element random array on the
 * GPU, one kernel launch per output element, and reports the elapsed time.
 *
 * Fixes: kernel launches are asynchronous, so the clock was stopped before
 * the GPU finished — a cudaDeviceSynchronize() now precedes end_time; and
 * dev_b / b were leaked (only dev_a / a were released).
 */
int main() {
    int m = 10000;          // input length
    int n = 32;             // window width
    int i;
    int block = 256;
    int grid = 256;
    int *a;
    int *dev_a;
    int *b;
    int *dev_b;
    printf("N is %d\n", n);
    printf("DimBlock is %d\n", block);
    printf("DimGrid is %d\n", grid);
    cudaMalloc((void**)&dev_a, sizeof(int) * m);
    cudaMalloc((void**)&dev_b, sizeof(int) * (m - n + 1));
    cudaMallocHost((void**)&a, sizeof(int) * m);     // pinned host buffers
    cudaMallocHost((void**)&b, sizeof(int) * (m - n + 1));
    for (i = 0; i < m; i++) {
        a[i] = rand();
    }
    for (i = 0; i < m - n + 1; i++) {
        b[i] = 0;
    }
    clock_t start_time = clock();
    cudaMemcpy(dev_a, a, sizeof(int) * m, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, sizeof(int) * (m - n + 1), cudaMemcpyHostToDevice);
    for (i = 0; i < m - n + 1; i++) {
        n_avg<<<grid, block>>>(dev_a, dev_b, i, n);
    }
    // Launches are async: wait for the GPU before stopping the clock.
    cudaDeviceSynchronize();
    clock_t end_time = clock();
    printf("Time consuming is %f ms. \n", static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFreeHost(a);
    cudaFreeHost(b);
    return 0;
}
|
1,781
|
/**
Sample for Mobile CUDA
Simple Adding Vectors Application.
Authoer @ Taichirou Suzuki
**/
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
/**
Simple Kernel.
**/
/**
 Element-wise in-place add over a size x size element grid:
 a[x + y*size] += b[x + y*size]. The launch configuration is expected to
 cover the grid exactly (no bounds guard).
**/
__global__ void ___add(float* a,float* b,unsigned long size){
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    unsigned long idx = col + row * size;
    a[idx] += b[idx];
}
// Returns tv1 - tv0 in (float) seconds.
static float elapsed(struct timeval tv0,struct timeval tv1){
    float secs = (float)(tv1.tv_sec - tv0.tv_sec);
    float micros = (float)(tv1.tv_usec - tv0.tv_usec);
    return secs + micros * 0.000001f;
}
/*
 * Repeatedly uploads two _hen x _hen float matrices, launches the add
 * kernel, downloads the accumulator, and verifies a[i] == ite at the end.
 *
 * Fix: sizeof(float)*size is size_t, but it was printed with %d —
 * undefined behavior in printf. The values are now cast to unsigned long
 * and printed with %lu. Loop counters comparing against the unsigned long
 * `size` are widened to unsigned long as well.
 */
int main(void){
    struct timeval t0,t1;
    gettimeofday(&t0,NULL);
    /* Matrix edge length; size is the element count of one matrix. */
    unsigned long _hen = 14000;
    unsigned long size = _hen * _hen;
    printf("gyouretu size : %lu\n",size);
    /* Number of kernel launches per iteration. */
    int numOfLaunchKernel = 1;
    cudaSetDevice(0);
    float* d_a = NULL;
    float* d_b = NULL;
    cudaMalloc((void**)&d_a,sizeof(float)*size);
    cudaMalloc((void**)&d_b,sizeof(float)*size);
    float* h_a = (float*)malloc(sizeof(float)*size);
    float* h_b = (float*)malloc(sizeof(float)*size);
    printf("This Sample Application Uses %lu[Mbyte] per vector.(Total : %lu[Mbyte])\n",
        (unsigned long)(sizeof(float)*size >> 20),
        (unsigned long)(sizeof(float)*size*2 >> 20));
    for(unsigned long i = 0 ; i < size ; i ++){
        h_a[i] = 0.0f;     /* accumulator */
        h_b[i] = 1.0f;     /* added once per iteration */
    }
    int ite = 260;         /* number of upload/add/download rounds */
    for(int j = 0 ; j < ite ; j ++){
        cudaMemcpy(d_a,h_a,sizeof(float)*size,cudaMemcpyHostToDevice);
        cudaMemcpy(d_b,h_b,sizeof(float)*size,cudaMemcpyHostToDevice);
        int _size = 10;    /* 10x10 threads per block */
        dim3 threads(_size,_size,1);
        dim3 grid(_hen/_size,_hen/_size,1);
        for(int i = 0 ; i < numOfLaunchKernel ; i ++){
            ___add<<<grid,threads>>>(d_a,d_b,_hen);
        }
        cudaMemcpy(h_a,d_a,sizeof(float)*size,cudaMemcpyDeviceToHost);
    }
    /* Every element should have been incremented once per iteration. */
    int pass = 1;
    for(unsigned long i = 0 ; i < size ; i ++){
        if(h_a[i] != ite){
            pass = 0;
        }
    }
    if(pass){
        printf(">Result TEST : PASS\n");
    }else{
        printf(">Result TEST : FAILED\n");
    }
    cudaFree(d_a);
    cudaFree(d_b);
    free(h_a);
    free(h_b);
    printf("Application Closed...\n");
    gettimeofday(&t1,NULL);
    printf("My RESULT : %f\n",elapsed(t0,t1));
    return 0;
}
|
1,782
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include<stdio.h>
#include<stdlib.h>
#include<string.h>
// Counts how many words of the space-separated string a[0..size) match the
// word b[0..wordSize). Launched <<<1, n>>> where n is the number of words;
// thread t handles the (t+1)-th word: it scans a[] counting spaces to find
// the word's boundaries, compares it character-by-character against b, and
// atomically increments *d_count on a match.
// NOTE(review): the j > i check is what stops a shorter word from matching a
// longer pattern; a word that merely starts with b but is longer would still
// compare equal over wordSize characters — confirm against the caller's
// intended semantics.
__global__ void countWord(char *a , char *b , unsigned int* d_count , int size , int wordSize)
{
int id = threadIdx.x+1;
// cur: spaces seen so far; start: index of the space preceding this
// thread's word (0 for the first word); end: index of the space following
// it (size for the last word).
int cur = 0;
int start = 0;
int end = size;
int j = 0;
// Locate the id-th word by counting delimiting spaces.
for(j = 0;j<size;j++)
{
if(a[j] == ' ')
{
cur++;
if(cur == id)
{
end = j;
break;
}
else
{
start = j;
}
}
}
int i = 0;
// j = first character of the word (skip the leading space unless this is
// the very first word); i = last character of the word.
if(start!=0)
{
j = start+1;
}
else
j = start;
i = end-1;
int k = 0;
int flag = 1;
// Compare against b; running past the word's last character (j > i) also
// counts as a mismatch.
for(k = 0;k<wordSize;k++,j++)
{
if(a[j]!=b[k] || j>i)
{
flag = -1;
break;
}
}
if(flag == 1)
atomicAdd(d_count,1);
}
/*
 * Reads a string, a word count, and a word; counts occurrences of the word
 * on the GPU (one thread per word) and prints the total.
 *
 * Fixes: *count was malloc'd but never initialized before being copied to
 * d_count, so the device counter started from an indeterminate value — it
 * is now zeroed. d_count and all host allocations were leaked; they are
 * now released, and main returns 0 explicitly.
 */
int main()
{
    int n;
    unsigned int *count = 0,*d_count,*result = 0;
    count = (unsigned int*)malloc(sizeof(unsigned int));
    result = (unsigned int*)malloc(sizeof(unsigned int));
    *count = 0;   /* device counter must start at zero */
    char *a = (char*)malloc(sizeof(char)*(30));
    int size = 0;
    printf("Enter the string \n");
    scanf("%[^\n]%*c", a);
    printf("Enter number of words \n");
    scanf("%d",&n);
    char *b = (char*)malloc(sizeof(char)*(30));
    printf("Enter the word \n");
    scanf("%s",b);
    char *d_a , *d_b;
    printf("Input String = %s \n",a);
    int wordSize = strlen(b);
    int size1 = sizeof(char)*30;
    int size2 = sizeof(char)*30;
    cudaError_t error;
    error = cudaMalloc((void**)&d_a,size1);
    if(error != cudaSuccess)
    {
        printf("Error in first malloc\n");
        exit(0);
    }
    error = cudaMalloc((void**)&d_b,size2);
    if(error != cudaSuccess)
    {
        printf("Error in second malloc\n");
        exit(0);
    }
    error = cudaMalloc((void**)&d_count,sizeof(unsigned int));
    if(error != cudaSuccess)
    {
        printf("Error in third malloc \n");
        exit(0);
    }
    error = cudaMemcpy(d_count,count,sizeof(*count),cudaMemcpyHostToDevice);
    if(error != cudaSuccess)
    {
        printf("Error in first\n");
        printf("Cuda error 2: %s \n",cudaGetErrorString(error));
        exit(0);
    }
    error = cudaMemcpy(d_a,a,size1,cudaMemcpyHostToDevice);
    if(error != cudaSuccess)
    {
        printf("Error in second\n");
        exit(0);
    }
    error = cudaMemcpy(d_b,b,size2,cudaMemcpyHostToDevice);
    if(error != cudaSuccess)
    {
        printf("Error in third\n");
        exit(0);
    }
    size = strlen(a);
    /* one thread per word */
    countWord<<<1,n>>>(d_a,d_b,d_count,size,wordSize);
    cudaMemcpy(result,d_count,sizeof(unsigned int),cudaMemcpyDeviceToHost);
    printf("Total occurences of %s = %d \n",b,*result);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_count);
    free(a);
    free(b);
    free(count);
    free(result);
    return 0;
}
|
1,783
|
#include <stdio.h>
#include <stdlib.h>
#include <curand.h>
#define GRID_SIZE 2
#define BLOCK_SIZE 3
// Plain 3-component float vector, used both as host data and as a kernel
// argument type.
struct point3D {
float x;
float y;
float z;
};
/**
* GPU側のグローバル関数から呼び出される関数の定義は、deviceを指定する。
* これで、後は普通の関数定義と同じように、好きな関数を定義できる。
*/
/**
 * Device helper: returns the arithmetic negation of val.
 */
__device__
float negate(float val) {
    const float negated = -val;
    return negated;
}
/**
* GPU側の関数の引数に、構造体を使用できる。
* これで、コードがスッキリだ!
*/
/**
 * Negates each component of devRandom[idx] into devResults[idx], one
 * element per thread (1-D launch).
 */
__global__
void test(point3D* devResults, point3D* devRandom) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    point3D in = devRandom[idx];
    point3D out;
    out.x = negate(in.x);
    out.y = negate(in.y);
    out.z = negate(in.z);
    devResults[idx] = out;
}
/*
 * Fills a small array of random point3D values, negates them on the GPU,
 * and prints before/after.
 *
 * Fix: results was allocated with new[] but released with free() —
 * undefined behavior; it is now released with delete[].
 */
int main()
{
    point3D* results;
    point3D* devResults;
    point3D *devRandom;
    // Allocate and populate the host buffer with values in [0, 0.99].
    results = new point3D[GRID_SIZE * BLOCK_SIZE];
    for (int i = 0; i < GRID_SIZE * BLOCK_SIZE; ++i) {
        results[i].x = (rand() % 100) * 0.01f;
        results[i].y = (rand() % 100) * 0.01f;
        results[i].z = (rand() % 100) * 0.01f;
        printf("%lf, %lf, %lf\n", results[i].x, results[i].y, results[i].z);
    }
    // Device buffers.
    cudaMalloc((void**)&devResults, sizeof(point3D) * GRID_SIZE * BLOCK_SIZE);
    cudaMalloc((void**)&devRandom, sizeof(point3D) * GRID_SIZE * BLOCK_SIZE);
    // Upload the inputs.
    cudaMemcpy(devRandom, results, sizeof(point3D) * GRID_SIZE * BLOCK_SIZE, cudaMemcpyHostToDevice);
    // One thread per point.
    test<<<GRID_SIZE, BLOCK_SIZE>>>(devResults, devRandom);
    // Download the negated points (blocking copy).
    cudaMemcpy(results, devResults, sizeof(point3D) * GRID_SIZE * BLOCK_SIZE, cudaMemcpyDeviceToHost);
    cudaFree(devResults);
    cudaFree(devRandom);
    // Show the results.
    for (int i = 0; i < GRID_SIZE * BLOCK_SIZE; ++i) {
        printf("%lf, %lf, %lf\n", results[i].x, results[i].y, results[i].z);
    }
    delete[] results;   // was free(): new[] must be paired with delete[]
    cudaDeviceReset();
}
|
1,784
|
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
#include <cassert>
#include <cstdlib>
#include <functional>
#include <iostream>
#include <algorithm>
#include <vector>
using std::cout;
using std::generate;
using std::vector;
#define SIZE 10000
#define N 10
#define CUDA_CALL(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
#define CHECK(x) do { if((x)!=cudaSuccess) { \
printf("Error at %s:%d\n",__FILE__,__LINE__);\
return EXIT_FAILURE;}} while(0)
// dev_b[idx] = mean of dev_a[idx .. idx+n-1] for each complete window.
// Precondition: dev_b must be zero-initialized by the caller (this kernel
// accumulates with +=).
//
// Fix: the original guard (idx < size - n + 2) allowed the last thread to
// read dev_a[size], one element past the end. There are exactly
// size - n + 1 complete windows. The trailing __syncthreads() gated
// nothing (no shared memory) and was removed.
__global__ void computeMovingAverage(const float *dev_a, float *dev_b, int size, int n) {
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < (size - n + 1)) {
        for (int i = 0; i < n; i++) {
            dev_b[idx] += dev_a[idx + i] / n;
        }
    }
}
// CPU reference: accumulates into cpuRef the mean of all n-point moving
// averages of host_a[0..size).
//
// Fix: the original iterated i up to size - n + 2, reading host_a[i + j]
// at index `size` — one past the end of the vector. There are exactly
// size - n + 1 complete windows.
void computeMovingAverageOnCPU(vector<float> &host_a, float &cpuRef, int size, int n) {
    vector<float> temp_vec(size);
    int windows = size - n + 1;   // number of complete windows
    for (int i = 0; i < windows; i++)
        for (int j = 0; j < n; j++)
            temp_vec[i] += host_a[i + j] / n;
    for (int i = 0; i < windows; i++)
        cpuRef += temp_vec[i] / windows;
}
/*
 * Compares an n-point moving-average computed on GPU vs CPU and times both.
 *
 * Fixes: host_b was sized size-n but nBytes (= size floats) were copied
 * into it — a heap overflow; it is now sized `size`. dev_b was never
 * zeroed, yet the kernel accumulates with += into it — cudaMemset added.
 * The accumulation loop read host_b[x] for x up to size-n+1 (past the old
 * vector end); it now covers the size-n+1 complete windows.
 */
int main(void){
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));
    int n = N;
    int size = SIZE;
    printf("Array Size: %d Sample Size: %d\n", size, N);
    size_t nBytes = size * sizeof(float);
    float cpuRef = 0.0f;
    float gpuRef = 0.0f;
    // seed the host RNG
    srand ((int)time(0));
    // input values in [0, 4]; host_b must hold nBytes for the D2H copy
    vector<float> host_a(size);
    vector<float> host_b(size);
    generate(host_a.begin(), host_a.end(), []() { return rand() % 5; });
    float *dev_a, *dev_b;
    cudaMalloc(&dev_a, nBytes);
    cudaMalloc(&dev_b, nBytes);
    cudaMemcpy(dev_a, host_a.data(), nBytes, cudaMemcpyHostToDevice);
    cudaMemset(dev_b, 0, nBytes);   // kernel accumulates with +=
    // launch configuration
    dim3 block (1000);
    dim3 grid (10);
    // GPU timing
    float GPUtime, CPUtime;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    computeMovingAverage <<< grid, block >>> (dev_a, dev_b, size, n);
    cudaMemcpy(host_b.data(), dev_b, nBytes, cudaMemcpyDeviceToHost);
    // average of the size-n+1 complete windows
    for(int x = 0; x < (size - n + 1); x++){
        gpuRef += (float)(host_b[x]/(size - n + 1));
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&GPUtime, start, stop);
    // CPU timing
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    computeMovingAverageOnCPU(host_a, cpuRef, size, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&CPUtime, start, stop);
    printf("Kernel: computeMovingAverage <<<gridDim: %d, blockDim: %d>>>\n", grid.x, block.x);
    printf("Compute time on GPU: %3.6f ms \n", GPUtime);
    printf("Compute time on CPU: %3.6f ms \n", CPUtime);
    printf("Moving Average computed on CPU: %3.6f\n", cpuRef);
    printf("Moving Average computed on GPU: %3.6f\n", gpuRef);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return (0);
}
|
1,785
|
#include <cuda_runtime.h>
#include <stdio.h>
#include <sys/time.h>
#define LEN 1<<22
// Wall-clock time in seconds (microsecond resolution) via gettimeofday.
double seconds(){
    struct timeval tp;
    struct timezone tzp;
    int rc = gettimeofday(&tp, &tzp);
    (void)rc;   // gettimeofday's status is intentionally ignored
    double secs = (double)tp.tv_sec;
    double micros = (double)tp.tv_usec;
    return secs + micros * 1.e-6;
}
// Structure-of-arrays layout: LEN x-values followed by LEN y-values, so
// thread i touching x[i] / y[i] accesses contiguous memory per field.
struct InnerArray{
float x[LEN];
float y[LEN];
};
// Fills the first `size` entries of both member arrays with pseudo-random
// values in [0, 2.55].
void initialInnerArray(InnerArray *ip, int size){
    for (int k = 0; k < size; k++){
        ip->x[k] = (float)(rand() & 0xFF) / 100.0f;
        ip->y[k] = (float)(rand() & 0xFF) / 100.0f;
    }
}
// CPU reference: C->x = A->x + 10, C->y = A->y + 20, element-wise over n.
void testInnerArrayHost(InnerArray *A, InnerArray *C, const int n){
    for (int k = 0; k < n; k++){
        C->x[k] = A->x[k] + 10.f;
        C->y[k] = A->y[k] + 20.f;
    }
}
// Same computation as testInnerArrayHost, written with explicit
// dereference syntax to demonstrate that (*p).x[i] equals p->x[i].
void testInnerArrayHost2(InnerArray *A,InnerArray *C, const int n){
    for (int k = 0; k < n; k++){
        (*C).x[k] = (*A).x[k] + 10.f;
        (*C).y[k] = (*A).y[k] + 20.f;
    }
}
// Debug dump: prints the first n (x, y) pairs of C.
void printfHostResult(InnerArray *C, const int n){
    for (int k = 0; k < n; k++){
        printf("printout idx %d: x %f y %f \n", k, C->x[k], C->y[k]);
    }
}
// Compares the first N elements of both fields; reports and stops at the
// first element whose host/gpu values differ by more than epsilon.
void checkInnerArray(InnerArray *hostRef, InnerArray *gpuRef, const int N){
    double epsilon = 1.0e-8;
    bool match = 1;
    for (int k = 0; k < N; k++){
        if (abs(hostRef->x[k] - gpuRef->x[k]) > epsilon){
            match = 0;
            printf("different on x %dth element: host %f gpu %f \n", k, hostRef->x[k], gpuRef->x[k]);
            break;
        }
        if (abs(hostRef->y[k] - gpuRef->y[k]) > epsilon){
            match = 0;
            printf("different on y %dth element: host %f gpu %f \n", k, hostRef->y[k], gpuRef->y[k]);
            break;
        }
    }
    if (!match) printf("Arrays do not match.\n\n");
}
// GPU version: result->x[i] = data->x[i] + 10, result->y[i] = data->y[i] + 20.
__global__ void testInnerArray(InnerArray *data, InnerArray *result, const int n){
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    float vx = data->x[idx] + 10.0f;
    float vy = data->y[idx] + 20.0f;
    result->x[idx] = vx;
    result->y[idx] = vy;
}
// Identical to testInnerArray; launched first so device initialization
// cost is not charged to the timed kernel.
__global__ void warmup(InnerArray *data, InnerArray *result, const int n){
    unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= n) return;
    float vx = data->x[idx] + 10.0f;
    float vy = data->y[idx] + 20.0f;
    result->x[idx] = vx;
    result->y[idx] = vy;
}
// Benchmarks the struct-of-arrays add kernel: builds random inputs, checks
// the two CPU references agree, runs a warmup launch plus the timed
// kernel, and verifies the GPU result against the CPU reference.
// Optional argv[1] overrides the block size (default 128).
int main(int argc, char ** argv){
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp,dev);
printf("%s test struct of array at ",argv[0]);
printf("device %d: %s \n\n",dev,deviceProp.name);
cudaSetDevice(dev);
// allocate host memory (one InnerArray holds both LEN-long fields)
int nElem = LEN;
size_t nBytes = sizeof(InnerArray);
InnerArray *h_A = (InnerArray *)malloc(nBytes);
InnerArray *hostRef = (InnerArray *)malloc(nBytes);
InnerArray *gpuRef = (InnerArray *)malloc(nBytes);
InnerArray *hostRef2 = (InnerArray *)malloc(nBytes);
// initialize host array and sanity-check the CPU reference against itself
initialInnerArray(h_A,nElem);
testInnerArrayHost(h_A,hostRef,nElem);
testInnerArrayHost(h_A,hostRef2,nElem);
checkInnerArray(hostRef,hostRef2,nElem);
// allocate memory on device
InnerArray *d_A,*d_C;
cudaMalloc((InnerArray**)&d_A,nBytes);
cudaMalloc((InnerArray**)&d_C,nBytes);
// copy data from host to device
cudaMemcpy(d_A,h_A,nBytes,cudaMemcpyHostToDevice);
// set up blocksize (overridable from the command line)
int blocksize = 128;
if (argc>1) blocksize = atoi(argv[1]);
// execution config: 1-D grid covering nElem elements
dim3 block (blocksize,1);
dim3 grid((nElem+block.x-1)/block.x,1);
// kernel 1: warmup launch, timed separately
double iStart = seconds();
warmup<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
double iElaps = seconds() - iStart;
printf("warmup <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
// kernel 2: the measured run
iStart = seconds();
testInnerArray<<<grid,block>>>(d_A,d_C,nElem);
cudaDeviceSynchronize();
iElaps = seconds() - iStart;
printf("innerarray <<<%3d,%3d>>> elapsed %f sec \n",grid.x,block.x,iElaps);
cudaMemcpy(gpuRef,d_C,nBytes,cudaMemcpyDeviceToHost);
checkInnerArray(hostRef,gpuRef,nElem);
cudaGetLastError();
// free memories
cudaFree(d_A);
cudaFree(d_C);
free(h_A);
free(hostRef);
free(hostRef2);
free(gpuRef);
cudaDeviceReset();
return EXIT_SUCCESS;
}
|
1,786
|
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <cuda.h>
#define maxThreads 512
/*
This code was developed and tested on cuda3
*/
// Per-block max reduction: each block loads blockDim.x elements of num[]
// into shared memory, tree-reduces them to the block maximum, and writes
// it back to num[blockIdx.x]. Out-of-range lanes contribute 0.
//
// Fix: the original read num[gloid] unconditionally before the gloid>=size
// check, so threads past the end performed an out-of-bounds global read;
// the load is now guarded.
__global__ void getmaxcu(unsigned int num[], unsigned int size){
    unsigned int tid = threadIdx.x;
    unsigned int gloid = blockIdx.x*blockDim.x + threadIdx.x;
    __shared__ int sdata[maxThreads];   // one slot per thread in the block
    sdata[tid] = (gloid < size) ? (int)num[gloid] : 0;
    __syncthreads();
    // Tree-structured reduction: halve the active range each step, keeping
    // the larger of each pair.
    for (int stride = blockDim.x / 2; stride > 0; stride /= 2) {
        if (tid < stride && sdata[tid] < sdata[tid + stride]) {
            sdata[tid] = sdata[tid + stride];
        }
        __syncthreads();
    }
    // Thread 0 publishes the block maximum.
    if (tid == 0) {
        num[blockIdx.x] = sdata[0];
    }
}
int main(int argc, char *argv[])
{
cudaDeviceProp prop;
cudaError_t propErr = cudaGetDeviceProperties(&prop, 0);
if (propErr != cudaSuccess) {
printf("unable to get device properties\n");
}
unsigned int size = 0; // The size of the array
unsigned int i; // loop index
unsigned int * numbers; //pointer to the array
unsigned int* cudaNumbers;
unsigned int thread;
unsigned int block;
if(argc !=2)
{
printf("usage: maxseq num\n");
printf("num = size of the array\n");
exit(1);
}
size = atol(argv[1]);
numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
if( !numbers )
{
printf("Unable to allocate mem for an array of size %u\n", size);
exit(1);
}
srand(time(NULL)); // setting a seed for the random number generator
// Fill-up the array with random numbers from 0 to size-1
for( i = 0; i < size; i++)
numbers[i] = rand() % size;
cudaMalloc((void**)&cudaNumbers, (size * sizeof(unsigned int)));
cudaMemcpy(cudaNumbers, numbers, (size * sizeof(unsigned int)), cudaMemcpyHostToDevice);
if (size%maxThreads != 0) {
size = (size/maxThreads+1)*maxThreads;
}
unsigned int cudaSize=size;
thread = maxThreads;
block = size/thread;
while(block>1){
getmaxcu<<<block, maxThreads>>>(cudaNumbers, cudaSize);
cudaSize=cudaSize/maxThreads;
block = cudaSize/maxThreads;
}
getmaxcu<<<1, block>>>(cudaNumbers, block);
cudaMemcpy(numbers, cudaNumbers, sizeof(unsigned int), cudaMemcpyDeviceToHost);//only copies back the max, which should be in the first element of the array
printf(" The maximum number in the array is: %u\n", numbers[0]);
free(numbers);
cudaFree(cudaNumbers);
exit(0);
}
/*
input: pointer to an array of long int
number of elements in the array
output: the maximum number of the array
*/
|
1,787
|
/*
sergeim19
April 27, 2015
Burgers equation - GPU CUDA version
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda.h>
#include <cufft.h>
#include <time.h>
#include <sys/time.h>
#define NADVANCE (4000)
#define nu (5.0e-2)
/*
 * Computes *result = x - y in fractional seconds. Returns 1 when the
 * difference is negative (x earlier than y), else 0.
 * Note: normalising the microsecond carry modifies *y in place.
 */
int timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y so x->tv_usec - y->tv_usec is positive. */
  if (x->tv_usec < y->tv_usec) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * nsec;
    y->tv_sec += nsec;
  }
  /* Push excess whole seconds out of the microsecond difference. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int nsec = (y->tv_usec - x->tv_usec) / 1000000;
    y->tv_usec += 1000000 * nsec;
    y->tv_sec -= nsec;
  }
  /* tv_usec difference is now certainly positive. */
  double secs = (double)(x->tv_sec - y->tv_sec);
  double usecs = (double)(x->tv_usec - y->tv_usec);
  *result = usecs / 1e6 + secs;
  /* 1 signals a negative interval. */
  return x->tv_sec < y->tv_sec;
}
// Divides every element by N (undoes the cuFFT unnormalized inverse
// transform); one element per thread, no bounds guard — the launch must
// cover exactly N elements.
__global__ void kernel_rescale_u(double *u_dev, int N)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	u_dev[idx] = u_dev[idx] / (double)N;
}
// Pointwise nonlinear term: uu[j] = u[j]^2 / 2; one element per thread.
__global__ void kernel_calc_uu(double *u_dev, double *uu_dev)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	uu_dev[idx] = 0.5 * u_dev[idx] * u_dev[idx];
}
// Zeroes Fourier mode n (real and imaginary parts); launched <<<1, 1>>>.
__global__ void kernel_setmodezero(cufftDoubleComplex *fhat, int n)
{
	fhat[n].x = 0.0;
	fhat[n].y = 0.0;
}
// Advances one Fourier mode of the Burgers solution by one time step:
// viscosity (factor 1 + k^2*nu*dt) is treated implicitly, the nonlinear
// term uuhat explicitly; one mode per thread.
__global__ void kernel_burgers(cufftDoubleComplex *uhat, cufftDoubleComplex *uuhat, double dt)
{
	int idx = blockIdx.x * blockDim.x + threadIdx.x;
	double k = (double)idx;                 // wavenumber of this mode
	double visc = 1.0 + k*k*nu*dt;          // implicit viscous factor
	double denom = pow(visc, 2);
	double re = ( uhat[idx].x*visc + k*dt*uuhat[idx].y ) / denom;
	double im = ( uhat[idx].y*visc - k*dt*uuhat[idx].x ) / denom;
	uhat[idx].x = re;
	uhat[idx].y = im;
}
/*
 * Pseudo-spectral solver for the viscous Burgers equation: sin(x) initial
 * condition, NADVANCE semi-implicit time steps via cuFFT forward/inverse
 * transforms, then timing, L2 norm, and a dump of (x, u) to a data file.
 *
 * Fixes: u was allocated with cudaMallocHost() but released with
 * cudaFree() — pinned host memory requires cudaFreeHost(); uu_dev was
 * never freed; the output file was written even when fopen failed, and
 * never closed.
 */
int main(void)
{
    FILE *out_file = fopen("data_burgers_gpu.dat", "w");
    int N = 1048576, blockSize = 512, nBlocks = N/blockSize;
    double dx = 2.0 * M_PI / (double)N, dt = 1.0e-3;
    double *x, *u, *u_dev, *uu_dev;
    double norm = 0.0;
    int devid, devcount, error;
    double restime;
    struct timeval tdr0, tdr1;
    /* Device detection: report the device in the current context. */
    cudaGetDevice(&devid);
    if (cudaGetDeviceCount(&devcount) || devcount==0)
    {
        printf ("No CUDA devices!\n");
        exit (1);
    }
    else
    {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties (&deviceProp, devid);
        printf ("Device count, devid: %d %d\n", devcount, devid);
        printf ("Device: %s\n", deviceProp.name);
        printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
    }
    x = (double *) malloc (sizeof (double) * N);
    cudaMallocHost((void **) &u, sizeof (double) * N);   /* pinned */
    /* Initial condition u = sin(x) on [0, 2*pi). */
    for (int i = 0; i < N; i++) {
        x[i] = (double)i * dx;
        u[i] = sin(x[i]);
    }
    /* Device buffers for u and the nonlinear product. */
    cudaMalloc((void **)&u_dev, sizeof (double) * N);
    cudaMalloc((void **)&uu_dev, sizeof (double) * N);
    if ((error = cudaMemcpy(u_dev, u, sizeof (double) * N, cudaMemcpyHostToDevice)) != cudaSuccess)
    {
        printf ("Error %d\n", error);
        exit (error);
    }
    cudaDeviceSynchronize();
    /* Spectral buffers: D2Z produces N/2 + 1 complex modes. */
    cufftDoubleComplex *uhat, *uuhat;
    cudaMalloc((void**) &uhat, sizeof(cufftDoubleComplex) * (N/2 + 1) );
    cudaMalloc((void**) &uuhat, sizeof(cufftDoubleComplex) * (N/2 + 1) );
    /* CUFFT plans for real<->complex transforms of length N. */
    cufftHandle plan_forward, plan_backward;
    cufftPlan1d(&plan_forward, N, CUFFT_D2Z, 1);
    cufftPlan1d(&plan_backward, N, CUFFT_Z2D, 1);
    /* Initial spectrum, with the Nyquist (N/2) mode zeroed. */
    cufftExecD2Z(plan_forward, u_dev, uhat);
    kernel_setmodezero<<<1, 1>>>(uhat, N/2);
    cudaDeviceSynchronize();
    gettimeofday (&tdr0, NULL);
    /* Main time stepping loop. */
    for (int i = 0; i < NADVANCE; i++) {
        /* Fourier -> real space, then undo cuFFT's scaling. */
        cufftExecZ2D(plan_backward, uhat, u_dev);
        cudaDeviceSynchronize();
        kernel_rescale_u<<<nBlocks, blockSize>>>(u_dev, N);
        cudaDeviceSynchronize();
        /* Nonlinear product u^2/2 in real space, then back to spectrum. */
        kernel_calc_uu<<<nBlocks, blockSize>>>(u_dev, uu_dev);
        cudaDeviceSynchronize();
        cufftExecD2Z(plan_forward, uu_dev, uuhat);
        cudaDeviceSynchronize();
        kernel_setmodezero<<<1, 1>>>(uuhat, N/2);
        cudaDeviceSynchronize();
        /* Semi-implicit update of the N/2 retained modes. */
        kernel_burgers<<<nBlocks/2, blockSize>>>(uhat, uuhat, dt);
        cudaDeviceSynchronize();
        kernel_setmodezero<<<1, 1>>>(uhat, N/2);
        cudaDeviceSynchronize();
    }
    /* Final inverse transform. */
    cufftExecZ2D(plan_backward, uhat, u_dev);
    cudaDeviceSynchronize();
    if ((error = cudaMemcpy(u, u_dev, sizeof (double) * N, cudaMemcpyDeviceToHost)) != cudaSuccess)
    {
        printf ("Error %d\n", error);
        exit (error);
    }
    cudaDeviceSynchronize();
    /* Undo the scaling of the last inverse transform on the host. */
    for (int i = 0; i < N; i++) {
        u[i] = u[i] / (double)N;
    }
    gettimeofday (&tdr1, NULL);
    timeval_subtract (&restime, &tdr1, &tdr0);
    printf ("gpu time: %es\n", restime);
    /* L2 norm of the final solution. */
    for (int i = 0; i < N; i++) {
        norm += fabs(u[i] * u[i]);
    }
    norm = sqrt(norm);
    printf ("norm: %e\n", norm);
    /* Dump (x, u) pairs; skip cleanly if the file failed to open. */
    if (out_file == NULL) {
        printf("Error opening file\n");
    } else {
        for (int i = 0; i < N; i++) {
            fprintf(out_file, "%.15f, %.15f\n", x[i], u[i]);
        }
        fclose(out_file);
    }
    free(x);
    cudaFreeHost(u);    /* was cudaFree(): u is pinned host memory */
    cudaFree(u_dev);
    cudaFree(uu_dev);   /* was leaked */
    cudaFree(uhat);
    cudaFree(uuhat);
    cufftDestroy(plan_forward);
    cufftDestroy(plan_backward);
    return 0;
}
|
1,788
|
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
// Device-side instrumentation hook taking a site/counter index. The body is
// intentionally empty in this build; extern "C" keeps the symbol unmangled
// so tooling can locate and replace it.
extern "C" __device__ void profileCount(long index){
}
|
1,789
|
// inspired by https://devblogs.nvidia.com/efficient-matrix-transpose-cuda-cc/
// TILE_DIM=32, BLOCK_ROWS=8
// No bank-conflict transpose
// Same as transposeCoalesced except the first tile dimension is padded
// to avoid shared memory bank conflicts.
// can be used to transpose non-square 2D arrays
// Tiled out-of-place transpose of an xdim x ydim row-major matrix into
// odata (ydim x xdim). Launch: 32x8 thread blocks, one 32x32 tile per
// block; each thread handles 4 rows of the tile (j steps of 8). The
// shared tile's inner dimension is padded (+1) so column reads do not hit
// a single shared-memory bank.
// NOTE(review): the load guard is a flat index < xdim*ydim check, not
// x < xdim — for xdim not a multiple of 32 an overhanging thread can read
// an element of the following row; confirm callers only use multiples of
// 32 or tolerate this.
__global__
void transposeNoBankConflicts32x8(float *odata, const float *idata, int xdim, int ydim) {
__shared__ float tile[32][32 + 1];
int tilex = blockIdx.x * 32;
int tiley = blockIdx.y * 32;
int x = tilex + threadIdx.x;
int y = tiley + threadIdx.y;
// Stage a 32x32 tile of the input in shared memory.
for (int j = 0; j < 32; j += 8) {
int index = (y + j) * xdim + x;
if (index < (xdim*ydim)) {
tile[threadIdx.y + j][threadIdx.x] = idata[index];
}
}
__syncthreads();
// Write the tile back with swapped coordinates.
x = tiley + threadIdx.x; // transpose tiles
y = tilex + threadIdx.y; // transpose tiles
if (x >= ydim) return; // output matrix has y columns
int maxJ = min(32, xdim - y); // output matrix has x rows
for (int j = 0; j < maxJ; j += 8) {
int index = (y+j) * ydim + x;
odata[index] = tile[threadIdx.x][threadIdx.y + j];
}
}
// Cubic cardinal B-spline prefilter (recursive IIR, pole z = sqrt(3)-2)
// applied along the x dimension of column-major-strided data: thread idx
// processes the column of `output` whose elements sit yDim apart, copying
// from `input` with gain, then running the causal and anticausal passes.
// One thread per column; launch must cover yDim threads in x.
__global__
void iirConvolve2D_Cardinal_Bspline_3_MirrorOffBoundNew(float* input,
float* output, int xDim, int yDim) {
// assign column to thread
int idx = blockIdx.x * blockDim.x + threadIdx.x;
// we will process data line by line, but data are stored in
// columns!
if (idx >= yDim) return; // only threads with data should continue
float* line = output + idx; // FIXME rename to sth reasonable
// adjust gain
float z = sqrtf(3.f) - 2.f;
float z1 = 1.0 - z;
float gain = -(z1 * z1) / z;
// copy original data (scaled by the prefilter gain)
for (int i = 0; i < xDim; i++) {
line[i * yDim] = input[(i * yDim) + idx] * gain;
}
// prepare some values: initial causal coefficient from the boundary sum
float sum = (line[0] + powf(z, xDim) * line[(xDim - 1) * yDim]) * (1.f + z) / z;
z1 = z;
float z2 = powf(z, 2 * xDim - 2);
float iz = 1.f / z;
for (int j = 1; j < (xDim - 1); ++j) {
sum += (z2 + z1) * line[j * yDim];
z1 *= z;
z2 *= iz;
}
line[0] = sum * z / (1.f - powf(z, 2 * xDim));
// causal (left-to-right) recursive pass
for (int j = 1; j < xDim; ++j) {
line[j * yDim] += z * line[(j - 1) * yDim];
}
// anticausal (right-to-left) recursive pass
line[(xDim - 1) * yDim] *= z / (z - 1.f);
for (int j = xDim - 2; 0 <= j; --j) {
line[j * yDim] = z * (line[(j + 1) * yDim] - line[j * yDim]);
}
}
// Row-contiguous variant of the cubic B-spline prefilter above: thread idy
// processes one xDim-long row of the row-major data (copy with gain,
// causal pass, anticausal pass). Launch must cover yDim threads in y.
__global__
void iirConvolve2D_Cardinal_Bspline_3_MirrorOffBound(float* input,
float* output, size_t xDim, size_t yDim) {
// assign line to thread
int idy = blockIdx.y * blockDim.y + threadIdx.y;
if (idy >= yDim) return; // only threads with data should continue
float* line = output + (idy * xDim);
// adjust gain (pole z = sqrt(3)-2 of the cubic B-spline)
float z = sqrtf(3.f) - 2.f;
float z1 = 1.0 - z;
float gain = -(z1 * z1) / z;
// copy original data (scaled by the prefilter gain)
for (int i = 0; i < xDim; i++) {
line[i] = input[i + (idy * xDim)] * gain;
}
// prepare some values: initial causal coefficient from the boundary sum
float sum = (line[0] + powf(z, xDim) * line[xDim - 1]) * (1.f + z) / z;
z1 = z;
float z2 = powf(z, 2 * xDim - 2);
float iz = 1.f / z;
for (int j = 1; j < (xDim - 1); ++j) {
sum += (z2 + z1) * line[j];
z1 *= z;
z2 *= iz;
}
line[0] = sum * z / (1.f - powf(z, 2 * xDim));
// causal (left-to-right) recursive pass
for (int j = 1; j < xDim; ++j) {
line[j] += z * line[j - 1];
}
// anticausal (right-to-left) recursive pass
line[xDim - 1] *= z / (z - 1.f);
for (int j = xDim - 2; 0 <= j; --j) {
line[j] = z * (line[j + 1] - line[j]);
}
}
|
1,790
|
__global__ void update(int nx, int ny, float *f, float *g) {
    // One leapfrog time step of a 2-D scalar wave equation on an nx x ny
    // grid stored row-major (idx = i*ny + j):
    //   f_new = 0.25 * laplacian(g) + 2*g - f_old
    // Edge cells are left untouched (fixed boundary).
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int i = idx / ny; // row
    int j = idx % ny; // column
    // interior check also rejects idx >= nx*ny (then i >= nx)
    if (i > 0 && i < nx - 1 && j > 0 && j < ny - 1) {
        // float literals keep all arithmetic in single precision; the
        // original double literals promoted every term to double
        f[idx] = 0.25f * (g[idx - ny] + g[idx + ny] + g[idx - 1] + g[idx + 1]
                          - 4.0f * g[idx])
                 + 2.0f * g[idx] - f[idx];
    }
}
__global__ void update_src(int nx, int ny, int tstep, float *g) {
    // Inject a sinusoidal point source at the center of the nx x ny grid.
    // sinf keeps the evaluation in single precision on the device; the
    // original sin() computed in double and then truncated to float.
    g[(nx / 2) * ny + (ny / 2)] = sinf(0.1f * tstep);
}
|
1,791
|
#include <iostream>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>
#include <ctime>
#include <sys/time.h>
#include <sstream>
#include <string>
#include <fstream>
using namespace std;
// Block-wide sum reduction: each block sums up to blockDim.x elements of
// g_idata and writes one partial sum to g_odata[blockIdx.x].
// Requires blockDim.x * sizeof(int) bytes of dynamic shared memory and a
// power-of-two block size; uses the naive interleaved-addressing scheme.
__global__ void reduce0(int *g_idata, int *g_odata, int size){
    extern __shared__ int sdata[];
    unsigned int lane = threadIdx.x;
    unsigned int gid = blockIdx.x * blockDim.x + threadIdx.x;
    // stage one element per thread into shared memory (0 past the end)
    sdata[lane] = (gid < size) ? g_idata[gid] : 0;
    __syncthreads();
    // pairwise combine at doubling strides; only threads whose index is a
    // multiple of 2*stride accumulate on each round
    for (unsigned int stride = 1; stride < blockDim.x; stride <<= 1) {
        if (lane % (2 * stride) == 0) {
            sdata[lane] += sdata[lane + stride];
        }
        __syncthreads(); // barrier stays outside the divergent branch
    }
    // thread 0 publishes this block's partial sum
    if (lane == 0) g_odata[blockIdx.x] = sdata[0];
}
int main(void){
// Multi-pass parallel sum: each launch of reduce0 collapses the array by a
// factor of threadsPerBlock, ping-ponging between two device buffers until
// a single value remains.
int size = 939289;
thrust::host_vector<int> data_h_i(size, 1);
//initialize the data, all values will be 1
//so the final sum will be equal to size
int threadsPerBlock = 256;
// ceil-div: enough blocks to cover every element
int totalBlocks = (size+(threadsPerBlock-1))/threadsPerBlock;
thrust::device_vector<int> data_v_i = data_h_i;
thrust::device_vector<int> data_v_o(totalBlocks); // per-block partial sums
int* output = thrust::raw_pointer_cast(data_v_o.data());
int* input = thrust::raw_pointer_cast(data_v_i.data());
// 'turn' tracks which buffer currently holds the input: true -> data_v_i
bool turn = true;
while(true){
if(turn){
// third launch argument = dynamic shared-memory bytes per block
reduce0<<<totalBlocks, threadsPerBlock, threadsPerBlock*sizeof(int)>>>(input, output, size);
turn = false;
}
else{
// reduce back into the (larger) input buffer; its extra capacity is unused
reduce0<<<totalBlocks, threadsPerBlock, threadsPerBlock*sizeof(int)>>>(output, input, size);
turn = true;
}
if(totalBlocks == 1) break; // a single partial sum is the final answer
// next pass reduces the partial sums produced by this pass
size = totalBlocks;
totalBlocks = ceil((double)totalBlocks/threadsPerBlock);
}
thrust::host_vector<int> data_h_o;
// after the loop 'turn' tells which device buffer holds the result:
// turn == true means the last launch wrote into data_v_i
if(turn)
data_h_o = data_v_i;
else
data_h_o = data_v_o;
data_v_i.clear();
data_v_i.shrink_to_fit();
data_v_o.clear();
data_v_o.shrink_to_fit();
// should print the initial element count (all inputs were 1)
cout<<data_h_o[0]<<endl;
return 0;
}
|
1,792
|
//#pragma comment (lib, "cublas.lib")
//#include "stdio.h"
//#include <cuda.h>
//using namespace std;
//#include <ctime>
//#include "cuda_runtime.h"
//#include "curand_kernel.h"
//#include "device_launch_parameters.h"
//#include <stdio.h>
//#include <stdlib.h>
//
//#include <string>
//#include <iomanip>
//#include <time.h>
//#include <iostream>
//#include <cmath>
//#include <math.h>
//
//#define TRAIN_NUM 60000
//#define TEST_NUM 10000
//#define ROW 4
//#define COL 4
//#define CONV_SIZE 2
//#define POOL_SIZE 1
//#define FC1_SIZE 45
//#define FC2_SIZE 10
//#define CONV_W_SIZE 3
//#define CONV_W_NUM 1
//
//__device__ float _alpha = 20;
//__device__ int _train_label[TRAIN_NUM];
//__device__ float _train_image[ROW][COL];
//__device__ float _conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE];
//__device__ float _conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//
////__global__ void _input_conv(float _train_image[4][4], float _conv_w[1][3][3], float _conv_z[1][2][2])
//__global__ void _input_conv()
//{
// int ix = threadIdx.x + blockDim.x * blockIdx.x;
// int iy = threadIdx.y + blockDim.y * blockIdx.y;
// int iz = threadIdx.z + blockDim.z * blockIdx.z;
// if (ix < CONV_W_NUM && iy < CONV_SIZE && iz < CONV_SIZE)
// {
// _conv_z[ix][iy][iz] = 0;
// // #pragma unroll
// for (int l = 0;l < CONV_W_SIZE;l++)
// for (int m = 0;m < CONV_W_SIZE;m++)
// _conv_z[ix][iy][iz] += _train_image[iy + l][iz + m] * _conv_w[ix][l][m];
// _conv_z[ix][iy][iz] += _alpha;
// //_conv_a[ix][iy][iz] = _sigmoid(_conv_z[ix][iy][iz]);
// }
//}
//int main() {
// float train_image[ROW][COL] = { { 3, 1, 2, 4 },
// { 2, 4, 3, 1 },
// { 1, 5, 2, 3 },
// { 2, 3, 4, 1 } };
// float conv_w[CONV_W_NUM][CONV_W_SIZE][CONV_W_SIZE] = { {
// {1, 2, 3},
// {4, 3, 1},
// {1, 2, 4}}};
//
// float** h_weight = new float* [3];
// for (int i = 0;i < 3;i++) {
// h_weight[i] = new float[3];
// }
// float conv_z[CONV_W_NUM][CONV_SIZE][CONV_SIZE];
//
// float train_label[2] = { 3,2 };
// //float* _train_image;
// //float* _train_label;
// //float* _conv_w;
// //float* _conv_z;
//
// //cudaMalloc(&_conv_w, CONV_W_NUM * CONV_W_SIZE * CONV_W_SIZE * sizeof(float));
// //cudaMalloc(&_conv_z, CONV_W_NUM * CONV_SIZE * CONV_SIZE * sizeof(float));
// //cudaMalloc(&_train_image, ROW*COL*sizeof(float));
// //cudaMalloc(&_train_label, 2*sizeof(float));
//
// float alpha = 10.0;
// cudaMemcpyToSymbol(_alpha, &alpha, sizeof(float));
// cudaMemcpyToSymbol(_train_image, train_image, ROW * COL * sizeof(float));
// cudaMemcpyToSymbol(_conv_w, conv_w, CONV_W_NUM * CONV_W_SIZE * CONV_W_SIZE * sizeof(float));
// //cudaMemcpy(_train_label, train_label, 2 * sizeof(float), cudaMemcpyHostToDevice);
// //cudaMemcpy(_train_image, train_image, ROW * COL * sizeof(float), cudaMemcpyHostToDevice);
// //cudaMemcpy(_conv_w, conv_w, CONV_W_NUM*CONV_W_SIZE*CONV_W_SIZE*sizeof(float), cudaMemcpyHostToDevice);
// dim3 grid2(1, 2, 2);
//
// //_input_conv << <1, grid2>> > ((float (*)[4])_train_image, (float (*)[3][3])_conv_w, (float (*)[2][2])_conv_z);
// _input_conv << <1, grid2>> > ();
// cudaMemcpyFromSymbol(&conv_z, _conv_z, CONV_W_NUM * CONV_SIZE * CONV_SIZE*sizeof(float));
// for (int i = 0;i < CONV_SIZE;i++) {
// for (int j = 0;j < CONV_SIZE;j++) {
// cout << conv_z[0][i][j]<<" ";
// }
// cout << endl;
// }
// return 0;
//}
|
1,793
|
#include <cuda.h>
#include <bits/stdc++.h>
#define BLOCK_SIZE 32
#define TILE_WIDTH BLOCK_SIZE
//int BLOCK_SIZE, TILE_WIDTH;
using namespace std;
//Declarations :
//matrix initialization
void init(int *A, int n, int d);
//matrix comparation
bool compare(int *A, int *B, int n);
//print matrix
void printmat(int *A, int rows, int cols);
//sequential matrix multiplication
void matMult(int *h_A, int *h_B, int *h_C, int common, int Arows, int Bcols);
//pre kernel matrix multiplication
void prematMultP(int *A, int *B, int *C, int common, int Arows, int Bcols);
//pre kernel matrix tiling multiplication
void prematMultPTiled(int *A, int *B, int *C, int common, int Arows, int Bcols);
//Parallel kernel
__global__ void matMultP (int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols);
//Parallel kernel (tiling)
__global__ void matMultPTiled(int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols);
//End declarations
int main() {
    // Benchmark: sequential vs. naive-parallel vs. tiled-parallel integer
    // matrix multiply for 10 problem sizes read from stdin
    // (each line: Arows common Bcols).
    for (int i = 0; i < 10; i++) {
        cout << i + 1 << endl;
        int Arows, common, Bcols;
        cin >> Arows >> common >> Bcols;
        int sizeA = Arows * common * sizeof(int);
        int sizeB = common * Bcols * sizeof(int);
        int sizeR = Arows * Bcols * sizeof(int);
        int *A = (int *)malloc(sizeA);
        int *B = (int *)malloc(sizeB);
        int *C = (int *)malloc(sizeR); // sequential result
        int *D = (int *)malloc(sizeR); // naive kernel result
        int *E = (int *)malloc(sizeR); // tiled kernel result
        // NOTE: init takes an int fill value, so 1.5 / 1.75 truncate to 1
        init(A, Arows * common, 1.5);
        init(B, common * Bcols, 1.75);
        init(C, Arows * Bcols, 0);
        init(D, Arows * Bcols, 0);
        init(E, Arows * Bcols, 0);
        double a, b, c;
        // Sequential reference
        clock_t t = clock();
        matMult(A, B, C, common, Arows, Bcols);
        t = clock() - t;
        a = ((float)t) / CLOCKS_PER_SEC;
        cout << a << endl;
        // Naive parallel kernel
        t = clock();
        prematMultP(A, B, D, common, Arows, Bcols);
        t = clock() - t;
        b = ((float)t) / CLOCKS_PER_SEC;
        cout << b << endl;
        cout << (a / b) << endl; // speedup vs. sequential
        // Tiled parallel kernel
        t = clock();
        prematMultPTiled(A, B, E, common, Arows, Bcols);
        t = clock() - t;
        c = ((float)t) / CLOCKS_PER_SEC;
        cout << c << endl;
        cout << (a / c) << endl; // speedup vs. sequential
        // verify both GPU results against the sequential reference
        if (compare(C, D, Arows * Bcols) and compare(D, E, Arows * Bcols))
            cout << "Ok :)" << endl;
        else
            cout << "No ok :(" << endl;
        free(A);
        free(B);
        free(C);
        free(D);
        free(E); // BUG FIX: E was allocated but never freed (leak per iteration)
    }
    return 0;
}
//Functions
//matrix initialization
// Fill the first n entries of A with the value d.
void init(int *A, int n, int d) {
    int *end = A + n;
    while (A != end) {
        *A++ = d;
    }
}
//matrix comparation
// Element-wise equality of the first n entries of A and B.
// Returns true when every pair matches (vacuously true for n == 0).
bool compare(int *A, int *B, int n) {
    bool equal = true;
    for (int i = 0; i < n and equal; i++) {
        equal = (A[i] == B[i]);
    }
    return equal;
}
//print matrix
// Print a rows x cols matrix stored row-major, one row per line,
// followed by a blank line.
void printmat(int *A, int rows, int cols) {
    for (int i = 0; i < rows; i++) {
        for (int j = 0; j < cols; j++) {
            // BUG FIX: the row stride of a row-major matrix is the number of
            // columns; the original A[i * rows + j] misprinted any
            // non-square matrix
            cout << A[i * cols + j] << " ";
        }
        cout << endl;
    }
    cout << endl;
}
//matrix multiplication
// Sequential reference: h_C = h_A * h_B for row-major integer matrices.
// h_A is Arows x common, h_B is common x Bcols, h_C is Arows x Bcols.
void matMult(int *h_A, int *h_B, int *h_C, int common, int Arows, int Bcols) {
    for (int r = 0; r < Arows; r++) {
        for (int c = 0; c < Bcols; c++) {
            int acc = 0;
            for (int k = 0; k < common; k++) {
                acc += h_A[r * common + k] * h_B[k * Bcols + c];
            }
            h_C[r * Bcols + c] = acc;
        }
    }
}
//pre kernel matrix multiplication
// Host wrapper for the naive kernel: allocates device buffers, copies A and
// B over, launches matMultP on a 2-D grid of BLOCK_SIZE x BLOCK_SIZE blocks
// (ceil-div so the whole Arows x Bcols output is covered), and copies the
// result back into C.
// NOTE(review): none of the cudaError_t return codes are checked here.
void prematMultP(int *A, int *B, int *C, int common, int Arows, int Bcols) {
int sizeA = Arows * common * sizeof(int);
int sizeB = common * Bcols * sizeof(int);
int sizeR = Arows * Bcols * sizeof(int);
int *d_A, *d_B, *d_C;
//Allocate memory for device
cudaMalloc(&d_A, sizeA);
cudaMalloc(&d_B, sizeB);
cudaMalloc(&d_C, sizeR);
//Copy Data from host to device
cudaMemcpy(d_A, A, sizeA, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, sizeB, cudaMemcpyHostToDevice);
//Blocks and Grids: grid.x spans output columns, grid.y spans output rows
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(ceil(Bcols / (float)BLOCK_SIZE), ceil(Arows / (float)BLOCK_SIZE));
//Launch Kernel
matMultP<<<dimGrid, dimBlock>>> (d_A, d_B, d_C, common, Arows, Bcols);
cudaDeviceSynchronize();
//Copy from device, free device memory
cudaMemcpy (C, d_C, sizeR, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
//pre kernel matrix tiling multiplication
// Host wrapper for the shared-memory tiled kernel; identical setup to
// prematMultP but launches matMultPTiled (blocks must be
// BLOCK_SIZE x BLOCK_SIZE because the kernel's tiles are TILE_WIDTH wide).
// NOTE(review): none of the cudaError_t return codes are checked here.
void prematMultPTiled(int *A, int *B, int *C, int common, int Arows, int Bcols) {
int sizeA = Arows * common * sizeof(int);
int sizeB = common * Bcols * sizeof(int);
int sizeR = Arows * Bcols * sizeof(int);
int *d_A, *d_B, *d_C;
//Allocate memory for device
cudaMalloc(&d_A, sizeA);
cudaMalloc(&d_B, sizeB);
cudaMalloc(&d_C, sizeR);
//Copy Data from host to device
cudaMemcpy(d_A, A, sizeA, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, B, sizeB, cudaMemcpyHostToDevice);
//Blocks and Grids: grid.x spans output columns, grid.y spans output rows
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(ceil(Bcols / (float)BLOCK_SIZE), ceil(Arows / (float)BLOCK_SIZE));
//Launch Kernel
matMultPTiled<<<dimGrid, dimBlock>>> (d_A, d_B, d_C, common, Arows, Bcols);
cudaDeviceSynchronize();
//Copy from device, free device memory
cudaMemcpy (C, d_C, sizeR, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
//Parallel kernel
// Naive matrix-multiply kernel: one thread computes one element of d_C
// (Arows x Bcols), reading a full row of d_A and a full column of d_B
// from global memory.
__global__ void matMultP (int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols) {
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= Arows or col >= Bcols) return; // guard the grid tail
    int acc = 0;
    for (int k = 0; k < common; ++k) {
        acc += d_A[common * row + k] * d_B[Bcols * k + col];
    }
    d_C[Bcols * row + col] = acc;
}
//Parallel kernel (tiling)
// Tiled matrix multiply: each block computes a TILE_WIDTH x TILE_WIDTH patch
// of d_C, staging tiles of d_A and d_B through shared memory. Out-of-range
// tile entries are zero-filled so every thread can reach the __syncthreads()
// barriers even when the sizes are not multiples of the tile width.
__global__ void matMultPTiled(int *d_A, int *d_B, int *d_C, int common, int Arows, int Bcols) {
__shared__ int Mds[TILE_WIDTH][TILE_WIDTH]; // current tile of d_A
__shared__ int Nds[TILE_WIDTH][TILE_WIDTH]; // current tile of d_B
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int row = by * TILE_WIDTH + ty; // output row this thread owns
int col = bx * TILE_WIDTH + tx; // output column this thread owns
int Pvalue = 0; // running dot product
// march over the common dimension one tile at a time (ceil-div count)
for(int m = 0; m < (common + TILE_WIDTH - 1) / TILE_WIDTH; ++m) {
// stage one element of A's tile (0 when outside the matrix)
if(m * TILE_WIDTH + tx < common and row < Arows)
Mds[ty][tx] = d_A[row * common + m * TILE_WIDTH + tx];
else
Mds[ty][tx] = 0;
// stage one element of B's tile (0 when outside the matrix)
if(m * TILE_WIDTH + ty < common and col < Bcols)
Nds[ty][tx] = d_B[(m * TILE_WIDTH + ty) * Bcols + col];
else
Nds[ty][tx] = 0;
__syncthreads(); // tiles fully loaded before anyone reads them
for(int k = 0; k < TILE_WIDTH; ++k) {
Pvalue += Mds[ty][k] * Nds[k][tx];
}
__syncthreads(); // finish reading before the next iteration overwrites
}
if(row < Arows and col < Bcols)
d_C[row * Bcols + col] = Pvalue;
}
|
1,794
|
/*
Multiplica um vetor por uma constante.
Exemplo para o uso de memória constante em CUDA
*/
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#define TAM 100
#define VLR_ESCALAR 10
#define TPB 256
__device__ __constant__ int escalar_d;
// Scale each element of vetA_glb in place by the device-constant escalar_d.
// Constant memory broadcasts efficiently when all lanes read the same value.
__global__ void mult(int *vetA_glb){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= TAM) return; // guard the tail of the last block
    vetA_glb[idx] *= escalar_d;
}
int main(int argc,char **argv){
// Demo of CUDA __constant__ memory: multiply a vector by a scalar that the
// kernel reads from the device constant variable escalar_d.
int *vetA_h;
int *vetA_d;
int blocksPerGrid;
int i, escalar_h;
//Allocate the vector on the host
vetA_h=(int *)malloc(TAM * sizeof(int));
//Allocate the vector on the device
cudaMalloc((void**)&vetA_d,TAM*(sizeof(int)));
//Fill the vector on the host
for(i=0;i<TAM;i++){
vetA_h[i]=i;
}
//Copy the vector contents to the device
cudaMemcpy(vetA_d,vetA_h,TAM*(sizeof(int)), cudaMemcpyHostToDevice);
escalar_h=VLR_ESCALAR;
//Copy escalar_h into the device constant escalar_d (constant memory must
//be written with cudaMemcpyToSymbol, not plain cudaMemcpy)
cudaMemcpyToSymbol(escalar_d,&escalar_h,sizeof(int));
//Number of blocks per grid (ceil division so every element is covered)
blocksPerGrid=(TAM+TPB-1)/TPB;
//Launch the kernel with blocksPerGrid blocks of TPB threads each
mult <<<blocksPerGrid,TPB>>> (vetA_d);
//Copy the result back to the host (this cudaMemcpy also synchronizes
//with the kernel on the default stream)
cudaMemcpy(vetA_h,vetA_d,TAM*(sizeof(int)), cudaMemcpyDeviceToHost);
//Print the result on the host
for(i=0;i<TAM;i++){
printf("%d ",vetA_h[i]);
}
//Free the host vector
free(vetA_h);
//Free the device vector
cudaFree(vetA_d);
}
|
1,795
|
#include <iostream>
#include <stdio.h>
using namespace std;
__global__ void add(float *dX, float *dY, int N) {
    // Element-wise dY += dX over N floats.
    // Instead of a grid-stride loop, each thread is statically assigned a
    // contiguous chunk of floor(N / stride) elements; thread 0 then mops up
    // the N % stride leftover elements at the end of the array.
    int index = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x; // total threads launched
    int valuesPerThread = 1;
    int remainder = 0;
    if (stride < N) {
        // elements not covered by the equal-sized chunks
        remainder = N % stride;
        // integer division already rounds toward zero, so the original
        // "correct for rounding up" branch could never fire and was removed
        valuesPerThread = N / stride;
    }
    // each thread walks its own contiguous chunk
    int startLocation = index * valuesPerThread;
    for (int i = 0; i < valuesPerThread; i++) {
        int k = startLocation + i;
        // BUG FIX: when stride >= N every launched thread got one element,
        // so threads with index in [N, stride) wrote past the arrays;
        // the guard is a no-op in the chunked (stride < N) case
        if (k < N) {
            dY[k] = dX[k] + dY[k];
        }
    }
    // thread 0 sequentially handles the leftover tail
    if (index == 0 && remainder != 0) {
        // remainderIndex is one before the first leftover element
        int remainderIndex = N - remainder - 1;
        for (int i = 1; i <= remainder; i++) {
            dY[remainderIndex + i] = dX[remainderIndex + i] + dY[remainderIndex + i];
        }
    }
}
int main() {
    // Adds two N-element vectors on the GPU (deliberately launching fewer
    // threads than elements so the kernel's chunking logic is exercised)
    // and reports the fraction of wrong results.
    int N = 256;
    int memSize = N * sizeof(float);
    float x[N], y[N], z[N];
    // device pointers; the original malloc()'d host buffers here and then
    // immediately overwrote the pointers with cudaMalloc, leaking host memory
    float *dX = nullptr;
    float *dY = nullptr;
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 40.0f;
    }
    cudaMalloc(&dX, memSize);
    cudaMalloc(&dY, memSize);
    cudaMemcpy(dX, &x, memSize, cudaMemcpyHostToDevice);
    cudaMemcpy(dY, &y, memSize, cudaMemcpyHostToDevice);
    // 50 threads < 256 elements: each thread handles a chunk
    add<<<1, 50>>>(dX, dY, N);
    cudaDeviceSynchronize();
    cudaMemcpy(&z, dY, memSize, cudaMemcpyDeviceToHost);
    int errorCount = 0;
    for (int i = 0; i < N; i++) {
        if (z[i] != 41) {
            errorCount += 1;
        }
    }
    // BUG FIX: (errorCount / N) * 100 in integer math was 0 unless every
    // element was wrong; multiply before dividing
    int percentError = (errorCount * 100) / N;
    printf("Percent Error: %d\n", percentError);
    cudaFree(dX);
    cudaFree(dY);
    printf("Done!\n");
    printf("Memory freed\n");
    return 0;
}
|
1,796
|
#include <stdio.h>
#define N 512
/***************************************************************
* TERMINOLOGÍA *
* Un block puede ser dividido en distintos threads paralelos *
* Usamos threadId.x en vez de blockIdx.x *
****************************************************************/
__global__ void add(/*int *a, int *b, int *c*/){
// The vector-add body is commented out; the kernel currently only prints a
// message from each device thread (device printf is serialized and slow —
// debugging only).
//c[threadIdx.x] = a[threadIdx.x] + b[threadIdx.x];
printf("hola desde el kernel");
}
int main(){
int *a, *b, *c; // Host copies of a, b and c
int *d_a, *d_b, *d_c; // Device copies of a, b and c
int size = N * sizeof(int);
// Allocate memory for the device copies
cudaMalloc((void**)&d_a, size);
cudaMalloc((void**)&d_b, size);
cudaMalloc((void**)&d_c, size);
// Allocate space for the copies of a, b and c on the host
a = (int *)malloc(size);
b = (int *)malloc(size);
c = (int *)malloc(size);
for(int i = 0; i < N; i++){
a[i] = i;
b[i] = i;
}
// Copy the inputs to the device
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch the add kernel with N threads (its arguments are commented out,
// so the kernel only prints and never writes d_c)
add<<<1,N>>>(/*d_a, d_b, d_c*/);
// Copy the "result" back to host memory
// NOTE(review): d_c is never written by the kernel, so c receives
// uninitialized device memory; the print loop below stays commented out
cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
/*
for(int i = 0; i < N; i++)
printf("%d, ", c[i]);
*/
// Free memory
free(a); free(b); free(c);
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
return 0;
}
|
1,797
|
#include "includes.h"
// 1 / (1 + e^(-x))
// Element-wise logistic sigmoid: result[i] = 1 / (1 + exp(-x[i])).
// Double-precision in and out; extern "C" keeps an unmangled symbol name.
extern "C"
__global__ void logistic(size_t n, double *result, double *x)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return; // threads past the end of the array do nothing
    result[i] = 1.0 / (1.0 + exp(-x[i]));
}
|
1,798
|
#include "includes.h"
// Element-wise vector addition: VecC[i] = VecA[i] + VecB[i] for i < size.
__global__ void VectorAdd(float *VecA, float *VecB, float *VecC, int size)
{
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size) return; // guard the tail of the last block
    VecC[idx] = VecA[idx] + VecB[idx];
}
|
1,799
|
#include <cuda_runtime.h>
extern "C" void sumMatrixOnGPU2D1(float *MatA, float *MatB, float *MatC, int nx, int ny, int dimx);
// grid 1D block 1D
// grid 2D block 2D
// grid 2D block 1D
// Element-wise matrix add with a mixed launch shape: 2-D grid of 1-D
// blocks. blockIdx.y selects the row directly; threads cover columns.
__global__ void sumMatrixOnGPUMix(float *MatA, float *MatB, float *MatC, int nx, int ny)
{
    unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; // column
    unsigned int iy = blockIdx.y;                            // row
    if (ix >= nx || iy >= ny) return; // skip threads past the matrix edge
    unsigned int idx = iy * nx + ix;  // row-major linear index
    MatC[idx] = MatA[idx] + MatB[idx];
}
// Host wrapper: launches sumMatrixOnGPUMix over an nx x ny matrix using
// 1-D blocks of dimx threads and a 2-D grid (ceil(nx/dimx) columns of
// blocks, one grid row per matrix row).
// NOTE(review): no synchronization or error check here; the caller must
// sync before reading MatC.
void sumMatrixOnGPU2D1(float *MatA, float *MatB, float *MatC, int nx, int ny, int dimx)
{
dim3 block(dimx, 1);
dim3 grid((nx + block.x - 1) / block.x, ny); // ceil-div across columns
//sumMatrixOnGPU2D<<<grid, block>>>(MatA, MatB, MatC, nx, ny);
sumMatrixOnGPUMix<<<grid, block>>>(MatA, MatB, MatC, nx, ny);
}
|
1,800
|
/*
* @Author: heze
* @Date: 2021-06-01 00:38:55
* @LastEditTime: 2021-06-05 00:47:39
* @Description: 在gpu_shareMem.cu基础上查对数表
* @FilePath: /src/gpu_shareMem_log.cu
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define blockSize 10
#define printArray 0
/**
* @brief 对数表
*/
__device__ float logTable[26]={0,
0.000000000000000,1.000000000000000,1.584962500721156,2.000000000000000,2.321928094887362,
2.584962500721156,2.807354922057604,3.000000000000000,3.169925001442312,3.321928094887362,
3.459431618637297,3.584962500721156,3.700439718141092,3.807354922057604,3.906890595608519,
4.000000000000000,4.087462841250339,4.169925001442312,4.247927513443585,4.321928094887363,
4.392317422778761,4.459431618637297,4.523561956057013,4.584962500721156,4.643856189774724};
/**
* @brief 核函数,在gpu_shareMem.cu基础上查对数表
*
* @param width 矩阵列数
* @param height 矩阵行数
* @param array 待计算矩阵
* @param globalResult 存放结果的矩阵
* @return void
*/
__global__ void cal(int width, int height, int *array, float *globalResult) {
    // Local entropy filter: for each element, build a histogram of the 16
    // possible values (0..15) inside a clamped 5x5 window and compute
    //   H = -sum_i p_i * (log2(count_i) - log2(total))
    // using the precomputed logTable (logTable[k] == log2(k)).
    // Requires blockDim == (blockSize, blockSize) == (10, 10).
    int ix = threadIdx.x + blockDim.x * blockIdx.x;
    int iy = threadIdx.y + blockDim.y * blockIdx.y;
    int index = ix + iy * width;
    // BUG FIX: the launch rounds the grid up, so threads whose linear index
    // falls outside the matrix would write out of bounds below
    if (index >= width * height) return;
    int indexX = index / width; // row of this element
    int indexY = index % width; // column of this element
    // clamped window bounds: rows [indexUp, indexDown), cols [indexLeft, indexRight)
    int indexLeft = max(0, indexY - 2);
    int indexRight = min(indexY + 3, width);
    int indexUp = max(0, indexX - 2);
    int indexDown = min(indexX + 3, height);
    int indexNum = (indexRight - indexLeft) * (indexDown - indexUp);
    // each thread owns a private 16-bin slice of the shared histogram
    int shareID = threadIdx.y * blockSize + threadIdx.x;
    __shared__ int indexTimes[16 * 10 * 10];
    // BUG FIX: shared memory is uninitialized; zero this thread's bins
    // before counting (no __syncthreads needed — slices are thread-private)
    for (int i = 0; i < 16; i++) {
        indexTimes[shareID * 16 + i] = 0;
    }
    float localResult = 0, indexP;
    for (int i = indexUp; i < indexDown; i++) {
        for (int j = indexLeft; j < indexRight; j++) {
            indexTimes[shareID * 16 + array[i * width + j]]++;
        }
    }
    for (int i = 0; i < 16; i++) {
        // BUG FIX: read this thread's bin; the original read indexTimes[i],
        // i.e. bins belonging to thread 0's slice
        indexP = (float)indexTimes[shareID * 16 + i] / indexNum;
        if (indexP != 0.0f) {
            localResult -= indexP * (logTable[indexTimes[shareID * 16 + i]] - logTable[indexNum]);
        }
    }
    globalResult[index] = localResult;
}
/**
* @description: 主函数
* @param {int} argc 命令行参数个数
* @param {char const} *argv 命令行参数指针
* @return {*}
*/
int main(int argc, char const *argv[])
{
    // Usage: prog <height> <width>
    // Fills a height x width matrix with random values in [0, 16), runs the
    // local-entropy kernel, and reports the GPU running time.
    if (argc < 3) {
        printf("usage: %s <height> <width>\n", argv[0]);
        return 1;
    }
    int height = atoi(argv[1]);
    int width = atoi(argv[2]);
    int size = height * width;
    int *host_array, *device_array;
    float *host_result, *device_result;
    // pinned host allocations (must be released with cudaFreeHost)
    cudaMallocHost((void **)&host_array, sizeof(int) * size);
    cudaMallocHost((void **)&host_result, sizeof(float) * size);
    // fill the matrix with random values 0..15
    srand((unsigned)time(0));
    for (int i = 0; i < height; i++) {
        for (int j = 0; j < width; j++) {
            host_array[i * width + j] = rand() % 16;
        }
    }
    if (printArray) {
        printf("二维数组:\n");
        for (int i = 0; i < height; i++) {
            for (int j = 0; j < width; j++) {
                printf("%2d ", host_array[i * width + j]);
            }
            printf("\n");
        }
    }
    // device buffers and input upload
    cudaMalloc((void **)&device_array, sizeof(int) * size);
    cudaMalloc((void **)&device_result, sizeof(float) * size);
    cudaMemcpy(device_array, host_array, sizeof(int) * size, cudaMemcpyHostToDevice);
    clock_t start, end;
    // grid sized by rounding up so the whole matrix is covered
    unsigned int grid_rows = (height / blockSize) + 1;
    unsigned int grid_cols = (width / blockSize) + 1;
    // BUG FIX: gridDim.x must span columns — the kernel's ix is the x/column
    // coordinate. The original passed (rows, cols) and left columns
    // uncomputed whenever height < width.
    dim3 dimGrid(grid_cols, grid_rows);
    dim3 dimBlock(blockSize, blockSize);
    // time the kernel (cudaDeviceSynchronize so clock() sees completion)
    start = clock();
    cal<<<dimGrid, dimBlock>>>(width, height, device_array, device_result);
    cudaDeviceSynchronize();
    end = clock();
    double time_gpu = (double)(end - start) / CLOCKS_PER_SEC;
    // copy results back and optionally print them
    cudaMemcpy(host_result, device_result, sizeof(float) * size, cudaMemcpyDeviceToHost);
    if (printArray) {
        printf("结果:\n");
        for (int i = 0; i < height; i++) {
            for (int j = 0; j < width; j++) {
                printf("%.5f ", host_result[i * width + j]);
            }
            printf("\n");
        }
    }
    printf("矩阵维度%dx%d,使用共享内存并查表在GPU上运行时间: %f ms.\n", height, width, time_gpu * 1000);
    // BUG FIX: memory from cudaMallocHost must be freed with cudaFreeHost,
    // not cudaFree
    cudaFreeHost(host_array);
    cudaFreeHost(host_result);
    cudaFree(device_array);
    cudaFree(device_result);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.