serial_no
int64 1
24.2k
| cuda_source
stringlengths 11
9.01M
|
|---|---|
801
|
/*
 * Minimal host-only smoke-test program: writes the greeting to stdout and
 * exits with success. No device code is launched.
 */
#include <stdio.h>
int main(void) {
    /* Same byte-for-byte output as before, emitted via fputs. */
    fputs("hello world!!\n", stdout);
    return 0;
}
|
802
|
#include <iostream>
#include <random>
#include <cuda_runtime_api.h>
#include <sys/time.h>
#include <vector>
// Array-of-structures record: a 3-D position (x, y, z) plus an RGBA color.
// Used below to benchmark AoS versus SoA memory layouts.
struct RGBPoint {
float x;
float y;
float z;
float r;
float g;
float b;
float a;
// Default constructor deliberately leaves members uninitialized: buffers of
// RGBPoint are bulk-allocated and filled on the device.
RGBPoint() {}
// Member-wise constructor; parameters shadow the fields intentionally.
RGBPoint(float x, float y, float z, float r, float g, float b, float a) : x(x), y(y), z(z), r(r), g(g), b(b), a(a) {}
};
// Tag every 120th pixel of the AoS image with fixed marker values.
// Launch layout: thread/block x indexes rows, y indexes columns; the guard
// handles grids that overshoot the row x col domain.
__global__ void TestKernelRGB(RGBPoint *img_RGB, const int row, const int col) {
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= row || c >= col) {
        return;
    }
    const int idx = r * col + c;
    if (idx % 120 != 0) {
        return;
    }
    // Write all seven fields of the selected pixel through one pointer.
    RGBPoint *px = &img_RGB[idx];
    px->a = 3.0f;
    px->r = 4.0f;
    px->g = 5.0f;
    px->b = 6.0f;
    px->x = 7.0f;
    px->y = 8.0f;
    px->z = 9.0f;
}
// SoA variant of TestKernelRGB: tags every 120th pixel by writing its alpha
// into the separate img_float_a plane and six packed components (r,g,b,x,y,z)
// into img_float. Thread/block x indexes rows, y indexes columns.
__global__ void TestKernelFloat(float *img_float, float *img_float_a, const int row, const int col) {
    const int r = threadIdx.x + blockIdx.x * blockDim.x;
    const int c = threadIdx.y + blockIdx.y * blockDim.y;
    if (r >= row || c >= col) {
        return;
    }
    const int idx = r * col + c;
    if (idx % 120 != 0) {
        return;
    }
    // Six consecutive floats per pixel, written through a base pointer.
    float *slot = img_float + idx * 6;
    img_float_a[idx] = 3.0f;
    slot[0] = 4.0f;
    slot[1] = 5.0f;
    slot[2] = 6.0f;
    slot[3] = 7.0f;
    slot[4] = 8.0f;
    slot[5] = 9.0f;
}
// Benchmark driver: times (t1) the AoS kernel vs (t2) the SoA kernel, then
// three host-side gather loops over the results (t3: AoS gather, t4: SoA
// alpha-test copying from the AoS buffer, t5: full SoA gather with repack).
// All buffers are unified (managed) memory, so host loops read them directly.
int main() {
struct timeval start, end;
float t1, t2, t3, t4, t5;
const int row = 640;
const int col = 480;
const size_t size_RGB = row * col * sizeof(RGBPoint);
const size_t size_float = row * col * sizeof(float);
RGBPoint *img_RGB;
cudaMallocManaged(&img_RGB, size_RGB);
float *img_float, *img_float_a;
// SoA layout: six packed components per pixel plus a separate alpha plane.
cudaMallocManaged(&img_float, size_float * 6);
cudaMallocManaged(&img_float_a, size_float);
dim3 block_size(4, 32);
// Ceil-division so the grid covers the whole row x col domain.
dim3 grid_size((row - 1) / block_size.x + 1, (col - 1) / block_size.y + 1);
// t1: AoS kernel (wall-clock, including managed-memory migration).
gettimeofday(&start, nullptr);
TestKernelRGB<<<grid_size, block_size>>>(img_RGB, row, col);
cudaDeviceSynchronize();
gettimeofday(&end, nullptr);
t1 = ((end.tv_sec - start.tv_sec) * 1000000.0f + (end.tv_usec - start.tv_usec)) / 1000.0f;
// t2: SoA kernel.
gettimeofday(&start, nullptr);
TestKernelFloat<<<grid_size, block_size>>>(img_float, img_float_a, row, col);
cudaDeviceSynchronize();
gettimeofday(&end, nullptr);
t2 = ((end.tv_sec - start.tv_sec) * 1000000.0f + (end.tv_usec - start.tv_usec)) / 1000.0f;
// t3: host pass over the AoS image, gathering marked pixels (a > 0).
std::vector<RGBPoint> result_RGB;
gettimeofday(&start, nullptr);
for (int h = 0; h < row; h++) {
for (int w = 0; w < col; w++) {
int index = h * col + w;
if (img_RGB[index].a > 0) {
result_RGB.push_back(img_RGB[index]);
}
}
}
gettimeofday(&end, nullptr);
t3 = ((end.tv_sec - start.tv_sec) * 1000000.0f + (end.tv_usec - start.tv_usec)) / 1000.0f;
// t4: SoA alpha test, but copying whole structs from the AoS buffer —
// measures the test without the per-field repacking cost.
std::vector<RGBPoint> result_float1;
gettimeofday(&start, nullptr);
for (int h = 0; h < row; h++) {
for (int w = 0; w < col; w++) {
int index = h * col + w;
if (img_float_a[index] > 0) {
//RGBPoint img;
result_float1.push_back(img_RGB[index]);
}
}
}
gettimeofday(&end, nullptr);
t4 = ((end.tv_sec - start.tv_sec) * 1000000.0f + (end.tv_usec - start.tv_usec)) / 1000.0f;
// t5: full SoA gather — repack the six floats into an RGBPoint, mirroring
// the write order used in TestKernelFloat (r,g,b,x,y,z).
std::vector<RGBPoint> result_float2;
gettimeofday(&start, nullptr);
for (int h = 0; h < row; h++) {
for (int w = 0; w < col; w++) {
int index = h * col + w;
if (img_float_a[index] > 0) {
RGBPoint img;
int index6 = index * 6;
img.a = img_float_a[index];
img.r = img_float[index6];
img.g = img_float[index6 + 1];
img.b = img_float[index6 + 2];
img.x = img_float[index6 + 3];
img.y = img_float[index6 + 4];
img.z = img_float[index6 + 5];
result_float2.push_back(img);
}
}
}
gettimeofday(&end, nullptr);
t5 = ((end.tv_sec - start.tv_sec) * 1000000.0f + (end.tv_usec - start.tv_usec)) / 1000.0f;
cudaFree(img_RGB);
cudaFree(img_float);
cudaFree(img_float_a);
std::cout << "RGB kernel time: " << t1 << " ms float kernel time: " << t2 << " ms" << std::endl;
std::cout << "RGB postprocess time: " << t3 << " ms float postprocess time: " << t5 << " ms float postprocess time without data copy: " << t4 << " ms" << std::endl;
return 0;
}
|
803
|
//MIT License
//
//Copyright(c) 2020 Zheng Jiaqi @NUSComputing
//
//Permission is hereby granted, free of charge, to any person obtaining a copy
//of this software and associated documentation files(the "Software"), to deal
//in the Software without restriction, including without limitation the rights
//to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
//copies of the Software, and to permit persons to whom the Software is
//furnished to do so, subject to the following conditions :
//
//The above copyright notice and this permission notice shall be included in all
//copies or substantial portions of the Software.
//
//THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
//IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
//FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.IN NO EVENT SHALL THE
//AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
//LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
//OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
//SOFTWARE.
#include "cuda_runtime.h"
#include <stdio.h>
// Map 2-D coordinates (x, y) on an n-wide row-major grid to a linear index.
// Fully parenthesized: the previous expansion `(y) * (n) + (x)` broke when
// the macro was embedded in a larger expression (e.g. `2 * TOID(...)`).
#define TOID(x, y, n) ((y) * (n) + (x))
// 2-D dot product of vectors (x1, y1) and (x2, y2); callable from both host
// and device code.
__host__ __device__ double scalar_product(double x1, double y1, double x2, double y2){
    const double xs = x1 * x2;
    const double ys = y1 * y2;
    return xs + ys;
}
// Barycentric coordinates of point (x0, y0) relative to the triangle
// (x1, y1)-(x2, y2)-(x3, y3). Note the output parameter order: the first
// reference receives the weight of vertex 1 (named w3 internally), then the
// weights of vertices 2 and 3. A degenerate (zero-area) triangle yields
// w1 = w2 = w3 = -1 as a sentinel.
__host__ __device__ void barycentric_coordinate(
    double x1, double y1, double x2, double y2,
    double x3, double y3, double x0, double y0,
    double &w3, double &w1, double &w2
){
    // Edge vectors from vertex 1, and the vector to the query point.
    double e1x = x2 - x1, e1y = y2 - y1;
    double e2x = x3 - x1, e2y = y3 - y1;
    double qx = x0 - x1, qy = y0 - y1;
    // Gram-matrix entries (dot products), written out inline.
    double d00 = e1x * e1x + e1y * e1y;
    double d01 = e1x * e2x + e1y * e2y;
    double d11 = e2x * e2x + e2y * e2y;
    double d20 = qx * e1x + qy * e1y;
    double d21 = qx * e2x + qy * e2y;
    double denom = d00 * d11 - d01 * d01;
    if (denom == 0) {
        // Degenerate triangle: flag with negative weights.
        w1 = w2 = w3 = -1;
        return;
    }
    w1 = (d11 * d20 - d01 * d21) / denom;
    w2 = (d00 * d21 - d01 * d20) / denom;
    w3 = 1.0 - w1 - w2;
}
// Rasterize per-vertex weights onto an n x n sample grid.
// Each thread handles one cell (tx, ty); its world position is index * scale.
// For the first triangle containing the point the kernel writes the
// barycentric interpolation of the triangle's vertex weights; if no triangle
// contains the point it writes 0.
//   points:   2*num_point doubles, (x, y) pairs indexed by triangle[]
//   weight:   one double per point
//   triangle: 3*num_tri vertex indices
//   density:  n*n floats (output)
__global__ void kernelDiscretization(
double *points,
double *weight,
int *triangle,
int num_tri,
float *density,
double scale,
int n
){
    int tx = blockDim.x * blockIdx.x + threadIdx.x;
    int ty = blockDim.y * blockIdx.y + threadIdx.y;
    // Fix: guard the grid tail. An unguarded launch that overshoots the
    // n x n domain would write density[] out of bounds.
    if (tx >= n || ty >= n) return;
    int id = TOID(tx, ty, n);
    double x = tx * scale, y = ty * scale;
    for (int i = 0; i < num_tri; ++i) {
        int p1 = triangle[i*3], p2 = triangle[i*3+1], p3 = triangle[i*3+2];
        double x1, x2, x3, y1, y2, y3, w1, w2, w3;
        x1 = points[p1 << 1], y1 = points[p1 << 1 | 1];
        x2 = points[p2 << 1], y2 = points[p2 << 1 | 1];
        x3 = points[p3 << 1], y3 = points[p3 << 1 | 1];
        barycentric_coordinate(x1, y1, x2, y2, x3, y3, x, y, w1, w2, w3);
        // Any negative weight means (x, y) is outside (or degenerate).
        if (w1 < 0 || w2 < 0 || w3 < 0) continue;
        density[id] = w1 * weight[p1] + w2 * weight[p2] + w3 * weight[p3];
        return;
    }
    density[id] = 0;
}
// Host wrapper: copies the mesh to the device, launches the discretization
// kernel over an n x n grid, and copies the density field back.
// NOTE(review): the grid is computed with truncating division (n / 16); when
// n is not a multiple of 16 the tail rows/columns are never processed —
// confirm callers always pass n % 16 == 0, or switch to ceil division plus
// an in-kernel bounds guard.
void discretization_d(
double *points,
double *weight,
int num_point,
int *triangle,
int num_tri,
float *density,
double scale,
int n
){
double *points_d, *weight_d;
float *density_d;
int *triangle_d;
// Device copies of the mesh inputs and the output field.
cudaMalloc((void **) &points_d, num_point * sizeof(double) * 2);
cudaMalloc((void **) &weight_d, num_point * sizeof(double));
cudaMalloc((void **) &triangle_d, num_tri * sizeof(int) * 3);
cudaMalloc((void **) &density_d, n * n * sizeof(float));
cudaMemcpy(points_d, points, num_point * sizeof(double) * 2, cudaMemcpyHostToDevice);
cudaMemcpy(weight_d, weight, num_point * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(triangle_d, triangle, num_tri * sizeof(int) * 3, cudaMemcpyHostToDevice);
dim3 block(16, 16);
dim3 grid(n/block.x, n/block.y);
kernelDiscretization <<< grid, block >>> (points_d, weight_d, triangle_d, num_tri, density_d, scale, n);
// Blocking device-to-host copy; also synchronizes with the kernel above.
cudaMemcpy(density, density_d, n * n * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(points_d);
cudaFree(weight_d);
cudaFree(triangle_d);
cudaFree(density_d);
}
|
804
|
#include "stdio.h"
#include "float.h"
#include <cuda.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#define HILOS 128
#define PATH "./inputs/randomData_2M_3feature.csv"
#define CANT_FEATURES 3
#define CANT_MEANS 4
#define CANT_ITERACIONES 100
#define MAX_DOUBLE DBL_MAX
//Funciones CUDA
__global__ void kMeansClusterAssignment(double* means_dev, double* items_dev, int *clusterAsignado_dev );
__global__ void kMeansCentroidUpdate(double *items_dev, int *clusterAsignado_dev, double *means_dev, int *d_clust_sizes,int *clusterAsignadoAnterior_dev,int *countChangeItem_dev);
__device__ u_int64_t Classify(double* means_dev, double* item, int cant_means, int cant_features);
__device__ double distanciaEuclidiana(double* x , double* y, int length);
//Funciones HOST
double** CalculateMeans(double* items_dev, double** means, u_int64_t size_lines, int *clusterAsignado_dev, int nBloques, int hilosB);
double*** FindClusters(int *clusterAsignado_dev, u_int64_t cant_items, double **items);
u_int64_t CalcLines(char filename[50]);
double **alloc_2d_double(u_int64_t rows, u_int64_t cols);
double** ReadData(char filename[50], u_int64_t size_lines, u_int8_t cant_features);
void searchMinMax(double** items, u_int64_t size_lines, double* minimo, double* maximo, u_int8_t cant_features);
double** InitializeMeans(u_int16_t cant_means, double* cMin, double* cMax, u_int8_t cant_features);
__host__ void check_CUDA_Error(const char *mensaje);
//Constantes de CUDA
__constant__ u_int64_t CANT_ITEMS_CUDA;
// Driver: loads the CSV, initializes the means, runs k-means on the GPU and
// reports per-phase timings measured with CUDA events.
int main()
{
// CUDA events used to time each phase.
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Phase 1: CalcLines + ReadData.
cudaEventRecord(start,0);
// Number of data lines (items) in the CSV file.
u_int64_t size_lines = CalcLines(PATH);
// Publish the item count to device code via a __constant__ symbol.
cudaMemcpyToSymbol(CANT_ITEMS_CUDA, &size_lines, sizeof(u_int64_t));
check_CUDA_Error("ERROR en cudaMemcpyToSymbol");
// double maxDouble = DBL_MAX;
// cudaMemcpyToSymbol(MAX_DOUBLE, &maxDouble, sizeof(double));
// check_CUDA_Error("ERROR en cudaMemcpyToSymbol");
double **items = ReadData(PATH, size_lines, CANT_FEATURES);
cudaEventRecord(stop,0);
// Block the CPU until the stop event has been recorded.
cudaEventSynchronize(stop);
// Elapsed time in milliseconds.
float elapsedTime2;
cudaEventElapsedTime(&elapsedTime2,start,stop);
// Phase 2: searchMinMax, launch-geometry calculation, mean initialization.
cudaEventRecord(start,0);
double *cMin, *cMax;
cMin = (double*) malloc(CANT_FEATURES * sizeof(double));
cMax = (double*) malloc(CANT_FEATURES * sizeof(double));
// Per-feature minimum and maximum over all items.
searchMinMax(items, size_lines, cMin, cMax, CANT_FEATURES);
printf("MIN: %lf, MAX: %lf\n", cMin[0], cMax[0]);
// Number of blocks for a fixed block size (manual ceil division).
int nBloques = size_lines/HILOS;
if (size_lines%HILOS != 0)
{
nBloques = nBloques + 1;
}
int hilosB = HILOS;
// Seed the means with evenly spaced estimates over the data range.
double** means = InitializeMeans(CANT_MEANS, cMin, cMax, CANT_FEATURES);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime3;
cudaEventElapsedTime(&elapsedTime3,start,stop);
// Device array: cluster index assigned to each item (zero-initialized).
int *clusterAsignado_dev = 0;
cudaMalloc(&clusterAsignado_dev,size_lines*sizeof(int));
cudaMemset(clusterAsignado_dev,0,size_lines*sizeof(int));
double* items_dev;
cudaMalloc( (void**)&items_dev, size_lines*CANT_FEATURES*sizeof(double));
check_CUDA_Error("ERROR en cudaMalloc");
// items is one contiguous block (see alloc_2d_double), so one copy suffices.
cudaMemcpy( items_dev, &items[0][0], size_lines*CANT_FEATURES*sizeof(double), cudaMemcpyHostToDevice );
check_CUDA_Error("ERROR en cudaMemcpy items_dev");
// Phase 3: CalculateMeans (the k-means iterations on the GPU).
cudaEventRecord(start,0);
means = CalculateMeans(items_dev, means, size_lines, clusterAsignado_dev ,nBloques, hilosB);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime,start,stop);
// Phase 4: FindClusters (group items by their final assignment).
cudaEventRecord(start,0);
double ***clusters = FindClusters(clusterAsignado_dev, size_lines, items);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
float elapsedTime4;
cudaEventElapsedTime(&elapsedTime4,start,stop);
// Release host and device resources.
for(int n = 0; n < CANT_MEANS; n++){
for(u_int64_t m = 0; m < size_lines; m++){
free(clusters[n][m]);
}
free(clusters[n]);
}
free(clusters);
free(items[0]);
free(items);
free(means[0]);
free(means);
free(cMin);
free(cMax);
cudaFree(clusterAsignado_dev);
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Report per-phase and total timings.
printf("> Tiempo de ejecucion de CalcLines y ReadData: %f ms\n",elapsedTime2);
printf("> Tiempo de ejecucion de SearchMinMax, Calculo de hilos-bloques CUDA e Inicializacion Medias: %f ms\n",elapsedTime3);
printf("> Tiempo de ejecucion de CalculateMeans: %f ms\n",elapsedTime);
printf("> Tiempo de ejecucion de FindCluster: %f ms\n",elapsedTime4);
printf("> Tiempo de total del programa: %f ms\n", elapsedTime + elapsedTime2 + elapsedTime3 + elapsedTime4);
return EXIT_SUCCESS;
}
/**
* @brief Funcion que se encarga de armar una matriz 3D, donde se insertaran los items de acuerdo a su clasificacion
* @param clusterAsignado_dev Arreglo 1D del cluster a que corresponde cada item
* @param cant_items Cantidad de items
* @param items Items a clasificar
* @return Arreglo 3D de Clusters finales de acuerdo a la clasificacion de los items en cada media
*/
/**
 * @brief Build a 3-D array of clusters by distributing each item into the
 *        cluster that the device-side assignment array maps it to.
 * @param clusterAsignado_dev Device array: cluster index per item
 * @param cant_items Number of items
 * @param items Host matrix of items (cant_items x CANT_FEATURES)
 * @return clusters[c][k][f]: feature f of the k-th item placed in cluster c.
 *         Each cluster is over-allocated to cant_items rows so any skew in
 *         the assignment always fits.
 */
double*** FindClusters(int *clusterAsignado_dev, u_int64_t cant_items, double **items)
{
    double ***clusters = (double ***) malloc(CANT_MEANS * sizeof(double**));
    // Allocate every cluster with room for all items.
    for(u_int8_t n = 0; n < CANT_MEANS; n++){
        clusters[n] = (double **) malloc(cant_items * sizeof(double*));
        for(u_int64_t m = 0; m < cant_items; m++){
            clusters[n][m] = (double *) malloc(CANT_FEATURES * sizeof(double));
        }
    }
    // Bring the per-item assignments back to the host.
    int *clusterAsignado = (int*)malloc(cant_items*sizeof(int));
    cudaMemcpy(clusterAsignado, clusterAsignado_dev, cant_items*sizeof(int), cudaMemcpyDeviceToHost );
    // Next free row within each cluster.
    int indices_series[CANT_MEANS];
    memset(indices_series, 0, sizeof(int)*CANT_MEANS);
    for(u_int64_t i = 0; i < cant_items; i++){
        int cluster = clusterAsignado[i];
        // Copy all features of item i into its cluster's next free slot.
        for(u_int8_t j = 0; j < CANT_FEATURES; j++){
            clusters[cluster][indices_series[cluster]][j] = items[i][j];
        }
        indices_series[cluster]++;
    }
    // Fix: the host staging buffer was previously leaked.
    free(clusterAsignado);
    return clusters;
}
/**
* @brief Funcion que se encarga de clasificar los items en las medias correspondientes
* @param items_dev Items a clasificar, cada item contiene un valor por Feature, representada como arreglo 1D
* @param means_dev Matriz de medias (Cantidad de Features * Cantidad de Medias), representada como arreglo 1D
* @param size_lines Cantidad de items
* @param clusterAsignado_dev Arreglo 1D del cluster a que corresponde cada item
* @param nBloques Cantidad de bloques CUDA
* @param hilosB Cantidad de hilos CUDA
* @return Arreglo 2D de Medias finales de acuerdo a la clasificacion de los items
*/
/**
 * @brief Run the k-means iterations on the GPU, recomputing the means until
 *        fewer than 0.1% of the items change cluster (or CANT_ITERACIONES).
 * @param items_dev Device items (size_lines x CANT_FEATURES, flattened)
 * @param means Host means matrix (CANT_MEANS x CANT_FEATURES, contiguous)
 * @param size_lines Number of items
 * @param clusterAsignado_dev Device array of per-item cluster indices
 * @param nBloques CUDA grid size
 * @param hilosB CUDA block size
 * @return The updated means matrix (same storage passed in)
 */
double** CalculateMeans(double* items_dev, double** means, u_int64_t size_lines, int *clusterAsignado_dev, int nBloques, int hilosB)
{
    double minPorcentaje;
    // Convergence threshold: 0.1% of the items.
    minPorcentaje = 0.001 * (double) size_lines;
    printf("Porentaje minimo = %.2lf\n", minPorcentaje);
    double* means_dev;
    cudaMalloc( (void**)&means_dev, CANT_MEANS*CANT_FEATURES*sizeof(double));
    check_CUDA_Error("ERROR en cudaMalloc");
    // Host/device arrays holding the item count of each mean.
    int *h_clust_sizes = (int*)malloc(CANT_MEANS*sizeof(int));
    memset(h_clust_sizes, 0, sizeof(int)*CANT_MEANS);
    int *d_clust_sizes = 0;
    // Fix: allocate with sizeof(int) (was sizeof(float) — same byte count on
    // common ABIs, but the buffer stores ints).
    cudaMalloc(&d_clust_sizes,CANT_MEANS*sizeof(int));
    check_CUDA_Error("ERROR en cudaMalloc d_clust_sizes ");
    cudaMemcpy(d_clust_sizes,h_clust_sizes,CANT_MEANS*sizeof(int),cudaMemcpyHostToDevice);
    check_CUDA_Error("ERROR en cudaMemcpy d_clust_sizes ");
    // Device counter: items that changed cluster this iteration.
    int *countChangeItem_dev = 0;
    cudaMalloc(&countChangeItem_dev,sizeof(int));
    // Fix: cudaMalloc does not zero memory, so without this memset the first
    // convergence check read garbage.
    cudaMemset(countChangeItem_dev,0,sizeof(int));
    int *countChangeItem = (int*)malloc(sizeof(int));
    // Previous iteration's assignment, for change counting.
    int *clusterAsignadoAnterior_dev = 0;
    cudaMalloc(&clusterAsignadoAnterior_dev,size_lines*sizeof(int));
    // Fix: initialize the "previous" assignments (all zero, matching the
    // zeroed clusterAsignado_dev) instead of comparing against garbage.
    cudaMemset(clusterAsignadoAnterior_dev,0,size_lines*sizeof(int));
    for(int j = 0; j < CANT_ITERACIONES; j++) {
        printf("Iteracion: %d\n", j);
        // Upload the current means to the device.
        cudaMemcpy( means_dev, &means[0][0], CANT_MEANS*CANT_FEATURES*sizeof(double), cudaMemcpyHostToDevice );
        check_CUDA_Error("ERROR en cudaMemcpy means_dev");
        // Reset the per-mean item counts for this iteration.
        cudaMemset(d_clust_sizes,0,CANT_MEANS*sizeof(int));
        check_CUDA_Error("ERROR en cudaMemset means_dev");
        kMeansClusterAssignment<<<nBloques,hilosB>>>(items_dev, means_dev, clusterAsignado_dev);
        // The update kernel accumulates sums into means_dev; clear it first.
        cudaMemset(means_dev,0,CANT_MEANS*CANT_FEATURES*sizeof(double));
        check_CUDA_Error("ERROR en cudaMemset means_dev");
        kMeansCentroidUpdate<<<nBloques,hilosB>>>(items_dev,clusterAsignado_dev,means_dev,d_clust_sizes,clusterAsignadoAnterior_dev,countChangeItem_dev);
        // Bring back the accumulated sums, counts and the change counter.
        cudaMemcpy(&means[0][0],means_dev,CANT_MEANS*CANT_FEATURES*sizeof(double),cudaMemcpyDeviceToHost);
        check_CUDA_Error("ERROR en cudaMemcpy means_dev 3");
        cudaMemcpy(h_clust_sizes, d_clust_sizes, CANT_MEANS*sizeof(int), cudaMemcpyDeviceToHost );
        check_CUDA_Error("ERROR en cudaMemcpy h_clust_sizes ");
        cudaMemcpy(countChangeItem,countChangeItem_dev,sizeof(int),cudaMemcpyDeviceToHost);
        for (int a = 0; a < CANT_MEANS; a++)
        {
            for(int b=0; b < CANT_FEATURES; b++)
            {
                // Turn the per-cluster sums into averages.
                // Fix: guard empty clusters (division by zero left NaNs).
                if (h_clust_sizes[a] != 0) {
                    means[a][b] = means[a][b] / h_clust_sizes[a];
                }
            }
            printf("Mean[%d] -> (%lf,%lf,%lf)\n", a, means[a][0], means[a][1], means[a][2]);
            printf("Cluster[%d] -> %d\n", a, h_clust_sizes[a]);
        }
        // Stop when fewer items changed cluster than the threshold.
        printf("Cant cambios: %d\n",*countChangeItem);
        if(*countChangeItem < minPorcentaje){break;}
        // Reset the change counter and remember this iteration's assignment.
        cudaMemset(countChangeItem_dev,0,sizeof(int));
        cudaMemcpy(clusterAsignadoAnterior_dev, clusterAsignado_dev, size_lines*sizeof(int),cudaMemcpyDeviceToDevice );
    }
    cudaFree(items_dev);
    cudaFree(means_dev);
    cudaFree(d_clust_sizes);
    free(h_clust_sizes);
    cudaFree(countChangeItem_dev);
    cudaFree(clusterAsignadoAnterior_dev);
    free(countChangeItem);
    return means;
}
/**
* @brief Funcion que se encarga de obtener las sumas en cada media y la cantidad de elementos
* @param items_dev Items a clasificar, cada item contiene un valor por Feature, representada como arreglo 1D
* @param clusterAsignado_dev Arreglo 1D del cluster a que corresponde cada item
* @param means_dev Matriz de medias (Cantidad de Features * Cantidad de Medias), representada como arreglo 1D
* @param d_clust_sizes Arreglo 1D de la cantidad de items de cada media del cluster
* @param clusterAsignadoAnterior_dev Arreglo 1D del cluster a que corresponde cada item de la iteracion anterior
* @param countChangeItem_dev Cantidad de cambios de items
*/
// Per-block partial reduction for the centroid update: each block stages its
// items and assignments in shared memory, then thread 0 serially accumulates
// per-cluster sums/counts and merges them into the global buffers atomically.
// NOTE: atomicAdd on double requires SM60+.
__global__ void kMeansCentroidUpdate(double *items_dev, int *clusterAsignado_dev, double *means_dev, int *d_clust_sizes,int *clusterAsignadoAnterior_dev,int *countChangeItem_dev)
{
// Global thread id.
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
// Drop threads beyond the item count.
// NOTE(review): these threads skip the __syncthreads() below, making it a
// divergent barrier in the last block — confirm this is safe on the target
// architecture.
if (idx >= CANT_ITEMS_CUDA) return;
// Thread id within the block.
const int s_idx = threadIdx.x;
// Stage this block's items in shared memory.
__shared__ double items_bloque[HILOS][CANT_FEATURES];
for(int i = 0; i < CANT_FEATURES; i++){
items_bloque[s_idx][i] = items_dev[idx*CANT_FEATURES + i];
}
// Stage this block's current cluster assignments in shared memory.
__shared__ int clusterAsignado_bloque[HILOS];
clusterAsignado_bloque[s_idx] = clusterAsignado_dev[idx];
// Stage the previous iteration's assignments as well.
__shared__ int clusterAsignadoAnterior_bloque[HILOS];
clusterAsignadoAnterior_bloque[s_idx] = clusterAsignadoAnterior_dev[idx];
__syncthreads();
// Thread 0 of each block reduces the staged data serially.
if(s_idx==0)
{
// Number of valid items in this block (the last block may be partial).
int limite = ((idx + blockDim.x) < CANT_ITEMS_CUDA)? blockDim.x : (CANT_ITEMS_CUDA - idx);
// Per-cluster partial sums and item counts for this block.
double clust_sums[CANT_MEANS][CANT_FEATURES]={{0},{0},{0},{0}};
int clust_sizes[CANT_MEANS]={0};
int changeItems = 0;
// Accumulate each staged item into its cluster's partial sum.
for(int j=0; j < limite; ++j)
{
int clust_id = clusterAsignado_bloque[j];
if(clust_id != clusterAsignadoAnterior_bloque[j])
{
// Thread 0 counts items of its block whose assignment changed.
changeItems+=1;
}
clust_sizes[clust_id]+=1;
for(int k = 0; k < CANT_FEATURES; ++k)
{
clust_sums[clust_id][k]+=items_bloque[j][k];
}
}
// Merge this block's partial sums and counts into the global accumulators
// with atomics.
int indice;
for(int z=0; z < CANT_MEANS; ++z)
{
indice = z*CANT_FEATURES;
for(int s=0; s < CANT_FEATURES ; s++)
{
atomicAdd(&means_dev[indice+s],clust_sums[z][s]);
}
atomicAdd(&d_clust_sizes[z],clust_sizes[z]);
}
// Publish the block's change count once, only when non-zero.
if(changeItems != 0)
{
atomicAdd(countChangeItem_dev,changeItems);
}
}
__syncthreads();
}
/**
* @brief Funcion que se encarga de asignar los indices de cluster a cada item
* @param items_dev Items a clasificar, cada item contiene un valor por Feature, representada como arreglo 1D
* @param means_dev Matriz de medias (Cantidad de Features * Cantidad de Medias), representada como arreglo 1D
* @param clusterAsignado_dev Arreglo 1D del cluster a que corresponde cada item
*/
// One thread per item: classify the item against the current means and store
// the winning cluster index into clusterAsignado_dev.
__global__ void kMeansClusterAssignment(double *items_dev, double *means_dev, int *clusterAsignado_dev)
{
// Global thread id.
const int idx = blockIdx.x*blockDim.x + threadIdx.x;
// Drop threads beyond the item count.
if (idx >= CANT_ITEMS_CUDA) return;
// This thread's item: CANT_FEATURES consecutive doubles.
double *item = &items_dev[idx*CANT_FEATURES];
u_int64_t index = Classify(means_dev, item, CANT_MEANS, CANT_FEATURES);
// if(clusterAsignado_dev[idx] != (int)index) -> CAMBIOS RESPECTO AL EFICIENTE
// {
// atomicAdd(countChangeItem_dev,1);
// }
// Record the assigned cluster index for this item.
clusterAsignado_dev[idx]=(int)index;
}
/**
* @brief Funcion que se encarga de obtener el indice del cluster al que pertenece el item
* @param means_dev Matriz de medias (Cantidad de Features * Cantidad de Medias), representada como arreglo 1D
* @param item Item a clasificar
* @param cant_means Cantidad de Medias
* @param cant_features Cantidad de Features
* @return Indice del cluster al que corresponde el Item
*/
/**
 * @brief Return the index of the mean nearest to the given item, using
 *        Euclidean distance over cant_features dimensions.
 * @param means_dev Flattened means matrix (cant_means x cant_features)
 * @param item The item's feature vector
 * @param cant_means Number of means
 * @param cant_features Number of features per item
 * @return Index of the winning cluster
 */
__device__ u_int64_t Classify(double* means_dev, double* item, int cant_means, int cant_features){
    int best = -1;
    double bestDistance = MAX_DOUBLE;
    for(int i = 0; i < cant_means; i++){
        // Distance from the item to mean i (row i of the flattened matrix).
        double d = distanciaEuclidiana(item, &means_dev[i*cant_features], cant_features);
        if(d < bestDistance){
            bestDistance = d;
            best = i;
        }
    }
    return (u_int64_t) best;
}
/**
* @brief Funcion que se encarga de calcular la distancia Euclideana entre el item y las distintas Medias (2 vectores)
* @param x Item (Vector 1)
* @param y Medias (Vector 2)
* @param length longitud del vector (Cantidad de Features)
* @return Distancia euclidiana entre ambos vectores.
*/
/**
 * @brief Euclidean distance between two vectors of the given length.
 * @param x First vector (the item)
 * @param y Second vector (a mean)
 * @param length Number of components (feature count)
 * @return sqrt(sum_i (x[i] - y[i])^2)
 */
__device__ double distanciaEuclidiana(double* x , double* y, int length){
    double distancia = 0;
    for(int i = 0; i < length; i++){
        // Fix: square via multiplication instead of pow(..., 2) — pow is a
        // needlessly expensive library call in device code.
        double diff = x[i] - y[i];
        distancia += diff * diff;
    }
    return sqrt(distancia);
}
/**
* @brief Funcion que se encarga de calcular la cantidad de items a clasificar
* @param filename nombre del archivo
* @return cantidad de lineas (o items) del archivo
*/
/**
 * @brief Count the data lines (items) in a CSV file: lines containing a
 *        comma whose text after the first comma is neither the "values"
 *        header nor empty.
 * @param filename File path
 * @return Number of data lines, or 0 if the file cannot be opened
 */
u_int64_t CalcLines(char filename[50]) {
    // Fix: never pass external data as a printf format string.
    printf("%s", filename);
    FILE *f = fopen(filename, "r");
    if (f == NULL) {
        // Fix: report and bail out instead of crashing on a missing file.
        printf("ERROR: no se pudo abrir el archivo\n");
        return 0;
    }
    u_int64_t cant_lines = 0;
    char* cadena = (char*) calloc(100, sizeof(char));
    char* valor;
    while(fgets(cadena, 100, f)){
        valor = strstr(cadena, ",");
        // Fix: check for NULL BEFORE stepping past the comma (the original
        // incremented first, so comma-less lines caused undefined behavior).
        if(valor != NULL){
            valor++;
            if(strcmp(valor,"values\n") && strcmp(valor,"\n")){
                cant_lines++;
            }
        }
    }
    free (cadena);
    fclose(f);
    printf("Cantidad de items: %ld\n", cant_lines);
    return cant_lines;
}
/**
* @brief Funcion que se encarga allocar una matriz 2D
* @param rows filas de la matriz
* @param cols columnas de la matriz
* @return Matriz 2D
*/
/**
 * @brief Allocate a rows x cols matrix as a single contiguous data block
 *        plus a row-pointer index. Because array[0] points at the data
 *        block, freeing array[0] then array releases everything.
 * @param rows Number of rows
 * @param cols Number of columns
 * @return Row-pointer array indexing the contiguous block
 */
double **alloc_2d_double(u_int64_t rows, u_int64_t cols) {
    double *block = (double *)malloc(rows * cols * sizeof(double));
    double **rowPtrs = (double **)malloc(rows * sizeof(double*));
    for (u_int64_t r = 0; r < rows; ++r) {
        rowPtrs[r] = block + r * cols;
    }
    return rowPtrs;
}
/**
* @brief Busca el minimo y maximo valor para cada feature del arreglo items.
* @param items datos a clasificar
* @param size_lines cantidad de items
* @param minimo arreglo de los valores minimos de cada feature
* @param maximo arreglo de los valores maximos de cada feature
* @param cant_features cantidad de caracteristicas que tiene cada item
*/
/**
 * @brief Find the per-feature minimum and maximum over all items.
 * @param items Data matrix, size_lines x cant_features
 * @param size_lines Number of items
 * @param minimo Out: per-feature minima
 * @param maximo Out: per-feature maxima
 * @param cant_features Number of features per item
 */
void searchMinMax(double** items, u_int64_t size_lines, double* minimo, double* maximo, u_int8_t cant_features){
    // Fix: seed the maxima with -DBL_MAX (most negative double). The original
    // used DBL_MIN, which is the smallest POSITIVE double, so columns whose
    // values are all negative produced a wrong maximum.
    for(int n = 0; n < cant_features; n++){
        maximo[n] = -DBL_MAX;
        minimo[n] = DBL_MAX;
    }
    for(u_int64_t i = 0; i < size_lines; i++){
        for(u_int8_t j = 0; j < cant_features; j++){
            if(items[i][j] < minimo[j]){
                minimo[j] = items[i][j];
            }
            if(items[i][j] > maximo[j]){
                maximo[j] = items[i][j];
            }
        }
    }
    // NOTE(review): these prints assume at least 3 features — confirm callers.
    printf("maximos: %lf, %lf, %lf\n", maximo[0], maximo[1], maximo[2]);
    printf("minimos: %lf, %lf, %lf\n", minimo[0], minimo[1], minimo[2]);
}
/**
* @brief Lee el archivo indicado y carga el arreglo de items.
* @param filename string nombre del archivo que contiene los datos
* @param size_lines cantidad de lineas del archivo
* @param cant_features cantidad de features de cada item (cantidad de columnas del archivo separadas por comas)
* @return arreglo doble con cantidad de filas igual a cantidad de items y cantidad de columnas igual a cantidad de features.
*/
/**
 * @brief Read the CSV file into a size_lines x cant_features matrix. The
 *        first comma-separated column of each line (the index) is skipped;
 *        the "values" header line and blank lines are ignored.
 * @param filename File path
 * @param size_lines Number of data lines in the file
 * @param cant_features Features (comma-separated columns) per item
 * @return Matrix from alloc_2d_double, or NULL if the file cannot be opened
 */
double** ReadData(char filename[50], u_int64_t size_lines, u_int8_t cant_features){
    FILE *file = fopen(filename, "r");
    if(file == NULL){
        // Fix: avoid crashing inside rewind()/fgets() on a missing file.
        printf("ERROR: no se pudo abrir el archivo\n");
        return NULL;
    }
    rewind(file);
    // One contiguous allocation: rows of cant_features doubles.
    double** items = (double **) alloc_2d_double(size_lines, cant_features);
    char* line = (char*)calloc(100, sizeof(char));
    double feature;
    u_int64_t i = 0, j = 0;
    char* ptr;
    while(fgets(line, 100, file)){
        j = 0;
        char *item = strstr(line, ","); // skip the first field (line index)
        // Fix: test for NULL BEFORE stepping past the comma (the original
        // advanced first, making the NULL check ineffective and UB).
        if(item != NULL){
            item++;
            if(strcmp(item, "values\n") && strcmp(item, "\n")){
                char *token = strtok(item, ","); // split remaining fields
                while(token != NULL){
                    feature = strtod(token, &ptr); // parse as double
                    items[i][j] = feature;
                    j++;
                    token = strtok(NULL, ",");
                }
                i++;
            }
        }
    }
    free(line);
    fclose(file);
    return items;
}
/**
* @brief Funcion que se encarga de detectar error de CUDA
* @param mensaje Mensaje de error CUDA
*/
/**
 * @brief Synchronize the device and, if any CUDA error is pending, print it
 *        together with the given context message, wait for a keypress and
 *        terminate the process.
 * @param mensaje Context string printed alongside the CUDA error
 */
__host__ void check_CUDA_Error(const char *mensaje)
{
cudaError_t error;
// Wait for outstanding device work so asynchronous errors surface here.
cudaDeviceSynchronize();
error = cudaGetLastError();
if(error != cudaSuccess)
{
printf("ERROR %d: %s (%s)\n", error, cudaGetErrorString(error), mensaje);
printf("\npulsa INTRO para finalizar...");
// NOTE(review): fflush(stdin) is undefined behavior per the C standard and
// only works on some platforms — consider removing it.
fflush(stdin);
// Pause so the message can be read before exiting.
char tecla = getchar();
exit(-1);
}
}
/**
* @brief Inicializa el arreglo de medias en valores equiespaciados en el rango de datos.
* @param cant_means cantidad de medias o clusters
* @param cMin vector con los valores minimos de cada feature
* @param cMax vector con los valores maximos de cada feature
* @param cant_features cantidad de features (o columnas) de cada item
* @return arreglo con las medias (1 por cada cluster).
* Ejemplo: range: 20 (0 a 19)
* cantMeans -> 4
* jump: 20 / 4 = 5
* means[0] = 0 + 0.5 * 5 = 2.5
* means[1] = 0 + 1.5 * 5 = 7.5
* means[2] = 0 + 2.5 * 5 = 12.5
* means[3] = 0 + 3.5 * 5 = 17.5
*/
// (See the Javadoc above for the full contract and the worked example.)
double** InitializeMeans(u_int16_t cant_means, double* cMin, double* cMax, u_int8_t cant_features){
/* Layout: |__Feature 0__|__Feature 1__|__Feature 2__|
     Mean0 |_____________|_____________|_____________|
     Mean1 |_____________|_____________|_____________|
*/
double **means = (double **) alloc_2d_double(cant_means, cant_features);
// Step from one mean value to the next, computed per feature.
double *jump = (double *) malloc(cant_features * sizeof(double));
for(u_int8_t n = 0; n < cant_features; n++){
jump[n] = (double) (cMax[n] - cMin[n]) / cant_means;
}
printf("\nValores de las medias iniciales:\n");
for(u_int16_t i = 0; i < cant_means; i++){
for(u_int8_t j = 0; j < cant_features; j++){
// Center of the i-th of cant_means equal slices of [cMin, cMax].
means[i][j] = cMin[j] + (0.5 + i) * jump[j];
}
// NOTE(review): this print assumes at least 3 features — confirm callers.
printf("Mean[%d] -> (%lf,%lf,%lf)\n", i, means[i][0], means[i][1], means[i][2]);
}
free(jump);
return means;
}
|
805
|
/*
* EzUpdaterTM.cpp
*
* Created on: 11 янв. 2016 г.
* Author: aleksandr
*/
#include "EzUpdaterTM.h"
#include "SmartIndex.h"
#include <thrust/device_vector.h>
#include <thrust/functional.h>
// x x x x x
// x o o o x
// x o o o x
// x o o o x
// x x x x x
/*__host__ __device__
void EzUpdaterTM::operator() (const int indx) {
// m и n - индексы в полноценных массивах
// sizeY - размер полноценнго массива
int m = indx/(sizeY-2) + 1;
int n = indx%(sizeY-2) + 1;
float Cezh = S * 377.0;
Ez(m, n) = Ez(m, n) + Cezh * ((Hy(m, n) - Hy(m-1, n)) - (Hx(m, n) - Hx(m, n-1)));
}*/
// Functor applied once per flattened cell index: a single TM-mode FDTD
// update step of the Ez field with lossy-material coefficients.
__host__ __device__
void EzUpdaterTM::operator() (const int indx) {
// m and n are indices into the full-size arrays;
// sizeY is the size of the full array.
int m = indx/(sizeY);
int n = indx%(sizeY);
// Cells flagged as excluded are left untouched.
if (excluded(m, n) != 0) {
return;
}
// Loss factor from the cell's conductivity and permittivity.
float loss = sigma(m, n)/(2*epsilon(m, n));
float Cezh = S * 377.0 / epsilon(m,n) / (1+loss);
float Ceze = (1-loss)/(1+loss);
// Update interior cells only; the one-cell boundary ring is skipped.
if ( (m>0 && m<sizeX-1) && (n>0 && n<sizeY-1) ) {
Ez(m, n) = Ceze*Ez(m, n) + Cezh * ((Hy(m, n) - Hy(m-1, n)) - (Hx(m, n) - Hx(m, n-1)));
}
}
|
806
|
#include "includes.h"
/*This file is part of quantumsim. (https://github.com/brianzi/quantumsim)*/
/*(c) 2016 Brian Tarasinski*/
/*Distributed under the GNU GPLv3. See LICENSE.txt or https://www.gnu.org/licenses/gpl.txt*/
//kernel to transform to pauli basis (up, x, y, down)
//to be run on a complete complex density matrix, once for each bit
//this operation is its own inverse (can also be used in opposite direction)
// Exchange the roles of qubits bit1 and bit2 in a density matrix stored as a
// flat array of 4^no_qubits doubles (2 index bits per qubit). One thread per
// element; each address pair is swapped exactly once (only the thread whose
// address is the larger of the pair performs the exchange).
// NOTE(review): the shift amount 2*(bit2 - bit1) assumes bit2 > bit1 —
// confirm at call sites; also `1 << 2*no_qubits` overflows unsigned int for
// no_qubits >= 16.
__global__ void swap(double *dm, unsigned int bit1, unsigned int bit2, unsigned int no_qubits) {
unsigned int addr = threadIdx.x + blockDim.x*blockIdx.x;
// Total element count is 4^no_qubits.
if (addr >= (1<<2*no_qubits)) return;
// Two-bit field masks selecting each qubit's index bits.
unsigned int bit1_mask = (0x3 << (2*bit1));
unsigned int bit2_mask = (0x3 << (2*bit2));
// addr2 = addr with the two 2-bit fields exchanged.
unsigned int addr2 = ( addr & ~(bit1_mask | bit2_mask)) |
((addr & bit1_mask) << (2*(bit2 - bit1))) |
((addr & bit2_mask) >> (2*(bit2 - bit1)));
double t;
// Swap each unordered pair {addr, addr2} once.
if (addr > addr2) {
t = dm[addr2];
dm[addr2] = dm[addr];
dm[addr] = t;
}
}
|
807
|
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <cufft.h>
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#define TPBx 32 // TPBx * TPBy = number of threads per block
#define TPBy 32
__global__ void real2complex(cufftDoubleComplex *c, double *a, int n);
__global__ void complex2real_scaled(double *a, cufftDoubleComplex *c, double scale, int n);
__global__ void solve_poisson(cufftDoubleComplex *c, double *kx, double *ky, int n);
void core(double *error, double *maxError, const int n, const int range);
void exportData(const char *file, const double *X, const double *Y, const double *Z, const int m, const int n);
void gaussian(double *bin, const double *X, const double *Y, const int n);
void getError(double *error, double *maxError, const double *data, const double *result, const int n);
void getR2(double *result, const double *data, const double delta, const int n);
int main(){
    ///////////////////////////// INITIZALIZATION ////////////////////////////
    int minN, maxN, dN, minR, maxR, dR;
    char *errorFile = (char *)"error.dat";
    char *maxErrorFile = (char *)"maxError.dat";
    // Read sweep parameters interactively ("min:max" format for ranges).
    printf("Range of sample size: ");
    scanf("%d:%d", &minN, &maxN);
    printf("Range of sample range: ");
    scanf("%d:%d", &minR, &maxR);
    printf("Change of sample size: ");
    scanf("%d", &dN);
    printf("Change of sample range: ");
    scanf("%d", &dR);
    printf("Sample size: %d:%d; Range: %d:%d; dN: %d; dR: %d\n", minN, maxN, minR, maxR, dN, dR);
    int numN = (maxN - minN) / dN + 1;
    int numR = (maxR - minR) / dR + 1;
    double *error = (double *)malloc(sizeof(double));
    double *maxError = (double *)malloc(sizeof(double));
    double *errorList = (double *)malloc(sizeof(double) * numN * numR);
    double *maxErrorList = (double *)malloc(sizeof(double) * numN * numR);
    double *NList = (double *)malloc(sizeof(double) * numN);
    double *RList = (double *)malloc(sizeof(double) * numR);
    int currentN = minN;
    int currentR = minR;
    int r = 0;
    int n = 0;
    int p = 0;
    int pnew = 0;
    printf("Start!\n");
    printf("Progress: %d%%", p);
    fflush(stdout);
    // Sweep every (sample size, range) combination, recording the mean and
    // maximum reconstruction error of each solver run.
    while(r < numR){
        while(n < numN){
            NList[n] = currentN;
            core(error, maxError, currentN, currentR);
            errorList[n+r*numN] = *error;
            maxErrorList[n+r*numN] = *maxError;
            n += 1;
            currentN += dN;
            // Update the textual progress indicator: backspace over the
            // previous percentage (2 or 3 characters) before reprinting.
            pnew = (int) (100 * (n + r * numN)) / (numN * numR);
            if(pnew > p){
                p = pnew;
                if(p <= 10){
                    printf("\b\b%d%%", p);
                }else{
                    printf("\b\b\b%d%%", p);
                }
                fflush(stdout);
            }
        }
        RList[r] = currentR;
        r += 1;
        currentR += dR;
        n = 0;
        currentN = minN;
    }
    printf("\nDone!\n");
    printf("Exporting Data...");
    exportData(errorFile, NList, RList, errorList, numN, numR);
    exportData(maxErrorFile, NList, RList, maxErrorList, numN, numR);
    printf("The data is %d x %d.\n", numN, numR);
    // Release all host buffers (the original leaked the four list arrays).
    free(error);
    free(maxError);
    free(errorList);
    free(maxErrorList);
    free(NList);
    free(RList);
    return 0;
}
// Copy the real NxN array a into the complex array c (imaginary part = 0).
__global__ void real2complex(cufftDoubleComplex *c, double *a, int n){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col >= n || row >= n) return;   // guard the grid tail in both dims
    int idx = col + row * n;
    c[idx].x = a[idx];
    c[idx].y = 0.0;
}
// Copy the scaled real parts of c back into a: a[idx] = scale * Re(c[idx]).
__global__ void complex2real_scaled(double *a, cufftDoubleComplex *c, double scale, int n){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if(col >= n || row >= n) return;   // guard the grid tail in both dims
    a[col + row * n] = scale * c[col + row * n].x;
}
// Divide each Fourier mode by -(kx^2 + ky^2) to invert the Laplacian in
// spectral space. The DC mode (0,0) has a zero denominator and is left
// untouched (divided by 1 instead).
__global__ void solve_poisson(cufftDoubleComplex *c, double *kx, double *ky, int n){
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= n || row >= n) return;
    int idx = col + row * n;
    double denom = -(kx[col] * kx[col] + ky[row] * ky[row]);
    if (col == 0 && row == 0) {
        denom = 1.0;   // avoid division by zero for the DC component
    }
    double factor = 1.0 / denom;
    c[idx].x *= factor;
    c[idx].y *= factor;
}
// Solve the 2D Poisson equation spectrally with cuFFT on an n x n grid
// spanning [-range, range], then measure how well the discrete Laplacian of
// the computed potential reproduces the original Gaussian source term.
// Outputs: *error = mean absolute difference, *maxError = max difference.
void core(double *error, double *maxError, const int n, const int range){
    double *X = (double *)malloc(sizeof(double) * n);
    double *Y = (double *)malloc(sizeof(double) * n);
    double *kx = (double *)malloc(sizeof(double) * n);
    double *ky = (double *)malloc(sizeof(double) * n);
    double *r = (double *)malloc(sizeof(double) * n * n);   // source term
    double *r2 = (double *)malloc(sizeof(double) * n * n);  // reconstructed source
    double *u = (double *)malloc(sizeof(double) * n * n);   // computed potential
    // NOTE(review): EPSILON is declared but never used below.
    const double EPSILON = 8.85418782 * pow(10, -12); // Permitivity of free space
    const double PI = 4 * atan(1);
    double *kx_d, *ky_d, *r_d;
    cufftDoubleComplex *r_complex_d;
    cudaMalloc((void **)&kx_d, sizeof(double) * n);
    cudaMalloc((void **)&ky_d, sizeof(double) * n);
    cudaMalloc((void **)&r_d, sizeof(double) * n * n);
    cudaMalloc((void **)&r_complex_d, sizeof(cufftDoubleComplex) * n * n);
    int m = 0;
    double deltaX = (double)range / (n / 2);   // grid spacing
    double deltaK = 1.0 / (2 * range);         // wavenumber spacing
    // Sample coordinates: X, Y run from -range up to range - deltaX.
    for(int i = n/-2; i < n/2; i++){
        if(m < n){
            X[m] = i * deltaX;
            Y[m] = i * deltaX;
        }
        m += 1;
    }
    m = 0;
    // Wavenumbers in FFT order: non-negative frequencies first, then
    // the negative ones wrapped into the upper half of the array.
    for(int i = 0; i < n/2; i++){
        if(m < n/2){
            kx[m] = i * deltaK;
            kx[m+n/2] = (double)(i - n / 2) * deltaK;
            ky[m] = i * deltaK;
            ky[m+n/2] = (double)(i - n / 2) * deltaK;
        }
        m += 1;
    }
    gaussian(r, X, Y, n); // Generate a Gaussian Distribution for r
    for (int i = 0; i < n * n; i++){
        u[i] = 0.0;
    }
    cudaMemcpy(kx_d, kx, sizeof(double) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(ky_d, ky, sizeof(double) * n, cudaMemcpyHostToDevice);
    cudaMemcpy(r_d, r, sizeof(double) * n * n, cudaMemcpyHostToDevice);
    // Double-precision complex-to-complex 2D FFT plan, reused for both
    // the forward and inverse transforms below.
    cufftHandle plan;
    cufftPlan2d(&plan, n, n, CUFFT_Z2Z);
    // Compute the execution configuration
    dim3 dimBlock(TPBx, TPBy);
    dim3 dimGrid(n / dimBlock.x, n / dimBlock.y);
    // Handle N not multiple of TPBx or TPBy
    if(n % TPBx != 0){
        dimGrid.x += 1;
    }
    if(n % TPBy != 0){
        dimGrid.y += 1;
    }
    // Normalization: 1/(n*n) undoes the unnormalized cuFFT round trip;
    // 4*PI*PI converts the cycle-based wavenumbers to angular form.
    const double PI2 = 4 * PI * PI;
    double scale = 1.0 / (n * n * PI2);
    // Pipeline: r -> complex -> FFT -> divide by -(k^2) -> IFFT -> u.
    real2complex<<<dimGrid, dimBlock>>>(r_complex_d, r_d, n);
    cufftExecZ2Z(plan, r_complex_d, r_complex_d, CUFFT_FORWARD);
    solve_poisson<<<dimGrid, dimBlock>>>(r_complex_d, kx_d, ky_d, n);
    cufftExecZ2Z(plan, r_complex_d, r_complex_d, CUFFT_INVERSE);
    complex2real_scaled<<<dimGrid, dimBlock>>>(r_d, r_complex_d, scale, n);
    cudaMemcpy(u, r_d, sizeof(double) * n * n, cudaMemcpyDeviceToHost);
    // Apply the finite-difference Laplacian to u and compare against r.
    getR2(r2, u, deltaX, n);
    getError(error, maxError, r, r2, n);
    // Destroy plan and clean up memory on device
    free(kx);
    free(ky);
    free(X);
    free(Y);
    free(r);
    free(r2);
    free(u);
    cufftDestroy(plan);
    cudaFree(r_d);
    cudaFree(r_complex_d);
    cudaFree(kx_d);
    cudaFree(ky_d);
}
// Write (X[i], Y[j], Z[i + j*m]) triples, tab-separated, one per line,
// iterating columns within rows, into `file`.
void exportData(const char *file, const double *X, const double *Y, const double *Z, const int m, const int n){
    FILE *out = fopen(file, "w");
    if(out == NULL){
        printf("File not found!");
        return;
    }
    for(int j = 0; j < n ; j++){
        for(int i = 0; i < m; i++){
            fprintf(out, "%lf\t%lf\t%lf\n", X[i], Y[j], Z[i+j*m]);
        }
    }
    printf("All data have been stored in \"%s\".\n", file);
    fclose(out);
}
// Fill the n x n array `bin` with a 2D Gaussian evaluated at grid points
// (X[i], Y[j]), then copy row 0 / column 0 onto the last row / column.
void gaussian(double *bin, const double *X, const double *Y, const int n){
    const double PI = 4 * atan(1);
    const double amplitude = 10.0 / sqrt(2 * PI);
    // Main fill: amplitude * exp(-(x^2 + y^2) / 2).
    for(int row = 0; row < n; row++){
        const double y2 = Y[row] * Y[row];
        for(int col = 0; col < n; col++){
            const double x2 = X[col] * X[col];
            bin[col + row*n] = amplitude * exp(-(x2 + y2) / 2);
        }
    }
    // Boundary fix-up: last row mirrors row 0, last column mirrors column 0.
    for(int i = 0; i < n; i++){
        bin[i + (n-1)*n] = bin[i];
        bin[(n-1) + i*n] = bin[i*n];
    }
}
// Compare `result` against `data` over the n x n grid. Cells where either
// value is exactly zero (e.g. the zeroed boundary ring) are excluded.
// Outputs: *error = mean absolute difference over the counted cells,
// *maxError = largest absolute difference.
void getError(double *error, double *maxError, const double *data, const double *result, const int n){
    double difference = 0.0;
    double totalError = 0.0;
    double mError = 0.0;
    int count = 0;
    for(int j = 0; j < n; j++){
        for(int i = 0; i < n; i++){
            // fabs, not abs: abs() takes an int, so the original risked
            // silently truncating the doubles to integers before comparing.
            if (fabs(result[i+j*n]) > 0 && fabs(data[i+j*n]) > 0){
                difference = fabs(result[i+j*n] - data[i+j*n]);
                totalError += difference;
                if(difference > mError){
                    mError = difference;
                }
                count += 1;
            }
        }
    }
    // Guard against division by zero when no cell qualified.
    *error = (count > 0) ? totalError / count : 0.0;
    *maxError = mError;
}
// Apply the five-point finite-difference Laplacian (scaled by 1/delta^2)
// to `data`, writing into `result`. The boundary ring is forced to zero.
void getR2(double *result, const double *data, const double delta, const int n){
    const double invDelta2 = 1.0 / (delta * delta);
    // Zero the outermost ring.
    for(int i = 0; i < n; i++){
        result[i] = 0.0;             // bottom row (j = 0)
        result[i + (n-1)*n] = 0.0;   // top row
        result[i*n] = 0.0;           // left column
        result[(n-1) + i*n] = 0.0;   // right column
    }
    // Interior: (left + right + below + above - 4*centre) / delta^2.
    for(int row = 1; row < n - 1; row++){
        for(int col = 1; col < n - 1; col++){
            double neighbours = data[(col-1) + row*n] + data[(col+1) + row*n]
                              + data[col + (row-1)*n] + data[col + (row+1)*n];
            result[col + row*n] = invDelta2 * (neighbours - 4 * data[col + row*n]);
        }
    }
}
|
808
|
#include <stdio.h>
#include <cuda.h>
// setting array and block size
#define ARRAY_SIZE 1048576
#define BLOCK_SIZE 1024
// helper function for calculating upper ceil of division
// Integer division rounded towards positive infinity for exact multiples:
// ceil(numerator / denominator) for positive operands.
int upper_ceil(int numerator, int denominator) {
    int quotient = numerator / denominator;
    if (numerator % denominator != 0) {
        quotient += 1;
    }
    return quotient;
}
/**
* Kernel code to compute the vector reduction sum
*/
__global__ void vector_sum_reduction(float *device_arr, float *device_sum) {
    // One partial sum per thread in block-shared scratch memory.
    __shared__ float shared_data[BLOCK_SIZE];
    unsigned int thread_id = threadIdx.x;
    // Index of the input element this thread loads.
    // NOTE: the launch configuration must guarantee index is in range;
    // the kernel receives no element count to bounds-check against.
    unsigned int index = blockDim.x * blockIdx.x + thread_id;
    shared_data[thread_id] = device_arr[index];
    // All loads into shared memory must finish before the reduction starts.
    __syncthreads();
    // Tree reduction with *sequential addressing*: at each step the first
    // `offset` threads fold the upper half onto the lower half. Unlike the
    // original interleaved scheme (idx = 2*tid*stride), consecutive threads
    // access consecutive shared-memory words, avoiding bank conflicts and
    // keeping active threads packed into whole warps.
    // Requires blockDim.x to be a power of two (BLOCK_SIZE = 1024 is).
    for (unsigned int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
        if (thread_id < offset) {
            shared_data[thread_id] += shared_data[thread_id + offset];
        }
        // Barrier outside the divergent branch: every thread reaches it.
        __syncthreads();
    }
    // Thread 0 folds this block's partial sum into the global result.
    if (thread_id == 0) {
        atomicAdd(device_sum, shared_data[0]);
    }
}
// Main function: sums ARRAY_SIZE floats (all 2.0f) on the GPU and prints
// the total (expected: 2 * ARRAY_SIZE).
int main() {
    // host buffers
    float *host_arr;
    float *host_sum;
    // device buffers
    float *device_arr;
    float *device_sum;
    // allocate space in host
    host_arr = (float *) malloc(ARRAY_SIZE * sizeof(float));
    host_sum = (float *) malloc(sizeof(float));
    (*host_sum) = 0;
    // initialize host array elements
    for (int i = 0; i < ARRAY_SIZE; ++i) {
        host_arr[i] = (float)2;
    }
    // allocate device memory with error handling (also free host memory
    // on the early-exit paths; the original leaked it)
    cudaError_t err = cudaMalloc((void **)&device_arr, ARRAY_SIZE * sizeof(float));
    if(err != cudaSuccess) {
        printf( "\nError: %s ", cudaGetErrorString(err));
        free(host_arr);
        free(host_sum);
        return 0;
    }
    err = cudaMalloc((void **)&device_sum , sizeof(float));
    if(err != cudaSuccess) {
        printf( "\nError: %s ", cudaGetErrorString(err));
        cudaFree(device_arr);
        free(host_arr);
        free(host_sum);
        return 0;
    }
    // copy host memory data to device
    cudaMemcpy(device_arr, host_arr, ARRAY_SIZE * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(device_sum, host_sum, sizeof(float), cudaMemcpyHostToDevice);
    // one thread per element, BLOCK_SIZE threads per block
    int blocks = upper_ceil(ARRAY_SIZE, BLOCK_SIZE);
    vector_sum_reduction<<<blocks, BLOCK_SIZE>>>(device_arr, device_sum);
    // kernel launches fail silently; surface launch-configuration errors
    err = cudaGetLastError();
    if(err != cudaSuccess) {
        printf( "\nError: %s ", cudaGetErrorString(err));
    }
    // blocking copy also synchronizes with the kernel
    cudaMemcpy(host_sum, device_sum, sizeof(float), cudaMemcpyDeviceToHost);
    printf("Sum = %f\n", *host_sum);
    // free device and host memory
    cudaFree(device_arr);
    cudaFree(device_sum);
    free(host_arr);
    free(host_sum);
    return 0;
}
|
809
|
#include <stdio.h>
#include <time.h>
#include "md5LibCPU.cu"
#define WORD_SIZE 512
#define MD5_SIZE 32
#define UINT4 uint
int getNumberOfWords(FILE*);
// Dictionary attack: reads a target MD5 digest from argv[1] and tries every
// word in the wordlist argv[2] until the digests match.
int main(int argc, char ** argv){
    // Validate usage before touching the file system (the original
    // dereferenced argv[1]/argv[2] and the FILE* results unchecked).
    if (argc < 3) {
        printf("Usage: %s <md5 file> <wordlist file>\n", argv[0]);
        return 1;
    }
    // Random Seed
    srand(time(NULL));
    // File holding the MD5 to be cracked (32 hex characters)
    FILE * inputFile = fopen(argv[1],"r");
    if (inputFile == NULL) {
        printf("Cannot open input file %s\n", argv[1]);
        return 1;
    }
    // Wordlist: one candidate word per line
    FILE * wordListFile = fopen(argv[2],"r");
    if (wordListFile == NULL) {
        printf("Cannot open wordlist file %s\n", argv[2]);
        fclose(inputFile);
        return 1;
    }
    // Get MD5 to be cracked
    char md5ToCrack[MD5_SIZE+1];
    fgets(md5ToCrack,MD5_SIZE+1,inputFile);
    fclose(inputFile);
    // Initialize word list size, then reset the file pointer
    int wordListSize = getNumberOfWords(wordListFile);
    rewind(wordListFile);
    // Word found flag (-1 = not found)
    int wordFound = -1;
    // Total wall-clock timing (includes file handling)
    cudaEvent_t startTotal, stopTotal;
    float elapsedTimeTotal;
    cudaEventCreate(&startTotal);
    cudaEventRecord(startTotal,0);
    // Cracking-loop timing only
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventRecord(start, 0);
    char currentWord[WORD_SIZE+1];
    // Counter to see the number of iterations
    int cont = 0;
    // Repeat for every word in wordlist or until the right word is found
    for (int i = 0; i < wordListSize; ++i){
        char hash[33];
        cont++;
        fgets(currentWord,WORD_SIZE+1,wordListFile);
        // Strip the first CR/LF/NUL so the digest covers only the word
        for (char *s = &(currentWord[0]); s < &(currentWord[WORD_SIZE+1]); ++s) {
            if ('\r' == *s || '\n' == *s || '\0' == *s) {
                *s = '\0'; break;
            }
        }
        // Get MD5 for currentWord and store in hash
        MDString(currentWord,hash);
        if (strcmp(hash,md5ToCrack) == 0){
            wordFound = 1;
            break;
        }
    }
    // Stop Execution Time
    cudaEventCreate(&stop);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime( &elapsedTime, start, stop);
    printf("Time to crack MD5 : %f ms\n" ,elapsedTime);
    // Check if word was found and print
    if (wordFound == -1)
        printf("Wasn't able to crack MD5\n");
    else
        printf("-------------MD5 Cracked!-------------\nWord: %s\n--------------------------------------\n",currentWord);
    fclose(wordListFile);
    // Stop Total time
    cudaEventCreate(&stopTotal);
    cudaEventRecord(stopTotal,0);
    cudaEventSynchronize(stopTotal);
    cudaEventElapsedTime(&elapsedTimeTotal, startTotal,stopTotal);
    printf("Total Time : %f ms\n" ,elapsedTimeTotal);
    return 0;
}
// Return number of words in the wordlist file: counts newline characters
// and returns that count plus one. Leaves the stream at EOF, so callers
// must rewind() before reading.
int getNumberOfWords(FILE * fp){
    int lines = 1;
    int ch;
    // Idiomatic read loop: fgetc returns EOF exactly once at end of file.
    while((ch = fgetc(fp)) != EOF)
    {
        if(ch == '\n')
        {
            lines++;
        }
    }
    return lines;
}
|
810
|
#include "includes.h"
// SAXPY-style update: y[i] = a * x[i] + y[i], one element per thread.
__global__ void add(int n, float a, float *x, float *y){
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    if (idx >= n) return;   // guard the grid tail
    y[idx] = a * x[idx] + y[idx];
}
|
811
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <iostream>
// Return x squared after verifying that CUDA devices can be enumerated.
// On query failure, prints diagnostics and returns 1 as an error sentinel.
int __host__ file1_sq_func(int x)
{
    int nDevices = 0;
    cudaError_t status = cudaGetDeviceCount(&nDevices);
    if (status != cudaSuccess)
    {
        std::cout << "nDevices: " << nDevices << std::endl;
        std::cout << "err: " << status << std::endl;
        return 1;
    }
    std::cout << "this library uses cuda code" << std::endl;
    std::cout << "you have " << nDevices << " devices that support cuda" << std::endl;
    return x * x;
}
|
812
|
#include <cstdio>
#define cudaErrChk(ans) { cudaAssert((ans), __FILE__, __LINE__); }
// Report a failed CUDA call with file/line context; optionally exit with
// the error code as the process status.
inline void cudaAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess) return;
    fprintf(stderr,"CUDA assert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort) exit(code);
}
// Enumerate CUDA devices and print the key properties of each.
int main(void) {
    int num_devices=0;
    cudaErrChk ( cudaGetDeviceCount (&num_devices) );
    printf("\n=================================================\n");
    printf("The number of device(s) : %d\n", num_devices);
    printf("=================================================\n\n");
    for (int i=0; i<num_devices; i++) {
        cudaDeviceProp prop;
        cudaErrChk ( cudaGetDeviceProperties (&prop, i) );
        printf ("Device Number: %d\n", i);
        printf ("  Device name: %s\n", prop.name);
        printf ("  Device compute capability: %d.%d\n", prop.major, prop.minor);
        printf ("  Number of SM(s): %d\n", prop.multiProcessorCount);
        printf ("  Memory Clock Rate (GHz): %.2f\n",
                ((float)prop.memoryClockRate)/1.0e6);
        printf ("  Memory Bus Width (bits): %d\n",
                prop.memoryBusWidth);
        // factor 2 accounts for DDR memory (two transfers per clock)
        printf ("  Peak Memory Bandwidth (GB/s): %f\n",
                2.0*prop.memoryClockRate*(prop.memoryBusWidth/8)/1.0e6);
        printf ("\n[Kernel size]\n");
        // Fix: the original printed maxGridSize[0] for all three dimensions.
        printf ("  Maximum size of a grid [%d, %d, %d]\n"
                , prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2]);
        printf ("  Maximum size of a block [%d]\n"
                , prop.maxThreadsPerBlock);
        printf ("\n[Shared mem]\n");
        printf ("  Shared memory size per block :%dKB\n", (int)(prop.sharedMemPerBlock/1.0e3));
    }
    printf("\n=================================================\n\n");
    return 0;
}
|
813
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <math.h>
#include<time.h>
#include<stdlib.h>
#define arrayLength 10000
float a[10240000],b[10240000];
float c[1048576];
cudaError_t addWithCuda(float *c, const float *a, const float *b, unsigned int size);
// Uniform pseudo-random double in [0, 1] based on rand().
double randf()
{
    return rand() / (double)RAND_MAX;
}
// One accumulation step of the all-pairs squared-distance computation:
// for pair (i, j) = (blockIdx.x, threadIdx.x), add the squared difference
// of component k into c[i + j*1024]. Launched <<<1024, 1024>>> once per k.
// NOTE(review): a and b are indexed here with a row stride of 1024, but
// main() fills them with stride arrayLength (10000) and k runs up to
// arrayLength — the reads cross row boundaries. Verify the intended layout.
__global__ void calculWithGPU(float *c, const float *a, const float *b,int k)
{
    int i = blockIdx.x;
    int j = threadIdx.x;
    c[i+j*1024]=c[i+j*1024]+(a[i*1024+k]-b[j*1024+k])*(a[i*1024+k]-b[j*1024+k]);
}
// CPU reference: Euclidean distances between all pairs of the 1024 samples
// in a and the 1024 samples in b, written into c[i + j*1024].
// NOTE(review): same stride discrepancy as the GPU kernel — a and b are
// indexed with row stride 1024 while main() fills them with stride
// arrayLength (10000); verify the intended data layout.
void calculWithCPU()
{
    for(int i=0;i<1024;i++)
    {
        for(int j=0;j<1024;j++)
        {
            // Sum of squared component differences...
            for(int k=0;k<arrayLength;k++)
            {
                c[i+j*1024]=c[i+j*1024]+(a[i*1024+k]-b[j*1024+k])*(a[i*1024+k]-b[j*1024+k]);
            }
            // ...then the square root gives the distance.
            c[i+j*1024]=sqrtf(c[i+j*1024]);
        }
    }
}
// Fill a and b with random samples, compute all-pairs distances on the GPU
// (squared sums on device, square roots on host) and print the results.
int main()
{
    time_t t_start,t_end;
    //*******************************
    srand(time(NULL));
    // Random inputs in [0, 1].
    for(int i=0;i<1024;i++){
        for(int j=0;j<arrayLength;j++){
            a[i*arrayLength+j]=randf();
        }
    }
    for(int i=0;i<1024;i++){
        for(int j=0;j<arrayLength;j++){
            b[i*arrayLength+j]=randf();
        }
    }
    // Clear the 1024x1024 accumulator.
    for(int i=0;i<1048576;i++)
    {
        c[i]=0;
    }
    //*********CPU*******************************
    /*
    t_start = time(NULL) ;
    calculWithCPU();
    t_end = time(NULL) ;
    printf("CPU spends %ld s to finish the mission.Press ENTER to see the data\n",t_end-t_start );
    getchar();
    for(int i=0;i<1024*1024;i++)
    {
        printf("%f\n",c[i]);
    }
    getchar();
    */
    //*************GPU****************************
    t_start = time(NULL) ;
    cudaError_t cudaStatus = addWithCuda(c, a, b, 10240000);
    // Check the GPU result *before* post-processing: the original took
    // square roots of (and printed timing for) garbage data on failure.
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addWithCuda failed!");
        return 1;
    }
    // Finish the Euclidean distances on the host.
    for(int i=0;i<1024*1024;i++)
    {
        c[i]=sqrtf(c[i]);
    }
    t_end = time(NULL) ;
    printf("GPU spends %ld s to finish the mission.Press ENTER to see the data\n",t_end-t_start );
    getchar();
    for(int i=0;i<1024*1024;i++)
    {
        printf("%f\n",c[i]);
    }
    getchar();
    return 0;
}
// Accumulate, on the GPU, the squared component differences between the
// 1024 samples in a and the 1024 samples in b into the 1024x1024 matrix c.
// Launches one kernel per component k (arrayLength launches total); the
// caller must zero c beforehand and apply sqrtf afterwards.
// size is the element count of a and b; returns the first CUDA error seen.
cudaError_t addWithCuda(float *c, const float *a, const float *b, unsigned int size)
{
    float *dev_a = 0;
    float *dev_b = 0;
    float *dev_c = 0;
    cudaError_t cudaStatus;
    // Choose which GPU to run on, change this on a multi-GPU system.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaSetDevice failed!  Do you have a CUDA-capable GPU installed?");
        goto Error;
    }
    // Allocate GPU buffers for three vectors (two input, one output).
    cudaStatus = cudaMalloc((void**)&dev_c, 1048576 * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }
    // Copy input vectors from host memory to GPU buffers.
    cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // The (pre-zeroed) accumulator must also be uploaded.
    cudaStatus = cudaMemcpy(dev_c, c, 1048576 * sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
    // Launch a kernel on the GPU with one thread for each element.
    // Launches queue asynchronously on the same stream, so they run in order.
    for(int k=0;k<arrayLength;k++)
        calculWithGPU<<<1024, 1024>>>(dev_c, dev_a, dev_b,k);
    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }
    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
        goto Error;
    }
    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(c, dev_c, 1048576 * sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy failed!");
        goto Error;
    }
Error:
    // Single cleanup path; cudaFree(0) is a harmless no-op.
    cudaFree(dev_c);
    cudaFree(dev_a);
    cudaFree(dev_b);
    return cudaStatus;
}
|
814
|
#include "includes.h"
/**
* Maestría en Ciencias - Mención Informática
* -------------------------------------------
* Escriba un programa CUDA que calcule C = n*A + B, en donde A, B, C son vectores
* y n una constante escalar.
*
* Adaptado de https://www.olcf.ornl.gov/tutorials/cuda-vector-addition/
*
* Presentado por:
* Zuñiga Rojas, Gabriela
* Soncco Pimentel, Braulio
*/
// CUDA events and elapsed-time slot for timing kernel runs (host-side use).
cudaEvent_t start, stop;
float elapsedTime;
// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n, int k)
{
// Get our global thread ID
int id = blockIdx.x*blockDim.x+threadIdx.x;
// Make sure we do not go out of bounds
if (id < n)
c[id] = k * a[id] + b[id];
}
|
815
|
#include <stdio.h>
#include <stdlib.h>
// Kernel that writes the constant 1 into the single int pointed to by a.
__global__ void value( int *a ){
    *a = 1;
}
// Main program: launches a single-thread kernel that writes 1 into device
// memory, copies the value back, and prints it.
int main(void){
    int *a;                  // Host memory
    int *a_dev;              // Device memory
    int size = sizeof(int);  // size of one integer
    a = (int *) malloc(size);                 // Allocate host memory
    // Check CUDA calls: the original ignored all errors and printed an
    // uninitialized host int when anything failed.
    cudaError_t err = cudaMalloc( (void**) &a_dev, size);
    if (err != cudaSuccess) {
        printf("cudaMalloc failed: %s\n", cudaGetErrorString(err));
        free(a);
        return 1;
    }
    value <<<1,1>>> (a_dev);                  // Launch kernel on device
    // Blocking copy also synchronizes with the kernel and surfaces
    // any execution error.
    err = cudaMemcpy( a, a_dev, size, cudaMemcpyDeviceToHost );
    if (err != cudaSuccess) {
        printf("cudaMemcpy failed: %s\n", cudaGetErrorString(err));
        cudaFree(a_dev);
        free(a);
        return 1;
    }
    printf("%d\n",*a);                        // Print result
    cudaFree(a_dev);                          // Free device memory
    free(a);                                  // Free host memory
    return 0;
}
|
816
|
#include <iostream>
#include <cstdio>
#include <chrono>
using namespace std;
// Element-wise product res[i] = arr1[i] * arr2[i] via a grid-stride loop,
// so any launch configuration covers all n elements.
__global__ void kernel(double *res, double *arr1, double *arr2, int n) {
    int step = blockDim.x * gridDim.x;  // total number of threads in the grid
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < n; i += step) {
        res[i] = arr1[i] * arr2[i];
    }
}
// Read two n-element vectors from stdin, multiply them element-wise on the
// GPU (timing the kernel with CUDA events) and print the result.
int main() {
    int n;
    scanf("%d", &n);
    double *res = (double *)malloc(sizeof(double) * n);
    double *vec1 = (double *)malloc(sizeof(double) * n);
    double *vec2 = (double *)malloc(sizeof(double) * n);
    for(int i = 0; i < n; i++)
        scanf("%lf", &vec1[i]);
    for(int i = 0; i < n; i++)
        scanf("%lf", &vec2[i]);
    double *dev_res, *dev_vec1, *dev_vec2;
    // The result buffer only needs to exist on the device; the original
    // also copied the *uninitialized* host buffer into it, reading
    // indeterminate malloc'd memory for no benefit.
    cudaMalloc(&dev_res, sizeof(double) * n);
    cudaMalloc(&dev_vec1, sizeof(double) * n);
    cudaMemcpy(dev_vec1, vec1, sizeof(double) * n, cudaMemcpyHostToDevice);
    cudaMalloc(&dev_vec2, sizeof(double) * n);
    cudaMemcpy(dev_vec2, vec2, sizeof(double) * n, cudaMemcpyHostToDevice);
    // Time the kernel only (transfers excluded).
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    kernel<<<256, 256>>>(dev_res, dev_vec1, dev_vec2, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    fprintf(stderr, "time = %f\n", time);
    cudaEventDestroy(stop);
    cudaEventDestroy(start);
    cudaMemcpy(res, dev_res, sizeof(double) * n, cudaMemcpyDeviceToHost);
    cudaFree(dev_res);
    cudaFree(dev_vec1);
    cudaFree(dev_vec2);
    for(int i = 0; i < n; i++) {
        printf("%f ", res[i]);
    }
    printf("\n");
    free(res);
    free(vec1);
    free(vec2);
    return 0;
}
|
817
|
// fermi
/*
* Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/* typedef struct __attribute__ ((packed)) { */
/* float real; */
/* float imag; */
/* } Complex; */
extern "C" {
__global__ void findPeakKernel(const int nrBlocks, const int n, float* peak, float* peaks, int* indicesPeak, const float* input);
}
// Per-block argmax over the magnitudes of the real parts of an interleaved
// complex array `input` (stride 2: element i's real part is input[2*i]).
// Each of the nrBlocks blocks strides over the n elements with 256 threads,
// reduces its candidates in shared memory, then writes its block maximum to
// peaks[blockIdx.x] and the winning element index to indicesPeak[blockIdx.x].
// Block 0 additionally stores the real part of the *last* element into
// *peak — NOTE(review): that is the final sample's value, not the global
// maximum; confirm the host combines peaks[]/indicesPeak[] afterwards.
// NOTE(review): __syncthreads() sits inside the `ti < nrThreads` guard, so
// the launch must use exactly 256 threads per block (e.g. 32x8) to keep all
// threads reaching the barrier.
__global__ void findPeakKernel(const int nrBlocks, const int n, float* peak, float* peaks, int* indicesPeak, const float* input) {
    const int bi = blockIdx.x;
    const int wti = threadIdx.y;
    const int tti = threadIdx.x;
    const int nrThreads = 256;
    const int stepSize = nrBlocks * nrThreads;
    const int nrThreadsNrThreads = min(32, nrThreads);
    __shared__ float reduceMem[256];
    __shared__ int indexMem[256];
    // Flatten the 2D (y, x) thread index into a linear thread id.
    const int ti = wti * (1 * nrThreadsNrThreads) + tti;
    if (ti < nrThreads) {
        // Grid-stride scan: each thread keeps the largest |Re| it sees.
        float max = -1.0;
        int index = -1;
        for (int i = bi * nrThreads + ti; i < n; i += stepSize) {
            const float val = fabs(input[i * 2 + 0]);
            if (val > max) {
                max = val;
                index = i;
            }
        }
        reduceMem[ti] = max;
        indexMem[ti] = index;
        __syncthreads();
        // Shared-memory tree reduction carrying value and index together.
        for (int i = nrThreads / 2; i > 0; i >>= 1) {
            if (ti < i) {
                const float v1 = reduceMem[ti];
                const float v2 = reduceMem[ti + i];
                if (v2 > v1) {
                    reduceMem[ti] = v2;
                    indexMem[ti] = indexMem[ti + i];
                }
            }
            __syncthreads();
        }
        // Thread 0 publishes this block's result.
        if (ti == 0) {
            peaks[bi] = reduceMem[0];
            indicesPeak[bi] = indexMem[0];
            if (bi == 0) {
                *peak = input[(n - 1) * 2 + 0];
            }
        }
    }
}
|
818
|
#include "Datastructures.cuh"
#include <thrust/host_vector.h>
#include <vector>
#include <iterator>
#include <algorithm>
namespace DATASTRUCTURES {
// Convert a list of 6-component particle coordinate vectors
// [x, px, y, py, t, delta] into a thrust host_vector of double6.
// Precondition: every inner vector has at least 6 elements.
thrust::host_vector<double6> hostVectorD6FromStdVector(std::vector<std::vector<double>> & orig){
    thrust::host_vector<double6> out;
    out.reserve(orig.size());   // single allocation instead of repeated growth
    for (const std::vector<double> &particle : orig) {
        double6 vec;
        vec.x = particle[0];
        vec.px = particle[1];
        vec.y = particle[2];
        vec.py = particle[3];
        vec.t = particle[4];
        vec.delta = particle[5];
        out.push_back(vec);
    }
    return out;
}
}
|
819
|
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
// One explicit Euler step for m independent ODEs:
//   y_i' = 4*t - y_i + 3 + i, with t = step*dt. One thread per equation.
__global__ void euler_step(float * array, int m, int step) {
    // Fixed step size. The original called powf(10,-3) in every thread;
    // a literal constant avoids that per-thread transcendental call.
    const float dt = 1e-3f;
    int tId = threadIdx.x + blockIdx.x * blockDim.x;
    if (tId < m) {
        array[tId] = array[tId] + dt*(4*(dt*step)-array[tId]+3+tId);
    }
}
// Benchmark: run 1000 Euler steps on systems of 10^4 .. 10^8 equations,
// timing the GPU integration for each problem size.
int main() {
    cudaEvent_t start, stop;
    int num_eq = 1000;          // equation count; multiplied by 10 each trial
    int num_iter = 1000;        // Euler steps per trial
    int block_size = 256;
    for (int trial = 0; trial < 5; ++trial) {
        num_eq = num_eq * 10;
        float elapsed = 0;
        int grid_size = (int) ceil((float)num_eq / block_size);
        float *resultados = (float *) malloc(num_eq * sizeof(float));
        float *d_r;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        // Initial condition: y_i(0) = i.
        for (int i = 0; i < num_eq; ++i) {
            resultados[i] = i;
        }
        cudaMalloc(&d_r, num_eq * sizeof(float));
        cudaMemcpy(d_r, resultados, num_eq * sizeof(float), cudaMemcpyHostToDevice);
        cudaEventRecord(start, 0);
        for (int step = 0; step < num_iter; ++step) {
            euler_step<<<grid_size, block_size>>>(d_r, num_eq, step);
        }
        cudaEventRecord(stop, 0);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&elapsed, start, stop);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        cudaMemcpy(resultados, d_r, num_eq * sizeof(float), cudaMemcpyDeviceToHost);
        printf("Executed with %d equations\n", num_eq);
        printf("The elapsed time in gpu was %.2f ms \n", elapsed);
        free(resultados);
        cudaFree(d_r);
    }
    return 0;
}
|
820
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Atomically accumulate the per-block maximum (truncated to int) of the
// examined values into gl_max[blockIdx.x]. n is the number of valid
// elements in `values`.
__global__ void maxPerRow(double *values, int *gl_max, int n){
    // Get our global thread ID
    int id = blockIdx.x*blockDim.x+threadIdx.x;
    if(id < n) {
        // Read only after the bounds check: the original loaded values[id]
        // unconditionally, an out-of-bounds access for tail threads.
        int val = (int)values[id];
        atomicMax(&gl_max[blockIdx.x], val);
    }
}
// Host driver: fills a col_size x row_size matrix of doubles (mostly 4,
// plus a few larger test values), runs the atomicMax kernel, and prints the
// per-block maxima collected in h_c.
// NOTE(review): gridSize is computed for 1024-thread blocks but the launch
// below uses col_size (10) threads per block, so only gridSize * col_size
// = 100 of the data_size elements are ever examined, and the output is
// indexed by blockIdx rather than by matrix row. Verify the intended
// decomposition before relying on the printed values.
int main( int argc, char* argv[] ){
    // variable size of matrix < 1024
    int col_size = 10, row_size = 1024;
    //check that sizes are correct
    if(col_size > 1025 || row_size > 1025){
        printf("size not valid\n");
        return 1;
    }
    //how many data points there are row*col
    int data_size = col_size * row_size;
    // Host input matrix
    double *h_a;
    //Host output matrix
    int *h_c;
    // Device input matrix
    double *d_a;
    //Device output matrix
    int *d_c;
    // Size, in bytes, of each matrix
    size_t bytes_input = data_size*sizeof(double);
    size_t bytes_output = col_size*sizeof(int);
    // Allocate memory for each matrix on host
    h_a = (double*)malloc(bytes_input);
    h_c = (int*)malloc(bytes_output);
    // Allocate memory for each matrix on GPU
    cudaMalloc(&d_a, bytes_input);
    cudaMalloc(&d_c, bytes_output);
    int row_id;
    // Initialize matrix on host
    // Simple initialize for now
    for( row_id = 0; row_id < data_size; row_id++ ) {
        h_a[row_id] = 4;
    }
    // initialize output to zeroes
    for(row_id = 0; row_id < col_size; row_id++){
        h_c[row_id] = 0;
    }
    //some max values to test
    h_a[4] = 19;
    h_a[16] = 21;
    h_a[98] = 49;
    // Copy host matrices to device
    cudaMemcpy( d_a, h_a, bytes_input, cudaMemcpyHostToDevice);
    cudaMemcpy( d_c, h_c, bytes_output, cudaMemcpyHostToDevice);
    // Initialize grid and block
    int blockSize, gridSize;
    // Number of threads in each thread block
    blockSize = 1024;
    // Number of thread blocks in grid 1024 height*width/blockSize
    gridSize = (int)ceil(data_size/(float)blockSize);
    // Execute the kernel add each thread for one output
    // (launch uses col_size threads per block — see NOTE above)
    maxPerRow<<<gridSize, col_size>>>(d_a, d_c, data_size);
    // Copy result back to host (blocking copy also syncs with the kernel)
    cudaMemcpy( h_c, d_c, bytes_output, cudaMemcpyDeviceToHost );
    // print out data
    for(row_id = 0; row_id < col_size; row_id++){
        printf("%d ", (int)h_c[row_id]);
    }
    printf("\n done\n");
    // Release device memory
    cudaFree(d_a);
    cudaFree(d_c);
    // Release host memory
    free(h_a);
    free(h_c);
    return 0;
}
|
821
|
//TEST(smoke):COMPILE: -pass-through nvrtc -target ptx -entry hello tests/cuda/cuda-compile.cu
// Per-thread increment: adds b[i] into a[i] for i = threadIdx.x
// (single-block launch; no bounds check, caller sizes the block).
__global__
void hello(char *a, int *b)
{
    int i = threadIdx.x;
    a[i] += b[i];
}
|
822
|
#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
// Define constants and data types
#define PAGE_SIZE 32
#define PHYSICAL_MEM_SIZE 32768
#define STORAGE_SIZE 131072
#define DATAFILE "./data.bin"
#define OUTPUTFILE "./snapshot.bin"
typedef unsigned char uchar;
typedef uint32_t u32;
// Page-table-entry field constants (see the bit-layout diagram in paging()).
const uint32_t VALID = 0 | 1;                // valid bit (bit 0) set
const uint32_t INVALID = 0;                  // empty / unused entry
const uint32_t PAGENUMBERMASK = 0x00003FFE;  // page number: bits 13..1
const uint32_t LASTTIMEMASK = 0xFFFFC000;    // LRU timestamp: bits 31..14
const uint32_t DNE = 0xFFFFFFFF;             // "does not exist" sentinel
// Declare variables
__device__ __managed__ int PAGE_ENTRIES = 0;  // number of page-table slots
__device__ __managed__ int PAGEFAULT = 0;     // page-fault counter
__device__ __managed__ int CURRENTTIME = 0;   // logical clock for LRU
__device__ __managed__ uchar storage[STORAGE_SIZE];  // backing "disk"
__device__ __managed__ uchar results[STORAGE_SIZE];
__device__ __managed__ uchar input[STORAGE_SIZE];
// Page table lives in dynamically sized shared memory (set at kernel launch).
extern __shared__ u32 pageTable[];
// Function
// ******************************************************************
// Initialize
// Mark every slot of the shared-memory page table as invalid/empty.
__device__ void initPageTable(int entries) {
    for (int entry = 0; entry < entries; ++entry) {
        pageTable[entry] = INVALID;
    }
}
// ******************************************************************
// ******************************************************************
// File I/O
// Load fileName into `input`, copying at most storageSize bytes.
// Returns the file's size in bytes (0 if the file cannot be opened).
int loadBinaryFile(char *fileName, uchar *input, int storageSize) {
    FILE *fptr = fopen(fileName, "rb");
    // Check the open before seeking: the original dereferenced a NULL
    // FILE* when the data file was missing.
    if (fptr == NULL) {
        printf("ERROR: Cannot open input file!\n");
        return 0;
    }
    // Get size
    fseek(fptr, 0, SEEK_END);
    int size = ftell(fptr);
    rewind(fptr);
    if (storageSize < size) {
        printf("ERROR: Storage size is too small to store input data!\n");
    }
    // Clamp the read: the original fread wrote `size` bytes regardless,
    // overflowing `input` whenever the file exceeded storageSize.
    int toRead = (size < storageSize) ? size : storageSize;
    fread(input, sizeof(unsigned char), toRead, fptr);
    fclose(fptr);
    return size;
}
// Dump storageSize bytes from `input` into fileName as a binary file.
void writeBinaryFile(char *fileName, uchar *input, int storageSize) {
    FILE *out = fopen(fileName, "wb");
    fwrite(input, sizeof(unsigned char), storageSize, out);
    fclose(out);
}
// ******************************************************************
// ******************************************************************
// Read/Write
// Valid bit (bit 0) of a page-table entry; nonzero means the slot is in use.
__device__ u32 isValid(u32 PTE) {
    return PTE & VALID;
}
// Virtual page number stored in bits 13..1 of a PTE.
__device__ u32 getPageNumber(u32 PTE) {
    return (PTE & PAGENUMBERMASK) >> 1;
}
// LRU timestamp stored in bits 31..14 of a PTE.
__device__ u32 getLastUsedTime(u32 PTE) {
    return (PTE & LASTTIMEMASK) >> 14;
}
// Pack timestamp, page number and valid bit into a single 32-bit PTE.
__device__ u32 makePTE(u32 time, u32 pageNumber, u32 validbit) {
    return (time << 14) | (pageNumber << 1) | validbit;
}
// Translate (pageNumber, pageOffset) into a physical address in `memory`,
// swapping the least-recently-used page out to `storage` on a miss.
// Returns the physical byte address; increments PAGEFAULT on any miss.
// NOTE(review): CURRENTTIME and pageTable are updated with no
// synchronization — this appears designed for a single active thread;
// confirm before launching with more than one.
__device__ u32 paging(uchar *memory, u32 pageNumber, u32 pageOffset) {
    // ******************************************************************** //
    // How I store infomation in a PTE:                                     //
    //                    |------------------|-------------|-|              //
    //                    |332222222222111111|1111-8-6-4-2-|0|              //
    //                    |109876543210987654|32109-7-5-3-1|-|              //
    //                    |------------------|-------------|-|              //
    //                    |  Last used time  | Page Number | | <-- last one bit is valid bit //
    //                    |------------------|-------------|-|              //
    // ******************************************************************** //
    // Advance the logical clock used for LRU timestamps.
    CURRENTTIME++;
    // Case 1 (hit): the target page is already resident — refresh its
    // timestamp and return the mapped address.
    for (u32 i = 0; i < PAGE_ENTRIES; i++) {
        if (isValid(pageTable[i]) && pageNumber == getPageNumber(pageTable[i])) {
            // Update time
            pageTable[i] = makePTE(CURRENTTIME, pageNumber, VALID);
            return i * PAGE_SIZE + pageOffset;
        }
    }
    // Case 2 (miss, free slot): claim the first invalid entry.
    for (u32 i = 0; i < PAGE_ENTRIES; i++) {
        if (isValid(pageTable[i]) == 0) {
            // Because of a empty hole, it must be a pagefault
            PAGEFAULT++;
            // Update PTE
            pageTable[i] = makePTE(CURRENTTIME, pageNumber, VALID);
            return i * PAGE_SIZE + pageOffset;
        }
    }
    // Case 3 (miss, table full): evict the entry with the smallest
    // last-used timestamp (LRU).
    u32 leastEntry = DNE;
    u32 leastTime = DNE;
    for (u32 i = 0; i < PAGE_ENTRIES; i++) {
        if (leastTime > getLastUsedTime(pageTable[i])) {
            leastTime = getLastUsedTime(pageTable[i]);
            leastEntry = i;
        }
    }
    // Swap: write the victim page back to storage, then read the
    // requested page from storage into the freed frame.
    PAGEFAULT++;
    for (u32 j = 0;
            j < PAGE_SIZE;
            j++) {
        u32 memoryAddress = leastEntry * PAGE_SIZE + j;
        u32 storageAddress = pageNumber * PAGE_SIZE + j;
        u32 toStorageAddress = getPageNumber(pageTable[leastEntry]) * PAGE_SIZE + j;
        storage[toStorageAddress] = memory[memoryAddress];
        memory[memoryAddress] = storage[storageAddress];
    }
    pageTable[leastEntry] = makePTE(CURRENTTIME, pageNumber, VALID);
    return leastEntry * PAGE_SIZE + pageOffset;
}
// Read one byte from a virtual address, translating it through the pager.
__device__ uchar Gread(uchar *memory, u32 address) {
    u32 vpn = address / PAGE_SIZE;
    u32 offset = address % PAGE_SIZE;
    return memory[paging(memory, vpn, offset)];
}
// Write one byte to a virtual address, translating it through the pager.
__device__ void Gwrite(uchar *memory, u32 address, uchar writeValue) {
    u32 vpn = address / PAGE_SIZE;
    u32 offset = address % PAGE_SIZE;
    memory[paging(memory, vpn, offset)] = writeValue;
}
// Copy input_size bytes out of paged memory, starting at `offset`, into result.
__device__ void snapshot(uchar *result, uchar *memory, int offset, int input_size) {
    for (int idx = 0; idx < input_size; idx++)
        result[idx] = Gread(memory, offset + idx);
}
// ******************************************************************
// ******************************************************************
// Kernel function
// Single-thread driver for the VM simulation: write all input bytes through
// the pager, read a few back, then snapshot the paged memory into `results`.
// Launch as <<<1, 1, smem>>> where smem backs the extern shared pageTable
// (PAGE_ENTRIES u32 entries).
__global__ void mykernel(int input_size) {
    // "Physical" memory that the page table maps onto.
    __shared__ uchar data[PHYSICAL_MEM_SIZE];
    PAGE_ENTRIES = PHYSICAL_MEM_SIZE/PAGE_SIZE;
    initPageTable(PAGE_ENTRIES);
    //##Gwrite / Gread code section start###
    // Stream the whole input through the paging layer.
    for(int i = 0; i < input_size; i++) {
        Gwrite(data, i, input[i]);
    }
    // Read back the last 10 bytes; the value is intentionally unused -- the
    // reads exist only to exercise the pager (and bump the fault count).
    for(int i = input_size - 1; i >= input_size - 10; i--) {
        int value = Gread(data, i);
    }
    //the last line of Gwrite/Gread code section should be snapshot ()
    snapshot(results, data, 0, input_size);
    //###Gwrite/Gread code section end###
    printf("pagefault times = %d\n", PAGEFAULT);
}
// ******************************************************************
// Drive the VM simulation: load the input file, run the single-thread kernel
// with 16 KB of dynamic shared memory for the page table, then dump results.
// Fix: surface launch-configuration and in-kernel errors, which the original
// silently ignored (a failed launch produced an empty/garbage output file).
int main() {
    int input_size = loadBinaryFile(DATAFILE, input, STORAGE_SIZE);
    cudaSetDevice(2);
    mykernel<<<1, 1, 16384>>>(input_size);
    cudaError_t err = cudaGetLastError();   // launch-config errors
    if (err != cudaSuccess) {
        printf("CUDA launch error: %s\n", cudaGetErrorString(err));
    }
    err = cudaDeviceSynchronize();          // in-kernel execution errors
    if (err != cudaSuccess) {
        printf("CUDA execution error: %s\n", cudaGetErrorString(err));
    }
    cudaDeviceReset();
    writeBinaryFile(OUTPUTFILE, results, input_size);
    return 0;
}
|
823
|
#include "includes.h"
// Pack the signs of 32 floats into one word: bit i is set iff array[i] >= 0.
__device__ unsigned int concatenate(float* array)
{
    unsigned int packed = 0;
    for (int bit = 0; bit < 32; bit++)
    {
        if (array[bit] >= 0)
            packed |= (1u << bit);
    }
    return packed;
}
// Binarize matrix `a` (m rows x n cols, row-major) column-wise: each group of
// 32 consecutive rows of column j is packed into one unsigned int in `b`.
// One thread per column; assumes m is a multiple of 32.
// Fix: use a per-thread local array instead of in-kernel new/delete -- the
// device-heap allocation was slow, unnecessary, and its possible failure
// (new returning NULL) was never checked.
__global__ void concatenate_cols_kernel(float *a, unsigned int *b, int m, int n)
{
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j < n) {
        float array[32];   // thread-local scratch (registers / local memory)
        for (int i = 0; i < m; i += 32) {
            for (int k = 0; k < 32; k++) array[k] = a[j + n*(i+k)];
            b[j + n*i/32] = concatenate(array);
        }
    }
}
|
824
|
#include<iostream>
#include<chrono>
#include<cuda.h>
#include<cmath>
#include<cuda_runtime.h>
#define N 1024
using namespace std;
using namespace std::chrono;
static const int wholeArraySize = 10000000;
static const int blockSize = 1024;
static const int gridSize = 24; //this number is hardware-dependent; usually #SM*2 is a good number.
// Grid-stride block-level sum reduction: each block writes one partial sum
// of gArr[0..arraySize) into gOut[blockIdx.x]. Requires blockDim.x ==
// blockSize (a power of two) for the shared-memory tree reduction.
__global__ void sumCommMultiBlock(const int *gArr, int arraySize, int *gOut) {
    __shared__ int partial[blockSize];
    const int tid = threadIdx.x;
    const int step = blockSize * gridDim.x;
    int acc = 0;
    for (int i = blockIdx.x * blockSize + tid; i < arraySize; i += step) {
        acc += gArr[i];
    }
    partial[tid] = acc;
    __syncthreads();
    // Halving tree reduction; the barrier is outside the divergent branch.
    for (int active = blockSize / 2; active > 0; active /= 2) {
        if (tid < active) {
            partial[tid] += partial[tid + active];
        }
        __syncthreads();
    }
    if (tid == 0) {
        gOut[blockIdx.x] = partial[0];
    }
}
// Per-block partial sums of (gArr[i] - mean)^2 over the array (grid-stride),
// one partial per block in gOut -- the numerator of a variance computation.
// NOTE(review): partials are accumulated in float but written into an int
// output array, so each block's result is truncated toward zero; confirm
// this precision loss is acceptable to callers.
__global__ void numerator(const int *gArr, int arraySize, int *gOut, float mean) {
    int thIdx = threadIdx.x;
    int gthIdx = thIdx + blockIdx.x*blockSize;
    const int gridSize = blockSize*gridDim.x;   // shadows the file-scope gridSize
    float sum = 0;
    for (int i = gthIdx; i < arraySize; i += gridSize)
        sum += (float(gArr[i]) - mean)*(float(gArr[i]) - mean);
    __shared__ float shArr[blockSize];
    shArr[thIdx] = sum;
    __syncthreads();
    // Power-of-two tree reduction in shared memory (blockSize == 1024).
    for (int size = blockSize/2; size>0; size/=2) { //uniform
        if (thIdx<size)
            shArr[thIdx] += shArr[thIdx+size];
        __syncthreads();
    }
    if (thIdx == 0)
        gOut[blockIdx.x] = shArr[0];   // float -> int truncation (see note)
}
// Population standard deviation of arr[0..n), computed on the CPU.
// Fixes: accumulate in double -- a single float accumulator loses several
// significant digits when summing ~10^7 values (this file's use case) --
// and guard n <= 0 (the original divided by zero).
float stddev(int *arr, int n)
{
    if (n <= 0) return 0.0f;
    double sum = 0.0;
    for (int i = 0; i < n; i++)
        sum += arr[i];
    double mean = sum / n;
    double sq = 0.0;
    for (int i = 0; i < n; i++)
    {
        double d = arr[i] - mean;
        sq += d * d;
    }
    return float(sqrt(sq / n));
}
// Compare GPU vs CPU standard-deviation computation over 10^7 ints.
// Fixes: (1) kernel launches are asynchronous -- cudaDeviceSynchronize() is
// now called BEFORE stopping each timer, so the measured time includes the
// kernels instead of only launch overhead; (2) the heap-allocated host
// array is now released.
int main() {
    int *arr = new int[wholeArraySize];
    for(int i = 0; i < wholeArraySize ; i++)
    {
        arr[i] = (i+1)%10;
    }
    int* dev_arr;
    cudaMalloc((void**)&dev_arr, wholeArraySize * sizeof(int));
    cudaMemcpy(dev_arr, arr, wholeArraySize * sizeof(int), cudaMemcpyHostToDevice);
    int out;
    int* dev_out;
    cudaMalloc((void**)&dev_out, sizeof(int)*gridSize);
    auto start1 = high_resolution_clock::now();
    sumCommMultiBlock<<<gridSize, blockSize>>>(dev_arr, wholeArraySize, dev_out);
    //dev_out now holds the partial result
    sumCommMultiBlock<<<1, blockSize>>>(dev_out, gridSize, dev_out);
    cudaDeviceSynchronize();   // wait for the kernels BEFORE stopping the timer
    auto stop1 = high_resolution_clock::now();
    auto dur1 = duration_cast<microseconds>(stop1 - start1).count();
    //dev_out[0] now holds the final result
    cudaMemcpy(&out, dev_out, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_arr);
    cudaFree(dev_out);
    cout<<"Sum is : "<<out;
    cudaMalloc((void**)&dev_arr, wholeArraySize * sizeof(int));
    cudaMemcpy(dev_arr, arr, wholeArraySize * sizeof(int), cudaMemcpyHostToDevice);
    cudaMalloc((void**)&dev_out, sizeof(int)*gridSize);
    int sum = out;
    float mean = float(sum)/wholeArraySize;
    auto start2 = high_resolution_clock::now();
    numerator<<<gridSize, blockSize>>>(dev_arr, wholeArraySize, dev_out, mean);
    sumCommMultiBlock<<<1, blockSize>>>(dev_out, gridSize, dev_out);
    cudaDeviceSynchronize();   // wait for the kernels BEFORE stopping the timer
    auto stop2 = high_resolution_clock::now();
    auto dur2 = duration_cast<microseconds>(stop2 - start2).count();
    cout<<"\nPARALLEL TIME : "<<dur1 + dur2<<endl;
    cudaMemcpy(&out, dev_out, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(dev_arr);
    cudaFree(dev_out);
    cout<<"numerator is : "<<out;
    float num = out;
    float term = num / wholeArraySize;
    float sol = sqrt(term);
    cout<<"\nSTD DEV is : "<<sol<<endl;
    start1 = high_resolution_clock::now();
    float sol2 = stddev(arr, wholeArraySize);
    stop1 = high_resolution_clock::now();
    dur1 = duration_cast<microseconds>(stop1 - start1).count();
    cout<<"SERIAL TIME is : "<<dur1<<endl;
    cout<<"\nSTD DEV is (SERIAL) : "<<sol2;
    delete[] arr;   // fix: release the host array
}
|
825
|
#include "includes.h"
// Sieve step: thread `index` owns prime d_PL[index] and clears (writes 0 to)
// d_IL at every multiple of that prime in (d_PL[numOfPrimes-1], lenInputList).
// Fix: iterate directly over the multiples of the prime instead of testing
// every candidate with a modulo -- identical marks, O(range/p) iterations
// instead of O(range) divisions per thread.
__global__ void calcPrimes(int *d_IL, int *d_PL, int numOfPrimes, int lenInputList) {
    int index = threadIdx.x + blockIdx.x * blockDim.x;
    if(index < numOfPrimes) {
        int p = d_PL[index];
        int start = d_PL[numOfPrimes-1] + 1;
        int first = ((start + p - 1) / p) * p;   // first multiple of p >= start
        for(int i = first; i < lenInputList; i += p) {
            d_IL[i] = 0;
        }
    }
}
|
826
|
#include <stdlib.h>
#include <stdio.h>
// No-op kernel; the launch in main() exercises the CUDA runtime.
__global__ void kernel( void ) { }
// Launch the empty kernel (asynchronously), then print a greeting on the host.
int main()
{
    kernel<<< 1, 1 >>>();
    printf("Hello World!\n");
    return EXIT_SUCCESS;
}
|
827
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
typedef unsigned long long bignum;
//return 1 if it is a prime else return 0
//save as main.cu
// CUDA kernel. Each thread takes care of one element of c
// Returns 1 if `number` is prime, 0 otherwise (per the comment above: the
// function's documented contract). Usable from both host and device.
// Fixes: (1) 2 was incorrectly reported as NOT prime; (2) trial division
// now stops at sqrt(number) (divisor*divisor <= number) instead of
// number/2 -- same results, far fewer iterations for large inputs.
__host__ __device__ bignum checkIfValIsPrime(bignum number)
{
    if (number < 2) return (bignum) 0;     // 0 and 1 are not prime
    if (number == 2) return (bignum) 1;    // 2 is the only even prime
    if (number % 2 == 0) return (bignum) 0;
    for (bignum divisor = 3; divisor * divisor <= number; divisor += 2)
    {
        if (number % divisor == 0)
        {
            return (bignum) 0;
        }
    }
    return (bignum) 1;
}
// One thread per candidate: a[id] = 1.0 if id is prime, else 0.0.
__global__ void isPrime(double *a, bignum length)
{
    const int id = blockDim.x * blockIdx.x + threadIdx.x;
    if (id >= length)
        return;
    a[id] = checkIfValIsPrime((bignum) id);
}
// Count primes below N on the GPU: fill a flag array (1.0 per prime), copy
// back, and sum. Usage: prime <upbound> <blocksize>.
// Fixes: (1) the program reads argv[2], so it now requires THREE arguments
// -- the old `argc < 2` check still allowed an out-of-bounds argv[2] read;
// (2) grid size now covers all N candidates -- ceil((N+1)/2/blockSize)
// launched only about half the needed threads, leaving a[N/2..N) unset;
// (3) atoll instead of atoi for the unsigned-long-long bounds.
int main( int argc, char* argv[] )
{
    if(argc < 3)
    {
        printf("Usage: prime upbound blocksize\n");
        exit(-1);
    }
    bignum N = (bignum) atoll(argv[1]);
    bignum blockSize = (bignum) atoll(argv[2]);
    if(N <= 0 || blockSize <= 0)
    {
        printf("Usage: prime upbound, you input invalid upbound number!\n");
        exit(-1);
    }
    // Host input
    double *h_a;
    // Host output
    double *h_c;
    // Device input
    double *d_a;
    // Device output
    double *d_c;
    // Size, in bytes, of each vector
    size_t bytes = N*sizeof(double);
    // Allocate memory for vector on host
    h_a = (double*)malloc(bytes);
    h_c = (double*)malloc(bytes);
    // Allocate memory for each vector on GPU
    cudaMalloc(&d_a, bytes);
    cudaMalloc(&d_c, bytes);
    printf("Made it past the allocation of memory\n");
    int i;
    // Initialize array with 0's to show that it is empty
    printf("Initialize array with 0's\n");
    for( i = 0; i < N; i++ ) {
        h_a[i] = 0;
    }
    // Copy host vectors to device
    cudaMemcpy( d_a, h_a, bytes, cudaMemcpyHostToDevice);
    // Number of thread blocks in grid: enough to cover ALL N candidates.
    int gridSize = (int)ceil((double)N/blockSize);
    // Execute the kernel
    isPrime<<<gridSize, blockSize>>>(d_a, N);
    // Copy array back to host
    cudaMemcpy( h_a, d_a, bytes, cudaMemcpyDeviceToHost );
    // Sum the flags: each prime contributes exactly 1.0
    double sum = 0;
    printf("In the for block adding up sum\n");
    for(i=0; i<N; i++){
        sum += h_a[i];
        printf("In position %d ", i);
        printf("We have %f\n", h_a[i]);
    }
    printf("Final result: %f\n", sum);
    // Release device memory
    cudaFree(d_a);
    cudaFree(d_c);
    // Release host memory
    free(h_a);
    free(h_c);
    return 0;
}
|
828
|
#include <iostream>
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Report a CUDA error with its source location; optionally abort the process.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    if (code == cudaSuccess)
        return;   // nothing to report
    fprintf(stderr,"GPUassert: %d %s %s %d\n", code, cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
// Grid-stride element-wise add: C[i] = A[i] + B[i] for i in [0, n).
// Works for any launch configuration (including <<<1,1>>> for debugging).
// Fix: the loop index is now size_t, matching `stride` -- the original
// mixed an int index with a size_t stride, which silently wraps for very
// large n and relies on implementation-defined conversions.
__global__ void vecAddKernel(float *A, float *B, float *C, int n) {
    size_t stride = (size_t)blockDim.x * gridDim.x;
    for (size_t i = (size_t)blockIdx.x * blockDim.x + threadIdx.x; i < (size_t)n; i += stride)
        C[i] = A[i] + B[i];
}
// Launch vecAddKernel over the (managed) arrays and block until it finishes.
// Fix: launch/execution errors were silently dropped -- the launch status is
// now checked and the sync result reported instead of being discarded.
void vecAdd(float *h_arrayA, float *h_arrayB, float *h_arrayC, int n) {
    vecAddKernel<<<65535, 1024>>>(h_arrayA, h_arrayB, h_arrayC, n);
    cudaError_t err = cudaGetLastError();   // launch-configuration errors
    if (err != cudaSuccess)
        fprintf(stderr, "vecAdd launch failed: %s\n", cudaGetErrorString(err));
    err = cudaDeviceSynchronize();          // in-kernel execution errors
    if (err != cudaSuccess)
        fprintf(stderr, "vecAdd execution failed: %s\n", cudaGetErrorString(err));
}
// Allocate three managed arrays of 2^28 floats, add A + B into C on the GPU,
// and print the first and last results (expected 3 for both).
int main() {
    unsigned long long size = 1 << 28;
    cout << size << endl;
    float *arrayA;
    float *arrayB;
    float *arrayC;
    gpuErrchk(cudaMallocManaged(&arrayA, size * sizeof(float)));
    gpuErrchk(cudaMallocManaged(&arrayB, size * sizeof(float)));
    gpuErrchk(cudaMallocManaged(&arrayC, size * sizeof(float)));
    // Fill the inputs on the host (managed memory migrates on demand).
    for (unsigned long long i = 0; i < size; ++i) {
        arrayA[i] = 1.0f;
        arrayB[i] = 2.0f;
    }
    vecAdd(arrayA, arrayB, arrayC, size);
    cout << arrayC[0] << ' ' << arrayC[size - 1] << endl;
    cudaFree(arrayA);
    cudaFree(arrayB);
    cudaFree(arrayC);
}
|
829
|
/*
============================================================================
Name : review_chp3_2.cu
Author : freshield
Version :
Copyright : Your copyright notice
Description : CUDA compute reciprocals
============================================================================
*/
#include <stdio.h>
// Store the sum a + b into the device location pointed to by c.
__global__ void add(int a, int b, int *c){
    int result = a + b;
    *c = result;
}
// Compute 2 + 7 on the GPU and print the result.
// Fixes: (1) cudaMalloc returns a cudaError_t, not a pointer -- the
// original cast its return value into an int* (`gpu_pointer`), which was
// meaningless; the status is now checked instead. (2) the device pointer
// is printed with %p, not %x (wrong-size format specifier on 64-bit).
int main(void){
    int c;
    int *dev_c;   // device-side destination for the sum
    cudaError_t status = cudaMalloc( (void**)&dev_c, sizeof(int));
    if (status != cudaSuccess) {
        printf("cudaMalloc failed: %s\n", cudaGetErrorString(status));
        return 1;
    }
    printf("Memory Allocated at: %p\n", (void*)dev_c);
    add<<<1,1>>>(2, 7, dev_c);   // result lands where dev_c points
    // Blocking copy: also synchronizes with the kernel above.
    cudaMemcpy( &c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);
    printf( "2 + 7 = %d\n", c);
    cudaFree(dev_c);
    return 0;
}
|
830
|
#include <stdio.h>
#define BLOCKDIM 1024
#define RSIZE 1024
//------------------------------------------------------------------------------------------------------------------------------------------------
//try thread coarsening
// For each solvent molecule, compute the minimum squared distance from any
// of its atoms to the mask center (maskCenter[0..2]) and store it in D_[mol]
// (clamped above by maxD, the initial minimum). One thread per (mol, atom);
// SolventMols_ is [mol][atom][xyz] flattened. Presumably a molecule's atoms
// never straddle a block boundary (the linear reduction below assumes its
// NAtoms distances are contiguous in dist_array) -- TODO confirm at launch.
__global__ void Action_noImage_center_GPU(double *D_,double *maskCenter,double *SolventMols_,double maxD, int Nmols , int NAtoms, int active_size)
{
    __shared__ double dist_array[BLOCKDIM];   // squared distance per thread/atom
    int mol = (blockIdx.x * active_size + threadIdx.x)/NAtoms;
    int atom = (blockIdx.x * active_size + threadIdx.x) - (mol * NAtoms);
    //int mol_in_block = threadIdx.x/NAtoms;
    // Keep the center coordinates in registers (read once per thread).
    double a0 = maskCenter[0];
    double a1 = maskCenter[1];
    double a2 = maskCenter[2];
    // NOTE(review): the __syncthreads() below executes inside this branch;
    // if threads of one block disagree on the condition the barrier is
    // divergent (undefined behavior) -- confirm launch parameters keep it
    // uniform per block.
    if ( threadIdx.x < active_size && mol*NAtoms + atom < Nmols*NAtoms )
    {
        // if(atom == 0 )
        // D_[mol] = maxD;
        //__syncthreads();
        int sIndex = mol*NAtoms*3 + atom*3;
        double x = a0 - SolventMols_[sIndex + 0];
        double y = a1 - SolventMols_[sIndex + 1];
        double z = a2 - SolventMols_[sIndex + 2];
        //Dist = x*x + y*y + z*z;
        dist_array[threadIdx.x] = x*x + y*y + z*z;
        //printf(" dist = %f\n", Dist);
        __syncthreads();
        //first thread
        //naive approach to a reduction algorithm
        //this works if NAtoms is small other wise you need split
        //and do some of log(n) parallel reduction
        int i;
        double min_val = maxD;
        if( atom ==0 )
        {
            // Atom-0 thread scans its molecule's NAtoms distances linearly.
            for(i = 0 ; i < NAtoms ; i++ ){
                //sIndex = mol*NAtoms*3 + i*3;
                //if (dist_array[threadIdx.x + i] < min_val)
                // min_val = dist_array[threadIdx.x + i] ;
                min_val = min(min_val, dist_array[threadIdx.x + i]);
            }
            D_[mol] = min_val;
        }
        //if(tx == 0 && bx == 0 )
        // printf("end of kernel");
    }
}
// int i;
// double min_val = maxD;
// if( atom ==0 )
// {
// for(i = 0 ; i < NAtoms ; i++ ){
// //sIndex = mol*NAtoms*3 + i*3;
// if (dist_array[threadIdx.x + i] < min_val)
// min_val = dist_array[threadIdx.x + i] ;
// }
// D_[mol] = min_val;
// }
// double min_val = maxD;
// if( threadIdx.x < active_size/NAtoms )
// {
// for(i = threadIdx.x*NAtoms ; i <threadIdx.x*NAtoms + NAtoms ; i++ ){
// //sIndex = mol*NAtoms*3 + i*3;
// if (dist_array[i] < min_val)
// min_val = dist_array[i] ;
// }
// D_[blockIdx.x * active_size/NAtoms + threadIdx.x ] = min_val;
// }
//------------------------------------------------------------------------------------------------------------------------------------------------
// Like Action_noImage_center_GPU, but measures each solvent atom against a
// LIST of solute atoms (Solute_atoms, xyz-flattened, NSAtoms entries),
// processed in chunks of at most RSIZE/3 atoms. Each (mol, atom) thread
// keeps a running minimum squared distance over all solute atoms; the
// atom-0 thread then reduces over its molecule's atoms into D_[mol].
// NOTE(review): sAtom_shared is declared but the copy into it is commented
// out -- the chunk loop reads Solute_atoms straight from global memory.
// NOTE(review): the __syncthreads() below runs inside a divergent branch
// (same concern as the center kernel) -- confirm block-uniform conditions.
__global__ void Action_noImage_no_center_GPU(double *D_,double *SolventMols_,double *Solute_atoms ,double maxD, int Nmols , int NAtoms,int NSAtoms , int active_size)
{
    __shared__ double dist_array[BLOCKDIM];
    __shared__ double sAtom_shared[RSIZE];
    int mol = (blockIdx.x * active_size + threadIdx.x)/NAtoms;
    int atom = (blockIdx.x * active_size + threadIdx.x) - (mol * NAtoms);
    //int mol_in_block = threadIdx.x/NAtoms;
    //handling the chunks for solute_atoms
    int chunksize,start,end, NChunks,i,j;
    if(NSAtoms*3 > RSIZE)
    {
        // Solute list exceeds shared capacity: walk it chunk by chunk.
        chunksize = (RSIZE/3)*3;
        NChunks = ceil(double(NSAtoms*3)/chunksize);
        start = 0;
        end = chunksize;
    }
    else
    {
        // Whole solute list fits in a single pass.
        chunksize = NSAtoms*3;
        NChunks = 1;
        start = 0;
        end = NSAtoms*3;
    }
    // if(threadIdx.x == 0 && blockIdx.x == 0 )
    // printf("chunkszize = %d ; Nchunk = %d; start = %d; end = %d\n ",
    // chunksize,NChunks,start,end);
    if ( threadIdx.x < active_size && mol*NAtoms + atom < Nmols*NAtoms )
    {
        // if(atom == 0 )
        // D_[mol] = maxD;
        //__syncthreads();
        double min_val = maxD;
        double dist;
        int sIndex = mol*NAtoms*3 + atom*3;
        // This thread's solvent-atom coordinates, cached in registers.
        double a0 = SolventMols_[sIndex + 0];
        double a1 = SolventMols_[sIndex + 1];
        double a2 = SolventMols_[sIndex + 2];
        for(i = 0 ; i < NChunks ; i++)
        {
            //copying to shared
            //if (threadIdx.x < (end - start))
            // sAtom_shared[threadIdx.x] = Solute_atoms[start + threadIdx.x];
            //__syncthreads();
            //TODO - add skew per thread
            for (j = start ; j < end; j+=3 )
            {
                //int offset = start + (j + threadIdx.x)%(end - start);
                double x = Solute_atoms[j + 0] - a0;
                double y = Solute_atoms[j + 1] - a1;
                double z = Solute_atoms[j + 2] - a2;
                dist = x*x + y*y + z*z;
                //if (mol == 11)
                // printf("min = %f\n",min_val);
                min_val = min(min_val,dist);
            }
            // Advance the chunk window, clamping the end to the list size.
            start = end;
            end = min(end + chunksize, NSAtoms*3);
        }
        dist_array[threadIdx.x] = min_val;
        //if (threadIdx.x == 0)
        // printf("min_val = %f\n",min_val);
        //printf(" dist = %f\n", Dist);
        __syncthreads();
        //first thread
        //naive approach to a reduction algorithm
        //this works if NAtoms is small other wise you need split
        //and do some of log(n) parallel reduction
        //min_val = maxD;
        if( atom ==0 )
        {
            // Atom-0 thread scans its molecule's NAtoms distances linearly.
            for(i = 0 ; i < NAtoms ; i++ ){
                //sIndex = mol*NAtoms*3 + i*3;
                //if (dist_array[threadIdx.x + i] < min_val)
                // min_val = dist_array[threadIdx.x + i] ;
                min_val = min(min_val, dist_array[threadIdx.x + i]);
            }
            D_[mol] = min_val;
        }
        //if(tx == 0 && bx == 0 )
        // printf("end of kernel");
    }
}
|
831
|
#include "losses.cuh"
#define BLOCK_SIZE 1024
#define DIVIDE(A,B) ((A+B-1)/B)
#define BLOCKS(N) DIVIDE(N,BLOCK_SIZE)
// Element-wise MSE gradient: ans[i] = yp[i] - yt[i]; one thread per element.
__global__ void gpu_deriv_mse(float* yt, float* yp, const unsigned int size, float* ans)
{
    const unsigned int tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid >= size)
        return;
    ans[tid] = yp[tid] - yt[tid];
}
// 0.5 * sum((y_true - y_pred)^2), computed on the CPU (GPU-resident inputs
// are copied down first). Throws on device or dimension mismatch.
float losses::func_mse(const Matrix& y_true, const Matrix& y_pred)
{
    if (y_true.is_gpu() != y_pred.is_gpu())
        yeet "One of the two matrices is GPU enabled, and the other isn't";
    if (y_true.get_dim1() != y_pred.get_dim1() or y_true.get_dim2() != y_pred.get_dim2())
        yeet "The dimensions does not match";
    // there isn't much of a point doing it on the GPU rn, so we'll do it on the CPU
    float *yt, *yp;
    if (y_true.is_gpu())
    {
        yt = y_true.get_cpu().get_matrix();
        yp = y_pred.get_cpu().get_matrix();
    }
    else
    {
        yt = y_true.get_matrix();
        yp = y_pred.get_matrix();
    }
    const int total = y_true.get_dim1() * y_true.get_dim2();
    float loss = 0.0f;
    for (int i = 0; i < total; ++i)
    {
        const float diff = yt[i] - yp[i];
        loss += diff * diff;
    }
    return loss * 0.5f;
}
// Element-wise derivative of 0.5*MSE: returns (y_pred - y_true) as a Matrix.
// Validates device placement and dimensions, then dispatches to the GPU
// kernel or a plain CPU loop.
Matrix losses::deriv_mse(const Matrix& y_true, const Matrix& y_pred)
{
    if (y_true.is_gpu() != y_pred.is_gpu())
        yeet "One of the two matrices is GPU enabled, and the other isn't";
    if (y_true.get_dim1() != y_pred.get_dim1() or y_true.get_dim2() != y_pred.get_dim2())
        yeet "The dimensions does not match";
    if (y_true.is_gpu())
    {
        Matrix ans(y_true.get_dim1(), y_true.get_dim2(), 0.0f);
        // NOTE(review): the launch is asynchronous and unchecked; nothing
        // here synchronizes before `ans` is returned -- confirm callers or
        // Matrix accessors sync before reading the result.
        gpu_deriv_mse <<<BLOCKS(y_true.get_dim1()*y_true.get_dim2()), BLOCK_SIZE>>>(y_true.get_matrix(), y_pred.get_matrix(), y_true.get_dim2()*y_true.get_dim1(), ans.get_matrix());
        return ans;
    }
    else
    {
        // CPU path: element-wise loop (last ctor flag presumably = not-GPU
        // -- TODO confirm against the Matrix constructor).
        Matrix ans(y_true.get_dim1(), y_true.get_dim2(), 0.0f, false);
        float *yt = y_true.get_matrix(), *yp = y_pred.get_matrix(), *a = ans.get_matrix();
        for (unsigned int i = 0; i < y_true.get_dim1()*y_true.get_dim2(); ++i)
            a[i] = yp[i] - yt[i];
        return ans;
    }
}
|
832
|
#include<cstdio>
struct RGB {
unsigned char r, g, b, a;
};
extern "C" {
// Convert each pixel to grayscale in place using Rec.601 luma weights.
// One thread per pixel; alpha is left untouched.
// Fix: use float literals (0.299f, ...) -- the bare double literals forced
// every pixel's weighted sum into double precision on the device.
__global__
void BlackWhite(RGB *image, int width, int height) {
    int thidx = blockIdx.x*blockDim.x + threadIdx.x;
    if (thidx >= width*height) {
        return;
    }
    unsigned char tmp = 0.299f*image[thidx].r +
        0.587f*image[thidx].g + 0.114f*image[thidx].b;
    image[thidx].r = image[thidx].g = image[thidx].b = tmp;
}
// Write the photographic negative of oldImage into newImage: each color
// channel becomes 255 - value; alpha is copied through unchanged.
__global__
void Negative(RGB *oldImage, RGB *newImage, int width, int height)
{
    const int pix = blockIdx.x*blockDim.x + threadIdx.x;
    if (pix < width*height) {
        RGB px = oldImage[pix];
        px.r = (unsigned char) (255 - px.r);
        px.g = (unsigned char) (255 - px.g);
        px.b = (unsigned char) (255 - px.b);
        newImage[pix] = px;
    }
}
// Normalize each pixel's color channels so they sum to 255 (chromaticity
// normalization), in place. One thread per pixel.
// Fixes: (1) guard against division by zero for pure-black pixels
// (r=g=b=0), which are now left unchanged instead of producing undefined
// values; (2) float literal (255.0f) to avoid double-precision division.
__global__
void Normalization(RGB *image, int width, int height) {
    const int thidx = blockIdx.x*blockDim.x + threadIdx.x;
    if (thidx >= width*height) {
        return;
    }
    int tmp = image[thidx].r + image[thidx].g + image[thidx].b;
    if (tmp == 0) {
        return;   // black pixel: nothing to normalize
    }
    image[thidx].r = (unsigned char) (image[thidx].r*255.0f / tmp);
    image[thidx].g = (unsigned char) (image[thidx].g*255.0f / tmp);
    image[thidx].b = (unsigned char) (image[thidx].b*255.0f / tmp);
}
}
|
833
|
#include "includes.h"
__global__ void copyPixelsInSlicesRGB(float *ptrinput0, float *ptrkslices0, int dH, int dW, int kH, int kW, int size1, int size2, int isize1, int isize2, int nInputPlane, int padleft, int padright, int padup, int paddown, int inputstr0, int kslicesstr0, int batchsize)
{
// each block does one pixel of the input image
// each kernel slice is represented by its upper-left coordinates
const int pixi=blockIdx.x;
const int pixj=blockIdx.y*blockDim.y + threadIdx.y;
const int tidx=threadIdx.x;
const int batchindex=blockIdx.z*blockDim.z+threadIdx.z;
int i,j;
int imin, jmin, imax, jmax;
int inputoffset, ksliceoffset;
// step 1 : find which kernel slices contain the values of the pixel
__shared__ int _imin, _jmin[32], _imax, _jmax[32], _inputoffset[32][3], _ksliceoffset[32][3];
if(threadIdx.z==0)
{
imin=(pixi - (kH - 1) + (dH -1))/dH > 0 ? (pixi - (kH - 1) + (dH -1))/dH : 0 ;
jmin=(pixj - (kW - 1) + (dW -1))/dW > 0 ? (pixj - (kW - 1) + (dW -1))/dW : 0 ;
imax= pixi / dH < size1 ? pixi / dH : size1 - 1 ;
jmax= pixj / dW < size2 ? pixj / dW : size2 - 1 ;
if(threadIdx.x==0 && threadIdx.y==0)
{
_imin=imin;
_imax=imax;
}
if(threadIdx.x==0)
{
_jmin[threadIdx.y]=jmin;
_jmax[threadIdx.y]=jmax;
}
inputoffset = inputstr0*blockIdx.z*blockDim.z + ((pixi-padup) * isize2 + (pixj-padleft)) * nInputPlane ;
ksliceoffset= kslicesstr0*blockIdx.z*blockDim.z + ((imin * size2 + jmin) * kH * kW + (pixi - imin * dH) * kW + (pixj - jmin*dW) ) * nInputPlane;
_inputoffset[threadIdx.y][threadIdx.x]=inputoffset;
_ksliceoffset[threadIdx.y][threadIdx.x]=ksliceoffset;
}
__syncthreads();
if(batchindex >= batchsize) return;
if(pixj > isize2 + padleft + padright -1) return;
if(threadIdx.z>0)
{
imin=_imin;
imax=_imax;
jmin=_jmin[threadIdx.y];
jmax=_jmax[threadIdx.y];
inputoffset=_inputoffset[threadIdx.y][threadIdx.x];
ksliceoffset=_ksliceoffset[threadIdx.y][threadIdx.x];
}
// step 2 : move the pointers
// this one goes to where the pixel is at
ptrinput0 += inputoffset+inputstr0*threadIdx.z ;
ptrkslices0 += ksliceoffset+kslicesstr0*threadIdx.z ;
const int stridej = (kH*kW - dW) * nInputPlane;
const int stridei = (size2*kH-dH) * kW *nInputPlane - (jmax-jmin+1) * stridej ;
bool zeropad = pixi<padup || pixi>isize1-1+padup || pixj<padleft || pixj>isize2-1+padleft ;
// read pixel
// load the stuff first...
//for (b=0; b<batchsize; b++)
//{
float * ptrinput = ptrinput0;
float * ptrkslices = ptrkslices0;
float pixvalue;
if (zeropad) {
pixvalue=0;
}
else {
pixvalue=ptrinput[tidx];
}
// write to memory
for(i=imin; i<imax+1; i++) {
for(j=jmin; j<jmax+1; j++) {
if(zeropad)
{
ptrkslices[tidx]=0;
}
else {
ptrkslices[tidx]=pixvalue;
}
ptrkslices += stridej;
}
ptrkslices += stridei;
}
//}
}
|
834
|
// Thread/block indexing shorthands used by the generated conv kernel below.
#define COALESCED_NUM 16
#define blockDimX 256
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)  // global x index
#define idy (blockIdx.y*blockDimY+threadIdx.y)  // global y index
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 16
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
// Row-major accessors with fixed row pitches for A (input), B (filter),
// C (output); note these index macros capture the surrounding A/B/C names.
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 16
#define WIDTH_A (2048+16)
__global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h)
{
__shared__ float shared_1[16][17];
__shared__ float shared_0[272];
int j;
float sum_0 = 0;
float sum_1 = 0;
float sum_2 = 0;
float sum_3 = 0;
float sum_4 = 0;
float sum_5 = 0;
float sum_6 = 0;
float sum_7 = 0;
float sum_8 = 0;
float sum_9 = 0;
float sum_10 = 0;
float sum_11 = 0;
float sum_12 = 0;
float sum_13 = 0;
float sum_14 = 0;
float sum_15 = 0;
int it_2;
for (j=0; j<(h-15); j=(j+1))
{
int it_2;
if ((tidx<16))
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*j))+h), (idx+(( - 1)*0)));
}
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*j))+h), ((idx+(( - 1)*0))+16));
__syncthreads();
if ((tidx<16))
{
shared_1[(tidx+0)][0]=B((j+0), (0+tidx));
shared_1[(tidx+0)][1]=B((j+1), (0+tidx));
shared_1[(tidx+0)][2]=B((j+2), (0+tidx));
shared_1[(tidx+0)][3]=B((j+3), (0+tidx));
shared_1[(tidx+0)][4]=B((j+4), (0+tidx));
shared_1[(tidx+0)][5]=B((j+5), (0+tidx));
shared_1[(tidx+0)][6]=B((j+6), (0+tidx));
shared_1[(tidx+0)][7]=B((j+7), (0+tidx));
shared_1[(tidx+0)][8]=B((j+8), (0+tidx));
shared_1[(tidx+0)][9]=B((j+9), (0+tidx));
shared_1[(tidx+0)][10]=B((j+10), (0+tidx));
shared_1[(tidx+0)][11]=B((j+11), (0+tidx));
shared_1[(tidx+0)][12]=B((j+12), (0+tidx));
shared_1[(tidx+0)][13]=B((j+13), (0+tidx));
shared_1[(tidx+0)][14]=B((j+14), (0+tidx));
shared_1[(tidx+0)][15]=B((j+15), (0+tidx));
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
__syncthreads();
__syncthreads();
}
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-1)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-1)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
sum_0+=(a*b_0);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-2)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-2)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-3)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-3)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-4)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-4)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-5)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-5)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-6)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-6)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-7)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-7)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-7), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][6]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-8)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-8)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-8), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-7), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][6]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][7]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-9)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-9)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-9), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-8), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-7), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][6]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][7]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][8]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-10)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-10)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-10), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-9), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-8), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-7), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][6]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][7]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][8]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][9]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-11)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-11)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-11), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-10), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-9), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-8), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-7), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][6]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][7]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][8]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][9]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][10]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-12)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-12)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-12), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-11), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-10), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-9), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-8), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-7), (0+tidx));
}
{
shared_1[(tidx+0)][6]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][7]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][8]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][9]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][10]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][11]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-13)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-13)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-13), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-12), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-11), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-10), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-9), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-8), (0+tidx));
}
{
shared_1[(tidx+0)][6]=B((h-7), (0+tidx));
}
{
shared_1[(tidx+0)][7]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][8]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][9]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][10]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][11]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][12]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-14)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-14)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-14), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-13), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-12), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-11), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-10), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-9), (0+tidx));
}
{
shared_1[(tidx+0)][6]=B((h-8), (0+tidx));
}
{
shared_1[(tidx+0)][7]=B((h-7), (0+tidx));
}
{
shared_1[(tidx+0)][8]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][9]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][10]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][11]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][12]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][13]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
}
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(h-15)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(h-15)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][0]=B((h-15), (0+tidx));
}
{
shared_1[(tidx+0)][1]=B((h-14), (0+tidx));
}
{
shared_1[(tidx+0)][2]=B((h-13), (0+tidx));
}
{
shared_1[(tidx+0)][3]=B((h-12), (0+tidx));
}
{
shared_1[(tidx+0)][4]=B((h-11), (0+tidx));
}
{
shared_1[(tidx+0)][5]=B((h-10), (0+tidx));
}
{
shared_1[(tidx+0)][6]=B((h-9), (0+tidx));
}
{
shared_1[(tidx+0)][7]=B((h-8), (0+tidx));
}
{
shared_1[(tidx+0)][8]=B((h-7), (0+tidx));
}
{
shared_1[(tidx+0)][9]=B((h-6), (0+tidx));
}
{
shared_1[(tidx+0)][10]=B((h-5), (0+tidx));
}
{
shared_1[(tidx+0)][11]=B((h-4), (0+tidx));
}
{
shared_1[(tidx+0)][12]=B((h-3), (0+tidx));
}
{
shared_1[(tidx+0)][13]=B((h-2), (0+tidx));
}
{
shared_1[(tidx+0)][14]=B((h-1), (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_0;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_0=shared_1[it_2][0];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
sum_0+=(a*b_0);
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
}
C(((idy*16)+0), idx)=sum_0;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-1)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-1)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][1]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][2]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][3]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][4]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][5]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][6]=B(5, (0+tidx));
}
{
shared_1[(tidx+0)][7]=B(6, (0+tidx));
}
{
shared_1[(tidx+0)][8]=B(7, (0+tidx));
}
{
shared_1[(tidx+0)][9]=B(8, (0+tidx));
}
{
shared_1[(tidx+0)][10]=B(9, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(10, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(11, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(12, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(13, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(14, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_1;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_1=shared_1[it_2][1];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_1+=(a*b_1);
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+1), idx)=sum_1;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-2)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-2)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][2]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][3]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][4]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][5]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][6]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][7]=B(5, (0+tidx));
}
{
shared_1[(tidx+0)][8]=B(6, (0+tidx));
}
{
shared_1[(tidx+0)][9]=B(7, (0+tidx));
}
{
shared_1[(tidx+0)][10]=B(8, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(9, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(10, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(11, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(12, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(13, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_2;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_2=shared_1[it_2][2];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_2+=(a*b_2);
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+2), idx)=sum_2;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-3)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-3)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][3]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][4]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][5]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][6]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][7]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][8]=B(5, (0+tidx));
}
{
shared_1[(tidx+0)][9]=B(6, (0+tidx));
}
{
shared_1[(tidx+0)][10]=B(7, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(8, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(9, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(10, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(11, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(12, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_3;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_3=shared_1[it_2][3];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_3+=(a*b_3);
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+3), idx)=sum_3;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-4)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-4)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][4]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][5]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][6]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][7]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][8]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][9]=B(5, (0+tidx));
}
{
shared_1[(tidx+0)][10]=B(6, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(7, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(8, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(9, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(10, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(11, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_4;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_4=shared_1[it_2][4];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_4+=(a*b_4);
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+4), idx)=sum_4;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-5)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-5)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][5]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][6]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][7]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][8]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][9]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][10]=B(5, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(6, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(7, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(8, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(9, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(10, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_5;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_5=shared_1[it_2][5];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_5+=(a*b_5);
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+5), idx)=sum_5;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-6)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-6)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][6]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][7]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][8]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][9]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][10]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(5, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(6, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(7, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(8, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(9, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_6;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_6=shared_1[it_2][6];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_6+=(a*b_6);
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+6), idx)=sum_6;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-7)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-7)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][7]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][8]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][9]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][10]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(5, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(6, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(7, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(8, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_7;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_7=shared_1[it_2][7];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_7+=(a*b_7);
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+7), idx)=sum_7;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-8)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-8)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][8]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][9]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][10]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(5, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(6, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(7, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_8;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_8=shared_1[it_2][8];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_8+=(a*b_8);
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+8), idx)=sum_8;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-9)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-9)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][9]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][10]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(5, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(6, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_9;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_9=shared_1[it_2][9];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_9+=(a*b_9);
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+9), idx)=sum_9;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-10)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-10)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][10]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][11]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(4, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(5, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_10;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_10=shared_1[it_2][10];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_10+=(a*b_10);
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+10), idx)=sum_10;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-11)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-11)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][11]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][12]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(3, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(4, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_11;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_11=shared_1[it_2][11];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_11+=(a*b_11);
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+11), idx)=sum_11;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-12)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-12)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][12]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][13]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(2, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(3, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_12;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_12=shared_1[it_2][12];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_12+=(a*b_12);
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+12), idx)=sum_12;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-13)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-13)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][13]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][14]=B(1, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(2, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_13;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_13=shared_1[it_2][13];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_13+=(a*b_13);
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+13), idx)=sum_13;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-14)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-14)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][14]=B(0, (0+tidx));
}
{
shared_1[(tidx+0)][15]=B(1, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_14;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_14=shared_1[it_2][14];
b_15=shared_1[it_2][15];
sum_14+=(a*b_14);
sum_15+=(a*b_15);
}
C(((idy*16)+14), idx)=sum_14;
__syncthreads();
__syncthreads();
if ((tidx<16))
{
{
shared_0[(tidx+0)]=A((((idy*16)+(( - 1)*(0-15)))+h), (idx+(( - 1)*0)));
}
}
{
shared_0[(tidx+16)]=A((((idy*16)+(( - 1)*(0-15)))+h), ((idx+(( - 1)*0))+16));
}
__syncthreads();
if ((tidx<16))
{
{
shared_1[(tidx+0)][15]=B(0, (0+tidx));
}
}
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b_15;
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b_15=shared_1[it_2][15];
sum_15+=(a*b_15);
}
C(((idy*16)+15), idx)=sum_15;
__syncthreads();
__syncthreads();
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
{
}
}
|
835
|
#include "includes.h"
__global__ void multiply( float *A2, float *B2, float *C, int N, int threads_num ){
    // Computes C = A2 * B2 for row-major N x N matrices. The N*N output
    // elements are split into `threads_num` contiguous slices, one per
    // thread; launch with grid*block == threads_num.
    //
    // Fix: the original staged A2/B2 through `__shared__ float*` variables
    // that every thread wrote without synchronization -- a data race that
    // buys nothing (the pointers still refer to global memory). The
    // parameters are now used directly, as read-only.
    const float *A = A2;
    const float *B = B2;

    int tid = blockDim.x * blockIdx.x + threadIdx.x;   // global thread id

    // [a, b) = this thread's flattened output range; the last thread absorbs
    // the remainder when N*N is not divisible by threads_num.
    // NOTE(review): N*N*tid is computed in 32-bit int, as in the original;
    // overflows for large N -- confirm expected matrix sizes.
    int a = N * N * tid / threads_num;
    int b = (tid == threads_num - 1) ? N * N
                                     : N * N * (tid + 1) / threads_num;

    for (int pos = a; pos < b; pos++) {
        int row = pos / N;
        int col = pos - row * N;          // pos % N without a second divide
        float tmp = 0.0f;
        for (int k = 0; k < N; k++)
            tmp += A[N * row + k] * B[k * N + col];
        C[pos] = tmp;
    }
}
|
836
|
/*
@EECE528 Project - BDD Parallelization
@Authors: Yu Lei, Haotian Zhang
@Date: 2017/12/3
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include"cuda.h"
#define MAXNODENUM 16000
#define MAXLINE 256 /* Maximum length of each input line read. */
/* One node of a binary decision diagram (BDD). */
typedef struct bddNode_ {
    float index;                 /* variable level; INFINITY marks a terminal leaf */
    int value;                   /* leaf constant (0 or 1); -1 for internal nodes */
    struct bddNode_ *lowChild;   /* child for the low (0) branch; NULL on leaves */
    struct bddNode_ *highChild;  /* child for the high (1) branch; NULL on leaves */
} bddNode;
/* A whole BDD: node/level counts, the root, and the two shared terminal leaves. */
typedef struct bddTree_ {
    int totalNodeNum;            /* node count as declared by the input file */
    int totalLevels;             /* level count as declared by the input file */
    bddNode *topNode;            /* root; last node read by readBDD() */
    bddNode *zeroLeaf;           /* shared constant-0 terminal */
    bddNode *oneLeaf;            /* shared constant-1 terminal */
} bddTree;
/* Bookkeeping for the apply operation's node pool. */
typedef struct applyManager_ {
    int maxNodeNum;              /* capacity limit */
    int currentSpaceNum;         /* slots currently in use */
} applyManager;
/* Unique table of (index, left, right) triples searched by check_node(). */
typedef struct pattern_{
    int size;                    /* number of valid entries in the arrays below */
    float index[MAXNODENUM];     /* variable index of entry i */
    bddNode* left[MAXNODENUM];   /* low child of entry i */
    bddNode* right[MAXNODENUM];  /* high child of entry i */
}pattern;
/* Global unique table shared by the host and the lookup wrapper. */
pattern patterns;
void bddTreeInit(bddTree *bdd) {
    /* Initialize an empty BDD: zero the node/level counts and allocate the
     * two terminal leaves (constant 0 and constant 1). Leaves get index
     * INFINITY so traversal code can recognize them and stop recursing.
     *
     * Fix: the original dereferenced the malloc results without checking
     * them; on OOM we now report and exit instead of crashing. topNode is
     * also set to NULL rather than left indeterminate. */
    bddNode *zero, *one;

    bdd->totalNodeNum = 0;
    bdd->totalLevels = 0;
    bdd->topNode = NULL;   /* no root until a tree is read/built */

    zero = (bddNode*)malloc(sizeof(bddNode));
    one = (bddNode*)malloc(sizeof(bddNode));
    if (zero == NULL || one == NULL) {
        fprintf(stderr, "bddTreeInit: out of memory\n");
        exit(EXIT_FAILURE);
    }

    zero->index = INFINITY;
    zero->value = 0;
    zero->lowChild = NULL;
    zero->highChild = NULL;

    one->index = INFINITY;
    one->value = 1;
    one->lowChild = NULL;
    one->highChild = NULL;

    bdd->zeroLeaf = zero;
    bdd->oneLeaf = one;
}
void applyManagerInit(applyManager *appMan, int maxNodes){
    /* Reset the apply manager: record its node-capacity limit and start
     * with zero slots in use. */
    appMan->currentSpaceNum = 0;
    appMan->maxNodeNum = maxNodes;
}
bddTree* readBDD(char *filename) {
FILE *f;
bddTree *bdd;
int nodeTotal;
int levelTotal;
int nodeNum;
int nodeIndex;
int lowC;
int highC;
f = fopen(filename,"r");
if (!f) {
fprintf(stderr, "cannot open file \"%s\"\n", filename);
return NULL;
}
bdd = (bddTree*)malloc(sizeof(bddTree));
bddTreeInit(bdd);
char linebuf[MAXLINE];
fgets(linebuf,MAXLINE,f);
sscanf(linebuf, "%d %d", &nodeTotal, &levelTotal);
bddNode *array[10000];
bdd->totalNodeNum = nodeTotal;
bdd->totalLevels = levelTotal;
while (fgets(linebuf, MAXLINE, f) != NULL) {
sscanf(linebuf, "%d %d %d %d", &nodeNum, &nodeIndex, &lowC, &highC);
bddNode *newNode;
newNode = (bddNode*)malloc(sizeof(bddNode));
newNode->index = nodeIndex;
newNode->value = -1;
if (lowC == -10) {
newNode->lowChild = bdd->zeroLeaf;
} else if (lowC == -11) {
newNode->lowChild = bdd->oneLeaf;
} else {
newNode->lowChild = array[lowC];
}
if (highC == -10) {
newNode->highChild = bdd->zeroLeaf;
} else if (highC == -11) {
newNode->highChild = bdd->oneLeaf;
} else {
newNode->highChild = array[highC];
}
array[nodeNum] = newNode;
bdd->topNode = newNode;
}
fclose(f);
return bdd;
}
void printNode(bddNode *node) {
    /* Pre-order dump of the subtree rooted at `node`: print this node's
     * index and both children's indices, then recurse into each non-leaf
     * child (leaves carry index == INFINITY and are never descended into). */
    printf("Node: %f children: \t%f \t%f.\n", node->index, node->lowChild->index, node->highChild->index);
    bddNode *children[2] = { node->lowChild, node->highChild };
    for (int c = 0; c < 2; c++) {
        if (children[c]->index != INFINITY)
            printNode(children[c]);
    }
}
// Print a header plus a pre-order dump of the whole tree.
void printBDD(bddTree *bdd) {
    printf("\nPrinting bdd:\n");
    printf("Total nodes in bdd: %d\n", bdd->totalNodeNum);
    printNode(bdd->topNode);
}
// Post-order free of every interior node reachable from `node`. Leaf
// sentinels (index == INFINITY) are skipped here; freeBDD releases them.
// NOTE(review): applyBDDs can return a node that is shared by several
// parents (left == right shortcut, pattern reuse) — if the structure is a
// DAG rather than a tree, this walk frees such a node more than once.
// Verify before relying on this for apply results.
void recursFree(bddNode *node) {
if (node->lowChild->index != INFINITY) {
recursFree(node->lowChild);
}
if (node->highChild->index != INFINITY) {
recursFree(node->highChild);
}
free(node);
}
// Release a whole tree: interior nodes first, then the two shared leaf
// sentinels, and finally the header struct itself.
void freeBDD(bddTree *bdd) {
    recursFree(bdd->topNode);
    free(bdd->zeroLeaf);
    free(bdd->oneLeaf);
    free(bdd);
}
// void addNew(int *size) {
// }i
// Device-side scratch used by check_node/check_nodec:
float *d_index;                // device copy of the candidate index (1 float)
int *d_result,*check_result;   // device match result and its host mirror
// d_left/d_right: device buffers for the candidate children;
// cleft/cright: host-side staging pointers;
// d_array_left/d_array_right: device arrays of known (left,right) pairs.
bddNode *d_left, *d_right,*cleft,*cright,*d_array_left,*d_array_right;
float *d_array_index;          // device array of known node indices
// Scan the first `size` entries of the pattern arrays for an entry equal to
// (*d_index, d_left, d_right); on a match, write its position to *d_result.
// NOTE(review): thread 0 zeroes *d_result while other blocks may already be
// writing matches — there is no grid-wide ordering here, so the result is
// racy unless the kernel runs in a single block. Also, the `*d_result == 1`
// test below looks suspicious (why only position 1?) — confirm intent.
__global__
void check_nodec(int size,int *d_result,bddNode *d_left,bddNode *d_right,float *d_index,bddNode **d_array_left,bddNode **d_array_right,float *d_array_index){
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i==0){
*d_result = 0;
}
if(i < size){
if(d_array_index[i] == *d_index && d_array_left[i] == d_left && d_array_right[i] == d_right){
*d_result = i;
}
// NOTE(review): appends the candidate at size+1 only when the match landed
// at index 1; this reads like a partial/abandoned insert path — verify.
if(i == 0 && *d_result == 1){
d_array_index[size+1]=*d_index;
d_array_right[size+1]=d_right;
d_array_left[size+1]=d_left;
}
}
}
// Look up (index, left, right) in the global pattern table via the GPU;
// returns the matching slot, or 0 (and appends the triple on the host) when
// no match was found.
int check_node(float index,bddNode* left, bddNode *right){
    int size = patterns.size;
    float cindex = index;
    cleft = left;
    cright = right;
    // NOTE(review): these copy the first sizeof(bddNode*) bytes of the node
    // the host pointer targets into a device buffer, then the kernel compares
    // stored pointers against that buffer's address — the comparison
    // semantics look broken; confirm what was intended here.
    cudaMemcpy(d_left,cleft,sizeof(bddNode*),cudaMemcpyHostToDevice);
    cudaMemcpy(d_right,cright,sizeof(bddNode*),cudaMemcpyHostToDevice);
    // BUG FIX: was sizeof(bddNode), overflowing the sizeof(float)
    // allocation behind d_index.
    cudaMemcpy(d_index,&cindex,sizeof(float),cudaMemcpyHostToDevice);
    check_nodec<<<(size+511)/512,512>>>(size,d_result,d_left,d_right,d_index,&d_array_left,&d_array_right,d_array_index);
    // BUG FIX: previously malloc'd a fresh int on every call (leak); reuse
    // the buffer allocated in main, allocating only if it is still NULL.
    if (check_result == NULL) {
        check_result = (int*)malloc(sizeof(int));
    }
    cudaMemcpy(check_result,d_result,sizeof(int),cudaMemcpyDeviceToHost);
    // BUG FIX: the original compared the POINTER to 0 (always false after a
    // successful malloc), so the table was never extended.
    if (*check_result == 0) {
        patterns.index[patterns.size] = index;
        patterns.left[patterns.size] = left;
        patterns.right[patterns.size] = right;
        patterns.size++;
    }
    return *check_result;
}
// Recursive apply over two BDDs. The terminal table below is logical AND:
// only (1,1) maps to the one leaf. Recurses on whichever operand has the
// smaller variable index (both when equal), then builds or reuses a node.
bddNode* applyBDDs(bddTree *result, bddNode *node1, bddNode *node2, applyManager *appMan){
    // Terminal cases: both operands are leaves.
    if(node1->value == 0 && node2->value == 0){
        return result->zeroLeaf;
    }else if(node1->value == 0 && node2->value == 1){
        return result->zeroLeaf;
    }else if(node1->value == 1 && node2->value == 0){
        return result->zeroLeaf;
    }else if(node1->value == 1 && node2->value == 1){
        return result->oneLeaf;
    }
    bddNode *left, *right;
    float newNodeIndex;
    if(node1->index == node2->index){
        left = applyBDDs(result, node1->lowChild,node2->lowChild,appMan);
        right = applyBDDs(result, node1->highChild,node2->highChild,appMan);
        // BUG FIX: newNodeIndex was left uninitialized on this path.
        newNodeIndex = node1->index;
    }else if (node1->index < node2->index){
        left = applyBDDs(result,node1->lowChild,node2,appMan);
        right = applyBDDs(result,node1->highChild,node2,appMan);
        newNodeIndex = node1->index;
    }else{
        left = applyBDDs(result,node1,node2->lowChild,appMan);
        right = applyBDDs(result,node1,node2->highChild,appMan);
        newNodeIndex = node2->index;
    }
    if(left == right){
        // Redundant test node — reuse the child directly.
        // BUG FIX: the original malloc'd newNode before this check and
        // leaked it on this path.
        return left;
    }
    bddNode *newNode = (bddNode*)malloc(sizeof(bddNode));
    int checkNode = check_node(newNodeIndex,left,right);
    if(checkNode){
        // Known pattern: rebuild the node from the cached triple.
        newNode->index = patterns.index[checkNode];
        newNode->value = -1;
        newNode->lowChild = patterns.left[checkNode];
        newNode->highChild = patterns.right[checkNode];
    }else{
        newNode->index = newNodeIndex;
        newNode->value = -1;
        newNode->lowChild = left;
        newNode->highChild = right;
    }
    return newNode;
}
// Load two BDDs from the files named on the command line, apply them, and
// report the elapsed CPU time. Device scratch for check_node is allocated
// up front and released at the end.
int main(int argc, char* argv[]) {
    bddTree *bdd1, *bdd2;
    bddTree *bddResult;
    clock_t begin,end;
    if (argc !=3) {
        fprintf(stderr,"usage: a.out file1 file2\n");
        exit(1);
    }
    bdd1 = readBDD(argv[1]);
    bdd2 = readBDD(argv[2]);
    bddResult = (bddTree*)malloc(sizeof(bddTree));
    bddTreeInit(bddResult);
    applyManager *appMan = (applyManager*)malloc(sizeof(applyManager));
    applyManagerInit(appMan, (int)pow(2, (bdd1->totalLevels + bdd2->totalLevels)));
    patterns.size = 0;
    check_result = (int*)malloc(sizeof(int));
    cudaMalloc(&d_result,sizeof(int));
    cudaMalloc(&d_index,sizeof(float));
    cudaMalloc(&d_left,sizeof(bddNode*));
    cudaMalloc(&d_right,sizeof(bddNode*));
    cudaMalloc(&d_array_index,MAXNODENUM*sizeof(float));
    cudaMalloc(&d_array_right,MAXNODENUM*sizeof(bddNode*));
    cudaMalloc(&d_array_left,MAXNODENUM*sizeof(bddNode*));
    begin = clock();
    bddResult->topNode = applyBDDs(bddResult, bdd1->topNode, bdd2->topNode, appMan);
    end = clock();
    printf("time: %f sec\n",(double)(end-begin)/CLOCKS_PER_SEC);
    // NOTE(review): only the headers are freed here, not the node graphs
    // (freeBDD exists but can double-free shared apply nodes) — intentional?
    free(bdd1);
    free(bdd2);
    free(bddResult);
    cudaFree(d_result);
    cudaFree(d_index);
    cudaFree(d_left);
    cudaFree(d_right);
    cudaFree(d_array_index);
    cudaFree(d_array_right);
    cudaFree(d_array_left);
    free(appMan);
    // BUG FIX: check_result was allocated above but never released.
    free(check_result);
    return 0;
}
|
837
|
#define MOVEBITS 22 //Using 22 bits for Supervertex IDs, hope this is enough, leaving 10 bits for weights, max 1K weight range
#define APPEND_VERTEX_BITS 32
#define INF 100000000
#define MAX_THREADS_PER_BLOCK 512
// One thread per edge: where the segment flag is set, record this edge's
// position as the start of the corresponding vertex's adjacency range.
__global__ void makeVertexList(unsigned int *d_vertices, unsigned int *d_edge_flag, int *d_pick_array, int num_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_edges)
        return;
    if (d_edge_flag[tid] != 1)
        return;
    unsigned int u = d_pick_array[tid];   // segment owner (u value)
    d_vertices[u] = tid;                  // first edge index of vertex u
}
// Flag every position where the pick value increases relative to its
// predecessor (segment boundary); position 0 is always a boundary.
__global__ void makeFlagArrayForNewVertexList(unsigned int *d_edge_flag, int *d_pick_array, int num_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_edges)
        return;
    if (tid == 0)
    {
        d_edge_flag[tid] = 1;
    }
    else if (d_pick_array[tid-1] < d_pick_array[tid])
    {
        d_edge_flag[tid] = 1;
    }
}
// Commit the compacted edge list: copy the staging buffers (mapping copy,
// reused scan input/output) back into the primary mapping/edge/weight arrays.
__global__ void copyEdgeMapping(unsigned int *d_edge_mapping_copy, unsigned int *d_edge_mapping,
                                unsigned int *d_weights, unsigned int *d_edges,
                                unsigned int *d_segmented_min_scan_input, unsigned int *d_segmented_min_scan_output,
                                unsigned int *new_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= *new_edges)
        return;
    d_edge_mapping[tid] = d_edge_mapping_copy[tid];
    d_edges[tid]        = d_segmented_min_scan_input[tid];
    d_weights[tid]      = d_segmented_min_scan_output[tid];
}
// Unpack each (supervertex u, edge index) key and, for edges whose endpoints
// survived (neither is INF), stage the compacted edge data into the reused
// scan buffers. The last thread records the new vertex/edge list sizes.
__global__ void compactEdgeListDuplicates(unsigned int *d_edges, unsigned int *d_weights, unsigned int *d_new_vertex_Ids,
                                          unsigned int *d_edge_mapping, unsigned int *d_edge_mapping_copy,
                                          unsigned int *d_segmented_min_scan_input, unsigned int *d_segmented_min_scan_output,
                                          unsigned int *d_edge_flag, unsigned long long int *d_appended_uindex,
                                          int *d_pick_array, unsigned int *new_edges, unsigned int *new_vertex_size,
                                          unsigned int *new_edge_size)
{
    unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if(i < *new_edges)
    {
        unsigned long long int val = d_appended_uindex[i];
        // IMPROVED: build the low-bits mask with an integer shift instead of
        // double-precision pow() (exact and far cheaper in device code).
        unsigned long long int mask = (1ULL << APPEND_VERTEX_BITS) - 1;
        unsigned long long int index = val&mask;
        unsigned long long int u = val >> APPEND_VERTEX_BITS;
        unsigned int v = d_edges[index];
        if(u!=INF && v!=INF)
        {
            //Copy the edge_mapping into a temporary array, used to resolve read after write inconsistancies
            d_edge_mapping_copy[i] = d_edge_mapping[index]; //keep a mapping from old edge-list to new one
            d_pick_array[i]=u; // reusing this to store u's
            d_segmented_min_scan_output[i]= d_weights[index]; //resuing d_segmented_min_scan_output to store weights
            d_segmented_min_scan_input[i] = d_new_vertex_Ids[v]; //resuing d_segmented_scan_input to store v ids
            //Set the new vertex list and edge list sizes
            if(i==*new_edges-1)
            {
                *new_edge_size=(i+1);
                *new_vertex_size=(u+1);
            }
        }
    }
}
// Flag the first key of every supervertex segment in the sorted key array,
// and record where the INF keys (removed edges) begin as the new edge count.
__global__ void markEdgesNew(unsigned int *d_edge_flag, unsigned long long int *d_appended_uindex,
                             unsigned int *d_new_edges, int num_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_edges)
        return;
    if (tid == 0)
    {
        d_edge_flag[tid] = 1;
        return;
    }
    unsigned long long int prevOwner = d_appended_uindex[tid-1] >> APPEND_VERTEX_BITS;
    unsigned long long int currOwner = d_appended_uindex[tid]   >> APPEND_VERTEX_BITS;
    if (currOwner > prevOwner)
        d_edge_flag[tid] = 1;
    if (currOwner == INF && prevOwner != INF)
    {
        *d_new_edges = tid;   // first removed-edge slot == surviving edge count
    }
}
// Insertion-sort step executed by each thread over the shared array.
// NOTE(review): all threads mutate d_array concurrently with no
// synchronization; as written this is only well-defined when launched with
// a single thread — confirm the intended launch configuration.
__global__ void sortArray(unsigned long long int *d_array, int length)
{
    unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    unsigned long long int Temp;
    int j;
    if (i < length)
    {
        Temp = d_array[i];
        j = i-1;
        // BUG FIX: test j >= 0 BEFORE indexing d_array[j]; the original
        // order read d_array[-1] when i == 0.
        while(j >= 0 && Temp < d_array[j])
        {
            d_array[j+1] = d_array[j];
            j = j-1;
        }
        d_array[j+1] = Temp;
    }
}
// Build 64-bit sort keys per edge: the source's supervertex id in the high
// bits (INF when either endpoint was removed) and the edge index in the low
// APPEND_VERTEX_BITS bits.
__global__ void appendForNoDuplicateEdgeRemoval(unsigned long long int *d_appended_uindex, unsigned int *d_edges, unsigned int *d_old_uId,
                                                unsigned int *d_weights, unsigned int *d_new_vertex_Ids, int num_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_edges)
        return;
    unsigned int src = d_old_uId[tid];
    unsigned int dst = d_edges[tid];
    unsigned int superId = INF;
    if (src != INF && dst != INF)
    {
        superId = d_new_vertex_Ids[src];
    }
    unsigned long long int key = superId;
    key = (key << APPEND_VERTEX_BITS) | tid;
    d_appended_uindex[tid] = key;
}
// Drop edges whose endpoints collapsed into the same supervertex by
// overwriting the destination with INF.
__global__ void removeSelfEdges(unsigned int *d_edges, unsigned int *d_old_uId,
                                unsigned int *d_new_vertex_Ids, int num_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_edges)
        return;
    unsigned int src = d_old_uId[tid];
    unsigned int dst = d_edges[tid];
    if (d_new_vertex_Ids[src] == d_new_vertex_Ids[dst])
    {
        d_edges[tid] = INF;
    }
}
// For each entry of the sorted (supervertex, vertex) keys, extract the
// original vertex id from the low bits and scatter this entry's new
// supervertex id to that vertex's slot.
__global__ void markSuperVertexIdsPerVertex(unsigned int *d_new_vertex_Ids, unsigned long long int *d_append_Ids,
                                            unsigned int *d_vertex_flag, int num_vertices)
{
    unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if(i < num_vertices)
    {
        // IMPROVED: integer shift instead of double-precision pow() for the
        // low-bits mask (exact, no FP round trip in device code).
        unsigned long long int mask = (1ULL << APPEND_VERTEX_BITS) - 1;
        unsigned long long int vertexid = d_append_Ids[i]&mask;
        d_vertex_flag[vertexid] = d_new_vertex_Ids[i];
    }
}
// Flag positions where the supervertex id (high bits of the sorted key)
// differs from the previous entry; position 0 is left untouched.
__global__ void markVertexFlag(unsigned int *d_vertex_flag, unsigned long long int *d_append_Ids, int num_vertices)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_vertices || tid == 0)
        return;
    unsigned int currParent = d_append_Ids[tid]   >> APPEND_VERTEX_BITS;
    unsigned int prevParent = d_append_Ids[tid-1] >> APPEND_VERTEX_BITS;
    if (currParent != prevParent)
    {
        d_vertex_flag[tid] = 1;
    }
}
// Insertion-sort step executed by each thread over the shared key array.
// NOTE(review): same caveat as sortArray — concurrent unsynchronized
// mutation; only well-defined for a single-thread launch. Confirm.
__global__ void sortSuperVertexIds(unsigned long long int *d_append_Ids, int num_vertices)
{
    unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    unsigned long long int Temp;
    int j;
    if(i < num_vertices)
    {
        Temp = d_append_Ids[i];
        j = i-1;
        // BUG FIX: test j >= 0 BEFORE indexing; the original order read
        // d_append_Ids[-1] when i == 0.
        while(j >= 0 && Temp < d_append_Ids[j])
        {
            d_append_Ids[j+1] = d_append_Ids[j];
            j = j-1;
        }
        d_append_Ids[j+1] = Temp;
    }
}
// Pack (successor, vertex id) into one 64-bit key so the list can be sorted
// by successor while remembering which vertex each key came from.
__global__ void appendVertexId(unsigned long long int *d_append_Ids, unsigned int *d_successor, int num_vertices)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_vertices)
        return;
    unsigned long long int key = d_successor[tid];
    key = (key << APPEND_VERTEX_BITS) | tid;
    d_append_Ids[tid] = key;
}
// One round of pointer doubling: redirect each vertex to its grandparent
// (written to the copy buffer) and record whether any pointer still moved.
__global__ void doPointerDoubling(unsigned int *d_successor,unsigned int *d_successor_copy, int num_vertices, bool *d_change)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_vertices)
        return;
    unsigned int parent = d_successor[tid];
    unsigned int grand  = d_successor[parent];
    if (grand != parent)
    {
        d_successor_copy[tid] = grand;
        *d_change = true;
    }
}
// Element-wise device-to-device copy: newArray[i] = oldArray[i].
__global__ void copyArray(unsigned int *newArray, unsigned int *oldArray, int num_elements)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < num_elements)
    {
        newArray[tid] = oldArray[tid];
    }
}
// Mark the MST output edge of each segment: the edge whose packed value
// equals the segment minimum at the segment's end position. When the
// previous edge belongs to the same segment and carries the same value,
// only the later one is marked (breaks ties once per segment).
__global__ void markOutputEdges(int *d_pick_array, unsigned int *d_segmented_min_scan_output,
                                unsigned int *d_output_MST, unsigned int *d_edge_mapping, int num_edges)
{
    unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if(i < num_edges)
    {
        int index = d_pick_array[i];
        if( index >= 0)
        {
            unsigned int output = d_segmented_min_scan_output[index];
            unsigned int curr = d_segmented_min_scan_output[i];
            // BUG FIX: the original read d_pick_array[i-1] and
            // d_segmented_min_scan_output[i-1] unconditionally, which is an
            // out-of-bounds read at i == 0. i == 0 has no predecessor, so it
            // takes the no-same-segment-predecessor path.
            if(i > 0 && d_pick_array[i-1] == index)
            {
                unsigned int prev = d_segmented_min_scan_output[i-1];
                if(curr == output && curr != prev)
                {
                    unsigned int edgeid = d_edge_mapping[i];
                    d_output_MST[edgeid]=1;
                }
            }
            else
            {
                if(curr == output)
                {
                    unsigned int edgeid = d_edge_mapping[i];
                    d_output_MST[edgeid]=1;
                }
            }
        }
    }
}
// Initialise the edge mapping to the identity: slot i maps to edge i.
__global__ void initialiseArray(unsigned int *d_edge_mapping, int num_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < num_edges)
    {
        d_edge_mapping[tid] = tid;
    }
}
// For each edge, store the index of the LAST edge of its source vertex's
// adjacency segment, or -1 when the source vertex is its own successor
// (a supervertex root whose segment produces no output this round).
__global__ void makePickArray(int *d_pick_array, unsigned int *d_successor,unsigned int *d_vertices,
                              unsigned int *d_old_uID,int num_vertices, int num_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_edges)
        return;
    unsigned int src = d_old_uID[tid];
    unsigned int segEnd;
    if (src < ((num_vertices) - 1))
    {
        segEnd = d_vertices[src+1] - 1;   // one before the next vertex's first edge
    }
    else
    {
        segEnd = num_edges - 1;           // last vertex owns the array tail
    }
    if (src != d_successor[src])
        d_pick_array[tid] = segEnd;
    else
        d_pick_array[tid] = -1;
}
// Shift every source-vertex id down by one (post-scan adjustment).
__global__ void modifyOldUID(unsigned int *d_old_uID, int num_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < num_edges)
    {
        d_old_uID[tid] -= 1;
    }
}
// Naive scan of the edge flags into d_old_uID (slot 0 forced to 0).
// NOTE(review): each thread reads d_old_uID[i-1], which is written by a
// DIFFERENT thread with no synchronization — this is only well-defined when
// launched with a single thread; verify the intended launch configuration.
__global__ void prefixSum(unsigned int *d_old_uID, unsigned int *d_edge_flag, int num_edges)
{
unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
// for (int i=0;i<num_edges; i++)
if(i < num_edges)
{
if(i == 0)
// d_old_uID[i]= d_edge_flag[i];
d_old_uID[i]= 0;
else
d_old_uID[i]= d_old_uID[i-1]+d_edge_flag[i];
}
}
// Scatter a 1 into the edge-flag array at each vertex's first-edge offset.
__global__ void markFlagForUid(unsigned int *d_edge_flag, unsigned int *d_vertices, int num_vertices)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_vertices)
        return;
    unsigned int firstEdge = d_vertices[tid];
    d_edge_flag[firstEdge] = 1;
}
// Break 2-cycles in the successor pointers: when i -> s -> i, the smaller
// endpoint is made a root (points to itself).
__global__ void removeCycles(unsigned int *d_successor, int num_vertices)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_vertices)
        return;
    unsigned int succ = d_successor[tid];
    if (d_successor[succ] != tid)
        return;                       // not part of a 2-cycle
    if (tid < succ)
        d_successor[tid] = tid;
    else
        d_successor[succ] = succ;
}
// For each vertex, take the segment-minimum packed value at the end of its
// adjacency range and keep only the low MOVEBITS bits (the chosen edge's
// destination) as the vertex's successor.
__global__ void makeSuccesorArray(unsigned int *d_successor, unsigned int *d_vertices, unsigned int *d_segmented_min_scan_output,
                                  int num_vertices, int num_edges)
{
    unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if(i < num_vertices)
    {
        unsigned int end;
        if(i < (num_vertices-1))
            end = d_vertices[i+1]-1;   // one before the next vertex's first edge
        else
            end = num_edges-1;         // last vertex owns the tail
        // IMPROVED: integer shift instead of double-precision pow() for the
        // low-bits mask (exact, avoids FP in device code).
        unsigned int mask = (1u << MOVEBITS) - 1;
        d_successor[i] = d_segmented_min_scan_output[end]&mask;
    }
}
// Return the smaller of two unsigned ints.
// BUG FIX: the temporary was declared `int`, so values above INT_MAX were
// pushed through a signed conversion round-trip; keep everything unsigned.
__device__ unsigned int min_device(unsigned int a, unsigned int b)
{
    unsigned int tmp = a;
    if(a > b)
    {
        tmp = b;
    }
    return tmp;
}
// Segmented running-minimum: restart at flagged positions, otherwise take
// the min of the previous output and the current input.
// NOTE(review): each thread reads output[i-1], which is produced by a
// DIFFERENT thread with no synchronization — only well-defined for a
// single-thread launch; verify the intended launch configuration.
__global__ void segmentedMinScan(unsigned int *d_segmented_min_scan_output, unsigned int *d_segmented_min_scan_input,
const unsigned int *d_edge_flag, int num_edges)
{
unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
// for(int i=0; i< num_edges; i++)
if(i < num_edges)
{
if(d_edge_flag[i] == 1)
{
// Segment start: the running minimum restarts from the input value.
d_segmented_min_scan_output[i]= d_segmented_min_scan_input[i];
}
else
{
d_segmented_min_scan_output[i] = min_device(d_segmented_min_scan_output[i-1],d_segmented_min_scan_input[i]);
}
}
}
// Pack (weight, destination) into a single word per edge: the weight in the
// high bits, the edge destination in the low MOVEBITS bits.
__global__ void appendWeight(unsigned int *d_segmented_min_scan_input, unsigned int *d_weights, unsigned int *d_edges, int num_edges)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_edges)
        return;
    unsigned int packed = (d_weights[tid] << MOVEBITS) | d_edges[tid];
    d_segmented_min_scan_input[tid] = packed;
}
/*
__global__ void printArray(unsigned int *arrayToBePrint, int num_elements)
{
unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
// for(int i=0; i< num_elements; i++)
if(i < num_elements)
{
cout<<arrayToBePrint[i]<<" ";
}
cout<<"\n";
}
__global__ void printArrayInt(int *arrayToBePrint, int num_elements)
{
unsigned int i = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
// for(int i=0; i< num_elements; i++)
if(i < num_elements)
{
cout<<arrayToBePrint[i]<<" ";
}
cout<<"\n";
}
*/
// Zero an unsigned-int device array, one element per thread.
__global__ void clearArray(unsigned int *array, int num_elements)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < num_elements)
    {
        array[tid] = 0;
    }
}
// Zero a signed-int device array, one element per thread.
__global__ void clearArrayInt(int *array, int num_elements)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid < num_elements)
    {
        array[tid] = 0;
    }
}
// Scatter a 1 into the edge-flag array at each vertex's first-edge offset
// (same operation as markFlagForUid, kept for the separate call site).
__global__ void markEdgeFlag(unsigned int *d_edge_flag, unsigned int *d_vertices, int num_vertices)
{
    unsigned int tid = blockIdx.x*MAX_THREADS_PER_BLOCK + threadIdx.x;
    if (tid >= num_vertices)
        return;
    unsigned int firstEdge = d_vertices[tid];
    d_edge_flag[firstEdge] = 1;
}
|
838
|
#include "includes.h"
// Flatten 3-D grid and block coordinates into a single global thread id.
__device__ unsigned int getGid3d3d(){
    int block = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int local = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    return block * (blockDim.x * blockDim.y * blockDim.z) + local;
}
// Fill K with (HBAR^2 / 2m) * (xp^2 + yp^2 + zp^2), indexing each momentum
// axis by the per-dimension thread id. HBAR comes from includes.h.
// NOTE(review): no bounds guards — assumes the launch grid exactly covers
// K and each axis array; confirm against the launch configuration.
__global__ void simple_K(double *xp, double *yp, double *zp, double mass, double *K){
unsigned int gid = getGid3d3d();
unsigned int xid = blockDim.x*blockIdx.x + threadIdx.x;
unsigned int yid = blockDim.y*blockIdx.y + threadIdx.y;
unsigned int zid = blockDim.z*blockIdx.z + threadIdx.z;
K[gid] = (HBAR*HBAR/(2*mass))*(xp[xid]*xp[xid] + yp[yid]*yp[yid]
+ zp[zid]*zp[zid]);
}
|
839
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand.h"
#include "curand_kernel.h"
#include <stdio.h>
#include <time.h>
// Monty Hall without switching: the player wins exactly when the initial
// pick already hides the prize, so no door-reveal simulation is needed.
__global__ void mhNoSwitch(unsigned int *randNums, bool *results)
{
    const unsigned int idx = blockIdx.x * 1024 + threadIdx.x;
    const unsigned int prize  = randNums[2*idx]     % 3;   // winning door
    const unsigned int choice = randNums[2*idx + 1] % 3;   // player's pick
    results[idx] = (prize == choice);
}
// Monty Hall WITH switching: enumerate every (prize, choice) pair, pick the
// revealed goat door (random between the two goats when the player chose the
// prize), and take `final` = the remaining unopened door. Win iff final
// lands on the prize.
__global__ void mhSwitch(unsigned int *randNums, bool *results)
{
unsigned int idx = blockIdx.x * 1024 + threadIdx.x;
unsigned int rand0 = randNums[2*idx];
unsigned int rand1 = randNums[2*idx+1];
// door with prize
unsigned int prize = rand0 % 3;
// initial choice
unsigned int choice = rand1 % 3;
unsigned int reveal = 0;
unsigned int final = 0;
if (prize == 0) {
if (choice == 0) {
// Host reveals door 1 or 2 at random; player switches to the other.
reveal = 1 + rand0 % 2;
final = 3 - reveal;
} else if (choice == 1) {
reveal = 2;
final = 0;
} else { // choice == 2
reveal = 1;
final = 0;
}
} else if (prize == 1) {
if (choice == 0) {
reveal = 2;
final = 1;
} else if (choice == 1) {
// reveal is door 0 or 2 at random; 2 - reveal is the other one.
reveal = 2 * (rand0 % 2);
final = 2 - reveal;
} else { // choice == 2
reveal = 0;
final = 1;
}
} else if (prize == 2) {
if (choice == 0) {
reveal = 1;
final = 2;
} else if (choice == 1) {
reveal = 0;
final = 2;
} else { // choice == 2
// reveal is door 0 or 1 at random; 1 - reveal is the other one.
reveal = rand0 % 2;
final = 1 - reveal;
}
}
results[idx] = (prize == final);
}
// Run 10240 Monty Hall trials for both strategies on the GPU using one
// shared batch of curand-generated numbers, then tally win rates on the host.
int main()
{
    cudaSetDevice(0);
    const unsigned int numBlocks = 10;
    const unsigned int numTests = 1024 * numBlocks;
    bool results_noswitch[numTests];
    bool results_switch[numTests];
    bool *dev_results_noswitch = 0;
    bool *dev_results_switch = 0;
    unsigned int *dev_randNums = 0;
    cudaMalloc((void**) &dev_results_noswitch, numTests * sizeof(bool));
    cudaMalloc((void**) &dev_results_switch, numTests * sizeof(bool));
    cudaMalloc((void**) &dev_randNums, 2 * numTests * sizeof(unsigned int));
    curandGenerator_t generator;
    curandCreateGenerator(&generator, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(generator, time(NULL));
    curandGenerate(generator, dev_randNums, 2 * numTests);
    mhNoSwitch<<<numBlocks, 1024>>>(dev_randNums, dev_results_noswitch);
    mhSwitch<<<numBlocks, 1024>>>(dev_randNums, dev_results_switch);
    cudaMemcpy(results_noswitch, dev_results_noswitch, numTests * sizeof(bool), cudaMemcpyDeviceToHost);
    cudaMemcpy(results_switch, dev_results_switch, numTests * sizeof(bool), cudaMemcpyDeviceToHost);
    cudaFree(dev_results_noswitch);
    cudaFree(dev_results_switch);
    // BUG FIX: dev_randNums was allocated but never released.
    cudaFree(dev_randNums);
    curandDestroyGenerator(generator);
    unsigned int noswitchpass = 0;
    unsigned int switchpass = 0;
    for (int i = 0; i < numTests; i++) {
        noswitchpass += results_noswitch[i] ? 1 : 0;
        switchpass += results_switch[i] ? 1 : 0;
    }
    printf("no switch picked correctly %i out of %i, which is %g percent\n", noswitchpass, numTests, 100 * ((float) noswitchpass) / ((float) numTests));
    printf("switch picked correctly %i out of %i, which is %g percent\n", switchpass, numTests, 100 * ((float) switchpass) / ((float) numTests));
    // cudaDeviceReset must be called before exiting in order for profiling and
    // tracing tools such as Nsight and Visual Profiler to show complete traces.
    cudaError_t cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    return 0;
}
|
840
|
#include <iostream>
#include <stdio.h>
#include <math.h>
#include <time.h>
//============================= GPU Kernel ====================================
// Element-wise y[i] = x[i] + y[i] using a grid-stride loop, so any launch
// configuration covers all n elements.
__global__
void d_add(int n, float *x, float *y)
{
    const int start = blockIdx.x * blockDim.x + threadIdx.x;
    const int step  = blockDim.x * gridDim.x;
    for (int i = start; i < n; i += step) {
        y[i] += x[i];
    }
}
//============================= CPU ===========================================
// function to add the elements of two arrays
// CPU reference: accumulate x into y element by element (y[i] += x[i]).
void add(int n, float *x, float *y)
{
    for (int idx = 0; idx < n; ++idx) {
        y[idx] += x[idx];
    }
}
// CPU WRAPPER
// CPU benchmark wrapper: fill two 2^25-element arrays, time the serial add,
// and verify every result is 3.0f. Always returns 0.
int add_cpu(void){
    const int N = 1<<25;
    printf("Number of elements: %d\n",N);
    float *x = new float[N];
    float *y = new float[N];
    // Initialise inputs: x = 1, y = 2 everywhere.
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    // Time the serial addition.
    clock_t begin = clock();
    add(N, x, y);
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("CPU Time: %f seconds\n", time_spent);
    // Validate: every element should now be exactly 3.0f.
    float maxError = 0.0f;
    for (int i = 0; i < N; i++) {
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    }
    std::cout << "Max error: " << maxError << std::endl;
    delete [] x;
    delete [] y;
    return 0;
}
// GPU WRAPPER
// GPU benchmark wrapper: same workload as add_cpu but via the d_add kernel
// over unified memory. Always returns 0.
int add_gpu(void){
    int N = 1<<25;
    float *x, *y;
    // Allocate Unified Memory – accessible from CPU or GPU
    cudaMallocManaged(&x, N*sizeof(float));
    cudaMallocManaged(&y, N*sizeof(float));
    // initialize x and y arrays on the host
    for (int i = 0; i < N; i++) {
        x[i] = 1.0f;
        y[i] = 2.0f;
    }
    // Launch: enough blocks of 256 threads to cover N (grid-stride kernel).
    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    clock_t begin = clock();
    d_add<<<numBlocks, blockSize>>>(N, x, y);
    // IMPROVED: surface launch-configuration errors instead of silently
    // reading unmodified data below.
    cudaError_t launchErr = cudaGetLastError();
    if (launchErr != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(launchErr));
    }
    // Wait for GPU to finish before accessing on host
    cudaDeviceSynchronize();
    clock_t end = clock();
    double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
    printf("GPU Time: %f seconds\n", time_spent);
    // Check for errors (all values should be 3.0f)
    float maxError = 0.0f;
    for (int i = 0; i < N; i++)
        maxError = fmax(maxError, fabs(y[i]-3.0f));
    std::cout << "Max error: " << maxError << std::endl;
    // Free memory
    cudaFree(x);
    cudaFree(y);
    return 0;
}
// Run the CPU reference benchmark first, then the GPU version.
int main(void)
{
    add_cpu();
    add_gpu();
    return 0;
}
|
841
|
#include<stdio.h>
#include<cuda.h>
#include<time.h>
// Element-wise vector addition: c = a + b, one thread per element, with a
// bounds guard for the partially filled last block.
__global__ void zad2(float *a,float *b,float *c, int N)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        c[i] = a[i] + b[i];
    }
}
// Benchmark driver: allocate three N-float vectors on host and device, time
// allocation, transfers (both directions) and the kernel itself (via CUDA
// events), then print the timings (messages are in Polish).
int main(void)
{
    clock_t t1,t2,t5,t6,t7,t8;
    float *a_h,*b_h,*c_h;   // host buffers
    float *a_d,*b_d,*c_d;   // device buffers
    const int N = 50000000;
    size_t size = N * sizeof(float);
    // Time host + device allocation.
    t1=clock();
    a_h = (float *)malloc(size);
    b_h = (float *)malloc(size);
    c_h = (float *)malloc(size);
    cudaMalloc((void **) &a_d,size);
    cudaMalloc((void **) &b_d,size);
    cudaMalloc((void **) &c_d,size);
    t2=clock();
    for(int i=0;i<N;i++)
    {
        a_h[i]=(float)(i+1);
        b_h[i]=(float)(i+1);
        c_h[i]=(float)(i+1);
    }
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Time host-to-device transfers.
    t5=clock();
    cudaMemcpy(a_d,a_h,size,cudaMemcpyHostToDevice);
    cudaMemcpy(b_d,b_h,size,cudaMemcpyHostToDevice);
    cudaMemcpy(c_d,c_h,size,cudaMemcpyHostToDevice);
    t6=clock();
    int block_size = 1024;
    int n_blocks = N/block_size + (N%block_size == 0 ? 0:1);   // ceil-div
    cudaEventRecord(start, 0);
    zad2<<<n_blocks,block_size>>>(a_d,b_d,c_d,N);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    // Time device-to-host transfers.
    t7=clock();
    cudaMemcpy(a_h,a_d,size,cudaMemcpyDeviceToHost);
    cudaMemcpy(b_h,b_d,size,cudaMemcpyDeviceToHost);
    cudaMemcpy(c_h,c_d,size,cudaMemcpyDeviceToHost);
    t8=clock();
    //for(int i=0;i<N;i++)
    //{
    //printf("%d rekord to: %f\n",i,c_h[i]);
    //}
    printf("Czas alokowania danych: %f s\n",(float)(t2-t1)/CLOCKS_PER_SEC);
    printf("Czas przesyłu danych: %f s\n",(float)((t6-t5)+(t8-t7))/CLOCKS_PER_SEC);
    free(a_h);
    free(b_h);
    free(c_h);
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventElapsedTime(&time, start, stop);
    printf ("Czas wykonania dodawania wektorów: %f ms\n", time);
    // BUG FIX: the CUDA events were created but never destroyed.
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
842
|
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
#include <thrust/extrema.h>
#include <thrust/device_vector.h>
using namespace std;
// Print a single hex digit (0-15) as its character ('0'-'9', 'a'-'f').
void zzz2(int c){
    char digit = (c < 10) ? (char)('0' + c) : (char)('a' + (c - 10));
    printf("%c", digit);
}
// Print a uchar3 color as six hex digits (two per component, x then y then z).
void zzz1(uchar3 a){
    const int comps[3] = { a.x, a.y, a.z };
    for (int k = 0; k < 3; ++k) {
        zzz2(comps[k] / 16);
        zzz2(comps[k] % 16);
    }
}
// Print a uchar4 color as six hex digits (x, y, z only — the w channel is
// intentionally ignored, matching the uchar3 printer).
void zzz3(uchar4 a){
    const int comps[3] = { a.x, a.y, a.z };
    for (int k = 0; k < 3; ++k) {
        zzz2(comps[k] / 16);
        zzz2(comps[k] % 16);
    }
}
// Thin device wrapper over double-precision sqrt.
__device__ double SQRT(double A){
    const double root = sqrt(A);
    return root;
}
// Square of a, written as a multiplication rather than pow().
__device__ double POW(double a){
    const double squared = a * a;
    return squared;
}
// CSC: wrap a CUDA API call; on failure, print file/line and the error
// string, then exit. Note it exits with status 0 even on error.
#define CSC(call) do { \
cudaError_t e = call; \
if (e != cudaSuccess) { \
fprintf(stderr, "CUDA Error in %s:%d: %s\n"\
, __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(0); \
} \
} while(0)
// Luma conversion weights (ITU-R BT.601-style coefficients).
#define RGBToWB(r, g, b) 0.299*r + 0.587*g + 0.114*b
// Clamp a value to at most 255.1.
#define chek(a) a>255.1?255.1:a
// Comparator for thrust::max_element: orders values by absolute magnitude,
// so the "maximum" element is the one with the largest |value| (used to
// select the pivot within a column).
struct abs_max {
    __host__ __device__ bool operator()(double lhs, double rhs) {
        return abs(lhs) < abs(rhs);
    }
};
// Grid-stride loop: for every i in [0, n), swap entries x and y of row i in
// both the working matrix and the accumulator E (used for column pivoting).
__global__ void my_swap(double* data,double* E, int n,int x,int y) {
    const int first = threadIdx.x + blockIdx.x * blockDim.x;
    const int step  = gridDim.x * blockDim.x;   // total thread count
    for (int i = first; i < n; i += step) {
        double t = data[i*n+x];
        data[i*n+x] = data[i*n+y];
        data[i*n+y] = t;
        t = E[i*n+x];
        E[i*n+x] = E[i*n+y];
        E[i*n+y] = t;
    }
}
// Divide pivot column i of the accumulator E by the pivot value; in `data`
// the diagonal element itself is skipped.
// NOTE(review): the if has no braces, so only the `data` division is
// conditional — E[i*n+i] IS divided by tmp. Verify this asymmetry is
// intentional for the Gauss-Jordan update used in main().
__global__ void normalization(double* data,double* E, int n,int i){
int idx = threadIdx.x + blockIdx.x * blockDim.x; // thread index
int offset = gridDim.x * blockDim.x; // total number of threads
int j;
double tmp=data[i*n+i];
for(j=idx;j<n;j+=offset){
if(j!=i)
data[j*n+i]/=tmp;
E[j*n+i]/=tmp;
}
}
// Gauss-Jordan elimination step for pivot x: subtract data[x*n+i]-scaled
// multiples of pivot row x from every other row i, in both `data` and E.
// 2-D grid-stride loops cover the full n x n matrices.
__global__ void kernel(double* data,double* E, int n,int x) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
int idy = threadIdx.y + blockDim.y * blockIdx.y;
int offsetx = blockDim.x * gridDim.x;
int offsety = blockDim.y * gridDim.y;
int i, j;
for(i = idx; i < n; i += offsetx){
for(j = idy; j < n; j += offsety){
if(i!=x){
// E is updated for every column j; `data` skips the pivot column
// itself (j == x) so the pivot row stays intact for other threads.
E[j*n+i]-=data[x*n+i]*E[j*n+x];
if(j!=x)
data[j*n+i]-=data[x*n+i]*data[j*n+x];
}
}
}
}
// Read an n x n matrix from stdin (stored here in transposed/column-major
// order: element (row j, col i) goes to data[i*n+j]), invert it on the GPU
// by Gauss-Jordan elimination with partial pivoting (thrust::max_element
// with the abs_max comparator picks the pivot), and print the inverse.
int main() {
abs_max zzz;
int n;
scanf("%d", &n);
//fprintf(stderr,"%d\n",n);
double* data = (double*) malloc( n*n*sizeof(double) );
double* E = (double*) malloc( n*n*sizeof(double) );
// Read the matrix and initialise E to the identity.
for (int j = 0; j < n; ++j) {
for (int i = 0; i < n; ++i){
scanf("%lf", &data[i * n + j]);
//fprintf(stderr,"%.10e ",data[i * n + j]);
if(i==j){
E[i * n + j]=1;
}else{
E[i * n + j]=0;
}
}
//fprintf(stderr,"\n");
}
double *dev_data,*dev_E;
CSC( cudaMalloc( &dev_data, n*n*sizeof(double) ) );
CSC( cudaMemcpy( dev_data, data, n*n*sizeof(double), cudaMemcpyHostToDevice ) );
CSC( cudaMalloc( &dev_E, n*n*sizeof(double) ) );
CSC( cudaMemcpy( dev_E, E, n*n*sizeof(double), cudaMemcpyHostToDevice ) );
// One elimination round per pivot position i.
for(int i=0;i<n;i++){
if(i!=n-1){
// Partial pivoting: find the largest |element| in positions i..n-1
// of stripe i and swap it into the pivot slot.
thrust::device_ptr<double> ptr_data = thrust::device_pointer_cast(dev_data);
thrust::device_ptr<double> max_elem_ref = thrust::max_element(
ptr_data + i * n + i,
ptr_data +i * n+ n,
zzz );
int max_pos = max_elem_ref - (ptr_data + i * n);
//fprintf(stderr,"%d %d\n",max_pos,i);
if(max_pos!=i){
my_swap <<<dim3(1024), dim3(1024)>>> (dev_data,dev_E, n, i, max_pos);
}
}
// Scale by the pivot, then eliminate it from all other rows.
normalization <<<dim3(1024), dim3(1024)>>>(dev_data,dev_E,n,i);
kernel<<<dim3(32, 32), dim3(32, 32)>>>(dev_data,dev_E,n,i);
}
//normalization <<<dim3(1024), dim3(1024)>>>(dev_data,dev_E,n,n-1);
CSC( cudaMemcpy( E, dev_E, n*n*sizeof(double), cudaMemcpyDeviceToHost ) );
// Print the inverse in the same transposed layout it was read in.
for (int j = 0; j < n; ++j) {
for (int i = 0; i < n; ++i){
//fprintf(stderr,"%.10e ",E[i * n + j]);
printf("%.10e ",E[i * n + j]);
}
//fprintf(stderr,"\n");
printf("\n");
}
CSC(cudaFree ( dev_E ));
CSC(cudaFree ( dev_data ));
free(data);
free(E);
return 0;
}
|
843
|
// Final warp-level tree sum over the first 64 shared-memory floats; after
// the six steps, x[0] holds the total. `volatile` forces every read/write
// to go to shared memory instead of being cached in registers.
// NOTE(review): this relies on implicit lockstep within a warp; on Volta+
// (independent thread scheduling) the pattern needs __syncwarp() between
// steps — confirm the target architecture.
__device__ void _sum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Stage 1 of the float sum: each thread accumulates a grid-stride partial
// of x[0..n), the block reduces its 64 partials in shared memory, and the
// block total is written to y[blockIdx.x]. Expects blockDim.x == 64.
__global__ void _sum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
// Last 32 partials are combined by the warp-level helper.
if(tid<32) {
_sum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 of the float sum: a single 64-thread block reduces the 64
// per-block results in y and writes the grand total to z[0].
__global__ void _sum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
// Warp-level finish over the remaining 32 partials.
if(tid<32) {
_sum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: sum of the device array x[0..n) as a float.
// Launches 64 blocks x 64 threads, then a single-block pass over the 64
// block results. Device scratch (y, z) is allocated once and cached in
// statics for reuse — never freed, and not thread-safe.
// NOTE(review): cudaMalloc/cudaMemcpy return codes are unchecked.
float sum_32_20(int n, float *x) {
float r;
static float *y;
static float *z;
if (y == NULL) cudaMalloc(&y, 64*sizeof(float)); // sum for each block
if (z == NULL) cudaMalloc(&z, sizeof(float)); // final sum
_sum_32_20_1<<<64,64>>>(n,x,y);
_sum_32_20_2<<<1,64>>>(y,z);
cudaMemcpy(&r,z,sizeof(float),cudaMemcpyDeviceToHost);
return r;
}
#ifdef __cplusplus
}
#endif
// Double-precision variant of _sum_32_20_0: warp-level tree sum over the
// first 64 shared doubles; x[0] holds the total afterwards.
// NOTE(review): same implicit-warp-sync caveat as the float version — on
// Volta+ this needs __syncwarp() between steps; confirm target arch.
__device__ void _sum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Stage 1 of the double sum: grid-stride per-thread partials, 64-entry
// shared-memory block reduction, block total written to y[blockIdx.x].
// Expects blockDim.x == 64.
__global__ void _sum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
// Last 32 partials are combined by the warp-level helper.
if(tid<32) {
_sum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
__global__ void _sum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: sum of the device array x[0..n-1], returned on the host.
// Device scratch (64 block partials + one result slot) is cudaMalloc'ed once
// and cached in function-local statics for the life of the process.
double sum_64_20(int n, double *x) {
static double *partials = NULL; // per-block partial results (stage 1 output)
static double *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(double));
if (result == NULL) cudaMalloc(&result, sizeof(double));
_sum_64_20_1<<<64,64>>>(n,x,partials);
_sum_64_20_2<<<1,64>>>(partials,result);
double host_value;
cudaMemcpy(&host_value,result,sizeof(double),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the product reduction: folds x[0..63] down into x[0]
// with no barriers. NOTE(review): relies on legacy implicit warp-lockstep
// execution; on Volta+ independent thread scheduling this needs __syncwarp()
// between steps -- confirm the target architecture.
__device__ void _prod_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
// Stage 1 (launched <<<64,64>>> by prod_32_20): each thread multiplies a
// grid-stride slice of x[0..n-1] (identity 1); block b writes its partial
// product to y[b].
__global__ void _prod_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 1;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai*xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_prod_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _prod_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads();
}
if(tid<32) {
_prod_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: product of the device array x[0..n-1], returned on the
// host. Device scratch (64 block partials + one result slot) is cudaMalloc'ed
// once and cached in function-local statics for the life of the process.
float prod_32_20(int n, float *x) {
static float *partials = NULL; // per-block partial results (stage 1 output)
static float *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(float));
if (result == NULL) cudaMalloc(&result, sizeof(float));
_prod_32_20_1<<<64,64>>>(n,x,partials);
_prod_32_20_2<<<1,64>>>(partials,result);
float host_value;
cudaMemcpy(&host_value,result,sizeof(float),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the product reduction: folds x[0..63] down into x[0]
// with no barriers. NOTE(review): relies on legacy implicit warp-lockstep
// execution; on Volta+ independent thread scheduling this needs __syncwarp()
// between steps -- confirm the target architecture.
__device__ void _prod_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai*xi;
ai=x[i]; xi=x[i+16]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai*xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai*xi;
}
// Stage 1 (launched <<<64,64>>> by prod_64_20): each thread multiplies a
// grid-stride slice of x[0..n-1] (identity 1); block b writes its partial
// product to y[b].
__global__ void _prod_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 1;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=ai*xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_prod_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _prod_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai*xi;
}
__syncthreads();
}
if(tid<32) {
_prod_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: product of the device array x[0..n-1], returned on the
// host. Device scratch (64 block partials + one result slot) is cudaMalloc'ed
// once and cached in function-local statics for the life of the process.
double prod_64_20(int n, double *x) {
static double *partials = NULL; // per-block partial results (stage 1 output)
static double *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(double));
if (result == NULL) cudaMalloc(&result, sizeof(double));
_prod_64_20_1<<<64,64>>>(n,x,partials);
_prod_64_20_2<<<1,64>>>(partials,result);
double host_value;
cudaMemcpy(&host_value,result,sizeof(double),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the max reduction: folds x[0..63] down into x[0] with no
// barriers. NOTE(review): relies on legacy implicit warp-lockstep execution;
// on Volta+ independent thread scheduling this needs __syncwarp() between
// steps -- confirm the target architecture.
__device__ void _maximum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
// Stage 1 (launched <<<64,64>>> by maximum_32_20): each thread takes the max
// over a grid-stride slice of x[0..n-1] (identity -INFINITY); block b writes
// its partial max to y[b].
__global__ void _maximum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = (-INFINITY);
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_maximum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _maximum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_maximum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: maximum of the device array x[0..n-1], returned on the
// host. Device scratch (64 block partials + one result slot) is cudaMalloc'ed
// once and cached in function-local statics for the life of the process.
float maximum_32_20(int n, float *x) {
static float *partials = NULL; // per-block partial results (stage 1 output)
static float *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(float));
if (result == NULL) cudaMalloc(&result, sizeof(float));
_maximum_32_20_1<<<64,64>>>(n,x,partials);
_maximum_32_20_2<<<1,64>>>(partials,result);
float host_value;
cudaMemcpy(&host_value,result,sizeof(float),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the max reduction: folds x[0..63] down into x[0] with no
// barriers. NOTE(review): relies on legacy implicit warp-lockstep execution;
// on Volta+ independent thread scheduling this needs __syncwarp() between
// steps -- confirm the target architecture.
__device__ void _maximum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai>xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai>xi?ai:xi);
}
// Stage 1 (launched <<<64,64>>> by maximum_64_20): each thread takes the max
// over a grid-stride slice of x[0..n-1] (identity -INFINITY); block b writes
// its partial max to y[b].
__global__ void _maximum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = (-INFINITY);
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai>xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_maximum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _maximum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai>xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_maximum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: maximum of the device array x[0..n-1], returned on the
// host. Device scratch (64 block partials + one result slot) is cudaMalloc'ed
// once and cached in function-local statics for the life of the process.
double maximum_64_20(int n, double *x) {
static double *partials = NULL; // per-block partial results (stage 1 output)
static double *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(double));
if (result == NULL) cudaMalloc(&result, sizeof(double));
_maximum_64_20_1<<<64,64>>>(n,x,partials);
_maximum_64_20_2<<<1,64>>>(partials,result);
double host_value;
cudaMemcpy(&host_value,result,sizeof(double),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the min reduction: folds x[0..63] down into x[0] with no
// barriers. NOTE(review): relies on legacy implicit warp-lockstep execution;
// on Volta+ independent thread scheduling this needs __syncwarp() between
// steps -- confirm the target architecture.
__device__ void _minimum_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
// Stage 1 (launched <<<64,64>>> by minimum_32_20): each thread takes the min
// over a grid-stride slice of x[0..n-1] (identity INFINITY); block b writes
// its partial min to y[b].
__global__ void _minimum_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = INFINITY;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_minimum_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _minimum_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_minimum_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: minimum of the device array x[0..n-1], returned on the
// host. Device scratch (64 block partials + one result slot) is cudaMalloc'ed
// once and cached in function-local statics for the life of the process.
float minimum_32_20(int n, float *x) {
static float *partials = NULL; // per-block partial results (stage 1 output)
static float *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(float));
if (result == NULL) cudaMalloc(&result, sizeof(float));
_minimum_32_20_1<<<64,64>>>(n,x,partials);
_minimum_32_20_2<<<1,64>>>(partials,result);
float host_value;
cudaMemcpy(&host_value,result,sizeof(float),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the min reduction: folds x[0..63] down into x[0] with no
// barriers. NOTE(review): relies on legacy implicit warp-lockstep execution;
// on Volta+ independent thread scheduling this needs __syncwarp() between
// steps -- confirm the target architecture.
__device__ void _minimum_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+16]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 8]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 4]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 2]; x[i]=(ai<xi?ai:xi);
ai=x[i]; xi=x[i+ 1]; x[i]=(ai<xi?ai:xi);
}
// Stage 1 (launched <<<64,64>>> by minimum_64_20): each thread takes the min
// over a grid-stride slice of x[0..n-1] (identity INFINITY); block b writes
// its partial min to y[b].
__global__ void _minimum_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = INFINITY;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=xi; ai=(ai<xi?ai:xi);
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_minimum_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _minimum_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=(ai<xi?ai:xi);
}
__syncthreads();
}
if(tid<32) {
_minimum_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: minimum of the device array x[0..n-1], returned on the
// host. Device scratch (64 block partials + one result slot) is cudaMalloc'ed
// once and cached in function-local statics for the life of the process.
double minimum_64_20(int n, double *x) {
static double *partials = NULL; // per-block partial results (stage 1 output)
static double *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(double));
if (result == NULL) cudaMalloc(&result, sizeof(double));
_minimum_64_20_1<<<64,64>>>(n,x,partials);
_minimum_64_20_2<<<1,64>>>(partials,result);
double host_value;
cudaMemcpy(&host_value,result,sizeof(double),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the sum-of-absolute-values reduction: folds x[0..63]
// down into x[0] with no barriers (elements are already non-negative here;
// stage 1 applies |x|). NOTE(review): relies on legacy implicit warp-lockstep
// execution; on Volta+ this needs __syncwarp() -- confirm target architecture.
__device__ void _sumabs_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Stage 1 (launched <<<64,64>>> by sumabs_32_20): each thread sums |x[i]|
// over a grid-stride slice of x[0..n-1]; block b writes its partial to y[b].
__global__ void _sumabs_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _sumabs_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: sum of absolute values (1-norm) of the device array
// x[0..n-1], returned on the host. Device scratch (64 block partials + one
// result slot) is cudaMalloc'ed once and cached in function-local statics.
float sumabs_32_20(int n, float *x) {
static float *partials = NULL; // per-block partial results (stage 1 output)
static float *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(float));
if (result == NULL) cudaMalloc(&result, sizeof(float));
_sumabs_32_20_1<<<64,64>>>(n,x,partials);
_sumabs_32_20_2<<<1,64>>>(partials,result);
float host_value;
cudaMemcpy(&host_value,result,sizeof(float),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the sum-of-absolute-values reduction: folds x[0..63]
// down into x[0] with no barriers (elements are already non-negative here;
// stage 1 applies |x|). NOTE(review): relies on legacy implicit warp-lockstep
// execution; on Volta+ this needs __syncwarp() -- confirm target architecture.
__device__ void _sumabs_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Stage 1 (launched <<<64,64>>> by sumabs_64_20): each thread sums |x[i]|
// over a grid-stride slice of x[0..n-1]; block b writes its partial to y[b].
__global__ void _sumabs_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi<0?-xi:xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _sumabs_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: sum of absolute values (1-norm) of the device array
// x[0..n-1], returned on the host. Device scratch (64 block partials + one
// result slot) is cudaMalloc'ed once and cached in function-local statics.
double sumabs_64_20(int n, double *x) {
static double *partials = NULL; // per-block partial results (stage 1 output)
static double *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(double));
if (result == NULL) cudaMalloc(&result, sizeof(double));
_sumabs_64_20_1<<<64,64>>>(n,x,partials);
_sumabs_64_20_2<<<1,64>>>(partials,result);
double host_value;
cudaMemcpy(&host_value,result,sizeof(double),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the sum-of-squares reduction: folds x[0..63] down into
// x[0] with no barriers (stage 1 squares the elements first). NOTE(review):
// relies on legacy implicit warp-lockstep execution; on Volta+ this needs
// __syncwarp() between steps -- confirm the target architecture.
__device__ void _sumabs2_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Stage 1 (launched <<<64,64>>> by sumabs2_32_20): each thread sums x[i]^2
// over a grid-stride slice of x[0..n-1]; block b writes its partial to y[b].
__global__ void _sumabs2_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs2_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _sumabs2_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs2_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: sum of squares (squared 2-norm) of the device array
// x[0..n-1], returned on the host. Device scratch (64 block partials + one
// result slot) is cudaMalloc'ed once and cached in function-local statics.
float sumabs2_32_20(int n, float *x) {
static float *partials = NULL; // per-block partial results (stage 1 output)
static float *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(float));
if (result == NULL) cudaMalloc(&result, sizeof(float));
_sumabs2_32_20_1<<<64,64>>>(n,x,partials);
_sumabs2_32_20_2<<<1,64>>>(partials,result);
float host_value;
cudaMemcpy(&host_value,result,sizeof(float),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the sum-of-squares reduction: folds x[0..63] down into
// x[0] with no barriers (stage 1 squares the elements first). NOTE(review):
// relies on legacy implicit warp-lockstep execution; on Volta+ this needs
// __syncwarp() between steps -- confirm the target architecture.
__device__ void _sumabs2_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Stage 1 (launched <<<64,64>>> by sumabs2_64_20): each thread sums x[i]^2
// over a grid-stride slice of x[0..n-1]; block b writes its partial to y[b].
__global__ void _sumabs2_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi*xi); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_sumabs2_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _sumabs2_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_sumabs2_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: sum of squares (squared 2-norm) of the device array
// x[0..n-1], returned on the host. Device scratch (64 block partials + one
// result slot) is cudaMalloc'ed once and cached in function-local statics.
double sumabs2_64_20(int n, double *x) {
static double *partials = NULL; // per-block partial results (stage 1 output)
static double *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(double));
if (result == NULL) cudaMalloc(&result, sizeof(double));
_sumabs2_64_20_1<<<64,64>>>(n,x,partials);
_sumabs2_64_20_2<<<1,64>>>(partials,result);
double host_value;
cudaMemcpy(&host_value,result,sizeof(double),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the nonzero-count reduction: folds x[0..63] down into
// x[0] with no barriers (stage 1 maps each element to 0/1 first).
// NOTE(review): relies on legacy implicit warp-lockstep execution; on Volta+
// this needs __syncwarp() between steps -- confirm the target architecture.
__device__ void _countnz_32_20_0(volatile float *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
float ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Stage 1 (launched <<<64,64>>> by countnz_32_20): each thread counts the
// nonzero elements (xi!=0 yields 0.0f/1.0f) in a grid-stride slice of
// x[0..n-1]; block b writes its partial count to y[b].
__global__ void _countnz_32_20_1(int n, float *x, float *y) {
__shared__ float buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
float ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_countnz_32_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _countnz_32_20_2(float *y,float *z) { // sum block results in y
__shared__ float buffer[64];
float ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_countnz_32_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: number of nonzero elements of the device array x[0..n-1],
// returned on the host as a float. Device scratch (64 block partials + one
// result slot) is cudaMalloc'ed once and cached in function-local statics.
float countnz_32_20(int n, float *x) {
static float *partials = NULL; // per-block partial counts (stage 1 output)
static float *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(float));
if (result == NULL) cudaMalloc(&result, sizeof(float));
_countnz_32_20_1<<<64,64>>>(n,x,partials);
_countnz_32_20_2<<<1,64>>>(partials,result);
float host_value;
cudaMemcpy(&host_value,result,sizeof(float),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
// Warp-level tail of the nonzero-count reduction: folds x[0..63] down into
// x[0] with no barriers (stage 1 maps each element to 0/1 first).
// NOTE(review): relies on legacy implicit warp-lockstep execution; on Volta+
// this needs __syncwarp() between steps -- confirm the target architecture.
__device__ void _countnz_64_20_0(volatile double *x, int i) {
//for optimizing warps
//volatile must be used as register optimization will lead to wrong answers
double ai, xi;
ai=x[i]; xi=x[i+32]; x[i]=ai+xi;
ai=x[i]; xi=x[i+16]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 8]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 4]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 2]; x[i]=ai+xi;
ai=x[i]; xi=x[i+ 1]; x[i]=ai+xi;
}
// Stage 1 (launched <<<64,64>>> by countnz_64_20): each thread counts the
// nonzero elements (xi!=0 yields 0.0/1.0) in a grid-stride slice of
// x[0..n-1]; block b writes its partial count to y[b].
__global__ void _countnz_64_20_1(int n, double *x, double *y) {
__shared__ double buffer[64]; //all THR threads in the block write to buffer on their own tid
int i_start = threadIdx.x+blockIdx.x*blockDim.x; //start at the thread index
int i_end = n; //end at dim
int i_step = blockDim.x*gridDim.x; // step is the total number of threads in the system
int tid = threadIdx.x;
double ai, xi;
// sum the elements assigned to this thread
ai = 0;
for(int i=i_start; i<i_end; i+=i_step) {
xi=x[i]; xi=(xi!=0); ai=ai+xi;
}
buffer[tid] = ai;
__syncthreads();
// help sum the entries in the block
// (this loop never iterates for a 64-thread block: stride starts at 32;
// the warp routine below handles strides 32..1 -- generated-template artifact)
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads(); // Q: can this be outside the for loop?
}
if(tid<32) {
_countnz_64_20_0(buffer,tid); // Inlining this does not work.
}
__syncthreads();
if(tid==0) { // the first thread in the block writes the block result to y
y[blockIdx.x]=buffer[0];
}
}
// Stage 2 (launched <<<1,64>>>): a single 64-thread block folds the 64 block
// partials in y into z[0].
__global__ void _countnz_64_20_2(double *y,double *z) { // sum block results in y
__shared__ double buffer[64];
double ai, xi;
int tid = threadIdx.x;
buffer[tid] = y[tid];
__syncthreads();
for(int stride=64/2; stride>32; stride>>=1) {
if(tid < stride) {
ai=buffer[tid]; xi=buffer[stride+tid]; buffer[tid]=ai+xi;
}
__syncthreads();
}
if(tid<32) {
_countnz_64_20_0(buffer,tid);
}
__syncthreads();
if(tid==0) {
z[0]=buffer[0];
}
}
#ifdef __cplusplus
extern "C" {
#endif
// Host entry point: number of nonzero elements of the device array x[0..n-1],
// returned on the host as a double. Device scratch (64 block partials + one
// result slot) is cudaMalloc'ed once and cached in function-local statics.
double countnz_64_20(int n, double *x) {
static double *partials = NULL; // per-block partial counts (stage 1 output)
static double *result = NULL;   // device-side final scalar (stage 2 output)
if (partials == NULL) cudaMalloc(&partials, 64*sizeof(double));
if (result == NULL) cudaMalloc(&result, sizeof(double));
_countnz_64_20_1<<<64,64>>>(n,x,partials);
_countnz_64_20_2<<<1,64>>>(partials,result);
double host_value;
cudaMemcpy(&host_value,result,sizeof(double),cudaMemcpyDeviceToHost); // blocking: syncs the kernels
return host_value;
}
#ifdef __cplusplus
}
#endif
|
844
|
//pass
//--blockDim=2 --gridDim=2
// Kernel argument type: two device pointers wrapped in a nested anonymous
// struct (exercises by-value struct passing to a kernel).
struct S {
struct {
int * p; // written by foo: one element per global thread
int * q; // read then overwritten by foo: one element per global thread
} s;
};
// Each global thread stores q[gid] + threadIdx.x into p[gid], then
// overwrites q[gid] with threadIdx.x.
__global__ void foo(struct S A) {
int gid = threadIdx.x + blockDim.x*blockIdx.x;
int lane = threadIdx.x;
A.s.p[gid] = A.s.q[gid] + lane;
A.s.q[gid] = lane;
}
|
845
|
//Based on the work of Andrew Krepps
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <time.h>
#include <cuda.h>
#define N 32*32
#define BLOCK_SIZE 16
#define NUM_BLOCKS N/BLOCK_SIZE
#define ARRAY_SIZE N
#define ARRAY_SIZE_IN_BYTES (sizeof(int) * (ARRAY_SIZE))
///generate data//
// Fill host_data_ptr[0..N-1]: arrayNum==1 -> ramp (0,1,2,...),
// arrayNum==2 -> all ones, any other value -> rand()%3.
__host__ void generateData(int * host_data_ptr, int arrayNum)
{
for (unsigned int idx = 0; idx < N; ++idx)
{
switch (arrayNum)
{
case 1:
host_data_ptr[idx] = idx;
break;
case 2:
host_data_ptr[idx] = 1;
break;
default:
host_data_ptr[idx] = rand() % 3;
break;
}
}
}
//KERNEL//
// Element-wise binary op, one element per global thread, selected by opNum:
// 1 = add, 2 = subtract, 3 = multiply, anything else = modulo.
__global__ void operation(int *device_a, int *device_b, int *device_result, int opNum)
{
int threadId = threadIdx.x + blockIdx.x * blockDim.x;
if (threadId >= ARRAY_SIZE) return;
int a = device_a[threadId];
int b = device_b[threadId];
int value;
switch (opNum) {
case 1: value = a + b; break;
case 2: value = a - b; break;
case 3: value = a * b; break;
default: value = a % b; break;
}
device_result[threadId] = value;
}
//****************************************************************************
// Runs `operation` on a dedicated stream with pinned-memory async copies and
// prints the elapsed time and results. opNum selects the element-wise op
// (see `operation`); dev_num1 is used as the launch's block size and
// dev_num2 as its dynamic shared-memory byte count (kept from the original
// assignment -- the grid is ARRAY_SIZE blocks, so dev_num1==1 covers all
// elements).
void main_streams(int opNum, int dev_num1, int dev_num2)
{
cudaDeviceProp prop;
int *host_a, *host_b, *host_result;
int *device_a, *device_b, *device_result;
int deviceCount;
cudaGetDeviceCount(&deviceCount);
// BUG FIX: the device *count* was previously passed as the device *index*,
// which is always out of range (valid indices are 0..count-1); query device 0.
cudaGetDeviceProperties(&prop, 0);
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaStream_t stream;
cudaStreamCreate(&stream);
///////////Declare Arrays && allocate data/////////
// Pinned host memory is required for truly asynchronous cudaMemcpyAsync.
cudaHostAlloc((void **)&host_a, ARRAY_SIZE_IN_BYTES, cudaHostAllocDefault);
cudaHostAlloc((void **)&host_b, ARRAY_SIZE_IN_BYTES, cudaHostAllocDefault);
cudaHostAlloc((void **)&host_result, ARRAY_SIZE_IN_BYTES, cudaHostAllocDefault);
cudaMalloc((void**)&device_a, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&device_b, ARRAY_SIZE_IN_BYTES);
cudaMalloc((void**)&device_result, ARRAY_SIZE_IN_BYTES);
///////////Fill host arrays with values/////////
generateData(host_a, 1);
generateData(host_b, 2);
// Record the timing events on the work stream so they bracket exactly the
// copies and the kernel (previously they were recorded on the default stream).
cudaEventRecord(start, stream);
cudaMemcpyAsync(device_a, host_a, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice, stream);
cudaMemcpyAsync(device_b, host_b, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice, stream);
operation <<<ARRAY_SIZE, dev_num1, dev_num2, stream>>>(device_a, device_b, device_result, opNum);
cudaMemcpyAsync(host_result, device_result, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost, stream);
cudaEventRecord(stop, stream);
cudaEventSynchronize(stop);
// BUG FIX: wait for the stream before reading host_result on the host;
// otherwise the async D2H copy may still be in flight.
cudaStreamSynchronize(stream);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Time elapsed: %f\n", milliseconds);
printf("\n/////////////////MULTIPLE STREAMS RESULTS//////////////////\n");
for( int k=0; k<ARRAY_SIZE; k++)
{
printf("\nINDEX: %i\tVALUE:%i\n",k, host_result[k]);
}
///////////free memory /////////
cudaFree(device_a);
cudaFree(device_b);
cudaFree(device_result);
// BUG FIX: release pinned host buffers and destroy the stream and events
// (all of these were previously leaked).
cudaFreeHost(host_a);
cudaFreeHost(host_b);
cudaFreeHost(host_result);
cudaStreamDestroy(stream);
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
// Optional argv: [1] op selector, [2] block size, [3] dynamic shared-memory
// bytes for the launch; each defaults to 1 when absent.
int main(int argc, char** argv){
int opNum = 1;
int dev_num1 = 1;
int dev_num2 = 1;
if (argc > 1) opNum = atoi(argv[1]);
if (argc > 2) dev_num1 = atoi(argv[2]);
if (argc > 3) dev_num2 = atoi(argv[3]);
main_streams(opNum, dev_num1, dev_num2);
return 0;
}
|
846
|
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#define IMG_DIMENSION 32
#define N_IMG_PAIRS 10000
typedef unsigned char uchar;
#define OUT
#define CUDA_CHECK(f) do { \
cudaError_t e = f; \
if (e != cudaSuccess) { \
printf("Cuda failure %s:%d: '%s'\n", __FILE__, __LINE__, cudaGetErrorString(e)); \
exit(1); \
} \
} while (0)
#define SQR(a) ((a) * (a))
// Current wall-clock time in milliseconds (gettimeofday-based).
double static inline get_time_msec(void) {
struct timeval now;
gettimeofday(&now, NULL);
return 1e+3 * now.tv_sec + 1e-3 * now.tv_usec;
}
/* we won't load actual files. just fill the images with random bytes */
/* Stand-in for loading image files: fill both image arrays with
 * deterministic pseudo-random bytes (fixed seed, so runs are
 * reproducible). Interleaves rand() calls exactly as before so the
 * generated byte streams are unchanged. */
void load_image_pairs(uchar *images1, uchar *images2) {
    srand(0);
    const int total = N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION;
    for (int idx = 0; idx < total; ++idx) {
        images1[idx] = rand() % 256;
        images2[idx] = rand() % 256;
    }
}
/* True iff (i, j) is a valid pixel coordinate of an
 * IMG_DIMENSION x IMG_DIMENSION image. */
bool is_in_image_bounds(int i, int j) {
    if (i < 0 || j < 0) return false;
    return (i < IMG_DIMENSION) && (j < IMG_DIMENSION);
}
/* Compute the 8-bit local binary pattern (LBP) of pixel (i, j): each of
 * the 8 neighbours contributes one bit, set when the neighbour value is
 * >= the centre pixel. Bits are assigned clockwise starting at the
 * top-left neighbour (bit 7) and ending at the left neighbour (bit 0);
 * neighbours falling outside the image contribute 0. */
uchar local_binary_pattern(uchar *image, int i, int j) {
    uchar center = image[i * IMG_DIMENSION + j];
    uchar pattern = 0;
    if (is_in_image_bounds(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
    if (is_in_image_bounds(i - 1, j    )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j    )] >= center) << 6;
    if (is_in_image_bounds(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
    if (is_in_image_bounds(i    , j + 1)) pattern |= (image[(i    ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
    if (is_in_image_bounds(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
    if (is_in_image_bounds(i + 1, j    )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j    )] >= center) << 2;
    if (is_in_image_bounds(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
    if (is_in_image_bounds(i    , j - 1)) pattern |= (image[(i    ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
    return pattern;
}
/* Build the 256-bin LBP histogram of one IMG_DIMENSION^2 image on the
 * CPU: zero the bins, then count each pixel's local binary pattern. */
void image_to_histogram(uchar *image, int *histogram) {
    for (int bin = 0; bin < 256; ++bin)
        histogram[bin] = 0;
    for (int row = 0; row < IMG_DIMENSION; ++row)
        for (int col = 0; col < IMG_DIMENSION; ++col)
            histogram[local_binary_pattern(image, row, col)]++;
}
/* Chi-square distance between two 256-bin histograms:
 * sum over bins of (h1 - h2)^2 / (h1 + h2), skipping empty bins
 * (where both counts are zero) to avoid division by zero. */
double histogram_distance(int *h1, int *h2) {
    double total = 0;
    for (int bin = 0; bin < 256; ++bin) {
        int denom = h1[bin] + h2[bin];
        if (denom != 0) {
            int diff = h1[bin] - h2[bin];
            total += ((double)(diff * diff)) / denom;
        }
    }
    return total;
}
/* Your __device__ functions and __global__ kernels here */
// Device-side bounds check: true iff (i, j) lies inside an
// IMG_DIMENSION x IMG_DIMENSION image.
__device__ bool is_in_image_bounds_d(int i, int j) {
    if (i < 0 || j < 0) return false;
    return (i < IMG_DIMENSION) && (j < IMG_DIMENSION);
}
/* Device version of local_binary_pattern: one thread per pixel, with the
 * pixel coordinates taken from threadIdx (the launch uses an
 * IMG_DIMENSION x IMG_DIMENSION thread block). Same bit layout as the
 * host version: bit 7 = top-left neighbour, proceeding clockwise. */
__device__ uchar d_local_binary_pattern(uchar* image){
    int i = threadIdx.x;  /* pixel row */
    int j = threadIdx.y;  /* pixel column */
    uchar center = image[i*IMG_DIMENSION + j];
    uchar pattern = 0;
    if (is_in_image_bounds_d(i - 1, j - 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
    if (is_in_image_bounds_d(i - 1, j    )) pattern |= (image[(i - 1) * IMG_DIMENSION + (j    )] >= center) << 6;
    if (is_in_image_bounds_d(i - 1, j + 1)) pattern |= (image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
    if (is_in_image_bounds_d(i    , j + 1)) pattern |= (image[(i    ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
    if (is_in_image_bounds_d(i + 1, j + 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
    if (is_in_image_bounds_d(i + 1, j    )) pattern |= (image[(i + 1) * IMG_DIMENSION + (j    )] >= center) << 2;
    if (is_in_image_bounds_d(i + 1, j - 1)) pattern |= (image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
    if (is_in_image_bounds_d(i    , j - 1)) pattern |= (image[(i    ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
    return pattern;
}
/* LBP histogram of one image, accumulating directly in global memory.
 * Launch: 1 block of IMG_DIMENSION x IMG_DIMENSION threads, one thread
 * per pixel. hist1 must be zeroed by the caller before launch. */
__global__ void image_to_histogram_simple(uchar *image1, OUT int *hist1) {
    uchar pattern = d_local_binary_pattern(image1);
    // We are adding to global memory, which is shared between all threads;
    // many threads may compute the same pattern, so atomicAdd is required.
    atomicAdd(&hist1[pattern], 1);
}
//load the image into SHARED memory, compute the binary patterns,
//compute the histogram in shared memory,
//then write it back to global memory (hist1)
/* LBP histogram of one image using shared memory for both the image and
 * the histogram bins. Launch: 1 block of IMG_DIMENSION x IMG_DIMENSION
 * (= 1024) threads, one per pixel. Writes the 256 final bin counts to
 * hist1 (overwrites — caller's zeroing is not strictly required here). */
__global__ void image_to_histogram_shared(uchar *image, OUT int *hist1){
    __shared__ uchar shared_image[IMG_DIMENSION*IMG_DIMENSION];
    __shared__ int shared_hist[256];
    int i = threadIdx.x;  /* pixel row */
    int j = threadIdx.y;  /* pixel column */
    // Flat per-thread index; also used to pick the 256 threads that
    // zero/flush the histogram bins (valid because blockDim = 32x32 >= 256).
    int k = i*IMG_DIMENSION + j;
    shared_image[k] = image[k];
    if (k < 256) {
        shared_hist[k] = 0;
    }
    __syncthreads(); // all threads must finish loading the image and zeroing bins
    uchar center = shared_image[k];
    uchar pattern = 0;
    /* Same clockwise bit layout as d_local_binary_pattern, but reading
     * neighbours from shared memory. */
    if (is_in_image_bounds_d(i - 1, j - 1)) pattern |= (shared_image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
    if (is_in_image_bounds_d(i - 1, j    )) pattern |= (shared_image[(i - 1) * IMG_DIMENSION + (j    )] >= center) << 6;
    if (is_in_image_bounds_d(i - 1, j + 1)) pattern |= (shared_image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
    if (is_in_image_bounds_d(i    , j + 1)) pattern |= (shared_image[(i    ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
    if (is_in_image_bounds_d(i + 1, j + 1)) pattern |= (shared_image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
    if (is_in_image_bounds_d(i + 1, j    )) pattern |= (shared_image[(i + 1) * IMG_DIMENSION + (j    )] >= center) << 2;
    if (is_in_image_bounds_d(i + 1, j - 1)) pattern |= (shared_image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
    if (is_in_image_bounds_d(i    , j - 1)) pattern |= (shared_image[(i    ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
    // Shared-memory atomic: contention is within the block only.
    atomicAdd((&shared_hist[pattern]), 1);
    // Wait for every thread to finish accumulating its pattern.
    __syncthreads();
    // Flush the shared histogram to global memory (first 256 threads).
    if (k < 256) {
        hist1[k] = shared_hist[k];
    }
}
//10000 threadblocks
//32*32 threads
/* Batched LBP histograms: one thread block per image
 * (gridDim.x = N_IMG_PAIRS, blockDim = IMG_DIMENSION x IMG_DIMENSION).
 * Block b processes image b of the concatenated `images` array and
 * writes its 256 bin counts to hist1[b*256 .. b*256+255]. */
__global__ void image_to_histogram_batching(uchar* images, OUT int *hist1){
    __shared__ uchar shared_image[IMG_DIMENSION*IMG_DIMENSION];
    __shared__ int shared_hist[256];
    int i = threadIdx.x;      /* pixel row */
    int j = threadIdx.y;      /* pixel column */
    int blockId = blockIdx.x; /* image index within the batch */
    // Flat per-thread index into this block's image.
    int k = i*IMG_DIMENSION + j;
    // Load this block's image from the concatenated batch into shared memory.
    shared_image[k] = images[k + blockId * IMG_DIMENSION * IMG_DIMENSION];
    if (k < 256) {
        shared_hist[k] = 0;
    }
    __syncthreads(); // image loaded and bins zeroed before anyone reads
    uchar center = shared_image[k];
    uchar pattern = 0;
    /* Same clockwise bit layout as d_local_binary_pattern. */
    if (is_in_image_bounds_d(i - 1, j - 1)) pattern |= (shared_image[(i - 1) * IMG_DIMENSION + (j - 1)] >= center) << 7;
    if (is_in_image_bounds_d(i - 1, j    )) pattern |= (shared_image[(i - 1) * IMG_DIMENSION + (j    )] >= center) << 6;
    if (is_in_image_bounds_d(i - 1, j + 1)) pattern |= (shared_image[(i - 1) * IMG_DIMENSION + (j + 1)] >= center) << 5;
    if (is_in_image_bounds_d(i    , j + 1)) pattern |= (shared_image[(i    ) * IMG_DIMENSION + (j + 1)] >= center) << 4;
    if (is_in_image_bounds_d(i + 1, j + 1)) pattern |= (shared_image[(i + 1) * IMG_DIMENSION + (j + 1)] >= center) << 3;
    if (is_in_image_bounds_d(i + 1, j    )) pattern |= (shared_image[(i + 1) * IMG_DIMENSION + (j    )] >= center) << 2;
    if (is_in_image_bounds_d(i + 1, j - 1)) pattern |= (shared_image[(i + 1) * IMG_DIMENSION + (j - 1)] >= center) << 1;
    if (is_in_image_bounds_d(i    , j - 1)) pattern |= (shared_image[(i    ) * IMG_DIMENSION + (j - 1)] >= center) << 0;
    // Shared-memory atomic accumulation within this image's block.
    atomicAdd((&shared_hist[pattern]), 1);
    // All threads must finish accumulating before the flush.
    __syncthreads();
    // Write this image's histogram into its slot of the output array.
    if (k < 256) {
        hist1[k + blockId * 256] = shared_hist[k];
    }
}
/* Chi-square distance of two 256-bin histograms, computed by one block
 * of 256 threads. Each thread produces its bin's term in distance[i],
 * then a tree reduction in global memory folds all 256 terms into
 * distance[0], which the host reads back. `distance` must have room for
 * 256 doubles (used as scratch). */
__global__ void histogram_distance_simple(int *h1, int *h2, OUT double *distance) {
    int i = threadIdx.x; /* bin index, 0..255 */
    distance[i] = 0;
    if (h1[i] + h2[i] != 0) {
        distance[i] += ((double)SQR(h1[i] - h2[i])) / (h1[i] + h2[i]);
    }
    __syncthreads();
    // Pairwise tree reduction: halve the active range each round.
    int half_length = 256/2;
    while (half_length > 0) {
        if (i < half_length) {
            distance[i] = distance[i] + distance[i + half_length];
        }
        __syncthreads(); // note: not inside the if, otherwise you will have a deadlock
        half_length /= 2;
    }
}
//10000 threadblock
//256 threads.
/* Batched chi-square distance: one block of 256 threads per image pair
 * (gridDim.x = N_IMG_PAIRS). Block b reduces bins b*256..b*256+255 of
 * h1/h2 in shared memory and writes the pair's distance to distance[b]. */
__global__ void histogram_distance_batching(int* h1, int* h2, OUT double *distance) {
    __shared__ double shared_distance[256];
    int i = threadIdx.x;      /* bin index within this pair's histogram */
    int blockId = blockIdx.x; /* image-pair index */
    int k = i + blockId * 256; /* global bin index into the batched histograms */
    shared_distance[i] = 0;
    if (h1[k] + h2[k] != 0) {
        shared_distance[i] += ((double)SQR(h1[k] - h2[k])) / (h1[k] + h2[k]);
    }
    __syncthreads();
    // Pairwise tree reduction over the 256 per-bin terms.
    int half_length = 256/2;
    while (half_length > 0) {
        if (i < half_length) {
            shared_distance[i] = shared_distance[i] + shared_distance[i + half_length];
        }
        __syncthreads(); // note: not inside the if, otherwise you will have a deadlock
        half_length /= 2;
    }
    __syncthreads();
    // Thread 0 publishes this pair's total.
    if (i == 0) {
        distance[blockId] = shared_distance[i];
    }
}
//1thb
//10000 threads.
/* In-place tree reduction of the N_IMG_PAIRS per-pair distances down to
 * distance[0], intended to run as a single block with one thread per pair.
 * NOTE(review): the call site (commented out in main) would launch
 * <<<1, N_IMG_PAIRS>>> = 10000 threads in one block, which exceeds the
 * CUDA limit of 1024 threads per block — the launch would fail. Also,
 * with fewer threads than N_IMG_PAIRS/2 the upper elements would never be
 * summed. Confirm before re-enabling. */
__global__ void distances_to_distance_batching(double* distance){
    int i = threadIdx.x;
    int half_length = N_IMG_PAIRS/2;
    while (half_length > 0) {
        if (i < half_length) {
            distance[i] = distance[i] + distance[i + half_length];
        }
        __syncthreads(); // note: not inside the if, otherwise you will have a deadlock
        half_length /= 2;
    }
    __syncthreads();
}
/* Driver: computes the average chi-square LBP distance over N_IMG_PAIRS
 * image pairs four ways (CPU, GPU task-serial, GPU task-serial with
 * shared memory, GPU batched) and reports timing for each. */
int main() {
    uchar *images1; /* we concatenate all images in one huge array */
    uchar *images2;
    CUDA_CHECK( cudaHostAlloc(&images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
    CUDA_CHECK( cudaHostAlloc(&images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, 0) );
    load_image_pairs(images1, images2);
    double t_start, t_finish;
    /* BUG FIX: total_distance was declared uninitialized but accumulated
     * with += in the CPU loop below, so the reported CPU result was
     * undefined garbage. Initialize it to 0. */
    double total_distance = 0;
    /* using CPU */
    printf("\n=== CPU ===\n");
    int histogram1[256];
    int histogram2[256];
    t_start = get_time_msec();
    for (int i = 0; i < N_IMG_PAIRS; i++) {
        image_to_histogram(&images1[i * IMG_DIMENSION * IMG_DIMENSION], histogram1);
        image_to_histogram(&images2[i * IMG_DIMENSION * IMG_DIMENSION], histogram2);
        total_distance += histogram_distance(histogram1, histogram2);
    }
    t_finish = get_time_msec();
    printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
    printf("total time %f [msec]\n", t_finish - t_start);
    /*----------------------------------------------------------------------------*/
    /* using GPU task-serial */
    /*----------------------------------------------------------------------------*/
    total_distance = 0;
    printf("\n=== GPU Task Serial ===\n");
    do { /* do {} while (0): to keep variables inside this block in their own scope */
        uchar *gpu_image1, *gpu_image2;
        int *gpu_hist1, *gpu_hist2;
        double *gpu_hist_distance;
        double cpu_hist_distance;
        /* cudaMalloc allocates in global memory, visible to all threads. */
        CUDA_CHECK(cudaMalloc((void**)&gpu_image1, sizeof(uchar) * IMG_DIMENSION * IMG_DIMENSION));
        CUDA_CHECK(cudaMalloc((void**)&gpu_image2, sizeof(uchar) * IMG_DIMENSION * IMG_DIMENSION));
        CUDA_CHECK(cudaMalloc((void**)&gpu_hist1, sizeof(int)*256));
        CUDA_CHECK(cudaMalloc((void**)&gpu_hist2, sizeof(int)*256));
        /* 256 doubles: histogram_distance_simple uses the array as reduction scratch. */
        CUDA_CHECK(cudaMalloc((void**)&gpu_hist_distance, sizeof(double)*256));
        /* One thread per pixel: IMG_DIMENSION x IMG_DIMENSION block. */
        dim3 threadsIntb(IMG_DIMENSION, IMG_DIMENSION);
        t_start = get_time_msec();
        for (int i = 0; i < N_IMG_PAIRS; i++) {
            CUDA_CHECK(cudaMemcpy(gpu_image1, &(images1[i*IMG_DIMENSION*IMG_DIMENSION]), IMG_DIMENSION*IMG_DIMENSION, cudaMemcpyHostToDevice));
            CUDA_CHECK(cudaMemcpy(gpu_image2, &(images2[i*IMG_DIMENSION*IMG_DIMENSION]), IMG_DIMENSION*IMG_DIMENSION, cudaMemcpyHostToDevice));
            CUDA_CHECK(cudaMemset(gpu_hist1, 0, sizeof(int)*256));
            CUDA_CHECK(cudaMemset(gpu_hist2, 0, sizeof(int)*256));
            CUDA_CHECK(cudaMemset(gpu_hist_distance, 0, sizeof(double)*256));
            image_to_histogram_simple<<<1, threadsIntb>>>(gpu_image1, gpu_hist1);
            image_to_histogram_simple<<<1, threadsIntb>>>(gpu_image2, gpu_hist2);
            histogram_distance_simple<<<1, 256>>>(gpu_hist1, gpu_hist2, gpu_hist_distance);
            /* Blocking copy: also synchronizes with the kernels above. */
            CUDA_CHECK(cudaMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), cudaMemcpyDeviceToHost));
            total_distance += cpu_hist_distance;
        }
        CUDA_CHECK(cudaDeviceSynchronize());
        t_finish = get_time_msec();
        CUDA_CHECK(cudaFree(gpu_image1));
        CUDA_CHECK(cudaFree(gpu_image2));
        CUDA_CHECK(cudaFree(gpu_hist_distance));
        CUDA_CHECK(cudaFree(gpu_hist1));
        CUDA_CHECK(cudaFree(gpu_hist2));
        printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
        printf("total time %f [msec]\n", t_finish - t_start);
    } while (0);
    /*----------------------------------------------------------------------------*/
    /* using GPU task-serial + images and histograms in shared memory */
    /*----------------------------------------------------------------------------*/
    total_distance = 0;
    printf("\n=== GPU Task Serial with shared memory ===\n");
    do {
        uchar *gpu_image1, *gpu_image2;
        int *gpu_hist1, *gpu_hist2;
        double *gpu_hist_distance;
        double cpu_hist_distance;
        CUDA_CHECK(cudaMalloc((void**)&gpu_image1, sizeof(uchar) * IMG_DIMENSION * IMG_DIMENSION));
        CUDA_CHECK(cudaMalloc((void**)&gpu_image2, sizeof(uchar) * IMG_DIMENSION * IMG_DIMENSION));
        CUDA_CHECK(cudaMalloc((void**)&gpu_hist1, sizeof(int)*256));
        CUDA_CHECK(cudaMalloc((void**)&gpu_hist2, sizeof(int)*256));
        CUDA_CHECK(cudaMalloc((void**)&gpu_hist_distance, sizeof(double)*256));
        dim3 threadsIntb(IMG_DIMENSION, IMG_DIMENSION);
        t_start = get_time_msec();
        for (int i = 0; i < N_IMG_PAIRS; i++) {
            CUDA_CHECK(cudaMemcpy(gpu_image1, &(images1[i*IMG_DIMENSION*IMG_DIMENSION]), IMG_DIMENSION*IMG_DIMENSION, cudaMemcpyHostToDevice));
            CUDA_CHECK(cudaMemcpy(gpu_image2, &(images2[i*IMG_DIMENSION*IMG_DIMENSION]), IMG_DIMENSION*IMG_DIMENSION, cudaMemcpyHostToDevice));
            CUDA_CHECK(cudaMemset(gpu_hist1, 0, sizeof(int)*256));
            CUDA_CHECK(cudaMemset(gpu_hist2, 0, sizeof(int)*256));
            CUDA_CHECK(cudaMemset(gpu_hist_distance, 0, sizeof(double)*256));
            /* Shared-memory variant of the histogram kernel. */
            image_to_histogram_shared<<<1, threadsIntb>>>(gpu_image1, gpu_hist1);
            image_to_histogram_shared<<<1, threadsIntb>>>(gpu_image2, gpu_hist2);
            histogram_distance_simple<<<1, 256>>>(gpu_hist1, gpu_hist2, gpu_hist_distance);
            CUDA_CHECK(cudaMemcpy(&cpu_hist_distance, gpu_hist_distance, sizeof(double), cudaMemcpyDeviceToHost));
            total_distance += cpu_hist_distance;
        }
        CUDA_CHECK(cudaDeviceSynchronize());
        t_finish = get_time_msec();
        CUDA_CHECK(cudaFree(gpu_image1));
        CUDA_CHECK(cudaFree(gpu_image2));
        CUDA_CHECK(cudaFree(gpu_hist_distance));
        CUDA_CHECK(cudaFree(gpu_hist1));
        CUDA_CHECK(cudaFree(gpu_hist2));
        printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
        printf("total time %f [msec]\n", t_finish - t_start);
    } while (0);
    /*----------------------------------------------------------------------------*/
    /* using GPU + batching */
    /*----------------------------------------------------------------------------*/
    total_distance = 0;
    printf("\n=== GPU Batching ===\n");
    do {
        uchar *gpu_images1, *gpu_images2;
        int *gpu_hist1, *gpu_hist2;
        double *gpu_hist_distance;
        double cpu_hist_distance;
        /* Allocate and upload ALL N_IMG_PAIRS images at once. */
        CUDA_CHECK(cudaMalloc((void**)&gpu_images1, sizeof(uchar) * N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION));
        CUDA_CHECK(cudaMalloc((void**)&gpu_images2, sizeof(uchar) * N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION));
        CUDA_CHECK(cudaMemcpy(gpu_images1, images1, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, cudaMemcpyHostToDevice));
        CUDA_CHECK(cudaMemcpy(gpu_images2, images2, N_IMG_PAIRS * IMG_DIMENSION * IMG_DIMENSION, cudaMemcpyHostToDevice));
        /* One 256-bin histogram per image. */
        CUDA_CHECK(cudaMalloc((void**)&gpu_hist1, sizeof(int)*256 * N_IMG_PAIRS));
        CUDA_CHECK(cudaMalloc((void**)&gpu_hist2, sizeof(int)*256 * N_IMG_PAIRS));
        CUDA_CHECK(cudaMemset(gpu_hist1, 0, sizeof(int)*256 * N_IMG_PAIRS));
        CUDA_CHECK(cudaMemset(gpu_hist2, 0, sizeof(int)*256 * N_IMG_PAIRS));
        CUDA_CHECK(cudaMalloc((void**)&gpu_hist_distance, sizeof(double)*N_IMG_PAIRS));
        CUDA_CHECK(cudaMemset(gpu_hist_distance, 0, sizeof(double)*N_IMG_PAIRS));
        dim3 threadsIntb(IMG_DIMENSION, IMG_DIMENSION);
        t_start = get_time_msec();
        /* One thread block per image (N_IMG_PAIRS blocks of 32x32 threads). */
        image_to_histogram_batching<<<N_IMG_PAIRS, threadsIntb>>>(gpu_images1, gpu_hist1);
        image_to_histogram_batching<<<N_IMG_PAIRS, threadsIntb>>>(gpu_images2, gpu_hist2);
        histogram_distance_batching<<<N_IMG_PAIRS, 256>>>(gpu_hist1, gpu_hist2, gpu_hist_distance);
        /* TODO(optimize): one bulk D2H copy (or a reduction kernel) would
         * avoid N_IMG_PAIRS tiny synchronous transfers. */
        for (int i = 0; i < N_IMG_PAIRS; i++) {
            CUDA_CHECK(cudaMemcpy(&cpu_hist_distance, &(gpu_hist_distance[i]), sizeof(double), cudaMemcpyDeviceToHost));
            total_distance += cpu_hist_distance;
        }
        CUDA_CHECK(cudaDeviceSynchronize());
        t_finish = get_time_msec();
        CUDA_CHECK(cudaFree(gpu_images1));
        CUDA_CHECK(cudaFree(gpu_images2));
        CUDA_CHECK(cudaFree(gpu_hist_distance));
        CUDA_CHECK(cudaFree(gpu_hist1));
        CUDA_CHECK(cudaFree(gpu_hist2));
        printf("average distance between images %f\n", total_distance / N_IMG_PAIRS);
        printf("total time %f [msec]\n", t_finish - t_start);
    } while (0);
    /* FIX: release the pinned host buffers (previously leaked). */
    CUDA_CHECK(cudaFreeHost(images1));
    CUDA_CHECK(cudaFreeHost(images2));
    return 0;
}
|
847
|
#include "includes.h"
// Negate, in place, every element of d_vec strictly greater than 5.
// FIX: index with the global thread id (blockIdx.x * blockDim.x +
// threadIdx.x) instead of threadIdx.x alone, so the kernel is correct
// for multi-block launches as well; behavior is unchanged for the
// single-block case.
__global__ void kernel(int* d_vec, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n) {
        int v = d_vec[tid];
        d_vec[tid] = v > 5 ? -v : v;
    }
}
|
848
|
#include "includes.h"
// Flattened global thread id for a 3D grid of 3D blocks: first linearize
// the block index over the grid, then add this thread's linear offset
// within its block (x fastest, then y, then z).
__device__ unsigned int getGid3d3d(){
    int blockId = blockIdx.x + blockIdx.y * gridDim.x
                  + gridDim.x * gridDim.y * blockIdx.z;
    int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
                   + (threadIdx.y * blockDim.x)
                   + (threadIdx.z * (blockDim.x * blockDim.y)) + threadIdx.x;
    return threadId;
}
// Initialize a 3D wavefunction with a Gaussian envelope and a winding
// phase: phi = -fmod(winding * atan2(y, x), 2*PI), wfc = G * e^{-i*phi}
// (real part cos, imaginary part -sin). x/y/z are the per-axis coordinate
// arrays, indexed by the per-axis global indices; phi/wfc are indexed by
// the flattened 3D global id.
// NOTE(review): items[14..17] act as the Gaussian width parameters
// (items[14] common scale; items[15..17] per-axis factors) — their exact
// meaning comes from the caller's parameter table; confirm there. PI is
// presumably defined in includes.h.
__global__ void kstd_wfc(double *x, double *y, double *z, double *items, double winding, double *phi, double2 *wfc){
    int gid = getGid3d3d();
    int xid = blockDim.x*blockIdx.x + threadIdx.x;
    int yid = blockDim.y*blockIdx.y + threadIdx.y;
    int zid = blockDim.z*blockIdx.z + threadIdx.z;
    phi[gid] = -fmod(winding*atan2(y[yid], x[xid]),2*PI);
    wfc[gid].x = exp(-(x[xid]*x[xid]/(items[14]*items[14]*items[15]*items[15])
                     + y[yid]*y[yid]/(items[14]*items[14]*items[16]*items[16])
                     + z[zid]*z[zid]/(items[14]*items[14]*items[17]*items[17])))
                 * cos(phi[gid]);
    wfc[gid].y = -exp(-(x[xid]*x[xid]/(items[14]*items[14]*items[15]*items[15])
                      + y[yid]*y[yid]/(items[14]*items[14]*items[16]*items[16])
                      + z[zid]*z[zid]/(items[14]*items[14]*items[17]*items[17])))
                 * sin(phi[gid]);
}
|
849
|
/* File: vec_add.cu
* Purpose: Implement vector addition on a gpu using cuda
*
* Compile: nvcc [-g] [-G] -o vec_add vec_add.cu
* Run: ./vec_add
*/
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <stdlib.h>
#include <math.h>
/* Element-wise vector addition z = x + y over n elements, using a
 * grid-stride loop so any launch configuration covers the whole array.
 * `blks` is the grid size used at launch, so blks * blockDim.x is the
 * total number of threads (the stride). */
__global__ void Vec_add(float x[], float y[], float z[], int n, int blks) {
    const int stride = blks * blockDim.x;
    for (int i = threadIdx.x + blockIdx.x * blockDim.x; i < n; i += stride) {
        z[i] = x[i] + y[i];
    }
}
/* Driver: builds two random vectors, adds them on the GPU, times the
 * kernel, and verifies the result against a CPU reference.
 * CLI: [n] [threads_per_block] [blocks] (all optional). */
int main(int argc, char* argv[]) {
    int n, th_p_blk;
    float *h_x, *h_y, *h_z, *h_z_res;
    float *d_x, *d_y, *d_z;
    size_t size;
    th_p_blk = 1024;
    n = 1024;
    if(argc > 1)
        n = atoi(argv[1]);
    if(argc > 2)
        th_p_blk = atoi(argv[2]);
    int blks = ceil((float)n/(float)th_p_blk);
    if(argc > 3)
        blks = atoi(argv[3]);
    /* Define vector length */
    size = n*sizeof(float);
    // Allocate memory for the vectors on host memory.
    h_x = (float*) malloc(size);
    h_y = (float*) malloc(size);
    h_z = (float*) malloc(size);
    h_z_res = (float*) malloc(size);
    /* Fill inputs and precompute the CPU reference result. */
    for (int i = 0; i < n; i++) {
        h_x[i] = rand();
        h_y[i] = rand();
        h_z_res[i] = h_x[i]+h_y[i];
    }
    /* Allocate vectors in device memory */
    cudaMalloc(&d_x, size);
    cudaMalloc(&d_y, size);
    cudaMalloc(&d_z, size);
    /* Copy vectors from host memory to device memory */
    cudaMemcpy(d_x, h_x, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y, size, cudaMemcpyHostToDevice);
    clock_t Ticks[2];
    Ticks[0] = clock();
    /* Kernel Call */
    Vec_add<<<blks,th_p_blk>>>(d_x, d_y, d_z, n,blks);
    /* BUG FIX: kernel launches are asynchronous, so the clock must be
     * read AFTER synchronizing — the old code timed only the launch.
     * Also use cudaDeviceSynchronize(); cudaThreadSynchronize() is
     * deprecated. */
    cudaDeviceSynchronize();
    Ticks[1] = clock();
    double Tempo = (Ticks[1] - Ticks[0]) * 1000.0 / CLOCKS_PER_SEC;
    printf("\n\n Tempo gasto: %g ms para:\n %d elementos \n %d blocks \n %d th_p_blk \n\n", Tempo,n,blks,th_p_blk);
    cudaMemcpy(h_z, d_z, size, cudaMemcpyDeviceToHost);
    /* Compare the GPU result against the CPU reference. */
    bool certo=true;
    for (int i = 0; i < n; i++){
        if(h_z_res[i] != h_z[i])
            certo=false;
    }
    printf("\n*****\n certo = %s\n*****\n", certo ? "true" : "false");
    /* Free device memory */
    cudaFree(d_x);
    cudaFree(d_y);
    cudaFree(d_z);
    /* Free host memory */
    free(h_x);
    free(h_y);
    free(h_z);
    free(h_z_res);
    return 0;
} /* main */
|
850
|
#include "includes.h"
__global__ void LRMomentaKernel (double *RadMomP, double *RadMomM, double *ThetaMomP, double *ThetaMomM, double *Dens, double *Vrad, double *Vtheta, int nrad, int nsec, double *Rmed, double OmegaFrame)
{
int j = threadIdx.x + blockDim.x*blockIdx.x;
int i = threadIdx.y + blockDim.y*blockIdx.y;
if (i<nrad && j<nsec){
RadMomP[i*nsec + j] = Dens[i*nsec + j] * Vrad[(i+1)*nsec + j]; // (i+1)*nsec
RadMomM[i*nsec + j] = Dens[i*nsec + j] * Vrad[i*nsec + j];
/* it is the angular momentum -> ThetaMomP */
ThetaMomP[i*nsec + j] = Dens[i*nsec + j] * (Vtheta[i*nsec + (j+1)%nsec]+Rmed[i]*OmegaFrame)*Rmed[i];
ThetaMomM[i*nsec + j] = Dens[i*nsec + j] * (Vtheta[i*nsec + j]+Rmed[i]*OmegaFrame)*Rmed[i];
}
}
|
851
|
#include "includes.h"
// Double every pixel of an X-by-Y image stored row-major in d_pix.
// 2D launch: x dimension maps to columns, y dimension to rows; threads
// outside the image do nothing.
__global__ void pictureKernel(float* d_pix, int X, int Y) {
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    if (col >= X || row >= Y) return;
    d_pix[row * X + col] *= 2;
}
|
852
|
#include <stdio.h>
#include <cuda.h>
#define N 4096 // Define size of array
#define THREADS_PER_BLOCK 1024
// One thread per element: c[i] = a[i] + b[i] over the N-element arrays.
// FIX: added a bounds guard so the kernel stays safe if the launch ever
// rounds the grid up past N (currently N is an exact multiple of
// THREADS_PER_BLOCK, so behavior is unchanged).
__global__ void vectorAddKernel(int *a, int *b, int *c) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N) {
        c[i] = a[i] + b[i];
    }
}
/* Driver: adds two N-element integer vectors on the GPU, times the
 * kernel with CUDA events, prints the results and the elapsed time. */
int main() {
    // Initialise grid and block variables
    dim3 grid(N / THREADS_PER_BLOCK, 1, 1);
    dim3 block(THREADS_PER_BLOCK, 1, 1);
    // Initialise host arrays and device pointers
    int a_h[N];
    int b_h[N];
    int c_h[N];
    int *a_d;
    int *b_d;
    int *c_d;
    // Load host arrays with data
    for (int i = 0; i < N; i++) {
        a_h[i] = i;
        b_h[i] = i;
    }
    // Allocate device memory
    cudaMalloc((void**)&a_d, N*sizeof(int));
    cudaMalloc((void**)&b_d, N*sizeof(int));
    cudaMalloc((void**)&c_d, N*sizeof(int));
    // Copy input host memory to device memory.
    // FIX: removed the copy of c_h to c_d — it transferred uninitialized
    // host memory that the kernel fully overwrites anyway.
    cudaMemcpy(a_d, a_h, N*sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(b_d, b_h, N*sizeof(int), cudaMemcpyHostToDevice);
    // Create timer
    cudaEvent_t start;
    cudaEvent_t stop;
    float elapsedTime;
    // Start timer
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    // Launch kernel
    vectorAddKernel<<<grid, block>>>(a_d, b_d, c_d);
    // Stop timer (cudaEventSynchronize waits for the kernel + stop event)
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    // Copy results back to the host and print
    cudaMemcpy(c_h, c_d, N*sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%i+%i = %i\n", a_h[i], b_h[i], c_h[i]);
    }
    // Print execution time
    printf("Time to calculate results: %f ms\n", elapsedTime);
    // Free memory
    cudaFree(a_d);
    cudaFree(b_d);
    cudaFree(c_d);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}
|
853
|
#include <iostream>
#include <cstdlib>
#include <sys/time.h>
#define N 32 // Only powers of 2 to simplify the code
#define BLOCK_SIZE 8
#define NUM_BLOCKS N
#define NUM_THREADS_PER_BLOCK N
#define NUM_BLOCKS_TILED (N*N)/(BLOCK_SIZE*BLOCK_SIZE)
#define NUM_THREADS_PER_BLOCK_TILED BLOCK_SIZE*BLOCK_SIZE
#define TIME_RESOLUTION 1000000
using namespace std;
long long unsigned initial_time;
struct timeval t;
// Print one elapsed-time measurement (in TIME_RESOLUTION ticks) on its
// own line.
void printResults (long long unsigned tt) {
    std::cout << tt << std::endl;
}
// Capture the current time (TIME_RESOLUTION ticks per second) into the
// global initial_time, as the reference point for a later stop() call.
void start (void) {
    gettimeofday(&t, NULL);
    initial_time = t.tv_usec + t.tv_sec * TIME_RESOLUTION;
}
// Elapsed ticks since the matching start() call.
long long unsigned stop (void) {
    gettimeofday(&t, NULL);
    return (t.tv_sec * TIME_RESOLUTION + t.tv_usec) - initial_time;
}
// Naive N x N matrix multiply: one thread per output element, with
// blockIdx.x selecting the row and threadIdx.x the column (launch uses
// N blocks of N threads). The dot product is accumulated directly in
// global memory.
__global__ void matrixKernel (float *dev_m1, float *dev_m2, float *dev_result) {
    *(dev_result+blockIdx.x*N+threadIdx.x)=0;
    for(unsigned i=0; i < N; i++)
        *(dev_result+blockIdx.x*N+threadIdx.x) += *(dev_m1+blockIdx.x*N+i) * *(dev_m2+i*N+threadIdx.x);
}
/* Multiply two N x N matrices on the GPU and time the total round trip
 * plus the host<->device transfer portion. result = m1 * m2. */
void gpuMatrixMult (float *m1, float *m2, float *result) {
    float *dev_m1, *dev_m2, *dev_result;
    long long unsigned stopHostToGPU, beginGPUtoHost, allTime;
    cudaMalloc((void**) &dev_m1,N * N * sizeof(float));
    cudaMalloc((void**) &dev_m2,N * N * sizeof(float));
    cudaMalloc((void**) &dev_result, N * N * sizeof(float));
    //startTime
    start();
    cudaMemcpy(dev_m1, m1, N * N * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_m2, m2, N * N * sizeof(float), cudaMemcpyHostToDevice);
    stopHostToGPU = stop();
    /* BUG FIX: the execution-configuration arguments were swapped
     * (<<<NUM_THREADS_PER_BLOCK, NUM_BLOCKS>>>); the grid size comes
     * first, then the block size. It only happened to work because both
     * macros expand to N. */
    matrixKernel <<< NUM_BLOCKS, NUM_THREADS_PER_BLOCK >>>(dev_m1, dev_m2, dev_result);
    /* BUG FIX: the launch is asynchronous — synchronize before reading
     * the clock so the following segment measures only the GPU->host
     * transfer, not leftover kernel time. */
    cudaDeviceSynchronize();
    beginGPUtoHost = stop();
    // copy the output to the host
    cudaMemcpy(result, dev_result, N * N * sizeof(float), cudaMemcpyDeviceToHost);
    allTime = stop();
    //stopTime: total round trip
    printResults(allTime);
    //transfer time: H2D segment + D2H segment
    printResults(allTime-beginGPUtoHost+stopHostToGPU);
    // free the device memory
    cudaFree(dev_m1);
    cudaFree(dev_m2);
    cudaFree(dev_result);
}
/*
__global__ void tiledMatrixKernel (float *dev_m1, float *dev_m2, float *dev_result) {
__shared__ float temp1 [BLOCK_SIZE][BLOCK_SIZE];
__shared__ float temp2 [BLOCK_SIZE][BLOCK_SIZE];
int xIn = threadIdx.x/BLOCK_SIZE;
int yIn = threadIdx.x%BLOCK_SIZE;
int xB = (((blockIdx.x*BLOCK_SIZE) / N) * BLOCK_SIZE) + xIn;
int yB = (((blockIdx.x*BLOCK_SIZE) % N) * BLOCK_SIZE) + yIn;
temp1[xIn][yIn]=*(dev_m1+xB*N+yB);
temp2[xIn][yIn]=*(dev_m2+xB*N+yB);
__syncthreads();
for(unsigned i=0; i < BLOCK_SIZE; i++)
*(dev_result+xB*N+yB) += temp1[xIn][i]*temp2[i][yIn];
}
void gpuTiledMatrixMult (float *m1, float *m2, float *result) {
float *dev_m1, *dev_m2, *dev_result;
cudaMalloc((void**) &dev_m1,N * N * sizeof(float));
cudaMalloc((void**) &dev_m2,N * N * sizeof(float));
cudaMalloc((void**) &dev_result, N * N * sizeof(float));
start();
//startKernelTime();
cudaMemcpy(dev_m1, m1, N * N * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_m2, m2, N * N * sizeof(float), cudaMemcpyHostToDevice);
tiledMatrixKernel <<< NUM_THREADS_PER_BLOCK_TILED, NUM_BLOCKS_TILED >>>(dev_m1, dev_m2, dev_result);
// copy the output to the host
cudaMemcpy(result, dev_result, N * N * sizeof(float), cudaMemcpyDeviceToHost);
//stopKernelTime();
printResults(stop());
// free the device memory
cudaFree(dev_m1);
cudaFree(dev_m2);
cudaFree(dev_result);
}
*/
/* Driver: build an N x N random matrix A, an all-ones matrix B, and a
 * zeroed result C, then run the GPU multiply. */
int main (int argc, char *argv[]) {
    unsigned seed=0;
    float *a = (float*)malloc(sizeof(float)*N*N);
    float *b = (float*)malloc(sizeof(float)*N*N);
    float *c = (float*)malloc(sizeof(float)*N*N);
    srand(seed);
    //build matrix A with random values and C initialized with 0's
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++){
            *(a+i*N+j) = rand();
            *(c+i*N+j) = 0;
        }
    }
    //build matrix B with all elements equal to 1
    for(int i = 0; i < N; i++){
        for(int j = 0; j < N; j++)
            *(b+i*N+j) = 1;
    }
    gpuMatrixMult(a,b,c);
    //gpuTiledMatrixMult(a,b,c);
    /* FIX: release the host buffers (previously leaked). */
    free(a);
    free(b);
    free(c);
    return 0;
}
|
854
|
#define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vy(z,x) d_vy[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)] // Pressure
#define d_mem_dszz_dz(z,x) d_mem_dszz_dz[(x)*(nz)+(z)]
#define d_mem_dsxx_dx(z,x) d_mem_dsxx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_ave_Byc_a(z,x) d_ave_Byc_a[(x)*(nz)+(z)]
#define d_ave_Byc_b(z,x) d_ave_Byc_b[(x)*(nz)+(z)]
#include<stdio.h>
// 2D acoustic velocity update (4th-order staggered finite differences).
// Thread mapping: gidz = depth index (block/thread x), gidx = horizontal
// index (block/thread y); arrays are addressed through the d_*(z,x)
// macros as column-major with leading dimension nz.
// isFor == true: forward pass — interior stencil plus CPML absorbing
// boundaries via the memory variables d_mem_dszz_dz / d_mem_dsxx_dx and
// the profiles d_K_*, d_a_*, d_b_*.
// isFor == false: backward/adjoint pass — plain stencil on the interior
// (PML region excluded), subtracting the update.
// nPml = PML thickness, nPad = bottom padding rows.
// NOTE(review): d_Lambda and d_Den are unused in this kernel — presumably
// kept for a uniform signature across kernels; confirm before removing.
__global__ void ac_velocity(float *d_vz, float *d_vx, float *d_szz, \
    float *d_mem_dszz_dz, float *d_mem_dsxx_dx, float *d_Lambda, \
    float *d_Den, float *d_ave_Byc_a, float *d_ave_Byc_b, float *d_K_z, \
    float *d_a_z, float *d_b_z, \
    float *d_K_x_half, float *d_a_x_half, float *d_b_x_half, \
    int nz, int nx, float dt, float dz, float dx, int nPml, int nPad, bool isFor){
    int gidz = blockIdx.x*blockDim.x + threadIdx.x;
    int gidx = blockIdx.y*blockDim.y + threadIdx.y;
    float dszz_dz = 0.0;
    float dsxx_dx = 0.0;
    // 4th-order staggered-grid finite-difference coefficients.
    float c1 = 9.0/8.0;
    float c2 = 1.0/24.0;
    if (isFor) {
        // Interior + PML region (2-cell stencil halo excluded at the edges).
        if(gidz>=2 && gidz<=nz-nPad-3 && gidx>=2 && gidx<=nx-3) {
            // if(gidz>=2 && gidz<=nz-nPad-2) {
            // update vz
            dszz_dz = (c1*(d_szz(gidz,gidx)-d_szz(gidz-1,gidx)) - c2*(d_szz(gidz+1,gidx)-d_szz(gidz-2,gidx)))/dz;
            // CPML memory-variable update, only inside the top/bottom PML layers.
            if(gidz<=nPml || (gidz>=nz-nPml-nPad-1)){
                d_mem_dszz_dz(gidz,gidx) = d_b_z[gidz]*d_mem_dszz_dz(gidz,gidx) + d_a_z[gidz]*dszz_dz;
            }
            d_vz(gidz,gidx) += (dszz_dz/d_K_z[gidz] + d_mem_dszz_dz(gidz,gidx)) * d_ave_Byc_a(gidz, gidx) * dt;
            // }
            // if(gidx>=1 && gidx<=nx-3) {
            // update vx
            dsxx_dx = (c1*(d_szz(gidz,gidx+1)-d_szz(gidz,gidx)) - c2*(d_szz(gidz,gidx+2)-d_szz(gidz,gidx-1)))/dx;
            // CPML memory-variable update, only inside the left/right PML layers.
            if(gidx<=nPml || gidx>=nx-nPml-1){
                d_mem_dsxx_dx(gidz,gidx) = d_b_x_half[gidx]*d_mem_dsxx_dx(gidz,gidx) + d_a_x_half[gidx]*dsxx_dx;
            }
            d_vx(gidz,gidx) += (dsxx_dx/d_K_x_half[gidx] + d_mem_dsxx_dx(gidz,gidx)) * d_ave_Byc_b(gidz, gidx) * dt;
            // }
        }
        else {
            return;
        }
    }
    else {
        // Backward pass: interior only (PML excluded), updates subtracted.
        if(gidz>=nPml+2 && gidz<=nz-nPad-3-nPml && gidx>=nPml+2 && gidx<=nx-3-nPml) {
            // update vx
            dsxx_dx = (c1*(d_szz(gidz,gidx+1)-d_szz(gidz,gidx)) - c2*(d_szz(gidz,gidx+2)-d_szz(gidz,gidx-1)))/dx;
            d_vx(gidz,gidx) -= dsxx_dx * d_ave_Byc_b(gidz, gidx) * dt;
            // update vz
            dszz_dz = (c1*(d_szz(gidz,gidx)-d_szz(gidz-1,gidx)) - c2*(d_szz(gidz+1,gidx)-d_szz(gidz-2,gidx)))/dz;
            d_vz(gidz,gidx) -= dszz_dz * d_ave_Byc_a(gidz, gidx) * dt;
        }
        else {
            return;
        }
    }
}
|
855
|
#include "includes.h"
// Synthetic compute load: each of the first N threads repeatedly applies
// asin to its own element, a[i] = asin(a[i] + step), for 1000 steps.
__global__ void SimpleKernel(int N, float* a){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N) return;
    for (int step = 0; step < 1000; ++step) {
        a[i] = asin(a[i] + step);
    }
}
|
856
|
/*
Transformer function helper function.
Written by tomztyang,
2021/08/23
*/
#include <math.h>
#include <stdio.h>
#define THREADS_PER_BLOCK 256
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
// #define DEBUG
/* Relative-position-encoded attention-weight forward pass.
 * Template parameter d = capacity of the shared query-feature buffer;
 * the launcher guarantees d >= hdim.
 * Launch: dim3 blocks(total_query_num, nhead); dim3 threads(local_size).
 * For each (query, head, local key): quantize the relative position into
 * [0, l), look up that row of the table, and dot it with the query's
 * head feature vector (staged once per block in shared memory).
 * NOTE(review): the early return before __syncthreads() is safe only
 * because blockDim.x == local_size exactly, so no thread in a launched
 * block takes it — keep launch and kernel in sync. */
template <unsigned int d>
__global__ void rpe_q_forward_v2(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *output) {
    // dim3 blocks(total_query_num, nhead); dim3 threads(local_size);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    int query_idx = blockIdx.x;
    int head_idx = blockIdx.y;
    int local_key_idx = threadIdx.x;
    if (query_idx >= total_query_num ||
        head_idx >= nhead ||
        local_key_idx >= local_size) return;
    // Stage this (query, head) feature vector in shared memory once per
    // block; threads cooperate, striding by blockDim.x.
    __shared__ float shared_query_features[d];
    for(int i = local_key_idx; i < hdim; i += blockDim.x){
        shared_query_features[i] = query_features[
            query_idx * nhead * hdim + head_idx * hdim + i];
    }
    __syncthreads();
    // 1. obtain quantized relative position, clamped to [0, l-1].
    relpos += query_idx * local_size + local_key_idx;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim;
    output += query_idx * local_size * nhead + local_key_idx * nhead + head_idx;
    // 2. dot product between the query features and the table row.
    float attn_weight = 0;
    for (int i = 0; i < hdim; i++){
        attn_weight += shared_query_features[i] * lookup_table[i];
    }
    output[0] = attn_weight;
}
/* Host launcher for rpe_q_forward_v2: dispatches on hdim to pick the
 * smallest template instantiation whose shared buffer (d floats) fits
 * the head dimension. Rejects hdim > 100, the largest instantiation.
 * Grid: (total_query_num, nhead) blocks of local_size threads. */
void rpe_q_launcher_v2(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *output){
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params output: [total_query_num, local_size, nhead]
    if (hdim > 100){
        throw "hdim should be <= 100.";
    }
    dim3 blocks(total_query_num, nhead);
    dim3 threads(local_size);
    // Select the template whose shared-memory capacity d covers hdim;
    // non-power-of-two hdim values fall through to the 100-float variant.
    switch (hdim){
        case 4:
            rpe_q_forward_v2<4><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt,
                relpos, lookup_table, query_features,
                output);
            break;
        case 8:
            rpe_q_forward_v2<8><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt,
                relpos, lookup_table, query_features,
                output);
            break;
        case 16:
            rpe_q_forward_v2<16><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt,
                relpos, lookup_table, query_features,
                output);
            break;
        case 32:
            rpe_q_forward_v2<32><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt,
                relpos, lookup_table, query_features,
                output);
            break;
        default:
            rpe_q_forward_v2<100><<<blocks, threads>>>(
                b, total_query_num, local_size, nhead, hdim, l,
                query_batch_cnt,
                relpos, lookup_table, query_features,
                output);
            break;
    }
}
// Backward pass of the relative-position-encoded query attention.
// Launch: blocks(total_query_num, nhead), threads(local_size).
// Template parameter d is a compile-time upper bound on hdim used to size
// the shared-memory arrays.
template <unsigned int d>
__global__ void rpe_q_backward_v2(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *grad_out, float * grad_lookup_table, float * grad_query_features) {
    // dim3 blocks(total_query_num, nhead); dim3 blocks(local_size);
    // params query_batch_cnt: [b]
    // params relpos: [total_query_num, local_size]
    // params lookup_table: [l, nhead, hdim]
    // params query_features: [total_query_num, nhead, hdim]
    // params grad_out: [total_query_num, local_size, nhead]
    // params grad_lookup_table: [l, nhead, hdim]
    // params grad_query_features: [total_query_num, nhead, hdim]
    int query_idx = blockIdx.x;
    int head_idx = blockIdx.y;
    int local_key_idx = threadIdx.x;
    // out-range judgement.
    if (query_idx >= total_query_num ||
        head_idx >= nhead ||
        local_key_idx >= local_size) return;
    // Stage this (query, head)'s feature vector in shared memory and zero the
    // shared gradient accumulator; threads cooperatively cover [0, hdim).
    __shared__ float shared_query_features[d], shared_grad_query_features[d];
    for (int i = local_key_idx; i < hdim; i += blockDim.x){
        shared_query_features[i] = query_features[
            query_idx * nhead * hdim + head_idx * hdim + i];
        shared_grad_query_features[i] = 0;
    }
    __syncthreads();
    // 2. Obtain quantize relative position (floored, clamped to [0, l-1]).
    relpos += query_idx * local_size + local_key_idx;
    int quantize_relpos = min(max(int(floor(relpos[0])), 0), l - 1);
    lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim;
    grad_lookup_table += quantize_relpos * nhead * hdim + head_idx * hdim;
    grad_query_features += query_idx * nhead * hdim + head_idx * hdim;
    float gradient = grad_out[query_idx * local_size * nhead + local_key_idx * nhead + head_idx];
    for (int i = 0; i < hdim; i++){
        // Atomics are required in both directions: many threads/blocks may
        // hit the same lookup-table row, and every thread of this block
        // accumulates into the shared gradient buffer.
        atomicAdd(
            grad_lookup_table + i,
            gradient * shared_query_features[i]);
        atomicAdd(
            shared_grad_query_features + i,
            gradient * lookup_table[i]);
    }
    __syncthreads();
    // Flush the block-accumulated query-feature gradient to global memory.
    // This overwrites (does not atomically add): each (query, head) pair is
    // handled by exactly one block per the launch configuration.
    for (int i = local_key_idx; i < hdim; i += blockDim.x){
        grad_query_features[i] = shared_grad_query_features[i];
    }
}
// Host launcher for rpe_q_backward_v2: selects a compile-time shared-memory
// capacity matching hdim and launches one block per (query, head) pair.
// params query_batch_cnt: [b]
// params relpos: [total_query_num, local_size]
// params lookup_table: [l, nhead, hdim]
// params query_features: [total_query_num, nhead, hdim]
// params grad_out: [total_query_num, local_size, nhead]
// params grad_lookup_table: [l, nhead, hdim]
// params grad_query_features: [total_query_num, nhead, hdim]
void rpe_q_grad_launcher_v2(
    int b, int total_query_num, int local_size, int nhead, int hdim, int l,
    const int *query_batch_cnt,
    const float *relpos, const float* lookup_table, const float* query_features,
    float *grad_out, float* grad_lookup_table, float* grad_query_features){
    if (hdim > 100){
        throw "hdim should be <= 100.";
    }
    dim3 blocks(total_query_num, nhead);
    dim3 threads(local_size);
    // One macro keeps the per-case argument lists identical.
#define RPE_Q_BWD_DISPATCH(D)                                   \
    rpe_q_backward_v2<D><<<blocks, threads>>>(                  \
        b, total_query_num, local_size, nhead, hdim, l,         \
        query_batch_cnt, relpos, lookup_table, query_features,  \
        grad_out, grad_lookup_table, grad_query_features)
    switch (hdim){
        case 4:  RPE_Q_BWD_DISPATCH(4);   break;
        case 8:  RPE_Q_BWD_DISPATCH(8);   break;
        case 16: RPE_Q_BWD_DISPATCH(16);  break;
        case 32: RPE_Q_BWD_DISPATCH(32);  break;
        default: RPE_Q_BWD_DISPATCH(100); break;
    }
#undef RPE_Q_BWD_DISPATCH
}
|
857
|
#include <cuda.h>
#include <stdio.h>
// Abort the process with a descriptive message when a CUDA API call fails.
//   error:   the cudaError_t returned by the call (passed as int).
//   message: short description of the attempted operation.
//   file/line: call site, typically __FILE__ / __LINE__.
// Fix: the parameters are now const char* — the original char[] parameters
// made every call site pass a string literal to a mutable pointer, which is
// ill-formed in modern C++. Also prints the human-readable error string.
void cudasafe(int error, const char *message, const char *file, int line) {
    if (error != cudaSuccess) {
        fprintf(stderr, "CUDA Error: %s : %i (%s). In %s line %d\n",
                message, error, cudaGetErrorString((cudaError_t)error), file, line);
        exit(-1);
    }
}
// Enumerate all CUDA devices and print their key properties.
// Fix: totalGlobalMem, sharedMemPerBlock and totalConstMem are size_t fields;
// printing them with %u/%d truncates (and is UB) on 64-bit platforms — use %zu.
int main(int argc, char ** argv) {
    int deviceCount;
    cudasafe(cudaGetDeviceCount(&deviceCount), "GetDeviceCount", __FILE__, __LINE__);
    printf("Number of CUDA devices %d.\n", deviceCount);
    for (int dev = 0; dev < deviceCount; dev++) {
        cudaDeviceProp deviceProp;
        cudasafe(cudaGetDeviceProperties(&deviceProp, dev), "Get Device Properties", __FILE__, __LINE__);
        if (dev == 0) {
            // 9999.9999 is the sentinel compute capability of the emulation device.
            if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
                printf("No CUDA GPU has been detected\n");
                return -1;
            } else if (deviceCount == 1) {
                printf("There is 1 device supporting CUDA\n");
            } else {
                printf("There are %d devices supporting CUDA\n", deviceCount);
            }
        }
        printf("For device #%d\n", dev);
        printf("Device name: %s\n", deviceProp.name);
        printf("Major revision number: %d\n", deviceProp.major);
        printf("Minor revision Number: %d\n", deviceProp.minor);
        printf("Total Global Memory: %zu\n", deviceProp.totalGlobalMem);
        printf("Total shared mem per block: %zu\n", deviceProp.sharedMemPerBlock);
        printf("Total const mem size: %zu\n", deviceProp.totalConstMem);
        printf("Warp size: %d\n", deviceProp.warpSize);
        printf("Maximum block dimensions: %d x %d x %d\n", deviceProp.maxThreadsDim[0], \
               deviceProp.maxThreadsDim[1], \
               deviceProp.maxThreadsDim[2]);
        printf("Maximum grid dimensions: %d x %d x %d\n",
               deviceProp.maxGridSize[0], deviceProp.maxGridSize[1], deviceProp.maxGridSize[2]);
        printf("Clock Rate: %d\n", deviceProp.clockRate);
        printf("Number of muliprocessors: %d\n", deviceProp.multiProcessorCount);
    }
    return 0;
}
|
858
|
#include "includes.h"
// Element-wise vector addition: C[i] = A[i] + B[i] for i in [0, N).
// One thread per element; threads past the end do nothing.
__global__ void sumArraysZeroCopyWithUVA(float *A, float *B, float *C, const int N)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N) return;
    C[idx] = A[idx] + B[idx];
}
|
859
|
#include <stdio.h>
#include <cuda.h>
extern "C" {
void mandelbrot_row_calc(int *nStep_ptr, float *deltaStep_ptr, float *start_x_ptr, float *start_y_ptr, int *mandelSize_ptr, int *innerLoopSize_ptr, int *outerLoopSize_ptr, unsigned int *results);
void mini_mandelbrot_calc(float *start_x_ptr, float *start_y_ptr, int *mandelSize_ptr, int *innerLoopSize_ptr, int *outerLoopSize_ptr, unsigned int *results);
void PrintLastCUDAError();
}
// One thread per output cell (launched <<<(msize, msize), 1>>>): iterate the
// Mandelbrot-style map z <- z^2 + c starting from (start_x, start_y), where c
// comes from the block's position in the square grid, and accumulate a score
// that grows with how long the orbit stays bounded.
__global__ void mini_mandel_kernel(unsigned int *dev_Results, float start_x, float start_y, int innerloopsize, int outerloopsize) {
    int bx = blockIdx.x;
    int by = blockIdx.y;
    int mandsize = gridDim.x;   // grid assumed square (gridDim.x == gridDim.y)
    int i,j;
    unsigned int threadscore = 0;
    // Map the block position to a sample point: x in [-3, 1), y in [-2, 2).
    float xpos = (float)-1.0 + (float)4.0*float(bx - mandsize/2 )/(float)mandsize;
    float ypos = (float)0.0 + (float)4.0*float(by - mandsize/2 )/(float)mandsize;
    float a = start_x;
    float a_tmp;
    float b = start_y;
    for(i=0;i<outerloopsize;i++){
        // Run `innerloopsize` map iterations between escape checks.
        for(j=0;j<innerloopsize;j++){
            a_tmp = a;
            a = a*a - b*b + xpos;
            b = (float)2*a_tmp*b + ypos;
        }
        // Accumulate the outer-iteration index while |z|^2 <= 4; stop at the
        // first escape.
        if ((a*a + b*b) <= (float)4.0){
            threadscore += i;
        } else {
            break;
        }
    }
    dev_Results[by*mandsize + bx] = threadscore;
    return;
}
// Launch mini_mandel_kernel over a mandelSize x mandelSize grid (one block
// per output cell, a single thread per block) and copy the scores back.
void mini_mandelbrot_calc(float *start_x_ptr, float *start_y_ptr, int *mandelSize_ptr, int *innerLoopSize_ptr, int *outerLoopSize_ptr, unsigned int *results) {
    const float x0 = *start_x_ptr;
    const float y0 = *start_y_ptr;
    const int msize = *mandelSize_ptr;
    const int inner = *innerLoopSize_ptr;
    const int outer = *outerLoopSize_ptr;
    unsigned int *dev_Results;
    cudaMalloc((void**)&dev_Results, msize * msize * sizeof(unsigned int));
    // One block per output cell, one thread per block.
    dim3 threads(1);
    dim3 grid(msize, msize);
    mini_mandel_kernel<<<grid, threads>>>(dev_Results, x0, y0, inner, outer);
    // The blocking copy also synchronizes with the kernel.
    cudaMemcpy(results, dev_Results, msize * msize * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaFree(dev_Results);
    return;
}
// Per-step Mandelbrot-like scoring: one block per starting point (spaced by
// deltaStep along x), an mandsize x mandsize thread tile per block. The
// block-wide sum of thread scores lands in dev_Results[blockIdx.x].
__global__ void row_calc(unsigned int *dev_Results, float deltaStep, float start_x, float start_y, int innerloopsize, int outerloopsize){
    // __shared__ bool killus;
    // Per-thread partial scores; requires blockDim.x == blockDim.y <= 16.
    __shared__ unsigned int threadscores[16][16];
    __syncthreads();
    // killus = 0;
    int bx = blockIdx.x;
    int tx = threadIdx.x;
    int ty = threadIdx.y;
    int mandsize = blockDim.x;   // tile assumed square (blockDim.x == blockDim.y)
    int i,j;
    unsigned int threadscore = 0;
    // Map the thread to a sample point: x in [-3, 1), y in [-2, 2).
    float xpos = (float)-1.0 + (float)4.0*float(tx - mandsize/2 )/(float)mandsize;
    float ypos = (float)0.0 + (float)4.0*float(ty - mandsize/2 )/(float)mandsize;
    float a = start_x + deltaStep*(float)bx;   // per-block starting point
    float a_tmp;
    float b = start_y;
    // Iterate z <- z^2 + c, scoring outer iterations whose orbit is bounded.
    // Unlike mini_mandel_kernel, no early break: all threads do the same work.
    for(i=0;i<outerloopsize;i++){
        for(j=0;j<innerloopsize;j++){
            a_tmp = a;
            a = a*a - b*b + xpos;
            b = (float)2*a_tmp*b + ypos;
        }
        if ((a*a + b*b) <= (float)4.0){
            threadscore += i;
        }
    }
    threadscores[tx][ty] = threadscore;
    __syncthreads();
    if (ty == 0){ // the first thread of the row will sum the row
        threadscore = 0;
        for (i=0;i<mandsize;i++){
            threadscore += threadscores[tx][i];
        } // and put it in the first slot of the row in shared mem
        threadscores[tx][0] = threadscore;
    }
    __syncthreads();
    if (tx == 0 && ty == 0 ){ // thread (0,0) will do the sum over the
        threadscore = 0; // first column, to get the total
        for (i=0;i<mandsize;i++){
            threadscore +=threadscores[i][0];
        }
        dev_Results[bx] = threadscore; // the result to global_mem
    }
    return;
}
// Call this to print out the CUDA error status. To check if GPU stuff worked correctly,
// this should be called after a synchronous function (like a normal memory copy).
// Print the human-readable string for the last CUDA error.
// Call after a synchronous CUDA operation to check whether GPU work succeeded.
// Fix: the error text was passed directly as the printf format string — a
// format-string bug if the message ever contains '%'. Use an explicit "%s".
void PrintLastCUDAError(){
    cudaError_t err = cudaGetLastError();
    printf("%s\n", cudaGetErrorString(err));
    return;
}
// Launch row_calc over nStep starting points spaced deltaStep apart and copy
// the per-step block scores back into `results` (nStep unsigned ints).
void mandelbrot_row_calc(int *nStep_ptr, float *deltaStep_ptr, float *start_x_ptr, float *start_y_ptr, int *mandelSize_ptr, int *innerLoopSize_ptr, int *outerLoopSize_ptr, unsigned int *results) {
    const int steps = *nStep_ptr;
    const int msize = *mandelSize_ptr;
    const float dstep = *deltaStep_ptr;
    const float x0 = *start_x_ptr;
    const float y0 = *start_y_ptr;
    const int inner = *innerLoopSize_ptr;
    const int outer = *outerLoopSize_ptr;
    unsigned int *dev_Results;
    cudaMalloc((void**)&dev_Results, steps * sizeof(unsigned int));
    cudaMemset(dev_Results, 0, steps * sizeof(unsigned int));
    // PrintLastCUDAError();
    // One block per step; each block is an msize x msize thread tile.
    dim3 threads(msize, msize);
    dim3 grid(steps);
    row_calc<<<grid, threads>>>(dev_Results, dstep, x0, y0, inner, outer);
    // The blocking copy doubles as synchronization with the kernel.
    cudaMemcpy(results, dev_Results, steps * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaFree(dev_Results);
    // PrintLastCUDAError();
    return;
}
|
860
|
// 16CO145 Sumukha PK
// 16CO234 Prajval M
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <math.h>
// Fill A[idx] with the constant `num` for every index below `size`.
// (`MAX_THREAD` is unused; kept for signature compatibility with callers.)
// Fix: added a bounds guard so grids that overshoot the array do not write
// out of bounds.
__global__ void generate(float * A, int size, int num, int MAX_THREAD){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < size) A[idx] = num;
}
// Fill the first `size` entries of A with pseudo-random values from rand().
// The caller is responsible for seeding (srand) if reproducibility matters.
void generate_in_cpu(float *A, int size){
    for (float *p = A, *end = A + size; p != end; ++p) {
        *p = rand();
    }
}
// Element-wise sum: C[idx] = A[idx] + B[idx] for idx in [0, size).
// (`MAX_THREAD` is unused; kept for signature compatibility with callers.)
// Fix: added a bounds guard so grids that overshoot the arrays do not access
// out-of-bounds memory.
__global__ void sum(float * A, float * B, float * C, int size, int MAX_THREAD){
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    if (idx < size) C[idx] = A[idx] + B[idx];
}
// Generate two host vectors of random length (> 16000), add them on the GPU
// and print the results.
// Fixes: (1) the H2D copies transferred d_itr floats from host buffers that
// hold only h_itr floats — an out-of-bounds host read; copy h_itr instead.
// (2) All six buffers leaked; they are now released. (3) "not not" typo.
int main(){
    int deviceCount;
    cudaGetDeviceCount(&deviceCount);
    if(deviceCount > 0){
        cudaDeviceProp devProp;
        cudaGetDeviceProperties(&devProp, 0); // Use default GPU
#define MAX_THREAD devProp.maxThreadsDim[0]
#define MAX_BLOCK devProp.maxGridSize[0]
        // Host length in [16000, 32000]; device length rounded up to a whole
        // number of blocks so the grid covers it exactly.
        int h_itr = rand() % (16001) + 16000;
        int d_itr = ceil((float)h_itr/MAX_THREAD) * MAX_THREAD;
        float * h_A, * h_B, * h_C;
        float * d_A, * d_B, * d_C;
        h_A = (float *)malloc(h_itr * sizeof(float));
        h_B = (float *)malloc(h_itr * sizeof(float));
        h_C = (float *)malloc(h_itr * sizeof(float));
        cudaMalloc((void **)&d_A, d_itr*sizeof(float));
        cudaMalloc((void **)&d_B, d_itr*sizeof(float));
        cudaMalloc((void **)&d_C, d_itr*sizeof(float));
        int blocks = d_itr / MAX_THREAD;
        if(blocks < MAX_BLOCK){
            generate_in_cpu(h_A, h_itr); // generating A array of random numbers with size > 16000
            generate_in_cpu(h_B, h_itr); // generating B array of random numbers with size > 16000
            // Only h_itr elements exist on the host; the device tail past
            // h_itr is never read back, so it may stay uninitialized.
            cudaMemcpy( d_A, h_A, h_itr * sizeof(float), cudaMemcpyHostToDevice);
            cudaMemcpy( d_B, h_B, h_itr * sizeof(float), cudaMemcpyHostToDevice);
            sum<<<blocks, MAX_THREAD>>>(d_A, d_B, d_C, d_itr, MAX_THREAD);
            cudaMemcpy( h_A, d_A, h_itr * sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy( h_B, d_B, h_itr * sizeof(float), cudaMemcpyDeviceToHost);
            cudaMemcpy( h_C, d_C, h_itr * sizeof(float), cudaMemcpyDeviceToHost);
            for(int i = 0 ; i< h_itr; i++){
                printf("A[%d] = %f B[%d] = %f C[%d] = %f\n", i, h_A[i], i, h_B[i], i, h_C[i]);
            }
        }
        // Release host and device buffers (previously leaked).
        free(h_A); free(h_B); free(h_C);
        cudaFree(d_A); cudaFree(d_B); cudaFree(d_C);
    }
    else{
        printf("Nvidia GPU not found");
    }
}
|
861
|
#include "thrust/device_ptr.h"
#include "thrust/sort.h"
// Initialize hash/id pairs: each particle gets the constant hash 1 and its
// own global index as particle id.
__global__ void calculate_hash(uint *hash_values, uint *particle_ids, int length)
{
    const int gid = blockIdx.x*blockDim.x + threadIdx.x;
    if (gid < length) {
        hash_values[gid] = 1;
        particle_ids[gid] = gid;
    }
}
// Compute per-particle hashes on the GPU, then sort the particle ids by hash
// in place using thrust.
void hash_particles_gpu(uint *d_hash_values, uint *d_particle_ids, int length)
{
    const int block_size = 256;
    const int num_blocks = ceil(length/(float)block_size);   // ceil-div grid
    calculate_hash<<<num_blocks, block_size>>>(d_hash_values, d_particle_ids, length);
    // Make sure the hashes are ready before thrust consumes them.
    cudaDeviceSynchronize();
    thrust::device_ptr<uint> keys(d_hash_values);
    thrust::device_ptr<uint> vals(d_particle_ids);
    thrust::sort_by_key(keys, keys + length, vals);
}
// Small driver: allocate hash/id arrays on the device and run the hash+sort
// pipeline. Fix: the device allocations (and m_int under BROKE) leaked; they
// are now released, and main returns an explicit status.
int main(int argc, char *argv[])
{
    int length = 15;
    int bytes;
#ifdef BROKE
    int *m_int;
    cudaMallocManaged((void**)&m_int, sizeof(int));
#endif
    // Allocate uint hash value array
    bytes = length*sizeof(unsigned int);
    unsigned int *hash_values;
    cudaMalloc((void**)&hash_values, bytes);
    // Allocate uint particle ID array (same size as the hash array)
    unsigned int *particle_ids;
    cudaMalloc((void**)&particle_ids, bytes);
    hash_particles_gpu(hash_values, particle_ids, length);
    // Release device allocations (previously leaked).
    cudaFree(hash_values);
    cudaFree(particle_ids);
#ifdef BROKE
    cudaFree(m_int);
#endif
    return 0;
}
|
862
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <iostream>
// Mark "Nabeatsu" numbers: result[i] is true when (i+1) is divisible by 3
// or contains the digit 3 in base 10. One thread per number.
__global__ void nabeatsuKernel(bool* result, int result_len)
{
    const int idx = (blockIdx.x * blockDim.x) + threadIdx.x;
    if (idx >= result_len) return;
    const int n = idx + 1;
    bool hit = (n % 3) == 0;
    // Scan the decimal digits for a 3.
    for (int rest = n; rest > 0; rest /= 10) {
        hit = hit || (rest % 10) == 3;
    }
    result[idx] = hit;
}
// Print the key properties of CUDA device 0 to stdout.
// Fix: the device-name label was misspelled "Devuce".
void showInfo() {
    cudaDeviceProp devProp;
    cudaGetDeviceProperties(&devProp, 0);
    std::cout << "Device: " << devProp.name << std::endl;
    std::cout << "Global memory available on device in bytes: " << devProp.totalGlobalMem << std::endl;
    std::cout << "Shared memory available per block in bytes: " << devProp.sharedMemPerBlock << std::endl;
    std::cout << "Warp size in threads: " << devProp.warpSize << std::endl;
    std::cout << "Maximum number of threads per block: " << devProp.maxThreadsPerBlock << std::endl;
    std::cout << "Compute capacity: " << devProp.major << "." << devProp.minor << std::endl;
    std::cout << "Clock frequency in kilohertz: " << devProp.clockRate << std::endl;
    std::cout << "Number of multiprocessors on device: " << devProp.multiProcessorCount << std::endl;
}
// Query the maximum threads-per-block supported by device 0.
int getMaxThreadsPerBlock() {
    cudaDeviceProp props;
    cudaGetDeviceProperties(&props, 0);
    return props.maxThreadsPerBlock;
}
// Run nabeatsuKernel with the given launch configuration and copy the
// per-number flags back into `result` (host array of result_len bools).
// Returns true on success, false if any CUDA step fails (with stderr output).
bool invokeNabeatsu(bool *result, int result_len, int nBlock, int nThread)
{
    cudaError_t cudaStatus;
    // Select GPU 0.
    cudaStatus = cudaSetDevice(0);
    if (cudaStatus != cudaSuccess) {
        std::cerr << "cudaSetDevice failed!" << std::endl;
        return false;
    }
    // Device-side output buffer.
    bool* dev_result = NULL;
    cudaStatus = cudaMalloc((void**)&dev_result, result_len * sizeof(bool));
    if (cudaStatus != cudaSuccess) {
        std::cerr << "cudaMalloc failed!" << std::endl;
        return false;
    }
    nabeatsuKernel <<<nBlock, nThread>>> (dev_result, result_len);
    // Launch-configuration errors surface via cudaGetLastError...
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        std::cerr << "nabeatsuKernel failed: " << cudaGetErrorString(cudaStatus) << std::endl;
        cudaFree(dev_result);
        return false;
    }
    // ...while in-kernel faults surface at synchronization.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        std::cerr << "cudaDeviceSynchronize failed!" << std::endl;
        cudaFree(dev_result);
        return false;
    }
    cudaStatus = cudaMemcpy(result, dev_result, result_len * sizeof(bool), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        std::cerr << "cudaMemcpy failed!" << std::endl;
        cudaFree(dev_result);
        return false;
    }
    cudaFree(dev_result);
    // NOTE(review): cudaDeviceReset() tears down the whole CUDA context for
    // this process; fine for a standalone run, but confirm no further CUDA
    // work is expected after this call.
    cudaDeviceReset();
    return true;
}
|
863
|
#include "includes.h"
// Add a per-channel bias to convolution pre-activations stored as a flat
// array of n_channel * size * size floats. Threads split the N elements
// into contiguous chunks via the (N*pos/totalPos, N*(pos+1)/totalPos) ranges.
__global__ void fp_bias_conv(float* preact, float* bias, const int size, const int n_channel)
{
    const int pos = blockIdx.x * blockDim.x + threadIdx.x;
    const int totalPos = blockDim.x * gridDim.x;
    const int N = n_channel * size * size;
    for (int n = N * pos / totalPos; n < N * (pos+1) / totalPos; ++n) {
        int idx = n;
        // Decompose idx with channel varying fastest, then one spatial axis,
        // then the other.
        const int i_channel = ((idx /= 1 ) % n_channel);
        const int i_row = ((idx /= n_channel ) % size);
        const int i_col = ((idx /= size ) % size);
        // NOTE(review): the store indexes (channel, col, row) while the
        // decomposition names them (channel, row, col). Every element is
        // still touched exactly once, but confirm the intended layout.
        preact[(i_channel * size + i_col) * size + i_row] += bias[i_channel];
    }
}
|
864
|
#include "includes.h"
/*
waveform.cu: functions corresponding to the waveform handling of the SpikeDetect stage.
Function overview:
  comps_wave():    for the components extracted in the detect stage, extract the corresponding wave from the transformed waveform data_t
  normalize():     normalize the waveform potentials using the high threshold ts and the low threshold tw, to simplify the later computation of the masks and the spike-center times
  compute_masks(): compute the masks value for each extracted wave
*/
/*******************************************************copy the components to the wave**************************************************************/
/****************************************************normalize operation*************************************************************/
/****************************************************compute_masks operation*************************************************************/
// Normalize filtered potentials into [0, 1] using a high threshold ts and a
// low threshold tw: values >= ts map to 1, values < tw map to 0, and values
// in between map linearly to (v - tw) / (ts - tw).
// Fix: the middle branch compared nor_ary[tid] — the *output* array, which is
// uninitialized at this point — instead of the input flit_ary[tid].
__global__ void normalize(float *nor_ary, float *flit_ary,float tw,float ts, size_t N)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < N)
    {
        float v = flit_ary[tid];
        if (v >= ts) nor_ary[tid] = 1;
        else if (v < tw) nor_ary[tid] = 0;
        else nor_ary[tid] = (v - tw) / (ts - tw);
    }
}
|
865
|
#include "includes.h"
// Block-wise integer sum reduction: each thread serially sums a slice of
// `idata`, partial sums are combined via a shared-memory tree, and the block
// total is written to odata[blockIdx.x]. Shared memory size must be passed
// as the dynamic launch parameter (blockDim.x ints).
__global__ void iReduceSum(int *idata, int *odata, unsigned int ncols) {
    int i;
    unsigned int tid = threadIdx.x;
    extern __shared__ int sdata[];
    // NOTE(review): startPos = blockDim.x + threadIdx.x is suspicious — it
    // does not involve blockIdx, and together with blockOffset below the
    // thread index is effectively counted twice. Presumably this was meant
    // to be derived from the block's offset into idata; verify with callers.
    unsigned int startPos = blockDim.x + threadIdx.x;
    int colsPerThread = ncols/blockDim.x;   // assumes blockDim.x divides ncols
    int blockOffset = threadIdx.x *(ncols/blockDim.x);
    int myPart = 0;
    // Serial per-thread accumulation over this thread's slice.
    for(i=0;i<colsPerThread;i++) {
        myPart+=idata[blockOffset+startPos+i];
    }
    sdata[tid]=myPart;
    __syncthreads();
    // Tree reduction in shared memory (interleaved addressing; requires a
    // power-of-two blockDim.x).
    unsigned int s;
    for(s=1;s<blockDim.x;s*=2){
        if(tid%(2*s) == 0){
            sdata[tid]+=sdata[tid+s];
        }
        __syncthreads();
    }
    // Thread 0 publishes the block's partial sum.
    if(tid==0)odata[blockIdx.x]=sdata[0];
}
|
866
|
#include "includes.h"
// STREAM "scale" benchmark kernel: b[i] = scale * a[i] for i in [0, len).
// If the grid cannot cover the whole vector, the kernel does nothing at all,
// so the benchmark never runs on a partial array.
__global__ void STREAM_Scale_Optimized_double(double *a, double *b, double scale, size_t len)
{
    if (blockDim.x * gridDim.x < len) return;
    const size_t gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid < len) {
        b[gid] = scale * a[gid];
    }
}
|
867
|
#include <cuda_runtime.h>
#include <iostream>
#include <stdlib.h>
#include <time.h>
// Transpose an n x n matrix: d_bt = transpose(d_b).
// Launched as <<<n, n>>>: blockIdx.x selects the source row and threadIdx.x
// the source column.
__global__ void zhuanshi(int *d_b,int *d_bt,int n)
{
    const int col = threadIdx.x;
    const int row = blockIdx.x;
    d_bt[col * n + row] = d_b[row * n + col];
}
// Intended inner-product kernel: dot product of row ICTdx of d_a with row
// ICBdx of d_bt, tree-reduced through d_data into d_c[ICTdx * n + ICBdx].
// The body is entirely commented out, so the kernel is currently a no-op;
// it appears to have been meant for launch from chengfa via dynamic
// parallelism — TODO confirm before re-enabling.
__global__ void neiji(int *d_a,int *d_bt,int *d_c,int *d_data,int ICTdx,int ICBdx,int n)
{
    /*
    int INTdx = threadIdx.x;
    int i = 2, j = 1;
    d_data[INTdx] = d_a[ICTdx * n + INTdx] * d_bt[ICBdx * n + INTdx];
    __syncthreads();
    while(i <= n)
    {
        if(INTdx % 2 == 0)
        {
            d_data[INTdx] += d_data[INTdx + j];
        }
        i *= 2;
        j *= 2;
    }
    d_c[ICTdx * n + ICBdx] = d_data[0];
    */
}
// Intended matrix-multiplication driver kernel: one (block, thread) pair per
// output element, delegating the dot product to neiji via dynamic
// parallelism. The body is entirely commented out, so the kernel is a no-op;
// the product matrix d_c is therefore never computed (main prints zeros).
__global__ void chengfa(int *d_a,int *d_bt,int *d_c,int *d_data,int n)
{
    /*
    int ICTdx = threadIdx.x;
    int ICBdx = blockIdx.x;
    neiji<<<1,n>>>(d_a,d_bt,d_c,d_data,ICTdx,ICBdx,n);
    __syncthreads();
    */
}
// Interactive driver: read a matrix dimension n, fill two n x n matrices with
// random values, transpose B on the GPU and (via the currently no-op chengfa
// kernel) attempt the product, then print all host-side results.
// Fix: h_bt, d_bt and d_data were leaked; all allocations are now released.
int main()
{
    int blag = 1;   // input-validation loop flag
    int n = 0;
    /* Validate the matrix dimension typed by the user. */
    do{
        std::cout << "请输入矩阵的维度:" << std::endl;
        std::cin >> n;
        if(n <= 0)
        {
            std::cout << "你输入的矩阵维度有误,请重新输入!" << std::endl;
        }else{
            blag = 0;
        }
    }while(blag);
    /* Allocate host buffers. */
    int *h_a = (int*)malloc(sizeof(int) * n * n);
    int *h_b = (int*)malloc(sizeof(int) * n * n);
    int *h_c = (int*)malloc(sizeof(int) * n * n);
    int *h_bt = (int*)malloc(sizeof(int) * n * n);
    /* Initialize host data with small random values. */
    srand(time(NULL));
    for(int i = 0; i < n * n; ++i)
    {
        h_a[i] = rand() % 11;
        h_b[i] = rand() % 11;
        h_c[i] = 0;
        h_bt[i] = 0;
    }
    /* Allocate device buffers. */
    int *d_a,*d_b,*d_c,*d_bt,*d_data;
    cudaMalloc((void**)&d_a,sizeof(int) * n * n);
    cudaMalloc((void**)&d_b,sizeof(int) * n * n);
    cudaMalloc((void**)&d_c,sizeof(int) * n * n);
    cudaMalloc((void**)&d_bt,sizeof(int) * n * n);
    cudaMalloc((void**)&d_data,sizeof(int)*n);
    /* Copy inputs host -> device. */
    cudaMemcpy(d_a,h_a,sizeof(int) * n * n,cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,h_b,sizeof(int) * n * n,cudaMemcpyHostToDevice);
    std::cout << "测试点" << std::endl;
    /* Run the kernels: transpose, then the (currently no-op) multiply. */
    zhuanshi<<<n,n>>>(d_b,d_bt,n);
    chengfa<<<n,n>>>(d_a,d_bt,d_c,d_data,n);
    /* Copy results device -> host (blocking copies also synchronize). */
    cudaMemcpy(h_bt,d_bt,sizeof(int) * n * n,cudaMemcpyDeviceToHost);
    cudaMemcpy(h_c,d_c,sizeof(int) * n * n,cudaMemcpyDeviceToHost);
    std::cout << "CPU内存数据h_a:" << std::endl;
    for(int i = 0; i < n; ++i)
    {
        for(int j = 0; j < n; ++j)
        {
            printf("h_a[%d][%d] = %d\t",i,j,h_a[n * i + j]);
        }
        printf("\n");
    }
    std::cout << "CPU内存数据h_b:" << std::endl;
    for(int i = 0; i < n; ++i)
    {
        for(int j = 0; j < n; ++j)
        {
            printf("h_b[%d][%d] = %d\t",i,j,h_b[n * i + j]);
        }
        printf("\n");
    }
    std::cout << "CPU内存数据h_bt:" << std::endl;
    for(int i = 0; i < n; ++i)
    {
        for(int j = 0; j < n; ++j)
        {
            printf("h_bt[%d][%d] = %d\t",i,j,h_bt[n * i + j]);
        }
        printf("\n");
    }
    std::cout << "GPU内存数据:" << std::endl;
    for(int i = 0; i < n; ++i)
    {
        for(int j = 0; j < n; ++j)
        {
            printf("h_c[%d][%d] = %d\t",i,j,h_c[n * i + j]);
        }
        printf("\n");
    }
    /* Release all memory (the original leaked h_bt, d_bt and d_data). */
    free(h_a);
    free(h_b);
    free(h_c);
    free(h_bt);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaFree(d_bt);
    cudaFree(d_data);
    std::cout << "运行结束" << std::endl;
    return 0;
}
|
868
|
#include "includes.h"
// Store the product a * b into *c (single-result helper kernel).
__global__ void multiply(int a, int b, int *c) {
    c[0] = a * b;
}
|
869
|
#include "includes.h"
// Approximate-median reduction: each block samples MED_BLOCK_SIZE random
// elements of d_in (indices come from pre-generated uniforms in
// d_random_numbers) and repeatedly folds triples via a median-of-three
// network; the surviving element is written to d_out[blockIdx.x].
// Fix: the reduction guard only checked `index < MED_BLOCK_SIZE` but reads
// DATA[index + s] and DATA[index + 2*s], so for block sizes that are not a
// power of three it read past the end of shared memory. The guard now covers
// the whole triple; for power-of-three sizes it is equivalent to the original.
__global__ void median_reduce_shuffle_gpu(const float *d_in, float *d_out, float *d_random_numbers, int n_in) {
    /**************/
    /* initialize */
    /**************/
    // compute indices
    int t_ind = threadIdx.x;
    int g_ind = blockIdx.x * MED_BLOCK_SIZE + t_ind;
    // allocate shared memory
    __shared__ float DATA[MED_BLOCK_SIZE];
    /**************/
    /* load stage */
    /**************/
    // Map a uniform random value in [0,1) to a sample index in [0, n_in).
    int sample_ind = floorf(d_random_numbers[g_ind] * (float)n_in);
    DATA[t_ind] = d_in[sample_ind];
    __syncthreads();
    /*******************/
    /* reduction stage */
    /*******************/
    for (int s = 1; s < MED_BLOCK_SIZE; s *= 3) {
        int index = 3 * s * t_ind;
        if (index + 2 * s < MED_BLOCK_SIZE) {
            // fetch three values
            float value1 = DATA[index];
            float value2 = DATA[index + s];
            float value3 = DATA[index + 2 * s];
            // median-of-three selection network
            float smallest = fminf(value1, value2);
            value2 = fmaxf(value1, value2);
            value1 = smallest;
            value3 = fmaxf(value1, value3);
            value2 = fminf(value2, value3);
            DATA[index] = value2;
        }
        __syncthreads();
    }
    /***************/
    /* write stage */
    /***************/
    // write this block's approx median (first element)
    if (t_ind == 0) {
        d_out[blockIdx.x] = DATA[0];
    }
}
|
870
|
#include <stdio.h>
#include <math.h>
#define MAX 8192
#define LOG_MAX 13
#define BLOCK_SIZE 512
#define N 10
// In-place butterfly stages of a radix-2 DIT FFT over MAX (=8192) complex
// samples, split into real (x_r) and imaginary (x_i) arrays.
// NOTE(review): only the butterfly stages are performed — this appears to
// assume the input is already in bit-reversed order; confirm with the caller.
__host__ void fftHost(float x_r[], float x_i[])
{
    float tmp_r, tmp_i ;
    int i, j, i_lower ;
    int stage, dft_pts, num_bf;
    float pi;
    pi = -2 * M_PI;   // negative sign: forward transform
    float arg, e, cos_result, sin_result;
    for (stage = 1; stage <= LOG_MAX; stage++) {
        dft_pts = 1 << stage;     // sub-DFT size at this stage
        num_bf = dft_pts / 2;     // butterflies per sub-DFT
        e = pi / dft_pts;
        for (j = 0; j < num_bf; j++) {
            // Twiddle factor W = cos(arg) + i*sin(arg), shared by one column
            // of butterflies across all sub-DFTs.
            arg = e * j;
            cos_result = cos(arg);
            sin_result = sin(arg);
            for (i = j; i < MAX; i += dft_pts) {
                i_lower = i + num_bf;
                // Butterfly: (upper, lower) -> (upper + W*lower, upper - W*lower).
                tmp_r = x_r[i_lower] * cos_result - x_i[i_lower] * sin_result;
                tmp_i = x_i[i_lower] * cos_result + x_r[i_lower] * sin_result;
                x_r[i_lower] = x_r[i] - tmp_r;
                x_i[i_lower] = x_i[i] - tmp_i;
                x_r[i] = x_r[i] + tmp_r;
                x_i[i] = x_i[i] + tmp_i;
            }
        }
    }
}
// Device-side butterfly stages of the 8192-point FFT: BLOCK_SIZE threads each
// handle MAX/(2*BLOCK_SIZE) butterflies per stage (same arithmetic as fftHost).
__global__ void fftKernel(float *dx_r, float *dx_i) {
    int thread_id = blockDim.x * blockIdx.x + threadIdx.x;
    float tmp_r, tmp_i;
    int i, j, i_lower;
    int dft_pts, num_bf;
    float pi;
    float arg, e, cos_result, sin_result;
    pi = -2 * M_PI;   // negative sign: forward transform
    for (int stage = 1; stage <= 13; stage++) {   // 13 == LOG_MAX
        dft_pts = 1 << stage;     // sub-DFT size at this stage
        num_bf = dft_pts / 2;     // butterflies per sub-DFT
        e = pi / dft_pts;
        if (thread_id < BLOCK_SIZE) {
            int start = MAX/(2*BLOCK_SIZE) * thread_id;
            for (int k = 0; k < MAX/(2*BLOCK_SIZE); k++) {
                // Map this thread's k-th butterfly to the global upper index
                // i of its butterfly pair at this stage.
                i = (start + k) + int(pow(2, stage - 1)) * ((start + k) / int(pow(2, stage - 1)));
                j = i % num_bf;   // twiddle column within the sub-DFT
                arg = e * j;
                cos_result = cos(arg);
                sin_result = sin(arg);
                i_lower = i + num_bf;
                tmp_r = dx_r[i_lower] * cos_result - dx_i[i_lower] * sin_result;
                tmp_i = dx_i[i_lower] * cos_result + dx_r[i_lower] * sin_result;
                dx_r[i_lower] = dx_r[i] - tmp_r;
                dx_i[i_lower] = dx_i[i] - tmp_i;
                dx_r[i] = dx_r[i] + tmp_r;
                dx_i[i] = dx_i[i] + tmp_i;
            }
        }
        // NOTE(review): __syncthreads() only synchronizes within one block,
        // so stages would race across blocks if this kernel were launched
        // with more than one block — confirm it runs as <<<1, BLOCK_SIZE>>>.
        __syncthreads();
    }
}
|
871
|
/*
** This program finds out the transfer bandwidth for a given transfer size (cudaMemcpy host to device).
*/
#include <stdio.h>
#define PG (4*1024)
// Single-thread kernel used by the transfer-bandwidth experiment.
// The inner loop's increment equals its bound, so it executes exactly once
// per outer iteration: the net effect is x[i] += 1024 * 2^(i+2) for i in 0..8.
// NOTE(review): presumably this was meant to stride across the buffer in
// page/transfer-sized steps rather than only touching x[0..8] — verify intent.
__global__ void add_kern(float *x)
{
    int current = 0;
    for (int i = 0; i < 9; i++) {
        for (; current < (int)(1024 * (1<<(i+2))); current+=(int)(1024 * (1<<(i+2)))){
            x[i] += (int)(1024*pow(2.0,(i+2)));
        }
    }
}
// Driver for the bandwidth experiment using managed memory.
// Fixes: (1) x[0] was read on the host right after the asynchronous kernel
// launch with no synchronization — a race on managed memory; synchronize
// first. (2) The kernel accumulates with +=, but cudaMallocManaged does not
// guarantee zero-initialized memory; clear the buffer before launching.
int main(void)
{
    int N = 2044*1024;
    float *x;
    cudaMallocManaged( &x, N*sizeof(float) );
    cudaMemset( x, 0, N*sizeof(float) );
    add_kern<<<1,1>>>(x);
    // Kernel launches are asynchronous: wait before the host touches x.
    cudaDeviceSynchronize();
    printf("x: %f\n", x[0]);
    // Free memory
    cudaFree(x);
    return 0;
}
|
872
|
#include "includes.h"
// Scatter-add gradients from grouped points back to their source points.
// One block per batch element; threads stride over the m grouped points.
//   idx:         [b, m, nsample] source-point indices
//   grad_out:    [b, m, nsample, c] incoming gradients
//   grad_points: [b, n, c] accumulated output gradients
__global__ void group_point_grad_gpu(int b, int n, int c, int m, int nsample, const float *grad_out, const int *idx, float *grad_points) {
    const int batch_index = blockIdx.x;
    const int *idx_b = idx + m*nsample*batch_index;
    const float *grad_out_b = grad_out + m*nsample*c*batch_index;
    float *grad_points_b = grad_points + n*c*batch_index;
    for (int j = threadIdx.x; j < m; j += blockDim.x) {
        for (int k = 0; k < nsample; ++k) {
            const int src = idx_b[j*nsample + k];
            for (int l = 0; l < c; ++l) {
                // Several grouped points can map to the same source point,
                // so accumulation must be atomic.
                atomicAdd(&grad_points_b[src*c + l], grad_out_b[j*nsample*c + k*c + l]);
            }
        }
    }
}
|
873
|
#include <stdio.h>
#include <stdlib.h>
#include <thrust/sort.h>
#include <thrust/merge.h>
#define NUM_SETS 100000
#define DSIZE 100
typedef int mytype;
// for ascending sorted data
#define cmp(A,B) ((A)<(B))
#define nTPB 512
#define nBLK 128
#include <time.h>
#include <sys/time.h>
#define USECPSEC 1000000ULL
// Return microseconds elapsed since `start`; pass 0 to obtain the current
// wall-clock time in microseconds since the epoch.
long long dtime_usec(unsigned long long start){
    timeval now;
    gettimeofday(&now, 0);
    const unsigned long long now_us = now.tv_sec*USECPSEC + now.tv_usec;
    return now_us - start;
}
// Sequential two-way merge of the sorted runs a (len_a elems) and b (len_b
// elems) into c, with optional element strides to support column-major
// layouts. Runs on both host and device; ordering comes from the cmp(A,B)
// macro (ascending).
template <typename T>
__host__ __device__ void smerge(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, const unsigned len_a, const unsigned len_b, const unsigned stride_a = 1, const unsigned stride_b = 1, const unsigned stride_c = 1){
    unsigned len_c = len_a+len_b;
    unsigned nc = 0;   // elements written to c
    unsigned na = 0;   // elements consumed from a
    unsigned nb = 0;   // elements consumed from b
    // fa/fb flag that the OTHER run is exhausted, so just drain this one.
    unsigned fa = (len_b == 0);
    unsigned fb = (len_a == 0);
    // NOTE(review): a[0] and b[0] are read unconditionally, and the drain
    // branches below pre-load one element past the end of the run on their
    // final iteration — callers must allocate one extra readable slot
    // (main's cudaMalloc of DSIZE*NUM_SETS+1 does exactly this).
    T nxta = a[0];
    T nxtb = b[0];
    while (nc < len_c){
        if (fa) {c[stride_c*nc++] = nxta; na++; nxta = a[stride_a*na];}
        else if (fb) {c[stride_c*nc++] = nxtb; nb++; nxtb = b[stride_b*nb];}
        else if (cmp(nxta,nxtb)){
            c[stride_c*nc++] = nxta;
            na++;
            // Last element of a consumed: switch to draining b.
            if (na == len_a) fb++;
            else nxta = a[stride_a*na];}
        else {
            c[stride_c*nc++] = nxtb;
            nb++;
            // Last element of b consumed: switch to draining a.
            if (nb == len_b) fa++;
            else nxtb = b[stride_b*nb];}}
}
// Row-major merge test: grid-stride loop over array pairs; pair idx merges
// a[idx*len ..) and b[idx*len ..) into c[2*idx*len ..).
template <typename T>
__global__ void rmtest(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, int num_arr, int len){
    for (int idx = threadIdx.x + blockDim.x*blockIdx.x; idx < num_arr; idx += blockDim.x*gridDim.x){
        const int base = idx*len;
        smerge(a+base, b+base, c+2*base, len, len);
    }
}
// Column-major merge test: grid-stride loop over array pairs; pair idx starts
// at element offset idx and walks its runs with the given strides.
template <typename T>
__global__ void cmtest(const T * __restrict__ a, const T * __restrict__ b, T * __restrict__ c, int num_arr, int len, int stride_a, int stride_b, int stride_c){
    for (int idx = threadIdx.x + blockDim.x*blockIdx.x; idx < num_arr; idx += blockDim.x*gridDim.x){
        smerge(a+idx, b+idx, c+idx, len, len, stride_a, stride_b, stride_c);
    }
}
// Validate row-major GPU merge results against thrust::merge on the host.
// Returns 1 on success, 0 at the first mismatch (with diagnostic output).
// Fix: the scratch buffer vc was never freed — and leaked on every
// early-return mismatch path as well.
template <typename T>
int rmvalidate(T *a, T *b, T *c, int num_arr, int len){
    T *vc = (T *)malloc(2*len*sizeof(T));
    int ok = 1;
    for (int i = 0; i < num_arr && ok; i++){
        thrust::merge(a+(i*len), a+((i+1)*len), b+(i*len), b+((i+1)*len), vc);
#ifndef TIMING
        for (int j = 0; j < len*2; j++)
            if (vc[j] != c[(i*2*len)+j]) {printf("rm mismatch i: %d, j: %d, was: %d, should be: %d\n", i, j, c[(i*2*len)+j], vc[j]); ok = 0; break;}
#endif
    }
    free(vc);
    return ok;
}
// Compare row-major results c1 against column-major results c2:
// c1[i][j] (rows of 2*len) must equal c2[j][i] (rows of num_arr).
// Returns 1 when all elements agree, 0 at the first mismatch.
template <typename T>
int cmvalidate(const T *c1, const T *c2, int num_arr, int len){
    const int row_len = 2*len;
    for (int i = 0; i < num_arr; i++)
        for (int j = 0; j < row_len; j++){
            const T rm = c1[i*row_len + j];
            const T cm = c2[j*num_arr + i];
            if (rm != cm) {printf("cm mismatch i: %d, j: %d, was: %d, should be: %d\n", i, j, cm, rm); return 0;}
        }
    return 1;
}
// Benchmark/validate smerge in row-major and column-major storage.
// Fixes: (1) ch_c was allocated DSIZE*NUM_SETS elements but the D2H copy
// writes 2*DSIZE*NUM_SETS — a heap overflow; allocate the full size.
// (2) gtime/ctime were `unsigned long` but dtime_usec returns long long.
// (3) All host and device buffers leaked; they are now released.
int main(){
    mytype *h_a, *h_b, *h_c, *d_a, *d_b, *d_c;
    h_a = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
    h_b = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
    h_c = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype)*2);
    // +1 element: smerge's drain path pre-loads one element past the end.
    cudaMalloc(&d_a, (DSIZE*NUM_SETS+1)*sizeof(mytype));
    cudaMalloc(&d_b, (DSIZE*NUM_SETS+1)*sizeof(mytype));
    cudaMalloc(&d_c, DSIZE*NUM_SETS*sizeof(mytype)*2);
    // test "row-major" storage
    for (int i =0; i<DSIZE*NUM_SETS; i++){
        h_a[i] = rand();
        h_b[i] = rand();}
    thrust::sort(h_a, h_a+DSIZE*NUM_SETS);
    thrust::sort(h_b, h_b+DSIZE*NUM_SETS);
    cudaMemcpy(d_a, h_a, DSIZE*NUM_SETS*sizeof(mytype), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, DSIZE*NUM_SETS*sizeof(mytype), cudaMemcpyHostToDevice);
    long long gtime = dtime_usec(0);
    rmtest<<<nBLK, nTPB>>>(d_a, d_b, d_c, NUM_SETS, DSIZE);
    cudaDeviceSynchronize();
    gtime = dtime_usec(gtime);
    cudaMemcpy(h_c, d_c, DSIZE*NUM_SETS*2*sizeof(mytype), cudaMemcpyDeviceToHost);
    long long ctime = dtime_usec(0);
    if (!rmvalidate(h_a, h_b, h_c, NUM_SETS, DSIZE)) {printf("fail!\n"); return 1;}
    ctime = dtime_usec(ctime);
    printf("CPU time: %f, GPU RM time: %f\n", ctime/(float)USECPSEC, gtime/(float)USECPSEC);
    // test "col-major" storage
    mytype *ch_a, *ch_b, *ch_c;
    ch_a = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
    ch_b = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype));
    // Must hold 2*DSIZE*NUM_SETS elements (the merged output).
    ch_c = (mytype *)malloc(DSIZE*NUM_SETS*sizeof(mytype)*2);
    for (int i = 0; i < NUM_SETS; i++)
        for (int j = 0; j < DSIZE; j++){
            ch_a[j*NUM_SETS+i] = h_a[i*DSIZE+j];
            ch_b[j*NUM_SETS+i] = h_b[i*DSIZE+j];}
    cudaMemcpy(d_a, ch_a, DSIZE*NUM_SETS*sizeof(mytype), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, ch_b, DSIZE*NUM_SETS*sizeof(mytype), cudaMemcpyHostToDevice);
    gtime = dtime_usec(0);
    cmtest<<<nBLK, nTPB>>>(d_a, d_b, d_c, NUM_SETS, DSIZE, NUM_SETS, NUM_SETS, NUM_SETS );
    cudaDeviceSynchronize();
    gtime = dtime_usec(gtime);
    cudaMemcpy(ch_c, d_c, DSIZE*NUM_SETS*2*sizeof(mytype), cudaMemcpyDeviceToHost);
    if (!cmvalidate(h_c, ch_c, NUM_SETS, DSIZE)) {printf("fail!\n"); return 1;}
    printf("GPU CM time: %f\n", gtime/(float)USECPSEC);
    // Release all buffers (previously leaked).
    free(h_a); free(h_b); free(h_c);
    free(ch_a); free(ch_b); free(ch_c);
    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}
|
874
|
#include <math.h>
#include <stdio.h>
// One Jacobi relaxation step on the interior of an n x m grid, plus a
// per-block max-|delta| reduction into lchange[block].
// There is no bounds check: the launch (gx=(n-2)/bx, gy=(m-2)/by in
// JacobiGPU) assumes (n-2) and (m-2) are exact multiples of the block dims,
// so i, j always land in the interior.
extern "C" __global__ void
jacobikernel( float* a, float* newa, float* lchange, int n, int m, float w0, float w1, float w2 )
{
    int ti = threadIdx.x;
    int tj = threadIdx.y;
    int i = blockIdx.x * blockDim.x + ti + 1;   // +1: skip the boundary column
    int j = blockIdx.y * blockDim.y + tj + 1;   // +1: skip the boundary row
    // 9-point weighted stencil: center (w0), edge neighbours (w1), diagonals (w2).
    newa[j*m+i] = w0*a[j*m+i] +
        w1 * (a[j*m+i-1] + a[(j-1)*m+i] +
              a[j*m+i+1] + a[(j+1)*m+i]) +
        w2 * (a[(j-1)*m+i-1] + a[(j+1)*m+i-1] +
              a[(j-1)*m+i+1] + a[(j+1)*m+i+1]);
    // Block-level max reduction of the local change; requires
    // blockDim.x * blockDim.y <= 256 and a power-of-two thread count.
    __shared__ float mychange[256];
    int ii = ti+blockDim.x*tj;
    mychange[ii] = fabsf( newa[j*m+i] - a[j*m+i] );
    __syncthreads();
    int nn = blockDim.x * blockDim.y;
    while( (nn>>=1) > 0 ){
        if( ii < nn )
            mychange[ii] = fmaxf( mychange[ii], mychange[ii+nn] );
        __syncthreads();
    }
    // First thread publishes this block's maximum change.
    if( ii == 0 )
        lchange[blockIdx.x + gridDim.x*blockIdx.y] = mychange[0];
}
// Max-reduction of lchange[0..n) into lchange[0]. Launched as a single block
// (bx*by threads); requires a power-of-two blockDim.x <= 256.
// Fix: the serial fold looped `while (m <= n)` reading lchange[ii+m], which
// runs past the end of the array whenever ii + m >= n; the loop condition now
// guards the actual index being read.
extern "C" __global__ void
reductionkernel( float* lchange, int n )
{
    __shared__ float mychange[256];
    float mych = 0.0f;
    int ii = threadIdx.x, m;
    if( ii < n ) mych = lchange[ii];
    // Serially fold the elements beyond the first blockDim.x into each thread.
    m = blockDim.x;
    while( ii + m < n ){
        mych = fmaxf( mych, lchange[ii+m] );
        m += blockDim.x;
    }
    mychange[ii] = mych;
    __syncthreads();
    // Standard shared-memory tree reduction.
    int nn = blockDim.x;
    while( (nn>>=1) > 0 ){
        if( ii < nn )
            mychange[ii] = fmaxf(mychange[ii],mychange[ii+nn]);
        __syncthreads();
    }
    if( ii == 0 )
        lchange[0] = mychange[0];
}
static float sumtime;
// Iterate Jacobi relaxation on the n x m host grid `a` until the maximum
// per-cell change drops to `tol`, timing the kernel portion with CUDA events.
// Assumes (n-2) and (m-2) are multiples of the 16x16 block (no remainder
// handling in jacobikernel).
extern "C"
void JacobiGPU( float* a, int n, int m, float w0, float w1, float w2, float tol )
{
    float change;
    int iters;
    size_t memsize;
    int bx, by, gx, gy;
    float *da, *dnewa, *lchange;
    cudaEvent_t e1, e2;
    bx = 16;
    by = 16;
    gx = (n-2)/bx;
    gy = (m-2)/by;
    sumtime = 0.0f;
    memsize = sizeof(float) * n * m;
    cudaMalloc( &da, memsize );
    cudaMalloc( &dnewa, memsize );
    cudaMalloc( &lchange, gx * gy * sizeof(float) );
    cudaEventCreate( &e1 );
    cudaEventCreate( &e2 );
    dim3 block( bx, by );
    dim3 grid( gx, gy );
    iters = 0;
    do{
        float msec;
        ++iters;
        // NOTE(review): both device buffers are re-uploaded from the host and
        // the result downloaded every iteration — correct, but the GPU never
        // ping-pongs da/dnewa in place, so this loop is transfer-bound.
        cudaMemcpy( da, a, memsize, cudaMemcpyHostToDevice );
        cudaMemcpy( dnewa, a, memsize, cudaMemcpyHostToDevice );
        cudaEventRecord( e1 );
        jacobikernel<<< grid, block >>>( da, dnewa, lchange, n, m, w0, w1, w2 );
        // Single-block reduction of the gx*gy per-block maxima into lchange[0].
        reductionkernel<<< 1, bx*by >>>( lchange, gx*gy );
        cudaEventRecord( e2 );
        // The blocking copies below also guarantee e2 has completed before
        // cudaEventElapsedTime is queried.
        cudaMemcpy( a, dnewa, memsize, cudaMemcpyDeviceToHost );
        cudaMemcpy( &change, lchange, sizeof(float), cudaMemcpyDeviceToHost );
        cudaEventElapsedTime( &msec, e1, e2 );
        sumtime += msec;
    }while( change > tol );
    printf( "JacobiGPU converged in %d iterations to residual %f\n", iters, change );
    printf( "JacobiGPU used %f seconds total\n", sumtime/1000.0f );
    cudaFree( da );
    cudaFree( dnewa );
    cudaFree( lchange );
    cudaEventDestroy( e1 );
    cudaEventDestroy( e2 );
}
|
875
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <time.h>
#include <algorithm>
#include <iostream>
#include <cuda_fp16.h>
using namespace std;
#define N 32*1024*1024
#define kBlockSize 256
// CUDA: grid stride looping
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int32_t i = blockIdx.x * blockDim.x + threadIdx.x, step = blockDim.x * gridDim.x; i < (n); \
i += step)
// Upsample Nearest2D Kernel is copyed from https://github.com/Oneflow-Inc/oneflow/blob/master/oneflow/user/kernels/upsample_nearest_kernel.cu#L78
/* Pair of two T values aligned to 2*sizeof(T), so one aligned store writes
   both horizontal neighbours of the 2x nearest upsample. */
template<typename T>
struct alignas(2 * sizeof(T)) Pack2X {
  T x;
  T y;
};
/* 2x nearest-neighbour upsampling, forward pass.
   in_dptr holds N*C planes of in_height x in_width elements; out_dptr is
   the 2x-upsampled result.  Each input element is replicated into a 2x2
   output patch: the two horizontal neighbours are written with one aligned
   Pack2X store, and two such stores cover the two output rows.
   Requires out_dptr to be aligned for Pack2X (2*sizeof(T)). */
template<typename T>
__global__ void UpsampleNearest2D2XForward(const int32_t in_elem_cnt, const T* in_dptr,
const int32_t in_height, const int32_t in_width,
T* out_dptr) {
  const int32_t in_hw_size = in_width * in_height;
  CUDA_1D_KERNEL_LOOP(index, in_elem_cnt) {
    const T in_value = in_dptr[index];
    const int32_t nc_idx = index / in_hw_size;           /* which (n,c) plane */
    const int32_t hw_off = index - nc_idx * in_hw_size;  /* offset inside plane */
    const int32_t h = hw_off / in_width;
    const int32_t w = hw_off - h * in_width;
    Pack2X<T> out_value{in_value, in_value};
    /* Output viewed as packs: one output row is in_width packs wide, and
       each input plane maps to 2*in_hw_size packs of output. */
    Pack2X<T>* out_pack_dptr = reinterpret_cast<Pack2X<T>*>(out_dptr);
    out_pack_dptr[nc_idx * in_hw_size * 2 + h * 2 * in_width + w] = out_value;
    out_pack_dptr[nc_idx * in_hw_size * 2 + (h * 2 + 1) * in_width + w] = out_value;
  }
}
/* Backward pass of the 2x nearest upsample: each dx element accumulates the
   four gradients of its 2x2 output patch.  dy is read as Pack2X pairs (two
   horizontally adjacent gradients per aligned load), so dy_dptr must be
   Pack2X-aligned.  in_elem_cnt is the number of dx elements. */
template<typename T>
__global__ void UpsampleNearest2D2XBackward(const int32_t in_elem_cnt, const T* dy_dptr,
const int32_t dx_height, const int32_t dx_width,
T* dx_dptr) {
  const int32_t dx_hw_size = dx_height * dx_width;
  CUDA_1D_KERNEL_LOOP(index, in_elem_cnt) {
    T dx_value = 0.0;
    const int32_t nc_idx = index / dx_hw_size;              /* (n,c) plane */
    const int32_t dx_hw_off = index - nc_idx * dx_hw_size;  /* offset in plane */
    const int32_t dx_h = dx_hw_off / dx_width;
    const int32_t dx_w = dx_hw_off - dx_h * dx_width;
    const Pack2X<T>* dy_pack_dptr = reinterpret_cast<const Pack2X<T>*>(dy_dptr);
    /* The two packs covering the upper and lower rows of the 2x2 patch. */
    const Pack2X<T> dy_pack_value1 =
    dy_pack_dptr[nc_idx * dx_hw_size * 2 + dx_h * 2 * dx_width + dx_w];
    const Pack2X<T> dy_pack_value2 =
    dy_pack_dptr[nc_idx * dx_hw_size * 2 + (dx_h * 2 + 1) * dx_width + dx_w];
    dx_value += dy_pack_value1.x;
    dx_value += dy_pack_value1.y;
    dx_value += dy_pack_value2.x;
    dx_value += dy_pack_value2.y;
    dx_dptr[index] = dx_value;
  }
}
/* Driver: upsample N float ones (viewed as 1024x1024 planes) by 2x and
   print the first 50 outputs.  N is a multiple of kBlockSize, so the
   one-thread-per-element grid is exact. */
int main(){
    float *input_host = (float*)malloc(N*sizeof(float));
    float *input_device;
    cudaMalloc((void **)&input_device, N*sizeof(float));
    for (int i = 0; i < N; i++) input_host[i] = 1.0;
    cudaMemcpy(input_device, input_host, N*sizeof(float), cudaMemcpyHostToDevice);
    /* 2x upsampling doubles both H and W: the output is 4x larger. */
    float *output_host = (float*)malloc(N * 4 * sizeof(float));
    float *output_device;
    cudaMalloc((void **)&output_device, N * 4 * sizeof(float));
    dim3 grid(N / kBlockSize, 1);
    dim3 block(kBlockSize, 1);
    UpsampleNearest2D2XForward<<<grid, block>>>(N, input_device, 1024, 1024, output_device);
    /* Surface launch errors instead of silently printing garbage. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("kernel launch failed: %s\n", cudaGetErrorString(err));
        return 1;
    }
    cudaMemcpy(output_host, output_device, N * 4 * sizeof(float), cudaMemcpyDeviceToHost);
    for(int i = 0; i < 50; i++) {
        printf("%.5f\n", output_host[i]);
    }
    /* BUG FIX: the original leaked all four buffers. */
    cudaFree(input_device);
    cudaFree(output_device);
    free(input_host);
    free(output_host);
    return 0;
}
|
876
|
#include "includes.h"
/* Leibniz-series partial sums for pi: each thread accumulates its own
   contiguous chunk of `iterations` terms into piTotal[index]; thread 0
   then adds all partials into piTotal[0].
   NOTE(review): __syncthreads() only synchronizes one block -- if this is
   launched with more than one block, thread 0 may read partials before
   other blocks have written them.  Confirm the launch uses a single block.
   NOTE(review): the do/while consumes terms in pairs (one added, one
   subtracted) and stops at endIteration = start + chunk - 1, so chunk
   coverage is only exact when iterations/totalThreads is even -- verify
   against the caller. */
__global__ void calculatePi(double *piTotal, long int iterations, int totalThreads)
{   long int initIteration, endIteration;
    long int i = 0;
    double piPartial;
    int index = (blockDim.x * blockIdx.x) + threadIdx.x;
    initIteration = (iterations/totalThreads) * index;
    endIteration = initIteration + (iterations/totalThreads) - 1;
    i = initIteration;
    piPartial = 0;
    do{
        piPartial = piPartial + (double)(4.0 / ((i*2)+1));
        i++;
        piPartial = piPartial - (double)(4.0 / ((i*2)+1));
        i++;
    }while(i < endIteration);
    piTotal[index] = piPartial;
    __syncthreads();
    if(index == 0){
        for(i = 1; i < totalThreads; i++)
            piTotal[0] = piTotal[0] + piTotal[i];
    }
}
|
877
|
#define N 10016
#define NR N
#define NC N
#define BLOCKSIZE 8
#include<stdio.h>
#include<stdlib.h>
#include<time.h>
void printMat(float A[NR][NC]);
void initMat(float A[NR][NC], float B[NR][NC]);
__global__ void multiply(float *A, float *B, float *C);
/* Driver: times one NxN GPU matrix multiply end to end (allocation,
   transfer, kernel, copy-back) with the host clock. */
int main(){
    int blocksize = BLOCKSIZE;
    /* Operands live in static storage; they are far too big for the stack. */
    static float A[NR][NC];
    static float B[NR][NC];
    static float C[NR][NC] = {{0}}; /* initialize to 0 */
    clock_t t_begin, t_end;
    double seconds;
    float *dA, *dB, *dC;
    int bytes = NR*NC*sizeof(float);
    t_begin = clock();
    cudaMalloc((void **)&dA, bytes);
    cudaMalloc((void **)&dB, bytes);
    cudaMalloc((void **)&dC, bytes);
    initMat(A, B);
    cudaMemcpy(dA, &A, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, &B, bytes, cudaMemcpyHostToDevice);
    /* One BLOCKSIZE x BLOCKSIZE thread tile per output tile; N is a
       multiple of BLOCKSIZE, so the grid covers the matrix exactly. */
    dim3 gridDims(N/BLOCKSIZE, N/BLOCKSIZE);
    dim3 blockDims(BLOCKSIZE, BLOCKSIZE);
    multiply<<<gridDims, blockDims>>>(dA, dB, dC);
    cudaError_t status = cudaGetLastError();
    if (status != cudaSuccess)
        printf("Error: %s\n", cudaGetErrorString(status));
    cudaMemcpy(&C, dC, bytes, cudaMemcpyDeviceToHost);
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    t_end = clock();
    seconds = ((double)(t_end - t_begin)) / CLOCKS_PER_SEC;
    //printMat(C);
    printf(" \n Time taken is %f for %d matrix size and %d block size \n", seconds, N, blocksize);
    return 0;
}
/* Dump the NR x NC matrix row by row: "ROW k:" then tab-separated values
   with three decimals. */
void printMat(float A[NR][NC]){
    for (int r = 0; r < NR; ++r) {
        printf("ROW %d:", r + 1);
        for (int c = 0; c < NC; ++c)
            printf("%.3f\t", A[r][c]);
        printf("\n");
    }
}
/* Fill A with index products (r*c) and B with index sums (r+c). */
void initMat(float A[NR][NC],float B[NR][NC]){
    for (int r = 0; r < NR; ++r) {
        for (int c = 0; c < NC; ++c) {
            A[r][c] = r * c;
            B[r][c] = r + c;
        }
    }
}
/* Tiled matrix multiply C = A*B over NC-wide row-major matrices.
   Each thread computes one output element; BLOCKSIZE x BLOCKSIZE tiles of
   A and B are staged through shared memory.  Assumes NC is a multiple of
   BLOCKSIZE and the launch grid exactly covers the matrix. */
__global__ void multiply(float *A, float *B, float *C){
    // thread position in block
    int row = threadIdx.y;
    int col = threadIdx.x;
    // absolute position
    int absRow = blockIdx.y*blockDim.y + threadIdx.y;
    int absCol = blockIdx.x*blockDim.x + threadIdx.x;
    int index = absRow*NC + absCol; // location in contiguous 1-d
    int j;
    // BUG FIX: the accumulator was declared int, silently truncating every
    // float product; accumulate in float.
    float sum = 0.0f;
    for(j=0;j<NC/BLOCKSIZE;j++){
        __shared__ float Apatch[BLOCKSIZE][BLOCKSIZE];
        __shared__ float Bpatch[BLOCKSIZE][BLOCKSIZE];
        // each thread loads one element of the current A and B tiles
        Apatch[row][col] = A[absRow*NC+j*BLOCKSIZE+col];
        Bpatch[row][col] = B[absCol+j*BLOCKSIZE*NC+row*NC];
        __syncthreads();   // tiles fully loaded before any thread reads them
        int i;
        for(i=0; i<BLOCKSIZE; i++) sum += Apatch[row][i]*Bpatch[i][col];
        __syncthreads();   // everyone done reading before the next overwrite
    }
    C[index] = sum;
}
|
878
|
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel (see the file banner:
   "automatically generated test. Do not modify").  Mutates `comp` through
   a fixed mix of arithmetic and libm calls driven by the scalar/array
   arguments, then prints the final value with full precision so runs can
   be compared bit-for-bit.  var_4 and var_11 must be device-readable and
   writable arrays of at least var_2 and var_5 elements respectively --
   TODO confirm against the caller. */
__global__
void compute(float comp, int var_1,int var_2,float var_3,float* var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float* var_11,float var_12,float var_13,float var_14,float var_15,float var_16) {
    if (comp <= (var_3 - +0.0f)) {
        for (int i=0; i < var_1; ++i) {
            for (int i=0; i < var_2; ++i) {
                var_4[i] = +1.7374E-42f;
                comp += var_4[i] / var_6 + -0.0f;
                if (comp <= (var_7 * (+1.5845E-35f - floorf(fmodf(-1.1767E35f - +1.5610E-13f / -1.7831E-42f, (var_8 - var_9 / +1.1565E-42f)))))) {
                    float tmp_1 = -1.0796E-42f;
                    comp = tmp_1 - floorf(-1.1935E-43f);
                    float tmp_2 = -1.5271E34f;
                    comp += tmp_2 * (-1.3054E36f - var_10 / ceilf((+1.0139E-27f - +1.4960E-36f)));
                }
                for (int i=0; i < var_5; ++i) {
                    comp += expf((-1.6751E-18f + (-1.0289E36f - (var_12 + (-1.4354E36f * (+1.0830E36f * +1.8676E27f))))));
                    var_11[i] = var_13 - var_14 + (var_15 + var_16);
                    float tmp_3 = +1.6604E-35f;
                    comp = tmp_3 * var_11[i] + +0.0f - +1.0079E-24f;
                }
            }
        }
    }
    printf("%.17g\n", comp);
}
/* Allocate an array of `n` floats (default 10, matching historical use)
   all initialized to `v`.  Returns NULL if the allocation fails; the
   caller owns the memory and releases it with free(). */
float* initPointer(float v, int n = 10) {
  float *ret = (float*) malloc(sizeof(float) * n);
  if (ret == NULL)
    return NULL;
  for(int i=0; i < n; ++i)
    ret[i] = v;
  return ret;
}
/* Driver: parses 17 numeric command-line arguments and feeds them to the
   generated `compute` kernel on a single thread. */
int main(int argc, char** argv) {
  /* BUG FIX: the original indexed argv[1..17] unconditionally. */
  if (argc < 18) {
    printf("usage: %s <17 numeric arguments>\n", argv[0]);
    return 1;
  }
  /* Program variables */
  float tmp_1 = atof(argv[1]);
  int tmp_2 = atoi(argv[2]);
  int tmp_3 = atoi(argv[3]);
  float tmp_4 = atof(argv[4]);
  float* tmp_5 = initPointer( atof(argv[5]) );
  int tmp_6 = atoi(argv[6]);
  float tmp_7 = atof(argv[7]);
  float tmp_8 = atof(argv[8]);
  float tmp_9 = atof(argv[9]);
  float tmp_10 = atof(argv[10]);
  float tmp_11 = atof(argv[11]);
  float* tmp_12 = initPointer( atof(argv[12]) );
  float tmp_13 = atof(argv[13]);
  float tmp_14 = atof(argv[14]);
  float tmp_15 = atof(argv[15]);
  float tmp_16 = atof(argv[16]);
  float tmp_17 = atof(argv[17]);
  /* BUG FIX: the kernel dereferences var_4/var_11, so they must be
     device-visible; the original passed raw host malloc pointers, which
     faults.  Stage them through device allocations. */
  float *d_5, *d_12;
  cudaMalloc((void**)&d_5, sizeof(float) * 10);
  cudaMalloc((void**)&d_12, sizeof(float) * 10);
  cudaMemcpy(d_5, tmp_5, sizeof(float) * 10, cudaMemcpyHostToDevice);
  cudaMemcpy(d_12, tmp_12, sizeof(float) * 10, cudaMemcpyHostToDevice);
  compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,d_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,d_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17);
  cudaDeviceSynchronize();
  /* Release device and host buffers (the original leaked all four). */
  cudaFree(d_5);
  cudaFree(d_12);
  free(tmp_5);
  free(tmp_12);
  return 0;
}
|
879
|
#include "includes.h"
// Element-wise sum C = A + B for a single 2-D thread block: the (x, y)
// thread coordinates are flattened into one linear element index.
__global__ void suma(float *A, float *B, float *C)
{
    int col = threadIdx.x;                 // column inside the block
    int row = threadIdx.y;                 // row inside the block
    int idx = row * blockDim.x + col;      // linear element index
    C[idx] = A[idx] + B[idx];
}
|
880
|
#include <iostream>
#include <cstdlib>
__global__
// Element-wise integer addition cr = ar + br; one thread per element.
// The launch must cover the arrays exactly (no bounds guard here).
void cuda_add(const int *ar, const int *br, int *cr) {
    const unsigned gid = blockDim.x * blockIdx.x + threadIdx.x;
    cr[gid] = ar[gid] + br[gid];
}
__host__
/* Driver: adds two random 2^16-element vectors on the GPU and prints the
   result. */
int main(void) {
    const unsigned threads = 1<<16;
    const unsigned size = threads*sizeof(int);
    // Initialize host arrays with small random summands
    int *a_host = new int[threads];
    int *b_host = new int[threads];
    int *c_host = new int[threads];
    for (unsigned i=0; i<threads; i++) {
        a_host[i] = (std::rand()%10);
        b_host[i] = (std::rand()%10);
    }
    // Initialize device arrays
    int *a_dev = NULL;
    int *b_dev = NULL;
    int *c_dev = NULL;
    cudaMalloc((void**)&a_dev,size);
    cudaMalloc((void**)&b_dev,size);
    cudaMalloc((void**)&c_dev,size);
    // Transfer inputs to the device
    cudaMemcpy((void*)a_dev,(void*)a_host,size,cudaMemcpyHostToDevice);
    cudaMemcpy((void*)b_dev,(void*)b_host,size,cudaMemcpyHostToDevice);
    // Setup and launch kernel.
    // BUG FIX: the launch is <<<blocks, threads-per-block>>>; the original
    // swapped the two arguments and only covered all 2^16 indices because
    // the product happened to be identical.
    const unsigned threads_per_block = 512;
    const unsigned blocks_per_grid = threads / threads_per_block;
    cuda_add<<<blocks_per_grid,threads_per_block>>>(a_dev,b_dev,c_dev);
    // Copy back result and print it
    cudaMemcpy((void*)c_host,(void*)c_dev,size,cudaMemcpyDeviceToHost);
    for (size_t i=0; i<threads; i++) std::cout << c_host[i] << " ";
    std::cout << std::endl;
    // BUG FIX: arrays from new[] must be released with delete[], not delete.
    delete[] a_host;
    delete[] b_host;
    delete[] c_host;
    cudaFree(a_dev);
    cudaFree(b_dev);
    cudaFree(c_dev);
    return 0;
}
|
881
|
#include <stdio.h>
#include <math.h>
#include <stdint.h>
#include <stdlib.h>
#include <cuda.h>
#define WID 1024
#define HEI 1024
#pragma pack(push,1)
/* BMP file header (14 bytes on disk; pack(1) prevents padding after the
   16-bit bfType).  NOTE: bf0ffBits is a long-standing typo of bfOffBits,
   kept because main() assigns it by name. */
typedef struct tagBITMAPFILEHEADER
{
    unsigned short bfType;      /* magic number, 19778 == 'BM' */
    uint32_t bfSize;            /* total file size in bytes */
    unsigned short bfReserved1;
    unsigned short bfReserved2;
    uint32_t bf0ffBits;         /* offset from file start to pixel data */
}BITMAPFILEHEADER;
#pragma pack(pop)
/* BITMAPINFOHEADER (40 bytes); all members naturally aligned, so no
   packing pragma is needed. */
typedef struct tagBITMAPINFOHEADER
{
    uint32_t biSize;            /* header size, always 40 */
    int32_t biWidth;
    int32_t biHeight;
    unsigned short biPlanes;
    unsigned short biBitCount;  /* 8 here: paletted grayscale */
    uint32_t biCompression;
    uint32_t biSizeImage;
    int32_t biXPelsPerMeter;
    int32_t biYPelsPerMeter;
    uint32_t biCirUsed;         /* (sic: biClrUsed) */
    uint32_t biCirImportant;    /* (sic: biClrImportant) */
}BITMAPINFOHEADER;
/* One BGRA palette entry. */
typedef struct tagRGBQUAD
{
    unsigned char rgbBlue;
    unsigned char rgbGreen;
    unsigned char rgbRed;
    unsigned char rgbReserved;
}RGBQUAD;
/* Info header followed by (at least one) palette entry, as laid out in
   the file. */
typedef struct tagBITMAPINFO
{
    BITMAPINFOHEADER bmiHeader;
    RGBQUAD bmiColors[1];
}BITMAPINFO;
/* CGH-style accumulation: for each pixel (row i from the launch index,
   column j) sum cos(goukei * r^2 / (2 z_k)) over all *tensuu_d point
   sources.  Each thread owns one row of img_buf_d (WID columns). */
__global__ void distance_gpu(int *x_d,int *y_d,float *z_d,float *img_buf_d,int *tensuu_d)
{
    int i,j,k;
    /* IMPROVEMENT: use blockDim.x instead of the hard-coded 128 so the
       kernel stays correct if the launch configuration changes. */
    i = blockIdx.x * blockDim.x + threadIdx.x;
    float kankaku,hatyou,goukei,pi;
    hatyou=0.633F;           /* wavelength */
    kankaku=10.5F;           /* pixel pitch */
    pi=3.14159265F;
    goukei=2.0F*pi*kankaku/hatyou;
    float dx,dy,tmp;
    for(j=0;j<WID;j++){
        tmp=0.0F;
        for(k=0;k<*tensuu_d;k++){
            dx=(float)(x_d[k]-j);
            dy=(float)(y_d[k]-i);
            /* BUG FIX (performance/precision intent): the original called
               the double-precision cos() on float operands; cosf keeps the
               whole computation in single precision. */
            tmp=tmp+cosf(goukei*0.5F*(dx*dx+dy*dy)/z_d[k]);
        }
        img_buf_d[i*WID+j] = tmp;
    }
}
/* Driver: reads point sources from "cubex.3d", computes a hologram-style
   intensity image on the GPU, binarizes it around the midpoint of its
   dynamic range, and writes an 8-bit grayscale BMP. */
int main(){
    int tensuu;                       /* number of point sources */
    BITMAPFILEHEADER BmpFileHeader;
    BITMAPINFOHEADER BmpInfoHeader;
    RGBQUAD RGBQuad[256];
    FILE *fp;
    int i,j;
    /* 8-bit grayscale BMP: file header + info header + 256-entry palette. */
    BmpFileHeader.bfType =19778;              /* 'BM' */
    BmpFileHeader.bfSize =14+40+1024+(WID*HEI);
    BmpFileHeader.bfReserved1 =0;
    BmpFileHeader.bfReserved2 =0;
    BmpFileHeader.bf0ffBits =14+40+1024;
    BmpInfoHeader.biSize =40;
    BmpInfoHeader.biWidth =WID;
    BmpInfoHeader.biHeight =HEI;
    BmpInfoHeader.biPlanes =1;
    BmpInfoHeader.biBitCount =8; /* 256 gray levels */
    BmpInfoHeader.biCompression =0L;
    BmpInfoHeader.biSizeImage =0L;
    BmpInfoHeader.biXPelsPerMeter =0L;
    BmpInfoHeader.biYPelsPerMeter =0L;
    BmpInfoHeader.biCirUsed =0L;
    BmpInfoHeader.biCirImportant =0L;
    for(i=0;i<256;i++){                       /* identity grayscale palette */
        RGBQuad[i].rgbBlue =i;
        RGBQuad[i].rgbGreen =i;
        RGBQuad[i].rgbRed =i;
        RGBQuad[i].rgbReserved =0;
    }
    fp=fopen("cubex.3d","rb");
    if(fp==NULL){
        /* BUG FIX: the original printed the message but kept reading from
           the NULL handle; bail out instead. */
        printf("File Open ERROR\n");
        return 1;
    }
    fread(&tensuu,sizeof(int),1,fp);
    printf("num=%d\n",tensuu);
    if(tensuu<=0){                            /* guard the VLA sizes below */
        printf("invalid point count\n");
        fclose(fp);
        return 1;
    }
    int x[tensuu];
    int y[tensuu];
    float z[tensuu];
    int *tensuu_d;
    cudaMalloc((void**)&tensuu_d,sizeof(int));
    cudaMemcpy(tensuu_d,&tensuu,sizeof(int),cudaMemcpyHostToDevice);
    int *x_d,*y_d;
    float *z_d;
    float *img_buf_d;
    dim3 blocks(8,1,1);        /* 8 blocks x 128 threads = HEI = 1024 rows */
    dim3 threads(128,1,1);
    int x_buf,y_buf,z_buf;
    /* Scale the integer model points into the WID x HEI pixel plane. */
    for(i=0;i<tensuu;i++){
        fread(&x_buf,sizeof(int),1,fp);
        fread(&y_buf,sizeof(int),1,fp);
        fread(&z_buf,sizeof(int),1,fp);
        x[i]=x_buf*40+512;
        y[i]=y_buf*40+512;
        z[i]=((float)z_buf)*40+100000.0F;
    }
    fclose(fp);
    cudaMalloc((void**)&x_d,tensuu*sizeof(int));
    cudaMalloc((void**)&y_d,tensuu*sizeof(int));
    cudaMalloc((void**)&z_d,tensuu*sizeof(float));
    cudaMalloc((void**)&img_buf_d,WID*HEI*sizeof(float));
    float *img_buf;
    img_buf=(float *)malloc(sizeof(float)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        img_buf[i]=0.0F;
    }
    cudaMemcpy(x_d,x,tensuu*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(y_d,y,tensuu*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(z_d,z,tensuu*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(img_buf_d,img_buf,WID*HEI*sizeof(float),cudaMemcpyHostToDevice);
    distance_gpu<<<blocks,threads>>>(x_d,y_d,z_d,img_buf_d,tensuu_d);
    cudaMemcpy(img_buf,img_buf_d,WID*HEI*sizeof(float),cudaMemcpyDeviceToHost);
    /* Find the dynamic range for midpoint thresholding. */
    float min,max,mid;
    min=img_buf[0];
    max=img_buf[0];
    for(i=0;i<HEI;i++){
        for(j=0;j<WID;j++){
            if(min>img_buf[i*WID+j]){
                min=img_buf[i*WID+j];
            }
            if(max<img_buf[i*WID+j]){
                max=img_buf[i*WID+j];
            }
        }
    }
    mid=0.5F*(min+max);
    printf("min = %lf max = %lf mid = %lf\n",min,max,mid);
    unsigned char *img;
    img=(unsigned char *)malloc(sizeof(unsigned char)*WID*HEI);
    for(i=0;i<WID*HEI;i++){
        /* BUG FIX: pixels exactly equal to mid were left uninitialized
           (malloc'd memory) by the original two-if version. */
        if(img_buf[i]<=mid){
            img[i]=0;
        }else{
            img[i]=255;
        }
    }
    FILE *fp1;
    fp1=fopen("cgh_root_gpu.bmp","wb");
    if(fp1==NULL){
        /* BUG FIX: abort instead of fwrite-ing to a NULL handle. */
        printf("ファイルオープンエラー\n");
        free(img);
        free(img_buf);
        return 1;
    }
    fwrite(&BmpFileHeader, sizeof(BmpFileHeader) , 1 ,fp1);
    fwrite(&BmpInfoHeader, sizeof(BmpInfoHeader) , 1 ,fp1);
    fwrite(&RGBQuad[0], sizeof(RGBQuad[0]) , 256 ,fp1);
    fwrite(img,sizeof(unsigned char),WID*HEI,fp1);
    free(img);
    free(img_buf);
    fclose(fp1);
    cudaFree(tensuu_d);
    cudaFree(x_d);
    cudaFree(y_d);
    cudaFree(z_d);
    cudaFree(img_buf_d);
    return 0;
}
|
882
|
// Matrix Multiplication
#include <stdio.h>
#include <stdlib.h>
/* Naive dense matrix multiply p = m * n for size x size row-major integer
   matrices; one thread per output element. */
__global__ void matrixMul(int* m, int* n,int* p,int size)
{
    //Calculate row and column
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int column = blockIdx.x * blockDim.x + threadIdx.x;
    // ROBUSTNESS: guard the tail so a grid that over-covers the matrix
    // does not read or write out of bounds (the original had no guard).
    if (row >= size || column >= size)
        return;
    int p_sum = 0;
    for(int i=0;i<size;i++)
    {
        p_sum += m[row * size + i] * n[i * size + column];
    }
    p[row * size + column] = p_sum;
}
/* Driver: multiplies two random 1024x1024 integer matrices on the GPU. */
int main()
{
    const int dim = 1 << 10;                   // 1024
    size_t bytes = (size_t)dim * dim * sizeof(int);
    // Host-side operands and result.
    int* h_m = (int*)malloc(bytes);
    int* h_n = (int*)malloc(bytes);
    int* h_p = (int*)malloc(bytes);
    // Fill both inputs with pseudo-random values in [0, 1023].
    for (int r = 0; r < dim; r++)
    {
        for (int c = 0; c < dim; c++)
        {
            h_m[r * dim + c] = rand() % 1024;
            h_n[r * dim + c] = rand() % 1024;
        }
    }
    // Device-side buffers.
    int* d_m;
    int* d_n;
    int* d_p;
    cudaMalloc(&d_m, bytes);
    cudaMalloc(&d_n, bytes);
    cudaMalloc(&d_p, bytes);
    cudaMemcpy(d_m, h_m, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_n, h_n, bytes, cudaMemcpyHostToDevice);
    // 16x16 thread tiles; dim is a multiple of 16, so the grid is exact.
    const int tile = 16;
    dim3 block_size(tile, tile);
    dim3 grid_size(dim / block_size.x, dim / block_size.y);
    matrixMul <<< grid_size, block_size >>>(d_m, d_n, d_p, dim);
    cudaMemcpy(h_p, d_p, bytes, cudaMemcpyDeviceToHost);
    printf("Completed Successfully!\n");
    // Clean-Up
    free(h_m);
    free(h_n);
    free(h_p);
    cudaFree(d_m);
    cudaFree(d_n);
    cudaFree(d_p);
    return 0;
}
|
883
|
#include "includes.h"
// In-place AND of two consecutive N-element rows of a flattened bool
// matrix: row (idx-2) &= row (idx-1).  Grid-stride loop over the columns.
__global__ void And( bool * x, size_t idx, size_t N)
{
    const size_t dst = (idx - 2) * N;   // row being updated
    const size_t src = (idx - 1) * N;   // row being ANDed in
    const int stride = blockDim.x * gridDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
    {
        x[dst + i] = x[dst + i] & x[src + i];
    }
    return;
}
|
884
|
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#define BLOCK_SIZE 5 //number of thread blocks
#define THREAD_NUM 5 //number of threads per block
#define STEP_SIZE 10000
/* Accumulate this thread's share of the midpoint-rule integral of
   4/(1+x^2) over [0,1] into sum[idx]; `step` is the bin width and nbin the
   total bin count.  Threads cooperatively stride over all bins.
   BUG FIX: `step` was declared int, so the caller's float bin width
   (1/STEP_SIZE) was truncated to 0 and every sample collapsed to x ~ 0;
   it is now a float, matching what main() passes. */
__global__ void calculate(float* sum, int nbin, float step, int nthreads, int nblocks) {
    float x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    int i;
    for (i = idx; i < nbin; i += nthreads*nblocks) {
        x = (i+0.5f)*step;
        sum[idx] += 4.0f/(1.0f+x*x);
    }
}
/* Driver: estimates pi by midpoint-rule integration of 4/(1+x^2), with
   one partial sum per GPU thread reduced on the host. */
int main(void)
{
    float *sum_device, *sum_host;
    float step = 1.0/STEP_SIZE;
    size_t size = THREAD_NUM * BLOCK_SIZE * sizeof(float);
    sum_host = (float*)malloc(size);
    cudaMalloc((void**) &sum_device, size);
    // Clear the per-thread partial sums before accumulation.
    cudaMemset(sum_device, 0, size);
    // Launch BLOCK_SIZE blocks of THREAD_NUM threads each.
    calculate<<<BLOCK_SIZE, THREAD_NUM>>>(sum_device, STEP_SIZE, step, THREAD_NUM, BLOCK_SIZE);
    // Bring the partials back and reduce them on the host.
    cudaMemcpy(sum_host, sum_device, size, cudaMemcpyDeviceToHost);
    float pi = 0;
    for (int tid = 0; tid < THREAD_NUM * BLOCK_SIZE; tid++)
        pi += sum_host[tid];
    pi *= step;   // each accumulated term still needs the bin width
    printf("PI = %f\n", pi);
    free(sum_host);
    cudaFree(sum_device);
    return EXIT_SUCCESS;
}
|
885
|
// Import the relevant header files.
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <time.h>
#include <stdlib.h>
// While taking readings, modify N to small and large values to observe CPU compute_time
#define N 10000 //Default matrix size NxN
// Performing the Covolution operation at the host side.
// CPU reference: 3x3 weighted (binomial) convolution of the padded
// (N+2)x(N+2) matrix A into C.  Only the interior cells [1..N][1..N] are
// written; the one-cell border supplies the halo and is left untouched.
void convolution_host(int A[][N+2], int C[][N+2])
{
    // The 3x3 convolution filter.
    static const int filter[3][3] = { {1, 2, 1}, {2, 4, 2}, {1, 2, 1} };
    for (int i = 1; i <= N; i++)          // interior rows
    {
        for (int j = 1; j <= N; j++)      // interior columns
        {
            int acc = 0;
            // Accumulate the 3x3 neighbourhood times the filter weights.
            for (int di = -1; di <= 1; di++)
                for (int dj = -1; dj <= 1; dj++)
                    acc += A[i + di][j + dj] * filter[di + 1][dj + 1];
            C[i][j] = acc;
        }
    }
}
// Main Function
/* Driver: fills a padded NxN matrix with random digits, runs the CPU
   reference convolution, and reports the elapsed time. */
int main(void)
{
    // BUG FIX: at N=10000 each matrix is ~400 MB; the original declared
    // them as automatic variables, overflowing the stack.  Static storage
    // (also zero-initialized) fixes that.
    static int A[N+2][N+2];     // +2 for the zero-padding border
    static int C_h[N+2][N+2];
    // timers to measure the appropriate time.
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    // Explicitly clear both matrices (statics start zeroed, but this keeps
    // the behaviour obvious and re-entry safe).
    for (int i = 0; i < N+2; i++) {
        for (int j = 0; j < N+2; j++) {
            A[i][j] = 0;
            C_h[i][j] = 0;
        }
    }
    // Populate the interior with random values between 0 and 9.
    srand(time(NULL));
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            A[i + 1][j + 1] = rand() % 10;
        }
    }
    // (The original also malloc'd an int* C here that was never used and
    // never freed; it has been removed.)
    // perform the processing on the CPU
    clock_t begin = clock();
    convolution_host(A, C_h);
    clock_t end = clock();
    // calculate total CPU time in milliseconds
    double time_execn = (double)(end - begin) / CLOCKS_PER_SEC*1000;
    printf("Total time for CPU execution is: %f milliseconds\n", time_execn);
    return EXIT_SUCCESS;
}
|
886
|
#include <iostream>
#include <stdio.h>
using namespace std;
__host__ __device__ void deriv(double* x0, double* array, double c, int size, double* ki, double*
ko);
__global__ void rk4(double* x0, double* array, double* h, int size);
/* Driver: builds a random linear system, uploads it, and runs one RK4
   step on the GPU, timing the whole round trip. */
int main(){
    int n_voxel = 10;   // NOTE(review): only used by the commented-out CPU path
    int n_species = 100;
    int species_square = n_species * n_species;
    double x0[n_species];
    double a_mat[species_square];
    double h = 0.005;
    double duration = 0.0;
    //Filling up the a_mat and x0
    for(int i = 0; i < n_species; ++i){
        x0[i] = i;
        for(int j = 0; j < n_species; ++j){
            a_mat[i * n_species + j] = rand() % 9 + 1;
        }
    }
    // BUG FIX: the original declared d_h as a plain double and
    // d_n_species as an int, then passed their *host* addresses to
    // cudaMalloc/cudaMemcpy/cudaFree and the kernel -- corrupting the
    // host variables and handing the kernel invalid pointers.  h now has
    // a real device allocation, and the size is passed by value (the
    // kernel already takes `int size`).
    double *d_x0, *d_a_mat, *d_h;
    clock_t start = clock();
    cudaMalloc( (void**)&d_x0, sizeof(double) * n_species );
    cudaMalloc( (void**)&d_a_mat, sizeof(double) * species_square );
    cudaMalloc( (void**)&d_h, sizeof(double) );
    cudaMemcpy( d_x0, x0, sizeof(double) * n_species, cudaMemcpyHostToDevice );
    cudaMemcpy( d_a_mat, a_mat, sizeof(double) * species_square, cudaMemcpyHostToDevice );
    cudaMemcpy( d_h, &h, sizeof(double), cudaMemcpyHostToDevice );
    dim3 blocks( 1, 1, 1 );
    dim3 threads( 10, 1, 1 );
    rk4 <<< blocks, threads >>> ( d_x0, d_a_mat, d_h, n_species );
    cudaMemcpy( x0, d_x0, sizeof(double) * n_species, cudaMemcpyDeviceToHost );
    cudaMemcpy( a_mat, d_a_mat, sizeof(double) * species_square, cudaMemcpyDeviceToHost );
    cudaFree( d_x0 );
    cudaFree( d_a_mat );
    cudaFree( d_h );
    duration = static_cast<double>(clock() - start);
    cout << "Time = " << duration << endl;
}
// Accumulates ko[i] += sum_j array[i][j] * (x0[j] + c * ki[j]) -- the RK
// stage derivative of the linear system x' = A(x + c*k).  `array` is a
// size x size row-major matrix; ko is added into, not overwritten.
__host__ __device__ void deriv(double* x0, double* array, double c, int size, double* ki, double* ko){
    for (int row = 0; row < size; ++row) {
        const double* a_row = array + size * row;
        for (int col = 0; col < size; ++col)
            ko[row] = ko[row] + a_row[col] * (x0[col] + c * ki[col]);
    }
}
/* One classic 4th-order Runge-Kutta step for the linear system handled by
   deriv(): x0 is updated in place, *h is the step size, size the state
   dimension.
   NOTE(review): there is no threadIdx-based work split -- every thread of
   the launch executes this entire routine on the same x0 buffer, so a
   launch with more than one thread does redundant work and races on the
   final x0 writes.  Confirm the intended launch is one thread per system.
   NOTE(review): device-side `new` can return nullptr; the allocations
   below are unchecked. */
__global__ void rk4(double* x0, double* array, double* h, int size){
    //int** arr = new int*[row];
    //int size = 100;//size of the species
    double* k1_v = new double [size];
    double* k2_v = new double [size];
    double* k3_v = new double [size];
    double* k4_v = new double [size];
    /* Stage buffers start as copies of the state; deriv() accumulates the
       stage derivative on top of these initial values. */
    for(int i = 0; i < size; ++i){
        k1_v[i] = x0[i];
        k2_v[i] = x0[i];
        k3_v[i] = x0[i];
        k4_v[i] = x0[i];
    }
    /* Four RK stages: each feeds the previous stage's output back in with
       the appropriate sub-step coefficient (0, h/2, h/2, h). */
    deriv(x0, array, 0.0, size, x0, k1_v);
    deriv(x0, array, *h/2.0, size, k1_v, k2_v);
    deriv(x0, array, *h/2.0, size, k2_v, k3_v);
    deriv(x0, array, *h, size, k3_v, k4_v);
    /* Weighted combination of the stages: x += h/6 * (k1 + 2k2 + 2k3 + k4). */
    for(int i = 0; i < size; ++i){
        x0[i] = x0[i] + (k1_v[i] + 2.0 * k2_v[i] + 2.0 * k3_v[i] + k4_v[i]) *
        (*h)/6.0;
    }
    delete[] k1_v;
    delete[] k2_v;
    delete[] k3_v;
    delete[] k4_v;
    // delete[] arr;
}
|
887
|
extern "C"
{
    // Broadcast the single value a[0] into the first n elements of c.
    __global__ void gfill_32(const int n, const float *a, float *c)
    {
        const int tid = blockDim.x * blockIdx.x + threadIdx.x;
        if (tid < n)
            c[tid] = a[0];
    }
}
|
888
|
#include "includes.h"
/* Interleaved in-place tree reduction: sums this block's segment of
   `input` and writes the block total to output[blockIdx.x].
   Destroys the contents of `input`.
   BUG FIX: the `len` parameter was ignored, so the final partially-filled
   block read and added elements past the end of the vector; both the
   reader and its partner are now bounds-checked. */
__global__ void totalWithThreadSyncInterleaved(float *input, float *output, int len) {
    //@@ Compute reduction for a segment of the input vector
    int tid = threadIdx.x, i = blockIdx.x * blockDim.x + threadIdx.x;
    for(unsigned int j = 1; j <blockDim.x; j *= 2)
    {
        if (tid % (2 * j) == 0 && i < len && i + j < len)
            input[i] += input[i+j];
        __syncthreads();   // outside the divergent branch: all threads reach it
    }
    if(tid == 0)
    {
        output[blockIdx.x] = input[i];
    }
}
|
889
|
#include "includes.h"
// Map a normalized value t (expected in [0,1]) to a jet-colormap RGBA
// pixel.  Factored out of the kernel below, which previously duplicated
// this logic verbatim for the angle and magnitude images.
static __device__ uchar4 jetColorRGBA(float t) {
    float r = 1.0f;
    float g = 1.0f;
    float b = 1.0f;
    if (t < 0.25f) {
        r = 0;
        g = 4.0f * t;
    } else if (t < 0.5f) {
        r = 0;
        b = 1.0f + 4.0f * (0.25f - t);
    } else if (t < 0.75f) {
        r = 4.0f * (t - 0.5f);
        b = 0;
    } else {
        g = 1.0f + 4.0f * (0.75f - t);
        b = 0;
    }
    uchar4 px;
    px.x = 255.0f * r;
    px.y = 255.0f * g;
    px.z = 255.0f * b;
    px.w = 255;
    return px;
}
// Convert a width x height 2-D vector field (d_vector_X/Y) into two RGBA
// visualizations: the angle image (atan2f, rescaled from
// [lower_ang, upper_ang] to [0,1]) and the magnitude image (rescaled from
// [lower_mag, upper_mag]), both jet-colormapped.  Pixels with non-finite
// magnitude (unmatched) are drawn white in both images.  One thread per
// pixel; out-of-range threads simply return.
__global__ void convert2DVectorToAngleMagnitude_kernel( uchar4 *d_angle_image, uchar4 *d_magnitude_image, float *d_vector_X, float *d_vector_Y, int width, int height, float lower_ang, float upper_ang, float lower_mag, float upper_mag) {
    const int x = __mul24(blockIdx.x, blockDim.x) + threadIdx.x;
    const int y = __mul24(blockIdx.y, blockDim.y) + threadIdx.y;
    uchar4 temp_angle, temp_magnitude;
    if (x < width && y < height) {
        float vector_X = d_vector_X[__mul24(y, width) + x];
        float vector_Y = d_vector_Y[__mul24(y, width) + x];
        // compute angle and magnitude
        float angle = atan2f(vector_Y, vector_X);
        float magnitude = sqrtf(vector_X * vector_X + vector_Y * vector_Y);
        if (!isfinite(magnitude)) {
            // unmatched pixel: white in both images
            temp_angle = make_uchar4(255, 255, 255, 255);
            temp_magnitude = make_uchar4(255, 255, 255, 255);
        } else {
            // rescale to [0,1] and convert to jet RGBA
            temp_angle = jetColorRGBA((angle - lower_ang) / (upper_ang - lower_ang));
            temp_magnitude = jetColorRGBA((magnitude - lower_mag) / (upper_mag - lower_mag));
        }
        d_angle_image[__mul24(y, width) + x] = temp_angle;
        d_magnitude_image[__mul24(y, width) + x] = temp_magnitude;
    }
}
|
890
|
#include<math.h>
#include<time.h>
#include<stdexcept>
#include<iostream>
using namespace std;
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS);
void sum( int* A, int* B, int* C, int n_el);
/* Driver: adds two random integer vectors both sequentially and on the
   GPU, compares the results, and reports both timings. */
int main()
{
    int NUMBER_OF_ELEMENTS;
    cout<<"\nEnter number of elements:";
    cin>>NUMBER_OF_ELEMENTS;
    int SIZE = NUMBER_OF_ELEMENTS*sizeof(int);
    // Host buffers (malloc'd, so they must be released with free()).
    int* hostA = (int*)malloc(SIZE);
    int* hostB = (int*)malloc(SIZE);
    int* hostC = (int*)malloc(SIZE);
    int* ans = (int*)malloc(SIZE);
    int* deviceA,*deviceB,*deviceC;
    cudaEvent_t start,end,start1,end1;
    cudaEventCreate(&start1);
    cudaEventCreate(&end1);
    srand(time(0));
    int i;
    for(i=0;i<NUMBER_OF_ELEMENTS;i++)
    {
        hostA[i] = rand()%NUMBER_OF_ELEMENTS;
        hostB[i] = rand()%NUMBER_OF_ELEMENTS;
    }
    // Sequential reference sum, timed with CUDA events.
    cudaEventRecord(start1);
    for(i=0;i<NUMBER_OF_ELEMENTS;i++)
    {
        ans[i]=hostA[i]+hostB[i];
    }
    cudaEventRecord(end1);
    cudaEventSynchronize(end1);
    float t1=0;
    cudaEventElapsedTime(&t1,start1,end1);
    cudaEventCreate(&start);
    cudaEventCreate(&end);
    cudaMalloc(&deviceA,SIZE);
    cudaMalloc(&deviceB,SIZE);
    cudaMalloc(&deviceC,SIZE);
    cudaMemcpy(deviceA,hostA,SIZE,cudaMemcpyHostToDevice);
    cudaMemcpy(deviceB,hostB,SIZE,cudaMemcpyHostToDevice);
    cudaEventRecord(start);
    sum(deviceA,deviceB,deviceC,NUMBER_OF_ELEMENTS);
    cudaEventRecord(end);
    cudaEventSynchronize(end);
    float t=0;
    cudaEventElapsedTime(&t,start,end);
    cudaMemcpy(hostC,deviceC,SIZE,cudaMemcpyDeviceToHost);
    cudaFree(deviceA);
    cudaFree(deviceB);
    cudaFree(deviceC);
    // RMS error between CPU and GPU results.
    // BUG FIX: the original summed *signed* differences, so opposite-sign
    // errors cancelled and sqrt() of a negative sum produced NaN; sum the
    // squared differences instead.
    double error = 0;
    for(i = 0;i<NUMBER_OF_ELEMENTS;i++)
    {
        double diff = double((hostA[i]+hostB[i])-hostC[i]);
        error+=diff*diff;
        cout<<"\nExpected value="<<ans[i];
        cout<<"\tActual value="<<hostC[i];
    }
    error = sqrt(error);
    cout<<"\nError = "<<error<<endl;
    cout<<"\nSequential time="<<t1;
    cout<<"\nParallel time="<<t<<endl;
    // BUG FIX: these buffers came from malloc(), so delete[] was undefined
    // behaviour; also release the previously leaked `ans` buffer.
    free(hostA);
    free(hostB);
    free(hostC);
    free(ans);
    return cudaDeviceSynchronize();
}
// Host-side launcher: picks a configuration covering all n_el elements
// (at most 512 threads per block) and invokes kernel_sum on it.
void sum( int* A, int* B, int* C, int n_el)
{
    int tpb, nblocks;
    if (n_el < 512) {
        // Small input: one block with exactly n_el threads.
        tpb = n_el;
        nblocks = 1;
    } else {
        // Full 512-thread blocks, rounded up to cover the tail.
        tpb = 512;
        nblocks = ceil(double(n_el) / double(tpb));
    }
    kernel_sum<<<nblocks, tpb>>>(A, B, C, n_el);
}
// Element-wise C = A + B; the guard handles the partially-filled tail block.
__global__ void kernel_sum( int* A, int* B, int* C, int NUMBERofELEMENTS)
{
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    if (gid < NUMBERofELEMENTS)
        C[gid] = A[gid] + B[gid];
}
|
891
|
//
// Created by kindr on 2021/5/12.
//
#include "graphConcurrent.cuh"
#include "multiKernelConcurrent.cuh"
const int N = 1 << 25;
/* Captures a fork/join of kernels across two streams into a CUDA graph,
   instantiates it, launches it twice, and releases every resource. */
void graphConcurrent() {
    cudaStream_t s1, s2;
    cudaStreamCreate(&s1);
    cudaStreamCreate(&s2);
    // Begin capturing all work ordered after s1.
    cudaStreamBeginCapture(s1, cudaStreamCaptureModeGlobal);
    math_kernel1<<<1, 1, 0, s1>>>(N);
    cudaEvent_t e1, e2;
    cudaEventCreate(&e1);
    cudaEventCreate(&e2);
    cudaEventRecord(e1, s1);
    cudaStreamWaitEvent(s2, e1);     // fork: s2 joins the capture via e1
    math_kernel2<<<1, 1, 0, s1>>>(N);
    math_kernel2<<<1, 1, 0, s2>>>(N);
    cudaEventRecord(e2, s2);
    cudaStreamWaitEvent(s1, e2);     // join: s1 waits for s2's branch
    math_kernel1<<<1, 1, 0, s1>>>(N);
    // End capture and instantiate an executable graph.
    cudaGraph_t graph;
    cudaStreamEndCapture(s1, &graph);
    cudaGraphExec_t graphExec;
    cudaGraphInstantiate(&graphExec, graph, nullptr, nullptr, 0);
    for (int i = 0; i < 2; i++) {
        cudaGraphLaunch(graphExec, nullptr);
    }
    cudaDeviceSynchronize();
    // BUG FIX: release the executable graph, the graph, both events and
    // both streams, all of which the original leaked.
    cudaGraphExecDestroy(graphExec);
    cudaGraphDestroy(graph);
    cudaEventDestroy(e1);
    cudaEventDestroy(e2);
    cudaStreamDestroy(s1);
    cudaStreamDestroy(s2);
}
|
892
|
#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <dirent.h>
#include <fstream>
#include <iostream>
#include <limits>
#include <sstream>
#include <stdio.h>
#include <string.h>
#include <tuple>
#include <vector>
using namespace std;
// Print `text` on its own line, then every element of the vector followed
// by ", " (a trailing separator is emitted, as before).
template <class T>
void coutV (string text, vector<T> someVector) {
    cout << endl << text << endl;
    for (size_t k = 0; k < someVector.size(); ++k)
        cout << someVector[k] << ", ";
    cout << endl;
}
// True when `element` occurs anywhere in `data` (linear search).
template <class T>
bool contains (vector<T> data, T element) {
    for (size_t k = 0; k < data.size(); ++k)
        if (data[k] == element)
            return true;
    return false;
}
/* pearson, spearman */
/* Arithmetic mean of `values`.
   ROBUSTNESS: returns 0 for an empty vector instead of dividing by zero
   (the original produced NaN). */
float mean (vector<float> values) {
    int size = values.size();
    if (size == 0)
        return 0.0f;
    float sum = 0;
    for (int i = 0; i < size; i++) {
        sum += values[i];
    }
    return sum / size;
}
// Covariance-style numerator of Pearson's r:
// sum over i of (A[i] - meanA) * (B[i] - meanB).
float pearson_numerator (vector<float> A, vector<float> B, float meanA, float meanB) {
    float acc = 0;
    for (size_t k = 0; k < A.size(); ++k)
        acc += (A[k] - meanA) * (B[k] - meanB);
    return acc;
}
// Product of the two standard-deviation-like factors of Pearson's r:
// sqrt(sum (A[i]-meanA)^2) * sqrt(sum (B[i]-meanB)^2).  Warns when either
// factor is zero (constant input), which would make the caller divide by
// zero.
float pearson_denominator (vector<float> A, vector<float> B, float meanA, float meanB) {
    float sqSumA = 0;
    float sqSumB = 0;
    for (size_t k = 0; k < A.size(); ++k)
        sqSumA += pow(A[k] - meanA, 2);
    for (size_t k = 0; k < B.size(); ++k)
        sqSumB += pow(B[k] - meanB, 2);
    float factorA = pow(sqSumA, 0.5);
    float factorB = pow(sqSumB, 0.5);
    if (factorA == 0 || factorB == 0)
        cout << endl << endl << "##### ERROR: Denominator equal to 0 - probable cause: all result values are equal" << endl << endl;
    return factorA * factorB;
}
float pearson (vector<float> A, vector<float> B) {
if (A.size() != B.size()) {
cout << "ERROR - wrong vector lengths" << endl;
return -1;
}
float meanA = mean(A);
float meanB = mean(B);
float numerator = pearson_numerator(A, B, meanA, meanB);
float denominator = pearson_denominator(A, B, meanA, meanB);
return numerator / denominator;
}
vector<float> toRanks (vector<float> A) {
    // Fractional ranking: each value gets the average of the 1-based
    // positions it occupies in ascending sorted order, so ties share a rank.
    vector<float> ascending(A);
    sort(ascending.begin(), ascending.end());
    vector<float> ranks;
    for (size_t i = 0; i < A.size(); ++i) {
        float posSum = 0;
        int tieCount = 0;
        for (size_t j = 0; j < ascending.size(); ++j) {
            if (ascending[j] == A[i]) {
                posSum += j + 1;
                ++tieCount;
            }
        }
        ranks.push_back(posSum / tieCount);
    }
    return ranks;
}
/* Map each value to its 1-based position in sorted order (descending when
   moreIsBetter, ascending otherwise); ties all get the position of their
   first sorted occurrence.
   Fix: the descending sort used greater<int>(), which compares the floats
   truncated to int and therefore mis-orders fractional data (e.g. the
   BA/ER/WS result vectors); greater<float>() compares the real values. */
vector<float> toPositions (vector<float> data, bool moreIsBetter) {
    int dataPoints = data.size();
    vector<float> sorted = data;
    if (moreIsBetter) {
        sort(sorted.begin(), sorted.end(), greater<float>()); // descending
    } else {
        sort(sorted.begin(), sorted.end()); // ascending
    }
    vector<float> positions;
    for (int i = 0; i < dataPoints; i++) {
        for (int j = 0; j < dataPoints; j++) {
            if (sorted[j] == data[i]) {
                positions.push_back(j + 1);
                break;
            }
        }
    }
    return positions;
}
float spearman (vector<float> A, vector<float> B) {
    // Spearman's rho = Pearson correlation of the fractional ranks.
    return pearson(toRanks(A), toRanks(B));
}
/* rest */
vector<string> getFileNames (string path) {
    // List directory entries under `path`, skipping "." and "..".
    // Returns an empty vector when the directory cannot be opened.
    vector<string> fileNames;
    DIR *dirHandle = opendir(path.c_str());
    if (dirHandle != NULL) {
        struct dirent *entry;
        while ((entry = readdir(dirHandle)) != NULL) {
            string name = entry->d_name;
            if (name != "." && name != "..") {
                fileNames.push_back(name);
            }
        }
        closedir(dirHandle);
    }
    return fileNames;
}
tuple<vector<float>, vector<float>, vector<float>, vector<float>> getRanksData (vector<vector<float>> datasets) {
    // For every parameter set (column) accumulate, across all datasets, its
    // best-first position (from toPositions) and its fractional rank (from
    // toRanks). Returns {position averages, position sums, rank averages,
    // rank sums}, indexed by parameter set.
    vector<vector<float>> positions;
    vector<vector<float>> ranks;
    for (size_t d = 0; d < datasets.size(); ++d) {
        positions.push_back(toPositions(datasets[d], true));
    }
    for (size_t d = 0; d < datasets.size(); ++d) {
        ranks.push_back(toRanks(datasets[d]));
    }
    vector<float> positionAverages;
    vector<float> positionSums;
    vector<float> ranksAverages;
    vector<float> ranksSums;
    for (size_t p = 0; p < ranks[0].size(); ++p) {
        float rankTotal = 0;
        float positionTotal = 0;
        for (size_t d = 0; d < ranks.size(); ++d) {
            rankTotal += ranks[d][p];
            positionTotal += positions[d][p];
        }
        positionAverages.push_back(positionTotal / positions.size());
        positionSums.push_back(positionTotal);
        ranksAverages.push_back(rankTotal / ranks.size());
        ranksSums.push_back(rankTotal);
    }
    return make_tuple(positionAverages, positionSums, ranksAverages, ranksSums);
}
/* Indices of the `nrOfBest` best entries of `data` (all of them when
   nrOfBest == -1), best first. "Best" means largest when moreIsBetter,
   smallest otherwise.
   Fixes: the previous sentinels (-1 for max mode, INT_MAX for min mode)
   break for values outside (-1, INT_MAX); selection now starts from the
   full float range, and maxIndex starts at -1 in both modes (it was
   nonsensically INT_MAX in min mode). The membership test is inlined with
   std::find instead of the contains() helper. */
vector<int> getIndexesOfBest (vector<float> data, int nrOfBest, bool moreIsBetter) {
    vector<int> bestIndexes;
    int toFind = (nrOfBest == -1) ? (int)data.size() : nrOfBest;
    for (int pick = 0; pick < toFind; pick++) {
        float best = moreIsBetter ? numeric_limits<float>::lowest()
                                  : numeric_limits<float>::max();
        int bestIdx = -1; // stays -1 if every index is already taken
        for (int j = 0; j < (int)data.size(); j++) {
            bool taken = find(bestIndexes.begin(), bestIndexes.end(), j) != bestIndexes.end();
            if (!taken && (moreIsBetter ? data[j] > best : data[j] < best)) {
                best = data[j];
                bestIdx = j;
            }
        }
        bestIndexes.push_back(bestIdx);
    }
    return bestIndexes;
}
vector<int> getExcelRowNumbers (vector<int> data, int tests) {
    // Translate 0-based parameter-set indexes into 1-based spreadsheet rows:
    // each set occupies (tests + 1) rows, plus one leading header row.
    vector<int> rowNumbers;
    for (size_t k = 0; k < data.size(); ++k) {
        rowNumbers.push_back((tests + 1) * (data[k] + 1) + 1);
    }
    return rowNumbers;
}
// Driver: aggregates per-parameter-set results from several datasets,
// reports the parameter sets with the best average position, and optionally
// exports Pearson/Spearman correlation matrices between the datasets as a
// SpreadsheetML (.xls) file.
int main() {
vector<vector<float>> results;
std::ofstream outfile;
/*
vector<float> facebook {1, 2, 2, 4};
vector<float> digg {4, 2, 2, 1};
vector<float> irvine {1, 2, 2, 4};
vector<float> enron {4, 2, 2, 1};
results.push_back(facebook);
results.push_back(digg);
results.push_back(irvine);
results.push_back(enron);
vector<string> datasetNames = {"facebook", "digg", "irvine", "enron"};
*/
// Hard-coded experiment results: one value per parameter set, one vector per
// dataset (all vectors must have equal length).
vector<float> digg {374,375,378,375,375,371,341,341,328,373,381,374,371,377,377,340,340,328,377,379,374,379,376,377,340,344,328,375,377,375,375,374,370,342,342,328,374,379,373,372,376,374,345,345,326,375,372,375,373,374,376,342,340,326,371,375,374,371,373,370,337,340,326,374,369,372,370,375,370,343,339,326,373,372,374,368,375,375,338,340,326,376,385,382,380,381,381,347,342,345,378,384,383,377,383,384,347,346,344,376,378,381,381,383,383,343,349,344,376,378,384,382,379,377,343,343,347,383,381,386,376,380,383,342,347,340,379,382,381,380,377,386,348,338,347,375,377,378,380,382,380,342,353,349,377,382,377,382,380,377,340,346,342,379,376,382,379,379,383,342,342,348,379,381,386,378,384,386,346,350,346,386,383,386,378,386,383,337,343,349,378,385,383,381,386,384,339,343,351,382,381,387,381,379,386,344,349,345,379,382,386,381,381,384,348,346,350,377,383,388,382,382,385,340,350,347,377,381,383,382,382,382,338,347,343,379,383,382,382,383,380,342,349,348,376,383,381,378,381,384,341,347,346};
vector<float> enron {936,934,938,934,934,938,937,938,937,934,935,938,937,936,938,938,938,937,936,936,937,936,936,936,937,939,938,935,938,937,934,938,937,938,938,936,934,936,937,935,937,937,938,938,938,935,936,935,933,937,937,937,937,939,933,933,933,933,934,935,937,937,937,934,934,933,935,935,937,937,937,937,933,934,936,934,935,938,938,937,937,935,937,939,937,936,938,940,940,939,938,937,936,937,937,939,939,941,940,935,936,938,936,936,939,939,941,941,938,939,936,937,939,941,939,939,940,937,940,939,938,936,940,939,941,940,938,936,939,937,940,940,939,940,939,935,936,939,935,938,938,940,940,939,937,938,939,936,937,939,939,940,940,936,936,938,934,936,940,939,939,939,937,937,939,936,938,940,938,941,941,936,938,939,938,939,940,939,941,941,934,937,940,938,937,940,941,940,941,937,941,940,934,940,939,940,940,941,937,940,940,939,939,939,940,941,941,939,939,939,938,940,940,939,941,941,936,936,937,935,937,938,939,941,941,936,937,939,937,939,939,940,941,941,936,939,938,935,939,938,940,941,941};
//vector<float> facebook3 {249,247,251,247,246,250,227,230,218,250,247,247,247,252,246,220,230,213,248,248,249,249,247,250,226,229,215,248,248,249,248,248,246,219,228,218,243,248,249,249,247,247,225,232,218,250,250,245,250,245,247,220,228,217,250,249,251,249,253,249,227,230,220,252,250,248,250,249,249,226,230,223,248,250,249,248,250,249,220,229,219,251,250,247,250,247,246,215,221,229,249,251,251,253,248,252,217,226,228,246,248,253,247,252,249,216,224,232,250,246,250,250,249,250,215,222,228,249,249,250,249,250,251,218,222,229,250,251,251,249,254,247,215,226,229,250,251,253,250,251,251,220,227,231,249,250,254,250,252,251,221,223,228,251,248,253,250,246,252,225,222,234,253,246,252,250,248,251,215,219,226,252,252,252,246,247,252,217,220,221,251,249,252,245,249,251,217,225,226,249,251,247,247,250,254,219,219,230,249,247,251,250,248,253,216,221,226,246,253,251,250,250,249,220,226,230,250,249,249,250,250,251,213,217,231,250,251,251,248,248,252,221,217,228,252,249,252,251,252,249,220,220,228};
vector<float> facebook {354,356,359,361,359,357,301,304,271,358,360,357,359,357,355,293,307,273,358,356,358,359,360,361,301,313,277,359,361,358,361,362,360,304,311,278,360,358,359,361,359,353,305,304,280,358,355,356,362,354,358,296,304,278,360,357,354,361,360,354,302,310,275,361,359,354,359,360,354,301,307,278,357,357,355,354,359,358,301,312,281,362,362,364,361,362,363,288,300,294,365,357,361,360,362,358,287,299,305,359,358,363,358,361,364,288,296,303,357,362,361,358,364,361,293,303,301,363,362,362,356,362,364,295,297,306,364,365,362,361,361,362,297,298,305,356,364,363,360,365,359,289,298,307,364,360,361,361,362,361,290,306,309,359,364,359,355,363,360,289,298,303,363,360,361,355,357,366,284,286,306,362,361,365,361,363,359,289,294,307,362,367,363,359,362,365,295,292,300,362,366,362,358,366,365,290,302,308,359,363,364,361,361,364,290,292,308,359,363,358,360,366,368,289,304,312,364,362,362,361,365,364,295,297,302,358,357,360,357,358,364,292,294,307,362,363,363,356,364,362,291,297,305};
//vector<float> facebook7 {436,439,428,433,439,435,343,355,312,439,434,432,440,437,433,349,351,312,436,435,432,438,436,436,344,359,320,437,434,428,436,436,434,343,353,316,437,435,430,439,435,434,342,354,315,435,437,435,438,432,433,349,358,312,433,430,430,429,434,431,351,355,317,439,433,430,431,436,433,343,355,319,436,435,431,436,435,430,347,355,322,436,441,441,441,437,440,337,339,353,438,441,444,439,439,437,334,346,349,436,441,443,439,437,441,335,343,351,440,440,441,439,440,444,338,341,346,439,442,442,444,443,443,343,343,352,436,441,441,443,439,441,336,345,355,440,443,439,440,442,437,340,344,346,440,440,441,436,437,441,346,346,351,436,445,440,438,441,442,339,347,361,439,444,442,441,438,447,329,339,359,434,440,445,432,441,442,332,341,349,441,442,442,437,439,442,332,342,361,440,437,443,441,440,444,334,352,349,442,442,444,441,440,441,337,344,353,441,442,441,440,440,444,337,340,344,442,437,444,436,444,439,336,327,357,437,438,440,443,441,440,334,341,354,440,443,443,436,445,440,343,339,359};
vector<float> BA {402.1,403.5,401.9,401.8,403.9,402.9,339.9,344.9,326.8,405.4,403.9,402.4,401.7,403.2,402.3,341.8,346.3,327.9,404.8,405,401,404.5,403.2,403,343.3,347.1,329.9,401.3,401.8,401.8,402.4,401.2,397.9,343.3,344.4,326.7,401.6,400.9,400.5,399.1,401.7,401.2,344.2,344.4,328,402.1,402.7,401.4,402.4,403.1,400.2,342,349.1,332,392.1,393.4,390.6,389.1,392.6,392.5,341,341.9,324.8,392.8,394.9,392.3,393.5,393.3,392.8,341.8,344.2,327.1,393,395.5,393.9,392.9,394.1,394.3,340.3,343.9,329.1,410,414.1,414,409.1,413.9,414.2,338.5,344.1,349,412.4,413.6,414.4,409.4,413.8,413.9,337.2,344.6,347.5,412.4,415.1,414.6,410.2,413.6,414.1,342.1,343.2,349,410.4,414.2,414.2,410.7,413.2,411.9,340.2,342.3,347.6,411.6,413,414.1,412.4,412.7,413.2,340.3,348.3,351.7,409.5,413.3,413.4,410.6,412.5,415.3,339.2,345.5,351.7,403.3,406.7,406.8,403.8,404.5,406.2,340.4,342,346.2,404.4,405.2,408.8,403.3,407.9,408,338.7,343.1,348.2,404.6,407.8,408.1,403,406.7,407.2,341.1,343.3,351.3,413.9,415.9,417.5,412.9,417.6,417.6,338.2,341.5,348.4,414.1,419,417.4,414.7,416.9,418.9,340.7,341.9,348.7,412.9,417.7,419.6,413.7,417.4,417.9,337.5,343.4,349.6,414.2,416.4,418.2,414.3,417.3,418.9,340.9,344.3,350.1,412.7,418.4,418.3,413.3,417.6,418.8,338.3,342.9,350.6,416.2,416.3,417.5,414.7,416.7,419.2,341.5,343.5,351.5,407.3,411.1,410.4,406.6,411.8,412.8,338.5,341,347,408.9,411.2,412.8,408.4,409.6,412.8,340,342.4,347.1,409.2,410.6,413.2,408.5,410.8,413.5,340,342.1,350};
vector<float> ER {238.6,237.2,237.5,238.3,237.3,237.8,202,205.6,196,238,240.8,238.8,237.9,237.6,238.1,202.5,206.7,196.3,239.7,239.7,239,238.6,238.2,239.1,206.5,207.9,197.8,239,238.6,238.4,236.3,238,238,203.8,206,197.2,238.1,240.4,238.1,239.3,237.9,237.4,203.9,205.9,199.2,237.3,240.2,237.3,237.9,238.4,237.3,204.9,207.1,199.4,233.2,234.3,232.7,232.6,234.4,232.7,202.3,204.4,195.4,234.3,233.4,232.9,235,234.8,233.9,202.9,205.3,196.3,234,234,233.8,235.5,235,234.4,204.6,205.9,198.5,241.3,245.2,244,242.7,246.1,244,200.6,203.6,207,241.8,245.5,245.6,242.3,246.3,244.9,200.4,203.6,208.1,243.1,244.2,245.8,242,244.6,243.3,200.7,204.4,210.2,242.8,244.8,244.6,242,244.7,244.4,202.1,203.5,208.9,243,244.1,245.5,244.2,243.6,244.3,200.4,204.6,210,243.2,244.6,245.2,244.3,244.6,244.5,203.2,206.2,210.5,239.5,240.9,241.4,239.2,238.9,241.3,201.2,202.4,208.4,238.8,242.7,243,239.4,242.8,242.3,202,204.3,206.7,240.9,240.8,242.4,240.2,240.5,242.8,202.3,206.1,207.9,242.6,244,247.7,242.9,245.4,247.2,201.5,202.9,207.2,243.8,247.4,246.3,243.8,246.3,248.1,201.2,205.2,207.4,242.7,247.1,248,243.4,245.7,246.9,200.3,204.5,209.7,243.6,245.6,247.1,243.1,244.9,248.3,201.1,205.5,208.3,245,246.1,246.7,245.3,246.9,246.3,201.2,204,209.2,245,245.9,247.7,244,245.5,248.1,201.2,204.7,210.8,240.1,243,244.8,241.4,243.4,244.9,202.1,202.8,209.4,242.1,242.9,245.5,243.6,244.9,244.8,201,205,207,241.9,244,245.8,241.5,242.8,244.6,202.9,204.2,207.6};
vector<float> WS {260.2,265.3,267.5,265.5,266.2,265.4,228,226.7,216.2,267.7,266.8,268,267,268,265,226.6,228.5,214.3,265,265.1,265.7,265.1,265.7,264.1,227.3,226.6,216.6,263.1,263,265.6,265.6,264.3,261.5,224.7,225.7,213.4,265.6,266.7,260.4,264,261.8,262.8,228.3,225.1,215,261.2,263.3,264.8,263.8,261.6,263.7,228.8,226.3,215.6,255.1,250.7,251.3,253.8,254.9,254.4,222.4,221.5,212.7,253.5,253.3,253.7,253.8,256.4,253.8,222.8,221.4,211.4,254.9,253.7,254.6,253.4,256.3,253.2,225.4,223.7,212.6,272.9,276.5,278.8,274.8,278.3,281.8,228,231.5,230.3,275.6,279.6,279.7,274.3,281,281.7,230.8,232.1,231.9,276.6,277.1,281.3,274.2,277.3,279.1,225.8,231.6,234.1,274.4,276.7,277.3,271.7,270.6,276.5,226.8,233.4,231.1,276.6,275.4,280.5,273.9,274.1,277.9,230.2,231.6,230.6,274.3,276.9,277.1,269.5,276.3,278.6,231.1,236.4,232.2,264.1,267.1,274,266.8,267.4,273.6,227,230.8,228.5,268.9,268.2,268,268.6,265.8,269.6,225.9,230.9,227.2,268.8,270.1,272.8,265.8,270.3,270.8,227.4,231,229.4,276.5,282.9,285.7,275.7,281.7,284,229.2,231.1,235.6,276.6,283.8,287.9,279.8,283.9,285.4,229.2,233.8,233.4,278.4,280.3,285.7,278.6,282.9,284,229.8,232.2,236.2,280.2,282.4,285,279,279.9,283.4,228.9,231.4,234.8,277.5,282.3,284.2,277.5,281.4,283.7,232.4,233.8,235.8,278.1,284.5,287.6,280,281.4,282.9,231.5,232,238,268.2,275.4,276.1,272.9,273.6,276.6,227.5,230.5,234.4,270.4,276.1,276.8,269.4,275.9,275.9,226.6,231.9,232.9,272.2,276.8,279.1,271.1,275.6,277.2,226.3,233.3,235.8};
results.push_back(digg);
results.push_back(enron);
//results.push_back(facebook3);
results.push_back(facebook);
//results.push_back(facebook7);
results.push_back(BA);
results.push_back(ER);
results.push_back(WS);
vector<string> datasetNames = {"digg", "enron"/*, facebook 3%*/, "facebook"/*, facebook 7%*/, "BA", "ER", "WS"};
// Aggregate position/rank statistics per parameter set across all datasets.
vector<float> positionAverages;
vector<float> positionSums;
vector<float> ranksAverages;
vector<float> ranksSums;
tie(positionAverages, positionSums, ranksAverages, ranksSums) = getRanksData(results);
//vector<int> bestRanksSumsIndexes = getIndexesOfBest(ranksSums, 10, true);
//coutV("Best ranks sums indexes: ", bestRanksSumsIndexes);-
// Report the (up to) 10 parameter sets with the lowest average position.
int bestToFind = 10; // -1
vector<int> bestPositionAverageIndexes = getIndexesOfBest(positionAverages, min((int)results[0].size(), bestToFind), false);
coutV("Position on average best indexes: ", bestPositionAverageIndexes);
vector<int> parametersSetsExcelRows = getExcelRowNumbers(bestPositionAverageIndexes, 10);
coutV("Excel row numbers: ", parametersSetsExcelRows);
for (int i = 0; i < bestPositionAverageIndexes.size(); i++) {
int index = bestPositionAverageIndexes[i];
//cout << "Sum of ranks: " << ranksSums[index] << endl;
cout << "Avg position: " << positionAverages[index] << endl;
}
bool saveResultsCorrelation = true;
string suffix = "TEST";
if (saveResultsCorrelation) {
// using ofstream constructors.
// Export upper-triangular Pearson and Spearman correlation matrices between
// datasets as SpreadsheetML (Excel XML) — two tables separated by blank rows.
outfile.open("results_correlation_" + suffix + "_.xls");
outfile << "<?xml version='1.0'?>" << std::endl;
outfile << "<Workbook xmlns='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:o='urn:schemas-microsoft-com:office:office'" << std::endl;
outfile << " xmlns:x='urn:schemas-microsoft-com:office:excel'" << std::endl;
outfile << " xmlns:ss='urn:schemas-microsoft-com:office:spreadsheet'" << std::endl;
outfile << " xmlns:html='http://www.w3.org/TR/REC-html40'>" << std::endl;
outfile << " <Worksheet ss:Name='Sheet1'>" << std::endl;
outfile << " <Table>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell></Cell>" << std::endl;
for (int i=0; i<datasetNames.size(); i++) {
outfile << " <Cell><Data ss:Type='String'>" + datasetNames[i] + "</Data></Cell>" << std::endl;
}
outfile << " </Row>" << std::endl;
for (int i=0; i<datasetNames.size(); i++) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>" + datasetNames[i] + "</Data></Cell>" << std::endl;
for (int j=0; j<datasetNames.size(); j++) {
if (j > i) {
outfile << " <Cell><Data ss:Type='Number'>" + to_string(pearson(results[i], results[j])) + "</Data></Cell>" << std::endl;
} else {
outfile << " <Cell></Cell>" << std::endl;
}
}
outfile << " </Row>" << std::endl;
}
outfile << " <Row></Row>" << std::endl;
outfile << " <Row></Row>" << std::endl;
outfile << " <Row></Row>" << std::endl;
outfile << " <Row>" << std::endl;
outfile << " <Cell></Cell>" << std::endl;
for (int i=0; i<datasetNames.size(); i++) {
outfile << " <Cell><Data ss:Type='String'>" + datasetNames[i] + "</Data></Cell>" << std::endl;
}
outfile << " </Row>" << std::endl;
for (int i=0; i<datasetNames.size(); i++) {
outfile << " <Row>" << std::endl;
outfile << " <Cell><Data ss:Type='String'>" + datasetNames[i] + "</Data></Cell>" << std::endl;
for (int j=0; j<datasetNames.size(); j++) {
if (j > i) {
outfile << " <Cell><Data ss:Type='Number'>" + to_string(spearman(results[i], results[j])) + "</Data></Cell>" << std::endl;
} else {
outfile << " <Cell></Cell>" << std::endl;
}
}
outfile << " </Row>" << std::endl;
}
outfile << " </Table>" << std::endl;
outfile << " </Worksheet>" << std::endl;
outfile << "</Workbook>" << std::endl;
outfile.close();
} else {
/*
cout << endl << endl << "Pearson: " << endl;
cout << pearson(facebook, digg) << endl;
cout << pearson(facebook, irvine) << endl;
cout << pearson(facebook, enron) << endl;
cout << pearson(digg, irvine) << endl;
cout << pearson(digg, enron) << endl;
cout << pearson(irvine, enron) << endl;
cout << endl << endl << "Spearman: " << endl;
cout << spearman(facebook, digg) << endl;
cout << spearman(facebook, irvine) << endl;
cout << spearman(facebook, enron) << endl;
cout << spearman(digg, irvine) << endl;
cout << spearman(digg, enron) << endl;
cout << spearman(irvine, enron) << endl;
*/
}
return 0;
}
|
893
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define REPEAT 1
// Element-wise d_odata[i] = d_idata[i] * exp(d_jdata[i]) using the fast-math
// __expf intrinsic; the REPEAT loop re-executes the statement for timing runs.
__global__ void arrayFunc(float* d_idata, float* d_jdata, float* d_odata, int size)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x; // global element index
if (tid < size) { // guard the grid tail
for(int i=0; i < REPEAT; i++)
d_odata[tid] = d_idata[tid] * __expf(d_jdata[tid]);
}
}
void initArrayData(float * array, float alpha, int size);
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size);
#define NSIZE 1048576
// Benchmark arrayFunc on the GPU against arrayFuncCPU on the host and print
// the elapsed times, speedup and a spot check of the first result element.
// Fix: the exercise placeholders left d_a/d_b/d_c uninitialized (yet passed
// to cudaFree) and h_c was printed without ever being written; the device
// allocation, copies, kernel launch and copy-back are now filled in, and the
// pinned cpuResult buffer is freed.
int
main (void) {
float *h_a, *h_b, *h_c;
float *d_a, *d_b, *d_c;
int nsize = NSIZE;
int nThreads = 256;
int nBlocks;
cudaEvent_t start, end;
float eventEtime;
// calculate block number (ceiling division so tail elements are covered)
nBlocks = (nsize-1) / nThreads + 1;
printf("Number of elements: %d\n", nsize);
printf("GPU execution with %d blocks each one of %d threads\n", nBlocks, nThreads);
// allocation and initialization of host buffers
h_a = (float*) malloc (nsize * sizeof(float));
h_b = (float*) malloc (nsize * sizeof(float));
h_c = (float*) malloc (nsize * sizeof(float));
initArrayData(h_a, 1.0f, nsize);
initArrayData(h_b, 10.0f, nsize);
// allocation of device buffers
cudaMalloc((void**)&d_a, nsize * sizeof(float));
cudaMalloc((void**)&d_b, nsize * sizeof(float));
cudaMalloc((void**)&d_c, nsize * sizeof(float));
// creation of cuda events: start, end
cudaEventCreate(&start);
cudaEventCreate(&end);
printf ("\nGPU computation ... ");
cudaEventRecord(start,0);
// host to device buffer copies
cudaMemcpy(d_a, h_a, nsize * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, nsize * sizeof(float), cudaMemcpyHostToDevice);
// arrayFunc kernel launch
arrayFunc<<<nBlocks, nThreads>>>(d_a, d_b, d_c, nsize);
// copy back of results from device (blocking: also synchronizes the kernel)
cudaMemcpy(h_c, d_c, nsize * sizeof(float), cudaMemcpyDeviceToHost);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventEtime, start, end);
printf ("ok\n");
printf("Elapsed time on GPU: %.2f ms\n", eventEtime);
// host computation
printf("\nCPU computation ... ");
float *cpuResult;
float eventTimeCPU;
cudaMallocHost((void**)&cpuResult, nsize * sizeof(float));
cudaEventRecord(start,0);
arrayFuncCPU(h_a, h_b, cpuResult, nsize);
cudaEventRecord(end,0);
cudaEventSynchronize(end);
cudaEventElapsedTime(&eventTimeCPU, start, end);
printf ("ok\n");
printf("Elapsed time on CPU: %.2f ms\n", eventTimeCPU);
printf("\nSpeed UP CPU/GPU %.1fx\n", eventTimeCPU/eventEtime);
printf("\nCheck results:\n");
printf ("h_c[0] = %f\n", h_c[0]);
printf ("cpuResult[0] = %f\n", cpuResult[0]);
// free resources on device
cudaEventDestroy(start);
cudaEventDestroy(end);
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_c);
cudaFreeHost(cpuResult); // pinned buffer was previously leaked
// free resources on host
free(h_a);
free(h_b);
free(h_c);
return 0;
}
void
initArrayData(float * array, float alpha, int size)
{
  /* Fill `array` with `size` uniform pseudo-random values in [0, alpha]. */
  float *cursor = array;
  float *end = array + size;
  while (cursor < end)
    *cursor++ = alpha * (float) rand() / (float) RAND_MAX;
}
void arrayFuncCPU(const float* h_idata, const float* h_jdata, float* h_odata, int size)
{
  /* Host reference for the arrayFunc kernel: h_odata[i] = h_idata[i] *
     exp(h_jdata[i]); the REPEAT loop mirrors the kernel's timing loop. */
  for (int idx = 0; idx < size; idx++)
    for (int rep = 0; rep < REPEAT; rep++)
      h_odata[idx] = h_idata[idx] * expf(h_jdata[idx]);
}
|
894
|
/* calculate how much memory can be really allocated (which is not the same as free)
https://stackoverflow.com/a/8923966/9201239
*/
#include <stdio.h>
#include <cuda.h>
#include <unistd.h>
const size_t Mb = 1<<20; // Assuming a 1Mb page size here
int main() {
size_t total;
size_t avail;
// Query current free/total memory on the default device.
cudaError_t cuda_status = cudaMemGetInfo(&avail, &total);
if ( cudaSuccess != cuda_status ) {
printf("Error: cudaMemGetInfo fails, %s \n", cudaGetErrorString(cuda_status) );
exit(EXIT_FAILURE);
}
printf("free: %.f, total %.f\n", (double)avail/Mb, (double)total/Mb);
int *buf_d = 0;
size_t nwords = total / sizeof(int);
size_t words_per_Mb = Mb / sizeof(int);
// Probe downward in 1 Mb steps until a single cudaMalloc of nwords ints
// succeeds (or the request drops below 1 Mb). Only cudaErrorMemoryAllocation
// keeps the loop going; success or any other error ends it.
while (cudaMalloc((void**)&buf_d, nwords * sizeof(int)) == cudaErrorMemoryAllocation) {
cudaFree(buf_d);
nwords -= words_per_Mb;
if (nwords < words_per_Mb) {
// signal no free memory
break;
}
}
cudaFree(buf_d);
printf("can allocate: %.fMB\n", (double)nwords/words_per_Mb);
//sleep(1000); /* keep consuming RAM */
return 0;
}
|
895
|
#define TIMES 10
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda.h>
#include <sys/time.h>
double
gettime ()
{
  /* Wall-clock time in seconds (microsecond resolution) since the epoch. */
  struct timeval now;
  gettimeofday (&now, NULL);
  return (double) now.tv_sec + (double) now.tv_usec * 1e-6;
}
// Synthetic compute/memory load used by runTest() purely for timing.
// NOTE(review): `tid` and `howlarge` are computed/passed but never used, and
// every thread writes the same array[0..99999] slots (pos restarts at 0 per
// thread) — a data race that is presumably acceptable because the stored
// values are meaningless for the benchmark; confirm intent.
__global__ void
dummy_function (int *array, unsigned int howlarge)
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
int tot = 0;
int pos = 0;
// 100 x 1000 iterations of cheap integer work, stored to generate traffic
for (int i = 0; i < 100; i++) {
for (int dummy = 0; dummy < 1000; dummy++) {
tot = tot + (i * dummy + threadIdx.x) & 0x000f;
array[pos++] = tot;
}
}
}
// Times, over TIMES rounds: a bare kernel run, a bare large memcpy, a fully
// synchronous copy+kernel+copy sequence, and the same sequence issued with
// cudaMemcpyAsync on two streams, then prints per-round and average times.
void
runTest ()
{
double start, end;
// "large" buffers: 100M ints; "small" buffers: 1M ints
unsigned int nints = 100 * 1024 * 1024;
unsigned int sz = nints * sizeof (int);
unsigned int nints_small = 1 * 1024 * 1024;
unsigned int sz_small = nints_small * sizeof (int);
cudaStream_t stream1, stream2 ;
cudaStreamCreate ( &stream1) ;
cudaStreamCreate ( &stream2) ;
#ifdef _LP64
printf ("Running on a 64-bit platform!\n", 0);
#else
#endif
// NOTE(review): host buffers are pageable (plain malloc) and copied to the
// device without initialization — fine for timing, but cudaMemcpyAsync
// cannot fully overlap with pageable memory; confirm pinned memory was not
// intended here.
int *dummy_cpu, *dummy_cpu2, *dummy_small_cpu, *dummy_small_cpu2;
dummy_cpu = (int *) malloc (sz);
dummy_cpu2 = (int *) malloc (sz);
dummy_small_cpu = (int *) malloc (sz_small);
dummy_small_cpu2 = (int *) malloc (sz_small);
int *dummy_gpu, *dummy_gpu2, *dummy_small_gpu, *dummy_small_gpu2;
cudaMalloc ((void **) &dummy_gpu, sz);
cudaMalloc ((void **) &dummy_gpu2, sz);
cudaMalloc ((void **) &dummy_small_gpu, sz_small);
cudaMalloc ((void **) &dummy_small_gpu2, sz_small);
double kernelt = 0, memcpyt = 0, st = 0, ast = 0;
for (int i = 0; i < TIMES; i++) {
// 1) kernel alone (note: sz, a byte count, is passed as the unused
// `howlarge` argument)
start = gettime ();
dummy_function <<< 100, 512 >>> (dummy_gpu, sz);
cudaDeviceSynchronize ();
end = gettime ();
printf ("kernel time: %f\n", end - start);
kernelt += end - start;
// 2) large blocking device-to-host copy alone
start = gettime ();
cudaMemcpy (dummy_cpu, dummy_gpu, sz, cudaMemcpyDeviceToHost);
end = gettime ();
printf ("memcpy time: %f\n", end - start);
memcpyt += end - start;
start = gettime ();
// Do the sync routine
cudaMemcpy (dummy_small_gpu, dummy_small_cpu, sz_small,
cudaMemcpyHostToDevice);
dummy_function <<< 100, 512 >>> (dummy_gpu, sz);
cudaMemcpy (dummy_cpu, dummy_gpu, sz, cudaMemcpyDeviceToHost);
cudaMemcpy (dummy_small_gpu2, dummy_small_cpu2, sz_small,
cudaMemcpyHostToDevice);
dummy_function <<< 100, 512 >>> (dummy_gpu2, sz);
cudaMemcpy (dummy_cpu2, dummy_gpu2, sz, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize ();
end = gettime ();
printf ("sync time: %f\n", end - start);
st += end - start;
start = gettime ();
// Do the async routine (copies on stream2 may overlap kernels on stream1;
// the intermediate cudaDeviceSynchronize calls limit the overlap)
cudaMemcpyAsync (dummy_small_gpu, dummy_small_cpu, sz_small,
cudaMemcpyHostToDevice, stream1);
dummy_function <<< 100, 512, 0, stream1 >>> (dummy_gpu, sz);
cudaDeviceSynchronize ();
cudaMemcpyAsync (dummy_cpu, dummy_gpu, sz, cudaMemcpyDeviceToHost, stream2);
cudaMemcpyAsync (dummy_small_gpu2, dummy_small_cpu2, sz_small,
cudaMemcpyHostToDevice, stream1);
dummy_function <<< 100, 512, 0, stream1 >>> (dummy_gpu2, sz);
cudaDeviceSynchronize ();
cudaMemcpyAsync (dummy_cpu2, dummy_gpu2, sz, cudaMemcpyDeviceToHost, stream2);
cudaDeviceSynchronize ();
end = gettime ();
printf ("async time: %f\n", end - start);
ast += end - start;
printf ("-- Done round %d --\n", i);
}
// NOTE(review): host/device buffers and the two streams are never released;
// acceptable since the process exits right after, but confirm.
printf ("Average:\nkerneltime=%f, memcpy=%f, sync=%f, async=%f\n",
kernelt / TIMES, memcpyt / TIMES, st / TIMES, ast / TIMES);
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main (int argc, char **argv)
{
  /* Entry point: the whole experiment lives in runTest(). */
  (void) argc;
  (void) argv;
  runTest ();
  return EXIT_SUCCESS;
}
|
896
|
/* Includes, system */
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define N 1024
/* DEVICE CODE */
// Element-wise integer addition: sum[i] = d1[i] + d2[i].
// The global index is linearized from a 2-D grid of 3-D blocks
// (gridDim.z is assumed to be 1 by this formula).
__global__ void suma_2_enteros(int *d1, int *d2, int *sum){
int idBloque = blockIdx.y * gridDim.x + blockIdx.x; // linear block id
int idThread = idBloque * blockDim.z * blockDim.y * blockDim.x + threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; // linear global thread id
sum[idThread] = d1[idThread] + d2[idThread];
}
/* HOST CODE*/
// Adds two N-element integer vectors on the GPU (2-D grid of 3-D blocks,
// 16 blocks * 64 threads = 1024 = N threads) and prints a sample of results.
// Fixes: device buffers/memsets were sized with sizeof(d_d1) — the size of a
// POINTER — instead of the element size, over-allocating 2x on 64-bit hosts;
// host buffers were leaked; main had no return statement.
int main(int argc, char** argv)
{
int DeviceCount = 0,i;
int *h_d1,*h_d2,*h_sum;
int *d_d1,*d_d2,*d_sum;
dim3 dimGrid(8,2);    // 16 blocks in a 2-D grid
dim3 dimBlock(8,4,2); // 64 threads per 3-D block
h_d1 = (int*)malloc(N * sizeof(h_d1[0]));
h_d2 = (int*)malloc(N * sizeof(h_d2[0]));
h_sum = (int*)malloc(N * sizeof(h_sum[0]));
for (i=0;i<N;i++){h_d1[i]=i;h_d2[i]=10*i;h_sum[i]=0;}
/* Initialize CUDA (driver API) and check at least one device exists. */
if (cuInit(0) != 0){
printf("ERROR de inicializacion\n");
exit(0);
}
cuDeviceGetCount(&DeviceCount);
if (DeviceCount == 0){
printf("ERROR ningun dispositivo soporta CUDA\n");
exit(0);
}
/* Allocate and zero device buffers, sized by element, not by pointer. */
cudaMalloc((void**)&d_d1,N*sizeof(d_d1[0]));cudaMemset(d_d1,0,N*sizeof(d_d1[0]));
cudaMalloc((void**)&d_d2,N*sizeof(d_d2[0]));cudaMemset(d_d2,0,N*sizeof(d_d2[0]));
cudaMalloc((void**)&d_sum,N*sizeof(d_sum[0]));cudaMemset(d_sum,0,N*sizeof(d_sum[0]));
cudaMemcpy(d_d1,h_d1,N*sizeof(h_d1[0]),cudaMemcpyHostToDevice);
cudaMemcpy(d_d2,h_d2,N*sizeof(h_d2[0]),cudaMemcpyHostToDevice);
suma_2_enteros<<<dimGrid,dimBlock>>>(d_d1,d_d2,d_sum);
cudaMemcpy(h_sum,d_sum,N*sizeof(h_sum[0]),cudaMemcpyDeviceToHost);
for (i=510;i<520;i++) printf("Resultado: %d \n",h_sum[i]);
cudaFree(d_d1);cudaFree(d_d2);cudaFree(d_sum);
free(h_d1);free(h_d2);free(h_sum); // host buffers were previously leaked
return 0;
}
|
897
|
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
//increment is compiled for both host and device
__host__ __device__ int increment(int a)
{
    return 1 + a;
}
// b[i] = increment(a[i]) for every i < size, one element per thread.
__global__ void kernel(int *a, int *b, int size)
{
int tid = threadIdx.x; //local thread index within the block
int bid = blockIdx.x; //block index
int bdim= blockDim.x; //number of threads per block
int i = tid+bid*bdim; //global element index
if (i<size) //bounds guard for the grid tail
b[i]=increment(a[i]); //apply increment element-wise
}
//Initialize a[i] = i for all `size` elements
void fillA(int *a, int size)
{
    int value = 0;
    while (value < size) {
        a[value] = value;
        ++value;
    }
}
//Verify b[i] == increment(a[i]) for every i; echoes the first 10 pairs and
//prints a pass/fail summary. Returns true when all elements match.
bool checkResult(int *a, int *b, int size)
{
    bool allMatch = true;
    printf("a b\n");
    for (int idx = 0; idx < size; idx++)
    {
        allMatch &= (increment(a[idx]) == b[idx]);
        if (idx < 10) printf("%i %i\n", a[idx], b[idx]);
    }
    if (allMatch) printf("Test passed\n");
    else printf("Test failed\n");
    return allMatch;
}
// Host driver: fill an input array, run the increment kernel on the GPU and
// verify the result on the CPU.
int main(int argc, char**argv)
{
//problem size
int size=1024;
//pointers to host/device memory
int *a_host, *b_host, *a_dev, *b_dev;
//allocate host memory
a_host = (int*)malloc(size*sizeof(int));
b_host = (int*)malloc(size*sizeof(int));
fillA(a_host,size);
//allocate device memory
//note: the (void**)& cast is essential
cudaMalloc((void**)&a_dev,size*sizeof(int));
cudaMalloc((void**)&b_dev,size*sizeof(int));
//copy host->device
cudaMemcpy(a_dev,a_host,size*sizeof(int),cudaMemcpyHostToDevice);
//kernel configuration (1-dimensional only):
//256 threads per thread block
//multiple dimensions possible via dim3 threads(x,y,z); -> threads.x,threads.y,threads.z
dim3 threads(256);
//1024/256 = 4 thread blocks
//multiple dimensions possible, as with threads
//NOTE(review): size/threads.x truncates; a size that is not a multiple of
//256 would leave a tail unprocessed — fine here since 1024 divides evenly.
dim3 grid(size/threads.x);
//launch the kernel with configuration <<<grid,threads>>> on device memory
//important: do not forget the angle brackets <<<>>>!
//the kernel runs asynchronously to the CPU, i.e. the CPU could do other work here
kernel<<<grid,threads>>>(a_dev,b_dev,size);
//copy the result back (implicit synchronization)
cudaMemcpy(b_host,b_dev,size*sizeof(int),cudaMemcpyDeviceToHost);
checkResult(a_host,b_host,size);
//release memory
cudaFree(a_dev);
cudaFree(b_dev);
free(a_host);
free(b_host);
return 0;
}
|
898
|
#include <stdio.h>
#include <stdio.h>
/* Computes E = exp(A) via the truncated Taylor series
   Coeff[0]*I + Coeff[1]*A + ... + Coeff[11]*A^11 (Coeff[k] = 1/k!).
   Each thread owns `rowsPerThread` consecutive rows of the N x N matrices:
   D holds the running power A^(order-1), C the freshly computed product
   D*A, and E the accumulated series. __syncthreads() separates the multiply
   phase from the update phase within a block; every row is read and written
   by exactly one thread, so there is no cross-block dependency.
   Fix: the row index was blockIdx.x*blockDim.x + threadIdx.x*rowsPerThread
   + i, which makes blocks overlap (and leaves rows untouched) whenever
   rowsPerThread > 1; the GLOBAL thread id must be scaled by rowsPerThread. */
__global__ void matrixExponentiation(float *A,float *C, float *D, float *E, float *Coeff, unsigned *N, unsigned *rowsPerThread)
{
unsigned i,j,k,row,order;
const unsigned thread = blockIdx.x*blockDim.x + threadIdx.x; // global thread id
/* Initialize E = Coeff[0]*I + Coeff[1]*A and D = A. */
for(i=0;i<(*rowsPerThread);i++)
{
row = thread*(*rowsPerThread) + i;
for(j=0;j<(*N);j++)
{
E[row*(*N)+j] = Coeff[1]*A[row*(*N)+j];
D[row*(*N)+j] = A[row*(*N)+j];
}
E[row*(*N)+row] += Coeff[0];
}
__syncthreads();
for(order=2;order<12;order++)
{
/* C = D * A : next power of A, computed row-by-row for the owned rows. */
for(i=0;i<(*rowsPerThread);i++)
{
row = thread*(*rowsPerThread) + i;
for(j=0;j<(*N);j++)
{
C[row*(*N)+j] = 0.0;
for(k=0;k<(*N);k++)
C[row*(*N)+j]+=(D[row*(*N)+k]*A[k*(*N)+j]);
}
}
__syncthreads();
/* E += Coeff[order]*C and D = C for the next iteration. */
for(i=0;i<(*rowsPerThread);i++)
{
row = thread*(*rowsPerThread) + i;
for(j=0;j<(*N);j++)
{
E[row*(*N)+j]+=(Coeff[order]*C[row*(*N)+j]);
D[row*(*N)+j] =(C[row*(*N)+j]);
}
}
__syncthreads();
}
}
/* Fill the N x N matrix with the constant 1e-3. */
void makeA(float *A,unsigned N)
{
    unsigned total = N * N;
    for (unsigned idx = 0; idx < total; idx++)
        A[idx] = 1e-3;
    return ;
}
/* Print the N x N matrix row by row, tab-separated, 4 decimal places. */
void printMatrix(float *A, unsigned N)
{
    for (unsigned r = 0; r < N; r++)
    {
        for (unsigned c = 0; c < N; c++)
            printf("%6.4f\t", A[r*N+c]);
        printf("\n");
    }
    return ;
}
// Reads N/blocks/threads, computes exp(A) for the constant test matrix on
// the GPU, times the whole device phase with CUDA events, and dumps the
// result matrix to C.dat by redirecting stdout.
int main()
{
unsigned N,blocks, threads,i;
unsigned *dev_N;
float *A,*Exp,*Coeff;
float *dev_A,*dev_B,*dev_C,*dev_Exp,*dev_Coeff;
unsigned size;
unsigned rowsPerThread, *dev_rowsPerThread;
printf("The order of matrix to be used\n");
// NOTE(review): N, blocks, threads are unsigned but read with %d; %u is the
// matching conversion — confirm intended.
scanf("%d",&N);
printf("Enter the number of blocks.\n");
scanf("%d",&blocks);
printf("Enter the number of threads per block.\n");
scanf("%d",&threads);
if((N%(threads*blocks))!=0)
{
printf("The order of the matrix `N` must be divisible by the product `threads*blocks`\n. Aborting the program!\n");
}
else
{
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
size = N*N*sizeof(float);
// rows each GPU thread is responsible for
rowsPerThread = (N/(threads*blocks));
A = (float *)malloc(size);
Exp = (float *)malloc(size);
Coeff = (float *)malloc(12*sizeof(float));
// Taylor coefficients 1/k! for k = 0..11
Coeff[0] = 1.0;
for(i=1;i<12;i++)
Coeff[i] = (Coeff[i-1]/(1.0*i));
cudaEventRecord(start);
cudaMalloc((void**)&dev_A,size);
cudaMalloc((void**)&dev_B,size);
cudaMalloc((void**)&dev_C,size);
cudaMalloc((void**)&dev_Exp,size);
cudaMalloc((void**)&dev_Coeff,12*sizeof(float));
cudaMalloc((void**)&dev_rowsPerThread,sizeof(unsigned));
cudaMalloc((void**)&dev_N,sizeof(unsigned));
makeA(A,N);
cudaMemcpy(dev_A, A, size,cudaMemcpyHostToDevice);
cudaMemcpy(dev_Coeff, Coeff, 12*sizeof(float),cudaMemcpyHostToDevice);
cudaMemcpy(dev_rowsPerThread,&rowsPerThread,sizeof(unsigned),cudaMemcpyHostToDevice);
cudaMemcpy(dev_N,&N,sizeof(unsigned),cudaMemcpyHostToDevice);
// kernel params: C=dev_B (product scratch), D=dev_C (running power),
// E=dev_Exp (accumulated series)
matrixExponentiation<<<blocks,threads>>>(dev_A, dev_B, dev_C, dev_Exp, dev_Coeff ,dev_N,dev_rowsPerThread);
cudaMemcpy(Exp,dev_Exp,size,cudaMemcpyDeviceToHost);
// NOTE(review): dev_Exp and dev_Coeff are never cudaFree'd, the events are
// never destroyed, and host buffers A/Exp/Coeff are leaked — confirm cleanup.
cudaFree(dev_A);
cudaFree(dev_B);
cudaFree(dev_C);
cudaFree(dev_rowsPerThread);
cudaFree(dev_N);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
float milliseconds = 0.0;
cudaEventElapsedTime(&milliseconds,start,stop);
fprintf (stderr,"Time for the Matrix Multiplication of order %d : %f s using blocks %d and threads per block %d.\n\n",N ,0.001*milliseconds,blocks,threads);
/*freopen("A.dat","w",stdout);
printMatrix(A,N);
fclose(stdout);
freopen("B.dat","w",stdout);
printMatrix(B,N);
fclose(stdout);*/
// Redirect stdout to C.dat and dump the resulting matrix.
freopen("C.dat","w",stdout);
printMatrix(Exp,N);
fclose(stdout);
}
return 0;
}
|
899
|
#include <math.h>
#include <stdio.h>
#include <stdlib.h> /* malloc/free were previously used without a declaration */
#include <time.h>
float* CPUEuler(float t_0, float y_0, float delta_t){
int n=10/delta_t + 1;
float *y = (float*) malloc(sizeof(float)*n);
for (int i=0;i<n;i++){
y[i]=y_0;
for (int j=0;j<i;j++){
float t_j=t_0+j*delta_t;
y[i]+=delta_t*(9*powf(t_j,2)-4*t_j+5);
}
}
return y;
}
/* Times CPUEuler for step sizes 10^-1 .. 10^-6 and prints each run's CPU
   time in milliseconds.
   Fix: every CPUEuler result was leaked (up to ~40 MB per iteration at
   delta_t = 1e-6); each buffer is now freed after timing. */
int main(){
    printf("seccion 1.a\n");
    clock_t start, end;
    float *y;
    for(int i=1;i<7;i++){
        float delta_t=pow(10,-i);
        start=clock();
        y = CPUEuler(0,4,delta_t);
        end=clock();
        double cpu_time_used = 1000 * ((double) (end - start)) / CLOCKS_PER_SEC;
        printf("%f\n",cpu_time_used);
        free(y); /* was leaked */
    }
    return 0;
}
|
900
|
#include "includes.h"
// Writes 1.0f to target[i] where lower < gData[i] < upper (bounds exclusive),
// else 0.0f. Grid-stride loop: any launch config covers all numElements.
__global__ void kInRangeExc(float* gData, float lower, float upper, float* target, unsigned int numElements) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
for (unsigned int i = idx; i < numElements; i += blockDim.x * gridDim.x)
target[i] = gData[i] > lower && gData[i] < upper; // bool promotes to 0.0f/1.0f
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.