serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
20,601 | #include "includes.h"
/* Squares each input element: d_out[t] = d_in[t]^2.
 * One thread per element; indexing uses threadIdx.x only, so a
 * single-block launch is assumed. */
__global__ void squareFunc(unsigned int *d_in, unsigned int *d_out)
{
    const int elem = threadIdx.x;
    const unsigned int v = d_in[elem];
    d_out[elem] = v * v;
}
20,602 | #include <stdio.h>
#include <cuda.h>
#define THREADSPERBLOCK 1024
/*
 * Stage 1 of the friendly-numbers search: for each candidate value i the
 * thread (a) accumulates the sum of divisors of i by trial division and
 * (b) stores the ratio sum/i reduced to lowest terms (Euclid's GCD) into
 * d_num[ii] / d_den[ii].
 *
 * NOTE(review): `ii = i - start` is negative for thread indices i < start,
 * which would write before the start of d_num/d_den.  The host launches with
 * size = end-start+1 and thread indices 0..size-1, so this mapping only looks
 * safe when start == 0 — confirm the intended index mapping.
 * NOTE(review): the `end` parameter is never used inside the kernel.
 */
__global__ void primeiroLaco(long int* d_num, long int* d_den, long int start, long int end, int size)
{
int num_aux, den_aux, aux, resto;
long int factor, ii, sum, done, n;
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < size)
{
ii = i - start;
// divisor sum starts with 1 and i itself
sum = 1 + i;
done = i;
factor = 2;
// trial-divide; `done` shrinks to the current cofactor as factors are found
while (factor < done)
{
resto = i / factor;
resto = i - (factor * resto);   // resto = i % factor, spelled out
if (resto == 0)
{
sum += (factor + (i / factor)); // add the factor and its cofactor
if ((done = i / factor) == factor)
sum -= factor;                  // perfect square: factor was counted twice
}
factor++;
}
d_num[ii] = sum;
d_den[ii] = i;
// reduce sum/i to lowest terms: Euclid's GCD on (num, den)
num_aux = d_num[ii];
den_aux = d_den[ii];
while (num_aux != 0)
{
aux = num_aux;
resto = den_aux / num_aux;
resto = den_aux - (num_aux * resto);  // den % num, spelled out
num_aux = resto;
den_aux = aux;
}
n = den_aux;   // n = gcd(sum, i)
d_num[ii] /= n;
d_den[ii] /= n;
}
}
/* Stage 2: for each reduced fraction i, counts how many LATER entries j share
 * the same numerator and denominator.  One thread per index i; the j-scan is
 * sequential within the thread and increments d_arrSomaC[i] once per match. */
__global__ void segundoLaco(long int* d_num, long int* d_den, long int* d_arrSomaC, int size)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= size)
        return;
    for (int other = idx + 1; other < size; ++other)
    {
        const bool sameFraction =
            (d_num[idx] == d_num[other]) && (d_den[idx] == d_den[other]);
        if (sameFraction)
            d_arrSomaC[idx]++;
    }
}
/*
 * Host driver: counts pairs of indices whose reduced divisor-sum fractions
 * match ("mutually friendly"), using the two kernels above.
 * start/end bound the candidate range; last = end-start+1 values are handled.
 */
void friendly_numbers(long int start, long int end)
{
    cudaSetDevice(0);
    long int *d_num, *d_den;
    long int last = end - start + 1;            /* number of values handled */
    size_t size = last * sizeof(long int);
    int c = 0;
    /* BUG FIX: the grid must cover `last` elements, not end-start; the old
     * computation under-launched by one block whenever last was one past a
     * multiple of THREADSPERBLOCK, leaving the final element unprocessed. */
    int nBlocks = (int)((last + THREADSPERBLOCK - 1) / THREADSPERBLOCK);
    printf("BLOCOS %d THREADS %d\n", nBlocks, THREADSPERBLOCK);
    long int *arrSomaC = (long int*) malloc(size);
    long int i;
    for (i = 0; i < last; i++)
        arrSomaC[i] = 0;                         /* match counters start at 0 */
    cudaMalloc((void**)&d_num, size);
    cudaMalloc((void**)&d_den, size);
    /* d_num/d_den are fully written by primeiroLaco, so the original
     * host->device copies of uninitialized buffers were removed. */
    primeiroLaco<<<nBlocks, THREADSPERBLOCK>>>(d_num, d_den, start, end, last);
    long int *d_arrSomaC;
    cudaMalloc((void**)&d_arrSomaC, size);
    cudaMemcpy(d_arrSomaC, arrSomaC, size, cudaMemcpyHostToDevice);
    segundoLaco<<<nBlocks, THREADSPERBLOCK>>>(d_num, d_den, d_arrSomaC, last);
    /* blocking copy also synchronizes with the kernels above */
    cudaMemcpy(arrSomaC, d_arrSomaC, size, cudaMemcpyDeviceToHost);
    for (i = 0; i < last; i++)
        c += arrSomaC[i];
    printf("Founded %d pairs of mutually friendly numbers\n", c);
    cudaFree(d_num);
    cudaFree(d_den);
    cudaFree(d_arrSomaC);
    free(arrSomaC);
}
/* Entry point: reads the [start, end] range from the command line and runs
 * the friendly-numbers search. */
int main(int argc, char **argv)
{
    /* BUG FIX: the original dereferenced argv[1]/argv[2] unconditionally and
     * crashed when run without both arguments. */
    if (argc < 3)
    {
        printf("usage: %s start end\n", argv[0]);
        return EXIT_FAILURE;
    }
    /* BUG FIX: use atol — the targets are long int, atoi truncates to int. */
    long int start = atol(argv[1]);
    long int end = atol(argv[2]);
    printf("NUMBER %ld TO %ld\n", start, end);
    friendly_numbers(start, end);
    return EXIT_SUCCESS;
}
20,603 | #include <stdio.h>
#include <stdlib.h>
#define PI 3.14159265
#define PADDING_SIZE 1
#define FILTER_SIZE 3
#define X 8
#define Y 16
// declaring constant memory for kernel
__device__ __constant__ float d_filterKernel[FILTER_SIZE] = { -1, 0, 1};
/*
 * Gradient magnitude/angle of a padded image via a 3-tap derivative filter
 * (d_filterKernel = {-1, 0, 1} in constant memory), reading from global
 * memory directly.
 *
 * image     : padded input, row-major, `height` x `width` INCLUDING padding
 * outputMag : gradient magnitude, (height-2*PADDING_SIZE) x (width-2*PADDING_SIZE)
 * outputAng : gradient angle in degrees, same shape as outputMag
 *
 * NOTE(review): assumes a 2-D launch whose blockDim matches the tile size;
 * threads outside the un-padded image are rejected by the guard below.
 */
__global__ void convolutionGlobal( float *image, int height, int width,
float *outputMag, float *outputAng )
{
/*
this blockOrigin(X,Y) are w.r.t to the padded image
these are the coordinates where block (0,0) is placed in the padded image
*/
int blockOriginX = blockIdx.x * blockDim.x + PADDING_SIZE;
int blockOriginY = blockIdx.y * blockDim.y + PADDING_SIZE;
/*
this tileOrigin(X,Y) are w.r.t to the padded image
these are the coordinates where tile (0,0) is placed in the padded image
*/
int tileOriginX = blockOriginX - PADDING_SIZE;
int tileOriginY = blockOriginY - PADDING_SIZE;
// coordinates (in the padded image) of the pixel this thread differentiates
int pixelX = blockOriginX + threadIdx.x;
int pixelY = blockOriginY + threadIdx.y;
// only threads whose un-padded coordinates fall inside the output do work
if( (tileOriginX + threadIdx.x)< height - 2*PADDING_SIZE && tileOriginY + threadIdx.y < width - 2*PADDING_SIZE )
{
double gX = 0, gY = 0, gMag = 0, gAng = 0;
// 1-D convolution along each axis: gX along rows (x), gY along columns (y)
for (int k = -PADDING_SIZE; k <= PADDING_SIZE ; ++k)
{
// along x direction
gX += d_filterKernel[k+PADDING_SIZE] * image[(pixelX + k)*width + (pixelY)];
gY += d_filterKernel[k+PADDING_SIZE] * image[(pixelX)*width + (pixelY+k)];
}
// the 90.1 offset keeps gAng positive (else it can print as -0.0000)
gMag = sqrt(gX*gX + gY*gY);
if( gX==0 )
gAng = 90;
else
gAng = atan(gY/gX)*180.0/PI + 90.1;
// store at un-padded coordinates in the output grid
outputMag[(tileOriginX + threadIdx.x)*(width-2*PADDING_SIZE)+ tileOriginY + threadIdx.y] = gMag;
outputAng[(tileOriginX + threadIdx.x)*(width-2*PADDING_SIZE)+ tileOriginY + threadIdx.y] = gAng;
}
}
/*
 * Same gradient computation as convolutionGlobal, but stages a (blockX +
 * 2*PADDING_SIZE) x (blockY + 2*PADDING_SIZE) tile of the padded image into
 * dynamically-sized shared memory first.  Each thread services up to 4 rows
 * of the tile (the m-loop), both for loading and for computing.
 *
 * image            : padded input, row-major, paddedX x paddedY
 * blockX/blockY    : logical tile extent handled per block (can exceed blockDim.x)
 * outputMag/Ang    : un-padded outputs, imgRows x imgCols
 *
 * NOTE(review): the shared tile is indexed with stride blockDim.y, so the
 * launch's blockDim must match the geometry the host computed the
 * shared-memory size for — confirm against the (unseen) launch site.
 */
__global__ void convolutionShared(float *image, int paddedX, int paddedY,
int blockX, int blockY,
float *outputMag, float *outputAng,
int imgRows, int imgCols)
{
/*
this blockOrigin(X,Y) are w.r.t to the padded image
these are the coordinates where block (0,0) is placed in the padded image
*/
int blockOriginX = blockIdx.x * blockX + PADDING_SIZE;
int blockOriginY = blockIdx.y * blockY + PADDING_SIZE;
/*
this tileOrigin(X,Y) are w.r.t to the padded image
these are the coordinates where tile (0,0) is placed in the padded image
*/
int tileOriginX = blockOriginX - PADDING_SIZE;
int tileOriginY = blockOriginY - PADDING_SIZE;
/*
these coordinates specify the ends of this tile:
if tileX is not divisible by 4, the last reuse iteration would exceed the
tile, so clamp to the tile size or to the end of the padded image
*/
int tileEndX = min(tileOriginX + blockX + 2*PADDING_SIZE, paddedX);
int tileEndY = min(tileOriginY + blockY + 2*PADDING_SIZE, paddedY);
// dynamically-sized shared tile (size set at launch)
extern __shared__ float imageTile[];
/*
each thread copies up to 4 rows of the tile from global to shared memory,
stopping early at tile/image boundaries
*/
for (int m = 0; m < 4; ++m)
{
/*
((m*blockDim.x)+threadIdx.x) is the tile-local x index in the m-th pass;
(tileOriginX + that) is the corresponding global x index
*/
if( (tileOriginX +((m*blockDim.x)+threadIdx.x))<tileEndX && tileOriginY +threadIdx.y<tileEndY )
{
imageTile[((m*blockDim.x)+threadIdx.x)*blockDim.y+threadIdx.y]
= image[(tileOriginX +((m*blockDim.x)+threadIdx.x))*paddedY + tileOriginY +threadIdx.y];
}
}
// all tile rows must be resident before any thread convolves
__syncthreads();
for (int m = 0; m < 4; ++m)
{
// compute only for valid tile rows that also fall inside the output image
if( ((m*blockDim.x)+threadIdx.x)<blockX && threadIdx.y<blockY
&& (tileOriginX + ((m*blockDim.x)+threadIdx.x))<imgRows && tileOriginY + threadIdx.y<imgCols)
{
double gX = 0, gY = 0, gMag = 0, gAng = 0;
// 3-tap derivative from the shared tile: gX along x, gY along y
for (int k = -PADDING_SIZE; k <= PADDING_SIZE ; ++k)
{
// along x direction
gX += d_filterKernel[k+PADDING_SIZE] * imageTile[((m*blockDim.x)+threadIdx.x +k +PADDING_SIZE )*blockDim.y + threadIdx.y +PADDING_SIZE];
gY += d_filterKernel[k+PADDING_SIZE] * imageTile[((m*blockDim.x)+threadIdx.x +PADDING_SIZE )*blockDim.y + k +threadIdx.y +PADDING_SIZE];
}
// the 90.1 offset keeps gAng positive (else it can print as -0.0000)
gMag = sqrt(gX*gX + gY*gY);
if( gX==0 )
gAng = 90;
else
gAng = atan(gY/gX)*180.0/PI + 90.1;
outputMag[(tileOriginX + ((m*blockDim.x)+threadIdx.x))*(imgCols)+ tileOriginY + threadIdx.y] = gMag;
outputAng[(tileOriginX + ((m*blockDim.x)+threadIdx.x))*(imgCols)+ tileOriginY + threadIdx.y] = gAng;
}
}
}
/* Per-pixel channel maximum: for each pixel, selects the (magnitude, angle)
 * pair of whichever of the B/G/R channels has the largest gradient magnitude
 * and writes it to the combined output.  2-D launch; x indexes rows. */
__global__ void max(float *d_outputBMag,float *d_outputBAng,
float *d_outputGMag,float *d_outputGAng,
float *d_outputRMag,float *d_outputRAng,
float *d_outputMag,float *d_outputAng,
int imgRows, int imgCols)
{
    const int row = blockIdx.x*blockDim.x + threadIdx.x;
    const int col = blockIdx.y*blockDim.y + threadIdx.y;
    if (row >= imgRows || col >= imgCols)
        return;
    const int p = row*imgCols + col;           // flat pixel index
    float bestMag = d_outputBMag[p];
    float bestAng = d_outputBAng[p];
    if (bestMag < d_outputGMag[p]) {
        bestMag = d_outputGMag[p];
        bestAng = d_outputGAng[p];
    }
    if (bestMag < d_outputRMag[p]) {
        bestMag = d_outputRMag[p];
        bestAng = d_outputRAng[p];
    }
    d_outputMag[p] = bestMag;
    d_outputAng[p] = bestAng;
}
/*
 * Per-block 9-bin orientation histogram (HOG-style): each thread splits its
 * pixel's gradient magnitude between the two bins bracketing its direction
 * (bins are 20 degrees wide), accumulating with atomics into this block's
 * 9-float slice of `output`.
 *
 * NOTE(review): there is no i<height / j<width guard — the launch must cover
 * the image exactly or out-of-bounds reads occur; confirm at the launch site.
 * NOTE(review): if direction >= 180, `low` becomes 9 and the first atomicAdd
 * writes past this block's 9 bins (into the next block's slice).  The angle
 * kernels above produce values up to atan(...)+90.1 ~= 180.1 — confirm the
 * expected direction range.
 */
__global__ void histogram(float *mag,float *dir, int height, int width,float *output)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
int i = blockIdx.y * blockDim.y + threadIdx.y;
float magnitude = mag[i*width+j];
float direction = dir[i*width+j];
// flat block id selects this block's 9-bin slice of `output`
int blockNum = blockIdx.y*gridDim.x + blockIdx.x;
// one thread zeroes the block's 9 bins before accumulation starts
if( threadIdx.x==0 && threadIdx.y==0 )
{
for(int k=0;k<9;++k)
output[k+blockNum*9]=0;
}
// all threads wait for the initialization above
__syncthreads();
// lower of the two 20-degree bins bracketing `direction`
int low = (direction/20);
// linear (bilinear in angle) split of the magnitude between the two bins
atomicAdd(&output[blockNum*9+low],magnitude*((low+1)*20 -direction)/20.0);
atomicAdd(&output[blockNum*9+(low+1)%9],magnitude*((direction-low*20)/20.0));
}
/*
 * L2 block normalization (HOG): treats `input` as a Y x X grid of 9-bin
 * cell histograms, forms each 2x2-cell (36-value) descriptor, divides it by
 * the descriptor's L2 norm, and writes the normalized 36-vector to `output`.
 * Launch: one block of X x Y threads (one thread per cell).
 *
 * NOTE(review): if all 36 entries are zero, `norm` is 0 and the divisions
 * below produce inf/nan — confirm inputs cannot be all-zero, or add epsilon.
 */
__global__
void l2norm(const int *input, float *output)
{
// one 9-bin histogram per cell, staged in shared memory
__shared__ int hist[Y*X*9];
// cell coordinates of this thread within the X x Y grid
int x = threadIdx.x;
int y = threadIdx.y;
// each thread copies its own cell's 9 bins from global memory
for(int i=0; i<9; ++i)
{
*(hist + 9*(y*X + x) + i) = *(input + 9*(y*X + x) + i);
}
// all cells must be resident before neighbours are read
__syncthreads();
// the 2x2 window needs a right and bottom neighbour, so the last
// row/column of cells produces no descriptor
if(x != X-1 && y != Y-1)
{
// L2 norm over the 4 cells (36 values) of this descriptor
float norm = 0;
for(int i=0; i<9; ++i)
{
norm += powf(*(hist + 9*(y*X + x) + i), 2);
norm += powf(*(hist + 9*(y*X + x + 1) + i), 2);
norm += powf(*(hist + 9*((y + 1)*X + x + 1) + i), 2);
norm += powf(*(hist + 9*((y + 1)*X + x) + i), 2);
}
norm = sqrt(norm);
// write the normalized 36-vector: cells in order TL, TR, BR, BL
for(int i=0; i<9; ++i)
{
*(output + 36*(y*(X-1) + x) + i) = *(hist + 9*(y*X + x) + i)/norm;
*(output + 36*(y*(X-1) + x) + i + 9) = *(hist + 9*(y*X + x + 1) + i)/norm;
*(output + 36*(y*(X-1) + x) + i + 18) = *(hist + 9*((y + 1)*X + x + 1) + i)/norm;
*(output + 36*(y*(X-1) + x) + i + 27) = *(hist + 9*((y + 1)*X + x) + i)/norm;
}
}
}
/* Linear SVM score for one detection window: dot(inputs, weigths) - bias,
 * computed as per-column partial dot products followed by a shared-memory
 * tree reduction.  One block; blockDim.x = number of feature columns
 * (must be a power of two for the reduction and <= 18*7).
 * Result is written to svmScores[0]. */
__global__ void LinearSVMEvaluation(float *inputs, float *weigths, float bias,
int blockSizeX, int blockSizeY, int numBlocksPerWindowX,
int numBlocksPerWindowY, float *svmScores
)
{
    int col = threadIdx.x;
    int totalCols = blockDim.x;
    __shared__ float sum[18*7];
    /* BUG FIX: the original assigned sum[col] inside the row loop, so every
     * iteration overwrote the previous product and only the LAST row's
     * product survived into the reduction.  Accumulate the per-column dot
     * product in a register, then publish it to shared memory once. */
    float acc = 0.0f;
    for (int i = 0; i < numBlocksPerWindowY * blockSizeY; i++) {
        acc += inputs[i * totalCols + col] * weigths[i * totalCols + col];
    }
    sum[col] = acc;
    __syncthreads();           /* all partial sums visible before reducing */
    /* tree reduction over the columns (blockDim.x assumed power of two) */
    for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
        if (col < s) {
            sum[col] += sum[col + s];
        }
        __syncthreads();
    }
    /* subtract bias and store the final score in global memory */
    if (col == 0) {
        sum[0] -= bias;
        svmScores[0] = sum[0];
    }
}
20,604 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cstdio>
#include <math.h>
/* Single-block pairwise sum reduction in dynamic shared memory.
 * Launch: <<<1, n/2, n*sizeof(int)>>> for n elements (n a power of two);
 * each thread stages two elements.  The total is written back to d[0]. */
__global__ void sumSingleBlockSharedMem(int* d) {
    // dynamic shared memory sized at launch (one int per input element)
    extern __shared__ int dcopy[];
    int tid = threadIdx.x;
    // each thread copies its two elements from global to shared memory
    dcopy[tid*2] = d[tid*2];
    dcopy[tid*2+1] = d[tid*2+1];
    /* BUG FIX: the original had no barriers at all, so threads could read
     * partial sums before other threads had written them — a data race that
     * only appeared to work when the whole block fit in one warp. */
    __syncthreads();
    for (int tc=blockDim.x, stepSize=1; tc>0; tc>>=1, stepSize<<=1) {
        if (tid < tc) {
            int pa = tid * stepSize * 2;
            int pb = pa + stepSize;
            dcopy[pa] += dcopy[pb];
        }
        // loop bounds are uniform across the block, so this barrier is
        // reached by every thread on every iteration
        __syncthreads();
    }
    // publish the reduced value to global memory
    if (tid == 0) {
        d[0] = dcopy[0];
    }
}
/* Host driver: sums 1..32 on the device with the shared-memory reduction
 * kernel and prints the result. */
int main() {
    const int count = 32;                    // number of input elements
    printf("%d elements\n", count);
    const int size = count * sizeof(int);    // total bytes for the array
    int h[count];
    // fill with 1..count, so the expected sum is count*(count+1)/2
    for (int i = 0; i < count; i++) {
        h[i] = i + 1;
    }
    int *d;
    cudaMalloc(&d, size);
    cudaMemcpy(d, h, size, cudaMemcpyHostToDevice);
    // one block, count/2 threads (each loads two elements); the third launch
    // argument reserves `size` bytes of dynamic shared memory
    sumSingleBlockSharedMem<<<1, count/2, size>>>(d);
    // only the reduced value at d[0] is needed back on the host
    cudaMemcpy(h, d, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(d);
    printf("Sum is %d\n", h[0]);
}
20,605 | #include "includes.h"
/* Imposes the prescribed azimuthal velocities on the two radial boundary
 * rings of a disc stored as nrad rows of nsec sectors: ring 0 receives
 * VKepIn, ring nrad-1 receives VKepOut, both corrected for the rotating
 * frame (Rmed[ring]*OmegaFrame).  One thread per sector. */
__global__ void ApplySubKeplerianBoundaryKernel(double *VthetaInt, double *Rmed, double OmegaFrame, int nsec, int nrad, double VKepIn, double VKepOut)
{
    const int sector = threadIdx.x + blockDim.x*blockIdx.x;
    if (sector < nsec)
    {
        // inner boundary (ring 0)
        VthetaInt[sector] = VKepIn - Rmed[0]*OmegaFrame;
        // outer boundary (ring nrad-1)
        const int outer = nrad - 1;
        VthetaInt[outer*nsec + sector] = VKepOut - Rmed[outer]*OmegaFrame;
    }
}
/* Accumulates dot(g_VecV, g_VecW) into *g_Lamda: each block reduces its
 * slice of the element-wise products in dynamic shared memory (one float per
 * thread, sized at launch; blockDim.x assumed a power of two), then thread 0
 * adds the block total atomically. */
__global__ void ComputeLamda( float* g_VecV, float* g_VecW, float * g_Lamda,int N)
{
    // dynamic shared memory, one slot per thread
    extern __shared__ float partial[];
    const unsigned int lane = threadIdx.x;
    const unsigned int gidx = blockIdx.x*blockDim.x + threadIdx.x;
    // threads past N contribute the additive identity
    partial[lane] = (gidx < N) ? g_VecV[gidx] * g_VecW[gidx] : 0.0f;
    __syncthreads();
    // shared-memory tree reduction
    for (unsigned int half = blockDim.x / 2; half > 0; half >>= 1) {
        if (lane < half) {
            partial[lane] += partial[lane + half];
        }
        __syncthreads();
    }
    // one atomic per block publishes the partial sum
    if (lane == 0) atomicAdd(g_Lamda, partial[0]);
}
20,607 | #include "matrix.cuh"
#define ROW_INDEX 0
#define COL_INDEX 1
#define NUM_INDEXES 2
/* Flattens every matrix in `list` (row-major, in list order) into a single
 * 1 x total row vector.  The caller owns the returned matrix.
 * Preconditions (asserted): list and every contained matrix are non-NULL. */
matrix_t* roll_matrix_list(matrix_list_t* list)
{
	unsigned int i;
	assert(list != NULL);
	for (i = 0; i < list->num; i++)
	{
		assert(list->matrix_list[i] != NULL);
	}
	// total element count across all matrices
	unsigned int total = 0;
	for (i = 0; i < list->num; i++)
	{
		total += list->matrix_list[i]->rows * list->matrix_list[i]->cols;
	}
	matrix_t* flat = matrix_constructor(1, total);
	float* dst = flat->matrix;
	// copy each matrix's storage back-to-back into the vector
	for (i = 0; i < list->num; i++)
	{
		unsigned int count = list->matrix_list[i]->rows * list->matrix_list[i]->cols;
		memcpy(dst, list->matrix_list[i]->matrix, count * sizeof(float));
		dst += count;
	}
	return flat;
}
/* Inverse of roll_matrix_list: slices the flat `vector` back into `num`
 * matrices whose shapes are given by sizes[i] = {rows, cols}.  The caller
 * owns the returned list.  Precondition (asserted): vector is non-NULL. */
matrix_list_t* unroll_matrix_list(matrix_t* vector, int num, unsigned int sizes[][NUM_INDEXES])
{
	assert(vector != NULL);
	matrix_list_t* list = matrix_list_constructor(num);
	float* src = vector->matrix;
	unsigned int i;
	for (i = 0; i < num; i++)
	{
		unsigned int rows = sizes[i][ROW_INDEX];
		unsigned int cols = sizes[i][COL_INDEX];
		list->matrix_list[i] = matrix_constructor(rows, cols);
		// copy this matrix's slice out of the vector, then advance
		memcpy(list->matrix_list[i]->matrix, src, rows * cols * sizeof(float));
		src += rows * cols;
	}
	return list;
}
|
20,608 | //hello.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
/* Host-only sanity program: prints a greeting and exits (no device work). */
int main(void) {
	printf("Hello CUDA \n");
	return 0;
}
|
20,609 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#include <stdio.h>
extern "C"
/* Pairwise 3-D Euclidean distances between two point sets stored as
 * structure-of-arrays: in1 holds columns1 points (x row, then y row, then z
 * row), in2 holds columns2 points likewise.  Thread idx computes the
 * distances from point idx of in1 to every point of in2, writing row idx of
 * the columns1 x columns2 output matrix. */
__global__ void distGrid(float *in1, float *in2, float *out, int columns1, int columns2 )
{
	int idx = threadIdx.x + blockIdx.x*blockDim.x;
	if (idx < columns1)
	{
		for (int i = 0; i < columns2; i++)
		{
			float dx = in1[idx] - in2[i];
			float dy = in1[idx + columns1] - in2[i + columns2];
			float dz = in1[idx + 2 * columns1] - in2[i + 2 * columns2];
			/* BUG FIX: the original wrote out[idx*i], which collapses many
			 * (idx, i) pairs onto the same slot (and sends every i==0 result
			 * to out[0]).  Row-major indexing of the columns1 x columns2
			 * result is idx*columns2 + i.  Also use sqrtf for float. */
			out[idx*columns2 + i] = sqrtf(dx*dx + dy*dy + dz*dz);
		}
	}
}
|
20,610 | /** Modifed version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA
* The modifications are
* removed texture memory usage
* removed split query KNN computation
* modified global distance computation
*
* Last modified by Lin Dong <ldong1@andrew.cmu.edu> 05/12/2019
*/
#include <cstdio>
#include "cuda.h"
// Constants used by the program
#define BLOCK_DIM 16
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
/* Tiled computation of the wA x wB "distance" matrix AB between reference
 * points A and query points B (dim rows each), using BLOCK_DIM x BLOCK_DIM
 * shared-memory tiles.
 *
 * NOTE(review): per the file header this distance was deliberately modified:
 * the inner loop accumulates ssd -= min(A_k, B_k) over the dimensions (a
 * negated sum of element-wise minima), NOT a Euclidean sum of squared
 * differences — do not "fix" it back without confirming the intent.
 * NOTE(review): zero-padded tile entries (beyond wA/wB/dim) contribute
 * min(x, 0) terms for the padded lanes; those lanes never write output, but
 * confirm the k-loop over the full BLOCK_DIM is intended for partial tiles. */
__global__ void cuComputeDistanceGlobal( float* A, int wA,
float* B, int wB, int dim, float* AB){
// Declaration of the shared memory arrays As and Bs used to store the sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd = 0;
// Loop parameters: every thread writes the same values, so no race in effect
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * wA;
step_B = BLOCK_DIM * wB;
end_A = begin_A + (dim-1) * wA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory & to computations and to write in output matrix
int cond2 = (begin_A + ty < wA); // used to computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads one element of each matrix
if (a/wA + ty < dim){
shared_A[ty][tx] = (cond0)? A[a + wA * ty + tx] : 0;
shared_B[ty][tx] = (cond1)? B[b + wB * ty + tx] : 0;
}
else{
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Accumulate the modified distance; each thread handles one (ty, tx) output
if (cond2 && cond1){
for (int k = 0; k < BLOCK_DIM; ++k){
// tmp = min(A element, B element) for dimension k of this tile
if (shared_A[k][ty] > shared_B[k][tx]) {
tmp = shared_B[k][tx];
} else {
tmp = shared_A[k][ty];
}
ssd -= tmp;
}
}
// Synchronize to make sure that the preceding computation is done before loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1)
AB[(begin_A + ty) * wB + begin_B + tx] = ssd;
}
/**
* Gathers k-th smallest distances for each column of the distance matrix in the top.
*
* @param dist distance matrix
* @param ind index matrix
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
/* Keeps the k smallest distances of each column of the height x width
 * distance matrix sorted in its first k rows, recording matching 1-based
 * reference indices in `ind`.  One thread per column; insertion into the
 * sorted prefix is sequential within the thread. */
__global__ void cuInsertionSort(float *dist, int *ind, int width, int height, int k){
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
// Pointer shift to this thread's column; rows are `width` apart
p_dist = dist + xIndex;
p_ind = ind + xIndex;
// Row 0 starts as the current maximum of the sorted prefix;
// indices stored in `ind` are 1-based
max_dist = p_dist[0];
p_ind[0] = 1;
// Part 1: insertion-sort the first k elements of the column
for (l=1; l<k; l++){
curr_row = l * width;
curr_dist = p_dist[curr_row];
if (curr_dist<max_dist){
// find the insertion position i within the sorted prefix
i=l-1;
for (int a=0; a<l-1; a++){
if (p_dist[a*width]>curr_dist){
i=a;
break;
}
}
// shift rows (i..l-1] down one slot, then insert
for (j=l; j>i; j--){
p_dist[j*width] = p_dist[(j-1)*width];
p_ind[j*width] = p_ind[(j-1)*width];
}
p_dist[i*width] = curr_dist;
p_ind[i*width] = l+1;
} else {
p_ind[l*width] = l+1;
}
// row l now holds the largest of the first l+1 values
max_dist = p_dist[curr_row];
}
// Part 2: stream the remaining rows through the sorted top-k window
max_row = (k-1)*width;
for (l=k; l<height; l++){
curr_dist = p_dist[l*width];
if (curr_dist<max_dist){
// find insertion position among the k kept values
i=k-1;
for (int a=0; a<k-1; a++){
if (p_dist[a*width]>curr_dist){
i=a;
break;
}
}
// shift down, dropping the old k-th value, and insert
for (j=k-1; j>i; j--){
p_dist[j*width] = p_dist[(j-1)*width];
p_ind[j*width] = p_ind[(j-1)*width];
}
p_dist[i*width] = curr_dist;
p_ind[i*width] = l+1;
max_dist = p_dist[max_row];
}
}
}
}
/**
* Computes the square root of the first line (width-th first element)
* of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param k number of neighbors to consider
*/
/* Takes the square root of the first k rows of the distance matrix in place
 * (the rows holding the kept k-nearest squared distances).  2-D launch:
 * x covers columns (width), y covers the k rows. */
__global__ void cuParallelSqrt(float *dist, int width, int k){
	const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
	const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
	if (col < width && row < k) {
		const unsigned int at = row*width + col;
		dist[at] = sqrt(dist[at]);
	}
}
|
20,611 | #include <thrust/reduce.h>
#include <thrust/functional.h>
#include <thrust/device_ptr.h>
/* Sums n floats already resident in device memory using thrust::reduce.
 * `in` must be a device pointer; returns the total as a float. */
float sum_thrust(float* in, unsigned int n) {
	thrust::plus<float> binary_op;
	// wrap the raw device pointer for thrust
	thrust::device_ptr<float> begin = thrust::device_pointer_cast(in);
	/* BUG FIX: thrust::reduce's accumulator takes the TYPE of the init
	 * value; the original passed the int literal 0, so every partial sum was
	 * truncated to int.  A float init keeps the accumulation in float. */
	return thrust::reduce(begin, begin + n, 0.0f, binary_op);
}
20,612 | #include "includes.h"
/* Gradient helper for integral-image ("box filter") windows: for every
 * (batch, plane, window, x, y) output cell, evaluates the finite difference
 * of the box sum with respect to moving the yMax boundary by one, via four
 * clamped integral-image lookups.  The integral image has a one-cell border,
 * so its row stride is (w+1).  One thread per output element; the flat
 * thread id is peeled into (y, x, window, plane, batch) below.
 *
 * NOTE(review): NUM_THREADS is not defined in this chunk — presumably it
 * comes from includes.h and equals the launch blockDim.x; confirm. */
__global__ void yMaxDeltaIntegralKernel( const float *intData, const int intDataStrideChannel, float *tmpArray, const int batchSize, const int nInputPlane, const int nWindows, const int h, const int w, const float *xMin, const float *xMax, const float *yMax) {
int id = NUM_THREADS * blockIdx.x + threadIdx.x;
tmpArray += id; // tmpArray now points to our output pixel
const int y = id % w + 1; id /= w; // 1-indexed
const int x = id % h + 1; id /= h; // 1-indexed
const int windowIdx = id % nWindows; id /= nWindows;
// `id` is now the current global input plane number
intData += id * intDataStrideChannel;
// window parameters are stored per (plane, window) pair
const int globalWindowIdx = (id % nInputPlane) * nWindows + windowIdx; id /= nInputPlane;
const int & batchIdx = id;
if (batchIdx < batchSize) {
// integer window bounds derived from the fractional window parameters
const int xMinInt = (int)ceil(xMin[globalWindowIdx]-1);
// const int yMinInt = (int)ceil(yMin[globalWindowIdx]-1);
const int xMaxInt = (int)floor(xMax[globalWindowIdx]);
const int yMaxInt = (int)floor(yMax[globalWindowIdx]);
// four-corner integral-image difference; indices clamped to [0, h]/[0, w]
float delta = 0;
delta +=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta -=
intData[max(0,min(x+xMaxInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
delta -=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt+1, w))];
delta +=
intData[max(0,min(x+xMinInt, h))*(w+1)
+ max(0,min(y+yMaxInt , w))];
// zero the contribution when the moved boundary falls outside the image
delta *= (y+yMaxInt >= 1 and y+yMaxInt < w);
*tmpArray = delta;
}
}
20,613 | #include <stdio.h>
#include <stdlib.h>
/* Element-wise vector addition: C[i] = A[i] + B[i].
 * BUG FIX: the original indexed with threadIdx.x only, so in a multi-block
 * launch every block rewrote the same leading elements and the rest of the
 * vector was never computed.  Use the global thread index.
 * Precondition: the launch provides exactly one thread per element. */
__global__ void VecAdd(float *A, float *B, float *C)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    C[i] = A[i] + B[i];
}
/* Prints the first N entries of vec, space-separated, on one line. */
void printVec(int N, float *vec)
{
    for (int i = 0; i < N; i++)
    {
        printf("%.2f ", vec[i]);
    }
    printf("\n");
}
/* Host driver: reports the available devices, adds two N-element vectors on
 * the GPU, and prints the first entries of the operands and the result. */
int main(int argc, char const *argv[])
{
    int deviceCount, device;
    cudaGetDeviceCount(&deviceCount);
    for (device = 0; device < deviceCount; ++device)
    {
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, device);
        printf("Device %d (%s) has %d multiprocessors, and warps of size %d\n",
               device, deviceProp.name, deviceProp.multiProcessorCount, deviceProp.warpSize);
    }
    int N = 50000;
    size_t size = N * sizeof(float);
    float *hA = (float *)malloc(size);
    float *hB = (float *)malloc(size);
    float *hC = (float *)malloc(size);
    for (int i = 0; i < N; i++)
    {
        hA[i] = i;
        hB[i] = -2*i;
    }
    printVec(10, hA);
    printf("+\n");
    printVec(10, hB);
    float *dA, *dB, *dC;
    cudaMalloc(&dA, size);
    cudaMalloc(&dB, size);
    cudaMalloc(&dC, size);
    cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
    /* BUG FIX: the original launch <<<4, N/4>>> requested 12500 threads per
     * block — far above the 1024-per-block hardware limit — so the kernel
     * never ran and hC came back uninitialized.  200 blocks of 250 threads
     * cover N = 50000 elements exactly (no tail guard needed). */
    const int threadsPerBlock = 250;
    VecAdd<<<N / threadsPerBlock, threadsPerBlock>>>(dA, dB, dC);
    cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
    printf("=\n");
    printVec(10, hC);
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    free(hA);
    free(hB);
    free(hC);
}
20,614 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
unsigned int getmaxcu(unsigned int *, unsigned int);
/* Entry point: fills an array of `size` random values (0..size-1), prints
 * them, and reports the maximum as computed on the GPU by getmaxcu. */
int main(int argc, char *argv[])
{
   unsigned int size = 0;  // The size of the array
   unsigned int i;  // loop index
   unsigned int * numbers; //pointer to the array
   if(argc !=2)
   {
      printf("usage: maxseq num\n");
      printf("num = size of the array\n");
      exit(1);
   }
   size = atol(argv[1]);
   /* BUG FIX: size == 0 made rand() % size divide by zero below */
   if (size == 0)
   {
      printf("num must be greater than 0\n");
      exit(1);
   }
   numbers = (unsigned int *) malloc(size * sizeof(unsigned int));
   if( !numbers )
   {
      printf("Unable to allocate mem for an array of size %u\n", size);
      exit(1);
   }
   srand(time(NULL)); // setting a seed for the random number generator
   // Fill-up the array with random numbers from 0 to size-1
   for( i = 0; i < size; i++){
      numbers[i] = rand() % size;
      /* BUG FIX: numbers[i] is unsigned — %u, not %d */
      printf("%u\n", numbers[i]);
   }
   unsigned int max = getmaxcu(numbers, size);
   printf(" The maximum number in the array is: %u\n", max);
   free(numbers);
   exit(0);
}//end of main
/*
input: pointer to an array of long int
number of elements in the array
output: the maximum number of the array
*/
/* Stage-1 kernel: each block reduces its slice of globalInputArr to a single
 * maximum in shared memory (one unsigned int per thread, sized at launch;
 * blockDim.x must be a power of two) and stores it at
 * globalOutputArr[blockIdx.x] for the second-stage kernel to combine. */
__global__ void getmaxcu(unsigned int* globalInputArr, unsigned int* globalOutputArr, unsigned int* sizeArr){
    extern __shared__ unsigned int sdata[];
    unsigned int size = sizeArr[0];
    unsigned int tid = threadIdx.x;
    unsigned int gid = (blockIdx.x * blockDim.x) + threadIdx.x; //unique thread index
    sdata[tid] = 0; // 0 is the identity for max over unsigned values
    if(gid < size){
        sdata[tid] = globalInputArr[gid];
    }
    __syncthreads();
    /* BUG FIX: the tree reduction was commented out, so each block reported
     * only its FIRST element instead of its maximum.  Restore it. */
    for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
        if (tid < s) {
            sdata[tid] = max(sdata[tid], sdata[tid + s]);
        }
        __syncthreads();
    }
    if (tid == 0){
        globalOutputArr[blockIdx.x] = sdata[0]; // publish this block's maximum
    }
}
/* Stage-2 kernel: a single block reduces the per-block maxima in
 * globalOutputArr (blockDim.x entries, caller must size/zero it accordingly)
 * down to one value in max[0].  blockDim.x must be a power of two. */
__global__ void finalmaxcu(unsigned int* globalOutputArr, unsigned int* max){
    int tid = threadIdx.x;
    extern __shared__ unsigned int sdata[];
    sdata[tid] = globalOutputArr[tid]; // one per-block maximum per thread
    __syncthreads();
    /* BUG FIX: the tree reduction was commented out, so max[0] received only
     * the first block's result instead of the overall maximum.  Restore it. */
    for (unsigned int s = blockDim.x/2; s > 0; s >>= 1){
        if (tid < (int)s){
            if (sdata[tid] < sdata[tid + s]){
                sdata[tid] = sdata[tid + s];
            }
        }
        __syncthreads();
    }
    if (tid == 0){
        max[0] = sdata[0];
    }
}
/* Host wrapper: returns the maximum of numbers[0..num_elem-1] computed on
 * the GPU in two stages (per-block maxima, then one combining block). */
unsigned int getmaxcu(unsigned int* numbers, unsigned int num_elem){
    const unsigned int threads = 128;
    /* finalmaxcu runs as one 128-thread block and reads exactly 128 slots,
     * so stage 1 is capped at 128 blocks.
     * NOTE(review): inputs larger than 128*128 elements are only partially
     * scanned with this fixed configuration (the original's fixed 8 blocks
     * covered just 1024 elements) — confirm expected input sizes or move
     * stage 1 to a grid-stride loop. */
    unsigned int nblocks = (num_elem + threads - 1) / threads;
    if (nblocks > threads) nblocks = threads;
    unsigned int* sizeArr = (unsigned int*) malloc(sizeof(unsigned int)); //host copy of the size
    sizeArr[0] = num_elem;
    unsigned int* size; //device copy of the size
    cudaMalloc((void**)&size, sizeof(unsigned int));
    cudaMemcpy((void*) size, (void*) sizeArr, sizeof(unsigned int), cudaMemcpyHostToDevice);
    unsigned int* globalInputArr;
    cudaMalloc((void**)&globalInputArr, num_elem * sizeof(unsigned int));
    cudaMemcpy((void*) globalInputArr, (void*) numbers, num_elem * sizeof(unsigned int), cudaMemcpyHostToDevice);
    /* BUG FIX: allocate a fixed 128-slot result array (stage 2 always reads
     * 128 entries) and zero it so unused slots act as the identity for max;
     * the original left slots beyond the launched blocks uninitialized. */
    unsigned int* globalOutputArr;
    cudaMalloc((void**)&globalOutputArr, threads * sizeof(unsigned int));
    cudaMemset(globalOutputArr, 0, threads * sizeof(unsigned int));
    unsigned int* max;
    cudaMalloc((void**)&max, sizeof(unsigned int)); //device slot for the final maximum
    /* BUG FIX: dynamic shared memory needs one word PER THREAD (128), not
     * num_elem/8 words, which was wrong for every num_elem != 1024 and could
     * exceed the per-block shared-memory limit for large inputs. */
    getmaxcu<<<nblocks, threads, threads * sizeof(unsigned int)>>>(globalInputArr, globalOutputArr, size);
    /* BUG FIX: the original copied num_elem words into an 8-word host buffer
     * (heap overflow); size the buffer and the copy identically. */
    unsigned int* copy = (unsigned int*) malloc(threads * sizeof(unsigned int));
    cudaMemcpy((void*) copy, (void*) globalOutputArr, threads * sizeof(unsigned int), cudaMemcpyDeviceToHost);
    for(int i = 0; i < 8; i++){
        printf("%u,",copy[i]); // debug peek at the first per-block maxima
    }
    printf("\n");
    finalmaxcu<<<1, threads, threads * sizeof(unsigned int)>>>(globalOutputArr, max);
    unsigned int* maxNum = (unsigned int*) malloc(sizeof(unsigned int));
    cudaMemcpy((void*) maxNum, (void*) max, sizeof(unsigned int), cudaMemcpyDeviceToHost);
    unsigned int result = maxNum[0];
    /* release everything (the original leaked sizeArr, copy, maxNum and the
     * device `size` buffer) */
    cudaFree(max);
    cudaFree(size);
    cudaFree(globalInputArr);
    cudaFree(globalOutputArr);
    free(sizeArr);
    free(copy);
    free(maxNum);
    return result;
}
/* Sequential reference: returns the largest value in num[0..size-1].
 * Precondition: size >= 1 (num[0] is read unconditionally). */
unsigned int getmax(unsigned int num[], unsigned int size)
{
    unsigned int best = num[0];
    for (unsigned int k = 1; k < size; k++) {
        if (num[k] > best)
            best = num[k];
    }
    return best;
}
20,615 | #include <iostream>
#include <cassert>
#include<algorithm>
using namespace std;
/* Small pointer-arithmetic demo: compares element 0 of a byte array against
 * element 5 and prints both (as ints) when the first is smaller. */
int main()
{
    u_char samples[10] = {1,2,5,6,7,10,8,4,3,9};
    u_char *cursor = samples;
    if (cursor[0] < cursor[5])
    {
        cout << static_cast<int>(cursor[0]) << endl;
        cout << static_cast<int>(cursor[5]) << endl;
    }
    return 0;
}
20,616 | #include <iostream>
#include <fstream>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <stdlib.h>
#include <vector>
#include <random>
using namespace std;
/* Debug helper: dumps the adjacency structure, one "node: neighbours" line
 * per node.  Iteration order is unspecified (unordered containers).
 * IMPROVEMENT: takes the map by const reference — the original copied the
 * entire graph on every call. */
void printNeighbours(const unordered_map<long, unordered_set<long>>& neighbours) {
    for (const auto& n : neighbours) {
        cout << n.first << ": ";
        for (const auto& s : n.second) {
            cout << s << " ";
        }
        cout << endl;
    }
}
/* Debug helper: prints each of the numNodes*walkPerNode walks as one line of
 * walkLength space-separated node ids. */
void printWalks(long** walks, int numNodes, int walkPerNode, int walkLength) {
    const int totalWalks = numNodes * walkPerNode;
    for (int w = 0; w < totalWalks; ++w) {
        for (int step = 0; step < walkLength; ++step) {
            cout << walks[w][step] << " ";
        }
        cout << endl;
    }
}
/* Chooses the next node of a node2vec walk from curNode, biased by the
 * return parameter p and the in-out parameter q.  prevNode == -1 marks the
 * first step, which samples uniformly among curNode's neighbours.
 * Preconditions: curNode (and prevNode when != -1) exist in `neighbours`
 * and curNode has at least one neighbour.
 * IMPROVEMENT: takes the map by const reference (was copied per call). */
long findNextNode(const unordered_map<long, unordered_set<long>>& neighbours, long curNode, long prevNode, double p, double q) {
    /* BUG FIX: the engine was a fresh default-seeded local, so every call
     * replayed the identical random sequence and each biased step always
     * picked the same neighbour.  A static engine keeps state across calls. */
    static default_random_engine generator;
    const unordered_set<long>& curSet = neighbours.at(curNode);
    vector<long> curNeighbours(curSet.begin(), curSet.end());
    if (prevNode == -1) {
        // first step: uniform choice
        return curNeighbours[rand() % curNeighbours.size()];
    } else {
        const unordered_set<long>& prevNeighbours = neighbours.at(prevNode);
        vector<double> weights(curNeighbours.size());
        for (size_t i = 0; i < weights.size(); i++) {
            long nextNode = curNeighbours[i];
            if (nextNode == prevNode) {
                weights[i] = 1.0 / p;   // return to the previous node
            } else if (prevNeighbours.find(nextNode) != prevNeighbours.end()) {
                weights[i] = 1.0;       // stays at distance 1 (BFS-like)
            } else {
                weights[i] = 1.0 / q;   // moves outward (DFS-like)
            }
        }
        discrete_distribution<int> dist(weights.begin(), weights.end());
        return curNeighbours[dist(generator)];
    }
}
/* Generates walkPerNode random walks of walkLength steps starting from every
 * node, using findNextNode for the biased transitions.  Returns a freshly
 * allocated array of walkPerNode*|V| rows of walkLength node ids; the caller
 * owns (and must delete[]) each row and the row array itself. */
long** generateWalks(unordered_map<long, unordered_set<long>> neighbours, int walkPerNode, int walkLength, double p, double q) {
    long** walks = new long*[walkPerNode * neighbours.size()];
    int counter = 0;
    for (auto& neighbour : neighbours) {
        long node = neighbour.first;
        for (int i = 0; i < walkPerNode; i++) {
            walks[counter + i] = new long[walkLength];
            walks[counter + i][0] = node;
            /* BUG FIX: node ids are `long`; the original tracked them in
             * `int` locals, silently truncating ids above INT_MAX. */
            long curNode = node;
            long prevNode = -1;
            for (int j = 1; j < walkLength; j++) {
                long nextNode = findNextNode(neighbours, curNode, prevNode, p, q);
                walks[counter + i][j] = nextNode;
                prevNode = curNode;
                curNode = nextNode;
            }
        }
        counter += walkPerNode;
    }
    return walks;
}
/* Entry point: reads an undirected edge list from the file named by argv[1],
 * builds the adjacency map, generates node2vec walks, and prints them. */
int main(int argc, char* argv[]) {
    string fileName;
    int walkPerNode = 1;
    int walkLength = 10;
    double p = 1;
    double q = 2;
    if (argc == 2) {
        fileName = argv[1];
    } else {
        cerr << "Invalid argument, must provide path to edge list" << endl;
        return 1;
    }
    unordered_map<long, unordered_set<long>> neighbours = {};
    ifstream in(fileName);
    long s, e;
    while (in >> s >> e) {
        if (s == e) {
            cerr << "Loop edge is not supported" << endl;
            return 1;
        } else if (s <= 0 || e <= 0) {
            /* BUG FIX: the check rejects ids <= 0, but the old message
             * claimed ">= 0" was acceptable; make the message match. */
            cerr << "node must be greater than 0" << endl;
            return 1;
        }
        // operator[] default-constructs the set on first sight of a node,
        // so the explicit empty-set insertion was redundant
        neighbours[s].insert(e);
        neighbours[e].insert(s);
    }
    /* BUG FIX: removed an unused cudaMallocManaged allocation (cuNeighbours)
     * that was never read, never launched against, and never freed. */
    long** walks = generateWalks(neighbours, walkPerNode, walkLength, p, q);
    printWalks(walks, neighbours.size(), walkPerNode, walkLength);
    // clean up the walk rows and the row array
    for (size_t i = 0; i < walkPerNode * neighbours.size(); i++) {
        delete[] walks[i];
    }
    delete[] walks;
}
|
20,617 | #include <stdio.h>
#include <cuda.h>
#define MAX_TILE_SIZE 32
#define MAX_MASK_WIDTH 11
/*Declare the constant memory*/
__constant__ float M[MAX_MASK_WIDTH];
/***********************/
/** TODO, write KERNEL */
/***********************/
/* Tiled 1-D convolution of N (length Width) with the Mask_Width-tap filter
 * stored in constant memory M, writing to P.  Each block stages its tile
 * plus n = Mask_Width/2 halo cells on each side into shared memory. */
__global__ void Conv1D(float* N, float* P, int Mask_Width, int Width)
{
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	__shared__ float N_ds[MAX_TILE_SIZE+MAX_MASK_WIDTH-1];
	int n = Mask_Width/2;   // halo width on each side
	// global indices the left/right halo loads map to
	int halo_index_left = (blockIdx.x-1) *blockDim.x + threadIdx.x;
	int halo_index_right = (blockIdx.x+1)*blockDim.x + threadIdx.x;
	// last n threads load the left halo (ghost 0 before the array start)
	if(threadIdx.x>=blockDim.x-n)
		N_ds[threadIdx.x-(blockDim.x-n)] = (halo_index_left <0) ? 0:N[halo_index_left];
	/* BUG FIX: the centre load was unguarded, so the last block read past
	 * the end of N whenever Width was not a multiple of blockDim.x. */
	N_ds[n+threadIdx.x] = (i < Width) ? N[i] : 0;
	// first n threads load the right halo (ghost 0 past the array end)
	if(threadIdx.x<n)
		N_ds[n+blockDim.x+threadIdx.x] = (halo_index_right >= Width) ? 0:N[halo_index_right];
	// the whole tile must be staged before any thread convolves
	__syncthreads();
	// accumulate the Mask_Width-tap dot product from shared memory
	float Pvalue = 0;
	for(int j=0;j<Mask_Width;j++) Pvalue += N_ds[threadIdx.x+j]*M[j];
	/* BUG FIX: guard the store for the partial last block. */
	if (i < Width)
		P[i] = Pvalue;
}
/**/
void test(float* C, int length);
void checkCUDAError(const char *msg);
/**/
// Read a mask and an input array from stdin, run the 1D convolution kernel
// with one block of `size` threads, and print the result.
int main(int argc, char* argv[])
{
    int i;
    /* dimension of mask */
    int size_m = 0;
    scanf("%d", &size_m);
    int full_size_m = size_m*sizeof(float);
    float* h_M = (float*)malloc(full_size_m);
    for(i=0;i<size_m;++i){ scanf("%f", &h_M[i]);}
    /* dimension of array */
    int size = 0;
    scanf("%d", &size);
    int full_size = sizeof(float)*size;
    /* Allocate host memory */
    float* h_N = (float*)malloc(full_size);
    float* h_P = (float*)malloc(full_size);
    for(i=0;i<size;++i){ scanf("%f", &h_N[i]);}
    /* allocate device memory */
    float* d_N,*d_P;
    cudaMalloc(&d_N, full_size);
    cudaMalloc(&d_P, full_size);
    /* copy array to device, mask to constant memory */
    cudaMemcpy(d_N,h_N,full_size,cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(M,h_M,full_size_m);
    /* Single-block launch: NOTE(review) this assumes size <= MAX_TILE_SIZE
     * (and <= 1024 threads); larger inputs need a multi-block launch. */
    int threadsPerBlock = size;
    Conv1D<<<1, threadsPerBlock>>>(d_N, d_P,size_m, size);
    checkCUDAError("Kernel Invoking");
    /* copy result back (cudaMemcpy synchronizes with the kernel) */
    cudaMemcpy(h_P, d_P, full_size, cudaMemcpyDeviceToHost);
    /* Testing output, don't change anything! */
    test(h_P, size);
    /* BUG FIX: h_M was leaked */
    free(h_M);
    free(h_N);
    free(h_P);
    cudaFree(d_N);
    cudaFree(d_P);
    return 0;
}
/* to test the input, don't change anything! */
/* Print `length` floats from C to stdout with one decimal place each,
 * space-separated, followed by a newline. */
void test(float* C, int length){
    for (int k = 0; k < length; ++k)
        printf("%.1f ", C[k]);
    printf("\n");
}
/*function to test CUDA command*/
/* Abort the program with a diagnostic (prefixed by msg) if the most recent
 * CUDA runtime call recorded an error. */
void checkCUDAError(const char *msg)
{
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
|
20,618 | /////////////////////////
#include <stdio.h> /* Enables printing output to console */
#define N 64 /* Speficy array length value */
#define TPB 32 /* Threads per block used in kernel */
__device__
// Map index i in [0, n-1] linearly onto a float in [0, 1].
float scale(int i, int n){
    return ((float)i) / (n - 1);
}
__device__
// Absolute distance |x2 - x1| between two scalar coordinates.
float distance(float x1, float x2){
    // BUG FIX: use sqrtf — plain sqrt promotes to double precision, which is
    // needless and slow in this all-float device code.
    return sqrtf((x2-x1)*(x2-x1));
}
__global__
// For each index i, compute the distance from scale(i, len) to ref and store
// it in d_out[i]; also prints the value (debug only).
void distanceKernel(float *d_out, float ref, int len){
    const int i = blockIdx.x*blockDim.x + threadIdx.x;
    // BUG FIX: guard against the padded grid tail (len may not be a
    // multiple of blockDim.x).
    if (i >= len) return;
    const float x=scale(i,len);
    d_out[i]=distance(x,ref);
    printf("i = %2d: dist from %f to %f is %f.\n",i,ref,x,d_out[i]);
}
// Launch distanceKernel over N points and let it print each distance.
int main(){
    const float ref=0.5f;
    //Declare a pointer for an array of floats
    float *d_out=0;
    //Allocate device memory to store the output array
    cudaMalloc(&d_out, N*sizeof(float));
    //Launch kernel to compute and store distance values
    distanceKernel<<<N/TPB,TPB>>>(d_out,ref,N);
    // BUG FIX: kernel launches are asynchronous; wait for the kernel (and
    // its printf output) to complete before freeing the buffer and exiting.
    cudaDeviceSynchronize();
    cudaFree(d_out);
    return 0;
}
|
20,619 | #include<cuda_runtime.h>
#include<stdio.h>
#include<stdlib.h>
// Elementwise c[i] = a[i] + b[i] for i < *n (the element count is passed
// through a device pointer); one element per thread.
__global__ void add(int* a,int* b,int* c,int* n)
{
    int tid = blockIdx.x*blockDim.x + threadIdx.x;
    if (tid >= *n) return;
    c[tid] = a[tid] + b[tid];
}
// Read two vectors from stdin and add them on the GPU three times, using
// three different launch configurations.
int main()
{
    int a[100],b[100],c[100],n,*da,*db,*dc;
    int *dn;
    printf("Enter size: ");
    scanf("%d",&n);
    // BUG FIX: the host arrays hold at most 100 elements; reject larger (or
    // non-positive) n instead of overflowing the stack buffers.
    if (n < 1 || n > 100) {
        printf("Size must be between 1 and 100\n");
        return 1;
    }
    printf("Enter elements for A: ");
    for(int i=0;i<n;i++)
        scanf("%d",&a[i]);
    printf("Enter elements for B: ");
    for(int i=0;i<n;i++)
        scanf("%d",&b[i]);
    cudaMalloc((void**)&da,n*sizeof(int));
    cudaMalloc((void**)&db,n*sizeof(int));
    cudaMalloc((void**)&dc,n*sizeof(int));
    cudaMalloc((void**)&dn,sizeof(int));
    cudaMemcpy(da,a,n*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(db,b,n*sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(dn,&n,sizeof(int),cudaMemcpyHostToDevice);
    // Config 1: n blocks of 1 thread.
    add<<<n,1>>>(da,db,dc,dn);
    cudaMemcpy(c,dc,n*sizeof(int),cudaMemcpyDeviceToHost);
    printf("\nBlock size as N: ");
    for(int i=0;i<n;i++)
        printf("%d+%d=%d\n",a[i],b[i],c[i]);
    // Config 2: one block of n threads.
    add<<<1,n>>>(da,db,dc,dn);
    cudaMemcpy(c,dc,n*sizeof(int),cudaMemcpyDeviceToHost);
    printf("\nN Threads: ");
    for(int i=0;i<n;i++)
        printf("%d+%d=%d\n",a[i],b[i],c[i]);
    // Config 3: ceil(n/256) blocks of 256 threads.
    int tsize=256;
    add<<<(n+tsize-1)/tsize,tsize>>>(da,db,dc,dn);
    cudaMemcpy(c,dc,n*sizeof(int),cudaMemcpyDeviceToHost);
    printf("\n256 threads: ");
    for(int i=0;i<n;i++)
        printf("%d+%d=%d\n",a[i],b[i],c[i]);
    cudaFree(da);
    cudaFree(db);
    cudaFree(dc);
    cudaFree(dn);
}
20,620 |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
/* Auto-generated floating-point stress kernel (launched <<<1,1>>> by main):
 * iterates a chain of comparisons and accumulations on `comp` and prints the
 * final value. var_13..var_15 must point to device-accessible float buffers
 * with at least var_5 elements. Marked "Do not modify" by its generator. */
__global__
void compute(float comp, int var_1,float var_2,float var_3,float var_4,int var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float* var_13,float* var_14,float* var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23,float var_24,float var_25,float var_26) {
if (comp > +1.6697E28f - +1.5930E-44f - var_2 - var_3 * (+1.2367E-41f - +0.0f)) {
for (int i=0; i < var_1; ++i) {
if (comp >= var_4 / +1.7459E-35f) {
comp += -1.3952E-17f * +1.8434E-36f + var_6;
comp += var_7 * var_8 - -0.0f / (var_9 - -1.3547E-35f - +0.0f);
if (comp < var_10 + var_11) {
float tmp_1 = -1.0323E-35f;
comp = tmp_1 - +1.1624E34f / +1.6022E-42f * var_12;
}
/* inner i shadows the outer loop variable (generator artifact) */
for (int i=0; i < var_5; ++i) {
var_13[i] = -1.1417E15f;
var_14[i] = ceilf((var_16 / var_17 + (var_18 / +1.4887E36f)));
var_15[i] = (-1.8014E36f * var_19);
comp += var_15[i] / var_14[i] + var_13[i] * -1.9868E-35f + -1.7189E-44f;
}
if (comp == (var_20 / (var_21 + var_22 / var_23))) {
comp += -1.9387E-4f / (var_24 / (var_25 + (var_26 * -1.9819E-42f)));
}
}
}
}
printf("%.17g\n", comp);
}
/* Heap-allocate a 10-element float array filled with v.
 * The caller owns (and must free) the returned buffer. */
float* initPointer(float v) {
    float *buf = (float*) malloc(10 * sizeof(float));
    int k = 0;
    while (k < 10) {
        buf[k] = v;
        ++k;
    }
    return buf;
}
// Parse 27 command-line values and forward them to the generated kernel.
int main(int argc, char** argv) {
    /* BUG FIX: the kernel needs 27 arguments; fail cleanly instead of
     * reading past the end of argv. */
    if (argc < 28) {
        fprintf(stderr, "usage: %s v1 ... v27\n", argv[0]);
        return 1;
    }
    /* Program variables */
    float tmp_1 = atof(argv[1]);
    int tmp_2 = atoi(argv[2]);
    float tmp_3 = atof(argv[3]);
    float tmp_4 = atof(argv[4]);
    float tmp_5 = atof(argv[5]);
    int tmp_6 = atoi(argv[6]);
    float tmp_7 = atof(argv[7]);
    float tmp_8 = atof(argv[8]);
    float tmp_9 = atof(argv[9]);
    float tmp_10 = atof(argv[10]);
    float tmp_11 = atof(argv[11]);
    float tmp_12 = atof(argv[12]);
    float tmp_13 = atof(argv[13]);
    /* tmp_14..tmp_16 are 10-element host buffers; note they are passed to a
     * kernel as-is, which relies on the original code's behavior. */
    float* tmp_14 = initPointer( atof(argv[14]) );
    float* tmp_15 = initPointer( atof(argv[15]) );
    float* tmp_16 = initPointer( atof(argv[16]) );
    float tmp_17 = atof(argv[17]);
    float tmp_18 = atof(argv[18]);
    float tmp_19 = atof(argv[19]);
    float tmp_20 = atof(argv[20]);
    float tmp_21 = atof(argv[21]);
    float tmp_22 = atof(argv[22]);
    float tmp_23 = atof(argv[23]);
    float tmp_24 = atof(argv[24]);
    float tmp_25 = atof(argv[25]);
    float tmp_26 = atof(argv[26]);
    float tmp_27 = atof(argv[27]);
    compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24,tmp_25,tmp_26,tmp_27);
    cudaDeviceSynchronize();
    return 0;
}
|
20,621 | #include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <limits.h>
#pragma once
#define BLOCK_SIZE 32
#define BLOCK_SIZE_DIM1 1024
// Matrices are stored in row-major order:
// M(row, col) = *(M.elements + row * M.width + col)
typedef struct {
int width;
int height;
double* elements;
} Matrix;
//function to print a matrix
// Print row-major matrix A to stdout, one row per line, surrounded by
// blank lines.
void printMatrix(Matrix A) {
    printf("\n");
    for (int r = 0; r < A.height; r++) {
        for (int c = 0; c < A.width; c++)
            printf("%.4f ", A.elements[r*A.width + c]);
        printf("\n");
    }
    printf("\n");
}
// function to save matrix in an ascii file */
// Save matrix A to an ascii file, one row per line.
void saveMatrix(Matrix A, char *filename) {
    FILE *fp;
    fp = fopen(filename, "w");
    // BUG FIX: report failure instead of dereferencing a NULL stream.
    if (fp == NULL) {
        perror("fopen");
        return;
    }
    for (int i=0; i<A.height; i++) {
        for (int j=0; j<A.width; j++) {
            fprintf(fp, "%.4f ", A.elements[i*A.width+j]);
        }
        fprintf(fp, "\n");
    }
    fclose(fp);
}
/*** matlib functions listed in alphabetical order *****/
/************************ G ************************/
// matrix getCol kernel
// Copy column `num` of d_In into the column vector d_Out.
__global__
void getColKernel(Matrix d_In, Matrix d_Out, int num) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= d_In.height || c >= d_In.width) return;
    if (c == num)
        d_Out.elements[r] = d_In.elements[r*d_In.width + c];
}
// Extract column `num` of In into the column vector Out.
// Host wrapper: copies In to the device, runs getColKernel, copies back.
void getCol(Matrix In, Matrix Out, int num) {
    // load In to device memory
    Matrix d_In;
    d_In.width = In.width;
    d_In.height = In.height;
    size_t size = In.width * In.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_In.elements, size);
    cudaMemcpy(d_In.elements, In.elements, size, cudaMemcpyHostToDevice);
    // allocate Out in device memory
    Matrix d_Out;
    d_Out.width = Out.width; d_Out.height = Out.height;
    size = Out.width * Out.height * sizeof(double);
    err = cudaMalloc(&d_Out.elements, size);
    // invoke kernel over In's full index space
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (In.width + dimBlock.x - 1)/dimBlock.x, (In.height + dimBlock.y - 1)/dimBlock.y );
    getColKernel<<<dimGrid, dimBlock>>>(d_In, d_Out, num);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read Out from device memory
    err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_In.elements);
    cudaFree(d_Out.elements);
}
// matrix getRow kernel
// Copy row `num` of d_In into the row vector d_Out.
__global__
void getRowKernel(Matrix d_In, Matrix d_Out, int num) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= d_In.height || c >= d_In.width) return;
    if (r == num)
        d_Out.elements[c] = d_In.elements[r*d_In.width + c];
}
/************************ I ************************/
// check if a square matrix is symmetric
// Compare each element with its transpose partner; clear *d_result on any
// mismatch (square matrix assumed by the caller).
__global__
void isSymmetricKernel(Matrix d_A, int *d_result) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= d_A.height || c >= d_A.width) return;
    // A[r][c] vs A[c][r]
    if (d_A.elements[r*d_A.width + c] != d_A.elements[c*d_A.width + r])
        *d_result = 0;
}
// Return 1 if the (square) matrix A equals its transpose, else 0.
// The comparison runs on the device via isSymmetricKernel.
int isSymmetric(Matrix A) {
    // load A to device memory
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    // result flag starts at 1; any mismatching element clears it
    int result = 1;
    int *d_result;
    err = cudaMalloc(&d_result, sizeof(int));
    cudaMemcpy(d_result, &result, sizeof(int), cudaMemcpyHostToDevice);
    // invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
    isSymmetricKernel<<<dimGrid, dimBlock>>>(d_A, d_result);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read result from device memory
    err = cudaMemcpy(&result, d_result, sizeof(int), cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_A.elements);
    cudaFree(d_result);
    return result;
}
/************************ M ************************/
// matAdd kernel
// Elementwise C = A + B.
// BUG FIX: row must come from the y grid dimension and col from x, matching
// every other kernel in this file and the (width -> x, height -> y) grids
// the host wrappers build; the old swapped mapping mis-covered non-square
// matrices.
__global__
void matAddKernel(Matrix d_A, Matrix d_B, Matrix d_C) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if(row >= d_A.height || col >= d_A.width) return;
    d_C.elements[row*d_C.width + col] = d_A.elements[row*d_A.width + col] + d_B.elements[row*d_B.width + col];
}
// matrix matDiv kernel called by matDiv()
// Elementwise Out = A ./ B (A and B must share dimensions).
__global__
void matDivKernel(Matrix d_A, Matrix d_B, Matrix d_Out) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the bounds test used ">" instead of ">=", letting one extra
    // row and column of threads read/write out of bounds.
    if(row >= d_A.height || col >= d_A.width) return;
    int idx = row*d_A.width+col;
    d_Out.elements[idx] = d_A.elements[idx] / d_B.elements[idx];
}
// Elementwise Out = A ./ B on the device; A and B must share dimensions.
void matDiv(Matrix A, Matrix B, Matrix Out) {
    if (A.width != B.width || A.height != B.height) {
        printf("Input matrices must have the same dimension!\n");
        return;
    }
    // load A to device memory
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    // load B to device memory
    Matrix d_B;
    d_B.width = B.width;
    d_B.height = B.height;
    err = cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // allocate Out in device memory
    Matrix d_Out;
    d_Out.width = Out.width; d_Out.height = Out.height;
    size = Out.width * Out.height * sizeof(double);
    cudaMalloc(&d_Out.elements, size);
    // invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
    matDivKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_Out);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read Out from device memory
    err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_Out.elements);
}
// matrix matPlusScaler kernel called by matPlusScaler()
// Elementwise Out = In + scaler.
__global__
void matPlusScalerKernel(Matrix d_In, double scaler, Matrix d_Out) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < d_In.height && c < d_In.width) {
        int k = r * d_In.width + c;
        d_Out.elements[k] = d_In.elements[k] + scaler;
    }
}
// Elementwise Out = In + scaler, computed on the device.
void matPlusScaler(Matrix In, double scaler, Matrix Out) {
    // load In to device memory
    Matrix d_In;
    d_In.width = In.width;
    d_In.height = In.height;
    size_t size = In.width * In.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_In.elements, size);
    cudaMemcpy(d_In.elements, In.elements, size, cudaMemcpyHostToDevice);
    // allocate Out in device memory
    Matrix d_Out;
    d_Out.width = Out.width; d_Out.height = Out.height;
    size = Out.width * Out.height * sizeof(double);
    cudaMalloc(&d_Out.elements, size);
    // invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (In.width + dimBlock.x - 1)/dimBlock.x, (In.height + dimBlock.y - 1)/dimBlock.y );
    matPlusScalerKernel<<<dimGrid, dimBlock>>>(d_In, scaler, d_Out);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read Out from device memory
    err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_In.elements);
    cudaFree(d_Out.elements);
}
// matSub kernel
// Elementwise C = A - B; all three matrices share A's dimensions.
__global__
void matSubKernel(Matrix d_A, Matrix d_B, Matrix d_C) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= d_A.height || c >= d_A.width) return;
    int idx = r*d_A.width + c;
    d_C.elements[idx] = d_A.elements[idx] - d_B.elements[idx];
}
// Elementwise C = A - B on the device (A, B, C assumed same dimensions).
void matSub(Matrix A, Matrix B, Matrix C){
    // load A, B to device memory
    Matrix d_A;
    Matrix d_B;
    d_A.width = A.width;
    d_B.width = B.width;
    d_A.height = A.height;
    d_B.height = B.height;
    size_t size = A.width * A.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_A.elements, size);
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    err = cudaMalloc(&d_B.elements, size);
    cudaMemcpy(d_B.elements, B.elements, size, cudaMemcpyHostToDevice);
    // allocate C in device memory
    Matrix d_C;
    d_C.width = C.width;
    d_C.height = C.height;
    err = cudaMalloc(&d_C.elements, size);
    // invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
    matSubKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read C from device memory
    err = cudaMemcpy(C.elements, d_C.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_A.elements);
    cudaFree(d_B.elements);
    cudaFree(d_C.elements);
}
// matrix matTimesScaler kernel called by matTimesScaler()
// Elementwise Out = In * scaler.
__global__
void matTimesScalerKernel(Matrix d_In, double scaler, Matrix d_Out) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < d_In.height && c < d_In.width) {
        int k = r * d_In.width + c;
        d_Out.elements[k] = d_In.elements[k] * scaler;
    }
}
// Elementwise Out = In * scaler, computed on the device.
void matTimesScaler(Matrix In, double scaler, Matrix Out) {
    // load In to device memory
    Matrix d_In;
    d_In.width = In.width;
    d_In.height = In.height;
    size_t size = In.width * In.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_In.elements, size);
    cudaMemcpy(d_In.elements, In.elements, size, cudaMemcpyHostToDevice);
    // allocate Out in device memory
    Matrix d_Out;
    d_Out.width = Out.width; d_Out.height = Out.height;
    size = Out.width * Out.height * sizeof(double);
    cudaMalloc(&d_Out.elements, size);
    // invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (In.width + dimBlock.x - 1)/dimBlock.x, (In.height + dimBlock.y - 1)/dimBlock.y );
    matTimesScalerKernel<<<dimGrid, dimBlock>>>(d_In, scaler, d_Out);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read Out from device memory
    err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_In.elements);
    cudaFree(d_Out.elements);
}
// need a better parallelized version
// One thread per row: write the maximum of d_A's row into d_col.
// need a better parallelized version
__global__
void maxOfMatrixRow(Matrix d_A, Matrix d_col) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: guard against the padded tail of the 1-D grid; without it,
    // extra threads read and write out of bounds.
    if (row >= d_A.height) return;
    double max = d_A.elements[row*d_A.width];
    for (int col=0; col<d_A.width; col++) {
        max = (d_A.elements[row*d_A.width+col] > max)? d_A.elements[row*d_A.width+col] : max;
    }
    d_col.elements[row] = max;
}
/************************ O ************************/
// matrix ones kernel called by ones()
// Set every element of d_A to 1.
__global__
void onesKernel(Matrix d_A) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // BUG FIX: the bounds test used ">" instead of ">=", letting one extra
    // row and column of threads write out of bounds.
    if(row >= d_A.height || col >= d_A.width) return;
    d_A.elements[row*d_A.width+col] = 1;
}
// Fill host matrix A with ones (computed on the device, copied back).
void ones(Matrix A) {
    // load A to device memory
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_A.elements, size);
    // NOTE(review): this upload is unnecessary (the kernel overwrites every
    // element) but kept for behavioral parity with the rest of the file.
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    // invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
    onesKernel<<<dimGrid, dimBlock>>>(d_A);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read A from device memory
    err = cudaMemcpy(A.elements, d_A.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_A.elements);
}
/************************ R ************************/
//create an m-by-n tiling of a given matrix
// Tile d_A m-by-n times into d_B; d_B must be (m*height) x (n*width).
// Each thread owns one source element and writes all m*n copies of it.
__global__
void repmatKernel(Matrix d_A, int m, int n, Matrix d_B) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r >= d_A.height || c >= d_A.width) return;
    double v = d_A.elements[r*d_A.width + c];
    for (int i = 0; i < m; i++)
        for (int j = 0; j < n; j++)
            d_B.elements[(r + i*d_A.height)*d_B.width + (c + j*d_A.width)] = v;
}
// matrix reshape kernel called by reshape()
// Reshape d_In into d_Out preserving column-major (Matlab-style) element
// order: the element at column-major linear index c in In lands at the same
// linear index in Out. Assumes both matrices hold the same element count.
__global__
void reshapeKernel(Matrix d_In, Matrix d_Out) {
int y = blockIdx.y * blockDim.y + threadIdx.y;
int x = blockIdx.x * blockDim.x + threadIdx.x;
if(y >= d_In.height || x >= d_In.width) return;
// c = column-major linear index of element (y, x).
int c = x * d_In.height + y;
// Decompose c back into (row, col) for each matrix's row-major storage.
d_Out.elements[(c%d_Out.height)*d_Out.width+(c/d_Out.height)] = d_In.elements[(c%d_In.height)*d_In.width+(c/d_In.height)];
}
// Reshape In into Out (column-major element order preserved), on the device.
void reshape(Matrix In, Matrix Out) {
    // load In to device memory
    Matrix d_In;
    d_In.width = In.width;
    d_In.height = In.height;
    size_t size = In.width * In.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_In.elements, size);
    cudaMemcpy(d_In.elements, In.elements, size, cudaMemcpyHostToDevice);
    // allocate Out in device memory
    Matrix d_Out;
    d_Out.width = Out.width; d_Out.height = Out.height;
    size = Out.width * Out.height * sizeof(double);
    cudaMalloc(&d_Out.elements, size);
    // invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (In.width + dimBlock.x - 1)/dimBlock.x, (In.height + dimBlock.y - 1)/dimBlock.y );
    reshapeKernel<<<dimGrid, dimBlock>>>(d_In, d_Out);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read Out from device memory
    err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_In.elements);
    cudaFree(d_Out.elements);
}
/************************ S ************************/
// One thread per column: write the column sum of d_A into d_row.
__global__
void sumOfMatrixColKernel(Matrix d_A, Matrix d_row) {
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if(col >= d_A.width) return;
    // BUG FIX: accumulate in a register and assign once; the old "+="
    // added into device memory that callers cudaMalloc'd without zeroing,
    // producing garbage results.
    double sum = 0.0;
    for (int row=0; row<d_A.height; row++) {
        sum += d_A.elements[row*d_A.width+col];
    }
    d_row.elements[col] = sum;
}
// Out[c] = sum over rows of In[r][c]; Out is a 1 x In.width row vector.
void sumOfMatrixCol(Matrix In, Matrix Out) {
    // load In to device memory
    Matrix d_In;
    d_In.width = In.width;
    d_In.height = In.height;
    size_t size = In.width * In.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_In.elements, size);
    cudaMemcpy(d_In.elements, In.elements, size, cudaMemcpyHostToDevice);
    // allocate Out in device memory
    Matrix d_Out;
    d_Out.width = Out.width; d_Out.height = Out.height;
    size = Out.width * Out.height * sizeof(double);
    cudaMalloc(&d_Out.elements, size);
    // BUG FIX: the kernel accumulates with "+=", so the output buffer must
    // start zeroed; freshly cudaMalloc'd memory is uninitialized.
    cudaMemset(d_Out.elements, 0, size);
    // invoke kernel (1-D grid, one thread per column)
    dim3 dimBlock(BLOCK_SIZE_DIM1);
    dim3 dimGrid( (In.width + dimBlock.x - 1)/dimBlock.x );
    sumOfMatrixColKernel<<<dimGrid, dimBlock>>>(d_In, d_Out);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read Out from device memory
    err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_In.elements);
    cudaFree(d_Out.elements);
}
/************************ T ************************/
//matrix transpose kernel
// d_B = transpose(d_A); d_B must be d_A.width x d_A.height.
__global__
void transposeKernel(Matrix d_A, Matrix d_B){
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < d_A.height && c < d_A.width)
        d_B.elements[c*d_B.width + r] = d_A.elements[r*d_A.width + c];
}
// Out = transpose(In), computed on the device; Out must be In.width x In.height.
void transpose(Matrix In, Matrix Out) {
    // load In to device memory
    Matrix d_In;
    d_In.width = In.width;
    d_In.height = In.height;
    size_t size = In.width * In.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_In.elements, size);
    cudaMemcpy(d_In.elements, In.elements, size, cudaMemcpyHostToDevice);
    // allocate Out on device memory
    Matrix d_Out;
    d_Out.width = Out.width;
    d_Out.height = Out.height;
    size = d_Out.width * d_Out.height * sizeof(double);
    err = cudaMalloc(&d_Out.elements, size);
    // invoke kernel over In's index space
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (In.width + dimBlock.x - 1)/dimBlock.x, (In.height + dimBlock.y - 1)/dimBlock.y );
    transposeKernel<<<dimGrid, dimBlock>>>(d_In, d_Out);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read Out from device memory
    err = cudaMemcpy(Out.elements, d_Out.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_In.elements);
    cudaFree(d_Out.elements);
}
/************************ Z ************************/
// matrix zeros kernel called by zeros()
// Set every element of d_A to 0.
__global__
void zerosKernel(Matrix d_A) {
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    if (r < d_A.height && c < d_A.width)
        d_A.elements[r*d_A.width + c] = 0;
}
// Zero out host matrix A (computed on the device, copied back).
void zeros(Matrix A) {
    // load A to device memory
    Matrix d_A;
    d_A.width = A.width;
    d_A.height = A.height;
    size_t size = A.width * A.height * sizeof(double);
    cudaError_t err = cudaMalloc(&d_A.elements, size);
    // NOTE(review): this upload is unnecessary (the kernel overwrites every
    // element) but kept for behavioral parity with the rest of the file.
    cudaMemcpy(d_A.elements, A.elements, size, cudaMemcpyHostToDevice);
    // invoke kernel
    dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
    dim3 dimGrid( (A.width + dimBlock.x - 1)/dimBlock.x, (A.height + dimBlock.y - 1)/dimBlock.y );
    zerosKernel<<<dimGrid, dimBlock>>>(d_A);
    // BUG FIX: cudaThreadSynchronize is deprecated (removed in CUDA 12);
    // use cudaDeviceSynchronize.
    err = cudaDeviceSynchronize();
    // read A from device memory
    err = cudaMemcpy(A.elements, d_A.elements, size, cudaMemcpyDeviceToHost);
    // free device memory
    cudaFree(d_A.elements);
}
|
20,622 | #include<stdio.h>
#define ARRAY_SIZE 128*128
#define NUM_THREADS 128
#define BLOCK_SIZE 128
// In-place tree reduction: each block sums its blockDim.x-element slice of
// d_in (destroying d_in's contents in the process) and writes the block
// total to d_out[blockIdx.x]. Assumes blockDim.x is a power of two.
__global__ void reduce(float* d_out, float* d_in){
int global_id = blockDim.x*blockIdx.x + threadIdx.x;
int local_id = threadIdx.x;
//extern __shared__ float s_in[];
// Halve the active range each step; the barrier after each step orders the
// writes of one step before the reads of the next.
for(unsigned int s = blockDim.x/2; s > 0; s>>=1){
if(local_id < s) d_in[global_id] += d_in[global_id + s];
__syncthreads();
}
// Thread 0 of each block publishes the block's partial sum.
if(local_id==0) d_out[blockIdx.x] = d_in[global_id];
__syncthreads();
}
// Sum ARRAY_SIZE ones with a two-stage in-place tree reduction:
// stage 1 produces one partial per block in d_out, stage 2 reduces the
// partials into d_final.
int main(){
    float *d_in, *d_out, *d_final;
    const int ARRAY_BYTES = ARRAY_SIZE*sizeof(float);
    cudaMalloc((void**) &d_in, ARRAY_BYTES);
    cudaMalloc((void**) &d_out, ARRAY_BYTES/NUM_THREADS);
    cudaMalloc((void**) &d_final, sizeof(float));
    float h_array[ARRAY_SIZE];
    for(int i=0; i < ARRAY_SIZE; i+=1){
        h_array[i] = 1;
    }
    float sum;
    cudaMemcpy(d_in, h_array, ARRAY_BYTES, cudaMemcpyHostToDevice);
    // Stage 1: one partial sum per block.
    reduce<<<BLOCK_SIZE, NUM_THREADS>>>(d_out, d_in);
    // Stage 2: reduce the partials into d_final.
    reduce<<<1, NUM_THREADS>>>(d_final, d_out);
    // BUG FIX: copy the result from d_final — the second launch's output —
    // not from d_out, whose contents are clobbered by the in-place
    // reduction (the old code only worked by accident of that clobbering).
    cudaMemcpy(&sum, d_final, sizeof(float), cudaMemcpyDeviceToHost);
    printf("%f",sum);
    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(d_final);
    return 0;
}
20,623 | #include<stdio.h>
#include<stdlib.h>
// Elementwise d_c[i] = d_a[i] + d_b[i] for i < n; one element per thread.
__global__ void matadd(int *d_a,int *d_b,int *d_c, int n){
    int i = threadIdx.x;
    if (i >= n) return;
    d_c[i] = d_a[i] + d_b[i];
}
// Streamed vector addition: push 20 chunks of n elements through one CUDA
// stream, overlapping async copies with kernel launches, and time the whole
// pipeline with events.
int main(){
    int n;
    scanf("%d",&n);
    cudaEvent_t start,stop;
    float escap_time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    cudaStream_t stream;
    cudaStreamCreate(&stream);
    int *h_a,*h_b,*h_c;
    // Pinned host memory is required for truly asynchronous memcpys.
    cudaHostAlloc((void**)&h_a,20*n*sizeof(int),cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_b,20*n*sizeof(int),cudaHostAllocDefault);
    cudaHostAlloc((void**)&h_c,20*n*sizeof(int),cudaHostAllocDefault);
    for(int i=0; i<20*n; i++){
        h_a[i]=i;
        h_b[i]=i+1;
    }
    int *d_a,*d_b,*d_c;
    cudaMalloc((void**)&d_a,n*sizeof(int));
    cudaMalloc((void**)&d_b,n*sizeof(int));
    cudaMalloc((void**)&d_c,n*sizeof(int));
    // NOTE(review): matadd runs <<<1,n>>>, so n must not exceed 1024.
    for(int i=0; i<20*n; i+=n){
        cudaMemcpyAsync(d_a,h_a+i,n*sizeof(int),cudaMemcpyHostToDevice,stream);
        cudaMemcpyAsync(d_b,h_b+i,n*sizeof(int),cudaMemcpyHostToDevice,stream);
        matadd<<<1,n,0,stream>>>(d_a,d_b,d_c,n);
        cudaMemcpyAsync(h_c+i,d_c,n*sizeof(int),cudaMemcpyDeviceToHost,stream);
    }
    cudaStreamSynchronize(stream);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&escap_time,start,stop);
    printf("Time:%3.1f\n",escap_time);
    for(int i=0; i<20*n; i++)
        printf("%d ",h_c[i]);
    // BUG FIX: the device buffers were leaked, and the pinned host buffers
    // were released twice (cudaFreeHost followed by an invalid cudaFree on
    // the same pointers). Free each allocation exactly once with the
    // matching API.
    cudaFreeHost(h_a);
    cudaFreeHost(h_b);
    cudaFreeHost(h_c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaStreamDestroy(stream);
    return 0;
}
|
20,624 | /* Host-side code to perform counting sort
*
* Author: Naga Kandasamy
* Date modified: March 2, 2021
*
* Student name(s): Abishek S Kumar
* Date modified: 03/08/2021
*
* Compile as follows: make clean && make
*/
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#include "counting_sort_kernel.cu"
/* Do not change the range value */
#define MIN_VALUE 0
#define MAX_VALUE 255
#define THREAD_BLOCK_SIZE 256
#define NUM_BLOCKS 32
#define HISTOGRAM_SIZE 256 /* Histogram has 256 bins */
/* Uncomment to spit out debug info */
//#define DEBUG
extern "C" int counting_sort_gold(int *, int *, int, int);
int rand_int(int, int);
void print_array(int *, int);
void print_min_and_max_in_array(int *, int);
void print_histogram(int *, int, int);
void compute_on_device(int *, int *, int, int);
int check_if_sorted(int *, int);
int compare_results(int *, int *, int);
/* Driver: generate a random array, sort it on the CPU (reference) and on the
 * GPU, then compare the two results.
 * Usage: ./prog num-elements
 * Fixes over the original:
 *  - both memset() calls cleared num_elements BYTES instead of
 *    num_elements * sizeof(int) bytes;
 *  - "Eexecution" typo in the timing message;
 *  - the three host buffers are now freed before exit. */
int main(int argc, char **argv)
{
    if (argc < 2) {
        printf("Usage: %s num-elements\n", argv[0]);
        exit(EXIT_FAILURE);
    }

    int num_elements = atoi(argv[1]);
    int range = MAX_VALUE - MIN_VALUE;
    int *input_array, *sorted_array_reference, *sorted_array_d;

    /* Populate input array with random integers in [1, MAX_VALUE] */
    printf("Generating input array with %d elements in the range 0 to %d\n", num_elements, range);
    input_array = (int *)malloc(num_elements * sizeof(int));
    if (input_array == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    srand(time(NULL));
    int i;
    for (i = 0; i < num_elements; i++)
        input_array[i] = rand_int(1, MAX_VALUE);
#ifdef DEBUG
    print_array(input_array, num_elements);
    print_min_and_max_in_array(input_array, num_elements);
#endif

    /* Sort on the CPU (reference implementation); result goes into
     * sorted_array_reference. */
    printf("\nSorting array on CPU\n");
    int status;
    sorted_array_reference = (int *)malloc(num_elements * sizeof(int));
    if (sorted_array_reference == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    /* BUG FIX: size was num_elements bytes, not num_elements ints */
    memset(sorted_array_reference, 0, num_elements * sizeof(int));

    struct timeval start, stop;
    gettimeofday(&start, NULL);
    status = counting_sort_gold(input_array, sorted_array_reference, num_elements, range);
    gettimeofday(&stop, NULL);
    printf("Execution time = %f\n", stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);
    if (status == -1) {
        exit(EXIT_FAILURE);
    }
    status = check_if_sorted(sorted_array_reference, num_elements);
    if (status == -1) {
        printf("Error sorting the input array using the reference code\n");
        exit(EXIT_FAILURE);
    }
    printf("Counting sort was successful on the CPU\n");
#ifdef DEBUG
    print_array(sorted_array_reference, num_elements);
#endif

    /* Sort on the GPU; result goes into sorted_array_d. */
    printf("\nSorting array on GPU\n");
    sorted_array_d = (int *)malloc(num_elements * sizeof(int));
    if (sorted_array_d == NULL) {
        perror("malloc");
        exit(EXIT_FAILURE);
    }
    /* BUG FIX: size in bytes, not elements */
    memset(sorted_array_d, 0, num_elements * sizeof(int));
    compute_on_device(input_array, sorted_array_d, num_elements, range);
#ifdef DEBUG
    print_array(sorted_array_d, num_elements);
#endif

    /* Check the two results for correctness */
    printf("\nComparing CPU and GPU results\n");
    status = compare_results(sorted_array_reference, sorted_array_d, num_elements);
    if (status == 0)
        printf("Test passed\n");
    else
        printf("Test failed\n");

    free(input_array);
    free(sorted_array_reference);
    free(sorted_array_d);
    exit(EXIT_SUCCESS);
}
/* GPU counting sort driver: copies the input to the device, launches
 * counting_sort_kernel (histogram + sorted output), and copies the sorted
 * array back into sorted_array.
 * NOTE(review): the kernel is launched with 2 * num_elements * sizeof(int)
 * bytes of dynamic shared memory; devices typically cap dynamic shared memory
 * per block well below that for large inputs — verify against the actual
 * requirement in counting_sort_kernel.cu.
 * Fix over the original: the two host staging buffers were never freed. */
void compute_on_device(int *input_array, int *sorted_array, int num_elements, int range)
{
    struct timeval start_a, stop_a;
    gettimeofday(&start_a, NULL);

    /* Host staging buffers, used only for the debug copies below */
    int *histogram_on_cpu = (int *)malloc(sizeof(int) * HISTOGRAM_SIZE);
    memset(histogram_on_cpu, 0, sizeof(int) * HISTOGRAM_SIZE);
    int *input_on_cpu = (int *)malloc(sizeof(int) * num_elements);
    memset(input_on_cpu, 0, sizeof(int) * num_elements);

    int *input_data_on_device = NULL;
    int *histogram_on_device = NULL;
    int *sorted_array_d = NULL;
    /* Dynamic shared memory sizing for the kernel launch below */
    int shared_mem_size = sizeof(int) * num_elements;

    /* Device input */
    cudaMalloc((void**)&input_data_on_device, num_elements * sizeof(int));
    cudaMemcpy(input_data_on_device, input_array, num_elements * sizeof(int), cudaMemcpyHostToDevice);
    /* Device output */
    cudaMalloc((void**)&sorted_array_d, num_elements * sizeof(int));

    int num_bins = range + 1;
    /* Device histogram, zero-initialised */
    cudaMalloc((void**)&histogram_on_device, num_bins * sizeof(int));
    cudaMemset(histogram_on_device, 0, num_bins * sizeof(int));

    /* Execution grid */
    dim3 thread_block(THREAD_BLOCK_SIZE, 1, 1);
    dim3 grid(NUM_BLOCKS, 1);
    gettimeofday(&stop_a, NULL);
    printf("Data transfer time = %f\n", stop_a.tv_sec - start_a.tv_sec + (stop_a.tv_usec - start_a.tv_usec)/(float)1000000);

    struct timeval start, stop;
    printf("Using shared memory to generate sorted array\n");
    gettimeofday(&start, NULL);
    counting_sort_kernel<<<grid, thread_block, 2 * shared_mem_size>>>(input_data_on_device, histogram_on_device, sorted_array_d, num_elements, num_bins);
    cudaDeviceSynchronize();
    gettimeofday(&stop, NULL);
    printf("Execution time = %f\n", stop.tv_sec - start.tv_sec + (stop.tv_usec - start.tv_usec)/(float)1000000);

    /* Copy results from device to host */
    cudaMemcpy(sorted_array, sorted_array_d, num_elements * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(input_on_cpu, input_data_on_device, num_elements * sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(histogram_on_cpu, histogram_on_device, HISTOGRAM_SIZE * sizeof(int), cudaMemcpyDeviceToHost);

    /* Free device and host memory (host buffers previously leaked) */
    cudaFree(input_data_on_device);
    cudaFree(histogram_on_device);
    cudaFree(sorted_array_d);
    free(histogram_on_cpu);
    free(input_on_cpu);
}
/* Verify that the array is in non-decreasing order.
 * Returns 0 if sorted, -1 at the first out-of-order adjacent pair. */
int check_if_sorted(int *array, int num_elements)
{
    for (int pos = 1; pos < num_elements; pos++)
        if (array[pos - 1] > array[pos])
            return -1;
    return 0;
}
/* Element-wise comparison of two int arrays.
 * Returns 0 when identical over [0, num_elements), -1 at the first mismatch. */
int compare_results(int *array_1, int *array_2, int num_elements)
{
    int pos = 0;
    while (pos < num_elements) {
        if (array_1[pos] != array_2[pos])
            return -1;
        pos++;
    }
    return 0;
}
/* Return a pseudo-random integer in [min, max].
 * (max itself is produced only when rand() returns exactly RAND_MAX.) */
int rand_int(int min, int max)
{
    float fraction = rand() / (float)RAND_MAX;   /* uniform in [0, 1] */
    float scaled = min + (max - min) * fraction;
    return (int)floorf(scaled);
}
/* Dump the array contents to stdout, space-separated, on one line. */
void print_array(int *this_array, int num_elements)
{
    printf("Array: ");
    for (int pos = 0; pos < num_elements; pos++)
        printf("%d ", this_array[pos]);
    printf("\n");
}
/* Report the minimum and maximum values of the array on stdout.
 * Single pass, tracking both extrema together. */
void print_min_and_max_in_array(int *this_array, int num_elements)
{
    int lo = INT_MAX;
    int hi = INT_MIN;
    for (int pos = 0; pos < num_elements; pos++) {
        if (this_array[pos] < lo)
            lo = this_array[pos];
        if (this_array[pos] > hi)
            hi = this_array[pos];
    }
    printf("Minimum value in the array = %d\n", lo);
    printf("Maximum value in the array = %d\n", hi);
}
|
20,625 | #include "includes.h"
// Stage one element per thread through dynamically sized shared memory,
// scale it by the block size, and write it back to global memory.
// Launch contract: one thread per element; requires blockDim.x * sizeof(float)
// bytes of dynamic shared memory (third <<<>>> argument). There is no bounds
// guard, so the grid must supply exactly one thread per element of
// g_idata/g_odata.
__global__ void testKernel(float *g_idata, float *g_odata)
{
// dynamically sized shared memory; the byte count is set by the host at launch
extern __shared__ float sdata[];
// this thread's element index (single-block indexing: threadIdx.x only)
const unsigned int tid = threadIdx.x;
// number of threads in this block, used as the scale factor below
const unsigned int num_threads = blockDim.x;
// stage input element in shared memory
sdata[tid] = g_idata[tid];
__syncthreads();
// scale each element by the block size
sdata[tid] = (float) num_threads * sdata[tid];
__syncthreads();
// write result back to global memory
g_odata[tid] = sdata[tid];
}
20,626 | /********************************************************************************
* TEX Object API
*
* TODO:
* Test the behavior of memory cache of cuArray and 2D pitched memory tex.
* Test the behavior of float
* I suspect some other unit can be used in analysis.
*******************************************************************************/
#include <stdio.h>
#define DIM 2
//#define DATA_TYPE unsigned int //please note that if change this you many also need change cudaCreateChannelDesc
//#define DATA_TYPE float //please note that if change this you many also need change cudaCreateChannelDesc
#define DATA_TYPE unsigned char //please note that if change this you many also need change cudaCreateChannelDesc
#define DIM_1 16
#define DIM_2 16
#define DIM_3 16
#define CUDA_ARRAY
#if DIM == 1
#elif DIM == 2
// Probe kernel: fetch a single texel at (x=1, y=0) through the texture object
// and print it. Fetched as float because the host configures
// cudaReadModeNormalizedFloat + linear filtering (see main below).
__global__ void kernel(cudaTextureObject_t texObj){
// DATA_TYPE a = tex2D<DATA_TYPE>(texObj,2,1);//note: require a <DATA_TYPE> in object api version.
float a = tex2D<float>(texObj,1,0);//note: require a <DATA_TYPE> in object api version.
//printf("%d\n",(int)a);
printf("%f\n",a);
return;
}
#elif DIM == 3
#endif
// Experiment driver: fill a host buffer with ramp data, bind it to a 2D
// texture object — either as a pitched linear allocation (#undef CUDA_ARRAY)
// or as a CUDA array (#define CUDA_ARRAY, the active path) — then launch a
// single-thread kernel that samples one texel and prints it.
// Only DIM == 2 paths are implemented; DIM == 1/3 branches are empty stubs.
int main(){
DATA_TYPE* hostPtr = (DATA_TYPE*)malloc(DIM_1*DIM_2*DIM_3*sizeof(DATA_TYPE));
for(int i=0;i<DIM_1*DIM_2*DIM_3;i++)
{
hostPtr[i] = i;
}
// Channel descriptor must match DATA_TYPE (bits and kind); see the #define
// block at the top of the file.
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(sizeof(DATA_TYPE)*8,0,0,0,cudaChannelFormatKindUnsigned);
//cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(sizeof(DATA_TYPE)*8,0,0,0,cudaChannelFormatKindFloat);
struct cudaTextureDesc texDesc;
memset(&texDesc, 0, sizeof(texDesc));
// out-of-range coordinates read as zero (border addressing)
texDesc.addressMode[0] = cudaAddressModeBorder;
texDesc.addressMode[1] = cudaAddressModeBorder;
//texDesc.filterMode = cudaFilterModePoint;
texDesc.filterMode = cudaFilterModeLinear; //only support when cudaCreateChannelDesc == cudaChannelFormatKindFloat
//texDesc.readMode = cudaReadModeElementType;
texDesc.readMode = cudaReadModeNormalizedFloat;
texDesc.normalizedCoords = 0;
//texDesc.normalizedCoords = 1;
cudaTextureObject_t texObj = 0;
struct cudaResourceDesc resDesc; //refer to the def of cudaCreateTextureObject in cuda_runtime_api.h
memset(&resDesc, 0, sizeof(resDesc));
#ifndef CUDA_ARRAY
// Pitched linear memory backing (inactive: CUDA_ARRAY is defined above)
DATA_TYPE* devPtr;
#if DIM == 1
#elif DIM == 2
size_t pitch;
cudaMallocPitch(&devPtr,&pitch,DIM_1*sizeof(DATA_TYPE),DIM_2);
cudaMemcpy2D(devPtr,pitch,hostPtr,DIM_1*sizeof(DATA_TYPE),DIM_1*sizeof(DATA_TYPE),DIM_2,cudaMemcpyHostToDevice);
resDesc.resType = cudaResourceTypePitch2D;
resDesc.res.pitch2D.devPtr = devPtr;
resDesc.res.pitch2D.desc = channelDesc;
resDesc.res.pitch2D.width = DIM_1; //should be in element not in byte
resDesc.res.pitch2D.height = DIM_2;
resDesc.res.pitch2D.pitchInBytes = pitch;
cudaCreateTextureObject(&texObj,&resDesc,&texDesc,NULL);
#elif DIM == 3
#endif//#if DIM == 1
#else
// CUDA array backing (active path)
#if DIM == 1
#elif DIM == 2
cudaArray* cuArray_2d;
cudaExtent extent_2d = {DIM_1,DIM_2,0};
cudaMalloc3DArray(&cuArray_2d, &channelDesc,extent_2d ,cudaArrayDefault);//this function is able to alloc 1/3D array there are some interesting choice for the 4th parameter. Note zero in z of extent_2d.
cudaMemcpy3DParms cpy3DParms = {0}; //should be init to zero before use
cpy3DParms.srcPtr = make_cudaPitchedPtr(hostPtr,DIM_1*sizeof(DATA_TYPE),DIM_1,DIM_2);
cpy3DParms.dstArray = cuArray_2d;
cpy3DParms.extent = make_cudaExtent(DIM_1,DIM_2,1); //If no CUDA array is participating in the copy then the extents are defined in elements of unsigned char.
cpy3DParms.kind = cudaMemcpyHostToDevice;
cudaMemcpy3D(&cpy3DParms);
resDesc.resType = cudaResourceTypeArray;
resDesc.res.array.array = cuArray_2d;
cudaCreateTextureObject(&texObj,&resDesc,&texDesc,NULL);
#elif DIM == 3
#endif//#if DIM == 1
#endif//CUDA_ARRAY
kernel<<<1,1>>>(texObj);
cudaDeviceSynchronize();
// surface any launch/execution error from the kernel above
printf("%s\n",cudaGetErrorString(cudaGetLastError()));
return 0;
}
|
20,627 | #include<stdio.h>
#include<time.h>
// For each of the n query items in each of the b batches, find the 3 nearest
// of the m reference items, where the distance between two items is the mean
// over their t corresponding 3D points of the pairwise Euclidean distance
// (clamped below by 1e-20 to avoid exact zeros).
// Layouts: xyz1 is (b, n, t, 3), xyz2 is (b, m, t, 3); outputs dist/idx are
// (b, n, 3), sorted nearest-first.
// Launch: grid-stride over batches (blockIdx.x), thread-stride over queries
// (threadIdx.x), so any <<<blocks, threads>>> configuration is correct.
__global__ void threennKernel(int b, int n, int m, int t, const float * __restrict__ xyz1, const float * __restrict__ xyz2, float * __restrict__ dist, int * __restrict__ idx) {
for(int i=blockIdx.x;i<b;i+=gridDim.x){
for(int j=threadIdx.x;j<n;j+=blockDim.x){
// running top-3 smallest distances and their reference indices
float best1=1e20, best2=1e20, best3=1e20;
int besti1=0, besti2 = 0, besti3 = 0;
for(int u=0;u<m;u++){
float t_dist = 0;
for(int v=0;v<t;v++){
int tmp_idx1 = i*n*t*3 + j*t*3 + v*3;
int tmp_idx2 = i*m*t*3 + u*t*3 + v*3;
float x1 = xyz1[tmp_idx1+0];
float y1 = xyz1[tmp_idx1+1];
float z1 = xyz1[tmp_idx1+2];
float x2 = xyz2[tmp_idx2+0];
float y2 = xyz2[tmp_idx2+1];
float z2 = xyz2[tmp_idx2+2];
// accumulate Euclidean distance, floored at 1e-20
t_dist += max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
}
t_dist /= t;
// insertion into the sorted top-3
if (t_dist<best1) {
best3=best2;
besti3=besti2;
best2=best1;
besti2=besti1;
best1=t_dist;
besti1=u;
} else if (t_dist<best2) {
best3=best2;
besti3=besti2;
best2=t_dist;
besti2=u;
} else if (t_dist<best3) {
best3=t_dist;
besti3=u;
}
}
// emit the 3 nearest neighbours for query (i, j)
int tmp_idx = i*n*3+j*3;
dist[tmp_idx+0]=best1;
idx[tmp_idx+0]=besti1;
dist[tmp_idx+1]=best2;
idx[tmp_idx+1]=besti2;
dist[tmp_idx+2]=best3;
idx[tmp_idx+2]=besti3;
}
}
}
// Host wrapper: launch the 3-NN kernel on the default stream with a fixed
// 32-block x 512-thread configuration (the kernel's stride loops cover any
// b/n). Asynchronous: no synchronisation or error check is performed here.
void threennLauncher(int b,int n,int m,int t,const float *xyz1, const float *xyz2, float *dist, int *idx){
    threennKernel<<<32,512>>>(b,n,m,t,xyz1,xyz2,dist,idx);
}
// Weighted 3-neighbour interpolation: for each of the n outputs per batch,
// blend the features of its 3 neighbours (from idx, shape (b, n, 3)) with the
// matching weights (shape (b, n, 3)).
// Layouts: points is (b, m, t, c), out is (b, n, t, c).
// Launch: grid-stride over batches, thread-stride over n — any configuration
// is correct.
__global__ void threeinterpolateKernel(int b, int n, int m, int t,int c, const float * __restrict__ points, const int * __restrict__ idx, const float * __restrict__ weight, float * __restrict__ out) {
for(int i=blockIdx.x;i<b;i+=gridDim.x){
for(int j=threadIdx.x;j<n;j+=blockDim.x){
// 3 neighbour indices and their blend weights for element (i, j)
int tmp_idx = i*n*3+j*3;
float w1=weight[tmp_idx+0];
float w2=weight[tmp_idx+1];
float w3=weight[tmp_idx+2];
int i1=idx[tmp_idx+0];
int i2=idx[tmp_idx+1];
int i3=idx[tmp_idx+2];
for(int u=0;u<t;u++){
for(int v=0;v<c;v++){
int tmp_idx1 = i*n*t*c + j*t*c + u*c + v;
int tmp_idx2 = i*m*t*c + i1*t*c + u*c + v;
int tmp_idx3 = i*m*t*c + i2*t*c + u*c + v;
int tmp_idx4 = i*m*t*c + i3*t*c + u*c + v;
out[tmp_idx1] = points[tmp_idx2]*w1 + points[tmp_idx3]*w2 + points[tmp_idx4]*w3;
}
}
}
}
}
// Host wrapper: launch the 3-neighbour interpolation kernel on the default
// stream with a fixed 32x512 configuration. Asynchronous; no error check.
void threeinterpolateLauncher(int b, int n, int m, int t, int c,const float *points, const int *idx, const float *weight, float *out){
    threeinterpolateKernel<<<32,512>>>(b,n,m,t,c,points,idx,weight,out);
}
// Backward pass of the 3-neighbour interpolation: scatter each output
// gradient back to its 3 source neighbours, scaled by the same weights used
// in the forward pass.
// input: grad_out (b,n,t,c), idx (b,n,3), weight (b,n,3)
// output: grad_points (b,m,t,c) — must be zero-initialised by the caller,
// since gradients are accumulated with atomicAdd (multiple outputs may share
// a neighbour).
__global__ void threeinterpolategradKernel(int b, int n, int m, int t, int c, const float * __restrict__ grad_out, const int * __restrict__ idx, const float * __restrict__ weight, float * __restrict__ grad_points) {
for(int i=blockIdx.x;i<b;i+=gridDim.x){
for(int j=threadIdx.x;j<n;j+=blockDim.x){
int tmp_idx = i*n*3+j*3;
float w1=weight[tmp_idx+0];
float w2=weight[tmp_idx+1];
float w3=weight[tmp_idx+2];
int i1=idx[tmp_idx+0];
int i2=idx[tmp_idx+1];
int i3=idx[tmp_idx+2];
for(int u=0;u<t;u++){
for(int v=0;v<c;v++){
int tmp_idx1 = i*n*t*c + j*t*c + u*c + v;
int tmp_idx2 = i*m*t*c + i1*t*c + u*c + v;
int tmp_idx3 = i*m*t*c + i2*t*c + u*c + v;
int tmp_idx4 = i*m*t*c + i3*t*c + u*c + v;
// atomic: several (i, j) may scatter into the same neighbour slot
atomicAdd(&grad_points[tmp_idx2],grad_out[tmp_idx1]*w1);
atomicAdd(&grad_points[tmp_idx3],grad_out[tmp_idx1]*w2);
atomicAdd(&grad_points[tmp_idx4],grad_out[tmp_idx1]*w3);
}
}
}
}
}
// Host wrapper: launch the interpolation backward kernel on the default
// stream with a fixed 32x128 configuration. Asynchronous; no error check.
void threeinterpolategradLauncher(int b, int n, int m, int t, int c, const float *grad_out, const int *idx, const float *weight, float *grad_points){
    threeinterpolategradKernel<<<32,128>>>(b,n,m,t,c,grad_out,idx,weight,grad_points);
}
|
20,628 | // RUN: %run_test hipify "%s" "%t" %hipify_args %clang_args
#pragma once
#include <cuda_runtime.h>
/**
 * Allocate GPU memory for `count` elements of type `T`.
 * Returns the raw device pointer; remains nullptr if cudaMalloc fails, since
 * the return status is not checked. Caller owns the allocation (cudaFree).
 * NOTE: the `// CHECK:` comment below is a hipify test expectation directive
 * (see the RUN line at the top of the file) — it must not be edited.
 */
template<typename T>
static T* gpuMalloc(size_t count) {
T* ret = nullptr;
// CHECK: hipMalloc(&ret, count * sizeof(T));
cudaMalloc(&ret, count * sizeof(T));
return ret;
}
|
20,629 | #include <stdio.h>
#define N (2048)
#define THREADS_PER_BLOCK 512
/* Fill a[0..num) with values from rand(). Call srand() first for a
 * reproducible sequence. */
void random_ints(int* a, int num) {
    for (int pos = 0; pos < num; ++pos)
        a[pos] = rand();
}
// Element-wise vector add: c[i] = a[i] + b[i], one element per thread.
// NOTE(review): there is no bounds guard, so the launch must supply exactly
// one thread per element (gridDim.x * blockDim.x == array length) — confirm
// at every call site.
__global__ void add(int *a, int *b, int *c) {
int index = threadIdx.x + blockIdx.x * blockDim.x;
c[index] = a[index] + b[index];
//*c = *a + *b;
}
// Dot product: each thread stages one pairwise product in shared memory;
// thread 0 of each block then sums its block's slice and folds the partial
// sum into *c with a single atomicAdd.
// Preconditions: blockDim.x == THREADS_PER_BLOCK, one thread per element
// (no bounds guard), and *c zeroed by the caller before launch —
// NOTE(review): main() below never initialises dev_c; verify.
__global__ void dot(int *a, int *b, int *c) {
__shared__ int temp[THREADS_PER_BLOCK];
int index = threadIdx.x + blockIdx.x * blockDim.x;
temp[threadIdx.x] = a[index] * b[index];
// all products of this block must be staged before thread 0 reads them
__syncthreads();
if(0 == threadIdx.x) {
int sum = 0;
for(int i = 0; i < THREADS_PER_BLOCK; i++) {
sum += temp[i];
}
atomicAdd(c, sum);
}
}
/* Driver: compute the dot product of two random N-vectors on the GPU and
 * print the scalar result.
 * Fixes over the original:
 *  - dev_c is now zero-initialised before the kernel accumulates into it
 *    with atomicAdd (it previously held garbage);
 *  - the result is a single int; the old code read c[0..N) out of bounds. */
int main(void) {
    int *a, *b, *c;
    int *dev_a, *dev_b, *dev_c;
    int size = N * sizeof(int);

    cudaMalloc((void**)&dev_a, size);
    cudaMalloc((void**)&dev_b, size);
    cudaMalloc((void**)&dev_c, sizeof(int));
    a = (int*)malloc(size);
    b = (int*)malloc(size);
    c = (int*)malloc(sizeof(int));
    random_ints(a, N);
    random_ints(b, N);

    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(dev_b, b, size, cudaMemcpyHostToDevice);
    /* dot() accumulates into *dev_c with atomicAdd; it must start at 0 */
    cudaMemset(dev_c, 0, sizeof(int));

    dot<<<N/THREADS_PER_BLOCK, THREADS_PER_BLOCK>>>(dev_a, dev_b, dev_c);

    /* blocking copy also synchronises with the kernel above */
    cudaMemcpy(c, dev_c, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    free(a); free(b);

    printf("Dot product: %d\n", *c);
    free(c);
    return 0;
}
|
20,630 | #include "includes.h"
// Per-pixel scaling of the direction field tv2 by the scalar field u,
// producing the flow vectors warpUV. One thread per pixel; consecutive image
// rows are `stride` elements apart in all three buffers.
__global__ void TgvComputeOpticalFlowVectorKernel(const float *u, const float2 *tv2, int width, int height, int stride, float2 *warpUV)
{
    const int col = threadIdx.x + blockIdx.x * blockDim.x;
    const int row = threadIdx.y + blockIdx.y * blockDim.y;
    if (col >= width || row >= height) return;   // outside the image

    const int pos = col + row * stride;
    const float scale = u[pos];
    const float2 dir = tv2[pos];
    warpUV[pos].x = scale * dir.x;
    warpUV[pos].y = scale * dir.y;
}
20,631 | #include "includes.h"
// Split an interleaved XY float image into two planar images. Each row of
// d_XY_in holds `width` (x, y) float pairs; pitches are in bytes.
__global__ void deInterleave_kernel2(float *d_X_out, float *d_Y_out, char *d_XY_in, int pitch_out, int pitch_in, int width, int height) {
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) { // inside the image?
        const float *pair = (const float *)(d_XY_in + y * pitch_in) + 2 * x;
        float *rowX = (float *)((char *)d_X_out + y * pitch_out);
        float *rowY = (float *)((char *)d_Y_out + y * pitch_out);
        rowX[x] = pair[0];
        rowY[x] = pair[1];
    }
}
20,632 | // Mike Hagenow
// ME759 - Final Project
// Loads a collision map from a CSV and calls the CUDA kernel
// to calculate the Laplacian
// Compile: nvcc harmonic_main.cu harmonickernel.cu -Xcompiler -O3 -Xcompiler -Wall -Xptxas -O3 -std c++14 -o harmonicmain
// Debug: nvcc -g -G harmonic_main.cu harmonickernel.cu -Xcompiler -O3 -Xcompiler -Wall -O3 -std c++14 -o harmonicmain
// Example call: ./harmonicmain /home/mike/Documents/ME759/FinalProject/Utilities/output.csv 200 300
#include <cuda.h>
#include <limits.h>
#include <stdio.h>
#include <chrono>
#include <fstream>
#include "harmonickernel.cuh"
#include <vector>
using namespace std;
// Bounds-checked read of hB viewed as a num_rows x num_cols row-major grid.
// Out-of-range coordinates read as 0, giving the gradient descent a zero
// border around the potential field.
unsigned long long int getVal( int row, int col,unsigned long long int *hB, unsigned int num_rows, unsigned int num_cols){
    bool inside = row >= 0 && col >= 0 && row < (int)num_rows && col < (int)num_cols;
    return inside ? hB[num_cols*row + col] : 0;
}
// Append one trajectory record to `filename` in the format
// "startRow,startCol|endRow,endCol|p0r,p0c;p1r,p1c;...\n".
void write_trajectory(string filename, vector<double> start, vector<double> end, vector<vector<double>> traj) {
    ofstream fout(filename, ios_base::app);
    fout << start[0] << "," << start[1] << "|" << end[0] << "," << end[1] << "|";
    for (size_t k = 0; k < traj.size(); ++k) {
        fout << traj[k][0] << "," << traj[k][1] << ";";
    }
    fout << "\n";
    fout.close();
}
// Pipeline: load a 0/1 collision map from CSV, relax it into a harmonic
// potential field on the GPU (harmonic() from harmonickernel.cuh), then run a
// host-side greedy gradient descent from each start point toward the goal and
// write the resulting paths to CSV.
int main(int argc, char **argv){
if(argc<=3){
// Not enough command line arguments
printf("Insufficient Command Line Arg!\n Expected file goal_row goal_col\n");
return 0;
}
// CUDA configuration
unsigned int max_iters = 4000000;
unsigned int threads_per_block = 1024;
string filename = argv[1]; // file of collision info
ifstream collisionFile(filename);
string temp;
// Results directory for storing paths
string results_dir = argv[3];
// Row and column counts are at the top of the CSV
unsigned int num_rows = 0;
unsigned int num_cols = 0;
if(collisionFile.good()){
getline(collisionFile,temp,',');
num_rows = atoi(temp.c_str());
getline(collisionFile,temp);
num_cols = atoi(temp.c_str());
}
// Allocate host and device memory
// hA/dA: input potential field, dB/hB: relaxed output, dMask: fixed cells
unsigned long long int *hA, *dA;
unsigned long long int *hB, *dB;
unsigned long long int *dMask;
hA = new unsigned long long int[num_rows*num_cols];
hB = new unsigned long long int[num_rows*num_cols];
cudaMalloc((void **)&dA, sizeof(unsigned long long int) * (num_rows*num_cols));
cudaMalloc((void **)&dB, sizeof(unsigned long long int) * (num_rows*num_cols));
cudaMalloc((void **)&dMask, sizeof(unsigned long long int) * (num_rows*num_cols));
unsigned long long int max_val = ULLONG_MAX/4;
// The collision file has 0 and 1. Convert to max_val (obstacle) and
// max_val/2 (free space) in the unsigned long long int field.
for(unsigned long long int i=0;i<num_rows*num_cols;i++){
if(i%num_cols==num_cols-1){
getline(collisionFile,temp);
}
else{
getline(collisionFile,temp,',');
}
if(temp=="1"){
hA[i]=max_val;
}
else{
hA[i] = max_val/2;
}
}
collisionFile.close();
// Load the CSV of the goal position and starting positions
string filepts = argv[2]; // size of one dimension of matrix
ifstream ptsFile(filepts);
getline(ptsFile,temp,',');
int goal_row = atoi(temp.c_str());
getline(ptsFile,temp);
int goal_col = atoi(temp.c_str());
printf("Goal: %d %d\n",goal_row,goal_col);
// Get the goal position and adjust mask!
unsigned int goal_index = goal_row*num_cols + goal_col;
if(goal_index>(num_rows*num_cols-1)){
printf("Invalid goal (bounds)\n");
return 0;
}
else if(hA[goal_index]==max_val){
printf("Invalid goal (goal is a collision)\n");
return 0;
}
else{
hA[goal_index] = 0; // negative potential (pull towards)
}
printf("File: %s\n",filename.c_str());
// printf("%d %d\n",num_rows,num_cols);
cudaMemcpy(dA, hA, sizeof(unsigned long long int) * (num_rows*num_cols), cudaMemcpyHostToDevice);
cudaMemcpy(dMask, hA, sizeof(unsigned long long int) * (num_rows*num_cols), cudaMemcpyHostToDevice);
// Run and time the Laplacian relaxation kernel via cudaEvents
float ms;
cudaEvent_t start;
cudaEvent_t stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
harmonic(&dA,&dB,dMask,num_rows,num_cols,threads_per_block,max_iters);
cudaEventRecord(stop);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&ms, start, stop);
// Bring back the relaxed potential field
cudaMemcpy(hB, dB, sizeof(unsigned long long int) * (num_rows*num_cols), cudaMemcpyDeviceToHost);
///////////////////////////////////////////////////////
// Gradient Descent for all remaining start points //
///////////////////////////////////////////////////////
// Load the remaining points; each entry pairs a start with the shared goal
vector<double> goal = vector<double>();
goal.push_back((double)goal_row);
goal.push_back((double)goal_col);
vector<vector<vector<double>>> result;
while (ptsFile.peek() != EOF) {
vector<double> start = vector<double>();
getline(ptsFile, temp, ',');
start.push_back(stof(temp.c_str()));
getline(ptsFile, temp);
start.push_back(stof(temp.c_str()));
result.push_back(vector<vector<double>>({start, goal}));
}
ptsFile.close();
vector<vector<vector<double>>> trajs;
int goal_row_temp, goal_col_temp, i , j;
unsigned long long int l,r,u,d;
// Greedy descent: repeatedly step to the lowest-potential 4-neighbour
// until the goal is reached or a local minimum is hit.
for(int k=0;k<(int)result.size();k++){
vector<vector<double>> traj_temp;
vector<double> point_temp;
i = (int) result[k][0][0];
j = (int) result[k][0][1];
goal_row_temp = (int) result[k][1][0];
goal_col_temp = (int) result[k][1][1];
bool finished = false;
traj_temp.clear();
while (!finished){
// potentials of the 4-neighbourhood (out of bounds reads as 0)
l = getVal(i,j-1,hB,num_rows,num_cols);
r = getVal(i,j+1,hB,num_rows,num_cols);
u = getVal(i-1,j,hB,num_rows,num_cols);
d = getVal(i+1,j,hB,num_rows,num_cols);
// printf("path %llu %llu %llu %llu\n",l,r,u,d);
unsigned long long int curr_val = getVal(i,j,hB,num_rows,num_cols);
// printf("path %llu %llu %llu %llu %llu\n",l,r,u,d,curr_val);
if(curr_val<=l && curr_val<= r && curr_val <=u && curr_val<=d){
// local minimum that is not the goal: descent cannot proceed
finished=true;
printf("I have failed my mission!!!\n");
continue;
}
else if(l<r && l<u && l<d){
i = i;
j = j-1;
}
else if (r < u && r < d){
i = i;
j = j+1;
}
else if(u<d){
i = i-1;
j = j;
}
else{
i = i+1;
j = j;
}
if (i==goal_row_temp and j==goal_col_temp){
finished=true;
printf("I have completed my mission!!!\n");
}
point_temp.clear();
point_temp.push_back(i);
point_temp.push_back(j);
traj_temp.push_back(point_temp);
}
trajs.push_back(traj_temp);
}
string pathfile = results_dir+filename.substr(0,filename.find("."))+"_harmonic_paths.csv";
// Write trajectories to file
for(int k=0;k<(int)result.size();k++){
write_trajectory(pathfile, result[k][0], result[k][1], trajs[k]);
}
// // Write to output file
// string fileout = filename.substr(0,filename.find("."))+"_processed.csv";
// ofstream outfile;
// outfile.open(fileout);
// for(unsigned long long int ii=0;ii<num_rows;ii++){
// for(unsigned long long int jj=0;jj<num_rows;jj++){
// if(jj==num_cols-1){
// outfile << to_string(hB[num_rows*ii+jj]) + "\n";
// }
// else{
// // if(hB[num_rows*ii+jj]>0.0){
// // printf(" %u\n",hB[num_rows*ii+jj]);
// // }
// outfile << to_string(hB[num_rows*ii+jj]) + ",";
// }
// }
// }
// outfile.close();
printf("CUDA RUN (ms): %f\n",ms/1000);
// clean up memory
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(dA);
delete[] hA;
cudaFree(dB);
delete[] hB;
cudaFree(dMask);
return 0;
}
20,633 | //
// simpleCUDA
//
// This simple code sample demonstrates how to perform a simple linear
// algebra operation using CUDA, single precision axpy:
// y[i] = alpha*x[i] + y[i] for x,y in R^N and a scalar alpha
//
// Please refer to the following article for detailed explanations:
// John Nickolls, Ian Buck, Michael Garland and Kevin Skadron
// Scalable parallel programming with CUDA
// ACM Queue, Volume 6 Number 2, pp 44-53, March 2008
// http://mags.acm.org/queue/20080304/
//
// Compilation instructions:
// - Install CUDA
// - Compile with nvcc -o simpleCUDA simpleCUDA.cu
// - Launch the executable
//
//
/////////////////////////////////////
// standard imports
/////////////////////////////////////
#include <stdio.h>
#include <math.h>
/////////////////////////////////////
// CUDA imports (CUDA runtime, not necessary when compiling with nvcc)
/////////////////////////////////////
//#include <cuda_runtime.h>
/////////////////////////////////////
// global variables and configuration section
/////////////////////////////////////
// problem size (vector length) N
static int N = 123456;
// number of threads per block
static int numThreadsPerBlock = 256;
// device to use in case there is more than one
static int selectedDevice = 0;
/////////////////////////////////////
// kernel function (CPU)
/////////////////////////////////////
// Reference CPU implementation of single-precision a*x + y:
// y[i] = alpha * x[i] + y[i] for i in [0, n).
void saxpy_serial(int n, float alpha, float *x, float *y)
{
  for (int idx = 0; idx < n; ++idx)
    y[idx] += alpha * x[idx];
}
/////////////////////////////////////
// kernel function (CUDA device)
/////////////////////////////////////
// GPU implementation of single-precision a*x + y: one element per thread.
// The grid is rounded up to cover n, so threads past the end of the vectors
// must be masked off by the bounds guard below.
__global__ void saxpy_parallel(int n, float alpha, float *x, float *y)
{
  // global element index: block offset plus position within the block
  int gid = blockIdx.x * blockDim.x + threadIdx.x;

  // EXTREMELY important: the last block usually has surplus threads, and
  // without this guard they would write past the end of y.
  if (gid < n)
    y[gid] = alpha * x[gid] + y[gid];
}
/////////////////////////////////////
// error checking routine
/////////////////////////////////////
// Synchronise with the device and report any pending CUDA errors to stderr.
// Two checks are needed: cudaDeviceSynchronize() surfaces asynchronous
// execution errors from earlier kernels/copies, while cudaGetLastError()
// picks up (and clears) launch-configuration errors. `label` identifies the
// call site in the message.
// Fixes over the original: cudaThreadSynchronize() is deprecated in favour of
// cudaDeviceSynchronize(), and `label` is const-correct so string literals
// can be passed without a deprecated conversion.
void checkErrors(const char *label)
{
  cudaError_t err;

  err = cudaDeviceSynchronize();
  if (err != cudaSuccess)
  {
    fprintf(stderr, "CUDA Error: %s (at %s)", cudaGetErrorString(err), label);
  }

  err = cudaGetLastError();
  if (err != cudaSuccess)
  {
    fprintf(stderr, "CUDA Error: %s (at %s)", cudaGetErrorString(err), label);
  }
}
/////////////////////////////////////
// main routine
/////////////////////////////////////
// Driver: run saxpy on both CPU and GPU and compare the results.
// Fixes over the original:
//  - abs() truncated the float difference to int, hiding any mismatch
//    smaller than 1.0 — replaced with fabsf();
//  - "fount" typo in the no-device message;
//  - the kernel's alpha is passed as a float literal (2.0f), not a double.
int main (int argc, char **argv)
{
  // (1) sanity checks and device selection
  int deviceCount;
  cudaGetDeviceCount(&deviceCount);
  if (deviceCount == 0)
  {
    fprintf(stderr, "Sorry, no CUDA device found");
    return 1;
  }
  if (selectedDevice >= deviceCount)
  {
    fprintf(stderr, "Choose device ID between 0 and %d\n", deviceCount-1);
    return 1;
  }
  cudaSetDevice(selectedDevice);
  checkErrors("initialisations");

  // (2) allocate memory on host (h_) and device (d_)
  float *h_x = (float*)malloc(N*sizeof(float));
  float *h_y = (float*)malloc(N*sizeof(float));
  float *d_x;
  cudaMalloc((void**)&d_x, N*sizeof(float));
  float *d_y;
  cudaMalloc((void**)&d_y, N*sizeof(float));
  checkErrors("memory allocation");

  // (3) initialise data on the CPU
  int i;
  for (i=0; i<N; i++)
  {
    h_x[i] = 1.0f + i;
    h_y[i] = (float)(N-i+1);
  }

  // (4) copy data to device
  cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(d_y, h_y, N*sizeof(float), cudaMemcpyHostToDevice);
  checkErrors("copy data to device");

  // (5) compute reference result on the host
  saxpy_serial(N, 2.0f, h_x, h_y);

  // (6) compute on the device: round N up to a whole number of blocks
  int numBlocks = (N+numThreadsPerBlock-1) / numThreadsPerBlock;
  saxpy_parallel<<<numBlocks, numThreadsPerBlock>>>(N, 2.0f, d_x, d_y);
  checkErrors("compute on device");

  // (7) read back result from device into a temporary vector
  float *h_z = (float*)malloc(N*sizeof(float));
  cudaMemcpy(h_z, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
  checkErrors("copy data from device");

  // (8) compare results
  // fabsf keeps the comparison in float; abs() would truncate to int.
  int errorCount = 0;
  for (i=0; i<N; i++)
  {
    if (fabsf(h_y[i]-h_z[i]) > 1e-6f)
      errorCount = errorCount + 1;
  }
  if (errorCount > 0)
    printf("Result comparison failed.\n");
  else
    printf("Result comparison passed.\n");

  // (9) clean up, free memory
  free(h_x);
  free(h_y);
  free(h_z);
  cudaFree(d_x);
  cudaFree(d_y);
  return 0;
}
|
20,634 | /******************************
* Tisma Miroslav 2006/0395
* Multiprocesorski sistemi
* domaci zadatak 6 - 2. zadatak
*******************************/
/**
* 2. Sastaviti program koji pronalazi najmanji i najveci element dvodimenzionalne matrice.
*/
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUM_OF_GPU_THREADS 256
// Find the global min and max of `matrix` (size elements) with a single block
// of NUM_OF_GPU_THREADS threads: each thread scans a contiguous slice, then a
// shared-memory tree reduction combines the per-thread extrema.
// Launch contract: exactly one block of NUM_OF_GPU_THREADS threads (the
// reduction assumes that power-of-two block size).
__global__ void findMinMaxInMatrix(int *matrix, int *min, int *max, int size) {
int i;
int idx = threadIdx.x;
__shared__ int maxs[NUM_OF_GPU_THREADS];
__shared__ int mins[NUM_OF_GPU_THREADS];
// partition: each thread gets size/NUM_OF_GPU_THREADS elements; the last
// thread also takes the remainder
int slice = size / NUM_OF_GPU_THREADS;
int start = idx * slice;
if (idx == NUM_OF_GPU_THREADS - 1)
slice += size % NUM_OF_GPU_THREADS;
int end = start + slice;
// seed with the slice's first element (matrix[start]); for empty slices this
// reads matrix[0], which is still a valid candidate, so the result holds
int local_min = matrix[start];
int local_max = matrix[start];
for (i = start; i < end; i++) {
if (matrix[i] < local_min)
local_min = matrix[i];
else if (matrix[i] > local_max)
local_max = matrix[i];
}
mins[idx] = local_min;
maxs[idx] = local_max;
__syncthreads();
// pairwise tree reduction over shared memory; the barrier at the top of each
// pass orders the previous pass's writes before this pass's reads
int half = NUM_OF_GPU_THREADS;
do {
__syncthreads();
half >>= 1;
if (idx < half) {
if (mins[idx] < mins[idx + half])
mins[idx] = mins[idx];
else
mins[idx] = mins[idx + half];
if (maxs[idx] > maxs[idx + half])
maxs[idx] = maxs[idx];
else
maxs[idx] = maxs[idx + half];
}
} while(half != 1);
// thread 0 publishes the final extrema (it wrote mins[0]/maxs[0] itself)
if (idx == 0) {
*min = mins[0];
*max = maxs[0];
}
}
/* Entry point: read an m x n size, fill the matrix with random values in
 * [-10000, 9999], echo it, then find the global min/max on the GPU with one
 * block of NUM_OF_GPU_THREADS threads.
 * Fixes over the original:
 *  - row breaks in the echo were taken every m elements instead of every n
 *    (the matrix is row-major with n columns per row);
 *  - cudaThreadSynchronize() is deprecated; cudaDeviceSynchronize() replaces
 *    it. */
int main(int argc, char *argv[]) {
    int i, m, n;
    int min, max;
    int *h_matrix, *d_matrix, *d_min, *d_max;

    printf("Matrica ce biti ispisana na standardnom izlazu\n");
    printf("Unesite broj vrsta matrice:\n");
    scanf("%d", &m);
    printf("Unesite broj kolona matrice:\n");
    scanf("%d", &n);

    h_matrix = (int*)malloc(m*n*sizeof(int));
    srand(time(0));
    for (i = 0; i < m*n; i++) {
        if (i % n == 0)  /* start of each row: n columns per row */
            printf("\n");
        h_matrix[i] = -10000 + rand() % 20000;
        printf("%5d ", h_matrix[i]);
    }
    printf("\n");

    cudaMalloc((void**)&d_matrix, m*n*sizeof(int));
    cudaMalloc((void**)&d_min, sizeof(int));
    cudaMalloc((void**)&d_max, sizeof(int));
    cudaMemcpy(d_matrix, h_matrix, m*n*sizeof(int), cudaMemcpyHostToDevice);

    findMinMaxInMatrix<<< 1, NUM_OF_GPU_THREADS >>>(d_matrix, d_min, d_max, m*n);
    cudaDeviceSynchronize();

    cudaMemcpy(&min, d_min, sizeof(int), cudaMemcpyDeviceToHost);
    cudaMemcpy(&max, d_max, sizeof(int), cudaMemcpyDeviceToHost);
    printf("\nNajmanji element u matrici je: %d\n", min);
    printf("Najveci element u matrici je: %d\n", max);

    cudaFree(d_min);
    cudaFree(d_max);
    cudaFree(d_matrix);
    free(h_matrix);
    return EXIT_SUCCESS;
}
|
20,635 | #include "Pixel.cuh"
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
/*
Pixel CLASS CASE
*/
////////////////////////////////////////////////////////
////////////////////////////////////////////////////////
// Default-construct a black pixel (all channels 0) with an 8-bit color range.
// BUGFIX: the channel members were initialized with NULL — a pointer
// constant — even though they are unsigned char; use the integer 0.
__device__ Pixel::Pixel() :
    R(0),
    G(0),
    B(0),
    color_range(255)
{
    //Nothing here
}
// Construct a pixel from explicit 8-bit channel values; the color range
// defaults to 255.
__device__ Pixel::Pixel(const unsigned char R, const unsigned char G, const unsigned char B) :
    color_range(255)
{
    // The parameters shadow the members, so qualify with this->.
    this->R = R;
    this->G = G;
    this->B = B;
}
// Copy constructor: duplicate all channels and the color range.
__device__ Pixel::Pixel(const Pixel& Object)
{
    R = Object.R;
    G = Object.G;
    B = Object.B;
    color_range = Object.color_range;
}
// Print the pixel's R/G/B channels (as ints) via device printf.
// Note: emits no trailing newline and does not print color_range.
__device__ void Pixel::Show_Object() const
{
    //_STD cout << "Red: [" << static_cast<int>(this->R) << " ] " << "Green: [" << static_cast<int>(this->G) << " ] " << "Blue: [" << static_cast<int>(this->B) << " ] " << "Color range: " << this->color_range << NEW_LINE;
    printf("RED [%d] GREEN [%d] BLUE [%d]", static_cast<int>(this->R), static_cast<int>(this->G), static_cast<int>(this->B));
}
// Setter: replace the red channel value.
__device__ void Pixel::Set_R(const unsigned char R)
{
    (*this).R = R;
}
// Setter: replace the green channel value.
__device__ void Pixel::Set_G(const unsigned char G)
{
    (*this).G = G;
}
// Setter: replace the blue channel value.
__device__ void Pixel::Set_B(const unsigned char B)
{
    (*this).B = B;
}
// Setter: replace the color range (maximum channel value).
__device__ void Pixel::Set_Color_Range(const int Color_Range)
{
    (*this).color_range = Color_Range;
}
// Getter: current red channel value.
__device__ const unsigned char Pixel::Get_R() const
{
    return R;
}
// Getter: current green channel value.
__device__ const unsigned char Pixel::Get_G() const
{
    return G;
}
// Getter: current blue channel value.
__device__ const unsigned char Pixel::Get_B() const
{
    return B;
}
// Getter: current color range (maximum channel value).
__device__ const int Pixel::Get_Color_Range() const
{
    return color_range;
}
// Copy assignment: copy all channels and the color range; self-assignment
// is a no-op.
__device__ Pixel& Pixel::operator=(const Pixel& Object)
{
    if (this == &Object)
        return *this;
    R = Object.R;
    G = Object.G;
    B = Object.B;
    color_range = Object.color_range;
    return *this;
}
// Destructor. The original assigned NULL (a pointer constant) to the
// integral members just before destruction — a misuse of NULL with no
// observable effect — so the assignments are removed.
__device__ Pixel::~Pixel()
{
    // Nothing to release: Pixel owns no resources.
}
////////////////////////////////////////////////////////
//////////////////////////////////////////////////////// |
20,636 |
#define SOURCE_INDEX(m,g,i,j,k,cmom,ng,nx,ny) ((m)+((cmom)*(g))+((cmom)*(ng)*(i))+((cmom)*(ng)*(nx)*(j))+((cmom)*(ng)*(nx)*(ny)*(k)))
#define SCATTERING_MATRIX_INDEX(m,g1,g2,nmom,ng) ((m)+((nmom)*(g1))+((nmom)*(ng)*(g2)))
#define SCALAR_FLUX_INDEX(g,i,j,k,ng,nx,ny) ((g)+((ng)*(i))+((ng)*(nx)*(j))+((ng)*(nx)*(ny)*(k)))
#define SCALAR_FLUX_MOMENTS_INDEX(m,g,i,j,k,cmom,ng,nx,ny) ((m)+((cmom-1)*(g))+((cmom-1)*(ng)*(i))+((cmom-1)*(ng)*(nx)*(j))+((cmom-1)*(ng)*(nx)*(ny)*(k)))
#define outer_source(m,g,i,j,k) outer_source[SOURCE_INDEX((m),(g),(i),(j),(k),cmom,ng,nx,ny)]
#define inner_source(m,g,i,j,k) inner_source[SOURCE_INDEX((m),(g),(i),(j),(k),cmom,ng,nx,ny)]
#define scattering_matrix(m,g1,g2) scattering_matrix[SCATTERING_MATRIX_INDEX((m),(g1),(g2),nmom,ng)]
#define scalar_flux(g,i,j,k) scalar_flux[SCALAR_FLUX_INDEX((g),(i),(j),(k),ng,nx,ny)]
#define scalar_flux_moments(m,g,i,j,k) scalar_flux_moments[SCALAR_FLUX_MOMENTS_INDEX((m),(g),(i),(j),(k),cmom,ng,nx,ny)]
// 3D kernel, in local nx,ny,nz dimensions
// Probably not going to vectorise very well..
// Compute the inner source term for every spatial cell and energy group from
// the outer source plus the within-group scattering contribution.
//
// Launched over a 3-D grid covering the local (nx, ny, nz) mesh: one thread
// per cell, looping over all ng energy groups. All array accesses go through
// the SOURCE_INDEX / SCALAR_FLUX_INDEX / SCATTERING_MATRIX_INDEX /
// SCALAR_FLUX_MOMENTS_INDEX macros defined above this kernel.
//
// Moment layout: moment 0 uses the scalar flux directly; the remaining
// cmom-1 moments are grouped by scattering order l (2l+1 entries per order)
// and use scalar_flux_moments, which stores cmom-1 moments per group/cell.
__global__ void calc_inner_source(
    const unsigned int nx,
    const unsigned int ny,
    const unsigned int nz,
    const unsigned int ng,
    const unsigned int cmom,
    const unsigned int nmom,
    const double * __restrict__ outer_source,
    const double * __restrict__ scattering_matrix,
    const double * __restrict__ scalar_flux,
    const double * __restrict__ scalar_flux_moments,
    double * __restrict__ inner_source
    )
{
    const size_t i = blockIdx.x * blockDim.x + threadIdx.x;
    const size_t j = blockIdx.y * blockDim.y + threadIdx.y;
    const size_t k = blockIdx.z * blockDim.z + threadIdx.z;
    // Guard the grid tail in every dimension.
    if (i >= nx) return;
    if (j >= ny) return;
    if (k >= nz) return;
    for (unsigned int g = 0; g < ng; g++)
    {
        // Set first moment to outer source plus scattering contribution of scalar flux
        inner_source(0,g,i,j,k) = outer_source(0,g,i,j,k) + scattering_matrix(0,g,g) * scalar_flux(g,i,j,k);
        // Set other moments similarly based on scalar flux moments
        unsigned int mom = 1;
        for (unsigned int l = 1; l < nmom; l++)
        {
            // Each scattering order l contributes 2l+1 angular moments.
            for (unsigned int m = 0; m < 2*l+1; m++)
            {
                inner_source(mom,g,i,j,k) = outer_source(mom,g,i,j,k) + scattering_matrix(l,g,g) * scalar_flux_moments(mom-1,g,i,j,k);
                mom += 1;
            }
        }
    }
}
|
20,637 | #include "includes.h"
extern "C"
{
}
// Square every element of an N-wide, M-tall matrix in place.
// Mat is laid out row-major with row length N; threads whose (x, y)
// coordinates fall outside the matrix do nothing.
__global__ void elSq(int N, int M, float *Mat)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= N || row >= M)
        return;
    const int flat = row * N + col;
    // Round-to-nearest single-precision multiply, as in the original.
    Mat[flat] = __fmul_rn(Mat[flat], Mat[flat]);
}
20,638 | #include <stdio.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cmath>
#define N 1024
#define threads_per_block 512
// Block-wise dot product: each thread accumulates a grid-strided partial sum
// of d_a[i] * d_b[i] over the N elements (N is a file-level macro), the block
// tree-reduces the partials in shared memory, and thread 0 writes one result
// per block into block_sum. The host must add the per-block values to obtain
// the full dot product.
// Assumes blockDim.x == threads_per_block and a power-of-two block size (the
// halving reduction would drop elements otherwise).
template<typename T>
__global__ void blockwise_dot(T *d_a, T *d_b, T *block_sum)
{
    __shared__ T partial_sum [threads_per_block];
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int idx = threadIdx.x;
    T sum=0;
    // Grid-stride loop: handles N larger than the total thread count.
    while(tid < N)
    {
        sum += d_a[tid]*d_b[tid];
        tid += blockDim.x * gridDim.x;
    }
    //store partial sum of threads of current block
    //in quickly accessible shared memory
    partial_sum[idx] = sum;
    //sync all threads
    __syncthreads();
    // Pairwise halving reduction over the block's partial sums.
    int i = blockDim.x /2;
    while(i != 0)
    {
        if(idx < i)
            partial_sum[idx] += partial_sum[idx+i];
        __syncthreads();
        i /= 2;
    }
    // One result per block.
    if(idx == 0)
        block_sum[blockIdx.x] = partial_sum[0];
}
// Host driver: computes dot(h_a, h_b) on the GPU via per-block partial sums
// (blockwise_dot), then reduces the per-block results on the CPU.
int main()
{
    int num_blocks = std::ceil(float(N)/threads_per_block);
    float h_a[N], h_b[N], *d_a, *d_b, *d_partsum;
    for(int i=0; i<N; i++)
    {
        h_a[i] = i; h_b[i] = 1;
    }
    printf("#blocks %d #threads/block %d\n", num_blocks, threads_per_block);
    cudaMalloc((void**)&d_a, N*sizeof(float));
    cudaMalloc((void**)&d_b, N*sizeof(float));
    cudaMalloc((void**)&d_partsum, num_blocks*sizeof(float));
    // Arrays decay to pointers: pass h_a, not &h_a (pointer-to-array), which
    // only worked because the addresses coincide.
    cudaMemcpy(d_a, h_a, N*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b, N*sizeof(float), cudaMemcpyHostToDevice);
    blockwise_dot<float> <<<num_blocks,threads_per_block>>> (d_a, d_b, d_partsum);
    cudaDeviceSynchronize();
    // Heap allocation instead of a variable-length array (VLAs are not
    // standard C++).
    float *h_partsum = new float[num_blocks];
    float total_sum = 0;
    cudaMemcpy(h_partsum, d_partsum, num_blocks*sizeof(float), cudaMemcpyDeviceToHost);
    for(int i=0; i<num_blocks; i++)
    {
        total_sum += h_partsum[i];
    }
    std:: cout << "result = " << total_sum << "\n";
    // Release host and device buffers (the original leaked all three device
    // allocations).
    delete[] h_partsum;
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_partsum);
}
|
20,639 | /*#include <stdio.h>
#include <math.h>
#include <time.h>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "device_functions.h"
#include "book.h"
#include "cusparse.h"
*/
#define BlockDim 1024
// SpMV (CSR, "vector" kernel): one warp per matrix row. The lanes of a warp
// stride across the row's nonzeros, partial sums are combined in shared
// memory, and lane 0 writes the row result. Requires blockDim.x to be a
// multiple of warpSize and at most BlockDim.
//
// BUGFIX: the original placed __syncthreads() inside `if (row < N)`; in the
// last block, warps whose row >= N skipped the barrier, which is undefined
// behavior. All barriers are now reached by every thread of the block, and
// threads without a row contribute a zero partial sum.
template <typename T>
__global__ void spmv_csr_vector_kernel(T * d_val, T * d_vector, int * d_cols, int * d_ptr, int N, T * d_out)
{
    // Thread ID in block
    int t = threadIdx.x;
    // Thread ID in warp
    int lane = t & (warpSize - 1);
    // Number of warps per block
    int warpsPerBlock = blockDim.x / warpSize;
    // One row per warp
    int row = (blockIdx.x * warpsPerBlock) + (t / warpSize);
    __shared__ volatile T vals[BlockDim];
    T sum = 0;
    if (row < N)
    {
        int rowStart = d_ptr[row];
        int rowEnd = d_ptr[row + 1];
        // All lanes of the warp accumulate this row's products.
        for (int j = rowStart + lane; j < rowEnd; j += warpSize)
        {
            int col = d_cols[j];
            sum += d_val[j] * d_vector[col];
        }
    }
    vals[t] = sum;
    __syncthreads();
    // Reduce the 32 lane partials of each warp. Block-wide barriers between
    // steps keep the shared-memory reads/writes ordered on every
    // architecture (no reliance on implicit warp synchrony).
    if (lane < 16) vals[t] += vals[t + 16];
    __syncthreads();
    if (lane < 8) vals[t] += vals[t + 8];
    __syncthreads();
    if (lane < 4) vals[t] += vals[t + 4];
    __syncthreads();
    if (lane < 2) vals[t] += vals[t + 2];
    __syncthreads();
    if (lane < 1) vals[t] += vals[t + 1];
    __syncthreads();
    // Lane 0 of each warp that owns a valid row writes the result.
    if (row < N && lane == 0)
    {
        d_out[row] = vals[t];
    }
}
20,640 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
// Segment-intersection test between transition segments and path segments.
// One thread per (seg1, seg2) pair over a 2-D grid of 1-D blocks; writes 1
// into adjacency[idx] when the two segments intersect, else 0.
// Parallel segments give deno == 0, so u_a/u_b become inf/NaN and every
// comparison fails — the pair is then reported as non-intersecting.
__global__ void pathAdjacencyKernel(int noTransitions, int noSegments, float* XY1, float* XY2, float* X4_X3, float* Y4_Y3, float* X2_X1, float* Y2_Y1, int* adjacency) {
    const int blockId = blockIdx.y * gridDim.x + blockIdx.x;
    const int idx = blockId * blockDim.x + threadIdx.x;
    if (idx >= noTransitions*noSegments)
        return;
    // Unflatten into the transition-segment and path-segment indices.
    const int seg1 = idx/noSegments;
    const int seg2 = idx - seg1*noSegments;
    // Endpoint differences; the second halves of XY1/XY2 hold y coordinates.
    const float Y1_Y3 = XY1[seg1 + noTransitions] - XY2[seg2 + noSegments];
    const float X1_X3 = XY1[seg1] - XY2[seg2];
    // Standard two-segment intersection parameters u_a and u_b.
    const float numa = X4_X3[seg2]*Y1_Y3 - Y4_Y3[seg2]*X1_X3;
    const float numb = X2_X1[seg1]*Y1_Y3 - Y2_Y1[seg1]*X1_X3;
    const float deno = Y4_Y3[seg2]*X2_X1[seg1] - X4_X3[seg2]*Y2_Y1[seg1];
    const float u_a = numa/deno;
    const float u_b = numb/deno;
    // Intersection happens when both parameters lie in [0, 1].
    const bool hit = (u_a >= 0.0) && (u_a <= 1.0) && (u_b >= 0.0) && (u_b <= 1.0);
    adjacency[idx] = hit ? 1 : 0;
}
20,641 | #include "includes.h"
// ERROR CHECKING MACROS //////////////////////////////////////////////////////
// Monte-Carlo expected present value: one thread per sample path. Each
// thread evolves a mean-reverting jump-diffusion price over nYears Euler
// steps and accumulates the discounted, growth-adjusted cash flow.
//
// NOTE(review): the per-year random inputs are read as brownian[idx+ii],
// jumpSize[idx+ii] and jump[idx+ii] — consecutive years of one path overlap
// with the entries of neighbouring paths. If each path is meant to own
// nYears independent draws, the index should presumably be idx*nYears+ii;
// confirm against how the host fills these buffers.
__global__ void expPVPath(const int noPaths, const float gr, const int nYears, const float meanP, const float timeStep, const float rrr, float current, float reversion, float jumpProb, const float* brownian, const float* jumpSize, const float* jump, float* result) {
    // Get the global index for the matrix
    unsigned int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx < noPaths) {
        // Simulate a forward path
        float value = 0;
        float curr = current;
        for (int ii = 0; ii < nYears; ii++) {
            // Bernoulli jump indicator for this step.
            float jumped = (jump[idx+ii] < jumpProb)? 1.0f : 0.0f;
            // Mean reversion + diffusion + (e^J - 1) multiplicative jump.
            // NOTE(review): exp/pow here are the double-precision overloads;
            // expf/powf would keep the arithmetic in single precision.
            curr += reversion*(meanP - curr)*timeStep + curr*brownian[idx+ii] +
                    (exp(jumpSize[idx+ii]) - 1)*curr*jumped;
            // Cash flow grows at gr and is discounted at rrr.
            value += pow(1 + gr,ii)*curr/pow((1 + rrr),ii);
        }
        result[idx] = value;
    }
}
20,642 |
#include "cuda_runtime.h"
#include <chrono>
#include <iostream>
#include <sstream>
#define arraySize 31 // 35 max
#define def_div 10 // 5<=X<=15
//#define W 100
//#define threads_per_block 32
//#define max_blocks 32
using namespace std;
__constant__ float coefs[arraySize * 2 + 1];
// One GPU thread per fixed prefix: solves a 0/1 knapsack instance by branch
// and bound. The thread's global id fixes the first def_div item decisions
// (its binary representation); the remaining arraySize - def_div items are
// explored with an iterative depth-first search pruned by a fractional
// (LP-relaxation) upper bound. A per-block shared-memory reduction keeps the
// best score and the owning thread id, and the owning thread stores its best
// solution vector into global_mem_bin.
//
// coefs layout (constant memory): [0..arraySize) weights,
// [arraySize..2*arraySize) values, coefs[2*arraySize] = capacity. Items are
// pre-sorted by value density on the host so the fractional bound is valid.
// Shared memory: threads_per_block floats (scores) followed by
// threads_per_block ints (thread ids).
// NOTE(review): bdevX and num_of_blocks are unused inside the kernel.
__global__ void hybrid(float *sh_sum_dev, long int *str_num_dev,
                       float num_of_blocks, int *bdevX, int *global_mem_bin,
                       int threads_per_block) {
    float th_w_sum = 0;       // weight of the fixed prefix
    float th_v_sum = 0;       // value of the fixed prefix
    int th_bin[arraySize];    // current decision vector (-1 = undecided)
    int best_bin[arraySize];  // best complete solution found so far
    extern __shared__ float sh_array[];
    float *sh_maxs = (float *)sh_array;
    int *indices = (int *)&sh_maxs[threads_per_block];
    int reached = 0;          // best value found for the free suffix
    indices[threadIdx.x] = blockIdx.x * blockDim.x + threadIdx.x;
    __syncthreads();
    long signed int num_to_bin = blockIdx.x * blockDim.x + threadIdx.x;
    // num_to_bin += max_blocks * n_of_it;
    // Decode the thread id into the first def_div item decisions.
#pragma unroll
    for (uint i = 0; i < def_div; i++) {
        th_bin[i] = ((num_to_bin) >> i) % 2;
        th_w_sum += th_bin[i] * coefs[i];
        th_v_sum += th_bin[i] * coefs[i + arraySize];
        best_bin[i] = th_bin[i];
    }
    // The remaining items start undecided.
    // NOTE(review): best_bin[def_div..arraySize) stays uninitialized unless
    // a feasible leaf improves `reached`; the winning thread could then
    // publish garbage suffix bits — confirm every prefix admits a feasible
    // completion.
#pragma unroll
    for (uint i = def_div; i < arraySize; i++) {
        th_bin[i] = -1;
    }
    // Capacity left after the fixed prefix; a prefix that already exceeds
    // the total capacity scores 0.
    int Capacity = coefs[arraySize * 2] - th_w_sum;
    sh_maxs[threadIdx.x] = (th_w_sum > coefs[arraySize * 2]) ? 0 : th_v_sum;
    __syncthreads();
    // H_S : iterative DFS over items def_div..arraySize-1. Each item cycles
    // through the states -1 -> 1 -> 0 -> -1 (backtrack).
    int h = def_div;   // current tree depth (item index)
    long int ns = 0;   // node counter (diagnostic only)
    bool forward;
    while (h - def_div != -1) {
        ns++;
        forward = true;
        if (th_bin[h] == -1) {
            th_bin[h] = 1;
        } else {
            if (th_bin[h] == 1) {
                th_bin[h] = 0;
            } else {
                if (th_bin[h] == 0) {
                    th_bin[h] = -1;   // both branches exhausted: backtrack
                    h--;
                    forward = false;
                }
            }
        }
        if (h == arraySize - 1) {
            // Leaf: evaluate the complete assignment of the free suffix.
            int cw = 0;
            int cp = 0;
#pragma unroll
            for (int i = def_div; i < arraySize; i++) {
                cp += coefs[i + arraySize] * th_bin[i];
                cw += coefs[i] * th_bin[i];
            }
            if ((cw <= Capacity) && (cp > reached)) {
                reached = cp;
#pragma unroll
                for (int i = def_div; i < arraySize; i++) {
                    best_bin[i] = th_bin[i];
                }
            }
        } else {
            // Internal node: prune on weight, then on the fractional
            // (LP-relaxation) upper bound over the undecided items.
            int cw = 0;
            for (int i = def_div; i < arraySize; i++) {
                cw += coefs[i] * th_bin[i];
            }
            if (cw > Capacity)
                forward = false;
            cw = 0;
            float cp = 0;
            int nw = 0;
            int np = 0;
#pragma unroll
            for (int i = def_div; i < arraySize; i++) {
                np = th_bin[i] != -1 ? th_bin[i] * coefs[i + arraySize]
                                     : coefs[i + arraySize];
                nw = th_bin[i] != -1 ? th_bin[i] * coefs[i] : coefs[i];
                if (cw + nw <= Capacity) {
                    cw += nw;
                    cp += np;
                } else {
                    // Take the fractional share of the first item that does
                    // not fit (valid because items are density-sorted).
                    cp += np * (Capacity - cw) / nw;
                    break;
                }
            }
            int b = cp;
            if (b <= reached) {
                forward = false;   // bound cannot beat the incumbent
            }
        }
        if (forward) {
            if (h < arraySize - 1) {
                h++;
            }
        }
    }
    // Total score = prefix value + best suffix value.
    sh_maxs[threadIdx.x] += reached;
    __syncthreads();
    // reduction on block: keep the max score and its owning thread id
    for (uint offset = blockDim.x >> 1; offset >= 1; offset >>= 1) {
        if (threadIdx.x < offset) {
            if (sh_maxs[threadIdx.x] < sh_maxs[threadIdx.x + offset]) {
                sh_maxs[threadIdx.x] = sh_maxs[threadIdx.x + offset];
                indices[threadIdx.x] = indices[threadIdx.x + offset];
            }
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (threadIdx.x == 0) {
        sh_sum_dev[blockIdx.x] = sh_maxs[0];
        str_num_dev[blockIdx.x] = indices[0];
    }
    // The block's winning thread persists its solution vector.
    if (blockIdx.x * blockDim.x + threadIdx.x == indices[0]) {
#pragma unroll
        for (int i = 0; i < arraySize; i++) {
            global_mem_bin[blockIdx.x * arraySize + i] = best_bin[i];
        }
    }
    __syncthreads();
}
// Final cross-block reduction: takes the per-block best scores (s) and the
// winning thread ids (str_num_dev) produced by hybrid(), finds the overall
// maximum, and copies the winner's solution vector to the front of
// global_mem_bin. Launched as a single block with one thread per hybrid
// block.
//
// Shared layout: sh_hy_data[0..threads_per_block) holds scores,
// sh_hy_data[threads_per_block..2*threads_per_block) holds thread ids.
// NOTE(review): sh_hy_data is an int array but s[ID] is a float — the score
// is truncated toward zero when stored, so the comparison is done on
// truncated values; confirm this precision loss is intended.
__global__ void hybrid_reduction(float *s, long int *str_num_dev,
                                 int *global_mem_bin, int threads_per_block) {
    int ID = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ int sh_hy_data[];
    sh_hy_data[threadIdx.x] = s[ID];
    sh_hy_data[threadIdx.x + threads_per_block] = str_num_dev[ID];
    __syncthreads();
    // do reduction in shared mem
    for (uint s = blockDim.x >> 1; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            if (sh_hy_data[threadIdx.x] < sh_hy_data[threadIdx.x + s]) {
                sh_hy_data[threadIdx.x] = sh_hy_data[threadIdx.x + s];
                sh_hy_data[threadIdx.x + threads_per_block] =
                    sh_hy_data[threadIdx.x + threads_per_block + s];
            }
        }
        __syncthreads();
    }
    // write result for this block to global mem
    if (threadIdx.x == 0) {
        // if(sh_hy_data[0]>s[0]){//}&&(blockIdx.x>0)){
        s[blockIdx.x] = sh_hy_data[0];
        str_num_dev[blockIdx.x] = sh_hy_data[threads_per_block];
        // Copy the winning block's solution bits to the front (the block
        // index is recovered as winner_thread_id / threads_per_block).
#pragma unroll
        for (int i = 0; i < arraySize; i++) {
            global_mem_bin[i] =
                global_mem_bin[(sh_hy_data[threads_per_block] / threads_per_block) *
                                   arraySize +
                               i];
        }
    }
}
// Decode the binary representation of a: thread t writes bit t of a into
// view_dev[t] (one thread per bit).
__global__ void which_string(long int a, int *view_dev) {
    const int pos = threadIdx.x;
    view_dev[pos] = (a >> pos) % 2;
}
// Recursive quicksort (descending) on a[0..N], where N is the index of the
// last element. Every swap of a[i]/a[j] is mirrored onto the companion array
// b at positions i/j AND i+arraySize/j+arraySize, so both halves of b (the
// weights and the values) stay aligned with the sorted density keys in a.
void quickSortR(float *a, float *b, long N) {
    // On entry: array a[], with a[N] as its last element.
    long i = 0, j = N; // place the scan pointers at the two ends
    float temp, p;
    p = a[N >> 1]; // pivot: the middle element
    // partition step
    do {
        while (a[i] > p)
            i++;
        while (a[j] < p)
            j--;
        if (i <= j) {
            temp = a[i];
            a[i] = a[j];
            a[j] = temp;
            // mirror the swap onto both halves of b
            temp = b[i];
            b[i] = b[j];
            b[j] = temp;
            temp = b[i + arraySize];
            b[i + arraySize] = b[j + arraySize];
            b[j + arraySize] = temp;
            i++;
            j--;
        }
    } while (i <= j);
    // recurse into whichever partitions still contain work
    if (j > 0)
        quickSortR(a, b, j);
    if (N > i)
        quickSortR(a + i, b + i, N - i);
}
// Host driver: reads one knapsack instance per stdin line (arraySize
// weights, arraySize values, then the capacity), sorts items by value
// density, uploads the coefficients to constant memory, runs the hybrid
// branch-and-bound kernel plus the final reduction, and prints the best
// value, its thread id, the solution bits and CPU-side validation sums.
int main() {
    cudaDeviceProp deviceProp;
    cudaGetDeviceProperties(&deviceProp, 0);
    // One warp per block; 2^def_div threads cover every prefix.
    int threads_per_block = deviceProp.warpSize;
    int max_blocks = pow(2, def_div) / threads_per_block;
    long int strSize_b = pow(2, arraySize);
    int num_of_blocks = strSize_b / threads_per_block;
    float *Sum = new float[32]; // = { 0 };
    float *sh_sum_dev;
    int iter = 0;
    string line;
    float v;
    float *dev_coefs = new float[arraySize * 2 + 1];
    while (getline(cin, line)) {
        // Parse one whitespace-separated instance line.
        istringstream iss(line);
        int q = 0;
        while (iss >> v) {
            dev_coefs[q] = v;
            q++;
        }
        cout<<"Iter = "<<iter<<"\n";iter++;
        for (int i = 0; i < arraySize * 2 + 1; i++) {
            cout << dev_coefs[i] << " ";
        }
        cout << "\n";
        // int W = int(dev_coefs[arraySize*2]);
        long int *str_num_dev;
        // NOTE(review): str_num, additional_array, suda and the device
        // buffers below are allocated every iteration and never released
        // (the deletes at the bottom are commented out) — this loop leaks.
        long int *str_num = new long int[1];
        float *additional_array = new float[arraySize];
        // Sort items by value/weight density (descending); dev_coefs is
        // permuted alongside so the kernel's fractional bound is valid.
        for (int i = 0; i < arraySize; i++) {
            additional_array[i] = dev_coefs[i + arraySize] / dev_coefs[i];
        }
        quickSortR(additional_array, dev_coefs, arraySize - 1);
        int *bdevX;
        cudaMalloc((void **)&bdevX, arraySize * sizeof(int));
        int *global_mem_bin;
        cudaMalloc((void **)&global_mem_bin, max_blocks * arraySize * sizeof(int));
        cudaMalloc((void **)&sh_sum_dev, num_of_blocks * sizeof(float));
        cudaMalloc((void **)&str_num_dev, num_of_blocks * sizeof(long));
        cudaMemcpyToSymbol(coefs, dev_coefs, (2 * arraySize + 1) * sizeof(float));
        std::chrono::time_point<std::chrono::high_resolution_clock> start, end;
        start = std::chrono::high_resolution_clock::now();
        // Dynamic shared memory: score floats + id ints (+ slack).
        hybrid<<<max_blocks, threads_per_block,
                 threads_per_block * 3 * sizeof(int)>>>(
            sh_sum_dev, str_num_dev, num_of_blocks, bdevX, global_mem_bin,
            threads_per_block);
        hybrid_reduction<<<1, max_blocks, threads_per_block * 3 * sizeof(int)>>>(
            sh_sum_dev, str_num_dev, global_mem_bin, threads_per_block);
        int *suda = new int[arraySize];
        // NOTE(review): copies sizeof(int) (4 bytes) into a float array —
        // this works only because sizeof(int) == sizeof(float). The blocking
        // copies also act as the synchronization point after the kernels.
        cudaMemcpy(Sum, sh_sum_dev, sizeof(int), cudaMemcpyDeviceToHost);
        cudaMemcpy(str_num, str_num_dev, sizeof(long int), cudaMemcpyDeviceToHost);
        cudaMemcpy(suda, global_mem_bin, arraySize * sizeof(int),
                   cudaMemcpyDeviceToHost);
        end = std::chrono::high_resolution_clock::now();
        // Elapsed microseconds; includes the device-to-host copies.
        int elapsed_seconds =
            std::chrono::duration_cast<std::chrono::microseconds>(end - start)
                .count();
        std::time_t end_time = std::chrono::system_clock::to_time_t(end);
        std::cout << "Время выполнения: " << elapsed_seconds << "microseconds\n";
        cout << "Acheived maximal sum = " << Sum[0] << "\n";
        cout << str_num[0] << "\n";
        for (int i = 0; i < arraySize; i++) {
            cout << suda[i];
        }
        cout << "\n";
        // check: recompute value and weight of the reported solution on CPU
        int checksum = 0;
        for (int i = 0; i < arraySize; i++) {
            checksum += dev_coefs[i + arraySize] * suda[i];
        }
        cout << "Validation sum = " << checksum << "\n";
        checksum = 0;
        for (int i = 0; i < arraySize; i++) {
            checksum += dev_coefs[i] * suda[i];
        }
        cout << "Weight = " << checksum << "\n";
        // NOTE(review): coefs is a __constant__ symbol, not a cudaMalloc
        // pointer — cudaFree(coefs) is invalid and returns an error code.
        cudaFree(coefs);
        cudaFree(sh_sum_dev);
        cudaFree(str_num_dev);
        cudaFree(bdevX);
        cudaFree(global_mem_bin);
        /*
        delete[] Sum;
        delete[] suda;
        delete[] str_num;
        delete[] dev_coefs;
        delete[] additional_array;
        */
        //cudaDeviceReset();
    }
    return 0;
}
|
20,643 | #include <stdio.h>
#include <cuda.h>
#include <time.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
typedef struct
{
unsigned char red, green, blue;
} PPMPixel;
typedef struct
{
unsigned char gray;
} PGMPixel;
typedef struct
{
int x, y;
PPMPixel *data;
} PPMImage;
typedef struct
{
int x, y;
PGMPixel *data;
} PGMImage;
#define CREATOR "V"
#define RGB_COMPONENT_COLOR 255
// Parse a binary (P6) PPM file into a freshly allocated PPMImage.
// Exits the process with a diagnostic on any I/O or format error.
static PPMImage *readPPM(const char *filename)
{
    char buff[16];
    PPMImage *img;
    FILE *fp;
    int c, rgb_comp_color;
    //open PPM file for reading
    fp = fopen(filename, "rb");
    if (!fp)
    {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    //read image format
    if (!fgets(buff, sizeof(buff), fp))
    {
        perror(filename);
        exit(1);
    }
    //check the image format
    if (buff[0] != 'P' || buff[1] != '6')
    {
        fprintf(stderr, "Invalid image format (must be 'P6')\n");
        exit(1);
    }
    //alloc memory for image
    img = (PPMImage *)malloc(sizeof(PPMImage));
    if (!img)
    {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //skip comment lines starting with '#'
    c = getc(fp);
    while (c == '#')
    {
        while (getc(fp) != '\n')
            ;
        c = getc(fp);
    }
    ungetc(c, fp);
    //read image size information
    if (fscanf(fp, "%d %d", &img->x, &img->y) != 2)
    {
        fprintf(stderr, "Invalid image size (error loading '%s')\n", filename);
        exit(1);
    }
    //read rgb component
    if (fscanf(fp, "%d", &rgb_comp_color) != 1)
    {
        fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename);
        exit(1);
    }
    //check rgb component depth
    if (rgb_comp_color != RGB_COMPONENT_COLOR)
    {
        fprintf(stderr, "'%s' does not have 8-bits components\n", filename);
        exit(1);
    }
    // consume the single whitespace byte that terminates the header
    while (fgetc(fp) != '\n')
        ;
    //memory allocation for pixel data
    img->data = (PPMPixel *)malloc(img->x * img->y * sizeof(PPMPixel));
    // BUGFIX: the original re-checked img here; test the new allocation.
    if (!img->data)
    {
        fprintf(stderr, "Unable to allocate memory\n");
        exit(1);
    }
    //read pixel data from file (3 bytes per pixel, img->y rows)
    if (fread(img->data, 3 * img->x, img->y, fp) != (size_t)img->y)
    {
        fprintf(stderr, "Error loading image '%s'\n", filename);
        exit(1);
    }
    fclose(fp);
    return img;
}
// Write a grayscale image to disk as a binary (P5) PGM file with a creator
// comment; exits the process if the file cannot be opened for writing.
void writePGM(const char *filename, PGMImage *gry)
{
    FILE *fp;
    //open file for output
    fp = fopen(filename, "wb");
    if (!fp)
    {
        fprintf(stderr, "Unable to open file '%s'\n", filename);
        exit(1);
    }
    //write the header file
    //image format
    fprintf(fp, "P5\n");
    //comments
    fprintf(fp, "# Created by %s\n", CREATOR);
    //image size
    fprintf(fp, "%d %d\n", gry->x, gry->y);
    // maximum gray value (shares the RGB component depth constant)
    fprintf(fp, "%d\n", RGB_COMPONENT_COLOR);
    // pixel data: gry->y rows of gry->x single-byte pixels
    fwrite(gry->data, gry->x, gry->y, fp);
    fclose(fp);
}
// Convert a color PPM image to grayscale with the 0.3/0.59/0.11 luminance
// weights, recording the conversion-loop time in milliseconds into *ttt.
// Returns a newly allocated PGMImage, or NULL when img is NULL or an
// allocation fails (the original returned an uninitialized pointer when
// img was NULL).
static PGMImage *changeColorPPM(PPMImage *img, double *ttt)
{
    PGMImage *gry = NULL;   // BUGFIX: was uninitialized when img == NULL
    int i, grayValue;
    double graymapval;
    if (img)
    {
        gry = (PGMImage *)malloc(sizeof(PGMImage));
        if (!gry)
            return NULL;
        gry->x = img->x;
        gry->y = img->y;
        gry->data = (PGMPixel *)malloc(gry->x * gry->y * sizeof(PGMPixel));
        if (!gry->data)
        {
            free(gry);
            return NULL;
        }
        clock_t start = clock();
        for (i = 0; i < img->x * img->y; i++)
        {
            // Weighted luminance, wrapped into the 0..255 range exactly as
            // the GPU kernel does.
            graymapval = 0.3 * (img->data[i].red) + 0.59 * (img->data[i].green) + 0.11 * (img->data[i].blue);
            grayValue = (unsigned char)(((unsigned int)graymapval) % (RGB_COMPONENT_COLOR + 1));
            gry->data[i].gray = grayValue;
        }
        clock_t end = clock();
        // Elapsed CPU time in milliseconds.
        *ttt = (((double)(end - start)) / CLOCKS_PER_SEC) * 1000;
    }
    return gry;
}
// Convert per-channel R/G/B arrays to 8-bit grayscale using the luminance
// weights 0.3/0.59/0.11. One thread per pixel; n is the pixel count.
__global__ void toGRAY(unsigned char *R, unsigned char *G, unsigned char *B, unsigned char *GRAY, long long n)
{
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n)
        return;
    // Weighted luminance, wrapped to 8 bits exactly as the CPU path does.
    const double lum = 0.3 * (R[tid]) + 0.59 * (G[tid]) + 0.11 * (B[tid]);
    GRAY[tid] = (unsigned char)(((unsigned int)lum) % (256));
}
// Benchmark driver: converts ten PPM images (JX0.ppm .. JX9.ppm) to
// grayscale, first serially on the CPU, then on the GPU for block sizes
// 2^0..2^10, and prints comma-separated tables of parallel kernel times and
// speedups versus the serial pass.
int main()
{
    char nameppm[] = "JX0.ppm";
    char namepgm[] = "JX0.pgm";
    double serialTime[10];
    printf("Serial Computation Time: \n");
    printf("I\tTime\n");
    // CPU pass: convert each image, record its conversion time.
    for (int i = 0; i <= 9; i++)
    {
        char str[10];
        sprintf(str, "%d", i);
        // Patch the digit inside the file names ("JX<i>.ppm"/".pgm").
        nameppm[1] = i + '0';
        namepgm[1] = i + '0';
        PPMImage *image;
        PGMImage *grayImage;
        image = readPPM(nameppm);
        double ttt;
        grayImage = changeColorPPM(image, &ttt);
        printf("%d\t%f\n", i, ttt);
        serialTime[i] = ttt;
        writePGM(namepgm, grayImage);
        free(image->data);
        free(image);
        free(grayImage->data);
        free(grayImage);
    }
    long long len;
    long long lenb, mx, mn;
    // Block sizes swept from 2^0 up to 2^10.
    mx = pow(2, 10);
    mn = pow(2, 0);
    lenb = mn;
    // getTime[0][*] is the block-size header row; getTime[i+1][*] holds
    // image i's timings, with column 0 being the image index.
    double getTime[11][12];
    getTime[0][0] = 0;
    int ii = 0;
    for (; lenb <= mx; lenb *= 2)
    {
        for (int i = 0; i <= 9; i++)
        {
            unsigned char *R, *G, *B, *GRAY, *dR, *dG, *dB, *dGRAY;
            char str[10];
            sprintf(str, "%d", i);
            nameppm[1] = i + '0';
            namepgm[1] = i + '0';
            PPMImage *img;
            PGMImage *gry;
            img = readPPM(nameppm);
            len = (img->x * img->y);
            gry = (PGMImage *)malloc(sizeof(PGMImage));
            gry->x = img->x;
            gry->y = img->y;
            gry->data = (PGMPixel *)malloc(gry->x * gry->y * sizeof(PGMPixel));
            // Split the interleaved pixels into per-channel (SoA) arrays.
            R = (unsigned char *)malloc(len * sizeof(unsigned char));
            G = (unsigned char *)malloc(len * sizeof(unsigned char));
            B = (unsigned char *)malloc(len * sizeof(unsigned char));
            GRAY = (unsigned char *)malloc(len * sizeof(unsigned char));
            cudaMalloc((void **)&dR, len * sizeof(unsigned char));
            cudaMalloc((void **)&dG, len * sizeof(unsigned char));
            cudaMalloc((void **)&dB, len * sizeof(unsigned char));
            cudaMalloc((void **)&dGRAY, len * sizeof(unsigned char));
            for (int j = 0; j < len; j++)
            {
                R[j] = img->data[j].red;
                G[j] = img->data[j].green;
                B[j] = img->data[j].blue;
            }
            cudaMemcpy(dR, R, len * sizeof(unsigned char), cudaMemcpyHostToDevice);
            cudaMemcpy(dG, G, len * sizeof(unsigned char), cudaMemcpyHostToDevice);
            cudaMemcpy(dB, B, len * sizeof(unsigned char), cudaMemcpyHostToDevice);
            int blockSize = lenb;
            // Ceiling division so the grid covers the whole image.
            int numBlocks = (len + blockSize - 1) / blockSize;
            // Time only the kernel with CUDA events.
            cudaEvent_t start, stop;
            cudaEventCreate(&start);
            cudaEventCreate(&stop);
            cudaEventRecord(start);
            toGRAY<<<numBlocks, blockSize>>>(dR, dG, dB, dGRAY, len);
            cudaEventRecord(stop);
            cudaEventSynchronize(stop);
            float tot_time = 0;
            cudaEventElapsedTime(&tot_time, start, stop);
            getTime[i + 1][0] = i;
            getTime[0][ii + 1] = lenb;
            getTime[i + 1][ii + 1] = tot_time; //FOR PARALLEL TIME GRAPH
            //getTime[i+1][ii+1] = (serialTime[i]/tot_time); //FOR SPEEDUP GRAPH
            cudaMemcpy(GRAY, dGRAY, len * sizeof(unsigned char), cudaMemcpyDeviceToHost);
            for (int j = 0; j < len; j++)
            {
                gry->data[j].gray = GRAY[j];
            }
            writePGM(namepgm, gry);
            free(R);
            free(G);
            free(B);
            free(GRAY);
            cudaFree(dR);
            cudaFree(dG);
            cudaFree(dB);
            cudaFree(dGRAY);
            free(img->data);
            free(img);
            free(gry->data);
            free(gry);
        }
        ii++;
    }
    // Timing table: row 0 = block sizes, rows 1..10 = per-image times.
    printf("Parallel Computation Time: \n");
    for (int i = 0; i < 11; i++)
    {
        for (int j = 0; j < 12; j++)
        {
            if (j == 0)
            {
                printf("%.0f", getTime[i][j]);
            }
            else if (i == 0)
            {
                printf(",%.0f", getTime[i][j]);
            }
            else
                printf(",%f", getTime[i][j]);
        }
        printf("\n");
    }
    // Speedup table: serial time divided by the measured kernel time.
    printf("Speed Up: \n");
    for (int i = 0; i < 11; i++)
    {
        for (int j = 0; j < 12; j++)
        {
            if (j == 0)
            {
                printf("%.0f", getTime[i][j]);
            }
            else if (i == 0)
            {
                printf(",%.0f", getTime[i][j]);
            }
            else
                printf(",%f", (serialTime[i - 1] / (double)getTime[i][j]));
        }
        printf("\n");
    }
    return 0;
}
|
20,644 | #include <stdio.h>
#define SIZE 1024
// Функция сложения двух векторов
// Kernel launched with a single block of SIZE threads; thread idx owns row
// idx of the SIZE x SIZE result.
// NOTE(review): for each (idx, i) the inner k-loop overwrites
// result[idx*SIZE + i] on every iteration, so only the k == SIZE-1 term
// survives; if a sum over k was intended, this should accumulate (+=)
// instead — confirm the intended semantics.
__global__ void addVector(float* left, float* right, float* result)
{
    // Id of the current thread.
    int idx = threadIdx.x;
    // Compute the result.
    for (int i = 0; i < SIZE; i++) {
        for (int k = 0; k < SIZE; k++) {
            result[idx * SIZE + i] = left[k * SIZE + i] + right[k * SIZE + idx];
            //printf("%d %d %d\n", idx, i, k);
        }
    }
}
// Host driver: fills a SIZE*SIZE matrix with 20.19, launches addVector with
// one block of SIZE threads (passing the same matrix as both inputs), and
// prints the first SIZE+10 elements of the result.
__host__ int main()
{
    // Allocate the host matrices (SIZE x SIZE, flattened).
    float* vec1 = new float[SIZE * SIZE];
    float* vec3 = new float[SIZE * SIZE];
    // Initialize the input values.
    for (int i = 0; i < SIZE * SIZE; i++)
    {
        vec1[i] = 20.19;
    }
    // Device-memory pointers.
    float* devVec1;
    float* devVec3;
    // Allocate device memory for both matrices.
    cudaMalloc((void**)&devVec1, sizeof(float) * SIZE * SIZE);
    cudaMalloc((void**)&devVec3, sizeof(float) * SIZE * SIZE);
    // Copy the data to the device.
    // NOTE(review): vec3 is copied while still uninitialized — the kernel
    // overwrites devVec3 anyway, so this second transfer is unnecessary.
    cudaMemcpy(devVec1, vec1, sizeof(float) * SIZE * SIZE, cudaMemcpyHostToDevice);
    cudaMemcpy(devVec3, vec3, sizeof(float) * SIZE * SIZE, cudaMemcpyHostToDevice);
    dim3 gridSize = dim3(1, 1, 1); // grid dimensions: a single block
    dim3 blockSize = dim3(SIZE, 1, 1); // block dimensions: one thread per row
    // Launch the kernel (both inputs are devVec1).
    addVector<<<gridSize, blockSize>>>(devVec1, devVec1, devVec3);
    // Alternative launch kept from the original:
    //addVector<<<blocks, threads>>>(devVec1, devVec2, devVec3);
    // Event handle used purely as a synchronization point.
    cudaEvent_t syncEvent;
    cudaEventCreate(&syncEvent); // create the event
    cudaEventRecord(syncEvent, 0); // record it on the default stream
    cudaEventSynchronize(syncEvent); // wait for the kernel to finish
    // Only now fetch the computed result.
    cudaMemcpy(vec3, devVec3, sizeof(float) * SIZE * SIZE, cudaMemcpyDeviceToHost);
    // Print the first SIZE+10 results (safe: the buffer has SIZE*SIZE).
    for (int i = 0; i < SIZE + 10; i++)
    {
        printf("Element #%i: %.1f\n", i , vec3[i]);
    }
    //
    // Release resources.
    //
    cudaEventDestroy(syncEvent);
    cudaFree(devVec1);
    cudaFree(devVec3);
    delete[] vec1; vec1 = 0;
    delete[] vec3; vec3 = 0;
}
|
20,645 | #include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#define BUFFER_LEN 64
#define BUFFER_SIZE_OFFSET 56
//#define THREADS 4096
#define THREADS 64
#define LEFTROTATE(x, c) (((x) << (c)) | ((x) >> (32 - (c))))
// Build a single padded MD5 block (BUFFER_LEN = 64 bytes) from message:
// message bytes, a 0x80 marker, zero fill, and the bit length at
// BUFFER_SIZE_OFFSET. Returns the number of message bytes used.
// BUGFIX: the original wrote buffer_8[buffer_size] = 0x80 for messages of
// up to BUFFER_LEN bytes — out of bounds at 64 and stomping the length
// field for 56..63. A single MD5 block holds at most 55 message bytes
// (marker + 8-byte length must also fit), so longer input is truncated.
size_t pad(const char * message, uint32_t buffer[]) {
    size_t buffer_size = strnlen(message,BUFFER_LEN);
    if (buffer_size > BUFFER_SIZE_OFFSET - 1)
        buffer_size = BUFFER_SIZE_OFFSET - 1;
    uint8_t * buffer_8 = (uint8_t *) buffer;
    memset(buffer_8,0,BUFFER_LEN );
    memcpy(buffer_8,message,buffer_size);
    buffer_8[buffer_size] = 0x80;            // mandatory padding marker
    size_t newLen = buffer_size * 8;         // message length in bits
    uint8_t * offset = &buffer_8[BUFFER_SIZE_OFFSET];
    memcpy(offset, &newLen, 4);              // little-endian hosts only
    return buffer_size;
}
// Parse a 32-hex-character MD5 digest string into its four 32-bit words,
// byte-swapped with htonl so they match the in-memory digest layout.
// BUGFIX: the original copied 8 characters into char tmp[8] with strncpy
// and never NUL-terminated it, so strtoul could read past the buffer
// (undefined behavior, potentially wrong values). Use a 9-byte buffer
// with an explicit terminator.
void loadHash(char* argv, uint32_t* a, uint32_t* b, uint32_t* c, uint32_t* d) {
    char tmp[9];
    tmp[8] = '\0';
    memcpy(tmp, &argv[0], 8);
    *a = htonl(strtoul(tmp, 0, 16));
    memcpy(tmp, &argv[8], 8);
    *b = htonl(strtoul(tmp, 0, 16));
    memcpy(tmp, &argv[16], 8);
    *c = htonl(strtoul(tmp, 0, 16));
    memcpy(tmp, &argv[24], 8);
    *d = htonl(strtoul(tmp, 0, 16));
}
__global__ void find( uint32_t hostA, uint32_t hostB, uint32_t hostC, uint32_t hostD, uint32_t * outM, char * msg ) {
int tid = blockIdx.x;
char init = ';' + (tid / 64);
char stop = ';' + (tid % 64);
//printf("I am thread %d and I will explore from %c%c;;;; to %c%czzzz a hash \n", tid, init, stop, init,stop);
uint32_t targetA = hostA;
uint32_t targetB = hostB;
uint32_t targetC = hostC;
uint32_t targetD = hostD;
uint32_t M[BUFFER_LEN];
for (int i=0; i<BUFFER_LEN; ++i) {
M[i] = outM[i];
}
char char0,char1,char2,char3,char4;
char0 = stop;
for (char1 = ';'; char1 <= 'z' ; ++char1) {
for (char2 = ';'; char2 <= 'z' ; ++char2) {
for (char3 = ';'; char3 <= 'z' ; ++char3) {
for (char4 = ';'; char4 <= 'z' ; ++char4) {
((char *)M)[0] = char0;
((char *)M)[1] = char1;
((char *)M)[2] = char2;
((char *)M)[3] = char3;
((char *)M)[4] = char4;
//Initialize variables:
uint32_t a0 = 0x67452301;
uint32_t b0 = 0xefcdab89;
uint32_t c0 = 0x98badcfe;
uint32_t d0 = 0x10325476;
//Initialize hash value for this chunk:
uint32_t A = a0;
uint32_t B = b0;
uint32_t C = c0;
uint32_t D = d0;
//Main loop:
uint32_t F;
F = (B & C) | (( ~ B) & D);
F = F + A + 0xd76aa478 + M[0];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 7);
F = (B & C) | (( ~ B) & D);
F = F + A + 0xe8c7b756 + M[1];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 12);
F = (B & C) | (( ~ B) & D);
F = F + A + 0x242070db + M[2];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 17);
F = (B & C) | (( ~ B) & D);
F = F + A + 0xc1bdceee + M[3];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 22);
F = (B & C) | (( ~ B) & D);
F = F + A + 0xf57c0faf + M[4];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 7);
F = (B & C) | (( ~ B) & D);
F = F + A + 0x4787c62a + M[6];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 12);
F = (B & C) | (( ~ B) & D);
F = F + A + 0xa8304613 + M[6];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 17);
F = (B & C) | (( ~ B) & D);
F = F + A + 0xfd469501 + M[7];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 22);
F = (B & C) | (( ~ B) & D);
F = F + A + 0x698098d8 + M[8];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 7);
F = (B & C) | (( ~ B) & D);
F = F + A + 0x8b44f7af + M[9];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 12);
F = (B & C) | (( ~ B) & D);
F = F + A + 0xffff5bb1 + M[10];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 17);
F = (B & C) | (( ~ B) & D);
F = F + A + 0x895cd7be + M[11];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 22);
F = (B & C) | (( ~ B) & D);
F = F + A + 0x6b901122 + M[12];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 7);
F = (B & C) | (( ~ B) & D);
F = F + A + 0xfd987193 + M[13];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 12);
F = (B & C) | (( ~ B) & D);
F = F + A + 0xa679438e + M[14];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 17);
F = (B & C) | (( ~ B) & D);
F = F + A + 0x49b40821 + M[15];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 22);
// ********************************************************************************************
F = (D & B) | (( ~ D) & C);
F = F + A + 0xf61e2562 + M[1];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 5);
F = (D & B) | (( ~ D) & C);
F = F + A + 0xc040b340+ M[6];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 9);
F = (D & B) | (( ~ D) & C);
F = F + A + 0x265e5a51 + M[11];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 14);
F = (D & B) | (( ~ D) & C);
F = F + A +0xe9b6c7aa + M[0];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 20);
F = (D & B) | (( ~ D) & C);
F = F + A + 0xd62f105d + M[5];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 5);
F = (D & B) | (( ~ D) & C);
F = F + A + 0x2441453 + M[10];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 9);
F = (D & B) | (( ~ D) & C);
F = F + A + 0xd8a1e681 + M[15];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 14);
F = (D & B) | (( ~ D) & C);
F = F + A + 0xe7d3fbc8 + M[4];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 20);
F = (D & B) | (( ~ D) & C);
F = F + A + 0x21e1cde6 + M[9];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 5);
F = (D & B) | (( ~ D) & C);
F = F + A + 0xc33707d6 + M[14];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 9);
F = (D & B) | (( ~ D) & C);
F = F + A + 0xf4d50d87 + M[3];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 14);
F = (D & B) | (( ~ D) & C);
F = F + A + 0x455a14ed + M[8];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 20);
F = (D & B) | (( ~ D) & C);
F = F + A + 0xa9e3e905 + M[13];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 5);
F = (D & B) | (( ~ D) & C);
F = F + A + 0xfcefa3f8 + M[2];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 9);
F = (D & B) | (( ~ D) & C);
F = F + A + 0x676f02d9 + M[7];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 14);
F = (D & B) | (( ~ D) & C);
F = F + A + 0x8d2a4c8a + M[12];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 20);
// ***************************************************************************************************
F = B ^ C ^ D;
F = F + A + 0xfffa3942 + M[5];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 4);
F = B ^ C ^ D;
F = F + A + 0x8771f681+ M[8];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 11);
F = B ^ C ^ D;
F = F + A + 0x6d9d6122+ M[11];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 16);
F = B ^ C ^ D;
F = F + A + 0xfde5380c + M[14];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 23);
F = B ^ C ^ D;
F = F + A + 0xa4beea44 + M[1];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 4);
F = B ^ C ^ D;
F = F + A + 0x4bdecfa9 + M[4];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 11);
F = B ^ C ^ D;
F = F + A +0xf6bb4b60 + M[7];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 16);
F = B ^ C ^ D;
F = F + A + 0xbebfbc70 + M[10];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 23);
F = B ^ C ^ D;
F = F + A + 0x289b7ec6 + M[13];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 4);
F = B ^ C ^ D;
F = F + A + 0xeaa127fa + M[0];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 11);
F = B ^ C ^ D;
F = F + A + 0xd4ef3085+ M[3];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 16);
F = B ^ C ^ D;
F = F + A + 0x4881d05 + M[6];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 23);
F = B ^ C ^ D;
F = F + A + 0xd9d4d039 + M[9];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 4);
F = B ^ C ^ D;
F = F + A + 0xe6db99e5 + M[12];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 11);
F = B ^ C ^ D;
F = F + A + 0x1fa27cf8 + M[15];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 16);
F = B ^ C ^ D;
F = F + A + 0xc4ac5665 + M[2];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 23);
// **********************************************************************************************
F = C ^ (B | (~ D));
F = F + A + 0xf4292244 + M[0];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 6);
F = C ^ (B | (~ D));
F = F + A + 0x432aff97 + M[7];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 10);
F = C ^ (B | (~ D));
F = F + A + 0xab9423a7 + M[14];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 15);
F = C ^ (B | (~ D));
F = F + A + 0xfc93a039 + M[5];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 21);
F = C ^ (B | (~ D));
F = F + A + 0x655b59c3 + M[12];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 6);
F = C ^ (B | (~ D));
F = F + A + 0x8f0ccc92 + M[3];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 10);
F = C ^ (B | (~ D));
F = F + A + 0xffeff47d + M[10];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 15);
F = C ^ (B | (~ D));
F = F + A + 0x85845dd1 + M[1];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 21);
F = C ^ (B | (~ D));
F = F + A + 0x6fa87e4f + M[8];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 6);
F = C ^ (B | (~ D));
F = F + A + 0xfe2ce6e0 + M[15];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 10);
F = C ^ (B | (~ D));
F = F + A + 0xa3014314 + M[6];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 15);
F = C ^ (B | (~ D));
F = F + A + 0x4e0811a1 + M[13];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 21);
F = C ^ (B | (~ D));
F = F + A + 0xf7537e82 + M[4];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 6);
F = C ^ (B | (~ D));
F = F + A + 0xbd3af235 + M[11];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 10);
F = C ^ (B | (~ D));
F = F + A + 0x2ad7d2bb + M[2];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 15);
F = C ^ (B | (~ D));
F = F + A + 0xeb86d391 + M[9];
A = D;
D = C;
C = B;
B = B + LEFTROTATE(F, 21);
//Add this chunk's hash to result so far:
a0 = a0 + A;
b0 = b0 + B;
c0 = c0 + C;
d0 = d0 + D;
if ( targetA == a0
&& targetB == b0
&& targetC == c0
&& targetD == d0) {
msg[0] = char0;
msg[1] = char1;
msg[2] = char2;
msg[3] = char3;
msg[4] = char4;
// printf("hashcount : %lu\n", hashcount);
return ;
}
}
}
}
}
}
int main( int argc, char * argv[] ) {
	// Usage: ./prog <32-hex-char md5 digest>
	// Brute-forces a 5-char key whose MD5 matches the given digest.
	if (argc != 2) return 1;
	// Reject short arguments so the memcpy below cannot over-read argv[1].
	if (strlen(argv[1]) < 32) return 1;
	char hash[]= "00000000000000000000000000000000";
	memcpy(hash, argv[1], 32);
	uint32_t hostA;
	uint32_t hostB;
	uint32_t hostC;
	uint32_t hostD;
	// Split the hex digest into the four 32-bit MD5 state words.
	loadHash(hash, &hostA, &hostB, &hostC, &hostD);
	char msg[9] = {0,0,0,0,0,0,0,0,0};
	char *dev_msg;
	uint32_t M[BUFFER_LEN];
	uint32_t * dev_M;
	pad("01234", M);
	// BUG FIX: BUFFER_LEN is a uint32_t element count, not a byte count.
	// The original allocated/copied only BUFFER_LEN bytes, truncating the
	// padded message block to a quarter of its size.
	cudaMalloc( (void**)&dev_M , BUFFER_LEN * sizeof(uint32_t) ) ;
	cudaMalloc( (void**)&dev_msg , 9 ) ;
	// Zero the device result buffer so "not found" is reported reliably
	// instead of reading uninitialized device memory.
	cudaMemset( dev_msg, 0, 9 ) ;
	cudaMemcpy( (void*) dev_M, (void*) M, BUFFER_LEN * sizeof(uint32_t), cudaMemcpyHostToDevice ) ;
	find<<<THREADS,1>>>( hostA, hostB, hostC, hostD, dev_M, dev_msg );
	// Blocking copy also synchronizes with the kernel.
	cudaMemcpy( msg, dev_msg, 8, cudaMemcpyDeviceToHost ) ;
	if (msg[0] == 0) {
		printf("key not found \n");
	} else {
		printf("<<< FOUND KEY : %s >>>\n", msg);
	}
	cudaFree( dev_msg ) ;
	return 0;
}
|
20,646 | #include "includes.h"
// Logarithmic tone-mapping ratio: log(1 + q*L) / log(1 + k*Lmax).
// Returns the compressed luminance for input luminance val_pixel given the
// scene maximum maxLum.
__device__ float logarithmic_mapping(float k, float q, float val_pixel, float maxLum)
{
	// FIX: use float literals — the original `1.0` promoted the additions to
	// double before each log10f call, costing a double round-trip per pixel.
	return (log10f(1.0f + q * val_pixel))/(log10f(1.0f + k * maxLum));
}
// Rec.709/sRGB relative luminance from linear RGB components.
__device__ float rgb2Lum(float B, float G, float R)
{
	// FIX: float-suffixed coefficients keep the arithmetic in single
	// precision (unsuffixed literals promoted the whole sum to double).
	return B * 0.0722f + G * 0.7152f + R * 0.2126f;
}
// Tone-maps an interleaved 3-channel float image: each pixel's RGB triple is
// scaled so its luminance follows the logarithmic mapping. 2D launch, one
// thread per pixel; *max is the precomputed scene maximum luminance.
__global__ void log_tonemap_kernel(float* imageIn, float* imageOut, int width, int height, int channels, float k, float q, float* max)
{
	int row = blockDim.y * blockIdx.y + threadIdx.y;
	int col = blockDim.x * blockIdx.x + threadIdx.x;
	// Guard the partial blocks at the right/bottom edges.
	if (row >= height || col >= width)
		return;
	int base = (row * width + col) * 3;
	float b = imageIn[base + BLUE];
	float g = imageIn[base + GREEN];
	float r = imageIn[base + RED];
	float lum = rgb2Lum(b, g, r);
	float mappedLum = logarithmic_mapping(k, q, lum, *max);
	// Scale all three channels by the luminance compression ratio.
	float ratio = mappedLum / lum;
	imageOut[base + BLUE] = b * ratio;
	imageOut[base + GREEN] = g * ratio;
	imageOut[base + RED] = r * ratio;
}
20,647 | #include "includes.h"
/*
#define N 512
#define N 2048
#define THREADS_PER_BLOCK 512
*/
const int THREADS_PER_BLOCK = 32;
const int N = 2048;
// Element-wise product c[i] = a[i] * b[i], staged through shared memory.
// NOTE(review): the shared-memory round trip adds nothing here — each thread
// reads back only the slot it wrote — and there is no bounds guard, so the
// launch must cover exactly the array length. Confirm at the call site.
__global__ void shared_mult(int *a, int *b, int *c)
{
	__shared__ int cache[THREADS_PER_BLOCK];
	int gid = blockIdx.x * blockDim.x + threadIdx.x;
	cache[threadIdx.x] = a[gid] * b[gid];
	__syncthreads();
	c[gid] = cache[threadIdx.x];
}
20,648 | // calculate neural weights in real time.
// serial in 50 mins, pytorch 5 mins
// parallel target, solve in less than 10 ms - real time
// compile with
// nvcc -arch=sm_60 -o mapping neural.cu -rdc=true -lcudadevrt
#include <iostream>
#include <stdlib.h>
#include <cmath>
#include <ctime>
#include <cuda.h>
#include <float.h>
#include <algorithm>
#include <stdio.h>
#define THREADS_PER_DIM 32
using namespace std;
// struct RT
// {
// int idx;
// double ele;
// };
// __device__
// RT router(double *dA, int *dB, int g_idx){
// // @b_ele: element value in b, which is also C index
// int c_index = dB[g_idx];
// RT result = {c_index, dA[g_idx]};
// return result;
// }
// Scatter-add: d_C[d_B[i]] += d_A[i] for every element i in [0, m).
// Launched as a 2D grid of 2D blocks; each thread handles one element.
// atomicAdd on double requires SM60+ (matches the -arch=sm_60 build note).
__global__
void mapping(double *d_A, int *d_B, double *d_C,int m){
	int block_total = blockDim.x * blockDim.y;
	// Linear id of this block in the grid (row-major over blockIdx.x/.y).
	// BUG FIX: the original added +1 here ("blocks prev including current"),
	// so global ids started at block_total — the first block_total elements
	// were never processed and the highest ids fell outside [0, m).
	int block_linear = blockIdx.x * gridDim.y + blockIdx.y;
	int g_idx = block_total * block_linear + threadIdx.x * blockDim.y + threadIdx.y;
	if(g_idx < m){
		// d_B[g_idx] names the output slot this element accumulates into.
		int c_index = d_B[g_idx];
		atomicAdd(d_C + c_index, d_A[g_idx]);
	}
	// The trailing __syncthreads() in the original was unnecessary (no
	// cross-thread shared state) and has been removed.
}
int main(int argc, char** argv){
	// Usage: ./mapping <m>   (m = number of input elements)
	if (argc < 2){
		fprintf(stderr, "usage: %s <m>\n", argv[0]);
		return 1;
	}
	int m = atoi(argv[1]);
	double *A, *C;
	int *B; // B[i] is the output index that A[i] is accumulated into
	int size_a = m*sizeof(double);
	int size_b = m*sizeof(int);
	A = (double *)malloc(size_a);
	B = (int *)malloc(size_b);
	// Random inputs: values in [0, 1e6), indices in [0, 15000).
	for(int i =0; i<m; i++){
		A[i] = rand()%1000000;
		B[i] = rand()%15000;
	}
	// BUG FIX: indices run 0..max(B), so C needs max(B)+1 slots — the
	// original sized C as max(B) and the kernel wrote one past the end.
	int len_c = *std::max_element(B,B+m) + 1;
	printf("m: %d\n",m);
	printf("len_C %d\n",len_c);
	int size_c = len_c *sizeof(double);
	C = (double *)malloc(size_c);
	for(int j=0; j<len_c;j++){
		C[j]=0; // init C
	}
	double *dA, *dC;
	int *dB;
	// allocate memory on device
	cudaMalloc((void **)&dA, size_a);
	cudaMalloc((void **)&dB, size_b);
	cudaMalloc((void **)&dC, size_c);
	// Copy inputs (and the zeroed accumulator) to device
	cudaMemcpy(dA, A, size_a, cudaMemcpyHostToDevice);
	cudaMemcpy(dB, B, size_b, cudaMemcpyHostToDevice);
	cudaMemcpy(dC, C, size_c, cudaMemcpyHostToDevice);
	int n_ele = m;
	dim3 dimBlock(THREADS_PER_DIM, THREADS_PER_DIM);
	// Square-ish grid: ceil(sqrt(m)/dim)^2 blocks * dim^2 threads >= m.
	dim3 dimGrid(ceil(((double)sqrt(n_ele))/dimBlock.x), ceil(((double)sqrt(n_ele))/ dimBlock.y));
	// Time the kernel with CUDA events.
	cudaEvent_t start, stop;
	float time;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);
	cudaEventRecord(start, 0);
	mapping<<<dimGrid,dimBlock>>>(dA,dB,dC,m);
	cudaEventRecord(stop, 0);
	// cudaEventElapsedTime requires the stop event to have completed.
	cudaEventSynchronize(stop);
	cudaEventElapsedTime(&time, start, stop);
	cudaMemcpy(C,dC, size_c, cudaMemcpyDeviceToHost);
	printf("runtime for parallel algorithm:%f ms\n", time);
	free(A);
	free(B);
	free(C);
	cudaFree(dA);
	cudaFree(dB);
	cudaFree(dC);
	return 0;
}
|
20,649 | #include<stdio.h>
#include<cuda_runtime.h>
#include<device_launch_parameters.h>
// Row kernel for the <<<n,1>>> launch: the single thread of block `row`
// raises each of the m elements in its row to the power (row + 1).
// NOTE(review): powf returns float, so large powers may round when stored
// back into the int output — matches the original behavior.
__global__ void add(int *a, int *b, int m){
	int row = blockIdx.x*blockDim.x + threadIdx.x;
	int base = row * m;
	for (int col = 0; col < m; ++col){
		b[base + col] = powf(a[base + col], row + 1);
	}
}
int main()
{
	// Reads an n x m int matrix, raises every element of row i to the power
	// (i+1) on the GPU, and prints the resulting matrix.
	int a[100], b[100],n, m;
	printf("Enter n: ");
	scanf("%d",&n);
	printf("Enter m: ");
	scanf("%d",&m);
	// FIX: guard the fixed-size 100-int host buffers — the original wrote
	// past a[]/b[] whenever n*m exceeded 100.
	if (n <= 0 || m <= 0 || n * m > 100)
	{
		printf("Invalid dimensions: need 0 < n*m <= 100\n");
		return 1;
	}
	printf("Enter Matrix:\n");
	for(int i=0;i<n*m;i++)
		scanf("%d",&a[i]);
	int *d_a,*d_b;
	int size=sizeof(int)*n*m;
	cudaMalloc((void**)&d_a,size);
	cudaMalloc((void**)&d_b,size);
	cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
	// One block per row, one thread per block; the kernel loops over m.
	add<<<n,1>>>(d_a, d_b, m);
	// Blocking copy also synchronizes with the kernel.
	cudaMemcpy(b,d_b,size,cudaMemcpyDeviceToHost);
	for(int i=0;i<n*m;i++){
		if (i % m == 0)
		{
			printf("\n");
		}
		printf("%d ",b[i]);
	}
	printf("\n");
	cudaFree(d_a);
	cudaFree(d_b);
	return 0;
}
|
20,650 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
// Host input vectors.
float *uva_a;
float *uva_b;
// Host output vector.
float *uva_c;
// Size of arrays.
int n = 0;
/* CUDA kernel. Each thread takes care of one element of c. */
/* Element-wise vector addition c = a + b; one thread per element. */
__global__ void vecAdd(float *a, float *b, float *c, int n) {
	// Flat global index; guard the partial block at the tail.
	int idx = blockDim.x * blockIdx.x + threadIdx.x;
	if (idx >= n)
		return;
	c[idx] = a[idx] + b[idx];
}
// Fills the global input vectors with sin^2(i) and cos^2(i), so every
// element pair sums to 1 — the invariant check_result() verifies.
void init_array() {
	fprintf(stdout, "Inicializando os arrays.\n");
	for (int i = 0; i < n; i++) {
		uva_a[i] = sinf(i) * sinf(i);
		uva_b[i] = cosf(i) * cosf(i);
	}
}
// Dumps every element of the result vector to stdout.
void print_array() {
	printf("Imprimindo o Resultado.\n");
	for (int i = 0; i < n; i++)
		fprintf(stdout, "uva_c[%07d]: %f\n", i, uva_c[i]);
}
// Sanity check: the mean of uva_c should be 1, since each element is
// sin^2(i) + cos^2(i).
void check_result(){
	fprintf(stdout, "Verificando o Resultado.\n");
	float total = 0;
	for (int i = 0; i < n; i++)
		total += uva_c[i];
	fprintf(stdout, "Resultado Final: (%f, %f)\n", total, (float)(total / (float)n));
}
/* Main code */
/* Main: vector addition over unified (managed) memory, timed with events. */
int main(int argc, char *argv[]) {
	// FIX: guard argv[1] — the original dereferenced it unconditionally.
	if (argc < 2) {
		fprintf(stderr, "usage: %s <n>\n", argv[0]);
		return 1;
	}
	// Size of vectors
	n = atoi(argv[1]);
	printf("Número de Elementos: %d\n", n);
	// Size, in bytes, of each vector
	size_t bytes = n * sizeof(float);
	// FIX: %zu is the correct conversion for size_t (the old %d paired with
	// a size_t argument is undefined behavior on LP64 platforms).
	printf("Memória que será alocada para os 3 arrays: %zu\n", 3 * bytes);
	printf("Allocate memory for each vector on host\n");
	// Managed memory: the same pointers are valid on host and device.
	cudaMallocManaged(&uva_a, bytes);
	cudaMallocManaged(&uva_b, bytes);
	cudaMallocManaged(&uva_c, bytes);
	printf("Initialize vectors on host\n");
	init_array();
	// Launch configuration: 256 threads per block, ceil-div for the grid.
	int threadsPerBlock = 256;
	int blocksPerGrid = (n + threadsPerBlock - 1) / threadsPerBlock;
	printf("Execute the kernel\n");
	cudaEvent_t start_event, stop_event;
	float time_kernel_execution;
	int eventflags = cudaEventBlockingSync;
	cudaEventCreateWithFlags(&start_event, eventflags);
	cudaEventCreateWithFlags(&stop_event, eventflags);
	/* Recording the time to kernel execution */
	cudaEventRecord(start_event, 0);
	vecAdd <<< blocksPerGrid, threadsPerBlock >>> (uva_a, uva_b, uva_c, n);
	/* Managed memory: synchronize before the host touches the results. */
	cudaDeviceSynchronize();
	cudaEventRecord(stop_event, 0);
	cudaEventSynchronize(stop_event);
	cudaEventElapsedTime(&time_kernel_execution, start_event, stop_event);
	printf("Time Kernel Execution: %f s\n", (time_kernel_execution / 1000.0f));
	print_array();
	check_result();
	printf("Time Kernel Execution: %f ms\n", (time_kernel_execution));
	// Release managed memory
	cudaFree(uva_a);
	cudaFree(uva_b);
	cudaFree(uva_c);
	return 0;
}
|
20,651 | /**
* Instituto de Ciencias Matematicas e de Computacao - USP Sao Carlos
*
* Programacao Concorrente 2013
* Grupo 05 Turma A
*
* Andre Luiz Catini Paro, 7152740
* Daniel Hideki Yoshimi, 7239173
* Rodrigo Toledo Amancio Silva, 7152308
*
* Projeto Final - Metodo Jacobi-Richardson em CUDA
*
* Este programa resolve sistema lineares utilizando o método de
* Jacobi-Richardson. O método é implementado de forma paralela
* utilizando a plataforma CUDA.
*
* Ele retorna as seguintes informações:
* -Quantidades de iterações;
* -Comparação entre o resultado utilizando o valor obtido
* aplicado em uma linha de testes com o valor real.
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <float.h>
#define TPB 256
/* Declarações de parâmetros */
void printMA(int J_ORDER, float *MA); // Função para debug. Imprime a matrix MA.
void printMB(int J_ORDER, float *MB); // Função para debug. Imprime o vetor MB.
void printX(int J_ORDER, float *X); // Função para debug. Imprime o vetor X.
void printResults(int ite, int J_ROW_TEST, float result, float MB); // Apresenta os resultados formatados.
/* __global__ void diag(float *dev_MA, float *dev_MB, float *dev_X, int J_ORDER);
*
* Descricao:
* Esta função realiza o cálculo da matriz MA* e do vetor MB*, além de atribuir
* o valor inicial para o vetor X. O retorno da função se dá em modificar os próprios
* parâmetros de entrada.
* Cada linha da matriz MA é calculada em um bloco e cada coluna dessa linha é calculada
* pelas threads desse bloco.
*
* Parametros de entrada:
* -(float) dev_MA: Matriz MA alocada da memória da GPU.
* -(float) dev_MB: Vetor MB alocado na memória da GPU.
* -(float) dev_X: Vetor X alocado na memória da GPU.
* -(int) J_ORDER: Ordem dos vetores.
*
* Parametros de saida:
* -
*
*/
/* Builds MA* and MB* for Jacobi-Richardson: divides row `bid` of MA and
 * MB[bid] by the diagonal element, zeroes the diagonal, and seeds X = MB*.
 * One block per row; threads stride over the columns.
 */
__global__ void diag(float *dev_MA, float *dev_MB, float *dev_X, int J_ORDER){
	int bid = blockIdx.x; // row handled by this block
	if (bid >= J_ORDER)
		return;
	// BUG FIX: the diagonal element is a float — the original stored it in
	// an int, truncating it and corrupting every division below.
	float diagAux = dev_MA[bid*J_ORDER+bid];
	// Strided loop: thread t handles columns t, t+TPB, t+2*TPB, ...
	// (matrix is stored column-major: element (row bid, col c) is
	// dev_MA[c*J_ORDER+bid]).
	for (int col = threadIdx.x; col < J_ORDER; col += TPB){
		if (bid != col){
			dev_MA[col*J_ORDER+bid] = dev_MA[col*J_ORDER+bid] / diagAux;
		} else {
			dev_MA[col*J_ORDER+bid] = 0;
		}
	}
	// BUG FIX: exactly one thread updates dev_MB[bid]/dev_X[bid]. In the
	// original, every thread performed the read-modify-write on the same
	// location — a race that could divide dev_MB[bid] by diagAux more than
	// once. (diagAux was captured above, so no barrier is needed either;
	// the original also placed __syncthreads() inside a divergent guard.)
	if (threadIdx.x == 0){
		dev_MB[bid] = dev_MB[bid] / diagAux;
		dev_X[bid] = dev_MB[bid];
	}
}
/* __global__ void jacobi(float *dev_MA, float *dev_X, int J_ORDER, float *dev_sum);
*
* Descricao:
* Esta função realiza o cálculo do somatório presente no algoritmo. Para isso, utilizamos
* a seguinte abordagem: cada linha a ser multiplicada e somada é controlada por um bloco (bId),
* cada uma das colunas dessa linha é controlada por uma thread (tId). Cada thread irá realizar
* o cálculo das colunas de posição tId e tId+TPB (threads per block). Assim, caso a matriz tenha
* mais que 512 colunas, todas elas serão calculadas. O somatório de cada thread será salvo numa
* posição do cache compartilhado. Por fim, realizamos uma redução desse cache compartilhado, salvando
* o resultado final (o somatório da linha), na sua posição referente em dev_sum.
*
* Parametros de entrada:
* -(float) dev_MA: Matriz MA alocada da memória da GPU.
* -(float) dev_MB: Vetor MB alocado na memória da GPU.
* -(float) dev_X: Vetor X alocado na memória da GPU.
* -(int) J_ORDER: Ordem dos vetores.
* -(float) dev_sum: Vetor que armazenará os somatórios
*
* Parametros de saida:
* -
*
*/
/* Computes dev_sum[row] = sum over col != row of MA*[row][col] * X[col]
 * (the Jacobi update's summation). One block per row; each thread builds a
 * strided partial sum which is then tree-reduced in shared memory.
 */
__global__ void jacobi(float *dev_MA, float *dev_X, int J_ORDER, float *dev_sum){
	__shared__ float cache[TPB]; // per-thread partials for the reduction
	int bId = blockIdx.x;        // row handled by this block
	float partial = 0.0f;
	if (bId < J_ORDER){
		// Column-major storage: element (row bId, col c) is dev_MA[c*J_ORDER+bId].
		for (int col = threadIdx.x; col < J_ORDER; col += TPB){
			if (col != bId)
				partial += (dev_MA[(col * J_ORDER) + bId] * dev_X[col]);
		}
	}
	// BUG FIX: the barriers below used to sit inside an
	// `if (bId < J_ORDER && tId < J_ORDER)` guard; whenever J_ORDER < TPB
	// the excluded threads skipped __syncthreads() while the rest waited —
	// undefined behavior. All threads now reach every barrier, and threads
	// with no work simply contribute 0.
	cache[threadIdx.x] = partial;
	__syncthreads();
	// Tree reduction (requires TPB to be a power of two; TPB == 256 here).
	for (int i = TPB/2; i != 0; i /= 2){
		if (threadIdx.x < i)
			cache[threadIdx.x] += cache[threadIdx.x + i];
		__syncthreads();
	}
	// Thread 0 publishes the row's total.
	if (threadIdx.x == 0 && bId < J_ORDER){
		dev_sum[bId] = cache[0];
	}
}
/* int main(int argc, char * argv[]);
*
* Descricao:
* Na main é realizada a leitura do arquivo de entrada bem como a declaração, alocação de memória
* das variáveis envolvidas e a comunidação entre host e device.
* Primeiramente o kernel diag é chamado para realizar o cálculo de MA* e MB*, bem como a atribuição inicial de X.
* Então, uma estrutura de repetição irá realizar chamadas ao kernel jacobi para que o somatório seja realizado na gpu.
* O somatório é copiado para o host e a atribuição do novo valor de X é feita, bem como o cálculo do erro. A estrutura
* de repetição para se ela atingir o número máximo de iterações ou o erro mínimo.
*
* Parametros de entrada:
* - argc: Quantidade de parametros de entrada junto a execucao do programa.
* - argv: Array contendo strings com cada parametro de entrada.
*
* Parametros de saida:
* - (int): Retorna 0 caso o programa seja executado com sucesso.
*/
int main(int argc, char * argv[]){
	// INPUT
	FILE * pFile; // Handle for the input file.
	/* Open the input file; abort if the argument is missing or unreadable. */
	if((argc < 2) || (pFile = fopen(argv[1],"r")) == NULL){
		fputs ("File error",stderr); exit (1);
	}
	int i,j; // Loop counters.
	/* Problem parameters read from the file header: matrix order, test row,
	 * convergence tolerance, and iteration cap (read as float, compared
	 * against the int counter below). */
	int J_ORDER, J_ROW_TEST;
	float J_ERROR, J_ITE_MAX;
	fscanf(pFile, "%d%d%f%f", &J_ORDER, &J_ROW_TEST, &J_ERROR, &J_ITE_MAX);
	/* Host-side allocations.
	 * NOTE(review): MBOriginal is a single float (the expected value for the
	 * test row), not a pointer — intentional despite the surrounding names. */
	float *MA, *MB, *X, *Xold, *MAOriginal, MBOriginal, *sum;
	MA = (float*)malloc(sizeof(float)*J_ORDER*J_ORDER);
	MAOriginal = (float*)malloc(sizeof(float)*J_ORDER);
	MB = (float*)malloc(sizeof(float)*J_ORDER);
	X = (float*)malloc(sizeof(float)*J_ORDER);
	Xold = (float*)malloc(sizeof(float)*J_ORDER);
	sum = (float*)malloc(sizeof(float)*J_ORDER);
	/* Read the matrix (stored column-major: MA[j*J_ORDER+i] is row i, col j)
	 * and keep an untouched copy of the test row for the final check. */
	for(i = 0; i<J_ORDER; i++){
		for(j = 0; j<J_ORDER; j++){
			fscanf(pFile, "%f", &MA[j*J_ORDER+i]);
			if(i==J_ROW_TEST)
				MAOriginal[j] = MA[j*J_ORDER+i];
		}
	}
	// Right-hand side vector; remember the expected value of the test row.
	for(i=0; i<J_ORDER; i++){
		fscanf(pFile, "%f", &MB[i]);
		if(i == J_ROW_TEST)
			MBOriginal = MB[i];
	}
	fclose(pFile);
	/* Device-side allocations. */
	float *dev_MA, *dev_MB, *dev_X, *dev_sum;
	cudaMalloc( (void**)&dev_MA, J_ORDER*J_ORDER*sizeof(float));
	cudaMalloc( (void**)&dev_MB, J_ORDER*sizeof(float));
	cudaMalloc( (void**)&dev_X, J_ORDER*sizeof(float));
	cudaMalloc( (void**)&dev_sum, J_ORDER*sizeof(float));
	/* Copy inputs host -> device.
	 * NOTE(review): X is uninitialized here; the copy is harmless because
	 * the diag kernel overwrites dev_X before it is ever read. */
	cudaMemcpy(dev_MA, MA, J_ORDER*J_ORDER*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_MB, MB, J_ORDER*sizeof(float), cudaMemcpyHostToDevice);
	cudaMemcpy(dev_X, X, J_ORDER*sizeof(float), cudaMemcpyHostToDevice);
	/* diag kernel: one block per row (J_ORDER blocks), TPB threads per block,
	 * computes MA*, MB* and the initial X. */
	diag<<<J_ORDER , TPB>>>(dev_MA, dev_MB, dev_X, J_ORDER); // OK
	/* Copy MB* and the initial X back to the host (blocking — also syncs). */
	cudaMemcpy(MB, dev_MB, J_ORDER*sizeof(float), cudaMemcpyDeviceToHost);
	cudaMemcpy(X, dev_X, J_ORDER*sizeof(float), cudaMemcpyDeviceToHost);
	/* Iteration state: Mr is the relative error, seeded to FLT_MAX so the
	 * loop runs at least once. */
	int ite = 0;
	float maxDif, maxX, Mr;
	Mr = FLT_MAX;
	float cmpAux1, cmpAux2;
	/* Main loop: GPU computes the row summations; host applies the Jacobi
	 * update, measures convergence, and pushes the new X back. */
	while(ite < J_ITE_MAX && Mr > J_ERROR){
		// jacobi kernel: one block per row, TPB threads per block.
		jacobi<<<J_ORDER , TPB>>>(dev_MA, dev_X, J_ORDER, dev_sum);
		/* Copy the per-row summations device -> host. */
		cudaMemcpy(sum, dev_sum, J_ORDER*sizeof(float), cudaMemcpyDeviceToHost);
		/* New X and convergence measure: Mr = max|X-Xold| / max|X|. */
		maxDif = maxX = FLT_MIN;
		for(i = 0 ; i < J_ORDER; i++){
			Xold[i] = X[i];
			X[i] = (MB[i] - sum[i]); // Jacobi update for row i
			cmpAux1 = fabs(X[i] - Xold[i]);
			if(cmpAux1 > maxDif)
				maxDif = cmpAux1;
			cmpAux2 = fabs(X[i]);
			if(cmpAux2 > maxX)
				maxX = cmpAux2;
		}
		Mr = maxDif / maxX; // relative error
		ite++;              // iteration count
		/* Push the new X to the device for the next iteration. */
		cudaMemcpy(dev_X, X, J_ORDER*sizeof(float), cudaMemcpyHostToDevice);
	}
	/* Validation: apply the solution to the untouched test row. */
	float resultAux=0;
	for(j=0; j<J_ORDER; j++){
		resultAux += MAOriginal[j]*X[j];
	}
	/* Formatted report: iterations, obtained vs expected value. */
	printResults(ite, J_ROW_TEST, resultAux, MBOriginal);
	/* Cleanup. */
	free(MA); free(MB); free(X); free(Xold); free(MAOriginal); free(sum);
	cudaFree(dev_MA); cudaFree(dev_MB); cudaFree(dev_X); cudaFree(dev_sum);
	return 0;
}
/* void printMA(int J_ORDER, float *MA);
*
* Descricao:
* Impressão da matriz MA para debug.
*
* Parametros de entrada:
* -(int) J_ORDER: Ordem da matriz.
* -(float) MA: matriz MA.
*
* Parametros de saida:
* -
*/
void printMA(int J_ORDER, float *MA){
	// Column-major storage: element (row i, col j) lives at MA[j*J_ORDER+i].
	for (int i = 0; i < J_ORDER; i++){
		for (int j = 0; j < J_ORDER; j++)
			printf("%f ", MA[j*J_ORDER+i]);
		printf("\n");
	}
}
/* void printMB(int J_ORDER, float *MB);
*
* Descricao:
* Impressão do vator MB para debug.
*
* Parametros de entrada:
* -(int) J_ORDER: Ordem dos vetores.
* -(float) MB: Vetor MB.
*
* Parametros de saida:
* -
*/
void printMB(int J_ORDER, float *MB){
	// One element per line.
	for (int i = 0; i < J_ORDER; i++){
		printf("%f ", MB[i]);
		printf("\n");
	}
}
/* void printX(int J_ORDER, float *X);
*
* Descricao:
* Impressão do vator X para debug.
*
* Parametros de entrada:
* -(int) J_ORDER: Ordem dos vetores.
* -(float) X: Vetor X.
*
* Parametros de saida:
* -
*/
void printX(int J_ORDER, float *X){
	// All elements on one line, then a trailing newline.
	for (int i = 0; i < J_ORDER; i++)
		printf("%f ", X[i]);
	printf("\n");
}
/* void printResults(int ite, int J_ROW_TEST, float result, float MB)
*
* Descricao:
* Impressão formatada dos resultados.
*
* Parametros de entrada:
* -(int) ite: Número de iterações obtido.
* -(int) J_ROW_TEST: Linha de teste.
* -(float) result: Resultado obtido.
* -(float) MB: Resultado esperado.
*
* Parametros de saida:
* -
*/
void printResults(int ite, int J_ROW_TEST, float result, float MB){
	// Boxed summary: iteration count plus obtained vs expected value for
	// the designated test row.
	printf("\n\n---------------------------------------------------------\n"
	       "Iterations: %d\n"
	       "RowTest: %d => [%f] =? %f\n"
	       "---------------------------------------------------------\n\n", ite, J_ROW_TEST, result, MB);
}
20,652 | #include "includes.h"
// Grid-stride max-reduction over `array` into the file-global `maxLum`
// (declared elsewhere), guarded by an atomicCAS spin-lock on *mutex.
// Dynamic shared memory: blockDim.x floats (passed at launch); the tree
// reduction assumes blockDim.x is a power of two.
__global__ void find_maximum_kernel(float *array, int *mutex, unsigned int n, int blockSize){
	unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
	unsigned int stride = gridDim.x*blockDim.x;
	unsigned int offset = 0;
	extern __shared__ float cache[];
	// BUG FIX: seed with the most negative float instead of the double
	// literal -1.0 — the original returned -1 whenever every element was
	// below -1, and promoted the first comparisons to double.
	float temp = -3.402823466e+38f; /* -FLT_MAX */
	while(index + offset < n){
		temp = fmaxf(temp, array[index + offset]);
		offset += stride;
	}
	cache[threadIdx.x] = temp;
	__syncthreads();
	// Tree reduction in shared memory.
	unsigned int i = blockDim.x/2;
	while(i != 0){
		if(threadIdx.x < i){
			cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
		}
		__syncthreads();
		i /= 2;
	}
	// Thread 0 merges this block's maximum into the global result.
	if(threadIdx.x == 0){
		while(atomicCAS(mutex,0,1) != 0); //lock
		maxLum = fmaxf(maxLum, cache[0]);
		// FIX: make the maxLum store visible to other blocks before the
		// lock release can be observed (GPU memory model is weakly ordered).
		__threadfence();
		atomicExch(mutex, 0); //unlock
	}
}
20,653 | __device__ unsigned int countDigits(unsigned int number);
__device__ bool isNumberDisarium(unsigned int number);
__device__ unsigned int pow(unsigned int x, unsigned int n);
// One thread per candidate: result[i] records whether generatedNumbers[i]
// is a Disarium number.
__global__ void generateDisariumNumbers(unsigned int *generatedNumbers, bool *result, const unsigned int NUMBERS_COUNT) {
	unsigned int gid = blockDim.x * blockIdx.x + threadIdx.x;
	if (gid >= NUMBERS_COUNT)
		return;
	result[gid] = isNumberDisarium(generatedNumbers[gid]);
}
// A Disarium number equals the sum of its digits each raised to its 1-based
// position, e.g. 135 = 1^1 + 3^2 + 5^3.
__device__ bool isNumberDisarium(unsigned int number) {
	unsigned int position = countDigits(number);
	unsigned int total = 0;
	// Walk digits right-to-left; `position` is the current digit's 1-based index.
	for (unsigned int rest = number; rest != 0; rest /= 10) {
		total += pow(rest % 10, position);
		position--;
	}
	return total == number;
}
// Number of decimal digits; returns 0 for input 0 (matching the original).
__device__ unsigned int countDigits(unsigned int number) {
	unsigned int count = 0;
	for (; number != 0; number /= 10)
		count++;
	return count;
}
// Integer exponentiation by repeated multiplication; pow(x, 0) == 1.
__device__ unsigned int pow(unsigned int x, unsigned int n) {
	unsigned int acc = 1;
	while (n-- > 0)
		acc *= x;
	return acc;
}
20,654 | /*
The solution.
*/
#include <sys/time.h>
#include <ctype.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
// Number of times to run the test (for better timings accuracy):
#define NTESTS 100
// Number of threads in one block (possible range is 32...1024):
#define BLOCK_SIZE 128
// Total number of threads (total number of elements to process in the kernel):
#define NMAX 1000000
// Number of chunks (NMAX should be dividable by NCHUNKS):
#define NCHUNKS 10
/* Subtract the `struct timeval' values X and Y,
storing the result in RESULT.
Return 1 if the difference is negative, otherwise 0. */
// It messes up with y!
// Computes *result = x - y in fractional seconds.
// Returns 1 if the difference is negative, otherwise 0.
// WARNING: mutates *y while normalizing the microsecond carry (the original
// author flagged this with "It messes up with y!") — pass a copy if the
// caller still needs y.
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
	struct timeval result0;
	/* Perform the carry for the later subtraction by updating y:
	 * borrow whole seconds into y->tv_usec so x->tv_usec - y->tv_usec
	 * cannot go negative. */
	if (x->tv_usec < y->tv_usec) {
		int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
		y->tv_usec -= 1000000 * nsec;
		y->tv_sec += nsec;
	}
	/* Symmetric normalization when the microsecond gap exceeds one second. */
	if (x->tv_usec - y->tv_usec > 1000000) {
		int nsec = (y->tv_usec - x->tv_usec) / 1000000;
		y->tv_usec += 1000000 * nsec;
		y->tv_sec -= nsec;
	}
	/* Compute the time remaining to wait.
	   tv_usec is certainly positive. */
	result0.tv_sec = x->tv_sec - y->tv_sec;
	result0.tv_usec = x->tv_usec - y->tv_usec;
	// Fold seconds + microseconds into a single double-precision value.
	*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
	/* Return 1 if result is negative. */
	return x->tv_sec < y->tv_sec;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The kernel:
// Synthetic compute-heavy kernel for the chunked-streams benchmark:
// reads d_A[ind + i], writes d_B[ind + i] for the Ntot elements of one chunk.
__global__ void MyKernel (double *d_A, double *d_B, int ind, int Ntot)
{
	// Chunk-local index with a tail guard.
	int local = threadIdx.x + blockDim.x * blockIdx.x;
	if (local >= Ntot)
		return;
	// Global index: this chunk starts at element `ind`.
	int g = ind + local;
	// Deliberately expensive, meaningless arithmetic to load the SMs.
	double x = pow(d_A[g], 2.71);
	double y = pow(d_A[g], 0.35);
	double z = 2*x + 5*y;
	d_B[g] = x + y + z + x*y + x/y + y/z;
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Benchmark driver: repeats NTESTS times a chunked pipeline that copies the
// input in NCHUNKS pieces (stream ID[1]) while the kernel processes the
// previously-copied chunk (stream ID[0]), timing only the copy+compute loop.
int main (int argc,char **argv)
{
	struct timeval tdr0, tdr1, tdr;
	double restime;
	int devid, devcount, error, Max_gridsize;
	double *h_A, *h_B, *d_A, *d_B;
	cudaStream_t ID[2]; // ID[0]: kernels; ID[1]: H2D copies
	/* find number of device in current "context" */
	cudaGetDevice(&devid);
	/* find how many devices are available */
	if (cudaGetDeviceCount(&devcount) || devcount==0)
	{
		printf ("No CUDA devices!\n");
		exit (1);
	}
	else
	{
		cudaDeviceProp deviceProp;
		cudaGetDeviceProperties (&deviceProp, devid);
		printf ("Device count, devid: %d %d\n", devcount, devid);
		printf ("Device: %s\n", deviceProp.name);
		printf("[deviceProp.major.deviceProp.minor] = [%d.%d]\n\n", deviceProp.major, deviceProp.minor);
		// Needed for the launch-limit sanity check below.
		Max_gridsize = deviceProp.maxGridSize[0];
	}
	// Loop to run the timing test multiple times:
	int kk;
	for (kk=0; kk<NTESTS; kk++)
	{
		// Using cudaMallocHost (instead of malloc) so the async copies below
		// use pinned memory.
		// Initial data array on host:
		if (error = cudaMallocHost (&h_A, NMAX*sizeof(double)))
		{
			printf ("Error %d\n", error);
			exit (error);
		}
		// Results array on host:
		if (error = cudaMallocHost (&h_B, NMAX*sizeof(double)))
		{
			printf ("Error %d\n", error);
			exit (error);
		}
		// Allocating arrays on GPU:
		if (error = cudaMalloc (&d_A, NMAX*sizeof(double)))
		{
			printf ("Error %d\n", error);
			exit (error);
		}
		if (error = cudaMalloc (&d_B, NMAX*sizeof(double)))
		{
			printf ("Error %d\n", error);
			exit (error);
		}
		// Initializing the input array with uniform [0,1) values:
		for (int i=0; i<NMAX; i++)
		{
			h_A[i] = (double)rand()/(double)RAND_MAX;
		}
		// Creating streams.
		// NOTE(review): streams are re-created every kk iteration and never
		// destroyed — a resource leak over NTESTS iterations; consider
		// cudaStreamDestroy at the end of the loop.
		for (int i = 0; i < 2; ++i)
			cudaStreamCreate (&ID[i]);
		// Number of threads (elements) in a chunk:
		int Ntot = NMAX / NCHUNKS;
		// Number of blocks of threads in a chunk:
		int Nblocks = (Ntot+BLOCK_SIZE-1) / BLOCK_SIZE;
		if (Nblocks > Max_gridsize)
		{
			printf ("Nblocks > Max_gridsize! %d %d\n", Nblocks, Max_gridsize);
			exit (1);
		}
		// Drain any pending work so the timing starts clean:
		if (error = cudaDeviceSynchronize())
		{
			printf ("Error %d\n", error);
			exit (error);
		}
		gettimeofday (&tdr0, NULL);
		//--------------------------------------------------------------------------------
		for (int j=0; j<NCHUNKS; j++)
		{
			// Starting global index for this chunk:
			int ind = Ntot * j;
			// Copying the j-th chunk to device - asynchronous relative to the host and stream ID[0] kernel (processing the previous chunk in parallel)
			if (error = cudaMemcpyAsync (&d_A[ind], &h_A[ind], Ntot*sizeof(double), cudaMemcpyHostToDevice, ID[1]))
			{
				printf ("Error %d\n", error);
				exit (error);
			}
			// This global synchronization between both streams and host ensures that the kernel can only start
			// when the previous chunk copying is finished.
			// This also ensures that at j=0 kernel will not start untill the first chunk is copied.
			// NOTE(review): a full device sync per chunk serializes copy and
			// compute; cudaStreamWaitEvent would preserve the intended overlap.
			if (error = cudaDeviceSynchronize())
			{
				printf ("Error %d\n", error);
				exit (error);
			}
			// The kernel call (launch errors are not observable here; they
			// surface at the next synchronizing call):
			MyKernel <<<Nblocks, BLOCK_SIZE, 0, ID[0]>>> (d_A, d_B, ind, Ntot);
		}
		//--------------------------------------------------------------------------------
		// Wait for the last chunk's kernel before stopping the clock:
		if (error = cudaDeviceSynchronize())
		{
			printf ("Error %d\n", error);
			exit (error);
		}
		gettimeofday (&tdr1, NULL);
		// timeval_subtract mutates its third argument, so pass a copy of tdr0:
		tdr = tdr0;
		timeval_subtract (&restime, &tdr1, &tdr);
		// Copying the result back to host (we don't time it):
		if (error = cudaMemcpy (h_B, d_B, NMAX*sizeof(double), cudaMemcpyDeviceToHost))
		{
			printf ("Error %d\n", error);
			exit (error);
		}
		if (error = cudaDeviceSynchronize())
		{
			printf ("Error %d\n", error);
			exit (error);
		}
		// Adding up the results, for accuracy/correctness testing:
		double result = 0.0;
		for (int i=0; i<NMAX; i++)
		{
			result += h_B[i];
		}
		printf ("Result: %e\n\n", result);
		printf ("Time: %e\n", restime);
		// Release per-iteration host and device buffers:
		cudaFreeHost (h_A);
		cudaFreeHost (h_B);
		cudaFree (d_A);
		cudaFree (d_B);
	} // kk loop
	return 0;
}
|
20,655 | #include <iostream>
#include <malloc.h>
using namespace std;
// Element-wise sum d_c = d_a + d_b over this GPU's partition.
// *d_limit holds the partition length (copied to the device by the host).
__global__ void add(int* d_a, int* d_b, int* d_c, int* d_limit){
	int tid = threadIdx.x + blockIdx.x*blockDim.x;
	// BUG FIX: bound by the partition length in *d_limit — the original
	// hard-coded `tid < 1000`, which only worked because each of the two
	// GPUs happened to receive exactly 1000 elements.
	if(tid < *d_limit){
		d_c[tid] = d_a[tid] + d_b[tid];
	}
}
int main(){
	// Splits a 2000-element vector addition across two GPUs: each device
	// gets half the data, runs the add kernel, and results are gathered back.
	int size = 2000; // size of an array
	int ngpus = 2;
	/* Device memory pointers, one set per GPU. */
	int *d_a[2], *d_b[2], *d_c[2];
	// Per-device partition lengths (first half / remainder).
	const int Ns[2] = {size/2, size - size/2};
	/* Partition-length scratch: written on host, copied to each device
	 * before its kernel launch. */
	int* h_limit;
	int* d_limit;
	h_limit = (int *)malloc(sizeof(int));
	// NOTE(review): d_limit is allocated before any cudaSetDevice call, so
	// it lives on the current (default) device only; confirm it is valid
	// when dereferenced from device 1's kernel.
	cudaMalloc((void **)&d_limit, sizeof(int));
	/* Host input arrays. */
	int h_a[size];
	int h_b[size];
	for(int i=0;i<size;i++){
		h_a[i] = i+1;
		h_b[i] = i+2;
	}
	/*int* h_c[ngpus];
	for(int dev=0; dev < ngpus; dev++){
	h_c[dev] = (int *)malloc(Ns[dev]*sizeof(int));
	}*/
	// Single host result buffer covering both partitions.
	int* h_c;
	h_c = (int *)malloc(size*sizeof(int));
	/* Allocate each GPU's partition-sized buffers. */
	for(int dev=0; dev< ngpus ;dev++){
		cudaSetDevice(dev);
		cudaMalloc((void **)&d_a[dev], Ns[dev]*sizeof(int));
		cudaMalloc((void **)&d_b[dev], Ns[dev]*sizeof(int));
		cudaMalloc((void **)&d_c[dev], Ns[dev]*sizeof(int));
	}
	/* Scatter the host arrays: `pos` tracks each partition's offset. */
	for(int dev=0,pos=0; dev < ngpus; pos+= Ns[dev], dev++){
		cudaSetDevice(dev);
		cudaMemcpy(d_a[dev], h_a+pos, Ns[dev]*sizeof(int), cudaMemcpyHostToDevice);
		cudaMemcpy(d_b[dev], h_b+pos, Ns[dev]*sizeof(int), cudaMemcpyHostToDevice);
	}
	/* Launch one kernel per GPU, passing that GPU's partition length. */
	for(int dev=0; dev< ngpus; dev++){
		//h_limit[0] = Ns[dev];
		cudaSetDevice(dev);
		h_limit[0] = Ns[dev];
		cudaMemcpy(d_limit, h_limit, sizeof(int), cudaMemcpyHostToDevice);
		// Single block of Ns[dev] threads (fits because Ns[dev] == 1000).
		add<<<1,Ns[dev]>>>(d_a[dev],d_b[dev], d_c[dev], d_limit);
		/*cudaMemcpy(h_c[dev], d_c[dev], Ns[dev]*sizeof(int), cudaMemcpyDeviceToHost);
		for(int i=0;i<Ns[dev];i++){
		if(i%100 == 0)
		cout<<h_c[dev][i]<<endl;
		}*/
	}
	/* Gather results; the blocking memcpy also synchronizes each device. */
	for(int dev=0, pos=0; dev < ngpus; pos += Ns[dev], dev++){
		cudaSetDevice(dev);
		cudaMemcpy(h_c+pos, d_c[dev], Ns[dev]*sizeof(int), cudaMemcpyDeviceToHost);
	}
	/* Print every 100th element. */
	for(int i=0;i<size;i++){
		if(i%100 == 0)
			cout<<"h_c["<<i<<"] = "<<h_c[i]<<endl;
	}
}
|
20,656 | #include <iostream>
#include <math.h>
#include <float.h>
/*
 * One chunked pass of a squared-distance transform.
 * Each thread owns pixel tx; the edge-pixel indices sz[start..val) are
 * staged in dynamic shared memory (chunk ints), and the per-pixel minimum
 * SQUARED distance accumulates across passes in d_min. The sqrt and sign
 * are applied later by final_comp.
 * img, sdt and sz_edge are unused in this kernel.
 * NOTE(review): there is no bounds guard on tx; the launch must cover
 * exactly width*height threads — confirm for non-multiple-of-256 images.
 */
__global__ void sdt_compute(unsigned char *img, int *sz, float *sdt, int sz_edge, int width, float *d_min, int start, int val)
{
    int tx = threadIdx.x + blockDim.x*blockIdx.x;
    extern __shared__ int ep[];
    int chunk = val - start;
    // Fix: load the chunk cooperatively (strided by blockDim.x); previously
    // EVERY thread copied the whole chunk by itself — redundant work and
    // racy (same-value) writes to shared memory.
    for(int j = threadIdx.x; j < chunk; j += blockDim.x){
        ep[j] = sz[start + j];
    }
    __syncthreads();
    float min_dist = d_min[tx];      // best squared distance from earlier passes
    int x = tx % width;              // this thread's pixel coordinates
    int y = tx / width;
    for(int k = 0; k < chunk; k++)
    {
        float dx = (float)(ep[k] % width) - x;
        float dy = (float)(ep[k] / width) - y;
        float dist2 = dx*dx + dy*dy;
        if(dist2 < min_dist) min_dist = dist2;
    }
    d_min[tx] = min_dist;
}
// Combine the two partial minimum-squared-distance buffers and emit the
// signed distance transform: magnitude sqrt(min), sign negative where the
// image is dark (< 127) and positive otherwise.
__global__ void final_comp(unsigned char *img, float *d_min, float *d_min2, float *sdt)
{
    int idx = threadIdx.x + blockDim.x * blockIdx.x;
    float best = fminf(d_min[idx], d_min2[idx]);
    float polarity = (img[idx] >= 127) ? 1.0f : -1.0f;
    sdt[idx] = polarity * sqrtf(best);
}
/*
 * Host driver for the signed distance transform.
 * Collects the indices of all edge pixels (value 255), uploads them, runs
 * sdt_compute over the edge list in chunks — alternating two streams and
 * two partial-minimum buffers so chunk kernels can overlap — then combines
 * the partial results with final_comp and copies the SDT back into sdt.
 * Fix: releases all temporary resources at the end (the original leaked
 * the edge list, the pinned buffer, every device allocation and both
 * streams on every call).
 */
extern "C" void run_sampleKernel(unsigned char * bitmap, float *sdt, int width, int height)
{
    // Collect all edge pixels in an array
    int sz = width*height;
    int sz_edge = 0;
    for(int i = 0; i<sz; i++) if(bitmap[i] == 255) sz_edge++;
    int *edge_pixels = new int[sz_edge];
    for(int i = 0, j = 0; i<sz; i++) if(bitmap[i] == 255) edge_pixels[j++] = i;
    std::cout<< "\t"<<sz_edge << " edge pixels in the image of size " << width << " x " << height << "\n"<<std::flush;
    int *d_sz;
    float *temp_min;
    unsigned char *d_img;
    float *d_sdt, *d_min, *d_min2;
    // Pinned host buffer so the async copies below can actually overlap.
    cudaMallocHost(&temp_min,height*width*sizeof(float));
    for(int i=0;i<height*width;i++){
        temp_min[i] = FLT_MAX;   // "no edge seen yet"
    }
    cudaStream_t stream1, stream2;
    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);
    cudaMalloc((void**)&d_sz, sz_edge*sizeof(int));
    cudaMalloc((void**)&d_img, height*width*sizeof(unsigned char));
    cudaMalloc((void**)&d_sdt, height*width*sizeof(float));
    cudaMalloc((void**)&d_min, height*width*sizeof(float));
    cudaMalloc((void**)&d_min2, height*width*sizeof(float));
    cudaMemcpyAsync(d_img, bitmap, width*height*sizeof(unsigned char), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(d_min, temp_min, width*height*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(d_min2, temp_min, width*height*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpyAsync(d_sz, edge_pixels, sz_edge*sizeof(int), cudaMemcpyHostToDevice);
    // Process the edge list in `divisions` chunks; even chunks go to
    // stream2/d_min, odd chunks to stream1/d_min2.
    int divisions = 20;
    int val_div = sz_edge/divisions;
    int n, m;
    m =1;
    for(n =0; n<divisions; n+=2){
        sdt_compute<<<(height*width)/256, 256, val_div*sizeof(int), stream2>>>(d_img, d_sz, d_sdt, sz_edge, width, d_min, n*val_div, (n+1)*val_div);
        sdt_compute<<<(height*width)/256, 256, val_div*sizeof(int), stream1>>>(d_img, d_sz, d_sdt, sz_edge, width, d_min2, m*val_div, (m+1)*val_div);
        m += 2;
    }
    // Leftover edge pixels when sz_edge is not divisible by `divisions`.
    if((sz_edge%divisions) !=0){
        sdt_compute<<<(height*width)/256, 256, (sz_edge%divisions)*sizeof(int)>>>(d_img, d_sz, d_sdt, sz_edge, width, d_min, (m-1)*val_div, (m-1)*val_div + sz_edge%divisions);
    }
    final_comp<<<(height*width)/256, 256>>>(d_img, d_min, d_min2, d_sdt);
    cudaDeviceSynchronize();
    cudaMemcpy(sdt, d_sdt, height*width*sizeof(float), cudaMemcpyDeviceToHost);
    // Release all temporaries (fix: originally leaked on every call).
    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    cudaFree(d_sz);
    cudaFree(d_img);
    cudaFree(d_sdt);
    cudaFree(d_min);
    cudaFree(d_min2);
    cudaFreeHost(temp_min);
    delete [] edge_pixels;
}
|
20,657 | #include "includes.h"
// Fill two column-major (j*h + i) meshgrid buffers with pixel coordinates
// shifted by the principal point (K02, K12): xx holds column - K02 and
// yy holds row - K12. Expects a 2-D launch covering h rows by w columns.
__global__ void meshgrid_create(float* xx, float* yy, int w, int h, float K02, float K12) {
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    int col = blockIdx.y * blockDim.y + threadIdx.y;
    if (row >= h || col >= w)
        return;
    int idx = col * h + row;
    xx[idx] = col - K02;
    yy[idx] = row - K12;
}
20,658 | #include <stdio.h>
#include <iostream>
#include <cstdlib>
#include <ctime>
#include <climits>
#include <cuda_runtime.h>
// Logical kernel #1: block-remapped vector add. The logical block index is
// read from mapBlk so that several logical kernels can share one physical
// grid (see `scheduler`).
__device__ void
vectorAdd1(int* d_A, int* d_B, int* d_C, int size, int* mapBlk, int blockDim){
    int elem = mapBlk[blockIdx.x] * blockDim + threadIdx.x;
    if (elem < size)
        d_C[elem] = d_A[elem] + d_B[elem];
    // Empty spin used as an artificial extra workload.
    for (int spin = 0; spin < 100000; spin++);
}
// Logical kernel #2: identical block-remapped vector add, kept separate so
// the scheduler has two distinct targets to dispatch to.
__device__ void
vectorAdd2(int* d_A, int* d_B, int* d_C, int size, int* mapBlk, int blockDim){
    int elem = mapBlk[blockIdx.x] * blockDim + threadIdx.x;
    if (elem < size)
        d_C[elem] = d_A[elem] + d_B[elem];
    // Empty spin used as an artificial extra workload.
    for (int spin = 0; spin < 100000; spin++);
}
// Software block scheduler: each physical block looks up which logical
// kernel it belongs to (mapKernel) and forwards to that kernel's device
// function with the matching argument set.
__global__ void
scheduler(int* d_A1, int* d_B1, int* d_C1, int size1, int* d_A2, int* d_B2, int* d_C2, int size2, int* mapBlk, int* mapKernel, int gridDim_A, int blockDim_A, int gridDim_B, int blockDim_B ){
    const bool firstKernel = (mapKernel[blockIdx.x] == 0);
    if (firstKernel)
        vectorAdd1(d_A1, d_B1, d_C1, size1, mapBlk, blockDim_A);
    else
        vectorAdd2(d_A2, d_B2, d_C2, size2, mapBlk, blockDim_A);
}
/* Print the given printf-style message (formatted with the CUDA error
 * string) and terminate the program when a CUDA API call has failed. */
static void checkCuda(cudaError_t err, const char* fmt)
{
    if(err != cudaSuccess)
    {
        printf(fmt, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}
/* Host driver: builds the block->kernel mapping, uploads two vector-add
 * workloads, runs the software `scheduler` kernel and verifies both
 * result vectors on the host. Returns 0 on success, 1 on mismatch. */
int main(){
    const int numElements = 1000000;
    size_t size = numElements*sizeof(int);   // byte size of each vector
    int blocks = 2000;
    int threads = 1000;
    int* mapKernel = new int[blocks];
    int* mapBlk = new int[blocks];
    /* The two logical kernels alternate, one block at a time. */
    for(int blkA =0, blkB = 0, i = 0; i < blocks; i++)
    {
        mapKernel[i] = i%2;
        if(mapKernel[i] == 0)
            mapBlk[i] = blkA++;
        else
            mapBlk[i] = blkB++;
    }
    /* Host-side vectors */
    int* h_A1 = (int*)malloc(size);
    int* h_B1 = (int*)malloc(size);
    int* h_C1 = (int*)malloc(size);
    int* h_A2 = (int*)malloc(size);
    int* h_B2 = (int*)malloc(size);
    int* h_C2 = (int*)malloc(size);
    /* Random input data */
    srand( time(NULL));
    for(int i = 0; i < numElements; i++){
        h_A1[i] = (int)rand()%10000;
        h_B1[i] = (int)rand()%10000;
        h_A2[i] = (int)rand()%10000;
        h_B2[i] = (int)rand()%10000;
    }
    int* d_A1 = NULL;
    int* d_B1 = NULL;
    int* d_C1 = NULL;
    int* d_A2 = NULL;
    int* d_B2 = NULL;
    int* d_C2 = NULL;
    checkCuda(cudaMalloc((void**)&d_A1, size), "Nie udalo sie zaalokowac pamieci na urzadzeniu ( kod bledu %s)!\n");
    checkCuda(cudaMalloc((void**)&d_B1, size), "Nie udalo sie zaalokowac pamieci na urzadzeniu ( kod bledu %s)!\n");
    checkCuda(cudaMalloc((void**)&d_C1, size), "Nie udalo sie zaalokowac pamieci na urzadzeniu ( kod bledu %s)!\n");
    checkCuda(cudaMalloc((void**)&d_A2, size), "Nie udalo sie zaalokowac pamieci na urzadzeniu ( kod bledu %s)!\n");
    checkCuda(cudaMalloc((void**)&d_B2, size), "Nie udalo sie zaalokowac pamieci na urzadzeniu ( kod bledu %s)!\n");
    checkCuda(cudaMalloc((void**)&d_C2, size), "Nie udalo sie zaalokowac pamieci na urzadzeniu ( kod bledu %s)!\n");
    /* Copy the input vectors to the device */
    printf("Kopiowanie wektorów z hosta do urzadzenia...\n");
    checkCuda(cudaMemcpy(d_A1, h_A1, size, cudaMemcpyHostToDevice), "Nie udalo sie skopiowac danych do urzadzenia( kod bledu %s)!\n");
    checkCuda(cudaMemcpy(d_B1, h_B1, size, cudaMemcpyHostToDevice), "Nie udalo sie skopiowac danych do urzadzenia( kod bledu %s)!\n");
    checkCuda(cudaMemcpy(d_A2, h_A2, size, cudaMemcpyHostToDevice), "Nie udalo sie skopiowac danych do urzadzenia( kod bledu %s)!\n");
    checkCuda(cudaMemcpy(d_B2, h_B2, size, cudaMemcpyHostToDevice), "Nie udalo sie skopiowac danych do urzadzenia( kod bledu %s)!\n");
    /* Copy the mapping tables to the device */
    size_t mapSize = blocks*sizeof(int);
    int* d_mapKernel = NULL;
    int* d_mapBlk = NULL;
    printf("Kopiowanie tablic mapujacych do urzadzenia...\n");
    checkCuda(cudaMalloc((void**)&d_mapKernel,mapSize), "Nie udalo sie zaalokowac pamieci na urzadzeniu ( kod bledu %s)!\n");
    checkCuda(cudaMalloc((void**)&d_mapBlk, mapSize), "Nie udalo sie zaalokowac pamieci na urzadzeniu ( kod bledu %s)!\n");
    checkCuda(cudaMemcpy(d_mapKernel, mapKernel, mapSize, cudaMemcpyHostToDevice), "Nie udalo sie skopiowac map do urzadzenia( kod bledu %s)!\n");
    checkCuda(cudaMemcpy(d_mapBlk, mapBlk, mapSize, cudaMemcpyHostToDevice), "Nie udalo sie skopiowac map do urzadzenia ( kod bledu %s)!\n");
    /* Launch.
     * Fix: pass the ELEMENT count (numElements) as the size arguments; the
     * original passed `size`, which is the byte count. */
    scheduler<<<blocks,threads>>>(d_A1, d_B1, d_C1, numElements, d_A2, d_B2, d_C2, numElements, d_mapBlk, d_mapKernel, blocks, threads, blocks, threads );
    /* Copy the results back to the host */
    checkCuda(cudaMemcpy(h_C1, d_C1, size, cudaMemcpyDeviceToHost), "Nie udalo sie skopiowac danych z urzadzenia do hosta ( kod bledu %s)!\n");
    checkCuda(cudaMemcpy(h_C2, d_C2, size, cudaMemcpyDeviceToHost), "Nie udalo sie skopiowac danych z urzadzenia do hosta ( kod bledu %s)!\n");
    /* Verify both result vectors against a host-side reference */
    for(int i = 0; i < numElements; i++)
    {
        int w1,w2;
        w1 = h_A1[i]+h_B1[i];
        w2 = h_A2[i]+h_B2[i];
        if( w1 != h_C1[i])
        {
            printf("nr %d Wynik 1 niepoprawny %d + %d != %d!!\n",i, h_A1[i], h_B1[i], h_C1[i] );
            return 1;
        }
        if( w2 != h_C2[i])
        {
            printf("nr %d Wynik 2 niepoprawny %d + %d != %d!!\n",i,h_A2[i], h_B2[i], h_C2[i] );
            return 1;
        }
    }
    printf("Wynik poprawny!!\n");
    /* Release device and host memory (fix: originally leaked everything). */
    cudaFree(d_A1); cudaFree(d_B1); cudaFree(d_C1);
    cudaFree(d_A2); cudaFree(d_B2); cudaFree(d_C2);
    cudaFree(d_mapKernel); cudaFree(d_mapBlk);
    free(h_A1); free(h_B1); free(h_C1);
    free(h_A2); free(h_B2); free(h_C2);
    delete [] mapKernel;
    delete [] mapBlk;
    return 0;
}
|
20,659 | #include <ctime>
#include <cstdlib>
#include <iostream>
#include <string>
#include <cmath>
#include <vector>
// A 2-D point with a cluster assignment, used by the k-means-style
// grouping kernels below.
class Pt
{
public:
float x = 0; // x coordinate
float y = 0; // y coordinate
int group = 1; // id of the cluster this point currently belongs to (1 or 2)
};
// Forward declarations for the kernels / device helpers defined after main().
// NOTE(review): setFalse, Group_find and Group_update declare
// reference-to-pointer parameters (bool*&, Pt*&). __global__ arguments are
// copied to the device by value, so a reference bound to a host-side pointer
// variable is suspect here — confirm this compiles and runs as intended;
// passing the pointers by value would be the conventional form.
__global__ void setFalse(bool*& Changed, int dsize);
__device__ float dist(const Pt& p1, const Pt& p2);
__global__ void Group_find(Pt*& data, int dsize, Pt* dev_ctrs, bool*& moved);
__global__ void Moved_find(bool* moved, int dsize, bool* dev_isMoved);
__global__ void Group_update(Pt*& data, int dsize, float* sums, int* cnts);
/*
 * 2-cluster k-means demo: generates two synthetic clusters, iterates
 * Group_find / Group_update until no point changes cluster, and prints the
 * expected versus computed centers.
 */
int main()
{
    unsigned seed = time(0);
    srand(seed);
    int dsize=10;
    printf("enter data size ");
    scanf("%d", &dsize);
    const int gsize = dsize/2;              // points per synthetic cluster
    const int m1 = 0, n1 = gsize;           // value range of cluster 1
    const int m2 = n1+1, n2 = dsize;        // value range of cluster 2
    Pt expected1, expected2;
    float xsum = 0, ysum = 0;
    // Unified memory so both host and device can touch the points/flags.
    Pt* data;
    cudaMallocManaged( &data, dsize * sizeof(Pt) );
    bool* moved;
    cudaMallocManaged( &moved, dsize * sizeof(bool) );
    int blockSize = 1024;
    int blockNum = (dsize + blockSize - 1) / blockSize;
    Pt* dataTemp = new Pt[dsize];
    // First synthetic cluster in [m1, n1); record its true mean.
    for(int i = 0; i < gsize; ++i)
    {
        Pt p;
        p.x = m1 + rand() % (n1 - m1);
        xsum += p.x;
        p.y = m1 + rand() % (n1 - m1);
        ysum += p.y;
        dataTemp[i]=p;
    }
    expected1.x = xsum/gsize;
    expected1.y = ysum/gsize;
    xsum = 0, ysum = 0;
    // Second synthetic cluster in [m2, n2).
    for(int i = 0; i < gsize; ++i)
    {
        Pt p;
        p.x = m2 + rand() % (n2 - m2);
        xsum += p.x;
        p.y = m2 + rand() % (n2 - m2);
        ysum += p.y;
        dataTemp[i + gsize]=p;
    }
    expected2.x = xsum/gsize;
    expected2.y = ysum/gsize;
    cudaMemcpy(data,dataTemp, dsize * sizeof( Pt ), cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    // Random initial cluster centers anywhere in the full data range.
    Pt* ctrs = new Pt[2];
    ctrs[0].x = m1 + rand() % (n2-m1);
    ctrs[0].y = m1 + rand() % (n2-m1);
    ctrs[1].x = m1 + rand() % (n2-m1);
    ctrs[1].y = m1 + rand() % (n2-m1);
    Pt* dev_ctrs;
    cudaMallocManaged(&dev_ctrs, 2 * sizeof(Pt));
    cudaMemcpy(dev_ctrs, ctrs, 2 * sizeof( Pt ), cudaMemcpyHostToDevice);
    // Per-cluster coordinate sums (x1,y1,x2,y2) and point counts.
    float* sums = new float[4];
    for(int s = 0; s < 4; ++s) sums[s] = 0;
    float* dev_sums;
    cudaMallocManaged(&dev_sums, 4 * sizeof(float));
    int* cnts = new int[2];
    cnts[0] = 1; cnts[1] = 1;
    int* dev_cnts;
    cudaMallocManaged(&dev_cnts, 2 * sizeof(int));
    bool* isMoved = new bool[1];
    isMoved[0] = true;
    bool* dev_isMoved;
    cudaMallocManaged(&dev_isMoved, sizeof(bool));
    // Iterate until no point switches cluster.
    while( isMoved[0] )
    {
        printf("Center1 = ( %.2f, %.2f )\n", ctrs[0].x ,ctrs[0].y);
        printf("Center2 = ( %.2f, %.2f )\n", ctrs[1].x, ctrs[1].y);
        isMoved[0] = false;
        clock_t st = clock();
        setFalse<<<blockNum, blockSize>>>(moved, dsize);
        cudaDeviceSynchronize();
        Group_find<<<blockNum, blockSize>>>(data, dsize, dev_ctrs, moved);
        cudaDeviceSynchronize();
        cudaMemcpy(dev_isMoved, isMoved, sizeof( bool ), cudaMemcpyHostToDevice);
        Moved_find<<<1, 1>>>(moved, dsize, dev_isMoved);
        cudaDeviceSynchronize();
        cudaMemcpy(isMoved, dev_isMoved, sizeof( bool ), cudaMemcpyDeviceToHost);
        clock_t st2 = clock();
        clock_t st3 = 0;
        clock_t st4 = 0;
        if( isMoved[0] )
        {
            st3 = clock();
            cudaMemcpy(dev_sums, sums, 4 * sizeof( float ), cudaMemcpyHostToDevice);
            cudaMemcpy(dev_cnts, cnts, 2 * sizeof( int ), cudaMemcpyHostToDevice);
            Group_update<<<blockNum, blockSize>>>(data, dsize, dev_sums, dev_cnts);
            cudaDeviceSynchronize();
            cudaMemcpy(sums, dev_sums, 4 * sizeof( float ), cudaMemcpyDeviceToHost);
            cudaMemcpy(cnts, dev_cnts, 2 * sizeof( int ), cudaMemcpyDeviceToHost);
            st4 = clock();
            ctrs[0].x = sums[0] / cnts[0];
            ctrs[0].y = sums[1] / cnts[0];
            ctrs[1].x = sums[2] / cnts[1];
            ctrs[1].y = sums[3] / cnts[1];
        }
        clock_t st5 = clock();
        cudaMemcpy(ctrs,dev_ctrs, 2 * sizeof( Pt ), cudaMemcpyDeviceToHost);
        /* Fix: clock_t is not guaranteed to match %u; cast explicitly to
           avoid undefined printf behaviour. */
        printf("\n Elapsed Time : %u ms \n", (unsigned)(clock() - st5 + (st4 - st3) + (st2 - st)));
    }
    printf("---Result---:\n");
    printf("Expected1 = ( %.2f, %.2f )\n",expected1.x, expected1.y);
    printf("Expected2 = ( %.2f, %.2f )\n", expected2.x, expected2.y);
    printf("random initial Center1 = ( %.2f, %.2f )" ,ctrs[0].x ,ctrs[0].y);
    printf("random initial Center2 = ( %.2f, %.2f )", ctrs[1].x, ctrs[1].y);
    /* Fix: cudaFree takes the device pointer itself, not its address; the
       original passed &data / &moved / &dev_* which frees nothing valid. */
    cudaFree(data);
    cudaFree(moved);
    delete [] dataTemp;
    delete [] isMoved;
    cudaFree(dev_isMoved);
    delete [] sums;
    cudaFree(dev_sums);
    delete [] cnts;
    cudaFree(dev_cnts);
    delete [] ctrs;     // fix: was leaked
    cudaFree(dev_ctrs); // fix: was leaked
}
// Euclidean distance between two points, computed in single precision.
// Fix: the original used the double-precision pow()/sqrt() overloads,
// forcing a float->double->float round trip inside device code; plain
// multiplications and sqrtf keep everything in float.
__device__ float dist(const Pt& p1, const Pt& p2)
{
    float dx = p1.x - p2.x;
    float dy = p1.y - p2.y;
    return sqrtf(dx * dx + dy * dy);
}
// Reset every per-point "group changed" flag to false before a pass.
// NOTE(review): Changed is bool*& (reference to a host pointer variable);
// __global__ parameters are copied by value, so this is suspect — confirm.
__global__ void setFalse(bool*& Changed, int dsize)
{
int index = blockIdx.x * blockDim.x + threadIdx.x; // one thread per point
if (index < dsize)
{
Changed[index] = false;
}
}
// Assign each point to the nearer of the two cluster centers; set moved[p]
// when the assignment changed so the host can detect convergence.
// NOTE(review): Pt*& / bool*& reference parameters on a __global__ kernel
// are suspect (kernel args are copied by value) — confirm intended.
__global__ void Group_find(Pt*& data, int dsize, Pt* dev_ctrs, bool*& moved)
{
int p = blockIdx.x * blockDim.x + threadIdx.x; // one thread per point
if (p < dsize)
{
float d1 = dist(dev_ctrs[0], data[p]); // distance to center 1
float d2 = dist(dev_ctrs[1], data[p]); // distance to center 2
int oldGroup = data[p].group;
if (d1 < d2)
data[p].group = 1;
else
data[p].group = 2;
if (data[p].group != oldGroup)
{
moved[p] = true; // only ever set here; cleared beforehand by setFalse
}
}
}
// Single-thread OR-reduction over the moved[] flags: sets *dev_isMoved to
// true as soon as any point changed its cluster this iteration (launched
// with <<<1,1>>>; never clears an already-true flag).
__global__ void Moved_find(bool* moved, int dsize, bool* dev_isMoved)
{
    if (dev_isMoved[0])
        return;
    for (int i = 0; i < dsize; ++i)
    {
        if (moved[i])
        {
            dev_isMoved[0] = true;
            return;
        }
    }
}
// Accumulate per-cluster coordinate sums (sums[0..3] = x1,y1,x2,y2) and
// point counts (cnts[0..1]) so the host can recompute the centers.
// Fix: the original used plain read-modify-write (+=, ++) on accumulators
// shared by every thread — a data race that silently loses updates; the
// accumulation must use atomics.
// NOTE(review): the Pt*& reference parameter is kept to match the forward
// declaration, but see the note there — kernel args are copied by value.
__global__ void Group_update(Pt*& data, int dsize, float* sums, int* cnts)
{
    int p = blockIdx.x * blockDim.x + threadIdx.x;
    if (p < dsize)
    {
        if (data[p].group == 1)
        {
            atomicAdd(&sums[0], data[p].x);
            atomicAdd(&sums[1], data[p].y);
            atomicAdd(&cnts[0], 1);
        }
        else
        {
            atomicAdd(&sums[2], data[p].x);
            atomicAdd(&sums[3], data[p].y);
            atomicAdd(&cnts[1], 1);
        }
    }
}
|
20,660 | #include <vector>
#include <iostream>
#include <chrono>
using std::cout;
using std::chrono::high_resolution_clock;
using std::chrono::microseconds;
using std::chrono::nanoseconds;
using clock64_t = long long int;
const size_t maxWait = 10000;
const size_t nIter = 10000;
__device__ clock_t diff;
// Busy-wait for at least `ticks` device clock cycles; when writeDiff is
// set, record the actually-measured spin duration in the device global
// `diff`.
__global__ void SleepKernel(clock_t ticks, bool writeDiff)
{
    const clock64_t begin = clock64();
    while (clock64() - begin < ticks)
    {
        // spin
    }
    if (writeDiff)
        diff = clock64() - begin;
}
// Prime the driver and kernel-launch path with a large burst of tiny
// launches so the timed measurements below are not skewed by one-time
// initialization costs.
void warmup()
{
    const size_t launches = nIter * 100;
    for (size_t i = 0; i != launches; ++i)
        SleepKernel<<<1, 1>>>(1000, false);
}
// Sweep the kernel spin duration from 0 to maxWait ticks (step 10) and
// print the average wall-clock cost per launch as a CSV-style line
// "waitticks;nanoseconds".
int main()
{
    warmup();
    cout << "# waitticks ; elapsed time in nanoseconds" << std::endl;
    for (size_t ticks = 0; ticks != maxWait; ticks += 10) {
        const auto t0 = high_resolution_clock::now();
        for (size_t i = 0; i != nIter; ++i)
            SleepKernel<<<1, 1>>>(ticks, false);
        const auto t1 = high_resolution_clock::now();
        const auto perLaunch = std::chrono::duration_cast<nanoseconds>(t1 - t0) / nIter;
        cout << ticks << ";" << perLaunch.count() << std::endl;
    }
}
|
20,661 | #include "includes.h"
// Determinant of a 3x3 matrix whose entries are given row-major as
// e[row][col]. The six products are accumulated in the same left-to-right
// order as the original single expression so the float result is
// bit-identical.
__device__ float computeDeterminant (float e00, float e01, float e02, float e10, float e11, float e12, float e20, float e21, float e22)
{
    float det = e00 * e11 * e22;
    det -= e00 * e12 * e21;
    det += e10 * e21 * e02;
    det -= e10 * e01 * e22;
    det += e20 * e01 * e12;
    det -= e20 * e11 * e02;
    return det;
}
// Per-voxel eigen-analysis of the (negated) Hessian built from the six
// second-derivative volumes d_g??. Writes the largest eigenvalue (scaled by
// sigma^2) to d_output and the corresponding eigenvector direction, in
// spherical angles, to d_output_theta / d_output_phi.
__global__ void hessianKernelO ( float *d_output, float *d_output_theta, float *d_output_phi, const float *d_gxx, const float *d_gxy, const float *d_gxz, const float *d_gyy, const float *d_gyz, const float *d_gzz, float sigma, int imageW, int imageH, int imageD )
{
// Decode (x, y, z) from a grid where blockIdx.x enumerates x-tiles for all
// z-slices. NOTE(review): blockIdx.x/n_blocks_per_width is INTEGER
// division, so the ceilf is a no-op — confirm the intended z mapping.
// NOTE(review): no bounds guard on x/y/z; assumes the launch exactly tiles
// the imageW x imageH x imageD volume.
int n_blocks_per_width = imageW/blockDim.x;
int z = (int)ceilf(blockIdx.x/n_blocks_per_width);
int y = blockIdx.y*blockDim.y + threadIdx.y;
int x = (blockIdx.x - z*n_blocks_per_width)*blockDim.x + threadIdx.x;
int i = z*imageW*imageH + y*imageW + x;
// // //Brute force eigen-values computation
// http://en.wikipedia.org/wiki/Eigenvalue_algorithm
//Oliver K. Smith: Eigenvalues of a symmetric 3 × 3 matrix. Commun. ACM 4(4): 168 (1961)
// Load the negated symmetric Hessian entries at voxel i.
float a0, b0, c0, d0, e0, f0;
a0 = -d_gxx[i]; b0 = -d_gxy[i]; c0 = -d_gxz[i];
d0 = -d_gyy[i]; e0 = -d_gyz[i]; f0 = -d_gzz[i];
// m = trace/3, q = det(A - m*I)/2, p = sum of squares of (A - m*I)/6 —
// the quantities of Smith's closed-form symmetric 3x3 eigenvalue method.
float m = (a0+d0+f0)/3;
float q = computeDeterminant
(a0-m, b0, c0, b0, d0-m, e0, c0, e0, f0-m)/2;
float p = (a0-m)*(a0-m) + b0*b0 + c0*c0 + b0*b0 + (d0-m)*(d0-m) +
e0*e0 + c0*c0 + e0*e0 + (f0-m)*(f0-m);
p = p / 6;
float phi = 1.f/3.f*atan(sqrt(p*p*p-q*q)/q);
if(phi<0)
phi=phi+3.14159f/3;
// The three closed-form eigenvalues.
float eig1 = m + 2*sqrt(p)*cos(phi);
float eig2 = m - sqrt(p)*(cos(phi) + sqrt(3.0f)*sin(phi));
float eig3 = m - sqrt(p)*(cos(phi) - sqrt(3.0f)*sin(phi));
// Keep the strictly largest eigenvalue, scaled by sigma^2.
// NOTE(review): if two eigenvalues tie for the maximum, no branch fires
// and d_output[i] keeps its previous contents — confirm intended.
if( (eig1 > eig2) & (eig1 > eig3))
d_output[i] = eig1*sigma*sigma;
if( (eig2 > eig1) & (eig2 > eig3))
d_output[i] = eig2*sigma*sigma;
if( (eig3 > eig2) & (eig3 > eig1))
d_output[i] = eig3*sigma*sigma;
// // Now it comes to compute the eigenvector
// Un-scale the selected eigenvalue and form cross products of rows of
// (A - l*I) to get an eigenvector (xv, yv, zv).
float l = d_output[i]/(sigma*sigma);
a0 = a0 - l;
d0 = d0 - l;
f0 = f0 - l;
float xv = b0*e0 - c0*d0;
float yv = e0*a0 - c0*b0;
float zv = d0*a0 - b0*b0;
// Convert the eigenvector to spherical angles (theta, phi).
float radius = sqrt(xv*xv+yv*yv+zv*zv);
float thetav = atan2(yv, xv);
float phiv = 0;
if(radius > 1e-6f)
phiv = acos( zv/radius);
d_output_theta[i] = thetav;
d_output_phi[i] = phiv;
}
20,662 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define NUM_STREAM 100
#define NUM_BLOCK 1
#define NUM_THREAD 512
#define NUM_DATA 2000000
#define TYPE_DATA double
#define CHECK 0
void stopwatch(int);
void pp(int);
//a 에서 b 로 l 만큼
__global__ void data_trans(TYPE_DATA* a,TYPE_DATA* b,int l);
/*
 * Benchmark: copy NUM_DATA doubles host->device, mirror them into a second
 * device buffer with data_trans, and copy back — once split across
 * NUM_STREAM streams (overlapped) and once monolithically — timing each
 * phase with stopwatch().
 */
int main()
{
    cudaStream_t stream_array[NUM_STREAM];
    TYPE_DATA* host_a,*host_b;
    TYPE_DATA* dev_a ,*dev_b;
    /* Pinned host buffers are required for truly async cudaMemcpyAsync. */
    cudaMallocHost((void**)&host_a,sizeof(TYPE_DATA)*NUM_DATA);
    cudaMallocHost((void**)&host_b,sizeof(TYPE_DATA)*NUM_DATA);
    cudaMalloc((void**)&dev_a,sizeof(TYPE_DATA)* NUM_DATA );
    cudaMalloc((void**)&dev_b,sizeof(TYPE_DATA)* NUM_DATA );
    printf("number of stream : %d\nnumber of data : %d\nnumber of block : %d\nnumber of thread : %d\n",NUM_STREAM,NUM_DATA,NUM_BLOCK,NUM_THREAD);
    srand(time(NULL));
    for(int i=0;i<NUM_DATA;i++)
    {
        host_a[i] = rand()/(TYPE_DATA)RAND_MAX;
    }
    printf("Creating Stream[%d] : ",NUM_STREAM);
    stopwatch(0);
    for(int i=0;i<NUM_STREAM; i++)
        cudaStreamCreate(&(stream_array[i]));
    stopwatch(1);
    /* Element offset of each stream's chunk */
    int offset[NUM_STREAM];
    for(int i=0;i<NUM_STREAM;i++)
    {
        offset[i] = i * NUM_DATA/NUM_STREAM;
#if CHECK
        printf("offset[%d] : %d\n",i,offset[i]);
#endif
    }
    /************************Streaming**********************************/
    printf("Reading & Processing with Stream : ");
    stopwatch(0);
    //READ: upload each chunk on its own stream
    for(int i=0;i<NUM_STREAM;i++)
    {
        cudaMemcpyAsync(dev_a+offset[i],host_a+offset[i],sizeof(TYPE_DATA)*NUM_DATA/NUM_STREAM,cudaMemcpyHostToDevice,stream_array[i]);
    }
    //TRANS: copy each chunk device-side.
    //Fix: the kernel must operate on ITS chunk (dev_a+offset, dev_b+offset);
    //the original passed the base pointers, so every stream re-copied chunk 0
    //and dev_b beyond the first chunk was never written.
    for(int j=0;j<NUM_STREAM;j++)
    {
        data_trans<<<NUM_BLOCK,NUM_THREAD,0,stream_array[j]>>>(dev_a+offset[j],dev_b+offset[j],NUM_DATA/NUM_STREAM);
    }
    //GET: download each chunk on its own stream
    for(int i=0;i<NUM_STREAM;i++)
    {
        cudaMemcpyAsync(host_b+offset[i],dev_b+offset[i],sizeof(TYPE_DATA)*NUM_DATA/NUM_STREAM,cudaMemcpyDeviceToHost,stream_array[i]);
    }
    for(int i=0; i<NUM_STREAM;i++)
        cudaStreamSynchronize(stream_array[i]);
    stopwatch(1);
#if CHECK
    printf("CHECK 0-10, %d-%d\n",NUM_DATA-10,NUM_DATA-1);
    for(int i=0;i<10;i++)
        printf("%.4lf ",host_b[i]);
    printf("\n");
    for(int i=NUM_DATA-10;i<NUM_DATA;i++)
        printf("%.4lf ",host_b[i]);
    printf("\n");
#endif
    /************************No Streaming**************************************/
    printf("Reading & Processing without Stream : ");
    stopwatch(0);
    cudaMemcpy(dev_a,host_a,sizeof(TYPE_DATA)*NUM_DATA,cudaMemcpyHostToDevice);
    data_trans<<<NUM_BLOCK,NUM_THREAD>>>(dev_a,dev_b,NUM_DATA);
    cudaMemcpy(host_b,dev_b,sizeof(TYPE_DATA)*NUM_DATA,cudaMemcpyDeviceToHost);
    stopwatch(1);
#if CHECK
    printf("CHECK 0-10, %d-%d\n",NUM_DATA-10,NUM_DATA-1);
    for(int i=0;i<10;i++)
        printf("%.4lf ",host_b[i]);
    printf("\n");
    for(int i=NUM_DATA-10;i<NUM_DATA;i++)
        printf("%.4lf ",host_b[i]);
    printf("\n");
#endif
    printf("Destroying Stream[%d] : ",NUM_STREAM);
    stopwatch(0);
    for(int i=0;i<NUM_STREAM; i++)
        cudaStreamDestroy(stream_array[i] );
    stopwatch(1);
    cudaFree(dev_a);
    cudaFree(dev_b);
    /* Fix: pinned memory from cudaMallocHost must be released with
       cudaFreeHost, not cudaFree. */
    cudaFreeHost(host_a);
    cudaFreeHost(host_b);
    return 0;
}
// Copy l elements from a to b, one grid-stride-style loop per thread.
// Fix: stride by blockDim.x instead of the NUM_THREAD macro so the kernel
// is correct for any launch configuration, not just launches that happen
// to use exactly NUM_THREAD threads per block.
__global__ void data_trans(TYPE_DATA* a,TYPE_DATA* b,int l)
{
    for(int i = threadIdx.x; i < l; i += blockDim.x)
        b[i] = a[i];
}
// Debug helper: print an integer on its own line.
void pp(int num)
{
    printf("%d\n", num);
}
/*
 * Simple start/stop wall-clock timer built on CLOCK_MONOTONIC.
 * flag == 0 starts the timer, flag == 1 stops it and prints the elapsed
 * time in the unit selected below. State is kept in function statics, so
 * calls must be strictly paired and are not thread-safe.
 */
void stopwatch(int flag)
{
    enum clock_unit{nano = 0, micro , milli, sec} unit;
    const long long NANOS = 1000000000LL;
    static struct timespec startTS,endTS;
    static long long diff = 0;
    /* Select the reporting unit here: nano, micro, milli, sec */
    unit = micro;
    //start
    if(flag == 0)
    {
        diff = 0;
        if(-1 == clock_gettime(CLOCK_MONOTONIC,&startTS))
            printf("Failed to call clock_gettime\n");
    }
    //end
    else if(flag == 1)
    {
        if(-1 == clock_gettime(CLOCK_MONOTONIC,&endTS))
            printf("Failed to call clock_gettime\n");
        diff = NANOS * (endTS.tv_sec - startTS.tv_sec) + (endTS.tv_nsec - startTS.tv_nsec);
        switch(unit)
        {
            case nano :
                /* Fix: label previously said "micros" for nanoseconds. */
                printf("elapsed time : % lld nano sec\n",diff);
                break;
            case micro :
                printf("elapsed time : % lld micros\n",diff/1000);
                break;
            case sec :
                /* Fix: label previously said "micros" for seconds. */
                printf("elapsed time : % lld sec\n",diff/1000000000);
                break;
            default :
                /* Fix: milli divisor was 100000 — off by a factor of 10. */
                printf("elapsed time : % lld milli sec\n",diff/1000000);
                break;
        }
    }
    else
    {
        printf("wrong flag | 0 : start, 1 : end\n");
    }
}
|
20,663 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
//M and N number of threads (grid and block)
#define M 1
#define N 1
// Return true when `substring` (dim_substr chars) occurs in `string`
// (dim_str chars) starting exactly at offset i.
__device__ bool matchesAt(const char string[], const char substring[], const int dim_str, const int dim_substr, int i)
{
    if (i + dim_substr > dim_str)
        return false;
    for (int j = 0; j < dim_substr; j++)
        if (string[i + j] != substring[j])
            return false;
    return true;
}
/*
 * Parallel substring search: the candidate start offsets of `string` are
 * divided among thread_number threads; a thread that finds a match writes
 * the offset into pos[0].
 * Fix: the original compared string[j] against substring[j] using the
 * ABSOLUTE index j instead of the offset-relative index j - i, so it could
 * only ever detect a match at offset 0 (and set the "found" flag on
 * spurious coincidences elsewhere).
 */
__global__ void multiply( const char string[] , const char substring[], const int dim_str,const int dim_substr, int pos[], const int thread_number)
{
    int index = blockIdx.x* blockDim.x* blockDim.y* blockDim.z+threadIdx.z* blockDim.y* blockDim.x+ threadIdx.y* blockDim.x+ threadIdx.x;
    if(index<dim_str){
        if(dim_str<=thread_number){ //if more threads than array size: one offset each
            printf("Thread %i; Modifying value of index %i \n", index, index);
            if(matchesAt(string, substring, dim_str, dim_substr, index)){
                pos[0]=index;
                return;
            }
        }
        else{ //if less threads than array size: each thread scans a range
            int chunk = (int)(dim_str/thread_number);
            bool lastThread = (index == thread_number-1);
            int begin = index*chunk;
            // the last thread also covers the remainder of the array
            int end = lastThread ? dim_str : begin + chunk;
            for(int i=begin; i<end; i++){
                if(lastThread)
                    printf("Thread %i; Modifying value of index %i\n",index, i);
                else
                    printf("Thread %i; Modifying value of index %i \n", index, i);
                if(matchesAt(string, substring, dim_str, dim_substr, i)){
                    pos[0]=i;
                    return;
                }
            }
        }
    }
}
/*
 * Host driver: searches for "lol" inside "aglolsbdrc" on the GPU and
 * prints the match position plus the elapsed wall-clock time.
 */
int main(int argc, char *argv[]){
    //Measure time
    clock_t time_begin;
    time_begin=clock();
    // pointers to host & device arrays
    char *device_array = 0;
    const char *host_array = "aglolsbdrc";   // fix: string literals are const
    char *d_substr; cudaMalloc(&d_substr,3 * sizeof(char));
    const char *substr="lol";
    cudaMemcpy(d_substr, substr, sizeof(char)*3, cudaMemcpyHostToDevice);
    int *h_pos, *d_pos;
    h_pos=( int*)malloc(sizeof( int));
    cudaError_t er= cudaMalloc(&d_pos, sizeof(int));
    /* Fix: initialise the result to -1 so a run with no match reports a
       sentinel instead of uninitialised device memory. */
    h_pos[0] = -1;
    cudaMemcpy(d_pos, h_pos, sizeof(int), cudaMemcpyHostToDevice);
    int size_array=10;
    for(int i=0; i<size_array; i++){
        printf("%c\t", host_array[i]);
    }
    printf("\n");
    // upload the haystack
    cudaError_t err= cudaMalloc(&device_array,size_array * sizeof(char));
    cudaError_t erro=cudaMemcpy(device_array, host_array, sizeof(char)*size_array, cudaMemcpyHostToDevice);
    dim3 bloque(N,N); // 2-D block of N*N threads
    dim3 grid(M,M);   // 2-D grid of M*M blocks
    int thread_number= N*N*M*M;
    multiply<<<grid, bloque>>>(device_array, d_substr, size_array ,3, d_pos, thread_number);
    cudaDeviceSynchronize();  // fix: cudaThreadSynchronize() is deprecated
    // download and inspect the result on the host:
    cudaError_t error=cudaMemcpy(h_pos, d_pos, sizeof(int), cudaMemcpyDeviceToHost);
    printf("pos: %i\t", h_pos[0]);
    // deallocate memory
    free(h_pos);
    cudaFree(device_array);
    cudaFree(d_substr);   // fix: was leaked
    cudaFree(d_pos);
    printf("Time elapsed: %f seconds\n", (((float)clock() - (float)time_begin) / 1000000.0F ) * 1000 ); //1.215s
}
20,664 | #include "includes.h"
//Udacity HW 4
//Radix Sorting
// Scan post-processing step: uniformly add block b's scanned block-sum
// (blkSumsScan[b]) to the per-element scan results of block b+1 — hence
// the extra blockDim.x added to the global index. Block 0's elements need
// no adjustment and are skipped by construction.
__global__ void addPrevSum(unsigned int* blkSumsScan, unsigned int* blkScans, unsigned int n)
{
    unsigned int idx = blockDim.x * (blockIdx.x + 1) + threadIdx.x;
    if (idx < n)
        blkScans[idx] += blkSumsScan[blockIdx.x];
}
20,665 | #include "includes.h"
// Intentionally empty kernel: its launches serve purely as visible markers
// in profiler timelines.
__global__ void debugMark() {
}
20,666 | /*--------------------------------------------------------------------------*\
Copyright (c) 2008-2009, Danny Ruijters. All rights reserved.
http://www.dannyruijters.nl/cubicinterpolation/
This file is part of CUDA Cubic B-Spline Interpolation (CI).
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the copyright holders nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation are
those of the authors and should not be interpreted as representing official
policies, either expressed or implied.
\*--------------------------------------------------------------------------*/
#ifndef _VERSION_CUBIC_INTERPOLATION
#define _VERSION_CUBIC_INTERPOLATION 4
// Library identification strings for the CUDA Cubic B-Spline
// Interpolation (CI) package; exposed so clients can report the version.
const char* ciVersion = "CUDA Cubic B-Spline Interpolation (CI) Version 0.4";
const char* ciDate = "20 Feb 2009";
const char* ciAuthors = "Danny Ruijters";
const char* ciCopyright = "(C) Copyright 2008-2009 Danny Ruijters.";
#endif //_VERSION_CUBIC_INTERPOLATION
|
// Map an iteration count onto a grayscale ARGB pixel: the gray level is
// iter scaled linearly into [0, 255], and alpha is fixed at 255.
__device__ int createArgbColor(int iter, int maxIter)
{
    int gray = (255.0*iter)/maxIter;
    int argb = 255 << 24;      // opaque alpha
    argb |= gray << 16;        // red
    argb |= gray << 8;         // green
    argb |= gray;              // blue
    return argb;
}
|
20,668 | // cudaTrivial.cu
#include <cuda.h>
#include <iostream>
// Write each thread's global linear id into its own slot of data.
// Assumes the launch provides exactly one array element per thread.
__global__ void cudaKernel(int* data) {
    const int gid = blockDim.x * blockIdx.x + threadIdx.x;
    data[gid] = gid;
}
/*
 * Parse the launch configuration from the command line
 * ([blocks threads] or [threads]), run cudaKernel, and print every
 * element of the result array.
 */
int main(int argc, char *argv[]){
    int numThreads = 512;
    int numBlocks = 1;
    if(argc < 2){
        std::cout<<"No number of threads or blocks specified"<<std::endl;
    }
    else if(argc == 3){
        std::cout<<"Detected block and thread params..."<<std::endl;
        numBlocks = std::atoi(argv[1]);
        numThreads = std::atoi(argv[2]);
    }
    else{
        std::cout<<"Detected only 1 arg, assuming it is thread count..."<<std::endl;
        numThreads = std::atoi(argv[1]);
    }
    std::cout<<"Using "<<numBlocks<<" blocks"<<std::endl;
    std::cout<<"Using "<<numThreads<<" threads"<<std::endl;
    const int threadCount = numBlocks*numThreads;
    // Fix: heap allocation replaces the ISO-nonconforming runtime-sized
    // stack array (VLA), which could also overflow the stack for large
    // thread counts; the copy target is the pointer, not `&array`.
    int* data = new int[threadCount];
    int* d_data;
    //allocate memory on device for int array of threadCount size
    cudaMalloc((void **) &d_data, threadCount*sizeof(int));
    //invoke kernel
    cudaKernel<<<numBlocks,numThreads>>>(d_data);
    //copy back from device to host (blocking, so no explicit sync needed)
    cudaMemcpy(data,d_data,threadCount*sizeof(int),cudaMemcpyDeviceToHost);
    //free device mem
    cudaFree(d_data);
    for(int i = 0; i<threadCount; ++i){
        std::cout<<"Address "<<i<<" :: "<<data[i]<<std::endl;
    }
    delete[] data;
}
|
20,669 | #include "includes.h"
#define VERTICES 600
__constant__ float2 d_vertices[VERTICES];
__constant__ float d_slopes[VERTICES];
/*
* This file contains the implementation of a CUDA Kernel for the
* point-in-polygon problem using the crossing number algorithm
*
* The kernel cn_pnpoly is can be tuned using the following parameters:
* * block_size_x any sensible thread block size
* * tile_size any sensible tile size value
* * between_method any of [0, 1, 2, 3]
* * use_precomputed_slopes enable or disable [0, 1]
* * use_method any of [0, 1]
*
* The kernel cn_pnpoly_naive is used for correctness checking.
*
* The algorithm used here is adapted from:
* 'Inclusion of a Point in a Polygon', Dan Sunday, 2001
* (http://geomalgorithms.com/a03-_inclusion.html)
*
* Author: Ben van Werkhoven <b.vanwerkhoven@esciencecenter.nl>
*/
#ifndef block_size_x
#define block_size_x 256
#endif
#ifndef block_size_y
#define block_size_y 1
#endif
#ifndef block_size_z
#define block_size_z 1
#endif
#ifndef tile_size
#define tile_size 1
#endif
// Reference (unoptimized) crossing-number point-in-polygon kernel, used
// for correctness checking. One thread per query point; the polygon
// vertices live in constant memory (d_vertices). bitmap[i] is 1 when the
// point is inside (odd number of edge crossings), 0 when outside.
__global__ void cn_pnpoly_naive(int* bitmap, float2* points, int n) {
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= n) {
        return;
    }
    float2 p = points[tid];
    int inside = 0;
    // Walk every edge v[k] -> v[j]; k trails j by one and wraps around.
    for (int j = 0, k = VERTICES - 1; j < VERTICES; k = j++) {
        float2 vj = d_vertices[j];
        float2 vk = d_vertices[k];
        float slope = (vk.x - vj.x) / (vk.y - vj.y);
        // Does the edge straddle p vertically, and does a ray from p in
        // the +x direction cross the edge?
        bool straddles = (vj.y > p.y) != (vk.y > p.y);
        if (straddles && (p.x < slope * (p.y - vj.y) + vj.x)) {
            inside = !inside;
        }
    }
    bitmap[tid] = inside;
}
20,670 | /* NiuTrans.Tensor - an open-source tensor library
* Copyright (C) 2017, Natural Language Processing Lab, Northeastern University.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* $Created by: LI Yinqiao (li.yin.qiao.2012@hotmail.com) 2018-7-11
*/
#include "../../XTensor.h"
#include "../../XDevice.h"
#include "ConvertDataType.cuh"
namespace nts { // namespace nts(NiuTrans.Tensor)
#ifdef USE_CUDA
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cuda_fp16.h>
__global__
void KernelFloatToFloat16(float * s, __half * t, int size)
{
    /* one thread per element: narrow s[i] from float to half into t[i] */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    t[idx] = __float2half(s[idx]);
}
__global__
void KernelFloat16ToFloat(__half * s, float * t, int size)
{
    /* one thread per element: widen s[i] from half to float into t[i] */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    t[idx] = __half2float(s[idx]);
}
__global__
void KernelFloatToInt(float * inputData, int * outputData, int size)
{
    /* one thread per element: truncate inputData[i] to int */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    outputData[idx] = (int)(inputData[idx]);
}
__global__
void KernelIntToFloat(int * inputData, float * outputData, int size)
{
    /* one thread per element: promote inputData[i] to float */
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx >= size)
        return;
    outputData[idx] = (float)(inputData[idx]);
}
/*
data conversion (cuda code)
>> devID - device id
>> s - source data array
>> typeS - source data type
>> t - target data array
>> typeT - target data type
>> size - number of the items in s (and t)
*/
void _CudaConvertDataType(int devID, void * s, TENSOR_DATA_TYPE typeS, void * t, TENSOR_DATA_TYPE typeT, int size)
{
    CheckNTErrors((devID >= 0), "This code must be run on GPUs!");
    /* identical types: nothing to convert */
    if(typeS == typeT)
        return;
    int gridSize[3];
    int blockSize[3];
    /* pick a 1D launch configuration covering "size" elements */
    GDevs.GetCudaThread(devID, size, gridSize, blockSize);
    dim3 blocks(gridSize[0]);
    dim3 threads(blockSize[0]);
    int devIDBackup;
    ProtectCudaDev(devID, devIDBackup);
    if(typeS == X_FLOAT && typeT == X_FLOAT16)
        KernelFloatToFloat16<<<blocks, threads>>>((float*)s, (__half*)t, size);
    else if(typeS == X_FLOAT16 && typeT == X_FLOAT)
        KernelFloat16ToFloat<<<blocks, threads>>>((__half*)s, (float*)t, size);
    /* fix: support the float <-> int conversions here too; the kernels
       already exist and the XTensor overload in this file supports them */
    else if(typeS == X_FLOAT && typeT == X_INT)
        KernelFloatToInt<<<blocks, threads>>>((float*)s, (int*)t, size);
    else if(typeS == X_INT && typeT == X_FLOAT)
        KernelIntToFloat<<<blocks, threads>>>((int*)s, (float*)t, size);
    else{
        ShowNTErrors("Unsupported data types for conversion!");
    }
    ProtectCudaDev(devID, devIDBackup);
}
/*
convert data type (cuda code)
>> input - input tensor
>> output - output tensor
*/
void _CudaConvertDataType(const XTensor * input, XTensor * output)
{
    /* identical types: conversion is a no-op */
    if (input->dataType == output->dataType)
        return;
    int gridDims[3];
    int blockDims[3];
    /* 1D launch configuration covering every unit of the input tensor */
    GDevs.GetCudaThread(input->devID, input->unitNum, gridDims, blockDims);
    dim3 gridSpec(gridDims[0]);
    dim3 blockSpec(blockDims[0]);
    int devIDBackup;
    ProtectCudaDev(input->devID, devIDBackup);
    if (input->dataType == X_FLOAT && output->dataType == X_INT)
        KernelFloatToInt<<<gridSpec, blockSpec>>>
                         ((float*)input->data, (int*)output->data, input->unitNum);
    else if (input->dataType == X_INT && output->dataType == X_FLOAT)
        KernelIntToFloat<<<gridSpec, blockSpec>>>
                         ((int*)input->data, (float*)output->data, input->unitNum);
    else if (input->dataType == X_FLOAT && output->dataType == X_FLOAT16)
        KernelFloatToFloat16<<<gridSpec, blockSpec>>>
                             ((float*)input->data, (__half*)output->data, input->unitNum);
    else if (input->dataType == X_FLOAT16 && output->dataType == X_FLOAT)
        KernelFloat16ToFloat<<<gridSpec, blockSpec>>>
                             ((__half*)input->data, (float*)output->data, input->unitNum);
    else {
        ShowNTErrors("Unsupported data types for conversion!");
    }
    ProtectCudaDev(input->devID, devIDBackup);
}
#endif // USE_CUDA
} // namespace nts(NiuTrans.Tensor) |
20,671 | /*******************************************************************************
* PROGRAM: canny_edge_detector
* FILE: ui.cu / a small user interface, use WIN32 graphic lib
* PURPOSE: This program is a case study on porting algorithm implemented in C to CUDA
* The original C code is referenced from canny_edge program implemented by Profs. Mike Heath
* This program also uses some of the funtions from canny_edge program
* PURPOSE: Apply Gaussian Smooth to input pgm image
* NAME: Vuong Pham-Duy
* Faculty of Computer Science and Technology
* Ho Chi Minh University of Technology, Viet Nam
* vuongpd95@gmail.com
* DATE: 11/10/2016
*******************************************************************************/
#include<stdio.h>
#include<stdlib.h>
#define INFILENAME "D:\\Vuong_only\\Images\\pgm_in\\img2.pgm"
#define DEBUGFILE "D:\\Vuong_only\\Images\\edge_out\\debug.txt"
#define SIGMA 1.5
#define TLOW 0.35
#define THIGH 0.75
double cuda_canny(char *infilename, float sigma, float tlow, float thigh, double &init_time);
double one_canny(char *infilename, float sigma, float tlow, float thigh);
int main()
{
    // Run the CUDA and the single-threaded CPU edge detectors on the same
    // image and report both timings (plus the CUDA setup time).
    double cpu_time, gpu_time, setup_time;
    // TODO UI codes go here
    gpu_time = cuda_canny(INFILENAME, SIGMA, TLOW, THIGH, setup_time);
    cpu_time = one_canny(INFILENAME, SIGMA, TLOW, THIGH);
    printf("\n\nRESULT:\nCUDA: %f INIT_TIME: %f\nCPU: %f \n", gpu_time, setup_time, cpu_time);
    return 0;
}
|
20,672 | //
// global.cu
// Kernel of verifing ciphertext and constant-time copy.
//
// Copyright (c) 2021 Tatsuki Ono
//
// This software is released under the MIT License.
// https://opensource.org/licenses/mit-license.php
//
#include "device.cuh"
#include "global.cuh"
namespace atpqc_cuda::verify_cmov_ws::global {
// Batched "verify then conditional move" kernel (the file header describes
// this as ciphertext verification plus constant-time copy). For each input
// row: compare `a` against `b` over verify_len bytes, then conditionally
// copy x into r (cmove_len bytes) using the comparison result as the flag.
// Presumably copy-on-mismatch as in Kyber's FO transform — confirm against
// device::verify_cmov.
// Layout: each of r/x/a/b is a pitched 2D buffer with `ninputs` rows; one
// row is handled per (blockIdx.x, threadIdx.y) pair.
// NOTE(review): this path is meant to be constant-time w.r.t. secret data;
// do not introduce data-dependent branches when modifying it.
__global__ void verify_cmov(std::uint8_t* r, std::size_t r_pitch,
const std::uint8_t* x, std::size_t x_pitch,
std::size_t cmove_len, const std::uint8_t* a,
std::size_t a_pitch, const std::uint8_t* b,
std::size_t b_pitch, std::size_t verify_len,
unsigned ninputs) {
device::verify_cmov vc;
// pos = which batched input this warp-row handles.
if (unsigned pos = blockIdx.x * blockDim.y + threadIdx.y; pos < ninputs) {
// Advance every pointer to row `pos` of its pitched buffer.
r += r_pitch * pos;
x += x_pitch * pos;
a += a_pitch * pos;
b += b_pitch * pos;
vc.cmov(r, x, cmove_len, vc.verify(a, b, verify_len));
}
}
}  // namespace atpqc_cuda::verify_cmov_ws::global
|
20,673 | /*
* Parakeet
*
* (c) 2009-2011 Eric Hielscher, Alex Rubinsteyn
*
*
*/
#include <cuda_runtime_api.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Abort with a diagnostic message when a CUDA call fails.
// fix: msg is now const char* — every caller in this file passes a string
// literal, which is not convertible to char* in standard C++.
void chkError(cudaError_t rslt, const char *msg) {
  if (rslt) {
    printf("Error: %s\n", msg);
    exit(1);
  }
}
// Enumerate all CUDA devices and dump their properties.
// fix: the size_t members (totalGlobalMem, sharedMemPerBlock, memPitch,
// totalConstMem, textureAlignment, surfaceAlignment) were printed with %d,
// which misprints values on 64-bit builds (e.g. GPUs with > 2 GB of memory
// showed negative totals). They now use %zu.
int main(int argc, char **argv) {
  cudaError_t rslt;
  int numDevices;
  rslt = cudaGetDeviceCount(&numDevices);
  chkError(rslt, "couldn't get number of devices");
  cudaDeviceProp *deviceProps =
    (cudaDeviceProp*)malloc(numDevices * sizeof(cudaDeviceProp));
  int i;
  for (i = 0; i < numDevices; ++i) {
    rslt = cudaGetDeviceProperties(&deviceProps[i], i);
    chkError(rslt, "couldn't get properties for device");
    int canAccessPeer;
    // Peer access is queried against the next device (wrapping around).
    rslt = cudaDeviceCanAccessPeer(&canAccessPeer, i, (i+1)%numDevices);
    chkError(rslt, "couldn't get peer access info");
    printf(
      "PROPERTIES FOR DEVICE %d\n"
      "------------------------\n"
      "\n"
      "Device name: %s\n"
      "Total Global mem: %zu\n"
      "Shared mem per block: %zu\n"
      "Regs per block: %d\n"
      "Warp size: %d\n"
      "Mem pitch: %zu\n"
      "Max threads per block: %d\n"
      "Max threads per dim: [%d,%d,%d]\n"
      "Max Grid size: [%d,%d,%d]\n"
      "Clock rate: %d\n"
      "Total const mem: %zu\n"
      "Compute capability: %d.%d\n"
      "Texture alignment: %zu\n"
      "Device Overlap: %d\n"
      "Multi Processor Count: %d\n"
      "Kernel Exec Timeout Enabled: %d\n"
      "Integrated: %d\n"
      "Can Map Host Mem: %d\n"
      "Compute mode: %d\n"
      "Max texture 1D: %d\n"
      "Max texture 2D: [%d,%d]\n"
      "Max texture 3D: [%d,%d,%d]\n"
      "Max texture 1D layered: [%d,%d]\n"
      "Max texture 2D layered: [%d,%d,%d]\n"
      "Surface alignment: %zu\n"
      "Concurrent kernels: %d\n"
      "ECC Enabled: %d\n"
      "PCI Bus ID: %d\n"
      "PCI Device ID: %d\n"
      "PCI Domain ID: %d\n"
      "TCC Driver: %d\n"
      "Async Engine Count: %d\n"
      "Unified Addressing: %d\n"
      "Memory Clock rate: %d\n"
      "Memory Bus Width: %d\n"
      "L2 Cache Size: %d\n"
      "Max Threads per SM: %d\n"
      "Can access peer: %d\n"
      "\n",
      i,
      deviceProps[i].name,
      deviceProps[i].totalGlobalMem,
      deviceProps[i].sharedMemPerBlock,
      deviceProps[i].regsPerBlock,
      deviceProps[i].warpSize,
      deviceProps[i].memPitch,
      deviceProps[i].maxThreadsPerBlock,
      deviceProps[i].maxThreadsDim[0],
      deviceProps[i].maxThreadsDim[1],
      deviceProps[i].maxThreadsDim[2],
      deviceProps[i].maxGridSize[0],
      deviceProps[i].maxGridSize[1],
      deviceProps[i].maxGridSize[2],
      deviceProps[i].clockRate,
      deviceProps[i].totalConstMem,
      deviceProps[i].major,
      deviceProps[i].minor,
      deviceProps[i].textureAlignment,
      deviceProps[i].deviceOverlap,
      deviceProps[i].multiProcessorCount,
      deviceProps[i].kernelExecTimeoutEnabled,
      deviceProps[i].integrated,
      deviceProps[i].canMapHostMemory,
      deviceProps[i].computeMode,
      deviceProps[i].maxTexture1D,
      deviceProps[i].maxTexture2D[0],
      deviceProps[i].maxTexture2D[1],
      deviceProps[i].maxTexture3D[0],
      deviceProps[i].maxTexture3D[1],
      deviceProps[i].maxTexture3D[2],
      deviceProps[i].maxTexture1DLayered[0],
      deviceProps[i].maxTexture1DLayered[1],
      deviceProps[i].maxTexture2DLayered[0],
      deviceProps[i].maxTexture2DLayered[1],
      deviceProps[i].maxTexture2DLayered[2],
      deviceProps[i].surfaceAlignment,
      deviceProps[i].concurrentKernels,
      deviceProps[i].ECCEnabled,
      deviceProps[i].pciBusID,
      deviceProps[i].pciDeviceID,
      deviceProps[i].pciDomainID,
      deviceProps[i].tccDriver,
      deviceProps[i].asyncEngineCount,
      deviceProps[i].unifiedAddressing,
      deviceProps[i].memoryClockRate,
      deviceProps[i].memoryBusWidth,
      deviceProps[i].l2CacheSize,
      deviceProps[i].maxThreadsPerMultiProcessor,
      canAccessPeer);
  }
  free(deviceProps);
  return 0;
}
|
20,674 | #include "includes.h"
__device__ int is_source_gpu(int i, int j, int radius, int source_active, int src_x, int src_y)
{
    // A cell (i, j) belongs to the source iff the source is active and the
    // cell lies within `radius` (Euclidean distance) of (src_x, src_y).
    if (!source_active)
        return 0;
    // double temporaries: the product of two exact integer-valued floats is
    // exact in double, matching the original pow()-based computation.
    double dx = (double)(src_x - i);
    double dy = (double)(src_y - j);
    return (sqrt(dx * dx + dy * dy) <= radius) ? 1 : 0;
}
__global__ void wireless_src_pulse_kernel(int step, double amp, double MAX_TIME, double TIME_STEP, int radius, int source_active, int src_x, int src_y, double *ua_gpu, double *ub_gpu, double *uc_gpu)
{
    // Each thread owns a thread_work x thread_work tile of the grid and
    // either drives the pulse source (first half of the simulation) or
    // clears the source cells (second half, while the source is active).
    int i, j;
    int i_start, j_start;
    int i_final, j_final;
    int line_length;
    int global_thread_x, global_thread_y;
    int thread_work = 32;
    line_length = gridDim.y * blockDim.y;
    global_thread_x = blockDim.x * blockIdx.x + threadIdx.x;
    global_thread_y = blockDim.y * blockIdx.y + threadIdx.y;
    i_start = global_thread_x * thread_work;
    j_start = global_thread_y * thread_work;
    // fix: the tile must end thread_work cells after its start. The previous
    // bound, global_thread_x * (thread_work + 1), made thread 0 cover
    // nothing and left most cells between tiles unvisited.
    i_final = i_start + thread_work;
    j_final = j_start + thread_work;
    if (step < (int)(MAX_TIME / TIME_STEP) / 2){
        // Pulse source: rectified sine driven by the timestep counter.
        for (i = i_start; i < i_final; i++){
            for (j = j_start; j < j_final; j++){
                if (is_source_gpu(i, j, radius, 1, src_x, src_y))
                    uc_gpu[i * line_length + j] = amp * fabs(sin(step * M_PI/4));
            }
        }
    } else if (source_active){
        // Source being switched off: zero all three field buffers there.
        for (i = i_start; i < i_final; i++) {
            for (j = j_start; j < j_final; j++) {
                if (is_source_gpu(i, j, radius, source_active, src_x, src_y)) {
                    ua_gpu[i * line_length + j] = 0;
                    ub_gpu[i * line_length + j] = 0;
                    uc_gpu[i * line_length + j] = 0;
                }
            }
        }
    }
    // All threads should reach this point before setting source_active.
    // Option 1: need a thread barrier here -> not done, I chose option 2
    // Option 2: simply write 2 kernels and syncCPU -> done, I chose this option
    // CPU is setting source_active = 0 after this kernel is done executing.
}
20,675 | #include "includes.h"
__global__ void setToOnes(float *data, int size)
{
    // Fill data[0..size) with 1.0f, one element per thread (1D grid).
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= size)
        return;
    data[gid] = 1;
}
20,676 | //VecAdd.cu
// author: Pan Yang
// date : 2015-7-2
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define SIZE 1024
// Kernel definition
__global__ void VecAdd_T(int *a, int *b, int *c, int n)
{
    // Indexes by thread id only, so every launched block covers the
    // same first blockDim.x elements — intended for <<<1, n>>> launches.
    int tid = threadIdx.x;
    if (tid >= n)
        return;
    c[tid] = a[tid] + b[tid];
}
// Kernel definition
__global__ void VecAdd_B(int *a, int *b, int *c, int n)
{
    // Indexes by block id only — intended for <<<n, 1>>> launches.
    int bid = blockIdx.x;
    if (bid >= n)
        return;
    c[bid] = a[bid] + b[bid];
}
// Kernel definition
__global__ void VecAdd_BT(int *a, int *b, int *c, int n)
{
    // Standard flat global index over blocks and threads, with tail guard.
    int gid = threadIdx.x + blockIdx.x * blockDim.x;
    if (gid >= n)
        return;
    c[gid] = a[gid] + b[gid];
}
int main()
{
    // Wall-clock timing of the whole host + device pipeline.
    clock_t start = clock();

    // Host vectors.
    int *a = (int *)malloc(SIZE * sizeof(int));
    int *b = (int *)malloc(SIZE * sizeof(int));
    int *c = (int *)malloc(SIZE * sizeof(int));

    // Device vectors.
    int *d_a, *d_b, *d_c;
    cudaMalloc( &d_a, SIZE * sizeof(int));
    cudaMalloc( &d_b, SIZE * sizeof(int));
    cudaMalloc( &d_c, SIZE * sizeof(int));

    // Initialize inputs: a[i] = b[i] = i.
    for (int i = 0; i < SIZE; ++i)
    {
        a[i] = i;
        b[i] = i;
        c[i] = 0;
    }

    // Upload inputs to the device.
    cudaMemcpy( d_a, a, SIZE * sizeof(int), cudaMemcpyHostToDevice );
    cudaMemcpy( d_b, b, SIZE * sizeof(int), cudaMemcpyHostToDevice );

    // Kernel invocation with N threads (single block).
    VecAdd_T<<<1, SIZE>>>(d_a, d_b, d_c, SIZE);
    // Alternative launch shapes kept for the exercise:
    //VecAdd_B<<<SIZE, 1>>>(d_a, d_b, d_c, SIZE);
    //int threadsPerBlock = 256;
    //int blocksPerGrid = (SIZE + threadsPerBlock - 1) / threadsPerBlock;
    //VecAdd_BT<<<blocksPerGrid, threadsPerBlock>>>(d_a, d_b, d_c, SIZE);

    // Blocking copy back also synchronizes with the kernel.
    cudaMemcpy( c, d_c, SIZE * sizeof(int), cudaMemcpyDeviceToHost );

    for (int i = 0; i < 10; ++i)
    {
        printf("c[%d] = %d\n", i, c[i]);
    }

    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    clock_t finish = clock();
    // fix: (finish - start) / CLOCKS_PER_SEC yields seconds, but the label
    // says "ms" — scale by 1000 so the printed number matches its unit.
    float time = (float)(finish - start) * 1000.0f / CLOCKS_PER_SEC;
    printf("calculation time = %fms\n", time);
    return 0;
}
|
20,677 | #include "VectorOps.cuh"
void __device__ vvaddDev(int i2d, real alpha, real* x, real* y, int totpoints) {
    // AXPY-style accumulate for one grid point: y[i2d] += alpha * x[i2d],
    // guarded against indices past the end of the arrays.
    if (i2d < totpoints) {
        y[i2d] += alpha * x[i2d];
    }
}
20,678 | #include <stdio.h>
#include <stdlib.h>
// __global__ keyword specifies a device kernel function
__global__ void cuda_hello() {
    // Each launched thread prints a greeting plus its block/thread indices.
    // Device printf output is buffered and flushed at the next host sync.
    printf("Hello World from GPU!\n");
    printf("hello form GPU B.x=%d, Thread.x=%d\n", blockIdx.x, threadIdx.x);
}
int main() {
    printf("Hello World from CPU!\n");
    // Call a device function from the host: a kernel launch which will print
    // from the device. This launches 6 blocks of 1 thread each.
    cuda_hello<<<6,1>>>();
    //cuda_hello<<<1,6>>>(); comment and uncomment lines 13-14 for lab step 7 and 8
    // This call waits for all of the submitted GPU work to complete
    // (and flushes the device-side printf buffer to stdout).
    cudaDeviceSynchronize();
    return 0;
}
|
20,679 | #include <iostream>
#include <thrust/sort.h>
#include <set>
using namespace std;
int main(int argc, char const *argv[])
{
    // Read m undirected edges, store both orientations, sort the pairs by
    // (first, second) via two key-sorts, and print the sorted edge list.
    int n, m;
    cin >> n >> m;
    int *array = new int[2 * m];
    int *array2 = new int[2 * m];
    cout << n << "\t" << m << endl;
    for (int i = 0; i < m; ++i)
    {
        int start, end;
        cin >> start >> end;
        // Store the edge in both directions so each endpoint appears as key.
        array[2 * i] = start;
        array2[2 * i] = end;
        array[2 * i + 1] = end;
        array2[2 * i + 1] = start;
    }
    // Two-pass sort: secondary key first, then primary key.
    // NOTE(review): this pattern relies on the second sort being stable, but
    // thrust::sort_by_key is not guaranteed stable — consider
    // thrust::stable_sort_by_key if exact secondary ordering matters.
    thrust::sort_by_key(array2, array2 + 2 * m, array);
    thrust::sort_by_key(array, array + 2 * m, array2);
    for (int i = 0; i < 2 * m; i++)
    {
        cout << array[i] << "\t" << array2[i] << endl;
    }
    // fix: release the heap buffers (previously leaked).
    delete[] array;
    delete[] array2;
    return 0;
}
|
20,680 | #include "includes.h"
// https://gist.github.com/wh5a/4641641
// https://www.evl.uic.edu/sjames/cs525/final.html
__global__ void CodeParallele(double td, double h, float matDest) {
    // NOTE(review): empty stub kernel. `float matDest` is passed by value,
    // so this kernel could never write results back to the caller —
    // presumably it was meant to be `float*`. Confirm intent before
    // implementing the body.
}
20,681 | /*
@author Jack Clark
Simple program to simulate 2D advection using the finite volume approach, with naive averaging at cell boundaries.
Compile with nvcc -O3 advection.cu -o gpu_advection
*/
#include <fstream>
#include <sstream>
#include <math.h>
#include <assert.h>
#include <cuda.h>
#define NUM_CELLS_X 40
#define NUM_CELLS_Y 40
#define TIMESTEP 0.001
#define NUM_TIMESTEPS 100000
#define DELTA_X 1
#define DELTA_Y 1
#define PLOT_FREQUENCY 100
// CPU data
double h_flux_x[NUM_CELLS_X*NUM_CELLS_Y];
double h_flux_y[NUM_CELLS_X*NUM_CELLS_Y];
double h_q[NUM_CELLS_X*NUM_CELLS_Y];
double h_velocities[2];
double h_max_velocity;
// GPU data
double * d_flux_x;
double * d_flux_y;
double * d_q;
double * d_max_velocity;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
    // Report any CUDA error with its source location; optionally terminate
    // with the error code (used through the gpuErrchk macro).
    if (code == cudaSuccess)
        return;
    fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
    if (abort)
        exit(code);
}
int getIndex(int row, int col) {
    // Flatten a (row, col) cell coordinate into the 1D row-major index.
    return col + row * NUM_CELLS_X;
}
void printCSVFile(int counter) {
    // Dump the current q field to "result-<counter>.csv" as x,y,z rows.
    std::stringstream filename;
    filename << "result-" << counter << ".csv";
    std::ofstream csv( filename.str().c_str() );
    csv << "x, y, z" << std::endl;
    for (int row = 0; row < NUM_CELLS_Y; row++) {
        for (int col = 0; col < NUM_CELLS_X; col++) {
            csv << row
                << ","
                << col
                << ","
                << h_q[row * NUM_CELLS_X + col]
                << std::endl;
        }
    }
}
// Debug helper: print the full q field and both flux fields for one
// timestep, row by row, to stdout.
void printResult(int timestep) {
printf("\nTimestep %d \n", timestep);
printf("Q: ");
for (int i=0; i<NUM_CELLS_Y; i++) {
printf("\n");
for(int j=0; j<NUM_CELLS_X; j++) {
printf("%f ", h_q[getIndex(i,j)]);
}
}
printf("\n");
printf("Flux_X: ");
for (int i=0; i<NUM_CELLS_Y; i++) {
printf("\n");
for(int j=0; j<NUM_CELLS_X; j++) {
printf("%f ", h_flux_x[getIndex(i,j)]);
}
}
printf("\n");
printf("Flux_Y: ");
for (int i=0; i<NUM_CELLS_Y; i++) {
printf("\n");
for(int j=0; j<NUM_CELLS_X; j++) {
printf("%f ", h_flux_y[getIndex(i,j)]);
}
}
printf("\n");
}
void setup() {
    // Seed a 3x3 patch of concentration near the upper-left corner.
    for (int row = 1; row < 4; row++) {
        for (int col = 1; col < 4; col++) {
            h_q[getIndex(row, col)] = 5.0;
        }
    }
    // Uniform advection velocity (one component per axis).
    h_velocities[0] = 0.5;
    h_velocities[1] = 0.5;
    // Maximum velocity magnitude = Euclidean norm of the velocity vector.
    h_max_velocity = 0.0;
    for (int d = 0; d < (int)(sizeof(h_velocities) / sizeof(double)); d++) {
        h_max_velocity += h_velocities[d] * h_velocities[d];
    }
    h_max_velocity = sqrt(h_max_velocity);
}
void reconstruction() {
    // CPU flux reconstruction: average neighbouring cell values onto the
    // left/top faces, with an upwind correction scaled by h_max_velocity.
    // Column 0 has no left face and row 0 has no top face.
    for (int row = 0; row < NUM_CELLS_Y; row++) {
        for (int col = 0; col < NUM_CELLS_X; col++) {
            int cell = getIndex(row, col);
            if (col != 0) {
                h_flux_x[cell] = ((h_q[cell] + h_q[cell-1]) / 2) - (h_max_velocity/2 * (h_q[cell] - h_q[cell-1]));
            }
            if (row != 0) {
                h_flux_y[cell] = ((h_q[cell] + h_q[cell-NUM_CELLS_X]) / 2) - (h_max_velocity/2 * (h_q[cell] - h_q[cell-NUM_CELLS_X]));
            }
        }
    }
}
__global__ void reconstruction_kernel(const double * const d_q, double * d_flux_x, double * d_flux_y, const double * const d_max_velocity) {
    // Device counterpart of reconstruction(): one thread per cell of the
    // 2D launch grid, with a bounds guard for partial edge blocks.
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= NUM_CELLS_Y || col >= NUM_CELLS_X)
        return;
    int cell = row * NUM_CELLS_X + col;
    if (col != 0) {
        d_flux_x[cell] = ((d_q[cell] + d_q[cell-1]) / 2) - (*d_max_velocity/2 * (d_q[cell] - d_q[cell-1]));
    }
    if (row != 0) {
        d_flux_y[cell] = ((d_q[cell] + d_q[cell-NUM_CELLS_X]) / 2) - (*d_max_velocity/2 * (d_q[cell] - d_q[cell-NUM_CELLS_X]));
    }
}
__global__ void update_kernel(double * d_q, const double * const d_flux_x, const double * const d_flux_y) {
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    // fix: match the CPU reference (update_cells), which updates only cells
    // with row < NUM_CELLS_Y-1 and col < NUM_CELLS_X-1 (the update reads the
    // right/bottom neighbour's flux). The previous guards — a linear-index
    // cutoff at (NUM_CELLS_X-1)*(NUM_CELLS_Y-1)-1 and a `row <
    // NUM_CELLS_X-1` test for the y-flux — excluded the wrong cells and
    // dropped the y-flux term for part of the domain.
    if (row >= NUM_CELLS_Y - 1 || col >= NUM_CELLS_X - 1)
        return;
    int self = row * NUM_CELLS_X + col;
    double temp = d_q[self]
                + ((TIMESTEP/DELTA_X) * (d_flux_x[self] - d_flux_x[self+1]))
                + ((TIMESTEP/DELTA_Y) * (d_flux_y[self] - d_flux_y[self+NUM_CELLS_X]));
    // 0 limiter to remove numerical artefacts
    if(temp <= 0.0) {
        d_q[self] = 0.0;
    } else {
        d_q[self] = temp;
    }
}
void update_cells() {
    // CPU reference: explicit Euler update of q from the face fluxes,
    // clamped at zero. The last row/column are skipped because the update
    // reads the right/bottom neighbour's flux.
    for (int row = 0; row < NUM_CELLS_Y - 1; row++) {
        for (int col = 0; col < NUM_CELLS_X - 1; col++) {
            int cell = getIndex(row, col);
            assert(cell >= 0);
            assert(cell < NUM_CELLS_X * NUM_CELLS_Y);
            double updated = h_q[cell] + ((TIMESTEP/DELTA_X) * (h_flux_x[cell] - h_flux_x[cell+1])) + ((TIMESTEP/DELTA_Y) * (h_flux_y[cell] - h_flux_y[cell+NUM_CELLS_X]));
            h_q[cell] = (updated < 0) ? 0 : updated;
        }
    }
}
int main() {
    setup();
    printCSVFile(0);
    //printResult(0);
    // Allocate memory for GPU data - Naive assumption that our GPU can fit all our data in memory at once
    gpuErrchk(cudaMalloc((void **) &d_flux_x, sizeof(double) * NUM_CELLS_X * NUM_CELLS_Y));
    gpuErrchk(cudaMalloc((void **) &d_flux_y, sizeof(double) * NUM_CELLS_X * NUM_CELLS_Y));
    gpuErrchk(cudaMalloc((void **) &d_q, sizeof(double) * NUM_CELLS_X * NUM_CELLS_Y));
    gpuErrchk(cudaMalloc((void **) &d_max_velocity, sizeof(double)));
    gpuErrchk(cudaMemset(d_flux_x, 0, sizeof(double) * NUM_CELLS_X * NUM_CELLS_Y));
    gpuErrchk(cudaMemset(d_flux_y, 0, sizeof(double) * NUM_CELLS_X * NUM_CELLS_Y));
    gpuErrchk(cudaMemcpy(d_max_velocity, &h_max_velocity, sizeof(double), cudaMemcpyHostToDevice));
    gpuErrchk(cudaMemcpy(d_q, h_q, sizeof(double) * NUM_CELLS_X * NUM_CELLS_Y, cudaMemcpyHostToDevice));
    float time;
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    dim3 block(32, 32);
    // fix: the kernels were launched with <<<3, block>>>, i.e. a 3x1x1 grid.
    // With 32x32 blocks that never produces row >= 32, so the last rows of
    // the NUM_CELLS_Y-row domain were never touched by either kernel (the
    // CPU reference updates them). Tile the whole domain with a 2D grid;
    // both kernels already bounds-check.
    dim3 grid((NUM_CELLS_X + block.x - 1) / block.x,
              (NUM_CELLS_Y + block.y - 1) / block.y);
    for(int i=0; i<NUM_TIMESTEPS; i++) {
        reconstruction_kernel<<<grid, block>>>(d_q, d_flux_x, d_flux_y, d_max_velocity);
        gpuErrchk( cudaPeekAtLastError() );
        update_kernel<<<grid, block>>>(d_q, d_flux_x, d_flux_y);
        gpuErrchk( cudaPeekAtLastError() );
        // Copy GPU data back to host for recording - TODO: Improve this so that we only copy data back when we need it.
        gpuErrchk(cudaMemcpy(h_q, d_q, sizeof(double) * NUM_CELLS_X * NUM_CELLS_Y, cudaMemcpyDeviceToHost));
        gpuErrchk(cudaMemcpy(h_flux_x, d_flux_x, sizeof(double) * NUM_CELLS_X * NUM_CELLS_Y, cudaMemcpyDeviceToHost));
        gpuErrchk(cudaMemcpy(h_flux_y, d_flux_y, sizeof(double) * NUM_CELLS_X * NUM_CELLS_Y, cudaMemcpyDeviceToHost));
        if (i%PLOT_FREQUENCY==0) {
            printCSVFile(i/PLOT_FREQUENCY+1); // Please switch off all IO if you do performance tests.
            //printResult(i+1);
        }
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time, start, stop);
    printf ("Time for the program: %f ms\n", time);
    cudaFree(d_flux_x);
    cudaFree(d_flux_y);
    cudaFree(d_q);
    cudaFree(d_max_velocity);
    return 0;
}
20,682 | #include "includes.h"
__global__ void matmul_traditional(const float *a, const float *b, float *c, int n, int m){
    // Naive matrix multiply: thread (i, j) computes one element of c as the
    // dot product of a row of `a` with a column of `b`.
    // NOTE(review): the guard uses (i < n, j < m), but every index uses `n`
    // as the row stride (i * n + j, k * n + j) and the reduction runs k < n.
    // This is only self-consistent for square matrices (n == m) — confirm
    // intended semantics before using rectangular inputs.
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    //printf("%d %d %d %d %d %d\n",blockDim.x,blockDim.y,blockIdx.x,blockIdx.y,threadIdx.x,threadIdx.y);
    int idx = i * n + j;
    // Scratch demo of CUDA vector types; i2/f4 do not affect the result.
    int2 i2 = make_int2(1, 2);
    float4 f4 = make_float4(0, 0, 0, 0);
    f4.x = 0.1, f4.y = 0.2, f4.z = 0.3, f4.w = 0.4;
    //printf("%d %d %f %f %f %f\n", i2.x, i2.y, f4.x, f4.y, f4.z, f4.w);
    if(i < n and j < m){
    //printf("%d %d %d %d %d %d\n", i, j, idx, a[idx], b[idx], c[idx]);
    // Accumulate the dot product for output element idx.
    float sum = 0;
    for(int k = 0; k < n; k++){
    int idxa = i * n + k;
    int idxb = k * n + j;
    sum += a[idxa] * b[idxb];
    }
    c[idx] = sum;
    }
}
20,683 | #include "includes.h"
#define NOMINMAX
const unsigned int BLOCK_SIZE = 512;
__global__ void addKernelV2(float *c, const float *a, const float *b)
{
    // Element-wise c[i] = a[i] + b[i], one element per thread.
    // NOTE(review): there is no bounds check and the signature carries no
    // length, so the launch must supply exactly one thread per element —
    // any surplus thread reads/writes out of bounds. TODO: pass a size
    // parameter and guard (interface change; coordinate with callers).
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    c[i] = a[i] + b[i];
}
extern "C"
__global__ void add32(float* A, float *B, int size) {
    // In-place element-wise add A += B over a fully general 3D grid of 3D
    // blocks: flatten block and thread coordinates into one linear index.
    int blockRank = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int threadRank = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    int idx = blockRank * (blockDim.x * blockDim.y * blockDim.z) + threadRank;
    // Tail guard for launches with more threads than elements.
    if (idx < size) {
        A[idx] = A[idx] + B[idx];
    }
}
20,685 | #include "includes.h"
#define tileSize 32
//function for data initialization
void initialization( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the input data
void printInput( double *M, double *N, int arow, int acol, int brow, int bcol);
//(for Debugging) prints out the output data
void printOutput( double *P_C, double *P_G, int arow, int bcol);
//GPU kernels
// fix: removed a stray duplicate `__global__` qualifier line that preceded
// this definition — `__global__ __global__ void ...` does not compile.
__global__ void vectorSubtraction(const double *A, const double *B, double *C, int numElements)
{
    // Element-wise C = A - B using a grid-stride loop, so any launch
    // configuration covers all numElements entries.
    int gridIndex = blockDim.x * blockIdx.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = gridIndex; i < numElements; i += stride)
    {
        C[i] = A[i] - B[i];
    }
}
20,686 | #include <stdio.h>
#define ARRAY_LEN 4096
#define RUN_COUNT 1000
int max_print=20;
unsigned long long fnd_count=0;
void checker(int round, char* buf)
{
    // Scan the buffer for the marker characters 'A'..'J' (a contiguous
    // range in ASCII, matching the original per-character switch). Print
    // the first few hits (bounded by the global max_print budget) and
    // count every hit in the global fnd_count.
    for (int i = 0; i < ARRAY_LEN; i++)
    {
        char ch = buf[i];
        if (ch >= 'A' && ch <= 'J')
        {
            if (max_print)
            {
                printf("%d\t%d\t%c\n", round, i, ch);
                max_print--;
            }
            fnd_count++;
        }
    }
}
int main()
{
    // Repeatedly allocate a fresh device buffer, copy its contents to the
    // host WITHOUT ever writing to it, and scan the bytes for leftover
    // marker characters. The allocate/free pair inside the loop is
    // intentional: each iteration samples a brand-new device allocation.
    int i;
    size_t buf_size = ARRAY_LEN*sizeof(char);
    char *h_buf, *d_buf;
    cudaSetDevice(0);
    for(i=0; i<RUN_COUNT; i++)
    {
        h_buf = (char*)malloc(buf_size);
        cudaMalloc( (void**)&d_buf, buf_size );
        // Device -> host copy of uninitialized device memory (by design).
        // NOTE(review): malloc/cudaMalloc/cudaMemcpy results are unchecked.
        cudaMemcpy( h_buf, d_buf, buf_size, cudaMemcpyDeviceToHost );
        checker(i, h_buf);
        free(h_buf);
        cudaFree( d_buf );
    }
    printf("Total find %llu data\n", fnd_count);
    return 0;
}
|
20,687 | // nvcc fft_cuda_2d.cu -lcublas -lcufft -arch=compute_52 -o fft_cuda_2d
//https://www.researchgate.net/figure/Computing-2D-FFT-of-size-NX-NY-using-CUDAs-cuFFT-library-49-FFT-fast-Fourier_fig3_324060154
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#include <cufft.h>
#include "stdio.h"
#include "stdlib.h"
#include "time.h"
#include <iostream>
#include <fstream>
#include <string>
#include <sstream>
#include <vector>
#define DIM 4*65536//65536 = 256 * 256
#define NX 220
#define NY 220
using namespace std;
int main()
{
    // Load a 220x220 grayscale image from text, run a forward 2D FFT on the
    // GPU (dumped to fft_data.txt), then an inverse FFT (normalized dump to
    // output_data.txt), and report the elapsed GPU time.
    int n = 0; // number of pixel values read from the file
    int data[220*220];
    int x;
    ifstream File;
    File.open("lenna_grayscale.txt");
    if(!File.is_open()){
        cout<<"It failed"<<endl;
        return 0;
    }
    // fix: bound the read loop so a file containing more than NX*NY values
    // cannot overflow the fixed-size stack buffer.
    while(n < NX*NY && File>>x){
        data[n] = x;
        n++;
    }
    File.close();
    cout<<"n : "<<n<<endl;
    for(int i=0;i<n;i++){
        cout << data[i] << " ";
    }
    float elapsedTime = 0;
    cufftHandle plan;
    cufftComplex *host_data = (cufftComplex*)malloc(NX*NY*sizeof(cufftComplex));
    cufftComplex *fft_data = (cufftComplex*)malloc(NX*NY*sizeof(cufftComplex));
    cufftComplex *dev_data;
    cudaEvent_t start,stop;
    // FEED INPUT: real part from the image, imaginary part zero.
    srand(time(NULL));
    for(int i = 0;i<NX;i++){
        for(int j = 0;j<NY;j++){
            host_data[i*NY+j].x = (float)data[i*NY+j];
            host_data[i*NY+j].y = 0.0;
        }
    }
    // SHOW HOST DATA
    for(int i = 0;i<16;i++){
        printf("DATA: %3.1f %3.1f \n",host_data[i*NY+1].x,host_data[i*NY+1].y);
    }
    // ALLOCATE GPU MEMORY
    cudaMalloc((void**)&dev_data,sizeof(cufftComplex)*NX*NY);
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    // COPY INPUT
    cudaMemcpy(dev_data,host_data,NX*NY*sizeof(cufftComplex),cudaMemcpyHostToDevice);
    // CREATE CUFFT PLAN
    cufftPlan2d(&plan,NX,NY,CUFFT_C2C);
    // PERFORM COMPUTATION: forward FFT, in place
    cufftExecC2C(plan,dev_data,dev_data,CUFFT_FORWARD);
    // COPY BACK RESULTS
    cudaMemcpy(fft_data,dev_data,sizeof(cufftComplex)*NX*NY,cudaMemcpyDeviceToHost);
    ofstream outfile2;
    outfile2.open("fft_data.txt");
    for(int i = 0;i<NX;i++){
        for(int j = 0;j<NY;j++){
            if(j == NY - 1){
                outfile2<<fft_data[i*NY+j].x<<endl;
            }else{
                outfile2<<fft_data[i*NY+j].x<<",";
            }
        }
    }
    outfile2.close();
    // Inverse FFT, in place.
    cufftExecC2C(plan,dev_data,dev_data,CUFFT_INVERSE);//https://stackoverflow.com/questions/46562575/how-to-cuda-ifft
    // COPY BACK RESULTS
    cudaMemcpy(host_data,dev_data,sizeof(cufftComplex)*NX*NY,cudaMemcpyDeviceToHost);
    // GET CALCULATION TIME
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime,start,stop);
    // SHOW RESULTS (cuFFT's inverse transform is unnormalized, hence /(NX*NY))
    for(int i = 0;i<16;i++){
        printf("DATA: %3.1f %3.1f \n",host_data[i*NY+1].x/(NX*NY),host_data[i*NY+1].y/(NX*NY));
    }
    ofstream outfile;
    outfile.open("output_data.txt");
    for(int i = 0;i<NX;i++){
        for(int j = 0;j<NY;j++){
            if(j == NY - 1){
                outfile<<host_data[i*NY+j].x/(NX*NY)<<endl;
            }else{
                outfile<<host_data[i*NY+j].x/(NX*NY)<<",";
            }
        }
    }
    outfile.close();
    // FREE MEMORY (fix: fft_data was leaked and the events never destroyed)
    cufftDestroy(plan);
    cudaFree(dev_data);
    free(host_data);
    free(fft_data);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("elapsed time %f\n",elapsedTime);
    printf("CUFFT Calculation COMPLETED IN : % 5.3f ms \n",elapsedTime);
}
|
20,688 | #include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#define N 10
void add( int *a, int *b, int *c ) {
    // Serial CPU reference: element-wise c = a + b over all N entries
    // (single "CPU thread", stepping one element at a time).
    for (int tid = 0; tid < N; ++tid) {
        c[tid] = a[tid] + b[tid];
    }
}
__global__ void add_gpu( int *a, int *b, int *c ) {
    // One block per element: the block index selects the element to add.
    int tid = blockIdx.x;
    if (tid >= N)
        return;
    c[tid] = a[tid] + b[tid];
}
#define HANDLE_ERROR(e) _HANDLE_ERROR(e, __LINE__)
void _HANDLE_ERROR(cudaError_t e, int line)
{
    // Report the failing source line and the CUDA error text, then abort.
    // (Invoked via the HANDLE_ERROR macro, which injects __LINE__.)
    if (e == cudaSuccess)
        return;
    printf("line: %d. error %s\n", line, cudaGetErrorString(e));
    exit (1);
}
int main()
{
    // Vector addition demo: a[i] = -i, b[i] = i*i, c = a + b on the GPU.
    int a[N], b[N], c[N];
    int *dev_a, *dev_b, *dev_c;

    // Device buffers for the three vectors.
    HANDLE_ERROR( cudaMalloc( (void**)&dev_a, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_b, N * sizeof(int) ) );
    HANDLE_ERROR( cudaMalloc( (void**)&dev_c, N * sizeof(int) ) );

    // Host-side inputs.
    for (int i = 0; i < N; i++) {
        a[i] = -i;
        b[i] = i * i;
    }

    // Upload the inputs.
    HANDLE_ERROR( cudaMemcpy( dev_a, a, N * sizeof(int),
                              cudaMemcpyHostToDevice ) );
    HANDLE_ERROR( cudaMemcpy( dev_b, b, N * sizeof(int),
                              cudaMemcpyHostToDevice ) );

    // One block per element (the serial CPU path would be add(a,b,c)).
    add_gpu<<<N,1>>>( dev_a, dev_b, dev_c );

    // Download the result; the blocking copy also synchronizes.
    HANDLE_ERROR( cudaMemcpy( c, dev_c, N * sizeof(int),
                              cudaMemcpyDeviceToHost) );

    for (int i = 0; i < N; i++) {
        printf( "%d + %d = %d\n", a[i], b[i], c[i] );
    }

    cudaFree( dev_a );
    cudaFree( dev_b );
    cudaFree( dev_c );
    return 0;
}
|
20,689 | #include <stdio.h>
__global__ void my_gpu_func(int* buf, int w, int h) {
    // Increment each in-bounds element of the w x h row-major buffer.
    int col = blockIdx.x * blockDim.x + threadIdx.x;
    int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= w || row >= h)
        return;
    buf[row * w + col] += 1;
}
extern "C"{
// Print the w x h host buffer, increment every element on the GPU, and
// copy the result back in place. C linkage so it can be called from C/FFI.
void my_c_func(int *buf, int *wptr, int *hptr)
{
    int h = *hptr;
    int w = *wptr;
    printf("print from c, w=%d, h=%d\n", w, h);
    for (int y = 0; y < h; ++y) {
        for (int x = 0; x < w; ++x) {
            printf("%d, ", buf[ y * w + x ]);
        }
        printf("\n");
    }
    printf("\n");
    int *dev_buf;
    size_t size = w * h * sizeof(int);
    cudaMalloc((void**)&dev_buf, size);
    cudaMemcpy(dev_buf, buf, size, cudaMemcpyHostToDevice);
    // fix: a single block of (w x h) threads fails silently once w * h
    // exceeds the per-block thread limit (1024). Tile the image with a 2D
    // grid instead; my_gpu_func already bounds-checks, so small inputs
    // produce the same result as before.
    dim3 block(16, 16);
    dim3 grid((w + block.x - 1) / block.x, (h + block.y - 1) / block.y);
    my_gpu_func<<<grid, block>>>(dev_buf, w, h);
    cudaMemcpy(buf, dev_buf, size, cudaMemcpyDeviceToHost);
    cudaFree(dev_buf);
    return;
}
}
__global__ void
process_kernel1(const float *input1,const float *input2, float *output, int datasize){
    // output[i] = sin(input1[i]) + cos(input2[i]) over a general 3D grid of
    // 3D blocks; the block/thread coordinates are flattened to one index.
    // NOTE(review): no guard against datasize — the launch must supply
    // exactly one thread per element.
    int blockRank = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int threadRank = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    int gid = blockRank * (blockDim.x * blockDim.y * blockDim.z) + threadRank;
    output[gid] = sin(input1[gid]) + cos(input2[gid]);
}
__global__ void
process_kernel2(float *input1, float *output, int datasize){
    // output[i] = log(input1[i]); same flattened 3D indexing as kernel1.
    // NOTE(review): no guard against datasize — the launch must supply
    // exactly one thread per element.
    int blockRank = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int threadRank = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    int gid = blockRank * (blockDim.x * blockDim.y * blockDim.z) + threadRank;
    output[gid] = log(input1[gid]);
}
__global__ void
process_kernel3(float *input1, float *output, int datasize){
    // output[i] = sqrt(input1[i]); same flattened 3D indexing as kernel1.
    // NOTE(review): no guard against datasize — the launch must supply
    // exactly one thread per element.
    int blockRank = blockIdx.x + gridDim.x * (blockIdx.y + gridDim.y * blockIdx.z);
    int threadRank = threadIdx.x + blockDim.x * (threadIdx.y + blockDim.y * threadIdx.z);
    int gid = blockRank * (blockDim.x * blockDim.y * blockDim.z) + threadRank;
    output[gid] = sqrt(input1[gid]);
}
20,691 | #include <iostream>
#include <fstream>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <sys/time.h>
using namespace std;
#include "bfs_kernel.cu"
/*
* gpu_bfs.cu
*
* Usage: ./executable <graph_file> <output file>
*
* Input: Name of the file containing the graph. Expected format
* is binary with the following data (with no newlines)
* number of nodes (4 bytes)
* cumulative list of node degrees (4 bytes * numer of nodes)
* edge list (4 bytes * number of links)
*
* example: 4 nodes; node 0 edges: 1,2; node 1 edges: 0; node 1 edges: 3
* 4
* 0,2,3
* 1,2,0,3
* written in binary with no newlines or commas
*
* Output:
* stdout: Time it took kernel to run, in microseconds
* output file: Each node and its distance from starting node
* (0) cost:0
* (1) cost:3
*
*/
const int MAX_THREADS_PER_BLOCK = 256;
char *infile = NULL;
int starting_node_id;
int nb_nodes;
int nb_links;
int *degrees;
int *starting;
int *links;
// Loads a binary CSR graph: node count (4 bytes), cumulative degree list
// (4 bytes per node), then the edge list (4 bytes per link). Fills the
// globals nb_nodes, nb_links, degrees, starting and links.
void readFile(char *filename)
{
    ifstream finput(filename, ios::in | ios::binary);
    if(!finput.is_open())
    {
        cout << "Unable to open file" << endl;
        exit(EXIT_FAILURE);
    }
    // Read number of nodes, first 4 bytes of file
    finput.read((char*)&nb_nodes, 4);
    // Off-by-one fixed: valid node ids are 0 .. nb_nodes-1 (was "> nb_nodes").
    // NOTE(review): main() assigns starting_node_id only AFTER this function
    // returns, so this check sees the global's initial value — verify intent.
    if (starting_node_id < 0 || starting_node_id >= nb_nodes)
    {
        cerr << "Starting position is invalid" << endl;
        exit(EXIT_FAILURE);
    }
    // Read cumulative degrees, 4 bytes per node
    degrees = new int[nb_nodes];
    finput.read((char*) degrees, nb_nodes * 4);
    // starting[i] = offset of node i's first edge (exclusive prefix of degrees)
    starting = new int[nb_nodes];
    memset(starting, 0, sizeof(int) * nb_nodes);
    for (int i = 1; i < nb_nodes; i++)
    {
        starting[i] = degrees[i - 1];
    }
    // Read links, 4 bytes per link; total is the last cumulative degree
    nb_links = degrees[nb_nodes - 1];
    links = new int[nb_links];
    finput.read((char*) links, nb_links * 4);
    finput.close();
}
// Runs a level-synchronous BFS on the GPU from starting_node_id and writes
// one "(node) cost:distance" line per node to outFile. Uses the globals
// filled by readFile(): nb_nodes, nb_links, degrees, starting, links.
void bfsGraph(char *outFile)
{
    // Host node table (edge-list offset + degree) and visited flags.
    Node *h_graph_nodes = (Node *) malloc(sizeof(Node) * nb_nodes);
    bool *h_graph_visited = (bool *) malloc(sizeof(bool) * nb_nodes);
    h_graph_nodes[0].starting = 0;
    h_graph_nodes[0].no_of_edges = degrees[0];
    h_graph_visited[0] = false;
    for (int i = 1; i < nb_nodes; i++)
    {
        h_graph_nodes[i].starting = starting[i];
        h_graph_nodes[i].no_of_edges = degrees[i] - degrees[i-1];
        h_graph_visited[i] = false;
    }
    h_graph_visited[starting_node_id] = true;
    // Copy node table to device memory
    Node *d_graph_nodes;
    cudaMalloc((void **) &d_graph_nodes, sizeof(Node) * nb_nodes);
    cudaMemcpy(d_graph_nodes, h_graph_nodes, sizeof(Node) *
               nb_nodes, cudaMemcpyHostToDevice);
    // Copy edge list to device memory
    int *d_edge_list;
    cudaMalloc((void **) &d_edge_list, sizeof(int) * nb_links);
    cudaMemcpy(d_edge_list, links, sizeof(int) * nb_links,
               cudaMemcpyHostToDevice);
    // Copy the visited array to device memory
    bool *d_graph_visited;
    cudaMalloc((void **) &d_graph_visited, sizeof(bool) * nb_nodes);
    cudaMemcpy(d_graph_visited, h_graph_visited, sizeof(bool) *
               nb_nodes, cudaMemcpyHostToDevice);
    // Cost (distance) array: -1 marks "not reached yet".
    int *h_cost = (int *) malloc(sizeof(int) * nb_nodes);
    for (int i = 0; i < nb_nodes; i++)
    {
        h_cost[i] = -1;
    }
    h_cost[starting_node_id] = 0;
    int *d_cost;
    cudaMalloc((void **) &d_cost, sizeof(int) * nb_nodes);
    cudaMemcpy(d_cost, h_cost, sizeof(int) * nb_nodes,
               cudaMemcpyHostToDevice);
    // One thread per node, capped at MAX_THREADS_PER_BLOCK per block.
    int num_of_blocks = 1; // at least 1
    int num_of_threads_per_block = nb_nodes;
    if (nb_nodes > MAX_THREADS_PER_BLOCK)
    {
        num_of_blocks = (int) ceil((double) nb_nodes /
                                   (double) MAX_THREADS_PER_BLOCK);
        num_of_threads_per_block = MAX_THREADS_PER_BLOCK;
    }
    bool *d_over;
    cudaMalloc((void **) &d_over, sizeof(bool));
    bool stop;
    int level = 0;
    // One kernel launch per BFS level, until a level expands no node.
    struct timeval start, end;
    gettimeofday(&start, NULL);
    do
    {
        stop = false;
        cudaMemcpy(d_over, &stop, sizeof(bool),
                   cudaMemcpyHostToDevice);
        bfs_kernel<<<num_of_blocks,
                     num_of_threads_per_block>>>(d_graph_nodes, d_edge_list,
                                                 d_graph_visited, d_cost, level, d_over, nb_nodes);
        cudaDeviceSynchronize(); // replaces deprecated cudaThreadSynchronize()
        cudaMemcpy(&stop, d_over, sizeof(bool),
                   cudaMemcpyDeviceToHost);
        level++;
    } while(stop);
    gettimeofday(&end, NULL);
    // Print duration of all kernel iterations, in microseconds.
    printf("%ld\n",
           (end.tv_sec * 1000000 + end.tv_usec)
           - (start.tv_sec * 1000000 + start.tv_usec));
    cudaMemcpy(h_cost, d_cost, sizeof(int) * nb_nodes,
               cudaMemcpyDeviceToHost);
    cudaMemcpy(h_graph_visited, d_graph_visited, sizeof(bool) *
               nb_nodes, cudaMemcpyDeviceToHost);
    // Store results into a file
    FILE *fpo = fopen(outFile, "w");
    for (int i = 0; i < nb_nodes; i++)
    {
        fprintf(fpo, "(%d) cost:%d\n", i, h_cost[i]);
    }
    fclose(fpo);
    // Clean up. links was allocated with new[] in readFile, so it must be
    // released with delete[] (the original free() was undefined behavior).
    free(h_graph_nodes);
    delete[] links;
    free(h_graph_visited);
    free(h_cost);
    cudaFree(d_graph_nodes);
    cudaFree(d_edge_list);
    cudaFree(d_graph_visited);
    cudaFree(d_cost);
    cudaFree(d_over); // was leaked
}
// Entry point: parse arguments, load the graph, pick a random starting
// node, and run the GPU BFS.
int main(int argc, char **argv)
{
    if (argc != 3)
    {
        printf("Usage: %s <input file> <output file>\n", argv[0]);
        exit(EXIT_FAILURE);
    }
    readFile(argv[1]);
    // rand() is deliberately unseeded here, matching the original behavior.
    starting_node_id = rand() % nb_nodes;
    bfsGraph(argv[2]);
    return 0;
}
20,692 | #include "heatmap_update.cuh"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <cstdlib>
#include <iostream>
#include <cmath>
#include <omp.h>
// Decays every heatmap cell to 80% of its value (rounded to nearest int).
// One thread per cell of the size x size map.
__global__
void fadeHeat(int *d_heatmap, int size)
{
    const int cell = blockIdx.x * blockDim.x + threadIdx.x;
    if (cell >= size * size)
        return;
    d_heatmap[cell] = (int)round(d_heatmap[cell] * 0.80);
}
// Adds 40 heat units at each agent's (x, y) cell. atomicAdd because several
// agents may occupy the same cell. One thread per agent.
__global__
void heatIntensify(int *d_heatmap, int *x, int *y, int agent_size, int size)
{
    const int agent = blockIdx.x * blockDim.x + threadIdx.x;
    if (agent >= agent_size)
        return;
    atomicAdd(&d_heatmap[y[agent] * size + x[agent]], 40);
}
// Clamps every heatmap cell to the maximum displayable value (255).
__global__
void setMaxHeat(int *d_heatmap, int size)
{
    const int cell = blockIdx.x * blockDim.x + threadIdx.x;
    if (cell < size * size && d_heatmap[cell] > 255)
        d_heatmap[cell] = 255;
}
// Expands each heatmap cell into a cellSize x cellSize block of the scaled
// heatmap (nearest-neighbor upscale).
// NOTE(review): the scaled index is built directly from blockIdx.x and
// blockDim.x, so the result depends on the exact launch shape — this kernel
// is launched with (SIZE*SIZE)/THREADSPERBLOCK blocks of THREADSPERBLOCK
// threads from updateScaledHeatmap(); verify the math for other configs.
__global__
void scaleHeatmap(int *d_heatmap, int *d_scaledHeatmap, int size, int cellSize)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size*size)
{
// Each source cell writes cellSize rows of cellSize pixels.
for (int cellY = 0; cellY < cellSize; cellY++)
{
int scaledBlock = (blockIdx.x * cellSize + cellY) * blockDim.x * cellSize;
for (int cellX = 0; cellX < cellSize; cellX++)
{
int scaledThread = threadIdx.x * cellSize + cellX;
// atomicExch(&d_scaledHeatmap[scaledBlock + scaledThread], d_heatmap[index]);
d_scaledHeatmap[scaledBlock + scaledThread] = d_heatmap[index];
// # if __CUDA_ARCH__>=200
// printf("cellY: %d, cellX: %d\n", cellY, cellX);
// #endif
}
}
}
}
// Applies a 5x5 Gaussian kernel (weights summing to 273) to the scaled
// heatmap and packs the result into the alpha byte of an ARGB pixel value.
// NOTE(review): several index expressions look inconsistent — the guard
// tests i + scaledSize*k (row stepping) but the read uses (i + k) + (j + l)
// (stepping by single elements), and blurring is keyed off blockIdx/blockDim
// rather than a 2D pixel coordinate. Confirm against the intended layout
// before modifying; code left byte-identical.
__global__
void blurHeatmap(int *d_scaledHeatmap, int *d_blurredHeatmap, int scaledSize)
{
int WEIGHTSUM = 273;
// 5x5 Gaussian weights.
const int w[5][5] = {
{ 1, 4, 7, 4, 1 },
{ 4, 16, 26, 16, 4 },
{ 7, 26, 41, 26, 7 },
{ 4, 16, 26, 16, 4 },
{ 1, 4, 7, 4, 1 }
};
int i = blockIdx.x * blockDim.x;
int modI = (blockIdx.x * blockDim.x + threadIdx.x) % scaledSize;
int j = threadIdx.x;
int sum = 0;
// Skip a 2-pixel border so the 5x5 window stays in bounds.
if(modI >= 2 && modI < scaledSize-2 && i+j >= 2*scaledSize && blockIdx.x < 5*scaledSize-2)
for (int k = -2; k < 3; k++)
{
for (int l = -2; l < 3; l++)
{
if(i+scaledSize*k >= 2*scaledSize && i+scaledSize*k < scaledSize*scaledSize-2*scaledSize)
sum += w[2 + k][2 + l] * d_scaledHeatmap[(i + k) + (j + l)];
}
}
int value = sum / WEIGHTSUM;
// Blurred intensity goes into the top byte; 0x00FF0000 fixes the red channel.
d_blurredHeatmap[i + j] = 0x00FF0000 | value << 24;
}
int THREADSPERBLOCK = 1024;
// Fades the whole heatmap toward zero on the GPU (each cell *= 0.8).
// heatmap is a pageable host buffer of SIZE*SIZE ints, updated in place.
void updateHeatFade(int *heatmap, int SIZE)
{
    int *d_heatmap;
    const size_t bytes = (size_t)SIZE * SIZE * sizeof(int);
    cudaMalloc((void **)&d_heatmap, bytes);
    // Plain synchronous copies: the host buffer is pageable, so the Async
    // variants gave no overlap, and the result must be complete on return.
    cudaMemcpy(d_heatmap, heatmap, bytes, cudaMemcpyHostToDevice);
    fadeHeat<<<((SIZE*SIZE)/THREADSPERBLOCK)+1, THREADSPERBLOCK>>>(d_heatmap, SIZE);
    cudaMemcpy(heatmap, d_heatmap, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_heatmap);
}
// Deposits heat at agent positions: copies the heatmap and the agents'
// x/y coordinate arrays to the device, runs heatIntensify, copies the
// heatmap back. All host buffers are pageable and updated synchronously.
void updateHeatIntensity(int *heatmap, int *x, int *y, int agent_size, int SIZE)
{
    int *d_heatmap, *d_x, *d_y;
    const size_t mapBytes = (size_t)SIZE * SIZE * sizeof(int);
    const size_t agentBytes = (size_t)agent_size * sizeof(int);
    cudaMalloc((void **)&d_heatmap, mapBytes);
    cudaMalloc((void **)&d_x, agentBytes);
    cudaMalloc((void **)&d_y, agentBytes);
    // Synchronous copies: pageable host memory, so Async gave no overlap and
    // the D2H result must be complete before the caller reads heatmap.
    cudaMemcpy(d_heatmap, heatmap, mapBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_x, x, agentBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, y, agentBytes, cudaMemcpyHostToDevice);
    heatIntensify<<<((agent_size)/THREADSPERBLOCK)+1, THREADSPERBLOCK>>>(d_heatmap, d_x, d_y, agent_size, SIZE);
    cudaMemcpy(heatmap, d_heatmap, mapBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_heatmap);
    cudaFree(d_x);
    cudaFree(d_y);
}
// Clamps every heatmap cell to 255 on the GPU; heatmap updated in place.
void updateSetMaxHeat(int *heatmap, int SIZE)
{
    int *d_heatmap;
    const size_t bytes = (size_t)SIZE * SIZE * sizeof(int);
    cudaMalloc((void **)&d_heatmap, bytes);
    // Synchronous copies (pageable host memory; see updateHeatFade).
    cudaMemcpy(d_heatmap, heatmap, bytes, cudaMemcpyHostToDevice);
    setMaxHeat<<<((SIZE*SIZE)/THREADSPERBLOCK)+1, THREADSPERBLOCK>>>(d_heatmap, SIZE);
    cudaMemcpy(heatmap, d_heatmap, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_heatmap);
}
// Upscales the heatmap by cellSize in both dimensions on the GPU.
void updateScaledHeatmap(int *heatmap, int *scaledHeatmap, int SIZE, int cellSize)
{
    const size_t mapBytes = (size_t)SIZE * SIZE * sizeof(int);
    const size_t scaledBytes = (size_t)SIZE * SIZE * cellSize * cellSize * sizeof(int);
    int *d_heatmap;
    int *d_scaledHeatmap;
    cudaMalloc((void **)&d_heatmap, mapBytes);
    cudaMalloc((void **)&d_scaledHeatmap, scaledBytes);
    // Synchronous copies (pageable host memory; see updateHeatFade).
    cudaMemcpy(d_heatmap, heatmap, mapBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_scaledHeatmap, scaledHeatmap, scaledBytes, cudaMemcpyHostToDevice);
    // Ceil-divide so tail cells are processed when SIZE*SIZE is not a
    // multiple of THREADSPERBLOCK (the old grid truncated and skipped them;
    // the kernel guards index < size*size, so the extra block is safe).
    int blocks = (SIZE * SIZE + THREADSPERBLOCK - 1) / THREADSPERBLOCK;
    scaleHeatmap<<<blocks, THREADSPERBLOCK>>>(d_heatmap, d_scaledHeatmap, SIZE, cellSize);
    cudaMemcpy(heatmap, d_heatmap, mapBytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(scaledHeatmap, d_scaledHeatmap, scaledBytes, cudaMemcpyDeviceToHost);
    cudaFree(d_heatmap);
    cudaFree(d_scaledHeatmap);
}
// Runs the 5x5 Gaussian blur kernel over the scaled heatmap.
// NOTE(review): the truncating grid size skips the tail when
// scaledSize*scaledSize is not a multiple of THREADSPERBLOCK, but it cannot
// simply be rounded up because blurHeatmap writes d_blurredHeatmap[i + j]
// without a bounds guard — an extra block would write out of range.
void updateBlurredHeatmap(int *scaledHeatmap, int *blurredHeatmap, int scaledSize)
{
    int *d_scaledHeatmap;
    int *d_blurredHeatmap;
    const size_t bytes = (size_t)scaledSize * scaledSize * sizeof(int);
    cudaMalloc((void **)&d_scaledHeatmap, bytes);
    cudaMalloc((void **)&d_blurredHeatmap, bytes);
    // Synchronous copies (pageable host memory; see updateHeatFade).
    cudaMemcpy(d_blurredHeatmap, blurredHeatmap, bytes, cudaMemcpyHostToDevice);
    cudaMemcpy(d_scaledHeatmap, scaledHeatmap, bytes, cudaMemcpyHostToDevice);
    blurHeatmap<<<(scaledSize*scaledSize)/THREADSPERBLOCK, THREADSPERBLOCK>>>(d_scaledHeatmap, d_blurredHeatmap, scaledSize);
    cudaMemcpy(blurredHeatmap, d_blurredHeatmap, bytes, cudaMemcpyDeviceToHost);
    cudaMemcpy(scaledHeatmap, d_scaledHeatmap, bytes, cudaMemcpyDeviceToHost);
    cudaFree(d_blurredHeatmap);
    cudaFree(d_scaledHeatmap);
}
20,693 | /******************************************************************************
This function converts HSV values to RGB values, scaled from 0 to maxBrightness
The ranges for the input variables are:
hue: 0-360
sat: 0-255
lig: 0-255
The ranges for the output variables are:
r: 0-maxBrightness
g: 0-maxBrightness
b: 0-maxBrightness
r,g, and b are passed as pointers, because a function cannot have 3 return variables
Use it like this:
int hue, sat, val;
unsigned char red, green, blue;
// set hue, sat and val
hsv2rgb(hue, sat, val, &red, &green, &blue, maxBrightness); //pass r, g, and b as the location where the result should be stored
// use r, b and g.
(c) Elco Jacobs, E-atelier Industrial Design TU/e, July 2011.
*****************************************************************************/
// Converts HSV (hue 0-360, sat 0-255, val 0-255) to RGB scaled to
// 0..maxBrightness, written through the r/g/b out-pointers.
__device__ void hsv2rgb(unsigned int hue, unsigned int sat, unsigned int val, unsigned char * r, unsigned char * g, unsigned char * b,
unsigned char maxBrightness) {
    if (val > 255) val = 255;
    // Wrap hue so hue == 360 (and larger) lands in a valid sextant; the
    // original left *r/*g/*b uninitialized for hue >= 360 (H_accent == 6
    // matched no switch case).
    hue %= 360;
    unsigned int H_accent = hue / 60;               // color-wheel sextant, 0..5
    unsigned int bottom = ((255 - sat) * val) >> 8; // min channel level
    unsigned int top = val;                         // max channel level
    unsigned char rising = ((top - bottom) * (hue % 60)) / 60 + bottom;
    unsigned char falling = ((top - bottom) * (60 - hue % 60)) / 60 + bottom;
    switch (H_accent) {
    case 0:
        *r = top;
        *g = rising;
        *b = bottom;
        break;
    case 1:
        *r = falling;
        *g = top;
        *b = bottom;
        break;
    case 2:
        *r = bottom;
        *g = top;
        *b = rising;
        break;
    case 3:
        *r = bottom;
        *g = falling;
        *b = top;
        break;
    case 4:
        *r = rising;
        *g = bottom;
        *b = top;
        break;
    case 5:
        *r = top;
        *g = bottom;
        *b = falling;
        break;
    }
    // Scale values to maxBrightness
    *r = *r * maxBrightness / 255;
    *g = *g * maxBrightness / 255;
    *b = *b * maxBrightness / 255;
}
// Host-side twin of hsv2rgb: HSV (hue 0-360, sat 0-255, val 0-255) to RGB
// scaled to 0..maxBrightness, via the r/g/b out-pointers.
void hsv2rgbSerial(unsigned int hue, unsigned int sat, unsigned int val, unsigned char * r, unsigned char * g, unsigned char * b,
unsigned char maxBrightness) {
    if (val > 255) val = 255;
    // Wrap hue so hue == 360 maps into a valid sextant; the original left
    // the outputs uninitialized for hue >= 360 (no switch case matched).
    hue %= 360;
    unsigned int H_accent = hue / 60;               // color-wheel sextant, 0..5
    unsigned int bottom = ((255 - sat) * val) >> 8; // min channel level
    unsigned int top = val;                         // max channel level
    unsigned char rising = ((top - bottom) * (hue % 60)) / 60 + bottom;
    unsigned char falling = ((top - bottom) * (60 - hue % 60)) / 60 + bottom;
    switch (H_accent) {
    case 0:
        *r = top;
        *g = rising;
        *b = bottom;
        break;
    case 1:
        *r = falling;
        *g = top;
        *b = bottom;
        break;
    case 2:
        *r = bottom;
        *g = top;
        *b = rising;
        break;
    case 3:
        *r = bottom;
        *g = falling;
        *b = top;
        break;
    case 4:
        *r = rising;
        *g = bottom;
        *b = top;
        break;
    case 5:
        *r = top;
        *g = bottom;
        *b = falling;
        break;
    }
    // Scale values to maxBrightness
    *r = *r * maxBrightness / 255;
    *g = *g * maxBrightness / 255;
    *b = *b * maxBrightness / 255;
}
|
20,694 | #include "includes.h"
// One compare-exchange step of a bitonic sorting network. Each thread pairs
// with index i XOR j; the k bit of i selects ascending vs descending order.
// Only the lower-indexed thread of each pair performs the swap.
__global__ void bitonic_sort_step(int *dev_values, int j, int k)
{
    const unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
    const unsigned int partner = i ^ j;
    if (partner <= i)
        return; // the higher-indexed thread of the pair does nothing
    const int a = dev_values[i];
    const int b = dev_values[partner];
    const bool ascending = (i & k) == 0;
    // Swap when the pair is out of order for this direction.
    if ((ascending && a > b) || (!ascending && a < b)) {
        dev_values[i] = b;
        dev_values[partner] = a;
    }
}
20,695 | #include<iostream>
#include <cuda.h>
// Tree reduction: each block sums blockDim.x inputs in shared memory and
// writes its partial sum to g_odata[blockIdx.x]. Requires a power-of-two
// blockDim.x and blockDim.x * sizeof(int) dynamic shared memory.
__global__ void reduce_kernel(const int* g_idata, int* g_odata, unsigned int n)
{
    extern __shared__ int sdata[];
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    // Zero-fill out-of-range slots so the tree below never adds
    // uninitialized shared memory in a partially filled last block
    // (the original left those slots unwritten and guarded reads instead).
    sdata[threadIdx.x] = (i < n) ? g_idata[i] : 0;
    __syncthreads();
    for (unsigned int s = blockDim.x/2; s > 0; s >>= 1) {
        if (threadIdx.x < s) {
            sdata[threadIdx.x] += sdata[threadIdx.x + s];
        }
        __syncthreads(); // barrier outside the divergent branch
    }
    if (threadIdx.x == 0)
    {
        g_odata[blockIdx.x] = sdata[0];
    }
}
// Sums N ints on the GPU by repeated block-level reduction.
// threads_per_block must be a power of two (reduce_kernel's requirement).
__host__ int reduce(const int* arr, unsigned int N, unsigned int threads_per_block)
{
    int gdim = (N + threads_per_block - 1) / threads_per_block;
    int* dev_i;
    int* dev_o;
    cudaMallocManaged(&dev_i, N * sizeof(int));
    cudaMallocManaged(&dev_o, gdim * sizeof(int)); // first pass is the largest
    cudaMemcpy(dev_i, arr, N * sizeof(int), cudaMemcpyHostToDevice);
    unsigned int size = N;
    // Each pass shrinks the problem to one partial sum per block; loop until
    // one value remains. The original looped a fixed gdim times, launching
    // far more (redundant) kernels than the ~log_b(N) actually needed.
    while (size > 1)
    {
        gdim = (size + threads_per_block - 1) / threads_per_block;
        reduce_kernel<<<gdim, threads_per_block, threads_per_block*sizeof(int)>>>(dev_i, dev_o, size);
        cudaDeviceSynchronize();
        size = gdim;
        // Ping-pong the buffers: last pass's output is next pass's input.
        int *temp = dev_o;
        dev_o = dev_i;
        dev_i = temp;
    }
    int op = dev_i[0]; // managed memory: readable on host after the sync
    cudaFree(dev_i);
    cudaFree(dev_o);
    return op;
}
// Inverse SMPTE ST 2084 (PQ) EOTF: maps a normalized PQ signal N in [0,1]
// to absolute luminance in cd/m^2 (0..10000).
__device__ float Pq2Luma(float N) {
    // All literals carry the 'f' suffix so the math stays in single
    // precision on the device (the original promoted to double).
    const float pq_m1 = 0.1593017578125f; // ( 2610.0 / 4096.0 ) / 4.0
    const float pq_m2 = 78.84375f;        // ( 2523.0 / 4096.0 ) * 128.0
    const float pq_c1 = 0.8359375f;       // 3424.0 / 4096.0 or pq_c3 - pq_c2 + 1.0
    const float pq_c2 = 18.8515625f;      // ( 2413.0 / 4096.0 ) * 32.0
    const float pq_c3 = 18.6875f;         // ( 2392.0 / 4096.0 ) * 32.0
    const float pq_C = 10000.0f;          // peak luminance, cd/m^2
    float Np = powf( N, 1.0f / pq_m2 );
    float L = Np - pq_c1;
    if ( L < 0.0f ) {
        L = 0.0f;
    }
    L = L / ( pq_c2 - pq_c3 * Np );
    L = powf( L, 1.0f / pq_m1 );
    L = L * pq_C;
    return L; // returns cd/m^2
}
// SMPTE ST 2084 (PQ) EOTF inverse: maps absolute luminance C in cd/m^2
// (0..10000) to a normalized PQ signal in [0,1].
__device__ float Luma2Pq(float C) {
    // 'f' suffixes keep the math in single precision (original used double).
    const float pq_m1 = 0.1593017578125f; // ( 2610.0 / 4096.0 ) / 4.0
    const float pq_m2 = 78.84375f;        // ( 2523.0 / 4096.0 ) * 128.0
    const float pq_c1 = 0.8359375f;       // 3424.0 / 4096.0 or pq_c3 - pq_c2 + 1.0
    const float pq_c2 = 18.8515625f;      // ( 2413.0 / 4096.0 ) * 32.0
    const float pq_c3 = 18.6875f;         // ( 2392.0 / 4096.0 ) * 32.0
    const float pq_C = 10000.0f;          // peak luminance, cd/m^2
    float L = C / pq_C;
    float Lm = powf( L, pq_m1 );
    float N = ( pq_c1 + pq_c2 * Lm ) / ( 1.0f + pq_c3 * Lm );
    N = powf( N, pq_m2 );
    return N;
}
// Rec.2020 luma from linear R, G, B components.
__device__ float Luma(float R, float G, float B) {
    return R * 0.2627f + G * 0.6780f + B * 0.0593f;
}
// Marks out-of-gamut pixels with an overlay color. Input/output are RGBA
// float images (4 floats per pixel). When p_OverlayLuma is set, a pixel is
// replaced if its PQ-encoded Rec.2020 luma exceeds the PQ threshold derived
// from p_Luminance; otherwise any single channel exceeding the threshold
// triggers the replacement. One thread per pixel on a 2D launch.
__global__ void LegalOverlayKernel(int p_Width, int p_Height, float p_Luminance, float p_OverlayR, float p_OverlayG, float p_OverlayB, int p_OverlayDisplay, int p_OverlayLuma, const float* p_Input, float* p_Output)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if ((x < p_Width) && (y < p_Height))
{
const int index = ((y * p_Width) + x) * 4;
// PQ-encoded threshold for the requested luminance ceiling.
float t = Luma2Pq(p_Luminance);
int overlay = p_OverlayDisplay;
int lumaWarn = p_OverlayLuma;
// Overlay color arrives as percentages; normalize to 0..1.
float rOver, gOver, bOver;
rOver = p_OverlayR / 100;
gOver = p_OverlayG / 100;
bOver = p_OverlayB / 100;
float r, g, b, a;
r = p_Input[index + 0];
g = p_Input[index + 1];
b = p_Input[index + 2];
a = p_Input[index + 3];
if (lumaWarn && overlay) {
// Decode each channel to linear light, take Rec.2020 luma, and
// re-encode so it can be compared against the PQ threshold.
float rY, gY, bY;
rY = Pq2Luma(r);
gY = Pq2Luma(g);
bY = Pq2Luma(b);
float luma = Luma(rY, gY, bY);
float pqLuma = Luma2Pq(luma);
if ( pqLuma >= t ) {
// if any channel is over threshold, replace with overlay color
r = rOver;
g = gOver;
b = bOver;
}
} else if (overlay && ( r >= t || g >= t || b >= t ) ) {
// Per-channel mode: any channel over threshold triggers the overlay.
r = rOver;
g = gOver;
b = bOver;
}
p_Output[index + 0] = r;
p_Output[index + 1] = g;
p_Output[index + 2] = b;
p_Output[index + 3] = a; // alpha passes through unchanged
}
}
// Host launcher for LegalOverlayKernel: 128-wide thread rows, one block
// row per image row. p_OverlayRgb is an array of three percentages.
void RunCudaKernel(int p_Width, int p_Height, float p_Luminance, float* p_OverlayRgb, int p_OverlayDisplay, int p_OverlayLuma, const float* p_Input, float* p_Output)
{
    const dim3 block(128, 1, 1);
    const dim3 grid((p_Width + block.x - 1) / block.x, p_Height, 1);
    LegalOverlayKernel<<<grid, block>>>(p_Width, p_Height, p_Luminance,
                                        p_OverlayRgb[0], p_OverlayRgb[1], p_OverlayRgb[2],
                                        p_OverlayDisplay, p_OverlayLuma, p_Input, p_Output);
}
|
extern "C"
// Block-wise sum reduction with sequential (interleaved) addressing:
// v_r[blockIdx.x] = sum of the blockDim.x elements of v covered by this
// block. Assumes the grid exactly covers v (no tail guard on the load).
__global__
void sumReduction(double *v, double *v_r) {
    extern __shared__ double partial_sum[];
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    partial_sum[threadIdx.x] = v[tid];
    __syncthreads();
    for (int s = 1; s < blockDim.x; s *= 2) {
        int index = 2 * s * threadIdx.x;
        // Guard index + s as well: the original's "index < blockDim.x" read
        // past the shared array for non-power-of-two block sizes.
        if (index + s < blockDim.x) {
            partial_sum[index] += partial_sum[index + s];
        }
        __syncthreads(); // barrier reached by all threads, outside the branch
    }
    if (threadIdx.x == 0) {
        v_r[blockIdx.x] = partial_sum[0];
    }
}
20,698 | #include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <stdio.h>
#define arraySize 6
#define threadPerBlock 6
/** Enumeration sort (a.k.a. rank sort):
 * for each element of the array, count how many other elements are smaller
 * than it; that count is the element's index in the final sorted array.
 */
// Define kernel function to sort array with rank.
// Rank (enumeration) sort: each thread takes one element, counts how many
// array values are smaller by streaming tiles through shared memory, then
// scatters its element to device_b[count].
// NOTE(review): equal input values receive the same rank and collide in
// device_b — this assumes all inputs are distinct. The __syncthreads calls
// sit inside loops whose trip counts are uniform across the block only
// because arraySize is a multiple of threadPerBlock — verify if resized.
__global__ void rank_sort_kernel(int *device_a, int *device_b)
{
unsigned int count = 0;
unsigned int tid = threadIdx.x;
unsigned int ttid = blockIdx.x * threadPerBlock + tid;
int val = device_a[ttid]; // this thread's element
// using shared memory.
__shared__ int cache[threadPerBlock];
// Stream the array through shared memory one tile at a time.
for (unsigned int i = tid; i < arraySize; i += threadPerBlock)
{
cache[tid] = device_a[i];
__syncthreads();
for (unsigned j = 0; j < threadPerBlock; ++j)
{
if (val > cache[j])
{
count++;
}
__syncthreads();
}
}
// count == number of smaller elements == final sorted position.
device_b[count] = val;
}
// Host driver: rank-sorts a fixed 6-element array on the GPU and prints
// the array before and after sorting.
int main(int argc, char **argv)
{
    int host_a[arraySize] = {5, 9, 2, 3, 8, 4};
    int host_b[arraySize];
    int *device_a, *device_b;
    const size_t bytes = arraySize * sizeof(int);
    cudaMalloc((void**)&device_a, bytes);
    cudaMalloc((void**)&device_b, bytes);
    cudaMemcpy(device_a, host_a, bytes, cudaMemcpyHostToDevice);
    rank_sort_kernel <<< arraySize / threadPerBlock, threadPerBlock >>> (device_a, device_b);
    cudaDeviceSynchronize();
    cudaMemcpy(host_b, device_b, bytes, cudaMemcpyDeviceToHost);
    printf("The before sorted Array is: \n");
    for (unsigned int k = 0; k < arraySize; ++k)
        printf("%d\t", host_a[k]);
    printf("\n\nThe Enumeration sorted Array is: \n");
    for (unsigned int k = 0; k < arraySize; ++k)
        printf("%d\t", host_b[k]);
    cudaFree(device_a);
    cudaFree(device_b);
    return 0;
}
20,699 | #include <time.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
// Array-of-structures layout: one record of four direction flags per cell.
struct AoS{
int up;
int left;
int right;
int down;
};
// Structure-of-arrays layout: one plane (array) per direction flag.
struct SoA{
int* up;
int* left;
int* right;
int* down;
};
// Prints each cell of the AoS lattice, four flags per line in stored order.
void printAoS(int* array, int size){
    for (int i = 0; i < size; ++i)
    {
        const int base = i * 4;
        printf("%d ", array[base]);
        printf("%d ", array[base + 1]);
        printf("%d ", array[base + 2]);
        printf("%d\n", array[base + 3]);
    }
}
// Prints the total particle count ("mass") of the AoS lattice.
void checkMassAoS(int *array, int size){
    int total = 0;
    for (int i = 0; i < size; ++i)
    {
        const int base = i * 4;
        total += array[base] + array[base + 1] + array[base + 2] + array[base + 3];
    }
    printf("%d\n", total);
}
// Collision step over the AoS lattice, one thread per cell. Layout per
// cell: [up, left, right, down]. A vertical head-on pair (up+left set)
// becomes a horizontal pair (right+down) and vice versa; mass is conserved.
// Fix: removed the per-thread debug printf("ho")/printf("la") calls, which
// flooded stdout and serialized every thread.
__global__ void collision_kernel_AoS(int *array, int size){
    int myID = threadIdx.x + blockDim.x * blockIdx.x;
    if (myID < size)
    {
        const int base = myID * 4;
        if (array[base] == 1 && array[base+1] == 1 && array[base+3] == 0 && array[base+2] == 0)
        {
            array[base] = 0;
            array[base+1] = 0;
            array[base+2] = 1;
            array[base+3] = 1;
        }
        else if (array[base] == 0 && array[base+1] == 0 && array[base+3] == 1 && array[base+2] == 1)
        {
            array[base] = 1;
            array[base+1] = 1;
            array[base+2] = 0;
            array[base+3] = 0;
        }
    }
}
// Array-of-structures pipeline: reads an N x M lattice from "a.txt"
// (per-direction planes in file order: down, up, right, left), runs one
// GPU collision step and prints the lattice before and after.
void initAoS(){
    // Read the input file (guard added: the original dereferenced a NULL
    // FILE* when "a.txt" was missing).
    FILE* file = fopen ("a.txt", "r");
    if (file == NULL)
    {
        fprintf(stderr, "Cannot open a.txt\n");
        return;
    }
    int N = 0;
    int M = 0;
    fscanf (file, "%d", &N);
    fscanf (file, "%d", &M);
    // Fill the AoS buffer, one direction plane at a time.
    int* array = (int*) malloc(sizeof(int)*N*M*4);
    for (int i = 0; i < N*M; ++i)
        fscanf (file, "%d", &array[i*4+3]);
    for (int i = 0; i < N*M; ++i)
        fscanf (file, "%d", &array[i*4]);
    for (int i = 0; i < N*M; ++i)
        fscanf (file, "%d", &array[i*4+2]);
    for (int i = 0; i < N*M; ++i)
        fscanf (file, "%d", &array[i*4+1]);
    fclose (file);
    printAoS(array, M*N);
    // The kernel uses one thread per CELL (it indexes whole 4-int cells),
    // so the grid covers N*M elements. The original launched <<<1,1>>>
    // (processing only cell 0, marked "ACA HAY ERROR") and sized the unused
    // grid over N*M*4.
    int block_size = 256;
    int grid_size = (N*M + block_size - 1) / block_size;
    int* gpuArray;
    int* array2 = (int*)malloc(sizeof(int)*N*M*4);
    cudaMalloc(&gpuArray, sizeof(int)*N*M*4);
    cudaMemcpy(gpuArray, array, sizeof(int)*N*M*4, cudaMemcpyHostToDevice);
    collision_kernel_AoS<<<grid_size, block_size>>>(gpuArray, M*N);
    cudaDeviceSynchronize();
    cudaMemcpy(array2, gpuArray, sizeof(int)*N*M*4, cudaMemcpyDeviceToHost);
    cudaFree(gpuArray);
    printAoS(array2, M*N);
    // The original leaked both host buffers.
    free(array);
    free(array2);
}
// Prints each cell of the SoA lattice as "right up left down" per line.
void printSoA(struct SoA structure, int size){
    for (int i = 0; i < size; ++i)
    {
        printf("%d ", structure.right[i]);
        printf("%d ", structure.up[i]);
        printf("%d ", structure.left[i]);
        printf("%d\n", structure.down[i]);
    }
}
// Prints the total particle count ("mass") of the SoA lattice.
void checkMassSoA(struct SoA structure, int size){
    int total = 0;
    for (int i = 0; i < size; ++i)
    {
        total += structure.up[i] + structure.left[i]
               + structure.right[i] + structure.down[i];
    }
    printf("%d\n", total);
}
// Collision step over the SoA lattice, one thread per cell. A vertical
// head-on pair (up+down set) becomes a horizontal pair (left+right) and
// vice versa; mass is conserved.
__global__ void collision_kernel_SoA(struct SoA structure, int size){
    const int cell = threadIdx.x + blockDim.x * blockIdx.x;
    if (cell >= size)
        return;
    const bool vertical = structure.up[cell] == 1 && structure.down[cell] == 1
                       && structure.right[cell] == 0 && structure.left[cell] == 0;
    const bool horizontal = structure.up[cell] == 0 && structure.down[cell] == 0
                         && structure.right[cell] == 1 && structure.left[cell] == 1;
    if (vertical)
    {
        structure.up[cell] = 0;
        structure.down[cell] = 0;
        structure.left[cell] = 1;
        structure.right[cell] = 1;
    }
    else if (horizontal)
    {
        structure.up[cell] = 1;
        structure.down[cell] = 1;
        structure.left[cell] = 0;
        structure.right[cell] = 0;
    }
}
// Structure-of-arrays pipeline: reads an N x M lattice from "initial.txt"
// (per-direction planes in file order: right, up, left, down) and prints
// its mass. Kernel launches are not wired up yet (AoS path is done first).
void initSoA(){
    FILE* file = fopen ("initial.txt", "r");
    // Guard added: the original dereferenced a NULL FILE* on a missing file.
    if (file == NULL)
    {
        fprintf(stderr, "Cannot open initial.txt\n");
        return;
    }
    int N = 0;
    int M = 0;
    fscanf (file, "%d", &N);
    fscanf (file, "%d", &M);
    // Allocate and fill one plane per direction.
    struct SoA structure;
    structure.up = (int*) malloc(sizeof(int)*N*M);
    structure.down = (int*) malloc(sizeof(int)*N*M);
    structure.left = (int*) malloc(sizeof(int)*N*M);
    structure.right = (int*) malloc(sizeof(int)*N*M);
    for (int i = 0; i < N*M; ++i)
        fscanf (file, "%d", &structure.right[i]);
    for (int i = 0; i < N*M; ++i)
        fscanf (file, "%d", &structure.up[i]);
    for (int i = 0; i < N*M; ++i)
        fscanf (file, "%d", &structure.left[i]);
    for (int i = 0; i < N*M; ++i)
        fscanf (file, "%d", &structure.down[i]);
    fclose (file);
    checkMassSoA(structure, M*N);
    // The original leaked all four planes.
    free(structure.up);
    free(structure.down);
    free(structure.left);
    free(structure.right);
}
// Entry point: runs the array-of-structures version of the simulation.
int main()
{
    initAoS();
    return 0;
}
#include <stdint.h>
#include <stdio.h>
#include <string.h> // strcmp, strtok, strlen, strcpy, strcat used below
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
// Small RAII wrapper around a pair of CUDA events for millisecond timing:
// the constructor creates both events and the destructor destroys them.
struct GpuTimer
{
    cudaEvent_t start;
    cudaEvent_t stop;

    GpuTimer()
    {
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
    }

    ~GpuTimer()
    {
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
    }

    // Records the start event on the default stream.
    void Start()
    {
        cudaEventRecord(start, 0);
    }

    // Records the stop event on the default stream.
    void Stop()
    {
        cudaEventRecord(stop, 0);
    }

    // Waits for the stop event, then returns Start->Stop time in ms.
    float Elapsed()
    {
        float ms;
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&ms, start, stop);
        return ms;
    }
};
// Reads an ASCII PPM ("P3", max value <= 255) into a newly malloc'd uchar3
// buffer; exits on any open/format error. Caller frees pixels.
void readPnm(char * fileName,
int &width, int &height, uchar3 * &pixels)
{
    FILE * f = fopen(fileName, "r");
    if (f == NULL)
    {
        printf("Cannot read %s\n", fileName);
        exit(EXIT_FAILURE);
    }
    char type[3];
    // Bounded read: type[] only holds 2 chars + NUL; the original's bare
    // "%s" could overflow the buffer on a malformed header.
    fscanf(f, "%2s", type);
    if (strcmp(type, "P3") != 0) // In this exercise, we don't touch other types
    {
        fclose(f);
        printf("Cannot read %s\n", fileName);
        exit(EXIT_FAILURE);
    }
    fscanf(f, "%i", &width);
    fscanf(f, "%i", &height);
    int max_val;
    fscanf(f, "%i", &max_val);
    if (max_val > 255) // In this exercise, we assume 1 byte per value
    {
        fclose(f);
        printf("Cannot read %s\n", fileName);
        exit(EXIT_FAILURE);
    }
    pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
    for (int i = 0; i < width * height; i++)
        fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
    fclose(f);
}
// Writes pixels as an ASCII PPM (P3, max value 255); exits on open failure.
void writePnm(uchar3 * pixels, int width, int height,
char * fileName)
{
    FILE * f = fopen(fileName, "w");
    if (f == NULL)
    {
        printf("Cannot write %s\n", fileName);
        exit(EXIT_FAILURE);
    }
    fprintf(f, "P3\n%i\n%i\n255\n", width, height);
    const int total = width * height;
    for (int i = 0; i < total; i++)
        fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
    fclose(f);
}
// Convolves each pixel with a filterWidth x filterWidth kernel, clamping
// sample coordinates at the image edges (replicate border). One thread per
// pixel on a 2D launch; idx/dy are the clamped row/column of each sample.
__global__ void blurImgKernel(uchar3 * inPixels, int width, int height,
float * filter, int filterWidth,
uchar3 * outPixels)
{
// TODO
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < width && iy < height)
{
int i = iy * width + ix;
int half = filterWidth / 2;
float3 G; // per-channel accumulator
int idx = 0, dy = 0;
G.x = G.y = G.z = 0.0f;
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
// Clamp the sample row into [0, height-1] ...
idx = iy + filterR - half;
dy = ix + filterC - half;
if (idx < 0)
idx = 0;
if (idx >= height)
idx = height - 1;
idx *= width;
// ... and the sample column into [0, width-1].
if (dy < 0)
dy = 0;
if (dy >= width)
dy = width - 1;
idx += dy;
G.x += filter[filterR * filterWidth + filterC] * inPixels[idx].x;
G.y += filter[filterR * filterWidth + filterC] * inPixels[idx].y;
G.z += filter[filterR * filterWidth + filterC] * inPixels[idx].z;
}
}
// Truncating float->uchar store, matching the host reference path.
outPixels[i].x = G.x;
outPixels[i].y = G.y;
outPixels[i].z = G.z;
}
}
// Blurs inPixels into outPixels with the given square filter, either on the
// host (useDevice == false; replicate-border convolution identical to the
// kernel's math) or on the GPU (allocate, copy, launch blurImgKernel over a
// ceil-divided 2D grid, copy back). Prints the elapsed time either way.
void blurImg(uchar3 * inPixels, int width, int height, float * filter, int filterWidth,
uchar3 * outPixels,
bool useDevice=false, dim3 blockSize=dim3(1, 1))
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
// TODO
// CPU reference path: same clamped convolution as blurImgKernel.
int half = filterWidth / 2;
float Gx, Gy, Gz;
for (int r = 0; r < height; r++)
{
for (int c = 0; c < width; c++)
{
int i = r * width + c;
int idx = 0, dy = 0;
Gx = Gy = Gz = 0.0f;
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
// Clamp sample row/column to the image (replicate border).
idx = r + filterR - half;
dy = c + filterC - half;
if (idx < 0)
idx = 0;
if (idx >= height)
idx = height - 1;
idx *= width;
if (dy < 0)
dy = 0;
if (dy >= width)
dy = width - 1;
idx += dy;
Gx += filter[filterR * filterWidth + filterC] * inPixels[idx].x;
Gy += filter[filterR * filterWidth + filterC] * inPixels[idx].y;
Gz += filter[filterR * filterWidth + filterC] * inPixels[idx].z;
}
}
outPixels[i].x = Gx;
outPixels[i].y = Gy;
outPixels[i].z = Gz;
}
}
}
else // Use device
{
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
printf("GPU name: %s\n", devProp.name);
printf("GPU compute capability: %d.%d\n", devProp.major, devProp.minor);
// TODO
// Allocate device memories
uchar3 *d_inPixels, * d_outPixels;
float * d_filter;
CHECK(cudaMalloc(&d_inPixels, height * width * sizeof(uchar3)));
CHECK(cudaMalloc(&d_outPixels, height * width * sizeof(uchar3)));
CHECK(cudaMalloc(&d_filter, filterWidth * filterWidth * sizeof(float)));
// Copy data to device memories
CHECK(cudaMemcpy(d_inPixels, inPixels, height * width * sizeof(uchar3), cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_filter, filter, filterWidth * filterWidth * sizeof(float), cudaMemcpyHostToDevice));
// Set grid size and call kernel (remember to check kernel error)
// Ceil-divide so partial edge tiles are covered.
dim3 gridSize((width - 1)/blockSize.x + 1, (height - 1)/blockSize.y + 1);
blurImgKernel<<<gridSize, blockSize>>>(d_inPixels, width, height, d_filter, filterWidth, d_outPixels);
// Launch-config errors surface via cudaGetLastError; in-kernel faults
// surface at the synchronize.
cudaError_t errSync = cudaGetLastError();
cudaError_t errAsync = cudaDeviceSynchronize();
if (errSync != cudaSuccess)
printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
if (errAsync != cudaSuccess)
printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));
// Copy result from device memories
CHECK(cudaMemcpy(outPixels, d_outPixels, height * width * sizeof(uchar3), cudaMemcpyDeviceToHost));
// Free device memories
CHECK(cudaFree(d_inPixels));
CHECK(cudaFree(d_filter));
CHECK(cudaFree(d_outPixels));
}
timer.Stop();
float time = timer.Elapsed();
printf("Processing time (%s): %f ms\n",
useDevice == true? "use device" : "use host", time);
}
// Mean absolute per-channel difference between two n-pixel images.
float computeError(uchar3 * a1, uchar3 * a2, int n)
{
    float total = 0;
    for (int i = 0; i < n; i++)
    {
        total += abs((int)a1[i].x - (int)a2[i].x);
        total += abs((int)a1[i].y - (int)a2[i].y);
        total += abs((int)a1[i].z - (int)a2[i].z);
    }
    return total / (n * 3); // 3 channels per pixel
}
// Returns a newly malloc'd concatenation of s1 and s2; the caller frees it.
char * concatStr(const char * s1, const char * s2)
{
    const size_t n1 = strlen(s1);
    char * out = (char *)malloc(n1 + strlen(s2) + 1);
    strcpy(out, s1);
    strcpy(out + n1, s2); // append s2 (including its NUL) after s1
    return out;
}
// Usage: prog <input.pnm> <output-base.pnm> <correct.pnm> [blockX blockY]
// Blurs the input with a 9x9 box filter on both host and device, reports
// the mean absolute error of each against the reference image, and writes
// "<base>_host.pnm" and "<base>_device.pnm".
int main(int argc, char ** argv)
{
if (argc != 4 && argc != 6)
{
printf("The number of arguments is invalid\n");
return EXIT_FAILURE;
}
// Read input image file
int width, height;
uchar3 * inPixels;
readPnm(argv[1], width, height, inPixels);
printf("Image size (width x height): %i x %i\n\n", width, height);
// Read correct output image file
int correctWidth, correctHeight;
uchar3 * correctOutPixels;
readPnm(argv[3], correctWidth, correctHeight, correctOutPixels);
if (correctWidth != width || correctHeight != height)
{
printf("The shape of the correct output image is invalid\n");
return EXIT_FAILURE;
}
// Set up a simple filter with blurring effect: 9x9 box (uniform weights).
int filterWidth = 9;
float * filter = (float *)malloc(filterWidth * filterWidth * sizeof(float));
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
filter[filterR * filterWidth + filterC] = 1. / (filterWidth * filterWidth);
}
}
// Blur input image using host
uchar3 * hostOutPixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, hostOutPixels);
// Compute mean absolute error between host result and correct result
float hostErr = computeError(hostOutPixels, correctOutPixels, width * height);
printf("Error: %f\n\n", hostErr);
// Blur input image using device; block shape optionally from argv[4..5].
uchar3 * deviceOutPixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
dim3 blockSize(32, 32); // Default
if (argc == 6)
{
blockSize.x = atoi(argv[4]);
blockSize.y = atoi(argv[5]);
}
blurImg(inPixels, width, height, filter, filterWidth, deviceOutPixels, true, blockSize);
// Compute mean absolute error between device result and correct result
float deviceErr = computeError(deviceOutPixels, correctOutPixels, width * height);
printf("Error: %f\n\n", deviceErr);
// Write results to files
char * outFileNameBase = strtok(argv[2], "."); // Get rid of extension
writePnm(hostOutPixels, width, height, concatStr(outFileNameBase, "_host.pnm"));
writePnm(deviceOutPixels, width, height, concatStr(outFileNameBase, "_device.pnm"));
// Free memories
free(inPixels);
free(correctOutPixels);
free(hostOutPixels);
free(deviceOutPixels);
free(filter);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.