serial_no int64 1 24.2k | cuda_source stringlengths 11 9.01M |
|---|---|
// Grid-stride fill: A[id] = 2*id for every id in [0, cnt).
// Correct for any grid/block configuration.
extern "C" __global__ void fill(int * A, int cnt){
    const int step = gridDim.x * blockDim.x;
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < cnt; idx += step)
        A[idx] = 2 * idx;
}
|
3,102 | #include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <iostream>
#include "device_launch_parameters.h"
#include "cuda_runtime.h"
using namespace std;
// Enumerate every CUDA device and print its main properties (name, compute
// capability, memory sizes, thread/grid limits, warp size) in French.
// Returns -1 when the 9999.9999 "no CUDA" sentinel capability is reported.
int main(int argc, char ** argv) {
int deviceCount;
cudaGetDeviceCount(&deviceCount);
for (int dev = 0; dev < deviceCount; dev++) {
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, dev);
if (dev == 0) {
if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
// CUDA not detected (9999.9999 is the emulation/no-device sentinel)
return -1;
} else if (deviceCount == 1) {
// Exactly one CUDA device present
cout << "Il y a un seul deviceCount" << endl;
} else {
// Print how many devices were found
cout << "Le nombre de deviceCount est de :" << deviceCount << "\n" << endl;
}
}
// Device name
cout << "Le nom de la device est :" << deviceProp.name << "\n" << endl;
// Major and minor compute capability numbers
cout << "Les numeros de version majeur et mineur sont :" << deviceProp.major << " " << deviceProp.minor << "\n" << endl;
// Global memory size (bytes)
cout << "La taille de la memoire globale est de :" << deviceProp.totalGlobalMem << "\n" << endl;
// Constant memory size (bytes)
cout << "La taille de la memoire constante est de :" << deviceProp.totalConstMem << "\n" << endl;
// Shared memory per block (bytes)
cout << "La taille de la memoire partagee est de :" << deviceProp.sharedMemPerBlock << "\n" << endl;
// Maximum threads in each block dimension, then max threads per block
cout << "La nombre de threads max par direction est :" << deviceProp.maxThreadsDim[0] << " " << deviceProp.maxThreadsDim[1] << " " << deviceProp.maxThreadsDim[2] << "\n" << endl;
cout << "Nombre max thread par bloc " << deviceProp.maxThreadsPerBlock << "\n" << endl;
// Maximum grid size in each dimension
cout << "Nombre de block par grille :" << deviceProp.maxGridSize[0] << " " << deviceProp.maxGridSize[1] << " " << deviceProp.maxGridSize[2] << "\n" << endl;
// Warp size
cout << "La taille du warp est de :" << deviceProp.warpSize << "\n" << endl;
}
system("pause"); // NOTE(review): Windows-only; no-op/failure elsewhere
return 0;
}
3,103 | /* randgen.c => contains random number generator and related utilities
including advance_random, warmup_random, random, randomize
*/
#include <stdio.h>
#include <cstdlib>
#include "type.cuh"
/* GLOBAL VARIABLES */
double oldrand[56]; /* lagged-Fibonacci state; slots 1..55 are used */
int jrand; /* index of the most recently returned value in oldrand */
/* FUNCTIONS */
void advance_random() /* generate the next batch of 55 subtractive-method values */
{
    int idx;
    double candidate;
    /* First 24 slots subtract the value 31 positions ahead. */
    for (idx = 1; idx <= 24; idx++) {
        candidate = oldrand[idx] - oldrand[idx + 31];
        if (candidate < 0.0)
            candidate = candidate + 1.0;
        oldrand[idx] = candidate;
    }
    /* Remaining slots subtract the value 24 positions behind. */
    for (idx = 25; idx <= 55; idx++) {
        candidate = oldrand[idx] - oldrand[idx - 24];
        if (candidate < 0.0)
            candidate = candidate + 1.0;
        oldrand[idx] = candidate;
    }
}
/* Initialize the oldrand table from a seed fraction, then churn it three
   times before first use. */
void warmup_random(double random_seed)
{
    int step, slot;
    double nxt, prev;
    oldrand[55] = random_seed;
    nxt = 0.0000000009;
    prev = random_seed;
    for (step = 1; step <= 54; step++) {
        slot = (21 * step) % 55;
        oldrand[slot] = nxt;
        nxt = prev - nxt;
        if (nxt < 0.0)
            nxt += 1.0;
        prev = oldrand[slot];
    }
    advance_random();
    advance_random();
    advance_random();
    jrand = 0;
}
double FRandom()
{
    /* Return one uniform value in [0,1) via Knuth's subtractive method
       (Knuth 1969, vol. 2). Refills the table every 55 draws. */
    jrand = jrand + 1;
    if (jrand > 55) {
        jrand = 1;
        advance_random();
    }
    return oldrand[jrand];
}
/* Validate the seed (must be a fraction in [0,1]) and warm up the generator.
   Exits with status 1 on an out-of-range seed.
   FIX: corrected the misspelled error message ("betwwn" -> "between") and
   added the missing trailing newline. */
void Randomize(double seed)
{
    if (seed < 0.0 || seed > 1.0) {
        fprintf(stderr, "ERROR: seed random number must be between 0 and 1\n");
        exit(1);
    }
    warmup_random(seed);
}
int Flip(double prob)
{ /* Biased coin flip: returns 1 (heads) with probability prob.
     prob == 1.0 short-circuits without consuming a random number. */
    if (prob == 1.0)
        return 1;
    return (FRandom() <= prob) ? 1 : 0;
}
int Rnd(int low, int high)
{ /* Uniform random int in [low, high]; degenerate ranges return low. */
    if (low >= high)
        return low;
    int pick = (int)(FRandom() * (double)(high - low + 1)) + low;
    if (pick > high)
        pick = high;
    return pick;
}
|
3,104 | #include "includes.h"
char* concat(char *s1, char *s2);
// Residual r = b - A*x for a sparse matrix stored with exactly three entries
// per row (values in a[], column indices in indeces[]). One thread per row.
__global__ void r_calculation(float* a , int * indeces , float* b , float* x,float * r ,int size)
{
    const int row = blockDim.x * blockIdx.x + threadIdx.x;
    if (row >= size)
        return;
    float dot = 0.0f;
    for (int k = 0; k < 3; ++k)
        dot += a[3 * row + k] * x[indeces[3 * row + k]];
    r[row] = b[row] - dot;
}
3,105 | #include "includes.h"
// Column sums of a ROWS x COLUMNS int matrix: thread x accumulates column x
// into b_array_val[x]. Single-block launch (indexes by threadIdx.x only).
__global__ void addValue(int * array_val, int*b_array_val) {
    const int col = threadIdx.x;
    int total = 0;
    for (unsigned int row = 0; row < ROWS; ++row)
        total += array_val[row * COLUMNS + col];
    b_array_val[col] = total;
}
3,106 | /******************************************************************************
* Eric Blasko
* 6/02/19
* Homework #3
* RecMatMulTiled.cu
* This program performs rectangle matrix multiplication, which uses shared mem
* of size TILE_WIDTH x TILE_WIDTH. Values of Matrix M and N are chosen by the
* user such that M is of size JxK and N is of size KxL. The kernal function
* produces the results of the matrix multiplication between M and N, storing
* it in matrix P which is of size JxL.
******************************************************************************/
#include <stdio.h>
#include <assert.h>
#define TILE_WIDTH 4
//kernal to compute C = A * B. Uses shared memory/tile execution
// Tiled matrix multiply: P (JxL) = M (JxK) * N (KxL), row-major, using
// TILE_WIDTH x TILE_WIDTH shared-memory tiles. Out-of-range tile elements
// are zero-padded so arbitrary (non-multiple) sizes are handled.
__global__ void MatrixMulKernel(float *d_M, float *d_N, float *d_P, int JSIZE, int KSIZE, int LSIZE)
{
    __shared__ float tileM[TILE_WIDTH][TILE_WIDTH];
    __shared__ float tileN[TILE_WIDTH][TILE_WIDTH];
    const int tx = threadIdx.x;
    const int ty = threadIdx.y;
    const int row = blockIdx.y * TILE_WIDTH + ty;
    const int col = blockIdx.x * TILE_WIDTH + tx;
    float acc = 0.0;
    const int numPhases = (KSIZE - 1) / TILE_WIDTH + 1;
    for (int ph = 0; ph < numPhases; ph++) {
        // Stage one tile of M and one of N, zero-padding past the edges.
        tileM[ty][tx] = (row < JSIZE && (ph * TILE_WIDTH + tx) < KSIZE)
                            ? d_M[row * KSIZE + (ph * TILE_WIDTH + tx)]
                            : 0.0;
        tileN[ty][tx] = ((ph * TILE_WIDTH + ty) < KSIZE && col < LSIZE)
                            ? d_N[(ph * TILE_WIDTH + ty) * LSIZE + col]
                            : 0.0;
        __syncthreads(); // tiles fully loaded before use
        for (int k = 0; k < TILE_WIDTH; k++)
            acc += tileM[ty][k] * tileN[k][tx];
        __syncthreads(); // finished with tiles before the next load
    }
    // Write the result, guarding the ragged grid edge.
    if (row < JSIZE && col < LSIZE)
        d_P[row * LSIZE + col] = acc;
}
//Set up and launch of kernal function
//Host-side setup and launch for the tiled matrix multiply.
//Copies M (jxk) and N (kxl) to the device, runs MatrixMulKernel, and copies
//the jxl product back into P. Device buffers are freed before returning.
void MatrixMultiplication(float *M, float *N, float *P, int j, int k, int l)
{
    int mMatSize = (j*k)*sizeof(float);
    int nMatSize = (k*l)*sizeof(float);
    int pMatSize = (j*l)*sizeof(float);
    float *d_M;
    float *d_N;
    float *d_P;
    cudaMalloc((void**) &d_M, mMatSize);
    cudaMalloc((void**) &d_N, nMatSize);
    cudaMalloc((void**) &d_P, pMatSize);
    cudaMemcpy(d_M, M, mMatSize, cudaMemcpyHostToDevice);
    cudaMemcpy(d_N, N, nMatSize, cudaMemcpyHostToDevice);
    // FIX: proper ceil-division for the grid -- the old (x/TILE_WIDTH)+1
    // launched a fully idle extra block row/column whenever the dimension
    // was an exact multiple of TILE_WIDTH.
    dim3 dimGrid((l + TILE_WIDTH - 1)/TILE_WIDTH, (j + TILE_WIDTH - 1)/TILE_WIDTH);
    dim3 dimBlock(TILE_WIDTH, TILE_WIDTH);
    MatrixMulKernel<<<dimGrid, dimBlock>>>(d_M, d_N, d_P, j, k, l);
    // FIX: surface launch-configuration errors instead of failing silently.
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "MatrixMulKernel launch failed: %s\n", cudaGetErrorString(err));
    cudaMemcpy(P, d_P, pMatSize, cudaMemcpyDeviceToHost);
    cudaFree(d_M);
    cudaFree(d_N);
    cudaFree(d_P);
}
//Print the values in a matrix
void print(float * matrix, int size, int col, const char * name)
{
printf("%s:\n", name);
for (int i = 0; i < size; i++)
{
if((i % col) == 0)
printf("\n");
printf(" %10.2f", matrix[i]);
}
printf("\n");
}
//main function to get users input and to launch kernal
int main(int argc, char** argv)
{
int j,k,l;
int mSize, nSize, pSize;
float *M;
float *N;
float *P;
printf("Enter rows(j) for matrix m: ");
scanf("%d", &j);
printf("Enter columns(k) for matrix m and rows(k) for matrix n: ");
scanf("%d", &k);
printf("Enter columns(l) for matrix n: ");
scanf("%d", &l);
//get size of each matrix
mSize = j * k;
nSize = k * l;
pSize = j * l;
//allocate in memory
M = (float *) malloc(mSize*sizeof(float));
N = (float *) malloc(nSize*sizeof(float));
P = (float *) malloc(pSize*sizeof(float));
//assign values to each matrix
for (int i = 0; i < mSize; i++)
M[i] = i;
for (int i = 0; i < nSize; i++)
N[i] = i+1;
for (int i = 0; i < pSize; i++)
P[i] = 0;
MatrixMultiplication(M, N, P, j, k, l);
print(M, mSize,k, "M");
print(N, nSize,l, "N");
print(P, pSize,l, "P");
free(M);
free(N);
free(P);
}
|
3,107 | #include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#define N_size 16 //number of elements in array
#define thread_number 4 //number of threads per block
#define block_number 4 //number of blocks
__global__ void prescan(float *gpu_outdata, float *gpu_indata, int n);
void scanCPU(float *f_out, float *f_in, int i_n);
// Elapsed wall-clock time in seconds between two gettimeofday() samples.
double myDiffTime(struct timeval &start, struct timeval &end)
{
    const double startSec = start.tv_sec + start.tv_usec / 1000000.0;
    const double endSec = end.tv_sec + end.tv_usec / 1000000.0;
    return endSec - startSec;
}
// Per-block Blelloch (work-efficient) exclusive scan of gpu_indata into
// gpu_outdata. Launch with thread_number threads per block and
// thread_number*sizeof(float) bytes of dynamic shared memory; each block
// scans only its own chunk of the input.
// BUGFIX: shared memory is per-block, so it must be indexed with the local
// thread id only. The old code offset every shared access by
// bid*thread_number, reading and writing far past the allocation for every
// block but the first (and zeroed/swapped mismatched slots in the sweeps).
__global__ void prescan(float *gpu_outdata, float *gpu_indata, int n)
{
    extern __shared__ float temp[]; // one chunk, indexed by local thread id
    int thid = threadIdx.x;
    int gid = blockIdx.x * thread_number + thid; // global element index
    int offset = 1;
    // Load this block's chunk, zero-padding past the end of the input.
    temp[thid] = (gid < n) ? gpu_indata[gid] : 0;
    // Up-sweep (reduce) phase.
    for (int d = thread_number >> 1; d > 0; d >>= 1)
    {
        __syncthreads();
        if (thid < d)
        {
            int ai = offset * (2 * thid + 1) - 1;
            int bi = offset * (2 * thid + 2) - 1;
            temp[bi] += temp[ai];
        }
        offset *= 2;
    }
    // Clear the root before the down-sweep (makes the scan exclusive).
    if (thid == 0)
    {
        temp[thread_number - 1] = 0;
    }
    // Down-sweep phase.
    for (int d = 1; d < thread_number; d *= 2)
    {
        offset >>= 1;
        __syncthreads();
        if (thid < d)
        {
            int ai = offset * (2 * thid + 1) - 1;
            int bi = offset * (2 * thid + 2) - 1;
            float t = temp[ai];
            temp[ai] = temp[bi];
            temp[bi] += t;
        }
    }
    __syncthreads();
    // BUGFIX: guard the store -- the old code wrote unconditionally.
    if (gid < n)
        gpu_outdata[gid] = temp[thid];
}
// Host reference: exclusive prefix sum, f_out[i] = sum of f_in[0..i-1].
// Writes exactly i_n entries.
// BUGFIX: the old loop bound `i <= i_n` wrote one element past the end of
// f_out and read one past the end of f_in.
void scanCPU(float *f_out, float *f_in, int i_n)
{
    if (i_n <= 0)
        return; // nothing to scan
    f_out[0] = 0;
    for (int i = 1; i < i_n; i++)
    {
        f_out[i] = f_out[i-1] + f_in[i-1];
    }
}
// Driver: scan a 16-element array on the GPU and the CPU, time both, and
// print the results side by side.
int main()
{
    float a[N_size]={2.0,1.0,3.0,1.0,0.0,4.0,1.0,2.0,0.0,3.0,1.0,2.0,5.0,3.0,1.0,2.0}, c[N_size], g[N_size];
    timeval start, end;
    float *dev_a, *dev_g;
    int size = N_size * sizeof(float);
    double d_gpuTime, d_cpuTime;
    for (int i = 0; i < N_size; i++)
    {
        printf("each element of an array a[%i] = %f\n", i, a[i]);
    }
    cudaMalloc((void **) &dev_a, size);
    cudaMalloc((void **) &dev_g, size);
    gettimeofday(&start, NULL);
    cudaMemcpy(dev_a, a, size, cudaMemcpyHostToDevice);
    // BUGFIX: the kernel was never launched, so dev_g (and g) held garbage.
    // Each block needs thread_number floats of dynamic shared memory.
    prescan<<<block_number, thread_number, thread_number * sizeof(float)>>>(dev_g, dev_a, N_size);
    cudaDeviceSynchronize();
    cudaMemcpy(g, dev_g, size, cudaMemcpyDeviceToHost);
    gettimeofday(&end, NULL);
    d_gpuTime = myDiffTime(start, end);
    gettimeofday(&start, NULL);
    scanCPU(c, a, N_size);
    gettimeofday(&end, NULL);
    d_cpuTime = myDiffTime(start, end);
    cudaFree(dev_a);
    cudaFree(dev_g);
    // BUGFIX: the old bound `i <= N_size` read one element past both arrays.
    for (int i = 0; i < N_size; i++)
    {
        printf("c[%i] = %0.3f, g[%i] = %0.3f\n", i, c[i], i, g[i]);
    }
    printf("GPU Time for array size %i: %f\n", N_size, d_gpuTime);
    printf("CPU Time for array size %i: %f\n", N_size, d_cpuTime);
    return 0;
}
|
3,108 | #include "includes.h"
// Per-weight and per-bias learning-rate estimate for a fully-connected layer:
// rate = (avgGradCurve / avgGradCurveVar) * (avgGrad^2 / avgGradVar).
// One thread per output neuron j (flat id over a 2-D grid); neurons whose
// dropout mask entry is non-zero are skipped entirely.
// Weight buffers are laid out so that weight (i, j) lives at i*thisLayerSize + j.
__global__ void FullyConnectedEstimateLearningRateKernel( float *weightLearningRatePtr, float *biasLearningRatePtr, float *avgWeightGradPtr, float *avgBiasGradPtr, float *avgWeightGradVarPtr, float *avgBiasGradVarPtr, float *avgWeightGradCurvePtr, float *avgBiasGradCurvePtr, float *avgWeightGradCurveVarPtr, float *avgBiasGradCurveVarPtr, float *dropoutMaskPtr, int prevLayerSize, int thisLayerSize )
{
// i: prev. layer neuron id
// j: current layer neuron id
int i;
int j = blockDim.x * blockIdx.y * gridDim.x //rows preceding current row in grid
+ blockDim.x * blockIdx.x //blocks preceding current block
+ threadIdx.x;
if (j < thisLayerSize)
{
// only process neurons with mask value 0
// (presumably non-zero means "dropped out" -- confirm against the caller)
if (!dropoutMaskPtr[j])
{
int index = j;
for (i = 0; i < prevLayerSize; i++)
{
// estimate the learning rate for weight (i, j)
weightLearningRatePtr[index] = (avgWeightGradCurvePtr[index] / avgWeightGradCurveVarPtr[index]) * (avgWeightGradPtr[index] * avgWeightGradPtr[index] / avgWeightGradVarPtr[index]);
index += thisLayerSize;
}
// estimate the learning rate for neuron j's bias
biasLearningRatePtr[j] = (avgBiasGradCurvePtr[j] / avgBiasGradCurveVarPtr[j]) * (avgBiasGradPtr[j] * avgBiasGradPtr[j] / avgBiasGradVarPtr[j]);
}
}
}
3,109 | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "curand_kernel.h"
#include <cuda.h>
#include <curand.h>
#include <iostream>
#include <numeric>
using namespace std;
const long steps = 1 << 21;
// Monte-Carlo pi helper: result[tid] = 1 when sample (x,y) falls inside the
// circle of radius 0.5 centred at (0.5, 0.5), else 0. One thread per sample.
__global__ void belongs_circle(double* x, double* y, double* result) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= steps)
        return;
    const double dx = x[tid] - 0.5;
    const double dy = y[tid] - 0.5;
    result[tid] = (dx * dx + dy * dy <= 0.5 * 0.5) ? 1 : 0;
}
// Estimate pi by Monte-Carlo: generate 2^21 uniform (x,y) points with cuRAND,
// classify them on the GPU, and sum the hits on the host.
int main() {
    const long size = steps * sizeof(double);
    long blockSize = 256;
    long numBlocks = (steps + blockSize - 1) / blockSize;
    curandGenerator_t gen;
    curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
    curandSetPseudoRandomGeneratorSeed(gen, 1234ULL); // fixed seed: reproducible
    double *result, *x, *y;
    cudaMalloc(&result, size);
    cudaMalloc(&x, size);
    cudaMalloc(&y, size);
    curandGenerateUniformDouble(gen, x, steps);
    curandGenerateUniformDouble(gen, y, steps);
    belongs_circle <<<numBlocks, blockSize>>>(x, y, result);
    // BUGFIX: `double check[steps]` was a 16 MB stack array (steps = 2^21),
    // well past the usual 8 MB stack limit -- allocate on the heap instead.
    double *check = new double[steps];
    cudaMemcpy(check, result, size, cudaMemcpyDeviceToHost);
    double sum = 0;
    for (long i = 0; i < steps; ++i) {
        sum += check[i];
    }
    cout << "Sum is " << sum << endl;
    cout << "Pi is " << 4 * sum / steps << endl;
    delete[] check;
    curandDestroyGenerator(gen); // FIX: generator was previously leaked
    cudaFree(result);
    cudaFree(x);
    cudaFree(y);
    return 0;
}
|
3,110 | #include "includes.h"
static const int n_el = 512; // element count used by the host-side driver
static const size_t size = n_el * sizeof(float); // byte size of one n_el-float buffer
// declare the kernel function
// function which invokes the kernel
// Element-wise vector addition C = A + B, one thread per element.
// The bound check makes ragged grids (blocks * threads > n_el) safe.
__global__ void kernel_sum(const float* A, const float* B, float* C, int n_el)
{
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n_el)
        C[idx] = A[idx] + B[idx];
}
3,111 | #include <iostream>
#include <stdlib.h>
#include <stdio.h>
#include <memory>
#include "cuda_runtime.h"
// result = v1 + v2, one thread per element. Single-block launch assumed:
// indexing uses threadIdx.x only.
__global__ void add_two_vectors(int* v1, int* v2, int* result){
    const int i = threadIdx.x;
    result[i] = v1[i] + v2[i];
}
// Demo: add two 64-element int vectors on the GPU and print the result
// (expected output: 64 threes), then a small shared_ptr demo.
int main(int argc, char **argv) {
int* v1_host = (int*)malloc(64*sizeof(int));
int* v2_host = (int*)malloc(64*sizeof(int));
int* result_host = (int*)malloc(64*sizeof(int));
// fill host inputs: v1 = all 1s, v2 = all 2s
for(int i=0;i<64;i++){
v1_host[i] = 1;
v2_host[i] = 2;
}
int* v1_dev;
int* v2_dev;
int* result_dev;
cudaMalloc(&v1_dev, 64*sizeof(int));
cudaMalloc(&v2_dev, 64*sizeof(int));
cudaMalloc(&result_dev, 64*sizeof(int));
//copy inputs host -> device
cudaMemcpy(v1_dev,v1_host,64*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(v2_dev,v2_host,64*sizeof(int),cudaMemcpyHostToDevice);
//launch kernel: one block of 64 threads (kernel indexes by threadIdx.x only)
dim3 grid(1,1,1);
dim3 block(64,1,1);
add_two_vectors<<<grid, block>>>(v1_dev, v2_dev, result_dev);
//wait for the kernel before reading results
cudaDeviceSynchronize();
//copy memory from device to host
cudaMemcpy(result_host, result_dev, 64*sizeof(int),cudaMemcpyDeviceToHost);
for(int i=0;i<64;i++){
printf("%i, ",result_host[i]);
}
free(v1_host);
free(v2_host);
free(result_host);
cudaFree(v1_dev);
cudaFree(v2_dev);
cudaFree(result_dev);
// shared_ptr demo: prints the pointee (4)
std::shared_ptr<int> iptr = std::make_shared<int>(4);
printf("%i",*iptr);
}
|
3,112 | /*#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/highgui.hpp>
#include<opencv2\imgproc.hpp>
#include <iostream>
#define maxThreads 1023
#define maxBlocks 65534
#define imin(a,b)(a<b?a:b)
__global__ void invertColor(unsigned char* d_pic,int height, int width,int channels)
{
int x = blockIdx.x*blockDim.x + threadIdx.x;
int y = blockIdx.y*blockDim.y + threadIdx.y;
int tid = (x + y*gridDim.x*blockDim.x);
//the math cheks out my G
while (tid<height*width)
{
d_pic[tid*channels + 0] =255-d_pic[tid*channels + 0];
if (channels > 1)
{
d_pic[tid*channels + 1] =255- d_pic[tid*channels + 1];
d_pic[tid*channels + 2] = 255-d_pic[tid*channels + 2];
}
tid += (gridDim.x*blockDim.x)*(gridDim.y*blockDim.y);
}
printf("%d", (gridDim.x*blockDim.x)*(gridDim.y*blockDim.y));
}
void main()
{
std::string picName = "Resources/Highway.jpg";
cv::Mat h_img =cv::imread(picName, cv::IMREAD_COLOR);
unsigned char* d_img;
cudaEvent_t start, stop;
//float elapsedTime;
int width = h_img.cols;
int height= h_img.rows;
int channels = h_img.channels();
int totalSize = width*height;
//cudaEventCreate(&start);
//cudaEventCreate(&stop);
dim3 block(8, 2);
//dim3 grid((width+block.x-1)/ block.x, (height + block.y - 1) / block.y);
dim3 grid(5, 3);
//cudaEventRecord(start, 0);
cudaMalloc(&d_img,totalSize*channels);
cudaMemcpy(d_img,h_img.ptr(), totalSize*channels, cudaMemcpyHostToDevice);
invertColor << <grid,block >> >(d_img,height,width,channels);
cudaMemcpy(h_img.ptr(),d_img,totalSize*channels,cudaMemcpyDeviceToHost);
//cudaRecordEvent(stop, 0);
//cudaEventSynchronize(stop);
//cudaEventElapsedTime(&elapsedTime,start,stop);
//std::cout << "\nTime taken by GPU: " << elapsedTime << std::endl;
cv::imshow("img", h_img);
cv::waitKey();
cv::destroyAllWindows();
system("pause");
cudaFree(d_img);
}*/ |
3,113 | #include <limits>
using namespace std;
// Constants
const double MENOS_INFINITO = -numeric_limits<double>::max(); // "minus infinity" sentinel
const size_t BLOCK_SIZE = 128; // threads per reduction block
// Count of blocks that have published their partial result (used by reducir
// below to detect the last block to finish).
__device__ unsigned int contadorBloques = 0;
// Log-determinant of the k-th factor stored in g_L (a batch of
// numDimensiones x numDimensiones matrices): 2 * sum_j log(diagonal[j]).
__device__ double logaritmoDeterminante(double *g_L, const size_t k, const size_t numDimensiones)
{
    double acumulado = 0.0;
    const size_t base = k * numDimensiones * numDimensiones;
    for (size_t j = 0; j < numDimensiones; j++)
        acumulado += log(g_L[base + j * numDimensiones + j]);
    return 2.0 * acumulado;
}
// Block-wide sum reduction in shared memory. Each thread contributes `suma`;
// the block total ends up in sharedData[0]. blockSize must be a power of two
// matching blockDim.x, and sharedData must hold blockSize doubles.
// The final sub-32 steps use the classic `volatile`-pointer idiom with no
// barriers.
// NOTE(review): that idiom assumes pre-Volta lockstep warps; on Volta+ it
// should use __syncwarp()/warp shuffles -- confirm the target architecture.
template <size_t blockSize>
__device__ void reducirBloque(volatile double *sharedData, double suma, const size_t tid)
{
sharedData[tid] = suma;
__syncthreads();
// Tree reduction: halve the active thread count each step.
if (blockSize >= 512) {
if (tid < 256) {
sharedData[tid] = suma = suma + sharedData[tid + 256];
}
__syncthreads();
}
if (blockSize >= 256) {
if (tid < 128) {
sharedData[tid] = suma = suma + sharedData[tid + 128];
}
__syncthreads();
}
if (blockSize >= 128) {
if (tid < 64) {
sharedData[tid] = suma = suma + sharedData[tid + 64];
}
__syncthreads();
}
// Last 32 partials: single warp, no __syncthreads() between steps.
if (tid < 32) {
if (blockSize >= 64) {
sharedData[tid] = suma = suma + sharedData[tid + 32];
}
if (blockSize >= 32) {
sharedData[tid] = suma = suma + sharedData[tid + 16];
}
if (blockSize >= 16) {
sharedData[tid] = suma = suma + sharedData[tid + 8];
}
if (blockSize >= 8) {
sharedData[tid] = suma = suma + sharedData[tid + 4];
}
if (blockSize >= 4) {
sharedData[tid] = suma = suma + sharedData[tid + 2];
}
if (blockSize >= 2) {
sharedData[tid] = suma = suma + sharedData[tid + 1];
}
}
}
// Final single-block reduction pass: each thread strides over the numTrozos
// per-block partials exposed by `valor(i)` (which returns a pointer), the
// block collapses them via reducirBloque, and thread 0 stores the grand
// total through the pointer returned by `direccionResultado()`.
template <size_t blockSize, typename Predicate, typename Predicate2>
__device__ void reducirFinal(Predicate valor, Predicate2 direccionResultado, volatile double *sharedData, size_t numTrozos)
{
    const size_t tid = threadIdx.x;
    double acumulado = 0.0;
    for (int i = tid; i < numTrozos; i += blockSize)
        acumulado += *(valor(i));
    reducirBloque<blockSize>(sharedData, acumulado, tid);
    if (tid == 0) {
        *(direccionResultado()) = sharedData[0];
    }
}
// Grid-wide reduction of n values produced by `valor(i)`. Every block reduces
// its strided slice (two loads per iteration) via reducirBloque and publishes
// its partial through `direccionResultado()`. An atomic ticket on the global
// contadorBloques counter detects the last block to finish; that block then
// runs `reduccionFinal()` to combine the per-block partials, and resets the
// counter for the next launch.
template <size_t blockSize, typename Predicate, typename Predicate2, typename Predicate3>
__device__ void reducir(Predicate valor, Predicate2 direccionResultado, Predicate3 reduccionFinal, const size_t n, volatile double *sharedData, const size_t numBloques)
{
__shared__ bool esUltimoBloque;
const size_t tid = threadIdx.x;
const size_t gridSize = (blockSize * 2) * gridDim.x;
size_t i = blockIdx.x * (blockSize * 2) + threadIdx.x;
double suma = 0.0;
// Each iteration consumes two elements per thread (i and i + blockSize).
while (i < n) {
suma += valor(i);
if (i + blockSize < n) {
suma += valor(i+blockSize);
}
i += gridSize;
}
reducirBloque<blockSize>(sharedData, suma, tid);
if (tid == 0) {
*(direccionResultado()) = sharedData[0];
__threadfence(); // make the partial visible to all blocks before ticketing
unsigned int ticket = atomicInc(&contadorBloques, numBloques);
esUltimoBloque = (ticket == numBloques - 1);
}
__syncthreads(); // broadcast esUltimoBloque to the whole block
if (esUltimoBloque) {
reduccionFinal();
if (tid == 0) {
contadorBloques = 0; // reset for the next kernel launch
}
}
}
|
3,114 | #include "includes.h"
// Swap element (r, nx/2) with element (r + ny/2 + yodd, nx/2): exchanges the
// upper and lower halves of the middle column of a row-major nx-wide grid.
// r is derived from thread/block ids plus `offset`.
__global__ void swap_middle_column(float* data, const int num_threads, const int nx, const int ny, const int xodd, const int yodd, const int offset) {
    const uint row = threadIdx.x + blockIdx.x * num_threads + offset;
    const int col = nx / 2;
    const int upper = row * nx + col;
    const int lower = (row + ny / 2 + yodd) * nx + col;
    const float held = data[upper];
    data[upper] = data[lower];
    data[lower] = held;
}
3,115 | #include "includes.h"
// Every thread i < N atomically adds input[i] into every output slot, so
// each output[j] ends up holding the full sum of input[0..N-1].
__global__ void scatterSum(int N, float *input, float *output){
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= N)
        return;
    const float contribution = input[i];
    for (int j = 0; j < N; ++j)
        atomicAdd(&output[j], contribution);
}
3,116 | /**
* Copyright 1993-2015 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/**
*/
#include <stdio.h>
// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>
// Average pooling over a flattened NCHW batch, one thread per output element.
// tid maps to (image plane, output row, output col); each thread averages a
// kernelSize x kernelSize window of A and writes one element of C.
// NOTE(review): the divisor is always kernelSize^2 even for windows clipped
// at the image edge, i.e. zero-padding semantics -- confirm that is intended.
__global__ void averagePoolNCHW(const float *A, float *C, int batchSize, int channels, int width, int height, int stride, int kernelSize)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;// Get global thread ID
int tpr = (width+stride-1)/stride;// Threads to work in a row
int tpc = (height+stride-1)/stride;// Threads to work in a col
int tgrpSize = tpr * tpc;// Number of threads to work on a image
int tgrpNum = tid/tgrpSize;// Image (plane) number
int tgrpLocaltid = (tid % tgrpSize);// local id in working image
int startRow = tgrpNum * height;// starting row of this image plane
int localRow = (tgrpLocaltid/tpr)*stride;// local row number in the working image
int col = (tid%tpr) * stride;
//check row boundaries (threads past the last plane exit)
if(startRow + localRow > (channels * batchSize * height -1))
return;
float outTemp = 0.0f;
//Accumulate the pooling window, clipped at the image edges
for(int i = 0 ; i < kernelSize && localRow + i < height ; i++)
for( int j =0; j < kernelSize && col+ j < width; j++)
{
outTemp = outTemp + A[(startRow + localRow + i)*width+ col+ j];
}
C[tid] = outTemp/(kernelSize * kernelSize);// Store output
}
// Average pooling with the input image plane staged in shared memory.
// One block per image plane (gridDim.x = N*C); dynamic shared memory must
// hold height*width floats. Threads cooperatively load the plane, then each
// computes one or more pooled outputs.
__global__ void averagePoolShared(const float *A, float *C, int batchSize, int channels, int width, int height, int stride, int kernelSize)
{
extern __shared__ float sArr[];// Shared memory to store image data
int size = height * width;// Image size
int tpr = (width+stride-1)/stride;// Threads to work in a row
int tpc = (height+stride-1)/stride;// Threads to work in a col
int startOutIndex = blockIdx.x * tpr * tpc;// Starting output index of image
//load this block's image plane into shared memory (strided, cooperative)
for(int i = threadIdx.x; i < size ; i += blockDim.x)
{
sArr[i] = A[blockIdx.x * size + i];
}
__syncthreads();
// Surplus threads exit; the k-loop bound below also guards the boundary,
// so the `>` (rather than `>=`) here is harmless.
if(threadIdx.x > tpr*tpc)
return;
// loop over this plane's output indices
for(int k = threadIdx.x ; k < tpr * tpc ; k += blockDim.x)
{
float outTemp = 0.0f;
int row = (k/tpr) * stride;
int col = (k%tpr) * stride;
// Compute average pooling; the window is clipped at the edges but the
// divisor stays kernelSize^2 (zero-padding semantics, as in the NCHW kernel)
for(int i = 0 ; i < kernelSize && row + i < height ; i++)
for( int j =0; j < kernelSize && col+ j < width; j++)
{
outTemp = outTemp+ sArr[(row + i)*width+ col+ j];
}
C[startOutIndex+k] = outTemp/(kernelSize * kernelSize); // Store output
}
}
//Host launcher for average pooling over an N x C x H x W batch.
//Chooses the shared-memory kernel when pooling windows overlap
//(stride < kernelSize), otherwise the plain global-memory kernel.
//begPad/endPad are currently unused. Always returns 0; CUDA errors are
//reported on stderr but not propagated.
int avgPool(int N,
            const float* inputs,
            float* outputs,
            int C,
            int H,
            int W,
            int kernelSize,
            int begPad,
            int endPad,
            int stride,
            cudaStream_t stream)
{
    size_t sharedMemorySize = sizeof(float)*((H * W));
    // BUGFIX: the old code malloc'd two host scratch buffers and copied the
    // first plane back from the device here, then never read or freed them --
    // a memory leak plus a synchronizing device->host copy on every call.
    // Both have been removed.
    if (stride < kernelSize)
    {
        unsigned int blocksPerGrid = N * C;
        unsigned int threadsPerBlock = (W * H < 1024)?(W * H):1024;
        averagePoolShared<<<blocksPerGrid, threadsPerBlock, sharedMemorySize,stream>>>(inputs,
                                                                                      outputs,
                                                                                      N,
                                                                                      C,
                                                                                      W,
                                                                                      H,
                                                                                      stride,
                                                                                      kernelSize);
    }
    else
    {
        unsigned int threadsPerBlock = 1024;
        unsigned int blocksPerGrid =((N*C*H*W) + threadsPerBlock - 1) / threadsPerBlock;
        // NOTE(review): the stride argument is passed as literal 1 here, not
        // `stride` -- confirm this dense-output behaviour is intentional.
        averagePoolNCHW<<<blocksPerGrid, threadsPerBlock,0,stream>>>(inputs,
                                                                    outputs,
                                                                    N,
                                                                    C,
                                                                    W,
                                                                    H,
                                                                    1,
                                                                    kernelSize);
    }
    cudaError_t err = cudaGetLastError();
    if ( cudaSuccess != err )
    {
        fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n",
                 __FILE__, __LINE__, cudaGetErrorString( err ) );
    }
    return 0;
}
3,117 | #include <cstdio>
// Reduce per-key partial sums: thread t sums its nv values from vals, then
// normalizes by the last thread's sum. Single-block launch; blockDim.x must
// equal the key count.
// BUGFIX: the shared variables nv and numElems were written by one thread
// and read by all others with no barrier -- a data race. Two __syncthreads()
// calls now publish each value before it is consumed.
__global__ void linearRegressionReducerKernel(const int * const keys,
const float * const vals,
const int * numVals,
int * const keySpace,
float * const valSpace)
{
__shared__ volatile int nv;
__shared__ volatile float numElems;
float fsum = 0.0f;
if (threadIdx.x == 0) nv = numVals[0];
__syncthreads(); // all threads must see nv before the loop below
for (int i = 0; i < nv; ++i)
{
fsum += vals[threadIdx.x * nv + i];
}
if (threadIdx.x == blockDim.x - 1) numElems = fsum;
__syncthreads(); // numElems must be published before the divide
keySpace[threadIdx.x] = keys[threadIdx.x];
valSpace[threadIdx.x] = fsum / numElems;
}
// Launch the reducer on `stream`: one block of six threads (one per key).
void linearRegressionReducerExecute(const int * const keys,
                                    const float * const vals,
                                    const int * numVals,
                                    int * const keySpace,
                                    float * const valSpace,
                                    cudaStream_t & stream)
{
    linearRegressionReducerKernel<<<1, 6, 0, stream>>>(keys, vals, numVals, keySpace, valSpace);
#if 0
    // Debug dump of the reduced key/value space (disabled).
    {
        int cpuKeys[6];
        float cpuVals[6];
        cudaMemcpy(cpuKeys, keySpace, sizeof(cpuKeys), cudaMemcpyDeviceToHost);
        cudaMemcpy(cpuVals, valSpace, sizeof(cpuVals), cudaMemcpyDeviceToHost);
        for (int i = 0; i < 6; ++i)
        {
            printf("%2d: %2d - %f\n", i, cpuKeys[i], cpuVals[i]);
        }
        fflush(stdout);
    }
#endif
}
|
3,118 | #include <iostream>
#include <fstream>
#include <string.h>
#include <time.h>
#include <math.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
using namespace std;
int index(int i) { return i + 1; }
// Blocksize
#define BLOCKSIZE 64
// Number of mesh points (default; overwritten by user input in main)
int n = 60000;
//*************************************************
// Swap two pointers to float
// ************************************************
//*************************************************
// Exchange two float pointers in place
// ************************************************
void swap_pointers(float **a, float **b)
{
    float *held = *a;
    *a = *b;
    *b = held;
}
//*************************************************
// GLOBAL MEMORY VERSION OF THE FD UPDATE
// ************************************************
//*************************************************
// GLOBAL MEMORY VERSION OF THE FD UPDATE
// ************************************************
// Lax-Friedrichs update of the interior points; d_phi holds n+3 values with
// one ghost cell at each end. Threads 1 and n+1 also copy out the boundaries.
__global__ void FD_kernel1(float *d_phi, float *d_phi_new, float cu, int n)
{
    const int i = blockDim.x * blockIdx.x + threadIdx.x + 1;
    // Inner point update
    if (i < n + 2)
        d_phi_new[i] = 0.5 * ((d_phi[i + 1] + d_phi[i - 1]) - cu * (d_phi[i + 1] - d_phi[i - 1]));
    // Boundary conditions (copy-out ghost cells)
    if (i == 1)
        d_phi_new[0] = d_phi_new[1];
    if (i == n + 1)
        d_phi_new[n + 2] = d_phi_new[n + 1];
}
//*************************************************
// TILING VERSION (USES SHARED MEMORY) OF THE FD UPDATE
// ************************************************
//*************************************************
// TILING VERSION (USES SHARED MEMORY) OF THE FD UPDATE
// ************************************************
// Same Lax-Friedrichs update as FD_kernel1, but each block first stages
// BLOCKSIZE+2 phi values (its tile plus one halo cell per side) in shared
// memory.
__global__ void FD_kernel2(float *d_phi, float *d_phi_new, float cu, int n)
{
int li = threadIdx.x + 1; //local index in shared memory vector
int gi = blockDim.x * blockIdx.x + threadIdx.x + 1; // global memory index
int lstart = 0;
int lend = BLOCKSIZE + 1;
__shared__ float s_phi[BLOCKSIZE + 2]; //shared mem. vector
float result;
// Load Tile in shared memory
if (gi < n + 2)
s_phi[li] = d_phi[gi];
if (threadIdx.x == 0) // First Thread (in the current block): left halo
s_phi[lstart] = d_phi[gi - 1];
if (threadIdx.x == BLOCKSIZE - 1) // Last Thread: right halo
if (gi >= n + 1) // Last Block
// NOTE(review): stores the halo at slot (n+2) % BLOCKSIZE rather than
// lend -- this targets the partially-filled last tile; confirm the
// case where n+1 is an exact multiple of BLOCKSIZE.
s_phi[(n + 2) % BLOCKSIZE] = d_phi[n + 2];
else
s_phi[lend] = d_phi[gi + 1];
__syncthreads(); // tile + halo loaded before any thread reads
if (gi < n + 2)
{
// Lax-Friedrichs Update
result = 0.5 * ((s_phi[li + 1] + s_phi[li - 1]) - cu * (s_phi[li + 1] - s_phi[li - 1]));
d_phi_new[gi] = result;
}
// Boundary Conditions (copy-out ghost cells)
if (gi == 1)
d_phi_new[0] = d_phi_new[1];
if (gi == n + 1)
d_phi_new[n + 2] = d_phi_new[n + 1];
}
//******************************
//**** MAIN FUNCTION ***********
//******************************
//**** MAIN FUNCTION ***********
// 1-D linear advection solved with the Lax-Friedrichs scheme on the GPU
// (tiled shared-memory kernel) and on the CPU, then the results are compared
// element-wise and the speedup is reported.
int main(int argc, char *argv[])
{
//******************************
//Get GPU information
int devID;
cudaDeviceProp props;
cudaError_t err;
err = cudaGetDevice(&devID);
if (err != cudaSuccess)
{
cout << "ERRORRR" << endl;
}
cudaGetDeviceProperties(&props, devID);
printf("Device %d: \"%s\" with Compute %d.%d capability\n",
devID, props.name, props.major, props.minor);
cout << "Introduce number of points (1000-200000)" << endl;
cin >> n;
// Domain size (periodic)
float l = 10.0;
// Grid spacing
float dx = l / n;
// Advecting velocity
float u = 1.0;
//Timestep size
// NOTE(review): dt = 0.8*u*dx makes cu = 0.8*u*u; the usual CFL choice is
// dt = 0.8*dx/u -- confirm the intended formula (equivalent only for u = 1).
float dt = 0.8 * u * dx;
float tend = 2.5;
// Courant number
float cu = u * dt / dx;
//Number of steps to take
int nsteps = (int)ceil(tend / dt);
cout << "dx=" << dx << "... dt= " << dt << "...Courant= " << cu << endl;
cout << endl;
cout << "Number of time steps=" << nsteps << endl;
//Mesh arrays: n+3 floats = n+1 mesh points plus one ghost cell per side
float *phi = new float[n + 3];
float *phi_new = new float[n + 3];
float *phi_GPU = new float[n + 3];
// NOTE(review): VLA is a compiler extension in C++ and can put ~800 KB on
// the stack for n = 200000 -- consider heap allocation.
float xx[n + 1];
for (int i = 0; i <= n; i++)
xx[i] = -5.0 + i * dx;
// Initial values for phi--> Gaussian
for (int i = 0; i <= n; i++)
{
// Gaussian
phi[index(i)] = (1.0 / (2.0 * M_PI * 0.16)) * exp(-0.5 * (pow((xx[i] - 0.5), 2) / 0.01));
}
//**************************
// GPU phase
//**************************
int size = (n + 3) * sizeof(float);
// Allocation in device mem. for d_phi
float *d_phi = NULL;
err = cudaMalloc((void **)&d_phi, size);
if (err != cudaSuccess)
{
cout << "ALLOCATION ERROR" << endl;
}
// Allocation in device mem. for d_phi_new
float *d_phi_new = NULL;
err = cudaMalloc((void **)&d_phi_new, size);
if (err != cudaSuccess)
{
cout << "ALLOCATION ERROR" << endl;
}
// Take initial time
double t1 = clock();
// Impose Boundary Conditions
phi[index(-1)] = phi[index(0)];
phi[index(n + 1)] = phi[index(n)];
// Copy phi values to device memory
err = cudaMemcpy(d_phi, phi, size, cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
cout << "GPU COPY ERROR" << endl;
}
// *******************
// Time Step Iteration
// *******************
for (int k = 0; k < nsteps; k++)
{
int blocksPerGrid = (int)ceil((float)(n + 1) / BLOCKSIZE);
// ********* Kernel Launch ************************************
FD_kernel2<<<blocksPerGrid, BLOCKSIZE>>>(d_phi, d_phi_new, cu, n);
// ************************************************************
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch kernel! %d \n", err);
exit(EXIT_FAILURE);
}
// Ping-pong the device buffers for the next step
swap_pointers(&d_phi, &d_phi_new);
}
cudaMemcpy(phi_GPU, d_phi, size, cudaMemcpyDeviceToHost);
double Tgpu = clock();
Tgpu = (Tgpu - t1) / CLOCKS_PER_SEC;
//**************************
// CPU phase (reference solution with the same scheme)
//**************************
double t1cpu = clock();
for (int k = 0; k < nsteps; k++)
{
// Impose Boundary Conditions
phi[index(-1)] = phi[index(0)];
phi[index(n + 1)] = phi[index(n)];
for (int i = 0; i <= n; i++)
{
float phi_i = phi[index(i)];
float phi_ip1 = phi[index(i + 1)];
float phi_im1 = phi[index(i - 1)];
//Lax-Friedrichs
phi_new[index(i)] = 0.5 * ((phi_ip1 + phi_im1) - cu * (phi_ip1 - phi_im1));
}
swap_pointers(&phi, &phi_new);
}
double Tcpu = clock();
Tcpu = (Tcpu - t1cpu) / CLOCKS_PER_SEC;
cout << endl;
cout << "GPU Time= " << Tgpu << endl
<< endl;
cout << "CPU Time= " << Tcpu << endl
<< endl;
//**************************
// CPU-GPU comparison and error checking (abort on first diff > 1e-5)
//**************************
int passed = 1;
int i = 0;
while (passed && i < n)
{
double diff = fabs((double)phi_GPU[index(i)] - (double)phi[index(i)]);
if (diff > 1.0e-5)
{
passed = 0;
cout << "DIFF= " << diff << endl;
}
i++;
}
if (passed)
cout << "PASSED TEST !!!" << endl;
else
cout << "ERROR IN TEST !!!" << endl;
cout << endl;
cout << "Speedup (T_CPU/T_GPU)= " << Tcpu / Tgpu << endl;
// NOTE(review): phi/phi_new/phi_GPU and the device buffers are never freed;
// harmless at process exit but worth cleaning up.
return 0;
}
|
3,119 | #include <iostream>
#include <string.h>
#include <stdio.h>
#include <math.h>
using namespace std;
// Fills dval[i] = i for the first nword elements.
// Launch: 1-D grid of 1-D blocks, one element per thread.
__global__ void kernel(int* dval, int nword)
{
int tid = threadIdx.x;
int bid = blockIdx.x;
int i = blockDim.x*bid + tid;
// Guard against grids that overshoot the buffer. Previously `nword`
// was accepted but never used, so an oversized launch wrote out of bounds.
if (i < nword)
dval[i] = i;
}
int main( int argc, char** argv)
{
// Fill a device buffer with its own indices, copy it back, and report
// any element whose value does not match its index.
/*
int nb = 65535; // max 65535
int nthre = 512; // max 512
*/
int nb = 512; // max 65535
int nthre = 128; // max 512
int nword = nb * nthre;
int mem_size = sizeof(int) * nword;
printf("# threads: %d \n", nb*nthre);
printf("mem_size: %d Kbyte\n", mem_size >> 10);
int* hval = (int*) malloc(mem_size);
int* dval;
cudaMalloc( (void**) &dval, mem_size);
dim3 grid(nb);
dim3 threads(nthre);
kernel<<< grid, threads >>>(dval, nword);
// cudaMemcpy on the default stream implicitly waits for the kernel.
cudaMemcpy(hval, dval, mem_size, cudaMemcpyDeviceToHost);
// Verify the round trip element by element.
for(int i=0; i<nword; i++){
int z = hval[i];
if(i != z){
printf("%d, %d\n", i, z);
}
}
free(hval);
cudaFree(dval);
return (0);
}
|
3,120 | #include "includes.h"
__global__ void xMinDeltaIntegralReplicateKernel( const float *intData, float *tmpArray, const int nWindows, const int h, const int w, const float *xMin, const float *yMin, const float *yMax, const int strideH, const int strideW) {
// Appears to compute a finite difference of box sums w.r.t. the xMin
// border of each window, using an integral image `intData` indexed with
// a (w+1)-wide row stride. Each thread handles one (window, xOut, yOut)
// output cell and scales tmpArray in place by -delta.
// TODO: use block dim instead
const int hOut = (h + strideH - 1) / strideH;
const int wOut = (w + strideW - 1) / strideW;
// Decode a flat thread id into (yOut, xOut, windowIdx).
int id = BLOCK_SIZE * BLOCK_SIZE * blockIdx.x + threadIdx.x;
const int yOut = id % wOut; id /= wOut; // 0-indexed
const int xOut = id % hOut; id /= hOut; // 0-indexed
const int & windowIdx = id;
if (windowIdx < nWindows and xOut < hOut and yOut < wOut) {
// Strided output position (1-based into the integral image).
const int x = xOut*strideH + 1;
const int y = yOut*strideW + 1;
tmpArray += windowIdx * hOut * wOut; // jump to this window's output plane
// Window borders snapped to the integral-image grid.
const int xMinInt = (int)ceil(xMin[windowIdx]-1);
const int yMinInt = (int)ceil(yMin[windowIdx]-1);
// const int xMaxInt = (int)floor(xMax[windowIdx]);
const int yMaxInt = (int)floor(yMax[windowIdx]);
// Four-corner combination from the integral image, clamped to bounds.
float delta = 0;
delta +=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMaxInt, w))];
delta -=
intData[max(0,min(x+xMinInt , h-1))*(w+1)
+ max(0,min(y+yMinInt, w))];
delta +=
intData[max(0,min(x+xMinInt-1, h ))*(w+1)
+ max(0,min(y+yMinInt, w))];
// Zero the contribution when the border falls outside the image.
delta *= (x+xMinInt >= 1 and x+xMinInt < h);
tmpArray[xOut*wOut + yOut] *= -delta;
}
}
3,121 | #define COALESCED_NUM 16
#define blockDimX 16
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define A(y,x) A[(y)*WIDTH_A+(x)]
#define B(y,x) B[(y)*WIDTH_B+(x)]
#define C(y,x) C[(y)*WIDTH_C+(x)]
#define WIDTH_C 2048
#define WIDTH_B 16
#define WIDTH_A (2048+16)
__global__ void conv(float * A, float * B, float * C, int width, int height, int w, int h)
{
// Convolution-style kernel: each thread accumulates, over h rows of B,
// a 16-tap dot product against a window of A staged in shared memory.
// Assumes blockDimX == 16 threads in x (see the macros above).
__shared__ float shared_1[16]; // one 16-wide row of B
__shared__ float shared_0[32]; // 32-wide sliding window of A
int j;
float sum = 0;
for (j=0; j<h; j=(j+1))
{
// Cooperative load: each thread fetches two elements of A.
shared_0[(tidx+0)]=A(((idy+(( - 1)*j))+h), (idx+(( - 1)*0)));
shared_0[(tidx+16)]=A(((idy+(( - 1)*j))+h), ((idx+(( - 1)*0))+16));
__syncthreads();
{
int it_2;
shared_1[(tidx+0)]=B(j, (0+tidx));
__syncthreads();
#pragma unroll
for (it_2=0; it_2<16; it_2=(it_2+1))
{
float a;
float b;
// Index decreases with it_2: the A window is traversed in
// reverse (convolution flip) relative to B's row.
a=shared_0[((tidx+(( - 1)*(it_2+0)))+16)];
b=shared_1[it_2];
sum+=(a*b);
}
__syncthreads();
}
__syncthreads();
}
{
C(idy, idx)=sum;
}
}
|
3,122 | #include<iostream>
#include<ctime>
using namespace std;
#define O1
__global__ void add(int *a,int*b,unsigned int n) {
// Block-wise sum reduction: each block reduces its blockDim.x-element
// slice of `a` IN PLACE and writes the partial sum to b[blockIdx.x].
// The reduction strategy is chosen at compile time via O1/O2/O3.
unsigned int tid = threadIdx.x;
int *idata = a + blockIdx.x * blockDim.x; // this block's slice of `a`
unsigned int idx = tid + blockIdx.x * blockDim.x ;
// NOTE(review): threads that return here skip the __syncthreads()
// below — safe only when n is a multiple of blockDim.x (true for the
// 1<<20 / 512 configuration in main, but fragile in general).
if(idx >= n) {
//printf("%d,",blockIdx.x);
return;
}
//printf("%d, ",a[idx]);
#ifdef O3
// Sequential addressing: contiguous, divergence- and conflict-friendly.
for(int stride = blockDim.x/2;stride > 0;stride>>=1) {
if(tid < stride) {
idata[tid] += idata[tid + stride];
}
__syncthreads();
}
#else
for (int i = 1;i < blockDim.x;i *= 2) {
#ifdef O1
// Interleaved addressing with modulo (most divergent variant).
if((tid %(2 * i)) == 0) {
idata[tid] += idata[tid + i];
}
#endif
#ifdef O2
// Interleaved addressing without modulo (less divergence).
int index = 2*i*tid;
if(index < blockDim.x) {
idata[index] += idata[index + i];
}
#endif
__syncthreads();
}
#endif
// Thread 0 publishes this block's partial sum.
if (tid == 0) {b[blockIdx.x] = idata[0];}
}
int main(int argc,char*argv[]) {
    // Reduce a 1<<20-element array of ones on the GPU (per-block partial
    // sums) and on the CPU, and print both results and timings.
    int SIZE = 512;
    int N = 1<<20;
    dim3 block(SIZE,1);
    int num_gridx = (block.x + N - 1) / block.x;
    dim3 grid(num_gridx, 1);
    std::cout<<"grid: "<<grid.x<<" block: "<<block.x<<std::endl;
    // Heap-allocate the buffers: `int a[N]` was a 4 MB stack array (stack
    // overflow on most systems) and `ans[grid.x]` a non-standard VLA.
    int *a = new int[N];
    int *ans = new int[grid.x];
    auto init = [&](int* p, unsigned int size) -> void {
        for(unsigned int i = 0; i < size; i++) {
            //p[i] = random()%100;
            p[i] = 1;
        }
    };
    int *a_dev, *ans_dev;
    init(a, N);
    cudaMalloc((int**)(&a_dev),sizeof(int)*N);
    cudaMalloc((int**)(&ans_dev),sizeof(int)*grid.x);
    cudaMemcpy(a_dev, a, sizeof(int)*N, cudaMemcpyHostToDevice);
    cudaDeviceSynchronize();
    clock_t start,end;
    start = clock();
    add<<<grid, block>>>(a_dev, ans_dev, N);
    cudaDeviceSynchronize();
    end = clock();
    // clock() returns ticks; convert to milliseconds before printing
    // (the old code printed raw ticks under an "ms" label).
    cout<<"GPU time : "<<1000.0*(end - start)/CLOCKS_PER_SEC<<"ms"<<endl;
    start = clock();
    for(int i = 1;i < N;i++) {
        a[0] += a[i];
    }
    cout<<a[0]<<endl;
    end = clock();
    cout<<"CPU time : "<<1000.0*(end - start)/CLOCKS_PER_SEC<<"ms"<<endl;
    cudaMemcpy(ans, ans_dev, sizeof(int)*grid.x, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Fold the per-block partial sums into the final GPU result.
    int ret = 0;
    for (int i = 0;i < grid.x;i++) {
        ret += ans[i];
    }
    cout<<ret;
    delete[] a;
    delete[] ans;
    cudaFree(a_dev);
    cudaFree(ans_dev);
    return 0;
}
|
3,123 | #include <cstdio>
#include <cmath>
#include <algorithm>
#include <climits>
#include <cuda_runtime.h>
#include "CudaGillespie_cuda.cuh"
/*
Atomic-max function. You may find it useful for normalization.
We haven't really talked about this yet, but __device__ functions not
only are run on the GPU, but are called from within a kernel.
Source:
http://stackoverflow.com/questions/17399119/
cant-we-use-atomic-operations-for-floating-point-variables-in-cuda
*/
// Atomic minimum for floats, built on atomicCAS (CUDA has no native
// float atomicMin): repeatedly CAS in min(val, current) until the value
// sticks. Returns the PREVIOUS value stored at *address.
__device__ static float atomicMin(float* address, float val)
{
int* address_as_i = (int*) address;
int old = *address_as_i, assumed;
do {
assumed = old;
old = ::atomicCAS(address_as_i, assumed,
__float_as_int(::fminf(val, __int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
/*
* Gillespie kernel. Each call to the kernel will advance each simulation
* by one step.
*
* We use the random numbers in rand_reactions to decide which transition
* occurs, and the random numbers in rand_times to decide on a dt.
*
* The times in simulation_times get updated with the calculated dt,
* and the concentrations/states may or may not get updated depending on the
* transition.
*/
__global__
void
cudaGillespieTimestepKernel(const float *rand_reactions,
const float *rand_times,
float *simulation_times,
float *simulation_concentrations,
State *simulation_states,
const unsigned int b,
const unsigned int g,
const float k_on,
const float k_off,
const unsigned int num_simulations) {
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop: go through every simulation.
while (thread_index < num_simulations) {
float rand_reaction = rand_reactions[thread_index];
float curr_conc = simulation_concentrations[thread_index];
State curr_state = simulation_states[thread_index];
float lambda = 0;
if (curr_state == OFF) {
// lambda = sum of rate parameters (switch-on + decay).
lambda = k_on + (curr_conc * g);
float cutoff = k_on / lambda;
if (rand_reaction < cutoff) {
// Flip to on
simulation_states[thread_index] = ON;
} else {
// Decay
// NOTE(review): nothing here prevents the concentration going
// negative when it is already 0 — confirm the rates make the
// decay branch unreachable at conc == 0.
simulation_concentrations[thread_index]--;
}
} else {
// lambda = sum of rate parameters (switch-off + growth + decay).
lambda = k_off + b + (curr_conc * g);
float cutoff1 = k_off / lambda;
float cutoff2 = cutoff1 + (b / lambda);
if (rand_reaction < cutoff1) {
// Flip to off
simulation_states[thread_index] = OFF;
} else if (rand_reaction < cutoff2) {
// Grow
simulation_concentrations[thread_index]++;
} else {
// Decay
simulation_concentrations[thread_index]--;
}
}
// Exponentially distributed waiting time with rate lambda.
float rand_time = rand_times[thread_index];
simulation_times[thread_index] += -log(rand_time) / lambda;
// Update thread_index.
thread_index += blockDim.x * gridDim.x;
}
}
/*
 * Host wrapper: launches the Gillespie timestep kernel on the default
 * stream with the given launch configuration. No launch-error checking
 * is done here; callers should use cudaGetLastError() if needed.
 */
void cudaCallGillespieTimestepKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *rand_reactions,
const float *rand_times,
float *simulation_times,
float *simulation_concentrations,
State *simulation_states,
const unsigned int b,
const unsigned int g,
const float k_on,
const float k_off,
const unsigned int num_simulations) {
cudaGillespieTimestepKernel<<<blocks, threads_per_block>>>(rand_reactions,
rand_times, simulation_times, simulation_concentrations,
simulation_states, b, g, k_on, k_off, num_simulations);
}
/*
* Resampling kernel. After each iteration of the Gillespie algorithm, update
* the values in an array of uniformly spaced samples. We use 1000 points
* "evenly" spaced from 0 to 100.
*
* For each simulation, check its current time. If the index corresponding
* to this time exceeds the last filled index, fill up to the current index.
* Then, update the last filled index.
*/
__global__
void
cudaResamplingKernel(float *simulation_samples,
int *last_sample_indices,
const float *simulation_times,
const float *simulation_concentrations,
const unsigned int num_simulations) {
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
// Grid-stride loop: go through every simulation.
while (thread_index < num_simulations) {
// This simulation's row of SAMPLE_SIZE uniformly spaced samples.
float *curr_sample = simulation_samples + (thread_index * SAMPLE_SIZE);
float curr_time = simulation_times[thread_index];
int last_sample_index = last_sample_indices[thread_index];
// Sample slot corresponding to the current simulation time.
int curr_index = curr_time / ((float) SAMPLE_TIME / SAMPLE_SIZE);
// If the index corresponding to the current simulation time is
// beyond the last sample index, populate the array up to and
// including curr_index with the current concentration.
if (curr_index > last_sample_index
&& last_sample_index < SAMPLE_SIZE) {
float curr_conc = simulation_concentrations[thread_index];
while (last_sample_index <= curr_index
&& last_sample_index < SAMPLE_SIZE) {
curr_sample[last_sample_index++] = curr_conc;
}
}
// Update last_sample_indices in GPU memory.
last_sample_indices[thread_index] = last_sample_index;
// Update thread_index.
thread_index += blockDim.x * gridDim.x;
}
}
/*
 * Host wrapper: launches the resampling kernel on the default stream.
 * (The original comment said "Gillespie kernel" — copy-paste leftover.)
 */
void cudaCallResamplingKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
int *last_sample_indices,
const float *simulation_times,
const float *simulation_concentrations,
const unsigned int num_simulations) {
cudaResamplingKernel<<<blocks, threads_per_block>>>(simulation_samples,
last_sample_indices, simulation_times, simulation_concentrations,
num_simulations);
}
/*
 * Minimum kernel. Used to find the minimum in an array of floats. Mainly
 * copied from the "maximum kernel" from lab 3.
 * Requires blockDim.x floats of dynamic shared memory.
 */
__global__
void
cudaMinimumKernel(const float *simulation_times,
float *min_val,
const unsigned int num_simulations) {
extern __shared__ float partial_outputs[];
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
// INT_MAX as a float sentinel: assumes all times are below ~2.1e9.
float thread_min = INT_MAX;
while (thread_index < num_simulations) {
// Track the smallest time seen by this thread (grid-stride loop).
thread_min = min(thread_min, simulation_times[thread_index]);
thread_index += blockDim.x * gridDim.x;
}
partial_outputs[threadIdx.x] = thread_min;
// Make sure all threads in block finish before continuing.
__syncthreads();
// Use the first thread in the block to calculate the block's min
// (serial scan of shared memory; simple but not the fastest reduction).
if (threadIdx.x == 0) {
float block_min = INT_MAX;
for (uint thread_idx = 0; thread_idx < blockDim.x; ++thread_idx) {
block_min = min(block_min, partial_outputs[thread_idx]);
}
// Fold this block's minimum into the global result.
atomicMin(min_val, block_min);
}
}
/*
 * Helper function to call minimum kernel. Allocates one float of dynamic
 * shared memory per thread in the block.
 */
void cudaCallMinimumKernel(const unsigned int blocks,
const unsigned int threads_per_block,
const float *simulation_times,
float *min_val,
const unsigned int num_simulations) {
cudaMinimumKernel<<<blocks, threads_per_block, threads_per_block * sizeof(float)>>>(
simulation_times, min_val, num_simulations);
}
/*
 * Mean kernel. For each timepoint, we want to get the mean value for all the
 * simulations. This means we must sum the values of all the simulations at
 * that timepoint, then divide by the total number of simulations.
 * Requires blockDim.x floats of dynamic shared memory. The per-block
 * partial mean is folded into sample_means[sample_index] with atomicAdd —
 * assumes that slot starts at 0 (confirm the caller zeroes it).
 */
__global__
void
cudaMeanKernel(float *simulation_samples,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations) {
extern __shared__ float sdata[];
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
sdata[threadIdx.x] = 0;
// Grid-stride loop: accumulate this thread's share of the samples.
while (thread_index < num_simulations) {
float *curr_sample = simulation_samples + (thread_index * SAMPLE_SIZE);
float sample_conc = curr_sample[sample_index];
sdata[threadIdx.x] += sample_conc;
// Update thread_index.
thread_index += blockDim.x * gridDim.x;
}
__syncthreads();
// Use the first thread in the block to calculate the block's sum
// (serial scan of shared memory).
if (threadIdx.x == 0) {
float block_sum = 0;
for (uint thread_idx = 0; thread_idx < blockDim.x; ++thread_idx) {
block_sum += sdata[thread_idx];
}
block_sum /= (float) num_simulations;
atomicAdd(sample_means + sample_index, block_sum);
}
}
/*
 * Helper function to call mean kernel. Allocates one float of dynamic
 * shared memory per thread in the block.
 */
void cudaCallMeanKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations) {
cudaMeanKernel<<<blocks, threads_per_block, threads_per_block * sizeof(float)>>>(
simulation_samples, sample_means, sample_index,
num_simulations);
}
/*
 * Variance kernel. For each timepoint, we want to get the variance for all the
 * simulations. This means we must take sum the squared differences from the mean
 * at that timepoint, then divide by the total number of simulations. We rely
 * on the fact that sample_means has been populated at sample_index
 * (i.e. sample_means[sample_index] holds the correct average) for this
 * kernel to work.
 * Requires blockDim.x floats of dynamic shared memory; the per-block
 * partial is folded into sample_vars[sample_index] with atomicAdd —
 * assumes that slot starts at 0 (confirm the caller zeroes it).
 */
__global__
void
cudaVarianceKernel(float *simulation_samples,
float *sample_vars,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations) {
extern __shared__ float sdata[];
// Get current thread's index.
unsigned int thread_index = blockIdx.x * blockDim.x + threadIdx.x;
float average = sample_means[sample_index];
sdata[threadIdx.x] = 0;
// Grid-stride loop: accumulate squared deviations for this thread.
while (thread_index < num_simulations) {
float *curr_sample = simulation_samples + (thread_index * SAMPLE_SIZE);
float sample_conc = curr_sample[sample_index];
float sq_diff = powf(average - sample_conc, 2);
sdata[threadIdx.x] += sq_diff;
// Update thread_index.
thread_index += blockDim.x * gridDim.x;
}
__syncthreads();
// Use the first thread in the block to calculate the block's sum
// (serial scan of shared memory).
if (threadIdx.x == 0) {
float block_sum = 0;
for (uint thread_idx = 0; thread_idx < blockDim.x; ++thread_idx) {
block_sum += sdata[thread_idx];
}
block_sum /= (float) num_simulations;
atomicAdd(sample_vars + sample_index, block_sum);
}
}
/*
 * Helper function to call variance kernel. Allocates one float of dynamic
 * shared memory per thread in the block.
 */
void cudaCallVarianceKernel(const unsigned int blocks,
const unsigned int threads_per_block,
float *simulation_samples,
float *sample_vars,
float *sample_means,
const unsigned int sample_index,
const unsigned int num_simulations) {
cudaVarianceKernel<<<blocks, threads_per_block, threads_per_block * sizeof(float)>>>(
simulation_samples, sample_vars, sample_means,
sample_index, num_simulations);
}
|
3,124 | #include "includes.h"
// Resets every histogram bin counter to zero.
// One thread per bin; threads past the end of the array simply exit.
__global__ void binZeros(int *d_bin_count, int bin_size){
const int bin = blockIdx.x * blockDim.x + threadIdx.x;
if (bin >= bin_size)
return;
d_bin_count[bin] = 0;
}
3,125 |
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <math.h>
#include <complex.h>
#include <cuda_runtime.h>
#include <utility>
#include <sys/time.h>
#define K 3
#define BLCH 8
#define BLCW 32
__constant__ float filter[K*K];
// Host reference: sparse (CSR) convolution of imgN images with an
// nF x nF filter `f`, writing into `out` (convH x convW per image).
//
// CSR layout per image i: pos[i*(imgH+1)+r] .. pos[i*(imgH+1)+r+1]
// delimit row r's nonzeros; coor[] holds their columns, img_csr[] the
// values; `num` is the per-image stride of coor. bh/bw are unused.
//
// NOTE(review): contributions are written with `=` (not `+=`), so
// overlapping filter taps overwrite each other, and row/column 0 is
// skipped by the `fi>0`/`fj>0` tests. This mirrors compute_gpu_csr;
// confirm the intended math before "fixing" either copy.
int compute_csr(float *img_csr, float *f, float * out, int *pos, int *coor, int num, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW) {
int con, imgg, ind1, ind2, pj, infi, infj;
for (int i = 0; i < imgN; i++){
con = i * convW * convH;
imgg = i * imgW * imgH;
// Visit the input image through its CSR rows.
for (int pi = 0; pi<imgH; pi++){
ind1 = pos[pi+i*(imgH+1)];
ind2 = pos[pi+1+i*(imgH+1)];
for(int ci = ind1; ci<ind2; ci++){
pj = coor[ci+i*num];
float value = img_csr[ci+imgg];
// Scatter this nonzero into every output cell its filter support touches.
for (int fi = pi-nF+1; fi<=pi; fi++){
if (fi>0 && fi<convH){
for (int fj = pj-nF+1; fj<=pj; fj++){
if (fj>0 && fj<convW){
infi = fi-(pi-nF+1);
infj = fj-(pj-nF+1);
out[fi*convW+fj+con] = value * f[infi*nF+infj];
}
}
}
}
}
}
}
// Previously this non-void function fell off the end without a return
// statement (undefined behavior in C++); report success explicitly.
return 0;
}
// Dense GPU convolution (commented out in main; kept for reference).
// One thread per (idX, idY) output coordinate, looping over all images.
__global__ void compute_gpu(float *img, float *out, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW){
int idX = blockDim.x * blockIdx.x + threadIdx.x;
int idY = blockDim.y * blockIdx.y + threadIdx.y;
for (int i = 0; i < imgN; i++){
int con = i * convW * convH;
int imgg = i * imgW * imgH;
if (idX < convH && idY < convW){
int ind = idY * convW + idX + con;
int inm = idY * imgW + idX + imgg;
// NOTE(review): inm and inf accumulate across loop iterations
// instead of being recomputed per (fi, fj); img is indexed with
// inm+imgg (image offset added twice) and out with ind+con
// (plane offset added twice). This kernel looks incorrect —
// validate against a host reference before enabling it.
for (int fi = 0; fi < nF; fi++){
inm += fi * imgW;
int inf = fi*nF;
for (int fj = 0; fj < nF; fj++){
inf += fj;
inm += fj;
float value = img[inm+imgg];
if (value != 0){
out[ind+con] += value * filter[inf];
}
}
}
}
}
}
// CSR-based GPU convolution: one thread per image row (pi). Each thread
// scatters its row's nonzeros into every output cell their nF x nF filter
// support touches, reading weights from __constant__ memory `filter`.
__global__ void compute_gpu_csr(float *img_csr, float * out, int *pos, int *coor, int num, int bh, int bw, int imgH, int imgW, int imgN, int nF, int convH, int convW) {
int pi = blockDim.x * blockIdx.x + threadIdx.x;
int con, imgg, ind1, ind2, pj, infi, infj;
for (int i = 0; i < imgN; i++){
con = i * convW * convH;
imgg = i * imgW * imgH;
//Visit input image by csr
if(pi<imgH){
ind1 = pos[pi+i*(imgH+1)];
ind2 = pos[pi+1+i*(imgH+1)];
for(int ci = ind1; ci<ind2; ci++){
pj = coor[ci+i*num];
float value = img_csr[ci+imgg];
//For every element, compute all filters
for (int fi = pi-nF+1; fi<=pi; fi++){
if (fi>0 && fi<convH){
for (int fj = pj-nF+1; fj<=pj; fj++){
if (fj>0 && fj<convW){
infi = fi-(pi-nF+1);
infj = fj-(pj-nF+1);
// NOTE(review): plain `=` (not `+=`) means contributions
// from different nonzeros overwrite each other, and rows
// within nF of each other race on the same cells — verify
// the intended semantics.
out[fi*convW+fj+con] = value * filter[infi*nF+infj];
}
}
}
}
}
}
}
}
int main(int argc, char **argv){
    // Benchmarks the CSR convolution kernel on imgN random sparse images.
    int imgH = 2048;
    int imgW = 2048;
    int imgN = 10;
    int blcH = BLCH;
    int blcW = BLCW;
    int k = K;
    int s = 1;
    int nB = (imgH * imgW) / (blcH * blcW);
    //int nT = (blcW+k) * (blcH+k);
    int nT = blcW * blcH;
    int imgDims = imgH * imgW * imgN;
    int num = 0;
    srand (time(NULL));
    // Host image stack: roughly 10% random nonzero pixels, rest zero.
    // `num` counts nonzeros over ALL images combined.
    float *h_img = new float[imgDims];
    for(int n = 0; n < imgN; n++){
        for(int i = 0; i < imgH; i++){
            for (int j = 0; j < imgW; j++){
                if (rand() % 10 == 0){
                    num++;
                    h_img[i*imgW + j + n*imgH*imgW] = (float)(rand()%10485)/10485;
                }
                else{
                    h_img[i*imgW + j + n*imgH*imgW] = 0.0;
                }
            }
        }
    }
    // Build a CSR-like structure for each image.
    // NOTE(review): this construction still looks wrong — it indexes
    // h_img with `+n` instead of `+n*imgH*imgW`, writes row offsets to
    // pos[i] regardless of the image index, and `num` is the TOTAL
    // nonzero count while the kernel treats it as a per-image stride.
    // Only the heap-corrupting write pos[n*imgH*imgW] (far out of the
    // (imgH+1)*imgN allocation for n >= 1) is fixed here.
    int *pos = new int[(imgH+1)*imgN];
    int *coor = new int[num*imgN];
    float *h_imgcsr = new float[num*imgN];
    int csrimgSize = num*imgN*sizeof(float);
    int csrposSize = (imgH+1)*imgN*sizeof(int);
    int csrcooSize = num*imgN*sizeof(int);
    for(int n = 0; n < imgN; n++){
        pos[n*(imgH+1)] = 0;   // was pos[0 + n*imgH*imgW]: out of bounds
        int index_p = 0;
        for(int i = 0; i < imgH; i++){
            for (int j = 0; j < imgW; j++){
                if (h_img[i*imgW + j + n] != 0){
                    coor[index_p + n] = j;
                    h_imgcsr[index_p + n] = h_img[i*imgW + j + n];
                    index_p++;
                }
            }
            pos[i] = index_p;
        }
    }
    // Random filter, copied to __constant__ memory below.
    int filterDims = k * k;
    int filterSize = filterDims * sizeof(float);
    float *f = new float[filterDims];
    for(int i = 0; i < filterDims; i++){
        f[i] = (float)(rand()%10485)/10485;
    }
    // Output buffer (one image's worth, as in the original code).
    int convH = ( (imgH - k) / s ) + 1;
    int convW = ( (imgW - k) / s ) + 1;
    int convDims = convH * convW;
    int convSize = convDims * sizeof(float);
    float *h_convolved = new float[convDims];
    for(int i = 0; i < convDims; i++){
        h_convolved[i] = 0.0;
    }
    // Device allocations and uploads.
    float *d_img_csr;
    float *d_convolved;
    int *d_pos;
    int *d_coor;
    cudaMemcpyToSymbol(filter, f, filterSize);
    cudaMalloc((void **) &d_convolved, convSize);
    cudaMemcpy(d_convolved, h_convolved, convSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_img_csr, csrimgSize);
    cudaMemcpy(d_img_csr, h_imgcsr, csrimgSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_pos, csrposSize);
    cudaMemcpy(d_pos, pos, csrposSize, cudaMemcpyHostToDevice);
    cudaMalloc((void **) &d_coor, csrcooSize);
    cudaMemcpy(d_coor, coor, csrcooSize, cudaMemcpyHostToDevice);
    struct timeval starttime, endtime;
    double elapsed = 0.0;
    for (int i = 0; i < 10000; i++){
        gettimeofday(&starttime, NULL);
        // call the kernel
        //compute_gpu<<<nB, nT>>>(d_img, d_convolved, blcH, blcW, imgH, imgW, imgN, k, convH, convW);
        compute_gpu_csr<<<nB, nT>>>(d_img_csr, d_convolved, d_pos, d_coor, num, blcH, blcW, imgH, imgW, imgN, k, convH, convW);
        // Kernel launches are asynchronous: synchronize before reading the
        // clock, otherwise only the launch overhead was being measured.
        cudaDeviceSynchronize();
        gettimeofday(&endtime, NULL);
        elapsed += ((endtime.tv_sec-starttime.tv_sec)*1000000 + endtime.tv_usec-starttime.tv_usec)/1000000.0;
    }
    cudaMemcpy(h_convolved, d_convolved, convSize, cudaMemcpyDeviceToHost);
    cudaDeviceReset();
    printf("Input imgH: %d imgW: %d imgN: %d\n", imgH, imgW, imgN);
    printf("Tile width: %d height: %d\n", blcW, blcH);
    printf("Block number: %d, block size: %d \n", nB, nT);
    printf("time: %f \n", elapsed);
    // Arrays from new[] must be released with delete[] (plain `delete`
    // here was undefined behavior); `f` was previously leaked entirely.
    delete[] h_img;
    delete[] h_convolved;
    delete[] pos;
    delete[] h_imgcsr;
    delete[] coor;
    delete[] f;
    return 0;
}
|
3,126 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
// Writes with a configurable misalignment: c[gid + offset] = a[gid] + b[gid].
// Used to measure the cost of misaligned global-memory stores.
__global__ void misaligned_write_test(float* a, float* b, float *c, int size, int offset)
{
	const int gid = blockIdx.x * blockDim.x + threadIdx.x;
	const int dst = gid + offset;
	if (dst >= size)
		return;
	c[dst] = a[gid] + b[gid];
}
//int main(int argc, char** argv)
//{
// printf("Runing 1D grid \n");
// int size = 1 << 25;
// int block_size = 128;
// unsigned int byte_size = size * sizeof(float);
// int offset = 0;
//
// if (argc > 1)
// offset = atoi(argv[1]);
//
// printf("Input size : %d \n", size);
//
// float * h_a, *h_b, *h_ref;
// h_a = (float*)malloc(byte_size);
// h_b = (float*)malloc(byte_size);
// h_ref = (float*)malloc(byte_size);
//
//
// if (!h_a)
// printf("host memory allocation error \n");
//
// for (size_t i = 0; i < size; i++)
// {
// h_a[i] = i % 10;
// h_b[i] = i % 7;
// }
//
// dim3 block(block_size);
// dim3 grid((size + block.x - 1) / block.x);
//
// printf("Kernel is lauch with grid(%d,%d,%d) and block(%d,%d,%d) \n",
// grid.x, grid.y, grid.z, block.x, block.y, block.z);
//
// float *d_a, *d_b, *d_c;
//
// cudaMalloc((void**)&d_a, byte_size);
// cudaMalloc((void**)&d_b, byte_size);
// cudaMalloc((void**)&d_c, byte_size);
// cudaMemset(d_c, 0, byte_size);
//
// cudaMemcpy(d_a, h_a, byte_size, cudaMemcpyHostToDevice);
// cudaMemcpy(d_b, h_b, byte_size, cudaMemcpyHostToDevice);
//
// misaligned_write_test << <grid, block >> > (d_a, d_b, d_c, size, offset);
//
// cudaDeviceSynchronize();
// cudaMemcpy(h_ref, d_c, byte_size, cudaMemcpyDeviceToHost);
//
// cudaFree(d_c);
// cudaFree(d_b);
// cudaFree(d_a);
// free(h_ref);
// free(h_b);
// free(h_a);
//} |
3,127 | #include "includes.h"
// Naive dense matrix multiply: C = A * B.
// A is (sizeC/nColsB) x nColsA, B is nColsA x nColsB, C has sizeC elements.
// One thread per element of C, from a 2-D launch grid.
__global__ void MatMultipl_naive (float * A, float * B, float * C , int nColsA , int nColsB , int sizeC ) {
int i_col = blockIdx.x * blockDim.x + threadIdx.x; /// index in row
int i_row = blockIdx.y * blockDim.y + threadIdx.y; /// index in column
int nRows = sizeC / nColsB; // number of rows in C (and in A)
// Guard each coordinate separately. The old flat test
// `i_row*nColsB + i_col < sizeC` let threads with i_col >= nColsB
// alias into the next row of C and read A/B out of their logical bounds.
if (i_row < nRows && i_col < nColsB){
float Cvalue = 0;
for (int e=0; e < nColsA; e++)
Cvalue += A[i_row * nColsA + e] * B[e * nColsB + i_col];
C[i_row * nColsB + i_col] = Cvalue;
}
}
3,128 | #include <stdio.h>
#include <cuda_runtime.h>
#include <time.h>
#include <vector>
using namespace std;
const int GPUs[] = {0,1,2,3,4}; // If left blank all available GPUs will be used.
vector<int> g(GPUs, GPUs + sizeof(GPUs)/sizeof(int));
// Allocates a source and a destination buffer of `size` bytes on every
// GPU in the global list `g`, and enables peer-to-peer access between
// every pair of GPUs that supports it.
void configure(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
cudaSetDevice(g[i]);
cudaMalloc(&buffer_s[i], size);
cudaMalloc(&buffer_d[i], size);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
cudaDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
// Enable P2P in both directions for this pair.
// NOTE(review): each unordered pair is visited twice (i,j and
// j,i); the second enable returns an "already enabled" error
// that is silently ignored.
cudaSetDevice(g[i]);
cudaDeviceEnablePeerAccess(g[j], 0);
cudaDeviceSynchronize();
cudaSetDevice(g[j]);
cudaDeviceEnablePeerAccess(g[i], 0);
cudaDeviceSynchronize();
}
}
}
}
}
// Tears down what configure() set up: frees each GPU's buffers and
// disables peer access between every pair that supports it.
void reset(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
for (int i=0; i<g.size(); i++)
{
cudaSetDevice(g[i]);
cudaFree(buffer_s[i]);
cudaFree(buffer_d[i]);
for (int j=0; j<g.size(); j++)
{
int access;
if (i!=j)
{
cudaDeviceCanAccessPeer(&access, g[i], g[j]);
if (access)
{
// Disable P2P in both directions (each unordered pair is
// visited twice; the second disable fails harmlessly).
cudaSetDevice(g[i]);
cudaDeviceDisablePeerAccess(g[j]);
cudaDeviceSynchronize();
cudaSetDevice(g[j]);
cudaDeviceDisablePeerAccess(g[i]);
cudaDeviceSynchronize();
}
}
}
}
}
// Measures peer-to-peer copy time and bandwidth for every ordered GPU
// pair, one transfer at a time, then prints a time matrix and a
// bandwidth matrix.
// NOTE(review): time_taken/bw are runtime-sized stack arrays (VLAs,
// non-standard C++). The memcpy below transfers buffer_d[j] on GPU j
// INTO buffer_s[i] on GPU i — i.e. j -> i, the reverse of what the
// progress message prints. The stored time is ms*1e3 (microseconds)
// though the table header says ms, and "Gbps" is really GiB/s.
void blocked_copy(size_t size, vector<int*> &buffer_s, vector<int*> &buffer_d)
{
float time_taken[g.size()*g.size()], bw[g.size()*g.size()];
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
printf("\nBlocked Memory Transfers: Only one memory transfer at a time\n");
for (int i=0; i<g.size(); i++)
{
for (int j=0; j<g.size(); j++)
{
if (i!=j)
{
printf("Copying from %d to %d\n", g[i], g[j]);
cudaEventRecord(start);
cudaMemcpyPeerAsync(buffer_s[i],g[i],buffer_d[j],g[j], size);
cudaEventRecord(stop);
cudaDeviceSynchronize();
cudaEventSynchronize(stop);
float time_ms=0.0;
cudaEventElapsedTime(&time_ms, start, stop);
time_taken[i*g.size()+j] = time_ms*1e3;
bw[i*g.size()+j] = (float)size*1000/time_ms/(1<<30);
}
}
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
// Print the timing matrix (rows = destination, columns = source).
printf("Time(ms) spent in memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
{
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", time_taken[i*g.size()+j]);
}
printf("\n");
}
// Print the bandwidth matrix in the same layout.
printf("bandwidth(Gbps) utilized during memcpy\n");
printf(" D\\D");
for (int j=0; j<g.size(); j++)
printf("%10d ", g[j]);
printf("\n");
for (int i=0; i<g.size(); i++)
{
printf("%6d", g[i]);
for (int j=0; j<g.size(); j++)
if (i==j)
printf("%12.4f", 0.0);
else
printf("%12.4f", bw[i*g.size()+j]);
printf("\n");
}
}
// Allocates per-GPU source/destination buffers, runs the blocked-copy
// benchmark across all GPU pairs, then tears everything down again.
void perf_analyze(size_t size)
{
vector<int*> src_buffers(g.size());
vector<int*> dst_buffers(g.size());
configure(size, src_buffers, dst_buffers);
blocked_copy(size, src_buffers, dst_buffers);
reset(size, src_buffers, dst_buffers);
}
int main(int argc, char** argv)
{
// NVLink D<->D performance
size_t size = (1<<30); // 1 GiB per transfer
// If the GPU list `g` is empty, fall back to all detected GPUs.
if (!g.size())
{
int n;
// NOTE(review): the message hard-codes "8 GPUs" but the code uses
// however many devices cudaGetDeviceCount reports.
printf("Using all 8 GPUs\n");
cudaGetDeviceCount(&n);
for (int i=0; i<n; i++)
g.push_back(i);
}
//define size
perf_analyze(size);
return 0;
}
|
3,129 | #include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#define DEFAULT_THRESHOLD 4000
#define DEFAULT_FILENAME "BWstop-sign.ppm"
// Reads a binary (P6) PPM file and returns a freshly malloc'd array of
// width*height unsigned ints holding the RED channel of each pixel.
// On success *xsize, *ysize and *maxval are filled in; returns NULL on
// any open/parse/allocation failure. Caller frees the returned array.
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp)
{
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
int num = fread(chars, sizeof(char), 1000, fp);
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
fclose(fp); // previously leaked on this path
return NULL;
}
// Parse width/height/maxval from the header text.
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
fclose(fp); // previously leaked on this path
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
if ((*maxval) > 255) bufsize *= 2; // 16-bit samples take two bytes each
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
free(pic);
fclose(fp);
return NULL; // fail but return
}
// Locate the start of the pixel data by scanning past the three header
// numbers we just parsed (width, height, maxval).
char duh[80];
char *line = chars;
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, (int)(line - chars));
line += strlen(duh) + 1;
long offset = line - chars;
fseek(fp, offset, SEEK_SET); // move to the correct offset
long numread = fread(buf, sizeof(char), bufsize, fp);
// bufsize is an int; cast so it matches the %ld conversion
// (passing int through %ld was undefined behavior).
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, (long)bufsize);
fclose(fp);
// Keep only the red channel of each RGB triple.
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++) pic[i] = (int) buf[3*i]; // red channel
free(buf); // previously leaked
return pic; // success
}
// Writes `pic` (xsize*ysize grayscale values, one int per pixel) as a
// binary P6 PPM, replicating each value into the R, G and B channels.
void write_ppm( char *filename, int xsize, int ysize, int maxval, int *pic) {
FILE *fp;
fp = fopen(filename, "wb"); // "wb": P6 pixel data is binary
if (!fp)
{
// The format string expected a filename argument that was never
// passed (undefined behavior); supply it.
fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
exit(-1);
}
fprintf(fp, "P6\n");
fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
int numpix = xsize * ysize;
for (int i=0; i<numpix; i++) {
unsigned char uc = (unsigned char) pic[i];
fprintf(fp, "%c%c%c", uc, uc, uc);
}
fclose(fp);
}
int main( int argc, char **argv )
{
    // Sequential Sobel edge detection over a numbered sequence of PPM
    // frames, timed with CUDA events (the filtering itself runs on the CPU).
    int thresh = DEFAULT_THRESHOLD;
    int number_of_files = 20000;//21312;
    cudaEvent_t start_event, stop_event;
    float seq_time_gpu;
    if(argc > 1) {
        number_of_files = atoi(argv[1]);
    }
    cudaEventCreate(&start_event);
    cudaEventCreate(&stop_event);
    cudaEventRecord(start_event, 0);
    for(int k = 1; k <= number_of_files; k++) {
        // 64 bytes: the output path is 38 chars + NUL, which overflowed
        // the previous 36-byte allocations (heap corruption).
        char *in_filename = (char*)malloc(64 * sizeof(char));
        char *out_filename = (char*)malloc(64 * sizeof(char));
        sprintf(in_filename, "./sintel/sintel%03d.ppm", k);
        sprintf(out_filename, "./sintel-sobel-seq/sintel-sobel%03d.ppm", k);
        int xsize, ysize, maxval;
        unsigned int *pic = read_ppm( in_filename, &xsize, &ysize, &maxval );
        if (!pic) {
            // read_ppm failure previously left xsize/ysize uninitialized
            // and crashed below; skip the frame instead.
            fprintf(stderr, "skipping frame %d: read_ppm failed\n", k);
            free(in_filename);
            free(out_filename);
            continue;
        }
        int numbytes = xsize * ysize * sizeof( int ); // was 3x over-allocated
        int *result = (int *) malloc( numbytes );
        if (!result) {
            fprintf(stderr, "sobel() unable to malloc %d bytes\n", numbytes);
            exit(-1); // fail
        }
        int i, j, magnitude, sum1, sum2;
        // Zero the output frame first.
        int *out = result;
        for (int col=0; col<ysize; col++) {
            for (int row=0; row<xsize; row++) {
                *out++ = 0;
            }
        }
        // 3x3 Sobel on interior pixels: threshold the squared gradient
        // magnitude into a binary (0/255) image.
        for (i = 1; i < ysize - 1; i++) {
            for (j = 1; j < xsize -1; j++) {
                int offset = i*xsize + j;
                sum1 = pic[ xsize * (i-1) + j+1 ] - pic[ xsize*(i-1) + j-1 ]
                + 2 * pic[ xsize * (i) + j+1 ] - 2 * pic[ xsize*(i) + j-1 ]
                + pic[ xsize * (i+1) + j+1 ] - pic[ xsize*(i+1) + j-1 ];
                sum2 = pic[ xsize * (i-1) + j-1 ] + 2 * pic[ xsize * (i-1) + j ] + pic[ xsize * (i-1) + j+1 ]
                - pic[xsize * (i+1) + j-1 ] - 2 * pic[ xsize * (i+1) + j ] - pic[ xsize * (i+1) + j+1 ];
                magnitude = sum1*sum1 + sum2*sum2;
                if (magnitude > thresh)
                    result[offset] = 255;
                else
                    result[offset] = 0;
            }
        }
        write_ppm( out_filename, xsize, ysize, 255, result);
        // Release per-frame allocations (previously leaked every iteration).
        free(pic);
        free(result);
        free(in_filename);
        free(out_filename);
    }
    cudaEventRecord(stop_event, 0);
    cudaEventSynchronize(stop_event);
    cudaEventElapsedTime(&seq_time_gpu,start_event, stop_event);
    printf("Sequential Time: %.2f msec\n", seq_time_gpu);
    fprintf(stderr, "sobel done\n");
}
3,130 | #include <iostream>
#include <iterator>
#include <fstream>
#include <vector>
#include <stdlib.h>
#include <stdio.h>
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>
using namespace std;
void validateNumOfArgs(int argc);
vector<unsigned long long> readNumbersFromFile(char* path);
__device__ unsigned long long modPow(unsigned long long a, unsigned int n, unsigned long long p);
__global__ void checkPrimes(int size, bool* d_answers, unsigned long long* d_numbers, curandState* curandStates, int iters=100);
__global__ void setupCurand(curandState* states);
int THREADS = 1024;
// Fermat primality test on the GPU: reads one number per line from the file
// given as argv[1], tests each number in its own thread, and prints
// "prime"/"composite" per input plus the kernel time in milliseconds.
int main (int argc, char** argv){
//validate the number of command-line arguments
validateNumOfArgs(argc);
//load the numbers from the input file into a vector
vector<unsigned long long> numbersFromFile = readNumbersFromFile(argv[1]);
//byte sizes of the input and answer arrays
int sizeNumbersBytes = numbersFromFile.size()*sizeof(unsigned long long);
int sizeAnswersBytes = numbersFromFile.size()*sizeof(bool);
//device-side arrays of numbers and answers
unsigned long long* d_numbers;
bool* d_answers;
//allocate device memory
cudaMalloc((void **)&d_numbers, sizeNumbersBytes);
cudaMalloc((void **)&d_answers, sizeAnswersBytes);
//copy the input numbers to device memory
cudaMemcpy(d_numbers, numbersFromFile.data(), sizeNumbersBytes, cudaMemcpyHostToDevice);
//ceil-divide to get the number of blocks (one thread per number)
int blocks = (numbersFromFile.size()+THREADS-1)/THREADS;
//allocate one curand state per thread for random witness generation
curandState* d_states;
cudaMalloc(&d_states, blocks*THREADS*sizeof(curandState));
//initialize the random number generator states
setupCurand<<<blocks,THREADS>>>(d_states);
//create events for timing
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
//start timing
cudaEventRecord(start);
//run the primality test
checkPrimes<<<blocks,THREADS>>>(numbersFromFile.size(),d_answers,d_numbers,d_states);
//stop timing
cudaEventRecord(stop);
//wait until the stop event (and thus the kernel) has completed
cudaEventSynchronize(stop);
//compute the elapsed time
float elapsedTimeMS;
cudaEventElapsedTime(&elapsedTimeMS, start, stop);
//print the elapsed time ("Czas" = "Time")
cout<<"Czas: "<<elapsedTimeMS<<"ms"<<endl;
//host-side answer array
bool* answers = (bool*)malloc(sizeAnswersBytes);
//copy the answers from device memory back to host RAM
cudaMemcpy(answers, d_answers, sizeAnswersBytes, cudaMemcpyDeviceToHost);
//print the per-number results
for(unsigned int i=0;i<numbersFromFile.size();i++){
cout << numbersFromFile[i] << ": ";
cout << ((answers[i]==1) ? "prime":"composite") << endl;
}
cudaFree(d_answers);
cudaFree(d_numbers);
free(answers);
return 0;
}
// Abort with a usage message unless exactly one program argument
// (the input file path) was supplied.
void validateNumOfArgs(int argc){
    if (argc == 2)
        return;
    cout << "Niepoprawna ilosc argumentow" << endl;
    cout << "Poprawne wywolanie: ./primes_gpu <primes>" << endl;
    exit(-1);
}
// Read one unsigned 64-bit number per line from the file at `path`.
// Empty lines and lines that cannot be parsed as a number are skipped.
// Exits the program when the file cannot be opened.
vector<unsigned long long> readNumbersFromFile(char* path){
    ifstream primesFile;
    primesFile.open(path);
    vector<unsigned long long> numbersVector;
    if (primesFile.is_open())
    {
        // read the file line by line
        string line;
        while ( getline (primesFile,line) )
        {
            // only non-empty lines carry a number
            if(!line.empty()){
                try{
                    // parse the line as unsigned long long and store it
                    numbersVector.push_back(stoull(line));
                }catch(const exception&){ // catch by const reference (was by value, which copies/slices)
                    continue; // skip malformed lines
                }
            }
        }
        primesFile.close();
    }else{
        cout << "Nie mozna otworzyc pliku"<<endl;
        exit(-1);
    }
    // return the numbers read from the file
    return numbersVector;
}
//inicjalizacja curanda (do generacji liczb losowych)
//Initialize one curand state per thread (fixed seed 1000, sequence = global
//thread id) so every thread later draws an independent random stream.
__global__ void setupCurand(curandState* states){
int tid = threadIdx.x+blockDim.x*blockIdx.x;
curand_init(1000, tid, 0, &states[tid]);
}
//potęgowanie modulo ( (a^n) mod p)
// Modular exponentiation: computes (a^n) mod p by square-and-multiply.
// Fix: the exponent is now unsigned long long. checkPrimes passes
// d_numbers[tid]-1 here, and the previous `unsigned int n` silently
// truncated exponents >= 2^32, breaking the Fermat test for 64-bit inputs.
__device__ unsigned long long modPow(unsigned long long a, unsigned long long n, unsigned long long p){
    // accumulated result
    unsigned long long result = 1;
    // process one exponent bit per iteration
    while(n>0){
        if(n%2 == 1){ // current bit set -> multiply the base into the result
            result = (result * a) % p;
        }
        // square the base modulo p
        // NOTE(review): (a*a) can overflow 64 bits when p >= 2^32 -- confirm
        // the intended input range (a 128-bit mulmod would be needed otherwise).
        a = (a*a) % p;
        // drop the processed bit (n = n/2)
        n/=2;
    }
    return result;
}
//sprawdzanie czy liczba jest prawdopodobnie pierwsza
//uzyto testu pierwszosci Fermata
//Probabilistic primality check using the Fermat primality test:
//d_answers[tid] becomes false when d_numbers[tid] is provably composite and
//true when it passed `iters` random-witness rounds (probably prime).
__global__ void checkPrimes(int size, bool* d_answers, unsigned long long* d_numbers, curandState* curandStates, int iters){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
if(tid<size){
if(d_numbers[tid]<2){ //0 and 1 are not prime
d_answers[tid] = false;
return;
}else if(d_numbers[tid] == 2){ //special-case 2 (avoids sampling from an empty range below)
d_answers[tid] = true;
return;
}
int a;
float randomNumber;
//NOTE(review): the witness a is sampled through a float and stored in an
//int, so for inputs near/above 2^24 (and certainly above INT_MAX) the
//witness range is truncated -- confirm the intended input magnitude.
for(int i=0;i<iters;i++){
//draw a witness a from [2, p-1]
randomNumber = curand_uniform(&curandStates[tid]);
randomNumber *= (d_numbers[tid] - 2.000001);
randomNumber += 2;
a = (int)truncf(randomNumber);
//Fermat: if a^(p-1) != 1 (mod p) then p is composite
if(modPow(a, d_numbers[tid]-1, d_numbers[tid]) != 1){
d_answers[tid] = false; //composite for certain
return;
}
}
d_answers[tid] = true; //probably prime
}
}
|
3,131 | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
// Check a CUDA API return code and report file/line plus the message — but
// only on failure. (The previous version printed the "CUDA error" line
// unconditionally, even when the call succeeded, and never tested the code.)
#define checkCudaErrors(val) do {                                             \
    cudaError_t checkCudaErrors_err_ = (val);                                 \
    if (checkCudaErrors_err_ != cudaSuccess)                                  \
        fprintf(stderr, "CUDA error at %s:%d (%s) \n", __FILE__, __LINE__,    \
                cudaGetErrorString(checkCudaErrors_err_));                    \
} while (0)
//Par rapport a la question 7 N = 1000 et nb thread = 640
// =>si on fait 2 x nb_thread alors 1280 threads > N peut causer bufferoverflow/seg fault
// Vector addition c = a + b with two elements per thread: element i, and (if
// it exists) the element one full grid-stride away. With the host launch of
// 10 blocks x 64 threads = 640 threads this covers N = 1000 elements.
__global__ void kernel(double *a, double *b, double *c, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int totalthreads = (blockDim.x * gridDim.x);
    // Guard the first element too: the original wrote c[i] unconditionally,
    // which is out of bounds whenever the grid has more threads than N.
    if (i < N) {
        c[i] = a[i] + b[i];
    }
    if (i < (N - totalthreads)) {
        c[i + totalthreads] = a[i + totalthreads] + b[i + totalthreads];
    }
}
// Allocate two host vectors, add them on the GPU with a two-elements-per-
// thread kernel, and verify the result against a host-side reference sum.
int main(int argc, char **argv)
{
int N = 1000;
int sz_in_bytes = N*sizeof(double);
double *h_a, *h_b, *h_c;
double *d_a, *d_b, *d_c;
h_a = (double*)malloc(sz_in_bytes);
h_b = (double*)malloc(sz_in_bytes);
h_c = (double*)malloc(sz_in_bytes);
// Initiate values on h_a and h_b
for(int i = 0 ; i < N ; i++)
{
h_a[i] = 1./(1.+i);
h_b[i] = (i-1.)/(i+1.);
}
// Device allocations and host-to-device transfers
checkCudaErrors(cudaMalloc((void**)&d_a, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_b, sz_in_bytes));
checkCudaErrors(cudaMalloc((void**)&d_c, sz_in_bytes));
checkCudaErrors(cudaMemcpy(d_a, h_a, sz_in_bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_b, h_b, sz_in_bytes, cudaMemcpyHostToDevice));
// 10 blocks x 64 threads = 640 threads for N = 1000 elements, so the
// kernel processes up to two elements per thread
dim3 dimBlock(64, 1, 1);
dim3 dimGrid(10, 1, 1);
kernel<<<dimGrid , dimBlock>>>(d_a, d_b, d_c, N);
// blocking copy: also synchronizes with the kernel before reading d_c
checkCudaErrors(cudaMemcpy(h_c, d_c, sz_in_bytes, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaFree(d_a));
checkCudaErrors(cudaFree(d_b));
checkCudaErrors(cudaFree(d_c));
// Verifying
double err = 0, norm = 0;
for(int i = 0 ; i < N ; i++)
{
double err_loc = fabs(h_c[i] - (h_a[i]+h_b[i]));
err += err_loc;
norm += fabs(h_c[i]);
}
if (err/norm < 1.e-16)
{
printf("SUCCESS (Relative error : %.3e)\n", err/norm);
}
else
{
printf("ERROR (Relative error : %.3e)\n", err/norm);
}
free(h_a);
free(h_b);
free(h_c);
return 0;
}
|
3,132 | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <cuda.h>
#define THREADS_PER_BLOCK 512
__global__ void s_match(const char *s1, const char *s2);
__device__ int s_cmp(const char *s1, const char *s2);
// Copy the two argument strings to the GPU, launch one thread per candidate
// start position to search s2 inside s1, and report kernel vs. transfer time.
int main(int argc, char *argv[]){
    if (argc != 3){
        // Fix: the usage message previously had no trailing newline.
        printf("Usage: %s <string 1> <string 2>\n", argv[0]);
        exit(-1);
    }
    size_t s1_len = strlen(argv[1]), s2_len = strlen(argv[2]); /* length of input */
    size_t s1_sz_mem = s1_len + 1, s2_sz_mem = s2_len + 1; /* memory required */
    /*
     * Host's arrays
     */
    char *s1, *s2;
    s1 = (char*) malloc(s1_sz_mem * sizeof(char));
    s2 = (char*) malloc(s2_sz_mem * sizeof(char));
    if (!s1 || !s2) { /* validate memory created successfully or throw error */
        fputs ("error: name allocation failed, exiting.", stderr);
        return 1;
    }
    /*
     * Copy from arguments
     */
    strcpy(s1, argv[1]);
    strcpy(s2, argv[2]);
    printf("matching %s with %s:\n", s1, s2);
    float total_time, comp_time;
    cudaEvent_t total_start, total_stop, comp_start, comp_stop;
    cudaEventCreate(&total_start);
    cudaEventCreate(&total_stop);
    cudaEventCreate(&comp_start);
    cudaEventCreate(&comp_stop);
    /*
     * Start counting total time
     */
    cudaEventRecord(total_start);
    /*
     * Device's array
     */
    char *dev_s1, *dev_s2;
    cudaMalloc(&dev_s1, s1_sz_mem*sizeof(char));
    cudaMalloc(&dev_s2, s2_sz_mem*sizeof(char));
    /*
     * Copy strings from host memory to device memory
     */
    cudaMemcpy(dev_s1, s1, s1_sz_mem*sizeof(char), cudaMemcpyHostToDevice);
    cudaMemcpy(dev_s2, s2, s2_sz_mem*sizeof(char), cudaMemcpyHostToDevice);
    /*
     * Start counting kernel time
     */
    cudaEventRecord(comp_start);
    /*
     * Create sufficient blocks
     */
    int blocks = (s1_len+s2_len + THREADS_PER_BLOCK - 1)/THREADS_PER_BLOCK;
    /*
     * Kernel call
     */
    s_match<<<blocks, THREADS_PER_BLOCK>>>(dev_s1, dev_s2);
    /*
     * Kernel time count
     */
    cudaEventRecord(comp_stop);
    cudaEventSynchronize(comp_stop);
    cudaEventElapsedTime(&comp_time, comp_start, comp_stop);
    /*
     * Total time count
     */
    cudaEventRecord(total_stop);
    cudaEventSynchronize(total_stop);
    cudaEventElapsedTime(&total_time, total_start, total_stop);
    /*
     * Free memory on device and host (the host buffers previously leaked)
     */
    cudaFree(dev_s1);
    cudaFree(dev_s2);
    free(s1);
    free(s2);
    cudaEventDestroy(comp_start);
    cudaEventDestroy(comp_stop);
    cudaEventDestroy(total_start);
    cudaEventDestroy(total_stop);
    /*
     * GPU timing
     */
    printf("blocks: %d, total_threads: %d\n", blocks, THREADS_PER_BLOCK*blocks);
    printf("Total time (ms): %f\n", total_time);
    printf("Kernel time (ms): %f\n", comp_time);
    printf("Data transfer time (ms): %f\n", total_time-comp_time);
    return 0;
}
/*
 * Function: s_cmp
 * --------------------
 * Compares s2 against the prefix of s1, character by character.
 *
 * s1: pointer to char array (text at the candidate start position)
 * s2: pointer to char array (pattern)
 *
 * returns 0 if no match, 1 if s2 matched, -1 if s2 matched exactly at the
 * end of s1 (both strings ended together)
 */
__device__ int s_cmp(const char *s1, const char *s2){
    for (;;) {
        char c1 = *(s1++);
        char c2 = *(s2++);
        if (c2 == '\0')
            return (c1 == '\0') ? -1 : 1;   /* pattern exhausted: match */
        if (c1 != c2)
            return 0;                       /* mismatch before pattern end */
    }
}
/*
 * Function: s_match
 * --------------------
 * Performs string matching: one thread per candidate start position of s1,
 * printing every position at which s2 occurs ("at end" when the occurrence
 * coincides with the end of s1).
 *
 * s1: pointer to char array (string searched)
 * s2: pointer to char array (pattern)
 *
 * NOTE(review): idx ranges over the whole launched grid (the host sizes the
 * grid from s1_len + s2_len), so s1[idx] can read past the s1 allocation for
 * trailing threads -- a length parameter/guard would be needed; confirm.
 */
__global__ void s_match(const char *s1, const char *s2){
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (s1[idx] != '\0') {
switch (s_cmp(s1 + idx, s2)) {
case -1:
{
printf("matched: pos %d (at end)\n", idx);
return;
}
case 1:
{
printf("matched: pos %d\n", idx);
break;
}
}
}
}
|
// Build the histogram-equalization transform: normalize each CDF entry to
// [0, 1] as (cdf[i] - cdf_min) / (img_size - 1). Grid-stride loop, so any
// launch configuration covers all `length` entries.
__global__ void transform(float* transform, int length, int *cdf, int cdf_min, int img_size)
{
    const int stride = blockDim.x * gridDim.x;
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < length)
    {
        transform[i] = (float) (cdf[i] - cdf_min) / (img_size - 1);
        i += stride;
    }
}
// Apply the precomputed transform to each pixel: look up the pixel's value in
// `transform` and store it in the third interleaved channel of `img`.
// One thread per pixel of the width x height image.
__global__ void transform_values(float *img, int *values, float *transform, int width, int height)
{
    const int col = blockIdx.x * blockDim.x + threadIdx.x;
    const int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= width || row >= height)
        return;
    const int pixel = col + row * width;
    img[3 * pixel + 2] = transform[values[pixel]];
}
|
3,134 | #include "includes.h"
// Backward pass of a nearest-neighbor (Chamfer-style) distance: for each of
// the n points of xyz1 in each of the b batches, idx1 holds the index of its
// nearest point in xyz2. The incoming gradient grad_dist1 is scattered as
// g*(p1-p2) into grad_xyz1 and -g*(p1-p2) into grad_xyz2 (g = 2*grad, the
// derivative of the squared distance).
// Grid layout: blockIdx.x strides over batches; (blockIdx.y, threadIdx.x)
// stride over points. Atomics are needed because several points of xyz1 can
// share the same nearest neighbor j2.
__global__ void NmDistanceGradKernel(int b,int n,const float * xyz1,int m,const float * xyz2,const float * grad_dist1,const int * idx1,float * grad_xyz1,float * grad_xyz2){
for (int i=blockIdx.x;i<b;i+=gridDim.x){
for (int j=threadIdx.x+blockIdx.y*blockDim.x;j<n;j+=blockDim.x*gridDim.y){
float x1=xyz1[(i*n+j)*3+0];
float y1=xyz1[(i*n+j)*3+1];
float z1=xyz1[(i*n+j)*3+2];
int j2=idx1[i*n+j];
float x2=xyz2[(i*m+j2)*3+0];
float y2=xyz2[(i*m+j2)*3+1];
float z2=xyz2[(i*m+j2)*3+2];
float g=grad_dist1[i*n+j]*2;
atomicAdd(&(grad_xyz1[(i*n+j)*3+0]),g*(x1-x2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+1]),g*(y1-y2));
atomicAdd(&(grad_xyz1[(i*n+j)*3+2]),g*(z1-z2));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+0]),-(g*(x1-x2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+1]),-(g*(y1-y2)));
atomicAdd(&(grad_xyz2[(i*m+j2)*3+2]),-(g*(z1-z2)));
}
}
}
3,135 | #define THREADS 256
// Shared-memory tree reduction over the y-dimension of the block: each step
// keeps the smaller distance (and its index) of each pair.
// NOTE(review): s_dist/s_idx are read before any visible write, and the
// parameters (in_idx, in_dist, nfeat, nelem, max_dist, f) are unused -- the
// code that populates shared memory appears to be missing from this excerpt.
__global__ void select_matches(
    const unsigned* in_idx,
    const int* in_dist,
    const unsigned nfeat,
    const unsigned nelem,
    const int max_dist)
{
    unsigned f = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned sid = threadIdx.x * blockDim.y + threadIdx.y;
    __shared__ int s_dist[THREADS];
    __shared__ unsigned s_idx[THREADS];
    // Reduce best matches and find the best of them all
    for (unsigned i = blockDim.y / 2; i > 0; i >>= 1) {
        if (threadIdx.y < i) {
            int dist = s_dist[sid + i];
            if (dist < s_dist[sid]) {
                s_dist[sid] = dist;
                s_idx[sid] = s_idx[sid + i];
            }
        }
        // Fix: the barrier was inside the divergent `if (threadIdx.y < i)`
        // branch. __syncthreads() must be reached by every thread of the
        // block, otherwise behavior is undefined (possible hang/corruption).
        __syncthreads();
    }
}
3,136 | #include <stdio.h>
#include <time.h>
// Element-wise vector addition c = a + b.
// Generalized to use the global thread index: the original used only
// threadIdx.x, which is correct solely for single-block launches (as in the
// current <<<1,n>>> call) and silently wrong for multi-block grids.
__global__ void vAdd(int* a, int* b, int* c, int n){
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if(i<n)
        c[i] = a[i] + b[i];
}
// Host wrapper: copy a and b to the device, run vAdd with one block of n
// threads, time the kernel, and copy the result back into c.
void wrapper(int* a, int* b, int* c, int n){
    int *d_a,*d_b,*d_c;
    cudaMalloc(&d_a,n*sizeof(int));
    cudaMalloc(&d_b,n*sizeof(int));
    cudaMalloc(&d_c,n*sizeof(int));
    cudaMemcpy(d_a,a,n * sizeof(int),cudaMemcpyHostToDevice);
    cudaMemcpy(d_b,b,n * sizeof(int),cudaMemcpyHostToDevice);
    // (The original also copied c to the device; d_c is output-only, so that
    // transfer was pure overhead and has been removed.)
    clock_t start = clock();
    vAdd<<< 1,n >>>(d_a,d_b,d_c,n);
    // Kernel launches are asynchronous: without this synchronization the
    // measured interval covered only the launch, not the execution.
    cudaDeviceSynchronize();
    clock_t end = clock();
    printf("Took %f Seconds", float(end-start)/CLOCKS_PER_SEC);
    cudaMemcpy(c,d_c,n*sizeof(int),cudaMemcpyDeviceToHost);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return;
}
|
3,137 | #include <stdio.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
// Print a readable diagnostic (and optionally exit) when a CUDA API call
// fails; used through the gpuErrchk(...) macro so the failing file and line
// are reported automatically.
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
/*
Common workflow of cuda programs:
1) Allocate host memory and initialized host data
2) Allocate device memory
3) Transfer input data from host to device memory
4) Execute kernels
5) Transfer output from device memory to host
*/
// Single-thread kernel: store the sum of the two scalar arguments into the
// device-side result slot.
__global__ void add(int a, int b, int *c){
    int sum = a + b;
    *c = sum;
}
// Debug helper kernel: copy the scalar argument into the device result slot.
__global__ void test(int a, int *c){
*c = a;
}
// Minimal kernel demonstrating device-side printf.
__global__ void cuda_hello(){
printf("Hello World from GPU!\n");
}
// Demonstrates the common CUDA workflow: allocate, launch, copy back, check.
int main(){
    // Allocate host memory and initialized host data
    int a, b, c;
    int *dev_c;
    a = 3;
    b = 4;
    // Allocate device memory
    gpuErrchk(cudaMalloc((void**) &dev_c, sizeof(int)));
    // Execute kernels; check for launch errors right after each launch
    // (the original checked only once, after the memcpy).
    cuda_hello<<<1,1>>>();
    gpuErrchk( cudaPeekAtLastError() );
    cudaDeviceSynchronize();
    //test<<<1,1>>>(a, dev_c);
    add<<<1,1>>>(a, b, dev_c);
    gpuErrchk( cudaPeekAtLastError() );
    // Transfer output from device memory to host; this blocking copy also
    // synchronizes with the kernel. The return code was previously ignored.
    gpuErrchk(cudaMemcpy(&c, dev_c, sizeof(int), cudaMemcpyDeviceToHost));
    printf("%d + %d = %d\n", a, b, c);
    // Free device memory
    gpuErrchk(cudaFree(dev_c));
    return 0;
}
// #define N 10
// __global__ void vector_add(float *out, float *a, float *b, int n) {
// for(int i = 0; i < n; i++){
// out[i] = a[i] + b[i];
// }
// }
// int main(){
// float *a, *b, *out;
// // Allocate memory
// a = (float*)malloc(sizeof(float) * N);
// b = (float*)malloc(sizeof(float) * N);
// out = (float*)malloc(sizeof(float) * N);
// // Initialize array
// for(int i = 0; i < N; i++){
// a[i] = 1.0f; b[i] = 2.0f;
// }
// // Main function
// //vector_add(out, a, b, N);
// vector_add<<<1,1>>>(out, a, b, N);
// for(int i = 0; i < N; i++){
// printf("%f ", out[i]);
// }
// printf("\n");
// return 0;
// }
|
3,138 | #include "includes.h"
/*
cudaStructTest
testing/optimizing how to access/manipulate/return
structures in cuda.
*/
#define N 30
#define TRUE 1
#define FALSE 0
#define MAX_BLOCKS 65000
/*#define BLOCKS 2
#define THREADS 5*/
int cuda_setup(int computeCapability);
// Plain-old-data record processed on the device (one per simulated person).
typedef struct{
int id;
int age;
int height;
} Person;
// Declare the Cuda kernels and any Cuda functions
// One thread per person: flag 1 in statResults when the person's height
// differs from 6, else 0. Threads beyond the N records do nothing.
__global__ void analyze_height(Person *people, int *statResults)
{
    int id = threadIdx.x + blockIdx.x * blockDim.x;
    if(id >= N)
        return;
    Person person = people[id];
    statResults[id] = (person.height != 6) ? 1 : 0;
}
3,139 | #include "pgm_utility.cuh"
#include "mesh.cuh"
/* Render the density field xphys (one value per mesh element, filled column
 * by column) as a grayscale PGM (P2) image of mesh->nelx x mesh->nely pixels.
 * Pixel value is (1 - xphys) scaled to [0, max], clamped at max.
 * Returns 0 on success, -1 when the output file cannot be created.
 * NOTE(review): img.data holds n ints while the loops fill nelx*nely entries
 * -- assumes n == mesh->nelx * mesh->nely; confirm at the call sites. */
int write_image(char *filename, int n, struct mesh *mesh, double *xphys){
struct image img;
int img_index;
FILE *fout;
int i, npixel;
img.row = mesh->nely;
img.col = mesh->nelx;
img.max = 255;
img.data = (int*)malloc(n * sizeof(int));
img_index = 0;
/* xphys is walked linearly while the image is filled column-major */
for (int col_index = 0;col_index < mesh->nelx;col_index++) {
for (int row_index = 0;row_index < mesh->nely;row_index++) {
if ((int)((1.0f - xphys[img_index]) * img.max) > img.max) {
img.data[row_index*mesh->nelx + col_index] = img.max;
}
else {
img.data[row_index*mesh->nelx + col_index] = (int)((1.0f - xphys[img_index]) * img.max);
}
img_index++;
}
}
if((fout=fopen(filename,"w"))!=NULL){
fprintf(fout,"P2\n");
fprintf(fout,"#Creato con C\n");
fprintf(fout,"%d %d\n",img.col,img.row);
fprintf(fout,"%d\n",img.max);
npixel=img.col*img.row;
for(i=0;i<npixel;i++){
fprintf(fout,"%d\n",img.data[i]);
}
fclose(fout);
free(img.data);
}else{
printf("\nImpossibile creare il file %s\n",filename);
free(img.data);
return -1;
}
return 0;
}
|
3,140 | /****************************************************************************
* cuda_bigger_block.cu - a simple multi-layer Nerual Network
*
* Assignment of Module 2 of Ap4AI course of AI master degree @unibo
*
* Last updated in 2021 by Hanying Zhang <hanying.zhang@studio.unibo.it>
*
* To the extent possible under law, the author(s) nave dedicated all
* copyright and related and neighboring rights to this software to the
* public domain worldwide. This software is distributed without any warranty.
*
* --------------------------------------------------------------------------
*
* compile with:
* nvcc cuda_bigger_block.c -o cuda_bigger_block
*
* Run with:
* ./cuda_bigger_block -n # of nodes -k # of layers
*
****************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
#include <time.h>
#define R 3
const int BLKDIM = (64/R*R)*R;
/* BLKDIM optimization
* # of threads shoule be able to divide (32 * R)
* (n_node / R * R) is # of nodes being able to divide R ->
* (n_node / R * R) * R is # of threads being able to divide R
*/
// Use a __device__ function to calculate Sigmoid
// Logistic activation with bias b: 1 / (1 + e^(-(x + b))).
__device__ float Sigmoid(float x, float b)
{
    return 1.0 / (expf(-(x + b)) + 1);
}
/* The calculation of y values for one layer */
/* The calculation of y values for one layer.
 * R threads cooperate per output node: thread (gi, j) computes one product
 * x[gi+j] * W[gi*R+j] into shared memory, then each of the R threads sums its
 * node's R products and applies the sigmoid. N is the previous layer length,
 * so this layer has N - R + 1 nodes. */
__global__ void one_layer_calc(float *x, float *W, float *b, float *y, int N)
{
    int gidx = blockIdx.x * blockDim.x + threadIdx.x; // global thread index
    int lidx = threadIdx.x;                           // local thread index
    int gi = gidx / R;                                // global node index
    int li = lidx / R;                                // local node index
    int j = gidx - gi * R; // which of the node's R inputs this thread handles
    int pre_layer_len = N - R + 1;
    float y_tmp = 0.0;
    // shared memory holding the per-thread partial products
    __shared__ float local_y[BLKDIM];
    // Fix: zero-initialize so trailing threads of the last block (whose guard
    // below fails) do not leave garbage that the accumulation loop reads.
    local_y[lidx] = 0.0f;
    if(gi < pre_layer_len && j < R) {
        local_y[lidx] = x[gi + j] * W[gi * R + j];
    }
    __syncthreads();
    // Accumulate the R partial products of this thread's node
    for (int p=0; p<R; p++) {
        y_tmp += local_y[li * R + p];
    }
    __syncthreads();
    // Sigmoid activation
    y_tmp = Sigmoid(y_tmp, *b);
    // Fix: guard the store -- threads with gi >= pre_layer_len previously
    // wrote past the end of y. (The R threads of one node write the same
    // value, which is benign.)
    if (gi < pre_layer_len)
        y[gi] = y_tmp;
}
/* Random values between -1 and 1 */
/* Draw a pseudo-random value in [-1, 1) with a step of 0.001. */
float random_init_small()
{
    int raw = rand() % 2000;        /* 0 .. 1999 */
    return (raw - 1000) / 1000.0;   /* shift and scale into [-1, 0.999] */
}
/* Initialize the W and b parameters for one layer */
/* Fill one layer's weight matrix W (layer_len x R) and its bias b with small
 * random values in [-1, 1]; weights are drawn row by row, then the bias. */
void init_layer_parameters(float (*W)[R], float *b, int layer_len)
{
    for (int node = 0; node < layer_len; node++)
        for (int k = 0; k < R; k++)
            W[node][k] = random_init_small();
    *b = random_init_small();
}
/* Read in the network parameters (N, K) from command-line input. */
/* Read the network parameters from the command line: -n sets *N (input
 * size), -k sets *K (layer count). Absent flags leave the values untouched. */
void parse_command_line_parameters(int argc, char *argv[], int *N, int *K)
{
    int opt;
    while ((opt = getopt (argc, argv, "n:k:")) != -1) {
        if (opt == 'n') {
            *N = atoi(optarg);
        } else if (opt == 'k') {
            *K = atoi(optarg);
        }
    }
}
// Driver: builds a K-layer network whose layer k has N - k*(R-1) nodes,
// evaluates it layer by layer on the GPU, and prints the final layer.
int main( int argc, char *argv[] )
{
srand(42);
int N = 100;
int K = 3;
// get N, K from command line
parse_command_line_parameters(argc, argv, &N, &K);
printf("input size:%d, number of layers:%d.\n", N, K);
// Judge if the length of the k-th layer is bigger than 0
int last_layer_len = N - (K-1) * (R-1);
if (last_layer_len <= 0) {
printf("The parameters you input couldn't support k layers. \
Please give bigger size of layer 0 or use less layers.\n");
return EXIT_FAILURE;
}
// initialize the values of the input layer
float x[N];
for (int i=0; i < N; i++) {
x[i] = random_init_small();
}
//TEST
for (int i=0; i < N; i++) {
printf("%.2f ", x[i]);
}
printf("\n");
// create an array to store the latest layer got calculated
float latest_layer[N];
memcpy(latest_layer, x, N * sizeof(float)); //the latest layer is the input layer at the beginning
float *latest_layer_d;
cudaMalloc( (void**)&latest_layer_d, N * sizeof(float) );
cudaMemcpy( latest_layer_d, latest_layer, N * sizeof(float), cudaMemcpyHostToDevice);
// Start recording time costage
clock_t start = clock();
// Loop over K layers
for(int k=1; k<K; k++) {
// calculate lengthes of this layer and the previous layer
int layer_len = N - k * (R-1);
int in_layer_len = layer_len + R -1;
// initialize parameters
float b;
float W[layer_len][R];
float y[layer_len];
init_layer_parameters(W, &b, layer_len);
float *b_d;
float *W_d;
float *y_d;
cudaMalloc( (void**)&b_d, sizeof(float) );
cudaMalloc( (void**)&W_d, layer_len * R * sizeof(float) );
cudaMalloc( (void**)&y_d, layer_len * sizeof(float) );
cudaMemcpy(b_d, &b, sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(W_d, W, layer_len * R * sizeof(float), cudaMemcpyHostToDevice);
// NOTE(review): y is uninitialized here; this upload is overwritten by the
// kernel's output and appears to be unnecessary work -- confirm.
cudaMemcpy(y_d, y, layer_len * sizeof(float), cudaMemcpyHostToDevice);
// calculation of each layer
// printf("\nGRIDDIM %d BLKDIM: %d\n", (layer_len*R+BLKDIM-1)/BLKDIM, BLKDIM);
one_layer_calc<<<(layer_len*R+BLKDIM-1)/BLKDIM, BLKDIM>>>(latest_layer_d, W_d, b_d, y_d, in_layer_len);
cudaDeviceSynchronize();
// copy result back to host
cudaMemcpy(y, y_d, layer_len * sizeof(float), cudaMemcpyDeviceToHost);
/*
// Print the result of each layer
printf("\nThe layer result got\n");
for(int i=0; i<layer_len; i++) {
printf("%.2f ", y[i]);
}
printf("\n");
*/
// save the latest_layer result
memcpy(latest_layer, y, layer_len * sizeof(float));
cudaMemcpy(latest_layer_d, latest_layer, layer_len * sizeof(float), cudaMemcpyHostToDevice);
// Free cuda memory
cudaFree(W_d); cudaFree(y_d); cudaFree(b_d);
}
// calculate elapsed time
clock_t end = clock();
double time_elapsed = (double)(end - start) / CLOCKS_PER_SEC;
printf("Time elapsed: %.3f\n", time_elapsed);
cudaFree(latest_layer_d);
// print final result
printf("\nFinal result is: ");
for(int i=0; i<last_layer_len; i++) {
printf("%.3f ", latest_layer[i]);
}
printf("\n");
return EXIT_SUCCESS;
}
|
3,141 | #include "includes.h"
// Exchange two quadrants of a row-major ny x nx image (fftshift-style swap):
// element (r, c) of the bottom-left half is swapped with the corresponding
// element of the top-right half. xodd/yodd carry the parity of nx/ny so odd
// dimensions shift the partner quadrant by one; `offset` shifts the flat work
// index when the job is split across several launches.
// NOTE(review): despite the name "top_left_bot_right", the index math pairs
// bottom-left (r >= ny/2+yodd, c < nx/2) with top-right -- confirm intent.
// NOTE(review): no bounds guard; assumes the launch covers exactly
// (nx/2)*(ny/2) elements per pass -- confirm host configuration.
__global__ void swap_top_left_bot_right(float* data, const int num_threads, const int nx, const int ny, const int xodd, const int yodd, const int offset) {
const uint x=threadIdx.x;
const uint y=blockIdx.x;
const uint gpu_idx = x+y*num_threads+offset;
const uint c = gpu_idx % (nx/2);
const uint r = gpu_idx / (nx/2) + ny/2+yodd;
const uint idx1 = r*nx + c;
const uint idx2 = (r-ny/2-yodd)*nx + c + nx/2+xodd;
float tmp = data[idx1];
data[idx1] = data[idx2];
data[idx2] = tmp;
}
3,142 | #include "includes.h"
/* Programmaufruf mit 2 Argumenten:
1. Größe des Gitters (mit Rand): Nx+2 (= Ny+2)
2. Dimension eines Cuda-Blocks: dim_block (findet nur Anwendung, wenn Nx+2 > dim_block)
*/
/*
Globale Variablen stehen in allen Funktionen zur Verfuegung.
Achtung: Das gilt *nicht* fuer Kernel-Funktionen!
*/
// Host-side globals: grid extents and total point count, plus a flag array.
// NOTE(review): their initialization/use is not visible in this excerpt.
int Nx, Ny, npts;
int *active;
/*
Fuer die Koordinaten:
i = 0,1,...,Nx+1
j = 0,1,...,Ny+1
wird der fortlaufenden Index berechnet
*/
// One step of the 2D Laplace stencil on an (nx+2) x (ny+2) grid with a
// boundary ring: w = 4*v - (left + right + up + down) for interior points.
// NOTE(review): the flat index uses blockDim.x*gridDim.x as the row pitch,
// so the launch grid must span exactly nx+2 columns -- confirm host code.
__global__ void laplace_2d_gpu(double *w, double *v, const int nx, const int ny)
{
unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x;
unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y;
if (ix>0 && ix<(nx+1) && iy>0 && iy<(ny+1)) // only interior points are updated
{
unsigned int idx = iy*(blockDim.x * gridDim.x) + ix;
w[idx] = 4*v[idx] - (v[idx-1] + v[idx+1] + v[(idx-(gridDim.x*blockDim.x))] + v[(idx+(gridDim.x*blockDim.x))]);
}
}
3,143 | #include "includes.h"
// Naive 2D convolution: each thread computes one output pixel by applying a
// filter_size x filter_size kernel centered on (idx_x, idx_y); out-of-image
// taps contribute 0 (zero padding).
__global__ void convolution_kernel_v1(float *d_output, float *d_input, float *d_filter, int num_row, int num_col, int filter_size)
{
    int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
    int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
    // Fix: grids are normally rounded up, so threads past the image edge must
    // not write -- the original stored d_output out of bounds for them.
    if (idx_x >= num_col || idx_y >= num_row)
        return;
    float result = 0.f;
    for (int filter_row = -filter_size / 2; filter_row <= filter_size / 2; ++filter_row)
    {
        for (int filter_col = -filter_size / 2; filter_col <= filter_size / 2; ++filter_col)
        {
            // Find the global position to apply the given filter
            int image_row = idx_y + filter_row;
            int image_col = idx_x + filter_col;
            float image_value = (image_row >= 0 && image_row < num_row && image_col >= 0 && image_col < num_col) ?
                d_input[image_row * num_col + image_col] : 0.f;
            float filter_value = d_filter[(filter_row + filter_size / 2) * filter_size + filter_col + filter_size / 2];
            result += image_value * filter_value;
        }
    }
    d_output[idx_y * num_col + idx_x] = result;
}
3,144 | #include "reduce.cuh"
// Block-wise tree sum: each block loads up to blockDim.x elements into
// dynamic shared memory (launch with blockDim.x * sizeof(int) bytes) and
// writes its partial sum to g_odata[blockIdx.x]; out-of-range lanes load 0.
// NOTE(review): the halving loop assumes blockDim.x is a power of two;
// otherwise the first fold skips elements -- confirm t_len at the call site.
__global__ void reduce_kernel(const int *g_idata, int *g_odata,
unsigned int n) {
extern __shared__ int sdata[];
int i = blockIdx.x * blockDim.x + threadIdx.x;
sdata[threadIdx.x] = i < n ? g_idata[i] : 0;
__syncthreads();
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
if (threadIdx.x < s)
sdata[threadIdx.x] += sdata[threadIdx.x + s];
__syncthreads();
}
if (threadIdx.x == 0)
g_odata[blockIdx.x] = sdata[0];
}
// Sum N ints on the GPU by repeated block reductions with t_len threads per
// block until a single value remains.
__host__ int reduce(const int *arr, unsigned int N, unsigned int t_len) {
    // Fix degenerate sizes: the loop below assumes at least one kernel pass
    // writes g_odata. For N == 1 the loop never ran and the result was read
    // from unwritten device memory; for N == 0 the loop never terminated.
    if (N == 0)
        return 0;
    if (N == 1)
        return arr[0];
    int *g_idata, *g_odata;
    cudaMalloc(&g_idata, N * sizeof(int));
    cudaMalloc(&g_odata, (N + t_len - 1) / t_len * sizeof(int));
    cudaMemcpy(g_idata, arr, N * sizeof(int), cudaMemcpyHostToDevice);
    for (int n = N; n != 1; n = (n + t_len - 1) / t_len) {
        size_t n_blk = (n + t_len - 1) / t_len;
        reduce_kernel<<<n_blk, t_len, t_len * sizeof(int)>>>(g_idata, g_odata, n);
        cudaMemcpy(g_idata, g_odata, n_blk * sizeof(int), cudaMemcpyDeviceToDevice);
    }
    cudaDeviceSynchronize();
    int result;
    cudaMemcpy(&result, g_odata, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(g_idata);
    cudaFree(g_odata);
    return result;
}
3,145 | #include <iostream>
#include <math.h>
using namespace std;
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Function Declarations !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
void storeOldValue(double *phinew, double *phiOld,int totCell);
void L2norm(double *Phinew, double *Phiold,double *L2Phi,int totCell);
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// Main Function-->Poisson Solver for Pressure Finite Volume Solver !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// CPU reference: Gauss-Seidel relaxation (lambda = 1) of the finite-volume
// pressure Poisson equation, fixed at 1001 sweeps over the interior cells. !
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
void PoissonPressureCPU(double* Phi, int row, int col,
double delX,double delY,double* source,
int totCell){
    const double lam = 1;
    // 1001 sweeps, matching the original itr/stop loop
    for (int sweep = 0; sweep <= 1000; sweep++) {
        for (int i = 1; i < row - 1; i++) {
            for (int j = 1; j < col - 1; j++) {
                int idx = i*col+j;
                // stencil neighbors (E/W along a row, N/S across rows)
                double PhiP = Phi[idx];
                double PhiE = Phi[idx+1];
                double PhiW = Phi[idx-1];
                double PhiN = Phi[idx-col];
                double PhiS = Phi[idx+col];
                // finite-volume coefficients
                double AP = (-2*delY/delX)-(2*delX/delY);
                double AS = (delX/delY);
                double AW = (delY/delX);
                double AE = (delY/delX);
                double AN = (delX/delY);
                // residual and in-place correction
                double R = source[idx]- AP*PhiP-AE*PhiE-AW*PhiW-AN*PhiN-AS*PhiS;
                double delPhi = R/AP;
                Phi[idx] = Phi[idx]+lam*delPhi;
            }
        }
    }
}
//++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++!
// GPU counterpart of PoissonPressureCPU: one thread per cell, each repeating
// the relaxation update 1001 times.
// NOTE(review): there is no synchronization between the 1001 in-kernel
// iterations, so threads read neighbor values in an unspecified order
// (chaotic relaxation) -- confirm this is acceptable for convergence.
// NOTE(review): the interior bound n = (row-1)*(col-1) differs from the CPU
// version's (i,j) interior test and excludes k == 0 only; boundary handling
// looks suspect -- confirm against the CPU reference.
__global__ void PoissonPressure(double* Phi, int row, int col,
double delX,double delY,double* source,
int totCell){
double lam = 1;
int itr = 0;
int stop = 0;
while (stop==0){
itr++;
// Get global thread ID
int k = blockIdx.x*blockDim.x+threadIdx.x;
int n = (row-1)*(col-1);
// Do for only inner points
if (k >0 && k<n) {
//int k = i*col+j;
double PhiP = Phi[k];
double PhiE = Phi[k+1];
double PhiW = Phi[k-1];
double PhiN = Phi[k-col];
double PhiS = Phi[k+col];
double AP = (-2*delY/delX)-(2*delX/delY);
double AS = (delX/delY);
double AW = (delY/delX);
double AE = (delY/delX);
double AN = (delX/delY);
double R = source[k]- AP*PhiP-AE*PhiE-AW*PhiW-AN*PhiN-AS*PhiS;
double delPhi = R/AP;
Phi[k] = Phi[k]+lam*delPhi;
}
//L2norm2( R,L2R,col,row);// L2 norm
if(itr>1000){stop=1;}
}
}
|
3,146 | #include <bits/stdc++.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/sort.h>
#include <thrust/copy.h>
#include <thrust/random.h>
#include <thrust/random/uniform_int_distribution.h>
using namespace std;
// Functor producing a deterministic pseudo-random int in [0, 1000001] for a
// given index: the engine skips `idx` draws so every index maps to its own
// value independently of evaluation order (usable from host or device).
class Rand{
//const int mod = 1E6;
thrust::uniform_int_distribution<int> g;
thrust::default_random_engine rng;
public:
Rand():g(0, 1000001){}
__host__ __device__ int operator ()(int idx){
rng.discard(idx);
return g(rng);
}
};
// Benchmark: generate 2^25 random ints on the device with thrust::transform,
// sort them with thrust::sort, and report wall-clock time for each phase.
int main(){
int N = 1<<25;
srand(0);
thrust::host_vector<int> nums(N);
thrust::device_vector<int> d_vec(N);
Rand gr;
clock_t start_time = clock(), end_time;
/* Generating data testing*/
thrust::transform(thrust::make_counting_iterator(0), thrust::make_counting_iterator(N),
d_vec.begin(), gr);
end_time = clock();
cout<<"=====================Generating Data Time Usage========================"<<endl<<endl;
cout<<"\t\t"<<double(end_time-start_time)/CLOCKS_PER_SEC<<" s\t\t"<<endl<<endl;
cout<<"======================================================================="<<endl;
// copy back and show the first 10 generated values
thrust::copy(d_vec.begin(), d_vec.end(), nums.begin());
for(int i=0;i<10; ++i) cout<<nums[i]<<' ';
cout<<endl<<endl<<endl;
/* Sorting testing*/
start_time = clock();
thrust::sort(d_vec.begin(), d_vec.end());
end_time = clock();
cout<<"===========================Sorting Time Usage=========================="<<endl<<endl;
cout<<"\t\t"<<double(end_time-start_time)/CLOCKS_PER_SEC<<" s\t\t"<<endl<<endl;
thrust::copy(d_vec.begin(), d_vec.end(), nums.begin());
cout<<"======================================================================="<<endl;
// sample every N/10-th element of the sorted sequence
for(int i=0, block = N/10;i<N; i+=block) cout<<nums[i]<<' ';
cout<<endl<<endl<<endl;
return 0;
}
|
3,147 | //
// Created by sjhuang on 2021/8/21.
//
#include<stdio.h>
#include<stdlib.h>
#define N 100000
// Intentionally serial device kernel: designed for a <<<1,1>>> launch, the
// single thread walks the whole array computing output = a + b.
__global__ void vector_add(const float *a, const float *b, float *output,int n){
    int i = 0;
    while (i < n) {
        output[i] = a[i] + b[i];
        ++i;
    }
}
// Print every element of `output`, one per line.
void vector_output(float *output, int n){
    for(int i =0; i < n; i++){
        // Fix: the format string had no separator, so all values ran
        // together on one unreadable line.
        printf("output[%d] is %f\n",i,output[i]);
    }
}
// Driver: fills two length-N host arrays, adds them on the device with a
// single-thread kernel, and prints the first result element.
// NOTE(review): CUDA API return codes are not checked anywhere here.
int main(){
// malloc memory
float *a,*b,*output;
a = (float *)malloc(sizeof (float )*N);
b = (float *)malloc(sizeof (float )*N);
output = (float*)malloc(sizeof(float) * N);
// Initialize array
for(int i =0; i < N; i++){
a[i] = 1.0f, b[i] = 2.0f;
}
float *d_a,*d_b,*d_output;
// Device Memory malloc
cudaMalloc(&d_a,sizeof (float )*N);
cudaMalloc(&d_b,sizeof (float )*N);
cudaMalloc(&d_output,sizeof (float )*N);
// Transfer host data to device data
cudaMemcpy(d_a,a,sizeof (float )*N,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,sizeof (float )*N,cudaMemcpyHostToDevice);
// operate: one block, one thread -- the kernel loops serially over N
vector_add<<<1,1>>>(d_a,d_b,d_output,N);
cudaDeviceSynchronize();
cudaMemcpy(output,d_output,sizeof (float )*N,cudaMemcpyDeviceToHost);
// output
printf("out[0] = %f\n", output[0]);
printf("PASSED\n");
// release device and host memory
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_output);
free(a);
free(b);
free(output);
return 0;
}
3,148 | #include "includes.h"
// Divide each element of A by `constant`, one element per thread. The
// element index flattens a 3D grid of 1D blocks: blocks are linearized in
// (x, y, z) order, then offset by the thread index within the block.
__global__ void FloatDivByConstant(float *A, float constant)
{
    const unsigned int blockRank =
        blockIdx.x * gridDim.y * gridDim.z + blockIdx.y * gridDim.z + blockIdx.z;
    const unsigned int elem = blockRank * blockDim.x + threadIdx.x;
    A[elem] /= constant;
}
3,149 | /**
* Detect the number of CUDA capable devices.
*/
#include <iostream>
// Detect and report the number of CUDA capable devices.
int main()
{
    int count = 0;
    // Fix: the return code was ignored, so a runtime failure (e.g. missing
    // driver) silently printed "0 device(s) found." instead of an error.
    cudaError_t err = cudaGetDeviceCount( &count );
    if ( err != cudaSuccess )
    {
        std::cerr << "cudaGetDeviceCount failed: " << cudaGetErrorString( err ) << "\n";
        return 1;
    }
    std::cout << count << " device(s) found.\n";
    return 0;
}
|
/* Flat-array accessors for 2D (z, x) fields stored with z as the fastest
   dimension: element (z, x) lives at flat index x*nz + z. A variable `nz`
   must be in scope at every use site. */
#define d_vx(z,x) d_vx[(x)*(nz)+(z)]
#define d_vy(z,x) d_vy[(x)*(nz)+(z)]
#define d_vz(z,x) d_vz[(x)*(nz)+(z)]
#define d_szz(z,x) d_szz[(x)*(nz)+(z)] // Pressure
#define d_mem_dvz_dz(z,x) d_mem_dvz_dz[(x)*(nz)+(z)]
#define d_mem_dvx_dx(z,x) d_mem_dvx_dx[(x)*(nz)+(z)]
#define d_Lambda(z,x) d_Lambda[(x)*(nz)+(z)]
#define d_Den(z,x) d_Den[(x)*(nz)+(z)]
#define d_Cp(z,x) d_Cp[(x)*(nz)+(z)]
#define d_CpGrad(z,x) d_CpGrad[(x)*(nz)+(z)]
#define d_szz_plusone(z, x) d_szz_plusone[(x)*(nz)+(z)]
#define d_szz_adj(z, x) d_szz_adj[(x)*(nz)+(z)]
// Accumulate the on-the-fly velocity (Cp) gradient contribution for the
// interior (non-PML, non-padded) region of a 2-D nz x nx grid.
// Thread layout: blockIdx.x/threadIdx.x index z, blockIdx.y/threadIdx.y
// index x; the d_* macros defined above map (z,x) to column-major storage.
// Changes vs. original: the dead trailing `else { return; }` is removed
// (out-of-range threads simply fall through), and the constant is written
// as -2.0f so the float expression is not silently promoted to double.
__global__ void image_vel_time(float *d_szz, float *d_szz_plusone, float *d_szz_adj,\
  int nz, int nx, float dt, float dz, float dx, int nPml, int nPad, \
  float *d_Cp, float *d_Lambda, float *d_CpGrad){
  int gidz = blockIdx.x*blockDim.x + threadIdx.x;
  int gidx = blockIdx.y*blockDim.y + threadIdx.y;
  // Only update points strictly inside the PML boundary and padding.
  if (gidz>=nPml && gidz<=nz-nPml-nPad-1 && gidx>=nPml && gidx<=nx-nPml-1) {
    // Forward difference of the pressure field in time (the /dt scaling was
    // deliberately dropped by the original author; see commented history).
    d_szz_plusone(gidz, gidx) = (d_szz_plusone(gidz, gidx) - d_szz(gidz, gidx));
    d_CpGrad(gidz, gidx) += -2.0f / d_Cp(gidz, gidx) \
        * d_szz_plusone(gidz, gidx) * d_szz_adj(gidz, gidx);
  }
}
3,151 | #include "includes.h"
// Apply a precomputed histogram-equalization lookup table to an image:
// each thread maps one input byte through d_lut into d_out. `serialNum`
// is accepted for interface compatibility but is not used by this kernel.
__global__ void histogram_equalization_gpu_son (unsigned char * d_in, unsigned char * d_out, int * d_lut, int img_size, int serialNum)
{
    const int pixel = blockIdx.x * blockDim.x + threadIdx.x;
    if (pixel < img_size) {
        d_out[pixel] = (unsigned char) d_lut[d_in[pixel]];
    }
}
3,152 | #include <stdio.h>
// Print an identification line from every thread of the launch.
// Fix: the "of %d" total for the global thread id was hard-coded as
// 2*blockDim.x (which assumes exactly 2 blocks, yet main launches 8);
// gridDim.x*blockDim.x reports the true total for any configuration.
__global__ void cuda_hello(){
    printf("Hello from Thread %d out of %d in block %d.\n The ThreadID is %d of %d. \n", threadIdx.x +1, blockDim.x, blockIdx.x, threadIdx.x + (blockIdx.x*blockDim.x), gridDim.x*blockDim.x);
}
// Launch 8 blocks of 2 threads, then block until the device (and its
// buffered printf output) has finished before the process exits.
int main(){
    cuda_hello<<<8, 2>>>();
    cudaDeviceSynchronize();
    return 0;
}
|
3,153 | #include "includes.h"
int answersNumber;
int categoriesNumber;
int atribsNumber;
/**
 * Device function: each thread checks whether its attribute from atribsValues
 * is the same as the one in the query. If so, it copies the probabilities for
 * each of its answers into the result array.
 * @param query - the user's query as concatenated strings
 * @param atribsValues - array of all attributes
 * @param possibilities - array of all probabilities
 * @param queryPrefix - prefix sums of the word lengths in query
 * @param atribsPrefix - same, but for atribsValues
 * @param answersNumber - number of possible answers
 * @param categoriesNumber - number of categories
 * @param atribsNumber - total number of attributes
 * @param resultPossibilities - probabilities of the queried attributes for every possible answer
 */
// One block per (category, attribute) pair: blockIdx.x selects the query
// category, blockIdx.y the candidate attribute. If the two strings match,
// the attribute's per-answer probabilities are copied into the result.
__global__ void searchWithCuda(double *resultPossibilities, char *query, char *atribsValues, double *possibilities, int *queryPrefix, int *atribsPrefix, int *answersNumber, int *categoriesNumber, int *atribsNumber)
{
int category_id = blockIdx.x; // categories
int atrib_id = blockIdx.y; // atribs
// find the start of the attribute's character string in the query and in atribsValues
char *queryAtrib = query + queryPrefix[category_id];
int queryAtribLength = queryPrefix[category_id + 1] - queryPrefix[category_id];
char *currAtrib = atribsValues + atribsPrefix[atrib_id];
int currAtribLength = atribsPrefix[atrib_id + 1] - atribsPrefix[atrib_id];
if (queryAtribLength == currAtribLength)
{
// Lengths match; compare the strings byte by byte.
bool equal = true;
for (int i = 0; i < queryAtribLength; ++i)
{
if (queryAtrib[i] != currAtrib[i])
{
equal = false;
break;
}
}
if (equal) // assign the corresponding probabilities
{
for (int i = 0; i < *answersNumber; ++i)
{
resultPossibilities[*categoriesNumber*i + category_id] = possibilities[*atribsNumber*i + atrib_id]; // for now only for a single answer
}
}
}
}
3,154 | #include <stdio.h>
#include <stdlib.h>
#include "cuda.h"
// to compile for a 3.5 capable device (like the titan in bodge):
// nvcc -arch=sm_35 -O3 -o mxm mxm.cu -lm
//
// to run a partial reduction on a vector of length 8192 :
// ./mxm 8192
// assume going forward 32x32 threads in each thread-block
#define BDIM 32
// naive CUDA mxm kernel
// Naive matrix-matrix multiply C = A*B for row-major N x N matrices.
// One thread per output element C[idy][idx]; every thread streams an entire
// row of A and column of B from global memory (no shared-memory tiling).
__global__ void mxmV1(int N,
const float * __restrict__ A ,
const float * __restrict__ B ,
float * __restrict__ C){
const int idx = threadIdx.x + blockDim.x*blockIdx.x;
const int idy = threadIdx.y + blockDim.y*blockIdx.y;
float axb = 0.;
for(int n=0;n<N;++n){
// A read is strided per-thread along the row; B read is coalesced
// across the warp (consecutive idx hit consecutive addresses).
axb += A[n + idy*N] * B[idx + n*N];
}
C[idx + idy*N] = axb;
}
// smem CUDA matrix-matrix multiply kernel
// Shared-memory tiled matrix multiply C = A*B for row-major N x N matrices.
// Requires blockDim == (BDIM, BDIM) and N a multiple of BDIM.
// Fix: the original lacked a barrier after the inner product -- a fast
// thread could start overwriting s_A/s_B with the next tile while slower
// threads were still reading the current one (a shared-memory race).
__global__ void mxmV2(int N,
                      const float * const __restrict__ A,
                      const float * const __restrict__ B,
                      float * __restrict__ C){
  const int idx = threadIdx.x + blockDim.x*blockIdx.x;
  const int idy = threadIdx.y + blockDim.y*blockIdx.y;
  __shared__ float s_A[BDIM][BDIM];
  __shared__ float s_B[BDIM][BDIM];
  float axb = 0.;
  for(int offset = 0; offset < N; offset += BDIM){
    // load one BDIM x BDIM tile of A and of B into shared memory
    s_A[threadIdx.y][threadIdx.x] = A[idy*N + (offset + threadIdx.x)];
    s_B[threadIdx.y][threadIdx.x] = B[(offset+threadIdx.y)*N + idx];
    __syncthreads(); // make sure both tiles are fully loaded
    for(int j = 0; j < BDIM; ++j){
      axb += s_A[threadIdx.y][j] * s_B[j][threadIdx.x]; // col of A, row of B
    }
    __syncthreads(); // all reads done before the next iteration's stores
  }
  C[idx+idy*N] = axb;
}
// smem CUDA matrix-matrix multiply kernel
// Shared-memory tiled matrix multiply with the B tile padded by one column
// to avoid shared-memory bank conflicts on the column-wise accesses.
// Fix: same missing end-of-iteration barrier as mxmV2 -- without it the
// next tile load races with the current tile's reads.
__global__ void mxmV3(int N,
                      const float * __restrict__ A ,
                      const float * __restrict__ B ,
                      float * __restrict__ C){
  const int idx = threadIdx.x + blockDim.x*blockIdx.x;
  const int idy = threadIdx.y + blockDim.y*blockIdx.y;
  __shared__ float s_A[BDIM][BDIM];
  __shared__ float s_B[BDIM][BDIM+1]; // pad for column accesses
  float axb = 0.;
  for(int offset = 0; offset < N; offset+=BDIM){
    s_A[threadIdx.y][threadIdx.x] = A[idy*N + (offset + threadIdx.x)];
    s_B[threadIdx.y][threadIdx.x] = B[(offset+threadIdx.y)*N + idx];
    __syncthreads(); // make sure both tiles are loaded
    for(int j=0;j<BDIM;++j){
      axb += s_A[threadIdx.y][j] * s_B[j][threadIdx.x];
    }
    __syncthreads(); // all reads done before the next iteration's stores
  }
  C[idx+idy*N] = axb;
}
// Driver: A = identity, B = all-ones, so every entry of C = A*B must be 1.
// Runs all three kernel variants back to back (each overwrites c_C) and
// reports the accumulated squared error of the final result.
int main(int argc, char **argv){
int N = 1024;
float *A = (float*) calloc(N*N, sizeof(float));
float *B = (float*) calloc(N*N, sizeof(float));
float *C = (float*) calloc(N*N, sizeof(float));
printf("N=%d\n", N);
for(int i=0;i<N;++i){
for(int j=0;j<N;++j){
A[i+j*N] = 0.;
if (i==j){
A[i+j*N] = 1.;
}
B[i+j*N] = 1.;
}
}
float *c_A, *c_B, *c_C;
size_t sz = N*N*sizeof(float);
cudaMalloc(&c_A, sz);
cudaMalloc(&c_B, sz);
cudaMalloc(&c_C, sz);
cudaMemcpy(c_A, A, sz, cudaMemcpyHostToDevice);
cudaMemcpy(c_B, B, sz, cudaMemcpyHostToDevice);
// Ceil-divide so a non-multiple of BDIM would still be covered.
int Nb = (N+BDIM-1)/BDIM;
dim3 threadsPerBlock(BDIM,BDIM,1);
dim3 blocks(Nb,Nb,1);
mxmV1 <<< blocks, threadsPerBlock >>> (N, c_A, c_B, c_C);
mxmV2 <<< blocks, threadsPerBlock >>> (N, c_A, c_B, c_C);
mxmV3 <<< blocks, threadsPerBlock >>> (N, c_A, c_B, c_C);
cudaMemcpy(C, c_C, sz, cudaMemcpyDeviceToHost);
// NOTE(review): despite its name, maxerr accumulates the SUM of squared
// errors over all entries, not the maximum error.
float maxerr = 0.;
for (int i = 0; i < N; ++i){
for (int j = 0; j < N; ++j){
float errij = C[i+j*N]-1.0;
maxerr += errij*errij;
}
}
printf("err = %f\n",maxerr);
// --------------------------------------------------------------------------------
// Surface any deferred launch/execution error from the kernels above.
cudaError_t err = cudaGetLastError();
if(err != cudaSuccess){
fprintf(stderr, "CUDA ERROR: %s\n",
cudaGetErrorString(err));
}
}
|
3,155 | #include <string>
/*
struct PointCloud {
utility::device_vector<Eigen::Vector3f> points_;
};
namespace ply_pointcloud_reader {
struct PLYReaderState {
utility::ConsoleProgressBar *progress_bar;
HostPointCloud *pointcloud_ptr;
long vertex_index;
long vertex_num;
long normal_index;
long normal_num;
long color_index;
long color_num;
};
int ReadVertexCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->vertex_index >= state_ptr->vertex_num) {
return 0; // some sanity check
}
float value = ply_get_argument_value(argument);
state_ptr->pointcloud_ptr->points_[state_ptr->vertex_index](index) = value;
if (index == 2) { // reading 'z'
state_ptr->vertex_index++;
++(*state_ptr->progress_bar);
}
return 1;
}
int ReadNormalCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->normal_index >= state_ptr->normal_num) {
return 0;
}
float value = ply_get_argument_value(argument);
state_ptr->pointcloud_ptr->normals_[state_ptr->normal_index](index) = value;
if (index == 2) { // reading 'nz'
state_ptr->normal_index++;
}
return 1;
}
int ReadColorCallback(p_ply_argument argument) {
PLYReaderState *state_ptr;
long index;
ply_get_argument_user_data(argument, reinterpret_cast<void **>(&state_ptr),
&index);
if (state_ptr->color_index >= state_ptr->color_num) {
return 0;
}
float value = ply_get_argument_value(argument);
state_ptr->pointcloud_ptr->colors_[state_ptr->color_index](index) =
value / 255.0;
if (index == 2) { // reading 'blue'
state_ptr->color_index++;
}
return 1;
}
} // namespace ply_pointcloud_reader
bool ReadPointCloudFromPLY(const std::string &filename,
PointCloud &pointcloud,
bool print_progress = false);
bool ReadPointCloudFromPLY(const std::string &filename,
geometry::PointCloud &pointcloud,
bool print_progress) {
using namespace ply_pointcloud_reader;
p_ply ply_file = ply_open(filename.c_str(), NULL, 0, NULL);
if (!ply_file) {
utility::LogWarning("Read PLY failed: unable to open file: %s",
filename.c_str());
return false;
}
if (!ply_read_header(ply_file)) {
utility::LogWarning("Read PLY failed: unable to parse header.");
ply_close(ply_file);
return false;
}
PLYReaderState state;
HostPointCloud host_pc;
state.pointcloud_ptr = &host_pc;
state.vertex_num = ply_set_read_cb(ply_file, "vertex", "x",
ReadVertexCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "y", ReadVertexCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "z", ReadVertexCallback, &state, 2);
state.normal_num = ply_set_read_cb(ply_file, "vertex", "nx",
ReadNormalCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "ny", ReadNormalCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "nz", ReadNormalCallback, &state, 2);
state.color_num = ply_set_read_cb(ply_file, "vertex", "red",
ReadColorCallback, &state, 0);
ply_set_read_cb(ply_file, "vertex", "green", ReadColorCallback, &state, 1);
ply_set_read_cb(ply_file, "vertex", "blue", ReadColorCallback, &state, 2);
if (state.vertex_num <= 0) {
utility::LogWarning("Read PLY failed: number of vertex <= 0.");
ply_close(ply_file);
return false;
}
state.vertex_index = 0;
state.normal_index = 0;
state.color_index = 0;
host_pc.Clear();
host_pc.points_.resize(state.vertex_num);
host_pc.normals_.resize(state.normal_num);
host_pc.colors_.resize(state.color_num);
utility::ConsoleProgressBar progress_bar(state.vertex_num + 1,
"Reading PLY: ", print_progress);
state.progress_bar = &progress_bar;
if (!ply_read(ply_file)) {
utility::LogWarning("Read PLY failed: unable to read file: {}",
filename);
ply_close(ply_file);
return false;
}
ply_close(ply_file);
++progress_bar;
host_pc.ToDevice(pointcloud);
return true;
}
*/
// Placeholder entry point: the PLY reader above is entirely commented out.
int main() {
    return 0;
}
3,156 | #include <stdio.h>
/*
 * Write a + b + threadIdx.x into ret[threadIdx.x]: each thread of a
 * single-block launch fills its own output slot.
 */
__global__ void AplusB( int *ret, int a, int b) {
    const int slot = threadIdx.x;
    ret[slot] = a + b + slot;
}
// Driver: launch 1000 threads, each computing 10 + 100 + i, copy the
// results back and print them. No CUDA call is error-checked.
int main() {
// Create space in the device
int *ret;
cudaMalloc(&ret, 1000 * sizeof(int));
// Call the kernel: one block of 1000 threads.
AplusB<<< 1, 1000 >>>(ret, 10, 100);
// Receive the results (cudaMemcpyDefault infers the direction from the
// pointer types; this blocking copy also synchronizes with the kernel).
int *host_ret = (int *)malloc(1000 * sizeof(int));
cudaMemcpy(host_ret, ret, 1000 * sizeof(int), cudaMemcpyDefault);
// Display the results
for(int i=0; i<1000; i++) {
printf("%d: A+B = %d\n", i, host_ret[i]);
}
// Free unneeded memory
free(host_ret);
cudaFree(ret);
return 0;
}
|
3,157 | #include<stdio.h>
#include<time.h>
#include<stdlib.h>
// Fill a[i] = 2*i and b[i] = 3*i for thread indices in [startvalue, n).
// `c` is unused here; it is kept in the signature for symmetry with func2.
// Fix: removed the dead `i++` at the end of the branch -- the thread
// index was never re-read, so the increment had no effect.
__global__ void func1(int *c,int *a,int *b,int n,int startvalue)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( i < n && i >= startvalue )
    {
        a[i] = i * 2;
        b[i] = i * 3;
    }
}
// Element-wise sum: c[i] = a[i] + b[i] for thread indices in [startvalue, n).
// Fix: removed the dead `i++` at the end of the branch (never re-read).
__global__ void func2(int *c,int *a,int *b,int n,int startvalue)
{
    int i = blockIdx.x*blockDim.x + threadIdx.x;
    if( i < n && i >= startvalue )
    {
        c[i] = a[i] + b[i];
    }
}
// Driver: run func1 (fill a, b) then func2 (c = a + b) for n = 5 elements,
// with a full device free/realloc/copy round-trip between the two kernels.
// NOTE(review): 1024 blocks x 1024 threads is launched for only 5 elements,
// and uninitialized host arrays are copied to the device before func1 runs
// -- harmless here because func1 overwrites a/b, but worth confirming.
int main()
{
int *d_c;
int *d_a;
int *d_b;
int n=5,x;
int a[n],b[n],c[n];
int i ;
i=0;
int startvalue;
int blocks = 1024;
int threads= 1024;
// Round 1: allocate, copy host -> device, fill a and b on the device.
cudaMalloc((void **)&d_c, n*sizeof(int));
cudaMemcpy(d_c, &c, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_a, n*sizeof(int));
cudaMemcpy(d_a, &a, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_b, n*sizeof(int));
cudaMemcpy(d_b, &b, n*sizeof(int), cudaMemcpyHostToDevice);
startvalue = i;
func1<<<blocks, threads>>>(d_c,d_a,d_b,n,startvalue);
cudaDeviceSynchronize();
cudaMemcpy(&c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_c);
cudaMemcpy(&a, d_a, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaMemcpy(&b, d_b, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_b);
i=0;
// Round 2: re-allocate and copy the filled arrays back, then sum them.
cudaMalloc((void **)&d_c, n*sizeof(int));
cudaMemcpy(d_c, &c, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_a, n*sizeof(int));
cudaMemcpy(d_a, &a, n*sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d_b, n*sizeof(int));
cudaMemcpy(d_b, &b, n*sizeof(int), cudaMemcpyHostToDevice);
startvalue = i;
func2<<<blocks, threads>>>(d_c,d_a,d_b,n,startvalue);
cudaDeviceSynchronize();
cudaMemcpy(&c, d_c, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_c);
cudaMemcpy(&a, d_a, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_a);
cudaMemcpy(&b, d_b, n*sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(d_b);
i=0;
// Print c[i] = 2*i + 3*i = 5*i for each element.
while(i<n){
printf("c =%d\n",c[i]);
i++;
}
}
|
3,158 | #include <cmath>
// Scale *v by (2*i - 1): thread 0 writes -*v, thread 1 writes +*v.
// NOTE(review): despite the name this is not copysign(); and if more than
// one thread runs, all of them read-modify-write the SAME location *v --
// a data race. Presumably the intended launch is a single thread; confirm.
__global__ void my_copysign(double* v)
{
int i = threadIdx.x; // assume threadIdx < 2
*v = ((i << 1) - 1) * (*v);
}
|
extern "C"
// Test stub: each thread writes 1 into its own shared-memory slot and then
// reads slot 127-tid, which was written by a DIFFERENT thread.
// Fix: a __syncthreads() barrier is required between the shared-memory
// write and the cross-thread read; without it the read races the writes.
// NOTE(review): every thread stores to the single locations *param_O and
// *param_test -- presumably intentional for this stub, but it is a
// same-address write race; confirm.
__global__ void sconv_fprop_K128_N128 (
    float* param_test,
    float *param_O,
    const float *param_I,
    const float *param_F,
    float param_alpha,
    int param_N,
    int param_K,
    int param_D,
    int param_H,
    int param_W,
    int param_WN,
    int param_HWN,
    int param_DHWN,
    int param_C,
    int param_KRST,
    int param_RST,
    int param_RS,
    int param_magic_RS,
    int param_shift_RS,
    int param_S,
    int param_magic_S,
    int param_shift_S,
    int param_pad_d,
    int param_pad_h,
    int param_pad_w,
    int param_str_d,
    int param_str_h,
    int param_str_w,
    int param_Q,
    int param_PQ,
    int param_QN,
    int param_PQN,
    int param_MPQN,
    int param_magic_Q,
    int param_shift_Q,
    int param_magic_PQ,
    int param_shift_PQ) {
    __shared__ float share[128 * 8 * 4 + 8];
    int tid = threadIdx.x;
    share[tid] = 1;
    __syncthreads(); // make thread tid's store visible to thread 127-tid
    *param_O = share[127-tid];
    *param_test = share[127-tid];
}
|
3,160 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size);
// Single-block element-wise addition: c[i] = a[i] + b[i], one thread per i.
__global__ void addKernel(int *c, const int *a, const int *b)
{
    const int elem = threadIdx.x;
    c[elem] = a[elem] + b[elem];
}
// Inclusive (Hillis-Steele) scan of d_in into d_out for a single-block
// launch, one element per thread, operating directly on global memory.
// Fixes:
//  * the loop bound was `sizeof(d_in)` -- that is the size of a POINTER
//    (8 on 64-bit hosts), not the element count; it only "worked" because
//    the demo array happens to hold 8 elements. Use blockDim.x, the real
//    number of participating threads/elements.
//  * a barrier is added after the write phase so the next round's reads
//    cannot race the current round's writes.
__global__ void global_scan(float* d_out, float* d_in)
{
    int index = threadIdx.x;
    float out = 0.00f;
    d_out[index] = d_in[index];
    __syncthreads();
    for (int i = 1; i < blockDim.x; i *= 2)
    {
        if (index - i >= 0)
        {
            out = d_out[index] + d_out[index - i];
        }
        __syncthreads(); // everyone has read before anyone writes
        if (index - i >= 0)
        {
            d_out[index] = out;
            out = 0.0f;
        }
        __syncthreads(); // writes visible before the next round's reads
    }
}
// Inclusive (Hillis-Steele) scan of d_in into d_out using dynamic shared
// memory (launch with blockDim.x * sizeof(float) bytes of shared memory).
// Same fixes as global_scan: the loop bound `sizeof(d_in)` was the size of
// a pointer rather than the element count (use blockDim.x), and a second
// barrier prevents the next round's reads from racing this round's writes.
__global__ void shmem_scan(float* d_out, float* d_in) {
    extern __shared__ float sdata[];
    int idx = threadIdx.x;
    float out = 0.00f;
    sdata[idx] = d_in[idx];
    __syncthreads();
    for (int step = 1; step < blockDim.x; step *= 2) {
        if (idx - step >= 0) {
            out = sdata[idx] + sdata[idx - step];
        }
        __syncthreads(); // everyone has read before anyone writes
        if (idx - step >= 0) {
            sdata[idx] = out;
            out = 0.00f;
        }
        __syncthreads(); // writes visible before the next round's reads
    }
    d_out[idx] = sdata[idx];
}
// Driver: first runs the addWithCuda vector-add demo on 5 elements, then
// an 8-element inclusive scan with global_scan, printing both results.
int main()
{
const int arraySize = 5;
const int a[arraySize] = { 1, 2, 3, 4, 5 };
const int b[arraySize] = { 10, 20, 30, 40, 50 };
int c[arraySize] = { 0 };
// Add vectors in parallel.
cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addWithCuda failed!");
return 1;
}
printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
c[0], c[1], c[2], c[3], c[4]);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
return 1;
}
printf("Default finished\n");
const int ARRAY_SIZE = 8;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host: 0, 1, ..., 7
float h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = float(i);
}
float h_out[ARRAY_SIZE];
// declare GPU memory pointers
float* d_in;
float* d_out;
// allocate GPU memory
cudaMalloc((void**)&d_in, ARRAY_BYTES);
cudaMalloc((void**)&d_out, ARRAY_BYTES);
// transfer the array to GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
// launch the kernel: one block, one thread per element
global_scan <<<1, ARRAY_SIZE >>> (d_out, d_in);
// Launch syntax reminder: kernel<<<Dg, Db, Ns, S>>>(args);
//   Dg - int or dim3(x,y,z): grid dimensions (how blocks are organized)
//   Db - int or dim3(x,y,z): block dimensions (how threads are organized)
//   Ns - size_t, default 0: bytes of dynamic shared memory per block
//   S  - cudaStream_t, default 0: stream the kernel is enqueued on
// copy back the result array to the host
cudaMemcpy(h_out, d_out, ARRAY_BYTES, cudaMemcpyDeviceToHost);
// print out the resulting array, four values per line
for (int i = 0; i < ARRAY_SIZE; i++) {
printf("%f", h_out[i]);
printf(((i % 4) != 3) ? "\t" : "\n");
}
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_out);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size)
{
int *dev_a = 0;
int *dev_b = 0;
int *dev_c = 0;
cudaError_t cudaStatus;
// Choose which GPU to run on, change this on a multi-GPU system.
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?");
goto Error;
}
// Allocate GPU buffers for three vectors (two input, one output) .
cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int));
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
// Launch a kernel on the GPU with one thread for each element.
addKernel<<<1, size>>>(dev_c, dev_a, dev_b);
// Check for any errors launching the kernel
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus);
goto Error;
}
// Copy output vector from GPU buffer to host memory.
cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!");
goto Error;
}
Error:
cudaFree(dev_c);
cudaFree(dev_a);
cudaFree(dev_b);
return cudaStatus;
}
|
3,161 | #include <string>
#include <map>
#include <vector>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
std::map<std::string, CUfunction> functions;
std::vector<CUmodule> modules;
using namespace std;
// Load each named .cubin module from the current directory and register its
// entry function in the global `functions` map (modules are retained in
// `modules`). Exits the process on any driver-API failure.
// NOTE(review): only "sgemm_tn_128x128_vec" is listed here, yet sgemm_nn()
// and sgemm_nt() below look up "sgemm_nn_128x128_vec"/"sgemm_nt_128x128_vec"
// -- those lookups would insert a null CUfunction. Confirm whether the other
// cubins should be added to kernel_name.
void load_kernels() {
const size_t kernel_size = 1;
const string kernel_name[kernel_size] = {
"sgemm_tn_128x128_vec",
};
for (size_t i = 0; i < kernel_size; ++i) {
const string& name = kernel_name[i];
const string path = name + ".cubin";
CUmodule module;
CUfunction function;
CUresult res;
// load module
res = cuModuleLoad(&module, path.c_str());
if (res != CUDA_SUCCESS) {
std::cerr << "Failed to load module: " << name << std::endl;
exit(1);
}
// load function
res = cuModuleGetFunction(&function, module, name.c_str());
if (res != CUDA_SUCCESS) {
std::cerr << "Failed to load function: " << name << std::endl;
exit(1);
}
functions[name] = function;
modules.push_back(module);
}
}
// Launch the preloaded TN-variant 128x128 SGEMM kernel (C = A^T * B) on
// square size x size matrices with alpha = 1, beta = 0. Exits on failure.
void sgemm_tn(float *A, float *B, float *C, int size) {
    float alpha = 1.0f;
    float beta = 0.0f;
    const int tiles = (size + 127) / 128;  // ceil(size / 128)
    int gridA = tiles;
    int gridB = tiles;
    int lda = size * 32;
    int ldb = size * 32;
    int ldc = size;
    void *args[11] = {&A, &B, &C, &alpha, &beta, &lda, &ldb, &ldc, &size, &size, &size};
    CUresult res = cuLaunchKernel(functions["sgemm_tn_128x128_vec"],
                                  1, gridA, gridB, 256, 1, 1, 0, 0, args, NULL);
    if (res != CUDA_SUCCESS) {
        std::cerr << "Error launching kernel " << res << std::endl;
        exit(1);
    }
}
// Launch the preloaded NN-variant 128x128 SGEMM kernel (C = A * B) on
// square size x size matrices with alpha = 1, beta = 0. Exits on failure.
void sgemm_nn(float *A, float *B, float *C, int size) {
    float alpha = 1.0f;
    float beta = 0.0f;
    const int tiles = (size + 127) / 128;  // ceil(size / 128)
    int gridA = tiles;
    int gridB = tiles;
    int lda = size;
    int ldb = size * 32;
    int ldc = size;
    void *args[11] = {&A, &B, &C, &alpha, &beta, &lda, &ldb, &ldc, &size, &size, &size};
    CUresult res = cuLaunchKernel(functions["sgemm_nn_128x128_vec"],
                                  1, gridA, gridB, 256, 1, 1, 0, 0, args, NULL);
    if (res != CUDA_SUCCESS) {
        std::cerr << "Error launching kernel " << res << std::endl;
        exit(1);
    }
}
// Launch the preloaded NT-variant 128x128 SGEMM kernel (C = A * B^T) on
// square size x size matrices with alpha = 1, beta = 0. Exits on failure.
void sgemm_nt(float *A, float *B, float *C, int size) {
    float alpha = 1.0f;
    float beta = 0.0f;
    const int tiles = (size + 127) / 128;  // ceil(size / 128)
    int gridA = tiles;
    int gridB = tiles;
    int lda = size;
    int ldb = size;
    int ldc = size;
    void *args[11] = {&A, &B, &C, &alpha, &beta, &lda, &ldb, &ldc, &size, &size, &size};
    CUresult res = cuLaunchKernel(functions["sgemm_nt_128x128_vec"],
                                  1, gridA, gridB, 256, 1, 1, 0, 0, args, NULL);
    if (res != CUDA_SUCCESS) {
        std::cerr << "Error launching kernel " << res << std::endl;
        exit(1);
    }
}
// Driver: fills two 4096 x 4096 matrices with ones, runs the SGEMM variant
// named on the command line (tn / nn / nt), and checks that every output
// element equals `size` (the dot product of two all-ones vectors).
int main(int argc, char *argv[]) {
    cudaFree(0);  // force CUDA context initialization before driver-API use
    float *h_A, *h_B, *h_C;
    float *d_A, *d_B, *d_C;
    int size = 4096;
    h_A = (float *)malloc(size * size * sizeof(float));
    h_B = (float *)malloc(size * size * sizeof(float));
    h_C = (float *)malloc(size * size * sizeof(float));
    for (size_t i = 0; i < (size_t)size * size; ++i) {
        h_A[i] = 1;
        h_B[i] = 1;
    }
    cudaMalloc((void **)&d_A, sizeof(float) * size * size);
    cudaMalloc((void **)&d_B, sizeof(float) * size * size);
    cudaMalloc((void **)&d_C, sizeof(float) * size * size);
    cudaMemcpy(d_A, h_A, sizeof(float) * size * size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, sizeof(float) * size * size, cudaMemcpyHostToDevice);
    load_kernels();
    // Fix: the variant name was read from argv[0] (the program path), so no
    // branch below could ever match; read argv[1] and validate argc first.
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <tn|nn|nt>" << std::endl;
        exit(1);
    }
    std::string kernel = std::string(argv[1]);
    if (kernel == "tn") {
        sgemm_tn(d_A, d_B, d_C, size);
    } else if (kernel == "nn") {
        sgemm_nn(d_A, d_B, d_C, size);
    } else if (kernel == "nt") {
        sgemm_nt(d_A, d_B, d_C, size);
    } else {
        std::cerr << "tt kernel not supported: " << std::endl;
        exit(1);
    }
    cudaMemcpy(h_C, d_C, sizeof(float) * size * size, cudaMemcpyDeviceToHost);
    // Every element should be exactly `size` (sum of `size` ones).
    for (size_t i = 0; i < (size_t)size * size; ++i) {
        if (h_C[i] != size) {
            std::cerr << "Error: " << i << ":" << h_C[i] << std::endl;
            exit(1);
        }
    }
    free(h_A);
    free(h_B);
    free(h_C);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    // run successfully
    std::cout << "finish" << std::endl;
    return 0;
}
|
// Compute a = b * c for row-major matrices: b is M x P, c is P x N, and the
// result a is M x N. One thread per output element; threads are laid out in
// 1-D blocks of 32 (global element id = threadIdx.x + 32 * blockIdx.x).
__global__ void MatrixMultiplication_cuda (float * __restrict__ a, float * __restrict__ b, float * __restrict__ c, int M, int N, int P)
{
    const int elem = threadIdx.x + blockIdx.x * 32;
    if (elem < M * N)
    {
        // Recover the 2-D output coordinate from the flat element id.
        const int row = elem / N;
        const int col = elem % N;
        float sum = 0.0f;
        for (int k = 0; k < P; ++k)
        {
            sum += b[row * P + k] * c[k * N + col];
        }
        a[row * N + col] = sum;
    }
}
|
3,163 | #include "includes.h"
// Inclusive Hillis-Steele scan of d_inVals within each block, writing the
// per-element results to d_outVals; `valOffset` is folded into each block's
// first element. The last thread of every block (except the last) publishes
// the block total into d_blockOffset[blockIdx.x + 1] for a later
// cross-block combination pass.
// Requires blockDim.x * sizeof(unsigned int) bytes of dynamic shared memory.
//
// Fixes vs. the original:
//  * __syncthreads() was executed inside a divergent branch (only threads
//    with tIdx >= offset reached it) -- undefined behavior. Barriers are
//    now reached unconditionally by the whole block.
//  * threads with gIdx >= numVals returned before the first barrier, which
//    also breaks block-wide synchronization; they now participate in every
//    barrier with a neutral 0 contribution and simply skip the global write.
__global__ void incSumScanB1_kernel(unsigned int* d_outVals, unsigned int* d_inVals, size_t numVals, unsigned int* d_blockOffset, unsigned int valOffset)
{
    unsigned int tIdx = threadIdx.x;
    unsigned int gIdx = blockIdx.x * blockDim.x + threadIdx.x;
    extern __shared__ unsigned int s_incScan[];
    const bool inRange = (gIdx < numVals);
    // The first element of a block absorbs the running offset; out-of-range
    // lanes hold 0 so they do not disturb the scan.
    unsigned int v = 0;
    if (inRange) {
        v = d_inVals[gIdx];
        if (tIdx == 0) v += valOffset;
    }
    s_incScan[tIdx] = v;
    __syncthreads();
    for (unsigned int offset = 1; offset <= blockDim.x; offset *= 2)
    {
        unsigned int partial = s_incScan[tIdx];
        if (tIdx >= offset) {
            partial += s_incScan[tIdx - offset];
        }
        __syncthreads();   // everyone has read before anyone writes
        s_incScan[tIdx] = partial;
        __syncthreads();   // writes visible before the next round's reads
    }
    if (inRange) {
        d_outVals[gIdx] = s_incScan[tIdx];
    }
    // Publish this block's cumulative sum (still to be combined with the
    // sums of the preceding blocks by the caller).
    if ((tIdx + 1) == blockDim.x && (blockIdx.x + 1) < gridDim.x)
    {
        d_blockOffset[blockIdx.x + 1] = s_incScan[tIdx];
    }
}
3,164 | #include <stdio.h>
#include "orbit_integrator_cuda.cu"
#define N 256
#define N_TOT N * J
float x_h[N_TOT], y_h[N_TOT], vx_h[N_TOT], vy_h[N_TOT];
float *x_d, *y_d, *vx_d, *vy_d;
cudaError_t err;
// Driver for the orbit integrator: with any command-line argument the Euler
// integration runs on the GPU (kernel from orbit_integrator_cuda.cu);
// without arguments the same integration runs on the CPU. Either way the
// trajectory of particle 0 is printed at the end.
// NOTE(review): J, K, G, mass and scale are defined in the included
// orbit_integrator_cuda.cu and are not visible here -- J appears to be the
// number of stored trajectory samples per particle; confirm against that file.
int main(int argc, char** argv) {
// Initial conditions: every particle starts at (1, 0) with velocity (0, 0.1).
for(int i = 0; i < N; i++) {
x_h[i*J] = 1;
y_h[i*J] = 0;
vx_h[i*J] = 0;
vy_h[i*J] = 0.1;
}
//printf("x[%d] = %f\n", N-1, x[N-1]);
if( argc > 1) {
// GPU path: allocate, copy, integrate, copy back. Errors are only
// printed (via cudaGetLastError), never acted upon.
cudaMalloc((void**) &x_d, sizeof(float)*N_TOT);
cudaMalloc((void**) &y_d, sizeof(float)*N_TOT);
cudaMalloc((void**) &vx_d, sizeof(float)*N_TOT);
cudaMalloc((void**) &vy_d, sizeof(float)*N_TOT);
err = cudaGetLastError ();
printf("malloc: %s\n", cudaGetErrorString(err));
cudaMemcpy(x_d, x_h, sizeof(float)*N_TOT, cudaMemcpyHostToDevice);
err = cudaGetLastError ();
printf("copy: %s\n", cudaGetErrorString(err));
cudaMemcpy(y_d, y_h, sizeof(float)*N_TOT, cudaMemcpyHostToDevice);
err = cudaGetLastError ();
printf("copy: %s\n", cudaGetErrorString(err));
cudaMemcpy(vx_d, vx_h, sizeof(float)*N_TOT, cudaMemcpyHostToDevice);
err = cudaGetLastError ();
printf("copy: %s\n", cudaGetErrorString(err));
cudaMemcpy(vy_d, vy_h, sizeof(float)*N_TOT, cudaMemcpyHostToDevice);
err = cudaGetLastError ();
printf("copy: %s\n", cudaGetErrorString(err));
// NOTE(review): dimBlock is computed but unused; the launch below uses
// 32 blocks of N/32 threads instead.
dim3 dimBlock(512,64);
//square<<<dimBlock, 512>>>(x_device);
integrate_orbit_euler<<<32, N/32>>>(x_d, y_d, vx_d, vy_d);
err = cudaGetLastError ();
printf("call: %s\n", cudaGetErrorString(err));
//*
cudaMemcpy(x_h, x_d, sizeof(float)*N_TOT, cudaMemcpyDeviceToHost);
cudaMemcpy(y_h, y_d, sizeof(float)*N_TOT, cudaMemcpyDeviceToHost);
cudaMemcpy(vx_h, vx_d, sizeof(float)*N_TOT, cudaMemcpyDeviceToHost);
cudaMemcpy(vy_h, vy_d, sizeof(float)*N_TOT, cudaMemcpyDeviceToHost);
/**/
err = cudaGetLastError ();
printf("cpy: %s\n", cudaGetErrorString(err));
} else {
// CPU reference path: forward-Euler integration of each particle in a
// Plummer-softened central potential (G, mass, scale from the include).
for(int i = 0; i < N; i++) {
int nr = i;
float x = x_h[nr * J];
float y = y_h[nr * J];
float vx = vx_h[nr * J];
float vy = vy_h[nr * J];
float dt = 0.01;
for(int j = 0; j < J; j++) {
for(int k = 0; k < K; k++) {
float r = sqrt(x*x+y*y);
float dphidr = G * mass * r / pow((r*r + scale*scale), (3./2));
float Fx = -dphidr*x/r;
float Fy = -dphidr*y/r;
vx += Fx * dt;
x += vx * dt;
vy += Fy * dt;
y += vy * dt;
}
x_h[nr*J+j] = x;
y_h[nr*J+j] = y;
vx_h[nr*J+j] = vx;
vy_h[nr*J+j] = vy;
}
}
}
// Print the first sample of particle 0's trajectory.
for(int i = 0; i < 1; i++) {
printf(" x[%d] = %f\n", i, x_h[J*i]);
printf(" y[%d] = %f\n", i, y_h[J*i]);
printf("vx[%d] = %f\n", i, vx_h[J*i]);
printf("vy[%d] = %f\n", i, vy_h[J*i]);
}
//printf("x[%d] = %f\n", N-1, x[N-1]);
return 0;
}
|
3,165 | #include <cuda.h>
#include <iostream>
#include <stdlib.h>
#include <assert.h>
#include <chrono>
#define CUDA_CHECK(status) (assert(status == cudaSuccess))
#define threads_per_block 1024
// sum the shared data reductions into a single one
// loop unrolled version for increased performance
// Note: do not remove volatile!
// Note: some redudant calculation is done due to bank conflicts / sleeping threads
// Tree-reduce a run of 32 consecutive shared-memory values into sdata[tid],
// halving the stride from 16 down to 1; only the entries at the start of
// each 32-element run end up holding a complete sum.
// `volatile` forces every step to re-read shared memory -- this is the
// classic warp-synchronous idiom that relies on implicit lockstep.
// NOTE(review): under independent thread scheduling (Volta and newer) this
// pattern is not guaranteed without __syncwarp(); confirm the target arch.
__device__ void columnWarpReduce(volatile float* sdata, int tid) {
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
// Sum each column of an m x n row-major matrix into result[column].
// Launched with 32x32 blocks and ceil(n/32) grid columns; needs
// 32*32*sizeof(float) bytes of dynamic shared memory.
// Each thread first serially accumulates its column's elements with a
// stride of blockDim.y rows, stores the partial TRANSPOSED in shared
// memory (so per-column partials are contiguous), then columnWarpReduce
// collapses each 32-partial run; threadIdx.y == 0 writes the final sum.
__global__ void column_reduce(float * matrix, float * result, int m /* lines */, int n /* columns*/) {
extern __shared__ float sdata[];
// normal tid
int tid = threadIdx.x + threadIdx.y * blockDim.x;
// transposed tid for shared memory
int new_tid = threadIdx.y + threadIdx.x * blockDim.y;
// true x value in the matrix
int real_x = threadIdx.x + blockDim.x * blockIdx.x;
int i = real_x + n * threadIdx.y;
const int it = n*blockDim.y;
int offset = it;
float accumulator = 0;
if (threadIdx.y < m && real_x < n) {
// serially accumulate this thread's share of the column, striding
// blockDim.y rows at a time
accumulator = matrix[i];
while (i + offset < n*m) {
accumulator += matrix[i + offset];
offset += it;
}
}
// save column reduction data in a transposed way (out-of-range threads
// contribute 0 so the tree reduction stays well defined)
sdata[new_tid] = accumulator;
__syncthreads();
// avoid last warp to run causing memory errors (tid + 16 would read past
// the end of the 32*32 shared array)
if (tid < 32 * 32 - 16) {
columnWarpReduce(sdata, tid);
}
__syncthreads();
if (threadIdx.y == 0 && real_x < n)
result[real_x] = sdata[new_tid];
}
// Driver: builds a random m x n matrix, computes the column sums on the CPU
// as a reference, runs column_reduce on the GPU with event-based timing, and
// compares both results element-wise within a 1e-3 tolerance.
int main(int argc, char * argv[]) {
if (argc < 3) {
printf("Usage: %s <m> <n>\n", argv[0]);
return 0;
}
int m = atoi(argv[1]), n = atoi(argv[2]);
// Fixed seed so runs are reproducible (time(NULL) kept for reference).
unsigned long seed = 1620144156; //time(NULL);
srand(seed); // seed
printf("Running with seed %ld\n", seed);
// create row-major matrix m x n
float * matrix = (float *) malloc(sizeof(float) * m * n); // m x n
// create array to store result
float * result_gpu = (float *) malloc(sizeof(float) * n); // tot_num_blocks x 1
float * result_cpu = (float *) malloc(sizeof(float) * n); // validation
memset(result_cpu, 0, sizeof(float) * n);
printf("Populating array \n");
// populate the array with values in (0, 1]
for (int i = 0; i < m * n; i++)
matrix[i] = 1.0 / ((rand() % 977) + 1);
printf("Calculating final result\n");
auto cpu_start = std::chrono::high_resolution_clock::now();
// calculate cpu result (reference column sums)
for (int i = 0; i < m; i++)
for (int j = 0; j < n; j++)
result_cpu[j] += matrix[i * n + j];
auto cpu_end = std::chrono::high_resolution_clock::now();
printf("CPU took %f ms.\n", std::chrono::duration_cast<std::chrono::microseconds>(cpu_end - cpu_start).count() / 1000.0);
printf("Allocating GPU memory, m=%d, n=%d\n", m, n);
// allocate gpu memory
float * matrix_gpu, * device_result, * helper_result = NULL;
CUDA_CHECK(cudaMalloc(&matrix_gpu, sizeof(float) * m * n));
CUDA_CHECK(cudaMalloc(&device_result, sizeof(float) * n));
CUDA_CHECK(cudaMemset(device_result, 0, sizeof(float) * n));
printf("Finished allocating. Copying matrix...\n");
// move matrix into gpu
CUDA_CHECK(cudaMemcpy(matrix_gpu, matrix, m * n * sizeof(float), cudaMemcpyHostToDevice));
cudaEvent_t start, stop;
float elapsed_time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// call kernel: 32x32 blocks, one grid column per 32 matrix columns,
// with 32*32 floats of dynamic shared memory
dim3 block_threads(32, 32);
dim3 grid_threads(n / 32 + (n % 32 ? 1 : 0), 1);
printf("Calling kernel with m=%d n=%d, gridsize=(%d,%d)\n", m, n, grid_threads.x, grid_threads.y);
CUDA_CHECK(cudaEventRecord(start));
column_reduce<<<grid_threads, block_threads, sizeof(float)*32*32>>>(matrix_gpu, device_result, m, n);
CUDA_CHECK(cudaEventRecord(stop));
// Wait for final kernel to finish
CUDA_CHECK(cudaDeviceSynchronize());
// end = std::chrono::high_resolution_clock::now();
cudaEventElapsedTime(&elapsed_time, start, stop);
printf("Kernel finished. Took %f ms. Copying back results.\n", elapsed_time);
// copy back results
CUDA_CHECK(cudaMemcpy(result_gpu, device_result, n * sizeof(float), cudaMemcpyDeviceToHost));
// free gpu memory
CUDA_CHECK(cudaFree(matrix_gpu));
CUDA_CHECK(cudaFree(device_result));
if (helper_result) CUDA_CHECK(cudaFree(helper_result));
printf("Released GPU memory. Validating results...\n");
// compare results within an absolute tolerance (float summation order
// differs between CPU and GPU, so bit-exact equality is not expected)
for (int i = 0; i < n; i++) {
if (abs(result_cpu[i] - result_gpu[i]) > 1e-3)
printf("INCORRECT RESULT: cpu=%.10f gpu=%.10f @ index=%d, diff=%.10f\n", result_cpu[i], result_gpu[i], i, result_cpu[i] - result_gpu[i]);
// else printf("Correct result! cpu=%.10f, gpu=%.10f, diff=%.10f\n", result_cpu[i], result_gpu[i], result_cpu[i] - result_gpu[i]);
}
free(result_gpu);
free(result_cpu);
free(matrix);
return 0;
}
3,166 | #include <stdio.h>
// Prints a greeting from the host; an ordinary C function executed on the CPU.
void helloCPU()
{
printf("Hello from the CPU.\n");
}
// __global__ marks this as a kernel: launched from the host with <<<grid, block>>>
// and executed on the GPU. Device-side printf output becomes visible at the
// next synchronization point (e.g. cudaDeviceSynchronize()).
__global__ void helloGPU()
{
printf("Hello from the GPU.\n");
}
int main()
{
// Kernel launches are asynchronous: control returns to the host immediately.
helloGPU<<<1, 1>>>();
cudaDeviceSynchronize(); // wait for the GPU greeting to print before continuing
helloCPU();
helloGPU<<<1, 1>>>();
// Final sync so the second device printf is flushed before the process exits.
cudaDeviceSynchronize();
}
|
3,167 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <cuda.h>
unsigned int getmax(unsigned int *, unsigned int);
// Parses the array size from argv[1], fills an array with random values in
// [0, size-1], and prints the maximum found by the GPU helper getmax().
int main(int argc, char *argv[])
{
    unsigned int size = 0;   // the size of the array
    unsigned int i;          // loop index
    unsigned int * numbers;  // pointer to the array
    if (argc != 2)
    {
        printf("usage: maxgpu num\n");
        printf("num = size of the array\n");
        exit(1);
    }
    size = atol(argv[1]);
    // rand() % size below is undefined for size == 0 (and atol of non-numeric
    // input yields 0), so reject it before allocating.
    if (size == 0)
    {
        printf("num must be a positive integer\n");
        exit(1);
    }
    numbers = (unsigned int *)malloc(size * sizeof(unsigned int));
    if (!numbers)
    {
        printf("Unable to allocate mem for an array of size %u\n", size);
        exit(1);
    }
    srand(time(NULL)); // setting a seed for the random number generator
    // Fill-up the array with random numbers from 0 to size-1
    for (i = 0; i < size; i++)
        numbers[i] = rand() % size;
    printf(" The maximum number in the array is: %u\n", getmax(numbers, size));
    free(numbers);
    exit(0);
}
// kernel
// One reduction pass of a block-wise maximum over num[].
// Launch geometry is hard-coded to 1000 threads per block: each block loads up
// to 1000 elements into shared memory, tree-reduces them to one maximum, and
// thread 0 writes that maximum back into num[] at the block's base position so
// the next pass can reduce the per-block results again.
// offset selects the read stride: 1 on the first pass (dense input), 1000 on
// later passes (per-block maxima sit 1000 apart in num[]).
// NOTE(review): __syncthreads() sits inside the divergent `if (t < size)`
// branch — threads with t >= size skip the barrier, which is undefined
// behavior for partially-full blocks. Verify against the launch config in
// getmax() before relying on this for sizes not a multiple of 1000.
__global__ void getmaxcu(unsigned int num[], unsigned int size, unsigned int offset) {
__shared__ unsigned int block_num[1000];
unsigned int t = threadIdx.x + (blockIdx.x * 1000);
unsigned int boundary;
if (offset != 1)
offset = 1000;
if (t < size) {
block_num[threadIdx.x] = num[t*offset];
// boundary = number of live elements in this block's shared buffer;
// the last (partial) block only holds size % 1000 of them.
boundary = 1000;
if (t > (size - 1 - (size % 1000)))
boundary = size % 1000;
__syncthreads();
// Tree reduction: each round keeps max of pairs (i, i + ceil(boundary/2)).
while (boundary > 1) {
if ((threadIdx.x < boundary/2) && (block_num[threadIdx.x] < block_num[threadIdx.x + (boundary+1)/2]))
block_num[threadIdx.x] = block_num[threadIdx.x + (boundary+1)/2];
boundary = (boundary+1)/2;
__syncthreads();
}
// Thread 0 publishes the block maximum back to global memory at num[t]
// (t == blockIdx.x * 1000 here), matching the stride-1000 reads above.
if (threadIdx.x == 0)
num[t] = block_num[0];
}
}
/*
input: pointer to an array of unsigned int
number of elements in the array
output: the maximum number of the array
Copies the data to the GPU, then runs getmaxcu repeatedly — each pass collapses
groups of 1000 values into a single per-block maximum — for ceil(log_1000(size))
passes; the surviving maximum is read back in num[0].
NOTE(review): none of the CUDA calls are error-checked, and tot_blocks is
computed once from the original size even though later passes operate on far
fewer live values; surplus blocks appear to be benign only because of the
t < size guard in getmaxcu — confirm.
*/
unsigned int getmax(unsigned int num[], unsigned int size)
{
unsigned int i;
unsigned int * device_num;
cudaMalloc(&device_num, size * sizeof(unsigned int));
cudaMemcpy(device_num, num, size * sizeof(unsigned int), cudaMemcpyHostToDevice);
unsigned int threads_per_block = 1000;
unsigned int tot_blocks = ceil((double)size/(threads_per_block));
// Pass i reduces the survivors. The size argument shrinks ~1000x per pass:
// 1000/pow(1000, i+1) is a double scale factor (1, 1/1000, ...).
for (i = 0; i < ceil((double)log10(size)/log10(1000)); i++)
getmaxcu<<<tot_blocks, threads_per_block>>>(device_num, (int)size*(1000/pow(1000, (i+1))), i+1);
cudaMemcpy(num, device_num, size * sizeof(unsigned int), cudaMemcpyDeviceToHost);
cudaFree(device_num);
return num[0];
} |
3,168 |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
cudaError_t VectorSum(int *c, const int *a, const int *b, unsigned int size);
// Element-wise vector add: each thread computes one output element.
// Assumes the launch supplies one thread per element (<<<1, size>>>),
// so no bounds guard is needed here.
__global__ void VectorSum(int *c, const int *a, const int *b)
{
    const int idx = threadIdx.x;
    c[idx] = a[idx] + b[idx];
}
// Adds two fixed 5-element vectors on the GPU via the VectorSum host wrapper
// and prints the result. (Removed the unused local `int d = 1;`.)
int main()
{
    const int arraySize = 5;
    const int a[arraySize] = { 1, 2, 3, 4,5};
    const int b[arraySize] = { 10, 20, 30, 40, 50 };
    int c[arraySize] = { 0 };
    // Run the whole host-side pipeline: allocate, copy, launch, copy back.
    cudaError_t cudaStatus = VectorSum(c, a, b, arraySize);
    if (cudaStatus != cudaSuccess) {
        printf( "addWithCuda failed!");
        return 1;
    }
    printf("\n{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n",
        c[0], c[1], c[2], c[3], c[4]);
    // cudaDeviceReset tears down the context (flushes profiling data, etc.).
    cudaStatus = cudaDeviceReset();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceReset failed!");
        return 1;
    }
    getchar(); // keep the console window open
    return 0;
}
// Host wrapper: allocates device buffers, uploads a and b, launches the
// VectorSum kernel with one thread per element, and copies the result into c.
// Returns the last CUDA error observed (cudaSuccess on a clean run).
//
// Fixes over the previous revision:
//  * the kernel was launched with the HOST array `c` as its output pointer,
//    so the device wrote through an invalid pointer and dev_c was never
//    filled; it now receives dev_c.
//  * the three device allocations were never freed (leak on every call).
//  * error strings were passed as printf FORMAT strings; they now go through
//    a "%s" format so a '%' in a message cannot corrupt the output.
cudaError_t VectorSum(int *c, const int *a, const int *b, unsigned int size)
{
    int *dev_a = 0;
    int *dev_b = 0;
    int *dev_c = 0;
    cudaError_t err = cudaGetLastError(); // clear any stale sticky error
    if (err != cudaSuccess) {
        fprintf(stderr, "%s", cudaGetErrorString(err));
    }
    cudaMalloc((void**)&dev_a, size * sizeof(int));
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "%s", cudaGetErrorString(err));
    }
    cudaMalloc((void**)&dev_b, size * sizeof(int));
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "%s", cudaGetErrorString(err));
    }
    cudaMalloc((void**)&dev_c, size * sizeof(int));
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "%s", cudaGetErrorString(err));
    }
    cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "%s", cudaGetErrorString(err));
    }
    cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "%s", cudaGetErrorString(err));
    }
    // BUG FIX: the kernel must write to the DEVICE buffer dev_c, not the host
    // array c. (The old upload of c into dev_c was pointless and is dropped.)
    VectorSum<<<1, size>>>(dev_c, dev_a, dev_b);
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "%s", cudaGetErrorString(err));
    }
    // Blocking copy: also synchronizes with the kernel before reading dev_c.
    err = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        fprintf(stderr, "%s", cudaGetErrorString(err));
    }
    // Release device memory (previously leaked on every call).
    cudaFree(dev_a);
    cudaFree(dev_b);
    cudaFree(dev_c);
    return err;
}
|
3,169 | //******************************************************
// Assignment #1
// Names: Anthony Enem and Cavaughn Browne
// Parallel Programming Date: 10/10/16
//******************************************************
// This program implements the cooley tukey fft algorithm
// and computes the values fro X_k from 0 to N. The program
// should be compiled using "gcc -o cooley_fft Browne_Enem_A1.c"
// Then it should be run with "./cooley_fft N" where N is the
// maximum value N in the fft formula
//******************************************************
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
double PI = acos(-1);
#define N 16384
//define struct for complex numbers
typedef struct complex{
double real;
double imaginary;
} complex;
//create complex pointer for input
complex* x_input = NULL;
//******************************************************
// Parameters: two complex structs a and b
// Returns the complex product a*b as a new struct;
// neither argument is modified.
//******************************************************
complex multiplyComplex(complex a, complex b)
{
    complex product;
    product.real = a.real * b.real - a.imaginary * b.imaginary;
    product.imaginary = a.imaginary * b.real + a.real * b.imaginary;
    return product;
}
//******************************************************
// Parameters: two complex structs a and b
// Returns the component-wise sum a+b as a new struct;
// neither argument is modified.
//******************************************************
complex addComplex(complex a, complex b)
{
    complex sum;
    sum.real = a.real + b.real;
    sum.imaginary = a.imaginary + b.imaginary;
    return sum;
}
//******************************************************
// Parameters: two complex structs a and b
// Returns the component-wise difference a-b as a new
// struct; neither argument is modified.
//******************************************************
complex subtractComplex(complex a, complex b)
{
    complex difference;
    difference.real = a.real - b.real;
    difference.imaginary = a.imaginary - b.imaginary;
    return difference;
}
//******************************************************
// Parameters: integer k, pointers to complex structs E_k and O_k
// Computes the (N/2)-point DFT sums over the even-indexed samples (into
// *E_k) and the odd-indexed samples (into *O_k) of the global x_input
// array, returning both by reference.
// k is first folded into [0, N/2) because E_k and O_k are periodic with
// period N/2 — this lets the caller reuse one evaluation for X[k] and
// X[k + N/2].
//******************************************************
void compute_Even_Odd(int k, complex* E_k, complex* O_k)
{
if(k >= N/2){
k -= N/2;
}
// start both accumulators at 0 + 0i
E_k->real = 0;
E_k->imaginary = 0;
O_k->real = 0;
O_k->imaginary = 0;
int N_half = N/2;
complex e_part;
double exp;
for(int m = 0; m <= N/2 - 1; m++)
{
//compute exponent of the unit root: -2*pi*m*k / (N/2)
exp = -2.0 * PI * m * k / N_half;
//compute part with e^(i * exponent) via Euler's formula
e_part.real = cos(exp);
e_part.imaginary = sin(exp);
//accumulate even-sample term x[2m] * e^(i*exp)
*E_k = addComplex(
*E_k,
multiplyComplex(x_input[2*m], e_part)
);
//accumulate odd-sample term x[2m+1] * e^(i*exp)
*O_k = addComplex(
*O_k,
multiplyComplex(x_input[2*m+1], e_part)
);
}
}
//******************************************************
// Parameters: integer N and integer k
// This function computes the twiddle factor for given
// N and k values
//******************************************************
complex compute_twiddle(int k)
{
double exp = -2.0 * PI * k / N;
complex twiddle = { cos(exp), sin(exp) };
return twiddle;
}
//Main function
// Builds a fixed N-sample input (8 defined values, zero-padded), runs one
// Cooley-Tukey butterfly stage sequentially, prints the first 8 outputs and
// the CPU/CUDA-event timings.
// Fixes over the previous revision: the heap buffers (x_input, result, E_k,
// O_k) and the two CUDA events were never released; they are now freed.
int main(int argc, char** argv)
{
    //make sure user enters a value for N
    /*if (argc != 2)
    {
    printf("Wrong usage!\n");
    printf("Must specify value for N!\n");
    return 1;
    }*/
    //get N from command line argument
    //allocate memory for N complex inputs and N outputs
    x_input = (complex *)malloc(N * sizeof(complex));
    complex* result = (complex*)malloc(N *sizeof(complex));
    //set defined values from 0 to 7 for x_input
    x_input[0].real = 3.6;
    x_input[0].imaginary = 2.6;
    x_input[1].real = 2.9;
    x_input[1].imaginary = 6.3;
    x_input[2].real = 5.6;
    x_input[2].imaginary = 4;
    x_input[3].real = 4.8;
    x_input[3].imaginary = 9.1;
    x_input[4].real = 3.3;
    x_input[4].imaginary = 0.4;
    x_input[5].real = 5.9;
    x_input[5].imaginary = 4.8;
    x_input[6].real = 5;
    x_input[6].imaginary = 2.6;
    x_input[7].real = 4.3;
    x_input[7].imaginary = 4.1;
    //zero-pad x[8] .. x[N-1]
    for (int i = 8; i < N; i++)
    {
        x_input[i].real = 0;
        x_input[i].imaginary = 0;
    }
    complex twiddle_factor;
    //allocate memory for E_k and O_k
    complex* E_k = (complex *)malloc(sizeof(complex));
    complex* O_k = (complex *)malloc(sizeof(complex));
    clock_t st = clock();
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventRecord(start,0);
    // Exploit the X[k + N/2] symmetry: one even/odd evaluation yields two
    // output bins per iteration.
    for(int k = 0; k < N/2; k++)
    {
        //get twiddle factor
        twiddle_factor = compute_twiddle(k);
        //compute even and odd portions
        compute_Even_Odd(k, E_k, O_k);
        //add up results
        result[k] = addComplex(*E_k, multiplyComplex(twiddle_factor, *O_k));
        result[k + N/2] = subtractComplex(*E_k, multiplyComplex(twiddle_factor, *O_k));
    }
    clock_t end = clock();
    cudaEventCreate(&stop);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start,stop);
    //PRINT FIRST 8 RESULTS
    printf("SEQUENTIAL VERSION\n\n");
    printf("TOTAL PROCESSED SAMPLES: %d\n", N);
    for(int i = 0; i < 8; i++)
    {
        printf("==================\n");
        printf("XR[%d]: %f\n", i, result[i].real);
        printf("XI[%d]: %f\n", i, result[i].imaginary);
    }
    printf("==================\n");
    printf("Cuda time : %f secs\n", elapsedTime/1000);
    printf("\nC clock Time: %f\n", (double)(end - st) / CLOCKS_PER_SEC);
    // Release timing events and heap buffers (previously leaked).
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    free(E_k);
    free(O_k);
    free(result);
    free(x_input);
    return 0;
} |
3,170 | #include "includes.h"
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
// Element-wise sum c[i] = a[i] + b[i] over n floats, one thread per element.
// Launch with enough threads to cover n; surplus threads take the early return.
__global__ void matrix_elementwise_add(const float *a, const float *b, float *c, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    c[i] = a[i] + b[i];
} |
3,171 | // This program will demo how to use CUDA to accelerate inner-product
#include <iostream>
#include <cstdlib>
using namespace std;
#define VECNUM 50000
#define VECLEN 1000
int *inputA, *inputB;
int *devInputA, *devInputB, *devOut;
int *outCPU, *outGPU;
// Fill the two flattened [VECNUM x VECLEN] input matrices with pseudo-random
// values in [0, 999] and zero-initialize both result buffers.
void init()
{
    inputA = new int[VECNUM * VECLEN];
    inputB = new int[VECNUM * VECLEN];
    for (int vec = 0; vec < VECNUM; vec++) {
        const int base = vec * VECLEN;
        for (int elem = 0; elem < VECLEN; elem++) {
            // same rand() call order as before: A then B per element
            inputA[base + elem] = rand() % 1000;
            inputB[base + elem] = rand() % 1000;
        }
    }
    outCPU = new int[VECNUM]();
    outGPU = new int[VECNUM]();
}
// Allocate the device buffers and upload both input matrices.
// The output buffer devOut is left uninitialized; the kernel overwrites it.
void initGPU()
{
    const int inputSize = sizeof(int) * VECNUM * VECLEN;
    cudaMalloc(&devInputA, inputSize);
    cudaMalloc(&devInputB, inputSize);
    cudaMalloc(&devOut, sizeof(int) * VECNUM);
    cudaMemcpy(devInputA, inputA, inputSize, cudaMemcpyHostToDevice);
    cudaMemcpy(devInputB, inputB, inputSize, cudaMemcpyHostToDevice);
}
// Inner product of one vector pair per block: blockIdx.x selects the pair,
// threadIdx.x the component. Each thread stores its product in shared memory;
// after the barrier, thread 0 serially sums the VECLEN products into out[].
// Requires blockDim.x == VECLEN.
__global__
void innerProductGPU(int *A, int *B, int *out)
{
    const int vec = blockIdx.x;
    const int lane = threadIdx.x;
    __shared__ int products[VECLEN];
    const int pos = vec * VECLEN + lane;
    products[lane] = A[pos] * B[pos];
    __syncthreads();
    if (lane != 0)
        return;
    int sum = 0;
    for (int k = 0; k < VECLEN; k++)
        sum += products[k];
    out[vec] = sum;
}
// CPU reference: inner product of each of the VECNUM vector pairs.
void innerProductCPU()
{
    for (int vec = 0; vec < VECNUM; vec++) {
        const int base = vec * VECLEN;
        int sum = 0;
        for (int k = 0; k < VECLEN; k++)
            sum += inputA[base + k] * inputB[base + k];
        outCPU[vec] = sum;
    }
}
// Compare CPU and GPU results element-by-element; report and stop at the
// first mismatch. Returns true when every element agrees exactly.
bool checker(){
    for (int i = 0; i < VECNUM; i++) {
        if (outCPU[i] == outGPU[i])
            continue;
        cout << "The element: " << i << " is wrong!\n";
        cout << "outCPU[" << i << "] = " << outCPU[i] << endl;
        cout << "outGPU[" << i << "] = " << outGPU[i] << endl;
        return false;
    }
    return true;
}
// Elapsed time from t1 to t2 in microseconds (computed in double,
// truncated to int on return).
int timespec_diff_us(timespec& t1, timespec& t2)
{
    const double sec_part = (t2.tv_sec - t1.tv_sec) * 1e6;
    const double nsec_part = (t2.tv_nsec - t1.tv_nsec) / 1e3;
    return sec_part + nsec_part;
}
// Times the CPU and GPU inner-product paths and validates the GPU output.
// Fixes over the previous revision:
//  * cudaFree was called with &devInputA etc. — the address of the host-side
//    pointer VARIABLE, not the device pointer — so nothing was ever freed and
//    each call returned an error; the pointers themselves are now passed.
//  * the host arrays allocated in init() are now released before exit.
int main()
{
    init();
    timespec time_begin, time_end;
    // --- CPU reference ---
    clock_gettime(CLOCK_REALTIME, &time_begin);
    innerProductCPU();
    clock_gettime(CLOCK_REALTIME, &time_end);
    cout << "CPU time for executing inner-product = "
        << timespec_diff_us(time_begin, time_end) / 1000 << "ms" << endl;
    /*** Inside area is CUDA related code ***/
    initGPU();
    dim3 threadsPerBlock(VECLEN); // one thread per component
    dim3 numBlocks(VECNUM);       // one block per vector pair
    clock_gettime(CLOCK_REALTIME, &time_begin);
    /*** Lunch your CUDA kernel here ***/
    innerProductGPU<<<numBlocks, threadsPerBlock>>>(devInputA, devInputB, devOut);
    cudaDeviceSynchronize(); // launches are async; wait before stopping the clock
    /*** Lunch your CUDA kernel here ***/
    clock_gettime(CLOCK_REALTIME, &time_end);
    int outSize = sizeof(int)*VECNUM;
    cudaMemcpy(outGPU, devOut, outSize, cudaMemcpyDeviceToHost);
    // BUG FIX: pass the device pointers themselves, not their addresses.
    cudaFree(devInputA);
    cudaFree(devInputB);
    cudaFree(devOut);
    /*** Inside area is CUDA related code ***/
    cout << "GPU time for executing inner-product = "
        << timespec_diff_us(time_begin, time_end) / 1000 << "ms" << endl;
    if(checker())
        cout << "Congratulations! You pass the check." << endl;
    else
        cout << "Sorry! Your result is wrong." << endl;
    // Release host buffers allocated in init() (previously leaked).
    delete[] inputA;
    delete[] inputB;
    delete[] outCPU;
    delete[] outGPU;
    return 0;
}
|
3,172 | // This example demonstrates how to
// query about the properties of a device
#include <stdlib.h>
#include <stdio.h>
#include <cuda_runtime.h>
// Enumerates every CUDA device and prints the driver/runtime versions plus
// the memory and execution-resource limits relevant for kernel tuning.
// (Removed a stray double semicolon; added an explicit success return.)
int main(void)
{
    int dev_count, driverVersion = 0, runtimeVersion = 0;
    cudaGetDeviceCount(&dev_count);
    cudaDriverGetVersion(&driverVersion);
    cudaRuntimeGetVersion(&runtimeVersion);
    printf("CUDA Driver Version / Runtime Version %d.%d / %d.%d\n", driverVersion / 1000, (driverVersion % 100) / 10,
        runtimeVersion / 1000, (runtimeVersion % 100) / 10);
    cudaDeviceProp dev_prop;
    for(int i=0; i<dev_count;i++){
        cudaGetDeviceProperties(&dev_prop,i);
        printf("---Device %d---\n", i);
        printf("Name: \"%s\"\n", dev_prop.name);
        printf("CUDA Capability Major/Minor version number: %d.%d\n", dev_prop.major, dev_prop.minor);
        printf("--- Memory information for device ---\n");
        printf("Total global mem: %.0f MB\n", dev_prop.totalGlobalMem/1048576.0f);
        printf("Total constant mem: %lu B\n", dev_prop.totalConstMem);
        printf("The size of shared memory per block: %lu B\n", dev_prop.sharedMemPerBlock);
        printf("The maximum number of registers per block: %d\n", dev_prop.regsPerBlock);
        printf("The number of SMs on the device: %d\n", dev_prop.multiProcessorCount);
        printf("The number of threads in a warp: %d\n", dev_prop.warpSize);
        printf("The maximal number of threads allowed in a block: %d\n", dev_prop.maxThreadsPerBlock);
        printf("Max thread dimensions (x,y,z): (%d, %d, %d)\n", dev_prop.maxThreadsDim[0], dev_prop.maxThreadsDim[1], dev_prop.maxThreadsDim[2]);
        printf("Max grid dimensions (x,y,z): (%d, %d, %d)\n", dev_prop.maxGridSize[0], dev_prop.maxGridSize[1], dev_prop.maxGridSize[2]);
    }
    return 0;
}
|
3,173 | #include <iostream>
#include <string.h>
// Adds 1.3f to each of the first 10000 floats in h.
// BUG FIX: the guard was `idx > 10000`, which let thread 10000 through and
// wrote one element past the end of the 10000-element buffer allocated in
// main (valid indices are 0..9999). The guard is now `idx >= 10000`.
void __global__ run(float * h)
{
    int idx = blockIdx.x*64+threadIdx.x;
    if (idx >= 10000) return;
    h[idx] += 1.3f;
}
// Launches the `run` kernel argv[1] times over a 10000-float device buffer.
// Fixes over the previous revision: argv[1] was dereferenced without checking
// argc; the device buffer was never freed; and the program exited without
// synchronizing, so kernel errors could never surface.
int main(int argc, char ** argv)
{
    if (argc < 2) {
        std::cerr << "usage: " << argv[0] << " <iterations>" << std::endl;
        return 1;
    }
    int times = atoi(argv[1]);
    float * h_d;
    cudaMalloc(&h_d, 10000*sizeof(float));
    // Launches are asynchronous; queue them all, then wait once at the end.
    for (int i = 0; i < times; ++i)
        run<<<157, 64>>>(h_d);
    cudaDeviceSynchronize();
    cudaFree(h_d); // previously leaked
    return 0;
}
|
3,174 | #include "includes.h"
// One top-down BFS frontier-expansion step, vertex-parallel.
// Each thread takes one vertex from the previous frontier (p_frontier, whose
// live length is *p_frontier_tail) and scans its outgoing edges in CSR form:
// edges[] holds per-vertex offsets, dests[] the neighbor indices.
// atomicExch claims each neighbor exactly once, so only the winning thread
// appends it to the current frontier (slot reserved via atomicAdd on
// *c_frontier_tail) and labels it one level deeper than its parent.
__global__ void device_BFS(const int* edges, const int* dests, int* labels, int* visited, int* c_frontier_tail, int* c_frontier, int* p_frontier_tail, int* p_frontier) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < *p_frontier_tail) {
int c_vertex = p_frontier[index];
for (int i = edges[c_vertex]; i < edges[c_vertex+1]; i++) {
// Atomically mark the neighbor visited; old value 0 means we won the race.
int was_visited = atomicExch(visited + dests[i], 1);
if (!was_visited) {
// Reserve a unique slot in the new frontier and record the BFS level.
int old_tail = atomicAdd(c_frontier_tail, 1);
c_frontier[old_tail] = dests[i];
labels[dests[i]] = labels[c_vertex] + 1;
}
}
}
} |
3,175 | ///-------------------------------------------------------------------------------------------------
// file: descportsout.cu
//
// summary: test kernal for output descriptor ports test case:
// The test does a normal vector scale, but the output data
// block should also have 'N' in the metadata channel and
// the entire contents of the pMetaData array in the template channel.
///-------------------------------------------------------------------------------------------------
extern "C" __global__ void
scale(
float* A, // matrix
float scalar, // scalar for A * scalar
int N, // size of A, pOut
float * pOut, // output result
int * pMDOut, // meta data channel out (should have N)
float * pTmplDataIn, // input data destined for output template channel
int nTmpl, // size of pTmplDataIn
float * pTmplDataOut // output buffer for tmpl channel
)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if(i < N)
pOut[i] = A[i]*scalar;
if(i<nTmpl)
pTmplDataOut[i] = pTmplDataIn[i];
if(i==0)
pMDOut[i] = N;
}
|
3,176 | #include "includes.h"
/*
Vector addition with a single thread for each addition
*/
/*
Vector addition with thread mapping and thread accessing its neighbor parallely
*/
//slower than simpler
/*
Matrix Matrix multiplication with a single thread for each row
*/
/*
Matrix Matrix multiplication with a single thread for each result element
*/
/*
Matrix Vector multiplication with a block with 4 threads per block, shared block mem and parallel reduce
*/
// Contiguous-chunk vector add: c = a + b over len ints.
// The array is split into len/totalThreads equal chunks; thread t walks
// [t*step, (t+1)*step), and the last thread also absorbs the remainder.
__global__ void good_addition(int *a, int *b, int *c, int len)
{
    const int tid = threadIdx.x + blockIdx.x * blockDim.x;
    const int total_threads = blockDim.x * gridDim.x;
    const int step = len / total_threads;
    int lo = tid * step;
    // last thread extends its range to cover the tail left by the division
    const int hi = (tid == total_threads - 1) ? len : lo + step;
    for (; lo < hi; lo++)
    {
        c[lo] = a[lo] + b[lo];
    }
} |
3,177 | // system libraries
// use nvcc -o (output name) -Wno-deprecated-gpu-targets -std=c++11 -Xcompiler -fopenmp file_name.cu
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <math.h>
#include <chrono>
// size definition. modify as needed
#define N 2000
#define T_SIZE 32
using namespace std;
// safe call definition
// Aborts the process with a diagnostic (caller message, file, line, CUDA
// reason) whenever a runtime call did not return cudaSuccess.
// Normally invoked through the SAFE_CALL macro below.
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number){
    if (err == cudaSuccess)
        return;
    fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n",
            msg, file_name, line_number, cudaGetErrorString(err));
    exit(EXIT_FAILURE);
}
// safe call definition
#define SAFE_CALL(call,msg) _safe_cuda_call(call,msg,__FILE__,__LINE__)
// initialize major row matrix
// Fill ip with nxy random floats uniformly drawn from [1.0, 10.0].
//
// BUG FIX: the generator was reseeded with time(0) on EVERY call, so two
// calls within the same second — the normal case when initializing h_A and
// h_B back-to-back — produced identical matrices. The seed is now installed
// only once per process.
void initializeMatrix(float *ip, const int nxy){
    static bool seeded = false;
    if (!seeded) {
        srand (static_cast <unsigned> (time(0)));
        seeded = true;
    }
    for(int i = 0; i < nxy; i++){
        ip[i] = 1.0 + static_cast <float> (rand()) /( static_cast <float> (RAND_MAX/(10.0-1.0)));
    }
    return;
}
// utility function to check result
// Compares two buffers element-by-element with a fixed absolute tolerance;
// prints the first offending pair (if any) and a final verdict.
void checkResult(float *hostRef, float *gpuRef, const int nxy){
    const double epsilon = 1.0E-8;
    bool match = 1;
    for (int i = 0; i < nxy; i++){
        if (abs(hostRef[i] - gpuRef[i]) > epsilon){
            match = 0;
            printf("host %f gpu %f\n", hostRef[i], gpuRef[i]);
            break;
        }
    }
    printf(match ? "Arrays match.\n\n" : "Arrays do not match.\n\n");
}
// multiply matrix on host
// Dense square product C += A * B for row-major nx-by-nx matrices.
// The caller is expected to zero-initialize C; results accumulate into it.
void multiplyMatrixOnHost(float *A, float *B, float *C, const int nx){
    for(int row = 0; row < nx; row++) {
        for(int col = 0; col < nx; col++) {
            // dot product of row `row` of A with column `col` of B,
            // accumulated in the same float order as before
            for(int k = 0; k < nx; k++) {
                C[row * nx + col] += A[row * nx + k] * B[col + k * nx];
            }
        }
    }
    return;
}
// function to multiply matrix on host with threads
// Same row-major C += A * B as multiplyMatrixOnHost, with the outer loop
// distributed across OpenMP threads (rows are independent, so no races).
void multiplyMatrixOnHostThreads(float *A, float *B, float *C, const int nx){
    int row = 0;
    // automatic parallelization of the independent row loop
    #pragma omp parallel for private(row) shared(A, B, C)
    for(row = 0; row < nx; row++) {
        for(int col = 0; col < nx; col++) {
            for(int k = 0; k < nx; k++) {
                C[row * nx + col] += A[row * nx + k] * B[col + k * nx];
            }
        }
    }
    return;
}
// kernel to multiply matrix on gpu
// Naive matmul: one thread computes one element C[row][col] as the full dot
// product of row `row` of A with column `col` of B. Out-of-range threads
// (from the rounded-up grid) take the early return.
__global__ void multiplyMatrixOnGPU(float *A, float *B, float *C, const int nx){
    const unsigned int col = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int row = blockIdx.y * blockDim.y + threadIdx.y;
    if (col >= nx || row >= nx)
        return;
    float dot = 0.0;
    for(int k = 0; k < nx; k++)
        dot += A[row * nx + k] * B[k * nx + col];
    C[row * nx + col] = dot;
}
// Kernel GPU Tiles
// Shared-memory tiled matmul: C = A * B for nx-by-nx row-major matrices.
// Each block computes one T_SIZE x T_SIZE tile of C by marching matching
// tiles of A (row band) and B (column band) through shared memory,
// zero-padding loads that fall outside the matrix.
// Assumes blockDim == (T_SIZE, T_SIZE) — ix/iy and the final store index
// coincide only under that launch configuration.
__global__ void multiplyMatrixOnGPUTiles(float *A, float *B, float *C, const int nx){
// Create the shared memory space as tiles
__shared__ float tileOne[T_SIZE][T_SIZE], tileTwo[T_SIZE][T_SIZE];
// Get the ix and iy indexes
unsigned int ix = T_SIZE * blockIdx.x + threadIdx.x;
unsigned int iy = T_SIZE * blockIdx.y + threadIdx.y;
// int limit = (T_SIZE + nx - 1)/T_SIZE;
// Get other limit to experiment
// NOTE(review): ceil((T_SIZE + nx)/T_SIZE) is one MORE iteration than the
// ceil-div form commented out above; the extra pass is harmless because its
// loads fall outside nx and are zero-padded, but it wastes a tile step.
int limit = ceilf(((float)T_SIZE + (float)nx)/(float)T_SIZE);
// Partial Sum acumulator
float partialSum = 0.0;
int i = 0;
while(i < limit){
// Load this thread's element of the i-th tile of A (zero-pad outside nx)
if ((iy < nx) && ((i * T_SIZE + threadIdx.x) < nx)){
int id = (iy * nx) + (i * T_SIZE) + threadIdx.x;
tileOne[threadIdx.y][threadIdx.x] = A[id];
}else{
tileOne[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// cuPrintf(""); <--- deprecated
// printf("Improper Tile Size in X domain, zeroing\n");
}
// Barrier: tileOne must be fully written before anyone reads it
__syncthreads();
// Load this thread's element of the i-th tile of B (zero-pad outside nx)
if ((ix < nx) && ((i * T_SIZE + threadIdx.y) < nx)){
int id = (i * T_SIZE + threadIdx.y) * nx + ix;
tileTwo[threadIdx.y][threadIdx.x] = B[id];
}else{
tileTwo[threadIdx.y][threadIdx.x] = 0.0;
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
// printf("Improper Tile Size in Y domain, zeroing\n");
}
// Barrier: tileTwo must be fully written before the dot-product step
__syncthreads();
//Perform partial sum on tile
#pragma unroll // T_SIZE is constant
for (int j = 0; j < T_SIZE; j++){
partialSum += tileOne[threadIdx.y][j] * tileTwo[j][threadIdx.x];
}
// DO NOT PRINT RACE CONDITION GIVES WRONG OUTPUT
//printf("Partial Sum fetched with value %f\n", partialSum);
// Barrier: keep the tiles alive until every thread finished reading them
__syncthreads();
i++;
}
// Guarded store of the finished element of C
if (ix < nx && iy < nx)
C[((blockIdx.y * blockDim.y + threadIdx.y) * nx) + (blockIdx.x * blockDim.x) + threadIdx.x] = partialSum;
}
// Driver: allocates host/device matrices, runs the naive and tiled GPU
// matmuls, times both, and cross-checks their results.
// BUG FIX: nBytes was computed as nxy * sizeof(float*) — pointer size, not
// element size — which silently doubled every allocation, memset, and copy.
// The buffers hold floats, so it now uses sizeof(float).
int main(int argc, char* argv[]) {
    printf("%s Starting...\n", argv[0]);
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop");
    printf("Using Device %d: %s\n", dev, deviceProp.name);
    SAFE_CALL(cudaSetDevice(dev), "Error setting device");
    int nx = N;
    int ny = N;
    int nxy = nx * ny;
    int nBytes = nxy * sizeof(float);
    printf("Matrix size: nx %d ny %d\n", nx, ny);
    // malloc host memory
    float *h_A = (float *)malloc(nBytes);
    float *h_B = (float *)malloc(nBytes);
    float *hostRef = (float *)malloc(nBytes);
    float *hostRefThreads = (float *)malloc(nBytes);
    float *gpuRef = (float *)malloc(nBytes);
    float *gpuRefTiles = (float *)malloc(nBytes);
    // initialize matrix
    initializeMatrix(h_A, nxy);
    initializeMatrix(h_B, nxy);
    // initialize to 0
    memset(hostRef, 0, nBytes);
    memset(hostRefThreads, 0, nBytes);
    memset(gpuRef, 0, nBytes);
    memset(gpuRefTiles, 0, nBytes);
    // // multiply matrix on host
    // auto start_cpu = std::chrono::high_resolution_clock::now();
    // multiplyMatrixOnHost(h_A, h_B, hostRef, nx);
    // auto end_cpu = std::chrono::high_resolution_clock::now();
    // std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
    // printf("multiplyMatrixOnHost elapsed %f ms\n", duration_ms.count());
    // // multiply matrix on host with threads
    // start_cpu = std::chrono::high_resolution_clock::now();
    // multiplyMatrixOnHostThreads(h_A, h_B, hostRefThreads, nx);
    // end_cpu = std::chrono::high_resolution_clock::now();
    // duration_ms = end_cpu - start_cpu;
    // printf("multiplyMatrixOnHostThreads elapsed %f ms\n", duration_ms.count());
    // // check results
    // checkResult(hostRef, hostRefThreads, nx);
    // malloc device global memory
    float *d_MatA, *d_MatB, *d_MatC, *d_MatD;
    SAFE_CALL(cudaMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA");
    SAFE_CALL(cudaMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB");
    SAFE_CALL(cudaMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC");
    SAFE_CALL(cudaMalloc((void **)&d_MatD, nBytes), "Error allocating d_MatD");
    // transfer data from host to device
    SAFE_CALL(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatA");
    SAFE_CALL(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatB");
    SAFE_CALL(cudaMemset(d_MatC, 0, nBytes), "Error zeroing d_MatC");
    SAFE_CALL(cudaMemset(d_MatD, 0, nBytes), "Error zeroing d_MatD");
    // kernel definition and launch
    dim3 block(T_SIZE, T_SIZE);
    // use other grid to experiment
    // dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    dim3 grid((int)ceil((float)nx / T_SIZE), (int)ceil((float)nx / T_SIZE));
    // launch naive kernel
    auto start_cpu = std::chrono::high_resolution_clock::now();
    multiplyMatrixOnGPU<<<grid, block>>>(d_MatA, d_MatB, d_MatC, nx);
    SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
    auto end_cpu = std::chrono::high_resolution_clock::now();
    // measure total time
    std::chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu;
    printf("multiplyMatrixOnGPU elapsed %f ms\n", duration_ms.count());
    // SAFE_CALL kernel error
    SAFE_CALL(cudaGetLastError(), "Error with last error");
    // copy kernel result back to host side
    SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC");
    // check device results
    //checkResult(hostRef, gpuRef, nx);
    // GPU TILE VERSION AND COMPARISSON
    // launch
    start_cpu = std::chrono::high_resolution_clock::now();
    multiplyMatrixOnGPUTiles<<<grid, block>>>(d_MatA, d_MatB, d_MatD, nx);
    SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel");
    end_cpu = std::chrono::high_resolution_clock::now();
    // measure total time
    duration_ms = end_cpu - start_cpu;
    printf("multiplyMatrixOnGPUTiles elapsed %f ms\n", duration_ms.count());
    // SAFE_CALL kernel error
    SAFE_CALL(cudaGetLastError(), "Error with last error");
    // copy kernel result back to host side
    SAFE_CALL(cudaMemcpy(gpuRefTiles, d_MatD, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatD");
    // check device results
    checkResult(gpuRef, gpuRefTiles, nx);
    // END GPU TILE VERSION AND COMPARISSON
    // free device global memory
    SAFE_CALL(cudaFree(d_MatA), "Error freeing memory");
    SAFE_CALL(cudaFree(d_MatB), "Error freeing memory");
    SAFE_CALL(cudaFree(d_MatC), "Error freeing memory");
    SAFE_CALL(cudaFree(d_MatD), "Error freeing memory");
    // free host memory
    free(h_A);
    free(h_B);
    free(hostRef);
    free(hostRefThreads);
    free(gpuRef);
    free(gpuRefTiles);
    // reset device
    SAFE_CALL(cudaDeviceReset(), "Error reseting");
    return (0);
}
|
3,178 | #include "includes.h"
// Hermite-style corrector step for the `nextsize` active particles listed in
// next[]: corrects position/velocity per axis from the predicted and newly
// evaluated acceleration derivatives, then rescales each particle's timestep
// (block-timestep scheme bounded by [DTMIN, DTMAX]).
// Layout note (from the indexing below): a_H0 packs acceleration at rows
// [0, nextsize), its 1st derivative at [nextsize, 2*nextsize), and its 2nd
// derivative at [2*nextsize, 3*nextsize); .w fields are reused as squared
// magnitudes. NOTE(review): the exact integrator order is inferred from the
// ETA6/ETA4 blend — confirm against the host-side scheme.
//
// Fixes over the previous revision:
//  * next[gtid] was read BEFORE the gtid >= nextsize bounds check, an
//    out-of-range read for every surplus thread; the guard now comes first.
//  * "a2dx += h*a3dz;" added the z jerk term to the x component a second
//    time and left a2dz without its update; it is now "a2dz += h*a3dz;".
__global__ void Corrector_gpu(double GTIME, double *local_time, double *step, int *next, unsigned long nextsize, double4 *pos_CH, double4 *vel_CH, double4 *a_tot_D, double4 *a1_tot_D, double4 *a2_tot_D, double4 *a_H0, double4 *a3_H, double ETA6, double ETA4, double DTMAX, double DTMIN, unsigned int N){
    unsigned int gtid = blockIdx.x*blockDim.x + threadIdx.x;
    if(gtid >= nextsize )
        return;
    double dt;
    int who = next[gtid];
    int who1 = gtid + nextsize;
    int who2 = who1 + nextsize;
    // squared magnitudes of a, a', a'' stored in the spare .w components
    a_H0[gtid].w = a_H0[gtid].x * a_H0[gtid].x +
        a_H0[gtid].y * a_H0[gtid].y +
        a_H0[gtid].z * a_H0[gtid].z ;
    a_H0[who1].w = a_H0[who1].x * a_H0[who1].x +
        a_H0[who1].y * a_H0[who1].y +
        a_H0[who1].z * a_H0[who1].z ;
    a_H0[who2].w = a_H0[who2].x * a_H0[who2].x +
        a_H0[who2].y * a_H0[who2].y +
        a_H0[who2].z * a_H0[who2].z ;
    double h = GTIME-local_time[who];
    local_time[who] = GTIME;
    double h1 = 0.5*h;
    double h2 = h1*h1;
    double h3 = 0.75/(h1*h1*h1);
    double h4 = 1.5/(h2*h2);
    double h5 = 7.5/(h2*h2*h1);
    // ---- x axis ----
    double Amin = a_H0[gtid].x - a_tot_D[who].x;
    double Aplu = a_H0[gtid].x + a_tot_D[who].x;
    double Jmin = h1 * (a_H0[who1].x - a1_tot_D[who].x);
    double Jplu = h1 * (a_H0[who1].x + a1_tot_D[who].x);
    double Smin = h1 * h1 * (a_H0[who2].x - a2_tot_D[who].x);
    double Splu = h1 * h1 * (a_H0[who2].x + a2_tot_D[who].x);
    double over= 1.0/15.0;
    pos_CH[who].x = pos_CH[who].x + h1*vel_CH[who].x - 0.4*h2*Amin + over*h2*Jplu;
    vel_CH[who].x = vel_CH[who].x + h1*Aplu - 0.4*h1*Jmin + over*h1*Splu;
    pos_CH[who].x += h1*vel_CH[who].x;
    a3_H[who].x = h3*(-5.0*Amin + 5.0*Jplu - Smin);
    double a4halfx = h4*(-Jmin + Splu);
    double a5halfx = h5*(3.0*Amin - 3.0*Jplu + Smin);
    a3_H[who].x += h1*a4halfx + 0.5*h2*a5halfx;
    a4halfx += h1*a5halfx;
    // ---- y axis ----
    Amin = a_H0[gtid].y - a_tot_D[who].y;
    Aplu = a_H0[gtid].y + a_tot_D[who].y;
    Jmin = h1 * (a_H0[who1].y - a1_tot_D[who].y);
    Jplu = h1 * (a_H0[who1].y + a1_tot_D[who].y);
    Smin = h1 * h1 * (a_H0[who2].y - a2_tot_D[who].y);
    Splu = h1 * h1 * (a_H0[who2].y + a2_tot_D[who].y);
    pos_CH[who].y = pos_CH[who].y + h1*vel_CH[who].y - 0.4*h2*Amin + over*h2*Jplu;
    vel_CH[who].y = vel_CH[who].y + h1*Aplu - 0.4*h1*Jmin + over*h1*Splu;
    pos_CH[who].y += h1*vel_CH[who].y;
    a3_H[who].y = h3*(-5.0*Amin + 5.0*Jplu - Smin);
    double a4halfy = h4*(-Jmin + Splu);
    double a5halfy = h5*(3.0*Amin - 3.0*Jplu + Smin);
    a3_H[who].y += h1*a4halfy + 0.5*h2*a5halfy;
    a4halfy += h1*a5halfy;
    // ---- z axis ----
    Amin = a_H0[gtid].z - a_tot_D[who].z;
    Aplu = a_H0[gtid].z + a_tot_D[who].z;
    Jmin = h1 * (a_H0[who1].z - a1_tot_D[who].z);
    Jplu = h1 * (a_H0[who1].z + a1_tot_D[who].z);
    Smin = h1 * h1 * (a_H0[who2].z - a2_tot_D[who].z);
    Splu = h1 * h1 * (a_H0[who2].z + a2_tot_D[who].z);
    pos_CH[who].z = pos_CH[who].z + h1*vel_CH[who].z - 0.4*h2*Amin + over*h2*Jplu;
    vel_CH[who].z = vel_CH[who].z + h1*Aplu - 0.4*h1*Jmin + over*h1*Splu;
    pos_CH[who].z += h1*vel_CH[who].z;
    a3_H[who].z = h3*(-5.0*Amin + 5.0*Jplu - Smin);
    double a4halfz = h4*(-Jmin + Splu);
    double a5halfz = h5*(3.0*Amin - 3.0*Jplu + Smin);
    a3_H[who].z += h1*a4halfz + 0.5*h2*a5halfz;
    a4halfz += h1*a5halfz;
    // 6th-order timestep criterion from the higher derivatives
    a3_H[who].w = sqrt(a3_H[who].x*a3_H[who].x + a3_H[who].y*a3_H[who].y + a3_H[who].z*a3_H[who].z);
    double a4mod = sqrt(a4halfx*a4halfx + a4halfy*a4halfy + a4halfz*a4halfz);
    double a5mod = sqrt(a5halfx*a5halfx + a5halfy*a5halfy + a5halfz*a5halfz);
    double dt6 = (sqrt(a_H0[gtid].w*a_H0[who2].w) + a_H0[who1].w) / (a5mod*a3_H[who].w + a4mod*a4mod);
    dt6 = ETA6 * pow(dt6,1.0/6.0);
    // 4th-order (Aarseth-style) criterion from finite-difference derivatives
    double stp = h;
    double overh3 = 1.0/(stp*stp*stp);
    double overh2 = 1.0/(stp*stp);
    double a2dx = overh2 * (-6.0 * (a_tot_D[who].x - a_H0[gtid].x) -
        stp * (4.0 * a_H0[who1].x + 2.0 * a1_tot_D[who].x));
    double a2dy = overh2 * (-6.0 * (a_tot_D[who].y - a_H0[gtid].y) -
        stp * (4.0 * a_H0[who1].y + 2.0 * a1_tot_D[who].y));
    double a2dz = overh2 * (-6.0 * (a_tot_D[who].z - a_H0[gtid].z) -
        stp * (4.0 * a_H0[who1].z + 2.0 * a1_tot_D[who].z));
    double a3dx = overh3 * (12.0 * (a_tot_D[who].x - a_H0[gtid].x) +
        6.0 * stp * (a_H0[who1].x + a1_tot_D[who].x));
    double a3dy = overh3 * (12.0 * (a_tot_D[who].y - a_H0[gtid].y) +
        6.0 * stp * (a_H0[who1].y + a1_tot_D[who].y));
    double a3dz = overh3 * (12.0 * (a_tot_D[who].z - a_H0[gtid].z) +
        6.0 * stp * (a_H0[who1].z + a1_tot_D[who].z));
    a2dx += h*a3dx;
    a2dy += h*a3dy;
    a2dz += h*a3dz; // BUG FIX: was "a2dx += h*a3dz;"
    a_H0[who2].w = a2dx*a2dx + a2dy*a2dy + a2dz*a2dz;
    a3_H[who].w = a3dx*a3dx + a3dy*a3dy + a3dz*a3dz;
    double dt4 = sqrt(ETA4*(sqrt(a_H0[gtid].w*a_H0[who2].w) + a_H0[who1].w) / (sqrt(a_H0[who1].w*a3_H[who].w) + a_H0[who2].w));
    dt = 0.5*dt4+0.5*dt6;
    // Block timestep update: only double when GTIME is commensurate with the
    // doubled step, and clamp to [DTMIN, DTMAX].
    double rest = GTIME / (2.0 * step[who]);
    rest = (double)((int)(rest)) - rest;
    // return;
    // pos_CH[who].x = step[who];
    // return;
    if(dt > 2.0*step[who] && rest == 0.0 && 2.0*step[who] <= DTMAX)
        step[who] *= 2.0;
    else if (dt < 0.5*step[who])
        step[who] *= 0.25;
    else if (dt < step[who])
        step[who]*=0.5;
    if(step[who] < DTMIN)
        step[who] = DTMIN;
    // promote the freshly evaluated derivatives to "current" for the next step
    a_tot_D[who] = a_H0[gtid];
    a1_tot_D[who] = a_H0[who1];
    a2_tot_D[who] = a_H0[who2];
} |
3,179 | #include "user.cuh"
// Fill vec[0..size) with uniform random floats drawn from [-100000, 100000],
// folding each sample into the running minimum (*p_minVal) and maximum
// (*p_maxVal). Callers must pre-seed *p_minVal / *p_maxVal with sentinels.
void generateVector(float *vec, int size, float *p_minVal, float *p_maxVal) {
    std::random_device seeder;
    std::mt19937 engine(seeder());
    std::uniform_real_distribution<float> dist(-100000.0, 100000.0);
    for (int idx = 0; idx < size; idx++) {
        const float sample = dist(engine);
        vec[idx] = sample;
        if (sample < *p_minVal) *p_minVal = sample;
        if (sample > *p_maxVal) *p_maxVal = sample;
    }
}
|
3,180 | #include "includes.h"
//kernel for computing histogram right in memory
//computer partial histogram on shared memory and mix them on global memory
// Histogram built directly in global memory.
// Each thread walks the input with a grid-stride loop and bumps the matching
// bin via atomicAdd so concurrent increments stay correct.
// Precondition: every values[i] is a valid index into hist.
__global__ void hist_inGlobal (const int* values, int length, int* hist){
    const int stride = gridDim.x * blockDim.x;
    for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < length; i += stride) {
        atomicAdd(&hist[values[i]], 1);
    }
}
3,181 | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <cuda.h>
#define N 10000000 // total number of items in vectors
#define nthreads 4 // total number of threads in a block
// sum[i] = vect1[i]^2 + vect2[i]^2, one element per thread,
// guarded against the grid overshooting n.
__global__ void square(int n, int *vect1, int *vect2, int *sum)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n)
        return;
    const int a = vect1[i];
    const int b = vect2[i];
    sum[i] = a * a + b * b;
}
/*
 * Computes sum[i] = vect1[i]^2 + vect2[i]^2 on the GPU, prints the inputs and
 * result, and reports the kernel time measured with CUDA events.
 * Fixes vs. original: host-malloc checks, launch error check,
 * cudaEventSynchronize before reading the elapsed time, event destruction,
 * and typo fixes in the output messages.
 */
int main()
{
    srand(time(NULL));
    int *vect1_h, *vect2_h, *sum_h;   /* host buffers */
    int *vect1_d, *vect2_d, *sum_d;   /* device buffers */
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    float milliseconds = 0;
    vect1_h = (int*)malloc(N * sizeof(int));
    vect2_h = (int*)malloc(N * sizeof(int));
    sum_h   = (int*)malloc(N * sizeof(int));
    if (vect1_h == NULL || vect2_h == NULL || sum_h == NULL)
    {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    cudaMalloc((void**)&vect1_d, N * sizeof(int));
    cudaMalloc((void**)&vect2_d, N * sizeof(int));
    cudaMalloc((void**)&sum_d, N * sizeof(int));
    for(int i = 0; i < N; i++)
    {
        vect1_h[i] = rand()%10;
        vect2_h[i] = rand()%10;
    }
    cudaMemcpy(vect1_d, vect1_h, N * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(vect2_d, vect2_h, N * sizeof(int), cudaMemcpyHostToDevice);
    int nblocks = (N + nthreads - 1)/nthreads;   /* ceil-div grid size */
    cudaEventRecord(start);
    square<<<nblocks,nthreads>>>(N, vect1_d, vect2_d, sum_d);
    /* The launch is asynchronous; this catches bad-configuration errors. */
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    cudaEventRecord(stop);
    /* Blocking copy also synchronizes the default stream. */
    cudaMemcpy(sum_h, sum_d, N * sizeof(int), cudaMemcpyDeviceToHost);
    printf("Vector1: \n");
    for(int i = 0; i < N; ++i)
        printf(" %d", vect1_h[i]);
    printf("\nVector2: \n");
    for(int i = 0; i < N; ++i)
        printf(" %d", vect2_h[i]);
    printf("\nThe sum of squares of the vectors is: \n");
    for(int i = 0; i < N; ++i)
        printf(" %d", sum_h[i]);
    printf("\n");
    /* Ensure `stop` has completed before querying the elapsed time. */
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&milliseconds, start, stop);
    printf("elapsed = %f ms", milliseconds);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(vect1_d);
    cudaFree(vect2_d);
    cudaFree(sum_d);
    free(vect1_h);
    free(vect2_h);
    free(sum_h);
    return 0;
}
|
3,182 | /**
* I wrote, compiled, and ran this code on the cssgpu01 machine.
* Which I believe runs Ubuntu 16.04
*
* There appeared to be other intensive computations happening, which may
* have slowed my execution output.
*
* To compile:
* nvcc vector.cu -o vector.out
*
* To run:
* ./vector.out <vector_size>
*/
#include <time.h>
#include <cstdlib>
#include <iostream>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#define MAX_NUM 5000
/** -------------------------------------------------------------------------
* add
* Adds two vectors in parallel on a GPU
*
* @param a The first vector to add
* @param b The second vector to add
* @param c The vector which will hold the result of the addition
*/
/**
 * Adds two vectors on the GPU, one block per element:
 * c[i] = a[i] + b[i] where i == blockIdx.x (launched as <<<n, 1>>>).
 */
__global__ void add(int* a, int* b, int* c) {
    const int i = blockIdx.x;
    c[i] = a[i] + b[i];
}
// Populate a[0..size) with pseudo-random ints in [0, MAX_NUM).
void random_ints(int* a, int size) {
    for (int idx = 0; idx != size; ++idx)
        a[idx] = rand() % MAX_NUM;
}
/**
 * Driver: allocates two random int vectors of the size given on the command
 * line, adds them on the GPU, and prints the elapsed microseconds.
 * Fixes vs. original: host arrays were allocated as `new int[size]` where
 * `size` is a BYTE count (4x over-allocation) — now element counts; the
 * deprecated cudaThreadSynchronize() is replaced by cudaDeviceSynchronize();
 * the vector size is validated.
 */
int main(int argc, char** argv) {
    if (argc != 2) {
        std::cerr << "Usage: ./vector <vector_size>" << std::endl;
        return 1;
    }
    int vector_size = atoi(argv[1]);
    if (vector_size <= 0) {
        std::cerr << "Usage: ./vector <vector_size>" << std::endl;
        return 1;
    }
    int size = vector_size * sizeof(int);   // device/host transfer size in bytes
    int* d_a;
    int* d_b;
    int* d_c;
    // Allocate memory on device
    cudaMalloc((void**) &d_a, size);
    cudaMalloc((void**) &d_b, size);
    cudaMalloc((void**) &d_c, size);
    // Initialize host vectors (element counts, not byte counts)
    int* a = new int[vector_size]; random_ints(a, vector_size);
    int* b = new int[vector_size]; random_ints(b, vector_size);
    int* c = new int[vector_size];
    // Copy parameters to device
    cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
    // Start clock
    clock_t start = clock();
    // Perform vector addition (one block per element)
    add<<<vector_size,1>>>(d_a, d_b, d_c);
    // Copy results back (blocking copy synchronizes with the kernel)
    cudaMemcpy(c, d_c, size, cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();
    // Stop clock
    clock_t timeElapsed = (clock() - start) / (CLOCKS_PER_SEC / 1000000);
    std::cout << timeElapsed << std::endl;
    // Clean up resources
    delete[] a;
    delete[] b;
    delete[] c;
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);
    return 0;
}
|
3,183 | #include <iostream>
#include <cuda.h>
#include <stdlib.h>
#include <time.h>
using namespace std;
// Elementwise vector add: d_vec3[i] = d_vec1[i] + d_vec2[i].
// Assumes the launch grid exactly covers the vector (no bounds guard),
// i.e. gridDim.x * blockDim.x == element count.
__global__ void vector_add(int *d_vec1,int *d_vec2,int *d_vec3)
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    d_vec3[i] = d_vec1[i] + d_vec2[i];
}
/*
 * Adds two 512000-element vectors on the GPU and reports the kernel time.
 * Fixes vs. original: the three SIZE-element host arrays (~6 MB total) were
 * on the stack (overflow risk) — now heap-allocated; the timed region ended
 * right after the asynchronous launch, so only launch overhead was measured —
 * a cudaDeviceSynchronize() now closes the region; device memory is freed.
 */
int main()
{
    const int num_block = 1000;
    const int thread_in_block = 512;
    const int SIZE = num_block * thread_in_block;
    const int BYTES = SIZE * sizeof (int);
    clock_t start_t, end_t;
    double time_spent;
    // Heap allocation: SIZE ints x3 would overflow a typical 8 MB stack.
    int *h_vec1 = new int[SIZE];
    int *h_vec2 = new int[SIZE];
    int *h_vec3 = new int[SIZE];   // c = a + b
    int *d_vec1, *d_vec2, *d_vec3;
    for(int i=0;i<SIZE;i++)
    {
        h_vec1[i] = rand()%100;
        h_vec2[i] = rand()%100;
        h_vec3[i] = 0;
    }
    cudaMalloc((void**) &d_vec1, BYTES);
    cudaMalloc((void**) &d_vec2, BYTES);
    cudaMalloc((void**) &d_vec3, BYTES);
    cudaMemcpy(d_vec1,h_vec1,BYTES,cudaMemcpyHostToDevice);
    cudaMemcpy(d_vec2,h_vec2,BYTES,cudaMemcpyHostToDevice);
    start_t = clock();
    vector_add<<<num_block,thread_in_block>>>(d_vec1,d_vec2,d_vec3);
    // The launch is asynchronous; wait so the timer covers execution.
    cudaDeviceSynchronize();
    end_t = clock();
    time_spent = (double) (end_t-start_t)/CLOCKS_PER_SEC;
    cudaMemcpy(h_vec3,d_vec3,BYTES,cudaMemcpyDeviceToHost);
    // for(int i=0;i<SIZE;i++)
    // {
    // cout<<h_vec3[i];
    // (i%5 == 0) ? cout<<endl : cout<<"\t";
    // }
    cout<<"Execution time = "<< time_spent<<endl;
    cudaFree(d_vec1);
    cudaFree(d_vec2);
    cudaFree(d_vec3);
    delete[] h_vec1;
    delete[] h_vec2;
    delete[] h_vec3;
    return 0;
}
3,184 | #include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
#define WIDTH 64
using crngState = curandStatePhilox4_32_10_t;
/* Each thread gets same seed, a different sequence
number, no offset */
/* Initialize one Philox RNG state per image element.
   Every thread uses the same seed with a distinct sequence id and no offset,
   so streams are independent and reproducible for a given seed.
   Layout: blockIdx.y selects the row; blockIdx.z * blockDim.x + threadIdx.x
   walks the columns, guarded against overshooting dim. */
__global__ void setup_curand(crngState *state, unsigned long seed, unsigned dim) {
    const uint32_t col = blockIdx.z * blockDim.x + threadIdx.x;
    if (col >= dim) return;
    const uint32_t id = blockIdx.y * dim + col;
    curand_init(seed, id, 0, &state[id]);
}
/* Draw one 32-bit random value per pixel.
   The per-thread RNG state is copied to a local, advanced, and written back
   so the next launch continues the same stream. Same grid layout as
   setup_curand: row = blockIdx.y, column guarded against dim. */
__global__ void gen_image_kernel(crngState *state, unsigned int *result, unsigned dim) {
    const uint32_t col = blockIdx.z * blockDim.x + threadIdx.x;
    if (col >= dim) return;
    const uint32_t id = blockIdx.y * dim + col;
    crngState rng = state[id];
    result[id] = curand(&rng);
    state[id] = rng;
}
/* Generate a dim x dim image of random 32-bit values on the GPU.
 * Returns a malloc'd host buffer the caller must free().
 * Bug fix: the RNG state array was allocated for only WIDTH elements, but
 * both kernels index state[row*dim + col] for every pixel (up to dim*dim-1),
 * causing out-of-bounds device writes — allocate dim*dim states. */
unsigned int *gen_image_gpu(unsigned dim, unsigned int seed) {
    unsigned int *image = (unsigned int *)malloc(dim*dim*sizeof(unsigned int));
    if (!image) return NULL;          // propagate allocation failure
    unsigned int *image_d;
    crngState *state;
    cudaMalloc(&image_d, dim*dim*sizeof(unsigned int));
    // grid: y = rows, z x WIDTH threads = columns (see setup_curand layout)
    dim3 dims(1, dim, (dim+WIDTH-1)/WIDTH);
    cudaMalloc(&state, (size_t)dim*dim*sizeof(crngState));
    setup_curand<<< dims, WIDTH >>>(state, seed, dim);
    gen_image_kernel<<< dims, WIDTH >>>(state, image_d, dim);
    cudaMemcpy(image, image_d, dim*dim*sizeof(unsigned int), cudaMemcpyDeviceToHost);
    cudaFree(state);
    cudaFree(image_d);
    return image;
}
/* CLI driver: generates a <dim> x <dim> random image (the <steps> argument is
 * used as the RNG seed) and prints samples along the diagonal.
 * Bug fix: the old code printed img[id+1] unconditionally, reading one past
 * the buffer when i lands on the last column; the read is now guarded.
 * Also validates that dim is positive before allocating. */
int main(int argc, char *argv[]) {
    if(argc != 3) {
        std::cout << "Usage: " << argv[0] << " <dim> <steps>\n";
        return 1;
    }
    int dim = atoi(argv[1]);
    int steps = atoi(argv[2]);   // NOTE: used as the seed by gen_image_gpu
    if (dim <= 0) {
        std::cout << "Usage: " << argv[0] << " <dim> <steps>\n";
        return 1;
    }
    unsigned int *img = gen_image_gpu(dim, steps);
    for(int i=0; i<dim; i += (dim+9)/10) {
        uint32_t id = i*dim + i;
        if (i + 1 < dim)
            std::cout << i << ": " << img[id] << " " << img[id+1] << std::endl;
        else
            std::cout << i << ": " << img[id] << std::endl;   // avoid OOB read
    }
    free(img);
    return 0;
}
|
3,185 | #include "cuda.h"
#include <iostream>
#include <stdlib.h>
// outC[i] = inA[i] + inB[i] for i in [0, n); one element per thread with
// a bounds guard for the ragged last block.
__global__ void simple_vec_add(float * inA,
                               float * inB,
                               float * outC,
                               int n)
{
    const int i = blockIdx.x*blockDim.x+threadIdx.x;
    if (i >= n)
        return;
    outC[i] = inA[i] + inB[i];
}
// Populate v[0..n) with pseudo-random samples in [0, 1].
void fillRandomly(float * v,int n)
{
    for (int idx = 0; idx != n; ++idx)
        v[idx] = (float)rand() / (float)RAND_MAX;
}
// Verify elementwise that C[i] == A[i] + B[i] (exact float comparison,
// matching the GPU's single-precision add). Returns false on the first
// mismatch, true otherwise (vacuously true for n == 0).
bool checkResults(float * A,
                  float * B,
                  float * C,
                  int n)
{
    for (int i = 0; i < n; ++i)
    {
        if (A[i] + B[i] != C[i])
        {
            return false;
        }
    }
    return true;
}
/*
 * Adds two random float vectors on the GPU and verifies the result against
 * a CPU reference. Prints "OK" or "FAIL".
 * Fixes vs. original: host allocations are checked, and kernel launch errors
 * are surfaced via cudaGetLastError() instead of being silently ignored.
 */
int main(int argc,char **argv)
{
    float * hA, * hB, * hC;   // host buffers
    float * dA, * dB, * dC;   // device buffers
    int nElements = 10000;
    // Allocate host memory
    hA = (float*) malloc(nElements*sizeof(float));
    hB = (float*) malloc(nElements*sizeof(float));
    hC = (float*) malloc(nElements*sizeof(float));
    if(hA==NULL || hB==NULL || hC==NULL)
    {
        std::cerr<<"host allocation failed"<<std::endl;
        return 1;
    }
    // Fill the input A and B vectors with random data
    fillRandomly(hA,nElements);
    fillRandomly(hB,nElements);
    // Allocate device memory
    cudaMalloc((void**)&dA,nElements*sizeof(float));
    cudaMalloc((void**)&dB,nElements*sizeof(float));
    cudaMalloc((void**)&dC,nElements*sizeof(float));
    // Transfer data from host to device
    cudaMemcpy(dA,hA,nElements*sizeof(float),cudaMemcpyHostToDevice);
    cudaMemcpy(dB,hB,nElements*sizeof(float),cudaMemcpyHostToDevice);
    // Perform CUDA kernel computation
    int nThreadsPerBlock = 256;
    dim3 dimGrid((nElements-1)/nThreadsPerBlock+1,1,1);  // ceil-div grid
    dim3 dimBlock(nThreadsPerBlock,1,1);
    simple_vec_add<<<dimGrid,dimBlock>>>(dA,dB,dC,nElements);
    // The launch is asynchronous; report configuration errors explicitly.
    cudaError_t launchErr = cudaGetLastError();
    if(launchErr != cudaSuccess)
    {
        std::cerr<<"kernel launch failed: "<<cudaGetErrorString(launchErr)<<std::endl;
        return 1;
    }
    cudaDeviceSynchronize();
    // Transfer data from device to host
    cudaMemcpy(hC,dC,nElements*sizeof(float),cudaMemcpyDeviceToHost);
    // Deallocate device memory
    cudaFree(dA);
    cudaFree(dB);
    cudaFree(dC);
    // Check results
    bool ok = checkResults(hA,hB,hC,nElements);
    if(ok)
    {
        std::cout<<"OK"<<std::endl;
    }
    else
    {
        std::cout<<"FAIL"<<std::endl;
    }
    // Deallocate host memory
    free(hA);
    free(hB);
    free(hC);
    return 0;
}
|
3,186 | #include <iostream>
#include <assert.h>
#include <cstdlib>
#include "cuda_runtime.h"
const int SIZE = 4096;
// Reverse V in place through dynamically-sized shared memory.
// Launch contract: one block of exactly N threads, with N*sizeof(int)
// dynamic shared bytes as the third launch parameter.
__global__ void dymTrans(int *V, int N) {
    extern __shared__ int buffer[];
    const int t = threadIdx.x;
    buffer[t] = V[t];
    __syncthreads();   // all elements staged before anyone reads mirrored slots
    V[t] = buffer[N - t - 1];
}
// Reverse V in place through a statically-sized shared buffer (SIZE ints).
// Launch contract: one block of exactly N threads, N <= SIZE.
__global__ void stdTrans(int *V, int N) {
    __shared__ int buffer[SIZE];
    const int t = threadIdx.x;
    buffer[t] = V[t];
    __syncthreads();   // stage the whole vector before mirrored reads
    V[t] = buffer[N - t - 1];
}
// Reverse V in place in global memory: thread i swaps V[i] with V[N-i-1].
// Bug fix: the guard was `refIndex <= N/2`, so for even N the threads at
// N/2-1 and N/2 both touched the same middle pair concurrently — a data race
// whose "winning" order could leave the pair un-reversed. Strict `<` gives
// each pair exactly one owner (and for odd N the middle element stays put).
__global__ void normalTrans(int *V, int N) {
    int refIndex = blockDim.x * blockIdx.x + threadIdx.x;
    if (refIndex < N/2) {
        int reff = V[refIndex];
        V[refIndex] = V[N-refIndex-1];
        V[N-refIndex-1] = reff;
    }
}
/*
 * Benchmarks three in-place vector reversals (global memory, static shared,
 * dynamic shared) for a size n read from stdin, and prints which was fastest.
 * Fixes vs. original:
 *  - dynamic shared memory size was passed as `n` BYTES instead of
 *    n*sizeof(int), so dymTrans indexed past its shared allocation;
 *  - the shared-memory kernels were always launched with 1024 threads, so for
 *    n < 1024 the extra threads read/wrote V out of bounds — the block size is
 *    now min(n, 1024) (n > 1024 is still unsupported by these kernels);
 *  - V_t (a host pointer) was released with cudaFree and v_D was leaked;
 *  - CUDA events are destroyed.
 */
int main(void) {
    int n = 0;
    std::cin >> n;
    if (n <= 0 || n > 1024) {   // shared kernels run as a single block
        std::cout << "n must be in [1, 1024]" << std::endl;
        return 1;
    }
    size_t size = n * sizeof(int);
    int *V = (int*)malloc(size);
    int *V_t = (int*)malloc(size);
    if (!V || !V_t) {
        std::cout << "host allocation failed" << std::endl;
        return 1;
    }
    for (int i = 0; i < n; i++) {
        V[i] = i+1;
    }
    int block = n;      // one thread per element (n <= 1024 enforced above)
    int grid = 1;
    int *v_D;
    cudaMalloc(&v_D, size);
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    //nTrans
    cudaEventRecord(start);
    cudaMemcpy(v_D, V, size, cudaMemcpyHostToDevice);
    normalTrans<<<grid, block>>>(v_D, n);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaMemcpy(V_t, v_D, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float nTrans = 0;
    cudaEventElapsedTime(&nTrans, start, stop);
    std::cout << "Normal Trans used time: " << nTrans << std::endl;
    //sTrans (static shared buffer; no dynamic shared bytes needed)
    cudaEventRecord(start);
    cudaMemcpy(v_D, V, size, cudaMemcpyHostToDevice);
    stdTrans<<<grid, block>>>(v_D, n);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaMemcpy(V_t, v_D, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float sTrans = 0.0;
    cudaEventElapsedTime(&sTrans, start, stop);
    std::cout << "Static Trans used time: " << sTrans << std::endl;
    cudaMemcpy(V_t, v_D, size, cudaMemcpyDeviceToHost);
    //dTrans (dynamic shared memory: n ints = n*sizeof(int) bytes)
    cudaEventRecord(start);
    cudaMemcpy(v_D, V, size, cudaMemcpyHostToDevice);
    dymTrans<<<grid, block, n * sizeof(int)>>>(v_D, n);
    cudaEventRecord(stop);
    cudaDeviceSynchronize();
    cudaMemcpy(V_t, v_D, size, cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float dTrans = 0.0;
    cudaEventElapsedTime(&dTrans, start, stop);
    std::cout << "Dynamic trans used: " << dTrans << std::endl;
    //summary
    if(dTrans<=sTrans&&dTrans<=nTrans){
    std::cout<<"Dymic trans is btetter than Static trans "<<sTrans-dTrans<<" ms "<<"and btetter than normal trans"<<nTrans-dTrans<<" ms "<<std::endl;
    }else if(sTrans<=dTrans&&sTrans<=nTrans){
    std::cout<<"Static trans is btetter than Dymic trans "<<dTrans-sTrans<<" ms "<<"and btetter than normal trans"<<nTrans-sTrans<<" ms "<<std::endl;
    }else{
    std::cout<<"Normal trans is btetter than Dymic trans "<<dTrans-nTrans<<" ms "<<"and btetter than Static trans"<<sTrans-nTrans<<" ms "<<std::endl;
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(v_D);
    free(V);
    free(V_t);
    return 0;
}
3,187 | #include "blur.cuh"
#include "grayScale.cuh"
#include <iostream>
using namespace std;
int main(int argc, char **argv) {
if (argc < 3) {
cout << argv[0] << ": needs two arguments\n"
<< "<image_path> <option>\n";
return 0;
}
string image_path(argv[1]), option(argv[2]);
if (option == "gray") {
grayScale(image_path);
} else {
imageBlur(image_path);
}
return 0;
}
|
3,188 |
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include "string.h"
#include <math.h>
#define DEFAULT_FILENAME "small-zibra-unsplash.ppm"
#define MAX_VALUE 256 //max value of the pic luminance
#define NUM_BINS 256 //num of bins equals to the max value
__constant__ double PARAMS[4];
void write_ppm( char const *filename, int xsize, int ysize, int maxval, unsigned int *pic);
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval );
void write_CSV(char const *filename,int width, int height, unsigned int *input);
void matrixRotation(unsigned int *input, unsigned int *output, int width, int height, double angle);
void computerGoldHisto(unsigned int* input, unsigned int* histo, int width, int height);
void getNewXY(int inputX, int inputY, int width, int height,double angle, int *outputX, int *outputY);
__global__ void rotation_kernel_naive(unsigned int *input,unsigned int *output,int width, int height,double angle);
__global__ void rotation_kernel_2(unsigned int *input,unsigned int *output,int width, int height,double angle);
__global__ void rotation_kernel_3(unsigned int *input,unsigned int *output,int width,int height);
__global__ void histo_kernel_naive(unsigned int *input,unsigned int *histo,int width, int height);
__global__ void histo_kernel_2(unsigned int *input,unsigned int *histo,int width, int height);
/*
 * Driver: reads a PPM image, pads it into a square of the diagonal length,
 * rotates it (CPU reference + three GPU kernels) and builds per-column
 * luminance histograms (CPU reference + two GPU kernels), timing everything.
 * Fixes vs. original:
 *  - read_ppm's NULL return was dereferenced on failure — now checked;
 *  - the angle was parsed with atoi, silently truncating fractions — atof;
 *  - h_histoCPU was zeroed only over NUM_BINS*MAX_VALUE entries while
 *    computerGoldHisto indexes up to MAX_VALUE*diaLen — the whole buffer is
 *    now memset;
 *  - d_histo (and d_histo2) are cudaMemset to zero before the histogram
 *    kernels atomically add into them (they were uninitialized device memory).
 */
int main( int argc, char **argv )
{
    double ang ;
    char *filename;
    filename = strdup( DEFAULT_FILENAME);
    ang = 45.0;
    if (argc > 1) {
        if (argc == 3) { //angle and filename
            ang = atof( argv[1] );   // atof: keep fractional angles
            filename = strdup( argv[2]);
        }
        if (argc == 2) { // angle
            ang = atof( argv[1] );
        }
    }
    fprintf(stderr, "file %s , rotation angle: %f\n", filename, ang);
    //initialization paramters
    int xsize, ysize,maxval;
    unsigned int *h_histoCPU;
    unsigned int *h_Input;
    unsigned int *h_rotated;
    //read input from image
    unsigned int *pic = read_ppm( filename, &xsize, &ysize, &maxval );
    if (!pic) {
        fprintf(stderr, " unable to read input image \n");
        exit(-1); // fail
    }
    int diaLen = (unsigned int) (sqrt(xsize * xsize + ysize* ysize) + 3);//paddle extra 3 for safer non-cropped rotation
    printf("width:%d, height:%d,maxVal: %d, diagonal size: %d \n",xsize,ysize,maxval,diaLen );
    //decide memory size
    size_t histo_size = MAX_VALUE * diaLen * 3 * sizeof(int);
    size_t rotate_size = diaLen * diaLen * 3 * sizeof(int);
    //allocate memory
    h_histoCPU = (unsigned int*)malloc(histo_size);
    h_rotated= (unsigned int*)malloc(rotate_size);
    h_Input = (unsigned int*)malloc(rotate_size);
    if (!h_Input || !h_histoCPU || !h_rotated) {
        fprintf(stderr, " unable to malloc \n");
        exit(-1); // fail
    }
    //decide rotate angle, rotation is done swirlly along the image center.
    double angle = - ang / 360 * M_PI * 2;
    //paddle data for h_Input, make it squre with side length as the input diagonal length
    int deltaX = diaLen - xsize ;
    int deltaY = diaLen - ysize ;
    for(int i=0;i<diaLen;i++){
        for(int j = 0; j< diaLen;j++){
            if(i>=deltaY / 2 && i< (ysize + deltaY/2) && j >= deltaX/2 && j < (xsize + deltaX/2)){
                h_Input[i*diaLen+j]=pic[(i-deltaY/2)*xsize + (j-deltaX/2)];
            }else{
                h_Input[i*diaLen+j] = 0;
            }
        }
    }
    //calculate new coordinate
    int outputX, outputY;
    double realAngle = -angle;
    int originX = 0;
    int originY = 200;
    getNewXY(originX,originY, xsize, ysize, realAngle, &outputX, &outputY);
    printf("(%d, %d) rotated to newX: %d, newY: %d \n", originX,originY,outputX, outputY);
    //output the paddledInput
    write_ppm( "paddledInput.ppm", diaLen, diaLen, 255, h_Input);
    //timer for cpu rotation------------------------
    cudaEvent_t start, stop;
    float time;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //run rotation on cpu
    matrixRotation( h_Input, h_rotated, diaLen, diaLen, angle);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time,start,stop);
    printf("cpu rotation time: %f milliseconds\n",time);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    //output rotated image
    write_ppm( "rotated_gold.ppm", diaLen, diaLen, 255, h_rotated);
    write_CSV("rotated_gold.csv",diaLen,diaLen, h_rotated);
    //write_CSV("picture.csv",xsize,ysize, pic);
    //initialization h_histoCPU: zero the WHOLE buffer (computerGoldHisto
    //indexes up to MAX_VALUE*diaLen, beyond the old NUM_BINS*MAX_VALUE loop)
    memset(h_histoCPU, 0, histo_size);
    //timer for CPU histogram
    float time2;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //run histo on CPU
    computerGoldHisto(h_rotated,h_histoCPU,diaLen,diaLen);
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time2,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("cpu histo time: %f milliseconds\n",time2);
    //output histo result to CSV file
    write_CSV("histoBinCPU.csv",diaLen,MAX_VALUE, h_histoCPU);
    //cudaMalloc for rotation kernel
    unsigned int *h_histoGPU;
    unsigned int *h_rotatedGPU;
    unsigned int *h_rotatedGPU2;
    unsigned int *h_rotatedGPU3;
    unsigned int *d_Input;
    unsigned int *d_rotated;
    unsigned int *d_rotated_naive;
    unsigned int *d_histo;
    //memory allocate in the host
    h_rotatedGPU = (unsigned int*)malloc(rotate_size);
    h_rotatedGPU2 = (unsigned int*)malloc(rotate_size);
    h_rotatedGPU3 = (unsigned int*)malloc(rotate_size);
    h_histoGPU = (unsigned int*)malloc(histo_size);
    if (!h_rotatedGPU ||!h_histoGPU||!h_rotatedGPU2||!h_rotatedGPU3) {
        fprintf(stderr, " unable to malloc \n");
        exit(-1); // fail
    }
    cudaMalloc((void**)&d_Input,rotate_size);
    cudaMalloc((void**)&d_rotated,rotate_size);
    cudaMalloc((void**)&d_rotated_naive,rotate_size);
    //cudaMemcpy
    cudaMemcpy(d_Input,h_Input,rotate_size,cudaMemcpyHostToDevice);
    //kernel dimension
    int blockSize = 8;
    dim3 blockDim(blockSize,blockSize,1);
    int gridSize = (diaLen + blockSize -1)/blockSize;
    dim3 gridDim(gridSize,gridSize,1);
    float time3;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //launch kernel 100 times and take average time
    for(int i = 0;i<100;i++){
        rotation_kernel_naive<<<gridDim,blockDim>>>(d_Input,d_rotated_naive,diaLen,diaLen,angle);
    }
    cudaEventRecord(stop,0);
    cudaMemcpy(h_rotatedGPU,d_rotated_naive,rotate_size,cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time3,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU rotation_kernel_naive time: %f milliseconds\n",time3 / 100);
    //output GPU rotate result
    write_ppm( "rotated_GPU_naive.ppm", diaLen, diaLen, 255, h_rotatedGPU);
    //GPU rotation with optimization, using registers to store pre-calculated values
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //launch kernel 100 times and take average time
    for(int i = 0;i<100;i++){
        rotation_kernel_2<<<gridDim,blockDim>>>(d_Input,d_rotated,diaLen,diaLen,angle);
    }
    cudaEventRecord(stop,0);
    cudaMemcpy(h_rotatedGPU2,d_rotated,rotate_size,cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float time3_2;
    cudaEventElapsedTime(&time3_2,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU rotation_kernel_2 time: %f milliseconds\n",time3_2 / 100);
    //output GPU rotate result
    write_ppm( "rotated_GPU2.ppm", diaLen, diaLen, 255, h_rotatedGPU2);
    //rotation kernel_3 using constant memory---------------------------------
    double *P;
    P = (double*) malloc(4 * sizeof(double));
    P[0] = (double)diaLen / 2; //xCenter
    P[1] = (double)diaLen / 2; //yCenter
    P[2] = sin(angle);
    P[3] = cos(angle);
    //load data to constant memory
    cudaMemcpyToSymbol(PARAMS, P, 4 * sizeof(double));
    //timer
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //launch kernel 100 times and take average time
    for(int i = 0;i<100;i++){
        rotation_kernel_3<<<gridDim,blockDim>>>(d_Input,d_rotated,diaLen,diaLen);
    }
    cudaEventRecord(stop,0);
    cudaMemcpy(h_rotatedGPU3,d_rotated,rotate_size,cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    float time3_3;
    cudaEventElapsedTime(&time3_3,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU rotation kernel_3 tiem: %f milliseconds\n",time3_3 / 100);
    //output GPU rotate result
    write_ppm( "rotated_GPU3.ppm", diaLen, diaLen, 255, h_rotatedGPU3);
    //-------end of rotation kernel-----------------------------------------------
    //cudaFree part 1
    cudaFree(d_Input);
    cudaFree(d_rotated_naive);
    //------GPU histo start----------------------------------------
    // //cudaMalloc for histoGPU
    cudaMalloc((void**)&d_histo,histo_size);
    //the naive kernel only atomicAdds — start from zeroed bins
    cudaMemset(d_histo, 0, histo_size);
    //------naive histo kernel-----------------------------------------
    float time4;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    int blockSize2 = 32;
    int gridSize2 = ceil((float)diaLen/blockSize2);
    dim3 blockDim2 (blockSize2,blockSize2,1);
    dim3 gridDim2 (gridSize2,gridSize2,1);
    histo_kernel_naive<<<gridDim2,blockDim2>>>(d_rotated,d_histo,diaLen,diaLen);
    cudaEventRecord(stop,0);
    cudaMemcpy(h_histoGPU,d_histo,histo_size,cudaMemcpyDeviceToHost);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&time4,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU histo_naive time: %f milliseconds\n",time4 );
    // //output histo result to CSV file
    write_CSV("histoBinGPU_kernel_naive.csv",diaLen,MAX_VALUE, h_histoGPU);
    //----------end---------------------------------------------------------------
    //------histo kernel 2, one thread work with one input----------------------------
    unsigned int *h_histoGPU2;
    h_histoGPU2 = (unsigned int*)malloc(histo_size);
    if (!h_histoGPU2) {
        fprintf(stderr, " unable to malloc \n");
        exit(-1); // fail
    }
    unsigned int *d_histo2;
    cudaMalloc((void**)&d_histo2,histo_size);
    cudaMemset(d_histo2, 0, histo_size);   // defensive: defined contents
    float time5;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start,0);
    //launch kernel 100 times and take average time
    for(int i = 0;i<100;i++){
        histo_kernel_2<<<diaLen,256>>>(d_rotated,d_histo2,diaLen,diaLen);
    }
    cudaEventRecord(stop,0);
    cudaEventSynchronize(stop);
    cudaMemcpy(h_histoGPU2,d_histo2,histo_size,cudaMemcpyDeviceToHost);
    cudaEventElapsedTime(&time5,start,stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("GPU histo_kernel_2 time: %f milliseconds\n",time5 / 100);
    // //output histo result to CSV file
    write_CSV("histoBinGPU_kernel_2.csv",diaLen,MAX_VALUE, h_histoGPU2);
    //----------end---------------------------------------------------------------
    //cudaFREE part 2
    cudaFree(d_rotated);
    cudaFree(d_histo);
    cudaFree(d_histo2);
    //Free host memory
    free(h_Input);
    free(h_rotated);
    free(h_rotatedGPU);
    free(h_rotatedGPU2);
    free(h_rotatedGPU3);
    free(h_histoCPU);
    free(h_histoGPU);
    free(h_histoGPU2);
    fprintf(stderr, "done\n");
    return 0;
}
//--------rotation kernel naive----------------------------------------
// Inverse-map rotation about the image center: each output pixel samples the
// source pixel it came from; pixels whose source falls outside the image are
// left untouched.
// Bug fix: threads outside the image used to keep orgX = orgY = 0, pass the
// second range test, and write input[0] to output[y*width+x] — an index that
// aliases the next row or runs past the buffer entirely. They now exit early.
__global__ void rotation_kernel_naive(unsigned int *input,unsigned int *output,int width, int height,double angle){
    double xCenter = (double)width / 2;
    double yCenter = (double)height / 2;
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    //boundary check: threads beyond the image must not write anything
    if(x < 0 || x >= width || y < 0 || y >= height)
        return;
    int index = y * width + x;
    int orgX = (int)(cos(angle) * ((double)x - xCenter)- sin(angle) * ((double)y - yCenter) + xCenter);
    int orgY = (int)(sin(angle) * ((double)x - xCenter) + cos(angle) * ((double)y - yCenter) + yCenter);
    if(orgX>=0 && orgX < width && orgY>=0 && orgY<height){
        output[index] = input[ orgY * width + orgX];
    }
}
//--------rotation kernel 2----------------------------------------
// Same inverse-map rotation as the naive kernel, with sin/cos and the shifted
// coordinates precomputed into registers.
// Bug fix (same as naive kernel): out-of-image threads previously fell
// through with orgX = orgY = 0 and wrote input[0] out of bounds; they now
// exit before touching output.
__global__ void rotation_kernel_2(unsigned int *input,unsigned int *output,int width, int height,double angle){
    double xCenter = (double)width / 2; // x center of image
    double yCenter = (double)height / 2; // y center of image
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    //boundary check: threads beyond the image must not write anything
    if(x < 0 || x >= width || y < 0 || y >= height)
        return;
    int index = y * width + x;
    double sinA = sin(angle);
    double cosA = cos(angle);
    double shiftX = (double)x - xCenter;
    double shiftY = (double)y - yCenter;
    int orgX = (int)(cosA * shiftX - sinA * shiftY + xCenter);
    int orgY = (int)(sinA * shiftX + cosA * shiftY + yCenter);
    if(orgX>=0 && orgX < width && orgY>=0 && orgY<height){
        output[index] = input[ orgY * width + orgX];
    }
}
//--------rotation kernel 3 using constant memory----------------------------------------
// Inverse-map rotation reading center and sin/cos from __constant__ PARAMS:
// PARAMS[0]=xCenter, PARAMS[1]=yCenter, PARAMS[2]=sin(angle), PARAMS[3]=cos(angle).
// Bug fix (same as the other rotation kernels): out-of-image threads used to
// keep orgX = orgY = 0 and write input[0] to an out-of-range output index;
// they now return before writing.
__global__ void rotation_kernel_3(unsigned int *input,unsigned int *output,int width, int height){
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    //boundary check: threads beyond the image must not write anything
    if(x < 0 || x >= width || y < 0 || y >= height)
        return;
    int index = y * width + x;
    double shiftX = (double)x - PARAMS[0];
    double shiftY = (double)y - PARAMS[1];
    int orgX = (int)(PARAMS[3] * shiftX - PARAMS[2] * shiftY + PARAMS[0]);
    int orgY = (int)(PARAMS[2] * shiftX + PARAMS[3] * shiftY + PARAMS[1]);
    if(orgX>=0 && orgX < width && orgY>=0 && orgY<height){
        output[index] = input[ orgY * width + orgX];
    }
}
//----------cpu for matrix rotation--------------------------
// CPU reference: inverse-map rotation of `input` about the image center into
// `output`. Output pixels whose source lands outside the image are left
// untouched (callers pre-fill `output`, typically with zeros).
void matrixRotation(unsigned int *input, unsigned int *output, int width, int height, double angle){
    const double cx = (double)width / 2;
    const double cy = (double)height / 2;
    const double s = sin(angle);
    const double c = cos(angle);
    for(int row = 0; row < height; row++){
        for(int col = 0; col < width; col++){
            const int srcX = (int)(c * ((double)col - cx) - s * ((double)row - cy) + cx);
            const int srcY = (int)(s * ((double)col - cx) + c * ((double)row - cy) + cy);
            if(srcX >= 0 && srcX < width && srcY >= 0 && srcY < height)
                output[row * width + col] = input[srcY * width + srcX];
        }
    }
}
//----------histo kernel naive: using global memory--------------------------------------
// Per-column luminance histogram straight into global memory:
// bin (value, column) lives at histo[value*width + column]; atomicAdd keeps
// concurrent increments from different rows correct.
__global__ void histo_kernel_naive(unsigned int *input,unsigned int *histo,int width, int height){
    const int x = blockIdx.x * blockDim.x + threadIdx.x;
    const int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < 0 || x >= width || y < 0 || y >= height)
        return;
    atomicAdd(&histo[input[y*width+x]*width + x], 1);
}
//histo_kernel_2<<<diaLen,256>>>(d_rotated,d_histo2,diaLen,diaLen);
//----------histo kernel 2: with shared memory,each threads deal with one input
// One block per image column. The block accumulates that column's histogram
// in shared memory (one bin per thread, so blockDim.x must equal NUM_BINS),
// then each thread publishes its bin to histo[value*width + column].
__global__ void histo_kernel_2(unsigned int *input,unsigned int *histo,int width, int height){
    __shared__ unsigned int bins[NUM_BINS];  // this column's partial histogram
    const int tx = threadIdx.x;
    const int col = blockIdx.x;              // column handled by this block
    bins[tx] = 0;
    __syncthreads();                         // all bins zeroed before counting
    // Stride down the column in blockDim.x-sized chunks.
    for (int row = tx; row < height; row += blockDim.x) {
        atomicAdd(&bins[input[row * width + col]], 1);
    }
    __syncthreads();                         // counting done before publishing
    histo[tx * width + col] = bins[tx];
}
//histo_kernel_3<<<diaLen,256>>>,
//----------histo kernel 3: with shared memory for input, each thread collecting one bin
// One block per column; each tile of NUM_BINS input rows is staged in shared
// memory and every thread counts occurrences of its own bin value (tx).
// Requires blockDim.x == NUM_BINS.
// Bug fix: there was no barrier after the counting loop, so on the next tile
// some threads overwrote IN[] while others were still reading it — a shared
// memory race. A trailing __syncthreads() closes each tile.
__global__ void histo_kernel_3(unsigned int *input,unsigned int *histo,int width, int height){
    __shared__ unsigned int IN[NUM_BINS]; //use to storing input
    int tx = threadIdx.x;
    int col = blockIdx.x;//col number of input
    int h = 0;
    for(int t =0;t<(height+NUM_BINS-1)/NUM_BINS;t++){ //when height is bigger than NUM_BINS
        int row = t*NUM_BINS+tx;
        if(row<height){
            IN[tx] = input[row*width+col];
        }else{
            IN[tx] = 300; // padding nonsense value to avoid addition
        }
        __syncthreads();
        for(int i = 0;i<NUM_BINS;i++){//each thread loop through the input
            int value = IN[i];
            if(tx == value){
                h++;
            }
        }
        __syncthreads(); // finish reading IN before the next tile overwrites it
    }
    histo[tx*width+col] = h;
}
//----------cpu for histo along y direction-------------------
// CPU reference for the per-column histograms: for each pixel, increment
// histo[value*width + column]. Caller must pre-zero `histo`.
void computerGoldHisto(unsigned int* input, unsigned int* histo, int width, int height){
    for(int row = 0; row < height; ++row){
        for(int col = 0; col < width; ++col){
            const unsigned int value = input[row * width + col];
            ++histo[value * width + col];
        }
    }
}
//-----------read image to array-----------------------------------------------------------------
// Reads a binary ("P6") PPM file and returns a malloc'd width*height array of
// unsigned ints holding the RED channel of each pixel (the green/blue bytes
// are read but discarded). On any failure it logs to stderr and returns NULL.
// xsize/ysize/maxval are set from the header.
// NOTE(review): error paths return without fclose(fp) — the handle leaks;
// confirm callers treat NULL as fatal. The "%ld" format below is also fed an
// int (bufsize) — verify on LP64 targets.
unsigned int *read_ppm( char *filename, int * xsize, int * ysize, int *maxval ){
// Guard against a missing/empty file name.
if ( !filename || filename[0] == '\0') {
fprintf(stderr, "read_ppm but no file name\n");
return NULL; // fail
}
FILE *fp;
fprintf(stderr, "read_ppm( %s )\n", filename);
fp = fopen( filename, "rb");
if (!fp)
{
fprintf(stderr, "read_ppm() ERROR file '%s' cannot be opened for reading\n", filename);
return NULL; // fail
}
char chars[1024];
//int num = read(fd, chars, 1000);
// Slurp the first ~1000 bytes: enough for the header plus some pixel data.
int num = fread(chars, sizeof(char), 1000, fp);
// A binary PPM must start with the magic "P6".
if (chars[0] != 'P' || chars[1] != '6')
{
fprintf(stderr, "Texture::Texture() ERROR file '%s' does not start with \"P6\" I am expecting a binary PPM file\n", filename);
return NULL;
}
unsigned int width, height, maxvalue;
char *ptr = chars+3; // P 6 newline
// Skip a single comment line if present (only one is handled).
if (*ptr == '#') // comment line!
{
ptr = 1 + strstr(ptr, "\n");
}
// Parse the three header numbers: width, height, max sample value.
num = sscanf(ptr, "%d\n%d\n%d", &width, &height, &maxvalue);
fprintf(stderr, "read %d things width %d height %d maxval %d\n", num, width, height, maxvalue);
*xsize = width;
*ysize = height;
*maxval = maxvalue;
unsigned int *pic = (unsigned int *)malloc( width * height * sizeof(unsigned int));
if (!pic) {
fprintf(stderr, "read_ppm() unable to allocate %d x %d unsigned ints for the picture\n", width, height);
return NULL; // fail but return
}
// allocate buffer to read the rest of the file into
int bufsize = 3 * width * height * sizeof(unsigned char);
// 16-bit samples take two bytes each when maxval > 255.
if ((*maxval) > 255) bufsize *= 2;
unsigned char *buf = (unsigned char *)malloc( bufsize );
if (!buf) {
fprintf(stderr, "read_ppm() unable to allocate %d bytes of read buffer\n", bufsize);
return NULL; // fail but return
}
// really read
char duh[80];
char *line = chars;
// find the start of the pixel data.
// Locate each header number textually to compute the byte offset where the
// binary pixel payload begins.
sprintf(duh, "%d\0", *xsize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *ysize);
line = strstr(line, duh);
//fprintf(stderr, "%s found at offset %d\n", duh, line-chars);
line += strlen(duh) + 1;
sprintf(duh, "%d\0", *maxval);
line = strstr(line, duh);
fprintf(stderr, "%s found at offset %d\n", duh, line - chars);
line += strlen(duh) + 1;
long offset = line - chars;
//lseek(fd, offset, SEEK_SET); // move to the correct offset
fseek(fp, offset, SEEK_SET); // move to the correct offset
//long numread = read(fd, buf, bufsize);
long numread = fread(buf, sizeof(char), bufsize, fp);
fprintf(stderr, "Texture %s read %ld of %ld bytes\n", filename, numread, bufsize);
fclose(fp);
// Keep only the red byte of each RGB triple.
int pixels = (*xsize) * (*ysize);
for (int i=0; i<pixels; i++)
pic[i] = (int) buf[3*i]; // red channel
return pic; // success
}
//--------------write array to a image-------------------------------------------------------------------
// Writes `pic` (one luminance value per pixel) as a grayscale binary PPM:
// each value is truncated to a byte and emitted for R, G and B.
// Exits the process on open failure.
// Bug fix: the failure fprintf used a "%s" format with no filename argument
// (undefined behavior) — the argument is now supplied.
void write_ppm( char const *filename, int xsize, int ysize, int maxval,unsigned int *pic)
{
    FILE *fp;
    fp = fopen(filename, "wb");
    if (!fp)
    {
        fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
        exit(-1);
    }
    // PPM header: magic, dimensions, max sample value.
    fprintf(fp, "P6\n");
    fprintf(fp,"%d %d\n%d\n", xsize, ysize, maxval);
    int numpix = xsize * ysize;
    for (int i=0; i<numpix; i++) {
        unsigned char uc = (unsigned char) pic[i];
        fprintf(fp, "%c%c%c", uc, uc, uc);   // gray: same byte for R, G, B
    }
    fclose(fp);
}
//write histoBin result to excel diaLen, MAX_VALUE
// Dumps a width x height unsigned-int matrix as CSV, one row per line, each
// value followed by a comma. Exits the process on open failure.
// Bug fix: the failure fprintf used a "%s" format with no filename argument
// (undefined behavior) — the argument is now supplied.
void write_CSV(char const *filename,int width, int height, unsigned int *input){
    FILE *fp;
    fp = fopen(filename, "w+");
    if (!fp)
    {
        fprintf(stderr, "FAILED TO OPEN FILE '%s' for writing\n", filename);
        exit(-1);
    }
    for(int i = 0;i< height;i++){
        for(int j = 0;j<width;j++){
            fprintf(fp,"%d,",input[i*width+j]);
        }
        fprintf(fp,"\n");
    }
    fclose(fp);
}
// calculate newX newY after padding and rotation
// calculate newX newY after padding and rotation
// Maps a pixel (inputX, inputY) of a width x height image into the
// coordinate it lands on after the image is centred inside a square padded
// canvas (side = diagonal + 3) and rotated by `angle` radians about the
// canvas centre. Results are truncated toward zero and negated, matching
// the caller's coordinate convention.
void getNewXY(int inputX, int inputY, int width, int height,double angle, int *outputX, int *outputY){
    // Square canvas wide enough to hold any rotation of the image.
    const double side = (int)(sqrt(width * width + height * height) + 3);
    // Padding needed on each axis to centre the image in the canvas.
    const double padX = side - width;
    const double padY = side - height;
    const double px = (double)inputX - padX / 2;
    const double py = (double)inputY - padY / 2;
    // Rotation pivot: canvas centre expressed in negative coordinates.
    const double cx = -side / 2;
    const double cy = -side / 2;
    const double c = cos(angle);
    const double s = sin(angle);
    *outputX = -1 * (int)(c * (px - cx) - s * (py - cy) + cx);
    *outputY = -1 * (int)(s * (px - cx) + c * (py - cy) + cy);
}
|
3,189 | #include <stdio.h>
#define N 12
#define THREADS_X 3
#define THREADS_Y 4
#define A(i,j) A[i*N+j]
#define B(i,j) B[i*N+j]
#define C(i,j) C[i*N+j]
// Element-wise matrix addition C = A + B over N x N int matrices
// (A/B/C macros index row-major storage).
// Expected launch: grid (N/THREADS_X, N/THREADS_Y) of (THREADS_X, THREADS_Y)
// blocks.
__global__ void index(int *A, int *B, int *C)
{
    int i = blockDim.x * blockIdx.x + threadIdx.x;
    int j = blockDim.y * blockIdx.y + threadIdx.y;
    // Guard the grid tail: without this, any launch whose block dimensions
    // do not divide N exactly would read and write out of bounds.
    if (i < N && j < N)
        C(i,j) = A(i,j) + B(i,j);
}
// Fills two N x N matrices on the host (A[i][j]=i+j, B[i][j]=i*j), adds
// them on the GPU with the `index` kernel, and prints the result. The grid
// (N/THREADS_X, N/THREADS_Y) covers the matrix exactly since N=12 is
// divisible by both 3 and 4.
// NOTE(review): no CUDA error checking anywhere — allocation, copy and
// launch failures are silent.
int main()
{
int A[N*N], B[N*N], C[N*N], *A_d, *B_d, *C_d;
int i, j;
dim3 dimBlock(THREADS_X, THREADS_Y);
dim3 dimGrid(N/THREADS_X, N/THREADS_Y);
cudaMalloc((void **)&A_d, sizeof(int)*N*N);
cudaMalloc((void **)&B_d, sizeof(int)*N*N);
cudaMalloc((void **)&C_d, sizeof(int)*N*N);
// initialize host inputs
for (i=0; i<N; i++){
for(j=0; j<N; j++){
A(i,j) = i+j;
B(i,j) = i*j;
}
}
cudaMemcpy(A_d, A, sizeof(int)*N*N, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B, sizeof(int)*N*N, cudaMemcpyHostToDevice);
index <<< dimGrid, dimBlock >>> (A_d, B_d, C_d);
// Blocking device-to-host copy also synchronizes with the kernel.
cudaMemcpy(C, C_d, sizeof(int)*N*N, cudaMemcpyDeviceToHost);
for(i=0; i<N; i++){
for(j=0; j<N; j++){
printf("%d ", C(i,j));
}
printf("\n");
}
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
}
|
3,190 | //#include <iostream>
//#include "common.h"
//#include "cuda.h"
//#include "DeviceVector.cpp"
//#include "dev_noise.cuh"
//using namespace std;
//
//int main(){
// DeviceVector<float> vc1(0,10,1);
// DeviceVector<float> vc2(0, 10,1);
// for (float aa : vc1){
// cout << aa << " ";
// }
// cout << endl;
// for (float aa : vc2){
// cout << aa << " ";
// }
// cout << endl;
//
// cudaNoiseGeneWithSoS << <1, vc1.size() >> >(raw_pointer_cast(vc1.data()), raw_pointer_cast(vc2.data()), vc1.size(), vc1.size(), 0/*time(NULL)*/, 1, 1, 1, 1);
//
// for (float aa : vc1){
// cout << aa << " ";
// }
// cout << endl;
// for (float aa : vc2){
// cout << aa << " ";
// }
// cout << endl;
// cin.get();
//} |
3,191 | #include <cuda.h>
#include <stdio.h>
#include <stdint.h>
#define WIDTH 512
#define HEIGHT 512
#define ITERS 512
#define N (WIDTH*HEIGHT)
#define max_size 4
#define max_colors 16
#define xmax 1.2f
#define xmin -2.0f
#define ymax 1.2f
#define ymin -1.2f
#define deltaP ((xmax - xmin)/512)
#define deltaQ ((ymax - ymin)/512)
// Mandelbrot escape-time kernel for a 512x512 window over a zoomed region
// (hard-coded plate constants). One thread per pixel: column = threadIdx.x,
// row = blockIdx.x. Expected launch: <<<512, 512>>>.
// Removed the unused `extern __shared__ uint8_t sbase[]` declaration — the
// kernel never touched it and main() launches with 0 dynamic shared bytes.
extern "C" __global__ void mandelGen(uint8_t* output0)
{
    uint32_t tid = threadIdx.x;
    float zr;       // current z, real part
    float zi;       // current z, imaginary part
    uint32_t iter;  // escape iteration count
    if (blockIdx.x < 512U) {
        zr = 0.0F;
        zi = 0.0F;
        iter = 1U;
        // Iterate z = z^2 + c until |z|^2 >= 4 or the iteration cap is hit.
        while (zr * zr + zi * zi < 4.0F && iter < 512U) {
            float tr = zr;
            float ti = zi;
            zr = tr * tr - ti * ti + (-0.7931140065193176F + (float) tid *
                                      1.3693165965378284e-4F);
            zi = 2.0F * tr * ti + (0.1409740000963211F -
                                   (float) blockIdx.x *
                                   2.0146874885540456e-4F);
            iter = iter + 1U;
        }
        // Map the truncated iteration count onto 16 grey levels.
        output0[blockIdx.x * 512U + tid] = (uint8_t) iter % 16U * 16U;
    }
}
// Mandelbrot escape-time kernel over the full-set view
// (x in [-2.0, 1.2], y in [-1.2, 1.2] per the file's #defines).
// One thread per pixel: column = threadIdx.x, row = blockIdx.x.
__global__ void kernel(uint8_t* output0){
    float zr = 0.0f;           // current z, real part
    float zi = 0.0f;           // current z, imaginary part
    uint32_t iter = 1;         // escape iteration count
    // c is fixed per thread; hoisted out of the loop.
    const float cr = -2.0f + ((float)threadIdx.x) * 6.25e-3f;
    const float ci = 1.2f - ((float)blockIdx.x) * 4.6875e-3f;
    while ((zr * zr + zi * zi) < 4.0f && iter < 512) {
        const float tr = zr;
        const float ti = zi;
        zr = ((tr * tr) - (ti * ti)) + cr;
        zi = ((2.0f * tr) * ti) + ci;
        iter = iter + 1;
    }
    // 16 grey levels from the truncated iteration count.
    output0[(blockIdx.x * 512) + threadIdx.x] = ((((uint8_t)iter) % 16) * 16);
}
// Mandelbrot escape-time kernel, zoomed "plate 1" region (hard-coded
// constants). One thread per pixel: column = threadIdx.x, row = blockIdx.x.
__global__ void plate1(uint8_t* output0){
    float zr = 0.0f;
    float zi = 0.0f;
    uint32_t iter = 1;
    // c is fixed per thread; hoisted out of the loop.
    const float cr = -0.69106f + ((float)threadIdx.x) * 3.008172e-7f;
    const float ci = 0.387228f - ((float)blockIdx.x) * 2.4418114e-7f;
    while ((zr * zr + zi * zi) < 4.0f && iter < 512) {
        const float tr = zr;
        const float ti = zi;
        zr = ((tr * tr) - (ti * ti)) + cr;
        zi = ((2.0f * tr) * ti) + ci;
        iter = iter + 1;
    }
    output0[(blockIdx.x * 512) + threadIdx.x] = ((((uint8_t)iter) % 16) * 16);
}
// Mandelbrot escape-time kernel, zoomed "plate 2" region (same constants as
// mandelGen). One thread per pixel: column = threadIdx.x, row = blockIdx.x.
__global__ void plate2(uint8_t* output0){
    float zr = 0.0f;
    float zi = 0.0f;
    uint32_t iter = 1;
    // c is fixed per thread; hoisted out of the loop.
    const float cr = -0.793114f + ((float)threadIdx.x) * 1.3693166e-4f;
    const float ci = 0.140974f - ((float)blockIdx.x) * 2.0146875e-4f;
    while ((zr * zr + zi * zi) < 4.0f && iter < 512) {
        const float tr = zr;
        const float ti = zi;
        zr = ((tr * tr) - (ti * ti)) + cr;
        zi = ((2.0f * tr) * ti) + ci;
        iter = iter + 1;
    }
    output0[(blockIdx.x * 512) + threadIdx.x] = ((((uint8_t)iter) % 16) * 16);
}
// Mandelbrot escape-time kernel, zoomed "plate 3" region (hard-coded
// constants). One thread per pixel: column = threadIdx.x, row = blockIdx.x.
__global__ void plate3(uint8_t* output0){
    float zr = 0.0f;
    float zi = 0.0f;
    uint32_t iter = 1;
    // c is fixed per thread; hoisted out of the loop.
    const float cr = -0.745464f + ((float)threadIdx.x) * 1.4854595e-7f;
    const float ci = 0.11303f - ((float)blockIdx.x) * 1.23051e-7f;
    while ((zr * zr + zi * zi) < 4.0f && iter < 512) {
        const float tr = zr;
        const float ti = zi;
        zr = ((tr * tr) - (ti * ti)) + cr;
        zi = ((2.0f * tr) * ti) + ci;
        iter = iter + 1;
    }
    output0[(blockIdx.x * 512) + threadIdx.x] = ((((uint8_t)iter) % 16) * 16);
}
// Mandelbrot kernel using the #define'd view bounds (xmin/xmax/ymin/ymax,
// deltaP/deltaQ). Row = blockIdx.x, column = threadIdx.x; expected launch
// <<<HEIGHT, WIDTH>>>.
// NOTE(review): xsq/ysq are recomputed at the TOP of the loop body, so the
// while-condition tests |z|^2 from the previous iteration's z — escape is
// detected one iteration later than in the other kernels in this file.
__global__ void mandel(uint8_t *out) {
int bid = blockIdx.x;
int tid = threadIdx.x;
float x = 0.0, y = 0.0, xsq = 0.0, ysq = 0.0;
int color = 1;
while (color < ITERS && (xsq + ysq) < max_size) {
xsq = x*x;
ysq = y*y;
// z = z^2 + c, with c derived from this thread's pixel coordinates.
y = 2*x*y+(ymax - blockIdx.x*deltaQ);
x = xsq - ysq + (xmin + threadIdx.x * deltaP);
color ++;
}
// 8 grey levels; the 512 row stride is hard-coded (matches WIDTH).
out[bid* 512 + tid] = (color % 8) * 32; // % max_colors;
}
/* ------------------------------------------------------------------------
MAIN
--------------------------------------------------------------------- */
// Renders one Mandelbrot plate with mandelGen, times it with CUDA events,
// prints the pixel bytes and dumps them to "image.out".
// Fixes over the original: launch error check, fopen check, and release of
// the host buffer, device buffer and events (all previously leaked).
int main(void) {
    uint8_t *r;
    uint8_t *dr;
    r = (uint8_t*)malloc(sizeof(uint8_t) * N);
    if (r == NULL) {
        fprintf(stderr, "host allocation failed\n");
        return 1;
    }
    cudaMalloc((void**)&dr, N * sizeof(uint8_t));
    cudaMemset(dr, 0, N * sizeof(uint8_t));
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    //mandel<<<HEIGHT,WIDTH,0>>>(dr);
    //kernel<<<HEIGHT,WIDTH,0>>>(dr);
    //plate1<<<HEIGHT,WIDTH,0>>>(dr);
    //plate2<<<HEIGHT,WIDTH,0>>>(dr);
    //plate3<<<HEIGHT,WIDTH,0>>>(dr);
    mandelGen<<<HEIGHT,WIDTH,0>>>(dr);
    // Catch launch-configuration errors (kernel launches do not return one).
    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess) {
        fprintf(stderr, "kernel launch failed: %s\n", cudaGetErrorString(err));
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    cudaMemcpy(r, dr, N * sizeof(uint8_t), cudaMemcpyDeviceToHost);
    for (int i = 0; i < N; i++) {
        printf("%d ", r[i]);
    }
    printf("Elapsed time: %f\n", elapsedTime);
    // Dump raw bytes for offline visualisation.
    FILE *file = fopen("image.out", "w");
    if (file != NULL) {
        fwrite(r, sizeof(uint8_t), N, file);
        fclose(file);
    } else {
        fprintf(stderr, "could not open image.out for writing\n");
    }
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(dr);
    free(r);
    return 0;
}
|
3,192 | #include <stdio.h>
// Row-major matrix descriptor shared by host and device code.
typedef struct {
int n;        // first dimension (used as both row count and row stride by the naive kernel)
int m;        // second dimension
int tile;     // leading dimension (row stride) — used by GetElement/SetElement/GetSubMatrix
float* arr;   // element storage, row-major
} Matrix;
// Thread block size
#define BLOCK_SIZE 16
void printa(float *A, int n, int m);
void generateMatrix(float *A, int n, int m, int num);
__global__ void MulKernel(const Matrix, const Matrix, Matrix);
__global__ void MulKernelShared(const Matrix, const Matrix, Matrix);
// Reads A(row, col); `tile` is the leading dimension (row stride) of the
// backing array, so sub-matrix views index into their parent's storage.
__device__ float GetElement(const Matrix A, int row, int col)
{
    const float* base = A.arr;
    return base[row * A.tile + col];
}
// Writes A(row, col) = value using `tile` as the leading dimension, so the
// write lands in the parent storage even for sub-matrix views.
__device__ void SetElement(Matrix A, int row, int col, float value)
{
    float* base = A.arr;
    base[row * A.tile + col] = value;
}
// Get the BLOCK_SIZExBLOCK_SIZE sub-matrix
// Returns a BLOCK_SIZE x BLOCK_SIZE view (no copy) of the tile at block
// coordinates (row, col); the view shares A's storage and keeps A's
// leading dimension so element accessors index correctly.
__device__ Matrix GetSubMatrix(Matrix A, int row, int col)
{
    Matrix sub;
    sub.n = BLOCK_SIZE;
    sub.m = BLOCK_SIZE;
    sub.tile = A.tile;
    sub.arr = A.arr + A.tile * BLOCK_SIZE * row + BLOCK_SIZE * col;
    return sub;
}
// Matrix multiplication kernel called by MatrixMultiplication()
// Naive GEMM kernel: one thread computes one element of C = A * B.
// Expected launch: 2-D grid of BLOCK_SIZE x BLOCK_SIZE blocks covering C.
// NOTE(review): no row/col bounds guard — dimensions must be exact
// multiples of the block size, as the host wrapper's comment states.
// NOTE(review): indexing uses .n as the row stride here (not .tile as the
// shared-memory path does); the host wrapper never sets tile for this path.
__global__ void MulKernel(Matrix A, Matrix B, Matrix C)
{
float sum = 0;
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
// Dot product of A's row with B's column.
for (int e = 0; e < A.n; ++e)
sum += A.arr[row * A.n + e] * B.arr[e * B.n + col];
C.arr[row * C.n + col] = sum;
}
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
// Host wrapper for the naive kernel: copies A and B to the device, runs
// MulKernel on a (B.n/BLOCK_SIZE) x (A.m/BLOCK_SIZE) grid, and copies the
// product back into C.arr.
// NOTE(review): no error checking on any cudaMalloc/cudaMemcpy/launch, and
// the device Matrix `tile` fields are left unset (MulKernel does not read
// them, but the shared-memory accessors would).
__host__ void MatrixMultiplication(const Matrix A, const Matrix B, Matrix C)
{
// Load A and B to device memory
Matrix d_A;
d_A.n = A.n; d_A.m = A.m;
size_t size = A.n * A.m * sizeof(float);
cudaMalloc(&d_A.arr, size);
cudaMemcpy(d_A.arr, A.arr, size,
cudaMemcpyHostToDevice);
Matrix d_B;
d_B.n = B.n; d_B.m = B.m;
size = B.n * B.m * sizeof(float);
cudaMalloc(&d_B.arr, size);
cudaMemcpy(d_B.arr, B.arr, size,
cudaMemcpyHostToDevice);
// Allocate C in device memory
Matrix d_C;
d_C.n = C.n; d_C.m = C.m;
size = C.n * C.m * sizeof(float);
cudaMalloc(&d_C.arr, size);
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE);
dim3 dimGrid(B.n / dimBlock.x, A.m / dimBlock.y);
MulKernel<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Blocking device-to-host copy also synchronizes with the kernel.
cudaMemcpy(C.arr, d_C.arr, size, cudaMemcpyDeviceToHost);
// printa(C.arr, C.n, C.m);
cudaFree(d_A.arr);
cudaFree(d_B.arr);
cudaFree(d_C.arr);
}
// Tiled GEMM kernel: each block computes one BLOCK_SIZE x BLOCK_SIZE tile
// of C = A * B, staging one tile of A and one of B in shared memory per
// iteration. Requires A.n to be an exact multiple of BLOCK_SIZE and the
// `tile` (leading dimension) fields to be set on all three matrices.
__global__ void MulKernelShared(Matrix A, Matrix B, Matrix C)
{
// Use the block size of subarr of Matrix C.
int blockRow = blockIdx.y;
int blockCol = blockIdx.x;
Matrix Csub = GetSubMatrix(C, blockRow, blockCol);
// Thread row and column within Csub
int row = threadIdx.y;
int col = threadIdx.x;
float sum = 0;
// March the A tile right and the B tile down across the shared dimension.
for (int m = 0; m < (A.n / BLOCK_SIZE); ++m) {
Matrix Asub = GetSubMatrix(A, blockRow, m);
Matrix Bsub = GetSubMatrix(B, m, blockCol);
// subarr A and B are stored in Shared memory
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[row][col] = GetElement(Asub, row, col);
Bs[row][col] = GetElement(Bsub, row, col);
// Synchronize the storing data Asub and B sub into As and Bs.
__syncthreads();
for (int e = 0; e < BLOCK_SIZE; ++e)
sum += As[row][e] * Bs[e][col];
// Synchronize to block the new generation of Asub and Bsub during iteration.
__syncthreads();
}
SetElement(Csub, row, col, sum);
}
// Host wrapper for the shared-memory kernel: copies A and B to the device
// (setting `tile` = leading dimension = n for each), launches
// MulKernelShared on a (B.n/BLOCK_SIZE) x (A.m/BLOCK_SIZE) grid, and
// copies the product back into C.arr.
// NOTE(review): no error checking on any CUDA call.
__host__ void MatrixMultiplicationShared(const Matrix A, const Matrix B, Matrix C)
{
Matrix d_A;
d_A.n = d_A.tile = A.n; d_A.m = A.m;
size_t size = A.n * A.m * sizeof(float);
cudaMalloc(&d_A.arr, size);
cudaMemcpy(d_A.arr, A.arr, size, cudaMemcpyHostToDevice);
Matrix d_B;
d_B.n = d_B.tile = B.n; d_B.m = B.m;
size = B.n * B.m * sizeof(float);
cudaMalloc(&d_B.arr, size);
cudaMemcpy(d_B.arr, B.arr, size, cudaMemcpyHostToDevice);
Matrix d_C;
d_C.n = d_C.tile = C.n; d_C.m = C.m;
size = C.n * C.m * sizeof(float);
cudaMalloc(&d_C.arr, size);
// dim3(uint3 x, uint3 y), specify demensions. default is 1.
dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); // 16 x 16 , dimBlock.x * dimBlock.y, total 256 threads
// printf("dimBlock.x: %d, dim.y: %d\n", dimBlock.x, dimBlock.y);
dim3 dimGrid(B.n / dimBlock.x, A.m / dimBlock.y);
MulKernelShared<<<dimGrid, dimBlock>>>(d_A, d_B, d_C);
// Blocking device-to-host copy also synchronizes with the kernel.
cudaMemcpy(C.arr, d_C.arr, size, cudaMemcpyDeviceToHost);
cudaFree(d_A.arr);
cudaFree(d_B.arr);
cudaFree(d_C.arr);
}
// print the float array
// print the float array
// Prints all n*m elements of A on one space-separated line, values
// rounded to whole numbers ("%.f"), followed by a newline.
void printa(float *A, int n, int m){
    const int total = n * m;
    for (int k = 0; k < total; k++)
        printf("%.f ", A[k]);
    printf("\n");
}
// fill the number in float array
// fill the number in float array
// Sets every one of the n*m elements of A to `num`.
void generateMatrix(float *A, int n, int m, int num){
    const int total = n * m;
    for (int k = 0; k < total; k++)
        A[k] = num;
}
// Allocates a row x col matrix of float and fills it with `num`.
// NOTE(review): `a` is passed BY VALUE, so the freshly allocated matrix is
// never visible to the caller and every allocation here leaks. The
// signature would need float*** (or a return value) to work; all call
// sites are currently commented out.
void generateMatrix2d(float **a, int row, int col, int num){
a = (float **)calloc(row, sizeof(float*));
for(int i = 0; i < row; i++)
a[i] = (float *) calloc (col, sizeof(float));
for(int i = 0; i < row; i++){
for(int j = 0; j < col; j++){
a[i][j] = num;
}
}
}
// CPU reference GEMM: c += a * b for an n x m result with an n-length
// inner dimension (callers use square matrices). c must be initialised by
// the caller (the commented-out call sites zero it with calloc).
void MatrixMultiplicationCPU(float **a, float **b, float **c, int n, int m){
    for (int row = 0; row < n; ++row) {
        for (int col = 0; col < m; ++col) {
            for (int k = 0; k < n; ++k) {
                c[row][col] += a[row][k] * b[k][col];
            }
        }
    }
}
// Benchmarks the global-memory and shared-memory GEMM paths for a range of
// square sizes (32384 halving down to 128).
// Fixes over the original: a/b/c are NULL-initialised (the original called
// free() on uninitialized pointers — undefined behavior, since the CPU
// benchmark that allocated them is commented out), host allocations are
// checked before use (the largest sizes need several GB per matrix),
// events are destroyed each iteration, and the "%fn(ms)" output typo is
// corrected to "%f (ms)".
int main(int argc, char const *argv[]) {
    int n, w, m;
    float ms = 0; // milliseconds
    // NULL so the trailing free() calls are safe no-ops.
    float **a = NULL, **b = NULL, **c = NULL;
    size_t sizeA, sizeB, sizeC;
    float *Ae, *Be, *Ce;
    for (int i = 32384; i >= 128; i >>= 1) {
        n = m = i;
        w = i / 2;
        printf("N x N = %d \n", m * n);
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);
        sizeA = m * w * sizeof(float);
        sizeB = w * n * sizeof(float);
        sizeC = m * n * sizeof(float);
        Ae = (float*) malloc(sizeA);
        Be = (float*) malloc(sizeB);
        Ce = (float*) malloc(sizeC);
        if (Ae == NULL || Be == NULL || Ce == NULL) {
            // Skip sizes we cannot host-allocate instead of crashing
            // inside generateMatrix().
            fprintf(stderr, "host allocation failed for %d x %d, skipping\n", n, m);
            free(Ae); free(Be); free(Ce);
            cudaEventDestroy(start);
            cudaEventDestroy(stop);
            continue;
        }
        // Matrix is {n, m, tile, arr}.
        Matrix A = {n, n, w, Ae};
        Matrix B = {w, w, m, Be};
        Matrix C = {n, n, m, Ce};
        generateMatrix(A.arr, A.n, A.m, 2);
        generateMatrix(B.arr, B.n, B.m, 3);
        generateMatrix(C.arr, C.n, C.m, 0);
        cudaEventRecord(start);
        // Matrix Multiplication without shared memory
        MatrixMultiplication(B, A, C);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&ms, start, stop);
        printf("Matrix Multiplication time: %f (ms)\n", ms);
        cudaEventRecord(start);
        // Matrix Multiplication with shared memory
        MatrixMultiplicationShared(B, A, C);
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);
        cudaEventElapsedTime(&ms, start, stop);
        printf("Matrix Multiplication shared time: %f (ms)\n", ms);
        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        free(a); free(b); free(c); free(Ae); free(Be); free(Ce);
    }
    return 0;
}
|
3,193 | // Streams
#include <iostream>
#include <sstream>
#include <fstream>
// Containers
#include <string>
#include <vector>
// Time
#include <chrono>
// C headers
#include <cmath>
#include <cstdlib>
#include <cstring>
// CUDA headers
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
// Byte-swaps a 32-bit value: MNIST IDX headers are big-endian, so each
// header int must be reversed on little-endian hosts.
int reverse_int32(const int i) {
    const unsigned int u = (unsigned int)i;
    return (int)(((u & 0xffu) << 24) |
                 (((u >> 8) & 0xffu) << 16) |
                 (((u >> 16) & 0xffu) << 8) |
                 ((u >> 24) & 0xffu));
}
// Loader for an MNIST IDX image file. Reads the big-endian header, then
// stores each image into a zero-initialised buffer enlarged by
// `padding_width` pixels on every side (zero padding), with pixel values
// scaled from [0, 255] to [0, 1].
struct image_t {
int magic_number;          // IDX magic read from the header
int size, row, column;     // image count; row/column are the PADDED dimensions after construction
float** data;              // data[i] = one padded image, row-major floats in [0, 1]
image_t(const char* file_path, const int padding_width) {
std::ifstream infile(file_path, std::ios::binary);
if (!infile.is_open()) {
printf("----ERROR: File open failure\n");
exit(1);
}
// Header: magic, count, rows, columns — all big-endian 32-bit ints.
infile.read((char*)&this->magic_number, sizeof(this->magic_number));
this->magic_number = reverse_int32(this->magic_number);
infile.read((char*)&this->size, sizeof(this->size));
this->size = reverse_int32(this->size);
infile.read((char*)&this->row, sizeof(this->row));
this->row = reverse_int32(this->row);
infile.read((char*)&this->column, sizeof(this->column));
this->column = reverse_int32(this->column);
int new_row = this->row + 2 * padding_width;
int new_column = this->column + 2 * padding_width;
this->data = (float**)malloc(sizeof(float*) * this->size);
for(int i = 0; i < this->size; i++) {
// Sized for the padded image; zeroed so the border stays 0.
this->data[i] = (float*)malloc(sizeof(float) * new_row * new_column);
memset(this->data[i], 0, sizeof(float) * new_row * new_column);
}
for(int image = 0; image < this->size; ++image) {
for(int i = 0; i < this->row; ++i) {
for(int j = 0; j < this->column; ++j) {
unsigned char temp = 0;
infile.read((char*)&temp, sizeof(temp));
// NOTE(review): the row offset is scaled by new_row rather than
// new_column — correct only while the padded image is square
// (true for 28x28 MNIST with equal padding).
this->data[image][(i + padding_width) * new_row + (j + padding_width)] = (float)temp * (float)((float)1/(float)255);
}
}
}
// From here on, row/column describe the padded geometry.
this->row = new_row;
this->column = new_column;
infile.close();
}
~image_t() {
for(int i = 0; i < size; i++) {
free(this->data[i]);
}
free(this->data);
}
};
// Loader for an MNIST IDX label file: big-endian header (magic, count)
// followed by one unsigned byte per label.
struct label_t {
int magic_number;  // IDX magic read from the header
int size;          // number of labels
int* data;         // labels widened to int
label_t(const char* file_path) {
std::ifstream infile(file_path, std::ios::binary);
if (!infile.is_open()) {
printf("----ERROR: File open failure\n");
exit(1);
}
infile.read((char*)&this->magic_number, sizeof(this->magic_number));
this->magic_number = reverse_int32(this->magic_number);
infile.read((char*)&this->size, sizeof(this->size));
this->size = reverse_int32(this->size);
this->data = (int*)malloc(sizeof(int) * this->size);
for(int i = 0; i < this->size; ++i) {
unsigned char temp = 0;
infile.read((char*)&temp, sizeof(temp));
this->data[i] = (int)temp;
}
infile.close();
}
~label_t() {
// NOTE(review): data is never initialised to nullptr, so this guard only
// holds because the constructor either sets it or exits the process.
if (this->data != nullptr) {
free(this->data);
}
}
};
// Per-image activation buffers for every stage of the LeNet-5 forward pass.
struct result_layers_t {
float input[1 * 32 * 32]; // input image (zero-padded to 32x32)
float conv_1[6 * 28 * 28]; // convolution layer 1 output
float sub_1[6 * 14 * 14]; // subsampling (pooling) layer 1 output
float conv_2[16 * 10 * 10]; // convolution layer 2 output
float sub_2[16 * 5 * 5]; // subsampling (pooling) layer 2 output
float full_1[120]; // fully connected layer 1 neurons
float full_2[84]; // fully connected layer 2 neurons
float output[10]; // class scores
};
// All trained LeNet-5 parameters, laid out in the exact order they appear
// in the text parameter file (parse_mask_bias_weight fills this struct by
// memcpy, so field order and sizes must match the file).
struct mask_bias_weight_t {
float mask_1[6 * 1 * 5 * 5]; // convolution masks, layer 1
float bias_1[6]; // biases, layer 1
float mask_2[16 * 6 * 5 * 5]; // convolution masks, layer 2
float bias_2[16]; // biases, layer 2
float weight_1[120 * 400]; // fully connected weights 1
float bias_3[120]; // biases 3
float weight_2[84 * 120]; // fully connected weights 2
float bias_4[84]; // biases 4
float weight_3[10 * 84]; // fully connected weights 3
float bias_5[10]; // biases 5
};
// Renders a 32x32 image as a text bitmap for eyeballing: filled square for
// pixels above 0.5, empty square otherwise, preceded by the label.
void print_mnist(const float* data, const int label) {
    std::cout << "Check data for label " << label << std::endl;
    for (int row = 0; row < 32; row++) {
        for (int col = 0; col < 32; col++) {
            std::cout << ((data[row * 32 + col] > 0.5f) ? "■" : "□");
        }
        std::cout << std::endl;
    }
}
// Parses the whitespace-separated parameter text file into `filter`.
// Lines that are empty or start with '#' are skipped; numbers are read in
// file order and memcpy'd over the struct, so the file layout must match
// mask_bias_weight_t exactly.
// Fixes over the original: parsing is capped at the struct's capacity
// (an over-long file previously overflowed the heap buffer), and the
// malloc result is checked.
void parse_mask_bias_weight(const char* file_path, mask_bias_weight_t* filter) {
    std::ifstream infile(file_path);
    if (!infile.is_open()) {
        printf("----ERROR: File open failure\n");
        exit(1);
    }
    // Number of floats the destination struct can hold.
    const size_t capacity = sizeof(mask_bias_weight_t) / sizeof(float);
    float* temp = (float*)malloc(sizeof(mask_bias_weight_t));
    if (temp == NULL) {
        printf("----ERROR: File open failure\n");
        exit(1);
    }
    std::string each_line;
    double num;
    size_t word_count = 0;
    while (std::getline(infile, each_line)) {
        if (each_line.length() == 0) { continue; }
        if (each_line[0] == '#') { continue; }  // comment line
        std::stringstream ss;
        ss.str(each_line);
        // Stop at capacity so extra numbers cannot overflow `temp`.
        while (word_count < capacity && ss >> num) {
            temp[word_count] = (float)num;
            word_count++;
        }
    }
    memcpy(filter, temp, sizeof(mask_bias_weight_t));
    free(temp);
    infile.close();
}
// Logistic sigmoid in single precision. expf and 1.0f keep the whole
// computation in float; the original's exp()/1.0 promoted every call to a
// double round trip, which is much slower on most GPUs.
__device__ float sigmoid(float x) {
    return 1.0f / (1.0f + expf(-x));
}
// Valid, stride-1 2-D convolution with one block per (output channel,
// input channel) pair: blockIdx.x = output channel m, blockIdx.y = input
// channel c, threadIdx.x = row h, threadIdx.y = column w. Each block
// stages its kernel slice and its input channel in shared memory (buffers
// sized for the largest case: 32x32 input, 5x5 kernel), then accumulates
// partial sums across input channels with atomicAdd — so `output` must be
// zeroed before the first launch.
__global__ void conv_forward(float* input, int input_count, int input_row, int input_column,
float* kernel, int kernel_count1, int kernel_count2, int kernel_row, int kernel_column,
float* output, int output_count, int output_row, int output_column)
{
__shared__ float s_kernel[5 * 5];
__shared__ float s_input[32 * 32];
int m = blockIdx.x; // output image number
int c = blockIdx.y; // input image number
int h = threadIdx.x; // input image height
int w = threadIdx.y; // input image width
// Stage this block's 5x5 kernel slice.
if (h < kernel_row && w < kernel_column) {
s_kernel[kernel_column * h + w]
= kernel[kernel_count2 * kernel_row * kernel_column * m + kernel_row * kernel_column * c + kernel_column * h + w];
}
// Stage this block's input channel.
if (h < input_row && w < input_column) {
s_input[input_column * h + w] = input[input_row * input_column * c + input_column * h + w];
}
__syncthreads(); // staging complete before any thread reads shared memory
// One output pixel per thread: window dot-product, then atomic
// accumulation so the c-blocks can sum their contributions.
if (h < output_row && w < output_column) {
float accumulated = 0;
for (int p = 0; p < kernel_row; p++) {
for (int q = 0; q < kernel_column; q++) {
accumulated += s_input[input_column * (h + p) + (w + q)] * s_kernel[kernel_column * p + q];
}
}
atomicAdd(&output[output_row * output_column * m + output_column * h + w], accumulated);
}
}
// LeNet-5 subsampling layer: non-overlapping 2x2 average pooling followed
// by a per-channel bias and sigmoid. blockIdx.x = channel, threadIdx.x/.y
// = input row/column; the shared buffer is sized for the largest input
// (28x28).
__global__ void pool_forward(float* input, int input_count, int input_row, int input_column,
float* bias,
float* output, int output_count, int output_row, int output_column)
{
int m = blockIdx.x; // output image number
int h = threadIdx.x; // input height
int w = threadIdx.y; // input width
__shared__ float s_input[28 * 28];
// Stage this block's channel.
if (h < input_row && w < input_column) {
s_input[input_column * h + w] = input[input_row * input_column * m + input_column * h + w];
}
__syncthreads();
float accumulated = 0;
// One output pixel per thread: average its 2x2 window, add bias, squash.
if (h < output_row && w < output_column) {
for (int p = 0; p < 2; p++) {
for (int q = 0; q < 2; q++) {
accumulated += s_input[input_column * (2 * h + p) + (2 * w + q)];
}
}
output[output_row * output_column * m + output_column * h + w] = sigmoid(accumulated / (float)4 + bias[m]);
}
}
// Fully connected layer: output[i] = sigmoid(W[i,:] . input + bias[i]).
// One thread per output neuron (threadIdx.x); threads with i >= weight_row
// idle.
// NOTE(review): accumulates with += into output, so output must be zeroed
// before the call (the launch path copies host buffers that are never
// explicitly zeroed — verify).
__device__ void full_forward(float* input,
float* weight, int weight_row, int weight_column,
float* bias,
float* output)
{
int i = threadIdx.x; // weight row
if (i < weight_row) {
for (int k = 0; k < weight_column; k++) {
output[i] += weight[weight_column * i + k] * input[k];
}
output[i] = sigmoid(output[i] + bias[i]);
}
}
// Runs the three fully connected LeNet-5 layers for one image per block
// (blockIdx.x selects the image). Launched with 120 threads — enough for
// the widest layer; full_forward masks off excess threads per layer.
// __syncthreads() between layers makes each layer's output complete before
// the next layer reads it.
__global__ void full_forward_total(result_layers_t* d_result, mask_bias_weight_t* d_filter) {
int image = blockIdx.x;
// Next neurons from the subsampling output with weights and biases.
full_forward(d_result[image].sub_2,
d_filter->weight_1, 120, 400,
d_filter->bias_3,
d_result[image].full_1);
__syncthreads();
// Next neurons from the previous layer with weights and biases.
full_forward(d_result[image].full_1,
d_filter->weight_2, 84, 120,
d_filter->bias_4,
d_result[image].full_2);
__syncthreads();
// Output scores from the final layer with weights and biases.
full_forward(d_result[image].full_2,
d_filter->weight_3, 10, 84,
d_filter->bias_5,
d_result[image].output);
__syncthreads();
}
// Returns the index of the largest element of input[0..size-1]; ties keep
// the earliest index. Returns 0 for empty/invalid input.
// Fix: the original seeded max_value with the magic sentinel -9.99, which
// silently returned index 0 whenever every element was below -9.99.
// Seeding with the first element makes the argmax correct for any values.
int find_max_index(float* input, const int size) {
    if (input == nullptr || size <= 0) {
        return 0;
    }
    int result = 0;
    float max_value = input[0];
    for (int i = 1; i < size; i++) {
        if (input[i] > max_value) {
            max_value = input[i];
            result = i;
        }
    }
    return result;
}
// Loads MNIST, stages the padded test images into per-image activation
// structs, runs the LeNet-5 forward pass on the GPU, and reports test
// accuracy and elapsed time.
// NOTE(review): conv_1/conv_2 (and the fully connected outputs) inside
// test_result come from malloc and are never zeroed before being copied to
// the device, yet conv_forward and full_forward ACCUMULATE into them —
// the results may be built on top of garbage; verify.
int main() {
// Load the MNIST files.
image_t train_image("/home/ic621/mnist/train-images-idx3-ubyte", 2);
label_t train_label("/home/ic621/mnist/train-labels-idx1-ubyte");
image_t test_image("/home/ic621/mnist/t10k-images-idx3-ubyte", 2);
label_t test_label("/home/ic621/mnist/t10k-labels-idx1-ubyte");
// Stage the padded images into the result_layers activation structs.
result_layers_t* train_result = (result_layers_t*)malloc(sizeof(result_layers_t) * train_image.size);
result_layers_t* test_result = (result_layers_t*)malloc(sizeof(result_layers_t) * test_image.size);
for (int image = 0; image < train_image.size; image++) {
for (int i = 0; i < train_image.row; i++) {
for (int j = 0; j < train_image.column; j++) {
train_result[image].input[32 * i + j] = train_image.data[image][i * train_image.row + j];
}
}
}
for (int image = 0; image < test_image.size; image++) {
for (int i = 0; i < test_image.row; i++) {
for (int j = 0; j < test_image.column; j++) {
test_result[image].input[32 * i + j] = test_image.data[image][i * test_image.row + j];
}
}
}
// Load the trained parameters.
mask_bias_weight_t filter;
parse_mask_bias_weight("text_out_lenet5.txt", &filter);
int total = test_image.size;
// Send test_result and the filter to the device.
result_layers_t* d_result;
mask_bias_weight_t* d_filter;
cudaMalloc(&d_result, sizeof(result_layers_t) * total);
cudaMalloc(&d_filter, sizeof(mask_bias_weight_t));
cudaMemcpy(d_result, test_result, sizeof(result_layers_t) * total, cudaMemcpyHostToDevice);
cudaMemcpy(d_filter, &filter, sizeof(mask_bias_weight_t), cudaMemcpyHostToDevice);
auto start = std::chrono::high_resolution_clock::now();
// NOTE(review): expressions like d_result[image].input only perform
// address arithmetic on the device pointer (array members decay without a
// load), which is why these host-side member accesses work — confirm.
for (int image = 0; image < total; image++)
{
// Convolution of the input image with the layer-1 masks.
{
dim3 blocks(6, 1, 1); // output channels x input channels
dim3 threads(32, 32, 1); // input height x width
conv_forward<<<blocks, threads>>>(d_result[image].input, 1, 32, 32,
d_filter->mask_1, 6, 1, 5, 5,
d_result[image].conv_1, 6, 28, 28);
}
// Subsampling of the convolution result with the layer-1 biases.
{
dim3 blocks(6, 1, 1); // output map count
dim3 threads(28, 28, 1); // input height x width
pool_forward<<<blocks, threads>>>(d_result[image].conv_1, 6, 28, 28,
d_filter->bias_1,
d_result[image].sub_1, 6, 14, 14);
}
// Convolution of the pooled maps with the layer-2 masks.
{
dim3 blocks(16, 6, 1); // output channels x input channels
dim3 threads(14, 14, 1); // input height x width
conv_forward<<<blocks, threads>>>(d_result[image].sub_1, 6, 14, 14,
d_filter->mask_2, 16, 6, 5, 5,
d_result[image].conv_2, 16, 10, 10);
}
// Subsampling of the convolution result with the layer-2 biases.
{
dim3 blocks(16, 1, 1); // output map count
dim3 threads(10, 10, 1); // input height x width
pool_forward<<<blocks, threads>>>(d_result[image].conv_2, 16, 10, 10,
d_filter->bias_2,
d_result[image].sub_2, 16, 5, 5);
}
}
// Fully connected layers: one block per image.
{
dim3 blocks(total, 1, 1); // total image count
dim3 threads(120, 1, 1); // widest layer's neuron count
full_forward_total<<<blocks, threads>>>(d_result, d_filter);
}
auto end = std::chrono::high_resolution_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::duration<double> >(end - start);
// Copy the device results back to the host (blocking copy synchronizes).
cudaMemcpy(test_result, d_result, sizeof(result_layers_t) * total, cudaMemcpyDeviceToHost);
// Compute the accuracy.
int accurate = 0;
for (int image = 0; image < total; image++) {
int result = find_max_index(test_result[image].output, 10);
int answer = test_label.data[image];
if (result == answer) {
accurate++;
}
}
printf("test data accuracy: %8lf\n", (float)accurate / (float)total);
printf("elapsed time : %8lf (sec)\n", elapsed.count());
free(train_result);
free(test_result);
return 0;
}
|
3,194 | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <curand_kernel.h>
extern "C"
{
// Initialises one curandState per thread. Seed varies with the global
// thread id so every thread gets a distinct stream.
__global__ void setup_kernel(curandState *state)
{
    // Flatten 2-D grid x 3-D block coordinates into one global thread id.
    const int block_id = blockIdx.x + blockIdx.y * gridDim.x;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int thread_in_block = threadIdx.z * (blockDim.x * blockDim.y)
                              + threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = block_id * threads_per_block + thread_in_block;
    // seed = 9131 + 17*idx, sequence = idx, offset = 0.
    curand_init(9131 + idx * 17, idx, 0, &state[idx]);
}
// Draws one Uniform(lo, hi) variate per thread into vals[idx] for idx < n,
// using the pre-initialised per-thread RNG states.
__global__ void runif_kernel(curandState *state, float *vals, int n, float lo, float hi)
{
    // Flatten 2-D grid x 3-D block coordinates into one global thread id.
    const int block_id = blockIdx.x + blockIdx.y * gridDim.x;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int thread_in_block = threadIdx.z * (blockDim.x * blockDim.y)
                              + threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = block_id * threads_per_block + thread_in_block;
    if (idx < n) {
        // curand_uniform returns a value in (0, 1]; rescale to (lo, hi].
        vals[idx] = lo + (hi - lo) * curand_uniform(&state[idx]);
    }
}
// Draws one Exponential(lambda) variate per thread into vals[idx] for
// idx < n, via inverse-CDF sampling: -log(U)/lambda with U ~ Uniform(0,1].
// curand_uniform never returns 0, so the log argument is always positive.
// Fix: logf keeps the computation in single precision — the original's
// log() promoted each sample to a double round trip.
__global__ void rexpo_kernel(curandState *state, float *vals, int n, float lambda)
{
    // Flatten 2-D grid x 3-D block coordinates into one global thread id.
    const int block_id = blockIdx.x + blockIdx.y * gridDim.x;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int thread_in_block = threadIdx.z * (blockDim.x * blockDim.y)
                              + threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = block_id * threads_per_block + thread_in_block;
    if (idx < n) {
        vals[idx] = -logf(curand_uniform(&state[idx])) / lambda;
    }
}
// Draws one Normal(mu, sigma) variate per thread into vals[idx] for
// idx < n, using the pre-initialised per-thread RNG states.
__global__ void rnorm_kernel(curandState *state, float *vals, int n, float mu, float sigma)
{
    // Flatten 2-D grid x 3-D block coordinates into one global thread id.
    const int block_id = blockIdx.x + blockIdx.y * gridDim.x;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int thread_in_block = threadIdx.z * (blockDim.x * blockDim.y)
                              + threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = block_id * threads_per_block + thread_in_block;
    if (idx < n) {
        // Scale-and-shift a standard normal draw.
        vals[idx] = mu + sigma * curand_normal(&state[idx]);
    }
}
// Draws one Poisson(lambda) variate per thread into vals[idx] for idx < n,
// using the pre-initialised per-thread RNG states.
__global__ void rpois_kernel(curandState *state, int *vals, int n, double lambda)
{
    // Flatten 2-D grid x 3-D block coordinates into one global thread id.
    const int block_id = blockIdx.x + blockIdx.y * gridDim.x;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int thread_in_block = threadIdx.z * (blockDim.x * blockDim.y)
                              + threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = block_id * threads_per_block + thread_in_block;
    if (idx < n) {
        vals[idx] = curand_poisson(&state[idx], lambda);
    }
}
/*
Few others are available for pseudo-RNGs:
curand_log_normal
curand_uniform_double (double precision uniforms)
curand_normal_double (double precision normals)
curand_log_normal_double (double precision log-normals)
curand_normal2 (pair of float normals)
curand_log_normal2 (pair of float log-normals)
curand_normal2_double (pair of double precision normals)
curand_log_normal2_double (pair of double precision log-normals)
*/
// Self-seeding variant of rnorm_kernel: builds a fresh local RNG state on
// every call (same seed formula as setup_kernel but sequence 0) and draws
// one Normal(mu, sigma) variate into vals[idx] for idx < n.
__global__ void rnorm_all_in_one_kernel(float *vals, int n, float mu, float sigma)
{
    // Flatten 2-D grid x 3-D block coordinates into one global thread id.
    const int block_id = blockIdx.x + blockIdx.y * gridDim.x;
    const int threads_per_block = blockDim.x * blockDim.y * blockDim.z;
    const int thread_in_block = threadIdx.z * (blockDim.x * blockDim.y)
                              + threadIdx.y * blockDim.x + threadIdx.x;
    const int idx = block_id * threads_per_block + thread_in_block;
    // Per-call local state: seed varies with idx, sequence and offset 0.
    curandState rng_state;
    curand_init(9131 + idx * 17, 0, 0, &rng_state);
    if (idx < n) {
        vals[idx] = mu + sigma * curand_normal(&rng_state);
    }
}
} // END extern
|
3,195 | #include <cstdio>
#include <cstdlib>
#include <cuda_runtime_api.h>
// Exit status = the HIGHEST compute capability among all CUDA devices,
// encoded as major*10 + minor (e.g. 75 for SM 7.5); -1 on any API failure,
// 0 when no device is found. (The original's local was named min_v, but
// the comparison keeps the maximum.)
int main(int argc, char *argv[])
{
    cudaDeviceProp prop;
    cudaError_t status;
    int device_count;
    int best_cc = 0;
    status = cudaGetDeviceCount(&device_count);
    if (status != cudaSuccess) {
        fprintf(stderr,"cudaGetDeviceCount() failed: %s\n", cudaGetErrorString(status));
        return -1;
    }
    for (int device_index = 0; device_index < device_count; ++device_index) {
        status = cudaGetDeviceProperties(&prop, device_index);
        if (status != cudaSuccess) {
            fprintf(stderr,"cudaGetDeviceProperties() for device %i failed: %s\n", device_index, cudaGetErrorString(status));
            return -1;
        }
        const int cc = prop.major * 10 + prop.minor;
        if (cc > best_cc) {
            best_cc = cc;
        }
    }
    return best_cc;
}
3,196 | #include <stdio.h>
#include <stdlib.h>
#include "cuda_runtime.h"
//#include "cuda.h"
//#include "cuda_runtime_api.h"
//#include "cuda_device_runtime_api.h"
// Each thread performs one pair-wise addition
// Each thread performs one pair-wise addition
// C[i] = A[i] + B[i], one element per thread; the guard handles the grid
// tail when n is not a multiple of the block size.
__global__
void vecAddKernel(const float* A, const float* B, float* C, int n) {
    const int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if (idx < n) {
        C[idx] = A[idx] + B[idx];
    }
}
// Reports a CUDA failure in this file's message style.
static void reportCudaError(cudaError_t err, int line) {
    printf("%s in %s at line %d \nYou can cry now", cudaGetErrorString(err), __FILE__, line);
}
// Host wrapper: copies h_A and h_B to the device, launches vecAddKernel
// with 256-thread blocks, and copies the sum back into h_C.
// Fixes over the original: every cudaMalloc is checked (only the first
// was), already-allocated buffers are freed on a later failure instead of
// leaking, and launch-configuration errors are surfaced.
void vecAdd(float* h_A, float* h_B, float* h_C, int n) {
    int size = n * sizeof(float);
    float *d_A = NULL, *d_B = NULL, *d_C = NULL;
    cudaError_t err;
    err = cudaMalloc((void**)&d_A, size);
    if (err != cudaSuccess) {
        reportCudaError(err, __LINE__);
        return;
    }
    err = cudaMalloc((void**)&d_B, size);
    if (err != cudaSuccess) {
        reportCudaError(err, __LINE__);
        cudaFree(d_A);
        return;
    }
    err = cudaMalloc((void**)&d_C, size);
    if (err != cudaSuccess) {
        reportCudaError(err, __LINE__);
        cudaFree(d_A);
        cudaFree(d_B);
        return;
    }
    cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
    vecAddKernel <<<ceil(n / 256.0), 256 >>> (d_A, d_B, d_C, n);
    err = cudaGetLastError();   // launch errors are not returned directly
    if (err != cudaSuccess) {
        reportCudaError(err, __LINE__);
    }
    // Blocking copy synchronizes with the kernel before h_C is read.
    cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
}
// Driver: adds two SIZE-element ramps (a[i]=b[i]=i) on the GPU and prints
// one element as a smoke check (c[5] should be 10).
int main(void) {
    int SIZE = 25600;
    float *a = (float*)malloc(SIZE * sizeof(float));
    float *b = (float*)malloc(SIZE * sizeof(float));
    float *c = (float*)malloc(SIZE * sizeof(float));
    for (int k = 0; k < SIZE; k++) {
        a[k] = k;
        b[k] = k;
        c[k] = 0;
    }
    vecAdd(a, b, c, SIZE);
    printf("c[5] = %f", c[5]);
    free(a);
    free(b);
    free(c);
}
3,197 | #include <stdlib.h>
#include <unistd.h>
#include <iostream>
#include <string>
#include <sstream>
using namespace std;
#include "cuda_runtime_api.h"
#define SIZE_OF_MATRIX 1000
#define SIZE_OF_BLOCK 16
#define M SIZE_OF_MATRIX
unsigned int m = SIZE_OF_MATRIX;
#define idx(i,j,lda) ((j) + ((i)*(lda)))
// Naive GEMM kernel: one thread computes one element of d_c = d_a * d_b
// for M x M matrices with leading dimension lda (idx macro is row-major).
// Threads whose row/col fall outside the matrix do nothing.
__global__ void multiply_matrices(float *d_a, float *d_b, float *d_c, int lda)
{
    const unsigned int row = threadIdx.y + blockDim.y * blockIdx.y;
    const unsigned int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (row < M && col < M)
    {
        // Dot product of d_a's row with d_b's column.
        float acc = 0.0;
        for (unsigned int k = 0; k < M; k++)
        {
            acc = acc + d_a[idx(row,k,lda)] * d_b[idx(k,col,lda)];
        }
        d_c[idx(row,col,lda)] = acc;
    }
}
// Tiled GEMM kernel: each block computes one SIZE_OF_BLOCK x SIZE_OF_BLOCK
// tile of d_c = d_a * d_b, staging one tile of each input in shared memory
// per iteration. Assumes M is an exact multiple of SIZE_OF_BLOCK and that
// the grid exactly covers the matrix (no bounds guard).
// NOTE(review): d_c[id] is stored on EVERY tile iteration, so intermediate
// partial sums hit global memory; only the final iteration's write is the
// complete result (correct, but wasteful).
__global__ void multiply_matrices_shared_blocks(float *d_a, float *d_b, float *d_c,
int lda)
{
int bs = SIZE_OF_BLOCK;
unsigned int row = threadIdx.y + blockDim.y * blockIdx.y;
unsigned int col = threadIdx.x + blockDim.x * blockIdx.x;
unsigned int id = idx(row,col,lda);
//submatrices
float *sub_a, *sub_b;
//shared submatrices
__shared__ float a[SIZE_OF_BLOCK][SIZE_OF_BLOCK], b[SIZE_OF_BLOCK][SIZE_OF_BLOCK];
//temp element of d_c
float c = 0;
//top-level row,col of block
int block_row = blockIdx.y * bs;
int block_col = blockIdx.x * bs;
//id inside each block
int sub_row = threadIdx.y;
int sub_col = threadIdx.x;
//for each block
for (int k = 0; k < (M / bs); k++)
{
sub_a = &d_a[idx(block_row, bs*k, lda)];
sub_b = &d_b[idx(bs*k, block_col, lda)];
a[sub_row][sub_col] = sub_a[idx(sub_row, sub_col, lda)];
b[sub_row][sub_col] = sub_b[idx(sub_row, sub_col, lda)];
//wait for all threads to complete copy to shared memory.
__syncthreads();
//multiply each submatrix
for (int j=0; j < bs; j++)
{
c = c + a[sub_row][j] * b[j][sub_col];
}
// move results to device memory.
d_c[id] = c;
// wait for multiplication to finish before moving onto the next submatrix.
__syncthreads();
}
}
// Launches the naive per-element matmul kernel on cStream and copies the
// result into a host staging buffer for inspection.
//
// Fixes over the original: the staging buffer was leaked, and the async
// device-to-host copy was never synchronized before the function returned
// (freeing or reusing pageable memory while a copy targets it is unsafe).
void multiply_by_element(dim3 grid, dim3 threads, float *d_a, float *d_b, float *d_c, int m, cudaStream_t cStream)
{
    cudaError err;
    unsigned int matsize = SIZE_OF_MATRIX*SIZE_OF_MATRIX*sizeof(float);
    float* c = (float*)malloc(matsize);
    if (c == NULL)
    {
        cout << "error: host allocation failed in multiply_by_element" << endl;
        return;
    }
    multiply_matrices<<< grid, threads, 0, cStream >>>(d_a, d_b, d_c, m);
    // Launch-configuration errors surface via cudaGetLastError().
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        cout << "error in kernel, " << cudaGetErrorString(err) << endl;
    }
    cudaStreamSynchronize(cStream);
    err = cudaMemcpyAsync(c, d_c, matsize, cudaMemcpyDeviceToHost, cStream);
    if (err != cudaSuccess)
    {
        cout << "error in memcpy, #=" << cudaGetErrorString(err) << endl;
    }
    // Wait for the copy to complete before releasing the buffer.
    cudaStreamSynchronize(cStream);
    free(c);
}
// Launches the shared-memory tiled matmul kernel on cStream and copies the
// result into a host staging buffer for inspection.
//
// Fixes over the original: the staging buffer was leaked, and the async
// device-to-host copy was never synchronized before the function returned
// (freeing or reusing pageable memory while a copy targets it is unsafe).
void multiply_by_block(dim3 grid, dim3 threads, float *d_a, float *d_b, float *d_c, int m, cudaStream_t cStream)
{
    cudaError err;
    unsigned int matsize = SIZE_OF_MATRIX*SIZE_OF_MATRIX*sizeof(float);
    float* c = (float*)malloc(matsize);
    if (c == NULL)
    {
        cout << "error: host allocation failed in multiply_by_block" << endl;
        return;
    }
    multiply_matrices_shared_blocks<<< grid, threads, 0, cStream >>>(d_a, d_b, d_c, m);
    // Launch-configuration errors surface via cudaGetLastError().
    err = cudaGetLastError();
    if (err != cudaSuccess)
    {
        cout << "error in kernel, " << cudaGetErrorString(err) << endl;
    }
    cudaStreamSynchronize(cStream);
    err = cudaMemcpyAsync(c, d_c, matsize, cudaMemcpyDeviceToHost, cStream);
    if (err != cudaSuccess)
    {
        cout << "error in memcpy, #=" << cudaGetErrorString(err) << endl;
    }
    // Wait for the copy to complete before releasing the buffer.
    cudaStreamSynchronize(cStream);
    free(c);
}
#define STREAM_INDEX(_d,_i) ((_d*number_of_iterations) + _i)
// Driver: multiplies two SIZE_OF_MATRIX x SIZE_OF_MATRIX matrices on one or
// more CUDA devices. Options:
//   -d <list>  restrict to devices whose (single-digit) id appears in <list>
//   -i <n>     run n iterations per device, one stream per iteration
//
// Fixes over the original:
//  * cudaSetDevice's return value in the work loop is assigned to err
//    before being checked (it was discarded, so a stale error was tested).
//  * Device buffers are freed per device; cudaFree was being handed the
//    host-side pointer arrays d_a/d_b/d_c themselves, not device pointers.
//  * Host matrices a, b, c are freed before exit.
int main(int argc, char** argv)
{
    unsigned int number_of_threads = min(SIZE_OF_MATRIX, SIZE_OF_BLOCK);
    unsigned int number_of_blocks;
    // One block per tile, rounded up so the whole matrix is covered.
    if (SIZE_OF_MATRIX > SIZE_OF_BLOCK)
        number_of_blocks = ceil(SIZE_OF_MATRIX / ((float) SIZE_OF_BLOCK));
    else
        number_of_blocks = 1;
    unsigned int matsize = SIZE_OF_MATRIX*SIZE_OF_MATRIX*sizeof(float);
    float* a = (float*)malloc(matsize);
    float* b = (float*)malloc(matsize);
    float* c = (float*)malloc(matsize);
    // Initialize inputs with a deterministic pattern; c starts zeroed.
    for (int i=0; i<m; i++) {
        for (int j=0; j<m; j++) {
            a[i*m+j] = i-j*2 + i-j+1 + 1;
            b[i*m+j] = i-j*2 + i-j+1 + 1;
            c[i*m+j] = 0;
        }
    }
    cudaError_t err;
    int count = 0;
    err = cudaGetDeviceCount(&count);
    cout << count << " devices found." << endl;
    string device_list("");
    int number_of_iterations = 1;
    // Parse -d (device list) and -i (iteration count).
    int opt = getopt(argc, argv, "d:i:");
    while(opt != -1) {
        stringstream str;
        switch(opt) {
        case 'd':
            device_list = string(optarg);
            break;
        case 'i':
            str << optarg;
            str >> number_of_iterations;
            cout << "Doing " << number_of_iterations << " iterations." << std::endl;
            break;
        case '?':
            if (optopt == 'd')
                cerr << "Error, option -d requires argument: comma delimted list of devices to run on." << endl;
            else if (optopt == 'i')
                cerr << "Error, option -i requires argument: number of iterations to run." << endl;
            else
                cerr << "Error, unknow option. Usage:\nmatmult [-d <device id>,...] [-i <number of iterations]" << endl;
            return 1;
        default:
            break;
        }
        opt = getopt(argc, argv, "d:i:");
    }
    int devices[count];
    int nDevices = 0;
    if (device_list.compare("") == 0)
    {
        // Default: use all detected devices.
        for (int d=0;d<count;d++)
        {
            devices[d] = d;
        }
        nDevices = count;
    }
    else
    {
        // Keep devices whose digit appears in the -d list.
        // NOTE(review): this matches single characters, so it only
        // distinguishes device ids 0-9.
        for (int d=0;d<count;d++)
        {
            stringstream str;
            str << d;
            char ch = 0;
            if (str >> ch) {
                if (device_list.find(ch) != string::npos) {
                    devices[nDevices++] = d;
                }
            }
        }
    }
    float *d_a[nDevices], *d_b[nDevices], *d_c[nDevices];
    cudaStream_t streams[nDevices * number_of_iterations];
    // Per-device setup: one stream per iteration plus three device buffers.
    for (int d=0;d<nDevices;d++)
    {
        cudaSetDevice(devices[d]);
        cudaDeviceProp deviceProp;
        cudaGetDeviceProperties(&deviceProp, devices[d]);
        cout << "Using device " << devices[d] << ", name: " << deviceProp.name << endl;
        err = cudaSetDevice(devices[d]);
        if (err != cudaSuccess)
        {
            cout << "error setting device, #=" << cudaGetErrorString(err) << endl;
        }
        for (int i=0; i<number_of_iterations; i++)
        {
            err = cudaStreamCreate(&streams[STREAM_INDEX(d,i)]);
            if (err != cudaSuccess)
            {
                cout << "error in stream creation, #=" << cudaGetErrorString(err) << endl;
            }
        }
        err = cudaMalloc((void **) &d_a[d], matsize);
        if (err != cudaSuccess)
        {
            cout << "error in malloc, #=" << cudaGetErrorString(err) << endl;
        }
        err = cudaMalloc((void **) &d_b[d], matsize);
        if (err != cudaSuccess)
        {
            cout << "error in malloc, #=" << cudaGetErrorString(err) << endl;
        }
        err = cudaMalloc((void **) &d_c[d], matsize);
        if (err != cudaSuccess)
        {
            cout << "error in malloc, #=" << cudaGetErrorString(err) << endl;
        }
    }
    // Work loop: upload inputs and run both kernels on each device/stream.
    for (int d=0; d<nDevices; d++)
    {
        for (int i=0; i<number_of_iterations; i++)
        {
            cout << "Running on device " << d << ", stream " << i << endl;
            int cDevice = d;
            cudaStream_t cStream = streams[STREAM_INDEX(d,i)];
            // BUG FIX: capture the return value so the check below tests
            // this call, not a stale error code.
            err = cudaSetDevice(devices[cDevice]);
            if (err != cudaSuccess) {
                cout << "error setting device: " << devices[i%nDevices] << " #=" << cudaGetErrorString(err) << endl;
            }
            err = cudaMemcpyAsync(d_a[cDevice], a, matsize, cudaMemcpyHostToDevice, cStream);
            if (err != cudaSuccess) {
                cout << "error in memcpy, #=" << cudaGetErrorString(err) << endl;
            }
            err = cudaMemcpyAsync(d_b[cDevice], b, matsize, cudaMemcpyHostToDevice, cStream);
            if (err != cudaSuccess) {
                cout << "error in memcpy, #=" << cudaGetErrorString(err) << endl;
            }
            dim3 grid(number_of_blocks, number_of_blocks);
            dim3 threads(number_of_threads, number_of_threads, 1);
            // Multiply one element per thread.
            multiply_by_element(grid, threads, d_a[cDevice], d_b[cDevice], d_c[cDevice], m, cStream);
            // Multiply via 16x16 shared-memory tiles.
            multiply_by_block(grid, threads, d_a[cDevice], d_b[cDevice], d_c[cDevice], m, cStream);
        }
    }
    cout << "Finished " << number_of_iterations << " iterations on " << nDevices << " devices." << endl;
    // Drain all streams before tearing anything down.
    for (int d=0;d<nDevices;d++) {
        cudaSetDevice(devices[d]);
        for (int i=0; i<number_of_iterations; i++) {
            cudaStreamSynchronize(streams[STREAM_INDEX(d,i)]);
        }
    }
    // Per-device cleanup: streams and device buffers.
    for (int d=0;d<nDevices;d++) {
        cudaSetDevice(devices[d]);
        for (int i=0; i<number_of_iterations; i++) {
            cudaStreamDestroy(streams[STREAM_INDEX(d,i)]);
        }
        // BUG FIX: free the actual device allocations for this device
        // (the original passed the host pointer arrays to cudaFree).
        cudaFree(d_a[d]);
        cudaFree(d_b[d]);
        cudaFree(d_c[d]);
    }
    free(a);
    free(b);
    free(c);
    return 0;
}
|
3,198 | #include <stdio.h>
#include <cuda.h>
#include<sys/time.h>
// Adds the scalar N to each element of vector (length vectorsize).
// One thread per element; excess threads in the last block are masked off.
__global__ void dkernel(unsigned *vector, unsigned vectorsize, int N) {
    const unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;
    if (tid < vectorsize) {
        vector[tid] += N;
    }
}
#define BLOCKSIZE 1024
// Benchmarks repeated launches of dkernel: for growing launch counts j,
// times j back-to-back launches and prints "<j>\t<seconds>".
//
// Fixes over the original:
//  * cudaDeviceSynchronize() before the end timestamp — launches are
//    asynchronous, so the original measured only enqueue time.
//  * vec is heap-allocated (the original used a variable-length array,
//    which is non-standard C++).
//  * vector, vec, and hvector are released before exit.
int main(int nn, char *str[]) {
    const unsigned long long N = 1024;
    unsigned *vector, *hvector;
    unsigned *vec = (unsigned *)malloc(N * sizeof(unsigned));
    for (unsigned long long i = 0; i < N; i++) {
        vec[i] = i;
    }
    cudaMalloc(&vector, N * sizeof(unsigned));
    cudaMemcpy(vector, vec, N * sizeof(unsigned), cudaMemcpyHostToDevice);
    hvector = (unsigned *)malloc(N * sizeof(unsigned));
    unsigned nblocks = ceil((float)N / BLOCKSIZE);
    // printf("nblocks = %d\n", nblocks);
    // Run the kernel j times per measurement, for increasing j.
    for(int j=1024;j<1000000;j+=10000) {
        struct timeval tv1, tv2;
        gettimeofday(&tv1, NULL);
        for(int i=0;i<j;i++)
            dkernel<<<nblocks, BLOCKSIZE>>>(vector, N,i);
        // Kernel launches are asynchronous: wait for completion so the
        // interval covers execution, not just launch overhead.
        cudaDeviceSynchronize();
        gettimeofday(&tv2, NULL);
        printf ("%d\t%f\n",j,
        (double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
        (double) (tv2.tv_sec - tv1.tv_sec));
    }
    cudaMemcpy(hvector, vector, N * sizeof(unsigned), cudaMemcpyDeviceToHost);
    /*for (unsigned ii = 0; ii < N; ++ii) {
    printf("%4d ", hvector[ii]);
    }*/
    cudaFree(vector);
    free(vec);
    free(hvector);
    return 0;
}
|
3,199 | #include "includes.h"
// Partial insertion sort used for k-nearest-neighbor selection.
// Each thread owns one column xIndex of a (height x width) distance matrix
// laid out with row stride dist_pitch (and a parallel index matrix with
// stride ind_pitch). After the kernel, the first k rows of that column hold
// the k smallest distances in ascending order, and p_ind holds their
// 1-based original row numbers.
// Launch: 1D grid/blocks over the width dimension; threads with
// xIndex >= width do nothing.
__global__ void cuInsertionSort(float *dist, int dist_pitch, int *ind, int ind_pitch, int width, int height, int k){
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex<width){
// Pointer shift to this thread's column; row r is then at offset
// r*dist_pitch (resp. r*ind_pitch).
p_dist = dist + xIndex;
p_ind = ind + xIndex;
// Row 0 starts as the current maximum of the sorted prefix; its
// original row number is 1 (indices are 1-based).
max_dist = p_dist[0];
p_ind[0] = 1;
// Part 1: insertion-sort the first k elements in place, ascending.
for (l=1; l<k; l++){
curr_row = l * dist_pitch;
curr_dist = p_dist[curr_row];
if (curr_dist<max_dist){
// Find insertion position i: first row whose distance exceeds
// curr_dist (defaults to l-1 if none is found earlier).
i=l-1;
for (int a=0; a<l-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
// Shift rows (i..l-1) down by one to open a slot at row i.
for (j=l; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
// Insert the element with its 1-based original row number.
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
}
else
// Already >= current max: leave it at row l, just record its index.
p_ind[l*ind_pitch] = l+1;
// The largest of the sorted prefix now sits at row l.
max_dist = p_dist[curr_row];
}
// Part 2: scan the remaining rows; insert any element smaller than the
// current k-th (largest kept) value into the sorted top-k, dropping the
// old maximum off the end.
max_row = (k-1)*dist_pitch;
for (l=k; l<height; l++){
curr_dist = p_dist[l*dist_pitch];
if (curr_dist<max_dist){
// Find insertion position i within the k sorted rows.
i=k-1;
for (int a=0; a<k-1; a++){
if (p_dist[a*dist_pitch]>curr_dist){
i=a;
break;
}
}
// Shift rows (i..k-2) down by one; row k-1 (the old max) falls out.
for (j=k-1; j>i; j--){
p_dist[j*dist_pitch] = p_dist[(j-1)*dist_pitch];
p_ind[j*ind_pitch] = p_ind[(j-1)*ind_pitch];
}
p_dist[i*dist_pitch] = curr_dist;
p_ind[i*ind_pitch] = l+1;
// Refresh the threshold from the new last kept row.
max_dist = p_dist[max_row];
}
}
}
}
3,200 | #include <iostream>
// Prints one greeting line per device thread (demo of device-side printf).
__global__
void helloWorldKernel() {
    const int tid = threadIdx.x;
    printf("Hello from Device, thread: %d\n", tid);
}
// Demonstrates that kernel launches are asynchronous: the second host
// message can appear before the device output, which is flushed when the
// host synchronizes.
int main() {
    std::cout << "(1) Hello from Host" << std::endl;
    const dim3 blocks(2);
    const dim3 threadsPerBlock(8);
    helloWorldKernel<<< blocks, threadsPerBlock >>>(); // returns immediately; runs async
    std::cout << "(2) Hello from Host" << std::endl;
    // Block until the kernel (and its printf output) completes.
    cudaDeviceSynchronize();
    cudaDeviceReset();
    return 0;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.